diff --git a/.drone.yml b/.drone.yml index fd968fe4ef..f8da804dd4 100644 --- a/.drone.yml +++ b/.drone.yml @@ -1,200 +1,246 @@ --- -# Quick checks to make before spending time on test and package.clone: -# on failure -> failed-pre-checks pipeline -# on success --> cargo-test (parallel) ---> [ test-package-success | test-package-failure ] -# \-> package (parallel) ----/ kind: pipeline type: docker -name: pre-checks +name: cargo-test + +environment: + RUSTC_WRAPPER: '/root/.cargo/bin/cachepot' + CACHEPOT_BUCKET: 'drone-sccache' + CACHEPOT_S3_KEY_PREFIX: ci + CACHEPOT_REGION: 'us-east-2' + CARGO_INCREMENTAL: '0' + +__buildenv: &buildenv + image: casperlabs/node-build-u1804 + volumes: + - name: rustup + path: "/root/.rustup" + - name: cargo + path: "/root/.cargo" + - name: drone + path: "/drone" + environment: + AWS_ACCESS_KEY_ID: + from_secret: cachepot_aws_ak + AWS_SECRET_ACCESS_KEY: + from_secret: cachepot_aws_sk -# Steps perform as fast serially, due to file thrashing. steps: - - name: cargo-fmt - image: casperlabs/node-build-u1804 - commands: - - rustup component add rustfmt - - cargo fmt --all -- --check - - - name: cargo-clippy - image: casperlabs/node-build-u1804 - environment: - RUSTFLAGS: '-D warnings' - commands: - - make setup-rs - - rustup component add clippy - - cargo clippy --all-targets --all-features --workspace - - - name: cargo-audit - image: casperlabs/node-build-u1804 - commands: - - cargo install cargo-audit - - cargo generate-lockfile - - cargo audit +- name: setup + <<: *buildenv + commands: + - make setup -trigger: - branch: - - master - - trying - - staging +# The below is duplicated for pull and push +# due to environment bug with caching. 
+- name: cargo-test-pr + <<: *buildenv + environment: + SCCACHE_S3_PUBLIC: true + commands: + - make check-std-features + - make check-testing-features + - make test CARGO_FLAGS=--release + - make test-contracts CARGO_FLAGS=--release + - cachepot --show-stats + when: + event: + - pull_request + +- name: cargo-test-push + <<: *buildenv + commands: + - make check-std-features + - make check-testing-features + - make test CARGO_FLAGS=--release + - make test-contracts CARGO_FLAGS=--release + - cachepot --show-stats + when: + event: + - push + +- name: notify + image: plugins/slack + settings: + webhook: + from_secret: slack_webhook + template: + - | + Cargo-Test Pipeline Status: *{{ uppercasefirst build.status }}* + Drone Build: <{{ build.link }}|#{{ build.number }}> + Commit Link: + when: + event: + - push + status: + - failure + branch: - dev - "release-*" - event: - exclude: - - tag + - "feat-*" ---- -# Failure state from pre-checks pipeline -kind: pipeline -type: docker -name: failed-pre-checks - -clone: - disable: true +volumes: +- name: rustup + temp: {} +- name: cargo + temp: {} +- name: drone + temp: {} -steps: - - name: notify - image: plugins/slack - settings: - webhook: - from_secret: slack_webhook - template: - - | - casper-node build status: *{{ uppercasefirst build.status }}* - Author: {{ build.author }} - Drone Build: <{{ build.link }}|#{{ build.number }}> - Commit Link: trigger: - status: - - failure branch: - - master - - trying - - staging - - dev - - "release-*" + - trying + - staging + - dev + - "release-*" + - "feat-*" event: + include: + - pull_request + - push exclude: - - tag - -depends_on: - - pre-checks + - tag + - cron --- -# Testing pipeline, runs in parallel with package pipeline kind: pipeline type: docker -name: cargo-test +name: nctl-testing -steps: -- name: updater-dry-run - image: casperlabs/node-build-u1804 - commands: - - cargo run --package=casper-updater -- --root-dir=. 
--dry-run +environment: + RUSTC_WRAPPER: '/root/.cargo/bin/cachepot' + CACHEPOT_BUCKET: 'drone-sccache' + CACHEPOT_S3_KEY_PREFIX: ci + CACHEPOT_REGION: 'us-east-2' + CARGO_INCREMENTAL: '0' -- name: cargo-test +__buildenv: &buildenv image: casperlabs/node-build-u1804 + volumes: + - name: rustup + path: "/root/.rustup" + - name: cargo + path: "/root/.cargo" + - name: drone + path: "/drone" + environment: + AWS_ACCESS_KEY_ID: + from_secret: cachepot_aws_ak + AWS_SECRET_ACCESS_KEY: + from_secret: cachepot_aws_sk + +steps: +- name: setup + <<: *buildenv commands: - make setup - - make test CARGO_FLAGS=--release - - make test-contracts CARGO_FLAGS=--release - - make test-fast-sync CARGO_FLAGS=--release + # `elfx86exts` is used to determine the CPU features used by the compiled node binary. + - cargo install elfx86exts --version 0.5.0 -- name: client-ffi-tests-and-examples - image: casperlabs/node-build-u1804 +- name: nctl-compile + <<: *buildenv + commands: + - bash -c ./ci/nctl_compile.sh + +- name: nctl-upgrade-test + <<: *buildenv + environment: + AWS_ACCESS_KEY_ID: + from_secret: put-drone-aws-ak + AWS_SECRET_ACCESS_KEY: + from_secret: put-drone-aws-sk commands: - - make setup-rs - - cmake -Hclient/examples/ffi -Btarget/build -DCMAKE_BUILD_TYPE=Debug - - cmake --build target/build - - ./target/build/ffi-tests - - ./target/build/get-auction-info - - ./target/build/put-deploy + - bash -c ./ci/nctl_upgrade.sh -depends_on: - - pre-checks +- name: check CPU features + <<: *buildenv + commands: + - ./ci/check_cpu_features.sh + +volumes: +- name: rustup + temp: {} +- name: cargo + temp: {} +- name: drone + temp: {} trigger: branch: - - master - - trying - - staging - - dev - - "release-*" + - trying + - staging event: + include: + - push exclude: - - tag + - pull_request + - tag + - cron --- -# Packaging pipeline, runs in parallel with cargo-test pipeline kind: pipeline type: docker name: package -steps: -- name: build-deb-update-revision +__buildenv: &buildenv image: 
casperlabs/node-build-u1804 - commands: - - "./update-rev.sh" - -- name: build-deb + volumes: + - name: rustup + path: "/root/.rustup" + - name: cargo + path: "/root/.cargo" + - name: drone + path: "/drone" + - name: nctl-temp-dir + path: "/tmp/nctl_upgrade_stage" + +__buildenv_upload: &buildenv_upload image: casperlabs/node-build-u1804 - commands: - - make setup-rs - - "make deb" + volumes: + - name: rustup + path: "/root/.rustup" + - name: cargo + path: "/root/.cargo" + - name: drone + path: "/drone" + - name: nctl-temp-dir + path: "/tmp/nctl_upgrade_stage" + environment: + AWS_ACCESS_KEY_ID: + from_secret: put-drone-aws-ak + AWS_SECRET_ACCESS_KEY: + from_secret: put-drone-aws-sk -- name: test-deb - image: ubuntu:bionic +steps: +- name: setup + <<: *buildenv commands: - - "apt update" - - "$(pwd)/ci/test_deb_install.sh $(pwd) casper-client" + - make setup -- name: put-drone-s3-cache - image: casperlabs/s3cmd-build:latest +- name: build-client-contracts + <<: *buildenv commands: - - ./ci/drone_s3_storage.sh put $(pwd)/target/debian/ debian/ - environment: - CL_VAULT_TOKEN: - from_secret: vault_token - CL_VAULT_HOST: - from_secret: vault_host - when: - branch: - - master - - dev - - "release-*" - event: - - push + - make build-client-contracts -- name: build-wasm-package-push-to-s3 +- name: stest-wasm-package-push-to-s3 image: casperlabs/s3cmd-build:latest - commands: - - "./build_wasm_package.sh" environment: - CL_VAULT_TOKEN: - from_secret: vault_token - CL_VAULT_HOST: - from_secret: vault_host - when: - branch: - - master - - dev - - "release-*" - event: - - push - -- name: build-upgrade-package - image: casperlabs/node-build-u1804 + AWS_ACCESS_KEY_ID: + from_secret: put-drone-aws-ak + AWS_SECRET_ACCESS_KEY: + from_secret: put-drone-aws-sk commands: - - "./ci/build_update_package.sh" + - "./build_wasm_package.sh" -- name: dry-run-publish - image: casperlabs/node-build-u1804 +- name: build-upgrade-package + <<: *buildenv commands: - - "cd types && cargo publish 
--dry-run" + - "./ci/build_update_package.sh" - name: upload-to-s3-genesis image: plugins/s3 settings: - bucket: 'genesis.casperlabs.io' + bucket: 'genesis.casper.network' region: 'us-east-2' access_key: from_secret: drone_genesis_key_id @@ -203,163 +249,35 @@ steps: source: "target/upgrade_build/**/*" strip_prefix: 'target/upgrade_build/' target: "/drone/${DRONE_COMMIT}/" - when: - branch: - - master - - dev - - "release-*" - event: - - push - -depends_on: - - pre-checks - -trigger: - branch: - - master - - trying - - staging - - dev - - "release-*" - event: - exclude: - - tag - ---- -# Run on success of cargo-test and package pipelines. -kind: pipeline -type: docker -name: test-package-success - -steps: - # Retrieving packages built and put in s3 from package pipeline. -- name: get-and-del-drone-s3-cache - image: casperlabs/s3cmd-build:latest - commands: - - ./ci/drone_s3_storage.sh get debian/ $(pwd)/target/debian/ - - ./ci/drone_s3_storage.sh del - environment: - CL_VAULT_TOKEN: - from_secret: vault_token - CL_VAULT_HOST: - from_secret: vault_host - when: - branch: - - master - - dev - - "release-*" - event: - - push - # we want to publish to the test repo, only when code is pushed to master or release-* branch. - # bors should make sure, that it has passed on staging or trying branches -- name: publish-test-bintray - image: casperlabs/node-build-u1804 - # Keeping casper-node from int test publish to allow networks stood up without casper-node-launcher for now. 
+- name: nctl-s3-build + <<: *buildenv_upload commands: - - "./upload.sh --repo-name casper-debian-tests --package-name casper-client" - environment: - CL_VAULT_TOKEN: - from_secret: vault_token - CL_VAULT_HOST: - from_secret: vault_host + - "aws s3 rm s3://nctl.casper.network/${DRONE_BRANCH} --recursive" + - "./ci/nctl_upgrade_stage.sh" when: branch: - - master - - dev - - "release-*" - event: - - push + - dev + - "release-*" -- name: publish-repo-test - image: casperlabs/aptly:latest - failure: ignore - environment: - AWS_SECRET_ACCESS_KEY: - from_secret: APTLY_SECRET_KEY - AWS_ACCESS_KEY_ID: - from_secret: APTLY_KEY_ID +- name: nctl-bucket-upload + image: plugins/s3-sync:latest settings: - repo_name: - from_secret: APTLY_REPO_NAME - region: - from_secret: APTLY_REGION - gpg_key: - from_secret: APTLY_GPG_KEY - gpg_pass: - from_secret: APTLY_GPG_PASS - distribution_id: - from_secret: APTLY_DISTRIBUTION_ID - acl: 'public-read' - prefix: 'releases' - deb_path: './target/debian' - deb_name: '*.deb' + bucket: 'nctl.casper.network' + access_key: + from_secret: put-drone-aws-ak + secret_key: + from_secret: put-drone-aws-sk + region: us-east-2 + source: '../../tmp/nctl_upgrade_stage/' + target: "/${DRONE_BRANCH}/" + volumes: + - name: nctl-temp-dir + path: /tmp/nctl_upgrade_stage when: branch: - - master - - dev - - "release-*" - event: - - push - -depends_on: - - cargo-test - - package - -trigger: - branch: - - master - - trying - - staging - dev - "release-*" - event: - exclude: - - tag - ---- -# Runs on failure of cargo-test or package pipelines. 
-kind: pipeline -type: docker -name: test-package-failure - -clone: - disable: true - -steps: -- name: del-s3-cache - image: casperlabs/s3cmd-build:latest - commands: - - ./ci/drone_s3_storage.sh del - environment: - CL_VAULT_TOKEN: - from_secret: vault_token - CL_VAULT_HOST: - from_secret: vault_host - when: - branch: - - master - - dev - - "release-*" - event: - - push - -# Build failed so remove the update_package candidate -#- name: del-upgrade_package-s3 -# image: casperlabs/s3cmd-build:latest -# commands: -# - ./ci/upgrade_package_s3_storage.sh del -# environment: -# CL_VAULT_TOKEN: -# from_secret: vault_token -# CL_VAULT_HOST: -# from_secret: vault_host -# when: -# branch: -# - master -# - "release-*" -# event: -# - push - name: notify image: plugins/slack @@ -367,83 +285,81 @@ steps: webhook: from_secret: slack_webhook template: - - | - casper-node build status: *{{ uppercasefirst build.status }}* - Author: {{ build.author }} - Drone Build: <{{ build.link }}|#{{ build.number }}> - Commit Link: + - | + Package Pipeline Status: *{{ uppercasefirst build.status }}* + Drone Build: <{{ build.link }}|#{{ build.number }}> + Commit Link: + when: + status: + - failure + +volumes: +- name: rustup + temp: {} +- name: cargo + temp: {} +- name: drone + temp: {} +- name: nctl-temp-dir + temp: {} trigger: - status: - - failure branch: - - master - - trying - - staging - - dev - - "release-*" + - dev + - "release-*" + - "feat-*" event: + include: + - push exclude: - - tag - -depends_on: - - cargo-test - - package + - pull_request + - tag + - cron -# TAGGING PIPELINES -# release-by-tag -# | (failure) -# failed-tag --- -# act on release - when the tag is created kind: pipeline type: docker name: release-by-tag -steps: -- name: build-deb +__buildenv: &buildenv image: casperlabs/node-build-u1804 - commands: - - "make deb" - -- name: publish-prod-bintray + volumes: + - name: rustup + path: "/root/.rustup" + - name: cargo + path: "/root/.cargo" + - name: drone + path: "/drone" + 
- name: nctl-temp-dir + path: "/tmp/nctl_upgrade_stage" + +__buildenv_upload: &buildenv_upload image: casperlabs/node-build-u1804 - commands: - - "./upload.sh --repo-name debian --package-name casper-client --package-tag true" - environment: - CL_VAULT_TOKEN: - from_secret: vault_token - CL_VAULT_HOST: - from_secret: vault_host - -- name: publish-repo-prod - image: casperlabs/aptly:latest - failure: ignore + volumes: + - name: rustup + path: "/root/.rustup" + - name: cargo + path: "/root/.cargo" + - name: drone + path: "/drone" + - name: nctl-temp-dir + path: "/tmp/nctl_upgrade_stage" environment: - AWS_SECRET_ACCESS_KEY: - from_secret: aptly_prod_secret_key AWS_ACCESS_KEY_ID: - from_secret: aptly_prod_key_id - settings: - repo_name: - from_secret: aptly_prod_repo - region: - from_secret: aptly_prod_region - gpg_key: - from_secret: aptly_prod_gpg_key - gpg_pass: - from_secret: aptly_prod_gpg_pass - distribution_id: - from_secret: aptly_prod_dist_id - acl: 'public-read' - prefix: 'releases' - deb_path: './target/debian' - deb_name: '*.deb' + from_secret: put-drone-aws-ak + AWS_SECRET_ACCESS_KEY: + from_secret: put-drone-aws-sk + +steps: +- name: setup + <<: *buildenv + commands: + - make setup - name: build-upgrade-package - image: casperlabs/node-build-u1804 + <<: *buildenv commands: - - "./ci/build_update_package.sh" + - "./ci/build_update_package.sh" - name: publish-github-pre-release image: plugins/github-release @@ -454,21 +370,38 @@ steps: - sha256 - md5 files: - - "./target/debian/*.deb" - - "./target/upgrade_build/*.gz" + - "./target/upgrade_build/*/bin.tar.gz" prerelease: - true - when: - ref: - - refs/tags/v* -- name: publish-crate - image: casperlabs/node-build-u1804 +- name: nctl-s3-build + <<: *buildenv_upload commands: - - "./ci/publish_to_crates_io.sh" + - "aws s3 rm s3://nctl.casper.network/${DRONE_TAG} --recursive" + - "./ci/nctl_upgrade_stage.sh" + +- name: nctl-bucket-upload + image: plugins/s3-sync:latest + settings: + bucket: 
'nctl.casper.network' + access_key: + from_secret: put-drone-aws-ak + secret_key: + from_secret: put-drone-aws-sk + region: us-east-2 + source: '../../tmp/nctl_upgrade_stage/' + target: "/${DRONE_TAG}/" + volumes: + - name: nctl-temp-dir + path: /tmp/nctl_upgrade_stage + +- name: publish-crates + <<: *buildenv environment: CARGO_TOKEN: from_secret: crates_io_token + commands: + - "./ci/publish_to_crates_io.sh" - name: as-contract-publish image: plugins/npm @@ -480,24 +413,12 @@ steps: email: from_secret: npm_email folder: - - "smart_contracts/contract_as" + - "smart_contracts/contract_as" fail_on_version_conflict: - - true + - true access: - - "public" + - "public" -trigger: - ref: - - refs/tags/v* ---- -kind: pipeline -type: docker -name: failed-tag - -clone: - disable: true - -steps: - name: notify image: plugins/slack settings: @@ -505,49 +426,24 @@ steps: from_secret: slack_webhook template: - | - casper-node build status: *{{ uppercasefirst build.status }}* - Author: {{ build.author }} - Drone Build: <{{ build.link }}|#{{ build.number }}> - Commit Link: -trigger: - status: - - failure - ref: - - refs/tags/v* - -depends_on: -- release-by-tag - ---- -kind: pipeline -type: docker -name: nightly-tests-cron - -steps: -- name: nctl-nighly-script - image: casperlabs/node-build-u1804 - commands: - - "python3 -m pip install supervisor toml" - - "apt update && apt install lsof -y" - - "bash -i ci/nightly-test.sh" - -- name: notify - image: plugins/slack - settings: - webhook: - from_secret: slack_webhook_nightly - template: - - | - Nightly Test Run Status: *{{ uppercasefirst build.status }}* - Author: {{ build.author }} + Casper-Node Release Status: *{{ uppercasefirst build.status }}* Drone Build: <{{ build.link }}|#{{ build.number }}> Commit Link: when: status: - failure - success - depends_on: - - nctl-nighly-script + +volumes: +- name: rustup + temp: {} +- name: cargo + temp: {} +- name: drone + temp: {} +- name: nctl-temp-dir + temp: {} trigger: - cron: [ 
nightly-tests-cron ] + ref: + - refs/tags/v* diff --git a/.github/ISSUE_TEMPLATE/feedback.yml b/.github/ISSUE_TEMPLATE/feedback.yml new file mode 100644 index 0000000000..471efaae9b --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feedback.yml @@ -0,0 +1,93 @@ +name: Condor Release Feedback / Issue Form +description: Please share your feedback or issues you face in incorporating Condor Release changes in your application/project. +title: '[Condor-Release]: Specify your feedback/issue briefly' +labels: + - condor-feedback +assignees: + - devendran-m + - piotr-dziubecki + - sacherjj + - cspramit + - SaiProServ +body: + - type: markdown + attributes: + value: | + This is a feedback form, to consolidate Condor feedback and/or issues. + - type: dropdown + id: type + attributes: + label: Category + description: Please choose the category that best describes your needs. + options: + - Feedback + - Bug/Issue + validations: + required: true + - type: input + id: email + attributes: + label: Your email address + placeholder: john@doe.com + validations: + required: true + - type: input + id: project-name + attributes: + label: Integration Project Name(Optional) + placeholder: CasperWallet + description: Name of the project with Casper, if applicable + validations: + required: false + - type: dropdown + id: casper-network + attributes: + label: Casper Network + description: Please choose the network or environment related to the feedback, bug, or issue. + options: + - Devnet + - Integration-Test + - Testnet + - Mainnet + validations: + required: true + - type: dropdown + id: node-functionality + attributes: + label: Node Functionality + description: Please specify the primary function of the node on the Casper Network related to the feedback or issue. 
+ options: + - Node + - JSON RPC + - Execution Engine + - SSE + - NCTL + - CCTL + - Validator + - Consensus + - Other + validations: + required: true + - type: textarea + id: feedback-issue + attributes: + label: Description + placeholder: Please elaborate your feedback/ bug or issue here. + description: Please provide a detailed description of your feedback, bug, or issue. + validations: + required: true + - type: input + id: date-since + attributes: + label: Date Issue Began(optional) + placeholder: dd/mm/yyyy + description: When did you first notice this issue? + validations: + required: false + - type: textarea + id: attachments + attributes: + label: Attachments (optional) + description: Please attach any logs, screenshots, or links that may help with the analysis. + validations: + required: false \ No newline at end of file diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 0000000000..3fb04c8107 --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,6 @@ +Please consider the following when creating a PR: + +* Provide a useful description of this PR, suitably verbose, aimed at helping reviewers and contributors +* Update all relevant changelogs +* Provide a link to the GitHub issue relating to this PR +* Identify if any downstream impact as in to, SDKs, SmartContracts etc diff --git a/.github/workflows/casper-node.yml b/.github/workflows/casper-node.yml new file mode 100644 index 0000000000..81cb35282a --- /dev/null +++ b/.github/workflows/casper-node.yml @@ -0,0 +1,67 @@ +--- +name: casper-node +# runs steps that are OK with normal rust based on ./rust-toolchain.toml +permissions: + contents: read + +on: + push: + branches: + - dev + - trying + - staging + - 'release-**' + - 'feat-**' + paths-ignore: + - '**.md' + + pull_request: + branches: + - dev + - 'release-**' + - 'feat-**' + paths-ignore: + - '**.md' + +jobs: + lints: + name: tests + runs-on: ubuntu-latest + steps: + - name: Checkout + 
uses: actions/checkout@v4 + with: + persist-credentials: false + + - name: setup + run: make setup + + - name: setup ubuntu + run: | + sudo apt-get -y install wabt + + - uses: Swatinem/rust-cache@v2 + + - name: check-format + run: make check-format + + - name: doc + run: make doc + + - name: lint + run: make lint + + - name: audit + run: make audit + + - name: check-std-features + run: make check-std-features + + - name: check-testing-features + run: make check-testing-features + + - name: test + run: make test CARGO_FLAGS=--release + + - name: test-contracts + run: make test-contracts CARGO_FLAGS=--release diff --git a/.github/workflows/lints-md.yml b/.github/workflows/lints-md.yml new file mode 100644 index 0000000000..27312d1ee8 --- /dev/null +++ b/.github/workflows/lints-md.yml @@ -0,0 +1,30 @@ +--- +name: lints +permissions: + contents: read + +on: + push: + branches: + - dev + - trying + - staging + - 'release-**' + - 'feat-**' + paths: + - '**.md' + + pull_request: + branches: + - dev + - 'release-**' + - 'feat-**' + paths: + - '**.md' + +jobs: + lints: + name: lints + runs-on: ubuntu-latest + steps: + - run: 'echo "Markdown only change, no lints required"' diff --git a/.github/workflows/manual-dev-net-upgrade.yml b/.github/workflows/manual-dev-net-upgrade.yml new file mode 100644 index 0000000000..8dc4ab3bb6 --- /dev/null +++ b/.github/workflows/manual-dev-net-upgrade.yml @@ -0,0 +1,34 @@ +name: Dev-Net deploy + +on: + pull_request: + branches: + - dev-build-test + workflow_dispatch: + inputs: + branch_to_run: + description: 'Branch to pull latest from' + type: string + default: 'dev' # Default to dev branch + required: true + +jobs: + publish_protocol: + runs-on: ubuntu-latest + + steps: + - name: Checkout branch + uses: actions/checkout@v4 + with: + ref: ${{ github.event.inputs.branch_to_run }} + + - name: Display Branch + run: echo "Running on branch ${{ github.event.inputs.branch_to_run }}" + + - name: Set CURRENT_HASH as environment variable + run: 
echo "CURRENT_HASH=${{ github.sha }}" >> $GITHUB_ENV + + - name: Run script + run: ./ci/dev_net_protocol_publish.sh + + # S3 upload \ No newline at end of file diff --git a/.github/workflows/publish-global-state-update-gen.yml b/.github/workflows/publish-global-state-update-gen.yml new file mode 100644 index 0000000000..05f03e8f18 --- /dev/null +++ b/.github/workflows/publish-global-state-update-gen.yml @@ -0,0 +1,71 @@ +--- +name: publish-global-state-update-gen +permissions: + contents: read + id-token: write + +on: + push: + tags: + - "v*" + +jobs: + publish_deb: + strategy: + matrix: + include: + - os: ubuntu-22.04 + code_name: jammy +# - os: ubuntu-24.04 +# code_name: noble + + runs-on: ${{ matrix.os }} + + steps: + - uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b #v3.0.2 + + - name: Configure AWS credentials + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: ${{ secrets.AWS_ACCESS_ROLE_REPO }} + role-session-name: GitHub_to_AWS_via_FederatedOIDC + aws-region: ${{ secrets.AWS_ACCESS_REGION_REPO }} + + - name: Install deps + run: | + echo "deb http://repo.aptly.info/ squeeze main" | sudo tee -a /etc/apt/sources.list.d/aptly.list + wget -qO - https://www.aptly.info/pubkey.txt | sudo apt-key add - + sudo apt-get update + sudo apt-get install -y aptly=1.4.0 + aptly config show + + - name: Import GPG key + uses: crazy-max/ghaction-import-gpg@c8bb57c57e8df1be8c73ff3d59deab1dbc00e0d1 #v5.1.0 + with: + gpg_private_key: ${{ secrets.APTLY_GPG_KEY }} + passphrase: ${{ secrets.APTLY_GPG_PASS }} + + - name: Install cargo deb + run: cargo install cargo-deb + + - name: Cargo build + run: cargo build -p global-state-update-gen --release + + - name: Cargo deb + run: cargo deb -p global-state-update-gen --no-build --variant ${{ matrix.code_name }} + + - name: Upload binaries to repo + env: + PLUGIN_REPO_NAME: ${{ secrets.AWS_BUCKET_REPO }} + PLUGIN_REGION: ${{ secrets.AWS_ACCESS_REGION_REPO }} + PLUGIN_GPG_KEY: ${{ secrets.APTLY_GPG_KEY 
}} + PLUGIN_GPG_PASS: ${{ secrets.APTLY_GPG_PASS }} + PLUGIN_ACL: 'private' + PLUGIN_PREFIX: 'releases' + PLUGIN_DEB_PATH: './target/debian' + PLUGIN_OS_CODENAME: ${{ matrix.code_name }} + run: ./ci/publish_deb_to_repo.sh + + - name: Invalidate CloudFront cache + run: | + aws cloudfront create-invalidation --distribution-id ${{ secrets.AWS_CLOUDFRONT_REPO }} --paths "/*" diff --git a/.github/workflows/publish-release-and-crates.yml b/.github/workflows/publish-release-and-crates.yml new file mode 100644 index 0000000000..d8fc73fdb0 --- /dev/null +++ b/.github/workflows/publish-release-and-crates.yml @@ -0,0 +1,51 @@ +--- +name: publish-release-and-crates +permissions: + contents: read + id-token: write + +on: + push: + tags: + - 'v*' + +jobs: + push_release_and_crates: + strategy: + matrix: + include: + - os: ubuntu-22.04 + code_name: jammy + + runs-on: ${{ matrix.os }} + + steps: + - uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b #v3.0.2 + + # jq python and python toml required for build_update_package.sh + - name: Install deps + run: | + sudo apt-get update + sudo apt-get install -y jq python3 python3-toml + python3 --version + + - name: Install cargo deb + run: cargo install cargo-deb + + - name: Build update package + run: ./ci/build_update_package.sh + + # Add config.tar.gz, bin.tar.gz to release + - name: Upload files to release + uses: svenstaro/upload-release-action@v2 + with: + repo_token: ${{ secrets.GITHUB_TOKEN }} + file: target/upgrade_build/*.tar.gz + tag: ${{ github.ref }} + overwrite: true + file_glob: true + + - name: Publish to crates.io + env: + CARGO_TOKEN: ${{ secrets.crates_io_token }} + run: ./ci/publish_to_crates.io diff --git a/.github/workflows/push-artifacts.yml b/.github/workflows/push-artifacts.yml new file mode 100644 index 0000000000..14a8111c49 --- /dev/null +++ b/.github/workflows/push-artifacts.yml @@ -0,0 +1,56 @@ +--- +name: push-artifacts +permissions: + contents: read + id-token: write + +on: + push: + branches: 
+ - dev + - 'feat-**' + - 'release-**' + +jobs: + push_artifacts: + strategy: + matrix: + include: + - os: ubuntu-22.04 + code_name: jammy + + runs-on: ${{ matrix.os }} + + steps: + - uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b #v3.0.2 + + # Assign AWS PROD role to get access to production cloudfronts and S3 buckets + - name: Configure AWS credentials + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: ${{ secrets.AWS_ACCESS_ROLE_GENESIS }} + role-session-name: GitHub_to_AWS_via_FederatedOIDC + aws-region: ${{ secrets.AWS_ACCESS_REGION_GENESIS }} + + # jq python and python toml required for build_update_package.sh + - name: Install deps + run: | + sudo apt-get update + sudo apt-get install -y jq python3 python3-toml + python3 --version + + - name: Install cargo deb + run: cargo install cargo-deb + + - name: Build update package + run: ./ci/build_update_package.sh + + - name: Upload artifacts to S3 + run: aws s3 sync ./target/upgrade_build/ s3://${{ secrets.AWS_BUCKET_GENESIS }}/artifacts/casper-node/$(git rev-parse HEAD)/ + + - name: Upload branch_name.latest file to S3 + run: aws s3 sync ./target/latest/ s3://${{ secrets.AWS_BUCKET_GENESIS }}/artifacts/casper-node/ + + # Required in case of overwrite + - name: Invalidate CloudFront cache + run: aws cloudfront create-invalidation --distribution-id ${{ secrets.AWS_CLOUDFRONT_GENESIS }} --paths "/artifacts/casper-node/*" diff --git a/.gitignore b/.gitignore index 4d4ccc4a11..655163f353 100644 --- a/.gitignore +++ b/.gitignore @@ -133,17 +133,37 @@ resources/production/*.wasm resources/node-storage/* resources/local/chainspec.toml +execution_engine_testing/test_support/resources/chainspec.toml + # CLion .idea/ cmake-build-debug/ # vscode .vscode/ +.dccache + +# utils data dirs +/utils/**/chain-download +/utils/**/lmdb-data + +# OS X +.DS_Store + +# Notes +notes + +# sw* files in vim +.*.sw* + +# disk use reports +**/disk_use_report.csv + +# index files for VSCode +.lh/* -# 
NCTL transient assets -/utils/nctl/assets -/utils/nctl/dumps -/utils/nctl/tmp +*.patch -# client c headers -client/headers/** +# direnv-related files +.envrc +.direnv/ diff --git a/Cargo.lock b/Cargo.lock index 96912ee2f8..17d8ac5795 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,17 +1,17 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. +version = 4 + [[package]] -name = "Inflector" -version = "0.11.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe438c63458706e03479442743baae6c88256498e6431708f6dfc520a26515d3" +name = "activate-bid" +version = "0.1.0" dependencies = [ - "lazy_static", - "regex", + "casper-contract", + "casper-types", ] [[package]] -name = "activate-bid" +name = "add-associated-key" version = "0.1.0" dependencies = [ "casper-contract", @@ -35,7 +35,7 @@ dependencies = [ ] [[package]] -name = "add-update-associated-key" +name = "add-reservations" version = "0.1.0" dependencies = [ "casper-contract", @@ -43,97 +43,79 @@ dependencies = [ ] [[package]] -name = "addr2line" -version = "0.14.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a55f82cfe485775d02112886f4169bde0c5894d75e79ead7eafe7e40a25e45f7" +name = "add-update-associated-key" +version = "0.1.0" dependencies = [ - "gimli", + "casper-contract", + "casper-types", ] [[package]] -name = "adler" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" - -[[package]] -name = "aead" -version = "0.3.2" +name = "addr2line" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fc95d1bdb8e6666b2b217308eeeb09f2d6728d104be3e31916cc74d15420331" +checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" dependencies = [ - "generic-array 0.14.4", + "gimli 0.31.1", ] [[package]] -name = "aes" -version = "0.5.0" +name = "adler2" 
+version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd2bc6d3f370b5666245ff421e231cba4353df936e26986d2918e61a8fd6aef6" -dependencies = [ - "aes-soft", - "aesni", - "block-cipher", -] +checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" [[package]] -name = "aes-gcm" -version = "0.7.0" +name = "ahash" +version = "0.8.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0301c9e9c443494d970a07885e8cf3e587bae8356a1d5abd0999068413f7205f" +checksum = "5a15f179cd60c4584b8a8c596927aadc462e27f2ca70c04e0071964a73ba7a75" dependencies = [ - "aead", - "aes", - "block-cipher", - "ghash", - "subtle 2.4.0", + "cfg-if 1.0.0", + "getrandom 0.3.3", + "once_cell", + "version_check", + "zerocopy", ] [[package]] -name = "aes-soft" -version = "0.5.0" +name = "aho-corasick" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "63dd91889c49327ad7ef3b500fd1109dbd3c509a03db0d4a9ce413b79f575cb6" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" dependencies = [ - "block-cipher", - "byteorder", - "opaque-debug 0.3.0", + "memchr", ] [[package]] -name = "aesni" -version = "0.8.0" +name = "aligned-vec" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a6fe808308bb07d393e2ea47780043ec47683fcf19cf5efc8ca51c50cc8c68a" +checksum = "dc890384c8602f339876ded803c97ad529f3842aba97f6392b3dba0dd171769b" dependencies = [ - "block-cipher", - "opaque-debug 0.3.0", + "equator", ] [[package]] -name = "ahash" -version = "0.4.7" +name = "alloc-no-stdlib" +version = "2.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "739f4a8db6605981345c5654f3a85b056ce52f37a39d34da03f25bf2151ea16e" +checksum = "cc7bb162ec39d46ab1ca8c77bf72e890535becd1751bb45f64c597edb4c8c6b3" [[package]] -name = "aho-corasick" -version = "0.7.15" +name = "alloc-stdlib" +version = "0.2.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7404febffaa47dac81aa44dba71523c9d069b1bdc50a77db41195149e17f68e5" +checksum = "94fb8275041c72129eb51b7d0322c29b8387a0386127718b096429201a5d6ece" dependencies = [ - "memchr", + "alloc-no-stdlib", ] [[package]] -name = "ansi_term" -version = "0.11.0" +name = "anes" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b" -dependencies = [ - "winapi 0.3.9", -] +checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "ansi_term" @@ -141,235 +123,149 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" dependencies = [ - "winapi 0.3.9", + "winapi", ] [[package]] -name = "anyhow" -version = "1.0.40" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28b2cd92db5cbd74e8e5028f7e27dd7aa3090e89e4f2a197cc7c8dfb69c7063b" - -[[package]] -name = "arrayref" -version = "0.3.6" +name = "anstream" +version = "0.6.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" +checksum = "8acc5369981196006228e28809f761875c0327210a891e941f4c683b3a99529b" +dependencies = [ + "anstyle", + "anstyle-parse", + "anstyle-query", + "anstyle-wincon", + "colorchoice", + "is_terminal_polyfill", + "utf8parse", +] [[package]] -name = "arrayvec" -version = "0.5.2" +name = "anstyle" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" +checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" [[package]] -name = "asn1_der" -version = "0.6.3" +name = "anstyle-parse" +version = "0.2.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fce6b6a0ffdafebd82c87e79e3f40e8d2c523e5fea5566ff6b90509bf98d638" +checksum = "3b2d16507662817a6a20a9ea92df6652ee4f94f914589377d69f3b21bc5798a9" dependencies = [ - "asn1_der_derive", + "utf8parse", ] [[package]] -name = "asn1_der_derive" -version = "0.1.2" +name = "anstyle-query" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d0864d84b8e07b145449be9a8537db86bf9de5ce03b913214694643b4743502" +checksum = "79947af37f4177cfead1110013d678905c37501914fba0efea834c3fe9a8d60c" dependencies = [ - "quote", - "syn", + "windows-sys 0.59.0", ] [[package]] -name = "assert_cmd" -version = "1.0.3" +name = "anstyle-wincon" +version = "3.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2475b58cd94eb4f70159f4fd8844ba3b807532fe3131b3373fae060bbe30396" +checksum = "ca3534e77181a9cc07539ad51f2141fe32f6c3ffd4df76db8ad92346b003ae4e" dependencies = [ - "bstr", - "doc-comment", - "predicates", - "predicates-core", - "predicates-tree", - "wait-timeout", + "anstyle", + "once_cell", + "windows-sys 0.59.0", ] [[package]] -name = "assert_matches" -version = "1.5.0" +name = "anyhow" +version = "1.0.98" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b34d609dfbaf33d6889b2b7106d3ca345eacad44200913df5ba02bfd31d2ba9" +checksum = "e16d2d3311acee920a9eb8d33b8cbc1787ce4a264e85f964c2404b969bdcd487" [[package]] -name = "async-channel" -version = "1.6.1" +name = "aquamarine" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2114d64672151c0c5eaa5e131ec84a74f06e1e559830dabba01ca30605d66319" +checksum = "a941c39708478e8eea39243b5983f1c42d2717b3620ee91f4a52115fd02ac43f" dependencies = [ - "concurrent-queue", - "event-listener", - "futures-core", + "itertools 0.9.0", + "proc-macro-error", + "proc-macro2", + "quote", + "syn 1.0.109", ] [[package]] -name = "async-executor" -version = 
"1.4.0" +name = "arrayref" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb877970c7b440ead138f6321a3b5395d6061183af779340b65e20c0fede9146" -dependencies = [ - "async-task", - "concurrent-queue", - "fastrand", - "futures-lite", - "once_cell", - "vec-arena", -] +checksum = "76a2e8124351fda1ef8aaaa3bbd7ebbcb486bbcd4225aca0aa0d84bb2db8fecb" [[package]] -name = "async-global-executor" -version = "2.0.2" +name = "arrayvec" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9586ec52317f36de58453159d48351bc244bc24ced3effc1fce22f3d48664af6" +checksum = "cd9fd44efafa8690358b7408d253adf110036b88f55672a933f01d616ad9b1b9" dependencies = [ - "async-channel", - "async-executor", - "async-io", - "async-mutex", - "blocking", - "futures-lite", - "num_cpus", - "once_cell", + "nodrop", ] [[package]] -name = "async-io" -version = "1.3.1" +name = "arrayvec" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9315f8f07556761c3e48fec2e6b276004acf426e6dc068b2c2251854d65ee0fd" -dependencies = [ - "concurrent-queue", - "fastrand", - "futures-lite", - "libc", - "log 0.4.14", - "nb-connect", - "once_cell", - "parking", - "polling", - "vec-arena", - "waker-fn", - "winapi 0.3.9", -] +checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" [[package]] -name = "async-lock" -version = "2.3.0" +name = "assert-json-diff" +version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1996609732bde4a9988bc42125f55f2af5f3c36370e27c778d5191a4a1b63bfb" +checksum = "47e4f2b81832e72834d7518d8487a0396a28cc408186a2e8854c0f98011faf12" dependencies = [ - "event-listener", + "serde", + "serde_json", ] [[package]] -name = "async-mutex" -version = "1.4.0" +name = "assert_matches" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"479db852db25d9dbf6204e6cb6253698f175c15726470f78af0d918e99d6156e" -dependencies = [ - "event-listener", -] +checksum = "9b34d609dfbaf33d6889b2b7106d3ca345eacad44200913df5ba02bfd31d2ba9" [[package]] -name = "async-std" -version = "1.9.0" +name = "async-compression" +version = "0.4.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9f06685bad74e0570f5213741bea82158279a4103d988e57bfada11ad230341" +checksum = "b37fc50485c4f3f736a4fb14199f6d5f5ba008d7f28fe710306c92780f004c07" dependencies = [ - "async-channel", - "async-global-executor", - "async-io", - "async-lock", - "crossbeam-utils 0.8.3", - "futures-channel", + "brotli", + "flate2", "futures-core", - "futures-io", - "futures-lite", - "gloo-timers", - "kv-log-macro", - "log 0.4.14", "memchr", - "num_cpus", - "once_cell", - "pin-project-lite 0.2.6", - "pin-utils", - "slab", - "wasm-bindgen-futures", + "pin-project-lite", + "tokio", ] -[[package]] -name = "async-task" -version = "4.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e91831deabf0d6d7ec49552e489aed63b7456a7a3c46cff62adad428110b0af0" - [[package]] name = "async-trait" -version = "0.1.48" +version = "0.1.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36ea56748e10732c49404c153638a15ec3d6211ec5ff35d9bb20e13b93576adf" +checksum = "e539d3fca749fcee5236ab05e93a52867dd549cc157c8cb7f99595f3cedffdb5" dependencies = [ "proc-macro2", "quote", - "syn", -] - -[[package]] -name = "asynchronous-codec" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0de5164e5edbf51c45fb8c2d9664ae1c095cce1b265ecf7569093c0d66ef690" -dependencies = [ - "bytes 1.0.1", - "futures-sink", - "futures-util", - "memchr", - "pin-project-lite 0.2.6", -] - -[[package]] -name = "atomic" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3410529e8288c463bedb5930f82833bc0c90e5d2fe639a56582a4d09220b281" 
-dependencies = [ - "autocfg", -] - -[[package]] -name = "atomic-shim" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d20fdac7156779a1a30d970e838195558b4810dd06aa69e7c7461bdc518edf9b" -dependencies = [ - "crossbeam", + "syn 2.0.101", ] -[[package]] -name = "atomic-waker" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "065374052e7df7ee4047b1160cca5e1467a12351a40b3da123c870ba0b8eda2a" - [[package]] name = "atty" version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" dependencies = [ - "hermit-abi", + "hermit-abi 0.1.19", "libc", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -388,32 +284,25 @@ dependencies = [ "casper-types", ] -[[package]] -name = "authorized-keys" -version = "0.1.0" -dependencies = [ - "casper-contract", - "casper-types", -] - [[package]] name = "autocfg" -version = "1.0.1" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" +checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" [[package]] name = "backtrace" -version = "0.3.56" +version = "0.3.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d117600f438b1707d4e4ae15d3595657288f8235a0eb593e80ecc98ab34e1bc" +checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" dependencies = [ "addr2line", "cfg-if 1.0.0", "libc", "miniz_oxide", - "object", + "object 0.36.7", "rustc-demangle", + "windows-targets 0.52.6", ] [[package]] @@ -422,306 +311,464 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d27c3610c36aee21ce8ac510e6224498de4228ad772a171ed65643a24693a5a8" +[[package]] +name = "base16ct" +version = "0.2.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf" + [[package]] name = "base64" -version = "0.13.0" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" + +[[package]] +name = "base64" +version = "0.21.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" +checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" + +[[package]] +name = "base64" +version = "0.22.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" + +[[package]] +name = "base64ct" +version = "1.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89e25b6adfb930f02d1981565a6e5d9c547ac15a96606256d3b59040e5cd4ca3" [[package]] name = "bincode" -version = "1.3.2" +version = "1.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d175dfa69e619905c4c3cdb7c3c203fa3bdd5d51184e3afdb2742c0280493772" +checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" dependencies = [ - "byteorder", "serde", ] +[[package]] +name = "bindgen" +version = "0.70.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f49d8fed880d473ea71efb9bf597651e77201bdd4893efe54c9e5d65ae04ce6f" +dependencies = [ + "bitflags 2.9.1", + "cexpr", + "clang-sys", + "itertools 0.13.0", + "log", + "prettyplease", + "proc-macro2", + "quote", + "regex", + "rustc-hash", + "shlex", + "syn 2.0.101", +] + [[package]] name = "bit-set" -version = "0.5.2" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e11e16035ea35e4e5997b393eacbf6f63983188f7a2ad25bfb13465f5ad59de" +checksum = 
"08807e080ed7f9d5433fa9b275196cfc35414f66a0c79d864dc51a0d825231a3" dependencies = [ "bit-vec", ] [[package]] name = "bit-vec" -version = "0.6.3" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb" +checksum = "5e764a1d40d510daf35e07be9eb06e75770908c27d411ee6c92109c9840eaaf7" [[package]] name = "bitflags" -version = "0.5.0" +version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f67931368edf3a9a51d29886d245f1c3db2f1ef0dcc9e35ff70341b78c10d23" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "1.2.1" +version = "2.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" +checksum = "1b8e56985ec62d17e9c1001dc89c88ecd7dc08e47eba5ec7c29c7b5eeecde967" [[package]] -name = "bitvec" -version = "0.18.5" +name = "blake2" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98fcd36dda4e17b7d7abc64cb549bf0201f4ab71e00700c798ca7e62ed3761fa" +checksum = "0a4e37d16930f5459780f5621038b6382b9bb37c19016f39fb6b5808d831f174" dependencies = [ - "funty", - "radium", - "wyz", + "crypto-mac", + "digest 0.9.0", + "opaque-debug", ] [[package]] name = "blake2" -version = "0.9.1" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10a5720225ef5daecf08657f23791354e1685a8c91a4c60c7f3d3b2892f978f4" +checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" dependencies = [ - "crypto-mac 0.8.0", - "digest 0.9.0", - "opaque-debug 0.3.0", + "digest 0.10.7", +] + +[[package]] +name = "blake2-rfc" +version = "0.2.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d6d530bdd2d52966a6d03b7a964add7ae1a288d25214066fd4b600f0f796400" +dependencies = [ + 
"arrayvec 0.4.12", + "constant_time_eq 0.1.5", ] [[package]] name = "blake2b" -version = "0.1.0" +version = "0.8.0" dependencies = [ "casper-contract", "casper-types", ] [[package]] -name = "blake2b_simd" -version = "0.5.11" +name = "blake3" +version = "1.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afa748e348ad3be8263be728124b24a24f268266f6f5d58af9d75f6a40b5c587" +checksum = "3888aaa89e4b2a40fca9848e400f6a658a5a3978de7be858e209cafa8be9a4a0" dependencies = [ "arrayref", - "arrayvec", - "constant_time_eq", + "arrayvec 0.7.6", + "cc", + "cfg-if 1.0.0", + "constant_time_eq 0.3.1", ] [[package]] -name = "blake2s_simd" -version = "0.5.11" +name = "block-buffer" +version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e461a7034e85b211a4acb57ee2e6730b32912b06c08cc242243c39fc21ae6a2" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" dependencies = [ - "arrayref", - "arrayvec", - "constant_time_eq", + "generic-array", ] [[package]] -name = "blake3" -version = "0.3.7" +name = "bnum" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9ff35b701f3914bdb8fad3368d822c766ef2858b2583198e41639b936f09d3f" +checksum = "119771309b95163ec7aaf79810da82f7cd0599c19722d48b9c03894dca833966" dependencies = [ - "arrayref", - "arrayvec", - "cc", - "cfg-if 0.1.10", - "constant_time_eq", - "crypto-mac 0.8.0", - "digest 0.9.0", + "borsh", + "num-integer", + "num-traits", ] [[package]] -name = "block-buffer" -version = "0.7.3" +name = "borsh" +version = "1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" +checksum = "ad8646f98db542e39fc66e68a20b2144f6a732636df7c2354e74645faaa433ce" dependencies = [ - "block-padding 0.1.5", - "byte-tools", - "byteorder", - "generic-array 0.12.4", + "borsh-derive", + "cfg_aliases", ] [[package]] -name = 
"block-buffer" -version = "0.9.0" +name = "borsh-derive" +version = "1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" +checksum = "fdd1d3c0c2f5833f22386f252fe8ed005c7f59fdcddeef025c01b4c3b9fd9ac3" dependencies = [ - "block-padding 0.2.1", - "generic-array 0.14.4", + "once_cell", + "proc-macro-crate", + "proc-macro2", + "quote", + "syn 2.0.101", ] [[package]] -name = "block-cipher" -version = "0.8.0" +name = "brotli" +version = "8.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f337a3e6da609650eb74e02bc9fac7b735049f7623ab12f2e4c719316fcc7e80" +checksum = "9991eea70ea4f293524138648e41ee89b0b2b12ddef3b255effa43c8056e0e0d" dependencies = [ - "generic-array 0.14.4", + "alloc-no-stdlib", + "alloc-stdlib", + "brotli-decompressor", ] [[package]] -name = "block-padding" -version = "0.1.5" +name = "brotli-decompressor" +version = "5.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa79dedbb091f449f1f39e53edf88d5dbe95f895dae6135a8d7b881fb5af73f5" +checksum = "874bb8112abecc98cbd6d81ea4fa7e94fb9449648c93cc89aa40c81c24d7de03" dependencies = [ - "byte-tools", + "alloc-no-stdlib", + "alloc-stdlib", ] [[package]] -name = "block-padding" -version = "0.2.1" +name = "bumpalo" +version = "3.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" +checksum = "1628fb46dfa0b37568d12e5edd512553eccf6a22a78e8bde00bb4aed84d5bdbf" [[package]] -name = "blocking" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5e170dbede1f740736619b776d7251cb1b9095c435c34d8ca9f57fcd2f335e9" +name = "burn" +version = "0.1.0" dependencies = [ - "async-channel", - "async-task", - "atomic-waker", - "fastrand", - "futures-lite", - "once_cell", + "casper-contract", + "casper-types", ] [[package]] -name = 
"bs58" -version = "0.4.0" +name = "bytecheck" +version = "0.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "771fe0050b883fcc3ea2359b1a96bcfbc090b7116eae7c3c512c7a083fdf23d3" +checksum = "23cdc57ce23ac53c931e88a43d06d070a6fd142f2617be5855eb75efc9beb1c2" +dependencies = [ + "bytecheck_derive 0.6.12", + "ptr_meta 0.1.4", + "simdutf8", +] [[package]] -name = "bstr" -version = "0.2.15" +name = "bytecheck" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a40b47ad93e1a5404e6c18dec46b628214fee441c70f4ab5d6942142cc268a3d" +checksum = "50690fb3370fb9fe3550372746084c46f2ac8c9685c583d2be10eefd89d3d1a3" dependencies = [ - "lazy_static", - "memchr", - "regex-automata", - "serde", + "bytecheck_derive 0.8.1", + "ptr_meta 0.3.0", + "rancor", + "simdutf8", ] [[package]] -name = "buf_redux" -version = "0.8.4" +name = "bytecheck_derive" +version = "0.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b953a6887648bb07a535631f2bc00fbdb2a2216f135552cb3f534ed136b9c07f" +checksum = "3db406d29fbcd95542e92559bed4d8ad92636d1ca8b3b72ede10b4bcc010e659" dependencies = [ - "memchr", - "safemem", + "proc-macro2", + "quote", + "syn 1.0.109", ] [[package]] -name = "bumpalo" -version = "3.6.1" +name = "bytecheck_derive" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "63396b8a4b9de3f4fdfb320ab6080762242f66a8ef174c49d8e19b674db4cdbe" +checksum = "efb7846e0cb180355c2dec69e721edafa36919850f1a9f52ffba4ebc0393cb71" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", +] [[package]] -name = "byte-tools" -version = "0.3.1" +name = "bytemuck" +version = "1.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" +checksum = "9134a6ef01ce4b366b50689c94f82c14bc72bc5d0386829828a2e2752ef7958c" [[package]] name = "byteorder" -version = "1.3.4" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de" - -[[package]] -name = "bytes" -version = "0.5.6" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.0.1" +version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b700ce4376041dcd0a327fd0097c41095743c4c8af8887265942faf1100bd040" +checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" [[package]] -name = "cache-padded" -version = "1.1.1" +name = "call-contract" +version = "0.1.0" +dependencies = [ + "casper-contract", + "casper-types", +] + +[[package]] +name = "call-package-version-by-hash" +version = "0.1.0" +dependencies = [ + "casper-contract", + "casper-types", +] + +[[package]] +name = "camino" +version = "1.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "631ae5198c9be5e753e5cc215e1bd73c2b466a3565173db433f52bb9d3e66dba" +checksum = "8b96ec4966b5813e2c0507c1f86115c8c5abaadc3980879c3424042a02fd1ad3" +dependencies = [ + "serde", +] [[package]] -name = "cargo-casper" -version = "1.0.0" +name = "cancel-reservations" +version = "0.1.0" dependencies = [ - "assert_cmd", - "clap", - "colour", - "once_cell", - "tempfile", - "toml", + "casper-contract", + "casper-types", ] [[package]] -name = "casper-client" -version = "1.0.0" +name = "cargo-platform" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e35af189006b9c0f00a064685c727031e3ed2d8020f7ba284d78cc2671bd36ea" dependencies = [ - "anyhow", - "base64", - "casper-execution-engine", - "casper-node", + "serde", +] + +[[package]] +name = "cargo_metadata" +version = "0.18.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d886547e41f740c616ae73108f6eb70afe6d940c7bc697cb30f13daec073037" +dependencies = [ + "camino", + "cargo-platform", + "semver", + "serde", + "serde_json", + "thiserror 1.0.69", +] + +[[package]] +name = "cargo_metadata" +version = "0.19.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd5eb614ed4c27c5d706420e4320fbe3216ab31fa1c33cd8246ac36dae4479ba" +dependencies = [ + "camino", + "cargo-platform", + "semver", + "serde", + "serde_json", + "thiserror 2.0.12", +] + +[[package]] +name = "casper-binary-port" +version = "1.1.1" +dependencies = [ + "bincode", + "bytes", "casper-types", - "cbindgen", - "clap", - "futures", - "hex", - "humantime", - "hyper 0.14.5", - "jsonrpc-lite", + "num-derive", + "num-traits", "once_cell", - "rand 0.8.3", - "reqwest 0.11.2", - "semver 0.11.0", + "rand", "serde", "serde_json", - "tempfile", - "thiserror", - "tokio 1.4.0", - "tower", - "warp", - "warp-json-rpc", + "serde_test", + "strum 0.27.1", + "strum_macros 0.27.1", + "thiserror 1.0.69", + "tokio-util 0.6.10", + "tracing", ] [[package]] name = "casper-contract" -version = "1.0.0" +version = "5.1.1" dependencies = [ "casper-types", "hex_fmt", - "thiserror", "version-sync", "wee_alloc", ] +[[package]] +name = "casper-contract-macros" +version = "0.1.3" +dependencies = [ + "blake2-rfc", + "casper-contract-sdk-sys", + "casper-executor-wasm-common", + "darling", + "paste", + "proc-macro2", + "quote", + "static_assertions", + "syn 2.0.101", +] + +[[package]] +name = "casper-contract-sdk" +version = "0.1.3" +dependencies = [ + "base16", + "bitflags 2.9.1", + "bnum", + "borsh", + "bytes", + "casper-contract-macros", + "casper-contract-sdk-sys", + "casper-executor-wasm-common", + "cfg-if 1.0.0", + "clap 4.5.38", + "const-fnv1a-hash", + "impl-trait-for-tuples", + "linkme", + "once_cell", + "rand", + "serde", + "serde_json", + "thiserror 2.0.12", +] + +[[package]] +name = 
"casper-contract-sdk-codegen" +version = "0.1.3" +dependencies = [ + "borsh", + "casper-contract-sdk", + "codegen", + "indexmap 2.9.0", + "serde", + "serde_json", + "syn 2.0.101", + "tempfile", + "trybuild", +] + +[[package]] +name = "casper-contract-sdk-sys" +version = "0.1.3" + [[package]] name = "casper-engine-test-support" -version = "1.0.0" +version = "8.1.1" dependencies = [ - "casper-contract", + "blake2 0.9.2", "casper-execution-engine", + "casper-storage", "casper-types", - "lmdb", - "log 0.4.14", - "num-rational 0.4.0", + "env_logger", + "filesize", + "humantime", + "lmdb-rkv", + "log", + "num-rational", "num-traits", "once_cell", - "rand 0.8.3", + "rand", + "serde", + "tempfile", + "toml 0.5.11", + "toml_edit 0.21.0", "version-sync", ] @@ -731,340 +778,663 @@ version = "0.1.0" dependencies = [ "assert_matches", "base16", - "casper-contract", "casper-engine-test-support", "casper-execution-engine", + "casper-storage", "casper-types", - "clap", + "casper-wasm", + "clap 2.34.0", "criterion", - "crossbeam-channel 0.5.0", - "env_logger", - "log 0.4.14", - "num-rational 0.4.0", + "dictionary", + "dictionary-call", + "ed25519-dalek", + "fs_extra", + "get-call-stack-recursive-subcall", + "gh-1470-regression", + "gh-1470-regression-call", + "lmdb-rkv", + "log", + "num-rational", "num-traits", "once_cell", - "parity-wasm", - "rand 0.8.3", + "rand", + "regex", + "serde", "serde_json", "tempfile", + "walrus", + "wasmprinter", + "wat", ] [[package]] name = "casper-execution-engine" -version = "1.0.0" +version = "8.1.1" dependencies = [ "anyhow", "assert_matches", "base16", "bincode", - "blake2", + "blake2 0.10.6", + "blake3", + "casper-storage", "casper-types", - "chrono", + "casper-wasm", + "casper-wasm-utils", + "casper-wasmi", + "clap 4.5.38", "criterion", "datasize", - "hex", - "hex-buffer-serde", + "either", + "hex-buffer-serde 0.2.2", "hex_fmt", "hostname", - "itertools 0.10.0", - "libc", + "humantime", + "itertools 0.10.5", "linked-hash-map", - "lmdb", - "log 
0.4.14", + "log", "num", "num-derive", - "num-rational 0.4.0", + "num-rational", "num-traits", + "num_cpus", "once_cell", - "parity-wasm", "proptest", - "pwasm-utils", - "rand 0.8.3", - "rand_chacha 0.3.0", + "rand", + "rand_chacha", "schemars", "serde", "serde_bytes", "serde_json", + "sha2", + "strum 0.24.1", "tempfile", - "thiserror", + "thiserror 1.0.69", + "toml 0.8.22", "tracing", "uint", - "uuid", - "wasmi", + "walrus", + "wat", +] + +[[package]] +name = "casper-executor-wasm" +version = "0.1.3" +dependencies = [ + "base16", + "blake2 0.10.6", + "borsh", + "bytes", + "casper-execution-engine", + "casper-executor-wasm-common", + "casper-executor-wasm-host", + "casper-executor-wasm-interface", + "casper-executor-wasmer-backend", + "casper-storage", + "casper-types", + "digest 0.10.7", + "fs_extra", + "itertools 0.14.0", + "once_cell", + "parking_lot", + "serde_json", + "tempfile", + "thiserror 2.0.12", + "tracing", +] + +[[package]] +name = "casper-executor-wasm-common" +version = "0.1.3" +dependencies = [ + "bitflags 2.9.1", + "blake2 0.10.6", + "borsh", + "casper-contract-sdk-sys", + "hex", + "num-derive", + "num-traits", + "safe-transmute", + "serde", + "thiserror 2.0.12", +] + +[[package]] +name = "casper-executor-wasm-host" +version = "0.1.3" +dependencies = [ + "base16", + "bytes", + "casper-executor-wasm-common", + "casper-executor-wasm-interface", + "casper-storage", + "casper-types", + "either", + "num-derive", + "num-traits", + "parking_lot", + "safe-transmute", + "thiserror 2.0.12", + "tracing", +] + +[[package]] +name = "casper-executor-wasm-interface" +version = "0.1.3" +dependencies = [ + "borsh", + "bytes", + "casper-executor-wasm-common", + "casper-storage", + "casper-types", + "parking_lot", + "thiserror 2.0.12", +] + +[[package]] +name = "casper-executor-wasmer-backend" +version = "0.1.3" +dependencies = [ + "bytes", + "casper-contract-sdk-sys", + "casper-executor-wasm-common", + "casper-executor-wasm-host", + "casper-executor-wasm-interface", 
+ "casper-storage", + "casper-types", + "regex", + "tracing", + "wasmer", + "wasmer-compiler-singlepass", + "wasmer-middlewares", + "wasmer-types", + "wat", ] [[package]] name = "casper-node" -version = "1.0.0" +version = "2.0.4" dependencies = [ - "ansi_term 0.12.1", + "ansi_term", "anyhow", + "aquamarine", + "assert-json-diff", "assert_matches", + "async-trait", "backtrace", "base16", - "base64", + "base64 0.13.1", "bincode", - "blake2", + "bytes", + "casper-binary-port", "casper-execution-engine", - "casper-node-macros", + "casper-executor-wasm", + "casper-executor-wasm-interface", + "casper-storage", "casper-types", - "chrono", "datasize", - "derive_more", - "derp", - "ed25519-dalek", + "derive_more 0.99.20", "either", - "enum-iterator", + "enum-iterator 0.6.0", + "erased-serde", "fake_instant", "fs2", + "fs_extra", "futures", "futures-io", - "getrandom 0.2.2", - "hex", - "hex-buffer-serde", + "hex-buffer-serde 0.3.0", "hex_fmt", "hostname", - "http", + "http 0.2.12", "humantime", - "hyper 0.14.5", - "itertools 0.10.0", - "jemalloc-ctl", - "jemallocator", - "k256", + "hyper", + "itertools 0.10.5", "libc", - "libp2p", "linked-hash-map", - "lmdb", - "log 0.4.14", - "multihash", + "lmdb-rkv", + "log", + "mio 0.8.11", "num", "num-derive", - "num-rational 0.4.0", + "num-rational", "num-traits", "num_cpus", "once_cell", "openssl", - "parking_lot", - "pem", + "pin-project", "pnet", + "pretty_assertions", "prometheus", "proptest", + "proptest-derive", "quanta", - "rand 0.8.3", - "rand_chacha 0.3.0", - "rand_core 0.6.2", - "rand_pcg", + "rand", + "rand_chacha", + "rand_core", "regex", - "reqwest 0.10.10", + "reqwest", "rmp-serde", "schemars", - "sd-notify", "serde", "serde-big-array", + "serde-map-to-array", "serde_bytes", "serde_json", "serde_repr", - "signal-hook 0.3.8", - "signature", + "shlex", + "signal-hook", + "signature 1.6.4", "smallvec", "static_assertions", + "stats_alloc", "structopt", + "strum 0.24.1", "sys-info", "tempfile", - "thiserror", - "tokio 1.4.0", 
+ "thiserror 1.0.69", + "tokio", "tokio-openssl", "tokio-serde", "tokio-stream", - "tokio-util 0.6.5", - "toml", + "tokio-util 0.6.10", + "toml 0.8.22", "tower", "tracing", "tracing-futures", "tracing-subscriber", "uint", - "untrusted", - "uuid", - "vergen", + "uuid 0.8.2", "warp", - "warp-json-rpc", "wheelbuf", ] [[package]] -name = "casper-node-macros" -version = "1.0.0" +name = "casper-storage" +version = "2.1.1" dependencies = [ - "Inflector", - "indexmap", - "proc-macro2", - "quote", - "syn", + "anyhow", + "assert_matches", + "base16", + "bincode", + "casper-types", + "criterion", + "datasize", + "either", + "itertools 0.10.5", + "linked-hash-map", + "lmdb-rkv", + "num", + "num-derive", + "num-rational", + "num-traits", + "once_cell", + "parking_lot", + "pprof", + "proptest", + "rand", + "rand_chacha", + "serde", + "serde_json", + "tempfile", + "thiserror 1.0.69", + "tracing", + "uuid 0.8.2", ] [[package]] name = "casper-types" -version = "1.0.0" +version = "6.0.1" dependencies = [ "base16", - "base64", + "base64 0.13.1", "bincode", - "bitflags 1.2.1", - "blake2", + "bitflags 1.3.2", + "blake2 0.9.2", "criterion", "datasize", - "displaydoc", + "derive_more 0.99.20", + "derp", "ed25519-dalek", - "getrandom 0.2.2", + "getrandom 0.2.16", "hex", "hex_fmt", + "humantime", + "itertools 0.10.5", "k256", + "libc", + "num", "num-derive", "num-integer", - "num-rational 0.4.0", + "num-rational", "num-traits", "once_cell", + "openssl", + "pem", "proptest", - "rand 0.8.3", + "proptest-attr-macro", + "proptest-derive", + "rand", + "rand_pcg", "schemars", "serde", + "serde-map-to-array", + "serde_bytes", "serde_json", "serde_test", - "thiserror", + "strum 0.27.1", + "tempfile", + "thiserror 1.0.69", + "tracing", "uint", + "untrusted 0.7.1", + "url", "version-sync", ] [[package]] name = "casper-updater" -version = "0.2.0" +version = "0.4.0" dependencies = [ - "casper-types", - "clap", + "clap 4.5.38", "once_cell", "regex", + "semver", ] [[package]] -name = "cast" -version = 
"0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b9434b9a5aa1450faa3f9cb14ea0e8c53bb5d2b3c1bfd1ab4fc03e9f33fbfb0" -dependencies = [ - "rustc_version 0.2.3", -] - -[[package]] -name = "cbindgen" -version = "0.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97449daf9b8c245bcad10bbc7c9f4a37c06172c18dd5f9fac340deefc309b957" +name = "casper-validation" +version = "0.1.0" dependencies = [ - "clap", - "heck", - "indexmap", - "log 0.4.14", - "proc-macro2", - "quote", + "anyhow", + "base16", + "casper-types", + "clap 3.2.25", + "derive_more 0.99.20", + "hex", "serde", "serde_json", - "syn", - "tempfile", - "toml", + "thiserror 1.0.69", ] [[package]] -name = "cc" -version = "1.0.67" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3c69b077ad434294d3ce9f1f6143a2a4b89a8a2d54ef813d85003a4fd1137fd" - -[[package]] -name = "cfg-if" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" - -[[package]] -name = "cfg-if" +name = "casper-wasm" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +checksum = "ae5f83854f18c7ac13e6a26899807ea842929ada0735071d1271fe182668c907" [[package]] -name = "chacha20" -version = "0.5.0" +name = "casper-wasm-utils" +version = "4.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "244fbce0d47e97e8ef2f63b81d5e05882cb518c68531eb33194990d7b7e85845" +checksum = "529aa2929d3df679a1a4d471f36478ba266298647245c24b451ba8a507122fdc" dependencies = [ - "stream-cipher", - "zeroize", + "byteorder", + "casper-wasm", + "log", ] [[package]] -name = "chacha20poly1305" -version = "0.6.0" +name = "casper-wasmi" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9bf18d374d66df0c05cdddd528a7db98f78c28e2519b120855c4f84c5027b1f5" +checksum = "52b097d82c4765906b58ae22a9349ac2c5b3f8da0b8a9cb8fb0b9e932b65196b" dependencies = [ - "aead", - "chacha20", - "poly1305", - "stream-cipher", - "zeroize", + "casper-wasm", + "casper-wasmi-core", + "casper-wasmi-validation", ] [[package]] -name = "chrono" -version = "0.4.19" +name = "casper-wasmi-core" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73" +checksum = "077494ae45ba9d6df93da1bc3e6e686c6f4cf8ea658f2a1152980edb2c0c60bd" dependencies = [ - "libc", - "num-integer", + "downcast-rs", + "libm", + "memory_units", + "num-rational", "num-traits", - "time", - "winapi 0.3.9", ] [[package]] -name = "clap" -version = "2.33.3" +name = "casper-wasmi-validation" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37e58ac78573c40708d45522f0d80fa2f01cc4f9b4e2bf749807255454312002" +checksum = "688a46c29a4e861e42a3e849c1df494b7e83255537b891739181753ee9786c97" dependencies = [ - "ansi_term 0.11.0", - "atty", - "bitflags 1.2.1", - "strsim", - "textwrap", - "unicode-width", - "vec_map", + "casper-wasm", ] [[package]] -name = "colour" -version = "0.6.0" +name = "cast" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a27e4532f26f510c24bb8477d963c0c3ef27e293c3b2c507cccb0536d493201a" -dependencies = [ - "crossterm", +checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" + +[[package]] +name = "cc" +version = "1.2.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f4ac86a9e5bc1e2b3449ab9d7d3a6a405e3d1bb28d7b9be8614f55846ae3766" +dependencies = [ + "shlex", +] + +[[package]] +name = "cexpr" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" +dependencies = [ + "nom", +] + +[[package]] +name = "cfg-if" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "cfg_aliases" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" + +[[package]] +name = "change_bid_public_key" +version = "0.1.0" +dependencies = [ + "casper-contract", + "casper-types", +] + +[[package]] +name = "ciborium" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e" +dependencies = [ + "ciborium-io", + "ciborium-ll", + "serde", +] + +[[package]] +name = "ciborium-io" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757" + +[[package]] +name = "ciborium-ll" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9" +dependencies = [ + "ciborium-io", + "half", +] + +[[package]] +name = "clang-sys" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4" +dependencies = [ + "glob 0.3.2", + "libc", + "libloading", +] + +[[package]] +name = "clap" +version = "2.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c" 
+dependencies = [ + "ansi_term", + "atty", + "bitflags 1.3.2", + "strsim 0.8.0", + "textwrap 0.11.0", + "unicode-width 0.1.14", + "vec_map", +] + +[[package]] +name = "clap" +version = "3.2.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ea181bf566f71cb9a5d17a59e1871af638180a18fb0035c92ae62b705207123" +dependencies = [ + "atty", + "bitflags 1.3.2", + "clap_derive 3.2.25", + "clap_lex 0.2.4", + "indexmap 1.9.3", + "once_cell", + "strsim 0.10.0", + "termcolor", + "textwrap 0.16.2", +] + +[[package]] +name = "clap" +version = "4.5.38" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed93b9805f8ba930df42c2590f05453d5ec36cbb85d018868a5b24d31f6ac000" +dependencies = [ + "clap_builder", + "clap_derive 4.5.32", +] + +[[package]] +name = "clap-cargo" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23b2ea69cefa96b848b73ad516ad1d59a195cdf9263087d977f648a818c8b43e" +dependencies = [ + "anstyle", + "cargo_metadata 0.18.1", + "clap 4.5.38", +] + +[[package]] +name = "clap_builder" +version = "4.5.38" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "379026ff283facf611b0ea629334361c4211d1b12ee01024eec1591133b04120" +dependencies = [ + "anstream", + "anstyle", + "clap_lex 0.7.4", + "strsim 0.11.1", + "terminal_size", +] + +[[package]] +name = "clap_derive" +version = "3.2.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae6371b8bdc8b7d3959e9cf7b22d4435ef3e79e138688421ec654acf8c81b008" +dependencies = [ + "heck 0.4.1", + "proc-macro-error", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "clap_derive" +version = "4.5.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09176aae279615badda0765c0c0b3f6ed53f4709118af73cf4655d85d1530cd7" +dependencies = [ + "heck 0.5.0", + "proc-macro2", + "quote", + "syn 2.0.101", +] + +[[package]] +name = "clap_lex" 
+version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2850f2f5a82cbf437dd5af4d49848fbdfc27c157c3d010345776f952765261c5" +dependencies = [ + "os_str_bytes", +] + +[[package]] +name = "clap_lex" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6" + +[[package]] +name = "cmake" +version = "0.1.54" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7caa3f9de89ddbe2c607f4101924c5abec803763ae9534e4f4d7d8f84aa81f0" +dependencies = [ + "cc", ] [[package]] -name = "concurrent-queue" -version = "1.2.2" +name = "codegen" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30ed07550be01594c6026cff2a1d7fe9c8f683caa798e12b68694ac9e88286a3" +checksum = "ff61280aed771c3070e7dcc9e050c66f1eb1e3b96431ba66f9f74641d02fc41d" dependencies = [ - "cache-padded", + "indexmap 1.9.3", ] +[[package]] +name = "colorchoice" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990" + +[[package]] +name = "const-fnv1a-hash" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32b13ea120a812beba79e34316b3942a857c86ec1593cb34f27bb28272ce2cca" + [[package]] name = "const-oid" -version = "0.4.5" +version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f6b64db6932c7e49332728e3a6bd82c6b7e16016607d20923b537c3bc4c0d5f" +checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" [[package]] name = "constant_time_eq" @@ -1072,6 +1442,12 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" +[[package]] +name = "constant_time_eq" +version = "0.3.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c74b8349d32d297c9134b8c88677813a227df8f779daa29bfc29c183fe3dca6" + [[package]] name = "contract-context" version = "0.1.0" @@ -1080,17 +1456,66 @@ dependencies = [ "casper-types", ] +[[package]] +name = "contract-funds" +version = "0.1.0" +dependencies = [ + "casper-contract", + "casper-types", +] + +[[package]] +name = "contract-funds-call" +version = "0.1.0" +dependencies = [ + "casper-contract", + "casper-types", +] + +[[package]] +name = "contract-messages-emitter" +version = "0.1.0" +dependencies = [ + "casper-contract", + "casper-types", +] + +[[package]] +name = "contract-messages-from-account" +version = "0.1.0" +dependencies = [ + "casper-contract", + "casper-types", +] + +[[package]] +name = "contract-messages-upgrader" +version = "0.1.0" +dependencies = [ + "casper-contract", + "casper-types", +] + [[package]] name = "convert_case" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" +[[package]] +name = "convert_case" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb402b8d4c85569410425650ce3eddc7d698ed96d39a73f941b08fb63082f1e7" +dependencies = [ + "unicode-segmentation", +] + [[package]] name = "core-foundation" -version = "0.9.1" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a89e2ae426ea83155dccf10c0fa6b1463ef6d5fcb44cee0b224a408fa640a62" +checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" dependencies = [ "core-foundation-sys", "libc", @@ -1098,12 +1523,25 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.2" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea221b5284a47e40033bf9b66f35f984ec0ea2931eb03505246cd27a963f981b" +checksum = 
"773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" + +[[package]] +name = "corosensei" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad067b451c08956709f8762dba86e049c124ea52858e3ab8d076ba2892caa437" +dependencies = [ + "autocfg", + "cfg-if 1.0.0", + "libc", + "scopeguard", + "windows-sys 0.59.0", +] [[package]] -name = "counter-define" +name = "counter-factory" version = "0.1.0" dependencies = [ "casper-contract", @@ -1111,22 +1549,36 @@ dependencies = [ ] [[package]] -name = "cpuid-bool" -version = "0.1.2" +name = "counter-installer" +version = "0.1.0" +dependencies = [ + "casper-contract", + "casper-types", +] + +[[package]] +name = "cpp_demangle" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8aebca1129a03dc6dc2b127edd729435bbc4a37e1d5f4d7513165089ceb02634" +checksum = "96e58d342ad113c2b878f16d5d034c03be492ae460cdbc02b7f0f2284d310c7d" +dependencies = [ + "cfg-if 1.0.0", +] [[package]] -name = "cpuid-bool" -version = "0.2.0" +name = "cpufeatures" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcb25d077389e53838a8158c8e99174c5a9d902dee4904320db714f3c653ffba" +checksum = "59ed5838eebb26a2bb2e58f6d5b5316989ae9d08bab10e0e6d103e656d1b0280" +dependencies = [ + "libc", +] [[package]] name = "crc32fast" -version = "1.2.1" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a" +checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3" dependencies = [ "cfg-if 1.0.0", ] @@ -1187,24 +1639,24 @@ dependencies = [ [[package]] name = "criterion" -version = "0.3.4" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab327ed7354547cc2ef43cbe20ef68b988e70b4b593cbd66a2a61733123a3d23" +checksum = 
"f2b12d017a929603d80db1831cd3a24082f8137ce19c69e6447f54f5fc8d692f" dependencies = [ - "atty", + "anes", "cast", - "clap", + "ciborium", + "clap 4.5.38", "criterion-plot", - "csv", - "itertools 0.10.0", - "lazy_static", + "is-terminal", + "itertools 0.10.5", "num-traits", + "once_cell", "oorandom", "plotters", "rayon", "regex", "serde", - "serde_cbor", "serde_derive", "serde_json", "tinytemplate", @@ -1213,280 +1665,301 @@ dependencies = [ [[package]] name = "criterion-plot" -version = "0.4.3" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e022feadec601fba1649cfa83586381a4ad31c6bf3a9ab7d408118b05dd9889d" +checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1" dependencies = [ "cast", - "itertools 0.9.0", + "itertools 0.10.5", ] [[package]] -name = "crossbeam" -version = "0.7.3" +name = "crossbeam-deque" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69323bff1fb41c635347b8ead484a5ca6c3f11914d784170b158d8449ab07f8e" +checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51" dependencies = [ - "cfg-if 0.1.10", - "crossbeam-channel 0.4.4", - "crossbeam-deque 0.7.3", - "crossbeam-epoch 0.8.2", - "crossbeam-queue", - "crossbeam-utils 0.7.2", + "crossbeam-epoch", + "crossbeam-utils", ] [[package]] -name = "crossbeam-channel" -version = "0.4.4" +name = "crossbeam-epoch" +version = "0.9.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b153fe7cbef478c567df0f972e02e6d736db11affe43dfc9c56a9374d1adfb87" +checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" dependencies = [ - "crossbeam-utils 0.7.2", - "maybe-uninit", + "crossbeam-utils", ] [[package]] -name = "crossbeam-channel" -version = "0.5.0" +name = "crossbeam-queue" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"dca26ee1f8d361640700bde38b2c37d8c22b3ce2d360e1fc1c74ea4b0aa7d775" +checksum = "0f58bbc28f91df819d0aa2a2c00cd19754769c2fad90579b3592b1c9ba7a3115" dependencies = [ - "cfg-if 1.0.0", - "crossbeam-utils 0.8.3", + "crossbeam-utils", ] [[package]] -name = "crossbeam-deque" -version = "0.7.3" +name = "crossbeam-utils" +version = "0.8.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f02af974daeee82218205558e51ec8768b48cf524bd01d550abe5573a608285" -dependencies = [ - "crossbeam-epoch 0.8.2", - "crossbeam-utils 0.7.2", - "maybe-uninit", -] +checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" [[package]] -name = "crossbeam-deque" -version = "0.8.0" +name = "crossterm" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94af6efb46fef72616855b036a624cf27ba656ffc9be1b9a3c931cfc7749a9a9" +checksum = "d8b9f2e4c67f833b660cdb0a3523065869fb35570177239812ed4c905aeff87b" dependencies = [ - "cfg-if 1.0.0", - "crossbeam-epoch 0.9.3", - "crossbeam-utils 0.8.3", + "bitflags 2.9.1", + "crossterm_winapi", + "derive_more 2.0.1", + "document-features", + "mio 1.0.3", + "parking_lot", + "rustix", + "signal-hook", + "signal-hook-mio", + "winapi", ] [[package]] -name = "crossbeam-epoch" -version = "0.8.2" +name = "crossterm_winapi" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "058ed274caafc1f60c4997b5fc07bf7dc7cca454af7c6e81edffe5f33f70dace" +checksum = "acdd7c62a3665c7f6830a51635d9ac9b23ed385797f70a83bb8bafe9c572ab2b" dependencies = [ - "autocfg", - "cfg-if 0.1.10", - "crossbeam-utils 0.7.2", - "lazy_static", - "maybe-uninit", - "memoffset 0.5.6", - "scopeguard", + "winapi", ] [[package]] -name = "crossbeam-epoch" -version = "0.9.3" +name = "crunchy" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2584f639eb95fea8c798496315b297cf81b9b58b6d30ab066a75455333cf4b12" +checksum = 
"43da5946c66ffcc7745f48db692ffbb10a83bfe0afd96235c5c2a4fb23994929" + +[[package]] +name = "crypto-bigint" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0dc92fb57ca44df6db8059111ab3af99a63d5d0f8375d9972e319a379c6bab76" dependencies = [ - "cfg-if 1.0.0", - "crossbeam-utils 0.8.3", - "lazy_static", - "memoffset 0.6.3", - "scopeguard", + "generic-array", + "rand_core", + "subtle", + "zeroize", ] [[package]] -name = "crossbeam-queue" -version = "0.2.3" +name = "crypto-common" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "774ba60a54c213d409d5353bda12d49cd68d14e45036a285234c8d6f91f92570" +checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ - "cfg-if 0.1.10", - "crossbeam-utils 0.7.2", - "maybe-uninit", + "generic-array", + "typenum", ] [[package]] -name = "crossbeam-utils" -version = "0.7.2" +name = "crypto-mac" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8" +checksum = "b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab" dependencies = [ - "autocfg", - "cfg-if 0.1.10", - "lazy_static", + "generic-array", + "subtle", ] [[package]] -name = "crossbeam-utils" -version = "0.8.3" +name = "ctor" +version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7e9d99fa91428effe99c5c6d4634cdeba32b8cf784fc428a2a687f61a952c49" +checksum = "6d2301688392eb071b0bf1a37be05c469d3cc4dbbd95df672fe28ab021e6a096" dependencies = [ - "autocfg", - "cfg-if 1.0.0", - "lazy_static", + "quote", + "syn 1.0.109", ] [[package]] -name = "crossterm" -version = "0.19.0" +name = "curve25519-dalek" +version = "4.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c36c10130df424b2f3552fcc2ddcd9b28a27b1e54b358b45874f88d1ca6888c" +checksum = 
"97fb8b7c4503de7d6ae7b42ab72a5a59857b4c937ec27a3d4539dba95b5ab2be" dependencies = [ - "bitflags 1.2.1", - "crossterm_winapi", - "lazy_static", - "libc", - "mio 0.7.11", - "parking_lot", - "signal-hook 0.1.17", - "winapi 0.3.9", + "cfg-if 1.0.0", + "cpufeatures", + "curve25519-dalek-derive", + "digest 0.10.7", + "fiat-crypto", + "rustc_version", + "subtle", + "zeroize", ] [[package]] -name = "crossterm_winapi" -version = "0.7.0" +name = "curve25519-dalek-derive" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0da8964ace4d3e4a044fd027919b2237000b24315a37c916f61809f1ff2140b9" +checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ - "winapi 0.3.9", + "proc-macro2", + "quote", + "syn 2.0.101", ] [[package]] -name = "crunchy" -version = "0.2.2" +name = "darling" +version = "0.20.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" +checksum = "fc7f46116c46ff9ab3eb1597a45688b6715c6e628b5c133e288e709a29bcb4ee" +dependencies = [ + "darling_core", + "darling_macro", +] [[package]] -name = "crypto-mac" -version = "0.7.0" +name = "darling_core" +version = "0.20.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4434400df11d95d556bac068ddfedd482915eb18fe8bea89bc80b6e4b1c179e5" +checksum = "0d00b9596d185e565c2207a0b01f8bd1a135483d02d9b7b0a54b11da8d53412e" dependencies = [ - "generic-array 0.12.4", - "subtle 1.0.0", + "fnv", + "ident_case", + "proc-macro2", + "quote", + "strsim 0.11.1", + "syn 2.0.101", ] [[package]] -name = "crypto-mac" -version = "0.8.0" +name = "darling_macro" +version = "0.20.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab" +checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" dependencies = [ - "generic-array 0.14.4", - 
"subtle 2.4.0", + "darling_core", + "quote", + "syn 2.0.101", ] [[package]] -name = "crypto-mac" -version = "0.10.0" +name = "dashmap" +version = "6.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4857fd85a0c34b3c3297875b747c1e02e06b6a0ea32dd892d8192b9ce0813ea6" +checksum = "5041cc499144891f3790297212f32a74fb938e5136a14943f338ef9e0ae276cf" dependencies = [ - "generic-array 0.14.4", - "subtle 2.4.0", + "cfg-if 1.0.0", + "crossbeam-utils", + "hashbrown 0.14.5", + "lock_api", + "once_cell", + "parking_lot_core", ] [[package]] -name = "csv" -version = "1.1.6" +name = "data-encoding" +version = "2.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a2330da5de22e8a3cb63252ce2abb30116bf5265e89c0e01bc17015ce30a476" + +[[package]] +name = "datasize" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22813a6dc45b335f9bade10bf7271dc477e81113e89eb251a0bc2a8a81c536e1" +checksum = "e65c07d59e45d77a8bda53458c24a828893a99ac6cdd9c84111e09176ab739a2" dependencies = [ - "bstr", - "csv-core", - "itoa", - "ryu", + "datasize_derive", + "fake_instant", + "futures", "serde", + "smallvec", ] [[package]] -name = "csv-core" -version = "0.1.10" +name = "datasize_derive" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b2466559f260f48ad25fe6317b3c8dac77b5bdb5763ac7d9d6103530663bc90" +checksum = "613e4ee15899913285b7612004bbd490abd605be7b11d35afada5902fb6b91d5" dependencies = [ - "memchr", + "proc-macro2", + "quote", + "syn 1.0.109", ] [[package]] -name = "ctor" -version = "0.1.20" +name = "debugid" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e98e2ad1a782e33928b96fc3948e7c355e5af34ba4de7670fe8bac2a3b2006d" +checksum = "bef552e6f588e446098f6ba40d89ac146c8c7b64aade83c051ee00bb5d2bc18d" dependencies = [ - "quote", - "syn", + "uuid 1.16.0", ] [[package]] -name = "cuckoofilter" 
-version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b810a8449931679f64cd7eef1bbd0fa315801b6d5d9cdc1ace2804d6529eee18" +name = "delegate" +version = "0.1.0" dependencies = [ - "byteorder", - "fnv", - "rand 0.7.3", + "casper-contract", + "casper-types", ] [[package]] -name = "curve25519-dalek" -version = "3.0.2" +name = "der" +version = "0.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f627126b946c25a4638eec0ea634fc52506dea98db118aae985118ce7c3d723f" +checksum = "e7c1832837b905bbfb5101e07cc24c8deddf52f93225eee6ead5f4d63d53ddcb" dependencies = [ - "byteorder", - "digest 0.9.0", - "rand_core 0.5.1", - "subtle 2.4.0", + "const-oid", "zeroize", ] [[package]] -name = "data-encoding" -version = "2.3.2" +name = "derive_more" +version = "0.99.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ee2393c4a91429dffb4bedf19f4d6abf27d8a732c8ce4980305d782e5426d57" +checksum = "6edb4b64a43d977b8e99788fe3a04d483834fba1215a7e02caa415b626497f7f" +dependencies = [ + "convert_case 0.4.0", + "proc-macro2", + "quote", + "rustc_version", + "syn 2.0.101", +] [[package]] -name = "datasize" -version = "0.2.9" +name = "derive_more" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4cfa50a16bc31c1e8d1682876a26aa205e6669ac65645ae484064cbbc5263abc" +checksum = "093242cf7570c207c83073cf82f79706fe7b8317e98620a47d5be7c3d8497678" dependencies = [ - "datasize_derive", - "fake_instant", - "futures", - "serde", - "smallvec", + "derive_more-impl", ] [[package]] -name = "datasize_derive" -version = "0.2.9" +name = "derive_more-impl" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ebcbe9ac751b6e1700a10201b44ae32fa36396b46849fdb4f7ec5fb86326de3" +checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3" dependencies = [ + "convert_case 0.7.1", "proc-macro2", "quote", - "syn", + 
"syn 2.0.101", ] [[package]] -name = "delegate" +name = "derp" +version = "0.0.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9b84cfd9b6fa437e498215e5625e9e3ae3bf9bb54d623028a181c40820db169" +dependencies = [ + "untrusted 0.7.1", +] + +[[package]] +name = "deserialize-error" version = "0.1.0" dependencies = [ "casper-contract", @@ -1494,37 +1967,32 @@ dependencies = [ ] [[package]] -name = "der" +name = "dictionary" version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51f59c66c30bb7445c8320a5f9233e437e3572368099f25532a59054328899b4" dependencies = [ - "const-oid", + "casper-contract", + "casper-types", ] [[package]] -name = "derive_more" -version = "0.99.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f82b1b72f1263f214c0f823371768776c4f5841b942c9883aa8e5ec584fd0ba6" +name = "dictionary-call" +version = "0.1.0" dependencies = [ - "convert_case", - "proc-macro2", - "quote", - "syn", + "casper-contract", + "casper-types", + "dictionary", ] [[package]] -name = "derp" -version = "0.0.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9b84cfd9b6fa437e498215e5625e9e3ae3bf9bb54d623028a181c40820db169" +name = "dictionary-item-key-length" +version = "0.1.0" dependencies = [ - "untrusted", + "casper-contract", + "casper-types", ] [[package]] -name = "deserialize-error" +name = "dictionary-read" version = "0.1.0" dependencies = [ "casper-contract", @@ -1532,48 +2000,57 @@ dependencies = [ ] [[package]] -name = "difference" -version = "2.0.0" +name = "diff" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "524cbf6897b527295dff137cec09ecf3a05f4fddffd7dfcd1585403449e74198" +checksum = "56254986775e3233ffa9c4d7d3faaf6d36a2c09d30b20687e9f88bc8bafc16c8" [[package]] name = "digest" -version = "0.8.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5" +checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" dependencies = [ - "generic-array 0.12.4", + "generic-array", ] [[package]] name = "digest" -version = "0.9.0" +version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ - "generic-array 0.14.4", + "block-buffer", + "const-oid", + "crypto-common", + "subtle", ] [[package]] -name = "displaydoc" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adc2ab4d5a16117f9029e9a6b5e4e79f4c67f6519bc134210d4d4a04ba31f41b" +name = "disable-contract" +version = "0.1.0" dependencies = [ - "proc-macro2", - "quote", - "syn", + "casper-contract", + "casper-types", ] [[package]] -name = "dns-parser" -version = "0.8.0" +name = "disable-contract-by-contract-hash" +version = "0.1.0" +dependencies = [ + "casper-contract", + "casper-types", +] + +[[package]] +name = "displaydoc" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4d33be9473d06f75f58220f71f7a9317aca647dc061dbd3c361b0bef505fbea" +checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ - "byteorder", - "quick-error 1.2.3", + "proc-macro2", + "quote", + "syn 2.0.101", ] [[package]] @@ -1609,69 +2086,99 @@ dependencies = [ ] [[package]] -name = "doc-comment" -version = "0.3.3" +name = "document-features" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" +checksum = "95249b50c6c185bee49034bcb378a49dc2b5dff0be90ff6616d31d64febab05d" +dependencies = [ + "litrs", +] [[package]] name = "downcast-rs" -version = "1.2.0" +version = "1.2.1" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ea835d29036a4087793836fa931b08837ad5e957da9e23886b29586fb9b6650" +checksum = "75b325c5dbd37f80359721ad39aca5a29fb04c89279657cffdda8736d0c0b9d2" [[package]] name = "dyn-clone" -version = "1.0.4" +version = "1.0.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c7a8fb8a9fbf66c1f703fe16184d10ca0ee9d23be5b4436400408ba54a95005" + +[[package]] +name = "dynasm" +version = "1.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "add9a102807b524ec050363f09e06f1504214b0e1c7797f64261c891022dce8b" +dependencies = [ + "bitflags 1.3.2", + "byteorder", + "lazy_static", + "proc-macro-error", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "dynasmrt" +version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee2626afccd7561a06cf1367e2950c4718ea04565e20fb5029b6c7d8ad09abcf" +checksum = "64fba5a42bd76a17cad4bfa00de168ee1cbfa06a5e8ce992ae880218c05641a9" +dependencies = [ + "byteorder", + "dynasm", + "memmap2 0.5.10", +] [[package]] name = "ecdsa" -version = "0.10.2" +version = "0.16.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41fbdb4ff710acb4db8ca29f93b897529ea6d6a45626d5183b47e012aa6ae7e4" +checksum = "ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca" dependencies = [ + "der", + "digest 0.10.7", "elliptic-curve", - "hmac 0.10.1", - "signature", + "rfc6979", + "signature 2.2.0", ] [[package]] name = "ed25519" -version = "1.0.3" +version = "2.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37c66a534cbb46ab4ea03477eae19d5c22c01da8258030280b7bd9d8433fb6ef" +checksum = "115531babc129696a58c64a4fef0a8bf9e9698629fb97e9e40767d235cfbcd53" dependencies = [ - "serde", - "signature", + "pkcs8", + "signature 2.2.0", ] [[package]] name = "ed25519-dalek" -version = "1.0.1" +version = "2.1.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "c762bae6dcaf24c4c84667b8579785430908723d5c889f469d76a41d59cc7a9d" +checksum = "4a3daa8e81a3963a60642bcc1f90a670680bd4a77535faa384e9d1c79d620871" dependencies = [ "curve25519-dalek", "ed25519", - "rand 0.7.3", "serde", - "serde_bytes", - "sha2 0.9.3", + "sha2", + "subtle", "zeroize", ] [[package]] name = "educe" -version = "0.4.14" +version = "0.4.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54ed56329d95e524ef98177ad672881bdfe7f22f254eb6ae80deb6fdd2ab20c4" +checksum = "0f0042ff8246a363dbe77d2ceedb073339e85a804b9a47636c6e016a9a32c05f" dependencies = [ "enum-ordinalize", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -1690,6 +2197,22 @@ dependencies = [ "casper-types", ] +[[package]] +name = "ee-1217-regression" +version = "0.1.0" +dependencies = [ + "casper-contract", + "casper-types", +] + +[[package]] +name = "ee-1225-regression" +version = "0.1.0" +dependencies = [ + "casper-contract", + "casper-types", +] + [[package]] name = "ee-221-regression" version = "0.1.0" @@ -1839,33 +2362,44 @@ version = "0.1.0" [[package]] name = "either" -version = "1.6.1" +version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" +checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" +dependencies = [ + "serde", +] [[package]] name = "elliptic-curve" -version = "0.8.5" +version = "0.13.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2db227e61a43a34915680bdda462ec0e212095518020a88a1f91acd16092c39" +checksum = "b5e6043086bf7973472e0c7dff2142ea0b680d30e18d9cc40f267efbf222bd47" dependencies = [ - "bitvec", - "digest 0.9.0", + "base16ct", + "crypto-bigint", + "digest 0.10.7", "ff", - "funty", - "generic-array 0.14.4", + "generic-array", "group", - "pkcs8", - "rand_core 0.5.1", - "subtle 2.4.0", + 
"rand_core", + "sec1", + "subtle", "zeroize", ] +[[package]] +name = "enable-contract" +version = "0.1.0" +dependencies = [ + "casper-contract", + "casper-types", +] + [[package]] name = "encoding_rs" -version = "0.8.28" +version = "0.8.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80df024fbc5ac80f87dfef0d9f5209a252f2a497f7f42944cff24d8253cac065" +checksum = "75030f3c4f45dafd7586dd6780965a8c7e8e285a5ecb86713e63a79c5b2766f3" dependencies = [ "cfg-if 1.0.0", ] @@ -1878,13 +2412,30 @@ dependencies = [ "casper-types", ] +[[package]] +name = "endless-loop-with-effects" +version = "0.1.0" +dependencies = [ + "casper-contract", + "casper-types", +] + [[package]] name = "enum-iterator" version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c79a6321a1197d7730510c7e3f6cb80432dfefecb32426de8cea0aa19b4bb8d7" dependencies = [ - "enum-iterator-derive", + "enum-iterator-derive 0.6.0", +] + +[[package]] +name = "enum-iterator" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4eeac5c5edb79e4e39fe8439ef35207780a11f69c52cbe424ce3dfad4cb78de6" +dependencies = [ + "enum-iterator-derive 0.7.0", ] [[package]] @@ -1895,49 +2446,111 @@ checksum = "1e94aa31f7c0dc764f57896dc615ddd76fc13b0d5dca7eb6cc5e018a5a09ec06" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] -name = "enum-ordinalize" -version = "3.1.9" +name = "enum-iterator-derive" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d52ff39419d3e16961ecfb9e32f5042bdaacf9a4cc553d2d688057117bae49b" +checksum = "c134c37760b27a871ba422106eedbb8247da973a09e82558bf26d619c882b159" dependencies = [ - "num-bigint 0.3.2", - "num-traits", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] -name = "env_logger" -version = "0.8.3" +name = "enum-ordinalize" +version = "3.1.15" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "17392a012ea30ef05a610aa97dfb49496e71c9f676b27879922ea5bdf60d9d3f" +checksum = "1bf1fa3f06bbff1ea5b1a9c7b14aa992a39657db60a2759457328d7e058f49ee" dependencies = [ - "atty", - "humantime", - "log 0.4.14", - "regex", - "termcolor", + "num-bigint", + "num-traits", + "proc-macro2", + "quote", + "syn 2.0.101", ] [[package]] -name = "erased-serde" -version = "0.3.13" +name = "enumset" +version = "1.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11a6b7c3d347de0a9f7bfd2f853be43fe32fa6fac30c70f6d6d67a1e936b87ee" +dependencies = [ + "enumset_derive", +] + +[[package]] +name = "enumset_derive" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6da3ea9e1d1a3b1593e15781f930120e72aa7501610b2f82e5b6739c72e8eac5" +dependencies = [ + "darling", + "proc-macro2", + "quote", + "syn 2.0.101", +] + +[[package]] +name = "env_logger" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cd405aab171cb85d6735e5c8d9db038c17d3ca007a4d2c25f337935c3d90580" +dependencies = [ + "humantime", + "is-terminal", + "log", + "regex", + "termcolor", +] + +[[package]] +name = "equator" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4711b213838dfee0117e3be6ac926007d7f433d7bbe33595975d4190cb07e6fc" +dependencies = [ + "equator-macro", +] + +[[package]] +name = "equator-macro" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44f23cf4b44bfce11a86ace86f8a73ffdec849c9fd00a386a53d278bd9e81fb3" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", +] + +[[package]] +name = "equivalent" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" + +[[package]] +name = "erased-serde" +version = "0.3.31" source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0465971a8cc1fa2455c8465aaa377131e1f1cf4983280f474a13e68793aa770c" +checksum = "6c138974f9d5e7fe373eb04df7cae98833802ae4b11c24ac7039a21d5af4b26c" dependencies = [ "serde", ] [[package]] -name = "event-listener" -version = "2.5.1" +name = "errno" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7531096570974c3a9dcf9e4b8e1cede1ec26cf5046219fb3b9d897503b9be59" +checksum = "cea14ef9355e3beab063703aa9dab15afd25f0667c341310c1e5274bb1d0da18" +dependencies = [ + "libc", + "windows-sys 0.59.0", +] [[package]] name = "expensive-calculation" @@ -1947,26 +2560,29 @@ dependencies = [ "casper-types", ] -[[package]] -name = "fake-simd" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed" - [[package]] name = "fake_instant" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3006df2e7bf21592b4983931164020b02f54eefdc1e35b2f70147858cc1e20ad" +[[package]] +name = "fallible-iterator" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7" + +[[package]] +name = "fallible-iterator" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2acce4a10f12dc2fb14a218589d4f1f62ef011b2d0cc4b3cb1bba8e94da14649" + [[package]] name = "fastrand" -version = "1.4.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca5faf057445ce5c9d4329e382b2ce7ca38550ef3b73a5348362d5f24e0c7fe3" -dependencies = [ - "instant", -] +checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" [[package]] name = "faucet" @@ -1987,13 +2603,39 @@ dependencies = [ [[package]] name = "ff" -version = "0.8.0" +version = "0.13.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0b50bfb653653f9ca9095b427bed08ab8d75a137839d9ad64eb11810d5b6393" +dependencies = [ + "rand_core", + "subtle", +] + +[[package]] +name = "fiat-crypto" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" + +[[package]] +name = "filesize" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "12d741e2415d4e2e5bd1c1d00409d1a8865a57892c2d689b504365655d237d43" +dependencies = [ + "winapi", +] + +[[package]] +name = "filetime" +version = "0.2.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01646e077d4ebda82b73f1bca002ea1e91561a77df2431a9e79729bcc31950ef" +checksum = "35c0522e981e68cbfa8c3f978441a5f34b30b96e146b33cd3359176b50fe8586" dependencies = [ - "bitvec", - "rand_core 0.5.1", - "subtle 2.4.0", + "cfg-if 1.0.0", + "libc", + "libredox", + "windows-sys 0.59.0", ] [[package]] @@ -2005,20 +2647,24 @@ dependencies = [ ] [[package]] -name = "fixedbitset" -version = "0.2.0" +name = "findshlibs" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37ab347416e802de484e4d03c7316c48f1ecb56574dfd4a46a80f173ce1de04d" +checksum = "40b9e59cd0f7e0806cca4be089683ecb6434e602038df21fe6bf6711b2f07f64" +dependencies = [ + "cc", + "lazy_static", + "libc", + "winapi", +] [[package]] name = "flate2" -version = "1.0.20" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd3aec53de10fe96d7d8c565eb17f2c687bb5518a2ec453b5b1252964526abe0" +checksum = "7ced92e76e966ca2fd84c8f7aa01a4aea65b0eb6648d72f7c8f3e2764a67fece" dependencies = [ - "cfg-if 1.0.0", "crc32fast", - "libc", "miniz_oxide", ] @@ -2045,11 +2691,10 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" [[package]] name = "form_urlencoded" -version = "1.0.1" +version 
= "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fc25a87fa4fd2094bffb06925852034d90a17f0d1e05197d4956d3555752191" +checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" dependencies = [ - "matches", "percent-encoding", ] @@ -2060,42 +2705,20 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9564fc758e15025b46aa6643b1b77d047d1a56a1aea6e01002ac0c7026876213" dependencies = [ "libc", - "winapi 0.3.9", + "winapi", ] [[package]] name = "fs_extra" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2022715d62ab30faffd124d40b76f4134a550a87792276512b18d63272333394" - -[[package]] -name = "fuchsia-zircon" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e9763c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82" -dependencies = [ - "bitflags 1.2.1", - "fuchsia-zircon-sys", -] - -[[package]] -name = "fuchsia-zircon-sys" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" - -[[package]] -name = "funty" -version = "1.1.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fed34cd105917e91daa4da6b3728c47b068749d6a62c59811f06ed2ac71d9da7" +checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c" [[package]] name = "futures" -version = "0.3.13" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f55667319111d593ba876406af7c409c0ebb44dc4be6132a783ccf163ea14c1" +checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" dependencies = [ "futures-channel", "futures-core", @@ -2108,9 +2731,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.13" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "8c2dd2df839b57db9ab69c2c9d8f3e8c81984781937fe2807dc6dcf3b2ad2939" +checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" dependencies = [ "futures-core", "futures-sink", @@ -2118,78 +2741,55 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.13" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15496a72fabf0e62bdc3df11a59a3787429221dd0710ba8ef163d6f7a9112c94" +checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" [[package]] name = "futures-executor" -version = "0.3.13" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "891a4b7b96d84d5940084b2a37632dd65deeae662c114ceaa2c879629c9c0ad1" +checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" dependencies = [ "futures-core", "futures-task", "futures-util", - "num_cpus", ] [[package]] name = "futures-io" -version = "0.3.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d71c2c65c57704c32f5241c1223167c2c3294fd34ac020c807ddbe6db287ba59" - -[[package]] -name = "futures-lite" -version = "1.11.3" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4481d0cd0de1d204a4fa55e7d45f07b1d958abcb06714b3446438e2eff695fb" -dependencies = [ - "fastrand", - "futures-core", - "futures-io", - "memchr", - "parking", - "pin-project-lite 0.2.6", - "waker-fn", -] +checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" [[package]] name = "futures-macro" -version = "0.3.13" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea405816a5139fb39af82c2beb921d52143f556038378d6db21183a5c37fbfb7" +checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ - "proc-macro-hack", "proc-macro2", "quote", - "syn", + "syn 2.0.101", ] [[package]] name = "futures-sink" -version = 
"0.3.13" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85754d98985841b7d4f5e8e6fbfa4a4ac847916893ec511a2917ccd8525b8bb3" +checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" [[package]] name = "futures-task" -version = "0.3.13" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa189ef211c15ee602667a6fcfe1c1fd9e07d42250d2156382820fba33c9df80" - -[[package]] -name = "futures-timer" -version = "3.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" +checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" [[package]] name = "futures-util" -version = "0.3.13" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1812c7ab8aedf8d6f2701a43e1243acdbcc2b36ab26e2ad421eb99ac963d96d1" +checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" dependencies = [ "futures-channel", "futures-core", @@ -2198,30 +2798,28 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project-lite 0.2.6", + "pin-project-lite", "pin-utils", - "proc-macro-hack", - "proc-macro-nested", "slab", ] [[package]] name = "generic-array" -version = "0.12.4" +version = "0.14.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffdf9f34f1447443d37393cc6c2b8313aebddcd96906caf34e54c68d8e57d7bd" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" dependencies = [ "typenum", + "version_check", + "zeroize", ] [[package]] -name = "generic-array" -version = "0.14.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "501466ecc8a30d1d3b7fc9229b122b2ce8ed6e9d9223f1138d4babb253e51817" +name = "generic-hash" +version = "0.1.0" dependencies = [ - "typenum", - "version_check", + "casper-contract", + "casper-types", ] 
[[package]] @@ -2232,6 +2830,14 @@ dependencies = [ "casper-types", ] +[[package]] +name = "get-blockinfo" +version = "0.1.0" +dependencies = [ + "casper-contract", + "casper-types", +] + [[package]] name = "get-blocktime" version = "0.1.0" @@ -2240,6 +2846,23 @@ dependencies = [ "casper-types", ] +[[package]] +name = "get-call-stack-call-recursive-subcall" +version = "0.1.0" +dependencies = [ + "casper-contract", + "casper-types", + "get-call-stack-recursive-subcall", +] + +[[package]] +name = "get-call-stack-recursive-subcall" +version = "0.1.0" +dependencies = [ + "casper-contract", + "casper-types", +] + [[package]] name = "get-caller" version = "0.1.0" @@ -2282,74 +2905,56 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.1.16" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" +checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" dependencies = [ "cfg-if 1.0.0", + "js-sys", "libc", - "wasi 0.9.0+wasi-snapshot-preview1", + "wasi 0.11.0+wasi-snapshot-preview1", + "wasm-bindgen", ] [[package]] name = "getrandom" -version = "0.2.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9495705279e7140bf035dde1f6e750c162df8b625267cd52cc44e0b156732c8" +checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4" dependencies = [ "cfg-if 1.0.0", "libc", - "wasi 0.10.2+wasi-snapshot-preview1", + "r-efi", + "wasi 0.14.2+wasi-0.2.4", ] [[package]] -name = "ghash" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97304e4cd182c3846f7575ced3890c53012ce534ad9114046b0a9e00bb30a375" +name = "gh-1470-regression" +version = "0.1.0" dependencies = [ - "opaque-debug 0.3.0", - "polyval", + "casper-contract", + "casper-types", ] [[package]] -name = "gimli" -version = "0.23.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6503fe142514ca4799d4c26297c4248239fe8838d827db6bd6065c6ed29a6ce" - -[[package]] -name = "glob" -version = "0.2.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8be18de09a56b60ed0edf84bc9df007e30040691af7acd1c41874faac5895bfb" - -[[package]] -name = "gloo-timers" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47204a46aaff920a1ea58b11d03dec6f704287d27561724a4631e450654a891f" +name = "gh-1470-regression-call" +version = "0.1.0" dependencies = [ - "futures-channel", - "futures-core", - "js-sys", - "wasm-bindgen", - "web-sys", + "casper-contract", + "casper-types", + "gh-1470-regression", ] [[package]] -name = "group" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc11f9f5fbf1943b48ae7c2bf6846e7d827a512d1be4f23af708f5ca5d01dde1" +name = "gh-1688-regression" +version = "0.1.0" dependencies = [ - "ff", - "rand_core 0.5.1", - "subtle 2.4.0", + "casper-contract", + "casper-types", ] [[package]] -name = "groups" +name = "gh-2280-regression" version = "0.1.0" dependencies = [ "casper-contract", @@ -2357,105 +2962,257 @@ dependencies = [ ] [[package]] -name = "h2" -version = "0.2.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e4728fd124914ad25e99e3d15a9361a879f6620f63cb56bbb08f95abb97a535" +name = "gh-2280-regression-call" +version = "0.1.0" dependencies = [ - "bytes 0.5.6", - "fnv", - "futures-core", - "futures-sink", - "futures-util", - "http", - "indexmap", - "slab", - "tokio 0.2.25", - "tokio-util 0.3.1", - "tracing", - "tracing-futures", + "casper-contract", + "casper-types", ] [[package]] -name = "h2" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc018e188373e2777d0ef2467ebff62a08e66c3f5857b23c8fbec3018210dc00" +name = "gh-3097-regression" +version = "0.1.0" dependencies = [ - 
"bytes 1.0.1", - "fnv", - "futures-core", - "futures-sink", - "futures-util", - "http", - "indexmap", - "slab", - "tokio 1.4.0", - "tokio-util 0.6.5", - "tracing", + "casper-contract", + "casper-types", ] [[package]] -name = "half" -version = "1.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62aca2aba2d62b4a7f5b33f3712cb1b0692779a56fb510499d5c0aa594daeaf3" +name = "gh-3097-regression-call" +version = "0.1.0" +dependencies = [ + "casper-contract", + "casper-types", +] [[package]] -name = "hashbrown" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04" +name = "gh-4771-regression" +version = "0.1.0" dependencies = [ - "ahash", + "casper-contract", + "casper-types", ] [[package]] -name = "headers" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0b7591fb62902706ae8e7aaff416b1b0fa2c0fd0878b46dc13baa3712d8a855" +name = "gh-4898-regression" +version = "0.1.0" dependencies = [ - "base64", - "bitflags 1.2.1", - "bytes 1.0.1", - "headers-core", - "http", - "mime", - "sha-1", - "time", + "casper-contract", + "casper-types", ] [[package]] -name = "headers-core" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7f66481bfee273957b1f20485a4ff3362987f85b2c236580d81b4eb7a326429" +name = "gh-5058-regression" +version = "0.1.0" dependencies = [ - "http", + "casper-contract", + "casper-types", ] [[package]] -name = "heck" -version = "0.3.2" +name = "gimli" +version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87cbf45460356b7deeb5e3415b5563308c0a9b057c85e12b06ad551f98d0a6ac" +checksum = "22030e2c5a68ec659fde1e949a745124b48e6fa8b045b7ed5bd1fe4ccc5c4e5d" dependencies = [ - "unicode-segmentation", + "fallible-iterator 0.2.0", + "indexmap 1.9.3", + "stable_deref_trait", ] [[package]] -name = 
"hermit-abi" -version = "0.1.18" +name = "gimli" +version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "322f4de77956e22ed0e5032c359a0f1273f1f7f0d79bfa3b8ffbc730d7fbcc5c" +checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" dependencies = [ - "libc", + "fallible-iterator 0.3.0", + "indexmap 2.9.0", + "stable_deref_trait", ] [[package]] -name = "hex" -version = "0.4.3" +name = "gimli" +version = "0.31.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" + +[[package]] +name = "glob" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8be18de09a56b60ed0edf84bc9df007e30040691af7acd1c41874faac5895bfb" + +[[package]] +name = "glob" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2" + +[[package]] +name = "global-state-update-gen" +version = "0.3.0" +dependencies = [ + "base16", + "base64 0.13.1", + "casper-engine-test-support", + "casper-execution-engine", + "casper-storage", + "casper-types", + "clap 2.34.0", + "itertools 0.10.5", + "lmdb-rkv", + "rand", + "serde", + "toml 0.5.11", +] + +[[package]] +name = "group" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" +dependencies = [ + "ff", + "rand_core", + "subtle", +] + +[[package]] +name = "groups" +version = "0.1.0" +dependencies = [ + "casper-contract", + "casper-types", +] + +[[package]] +name = "h2" +version = "0.3.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8" +dependencies = [ + "bytes", + "fnv", + "futures-core", + "futures-sink", + "futures-util", + "http 0.2.12", + "indexmap 
2.9.0", + "slab", + "tokio", + "tokio-util 0.7.15", + "tracing", +] + +[[package]] +name = "half" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "459196ed295495a68f7d7fe1d84f6c4b7ff0e21fe3017b2f283c6fac3ad803c9" +dependencies = [ + "cfg-if 1.0.0", + "crunchy", +] + +[[package]] +name = "hashbrown" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" + +[[package]] +name = "hashbrown" +version = "0.14.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" +dependencies = [ + "ahash", +] + +[[package]] +name = "hashbrown" +version = "0.15.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "84b26c544d002229e640969970a2e74021aadf6e2f96372b9c58eff97de08eb3" + +[[package]] +name = "headers" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06683b93020a07e3dbcf5f8c0f6d40080d725bea7936fc01ad345c01b97dc270" +dependencies = [ + "base64 0.21.7", + "bytes", + "headers-core", + "http 0.2.12", + "httpdate", + "mime", + "sha1", +] + +[[package]] +name = "headers-core" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7f66481bfee273957b1f20485a4ff3362987f85b2c236580d81b4eb7a326429" +dependencies = [ + "http 0.2.12", +] + +[[package]] +name = "heck" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d621efb26863f0e9924c6ac577e8275e5e6b77455db64ffa6c65c904e9e132c" +dependencies = [ + "unicode-segmentation", +] + +[[package]] +name = "heck" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" + +[[package]] +name = "heck" +version = "0.5.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + +[[package]] +name = "hello-world" +version = "0.1.0" +dependencies = [ + "casper-contract", + "casper-types", +] + +[[package]] +name = "hermit-abi" +version = "0.1.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" +dependencies = [ + "libc", +] + +[[package]] +name = "hermit-abi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" + +[[package]] +name = "hermit-abi" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f154ce46856750ed433c8649605bf7ed2de3bc35fd9d2a9f30cddd873c80cb08" + +[[package]] +name = "hex" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" dependencies = [ @@ -2473,40 +3230,28 @@ dependencies = [ ] [[package]] -name = "hex_fmt" +name = "hex-buffer-serde" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b07f60793ff0a4d9cef0f18e63b5357e06209987153a64648c972c1e5aff336f" - -[[package]] -name = "hmac" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dcb5e64cda4c23119ab41ba960d1e170a774c8e4b9d9e6a9bc18aabf5e59695" +checksum = "08f52012c160668b4494727f3588045aa00429849fcae51de70d68fa98228039" dependencies = [ - "crypto-mac 0.7.0", - "digest 0.8.1", + "hex", + "serde", ] [[package]] -name = "hmac" -version = "0.10.1" +name = "hex_fmt" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1441c6b1e930e2817404b5046f1f989899143a12bf92de603b69f4e0aee1e15" -dependencies = [ - "crypto-mac 0.10.0", - "digest 0.9.0", -] 
+checksum = "b07f60793ff0a4d9cef0f18e63b5357e06209987153a64648c972c1e5aff336f" [[package]] -name = "hmac-drbg" -version = "0.2.0" +name = "hmac" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6e570451493f10f6581b48cdd530413b63ea9e780f544bfd3bdcaa0d89d1a7b" +checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" dependencies = [ - "digest 0.8.1", - "generic-array 0.12.4", - "hmac 0.7.1", + "digest 0.10.7", ] [[package]] @@ -2523,7 +3268,7 @@ version = "0.1.0" dependencies = [ "casper-contract", "casper-types", - "rand 0.8.3", + "rand", ] [[package]] @@ -2534,736 +3279,501 @@ checksum = "3c731c3e10504cc8ed35cfe2f1db4c9274c3d35fa486e3b31df46f068ef3e867" dependencies = [ "libc", "match_cfg", - "winapi 0.3.9", + "winapi", ] [[package]] name = "http" -version = "0.2.4" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "527e8c9ac747e28542699a951517aa9a6945af506cd1f2e1b53a576c17b6cc11" +checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" dependencies = [ - "bytes 1.0.1", + "bytes", "fnv", "itoa", ] [[package]] -name = "http-body" -version = "0.3.1" +name = "http" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13d5ff830006f7646652e057693569bfe0d51760c0085a071769d142a205111b" +checksum = "f4a85d31aea989eead29a3aaf9e1115a180df8282431156e533de47660892565" dependencies = [ - "bytes 0.5.6", - "http", + "bytes", + "fnv", + "itoa", ] [[package]] name = "http-body" -version = "0.4.1" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dfb77c123b4e2f72a2069aeae0b4b4949cc7e966df277813fc16347e7549737" +checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" dependencies = [ - "bytes 1.0.1", - "http", - "pin-project-lite 0.2.6", + "bytes", + "http 0.2.12", + "pin-project-lite", ] [[package]] name = "httparse" -version = 
"1.3.6" +version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc35c995b9d93ec174cf9a27d425c7892722101e14993cd227fdb51d70cf9589" +checksum = "6dbf3de79e51f3d586ab4cb9d5c3e2c14aa28ed23d180cf89b4df0454a69cc87" [[package]] name = "httpdate" -version = "0.3.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "494b4d60369511e7dea41cf646832512a94e542f68bb9c49e54518e0f468eb47" +checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "humantime" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" - -[[package]] -name = "hyper" -version = "0.13.10" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a6f157065790a3ed2f88679250419b5cdd96e714a0d65f7797fd337186e96bb" -dependencies = [ - "bytes 0.5.6", - "futures-channel", - "futures-core", - "futures-util", - "h2 0.2.7", - "http", - "http-body 0.3.1", - "httparse", - "httpdate", - "itoa", - "pin-project 1.0.6", - "socket2 0.3.19", - "tokio 0.2.25", - "tower-service", - "tracing", - "want", -] +checksum = "9b112acc8b3adf4b107a8ec20977da0273a8c386765a3ec0229bd500a1443f9f" [[package]] name = "hyper" -version = "0.14.5" +version = "0.14.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bf09f61b52cfcf4c00de50df88ae423d6c02354e385a86341133b5338630ad1" +checksum = "41dfc780fdec9373c01bae43289ea34c972e40ee3c9f6b3c8801a35f35586ce7" dependencies = [ - "bytes 1.0.1", + "bytes", "futures-channel", "futures-core", "futures-util", - "h2 0.3.2", - "http", - "http-body 0.4.1", + "h2", + "http 0.2.12", + "http-body", "httparse", "httpdate", "itoa", - "pin-project 1.0.6", - "socket2 0.4.0", - "tokio 1.4.0", + "pin-project-lite", + "socket2", + "tokio", "tower-service", "tracing", "want", ] -[[package]] -name = "hyper-tls" 
-version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d979acc56dcb5b8dddba3917601745e877576475aa046df3226eabdecef78eed" -dependencies = [ - "bytes 0.5.6", - "hyper 0.13.10", - "native-tls", - "tokio 0.2.25", - "tokio-tls", -] - [[package]] name = "hyper-tls" version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" dependencies = [ - "bytes 1.0.1", - "hyper 0.14.5", + "bytes", + "hyper", "native-tls", - "tokio 1.4.0", + "tokio", "tokio-native-tls", ] [[package]] -name = "idna" -version = "0.2.2" +name = "icu_collections" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89829a5d69c23d348314a7ac337fe39173b61149a9864deabd260983aed48c21" +checksum = "200072f5d0e3614556f94a9930d5dc3e0662a652823904c3a75dc3b0af7fee47" dependencies = [ - "matches", - "unicode-bidi", - "unicode-normalization", + "displaydoc", + "potential_utf", + "yoke", + "zerofrom", + "zerovec", ] [[package]] -name = "if-addrs" -version = "0.6.5" +name = "icu_locale_core" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28538916eb3f3976311f5dfbe67b5362d0add1293d0a9cad17debf86f8e3aa48" +checksum = "0cde2700ccaed3872079a65fb1a78f6c0a36c91570f28755dda67bc8f7d9f00a" dependencies = [ - "if-addrs-sys", - "libc", - "winapi 0.3.9", + "displaydoc", + "litemap", + "tinystr", + "writeable", + "zerovec", ] [[package]] -name = "if-addrs-sys" -version = "0.3.2" +name = "icu_normalizer" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de74b9dd780476e837e5eb5ab7c88b49ed304126e412030a0adba99c8efe79ea" +checksum = "436880e8e18df4d7bbc06d58432329d6458cc84531f7ac5f024e93deadb37979" dependencies = [ - "cc", - "libc", + "displaydoc", + "icu_collections", + "icu_normalizer_data", + "icu_properties", + "icu_provider", + "smallvec", + 
"zerovec", ] [[package]] -name = "if-watch" -version = "0.1.8" +name = "icu_normalizer_data" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97b8538953a3f0d0d3868f0a706eb4273535e10d72acb5c82c1c23ae48835c85" -dependencies = [ - "async-io", - "futures", - "futures-lite", - "if-addrs", - "ipnet", - "libc", - "log 0.4.14", - "winapi 0.3.9", -] +checksum = "00210d6893afc98edb752b664b8890f0ef174c8adbb8d0be9710fa66fbbf72d3" [[package]] -name = "indexmap" -version = "1.6.2" +name = "icu_properties" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "824845a0bf897a9042383849b02c1bc219c2383772efcd5c6f9766fa4b81aef3" +checksum = "016c619c1eeb94efb86809b015c58f479963de65bdb6253345c1a1276f22e32b" dependencies = [ - "autocfg", - "hashbrown", - "serde", + "displaydoc", + "icu_collections", + "icu_locale_core", + "icu_properties_data", + "icu_provider", + "potential_utf", + "zerotrie", + "zerovec", ] [[package]] -name = "input_buffer" -version = "0.4.0" +name = "icu_properties_data" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f97967975f448f1a7ddb12b0bc41069d09ed6a1c161a92687e057325db35d413" -dependencies = [ - "bytes 1.0.1", -] +checksum = "298459143998310acd25ffe6810ed544932242d3f07083eee1084d83a71bd632" [[package]] -name = "instant" -version = "0.1.9" +name = "icu_provider" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61124eeebbd69b8190558df225adf7e4caafce0d743919e5d6b19652314ec5ec" +checksum = "03c80da27b5f4187909049ee2d72f276f0d9f99a42c306bd0131ecfe04d8e5af" dependencies = [ - "cfg-if 1.0.0", + "displaydoc", + "icu_locale_core", + "stable_deref_trait", + "tinystr", + "writeable", + "yoke", + "zerofrom", + "zerotrie", + "zerovec", ] [[package]] -name = "iovec" -version = "0.1.4" +name = "id-arena" +version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "b2b3ea6ff95e175473f8ffe6a7eb7c00d054240321b84c57051175fe3c1e075e" -dependencies = [ - "libc", -] +checksum = "25a2bc672d1148e28034f176e01fffebb08b35768468cc954630da77a1449005" [[package]] -name = "ipnet" -version = "2.3.0" +name = "ident_case" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47be2f14c678be2fdcab04ab1171db51b2762ce6f0a8ee87c8dd4a04ed216135" +checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" [[package]] -name = "ipnetwork" -version = "0.17.0" +name = "idna" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02c3eaab3ac0ede60ffa41add21970a7df7d91772c03383aac6c2c3d53cc716b" +checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e" dependencies = [ - "serde", + "idna_adapter", + "smallvec", + "utf8_iter", ] [[package]] -name = "itertools" -version = "0.9.0" +name = "idna_adapter" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "284f18f85651fe11e8a991b2adb42cb078325c996ed026d994719efcfca1d54b" +checksum = "3acae9609540aa318d1bc588455225fb2085b9ed0c4f6bd0d9d5bcd86f1a0344" dependencies = [ - "either", + "icu_normalizer", + "icu_properties", ] [[package]] -name = "itertools" -version = "0.10.0" +name = "impl-trait-for-tuples" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37d572918e350e82412fe766d24b15e6682fb2ed2bbe018280caa810397cb319" +checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" dependencies = [ - "either", + "proc-macro2", + "quote", + "syn 2.0.101", ] [[package]] -name = "itoa" -version = "0.4.7" +name = "impls" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736" +checksum = "7a46645bbd70538861a90d0f26c31537cdf1e44aae99a794fb75a664b70951bc" [[package]] -name = 
"jemalloc-ctl" -version = "0.3.3" +name = "include_dir" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c502a5ff9dd2924f1ed32ba96e3b65735d837b4bfd978d3161b1702e66aca4b7" +checksum = "923d117408f1e49d914f1a379a309cffe4f18c05cf4e3d12e613a15fc81bd0dd" dependencies = [ - "jemalloc-sys", - "libc", - "paste", + "include_dir_macros", ] [[package]] -name = "jemalloc-sys" -version = "0.3.2" +name = "include_dir_macros" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d3b9f3f5c9b31aa0f5ed3260385ac205db665baa41d49bb8338008ae94ede45" +checksum = "7cab85a7ed0bd5f0e76d93846e0147172bed2e2d3f859bcc33a8d9699cad1a75" dependencies = [ - "cc", - "fs_extra", - "libc", + "proc-macro2", + "quote", ] [[package]] -name = "jemallocator" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43ae63fcfc45e99ab3d1b29a46782ad679e98436c3169d15a167a1108a724b69" +name = "increment-counter" +version = "1.0.0" dependencies = [ - "jemalloc-sys", - "libc", + "casper-contract", + "casper-types", ] [[package]] -name = "js-sys" -version = "0.3.50" +name = "indexmap" +version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d99f9e3e84b8f67f846ef5b4cbbc3b1c29f6c759fcbce6f01aa0e73d932a24c" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" dependencies = [ - "wasm-bindgen", + "autocfg", + "hashbrown 0.12.3", + "serde", ] [[package]] -name = "jsonrpc-lite" -version = "0.5.0" +name = "indexmap" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a98d245f26984add78277a5306ca0cf774863d4eddb4912b31d94ee3fa1a22d4" +checksum = "cea70ddb795996207ad57735b50c5982d8844f38ba9ee5f1aedcfb708a2aa11e" dependencies = [ - "serde", - "serde_derive", - "serde_json", + "equivalent", + "hashbrown 0.15.3", ] [[package]] -name = "k256" -version = "0.7.2" +name = "inferno" +version 
= "0.11.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf02ecc966e1b7e8db1c81ac8f321ba24d1cfab5b634961fab10111f015858e1" +checksum = "232929e1d75fe899576a3d5c7416ad0d88dbfbb3c3d6aa00873a7408a50ddb88" dependencies = [ - "cfg-if 1.0.0", - "ecdsa", - "elliptic-curve", - "sha2 0.9.3", + "ahash", + "indexmap 2.9.0", + "is-terminal", + "itoa", + "log", + "num-format", + "once_cell", + "quick-xml", + "rgb", + "str_stack", ] [[package]] -name = "keccak" -version = "0.1.0" +name = "ipnet" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67c21572b4949434e4fc1e1978b99c5f77064153c59d998bf13ecd96fb5ecba7" +checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" [[package]] -name = "kernel32-sys" -version = "0.2.2" +name = "ipnetwork" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" -dependencies = [ - "winapi 0.2.8", - "winapi-build", -] - -[[package]] -name = "key-management-thresholds" -version = "0.1.0" +checksum = "4088d739b183546b239688ddbc79891831df421773df95e236daf7867866d355" dependencies = [ - "casper-contract", - "casper-types", + "serde", ] [[package]] -name = "kv-log-macro" -version = "1.0.7" +name = "is-terminal" +version = "0.4.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0de8b303297635ad57c9f5059fd9cee7a47f8e8daa09df0fcd07dd39fb22977f" +checksum = "e04d7f318608d35d4b61ddd75cbdaee86b023ebe2bd5a66ee0915f0bf93095a9" dependencies = [ - "log 0.4.14", + "hermit-abi 0.5.1", + "libc", + "windows-sys 0.59.0", ] [[package]] -name = "lazy_static" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" - -[[package]] -name = "lazycell" -version = "1.3.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" - -[[package]] -name = "libc" -version = "0.2.93" +name = "is_terminal_polyfill" +version = "1.70.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9385f66bf6105b241aa65a61cb923ef20efc665cb9f9bb50ac2f0c4b7f378d41" +checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" [[package]] -name = "libp2p" -version = "0.35.1" +name = "itertools" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adc225a49973cf9ab10d0cdd6a4b8f0cda299df9b760824bbb623f15f8f0c95a" +checksum = "284f18f85651fe11e8a991b2adb42cb078325c996ed026d994719efcfca1d54b" dependencies = [ - "atomic", - "bytes 1.0.1", - "futures", - "lazy_static", - "libp2p-core", - "libp2p-deflate", - "libp2p-dns", - "libp2p-floodsub", - "libp2p-gossipsub", - "libp2p-identify", - "libp2p-kad", - "libp2p-mdns", - "libp2p-mplex", - "libp2p-noise", - "libp2p-ping", - "libp2p-request-response", - "libp2p-swarm", - "libp2p-swarm-derive", - "libp2p-tcp", - "libp2p-uds", - "libp2p-yamux", - "parity-multiaddr", - "parking_lot", - "pin-project 1.0.6", - "smallvec", - "wasm-timer", + "either", ] [[package]] -name = "libp2p-core" -version = "0.27.1" +name = "itertools" +version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a2d56aadc2c2bf22cd7797f86e56a65b5b3994a0136b65be3106938acae7a26" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" dependencies = [ - "asn1_der", - "bs58", - "ed25519-dalek", "either", - "fnv", - "futures", - "futures-timer", - "lazy_static", - "libsecp256k1", - "log 0.4.14", - "multihash", - "multistream-select", - "parity-multiaddr", - "parking_lot", - "pin-project 1.0.6", - "prost", - "prost-build", - "rand 0.7.3", - "ring", - "rw-stream-sink", - "sha2 0.9.3", - "smallvec", - "thiserror", - "unsigned-varint 
0.7.0", - "void", - "zeroize", ] [[package]] -name = "libp2p-deflate" -version = "0.27.1" +name = "itertools" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d42eed63305f0420736fa487f9acef720c4528bd7852a6a760f5ccde4813345" +checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" dependencies = [ - "flate2", - "futures", - "libp2p-core", + "either", ] [[package]] -name = "libp2p-dns" -version = "0.27.0" +name = "itertools" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5153b6db68fd4baa3b304e377db744dd8fea8ff4e4504509ee636abcde88d3e3" +checksum = "2b192c782037fadd9cfa75548310488aabdbf3d2da73885b31bd0abd03351285" dependencies = [ - "futures", - "libp2p-core", - "log 0.4.14", + "either", ] [[package]] -name = "libp2p-floodsub" -version = "0.27.0" +name = "itoa" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3c63dfa06581b24b1d12bf9815b43689a784424be217d6545c800c7c75a207f" -dependencies = [ - "cuckoofilter", - "fnv", - "futures", - "libp2p-core", - "libp2p-swarm", - "log 0.4.14", - "prost", - "prost-build", - "rand 0.7.3", - "smallvec", -] +checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" [[package]] -name = "libp2p-gossipsub" -version = "0.28.0" +name = "js-sys" +version = "0.3.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "502dc5fcbfec4aa1c63ef3f7307ffe20e90c1a1387bf23ed0bec087f2dde58a1" +checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f" dependencies = [ - "asynchronous-codec", - "base64", - "byteorder", - "bytes 1.0.1", - "fnv", - "futures", - "hex_fmt", - "libp2p-core", - "libp2p-swarm", - "log 0.4.14", - "prost", - "prost-build", - "rand 0.7.3", - "regex", - "sha2 0.9.3", - "smallvec", - "unsigned-varint 0.7.0", - "wasm-timer", + "once_cell", + "wasm-bindgen", ] [[package]] -name = "libp2p-identify" 
-version = "0.27.0" +name = "k256" +version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b40fb36a059b7a8cce1514bd8b546fa612e006c9937caa7f5950cb20021fe91e" +checksum = "f6e3919bbaa2945715f0bb6d3934a173d1e9a59ac23767fbaaef277265a7411b" dependencies = [ - "futures", - "libp2p-core", - "libp2p-swarm", - "log 0.4.14", - "prost", - "prost-build", - "smallvec", - "wasm-timer", + "cfg-if 1.0.0", + "ecdsa", + "elliptic-curve", + "sha2", ] [[package]] -name = "libp2p-kad" -version = "0.28.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf3da6c9acbcc05f93235d201d7d45ef4e8b88a45d8836f98becd8b4d443f066" +name = "key-management-thresholds" +version = "0.1.0" dependencies = [ - "arrayvec", - "asynchronous-codec", - "bytes 1.0.1", - "either", - "fnv", - "futures", - "libp2p-core", - "libp2p-swarm", - "log 0.4.14", - "prost", - "prost-build", - "rand 0.7.3", - "sha2 0.9.3", - "smallvec", - "uint", - "unsigned-varint 0.7.0", - "void", - "wasm-timer", + "casper-contract", + "casper-types", ] [[package]] -name = "libp2p-mdns" -version = "0.28.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e9e6374814d1b118d97ccabdfc975c8910bd16dc38a8bc058eeb08bf2080fe1" +name = "key-putter" +version = "0.1.0" dependencies = [ - "async-io", - "data-encoding", - "dns-parser", - "futures", - "if-watch", - "lazy_static", - "libp2p-core", - "libp2p-swarm", - "log 0.4.14", - "rand 0.7.3", - "smallvec", - "socket2 0.3.19", - "void", + "casper-contract", + "casper-types", ] [[package]] -name = "libp2p-mplex" -version = "0.27.1" +name = "lazy_static" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "350ce8b3923594aedabd5d6e3f875d058435052a29c3f32df378bc70d10be464" -dependencies = [ - "asynchronous-codec", - "bytes 1.0.1", - "futures", - "libp2p-core", - "log 0.4.14", - "nohash-hasher", - "parking_lot", - "rand 0.7.3", - "smallvec", - "unsigned-varint 
0.7.0", -] +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" [[package]] -name = "libp2p-noise" -version = "0.29.0" +name = "leb128" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4aca322b52a0c5136142a7c3971446fb1e9964923a526c9cc6ef3b7c94e57778" -dependencies = [ - "bytes 1.0.1", - "curve25519-dalek", - "futures", - "lazy_static", - "libp2p-core", - "log 0.4.14", - "prost", - "prost-build", - "rand 0.7.3", - "sha2 0.9.3", - "snow", - "static_assertions", - "x25519-dalek", - "zeroize", -] +checksum = "884e2677b40cc8c339eaefcb701c32ef1fd2493d71118dc0ca4b6a736c93bd67" [[package]] -name = "libp2p-ping" -version = "0.27.0" +name = "leb128fmt" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f3813276d0708c8db0f500d8beda1bda9ad955723b9cb272c41f4727256f73c" -dependencies = [ - "futures", - "libp2p-core", - "libp2p-swarm", - "log 0.4.14", - "rand 0.7.3", - "void", - "wasm-timer", -] +checksum = "09edd9e8b54e49e587e4f6295a7d29c3ea94d469cb40ab8ca70b288248a81db2" [[package]] -name = "libp2p-request-response" -version = "0.9.1" +name = "libc" +version = "0.2.172" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10e5552827c33d8326502682da73a0ba4bfa40c1b55b216af3c303f32169dd89" -dependencies = [ - "async-trait", - "bytes 1.0.1", - "futures", - "libp2p-core", - "libp2p-swarm", - "log 0.4.14", - "lru", - "minicbor", - "rand 0.7.3", - "smallvec", - "unsigned-varint 0.7.0", - "wasm-timer", -] +checksum = "d750af042f7ef4f724306de029d18836c26c1765a54a6a3f094cbd23a7267ffa" [[package]] -name = "libp2p-swarm" -version = "0.27.2" +name = "libloading" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7955b973e1fd2bd61ffd43ce261c1223f61f4aacd5bae362a924993f9a25fd98" +checksum = "6a793df0d7afeac54f95b471d3af7f0d4fb975699f972341a4b76988d49cdf0c" dependencies = [ - "either", - "futures", - 
"libp2p-core", - "log 0.4.14", - "rand 0.7.3", - "smallvec", - "void", - "wasm-timer", + "cfg-if 1.0.0", + "windows-targets 0.53.0", ] [[package]] -name = "libp2p-swarm-derive" -version = "0.22.0" +name = "libm" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c564ebaa36a64839f51eaddb0243aaaa29ce64affb56129193cc3248b72af273" -dependencies = [ - "quote", - "syn", -] +checksum = "f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de" [[package]] -name = "libp2p-tcp" -version = "0.27.1" +name = "libredox" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88a5aef80e519a6cb8e2663605142f97baaaea1a252eecbf8756184765f7471b" +checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" dependencies = [ - "async-io", - "futures", - "futures-timer", - "if-addrs", - "if-watch", - "ipnet", + "bitflags 2.9.1", "libc", - "libp2p-core", - "log 0.4.14", - "socket2 0.3.19", - "tokio 1.4.0", + "redox_syscall", ] [[package]] -name = "libp2p-uds" -version = "0.27.0" +name = "linked-hash-map" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80ac51ce419f60be966e02103c17f67ff5dc4422ba83ba54d251d6c62a4ed487" -dependencies = [ - "async-std", - "futures", - "libp2p-core", - "log 0.4.14", -] +checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" [[package]] -name = "libp2p-yamux" -version = "0.30.1" +name = "linkme" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4819358c542a86ff95f6ae691efb4b94ddaf477079b01a686f5705b79bfc232a" +checksum = "70fe496a7af8c406f877635cbf3cd6a9fac9d6f443f58691cd8afe6ce0971af4" dependencies = [ - "futures", - "libp2p-core", - "parking_lot", - "thiserror", - "yamux", + "linkme-impl", ] [[package]] -name = "libsecp256k1" -version = "0.3.5" +name = "linkme-impl" +version = "0.3.29" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fc1e2c808481a63dc6da2074752fdd4336a3c8fcc68b83db6f1fd5224ae7962" +checksum = "b01f197a15988fb5b2ec0a5a9800c97e70771499c456ad757d63b3c5e9b96e75" dependencies = [ - "arrayref", - "crunchy", - "digest 0.8.1", - "hmac-drbg", - "rand 0.7.3", - "sha2 0.8.2", - "subtle 2.4.0", - "typenum", + "proc-macro2", + "quote", + "syn 2.0.101", ] [[package]] -name = "linked-hash-map" -version = "0.5.4" +name = "linux-raw-sys" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" +checksum = "cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12" + +[[package]] +name = "list-authorization-keys" +version = "0.1.0" +dependencies = [ + "casper-contract", + "casper-types", +] [[package]] name = "list-named-keys" @@ -3274,21 +3784,34 @@ dependencies = [ ] [[package]] -name = "lmdb" +name = "litemap" version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b0908efb5d6496aa977d96f91413da2635a902e5e31dbef0bfb88986c248539" +checksum = "241eaef5fd12c88705a01fc1066c48c4b36e0dd4377dcdc7ec3942cea7a69956" + +[[package]] +name = "litrs" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4ce301924b7887e9d637144fdade93f9dfff9b60981d4ac161db09720d39aa5" + +[[package]] +name = "lmdb-rkv" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "447a296f7aca299cfbb50f4e4f3d49451549af655fb7215d7f8c0c3d64bad42b" dependencies = [ - "bitflags 1.2.1", + "bitflags 1.3.2", + "byteorder", "libc", - "lmdb-sys", + "lmdb-rkv-sys", ] [[package]] -name = "lmdb-sys" -version = "0.8.0" +name = "lmdb-rkv-sys" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5b392838cfe8858e86fac37cf97a0e8c55cc60ba0a18365cadc33092f128ce9" +checksum = 
"61b9ce6b3be08acefa3003c57b7565377432a89ec24476bbe72e11d101f852fe" dependencies = [ "cc", "libc", @@ -3296,48 +3819,47 @@ dependencies = [ ] [[package]] -name = "lock_api" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a3c91c24eae6777794bb1997ad98bbb87daf92890acab859f7eaa4320333176" +name = "load-caller-info" +version = "0.1.0" dependencies = [ - "scopeguard", + "casper-contract", + "casper-types", ] [[package]] -name = "log" -version = "0.3.9" +name = "lock_api" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e19e8d5c34a3e0e2223db8e060f9e8264aeeb5c5fc64a4ee9965c062211c024b" +checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" dependencies = [ - "log 0.4.14", + "autocfg", + "scopeguard", ] [[package]] name = "log" -version = "0.4.14" +version = "0.4.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710" +checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94" dependencies = [ - "cfg-if 1.0.0", "serde", "value-bag", ] [[package]] -name = "lru" -version = "0.6.5" +name = "mach" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f374d42cdfc1d7dbf3d3dec28afab2eb97ffbf43a3234d795b5986dbf4b90ba" +checksum = "b823e83b2affd8f40a9ee8c29dbc56404c1e34cd2710921f2801e2cf29527afa" dependencies = [ - "hashbrown", + "libc", ] [[package]] -name = "mach" -version = "0.3.2" +name = "mach2" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b823e83b2affd8f40a9ee8c29dbc56404c1e34cd2710921f2801e2cf29527afa" +checksum = "19b955cdeb2a02b9117f121ce63aa52d08ade45de53e48fe6a38b39c10f6f709" dependencies = [ "libc", ] @@ -3366,25 +3888,13 @@ checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" [[package]] name = "matchers" -version = 
"0.0.1" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f099785f7595cc4b4553a174ce30dd7589ef93391ff414dbb67f62392b9e0ce1" +checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" dependencies = [ - "regex-automata", + "regex-automata 0.1.10", ] -[[package]] -name = "matches" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08" - -[[package]] -name = "maybe-uninit" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" - [[package]] name = "measure-gas-subcall" version = "0.1.0" @@ -3395,33 +3905,45 @@ dependencies = [ [[package]] name = "memchr" -version = "2.3.4" +version = "2.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ee1c47aaa256ecabcaea351eae4a9b01ef39ed810004e298d2511ed284b1525" +checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" [[package]] -name = "memoffset" -version = "0.5.6" +name = "memmap2" +version = "0.5.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "043175f069eda7b85febe4a74abbaeff828d9f8b448515d3151a14a3542811aa" +checksum = "83faa42c0a078c393f6b29d5db232d8be22776a891f8f56e5284faee4a20b327" dependencies = [ - "autocfg", + "libc", ] [[package]] -name = "memoffset" -version = "0.6.3" +name = "memmap2" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f83fb6581e8ed1f85fd45c116db8405483899489e38406156c25eb743554361d" +checksum = "6d28bba84adfe6646737845bc5ebbfa2c08424eb1c37e94a1fd2a82adb56a872" dependencies = [ - "autocfg", + "libc", ] [[package]] -name = "memory_units" -version = "0.3.0" +name = "memmap2" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"fd3f7eed9d3848f8b98834af67102b720745c4ec028fcd0aa0239277e7de374f" +dependencies = [ + "libc", +] + +[[package]] +name = "memoffset" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71d96e3f3c0b6325d8ccd83c33b28acb183edcb6c67938ba104ec546854b0882" +checksum = "488016bfae457b036d996092f6cb448677611ce4449e970ceaf42695203f218a" +dependencies = [ + "autocfg", +] [[package]] name = "memory_units" @@ -3431,52 +3953,45 @@ checksum = "8452105ba047068f40ff7093dd1d9da90898e63dd61736462e9cdda6a90ad3c3" [[package]] name = "mime" -version = "0.3.16" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" [[package]] name = "mime_guess" -version = "2.0.3" +version = "2.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2684d4c2e97d99848d30b324b00c8fcc7e5c897b7cbb5819b09e7c90e8baf212" +checksum = "f7c44f8e672c00fe5308fa235f821cb4198414e1c77935c1ab6948d3fd78550e" dependencies = [ "mime", "unicase", ] [[package]] -name = "minicbor" -version = "0.7.2" +name = "minimal-lexical" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c2b2c73f9640fccab53947e2b3474d5071fcbc8f82cac51ddf6c8041a30a9ea" -dependencies = [ - "minicbor-derive", -] +checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] -name = "minicbor-derive" -version = "0.6.2" +name = "miniz_oxide" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19ce18b5423c573a13e80cb3046ea0af6379ef725dc3af4886bdb8f4e5093068" +checksum = "3be647b768db090acb35d5ec5db2b0e1f1de11133ca123b9eacf5137868f892a" dependencies = [ - "proc-macro2", - "quote", - "syn", + "adler2", ] [[package]] -name = "miniz_oxide" -version = "0.4.4" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a92518e98c078586bc6c934028adcca4c92a53d6a958196de835170a01d84e4b" +name = "mint-purse" +version = "0.1.0" dependencies = [ - "adler", - "autocfg", + "casper-contract", + "casper-types", ] [[package]] -name = "mint-purse" +name = "mint-transfer-proxy" version = "0.1.0" dependencies = [ "casper-contract", @@ -3485,128 +4000,106 @@ dependencies = [ [[package]] name = "mio" -version = "0.6.23" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4afd66f5b91bf2a3bc13fad0e21caedac168ca4c707504e75585648ae80e4cc4" +checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" dependencies = [ - "cfg-if 0.1.10", - "fuchsia-zircon", - "fuchsia-zircon-sys", - "iovec", - "kernel32-sys", "libc", - "log 0.4.14", - "miow 0.2.2", - "net2", - "slab", - "winapi 0.2.8", + "log", + "wasi 0.11.0+wasi-snapshot-preview1", + "windows-sys 0.48.0", ] [[package]] name = "mio" -version = "0.7.11" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf80d3e903b34e0bd7282b218398aec54e082c840d9baf8339e0080a0c542956" +checksum = "2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd" dependencies = [ "libc", - "log 0.4.14", - "miow 0.3.7", - "ntapi", - "winapi 0.3.9", + "log", + "wasi 0.11.0+wasi-snapshot-preview1", + "windows-sys 0.52.0", ] [[package]] -name = "miow" +name = "more-asserts" version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebd808424166322d4a38da87083bfddd3ac4c131334ed55856112eb06d46944d" +checksum = "7843ec2de400bcbc6a6328c958dc38e5359da6e93e72e37bc5246bf1ae776389" + +[[package]] +name = "multer" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01acbdc23469fd8fe07ab135923371d5f5a422fbf9c522158677c8eb15bc51c2" dependencies = [ - "kernel32-sys", - "net2", - "winapi 0.2.8", - "ws2_32-sys", + "bytes", + 
"encoding_rs", + "futures-util", + "http 0.2.12", + "httparse", + "log", + "memchr", + "mime", + "spin", + "version_check", ] [[package]] -name = "miow" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9f1c5b025cda876f66ef43a113f91ebc9f4ccef34843000e0adf6ebbab84e21" +name = "multisig-authorization" +version = "0.1.0" dependencies = [ - "winapi 0.3.9", + "casper-contract", + "casper-types", ] [[package]] -name = "multihash" -version = "0.13.2" +name = "munge" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dac63698b887d2d929306ea48b63760431ff8a24fac40ddb22f9c7f49fb7cab" +checksum = "9e22e7961c873e8b305b176d2a4e1d41ce7ba31bc1c52d2a107a89568ec74c55" dependencies = [ - "blake2b_simd", - "blake2s_simd", - "blake3", - "digest 0.9.0", - "generic-array 0.14.4", - "multihash-derive", - "sha2 0.9.3", - "sha3", - "unsigned-varint 0.5.1", + "munge_macro", ] [[package]] -name = "multihash-derive" -version = "0.7.1" +name = "munge_macro" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85ee3c48cb9d9b275ad967a0e96715badc13c6029adb92f34fa17b9ff28fd81f" +checksum = "0ac7d860b767c6398e88fe93db73ce53eb496057aa6895ffa4d60cb02e1d1c6b" dependencies = [ - "proc-macro-crate", - "proc-macro-error", "proc-macro2", "quote", - "syn", - "synstructure", + "syn 2.0.101", ] [[package]] -name = "multimap" -version = "0.8.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" +name = "named-dictionary-test" +version = "0.1.0" +dependencies = [ + "casper-contract", + "casper-types", +] [[package]] -name = "multipart" -version = "0.17.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d050aeedc89243f5347c3e237e3e13dc76fbe4ae3742a57b94dc14f69acf76d4" +name = "named-keys" +version = "0.1.0" dependencies = [ - "buf_redux", - "httparse", 
- "log 0.4.14", - "mime", - "mime_guess", - "quick-error 1.2.3", - "rand 0.7.3", - "safemem", - "tempfile", - "twoway", + "casper-contract", + "casper-types", ] [[package]] -name = "multistream-select" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d91ec0a2440aaff5f78ec35631a7027d50386c6163aa975f7caa0d5da4b6ff8" +name = "named-keys-stored" +version = "0.1.0" dependencies = [ - "bytes 1.0.1", - "futures", - "log 0.4.14", - "pin-project 1.0.6", - "smallvec", - "unsigned-varint 0.7.0", + "casper-contract", + "casper-types", ] [[package]] -name = "named-keys" +name = "named-keys-stored-call" version = "0.1.0" dependencies = [ "casper-contract", @@ -3623,13 +4116,12 @@ dependencies = [ [[package]] name = "native-tls" -version = "0.2.7" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8d96b2e1c8da3957d58100b09f102c6d9cfdfced01b7ec5a8974044bb09dbd4" +checksum = "87de3442987e9dbec73158d5c715e7ad9072fda936bb03d19d7fa10e00520f0e" dependencies = [ - "lazy_static", "libc", - "log 0.4.14", + "log", "openssl", "openssl-probe", "openssl-sys", @@ -3640,148 +4132,146 @@ dependencies = [ ] [[package]] -name = "nb-connect" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a19900e7eee95eb2b3c2e26d12a874cc80aaf750e31be6fcbe743ead369fa45d" +name = "nctl-dictionary" +version = "0.1.0" dependencies = [ - "libc", - "socket2 0.4.0", + "casper-contract", + "casper-types", +] + +[[package]] +name = "new-named-uref" +version = "0.1.0" +dependencies = [ + "casper-contract", ] [[package]] -name = "net2" -version = "0.2.37" +name = "nix" +version = "0.26.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "391630d12b68002ae1e25e8f974306474966550ad82dac6886fb8910c19568ae" +checksum = "598beaf3cc6fdd9a5dfb1630c2800c7acd31df7aaf0f565796fba2b53ca1af1b" dependencies = [ - "cfg-if 0.1.10", + "bitflags 1.3.2", + "cfg-if 1.0.0", 
"libc", - "winapi 0.3.9", ] [[package]] -name = "nohash-hasher" -version = "0.2.0" +name = "nodrop" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bf50223579dc7cdcfb3bfcacf7069ff68243f8c363f62ffa99cf000a6b9c451" +checksum = "72ef4a56884ca558e5ddb05a1d1e7e1bfd9a68d9ed024c21704cc98872dae1bb" [[package]] -name = "ntapi" -version = "0.3.6" +name = "nom" +version = "7.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f6bb902e437b6d86e03cce10a7e2af662292c5dfef23b65899ea3ac9354ad44" +checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" dependencies = [ - "winapi 0.3.9", + "memchr", + "minimal-lexical", ] [[package]] -name = "num" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43db66d1170d347f9a065114077f7dccb00c1b9478c89384490a3425279a4606" +name = "non-standard-payment" +version = "0.1.0" dependencies = [ - "num-complex", - "num-integer", - "num-iter", - "num-rational 0.4.0", - "num-traits", + "casper-contract", + "casper-types", ] [[package]] -name = "num-bigint" -version = "0.2.6" +name = "nu-ansi-term" +version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "090c7f9998ee0ff65aa5b723e4009f7b217707f1fb5ea551329cc4d6231fb304" +checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" dependencies = [ - "autocfg", - "num-integer", - "num-traits", + "overload", + "winapi", ] [[package]] -name = "num-bigint" -version = "0.3.2" +name = "num" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d0a3d5e207573f948a9e5376662aa743a2ea13f7c50a554d7af443a73fbfeba" +checksum = "35bd024e8b2ff75562e5f34e7f4905839deb4b22955ef5e73d2fea1b9813cb23" dependencies = [ - "autocfg", + "num-bigint", + "num-complex", "num-integer", + "num-iter", + "num-rational", "num-traits", ] [[package]] name = "num-bigint" -version = "0.4.0" 
+version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e0d047c1062aa51e256408c560894e5251f08925980e53cf1aa5bd00eec6512" +checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" dependencies = [ - "autocfg", "num-integer", "num-traits", ] [[package]] name = "num-complex" -version = "0.4.0" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26873667bbbb7c5182d4a37c1add32cdf09f841af72da53318fdb81543c15085" +checksum = "73f88a1307638156682bada9d7604135552957b7818057dcef22705b4d509495" dependencies = [ "num-traits", ] [[package]] name = "num-derive" -version = "0.3.3" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "876a53fff98e03a936a674b29568b0e605f06b29372c2489ff4de23f1949743d" +checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.101", ] [[package]] -name = "num-integer" -version = "0.1.44" +name = "num-format" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2cc698a63b549a70bc047073d2949cce27cd1c7b0a4a862d08a8031bc2801db" +checksum = "a652d9771a63711fd3c3deb670acfbe5c30a4072e664d7a3bf5a9e1056ac72c3" dependencies = [ - "autocfg", - "num-traits", + "arrayvec 0.7.6", + "itoa", ] [[package]] -name = "num-iter" -version = "0.1.42" +name = "num-integer" +version = "0.1.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2021c8337a54d21aca0d59a92577a029af9431cb59b909b03252b9c164fad59" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" dependencies = [ - "autocfg", - "num-integer", "num-traits", ] [[package]] -name = "num-rational" -version = "0.2.4" +name = "num-iter" +version = "0.1.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5c000134b5dbf44adc5cb772486d335293351644b801551abe8f75c84cfa4aef" +checksum = "1429034a0490724d0075ebb2bc9e875d6503c3cf69e235a8941aa757d83ef5bf" dependencies = [ "autocfg", - "num-bigint 0.2.6", "num-integer", "num-traits", ] [[package]] name = "num-rational" -version = "0.4.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d41702bd167c2df5520b384281bc111a4b5efcf7fbc4c9c222c815b07e0a6a6a" +checksum = "f83d14da390562dca69fc84082e73e548e1ad308d24accdedd2720017cb37824" dependencies = [ - "autocfg", - "num-bigint 0.4.0", + "num-bigint", "num-integer", "num-traits", "serde", @@ -3789,89 +4279,111 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.14" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" dependencies = [ "autocfg", ] [[package]] name = "num_cpus" -version = "1.13.0" +version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3" +checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" dependencies = [ - "hermit-abi", + "hermit-abi 0.3.9", "libc", ] [[package]] name = "object" -version = "0.23.0" +version = "0.32.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9a7ab5d64814df0fe4a4b5ead45ed6c5f181ee3ff04ba344313a6c80446c5d4" +checksum = "a6a622008b6e321afc04970976f62ee297fdbaa6f95318ca343e3eebb9648441" +dependencies = [ + "crc32fast", + "flate2", + "hashbrown 0.14.5", + "indexmap 2.9.0", + "memchr", + "ruzstd", +] [[package]] -name = "once_cell" -version = "1.7.2" +name = "object" +version = "0.36.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af8b08b04175473088b46763e51ee54da5f9a164bc162f615b91bc179dbf15a3" +checksum 
= "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" +dependencies = [ + "memchr", +] [[package]] -name = "oorandom" -version = "11.1.3" +name = "once_cell" +version = "1.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575" +checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" [[package]] -name = "opaque-debug" -version = "0.2.3" +name = "oorandom" +version = "11.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" +checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e" [[package]] name = "opaque-debug" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" +checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" [[package]] name = "openssl" -version = "0.10.33" +version = "0.10.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a61075b62a23fef5a29815de7536d940aa35ce96d18ce0cc5076272db678a577" +checksum = "fedfea7d58a1f73118430a55da6a286e7b044961736ce96a16a17068ea25e5da" dependencies = [ - "bitflags 1.2.1", + "bitflags 2.9.1", "cfg-if 1.0.0", "foreign-types", "libc", "once_cell", + "openssl-macros", "openssl-sys", ] +[[package]] +name = "openssl-macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", +] + [[package]] name = "openssl-probe" -version = "0.1.2" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de" +checksum = 
"d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" [[package]] name = "openssl-src" -version = "111.15.0+1.1.1k" +version = "300.5.0+3.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1a5f6ae2ac04393b217ea9f700cd04fa9bf3d93fae2872069f3d15d908af70a" +checksum = "e8ce546f549326b0e6052b649198487d91320875da901e7bd11a06d1ee3f9c2f" dependencies = [ "cc", ] [[package]] name = "openssl-sys" -version = "0.9.61" +version = "0.9.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "313752393519e876837e09e1fa183ddef0be7735868dced3196f4472d536277f" +checksum = "e145e1651e858e820e4860f7b9c5e169bc1d8ce1c86043be79fa7b7634821847" dependencies = [ - "autocfg", "cc", "libc", "openssl-src", @@ -3880,7 +4392,7 @@ dependencies = [ ] [[package]] -name = "overwrite-uref-content" +name = "ordered-transforms" version = "0.1.0" dependencies = [ "casper-contract", @@ -3888,77 +4400,69 @@ dependencies = [ ] [[package]] -name = "parity-multiaddr" -version = "0.11.2" +name = "os_str_bytes" +version = "6.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2355d85b9a3786f481747ced0e0ff2ba35213a1f9bd406ed906554d7af805a1" + +[[package]] +name = "output_vt100" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58341485071825827b7f03cf7efd1cb21e6a709bea778fb50227fd45d2f361b4" +checksum = "628223faebab4e3e40667ee0b2336d34a5b960ff60ea743ddfdbcf7770bcfb66" dependencies = [ - "arrayref", - "bs58", - "byteorder", - "data-encoding", - "multihash", - "percent-encoding", - "serde", - "static_assertions", - "unsigned-varint 0.7.0", - "url", + "winapi", ] [[package]] -name = "parity-wasm" -version = "0.41.0" +name = "overload" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddfc878dac00da22f8f61e7af3157988424567ab01d9920b962ef7dcbd7cd865" +checksum = 
"b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" [[package]] -name = "parking" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "427c3892f9e783d91cc128285287e70a59e206ca452770ece88a76f7a3eddd72" +name = "overwrite-uref-content" +version = "0.1.0" +dependencies = [ + "casper-contract", + "casper-types", +] [[package]] name = "parking_lot" -version = "0.11.1" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d7744ac029df22dca6284efe4e898991d28e3085c706c972bcd7da4a27a15eb" +checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" dependencies = [ - "instant", "lock_api", "parking_lot_core", ] [[package]] name = "parking_lot_core" -version = "0.8.3" +version = "0.9.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa7a782938e745763fe6907fc6ba86946d72f49fe7e21de074e08128a99fb018" +checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if 1.0.0", - "instant", "libc", "redox_syscall", "smallvec", - "winapi 0.3.9", + "windows-targets 0.52.6", ] [[package]] name = "paste" -version = "0.1.18" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45ca20c77d80be666aef2b45486da86238fabe33e38306bd3118fe4af33fa880" -dependencies = [ - "paste-impl", - "proc-macro-hack", -] +checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" [[package]] -name = "paste-impl" -version = "0.1.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d95a7db200b97ef370c8e6de0088252f7e0dfff7d047a28528e47456c0fc98b6" +name = "payment-purse-persist" +version = "0.1.0" dependencies = [ - "proc-macro-hack", + "casper-contract", + "casper-types", ] [[package]] @@ -3967,87 +4471,42 @@ version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"fd56cbd21fea48d0c440b41cd69c589faacade08c992d9a54e471b79d0fd13eb" dependencies = [ - "base64", + "base64 0.13.1", "once_cell", "regex", ] [[package]] name = "percent-encoding" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" - -[[package]] -name = "pest" -version = "2.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10f4872ae94d7b90ae48754df22fd42ad52ce740b8f370b03da4835417403e53" -dependencies = [ - "ucd-trie", -] - -[[package]] -name = "petgraph" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "467d164a6de56270bd7c4d070df81d07beace25012d5103ced4e9ff08d6afdb7" -dependencies = [ - "fixedbitset", - "indexmap", -] - -[[package]] -name = "pin-project" -version = "0.4.28" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "918192b5c59119d51e0cd221f4d49dde9112824ba717369e903c97d076083d0f" -dependencies = [ - "pin-project-internal 0.4.28", -] +checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pin-project" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc174859768806e91ae575187ada95c91a29e96a98dc5d2cd9a1fed039501ba6" -dependencies = [ - "pin-project-internal 1.0.6", -] - -[[package]] -name = "pin-project-internal" -version = "0.4.28" +version = "1.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3be26700300be6d9d23264c73211d8190e755b6b5ca7a1b28230025511b52a5e" +checksum = "677f1add503faace112b9f1373e43e9e054bfdd22ff1a63c1bc485eaec6a6a8a" dependencies = [ - "proc-macro2", - "quote", - "syn", + "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.0.6" +version = "1.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a490329918e856ed1b083f244e3bfe2d8c4f336407e4ea9e1a9f479ff09049e5" +checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.101", ] [[package]] name = "pin-project-lite" -version = "0.1.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "257b64915a082f7811703966789728173279bdebb956b143dbcd23f6f970a777" - -[[package]] -name = "pin-project-lite" -version = "0.2.6" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc0e1f259c92177c30a4c9d177246edd0a3568b25756a977d0632cf8fa37e905" +checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" [[package]] name = "pin-utils" @@ -4057,24 +4516,25 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkcs8" -version = "0.3.3" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4839a901843f3942576e65857f0ebf2e190ef7024d3c62a94099ba3f819ad1d" +checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" dependencies = [ "der", + "spki", ] [[package]] name = "pkg-config" -version = "0.3.19" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c" +checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" [[package]] name = "plotters" -version = "0.3.0" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45ca0ae5f169d0917a7c7f5a9c1a3d3d9598f18f529dd2b8373ed988efea307a" +checksum = "5aeb6f403d7a4911efb1e33402027fc44f29b5bf6def3effcc22d7bb75f2b747" dependencies = [ "num-traits", "plotters-backend", @@ -4085,24 +4545,24 @@ dependencies = [ [[package]] name = "plotters-backend" -version = "0.3.0" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "b07fffcddc1cb3a1de753caa4e4df03b79922ba43cf882acc1bdd7e8df9f4590" +checksum = "df42e13c12958a16b3f7f4386b9ab1f3e7933914ecea48da7139435263a4172a" [[package]] name = "plotters-svg" -version = "0.3.0" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b38a02e23bd9604b842a812063aec4ef702b57989c37b655254bb61c471ad211" +checksum = "51bae2ac328883f7acdfea3d66a7c35751187f870bc81f94563733a154d7a670" dependencies = [ "plotters-backend", ] [[package]] name = "pnet" -version = "0.27.2" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b657d5b9a98a2c81b82549922b8b15984e49f8120cd130b11a09f81b9b55d633" +checksum = "4b6d2a0409666964722368ef5fb74b9f93fac11c18bef3308693c16c6733f103" dependencies = [ "ipnetwork", "pnet_base", @@ -4114,71 +4574,71 @@ dependencies = [ [[package]] name = "pnet_base" -version = "0.27.2" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e4688aa497ef62129f302a5800ebde67825f8ff129f43690ca84099f6620bed" +checksum = "25488cd551a753dcaaa6fffc9f69a7610a412dd8954425bf7ffad5f7d1156fb8" [[package]] name = "pnet_datalink" -version = "0.27.2" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59001c9c4d9d23bf2f61afaaf134a766fd6932ba2557c606b9112157053b9ac7" +checksum = "d4d1f8ab1ef6c914cf51dc5dfe0be64088ea5f3b08bbf5a31abc70356d271198" dependencies = [ "ipnetwork", "libc", "pnet_base", "pnet_sys", - "winapi 0.3.9", + "winapi", ] [[package]] name = "pnet_macros" -version = "0.27.2" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d894a90dbdbe976e624453fc31b1912f658083778329442dda1cca94f76a3e76" +checksum = "30490e0852e58402b8fae0d39897b08a24f493023a4d6cf56b2e30f31ed57548" dependencies = [ + "proc-macro2", + "quote", "regex", - "syntex", - "syntex_syntax", + "syn 1.0.109", ] [[package]] name = "pnet_macros_support" 
-version = "0.27.2" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b99269a458570bc06a9132254349f6543d9abc92e88b68d8de934aac9481f6c" +checksum = "d4714e10f30cab023005adce048f2d30dd4ac4f093662abf2220855655ef8f90" dependencies = [ "pnet_base", ] [[package]] name = "pnet_packet" -version = "0.27.2" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33f8238f4eb897a55ca06510cd71afb5b5ca7b4ff2d7188f1ca855fc1710133e" +checksum = "8588067671d03c9f4254b2e66fecb4d8b93b5d3e703195b84f311cd137e32130" dependencies = [ - "glob", + "glob 0.3.2", "pnet_base", "pnet_macros", "pnet_macros_support", - "syntex", ] [[package]] name = "pnet_sys" -version = "0.27.2" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7589e4c4e7ed72a3ffdff8a65d3bea84e8c3a23e19d0a10e8f45efdf632fff15" +checksum = "d9a3f32b0df45515befd19eed04616f6b56a488da92afc61164ef455e955f07f" dependencies = [ "libc", - "winapi 0.3.9", + "winapi", ] [[package]] name = "pnet_transport" -version = "0.27.2" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "326abdfd2e70e8e943bd58087b59686de170cac050a3b19c9fcc84db01690af5" +checksum = "932b2916d693bcc5fa18443dc99142e0a6fd31a6ce75a511868f7174c17e2bce" dependencies = [ "libc", "pnet_base", @@ -4187,78 +4647,75 @@ dependencies = [ ] [[package]] -name = "polling" -version = "2.0.3" +name = "potential_utf" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fc12d774e799ee9ebae13f4076ca003b40d18a11ac0f3641e6f899618580b7b" +checksum = "e5a7c30837279ca13e7c867e9e40053bc68740f988cb07f7ca6df43cc734b585" dependencies = [ - "cfg-if 1.0.0", - "libc", - "log 0.4.14", - "wepoll-sys", - "winapi 0.3.9", + "zerovec", ] [[package]] -name = "poly1305" -version = "0.6.2" +name = "pprof" +version = "0.14.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b7456bc1ad2d4cf82b3a016be4c2ac48daf11bf990c1603ebd447fe6f30fca8" +checksum = "ebbe2f8898beba44815fdc9e5a4ae9c929e21c5dc29b0c774a15555f7f58d6d0" dependencies = [ - "cpuid-bool 0.2.0", - "universal-hash", + "aligned-vec", + "backtrace", + "cfg-if 1.0.0", + "criterion", + "findshlibs", + "inferno", + "libc", + "log", + "nix", + "once_cell", + "parking_lot", + "smallvec", + "symbolic-demangle", + "tempfile", + "thiserror 1.0.69", ] [[package]] -name = "polyval" -version = "0.4.5" +name = "ppv-lite86" +version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eebcc4aa140b9abd2bc40d9c3f7ccec842679cd79045ac3a7ac698c1a064b7cd" +checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" dependencies = [ - "cpuid-bool 0.2.0", - "opaque-debug 0.3.0", - "universal-hash", + "zerocopy", ] [[package]] -name = "ppv-lite86" -version = "0.2.10" +name = "pretty_assertions" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857" +checksum = "1cab0e7c02cf376875e9335e0ba1da535775beb5450d21e1dffca068818ed98b" +dependencies = [ + "ansi_term", + "ctor", + "diff", + "output_vt100", +] [[package]] -name = "predicates" -version = "1.0.7" +name = "prettyplease" +version = "0.2.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eeb433456c1a57cc93554dea3ce40b4c19c4057e41c55d4a0f3d84ea71c325aa" +checksum = "664ec5419c51e34154eec046ebcba56312d5a2fc3b09a06da188e1ad21afadf6" dependencies = [ - "difference", - "predicates-core", -] - -[[package]] -name = "predicates-core" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57e35a3326b75e49aa85f5dc6ec15b41108cf5aee58eabb1f274dd18b73c2451" - -[[package]] -name = "predicates-tree" -version = "1.0.2" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "15f553275e5721409451eb85e15fd9a860a6e5ab4496eb215987502b5f5391f2" -dependencies = [ - "predicates-core", - "treeline", + "proc-macro2", + "syn 2.0.101", ] [[package]] name = "proc-macro-crate" -version = "0.1.5" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d6ea3c4595b96363c13943497db34af4460fb474a95c43f4446ad341b8c9785" +checksum = "edce586971a4dfaa28950c6f18ed55e0406c1ab88bbce2c6f6293a7aaba73d35" dependencies = [ - "toml", + "toml_edit 0.22.26", ] [[package]] @@ -4270,7 +4727,7 @@ dependencies = [ "proc-macro-error-attr", "proc-macro2", "quote", - "syn", + "syn 1.0.109", "version_check", ] @@ -4286,125 +4743,139 @@ dependencies = [ ] [[package]] -name = "proc-macro-hack" -version = "0.5.19" +name = "proc-macro-error-attr2" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" +checksum = "96de42df36bb9bba5542fe9f1a054b8cc87e172759a1868aa05c1f3acc89dfc5" +dependencies = [ + "proc-macro2", + "quote", +] [[package]] -name = "proc-macro-nested" -version = "0.1.7" +name = "proc-macro-error2" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc881b2c22681370c6a780e47af9840ef841837bc98118431d4e1868bd0c1086" +checksum = "11ec05c52be0a07b08061f7dd003e7d7092e0472bc731b4af7bb1ef876109802" +dependencies = [ + "proc-macro-error-attr2", + "proc-macro2", + "quote", + "syn 2.0.101", +] [[package]] name = "proc-macro2" -version = "1.0.26" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a152013215dca273577e18d2bf00fa862b89b24169fb78c4c95aeb07992c9cec" +checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778" dependencies = [ - "unicode-xid 0.2.1", + "unicode-ident", ] [[package]] name = "prometheus" -version = "0.12.0" +version = "0.13.4" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5986aa8d62380092d2f50f8b1cdba9cb9b6731ffd4b25b51fd126b6c3e05b99c" +checksum = "3d33c28a30771f7f96db69893f78b857f7450d7e0237e9c8fc6427a81bae7ed1" dependencies = [ "cfg-if 1.0.0", "fnv", "lazy_static", "memchr", "parking_lot", - "protobuf", - "thiserror", + "thiserror 1.0.69", ] [[package]] name = "proptest" -version = "1.0.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e0d9cc07f18492d879586c92b485def06bc850da3118075cd45d50e9c95b0e5" +checksum = "14cae93065090804185d3b75f0bf93b8eeda30c7a9b4a33d3bdb3988d6229e50" dependencies = [ "bit-set", - "bitflags 1.2.1", - "byteorder", + "bit-vec", + "bitflags 2.9.1", "lazy_static", "num-traits", - "quick-error 2.0.0", - "rand 0.8.3", - "rand_chacha 0.3.0", + "rand", + "rand_chacha", "rand_xorshift", - "regex-syntax", + "regex-syntax 0.8.5", "rusty-fork", "tempfile", + "unarray", ] [[package]] -name = "prost" -version = "0.7.0" +name = "proptest-attr-macro" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e6984d2f1a23009bd270b8bb56d0926810a3d483f59c987d77969e9d8e840b2" +checksum = "9fa06db3abc95f048e0afa371db5569b24912bb98a8e2e2e89c75c5b43bc2aa8" dependencies = [ - "bytes 1.0.1", - "prost-derive", + "proc-macro2", + "quote", + "syn 1.0.109", ] [[package]] -name = "prost-build" -version = "0.7.0" +name = "proptest-derive" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32d3ebd75ac2679c2af3a92246639f9fcc8a442ee420719cc4fe195b98dd5fa3" +checksum = "4ee1c9ac207483d5e7db4940700de86a9aae46ef90c48b57f99fe7edb8345e49" dependencies = [ - "bytes 1.0.1", - "heck", - "itertools 0.9.0", - "log 0.4.14", - "multimap", - "petgraph", - "prost", - "prost-types", - "tempfile", - "which", + "proc-macro2", + "quote", + "syn 2.0.101", ] [[package]] -name = "prost-derive" -version = "0.7.0" +name = "ptr_meta" +version = 
"0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "169a15f3008ecb5160cba7d37bcd690a7601b6d30cfb87a117d45e59d52af5d4" +checksum = "0738ccf7ea06b608c10564b31debd4f5bc5e197fc8bfe088f68ae5ce81e7a4f1" dependencies = [ - "anyhow", - "itertools 0.9.0", - "proc-macro2", - "quote", - "syn", + "ptr_meta_derive 0.1.4", ] [[package]] -name = "prost-types" -version = "0.7.0" +name = "ptr_meta" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe9e76f66d3f9606f44e45598d155cb13ecf09f4a28199e48daf8c8fc937ea90" +dependencies = [ + "ptr_meta_derive 0.3.0", +] + +[[package]] +name = "ptr_meta_derive" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b518d7cdd93dab1d1122cf07fa9a60771836c668dde9d9e2a139f957f0d9f1bb" +checksum = "16b845dbfca988fa33db069c0e230574d15a3088f147a87b64c7589eb662c9ac" dependencies = [ - "bytes 1.0.1", - "prost", + "proc-macro2", + "quote", + "syn 1.0.109", ] [[package]] -name = "protobuf" -version = "2.22.1" +name = "ptr_meta_derive" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b7f4a129bb3754c25a4e04032a90173c68f85168f77118ac4cb4936e7f06f92" +checksum = "ca414edb151b4c8d125c12566ab0d74dc9cdba36fb80eb7b848c15f495fd32d1" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", +] [[package]] name = "pulldown-cmark" -version = "0.8.0" +version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffade02495f22453cd593159ea2f59827aae7f53fa8323f756799b670881dcf8" +checksum = "57206b407293d2bcd3af849ce869d52068623f19e1b5ff8e8778e3309439682b" dependencies = [ - "bitflags 1.2.1", + "bitflags 2.9.1", "memchr", "unicase", ] @@ -4434,29 +4905,27 @@ dependencies = [ ] [[package]] -name = "pwasm-utils" -version = "0.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1c8ac87af529432d3a4f0e2b3bbf08af49f28f09cc73ed7e551161bdaef5f78d" +name = "purse-holder-stored-upgrader-v2-2" +version = "0.1.0" dependencies = [ - "byteorder", - "log 0.4.14", - "parity-wasm", + "casper-contract", + "casper-types", ] [[package]] name = "quanta" -version = "0.7.2" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e76a3afdefd0ce2c0363bf3146271e947782240ea617885dd64e56c4de9fb3c9" +checksum = "20afe714292d5e879d8b12740aa223c6a88f118af41870e8b6196e39a02238a8" dependencies = [ - "atomic-shim", - "ctor", + "crossbeam-utils", "libc", "mach", "once_cell", "raw-cpuid", - "winapi 0.3.9", + "wasi 0.10.2+wasi-snapshot-preview1", + "web-sys", + "winapi", ] [[package]] @@ -4466,166 +4935,162 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] -name = "quick-error" -version = "2.0.0" +name = "quick-xml" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ac73b1112776fc109b2e61909bc46c7e1bf0d7f690ffb1676553acce16d5cda" +checksum = "7f50b1c63b38611e7d4d7f68b82d3ad0cc71a2ad2e7f61fc10f1328d917c93cd" +dependencies = [ + "memchr", +] [[package]] name = "quote" -version = "1.0.9" +version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3d0b9745dc2debf507c8422de05d7226cc1f0644216dfdfead988f9b1ab32a7" +checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" dependencies = [ "proc-macro2", ] [[package]] -name = "radium" -version = "0.3.0" +name = "r-efi" +version = "5.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "def50a86306165861203e7f84ecffbbdfdea79f0e51039b33de1e952358c47ac" +checksum = "74765f6d916ee2faa39bc8e68e4f3ed8949b48cccdac59983d287a7cb71ce9c5" [[package]] -name = "rand" -version = "0.7.3" +name = "rancor" +version = "0.1.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" +checksum = "caf5f7161924b9d1cea0e4cabc97c372cea92b5f927fc13c6bca67157a0ad947" dependencies = [ - "getrandom 0.1.16", - "libc", - "rand_chacha 0.2.2", - "rand_core 0.5.1", - "rand_hc 0.2.0", + "ptr_meta 0.3.0", ] [[package]] name = "rand" -version = "0.8.3" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ef9e7e66b4468674bfcb0c81af8b7fa0bb154fa9f28eb840da5c447baeb8d7e" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", - "rand_chacha 0.3.0", - "rand_core 0.6.2", - "rand_hc 0.3.0", + "rand_chacha", + "rand_core", ] [[package]] name = "rand_chacha" -version = "0.2.2" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", - "rand_core 0.5.1", + "rand_core", ] [[package]] -name = "rand_chacha" -version = "0.3.0" +name = "rand_core" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e12735cf05c9e10bf21534da50a147b924d555dc7a547c42e6bb2d5b6017ae0d" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "ppv-lite86", - "rand_core 0.6.2", + "getrandom 0.2.16", ] [[package]] -name = "rand_core" -version = "0.5.1" +name = "rand_pcg" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" +checksum = "59cad018caf63deb318e5a4586d99a24424a364f40f1e5778c29aca23f4fc73e" dependencies = [ - "getrandom 0.1.16", + "rand_core", ] [[package]] -name = "rand_core" -version = "0.6.2" +name = "rand_xorshift" +version = "0.3.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "34cf66eb183df1c5876e2dcf6b13d57340741e8dc255b48e40a26de954d06ae7" +checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f" dependencies = [ - "getrandom 0.2.2", + "rand_core", ] [[package]] -name = "rand_hc" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" +name = "random-bytes" +version = "0.1.0" dependencies = [ - "rand_core 0.5.1", + "casper-contract", + "casper-types", ] [[package]] -name = "rand_hc" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3190ef7066a446f2e7f42e239d161e905420ccab01eb967c9eb27d21b2322a73" +name = "random-bytes-payment" +version = "0.1.0" dependencies = [ - "rand_core 0.6.2", + "casper-contract", + "casper-types", ] [[package]] -name = "rand_pcg" -version = "0.3.0" +name = "raw-cpuid" +version = "10.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7de198537002b913568a3847e53535ace266f93526caf5c360ec41d72c5787f0" +checksum = "6c297679cb867470fa8c9f67dbba74a78d78e3e98d7cf2b08d6d71540f797332" dependencies = [ - "rand_core 0.6.2", + "bitflags 1.3.2", ] [[package]] -name = "rand_xorshift" -version = "0.3.0" +name = "rayon" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f" +checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa" dependencies = [ - "rand_core 0.6.2", + "either", + "rayon-core", ] [[package]] -name = "raw-cpuid" -version = "9.0.0" +name = "rayon-core" +version = "1.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c27cb5785b85bd05d4eb171556c9a1a514552e26123aeae6bb7d811353148026" +checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" dependencies = [ - 
"bitflags 1.2.1", + "crossbeam-deque", + "crossbeam-utils", ] [[package]] -name = "rayon" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b0d8e0819fadc20c74ea8373106ead0600e3a67ef1fe8da56e39b9ae7275674" +name = "read-from-key" +version = "0.1.0" dependencies = [ - "autocfg", - "crossbeam-deque 0.8.0", - "either", - "rayon-core", + "casper-contract", + "casper-types", ] [[package]] -name = "rayon-core" -version = "1.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ab346ac5921dc62ffa9f89b7a773907511cdfa5490c572ae9be1be33e8afa4a" +name = "recover-secp256k1" +version = "0.1.0" dependencies = [ - "crossbeam-channel 0.5.0", - "crossbeam-deque 0.8.0", - "crossbeam-utils 0.8.3", - "lazy_static", - "num_cpus", + "casper-contract", + "casper-types", +] + +[[package]] +name = "redelegate" +version = "0.1.0" +dependencies = [ + "casper-contract", + "casper-types", ] [[package]] name = "redox_syscall" -version = "0.2.5" +version = "0.5.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94341e4e44e24f6b591b59e47a8a027df12e008d73fd5672dbea9cc22f4507d9" +checksum = "928fca9cf2aa042393a8325b9ead81d2f0df4cb12e1e24cef072922ccd99c5af" dependencies = [ - "bitflags 1.2.1", + "bitflags 2.9.1", ] [[package]] @@ -4638,120 +5103,62 @@ dependencies = [ [[package]] name = "regex" -version = "1.4.5" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "957056ecddbeba1b26965114e191d2e8589ce74db242b6ea25fc4062427a5c19" +checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" dependencies = [ "aho-corasick", "memchr", - "regex-syntax", + "regex-automata 0.4.9", + "regex-syntax 0.8.5", ] [[package]] name = "regex-automata" -version = "0.1.9" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae1ded71d66a4a97f5e961fd0cb25a5f366a42a41570d16a763a69c092c26ae4" +checksum = 
"6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" dependencies = [ - "byteorder", - "regex-syntax", + "regex-syntax 0.6.29", ] [[package]] -name = "regex-syntax" -version = "0.6.23" +name = "regex-automata" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24d5f089152e60f62d28b835fbff2cd2e8dc0baf1ac13343bef92ab7eed84548" - -[[package]] -name = "remove-associated-key" -version = "0.1.0" +checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" dependencies = [ - "casper-contract", - "casper-types", + "aho-corasick", + "memchr", + "regex-syntax 0.8.5", ] [[package]] -name = "remove_dir_all" -version = "0.5.3" +name = "regex-syntax" +version = "0.6.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" -dependencies = [ - "winapi 0.3.9", -] +checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] -name = "reqwest" -version = "0.10.10" +name = "regex-syntax" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0718f81a8e14c4dbb3b34cf23dc6aaf9ab8a0dfec160c534b3dbca1aaa21f47c" -dependencies = [ - "base64", - "bytes 0.5.6", - "encoding_rs", - "futures-core", - "futures-util", - "http", - "http-body 0.3.1", - "hyper 0.13.10", - "hyper-tls 0.4.3", - "ipnet", - "js-sys", - "lazy_static", - "log 0.4.14", - "mime", - "mime_guess", - "native-tls", - "percent-encoding", - "pin-project-lite 0.2.6", - "serde", - "serde_urlencoded", - "tokio 0.2.25", - "tokio-tls", - "url", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", - "winreg", -] +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] -name = "reqwest" -version = "0.11.2" +name = "region" +version = "3.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"bf12057f289428dbf5c591c74bf10392e4a8003f993405a902f20117019022d4" +checksum = "e6b6ebd13bc009aef9cd476c1310d49ac354d36e240cf1bd753290f3dc7199a7" dependencies = [ - "base64", - "bytes 1.0.1", - "encoding_rs", - "futures-core", - "futures-util", - "http", - "http-body 0.4.1", - "hyper 0.14.5", - "hyper-tls 0.5.0", - "ipnet", - "js-sys", - "lazy_static", - "log 0.4.14", - "mime", - "native-tls", - "percent-encoding", - "pin-project-lite 0.2.6", - "serde", - "serde_json", - "serde_urlencoded", - "tokio 1.4.0", - "tokio-native-tls", - "url", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", - "winreg", + "bitflags 1.3.2", + "libc", + "mach2", + "windows-sys 0.52.0", ] [[package]] -name = "revert" +name = "regression-20210707" version = "0.1.0" dependencies = [ "casper-contract", @@ -4759,71 +5166,377 @@ dependencies = [ ] [[package]] -name = "ring" -version = "0.16.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" +name = "regression-20210831" +version = "0.1.0" dependencies = [ - "cc", - "libc", - "once_cell", - "spin", - "untrusted", - "web-sys", - "winapi 0.3.9", + "casper-contract", + "casper-types", ] [[package]] -name = "rmp" -version = "0.8.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f10b46df14cf1ee1ac7baa4d2fbc2c52c0622a4b82fa8740e37bc452ac0184f" +name = "regression-20220204" +version = "0.1.0" dependencies = [ - "byteorder", - "num-traits", + "casper-contract", + "casper-types", ] [[package]] -name = "rmp-serde" -version = "0.14.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ce7d70c926fe472aed493b902010bccc17fa9f7284145cb8772fd22fdb052d8" +name = "regression-20220204-call" +version = "0.1.0" dependencies = [ - "byteorder", - "rmp", - "serde", + "casper-contract", + "casper-types", ] [[package]] -name = "rustc-demangle" -version = "0.1.18" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e3bad0ee36814ca07d7968269dd4b7ec89ec2da10c4bb613928d3077083c232" +name = "regression-20220204-nontrivial" +version = "0.1.0" +dependencies = [ + "casper-contract", + "casper-types", +] [[package]] -name = "rustc-serialize" -version = "0.3.24" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcf128d1287d2ea9d80910b5f1120d0b8eede3fbf1abe91c40d39ea7d51e6fda" +name = "regression-20220207" +version = "0.1.0" +dependencies = [ + "casper-contract", + "casper-types", +] [[package]] -name = "rustc_version" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" +name = "regression-20220208" +version = "0.1.0" dependencies = [ - "semver 0.9.0", + "casper-contract", + "casper-types", ] [[package]] -name = "rustc_version" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0dfe2087c51c460008730de8b57e6a320782fbfb312e1f4d520e6c6fae155ee" +name = "regression-20220211" +version = "0.1.0" dependencies = [ - "semver 0.11.0", + "casper-contract", + "casper-types", ] +[[package]] +name = "regression-20220211-call" +version = "0.1.0" +dependencies = [ + "casper-contract", + "casper-types", +] + +[[package]] +name = "regression-20220222" +version = "0.1.0" +dependencies = [ + "casper-contract", + "casper-types", +] + +[[package]] +name = "regression-add-bid" +version = "0.1.0" +dependencies = [ + "casper-contract", + "casper-types", +] + +[[package]] +name = "regression-delegate" +version = "0.1.0" +dependencies = [ + "casper-contract", + "casper-types", +] + +[[package]] +name = "regression-payment" +version = "0.1.0" +dependencies = [ + "casper-contract", + "casper-types", +] + +[[package]] +name = "regression-transfer" +version = "0.1.0" +dependencies = [ + "casper-contract", + "casper-types", +] + +[[package]] +name = 
"regression_20211110" +version = "0.1.0" +dependencies = [ + "casper-contract", + "casper-types", +] + +[[package]] +name = "regression_20220119" +version = "0.1.0" +dependencies = [ + "casper-contract", + "casper-types", +] + +[[package]] +name = "regression_20240105" +version = "0.1.0" +dependencies = [ + "casper-contract", + "casper-types", +] + +[[package]] +name = "remove-associated-key" +version = "0.1.0" +dependencies = [ + "casper-contract", + "casper-types", +] + +[[package]] +name = "rend" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a35e8a6bf28cd121053a66aa2e6a2e3eaffad4a60012179f0e864aa5ffeff215" +dependencies = [ + "bytecheck 0.8.1", +] + +[[package]] +name = "reqwest" +version = "0.11.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd67538700a17451e7cba03ac727fb961abb7607553461627b97de0b89cf4a62" +dependencies = [ + "base64 0.21.7", + "bytes", + "encoding_rs", + "futures-core", + "futures-util", + "h2", + "http 0.2.12", + "http-body", + "hyper", + "hyper-tls", + "ipnet", + "js-sys", + "log", + "mime", + "native-tls", + "once_cell", + "percent-encoding", + "pin-project-lite", + "rustls-pemfile", + "serde", + "serde_json", + "serde_urlencoded", + "sync_wrapper", + "system-configuration", + "tokio", + "tokio-native-tls", + "tokio-util 0.7.15", + "tower-service", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "wasm-streams", + "web-sys", + "winreg", +] + +[[package]] +name = "ret-uref" +version = "0.1.0" +dependencies = [ + "casper-contract", + "casper-types", +] + +[[package]] +name = "revert" +version = "0.1.0" +dependencies = [ + "casper-contract", + "casper-types", +] + +[[package]] +name = "rfc6979" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8dd2a808d456c4a54e300a23e9f5a67e122c3024119acbfd73e3bf664491cb2" +dependencies = [ + "hmac", + "subtle", +] + +[[package]] +name = "rgb" +version = "0.8.50" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57397d16646700483b67d2dd6511d79318f9d057fdbd21a4066aeac8b41d310a" +dependencies = [ + "bytemuck", +] + +[[package]] +name = "ring" +version = "0.17.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" +dependencies = [ + "cc", + "cfg-if 1.0.0", + "getrandom 0.2.16", + "libc", + "untrusted 0.9.0", + "windows-sys 0.52.0", +] + +[[package]] +name = "rkyv" +version = "0.8.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e147371c75553e1e2fcdb483944a8540b8438c31426279553b9a8182a9b7b65" +dependencies = [ + "bytecheck 0.8.1", + "bytes", + "hashbrown 0.15.3", + "indexmap 2.9.0", + "munge", + "ptr_meta 0.3.0", + "rancor", + "rend", + "rkyv_derive", + "tinyvec", + "uuid 1.16.0", +] + +[[package]] +name = "rkyv_derive" +version = "0.8.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "246b40ac189af6c675d124b802e8ef6d5246c53e17367ce9501f8f66a81abb7a" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", +] + +[[package]] +name = "rmp" +version = "0.8.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "228ed7c16fa39782c3b3468e974aec2795e9089153cd08ee2e9aefb3613334c4" +dependencies = [ + "byteorder", + "num-traits", + "paste", +] + +[[package]] +name = "rmp-serde" +version = "0.14.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ce7d70c926fe472aed493b902010bccc17fa9f7284145cb8772fd22fdb052d8" +dependencies = [ + "byteorder", + "rmp", + "serde", +] + +[[package]] +name = "rustc-demangle" +version = "0.1.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" + +[[package]] +name = "rustc-hash" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" + +[[package]] +name = "rustc_version" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" +dependencies = [ + "semver", +] + +[[package]] +name = "rustix" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c71e83d6afe7ff64890ec6b71d6a69bb8a610ab78ce364b3352876bb4c801266" +dependencies = [ + "bitflags 2.9.1", + "errno", + "libc", + "linux-raw-sys", + "windows-sys 0.59.0", +] + +[[package]] +name = "rustls" +version = "0.23.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "730944ca083c1c233a75c09f199e973ca499344a2b7ba9e755c457e86fb4a321" +dependencies = [ + "log", + "once_cell", + "ring", + "rustls-pki-types", + "rustls-webpki", + "subtle", + "zeroize", +] + +[[package]] +name = "rustls-pemfile" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" +dependencies = [ + "base64 0.21.7", +] + +[[package]] +name = "rustls-pki-types" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "229a4a4c221013e7e1f1a043678c5cc39fe5171437c88fb47151a21e6f5b5c79" +dependencies = [ + "zeroize", +] + +[[package]] +name = "rustls-webpki" +version = "0.103.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e4a72fe2bcf7a6ac6fd7d0b9e5cb68aeb7d4c0a0271730218b3e92d43b4eb435" +dependencies = [ + "ring", + "rustls-pki-types", + "untrusted 0.9.0", +] + +[[package]] +name = "rustversion" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eded382c5f5f786b989652c49544c4877d9f015cc22e145a5ea8ea66c2921cd2" + [[package]] name = "rusty-fork" version = "0.3.0" @@ -4831,33 +5544,33 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "cb3dcc6e454c328bb824492db107ab7c0ae8fcffe4ad210136ef014458c1bc4f" dependencies = [ "fnv", - "quick-error 1.2.3", + "quick-error", "tempfile", "wait-timeout", ] [[package]] -name = "rw-stream-sink" -version = "0.2.1" +name = "ruzstd" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4da5fcb054c46f5a5dff833b129285a93d3f0179531735e6c866e8cc307d2020" +checksum = "58c4eb8a81997cf040a091d1f7e1938aeab6749d3a0dfa73af43cdc32393483d" dependencies = [ - "futures", - "pin-project 0.4.28", - "static_assertions", + "byteorder", + "derive_more 0.99.20", + "twox-hash", ] [[package]] name = "ryu" -version = "1.0.5" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e" +checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" [[package]] -name = "safemem" -version = "0.3.3" +name = "safe-transmute" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef703b7cb59335eae2eb93ceb664c0eb7ea6bf567079d843e09420219668e072" +checksum = "3944826ff8fa8093089aba3acb4ef44b9446a99a16f3bf4e74af3f77d340ab7d" [[package]] name = "same-file" @@ -4870,22 +5583,21 @@ dependencies = [ [[package]] name = "schannel" -version = "0.1.19" +version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f05ba609c234e60bee0d547fe94a4c7e9da733d1c962cf6e59efa4cd9c8bc75" +checksum = "1f29ebaa345f945cec9fbbc532eb307f0fdad8161f281b6369539c8d84876b3d" dependencies = [ - "lazy_static", - "winapi 0.3.9", + "windows-sys 0.59.0", ] [[package]] name = "schemars" -version = "0.8.3" +version = "0.8.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc6ab463ae35acccb5cba66c0084c985257b797d288b6050cc2f6ac1b266cb78" +checksum = 
"3fbf2ae1b8bc8e02df939598064d22402220cd5bbcca1c76f7d6a310974d5615" dependencies = [ "dyn-clone", - "indexmap", + "indexmap 1.9.3", "schemars_derive", "serde", "serde_json", @@ -4893,300 +5605,1148 @@ dependencies = [ [[package]] name = "schemars_derive" -version = "0.8.3" +version = "0.8.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "902fdfbcf871ae8f653bddf4b2c05905ddaabc08f69d32a915787e3be0d31356" +checksum = "32e265784ad618884abaea0600a9adf15393368d840e0222d101a072f3f7534d" dependencies = [ "proc-macro2", "quote", "serde_derive_internals", - "syn", + "syn 2.0.101", ] [[package]] name = "scoped-tls" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea6a9290e3c9cf0f18145ef7ffa62d68ee0bf5fcd651017e586dc7fd5da448c2" +checksum = "e1cf6437eb19a8f4a6cc0f7dca544973b0b78843adbfeb3683d1a94a0024a294" [[package]] name = "scopeguard" -version = "1.1.0" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" + +[[package]] +name = "sec1" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" +dependencies = [ + "base16ct", + "der", + "generic-array", + "subtle", + "zeroize", +] + +[[package]] +name = "security-framework" +version = "2.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" +checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" +dependencies = [ + "bitflags 2.9.1", + "core-foundation", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + +[[package]] +name = "security-framework-sys" +version = "2.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"49db231d56a190491cb4aeda9527f1ad45345af50b0851622a7adb8c03b01c32" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "self_cell" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f7d95a54511e0c7be3f51e8867aa8cf35148d7b9445d44de2f943e2b206e749" + +[[package]] +name = "semver" +version = "1.0.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56e6fa9c48d24d85fb3de5ad847117517440f6beceb7798af16b4a87d616b8d0" +dependencies = [ + "serde", +] + +[[package]] +name = "serde" +version = "1.0.219" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde-big-array" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd31f59f6fe2b0c055371bb2f16d7f0aa7d8881676c04a55b1596d1a17cd10a4" +dependencies = [ + "serde", +] + +[[package]] +name = "serde-map-to-array" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c14b52efc56c711e0dbae3f26e0cc233f5dac336c1bf0b07e1b7dc2dca3b2cc7" +dependencies = [ + "schemars", + "serde", +] + +[[package]] +name = "serde-wasm-bindgen" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3b4c031cd0d9014307d82b8abf653c0290fbdaeb4c02d00c63cf52f728628bf" +dependencies = [ + "js-sys", + "serde", + "wasm-bindgen", +] + +[[package]] +name = "serde_bytes" +version = "0.11.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8437fd221bde2d4ca316d61b90e337e9e702b3820b87d63caa9ba6c02bd06d96" +dependencies = [ + "serde", +] + +[[package]] +name = "serde_derive" +version = "1.0.219" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" 
+dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", +] + +[[package]] +name = "serde_derive_internals" +version = "0.29.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "18d26a20a969b9e3fdf2fc2d9f21eda6c40e2de84c9408bb5d3b05d499aae711" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", +] + +[[package]] +name = "serde_json" +version = "1.0.140" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373" +dependencies = [ + "indexmap 2.9.0", + "itoa", + "memchr", + "ryu", + "serde", +] + +[[package]] +name = "serde_repr" +version = "0.1.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "175ee3e80ae9982737ca543e96133087cbd9a485eecc3bc4de9c1a37b47ea59c" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", +] + +[[package]] +name = "serde_spanned" +version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1" +dependencies = [ + "serde", +] + +[[package]] +name = "serde_test" +version = "1.0.177" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f901ee573cab6b3060453d2d5f0bae4e6d628c23c0a962ff9b5f1d7c8d4f1ed" +dependencies = [ + "serde", +] + +[[package]] +name = "serde_urlencoded" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" +dependencies = [ + "form_urlencoded", + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "set-action-thresholds" +version = "0.1.0" +dependencies = [ + "casper-contract", + "casper-types", +] + +[[package]] +name = "sha1" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" +dependencies = [ + "cfg-if 
1.0.0", + "cpufeatures", + "digest 0.10.7", +] + +[[package]] +name = "sha2" +version = "0.10.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283" +dependencies = [ + "cfg-if 1.0.0", + "cpufeatures", + "digest 0.10.7", +] + +[[package]] +name = "sharded-slab" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" +dependencies = [ + "lazy_static", +] + +[[package]] +name = "shared-buffer" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6c99835bad52957e7aa241d3975ed17c1e5f8c92026377d117a606f36b84b16" +dependencies = [ + "bytes", + "memmap2 0.6.2", +] + +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + +[[package]] +name = "signal-hook" +version = "0.3.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d881a16cf4426aa584979d30bd82cb33429027e42122b169753d6ef1085ed6e2" +dependencies = [ + "libc", + "signal-hook-registry", +] + +[[package]] +name = "signal-hook-mio" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34db1a06d485c9142248b7a054f034b349b212551f3dfd19c94d45a754a217cd" +dependencies = [ + "libc", + "mio 1.0.3", + "signal-hook", +] + +[[package]] +name = "signal-hook-registry" +version = "1.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9203b8055f63a2a00e2f593bb0510367fe707d7ff1e5c872de2f537b339e5410" +dependencies = [ + "libc", +] + +[[package]] +name = "signature" +version = "1.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" + +[[package]] +name = 
"signature" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" +dependencies = [ + "digest 0.10.7", + "rand_core", +] + +[[package]] +name = "simdutf8" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3a9fe34e3e7a50316060351f37187a3f546bce95496156754b601a5fa71b76e" + +[[package]] +name = "slab" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" +dependencies = [ + "autocfg", +] + +[[package]] +name = "smallvec" +version = "1.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8917285742e9f3e1683f0a9c4e6b57960b7314d0b08d30d1ecd426713ee2eee9" +dependencies = [ + "serde", +] + +[[package]] +name = "socket2" +version = "0.5.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f5fd57c80058a56cf5c777ab8a126398ece8e442983605d280a44ce79d0edef" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + +[[package]] +name = "spin" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" + +[[package]] +name = "spki" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" +dependencies = [ + "base64ct", + "der", +] + +[[package]] +name = "stable_deref_trait" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" + +[[package]] +name = "staking" +version = "0.1.0" +dependencies = [ + "casper-contract", + "casper-types", +] + +[[package]] +name = "staking-stored" +version = "0.1.0" +dependencies = [ + "casper-contract", + 
"casper-types", + "staking", +] + +[[package]] +name = "state-initializer" +version = "0.1.0" +dependencies = [ + "casper-contract", + "casper-types", +] + +[[package]] +name = "static_assertions" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" + +[[package]] +name = "stats_alloc" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c0e04424e733e69714ca1bbb9204c1a57f09f5493439520f9f68c132ad25eec" + +[[package]] +name = "storage-costs" +version = "0.1.0" +dependencies = [ + "casper-contract", + "casper-types", +] + +[[package]] +name = "str_stack" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9091b6114800a5f2141aee1d1b9d6ca3592ac062dc5decb3764ec5895a47b4eb" + +[[package]] +name = "strsim" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" + +[[package]] +name = "strsim" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" + +[[package]] +name = "strsim" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" + +[[package]] +name = "structopt" +version = "0.3.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c6b5c64445ba8094a6ab0c3cd2ad323e07171012d9c98b0b15651daf1787a10" +dependencies = [ + "clap 2.34.0", + "lazy_static", + "structopt-derive", +] + +[[package]] +name = "structopt-derive" +version = "0.4.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dcb5ae327f9cc13b68763b5749770cb9e048a99bd9dfdfa58d0cf05d5f64afe0" +dependencies = [ + "heck 0.3.3", + 
"proc-macro-error", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "strum" +version = "0.24.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "063e6045c0e62079840579a7e47a355ae92f60eb74daaf156fb1e84ba164e63f" +dependencies = [ + "strum_macros 0.24.3", +] + +[[package]] +name = "strum" +version = "0.27.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f64def088c51c9510a8579e3c5d67c65349dcf755e5479ad3d010aa6454e2c32" +dependencies = [ + "strum_macros 0.27.1", +] + +[[package]] +name = "strum_macros" +version = "0.24.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" +dependencies = [ + "heck 0.4.1", + "proc-macro2", + "quote", + "rustversion", + "syn 1.0.109", +] + +[[package]] +name = "strum_macros" +version = "0.27.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c77a8c5abcaf0f9ce05d62342b7d298c346515365c36b673df4ebe3ced01fde8" +dependencies = [ + "heck 0.5.0", + "proc-macro2", + "quote", + "rustversion", + "syn 2.0.101", +] + +[[package]] +name = "subtle" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" + +[[package]] +name = "symbolic-common" +version = "12.15.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a1150bdda9314f6cfeeea801c23f5593c6e6a6c72e64f67e48d723a12b8efdb" +dependencies = [ + "debugid", + "memmap2 0.9.5", + "stable_deref_trait", + "uuid 1.16.0", +] + +[[package]] +name = "symbolic-demangle" +version = "12.15.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f66537def48fbc704a92e4fdaab7833bc7cb2255faca8182592fb5fa617eb82" +dependencies = [ + "cpp_demangle", + "rustc-demangle", + "symbolic-common", +] + +[[package]] +name = "syn" +version = "1.0.109" +source 
= "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "syn" +version = "2.0.101" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ce2b7fc941b3a24138a0a7cf8e858bfc6a992e7978a068a5c760deb0ed43caf" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "sync_wrapper" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" + +[[package]] +name = "synstructure" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", +] + +[[package]] +name = "sys-info" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f3e7ba888a12ddcf0084e36ae4609b055845f38022d1946b67356fbc27d5795" +dependencies = [ + "cc", + "libc", +] + +[[package]] +name = "system-configuration" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" +dependencies = [ + "bitflags 1.3.2", + "core-foundation", + "system-configuration-sys", +] + +[[package]] +name = "system-configuration-sys" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75fb188eb626b924683e3b95e3a48e63551fcfb51949de2f06a9d91dbee93c9" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "system-contract-hashes" +version = "0.1.0" +dependencies = [ + "casper-contract", + "casper-types", +] + +[[package]] +name = "tar" +version = "0.4.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum 
= "1d863878d212c87a19c1a610eb53bb01fe12951c0501cf5a0d65f724914a667a" +dependencies = [ + "filetime", + "libc", + "xattr", +] + +[[package]] +name = "target-lexicon" +version = "0.12.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61c41af27dd6d1e27b1b16b489db798443478cef1f06a660c96db617ba5de3b1" + +[[package]] +name = "target-triple" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ac9aa371f599d22256307c24a9d748c041e548cbf599f35d890f9d365361790" + +[[package]] +name = "tempfile" +version = "3.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8a64e3985349f2441a1a9ef0b853f869006c3855f2cda6862a94d26ebb9d6a1" +dependencies = [ + "fastrand", + "getrandom 0.3.3", + "once_cell", + "rustix", + "windows-sys 0.59.0", +] + +[[package]] +name = "termcolor" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "terminal_size" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "45c6481c4829e4cc63825e62c49186a34538b7b2750b73b266581ffb612fb5ed" +dependencies = [ + "rustix", + "windows-sys 0.59.0", +] + +[[package]] +name = "test-payment-stored" +version = "0.1.0" +dependencies = [ + "casper-contract", + "casper-types", +] + +[[package]] +name = "textwrap" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" +dependencies = [ + "unicode-width 0.1.14", +] + +[[package]] +name = "textwrap" +version = "0.16.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c13547615a44dc9c452a8a534638acdf07120d4b6847c8178705da06306a3057" + +[[package]] +name = "thiserror" +version = "1.0.69" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" +dependencies = [ + "thiserror-impl 1.0.69", +] + +[[package]] +name = "thiserror" +version = "2.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "567b8a2dae586314f7be2a752ec7474332959c6460e02bde30d702a66d488708" +dependencies = [ + "thiserror-impl 2.0.12", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", +] + +[[package]] +name = "thiserror-impl" +version = "2.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", +] + +[[package]] +name = "thread_local" +version = "1.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" +dependencies = [ + "cfg-if 1.0.0", + "once_cell", +] + +[[package]] +name = "tinystr" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d4f6d1145dcb577acf783d4e601bc1d76a13337bb54e6233add580b07344c8b" +dependencies = [ + "displaydoc", + "zerovec", +] + +[[package]] +name = "tinytemplate" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc" +dependencies = [ + "serde", + "serde_json", +] + +[[package]] +name = "tinyvec" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09b3661f17e86524eccd4371ab0429194e0d7c008abb45f7a7495b1719463c71" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = 
"tinyvec_macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" + +[[package]] +name = "tokio" +version = "1.45.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2513ca694ef9ede0fb23fe71a4ee4107cb102b9dc1930f6d0fd77aae068ae165" +dependencies = [ + "backtrace", + "bytes", + "libc", + "mio 1.0.3", + "pin-project-lite", + "socket2", + "tokio-macros", + "windows-sys 0.52.0", +] + +[[package]] +name = "tokio-macros" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", +] + +[[package]] +name = "tokio-native-tls" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" +dependencies = [ + "native-tls", + "tokio", +] + +[[package]] +name = "tokio-openssl" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59df6849caa43bb7567f9a36f863c447d95a11d5903c9cc334ba32576a27eadd" +dependencies = [ + "openssl", + "openssl-sys", + "tokio", +] + +[[package]] +name = "tokio-serde" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "911a61637386b789af998ee23f50aa30d5fd7edcec8d6d3dedae5e5815205466" +dependencies = [ + "bincode", + "bytes", + "educe", + "futures-core", + "futures-sink", + "pin-project", + "serde", +] [[package]] -name = "sd-notify" -version = "0.3.0" +name = "tokio-stream" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0cd08a21f852bd2fe42e3b2a6c76a0db6a95a5b5bd29c0521dd0b30fa1712ec8" +checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047" +dependencies = [ + "futures-core", + 
"pin-project-lite", + "tokio", + "tokio-util 0.7.15", +] [[package]] -name = "security-framework" -version = "2.2.0" +name = "tokio-tungstenite" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3670b1d2fdf6084d192bc71ead7aabe6c06aa2ea3fbd9cc3ac111fa5c2b1bd84" +checksum = "c83b561d025642014097b66e6c1bb422783339e0909e4429cde4749d1990bc38" dependencies = [ - "bitflags 1.2.1", - "core-foundation", - "core-foundation-sys", - "libc", - "security-framework-sys", + "futures-util", + "log", + "tokio", + "tungstenite", ] [[package]] -name = "security-framework-sys" -version = "2.2.0" +name = "tokio-util" +version = "0.6.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3676258fd3cfe2c9a0ec99ce3038798d847ce3e4bb17746373eb9f0f1ac16339" +checksum = "36943ee01a6d67977dd3f84a5a1d2efeb4ada3a1ae771cadfaa535d9d9fc6507" dependencies = [ - "core-foundation-sys", - "libc", + "bytes", + "futures-core", + "futures-sink", + "log", + "pin-project-lite", + "tokio", ] [[package]] -name = "semver" -version = "0.9.0" +name = "tokio-util" +version = "0.7.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" +checksum = "66a539a9ad6d5d281510d5bd368c973d636c02dbf8a67300bfb6b950696ad7df" dependencies = [ - "semver-parser 0.7.0", + "bytes", + "futures-core", + "futures-sink", + "pin-project-lite", + "tokio", ] [[package]] -name = "semver" -version = "0.11.0" +name = "toml" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f301af10236f6df4160f7c3f04eec6dbc70ace82d23326abad5edee88801c6b6" +checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234" dependencies = [ - "semver-parser 0.10.2", + "serde", ] [[package]] -name = "semver-parser" -version = "0.7.0" +name = "toml" +version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum 
= "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" +checksum = "dd79e69d3b627db300ff956027cc6c3798cef26d22526befdfcd12feeb6d2257" +dependencies = [ + "serde", + "serde_spanned", + "toml_datetime", + "toml_edit 0.19.15", +] [[package]] -name = "semver-parser" -version = "0.9.0" +name = "toml" +version = "0.8.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b46e1121e8180c12ff69a742aabc4f310542b6ccb69f1691689ac17fdf8618aa" +checksum = "05ae329d1f08c4d17a59bed7ff5b5a769d062e64a62d34a3261b219e62cd5aae" +dependencies = [ + "indexmap 2.9.0", + "serde", + "serde_spanned", + "toml_datetime", + "toml_edit 0.22.26", +] [[package]] -name = "semver-parser" -version = "0.10.2" +name = "toml_datetime" +version = "0.6.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00b0bef5b7f9e0df16536d3961cfb6e84331c065b4066afb39768d0e319411f7" +checksum = "3da5db5a963e24bc68be8b17b6fa82814bb22ee8660f192bb182771d498f09a3" dependencies = [ - "pest", + "serde", ] [[package]] -name = "serde" -version = "1.0.125" +name = "toml_edit" +version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "558dc50e1a5a5fa7112ca2ce4effcb321b0300c0d4ccf0776a9f60cd89031171" +checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "serde_derive", + "indexmap 2.9.0", + "serde", + "serde_spanned", + "toml_datetime", + "winnow 0.5.40", ] [[package]] -name = "serde-big-array" -version = "0.3.2" +name = "toml_edit" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18b20e7752957bbe9661cff4e0bb04d183d0948cdab2ea58cdb9df36a61dfe62" +checksum = "d34d383cd00a163b4a5b85053df514d45bc330f6de7737edfe0a93311d1eaa03" dependencies = [ - "serde", - "serde_derive", + "indexmap 2.9.0", + "toml_datetime", + "winnow 0.5.40", ] [[package]] -name = "serde_bytes" -version = "0.11.5" +name = "toml_edit" +version = "0.22.26" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "16ae07dd2f88a366f15bd0632ba725227018c69a1c8550a927324f8eb8368bb9" +checksum = "310068873db2c5b3e7659d2cc35d21855dbafa50d1ce336397c666e3cb08137e" dependencies = [ + "indexmap 2.9.0", "serde", + "serde_spanned", + "toml_datetime", + "toml_write", + "winnow 0.7.10", ] [[package]] -name = "serde_cbor" -version = "0.11.1" +name = "toml_write" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e18acfa2f90e8b735b2836ab8d538de304cbb6729a7360729ea5a895d15a622" +checksum = "bfb942dfe1d8e29a7ee7fcbde5bd2b9a25fb89aa70caea2eba3bee836ff41076" + +[[package]] +name = "tower" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" dependencies = [ - "half", - "serde", + "futures-core", + "pin-project-lite", + "tokio", + "tokio-util 0.7.15", + "tower-layer", + "tower-service", + "tracing", ] [[package]] -name = "serde_derive" -version = "1.0.125" +name = "tower-layer" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" + +[[package]] +name = "tower-service" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" + +[[package]] +name = "tracing" +version = "0.1.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b093b7a2bb58203b5da3056c05b4ec1fed827dcfdb37347a8841695263b3d06d" +checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" dependencies = [ - "proc-macro2", - "quote", - "syn", + "log", + "pin-project-lite", + "tracing-attributes", + "tracing-core", ] [[package]] -name = "serde_derive_internals" -version = "0.25.0" +name = "tracing-attributes" +version = "0.1.28" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "1dbab34ca63057a1f15280bdf3c39f2b1eb1b54c17e98360e511637aef7418c6" +checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.101", ] [[package]] -name = "serde_json" -version = "1.0.64" +name = "tracing-core" +version = "0.1.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "799e97dc9fdae36a5c8b8f2cae9ce2ee9fdce2058c57a93e6099d919fd982f79" +checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c" dependencies = [ - "itoa", - "ryu", - "serde", + "once_cell", + "valuable", ] [[package]] -name = "serde_repr" -version = "0.1.6" +name = "tracing-futures" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dc6b7951b17b051f3210b063f12cc17320e2fe30ae05b0fe2a3abb068551c76" +checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" dependencies = [ - "proc-macro2", - "quote", - "syn", + "pin-project", + "tracing", ] [[package]] -name = "serde_test" -version = "1.0.125" +name = "tracing-log" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4bb5fef7eaf5a97917567183607ac4224c5b451c15023930f23b937cce879fe" +checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" dependencies = [ - "serde", + "log", + "once_cell", + "tracing-core", ] [[package]] -name = "serde_urlencoded" -version = "0.7.0" +name = "tracing-serde" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edfa57a7f8d9c1d260a549e7224100f6c43d43f9103e06dd8b4095a9b2b43ce9" +checksum = "704b1aeb7be0d0a84fc9828cae51dab5970fee5088f83d1dd7ee6f6246fc6ff1" dependencies = [ - "form_urlencoded", - "itoa", - "ryu", "serde", + "tracing-core", ] [[package]] -name = "sha-1" -version = "0.9.4" +name = "tracing-subscriber" +version = "0.3.19" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfebf75d25bd900fd1e7d11501efab59bc846dbc76196839663e6637bba9f25f" +checksum = "e8189decb5ac0fa7bc8b96b7cb9b2701d60d48805aca84a238004d665fcc4008" dependencies = [ - "block-buffer 0.9.0", - "cfg-if 1.0.0", - "cpuid-bool 0.1.2", - "digest 0.9.0", - "opaque-debug 0.3.0", + "matchers", + "nu-ansi-term", + "once_cell", + "regex", + "serde", + "serde_json", + "sharded-slab", + "smallvec", + "thread_local", + "tracing", + "tracing-core", + "tracing-log", + "tracing-serde", ] [[package]] -name = "sha2" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a256f46ea78a0c0d9ff00077504903ac881a1dafdc20da66545699e7776b3e69" +name = "transfer-main-purse-to-new-purse" +version = "0.1.0" dependencies = [ - "block-buffer 0.7.3", - "digest 0.8.1", - "fake-simd", - "opaque-debug 0.2.3", + "casper-contract", + "casper-types", ] [[package]] -name = "sha2" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa827a14b29ab7f44778d14a88d3cb76e949c45083f7dbfa507d0cb699dc12de" +name = "transfer-main-purse-to-two-purses" +version = "0.1.0" dependencies = [ - "block-buffer 0.9.0", - "cfg-if 1.0.0", - "cpuid-bool 0.1.2", - "digest 0.9.0", - "opaque-debug 0.3.0", + "casper-contract", + "casper-types", ] [[package]] -name = "sha3" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f81199417d4e5de3f04b1e871023acea7389672c4135918f05aa9cbf2f2fa809" +name = "transfer-purse-to-account" +version = "0.1.0" dependencies = [ - "block-buffer 0.9.0", - "digest 0.9.0", - "keccak", - "opaque-debug 0.3.0", + "casper-contract", + "casper-types", ] [[package]] -name = "sharded-slab" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79c719719ee05df97490f80a45acfc99e5a30ce98a1e4fb67aee422745ae14e3" +name = "transfer-purse-to-account-stored" +version = "0.1.0" 
dependencies = [ - "lazy_static", + "casper-contract", + "casper-types", + "transfer-purse-to-account", ] [[package]] -name = "signal-hook" -version = "0.1.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e31d442c16f047a671b5a71e2161d6e68814012b7f5379d269ebd915fac2729" +name = "transfer-purse-to-account-with-id" +version = "0.1.0" dependencies = [ - "libc", - "mio 0.7.11", - "signal-hook-registry", + "casper-contract", + "casper-types", ] [[package]] -name = "signal-hook" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef33d6d0cd06e0840fba9985aab098c147e67e05cee14d412d3345ed14ff30ac" +name = "transfer-purse-to-accounts" +version = "0.1.0" dependencies = [ - "libc", - "signal-hook-registry", + "casper-contract", + "casper-types", ] [[package]] -name = "signal-hook-registry" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16f1d0fef1604ba8f7a073c7e701f213e056707210e9020af4528e0101ce11a6" +name = "transfer-purse-to-accounts-stored" +version = "0.1.0" dependencies = [ - "libc", + "casper-contract", + "casper-types", + "transfer-purse-to-accounts", ] [[package]] -name = "signature" -version = "1.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29f060a7d147e33490ec10da418795238fd7545bba241504d6b31a409f2e6210" +name = "transfer-purse-to-accounts-subcall" +version = "0.1.0" dependencies = [ - "digest 0.9.0", - "rand_core 0.5.1", + "casper-contract", + "casper-types", ] [[package]] -name = "simple-transfer" +name = "transfer-purse-to-public-key" version = "0.1.0" dependencies = [ "casper-contract", @@ -5194,67 +6754,55 @@ dependencies = [ ] [[package]] -name = "slab" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" +name = "transfer-purse-to-purse" +version = "0.1.0" +dependencies = [ + 
"casper-contract", + "casper-types", +] [[package]] -name = "smallvec" -version = "1.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe0f37c9e8f3c5a4a66ad655a93c74daac4ad00c441533bf5c6e7990bb42604e" +name = "transfer-to-account" +version = "0.1.0" dependencies = [ - "serde", + "casper-contract", + "casper-types", ] [[package]] -name = "snow" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "795dd7aeeee24468e5a32661f6d27f7b5cbed802031b2d7640c7b10f8fb2dd50" +name = "transfer-to-account-u512" +version = "0.1.0" dependencies = [ - "aes-gcm", - "blake2", - "chacha20poly1305", - "rand 0.7.3", - "rand_core 0.5.1", - "ring", - "rustc_version 0.2.3", - "sha2 0.9.3", - "subtle 2.4.0", - "x25519-dalek", + "casper-contract", + "casper-types", ] [[package]] -name = "socket2" -version = "0.3.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "122e570113d28d773067fab24266b66753f6ea915758651696b6e35e49f88d6e" +name = "transfer-to-existing-account" +version = "0.1.0" dependencies = [ - "cfg-if 1.0.0", - "libc", - "winapi 0.3.9", + "casper-contract", + "casper-types", ] [[package]] -name = "socket2" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e3dfc207c526015c632472a77be09cf1b6e46866581aecae5cc38fb4235dea2" +name = "transfer-to-named-purse" +version = "0.1.0" dependencies = [ - "libc", - "winapi 0.3.9", + "casper-contract", + "casper-types", ] [[package]] -name = "spin" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" +name = "transfer-to-public-key" +version = "0.1.0" +dependencies = [ + "casper-contract", + "casper-types", +] [[package]] -name = "state-initializer" +name = "transfer-to-purse" version = "0.1.0" dependencies = [ "casper-contract", @@ -5262,155 +6810,139 @@ dependencies = [ ] [[package]] 
-name = "static_assertions" -version = "1.1.0" +name = "try-lock" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" +checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" + +[[package]] +name = "trybuild" +version = "1.0.105" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c9bf9513a2f4aeef5fdac8677d7d349c79fdbcc03b9c86da6e9d254f1e43be2" +dependencies = [ + "glob 0.3.2", + "serde", + "serde_derive", + "serde_json", + "target-triple", + "termcolor", + "toml 0.8.22", +] [[package]] -name = "storage-costs" -version = "0.1.0" +name = "tungstenite" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ef1a641ea34f399a848dea702823bbecfb4c486f911735368f1f137cb8257e1" dependencies = [ - "casper-contract", - "casper-types", + "byteorder", + "bytes", + "data-encoding", + "http 1.3.1", + "httparse", + "log", + "rand", + "sha1", + "thiserror 1.0.69", + "url", + "utf-8", ] [[package]] -name = "stream-cipher" -version = "0.7.1" +name = "twox-hash" +version = "1.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c80e15f898d8d8f25db24c253ea615cc14acf418ff307822995814e7d42cfa89" +checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" dependencies = [ - "block-cipher", - "generic-array 0.14.4", + "cfg-if 1.0.0", + "static_assertions", ] [[package]] -name = "strsim" -version = "0.8.0" +name = "typenum" +version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" +checksum = "1dccffe3ce07af9386bfd29e80c0ab1a8205a2fc34e4bcd40364df902cfa8f3f" [[package]] -name = "structopt" -version = "0.3.21" +name = "uint" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5277acd7ee46e63e5168a80734c9f6ee81b1367a7d8772a2d765df2a3705d28c" +checksum = "76f64bba2c53b04fcab63c01a7d7427eadc821e3bc48c34dc9ba29c501164b52" dependencies = [ - "clap", - "lazy_static", - "structopt-derive", + "byteorder", + "crunchy", + "hex", + "static_assertions", ] [[package]] -name = "structopt-derive" -version = "0.4.14" +name = "unarray" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ba9cdfda491b814720b6b06e0cac513d922fc407582032e8706e9f137976f90" -dependencies = [ - "heck", - "proc-macro-error", - "proc-macro2", - "quote", - "syn", -] +checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" [[package]] -name = "subtle" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d67a5a62ba6e01cb2192ff309324cb4875d0c451d55fe2319433abe7a05a8ee" +name = "undelegate" +version = "0.1.0" +dependencies = [ + "casper-contract", + "casper-types", +] [[package]] -name = "subtle" -version = "2.4.0" +name = "unicase" +version = "2.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e81da0851ada1f3e9d4312c704aa4f8806f0f9d69faaf8df2f3464b4a9437c2" +checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539" [[package]] -name = "syn" -version = "1.0.69" +name = "unicode-ident" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48fe99c6bd8b1cc636890bcc071842de909d902c81ac7dab53ba33c421ab8ffb" -dependencies = [ - "proc-macro2", - "quote", - "unicode-xid 0.2.1", -] +checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" [[package]] -name = "synstructure" -version = "0.12.4" +name = "unicode-segmentation" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b834f2d66f734cb897113e34aaff2f1ab4719ca946f9a7358dba8f8064148701" -dependencies = [ - "proc-macro2", - "quote", - "syn", - "unicode-xid 0.2.1", -] 
+checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493" [[package]] -name = "syntex" -version = "0.42.2" +name = "unicode-width" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a30b08a6b383a22e5f6edc127d169670d48f905bb00ca79a00ea3e442ebe317" -dependencies = [ - "syntex_errors", - "syntex_syntax", -] +checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" [[package]] -name = "syntex_errors" -version = "0.42.0" +name = "unicode-width" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04c48f32867b6114449155b2a82114b86d4b09e1bddb21c47ff104ab9172b646" -dependencies = [ - "libc", - "log 0.3.9", - "rustc-serialize", - "syntex_pos", - "term", - "unicode-xid 0.0.3", -] +checksum = "1fc81956842c57dac11422a97c3b8195a1ff727f06e85c84ed2e8aa277c9a0fd" [[package]] -name = "syntex_pos" -version = "0.42.0" +name = "untrusted" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fd49988e52451813c61fecbe9abb5cfd4e1b7bb6cdbb980a6fbcbab859171a6" -dependencies = [ - "rustc-serialize", -] +checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" [[package]] -name = "syntex_syntax" -version = "0.42.0" +name = "untrusted" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7628a0506e8f9666fdabb5f265d0059b059edac9a3f810bda077abb5d826bd8d" -dependencies = [ - "bitflags 0.5.0", - "libc", - "log 0.3.9", - "rustc-serialize", - "syntex_errors", - "syntex_pos", - "term", - "unicode-xid 0.0.3", -] +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] -name = "sys-info" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f3e7ba888a12ddcf0084e36ae4609b055845f38022d1946b67356fbc27d5795" +name = "update-associated-key" +version = "0.1.0" dependencies = [ - "cc", - 
"libc", + "casper-contract", + "casper-types", ] [[package]] -name = "system-contract-hashes" +name = "upgrade-threshold" version = "0.1.0" dependencies = [ "casper-contract", @@ -5418,7 +6950,7 @@ dependencies = [ ] [[package]] -name = "system-hashes" +name = "upgrade-threshold-upgrader" version = "0.1.0" dependencies = [ "casper-contract", @@ -5426,1126 +6958,1131 @@ dependencies = [ ] [[package]] -name = "tempfile" -version = "3.2.0" +name = "ureq" +version = "2.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22" +checksum = "02d1a66277ed75f640d608235660df48c8e3c19f3b4edb6a263315626cc3c01d" dependencies = [ - "cfg-if 1.0.0", - "libc", - "rand 0.8.3", - "redox_syscall", - "remove_dir_all", - "winapi 0.3.9", + "base64 0.22.1", + "flate2", + "log", + "once_cell", + "rustls", + "rustls-pki-types", + "url", + "webpki-roots 0.26.11", ] [[package]] -name = "term" -version = "0.4.6" +name = "url" +version = "2.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa63644f74ce96fbeb9b794f66aff2a52d601cbd5e80f4b97123e3899f4570f1" +checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60" dependencies = [ - "kernel32-sys", - "winapi 0.2.8", + "form_urlencoded", + "idna", + "percent-encoding", ] [[package]] -name = "termcolor" -version = "1.1.2" +name = "utf-8" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dfed899f0eb03f32ee8c6a0aabdb8a7949659e3466561fc0adf54e26d88c5f4" -dependencies = [ - "winapi-util", -] +checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" [[package]] -name = "test-payment-stored" -version = "0.1.0" -dependencies = [ - "casper-contract", - "casper-types", -] +name = "utf8_iter" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" [[package]] -name = "textwrap" -version = "0.11.0" +name = "utf8parse" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" -dependencies = [ - "unicode-width", -] +checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] -name = "thiserror" -version = "1.0.24" +name = "uuid" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0f4a65597094d4483ddaed134f409b2cb7c1beccf25201a9f73c719254fa98e" +checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" dependencies = [ - "thiserror-impl", + "getrandom 0.2.16", + "serde", ] [[package]] -name = "thiserror-impl" -version = "1.0.24" +name = "uuid" +version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7765189610d8241a44529806d6fd1f2e0a08734313a35d5b3a556f92b381f3c0" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] +checksum = "458f7a779bf54acc9f347480ac654f68407d3aab21269a6e3c9f922acd9e2da9" [[package]] -name = "thread_local" -version = "1.1.3" +name = "valuable" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8018d24e04c95ac8790716a5987d0fec4f8b27249ffa0f7d33f1369bdfb88cbd" -dependencies = [ - "once_cell", -] +checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" [[package]] -name = "time" -version = "0.1.43" +name = "value-bag" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" -dependencies = [ - "libc", - "winapi 0.3.9", -] +checksum = "943ce29a8a743eb10d6082545d861b24f9d1b160b7d741e0f2cdf726bec909c5" [[package]] -name = "tinytemplate" -version = "1.2.1" +name = "vcpkg" +version = "0.2.15" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc" -dependencies = [ - "serde", - "serde_json", -] +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" [[package]] -name = "tinyvec" -version = "1.2.0" +name = "vec_map" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b5220f05bb7de7f3f53c7c065e1199b3172696fe2db9f9c4d8ad9b4ee74c342" -dependencies = [ - "tinyvec_macros", -] +checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" [[package]] -name = "tinyvec_macros" +name = "verify-signature" version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" +dependencies = [ + "casper-contract", + "casper-types", +] [[package]] -name = "tokio" -version = "0.2.25" +name = "version-sync" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6703a273949a90131b290be1fe7b039d0fc884aa1935860dfcbe056f28cd8092" +checksum = "835169da0173ea373ddf5987632aac1f918967fbbe58195e304342282efa6089" dependencies = [ - "bytes 0.5.6", - "fnv", - "futures-core", - "iovec", - "lazy_static", - "memchr", - "mio 0.6.23", - "pin-project-lite 0.1.12", - "slab", + "proc-macro2", + "pulldown-cmark", + "regex", + "semver", + "syn 2.0.101", + "toml 0.7.8", + "url", ] [[package]] -name = "tokio" -version = "1.4.0" +name = "version_check" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "134af885d758d645f0f0505c9a8b3f9bf8a348fd822e112ab5248138348f1722" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" + +[[package]] +name = "vm2-cargo-casper" +version = "0.1.0" dependencies = [ - "autocfg", - "bytes 1.0.1", - "libc", - "memchr", - "mio 0.7.11", - "num_cpus", - "pin-project-lite 0.2.6", - "tokio-macros", + 
"anyhow", + "atty", + "cargo_metadata 0.19.2", + "casper-contract-sdk", + "casper-contract-sdk-sys", + "clap 4.5.38", + "clap-cargo", + "crossterm", + "include_dir", + "libloading", + "once_cell", + "serde", + "serde_json", + "thiserror 2.0.12", + "wabt", ] [[package]] -name = "tokio-macros" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "caf7b11a536f46a809a8a9f0bb4237020f70ecbf115b842360afb127ea2fda57" +name = "vm2-cep18" +version = "0.1.0" dependencies = [ - "proc-macro2", - "quote", - "syn", + "casper-contract-sdk", + "casper-contract-sdk-codegen", + "serde_json", ] [[package]] -name = "tokio-native-tls" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7d995660bd2b7f8c1568414c1126076c13fbb725c40112dc0120b78eb9b717b" +name = "vm2-cep18-caller" +version = "0.1.0" dependencies = [ - "native-tls", - "tokio 1.4.0", + "borsh", + "casper-contract-sdk", + "vm2-cep18", ] [[package]] -name = "tokio-openssl" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac1bec5c0a4aa71e3459802c7a12e8912c2091ce2151004f9ce95cc5d1c6124e" +name = "vm2-flipper" +version = "0.1.0" dependencies = [ - "futures", - "openssl", - "pin-project 1.0.6", - "tokio 1.4.0", + "casper-contract-sdk", ] [[package]] -name = "tokio-serde" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "911a61637386b789af998ee23f50aa30d5fd7edcec8d6d3dedae5e5815205466" +name = "vm2-harness" +version = "0.1.0" dependencies = [ - "bincode", - "bytes 1.0.1", - "educe", - "futures-core", - "futures-sink", - "pin-project 1.0.6", - "serde", + "casper-contract-macros", + "casper-contract-sdk", + "casper-executor-wasm-common", + "impls", + "serde_json", + "thiserror 2.0.12", ] [[package]] -name = "tokio-stream" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e177a5d8c3bf36de9ebe6d58537d8879e964332f93fb3339e43f618c81361af0" +name = "vm2-host" +version = "0.1.0" dependencies = [ - "futures-core", - "pin-project-lite 0.2.6", - "tokio 1.4.0", - "tokio-util 0.6.5", + "casper-contract-macros", + "casper-contract-sdk", ] [[package]] -name = "tokio-tls" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a70f4fcd7b3b24fb194f837560168208f669ca8cb70d0c4b862944452396343" +name = "vm2-legacy-counter-proxy" +version = "0.1.0" dependencies = [ - "native-tls", - "tokio 0.2.25", + "casper-contract-macros", + "casper-contract-sdk", + "casper-contract-sdk-codegen", + "serde_json", ] [[package]] -name = "tokio-tungstenite" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1a5f475f1b9d077ea1017ecbc60890fda8e54942d680ca0b1d2b47cfa2d861b" +name = "vm2-trait" +version = "0.1.0" dependencies = [ - "futures-util", - "log 0.4.14", - "pin-project 1.0.6", - "tokio 1.4.0", - "tungstenite", + "base16", + "casper-contract-macros", + "casper-contract-sdk", + "serde_json", ] [[package]] -name = "tokio-util" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be8242891f2b6cbef26a2d7e8605133c2c554cd35b3e4948ea892d6d68436499" +name = "vm2-upgradable" +version = "0.1.0" dependencies = [ - "bytes 0.5.6", - "futures-core", - "futures-sink", - "log 0.4.14", - "pin-project-lite 0.1.12", - "tokio 0.2.25", + "casper-contract-macros", + "casper-contract-sdk", ] [[package]] -name = "tokio-util" -version = "0.6.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5143d049e85af7fbc36f5454d990e62c2df705b3589f123b71f441b6b59f443f" +name = "vm2-upgradable-v2" +version = "0.1.0" dependencies = [ - "bytes 1.0.1", - "futures-core", - "futures-sink", - "log 0.4.14", - "pin-project-lite 0.2.6", - "tokio 1.4.0", + "casper-contract-macros", + "casper-contract-sdk", ] [[package]] -name = "toml" -version = 
"0.5.8" +name = "wabt" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa" +checksum = "00bef93d5e6c81a293bccf107cf43aa47239382f455ba14869d36695d8963b9c" dependencies = [ "serde", + "serde_derive", + "serde_json", + "wabt-sys", ] [[package]] -name = "tower" -version = "0.4.6" +name = "wabt-sys" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f715efe02c0862926eb463e49368d38ddb119383475686178e32e26d15d06a66" +checksum = "1a4e043159f63e16986e713e9b5e1c06043df4848565bf672e27c523864c7791" dependencies = [ - "futures-core", - "pin-project 1.0.6", - "tokio 1.4.0", - "tokio-util 0.6.5", - "tower-layer", - "tower-service", - "tracing", + "cc", + "cmake", + "glob 0.2.11", ] [[package]] -name = "tower-layer" -version = "0.3.1" +name = "wait-timeout" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "343bc9466d3fe6b0f960ef45960509f84480bf4fd96f92901afe7ff3df9d3a62" +checksum = "09ac3b126d3914f9849036f826e054cbabdc8519970b8998ddaf3b5bd3c65f11" +dependencies = [ + "libc", +] [[package]] -name = "tower-service" -version = "0.3.1" +name = "walkdir" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" +dependencies = [ + "same-file", + "winapi-util", +] [[package]] -name = "tracing" -version = "0.1.25" +name = "walrus" +version = "0.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01ebdc2bb4498ab1ab5f5b73c5803825e60199229ccba0698170e3be0e7f959f" +checksum = "2c03529cd0c4400a2449f640d2f27cd1b48c3065226d15e26d98e4429ab0adb7" dependencies = [ - "cfg-if 1.0.0", - "log 0.4.14", - "pin-project-lite 0.2.6", - "tracing-attributes", - "tracing-core", + "anyhow", + 
"gimli 0.26.2", + "id-arena", + "leb128", + "log", + "walrus-macro", + "wasm-encoder 0.29.0", + "wasmparser 0.80.2", ] [[package]] -name = "tracing-attributes" -version = "0.1.15" +name = "walrus-macro" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c42e6fa53307c8a17e4ccd4dc81cf5ec38db9209f59b222210375b54ee40d1e2" +checksum = "0a6e5bd22c71e77d60140b0bd5be56155a37e5bd14e24f5f87298040d0cc40d7" dependencies = [ + "heck 0.3.3", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] -name = "tracing-core" -version = "0.1.17" +name = "want" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f50de3927f93d202783f4513cda820ab47ef17f624b03c096e86ef00c67e6b5f" +checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" dependencies = [ - "lazy_static", + "try-lock", ] [[package]] -name = "tracing-futures" -version = "0.2.5" +name = "warp" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" +checksum = "4378d202ff965b011c64817db11d5829506d3404edeadb61f190d111da3f231c" dependencies = [ - "pin-project 1.0.6", + "async-compression", + "bytes", + "futures-channel", + "futures-util", + "headers", + "http 0.2.12", + "hyper", + "log", + "mime", + "mime_guess", + "multer", + "percent-encoding", + "pin-project", + "scoped-tls", + "serde", + "serde_json", + "serde_urlencoded", + "tokio", + "tokio-tungstenite", + "tokio-util 0.7.15", + "tower-service", "tracing", ] [[package]] -name = "tracing-log" -version = "0.1.2" +name = "wasi" +version = "0.10.2+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6923477a48e41c1951f1999ef8bb5a3023eb723ceadafe78ffb65dc366761e3" -dependencies = [ - "lazy_static", - "log 0.4.14", - "tracing-core", -] +checksum = 
"fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" [[package]] -name = "tracing-serde" -version = "0.1.2" +name = "wasi" +version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb65ea441fbb84f9f6748fd496cf7f63ec9af5bca94dd86456978d055e8eb28b" -dependencies = [ - "serde", - "tracing-core", -] +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] -name = "tracing-subscriber" -version = "0.2.17" +name = "wasi" +version = "0.14.2+wasi-0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "705096c6f83bf68ea5d357a6aa01829ddbdac531b357b45abeca842938085baa" +checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3" dependencies = [ - "ansi_term 0.12.1", - "chrono", - "lazy_static", - "matchers", - "regex", - "serde", - "serde_json", - "sharded-slab", - "smallvec", - "thread_local", - "tracing", - "tracing-core", - "tracing-log", - "tracing-serde", + "wit-bindgen-rt", ] [[package]] -name = "transfer-main-purse-to-new-purse" -version = "0.1.0" +name = "wasm-bindgen" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" dependencies = [ - "casper-contract", - "casper-types", + "cfg-if 1.0.0", + "once_cell", + "rustversion", + "wasm-bindgen-macro", ] [[package]] -name = "transfer-main-purse-to-two-purses" -version = "0.1.0" +name = "wasm-bindgen-backend" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" dependencies = [ - "casper-contract", - "casper-types", + "bumpalo", + "log", + "proc-macro2", + "quote", + "syn 2.0.101", + "wasm-bindgen-shared", ] [[package]] -name = "transfer-purse-to-account" -version = "0.1.0" +name = "wasm-bindgen-futures" +version = "0.4.50" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "555d470ec0bc3bb57890405e5d4322cc9ea83cebb085523ced7be4144dac1e61" dependencies = [ - "casper-contract", - "casper-types", + "cfg-if 1.0.0", + "js-sys", + "once_cell", + "wasm-bindgen", + "web-sys", ] [[package]] -name = "transfer-purse-to-account-stored" -version = "0.1.0" +name = "wasm-bindgen-macro" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" dependencies = [ - "casper-contract", - "casper-types", - "transfer-purse-to-account", + "quote", + "wasm-bindgen-macro-support", ] [[package]] -name = "transfer-purse-to-account-with-id" -version = "0.1.0" +name = "wasm-bindgen-macro-support" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" dependencies = [ - "casper-contract", - "casper-types", + "proc-macro2", + "quote", + "syn 2.0.101", + "wasm-bindgen-backend", + "wasm-bindgen-shared", ] [[package]] -name = "transfer-purse-to-accounts" -version = "0.1.0" +name = "wasm-bindgen-shared" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" dependencies = [ - "casper-contract", - "casper-types", + "unicode-ident", ] [[package]] -name = "transfer-purse-to-accounts-stored" -version = "0.1.0" +name = "wasm-encoder" +version = "0.29.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "18c41dbd92eaebf3612a39be316540b8377c871cb9bde6b064af962984912881" dependencies = [ - "casper-contract", - "casper-types", - "transfer-purse-to-accounts", + "leb128", ] [[package]] -name = "transfer-purse-to-accounts-subcall" -version = "0.1.0" +name = "wasm-encoder" +version = "0.230.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "d4349d0943718e6e434b51b9639e876293093dca4b96384fb136ab5bd5ce6660" dependencies = [ - "casper-contract", - "casper-types", + "leb128fmt", + "wasmparser 0.230.0", ] [[package]] -name = "transfer-purse-to-purse" -version = "0.1.0" +name = "wasm-streams" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15053d8d85c7eccdbefef60f06769760a563c7f0a9d6902a13d35c7800b0ad65" dependencies = [ - "casper-contract", - "casper-types", + "futures-util", + "js-sys", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", ] [[package]] -name = "transfer-to-account" -version = "0.1.0" +name = "wasmer" +version = "5.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b104b9437e9100943fb01880cc210ebe250cc4aa2f7e121f068033a76d29cc4" dependencies = [ - "casper-contract", - "casper-types", + "bindgen", + "bytes", + "cfg-if 1.0.0", + "cmake", + "indexmap 2.9.0", + "js-sys", + "more-asserts", + "rustc-demangle", + "serde", + "serde-wasm-bindgen", + "shared-buffer", + "tar", + "target-lexicon", + "thiserror 1.0.69", + "tracing", + "ureq", + "wasm-bindgen", + "wasmer-compiler", + "wasmer-compiler-singlepass", + "wasmer-derive", + "wasmer-types", + "wasmer-vm", + "windows-sys 0.59.0", ] [[package]] -name = "transfer-to-account-stored" -version = "0.1.0" +name = "wasmer-compiler" +version = "5.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9dd5c640b9e6dcc64bcad987b3133e19f1c9919a8e0c732eb11a33f650bbf54" dependencies = [ - "casper-contract", - "casper-types", - "transfer-to-account", + "backtrace", + "bytes", + "cfg-if 1.0.0", + "enum-iterator 0.7.0", + "enumset", + "leb128", + "libc", + "memmap2 0.6.2", + "more-asserts", + "object 0.32.2", + "region", + "rkyv", + "self_cell", + "shared-buffer", + "smallvec", + "target-lexicon", + "thiserror 1.0.69", + "wasmer-types", + "wasmer-vm", + "wasmparser 0.216.1", + "windows-sys 0.59.0", + "xxhash-rust", ] [[package]] -name 
= "transfer-to-account-u512" -version = "0.1.0" +name = "wasmer-compiler-singlepass" +version = "5.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b95cad6ba04afeb3a339529e880c3290f8516bc6324c3082155a79f00129f5a1" dependencies = [ - "casper-contract", - "casper-types", + "byteorder", + "dynasm", + "dynasmrt", + "enumset", + "gimli 0.28.1", + "more-asserts", + "rayon", + "smallvec", + "wasmer-compiler", + "wasmer-types", ] [[package]] -name = "transfer-to-account-u512-stored" -version = "0.1.0" +name = "wasmer-derive" +version = "5.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b4c4970530327054e6effa876eadfd57079866c7429e31fde2568d6354ec61d" dependencies = [ - "casper-contract", - "casper-types", - "transfer-to-account-u512", + "proc-macro-error2", + "proc-macro2", + "quote", + "syn 1.0.109", ] [[package]] -name = "transfer-to-existing-account" -version = "0.1.0" +name = "wasmer-middlewares" +version = "5.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "111eee5478867554d4496f89f472499fe90469f7473dbf90e466c1deb5505293" dependencies = [ - "casper-contract", - "casper-types", + "wasmer", + "wasmer-types", + "wasmer-vm", ] [[package]] -name = "transfer-to-purse" -version = "0.1.0" +name = "wasmer-types" +version = "5.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "554f389473d61915754b1873c5ef392a1a75b55c7d616e2a78f67c1af45785ae" dependencies = [ - "casper-contract", - "casper-types", + "bytecheck 0.6.12", + "enum-iterator 0.7.0", + "enumset", + "getrandom 0.2.16", + "hex", + "indexmap 2.9.0", + "more-asserts", + "rkyv", + "sha2", + "target-lexicon", + "thiserror 1.0.69", + "xxhash-rust", ] [[package]] -name = "treeline" -version = "0.1.0" +name = "wasmer-vm" +version = "5.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7f741b240f1a48843f9b8e0444fb55fb2a4ff67293b50a9179dfd5ea67f8d41" +checksum 
= "20b3f40e1e18d6cd040d6d1ea32affbf2f64ff059eff3b85614bccb8ff95c59b" +dependencies = [ + "backtrace", + "cc", + "cfg-if 1.0.0", + "corosensei", + "crossbeam-queue", + "dashmap", + "enum-iterator 0.7.0", + "fnv", + "indexmap 2.9.0", + "libc", + "mach2", + "memoffset", + "more-asserts", + "region", + "scopeguard", + "thiserror 1.0.69", + "wasmer-types", + "windows-sys 0.59.0", +] [[package]] -name = "try-lock" -version = "0.2.3" +name = "wasmparser" +version = "0.80.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" +checksum = "449167e2832691a1bff24cde28d2804e90e09586a448c8e76984792c44334a6b" [[package]] -name = "tungstenite" -version = "0.12.0" +name = "wasmparser" +version = "0.216.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ada8297e8d70872fa9a551d93250a9f407beb9f37ef86494eb20012a2ff7c24" +checksum = "1cc7c63191ae61c70befbe6045b9be65ef2082fa89421a386ae172cb1e08e92d" dependencies = [ - "base64", - "byteorder", - "bytes 1.0.1", - "http", - "httparse", - "input_buffer", - "log 0.4.14", - "rand 0.8.3", - "sha-1", - "url", - "utf-8", + "ahash", + "bitflags 2.9.1", + "hashbrown 0.14.5", + "indexmap 2.9.0", + "semver", ] [[package]] -name = "twoway" -version = "0.1.8" +name = "wasmparser" +version = "0.219.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59b11b2b5241ba34be09c3cc85a36e56e48f9888862e19cedf23336d35316ed1" +checksum = "5220ee4c6ffcc0cb9d7c47398052203bc902c8ef3985b0c8134118440c0b2921" dependencies = [ - "memchr", + "bitflags 2.9.1", + "indexmap 2.9.0", ] [[package]] -name = "typenum" -version = "1.13.0" +name = "wasmparser" +version = "0.230.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "879f6906492a7cd215bfa4cf595b600146ccfac0c79bcbd1f3000162af5e8b06" +checksum = "808198a69b5a0535583370a51d459baa14261dfab04800c4864ee9e1a14346ed" +dependencies = [ + "bitflags 
2.9.1", + "indexmap 2.9.0", + "semver", +] [[package]] -name = "ucd-trie" -version = "0.1.3" +name = "wasmprinter" +version = "0.219.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56dee185309b50d1f11bfedef0fe6d036842e3fb77413abef29f8f8d1c5d4c1c" +checksum = "c68c93bcc5e934985afd8b65214bdd77abd3863b2e1855eae1b07a11c4ef30a8" +dependencies = [ + "anyhow", + "termcolor", + "wasmparser 0.219.2", +] [[package]] -name = "uint" -version = "0.9.0" +name = "wast" +version = "230.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e11fe9a9348741cf134085ad57c249508345fe16411b3d7fb4ff2da2f1d6382e" +checksum = "b8edac03c5fa691551531533928443faf3dc61a44f814a235c7ec5d17b7b34f1" dependencies = [ - "byteorder", - "crunchy", - "hex", - "static_assertions", + "bumpalo", + "leb128fmt", + "memchr", + "unicode-width 0.2.0", + "wasm-encoder 0.230.0", ] [[package]] -name = "undelegate" -version = "0.1.0" +name = "wat" +version = "1.230.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d77d62229e38db83eac32bacb5f61ebb952366ab0dae90cf2b3c07a65eea894" dependencies = [ - "casper-contract", - "casper-types", + "wast", ] [[package]] -name = "unicase" -version = "2.6.0" +name = "web-sys" +version = "0.3.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" +checksum = "33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2" dependencies = [ - "version_check", + "js-sys", + "wasm-bindgen", ] [[package]] -name = "unicode-bidi" -version = "0.3.5" +name = "webpki-roots" +version = "0.26.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eeb8be209bb1c96b7c177c7420d26e04eccacb0eeae6b980e35fcb74678107e0" +checksum = "521bc38abb08001b01866da9f51eb7c5d647a19260e00054a8c7fd5f9e57f7a9" dependencies = [ - "matches", + "webpki-roots 1.0.0", ] [[package]] -name = 
"unicode-normalization" -version = "0.1.17" +name = "webpki-roots" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07fbfce1c8a97d547e8b5334978438d9d6ec8c20e38f56d4a4374d181493eaef" +checksum = "2853738d1cc4f2da3a225c18ec6c3721abb31961096e9dbf5ab35fa88b19cfdb" dependencies = [ - "tinyvec", + "rustls-pki-types", ] [[package]] -name = "unicode-segmentation" -version = "1.7.1" +name = "wee_alloc" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb0d2e7be6ae3a5fa87eed5fb451aff96f2573d2694942e40543ae0bbe19c796" +checksum = "dbb3b5a6b2bb17cb6ad44a2e68a43e8d2722c997da10e928665c72ec6c0a0b8e" +dependencies = [ + "cfg-if 0.1.10", + "libc", + "memory_units", + "winapi", +] [[package]] -name = "unicode-width" -version = "0.1.8" +name = "wheelbuf" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3" +checksum = "62945bc99a6a121cb2759c7bfa7b779ddf0e69b68bb35a9b23ab72276cfdcd3c" [[package]] -name = "unicode-xid" -version = "0.0.3" +name = "winapi" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36dff09cafb4ec7c8cf0023eb0b686cb6ce65499116a12201c9e11840ca01beb" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] [[package]] -name = "unicode-xid" -version = "0.2.1" +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] -name = "universal-hash" -version = "0.4.0" +name = "winapi-util" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8326b2c654932e3e4f9196e69d08fdf7cfd718e1dc6f66b347e6024a0c961402" +checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" dependencies = [ - "generic-array 0.14.4", - "subtle 2.4.0", + "windows-sys 0.59.0", ] [[package]] -name = "unsigned-varint" -version = "0.5.1" +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7fdeedbf205afadfe39ae559b75c3240f24e257d0ca27e85f85cb82aa19ac35" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] -name = "unsigned-varint" -version = "0.7.0" +name = "windows-sys" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f8d425fafb8cd76bc3f22aace4af471d3156301d7508f2107e98fbeae10bc7f" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" dependencies = [ - "asynchronous-codec", - "bytes 1.0.1", - "futures-io", - "futures-util", + "windows-targets 0.48.5", ] [[package]] -name = "untrusted" -version = "0.7.1" +name = "windows-sys" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets 0.52.6", +] [[package]] -name = "url" -version = "2.2.1" +name = "windows-sys" +version = "0.59.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ccd964113622c8e9322cfac19eb1004a07e636c545f325da085d5cdde6f1f8b" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" dependencies = [ - "form_urlencoded", - "idna", - "matches", - "percent-encoding", + "windows-targets 0.52.6", ] [[package]] -name = "utf-8" -version = "0.7.5" +name = "windows-targets" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"05e42f7c18b8f902290b009cde6d651262f956c98bc51bca4cd1d511c9cd85c7" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" +dependencies = [ + "windows_aarch64_gnullvm 0.48.5", + "windows_aarch64_msvc 0.48.5", + "windows_i686_gnu 0.48.5", + "windows_i686_msvc 0.48.5", + "windows_x86_64_gnu 0.48.5", + "windows_x86_64_gnullvm 0.48.5", + "windows_x86_64_msvc 0.48.5", +] [[package]] -name = "uuid" -version = "0.8.2" +name = "windows-targets" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" dependencies = [ - "getrandom 0.2.2", - "serde", + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", + "windows_i686_gnullvm 0.52.6", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", ] [[package]] -name = "value-bag" -version = "1.0.0-alpha.6" +name = "windows-targets" +version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b676010e055c99033117c2343b33a40a30b91fecd6c49055ac9cd2d6c305ab1" +checksum = "b1e4c7e8ceaaf9cb7d7507c974735728ab453b67ef8f18febdd7c11fe59dca8b" dependencies = [ - "ctor", + "windows_aarch64_gnullvm 0.53.0", + "windows_aarch64_msvc 0.53.0", + "windows_i686_gnu 0.53.0", + "windows_i686_gnullvm 0.53.0", + "windows_i686_msvc 0.53.0", + "windows_x86_64_gnu 0.53.0", + "windows_x86_64_gnullvm 0.53.0", + "windows_x86_64_msvc 0.53.0", ] [[package]] -name = "vcpkg" -version = "0.2.11" +name = "windows_aarch64_gnullvm" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b00bca6106a5e23f3eee943593759b7fcddb00554332e856d990c893966879fb" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] -name = "vec-arena" -version 
= "1.1.0" +name = "windows_aarch64_gnullvm" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34b2f665b594b07095e3ac3f718e13c2197143416fae4c5706cffb7b1af8d7f1" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" [[package]] -name = "vec_map" -version = "0.8.2" +name = "windows_aarch64_gnullvm" +version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" +checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764" [[package]] -name = "vergen" -version = "3.2.0" +name = "windows_aarch64_msvc" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7141e445af09c8919f1d5f8a20dae0b20c3b57a45dee0d5823c6ed5d237f15a" -dependencies = [ - "bitflags 1.2.1", - "chrono", - "rustc_version 0.3.3", -] +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] -name = "version-sync" -version = "0.9.2" +name = "windows_aarch64_msvc" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cb94ca10ca0cf44f5d926ac977f0cac2d13e9789aa4bbe9d9388de445e61028" -dependencies = [ - "proc-macro2", - "pulldown-cmark", - "regex", - "semver-parser 0.9.0", - "syn", - "toml", - "url", -] +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" [[package]] -name = "version_check" -version = "0.9.3" +name = "windows_aarch64_msvc" +version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fecdca9a5291cc2b8dcf7dc02453fee791a280f3743cb0905f8822ae463b3fe" +checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c" [[package]] -name = "void" -version = "1.0.2" +name = "windows_i686_gnu" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" +checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] -name = "wait-timeout" -version = "0.2.0" +name = "windows_i686_gnu" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f200f5b12eb75f8c1ed65abd4b2db8a6e1b138a20de009dacee265a2498f3f6" -dependencies = [ - "libc", -] +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" [[package]] -name = "waker-fn" -version = "1.1.0" +name = "windows_i686_gnu" +version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d5b2c62b4012a3e1eca5a7e077d13b3bf498c4073e33ccd58626607748ceeca" +checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3" [[package]] -name = "walkdir" -version = "2.3.2" +name = "windows_i686_gnullvm" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "808cf2735cd4b6866113f648b791c6adc5714537bc222d9347bb203386ffda56" -dependencies = [ - "same-file", - "winapi 0.3.9", - "winapi-util", -] +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" [[package]] -name = "want" -version = "0.3.0" +name = "windows_i686_gnullvm" +version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" -dependencies = [ - "log 0.4.14", - "try-lock", -] +checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11" [[package]] -name = "warp" -version = "0.3.1" +name = "windows_i686_msvc" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "332d47745e9a0c38636dbd454729b147d16bd1ed08ae67b3ab281c4506771054" -dependencies = [ - "bytes 1.0.1", - "futures", - "headers", - "http", - "hyper 0.14.5", - "log 0.4.14", - "mime", - "mime_guess", - "multipart", - "percent-encoding", - 
"pin-project 1.0.6", - "scoped-tls", - "serde", - "serde_json", - "serde_urlencoded", - "tokio 1.4.0", - "tokio-stream", - "tokio-tungstenite", - "tokio-util 0.6.5", - "tower-service", - "tracing", -] +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] -name = "warp-json-rpc" -version = "0.3.0" +name = "windows_i686_msvc" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7267422020cd1544b9eb9d1a5e54128ccfd2e203a12b7f5eeec992d656c72fd8" -dependencies = [ - "anyhow", - "erased-serde", - "futures", - "http", - "hyper 0.14.5", - "lazycell", - "log 0.4.14", - "serde", - "serde_json", - "warp", -] +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" [[package]] -name = "wasi" -version = "0.9.0+wasi-snapshot-preview1" +name = "windows_i686_msvc" +version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" +checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d" [[package]] -name = "wasi" -version = "0.10.2+wasi-snapshot-preview1" +name = "windows_x86_64_gnu" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] -name = "wasm-bindgen" -version = "0.2.73" +name = "windows_x86_64_gnu" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83240549659d187488f91f33c0f8547cbfef0b2088bc470c116d1d260ef623d9" -dependencies = [ - "cfg-if 1.0.0", - "serde", - "serde_json", - "wasm-bindgen-macro", -] +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" [[package]] -name = "wasm-bindgen-backend" -version = "0.2.73" +name = "windows_x86_64_gnu" +version = "0.53.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae70622411ca953215ca6d06d3ebeb1e915f0f6613e3b495122878d7ebec7dae" -dependencies = [ - "bumpalo", - "lazy_static", - "log 0.4.14", - "proc-macro2", - "quote", - "syn", - "wasm-bindgen-shared", -] +checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba" [[package]] -name = "wasm-bindgen-futures" -version = "0.4.23" +name = "windows_x86_64_gnullvm" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81b8b767af23de6ac18bf2168b690bed2902743ddf0fb39252e36f9e2bfc63ea" -dependencies = [ - "cfg-if 1.0.0", - "js-sys", - "wasm-bindgen", - "web-sys", -] +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] -name = "wasm-bindgen-macro" -version = "0.2.73" +name = "windows_x86_64_gnullvm" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e734d91443f177bfdb41969de821e15c516931c3c3db3d318fa1b68975d0f6f" -dependencies = [ - "quote", - "wasm-bindgen-macro-support", -] +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" [[package]] -name = "wasm-bindgen-macro-support" -version = "0.2.73" +name = "windows_x86_64_gnullvm" +version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d53739ff08c8a68b0fdbcd54c372b8ab800b1449ab3c9d706503bc7dd1621b2c" -dependencies = [ - "proc-macro2", - "quote", - "syn", - "wasm-bindgen-backend", - "wasm-bindgen-shared", -] +checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57" [[package]] -name = "wasm-bindgen-shared" -version = "0.2.73" +name = "windows_x86_64_msvc" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9a543ae66aa233d14bb765ed9af4a33e81b8b58d1584cf1b47ff8cd0b9e4489" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] -name = "wasm-timer" -version = 
"0.2.5" +name = "windows_x86_64_msvc" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be0ecb0db480561e9a7642b5d3e4187c128914e58aa84330b9493e3eb68c5e7f" -dependencies = [ - "futures", - "js-sys", - "parking_lot", - "pin-utils", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", -] +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] -name = "wasmi" -version = "0.8.0" +name = "windows_x86_64_msvc" +version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ad7e265153e1010a73e595eef3e2fd2a1fd644ba4e2dd3af4dd6bd7ec692342" -dependencies = [ - "downcast-rs", - "libc", - "memory_units 0.3.0", - "num-rational 0.2.4", - "num-traits", - "parity-wasm", - "wasmi-validation", -] +checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" [[package]] -name = "wasmi-validation" -version = "0.3.0" +name = "winnow" +version = "0.5.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea78c597064ba73596099281e2f4cfc019075122a65cdda3205af94f0b264d93" +checksum = "f593a95398737aeed53e489c785df13f3618e41dbcd6718c6addbf1395aa6876" dependencies = [ - "parity-wasm", + "memchr", ] [[package]] -name = "web-sys" -version = "0.3.50" +name = "winnow" +version = "0.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a905d57e488fec8861446d3393670fb50d27a262344013181c2cdf9fff5481be" +checksum = "c06928c8748d81b05c9be96aad92e1b6ff01833332f281e8cfca3be4b35fc9ec" dependencies = [ - "js-sys", - "wasm-bindgen", + "memchr", ] [[package]] -name = "wee_alloc" -version = "0.4.5" +name = "winreg" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbb3b5a6b2bb17cb6ad44a2e68a43e8d2722c997da10e928665c72ec6c0a0b8e" +checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" dependencies = [ - "cfg-if 0.1.10", - "libc", - "memory_units 
0.4.0", - "winapi 0.3.9", + "cfg-if 1.0.0", + "windows-sys 0.48.0", ] [[package]] -name = "wepoll-sys" -version = "3.0.1" +name = "wit-bindgen-rt" +version = "0.39.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fcb14dea929042224824779fbc82d9fab8d2e6d3cbc0ac404de8edf489e77ff" +checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1" dependencies = [ - "cc", + "bitflags 2.9.1", ] [[package]] -name = "wheelbuf" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62945bc99a6a121cb2759c7bfa7b779ddf0e69b68bb35a9b23ab72276cfdcd3c" - -[[package]] -name = "which" -version = "4.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b55551e42cbdf2ce2bedd2203d0cc08dba002c27510f86dab6d0ce304cba3dfe" +name = "withdraw-bid" +version = "0.1.0" dependencies = [ - "either", - "libc", + "casper-contract", + "casper-types", ] [[package]] -name = "winapi" -version = "0.2.8" +name = "writeable" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" +checksum = "ea2f10b9bb0928dfb1b42b65e1f9e36f7f54dbdf08457afefb38afcdec4fa2bb" [[package]] -name = "winapi" -version = "0.3.9" +name = "xattr" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +checksum = "0d65cbf2f12c15564212d48f4e3dfb87923d25d611f2aed18f4cb23f0413d89e" dependencies = [ - "winapi-i686-pc-windows-gnu", - "winapi-x86_64-pc-windows-gnu", + "libc", + "rustix", ] [[package]] -name = "winapi-build" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc" - -[[package]] -name = "winapi-i686-pc-windows-gnu" -version = "0.4.0" +name = "xxhash-rust" +version = "0.8.15" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" +checksum = "fdd20c5420375476fbd4394763288da7eb0cc0b8c11deed431a91562af7335d3" [[package]] -name = "winapi-util" -version = "0.1.5" +name = "yoke" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" +checksum = "5f41bb01b8226ef4bfd589436a297c53d118f65921786300e427be8d487695cc" dependencies = [ - "winapi 0.3.9", + "serde", + "stable_deref_trait", + "yoke-derive", + "zerofrom", ] [[package]] -name = "winapi-x86_64-pc-windows-gnu" -version = "0.4.0" +name = "yoke-derive" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +checksum = "38da3c9736e16c5d3c8c597a9aaa5d1fa565d0532ae05e27c24aa62fb32c0ab6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", + "synstructure", +] [[package]] -name = "winreg" -version = "0.7.0" +name = "zerocopy" +version = "0.8.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0120db82e8a1e0b9fb3345a539c478767c0048d842860994d96113d5b667bd69" +checksum = "a1702d9583232ddb9174e01bb7c15a2ab8fb1bc6f227aa1233858c351a3ba0cb" dependencies = [ - "winapi 0.3.9", + "zerocopy-derive", ] [[package]] -name = "withdraw-bid" -version = "0.1.0" +name = "zerocopy-derive" +version = "0.8.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28a6e20d751156648aa063f3800b706ee209a32c0b4d9f24be3d980b01be55ef" dependencies = [ - "casper-contract", - "casper-types", + "proc-macro2", + "quote", + "syn 2.0.101", ] [[package]] -name = "ws2_32-sys" -version = "0.2.1" +name = "zerofrom" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e" 
+checksum = "50cc42e0333e05660c3587f3bf9d0478688e15d870fab3346451ce7f8c9fbea5" dependencies = [ - "winapi 0.2.8", - "winapi-build", + "zerofrom-derive", ] [[package]] -name = "wyz" -version = "0.2.0" +name = "zerofrom-derive" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85e60b0d1b5f99db2556934e21937020776a5d31520bf169e851ac44e6420214" +checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.101", + "synstructure", +] [[package]] -name = "x25519-dalek" -version = "1.1.0" +name = "zeroize" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc614d95359fd7afc321b66d2107ede58b246b844cf5d8a0adcca413e439f088" -dependencies = [ - "curve25519-dalek", - "rand_core 0.5.1", - "zeroize", -] +checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" [[package]] -name = "yamux" -version = "0.8.1" +name = "zerotrie" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cc7bd8c983209ed5d527f44b01c41b7dc146fd960c61cf9e1d25399841dc271" +checksum = "36f0bbd478583f79edad978b407914f61b2972f5af6fa089686016be8f9af595" dependencies = [ - "futures", - "log 0.4.14", - "nohash-hasher", - "parking_lot", - "rand 0.7.3", - "static_assertions", + "displaydoc", + "yoke", + "zerofrom", ] [[package]] -name = "zeroize" -version = "1.2.0" +name = "zerovec" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81a974bcdd357f0dca4d41677db03436324d45a4c9ed2d0b873a5a360ce41c36" +checksum = "4a05eb080e015ba39cc9e23bbe5e7fb04d5fb040350f99f34e338d5fdd294428" dependencies = [ - "zeroize_derive", + "yoke", + "zerofrom", + "zerovec-derive", ] [[package]] -name = "zeroize_derive" -version = "1.0.1" +name = "zerovec-derive" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c3f369ddb18862aba61aa49bf31e74d29f0f162dec753063200e1dc084345d16" +checksum = "5b96237efa0c878c64bd89c436f661be4e46b2f3eff1ebb976f7ef2321d2f58f" dependencies = [ "proc-macro2", "quote", - "syn", - "synstructure", + "syn 2.0.101", ] diff --git a/Cargo.toml b/Cargo.toml index 000abe3df0..cd76c5989d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,31 +1,57 @@ [workspace] +# highway-rewards-analysis and highway-state-grapher are temporarily disabled becasue +# they use old rewards calculation logic and need to be updated. + members = [ "ci/casper_updater", - "client", "execution_engine", - "execution_engine_testing/cargo_casper", "execution_engine_testing/test_support", "execution_engine_testing/tests", "node", "smart_contracts/contract", "smart_contracts/contracts/[!.]*/*", + "storage", "types", + "utils/global-state-update-gen", + "utils/validation", + "binary_port", + "smart_contracts/sdk", + "smart_contracts/sdk_codegen", + "smart_contracts/sdk_sys", + "smart_contracts/macros", + "vm2_cargo_casper", + # "utils/highway-rewards-analysis", + # "utils/highway-state-grapher", + "executor/wasm_common", + "executor/wasm_interface", + "executor/wasm_host", + "executor/wasmer_backend", + "executor/wasm", ] default-members = [ "ci/casper_updater", - "client", "execution_engine", - "execution_engine_testing/cargo_casper", "execution_engine_testing/test_support", "execution_engine_testing/tests", "node", - "smart_contracts/contract", + "storage", "types", + "utils/global-state-update-gen", + "utils/validation", + "binary_port", + "smart_contracts/sdk", + "smart_contracts/sdk_sys", + "smart_contracts/sdk_codegen", + "smart_contracts/macros", + # "utils/highway-rewards-analysis", + # "utils/highway-state-grapher", ] -exclude = ["casper-node-macros"] +exclude = ["utils/nctl/remotes/casper-client-rs"] + +resolver = "2" # Include debug symbols in the release build of `casper-engine-tests` so that `simple-transfer` will yield useful # perf data. 
@@ -33,9 +59,13 @@ exclude = ["casper-node-macros"] debug = true [profile.release] -# TODO: nightly compiler has issues with linking libraries with LTO enabled. -# Change this back to true once stable is supported by default. -lto = false +codegen-units = 1 +lto = true [profile.bench] -lto = true \ No newline at end of file +codegen-units = 1 +lto = true + +[workspace.dependencies] +num-derive = "0.4.2" +num-traits = "0.2.19" diff --git a/LICENSE b/LICENSE index 702b546747..7b192489f3 100644 --- a/LICENSE +++ b/LICENSE @@ -1,433 +1,201 @@ -CasperLabs Open Source License (COSL) - -Version 1.0 - February 22, 2019 - -https://github.com/CasperLabs/CasperLabs/blob/master/LICENSE - -TERMS AND CONDITIONS FOR USE, REPRODUCTION AND DISTRIBUTION - -0. Preamble - - This license, the CasperLabs Open Source License v1.0 (COSLv1.0), is an - open source license that modifies the Apache 2.0 License ("Apache 2.0"; see - https://www.apache.org/licenses/LICENSE-2.0) to be free (libre) open source - software (FLOSS) compliant and to extend to the decentralized application - ecosystem. FLOSS philosophy is incorporated in this COSLv1.0 not only by - adding copyleft provisions, but also by extending the concept of governance - by community as under a license by defining user classes and a license - directed process to identify community compliant versus proprietary - behavior. This COSLv1.0 also explicitly enumerates participants in a - decentralized application ecosystem and activities that harm the community. - -1. Definitions - - This COSLv1.0 incorporates by reference, the definitions within section 1, - "Definitions," of the Apache 2.0 License. - -2. Bifurcation of Subject Matter - - This COSLv1.0 applies to a subject matter code base. The subject matter - code base is composed of both object and source code embodiments ("Novel - Code") and legacy code subject to the Apache 2.0 License ("Legacy Code") as - defined in this section 2. 
- - Fork Demarcation: The subject matter code of this COSLv1.0 may be - demarcated by a code share fork made on Midnight GMT, December 2, 2018 - ("Fork Demarcation Time"). - - Subject Matter Code Base: The subject matter of this COSLv1.0 is composed - of all files in the GitHub share initially committed to - https://github.com/casperlabs or copies made on or after the Fork - Demarcation Time ("Subject Matter Code"). (See initial commit at: - https://github.com/CasperLabs/CasperLabs/commit/9c83ec3ecaa955dacfa3e42054d05098fc32bfc7). - - Novel Code Base: The scope of Novel Code is all additions or modifications - to the Subject Matter Code made on the Fork Demarcation Time or afterwards. - - Legacy Code Base: The scope of the Legacy Code is all code included in the - Subject Matter Code share added or created before the Fork Demarcation - Time. - -3. Express Invocation of Sections 4 and 9 of the Apache 2.0 License - - In some cases, the Novel Code may be used, reproduced and/or distributed - without the Legacy Code. However, where the Legacy Code is used, - reproduced and/or distributed with the Novel Code, this COSLv1.0, which - applies to the Novel Code, is not to be construed to relicense, modify, or - otherwise disturb the Apache 2.0 License of the Legacy Code. This license, - the COSLv1.0, is an extension of the Apache 2.0 License under its section 4 - which provides for "additional or different license terms and conditions - for use, reproduction, or distribution." Furthermore, per the Apache 2.0 - License under its section 9, a distributor of the Subject Matter Code may - solely accept "warrantee or additional liability" of the distributed - Subject Matter Code. - -4. Express Disclaimer of Contributions Under the Apache 2.0 License - - Notwithstanding the terms of the Apache License 2.0 regarding - contributions, unless explicitly stated otherwise, all additions or - modifications to the Subject Matter Code are contributions under this - COSLv1.0. 
In the case of bug fix code changes or integration specific code - changes to the Legacy Code that are propagated to the pre-Fork Demarcation - date open source project share, those code changes are to be subject to the - Apache 2.0 License. - -5. Contributions are Under COSLv1.0 and are Subject to Relicense - - All contributions to Novel Code will be under this COSLv1.0 and are subject - to the contributor executing a relicensing consent ("Relicensing Consent") - which provides that contributor's agreement to allow their contributions to - be subject to updates to this COSLv1.0 or relicensing in the future, - provided that the updated COSLv1.0 or new license is an open source license - and is consistent with the present COSLv1.0 license including the copyleft - provisions in section 9. - -6. Decentralized Ecosystem Parties - - The COSLv1.0 is a license intended to benefit the open source community in - the context of the distributed and decentralized platform and decentralized - applications ("Decentralized Applications" or "DApps") based on a - distributed ledger built from the Subject Matter Code. Licensees of the - Subject Matter Code under this COSLv1.0 include the following parties: - - Node Operators: A node operator ("Node Operator") is a party that - contributes compute processing for a decentralized platform with a - computing node where at least a portion of a distributed ledger resides on - the node. Node Operators include distributed ledger validators including - miners for Proof of Work decentralized platforms and validators for Proof - of Stake decentralized platforms. - - Decentralized Application Developers: A decentralized application developer - ("Decentralized Application Developer" or "DApp Developer") is a party that - creates or modifies a DApp. Where a DApp Developer is developing a DApp in - the scope of work for a company or is otherwise contractually bound to - develop a DApp for a company, that company is also a DApp Developer. 
- - Decentralized Application Distributor: A decentralized application - distributor ("Decentralized Application Distributor" or "DApp Distributor") - is a party that distributes a DApp either freely or for compensation. - - Decentralized Application Users: A decentralized application user - ("Decentralized Application User" or "DApp User") is a party who is an end - user of a DApp. Where an end user uses a DApp in the scope of work for a - company or is otherwise contractually bound to a company to use a DApp, the - company is also a DApp User. - - Decentralized Platform Developers: A decentralized platform developer - ("Decentralized Platform Developer") is a party that contributes code to - software that enables the decentralized maintenance of a distributed - ledger, development tools to develop DApps on that distributed ledger, or - tools to administer a distributed ledger or applications built on the - distributed ledger. - - Token Issuers: A token issuer ("Token Issuer") is a party, including a - party other than the COSLv1.0 Licensor, that releases and manages quanta of - value as managed by the distributed ledger ("tokens") to be used to - transfer value between parties. Tokens may be used to measure work effort, - such as amount of compute power estimated to be used to validate - transactions on a distributed ledger. Where the tokens are hashed by the - distributed ledger and are in a format suitable for use as a currency, - those tokens may be referred to as a "cryptocurrency." - - Token Holders: A token holder ("Token Holder") is a party the retains - tokens either for holding, or for use as currency or in distributed - ledger-based transactions. 
- - Decentralized Network Users: A decentralized network user ("Decentralized - Network User") is a general term for any party that avails itself to a - distributed ledger, or any application, tool, or token using the - distributed ledger, or any application, tool, or token that administers, - maintains, or develops any application, tool, or token using the - distributed ledger. - - All the above parties defined in this section 6(a)-(h), in their context as - users of the Subject Matter Code and Licensees under this COSLv1.0 and any - other parties who access the Subject Matter Code are collectively known as - "Decentralized Ecosystem Parties". - -7. User Classes - - The COSLv1.0 Licensor and the Decentralized Ecosystem Parties as set forth - in section 6 of this COSLv1.0 solely comprise the members of a community - ("Community") around the Subject Matter Code. The COSLv1.0 Licensor is the - granting party of rights to the Subject Matter Code and the Decentralized - Ecosystem Parties are the receiving party of rights to the Subject Matter - Code. The Decentralized Ecosystem Parties all belong to one and only one - of the following two user classes ("User Classes"): - - Proprietary User Class: The Proprietary User Class ("Proprietary User - Class") is a Decentralized Ecosystem Party that performs, or permits to - occur, any of the following acts or uses: - - Co-opting the Development of the Decentralized Platform from the Community: - This form of use occurs when only one party, or a set of parties either via - explicit contract or via participation, substantially interferes, limits or - prevents other parties from materially participating in the decentralized - platform, in roles defined by a protocol of the decentralized platform, - and/or in roles that are required for the use and support of a protocol of - the decentralized platform. 
Examples of defined and/or required roles - include end users and their client software to participate in a centralized - platform, node operators both for validation and for forming consensus, - decentralized application developers, decentralized platform developers, - and other parties set forth in section 6 of this COSLv1.0. - - Such co-opting may include closed forks where the Subject Matter Code is - forked either in full or in part, and where the other parties are - substantively limited from materially participating in the development or - use of the decentralized platform, including in roles set forth in section - 7(a)(i) above. This may also include forks where a party is to create a - cryptocurrency, native token or other quanta of value from operations of - the forked decentralized platform without remuneration to the Community as - set forth in section 11 of this COSLv1.0. - - Co-opting the Operation or Use of the Decentralized Platform: This form of - use occurs when a party, or a set of parties either via explicit contract - or via participation, substantially interferes, limits or prevents other - parties from materially operating nodes, using the decentralized platform, - or deploying and using DApps. - - Interfering with the Economic Operation of the Decentralized Platform: This - form of use occurs when a party or parties seek to operate nodes, develop - DApps, or run DApps to drive or to result in changes in the price of an - underlying cryptocurrency of a distributed ledger, resulting in - interference of the economic operation of the distributed ledger. - - Open Source User Class: The Open Source User Class ("Open Source User - Class") is any Decentralized Ecosystem Party that is not in a Proprietary - User Class. - -8. Governance and Conversion - - Governance under the COSLv1.0 is performed by COSLv1.0 Licensor in close - cooperation with the Community as set forth in this COSLv1.0. 
The - Community includes any Decentralized Ecosystem Party as set forth in - section 6 of this COSLv1.0. Specifically, the COSLv1.0 Licensor will - designate a public, transparent online forum ("Community Forum") associated - with the Subject Matter Code and will designate one or more moderators - ("Community Forum Moderators") to monitor and moderate the Community Forum. - The Community Forum to receives requests and notifications from the - Community, including proposed feature additions, proposed code changes, - additions or deletions, and Community member status and user class. The - Community Forum may host online discussions about topics broadly related to - the Subject Matter Code. While acceptance or rejection of requests and - notifications are at the sole discretion of the COSLv1.0 Licensor, - acceptance will not be unreasonably withheld. Moderation policy of the - Community Forum is at the sole discretion of the COSLv1.0 Licensor. - - Rights to the Subject Matter Code, including intellectual property rights - under this COSLv1.0 are subject to membership of the Open Source User - Class. Where a party of the Open Source User Class performs an act or use - as set forth in section 7(a) of this COSLv1.0, that party converts to the - Proprietary User Class ("Conversion"). Where a party contemplates - performing an act that might be construed as proprietary, that party may - demonstrate good faith by providing a sixty (60) calendar day advance - notice to the COSLv1.0 Licensor and the Community, by sending a notice via - the Community Forum describing the proposed activity, how that activity - benefits the Community, and how that activity may not benefit the - Community. If the COSLv1.0 Licensor accepts the activity in consultation - with the Community, that activity will not trigger Conversion. 
- - Where a party is a member of the Proprietary User Class, that party may - seek to be re-characterized as a member of the Open Source User Class by - providing a notice via the Community Forum to the COSLv1.0 Licensor and the - Community. The notice is to specify the activities that party is ceasing - and/or activities to remediate prior acts that triggered Conversion. If - the COSLv1.0 Licensor in consultation with the Community accepts the - recharacterization, then that party will become a member of the Open Source - User Class. - - The COSLv1.0 Licensor may opt to extend consideration of a - recharacterization by increments of sixty (60) calendar days at a - time. Neither the failure nor delay on the part of the COSLv1.0 Licensor to - respond to or act upon any requests and notifications from the member - parties of the Community under this section 8 shall operate as a waiver - thereof unless made in writing and signed by the COSLv1.0 Licensor. - -9. Intellectual Property Rights - - Subject to the terms and conditions of this COSLv1.0, each contributor - grants to all parties who are in the Open Source User Class a copyright - license under the terms of section 2 of the Apache 2.0 License. Absent a - separate agreement to the contrary, members of the Proprietary User Class, - do not have a copyright license. Where a member of the Proprietary User - Class converted from the Open Source User Class, that member retains no - prior copyright license. Where a party performs an act that triggers a - Conversion of the party to the Proprietary User Class, the copyright - license is revoked. A party whose copyright license is revoked exposes - that party to damages and other remedies for copyright infringement - including statutory damages and injunctive relief. For purposes of - injunctive relieve, the Licensee stipulates the availability of equitable - damages. 
- - Subject to the terms and conditions of this COSLv1.0, each contributor - grants to all parties who are in the Open Source User Class a patent - license under the terms of section 2 of the Apache 2.0 License. Absent a - separate agreement to the contrary, members of the Proprietary User Class, - do not have a patent license. Where a member of the Proprietary User Class - converted from the Open Source User Class, that member retains no prior - patent license. Acceptance of this COSLv1.0 includes a stipulation of - knowledge that some portions of the Subject Matter code are subject to - patent coverage. Where a party performs an act that triggers a Conversion - of the party to the Proprietary User Class, the patent license is revoked. - A party whose patent license is revoked exposes that party to damages and - other remedies for patent infringement including enhanced damages and - injunctive relief. For purposes of injunctive relieve, the Licensee - stipulates the availability of equitable damages. - - Subject to the terms and conditions of this COSLv1.0, the COSLv1.0 Licensor - grants to all parties who are in the Open Source User Class a perpetual, - irrevocable (except as provided in this COSLv1.0), non-transferrable, - sublicensable, royalty-free, fully paid, worldwide and non-exclusive right - and licensee to use any marks to name distributions of the Subject Matter - Code (under any operative nominative fair use law). Absent a separate - agreement to the contrary, members of the Proprietary User Class, do not - have trademark rights or trademark license. Where a member of the - Proprietary User Class converted from the Open Source User Class, that - member retains no prior trademark right or trademark license. 
No other - mark license is provided, including for marks relating to company name, - trade name, or other marks COSLv1.0 Licensor reserves the right to police - the quality and use of the licensed marks and to revoke license of those - marks at its sole discretion where those distributions of Subject Matter - Code are not compliant with this COSLv1.0 or FLOSS principles. License of - those marks shall not be unreasonably withheld. A party whose trademark - license is revoked exposes that party to damages and other remedies for - trademark infringement, unfair trade practices and injunctive relief. For - purposes of injunctive relieve, the Licensee stipulates the availability of - equitable damages. - -10. Copyleft - - Any recipient of any copy, fork, distribution or conveyance of the Novel - Code, in full or in part, receives a license from the original licensors to - run, modify and propagate the Novel Code, modified or unaltered, subject to - this COSLv1.0. Any copy, fork, distribution or conveyance is to be subject - to the COSLv1.0. Additional conditions, including payment and/or - compensation terms, representations and warrantees, may be added beyond - COSLv1.0 license terms provided they are consistent with COSLv1.0 and free - (libre) and open source principles, and do not prejudice the COSLv1.0 - Licensor's use of Novel code. Contributors to the Novel Code are to be - fully indemnified by any party creating additional conditions. - -11. Forks - - Community friendly copies of all or part of the Subject Matter Code for - separate development from the original Subject Matter Code tree ("Forks") - are supported by this COSLv1.0, under the following conditions: - - Propagation of COSLv1.0 Licensing Principles: Code in a Fork is to be - subject to and controlled by this COSLv1.0 or under a FLOSS compliant - relicensing under the terms of this COSLv1.0. 
Failure to do so constitutes - an act co-opting the development of the decentralized platform from the - Community, thereby triggering a Conversion of the forking party to a - Proprietary User Class subject to penalties set forth in this COSLv1.0 - including but limited to section 9. - - For-Profit Fork Airdrop: Where code in a Fork is to be used to deploy a new - independent blockchain with a cryptocurrency, native token or other native - quantum of value that party is to notify the COSLv1.0 Licensor with an - offer to compensate development efforts of the Community with an Airdrop of - cryptocurrency supported by a distributed ledger deployment from the Fork. - The COSLv1.0 Licensor in consultation with the Community, has sixty (60) - calendar days to accept or reject the offer. - - Backpropagation of Changes: The COSLv1.0 Licensor in consultation with the - Community may select features and code from the Fork code for addition to - the Subject Matter Code at any time without penalty, fee, or other - remuneration. - -12. 
Attribution and Marking - - Reproduction and distribution of the Subject Matter Code, in full or in - part, original or modified, ("Subject Matter Code Distribution") is subject - to the following conditions: - - Licensee shall provide all recipients of the Subject Matter Code - Distribution, a copy of this COSLv1.0; - - Licensee shall cause any files modified from the Subject Matter Code - distributed in the Subject Matter Code Distribution to carry prominent - notices stating that Licensee has changed the files; - - Licensee shall retain, in the Source form of any Subject Matter Code - Distribution that the Licensee distributes, all pertinent copyright, - patent, trademark, and attribution notices from the Source form Subject - Matter Code; and - - If the Subject Matter Code includes a "NOTICE" text file as part of its - distribution, then any Subject Matter Code Distribution, that the Licensee - distributes must include a readable copy of the pertinent portions of the - attribution notices contained within such NOTICE file, in at least one of - the following places: - - within a NOTICE text file in the distribution; within the Source form or - documentation, if provided along with the Subject Matter Code Distribution; - - within a display generated by the Subject Matter Code Distribution, if and - wherever such third-party notices normally appear. - - The contents of the NOTICE file are for informational purposes only and do - not modify the CasperLabs Open Source License, Version 1.0 (COSLv1.0). - Licensee of the Subject Matter Code may add their own attribution notices - within the Subject Matter Code Distribution, alongside or as an addendum to - the NOTICE text from the Subject Matter Code, provided that such additional - attribution notices cannot be construed as modifying the COSLv1.0. 
- - Copyright under the COSLv1.0 is to be as follows: - - Copyright [yyyy] [name of copyright owner (Subject Matter Code)] - - Licensed under the CasperLabs Open Source License, Version 1.0 (COSLv1.0); - you may not use this file except in compliance with this COSLv1.0. You may - obtain a copy of this COSLv1.0 at - - https://github.com/CasperLabs/CasperLabs/blob/master/LICENSE - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - COSLv1.0 for the specific language governing permissions and limitations - under the COSLv1.0. - - Where Legacy Code is to be distributed, copyright under the Apache 2.0 - License is to be as follows: - - Copyright [yyyy] [name of copyright owner (Legacy Code)] - - Licensed under the Apache License, Version 2.0 ("License"); you may not use - this file except in compliance with the License. You may obtain a copy of - the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -13. Disclaimer of Warranty - - Unless required by applicable law or agreed to in writing, COSLv1.0 - Licensor provides the Subject Matter Code and materials provided with the - Subject Matter Code (and each Contributor provides its Contributions) on an - "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - or implied, including, without limitation, any warranties or conditions of - TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR - PURPOSE. 
You are solely responsible for determining the appropriateness of - using or redistributing the same and assume any risks associated with your - exercise of permissions under this COSLv1.0. - -14. Miscellaneous - - The following miscellaneous terms apply to this COSLv1.0: - - Clickwrap: The terms of this COSLv1.0 may be accepted via clickwrap or - other analogous means. - - Choice of Law: For purposes of conflicts of law, for contract - interpretation of this COSLv1.0, New York state law is to be used, and for - intellectual property law, of this COSLv1.0, United States Federal law is - to be used. - - Venue: State venue is to be New York County, New York. Federal venue is to - be U.S. District Court, Southern District of New York. + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright 2021 Casper Association + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/Makefile b/Makefile index 9e760d4aa2..42a7bb3aed 100644 --- a/Makefile +++ b/Makefile @@ -1,174 +1,168 @@ # This supports environments where $HOME/.cargo/env has not been sourced (CI, CLion Makefile runner) CARGO = $(or $(shell which cargo), $(HOME)/.cargo/bin/cargo) RUSTUP = $(or $(shell which rustup), $(HOME)/.cargo/bin/rustup) -NPM = $(or $(shell which npm), /usr/bin/npm) -RUST_TOOLCHAIN := $(shell cat rust-toolchain) +PINNED_NIGHTLY := $(shell cat smart_contracts/rust-toolchain) +PINNED_STABLE := $(shell sed -nr 's/channel *= *\"(.*)\"/\1/p' rust-toolchain.toml) +WASM_STRIP_VERSION := $(shell wasm-strip --version) CARGO_OPTS := --locked -CARGO := $(CARGO) $(CARGO_TOOLCHAIN) $(CARGO_OPTS) +CARGO_PINNED_NIGHTLY := $(CARGO) +$(PINNED_NIGHTLY) $(CARGO_OPTS) +CARGO := $(CARGO) $(CARGO_OPTS) DISABLE_LOGGING = RUST_LOG=MatchesNothing # Rust Contracts -# Directory names should match crate names -BENCH = $(shell find ./smart_contracts/contracts/bench -mindepth 1 -maxdepth 1 -type d -exec basename {} \;) -CLIENT = $(shell find ./smart_contracts/contracts/client -mindepth 1 -maxdepth 1 -type d -exec basename {} \;) -EXPLORER = $(shell find ./smart_contracts/contracts/explorer -mindepth 1 -maxdepth 1 -type d -exec basename {} \;) -PROFILING = $(shell find ./smart_contracts/contracts/profiling -mindepth 1 -maxdepth 1 -type d -exec basename {} \;) -SRE = $(shell find 
./smart_contracts/contracts/SRE -mindepth 1 -maxdepth 1 -type d -exec basename {} \;) -TEST = $(shell find ./smart_contracts/contracts/test -mindepth 1 -maxdepth 1 -type d -exec basename {} \;) - -BENCH_CONTRACTS := $(patsubst %, build-contract-rs/%, $(BENCH)) -CLIENT_CONTRACTS := $(patsubst %, build-contract-rs/%, $(CLIENT)) -EXPLORER_CONTRACTS := $(patsubst %, build-contract-rs/%, $(EXPLORER)) -PROFILING_CONTRACTS := $(patsubst %, build-contract-rs/%, $(PROFILING)) -SRE_CONTRACTS := $(patsubst %, build-contract-rs/%, $(SRE)) -TEST_CONTRACTS := $(patsubst %, build-contract-rs/%, $(TEST)) - -# AssemblyScript Contracts -CLIENT_CONTRACTS_AS = $(shell find ./smart_contracts/contracts_as/client -mindepth 1 -maxdepth 1 -type d) -TEST_CONTRACTS_AS = $(shell find ./smart_contracts/contracts_as/test -mindepth 1 -maxdepth 1 -type d) - -CLIENT_CONTRACTS_AS := $(patsubst %, build-contract-as/%, $(CLIENT_CONTRACTS_AS)) -TEST_CONTRACTS_AS := $(patsubst %, build-contract-as/%, $(TEST_CONTRACTS_AS)) +VM2_CONTRACTS = $(shell find ./smart_contracts/contracts/vm2 -mindepth 1 -maxdepth 1 -type d -exec basename {} \;) +ALL_CONTRACTS = $(shell find ./smart_contracts/contracts/[!.]* -mindepth 1 -maxdepth 1 -not -path "./smart_contracts/contracts/vm2*" -type d -exec basename {} \;) +CLIENT_CONTRACTS = $(shell find ./smart_contracts/contracts/client -mindepth 1 -maxdepth 1 -type d -exec basename {} \;) +CARGO_HOME_REMAP = $(if $(CARGO_HOME),$(CARGO_HOME),$(HOME)/.cargo) +RUSTC_FLAGS = "--remap-path-prefix=$(CARGO_HOME_REMAP)=/home/cargo --remap-path-prefix=$$PWD=/dir" CONTRACT_TARGET_DIR = target/wasm32-unknown-unknown/release -CONTRACT_TARGET_DIR_AS = target_as -CRATES_WITH_DOCS_RS_MANIFEST_TABLE = \ - execution_engine_testing/test_support \ - node \ - smart_contracts/contract \ - types +build-contract-rs/%: + cd smart_contracts/contracts && RUSTFLAGS=$(RUSTC_FLAGS) $(CARGO) build --verbose --release $(filter-out --release, $(CARGO_FLAGS)) --package $* -CRATES_WITH_DOCS_RS_MANIFEST_TABLE 
:= $(patsubst %, doc-stable/%, $(CRATES_WITH_DOCS_RS_MANIFEST_TABLE)) +build-vm2-contract-rs/%: + RUSTFLAGS=$(RUSTC_FLAGS) $(CARGO) run -p cargo-casper --bin cargo-casper -- build-schema --package $* + cd smart_contracts/contracts/vm2 && RUSTFLAGS=$(RUSTC_FLAGS) $(CARGO) build --verbose --release $(filter-out --release, $(CARGO_FLAGS)) --package $* -.PHONY: all -all: build build-contracts +.PHONY: build-vm2-contracts-rs +build-vm2-contracts-rs: $(patsubst %, build-vm2-contract-rs/%, $(VM2_CONTRACTS)) -.PHONY: build -build: - $(CARGO) build $(CARGO_FLAGS) +.PHONY: build-all-contracts-rs +build-all-contracts-rs: $(patsubst %, build-contract-rs/%, $(ALL_CONTRACTS)) -build-contract-rs/%: - $(CARGO) build \ - --release $(filter-out --release, $(CARGO_FLAGS)) \ - --package $* \ - --target wasm32-unknown-unknown - wasm-strip target/wasm32-unknown-unknown/release/$(subst -,_,$*).wasm 2>/dev/null | true - -build-contracts-rs: \ - $(BENCH_CONTRACTS) \ - $(CLIENT_CONTRACTS) \ - $(EXPLORER_CONTRACTS) \ - $(PROFILING_CONTRACTS) \ - $(SRE_CONTRACTS) \ - $(TEST_CONTRACTS) +.PHONY: build-client-contracts-rs +build-client-contracts-rs: $(patsubst %, build-contract-rs/%, $(CLIENT_CONTRACTS)) -.PHONY: build-client-contracts -build-client-contracts: $(CLIENT_CONTRACTS) +strip-contract/%: + wasm-strip $(CONTRACT_TARGET_DIR)/$(subst -,_,$*).wasm 2>/dev/null | true + +.PHONY: strip-all-contracts +strip-all-contracts: $(info Using 'wasm-strip' version $(WASM_STRIP_VERSION)) $(patsubst %, strip-contract/%, $(ALL_CONTRACTS)) -build-contract-as/%: - cd $* && $(NPM) run asbuild +.PHONY: strip-client-contracts +strip-client-contracts: $(patsubst %, strip-contract/%, $(CLIENT_CONTRACTS)) -.PHONY: build-contracts-as -build-contracts-as: \ - $(CLIENT_CONTRACTS_AS) \ - $(TEST_CONTRACTS_AS) \ - $(EXAMPLE_CONTRACTS_AS) +.PHONY: build-contracts-rs +build-contracts-rs: build-all-contracts-rs strip-all-contracts + +.PHONY: build-client-contracts +build-client-contracts: build-client-contracts-rs 
strip-client-contracts .PHONY: build-contracts -build-contracts: build-contracts-rs build-contracts-as +build-contracts: build-contracts-rs resources/local/chainspec.toml: generate-chainspec.sh resources/local/chainspec.toml.in @./$< .PHONY: test-rs -test-rs: resources/local/chainspec.toml - $(DISABLE_LOGGING) $(CARGO) test $(CARGO_FLAGS) --workspace - $(DISABLE_LOGGING) $(CARGO) test $(CARGO_FLAGS) --features=std --manifest-path=types/Cargo.toml - $(DISABLE_LOGGING) $(CARGO) test $(CARGO_FLAGS) --features=std --manifest-path=smart_contracts/contract/Cargo.toml +test-rs: resources/local/chainspec.toml build-contracts-rs + $(LEGACY) $(DISABLE_LOGGING) $(CARGO) test --all-features --no-fail-fast $(CARGO_FLAGS) -- --nocapture -.PHONY: test-as -test-as: setup-as - cd smart_contracts/contract_as && npm run asbuild && npm run test +.PHONY: resources/local/chainspec.toml +test-rs-no-default-features: + cd smart_contracts/contract && $(DISABLE_LOGGING) $(CARGO) test $(CARGO_FLAGS) --no-default-features --features=version-sync .PHONY: test -test: test-rs test-as +test: test-rs-no-default-features test-rs .PHONY: test-contracts-rs test-contracts-rs: build-contracts-rs - $(DISABLE_LOGGING) $(CARGO) test $(CARGO_FLAGS) -p casper-engine-tests -- --ignored + $(DISABLE_LOGGING) $(CARGO) test $(CARGO_FLAGS) -p casper-engine-tests -- --ignored --skip repeated_ffi_call_should_gas_out_quickly -.PHONY: test-contracts-as -test-contracts-as: build-contracts-rs build-contracts-as - @# see https://github.com/rust-lang/cargo/issues/5015#issuecomment-515544290 - $(DISABLE_LOGGING) $(CARGO) test $(CARGO_FLAGS) --manifest-path "execution_engine_testing/tests/Cargo.toml" --features "use-as-wasm" -- --ignored +.PHONY: test-contracts-timings +test-contracts-timings: build-contracts-rs + $(DISABLE_LOGGING) $(CARGO) test --release $(filter-out --release, $(CARGO_FLAGS)) -p casper-engine-tests -- --ignored --test-threads=1 repeated_ffi_call_should_gas_out_quickly .PHONY: test-contracts 
-test-contracts: test-contracts-rs test-contracts-as +test-contracts: test-contracts-rs + +.PHONY: check-no-default-features +check-no-default-features: + cd types && $(CARGO) check --all-targets --no-default-features + +.PHONY: check-std-features +check-std-features: + cd types && $(CARGO) check --all-targets --no-default-features --features=std + cd types && $(CARGO) check --all-targets --features=std + cd smart_contracts/contract && $(CARGO) check --all-targets --no-default-features --features=std + cd smart_contracts/contract && $(CARGO) check --all-targets --features=std -.PHONY: test-fast-sync -test-fast-sync: - cd $(CURDIR)/node && $(CARGO) test --lib testing::multi_stage_test_reactor::test_chain --features "fast-sync" +check-std-fs-io-features: + cd types && $(CARGO) check --all-targets --features=std-fs-io + cd types && $(CARGO) check --lib --features=std-fs-io + +check-testing-features: + cd types && $(CARGO) check --all-targets --no-default-features --features=testing + cd types && $(CARGO) check --all-targets --features=testing .PHONY: check-format check-format: - $(CARGO) fmt --all -- --check + $(CARGO_PINNED_NIGHTLY) fmt --all -- --check .PHONY: format format: - $(CARGO) fmt --all + $(CARGO_PINNED_NIGHTLY) fmt --all + +lint-contracts-rs: + cd smart_contracts/contracts && $(CARGO) clippy $(patsubst %, -p %, $(ALL_CONTRACTS)) -- -D warnings -A renamed_and_removed_lints .PHONY: lint -lint: - $(CARGO) clippy --all-targets --all-features --workspace -- -D warnings -A renamed_and_removed_lints +lint: lint-contracts-rs lint-default-features lint-all-features lint-smart-contracts lint-no-default-features -.PHONY: audit -audit: - $(CARGO) audit +.PHONY: lint-default-features +lint-default-features: + $(CARGO) clippy --all-targets -- -D warnings + +.PHONY: lint-no-default-features +lint-no-default-features: + $(CARGO) clippy --all-targets --no-default-features -- -D warnings -.PHONY: build-docs-stable-rs -build-docs-stable-rs: 
$(CRATES_WITH_DOCS_RS_MANIFEST_TABLE) +.PHONY: lint-all-features +lint-all-features: + $(CARGO) clippy --all-targets --all-features -- -D warnings -doc-stable/%: CARGO_TOOLCHAIN += +stable -doc-stable/%: - $(CARGO) doc $(CARGO_FLAGS) --manifest-path "$*/Cargo.toml" --no-deps +.PHONY: lint-smart-contracts +lint-smart-contracts: + cd smart_contracts/contract && $(CARGO) clippy --all-targets -- -D warnings -A renamed_and_removed_lints + +.PHONY: audit-rs +audit-rs: + $(CARGO) audit --ignore RUSTSEC-2024-0437 --ignore RUSTSEC-2025-0022 + +.PHONY: audit +audit: audit-rs + +.PHONY: doc +doc: + RUSTFLAGS="-D warnings" RUSTDOCFLAGS="--cfg docsrs" $(CARGO_PINNED_NIGHTLY) doc --all-features $(CARGO_FLAGS) --no-deps + cd smart_contracts/contract && RUSTFLAGS="-D warnings" RUSTDOCFLAGS="--cfg docsrs" $(CARGO_PINNED_NIGHTLY) doc --all-features $(CARGO_FLAGS) --no-deps .PHONY: check-rs -check-rs: \ - build-docs-stable-rs \ - build \ +check: \ check-format \ + doc \ lint \ audit \ + check-no-default-features \ + check-std-features \ + check-std-fs-io-features \ + check-testing-features \ test-rs \ + test-rs-no-default-features \ test-contracts-rs -.PHONY: check -check: \ - build-docs-stable-rs \ - build \ - check-format \ - lint \ - audit \ - test \ - test-contracts - .PHONY: clean clean: rm -rf resources/local/chainspec.toml - rm -rf $(CONTRACT_TARGET_DIR_AS) $(CARGO) clean .PHONY: build-for-packaging build-for-packaging: build-client-contracts - $(CARGO) build --release - -.PHONY: deb -deb: setup build-for-packaging - cd client && $(CARGO) deb -p casper-client --no-build + $(LEGACY) $(CARGO) build --release .PHONY: package package: @@ -184,30 +178,17 @@ bench: build-contracts-rs .PHONY: setup-cargo-packagers setup-cargo-packagers: - $(CARGO) install cargo-rpm || exit 0 $(CARGO) install cargo-deb || exit 0 -.PHONY: setup-audit -setup-audit: - $(CARGO) install cargo-audit - .PHONY: setup-rs -setup-rs: rust-toolchain - $(RUSTUP) update --no-self-update - $(RUSTUP) toolchain 
install --no-self-update $(RUST_TOOLCHAIN) - $(RUSTUP) target add --toolchain $(RUST_TOOLCHAIN) wasm32-unknown-unknown - -.PHONY: setup-stable-rs -setup-stable-rs: RUST_TOOLCHAIN := stable -setup-stable-rs: setup-rs - -.PHONY: setup-nightly-rs -setup-nightly-rs: RUST_TOOLCHAIN := nightly -setup-nightly-rs: setup-rs - -.PHONY: setup-as -setup-as: smart_contracts/contract_as/package.json - cd smart_contracts/contract_as && $(NPM) ci +setup-rs: + $(RUSTUP) update + $(RUSTUP) toolchain install $(PINNED_STABLE) $(PINNED_NIGHTLY) + $(RUSTUP) target add --toolchain $(PINNED_STABLE) wasm32-unknown-unknown + $(RUSTUP) target add --toolchain $(PINNED_NIGHTLY) wasm32-unknown-unknown + $(RUSTUP) component add --toolchain $(PINNED_NIGHTLY) rustfmt clippy-preview + $(RUSTUP) component add --toolchain $(PINNED_STABLE) clippy-preview + $(CARGO) install cargo-audit .PHONY: setup -setup: setup-rs setup-as +setup: setup-rs diff --git a/README.md b/README.md index 35a7e04818..1283225c23 100644 --- a/README.md +++ b/README.md @@ -1,36 +1,42 @@ -[![LOGO](images/CasperLabs_Logo_Horizontal_RGB.png)](https://casperlabs.io/) +Casper Network Logo -Casper is the blockchain platform purpose-built to scale opportunity for everyone. Building toward blockchain’s next frontier, Casper is designed for real-world applications without sacrificing usability, cost, decentralization, or security. It removes the barriers that prevent mainstream blockchain adoption by making blockchain friendly to use, open to the world, and future-proof to support innovations today and tomorrow. Guided by open-source principles and built from the ground up to empower individuals, the team seeks to provide an equitable foundation made for long-lasting impact. Read more about our mission at: https://casperlabs.io/company +# casper-node + +Reference node for the Casper Blockchain Protocol. 
+ +## Casper Blockchain -## Current Development Status -The status on development is reported during the Community calls and is found [here](https://github.com/CasperLabs/Governance/wiki/Current-Status) +Casper is the blockchain platform purpose-built to scale opportunity for everyone. Building toward blockchain’s next frontier, +Casper is designed for real-world applications without sacrificing usability, cost, decentralization, or security. It removes +the barriers that prevent mainstream blockchain adoption by making blockchain friendly to use, open to the world, and +future-proof to support innovations today and tomorrow. Guided by open-source principles and built from the ground up to +empower individuals, the team seeks to provide an equitable foundation made for long-lasting impact. Read more about our +mission at: https://casper.network -The Casper Testnet is live. -- Transactions can be sent to: deploy.casperlabs.io via the client or via Clarity. -- [Clarity Block Exporer](https://clarity.casperlabs.io) +The Casper MainNet is live. 
+- [cspr.live Block Explorer](https://cspr.live) -## Specification +### Specification -- [Platform Specification](https://docs.casperlabs.io/en/latest/implementation/index.html) -- [Highway Consensus Proofs](https://github.com/CasperLabs/highway/releases/latest) +- [Platform Specification](https://docs.casper.network/design) +- [Highway Consensus Proofs](https://github.com/casper-network/highway/releases/latest) +- [Zug Consensus Whitepaper](http://arxiv.org/pdf/2205.06314) -## Get Started with Smart Contracts -- [Writing Smart Contracts](https://docs.casperlabs.io/en/latest/dapp-dev-guide/index.html) +### Get Started with Smart Contracts +- [Writing Smart Contracts](https://docs.casper.network/developers/) - [Rust Smart Contract SDK](https://crates.io/crates/cargo-casper) - [Rust Smart Contract API Docs](https://docs.rs/casper-contract/latest/casper_contract/contract_api/index.html) -- [AssemblyScript Smart Contract API](https://www.npmjs.com/package/@casperlabs/contract) +- [AssemblyScript Smart Contract API](https://www.npmjs.com/package/casper-contract) -## Community +### Community -- [Discord Server](https://discord.gg/mpZ9AYD) -- [CasperLabs Community Forum](https://forums.casperlabs.io/) -- [Telegram Channel](https://t.me/CasperLabs) +- [Discord Server](https://discord.gg/caspernetwork) +- [Telegram Channel](https://t.me/casperofficialann) +- [X (Twitter)](https://x.com/Casper_Network) -# casper-node -This is the core application for the Casper blockchain. -## Running a validator node from Source +## Running a casper-node from source ### Pre-Requisites for Building @@ -40,7 +46,16 @@ This is the core application for the Casper blockchain. 
* pkg-config * gcc * g++ -* optionally [wasm-strip](https://github.com/WebAssembly/wabt) (used to reduce the size of compiled Wasm) +* recommended [wasm-strip](https://github.com/WebAssembly/wabt) (used to reduce the size of compiled Wasm) + +```sh +# Ubuntu prerequisites setup example +apt update +apt install cmake libssl-dev pkg-config gcc g++ -y +# the '-s -- -y' part ensures silent mode. Omit if you want to customize +curl https://sh.rustup.rs -sSf | sh -s -- -y +``` + ### Setup @@ -80,7 +95,7 @@ __The node will not run properly without another node to connect to. It is reco ### Running multiple nodes on one machine -There is a [tool](https://github.com/CasperLabs/casper-node/tree/master/utils/nctl) which automates the process of running multiple nodes on a single machine. +There is a [tool](https://github.com/casper-network/casper-nctl) which automates the process of running multiple nodes on a single machine. Note that running multiple nodes on a single machine is normally only recommended for test purposes. @@ -104,7 +119,7 @@ casper-node validator /etc/casper-node/config.toml Note how the semicolon is used to separate configuration overrides here. -### Development environment variables +### Other environment variables To set the threshold at which a warn-level log message is generated for a long-running reactor event, use the env var `CL_EVENT_MAX_MICROSECS`. For example, to set the threshold to 1 millisecond: @@ -113,12 +128,18 @@ To set the threshold at which a warn-level log message is generated for a long-r CL_EVENT_MAX_MICROSECS=1000 ``` -To set the threshold at which a queue dump will occur, use the env var `CL_MEM_DUMP_THRESHOLD_MB`. When the process reaches this level of memory allocation a dump will occur, but this will only occur once. Queue dumps can be found in `/tmp` once they are complete. 
For example, to set the threshold to 16000 megabytes: +To set the threshold above which the size of the current scheduler queues will be dumped to logs, use the `CL_EVENT_QUEUE_DUMP_THRESHOLD` variable. For example, to set the threshold to 10000 events: ``` -CL_MEM_DUMP_THRESHOLD_MB=16000 +CL_EVENT_QUEUE_DUMP_THRESHOLD=10000 ``` +This will dump a line to the log if the total number of events in queues exceeds 10000. After each dump, the threshold will be automatically increased by 10% to avoid log flooding. + +Example log entry: +``` +Current event queue size (11000) is above the threshold (10000): details [("FinalitySignature", 3000), ("FromStorage", 1000), ("NetworkIncoming", 6500), ("Regular", 500)] +``` ## Logging @@ -131,7 +152,7 @@ RUST_LOG=info cargo run --release -- validator resources/local/config.toml If the environment variable is unset, it is equivalent to setting `RUST_LOG=error`. -#### Log message format +### Log message format A typical log message will look like: @@ -146,62 +167,138 @@ This is comprised of the following parts: * filename and line number of the source of the message * message -#### Filtering log messages +### Filtering log messages `RUST_LOG` can be set to enable varying levels for different modules. Simply set it to a comma-separated list of `module-path=level`, where the module path is as shown above in the typical log message, with the end truncated to suit. 
-For example, to enable `trace` level logging for the `small_network` module in `components`, `info` level for all other +For example, to enable `trace` level logging for the `network` module in `components`, `info` level for all other modules in `components`, and `warn` level for the remaining codebase: ``` -RUST_LOG=casper_node::components::small=trace,casper_node::comp=info,warn +RUST_LOG=casper_node::components::network=trace,casper_node::comp=info,warn +``` + +### Logging network messages and tracing events + +Special logging targets exist in `net_in` and `net_out` which can be used to log every single network message leaving or +entering a node when set to trace level: + +``` +RUST_LOG=net_in::TRACE,net_out::TRACE ``` +All messages in these logs are also assigned a unique ID that is different even if the same message is sent to multiple +nodes. The receiving node will log them using the same ID as the sender, thus enabling the tracing of a message across +multiple nodes provided all logs are available. + +Another helpful logging feature is ancestor logging. If the target `dispatch` is set to at least debug level, events +being dispatched will be logged as well. Any event has an id (`ev`) and may have an ancestor (`a`), which is the previous +event whose effects caused the resulting event to be scheduled. As an example, if an incoming network message gets +assigned an ID of `ev=123`, the first round of subsequent events will show `a=123` as their ancestor in the logs. + +### Changing the logging filter at runtime + +If necessary, the filter of a running node can be changed using the diagnostics port, using the `set-log-filter` +command. See the "Diagnostics port" section for details on how to access it. + ## Debugging Some additional debug functionality is available, mainly allowed for inspections of the internal event queue. 
-### Event queue dump +### Diagnostics port + +If the configuration option `diagnostics_port.enabled` is set to `true`, a unix socket named `debug.socket` by default can be found next to the configuration while the node is running. + +#### Interactive use + +The `debug.socket` can be connected to by tools like `socat` for interactive use: + +```sh +socat readline unix:/path/to/debug.socket +``` + +Entering `help` will show available commands. The `set` command allows configuring the current connection, see `set --help`. -The event queue can be dumped by sending a `SIGUSR1` to the running node process, e.g. if the node's process ID was `$NODE_PID`: +#### Example: Collecting a consensus dump -```console -kill -USR1 $NODE_PID +After connecting using `socat` (see above), we set the output format to JSON: + +``` +set --output=json +``` + +A confirmation will acknowledge the settings change (unless `--quiet=true` is set): + +``` +{ + "Success": { + "msg": "session unchanged" + } +} +``` + +We can now call `dump-consensus` to get the _latest_ era serialized in JSON format: + +``` +dump-consensus +{ + "Success": { + "msg": "dumping consensus state" + } +} +{"id":8,"start_time":"2022-03-01T14:54:42.176Z","start_height":88,"new_faulty" ... ``` -This will create a `queue_dump.json` in the working directory of the node. A tool like [jq](https://stedolan.github.io/jq/) can then be used to format and display it: +An era other than the latest can be dumped by specifying as a parameter, _e.g._ `dump-consensus 3` will dump the third era. See `dump-consensus --help` for details. 
+ +#### Example: Dumping the event queue -```console -$ jq < queue_dump.json +With the connection set to JSON output (see previous example), we can also dump the event queues: + +``` +dump-queues { - "NetworkIncoming": [], - "Network": [], - "Regular": [ - "AddressGossiper" - ], - "Api": [] + "Success": { + "msg": "dumping queues" + } } +{"queues":{"Regular":[],"Api":[],"Network":[],"Control":[],"NetworkIncoming":[] +}}{"queues":{"Api":[],"Regular":[],"Control":[],"NetworkIncoming":[],"Network": +[]}}{"queues":{"Network":[],"Control":[],"Api":[],"NetworkIncoming":[],"Regular +":[]}} ``` -#### jq Examples +Empty output will be produced on a node that is working without external pressure, as the queues will be empty most of the time. + -Dump the type of events: +#### Non-interactive use -```console -jq 'map_values( map(keys[0] | {"type": ., weight: 1})| group_by(.type) | map ([.[0].type,(.|length)]) | map({(.[0]): .[1]}) )' queue_dump.json +The diagnostics port can also be scripted by sending a newline-terminated list of commands through `socat`. For example, the following sequence of commands will collect a consensus dump without the success-indicating header: + +``` +set -o json -q true +dump-consensus ``` -Count number of events in each queue: +For ad-hoc dumps, this can be shortened and piped into `socat`: -```console -jq 'map_values(map(keys[0]))' queue_dump.json +```sh +echo -e 'set -o json -q true\ndump-consensus' | socat - unix-client:debug.socket > consensus-dump.json ``` +This results in the latest era being dumped into `consensus-dump.json`. + + ## Running a client -See [the client README](client/README.md). +See [the client README](https://github.com/casper-ecosystem/casper-client-rs#readme). ## Running a local network -See [the nctl utility README](utils/nctl/README.md). +See [the nctl utility README](https://github.com/casper-network/casper-nctl#readme). 
+ +## Running on an existing network + +To support upgrades with a network, the casper-node is installed using scripts distributed with the +[casper-node-launcher](https://github.com/casper-network/casper-node-launcher). diff --git a/binary_port/CHANGELOG.md b/binary_port/CHANGELOG.md new file mode 100644 index 0000000000..708d6be9c9 --- /dev/null +++ b/binary_port/CHANGELOG.md @@ -0,0 +1,20 @@ +# Changelog + +All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog]. + +[comment]: <> (Added: new features) +[comment]: <> (Changed: changes in existing functionality) +[comment]: <> (Deprecated: soon-to-be removed features) +[comment]: <> (Removed: now removed features) +[comment]: <> (Fixed: any bug fixes) +[comment]: <> (Security: in case of vulnerabilities) + +## [Unreleased] + +### Added +* `ErrorCode` has a new code `117` + +## [1.0.0] - + +### Added +* Initial release of node for Casper mainnet. \ No newline at end of file diff --git a/binary_port/Cargo.toml b/binary_port/Cargo.toml new file mode 100644 index 0000000000..94e4f21849 --- /dev/null +++ b/binary_port/Cargo.toml @@ -0,0 +1,38 @@ +[package] +name = "casper-binary-port" +version = "1.1.1" +edition = "2018" +description = "Types for the casper node binary port" +documentation = "https://docs.rs/casper-binary-port" +readme = "README.md" +homepage = "https://casper.network" +repository = "https://github.com/casper-network/casper-node/tree/master/binary_port" +license = "Apache-2.0" +exclude = ["proptest-regressions"] + +[dependencies] +bincode = "1.3.3" +bytes = "1.0.1" +casper-types = { version = "6.0.1", path = "../types", features = ["datasize", "json-schema", "std"] } +num-derive = { workspace = true } +num-traits = { workspace = true } +once_cell = { version = "1.5.2" } +rand = "0.8.3" +serde = { version = "1.0.183", features = ["derive"] } +strum = "0.27" +strum_macros = "0.27" +thiserror = "1.0.45" +tokio-util = { version = "0.6.4", features = 
["codec"] } +tracing = "0.1.18" + +[dev-dependencies] +casper-types = { path = "../types", features = ["datasize", "json-schema", "std", "testing"] } +serde_json = "1" +serde_test = "1" + +[package.metadata.docs.rs] +all-features = true +rustc-args = ["--cfg", "docsrs"] + +[features] +testing = ["rand/default"] diff --git a/binary_port/README.md b/binary_port/README.md new file mode 100644 index 0000000000..98a00a4d54 --- /dev/null +++ b/binary_port/README.md @@ -0,0 +1,15 @@ +# `casper-binary-port` + +[![LOGO](https://raw.githubusercontent.com/casper-network/casper-node/master/images/casper-association-logo-primary.svg)](https://casper.network/) + +[![Crates.io](https://img.shields.io/crates/v/casper-hashing)](https://crates.io/crates/casper-binary-port) +[![Documentation](https://docs.rs/casper-hashing/badge.svg)](https://docs.rs/casper-binary-port) +[![License](https://img.shields.io/badge/license-Apache-blue)](https://github.com/casper-network/casper-node/blob/master/LICENSE) + +Types for the binary port on a casper network node. + +[Node Operator Guide](https://docs.casper.network/operators/) + +## License + +Licensed under the [Apache License Version 2.0](https://github.com/casper-network/casper-node/blob/master/LICENSE). diff --git a/binary_port/src/balance_response.rs b/binary_port/src/balance_response.rs new file mode 100644 index 0000000000..cae8ac3804 --- /dev/null +++ b/binary_port/src/balance_response.rs @@ -0,0 +1,105 @@ +use std::collections::BTreeMap; +#[cfg(test)] +use std::{collections::VecDeque, iter::FromIterator}; + +#[cfg(test)] +use casper_types::testing::TestRng; +use casper_types::{ + bytesrepr::{self, FromBytes, ToBytes}, + global_state::TrieMerkleProof, + system::mint::BalanceHoldAddrTag, + BlockTime, Key, StoredValue, U512, +}; +#[cfg(test)] +use casper_types::{global_state::TrieMerkleProofStep, CLValue}; +#[cfg(test)] +use rand::Rng; + +/// Response to a balance query. 
+#[derive(Debug, Clone, PartialEq, Eq)] +pub struct BalanceResponse { + /// The purses total balance, not considering holds. + pub total_balance: U512, + /// The available balance (total balance - sum of all active holds). + pub available_balance: U512, + /// A proof that the given value is present in the Merkle trie. + pub total_balance_proof: Box>, + /// Any time-relevant active holds on the balance. + pub balance_holds: BTreeMap, +} + +impl BalanceResponse { + #[cfg(test)] + pub(crate) fn random(rng: &mut TestRng) -> Self { + BalanceResponse { + total_balance: rng.gen(), + available_balance: rng.gen(), + total_balance_proof: Box::new(TrieMerkleProof::new( + Key::URef(rng.gen()), + StoredValue::CLValue(CLValue::from_t(rng.gen::()).unwrap()), + VecDeque::from_iter([TrieMerkleProofStep::random(rng)]), + )), + balance_holds: BTreeMap::new(), + } + } +} + +impl ToBytes for BalanceResponse { + fn to_bytes(&self) -> Result, casper_types::bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), casper_types::bytesrepr::Error> { + self.total_balance.write_bytes(writer)?; + self.available_balance.write_bytes(writer)?; + self.total_balance_proof.write_bytes(writer)?; + self.balance_holds.write_bytes(writer) + } + + fn serialized_length(&self) -> usize { + self.total_balance.serialized_length() + + self.available_balance.serialized_length() + + self.total_balance_proof.serialized_length() + + self.balance_holds.serialized_length() + } +} + +impl FromBytes for BalanceResponse { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), casper_types::bytesrepr::Error> { + let (total_balance, remainder) = U512::from_bytes(bytes)?; + let (available_balance, remainder) = U512::from_bytes(remainder)?; + let (total_balance_proof, remainder) = + TrieMerkleProof::::from_bytes(remainder)?; + let (balance_holds, remainder) = + BTreeMap::::from_bytes(remainder)?; + 
Ok(( + BalanceResponse { + total_balance, + available_balance, + total_balance_proof: Box::new(total_balance_proof), + balance_holds, + }, + remainder, + )) + } +} + +/// Balance holds with Merkle proofs. +pub type BalanceHoldsWithProof = + BTreeMap)>; + +#[cfg(test)] +mod tests { + use super::*; + use casper_types::testing::TestRng; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + + let val = BalanceResponse::random(rng); + bytesrepr::test_serialization_roundtrip(&val); + } +} diff --git a/binary_port/src/binary_message.rs b/binary_port/src/binary_message.rs new file mode 100644 index 0000000000..0c3240a586 --- /dev/null +++ b/binary_port/src/binary_message.rs @@ -0,0 +1,294 @@ +#[cfg(test)] +use casper_types::testing::TestRng; +#[cfg(test)] +use rand::Rng; + +use bytes::{Buf, Bytes}; +use tokio_util::codec::{self}; + +use crate::error::Error; + +type LengthEncoding = u32; +const LENGTH_ENCODING_SIZE_BYTES: usize = size_of::(); + +#[derive(Clone, PartialEq, Debug)] +pub struct BinaryMessage(Bytes); + +impl BinaryMessage { + pub fn new(payload: Vec) -> Self { + Self(payload.into()) + } + + pub fn payload(&self) -> &[u8] { + &self.0 + } + + #[cfg(test)] + pub(crate) fn random(rng: &mut TestRng) -> Self { + let len = rng.gen_range(1..=1024); + let payload = std::iter::repeat_with(|| rng.gen()).take(len).collect(); + BinaryMessage(payload) + } +} + +#[derive(Clone, Copy)] +pub struct BinaryMessageCodec { + max_message_size_bytes: u32, +} + +impl BinaryMessageCodec { + pub fn new(max_message_size_bytes: u32) -> Self { + Self { + max_message_size_bytes, + } + } + + pub fn max_message_size_bytes(&self) -> u32 { + self.max_message_size_bytes + } +} + +impl codec::Encoder for BinaryMessageCodec { + type Error = Error; + + fn encode( + &mut self, + item: BinaryMessage, + dst: &mut bytes::BytesMut, + ) -> Result<(), Self::Error> { + let length = item.0.len() as LengthEncoding; + if length > self.max_message_size_bytes { + return 
Err(Error::RequestTooLarge { + allowed: self.max_message_size_bytes, + got: length, + }); + } + let length_bytes = length.to_le_bytes(); + dst.extend(length_bytes.iter().chain(item.0.iter())); + Ok(()) + } +} + +impl codec::Decoder for BinaryMessageCodec { + type Item = BinaryMessage; + + type Error = Error; + + fn decode(&mut self, src: &mut bytes::BytesMut) -> Result, Self::Error> { + let (length, have_full_frame) = if let [b1, b2, b3, b4, remainder @ ..] = &src[..] { + let length = LengthEncoding::from_le_bytes([*b1, *b2, *b3, *b4]) as usize; + if length == 0 { + return Err(Error::EmptyRequest); + } + let remainder_length = remainder.len(); + (length, remainder_length >= length) + } else { + // Not enough bytes to read the length. + return Ok(None); + }; + + if length > self.max_message_size_bytes as usize { + return Err(Error::RequestTooLarge { + allowed: self.max_message_size_bytes, + got: length as u32, + }); + } + + if !have_full_frame { + // Not enough bytes to read the whole message. 
+ return Ok(None); + }; + + src.advance(LENGTH_ENCODING_SIZE_BYTES); + Ok(Some(BinaryMessage(src.split_to(length).freeze()))) + } +} + +#[cfg(test)] +mod tests { + use casper_types::testing::TestRng; + use rand::Rng; + use tokio_util::codec::{Decoder, Encoder}; + + use crate::{ + binary_message::{LengthEncoding, LENGTH_ENCODING_SIZE_BYTES}, + error::Error, + BinaryMessage, BinaryMessageCodec, + }; + + const MAX_MESSAGE_SIZE_BYTES: u32 = 1024 * 1024; + + #[test] + fn binary_message_codec() { + let rng = &mut TestRng::new(); + let val = BinaryMessage::random(rng); + let mut codec = BinaryMessageCodec::new(MAX_MESSAGE_SIZE_BYTES); + let mut bytes = bytes::BytesMut::new(); + codec + .encode(val.clone(), &mut bytes) + .expect("should encode"); + + let decoded = codec + .decode(&mut bytes) + .expect("should decode") + .expect("should be Some"); + + assert_eq!(val, decoded); + } + + #[test] + fn should_not_decode_when_not_enough_bytes_to_decode_length() { + let rng = &mut TestRng::new(); + let val = BinaryMessage::random(rng); + let mut codec = BinaryMessageCodec::new(MAX_MESSAGE_SIZE_BYTES); + let mut bytes = bytes::BytesMut::new(); + codec.encode(val, &mut bytes).expect("should encode"); + + let _ = bytes.split_off(LENGTH_ENCODING_SIZE_BYTES / 2); + let in_bytes = bytes.clone(); + assert!(codec.decode(&mut bytes).expect("should decode").is_none()); + + // Ensure that the bytes are not consumed. + assert_eq!(in_bytes, bytes); + } + + #[test] + fn should_not_decode_when_not_enough_bytes_to_decode_full_frame() { + let rng = &mut TestRng::new(); + let val = BinaryMessage::random(rng); + let mut codec = BinaryMessageCodec::new(MAX_MESSAGE_SIZE_BYTES); + let mut bytes = bytes::BytesMut::new(); + codec.encode(val, &mut bytes).expect("should encode"); + + let _ = bytes.split_off(bytes.len() - 1); + let in_bytes = bytes.clone(); + assert!(codec.decode(&mut bytes).expect("should decode").is_none()); + + // Ensure that the bytes are not consumed. 
+ assert_eq!(in_bytes, bytes); + } + + #[test] + fn should_leave_remainder_in_buffer() { + let rng = &mut TestRng::new(); + let val = BinaryMessage::random(rng); + let mut codec = BinaryMessageCodec::new(MAX_MESSAGE_SIZE_BYTES); + let mut bytes = bytes::BytesMut::new(); + codec.encode(val, &mut bytes).expect("should encode"); + let suffix = bytes::Bytes::from_static(b"suffix"); + bytes.extend(&suffix); + + let _ = codec.decode(&mut bytes); + // Ensure that the bytes are not consumed. + assert_eq!(bytes, suffix); + } + + #[test] + fn encode_should_bail_on_too_large_request() { + let mut codec = BinaryMessageCodec::new(MAX_MESSAGE_SIZE_BYTES); + let too_large = MAX_MESSAGE_SIZE_BYTES as usize + 1; + let val = BinaryMessage::new(vec![0; too_large]); + let mut bytes = bytes::BytesMut::new(); + let result = codec.encode(val, &mut bytes).unwrap_err(); + + assert!(matches!(result, Error::RequestTooLarge { allowed, got } + if allowed == codec.max_message_size_bytes && got == too_large as u32)); + } + + #[test] + fn should_encode_request_of_maximum_size() { + let mut codec = BinaryMessageCodec::new(MAX_MESSAGE_SIZE_BYTES); + let just_right_size = MAX_MESSAGE_SIZE_BYTES as usize; + let val = BinaryMessage::new(vec![0; just_right_size]); + let mut bytes = bytes::BytesMut::new(); + + let result = codec.encode(val, &mut bytes); + assert!(result.is_ok()); + } + + #[test] + fn decode_should_bail_on_too_large_request() { + let rng = &mut TestRng::new(); + let mut codec = BinaryMessageCodec::new(MAX_MESSAGE_SIZE_BYTES); + let mut bytes = bytes::BytesMut::new(); + let too_large = (codec.max_message_size_bytes + 1) as LengthEncoding; + bytes.extend(too_large.to_le_bytes()); + bytes.extend(std::iter::repeat_with(|| rng.gen::()).take(too_large as usize)); + + let result = codec.decode(&mut bytes).unwrap_err(); + assert!(matches!(result, Error::RequestTooLarge { allowed, got } + if allowed == codec.max_message_size_bytes && got == too_large)); + } + + #[test] + fn 
should_decode_request_of_maximum_size() { + let rng = &mut TestRng::new(); + let mut codec = BinaryMessageCodec::new(MAX_MESSAGE_SIZE_BYTES); + let mut bytes = bytes::BytesMut::new(); + let just_right_size = (codec.max_message_size_bytes) as LengthEncoding; + bytes.extend(just_right_size.to_le_bytes()); + bytes.extend(std::iter::repeat_with(|| rng.gen::()).take(just_right_size as usize)); + + let result = codec.decode(&mut bytes); + assert!(result.is_ok()); + } + + #[test] + fn should_bail_on_empty_request() { + let mut codec = BinaryMessageCodec::new(MAX_MESSAGE_SIZE_BYTES); + let mut bytes = bytes::BytesMut::new(); + let empty = 0 as LengthEncoding; + bytes.extend(&empty.to_le_bytes()); + + let result = codec.decode(&mut bytes).unwrap_err(); + assert!(matches!(result, Error::EmptyRequest)); + } + + #[test] + fn should_decoded_queued_messages() { + let rng = &mut TestRng::new(); + let count = rng.gen_range(10000..20000); + let messages = (0..count) + .map(|_| BinaryMessage::random(rng)) + .collect::>(); + let mut codec = BinaryMessageCodec::new(MAX_MESSAGE_SIZE_BYTES); + let mut bytes = bytes::BytesMut::new(); + for msg in &messages { + codec + .encode(msg.clone(), &mut bytes) + .expect("should encode"); + } + + let mut decoded_messages = vec![]; + loop { + let maybe_message = codec.decode(&mut bytes).expect("should decode"); + match maybe_message { + Some(message) => decoded_messages.push(message), + None => break, + } + } + assert_eq!(messages, decoded_messages); + } + + #[test] + fn should_not_decode_when_read_bytes_extend_max() { + const MAX_MESSAGE_BYTES: usize = 1000; + let rng = &mut TestRng::new(); + let mut codec = BinaryMessageCodec::new(MAX_MESSAGE_BYTES as u32); + let mut bytes = bytes::BytesMut::new(); + let some_length = (MAX_MESSAGE_BYTES * 2_usize) as LengthEncoding; //This value doesn't match the + // length of mock_bytes intentionally so we can be sure at what point did the encoder bail - + // we want to ensure that the encoder doesn't read the 
whole message before it bails + bytes.extend(&some_length.to_le_bytes()); + bytes.extend(std::iter::repeat_with(|| rng.gen::()).take(MAX_MESSAGE_BYTES * 3)); + + let message_res = codec.decode(&mut bytes); + assert!(message_res.is_err()); + let err = message_res.err().unwrap(); + assert!(matches!( + err, + Error::RequestTooLarge { allowed, got} + if allowed == MAX_MESSAGE_BYTES as u32 && got == MAX_MESSAGE_BYTES as u32 * 2, + )) + } +} diff --git a/binary_port/src/binary_response.rs b/binary_port/src/binary_response.rs new file mode 100644 index 0000000000..57ba54fc78 --- /dev/null +++ b/binary_port/src/binary_response.rs @@ -0,0 +1,151 @@ +use casper_types::bytesrepr::{self, Bytes, FromBytes, ToBytes}; + +use crate::{ + binary_response_header::BinaryResponseHeader, + error_code::ErrorCode, + response_type::{PayloadEntity, ResponseType}, +}; + +#[cfg(test)] +use casper_types::testing::TestRng; + +/// The response used in the binary port protocol. +#[derive(Debug, PartialEq)] +pub struct BinaryResponse { + /// Header of the binary response. + header: BinaryResponseHeader, + /// The response. + payload: Vec, +} + +impl BinaryResponse { + /// Creates new empty binary response. + pub fn new_empty() -> Self { + Self { + header: BinaryResponseHeader::new(None), + payload: vec![], + } + } + + /// Creates new binary response with error code. + pub fn new_error(error: ErrorCode) -> Self { + BinaryResponse { + header: BinaryResponseHeader::new_error(error), + payload: vec![], + } + } + + /// Creates new binary response from raw bytes. + pub fn from_raw_bytes(payload_type: ResponseType, payload: Vec) -> Self { + BinaryResponse { + header: BinaryResponseHeader::new(Some(payload_type)), + payload, + } + } + + /// Creates a new binary response from a value. 
+ pub fn from_value(val: V) -> Self + where + V: ToBytes + PayloadEntity, + { + ToBytes::to_bytes(&val).map_or( + BinaryResponse::new_error(ErrorCode::InternalError), + |payload| BinaryResponse { + payload, + header: BinaryResponseHeader::new(Some(V::RESPONSE_TYPE)), + }, + ) + } + + /// Creates a new binary response from an optional value. + pub fn from_option(opt: Option) -> Self + where + V: ToBytes + PayloadEntity, + { + match opt { + Some(val) => Self::from_value(val), + None => Self::new_empty(), + } + } + + /// Returns true if response is success. + pub fn is_success(&self) -> bool { + self.header.is_success() + } + + /// Returns the error code. + pub fn error_code(&self) -> u16 { + self.header.error_code() + } + + /// Returns the payload type of the response. + pub fn returned_data_type_tag(&self) -> Option { + self.header.returned_data_type_tag() + } + + /// Returns true if the response means that data has not been found. + pub fn is_not_found(&self) -> bool { + self.header.is_not_found() + } + + /// Returns the payload. 
+ pub fn payload(&self) -> &[u8] { + self.payload.as_ref() + } + + #[cfg(test)] + pub(crate) fn random(rng: &mut TestRng) -> Self { + Self { + header: BinaryResponseHeader::random(rng), + payload: rng.random_vec(64..128), + } + } +} + +impl ToBytes for BinaryResponse { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + let BinaryResponse { header, payload } = self; + + header.write_bytes(writer)?; + payload.write_bytes(writer) + } + + fn serialized_length(&self) -> usize { + self.header.serialized_length() + self.payload.serialized_length() + } +} + +impl FromBytes for BinaryResponse { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (header, remainder) = FromBytes::from_bytes(bytes)?; + let (payload, remainder) = Bytes::from_bytes(remainder)?; + + Ok(( + BinaryResponse { + header, + payload: payload.into(), + }, + remainder, + )) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use casper_types::testing::TestRng; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + + let val = BinaryResponse::random(rng); + bytesrepr::test_serialization_roundtrip(&val); + } +} diff --git a/binary_port/src/binary_response_and_request.rs b/binary_port/src/binary_response_and_request.rs new file mode 100644 index 0000000000..2614981f1e --- /dev/null +++ b/binary_port/src/binary_response_and_request.rs @@ -0,0 +1,136 @@ +use casper_types::bytesrepr::{self, Bytes, FromBytes, ToBytes}; + +use crate::{binary_response::BinaryResponse, response_type::PayloadEntity, ResponseType}; + +use crate::record_id::RecordId; +#[cfg(test)] +use casper_types::testing::TestRng; + +/// The binary response along with the original binary request attached. +#[derive(Debug, PartialEq)] +pub struct BinaryResponseAndRequest { + /// Context of the original request. 
+ request: Bytes, + /// The response. + response: BinaryResponse, +} + +impl BinaryResponseAndRequest { + /// Creates new binary response with the original request attached. + pub fn new(data: BinaryResponse, request: Bytes) -> Self { + Self { + request, + response: data, + } + } + + /// Returns a new binary response with specified data and no original request. + pub fn new_test_response( + record_id: RecordId, + data: &A, + ) -> BinaryResponseAndRequest { + let response = BinaryResponse::from_raw_bytes( + ResponseType::from_record_id(record_id, false), + data.to_bytes().unwrap(), + ); + Self::new(response, Bytes::from(vec![])) + } + + /// Returns a new binary response with specified legacy data and no original request. + pub fn new_legacy_test_response( + record_id: RecordId, + data: &A, + ) -> BinaryResponseAndRequest { + let response = BinaryResponse::from_raw_bytes( + ResponseType::from_record_id(record_id, true), + bincode::serialize(data).unwrap(), + ); + Self::new(response, Bytes::from(vec![])) + } + + /// Returns true if response is success. + pub fn is_success(&self) -> bool { + self.response.is_success() + } + + /// Returns the error code. + pub fn error_code(&self) -> u16 { + self.response.error_code() + } + + #[cfg(test)] + pub(crate) fn random(rng: &mut TestRng) -> Self { + let bytes = vec![1; 155]; + Self { + request: Bytes::from(bytes), + response: BinaryResponse::random(rng), + } + } + + /// Returns serialized bytes representing the original request. + pub fn request(&self) -> &[u8] { + &self.request + } + + /// Returns the inner binary response. 
+ pub fn response(&self) -> &BinaryResponse { + &self.response + } +} + +impl ToBytes for BinaryResponseAndRequest { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + let BinaryResponseAndRequest { request, response } = self; + request.write_bytes(writer)?; + response.write_bytes(writer) + } + + fn serialized_length(&self) -> usize { + self.request.serialized_length() + self.response.serialized_length() + } +} + +impl FromBytes for BinaryResponseAndRequest { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (request, remainder) = FromBytes::from_bytes(bytes)?; + let (response, remainder) = FromBytes::from_bytes(remainder)?; + + Ok((BinaryResponseAndRequest { request, response }, remainder)) + } +} + +impl From for BinaryResponse { + fn from(response_and_request: BinaryResponseAndRequest) -> Self { + let BinaryResponseAndRequest { response, .. 
} = response_and_request; + response + } +} + +#[cfg(test)] +mod tests { + use super::*; + use casper_types::testing::TestRng; + + #[test] + fn roundtrip() { + let rng = &mut TestRng::new(); + let bytes = vec![1; 155]; + let response = BinaryResponse::random(rng); + let val = BinaryResponseAndRequest::new(response, Bytes::from(bytes)); + bytesrepr::test_serialization_roundtrip(&val); + } + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + + let val = BinaryResponseAndRequest::random(rng); + bytesrepr::test_serialization_roundtrip(&val); + } +} diff --git a/binary_port/src/binary_response_header.rs b/binary_port/src/binary_response_header.rs new file mode 100644 index 0000000000..2ee7502bbe --- /dev/null +++ b/binary_port/src/binary_response_header.rs @@ -0,0 +1,125 @@ +use crate::{error_code::ErrorCode, response_type::ResponseType}; +use casper_types::bytesrepr::{self, FromBytes, ToBytes}; + +#[cfg(test)] +use casper_types::testing::TestRng; +#[cfg(test)] +use rand::Rng; + +/// Header of the binary response. +#[derive(Debug, PartialEq)] +pub struct BinaryResponseHeader { + binary_response_version: u16, + error: u16, + returned_data_type_tag: Option, +} + +impl BinaryResponseHeader { + pub const BINARY_RESPONSE_VERSION: u16 = 1; + /// Creates new binary response header representing success. + pub fn new(returned_data_type: Option) -> Self { + Self { + binary_response_version: Self::BINARY_RESPONSE_VERSION, + error: ErrorCode::NoError as u16, + returned_data_type_tag: returned_data_type.map(|ty| ty as u8), + } + } + + /// Creates new binary response header representing error. + pub fn new_error(error: ErrorCode) -> Self { + Self { + binary_response_version: Self::BINARY_RESPONSE_VERSION, + error: error as u16, + returned_data_type_tag: None, + } + } + + /// Returns the type of the returned data. + pub fn returned_data_type_tag(&self) -> Option { + self.returned_data_type_tag + } + + /// Returns the error code. 
+ pub fn error_code(&self) -> u16 { + self.error + } + + /// Returns true if the response represents success. + pub fn is_success(&self) -> bool { + self.error == ErrorCode::NoError as u16 + } + + /// Returns true if the response indicates the data was not found. + pub fn is_not_found(&self) -> bool { + self.error == ErrorCode::NotFound as u16 + } + + #[cfg(test)] + pub(crate) fn random(rng: &mut TestRng) -> Self { + let error = rng.gen(); + let returned_data_type_tag = if rng.gen() { None } else { Some(rng.gen()) }; + + BinaryResponseHeader { + binary_response_version: Self::BINARY_RESPONSE_VERSION, + error, + returned_data_type_tag, + } + } +} + +impl ToBytes for BinaryResponseHeader { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + let Self { + binary_response_version, + error, + returned_data_type_tag, + } = self; + + binary_response_version.write_bytes(writer)?; + error.write_bytes(writer)?; + returned_data_type_tag.write_bytes(writer) + } + + fn serialized_length(&self) -> usize { + self.binary_response_version.serialized_length() + + self.error.serialized_length() + + self.returned_data_type_tag.serialized_length() + } +} + +impl FromBytes for BinaryResponseHeader { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (binary_response_version, remainder) = FromBytes::from_bytes(bytes)?; + let (error, remainder) = FromBytes::from_bytes(remainder)?; + let (returned_data_type_tag, remainder) = FromBytes::from_bytes(remainder)?; + + Ok(( + BinaryResponseHeader { + binary_response_version, + error, + returned_data_type_tag, + }, + remainder, + )) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use casper_types::testing::TestRng; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + + let val = 
BinaryResponseHeader::random(rng); + bytesrepr::test_serialization_roundtrip(&val); + } +} diff --git a/binary_port/src/command.rs b/binary_port/src/command.rs new file mode 100644 index 0000000000..a3099028d0 --- /dev/null +++ b/binary_port/src/command.rs @@ -0,0 +1,262 @@ +use core::convert::TryFrom; + +use casper_types::{ + bytesrepr::{self, FromBytes, ToBytes}, + Transaction, +}; + +use crate::get_request::GetRequest; + +#[cfg(test)] +use casper_types::testing::TestRng; +#[cfg(test)] +use rand::Rng; + +/// The header of a binary request. +#[derive(Debug, PartialEq)] +pub struct CommandHeader { + header_version: u16, + type_tag: u8, + id: u16, +} + +impl CommandHeader { + // Defines the current version of the header, in practice defining the current version of the + // binary port protocol. Requests with mismatched header version will be dropped. + pub const HEADER_VERSION: u16 = 1; + + /// Creates new binary request header. + pub fn new(type_tag: CommandTag, id: u16) -> Self { + Self { + header_version: Self::HEADER_VERSION, + type_tag: type_tag.into(), + id, + } + } + + /// Returns the type tag of the request. + pub fn type_tag(&self) -> u8 { + self.type_tag + } + + /// Returns the request id. + pub fn id(&self) -> u16 { + self.id + } + + /// Returns the header version. 
+ pub fn version(&self) -> u16 { + self.header_version + } + + #[cfg(any(feature = "testing", test))] + pub fn set_binary_request_version(&mut self, version: u16) { + self.header_version = version; + } + + #[cfg(test)] + pub(crate) fn random(rng: &mut TestRng) -> Self { + Self { + header_version: rng.gen(), + type_tag: CommandTag::random(rng).into(), + id: rng.gen(), + } + } +} + +impl ToBytes for CommandHeader { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.header_version.write_bytes(writer)?; + self.type_tag.write_bytes(writer)?; + self.id.write_bytes(writer) + } + + fn serialized_length(&self) -> usize { + self.header_version.serialized_length() + + self.type_tag.serialized_length() + + self.id.serialized_length() + } +} + +impl FromBytes for CommandHeader { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (binary_request_version, remainder) = FromBytes::from_bytes(bytes)?; + let (type_tag, remainder) = FromBytes::from_bytes(remainder)?; + let (id, remainder) = FromBytes::from_bytes(remainder)?; + Ok(( + CommandHeader { + header_version: binary_request_version, + type_tag, + id, + }, + remainder, + )) + } +} + +/// A request to the binary access interface. +#[derive(Debug, PartialEq)] + +pub enum Command { + /// Request to get data from the node + Get(GetRequest), + /// Request to add a transaction into a blockchain. + TryAcceptTransaction { + /// Transaction to be handled. + transaction: Transaction, + }, + /// Request to execute a transaction speculatively. + TrySpeculativeExec { + /// Transaction to execute. + transaction: Transaction, + }, +} + +impl Command { + /// Returns the type tag of the request. + pub fn tag(&self) -> CommandTag { + match self { + Command::Get(_) => CommandTag::Get, + Command::TryAcceptTransaction { .. 
} => CommandTag::TryAcceptTransaction, + Command::TrySpeculativeExec { .. } => CommandTag::TrySpeculativeExec, + } + } + + #[cfg(test)] + pub(crate) fn random(rng: &mut TestRng) -> Self { + match CommandTag::random(rng) { + CommandTag::Get => Self::Get(GetRequest::random(rng)), + CommandTag::TryAcceptTransaction => Self::TryAcceptTransaction { + transaction: Transaction::random(rng), + }, + CommandTag::TrySpeculativeExec => Self::TrySpeculativeExec { + transaction: Transaction::random(rng), + }, + } + } +} + +impl ToBytes for Command { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + Command::Get(inner) => inner.write_bytes(writer), + Command::TryAcceptTransaction { transaction } => transaction.write_bytes(writer), + Command::TrySpeculativeExec { transaction } => transaction.write_bytes(writer), + } + } + + fn serialized_length(&self) -> usize { + match self { + Command::Get(inner) => inner.serialized_length(), + Command::TryAcceptTransaction { transaction } => transaction.serialized_length(), + Command::TrySpeculativeExec { transaction } => transaction.serialized_length(), + } + } +} + +impl TryFrom<(CommandTag, &[u8])> for Command { + type Error = bytesrepr::Error; + + fn try_from((tag, bytes): (CommandTag, &[u8])) -> Result { + let (req, remainder) = match tag { + CommandTag::Get => { + let (get_request, remainder) = FromBytes::from_bytes(bytes)?; + (Command::Get(get_request), remainder) + } + CommandTag::TryAcceptTransaction => { + let (transaction, remainder) = FromBytes::from_bytes(bytes)?; + (Command::TryAcceptTransaction { transaction }, remainder) + } + CommandTag::TrySpeculativeExec => { + let (transaction, remainder) = FromBytes::from_bytes(bytes)?; + (Command::TrySpeculativeExec { transaction }, remainder) + } + }; + if !remainder.is_empty() { + return 
Err(bytesrepr::Error::LeftOverBytes); + } + Ok(req) + } +} + +/// The type tag of a binary request. +#[derive(Debug, PartialEq)] +#[repr(u8)] +pub enum CommandTag { + /// Request to get data from the node + Get = 0, + /// Request to add a transaction into a blockchain. + TryAcceptTransaction = 1, + /// Request to execute a transaction speculatively. + TrySpeculativeExec = 2, +} + +impl CommandTag { + /// Creates a random `CommandTag`. + #[cfg(test)] + pub fn random(rng: &mut TestRng) -> Self { + match rng.gen_range(0..3) { + 0 => CommandTag::Get, + 1 => CommandTag::TryAcceptTransaction, + 2 => CommandTag::TrySpeculativeExec, + _ => unreachable!(), + } + } +} + +impl TryFrom for CommandTag { + type Error = InvalidCommandTag; + + fn try_from(value: u8) -> Result { + match value { + 0 => Ok(CommandTag::Get), + 1 => Ok(CommandTag::TryAcceptTransaction), + 2 => Ok(CommandTag::TrySpeculativeExec), + _ => Err(InvalidCommandTag), + } + } +} + +impl From for u8 { + fn from(value: CommandTag) -> Self { + value as u8 + } +} + +/// Error raised when trying to convert an invalid u8 into a `CommandTag`. 
+pub struct InvalidCommandTag; + +#[cfg(test)] +mod tests { + use super::*; + use casper_types::testing::TestRng; + + #[test] + fn header_bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + + let val = CommandHeader::random(rng); + bytesrepr::test_serialization_roundtrip(&val); + } + + #[test] + fn request_bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + + let val = Command::random(rng); + let bytes = val.to_bytes().expect("should serialize"); + assert_eq!(Command::try_from((val.tag(), &bytes[..])), Ok(val)); + } +} diff --git a/binary_port/src/dictionary_item_identifier.rs b/binary_port/src/dictionary_item_identifier.rs new file mode 100644 index 0000000000..28e5c81150 --- /dev/null +++ b/binary_port/src/dictionary_item_identifier.rs @@ -0,0 +1,256 @@ +#[cfg(test)] +use casper_types::testing::TestRng; +#[cfg(test)] +use rand::Rng; + +use casper_types::{ + account::AccountHash, + bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + DictionaryAddr, EntityAddr, HashAddr, URef, +}; + +const ACCOUNT_NAMED_KEY_TAG: u8 = 0; +const CONTRACT_NAMED_KEY_TAG: u8 = 1; +const ENTITY_NAMED_KEY_TAG: u8 = 2; +const UREF_TAG: u8 = 3; +const DICTIONARY_ITEM_TAG: u8 = 4; + +/// Options for dictionary item lookups. +#[derive(Clone, Debug, PartialEq)] +pub enum DictionaryItemIdentifier { + /// Lookup a dictionary item via an accounts named keys. + AccountNamedKey { + /// The account hash. + hash: AccountHash, + /// The named key under which the dictionary seed URef is stored. + dictionary_name: String, + /// The dictionary item key formatted as a string. + dictionary_item_key: String, + }, + /// Lookup a dictionary item via a contracts named keys. + ContractNamedKey { + /// The contract hash. + hash: HashAddr, + /// The named key under which the dictionary seed URef is stored. + dictionary_name: String, + /// The dictionary item key formatted as a string. + dictionary_item_key: String, + }, + /// Lookup a dictionary item via an entities named keys. 
+ EntityNamedKey { + /// The entity address. + addr: EntityAddr, + /// The named key under which the dictionary seed URef is stored. + dictionary_name: String, + /// The dictionary item key formatted as a string. + dictionary_item_key: String, + }, + /// Lookup a dictionary item via its seed URef. + URef { + /// The dictionary's seed URef. + seed_uref: URef, + /// The dictionary item key formatted as a string. + dictionary_item_key: String, + }, + /// Lookup a dictionary item via its unique key. + DictionaryItem(DictionaryAddr), +} + +impl DictionaryItemIdentifier { + #[cfg(test)] + pub(crate) fn random(rng: &mut TestRng) -> Self { + match rng.gen_range(0..5) { + 0 => DictionaryItemIdentifier::AccountNamedKey { + hash: rng.gen(), + dictionary_name: rng.random_string(32..64), + dictionary_item_key: rng.random_string(32..64), + }, + 1 => DictionaryItemIdentifier::ContractNamedKey { + hash: rng.gen(), + dictionary_name: rng.random_string(32..64), + dictionary_item_key: rng.random_string(32..64), + }, + 2 => DictionaryItemIdentifier::EntityNamedKey { + addr: rng.gen(), + dictionary_name: rng.random_string(32..64), + dictionary_item_key: rng.random_string(32..64), + }, + 3 => DictionaryItemIdentifier::URef { + seed_uref: rng.gen(), + dictionary_item_key: rng.random_string(32..64), + }, + 4 => DictionaryItemIdentifier::DictionaryItem(rng.gen()), + _ => unreachable!(), + } + } +} + +impl ToBytes for DictionaryItemIdentifier { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + DictionaryItemIdentifier::AccountNamedKey { + hash: key, + dictionary_name, + dictionary_item_key, + } => { + ACCOUNT_NAMED_KEY_TAG.write_bytes(writer)?; + key.write_bytes(writer)?; + dictionary_name.write_bytes(writer)?; + dictionary_item_key.write_bytes(writer) + } + 
DictionaryItemIdentifier::ContractNamedKey { + hash: key, + dictionary_name, + dictionary_item_key, + } => { + CONTRACT_NAMED_KEY_TAG.write_bytes(writer)?; + key.write_bytes(writer)?; + dictionary_name.write_bytes(writer)?; + dictionary_item_key.write_bytes(writer) + } + DictionaryItemIdentifier::EntityNamedKey { + addr, + dictionary_name, + dictionary_item_key, + } => { + ENTITY_NAMED_KEY_TAG.write_bytes(writer)?; + addr.write_bytes(writer)?; + dictionary_name.write_bytes(writer)?; + dictionary_item_key.write_bytes(writer) + } + DictionaryItemIdentifier::URef { + seed_uref, + dictionary_item_key, + } => { + UREF_TAG.write_bytes(writer)?; + seed_uref.write_bytes(writer)?; + dictionary_item_key.write_bytes(writer) + } + DictionaryItemIdentifier::DictionaryItem(addr) => { + DICTIONARY_ITEM_TAG.write_bytes(writer)?; + addr.write_bytes(writer) + } + } + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + + match self { + DictionaryItemIdentifier::AccountNamedKey { + hash, + dictionary_name, + dictionary_item_key, + } => { + hash.serialized_length() + + dictionary_name.serialized_length() + + dictionary_item_key.serialized_length() + } + DictionaryItemIdentifier::ContractNamedKey { + hash, + dictionary_name, + dictionary_item_key, + } => { + hash.serialized_length() + + dictionary_name.serialized_length() + + dictionary_item_key.serialized_length() + } + DictionaryItemIdentifier::EntityNamedKey { + addr, + dictionary_name, + dictionary_item_key, + } => { + addr.serialized_length() + + dictionary_name.serialized_length() + + dictionary_item_key.serialized_length() + } + DictionaryItemIdentifier::URef { + seed_uref, + dictionary_item_key, + } => seed_uref.serialized_length() + dictionary_item_key.serialized_length(), + DictionaryItemIdentifier::DictionaryItem(addr) => addr.serialized_length(), + } + } +} + +impl FromBytes for DictionaryItemIdentifier { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = 
u8::from_bytes(bytes)?; + match tag { + ACCOUNT_NAMED_KEY_TAG => { + let (key, remainder) = FromBytes::from_bytes(remainder)?; + let (dictionary_name, remainder) = String::from_bytes(remainder)?; + let (dictionary_item_key, remainder) = String::from_bytes(remainder)?; + Ok(( + DictionaryItemIdentifier::AccountNamedKey { + hash: key, + dictionary_name, + dictionary_item_key, + }, + remainder, + )) + } + CONTRACT_NAMED_KEY_TAG => { + let (key, remainder) = FromBytes::from_bytes(remainder)?; + let (dictionary_name, remainder) = String::from_bytes(remainder)?; + let (dictionary_item_key, remainder) = String::from_bytes(remainder)?; + Ok(( + DictionaryItemIdentifier::ContractNamedKey { + hash: key, + dictionary_name, + dictionary_item_key, + }, + remainder, + )) + } + ENTITY_NAMED_KEY_TAG => { + let (addr, remainder) = FromBytes::from_bytes(remainder)?; + let (dictionary_name, remainder) = String::from_bytes(remainder)?; + let (dictionary_item_key, remainder) = String::from_bytes(remainder)?; + Ok(( + DictionaryItemIdentifier::EntityNamedKey { + addr, + dictionary_name, + dictionary_item_key, + }, + remainder, + )) + } + UREF_TAG => { + let (seed_uref, remainder) = FromBytes::from_bytes(remainder)?; + let (dictionary_item_key, remainder) = String::from_bytes(remainder)?; + Ok(( + DictionaryItemIdentifier::URef { + seed_uref, + dictionary_item_key, + }, + remainder, + )) + } + DICTIONARY_ITEM_TAG => { + let (addr, remainder) = FromBytes::from_bytes(remainder)?; + Ok((DictionaryItemIdentifier::DictionaryItem(addr), remainder)) + } + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use casper_types::testing::TestRng; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + + let val = DictionaryItemIdentifier::random(rng); + bytesrepr::test_serialization_roundtrip(&val); + } +} diff --git a/binary_port/src/entity_qualifier.rs b/binary_port/src/entity_qualifier.rs new file mode 100644 index 
0000000000..93f8bf82bb --- /dev/null +++ b/binary_port/src/entity_qualifier.rs @@ -0,0 +1,193 @@ +use super::dictionary_item_identifier::DictionaryItemIdentifier; +use crate::{KeyPrefix, PurseIdentifier}; +#[cfg(test)] +use casper_types::testing::TestRng; +use casper_types::{ + bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + Key, KeyTag, +}; +#[cfg(test)] +use rand::Rng; + +const ITEM_TAG: u8 = 0; +const ALL_ITEMS_TAG: u8 = 1; +const DICTIONARY_ITEM_TAG: u8 = 2; +const BALANCE_TAG: u8 = 3; +const ITEMS_BY_PREFIX_TAG: u8 = 4; + +/// A request to get data from the global state. +#[derive(Clone, Debug, PartialEq)] +pub enum GlobalStateEntityQualifier { + /// Gets an item from the global state. + Item { + /// Key under which data is stored. + base_key: Key, + /// Path under which the value is stored. + path: Vec, + }, + /// Get all items under the given key tag. + AllItems { + /// Key tag + key_tag: KeyTag, + }, + /// Get a dictionary item by its identifier. + DictionaryItem { + /// Dictionary item identifier. + identifier: DictionaryItemIdentifier, + }, + /// Get balance by state root and purse. + Balance { + /// Purse identifier. + purse_identifier: PurseIdentifier, + }, + ItemsByPrefix { + /// Key prefix to search for. 
+ key_prefix: KeyPrefix, + }, +} + +impl GlobalStateEntityQualifier { + #[cfg(test)] + pub(crate) fn random(rng: &mut TestRng) -> Self { + let gen_range = TestRng::gen_range(rng, 0..5); + random_for_variant(gen_range, rng) + } +} + +#[cfg(test)] +fn random_for_variant(gen_range: u8, rng: &mut TestRng) -> GlobalStateEntityQualifier { + match gen_range { + ITEM_TAG => { + let path_count = rng.gen_range(10..20); + GlobalStateEntityQualifier::Item { + base_key: rng.gen(), + path: std::iter::repeat_with(|| rng.random_string(32..64)) + .take(path_count) + .collect(), + } + } + ALL_ITEMS_TAG => GlobalStateEntityQualifier::AllItems { + key_tag: KeyTag::random(rng), + }, + DICTIONARY_ITEM_TAG => GlobalStateEntityQualifier::DictionaryItem { + identifier: DictionaryItemIdentifier::random(rng), + }, + BALANCE_TAG => GlobalStateEntityQualifier::Balance { + purse_identifier: PurseIdentifier::random(rng), + }, + ITEMS_BY_PREFIX_TAG => GlobalStateEntityQualifier::ItemsByPrefix { + key_prefix: KeyPrefix::random(rng), + }, + _ => unreachable!(), + } +} + +impl ToBytes for GlobalStateEntityQualifier { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + GlobalStateEntityQualifier::Item { base_key, path } => { + ITEM_TAG.write_bytes(writer)?; + base_key.write_bytes(writer)?; + path.write_bytes(writer) + } + GlobalStateEntityQualifier::AllItems { key_tag } => { + ALL_ITEMS_TAG.write_bytes(writer)?; + key_tag.write_bytes(writer) + } + GlobalStateEntityQualifier::DictionaryItem { identifier } => { + DICTIONARY_ITEM_TAG.write_bytes(writer)?; + identifier.write_bytes(writer) + } + GlobalStateEntityQualifier::Balance { purse_identifier } => { + BALANCE_TAG.write_bytes(writer)?; + purse_identifier.write_bytes(writer) + } + GlobalStateEntityQualifier::ItemsByPrefix { key_prefix } => { + 
ITEMS_BY_PREFIX_TAG.write_bytes(writer)?; + key_prefix.write_bytes(writer) + } + } + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + + match self { + GlobalStateEntityQualifier::Item { base_key, path } => { + base_key.serialized_length() + path.serialized_length() + } + GlobalStateEntityQualifier::AllItems { key_tag } => key_tag.serialized_length(), + GlobalStateEntityQualifier::DictionaryItem { identifier } => { + identifier.serialized_length() + } + GlobalStateEntityQualifier::Balance { purse_identifier } => { + purse_identifier.serialized_length() + } + GlobalStateEntityQualifier::ItemsByPrefix { key_prefix } => { + key_prefix.serialized_length() + } + } + } +} + +impl FromBytes for GlobalStateEntityQualifier { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + match tag { + ITEM_TAG => { + let (base_key, remainder) = FromBytes::from_bytes(remainder)?; + let (path, remainder) = FromBytes::from_bytes(remainder)?; + Ok(( + GlobalStateEntityQualifier::Item { base_key, path }, + remainder, + )) + } + ALL_ITEMS_TAG => { + let (key_tag, remainder) = FromBytes::from_bytes(remainder)?; + Ok((GlobalStateEntityQualifier::AllItems { key_tag }, remainder)) + } + DICTIONARY_ITEM_TAG => { + let (identifier, remainder) = FromBytes::from_bytes(remainder)?; + Ok(( + GlobalStateEntityQualifier::DictionaryItem { identifier }, + remainder, + )) + } + BALANCE_TAG => { + let (purse_identifier, remainder) = FromBytes::from_bytes(remainder)?; + Ok(( + GlobalStateEntityQualifier::Balance { purse_identifier }, + remainder, + )) + } + ITEMS_BY_PREFIX_TAG => { + let (key_prefix, remainder) = FromBytes::from_bytes(remainder)?; + Ok(( + GlobalStateEntityQualifier::ItemsByPrefix { key_prefix }, + remainder, + )) + } + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use casper_types::testing::TestRng; + + #[test] + fn bytesrepr_roundtrip() { + let 
rng = &mut TestRng::new(); + for i in 0..5 { + let qualifier = random_for_variant(i, rng); + bytesrepr::test_serialization_roundtrip(&qualifier); + } + } +} diff --git a/binary_port/src/era_identifier.rs b/binary_port/src/era_identifier.rs new file mode 100644 index 0000000000..92a5d58c39 --- /dev/null +++ b/binary_port/src/era_identifier.rs @@ -0,0 +1,90 @@ +#[cfg(test)] +use casper_types::testing::TestRng; +#[cfg(test)] +use rand::Rng; + +use casper_types::{ + bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + BlockIdentifier, EraId, +}; + +const ERA_TAG: u8 = 0; +const BLOCK_TAG: u8 = 1; + +/// Identifier for an era. +#[derive(Clone, Debug, PartialEq)] +pub enum EraIdentifier { + Era(EraId), + Block(BlockIdentifier), +} + +impl EraIdentifier { + #[cfg(test)] + pub(crate) fn random(rng: &mut TestRng) -> Self { + match rng.gen_range(0..2) { + ERA_TAG => EraIdentifier::Era(EraId::random(rng)), + BLOCK_TAG => EraIdentifier::Block(BlockIdentifier::random(rng)), + _ => unreachable!(), + } + } +} + +impl ToBytes for EraIdentifier { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + EraIdentifier::Era(era_id) => { + ERA_TAG.write_bytes(writer)?; + era_id.write_bytes(writer) + } + EraIdentifier::Block(block_id) => { + BLOCK_TAG.write_bytes(writer)?; + block_id.write_bytes(writer) + } + } + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + + match self { + EraIdentifier::Era(era_id) => era_id.serialized_length(), + EraIdentifier::Block(block_id) => block_id.serialized_length(), + } + } +} + +impl FromBytes for EraIdentifier { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + match tag { + ERA_TAG => { + let (era_id, remainder) = EraId::from_bytes(remainder)?; + 
Ok((EraIdentifier::Era(era_id), remainder)) + } + BLOCK_TAG => { + let (block_id, remainder) = BlockIdentifier::from_bytes(remainder)?; + Ok((EraIdentifier::Block(block_id), remainder)) + } + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use casper_types::testing::TestRng; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + + let val = EraIdentifier::random(rng); + bytesrepr::test_serialization_roundtrip(&val); + } +} diff --git a/binary_port/src/error.rs b/binary_port/src/error.rs new file mode 100644 index 0000000000..96ea2b7827 --- /dev/null +++ b/binary_port/src/error.rs @@ -0,0 +1,15 @@ +use thiserror::Error; + +#[derive(Error, Debug)] +pub enum Error { + #[error("Invalid command tag ({0})")] + InvalidCommandTag(u8), + #[error("Request too large: allowed {allowed} bytes, got {got} bytes")] + RequestTooLarge { allowed: u32, got: u32 }, + #[error("Empty request")] + EmptyRequest, + #[error(transparent)] + Io(#[from] std::io::Error), + #[error(transparent)] + BytesRepr(#[from] casper_types::bytesrepr::Error), +} diff --git a/binary_port/src/error_code.rs b/binary_port/src/error_code.rs new file mode 100644 index 0000000000..e3ea4b3a0a --- /dev/null +++ b/binary_port/src/error_code.rs @@ -0,0 +1,634 @@ +use core::{convert::TryFrom, fmt}; + +use casper_types::{InvalidDeploy, InvalidTransaction, InvalidTransactionV1}; + +use num_derive::FromPrimitive; +use num_traits::FromPrimitive; +#[cfg(test)] +use strum_macros::EnumIter; + +/// The error code indicating the result of handling the binary request. +#[derive(Debug, Copy, Clone, thiserror::Error, Eq, PartialEq, FromPrimitive)] +#[repr(u16)] +#[cfg_attr(test, derive(EnumIter))] +pub enum ErrorCode { + /// Request executed correctly. + #[error("request executed correctly")] + NoError = 0, + /// This function is disabled. + #[error("this function is disabled")] + FunctionDisabled = 1, + /// Data not found. 
+ #[error("data not found")] + NotFound = 2, + /// Root not found. + #[error("root not found")] + RootNotFound = 3, + /// Invalid item variant. + #[error("invalid item variant")] + InvalidItemVariant = 4, + /// Wasm preprocessing. + #[error("wasm preprocessing")] + WasmPreprocessing = 5, + /// Internal error. + #[error("internal error")] + InternalError = 6, + /// The query failed. + #[error("the query failed")] + FailedQuery = 7, + /// Bad request. + #[error("bad request")] + BadRequest = 8, + /// Received an unsupported type of request. + #[error("unsupported request")] + UnsupportedRequest = 9, + /// Dictionary URef not found. + #[error("dictionary URef not found")] + DictionaryURefNotFound = 10, + /// This node has no complete blocks. + #[error("no complete blocks")] + NoCompleteBlocks = 11, + /// The deploy had an invalid chain name + #[error("the deploy had an invalid chain name")] + InvalidDeployChainName = 12, + /// Deploy dependencies are no longer supported + #[error("the dependencies for this transaction are no longer supported")] + InvalidDeployDependenciesNoLongerSupported = 13, + /// The deploy sent to the network had an excessive size + #[error("the deploy had an excessive size")] + InvalidDeployExcessiveSize = 14, + /// The deploy sent to the network had an excessive time to live + #[error("the deploy had an excessive time to live")] + InvalidDeployExcessiveTimeToLive = 15, + /// The deploy sent to the network had a timestamp referencing a time that has yet to occur. + #[error("the deploys timestamp is in the future")] + InvalidDeployTimestampInFuture = 16, + /// The deploy sent to the network had an invalid body hash + #[error("the deploy had an invalid body hash")] + InvalidDeployBodyHash = 17, + /// The deploy sent to the network had an invalid deploy hash i.e. 
the provided deploy hash + /// didn't match the derived deploy hash + #[error("the deploy had an invalid deploy hash")] + InvalidDeployHash = 18, + /// The deploy sent to the network had an empty approval set + #[error("the deploy had no approvals")] + InvalidDeployEmptyApprovals = 19, + /// The deploy sent to the network had an invalid approval + #[error("the deploy had an invalid approval")] + InvalidDeployApproval = 20, + /// The deploy sent to the network had an excessive session args length + #[error("the deploy had an excessive session args length")] + InvalidDeployExcessiveSessionArgsLength = 21, + /// The deploy sent to the network had an excessive payment args length + #[error("the deploy had an excessive payment args length")] + InvalidDeployExcessivePaymentArgsLength = 22, + /// The deploy sent to the network had a missing payment amount + #[error("the deploy had a missing payment amount")] + InvalidDeployMissingPaymentAmount = 23, + /// The deploy sent to the network had a payment amount that was not parseable + #[error("the deploy sent to the network had a payment amount that was unable to be parsed")] + InvalidDeployFailedToParsePaymentAmount = 24, + /// The deploy sent to the network exceeded the block gas limit + #[error("the deploy sent to the network exceeded the block gas limit")] + InvalidDeployExceededBlockGasLimit = 25, + /// The deploy sent to the network was missing a transfer amount + #[error("the deploy sent to the network was missing a transfer amount")] + InvalidDeployMissingTransferAmount = 26, + /// The deploy sent to the network had a transfer amount that was unable to be parseable + #[error("the deploy sent to the network had a transfer amount that was unable to be parsed")] + InvalidDeployFailedToParseTransferAmount = 27, + /// The deploy sent to the network had a transfer amount that was insufficient + #[error("the deploy sent to the network had an insufficient transfer amount")] + InvalidDeployInsufficientTransferAmount = 28, + 
/// The deploy sent to the network had excessive approvals + #[error("the deploy sent to the network had excessive approvals")] + InvalidDeployExcessiveApprovals = 29, + /// The network was unable to calculate the gas limit for the deploy + #[error("the network was unable to calculate the gas limit associated with the deploy")] + InvalidDeployUnableToCalculateGasLimit = 30, + /// The network was unable to calculate the gas cost for the deploy + #[error("the network was unable to calculate the gas cost for the deploy")] + InvalidDeployUnableToCalculateGasCost = 31, + /// The deploy sent to the network was invalid for an unspecified reason + #[error("the deploy sent to the network was invalid for an unspecified reason")] + InvalidDeployUnspecified = 32, + /// The transaction sent to the network had an invalid chain name + #[error("the transaction sent to the network had an invalid chain name")] + InvalidTransactionChainName = 33, + /// The transaction sent to the network had an excessive size + #[error("the transaction sent to the network had an excessive size")] + InvalidTransactionExcessiveSize = 34, + /// The transaction sent to the network had an excessive time to live + #[error("the transaction sent to the network had an excessive time to live")] + InvalidTransactionExcessiveTimeToLive = 35, + /// The transaction sent to the network had a timestamp located in the future. 
+ #[error("the transaction sent to the network had a timestamp that has not yet occurred")] + InvalidTransactionTimestampInFuture = 36, + /// The transaction sent to the network had a provided body hash that conflicted with hash + /// derived by the network + #[error("the transaction sent to the network had an invalid body hash")] + InvalidTransactionBodyHash = 37, + /// The transaction sent to the network had a provided hash that conflicted with the hash + /// derived by the network + #[error("the transaction sent to the network had an invalid hash")] + InvalidTransactionHash = 38, + /// The transaction sent to the network had an empty approvals set + #[error("the transaction sent to the network had no approvals")] + InvalidTransactionEmptyApprovals = 39, + /// The transaction sent to the network had an invalid approval + #[error("the transaction sent to the network had an invalid approval")] + InvalidTransactionInvalidApproval = 40, + /// The transaction sent to the network had excessive args length + #[error("the transaction sent to the network had excessive args length")] + InvalidTransactionExcessiveArgsLength = 41, + /// The transaction sent to the network had excessive approvals + #[error("the transaction sent to the network had excessive approvals")] + InvalidTransactionExcessiveApprovals = 42, + /// The transaction sent to the network exceeds the block gas limit + #[error("the transaction sent to the network exceeds the networks block gas limit")] + InvalidTransactionExceedsBlockGasLimit = 43, + /// The transaction sent to the network had a missing arg + #[error("the transaction sent to the network was missing an argument")] + InvalidTransactionMissingArg = 44, + /// The transaction sent to the network had an argument with an unexpected type + #[error("the transaction sent to the network had an unexpected argument type")] + InvalidTransactionUnexpectedArgType = 45, + /// The transaction sent to the network had an invalid argument + #[error("the transaction 
sent to the network had an invalid argument")] + InvalidTransactionInvalidArg = 46, + /// The transaction sent to the network had an insufficient transfer amount + #[error("the transaction sent to the network had an insufficient transfer amount")] + InvalidTransactionInsufficientTransferAmount = 47, + /// The transaction sent to the network had a custom entry point when it should have a non + /// custom entry point. + #[error("the native transaction sent to the network should not have a custom entry point")] + InvalidTransactionEntryPointCannotBeCustom = 48, + /// The transaction sent to the network had a standard entry point when it must be custom. + #[error("the non-native transaction sent to the network must have a custom entry point")] + InvalidTransactionEntryPointMustBeCustom = 49, + /// The transaction sent to the network had empty module bytes + #[error("the transaction sent to the network had empty module bytes")] + InvalidTransactionEmptyModuleBytes = 50, + /// The transaction sent to the network had an invalid gas price conversion + #[error("the transaction sent to the network had an invalid gas price conversion")] + InvalidTransactionGasPriceConversion = 51, + /// The network was unable to calculate the gas limit for the transaction sent. + #[error("the network was unable to calculate the gas limit for the transaction sent")] + InvalidTransactionUnableToCalculateGasLimit = 52, + /// The network was unable to calculate the gas cost for the transaction sent. 
+ #[error("the network was unable to calculate the gas cost for the transaction sent.")] + InvalidTransactionUnableToCalculateGasCost = 53, + /// The transaction sent to the network had an invalid pricing mode + #[error("the transaction sent to the network had an invalid pricing mode")] + InvalidTransactionPricingMode = 54, + /// The transaction sent to the network was invalid for an unspecified reason + #[error("the transaction sent to the network was invalid for an unspecified reason")] + InvalidTransactionUnspecified = 55, + /// As the various enums are tagged non_exhaustive, it is possible that in the future none of + /// these previous errors cover the error that occurred, therefore we need some catchall in + /// the case that nothing else works. + #[error("the transaction or deploy sent to the network was invalid for an unspecified reason")] + InvalidTransactionOrDeployUnspecified = 56, + /// The switch block for the requested era was not found + #[error("the switch block for the requested era was not found")] + SwitchBlockNotFound = 57, + #[error("the parent of the switch block for the requested era was not found")] + /// The parent of the switch block for the requested era was not found + SwitchBlockParentNotFound = 58, + #[error("cannot serve rewards stored in V1 format")] + /// Cannot serve rewards stored in V1 format + UnsupportedRewardsV1Request = 59, + /// Invalid binary request header versions. 
+ #[error("binary request header versions mismatch")] + CommandHeaderVersionMismatch = 60, + /// Blockchain is empty + #[error("blockchain is empty")] + EmptyBlockchain = 61, + /// Expected deploy, but got transaction + #[error("expected deploy, got transaction")] + ExpectedDeploy = 62, + /// Expected transaction, but got deploy + #[error("expected transaction V1, got deploy")] + ExpectedTransaction = 63, + /// Transaction has expired + #[error("transaction has expired")] + TransactionExpired = 64, + /// Transactions parameters are missing or incorrect + #[error("missing or incorrect transaction parameters")] + MissingOrIncorrectParameters = 65, + /// No such addressable entity + #[error("no such addressable entity")] + NoSuchAddressableEntity = 66, + // No such contract at hash + #[error("no such contract at hash")] + NoSuchContractAtHash = 67, + /// No such entry point + #[error("no such entry point")] + NoSuchEntryPoint = 68, + /// No such package at hash + #[error("no such package at hash")] + NoSuchPackageAtHash = 69, + /// Invalid entity at version + #[error("invalid entity at version")] + InvalidEntityAtVersion = 70, + /// Disabled entity at version + #[error("disabled entity at version")] + DisabledEntityAtVersion = 71, + /// Missing entity at version + #[error("missing entity at version")] + MissingEntityAtVersion = 72, + /// Invalid associated keys + #[error("invalid associated keys")] + InvalidAssociatedKeys = 73, + /// Insufficient signature weight + #[error("insufficient signature weight")] + InsufficientSignatureWeight = 74, + /// Insufficient balance + #[error("insufficient balance")] + InsufficientBalance = 75, + /// Unknown balance + #[error("unknown balance")] + UnknownBalance = 76, + /// Invalid payment variant for deploy + #[error("invalid payment variant for deploy")] + DeployInvalidPaymentVariant = 77, + /// Missing payment amount for deploy + #[error("missing payment amount for deploy")] + DeployMissingPaymentAmount = 78, + /// Failed to 
parse payment amount for deploy + #[error("failed to parse payment amount for deploy")] + DeployFailedToParsePaymentAmount = 79, + /// Missing transfer target for deploy + #[error("missing transfer target for deploy")] + DeployMissingTransferTarget = 80, + /// Missing module bytes for deploy + #[error("missing module bytes for deploy")] + DeployMissingModuleBytes = 81, + /// Entry point cannot be 'call' + #[error("entry point cannot be 'call'")] + InvalidTransactionEntryPointCannotBeCall = 82, + /// Invalid transaction lane + #[error("invalid transaction lane")] + InvalidTransactionInvalidTransactionLane = 83, + /// Gas price tolerance too low + #[error("gas price tolerance too low")] + GasPriceToleranceTooLow = 84, + /// Received V1 Transaction for spec exec. + #[error("received v1 transaction for speculative execution")] + ReceivedV1Transaction = 85, + /// Purse was not found for given identifier. + #[error("purse was not found for given identifier")] + PurseNotFound = 86, + /// Too many requests per second. + #[error("request was throttled")] + RequestThrottled = 87, + /// Expected named arguments. + #[error("expected named arguments")] + ExpectedNamedArguments = 88, + /// Invalid transaction runtime. 
+ #[error("invalid transaction runtime")] + InvalidTransactionRuntime = 89, + /// Key in transfer request malformed + #[error("malformed transfer record key")] + TransferRecordMalformedKey = 90, + /// Malformed information request + #[error("malformed information request")] + MalformedInformationRequest = 91, + /// Malformed binary version + #[error("not enough bytes to read version of the binary request header")] + TooLittleBytesForRequestHeaderVersion = 92, + /// Malformed command header version + #[error("malformed commnd header version")] + MalformedCommandHeaderVersion = 93, + /// Malformed header + #[error("malformed command header")] + MalformedCommandHeader = 94, + /// Malformed command + #[error("malformed command")] + MalformedCommand = 95, + /// No matching lane for transaction + #[error("couldn't associate a transaction lane with the transaction")] + InvalidTransactionNoLaneMatches = 96, + /// Entry point must be 'call' + #[error("entry point must be 'call'")] + InvalidTransactionEntryPointMustBeCall = 97, + /// One of the payloads field cannot be deserialized + #[error("One of the payloads field cannot be deserialized")] + InvalidTransactionCannotDeserializeField = 98, + /// Can't calculate hash of the payload fields + #[error("Can't calculate hash of the payload fields")] + InvalidTransactionCannotCalculateFieldsHash = 99, + /// Unexpected fields in payload + #[error("Unexpected fields in payload")] + InvalidTransactionUnexpectedFields = 100, + /// Expected bytes arguments + #[error("expected bytes arguments")] + InvalidTransactionExpectedBytesArguments = 101, + /// Missing seed field in transaction + #[error("Missing seed field in transaction")] + InvalidTransactionMissingSeed = 102, + /// Pricing mode not supported + #[error("Pricing mode not supported")] + PricingModeNotSupported = 103, + /// Gas limit not supported + #[error("Gas limit not supported")] + InvalidDeployGasLimitNotSupported = 104, + /// Invalid runtime for Transaction::Deploy + 
#[error("Invalid runtime for Transaction::Deploy")] + InvalidDeployInvalidRuntime = 105, + /// Deploy exceeds wasm lane gas limit + #[error("Transaction::Deploy exceeds lane gas limit")] + InvalidDeployExceededWasmLaneGasLimit = 106, + /// Invalid runtime for Transaction::Deploy + #[error("Invalid payment amount for Transaction::Deploy")] + InvalidDeployInvalidPaymentAmount = 107, + /// Insufficient burn amount for Transaction::V1 + #[error("Insufficient burn amount for Transaction::V1")] + InvalidTransactionInsufficientBurnAmount = 108, + /// Invalid payment amount for Transaction::V1 + #[error("Invalid payment amount for Transaction::V1")] + InvalidTransactionInvalidPaymentAmount = 109, + /// Unexpected entry point for Transaction::V1 + #[error("Unexpected entry point for Transaction::V1")] + InvalidTransactionUnexpectedEntryPoint = 110, + /// Cannot serialize transaction + #[error("Transaction has malformed binary representation")] + TransactionHasMalformedBinaryRepresentation = 111, + #[error("Transaction includes an argument named amount with a value below a relevant limit")] + InsufficientAmountArgValue = 112, + #[error( + "Transaction attempts to set a minimum delegation amount below the lowest allowed value" + )] + InvalidMinimumDelegationAmount = 113, + #[error( + "Transaction attempts to set a maximum delegation amount above the highest allowed value" + )] + InvalidMaximumDelegationAmount = 114, + #[error("Transaction attempts to set a reserved slots count above the highest allowed value")] + InvalidReservedSlots = 115, + #[error("Transaction attempts to set a delegation amount above the highest allowed value")] + InvalidDelegationAmount = 116, + #[error("the transaction invocation target is unsupported under V2 runtime")] + UnsupportedInvocationTarget = 117, +} + +impl TryFrom for ErrorCode { + type Error = UnknownErrorCode; + + fn try_from(value: u16) -> Result { + FromPrimitive::from_u16(value).ok_or(UnknownErrorCode) + } +} + +/// Error indicating 
that the error code is unknown. +#[derive(Debug, Clone, Copy)] +pub struct UnknownErrorCode; + +impl fmt::Display for UnknownErrorCode { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "unknown node error code") + } +} + +impl std::error::Error for UnknownErrorCode {} + +impl From for ErrorCode { + fn from(value: InvalidTransaction) -> Self { + match value { + InvalidTransaction::Deploy(invalid_deploy) => ErrorCode::from(invalid_deploy), + InvalidTransaction::V1(invalid_transaction) => ErrorCode::from(invalid_transaction), + _ => ErrorCode::InvalidTransactionOrDeployUnspecified, + } + } +} + +impl From for ErrorCode { + fn from(value: InvalidDeploy) -> Self { + match value { + InvalidDeploy::InvalidChainName { .. } => ErrorCode::InvalidDeployChainName, + InvalidDeploy::DependenciesNoLongerSupported => { + ErrorCode::InvalidDeployDependenciesNoLongerSupported + } + InvalidDeploy::ExcessiveSize(_) => ErrorCode::InvalidDeployExcessiveSize, + InvalidDeploy::ExcessiveTimeToLive { .. } => { + ErrorCode::InvalidDeployExcessiveTimeToLive + } + InvalidDeploy::TimestampInFuture { .. } => ErrorCode::InvalidDeployTimestampInFuture, + InvalidDeploy::InvalidBodyHash => ErrorCode::InvalidDeployBodyHash, + InvalidDeploy::InvalidDeployHash => ErrorCode::InvalidDeployHash, + InvalidDeploy::EmptyApprovals => ErrorCode::InvalidDeployEmptyApprovals, + InvalidDeploy::InvalidApproval { .. } => ErrorCode::InvalidDeployApproval, + InvalidDeploy::ExcessiveSessionArgsLength { .. } => { + ErrorCode::InvalidDeployExcessiveSessionArgsLength + } + InvalidDeploy::ExcessivePaymentArgsLength { .. } => { + ErrorCode::InvalidDeployExcessivePaymentArgsLength + } + InvalidDeploy::MissingPaymentAmount => ErrorCode::InvalidDeployMissingPaymentAmount, + InvalidDeploy::FailedToParsePaymentAmount => { + ErrorCode::InvalidDeployFailedToParsePaymentAmount + } + InvalidDeploy::ExceededBlockGasLimit { .. 
} => { + ErrorCode::InvalidDeployExceededBlockGasLimit + } + InvalidDeploy::MissingTransferAmount => ErrorCode::InvalidDeployMissingTransferAmount, + InvalidDeploy::FailedToParseTransferAmount => { + ErrorCode::InvalidDeployFailedToParseTransferAmount + } + InvalidDeploy::InsufficientTransferAmount { .. } => { + ErrorCode::InvalidDeployInsufficientTransferAmount + } + InvalidDeploy::ExcessiveApprovals { .. } => ErrorCode::InvalidDeployExcessiveApprovals, + InvalidDeploy::UnableToCalculateGasLimit => { + ErrorCode::InvalidDeployUnableToCalculateGasLimit + } + InvalidDeploy::UnableToCalculateGasCost => { + ErrorCode::InvalidDeployUnableToCalculateGasCost + } + InvalidDeploy::GasPriceToleranceTooLow { .. } => ErrorCode::GasPriceToleranceTooLow, + InvalidDeploy::GasLimitNotSupported => ErrorCode::InvalidDeployGasLimitNotSupported, + InvalidDeploy::InvalidRuntime => ErrorCode::InvalidDeployInvalidRuntime, + InvalidDeploy::NoLaneMatch => ErrorCode::InvalidTransactionNoLaneMatches, + InvalidDeploy::ExceededLaneGasLimit { .. } => { + ErrorCode::InvalidDeployExceededWasmLaneGasLimit + } + InvalidDeploy::InvalidPaymentAmount => ErrorCode::InvalidDeployInvalidPaymentAmount, + InvalidDeploy::PricingModeNotSupported => ErrorCode::PricingModeNotSupported, + _ => ErrorCode::InvalidDeployUnspecified, + } + } +} + +impl From for ErrorCode { + fn from(value: InvalidTransactionV1) -> Self { + match value { + InvalidTransactionV1::InvalidChainName { .. } => ErrorCode::InvalidTransactionChainName, + InvalidTransactionV1::ExcessiveSize(_) => ErrorCode::InvalidTransactionExcessiveSize, + InvalidTransactionV1::ExcessiveTimeToLive { .. } => { + ErrorCode::InvalidTransactionExcessiveTimeToLive + } + InvalidTransactionV1::TimestampInFuture { .. 
} => { + ErrorCode::InvalidTransactionTimestampInFuture + } + InvalidTransactionV1::InvalidBodyHash => ErrorCode::InvalidTransactionBodyHash, + InvalidTransactionV1::InvalidTransactionHash => ErrorCode::InvalidTransactionHash, + InvalidTransactionV1::EmptyApprovals => ErrorCode::InvalidTransactionEmptyApprovals, + InvalidTransactionV1::InvalidApproval { .. } => { + ErrorCode::InvalidTransactionInvalidApproval + } + InvalidTransactionV1::ExcessiveArgsLength { .. } => { + ErrorCode::InvalidTransactionExcessiveArgsLength + } + InvalidTransactionV1::ExcessiveApprovals { .. } => { + ErrorCode::InvalidTransactionExcessiveApprovals + } + InvalidTransactionV1::ExceedsBlockGasLimit { .. } => { + ErrorCode::InvalidTransactionExceedsBlockGasLimit + } + InvalidTransactionV1::MissingArg { .. } => ErrorCode::InvalidTransactionMissingArg, + InvalidTransactionV1::UnexpectedArgType { .. } => { + ErrorCode::InvalidTransactionUnexpectedArgType + } + InvalidTransactionV1::InvalidArg { .. } => ErrorCode::InvalidTransactionInvalidArg, + InvalidTransactionV1::InsufficientTransferAmount { .. } => { + ErrorCode::InvalidTransactionInsufficientTransferAmount + } + InvalidTransactionV1::EntryPointCannotBeCustom { .. } => { + ErrorCode::InvalidTransactionEntryPointCannotBeCustom + } + InvalidTransactionV1::EntryPointMustBeCustom { .. } => { + ErrorCode::InvalidTransactionEntryPointMustBeCustom + } + InvalidTransactionV1::EmptyModuleBytes => ErrorCode::InvalidTransactionEmptyModuleBytes, + InvalidTransactionV1::GasPriceConversion { .. } => { + ErrorCode::InvalidTransactionGasPriceConversion + } + InvalidTransactionV1::UnableToCalculateGasLimit => { + ErrorCode::InvalidTransactionUnableToCalculateGasLimit + } + InvalidTransactionV1::UnableToCalculateGasCost => { + ErrorCode::InvalidTransactionUnableToCalculateGasCost + } + InvalidTransactionV1::InvalidPricingMode { .. 
} => { + ErrorCode::InvalidTransactionPricingMode + } + InvalidTransactionV1::EntryPointCannotBeCall => { + ErrorCode::InvalidTransactionEntryPointCannotBeCall + } + InvalidTransactionV1::InvalidTransactionLane(_) => { + ErrorCode::InvalidTransactionInvalidTransactionLane + } + InvalidTransactionV1::GasPriceToleranceTooLow { .. } => { + ErrorCode::GasPriceToleranceTooLow + } + InvalidTransactionV1::ExpectedNamedArguments => ErrorCode::ExpectedNamedArguments, + InvalidTransactionV1::InvalidTransactionRuntime { .. } => { + ErrorCode::InvalidTransactionRuntime + } + InvalidTransactionV1::NoLaneMatch => ErrorCode::InvalidTransactionNoLaneMatches, + InvalidTransactionV1::EntryPointMustBeCall { .. } => { + ErrorCode::InvalidTransactionEntryPointMustBeCall + } + InvalidTransactionV1::CouldNotDeserializeField { .. } => { + ErrorCode::InvalidTransactionCannotDeserializeField + } + InvalidTransactionV1::CannotCalculateFieldsHash => { + ErrorCode::InvalidTransactionCannotCalculateFieldsHash + } + InvalidTransactionV1::UnexpectedTransactionFieldEntries => { + ErrorCode::InvalidTransactionUnexpectedFields + } + InvalidTransactionV1::ExpectedBytesArguments => { + ErrorCode::InvalidTransactionExpectedBytesArguments + } + InvalidTransactionV1::MissingSeed => ErrorCode::InvalidTransactionMissingSeed, + InvalidTransactionV1::PricingModeNotSupported => ErrorCode::PricingModeNotSupported, + InvalidTransactionV1::InsufficientBurnAmount { .. } => { + ErrorCode::InvalidTransactionInsufficientBurnAmount + } + InvalidTransactionV1::InvalidPaymentAmount => { + ErrorCode::InvalidTransactionInvalidPaymentAmount + } + InvalidTransactionV1::UnexpectedEntryPoint { .. } => { + ErrorCode::InvalidTransactionUnexpectedEntryPoint + } + InvalidTransactionV1::CouldNotSerializeTransaction { .. } => { + ErrorCode::TransactionHasMalformedBinaryRepresentation + } + InvalidTransactionV1::InsufficientAmount { .. 
} => { + ErrorCode::InsufficientAmountArgValue + } + InvalidTransactionV1::InvalidMinimumDelegationAmount { .. } => { + ErrorCode::InvalidMinimumDelegationAmount + } + InvalidTransactionV1::InvalidMaximumDelegationAmount { .. } => { + ErrorCode::InvalidMaximumDelegationAmount + } + InvalidTransactionV1::InvalidReservedSlots { .. } => ErrorCode::InvalidReservedSlots, + InvalidTransactionV1::InvalidDelegationAmount { .. } => { + ErrorCode::InvalidDelegationAmount + } + InvalidTransactionV1::UnsupportedInvocationTarget { .. } => { + ErrorCode::UnsupportedInvocationTarget + } + _other => ErrorCode::InvalidTransactionUnspecified, + } + } +} + +#[cfg(test)] +mod tests { + use std::convert::TryFrom; + + use crate::ErrorCode; + use casper_types::{InvalidDeploy, InvalidTransactionV1}; + use strum::IntoEnumIterator; + + #[test] + fn verify_all_invalid_transaction_v1_errors_have_error_codes() { + for error in InvalidTransactionV1::iter() { + let code = ErrorCode::from(error.clone()); + assert_ne!( + code, + ErrorCode::InvalidTransactionUnspecified, + "Seems like InvalidTransactionV1 {error:?} has no corresponding error code" + ); + assert_ne!( + code, + ErrorCode::InvalidDeployUnspecified, + "Seems like InvalidTransactionV1 {error:?} has no corresponding error code" + ) + } + } + + #[test] + fn verify_all_invalid_deploy_errors_have_error_codes() { + for error in InvalidDeploy::iter() { + let code = ErrorCode::from(error.clone()); + assert_ne!( + code, + ErrorCode::InvalidTransactionUnspecified, + "Seems like InvalidDeploy {error} has no corresponding error code" + ); + assert_ne!( + code, + ErrorCode::InvalidDeployUnspecified, + "Seems like InvalidDeploy {error} has no corresponding error code" + ) + } + } + + #[test] + fn try_from_decoded_all_variants() { + for variant in ErrorCode::iter() { + let as_int = variant as u16; + let decoded = ErrorCode::try_from(as_int); + assert!( + decoded.is_ok(), + "variant {} not covered by TryFrom implementation", + as_int + ); + 
assert_eq!(decoded.unwrap(), variant); + } + } +} diff --git a/binary_port/src/get_request.rs b/binary_port/src/get_request.rs new file mode 100644 index 0000000000..483a05f9ae --- /dev/null +++ b/binary_port/src/get_request.rs @@ -0,0 +1,165 @@ +use casper_types::{ + bytesrepr::{self, Bytes, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + Digest, +}; + +use crate::state_request::GlobalStateRequest; + +#[cfg(test)] +use casper_types::testing::TestRng; +#[cfg(test)] +use rand::Rng; + +const RECORD_TAG: u8 = 0; +const INFORMATION_TAG: u8 = 1; +const STATE_TAG: u8 = 2; +const TRIE_TAG: u8 = 3; + +/// A request to get data from the node. +#[derive(Clone, Debug, PartialEq)] +pub enum GetRequest { + /// Retrieves a record from the node. + Record { + /// Type tag of the record to retrieve. + record_type_tag: u16, + /// Key encoded into bytes. + key: Vec, + }, + /// Retrieves information from the node. + Information { + /// Type tag of the information to retrieve. + info_type_tag: u16, + /// Key encoded into bytes. + key: Vec, + }, + /// Retrieves data from the global state. + State(Box), + /// Get a trie by its Digest. + Trie { + /// A trie key. 
+ trie_key: Digest, + }, +} + +impl GetRequest { + #[cfg(test)] + pub(crate) fn random(rng: &mut TestRng) -> Self { + match rng.gen_range(0..4) { + 0 => GetRequest::Record { + record_type_tag: rng.gen(), + key: rng.random_vec(16..32), + }, + 1 => GetRequest::Information { + info_type_tag: rng.gen(), + key: rng.random_vec(16..32), + }, + 2 => GetRequest::State(Box::new(GlobalStateRequest::random(rng))), + 3 => GetRequest::Trie { + trie_key: Digest::random(rng), + }, + _ => unreachable!(), + } + } +} + +impl ToBytes for GetRequest { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + GetRequest::Record { + record_type_tag, + key, + } => { + RECORD_TAG.write_bytes(writer)?; + record_type_tag.write_bytes(writer)?; + key.write_bytes(writer) + } + GetRequest::Information { info_type_tag, key } => { + INFORMATION_TAG.write_bytes(writer)?; + info_type_tag.write_bytes(writer)?; + key.write_bytes(writer) + } + GetRequest::State(req) => { + STATE_TAG.write_bytes(writer)?; + req.write_bytes(writer) + } + GetRequest::Trie { trie_key } => { + TRIE_TAG.write_bytes(writer)?; + trie_key.write_bytes(writer) + } + } + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + + match self { + GetRequest::Record { + record_type_tag, + key, + } => record_type_tag.serialized_length() + key.serialized_length(), + GetRequest::Information { info_type_tag, key } => { + info_type_tag.serialized_length() + key.serialized_length() + } + GetRequest::State(req) => req.serialized_length(), + GetRequest::Trie { trie_key } => trie_key.serialized_length(), + } + } +} + +impl FromBytes for GetRequest { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = FromBytes::from_bytes(bytes)?; + match tag { + RECORD_TAG => { + let (record_type_tag, 
remainder) = FromBytes::from_bytes(remainder)?; + let (key, remainder) = Bytes::from_bytes(remainder)?; + Ok(( + GetRequest::Record { + record_type_tag, + key: key.into(), + }, + remainder, + )) + } + INFORMATION_TAG => { + let (info_type_tag, remainder) = FromBytes::from_bytes(remainder)?; + let (key, remainder) = Bytes::from_bytes(remainder)?; + Ok(( + GetRequest::Information { + info_type_tag, + key: key.into(), + }, + remainder, + )) + } + STATE_TAG => { + let (req, remainder) = FromBytes::from_bytes(remainder)?; + Ok((GetRequest::State(Box::new(req)), remainder)) + } + TRIE_TAG => { + let (trie_key, remainder) = FromBytes::from_bytes(remainder)?; + Ok((GetRequest::Trie { trie_key }, remainder)) + } + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use casper_types::testing::TestRng; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + + let val = GetRequest::random(rng); + bytesrepr::test_serialization_roundtrip(&val); + } +} diff --git a/binary_port/src/global_state_query_result.rs b/binary_port/src/global_state_query_result.rs new file mode 100644 index 0000000000..009e5b60a5 --- /dev/null +++ b/binary_port/src/global_state_query_result.rs @@ -0,0 +1,119 @@ +//! The result of the query for the global state value. + +use casper_types::{ + bytesrepr::{self, FromBytes, ToBytes}, + global_state::TrieMerkleProof, + Key, StoredValue, +}; + +#[cfg(test)] +use casper_types::testing::TestRng; + +#[cfg(test)] +use casper_types::{ByteCode, ByteCodeKind}; +use serde::Serialize; + +/// Carries the successful result of the global state query. +#[derive(Debug, PartialEq, Clone, Serialize)] +pub struct GlobalStateQueryResult { + /// Stored value. + value: StoredValue, + /// Proof. + merkle_proof: Vec>, +} + +impl GlobalStateQueryResult { + /// Creates the global state query result. 
+ pub fn new(value: StoredValue, merkle_proof: Vec>) -> Self { + Self { + value, + merkle_proof, + } + } + + /// Returns the stored value. + pub fn value(&self) -> &StoredValue { + &self.value + } + + /// Returns the stored value and the merkle proof. + pub fn into_inner(self) -> (StoredValue, Vec>) { + (self.value, self.merkle_proof) + } + + #[cfg(test)] + pub(crate) fn random_invalid(rng: &mut TestRng) -> Self { + use casper_types::{global_state::TrieMerkleProofStep, CLValue}; + use rand::Rng; + // Note: This does NOT create a logically-valid struct. Instance created by this function + // should be used in `bytesrepr` tests only. + + let mut merkle_proof = vec![]; + for _ in 0..rng.gen_range(0..10) { + let stored_value = StoredValue::CLValue( + CLValue::from_t(rng.gen::()).expect("should create CLValue"), + ); + let steps = (0..rng.gen_range(0..10)) + .map(|_| TrieMerkleProofStep::random(rng)) + .collect(); + merkle_proof.push(TrieMerkleProof::new(rng.gen(), stored_value, steps)); + } + + Self { + value: StoredValue::ByteCode(ByteCode::new( + ByteCodeKind::V1CasperWasm, + rng.random_vec(10..20), + )), + merkle_proof, + } + } +} + +impl ToBytes for GlobalStateQueryResult { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + let GlobalStateQueryResult { + value, + merkle_proof, + } = self; + value.write_bytes(writer)?; + merkle_proof.write_bytes(writer) + } + + fn serialized_length(&self) -> usize { + self.value.serialized_length() + self.merkle_proof.serialized_length() + } +} + +impl FromBytes for GlobalStateQueryResult { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (value, remainder) = FromBytes::from_bytes(bytes)?; + let (merkle_proof, remainder) = FromBytes::from_bytes(remainder)?; + Ok(( + GlobalStateQueryResult { + value, + merkle_proof, + 
}, + remainder, + )) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use casper_types::testing::TestRng; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + + let val = GlobalStateQueryResult::random_invalid(rng); + bytesrepr::test_serialization_roundtrip(&val); + } +} diff --git a/binary_port/src/information_request.rs b/binary_port/src/information_request.rs new file mode 100644 index 0000000000..d5e25d4d35 --- /dev/null +++ b/binary_port/src/information_request.rs @@ -0,0 +1,711 @@ +use core::convert::TryFrom; + +#[cfg(test)] +use rand::Rng; + +use crate::{get_request::GetRequest, EraIdentifier}; +#[cfg(test)] +use casper_types::testing::TestRng; +use casper_types::{ + account::AccountHash, + bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + contracts::{ContractHash, ContractPackageHash}, + system::auction::DelegatorKind, + BlockIdentifier, EntityAddr, GlobalStateIdentifier, PackageAddr, PublicKey, TransactionHash, +}; + +/// Request for information from the node. +#[derive(Clone, Debug, PartialEq)] +pub enum InformationRequest { + /// Returns the block header by an identifier, no identifier indicates the latest block. + BlockHeader(Option), + /// Returns the block with signatures by an identifier, no identifier indicates the latest + /// block. + BlockWithSignatures(Option), + /// Returns a transaction with approvals and execution info for a given hash. + Transaction { + /// Hash of the transaction to retrieve. + hash: TransactionHash, + /// Whether to return the deploy with the finalized approvals substituted. + with_finalized_approvals: bool, + }, + /// Returns connected peers. + Peers, + /// Returns node uptime. + Uptime, + /// Returns last progress of the sync process. + LastProgress, + /// Returns current state of the main reactor. + ReactorState, + /// Returns network name. + NetworkName, + /// Returns consensus validator changes. + ConsensusValidatorChanges, + /// Returns status of the BlockSynchronizer. 
+ BlockSynchronizerStatus, + /// Returns the available block range. + AvailableBlockRange, + /// Returns info about next upgrade. + NextUpgrade, + /// Returns consensus status. + ConsensusStatus, + /// Returns chainspec raw bytes. + ChainspecRawBytes, + /// Returns the status information of the node. + NodeStatus, + /// Returns the latest switch block header. + LatestSwitchBlockHeader, + /// Returns the reward for a validator or a delegator in a specific era. + Reward { + /// Identifier of the era to get the reward for. Must point to either a switch block or + /// a valid `EraId`. If `None`, the reward for the latest switch block is returned. + era_identifier: Option, + /// Public key of the validator to get the reward for. + validator: Box, + /// Identity of the delegator to get the reward for. + /// If `None`, the reward for the validator is returned. + delegator: Option>, + }, + /// Returns the current Casper protocol version. + ProtocolVersion, + /// Returns the contract package by an identifier. + Package { + /// Global state identifier, `None` means "latest block state". + state_identifier: Option, + /// Identifier of the contract package to retrieve. + identifier: PackageIdentifier, + }, + /// Returns the entity by an identifier. + Entity { + /// Global state identifier, `None` means "latest block state". + state_identifier: Option, + /// Identifier of the entity to retrieve. + identifier: EntityIdentifier, + /// Whether to return the bytecode with the entity. + include_bytecode: bool, + }, +} + +impl InformationRequest { + /// Returns the tag of the request. + pub fn tag(&self) -> InformationRequestTag { + match self { + InformationRequest::BlockHeader(_) => InformationRequestTag::BlockHeader, + InformationRequest::BlockWithSignatures(_) => { + InformationRequestTag::BlockWithSignatures + } + InformationRequest::Transaction { .. 
} => InformationRequestTag::Transaction, + InformationRequest::Peers => InformationRequestTag::Peers, + InformationRequest::Uptime => InformationRequestTag::Uptime, + InformationRequest::LastProgress => InformationRequestTag::LastProgress, + InformationRequest::ReactorState => InformationRequestTag::ReactorState, + InformationRequest::NetworkName => InformationRequestTag::NetworkName, + InformationRequest::ConsensusValidatorChanges => { + InformationRequestTag::ConsensusValidatorChanges + } + InformationRequest::BlockSynchronizerStatus => { + InformationRequestTag::BlockSynchronizerStatus + } + InformationRequest::AvailableBlockRange => InformationRequestTag::AvailableBlockRange, + InformationRequest::NextUpgrade => InformationRequestTag::NextUpgrade, + InformationRequest::ConsensusStatus => InformationRequestTag::ConsensusStatus, + InformationRequest::ChainspecRawBytes => InformationRequestTag::ChainspecRawBytes, + InformationRequest::NodeStatus => InformationRequestTag::NodeStatus, + InformationRequest::LatestSwitchBlockHeader => { + InformationRequestTag::LatestSwitchBlockHeader + } + InformationRequest::Reward { .. } => InformationRequestTag::Reward, + InformationRequest::ProtocolVersion => InformationRequestTag::ProtocolVersion, + InformationRequest::Package { .. } => InformationRequestTag::Package, + InformationRequest::Entity { .. 
} => InformationRequestTag::Entity, + } + } + + #[cfg(test)] + pub(crate) fn random(rng: &mut TestRng) -> Self { + match InformationRequestTag::random(rng) { + InformationRequestTag::BlockHeader => InformationRequest::BlockHeader( + rng.gen::().then(|| BlockIdentifier::random(rng)), + ), + InformationRequestTag::BlockWithSignatures => InformationRequest::BlockWithSignatures( + rng.gen::().then(|| BlockIdentifier::random(rng)), + ), + InformationRequestTag::Transaction => InformationRequest::Transaction { + hash: TransactionHash::random(rng), + with_finalized_approvals: rng.gen(), + }, + InformationRequestTag::Peers => InformationRequest::Peers, + InformationRequestTag::Uptime => InformationRequest::Uptime, + InformationRequestTag::LastProgress => InformationRequest::LastProgress, + InformationRequestTag::ReactorState => InformationRequest::ReactorState, + InformationRequestTag::NetworkName => InformationRequest::NetworkName, + InformationRequestTag::ConsensusValidatorChanges => { + InformationRequest::ConsensusValidatorChanges + } + InformationRequestTag::BlockSynchronizerStatus => { + InformationRequest::BlockSynchronizerStatus + } + InformationRequestTag::AvailableBlockRange => InformationRequest::AvailableBlockRange, + InformationRequestTag::NextUpgrade => InformationRequest::NextUpgrade, + InformationRequestTag::ConsensusStatus => InformationRequest::ConsensusStatus, + InformationRequestTag::ChainspecRawBytes => InformationRequest::ChainspecRawBytes, + InformationRequestTag::NodeStatus => InformationRequest::NodeStatus, + InformationRequestTag::LatestSwitchBlockHeader => { + InformationRequest::LatestSwitchBlockHeader + } + InformationRequestTag::Reward => InformationRequest::Reward { + era_identifier: rng.gen::().then(|| EraIdentifier::random(rng)), + validator: PublicKey::random(rng).into(), + delegator: rng + .gen::() + .then(|| Box::new(DelegatorKind::PublicKey(PublicKey::random(rng)))), + }, + InformationRequestTag::ProtocolVersion => 
InformationRequest::ProtocolVersion, + InformationRequestTag::Package => InformationRequest::Package { + state_identifier: rng + .gen::() + .then(|| GlobalStateIdentifier::random(rng)), + identifier: PackageIdentifier::random(rng), + }, + InformationRequestTag::Entity => InformationRequest::Entity { + state_identifier: rng + .gen::() + .then(|| GlobalStateIdentifier::random(rng)), + identifier: EntityIdentifier::random(rng), + include_bytecode: rng.gen(), + }, + } + } +} + +impl ToBytes for InformationRequest { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + InformationRequest::BlockHeader(block_identifier) => { + block_identifier.write_bytes(writer) + } + InformationRequest::BlockWithSignatures(block_identifier) => { + block_identifier.write_bytes(writer) + } + InformationRequest::Transaction { + hash, + with_finalized_approvals, + } => { + hash.write_bytes(writer)?; + with_finalized_approvals.write_bytes(writer) + } + InformationRequest::Peers + | InformationRequest::Uptime + | InformationRequest::LastProgress + | InformationRequest::ReactorState + | InformationRequest::NetworkName + | InformationRequest::ConsensusValidatorChanges + | InformationRequest::BlockSynchronizerStatus + | InformationRequest::AvailableBlockRange + | InformationRequest::NextUpgrade + | InformationRequest::ConsensusStatus + | InformationRequest::ChainspecRawBytes + | InformationRequest::NodeStatus + | InformationRequest::LatestSwitchBlockHeader + | InformationRequest::ProtocolVersion => Ok(()), + InformationRequest::Reward { + era_identifier, + validator, + delegator, + } => { + era_identifier.write_bytes(writer)?; + validator.write_bytes(writer)?; + delegator.as_deref().write_bytes(writer)?; + Ok(()) + } + InformationRequest::Package { + state_identifier, + identifier, + } => { + 
state_identifier.write_bytes(writer)?; + identifier.write_bytes(writer) + } + InformationRequest::Entity { + state_identifier, + identifier, + include_bytecode, + } => { + state_identifier.write_bytes(writer)?; + identifier.write_bytes(writer)?; + include_bytecode.write_bytes(writer) + } + } + } + + fn serialized_length(&self) -> usize { + match self { + InformationRequest::BlockHeader(block_identifier) => { + block_identifier.serialized_length() + } + InformationRequest::BlockWithSignatures(block_identifier) => { + block_identifier.serialized_length() + } + InformationRequest::Transaction { + hash, + with_finalized_approvals, + } => hash.serialized_length() + with_finalized_approvals.serialized_length(), + InformationRequest::Peers + | InformationRequest::Uptime + | InformationRequest::LastProgress + | InformationRequest::ReactorState + | InformationRequest::NetworkName + | InformationRequest::ConsensusValidatorChanges + | InformationRequest::BlockSynchronizerStatus + | InformationRequest::AvailableBlockRange + | InformationRequest::NextUpgrade + | InformationRequest::ConsensusStatus + | InformationRequest::ChainspecRawBytes + | InformationRequest::NodeStatus + | InformationRequest::LatestSwitchBlockHeader + | InformationRequest::ProtocolVersion => 0, + InformationRequest::Reward { + era_identifier, + validator, + delegator, + } => { + era_identifier.serialized_length() + + validator.serialized_length() + + delegator.as_deref().serialized_length() + } + InformationRequest::Package { + state_identifier, + identifier, + } => state_identifier.serialized_length() + identifier.serialized_length(), + InformationRequest::Entity { + state_identifier, + identifier, + include_bytecode, + } => { + state_identifier.serialized_length() + + identifier.serialized_length() + + include_bytecode.serialized_length() + } + } + } +} + +impl TryFrom<(InformationRequestTag, &[u8])> for InformationRequest { + type Error = bytesrepr::Error; + + fn try_from((tag, key_bytes): 
(InformationRequestTag, &[u8])) -> Result { + let (req, remainder) = match tag { + InformationRequestTag::BlockHeader => { + let (block_identifier, remainder) = FromBytes::from_bytes(key_bytes)?; + (InformationRequest::BlockHeader(block_identifier), remainder) + } + InformationRequestTag::BlockWithSignatures => { + let (block_identifier, remainder) = FromBytes::from_bytes(key_bytes)?; + ( + InformationRequest::BlockWithSignatures(block_identifier), + remainder, + ) + } + InformationRequestTag::Transaction => { + let (hash, remainder) = FromBytes::from_bytes(key_bytes)?; + let (with_finalized_approvals, remainder) = FromBytes::from_bytes(remainder)?; + ( + InformationRequest::Transaction { + hash, + with_finalized_approvals, + }, + remainder, + ) + } + InformationRequestTag::Peers => (InformationRequest::Peers, key_bytes), + InformationRequestTag::Uptime => (InformationRequest::Uptime, key_bytes), + InformationRequestTag::LastProgress => (InformationRequest::LastProgress, key_bytes), + InformationRequestTag::ReactorState => (InformationRequest::ReactorState, key_bytes), + InformationRequestTag::NetworkName => (InformationRequest::NetworkName, key_bytes), + InformationRequestTag::ConsensusValidatorChanges => { + (InformationRequest::ConsensusValidatorChanges, key_bytes) + } + InformationRequestTag::BlockSynchronizerStatus => { + (InformationRequest::BlockSynchronizerStatus, key_bytes) + } + InformationRequestTag::AvailableBlockRange => { + (InformationRequest::AvailableBlockRange, key_bytes) + } + InformationRequestTag::NextUpgrade => (InformationRequest::NextUpgrade, key_bytes), + InformationRequestTag::ConsensusStatus => { + (InformationRequest::ConsensusStatus, key_bytes) + } + InformationRequestTag::ChainspecRawBytes => { + (InformationRequest::ChainspecRawBytes, key_bytes) + } + InformationRequestTag::NodeStatus => (InformationRequest::NodeStatus, key_bytes), + InformationRequestTag::LatestSwitchBlockHeader => { + (InformationRequest::LatestSwitchBlockHeader, 
key_bytes) + } + InformationRequestTag::Reward => { + let (era_identifier, remainder) = >::from_bytes(key_bytes)?; + let (validator, remainder) = PublicKey::from_bytes(remainder)?; + let (delegator, remainder) = >::from_bytes(remainder)?; + ( + InformationRequest::Reward { + era_identifier, + validator: Box::new(validator), + delegator: delegator.map(Box::new), + }, + remainder, + ) + } + InformationRequestTag::ProtocolVersion => { + (InformationRequest::ProtocolVersion, key_bytes) + } + InformationRequestTag::Package => { + let (state_identifier, remainder) = FromBytes::from_bytes(key_bytes)?; + let (identifier, remainder) = FromBytes::from_bytes(remainder)?; + ( + InformationRequest::Package { + state_identifier, + identifier, + }, + remainder, + ) + } + InformationRequestTag::Entity => { + let (state_identifier, remainder) = FromBytes::from_bytes(key_bytes)?; + let (identifier, remainder) = FromBytes::from_bytes(remainder)?; + let (include_bytecode, remainder) = FromBytes::from_bytes(remainder)?; + ( + InformationRequest::Entity { + state_identifier, + identifier, + include_bytecode, + }, + remainder, + ) + } + }; + if !remainder.is_empty() { + return Err(bytesrepr::Error::LeftOverBytes); + } + Ok(req) + } +} + +impl TryFrom for GetRequest { + type Error = bytesrepr::Error; + + fn try_from(request: InformationRequest) -> Result { + Ok(GetRequest::Information { + info_type_tag: request.tag().into(), + key: request.to_bytes()?, + }) + } +} + +/// Identifier of an information request. +#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] +#[repr(u16)] +pub enum InformationRequestTag { + /// Block header request. + BlockHeader = 0, + /// Block with signatures request. + BlockWithSignatures = 1, + /// Transaction request. + Transaction = 2, + /// Peers request. + Peers = 3, + /// Uptime request. + Uptime = 4, + /// Last progress request. + LastProgress = 5, + /// Reactor state request. + ReactorState = 6, + /// Network name request. 
+ NetworkName = 7, + /// Consensus validator changes request. + ConsensusValidatorChanges = 8, + /// Block synchronizer status request. + BlockSynchronizerStatus = 9, + /// Available block range request. + AvailableBlockRange = 10, + /// Next upgrade request. + NextUpgrade = 11, + /// Consensus status request. + ConsensusStatus = 12, + /// Chainspec raw bytes request. + ChainspecRawBytes = 13, + /// Node status request. + NodeStatus = 14, + /// Latest switch block header request. + LatestSwitchBlockHeader = 15, + /// Reward for a validator or a delegator in a specific era. + Reward = 16, + /// Protocol version request. + ProtocolVersion = 17, + /// Contract package request. + Package = 18, + /// Addressable entity request. + Entity = 19, +} + +impl InformationRequestTag { + #[cfg(test)] + pub(crate) fn random(rng: &mut TestRng) -> Self { + match rng.gen_range(0..20) { + 0 => InformationRequestTag::BlockHeader, + 1 => InformationRequestTag::BlockWithSignatures, + 2 => InformationRequestTag::Transaction, + 3 => InformationRequestTag::Peers, + 4 => InformationRequestTag::Uptime, + 5 => InformationRequestTag::LastProgress, + 6 => InformationRequestTag::ReactorState, + 7 => InformationRequestTag::NetworkName, + 8 => InformationRequestTag::ConsensusValidatorChanges, + 9 => InformationRequestTag::BlockSynchronizerStatus, + 10 => InformationRequestTag::AvailableBlockRange, + 11 => InformationRequestTag::NextUpgrade, + 12 => InformationRequestTag::ConsensusStatus, + 13 => InformationRequestTag::ChainspecRawBytes, + 14 => InformationRequestTag::NodeStatus, + 15 => InformationRequestTag::LatestSwitchBlockHeader, + 16 => InformationRequestTag::Reward, + 17 => InformationRequestTag::ProtocolVersion, + 18 => InformationRequestTag::Package, + 19 => InformationRequestTag::Entity, + _ => unreachable!(), + } + } +} + +impl TryFrom for InformationRequestTag { + type Error = UnknownInformationRequestTag; + + fn try_from(value: u16) -> Result { + match value { + 0 => 
Ok(InformationRequestTag::BlockHeader), + 1 => Ok(InformationRequestTag::BlockWithSignatures), + 2 => Ok(InformationRequestTag::Transaction), + 3 => Ok(InformationRequestTag::Peers), + 4 => Ok(InformationRequestTag::Uptime), + 5 => Ok(InformationRequestTag::LastProgress), + 6 => Ok(InformationRequestTag::ReactorState), + 7 => Ok(InformationRequestTag::NetworkName), + 8 => Ok(InformationRequestTag::ConsensusValidatorChanges), + 9 => Ok(InformationRequestTag::BlockSynchronizerStatus), + 10 => Ok(InformationRequestTag::AvailableBlockRange), + 11 => Ok(InformationRequestTag::NextUpgrade), + 12 => Ok(InformationRequestTag::ConsensusStatus), + 13 => Ok(InformationRequestTag::ChainspecRawBytes), + 14 => Ok(InformationRequestTag::NodeStatus), + 15 => Ok(InformationRequestTag::LatestSwitchBlockHeader), + 16 => Ok(InformationRequestTag::Reward), + 17 => Ok(InformationRequestTag::ProtocolVersion), + 18 => Ok(InformationRequestTag::Package), + 19 => Ok(InformationRequestTag::Entity), + _ => Err(UnknownInformationRequestTag(value)), + } + } +} + +impl From for u16 { + fn from(value: InformationRequestTag) -> Self { + value as u16 + } +} + +/// Error returned when trying to convert a `u16` into a `RecordId`. 
+#[derive(Debug, Clone, PartialEq)] +pub struct UnknownInformationRequestTag(u16); + +#[derive(Debug, Clone, PartialEq)] +pub enum EntityIdentifier { + ContractHash(ContractHash), + AccountHash(AccountHash), + PublicKey(PublicKey), + EntityAddr(EntityAddr), +} + +impl EntityIdentifier { + #[cfg(test)] + pub(crate) fn random(rng: &mut TestRng) -> Self { + match rng.gen_range(0..4) { + 0 => EntityIdentifier::ContractHash(ContractHash::new(rng.gen())), + 1 => EntityIdentifier::PublicKey(PublicKey::random(rng)), + 2 => EntityIdentifier::AccountHash(AccountHash::new(rng.gen())), + 3 => EntityIdentifier::EntityAddr(rng.gen()), + _ => unreachable!(), + } + } +} + +impl FromBytes for EntityIdentifier { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + let (identifier, remainder) = match tag { + 0 => { + let (hash, remainder) = FromBytes::from_bytes(remainder)?; + (EntityIdentifier::ContractHash(hash), remainder) + } + 1 => { + let (key, remainder) = FromBytes::from_bytes(remainder)?; + (EntityIdentifier::PublicKey(key), remainder) + } + 2 => { + let (hash, remainder) = FromBytes::from_bytes(remainder)?; + (EntityIdentifier::AccountHash(hash), remainder) + } + 3 => { + let (entity, remainder) = FromBytes::from_bytes(remainder)?; + (EntityIdentifier::EntityAddr(entity), remainder) + } + _ => return Err(bytesrepr::Error::Formatting), + }; + Ok((identifier, remainder)) + } +} + +impl ToBytes for EntityIdentifier { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + let tag: u8 = match self { + EntityIdentifier::ContractHash(_) => 0, + EntityIdentifier::PublicKey(_) => 1, + EntityIdentifier::AccountHash(_) => 2, + EntityIdentifier::EntityAddr(_) => 3, + }; + tag.write_bytes(writer)?; + match self { + 
EntityIdentifier::ContractHash(hash) => hash.write_bytes(writer), + EntityIdentifier::PublicKey(key) => key.write_bytes(writer), + EntityIdentifier::AccountHash(hash) => hash.write_bytes(writer), + EntityIdentifier::EntityAddr(entity) => entity.write_bytes(writer), + } + } + + fn serialized_length(&self) -> usize { + let identifier_length = match self { + EntityIdentifier::ContractHash(hash) => hash.serialized_length(), + EntityIdentifier::PublicKey(key) => key.serialized_length(), + EntityIdentifier::AccountHash(hash) => hash.serialized_length(), + EntityIdentifier::EntityAddr(entity) => entity.serialized_length(), + }; + U8_SERIALIZED_LENGTH + identifier_length + } +} + +#[derive(Debug, Clone, PartialEq)] +pub enum PackageIdentifier { + ContractPackageHash(ContractPackageHash), + PackageAddr(PackageAddr), +} + +impl PackageIdentifier { + #[cfg(test)] + pub(crate) fn random(rng: &mut TestRng) -> Self { + match rng.gen_range(0..2) { + 0 => PackageIdentifier::ContractPackageHash(ContractPackageHash::new(rng.gen())), + 1 => PackageIdentifier::PackageAddr(rng.gen()), + _ => unreachable!(), + } + } +} + +impl FromBytes for PackageIdentifier { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + let (identifier, remainder) = match tag { + 0 => { + let (hash, remainder) = FromBytes::from_bytes(remainder)?; + (PackageIdentifier::ContractPackageHash(hash), remainder) + } + 1 => { + let (addr, remainder) = FromBytes::from_bytes(remainder)?; + (PackageIdentifier::PackageAddr(addr), remainder) + } + _ => return Err(bytesrepr::Error::Formatting), + }; + Ok((identifier, remainder)) + } +} + +impl ToBytes for PackageIdentifier { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + let tag: u8 = match self { + 
PackageIdentifier::ContractPackageHash(_) => 0, + PackageIdentifier::PackageAddr(_) => 1, + }; + tag.write_bytes(writer)?; + match self { + PackageIdentifier::ContractPackageHash(hash) => hash.write_bytes(writer), + PackageIdentifier::PackageAddr(addr) => addr.write_bytes(writer), + } + } + + fn serialized_length(&self) -> usize { + let identifier_length = match self { + PackageIdentifier::ContractPackageHash(hash) => hash.serialized_length(), + PackageIdentifier::PackageAddr(addr) => addr.serialized_length(), + }; + U8_SERIALIZED_LENGTH + identifier_length + } +} + +#[cfg(test)] +mod tests { + use super::*; + use casper_types::testing::TestRng; + + #[test] + fn tag_roundtrip() { + let rng = &mut TestRng::new(); + + let val = InformationRequestTag::random(rng); + let tag = u16::from(val); + assert_eq!(InformationRequestTag::try_from(tag), Ok(val)); + } + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + + let val = InformationRequest::random(rng); + let bytes = val.to_bytes().expect("should serialize"); + assert_eq!( + InformationRequest::try_from((val.tag(), &bytes[..])), + Ok(val) + ); + } + + #[test] + fn entity_identifier_bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + + let val = EntityIdentifier::random(rng); + let bytes = val.to_bytes().expect("should serialize"); + assert_eq!(bytesrepr::deserialize_from_slice(bytes), Ok(val)); + } + + #[test] + fn package_identifier_bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + + let val = PackageIdentifier::random(rng); + let bytes = val.to_bytes().expect("should serialize"); + assert_eq!(bytesrepr::deserialize_from_slice(bytes), Ok(val)); + } +} diff --git a/binary_port/src/key_prefix.rs b/binary_port/src/key_prefix.rs new file mode 100644 index 0000000000..774e3a36b0 --- /dev/null +++ b/binary_port/src/key_prefix.rs @@ -0,0 +1,202 @@ +#[cfg(any(feature = "testing", test))] +use casper_types::testing::TestRng; +use casper_types::{ + account::AccountHash, + bytesrepr::{self, 
FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + contract_messages::TopicNameHash, + system::{auction::BidAddrTag, mint::BalanceHoldAddrTag}, + EntityAddr, KeyTag, URefAddr, +}; +#[cfg(any(feature = "testing", test))] +use rand::Rng; + +/// Key prefixes used for querying the global state. +#[derive(Debug, PartialEq, Eq, Clone, PartialOrd, Ord, Hash)] +pub enum KeyPrefix { + /// Retrieves all delegator bid addresses for a given validator. + DelegatorBidAddrsByValidator(AccountHash), + /// Retrieves all messages for a given entity. + MessagesByEntity(EntityAddr), + /// Retrieves all messages for a given entity and topic. + MessagesByEntityAndTopic(EntityAddr, TopicNameHash), + /// Retrieves all named keys for a given entity. + NamedKeysByEntity(EntityAddr), + /// Retrieves all gas balance holds for a given purse. + GasBalanceHoldsByPurse(URefAddr), + /// Retrieves all processing balance holds for a given purse. + ProcessingBalanceHoldsByPurse(URefAddr), + /// Retrieves all V1 entry points for a given entity. + EntryPointsV1ByEntity(EntityAddr), + /// Retrieves all V2 entry points for a given entity. + EntryPointsV2ByEntity(EntityAddr), +} + +impl KeyPrefix { + /// Returns a random `KeyPrefix`. 
+ #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + match rng.gen_range(0..8) { + 0 => KeyPrefix::DelegatorBidAddrsByValidator(rng.gen()), + 1 => KeyPrefix::MessagesByEntity(rng.gen()), + 2 => KeyPrefix::MessagesByEntityAndTopic(rng.gen(), rng.gen()), + 3 => KeyPrefix::NamedKeysByEntity(rng.gen()), + 4 => KeyPrefix::GasBalanceHoldsByPurse(rng.gen()), + 5 => KeyPrefix::ProcessingBalanceHoldsByPurse(rng.gen()), + 6 => KeyPrefix::EntryPointsV1ByEntity(rng.gen()), + 7 => KeyPrefix::EntryPointsV2ByEntity(rng.gen()), + _ => unreachable!(), + } + } +} + +impl ToBytes for KeyPrefix { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut result = bytesrepr::unchecked_allocate_buffer(self); + self.write_bytes(&mut result)?; + Ok(result) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + KeyPrefix::DelegatorBidAddrsByValidator(validator) => { + writer.push(KeyTag::BidAddr as u8); + writer.push(BidAddrTag::DelegatedAccount as u8); + validator.write_bytes(writer)?; + } + KeyPrefix::MessagesByEntity(entity) => { + writer.push(KeyTag::Message as u8); + entity.write_bytes(writer)?; + } + KeyPrefix::MessagesByEntityAndTopic(entity, topic) => { + writer.push(KeyTag::Message as u8); + entity.write_bytes(writer)?; + topic.write_bytes(writer)?; + } + KeyPrefix::NamedKeysByEntity(entity) => { + writer.push(KeyTag::NamedKey as u8); + entity.write_bytes(writer)?; + } + KeyPrefix::GasBalanceHoldsByPurse(uref) => { + writer.push(KeyTag::BalanceHold as u8); + writer.push(BalanceHoldAddrTag::Gas as u8); + uref.write_bytes(writer)?; + } + KeyPrefix::ProcessingBalanceHoldsByPurse(uref) => { + writer.push(KeyTag::BalanceHold as u8); + writer.push(BalanceHoldAddrTag::Processing as u8); + uref.write_bytes(writer)?; + } + KeyPrefix::EntryPointsV1ByEntity(entity) => { + writer.push(KeyTag::EntryPoint as u8); + writer.push(0); + entity.write_bytes(writer)?; + } + KeyPrefix::EntryPointsV2ByEntity(entity) => { + 
writer.push(KeyTag::EntryPoint as u8); + writer.push(1); + entity.write_bytes(writer)?; + } + } + Ok(()) + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + + match self { + KeyPrefix::DelegatorBidAddrsByValidator(validator) => { + U8_SERIALIZED_LENGTH + validator.serialized_length() + } + KeyPrefix::MessagesByEntity(entity) => entity.serialized_length(), + KeyPrefix::MessagesByEntityAndTopic(entity, topic) => { + entity.serialized_length() + topic.serialized_length() + } + KeyPrefix::NamedKeysByEntity(entity) => entity.serialized_length(), + KeyPrefix::GasBalanceHoldsByPurse(uref) => { + U8_SERIALIZED_LENGTH + uref.serialized_length() + } + KeyPrefix::ProcessingBalanceHoldsByPurse(uref) => { + U8_SERIALIZED_LENGTH + uref.serialized_length() + } + KeyPrefix::EntryPointsV1ByEntity(entity) => { + U8_SERIALIZED_LENGTH + entity.serialized_length() + } + KeyPrefix::EntryPointsV2ByEntity(entity) => { + U8_SERIALIZED_LENGTH + entity.serialized_length() + } + } + } +} + +impl FromBytes for KeyPrefix { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + let result = match tag { + tag if tag == KeyTag::BidAddr as u8 => { + let (bid_addr_tag, remainder) = u8::from_bytes(remainder)?; + match bid_addr_tag { + tag if tag == BidAddrTag::DelegatedAccount as u8 => { + let (validator, remainder) = AccountHash::from_bytes(remainder)?; + ( + KeyPrefix::DelegatorBidAddrsByValidator(validator), + remainder, + ) + } + _ => return Err(bytesrepr::Error::Formatting), + } + } + tag if tag == KeyTag::Message as u8 => { + let (entity_addr, remainder) = EntityAddr::from_bytes(remainder)?; + if remainder.is_empty() { + (KeyPrefix::MessagesByEntity(entity_addr), remainder) + } else { + let (topic, remainder) = TopicNameHash::from_bytes(remainder)?; + ( + KeyPrefix::MessagesByEntityAndTopic(entity_addr, topic), + remainder, + ) + } + } + tag if tag == KeyTag::NamedKey as u8 => { + let (entity, remainder) 
= EntityAddr::from_bytes(remainder)?; + (KeyPrefix::NamedKeysByEntity(entity), remainder) + } + tag if tag == KeyTag::BalanceHold as u8 => { + let (balance_hold_addr_tag, remainder) = u8::from_bytes(remainder)?; + let (uref, remainder) = URefAddr::from_bytes(remainder)?; + match balance_hold_addr_tag { + tag if tag == BalanceHoldAddrTag::Gas as u8 => { + (KeyPrefix::GasBalanceHoldsByPurse(uref), remainder) + } + tag if tag == BalanceHoldAddrTag::Processing as u8 => { + (KeyPrefix::ProcessingBalanceHoldsByPurse(uref), remainder) + } + _ => return Err(bytesrepr::Error::Formatting), + } + } + tag if tag == KeyTag::EntryPoint as u8 => { + let (entry_point_type, remainder) = u8::from_bytes(remainder)?; + let (entity, remainder) = EntityAddr::from_bytes(remainder)?; + match entry_point_type { + 0 => (KeyPrefix::EntryPointsV1ByEntity(entity), remainder), + 1 => (KeyPrefix::EntryPointsV2ByEntity(entity), remainder), + _ => return Err(bytesrepr::Error::Formatting), + } + } + _ => return Err(bytesrepr::Error::Formatting), + }; + Ok(result) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + + let key_prefix = KeyPrefix::random(rng); + bytesrepr::test_serialization_roundtrip(&key_prefix); + } +} diff --git a/binary_port/src/lib.rs b/binary_port/src/lib.rs new file mode 100644 index 0000000000..dacd9ab7cb --- /dev/null +++ b/binary_port/src/lib.rs @@ -0,0 +1,55 @@ +//! A Rust library for types used by the binary port of a casper node. 
+ +mod balance_response; +mod binary_message; +mod binary_response; +mod binary_response_and_request; +mod binary_response_header; +mod command; +mod dictionary_item_identifier; +mod entity_qualifier; +mod era_identifier; +mod error; +mod error_code; +mod get_request; +mod global_state_query_result; +mod information_request; +mod key_prefix; +mod minimal_block_info; +mod node_status; +mod purse_identifier; +pub mod record_id; +mod response_type; +mod speculative_execution_result; +mod state_request; +mod type_wrappers; + +pub use balance_response::BalanceResponse; +pub use binary_message::{BinaryMessage, BinaryMessageCodec}; +pub use binary_response::BinaryResponse; +pub use binary_response_and_request::BinaryResponseAndRequest; +pub use binary_response_header::BinaryResponseHeader; +pub use command::{Command, CommandHeader, CommandTag}; +pub use dictionary_item_identifier::DictionaryItemIdentifier; +pub use entity_qualifier::GlobalStateEntityQualifier; +pub use era_identifier::EraIdentifier; +pub use error::Error; +pub use error_code::ErrorCode; +pub use get_request::GetRequest; +pub use global_state_query_result::GlobalStateQueryResult; +pub use information_request::{ + EntityIdentifier, InformationRequest, InformationRequestTag, PackageIdentifier, +}; +pub use key_prefix::KeyPrefix; +pub use minimal_block_info::MinimalBlockInfo; +pub use node_status::NodeStatus; +pub use purse_identifier::PurseIdentifier; +pub use record_id::{RecordId, UnknownRecordId}; +pub use response_type::{PayloadEntity, ResponseType}; +pub use speculative_execution_result::SpeculativeExecutionResult; +pub use state_request::GlobalStateRequest; +pub use type_wrappers::{ + AccountInformation, AddressableEntityInformation, ConsensusStatus, ConsensusValidatorChanges, + ContractInformation, DictionaryQueryResult, GetTrieFullResult, LastProgress, NetworkName, + ReactorStateName, RewardResponse, TransactionWithExecutionInfo, Uptime, ValueWithProof, +}; diff --git 
a/binary_port/src/minimal_block_info.rs b/binary_port/src/minimal_block_info.rs new file mode 100644 index 0000000000..53f4597546 --- /dev/null +++ b/binary_port/src/minimal_block_info.rs @@ -0,0 +1,117 @@ +use casper_types::{ + bytesrepr::{self, FromBytes, ToBytes}, + Block, BlockHash, Digest, EraId, PublicKey, Timestamp, +}; +use serde::{Deserialize, Serialize}; + +#[cfg(test)] +use rand::Rng; + +#[cfg(test)] +use casper_types::testing::TestRng; + +/// Minimal info about a `Block` needed to satisfy the node status request. +#[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(deny_unknown_fields)] +pub struct MinimalBlockInfo { + hash: BlockHash, + timestamp: Timestamp, + era_id: EraId, + height: u64, + state_root_hash: Digest, + creator: PublicKey, +} + +impl MinimalBlockInfo { + #[cfg(test)] + pub(crate) fn random(rng: &mut TestRng) -> Self { + Self { + hash: BlockHash::random(rng), + timestamp: Timestamp::random(rng), + era_id: EraId::random(rng), + height: rng.gen(), + state_root_hash: Digest::random(rng), + creator: PublicKey::random(rng), + } + } +} + +impl FromBytes for MinimalBlockInfo { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (hash, remainder) = BlockHash::from_bytes(bytes)?; + let (timestamp, remainder) = Timestamp::from_bytes(remainder)?; + let (era_id, remainder) = EraId::from_bytes(remainder)?; + let (height, remainder) = u64::from_bytes(remainder)?; + let (state_root_hash, remainder) = Digest::from_bytes(remainder)?; + let (creator, remainder) = PublicKey::from_bytes(remainder)?; + Ok(( + MinimalBlockInfo { + hash, + timestamp, + era_id, + height, + state_root_hash, + creator, + }, + remainder, + )) + } +} + +impl ToBytes for MinimalBlockInfo { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + 
self.hash.write_bytes(writer)?; + self.timestamp.write_bytes(writer)?; + self.era_id.write_bytes(writer)?; + self.height.write_bytes(writer)?; + self.state_root_hash.write_bytes(writer)?; + self.creator.write_bytes(writer) + } + + fn serialized_length(&self) -> usize { + self.hash.serialized_length() + + self.timestamp.serialized_length() + + self.era_id.serialized_length() + + self.height.serialized_length() + + self.state_root_hash.serialized_length() + + self.creator.serialized_length() + } +} + +impl From for MinimalBlockInfo { + fn from(block: Block) -> Self { + let proposer = match &block { + Block::V1(v1) => v1.proposer().clone(), + Block::V2(v2) => v2.proposer().clone(), + }; + + MinimalBlockInfo { + hash: *block.hash(), + timestamp: block.timestamp(), + era_id: block.era_id(), + height: block.height(), + state_root_hash: *block.state_root_hash(), + creator: proposer, + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use casper_types::testing::TestRng; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + + let val = MinimalBlockInfo::random(rng); + bytesrepr::test_serialization_roundtrip(&val); + } +} diff --git a/binary_port/src/node_status.rs b/binary_port/src/node_status.rs new file mode 100644 index 0000000000..3666412df7 --- /dev/null +++ b/binary_port/src/node_status.rs @@ -0,0 +1,188 @@ +use casper_types::{ + bytesrepr::{self, FromBytes, ToBytes}, + AvailableBlockRange, BlockHash, BlockSynchronizerStatus, Digest, NextUpgrade, Peers, + ProtocolVersion, PublicKey, TimeDiff, Timestamp, +}; + +#[cfg(test)] +use casper_types::testing::TestRng; +#[cfg(test)] +use rand::Rng; +use serde::Serialize; + +use crate::{minimal_block_info::MinimalBlockInfo, type_wrappers::ReactorStateName}; + +/// Status information about the node. +#[derive(Debug, PartialEq, Serialize)] +pub struct NodeStatus { + /// The current protocol version. + pub protocol_version: ProtocolVersion, + /// The node ID and network address of each connected peer. 
+ pub peers: Peers, + /// The compiled node version. + pub build_version: String, + /// The chainspec name. + pub chainspec_name: String, + /// The state root hash of the lowest block in the available block range. + pub starting_state_root_hash: Digest, + /// The minimal info of the last block from the linear chain. + pub last_added_block_info: Option, + /// Our public signing key. + pub our_public_signing_key: Option, + /// The next round length if this node is a validator. + pub round_length: Option, + /// Information about the next scheduled upgrade. + pub next_upgrade: Option, + /// Time that passed since the node has started. + pub uptime: TimeDiff, + /// The current state of node reactor. + pub reactor_state: ReactorStateName, + /// Timestamp of the last recorded progress in the reactor. + pub last_progress: Timestamp, + /// The available block range in storage. + pub available_block_range: AvailableBlockRange, + /// The status of the block synchronizer builders. + pub block_sync: BlockSynchronizerStatus, + /// The hash of the latest switch block. 
+ pub latest_switch_block_hash: Option, +} + +impl NodeStatus { + #[cfg(test)] + pub(crate) fn random(rng: &mut TestRng) -> Self { + Self { + protocol_version: ProtocolVersion::from_parts(rng.gen(), rng.gen(), rng.gen()), + peers: Peers::random(rng), + build_version: rng.random_string(5..10), + chainspec_name: rng.random_string(5..10), + starting_state_root_hash: Digest::random(rng), + last_added_block_info: rng.gen::().then_some(MinimalBlockInfo::random(rng)), + our_public_signing_key: rng.gen::().then_some(PublicKey::random(rng)), + round_length: rng + .gen::() + .then_some(TimeDiff::from_millis(rng.gen())), + next_upgrade: rng.gen::().then_some(NextUpgrade::random(rng)), + uptime: TimeDiff::from_millis(rng.gen()), + reactor_state: ReactorStateName::new(rng.random_string(5..10)), + last_progress: Timestamp::random(rng), + available_block_range: AvailableBlockRange::random(rng), + block_sync: BlockSynchronizerStatus::random(rng), + latest_switch_block_hash: rng.gen::().then_some(BlockHash::random(rng)), + } + } +} + +impl FromBytes for NodeStatus { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (protocol_version, remainder) = ProtocolVersion::from_bytes(bytes)?; + let (peers, remainder) = Peers::from_bytes(remainder)?; + let (build_version, remainder) = String::from_bytes(remainder)?; + let (chainspec_name, remainder) = String::from_bytes(remainder)?; + let (starting_state_root_hash, remainder) = Digest::from_bytes(remainder)?; + let (last_added_block_info, remainder) = Option::::from_bytes(remainder)?; + let (our_public_signing_key, remainder) = Option::::from_bytes(remainder)?; + let (round_length, remainder) = Option::::from_bytes(remainder)?; + let (next_upgrade, remainder) = Option::::from_bytes(remainder)?; + let (uptime, remainder) = TimeDiff::from_bytes(remainder)?; + let (reactor_state, remainder) = ReactorStateName::from_bytes(remainder)?; + let (last_progress, remainder) = Timestamp::from_bytes(remainder)?; + let 
(available_block_range, remainder) = AvailableBlockRange::from_bytes(remainder)?; + let (block_sync, remainder) = BlockSynchronizerStatus::from_bytes(remainder)?; + let (latest_switch_block_hash, remainder) = Option::::from_bytes(remainder)?; + Ok(( + NodeStatus { + protocol_version, + peers, + build_version, + chainspec_name, + starting_state_root_hash, + last_added_block_info, + our_public_signing_key, + round_length, + next_upgrade, + uptime, + reactor_state, + last_progress, + available_block_range, + block_sync, + latest_switch_block_hash, + }, + remainder, + )) + } +} + +impl ToBytes for NodeStatus { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + let NodeStatus { + protocol_version, + peers, + build_version, + chainspec_name, + starting_state_root_hash, + last_added_block_info, + our_public_signing_key, + round_length, + next_upgrade, + uptime, + reactor_state, + last_progress, + available_block_range, + block_sync, + latest_switch_block_hash, + } = self; + protocol_version.write_bytes(writer)?; + peers.write_bytes(writer)?; + build_version.write_bytes(writer)?; + chainspec_name.write_bytes(writer)?; + starting_state_root_hash.write_bytes(writer)?; + last_added_block_info.write_bytes(writer)?; + our_public_signing_key.write_bytes(writer)?; + round_length.write_bytes(writer)?; + next_upgrade.write_bytes(writer)?; + uptime.write_bytes(writer)?; + reactor_state.write_bytes(writer)?; + last_progress.write_bytes(writer)?; + available_block_range.write_bytes(writer)?; + block_sync.write_bytes(writer)?; + latest_switch_block_hash.write_bytes(writer) + } + + fn serialized_length(&self) -> usize { + self.protocol_version.serialized_length() + + self.peers.serialized_length() + + self.build_version.serialized_length() + + self.chainspec_name.serialized_length() + + 
self.starting_state_root_hash.serialized_length() + + self.last_added_block_info.serialized_length() + + self.our_public_signing_key.serialized_length() + + self.round_length.serialized_length() + + self.next_upgrade.serialized_length() + + self.uptime.serialized_length() + + self.reactor_state.serialized_length() + + self.last_progress.serialized_length() + + self.available_block_range.serialized_length() + + self.block_sync.serialized_length() + + self.latest_switch_block_hash.serialized_length() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use casper_types::testing::TestRng; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + + let val = NodeStatus::random(rng); + bytesrepr::test_serialization_roundtrip(&val); + } +} diff --git a/binary_port/src/purse_identifier.rs b/binary_port/src/purse_identifier.rs new file mode 100644 index 0000000000..9894ff7dd1 --- /dev/null +++ b/binary_port/src/purse_identifier.rs @@ -0,0 +1,127 @@ +#[cfg(test)] +use casper_types::testing::TestRng; +#[cfg(test)] +use rand::Rng; + +use casper_types::{ + account::AccountHash, + bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + EntityAddr, PublicKey, URef, +}; + +const PAYMENT_PURSE_TAG: u8 = 0; +const ACCUMULATE_PURSE_TAG: u8 = 1; +const UREF_PURSE_TAG: u8 = 2; +const PUBLIC_KEY_PURSE_TAG: u8 = 3; +const ACCOUNT_PURSE_TAG: u8 = 4; +const ENTITY_PURSE_TAG: u8 = 5; + +/// Identifier for balance lookup. 
+#[derive(Clone, Debug, PartialEq)] +pub enum PurseIdentifier { + Payment, + Accumulate, + Purse(URef), + PublicKey(PublicKey), + Account(AccountHash), + Entity(EntityAddr), +} + +impl PurseIdentifier { + #[cfg(test)] + pub(crate) fn random(rng: &mut TestRng) -> Self { + match rng.gen_range(0..6) { + PAYMENT_PURSE_TAG => PurseIdentifier::Payment, + ACCUMULATE_PURSE_TAG => PurseIdentifier::Accumulate, + UREF_PURSE_TAG => PurseIdentifier::Purse(rng.gen()), + PUBLIC_KEY_PURSE_TAG => PurseIdentifier::PublicKey(PublicKey::random(rng)), + ACCOUNT_PURSE_TAG => PurseIdentifier::Account(rng.gen()), + ENTITY_PURSE_TAG => PurseIdentifier::Entity(rng.gen()), + _ => unreachable!(), + } + } +} + +impl ToBytes for PurseIdentifier { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + PurseIdentifier::Payment => PAYMENT_PURSE_TAG.write_bytes(writer), + PurseIdentifier::Accumulate => ACCUMULATE_PURSE_TAG.write_bytes(writer), + PurseIdentifier::Purse(uref) => { + UREF_PURSE_TAG.write_bytes(writer)?; + uref.write_bytes(writer) + } + PurseIdentifier::PublicKey(key) => { + PUBLIC_KEY_PURSE_TAG.write_bytes(writer)?; + key.write_bytes(writer) + } + PurseIdentifier::Account(account) => { + ACCOUNT_PURSE_TAG.write_bytes(writer)?; + account.write_bytes(writer) + } + PurseIdentifier::Entity(entity) => { + ENTITY_PURSE_TAG.write_bytes(writer)?; + entity.write_bytes(writer) + } + } + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + + match self { + PurseIdentifier::Payment => 0, + PurseIdentifier::Accumulate => 0, + PurseIdentifier::Purse(uref) => uref.serialized_length(), + PurseIdentifier::PublicKey(key) => key.serialized_length(), + PurseIdentifier::Account(account) => account.serialized_length(), + PurseIdentifier::Entity(entity) => entity.serialized_length(), + } + } +} + 
+impl FromBytes for PurseIdentifier { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + match tag { + PAYMENT_PURSE_TAG => Ok((PurseIdentifier::Payment, remainder)), + ACCUMULATE_PURSE_TAG => Ok((PurseIdentifier::Accumulate, remainder)), + UREF_PURSE_TAG => { + let (uref, remainder) = URef::from_bytes(remainder)?; + Ok((PurseIdentifier::Purse(uref), remainder)) + } + PUBLIC_KEY_PURSE_TAG => { + let (key, remainder) = PublicKey::from_bytes(remainder)?; + Ok((PurseIdentifier::PublicKey(key), remainder)) + } + ACCOUNT_PURSE_TAG => { + let (account, remainder) = AccountHash::from_bytes(remainder)?; + Ok((PurseIdentifier::Account(account), remainder)) + } + ENTITY_PURSE_TAG => { + let (entity, remainder) = EntityAddr::from_bytes(remainder)?; + Ok((PurseIdentifier::Entity(entity), remainder)) + } + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use casper_types::testing::TestRng; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + + let val = PurseIdentifier::random(rng); + bytesrepr::test_serialization_roundtrip(&val); + } +} diff --git a/binary_port/src/record_id.rs b/binary_port/src/record_id.rs new file mode 100644 index 0000000000..9721b64391 --- /dev/null +++ b/binary_port/src/record_id.rs @@ -0,0 +1,114 @@ +use core::convert::TryFrom; + +#[cfg(test)] +use rand::Rng; +use serde::Serialize; + +#[cfg(test)] +use casper_types::testing::TestRng; +#[cfg(any(feature = "testing", test))] +use strum::IntoEnumIterator; +#[cfg(any(feature = "testing", test))] +use strum_macros::EnumIter; + +/// An identifier of a record type. +#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash, Serialize)] +#[repr(u16)] +#[cfg_attr(any(feature = "testing", test), derive(EnumIter))] +pub enum RecordId { + /// Refers to `BlockHeader` record. + BlockHeader = 0, + /// Refers to `BlockBody` record. 
+ BlockBody = 1, + /// Refers to `ApprovalsHashes` record. + ApprovalsHashes = 2, + /// Refers to `BlockMetadata` record. + BlockMetadata = 3, + /// Refers to `Transaction` record. + Transaction = 4, + /// Refers to `ExecutionResult` record. + ExecutionResult = 5, + /// Refers to `Transfer` record. + Transfer = 6, + /// Refers to `FinalizedTransactionApprovals` record. + FinalizedTransactionApprovals = 7, +} + +impl RecordId { + #[cfg(test)] + pub fn random(rng: &mut TestRng) -> Self { + match rng.gen_range(0..8) { + 0 => RecordId::BlockHeader, + 1 => RecordId::BlockBody, + 2 => RecordId::ApprovalsHashes, + 3 => RecordId::BlockMetadata, + 4 => RecordId::Transaction, + 5 => RecordId::ExecutionResult, + 6 => RecordId::Transfer, + 7 => RecordId::FinalizedTransactionApprovals, + _ => unreachable!(), + } + } + #[cfg(any(feature = "testing", test))] + pub fn all() -> impl Iterator { + RecordId::iter() + } +} + +impl TryFrom for RecordId { + type Error = UnknownRecordId; + + fn try_from(value: u16) -> Result { + match value { + 0 => Ok(RecordId::BlockHeader), + 1 => Ok(RecordId::BlockBody), + 2 => Ok(RecordId::ApprovalsHashes), + 3 => Ok(RecordId::BlockMetadata), + 4 => Ok(RecordId::Transaction), + 5 => Ok(RecordId::ExecutionResult), + 6 => Ok(RecordId::Transfer), + 7 => Ok(RecordId::FinalizedTransactionApprovals), + _ => Err(UnknownRecordId(value)), + } + } +} + +impl From for u16 { + fn from(value: RecordId) -> Self { + value as u16 + } +} + +impl core::fmt::Display for RecordId { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + match self { + RecordId::BlockHeader => write!(f, "BlockHeader"), + RecordId::BlockBody => write!(f, "BlockBody"), + RecordId::ApprovalsHashes => write!(f, "ApprovalsHashes"), + RecordId::BlockMetadata => write!(f, "BlockMetadata"), + RecordId::Transaction => write!(f, "Transaction"), + RecordId::ExecutionResult => write!(f, "ExecutionResult"), + RecordId::Transfer => write!(f, "Transfer"), + 
RecordId::FinalizedTransactionApprovals => write!(f, "FinalizedTransactionApprovals"), + } + } +} + +/// Error returned when trying to convert a `u16` into a `RecordId`. +#[derive(Debug, PartialEq, Eq)] +pub struct UnknownRecordId(u16); + +#[cfg(test)] +mod tests { + use super::*; + use casper_types::testing::TestRng; + + #[test] + fn tag_roundtrip() { + let rng = &mut TestRng::new(); + + let val = RecordId::random(rng); + let tag = u16::from(val); + assert_eq!(RecordId::try_from(tag), Ok(val)); + } +} diff --git a/binary_port/src/response_type.rs b/binary_port/src/response_type.rs new file mode 100644 index 0000000000..b9cff4caad --- /dev/null +++ b/binary_port/src/response_type.rs @@ -0,0 +1,474 @@ +//! The payload type. + +use core::{convert::TryFrom, fmt}; + +#[cfg(test)] +use rand::Rng; + +#[cfg(test)] +use casper_types::testing::TestRng; +use casper_types::{ + contracts::ContractPackage, + execution::{ExecutionResult, ExecutionResultV1}, + AvailableBlockRange, BlockBody, BlockBodyV1, BlockHeader, BlockHeaderV1, BlockSignatures, + BlockSignaturesV1, BlockSynchronizerStatus, BlockWithSignatures, ChainspecRawBytes, Deploy, + NextUpgrade, Package, Peers, ProtocolVersion, StoredValue, Transaction, Transfer, +}; + +use crate::{ + global_state_query_result::GlobalStateQueryResult, + node_status::NodeStatus, + speculative_execution_result::SpeculativeExecutionResult, + type_wrappers::{ + ConsensusStatus, ConsensusValidatorChanges, GetTrieFullResult, LastProgress, NetworkName, + ReactorStateName, RewardResponse, + }, + AccountInformation, AddressableEntityInformation, BalanceResponse, ContractInformation, + DictionaryQueryResult, RecordId, TransactionWithExecutionInfo, Uptime, ValueWithProof, +}; + +/// A type of the payload being returned in a binary response. +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +#[repr(u8)] +pub enum ResponseType { + /// Legacy version of the block header. + BlockHeaderV1, + /// Block header. 
+ BlockHeader, + /// Legacy version of the block body. + BlockBodyV1, + /// Block body. + BlockBody, + /// Legacy version of the approvals hashes. + ApprovalsHashesV1, + /// Approvals hashes + ApprovalsHashes, + /// Legacy version of the block signatures. + BlockSignaturesV1, + /// Block signatures. + BlockSignatures, + /// Deploy. + Deploy, + /// Transaction. + Transaction, + /// Legacy version of the execution result. + ExecutionResultV1, + /// Execution result. + ExecutionResult, + /// Wasm V1 execution result. + WasmV1Result, + /// Transfers. + Transfers, + /// Finalized deploy approvals. + FinalizedDeployApprovals, + /// Finalized approvals. + FinalizedApprovals, + /// Block with signatures. + BlockWithSignatures, + /// Transaction with approvals and execution info. + TransactionWithExecutionInfo, + /// Peers. + Peers, + /// Last progress. + LastProgress, + /// State of the reactor. + ReactorState, + /// Network name. + NetworkName, + /// Consensus validator changes. + ConsensusValidatorChanges, // return type in `effects.rs` will be turned into dedicated type. + /// Status of the block synchronizer. + BlockSynchronizerStatus, + /// Available block range. + AvailableBlockRange, + /// Information about the next network upgrade. + NextUpgrade, + /// Consensus status. + ConsensusStatus, // return type in `effects.rs` will be turned into dedicated type. + /// Chainspec represented as raw bytes. + ChainspecRawBytes, + /// Uptime. + Uptime, + /// Result of checking if given block is in the highest available block range. + HighestBlockSequenceCheckResult, + /// Result of the speculative execution, + SpeculativeExecutionResult, + /// Result of querying global state, + GlobalStateQueryResult, + /// Result of querying global state for all values under a specified key. + StoredValues, + /// Result of querying global state for a full trie. + GetTrieFullResult, + /// Node status. + NodeStatus, + /// Result of querying for a dictionary item. 
+ DictionaryQueryResult, + /// Balance query response. + BalanceResponse, + /// Reward response. + Reward, + /// Protocol version. + ProtocolVersion, + /// Contract package with Merkle proof. + ContractPackageWithProof, + /// Contract information. + ContractInformation, + /// Account information. + AccountInformation, + /// Package with Merkle proof. + PackageWithProof, + /// Addressable entity information. + AddressableEntityInformation, +} + +impl ResponseType { + pub fn from_record_id(record_id: RecordId, is_legacy: bool) -> Self { + match (is_legacy, record_id) { + (true, RecordId::BlockHeader) => Self::BlockHeaderV1, + (true, RecordId::BlockBody) => Self::BlockBodyV1, + (true, RecordId::ApprovalsHashes) => Self::ApprovalsHashesV1, + (true, RecordId::BlockMetadata) => Self::BlockSignaturesV1, + (true, RecordId::Transaction) => Self::Deploy, + (true, RecordId::ExecutionResult) => Self::ExecutionResultV1, + (true, RecordId::Transfer) => Self::Transfers, + (true, RecordId::FinalizedTransactionApprovals) => Self::FinalizedDeployApprovals, + (false, RecordId::BlockHeader) => Self::BlockHeader, + (false, RecordId::BlockBody) => Self::BlockBody, + (false, RecordId::ApprovalsHashes) => Self::ApprovalsHashes, + (false, RecordId::BlockMetadata) => Self::BlockSignatures, + (false, RecordId::Transaction) => Self::Transaction, + (false, RecordId::ExecutionResult) => Self::ExecutionResult, + (false, RecordId::Transfer) => Self::Transfers, + (false, RecordId::FinalizedTransactionApprovals) => Self::FinalizedApprovals, + } + } + + #[cfg(test)] + pub(crate) fn random(rng: &mut TestRng) -> Self { + Self::try_from(rng.gen_range(0..44)).unwrap() + } +} + +impl TryFrom for ResponseType { + type Error = (); + + fn try_from(v: u8) -> Result { + match v { + x if x == ResponseType::BlockHeaderV1 as u8 => Ok(ResponseType::BlockHeaderV1), + x if x == ResponseType::BlockHeader as u8 => Ok(ResponseType::BlockHeader), + x if x == ResponseType::BlockBodyV1 as u8 => 
Ok(ResponseType::BlockBodyV1), + x if x == ResponseType::BlockBody as u8 => Ok(ResponseType::BlockBody), + x if x == ResponseType::ApprovalsHashesV1 as u8 => Ok(ResponseType::ApprovalsHashesV1), + x if x == ResponseType::ApprovalsHashes as u8 => Ok(ResponseType::ApprovalsHashes), + x if x == ResponseType::BlockSignaturesV1 as u8 => Ok(ResponseType::BlockSignaturesV1), + x if x == ResponseType::BlockSignatures as u8 => Ok(ResponseType::BlockSignatures), + x if x == ResponseType::Deploy as u8 => Ok(ResponseType::Deploy), + x if x == ResponseType::Transaction as u8 => Ok(ResponseType::Transaction), + x if x == ResponseType::ExecutionResultV1 as u8 => Ok(ResponseType::ExecutionResultV1), + x if x == ResponseType::ExecutionResult as u8 => Ok(ResponseType::ExecutionResult), + x if x == ResponseType::Transfers as u8 => Ok(ResponseType::Transfers), + x if x == ResponseType::FinalizedDeployApprovals as u8 => { + Ok(ResponseType::FinalizedDeployApprovals) + } + x if x == ResponseType::FinalizedApprovals as u8 => { + Ok(ResponseType::FinalizedApprovals) + } + x if x == ResponseType::BlockWithSignatures as u8 => { + Ok(ResponseType::BlockWithSignatures) + } + x if x == ResponseType::TransactionWithExecutionInfo as u8 => { + Ok(ResponseType::TransactionWithExecutionInfo) + } + x if x == ResponseType::Peers as u8 => Ok(ResponseType::Peers), + x if x == ResponseType::Uptime as u8 => Ok(ResponseType::Uptime), + x if x == ResponseType::LastProgress as u8 => Ok(ResponseType::LastProgress), + x if x == ResponseType::ReactorState as u8 => Ok(ResponseType::ReactorState), + x if x == ResponseType::NetworkName as u8 => Ok(ResponseType::NetworkName), + x if x == ResponseType::ConsensusValidatorChanges as u8 => { + Ok(ResponseType::ConsensusValidatorChanges) + } + x if x == ResponseType::BlockSynchronizerStatus as u8 => { + Ok(ResponseType::BlockSynchronizerStatus) + } + x if x == ResponseType::AvailableBlockRange as u8 => { + Ok(ResponseType::AvailableBlockRange) + } + x if x == 
ResponseType::NextUpgrade as u8 => Ok(ResponseType::NextUpgrade), + x if x == ResponseType::ConsensusStatus as u8 => Ok(ResponseType::ConsensusStatus), + x if x == ResponseType::ChainspecRawBytes as u8 => Ok(ResponseType::ChainspecRawBytes), + x if x == ResponseType::HighestBlockSequenceCheckResult as u8 => { + Ok(ResponseType::HighestBlockSequenceCheckResult) + } + x if x == ResponseType::SpeculativeExecutionResult as u8 => { + Ok(ResponseType::SpeculativeExecutionResult) + } + x if x == ResponseType::GlobalStateQueryResult as u8 => { + Ok(ResponseType::GlobalStateQueryResult) + } + x if x == ResponseType::StoredValues as u8 => Ok(ResponseType::StoredValues), + x if x == ResponseType::GetTrieFullResult as u8 => Ok(ResponseType::GetTrieFullResult), + x if x == ResponseType::NodeStatus as u8 => Ok(ResponseType::NodeStatus), + x if x == ResponseType::DictionaryQueryResult as u8 => { + Ok(ResponseType::DictionaryQueryResult) + } + x if x == ResponseType::WasmV1Result as u8 => Ok(ResponseType::WasmV1Result), + x if x == ResponseType::BalanceResponse as u8 => Ok(ResponseType::BalanceResponse), + x if x == ResponseType::Reward as u8 => Ok(ResponseType::Reward), + x if x == ResponseType::ProtocolVersion as u8 => Ok(ResponseType::ProtocolVersion), + x if x == ResponseType::ContractPackageWithProof as u8 => { + Ok(ResponseType::ContractPackageWithProof) + } + x if x == ResponseType::ContractInformation as u8 => { + Ok(ResponseType::ContractInformation) + } + x if x == ResponseType::AccountInformation as u8 => { + Ok(ResponseType::AccountInformation) + } + x if x == ResponseType::PackageWithProof as u8 => Ok(ResponseType::PackageWithProof), + x if x == ResponseType::AddressableEntityInformation as u8 => { + Ok(ResponseType::AddressableEntityInformation) + } + _ => Err(()), + } + } +} + +impl From for u8 { + fn from(value: ResponseType) -> Self { + value as u8 + } +} + +impl fmt::Display for ResponseType { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match 
self { + ResponseType::BlockHeaderV1 => write!(f, "BlockHeaderV1"), + ResponseType::BlockHeader => write!(f, "BlockHeader"), + ResponseType::BlockBodyV1 => write!(f, "BlockBodyV1"), + ResponseType::BlockBody => write!(f, "BlockBody"), + ResponseType::ApprovalsHashesV1 => write!(f, "ApprovalsHashesV1"), + ResponseType::ApprovalsHashes => write!(f, "ApprovalsHashes"), + ResponseType::BlockSignaturesV1 => write!(f, "BlockSignaturesV1"), + ResponseType::BlockSignatures => write!(f, "BlockSignatures"), + ResponseType::Deploy => write!(f, "Deploy"), + ResponseType::Transaction => write!(f, "Transaction"), + ResponseType::ExecutionResultV1 => write!(f, "ExecutionResultV1"), + ResponseType::ExecutionResult => write!(f, "ExecutionResult"), + ResponseType::Transfers => write!(f, "Transfers"), + ResponseType::FinalizedDeployApprovals => write!(f, "FinalizedDeployApprovals"), + ResponseType::FinalizedApprovals => write!(f, "FinalizedApprovals"), + ResponseType::BlockWithSignatures => write!(f, "BlockWithSignatures"), + ResponseType::TransactionWithExecutionInfo => write!(f, "TransactionWithExecutionInfo"), + ResponseType::Peers => write!(f, "Peers"), + ResponseType::LastProgress => write!(f, "LastProgress"), + ResponseType::ReactorState => write!(f, "ReactorState"), + ResponseType::NetworkName => write!(f, "NetworkName"), + ResponseType::ConsensusValidatorChanges => write!(f, "ConsensusValidatorChanges"), + ResponseType::BlockSynchronizerStatus => write!(f, "BlockSynchronizerStatus"), + ResponseType::AvailableBlockRange => write!(f, "AvailableBlockRange"), + ResponseType::NextUpgrade => write!(f, "NextUpgrade"), + ResponseType::ConsensusStatus => write!(f, "ConsensusStatus"), + ResponseType::ChainspecRawBytes => write!(f, "ChainspecRawBytes"), + ResponseType::Uptime => write!(f, "Uptime"), + ResponseType::HighestBlockSequenceCheckResult => { + write!(f, "HighestBlockSequenceCheckResult") + } + ResponseType::SpeculativeExecutionResult => write!(f, "SpeculativeExecutionResult"), 
+ ResponseType::GlobalStateQueryResult => write!(f, "GlobalStateQueryResult"), + ResponseType::StoredValues => write!(f, "StoredValues"), + ResponseType::GetTrieFullResult => write!(f, "GetTrieFullResult"), + ResponseType::NodeStatus => write!(f, "NodeStatus"), + ResponseType::WasmV1Result => write!(f, "WasmV1Result"), + ResponseType::DictionaryQueryResult => write!(f, "DictionaryQueryResult"), + ResponseType::BalanceResponse => write!(f, "BalanceResponse"), + ResponseType::Reward => write!(f, "Reward"), + ResponseType::ProtocolVersion => write!(f, "ProtocolVersion"), + ResponseType::ContractPackageWithProof => write!(f, "ContractPackageWithProof"), + ResponseType::ContractInformation => write!(f, "ContractInformation"), + ResponseType::AccountInformation => write!(f, "AccountInformation"), + ResponseType::PackageWithProof => write!(f, "PackageWithProof"), + ResponseType::AddressableEntityInformation => { + write!(f, "AddressableEntityInformation") + } + } + } +} + +/// Represents an entity that can be sent as a payload. +pub trait PayloadEntity { + /// Returns the payload type of the entity. 
+ const RESPONSE_TYPE: ResponseType; +} + +impl PayloadEntity for Transaction { + const RESPONSE_TYPE: ResponseType = ResponseType::Transaction; +} + +impl PayloadEntity for Deploy { + const RESPONSE_TYPE: ResponseType = ResponseType::Deploy; +} + +impl PayloadEntity for BlockHeader { + const RESPONSE_TYPE: ResponseType = ResponseType::BlockHeader; +} + +impl PayloadEntity for BlockHeaderV1 { + const RESPONSE_TYPE: ResponseType = ResponseType::BlockHeaderV1; +} + +impl PayloadEntity for BlockBody { + const RESPONSE_TYPE: ResponseType = ResponseType::BlockBody; +} + +impl PayloadEntity for BlockBodyV1 { + const RESPONSE_TYPE: ResponseType = ResponseType::BlockBodyV1; +} + +impl PayloadEntity for BlockSignatures { + const RESPONSE_TYPE: ResponseType = ResponseType::BlockSignatures; +} + +impl PayloadEntity for BlockSignaturesV1 { + const RESPONSE_TYPE: ResponseType = ResponseType::BlockSignaturesV1; +} + +impl PayloadEntity for ExecutionResult { + const RESPONSE_TYPE: ResponseType = ResponseType::ExecutionResult; +} + +impl PayloadEntity for ExecutionResultV1 { + const RESPONSE_TYPE: ResponseType = ResponseType::ExecutionResultV1; +} + +impl PayloadEntity for BlockWithSignatures { + const RESPONSE_TYPE: ResponseType = ResponseType::BlockWithSignatures; +} + +impl PayloadEntity for TransactionWithExecutionInfo { + const RESPONSE_TYPE: ResponseType = ResponseType::TransactionWithExecutionInfo; +} + +impl PayloadEntity for Peers { + const RESPONSE_TYPE: ResponseType = ResponseType::Peers; +} + +impl PayloadEntity for Vec { + const RESPONSE_TYPE: ResponseType = ResponseType::Transfers; +} + +impl PayloadEntity for AvailableBlockRange { + const RESPONSE_TYPE: ResponseType = ResponseType::AvailableBlockRange; +} + +impl PayloadEntity for ChainspecRawBytes { + const RESPONSE_TYPE: ResponseType = ResponseType::ChainspecRawBytes; +} + +impl PayloadEntity for ConsensusValidatorChanges { + const RESPONSE_TYPE: ResponseType = ResponseType::ConsensusValidatorChanges; +} + +impl 
PayloadEntity for GlobalStateQueryResult { + const RESPONSE_TYPE: ResponseType = ResponseType::GlobalStateQueryResult; +} + +impl PayloadEntity for DictionaryQueryResult { + const RESPONSE_TYPE: ResponseType = ResponseType::DictionaryQueryResult; +} + +impl PayloadEntity for Vec { + const RESPONSE_TYPE: ResponseType = ResponseType::StoredValues; +} + +impl PayloadEntity for GetTrieFullResult { + const RESPONSE_TYPE: ResponseType = ResponseType::GetTrieFullResult; +} + +impl PayloadEntity for SpeculativeExecutionResult { + const RESPONSE_TYPE: ResponseType = ResponseType::SpeculativeExecutionResult; +} + +impl PayloadEntity for NodeStatus { + const RESPONSE_TYPE: ResponseType = ResponseType::NodeStatus; +} + +impl PayloadEntity for NextUpgrade { + const RESPONSE_TYPE: ResponseType = ResponseType::NextUpgrade; +} + +impl PayloadEntity for Uptime { + const RESPONSE_TYPE: ResponseType = ResponseType::Uptime; +} + +impl PayloadEntity for LastProgress { + const RESPONSE_TYPE: ResponseType = ResponseType::LastProgress; +} + +impl PayloadEntity for ReactorStateName { + const RESPONSE_TYPE: ResponseType = ResponseType::ReactorState; +} + +impl PayloadEntity for NetworkName { + const RESPONSE_TYPE: ResponseType = ResponseType::NetworkName; +} + +impl PayloadEntity for BlockSynchronizerStatus { + const RESPONSE_TYPE: ResponseType = ResponseType::BlockSynchronizerStatus; +} + +impl PayloadEntity for ConsensusStatus { + const RESPONSE_TYPE: ResponseType = ResponseType::ConsensusStatus; +} + +impl PayloadEntity for BalanceResponse { + const RESPONSE_TYPE: ResponseType = ResponseType::BalanceResponse; +} + +impl PayloadEntity for RewardResponse { + const RESPONSE_TYPE: ResponseType = ResponseType::Reward; +} + +impl PayloadEntity for ProtocolVersion { + const RESPONSE_TYPE: ResponseType = ResponseType::ProtocolVersion; +} + +impl PayloadEntity for ValueWithProof { + const RESPONSE_TYPE: ResponseType = ResponseType::ContractPackageWithProof; +} + +impl PayloadEntity for 
ContractInformation { + const RESPONSE_TYPE: ResponseType = ResponseType::ContractInformation; +} + +impl PayloadEntity for AccountInformation { + const RESPONSE_TYPE: ResponseType = ResponseType::AccountInformation; +} + +impl PayloadEntity for ValueWithProof { + const RESPONSE_TYPE: ResponseType = ResponseType::PackageWithProof; +} + +impl PayloadEntity for AddressableEntityInformation { + const RESPONSE_TYPE: ResponseType = ResponseType::AddressableEntityInformation; +} + +impl PayloadEntity for Box +where + T: PayloadEntity, +{ + const RESPONSE_TYPE: ResponseType = T::RESPONSE_TYPE; +} + +#[cfg(test)] +mod tests { + use super::*; + use casper_types::testing::TestRng; + + #[test] + fn convert_u8_roundtrip() { + let rng = &mut TestRng::new(); + + let val = ResponseType::random(rng); + assert_eq!(ResponseType::try_from(val as u8), Ok(val)); + } +} diff --git a/binary_port/src/speculative_execution_result.rs b/binary_port/src/speculative_execution_result.rs new file mode 100644 index 0000000000..cd06312f78 --- /dev/null +++ b/binary_port/src/speculative_execution_result.rs @@ -0,0 +1,181 @@ +use once_cell::sync::Lazy; +#[cfg(any(feature = "testing", test))] +use rand::Rng; +use serde::{Deserialize, Serialize}; + +#[cfg(any(feature = "testing", test))] +use rand::distributions::{Alphanumeric, DistString}; + +#[cfg(any(feature = "testing", test))] +use casper_types::testing::TestRng; +use casper_types::{ + bytesrepr::{self, FromBytes, ToBytes}, + contract_messages::Messages, + execution::Effects, + BlockHash, Digest, Gas, InvalidTransaction, Transfer, +}; + +static SPECULATIVE_EXECUTION_RESULT: Lazy = Lazy::new(|| { + SpeculativeExecutionResult::new( + BlockHash::new(Digest::from([0; Digest::LENGTH])), + vec![], + Gas::zero(), + Gas::zero(), + Effects::new(), + Messages::new(), + None, + ) +}); + +#[derive(Clone, PartialEq, Eq, Debug, Serialize, Deserialize)] +pub struct SpeculativeExecutionResult { + /// Block hash against which the execution was performed. 
+ block_hash: BlockHash, + /// List of transfers that happened during execution. + transfers: Vec, + /// Gas limit. + limit: Gas, + /// Gas consumed. + consumed: Gas, + /// Execution effects. + effects: Effects, + /// Messages emitted during execution. + messages: Messages, + /// Did the wasm execute successfully? + error: Option, +} + +impl SpeculativeExecutionResult { + pub fn new( + block_hash: BlockHash, + transfers: Vec, + limit: Gas, + consumed: Gas, + effects: Effects, + messages: Messages, + error: Option, + ) -> Self { + SpeculativeExecutionResult { + transfers, + limit, + consumed, + effects, + messages, + error, + block_hash, + } + } + + // This method is not intended to be used by third party crates. + #[doc(hidden)] + pub fn example() -> &'static Self { + &SPECULATIVE_EXECUTION_RESULT + } + + #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + use casper_types::contract_messages::Message; + + let random_messages = |rng: &mut TestRng| -> Messages { + let count = rng.gen_range(16..128); + std::iter::repeat_with(|| Message::random(rng)) + .take(count) + .collect() + }; + + SpeculativeExecutionResult { + block_hash: BlockHash::new(rng.gen()), + transfers: vec![Transfer::random(rng)], + limit: Gas::random(rng), + consumed: Gas::random(rng), + effects: Effects::random(rng), + messages: random_messages(rng), + error: if rng.gen() { + None + } else { + let count = rng.gen_range(16..128); + Some(Alphanumeric.sample_string(rng, count)) + }, + } + } +} + +impl From for SpeculativeExecutionResult { + fn from(invalid_transaction: InvalidTransaction) -> Self { + SpeculativeExecutionResult { + transfers: Default::default(), + limit: Default::default(), + consumed: Default::default(), + effects: Default::default(), + messages: Default::default(), + error: Some(format!("{}", invalid_transaction)), + block_hash: Default::default(), + } + } +} + +impl ToBytes for SpeculativeExecutionResult { + fn to_bytes(&self) -> Result, 
bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + ToBytes::serialized_length(&self.transfers) + + ToBytes::serialized_length(&self.limit) + + ToBytes::serialized_length(&self.consumed) + + ToBytes::serialized_length(&self.effects) + + ToBytes::serialized_length(&self.messages) + + ToBytes::serialized_length(&self.error) + + ToBytes::serialized_length(&self.block_hash) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.transfers.write_bytes(writer)?; + self.limit.write_bytes(writer)?; + self.consumed.write_bytes(writer)?; + self.effects.write_bytes(writer)?; + self.messages.write_bytes(writer)?; + self.error.write_bytes(writer)?; + self.block_hash.write_bytes(writer) + } +} + +impl FromBytes for SpeculativeExecutionResult { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (transfers, bytes) = Vec::::from_bytes(bytes)?; + let (limit, bytes) = Gas::from_bytes(bytes)?; + let (consumed, bytes) = Gas::from_bytes(bytes)?; + let (effects, bytes) = Effects::from_bytes(bytes)?; + let (messages, bytes) = Messages::from_bytes(bytes)?; + let (error, bytes) = Option::::from_bytes(bytes)?; + let (block_hash, bytes) = BlockHash::from_bytes(bytes)?; + Ok(( + SpeculativeExecutionResult { + transfers, + limit, + consumed, + effects, + messages, + error, + block_hash, + }, + bytes, + )) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use casper_types::testing::TestRng; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + + let val = SpeculativeExecutionResult::random(rng); + bytesrepr::test_serialization_roundtrip(&val); + } +} diff --git a/binary_port/src/state_request.rs b/binary_port/src/state_request.rs new file mode 100644 index 0000000000..267c678e62 --- /dev/null +++ b/binary_port/src/state_request.rs @@ -0,0 +1,118 @@ +use std::fmt::{Display, Formatter, Result 
as DisplayResult}; + +use crate::entity_qualifier::GlobalStateEntityQualifier; +#[cfg(test)] +use casper_types::testing::TestRng; +use casper_types::{ + bytesrepr::{self, FromBytes, ToBytes}, + GlobalStateIdentifier, +}; +#[cfg(test)] +use rand::Rng; + +/// A request to get data from the global state. +#[derive(Clone, Debug, PartialEq)] +pub struct GlobalStateRequest { + /// Global state identifier, `None` means "latest block state". + state_identifier: Option, + /// qualifier that points to a specific item (or items) in the global state. + qualifier: GlobalStateEntityQualifier, +} + +impl GlobalStateRequest { + pub fn new( + state_identifier: Option, + qualifier: GlobalStateEntityQualifier, + ) -> Self { + GlobalStateRequest { + state_identifier, + qualifier, + } + } + pub fn destructure(self) -> (Option, GlobalStateEntityQualifier) { + (self.state_identifier, self.qualifier) + } + + pub fn state_identifier(self) -> Option { + self.state_identifier + } + + #[cfg(test)] + pub(crate) fn random(rng: &mut TestRng) -> Self { + let state_identifier = rng + .gen::() + .then(|| GlobalStateIdentifier::random(rng)); + let qualifier = GlobalStateEntityQualifier::random(rng); + Self { + state_identifier, + qualifier, + } + } +} + +impl ToBytes for GlobalStateRequest { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.state_identifier.write_bytes(writer)?; + self.qualifier.write_bytes(writer)?; + Ok(()) + } + + fn serialized_length(&self) -> usize { + self.state_identifier.serialized_length() + self.qualifier.serialized_length() + } +} + +impl FromBytes for GlobalStateRequest { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (state_identifier, remainder) = FromBytes::from_bytes(bytes)?; + let (qualifier, remainder) = FromBytes::from_bytes(remainder)?; + 
Ok(( + GlobalStateRequest { + state_identifier, + qualifier, + }, + remainder, + )) + } +} + +impl Display for GlobalStateRequest { + fn fmt(&self, f: &mut Formatter<'_>) -> DisplayResult { + match self.qualifier { + GlobalStateEntityQualifier::Item { base_key, .. } => { + write!(f, "get item from global state ({})", base_key) + } + GlobalStateEntityQualifier::AllItems { key_tag, .. } => { + write!(f, "get all items ({})", key_tag) + } + GlobalStateEntityQualifier::DictionaryItem { .. } => { + write!(f, "get dictionary item") + } + GlobalStateEntityQualifier::Balance { .. } => { + write!(f, "get balance by state root",) + } + GlobalStateEntityQualifier::ItemsByPrefix { .. } => { + write!(f, "get items by prefix") + } + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use casper_types::testing::TestRng; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + let val = GlobalStateRequest::random(rng); + bytesrepr::test_serialization_roundtrip(&val); + } +} diff --git a/binary_port/src/type_wrappers.rs b/binary_port/src/type_wrappers.rs new file mode 100644 index 0000000000..89a14cbf78 --- /dev/null +++ b/binary_port/src/type_wrappers.rs @@ -0,0 +1,833 @@ +use core::{convert::TryFrom, num::TryFromIntError, time::Duration}; +use std::collections::BTreeMap; + +use casper_types::{ + bytesrepr::{self, Bytes, FromBytes, ToBytes}, + contracts::ContractHash, + global_state::TrieMerkleProof, + system::auction::DelegationRate, + Account, AddressableEntity, BlockHash, ByteCode, Contract, ContractWasm, EntityAddr, EraId, + ExecutionInfo, Key, PublicKey, StoredValue, TimeDiff, Timestamp, Transaction, ValidatorChange, + U512, +}; +use serde::Serialize; + +use super::GlobalStateQueryResult; + +// `bytesrepr` implementations for type wrappers are repetitive, hence this macro helper. We should +// get rid of this after we introduce the proper "bytesrepr-derive" proc macro. +macro_rules! 
impl_bytesrepr_for_type_wrapper { + ($t:ident) => { + impl ToBytes for $t { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.0.to_bytes() + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.0.write_bytes(writer) + } + } + + impl FromBytes for $t { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (inner, remainder) = FromBytes::from_bytes(bytes)?; + Ok(($t(inner), remainder)) + } + } + }; +} + +/// Type representing uptime. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize)] +pub struct Uptime(u64); + +impl Uptime { + /// Constructs new uptime. + pub fn new(value: u64) -> Self { + Self(value) + } + + /// Retrieve the inner value. + pub fn into_inner(self) -> u64 { + self.0 + } +} + +impl From for Duration { + fn from(uptime: Uptime) -> Self { + Duration::from_secs(uptime.0) + } +} + +impl TryFrom for TimeDiff { + type Error = TryFromIntError; + + fn try_from(uptime: Uptime) -> Result { + u32::try_from(uptime.0).map(TimeDiff::from_seconds) + } +} + +/// Type representing changes in consensus validators. +#[derive(Debug, PartialEq, Eq, Serialize)] +pub struct ConsensusValidatorChanges(BTreeMap>); + +impl ConsensusValidatorChanges { + /// Constructs new consensus validator changes. + pub fn new(value: BTreeMap>) -> Self { + Self(value) + } + + /// Retrieve the inner value. + pub fn into_inner(self) -> BTreeMap> { + self.0 + } +} + +impl From for BTreeMap> { + fn from(consensus_validator_changes: ConsensusValidatorChanges) -> Self { + consensus_validator_changes.0 + } +} + +/// Type representing network name. +#[derive(Debug, PartialEq, Eq, Serialize)] +pub struct NetworkName(String); + +impl NetworkName { + /// Constructs new network name. + pub fn new(value: impl ToString) -> Self { + Self(value.to_string()) + } + + /// Retrieve the inner value. 
+ pub fn into_inner(self) -> String { + self.0 + } +} + +impl From for String { + fn from(network_name: NetworkName) -> Self { + network_name.0 + } +} + +/// Type representing the reactor state name. +#[derive(Debug, PartialEq, Eq, Serialize)] +pub struct ReactorStateName(String); + +impl ReactorStateName { + /// Constructs new reactor state name. + pub fn new(value: impl ToString) -> Self { + Self(value.to_string()) + } + + /// Retrieve the name as a `String`. + pub fn into_inner(self) -> String { + self.0 + } +} + +impl From for String { + fn from(reactor_state: ReactorStateName) -> Self { + reactor_state.0 + } +} + +/// Type representing last progress of the sync process. +#[derive(Debug, PartialEq, Eq, Serialize)] +pub struct LastProgress(Timestamp); + +impl LastProgress { + /// Constructs new last progress. + pub fn new(value: Timestamp) -> Self { + Self(value) + } + + /// Retrieve the inner value. + pub fn into_inner(self) -> Timestamp { + self.0 + } +} + +impl From for Timestamp { + fn from(last_progress: LastProgress) -> Self { + last_progress.0 + } +} + +/// Type representing results of the get full trie request. +#[derive(Debug, PartialEq, Eq)] +pub struct GetTrieFullResult(Option); + +impl GetTrieFullResult { + /// Constructs new get trie result. + pub fn new(value: Option) -> Self { + Self(value) + } + + /// Returns the inner value. + pub fn into_inner(self) -> Option { + self.0 + } +} + +/// Type representing the reward of a validator or a delegator. +#[derive(Debug, PartialEq, Eq, Serialize)] +pub struct RewardResponse { + amount: U512, + era_id: EraId, + delegation_rate: DelegationRate, + switch_block_hash: BlockHash, +} + +impl RewardResponse { + /// Constructs new reward response. + pub fn new( + amount: U512, + era_id: EraId, + delegation_rate: DelegationRate, + switch_block_hash: BlockHash, + ) -> Self { + Self { + amount, + era_id, + delegation_rate, + switch_block_hash, + } + } + + /// Returns the amount of the reward. 
+ pub fn amount(&self) -> U512 { + self.amount + } + + /// Returns the era ID. + pub fn era_id(&self) -> EraId { + self.era_id + } + + /// Returns the delegation rate of the validator. + pub fn delegation_rate(&self) -> DelegationRate { + self.delegation_rate + } + + /// Returns the switch block hash at which the reward was distributed. + pub fn switch_block_hash(&self) -> BlockHash { + self.switch_block_hash + } +} + +impl ToBytes for RewardResponse { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.amount.serialized_length() + + self.era_id.serialized_length() + + self.delegation_rate.serialized_length() + + self.switch_block_hash.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.amount.write_bytes(writer)?; + self.era_id.write_bytes(writer)?; + self.delegation_rate.write_bytes(writer)?; + self.switch_block_hash.write_bytes(writer) + } +} + +impl FromBytes for RewardResponse { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (amount, remainder) = FromBytes::from_bytes(bytes)?; + let (era_id, remainder) = FromBytes::from_bytes(remainder)?; + let (delegation_rate, remainder) = FromBytes::from_bytes(remainder)?; + let (switch_block_hash, remainder) = FromBytes::from_bytes(remainder)?; + Ok(( + RewardResponse::new(amount, era_id, delegation_rate, switch_block_hash), + remainder, + )) + } +} + +/// Describes the consensus status. +#[derive(Debug, PartialEq, Eq, Serialize)] +pub struct ConsensusStatus { + validator_public_key: PublicKey, + round_length: Option, +} + +impl ConsensusStatus { + /// Constructs new consensus status. + pub fn new(validator_public_key: PublicKey, round_length: Option) -> Self { + Self { + validator_public_key, + round_length, + } + } + + /// Returns the validator public key. 
+ pub fn validator_public_key(&self) -> &PublicKey { + &self.validator_public_key + } + + /// Returns the round length. + pub fn round_length(&self) -> Option { + self.round_length + } +} + +impl ToBytes for ConsensusStatus { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.validator_public_key.serialized_length() + self.round_length.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.validator_public_key.write_bytes(writer)?; + self.round_length.write_bytes(writer) + } +} + +impl FromBytes for ConsensusStatus { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (validator_public_key, remainder) = FromBytes::from_bytes(bytes)?; + let (round_length, remainder) = FromBytes::from_bytes(remainder)?; + Ok(( + ConsensusStatus::new(validator_public_key, round_length), + remainder, + )) + } +} + +/// A transaction with execution info. +#[derive(Debug, PartialEq, Eq, Serialize)] +pub struct TransactionWithExecutionInfo { + transaction: Transaction, + execution_info: Option, +} + +impl TransactionWithExecutionInfo { + /// Constructs new transaction with execution info. + pub fn new(transaction: Transaction, execution_info: Option) -> Self { + Self { + transaction, + execution_info, + } + } + + /// Converts `self` into the transaction and execution info. 
+ pub fn into_inner(self) -> (Transaction, Option) { + (self.transaction, self.execution_info) + } +} + +impl ToBytes for TransactionWithExecutionInfo { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.transaction.serialized_length() + self.execution_info.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.transaction.write_bytes(writer)?; + self.execution_info.write_bytes(writer) + } +} + +impl FromBytes for TransactionWithExecutionInfo { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (transaction, remainder) = FromBytes::from_bytes(bytes)?; + let (execution_info, remainder) = FromBytes::from_bytes(remainder)?; + Ok(( + TransactionWithExecutionInfo::new(transaction, execution_info), + remainder, + )) + } +} + +/// A query result for a dictionary item, contains the dictionary item key and a global state query +/// result. +#[derive(Debug, Clone, PartialEq)] +pub struct DictionaryQueryResult { + key: Key, + query_result: GlobalStateQueryResult, +} + +impl DictionaryQueryResult { + /// Constructs new dictionary query result. + pub fn new(key: Key, query_result: GlobalStateQueryResult) -> Self { + Self { key, query_result } + } + + /// Converts `self` into the dictionary item key and global state query result. 
+ pub fn into_inner(self) -> (Key, GlobalStateQueryResult) { + (self.key, self.query_result) + } +} + +impl ToBytes for DictionaryQueryResult { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.key.write_bytes(writer)?; + self.query_result.write_bytes(writer) + } + + fn serialized_length(&self) -> usize { + self.key.serialized_length() + self.query_result.serialized_length() + } +} + +impl FromBytes for DictionaryQueryResult { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (key, remainder) = FromBytes::from_bytes(bytes)?; + let (query_result, remainder) = FromBytes::from_bytes(remainder)?; + Ok((DictionaryQueryResult::new(key, query_result), remainder)) + } +} + +/// An account with its associated merkle proof. +#[derive(Debug, PartialEq)] +pub struct AccountInformation { + account: Account, + merkle_proof: Vec>, +} + +impl AccountInformation { + /// Constructs a new `AccountResponse`. + pub fn new(account: Account, merkle_proof: Vec>) -> Self { + Self { + account, + merkle_proof, + } + } + + /// Returns the inner `Account`. + pub fn account(&self) -> &Account { + &self.account + } + + /// Returns the merkle proof. + pub fn merkle_proof(&self) -> &Vec> { + &self.merkle_proof + } + + /// Converts `self` into the account and merkle proof. 
+ pub fn into_inner(self) -> (Account, Vec>) { + (self.account, self.merkle_proof) + } +} + +impl ToBytes for AccountInformation { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.account.write_bytes(writer)?; + self.merkle_proof.write_bytes(writer) + } + + fn serialized_length(&self) -> usize { + self.account.serialized_length() + self.merkle_proof.serialized_length() + } +} + +impl FromBytes for AccountInformation { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (account, remainder) = FromBytes::from_bytes(bytes)?; + let (merkle_proof, remainder) = FromBytes::from_bytes(remainder)?; + Ok((AccountInformation::new(account, merkle_proof), remainder)) + } +} + +/// A contract with its associated Wasm and merkle proof. +#[derive(Debug, PartialEq)] +pub struct ContractInformation { + hash: ContractHash, + contract: ValueWithProof, + wasm: Option>, +} + +impl ContractInformation { + /// Constructs new `ContractInformation`. + pub fn new( + hash: ContractHash, + contract: ValueWithProof, + wasm: Option>, + ) -> Self { + Self { + hash, + contract, + wasm, + } + } + + /// Returns the hash of the contract. + pub fn hash(&self) -> ContractHash { + self.hash + } + + /// Returns the inner `Contract`. + pub fn contract(&self) -> &Contract { + &self.contract.value + } + + /// Returns the Merkle proof of the contract. + pub fn contract_proof(&self) -> &Vec> { + &self.contract.merkle_proof + } + + /// Returns the inner `ContractWasm` with its proof. + pub fn wasm(&self) -> Option<&ValueWithProof> { + self.wasm.as_ref() + } + + /// Converts `self` into the contract hash, contract and Wasm. 
+ pub fn into_inner( + self, + ) -> ( + ContractHash, + ValueWithProof, + Option>, + ) { + (self.hash, self.contract, self.wasm) + } +} + +impl ToBytes for ContractInformation { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.hash.write_bytes(writer)?; + self.contract.write_bytes(writer)?; + self.wasm.write_bytes(writer) + } + + fn serialized_length(&self) -> usize { + self.hash.serialized_length() + + self.contract.serialized_length() + + self.wasm.serialized_length() + } +} + +impl FromBytes for ContractInformation { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (hash, remainder) = FromBytes::from_bytes(bytes)?; + let (contract, remainder) = FromBytes::from_bytes(remainder)?; + let (wasm, remainder) = FromBytes::from_bytes(remainder)?; + Ok((ContractInformation::new(hash, contract, wasm), remainder)) + } +} + +/// A contract entity with its associated ByteCode. +#[derive(Debug, PartialEq)] +pub struct AddressableEntityInformation { + addr: EntityAddr, + entity: ValueWithProof, + bytecode: Option>, +} + +impl AddressableEntityInformation { + /// Constructs new contract entity with ByteCode. + pub fn new( + addr: EntityAddr, + entity: ValueWithProof, + bytecode: Option>, + ) -> Self { + Self { + addr, + entity, + bytecode, + } + } + + /// Returns the entity address. + pub fn addr(&self) -> EntityAddr { + self.addr + } + + /// Returns the inner `AddressableEntity`. + pub fn entity(&self) -> &AddressableEntity { + &self.entity.value + } + + /// Returns the inner `ByteCodeWithProof`. + pub fn entity_merkle_proof(&self) -> &Vec> { + &self.entity.merkle_proof + } + + /// Returns the inner `ByteCode`. 
+ pub fn bytecode(&self) -> Option<&ValueWithProof> { + self.bytecode.as_ref() + } + + /// Converts `self` into the entity address, entity and ByteCode. + pub fn into_inner( + self, + ) -> ( + EntityAddr, + ValueWithProof, + Option>, + ) { + (self.addr, self.entity, self.bytecode) + } +} + +impl ToBytes for AddressableEntityInformation { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.addr.write_bytes(writer)?; + self.entity.write_bytes(writer)?; + self.bytecode.write_bytes(writer) + } + + fn serialized_length(&self) -> usize { + self.addr.serialized_length() + + self.entity.serialized_length() + + self.bytecode.serialized_length() + } +} + +impl FromBytes for AddressableEntityInformation { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (addr, remainder) = FromBytes::from_bytes(bytes)?; + let (entity, remainder) = FromBytes::from_bytes(remainder)?; + let (bytecode, remainder) = FromBytes::from_bytes(remainder)?; + Ok(( + AddressableEntityInformation::new(addr, entity, bytecode), + remainder, + )) + } +} + +/// A value with its associated Merkle proof. +#[derive(Debug, PartialEq)] +pub struct ValueWithProof { + value: T, + merkle_proof: Vec>, +} + +impl ValueWithProof { + /// Constructs a new `ValueWithProof`. + pub fn new(value: T, merkle_proof: Vec>) -> Self { + Self { + value, + merkle_proof, + } + } + + /// Returns the value. + pub fn value(&self) -> &T { + &self.value + } + + /// Returns the Merkle proof. + pub fn merkle_proof(&self) -> &[TrieMerkleProof] { + &self.merkle_proof + } + + /// Converts `self` into the value and Merkle proof. 
+ pub fn into_inner(self) -> (T, Vec>) { + (self.value, self.merkle_proof) + } +} + +impl ToBytes for ValueWithProof { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.value.write_bytes(writer)?; + self.merkle_proof.write_bytes(writer) + } + + fn serialized_length(&self) -> usize { + self.value.serialized_length() + self.merkle_proof.serialized_length() + } +} + +impl FromBytes for ValueWithProof { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (value, remainder) = FromBytes::from_bytes(bytes)?; + let (merkle_proof, remainder) = FromBytes::from_bytes(remainder)?; + Ok((ValueWithProof::new(value, merkle_proof), remainder)) + } +} + +impl_bytesrepr_for_type_wrapper!(Uptime); +impl_bytesrepr_for_type_wrapper!(ConsensusValidatorChanges); +impl_bytesrepr_for_type_wrapper!(NetworkName); +impl_bytesrepr_for_type_wrapper!(ReactorStateName); +impl_bytesrepr_for_type_wrapper!(LastProgress); +impl_bytesrepr_for_type_wrapper!(GetTrieFullResult); + +#[cfg(test)] +mod tests { + use core::iter::FromIterator; + use rand::Rng; + + use super::*; + use casper_types::{ + contracts::ContractPackageHash, execution::ExecutionResult, testing::TestRng, BlockHash, + CLValue, ContractWasmHash, StoredValue, + }; + + #[test] + fn uptime_roundtrip() { + let rng = &mut TestRng::new(); + bytesrepr::test_serialization_roundtrip(&Uptime::new(rng.gen())); + } + + #[test] + fn consensus_validator_changes_roundtrip() { + let rng = &mut TestRng::new(); + let map = BTreeMap::from_iter([( + PublicKey::random(rng), + vec![(EraId::random(rng), ValidatorChange::random(rng))], + )]); + bytesrepr::test_serialization_roundtrip(&ConsensusValidatorChanges::new(map)); + } + + #[test] + fn network_name_roundtrip() { + let rng = &mut TestRng::new(); + 
bytesrepr::test_serialization_roundtrip(&NetworkName::new(rng.random_string(5..20))); + } + + #[test] + fn reactor_state_name_roundtrip() { + let rng = &mut TestRng::new(); + bytesrepr::test_serialization_roundtrip(&ReactorStateName::new(rng.random_string(5..20))); + } + + #[test] + fn last_progress_roundtrip() { + let rng = &mut TestRng::new(); + bytesrepr::test_serialization_roundtrip(&LastProgress::new(Timestamp::random(rng))); + } + + #[test] + fn get_trie_full_result_roundtrip() { + let rng = &mut TestRng::new(); + bytesrepr::test_serialization_roundtrip(&GetTrieFullResult::new(rng.gen())); + } + + #[test] + fn reward_roundtrip() { + let rng = &mut TestRng::new(); + bytesrepr::test_serialization_roundtrip(&RewardResponse::new( + rng.gen(), + EraId::random(rng), + rng.gen(), + BlockHash::random(rng), + )); + } + + #[test] + fn consensus_status_roundtrip() { + let rng = &mut TestRng::new(); + bytesrepr::test_serialization_roundtrip(&ConsensusStatus::new( + PublicKey::random(rng), + Some(TimeDiff::from_millis(rng.gen())), + )); + } + + #[test] + fn transaction_with_execution_info_roundtrip() { + let rng = &mut TestRng::new(); + bytesrepr::test_serialization_roundtrip(&TransactionWithExecutionInfo::new( + Transaction::random(rng), + rng.gen::().then(|| ExecutionInfo { + block_hash: BlockHash::random(rng), + block_height: rng.gen(), + execution_result: rng.gen::().then(|| ExecutionResult::random(rng)), + }), + )); + } + + #[test] + fn dictionary_query_result_roundtrip() { + let rng = &mut TestRng::new(); + bytesrepr::test_serialization_roundtrip(&DictionaryQueryResult::new( + Key::Account(rng.gen()), + GlobalStateQueryResult::new( + StoredValue::CLValue(CLValue::from_t(rng.gen::()).unwrap()), + vec![], + ), + )); + } + + #[test] + fn contract_with_wasm_roundtrip() { + let rng = &mut TestRng::new(); + bytesrepr::test_serialization_roundtrip(&ContractInformation::new( + ContractHash::new(rng.gen()), + ValueWithProof::new( + Contract::new( + 
ContractPackageHash::new(rng.gen()), + ContractWasmHash::new(rng.gen()), + Default::default(), + Default::default(), + Default::default(), + ), + Default::default(), + ), + rng.gen::().then(|| { + ValueWithProof::new( + ContractWasm::new(rng.random_vec(10..50)), + Default::default(), + ) + }), + )); + } + + #[test] + fn addressable_entity_with_byte_code_roundtrip() { + let rng = &mut TestRng::new(); + bytesrepr::test_serialization_roundtrip(&AddressableEntityInformation::new( + rng.gen(), + ValueWithProof::new(AddressableEntity::example().clone(), Default::default()), + rng.gen::().then(|| { + ValueWithProof::new( + ByteCode::new(rng.gen(), rng.random_vec(10..50)), + Default::default(), + ) + }), + )); + } +} diff --git a/build_wasm_package.sh b/build_wasm_package.sh index e9afd1a73f..b1437556c4 100755 --- a/build_wasm_package.sh +++ b/build_wasm_package.sh @@ -24,13 +24,9 @@ export WASM_PACKAGE_VERSION="$(grep ^version $NODE_CONFIG_FILE | sed -e s'/.*= " export CL_WASM_DIR="$RUN_DIR/target/wasm32-unknown-unknown/release" export CL_OUTPUT_S3_DIR="$RUN_DIR/s3_artifacts/${WASM_PACKAGE_VERSION}" export CL_WASM_PACKAGE="$CL_OUTPUT_S3_DIR/casper-contracts.tar.gz" -export CL_VAULT_URL="${CL_VAULT_HOST}/v1/sre/cicd/s3/aws_credentials" -export CREDENTIAL_FILE_TMP="$RUN_DIR/s3_vault_output.json" export CL_S3_BUCKET='casperlabs-cicd-artifacts' export CL_S3_LOCATION="wasm_contracts/${WASM_PACKAGE_VERSION}" -echo "-H \"X-Vault-Token: $CL_VAULT_TOKEN\"" > ~/.curlrc - if [ ! -d $CL_OUTPUT_S3_DIR ]; then mkdir -p "${CL_OUTPUT_S3_DIR}" fi @@ -47,17 +43,10 @@ else exit 1 fi -# get aws credentials files -curl -s -q -X GET $CL_VAULT_URL --output $CREDENTIAL_FILE_TMP -if [ ! -f $CREDENTIAL_FILE_TMP ]; then - echo "[ERROR] Unable to fetch aws credentials from vault: $CL_VAULT_URL" - exit 1 +# upload to s3 +if [ -z "$AWS_SECRET_ACCESS_KEY" ] || [ -z "$AWS_ACCESS_KEY_ID" ]; then + log "ERROR: AWS KEYS needed to run. Contact SRE." 
+ exit 1 else - echo "[INFO] Found credentials file - $CREDENTIAL_FILE_TMP" - # get just the body required by bintray, strip off vault payload - export AWS_ACCESS_KEY_ID=$(/bin/cat $CREDENTIAL_FILE_TMP | jq -r .data.cicd_agent_to_s3.aws_access_key) - export AWS_SECRET_ACCESS_KEY=$(/bin/cat $CREDENTIAL_FILE_TMP | jq -r .data.cicd_agent_to_s3.aws_secret_key) - echo "[INFO] Going to upload wasm package: ${CL_WASM_PACKAGE} to s3 bucket: s3://${CL_S3_BUCKET}/${CL_S3_LOCATION}" - s3cmd put ${CL_WASM_PACKAGE} s3://${CL_S3_BUCKET}/${CL_S3_LOCATION}/casper-contracts.tar.gz + s3cmd put ${CL_WASM_PACKAGE} s3://${CL_S3_BUCKET}/${CL_S3_LOCATION}/casper-contracts.tar.gz fi - diff --git a/ci/approx_next_era_starts.sh b/ci/approx_next_era_starts.sh new file mode 100755 index 0000000000..1d58a97fbd --- /dev/null +++ b/ci/approx_next_era_starts.sh @@ -0,0 +1,53 @@ +#!/usr/bin/env bash + +if [ "$#" -lt 1 ]; then + echo "Usage: $0 [number of future eras]" + exit 1 +fi + +if [[ $1 == http* ]]; then + # Starts with http, so assume good full url + NODE_ADDRESS="--node-address $1" +else + NODE_ADDRESS="--node-address http://$NODE_IP:7777" +fi + +if ! command -v "casper-client" &> /dev/null ; then + echo "casper-client is not installed and required. Exiting..." + exit 1 +fi + +if [ "$#" -lt 2 ]; then + echo "No number of future eras given, using default." 
+ FUTURE_ERAS=10 +else + FUTURE_ERAS=$2 +fi + +LAST_SWITCH_BLOCK=$(casper-client get-era-summary $NODE_ADDRESS | jq -r .result.era_summary.block_hash | tr -d "/n") + +# Getting Timestamp and Era with one call using `@` delimiter +SB_TIMESTAMP_AND_ERA=$(casper-client get-block -b $LAST_SWITCH_BLOCK $NODE_ADDRESS | jq -r '.result.block_with_signatures.block.Version2.header | [.timestamp,.era_id] | join("@")' | tr -d "/n") + +# Parsing this back into seperate variables +IFS=@ read -r SB_TIMESTAMP LAST_ERA_ID <<< "$SB_TIMESTAMP_AND_ERA" + +SB_EPOCH=$(date -d "$SB_TIMESTAMP" +%s) + +START_ERA_ID=$(( LAST_ERA_ID + 1 )) +NEXT_ERA_ID=$(( START_ERA_ID + 1 )) + +FINAL_ERA_ID=$(( START_ERA_ID + FUTURE_ERAS )) + +ERA_TIME_SECONDS=$(( 120*60+9 )) + +#echo "current_era:$START_ERA_ID started_utc:$SB_TIMESTAMP" + +while (( NEXT_ERA_ID <= FINAL_ERA_ID )); do + TIMESTAMP_FROM_Z=$(date -u -d "@$SB_EPOCH" +"%Y-%m-%dT%H:%M:%SZ") + TIMESTAMP_FROM_L=$(date -d "@$SB_EPOCH" +"%Y-%m-%dT%H:%M:%S%z") + echo "era:$NEXT_ERA_ID utc:$TIMESTAMP_FROM_Z local:$TIMESTAMP_FROM_L" + let NEXT_ERA_ID++ + SB_EPOCH=$(( SB_EPOCH + ERA_TIME_SECONDS )) +done + diff --git a/ci/build_update_package.sh b/ci/build_update_package.sh index 314b378743..9c8924ecf6 100755 --- a/ci/build_update_package.sh +++ b/ci/build_update_package.sh @@ -1,51 +1,134 @@ #!/usr/bin/env bash -# This script will build bin.tar.gz and config.tar.gz in target/upgrade_build +# This script will build +# - bin.tar.gz +# - config.tar.gz +# - version.json +# in target/upgrade_build set -e +if command -v jq >&2; then + echo "jq installed" +else + echo "ERROR: jq is not installed and required" + exit 1 +fi + ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." 
>/dev/null 2>&1 && pwd)" -GENESIS_FILES_DIR="$ROOT_DIR/resources/production" +LATEST_DIR="$ROOT_DIR/target/latest" +GENESIS_FILES_DIR="$ROOT_DIR/resources" +PRODUCTION_GENESIS_FILES_DIR="$GENESIS_FILES_DIR/production" +MAIN_GENESIS_FILES_DIR="$GENESIS_FILES_DIR/mainnet" +TEST_GENESIS_FILES_DIR="$GENESIS_FILES_DIR/testnet" +INT_GENESIS_FILES_DIR="$GENESIS_FILES_DIR/integration-test" +DEV_GENESIS_FILES_DIR="$GENESIS_FILES_DIR/dev-net" NODE_BUILD_TARGET="$ROOT_DIR/target/release/casper-node" -PROTOCOL_VERSION=$(cat "$GENESIS_FILES_DIR/chainspec.toml" | python3 -c "import sys, toml; print(toml.load(sys.stdin)['protocol']['version'].replace('.','_'))") -UPGRADE_DIR="$ROOT_DIR/target/upgrade_build/$PROTOCOL_VERSION" +NODE_BUILD_DIR="$ROOT_DIR/node" +UPGRADE_DIR="$ROOT_DIR/target/upgrade_build/" BIN_DIR="$UPGRADE_DIR/bin" CONFIG_DIR="$UPGRADE_DIR/config" -NODE_BUILD_DIR="$ROOT_DIR/node" + +GIT_HASH=$(git rev-parse HEAD) +BRANCH_NAME=$(git branch --show-current) +PROTOCOL_VERSION=$(cat "$GENESIS_FILES_DIR/chainspec.toml" | python3 -c "import sys, toml; print(toml.load(sys.stdin)['protocol']['version'].replace('.','_'))") +NODE_VERSION=$(cat "$NODE_BUILD_DIR/Cargo.toml" | python3 -c "import sys, toml; print(toml.load(sys.stdin)['package']['version'])") + +echo "Creating $BRANCH_NAME.latest file" +mkdir -p "$LATEST_DIR" +echo -n "$GIT_HASH" > "$LATEST_DIR/$BRANCH_NAME.latest" echo "Building casper-node" -cd "$NODE_BUILD_DIR" +cd "$NODE_BUILD_DIR" || exit cargo build --release +echo "Building global-state-update-gen" +cd "$ROOT_DIR" || exit +cargo build --release --package global-state-update-gen +cargo deb --package global-state-update-gen +mkdir -p "$UPGRADE_DIR" +cp "$ROOT_DIR/target/debian/"* "$UPGRADE_DIR" || exit + echo "Generating bin README.md" mkdir -p "$BIN_DIR" readme="$BIN_DIR/README.md" { - echo "Build for Ubuntu 18.04." + echo "Build for Ubuntu 22.04." 
echo "" - echo "To run on other platforms, build from https://github.com/CasperLabs/casper-node" + echo "To run on other platforms, build from https://github.com/casper-network/casper-node" echo " cd node" echo " cargo build --release" echo "" - echo "git commit hash: $(git rev-parse HEAD)" + echo "git commit hash: $GIT_HASH" } > "$readme" echo "Packaging bin.tar.gz" mkdir -p "$BIN_DIR" cp "$NODE_BUILD_TARGET" "$BIN_DIR" # To get no path in tar, need to cd in. -cd "$BIN_DIR" +cd "$BIN_DIR" || exit tar -czvf "../bin.tar.gz" . cd .. rm -rf "$BIN_DIR" echo "Packaging config.tar.gz" mkdir -p "$CONFIG_DIR" -cp "$GENESIS_FILES_DIR/chainspec.toml" "$CONFIG_DIR" -cp "$GENESIS_FILES_DIR/config-example.toml" "$CONFIG_DIR" -cp "$GENESIS_FILES_DIR/accounts.toml" "$CONFIG_DIR" +cp "$PRODUCTION_GENESIS_FILES_DIR/chainspec.toml" "$CONFIG_DIR" +cp "$PRODUCTION_GENESIS_FILES_DIR/config-example.toml" "$CONFIG_DIR" +cp "$PRODUCTION_GENESIS_FILES_DIR/accounts.toml" "$CONFIG_DIR" # To get no path in tar, need to cd in. -cd "$CONFIG_DIR" +cd "$CONFIG_DIR" || exit tar -czvf "../config.tar.gz" . cd .. rm -rf "$CONFIG_DIR" + +echo "Packaging config-main.tar.gz" +mkdir -p "$CONFIG_DIR" +cp "$MAIN_GENESIS_FILES_DIR/chainspec.toml" "$CONFIG_DIR" +cp "$MAIN_GENESIS_FILES_DIR/config-example.toml" "$CONFIG_DIR" +# To get no path in tar, need to cd in. +cd "$CONFIG_DIR" || exit +tar -czvf "../config-main.tar.gz" . +cd .. +rm -rf "$CONFIG_DIR" + +echo "Packaging config-test.tar.gz" +mkdir -p "$CONFIG_DIR" +cp "$TEST_GENESIS_FILES_DIR/chainspec.toml" "$CONFIG_DIR" +cp "$TEST_GENESIS_FILES_DIR/config-example.toml" "$CONFIG_DIR" +# To get no path in tar, need to cd in. +cd "$CONFIG_DIR" || exit +tar -czvf "../config-test.tar.gz" . +cd .. +rm -rf "$CONFIG_DIR" + +echo "Packaging config-int.tar.gz" +mkdir -p "$CONFIG_DIR" +cp "$INT_GENESIS_FILES_DIR/chainspec.toml" "$CONFIG_DIR" +cp "$INT_GENESIS_FILES_DIR/config-example.toml" "$CONFIG_DIR" +# To get no path in tar, need to cd in. 
+cd "$CONFIG_DIR" || exit +tar -czvf "../config-int.tar.gz" . +cd .. +rm -rf "$CONFIG_DIR" + +echo "Packaging config-dev.tar.gz" +mkdir -p "$DEV_CONFIG_DIR" +cp "$DEV_GENESIS_FILES_DIR/chainspec.toml" "$CONFIG_DIR" +cp "$DEV_GENESIS_FILES_DIR/config-example.toml" "$CONFIG_DIR" +# To get no path in tar, need to cd in. +cd "$CONFIG_DIR" || exit +tar -czvf "../config-dev.tar.gz" . +cd .. +rm -rf "$CONFIG_DIR" + +echo "Building version.json" +jq --null-input \ +--arg branch "$BRANCH_NAME" \ +--arg version "$NODE_VERSION" \ +--arg pv "$PROTOCOL_VERSION" \ +--arg ghash "$GIT_HASH" \ +--arg now "$(jq -nr 'now | strftime("%Y-%m-%dT%H:%M:%SZ")')" \ +--arg files "$(ls "$UPGRADE_DIR" | jq -nRc '[inputs]')" \ +'{"branch": $branch, "version": $version, "protocol_version": $pv, "git-hash": $ghash, "timestamp": $now, "files": $files}' \ +> "$UPGRADE_DIR/version.json" diff --git a/ci/casper_updater/Cargo.toml b/ci/casper_updater/Cargo.toml index 05fb817468..09856ba349 100644 --- a/ci/casper_updater/Cargo.toml +++ b/ci/casper_updater/Cargo.toml @@ -1,14 +1,14 @@ [package] -authors = ["Fraser Hutchison "] -description = "A tool to update versions of all published CasperLabs packages." -edition = "2018" -license-file = "../../LICENSE" +authors = ["Joe Sacher "] +description = "A tool to update versions of all published Casper packages." 
+edition = "2021" +license = "Apache-2.0" name = "casper-updater" readme = "README.md" -version = "0.2.0" +version = "0.4.0" [dependencies] -casper-types = { version = "1.0.0", path = "../../types", features = ["std"] } -clap = "2" -once_cell = "1" -regex = "1" +clap = { version = "4.2.7", features = ["cargo", "deprecated", "wrap_help"] } +once_cell = "1.17.1" +regex = "1.8.1" +semver = "1.0.17" diff --git a/ci/casper_updater/README.md b/ci/casper_updater/README.md index f50e6afdd7..a9f3a8dbfd 100644 --- a/ci/casper_updater/README.md +++ b/ci/casper_updater/README.md @@ -1,10 +1,10 @@ # casper-updater -A tool to update versions of all published CasperLabs packages. +A tool to update versions of all published Casper packages. # Usage -The tool iterates through each published CasperLabs package, asking for a new version for each or automatically bumping the major, minor or patch version if `--bump=[major|minor|patch]` was specified. Once a valid version is specified, all files dependent on that version are updated. +The tool iterates through each published package, asking for a new version for each or automatically bumping the major, minor or patch version if `--bump=[major|minor|patch]` was specified. Once a valid version is specified, all files dependent on that version are updated. If you run the tool from its own directory it will expect to find the casper-node root directory at '../..'. Alternatively, you can give the path to the casper-node root directory via `--root-dir`. diff --git a/ci/casper_updater/src/dependent_file.rs b/ci/casper_updater/src/dependent_file.rs index c58f3b976d..572271508e 100644 --- a/ci/casper_updater/src/dependent_file.rs +++ b/ci/casper_updater/src/dependent_file.rs @@ -5,12 +5,10 @@ use std::{ use regex::Regex; -/// A file which is dependent on the version of a certain CasperLabs crate. +/// A file which is dependent on the version of a certain Casper crate. pub struct DependentFile { /// Full path to the file. 
path: PathBuf, - /// Current contents of the file. - contents: String, /// Regex applicable to the portion to be updated. regex: Regex, /// Function which generates the replacement string once the updated version is known. @@ -24,27 +22,26 @@ impl DependentFile { replacement: fn(&str) -> String, ) -> Self { let path = crate::root_dir().join(relative_path); - let contents = fs::read_to_string(&path) - .unwrap_or_else(|error| panic!("should read {}: {:?}", path.display(), error)); - assert!( - regex.find(&contents).is_some(), - "regex '{}' failed to get a match in {}", - regex, - path.display() - ); - - DependentFile { + let dependent_file = DependentFile { path, - contents, regex, replacement, - } + }; + let contents = dependent_file.contents(); + assert!( + dependent_file.regex.find(&contents).is_some(), + "regex '{}' failed to get a match in {}", + dependent_file.regex, + dependent_file.path.display() + ); + dependent_file } pub fn update(&self, updated_version: &str) { + let contents = self.contents(); let updated_contents = self .regex - .replace(&self.contents, (self.replacement)(updated_version).as_str()); + .replace_all(&contents, (self.replacement)(updated_version).as_str()); fs::write(&self.path, updated_contents.as_ref()) .unwrap_or_else(|error| panic!("should write {}: {:?}", self.path.display(), error)); } @@ -53,7 +50,14 @@ impl DependentFile { &self.path } - pub fn contents(&self) -> &str { - &self.contents + pub fn relative_path(&self) -> &Path { + self.path + .strip_prefix(crate::root_dir()) + .expect("should strip prefix") + } + + pub fn contents(&self) -> String { + fs::read_to_string(&self.path) + .unwrap_or_else(|error| panic!("should read {}: {:?}", self.path.display(), error)) } } diff --git a/ci/casper_updater/src/main.rs b/ci/casper_updater/src/main.rs index 30ea18076b..bd87be4f00 100644 --- a/ci/casper_updater/src/main.rs +++ b/ci/casper_updater/src/main.rs @@ -1,4 +1,4 @@ -//! A tool to update versions of all published CasperLabs packages. 
+//! A tool to update versions of all published Casper packages. #![warn(unused, missing_copy_implementations, missing_docs)] #![deny( @@ -20,13 +20,11 @@ clippy::all )] #![forbid( - const_err, arithmetic_overflow, invalid_type_param_default, macro_expanded_macro_exports_accessed_by_absolute_paths, mutable_transmutes, no_mangle_const_items, - order_dependent_trait_objects, overflowing_literals, pub_use_of_private_extern_crate, unknown_crate_types @@ -40,39 +38,43 @@ use std::{ env, path::{Path, PathBuf}, process::Command, - str::FromStr, }; -use clap::{crate_version, App, Arg}; +use clap::{ + builder::{PathBufValueParser, PossibleValue}, + crate_version, Arg, ArgAction, Command as App, +}; use once_cell::sync::Lazy; +use semver::Version; use package::Package; const APP_NAME: &str = "Casper Updater"; const ROOT_DIR_ARG_NAME: &str = "root-dir"; -const ROOT_DIR_ARG_SHORT: &str = "r"; +const ROOT_DIR_ARG_SHORT: char = 'r'; const ROOT_DIR_ARG_VALUE_NAME: &str = "PATH"; const ROOT_DIR_ARG_HELP: &str = "Path to casper-node root directory. If not supplied, assumes it is at ../.."; const BUMP_ARG_NAME: &str = "bump"; -const BUMP_ARG_SHORT: &str = "b"; +const BUMP_ARG_SHORT: char = 'b'; const BUMP_ARG_VALUE_NAME: &str = "VERSION-COMPONENT"; const BUMP_ARG_HELP: &str = - "Increase all crates' versions automatically without asking for user input. For a crate at \ + "Increases all crates' versions automatically without asking for user input. For a crate at \ version x.y.z, the version will be bumped to (x+1).0.0, x.(y+1).0, or x.y.(z+1) depending on \ - which version component is specified"; + which version component is specified. 
If this option is specified, --activation-point must \ + also be specified."; const MAJOR: &str = "major"; const MINOR: &str = "minor"; const PATCH: &str = "patch"; const DRY_RUN_ARG_NAME: &str = "dry-run"; -const DRY_RUN_ARG_SHORT: &str = "d"; -const DRY_RUN_ARG_HELP: &str = "Check all regexes get matches in current casper-node repo"; +const DRY_RUN_ARG_SHORT: char = 'd'; +const DRY_RUN_ARG_HELP: &str = "Checks all regexes get matches in current casper-node repo"; const ALLOW_EARLIER_VERSION_NAME: &str = "allow-earlier-version"; -const ALLOW_EARLIER_VERSION_HELP: &str = "Allow manual setting of version earlier than current"; +const ALLOW_EARLIER_VERSION_HELP: &str = "Allows manual setting of version earlier than current"; #[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq)] pub(crate) enum BumpVersion { @@ -81,6 +83,20 @@ pub(crate) enum BumpVersion { Patch, } +impl BumpVersion { + pub(crate) fn update(self, current_version: &Version) -> Version { + match self { + BumpVersion::Major => Version::new(current_version.major + 1, 0, 0), + BumpVersion::Minor => Version::new(current_version.major, current_version.minor + 1, 0), + BumpVersion::Patch => Version::new( + current_version.major, + current_version.minor, + current_version.patch + 1, + ), + } + } +} + struct Args { root_dir: PathBuf, bump_version: Option, @@ -114,37 +130,42 @@ fn get_args() -> Args { let arg_matches = App::new(APP_NAME) .version(crate_version!()) .arg( - Arg::with_name(ROOT_DIR_ARG_NAME) + Arg::new(ROOT_DIR_ARG_NAME) .long(ROOT_DIR_ARG_NAME) .short(ROOT_DIR_ARG_SHORT) .value_name(ROOT_DIR_ARG_VALUE_NAME) .help(ROOT_DIR_ARG_HELP) - .takes_value(true), + .value_parser(PathBufValueParser::new()), ) .arg( - Arg::with_name(BUMP_ARG_NAME) + Arg::new(BUMP_ARG_NAME) .long(BUMP_ARG_NAME) .short(BUMP_ARG_SHORT) .value_name(BUMP_ARG_VALUE_NAME) .help(BUMP_ARG_HELP) - .takes_value(true) - .possible_values(&[MAJOR, MINOR, PATCH]), + .value_parser([ + PossibleValue::new(MAJOR), + PossibleValue::new(MINOR), 
+ PossibleValue::new(PATCH), + ]), ) .arg( - Arg::with_name(DRY_RUN_ARG_NAME) + Arg::new(DRY_RUN_ARG_NAME) .long(DRY_RUN_ARG_NAME) .short(DRY_RUN_ARG_SHORT) + .action(ArgAction::SetTrue) .help(DRY_RUN_ARG_HELP), ) .arg( - Arg::with_name(ALLOW_EARLIER_VERSION_NAME) + Arg::new(ALLOW_EARLIER_VERSION_NAME) .long(ALLOW_EARLIER_VERSION_NAME) + .action(ArgAction::SetTrue) .help(ALLOW_EARLIER_VERSION_HELP), ) .get_matches(); - let root_dir = match arg_matches.value_of(ROOT_DIR_ARG_NAME) { - Some(path) => PathBuf::from_str(path).expect("should be a valid unicode path"), + let root_dir = match arg_matches.get_one::(ROOT_DIR_ARG_NAME) { + Some(path) => path.clone(), None => env::current_dir() .expect("should be able to access current working dir") .parent() @@ -155,17 +176,17 @@ fn get_args() -> Args { }; let bump_version = arg_matches - .value_of(BUMP_ARG_NAME) - .map(|value| match value { + .get_one::<&str>(BUMP_ARG_NAME) + .map(|value| match *value { MAJOR => BumpVersion::Major, MINOR => BumpVersion::Minor, PATCH => BumpVersion::Patch, _ => unreachable!(), }); - let dry_run = arg_matches.is_present(DRY_RUN_ARG_NAME); + let dry_run = arg_matches.get_flag(DRY_RUN_ARG_NAME); - let allow_earlier_version = arg_matches.is_present(ALLOW_EARLIER_VERSION_NAME); + let allow_earlier_version = arg_matches.get_flag(ALLOW_EARLIER_VERSION_NAME); Args { root_dir, @@ -176,56 +197,75 @@ fn get_args() -> Args { } fn main() { - let types = Package::cargo("types", &*regex_data::types::DEPENDENT_FILES); - types.update(); - - let execution_engine = Package::cargo( - "execution_engine", - &*regex_data::execution_engine::DEPENDENT_FILES, - ); - execution_engine.update(); - - let node_macros = Package::cargo("node_macros", &*regex_data::node_macros::DEPENDENT_FILES); - node_macros.update(); - - let node = Package::cargo("node", &*regex_data::node::DEPENDENT_FILES); - node.update(); - - let client = Package::cargo("client", &*regex_data::client::DEPENDENT_FILES); - client.update(); - - let 
smart_contracts_contract = Package::cargo( - "smart_contracts/contract", - &*regex_data::smart_contracts_contract::DEPENDENT_FILES, - ); - smart_contracts_contract.update(); - - let smart_contracts_contract_as = Package::assembly_script( - "smart_contracts/contract_as", - &*regex_data::smart_contracts_contract_as::DEPENDENT_FILES, - ); - smart_contracts_contract_as.update(); - - let execution_engine_testing_test_support = Package::cargo( - "execution_engine_testing/test_support", - &*regex_data::execution_engine_testing_test_support::DEPENDENT_FILES, - ); - execution_engine_testing_test_support.update(); - - let execution_engine_testing_cargo_casper = Package::cargo( - "execution_engine_testing/cargo_casper", - &*regex_data::execution_engine_testing_cargo_casper::DEPENDENT_FILES, - ); - execution_engine_testing_cargo_casper.update(); + let rust_packages = [ + Package::cargo("types", ®ex_data::types::DEPENDENT_FILES), + Package::cargo("binary_port", ®ex_data::binary_port::DEPENDENT_FILES), + Package::cargo("storage", ®ex_data::storage::DEPENDENT_FILES), + Package::cargo( + "execution_engine", + ®ex_data::execution_engine::DEPENDENT_FILES, + ), + Package::cargo( + "execution_engine_testing/test_support", + ®ex_data::execution_engine_testing_test_support::DEPENDENT_FILES, + ), + Package::cargo("node", ®ex_data::node::DEPENDENT_FILES), + Package::cargo( + "smart_contracts/contract", + ®ex_data::smart_contracts_contract::DEPENDENT_FILES, + ), + Package::cargo( + "smart_contracts/sdk_sys", + ®ex_data::smart_contracts_sdk_sys::DEPENDENT_FILES, + ), + Package::cargo( + "smart_contracts/sdk", + ®ex_data::smart_contracts_sdk::DEPENDENT_FILES, + ), + Package::cargo( + "smart_contracts/sdk_codegen", + ®ex_data::smart_contracts_sdk_codegen::DEPENDENT_FILES, + ), + Package::cargo( + "smart_contracts/macros", + ®ex_data::smart_contracts_macros::DEPENDENT_FILES, + ), + Package::cargo( + "executor/wasm_common", + ®ex_data::executor_wasm_common::DEPENDENT_FILES, + ), + 
Package::cargo( + "executor/wasm_interface", + ®ex_data::executor_wasm_interface::DEPENDENT_FILES, + ), + Package::cargo( + "executor/wasm_host", + ®ex_data::executor_wasm_host::DEPENDENT_FILES, + ), + Package::cargo( + "executor/wasmer_backend", + ®ex_data::executor_wasmer_backend::DEPENDENT_FILES, + ), + Package::cargo("executor/wasm", ®ex_data::executor_wasm::DEPENDENT_FILES), + ]; + + for rust_package in &rust_packages { + rust_package.update() + } // Update Cargo.lock if this isn't a dry run. if !is_dry_run() { - let status = Command::new(env!("CARGO")) - .arg("generate-lockfile") - .arg("--offline") + let mut command = Command::new(env!("CARGO")); + let _ = command .current_dir(root_dir()) + .arg("update") + .arg("--offline"); + for rust_package in &rust_packages { + let _ = command.arg("--package").arg(rust_package.name()); + } + let status = command .status() - .expect("Failed to execute 'cargo generate-lockfile'"); + .unwrap_or_else(|error| panic!("Failed to execute '{:?}': {}", command, error)); assert!(status.success(), "Failed to update Cargo.lock"); } } diff --git a/ci/casper_updater/src/package.rs b/ci/casper_updater/src/package.rs index 2dbec0bb0b..b9e277bf48 100644 --- a/ci/casper_updater/src/package.rs +++ b/ci/casper_updater/src/package.rs @@ -1,31 +1,25 @@ use std::{ - convert::TryFrom, io::{self, Write}, path::Path, }; use regex::Regex; +use semver::Version; use crate::{ dependent_file::DependentFile, - regex_data::{ - MANIFEST_NAME_REGEX, MANIFEST_VERSION_REGEX, PACKAGE_JSON_NAME_REGEX, - PACKAGE_JSON_VERSION_REGEX, - }, - BumpVersion, + regex_data::{MANIFEST_NAME_REGEX, MANIFEST_VERSION_REGEX}, }; -use casper_types::SemVer; - const CAPTURE_INDEX: usize = 2; -/// Represents a published CasperLabs crate or AssemblyScript package which may need its version +/// Represents a published Casper crate or AssemblyScript package which may need its version /// updated. pub struct Package { /// This package's name as specified in its manifest. 
name: String, /// This package's current version as specified in its manifest. - current_version: SemVer, + current_version: Version, /// Files which must be updated if this package's version is changed, including this package's /// own manifest file. The other files will often be from a different package. dependent_files: &'static Vec, @@ -43,25 +37,11 @@ impl PackageConsts for CargoPackage { const MANIFEST: &'static str = "Cargo.toml"; fn name_regex() -> &'static Regex { - &*MANIFEST_NAME_REGEX + &MANIFEST_NAME_REGEX } fn version_regex() -> &'static Regex { - &*MANIFEST_VERSION_REGEX - } -} - -struct AssemblyScriptPackage; - -impl PackageConsts for AssemblyScriptPackage { - const MANIFEST: &'static str = "package.json"; - - fn name_regex() -> &'static Regex { - &*PACKAGE_JSON_NAME_REGEX - } - - fn version_regex() -> &'static Regex { - &*PACKAGE_JSON_VERSION_REGEX + &MANIFEST_VERSION_REGEX } } @@ -74,11 +54,8 @@ impl Package { Self::new::<_, CargoPackage>(relative_path, dependent_files) } - pub fn assembly_script>( - relative_path: P, - dependent_files: &'static Vec, - ) -> Self { - Self::new::<_, AssemblyScriptPackage>(relative_path, dependent_files) + pub fn name(&self) -> &str { + &self.name } fn new, T: PackageConsts>( @@ -97,10 +74,11 @@ impl Package { relative_path.as_ref().display() ) }); + let contents = manifest.contents(); let find_value = |regex: &Regex| { regex - .captures(manifest.contents()) + .captures(&contents) .unwrap_or_else(|| { panic!( "should find package name and version in {}", @@ -121,7 +99,7 @@ impl Package { let name = find_value(T::name_regex()); let version = find_value(T::version_regex()); - let current_version = SemVer::try_from(&*version).expect("should parse current version"); + let current_version = Version::parse(&version).expect("should parse current version"); Package { name, @@ -137,27 +115,23 @@ impl Package { self.name, self.current_version ); if let Some(bump_version) = crate::bump_version() { - let updated_version = 
self.get_updated_version_from_bump(bump_version); + let updated_version = bump_version.update(&self.current_version); println!("Will be updated to {}", updated_version); } println!("Files affected by this package's version:"); for dependent_file in self.dependent_files { - let relative_path = dependent_file - .path() - .strip_prefix(crate::root_dir()) - .expect("should strip prefix"); - println!("\t* {}", relative_path.display()); + println!("\t* {}", dependent_file.relative_path().display()); } println!(); return; } let updated_version = match crate::bump_version() { - None => match self.get_updated_version_from_user() { + None => match get_updated_version_from_user(&self.name, &self.current_version) { Some(version) => version, None => return, }, - Some(bump_version) => self.get_updated_version_from_bump(bump_version), + Some(bump_version) => bump_version.update(&self.current_version), }; for dependent_file in self.dependent_files { @@ -169,66 +143,50 @@ impl Package { self.name, self.current_version, updated_version ); } +} - fn get_updated_version_from_bump(&self, bump_version: BumpVersion) -> SemVer { - match bump_version { - BumpVersion::Major => SemVer::new(self.current_version.major + 1, 0, 0), - BumpVersion::Minor => SemVer::new( - self.current_version.major, - self.current_version.minor + 1, - 0, - ), - BumpVersion::Patch => SemVer::new( - self.current_version.major, - self.current_version.minor, - self.current_version.patch + 1, - ), - } - } - - fn get_updated_version_from_user(&self) -> Option { - loop { - print!( - "Current version of {} is {}. 
Enter new version (leave blank for unchanged): ", - self.name, self.current_version - ); - io::stdout().flush().expect("should flush stdout"); - let mut input = String::new(); - match io::stdin().read_line(&mut input) { - Ok(_) => { - input = input.trim_end().to_string(); - if input.is_empty() { - return None; - } +pub fn get_updated_version_from_user(name: &str, current_version: &Version) -> Option { + loop { + print!( + "Current {} version is {}. Enter new version (leave blank for unchanged): ", + name, current_version + ); + io::stdout().flush().expect("should flush stdout"); + let mut input = String::new(); + match io::stdin().read_line(&mut input) { + Ok(_) => { + input = input.trim_end().to_string(); + if input.is_empty() { + return None; + } - let new_version = match SemVer::try_from(&*input) { - Ok(version) => version, - Err(error) => { - println!("\n{} is not a valid version: {}.", input, error); - continue; - } - }; - - if new_version < self.current_version { - println!( - "Updated version ({}) is lower than current version ({})", - new_version, self.current_version - ); - if crate::allow_earlier_version() { - println!("Allowing earlier version due to flag.") - } else { - continue; - } + let new_version = match Version::parse(&input) { + Ok(version) => version, + Err(error) => { + println!("\n{} is not a valid version: {}.", input, error); + continue; } - - return if new_version == self.current_version { - None + }; + + if new_version < *current_version { + println!( + "Updated version ({}) is lower than current version ({})", + new_version, current_version + ); + if crate::allow_earlier_version() { + println!("Allowing earlier version due to flag.") } else { - Some(new_version) - }; + continue; + } } - Err(error) => println!("\nFailed to read from stdin: {}.", error), + + return if new_version == *current_version { + None + } else { + Some(new_version) + }; } + Err(error) => println!("\nFailed to read from stdin: {}.", error), } } } diff --git 
a/ci/casper_updater/src/regex_data.rs b/ci/casper_updater/src/regex_data.rs index 8bcba39c0b..c64d882256 100644 --- a/ci/casper_updater/src/regex_data.rs +++ b/ci/casper_updater/src/regex_data.rs @@ -9,10 +9,6 @@ pub static MANIFEST_NAME_REGEX: Lazy = Lazy::new(|| Regex::new(r#"(?m)(^name = )"([^"]+)"#).unwrap()); pub static MANIFEST_VERSION_REGEX: Lazy = Lazy::new(|| Regex::new(r#"(?m)(^version = )"([^"]+)"#).unwrap()); -pub static PACKAGE_JSON_NAME_REGEX: Lazy = - Lazy::new(|| Regex::new(r#"(?m)(^ "name": )"([^"]+)"#).unwrap()); -pub static PACKAGE_JSON_VERSION_REGEX: Lazy = - Lazy::new(|| Regex::new(r#"(?m)(^ "version": )"([^"]+)"#).unwrap()); fn replacement(updated_version: &str) -> String { format!(r#"$1"{}"#, updated_version) @@ -22,132 +18,224 @@ fn replacement_with_slash(updated_version: &str) -> String { format!(r#"$1/{}"#, updated_version) } +pub static TYPES_VERSION_REGEX: Lazy = + Lazy::new(|| Regex::new(r#"(?m)(^casper-types = \{[^\}]*version = )"(?:[^"]+)"#).unwrap()); + +pub mod binary_port { + use super::*; + + pub static DEPENDENT_FILES: Lazy> = Lazy::new(|| { + vec![ + DependentFile::new( + "binary_port/Cargo.toml", + MANIFEST_VERSION_REGEX.clone(), + replacement, + ), + DependentFile::new( + "node/Cargo.toml", + Regex::new(r#"(?m)(^casper-binary-port = \{[^\}]*version = )"(?:[^"]+)"#).unwrap(), + replacement, + ), + ] + }); +} + pub mod types { use super::*; pub static DEPENDENT_FILES: Lazy> = Lazy::new(|| { vec![ DependentFile::new( - "client/Cargo.toml", - Regex::new(r#"(?m)(^casper-types = \{[^\}]*version = )"(?:[^"]+)"#).unwrap(), + "types/Cargo.toml", + MANIFEST_VERSION_REGEX.clone(), replacement, ), DependentFile::new( - "execution_engine/Cargo.toml", - Regex::new(r#"(?m)(^casper-types = \{[^\}]*version = )"(?:[^"]+)"#).unwrap(), + "types/src/lib.rs", + Regex::new( + r#"(?m)(#!\[doc\(html_root_url = "https://docs.rs/casper-types)/(?:[^"]+)"#, + ) + .unwrap(), + replacement_with_slash, + ), + DependentFile::new( + "binary_port/Cargo.toml", + 
TYPES_VERSION_REGEX.clone(), replacement, ), DependentFile::new( - "execution_engine_testing/cargo_casper/src/common.rs", - Regex::new(r#"(?m)("casper-types",\s*)"(?:[^"]+)"#).unwrap(), + "storage/Cargo.toml", + TYPES_VERSION_REGEX.clone(), replacement, ), DependentFile::new( - "execution_engine_testing/test_support/Cargo.toml", - Regex::new(r#"(?m)(^casper-types = \{[^\}]*version = )"(?:[^"]+)"#).unwrap(), + "execution_engine/Cargo.toml", + TYPES_VERSION_REGEX.clone(), replacement, ), DependentFile::new( - "node/Cargo.toml", - Regex::new(r#"(?m)(^casper-types = \{[^\}]*version = )"(?:[^"]+)"#).unwrap(), + "execution_engine_testing/test_support/Cargo.toml", + TYPES_VERSION_REGEX.clone(), replacement, ), + DependentFile::new("node/Cargo.toml", TYPES_VERSION_REGEX.clone(), replacement), DependentFile::new( "smart_contracts/contract/Cargo.toml", - Regex::new(r#"(?m)(^casper-types = \{[^\}]*version = )"(?:[^"]+)"#).unwrap(), + TYPES_VERSION_REGEX.clone(), replacement, ), DependentFile::new( - "types/Cargo.toml", + "executor/wasm/Cargo.toml", + TYPES_VERSION_REGEX.clone(), + replacement, + ), + DependentFile::new( + "executor/wasm_host/Cargo.toml", + TYPES_VERSION_REGEX.clone(), + replacement, + ), + DependentFile::new( + "executor/wasm_interface/Cargo.toml", + TYPES_VERSION_REGEX.clone(), + replacement, + ), + DependentFile::new( + "executor/wasmer_backend/Cargo.toml", + TYPES_VERSION_REGEX.clone(), + replacement, + ), + ] + }); +} + +pub static STORAGE_VERSION_REGEX: Lazy = + Lazy::new(|| Regex::new(r#"(?m)(^casper-storage = \{[^\}]*version = )"(?:[^"]+)"#).unwrap()); +pub mod storage { + use super::*; + + pub static DEPENDENT_FILES: Lazy> = Lazy::new(|| { + vec![ + DependentFile::new( + "storage/Cargo.toml", MANIFEST_VERSION_REGEX.clone(), replacement, ), DependentFile::new( - "types/src/lib.rs", + "storage/src/lib.rs", Regex::new( - r#"(?m)(#!\[doc\(html_root_url = "https://docs.rs/casper-types)/(?:[^"]+)"#, + r#"(?m)(#!\[doc\(html_root_url = 
"https://docs.rs/casper-storage)/(?:[^"]+)"#, ) .unwrap(), replacement_with_slash, ), + DependentFile::new( + "execution_engine/Cargo.toml", + STORAGE_VERSION_REGEX.clone(), + replacement, + ), + DependentFile::new( + "execution_engine_testing/test_support/Cargo.toml", + STORAGE_VERSION_REGEX.clone(), + replacement, + ), + DependentFile::new( + "node/Cargo.toml", + STORAGE_VERSION_REGEX.clone(), + replacement, + ), + DependentFile::new( + "executor/wasm/Cargo.toml", + STORAGE_VERSION_REGEX.clone(), + replacement, + ), + DependentFile::new( + "executor/wasm_host/Cargo.toml", + STORAGE_VERSION_REGEX.clone(), + replacement, + ), + DependentFile::new( + "executor/wasm_interface/Cargo.toml", + STORAGE_VERSION_REGEX.clone(), + replacement, + ), + DependentFile::new( + "executor/wasmer_backend/Cargo.toml", + STORAGE_VERSION_REGEX.clone(), + replacement, + ), ] }); } +pub static EXECUTION_ENGINE_VERSION_REGEX: Lazy = Lazy::new(|| { + Regex::new(r#"(?m)(^casper-execution-engine = \{[^\}]*version = )"(?:[^"]+)"#).unwrap() +}); pub mod execution_engine { use super::*; pub static DEPENDENT_FILES: Lazy> = Lazy::new(|| { vec![ - DependentFile::new( - "client/Cargo.toml", - Regex::new(r#"(?m)(^casper-execution-engine = \{[^\}]*version = )"(?:[^"]+)"#) - .unwrap(), - replacement, - ), + DependentFile::new( + "execution_engine/Cargo.toml", + MANIFEST_VERSION_REGEX.clone(), + replacement, + ), + DependentFile::new( + "execution_engine/src/lib.rs", + Regex::new(r#"(?m)(#!\[doc\(html_root_url = "https://docs.rs/casper-execution-engine)/(?:[^"]+)"#).unwrap(), + replacement_with_slash, + ), DependentFile::new( "execution_engine_testing/test_support/Cargo.toml", - Regex::new(r#"(?m)(^casper-execution-engine = \{[^\}]*version = )"(?:[^"]+)"#) - .unwrap(), + EXECUTION_ENGINE_VERSION_REGEX.clone(), replacement, ), DependentFile::new( "node/Cargo.toml", - Regex::new(r#"(?m)(^casper-execution-engine = \{[^\}]*version = )"(?:[^"]+)"#) - .unwrap(), - replacement, - ), - DependentFile::new( - 
"execution_engine/Cargo.toml", - MANIFEST_VERSION_REGEX.clone(), + EXECUTION_ENGINE_VERSION_REGEX.clone(), replacement, ), - DependentFile::new( - "execution_engine/src/lib.rs", - Regex::new(r#"(?m)(#!\[doc\(html_root_url = "https://docs.rs/casper-execution-engine)/(?:[^"]+)"#).unwrap(), - replacement_with_slash, - ), + DependentFile::new( + "executor/wasm/Cargo.toml", + EXECUTION_ENGINE_VERSION_REGEX.clone(), + replacement, + ), ] }); } -pub mod node_macros { +pub mod execution_engine_testing_test_support { use super::*; pub static DEPENDENT_FILES: Lazy> = Lazy::new(|| { vec![ - DependentFile::new( - "node/Cargo.toml", - Regex::new(r#"(?m)(^casper-node-macros = \{[^\}]*version = )"(?:[^"]+)"#).unwrap(), - replacement, - ), - DependentFile::new( - "node_macros/Cargo.toml", - MANIFEST_VERSION_REGEX.clone(), - replacement, - ), - DependentFile::new( - "node_macros/src/lib.rs", - Regex::new( - r#"(?m)(#!\[doc\(html_root_url = "https://docs.rs/casper-node-macros)/(?:[^"]+)"#, - ) - .unwrap(), - replacement_with_slash, - ), - ] + DependentFile::new( + "execution_engine_testing/test_support/Cargo.toml", + MANIFEST_VERSION_REGEX.clone(), + replacement, + ), + DependentFile::new( + "execution_engine_testing/test_support/src/lib.rs", + Regex::new(r#"(?m)(#!\[doc\(html_root_url = "https://docs.rs/casper-engine-test-support)/(?:[^"]+)"#).unwrap(), + replacement_with_slash, + ), + ] }); } pub mod node { use super::*; + pub static CHAINSPEC_REGEX: Lazy = + Lazy::new(|| Regex::new(r#"(?m)(^version = )'([^']+)"#).unwrap()); + + fn chainspec_toml_replacement(updated_version: &str) -> String { + format!(r#"$1'{}"#, updated_version) + } + pub static DEPENDENT_FILES: Lazy> = Lazy::new(|| { + vec![ - DependentFile::new( - "client/Cargo.toml", - Regex::new(r#"(?m)(^casper-node = \{[^\}]*version = )"(?:[^"]+)"#).unwrap(), - replacement, - ), DependentFile::new( "node/Cargo.toml", MANIFEST_VERSION_REGEX.clone(), @@ -161,109 +249,294 @@ pub mod node { .unwrap(), replacement_with_slash, ), 
+ DependentFile::new( + "resources/local/chainspec.toml.in", + CHAINSPEC_REGEX.clone(), + chainspec_toml_replacement, + ), + DependentFile::new( + "resources/production/chainspec.toml", + CHAINSPEC_REGEX.clone(), + chainspec_toml_replacement, + ), + DependentFile::new( + "resources/mainnet/chainspec.toml", + CHAINSPEC_REGEX.clone(), + chainspec_toml_replacement, + ), + DependentFile::new( + "resources/testnet/chainspec.toml", + CHAINSPEC_REGEX.clone(), + chainspec_toml_replacement, + ), + DependentFile::new( + "resources/integration-test/chainspec.toml", + CHAINSPEC_REGEX.clone(), + chainspec_toml_replacement, + ), + DependentFile::new( + "resources/dev-net/chainspec.toml", + CHAINSPEC_REGEX.clone(), + chainspec_toml_replacement, + ), ] }); } -pub mod client { +pub mod smart_contracts_contract { + use super::*; + + pub static DEPENDENT_FILES: Lazy> = Lazy::new(|| { + vec![ + DependentFile::new( + "smart_contracts/contract/Cargo.toml", + MANIFEST_VERSION_REGEX.clone(), + replacement, + ), + DependentFile::new( + "smart_contracts/contract/src/lib.rs", + Regex::new( + r#"(?m)(#!\[doc\(html_root_url = "https://docs.rs/casper-contract)/(?:[^"]+)"#, + ) + .unwrap(), + replacement_with_slash, + ), + ] + }); +} + +pub static SMART_CONTRACTS_SDK_SYS_VERSION_REGEX: Lazy = Lazy::new(|| { + Regex::new(r#"(?m)(^casper-contract-sdk-sys = \{[^\}]*version = )"(?:[^"]+)"#).unwrap() +}); + +pub mod smart_contracts_sdk_sys { + use super::*; + + pub static DEPENDENT_FILES: Lazy> = Lazy::new(|| { + vec![ + DependentFile::new( + "smart_contracts/sdk_sys/Cargo.toml", + MANIFEST_VERSION_REGEX.clone(), + replacement, + ), + DependentFile::new( + "executor/wasm_common/Cargo.toml", + SMART_CONTRACTS_SDK_SYS_VERSION_REGEX.clone(), + replacement, + ), + DependentFile::new( + "executor/wasmer_backend/Cargo.toml", + SMART_CONTRACTS_SDK_SYS_VERSION_REGEX.clone(), + replacement, + ), + DependentFile::new( + "smart_contracts/macros/Cargo.toml", + SMART_CONTRACTS_SDK_SYS_VERSION_REGEX.clone(), + 
replacement, + ), + DependentFile::new( + "smart_contracts/sdk/Cargo.toml", + SMART_CONTRACTS_SDK_SYS_VERSION_REGEX.clone(), + replacement, + ), + ] + }); +} + +pub mod smart_contracts_sdk { + use super::*; + + pub static DEPENDENT_FILES: Lazy> = Lazy::new(|| { + vec![ + DependentFile::new( + "smart_contracts/sdk/Cargo.toml", + MANIFEST_VERSION_REGEX.clone(), + replacement, + ), + DependentFile::new( + "smart_contracts/sdk_codegen/Cargo.toml", + Regex::new(r#"(?m)(^casper-contract-sdk = \{[^\}]*version = )"(?:[^"]+)"#).unwrap(), + replacement, + ), + ] + }); +} + +pub mod smart_contracts_sdk_codegen { use super::*; pub static DEPENDENT_FILES: Lazy> = Lazy::new(|| { vec![DependentFile::new( - "client/Cargo.toml", + "smart_contracts/sdk_codegen/Cargo.toml", MANIFEST_VERSION_REGEX.clone(), replacement, )] }); } - -pub mod smart_contracts_contract { +pub mod smart_contracts_macros { use super::*; pub static DEPENDENT_FILES: Lazy> = Lazy::new(|| { vec![ DependentFile::new( - "execution_engine_testing/cargo_casper/src/common.rs", - Regex::new(r#"(?m)("casper-contract",\s*)"(?:[^"]+)"#).unwrap(), + "smart_contracts/macros/Cargo.toml", + MANIFEST_VERSION_REGEX.clone(), replacement, ), DependentFile::new( - "execution_engine_testing/test_support/Cargo.toml", - Regex::new(r#"(?m)(^casper-contract = \{[^\}]*version = )"(?:[^"]+)"#).unwrap(), + "smart_contracts/sdk/Cargo.toml", + Regex::new(r#"(?m)(^casper-contract-macros = \{[^\}]*version = )"(?:[^"]+)"#) + .unwrap(), replacement, ), + ] + }); +} + +pub static EXECUTOR_WASM_COMMON_VERSION_REGEX: Lazy = Lazy::new(|| { + Regex::new(r#"(?m)(^casper-executor-wasm-common = \{[^\}]*version = )"(?:[^"]+)"#).unwrap() +}); +pub mod executor_wasm_common { + use super::*; + + pub static DEPENDENT_FILES: Lazy> = Lazy::new(|| { + vec![ DependentFile::new( - "smart_contracts/contract/Cargo.toml", + "executor/wasm_common/Cargo.toml", MANIFEST_VERSION_REGEX.clone(), replacement, ), DependentFile::new( - "smart_contracts/contract/src/lib.rs", 
- Regex::new( - r#"(?m)(#!\[doc\(html_root_url = "https://docs.rs/casper-contract)/(?:[^"]+)"#, - ) - .unwrap(), - replacement_with_slash, + "executor/wasm/Cargo.toml", + EXECUTOR_WASM_COMMON_VERSION_REGEX.clone(), + replacement, + ), + DependentFile::new( + "executor/wasm_host/Cargo.toml", + EXECUTOR_WASM_COMMON_VERSION_REGEX.clone(), + replacement, + ), + DependentFile::new( + "executor/wasm_interface/Cargo.toml", + EXECUTOR_WASM_COMMON_VERSION_REGEX.clone(), + replacement, + ), + DependentFile::new( + "executor/wasmer_backend/Cargo.toml", + EXECUTOR_WASM_COMMON_VERSION_REGEX.clone(), + replacement, + ), + DependentFile::new( + "smart_contracts/macros/Cargo.toml", + EXECUTOR_WASM_COMMON_VERSION_REGEX.clone(), + replacement, + ), + DependentFile::new( + "smart_contracts/sdk/Cargo.toml", + EXECUTOR_WASM_COMMON_VERSION_REGEX.clone(), + replacement, ), ] }); } -pub mod smart_contracts_contract_as { +pub static EXECUTOR_WASM_INTERFACE_VERSION_REGEX: Lazy = Lazy::new(|| { + Regex::new(r#"(?m)(^casper-executor-wasm-interface = \{[^\}]*version = )"(?:[^"]+)"#).unwrap() +}); +pub mod executor_wasm_interface { use super::*; pub static DEPENDENT_FILES: Lazy> = Lazy::new(|| { vec![ DependentFile::new( - "smart_contracts/contract_as/package.json", - PACKAGE_JSON_VERSION_REGEX.clone(), + "executor/wasm_interface/Cargo.toml", + MANIFEST_VERSION_REGEX.clone(), replacement, ), DependentFile::new( - "smart_contracts/contract_as/package-lock.json", - PACKAGE_JSON_VERSION_REGEX.clone(), + "executor/wasm/Cargo.toml", + EXECUTOR_WASM_INTERFACE_VERSION_REGEX.clone(), + replacement, + ), + DependentFile::new( + "executor/wasm_host/Cargo.toml", + EXECUTOR_WASM_INTERFACE_VERSION_REGEX.clone(), + replacement, + ), + DependentFile::new( + "executor/wasmer_backend/Cargo.toml", + EXECUTOR_WASM_INTERFACE_VERSION_REGEX.clone(), + replacement, + ), + DependentFile::new( + "node/Cargo.toml", + EXECUTOR_WASM_INTERFACE_VERSION_REGEX.clone(), replacement, ), ] }); } -pub mod 
execution_engine_testing_test_support { +pub static EXECUTOR_WASM_HOST_VERSION_REGEX: Lazy = Lazy::new(|| { + Regex::new(r#"(?m)(^casper-executor-wasm-host = \{[^\}]*version = )"(?:[^"]+)"#).unwrap() +}); +pub mod executor_wasm_host { use super::*; pub static DEPENDENT_FILES: Lazy> = Lazy::new(|| { vec![ - DependentFile::new( - "execution_engine_testing/cargo_casper/src/tests_package.rs", - Regex::new(r#"(?m)("casper-engine-test-support",\s*)"(?:[^"]+)"#).unwrap(), - cargo_casper_src_test_package_rs_replacement, - ), - DependentFile::new( - "execution_engine_testing/test_support/Cargo.toml", - MANIFEST_VERSION_REGEX.clone(), - replacement, - ), - DependentFile::new( - "execution_engine_testing/test_support/src/lib.rs", - Regex::new(r#"(?m)(#!\[doc\(html_root_url = "https://docs.rs/casper-engine-test-support)/(?:[^"]+)"#).unwrap(), - replacement_with_slash, - ), - ] + DependentFile::new( + "executor/wasm_host/Cargo.toml", + MANIFEST_VERSION_REGEX.clone(), + replacement, + ), + DependentFile::new( + "executor/wasm/Cargo.toml", + EXECUTOR_WASM_HOST_VERSION_REGEX.clone(), + replacement, + ), + ] }); +} - fn cargo_casper_src_test_package_rs_replacement(updated_version: &str) -> String { - format!(r#"$1"{}"#, updated_version) - } +pub static EXECUTOR_WASMER_BACKEND_VERSION_REGEX: Lazy = Lazy::new(|| { + Regex::new(r#"(?m)(^casper-executor-wasmer-backend = \{[^\}]*version = )"(?:[^"]+)"#).unwrap() +}); +pub mod executor_wasmer_backend { + use super::*; + + pub static DEPENDENT_FILES: Lazy> = Lazy::new(|| { + vec![ + DependentFile::new( + "executor/wasmer_backend/Cargo.toml", + MANIFEST_VERSION_REGEX.clone(), + replacement, + ), + DependentFile::new( + "executor/wasm/Cargo.toml", + EXECUTOR_WASMER_BACKEND_VERSION_REGEX.clone(), + replacement, + ), + ] + }); } -pub mod execution_engine_testing_cargo_casper { +pub mod executor_wasm { use super::*; pub static DEPENDENT_FILES: Lazy> = Lazy::new(|| { - vec![DependentFile::new( - 
"execution_engine_testing/cargo_casper/Cargo.toml", - MANIFEST_VERSION_REGEX.clone(), - replacement, - )] + vec![ + DependentFile::new( + "executor/wasm/Cargo.toml", + MANIFEST_VERSION_REGEX.clone(), + replacement, + ), + DependentFile::new( + "node/Cargo.toml", + Regex::new(r#"(?m)(^casper-executor-wasm = \{[^\}]*version = )"(?:[^"]+)"#) + .unwrap(), + replacement, + ), + ] }); } diff --git a/ci/check_cpu_features.sh b/ci/check_cpu_features.sh new file mode 100755 index 0000000000..3c18c08ef1 --- /dev/null +++ b/ci/check_cpu_features.sh @@ -0,0 +1,14 @@ +#!/usr/bin/env bash + +# Ensure that there has not been a change in CPU features used. + +set -e + +cd $(dirname $0)/.. + +cargo build --release --bin casper-node +utils/dump-cpu-features.sh target/release/casper-node > current-build-cpu-features.txt +if [[ $(comm -23 current-build-cpu-features.txt ci/cpu-features-1.4.13-release.txt) ]]; then + exit 1 +fi +echo "Check passed, instruction set extensions in node binary have not been changed since 1.4.13" diff --git a/ci/ci.json b/ci/ci.json new file mode 100644 index 0000000000..f39e71916c --- /dev/null +++ b/ci/ci.json @@ -0,0 +1,23 @@ +{ + "external_deps": { + "casper-client-rs": { + "github_repo_url": "https://github.com/casper-ecosystem/casper-client-rs.git", + "branch": "dev" + }, + "casper-node-launcher": { + "github_repo_url": "https://github.com/casper-network/casper-node-launcher.git", + "branch": "main" + }, + "casper-sidecar": { + "github_repo_url": "https://github.com/casper-network/casper-sidecar", + "branch": "dev" + }, + "casper-nctl": { + "github_repo_url": "https://github.com/casper-network/casper-nctl", + "branch": "dev" + } + }, + "nctl_upgrade_tests": { + "protocol_1": "1.5.6" + } +} \ No newline at end of file diff --git a/ci/cpu-features-1.4.13-release.txt b/ci/cpu-features-1.4.13-release.txt new file mode 100644 index 0000000000..30db8c7f3b --- /dev/null +++ b/ci/cpu-features-1.4.13-release.txt @@ -0,0 +1,13 @@ +AVX +AVX2 +BMI +CMOV +MODE64 
+NOVLX +PCLMUL +SHA +SSE1 +SSE2 +SSE3 +SSE41 +SSSE3 diff --git a/ci/dev_net_protocol_generate.sh b/ci/dev_net_protocol_generate.sh new file mode 100755 index 0000000000..e4eaf0c855 --- /dev/null +++ b/ci/dev_net_protocol_generate.sh @@ -0,0 +1,104 @@ +#!/usr/bin/env bash + +# Need IP for +NODE_RPC_URL="https://node-1.dev.casper.network/rpc" + +ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." >/dev/null 2>&1 && pwd)" +CI_SCRIPT_DIR="$ROOT_DIR/ci" +TARGET_DIR="$ROOT_DIR/target" +GENESIS_DIR="$TARGET_DIR/genesis" +CONFIG_DIR="$TARGET_DIR/config" + +CURRENT_HASH=128ff6fb9472f0abab3d4d8392c23f378a8552e8 +echo "Checked out Github hash $CURRENT_HASH" + +LATEST_HASH=$(curl -s https://genesis.casper.network/dev-net/latest_git_hash | tr -d '\n') +echo "Latest Hash from dev-net protocol is $LATEST_HASH" + +echo + +if [ "$CURRENT_HASH" == "$LATEST_HASH" ]; then + echo "Last published dev-net protocol has same hash, erroring out." + exit 1 # This fails job and stops workflow +fi + +LATEST_PROTOCOL_VERSION="$(curl -s https://genesis.casper.network/dev-net/protocol_versions | tail -n 1 | tr -d '\n')" +echo "Latest dev-net protocol version: $LATEST_PROTOCOL_VERSION" + +IFS="_" +# Read latest protocol parts into array +read -ra LPVA <<< "$LATEST_PROTOCOL_VERSION" + +# Incrementing one to patch +NEW_PROTOCOL_VERSION=${LPVA[0]}_${LPVA[1]}_$((${LPVA[2]} + 1)) +echo "New dev-net protocol version: $NEW_PROTOCOL_VERSION" +echo + +PROTOCOL_DIR="$GENESIS_DIR/$NEW_PROTOCOL_VERSION" +echo "## Creating $PROTOCOL_DIR" +mkdir -p "$PROTOCOL_DIR" +echo + +echo $NEW_PROTOCOL_VERSION > "$GENESIS_DIR/protocol_versions" +echo "## protocol_versions file contents:" +echo "---" +cat "$GENESIS_DIR/protocol_versions" +echo "---" +echo + +echo $CURRENT_HASH > "$GENESIS_DIR/latest_git_hash" +echo "## latest_git_hash file contents:" +echo "---" +cat "$GENESIS_DIR/latest_git_hash" +echo "---" +echo + +mkdir -p "$CONFIG_DIR" +cd "$TARGET_DIR" || exit 1 +echo "## Downloading: 
https://genesis.casper.network/artifacts/casper-node/$LATEST_HASH/bin.tar.gz" +curl -JLO "https://genesis.casper.network/artifacts/casper-node/$LATEST_HASH/bin.tar.gz" || exit 1 + +echo "## Downloading: https://genesis.casper.network/artifacts/casper-node/$LATEST_HASH/config-dev.tar.gz" +curl -JLO "https://genesis.casper.network/artifacts/casper-node/$LATEST_HASH/config-dev.tar.gz" || exit 1 + +pwd +ls -alr + +cd "$CONFIG_DIR" || exit 1 +# This will validate that files retrieved were good. Should error if just curl output + +echo "## Decompressing config" +tar -xzvf ../config-dev.tar.gz . || exit 1 + +ACTIVATION_POINT=$("$CI_SCRIPT_DIR/next_upgrade_era_with_buffer.sh" "$NODE_RPC_URL" 15) + +echo "## Replacing activation_point in chainspec.toml with $ACTIVATION_POINT" +# chainspec.toml replacement +sed -i '/^activation_point = /c\activation_point = '"$ACTIVATION_POINT" chainspec.toml + +# config_example.toml replacement +echo "## Retrieving statuses to make known_addresses" +KNOWN_ADDRESSES="[$( (curl -s https://node-1.dev.casper.network/status | jq -r '.peers[] | .address'; + curl -s https://node-2.dev.casper.network/status | jq -r '.peers[] | .address'; + curl -s https://node-3.dev.casper.network/status | jq -r '.peers[] | .address'; + curl -s https://node-4.dev.casper.network/status | jq -r '.peers[] | .address';) | + sort | uniq | xargs -d '\n' printf "'%s'," | sed 's/, $//' )]" +echo "## Replacing known_addresses in config-example.toml with $KNOWN_ADDRESSES" +sed -i '/^known_addresses = /c\known_addresses = '"$KNOWN_ADDRESSES" config-example.toml + +CS_PROTOCOL=$(echo -n "$NEW_PROTOCOL_VERSION" | tr '_' '.') +echo "## Replacing protocol.version with $CS_PROTOCOL" +sed -i '/^version = /c\version = '\'"$CS_PROTOCOL"\' chainspec.toml + +echo "## Compressing new config.tar.gz" +tar -czvf ../config.tar.gz . + +cd .. 
+pwd + +echo "## Moving files bin and config into $PROTOCOL_DIR" +mv config.tar.gz "$PROTOCOL_DIR/" +mv bin.tar.gz "$PROTOCOL_DIR/" + +echo "## Listing contents of $GENESIS_DIR" +ls -alrR "$GENESIS_DIR" diff --git a/ci/drone_s3_storage.sh b/ci/drone_s3_storage.sh deleted file mode 100755 index b23cb8d742..0000000000 --- a/ci/drone_s3_storage.sh +++ /dev/null @@ -1,65 +0,0 @@ -#!/usr/bin/env bash - -set -ex - -# This script allows uploading, downloading and purging of files to s3 for sharing between drone pipelines. - -# Making unique string for temp folder name in S3 -# Adding DRONE_REPO to DRONE_BUILD_NUMBER, because build is only unique per repo. -# replacing the / in DRONE_REPO name with _ to not be path in S3 -DRONE_UNIQUE="${DRONE_BUILD_NUMBER}_${DRONE_REPO/\//_}" - -valid_commands=("put" "get" "del") -ACTION=$1 -if [[ " ${valid_commands[*]} " != *" $ACTION "* ]]; then - echo "Invalid command passed: $ACTION" - echo "Possible commands are:" - echo " put " - echo " get " - echo " del " - exit 1 -fi - -if [[ "$ACTION" != "del" ]]; then - SOURCE=$2 - TARGET=$3 - - if [ -z "$SOURCE" ]; then - echo "Source not provided" - exit 1 - fi - - if [ -z "$TARGET" ]; then - echo "Target not provided" - exit 1 - fi -fi - -CL_S3_BUCKET="casperlabs-cicd-artifacts" -CL_S3_LOCATION="drone_temp/${DRONE_UNIQUE}" - -echo "CL_VAULT_TOKEN: '${CL_VAULT_TOKEN}'" -echo "CL_VAULT_HOST: '${CL_VAULT_HOST}'" -# get aws credentials files -CL_VAULT_URL="${CL_VAULT_HOST}/v1/sre/cicd/s3/aws_credentials" -CREDENTIALS=$(curl -s -q -H "X-Vault-Token: $CL_VAULT_TOKEN" -X GET "$CL_VAULT_URL") -# get just the body required by s3cmd, strip off vault payload -AWS_ACCESS_KEY_ID=$(echo "$CREDENTIALS" | jq -r .data.cicd_agent_to_s3.aws_access_key) -export AWS_ACCESS_KEY_ID -AWS_SECRET_ACCESS_KEY=$(echo "$CREDENTIALS" | jq -r .data.cicd_agent_to_s3.aws_secret_key) -export AWS_SECRET_ACCESS_KEY - -case "$ACTION" in - "put") - echo "sync ${SOURCE} s3://${CL_S3_BUCKET}/${CL_S3_LOCATION}/${TARGET}" - s3cmd 
sync "${SOURCE}" "s3://${CL_S3_BUCKET}/${CL_S3_LOCATION}/${TARGET}" - ;; - "get") - echo "sync s3://${CL_S3_BUCKET}/${CL_S3_LOCATION}/${SOURCE} ${TARGET}" - s3cmd sync "s3://${CL_S3_BUCKET}/${CL_S3_LOCATION}/${SOURCE}" "${TARGET}" - ;; - "del") - echo "del --recursive s3://${CL_S3_BUCKET}/${CL_S3_LOCATION}" - s3cmd del --recursive "s3://${CL_S3_BUCKET}/${CL_S3_LOCATION}" - ;; -esac diff --git a/ci/markdown-link-check-config.json b/ci/markdown-link-check-config.json new file mode 100644 index 0000000000..e64da7ecd1 --- /dev/null +++ b/ci/markdown-link-check-config.json @@ -0,0 +1,19 @@ +{ + "ignorePatterns": [ + { + "pattern": "^http://localhost.*" + } + ], + "httpHeaders": [ + { + "urls": ["https://crates.io"], + "headers": { + "Accept": "text/html" + } + } + ], + "timeout": "10s", + "retryOn429": true, + "retryCount": 5, + "fallbackRetryDelay": "2s" +} diff --git a/ci/markdown_link_check.sh b/ci/markdown_link_check.sh new file mode 100755 index 0000000000..7df82891d2 --- /dev/null +++ b/ci/markdown_link_check.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash +set -e + +ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." >/dev/null 2>&1 && pwd)" +CONFIG_PATH="$ROOT_DIR/ci/markdown-link-check-config.json" +pushd "$ROOT_DIR" + +FILES=($(find . -name "*.md" -not -path ".*/node_modules/*")) + +for file in "${FILES[@]}"; do + markdown-link-check -v -r -p -c "$CONFIG_PATH" "$file" +done +popd diff --git a/ci/mins_to_switch_block.sh b/ci/mins_to_switch_block.sh new file mode 100755 index 0000000000..e05226776d --- /dev/null +++ b/ci/mins_to_switch_block.sh @@ -0,0 +1,35 @@ +#!/usr/bin/env bash + +if [ "$#" -ne 1 ]; then + echo "Usage: $0 " + exit 1 +fi + +if ! command -v "casper-client" &> /dev/null ; then + echo "casper-client is not installed and required. Exiting..." 
+ exit 1 +fi + +if [[ $1 == http* ]]; then + # Starts with http, so assume good full url + NODE_ADDRESS="--node-address $1" +else + NODE_ADDRESS="--node-address http://$NODE_IP:7777" +fi + +LAST_SWITCH_BLOCK=$(casper-client get-era-summary $NODE_ADDRESS | jq -r .result.era_summary.block_hash | tr -d "/n") + +# Getting Timestamp and Era with one call using `@` delimiter +SB_TIMESTAMP_AND_ERA=$(casper-client get-block -b $LAST_SWITCH_BLOCK $NODE_ADDRESS | jq -r '.result.block_with_signatures.block.Version2.header | [.timestamp,.era_id] | join("@")' | tr -d "/n") + +# Parsing this back into seperate variables +IFS=@ read -r SB_TIMESTAMP LAST_ERA_ID <<< "$SB_TIMESTAMP_AND_ERA" + +# Converting timestamp into Unix second based Epoch +SB_EPOCH=$(date -d "$SB_TIMESTAMP" +%s) +NOW_EPOCH=$(date +%s) + +# Assuming Era length 120 minutes, doing math till next Era +MINS_TILL_SB=$(( 120 - ((NOW_EPOCH - $SB_EPOCH) / 60) )) +NEXT_ERA=$(( LAST_ERA_ID + 1 )) +echo "$MINS_TILL_SB mins till era $NEXT_ERA" diff --git a/ci/nctl_compile.sh b/ci/nctl_compile.sh new file mode 100755 index 0000000000..3848851814 --- /dev/null +++ b/ci/nctl_compile.sh @@ -0,0 +1,48 @@ +#!/usr/bin/env bash +set -e +shopt -s expand_aliases + +ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." >/dev/null 2>&1 && pwd)" +JSON_CONFIG_FILE="$ROOT_DIR/ci/ci.json" +JSON_KEYS=($(jq -r '.external_deps | keys[]' "$JSON_CONFIG_FILE")) + +function clone_external_repo() { + local NAME=${1} + local JSON_FILE=${2} + local URL + local BRANCH + local CLONE_REPO_PATH + + CLONE_REPO_PATH="$ROOT_DIR/../$NAME" + URL=$(jq -r ".external_deps.\"${NAME}\".github_repo_url" "$JSON_FILE") + BRANCH=$(jq -r ".external_deps.\"${NAME}\".branch" "$JSON_FILE") + + if [ ! -d "$CLONE_REPO_PATH" ]; then + echo "... cloning $NAME: branch=$BRANCH" + git clone -b "$BRANCH" "$URL" "$CLONE_REPO_PATH" + else + echo "skipping clone of $NAME: directory already exists." 
+ fi +} + +# Clone external dependencies +for i in "${JSON_KEYS[@]}"; do + clone_external_repo "$i" "$JSON_CONFIG_FILE" +done + +NCTL_HOME="$ROOT_DIR/../casper-nctl" +NCTL_CASPER_HOME="$ROOT_DIR" + +if [ ! -d "$NCTL_HOME" ]; then + echo "ERROR: nctl was not set up correctly, check ci/ci.json, exiting..." + exit 1 +fi + +# Activate Environment +pushd "$ROOT_DIR" +source "$NCTL_HOME/activate" +popd + +# NCTL Build +nctl-compile +cachepot --show-stats diff --git a/ci/nctl_upgrade.sh b/ci/nctl_upgrade.sh new file mode 100755 index 0000000000..0b25900212 --- /dev/null +++ b/ci/nctl_upgrade.sh @@ -0,0 +1,262 @@ +#!/usr/bin/env bash +set -e +shopt -s expand_aliases + +ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." >/dev/null 2>&1 && pwd)" +NCTL_HOME="$ROOT_DIR/../casper-nctl" +NCTL_CASPER_HOME="$ROOT_DIR" + +# Activate Environment +pushd "$ROOT_DIR" +source "$NCTL_HOME/activate" + +# Call compile wrapper for client, launcher, and nctl-compile +bash -c "$ROOT_DIR/ci/nctl_compile.sh" + +function main() { + local TEST_ID=${1} + local SKIP_SETUP=${2} + if [ "$SKIP_SETUP" != "true" ]; then + + # NCTL Build + pushd "$ROOT_DIR" + nctl-compile + + # Clear Old Stages + log "removing old remotes and stages" + nctl-stage-teardown + rm -rf $(get_path_to_stages) + rm -rf $(get_path_to_remotes) + + # Stage + get_remotes + stage_remotes + build_from_settings_file + source "$NCTL/sh/staging/set_override_tomls.sh" upgrade_test='true' + fi + + if [ -z "$TEST_ID" ]; then + log "tooling needs to be updated to deal with AddressableEntity after 2.0 upgrade" + log "disabling for now" + # PR CI tests + start_upgrade_scenario_1 +# start_upgrade_scenario_3 +# start_upgrade_scenario_11 + else + start_upgrade_scenario_"$TEST_ID" + fi +} + +# Pulls down remotely staged file +# from s3 bucket to NCTL remotes directory. 
+function get_remotes() { + local CI_JSON_CONFIG_FILE + local PROTO_1 + + CI_JSON_CONFIG_FILE="$NCTL_CASPER_HOME/ci/ci.json" + PROTO_1=$(jq -r '.nctl_upgrade_tests."protocol_1"' "$CI_JSON_CONFIG_FILE") + nctl-stage-set-remotes "$PROTO_1" +} + +# Sets up settings.sh for CI test. +# If local arg is passed it will skip this step +# and use whats currently in settings.sh +# arg: local is for debug testing only +function stage_remotes() { + local PATH_TO_STAGE + + PATH_TO_STAGE="$(get_path_to_stage 1)" + dev_branch_settings "$PATH_TO_STAGE" +} + +# Generates stage-1 directory for test execution +# Just here for a log message +function build_from_settings_file() { + log "... setting build from settings.sh file" + nctl-stage-build-from-settings +} + +# Produces settings.sh needed for CI testing. +# It will always setup latest RC -> minor incremented by 1. +# i.e: if current RC is 1.2 then dev will be setup as 1.3 +function dev_branch_settings() { + local PATH_TO_STAGE=${1} + local STARTING_VERSION=${2} + local INCREMENT + local RC_VERSION + + pushd "$(get_path_to_remotes)" + RC_VERSION="$(ls --group-directories-first -d */ | sort -r | head -n 1 | tr -d '/')" + + [[ "$RC_VERSION" =~ (.*[^0-9])([0-9])(.)([0-9]+) ]] && INCREMENT="2.0${BASH_REMATCH[3]}${BASH_REMATCH[4]}" + + RC_VERSION=$(echo "$RC_VERSION" | sed 's/\./\_/g') + INCREMENT=$(echo "$INCREMENT" | sed 's/\./\_/g') + + # check if a version to start at was given + if [ ! -z $STARTING_VERSION ]; then + # overwrite start version + RC_VERSION=$(echo "$STARTING_VERSION" | sed 's/\./\_/g') + fi + + mkdir -p "$(get_path_to_stage '1')" + + cat < "$(get_path_to_stage_settings 1)" +export NCTL_STAGE_SHORT_NAME="YOUR-SHORT-NAME" + +export NCTL_STAGE_DESCRIPTION="YOUR-DESCRIPTION" + +export NCTL_STAGE_TARGETS=( + "${RC_VERSION}:remote" + "${INCREMENT}:local" +) +EOF + cat "$(get_path_to_stage_settings 1)" + popd +} + +# Kicks off the scenario +# Just here for a log message +function start_upgrade_scenario_1() { + log "... 
Starting Upgrade Scenario 1" + nctl-exec-upgrade-scenario-1 +} + +function start_upgrade_scenario_3() { + log "... Starting Upgrade Scenario 3" + nctl-exec-upgrade-scenario-3 +} + +function start_upgrade_scenario_4() { + log "... Starting Upgrade Scenario 4" + nctl-exec-upgrade-scenario-4 +} + +function start_upgrade_scenario_5() { + log "... Starting Upgrade Scenario 5" + nctl-exec-upgrade-scenario-5 +} + +function start_upgrade_scenario_6() { + log "... Starting Upgrade Scenario 6" + nctl-exec-upgrade-scenario-6 +} + +function start_upgrade_scenario_7() { + log "... Starting Upgrade Scenario 7" + nctl-exec-upgrade-scenario-7 +} + +function start_upgrade_scenario_8() { + log "... Starting Upgrade Scenario 8" + nctl-exec-upgrade-scenario-8 +} + +function start_upgrade_scenario_9() { + log "... Starting Upgrade Scenario 9" + nctl-exec-upgrade-scenario-9 +} + +function start_upgrade_scenario_10() { + log "... Setting up custom starting version" + local PATH_TO_STAGE + + PATH_TO_STAGE="$(get_path_to_stage 1)" + + log "... downloading remote for 1.4.5" + nctl-stage-set-remotes "1.4.5" + + log "... tearing down old stages" + nctl-stage-teardown + + log "... creating new stage" + dev_branch_settings "$PATH_TO_STAGE" "1.4.5" + build_from_settings_file + + log "... Starting Upgrade Scenario 10" + nctl-exec-upgrade-scenario-10 +} + +function start_upgrade_scenario_11() { + log "... Starting Upgrade Scenario 11" + nctl-exec-upgrade-scenario-11 +} + +function start_upgrade_scenario_12() { + log "... Setting up custom starting version" + local PATH_TO_STAGE + + PATH_TO_STAGE="$(get_path_to_stage 1)" + + log "... downloading remote for 1.3.0" + nctl-stage-set-remotes "1.3.0" + + log "... tearing down old stages" + nctl-stage-teardown + + log "... creating new stage" + dev_branch_settings "$PATH_TO_STAGE" "1.3.0" + build_from_settings_file + + log "... Starting Upgrade Scenario 12" + nctl-exec-upgrade-scenario-12 +} + +function start_upgrade_scenario_13() { + log "... 
Setting up custom starting version" + local PATH_TO_STAGE + + PATH_TO_STAGE="$(get_path_to_stage 1)" + + log "... downloading remote for 1.4.13" + nctl-stage-set-remotes "1.4.13" + + log "... tearing down old stages" + nctl-stage-teardown + + log "... creating new stage" + dev_branch_settings "$PATH_TO_STAGE" "1.4.13" + build_from_settings_file + + log "... Starting Upgrade Scenario 13" + nctl-exec-upgrade-scenario-13 +} + +function start_upgrade_scenario_14() { + log "... Setting up custom starting version" + local PATH_TO_STAGE + + PATH_TO_STAGE="$(get_path_to_stage 1)" + + log "... downloading remote for 1.4.13" + nctl-stage-set-remotes "1.4.13" + + log "... tearing down old stages" + nctl-stage-teardown + + log "... creating new stage" + dev_branch_settings "$PATH_TO_STAGE" "1.4.13" + build_from_settings_file + + log "... Starting Upgrade Scenario 14" + nctl-exec-upgrade-scenario-14 +} + +# ---------------------------------------------------------------- +# ENTRY POINT +# ---------------------------------------------------------------- + +unset TEST_ID +unset SKIP_SETUP + +for ARGUMENT in "$@"; do + KEY=$(echo "$ARGUMENT" | cut -f1 -d=) + VALUE=$(echo "$ARGUMENT" | cut -f2 -d=) + case "$KEY" in + test_id) TEST_ID=${VALUE} ;; + skip_setup) SKIP_SETUP=${VALUE} ;; + *) ;; + esac +done + +main "$TEST_ID" "$SKIP_SETUP" diff --git a/ci/nctl_upgrade_stage.sh b/ci/nctl_upgrade_stage.sh new file mode 100755 index 0000000000..b9c533ea29 --- /dev/null +++ b/ci/nctl_upgrade_stage.sh @@ -0,0 +1,95 @@ +#!/usr/bin/env bash + +# Script used to group everything needed for nctl upgrade remotes. + +set -e +shopt -s expand_aliases + +trap clean_up EXIT + +function clean_up() { + local EXIT_CODE=$? + + if [ "$EXIT_CODE" = '0' ] && [ ! -z ${DRONE} ]; then + # Running in CI so don't cleanup stage dir + echo "Script completed successfully!" + return + fi + + if [ -d "$TEMP_STAGE_DIR" ]; then + echo "Script exited $EXIT_CODE" + echo "... 
Removing stage dir: $TEMP_STAGE_DIR" + rm -rf "$TEMP_STAGE_DIR" + exit "$EXIT_CODE" + fi +} + +# DIRECTORIES +ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." >/dev/null 2>&1 && pwd)" +BIN_BUILD_DIR="$ROOT_DIR/target/release" +WASM_BUILD_DIR="$ROOT_DIR/target/wasm32-unknown-unknown/release" +CONFIG_DIR="$ROOT_DIR/resources/local" +TEMP_STAGE_DIR='/tmp/nctl_upgrade_stage' + +# FILES +BIN_ARRAY=(casper-node) + +WASM_ARRAY=(add_bid.wasm \ + delegate.wasm \ + transfer_to_account_u512.wasm \ + undelegate.wasm \ + withdraw_bid.wasm) + +CONFIG_ARRAY=(chainspec.toml.in config.toml accounts.toml) + +# Create temporary staging directory +if [ ! -d "$TEMP_STAGE_DIR" ]; then + mkdir -p '/tmp/nctl_upgrade_stage' +fi + +# Ensure files are built +cd "$ROOT_DIR" +cargo build --release --package casper-node +make build-contract-rs/activate-bid +make build-contract-rs/add-bid +make build-contract-rs/delegate +make build-contract-rs/named-purse-payment +make build-contract-rs/transfer-to-account-u512 +make build-contract-rs/undelegate +make build-contract-rs/withdraw-bid + +# Copy binaries to staging dir +for i in "${BIN_ARRAY[@]}"; do + if [ -f "$BIN_BUILD_DIR/$i" ]; then + echo "Copying $BIN_BUILD_DIR/$i to $TEMP_STAGE_DIR" + cp "$BIN_BUILD_DIR/$i" "$TEMP_STAGE_DIR" + else + echo "ERROR: $BIN_BUILD_DIR/$i not found!" + exit 1 + fi + echo "" +done + +# Copy wasm to staging dir +for i in "${WASM_ARRAY[@]}"; do + if [ -f "$WASM_BUILD_DIR/$i" ]; then + echo "Copying $WASM_BUILD_DIR/$i to $TEMP_STAGE_DIR" + cp "$WASM_BUILD_DIR/$i" "$TEMP_STAGE_DIR" + else + echo "ERROR: $WASM_BUILD_DIR/$i not found!" + exit 2 + fi + echo "" +done + +# Copy configs to staging dir +for i in "${CONFIG_ARRAY[@]}"; do + if [ -f "$CONFIG_DIR/$i" ]; then + echo "Copying $CONFIG_DIR/$i to $TEMP_STAGE_DIR" + cp "$CONFIG_DIR/$i" "$TEMP_STAGE_DIR" + else + echo "ERROR: $CONFIG_DIR/$i not found!" 
+ exit 3 + fi + echo "" +done diff --git a/ci/next_upgrade_era_with_buffer.sh b/ci/next_upgrade_era_with_buffer.sh new file mode 100755 index 0000000000..3ed34b8a51 --- /dev/null +++ b/ci/next_upgrade_era_with_buffer.sh @@ -0,0 +1,41 @@ +#!/usr/bin/env bash + +if [ "$#" -ne 2 ]; then + echo "Usage: $0 " + exit 1 +fi + +if ! command -v "casper-client" &> /dev/null ; then + echo "casper-client is not installed and required. Exiting..." + exit 1 +fi + +if [[ $1 == http* ]]; then + # Starts with http, so assume good full url + NODE_ADDRESS="--node-address $1" +else + NODE_ADDRESS="--node-address http://$NODE_IP:7777" +fi + +BUFFER_MINS=$2 + +LAST_SWITCH_BLOCK=$(casper-client get-era-summary $NODE_ADDRESS | jq -r .result.era_summary.block_hash | tr -d "/n") + +# Getting Timestamp and Era with one call using `@` delimiter +SB_TIMESTAMP_AND_ERA=$(casper-client get-block -b $LAST_SWITCH_BLOCK $NODE_ADDRESS | jq -r '.result.block_with_signatures.block.Version2.header | [.timestamp,.era_id] | join("@")' | tr -d "/n") + +# Parsing this back into separate variables +IFS=@ read -r SB_TIMESTAMP LAST_ERA_ID <<< "$SB_TIMESTAMP_AND_ERA" + +SB_EPOCH=$(date -d "$SB_TIMESTAMP" +%s) +NOW_EPOCH=$(date +%s) + +MINS_TILL_SB=$(( 120 - ((NOW_EPOCH - $SB_EPOCH) / 60) )) + +if [ "$MINS_TILL_SB" -gt "$BUFFER_MINS" ]; then + NEXT_BUF_ERA=$(( LAST_ERA_ID + 1 )); +else + NEXT_BUF_ERA=$(( LAST_ERA_ID + 2 )); +fi + +echo "$NEXT_BUF_ERA" diff --git a/ci/nightly-test.sh b/ci/nightly-test.sh index 639579f789..dfe3b3f1f4 100755 --- a/ci/nightly-test.sh +++ b/ci/nightly-test.sh @@ -1,68 +1,148 @@ #!/usr/bin/env bash set -e +shopt -s expand_aliases -# Meant to run only in CI -if [ -z "${DRONE}" ]; then - echo "Must be run on Drone!" - exit 1 -fi + +DRONE_ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." 
>/dev/null 2>&1 && pwd)" +NCTL_HOME="$DRONE_ROOT_DIR/../casper-nctl" +NCTL_CASPER_HOME="$DRONE_ROOT_DIR" -DRONE_ROOT_DIR="/drone/src" -SCENARIOS_DIR="$DRONE_ROOT_DIR/utils/nctl/sh/scenarios" +SCENARIOS_DIR="$NCTL_HOME/sh/scenarios" SCENARIOS_CHAINSPEC_DIR="$SCENARIOS_DIR/chainspecs" SCENARIOS_ACCOUNTS_DIR="$SCENARIOS_DIR/accounts_toml" -LAUNCHER_DIR="/drone" +SCENARIOS_CONFIGS_DIR="$SCENARIOS_DIR/configs" -# NCTL requires casper-node-launcher -pushd $LAUNCHER_DIR -git clone https://github.com/CasperLabs/casper-node-launcher.git +NCTL_CLIENT_BRANCH="${DRONE_BRANCH:='dev'}" # Activate Environment -pushd $DRONE_ROOT_DIR -source $(pwd)/utils/nctl/activate -# Build, Setup, and Start NCTL -nctl-compile +pushd "$DRONE_ROOT_DIR" +source "$NCTL_HOME/activate" + +# Call compile wrapper for client, launcher, and nctl-compile +bash -c "$DRONE_ROOT_DIR/ci/nctl_compile.sh" function start_run_teardown() { local RUN_CMD=$1 - local RUN_CHAINSPEC=$2 - local RUN_ACCOUNTS=$3 + local TEST_NAME + local STAGE_TOML_DIR + local SETUP_ARGS + local CONFIG_TOML + local CHAINSPEC_TOML + local ACCOUNTS_TOML + + # Capture test prefix for custom file checks + TEST_NAME="$(echo $RUN_CMD | awk -F'.sh' '{ print $1 }')" + STAGE_TOML_DIR="$NCTL/overrides" + CONFIG_TOML="$STAGE_TOML_DIR/$TEST_NAME.config.toml" + CHAINSPEC_TOML="$STAGE_TOML_DIR/$TEST_NAME.chainspec.toml.in" + ACCOUNTS_TOML="$STAGE_TOML_DIR/$TEST_NAME.accounts.toml" + # Really-really make sure nothing is leftover nctl-assets-teardown - echo "Setting up network: $RUN_CMD $RUN_CHAINSPEC $RUN_ACCOUNTS" - if [ -z "$RUN_CHAINSPEC" ] && [ -z "$RUN_ACCOUNTS" ]; then - nctl-assets-setup - elif [ ! -z "$RUN_CHAINSPEC" ] && [ -z "$RUN_ACCOUNTS" ]; then - nctl-assets-setup chainspec_path="$SCENARIOS_CHAINSPEC_DIR/$RUN_CHAINSPEC" - elif [ -z "$RUN_CHAINSPEC" ] && [ ! 
-z "$RUN_ACCOUNTS" ]; then - nctl-assets-setup accounts_path="$SCENARIOS_ACCOUNTS_DIR/$RUN_ACCOUNTS" - else - nctl-assets-setup chainspec_path="$SCENARIOS_CHAINSPEC_DIR/$RUN_CHAINSPEC" accounts_path="$SCENARIOS_ACCOUNTS_DIR/$RUN_ACCOUNTS" + + # Overrides chainspec.toml + if [ -f "$CHAINSPEC_TOML" ]; then + SETUP_ARGS+=("chainspec_path=$CHAINSPEC_TOML") + fi + + # Overrides accounts.toml + if [ -f "$ACCOUNTS_TOML" ]; then + SETUP_ARGS+=("accounts_path=$ACCOUNTS_TOML") + fi + + # Overrides config.toml + if [ -f "$CONFIG_TOML" ]; then + SETUP_ARGS+=("config_path=$CONFIG_TOML") fi + + # Setup nctl files for test + echo "Setting up network: nctl-assets-setup ${SETUP_ARGS[@]}" + nctl-assets-setup "${SETUP_ARGS[@]}" sleep 1 + + # Start nctl network nctl-start - echo "Sleeping 90 to allow network startup" - sleep 90 - pushd $SCENARIOS_DIR + echo "Sleeping 10s to allow network startup" + sleep 10 + + # Run passed in test + pushd "$SCENARIOS_DIR" + echo "Starting scenario: $RUN_CMD" # Don't quote the cmd - echo "Starting scenario: $RUN_CMD $RUN_CHAINSPEC $RUN_ACCOUNTS" source $RUN_CMD + + # Cleanup after test completion popd nctl-assets-teardown sleep 1 } -start_run_teardown "itst01.sh" -start_run_teardown "itst02.sh" -start_run_teardown "itst11.sh" -start_run_teardown "itst13.sh" "itst13.chainspec.toml.in" -start_run_teardown "itst14.sh" "itst14.chainspec.toml.in" "itst14.accounts.toml" -start_run_teardown "bond_its.sh" "bond_its.chainspec.toml.in" "bond_its.accounts.toml" -start_run_teardown "sync_test.sh node=6 timeout=500" -# Keep this test last -start_run_teardown "sync_upgrade_test.sh node=6 era=5 timeout=500" - -# Clean up cloned repo -popd -echo "Removing $LAUNCHER_DIR/casper-node-launcher" -rm -rf "$LAUNCHER_DIR/casper-node-launcher" +function run_test_and_count { + CASPER_NCTL_NIGHTLY_TEST_COUNT=$((CASPER_NCTL_NIGHTLY_TEST_COUNT+1)) + eval $1 +} + +function run_nightly_upgrade_test() { + # setup only needed the first time + run_test_and_count 'bash -c 
"./ci/nctl_upgrade.sh test_id=4"' + run_test_and_count 'bash -c "./ci/nctl_upgrade.sh test_id=5 skip_setup=true"' + run_test_and_count 'bash -c "./ci/nctl_upgrade.sh test_id=6 skip_setup=true"' + run_test_and_count 'bash -c "./ci/nctl_upgrade.sh test_id=7 skip_setup=true"' + run_test_and_count 'bash -c "./ci/nctl_upgrade.sh test_id=8 skip_setup=true"' + run_test_and_count 'bash -c "./ci/nctl_upgrade.sh test_id=9 skip_setup=true"' + run_test_and_count 'bash -c "./ci/nctl_upgrade.sh test_id=10"' + run_test_and_count 'bash -c "./ci/nctl_upgrade.sh test_id=11"' + run_test_and_count 'bash -c "./ci/nctl_upgrade.sh test_id=12"' + run_test_and_count 'bash -c "./ci/nctl_upgrade.sh test_id=13"' + run_test_and_count 'bash -c "./ci/nctl_upgrade.sh test_id=14"' +} + +function run_soundness_test() { + echo "Starting network soundness test" + + # Really-really make sure nothing is leftover + nctl-assets-teardown + + $NCTL/sh/scenarios/network_soundness.py + + # Clean up after the test + nctl-assets-teardown +} + +CASPER_NCTL_NIGHTLY_TEST_COUNT=0 + +source "$NCTL/sh/staging/set_override_tomls.sh" +run_test_and_count 'start_run_teardown "client.sh"' +run_test_and_count 'start_run_teardown "itst01.sh"' +run_test_and_count 'start_run_teardown "itst01_private_chain.sh"' +run_test_and_count 'start_run_teardown "itst02.sh"' +run_test_and_count 'start_run_teardown "itst02_private_chain.sh"' +run_test_and_count 'start_run_teardown "itst11.sh"' +run_test_and_count 'start_run_teardown "itst11_private_chain.sh"' +run_test_and_count 'start_run_teardown "itst13.sh"' +run_test_and_count 'start_run_teardown "itst14.sh"' +run_test_and_count 'start_run_teardown "itst14_private_chain.sh"' +run_test_and_count 'start_run_teardown "bond_its.sh"' +run_test_and_count 'start_run_teardown "emergency_upgrade_test.sh"' +run_test_and_count 'start_run_teardown "emergency_upgrade_test_balances.sh"' +run_test_and_count 'start_run_teardown "upgrade_after_emergency_upgrade_test.sh"' +run_test_and_count 
'start_run_teardown "sync_test.sh timeout=500"' +run_test_and_count 'start_run_teardown "swap_validator_set.sh"' +run_test_and_count 'start_run_teardown "sync_upgrade_test.sh node=6 era=5 timeout=500"' +run_test_and_count 'start_run_teardown "validators_disconnect.sh"' +run_test_and_count 'start_run_teardown "event_stream.sh"' +run_test_and_count 'start_run_teardown "regression_4771.sh"' +# Without start_run_teardown - these ones perform their own assets setup, network start and teardown +run_test_and_count 'source "$SCENARIOS_DIR/upgrade_after_emergency_upgrade_test_pre_1.5.sh"' +run_test_and_count 'source "$SCENARIOS_DIR/regression_3976.sh"' + +run_nightly_upgrade_test + +run_test_and_count 'run_soundness_test' + +# Run these last as they occasionally fail (see https://github.com/casper-network/casper-node/issues/2973) +run_test_and_count 'start_run_teardown "itst06.sh"' +run_test_and_count 'start_run_teardown "itst06_private_chain.sh"' +run_test_and_count 'start_run_teardown "itst07.sh"' +run_test_and_count 'start_run_teardown "itst07_private_chain.sh"' + +echo "All tests passed. Test count: $CASPER_NCTL_NIGHTLY_TEST_COUNT" \ No newline at end of file diff --git a/ci/publish_deb_to_repo.sh b/ci/publish_deb_to_repo.sh new file mode 100755 index 0000000000..5f7ebb92bf --- /dev/null +++ b/ci/publish_deb_to_repo.sh @@ -0,0 +1,58 @@ +#!/usr/bin/env bash +set -e + +# Verify all variables are present +if [[ -z $PLUGIN_GPG_KEY || -z $PLUGIN_GPG_PASS || -z $PLUGIN_REGION \ + || -z $PLUGIN_REPO_NAME || -z $PLUGIN_ACL || -z $PLUGIN_PREFIX \ + || -z $PLUGIN_DEB_PATH || -z $PLUGIN_OS_CODENAME ]]; then + echo "ERROR: Environment Variable Missing!" + exit 1 +fi + +# Verify if its the first time publishing. Will need to know later. 
+# Probably an easier way to do this check :) +EXISTS_RET=$(aws s3 ls s3://"$PLUGIN_REPO_NAME"/releases/dists/ --region "$PLUGIN_REGION" | grep "$PLUGIN_OS_CODENAME") || EXISTS_RET="false" + +# Sanity Check for later +if [ "$EXISTS_RET" = "false" ]; then + echo "First time uploading repo!" +else + echo "Repo Exists! Defaulting to publish update..." +fi + +### APTLY SECTION + +# Move old config file to use in jq query +mv ~/.aptly.conf ~/.aptly.conf.orig + +# Inject ENV Variables and save as .aptly.conf +jq --arg region "$PLUGIN_REGION" --arg bucket "$PLUGIN_REPO_NAME" --arg acl "$PLUGIN_ACL" --arg prefix "$PLUGIN_PREFIX" '.S3PublishEndpoints[$bucket] = {"region":$region, "bucket":$bucket, "acl": $acl, "prefix": $prefix}' ~/.aptly.conf.orig > ~/.aptly.conf + +# If aptly repo DOESNT exist locally already +if [ ! "$(aptly repo list | grep $PLUGIN_OS_CODENAME)" ]; then + aptly repo create -distribution="$PLUGIN_OS_CODENAME" -component=main "release-$PLUGIN_OS_CODENAME" +fi + +# If aptly mirror DOESNT exist locally already +if [ ! "$(aptly mirror list | grep $PLUGIN_OS_CODENAME)" ] && [ ! "$EXISTS_RET" = "false" ] ; then + aptly mirror create -ignore-signatures "local-repo-$PLUGIN_OS_CODENAME" https://"${PLUGIN_REPO_NAME}"/"${PLUGIN_PREFIX}"/ "${PLUGIN_OS_CODENAME}" main +fi + +# When it's not the first time uploading. +if [ ! "$EXISTS_RET" = "false" ]; then + aptly mirror update -ignore-signatures "local-repo-$PLUGIN_OS_CODENAME" + # Found an article that said using 'Name' will select all packages for us + aptly repo import "local-repo-$PLUGIN_OS_CODENAME" "release-$PLUGIN_OS_CODENAME" Name +fi + +# Add .debs to the local repo +aptly repo add -force-replace "release-$PLUGIN_OS_CODENAME" "$PLUGIN_DEB_PATH"/*.deb + +# Publish to S3 +if [ ! 
"$(aptly publish list | grep $PLUGIN_REPO_NAME | grep $PLUGIN_OS_CODENAME)" ]; then + # If the repo is new + aptly publish repo -batch -force-overwrite -passphrase="$PLUGIN_GPG_PASS" "release-$PLUGIN_OS_CODENAME" s3:"${PLUGIN_REPO_NAME}": +else + # If the repo exists + aptly publish update -batch -force-overwrite -passphrase="$PLUGIN_GPG_PASS" "$PLUGIN_OS_CODENAME" s3:"${PLUGIN_REPO_NAME}": +fi diff --git a/ci/publish_to_crates_io.sh b/ci/publish_to_crates_io.sh index 189250cc1c..f9b56a641c 100755 --- a/ci/publish_to_crates_io.sh +++ b/ci/publish_to_crates_io.sh @@ -62,7 +62,7 @@ publish() { printf "Publishing...\n" pushd $ROOT_DIR/$CRATE_DIR >/dev/null set +u - cargo publish ${@:2} --token ${CARGO_TOKEN} + cargo publish "${@:2}" --token ${CARGO_TOKEN} set -u popd >/dev/null printf "Published version %s\n" $LOCAL_VERSION @@ -76,11 +76,96 @@ check_python_has_toml # These are the subdirs of casper-node which contain packages for publishing. They should remain ordered from # least-dependent to most. 
+# +# Header format for dependencies: +# () +# + +# types (casper-types) -> None publish types + +# storage (casper-storage) +# casper-types +publish storage + +# binary_port (casper-binary-port) +# casper-types +publish binary_port + +# execution-engine (casper-execution-engine) +# casper-storage +# casper-types publish execution_engine -publish node_macros -publish node -publish client -publish smart_contracts/contract --features=std + +# execution_engine_testing/test_support (casper-engine-test-support) +# casper-storage +# casper-types +# casper-execution-engine publish execution_engine_testing/test_support -publish execution_engine_testing/cargo_casper --allow-dirty + +# smart_contracts/contract (casper-contract) +# casper-types +publish smart_contracts/contract + +# smart_contracts/sdk_sys (casper-contract-sdk_sys) -> None +publish smart_contracts/sdk_sys + +# executor/wasm_common (casper-executor-wasm_common) +# casper-contract-sdk_sys +publish executor/wasm_common + +# smart_contracts/macros (casper-contract-macros) +# casper-executor-wasm_common +# casper-contract-sdk_sys +publish smart_contracts/macros + +# smart_contracts/sdk (casper-contract-sdk) +# casper-contract-sdk_sys +# casper-executor-wasm_common +# casper-contract-macros +publish smart_contracts/sdk + +# smart_contracts/sdk_codegen (casper-contract-sdk_codegen) +# casper-contract-sdk +publish smart_contracts/sdk_codegen + +# executor/wasm_interface (casper-executor-wasm_interface) +# casper-executor-wasm_common +# casper-storage +# casper-types +publish executor/wasm_interface + +# executor/wasm_host (casper-executor-wasm_host) +# casper-executor-wasm_common +# casper-executor-wasm_interface +# casper-storage +# casper-types +publish executor/wasm_host + +# executor/wasmer_backend (casper-executor-wasmer_backend) +# casper-executor-wasm_common +# casper-executor-wasm_interface +# casper-executor-wasm_host +# casper-storage +# casper-contract-sdk_sys +# casper-types +publish 
executor/wasmer_backend + +# executor/wasm (casper-executor-wasm) +# casper-executor-wasm_common +# casper-executor-wasm_host +# casper-executor-wasm_interface +# casper-executor-wasmer_backend +# casper-storage +# casper-types +# casper-execution-engine +publish executor/wasm + +# node (casper-node) +# casper-binary-port +# casper-storage +# casper-types +# casper-execution-engine +# casper-executor-wasm +# casper-executor-wasm_interface +publish node diff --git a/ci/upgrade_package_s3_storage.sh b/ci/upgrade_package_s3_storage.sh index 7a2a1e2829..45db109827 100755 --- a/ci/upgrade_package_s3_storage.sh +++ b/ci/upgrade_package_s3_storage.sh @@ -2,10 +2,10 @@ set -e -# This script allows uploading, downloading and purging of files to genesis.casperlabs.io s3 for storing +# This script allows uploading, downloading and purging of files to genesis.casper.network s3 for storing # possible upgrade package releases to promote to a network or use for testing. -# Using drone/GIT_HASH/PROTOCOL_VERSION as s3 bucket location in genesis.casperlabs.io +# Using drone/GIT_HASH/PROTOCOL_VERSION as s3 bucket location in genesis.casper.network # Check python has toml for getting PROTOCOL_VERSION set +e @@ -58,7 +58,7 @@ export AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY=$(echo "$CREDENTIALS" | jq -r .data.cicd_agent_to_s3.aws_secret_key) export AWS_SECRET_ACCESS_KEY -CL_S3_BUCKET="genesis.casperlabs.io" +CL_S3_BUCKET="genesis.casper.network" CL_S3_LOCATION="drone/$GIT_HASH" case "$ACTION" in diff --git a/client/Cargo.toml b/client/Cargo.toml deleted file mode 100644 index 005107b1a8..0000000000 --- a/client/Cargo.toml +++ /dev/null @@ -1,71 +0,0 @@ -[package] -name = "casper-client" -version = "1.0.0" -authors = ["Marc Brinkmann ", "Fraser Hutchison "] -edition = "2018" -description = "A client for interacting with the Casper network" -readme = "README.md" -documentation = "https://docs.rs/casper-client" -homepage = "https://casperlabs.io" -repository = 
"https://github.com/CasperLabs/casper-node/tree/master/client" -license-file = "../LICENSE" - -[lib] -name = "casper_client" -crate-type = ["rlib", "cdylib"] -path = "lib/lib.rs" - -[[bin]] -name = "casper-client" -path = "src/main.rs" -doc = false - -[dependencies] -base64 = "0.13.0" -casper-execution-engine = { version = "1.0.0", path = "../execution_engine" } -casper-node = { version = "1.0.0", path = "../node" } -casper-types = { version = "1.0.0", path = "../types", features = ["std"] } -clap = "2" -futures = "0.3.5" -hex = { version = "0.4.2", features = ["serde"] } -humantime = "2" -jsonrpc-lite = "0.5.0" -once_cell = "1" -rand = "0.8.3" -reqwest = { version = "0.11.1", features = ["json"] } -serde = { version = "1", default-features = false, features = ["derive"] } -serde_json = "1" -tempfile = "3" -thiserror = "1" -tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "sync", "time"] } - -[build-dependencies] -cbindgen = { version = "0.18", optional = true } - -[dev-dependencies] -anyhow = "1" -casper-node = { path = "../node" } -futures = "0.3.13" -hyper = "0.14.4" -jsonrpc-lite = "0.5.0" -semver = "0.11" -serde = "1" -tower = "0.4.6" -warp = "0.3.0" -warp-json-rpc = "0.3.0" - -[features] -default = ["ffi"] -ffi = ["cbindgen"] - -[package.metadata.deb] -features = ["vendored-openssl"] -revision = "0" -assets = [ - ["../target/release/casper-client", "/usr/bin/casper-client", "755"], -] -extended-description = """ -Package for Casper Client to connect to Casper Node. - -For information on using package, see https://github.com/CasperLabs/casper-node -""" diff --git a/client/README.md b/client/README.md deleted file mode 100644 index 84563a8716..0000000000 --- a/client/README.md +++ /dev/null @@ -1,433 +0,0 @@ -# casper-client - -A client for interacting with the Casper network. - - -## Running the client - -The client runs in one of several modes, each mode performing a single action. 
To see all available commands: - -``` -cd client -cargo run --release -- help -``` - -
example output - -```commandline -Casper client 1.5.0 -A client for interacting with the Casper network - -USAGE: - casper-client [SUBCOMMAND] - -FLAGS: - -h, --help Prints help information - -V, --version Prints version information - -SUBCOMMANDS: - put-deploy Creates a deploy and sends it to the network for execution - make-deploy Creates a deploy and outputs it to a file or stdout. As a file, the deploy can - subsequently be signed by other parties using the 'sign-deploy' subcommand and then sent - to the network for execution using the 'send-deploy' subcommand - sign-deploy Reads a previously-saved deploy from a file, cryptographically signs it, and outputs it - to a file or stdout - send-deploy Reads a previously-saved deploy from a file and sends it to the network for execution - transfer Transfers funds between purses - get-deploy Retrieves a deploy from the network - get-block Retrieves a block from the network - get-block-transfers Retrieves all transfers for a block from the network - list-deploys Retrieves the list of all deploy hashes in a given block - get-state-root-hash Retrieves a state root hash at a given block - query-state Retrieves a stored value from the network - get-balance Retrieves a purse's balance from the network - get-auction-info Retrieves the bids and validators as of the most recently added block - keygen Generates account key files in the given directory - generate-completion Generates a shell completion script - help Prints this message or the help of the given subcommand(s) -``` -
- -To get further info on any command, run `help` followed by the subcommand, e.g. - -``` -cargo run --release -- help keygen -``` - -
example output - -```commandline -casper-client-keygen -Generates account key files in the given directory. Creates ["secret_key.pem", "public_key.pem", "public_key_hex"]. -"public_key_hex" contains the hex-encoded key's bytes with the hex-encoded algorithm tag prefixed - -USAGE: - casper-client keygen [FLAGS] [OPTIONS] [PATH] - -FLAGS: - -f If this flag is passed, any existing output files will be overwritten. Without this flag, if any - output file exists, no output files will be generated and the command will fail - -h, --help Prints help information - -V, --version Prints version information - -OPTIONS: - -a, --algorithm The type of keys to generate [default: Ed25519] [possible values: Ed25519, secp256k1] - -ARGS: - Path to output directory where key files will be created. If the path doesn't exist, it will be - created. If not set, the current working directory will be used -``` -
- - -### Generate asymmetric signing keys - -Some commands require the use of a secret key for signing data. To generate a secret and public key pair: - -``` -cargo run --release -- keygen $HOME/.client_keys -``` - - -## Interacting with a local node - -Many client commands require to send HTTP requests and receive responses. To do this with a local node running on the -same machine, follow the instructions in [the `nctl` README](../utils/nctl/README.md) to set up a local test network. - -Ensure the network has fully started before running client commands. This can be determined by running -`nctl-view-node-peers` and checking each node has connections to all others. - -For client commands requiring a node address (specified via the `--node-address` or `-n` arg), the default value is -`http://localhost:7777`, which is the address for a real network node. The `--node-address=http://localhost:50101` -argument must be included for the address of the first node of a testnet started via `nctl`. - - -### Transfer funds between purses - -The testnet will be set up so that the nodes each have an initial balance of tokens in their main purses. Let's say we -want to create a new purse under the public key we just created (in the "Generate asymmetric signing keys" section). We -can do this by creating a new deploy which will transfer funds between two purses once executed. The simplest way to -achieve this is via the `transfer` subcommand. - -First, set the contents of the `public_key_hex` file to a variable. We'll use this as the target account: - -``` -PUBLIC_KEY=$(cat $HOME/.client_keys/public_key_hex) -``` - -Then execute the `transfer` subcommand. 
We'll specify that we want to transfer 1,234,567 tokens from the main purse of -node 3, and that we'll pay a maximum of 10,000 tokens to execute this deploy: - -``` -cargo run --release -- transfer \ - --node-address=http://localhost:50101 \ - --secret-key=../utils/nctl/assets/net-1/nodes/node-3/keys/secret_key.pem \ - --amount=1234567 \ - --target-account=$PUBLIC_KEY \ - --chain-name=casper-net-1 \ - --payment-amount=10000 -``` - -
example output - -```commandline -{ - "jsonrpc": "2.0", - "result": { - "api_version": "1.0.0", - "deploy_hash": "c42210759368a07a1b1ff4f019f7e77e7c9eaf2961b8c9dfc4237ea2218246c9" - }, - "id": 2564730065 -} -``` -
- -The `deploy_hash` in the response is worth noting, as it can be used to identify this deploy. - - -### Get details of a deploy - -To see information about a deploy sent to the network via `transfer`, `put-deploy`, or `send-deploy`, you can use -`get-deploy`, along with the deploy hash printed after executing one of these subcommands. - -For example, to see if our previous `transfer` command generated a deploy which was executed by the network: - -``` -cargo run --release -- get-deploy --node-address=http://localhost:50101 c42210759368a07a1b1ff4f019f7e77e7c9eaf2961b8c9dfc4237ea2218246c9 -``` - -
example output - -```commandline -{ - "jsonrpc": "2.0", - "result": { - "api_version": "1.0.0", - "deploy": { - "approvals": [ - { - "signature": "0140850c4f74aaad24894ce2d0e3efb64f599633fad4e280f39529dbd062ab49ca6a1f0bd6f20a8cddeab68e95ae5ea416a5b2ae3a02a0bc7a714c2915106e1c09", - "signer": "015b7723f1d9499fa02bd17dfe4e1315cfe1660a071e27ab1f29d6ceb6e2abcd73" - } - ], - "hash": "c42210759368a07a1b1ff4f019f7e77e7c9eaf2961b8c9dfc4237ea2218246c9", - "header": { - "account": "015b7723f1d9499fa02bd17dfe4e1315cfe1660a071e27ab1f29d6ceb6e2abcd73", - "body_hash": "c66f1040f8f2aeafee73b7c0811e00fd6eb63a6a5992d7cc0f967e14704dd35b", - "chain_name": "casper-net-1", - "dependencies": [], - "gas_price": 10, - "timestamp": "2020-10-15T13:23:45.355Z", - "ttl": "1h" - }, - "payment": { - "ModuleBytes": { - "args": "0100000006000000616d6f756e740300000002102708", - "module_bytes": "" - } - }, - "session": { - "Transfer": { - "args": "0200000006000000616d6f756e74040000000387d612080600000074617267657420000000018189fd2d42c36d951f9803e595795a3a0fc07aa999c88a28d286c7cbf338940f0320000000" - } - } - }, - "execution_results": [ - { - "block_hash": "80a09df67f45bfb290c8f36021daf2fb898587a48fa0e4f7c506202ae8f791b8", - "result": { - "cost": "0", - "effect": { - "operations": { - "account-hash-018189fd2d42c36d951f9803e595795a3a0fc07aa999c88a28d286c7cbf33894": "Write", - "hash-09480c3248ef76b603d386f3f4f8a5f87f597d4eaffd475433f861af187ab5db": "Write", - "hash-d46e35465520ef9f868be3f26eaded1585dd66ac410706bab4b7adf92bdf528a": "Read", - "hash-ea274222cc975e4daec2cced17a0270df7c282e865115d98f544a35877af5271": "Add", - "uref-09480c3248ef76b603d386f3f4f8a5f87f597d4eaffd475433f861af187ab5db-000": "Write", - "uref-8e7893be4b33bc5eacde4dd684b030593200364a211b8566ed9458ccbafbcde9-000": "Write", - "uref-b645152645faa6c3f7708fd362a118296f7f4d39dc065c120877d13b6981cd67-000": "Write" - }, - "transforms": { - "account-hash-018189fd2d42c36d951f9803e595795a3a0fc07aa999c88a28d286c7cbf33894": "WriteAccount", - 
"hash-09480c3248ef76b603d386f3f4f8a5f87f597d4eaffd475433f861af187ab5db": { - "WriteCLValue": { - "bytes": "02b645152645faa6c3f7708fd362a118296f7f4d39dc065c120877d13b6981cd6707", - "cl_type": "Key" - } - }, - "hash-d46e35465520ef9f868be3f26eaded1585dd66ac410706bab4b7adf92bdf528a": "Identity", - "hash-ea274222cc975e4daec2cced17a0270df7c282e865115d98f544a35877af5271": { - "AddKeys": { - "uref-09480c3248ef76b603d386f3f4f8a5f87f597d4eaffd475433f861af187ab5db-000": "uref-b645152645faa6c3f7708fd362a118296f7f4d39dc065c120877d13b6981cd67-007" - } - }, - "uref-09480c3248ef76b603d386f3f4f8a5f87f597d4eaffd475433f861af187ab5db-000": { - "WriteCLValue": { - "bytes": "", - "cl_type": "Unit" - } - }, - "uref-8e7893be4b33bc5eacde4dd684b030593200364a211b8566ed9458ccbafbcde9-000": { - "WriteCLValue": { - "bytes": "087929775d78456301", - "cl_type": "U512" - } - }, - "uref-b645152645faa6c3f7708fd362a118296f7f4d39dc065c120877d13b6981cd67-000": { - "WriteCLValue": { - "bytes": "0387d612", - "cl_type": "U512" - } - } - } - }, - "error_message": null - } - } - ] - }, - "id": 592430140 -} -``` -
- -The `block_hash` in the response's `execution_results` is worth noting, as it can be used to identify the block in which -the deploy is included. If the deploy was successfully received and parsed by the node, but failed to execute, the -`error_message` in `execution_results` may provide useful information. - - -### Get details of a `Block` - -To see information about a `Block` created by the network, you can use `get-block`. For example: - -``` -cargo run --release -- get-block \ - --node-address=http://localhost:50101 \ - --block-hash=80a09df67f45bfb290c8f36021daf2fb898587a48fa0e4f7c506202ae8f791b8 -``` - -
example output - -```commandline -{ - "jsonrpc": "2.0", - "result": { - "api_version": "1.0.0", - "block": { - "body": null, - "hash": "80a09df67f45bfb290c8f36021daf2fb898587a48fa0e4f7c506202ae8f791b8", - "header": { - "accumulated_seed": "e8c65524331dc950d9065c289deb05458d3f9d8beba15e663a5418f5a6c7bed5", - "body_hash": "0e5751c026e543b2e8ab2eb06099daa1d1e5df47778f7787faab45cdf12fe3a8", - "deploy_hashes": [ - "c42210759368a07a1b1ff4f019f7e77e7c9eaf2961b8c9dfc4237ea2218246c9" - ], - "era_end": null, - "era_id": 89, - "state_root_hash": "c79f4c9a017532fe265593d86d3917581479fd1601093e16d17ec90aeaa63b83", - "height": 987, - "parent_hash": "ffb95eac42eae1112d37797a1ecc67860e88a9364c44845cb7a96eb426dca502", - "proposer": "015b7723f1d9499fa02bd17dfe4e1315cfe1660a071e27ab1f29d6ceb6e2abcd73", - "random_bit": true, - "timestamp": "2020-10-15T13:23:48.352Z" - }, - "proofs": [ - "0104df3fe39567d22a48b68c4b046dadf5af6552c45b1a93613c89a65caa98b12a4564ba1a794e77787eb3d37c19617ca344f2a304387a0364fee0e8f89da2da0d" - ] - } - }, - "id": 3484548969 -} -``` -
- -The `state_root_hash` in the response's `header` is worth noting, as it can be used to identify the state root hash -for the purposes of querying the global state. - -### Get all `Transfers` contained in a `Block` - -To retrieve all `Transfer` transactions processed in a `Block` created by the network, you can use `get-block-transfers`. For example: - -``` -cargo run --release -- get-block-transfers \ - --node-address=http://localhost:50101 \ - --block-hash=80a09df67f45bfb290c8f36021daf2fb898587a48fa0e4f7c506202ae8f791b8 -``` - -
example output - -```commandline -{ - "jsonrpc": "2.0", - "result": { - "api_version": "1.0.0", - "block_hash": "80a09df67f45bfb290c8f36021daf2fb898587a48fa0e4f7c506202ae8f791b8", - "transfers": [ - { - "amount": "100000000", - "deploy_hash": "ab87c5f2c0f6f331bf488703676fb0c68f897282dfbb8e085752f220a3dfc25e", - "from": "account-hash-1ace33e66142d5a0679ba5507ef75b9c09888d1567e86100d1db535fa819a962", - "gas": "0", - "id": null, - "source": "uref-21f7316e72d1baa7b706a9083077d643665ad3a56673c594db9762ceac4f3788-007", - "target": "uref-c5eb9788156b53c9a599dfb5e591c6399580b491c72086a6bc028dd18fdfcb2d-004" - } - ] - }, - "id": 7229488934468542904 -} -``` -
- - -### Query the global state - -To view data stored to global state after executing a deploy, you can use `query-state`. For example, to see the value -stored under our new account's public key: - -``` -cargo run --release -- query-state \ - --node-address=http://localhost:50101 \ - --state-root-hash=242666f5959e6a51b7a75c23264f3cb326eecd6bec6dbab147f5801ec23daed6 \ - --key=$PUBLIC_KEY -``` - -
example output - -```commandline -{ - "jsonrpc": "2.0", - "result": { - "api_version": "1.0.0", - "stored_value": { - "Account": { - "account_hash": "018189fd2d42c36d951f9803e595795a3a0fc07aa999c88a28d286c7cbf33894", - "action_thresholds": { - "deployment": 1, - "key_management": 1 - }, - "associated_keys": [ - { - "account_hash": "018189fd2d42c36d951f9803e595795a3a0fc07aa999c88a28d286c7cbf33894", - "weight": 1 - } - ], - "main_purse": "uref-09480c3248ef76b603d386f3f4f8a5f87f597d4eaffd475433f861af187ab5db-007", - "named_keys": {} - } - } - }, - "id": 3649040235 -} -``` -
- -This yields details of the newly-created account object, including the `URef` of the account's main purse. - - -### Get the balance of a purse - -This can be done via `get-balance`. For example, to get the balance of the main purse of our newly-created account: - -``` -cargo run --release -- get-balance \ - --node-address=http://localhost:50101 \ - --state-root-hash=242666f5959e6a51b7a75c23264f3cb326eecd6bec6dbab147f5801ec23daed6 \ - --purse-uref=uref-09480c3248ef76b603d386f3f4f8a5f87f597d4eaffd475433f861af187ab5db-007 -``` - -
example output - -```commandline -{ - "jsonrpc": "2.0", - "result": { - "api_version": "1.0.0", - "balance_value": "1234567" - }, - "id": 4193583276 -} -``` -
- -Note that the system mint contract is required to retrieve the balance of any given purse. If you execute a -`query-state` specifying a purse `URef` as the `--key` argument, you'll find that the actual value stored there is a -unit value `()`. This makes the `get-balance` subcommand particularly useful. - ---- - - -## Client library - -The `lib` directory contains source for the client library, which may be called directly rather than through the CLI -binary. The CLI app `casper-client` makes use of this library to implement its functionality. - - -## Client library C wrapper - -An optional feature of the client library is to use `cbindgen` to build a C wrapper for functions in the library. This -can then be leveraged to build bindings for the library in any language that can access an `extern "C"` interface. - -The feature is named `ffi` and is enabled by default. - -See `examples/ffi/README.md` for more information. diff --git a/client/build.rs b/client/build.rs deleted file mode 100644 index 1e5a06e322..0000000000 --- a/client/build.rs +++ /dev/null @@ -1,33 +0,0 @@ -fn main() { - #[cfg(feature = "ffi")] - { - use std::env; - - use cbindgen::{Builder, Language}; - - let output_file = format!( - "{}/../../../../headers/casper_client.h", - env::var("OUT_DIR").expect("should have env var OUT_DIR set"), - ); - - let crate_dir = env::var("CARGO_MANIFEST_DIR").unwrap(); - Builder::new() - .with_crate(crate_dir) - .with_language(Language::C) - .with_autogen_warning( - "/* WARNING: this file is autogenerated by cbindgen. Don't modify this manually. 
*/" - ) - .with_include_guard("__CASPER_CLIENT_H__") - .with_no_includes() - // add sys headers explicitly - .with_sys_include("stdint.h") - .with_sys_include("stdbool.h") - // individual exported struct definitions need to be explicitly included - .include_item("casper_deploy_params_t") - .include_item("casper_payment_params_t") - .include_item("casper_session_params_t") - .generate() - .expect("Unable to generate bindings") - .write_to_file(&output_file); - } -} diff --git a/client/examples/ffi/.clang-format b/client/examples/ffi/.clang-format deleted file mode 100644 index 683521a7d7..0000000000 --- a/client/examples/ffi/.clang-format +++ /dev/null @@ -1,127 +0,0 @@ ---- -Language: Cpp -# BasedOnStyle: LLVM -AccessModifierOffset: -2 -AlignAfterOpenBracket: Align -AlignConsecutiveMacros: false -AlignConsecutiveAssignments: false -AlignConsecutiveDeclarations: false -AlignEscapedNewlines: Right -AlignOperands: true -AlignTrailingComments: true -AllowAllArgumentsOnNextLine: true -AllowAllConstructorInitializersOnNextLine: true -AllowAllParametersOfDeclarationOnNextLine: true -AllowShortBlocksOnASingleLine: false -AllowShortCaseLabelsOnASingleLine: false -AllowShortFunctionsOnASingleLine: All -AllowShortLambdasOnASingleLine: All -AllowShortIfStatementsOnASingleLine: Never -AllowShortLoopsOnASingleLine: false -AlwaysBreakAfterDefinitionReturnType: None -AlwaysBreakAfterReturnType: None -AlwaysBreakBeforeMultilineStrings: false -AlwaysBreakTemplateDeclarations: MultiLine -BinPackArguments: true -BinPackParameters: true -BraceWrapping: - AfterCaseLabel: false - AfterClass: false - AfterControlStatement: false - AfterEnum: false - AfterFunction: false - AfterNamespace: false - AfterObjCDeclaration: false - AfterStruct: false - AfterUnion: false - AfterExternBlock: false - BeforeCatch: false - BeforeElse: false - IndentBraces: false - SplitEmptyFunction: true - SplitEmptyRecord: true - SplitEmptyNamespace: true -BreakBeforeBinaryOperators: None -BreakBeforeBraces: Attach 
-BreakBeforeInheritanceComma: false -BreakInheritanceList: BeforeColon -BreakBeforeTernaryOperators: true -BreakConstructorInitializersBeforeComma: false -BreakConstructorInitializers: BeforeColon -BreakAfterJavaFieldAnnotations: false -BreakStringLiterals: true -ColumnLimit: 80 -CommentPragmas: '^ IWYU pragma:' -CompactNamespaces: false -ConstructorInitializerAllOnOneLineOrOnePerLine: false -ConstructorInitializerIndentWidth: 4 -ContinuationIndentWidth: 4 -Cpp11BracedListStyle: true -DerivePointerAlignment: false -DisableFormat: false -ExperimentalAutoDetectBinPacking: false -FixNamespaceComments: true -ForEachMacros: - - foreach - - Q_FOREACH - - BOOST_FOREACH -IncludeBlocks: Preserve -IncludeCategories: - - Regex: '^"(llvm|llvm-c|clang|clang-c)/' - Priority: 2 - - Regex: '^(<|"(gtest|gmock|isl|json)/)' - Priority: 3 - - Regex: '.*' - Priority: 1 -IncludeIsMainRegex: '(Test)?$' -IndentCaseLabels: false -IndentPPDirectives: None -IndentWidth: 4 -IndentWrappedFunctionNames: false -JavaScriptQuotes: Leave -JavaScriptWrapImports: true -KeepEmptyLinesAtTheStartOfBlocks: true -MacroBlockBegin: '' -MacroBlockEnd: '' -MaxEmptyLinesToKeep: 1 -NamespaceIndentation: None -ObjCBinPackProtocolList: Auto -ObjCBlockIndentWidth: 2 -ObjCSpaceAfterProperty: false -ObjCSpaceBeforeProtocolList: true -PenaltyBreakAssignment: 2 -PenaltyBreakBeforeFirstCallParameter: 19 -PenaltyBreakComment: 300 -PenaltyBreakFirstLessLess: 120 -PenaltyBreakString: 1000 -PenaltyBreakTemplateDeclaration: 10 -PenaltyExcessCharacter: 1000000 -PenaltyReturnTypeOnItsOwnLine: 60 -PointerAlignment: Right -ReflowComments: true -SortIncludes: true -SortUsingDeclarations: true -SpaceAfterCStyleCast: false -SpaceAfterLogicalNot: false -SpaceAfterTemplateKeyword: true -SpaceBeforeAssignmentOperators: true -SpaceBeforeCpp11BracedList: false -SpaceBeforeCtorInitializerColon: true -SpaceBeforeInheritanceColon: true -SpaceBeforeParens: ControlStatements -SpaceBeforeRangeBasedForLoopColon: true -SpaceInEmptyParentheses: 
false -SpacesBeforeTrailingComments: 1 -SpacesInAngles: false -SpacesInContainerLiterals: true -SpacesInCStyleCastParentheses: false -SpacesInParentheses: false -SpacesInSquareBrackets: false -Standard: Cpp11 -StatementMacros: - - Q_UNUSED - - QT_REQUIRE_VERSION -TabWidth: 8 -UseTab: Never -... - diff --git a/client/examples/ffi/CMakeLists.txt b/client/examples/ffi/CMakeLists.txt deleted file mode 100644 index b01409a8e6..0000000000 --- a/client/examples/ffi/CMakeLists.txt +++ /dev/null @@ -1,58 +0,0 @@ -cmake_minimum_required(VERSION 3.10) -project(CasperClientWrapper C) - -find_program(Cargo cargo HINTS $ENV{HOME}/.cargo/bin) -if(NOT Cargo) - message(FATAL_ERROR "cargo not found") -endif() - -if(CMAKE_BUILD_TYPE STREQUAL Release) - set(TARGET_DIR release) -else() - set(TARGET_DIR debug) -endif() - -# The root path to install casper_client and Unity test lib. -set(InstallPath "${CMAKE_CURRENT_BINARY_DIR}/installed") -# The casper_client library's name - e.g. 'libcasper_client.so' on Unix. -set(ClientLibName ${CMAKE_SHARED_LIBRARY_PREFIX}casper_client${CMAKE_SHARED_LIBRARY_SUFFIX}) -# The initial build location of the casper_client library after being built by cargo. -set(ClientBuiltLibSource "${CMAKE_CURRENT_LIST_DIR}/../../../target/${TARGET_DIR}/${ClientLibName}") -# The location of the casper_client library after being moved to the ffi examples build directory. -set(ClientBuiltLibTarget "${InstallPath}/lib/${ClientLibName}") -# The initial location of the generated header for the casper_client library. -set(ClientHeadersDirSource "${CMAKE_CURRENT_LIST_DIR}/../../../target/headers") -# The location of the casper_client header after being moved to the ffi examples build directory. -set(ClientHeadersDirTarget "${InstallPath}/include") - -# This target builds the casper_client library and copies it and its header(s) to our build directory. 
-add_custom_target( - ClientSharedLibrary - COMMENT "Building casper_client library and copying from '${ClientBuiltLibSource}' to '${ClientBuiltLibTarget}'." - COMMAND "${Cargo}" build --lib $<$:--release> - COMMAND ${CMAKE_COMMAND} -E copy "${ClientBuiltLibSource}" "${ClientBuiltLibTarget}" - COMMAND ${CMAKE_COMMAND} -E copy_directory "${ClientHeadersDirSource}" "${ClientHeadersDirTarget}" - WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} -) - -include_directories("${ClientHeadersDirTarget}") - -add_executable(put-deploy src/put_deploy.c) -target_link_libraries(put-deploy PRIVATE ${ClientBuiltLibTarget}) -add_dependencies(put-deploy ClientSharedLibrary) - -add_executable(get-auction-info src/get_auction_info.c) -target_link_libraries(get-auction-info PRIVATE ${ClientBuiltLibTarget}) -add_dependencies(get-auction-info ClientSharedLibrary) - -include(ExternalProject) -ExternalProject_Add(Unity - URL https://github.com/ThrowTheSwitch/Unity/archive/v2.5.1.tar.gz - CMAKE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${InstallPath} -) -set(UnityLibName ${CMAKE_STATIC_LIBRARY_PREFIX}unity${CMAKE_STATIC_LIBRARY_SUFFIX}) - -add_executable(ffi-tests tests/ffi_tests.c) -target_link_libraries(ffi-tests PRIVATE ${ClientBuiltLibTarget} ${UnityLibName}) -target_include_directories(ffi-tests PRIVATE "${ClientHeadersDirTarget}" "${InstallPath}/include/unity") -add_dependencies(ffi-tests ClientSharedLibrary Unity) diff --git a/client/examples/ffi/README.md b/client/examples/ffi/README.md deleted file mode 100644 index 61253c0893..0000000000 --- a/client/examples/ffi/README.md +++ /dev/null @@ -1,78 +0,0 @@ -# casper_client FFI examples - -It's possible to use the `casper_client` library from C: - - -## Building with CMake - -To build the examples, from the root of `casper-node` run: - -``` -cmake -Hclient/examples/ffi -Btarget/build -DCMAKE_BUILD_TYPE=Debug -cmake --build target/build -``` - -In the `target/build` directory which was created, you should see the binaries for the examples that have 
been compiled. - -The build also produces a shared library in `target/build/installed/lib/libcasper_client.so` and its header file in -`target/build/installed/include/casper_client.h`. - -``` -#include "casper_client.h -``` - - -## Initial setup - -Some resources need to be initialized before library functions can be called: - -``` -/* initialize casper-client library */ -casper_setup_client(); -``` - -After this, it's possible to call library functions to query the node. - -For example: - -``` -unsigned char response_buffer[RESPONSE_BUFFER_LEN] = {0}; -casper_error_t response_code = casper_get_auction_info( - RPC_ID, NODE_ADDRESS, VERBOSE, response_buffer, RESPONSE_BUFFER_LEN); -if (response_code == CASPER_SUCCESS) { - printf("get_auction_info: got successful response\n%s\n", response_buffer); -} else { - /* handle error... see Error Handling below */ -} -``` - - -## Error handling - -Errors are returned from the various library functions as `casper_error_t`, but more detail can be gathered using -`get_last_error`, which will pull the last error that occurred in the library as a string. - -``` -if (response == CASPER_IO_ERROR) { - /* first, initialize a buffer to hold our error string */ - unsigned char error[ERROR_LEN] = {0}; - - /* ask for the description of the latest error, which was a CASPER_IO_ERROR in this case */ - casper_get_last_error(error, ERROR_LEN); - - printf("got an IO error:\n%s\n", error); -} -``` - -Refer to `/client/headers/casper_client.h` as well as the examples in the `src` directory for more -information about specific functions and their arguments. 
- - -## Cleanup - -In order to clean up and free any resources that the library has allocated, run: - -``` -/* finally, clean up after ourselves */ -casper_shutdown_client(); -``` diff --git a/client/examples/ffi/check_format.bash b/client/examples/ffi/check_format.bash deleted file mode 100755 index 12cc74bec3..0000000000 --- a/client/examples/ffi/check_format.bash +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/bash -for f in ./src/*.c; do - OUTPUT=$(diff <(clang-format $f) <(cat $f)); - ret=$? - if [[ $ret -ne 0 ]]; then - echo "Source file format $f differs - try running:" - echo "" - echo "clang-format -i $f" - echo "" - echo $"$OUTPUT" - exit $ret - fi -done -for f in ./tests/*.c; do - OUTPUT=$(diff <(clang-format $f) <(cat $f)); - ret=$? - if [[ $ret -ne 0 ]]; then - echo "Source file format $f differs - try running:" - echo "" - echo "clang-format -i $f" - echo "" - echo $"$OUTPUT" - exit $ret - fi -done diff --git a/client/examples/ffi/src/get_auction_info.c b/client/examples/ffi/src/get_auction_info.c deleted file mode 100644 index c7080b7b87..0000000000 --- a/client/examples/ffi/src/get_auction_info.c +++ /dev/null @@ -1,29 +0,0 @@ -#include - -#include "casper_client.h" - -#define RESPONSE_BUFFER_LEN 1048576 -#define ERROR_LEN 255 -#define NODE_ADDRESS "http://localhost:50101" -#define RPC_ID "1" -#define VERBOSE 0 - -int main(int argc, char **argv) { - casper_setup_client(); - - unsigned char response_buffer[RESPONSE_BUFFER_LEN] = {0}; - casper_error_t success = casper_get_auction_info( - RPC_ID, NODE_ADDRESS, VERBOSE, response_buffer, RESPONSE_BUFFER_LEN); - if (success == CASPER_SUCCESS) { - printf("Got successful response:\n%s\n", response_buffer); - } else { - unsigned char error[ERROR_LEN] = {0}; - casper_get_last_error(error, ERROR_LEN); - printf("Got error:\n%s\n", error); - } - printf("Done.\n"); - - casper_shutdown_client(); - - return 0; -} diff --git a/client/examples/ffi/src/put_deploy.c b/client/examples/ffi/src/put_deploy.c deleted file mode 
100644 index b392c83040..0000000000 --- a/client/examples/ffi/src/put_deploy.c +++ /dev/null @@ -1,48 +0,0 @@ -#include - -#include "casper_client.h" - -#define RESPONSE_BUFFER_LEN 1024 -#define ERROR_LEN 255 -#define NODE_ADDRESS "http://localhost:50101" -#define RPC_ID "1" -#define VERBOSE 0 - -int main(int argc, char **argv) { - casper_setup_client(); - - casper_deploy_params_t deploy_params = {0}; - deploy_params.secret_key = "resources/local/secret_keys/node-1.pem"; - deploy_params.ttl = "10s"; - deploy_params.chain_name = "casper-charlie-testnet1"; - deploy_params.gas_price = "11"; - - casper_payment_params_t payment_params = {0}; - payment_params.payment_amount = "1000"; - - const char *payment_args[2] = { - "name_01:bool='false'", - "name_02:i32='42'", - }; - payment_params.payment_args_simple = (const char *const *)&payment_args; - payment_params.payment_args_simple_len = 2; - - casper_session_params_t session_params = {0}; - - unsigned char response_buffer[RESPONSE_BUFFER_LEN] = {0}; - casper_error_t success = casper_put_deploy( - RPC_ID, NODE_ADDRESS, VERBOSE, &deploy_params, &session_params, - &payment_params, response_buffer, RESPONSE_BUFFER_LEN); - if (success == CASPER_SUCCESS) { - printf("Got successful response\n%s\n", response_buffer); - } else { - unsigned char error[ERROR_LEN] = {0}; - casper_get_last_error(error, ERROR_LEN); - printf("Got error:\n%s\n", error); - } - printf("Done.\n"); - - casper_shutdown_client(); - - return 0; -} diff --git a/client/examples/ffi/tests/ffi_tests.c b/client/examples/ffi/tests/ffi_tests.c deleted file mode 100644 index 16e96d28e0..0000000000 --- a/client/examples/ffi/tests/ffi_tests.c +++ /dev/null @@ -1,58 +0,0 @@ -#include "unity.h" -#include "casper_client.h" - -void setUp(void) { - casper_setup_client(); -} - -void tearDown(void) { - casper_shutdown_client(); -} - -void test_should_be_no_last_error_on_startup(void) { - unsigned char error[255] = {0}; - int bytes_read = casper_get_last_error(error, 255); - 
TEST_ASSERT_EQUAL_INT(0, bytes_read); -} - -void test_should_get_last_error_after_bad_request(void) { - casper_deploy_params_t deploy_params = {0}; - deploy_params.secret_key = "resources/local/secret_keys/node-1.pem"; - deploy_params.ttl = "10s"; - deploy_params.chain_name = "casper-charlie-testnet1"; - deploy_params.gas_price = "11"; - - casper_payment_params_t payment_params = {0}; - payment_params.payment_amount = "1000"; - - const char *payment_args[2] = { - "name_01:bool='false'", - "name_02:i32='42'", - }; - payment_params.payment_args_simple = (const char *const *)&payment_args; - payment_params.payment_args_simple_len = 2; - - casper_session_params_t session_params = {0}; - session_params.session_name = "standard_payment"; - session_params.session_entry_point = "session_entry_point"; - - unsigned char response_buffer[1024] = {0}; - - casper_error_t success = - casper_put_deploy("1", "", false, &deploy_params, &session_params, - &payment_params, response_buffer, 1024); - - TEST_ASSERT_NOT_EQUAL_INT(CASPER_SUCCESS, success); - - unsigned char error[255] = {0}; - int bytes_read = casper_get_last_error(error, 255); - TEST_ASSERT_EQUAL_STRING("Failed to get RPC response: builder error: relative URL without a base", - error); -} - -int main(int argc, char **argv) { - UNITY_BEGIN(); - RUN_TEST(test_should_be_no_last_error_on_startup); - RUN_TEST(test_should_get_last_error_after_bad_request); - return UNITY_END(); -} diff --git a/client/lib/cl_type.rs b/client/lib/cl_type.rs deleted file mode 100644 index d5b5a7102f..0000000000 --- a/client/lib/cl_type.rs +++ /dev/null @@ -1,373 +0,0 @@ -//! Supported `CLType` and `CLValue` parsing and validation. - -use std::{result::Result as StdResult, str::FromStr}; - -use casper_types::{ - account::AccountHash, bytesrepr::ToBytes, AsymmetricType, CLType, CLTyped, CLValue, Key, - PublicKey, URef, U128, U256, U512, -}; - -use crate::error::{Error, Result}; - -/// Parse a `CLType` from `&str`. 
-pub(crate) fn parse(strval: &str) -> StdResult { - let supported_types = supported_cl_types(); - let cl_type = match strval.to_lowercase() { - t if t == supported_types[0].0 => supported_types[0].1.clone(), - t if t == supported_types[1].0 => supported_types[1].1.clone(), - t if t == supported_types[2].0 => supported_types[2].1.clone(), - t if t == supported_types[3].0 => supported_types[3].1.clone(), - t if t == supported_types[4].0 => supported_types[4].1.clone(), - t if t == supported_types[5].0 => supported_types[5].1.clone(), - t if t == supported_types[6].0 => supported_types[6].1.clone(), - t if t == supported_types[7].0 => supported_types[7].1.clone(), - t if t == supported_types[8].0 => supported_types[8].1.clone(), - t if t == supported_types[9].0 => supported_types[9].1.clone(), - t if t == supported_types[10].0 => supported_types[10].1.clone(), - t if t == supported_types[11].0 => supported_types[11].1.clone(), - t if t == supported_types[12].0 => supported_types[12].1.clone(), - t if t == supported_types[13].0 => supported_types[13].1.clone(), - t if t == supported_types[14].0 => supported_types[14].1.clone(), - t if t == supported_types[15].0 => supported_types[15].1.clone(), - t if t == supported_types[16].0 => supported_types[16].1.clone(), - t if t == supported_types[17].0 => supported_types[17].1.clone(), - t if t == supported_types[18].0 => supported_types[18].1.clone(), - t if t == supported_types[19].0 => supported_types[19].1.clone(), - t if t == supported_types[20].0 => supported_types[20].1.clone(), - t if t == supported_types[21].0 => supported_types[21].1.clone(), - t if t == supported_types[22].0 => supported_types[22].1.clone(), - t if t == supported_types[23].0 => supported_types[23].1.clone(), - t if t == supported_types[24].0 => supported_types[24].1.clone(), - t if t == supported_types[25].0 => supported_types[25].1.clone(), - t if t == supported_types[26].0 => supported_types[26].1.clone(), - t if t == supported_types[27].0 => 
supported_types[27].1.clone(), - t if t == supported_types[28].0 => supported_types[28].1.clone(), - t if t == supported_types[29].0 => supported_types[29].1.clone(), - _ => return Err(()), - }; - Ok(cl_type) -} - -pub(crate) fn supported_cl_types() -> Vec<(&'static str, CLType)> { - vec![ - ("bool", CLType::Bool), - ("i32", CLType::I32), - ("i64", CLType::I64), - ("u8", CLType::U8), - ("u32", CLType::U32), - ("u64", CLType::U64), - ("u128", CLType::U128), - ("u256", CLType::U256), - ("u512", CLType::U512), - ("unit", CLType::Unit), - ("string", CLType::String), - ("key", CLType::Key), - ("account_hash", AccountHash::cl_type()), - ("uref", CLType::URef), - ("public_key", CLType::PublicKey), - ("opt_bool", CLType::Option(Box::new(CLType::Bool))), - ("opt_i32", CLType::Option(Box::new(CLType::I32))), - ("opt_i64", CLType::Option(Box::new(CLType::I64))), - ("opt_u8", CLType::Option(Box::new(CLType::U8))), - ("opt_u32", CLType::Option(Box::new(CLType::U32))), - ("opt_u64", CLType::Option(Box::new(CLType::U64))), - ("opt_u128", CLType::Option(Box::new(CLType::U128))), - ("opt_u256", CLType::Option(Box::new(CLType::U256))), - ("opt_u512", CLType::Option(Box::new(CLType::U512))), - ("opt_unit", CLType::Option(Box::new(CLType::Unit))), - ("opt_string", CLType::Option(Box::new(CLType::String))), - ("opt_key", CLType::Option(Box::new(CLType::Key))), - ( - "opt_account_hash", - CLType::Option(Box::new(AccountHash::cl_type())), - ), - ("opt_uref", CLType::Option(Box::new(CLType::URef))), - ( - "opt_public_key", - CLType::Option(Box::new(CLType::PublicKey)), - ), - ] -} - -/// Functions for use in help commands. -pub mod help { - use std::convert::TryFrom; - - use casper_types::{account::AccountHash, AccessRights, AsymmetricType, Key, PublicKey, URef}; - - /// Returns a list of `CLType`s able to be passed as a string for use as payment code or session - /// code args. 
- pub fn supported_cl_type_list() -> String { - let mut msg = String::new(); - let supported_types = super::supported_cl_types(); - for (index, item) in supported_types.iter().map(|(name, _)| name).enumerate() { - msg.push_str(item); - if index < supported_types.len() - 1 { - msg.push_str(", ") - } - } - msg - } - - /// Returns a string listing examples of the format required when passing in payment code or - /// session code args. - pub fn supported_cl_type_examples() -> String { - let bytes = (1..33).collect::>(); - let array = <[u8; 32]>::try_from(bytes.as_ref()).unwrap(); - - format!( - r#""name_01:bool='false'" -"name_02:i32='-1'" -"name_03:i64='-2'" -"name_04:u8='3'" -"name_05:u32='4'" -"name_06:u64='5'" -"name_07:u128='6'" -"name_08:u256='7'" -"name_09:u512='8'" -"name_10:unit=''" -"name_11:string='a value'" -"key_account_name:key='{}'" -"key_hash_name:key='{}'" -"key_uref_name:key='{}'" -"account_hash_name:account_hash='{}'" -"uref_name:uref='{}'" -"public_key_name:public_key='{}'" - -Optional values of all of these types can also be specified. 
-Prefix the type with "opt_" and use the term "null" without quotes to specify a None value: -"name_01:opt_bool='true'" # Some(true) -"name_02:opt_bool='false'" # Some(false) -"name_03:opt_bool=null" # None -"name_04:opt_i32='-1'" # Some(-1) -"name_05:opt_i32=null" # None -"name_06:opt_unit=''" # Some(()) -"name_07:opt_unit=null" # None -"name_08:opt_string='a value'" # Some("a value".to_string()) -"name_09:opt_string='null'" # Some("null".to_string()) -"name_10:opt_string=null" # None -"#, - Key::Account(AccountHash::new(array)).to_formatted_string(), - Key::Hash(array).to_formatted_string(), - Key::URef(URef::new(array, AccessRights::NONE)).to_formatted_string(), - AccountHash::new(array).to_formatted_string(), - URef::new(array, AccessRights::READ_ADD_WRITE).to_formatted_string(), - PublicKey::from_hex( - "0119bf44096984cdfe8541bac167dc3b96c85086aa30b6b6cb0c5c38ad703166e1" - ) - .unwrap() - .to_hex(), - ) - } -} - -#[derive(Debug, PartialEq, Eq)] -enum OptionalStatus { - Some, - None, - NotOptional, -} - -/// Parses to a given CLValue taking into account whether the arg represents an optional type or -/// not. -fn parse_to_cl_value(optional_status: OptionalStatus, parse: F) -> Result -where - T: CLTyped + ToBytes, - F: FnOnce() -> Result, -{ - match optional_status { - OptionalStatus::Some => CLValue::from_t(Some(parse()?)), - OptionalStatus::None => CLValue::from_t::>(None), - OptionalStatus::NotOptional => CLValue::from_t(parse()?), - } - .map_err(|error| { - Error::InvalidCLValue(format!( - "unable to parse cl value {:?} with optional_status {:?}", - error, optional_status - )) - }) -} - -/// Returns a value built from a single arg which has been split into its constituent parts. 
-pub fn parts_to_cl_value(cl_type: CLType, value: &str) -> Result { - let (cl_type_to_parse, optional_status, trimmed_value) = match cl_type { - CLType::Option(inner_type) => { - if value == "null" { - (*inner_type, OptionalStatus::None, "") - } else { - (*inner_type, OptionalStatus::Some, value.trim_matches('\'')) - } - } - _ => ( - cl_type, - OptionalStatus::NotOptional, - value.trim_matches('\''), - ), - }; - - if value == trimmed_value { - return Err(Error::InvalidCLValue(format!( - "value in simple arg should be surrounded by single quotes unless it's a null \ - optional value (value passed: {})", - value - ))); - } - - match cl_type_to_parse { - CLType::Bool => { - let parse = || match trimmed_value.to_lowercase().as_str() { - "true" | "t" => Ok(true), - "false" | "f" => Ok(false), - invalid => Err(Error::InvalidCLValue(format!( - "can't parse {} as a bool. Should be 'true' or 'false'", - invalid - ))), - }; - parse_to_cl_value(optional_status, parse) - } - CLType::I32 => { - let parse = || { - i32::from_str(trimmed_value).map_err(|error| { - Error::InvalidCLValue(format!("can't parse {} as i32: {}", value, error)) - }) - }; - parse_to_cl_value(optional_status, parse) - } - CLType::I64 => { - let parse = || { - i64::from_str(trimmed_value).map_err(|error| { - Error::InvalidCLValue(format!( - "can't parse {} as i64: {}", - trimmed_value, error - )) - }) - }; - parse_to_cl_value(optional_status, parse) - } - CLType::U8 => { - let parse = || { - u8::from_str(trimmed_value).map_err(|error| { - Error::InvalidCLValue(format!("can't parse {} as u8: {}", trimmed_value, error)) - }) - }; - parse_to_cl_value(optional_status, parse) - } - CLType::U32 => { - let parse = || { - u32::from_str(trimmed_value).map_err(|error| { - Error::InvalidCLValue(format!( - "can't parse {} as u32: {}", - trimmed_value, error - )) - }) - }; - parse_to_cl_value(optional_status, parse) - } - CLType::U64 => { - let parse = || { - u64::from_str(trimmed_value).map_err(|error| { - 
Error::InvalidCLValue(format!( - "can't parse {} as u64: {}", - trimmed_value, error - )) - }) - }; - parse_to_cl_value(optional_status, parse) - } - CLType::U128 => { - let parse = || { - U128::from_dec_str(trimmed_value).map_err(|error| { - Error::InvalidCLValue(format!( - "can't parse {} as U128: {}", - trimmed_value, error - )) - }) - }; - parse_to_cl_value(optional_status, parse) - } - CLType::U256 => { - let parse = || { - U256::from_dec_str(trimmed_value).map_err(|error| { - Error::InvalidCLValue(format!( - "can't parse {} as U256: {}", - trimmed_value, error - )) - }) - }; - parse_to_cl_value(optional_status, parse) - } - CLType::U512 => { - let parse = || { - U512::from_dec_str(trimmed_value).map_err(|error| { - Error::InvalidCLValue(format!( - "can't parse {} as U512: {}", - trimmed_value, error - )) - }) - }; - parse_to_cl_value(optional_status, parse) - } - CLType::Unit => { - let parse = || { - if !trimmed_value.is_empty() { - return Err(Error::InvalidCLValue(format!( - "can't parse {} as unit. 
Should be ''", - trimmed_value - ))); - } - Ok(()) - }; - parse_to_cl_value(optional_status, parse) - } - CLType::String => { - let parse = || Ok(trimmed_value.to_string()); - parse_to_cl_value(optional_status, parse) - } - CLType::Key => { - let parse = || { - Key::from_formatted_str(trimmed_value).map_err(|error| { - Error::InvalidCLValue(format!( - "can't parse {} as Key: {:?}", - trimmed_value, error - )) - }) - }; - parse_to_cl_value(optional_status, parse) - } - CLType::ByteArray(32) => { - let parse = || { - AccountHash::from_formatted_str(trimmed_value).map_err(|error| { - Error::InvalidCLValue(format!( - "can't parse {} as AccountHash: {:?}", - trimmed_value, error - )) - }) - }; - parse_to_cl_value(optional_status, parse) - } - CLType::URef => { - let parse = || { - URef::from_formatted_str(trimmed_value).map_err(|error| { - Error::InvalidCLValue(format!( - "can't parse {} as URef: {:?}", - trimmed_value, error - )) - }) - }; - parse_to_cl_value(optional_status, parse) - } - CLType::PublicKey => { - let parse = || { - let pub_key = PublicKey::from_hex(trimmed_value).map_err(|error| { - Error::InvalidCLValue(format!( - "can't parse {} as PublicKey: {:?}", - trimmed_value, error - )) - })?; - Ok(pub_key) - }; - parse_to_cl_value(optional_status, parse) - } - _ => unreachable!(), - } -} diff --git a/client/lib/deploy.rs b/client/lib/deploy.rs deleted file mode 100644 index 6b459e87af..0000000000 --- a/client/lib/deploy.rs +++ /dev/null @@ -1,377 +0,0 @@ -use std::{ - fs::File, - io::{self, BufReader, Read, Write}, -}; - -use serde::{Deserialize, Serialize}; - -use casper_execution_engine::core::engine_state::ExecutableDeployItem; -use casper_node::{ - rpcs::{account::PutDeploy, chain::GetBlockResult, info::GetDeploy, RpcWithParams}, - types::{Deploy, DeployHash, TimeDiff, Timestamp}, -}; -use casper_types::{ProtocolVersion, SecretKey}; - -use crate::{ - error::{Error, Result}, - rpc::RpcClient, -}; - -/// SendDeploy allows sending a deploy to the node. 
-pub(crate) struct SendDeploy; - -/// Transfer allows transferring an amount between accounts. -pub(crate) struct Transfer {} - -impl RpcClient for PutDeploy { - const RPC_METHOD: &'static str = Self::METHOD; -} - -impl RpcClient for GetDeploy { - const RPC_METHOD: &'static str = Self::METHOD; -} - -impl RpcClient for SendDeploy { - const RPC_METHOD: &'static str = PutDeploy::METHOD; -} - -impl RpcClient for Transfer { - const RPC_METHOD: &'static str = PutDeploy::METHOD; -} - -/// Result for "chain_get_block" RPC response. -#[derive(Serialize, Deserialize, Debug)] -pub struct ListDeploysResult { - /// The RPC API version. - pub api_version: ProtocolVersion, - /// The deploy hashes of the block, if found. - pub deploy_hashes: Option>, - /// The transfer deploy hashes of the block, if found. - pub transfer_hashes: Option>, -} - -impl From for ListDeploysResult { - fn from(get_block_result: GetBlockResult) -> Self { - ListDeploysResult { - api_version: get_block_result.api_version, - deploy_hashes: get_block_result - .block - .as_ref() - .map(|block| block.deploy_hashes().clone()), - transfer_hashes: get_block_result - .block - .as_ref() - .map(|block| block.transfer_hashes().clone()), - } - } -} - -/// Creates a `Write` trait object respective to the path value passed. A `File` is returned if -/// `maybe_path` is `Some`. If `maybe_path` is `None`, a `Stdout` or `Sink` is returned; `Sink` for -/// test configuration to avoid cluttering test output. -pub(super) fn output_or_stdout(maybe_path: Option<&str>) -> io::Result> { - match maybe_path { - Some(output_path) => File::create(&output_path).map(|file| { - let write: Box = Box::new(file); - write - }), - None => Ok(if cfg!(test) { - Box::new(io::sink()) - } else { - Box::new(io::stdout()) - }), - } -} - -/// `DeployParams` are used as a helper to construct a `Deploy` with -/// `DeployExt::with_payment_and_session`. -pub struct DeployParams { - /// The secret key for this `Deploy`. 
- pub secret_key: SecretKey, - - /// The creation timestamp of this `Deploy`. - pub timestamp: Timestamp, - - /// The time to live for this `Deploy`. - pub ttl: TimeDiff, - - /// The gas price for this `Deploy`. - pub gas_price: u64, - - /// A list of other `Deploy`s (hashes) that this `Deploy` depends upon. - pub dependencies: Vec, - - /// The name of the chain this `Deploy` will be considered for inclusion in. - pub chain_name: String, -} - -/// An extension trait that adds some client-specific functionality to `Deploy`. -pub(super) trait DeployExt { - /// Constructs a `Deploy`. - fn with_payment_and_session( - params: DeployParams, - payment: ExecutableDeployItem, - session: ExecutableDeployItem, - ) -> Deploy; - - /// Writes the `Deploy` to `output`. - fn write_deploy(&self, output: W) -> Result<()> - where - W: Write; - - /// Reads a `Deploy` from the `input`. - fn read_deploy(input: R) -> Result - where - R: Read; - - /// Reads a `Deploy` from the reader at `input`, signs it, then writes it back to `output`. - fn sign_and_write_deploy(input: R, secret_key: SecretKey, output: W) -> Result<()> - where - R: Read, - W: Write; -} - -impl DeployExt for Deploy { - fn with_payment_and_session( - params: DeployParams, - payment: ExecutableDeployItem, - session: ExecutableDeployItem, - ) -> Deploy { - let DeployParams { - timestamp, - ttl, - gas_price, - dependencies, - chain_name, - secret_key, - } = params; - Deploy::new( - timestamp, - ttl, - gas_price, - dependencies, - chain_name, - payment, - session, - &secret_key, - ) - } - - fn write_deploy(&self, mut output: W) -> Result<()> - where - W: Write, - { - let content = serde_json::to_string_pretty(self)?; - output - .write_all(content.as_bytes()) - .map_err(|error| Error::IoError { - context: "unable to write deploy".to_owned(), - error, - }) - } - - fn read_deploy(input: R) -> Result - where - R: Read, - { - let reader = BufReader::new(input); - Ok(serde_json::from_reader(reader)?) 
- } - - fn sign_and_write_deploy(input: R, secret_key: SecretKey, output: W) -> Result<()> - where - R: Read, - W: Write, - { - let mut deploy = Deploy::read_deploy(input)?; - deploy.sign(&secret_key); - deploy.write_deploy(output)?; - Ok(()) - } -} - -#[cfg(test)] -mod tests { - use std::convert::TryInto; - - use casper_node::crypto::AsymmetricKeyExt; - - use super::*; - use crate::{DeployStrParams, PaymentStrParams, SessionStrParams}; - - const PKG_HASH: &str = "09dcee4b212cfd53642ab323fbef07dafafc6f945a80a00147f62910a915c4e6"; - const ENTRYPOINT: &str = "entrypoint"; - const VERSION: &str = "0.1.0"; - const SAMPLE_DEPLOY: &str = r#"{ - "hash": "4858bbd79ab7b825244c4e6959cbcd588a05608168ef36518bc6590937191d55", - "header": { - "account": "01f60bce2bb1059c41910eac1e7ee6c3ef4c8fcc63a901eb9603c1524cadfb0c18", - "timestamp": "2021-01-19T01:18:19.120Z", - "ttl": "10s", - "gas_price": 1, - "body_hash": "95f2f2358c4864f01f8b073ae6f5ae67baeaf7747fc0799d0078743c513bc1de", - "dependencies": [ - "be5fdeea0240e999e376f8ecbce1bd4fd9336f58dae4a5842558a4da6ad35aa8", - "168d7ea9c88e76b3eef72759f2a7af24663cc871a469c7ba1387ca479e82fb41" - ], - "chain_name": "casper-test-chain-name-1" - }, - "payment": { - "StoredVersionedContractByHash": { - "hash": "09dcee4b212cfd53642ab323fbef07dafafc6f945a80a00147f62910a915c4e6", - "version": null, - "entry_point": "entrypoint", - "args": [ - [ - "name_01", - { - "cl_type": "Bool", - "bytes": "00", - "parsed": false - } - ], - [ - "name_02", - { - "cl_type": "I32", - "bytes": "2a000000", - "parsed": 42 - } - ] - ] - } - }, - "session": { - "StoredVersionedContractByHash": { - "hash": "09dcee4b212cfd53642ab323fbef07dafafc6f945a80a00147f62910a915c4e6", - "version": null, - "entry_point": "entrypoint", - "args": [ - [ - "name_01", - { - "cl_type": "Bool", - "bytes": "00", - "parsed": false - } - ], - [ - "name_02", - { - "cl_type": "I32", - "bytes": "2a000000", - "parsed": 42 - } - ] - ] - } - }, - "approvals": [ - { - "signer": 
"01f60bce2bb1059c41910eac1e7ee6c3ef4c8fcc63a901eb9603c1524cadfb0c18", - "signature": "010f538ef188770cdbf608bc2d7aa9460108b419b2b629f5e0714204a7f29149809a1d52776b0c514e3320494fdf6f9e9747f06f2c14ddf6f924ce218148e2840a" - }, - { - "signer": "01e67d6e56ae07eca98b07ecec8cfbe826b4d5bc51f3a86590c0882cdafbd72fcc", - "signature": "01c4f58d7f6145c1e4397efce766149cde5450cbe74991269161e5e1f30a397e6bc4c484f3c72a645cefd42c55cfde0294bfd91de55ca977798c3c8d2a7e43a40c" - } - ] - }"#; - - #[derive(Debug)] - struct ErrWrapper(pub Error); - - impl PartialEq for ErrWrapper { - fn eq(&self, other: &ErrWrapper) -> bool { - format!("{:?}", self.0) == format!("{:?}", other.0) - } - } - - pub fn deploy_params() -> DeployStrParams<'static> { - DeployStrParams { - secret_key: "../resources/local/secret_keys/node-1.pem", - ttl: "10s", - chain_name: "casper-test-chain-name-1", - gas_price: "1", - dependencies: vec![ - "be5fdeea0240e999e376f8ecbce1bd4fd9336f58dae4a5842558a4da6ad35aa8", - "168d7ea9c88e76b3eef72759f2a7af24663cc871a469c7ba1387ca479e82fb41", - ], - ..Default::default() - } - } - - fn args_simple() -> Vec<&'static str> { - vec!["name_01:bool='false'", "name_02:i32='42'"] - } - - #[test] - fn should_create_deploy() { - let deploy_params = deploy_params(); - let payment_params = - PaymentStrParams::with_package_hash(PKG_HASH, VERSION, ENTRYPOINT, args_simple(), ""); - let session_params = - SessionStrParams::with_package_hash(PKG_HASH, VERSION, ENTRYPOINT, args_simple(), ""); - - let mut output = Vec::new(); - - let deploy = Deploy::with_payment_and_session( - deploy_params.try_into().unwrap(), - payment_params.try_into().unwrap(), - session_params.try_into().unwrap(), - ); - deploy.write_deploy(&mut output).unwrap(); - - // The test output can be used to generate data for SAMPLE_DEPLOY: - // let secret_key = SecretKey::generate_ed25519().unwrap(); - // deploy.sign(&secret_key, &mut casper_node::new_rng()); - // println!("{}", serde_json::to_string_pretty(&deploy).unwrap()); - - let 
result = String::from_utf8(output).unwrap(); - - let expected = Deploy::read_deploy(SAMPLE_DEPLOY.as_bytes()).unwrap(); - let actual = Deploy::read_deploy(result.as_bytes()).unwrap(); - - assert_eq!(expected.header().account(), actual.header().account()); - assert_eq!(expected.header().ttl(), actual.header().ttl()); - assert_eq!(expected.header().gas_price(), actual.header().gas_price()); - assert_eq!(expected.header().body_hash(), actual.header().body_hash()); - assert_eq!(expected.payment(), actual.payment()); - assert_eq!(expected.session(), actual.session()); - } - - #[test] - fn should_read_deploy() { - let bytes = SAMPLE_DEPLOY.as_bytes(); - assert_eq!( - Deploy::read_deploy(bytes).map(|_| ()).map_err(ErrWrapper), - Ok(()) - ); - } - - #[test] - fn should_sign_deploy() { - let bytes = SAMPLE_DEPLOY.as_bytes(); - let mut deploy = Deploy::read_deploy(bytes).unwrap(); - deploy - .is_valid() - .unwrap_or_else(|error| panic!("{} - {:#?}", error, deploy)); - assert_eq!( - deploy.approvals().len(), - 2, - "Sample deploy should have 2 approvals." 
- ); - - let mut result = Vec::new(); - let secret_key = SecretKey::generate_ed25519().unwrap(); - Deploy::sign_and_write_deploy(bytes, secret_key, &mut result).unwrap(); - let signed_deploy = Deploy::read_deploy(&result[..]).unwrap(); - - assert_eq!( - signed_deploy.approvals().len(), - deploy.approvals().len() + 1, - "deploy should be is_valid() because it has been signed {:#?}", - signed_deploy - ); - } -} diff --git a/client/lib/error.rs b/client/lib/error.rs deleted file mode 100644 index 3bbf6c00e7..0000000000 --- a/client/lib/error.rs +++ /dev/null @@ -1,146 +0,0 @@ -use std::{num::ParseIntError, path::PathBuf}; - -use humantime::{DurationError, TimestampError}; -use jsonrpc_lite::JsonRpc; -use thiserror::Error; - -use casper_node::crypto::Error as CryptoError; -use casper_types::{ - bytesrepr::Error as ToBytesError, CLValueError, UIntParseError, URefFromStrError, -}; - -use crate::validation::ValidateResponseError; - -/// Crate-wide Result type wrapper. -pub(crate) type Result = std::result::Result; - -/// Error that can be returned by `casper-client`. -#[derive(Error, Debug)] -pub enum Error { - /// Failed to parse a - /// [`Key`](https://docs.rs/casper-types/latest/casper-types/enum.PublicKey.html) from a - /// formatted string. - #[error("Failed to parse as a key")] - FailedToParseKey, - - /// Failed to parse a `URef` from a formatted string. - #[error("Failed to parse '{0}' as a uref: {1:?}")] - FailedToParseURef(&'static str, URefFromStrError), - - /// Failed to parse an integer from a string. - #[error("Failed to parse '{0}' as an integer: {1:?}")] - FailedToParseInt(&'static str, ParseIntError), - - /// Failed to parse a `TimeDiff` from a formatted string. - #[error("Failed to parse '{0}' as a time diff: {1}")] - FailedToParseTimeDiff(&'static str, DurationError), - - /// Failed to parse a `Timestamp` from a formatted string. 
- #[error("Failed to parse '{0}' as a timestamp: {1}")] - FailedToParseTimestamp(&'static str, TimestampError), - - /// Failed to parse a `U128`, `U256` or `U512` from a string. - #[error("Failed to parse '{0}' as U128, U256, or U512: {1:?}")] - FailedToParseUint(&'static str, UIntParseError), - - /// Failed to get a response from the node. - #[error("Failed to get RPC response: {0}")] - FailedToGetResponse(reqwest::Error), - - /// Failed to parse the response from the node. - #[error("Failed to parse as JSON-RPC response: {0}")] - FailedToParseResponse(reqwest::Error), - - /// Failed to create new key file because it already exists. - #[error("File at {} already exists", .0.display())] - FileAlreadyExists(PathBuf), - - /// Unsupported keygen algorithm. - #[error("Unsupported keygen algorithm: {0}")] - UnsupportedAlgorithm(String), - - /// JSON-RPC error returned from the node. - #[error("RPC response is error: {0}")] - ResponseIsError(#[from] jsonrpc_lite::Error), - - /// Invalid JSON returned from the node. - #[error("Invalid JSON: {0}")] - InvalidJson(#[from] serde_json::Error), - - /// Invalid response returned from the node. - #[error("Invalid response: {0:?}")] - InvalidRpcResponse(JsonRpc), - - /// Failed to send the request to the node. - #[error("Failed sending {0:?}")] - FailedSending(JsonRpc), - - /// Context-adding wrapper for `std::io::Error`. - #[error("IO error: {context}: {error}")] - IoError { - /// Contextual description of where this error occurred including relevant paths, - /// filenames, etc. - context: String, - /// std::io::Error raised during the operation in question. - error: std::io::Error, - }, - - /// Failed to serialize to bytes. - #[error("Serialization error: {0}")] - ToBytesError(ToBytesError), - - /// Cryptographic error. - #[error("Cryptographic error: {context}: {error}")] - CryptoError { - /// Contextual text, such as callsite. - context: &'static str, - /// Underlying Cryptoerror. 
- error: CryptoError, - }, - - /// Invalid `CLValue`. - #[error("Invalid CLValue error: {0}")] - InvalidCLValue(String), - - /// Invalid argument. - #[error("Invalid argument '{0}': {1}")] - InvalidArgument(&'static str, String), - - /// Conflicting arguments. - #[error("Conflicting arguments passed '{context}' {args:?}")] - ConflictingArguments { - /// Contextual text, such as callsite. - context: &'static str, - /// Arguments passed, with their values. - args: Vec, - }, - - /// Failed to validate response. - #[error("Invalid response: {0}")] - InvalidResponse(#[from] ValidateResponseError), - - /// Must call FFI's setup function prior to making ffi calls. - #[cfg(feature = "ffi")] - #[error("Failed to call casper_setup_client()")] - FFISetupNotCalled, - - /// Must pass valid pointer values to FFI calls. - #[cfg(feature = "ffi")] - #[error("Required argument '{0}' was null")] - FFIPtrNullButRequired(&'static str), -} - -impl From for Error { - fn from(error: ToBytesError) -> Self { - Error::ToBytesError(error) - } -} - -impl From for Error { - fn from(error: CLValueError) -> Self { - match error { - CLValueError::Serialization(bytesrepr_error) => bytesrepr_error.into(), - CLValueError::Type(type_mismatch) => Error::InvalidCLValue(type_mismatch.to_string()), - } - } -} diff --git a/client/lib/ffi.rs b/client/lib/ffi.rs deleted file mode 100644 index 14f4842253..0000000000 --- a/client/lib/ffi.rs +++ /dev/null @@ -1,802 +0,0 @@ -//! Foreign function interfaces. 
- -use std::{ - convert::TryInto, - ffi::CStr, - os::raw::{c_char, c_uchar}, - slice, - sync::Mutex, -}; - -use once_cell::sync::Lazy; -use tokio::runtime; - -use super::error::{Error, Result}; - -static LAST_ERROR: Lazy>> = Lazy::new(|| Mutex::new(None)); -static RUNTIME: Lazy>> = Lazy::new(|| Mutex::new(None)); - -fn set_last_error(error: Error) { - let last_error = &mut *LAST_ERROR.lock().expect("should lock"); - *last_error = Some(error) -} - -/// FFI representation of [super::Error](super::Error) -/// -/// The full error can be extracted with get_last_error. -/// See [super::Error](super::Error) for more details on what these mean. -#[allow(non_snake_case, non_camel_case_types, missing_docs)] -#[repr(C)] -pub enum casper_error_t { - CASPER_SUCCESS = 0, - CASPER_FAILED_TO_PARSE_KEY = -1, - CASPER_FAILED_TO_PARSE_UREF = -2, - CASPER_FAILED_TO_PARSE_INT = -3, - CASPER_FAILED_TO_PARSE_TIME_DIFF = -4, - CASPER_FAILED_TO_PARSE_TIMESTAMP = -5, - CASPER_FAILED_TO_PARSE_UINT = -6, - CASPER_FAILED_TO_GET_RESPONSE = -7, - CASPER_FAILED_TO_PARSE_RESPONSE = -8, - CASPER_FILE_ALREADY_EXISTS = -9, - CASPER_UNSUPPORTED_ALGORITHM = -10, - CASPER_REPSONSE_IS_ERROR = -11, - CASPER_INVALID_JSON = -12, - CASPER_INVALID_RPC_RESPONSE = -13, - CASPER_FAILED_SENDING = -14, - CASPER_IO_ERROR = -15, - CASPER_TO_BYTES_ERROR = -16, - CASPER_CRYPTO_ERROR = -17, - CASPER_INVALID_CL_VALUE = -18, - CASPER_INVALID_ARGUMENT = -19, - CASPER_INVALID_RESPONSE = -20, - CASPER_FFI_SETUP_NOT_CALLED = -21, - CASPER_FFI_PTR_NULL_BUT_REQUIRED = -22, - CASPER_CONFLICTING_ARGUMENTS = -23, -} - -trait AsFFIError { - fn as_ffi_error(&self) -> casper_error_t; -} - -impl AsFFIError for Error { - fn as_ffi_error(&self) -> casper_error_t { - match self { - Error::FailedToParseKey => casper_error_t::CASPER_FAILED_TO_PARSE_KEY, - Error::FailedToParseURef(_, _) => casper_error_t::CASPER_FAILED_TO_PARSE_UREF, - Error::FailedToParseInt(_, _) => casper_error_t::CASPER_FAILED_TO_PARSE_INT, - 
Error::FailedToParseTimeDiff(_, _) => casper_error_t::CASPER_FAILED_TO_PARSE_TIME_DIFF, - Error::FailedToParseTimestamp(_, _) => casper_error_t::CASPER_FAILED_TO_PARSE_TIMESTAMP, - Error::FailedToParseUint(_, _) => casper_error_t::CASPER_FAILED_TO_PARSE_UINT, - Error::FailedToGetResponse(_) => casper_error_t::CASPER_FAILED_TO_GET_RESPONSE, - Error::FailedToParseResponse(_) => casper_error_t::CASPER_FAILED_TO_PARSE_RESPONSE, - Error::FileAlreadyExists(_) => casper_error_t::CASPER_FILE_ALREADY_EXISTS, - Error::UnsupportedAlgorithm(_) => casper_error_t::CASPER_UNSUPPORTED_ALGORITHM, - Error::ResponseIsError(_) => casper_error_t::CASPER_REPSONSE_IS_ERROR, - Error::InvalidJson(_) => casper_error_t::CASPER_INVALID_JSON, - Error::InvalidRpcResponse(_) => casper_error_t::CASPER_INVALID_RPC_RESPONSE, - Error::FailedSending(_) => casper_error_t::CASPER_FAILED_SENDING, - Error::IoError { .. } => casper_error_t::CASPER_IO_ERROR, - Error::ToBytesError(_) => casper_error_t::CASPER_TO_BYTES_ERROR, - Error::CryptoError { .. } => casper_error_t::CASPER_CRYPTO_ERROR, - Error::InvalidCLValue(_) => casper_error_t::CASPER_INVALID_CL_VALUE, - Error::InvalidArgument(_, _) => casper_error_t::CASPER_INVALID_ARGUMENT, - Error::InvalidResponse(_) => casper_error_t::CASPER_INVALID_RESPONSE, - Error::FFISetupNotCalled => casper_error_t::CASPER_FFI_SETUP_NOT_CALLED, - Error::FFIPtrNullButRequired(_) => casper_error_t::CASPER_FFI_PTR_NULL_BUT_REQUIRED, - Error::ConflictingArguments { .. } => casper_error_t::CASPER_CONFLICTING_ARGUMENTS, - } - } -} - -/// Private macro for parsing arguments from c strings, (const char *, or *const c_char in rust -/// terms). The sad path contract here is that we indicate there was an error by returning `false`, -/// then we store the argument -name- as an Error::InvalidArgument in LAST_ERROR. The happy path is -/// left up to callsites to define. -macro_rules! 
r#try_unsafe_arg { - ($arg:expr) => {{ - let result = unsafe_str_arg($arg, stringify!($arg)); - try_unwrap_result!(result) - }}; -} - -/// Private macro for unwrapping a result value or setting an appropriate error and returning -/// early with `false` to indicate it's existence. -macro_rules! r#try_unwrap_result { - ($result:expr) => { - match $result { - Ok(value) => value, - Err(error) => { - let error_code = AsFFIError::as_ffi_error(&error); - set_last_error(error); - return error_code; - } - } - }; -} - -/// Private macro for unwrapping an optional value or setting an appropriate error and returning -/// early with `false` to indicate it's existence. -macro_rules! r#try_unwrap_option { - ($arg:expr, or_else => $err:expr) => { - match $arg { - Some(value) => value, - None => { - let err_code = $err.as_ffi_error(); - set_last_error($err); - return err_code; - } - } - }; -} - -/// Private macro for unwrapping our internal json-rpcs or, optionally, storing the error in -/// LAST_ERROR and returning `false` to indicate that an error has occurred. Similar to -/// `try_unsafe_arg!`, this handles the sad path, and the happy path is left up to callsites. -macro_rules! r#try_unwrap_rpc { - ($rpc:expr) => {{ - let rpc = try_unwrap_result!($rpc.map_err(Into::into)); - let rpc_result = try_unwrap_option!(rpc.get_result(), or_else => { - let rpc_err = rpc.get_error().expect("should be error"); - Error::ResponseIsError(rpc_err.to_owned()) - }); - try_unwrap_result!(serde_json::to_string(&rpc_result).map_err(Into::into)) - }}; -} - -/// Private macro to wrap TryInto implementing types with a human-readable error message describing -/// the field name at the callsite. -macro_rules! 
r#try_arg_into { - ($arg:expr) => {{ - try_unwrap_result!(unsafe_try_into($arg, stringify!($arg))) - }}; -} - -fn unsafe_str_arg(arg: *const c_char, arg_name: &'static str) -> Result<&'static str> { - unsafe { - // Strings are never required to be passed at this level, instead we return "" if the ptr == - // null and let the library deal with parsing values. - if arg.is_null() { - return Ok(Default::default()); - } - CStr::from_ptr(arg).to_str() - } - .map_err(|error| { - Error::InvalidArgument( - arg_name, - format!( - "invalid utf8 value passed for arg '{}': {:?}", - stringify!($arg), - error, - ), - ) - }) -} - -fn unsafe_vec_of_str_arg( - arg: *const *const c_char, - len: usize, - arg_name: &'static str, -) -> Result> { - let slice = unsafe { slice::from_raw_parts(arg, len) }; - let mut vec = Vec::with_capacity(len); - for bytes in slice { - // While null-ptr strings are usually allowed as single arguments, an array of strings - // required to not contain null values. - if bytes.is_null() { - return Err(Error::FFIPtrNullButRequired(arg_name)); - } - vec.push(unsafe_str_arg(*bytes, arg_name)?); - } - Ok(vec) -} - -/// Helper to call TryInto::try_into on a *const ptr of our rust type implementing it. -/// This is used for -fn unsafe_try_into(value: *const I, field_name: &'static str) -> Result -where - I: Clone, - I: TryInto, -{ - if value.is_null() { - Err(Error::FFIPtrNullButRequired(field_name)) - } else { - let value: T = unsafe { (*value).clone().try_into()? }; - Ok(value) - } -} - -/// Copy the contents of `strval` to a user-provided buffer. -/// -/// `strval` is the rust `&str` utf8 string to copy. -/// `buf` is the caller-provided buffer to write into. -/// `len` is the size of the buffer `buf` in bytes. -/// - returns the number of bytes written to `buf`. 
-fn copy_str_to_buf(strval: &str, buf: *mut c_uchar, len: usize) -> usize { - let mut_buf = unsafe { slice::from_raw_parts_mut::(buf, len) }; - let lesser_len = len.min(strval.len()); - let strval = strval.as_bytes(); - mut_buf[0..lesser_len].clone_from_slice(&strval[0..lesser_len]); - len -} - -/// Perform needed setup for the client library. -#[no_mangle] -pub extern "C" fn casper_setup_client() { - let mut runtime = RUNTIME.lock().expect("should lock"); - // TODO: runtime opts - *runtime = Some(runtime::Runtime::new().expect("should create tokio runtime")); -} - -/// Perform a clean shutdown of resources gathered in the client library. -#[no_mangle] -pub extern "C" fn casper_shutdown_client() { - let mut runtime = RUNTIME.lock().expect("should lock"); - *runtime = None; // triggers drop on our runtime -} - -/// Gets the last error copied to the provided buffer. -/// -/// * `buf` is the buffer where the result will be stored. -/// * `len` is the length of the `buf` buffer in bytes. -/// - returns the number of bytes written to `buf`. -#[no_mangle] -pub extern "C" fn casper_get_last_error(buf: *mut c_uchar, len: usize) -> usize { - if let Some(last_err) = &*LAST_ERROR.lock().expect("should lock") { - let err_str = format!("{}", last_err); - return copy_str_to_buf(&err_str, buf, len); - } - 0 -} - -/// Creates a `Deploy` and sends it to the network for execution. 
-/// -/// See [super::put_deploy](super::put_deploy) for more details -#[no_mangle] -pub extern "C" fn casper_put_deploy( - maybe_rpc_id: *const c_char, - node_address: *const c_char, - verbosity_level: u64, - deploy_params: *const casper_deploy_params_t, - session_params: *const casper_session_params_t, - payment_params: *const casper_payment_params_t, - response_buf: *mut c_uchar, - response_buf_len: usize, -) -> casper_error_t { - let mut runtime = RUNTIME.lock().expect("should lock"); - let runtime = try_unwrap_option!(&mut *runtime, or_else => Error::FFISetupNotCalled); - let maybe_rpc_id = try_unsafe_arg!(maybe_rpc_id); - let node_address = try_unsafe_arg!(node_address); - let deploy_params = try_arg_into!(deploy_params); - let session_params = try_arg_into!(session_params); - let payment_params = try_arg_into!(payment_params); - runtime.block_on(async move { - let result = super::put_deploy( - maybe_rpc_id, - node_address, - verbosity_level, - deploy_params, - session_params, - payment_params, - ); - let response = try_unwrap_rpc!(result); - copy_str_to_buf(&response, response_buf, response_buf_len); - casper_error_t::CASPER_SUCCESS - }) -} - -/// Creates a `Deploy` and outputs it to a file or stdout. 
-/// -/// See [super::make_deploy](super::make_deploy) for more details -#[no_mangle] -pub extern "C" fn casper_make_deploy( - maybe_output_path: *const c_char, - deploy_params: *const casper_deploy_params_t, - session_params: *const casper_session_params_t, - payment_params: *const casper_payment_params_t, -) -> casper_error_t { - let maybe_output_path = try_unsafe_arg!(maybe_output_path); - let deploy_params = try_arg_into!(deploy_params); - let session_params = try_arg_into!(session_params); - let payment_params = try_arg_into!(payment_params); - let result = super::make_deploy( - maybe_output_path, - deploy_params, - session_params, - payment_params, - ); - try_unwrap_result!(result); - casper_error_t::CASPER_SUCCESS -} - -/// Reads a previously-saved `Deploy` from a file, cryptographically signs it, and outputs it to a -/// file or stdout. -/// -/// See [super::sign_deploy_file](super::sign_deploy_file) for more details. -#[no_mangle] -pub extern "C" fn casper_sign_deploy_file( - input_path: *const c_char, - secret_key: *const c_char, - maybe_output_path: *const c_char, -) -> casper_error_t { - let input_path = try_unsafe_arg!(input_path); - let secret_key = try_unsafe_arg!(secret_key); - let maybe_output_path = try_unsafe_arg!(maybe_output_path); - let result = super::sign_deploy_file(input_path, secret_key, maybe_output_path); - try_unwrap_result!(result); - casper_error_t::CASPER_SUCCESS -} - -/// Reads a previously-saved `Deploy` from a file and sends it to the network for execution. -/// -/// See [super::send_deploy_file](super::send_deploy_file) for more details. 
-#[no_mangle] -pub extern "C" fn casper_send_deploy_file( - maybe_rpc_id: *const c_char, - node_address: *const c_char, - verbosity_level: u64, - input_path: *const c_char, - response_buf: *mut c_uchar, - response_buf_len: usize, -) -> casper_error_t { - let mut runtime = RUNTIME.lock().expect("should lock"); - let runtime = try_unwrap_option!(&mut *runtime, or_else => Error::FFISetupNotCalled); - let maybe_rpc_id = try_unsafe_arg!(maybe_rpc_id); - let node_address = try_unsafe_arg!(node_address); - let input_path = try_unsafe_arg!(input_path); - runtime.block_on(async move { - let result = - super::send_deploy_file(maybe_rpc_id, node_address, verbosity_level, input_path); - let response = try_unwrap_rpc!(result); - copy_str_to_buf(&response, response_buf, response_buf_len); - casper_error_t::CASPER_SUCCESS - }) -} - -/// Transfers funds between purses. -/// -/// See [super::transfer](super::transfer) for more details -#[no_mangle] -pub extern "C" fn casper_transfer( - maybe_rpc_id: *const c_char, - node_address: *const c_char, - verbosity_level: u64, - amount: *const c_char, - maybe_target_account: *const c_char, - maybe_id: *const c_char, - deploy_params: *const casper_deploy_params_t, - payment_params: *const casper_payment_params_t, - response_buf: *mut c_uchar, - response_buf_len: usize, -) -> casper_error_t { - let mut runtime = RUNTIME.lock().expect("should lock"); - let runtime = try_unwrap_option!(&mut *runtime, or_else => Error::FFISetupNotCalled); - let maybe_rpc_id = try_unsafe_arg!(maybe_rpc_id); - let node_address = try_unsafe_arg!(node_address); - let amount = try_unsafe_arg!(amount); - let maybe_target_account = try_unsafe_arg!(maybe_target_account); - let maybe_id = try_unsafe_arg!(maybe_id); - let deploy_params = try_arg_into!(deploy_params); - let payment_params = try_arg_into!(payment_params); - runtime.block_on(async move { - let result = super::transfer( - maybe_rpc_id, - node_address, - verbosity_level, - amount, - maybe_target_account, - 
maybe_id, - deploy_params, - payment_params, - ); - let response = try_unwrap_rpc!(result); - copy_str_to_buf(&response, response_buf, response_buf_len); - casper_error_t::CASPER_SUCCESS - }) -} - -/// Retrieves a `Deploy` from the network. -/// -/// See [super::get_deploy](super::get_deploy) for more details. -#[no_mangle] -pub extern "C" fn casper_get_deploy( - maybe_rpc_id: *const c_char, - node_address: *const c_char, - verbosity_level: u64, - deploy_hash: *const c_char, - response_buf: *mut c_uchar, - response_buf_len: usize, -) -> casper_error_t { - let mut runtime = RUNTIME.lock().expect("should lock"); - let runtime = try_unwrap_option!(&mut *runtime, or_else => Error::FFISetupNotCalled); - let maybe_rpc_id = try_unsafe_arg!(maybe_rpc_id); - let node_address = try_unsafe_arg!(node_address); - let deploy_hash = try_unsafe_arg!(deploy_hash); - runtime.block_on(async move { - let result = super::get_deploy(maybe_rpc_id, node_address, verbosity_level, deploy_hash); - let response = try_unwrap_rpc!(result); - copy_str_to_buf(&response, response_buf, response_buf_len); - casper_error_t::CASPER_SUCCESS - }) -} - -/// Retrieves a `Block` from the network. -/// -/// See [super::get_block](super::get_block) for more details. 
-#[no_mangle] -pub extern "C" fn casper_get_block( - maybe_rpc_id: *const c_char, - node_address: *const c_char, - verbosity_level: u64, - maybe_block_id: *const c_char, - response_buf: *mut c_uchar, - response_buf_len: usize, -) -> casper_error_t { - let mut runtime = RUNTIME.lock().expect("should lock"); - let runtime = try_unwrap_option!(&mut *runtime, or_else => Error::FFISetupNotCalled); - let maybe_rpc_id = try_unsafe_arg!(maybe_rpc_id); - let node_address = try_unsafe_arg!(node_address); - let maybe_block_id = try_unsafe_arg!(maybe_block_id); - runtime.block_on(async move { - let result = super::get_block(maybe_rpc_id, node_address, verbosity_level, maybe_block_id); - let response = try_unwrap_rpc!(result); - copy_str_to_buf(&response, response_buf, response_buf_len); - casper_error_t::CASPER_SUCCESS - }) -} - -/// Retrieves all `Transfer` items for a `Block` from the network. -/// -/// See [super::casper_get_block_transfers](super::casper_get_block_transfers) for more details. -#[no_mangle] -pub extern "C" fn casper_get_block_transfers( - maybe_rpc_id: *const c_char, - node_address: *const c_char, - verbosity_level: u64, - maybe_block_id: *const c_char, - response_buf: *mut c_uchar, - response_buf_len: usize, -) -> casper_error_t { - let mut runtime = RUNTIME.lock().expect("should lock"); - let runtime = try_unwrap_option!(&mut *runtime, or_else => Error::FFISetupNotCalled); - let maybe_rpc_id = try_unsafe_arg!(maybe_rpc_id); - let node_address = try_unsafe_arg!(node_address); - let maybe_block_id = try_unsafe_arg!(maybe_block_id); - runtime.block_on(async move { - let result = - super::get_block_transfers(maybe_rpc_id, node_address, verbosity_level, maybe_block_id); - let response = try_unwrap_rpc!(result); - copy_str_to_buf(&response, response_buf, response_buf_len); - casper_error_t::CASPER_SUCCESS - }) -} - -/// Retrieves a state root hash at a given `Block`. -/// -/// See [super::get_state_root_hash](super::get_state_root_hash) for more details. 
-#[no_mangle] -pub extern "C" fn casper_get_state_root_hash( - maybe_rpc_id: *const c_char, - node_address: *const c_char, - verbosity_level: u64, - maybe_block_id: *const c_char, - response_buf: *mut c_uchar, - response_buf_len: usize, -) -> casper_error_t { - let mut runtime = RUNTIME.lock().expect("should lock"); - let runtime = try_unwrap_option!(&mut *runtime, or_else => Error::FFISetupNotCalled); - let maybe_rpc_id = try_unsafe_arg!(maybe_rpc_id); - let node_address = try_unsafe_arg!(node_address); - let maybe_block_id = try_unsafe_arg!(maybe_block_id); - runtime.block_on(async move { - let result = - super::get_state_root_hash(maybe_rpc_id, node_address, verbosity_level, maybe_block_id); - let response = try_unwrap_rpc!(result); - copy_str_to_buf(&response, response_buf, response_buf_len); - casper_error_t::CASPER_SUCCESS - }) -} - -/// Retrieves a stored value from the network. -/// -/// See [super::get_item](super::get_item) for more details. -#[no_mangle] -pub extern "C" fn casper_get_item( - maybe_rpc_id: *const c_char, - node_address: *const c_char, - verbosity_level: u64, - state_root_hash: *const c_char, - key: *const c_char, - path: *const c_char, - response_buf: *mut c_uchar, - response_buf_len: usize, -) -> casper_error_t { - let mut runtime = RUNTIME.lock().expect("should lock"); - let runtime = try_unwrap_option!(&mut *runtime, or_else => Error::FFISetupNotCalled); - let maybe_rpc_id = try_unsafe_arg!(maybe_rpc_id); - let node_address = try_unsafe_arg!(node_address); - let state_root_hash = try_unsafe_arg!(state_root_hash); - let key = try_unsafe_arg!(key); - let path = try_unsafe_arg!(path); - runtime.block_on(async move { - let result = super::get_item( - maybe_rpc_id, - node_address, - verbosity_level, - state_root_hash, - key, - path, - ); - let response = try_unwrap_rpc!(result); - copy_str_to_buf(&response, response_buf, response_buf_len); - casper_error_t::CASPER_SUCCESS - }) -} - -/// Retrieves a purse's balance from the network. 
-/// -/// See [super::get_balance](super::get_balance) for more details. -#[no_mangle] -pub extern "C" fn casper_get_balance( - maybe_rpc_id: *const c_char, - node_address: *const c_char, - verbosity_level: u64, - state_root_hash: *const c_char, - purse: *const c_char, - response_buf: *mut c_uchar, - response_buf_len: usize, -) -> casper_error_t { - let mut runtime = RUNTIME.lock().expect("should lock"); - let runtime = try_unwrap_option!(&mut *runtime, or_else => Error::FFISetupNotCalled); - let maybe_rpc_id = try_unsafe_arg!(maybe_rpc_id); - let node_address = try_unsafe_arg!(node_address); - let state_root_hash = try_unsafe_arg!(state_root_hash); - let purse = try_unsafe_arg!(purse); - runtime.block_on(async move { - let result = super::get_balance( - maybe_rpc_id, - node_address, - verbosity_level, - state_root_hash, - purse, - ); - let response = try_unwrap_rpc!(result); - copy_str_to_buf(&response, response_buf, response_buf_len); - casper_error_t::CASPER_SUCCESS - }) -} - -/// Retrieves era information from the network. -/// -/// See [super::get_era_info_by_switch_block](super::get_era_info_by_switch_block) for more details. 
-#[no_mangle] -pub extern "C" fn casper_get_era_info_by_switch_block( - maybe_rpc_id: *const c_char, - node_address: *const c_char, - verbosity_level: u64, - maybe_block_id: *const c_char, - response_buf: *mut c_uchar, - response_buf_len: usize, -) -> casper_error_t { - let mut runtime = RUNTIME.lock().expect("should lock"); - let runtime = try_unwrap_option!(&mut *runtime, or_else => Error::FFISetupNotCalled); - let maybe_rpc_id = try_unsafe_arg!(maybe_rpc_id); - let node_address = try_unsafe_arg!(node_address); - let maybe_block_id = try_unsafe_arg!(maybe_block_id); - runtime.block_on(async move { - let result = super::get_era_info_by_switch_block( - maybe_rpc_id, - node_address, - verbosity_level, - maybe_block_id, - ); - let response = try_unwrap_rpc!(result); - copy_str_to_buf(&response, response_buf, response_buf_len); - casper_error_t::CASPER_SUCCESS - }) -} - -/// Retrieves the bids and validators as of the most recently added `Block`. -/// -/// See [super::get_auction_info](super::get_auction_info) for more details. -#[no_mangle] -pub extern "C" fn casper_get_auction_info( - maybe_rpc_id: *const c_char, - node_address: *const c_char, - verbosity_level: u64, - response_buf: *mut c_uchar, - response_buf_len: usize, -) -> casper_error_t { - let mut runtime = RUNTIME.lock().expect("should lock"); - let runtime = try_unwrap_option!(&mut *runtime, or_else => Error::FFISetupNotCalled); - let maybe_rpc_id = try_unsafe_arg!(maybe_rpc_id); - let node_address = try_unsafe_arg!(node_address); - runtime.block_on(async move { - let result = super::get_auction_info(maybe_rpc_id, node_address, verbosity_level); - let response = try_unwrap_rpc!(result); - copy_str_to_buf(&response, response_buf, response_buf_len); - casper_error_t::CASPER_SUCCESS - }) -} - -/// Container for `Deploy` construction options. -/// -/// See [DeployStrParams](super::DeployStrParams) for more info. 
-#[allow(non_snake_case)] -#[repr(C)] -#[derive(Clone)] -pub struct casper_deploy_params_t { - secret_key: *const c_char, - timestamp: *const c_char, - ttl: *const c_char, - gas_price: *const c_char, - dependencies: *const *const c_char, - dependencies_len: usize, - chain_name: *const c_char, -} - -impl TryInto> for casper_deploy_params_t { - type Error = Error; - - fn try_into(self) -> Result> { - let secret_key = unsafe_str_arg(self.secret_key, "casper_deploy_params_t.secret_key")?; - let timestamp = unsafe_str_arg(self.timestamp, "casper_deploy_params_t.timestamp")?; - let ttl = unsafe_str_arg(self.ttl, "casper_deploy_params_t.ttl")?; - let gas_price = unsafe_str_arg(self.gas_price, "casper_deploy_params_t.gas_price")?; - let chain_name = unsafe_str_arg(self.chain_name, "casper_deploy_params_t.chain_name")?; - let dependencies = unsafe_vec_of_str_arg( - self.dependencies, - self.dependencies_len, - "casper_deploy_params_t.dependencies", - )?; - Ok(super::DeployStrParams { - secret_key, - timestamp, - ttl, - gas_price, - chain_name, - dependencies, - }) - } -} - -/// Container for `Payment` construction options. -/// -/// See [PaymentStrParams](super::PaymentStrParams) for more info. 
-#[allow(non_snake_case)] -#[repr(C)] -#[derive(Clone)] -pub struct casper_payment_params_t { - payment_amount: *const c_char, - payment_hash: *const c_char, - payment_name: *const c_char, - payment_package_hash: *const c_char, - payment_package_name: *const c_char, - payment_path: *const c_char, - payment_args_simple: *const *const c_char, - payment_args_simple_len: usize, - payment_args_complex: *const c_char, - payment_version: *const c_char, - payment_entry_point: *const c_char, -} - -impl TryInto> for casper_payment_params_t { - type Error = Error; - - fn try_into(self) -> Result> { - let payment_amount = unsafe_str_arg( - self.payment_amount, - "casper_payment_params_t.payment_amount", - )?; - let payment_hash = - unsafe_str_arg(self.payment_hash, "casper_payment_params_t.payment_hash")?; - let payment_name = - unsafe_str_arg(self.payment_name, "casper_payment_params_t.payment_name")?; - let payment_package_hash = unsafe_str_arg( - self.payment_package_hash, - "casper_payment_params_t.payment_package_hash", - )?; - let payment_package_name = unsafe_str_arg( - self.payment_package_name, - "casper_payment_params_t.payment_package_name", - )?; - let payment_path = - unsafe_str_arg(self.payment_path, "casper_payment_params_t.payment_path")?; - let payment_args_simple = unsafe_vec_of_str_arg( - self.payment_args_simple, - self.payment_args_simple_len, - "caser_payment_params_t.payment_args_simple", - )?; - let payment_args_complex = unsafe_str_arg( - self.payment_args_complex, - "casper_payment_params_t.payment_args_complex", - )?; - let payment_version = unsafe_str_arg( - self.payment_version, - "casper_payment_params_t.payment_version", - )?; - let payment_entry_point = unsafe_str_arg( - self.payment_entry_point, - "casper_payment_params_t.payment_entry_point", - )?; - Ok(super::PaymentStrParams { - payment_amount, - payment_hash, - payment_name, - payment_package_hash, - payment_package_name, - payment_path, - payment_args_simple, - payment_args_complex, - 
payment_version, - payment_entry_point, - }) - } -} - -/// Container for `Session` construction options. -/// -/// See [SessionStrParams](super::SessionStrParams) for more info. -#[allow(non_snake_case)] -#[repr(C)] -#[derive(Clone)] -pub struct casper_session_params_t { - session_hash: *const c_char, - session_name: *const c_char, - session_package_hash: *const c_char, - session_package_name: *const c_char, - session_path: *const c_char, - session_args_simple: *const *const c_char, - session_args_simple_len: usize, - session_args_complex: *const c_char, - session_version: *const c_char, - session_entry_point: *const c_char, -} - -impl TryInto> for casper_session_params_t { - type Error = Error; - - fn try_into(self) -> Result> { - let session_hash = - unsafe_str_arg(self.session_hash, "casper_session_params_t.session_hash")?; - let session_name = - unsafe_str_arg(self.session_name, "casper_session_params_t.session_name")?; - let session_package_hash = unsafe_str_arg( - self.session_package_hash, - "casper_session_params_t.sessio_package_hash", - )?; - let session_package_name = unsafe_str_arg( - self.session_package_name, - "casper_session_params_t.session_package_name", - )?; - let session_path = - unsafe_str_arg(self.session_path, "casper_session_params_t.session_path")?; - let session_args_simple = unsafe_vec_of_str_arg( - self.session_args_simple, - self.session_args_simple_len, - "casper_session_params_t.session_args_simple", - )?; - let session_args_complex = unsafe_str_arg( - self.session_args_complex, - "casper_session_params_t.session_args_complex", - )?; - let session_version = unsafe_str_arg( - self.session_version, - "casper_session_params_t.session_version", - )?; - let session_entry_point = unsafe_str_arg( - self.session_entry_point, - "casper_session_params_t.session_entry_point", - )?; - Ok(super::SessionStrParams { - session_hash, - session_name, - session_package_hash, - session_package_name, - session_path, - session_args_simple, - 
session_args_complex, - session_version, - session_entry_point, - }) - } -} diff --git a/client/lib/keygen.rs b/client/lib/keygen.rs deleted file mode 100644 index 95641df132..0000000000 --- a/client/lib/keygen.rs +++ /dev/null @@ -1,97 +0,0 @@ -//! Cryptographic key generation. - -use std::{fs, path::Path}; - -use casper_node::crypto::AsymmetricKeyExt; -use casper_types::{AsymmetricType, PublicKey, SecretKey}; - -use crate::error::{Error, Result}; - -/// Default filename for the PEM-encoded secret key file. -pub const SECRET_KEY_PEM: &str = "secret_key.pem"; -/// Default filename for the hex-encoded public key file. -pub const PUBLIC_KEY_HEX: &str = "public_key_hex"; -/// Default filename for the PEM-encoded public key file. -pub const PUBLIC_KEY_PEM: &str = "public_key.pem"; - -/// List of keygen related filenames: "secret_key.pem", "public_key.pem" and "public_key_hex". -pub const FILES: [&str; 3] = [SECRET_KEY_PEM, PUBLIC_KEY_PEM, PUBLIC_KEY_HEX]; - -/// Name of Ed25519 algorithm. -pub const ED25519: &str = "Ed25519"; -/// Name of secp256k1 algorithm. -pub const SECP256K1: &str = "secp256k1"; - -/// Generates a new asymmetric key pair using the specified algorithm, and writes them to files in -/// the specified directory. -/// -/// The secret key is written to "secret_key.pem", and the public key is written to "public_key.pem" -/// and also in hex format to "public_key_hex". For the hex format, the algorithm's tag is -/// prepended, e.g. `01` for Ed25519, `02` for secp256k1. -/// -/// If `force` is true, existing files will be overwritten. If `force` is false and any of the -/// files exist, [`Error::FileAlreadyExists`](../enum.Error.html#variant.FileAlreadyExists) is -/// returned and no files are written. 
-pub fn generate_files(output_dir: &str, algorithm: &str, force: bool) -> Result<()> { - if output_dir.is_empty() { - return Err(Error::InvalidArgument( - "generate_files", - "empty output_dir provided, must be a valid path".to_string(), - )); - } - let _ = fs::create_dir_all(output_dir).map_err(move |error| Error::IoError { - context: format!("unable to create directory at '{}'", output_dir), - error, - })?; - let output_dir = Path::new(output_dir) - .canonicalize() - .map_err(|error| Error::IoError { - context: format!("unable get canonical path at '{}'", output_dir), - error, - })?; - - if !force { - for file in FILES.iter().map(|filename| output_dir.join(filename)) { - if file.exists() { - return Err(Error::FileAlreadyExists(file)); - } - } - } - - let secret_key = if algorithm.eq_ignore_ascii_case(ED25519) { - SecretKey::generate_ed25519().unwrap() - } else if algorithm.eq_ignore_ascii_case(SECP256K1) { - SecretKey::generate_secp256k1().unwrap() - } else { - return Err(Error::UnsupportedAlgorithm(algorithm.to_string())); - }; - - let public_key = PublicKey::from(&secret_key); - - let public_key_hex_path = output_dir.join(PUBLIC_KEY_HEX); - fs::write(public_key_hex_path, public_key.to_hex()).map_err(|error| Error::IoError { - context: format!( - "unable to write public key hex file at {:?}", - output_dir.join(PUBLIC_KEY_HEX) - ), - error, - })?; - - let secret_key_path = output_dir.join(SECRET_KEY_PEM); - secret_key - .to_file(&secret_key_path) - .map_err(|error| Error::CryptoError { - context: "secret_key", - error, - })?; - - let public_key_path = output_dir.join(PUBLIC_KEY_PEM); - public_key - .to_file(&public_key_path) - .map_err(|error| Error::CryptoError { - context: "public_key", - error, - })?; - - Ok(()) -} diff --git a/client/lib/lib.rs b/client/lib/lib.rs deleted file mode 100644 index 4de330f29c..0000000000 --- a/client/lib/lib.rs +++ /dev/null @@ -1,1252 +0,0 @@ -//! 
# Casper node client library -#![doc( - html_root_url = "https://docs.rs/casper-client/0.1.0", - html_favicon_url = "https://raw.githubusercontent.com/CasperLabs/casper-node/master/images/CasperLabs_Logo_Favicon_RGB_50px.png", - html_logo_url = "https://raw.githubusercontent.com/CasperLabs/casper-node/master/images/CasperLabs_Logo_Symbol_RGB.png", - test(attr(forbid(warnings))) -)] -#![warn( - missing_docs, - trivial_casts, - trivial_numeric_casts, - unused_qualifications -)] - -mod cl_type; -mod deploy; -mod error; -#[cfg(feature = "ffi")] -pub mod ffi; -pub mod keygen; -mod parsing; -mod rpc; -mod validation; - -use std::{convert::TryInto, fs::File}; - -use jsonrpc_lite::JsonRpc; -use serde::Serialize; - -use casper_execution_engine::core::engine_state::ExecutableDeployItem; -use casper_node::types::Deploy; -use casper_types::{UIntParseError, U512}; - -pub use cl_type::help; -pub use deploy::ListDeploysResult; -use deploy::{DeployExt, DeployParams}; -pub use error::Error; -use error::Result; -use parsing::none_if_empty; -use rpc::{RpcCall, TransferTarget}; -pub use validation::ValidateResponseError; - -/// Creates a `Deploy` and sends it to the network for execution. -/// -/// * `maybe_rpc_id` is the JSON-RPC identifier, applied to the request and returned in the -/// response. If it can be parsed as an `i64` it will be used as a JSON integer. If empty, a -/// random `i64` will be assigned. Otherwise the provided string will be used verbatim. -/// * `node_address` is the hostname or IP and port of the node on which the HTTP service is -/// running, e.g. `"http://127.0.0.1:7777"`. -/// * When `verbosity_level` is `1`, the JSON-RPC request will be printed to `stdout` with long -/// string fields (e.g. hex-formatted raw Wasm bytes) shortened to a string indicating the char -/// count of the field. When `verbosity_level` is greater than `1`, the request will be printed -/// to `stdout` with no abbreviation of long fields. 
When `verbosity_level` is `0`, the request -/// will not be printed to `stdout`. -/// * `deploy` contains deploy-related options for this `Deploy`. See -/// [`DeployStrParams`](struct.DeployStrParams.html) for more details. -/// * `session` contains session-related options for this `Deploy`. See -/// [`SessionStrParams`](struct.SessionStrParams.html) for more details. -/// * `payment` contains payment-related options for this `Deploy`. See -/// [`PaymentStrParams`](struct.PaymentStrParams.html) for more details. -pub fn put_deploy( - maybe_rpc_id: &str, - node_address: &str, - verbosity_level: u64, - deploy: DeployStrParams<'_>, - session: SessionStrParams<'_>, - payment: PaymentStrParams<'_>, -) -> Result { - let deploy = Deploy::with_payment_and_session( - deploy.try_into()?, - payment.try_into()?, - session.try_into()?, - ); - RpcCall::new(maybe_rpc_id, node_address, verbosity_level).put_deploy(deploy) -} - -/// Creates a `Deploy` and outputs it to a file or stdout. -/// -/// As a file, the `Deploy` can subsequently be signed by other parties using -/// [`sign_deploy_file()`](fn.sign_deploy_file.html) and then sent to the network for execution -/// using [`send_deploy_file()`](fn.send_deploy_file.html). -/// -/// * `maybe_rpc_id` is the JSON-RPC identifier, applied to the request and returned in the -/// response. If it can be parsed as an `i64` it will be used as a JSON integer. If empty, a -/// random `i64` will be assigned. Otherwise the provided string will be used verbatim. -/// * `node_address` is the hostname or IP and port of the node on which the HTTP service is -/// running, e.g. `"http://127.0.0.1:7777"`. -/// * `maybe_output_path` specifies the output file, or if empty, will print it to `stdout`. If the -/// file already exists, it will be overwritten. -/// * `deploy` contains deploy-related options for this `Deploy`. See -/// [`DeployStrParams`](struct.DeployStrParams.html) for more details. 
-/// * `session` contains session-related options for this `Deploy`. See -/// [`SessionStrParams`](struct.SessionStrParams.html) for more details. -/// * `payment` contains payment-related options for this `Deploy`. See -/// [`PaymentStrParams`](struct.PaymentStrParams.html) for more details. -pub fn make_deploy( - maybe_output_path: &str, - deploy: DeployStrParams<'_>, - session: SessionStrParams<'_>, - payment: PaymentStrParams<'_>, -) -> Result<()> { - let output = deploy::output_or_stdout(none_if_empty(maybe_output_path)).map_err(|error| { - Error::IoError { - context: format!( - "unable to get file or stdout, provided '{:?}'", - maybe_output_path - ), - error, - } - })?; - - Deploy::with_payment_and_session(deploy.try_into()?, payment.try_into()?, session.try_into()?) - .write_deploy(output) -} - -/// Reads a previously-saved `Deploy` from a file, cryptographically signs it, and outputs it to a -/// file or stdout. -/// -/// * `input_path` specifies the path to the previously-saved `Deploy` file. -/// * `secret_key` specifies the path to the secret key with which to sign the `Deploy`. -/// * `maybe_output_path` specifies the output file, or if empty, will print it to `stdout`. If the -/// file already exists, it will be overwritten. -pub fn sign_deploy_file(input_path: &str, secret_key: &str, maybe_output_path: &str) -> Result<()> { - let secret_key = parsing::secret_key(secret_key)?; - let maybe_output_path = parsing::output(maybe_output_path); - - let output = deploy::output_or_stdout(maybe_output_path).map_err(|error| Error::IoError { - context: format!( - "unable to get file or stdout, provided '{:?}'", - maybe_output_path - ), - error, - })?; - - let input = File::open(&input_path).map_err(|error| Error::IoError { - context: format!("unable to read deploy file at '{}'", input_path), - error, - })?; - - Deploy::sign_and_write_deploy(input, secret_key, output) -} - -/// Reads a previously-saved `Deploy` from a file and sends it to the network for execution. 
-/// -/// * `maybe_rpc_id` is the JSON-RPC identifier, applied to the request and returned in the -/// response. If it can be parsed as an `i64` it will be used as a JSON integer. If empty, a -/// random `i64` will be assigned. Otherwise the provided string will be used verbatim. -/// * `node_address` is the hostname or IP and port of the node on which the HTTP service is -/// running, e.g. `"http://127.0.0.1:7777"`. -/// * When `verbosity_level` is `1`, the JSON-RPC request will be printed to `stdout` with long -/// string fields (e.g. hex-formatted raw Wasm bytes) shortened to a string indicating the char -/// count of the field. When `verbosity_level` is greater than `1`, the request will be printed -/// to `stdout` with no abbreviation of long fields. When `verbosity_level` is `0`, the request -/// will not be printed to `stdout`. -/// * `input_path` specifies the path to the previously-saved `Deploy` file. -pub fn send_deploy_file( - maybe_rpc_id: &str, - node_address: &str, - verbosity_level: u64, - input_path: &str, -) -> Result { - RpcCall::new(maybe_rpc_id, node_address, verbosity_level).send_deploy_file(input_path) -} - -/// Transfers funds between purses. -/// -/// * `maybe_rpc_id` is the JSON-RPC identifier, applied to the request and returned in the -/// response. If it can be parsed as an `i64` it will be used as a JSON integer. If empty, a -/// random `i64` will be assigned. Otherwise the provided string will be used verbatim. -/// * `node_address` is the hostname or IP and port of the node on which the HTTP service is -/// running, e.g. `"http://127.0.0.1:7777"`. -/// * When `verbosity_level` is `1`, the JSON-RPC request will be printed to `stdout` with long -/// string fields (e.g. hex-formatted raw Wasm bytes) shortened to a string indicating the char -/// count of the field. When `verbosity_level` is greater than `1`, the request will be printed -/// to `stdout` with no abbreviation of long fields. 
When `verbosity_level` is `0`, the request -/// will not be printed to `stdout`. -/// * `amount` specifies the amount to be transferred. -/// * `maybe_source_purse` is the purse `URef` from which the funds will be transferred, formatted -/// as e.g. `uref-0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20-007`. If it is -/// an empty string, the network will use the main purse from the sender's account. -/// * `maybe_target_purse` is the purse `URef` into which the funds will be transferred, formatted -/// as e.g. `uref-0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20-007`. If it is -/// an empty string, `maybe_target_account` must be specified instead. These options are -/// incompatible: exactly one must be empty and the other valid. -/// * `maybe_target_account` is the account `PublicKey` into which the funds will be transferred, -/// formatted as a hex-encoded string. The account's main purse will receive the funds. If it is -/// an empty string, `maybe_target_purse` must be specified instead. These options are -/// incompatible: exactly one must be empty and the other valid. -/// * `deploy` contains deploy-related options for this `Deploy`. See -/// [`DeployStrParams`](struct.DeployStrParams.html) for more details. -/// * `payment` contains payment-related options for this `Deploy`. See -/// [`PaymentStrParams`](struct.PaymentStrParams.html) for more details. 
-#[allow(clippy::too_many_arguments)] -pub fn transfer( - maybe_rpc_id: &str, - node_address: &str, - verbosity_level: u64, - amount: &str, - maybe_target_account: &str, - maybe_id: &str, - deploy_params: DeployStrParams<'_>, - payment_params: PaymentStrParams<'_>, -) -> Result { - let target = parsing::get_transfer_target(maybe_target_account)?; - - let amount = U512::from_dec_str(amount) - .map_err(|err| Error::FailedToParseUint("amount", UIntParseError::FromDecStr(err)))?; - - let source_purse = None; - - let maybe_id = parsing::transfer_id(maybe_id)?; - - RpcCall::new(maybe_rpc_id, node_address, verbosity_level).transfer( - amount, - source_purse, - target, - maybe_id, - deploy_params.try_into()?, - payment_params.try_into()?, - ) -} - -/// Retrieves a `Deploy` from the network. -/// -/// * `maybe_rpc_id` is the JSON-RPC identifier, applied to the request and returned in the -/// response. If it can be parsed as an `i64` it will be used as a JSON integer. If empty, a -/// random `i64` will be assigned. Otherwise the provided string will be used verbatim. -/// * `node_address` is the hostname or IP and port of the node on which the HTTP service is -/// running, e.g. `"http://127.0.0.1:7777"`. -/// * When `verbosity_level` is `1`, the JSON-RPC request will be printed to `stdout` with long -/// string fields (e.g. hex-formatted raw Wasm bytes) shortened to a string indicating the char -/// count of the field. When `verbosity_level` is greater than `1`, the request will be printed -/// to `stdout` with no abbreviation of long fields. When `verbosity_level` is `0`, the request -/// will not be printed to `stdout`. -/// * `deploy_hash` must be a hex-encoded, 32-byte hash digest. -pub fn get_deploy( - maybe_rpc_id: &str, - node_address: &str, - verbosity_level: u64, - deploy_hash: &str, -) -> Result { - RpcCall::new(maybe_rpc_id, node_address, verbosity_level).get_deploy(deploy_hash) -} - -/// Retrieves a `Block` from the network. 
-/// -/// * `maybe_rpc_id` is the JSON-RPC identifier, applied to the request and returned in the -/// response. If it can be parsed as an `i64` it will be used as a JSON integer. If empty, a -/// random `i64` will be assigned. Otherwise the provided string will be used verbatim. -/// * `node_address` is the hostname or IP and port of the node on which the HTTP service is -/// running, e.g. `"http://127.0.0.1:7777"`. -/// * When `verbosity_level` is `1`, the JSON-RPC request will be printed to `stdout` with long -/// string fields (e.g. hex-formatted raw Wasm bytes) shortened to a string indicating the char -/// count of the field. When `verbosity_level` is greater than `1`, the request will be printed -/// to `stdout` with no abbreviation of long fields. When `verbosity_level` is `0`, the request -/// will not be printed to `stdout`. -/// * `maybe_block_id` must be a hex-encoded, 32-byte hash digest or a `u64` representing the -/// `Block` height or empty. If empty, the latest `Block` will be retrieved. -pub fn get_block( - maybe_rpc_id: &str, - node_address: &str, - verbosity_level: u64, - maybe_block_id: &str, -) -> Result { - RpcCall::new(maybe_rpc_id, node_address, verbosity_level).get_block(maybe_block_id) -} - -/// Retrieves all `Transfer` items for a `Block` from the network. -/// -/// * `maybe_rpc_id` is the JSON-RPC identifier, applied to the request and returned in the -/// response. If it can be parsed as an `i64` it will be used as a JSON integer. If empty, a -/// random `i64` will be assigned. Otherwise the provided string will be used verbatim. -/// * `node_address` is the hostname or IP and port of the node on which the HTTP service is -/// running, e.g. `"http://127.0.0.1:7777"`. -/// * When `verbosity_level` is `1`, the JSON-RPC request will be printed to `stdout` with long -/// string fields (e.g. hex-formatted raw Wasm bytes) shortened to a string indicating the char -/// count of the field. 
When `verbosity_level` is greater than `1`, the request will be printed -/// to `stdout` with no abbreviation of long fields. When `verbosity_level` is `0`, the request -/// will not be printed to `stdout`. -/// * `maybe_block_id` must be a hex-encoded, 32-byte hash digest or a `u64` representing the -/// `Block` height or empty. If empty, the latest `Block` transfers will be retrieved. -pub fn get_block_transfers( - maybe_rpc_id: &str, - node_address: &str, - verbosity_level: u64, - maybe_block_id: &str, -) -> Result { - RpcCall::new(maybe_rpc_id, node_address, verbosity_level).get_block_transfers(maybe_block_id) -} - -/// Retrieves a state root hash at a given `Block`. -/// -/// * `maybe_rpc_id` is the JSON-RPC identifier, applied to the request and returned in the -/// response. If it can be parsed as an `i64` it will be used as a JSON integer. If empty, a -/// random `i64` will be assigned. Otherwise the provided string will be used verbatim. -/// * `node_address` is the hostname or IP and port of the node on which the HTTP service is -/// running, e.g. `"http://127.0.0.1:7777"`. -/// * When `verbosity_level` is `1`, the JSON-RPC request will be printed to `stdout` with long -/// string fields (e.g. hex-formatted raw Wasm bytes) shortened to a string indicating the char -/// count of the field. When `verbosity_level` is greater than `1`, the request will be printed -/// to `stdout` with no abbreviation of long fields. When `verbosity_level` is `0`, the request -/// will not be printed to `stdout`. -/// * `maybe_block_id` must be a hex-encoded, 32-byte hash digest or a `u64` representing the -/// `Block` height or empty. If empty, the latest `Block` will be used. -pub fn get_state_root_hash( - maybe_rpc_id: &str, - node_address: &str, - verbosity_level: u64, - maybe_block_id: &str, -) -> Result { - RpcCall::new(maybe_rpc_id, node_address, verbosity_level).get_state_root_hash(maybe_block_id) -} - -/// Retrieves a stored value from the network. 
-/// -/// * `maybe_rpc_id` is the JSON-RPC identifier, applied to the request and returned in the -/// response. If it can be parsed as an `i64` it will be used as a JSON integer. If empty, a -/// random `i64` will be assigned. Otherwise the provided string will be used verbatim. -/// * `node_address` is the hostname or IP and port of the node on which the HTTP service is -/// running, e.g. `"http://127.0.0.1:7777"`. -/// * When `verbosity_level` is `1`, the JSON-RPC request will be printed to `stdout` with long -/// string fields (e.g. hex-formatted raw Wasm bytes) shortened to a string indicating the char -/// count of the field. When `verbosity_level` is greater than `1`, the request will be printed -/// to `stdout` with no abbreviation of long fields. When `verbosity_level` is `0`, the request -/// will not be printed to `stdout`. -/// * `state_root_hash` must be a hex-encoded, 32-byte hash digest. -/// * `key` must be a formatted [`PublicKey`](https://docs.rs/casper-node/latest/casper-node/crypto/asymmetric_key/enum.PublicKey.html) -/// or [`Key`](https://docs.rs/casper-types/latest/casper-types/enum.PublicKey.html). This will -/// take one of the following forms: -/// ```text -/// 01c9e33693951aaac23c49bee44ad6f863eedcd38c084a3a8f11237716a3df9c2c # PublicKey -/// account-hash-0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20 # Key::Account -/// hash-0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20 # Key::Hash -/// uref-0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20-007 # Key::URef -/// transfer-0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20 # Key::Transfer -/// deploy-0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20 # Key::DeployInfo -/// ``` -/// * `path` is comprised of components starting from the `key`, separated by `/`s. 
-pub fn get_item( - maybe_rpc_id: &str, - node_address: &str, - verbosity_level: u64, - state_root_hash: &str, - key: &str, - path: &str, -) -> Result { - RpcCall::new(maybe_rpc_id, node_address, verbosity_level).get_item(state_root_hash, key, path) -} - -/// Retrieves a purse's balance from the network. -/// -/// * `maybe_rpc_id` is the JSON-RPC identifier, applied to the request and returned in the -/// response. If it can be parsed as an `i64` it will be used as a JSON integer. If empty, a -/// random `i64` will be assigned. Otherwise the provided string will be used verbatim. -/// * `node_address` is the hostname or IP and port of the node on which the HTTP service is -/// running, e.g. `"http://127.0.0.1:7777"`. -/// * When `verbosity_level` is `1`, the JSON-RPC request will be printed to `stdout` with long -/// string fields (e.g. hex-formatted raw Wasm bytes) shortened to a string indicating the char -/// count of the field. When `verbosity_level` is greater than `1`, the request will be printed -/// to `stdout` with no abbreviation of long fields. When `verbosity_level` is `0`, the request -/// will not be printed to `stdout`. -/// * `state_root_hash` must be a hex-encoded, 32-byte hash digest. -/// * `purse` is a URef, formatted as e.g. -/// ```text -/// uref-0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20-007 -/// ``` -pub fn get_balance( - maybe_rpc_id: &str, - node_address: &str, - verbosity_level: u64, - state_root_hash: &str, - purse: &str, -) -> Result { - RpcCall::new(maybe_rpc_id, node_address, verbosity_level).get_balance(state_root_hash, purse) -} - -/// Retrieves era information from the network. -/// -/// * `maybe_rpc_id` is the JSON-RPC identifier, applied to the request and returned in the -/// response. If it can be parsed as an `i64` it will be used as a JSON integer. If empty, a -/// random `i64` will be assigned. Otherwise the provided string will be used verbatim. 
-/// * `node_address` is the hostname or IP and port of the node on which the HTTP service is -/// running, e.g. `"http://127.0.0.1:7777"`. -/// * When `verbosity_level` is `1`, the JSON-RPC request will be printed to `stdout` with long -/// string fields (e.g. hex-formatted raw Wasm bytes) shortened to a string indicating the char -/// count of the field. When `verbosity_level` is greater than `1`, the request will be printed -/// to `stdout` with no abbreviation of long fields. When `verbosity_level` is `0`, the request -/// will not be printed to `stdout`. -/// * `maybe_block_id` must be a hex-encoded, 32-byte hash digest or a `u64` representing the -/// `Block` height or empty. If empty, era information from the latest block will be returned if -/// available. -pub fn get_era_info_by_switch_block( - maybe_rpc_id: &str, - node_address: &str, - verbosity_level: u64, - maybe_block_id: &str, -) -> Result { - RpcCall::new(maybe_rpc_id, node_address, verbosity_level) - .get_era_info_by_switch_block(maybe_block_id) -} - -/// Retrieves the bids and validators as of the most recently added `Block`. -/// -/// * `maybe_rpc_id` is the JSON-RPC identifier, applied to the request and returned in the -/// response. If it can be parsed as an `i64` it will be used as a JSON integer. If empty, a -/// random `i64` will be assigned. Otherwise the provided string will be used verbatim. -/// * `node_address` is the hostname or IP and port of the node on which the HTTP service is -/// running, e.g. `"http://127.0.0.1:7777"`. -/// * When `verbosity_level` is `1`, the JSON-RPC request will be printed to `stdout` with long -/// string fields (e.g. hex-formatted raw Wasm bytes) shortened to a string indicating the char -/// count of the field. When `verbosity_level` is greater than `1`, the request will be printed -/// to `stdout` with no abbreviation of long fields. When `verbosity_level` is `0`, the request -/// will not be printed to `stdout`. 
-pub fn get_auction_info( - maybe_rpc_id: &str, - node_address: &str, - verbosity_level: u64, -) -> Result { - RpcCall::new(maybe_rpc_id, node_address, verbosity_level).get_auction_info() -} - -/// Retrieves information and examples for all currently supported RPCs. -/// -/// * `maybe_rpc_id` is the JSON-RPC identifier, applied to the request and returned in the -/// response. If it can be parsed as an `i64` it will be used as a JSON integer. If empty, a -/// random `i64` will be assigned. Otherwise the provided string will be used verbatim. -/// * `node_address` is the hostname or IP and port of the node on which the HTTP service is -/// running, e.g. `"http://127.0.0.1:7777"`. -/// * When `verbosity_level` is `1`, the JSON-RPC request will be printed to `stdout` with long -/// string fields (e.g. hex-formatted raw Wasm bytes) shortened to a string indicating the char -/// count of the field. When `verbosity_level` is greater than `1`, the request will be printed -/// to `stdout` with no abbreviation of long fields. When `verbosity_level` is `0`, the request -/// will not be printed to `stdout`. -pub fn list_rpcs(maybe_rpc_id: &str, node_address: &str, verbosity_level: u64) -> Result { - RpcCall::new(maybe_rpc_id, node_address, verbosity_level).list_rpcs() -} - -/// Container for `Deploy` construction options. -#[derive(Default, Debug)] -pub struct DeployStrParams<'a> { - /// Path to secret key file. - pub secret_key: &'a str, - /// RFC3339-like formatted timestamp. e.g. `2018-02-16T00:31:37Z`. - /// - /// If `timestamp` is empty, the current time will be used. Note that timestamp is UTC, not - /// local. - /// - /// See - /// [the `humantime` docs](https://docs.rs/humantime/latest/humantime/fn.parse_rfc3339_weak.html) - /// for more information. - pub timestamp: &'a str, - /// Time that the `Deploy` will remain valid for. - /// - /// A `Deploy` can only be included in a `Block` between `timestamp` and `timestamp + ttl`. 
- /// Input examples: '1hr 12min', '30min 50sec', '1day'. - /// - /// See - /// [the `humantime` docs](https://docs.rs/humantime/latest/humantime/fn.parse_duration.html) - /// for more information. - pub ttl: &'a str, - /// Conversion rate between the cost of Wasm opcodes and the motes sent by the payment code. - pub gas_price: &'a str, - /// Hex-encoded `Deploy` hashes of deploys which must be executed before this one. - pub dependencies: Vec<&'a str>, - /// Name of the chain, to avoid the `Deploy` from being accidentally or maliciously included in - /// a different chain. - pub chain_name: &'a str, -} - -impl<'a> TryInto for DeployStrParams<'a> { - type Error = Error; - - fn try_into(self) -> Result { - let DeployStrParams { - secret_key, - timestamp, - ttl, - gas_price, - dependencies, - chain_name, - } = self; - parsing::parse_deploy_params( - secret_key, - timestamp, - ttl, - gas_price, - &dependencies, - chain_name, - ) - } -} - -/// Container for payment-related arguments used while constructing a `Deploy`. -/// -/// ## `payment_args_simple` -/// -/// For methods taking `payment_args_simple`, this parameter is the payment contract arguments, in -/// the form `` or ``. -/// -/// It can only be used with the following simple `CLType`s: bool, i32, i64, u8, u32, u64, u128, -/// u256, u512, unit, string, key, account_hash, uref, public_key and `Option` of each of these. 
-/// -/// Example inputs are: -/// -/// ```text -/// name_01:bool='false' -/// name_02:i32='-1' -/// name_03:i64='-2' -/// name_04:u8='3' -/// name_05:u32='4' -/// name_06:u64='5' -/// name_07:u128='6' -/// name_08:u256='7' -/// name_09:u512='8' -/// name_10:unit='' -/// name_11:string='a value' -/// key_account_name:key='account-hash-0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20' -/// key_hash_name:key='hash-0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20' -/// key_uref_name:key='uref-0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20-000' -/// account_hash_name:account_hash='account-hash-0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20' -/// uref_name:uref='uref-0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20-007' -/// public_key_name:public_key='0119bf44096984cdfe8541bac167dc3b96c85086aa30b6b6cb0c5c38ad703166e1' -/// ``` -/// -/// For optional values of any these types, prefix the type with "opt_" and use the term "null" -/// without quotes to specify a None value: -/// -/// ```text -/// name_01:opt_bool='true' # Some(true) -/// name_02:opt_bool='false' # Some(false) -/// name_03:opt_bool=null # None -/// name_04:opt_i32='-1' # Some(-1) -/// name_05:opt_i32=null # None -/// name_06:opt_unit='' # Some(()) -/// name_07:opt_unit=null # None -/// name_08:opt_string='a value' # Some("a value".to_string()) -/// name_09:opt_string='null' # Some("null".to_string()) -/// name_10:opt_string=null # None -/// ``` -/// -/// To get a list of supported types, call -/// [`supported_cl_type_list()`](help/fn.supported_cl_type_list.html). To get this list of examples -/// for supported types, call -/// [`supported_cl_type_examples()`](help/fn.supported_cl_type_examples.html). -/// -/// ## `payment_args_complex` -/// -/// For methods taking `payment_args_complex`, this parameter is the payment contract arguments, in -/// the form of a `ToBytes`-encoded file. 
-/// -/// --- -/// -/// **Note** while multiple payment args can be specified for a single payment code instance, only -/// one of `payment_args_simple` and `payment_args_complex` may be used. -#[derive(Default)] -pub struct PaymentStrParams<'a> { - payment_amount: &'a str, - payment_hash: &'a str, - payment_name: &'a str, - payment_package_hash: &'a str, - payment_package_name: &'a str, - payment_path: &'a str, - payment_args_simple: Vec<&'a str>, - payment_args_complex: &'a str, - payment_version: &'a str, - payment_entry_point: &'a str, -} - -impl<'a> TryInto for PaymentStrParams<'a> { - type Error = Error; - - fn try_into(self) -> Result { - let PaymentStrParams { - payment_amount, - payment_hash, - payment_name, - payment_package_hash, - payment_package_name, - payment_path, - payment_args_simple, - payment_args_complex, - payment_version, - payment_entry_point, - } = self; - - parsing::parse_payment_info( - payment_amount, - payment_hash, - payment_name, - payment_package_hash, - payment_package_name, - payment_path, - &payment_args_simple, - payment_args_complex, - payment_version, - payment_entry_point, - ) - } -} - -impl<'a> PaymentStrParams<'a> { - /// Constructs a `PaymentStrParams` using a payment smart contract file. - /// - /// * `payment_path` is the path to the compiled Wasm payment code. - /// * See the struct docs for a description of [`payment_args_simple`](#payment_args_simple) and - /// [`payment_args_complex`](#payment_args_complex). - pub fn with_path( - payment_path: &'a str, - payment_args_simple: Vec<&'a str>, - payment_args_complex: &'a str, - ) -> Self { - Self { - payment_path, - payment_args_simple, - payment_args_complex, - ..Default::default() - } - } - - /// Constructs a `PaymentStrParams` using a payment amount. - /// - /// `payment_amount` uses the standard-payment system contract rather than custom payment Wasm. - /// The value is the 'amount' arg of the standard-payment contract. 
- pub fn with_amount(payment_amount: &'a str) -> Self { - Self { - payment_amount, - ..Default::default() - } - } - - /// Constructs a `PaymentStrParams` using a stored contract's name. - /// - /// * `payment_name` is the name of the stored contract (associated with the executing account) - /// to be called as the payment. - /// * `payment_entry_point` is the name of the method that will be used when calling the payment - /// contract. - /// * See the struct docs for a description of [`payment_args_simple`](#payment_args_simple) and - /// [`payment_args_complex`](#payment_args_complex). - pub fn with_name( - payment_name: &'a str, - payment_entry_point: &'a str, - payment_args_simple: Vec<&'a str>, - payment_args_complex: &'a str, - ) -> Self { - Self { - payment_name, - payment_entry_point, - payment_args_simple, - payment_args_complex, - ..Default::default() - } - } - - /// Constructs a `PaymentStrParams` using a stored contract's hex-encoded hash. - /// - /// * `payment_hash` is the hex-encoded hash of the stored contract to be called as the payment. - /// * `payment_entry_point` is the name of the method that will be used when calling the payment - /// contract. - /// * See the struct docs for a description of [`payment_args_simple`](#payment_args_simple) and - /// [`payment_args_complex`](#payment_args_complex). - pub fn with_hash( - payment_hash: &'a str, - payment_entry_point: &'a str, - payment_args_simple: Vec<&'a str>, - payment_args_complex: &'a str, - ) -> Self { - Self { - payment_hash, - payment_entry_point, - payment_args_simple, - payment_args_complex, - ..Default::default() - } - } - - /// Constructs a `PaymentStrParams` using a stored contract's package name. - /// - /// * `payment_package_name` is the name of the stored package to be called as the payment. - /// * `payment_version` is the version of the called payment contract. The latest will be used - /// if `payment_version` is empty. 
- /// * `payment_entry_point` is the name of the method that will be used when calling the payment - /// contract. - /// * See the struct docs for a description of [`payment_args_simple`](#payment_args_simple) and - /// [`payment_args_complex`](#payment_args_complex). - pub fn with_package_name( - payment_package_name: &'a str, - payment_version: &'a str, - payment_entry_point: &'a str, - payment_args_simple: Vec<&'a str>, - payment_args_complex: &'a str, - ) -> Self { - Self { - payment_package_name, - payment_version, - payment_entry_point, - payment_args_simple, - payment_args_complex, - ..Default::default() - } - } - - /// Constructs a `PaymentStrParams` using a stored contract's package hash. - /// - /// * `payment_package_hash` is the hex-encoded hash of the stored package to be called as the - /// payment. - /// * `payment_version` is the version of the called payment contract. The latest will be used - /// if `payment_version` is empty. - /// * `payment_entry_point` is the name of the method that will be used when calling the payment - /// contract. - /// * See the struct docs for a description of [`payment_args_simple`](#payment_args_simple) and - /// [`payment_args_complex`](#payment_args_complex). 
- pub fn with_package_hash( - payment_package_hash: &'a str, - payment_version: &'a str, - payment_entry_point: &'a str, - payment_args_simple: Vec<&'a str>, - payment_args_complex: &'a str, - ) -> Self { - Self { - payment_package_hash, - payment_version, - payment_entry_point, - payment_args_simple, - payment_args_complex, - ..Default::default() - } - } -} - -impl<'a> TryInto for SessionStrParams<'a> { - type Error = Error; - - fn try_into(self) -> Result { - let SessionStrParams { - session_hash, - session_name, - session_package_hash, - session_package_name, - session_path, - session_args_simple, - session_args_complex, - session_version, - session_entry_point, - } = self; - - parsing::parse_session_info( - session_hash, - session_name, - session_package_hash, - session_package_name, - session_path, - &session_args_simple, - session_args_complex, - session_version, - session_entry_point, - ) - } -} - -/// Container for session-related arguments used while constructing a `Deploy`. -/// -/// ## `session_args_simple` -/// -/// For methods taking `session_args_simple`, this parameter is the session contract arguments, in -/// the form `` or ``. -/// -/// There are further details in -/// [the docs for the equivalent -/// `payment_args_simple`](struct.PaymentStrParams.html#payment_args_simple). -/// -/// ## `session_args_complex` -/// -/// For methods taking `session_args_complex`, this parameter is the session contract arguments, in -/// the form of a `ToBytes`-encoded file. -/// -/// --- -/// -/// **Note** while multiple payment args can be specified for a single session code instance, only -/// one of `session_args_simple` and `session_args_complex` may be used. 
-#[derive(Default)] -pub struct SessionStrParams<'a> { - session_hash: &'a str, - session_name: &'a str, - session_package_hash: &'a str, - session_package_name: &'a str, - session_path: &'a str, - session_args_simple: Vec<&'a str>, - session_args_complex: &'a str, - session_version: &'a str, - session_entry_point: &'a str, -} - -impl<'a> SessionStrParams<'a> { - /// Constructs a `SessionStrParams` using a session smart contract file. - /// - /// * `session_path` is the path to the compiled Wasm session code. - /// * See the struct docs for a description of [`session_args_simple`](#session_args_simple) and - /// [`session_args_complex`](#session_args_complex). - pub fn with_path( - session_path: &'a str, - session_args_simple: Vec<&'a str>, - session_args_complex: &'a str, - ) -> Self { - Self { - session_path, - session_args_simple, - session_args_complex, - ..Default::default() - } - } - - /// Constructs a `SessionStrParams` using a stored contract's name. - /// - /// * `session_name` is the name of the stored contract (associated with the executing account) - /// to be called as the session. - /// * `session_entry_point` is the name of the method that will be used when calling the session - /// contract. - /// * See the struct docs for a description of [`session_args_simple`](#session_args_simple) and - /// [`session_args_complex`](#session_args_complex). - pub fn with_name( - session_name: &'a str, - session_entry_point: &'a str, - session_args_simple: Vec<&'a str>, - session_args_complex: &'a str, - ) -> Self { - Self { - session_name, - session_entry_point, - session_args_simple, - session_args_complex, - ..Default::default() - } - } - - /// Constructs a `SessionStrParams` using a stored contract's hex-encoded hash. - /// - /// * `session_hash` is the hex-encoded hash of the stored contract to be called as the session. - /// * `session_entry_point` is the name of the method that will be used when calling the session - /// contract. 
- /// * See the struct docs for a description of [`session_args_simple`](#session_args_simple) and - /// [`session_args_complex`](#session_args_complex). - pub fn with_hash( - session_hash: &'a str, - session_entry_point: &'a str, - session_args_simple: Vec<&'a str>, - session_args_complex: &'a str, - ) -> Self { - Self { - session_hash, - session_entry_point, - session_args_simple, - session_args_complex, - ..Default::default() - } - } - - /// Constructs a `SessionStrParams` using a stored contract's package name. - /// - /// * `session_package_name` is the name of the stored package to be called as the session. - /// * `session_version` is the version of the called session contract. The latest will be used - /// if `session_version` is empty. - /// * `session_entry_point` is the name of the method that will be used when calling the session - /// contract. - /// * See the struct docs for a description of [`session_args_simple`](#session_args_simple) and - /// [`session_args_complex`](#session_args_complex). - pub fn with_package_name( - session_package_name: &'a str, - session_version: &'a str, - session_entry_point: &'a str, - session_args_simple: Vec<&'a str>, - session_args_complex: &'a str, - ) -> Self { - Self { - session_package_name, - session_version, - session_entry_point, - session_args_simple, - session_args_complex, - ..Default::default() - } - } - - /// Constructs a `SessionStrParams` using a stored contract's package hash. - /// - /// * `session_package_hash` is the hex-encoded hash of the stored package to be called as the - /// session. - /// * `session_version` is the version of the called session contract. The latest will be used - /// if `session_version` is empty. - /// * `session_entry_point` is the name of the method that will be used when calling the session - /// contract. - /// * See the struct docs for a description of [`session_args_simple`](#session_args_simple) and - /// [`session_args_complex`](#session_args_complex). 
- pub fn with_package_hash( - session_package_hash: &'a str, - session_version: &'a str, - session_entry_point: &'a str, - session_args_simple: Vec<&'a str>, - session_args_complex: &'a str, - ) -> Self { - Self { - session_package_hash, - session_version, - session_entry_point, - session_args_simple, - session_args_complex, - ..Default::default() - } - } -} - -/// When `verbosity_level` is `1`, the value will be printed to `stdout` with long string fields -/// (e.g. hex-formatted raw Wasm bytes) shortened to a string indicating the char count of the -/// field. When `verbosity_level` is greater than `1`, the value will be printed to `stdout` with -/// no abbreviation of long fields. When `verbosity_level` is `0`, the value will not be printed to -/// `stdout`. -pub fn pretty_print_at_level(value: &T, verbosity_level: u64) { - match verbosity_level { - 0 => (), - 1 => { - println!( - "{}", - casper_types::json_pretty_print(value).expect("should encode to JSON") - ); - } - _ => { - println!( - "{}", - serde_json::to_string_pretty(value).expect("should encode to JSON") - ); - } - } -} - -#[cfg(test)] -mod param_tests { - use super::*; - - #[derive(Debug)] - struct ErrWrapper(pub Error); - - impl PartialEq for ErrWrapper { - fn eq(&self, other: &ErrWrapper) -> bool { - format!("{:?}", self.0) == format!("{:?}", other.0) - } - } - - impl Into for Error { - fn into(self) -> ErrWrapper { - ErrWrapper(self) - } - } - - const HASH: &str = "09dcee4b212cfd53642ab323fbef07dafafc6f945a80a00147f62910a915c4e6"; - const NAME: &str = "name"; - const PKG_NAME: &str = "pkg_name"; - const PKG_HASH: &str = "09dcee4b212cfd53642ab323fbef07dafafc6f945a80a00147f62910a915c4e6"; - const ENTRYPOINT: &str = "entrypoint"; - const VERSION: &str = "0.1.0"; - - fn args_simple() -> Vec<&'static str> { - vec!["name_01:bool='false'", "name_02:u32='42'"] - } - - /// Sample data creation methods for PaymentStrParams - mod session_params { - use std::collections::BTreeMap; - - use 
casper_types::CLValue; - - use super::*; - - #[test] - pub fn with_hash() { - let params: Result = - SessionStrParams::with_hash(HASH, ENTRYPOINT, args_simple(), "").try_into(); - match params { - Ok(item @ ExecutableDeployItem::StoredContractByHash { .. }) => { - let actual: BTreeMap = item.args().clone().into(); - let mut expected = BTreeMap::new(); - expected.insert("name_01".to_owned(), CLValue::from_t(false).unwrap()); - expected.insert("name_02".to_owned(), CLValue::from_t(42u32).unwrap()); - assert_eq!(actual, expected); - } - other => panic!("incorrect type parsed {:?}", other), - } - } - - #[test] - pub fn with_name() { - let params: Result = - SessionStrParams::with_name(NAME, ENTRYPOINT, args_simple(), "").try_into(); - match params { - Ok(item @ ExecutableDeployItem::StoredContractByName { .. }) => { - let actual: BTreeMap = item.args().clone().into(); - let mut expected = BTreeMap::new(); - expected.insert("name_01".to_owned(), CLValue::from_t(false).unwrap()); - expected.insert("name_02".to_owned(), CLValue::from_t(42u32).unwrap()); - assert_eq!(actual, expected); - } - other => panic!("incorrect type parsed {:?}", other), - } - } - - #[test] - pub fn with_package_name() { - let params: Result = SessionStrParams::with_package_name( - PKG_NAME, - VERSION, - ENTRYPOINT, - args_simple(), - "", - ) - .try_into(); - match params { - Ok(item @ ExecutableDeployItem::StoredVersionedContractByName { .. 
}) => { - let actual: BTreeMap = item.args().clone().into(); - let mut expected = BTreeMap::new(); - expected.insert("name_01".to_owned(), CLValue::from_t(false).unwrap()); - expected.insert("name_02".to_owned(), CLValue::from_t(42u32).unwrap()); - assert_eq!(actual, expected); - } - other => panic!("incorrect type parsed {:?}", other), - } - } - - #[test] - pub fn with_package_hash() { - let params: Result = SessionStrParams::with_package_hash( - PKG_HASH, - VERSION, - ENTRYPOINT, - args_simple(), - "", - ) - .try_into(); - match params { - Ok(item @ ExecutableDeployItem::StoredVersionedContractByHash { .. }) => { - let actual: BTreeMap = item.args().clone().into(); - let mut expected = BTreeMap::new(); - expected.insert("name_01".to_owned(), CLValue::from_t(false).unwrap()); - expected.insert("name_02".to_owned(), CLValue::from_t(42u32).unwrap()); - assert_eq!(actual, expected); - } - other => panic!("incorrect type parsed {:?}", other), - } - } - } - - /// Sample data creation methods for PaymentStrParams - mod payment_params { - use std::collections::BTreeMap; - - use casper_types::CLValue; - - use super::*; - - #[test] - pub fn with_amount() { - let params: Result = - PaymentStrParams::with_amount("100").try_into(); - match params { - Ok(item @ ExecutableDeployItem::ModuleBytes { .. }) => { - let amount = CLValue::from_t(U512::from(100)).unwrap(); - assert_eq!(item.args().get("amount"), Some(&amount)); - } - other => panic!("incorrect type parsed {:?}", other), - } - } - - #[test] - pub fn with_hash() { - let params: Result = - PaymentStrParams::with_hash(HASH, ENTRYPOINT, args_simple(), "").try_into(); - match params { - Ok(item @ ExecutableDeployItem::StoredContractByHash { .. 
}) => { - let actual: BTreeMap = item.args().clone().into(); - let mut expected = BTreeMap::new(); - expected.insert("name_01".to_owned(), CLValue::from_t(false).unwrap()); - expected.insert("name_02".to_owned(), CLValue::from_t(42u32).unwrap()); - assert_eq!(actual, expected); - } - other => panic!("incorrect type parsed {:?}", other), - } - } - - #[test] - pub fn with_name() { - let params: Result = - PaymentStrParams::with_name(NAME, ENTRYPOINT, args_simple(), "").try_into(); - match params { - Ok(item @ ExecutableDeployItem::StoredContractByName { .. }) => { - let actual: BTreeMap = item.args().clone().into(); - let mut expected = BTreeMap::new(); - expected.insert("name_01".to_owned(), CLValue::from_t(false).unwrap()); - expected.insert("name_02".to_owned(), CLValue::from_t(42u32).unwrap()); - assert_eq!(actual, expected); - } - other => panic!("incorrect type parsed {:?}", other), - } - } - - #[test] - pub fn with_package_name() { - let params: Result = PaymentStrParams::with_package_name( - PKG_NAME, - VERSION, - ENTRYPOINT, - args_simple(), - "", - ) - .try_into(); - match params { - Ok(item @ ExecutableDeployItem::StoredVersionedContractByName { .. }) => { - let actual: BTreeMap = item.args().clone().into(); - let mut expected = BTreeMap::new(); - expected.insert("name_01".to_owned(), CLValue::from_t(false).unwrap()); - expected.insert("name_02".to_owned(), CLValue::from_t(42u32).unwrap()); - assert_eq!(actual, expected); - } - other => panic!("incorrect type parsed {:?}", other), - } - } - - #[test] - pub fn with_package_hash() { - let params: Result = PaymentStrParams::with_package_hash( - PKG_HASH, - VERSION, - ENTRYPOINT, - args_simple(), - "", - ) - .try_into(); - match params { - Ok(item @ ExecutableDeployItem::StoredVersionedContractByHash { .. 
}) => { - let actual: BTreeMap = item.args().clone().into(); - let mut expected = BTreeMap::new(); - expected.insert("name_01".to_owned(), CLValue::from_t(false).unwrap()); - expected.insert("name_02".to_owned(), CLValue::from_t(42u32).unwrap()); - assert_eq!(actual, expected); - } - other => panic!("incorrect type parsed {:?}", other), - } - } - } - - mod deploy_str_params { - use humantime::{DurationError, TimestampError}; - - use super::*; - - use std::{convert::TryInto, result::Result as StdResult}; - - use crate::DeployStrParams; - - fn test_value() -> DeployStrParams<'static> { - DeployStrParams { - secret_key: "../resources/local/secret_keys/node-1.pem", - ttl: "10s", - chain_name: "casper-test-chain-name-1", - gas_price: "1", - ..Default::default() - } - } - - #[test] - fn should_convert_into_deploy_params() { - let deploy_params: StdResult = - test_value().try_into().map_err(ErrWrapper); - assert!(deploy_params.is_ok()); - } - - #[test] - fn should_fail_to_convert_with_bad_timestamp() { - let mut params = test_value(); - params.timestamp = "garbage"; - let result: StdResult = params.try_into(); - let result = result.map(|_| ()).map_err(ErrWrapper); - assert_eq!( - result, - Err( - Error::FailedToParseTimestamp("timestamp", TimestampError::InvalidFormat) - .into() - ) - ); - } - - #[test] - fn should_fail_to_convert_with_bad_gas_price() { - let mut params = test_value(); - params.gas_price = "fifteen"; - let result: StdResult = params.try_into(); - let result = result.map(|_| ()); - if let Err(Error::FailedToParseInt(context, _)) = result { - assert_eq!(context, "gas_price"); - } else { - panic!("should be an error"); - } - } - - #[test] - fn should_fail_to_convert_with_bad_chain_name() { - let mut params = test_value(); - params.chain_name = ""; - let result: StdResult = params.try_into(); - let result = result.map(|_| ()).map_err(ErrWrapper); - assert_eq!(result, Ok(())); - } - - #[test] - fn should_fail_to_convert_with_bad_ttl() { - let mut params = 
test_value(); - params.ttl = "not_a_ttl"; - let result: StdResult = params.try_into(); - let result = result.map(|_| ()).map_err(ErrWrapper); - assert_eq!( - result, - Err(Error::FailedToParseTimeDiff("ttl", DurationError::NumberExpected(0)).into()) - ); - } - - #[test] - fn should_fail_to_convert_with_bad_secret_key_path() { - let mut params = test_value(); - params.secret_key = ""; - let result: StdResult = params.try_into(); - let result = result.map(|_| ()); - if let Err(Error::CryptoError { context, .. }) = result { - assert_eq!(context, "secret_key"); - } else { - panic!("should be an error") - } - } - - #[test] - fn should_fail_to_convert_with_bad_dependencies() { - use casper_node::crypto::Error as CryptoError; - let mut params = test_value(); - params.dependencies = vec!["invalid dep"]; - let result: StdResult = params.try_into(); - let result = result.map(|_| ()).map_err(ErrWrapper); - assert_eq!( - result, - Err(Error::CryptoError { - context: "dependencies", - error: CryptoError::FromHex(hex::FromHexError::OddLength) - } - .into()) - ); - } - } -} diff --git a/client/lib/parsing.rs b/client/lib/parsing.rs deleted file mode 100644 index bff861e01b..0000000000 --- a/client/lib/parsing.rs +++ /dev/null @@ -1,1270 +0,0 @@ -//! This module contains structs and helpers which are used by multiple subcommands related to -//! creating deploys. 
- -use std::{convert::TryInto, fs, io, path::PathBuf, str::FromStr}; - -use serde::{self, Deserialize}; - -use casper_execution_engine::core::engine_state::executable_deploy_item::ExecutableDeployItem; -use casper_node::{ - crypto::{hash::Digest, AsymmetricKeyExt}, - types::{DeployHash, TimeDiff, Timestamp}, -}; -use casper_types::{ - bytesrepr, AsymmetricType, CLType, CLValue, HashAddr, Key, NamedArg, PublicKey, RuntimeArgs, - SecretKey, UIntParseError, U512, -}; - -use crate::{ - cl_type, - deploy::DeployParams, - error::{Error, Result}, - help, TransferTarget, -}; - -pub(super) fn none_if_empty(value: &'_ str) -> Option<&'_ str> { - if value.is_empty() { - return None; - } - Some(value) -} - -fn timestamp(value: &str) -> Result { - if value.is_empty() { - return Ok(Timestamp::now()); - } - Timestamp::from_str(value).map_err(|error| Error::FailedToParseTimestamp("timestamp", error)) -} - -fn ttl(value: &str) -> Result { - TimeDiff::from_str(value).map_err(|error| Error::FailedToParseTimeDiff("ttl", error)) -} - -fn gas_price(value: &str) -> Result { - Ok(value - .parse::() - .map_err(|error| Error::FailedToParseInt("gas_price", error))?) -} - -fn dependencies(values: &[&str]) -> Result> { - let mut hashes = Vec::with_capacity(values.len()); - for value in values { - let digest = Digest::from_hex(value).map_err(|error| Error::CryptoError { - context: "dependencies", - error, - })?; - hashes.push(DeployHash::new(digest)) - } - Ok(hashes) -} - -/// Handles providing the arg for and retrieval of simple session and payment args. -mod arg_simple { - use super::*; - - const ARG_VALUE_NAME: &str = r#""NAME:TYPE='VALUE'" OR "NAME:TYPE=null""#; - - pub(crate) mod session { - use super::*; - - pub fn parse(values: &[&str]) -> Result> { - Ok(if values.is_empty() { - None - } else { - Some(get(values)?) - }) - } - } - - pub(crate) mod payment { - use super::*; - - pub fn parse(values: &[&str]) -> Result> { - Ok(if values.is_empty() { - None - } else { - Some(get(values)?) 
- }) - } - } - - fn get(values: &[&str]) -> Result { - let mut runtime_args = RuntimeArgs::new(); - for arg in values { - let parts = split_arg(arg)?; - parts_to_cl_value(parts, &mut runtime_args)?; - } - Ok(runtime_args) - } - - /// Splits a single arg of the form `NAME:TYPE='VALUE'` into its constituent parts. - fn split_arg(arg: &str) -> Result<(&str, CLType, &str)> { - let parts: Vec<_> = arg.splitn(3, &[':', '='][..]).collect(); - if parts.len() != 3 { - return Err(Error::InvalidCLValue(format!( - "arg {} should be formatted as {}", - arg, ARG_VALUE_NAME - ))); - } - let cl_type = cl_type::parse(&parts[1]).map_err(|_| { - Error::InvalidCLValue(format!( - "unknown variant {}, expected one of {}", - parts[1], - help::supported_cl_type_list() - )) - })?; - Ok((parts[0], cl_type, parts[2])) - } - - /// Insert a value built from a single arg which has been split into its constituent parts. - fn parts_to_cl_value( - parts: (&str, CLType, &str), - runtime_args: &mut RuntimeArgs, - ) -> Result<()> { - let (name, cl_type, value) = parts; - let cl_value = cl_type::parts_to_cl_value(cl_type, value)?; - runtime_args.insert_cl_value(name, cl_value); - Ok(()) - } -} - -/// Handles providing the arg for and retrieval of complex session and payment args. These are read -/// in from a file. -mod args_complex { - use super::*; - - #[derive(Debug, Deserialize)] - #[serde(rename_all = "snake_case")] - enum DeployArgValue { - /// Contains `CLValue` serialized into bytes in base16 form. - #[serde(deserialize_with = "hex::deserialize")] - RawBytes(Vec), - } - - #[derive(Debug, Deserialize)] - #[serde(rename_all = "snake_case")] - struct DeployArg { - /// Deploy argument's name. 
- name: String, - value: DeployArgValue, - } - - impl From for CLValue { - fn from(value: DeployArgValue) -> Self { - match value { - DeployArgValue::RawBytes(bytes) => bytesrepr::deserialize(bytes) - .unwrap_or_else(|error| panic!("should deserialize deploy arg: {}", error)), - } - } - } - - impl From for NamedArg { - fn from(deploy_arg: DeployArg) -> Self { - let cl_value = deploy_arg - .value - .try_into() - .unwrap_or_else(|error| panic!("should serialize deploy arg: {}", error)); - NamedArg::new(deploy_arg.name, cl_value) - } - } - - pub mod session { - use super::*; - - pub fn parse(path: &str) -> Result { - if path.is_empty() { - return Err(Error::InvalidArgument("session_path", path.to_string())); - } - get(path).map_err(|error| Error::IoError { - context: format!("error reading session file at '{}'", path), - error, - }) - } - } - - pub mod payment { - use super::*; - - pub fn parse(path: &str) -> Result { - if path.is_empty() { - return Err(Error::InvalidArgument("payment_path", path.to_string())); - } - get(path).map_err(|error| Error::IoError { - context: format!("error reading payment file at '{}'", path), - error, - }) - } - } - - fn get(path: &str) -> io::Result { - let bytes = fs::read(path)?; - // Received structured args in JSON format. - let args: Vec = serde_json::from_slice(&bytes)?; - // Convert JSON deploy args into vector of named args. 
- let mut named_args = Vec::with_capacity(args.len()); - for arg in args { - named_args.push(arg.into()); - } - Ok(RuntimeArgs::from(named_args)) - } -} - -const STANDARD_PAYMENT_ARG_NAME: &str = "amount"; -fn standard_payment(value: &str) -> Result { - if value.is_empty() { - return Err(Error::InvalidCLValue(value.to_string())); - } - let arg = U512::from_dec_str(value) - .map_err(|err| Error::FailedToParseUint("amount", UIntParseError::FromDecStr(err)))?; - let mut runtime_args = RuntimeArgs::new(); - runtime_args.insert(STANDARD_PAYMENT_ARG_NAME, arg)?; - Ok(runtime_args) -} - -pub(crate) fn secret_key(value: &str) -> Result { - let path = PathBuf::from(value); - SecretKey::from_file(path).map_err(|error| Error::CryptoError { - context: "secret_key", - error, - }) -} - -fn args_from_simple_or_complex( - simple: Option, - complex: Option, -) -> RuntimeArgs { - // We can have exactly zero or one of the two as `Some`. - match (simple, complex) { - (Some(args), None) | (None, Some(args)) => args, - (None, None) => RuntimeArgs::new(), - (Some(_), Some(_)) => unreachable!("should not have both simple and complex args"), - } -} - -/// Private macro for enforcing parameter validity. -/// e.g. check_exactly_one_not_empty!( -/// (field1) requires[another_field], -/// (field2) requires[another_field, yet_another_field] -/// (field3) requires[] -/// ) -/// Returns an error if: -/// - More than one parameter is non-empty. -/// - Any parameter that is non-empty has requires[] requirements that are empty. -macro_rules! check_exactly_one_not_empty { - ( context: $site:expr, $( ($x:expr) requires[$($y:expr),*] requires_empty[$($z:expr),*] ),+ $(,)? 
) => {{ - - let field_is_empty_map = &[$( - (stringify!($x), $x.is_empty()) - ),+]; - - let required_arguments = field_is_empty_map - .iter() - .filter(|(_, is_empty)| !*is_empty) - .map(|(field, _)| field.to_string()) - .collect::>(); - - if required_arguments.is_empty() { - let required_param_names = vec![$((stringify!($x))),+]; - return Err(Error::InvalidArgument( - $site, - format!("Missing a required arg - exactly one of the following must be provided: {:?}", required_param_names), - )); - } - if required_arguments.len() == 1 { - let name = &required_arguments[0]; - let field_requirements = &[$( - ( - stringify!($x), - $x, - vec![$((stringify!($y), $y)),*], - vec![$((stringify!($z), $z)),*], - ) - ),+]; - - // Check requires[] and requires_requires_empty[] fields - let (_, value, requirements, required_empty) = field_requirements - .iter() - .find(|(field, _, _, _)| *field == name).expect("should exist"); - let required_arguments = requirements - .iter() - .filter(|(_, value)| !value.is_empty()) - .collect::>(); - - if requirements.len() != required_arguments.len() { - let required_param_names = requirements - .iter() - .map(|(requirement_name, _)| requirement_name) - .collect::>(); - return Err(Error::InvalidArgument( - $site, - format!("Field {} also requires following fields to be provided: {:?}", name, required_param_names), - )); - } - - let mut conflicting_fields = required_empty - .iter() - .filter(|(_, value)| !value.is_empty()) - .map(|(field, value)| format!("{}={}", field, value)).collect::>(); - - if !conflicting_fields.is_empty() { - conflicting_fields.push(format!("{}={}", name, value)); - conflicting_fields.sort(); - return Err(Error::ConflictingArguments{ - context: $site, - args: conflicting_fields, - }); - } - } else { - let mut non_empty_fields_with_values = vec![$((stringify!($x), $x)),+] - .iter() - .filter_map(|(field, value)| if !value.is_empty() { - Some(format!("{}={}", field, value)) - } else { - None - }) - .collect::>(); - 
non_empty_fields_with_values.sort(); - return Err(Error::ConflictingArguments { - context: $site, - args: non_empty_fields_with_values, - }); - } - }} -} - -pub(super) fn parse_deploy_params( - secret_key: &str, - timestamp: &str, - ttl: &str, - gas_price: &str, - dependencies: &[&str], - chain_name: &str, -) -> Result { - let secret_key = self::secret_key(secret_key)?; - let timestamp = self::timestamp(timestamp)?; - let ttl = self::ttl(ttl)?; - let gas_price = self::gas_price(gas_price)?; - let dependencies = self::dependencies(dependencies)?; - let chain_name = chain_name.to_string(); - - Ok(DeployParams { - timestamp, - ttl, - gas_price, - dependencies, - chain_name, - secret_key, - }) -} - -#[allow(clippy::too_many_arguments)] -pub(super) fn parse_session_info( - session_hash: &str, - session_name: &str, - session_package_hash: &str, - session_package_name: &str, - session_path: &str, - session_args: &[&str], - session_args_complex: &str, - session_version: &str, - session_entry_point: &str, -) -> Result { - check_exactly_one_not_empty!( - context: "parse_session_info", - (session_hash) - requires[session_entry_point] requires_empty[session_version], - (session_name) - requires[session_entry_point] requires_empty[session_version], - (session_package_hash) - requires[session_entry_point] requires_empty[], - (session_package_name) - requires[session_entry_point] requires_empty[], - (session_path) - requires[] requires_empty[session_entry_point, session_version], - ); - if !session_args.is_empty() && !session_args_complex.is_empty() { - return Err(Error::ConflictingArguments { - context: "parse_session_info", - args: vec!["session_args".to_owned(), "session_args_complex".to_owned()], - }); - } - - let session_args = args_from_simple_or_complex( - arg_simple::session::parse(session_args)?, - args_complex::session::parse(session_args_complex).ok(), - ); - let invalid_entry_point = - || Error::InvalidArgument("session_entry_point", session_entry_point.to_string()); 
- if let Some(session_name) = name(session_name) { - return Ok(ExecutableDeployItem::StoredContractByName { - name: session_name, - entry_point: entry_point(session_entry_point).ok_or_else(invalid_entry_point)?, - args: session_args, - }); - } - - if let Some(session_hash) = parse_contract_hash(session_hash)? { - return Ok(ExecutableDeployItem::StoredContractByHash { - hash: session_hash.into(), - entry_point: entry_point(session_entry_point).ok_or_else(invalid_entry_point)?, - args: session_args, - }); - } - - let version = version(session_version).ok(); - if let Some(package_name) = name(session_package_name) { - return Ok(ExecutableDeployItem::StoredVersionedContractByName { - name: package_name, - version, // defaults to highest enabled version - entry_point: entry_point(session_entry_point).ok_or_else(invalid_entry_point)?, - args: session_args, - }); - } - - if let Some(package_hash) = parse_contract_hash(session_package_hash)? { - return Ok(ExecutableDeployItem::StoredVersionedContractByHash { - hash: package_hash.into(), - version, // defaults to highest enabled version - entry_point: entry_point(session_entry_point).ok_or_else(invalid_entry_point)?, - args: session_args, - }); - } - - let module_bytes = fs::read(session_path).map_err(|error| Error::IoError { - context: format!("unable to read session file at '{}'", session_path), - error, - })?; - Ok(ExecutableDeployItem::ModuleBytes { - module_bytes: module_bytes.into(), - args: session_args, - }) -} - -#[allow(clippy::too_many_arguments)] -pub(super) fn parse_payment_info( - payment_amount: &str, - payment_hash: &str, - payment_name: &str, - payment_package_hash: &str, - payment_package_name: &str, - payment_path: &str, - payment_args: &[&str], - payment_args_complex: &str, - payment_version: &str, - payment_entry_point: &str, -) -> Result { - check_exactly_one_not_empty!( - context: "parse_payment_info", - (payment_amount) - requires[] requires_empty[payment_entry_point, payment_version], - 
(payment_hash) - requires[payment_entry_point] requires_empty[payment_version], - (payment_name) - requires[payment_entry_point] requires_empty[payment_version], - (payment_package_hash) - requires[payment_entry_point] requires_empty[], - (payment_package_name) - requires[payment_entry_point] requires_empty[], - (payment_path) requires[] requires_empty[payment_entry_point, payment_version], - ); - if !payment_args.is_empty() && !payment_args_complex.is_empty() { - return Err(Error::ConflictingArguments { - context: "parse_payment_info", - args: vec!["payment_args".to_owned(), "payment_args_complex".to_owned()], - }); - } - - if let Ok(payment_args) = standard_payment(payment_amount) { - return Ok(ExecutableDeployItem::ModuleBytes { - module_bytes: vec![].into(), - args: payment_args, - }); - } - - let invalid_entry_point = - || Error::InvalidArgument("payment_entry_point", payment_entry_point.to_string()); - - let payment_args = args_from_simple_or_complex( - arg_simple::payment::parse(payment_args)?, - args_complex::payment::parse(payment_args_complex).ok(), - ); - - if let Some(payment_name) = name(payment_name) { - return Ok(ExecutableDeployItem::StoredContractByName { - name: payment_name, - entry_point: entry_point(payment_entry_point).ok_or_else(invalid_entry_point)?, - args: payment_args, - }); - } - - if let Some(payment_hash) = parse_contract_hash(payment_hash)? 
{ - return Ok(ExecutableDeployItem::StoredContractByHash { - hash: payment_hash.into(), - entry_point: entry_point(payment_entry_point).ok_or_else(invalid_entry_point)?, - args: payment_args, - }); - } - - let version = version(payment_version).ok(); - if let Some(package_name) = name(payment_package_name) { - return Ok(ExecutableDeployItem::StoredVersionedContractByName { - name: package_name, - version, // defaults to highest enabled version - entry_point: entry_point(payment_entry_point).ok_or_else(invalid_entry_point)?, - args: payment_args, - }); - } - - if let Some(package_hash) = parse_contract_hash(payment_package_hash)? { - return Ok(ExecutableDeployItem::StoredVersionedContractByHash { - hash: package_hash.into(), - version, // defaults to highest enabled version - entry_point: entry_point(payment_entry_point).ok_or_else(invalid_entry_point)?, - args: payment_args, - }); - } - - let module_bytes = fs::read(payment_path).map_err(|error| Error::IoError { - context: format!("unable to read payment file at '{}'", payment_path), - error, - })?; - Ok(ExecutableDeployItem::ModuleBytes { - module_bytes: module_bytes.into(), - args: payment_args, - }) -} - -pub(crate) fn get_transfer_target(target_account: &str) -> Result { - if !target_account.is_empty() { - let account = account(target_account)?; - Ok(TransferTarget::Account(account)) - } else { - Err(Error::InvalidArgument( - "target_account", - format!( - "Invalid arguments to get_transfer_target - must provide either a target account. 
account={}", - target_account - ), - )) - } -} - -pub(crate) fn output(value: &str) -> Option<&str> { - none_if_empty(value) -} - -fn parse_contract_hash(value: &str) -> Result> { - if value.is_empty() { - return Ok(None); - } - if let Ok(digest) = Digest::from_hex(value) { - return Ok(Some(digest.to_array())); - } - if let Ok(Key::Hash(hash)) = Key::from_formatted_str(value) { - return Ok(Some(hash)); - } - Err(Error::FailedToParseKey) -} - -fn name(value: &str) -> Option { - none_if_empty(value).map(str::to_string) -} - -fn entry_point(value: &str) -> Option { - none_if_empty(value).map(str::to_string) -} - -fn version(value: &str) -> Result { - value - .parse::() - .map_err(|error| Error::FailedToParseInt("version", error)) -} - -fn account(value: &str) -> Result { - PublicKey::from_hex(value).map_err(|error| Error::CryptoError { - context: "account", - error: error.into(), - }) -} - -pub(crate) fn transfer_id(value: &str) -> Result> { - if str::is_empty(value) { - return Ok(None); - } - let value = value - .parse::() - .map_err(|error| Error::FailedToParseInt("transfer_id", error))?; - Ok(Some(value)) -} - -#[cfg(test)] -mod tests { - use std::{convert::TryFrom, result::Result as StdResult}; - - use casper_types::{ - account::AccountHash, bytesrepr::ToBytes, AccessRights, CLTyped, CLValue, NamedArg, - PublicKey, RuntimeArgs, URef, U128, U256, U512, - }; - - use crate::{PaymentStrParams, SessionStrParams}; - - use super::*; - - #[derive(Debug)] - struct ErrWrapper(pub Error); - - impl PartialEq for ErrWrapper { - fn eq(&self, other: &ErrWrapper) -> bool { - format!("{:?}", self.0) == format!("{:?}", other.0) - } - } - - impl Into for Error { - fn into(self) -> ErrWrapper { - ErrWrapper(self) - } - } - - mod bad { - pub const EMPTY: &str = ""; - pub const ARG_UNQUOTED: &str = "name:u32=0"; // value needs single quotes to be valid - pub const ARG_BAD_TYPE: &str = "name:wat='false'"; - pub const ARG_GIBBERISH: &str = "asdf|1234(..)"; - pub const LARGE_2K_INPUT: 
&str = r#" - eJy2irIizK6zT0XOklyBAY1KVUsAbyF6eJUYBmRPHqX2rONbaEieJt4Ci1eZYjBdHdEq46oMBH0LeiQO8RIJb95 - SJGEp83RxakDj7trunJVvMbj2KZFnpJOyEauFa35dlaVG9Ki7hjFy4BLlDyA0Wgwk20RXFkbgKQIQVvR16RPffR - WO86WqZ3gMuOh447svZRYfhbRF3NVBaWRz7SJ9Zm3w8djisvS0Y3GSnpzKnSEQirApqomfQTHTrU9ww2SMgdGuu - EllGLsj3ze8WzIbXLlJvXdnJFz7UfsgX4xowG4d6xSiUVWCY4sVItNXlqs8adfZZHH7AjqLjlRRvWwjNCiWsiqx - ICe9jlkdEVeRAO0BqF6FhjSxPt9X3y6WXAomB0YTIFQGyto4jMBOhWb96ny3DG3WISUSdaKWf8KaRuAQD4ao3ML - jJZSXkTlovZTYQmYlkYo4s3635YLthuh0hSorRs0ju7ffeY3tu7VRvttgvbBLVjFJjYrwW1YAEOaxDdLnhiTIQn - H0zRLWnCQ4Czk5BWsRLDdupJbKRWRZcQ7pehSgfc5qtXpJRFVtL2L82hxfBdiXqzXl3KdQ21CnGxTzcgEv0ptrs - XGJwNgd04YiZzHrZL7iF3xFann6DJVyEZ0eEifTfY8rtxPCMDutjr68iFjnjy40c7SfhvsZLODuEjS4VQkIwfJc - QP5fH3cQ2K4A4whpzTVc3yqig468Cjbxfobw4Z7YquZnuFw1TXSrM35ZBXpI4WKo9QLxmE2HkgMI1Uac2dWyG0U - iCAxpHxC4uTIFEq2MUuGd7ZgYs8zoYpODvtAcZ8nUqKssdugQUGfXw9Cs1pcDZgEppYVVw1nYoHXKCjK3oItexs - uIaZ0m1o91L9Js5lhaDybyDoye9zPFOnEIwKdcH0dO9cZmv6UyvVZS2oVKJm7nHQAJDARjVfC7GYAT2AQhFZxIQ - DP9jjHCqxMJz6p499G5lk8cYAhnlUm7GCr4AwvjsEU7sEsJcZLDCLG6FaFMdLHJS5v2yPYzpuWebjcNCXbk4yER - F9NsvlDBrLhoDt1GDgJPlRF8B5h5BSzPHsCjNVa9h2YWx1GVl6Yrrk04FSMSj0nRO8OoxkyU0ugtBQlUv3rQ833 - Vcs7jCGetaazcvaI45dRDGe6LyEPwojlC4IaB8PtljKo2zn0u91lQGJY7rj1qLUtFBRDCKERs7W1j9A2eGJ3ORY - Db7Q3K7BY9XbANGoYiwtLoytopYCQs5RYHepkoQ19f1E9IcqCFQg9h0rWK494xb88GfSGKBpPHddrQYXFrr715u - NkAj885V8Mnam5kSzsOmrg504QhPSOaqpkY36xyXUP13yWK4fEf39tJ2PN2DlAsxFAWJUec4CiS47rgrU87oESt - KZJni3Jhccczlq1CaRKaYYV38joEzPL0UNKr5RiCodTWJmdN07JI5txtQqgc8kvHOrxgOASPQOPSbAUz33vZx3b - eNsTYUD0Dxa4IkMUNHSy6mpaSOElO7wgUvWJEajnVWZJ5gWehyE4yqo6PkL3VBj51Jg2uozPa8xnbSfymlVVLFl - EIfMyPwUj1J9ngQw0J3bn33IIOB3bkNfB50f1MkKkhyn1TMZJcnZ7IS16PXBH6DD7Sht1PVKhER2E3QS7z8YQ6B - q27ktZZ33IcCnayahxHnyf2Wzab9ic5eSJLzsVi0VWP7DePt2GnCbz5D2tcAxgVVFmdIsEakytjmeEGyMu9k2R7 - Q8d1wPtqKgayVtgdIaMbvsnXMkRqITkf3o8Qh495pm1wkKArTGFGODXc1cCKheFUEtJWdK92DHH7OuRENHAb5KS - PKzSUg2k18wyf9XCy1pQKv31wii3rWrWMCbxOWmhuzw1N9tqO8U97NsThRSoPAjpd05G2roia4m4CaPWTAUmVky - 
RfiWoA7bglAh4Aoz2LN2ezFleTNJjjLw3n9bYPg5BdRL8n8wimhXDo9SW46A5YS62C08ZOVtvfn82YRaYkuKKz7 - 3NJ25PnQG6diMm4Lm3wi22yR7lY7oYYJjLNcaLYOI6HOvaJ - "#; - } - - mod happy { - pub const HASH: &str = "09dcee4b212cfd53642ab323fbef07dafafc6f945a80a00147f62910a915c4e6"; - pub const NAME: &str = "name"; - pub const PACKAGE_HASH: &str = - "09dcee4b212cfd53642ab323fbef07dafafc6f945a80a00147f62910a915c4e6"; - pub const PACKAGE_NAME: &str = "package_name"; - pub const PATH: &str = "./session.wasm"; - pub const ENTRY_POINT: &str = "entrypoint"; - pub const VERSION: &str = "1.0.0"; - } - - fn invalid_simple_args_test(cli_string: &str) { - assert!( - arg_simple::payment::parse(&[cli_string]) - .map_err(ErrWrapper) - .is_err(), - "{} should be an error", - cli_string - ); - assert!( - arg_simple::session::parse(&[cli_string]) - .map_err(ErrWrapper) - .is_err(), - "{} should be an error", - cli_string - ); - } - - fn valid_simple_args_test(cli_string: &str, expected: T) { - let expected = Some(RuntimeArgs::from(vec![NamedArg::new( - "x".to_string(), - CLValue::from_t(expected).unwrap(), - )])); - - assert_eq!( - arg_simple::payment::parse(&[cli_string]).expect("should parse"), - expected - ); - assert_eq!( - arg_simple::session::parse(&[cli_string]).expect("should parse"), - expected - ); - } - - #[test] - fn should_parse_bool_via_args_simple() { - valid_simple_args_test("x:bool='f'", false); - valid_simple_args_test("x:bool='false'", false); - valid_simple_args_test("x:bool='t'", true); - valid_simple_args_test("x:bool='true'", true); - valid_simple_args_test("x:opt_bool='f'", Some(false)); - valid_simple_args_test("x:opt_bool='t'", Some(true)); - valid_simple_args_test::>("x:opt_bool=null", None); - } - - #[test] - fn should_parse_i32_via_args_simple() { - valid_simple_args_test("x:i32='2147483647'", i32::max_value()); - valid_simple_args_test("x:i32='0'", 0_i32); - valid_simple_args_test("x:i32='-2147483648'", i32::min_value()); - valid_simple_args_test("x:opt_i32='-1'", Some(-1_i32)); 
- valid_simple_args_test::>("x:opt_i32=null", None); - } - - #[test] - fn should_parse_i64_via_args_simple() { - valid_simple_args_test("x:i64='9223372036854775807'", i64::max_value()); - valid_simple_args_test("x:i64='0'", 0_i64); - valid_simple_args_test("x:i64='-9223372036854775808'", i64::min_value()); - valid_simple_args_test("x:opt_i64='-1'", Some(-1_i64)); - valid_simple_args_test::>("x:opt_i64=null", None); - } - - #[test] - fn should_parse_u8_via_args_simple() { - valid_simple_args_test("x:u8='0'", 0_u8); - valid_simple_args_test("x:u8='255'", u8::max_value()); - valid_simple_args_test("x:opt_u8='1'", Some(1_u8)); - valid_simple_args_test::>("x:opt_u8=null", None); - } - - #[test] - fn should_parse_u32_via_args_simple() { - valid_simple_args_test("x:u32='0'", 0_u32); - valid_simple_args_test("x:u32='4294967295'", u32::max_value()); - valid_simple_args_test("x:opt_u32='1'", Some(1_u32)); - valid_simple_args_test::>("x:opt_u32=null", None); - } - - #[test] - fn should_parse_u64_via_args_simple() { - valid_simple_args_test("x:u64='0'", 0_u64); - valid_simple_args_test("x:u64='18446744073709551615'", u64::max_value()); - valid_simple_args_test("x:opt_u64='1'", Some(1_u64)); - valid_simple_args_test::>("x:opt_u64=null", None); - } - - #[test] - fn should_parse_u128_via_args_simple() { - valid_simple_args_test("x:u128='0'", U128::zero()); - valid_simple_args_test( - "x:u128='340282366920938463463374607431768211455'", - U128::max_value(), - ); - valid_simple_args_test("x:opt_u128='1'", Some(U128::from(1))); - valid_simple_args_test::>("x:opt_u128=null", None); - } - - #[test] - fn should_parse_u256_via_args_simple() { - valid_simple_args_test("x:u256='0'", U256::zero()); - valid_simple_args_test( - "x:u256='115792089237316195423570985008687907853269984665640564039457584007913129639935'", - U256::max_value(), - ); - valid_simple_args_test("x:opt_u256='1'", Some(U256::from(1))); - valid_simple_args_test::>("x:opt_u256=null", None); - } - - #[test] - fn 
should_parse_u512_via_args_simple() { - valid_simple_args_test("x:u512='0'", U512::zero()); - valid_simple_args_test( - "x:u512='134078079299425970995740249982058461274793658205923933777235614437217640300735\ - 46976801874298166903427690031858186486050853753882811946569946433649006084095'", - U512::max_value(), - ); - valid_simple_args_test("x:opt_u512='1'", Some(U512::from(1))); - valid_simple_args_test::>("x:opt_u512=null", None); - } - - #[test] - fn should_parse_unit_via_args_simple() { - valid_simple_args_test("x:unit=''", ()); - valid_simple_args_test("x:opt_unit=''", Some(())); - valid_simple_args_test::>("x:opt_unit=null", None); - } - - #[test] - fn should_parse_string_via_args_simple() { - let value = String::from("test string"); - valid_simple_args_test(&format!("x:string='{}'", value), value.clone()); - valid_simple_args_test(&format!("x:opt_string='{}'", value), Some(value)); - valid_simple_args_test::>("x:opt_string=null", None); - } - - #[test] - fn should_parse_key_via_args_simple() { - let bytes = (1..33).collect::>(); - let array = <[u8; 32]>::try_from(bytes.as_ref()).unwrap(); - - let key_account = Key::Account(AccountHash::new(array)); - let key_hash = Key::Hash(array); - let key_uref = Key::URef(URef::new(array, AccessRights::NONE)); - - for key in &[key_account, key_hash, key_uref] { - valid_simple_args_test(&format!("x:key='{}'", key.to_formatted_string()), *key); - valid_simple_args_test( - &format!("x:opt_key='{}'", key.to_formatted_string()), - Some(*key), - ); - valid_simple_args_test::>("x:opt_key=null", None); - } - } - - #[test] - fn should_parse_account_hash_via_args_simple() { - let bytes = (1..33).collect::>(); - let array = <[u8; 32]>::try_from(bytes.as_ref()).unwrap(); - let value = AccountHash::new(array); - valid_simple_args_test( - &format!("x:account_hash='{}'", value.to_formatted_string()), - value, - ); - valid_simple_args_test( - &format!("x:opt_account_hash='{}'", value.to_formatted_string()), - Some(value), - ); - 
valid_simple_args_test::>("x:opt_account_hash=null", None); - } - - #[test] - fn should_parse_uref_via_args_simple() { - let bytes = (1..33).collect::>(); - let array = <[u8; 32]>::try_from(bytes.as_ref()).unwrap(); - let value = URef::new(array, AccessRights::READ_ADD_WRITE); - valid_simple_args_test(&format!("x:uref='{}'", value.to_formatted_string()), value); - valid_simple_args_test( - &format!("x:opt_uref='{}'", value.to_formatted_string()), - Some(value), - ); - valid_simple_args_test::>("x:opt_uref=null", None); - } - - #[test] - fn should_parse_public_key_via_args_simple() { - let hex_value = "0119bf44096984cdfe8541bac167dc3b96c85086aa30b6b6cb0c5c38ad703166e1"; - let value = PublicKey::from_hex(hex_value).unwrap(); - valid_simple_args_test(&format!("x:public_key='{}'", hex_value), value.clone()); - valid_simple_args_test(&format!("x:opt_public_key='{}'", hex_value), Some(value)); - valid_simple_args_test::>("x:opt_public_key=null", None); - } - - #[test] - fn should_fail_to_parse_bad_args() { - invalid_simple_args_test(bad::ARG_BAD_TYPE); - invalid_simple_args_test(bad::ARG_GIBBERISH); - invalid_simple_args_test(bad::ARG_UNQUOTED); - invalid_simple_args_test(bad::EMPTY); - invalid_simple_args_test(bad::LARGE_2K_INPUT); - } - - #[test] - fn should_fail_to_parse_conflicting_arg_types() { - assert_eq!( - parse_session_info( - "", - "name", - "", - "", - "", - &["something:u32='0'"], - "path_to/file", - "", - "entrypoint", - ) - .map(|_| ()) - .map_err(ErrWrapper), - Err(Error::ConflictingArguments { - context: "parse_session_info", - args: vec!["session_args".to_owned(), "session_args_complex".to_owned()] - } - .into()) - ); - assert_eq!( - parse_payment_info( - "", - "name", - "", - "", - "", - "", - &["something:u32='0'"], - "path_to/file", - "", - "entrypoint", - ) - .map(|_| ()) - .map_err(ErrWrapper), - Err(Error::ConflictingArguments { - context: "parse_payment_info", - args: vec!["payment_args".to_owned(), "payment_args_complex".to_owned()] - } - 
.into()) - ); - } - - #[test] - fn should_fail_to_parse_conflicting_session_parameters() { - assert_eq!( - parse_session_info( - happy::HASH, - happy::NAME, - happy::PACKAGE_HASH, - happy::PACKAGE_NAME, - happy::PATH, - &[], - "", - "", - "", - ) - .map(|_| ()) - .map_err(ErrWrapper), - Err(Error::ConflictingArguments { - context: "parse_session_info", - args: vec![ - "session_hash=09dcee4b212cfd53642ab323fbef07dafafc6f945a80a00147f62910a915c4e6".into(), - "session_name=name".into(), - "session_package_hash=09dcee4b212cfd53642ab323fbef07dafafc6f945a80a00147f62910a915c4e6".into(), - "session_package_name=package_name".into(), - "session_path=./session.wasm".into() - ] - } - .into()) - ); - } - - #[test] - fn should_fail_to_parse_conflicting_payment_parameters() { - assert_eq!( - parse_payment_info("12345", happy::HASH, happy::NAME, happy::PACKAGE_HASH, happy::PACKAGE_NAME, happy::PATH, &[], "", "", "",) - .map(|_| ()) - .map_err(ErrWrapper), - Err(Error::ConflictingArguments { - context: "parse_payment_info", - args: vec![ - "payment_amount=12345".into(), - "payment_hash=09dcee4b212cfd53642ab323fbef07dafafc6f945a80a00147f62910a915c4e6".into(), - "payment_name=name".into(), - "payment_package_hash=09dcee4b212cfd53642ab323fbef07dafafc6f945a80a00147f62910a915c4e6".into(), - "payment_package_name=package_name".into(), - "payment_path=./session.wasm".into(), - ] - } - .into()) - ); - } - - mod missing_args { - - use super::*; - - /// Implements a unit test that ensures missing fields result in an error to the caller. - macro_rules! 
impl_test_missing_required_arg { - ($t:ident, $name:ident, $field:tt => $value:expr, missing: $missing:expr, context: $context:expr) => { - #[test] - fn $name() { - let info: StdResult = $t { - $field: $value, - ..Default::default() - } - .try_into() - .map_err(ErrWrapper); - assert_eq!( - info, - Err(Error::InvalidArgument( - $context, - format!( - "Field {} also requires following fields to be provided: {:?}", - stringify!($field), - $missing - ) - ) - .into()) - ); - } - }; - } - - impl_test_missing_required_arg!( - SessionStrParams, - session_name_should_fail_to_parse_missing_entry_point, - session_name => happy::NAME, - missing: ["session_entry_point"], - context: "parse_session_info" - ); - impl_test_missing_required_arg!( - SessionStrParams, - session_hash_should_fail_to_parse_missing_entry_point, - session_hash => happy::HASH, - missing: ["session_entry_point"], - context: "parse_session_info" - ); - impl_test_missing_required_arg!( - SessionStrParams, - session_package_hash_should_fail_to_parse_missing_entry_point, - session_package_hash => happy::PACKAGE_HASH, - missing: ["session_entry_point"], - context: "parse_session_info" - ); - impl_test_missing_required_arg!( - SessionStrParams, - session_package_name_should_fail_to_parse_missing_entry_point, - session_package_name => happy::PACKAGE_NAME, - missing: ["session_entry_point"], - context: "parse_session_info" - ); - - impl_test_missing_required_arg!( - PaymentStrParams, - payment_name_should_fail_to_parse_missing_entry_point, - payment_name => happy::NAME, - missing: ["payment_entry_point"], - context: "parse_payment_info" - ); - impl_test_missing_required_arg!( - PaymentStrParams, - payment_hash_should_fail_to_parse_missing_entry_point, - payment_hash => happy::HASH, - missing: ["payment_entry_point"], - context: "parse_payment_info" - ); - impl_test_missing_required_arg!( - PaymentStrParams, - payment_package_hash_should_fail_to_parse_missing_entry_point, - payment_package_hash => happy::HASH, - 
missing: ["payment_entry_point"], - context: "parse_payment_info" - ); - impl_test_missing_required_arg!( - PaymentStrParams, - payment_package_name_should_fail_to_parse_missing_entry_point, - payment_package_name => happy::HASH, - missing: ["payment_entry_point"], - context: "parse_payment_info" - ); - } - - mod conflicting_args { - use super::*; - - /// impl_test_matrix - implements many tests for SessionStrParams or PaymentStrParams which - /// ensures that an error is returned when the permutation they define is executed. - /// - /// For instance, it is neccesary to check that when `session_path` is set, other arguments - /// are not. - /// - /// For example, a sample invocation with one test: ``` - /// impl_test_matrix![ - /// type: SessionStrParams, - /// context: "parse_session_info", - /// session_str_params[ - /// test[ - /// session_path => happy::PATH, - /// conflict: session_package_hash => happy::PACKAGE_HASH, - /// requires[], - /// path_conflicts_with_package_hash - /// ] - /// ] - /// ]; - /// ``` - /// - /// This generates the following test module (with the fn name passed), with one test per line in `session_str_params[]`: - /// ``` - /// #[cfg(test)] - /// mod session_str_params { - /// use super::*; - /// - /// #[test] - /// fn path_conflicts_with_package_hash() { - /// let info: StdResult = SessionStrParams { - /// session_path: happy::PATH, - /// session_package_hash: happy::PACKAGE_HASH, - /// ..Default::default() - /// } - /// .try_into() - /// .map_err(ErrWrapper); - /// let mut conflicting = vec![ - /// format!("{}={}", "session_path", happy::PATH), - /// format!("{}={}", "session_package_hash", happy::PACKAGE_HASH), - /// ]; - /// conflicting.sort(); - /// assert_eq!( - /// info, - /// Err(Error::ConflictingArguments { - /// context: "parse_session_info", - /// args: conflicting - /// } - /// .into()) - /// ); - /// } - /// } - /// ``` - macro_rules! impl_test_matrix { - ( - /// Struct for which to define the following tests. 
In our case, SessionStrParams or PaymentStrParams. - type: $t:ident, - /// Expected `context` field to be returned in the `Error::ConflictingArguments{ context, .. }` field. - context: $context:expr, - - /// $module will be our module name. - $module:ident [$( - // many tests can be defined - test[ - /// The argument's ident to be tested, followed by it's value. - $arg:tt => $arg_value:expr, - /// The conflicting argument's ident to be tested, followed by it's value. - conflict: $con:tt => $con_value:expr, - /// A list of any additional fields required by the argument, and their values. - requires[$($req:tt => $req_value:expr),*], - /// fn name for the defined test. - $test_fn_name:ident - ] - )+] - ) => { - #[cfg(test)] - mod $module { - use super::*; - - $( - #[test] - fn $test_fn_name() { - let info: StdResult = $t { - $arg: $arg_value, - $con: $con_value, - $($req: $req_value,),* - ..Default::default() - } - .try_into() - .map_err(ErrWrapper); - let mut conflicting = vec![ - format!("{}={}", stringify!($arg), $arg_value), - format!("{}={}", stringify!($con), $con_value), - ]; - conflicting.sort(); - assert_eq!( - info, - Err(Error::ConflictingArguments { - context: $context, - args: conflicting - } - .into()) - ); - } - )+ - } - }; - } - - // NOTE: there's no need to test a conflicting argument in both directions, since they - // amount to passing two fields to a structs constructor. - // Where a reverse test like this is omitted, a comment should be left. 
- impl_test_matrix![ - type: SessionStrParams, - context: "parse_session_info", - session_str_params[ - - // path - test[session_path => happy::PATH, conflict: session_package_hash => happy::PACKAGE_HASH, requires[], path_conflicts_with_package_hash] - test[session_path => happy::PATH, conflict: session_package_name => happy::PACKAGE_NAME, requires[], path_conflicts_with_package_name] - test[session_path => happy::PATH, conflict: session_hash => happy::HASH, requires[], path_conflicts_with_hash] - test[session_path => happy::PATH, conflict: session_name => happy::HASH, requires[], path_conflicts_with_name] - test[session_path => happy::PATH, conflict: session_version => happy::VERSION, requires[], path_conflicts_with_version] - test[session_path => happy::PATH, conflict: session_entry_point => happy::ENTRY_POINT, requires[], path_conflicts_with_entry_point] - - // name - test[session_name => happy::NAME, conflict: session_package_hash => happy::PACKAGE_HASH, requires[session_entry_point => happy::ENTRY_POINT], name_conflicts_with_package_hash] - test[session_name => happy::NAME, conflict: session_package_name => happy::PACKAGE_NAME, requires[session_entry_point => happy::ENTRY_POINT], name_conflicts_with_package_name] - test[session_name => happy::NAME, conflict: session_hash => happy::HASH, requires[session_entry_point => happy::ENTRY_POINT], name_conflicts_with_hash] - test[session_name => happy::NAME, conflict: session_version => happy::VERSION, requires[session_entry_point => happy::ENTRY_POINT], name_conflicts_with_version] - - // hash - test[session_hash => happy::HASH, conflict: session_package_hash => happy::PACKAGE_HASH, requires[session_entry_point => happy::ENTRY_POINT], hash_conflicts_with_package_hash] - test[session_hash => happy::HASH, conflict: session_package_name => happy::PACKAGE_NAME, requires[session_entry_point => happy::ENTRY_POINT], hash_conflicts_with_package_name] - test[session_hash => happy::HASH, conflict: session_version => 
happy::VERSION, requires[session_entry_point => happy::ENTRY_POINT], hash_conflicts_with_version] - // name <-> hash is already checked - // name <-> path is already checked - - // package_name - // package_name + session_version is optional and allowed - test[session_package_name => happy::PACKAGE_NAME, conflict: session_package_hash => happy::PACKAGE_HASH, requires[session_entry_point => happy::ENTRY_POINT], package_name_conflicts_with_package_hash] - // package_name <-> hash is already checked - // package_name <-> name is already checked - // package_name <-> path is already checked - - // package_hash - // package_hash + session_version is optional and allowed - // package_hash <-> package_name is already checked - // package_hash <-> hash is already checked - // package_hash <-> name is already checked - // package_hash <-> path is already checked - - ] - ]; - - impl_test_matrix![ - type: PaymentStrParams, - context: "parse_payment_info", - payment_str_params[ - - // amount - test[payment_amount => happy::PATH, conflict: payment_package_hash => happy::PACKAGE_HASH, requires[], amount_conflicts_with_package_hash] - test[payment_amount => happy::PATH, conflict: payment_package_name => happy::PACKAGE_NAME, requires[], amount_conflicts_with_package_name] - test[payment_amount => happy::PATH, conflict: payment_hash => happy::HASH, requires[], amount_conflicts_with_hash] - test[payment_amount => happy::PATH, conflict: payment_name => happy::HASH, requires[], amount_conflicts_with_name] - test[payment_amount => happy::PATH, conflict: payment_version => happy::VERSION, requires[], amount_conflicts_with_version] - test[payment_amount => happy::PATH, conflict: payment_entry_point => happy::ENTRY_POINT, requires[], amount_conflicts_with_entry_point] - - // path - // amount <-> path is already checked - test[payment_path => happy::PATH, conflict: payment_package_hash => happy::PACKAGE_HASH, requires[], path_conflicts_with_package_hash] - test[payment_path => happy::PATH, 
conflict: payment_package_name => happy::PACKAGE_NAME, requires[], path_conflicts_with_package_name] - test[payment_path => happy::PATH, conflict: payment_hash => happy::HASH, requires[], path_conflicts_with_hash] - test[payment_path => happy::PATH, conflict: payment_name => happy::HASH, requires[], path_conflicts_with_name] - test[payment_path => happy::PATH, conflict: payment_version => happy::VERSION, requires[], path_conflicts_with_version] - test[payment_path => happy::PATH, conflict: payment_entry_point => happy::ENTRY_POINT, requires[], path_conflicts_with_entry_point] - - // name - // amount <-> path is already checked - test[payment_name => happy::NAME, conflict: payment_package_hash => happy::PACKAGE_HASH, requires[payment_entry_point => happy::ENTRY_POINT], name_conflicts_with_package_hash] - test[payment_name => happy::NAME, conflict: payment_package_name => happy::PACKAGE_NAME, requires[payment_entry_point => happy::ENTRY_POINT], name_conflicts_with_package_name] - test[payment_name => happy::NAME, conflict: payment_hash => happy::HASH, requires[payment_entry_point => happy::ENTRY_POINT], name_conflicts_with_hash] - test[payment_name => happy::NAME, conflict: payment_version => happy::VERSION, requires[payment_entry_point => happy::ENTRY_POINT], name_conflicts_with_version] - - // hash - // amount <-> hash is already checked - test[payment_hash => happy::HASH, conflict: payment_package_hash => happy::PACKAGE_HASH, requires[payment_entry_point => happy::ENTRY_POINT], hash_conflicts_with_package_hash] - test[payment_hash => happy::HASH, conflict: payment_package_name => happy::PACKAGE_NAME, requires[payment_entry_point => happy::ENTRY_POINT], hash_conflicts_with_package_name] - test[payment_hash => happy::HASH, conflict: payment_version => happy::VERSION, requires[payment_entry_point => happy::ENTRY_POINT], hash_conflicts_with_version] - // name <-> hash is already checked - // name <-> path is already checked - - // package_name - // amount <-> 
package_name is already checked - test[payment_package_name => happy::PACKAGE_NAME, conflict: payment_package_hash => happy::PACKAGE_HASH, requires[payment_entry_point => happy::ENTRY_POINT], package_name_conflicts_with_package_hash] - // package_name <-> hash is already checked - // package_name <-> name is already checked - // package_name <-> path is already checked - - // package_hash - // package_hash + session_version is optional and allowed - // amount <-> package_hash is already checked - // package_hash <-> package_name is already checked - // package_hash <-> hash is already checked - // package_hash <-> name is already checked - // package_hash <-> path is already checked - ] - ]; - } -} diff --git a/client/lib/rpc.rs b/client/lib/rpc.rs deleted file mode 100644 index 4ae1f5d525..0000000000 --- a/client/lib/rpc.rs +++ /dev/null @@ -1,393 +0,0 @@ -use std::fs::File; - -use futures::executor; -use jsonrpc_lite::{Id, JsonRpc, Params}; -use rand::Rng; -use reqwest::Client; -use serde::Serialize; -use serde_json::{json, Map, Value}; - -use casper_execution_engine::core::engine_state::ExecutableDeployItem; -use casper_node::{ - crypto::hash::Digest, - rpcs::{ - account::{PutDeploy, PutDeployParams}, - chain::{ - BlockIdentifier, GetBlock, GetBlockParams, GetBlockTransfers, GetBlockTransfersParams, - GetEraInfoBySwitchBlock, GetEraInfoParams, GetStateRootHash, GetStateRootHashParams, - }, - docs::ListRpcs, - info::{GetDeploy, GetDeployParams}, - state::{GetAuctionInfo, GetBalance, GetBalanceParams, GetItem, GetItemParams}, - RpcWithOptionalParams, RpcWithParams, RpcWithoutParams, RPC_API_PATH, - }, - types::{BlockHash, Deploy, DeployHash}, -}; -use casper_types::{AsymmetricType, Key, PublicKey, RuntimeArgs, URef, U512}; - -use crate::{ - deploy::{DeployExt, DeployParams, SendDeploy, Transfer}, - error::{Error, Result}, - validation, -}; - -/// Target for a given transfer. -pub(crate) enum TransferTarget { - /// Transfer to another account. 
- Account(PublicKey), -} - -/// Struct representing a single JSON-RPC call to the casper node. -#[derive(Debug)] -pub(crate) struct RpcCall { - rpc_id: Id, - node_address: String, - verbosity_level: u64, -} - -/// `RpcCall` encapsulates calls made to the casper node service via JSON-RPC. -impl RpcCall { - /// Creates a new RPC instance. - /// - /// `rpc_id` is used for RPC-ID as required by the JSON-RPC specification, and is returned by - /// the node in the corresponding response. - /// - /// `node_address` identifies the network address of the target node's HTTP server, e.g. - /// `"http://127.0.0.1:7777"`. - /// - /// When `verbosity_level` is `1`, the request will be printed to `stdout` with long string - /// fields (e.g. hex-formatted raw Wasm bytes) shortened to a string indicating the char count - /// of the field. When `verbosity_level` is greater than `1`, the request will be printed to - /// `stdout` with no abbreviation of long fields. When `verbosity_level` is `0`, the request - /// will not be printed to `stdout`. 
- pub(crate) fn new(maybe_rpc_id: &str, node_address: &str, verbosity_level: u64) -> Self { - let rpc_id = if maybe_rpc_id.is_empty() { - Id::from(rand::thread_rng().gen::()) - } else if let Ok(i64_id) = maybe_rpc_id.parse::() { - Id::from(i64_id) - } else { - Id::from(maybe_rpc_id.to_string()) - }; - - Self { - rpc_id, - node_address: node_address.trim_end_matches('/').to_string(), - verbosity_level, - } - } - - pub(crate) fn get_deploy(self, deploy_hash: &str) -> Result { - let hash = Digest::from_hex(deploy_hash).map_err(|error| Error::CryptoError { - context: "deploy_hash", - error, - })?; - let params = GetDeployParams { - deploy_hash: DeployHash::new(hash), - }; - GetDeploy::request_with_map_params(self, params) - } - - pub(crate) fn get_item(self, state_root_hash: &str, key: &str, path: &str) -> Result { - let state_root_hash = - Digest::from_hex(state_root_hash).map_err(|error| Error::CryptoError { - context: "state_root_hash", - error, - })?; - - let key = { - if let Ok(key) = Key::from_formatted_str(key) { - key - } else if let Ok(public_key) = PublicKey::from_hex(key) { - Key::Account(public_key.to_account_hash()) - } else { - return Err(Error::FailedToParseKey); - } - }; - - let path = if path.is_empty() { - vec![] - } else { - path.split('/').map(ToString::to_string).collect() - }; - - let params = GetItemParams { - state_root_hash, - key: key.to_formatted_string(), - path: path.clone(), - }; - let response = GetItem::request_with_map_params(self, params)?; - validation::validate_query_response(&response, &state_root_hash, &key, &path)?; - Ok(response) - } - - pub(crate) fn get_state_root_hash(self, maybe_block_identifier: &str) -> Result { - match Self::block_identifier(maybe_block_identifier)? 
{ - Some(block_identifier) => { - let params = GetStateRootHashParams { block_identifier }; - GetStateRootHash::request_with_map_params(self, params) - } - None => GetStateRootHash::request(self), - } - } - - pub(crate) fn get_balance(self, state_root_hash: &str, purse_uref: &str) -> Result { - let state_root_hash = - Digest::from_hex(state_root_hash).map_err(|error| Error::CryptoError { - context: "state_root_hash", - error, - })?; - let uref = URef::from_formatted_str(purse_uref) - .map_err(|error| Error::FailedToParseURef("purse_uref", error))?; - let key = Key::from(uref); - - let params = GetBalanceParams { - state_root_hash, - purse_uref: purse_uref.to_string(), - }; - let response = GetBalance::request_with_map_params(self, params)?; - validation::validate_get_balance_response(&response, &state_root_hash, &key)?; - Ok(response) - } - - pub(crate) fn get_era_info_by_switch_block( - self, - maybe_block_identifier: &str, - ) -> Result { - let response = match Self::block_identifier(maybe_block_identifier)? 
{ - None => GetEraInfoBySwitchBlock::request(self), - Some(block_identifier) => { - let params = GetEraInfoParams { block_identifier }; - GetEraInfoBySwitchBlock::request_with_map_params(self, params) - } - }?; - validation::validate_get_era_info_response(&response)?; - Ok(response) - } - - pub(crate) fn get_auction_info(self) -> Result { - GetAuctionInfo::request(self) - } - - pub(crate) fn list_rpcs(self) -> Result { - ListRpcs::request(self) - } - - pub(crate) fn transfer( - self, - amount: U512, - source_purse: Option, - target: TransferTarget, - id: Option, - deploy_params: DeployParams, - payment: ExecutableDeployItem, - ) -> Result { - const TRANSFER_ARG_AMOUNT: &str = "amount"; - const TRANSFER_ARG_SOURCE: &str = "source"; - const TRANSFER_ARG_TARGET: &str = "target"; - const TRANSFER_ARG_ID: &str = "id"; - - let mut transfer_args = RuntimeArgs::new(); - transfer_args.insert(TRANSFER_ARG_AMOUNT, amount)?; - if let Some(source_purse) = source_purse { - transfer_args.insert(TRANSFER_ARG_SOURCE, source_purse)?; - } - match target { - TransferTarget::Account(target_account) => { - let target_account_hash = target_account.to_account_hash().value(); - transfer_args.insert(TRANSFER_ARG_TARGET, target_account_hash)?; - } - } - transfer_args.insert(TRANSFER_ARG_ID, id)?; - let session = ExecutableDeployItem::Transfer { - args: transfer_args, - }; - let deploy = Deploy::with_payment_and_session(deploy_params, payment, session); - let params = PutDeployParams { deploy }; - Transfer::request_with_map_params(self, params) - } - - pub(crate) fn send_deploy_file(self, input_path: &str) -> Result { - let input = File::open(input_path).map_err(|error| Error::IoError { - context: format!("unable to read input file '{}'", input_path), - error, - })?; - let deploy = Deploy::read_deploy(input)?; - let params = PutDeployParams { deploy }; - SendDeploy::request_with_map_params(self, params) - } - - pub(crate) fn put_deploy(self, deploy: Deploy) -> Result { - let params = 
PutDeployParams { deploy }; - PutDeploy::request_with_map_params(self, params) - } - - pub(crate) fn get_block(self, maybe_block_identifier: &str) -> Result { - let maybe_block_identifier = Self::block_identifier(maybe_block_identifier)?; - let response = match maybe_block_identifier { - Some(block_identifier) => { - let params = GetBlockParams { block_identifier }; - GetBlock::request_with_map_params(self, params) - } - None => GetBlock::request(self), - }?; - validation::validate_get_block_response(&response, &maybe_block_identifier)?; - Ok(response) - } - - pub(crate) fn get_block_transfers(self, maybe_block_identifier: &str) -> Result { - let maybe_block_identifier = Self::block_identifier(maybe_block_identifier)?; - let response = match maybe_block_identifier { - Some(block_identifier) => { - let params = GetBlockTransfersParams { block_identifier }; - GetBlockTransfers::request_with_map_params(self, params) - } - None => GetBlockTransfers::request(self), - }?; - Ok(response) - } - - fn block_identifier(maybe_block_identifier: &str) -> Result> { - if maybe_block_identifier.is_empty() { - return Ok(None); - } - - if maybe_block_identifier.len() == (Digest::LENGTH * 2) { - let hash = - Digest::from_hex(maybe_block_identifier).map_err(|error| Error::CryptoError { - context: "block_identifier", - error, - })?; - Ok(Some(BlockIdentifier::Hash(BlockHash::new(hash)))) - } else { - let height = maybe_block_identifier - .parse() - .map_err(|error| Error::FailedToParseInt("block_identifier", error))?; - Ok(Some(BlockIdentifier::Height(height))) - } - } - - async fn request(self, method: &str, params: Params) -> Result { - let url = format!("{}/{}", self.node_address, RPC_API_PATH); - let rpc_req = JsonRpc::request_with_params(self.rpc_id, method, params); - - crate::pretty_print_at_level(&rpc_req, self.verbosity_level); - - let client = Client::new(); - let response = client - .post(&url) - .json(&rpc_req) - .send() - .await - .map_err(Error::FailedToGetResponse)?; - - 
if let Err(error) = response.error_for_status_ref() { - if self.verbosity_level > 0 { - println!("Failed Sending {}", error); - } - return Err(Error::FailedSending(rpc_req)); - } - - let rpc_response = response.json().await.map_err(Error::FailedToParseResponse); - - if let Err(error) = rpc_response { - if self.verbosity_level > 0 { - println!("Failed parsing as a JSON-RPC response: {}", error); - } - return Err(error); - } - - let rpc_response: JsonRpc = rpc_response?; - - if rpc_response.get_result().is_some() { - if self.verbosity_level > 0 { - println!("Received successful response:"); - } - return Ok(rpc_response); - } - - if let Some(error) = rpc_response.get_error() { - if self.verbosity_level > 0 { - println!("Response returned an error"); - } - return Err(Error::ResponseIsError(error.clone())); - } - - if self.verbosity_level > 0 { - println!("Invalid response returned"); - } - Err(Error::InvalidRpcResponse(rpc_response)) - } -} - -/// General purpose client trait for making requests to casper node's HTTP endpoints. -pub(crate) trait RpcClient { - const RPC_METHOD: &'static str; - - /// Calls a casper node's JSON-RPC endpoint. - fn request(rpc_call: RpcCall) -> Result { - executor::block_on(async { rpc_call.request(Self::RPC_METHOD, Params::None(())).await }) - } - - /// Calls a casper node's JSON-RPC endpoint with parameters. 
- fn request_with_map_params(rpc_call: RpcCall, params: T) -> Result { - executor::block_on(async { - rpc_call - .request(Self::RPC_METHOD, Params::from(params.into_json_map())) - .await - }) - } -} - -impl RpcClient for GetBalance { - const RPC_METHOD: &'static str = Self::METHOD; -} - -impl RpcClient for GetBlock { - const RPC_METHOD: &'static str = Self::METHOD; -} - -impl RpcClient for GetBlockTransfers { - const RPC_METHOD: &'static str = Self::METHOD; -} - -impl RpcClient for GetStateRootHash { - const RPC_METHOD: &'static str = Self::METHOD; -} - -impl RpcClient for GetItem { - const RPC_METHOD: &'static str = ::METHOD; -} - -impl RpcClient for GetEraInfoBySwitchBlock { - const RPC_METHOD: &'static str = Self::METHOD; -} - -impl RpcClient for GetAuctionInfo { - const RPC_METHOD: &'static str = Self::METHOD; -} - -impl RpcClient for ListRpcs { - const RPC_METHOD: &'static str = Self::METHOD; -} - -pub(crate) trait IntoJsonMap: Serialize { - fn into_json_map(self) -> Map - where - Self: Sized, - { - json!(self) - .as_object() - .unwrap_or_else(|| panic!("should be a JSON object")) - .clone() - } -} - -impl IntoJsonMap for PutDeployParams {} -impl IntoJsonMap for GetBlockParams {} -impl IntoJsonMap for GetBlockTransfersParams {} -impl IntoJsonMap for GetStateRootHashParams {} -impl IntoJsonMap for GetDeployParams {} -impl IntoJsonMap for GetBalanceParams {} -impl IntoJsonMap for GetItemParams {} -impl IntoJsonMap for GetEraInfoParams {} -impl IntoJsonMap for ListRpcs {} diff --git a/client/lib/validation.rs b/client/lib/validation.rs deleted file mode 100644 index e36c76dbbb..0000000000 --- a/client/lib/validation.rs +++ /dev/null @@ -1,256 +0,0 @@ -use std::convert::TryFrom; - -use jsonrpc_lite::JsonRpc; -use thiserror::Error; - -use casper_execution_engine::{ - core, core::ValidationError, shared::stored_value::StoredValue, - storage::trie::merkle_proof::TrieMerkleProof, -}; -use casper_node::{ - crypto::hash::Digest, - rpcs::chain::{BlockIdentifier, 
EraSummary, GetEraInfoResult}, - types::{json_compatibility, Block, BlockValidationError, JsonBlock}, -}; -use casper_types::{bytesrepr, Key, U512}; - -const GET_ITEM_RESULT_BALANCE_VALUE: &str = "balance_value"; -const GET_ITEM_RESULT_STORED_VALUE: &str = "stored_value"; -const GET_ITEM_RESULT_MERKLE_PROOF: &str = "merkle_proof"; - -/// Error that can be returned when validating a block returned from a JSON-RPC method. -#[derive(Error, Debug)] -pub enum ValidateResponseError { - /// Failed to marshall value. - #[error("Failed to marshall value {0}")] - BytesRepr(bytesrepr::Error), - - /// Error from serde. - #[error(transparent)] - Serde(#[from] serde_json::Error), - - /// Failed to parse JSON. - #[error("validate_response failed to parse")] - ValidateResponseFailedToParse, - - /// Failed to validate Merkle proofs. - #[error(transparent)] - ValidationError(#[from] ValidationError), - - /// Failed to validate a block. - #[error("Block validation error {0}")] - BlockValidationError(BlockValidationError), - - /// Serialized value not contained in proof. - #[error("serialized value not contained in proof")] - SerializedValueNotContainedInProof, - - /// No block in response. - #[error("no block in response")] - NoBlockInResponse, - - /// Block hash requested does not correspond to response. - #[error("block hash requested does not correspond to response")] - UnexpectedBlockHash, - - /// Block height was not as requested. 
- #[error("block height was not as requested")] - UnexpectedBlockHeight, -} - -impl From for ValidateResponseError { - fn from(e: bytesrepr::Error) -> Self { - ValidateResponseError::BytesRepr(e) - } -} - -impl From for ValidateResponseError { - fn from(e: BlockValidationError) -> Self { - ValidateResponseError::BlockValidationError(e) - } -} - -pub(crate) fn validate_get_era_info_response( - response: &JsonRpc, -) -> Result<(), ValidateResponseError> { - let value = response - .get_result() - .ok_or(ValidateResponseError::ValidateResponseFailedToParse)?; - - let result: GetEraInfoResult = serde_json::from_value(value.to_owned())?; - - match result.era_summary { - Some(EraSummary { - state_root_hash, - era_id, - merkle_proof, - stored_value, - .. - }) => { - let proof_bytes = hex::decode(merkle_proof) - .map_err(|_| ValidateResponseError::ValidateResponseFailedToParse)?; - let proofs: Vec> = - bytesrepr::deserialize(proof_bytes)?; - let key = Key::EraInfo(era_id); - let path = &[]; - - let proof_value = match stored_value { - json_compatibility::StoredValue::EraInfo(era_info) => { - StoredValue::EraInfo(era_info) - } - _ => return Err(ValidateResponseError::ValidateResponseFailedToParse), - }; - - core::validate_query_proof( - &state_root_hash.to_owned().into(), - &proofs, - &key, - path, - &proof_value, - ) - .map_err(Into::into) - } - None => Ok(()), - } -} - -pub(crate) fn validate_query_response( - response: &JsonRpc, - state_root_hash: &Digest, - key: &Key, - path: &[String], -) -> Result<(), ValidateResponseError> { - let value = response - .get_result() - .ok_or(ValidateResponseError::ValidateResponseFailedToParse)?; - - let object = value - .as_object() - .ok_or(ValidateResponseError::ValidateResponseFailedToParse)?; - - let proofs: Vec> = { - let proof = object - .get(GET_ITEM_RESULT_MERKLE_PROOF) - .ok_or(ValidateResponseError::ValidateResponseFailedToParse)?; - let proof_str = proof - .as_str() - 
.ok_or(ValidateResponseError::ValidateResponseFailedToParse)?; - let proof_bytes = hex::decode(proof_str) - .map_err(|_| ValidateResponseError::ValidateResponseFailedToParse)?; - bytesrepr::deserialize(proof_bytes)? - }; - - let proof_value: &StoredValue = { - let last_proof = proofs - .last() - .ok_or(ValidateResponseError::ValidateResponseFailedToParse)?; - last_proof.value() - }; - - // Here we need to validate that JSON `stored_value` is contained in the proof. - // - // Possible to deserialize that field into a `StoredValue` and pass below to - // `validate_query_proof` instead of using this approach? - { - let value: json_compatibility::StoredValue = { - let value = object - .get(GET_ITEM_RESULT_STORED_VALUE) - .ok_or(ValidateResponseError::ValidateResponseFailedToParse)?; - serde_json::from_value(value.to_owned())? - }; - match json_compatibility::StoredValue::try_from(proof_value) { - Ok(json_proof_value) if json_proof_value == value => (), - _ => return Err(ValidateResponseError::SerializedValueNotContainedInProof), - } - } - - core::validate_query_proof( - &state_root_hash.to_owned().into(), - &proofs, - key, - path, - proof_value, - ) - .map_err(Into::into) -} - -pub(crate) fn validate_get_balance_response( - response: &JsonRpc, - state_root_hash: &Digest, - key: &Key, -) -> Result<(), ValidateResponseError> { - let value = response - .get_result() - .ok_or(ValidateResponseError::ValidateResponseFailedToParse)?; - - let object = value - .as_object() - .ok_or(ValidateResponseError::ValidateResponseFailedToParse)?; - - let balance_proof: TrieMerkleProof = { - let proof = object - .get(GET_ITEM_RESULT_MERKLE_PROOF) - .ok_or(ValidateResponseError::ValidateResponseFailedToParse)?; - let proof_str = proof - .as_str() - .ok_or(ValidateResponseError::ValidateResponseFailedToParse)?; - let proof_bytes = hex::decode(proof_str) - .map_err(|_| ValidateResponseError::ValidateResponseFailedToParse)?; - bytesrepr::deserialize(proof_bytes)? 
- }; - - let balance: U512 = { - let value = object - .get(GET_ITEM_RESULT_BALANCE_VALUE) - .ok_or(ValidateResponseError::ValidateResponseFailedToParse)?; - let value_str = value - .as_str() - .ok_or(ValidateResponseError::ValidateResponseFailedToParse)?; - U512::from_dec_str(value_str) - .map_err(|_| ValidateResponseError::ValidateResponseFailedToParse)? - }; - - core::validate_balance_proof( - &state_root_hash.to_owned().into(), - &balance_proof, - *key, - &balance, - ) - .map_err(Into::into) -} - -pub(crate) fn validate_get_block_response( - response: &JsonRpc, - maybe_block_identifier: &Option, -) -> Result<(), ValidateResponseError> { - let maybe_result = response.get_result(); - let json_block_value = maybe_result - .and_then(|value| value.get("block")) - .ok_or(ValidateResponseError::NoBlockInResponse)?; - let maybe_json_block: Option = serde_json::from_value(json_block_value.to_owned())?; - let json_block = if let Some(json_block) = maybe_json_block { - json_block - } else { - return Ok(()); - }; - let block = Block::from(json_block); - block.verify()?; - match maybe_block_identifier { - Some(BlockIdentifier::Hash(block_hash)) => { - if block_hash != block.hash() { - return Err(ValidateResponseError::UnexpectedBlockHash); - } - } - Some(BlockIdentifier::Height(height)) => { - // More is necessary here to mitigate a MITM attack - if height != &block.height() { - return Err(ValidateResponseError::UnexpectedBlockHeight); - } - } - // More is necessary here to mitigate a MITM attack. 
In this case we would want to validate - // `block.proofs()` to make sure that 1/3 of the validator weight signed the block, and we - // would have to know the latest validators through some trustworthy means - None => (), - } - Ok(()) -} diff --git a/client/src/account_address.rs b/client/src/account_address.rs deleted file mode 100644 index 6120c2fd9a..0000000000 --- a/client/src/account_address.rs +++ /dev/null @@ -1,91 +0,0 @@ -use std::{fs, str}; - -use clap::{App, Arg, ArgMatches, SubCommand}; - -use casper_client::Error; -use casper_types::{AsymmetricType, PublicKey}; - -use crate::{command::ClientCommand, common, Success}; - -/// This struct defines the order in which the args are shown for this subcommand's help message. -enum DisplayOrder { - Verbose, - Key, -} - -/// Handles providing the arg for and retrieval of the public key. -mod public_key { - use casper_node::crypto::AsymmetricKeyExt; - use casper_types::AsymmetricType; - - use super::*; - - const ARG_NAME: &str = "public-key"; - const ARG_SHORT: &str = "p"; - const ARG_VALUE_NAME: &str = "FORMATTED STRING or PATH"; - const ARG_HELP: &str = - "This must be a properly formatted public key. The public key may instead be read in from \ - a file, in which case enter the path to the file as the --public-key argument. The file \ - should be one of the two public key files generated via the `keygen` subcommand; \ - \"public_key_hex\" or \"public_key.pem\""; - - pub(super) fn arg() -> Arg<'static, 'static> { - Arg::with_name(ARG_NAME) - .long(ARG_NAME) - .short(ARG_SHORT) - .required(true) - .value_name(ARG_VALUE_NAME) - .help(ARG_HELP) - .display_order(DisplayOrder::Key as usize) - } - - pub(super) fn get(matches: &ArgMatches) -> Result { - let value = matches - .value_of(ARG_NAME) - .unwrap_or_else(|| panic!("should have {} arg", ARG_NAME)); - - // Try to read as a PublicKey PEM file first. 
- if let Ok(public_key) = PublicKey::from_file(value) { - return Ok(public_key.to_hex()); - } - - // Try to read as a hex-encoded PublicKey file next. - if let Ok(hex_public_key) = fs::read_to_string(value) { - let _ = PublicKey::from_hex(&hex_public_key).map_err(|error| { - eprintln!( - "Can't parse the contents of {} as a public key: {}", - value, error - ); - Error::FailedToParseKey - })?; - return Ok(hex_public_key); - } - - Ok(value.to_string()) - } -} - -pub struct GenerateAccountHash {} - -impl<'a, 'b> ClientCommand<'a, 'b> for GenerateAccountHash { - const NAME: &'static str = "account-address"; - const ABOUT: &'static str = "Generates an account hash from a given public key"; - - fn build(display_order: usize) -> App<'a, 'b> { - SubCommand::with_name(Self::NAME) - .about(Self::ABOUT) - .display_order(display_order) - .arg(common::verbose::arg(DisplayOrder::Verbose as usize)) - .arg(public_key::arg()) - } - - fn run(matches: &ArgMatches<'_>) -> Result { - let hex_public_key = public_key::get(matches)?; - let public_key = PublicKey::from_hex(&hex_public_key).map_err(|error| { - eprintln!("Can't parse {} as a public key: {}", hex_public_key, error); - Error::FailedToParseKey - })?; - let account_hash = public_key.to_account_hash(); - Ok(Success::Output(account_hash.to_string())) - } -} diff --git a/client/src/block.rs b/client/src/block.rs deleted file mode 100644 index 799127da1b..0000000000 --- a/client/src/block.rs +++ /dev/null @@ -1,2 +0,0 @@ -mod get; -mod transfers; diff --git a/client/src/block/get.rs b/client/src/block/get.rs deleted file mode 100644 index d6c7b3b8e3..0000000000 --- a/client/src/block/get.rs +++ /dev/null @@ -1,45 +0,0 @@ -use std::str; - -use clap::{App, ArgMatches, SubCommand}; - -use casper_client::Error; -use casper_node::rpcs::chain::GetBlock; - -use crate::{command::ClientCommand, common, Success}; - -/// This struct defines the order in which the args are shown for this subcommand. 
-enum DisplayOrder { - Verbose, - NodeAddress, - RpcId, - BlockIdentifier, -} - -impl<'a, 'b> ClientCommand<'a, 'b> for GetBlock { - const NAME: &'static str = "get-block"; - const ABOUT: &'static str = "Retrieves a block from the network"; - - fn build(display_order: usize) -> App<'a, 'b> { - SubCommand::with_name(Self::NAME) - .about(Self::ABOUT) - .display_order(display_order) - .arg(common::verbose::arg(DisplayOrder::Verbose as usize)) - .arg(common::node_address::arg( - DisplayOrder::NodeAddress as usize, - )) - .arg(common::rpc_id::arg(DisplayOrder::RpcId as usize)) - .arg(common::block_identifier::arg( - DisplayOrder::BlockIdentifier as usize, - )) - } - - fn run(matches: &ArgMatches<'_>) -> Result { - let maybe_rpc_id = common::rpc_id::get(matches); - let node_address = common::node_address::get(matches); - let verbosity_level = common::verbose::get(matches); - let maybe_block_id = common::block_identifier::get(matches); - - casper_client::get_block(maybe_rpc_id, node_address, verbosity_level, maybe_block_id) - .map(Success::from) - } -} diff --git a/client/src/block/transfers.rs b/client/src/block/transfers.rs deleted file mode 100644 index a30fc3a018..0000000000 --- a/client/src/block/transfers.rs +++ /dev/null @@ -1,50 +0,0 @@ -use std::str; - -use clap::{App, ArgMatches, SubCommand}; - -use casper_client::Error; -use casper_node::rpcs::chain::GetBlockTransfers; - -use crate::{command::ClientCommand, common, Success}; - -/// This struct defines the order in which the args are shown for this subcommand. 
-enum DisplayOrder { - Verbose, - NodeAddress, - RpcId, - BlockIdentifier, -} - -impl<'a, 'b> ClientCommand<'a, 'b> for GetBlockTransfers { - const NAME: &'static str = "get-block-transfers"; - const ABOUT: &'static str = "Retrieves all transfers for a block from the network"; - - fn build(display_order: usize) -> App<'a, 'b> { - SubCommand::with_name(Self::NAME) - .about(Self::ABOUT) - .display_order(display_order) - .arg(common::verbose::arg(DisplayOrder::Verbose as usize)) - .arg(common::node_address::arg( - DisplayOrder::NodeAddress as usize, - )) - .arg(common::rpc_id::arg(DisplayOrder::RpcId as usize)) - .arg(common::block_identifier::arg( - DisplayOrder::BlockIdentifier as usize, - )) - } - - fn run(matches: &ArgMatches<'_>) -> Result { - let maybe_rpc_id = common::rpc_id::get(matches); - let node_address = common::node_address::get(matches); - let verbosity_level = common::verbose::get(matches); - let maybe_block_id = common::block_identifier::get(matches); - - casper_client::get_block_transfers( - maybe_rpc_id, - node_address, - verbosity_level, - maybe_block_id, - ) - .map(Success::from) - } -} diff --git a/client/src/command.rs b/client/src/command.rs deleted file mode 100644 index 9207a62964..0000000000 --- a/client/src/command.rs +++ /dev/null @@ -1,27 +0,0 @@ -use clap::{App, ArgMatches}; -use jsonrpc_lite::JsonRpc; - -use casper_client::Error; - -/// The result of a successful execution of a given client command. -pub enum Success { - /// The success response to a JSON-RPC request. - Response(JsonRpc), - /// The output which should be presented to the user for non-RPC client commands. - Output(String), -} - -impl From for Success { - fn from(response: JsonRpc) -> Self { - Success::Response(response) - } -} - -pub trait ClientCommand<'a, 'b> { - const NAME: &'static str; - const ABOUT: &'static str; - /// Constructs the clap `SubCommand` and returns the clap `App`. 
- fn build(display_order: usize) -> App<'a, 'b>; - /// Parses the arg matches and runs the subcommand. - fn run(matches: &ArgMatches<'_>) -> Result; -} diff --git a/client/src/common.rs b/client/src/common.rs deleted file mode 100644 index 7e3eb35a65..0000000000 --- a/client/src/common.rs +++ /dev/null @@ -1,191 +0,0 @@ -use clap::{Arg, ArgMatches}; - -pub const ARG_PATH: &str = "PATH"; -pub const ARG_HEX_STRING: &str = "HEX STRING"; -pub const ARG_STRING: &str = "STRING"; -pub const ARG_INTEGER: &str = "INTEGER"; - -/// Handles the arg for whether verbose output is required or not. -pub mod verbose { - use super::*; - - pub const ARG_NAME: &str = "verbose"; - const ARG_NAME_SHORT: &str = "v"; - const ARG_HELP: &str = - "Generates verbose output, e.g. prints the RPC request. If repeated by using '-vv' then \ - all output will be extra verbose, meaning that large JSON strings will be shown in full"; - - pub fn arg(order: usize) -> Arg<'static, 'static> { - Arg::with_name(ARG_NAME) - .short(ARG_NAME_SHORT) - .required(false) - .multiple(true) - .help(ARG_HELP) - .display_order(order) - } - - pub fn get(matches: &ArgMatches) -> u64 { - matches.occurrences_of(ARG_NAME) - } -} - -/// Handles providing the arg for and retrieval of the node hostname/IP and port. 
-pub mod node_address { - use super::*; - - const ARG_NAME: &str = "node-address"; - const ARG_SHORT: &str = "n"; - const ARG_VALUE_NAME: &str = "HOST:PORT"; - const ARG_DEFAULT: &str = "http://localhost:7777"; - const ARG_HELP: &str = "Hostname or IP and port of node on which HTTP service is running"; - - pub fn arg(order: usize) -> Arg<'static, 'static> { - Arg::with_name(ARG_NAME) - .long(ARG_NAME) - .short(ARG_SHORT) - .required(true) - .default_value(ARG_DEFAULT) - .value_name(ARG_VALUE_NAME) - .help(ARG_HELP) - .display_order(order) - } - - pub fn get<'a>(matches: &'a ArgMatches) -> &'a str { - matches - .value_of(ARG_NAME) - .unwrap_or_else(|| panic!("should have {} arg", ARG_NAME)) - } -} - -/// Handles providing the arg for the RPC ID. -pub mod rpc_id { - use super::*; - - const ARG_NAME: &str = "id"; - const ARG_VALUE_NAME: &str = "STRING OR INTEGER"; - const ARG_HELP: &str = - "JSON-RPC identifier, applied to the request and returned in the response. If not \ - provided, a random integer will be assigned"; - - pub fn arg(order: usize) -> Arg<'static, 'static> { - Arg::with_name(ARG_NAME) - .long(ARG_NAME) - .required(false) - .value_name(ARG_VALUE_NAME) - .help(ARG_HELP) - .display_order(order) - } - - pub fn get<'a>(matches: &'a ArgMatches) -> &'a str { - matches.value_of(ARG_NAME).unwrap_or_default() - } -} - -/// Handles providing the arg for and retrieval of the secret key. 
-pub mod secret_key { - use super::*; - - const ARG_NAME: &str = "secret-key"; - const ARG_SHORT: &str = "k"; - const ARG_VALUE_NAME: &str = super::ARG_PATH; - const ARG_HELP: &str = "Path to secret key file"; - - pub fn arg(order: usize) -> Arg<'static, 'static> { - Arg::with_name(ARG_NAME) - .long(ARG_NAME) - .short(ARG_SHORT) - .required(true) - .value_name(ARG_VALUE_NAME) - .help(ARG_HELP) - .display_order(order) - } - - pub fn get<'a>(matches: &'a ArgMatches) -> &'a str { - matches - .value_of(ARG_NAME) - .unwrap_or_else(|| panic!("should have {} arg", ARG_NAME)) - } -} - -/// Handles the arg for whether to overwrite existing output file(s). -pub mod force { - use super::*; - - pub const ARG_NAME: &str = "force"; - const ARG_NAME_SHORT: &str = "f"; - const ARG_HELP_SINGULAR: &str = - "If this flag is passed and the output file already exists, it will be overwritten. \ - Without this flag, if the output file already exists, the command will fail"; - const ARG_HELP_PLURAL: &str = - "If this flag is passed, any existing output files will be overwritten. Without this flag, \ - if any output file exists, no output files will be generated and the command will fail"; - - pub fn arg(order: usize, singular: bool) -> Arg<'static, 'static> { - Arg::with_name(ARG_NAME) - .long(ARG_NAME) - .short(ARG_NAME_SHORT) - .required(false) - .help(if singular { - ARG_HELP_SINGULAR - } else { - ARG_HELP_PLURAL - }) - .display_order(order) - } - - pub fn get(matches: &ArgMatches) -> bool { - matches.is_present(ARG_NAME) - } -} - -/// Handles providing the arg for and retrieval of the state root hash. 
-pub mod state_root_hash { - use super::*; - - const ARG_NAME: &str = "state-root-hash"; - const ARG_SHORT: &str = "s"; - const ARG_VALUE_NAME: &str = super::ARG_HEX_STRING; - const ARG_HELP: &str = "Hex-encoded hash of the state root"; - - pub(crate) fn arg(order: usize) -> Arg<'static, 'static> { - Arg::with_name(ARG_NAME) - .long(ARG_NAME) - .short(ARG_SHORT) - .required(true) - .value_name(ARG_VALUE_NAME) - .help(ARG_HELP) - .display_order(order) - } - - pub(crate) fn get<'a>(matches: &'a ArgMatches) -> &'a str { - matches - .value_of(ARG_NAME) - .unwrap_or_else(|| panic!("should have {} arg", ARG_NAME)) - } -} - -/// Handles providing the arg for and retrieval of the block hash or block height. -pub mod block_identifier { - use super::*; - - const ARG_NAME: &str = "block-identifier"; - const ARG_SHORT: &str = "b"; - const ARG_VALUE_NAME: &str = "HEX STRING OR INTEGER"; - const ARG_HELP: &str = - "Hex-encoded block hash or height of the block. If not given, the last block added to the \ - chain as known at the given node will be used"; - - pub(crate) fn arg(order: usize) -> Arg<'static, 'static> { - Arg::with_name(ARG_NAME) - .long(ARG_NAME) - .short(ARG_SHORT) - .required(false) - .value_name(ARG_VALUE_NAME) - .help(ARG_HELP) - .display_order(order) - } - - pub(crate) fn get<'a>(matches: &'a ArgMatches) -> &'a str { - matches.value_of(ARG_NAME).unwrap_or_default() - } -} diff --git a/client/src/deploy.rs b/client/src/deploy.rs deleted file mode 100644 index 04dd1f1be2..0000000000 --- a/client/src/deploy.rs +++ /dev/null @@ -1,15 +0,0 @@ -mod creation_common; -mod get; -mod list; -mod make; -mod put; -mod send; -mod sign; -mod transfer; - -pub use transfer::Transfer; - -pub use list::ListDeploys; -pub use make::MakeDeploy; -pub use send::SendDeploy; -pub use sign::SignDeploy; diff --git a/client/src/deploy/creation_common.rs b/client/src/deploy/creation_common.rs deleted file mode 100644 index 46a7aecba5..0000000000 --- a/client/src/deploy/creation_common.rs 
+++ /dev/null @@ -1,916 +0,0 @@ -//! This module contains structs and helpers which are used by multiple subcommands related to -//! creating deploys. - -use std::process; - -use clap::{App, AppSettings, Arg, ArgGroup, ArgMatches}; - -use casper_client::{help, PaymentStrParams, SessionStrParams}; - -use crate::common; - -/// This struct defines the order in which the args are shown for this subcommand's help message. -pub(super) enum DisplayOrder { - ShowArgExamples, - Verbose, - NodeAddress, - RpcId, - SecretKey, - Input, - Output, - TransferAmount, - TransferTargetAccount, - TransferId, - Timestamp, - Ttl, - GasPrice, - Dependencies, - ChainName, - SessionCode, - SessionArgSimple, - SessionArgsComplex, - SessionHash, - SessionName, - SessionPackageHash, - SessionPackageName, - SessionEntryPoint, - SessionVersion, - StandardPayment, - PaymentCode, - PaymentArgSimple, - PaymentArgsComplex, - PaymentHash, - PaymentName, - PaymentPackageHash, - PaymentPackageName, - PaymentEntryPoint, - PaymentVersion, -} - -/// Handles providing the arg for and executing the show-arg-examples option. 
-pub(super) mod show_arg_examples { - use super::*; - - pub(in crate::deploy) const ARG_NAME: &str = "show-arg-examples"; - const ARG_SHORT: &str = "e"; - const ARG_HELP: &str = - "If passed, all other options are ignored and a set of examples of session-/payment-args \ - is printed"; - - pub(in crate::deploy) fn arg() -> Arg<'static, 'static> { - Arg::with_name(ARG_NAME) - .long(ARG_NAME) - .short(ARG_SHORT) - .required(false) - .help(ARG_HELP) - .display_order(DisplayOrder::ShowArgExamples as usize) - } - - pub(in crate::deploy) fn get(matches: &ArgMatches) -> bool { - if !matches.is_present(ARG_NAME) { - return false; - } - - println!("Examples for passing values via --session-arg or --payment-arg:"); - println!("{}", help::supported_cl_type_examples()); - - true - } -} - -pub(super) fn session_str_params<'a>(matches: &'a ArgMatches) -> SessionStrParams<'a> { - let session_args_simple = arg_simple::session::get(matches); - let session_args_complex = args_complex::session::get(matches); - if let Some(session_path) = session_path::get(matches) { - return SessionStrParams::with_path( - session_path, - session_args_simple, - session_args_complex, - ); - } - let session_entry_point = session_entry_point::get(matches); - if let Some(session_hash) = session_hash::get(matches) { - return SessionStrParams::with_hash( - session_hash, - session_entry_point, - session_args_simple, - session_args_complex, - ); - } - if let Some(session_name) = session_name::get(matches) { - return SessionStrParams::with_name( - session_name, - session_entry_point, - session_args_simple, - session_args_complex, - ); - } - let session_version = session_version::get(matches); - if let Some(session_package_hash) = session_package_hash::get(matches) { - return SessionStrParams::with_package_hash( - session_package_hash, - session_version, - session_entry_point, - session_args_simple, - session_args_complex, - ); - } - if let Some(session_package_name) = session_package_name::get(matches) { - 
return SessionStrParams::with_package_name( - session_package_name, - session_version, - session_entry_point, - session_args_simple, - session_args_complex, - ); - } - unreachable!("clap arg groups and parsing should prevent this") -} - -pub(super) fn payment_str_params<'a>(matches: &'a ArgMatches) -> PaymentStrParams<'a> { - if let Some(payment_amount) = standard_payment_amount::get(matches) { - return PaymentStrParams::with_amount(payment_amount); - } - let payment_args_simple = arg_simple::payment::get(matches); - let payment_args_complex = args_complex::payment::get(matches); - if let Some(payment_path) = payment_path::get(matches) { - return PaymentStrParams::with_path( - payment_path, - payment_args_simple, - payment_args_complex, - ); - } - let payment_entry_point = payment_entry_point::get(matches); - if let Some(payment_hash) = payment_hash::get(matches) { - return PaymentStrParams::with_hash( - payment_hash, - payment_entry_point, - payment_args_simple, - payment_args_complex, - ); - } - if let Some(payment_name) = payment_name::get(matches) { - return PaymentStrParams::with_name( - payment_name, - payment_entry_point, - payment_args_simple, - payment_args_complex, - ); - } - let payment_version = payment_version::get(matches); - if let Some(payment_package_hash) = payment_package_hash::get(matches) { - return PaymentStrParams::with_package_hash( - payment_package_hash, - payment_version, - payment_entry_point, - payment_args_simple, - payment_args_complex, - ); - } - if let Some(payment_package_name) = payment_package_name::get(matches) { - return PaymentStrParams::with_package_name( - payment_package_name, - payment_version, - payment_entry_point, - payment_args_simple, - payment_args_complex, - ); - } - unreachable!("clap arg groups and parsing should prevent this") -} - -/// Handles providing the arg for and retrieval of the timestamp. 
-pub(super) mod timestamp { - use super::*; - - const ARG_NAME: &str = "timestamp"; - const ARG_VALUE_NAME: &str = "TIMESTAMP"; - const ARG_HELP: &str = - "RFC3339-like formatted timestamp, e.g. '2018-02-16 00:31:37'. If not provided, the \ - current time will be used. Note that timestamp is UTC, not local. See \ - https://docs.rs/humantime/latest/humantime/fn.parse_rfc3339_weak.html for more \ - information."; - - pub(in crate::deploy) fn arg() -> Arg<'static, 'static> { - Arg::with_name(ARG_NAME) - .long(ARG_NAME) - .required(false) - .value_name(ARG_VALUE_NAME) - .help(ARG_HELP) - .display_order(DisplayOrder::Timestamp as usize) - } - - pub(in crate::deploy) fn get<'a>(matches: &'a ArgMatches) -> &'a str { - matches.value_of(ARG_NAME).unwrap_or_default() - } -} - -/// Handles providing the arg for and retrieval of the time to live. -pub(super) mod ttl { - use super::*; - - const ARG_NAME: &str = "ttl"; - const ARG_VALUE_NAME: &str = "DURATION"; - const ARG_DEFAULT: &str = "30min"; - const ARG_HELP: &str = - "Time that the deploy will remain valid for. A deploy can only be included in a block \ - between `timestamp` and `timestamp + ttl`. Input examples: '1hr 12min', '30min 50sec', \ - '1day'. For all options, see \ - https://docs.rs/humantime/latest/humantime/fn.parse_duration.html"; - - pub(in crate::deploy) fn arg() -> Arg<'static, 'static> { - Arg::with_name(ARG_NAME) - .long(ARG_NAME) - .required(false) - .value_name(ARG_VALUE_NAME) - .default_value(ARG_DEFAULT) - .help(ARG_HELP) - .display_order(DisplayOrder::Ttl as usize) - } - - pub(in crate::deploy) fn get<'a>(matches: &'a ArgMatches) -> &'a str { - matches.value_of(ARG_NAME).unwrap_or_default() - } -} - -/// Handles providing the arg for and retrieval of the gas price. 
-pub(super) mod gas_price { - use super::*; - - const ARG_NAME: &str = "gas-price"; - const ARG_VALUE_NAME: &str = common::ARG_INTEGER; - const ARG_DEFAULT: &str = "1"; - const ARG_HELP: &str = - "Conversion rate between the cost of Wasm opcodes and the motes sent by the payment code"; - - pub(in crate::deploy) fn arg() -> Arg<'static, 'static> { - Arg::with_name(ARG_NAME) - .long(ARG_NAME) - .required(false) - .value_name(ARG_VALUE_NAME) - .default_value(ARG_DEFAULT) - .help(ARG_HELP) - .display_order(DisplayOrder::GasPrice as usize) - } - - pub(in crate::deploy) fn get<'a>(matches: &'a ArgMatches) -> &'a str { - matches.value_of(ARG_NAME).unwrap_or_default() - } -} - -/// Handles providing the arg for and retrieval of the deploy dependencies. -pub(super) mod dependencies { - use super::*; - - const ARG_NAME: &str = "dependency"; - const ARG_VALUE_NAME: &str = common::ARG_HEX_STRING; - const ARG_HELP: &str = - "A hex-encoded deploy hash of a deploy which must be executed before this deploy"; - - pub(in crate::deploy) fn arg() -> Arg<'static, 'static> { - Arg::with_name(ARG_NAME) - .long(ARG_NAME) - .required(false) - .multiple(true) - .value_name(ARG_VALUE_NAME) - .takes_value(true) - .help(ARG_HELP) - .display_order(DisplayOrder::Dependencies as usize) - } - - pub(in crate::deploy) fn get<'a>(matches: &'a ArgMatches) -> Vec<&'a str> { - matches - .values_of(ARG_NAME) - .iter() - .cloned() - .flatten() - .collect() - } -} - -/// Handles providing the arg for and retrieval of the chain name. 
-pub(super) mod chain_name { - use super::*; - - const ARG_NAME: &str = "chain-name"; - const ARG_VALUE_NAME: &str = "NAME"; - const ARG_HELP: &str = - "Name of the chain, to avoid the deploy from being accidentally or maliciously included in \ - a different chain"; - - pub(in crate::deploy) fn arg() -> Arg<'static, 'static> { - Arg::with_name(ARG_NAME) - .long(ARG_NAME) - .required_unless(show_arg_examples::ARG_NAME) - .value_name(ARG_VALUE_NAME) - .help(ARG_HELP) - .display_order(DisplayOrder::ChainName as usize) - } - - pub(in crate::deploy) fn get<'a>(matches: &'a ArgMatches) -> &'a str { - matches - .value_of(ARG_NAME) - .unwrap_or_else(|| panic!("should have {} arg", ARG_NAME)) - } -} - -/// Handles providing the arg for and retrieval of the session code bytes. -pub(super) mod session_path { - use super::*; - - pub(super) const ARG_NAME: &str = "session-path"; - const ARG_SHORT: &str = "s"; - const ARG_VALUE_NAME: &str = common::ARG_PATH; - const ARG_HELP: &str = "Path to the compiled Wasm session code"; - - pub(in crate::deploy) fn arg() -> Arg<'static, 'static> { - Arg::with_name(ARG_NAME) - .short(ARG_SHORT) - .long(ARG_NAME) - .required(false) - .value_name(ARG_VALUE_NAME) - .help(ARG_HELP) - .display_order(DisplayOrder::SessionCode as usize) - } - - pub(in crate::deploy) fn get<'a>(matches: &'a ArgMatches) -> Option<&'a str> { - matches.value_of(ARG_NAME) - } -} - -/// Handles providing the arg for and retrieval of simple session and payment args. -pub(super) mod arg_simple { - use super::*; - use once_cell::sync::Lazy; - - const ARG_VALUE_NAME: &str = r#""NAME:TYPE='VALUE'" OR "NAME:TYPE=null""#; - - static ARG_HELP: Lazy = Lazy::new(|| { - format!( - "For simple CLTypes, a named and typed arg which is passed to the Wasm code. To see \ - an example for each type, run '--{}'. 
This arg can be repeated to pass multiple named, \ - typed args, but can only be used for the following types: {}", - super::show_arg_examples::ARG_NAME, - help::supported_cl_type_list() - ) - }); - - pub(in crate::deploy) mod session { - use super::*; - - pub const ARG_NAME: &str = "session-arg"; - const ARG_SHORT: &str = "a"; - - pub fn arg() -> Arg<'static, 'static> { - super::arg(ARG_NAME, DisplayOrder::SessionArgSimple as usize) - .short(ARG_SHORT) - .requires(super::session::ARG_NAME) - } - - pub fn get<'a>(matches: &'a ArgMatches) -> Vec<&'a str> { - matches - .values_of(ARG_NAME) - .iter() - .cloned() - .flatten() - .collect() - } - } - - pub(in crate::deploy) mod payment { - use super::*; - - pub const ARG_NAME: &str = "payment-arg"; - - pub fn arg() -> Arg<'static, 'static> { - super::arg(ARG_NAME, DisplayOrder::PaymentArgSimple as usize) - .requires(super::payment::ARG_NAME) - } - - pub fn get<'a>(matches: &'a ArgMatches) -> Vec<&'a str> { - matches - .values_of(ARG_NAME) - .iter() - .cloned() - .flatten() - .collect() - } - } - - fn arg(name: &'static str, order: usize) -> Arg<'static, 'static> { - Arg::with_name(name) - .long(name) - .required(false) - .multiple(true) - .value_name(ARG_VALUE_NAME) - .help(&*ARG_HELP) - .display_order(order) - } -} - -/// Handles providing the arg for and retrieval of complex session and payment args. These are read -/// in from a file. 
-pub(super) mod args_complex { - use super::*; - - const ARG_VALUE_NAME: &str = common::ARG_PATH; - const ARG_HELP: &str = - "Path to file containing 'ToBytes'-encoded named and typed args for passing to the Wasm \ - code"; - - pub(in crate::deploy) mod session { - use super::*; - - pub const ARG_NAME: &str = "session-args-complex"; - - pub fn arg() -> Arg<'static, 'static> { - super::arg(ARG_NAME, DisplayOrder::SessionArgsComplex as usize) - .requires(super::session::ARG_NAME) - } - - pub fn get<'a>(matches: &'a ArgMatches) -> &'a str { - matches.value_of(ARG_NAME).unwrap_or_default() - } - } - - pub(in crate::deploy) mod payment { - use super::*; - - pub const ARG_NAME: &str = "payment-args-complex"; - - pub fn arg() -> Arg<'static, 'static> { - super::arg(ARG_NAME, DisplayOrder::PaymentArgsComplex as usize) - .requires(super::payment::ARG_NAME) - } - - pub fn get<'a>(matches: &'a ArgMatches) -> &'a str { - matches.value_of(ARG_NAME).unwrap_or_default() - } - } - - fn arg(name: &'static str, order: usize) -> Arg<'static, 'static> { - Arg::with_name(name) - .long(name) - .required(false) - .value_name(ARG_VALUE_NAME) - .help(ARG_HELP) - .display_order(order) - } -} - -/// Handles providing the arg for and retrieval of the payment code bytes. -pub(super) mod payment_path { - use super::*; - - pub(in crate::deploy) const ARG_NAME: &str = "payment-path"; - const ARG_VALUE_NAME: &str = common::ARG_PATH; - const ARG_HELP: &str = "Path to the compiled Wasm payment code"; - - pub(in crate::deploy) fn arg() -> Arg<'static, 'static> { - Arg::with_name(ARG_NAME) - .long(ARG_NAME) - .required(false) - .value_name(ARG_VALUE_NAME) - .help(ARG_HELP) - .display_order(DisplayOrder::PaymentCode as usize) - } - - pub fn get<'a>(matches: &'a ArgMatches) -> Option<&'a str> { - matches.value_of(ARG_NAME) - } -} - -/// Handles providing the arg for and retrieval of the payment-amount arg. 
-pub(super) mod standard_payment_amount { - use super::*; - - pub(in crate::deploy) const ARG_NAME: &str = "payment-amount"; - const ARG_VALUE_NAME: &str = "AMOUNT"; - const ARG_SHORT: &str = "p"; - const ARG_HELP: &str = - "If provided, uses the standard-payment system contract rather than custom payment Wasm. \ - The value is the 'amount' arg of the standard-payment contract. This arg is incompatible \ - with all other --payment-xxx args"; - - pub(in crate::deploy) fn arg() -> Arg<'static, 'static> { - Arg::with_name(ARG_NAME) - .long(ARG_NAME) - .short(ARG_SHORT) - .required(false) - .value_name(ARG_VALUE_NAME) - .help(ARG_HELP) - .display_order(DisplayOrder::StandardPayment as usize) - } - - pub fn get<'a>(matches: &'a ArgMatches) -> Option<&'a str> { - matches.value_of(ARG_NAME) - } -} - -pub(super) fn apply_common_creation_options<'a, 'b>( - subcommand: App<'a, 'b>, - include_node_address: bool, -) -> App<'a, 'b> { - let mut subcommand = subcommand - .setting(AppSettings::NextLineHelp) - .arg(show_arg_examples::arg()); - - if include_node_address { - subcommand = subcommand.arg( - common::node_address::arg(DisplayOrder::NodeAddress as usize) - .required_unless(show_arg_examples::ARG_NAME), - ); - } - - subcommand = subcommand - .arg( - common::secret_key::arg(DisplayOrder::SecretKey as usize) - .required_unless(show_arg_examples::ARG_NAME), - ) - .arg(timestamp::arg()) - .arg(ttl::arg()) - .arg(gas_price::arg()) - .arg(dependencies::arg()) - .arg(chain_name::arg()); - subcommand -} - -pub(super) fn apply_common_session_options<'a, 'b>(subcommand: App<'a, 'b>) -> App<'a, 'b> { - subcommand - .arg(session_path::arg()) - .arg(session_package_hash::arg()) - .arg(session_package_name::arg()) - .arg(session_hash::arg()) - .arg(session_name::arg()) - .arg(arg_simple::session::arg()) - .arg(args_complex::session::arg()) - // Group the session-arg args so only one style is used to ensure consistent ordering. 
- .group( - ArgGroup::with_name("session-args") - .arg(arg_simple::session::ARG_NAME) - .arg(args_complex::session::ARG_NAME) - .required(false), - ) - .arg(session_entry_point::arg()) - .arg(session_version::arg()) - .group( - ArgGroup::with_name("session") - .arg(session_path::ARG_NAME) - .arg(session_package_hash::ARG_NAME) - .arg(session_package_name::ARG_NAME) - .arg(session_hash::ARG_NAME) - .arg(session_name::ARG_NAME) - .arg(show_arg_examples::ARG_NAME) - .required(true), - ) -} - -pub(crate) fn apply_common_payment_options( - subcommand: App<'static, 'static>, -) -> App<'static, 'static> { - subcommand - .arg(standard_payment_amount::arg()) - .arg(payment_path::arg()) - .arg(payment_package_hash::arg()) - .arg(payment_package_name::arg()) - .arg(payment_hash::arg()) - .arg(payment_name::arg()) - .arg(arg_simple::payment::arg()) - .arg(args_complex::payment::arg()) - // Group the payment-arg args so only one style is used to ensure consistent ordering. - .group( - ArgGroup::with_name("payment-args") - .arg(arg_simple::payment::ARG_NAME) - .arg(args_complex::payment::ARG_NAME) - .required(false), - ) - .arg(payment_entry_point::arg()) - .arg(payment_version::arg()) - .group( - ArgGroup::with_name("payment") - .arg(standard_payment_amount::ARG_NAME) - .arg(payment_path::ARG_NAME) - .arg(payment_package_hash::ARG_NAME) - .arg(payment_package_name::ARG_NAME) - .arg(payment_hash::ARG_NAME) - .arg(payment_name::ARG_NAME) - .arg(show_arg_examples::ARG_NAME) - .required(true), - ) -} - -pub(super) fn show_arg_examples_and_exit_if_required(matches: &ArgMatches<'_>) { - // If we printed the arg examples, exit the process. - if show_arg_examples::get(matches) { - process::exit(0); - } -} - -pub(super) mod output { - use super::*; - - const ARG_NAME: &str = "output"; - const ARG_SHORT_NAME: &str = "o"; - const ARG_VALUE_NAME: &str = common::ARG_PATH; - const ARG_HELP: &str = "Path to output deploy file. If omitted, defaults to stdout. 
If file exists, it will be overwritten"; - - pub fn arg() -> Arg<'static, 'static> { - Arg::with_name(ARG_NAME) - .required(false) - .long(ARG_NAME) - .short(ARG_SHORT_NAME) - .value_name(ARG_VALUE_NAME) - .help(ARG_HELP) - .display_order(DisplayOrder::Output as usize) - } - - pub fn get<'a>(matches: &'a ArgMatches) -> Option<&'a str> { - matches.value_of(ARG_NAME) - } -} - -pub(super) mod input { - use super::*; - - const ARG_NAME: &str = "input"; - const ARG_SHORT_NAME: &str = "i"; - const ARG_VALUE_NAME: &str = common::ARG_PATH; - const ARG_HELP: &str = "Path to input deploy file"; - - pub fn arg() -> Arg<'static, 'static> { - Arg::with_name(ARG_NAME) - .required_unless(show_arg_examples::ARG_NAME) - .long(ARG_NAME) - .short(ARG_SHORT_NAME) - .value_name(ARG_VALUE_NAME) - .help(ARG_HELP) - .display_order(DisplayOrder::Input as usize) - } - - pub fn get<'a>(matches: &'a ArgMatches) -> &'a str { - matches - .value_of(ARG_NAME) - .unwrap_or_else(|| panic!("should have {} arg", ARG_NAME)) - } -} - -pub(super) mod session_hash { - use super::*; - - pub const ARG_NAME: &str = "session-hash"; - const ARG_VALUE_NAME: &str = common::ARG_HEX_STRING; - const ARG_HELP: &str = "Hex-encoded hash of the stored contract to be called as the session"; - - pub fn arg() -> Arg<'static, 'static> { - Arg::with_name(ARG_NAME) - .long(ARG_NAME) - .value_name(ARG_VALUE_NAME) - .help(ARG_HELP) - .required(false) - .requires(session_entry_point::ARG_NAME) - .display_order(DisplayOrder::SessionHash as usize) - } - - pub fn get<'a>(matches: &'a ArgMatches) -> Option<&'a str> { - matches.value_of(ARG_NAME) - } -} - -pub(super) mod session_name { - use super::*; - - pub const ARG_NAME: &str = "session-name"; - const ARG_VALUE_NAME: &str = "NAME"; - const ARG_HELP: &str = "Name of the stored contract (associated with the executing account) to be called as the session"; - - pub fn arg() -> Arg<'static, 'static> { - Arg::with_name(ARG_NAME) - .long(ARG_NAME) - .value_name(ARG_VALUE_NAME) - 
.help(ARG_HELP) - .required(false) - .requires(session_entry_point::ARG_NAME) - .display_order(DisplayOrder::SessionName as usize) - } - - pub fn get<'a>(matches: &'a ArgMatches) -> Option<&'a str> { - matches.value_of(ARG_NAME) - } -} - -pub(super) mod session_package_hash { - use super::*; - - pub const ARG_NAME: &str = "session-package-hash"; - const ARG_VALUE_NAME: &str = common::ARG_HEX_STRING; - const ARG_HELP: &str = "Hex-encoded hash of the stored package to be called as the session"; - - pub fn arg() -> Arg<'static, 'static> { - Arg::with_name(ARG_NAME) - .long(ARG_NAME) - .value_name(ARG_VALUE_NAME) - .help(ARG_HELP) - .required(false) - .requires(session_entry_point::ARG_NAME) - .display_order(DisplayOrder::SessionPackageHash as usize) - } - - pub fn get<'a>(matches: &'a ArgMatches) -> Option<&'a str> { - matches.value_of(ARG_NAME) - } -} - -pub(super) mod session_package_name { - use super::*; - - pub const ARG_NAME: &str = "session-package-name"; - const ARG_VALUE_NAME: &str = "NAME"; - const ARG_HELP: &str = "Name of the stored package to be called as the session"; - - pub fn arg() -> Arg<'static, 'static> { - Arg::with_name(ARG_NAME) - .long(ARG_NAME) - .value_name(ARG_VALUE_NAME) - .help(ARG_HELP) - .required(false) - .requires(session_entry_point::ARG_NAME) - .display_order(DisplayOrder::SessionPackageName as usize) - } - - pub fn get<'a>(matches: &'a ArgMatches) -> Option<&'a str> { - matches.value_of(ARG_NAME) - } -} - -pub(super) mod session_entry_point { - use super::*; - - pub const ARG_NAME: &str = "session-entry-point"; - const ARG_VALUE_NAME: &str = "NAME"; - const ARG_HELP: &str = "Name of the method that will be used when calling the session contract"; - - pub fn arg() -> Arg<'static, 'static> { - Arg::with_name(ARG_NAME) - .long(ARG_NAME) - .value_name(ARG_VALUE_NAME) - .help(ARG_HELP) - .required(false) - .display_order(DisplayOrder::SessionEntryPoint as usize) - } - - pub fn get<'a>(matches: &'a ArgMatches) -> &'a str { - 
matches.value_of(ARG_NAME).unwrap_or_default() - } -} - -pub(super) mod session_version { - use super::*; - - pub const ARG_NAME: &str = "session-version"; - const ARG_VALUE_NAME: &str = common::ARG_INTEGER; - const ARG_HELP: &str = "Version of the called session contract. Latest will be used by default"; - - pub fn arg() -> Arg<'static, 'static> { - Arg::with_name(ARG_NAME) - .long(ARG_NAME) - .value_name(ARG_VALUE_NAME) - .help(ARG_HELP) - .required(false) - .display_order(DisplayOrder::SessionVersion as usize) - } - - pub fn get<'a>(matches: &'a ArgMatches) -> &'a str { - matches.value_of(ARG_NAME).unwrap_or_default() - } -} - -pub(super) mod payment_hash { - use super::*; - - pub const ARG_NAME: &str = "payment-hash"; - const ARG_VALUE_NAME: &str = common::ARG_HEX_STRING; - const ARG_HELP: &str = "Hex-encoded hash of the stored contract to be called as the payment"; - - pub fn arg() -> Arg<'static, 'static> { - Arg::with_name(ARG_NAME) - .long(ARG_NAME) - .value_name(ARG_VALUE_NAME) - .help(ARG_HELP) - .required(false) - .requires(payment_entry_point::ARG_NAME) - .display_order(DisplayOrder::PaymentHash as usize) - } - - pub fn get<'a>(matches: &'a ArgMatches) -> Option<&'a str> { - matches.value_of(ARG_NAME) - } -} - -pub(super) mod payment_name { - use super::*; - - pub const ARG_NAME: &str = "payment-name"; - const ARG_VALUE_NAME: &str = "NAME"; - const ARG_HELP: &str = "Name of the stored contract (associated with the executing account) \ - to be called as the payment"; - - pub fn arg() -> Arg<'static, 'static> { - Arg::with_name(ARG_NAME) - .long(ARG_NAME) - .value_name(ARG_VALUE_NAME) - .help(ARG_HELP) - .required(false) - .requires(payment_entry_point::ARG_NAME) - .display_order(DisplayOrder::PaymentName as usize) - } - - pub fn get<'a>(matches: &'a ArgMatches) -> Option<&'a str> { - matches.value_of(ARG_NAME) - } -} - -pub(super) mod payment_package_hash { - use super::*; - - pub const ARG_NAME: &str = "payment-package-hash"; - const ARG_VALUE_NAME: 
&str = common::ARG_HEX_STRING; - const ARG_HELP: &str = "Hex-encoded hash of the stored package to be called as the payment"; - - pub fn arg() -> Arg<'static, 'static> { - Arg::with_name(ARG_NAME) - .long(ARG_NAME) - .value_name(ARG_VALUE_NAME) - .help(ARG_HELP) - .required(false) - .requires(payment_entry_point::ARG_NAME) - .display_order(DisplayOrder::PaymentPackageHash as usize) - } - - pub fn get<'a>(matches: &'a ArgMatches) -> Option<&'a str> { - matches.value_of(ARG_NAME) - } -} - -pub(super) mod payment_package_name { - use super::*; - - pub const ARG_NAME: &str = "payment-package-name"; - const ARG_VALUE_NAME: &str = "NAME"; - const ARG_HELP: &str = "Name of the stored package to be called as the payment"; - - pub fn arg() -> Arg<'static, 'static> { - Arg::with_name(ARG_NAME) - .long(ARG_NAME) - .value_name(ARG_VALUE_NAME) - .help(ARG_HELP) - .required(false) - .requires(payment_entry_point::ARG_NAME) - .display_order(DisplayOrder::PaymentPackageName as usize) - } - - pub fn get<'a>(matches: &'a ArgMatches) -> Option<&'a str> { - matches.value_of(ARG_NAME) - } -} - -pub(super) mod payment_entry_point { - use super::*; - - pub const ARG_NAME: &str = "payment-entry-point"; - const ARG_VALUE_NAME: &str = "NAME"; - const ARG_HELP: &str = "Name of the method that will be used when calling the payment contract"; - - pub fn arg() -> Arg<'static, 'static> { - Arg::with_name(ARG_NAME) - .long(ARG_NAME) - .value_name(ARG_VALUE_NAME) - .help(ARG_HELP) - .required(false) - .display_order(DisplayOrder::PaymentEntryPoint as usize) - } - - pub fn get<'a>(matches: &'a ArgMatches) -> &'a str { - matches.value_of(ARG_NAME).unwrap_or_default() - } -} - -pub(super) mod payment_version { - use super::*; - - pub const ARG_NAME: &str = "payment-version"; - const ARG_VALUE_NAME: &str = common::ARG_INTEGER; - const ARG_HELP: &str = "Version of the called payment contract. 
Latest will be used by default"; - - pub fn arg() -> Arg<'static, 'static> { - Arg::with_name(ARG_NAME) - .long(ARG_NAME) - .value_name(ARG_VALUE_NAME) - .help(ARG_HELP) - .required(false) - .display_order(DisplayOrder::PaymentVersion as usize) - } - - pub fn get<'a>(matches: &'a ArgMatches) -> &'a str { - matches.value_of(ARG_NAME).unwrap_or_default() - } -} diff --git a/client/src/deploy/get.rs b/client/src/deploy/get.rs deleted file mode 100644 index 6e7b1ab8f3..0000000000 --- a/client/src/deploy/get.rs +++ /dev/null @@ -1,66 +0,0 @@ -use std::str; - -use clap::{App, Arg, ArgMatches, SubCommand}; - -use casper_client::Error; -use casper_node::rpcs::info::GetDeploy; - -use crate::{command::ClientCommand, common, Success}; - -/// This struct defines the order in which the args are shown for this subcommand's help message. -enum DisplayOrder { - Verbose, - NodeAddress, - RpcId, - DeployHash, -} - -/// Handles providing the arg for and retrieval of the deploy hash. -mod deploy_hash { - use super::*; - - const ARG_NAME: &str = "deploy-hash"; - const ARG_VALUE_NAME: &str = "HEX STRING"; - const ARG_HELP: &str = "Hex-encoded deploy hash"; - - pub(super) fn arg() -> Arg<'static, 'static> { - Arg::with_name(ARG_NAME) - .required(true) - .value_name(ARG_VALUE_NAME) - .help(ARG_HELP) - .display_order(DisplayOrder::DeployHash as usize) - } - - pub(super) fn get<'a>(matches: &'a ArgMatches) -> &'a str { - matches - .value_of(ARG_NAME) - .unwrap_or_else(|| panic!("should have {} arg", ARG_NAME)) - } -} - -impl<'a, 'b> ClientCommand<'a, 'b> for GetDeploy { - const NAME: &'static str = "get-deploy"; - const ABOUT: &'static str = "Retrieves a deploy from the network"; - - fn build(display_order: usize) -> App<'a, 'b> { - SubCommand::with_name(Self::NAME) - .about(Self::ABOUT) - .display_order(display_order) - .arg(common::verbose::arg(DisplayOrder::Verbose as usize)) - .arg(common::node_address::arg( - DisplayOrder::NodeAddress as usize, - )) - 
.arg(common::rpc_id::arg(DisplayOrder::RpcId as usize)) - .arg(deploy_hash::arg()) - } - - fn run(matches: &ArgMatches<'_>) -> Result { - let maybe_rpc_id = common::rpc_id::get(matches); - let node_address = common::node_address::get(matches); - let verbosity_level = common::verbose::get(matches); - let deploy_hash = deploy_hash::get(matches); - - casper_client::get_deploy(maybe_rpc_id, node_address, verbosity_level, deploy_hash) - .map(Success::from) - } -} diff --git a/client/src/deploy/list.rs b/client/src/deploy/list.rs deleted file mode 100644 index 24421e9447..0000000000 --- a/client/src/deploy/list.rs +++ /dev/null @@ -1,55 +0,0 @@ -use std::str; - -use clap::{App, ArgMatches, SubCommand}; - -use casper_client::{Error, ListDeploysResult}; -use casper_node::rpcs::chain::GetBlockResult; - -use crate::{command::ClientCommand, common, Success}; - -/// This struct defines the order in which the args are shown for this subcommand. -enum DisplayOrder { - Verbose, - NodeAddress, - RpcId, - BlockHash, -} - -pub struct ListDeploys; - -impl<'a, 'b> ClientCommand<'a, 'b> for ListDeploys { - const NAME: &'static str = "list-deploys"; - const ABOUT: &'static str = "Retrieves the list of all deploy hashes in a given block"; - - fn build(display_order: usize) -> App<'a, 'b> { - SubCommand::with_name(Self::NAME) - .about(Self::ABOUT) - .display_order(display_order) - .arg(common::verbose::arg(DisplayOrder::Verbose as usize)) - .arg(common::node_address::arg( - DisplayOrder::NodeAddress as usize, - )) - .arg(common::rpc_id::arg(DisplayOrder::RpcId as usize)) - .arg(common::block_identifier::arg( - DisplayOrder::BlockHash as usize, - )) - } - - fn run(matches: &ArgMatches<'_>) -> Result { - let maybe_rpc_id = common::rpc_id::get(matches); - let node_address = common::node_address::get(matches); - let verbosity_level = common::verbose::get(matches); - let maybe_block_id = common::block_identifier::get(matches); - - let result = - casper_client::get_block(maybe_rpc_id, 
node_address, verbosity_level, maybe_block_id); - - result.map(|response| { - let response_value = response.get_result().cloned().unwrap(); - let get_block_result = - serde_json::from_value::(response_value).expect("should parse"); - let list = ListDeploysResult::from(get_block_result); - Success::Output(serde_json::to_string_pretty(&list).expect("should encode")) - }) - } -} diff --git a/client/src/deploy/make.rs b/client/src/deploy/make.rs deleted file mode 100644 index dc15b1105e..0000000000 --- a/client/src/deploy/make.rs +++ /dev/null @@ -1,57 +0,0 @@ -use clap::{App, ArgMatches, SubCommand}; - -use casper_client::{DeployStrParams, Error}; - -use super::creation_common; -use crate::{command::ClientCommand, common, Success}; - -pub struct MakeDeploy; - -impl<'a, 'b> ClientCommand<'a, 'b> for MakeDeploy { - const NAME: &'static str = "make-deploy"; - const ABOUT: &'static str = - "Creates a deploy and outputs it to a file or stdout. As a file, the deploy can \ - subsequently be signed by other parties using the 'sign-deploy' subcommand and then sent \ - to the network for execution using the 'send-deploy' subcommand"; - - fn build(display_order: usize) -> App<'a, 'b> { - let subcommand = SubCommand::with_name(Self::NAME) - .about(Self::ABOUT) - .arg(creation_common::output::arg()) - .display_order(display_order); - let subcommand = creation_common::apply_common_session_options(subcommand); - let subcommand = creation_common::apply_common_payment_options(subcommand); - creation_common::apply_common_creation_options(subcommand, false) - } - - fn run(matches: &ArgMatches<'_>) -> Result { - creation_common::show_arg_examples_and_exit_if_required(matches); - - let secret_key = common::secret_key::get(matches); - let timestamp = creation_common::timestamp::get(matches); - let ttl = creation_common::ttl::get(matches); - let gas_price = creation_common::gas_price::get(matches); - let dependencies = creation_common::dependencies::get(matches); - let chain_name = 
creation_common::chain_name::get(matches); - - let session_str_params = creation_common::session_str_params(matches); - let payment_str_params = creation_common::payment_str_params(matches); - - let maybe_output_path = creation_common::output::get(matches); - - casper_client::make_deploy( - maybe_output_path.unwrap_or_default(), - DeployStrParams { - secret_key, - timestamp, - ttl, - dependencies, - gas_price, - chain_name, - }, - session_str_params, - payment_str_params, - ) - .map(|_| Success::Output("Made the deploy".to_string())) - } -} diff --git a/client/src/deploy/put.rs b/client/src/deploy/put.rs deleted file mode 100644 index d6c1f7bb87..0000000000 --- a/client/src/deploy/put.rs +++ /dev/null @@ -1,58 +0,0 @@ -use clap::{App, ArgMatches, SubCommand}; - -use casper_client::{DeployStrParams, Error}; -use casper_node::rpcs::account::PutDeploy; - -use super::creation_common::{self, DisplayOrder}; -use crate::{command::ClientCommand, common, Success}; - -impl<'a, 'b> ClientCommand<'a, 'b> for PutDeploy { - const NAME: &'static str = "put-deploy"; - const ABOUT: &'static str = "Creates a deploy and sends it to the network for execution"; - - fn build(display_order: usize) -> App<'a, 'b> { - let subcommand = SubCommand::with_name(Self::NAME) - .about(Self::ABOUT) - .display_order(display_order) - .arg(common::verbose::arg(DisplayOrder::Verbose as usize)) - .arg(common::rpc_id::arg(DisplayOrder::RpcId as usize)); - let subcommand = creation_common::apply_common_session_options(subcommand); - let subcommand = creation_common::apply_common_payment_options(subcommand); - creation_common::apply_common_creation_options(subcommand, true) - } - - fn run(matches: &ArgMatches<'_>) -> Result { - creation_common::show_arg_examples_and_exit_if_required(matches); - - let maybe_rpc_id = common::rpc_id::get(matches); - let node_address = common::node_address::get(matches); - let verbosity_level = common::verbose::get(matches); - - let secret_key = 
common::secret_key::get(matches); - let timestamp = creation_common::timestamp::get(matches); - let ttl = creation_common::ttl::get(matches); - let gas_price = creation_common::gas_price::get(matches); - let dependencies = creation_common::dependencies::get(matches); - let chain_name = creation_common::chain_name::get(matches); - - let session_str_params = creation_common::session_str_params(matches); - let payment_str_params = creation_common::payment_str_params(matches); - - casper_client::put_deploy( - maybe_rpc_id, - node_address, - verbosity_level, - DeployStrParams { - secret_key, - timestamp, - ttl, - dependencies, - gas_price, - chain_name, - }, - session_str_params, - payment_str_params, - ) - .map(Success::from) - } -} diff --git a/client/src/deploy/send.rs b/client/src/deploy/send.rs deleted file mode 100644 index 5f38f7f2bd..0000000000 --- a/client/src/deploy/send.rs +++ /dev/null @@ -1,36 +0,0 @@ -use clap::{App, ArgMatches, SubCommand}; - -use casper_client::Error; - -use super::creation_common::{self, DisplayOrder}; -use crate::{command::ClientCommand, common, Success}; - -pub struct SendDeploy; - -impl<'a, 'b> ClientCommand<'a, 'b> for SendDeploy { - const NAME: &'static str = "send-deploy"; - const ABOUT: &'static str = - "Reads a previously-saved deploy from a file and sends it to the network for execution"; - - fn build(display_order: usize) -> App<'a, 'b> { - SubCommand::with_name(Self::NAME) - .about(Self::ABOUT) - .display_order(display_order) - .arg(common::verbose::arg(DisplayOrder::Verbose as usize)) - .arg(common::node_address::arg( - DisplayOrder::NodeAddress as usize, - )) - .arg(common::rpc_id::arg(DisplayOrder::RpcId as usize)) - .arg(creation_common::input::arg()) - } - - fn run(matches: &ArgMatches<'_>) -> Result { - let maybe_rpc_id = common::rpc_id::get(matches); - let node_address = common::node_address::get(matches); - let verbosity_level = common::verbose::get(matches); - let input_path = creation_common::input::get(matches); - 
- casper_client::send_deploy_file(maybe_rpc_id, node_address, verbosity_level, &input_path) - .map(Success::from) - } -} diff --git a/client/src/deploy/sign.rs b/client/src/deploy/sign.rs deleted file mode 100644 index 53fd688bd3..0000000000 --- a/client/src/deploy/sign.rs +++ /dev/null @@ -1,34 +0,0 @@ -use clap::{App, ArgMatches, SubCommand}; - -use casper_client::Error; - -use super::creation_common; -use crate::{command::ClientCommand, common, Success}; - -pub struct SignDeploy; - -impl<'a, 'b> ClientCommand<'a, 'b> for SignDeploy { - const NAME: &'static str = "sign-deploy"; - const ABOUT: &'static str = - "Reads a previously-saved deploy from a file, cryptographically signs it, and outputs it \ - to a file or stdout"; - - fn build(display_order: usize) -> App<'a, 'b> { - SubCommand::with_name(Self::NAME) - .about(Self::ABOUT) - .display_order(display_order) - .arg(common::secret_key::arg( - creation_common::DisplayOrder::SecretKey as usize, - )) - .arg(creation_common::input::arg()) - .arg(creation_common::output::arg()) - } - - fn run(matches: &ArgMatches<'_>) -> Result { - let input_path = creation_common::input::get(matches); - let secret_key = common::secret_key::get(matches); - let maybe_output = creation_common::output::get(matches); - casper_client::sign_deploy_file(&input_path, secret_key, maybe_output.unwrap_or_default()) - .map(|_| Success::Output("Signed the deploy".to_string())) - } -} diff --git a/client/src/deploy/transfer.rs b/client/src/deploy/transfer.rs deleted file mode 100644 index 730264ac42..0000000000 --- a/client/src/deploy/transfer.rs +++ /dev/null @@ -1,149 +0,0 @@ -use clap::{App, Arg, ArgGroup, ArgMatches, SubCommand}; - -use casper_client::{DeployStrParams, Error}; - -use super::creation_common::{self, DisplayOrder}; -use crate::{command::ClientCommand, common, Success}; - -/// Handles providing the arg for and retrieval of the transfer amount. 
-pub(super) mod amount { - use super::*; - - const ARG_NAME: &str = "amount"; - const ARG_SHORT: &str = "a"; - const ARG_VALUE_NAME: &str = "512-BIT INTEGER"; - const ARG_HELP: &str = "The number of motes to transfer"; - - pub(in crate::deploy) fn arg() -> Arg<'static, 'static> { - Arg::with_name(ARG_NAME) - .long(ARG_NAME) - .short(ARG_SHORT) - .required_unless(creation_common::show_arg_examples::ARG_NAME) - .value_name(ARG_VALUE_NAME) - .help(ARG_HELP) - .display_order(DisplayOrder::TransferAmount as usize) - } - - pub(in crate::deploy) fn get<'a>(matches: &'a ArgMatches) -> &'a str { - matches - .value_of(ARG_NAME) - .unwrap_or_else(|| panic!("should have {} arg", ARG_NAME)) - } -} - -/// Handles providing the arg for and retrieval of the target account. -pub(super) mod target_account { - use super::*; - - pub(super) const ARG_NAME: &str = "target-account"; - const ARG_SHORT: &str = "t"; - const ARG_VALUE_NAME: &str = "HEX STRING"; - const ARG_HELP: &str = - "Hex-encoded public key of the account from which the main purse will be used as the \ - target."; - - // Conflicts with --target-purse, but that's handled via an `ArgGroup` in the subcommand. Don't - // add a `conflicts_with()` to the arg or the `ArgGroup` fails to work correctly. - pub(super) fn arg() -> Arg<'static, 'static> { - Arg::with_name(ARG_NAME) - .long(ARG_NAME) - .short(ARG_SHORT) - .required(false) - .value_name(ARG_VALUE_NAME) - .help(ARG_HELP) - .display_order(DisplayOrder::TransferTargetAccount as usize) - } - - pub(super) fn get<'a>(matches: &'a ArgMatches) -> &'a str { - matches.value_of(ARG_NAME).unwrap_or_default() - } -} - -/// Handles providing the arg for and retrieval of the transfer id. 
-pub(super) mod transfer_id { - use super::*; - - pub(super) const ARG_NAME: &str = "transfer-id"; - const ARG_VALUE_NAME: &str = "64-BIT INTEGER"; - const ARG_HELP: &str = "user-defined transfer id"; - - pub(super) fn arg() -> Arg<'static, 'static> { - Arg::with_name(ARG_NAME) - .long(ARG_NAME) - .required(false) - .value_name(ARG_VALUE_NAME) - .help(ARG_HELP) - .display_order(DisplayOrder::TransferId as usize) - } - - pub(super) fn get<'a>(matches: &'a ArgMatches) -> &'a str { - matches.value_of(ARG_NAME).unwrap_or_default() - } -} - -pub struct Transfer {} - -impl<'a, 'b> ClientCommand<'a, 'b> for Transfer { - const NAME: &'static str = "transfer"; - const ABOUT: &'static str = "Transfers funds between purses"; - - fn build(display_order: usize) -> App<'a, 'b> { - let subcommand = SubCommand::with_name(Self::NAME) - .about(Self::ABOUT) - .display_order(display_order) - .arg(common::verbose::arg(DisplayOrder::Verbose as usize)) - .arg(common::rpc_id::arg(DisplayOrder::RpcId as usize)) - .arg(amount::arg()) - .arg(target_account::arg()) - .arg(transfer_id::arg()) - // Group the target args to ensure exactly one is required. 
- .group( - ArgGroup::with_name("required-target-args") - .arg(target_account::ARG_NAME) - .arg(creation_common::show_arg_examples::ARG_NAME) - .required(true), - ); - let subcommand = creation_common::apply_common_payment_options(subcommand); - creation_common::apply_common_creation_options(subcommand, true) - } - - fn run(matches: &ArgMatches<'_>) -> Result { - creation_common::show_arg_examples_and_exit_if_required(matches); - - let amount = amount::get(matches); - let target_account = target_account::get(matches); - let transfer_id = transfer_id::get(matches); - - let maybe_rpc_id = common::rpc_id::get(matches); - let node_address = common::node_address::get(matches); - let verbosity_level = common::verbose::get(matches); - - let secret_key = common::secret_key::get(matches); - let timestamp = creation_common::timestamp::get(matches); - let ttl = creation_common::ttl::get(matches); - let gas_price = creation_common::gas_price::get(matches); - let dependencies = creation_common::dependencies::get(matches); - let chain_name = creation_common::chain_name::get(matches); - - let payment_str_params = creation_common::payment_str_params(matches); - - casper_client::transfer( - maybe_rpc_id, - node_address, - verbosity_level, - amount, - target_account, - transfer_id, - DeployStrParams { - secret_key, - timestamp, - ttl, - dependencies, - gas_price, - chain_name, - }, - payment_str_params, - ) - .map(Success::from) - } -} diff --git a/client/src/docs.rs b/client/src/docs.rs deleted file mode 100644 index 395d823968..0000000000 --- a/client/src/docs.rs +++ /dev/null @@ -1,39 +0,0 @@ -use std::str; - -use clap::{App, ArgMatches, SubCommand}; - -use casper_client::Error; -use casper_node::rpcs::docs::ListRpcs; - -use crate::{command::ClientCommand, common, Success}; - -/// This struct defines the order in which the args are shown for this subcommand. 
-enum DisplayOrder { - Verbose, - NodeAddress, - RpcId, -} - -impl<'a, 'b> ClientCommand<'a, 'b> for ListRpcs { - const NAME: &'static str = "list-rpcs"; - const ABOUT: &'static str = "List all currently supported RPCs"; - - fn build(display_order: usize) -> App<'a, 'b> { - SubCommand::with_name(Self::NAME) - .about(Self::ABOUT) - .display_order(display_order) - .arg(common::verbose::arg(DisplayOrder::Verbose as usize)) - .arg(common::node_address::arg( - DisplayOrder::NodeAddress as usize, - )) - .arg(common::rpc_id::arg(DisplayOrder::RpcId as usize)) - } - - fn run(matches: &ArgMatches<'_>) -> Result { - let maybe_rpc_id = common::rpc_id::get(matches); - let node_address = common::node_address::get(matches); - let verbosity_level = common::verbose::get(matches); - - casper_client::list_rpcs(maybe_rpc_id, node_address, verbosity_level).map(Success::from) - } -} diff --git a/client/src/generate_completion.rs b/client/src/generate_completion.rs deleted file mode 100644 index 7e01855d66..0000000000 --- a/client/src/generate_completion.rs +++ /dev/null @@ -1,121 +0,0 @@ -use std::{fs::File, path::PathBuf, process, str::FromStr}; - -use clap::{crate_name, App, Arg, ArgMatches, Shell, SubCommand}; - -use casper_client::Error; - -use crate::{command::ClientCommand, common, Success}; - -/// This struct defines the order in which the args are shown for this subcommand's help message. -enum DisplayOrder { - OutputFile, - Force, - Shell, -} - -/// Handles providing the arg for and retrieval of the output file. -mod output_file { - use super::*; - use once_cell::sync::Lazy; - - const ARG_NAME: &str = "output"; - const ARG_NAME_SHORT: &str = "o"; - const ARG_VALUE_NAME: &str = common::ARG_PATH; - const ARG_HELP: &str = - "Path to output file. If the path's parent folder doesn't exist, the command will fail. 
\ - Default path normally requires running the command with sudo"; - - static ARG_DEFAULT: Lazy = - Lazy::new(|| format!("/usr/share/bash-completion/completions/{}", crate_name!())); - - pub(super) fn arg() -> Arg<'static, 'static> { - Arg::with_name(ARG_NAME) - .long(ARG_NAME) - .short(ARG_NAME_SHORT) - .required(false) - .default_value(&*ARG_DEFAULT) - .value_name(ARG_VALUE_NAME) - .help(ARG_HELP) - .display_order(DisplayOrder::OutputFile as usize) - } - - pub(super) fn get(matches: &ArgMatches) -> PathBuf { - matches - .value_of(ARG_NAME) - .unwrap_or_else(|| panic!("should have {} arg", ARG_NAME)) - .into() - } -} - -/// Handles providing the arg for and retrieval of shell type. -mod shell { - use super::*; - - const ARG_NAME: &str = "shell"; - const ARG_VALUE_NAME: &str = common::ARG_STRING; - const ARG_DEFAULT: &str = "bash"; - const ARG_HELP: &str = "The type of shell to generate the completion script for"; - - pub fn arg() -> Arg<'static, 'static> { - Arg::with_name(ARG_NAME) - .long(ARG_NAME) - .required(false) - .default_value(ARG_DEFAULT) - .possible_values(&Shell::variants()[..]) - .value_name(ARG_VALUE_NAME) - .help(ARG_HELP) - .display_order(DisplayOrder::Shell as usize) - } - - pub fn get(matches: &ArgMatches) -> Shell { - Shell::from_str( - matches - .value_of(ARG_NAME) - .unwrap_or_else(|| panic!("should have {} arg", ARG_NAME)), - ) - .unwrap_or_else(|error| panic!("invalid value for --{}: {}", ARG_NAME, error)) - } -} - -pub struct GenerateCompletion {} - -impl<'a, 'b> ClientCommand<'a, 'b> for GenerateCompletion { - const NAME: &'static str = "generate-completion"; - const ABOUT: &'static str = "Generates a shell completion script"; - - fn build(display_order: usize) -> App<'a, 'b> { - SubCommand::with_name(Self::NAME) - .about(Self::ABOUT) - .display_order(display_order) - .arg(output_file::arg()) - .arg(common::force::arg(DisplayOrder::Force as usize, true)) - .arg(shell::arg()) - } - - fn run(matches: &ArgMatches<'_>) -> Result { - let 
output_path = output_file::get(matches); - let force = common::force::get(matches); - let shell = shell::get(matches); - - if !force && output_path.exists() { - eprintln!( - "{} exists. To overwrite, rerun with --{}", - output_path.display(), - common::force::ARG_NAME - ); - process::exit(1); - } - - let mut output_file = File::create(&output_path).map_err(|error| Error::IoError { - context: output_path.display().to_string(), - error, - })?; - super::cli().gen_completions_to(crate_name!(), shell, &mut output_file); - - Ok(Success::Output(format!( - "Wrote completion script for {} to {}", - shell, - output_path.display() - ))) - } -} diff --git a/client/src/get_auction_info.rs b/client/src/get_auction_info.rs deleted file mode 100644 index e7ec4b8ecb..0000000000 --- a/client/src/get_auction_info.rs +++ /dev/null @@ -1,41 +0,0 @@ -use std::str; - -use clap::{App, ArgMatches, SubCommand}; - -use casper_client::Error; -use casper_node::rpcs::state::GetAuctionInfo; - -use crate::{command::ClientCommand, common, Success}; - -/// This struct defines the order in which the args are shown for this subcommand's help message. 
-enum DisplayOrder { - Verbose, - NodeAddress, - RpcId, -} - -impl<'a, 'b> ClientCommand<'a, 'b> for GetAuctionInfo { - const NAME: &'static str = "get-auction-info"; - const ABOUT: &'static str = - "Retrieves the bids and validators as of the most recently added block"; - - fn build(display_order: usize) -> App<'a, 'b> { - SubCommand::with_name(Self::NAME) - .about(Self::ABOUT) - .display_order(display_order) - .arg(common::verbose::arg(DisplayOrder::Verbose as usize)) - .arg(common::node_address::arg( - DisplayOrder::NodeAddress as usize, - )) - .arg(common::rpc_id::arg(DisplayOrder::RpcId as usize)) - } - - fn run(matches: &ArgMatches<'_>) -> Result { - let maybe_rpc_id = common::rpc_id::get(matches); - let node_address = common::node_address::get(matches); - let verbosity_level = common::verbose::get(matches); - - casper_client::get_auction_info(maybe_rpc_id, node_address, verbosity_level) - .map(Success::from) - } -} diff --git a/client/src/get_balance.rs b/client/src/get_balance.rs deleted file mode 100644 index 693dc26fb4..0000000000 --- a/client/src/get_balance.rs +++ /dev/null @@ -1,82 +0,0 @@ -use std::str; - -use clap::{App, Arg, ArgMatches, SubCommand}; - -use casper_client::Error; -use casper_node::rpcs::state::GetBalance; - -use crate::{command::ClientCommand, common, Success}; - -/// This struct defines the order in which the args are shown for this subcommand's help message. -enum DisplayOrder { - Verbose, - NodeAddress, - RpcId, - StateRootHash, - PurseURef, -} - -/// Handles providing the arg for and retrieval of the purse URef. -mod purse_uref { - use super::*; - - const ARG_NAME: &str = "purse-uref"; - const ARG_SHORT: &str = "p"; - const ARG_VALUE_NAME: &str = "FORMATTED STRING"; - const ARG_HELP: &str = - "The URef under which the purse is stored. 
This must be a properly formatted URef \ - \"uref--\""; - - pub(super) fn arg() -> Arg<'static, 'static> { - Arg::with_name(ARG_NAME) - .long(ARG_NAME) - .short(ARG_SHORT) - .required(true) - .value_name(ARG_VALUE_NAME) - .help(ARG_HELP) - .display_order(DisplayOrder::PurseURef as usize) - } - - pub(super) fn get<'a>(matches: &'a ArgMatches) -> &'a str { - matches - .value_of(ARG_NAME) - .unwrap_or_else(|| panic!("should have {} arg", ARG_NAME)) - } -} - -impl<'a, 'b> ClientCommand<'a, 'b> for GetBalance { - const NAME: &'static str = "get-balance"; - const ABOUT: &'static str = "Retrieves a purse's balance from the network"; - - fn build(display_order: usize) -> App<'a, 'b> { - SubCommand::with_name(Self::NAME) - .about(Self::ABOUT) - .display_order(display_order) - .arg(common::verbose::arg(DisplayOrder::Verbose as usize)) - .arg(common::node_address::arg( - DisplayOrder::NodeAddress as usize, - )) - .arg(common::rpc_id::arg(DisplayOrder::RpcId as usize)) - .arg(common::state_root_hash::arg( - DisplayOrder::StateRootHash as usize, - )) - .arg(purse_uref::arg()) - } - - fn run(matches: &ArgMatches<'_>) -> Result { - let maybe_rpc_id = common::rpc_id::get(matches); - let node_address = common::node_address::get(matches); - let verbosity_level = common::verbose::get(matches); - let state_root_hash = common::state_root_hash::get(&matches); - let purse_uref = purse_uref::get(&matches); - - casper_client::get_balance( - maybe_rpc_id, - node_address, - verbosity_level, - state_root_hash, - purse_uref, - ) - .map(Success::from) - } -} diff --git a/client/src/get_era_info_by_switch_block.rs b/client/src/get_era_info_by_switch_block.rs deleted file mode 100644 index 2847912e75..0000000000 --- a/client/src/get_era_info_by_switch_block.rs +++ /dev/null @@ -1,50 +0,0 @@ -use std::str; - -use clap::{App, ArgMatches, SubCommand}; - -use casper_client::Error; -use casper_node::rpcs::chain::GetEraInfoBySwitchBlock; - -use crate::{command::ClientCommand, common, Success}; - -/// 
This struct defines the order in which the args are shown for this subcommand's help message. -enum DisplayOrder { - Verbose, - NodeAddress, - RpcId, - BlockIdentifier, -} - -impl<'a, 'b> ClientCommand<'a, 'b> for GetEraInfoBySwitchBlock { - const NAME: &'static str = "get-era-info-by-switch-block"; - const ABOUT: &'static str = "Retrieves era information from the network"; - - fn build(display_order: usize) -> App<'a, 'b> { - SubCommand::with_name(Self::NAME) - .about(Self::ABOUT) - .display_order(display_order) - .arg(common::verbose::arg(DisplayOrder::Verbose as usize)) - .arg(common::node_address::arg( - DisplayOrder::NodeAddress as usize, - )) - .arg(common::rpc_id::arg(DisplayOrder::RpcId as usize)) - .arg(common::block_identifier::arg( - DisplayOrder::BlockIdentifier as usize, - )) - } - - fn run(matches: &ArgMatches<'_>) -> Result { - let maybe_rpc_id = common::rpc_id::get(matches); - let node_address = common::node_address::get(matches); - let verbosity_level = common::verbose::get(matches); - let maybe_block_id = common::block_identifier::get(&matches); - - casper_client::get_era_info_by_switch_block( - maybe_rpc_id, - node_address, - verbosity_level, - maybe_block_id, - ) - .map(Success::from) - } -} diff --git a/client/src/get_state_hash.rs b/client/src/get_state_hash.rs deleted file mode 100644 index 3648d5773f..0000000000 --- a/client/src/get_state_hash.rs +++ /dev/null @@ -1,50 +0,0 @@ -use std::str; - -use clap::{App, ArgMatches, SubCommand}; - -use casper_client::Error; -use casper_node::rpcs::chain::GetStateRootHash; - -use crate::{command::ClientCommand, common, Success}; - -/// This struct defines the order in which the args are shown for this subcommand's help message. 
-enum DisplayOrder { - Verbose, - NodeAddress, - RpcId, - BlockHash, -} - -impl<'a, 'b> ClientCommand<'a, 'b> for GetStateRootHash { - const NAME: &'static str = "get-state-root-hash"; - const ABOUT: &'static str = "Retrieves a state root hash at a given block"; - - fn build(display_order: usize) -> App<'a, 'b> { - SubCommand::with_name(Self::NAME) - .about(Self::ABOUT) - .display_order(display_order) - .arg(common::verbose::arg(DisplayOrder::Verbose as usize)) - .arg(common::node_address::arg( - DisplayOrder::NodeAddress as usize, - )) - .arg(common::rpc_id::arg(DisplayOrder::RpcId as usize)) - .arg(common::block_identifier::arg( - DisplayOrder::BlockHash as usize, - )) - } - - fn run(matches: &ArgMatches<'_>) -> Result { - let maybe_rpc_id = common::rpc_id::get(matches); - let node_address = common::node_address::get(matches); - let verbosity_level = common::verbose::get(matches); - let maybe_block_id = common::block_identifier::get(matches); - - casper_client::get_state_root_hash( - maybe_rpc_id, - node_address, - verbosity_level, - maybe_block_id, - ) - .map(Success::from) - } -} diff --git a/client/src/keygen.rs b/client/src/keygen.rs deleted file mode 100644 index adca8ae004..0000000000 --- a/client/src/keygen.rs +++ /dev/null @@ -1,104 +0,0 @@ -use clap::{App, Arg, ArgMatches, SubCommand}; -use once_cell::sync::Lazy; - -use casper_client::{ - keygen::{self, FILES, PUBLIC_KEY_HEX}, - Error, -}; - -use crate::{command::ClientCommand, common, Success}; - -static MORE_ABOUT: Lazy = Lazy::new(|| { - format!( - "{}. Creates {:?}. \"{}\" contains the hex-encoded key's bytes with the hex-encoded \ - algorithm tag prefixed", - Keygen::ABOUT, - FILES, - PUBLIC_KEY_HEX - ) -}); - -/// This struct defines the order in which the args are shown for this subcommand's help message. -enum DisplayOrder { - OutputDir, - Force, - Algorithm, -} - -/// Handles providing the arg for and retrieval of the output directory. 
-mod output_dir { - use super::*; - - const ARG_NAME: &str = "output-dir"; - const ARG_VALUE_NAME: &str = common::ARG_PATH; - const ARG_HELP: &str = - "Path to output directory where key files will be created. If the path doesn't exist, it \ - will be created. If not set, the current working directory will be used"; - - pub(super) fn arg() -> Arg<'static, 'static> { - Arg::with_name(ARG_NAME) - .required(false) - .value_name(ARG_VALUE_NAME) - .help(ARG_HELP) - .display_order(DisplayOrder::OutputDir as usize) - } - - pub(super) fn get(matches: &ArgMatches) -> String { - matches.value_of(ARG_NAME).unwrap_or(".").to_string() - } -} - -/// Handles providing the arg for and retrieval of the key algorithm. -mod algorithm { - use super::*; - - const ARG_NAME: &str = "algorithm"; - const ARG_SHORT: &str = "a"; - const ARG_VALUE_NAME: &str = common::ARG_STRING; - const ARG_HELP: &str = "The type of keys to generate"; - - pub fn arg() -> Arg<'static, 'static> { - Arg::with_name(ARG_NAME) - .long(ARG_NAME) - .short(ARG_SHORT) - .required(false) - .default_value(keygen::ED25519) - .possible_value(keygen::ED25519) - .possible_value(keygen::SECP256K1) - .value_name(ARG_VALUE_NAME) - .help(ARG_HELP) - .display_order(DisplayOrder::Algorithm as usize) - } - - pub fn get<'a>(matches: &'a ArgMatches) -> &'a str { - matches - .value_of(ARG_NAME) - .unwrap_or_else(|| panic!("should have {} arg", ARG_NAME)) - } -} - -pub struct Keygen {} - -impl<'a, 'b> ClientCommand<'a, 'b> for Keygen { - const NAME: &'static str = "keygen"; - const ABOUT: &'static str = "Generates account key files in the given directory"; - - fn build(display_order: usize) -> App<'a, 'b> { - SubCommand::with_name(Self::NAME) - .about(Self::ABOUT) - .long_about(MORE_ABOUT.as_str()) - .display_order(display_order) - .arg(output_dir::arg()) - .arg(common::force::arg(DisplayOrder::Force as usize, false)) - .arg(algorithm::arg()) - } - - fn run(matches: &ArgMatches<'_>) -> Result { - let output_dir = 
output_dir::get(matches); - let algorithm = algorithm::get(matches); - let force = common::force::get(matches); - - keygen::generate_files(&output_dir, algorithm, force) - .map(|_| Success::Output(format!("Wrote files to {}", output_dir))) - } -} diff --git a/client/src/main.rs b/client/src/main.rs deleted file mode 100644 index 18c8b7afae..0000000000 --- a/client/src/main.rs +++ /dev/null @@ -1,142 +0,0 @@ -mod account_address; -mod block; -mod command; -mod common; -mod deploy; -mod docs; -mod generate_completion; -mod get_auction_info; -mod get_balance; -mod get_era_info_by_switch_block; -mod get_state_hash; -mod keygen; -mod query_state; - -use std::process; - -use clap::{crate_description, crate_version, App}; - -use casper_client::Error; -use casper_node::rpcs::{ - account::PutDeploy, - chain::{GetBlock, GetBlockTransfers, GetEraInfoBySwitchBlock, GetStateRootHash}, - docs::ListRpcs, - info::GetDeploy, - state::{GetAuctionInfo, GetBalance, GetItem as QueryState}, -}; - -use deploy::{ListDeploys, MakeDeploy, SendDeploy, SignDeploy}; - -use account_address::GenerateAccountHash as AccountAddress; -use command::{ClientCommand, Success}; -use deploy::Transfer; -use generate_completion::GenerateCompletion; -use keygen::Keygen; - -const APP_NAME: &str = "Casper client"; - -/// This struct defines the order in which the subcommands are shown in the app's help message. 
-enum DisplayOrder { - PutDeploy, - MakeDeploy, - SignDeploy, - SendDeploy, - Transfer, - GetDeploy, - GetBlock, - GetBlockTransfers, - ListDeploys, - GetStateRootHash, - QueryState, - GetBalance, - GetEraInfo, - GetAuctionInfo, - Keygen, - GenerateCompletion, - GetRpcs, - AccountAddress, -} - -fn cli<'a, 'b>() -> App<'a, 'b> { - App::new(APP_NAME) - .version(crate_version!()) - .about(crate_description!()) - .subcommand(PutDeploy::build(DisplayOrder::PutDeploy as usize)) - .subcommand(MakeDeploy::build(DisplayOrder::MakeDeploy as usize)) - .subcommand(SignDeploy::build(DisplayOrder::SignDeploy as usize)) - .subcommand(SendDeploy::build(DisplayOrder::SendDeploy as usize)) - .subcommand(Transfer::build(DisplayOrder::Transfer as usize)) - .subcommand(GetDeploy::build(DisplayOrder::GetDeploy as usize)) - .subcommand(GetBlock::build(DisplayOrder::GetBlock as usize)) - .subcommand(GetBlockTransfers::build( - DisplayOrder::GetBlockTransfers as usize, - )) - .subcommand(ListDeploys::build(DisplayOrder::ListDeploys as usize)) - .subcommand(GetBalance::build(DisplayOrder::GetBalance as usize)) - .subcommand(GetStateRootHash::build( - DisplayOrder::GetStateRootHash as usize, - )) - .subcommand(QueryState::build(DisplayOrder::QueryState as usize)) - .subcommand(GetEraInfoBySwitchBlock::build( - DisplayOrder::GetEraInfo as usize, - )) - .subcommand(GetAuctionInfo::build(DisplayOrder::GetAuctionInfo as usize)) - .subcommand(Keygen::build(DisplayOrder::Keygen as usize)) - .subcommand(GenerateCompletion::build( - DisplayOrder::GenerateCompletion as usize, - )) - .subcommand(ListRpcs::build(DisplayOrder::GetRpcs as usize)) - .subcommand(AccountAddress::build(DisplayOrder::AccountAddress as usize)) -} - -#[tokio::main] -async fn main() { - let arg_matches = cli().get_matches(); - let (result, matches) = match arg_matches.subcommand() { - (PutDeploy::NAME, Some(matches)) => (PutDeploy::run(matches), matches), - (MakeDeploy::NAME, Some(matches)) => (MakeDeploy::run(matches), 
matches), - (SignDeploy::NAME, Some(matches)) => (SignDeploy::run(matches), matches), - (SendDeploy::NAME, Some(matches)) => (SendDeploy::run(matches), matches), - (Transfer::NAME, Some(matches)) => (Transfer::run(matches), matches), - (GetDeploy::NAME, Some(matches)) => (GetDeploy::run(matches), matches), - (GetBlock::NAME, Some(matches)) => (GetBlock::run(matches), matches), - (GetBlockTransfers::NAME, Some(matches)) => (GetBlockTransfers::run(matches), matches), - (ListDeploys::NAME, Some(matches)) => (ListDeploys::run(matches), matches), - (GetBalance::NAME, Some(matches)) => (GetBalance::run(matches), matches), - (GetStateRootHash::NAME, Some(matches)) => (GetStateRootHash::run(matches), matches), - (QueryState::NAME, Some(matches)) => (QueryState::run(matches), matches), - (GetEraInfoBySwitchBlock::NAME, Some(matches)) => { - (GetEraInfoBySwitchBlock::run(matches), matches) - } - (GetAuctionInfo::NAME, Some(matches)) => (GetAuctionInfo::run(matches), matches), - (Keygen::NAME, Some(matches)) => (Keygen::run(matches), matches), - (GenerateCompletion::NAME, Some(matches)) => (GenerateCompletion::run(matches), matches), - (ListRpcs::NAME, Some(matches)) => (ListRpcs::run(matches), matches), - (AccountAddress::NAME, Some(matches)) => (AccountAddress::run(matches), matches), - _ => { - let _ = cli().print_long_help(); - println!(); - process::exit(1); - } - }; - - let mut verbosity_level = common::verbose::get(matches); - if verbosity_level == 0 { - verbosity_level += 1 - } - - match &result { - Ok(Success::Response(response)) => { - casper_client::pretty_print_at_level(&response, verbosity_level) - } - Ok(Success::Output(output)) => println!("{}", output), - Err(Error::ResponseIsError(error)) => { - casper_client::pretty_print_at_level(&error, verbosity_level); - process::exit(1); - } - Err(error) => { - println!("{}", error); - process::exit(1); - } - } -} diff --git a/client/src/query_state.rs b/client/src/query_state.rs deleted file mode 100644 index 
f96e2172b9..0000000000 --- a/client/src/query_state.rs +++ /dev/null @@ -1,139 +0,0 @@ -use std::{fs, str}; - -use clap::{App, Arg, ArgMatches, SubCommand}; - -use casper_client::Error; -use casper_node::rpcs::state::GetItem; -use casper_types::PublicKey; - -use crate::{command::ClientCommand, common, Success}; - -/// This struct defines the order in which the args are shown for this subcommand's help message. -enum DisplayOrder { - Verbose, - NodeAddress, - RpcId, - StateRootHash, - Key, - Path, -} - -/// Handles providing the arg for and retrieval of the key. -mod key { - use casper_node::crypto::AsymmetricKeyExt; - use casper_types::AsymmetricType; - - use super::*; - - const ARG_NAME: &str = "key"; - const ARG_SHORT: &str = "k"; - const ARG_VALUE_NAME: &str = "FORMATTED STRING or PATH"; - const ARG_HELP: &str = - "The base key for the query. This must be a properly formatted public key, account hash, \ - contract address hash, URef, transfer hash or deploy-info hash. The format for each \ - respectively is \"\", \"account-hash-\", \"hash-\", \ - \"uref--\", \"transfer-\" and \ - \"deploy-\". The public key may instead be read in from a file, in which case \ - enter the path to the file as the --key argument. The file should be one of the two public \ - key files generated via the `keygen` subcommand; \"public_key_hex\" or \"public_key.pem\""; - - pub(super) fn arg() -> Arg<'static, 'static> { - Arg::with_name(ARG_NAME) - .long(ARG_NAME) - .short(ARG_SHORT) - .required(true) - .value_name(ARG_VALUE_NAME) - .help(ARG_HELP) - .display_order(DisplayOrder::Key as usize) - } - - pub(super) fn get(matches: &ArgMatches) -> Result { - let value = matches - .value_of(ARG_NAME) - .unwrap_or_else(|| panic!("should have {} arg", ARG_NAME)); - - // Try to read as a PublicKey PEM file first. - if let Ok(public_key) = PublicKey::from_file(value) { - return Ok(public_key.to_hex()); - } - - // Try to read as a hex-encoded PublicKey file next. 
- if let Ok(hex_public_key) = fs::read_to_string(value) { - let _ = PublicKey::from_hex(&hex_public_key).map_err(|error| { - eprintln!( - "Can't parse the contents of {} as a public key: {}", - value, error - ); - Error::FailedToParseKey - })?; - return Ok(hex_public_key); - } - - // Just return the value. - Ok(value.to_string()) - } -} - -/// Handles providing the arg for and retrieval of the key. -mod path { - use super::*; - - const ARG_NAME: &str = "query-path"; - const ARG_SHORT: &str = "q"; - const ARG_VALUE_NAME: &str = "PATH/FROM/KEY"; - const ARG_HELP: &str = "The path from the key of the query"; - - pub(super) fn arg() -> Arg<'static, 'static> { - Arg::with_name(ARG_NAME) - .long(ARG_NAME) - .short(ARG_SHORT) - .required(false) - .value_name(ARG_VALUE_NAME) - .help(ARG_HELP) - .display_order(DisplayOrder::Path as usize) - } - - pub(super) fn get<'a>(matches: &'a ArgMatches) -> &'a str { - matches.value_of(ARG_NAME).unwrap_or_default() - } -} - -impl<'a, 'b> ClientCommand<'a, 'b> for GetItem { - const NAME: &'static str = "query-state"; - const ABOUT: &'static str = "Retrieves a stored value from the network"; - - fn build(display_order: usize) -> App<'a, 'b> { - SubCommand::with_name(Self::NAME) - .about(Self::ABOUT) - .display_order(display_order) - .arg(common::verbose::arg(DisplayOrder::Verbose as usize)) - .arg(common::node_address::arg( - DisplayOrder::NodeAddress as usize, - )) - .arg(common::rpc_id::arg(DisplayOrder::RpcId as usize)) - .arg(common::state_root_hash::arg( - DisplayOrder::StateRootHash as usize, - )) - .arg(key::arg()) - .arg(path::arg()) - } - - fn run(matches: &ArgMatches<'_>) -> Result { - let maybe_rpc_id = common::rpc_id::get(matches); - let node_address = common::node_address::get(matches); - let verbosity_level = common::verbose::get(matches); - let state_root_hash = common::state_root_hash::get(matches); - let key = key::get(matches)?; - let path = path::get(matches); - - casper_client::get_item( - maybe_rpc_id, - 
node_address, - verbosity_level, - state_root_hash, - &key, - path, - ) - .map(Success::from) - } -} diff --git a/client/tests/integration_test.rs b/client/tests/integration_test.rs deleted file mode 100644 index 0c7cdada8f..0000000000 --- a/client/tests/integration_test.rs +++ /dev/null @@ -1,883 +0,0 @@ -use std::{convert::Infallible, net::SocketAddr, sync::Arc, time::Duration}; - -use futures::{channel::oneshot, future}; -use hyper::{Body, Response, Server}; -use serde::Deserialize; -use tempfile::TempDir; -use tokio::{sync::Mutex, task, task::JoinHandle}; -use tower::builder::ServiceBuilder; -use warp::{Filter, Rejection}; -use warp_json_rpc::Builder; - -use casper_node::crypto::Error as CryptoError; -use hex::FromHexError; - -use casper_client::{DeployStrParams, Error, PaymentStrParams, SessionStrParams}; -use casper_node::rpcs::{ - account::{PutDeploy, PutDeployParams}, - chain::{GetStateRootHash, GetStateRootHashParams}, - info::{GetDeploy, GetDeployParams}, - state::{GetBalance, GetBalanceParams}, - RpcWithOptionalParams, RpcWithParams, -}; - -const VALID_PURSE_UREF: &str = - "uref-0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20-007"; -const VALID_STATE_ROOT_HASH: &str = - "55db08058acb54c295b115cbd9b282eb2862e76d5bb8493bb80c0598a50a12a5"; - -const DEFAULT_RATE_LIMIT: u64 = 1; -const DEFAULT_RATE_PER: Duration = Duration::from_secs(1); - -fn test_filter

( - method: &'static str, -) -> impl Filter,), Error = Rejection> + Copy -where - for<'de> P: Deserialize<'de> + Send, -{ - warp_json_rpc::filters::json_rpc() - .and(warp_json_rpc::filters::method(method)) - .and(warp_json_rpc::filters::params::

()) - .map(|builder: Builder, _params: P| builder.success(()).unwrap()) -} - -fn test_filter_without_params( - method: &'static str, -) -> impl Filter,), Error = Rejection> + Copy { - warp_json_rpc::filters::json_rpc() - .and(warp_json_rpc::filters::method(method)) - .map(|builder: Builder| builder.success(()).unwrap()) -} - -type ServerJoiner = Option>>>>; - -struct MockServerHandle { - graceful_shutdown: Option>, - server_joiner: ServerJoiner, - address: SocketAddr, -} - -trait Captures<'a> {} -impl<'a, T: ?Sized> Captures<'a> for T {} - -impl MockServerHandle { - fn url(&self) -> String { - format!("http://{}", self.address) - } - - /// Will spawn a server on localhost and respond to JSON-RPC requests that successfully - /// deserialize as `P`. - fn spawn

(method: &'static str) -> Self - where - P: 'static, - for<'de> P: Deserialize<'de> + Send, - { - Self::spawn_with_filter( - test_filter::

(method), - DEFAULT_RATE_LIMIT, - DEFAULT_RATE_PER, - ) - } - - /// Will spawn a server on localhost and respond to JSON-RPC requests that don't take - /// parameters. - fn spawn_without_params(method: &'static str) -> Self { - Self::spawn_with_filter( - test_filter_without_params(method), - DEFAULT_RATE_LIMIT, - DEFAULT_RATE_PER, - ) - } - - fn spawn_with_filter(filter: F, rate: u64, per: Duration) -> Self - where - F: Filter,), Error = Rejection> + Send + Sync + 'static + Copy, - { - let service = warp_json_rpc::service(filter); - - let make_svc = - hyper::service::make_service_fn(move |_| future::ok::<_, Infallible>(service.clone())); - - let make_svc = ServiceBuilder::new() - .rate_limit(rate, per) - .service(make_svc); - - let builder = Server::try_bind(&([127, 0, 0, 1], 0).into()).unwrap(); - let (graceful_shutdown, shutdown_receiver) = oneshot::channel::<()>(); - let graceful_shutdown = Some(graceful_shutdown); - let server = builder.serve(make_svc); - let address = server.local_addr(); - let server_with_shutdown = server.with_graceful_shutdown(async { - shutdown_receiver.await.ok(); - }); - let server_joiner = tokio::spawn(server_with_shutdown); - let server_joiner = Some(Arc::new(Mutex::new(server_joiner))); - MockServerHandle { - graceful_shutdown, - server_joiner, - address, - } - } - - fn get_balance(&self, state_root_hash: &str, purse_uref: &str) -> Result<(), ErrWrapper> { - casper_client::get_balance("1", &self.url(), 0, state_root_hash, purse_uref) - .map(|_| ()) - .map_err(ErrWrapper) - } - - fn get_deploy(&self, deploy_hash: &str) -> Result<(), ErrWrapper> { - casper_client::get_deploy("1", &self.url(), 0, deploy_hash) - .map(|_| ()) - .map_err(ErrWrapper) - } - - fn get_state_root_hash(&self, maybe_block_id: &str) -> Result<(), ErrWrapper> { - casper_client::get_state_root_hash("1", &self.url(), 0, maybe_block_id) - .map(|_| ()) - .map_err(ErrWrapper) - } - - fn get_block(&self, maybe_block_id: &str) -> Result<(), ErrWrapper> { - 
casper_client::get_block("1", &self.url(), 0, maybe_block_id) - .map(|_| ()) - .map_err(ErrWrapper) - } - - fn get_item(&self, state_root_hash: &str, key: &str, path: &str) -> Result<(), ErrWrapper> { - casper_client::get_item("1", &self.url(), 0, state_root_hash, key, path) - .map(|_| ()) - .map_err(ErrWrapper) - } - - fn transfer( - &self, - amount: &str, - maybe_target_account: &str, - deploy_params: DeployStrParams, - payment_params: PaymentStrParams, - ) -> Result<(), ErrWrapper> { - casper_client::transfer( - "1", - &self.url(), - 0, - amount, - maybe_target_account, - "", - deploy_params, - payment_params, - ) - .map(|_| ()) - .map_err(ErrWrapper) - } - - fn put_deploy( - &self, - deploy_params: DeployStrParams, - session_params: SessionStrParams, - payment_params: PaymentStrParams, - ) -> Result<(), ErrWrapper> { - casper_client::put_deploy( - "1", - &self.url(), - 0, - deploy_params, - session_params, - payment_params, - ) - .map(|_| ()) - .map_err(ErrWrapper) - } - - fn send_deploy_file(&self, input_path: &str) -> Result<(), ErrWrapper> { - casper_client::send_deploy_file("1", &self.url(), 0, input_path) - .map(|_| ()) - .map_err(ErrWrapper) - } - - fn get_auction_info(&self) -> Result<(), ErrWrapper> { - casper_client::get_auction_info("1", &self.url(), 0) - .map(|_| ()) - .map_err(ErrWrapper) - } -} - -impl Drop for MockServerHandle { - fn drop(&mut self) { - let _ = self.graceful_shutdown.take().unwrap().send(()); - let joiner = self.server_joiner.take().unwrap(); - futures::executor::block_on(async { - let join = &mut *joiner.lock().await; - let _ = join.await; - }); - } -} - -#[derive(Debug)] -struct ErrWrapper(pub Error); - -impl PartialEq for ErrWrapper { - fn eq(&self, other: &ErrWrapper) -> bool { - format!("{:?}", self.0) == format!("{:?}", other.0) - } -} - -impl Into for Error { - fn into(self) -> ErrWrapper { - ErrWrapper(self) - } -} - -mod deploy_params { - use super::*; - - pub fn test_data_valid() -> DeployStrParams<'static> { - 
DeployStrParams { - secret_key: "../resources/local/secret_keys/node-1.pem", - ttl: "10s", - chain_name: "casper-test-chain-name-1", - gas_price: "1", - ..Default::default() - } - } -} - -/// Sample data creation methods for PaymentStrParams -mod payment_params { - use super::*; - - const NAME: &str = "name"; - const ENTRYPOINT: &str = "entrypoint"; - - fn args_simple() -> Vec<&'static str> { - vec!["name_01:bool='false'", "name_02:i32='42'"] - } - - pub fn test_data_with_name() -> PaymentStrParams<'static> { - PaymentStrParams::with_name(NAME, ENTRYPOINT, args_simple(), "") - } -} - -/// Sample data creation methods for SessionStrParams -mod session_params { - use super::*; - - const PKG_HASH: &str = "09dcee4b212cfd53642ab323fbef07dafafc6f945a80a00147f62910a915c4e6"; - const ENTRYPOINT: &str = "entrypoint"; - const VERSION: &str = "0.1.0"; - - fn args_simple() -> Vec<&'static str> { - vec!["name_01:bool='false'", "name_02:i32='42'"] - } - - pub fn test_data_with_package_hash() -> SessionStrParams<'static> { - SessionStrParams::with_package_hash(PKG_HASH, VERSION, ENTRYPOINT, args_simple(), "") - } -} - -mod get_balance { - use super::*; - - use casper_client::ValidateResponseError; - use casper_types::URefFromStrError; - - #[tokio::test(flavor = "multi_thread")] - async fn should_succeed_with_valid_arguments() { - let server_handle = MockServerHandle::spawn::(GetBalance::METHOD); - assert_eq!( - server_handle.get_balance(VALID_STATE_ROOT_HASH, VALID_PURSE_UREF), - // NOTE: this "success" means that we then fail to validate the response, but that - // is outside the scope of this test. 
- // The MockServerHandle could support a pre-baked response, which should successfully - // validate - Err( - Error::InvalidResponse(ValidateResponseError::ValidateResponseFailedToParse).into() - ) - ); - } - - #[tokio::test(flavor = "multi_thread")] - async fn should_fail_with_empty_arguments() { - let server_handle = MockServerHandle::spawn::(GetBalance::METHOD); - assert_eq!( - server_handle.get_balance("", ""), - Err(Error::CryptoError { - context: "state_root_hash", - error: CryptoError::FromHex(FromHexError::InvalidStringLength) - } - .into()) - ); - } - - #[tokio::test(flavor = "multi_thread")] - async fn should_fail_with_empty_state_root_hash() { - let server_handle = MockServerHandle::spawn::(GetBalance::METHOD); - assert_eq!( - server_handle.get_balance("", VALID_PURSE_UREF), - Err(Error::CryptoError { - context: "state_root_hash", - error: CryptoError::FromHex(FromHexError::InvalidStringLength) - } - .into()) - ); - } - - #[tokio::test(flavor = "multi_thread")] - async fn should_fail_with_empty_purse_uref() { - let server_handle = MockServerHandle::spawn::(GetBalance::METHOD); - assert_eq!( - server_handle.get_balance(VALID_STATE_ROOT_HASH, ""), - Err(Error::FailedToParseURef("purse_uref", URefFromStrError::InvalidPrefix).into()) - ); - } - - #[tokio::test(flavor = "multi_thread")] - async fn should_fail_with_bad_state_root_hash() { - let server_handle = MockServerHandle::spawn::(GetBalance::METHOD); - assert_eq!( - server_handle.get_balance("deadbeef", VALID_PURSE_UREF), - Err(Error::CryptoError { - context: "state_root_hash", - error: CryptoError::FromHex(FromHexError::InvalidStringLength) - } - .into()) - ); - } -} - -mod get_state_root_hash { - use super::*; - - #[tokio::test(flavor = "multi_thread")] - async fn should_succeed_with_valid_block_id() { - let server_handle = - MockServerHandle::spawn::(GetStateRootHash::METHOD); - assert_eq!( - server_handle.get_state_root_hash( - "7a073a340bb5e0ca60f4c1dbb3254fb0641da79cda7c5aeb5303efa74fcc9eb1", - ), 
- Ok(()) - ); - } - - #[tokio::test(flavor = "multi_thread")] - async fn should_succeed_with_empty_block_id() { - let server_handle = MockServerHandle::spawn_without_params(GetStateRootHash::METHOD); - assert_eq!(server_handle.get_state_root_hash(""), Ok(())); - } - - #[tokio::test(flavor = "multi_thread")] - async fn should_succeed_with_valid_block_height() { - let server_handle = MockServerHandle::spawn_without_params(GetStateRootHash::METHOD); - assert_eq!(server_handle.get_state_root_hash("1"), Ok(())); - } - - #[tokio::test(flavor = "multi_thread")] - async fn should_fail_with_bad_block_id() { - let server_handle = MockServerHandle::spawn_without_params(GetStateRootHash::METHOD); - let input = ""; - assert!( - server_handle.get_state_root_hash(input).is_err(), - "input '{}' should not parse to a valid block id", - input - ); - } -} - -mod get_block { - use casper_client::ValidateResponseError; - use casper_node::rpcs::chain::{GetBlock, GetBlockParams}; - - use super::*; - - // in this case, the error means that the request was sent successfully, but due to to the - // mock implementation fails to validate - - #[tokio::test(flavor = "multi_thread")] - async fn should_succeed_with_valid_block_hash() { - let server_handle = MockServerHandle::spawn::(GetBlock::METHOD); - assert_eq!( - server_handle.get_block(VALID_STATE_ROOT_HASH), - Err(ErrWrapper(Error::InvalidResponse( - ValidateResponseError::NoBlockInResponse - ))) - ); - } - - #[tokio::test(flavor = "multi_thread")] - async fn should_succeed_with_valid_block_height() { - let server_handle = MockServerHandle::spawn::(GetBlock::METHOD); - assert_eq!( - server_handle.get_block("1"), - Err(ErrWrapper(Error::InvalidResponse( - ValidateResponseError::NoBlockInResponse - ))) - ); - } - - #[tokio::test(flavor = "multi_thread")] - async fn should_succeed_with_valid_empty_block_hash() { - let server_handle = MockServerHandle::spawn_without_params(GetBlock::METHOD); - assert_eq!( - server_handle.get_block(""), - 
Err(ErrWrapper(Error::InvalidResponse( - ValidateResponseError::NoBlockInResponse - ))) - ); - } - - #[tokio::test(flavor = "multi_thread")] - async fn should_fail_with_invalid_block_id() { - let server_handle = MockServerHandle::spawn::(GetBlock::METHOD); - match server_handle.get_block("") { - Err(ErrWrapper(Error::FailedToParseInt("block_identifier", _))) => {} - other => panic!("incorrect error returned from client {:?}", other), - } - } -} - -mod get_item { - use casper_client::ValidateResponseError; - use casper_node::rpcs::state::{GetItem, GetItemParams}; - - use super::*; - - #[tokio::test(flavor = "multi_thread")] - async fn should_succeed_with_valid_state_root_hash() { - let server_handle = MockServerHandle::spawn::(GetItem::METHOD); - - // in this case, the error means that the request was sent successfully, but due to to the - // mock implementation fails to validate - - assert_eq!( - server_handle.get_item(VALID_STATE_ROOT_HASH, VALID_PURSE_UREF, ""), - Err( - Error::InvalidResponse(ValidateResponseError::ValidateResponseFailedToParse).into() - ) - ); - } - - #[tokio::test(flavor = "multi_thread")] - async fn should_fail_with_invalid_state_root_hash() { - let server_handle = MockServerHandle::spawn::(GetItem::METHOD); - assert_eq!( - server_handle.get_item("", VALID_PURSE_UREF, ""), - Err(Error::CryptoError { - context: "state_root_hash", - error: CryptoError::FromHex(FromHexError::OddLength) - } - .into()) - ); - } - - #[tokio::test(flavor = "multi_thread")] - async fn should_fail_with_invalid_key() { - let server_handle = MockServerHandle::spawn::(GetItem::METHOD); - assert_eq!( - server_handle.get_item(VALID_STATE_ROOT_HASH, "invalid key", ""), - Err(Error::FailedToParseKey.into()) - ); - } - - #[tokio::test(flavor = "multi_thread")] - async fn should_fail_with_empty_key() { - let server_handle = MockServerHandle::spawn::(GetItem::METHOD); - assert_eq!( - server_handle.get_item("", "", ""), - Err(Error::CryptoError { - context: "state_root_hash", - 
error: CryptoError::FromHex(FromHexError::OddLength) - } - .into()) - ); - } -} - -mod get_deploy { - use super::*; - - #[tokio::test(flavor = "multi_thread")] - async fn should_succeed_with_valid_hash() { - let server_handle = MockServerHandle::spawn::(GetDeploy::METHOD); - assert_eq!( - server_handle - .get_deploy("09dcee4b212cfd53642ab323fbef07dafafc6f945a80a00147f62910a915c4e6"), - Ok(()) - ); - } - - #[tokio::test(flavor = "multi_thread")] - async fn should_fail_with_invalid_hash() { - let server_handle = MockServerHandle::spawn::(GetDeploy::METHOD); - assert_eq!( - server_handle.get_deploy("012345",), - Err(Error::CryptoError { - context: "deploy_hash", - error: CryptoError::FromHex(FromHexError::InvalidStringLength) - } - .into()) - ); - } -} - -mod get_auction_info { - use super::*; - - use casper_node::rpcs::{state::GetAuctionInfo, RpcWithoutParams}; - - #[tokio::test(flavor = "multi_thread")] - async fn should_succeed() { - let server_handle = MockServerHandle::spawn_without_params(GetAuctionInfo::METHOD); - assert_eq!(server_handle.get_auction_info(), Ok(())); - } -} - -mod make_deploy { - use super::*; - - #[test] - fn should_succeed_for_stdout() { - assert_eq!( - casper_client::make_deploy( - "", - deploy_params::test_data_valid(), - session_params::test_data_with_package_hash(), - payment_params::test_data_with_name() - ) - .map_err(ErrWrapper), - Ok(()) - ); - } - - #[test] - fn should_succeed_for_file() { - let temp_dir = TempDir::new() - .unwrap_or_else(|err| panic!("Failed to create temp dir with error: {}", err)); - let file_path = temp_dir.path().join("test_deploy.json"); - assert_eq!( - casper_client::make_deploy( - file_path.to_str().unwrap(), - deploy_params::test_data_valid(), - session_params::test_data_with_package_hash(), - payment_params::test_data_with_name() - ) - .map_err(ErrWrapper), - Ok(()) - ); - } -} - -mod send_deploy { - use super::*; - - #[tokio::test(flavor = "multi_thread")] - async fn should_fail_with_bad_deploy_file_path() 
{ - let server_handle = MockServerHandle::spawn::(PutDeploy::METHOD); - if let Err(ErrWrapper(Error::IoError { context, .. })) = - server_handle.send_deploy_file("") - { - assert_eq!(context, "unable to read input file \'\'") - } - } - - #[tokio::test(flavor = "multi_thread")] - async fn should_succeed_for_file() { - let temp_dir = TempDir::new() - .unwrap_or_else(|err| panic!("Failed to create temp dir with error: {}", err)); - let file_path = temp_dir.path().join("test_send_deploy.json"); - assert_eq!( - casper_client::make_deploy( - file_path.to_str().unwrap(), - deploy_params::test_data_valid(), - session_params::test_data_with_package_hash(), - payment_params::test_data_with_name() - ) - .map_err(ErrWrapper), - Ok(()) - ); - let server_handle = MockServerHandle::spawn::(PutDeploy::METHOD); - assert_eq!( - server_handle.send_deploy_file(file_path.to_str().unwrap()), - Ok(()) - ); - } -} - -mod sign_deploy { - use super::*; - - #[test] - fn should_succeed_for_file() { - let temp_dir = TempDir::new() - .unwrap_or_else(|err| panic!("Failed to create temp dir with error: {}", err)); - let unsigned_file_path = temp_dir.path().join("test_deploy.json"); - let signed_file_path = temp_dir.path().join("signed_test_deploy.json"); - assert_eq!( - casper_client::make_deploy( - unsigned_file_path.to_str().unwrap(), - deploy_params::test_data_valid(), - session_params::test_data_with_package_hash(), - payment_params::test_data_with_name() - ) - .map_err(ErrWrapper), - Ok(()) - ); - assert_eq!( - casper_client::sign_deploy_file( - unsigned_file_path.to_str().unwrap(), - "../resources/local/secret_keys/node-1.pem", - signed_file_path.to_str().unwrap(), - ) - .map_err(ErrWrapper), - Ok(()) - ); - } - - #[test] - fn should_succeed_for_stdout() { - let temp_dir = TempDir::new() - .unwrap_or_else(|err| panic!("Failed to create temp dir with error: {}", err)); - let unsigned_file_path = temp_dir.path().join("test_deploy.json"); - assert_eq!( - casper_client::make_deploy( - 
unsigned_file_path.to_str().unwrap(), - deploy_params::test_data_valid(), - session_params::test_data_with_package_hash(), - payment_params::test_data_with_name() - ) - .map_err(ErrWrapper), - Ok(()) - ); - assert_eq!( - casper_client::sign_deploy_file( - unsigned_file_path.to_str().unwrap(), - "../resources/local/secret_keys/node-1.pem", - "" - ) - .map_err(ErrWrapper), - Ok(()) - ); - } - - #[test] - fn should_fail_with_bad_secret_key_path() { - let temp_dir = TempDir::new() - .unwrap_or_else(|err| panic!("Failed to create temp dir with error: {}", err)); - let unsigned_file_path = temp_dir.path().join("test_deploy.json"); - assert_eq!( - casper_client::make_deploy( - unsigned_file_path.to_str().unwrap(), - deploy_params::test_data_valid(), - session_params::test_data_with_package_hash(), - payment_params::test_data_with_name() - ) - .map_err(ErrWrapper), - Ok(()) - ); - assert!(casper_client::sign_deploy_file( - unsigned_file_path.to_str().unwrap(), - "", - "" - ) - .is_err()); - } -} - -mod keygen_generate_files { - - use super::*; - - #[test] - fn should_succeed_for_valid_args_ed25519() { - let temp_dir = TempDir::new() - .unwrap_or_else(|err| panic!("Failed to create a temp dir with error: {}", err)); - let path = temp_dir.path().join("test-keygen-ed25519"); - let result = casper_client::keygen::generate_files( - path.to_str().unwrap(), - casper_client::keygen::ED25519, - true, - ) - .map_err(ErrWrapper); - assert_eq!(result, Ok(())); - } - - #[test] - fn should_succeed_for_valid_args_secp256k1() { - let temp_dir = TempDir::new() - .unwrap_or_else(|err| panic!("Failed to create a temp dir with error: {}", err)); - let path = temp_dir.path().join("test-keygen-secp256k1"); - let result = casper_client::keygen::generate_files( - path.to_str().unwrap(), - casper_client::keygen::SECP256K1, - true, - ) - .map_err(ErrWrapper); - assert_eq!(result, Ok(())); - } - - #[test] - fn should_force_overwrite_when_set() { - let temp_dir = TempDir::new() - 
.unwrap_or_else(|err| panic!("Failed to create a temp dir with error: {}", err)); - let path = temp_dir.path().join("test-keygen-force"); - let result = casper_client::keygen::generate_files( - path.to_str().unwrap(), - casper_client::keygen::SECP256K1, - false, - ) - .map_err(ErrWrapper); - assert_eq!(result, Ok(())); - let result = casper_client::keygen::generate_files( - path.to_str().unwrap(), - casper_client::keygen::SECP256K1, - false, - ) - .map_err(ErrWrapper); - assert_eq!( - result, - Err(Error::FileAlreadyExists(path.join("secret_key.pem")).into()) - ); - let result = casper_client::keygen::generate_files( - path.to_str().unwrap(), - casper_client::keygen::SECP256K1, - true, - ) - .map_err(ErrWrapper); - assert_eq!(result, Ok(())); - } - - #[test] - fn should_fail_for_invalid_algorithm() { - let temp_dir = TempDir::new() - .unwrap_or_else(|err| panic!("Failed to create a temp dir with error: {}", err)); - let path = temp_dir.path().join("test-keygen-invalid-algo"); - let result = casper_client::keygen::generate_files( - path.to_str().unwrap(), - "", - true, - ) - .map_err(ErrWrapper); - assert_eq!( - result, - Err(Error::UnsupportedAlgorithm("".to_string()).into()) - ); - } - - #[test] - fn should_fail_for_invalid_output_dir() { - let path = ""; - let result = - casper_client::keygen::generate_files(path, casper_client::keygen::ED25519, true) - .map_err(ErrWrapper); - assert_eq!( - result, - Err(Error::InvalidArgument( - "generate_files", - "empty output_dir provided, must be a valid path".to_string() - ) - .into()) - ); - } -} - -mod put_deploy { - use super::*; - - #[tokio::test(flavor = "multi_thread")] - async fn should_send_put_deploy() { - let server_handle = MockServerHandle::spawn::(PutDeploy::METHOD); - assert_eq!( - server_handle.put_deploy( - deploy_params::test_data_valid(), - session_params::test_data_with_package_hash(), - payment_params::test_data_with_name() - ), - Ok(()) - ); - } -} - -mod rate_limit { - use super::*; - use 
casper_node::types::Timestamp; - - #[tokio::test(flavor = "multi_thread")] - async fn client_should_should_be_rate_limited_to_approx_1_qps() { - // Transfer uses PutDeployParams + PutDeploy - let server_handle = Arc::new(MockServerHandle::spawn::( - PutDeploy::METHOD, - )); - - let now = Timestamp::now(); - // Our default is 1 req/s, so this will hit the threshold - for _ in 0..3u32 { - let amount = "100"; - let maybe_target_account = - "01522ef6c89038019cb7af05c340623804392dd2bb1f4dab5e4a9c3ab752fc0179"; - - // If you remove the tokio::task::spawn_blocking call wrapping the call to transfer, the - // client will eventually eat all executor threads and deadlock (at 64 consecutive - // requests). I believe this is happening because the server is executing on the same - // tokio runtime as the client. - let server_handle = server_handle.clone(); - let join_handle = task::spawn_blocking(move || { - server_handle.transfer( - amount, - maybe_target_account, - deploy_params::test_data_valid(), - payment_params::test_data_with_name(), - ) - }); - assert_eq!(join_handle.await.unwrap(), Ok(())); - } - - let diff = now.elapsed(); - assert!( - diff < Duration::from_millis(4000).into(), - "Rate limiting of 1 qps for 3 sec took too long at {}ms", - diff.millis() - ); - assert!( - diff > Duration::from_millis(2000).into(), - "Rate limiting of 1 qps for 3 sec too fast took {}ms", - diff.millis() - ); - } -} - -mod transfer { - use super::*; - - #[tokio::test(flavor = "multi_thread")] - async fn should_succeed() { - // Transfer uses PutDeployParams + PutDeploy - let server_handle = MockServerHandle::spawn::(PutDeploy::METHOD); - let amount = "100"; - let maybe_target_account = - "01522ef6c89038019cb7af05c340623804392dd2bb1f4dab5e4a9c3ab752fc0179"; - assert_eq!( - server_handle.transfer( - amount, - maybe_target_account, - deploy_params::test_data_valid(), - payment_params::test_data_with_name() - ), - Ok(()) - ); - } - - #[tokio::test(flavor = "multi_thread")] - async fn 
should_fail_if_both_target_purse_and_target_account_are_excluded() { - let server_handle = MockServerHandle::spawn::(PutDeploy::METHOD); - assert_eq!( - server_handle.transfer( - "100", - "", - deploy_params::test_data_valid(), - payment_params::test_data_with_name() - ), - Err(Error::InvalidArgument( - "target_account", - "Invalid arguments to get_transfer_target - must provide either a target account. account=".to_string()).into()) - ); - } -} diff --git a/docker_make.sh b/docker_make.sh index 0cb6c22f0f..45857f4935 100755 --- a/docker_make.sh +++ b/docker_make.sh @@ -1,5 +1,5 @@ -# Images used in this script are build in CasperLabs/buildenv repo +# Images used in this script are built in Casper/buildenv repo # This allows make commands without local build environment setup or # using an OS version other than locally installed. diff --git a/execution_engine/CHANGELOG.md b/execution_engine/CHANGELOG.md new file mode 100644 index 0000000000..d7d941a268 --- /dev/null +++ b/execution_engine/CHANGELOG.md @@ -0,0 +1,596 @@ +# Changelog + +All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog]. + +[comment]: <> (Added: new features) +[comment]: <> (Changed: changes in existing functionality) +[comment]: <> (Deprecated: soon-to-be removed features) +[comment]: <> (Removed: now removed features) +[comment]: <> (Fixed: any bug fixes) +[comment]: <> (Security: in case of vulnerabilities) + + + +## [Unreleased] (node 2.0) + +### Added + +- Add support for a factory pattern on the host side.
+- struct casper_execution_engine::engine_state::engine_config::EngineConfig +- struct casper_execution_engine::engine_state::engine_config::EngineConfigBuilder +- const casper_execution_engine::engine_state::engine_config::DEFAULT_ALLOW_AUCTION_BIDS: bool +- const casper_execution_engine::engine_state::engine_config::DEFAULT_ALLOW_UNRESTRICTED_TRANSFERS: bool +- const casper_execution_engine::engine_state::engine_config::DEFAULT_BALANCE_HOLD_INTERVAL: casper_types::timestamp::TimeDiff +- const casper_execution_engine::engine_state::engine_config::DEFAULT_COMPUTE_REWARDS: bool +- const casper_execution_engine::engine_state::engine_config::DEFAULT_ENABLE_ENTITY: bool +- const casper_execution_engine::engine_state::engine_config::DEFAULT_MAXIMUM_DELEGATION_AMOUNT: u64 +- const casper_execution_engine::engine_state::engine_config::DEFAULT_MAX_ASSOCIATED_KEYS: u32 +- const casper_execution_engine::engine_state::engine_config::DEFAULT_MAX_DELEGATORS_PER_VALIDATOR: u32 +- const casper_execution_engine::engine_state::engine_config::DEFAULT_MAX_QUERY_DEPTH: u64 +- const casper_execution_engine::engine_state::engine_config::DEFAULT_MAX_RUNTIME_CALL_STACK_HEIGHT: u32 +- const casper_execution_engine::engine_state::engine_config::DEFAULT_MAX_STORED_VALUE_SIZE: u32 +- const casper_execution_engine::engine_state::engine_config::DEFAULT_MINIMUM_DELEGATION_AMOUNT: u64 +- const casper_execution_engine::engine_state::engine_config::DEFAULT_PROTOCOL_VERSION: casper_types::protocol_version::ProtocolVersion +- const casper_execution_engine::engine_state::engine_config::DEFAULT_STRICT_ARGUMENT_CHECKING: bool +- const casper_execution_engine::engine_state::engine_config::DEFAULT_VESTING_SCHEDULE_LENGTH_MILLIS: u64 +- enum casper_execution_engine::engine_state::Error +- enum casper_execution_engine::engine_state::ExecutableItem +- enum casper_execution_engine::engine_state::InvalidRequest +- enum casper_execution_engine::engine_state::SessionInputData<'a> +- struct 
casper_execution_engine::engine_state::BlockInfo +- struct casper_execution_engine::engine_state::EngineConfig +- struct casper_execution_engine::engine_state::EngineConfigBuilder +- struct casper_execution_engine::engine_state::ExecutionEngineV1 +- struct casper_execution_engine::engine_state::SessionDataDeploy<'a> +- struct casper_execution_engine::engine_state::SessionDataV1<'a> +- struct casper_execution_engine::engine_state::WasmV1Request +- struct casper_execution_engine::engine_state::WasmV1Result +- const casper_execution_engine::engine_state::DEFAULT_MAX_QUERY_DEPTH: u64 +- const casper_execution_engine::engine_state::DEFAULT_MAX_RUNTIME_CALL_STACK_HEIGHT: u32 +- const casper_execution_engine::engine_state::MAX_PAYMENT_AMOUNT: u64 +- const casper_execution_engine::engine_state::WASMLESS_TRANSFER_FIXED_GAS_PRICE: u8 +- static casper_execution_engine::engine_state::MAX_PAYMENT: once_cell::sync::Lazy +- enum casper_execution_engine::execution::ExecError +- enum casper_execution_engine::resolvers::error::ResolverError +- trait casper_execution_engine::resolvers::memory_resolver::MemoryResolver +- const casper_execution_engine::runtime::cryptography::DIGEST_LENGTH: usize +- fn casper_execution_engine::runtime::cryptography::blake2b>(data: T) -> [u8; 32] +- fn casper_execution_engine::runtime::cryptography::blake3>(data: T) -> [u8; 32] +- fn casper_execution_engine::runtime::cryptography::sha256>(data: T) -> [u8; 32] +- struct casper_execution_engine::runtime::stack::RuntimeStack +- struct casper_execution_engine::runtime::stack::RuntimeStackOverflow +- type casper_execution_engine::runtime::stack::RuntimeStackFrame = casper_types::system::caller::Caller +- enum casper_execution_engine::runtime::PreprocessingError +- enum casper_execution_engine::runtime::WasmValidationError +- struct casper_execution_engine::runtime::Runtime<'a, R> +- struct casper_execution_engine::runtime::RuntimeStack +- struct casper_execution_engine::runtime::RuntimeStackOverflow +- const 
casper_execution_engine::runtime::DEFAULT_BR_TABLE_MAX_SIZE: u32 +- const casper_execution_engine::runtime::DEFAULT_MAX_GLOBALS: u32 +- const casper_execution_engine::runtime::DEFAULT_MAX_PARAMETER_COUNT: u32 +- const casper_execution_engine::runtime::DEFAULT_MAX_TABLE_SIZE: u32 +- fn casper_execution_engine::runtime::cycles_for_instruction(instruction: &casper_wasm::elements::ops::Instruction) -> u32 +- fn casper_execution_engine::runtime::preprocess(wasm_config: casper_types::chainspec::vm_config::wasm_config::WasmConfig, module_bytes: &[u8]) -> core::result::Result +- type casper_execution_engine::runtime::RuntimeStackFrame = casper_types::system::caller::Caller +- enum casper_execution_engine::runtime_context::AllowInstallUpgrade +- struct casper_execution_engine::runtime_context::RuntimeContext<'a, R> +- const casper_execution_engine::runtime_context::RANDOM_BYTES_COUNT: usize + +### Removed + +- struct casper_execution_engine::config::Config +- enum casper_execution_engine::core::engine_state::balance::BalanceResult +- struct casper_execution_engine::core::engine_state::balance::BalanceRequest +- struct casper_execution_engine::core::engine_state::chainspec_registry::ChainspecRegistry +- struct casper_execution_engine::core::engine_state::checksum_registry::ChecksumRegistry +- struct casper_execution_engine::core::engine_state::deploy_item::DeployItem +- enum casper_execution_engine::core::engine_state::engine_config::FeeHandling +- enum casper_execution_engine::core::engine_state::engine_config::RefundHandling +- struct casper_execution_engine::core::engine_state::engine_config::EngineConfig +- struct casper_execution_engine::core::engine_state::engine_config::EngineConfigBuilder +- const casper_execution_engine::core::engine_state::engine_config::DEFAULT_ALLOW_AUCTION_BIDS: bool +- const casper_execution_engine::core::engine_state::engine_config::DEFAULT_ALLOW_UNRESTRICTED_TRANSFERS: bool +- const 
casper_execution_engine::core::engine_state::engine_config::DEFAULT_FEE_HANDLING +- const casper_execution_engine::core::engine_state::engine_config::DEFAULT_MAX_ASSOCIATED_KEYS +- const casper_execution_engine::core::engine_state::engine_config::DEFAULT_MAX_QUERY_DEPTH +- const casper_execution_engine::core::engine_state::engine_config::DEFAULT_MAX_RUNTIME_CALL_STACK_HEIGHT +- const casper_execution_engine::core::engine_state::engine_config::DEFAULT_MAX_STORED_VALUE_SIZE +- const casper_execution_engine::core::engine_state::engine_config::DEFAULT_MINIMUM_BID_AMOUNT +- const casper_execution_engine::core::engine_state::engine_config::DEFAULT_MINIMUM_DELEGATION_AMOUNT +- const casper_execution_engine::core::engine_state::engine_config::DEFAULT_REFUND_HANDLING +- const casper_execution_engine::core::engine_state::engine_config::DEFAULT_STRICT_ARGUMENT_CHECKING +- const casper_execution_engine::core::engine_state::engine_config::DEFAULT_VESTING_SCHEDULE_LENGTH_MILLIS +- enum casper_execution_engine::core::engine_state::era_validators::GetEraValidatorsError +- struct casper_execution_engine::core::engine_state::era_validators::GetEraValidatorsRequest +- enum casper_execution_engine::core::engine_state::executable_deploy_item::ContractIdentifier +- enum casper_execution_engine::core::engine_state::executable_deploy_item::ContractPackageIdentifier +- enum casper_execution_engine::core::engine_state::executable_deploy_item::DeployKind +- enum casper_execution_engine::core::engine_state::executable_deploy_item::ExecutableDeployItem +- enum casper_execution_engine::core::engine_state::executable_deploy_item::ExecutionKind +- struct casper_execution_engine::core::engine_state::executable_deploy_item::ExecutableDeployItemDiscriminantsIter +- struct casper_execution_engine::core::engine_state::execute_request::ExecuteRequest +- struct casper_execution_engine::core::engine_state::execution_effect::ExecutionEffect +- enum 
casper_execution_engine::core::engine_state::execution_result::ExecutionResult +- enum casper_execution_engine::core::engine_state::execution_result::ForcedTransferResult +- struct casper_execution_engine::core::engine_state::execution_result::ExecutionResultBuilder +- type casper_execution_engine::core::engine_state::execution_result::ExecutionResults = alloc::collections::vec_deque::VecDeque +- enum casper_execution_engine::core::engine_state::genesis::GenesisAccount +- enum casper_execution_engine::core::engine_state::genesis::GenesisError +- struct casper_execution_engine::core::engine_state::genesis::AdministratorAccount +- struct casper_execution_engine::core::engine_state::genesis::ExecConfig +- struct casper_execution_engine::core::engine_state::genesis::ExecConfigBuilder +- struct casper_execution_engine::core::engine_state::genesis::GenesisConfig +- struct casper_execution_engine::core::engine_state::genesis::GenesisSuccess +- struct casper_execution_engine::core::engine_state::genesis::GenesisValidator +- const casper_execution_engine::core::engine_state::genesis::DEFAULT_AUCTION_DELAY: u64 +- const casper_execution_engine::core::engine_state::genesis::DEFAULT_GENESIS_TIMESTAMP_MILLIS: u64 +- const casper_execution_engine::core::engine_state::genesis::DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS: u64 +- const casper_execution_engine::core::engine_state::genesis::DEFAULT_ROUND_SEIGNIORAGE_RATE: num_rational::Ratio +- const casper_execution_engine::core::engine_state::genesis::DEFAULT_UNBONDING_DELAY: u64 +- const casper_execution_engine::core::engine_state::genesis::DEFAULT_VALIDATOR_SLOTS: u32 +- enum casper_execution_engine::core::engine_state::get_bids::GetBidsResult +- struct casper_execution_engine::core::engine_state::get_bids::GetBidsRequest +- enum casper_execution_engine::core::engine_state::op::Op +- enum casper_execution_engine::core::engine_state::query::QueryResult +- struct casper_execution_engine::core::engine_state::query::QueryRequest +- struct 
casper_execution_engine::core::engine_state::run_genesis_request::RunGenesisRequest +- enum casper_execution_engine::core::engine_state::step::StepError +- struct casper_execution_engine::core::engine_state::step::EvictItem +- struct casper_execution_engine::core::engine_state::step::RewardItem +- struct casper_execution_engine::core::engine_state::step::SlashItem +- struct casper_execution_engine::core::engine_state::step::StepRequest +- struct casper_execution_engine::core::engine_state::step::StepSuccess +- struct casper_execution_engine::core::engine_state::system_contract_registry::SystemContractRegistry +- enum casper_execution_engine::core::engine_state::upgrade::ProtocolUpgradeError +- struct casper_execution_engine::core::engine_state::upgrade::UpgradeConfig +- struct casper_execution_engine::core::engine_state::upgrade::UpgradeSuccess +- enum casper_execution_engine::core::engine_state::BalanceResult +- enum casper_execution_engine::core::engine_state::Error +- enum casper_execution_engine::core::engine_state::ExecError +- enum casper_execution_engine::core::engine_state::ExecutableDeployItem +- enum casper_execution_engine::core::engine_state::ExecutionResult +- enum casper_execution_engine::core::engine_state::ForcedTransferResult +- enum casper_execution_engine::core::engine_state::GenesisAccount +- enum casper_execution_engine::core::engine_state::GetBidsResult +- enum casper_execution_engine::core::engine_state::GetEraValidatorsError +- enum casper_execution_engine::core::engine_state::PruneResult +- enum casper_execution_engine::core::engine_state::QueryResult +- enum casper_execution_engine::core::engine_state::StepError +- enum casper_execution_engine::core::engine_state::TransferTargetMode +- struct casper_execution_engine::core::engine_state::BalanceRequest +- struct casper_execution_engine::core::engine_state::ChainspecRegistry +- struct casper_execution_engine::core::engine_state::ChecksumRegistry +- struct 
casper_execution_engine::core::engine_state::DeployItem +- struct casper_execution_engine::core::engine_state::EngineConfig +- struct casper_execution_engine::core::engine_state::EngineConfigBuilder +- struct casper_execution_engine::core::engine_state::EngineState +- struct casper_execution_engine::core::engine_state::ExecConfig +- struct casper_execution_engine::core::engine_state::ExecuteRequest +- struct casper_execution_engine::core::engine_state::GenesisConfig +- struct casper_execution_engine::core::engine_state::GenesisSuccess +- struct casper_execution_engine::core::engine_state::GetBidsRequest +- struct casper_execution_engine::core::engine_state::GetEraValidatorsRequest +- struct casper_execution_engine::core::engine_state::PruneConfig +- struct casper_execution_engine::core::engine_state::QueryRequest +- struct casper_execution_engine::core::engine_state::RewardItem +- struct casper_execution_engine::core::engine_state::RunGenesisRequest +- struct casper_execution_engine::core::engine_state::SlashItem +- struct casper_execution_engine::core::engine_state::StepRequest +- struct casper_execution_engine::core::engine_state::StepSuccess +- struct casper_execution_engine::core::engine_state::SystemContractRegistry +- struct casper_execution_engine::core::engine_state::TransferArgs +- struct casper_execution_engine::core::engine_state::TransferRuntimeArgsBuilder +- struct casper_execution_engine::core::engine_state::UpgradeConfig +- struct casper_execution_engine::core::engine_state::UpgradeSuccess +- const casper_execution_engine::core::engine_state::DEFAULT_MAX_QUERY_DEPTH: u64 +- const casper_execution_engine::core::engine_state::DEFAULT_MAX_RUNTIME_CALL_STACK_HEIGHT: u32 +- const casper_execution_engine::core::engine_state::MAX_PAYMENT_AMOUNT: u64 +- const casper_execution_engine::core::engine_state::WASMLESS_TRANSFER_FIXED_GAS_PRICE: u64 +- static casper_execution_engine::core::engine_state::MAX_PAYMENT: once_cell::sync::Lazy +- enum 
casper_execution_engine::core::execution::Error +- enum casper_execution_engine::core::resolvers::error::ResolverError +- trait casper_execution_engine::core::resolvers::memory_resolver::MemoryResolver +- struct casper_execution_engine::core::runtime::stack::RuntimeStack +- struct casper_execution_engine::core::runtime::stack::RuntimeStackOverflow +- type casper_execution_engine::core::runtime::stack::RuntimeStackFrame = casper_types::system::call_stack_element::CallStackElement +- struct casper_execution_engine::core::runtime::Runtime<'a, R> +- struct casper_execution_engine::core::runtime_context::RuntimeContext<'a, R> +- const casper_execution_engine::core::runtime_context::RANDOM_BYTES_COUNT: usize +- fn casper_execution_engine::core::runtime_context::validate_group_membership(contract_package: &casper_types::contracts::ContractPackage, access: &casper_types::contracts::EntryPointAccess, validator: impl core::ops::function::Fn(&casper_types::uref::URef) -> bool) -> core::result::Result<(), casper_execution_engine::core::engine_state::ExecError> +- enum casper_execution_engine::core::tracking_copy::AddResult +- enum casper_execution_engine::core::tracking_copy::TrackingCopyQueryResult +- enum casper_execution_engine::core::tracking_copy::ValidationError +- struct casper_execution_engine::core::tracking_copy::TrackingCopy +- struct casper_execution_engine::core::tracking_copy::TrackingCopyCache +- trait casper_execution_engine::core::tracking_copy::TrackingCopyExt +- fn casper_execution_engine::core::tracking_copy::validate_balance_proof(hash: &casper_hashing::Digest, balance_proof: &casper_execution_engine::storage::trie::merkle_proof::TrieMerkleProof, expected_purse_key: casper_types::key::Key, expected_motes: &casper_types::uint::macro_code::U512) -> core::result::Result<(), casper_execution_engine::core::tracking_copy::ValidationError> +- fn casper_execution_engine::core::tracking_copy::validate_query_proof(hash: &casper_hashing::Digest, proofs: 
&[casper_execution_engine::storage::trie::merkle_proof::TrieMerkleProof], expected_first_key: &casper_types::key::Key, path: &[alloc::string::String], expected_value: &casper_types::stored_value::StoredValue) -> core::result::Result<(), casper_execution_engine::core::tracking_copy::ValidationError> +- enum casper_execution_engine::core::ValidationError +- const casper_execution_engine::core::ADDRESS_LENGTH: usize +- fn casper_execution_engine::core::validate_balance_proof(hash: &casper_hashing::Digest, balance_proof: &casper_execution_engine::storage::trie::merkle_proof::TrieMerkleProof, expected_purse_key: casper_types::key::Key, expected_motes: &casper_types::uint::macro_code::U512) -> core::result::Result<(), casper_execution_engine::core::tracking_copy::ValidationError> +- fn casper_execution_engine::core::validate_query_proof(hash: &casper_hashing::Digest, proofs: &[casper_execution_engine::storage::trie::merkle_proof::TrieMerkleProof], expected_first_key: &casper_types::key::Key, path: &[alloc::string::String], expected_value: &casper_types::stored_value::StoredValue) -> core::result::Result<(), casper_execution_engine::core::tracking_copy::ValidationError> +- type casper_execution_engine::core::Address = [u8; 32] +- struct casper_execution_engine::shared::additive_map::AdditiveMap +- struct casper_execution_engine::shared::execution_journal::ExecutionJournal +- struct casper_execution_engine::shared::host_function_costs::HostFunction +- struct casper_execution_engine::shared::host_function_costs::HostFunctionCosts +- type casper_execution_engine::shared::host_function_costs::Cost = u32 +- enum casper_execution_engine::shared::logging::Style +- struct casper_execution_engine::shared::logging::Settings +- fn casper_execution_engine::shared::logging::initialize(settings: casper_execution_engine::shared::logging::Settings) -> core::result::Result<(), log::SetLoggerError> +- fn casper_execution_engine::shared::logging::log_details(\_log_level: log::Level, 
\_message_format: alloc::string::String, \_properties: alloc::collections::btree::map::BTreeMap<&str, alloc::string::String>) +- fn casper_execution_engine::shared::logging::log_host_function_metrics(\_host_function: &str, \_properties: alloc::collections::btree::map::BTreeMap<&str, alloc::string::String>) +- struct casper_execution_engine::shared::newtypes::CorrelationId +- struct casper_execution_engine::shared::opcode_costs::BrTableCost +- struct casper_execution_engine::shared::opcode_costs::ControlFlowCosts +- struct casper_execution_engine::shared::opcode_costs::OpcodeCosts +- const casper_execution_engine::shared::opcode_costs::DEFAULT_ADD_COST: u32 +- const casper_execution_engine::shared::opcode_costs::DEFAULT_BIT_COST: u32 +- const casper_execution_engine::shared::opcode_costs::DEFAULT_CONST_COST: u32 +- const casper_execution_engine::shared::opcode_costs::DEFAULT_CONTROL_FLOW_BLOCK_OPCODE: u32 +- const casper_execution_engine::shared::opcode_costs::DEFAULT_CONTROL_FLOW_BR_IF_OPCODE: u32 +- const casper_execution_engine::shared::opcode_costs::DEFAULT_CONTROL_FLOW_BR_OPCODE: u32 +- const casper_execution_engine::shared::opcode_costs::DEFAULT_CONTROL_FLOW_BR_TABLE_MULTIPLIER: u32 +- const casper_execution_engine::shared::opcode_costs::DEFAULT_CONTROL_FLOW_BR_TABLE_OPCODE: u32 +- const casper_execution_engine::shared::opcode_costs::DEFAULT_CONTROL_FLOW_CALL_INDIRECT_OPCODE: u32 +- const casper_execution_engine::shared::opcode_costs::DEFAULT_CONTROL_FLOW_CALL_OPCODE: u32 +- const casper_execution_engine::shared::opcode_costs::DEFAULT_CONTROL_FLOW_DROP_OPCODE: u32 +- const casper_execution_engine::shared::opcode_costs::DEFAULT_CONTROL_FLOW_ELSE_OPCODE: u32 +- const casper_execution_engine::shared::opcode_costs::DEFAULT_CONTROL_FLOW_END_OPCODE: u32 +- const casper_execution_engine::shared::opcode_costs::DEFAULT_CONTROL_FLOW_IF_OPCODE: u32 +- const casper_execution_engine::shared::opcode_costs::DEFAULT_CONTROL_FLOW_LOOP_OPCODE: u32 +- const 
casper_execution_engine::shared::opcode_costs::DEFAULT_CONTROL_FLOW_RETURN_OPCODE: u32 +- const casper_execution_engine::shared::opcode_costs::DEFAULT_CONTROL_FLOW_SELECT_OPCODE: u32 +- const casper_execution_engine::shared::opcode_costs::DEFAULT_CONVERSION_COST: u32 +- const casper_execution_engine::shared::opcode_costs::DEFAULT_CURRENT_MEMORY_COST: u32 +- const casper_execution_engine::shared::opcode_costs::DEFAULT_DIV_COST: u32 +- const casper_execution_engine::shared::opcode_costs::DEFAULT_GLOBAL_COST: u32 +- const casper_execution_engine::shared::opcode_costs::DEFAULT_GROW_MEMORY_COST: u32 +- const casper_execution_engine::shared::opcode_costs::DEFAULT_INTEGER_COMPARISON_COST: u32 +- const casper_execution_engine::shared::opcode_costs::DEFAULT_LOAD_COST: u32 +- const casper_execution_engine::shared::opcode_costs::DEFAULT_LOCAL_COST: u32 +- const casper_execution_engine::shared::opcode_costs::DEFAULT_MUL_COST: u32 +- const casper_execution_engine::shared::opcode_costs::DEFAULT_NOP_COST: u32 +- const casper_execution_engine::shared::opcode_costs::DEFAULT_STORE_COST: u32 +- const casper_execution_engine::shared::opcode_costs::DEFAULT_UNREACHABLE_COST: u32 +- struct casper_execution_engine::shared::storage_costs::StorageCosts +- const casper_execution_engine::shared::storage_costs::DEFAULT_GAS_PER_BYTE_COST: u32 +- struct casper_execution_engine::shared::system_config::auction_costs::AuctionCosts +- const casper_execution_engine::shared::system_config::auction_costs::DEFAULT_ACTIVATE_BID_COST: u32 +- const casper_execution_engine::shared::system_config::auction_costs::DEFAULT_ADD_BID_COST: u64 +- const casper_execution_engine::shared::system_config::auction_costs::DEFAULT_DELEGATE_COST: u32 +- const casper_execution_engine::shared::system_config::auction_costs::DEFAULT_DISTRIBUTE_COST: u32 +- const casper_execution_engine::shared::system_config::auction_costs::DEFAULT_GET_ERA_VALIDATORS_COST: u32 +- const 
casper_execution_engine::shared::system_config::auction_costs::DEFAULT_READ_ERA_ID_COST: u32 +- const casper_execution_engine::shared::system_config::auction_costs::DEFAULT_READ_SEIGNIORAGE_RECIPIENTS_COST: u32 +- const casper_execution_engine::shared::system_config::auction_costs::DEFAULT_REDELEGATE_COST: u32 +- const casper_execution_engine::shared::system_config::auction_costs::DEFAULT_RUN_AUCTION_COST: u32 +- const casper_execution_engine::shared::system_config::auction_costs::DEFAULT_SLASH_COST: u32 +- const casper_execution_engine::shared::system_config::auction_costs::DEFAULT_UNDELEGATE_COST: u32 +- const casper_execution_engine::shared::system_config::auction_costs::DEFAULT_WITHDRAW_BID_COST: u32 +- const casper_execution_engine::shared::system_config::auction_costs::DEFAULT_WITHDRAW_DELEGATOR_REWARD_COST: u32 +- const casper_execution_engine::shared::system_config::auction_costs::DEFAULT_WITHDRAW_VALIDATOR_REWARD_COST: u32 +- struct casper_execution_engine::shared::system_config::handle_payment_costs::HandlePaymentCosts +- const casper_execution_engine::shared::system_config::handle_payment_costs::DEFAULT_FINALIZE_PAYMENT_COST: u32 +- const casper_execution_engine::shared::system_config::handle_payment_costs::DEFAULT_GET_PAYMENT_PURSE_COST: u32 +- const casper_execution_engine::shared::system_config::handle_payment_costs::DEFAULT_GET_REFUND_PURSE_COST: u32 +- const casper_execution_engine::shared::system_config::handle_payment_costs::DEFAULT_SET_REFUND_PURSE_COST: u32 +- struct casper_execution_engine::shared::system_config::mint_costs::MintCosts +- const casper_execution_engine::shared::system_config::mint_costs::DEFAULT_BALANCE_COST: u32 +- const casper_execution_engine::shared::system_config::mint_costs::DEFAULT_CREATE_COST: u32 +- const casper_execution_engine::shared::system_config::mint_costs::DEFAULT_MINT_COST: u32 +- const casper_execution_engine::shared::system_config::mint_costs::DEFAULT_MINT_INTO_EXISTING_PURSE_COST: u32 +- const 
casper_execution_engine::shared::system_config::mint_costs::DEFAULT_READ_BASE_ROUND_REWARD_COST: u32 +- const casper_execution_engine::shared::system_config::mint_costs::DEFAULT_REDUCE_TOTAL_SUPPLY_COST: u32 +- const casper_execution_engine::shared::system_config::mint_costs::DEFAULT_TRANSFER_COST: u32 +- struct casper_execution_engine::shared::system_config::standard_payment_costs::StandardPaymentCosts +- struct casper_execution_engine::shared::system_config::SystemConfig +- const casper_execution_engine::shared::system_config::DEFAULT_WASMLESS_TRANSFER_COST: u32 +- fn casper_execution_engine::shared::test_utils::mocked_account(account_hash: casper_types::account::account_hash::AccountHash) -> alloc::vec::Vec<(casper_types::key::Key, casper_types::stored_value::StoredValue)> +- enum casper_execution_engine::shared::transform::Error +- enum casper_execution_engine::shared::transform::Transform +- static casper_execution_engine::shared::utils::OS_PAGE_SIZE: once_cell::sync::Lazy +- fn casper_execution_engine::shared::utils::check_multiple_of_page_size(value: usize) +- fn casper_execution_engine::shared::utils::jsonify(value: T, pretty_print: bool) -> alloc::string::String where T: serde::ser::Serialize +- struct casper_execution_engine::shared::wasm_config::WasmConfig +- const casper_execution_engine::shared::wasm_config::DEFAULT_MAX_STACK_HEIGHT: u32 +- const casper_execution_engine::shared::wasm_config::DEFAULT_WASM_MAX_MEMORY: u32 +- enum casper_execution_engine::shared::wasm_prep::PreprocessingError +- enum casper_execution_engine::shared::wasm_prep::WasmValidationError +- const casper_execution_engine::shared::wasm_prep::DEFAULT_BR_TABLE_MAX_SIZE: u32 +- const casper_execution_engine::shared::wasm_prep::DEFAULT_MAX_GLOBALS: u32 +- const casper_execution_engine::shared::wasm_prep::DEFAULT_MAX_PARAMETER_COUNT: u32 +- const casper_execution_engine::shared::wasm_prep::DEFAULT_MAX_TABLE_SIZE: u32 +- fn 
casper_execution_engine::shared::wasm_prep::deserialize(module_bytes: &[u8]) -> core::result::Result +- fn casper_execution_engine::shared::wasm_prep::get_module_from_entry_points(entry_point_names: alloc::vec::Vec<&str>, module: casper_wasm::elements::module::Module) -> core::result::Result, casper_execution_engine::core::engine_state::ExecError> +- fn casper_execution_engine::shared::wasm_prep::preprocess(wasm_config: casper_execution_engine::shared::wasm_config::WasmConfig, module_bytes: &[u8]) -> core::result::Result +- enum casper_execution_engine::storage::error::in_memory::Error +- enum casper_execution_engine::storage::error::lmdb::Error +- enum casper_execution_engine::storage::error::Error +- struct casper_execution_engine::storage::global_state::in_memory::InMemoryGlobalState +- struct casper_execution_engine::storage::global_state::lmdb::LmdbGlobalState +- struct casper_execution_engine::storage::global_state::scratch::ScratchGlobalState +- enum casper_execution_engine::storage::global_state::CommitError +- trait casper_execution_engine::storage::global_state::CommitProvider: casper_execution_engine::storage::global_state::StateProvider +- trait casper_execution_engine::storage::global_state::StateProvider +- trait casper_execution_engine::storage::global_state::StateReader +- fn casper_execution_engine::storage::global_state::commit<'a, R, S, H, E>(environment: &'a R, store: &S, correlation_id: casper_execution_engine::shared::newtypes::CorrelationId, prestate_hash: casper_hashing::Digest, effects: casper_execution_engine::shared::additive_map::AdditiveMap) -> core::result::Result where R: casper_execution_engine::storage::transaction_source::TransactionSource<'a, Handle = ::Handle>, S: casper_execution_engine::storage::trie_store::TrieStore, ::Error: core::convert::From<::Error>, E: core::convert::From<::Error> + core::convert::From<::Error> + core::convert::From + core::convert::From, H: core::hash::BuildHasher +- fn 
casper_execution_engine::storage::global_state::put_stored_values<'a, R, S, E>(environment: &'a R, store: &S, correlation_id: casper_execution_engine::shared::newtypes::CorrelationId, prestate_hash: casper_hashing::Digest, stored_values: std::collections::hash::map::HashMap) -> core::result::Result where R: casper_execution_engine::storage::transaction_source::TransactionSource<'a, Handle = ::Handle>, S: casper_execution_engine::storage::trie_store::TrieStore, ::Error: core::convert::From<::Error>, E: core::convert::From<::Error> + core::convert::From<::Error> + core::convert::From + core::convert::From +- trait casper_execution_engine::storage::store::Store +- trait casper_execution_engine::storage::store::StoreExt: casper_execution_engine::storage::store::Store +- struct casper_execution_engine::storage::transaction_source::in_memory::InMemoryEnvironment +- struct casper_execution_engine::storage::transaction_source::in_memory::InMemoryReadTransaction +- struct casper_execution_engine::storage::transaction_source::in_memory::InMemoryReadWriteTransaction<'a> +- struct casper_execution_engine::storage::transaction_source::lmdb::LmdbEnvironment +- trait casper_execution_engine::storage::transaction_source::Readable: casper_execution_engine::storage::transaction_source::Transaction +- trait casper_execution_engine::storage::transaction_source::Transaction: core::marker::Sized +- trait casper_execution_engine::storage::transaction_source::TransactionSource<'a> +- trait casper_execution_engine::storage::transaction_source::Writable: casper_execution_engine::storage::transaction_source::Transaction +- fn casper_execution_engine::storage::transaction_source::Writable::write(&mut self, handle: Self::Handle, key: &[u8], value: &[u8]) -> core::result::Result<(), Self::Error> +- impl<'a> casper_execution_engine::storage::transaction_source::Writable for lmdb::transaction::RwTransaction<'a> +- fn lmdb::transaction::RwTransaction<'a>::write(&mut self, handle: Self::Handle, 
key: &[u8], value: &[u8]) -> core::result::Result<(), Self::Error> +- enum casper_execution_engine::storage::trie::merkle_proof::TrieMerkleProofStep +- struct casper_execution_engine::storage::trie::merkle_proof::TrieMerkleProof +- enum casper_execution_engine::storage::trie::DescendantsIterator<'a> +- enum casper_execution_engine::storage::trie::Pointer +- enum casper_execution_engine::storage::trie::Trie +- struct casper_execution_engine::storage::trie::PointerBlock +- struct casper_execution_engine::storage::trie::TrieRaw +- type casper_execution_engine::storage::trie::Parents = alloc::vec::Vec<(u8, casper_execution_engine::storage::trie::Trie)> +- type casper_execution_engine::storage::trie::PointerBlockArray = [casper_execution_engine::storage::trie::PointerBlockValue; 256] +- type casper_execution_engine::storage::trie::PointerBlockValue = core::option::Option +- struct casper_execution_engine::storage::trie_store::in_memory::InMemoryTrieStore +- struct casper_execution_engine::storage::trie_store::lmdb::LmdbTrieStore +- trait casper_execution_engine::storage::trie_store::TrieStore: casper_execution_engine::storage::store::Store> +- macro casper_execution_engine::make_array_newtype! + + +## 7.0.1 + +### Changed +* Change the cost of `wasm.storage_costs.gas_per_byte` and `shared::storage_costs::DEFAULT_GAS_PER_BYTE_COST` from `630_000` to `1_117_587`. +* Change the cost of the host function `casper_add_associated_key` from `9_000` to `1_200_000`. +* Change the cost of the argument `entry_points_size` of host function `casper_add_contract_version` from `0` to `120_000`. +* Change the cost of the host function `casper_blake2b` and its argument `in_size` from `200` and `0` respectively to `1_200_000` and `120_000`. +* Change the cost of the host function `casper_call_contract` and its arguments `entry_point_name_size` and `runtime_args_size` from `4_500`, `0` and `420` respectively to `300_000_000`, `120_000` and `120_000`.
+* Change the cost of the host function `casper_call_versioned_contract` and the arguments `entry_point_name_size` and `runtime_args_size` from `4_500`, `0` and `420` respectively to `300_000_000`, `120_000` and `120_000`. +* Change the cost of the host function `casper_get_balance` from `3_800` to `3_000_000`. +* Change the cost of arguments `name_size` and `dest_size` of host function `casper_get_named_arg` from `0` to `120_000`. +* Change the cost of the host function `casper_put_key` and its arguments `name_size` and `key_size` from `38_000`, `1_100` and `0` respectively to `100_000_000`, `120_000` and `120_000`. +* Change the cost of the host function `casper_read_value` and its argument `key_size` from `6_000` and `0` respectively to `60_000` and `120_000`. +* Change the cost of the argument `urefs_size` of host function `casper_remove_contract_user_group_urefs` from `0` to `120_000`. +* Change the cost of the host function `casper_transfer_from_purse_to_purse` from `82_000` to `82_000_000`. + + + +## [Unreleased] (node 1.5.4) +## 7.0.0 + +### Added +* Add chainspec option `core.allow_unrestricted_transfers` that, if enabled, allows token transfers between any two peers. Disabling this option makes sense only for private chains. +* Add chainspec option `core.allow_auction_bids` that, if enabled, allows auction entrypoints `delegate` and `add_bid` to operate. Disabling this option makes sense only for private chains. +* Add chainspec option `core.compute_rewards` that, if enabled, computes rewards for each era. Disabling this option makes sense only for private chains. +* Add chainspec option `core.refund_handling` that specifies how payment refunds are handled. +* Add chainspec option `core.fee_handling` that specifies how transaction fees are handled. +* Add chainspec option `core.administrators` that, if set, contains list of administrator accounts. This option makes sense only for private chains. 
+* Add support for a new FFI function `enable_contract_version` for enabling a specific version of a contract. + +### Changed +* `current stack height` is written to `stderr` in case `Trap(Unreachable)` error is encountered during Wasm execution. +* Tweak upgrade logic transforming withdraw purses to early exit if possible. +* Lower the default gas costs of opcodes. + - Set the cost for branching opcodes to 35,000 (`br`, `br_if`, `br_table`). + - Set the cost for call opcodes to 68,000 (`call`, `call_indirect`). +* Default value for round seigniorage rate is halved to `7/175070816` due to reduction in block times, to maintain current seigniorage rate (per unit of time). +* Refund ratio is changed from 0% to 99%. + + + +## 6.0.0 + +### Changed +* Default value for `max_stack_height` is increased to 500. +* Replace usage of `parity-wasm` and `wasmi` with Casper forks `casper-wasm` and `casper-wasmi` respectively. + +### Fixed +* Fix incorrect handling of unbonding purses for validators that were also evicted in that era. +* Fix issue with one-time code used for migrating data to support redelegations. + +### Security +* Fix unbounded memory allocation issue while parsing Wasm. + + + +## 5.0.0 + +### Added +* Add a new entry point `redelegate` to the Auction system contract which allows users to redelegate to another validator without having to unbond. The function signature for the entrypoint is: `redelegate(delegator: PublicKey, validator: PublicKey, amount: U512, new_validator: PublicKey)` +* Add a new type `ChainspecRegistry` which contains the hashes of the `chainspec.toml` and will optionally contain the hashes for `accounts.toml` and `global_state.toml`. +* Add ability to enable strict args checking when executing a contract; i.e. that all non-optional args are provided and of the correct `CLType`. + +### Changed +* Fix some integer casts. +* Change both genesis and upgrade functions to write `ChainspecRegistry` under the fixed `Key::ChainspecRegistry`. 
+* Lift the temporary limit of the size of individual values stored in global state. +* Providing incorrect Wasm for execution will cause the default 2.5CSPR to be charged. +* Update the default `control_flow` opcode cost from `440` to `440_000`. + + + +## 4.0.0 + +### Changed +* Update dependencies (in particular `casper-types` to v2.0.0 due to additional `Key` variant, requiring a major version bump here). + + + +## 3.1.1 + +### Changed +* Update the following constant values to match settings in production chainspec: + * `DEFAULT_RET_VALUE_SIZE_WEIGHT` + * `DEFAULT_CONTROL_FLOW_CALL_OPCODE` + * `DEFAULT_CONTROL_FLOW_CALL_INDIRECT_OPCODE` + * `DEFAULT_GAS_PER_BYTE_COST` + * `DEFAULT_ADD_BID_COST` + * `DEFAULT_WITHDRAW_BID_COST` + * `DEFAULT_DELEGATE_COST` + * `DEFAULT_UNDELEGATE_COST` + * `DEFAULT_MAX_STACK_HEIGHT` + + + +## 3.1.0 + +### Added +* Add `commit_prune` functionality to support pruning of entries in global storage. + +### Changed +* Update to use `casper-wasm-utils`; a patched fork of the archived `wasm-utils`. + + + +## 3.0.0 + +### Changed +* Implement more precise control over opcode costs that lowers the gas cost. +* Increase cost of `withdraw_bid` and `undelegate` auction entry points to 2.5CSPR. + + + +## 2.0.1 + +### Security +* Implement checks before preprocessing Wasm to avoid potential OOM when initializing table section. +* Implement checks before preprocessing Wasm to avoid references to undeclared functions or globals. +* Implement checks before preprocessing Wasm to avoid possibility to import internal host functions. + + +## 2.0.0 - 2022-05-11 + +### Changed +* Change contract runtime to allow caching global state changes during execution of a single block, also avoiding writing interstitial data to global state. + + + +## 1.5.0 - 2022-04-05 + +### Changed +* Temporarily limit the size of individual values stored in global state. 
+ +### Security +* `amount` argument is now required for transactions wanting to send tokens using account's main purse. It is now an upper limit on all tokens being transferred within the transaction. +* Significant rework around the responsibilities of the executor, runtime and runtime context objects, with a focus on removing alternate execution paths where unintended escalation of privilege was possible. +* Attenuate the main purse URef to remove WRITE permissions by default when returned via `ret` or passed as a runtime argument. +* Fix a potential panic during Wasm preprocessing. +* `get_era_validators` performs a query rather than execution. + + + +## 1.4.4 - 2021-12-29 + +### Changed +* No longer checksum-hex encode hash digest and address types. + + + +## 1.4.3 - 2021-12-06 + +### Changed +* Auction contract now handles minting into an existing purse. +* Default maximum stack size in `WasmConfig` changed to 188. +* Default behavior of LMDB changed to use [`NO_READAHEAD`](https://docs.rs/lmdb/0.8.0/lmdb/struct.EnvironmentFlags.html#associatedconstant.NO_READAHEAD) + +### Fixed +* Fix a case where an unlocked and partially unbonded genesis validator with smaller stake incorrectly occupies slot for a non-genesis validator with higher stake. + + + +## [1.4.2] - 2021-11-11 + +### Changed +* Execution transforms are returned in their insertion order. + +### Removed +* Removed `SystemContractCache` as it was not being used anymore + +## [1.4.0] - 2021-10-04 + +### Added +* Added genesis validation step to ensure there are more genesis validators than validator slots. +* Added a support for passing a public key as a `target` argument in native transfers. +* Added a `max_associated_keys` configuration option for a hard limit of associated keys under accounts. + +### Changed +* Documented `storage` module and children. +* Reduced visibility to `pub(crate)` in several areas, allowing some dead code to be noticed and pruned. 
+* Support building and testing using stable Rust. +* Increase price of `create_purse` to 2.5CSPR. +* Increase price of native transfer to 100 million motes (0.1 CSPR). +* Improve doc comments to clarify behavior of the bidding functionality. +* Document `core` and `shared` modules and their children. +* Change parameters to `LmdbEnvironment`'s constructor enabling manual flushing to disk. + +### Fixed +* Fix a case where user could potentially supply a refund purse as a payment purse. + + + +## [1.3.0] - 2021-07-19 + +### Changed +* Update pinned version of Rust to `nightly-2021-06-17`. + + + +## [1.2.0] - 2021-05-27 + +### Added +* Add validation that the delegated amount of each genesis account is non-zero. +* Add `activate-bid` client contract. +* Add a check in `Mint::transfer` that the source has `Read` permissions. + +### Changed +* Change to Apache 2.0 license. +* Remove the strict expectation that minor and patch protocol versions must always increase by 1. + +### Removed +* Remove `RootNotFound` error struct. + + + +## [1.1.1] - 2021-04-19 + +No changes. + + + +## [1.1.0] - 2021-04-13 [YANKED] + +No changes. + + + +## [1.0.1] - 2021-04-08 + +No changes. + + + +## [1.0.0] - 2021-03-30 + +### Added +* Initial release of execution engine for Casper mainnet. 
+ + + +[Keep a Changelog]: https://keepachangelog.com/en/1.0.0 +[unreleased]: https://github.com/casper-network/casper-node/compare/37d561634adf73dab40fffa7f1f1ee47e80bf8a1...dev +[1.4.2]: https://github.com/casper-network/casper-node/compare/v1.4.0...37d561634adf73dab40fffa7f1f1ee47e80bf8a1 +[1.4.0]: https://github.com/casper-network/casper-node/compare/v1.3.0...v1.4.0 +[1.3.0]: https://github.com/casper-network/casper-node/compare/v1.2.0...v1.3.0 +[1.2.0]: https://github.com/casper-network/casper-node/compare/v1.1.1...v1.2.0 +[1.1.1]: https://github.com/casper-network/casper-node/compare/v1.0.1...v1.1.1 +[1.1.0]: https://github.com/casper-network/casper-node/compare/v1.0.1...v1.1.1 +[1.0.1]: https://github.com/casper-network/casper-node/compare/v1.0.0...v1.0.1 +[1.0.0]: https://github.com/casper-network/casper-node/releases/tag/v1.0.0 diff --git a/execution_engine/Cargo.toml b/execution_engine/Cargo.toml index b175701c84..9c2756b77d 100644 --- a/execution_engine/Cargo.toml +++ b/execution_engine/Cargo.toml @@ -1,67 +1,76 @@ [package] name = "casper-execution-engine" -version = "1.0.0" # when updating, also update 'html_root_url' in lib.rs -authors = ["Henry Till ", "Ed Hastings "] -edition = "2018" -description = "CasperLabs execution engine crates." +version = "8.1.1" # when updating, also update 'html_root_url' in lib.rs +authors = ["Henry Till ", "Ed Hastings ", "Michał Papierski "] +edition = "2021" +description = "Casper execution engine crates." 
readme = "README.md" documentation = "https://docs.rs/casper-execution-engine" -homepage = "https://casperlabs.io" -repository = "https://github.com/CasperLabs/casper-node/tree/master/execution_engine" -license-file = "../LICENSE" +homepage = "https://casper.network" +repository = "https://github.com/casper-network/casper-node/tree/master/execution_engine" +license = "Apache-2.0" [dependencies] anyhow = "1.0.33" base16 = "0.2.1" bincode = "1.3.1" -blake2 = "0.9.0" -casper-types = { version = "1.0.0", path = "../types", features = ["std", "gens"] } -chrono = "0.4.10" +blake2 = { version = "0.10.6", default-features = false } +blake3 = { version = "1.5.0", default-features = false, features = ["pure"] } +sha2 = { version = "0.10.8", default-features = false } +casper-storage = { version = "2.1.1", path = "../storage", default-features = true } +casper-types = { version = "6.0.1", path = "../types", default-features = false, features = ["datasize", "gens", "json-schema", "std"] } +casper-wasm = { version = "1.0.0", default-features = false, features = ["sign_ext", "call_indirect_overlong"] } +casper-wasm-utils = { version = "4.0.0", default-features = false, features = ["sign_ext", "call_indirect_overlong"] } +casper-wasmi = { version = "1.0.0", features = ["sign_ext", "call_indirect_overlong"] } datasize = "0.2.4" -hex = "0.4.2" +either = "1.8.1" hex-buffer-serde = "0.2.1" hex_fmt = "0.3.0" hostname = "0.3.0" -itertools = "0.10.0" -libc = "0.2.66" +humantime = "2" +itertools = "0.10.3" linked-hash-map = "0.5.3" -lmdb = "0.8" log = { version = "0.4.8", features = ["std", "serde", "kv_unstable"] } num = { version = "0.4.0", default-features = false } -num-derive = "0.3.0" +num-derive = { workspace = true } num-rational = { version = "0.4.0", features = ["serde"] } -num-traits = "0.2.10" +num-traits = { workspace = true } +num_cpus = "1" once_cell = "1.5.2" -parity-wasm = "0.41.0" proptest = { version = "1.0.0", optional = true } -pwasm-utils = "0.16.0" rand = "0.8.3" 
rand_chacha = "0.3.0" -schemars = { version = "0.8.0", features = ["preserve_order"] } +schemars = { version = "0.8.16", features = ["preserve_order"] } serde = { version = "1", features = ["derive"] } serde_bytes = "0.11.5" -serde_json = "1" +serde_json = { version = "1", features = ["preserve_order"] } +strum = { version = "0.24.1", features = ["strum_macros", "derive"], optional = true } +tempfile = "3.4.0" thiserror = "1.0.18" tracing = "0.1.18" uint = "0.9.0" -uuid = { version = "0.8.1", features = ["serde", "v4"] } -# By depending on wasmi 0.8.0 we are stuck with parity-wasm 0.41.0 -# and pwasm-utils 0.16 as upstream wasmi still depends on 0.41.0. -# https://github.com/paritytech/wasmi/commit/f5fd480260490ff0de455017229caf7baee68195 -wasmi = "0.8.0" +clap = { version = "4.5.21", features = ["derive"] } +toml = "0.8.19" +wat = "1.220.0" [dev-dependencies] assert_matches = "1.3.0" -criterion = "0.3.3" +casper-types = { path = "../types", features = ["datasize", "json-schema", "testing", "std"] } +criterion = "0.5.1" proptest = "1.0.0" -tempfile = "3.1.0" +tempfile = "3.4.0" +walrus = "0.20.2" [features] default = ["gens"] -gens = ["proptest"] +# DEPRECATED +gens = ["casper-types/testing", "proptest", "strum"] test-support = [] [[bench]] name = "trie_bench" harness = false +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] diff --git a/execution_engine/README.md b/execution_engine/README.md index 5c38731551..0dd5a4bbcd 100644 --- a/execution_engine/README.md +++ b/execution_engine/README.md @@ -1,14 +1,13 @@ # `casper-execution-engine` -[![LOGO](https://raw.githubusercontent.com/CasperLabs/casper-node/master/images/CasperLabs_Logo_Horizontal_RGB.png)](https://casperlabs.io/) +[![LOGO](https://raw.githubusercontent.com/casper-network/casper-node/master/images/casper-association-logo-primary.svg)](https://casper.network/) -[![Build 
Status](https://drone-auto.casperlabs.io/api/badges/CasperLabs/casper-node/status.svg?branch=master)](http://drone-auto.casperlabs.io/CasperLabs/casper-node) [![Crates.io](https://img.shields.io/crates/v/casper-execution-engine)](https://crates.io/crates/casper-execution-engine) [![Documentation](https://docs.rs/casper-execution-engine/badge.svg)](https://docs.rs/casper-execution-engine) -[![License](https://img.shields.io/badge/license-COSL-blue.svg)](https://github.com/CasperLabs/casper-node/blob/master/LICENSE) +[![License](https://img.shields.io/badge/license-Apache-blue)](https://github.com/casper-network/casper-node/blob/master/LICENSE) -The main component of the CasperLabs Wasm execution engine. +The main component of the Casper Wasm execution engine. ## License -Licensed under the [CasperLabs Open Source License (COSL)](https://github.com/CasperLabs/casper-node/blob/master/LICENSE). \ No newline at end of file +Licensed under the [Apache License Version 2.0](https://github.com/casper-network/casper-node/blob/master/LICENSE). 
\ No newline at end of file diff --git a/execution_engine/benches/trie_bench.rs b/execution_engine/benches/trie_bench.rs index 706514b5b3..7329eee218 100644 --- a/execution_engine/benches/trie_bench.rs +++ b/execution_engine/benches/trie_bench.rs @@ -1,27 +1,34 @@ use criterion::{black_box, criterion_group, criterion_main, Bencher, Criterion}; -use casper_execution_engine::{ - shared::{newtypes::Blake2bHash, stored_value::StoredValue}, - storage::trie::{Pointer, PointerBlock, Trie}, -}; +use casper_storage::global_state::trie::{PointerBlock, Trie}; use casper_types::{ account::AccountHash, + addressable_entity::EntityKindTag, bytesrepr::{FromBytes, ToBytes}, - CLValue, Key, + global_state::Pointer, + AddressableEntityHash, CLValue, Digest, Key, StoredValue, }; fn serialize_trie_leaf(b: &mut Bencher) { + let contract_key = Key::addressable_entity_key( + EntityKindTag::SmartContract, + AddressableEntityHash::new([42; 32]), + ); let leaf = Trie::Leaf { key: Key::Account(AccountHash::new([0; 32])), - value: StoredValue::CLValue(CLValue::from_t(42_i32).unwrap()), + value: StoredValue::CLValue(CLValue::from_t(contract_key).unwrap()), }; b.iter(|| ToBytes::to_bytes(black_box(&leaf))); } fn deserialize_trie_leaf(b: &mut Bencher) { + let contract_key: Key = Key::addressable_entity_key( + EntityKindTag::SmartContract, + AddressableEntityHash::new([42; 32]), + ); let leaf = Trie::Leaf { key: Key::Account(AccountHash::new([0; 32])), - value: StoredValue::CLValue(CLValue::from_t(42_i32).unwrap()), + value: StoredValue::CLValue(CLValue::from_t(contract_key).unwrap()), }; let leaf_bytes = leaf.to_bytes().unwrap(); b.iter(|| Trie::::from_bytes(black_box(&leaf_bytes))); @@ -29,14 +36,14 @@ fn deserialize_trie_leaf(b: &mut Bencher) { fn serialize_trie_node(b: &mut Bencher) { let node = Trie::::Node { - pointer_block: Box::new(PointerBlock::default()), + pointer_block: Box::::default(), }; b.iter(|| ToBytes::to_bytes(black_box(&node))); } fn deserialize_trie_node(b: &mut Bencher) { 
let node = Trie::::Node { - pointer_block: Box::new(PointerBlock::default()), + pointer_block: Box::::default(), }; let node_bytes = node.to_bytes().unwrap(); @@ -44,19 +51,19 @@ fn deserialize_trie_node(b: &mut Bencher) { } fn serialize_trie_node_pointer(b: &mut Bencher) { - let node = Trie::::Extension { - affix: (0..255).collect(), - pointer: Pointer::NodePointer(Blake2bHash::new(&[0; 32])), - }; + let node = Trie::::extension( + (0..255).collect(), + Pointer::NodePointer(Digest::hash([0; 32])), + ); b.iter(|| ToBytes::to_bytes(black_box(&node))); } fn deserialize_trie_node_pointer(b: &mut Bencher) { - let node = Trie::::Extension { - affix: (0..255).collect(), - pointer: Pointer::NodePointer(Blake2bHash::new(&[0; 32])), - }; + let node = Trie::::extension( + (0..255).collect(), + Pointer::NodePointer(Digest::hash([0; 32])), + ); let node_bytes = node.to_bytes().unwrap(); b.iter(|| Trie::::from_bytes(black_box(&node_bytes))); diff --git a/execution_engine/src/bin/run_wasm.rs b/execution_engine/src/bin/run_wasm.rs new file mode 100644 index 0000000000..d92e7105ff --- /dev/null +++ b/execution_engine/src/bin/run_wasm.rs @@ -0,0 +1,250 @@ +use std::{ + fs, + path::{Path, PathBuf}, + time::{Duration, Instant}, +}; + +use casper_types::WasmConfig; + +use casper_execution_engine::runtime; +use casper_wasmi::{ + memory_units::Pages, Externals, FuncInstance, HostError, ImportsBuilder, MemoryInstance, + ModuleImportResolver, ModuleInstance, RuntimeValue, Signature, +}; + +fn prepare_instance(module_bytes: &[u8], chainspec: &ChainspecConfig) -> casper_wasmi::ModuleRef { + let wasm_module = runtime::preprocess(chainspec.wasm_config, module_bytes).unwrap(); + let module = casper_wasmi::Module::from_casper_wasm_module(wasm_module).unwrap(); + let resolver = MinimalWasmiResolver::default(); + let mut imports = ImportsBuilder::new(); + imports.push_resolver("env", &resolver); + let not_started_module = ModuleInstance::new(&module, &imports).unwrap(); + + 
assert!(!not_started_module.has_start()); + + let instance = not_started_module.not_started_instance(); + instance.clone() +} + +struct RunWasmInfo { + elapsed: Duration, + gas_used: u64, +} + +fn run_wasm( + module_bytes: Vec, + cli_args: &Args, + chainspec: &ChainspecConfig, + func_name: &str, +) -> ( + Result, casper_wasmi::Error>, + RunWasmInfo, +) { + println!( + "Invoke export {:?} with args {:?}", + func_name, cli_args.args + ); + + let instance = prepare_instance(&module_bytes, chainspec); + + let params = { + let export = instance.export_by_name(func_name).unwrap(); + let func = export.as_func().unwrap(); + func.signature().params().to_owned() + }; + + let args = { + assert_eq!( + cli_args.args.len(), + params.len(), + "Not enough arguments supplied" + ); + let mut vec = Vec::new(); + for (input_arg, func_arg) in cli_args.args.iter().zip(params.into_iter()) { + let value = match func_arg { + casper_wasmi::ValueType::I32 => { + casper_wasmi::RuntimeValue::I32(input_arg.parse().unwrap()) + } + casper_wasmi::ValueType::I64 => { + casper_wasmi::RuntimeValue::I64(input_arg.parse().unwrap()) + } + casper_wasmi::ValueType::F32 => todo!(), + casper_wasmi::ValueType::F64 => todo!(), + }; + vec.push(value); + } + vec + }; + + let start = Instant::now(); + + let gas_limit = cli_args + .gas_limit + .unwrap_or(chainspec.transaction_config.block_gas_limit); + + let mut externals = MinimalWasmiExternals::new(0, gas_limit); + let result: Result, casper_wasmi::Error> = + instance + .clone() + .invoke_export(func_name, &args, &mut externals); + + let info = RunWasmInfo { + elapsed: start.elapsed(), + gas_used: externals.gas_used, + }; + + (result, info) +} +use clap::Parser; +use serde::Deserialize; + +#[derive(Parser, Clone, Debug)] +#[command(author, version, about, long_about = None)] +struct Args { + #[arg(value_name = "MODULE")] + wasm_file: PathBuf, + #[arg(long = "gas_limit")] + gas_limit: Option, + #[arg(long = "invoke", value_name = "FUNCTION")] + invoke: Option, + 
/// Arguments given to the Wasm module or the invoked function. + #[arg(value_name = "ARGS")] + args: Vec, + #[arg(short, long)] + chainspec_file: Option, +} + +fn load_wasm_file>(path: P) -> Vec { + let path = path.as_ref(); + let bytes = fs::read(path).expect("valid file"); + match path.extension() { + Some(ext) if ext.eq_ignore_ascii_case("wat") => { + wat::parse_bytes(&bytes).expect("valid wat").into_owned() + } + None | Some(_) => bytes, + } +} + +#[derive(Deserialize, Clone, Default, Debug)] +struct TransactionConfig { + block_gas_limit: u64, +} + +/// in the chainspec file, it can continue to be parsed as an `ChainspecConfig`. +#[derive(Deserialize, Clone, Default, Debug)] +struct ChainspecConfig { + /// WasmConfig. + #[serde(rename = "wasm")] + pub wasm_config: WasmConfig, + #[serde(rename = "transactions")] + pub transaction_config: TransactionConfig, +} + +fn main() { + let args = Args::parse(); + + let chainspec_file = args.clone().chainspec_file.expect("chainspec file"); + println!("Using chainspec file {:?}", chainspec_file.display()); + let chainspec_data = fs::read_to_string(chainspec_file.as_path()).expect("valid file"); + let chainspec_config: ChainspecConfig = + toml::from_str(&chainspec_data).expect("valid chainspec"); + + let wasm_bytes = load_wasm_file(&args.wasm_file); + + if let Some(ref func_name) = args.invoke { + let (result, info) = run_wasm(wasm_bytes, &args, &chainspec_config, func_name); + + println!("result: {:?}", result); + println!("elapsed: {:?}", info.elapsed); + println!("gas used: {}", info.gas_used); + } +} + +#[derive(Default)] +struct MinimalWasmiResolver(()); + +#[derive(Debug)] +struct MinimalWasmiExternals { + gas_used: u64, + block_gas_limit: u64, +} + +impl MinimalWasmiExternals { + fn new(gas_used: u64, block_gas_limit: u64) -> Self { + Self { + gas_used, + block_gas_limit, + } + } +} + +const GAS_FUNC_IDX: usize = 0; + +impl ModuleImportResolver for MinimalWasmiResolver { + fn resolve_func( + &self, + field_name: 
&str, + _signature: &casper_wasmi::Signature, + ) -> Result { + if field_name == "gas" { + Ok(FuncInstance::alloc_host( + Signature::new(&[casper_wasmi::ValueType::I32; 1][..], None), + GAS_FUNC_IDX, + )) + } else { + Err(casper_wasmi::Error::Instantiation(format!( + "Export {} not found", + field_name + ))) + } + } + + fn resolve_memory( + &self, + field_name: &str, + memory_type: &casper_wasmi::MemoryDescriptor, + ) -> Result { + if field_name == "memory" { + Ok(MemoryInstance::alloc( + Pages(memory_type.initial() as usize), + memory_type.maximum().map(|x| Pages(x as usize)), + )?) + } else { + panic!("invalid exported memory name {}", field_name); + } + } +} + +#[derive(thiserror::Error, Debug)] +#[error("gas limit")] +struct GasLimit; + +impl HostError for GasLimit {} + +impl Externals for MinimalWasmiExternals { + fn invoke_index( + &mut self, + index: usize, + args: casper_wasmi::RuntimeArgs, + ) -> Result, casper_wasmi::Trap> { + if index == GAS_FUNC_IDX { + let gas_used: u32 = args.nth_checked(0)?; + // match gas_used.checked_add( + match self.gas_used.checked_add(gas_used.into()) { + Some(new_gas_used) if new_gas_used > self.block_gas_limit => { + return Err(GasLimit.into()); + } + Some(new_gas_used) => { + // dbg!(&new_gas_used, &self.block_gas_limit); + self.gas_used = new_gas_used; + } + None => { + unreachable!(); + } + } + Ok(None) + } else { + unreachable!(); + } + } +} diff --git a/execution_engine/src/config.rs b/execution_engine/src/config.rs deleted file mode 100644 index 472543e893..0000000000 --- a/execution_engine/src/config.rs +++ /dev/null @@ -1,38 +0,0 @@ -//! Configuration options for the execution engine. - -use serde::{Deserialize, Serialize}; - -use crate::shared::utils; - -const DEFAULT_MAX_GLOBAL_STATE_SIZE: usize = 805_306_368_000; // 750 GiB - -/// Contract runtime configuration. -#[derive(Clone, Copy, Debug, Deserialize, Serialize)] -// Disallow unknown fields to ensure config files and command-line overrides contain valid keys. 
-#[serde(deny_unknown_fields)] -pub struct Config { - max_global_state_size: Option, -} - -impl Config { - /// The maximum size of the database to use for the global state store. - /// - /// Defaults to 805,306,368,000 == 750 GiB. - /// - /// The size should be a multiple of the OS page size. - pub fn max_global_state_size(&self) -> usize { - let value = self - .max_global_state_size - .unwrap_or(DEFAULT_MAX_GLOBAL_STATE_SIZE); - utils::check_multiple_of_page_size(value); - value - } -} - -impl Default for Config { - fn default() -> Self { - Config { - max_global_state_size: Some(DEFAULT_MAX_GLOBAL_STATE_SIZE), - } - } -} diff --git a/execution_engine/src/core.rs b/execution_engine/src/core.rs deleted file mode 100644 index f70266a309..0000000000 --- a/execution_engine/src/core.rs +++ /dev/null @@ -1,14 +0,0 @@ -#![allow(missing_docs)] - -pub mod engine_state; -pub mod execution; -pub mod resolvers; -pub mod runtime; -pub mod runtime_context; -pub(crate) mod tracking_copy; - -pub use tracking_copy::{validate_balance_proof, validate_query_proof, ValidationError}; - -pub const ADDRESS_LENGTH: usize = 32; - -pub type Address = [u8; ADDRESS_LENGTH]; diff --git a/execution_engine/src/core/engine_state/balance.rs b/execution_engine/src/core/engine_state/balance.rs deleted file mode 100644 index 3b73edb301..0000000000 --- a/execution_engine/src/core/engine_state/balance.rs +++ /dev/null @@ -1,54 +0,0 @@ -use casper_types::{Key, URef, U512}; - -use crate::{ - shared::{newtypes::Blake2bHash, stored_value::StoredValue}, - storage::trie::merkle_proof::TrieMerkleProof, -}; - -#[derive(Debug)] -pub enum BalanceResult { - RootNotFound, - Success { - motes: U512, - proof: Box>, - }, -} - -impl BalanceResult { - pub fn motes(&self) -> Option<&U512> { - match self { - BalanceResult::Success { motes, .. } => Some(motes), - _ => None, - } - } - - pub fn proof(self) -> Option> { - match self { - BalanceResult::Success { proof, .. 
} => Some(*proof), - _ => None, - } - } -} - -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct BalanceRequest { - state_hash: Blake2bHash, - purse_uref: URef, -} - -impl BalanceRequest { - pub fn new(state_hash: Blake2bHash, purse_uref: URef) -> Self { - BalanceRequest { - state_hash, - purse_uref, - } - } - - pub fn state_hash(&self) -> Blake2bHash { - self.state_hash - } - - pub fn purse_uref(&self) -> URef { - self.purse_uref - } -} diff --git a/execution_engine/src/core/engine_state/deploy_item.rs b/execution_engine/src/core/engine_state/deploy_item.rs deleted file mode 100644 index 79bde553b2..0000000000 --- a/execution_engine/src/core/engine_state/deploy_item.rs +++ /dev/null @@ -1,39 +0,0 @@ -use std::collections::BTreeSet; - -use casper_types::{account::AccountHash, DeployHash}; - -use crate::core::engine_state::executable_deploy_item::ExecutableDeployItem; - -type GasPrice = u64; - -/// Represents a deploy to be executed. Corresponds to the similarly-named ipc protobuf message. -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct DeployItem { - pub address: AccountHash, - pub session: ExecutableDeployItem, - pub payment: ExecutableDeployItem, - pub gas_price: GasPrice, - pub authorization_keys: BTreeSet, - pub deploy_hash: DeployHash, -} - -impl DeployItem { - /// Creates a [`DeployItem`]. 
- pub fn new( - address: AccountHash, - session: ExecutableDeployItem, - payment: ExecutableDeployItem, - gas_price: GasPrice, - authorization_keys: BTreeSet, - deploy_hash: DeployHash, - ) -> Self { - DeployItem { - address, - session, - payment, - gas_price, - authorization_keys, - deploy_hash, - } - } -} diff --git a/execution_engine/src/core/engine_state/engine_config.rs b/execution_engine/src/core/engine_state/engine_config.rs deleted file mode 100644 index 8ce12dfa95..0000000000 --- a/execution_engine/src/core/engine_state/engine_config.rs +++ /dev/null @@ -1,12 +0,0 @@ -/// The runtime configuration of the execution engine -#[derive(Debug, Copy, Clone, Default)] -pub struct EngineConfig { - // feature flags go here -} - -impl EngineConfig { - /// Creates a new engine configuration with default parameters. - pub fn new() -> EngineConfig { - Default::default() - } -} diff --git a/execution_engine/src/core/engine_state/era_validators.rs b/execution_engine/src/core/engine_state/era_validators.rs deleted file mode 100644 index a3184ef3c8..0000000000 --- a/execution_engine/src/core/engine_state/era_validators.rs +++ /dev/null @@ -1,49 +0,0 @@ -use thiserror::Error; - -use datasize::DataSize; - -use casper_types::ProtocolVersion; - -use crate::{core::engine_state::error::Error, shared::newtypes::Blake2bHash}; - -#[derive(Debug, Error, DataSize)] -pub enum GetEraValidatorsError { - /// Invalid state hash was used to make this request - #[error("Invalid state hash")] - RootNotFound, - /// Engine state error - #[error(transparent)] - Other(#[from] Error), - /// EraValidators missing - #[error("Era validators missing")] - EraValidatorsMissing, -} - -impl GetEraValidatorsError { - pub fn is_era_validators_missing(&self) -> bool { - matches!(self, GetEraValidatorsError::EraValidatorsMissing) - } -} - -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct GetEraValidatorsRequest { - state_hash: Blake2bHash, - protocol_version: ProtocolVersion, -} - -impl 
GetEraValidatorsRequest { - pub fn new(state_hash: Blake2bHash, protocol_version: ProtocolVersion) -> Self { - GetEraValidatorsRequest { - state_hash, - protocol_version, - } - } - - pub fn state_hash(&self) -> Blake2bHash { - self.state_hash - } - - pub fn protocol_version(&self) -> ProtocolVersion { - self.protocol_version - } -} diff --git a/execution_engine/src/core/engine_state/error.rs b/execution_engine/src/core/engine_state/error.rs deleted file mode 100644 index 441cff1c48..0000000000 --- a/execution_engine/src/core/engine_state/error.rs +++ /dev/null @@ -1,98 +0,0 @@ -use datasize::DataSize; -use thiserror::Error; - -use casper_types::{bytesrepr, system::mint, ProtocolVersion}; - -use crate::{ - core::{ - engine_state::{genesis::GenesisError, upgrade::ProtocolUpgradeError}, - execution, - }, - shared::{newtypes::Blake2bHash, wasm_prep}, - storage, -}; - -#[derive(Clone, Error, Debug)] -pub enum Error { - #[error("Root not found: {0}")] - RootNotFound(Blake2bHash), - #[error("Invalid hash length: expected {expected}, actual {actual}")] - InvalidHashLength { expected: usize, actual: usize }, - #[error("Invalid account hash length: expected {expected}, actual {actual}")] - InvalidAccountHashLength { expected: usize, actual: usize }, - #[error("Invalid protocol version: {0}")] - InvalidProtocolVersion(ProtocolVersion), - #[error("{0:?}")] - Genesis(Box), - #[error("Wasm preprocessing error: {0}")] - WasmPreprocessing(#[from] wasm_prep::PreprocessingError), - #[error("Wasm serialization error: {0:?}")] - WasmSerialization(#[from] parity_wasm::SerializationError), - #[error(transparent)] - Exec(execution::Error), - #[error("Storage error: {0}")] - Storage(#[from] storage::error::Error), - #[error("Authorization failure: not authorized.")] - Authorization, - #[error("Insufficient payment")] - InsufficientPayment, - #[error("Gas conversion overflow")] - GasConversionOverflow, - #[error("Deploy error")] - Deploy, - #[error("Payment finalization error")] - 
Finalization, - #[error("Missing system contract association: {0}")] - MissingSystemContract(String), - #[error("Bytesrepr error: {0}")] - Bytesrepr(String), - #[error("Mint error: {0}")] - Mint(String), - #[error("Unsupported key type")] - InvalidKeyVariant, - #[error("Protocol upgrade error: {0}")] - ProtocolUpgrade(ProtocolUpgradeError), - #[error("Unsupported deploy item variant: {0}")] - InvalidDeployItemVariant(String), -} - -impl From for Error { - fn from(error: execution::Error) -> Self { - match error { - execution::Error::WasmPreprocessing(preprocessing_error) => { - Error::WasmPreprocessing(preprocessing_error) - } - _ => Error::Exec(error), - } - } -} - -impl From for Error { - fn from(error: bytesrepr::Error) -> Self { - Error::Bytesrepr(format!("{}", error)) - } -} - -impl From for Error { - fn from(error: mint::Error) -> Self { - Error::Mint(format!("{}", error)) - } -} - -impl From for Error { - fn from(genesis_error: GenesisError) -> Self { - Self::Genesis(Box::new(genesis_error)) - } -} - -impl DataSize for Error { - const IS_DYNAMIC: bool = true; - - const STATIC_HEAP_SIZE: usize = 0; - - // TODO - #[inline] - fn estimate_heap_size(&self) -> usize { - 12 // TODO: replace with some actual estimation depending on the variant - } -} diff --git a/execution_engine/src/core/engine_state/executable_deploy_item.rs b/execution_engine/src/core/engine_state/executable_deploy_item.rs deleted file mode 100644 index 752c6aef2b..0000000000 --- a/execution_engine/src/core/engine_state/executable_deploy_item.rs +++ /dev/null @@ -1,725 +0,0 @@ -// TODO - remove once schemars stops causing warning. 
-#![allow(clippy::field_reassign_with_default)] - -use std::{ - cell::RefCell, - fmt::{self, Debug, Display, Formatter}, - rc::Rc, -}; - -use datasize::DataSize; -use hex_buffer_serde::{Hex, HexForm}; -use hex_fmt::HexFmt; -use parity_wasm::elements::Module; -use rand::{ - distributions::{Alphanumeric, Distribution, Standard}, - Rng, -}; -use schemars::JsonSchema; -use serde::{Deserialize, Serialize}; - -use casper_types::{ - bytesrepr::{self, Bytes, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, - contracts::{ContractVersion, DEFAULT_ENTRY_POINT_NAME}, - system::mint::ARG_AMOUNT, - CLValue, Contract, ContractHash, ContractPackage, ContractPackageHash, ContractVersionKey, - EntryPoint, EntryPointType, Key, Phase, ProtocolVersion, RuntimeArgs, U512, -}; - -use super::error; -use crate::{ - core::{ - engine_state::{Error, ExecError, MAX_PAYMENT_AMOUNT}, - execution, - tracking_copy::{TrackingCopy, TrackingCopyExt}, - }, - shared::{ - account::Account, newtypes::CorrelationId, stored_value::StoredValue, wasm_prep, - wasm_prep::Preprocessor, - }, - storage::{global_state::StateReader, protocol_data::ProtocolData}, -}; - -const TAG_LENGTH: usize = U8_SERIALIZED_LENGTH; -const MODULE_BYTES_TAG: u8 = 0; -const STORED_CONTRACT_BY_HASH_TAG: u8 = 1; -const STORED_CONTRACT_BY_NAME_TAG: u8 = 2; -const STORED_VERSIONED_CONTRACT_BY_HASH_TAG: u8 = 3; -const STORED_VERSIONED_CONTRACT_BY_NAME_TAG: u8 = 4; -const TRANSFER_TAG: u8 = 5; - -#[derive( - Clone, DataSize, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, JsonSchema, -)] -#[serde(deny_unknown_fields)] -pub enum ExecutableDeployItem { - ModuleBytes { - #[serde(with = "HexForm")] - #[schemars(with = "String", description = "Hex-encoded raw Wasm bytes.")] - module_bytes: Bytes, - // assumes implicit `call` noarg entrypoint - args: RuntimeArgs, - }, - StoredContractByHash { - #[serde(with = "HexForm")] - #[schemars(with = "String", description = "Hex-encoded hash.")] - hash: ContractHash, - entry_point: String, - args: 
RuntimeArgs, - }, - StoredContractByName { - name: String, - entry_point: String, - args: RuntimeArgs, - }, - StoredVersionedContractByHash { - #[serde(with = "HexForm")] - #[schemars(with = "String", description = "Hex-encoded hash.")] - hash: ContractPackageHash, - version: Option, // defaults to highest enabled version - entry_point: String, - args: RuntimeArgs, - }, - StoredVersionedContractByName { - name: String, - version: Option, // defaults to highest enabled version - entry_point: String, - args: RuntimeArgs, - }, - Transfer { - args: RuntimeArgs, - }, -} - -impl ExecutableDeployItem { - pub(crate) fn to_contract_hash_key(&self, account: &Account) -> Result, Error> { - match self { - ExecutableDeployItem::StoredContractByHash { hash, .. } => { - Ok(Some(Key::from(hash.value()))) - } - ExecutableDeployItem::StoredVersionedContractByHash { hash, .. } => { - Ok(Some(Key::from(hash.value()))) - } - ExecutableDeployItem::StoredContractByName { name, .. } - | ExecutableDeployItem::StoredVersionedContractByName { name, .. } => { - let key = account.named_keys().get(name).cloned().ok_or_else(|| { - error::Error::Exec(execution::Error::NamedKeyNotFound(name.to_string())) - })?; - Ok(Some(key)) - } - ExecutableDeployItem::ModuleBytes { .. } | ExecutableDeployItem::Transfer { .. } => { - Ok(None) - } - } - } - - pub fn entry_point_name(&self) -> &str { - match self { - ExecutableDeployItem::ModuleBytes { .. } | ExecutableDeployItem::Transfer { .. } => { - DEFAULT_ENTRY_POINT_NAME - } - ExecutableDeployItem::StoredVersionedContractByName { entry_point, .. } - | ExecutableDeployItem::StoredVersionedContractByHash { entry_point, .. } - | ExecutableDeployItem::StoredContractByHash { entry_point, .. } - | ExecutableDeployItem::StoredContractByName { entry_point, .. } => &entry_point, - } - } - - pub fn args(&self) -> &RuntimeArgs { - match self { - ExecutableDeployItem::ModuleBytes { args, .. } - | ExecutableDeployItem::StoredContractByHash { args, .. 
} - | ExecutableDeployItem::StoredContractByName { args, .. } - | ExecutableDeployItem::StoredVersionedContractByHash { args, .. } - | ExecutableDeployItem::StoredVersionedContractByName { args, .. } - | ExecutableDeployItem::Transfer { args } => args, - } - } - - pub fn is_transfer(&self) -> bool { - matches!(self, ExecutableDeployItem::Transfer { .. }) - } - - #[allow(clippy::too_many_arguments)] - pub fn get_deploy_metadata( - &self, - tracking_copy: Rc>>, - account: &Account, - correlation_id: CorrelationId, - preprocessor: &Preprocessor, - protocol_version: &ProtocolVersion, - protocol_data: &ProtocolData, - phase: Phase, - ) -> Result - where - R: StateReader, - R::Error: Into, - { - let (contract_package, contract, contract_hash, base_key) = match self { - ExecutableDeployItem::ModuleBytes { module_bytes, .. } => { - let is_payment = matches!(phase, Phase::Payment); - if is_payment && module_bytes.is_empty() { - return Ok(DeployMetadata::System { - base_key: account.account_hash().into(), - contract: Contract::default(), - contract_package: ContractPackage::default(), - entry_point: EntryPoint::default(), - }); - } - - let module = preprocessor.preprocess(&module_bytes.as_ref())?; - return Ok(DeployMetadata::Session { - module, - contract_package: ContractPackage::default(), - entry_point: EntryPoint::default(), - }); - } - ExecutableDeployItem::StoredContractByHash { .. } - | ExecutableDeployItem::StoredContractByName { .. 
} => { - // NOTE: `to_contract_hash_key` ensures it returns valid value only for - // ByHash/ByName variants - let stored_contract_key = self.to_contract_hash_key(&account)?.unwrap(); - - let contract_hash = stored_contract_key - .into_hash() - .ok_or(Error::InvalidKeyVariant)?; - - let contract = tracking_copy - .borrow_mut() - .get_contract(correlation_id, contract_hash.into())?; - - if !contract.is_compatible_protocol_version(*protocol_version) { - let exec_error = execution::Error::IncompatibleProtocolMajorVersion { - expected: protocol_version.value().major, - actual: contract.protocol_version().value().major, - }; - return Err(error::Error::Exec(exec_error)); - } - - let contract_package = tracking_copy - .borrow_mut() - .get_contract_package(correlation_id, contract.contract_package_hash())?; - - ( - contract_package, - contract, - contract_hash, - stored_contract_key, - ) - } - ExecutableDeployItem::StoredVersionedContractByName { version, .. } - | ExecutableDeployItem::StoredVersionedContractByHash { version, .. 
} => { - // NOTE: `to_contract_hash_key` ensures it returns valid value only for - // ByHash/ByName variants - let contract_package_key = self.to_contract_hash_key(&account)?.unwrap(); - let contract_package_hash = contract_package_key - .into_hash() - .ok_or(Error::InvalidKeyVariant)?; - - let contract_package = tracking_copy - .borrow_mut() - .get_contract_package(correlation_id, contract_package_hash.into())?; - - let maybe_version_key = - version.map(|ver| ContractVersionKey::new(protocol_version.value().major, ver)); - - let contract_version_key = maybe_version_key - .or_else(|| contract_package.current_contract_version()) - .ok_or_else(|| { - error::Error::Exec(execution::Error::NoActiveContractVersions( - contract_package_hash.into(), - )) - })?; - - if !contract_package.is_version_enabled(contract_version_key) { - return Err(error::Error::Exec( - execution::Error::InvalidContractVersion(contract_version_key), - )); - } - - let contract_hash = *contract_package - .lookup_contract_hash(contract_version_key) - .ok_or(error::Error::Exec( - execution::Error::InvalidContractVersion(contract_version_key), - ))?; - - let contract = tracking_copy - .borrow_mut() - .get_contract(correlation_id, contract_hash)?; - - ( - contract_package, - contract, - contract_hash.value(), - contract_package_key, - ) - } - ExecutableDeployItem::Transfer { .. 
} => { - return Err(error::Error::InvalidDeployItemVariant(String::from( - "Transfer", - ))) - } - }; - - let entry_point_name = self.entry_point_name(); - - let entry_point = contract - .entry_point(entry_point_name) - .cloned() - .ok_or_else(|| { - error::Error::Exec(execution::Error::NoSuchMethod(entry_point_name.to_owned())) - })?; - - if protocol_data - .system_contracts() - .contains(&contract_hash.into()) - { - return Ok(DeployMetadata::System { - base_key, - contract, - contract_package, - entry_point, - }); - } - - let contract_wasm = tracking_copy - .borrow_mut() - .get_contract_wasm(correlation_id, contract.contract_wasm_hash())?; - - let module = wasm_prep::deserialize(contract_wasm.bytes())?; - - match entry_point.entry_point_type() { - EntryPointType::Session => Ok(DeployMetadata::Session { - module, - contract_package, - entry_point, - }), - EntryPointType::Contract => Ok(DeployMetadata::Contract { - module, - base_key, - contract, - contract_package, - entry_point, - }), - } - } -} - -impl ToBytes for ExecutableDeployItem { - fn to_bytes(&self) -> Result, bytesrepr::Error> { - let mut buffer = bytesrepr::allocate_buffer(self)?; - match self { - ExecutableDeployItem::ModuleBytes { module_bytes, args } => { - buffer.insert(0, MODULE_BYTES_TAG); - buffer.extend(module_bytes.to_bytes()?); - buffer.extend(args.to_bytes()?); - } - ExecutableDeployItem::StoredContractByHash { - hash, - entry_point, - args, - } => { - buffer.insert(0, STORED_CONTRACT_BY_HASH_TAG); - buffer.extend(hash.to_bytes()?); - buffer.extend(entry_point.to_bytes()?); - buffer.extend(args.to_bytes()?) - } - ExecutableDeployItem::StoredContractByName { - name, - entry_point, - args, - } => { - buffer.insert(0, STORED_CONTRACT_BY_NAME_TAG); - buffer.extend(name.to_bytes()?); - buffer.extend(entry_point.to_bytes()?); - buffer.extend(args.to_bytes()?) 
- } - ExecutableDeployItem::StoredVersionedContractByHash { - hash, - version, - entry_point, - args, - } => { - buffer.insert(0, STORED_VERSIONED_CONTRACT_BY_HASH_TAG); - buffer.extend(hash.to_bytes()?); - buffer.extend(version.to_bytes()?); - buffer.extend(entry_point.to_bytes()?); - buffer.extend(args.to_bytes()?) - } - ExecutableDeployItem::StoredVersionedContractByName { - name, - version, - entry_point, - args, - } => { - buffer.insert(0, STORED_VERSIONED_CONTRACT_BY_NAME_TAG); - buffer.extend(name.to_bytes()?); - buffer.extend(version.to_bytes()?); - buffer.extend(entry_point.to_bytes()?); - buffer.extend(args.to_bytes()?) - } - ExecutableDeployItem::Transfer { args } => { - buffer.insert(0, TRANSFER_TAG); - buffer.extend(args.to_bytes()?) - } - } - Ok(buffer) - } - - fn serialized_length(&self) -> usize { - TAG_LENGTH - + match self { - ExecutableDeployItem::ModuleBytes { module_bytes, args } => { - module_bytes.serialized_length() + args.serialized_length() - } - ExecutableDeployItem::StoredContractByHash { - hash, - entry_point, - args, - } => { - hash.serialized_length() - + entry_point.serialized_length() - + args.serialized_length() - } - ExecutableDeployItem::StoredContractByName { - name, - entry_point, - args, - } => { - name.serialized_length() - + entry_point.serialized_length() - + args.serialized_length() - } - ExecutableDeployItem::StoredVersionedContractByHash { - hash, - version, - entry_point, - args, - } => { - hash.serialized_length() - + version.serialized_length() - + entry_point.serialized_length() - + args.serialized_length() - } - ExecutableDeployItem::StoredVersionedContractByName { - name, - version, - entry_point, - args, - } => { - name.serialized_length() - + version.serialized_length() - + entry_point.serialized_length() - + args.serialized_length() - } - ExecutableDeployItem::Transfer { args } => args.serialized_length(), - } - } -} - -impl FromBytes for ExecutableDeployItem { - fn from_bytes(bytes: &[u8]) -> Result<(Self, 
&[u8]), bytesrepr::Error> { - let (tag, remainder) = u8::from_bytes(bytes)?; - match tag { - MODULE_BYTES_TAG => { - let (module_bytes, remainder) = FromBytes::from_bytes(remainder)?; - let (args, remainder) = FromBytes::from_bytes(remainder)?; - Ok(( - ExecutableDeployItem::ModuleBytes { module_bytes, args }, - remainder, - )) - } - STORED_CONTRACT_BY_HASH_TAG => { - let (hash, remainder) = FromBytes::from_bytes(remainder)?; - let (entry_point, remainder) = String::from_bytes(remainder)?; - let (args, remainder) = FromBytes::from_bytes(remainder)?; - Ok(( - ExecutableDeployItem::StoredContractByHash { - hash, - entry_point, - args, - }, - remainder, - )) - } - STORED_CONTRACT_BY_NAME_TAG => { - let (name, remainder) = String::from_bytes(remainder)?; - let (entry_point, remainder) = String::from_bytes(remainder)?; - let (args, remainder) = FromBytes::from_bytes(remainder)?; - Ok(( - ExecutableDeployItem::StoredContractByName { - name, - entry_point, - args, - }, - remainder, - )) - } - STORED_VERSIONED_CONTRACT_BY_HASH_TAG => { - let (hash, remainder) = FromBytes::from_bytes(remainder)?; - let (version, remainder) = Option::::from_bytes(remainder)?; - let (entry_point, remainder) = String::from_bytes(remainder)?; - let (args, remainder) = FromBytes::from_bytes(remainder)?; - Ok(( - ExecutableDeployItem::StoredVersionedContractByHash { - hash, - version, - entry_point, - args, - }, - remainder, - )) - } - STORED_VERSIONED_CONTRACT_BY_NAME_TAG => { - let (name, remainder) = String::from_bytes(remainder)?; - let (version, remainder) = Option::::from_bytes(remainder)?; - let (entry_point, remainder) = String::from_bytes(remainder)?; - let (args, remainder) = FromBytes::from_bytes(remainder)?; - Ok(( - ExecutableDeployItem::StoredVersionedContractByName { - name, - version, - entry_point, - args, - }, - remainder, - )) - } - TRANSFER_TAG => { - let (args, remainder) = FromBytes::from_bytes(remainder)?; - Ok((ExecutableDeployItem::Transfer { args }, remainder)) - } - _ 
=> Err(bytesrepr::Error::Formatting), - } - } -} - -impl Display for ExecutableDeployItem { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - match self { - ExecutableDeployItem::ModuleBytes { module_bytes, .. } => { - write!(f, "module-bytes [{} bytes]", module_bytes.len()) - } - ExecutableDeployItem::StoredContractByHash { - hash, entry_point, .. - } => write!( - f, - "stored-contract-by-hash: {:10}, entry-point: {}", - HexFmt(hash), - entry_point, - ), - ExecutableDeployItem::StoredContractByName { - name, entry_point, .. - } => write!( - f, - "stored-contract-by-name: {}, entry-point: {}", - name, entry_point, - ), - ExecutableDeployItem::StoredVersionedContractByHash { - hash, - version: Some(ver), - entry_point, - .. - } => write!( - f, - "stored-versioned-contract-by-hash: {:10}, version: {}, entry-point: {}", - HexFmt(hash), - ver, - entry_point, - ), - ExecutableDeployItem::StoredVersionedContractByHash { - hash, entry_point, .. - } => write!( - f, - "stored-versioned-contract-by-hash: {:10}, version: latest, entry-point: {}", - HexFmt(hash), - entry_point, - ), - ExecutableDeployItem::StoredVersionedContractByName { - name, - version: Some(ver), - entry_point, - .. - } => write!( - f, - "stored-versioned-contract: {}, version: {}, entry-point: {}", - name, ver, entry_point, - ), - ExecutableDeployItem::StoredVersionedContractByName { - name, entry_point, .. - } => write!( - f, - "stored-versioned-contract: {}, version: latest, entry-point: {}", - name, entry_point, - ), - ExecutableDeployItem::Transfer { .. 
} => write!(f, "transfer"), - } - } -} - -impl Debug for ExecutableDeployItem { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - match self { - ExecutableDeployItem::ModuleBytes { module_bytes, args } => f - .debug_struct("ModuleBytes") - .field("module_bytes", &format!("[{} bytes]", module_bytes.len())) - .field("args", args) - .finish(), - ExecutableDeployItem::StoredContractByHash { - hash, - entry_point, - args, - } => f - .debug_struct("StoredContractByHash") - .field("hash", &HexFmt(hash)) - .field("entry_point", &entry_point) - .field("args", args) - .finish(), - ExecutableDeployItem::StoredContractByName { - name, - entry_point, - args, - } => f - .debug_struct("StoredContractByName") - .field("name", &name) - .field("entry_point", &entry_point) - .field("args", args) - .finish(), - ExecutableDeployItem::StoredVersionedContractByHash { - hash, - version, - entry_point, - args, - } => f - .debug_struct("StoredVersionedContractByHash") - .field("hash", &HexFmt(hash)) - .field("version", version) - .field("entry_point", &entry_point) - .field("args", args) - .finish(), - ExecutableDeployItem::StoredVersionedContractByName { - name, - version, - entry_point, - args, - } => f - .debug_struct("StoredVersionedContractByName") - .field("name", &name) - .field("version", version) - .field("entry_point", &entry_point) - .field("args", args) - .finish(), - ExecutableDeployItem::Transfer { args } => { - f.debug_struct("Transfer").field("args", args).finish() - } - } - } -} - -impl Distribution for Standard { - fn sample(&self, rng: &mut R) -> ExecutableDeployItem { - fn random_bytes(rng: &mut R) -> Vec { - let mut bytes = vec![0u8; rng.gen_range(0..100)]; - rng.fill_bytes(bytes.as_mut()); - bytes - } - - fn random_string(rng: &mut R) -> String { - rng.sample_iter(&Alphanumeric) - .take(20) - .map(char::from) - .collect() - } - - let mut args = RuntimeArgs::new(); - let _ = args.insert(random_string(rng), Bytes::from(random_bytes(rng))); - - match 
rng.gen_range(0..5) { - 0 => ExecutableDeployItem::ModuleBytes { - module_bytes: random_bytes(rng).into(), - args, - }, - 1 => ExecutableDeployItem::StoredContractByHash { - hash: ContractHash::new(rng.gen()), - entry_point: random_string(rng), - args, - }, - 2 => ExecutableDeployItem::StoredContractByName { - name: random_string(rng), - entry_point: random_string(rng), - args, - }, - 3 => ExecutableDeployItem::StoredVersionedContractByHash { - hash: ContractPackageHash::new(rng.gen()), - version: rng.gen(), - entry_point: random_string(rng), - args, - }, - 4 => ExecutableDeployItem::StoredVersionedContractByName { - name: random_string(rng), - version: rng.gen(), - entry_point: random_string(rng), - args, - }, - 5 => { - let amount = rng.gen_range(MAX_PAYMENT_AMOUNT..1_000_000_000_000_000); - let mut transfer_args = RuntimeArgs::new(); - transfer_args.insert_cl_value( - ARG_AMOUNT, - CLValue::from_t(U512::from(amount)).expect("should get CLValue from U512"), - ); - ExecutableDeployItem::Transfer { - args: transfer_args, - } - } - _ => unreachable!(), - } - } -} - -#[derive(Clone, Debug)] -pub enum DeployMetadata { - Session { - module: Module, - contract_package: ContractPackage, - entry_point: EntryPoint, - }, - Contract { - // Contract hash - base_key: Key, - module: Module, - contract: Contract, - contract_package: ContractPackage, - entry_point: EntryPoint, - }, - System { - base_key: Key, - contract: Contract, - contract_package: ContractPackage, - entry_point: EntryPoint, - }, -} - -impl DeployMetadata { - pub fn take_module(self) -> Option { - match self { - DeployMetadata::System { .. } => None, - DeployMetadata::Session { module, .. } => Some(module), - DeployMetadata::Contract { module, .. 
} => Some(module), - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn serialization_roundtrip() { - let mut rng = rand::thread_rng(); - for _ in 0..10 { - let executable_deploy_item: ExecutableDeployItem = rng.gen(); - bytesrepr::test_serialization_roundtrip(&executable_deploy_item); - } - } -} diff --git a/execution_engine/src/core/engine_state/execute_request.rs b/execution_engine/src/core/engine_state/execute_request.rs deleted file mode 100644 index ab9c22e7df..0000000000 --- a/execution_engine/src/core/engine_state/execute_request.rs +++ /dev/null @@ -1,56 +0,0 @@ -use std::mem; - -use casper_types::{ProtocolVersion, PublicKey, SecretKey}; - -use super::deploy_item::DeployItem; -use crate::shared::newtypes::Blake2bHash; - -#[derive(Debug)] -pub struct ExecuteRequest { - pub parent_state_hash: Blake2bHash, - pub block_time: u64, - pub deploys: Vec, - pub protocol_version: ProtocolVersion, - pub proposer: PublicKey, -} - -impl ExecuteRequest { - pub fn new( - parent_state_hash: Blake2bHash, - block_time: u64, - deploys: Vec, - protocol_version: ProtocolVersion, - proposer: PublicKey, - ) -> Self { - Self { - parent_state_hash, - block_time, - deploys, - protocol_version, - proposer, - } - } - - pub fn take_deploys(&mut self) -> Vec { - mem::replace(&mut self.deploys, vec![]) - } - - pub fn deploys(&self) -> &Vec { - &self.deploys - } -} - -impl Default for ExecuteRequest { - fn default() -> Self { - let proposer = SecretKey::ed25519_from_bytes([0; SecretKey::ED25519_LENGTH]) - .unwrap() - .into(); - Self { - parent_state_hash: Blake2bHash::new(&[]), - block_time: 0, - deploys: vec![], - protocol_version: Default::default(), - proposer, - } - } -} diff --git a/execution_engine/src/core/engine_state/execution_effect.rs b/execution_engine/src/core/engine_state/execution_effect.rs deleted file mode 100644 index 6a83c847b0..0000000000 --- a/execution_engine/src/core/engine_state/execution_effect.rs +++ /dev/null @@ -1,39 +0,0 @@ -use 
casper_types::Key; - -use super::op::Op; -use crate::shared::{additive_map::AdditiveMap, transform::Transform}; - -#[derive(Clone, Debug, Default, PartialEq, Eq)] -pub struct ExecutionEffect { - pub ops: AdditiveMap, - pub transforms: AdditiveMap, -} - -impl ExecutionEffect { - pub fn new(ops: AdditiveMap, transforms: AdditiveMap) -> Self { - ExecutionEffect { ops, transforms } - } -} - -impl From<&ExecutionEffect> for casper_types::ExecutionEffect { - fn from(effect: &ExecutionEffect) -> Self { - casper_types::ExecutionEffect { - operations: effect - .ops - .iter() - .map(|(key, op)| casper_types::Operation { - key: key.to_formatted_string(), - kind: op.into(), - }) - .collect(), - transforms: effect - .transforms - .iter() - .map(|(key, transform)| casper_types::TransformEntry { - key: key.to_formatted_string(), - transform: transform.into(), - }) - .collect(), - } - } -} diff --git a/execution_engine/src/core/engine_state/execution_result.rs b/execution_engine/src/core/engine_state/execution_result.rs deleted file mode 100644 index c2cc017eed..0000000000 --- a/execution_engine/src/core/engine_state/execution_result.rs +++ /dev/null @@ -1,490 +0,0 @@ -use std::collections::VecDeque; - -use casper_types::{bytesrepr::FromBytes, CLTyped, CLValue, CLValueError, Key, TransferAddr}; - -use super::{error, execution_effect::ExecutionEffect, op::Op}; -use crate::{ - shared::{ - additive_map::AdditiveMap, gas::Gas, motes::Motes, newtypes::CorrelationId, - stored_value::StoredValue, transform::Transform, - }, - storage::global_state::StateReader, -}; - -fn make_payment_error_effects( - max_payment_cost: Motes, - account_main_purse_balance: Motes, - account_main_purse_balance_key: Key, - proposer_main_purse_balance_key: Key, -) -> Result { - let mut ops = AdditiveMap::new(); - let mut transforms = AdditiveMap::new(); - - let new_balance = account_main_purse_balance - max_payment_cost; - // from_t for U512 is assumed to never panic - let new_balance_clvalue = 
CLValue::from_t(new_balance.value())?; - let new_balance_value = StoredValue::CLValue(new_balance_clvalue); - - let account_main_purse_balance_normalize = account_main_purse_balance_key.normalize(); - let proposer_main_purse_balance_normalize = proposer_main_purse_balance_key.normalize(); - - ops.insert(account_main_purse_balance_normalize, Op::Write); - transforms.insert( - account_main_purse_balance_normalize, - Transform::Write(new_balance_value), - ); - - ops.insert(proposer_main_purse_balance_normalize, Op::Add); - transforms.insert( - proposer_main_purse_balance_normalize, - Transform::AddUInt512(max_payment_cost.value()), - ); - - Ok(ExecutionEffect::new(ops, transforms)) -} - -#[derive(Clone, Debug)] -pub enum ExecutionResult { - /// An error condition that happened during execution - Failure { - error: error::Error, - effect: ExecutionEffect, - transfers: Vec, - cost: Gas, - }, - /// Execution was finished successfully - Success { - effect: ExecutionEffect, - transfers: Vec, - cost: Gas, - }, -} - -impl Default for ExecutionResult { - fn default() -> Self { - ExecutionResult::Success { - effect: ExecutionEffect::default(), - transfers: Vec::default(), - cost: Gas::default(), - } - } -} - -/// A type alias that represents multiple execution results. -pub type ExecutionResults = VecDeque; - -pub enum ForcedTransferResult { - /// Payment code ran out of gas during execution - InsufficientPayment, - /// Gas conversion overflow - GasConversionOverflow, - /// Payment code execution resulted in an error - PaymentFailure, -} - -impl ExecutionResult { - /// Constructs [ExecutionResult::Failure] that has 0 cost and no effects. - /// This is the case for failures that we can't (or don't want to) charge - /// for, like `PreprocessingError` or `InvalidNonce`. 
- pub fn precondition_failure(error: error::Error) -> ExecutionResult { - ExecutionResult::Failure { - error, - effect: Default::default(), - transfers: Vec::default(), - cost: Gas::default(), - } - } - - pub fn is_success(&self) -> bool { - match self { - ExecutionResult::Failure { .. } => false, - ExecutionResult::Success { .. } => true, - } - } - - pub fn is_failure(&self) -> bool { - match self { - ExecutionResult::Failure { .. } => true, - ExecutionResult::Success { .. } => false, - } - } - - pub fn has_precondition_failure(&self) -> bool { - match self { - ExecutionResult::Failure { cost, effect, .. } => { - cost.value() == 0.into() && *effect == Default::default() - } - ExecutionResult::Success { .. } => false, - } - } - - pub fn cost(&self) -> Gas { - match self { - ExecutionResult::Failure { cost, .. } => *cost, - ExecutionResult::Success { cost, .. } => *cost, - } - } - - pub fn effect(&self) -> &ExecutionEffect { - match self { - ExecutionResult::Failure { effect, .. } => effect, - ExecutionResult::Success { effect, .. } => effect, - } - } - - pub fn transfers(&self) -> &Vec { - match self { - ExecutionResult::Failure { transfers, .. } => transfers, - ExecutionResult::Success { transfers, .. } => transfers, - } - } - - pub fn with_cost(self, cost: Gas) -> Self { - match self { - ExecutionResult::Failure { - error, - effect, - transfers, - .. - } => ExecutionResult::Failure { - error, - effect, - transfers, - cost, - }, - ExecutionResult::Success { - effect, transfers, .. - } => ExecutionResult::Success { - effect, - transfers, - cost, - }, - } - } - - pub fn with_effect(self, effect: ExecutionEffect) -> Self { - match self { - ExecutionResult::Failure { - error, - cost, - transfers, - .. - } => ExecutionResult::Failure { - error, - effect, - transfers, - cost, - }, - ExecutionResult::Success { - cost, transfers, .. 
- } => ExecutionResult::Success { - effect, - transfers, - cost, - }, - } - } - - pub fn with_transfers(self, transfers: Vec) -> Self { - match self { - ExecutionResult::Failure { - error, - effect, - cost, - .. - } => ExecutionResult::Failure { - error, - effect, - transfers, - cost, - }, - ExecutionResult::Success { cost, effect, .. } => ExecutionResult::Success { - effect, - transfers, - cost, - }, - } - } - - pub fn as_error(&self) -> Option<&error::Error> { - match self { - ExecutionResult::Failure { error, .. } => Some(error), - ExecutionResult::Success { .. } => None, - } - } - - /// Consumes [`ExecutionResult`] instance and optionally returns [`error::Error`] instance for - /// [`ExecutionResult::Failure`] variant. - pub fn take_error(self) -> Option { - match self { - ExecutionResult::Failure { error, .. } => Some(error), - ExecutionResult::Success { .. } => None, - } - } - - pub fn check_forced_transfer( - &self, - payment_purse_balance: Motes, - gas_price: u64, - ) -> Option { - let payment_result_cost = match Motes::from_gas(self.cost(), gas_price) { - Some(cost) => cost, - None => return Some(ForcedTransferResult::GasConversionOverflow), - }; - // payment_code_spec_3_b_ii: if (balance of handle payment pay purse) < (gas spent during - // payment code execution) * gas_price, no session - let insufficient_balance_to_continue = payment_purse_balance < payment_result_cost; - - match self { - ExecutionResult::Success { .. } if insufficient_balance_to_continue => { - // payment_code_spec_4: insufficient payment - Some(ForcedTransferResult::InsufficientPayment) - } - ExecutionResult::Success { .. } => { - // payment_code_spec_3_b_ii: continue execution - None - } - ExecutionResult::Failure { .. 
} => { - // payment_code_spec_3_a: report payment error in the deploy response - Some(ForcedTransferResult::PaymentFailure) - } - } - } - - pub fn new_payment_code_error( - error: error::Error, - max_payment_cost: Motes, - account_main_purse_balance: Motes, - gas_cost: Gas, - account_main_purse_balance_key: Key, - proposer_main_purse_balance_key: Key, - ) -> Result { - let effect = make_payment_error_effects( - max_payment_cost, - account_main_purse_balance, - account_main_purse_balance_key, - proposer_main_purse_balance_key, - )?; - let transfers = Vec::default(); - Ok(ExecutionResult::Failure { - error, - effect, - transfers, - cost: gas_cost, - }) - } - - pub fn take_with_ret(self, ret: T) -> (Option, Self) { - (Some(ret), self) - } - - pub fn take_without_ret(self) -> (Option, Self) { - (None, self) - } -} - -impl From<&ExecutionResult> for casper_types::ExecutionResult { - fn from(ee_execution_result: &ExecutionResult) -> Self { - match ee_execution_result { - ExecutionResult::Success { - effect, - transfers, - cost, - } => casper_types::ExecutionResult::Success { - effect: effect.into(), - transfers: transfers.clone(), - cost: cost.value(), - }, - ExecutionResult::Failure { - error, - effect, - transfers, - cost, - } => casper_types::ExecutionResult::Failure { - effect: effect.into(), - transfers: transfers.clone(), - cost: cost.value(), - error_message: error.to_string(), - }, - } - } -} - -#[derive(Debug, Copy, Clone, PartialEq, Eq)] -pub enum ExecutionResultBuilderError { - MissingPaymentExecutionResult, - MissingSessionExecutionResult, - MissingFinalizeExecutionResult, -} - -pub struct ExecutionResultBuilder { - payment_execution_result: Option, - session_execution_result: Option, - finalize_execution_result: Option, -} - -impl Default for ExecutionResultBuilder { - fn default() -> Self { - ExecutionResultBuilder { - payment_execution_result: None, - session_execution_result: None, - finalize_execution_result: None, - } - } -} - -impl 
ExecutionResultBuilder { - pub fn new() -> ExecutionResultBuilder { - ExecutionResultBuilder::default() - } - - pub fn set_payment_execution_result(&mut self, payment_result: ExecutionResult) -> &mut Self { - self.payment_execution_result = Some(payment_result); - self - } - - pub fn set_session_execution_result( - &mut self, - session_execution_result: ExecutionResult, - ) -> &mut ExecutionResultBuilder { - self.session_execution_result = Some(session_execution_result); - self - } - - pub fn set_finalize_execution_result( - &mut self, - finalize_execution_result: ExecutionResult, - ) -> &mut ExecutionResultBuilder { - self.finalize_execution_result = Some(finalize_execution_result); - self - } - - pub fn total_cost(&self) -> Gas { - let payment_cost = self - .payment_execution_result - .as_ref() - .map(ExecutionResult::cost) - .unwrap_or_default(); - let session_cost = self - .session_execution_result - .as_ref() - .map(ExecutionResult::cost) - .unwrap_or_default(); - payment_cost + session_cost - } - - pub fn transfers(&self) -> Vec { - self.session_execution_result - .as_ref() - .map(ExecutionResult::transfers) - .cloned() - .unwrap_or_default() - } - - pub fn build>( - self, - reader: &R, - correlation_id: CorrelationId, - ) -> Result { - let transfers = self.transfers(); - let cost = self.total_cost(); - let mut ops = AdditiveMap::new(); - let mut transforms = AdditiveMap::new(); - - let mut ret: ExecutionResult = ExecutionResult::Success { - effect: Default::default(), - transfers, - cost, - }; - - match self.payment_execution_result { - Some(result) => { - if result.is_failure() { - return Ok(result); - } else { - Self::add_effects(&mut ops, &mut transforms, result.effect()); - } - } - None => return Err(ExecutionResultBuilderError::MissingPaymentExecutionResult), - }; - - // session_code_spec_3: only include session exec effects if there is no session - // exec error - match self.session_execution_result { - Some(result) => { - if result.is_failure() { - 
ret = result.with_cost(cost); - } else { - Self::add_effects(&mut ops, &mut transforms, result.effect()); - } - } - None => return Err(ExecutionResultBuilderError::MissingSessionExecutionResult), - }; - - match self.finalize_execution_result { - Some(result) => { - if result.is_failure() { - // payment_code_spec_5_a: Finalization Error should only ever be raised here - return Ok(ExecutionResult::precondition_failure( - error::Error::Finalization, - )); - } else { - Self::add_effects(&mut ops, &mut transforms, result.effect()); - } - } - None => return Err(ExecutionResultBuilderError::MissingFinalizeExecutionResult), - } - - // Remove redundant writes to allow more opportunity to commute - let reduced_effect = Self::reduce_identity_writes(ops, transforms, reader, correlation_id); - - Ok(ret.with_effect(reduced_effect)) - } - - fn add_effects( - ops: &mut AdditiveMap, - transforms: &mut AdditiveMap, - effect: &ExecutionEffect, - ) { - for (k, op) in effect.ops.iter() { - ops.insert_add(*k, *op); - } - for (k, t) in effect.transforms.iter() { - transforms.insert_add(*k, t.clone()) - } - } - - /// In the case we are writing the same value as was there originally, - /// it is equivalent to having a `Transform::Identity` and `Op::Read`. - /// This function makes that reduction before returning the `ExecutionEffect`. 
- fn reduce_identity_writes>( - mut ops: AdditiveMap, - mut transforms: AdditiveMap, - reader: &R, - correlation_id: CorrelationId, - ) -> ExecutionEffect { - let kvs: Vec<(Key, StoredValue)> = transforms - .keys() - .filter_map(|k| match transforms.get(k) { - Some(Transform::Write(_)) => reader - .read(correlation_id, k) - .ok() - .and_then(|maybe_v| maybe_v.map(|v| (*k, v))), - _ => None, - }) - .collect(); - - for (k, old_value) in kvs { - if let Some(Transform::Write(new_value)) = transforms.remove(&k) { - if new_value == old_value { - transforms.insert(k, Transform::Identity); - ops.insert(k, Op::Read); - } else { - transforms.insert(k, Transform::Write(new_value)); - } - } - } - - ExecutionEffect::new(ops, transforms) - } -} diff --git a/execution_engine/src/core/engine_state/genesis.rs b/execution_engine/src/core/engine_state/genesis.rs deleted file mode 100644 index 7dddd8f2de..0000000000 --- a/execution_engine/src/core/engine_state/genesis.rs +++ /dev/null @@ -1,1612 +0,0 @@ -use std::{cell::RefCell, collections::BTreeMap, fmt, iter, rc::Rc}; - -use datasize::DataSize; -use num_rational::Ratio; -use num_traits::Zero; -use parity_wasm::elements::Module; -use rand::{ - distributions::{Distribution, Standard}, - Rng, -}; -use serde::{Deserialize, Serialize}; - -use casper_types::{ - account::AccountHash, - bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, - contracts::{ - ContractPackageStatus, ContractVersions, DisabledVersions, Groups, NamedKeys, Parameters, - }, - runtime_args, - system::{ - auction::{ - Bid, Bids, DelegationRate, Delegator, SeigniorageRecipient, SeigniorageRecipients, - SeigniorageRecipientsSnapshot, ValidatorWeights, ARG_DELEGATION_RATE, ARG_DELEGATOR, - ARG_ERA_END_TIMESTAMP_MILLIS, ARG_PUBLIC_KEY, ARG_REWARD_FACTORS, ARG_VALIDATOR, - ARG_VALIDATOR_PUBLIC_KEY, AUCTION_DELAY_KEY, DELEGATION_RATE_DENOMINATOR, - ERA_END_TIMESTAMP_MILLIS_KEY, ERA_ID_KEY, INITIAL_ERA_END_TIMESTAMP_MILLIS, - INITIAL_ERA_ID, LOCKED_FUNDS_PERIOD_KEY, 
METHOD_ACTIVATE_BID, METHOD_ADD_BID, - METHOD_DELEGATE, METHOD_DISTRIBUTE, METHOD_GET_ERA_VALIDATORS, METHOD_READ_ERA_ID, - METHOD_RUN_AUCTION, METHOD_SLASH, METHOD_UNDELEGATE, METHOD_WITHDRAW_BID, - UNBONDING_DELAY_KEY, VALIDATOR_SLOTS_KEY, - }, - handle_payment::{ - self, ARG_ACCOUNT, METHOD_FINALIZE_PAYMENT, METHOD_GET_PAYMENT_PURSE, - METHOD_GET_REFUND_PURSE, METHOD_SET_REFUND_PURSE, - }, - mint::{ - self, ARG_AMOUNT, ARG_ID, ARG_PURSE, ARG_ROUND_SEIGNIORAGE_RATE, ARG_SOURCE, - ARG_TARGET, METHOD_BALANCE, METHOD_CREATE, METHOD_MINT, METHOD_READ_BASE_ROUND_REWARD, - METHOD_REDUCE_TOTAL_SUPPLY, METHOD_TRANSFER, ROUND_SEIGNIORAGE_RATE_KEY, - TOTAL_SUPPLY_KEY, - }, - standard_payment::METHOD_PAY, - SystemContractType, - }, - AccessRights, CLType, CLTyped, CLValue, Contract, ContractHash, ContractPackage, - ContractPackageHash, ContractWasm, ContractWasmHash, DeployHash, EntryPoint, EntryPointAccess, - EntryPointType, EntryPoints, EraId, Key, Parameter, Phase, ProtocolVersion, PublicKey, - RuntimeArgs, SecretKey, URef, U512, -}; - -use crate::{ - core::{ - engine_state::{execution_effect::ExecutionEffect, EngineConfig}, - execution, - execution::{AddressGenerator, Executor}, - tracking_copy::TrackingCopy, - }, - shared::{ - account::Account, - gas::Gas, - motes::Motes, - newtypes::{Blake2bHash, CorrelationId}, - stored_value::StoredValue, - system_config::SystemConfig, - wasm_config::WasmConfig, - TypeMismatch, - }, - storage::{ - global_state::{CommitResult, StateProvider}, - protocol_data::ProtocolData, - }, -}; - -pub const PLACEHOLDER_KEY: Key = Key::Hash([0u8; 32]); -const TAG_LENGTH: usize = U8_SERIALIZED_LENGTH; - -#[derive(Debug, Serialize)] -pub enum GenesisResult { - RootNotFound, - KeyNotFound(Key), - TypeMismatch(TypeMismatch), - Serialization(bytesrepr::Error), - Success { - post_state_hash: Blake2bHash, - #[serde(skip_serializing)] - effect: ExecutionEffect, - }, -} - -impl fmt::Display for GenesisResult { - fn fmt(&self, f: &mut fmt::Formatter) -> 
Result<(), fmt::Error> { - match self { - GenesisResult::RootNotFound => write!(f, "Root not found"), - GenesisResult::KeyNotFound(key) => write!(f, "Key not found: {}", key), - GenesisResult::TypeMismatch(type_mismatch) => { - write!(f, "Type mismatch: {:?}", type_mismatch) - } - GenesisResult::Serialization(error) => write!(f, "Serialization error: {:?}", error), - GenesisResult::Success { - post_state_hash, - effect, - } => write!(f, "Success: {} {:?}", post_state_hash, effect), - } - } -} - -impl GenesisResult { - pub fn from_commit_result(commit_result: CommitResult, effect: ExecutionEffect) -> Self { - match commit_result { - CommitResult::RootNotFound => GenesisResult::RootNotFound, - CommitResult::KeyNotFound(key) => GenesisResult::KeyNotFound(key), - CommitResult::TypeMismatch(type_mismatch) => GenesisResult::TypeMismatch(type_mismatch), - CommitResult::Serialization(error) => GenesisResult::Serialization(error), - CommitResult::Success { state_root, .. } => GenesisResult::Success { - post_state_hash: state_root, - effect, - }, - } - } -} - -#[repr(u8)] -enum GenesisAccountTag { - System = 0, - Account = 1, - Delegator = 2, -} - -#[derive(DataSize, Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct GenesisValidator { - bonded_amount: Motes, - delegation_rate: DelegationRate, -} - -impl ToBytes for GenesisValidator { - fn to_bytes(&self) -> Result, bytesrepr::Error> { - let mut buffer = bytesrepr::allocate_buffer(self)?; - buffer.extend(self.bonded_amount.to_bytes()?); - buffer.extend(self.delegation_rate.to_bytes()?); - Ok(buffer) - } - - fn serialized_length(&self) -> usize { - self.bonded_amount.serialized_length() + self.delegation_rate.serialized_length() - } -} - -impl FromBytes for GenesisValidator { - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { - let (bonded_amount, remainder) = FromBytes::from_bytes(bytes)?; - let (delegation_rate, remainder) = FromBytes::from_bytes(remainder)?; - let 
genesis_validator = GenesisValidator { - bonded_amount, - delegation_rate, - }; - Ok((genesis_validator, remainder)) - } -} - -impl GenesisValidator { - pub fn new(bonded_amount: Motes, delegation_rate: DelegationRate) -> Self { - Self { - bonded_amount, - delegation_rate, - } - } - - pub fn bonded_amount(&self) -> Motes { - self.bonded_amount - } - - pub fn delegation_rate(&self) -> DelegationRate { - self.delegation_rate - } -} - -impl Distribution for Standard { - fn sample(&self, rng: &mut R) -> GenesisValidator { - let bonded_amount = Motes::new(rng.gen()); - let delegation_rate = rng.gen(); - - GenesisValidator::new(bonded_amount, delegation_rate) - } -} - -#[derive(DataSize, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub enum GenesisAccount { - System, - Account { - public_key: PublicKey, - balance: Motes, - validator: Option, - }, - Delegator { - validator_public_key: PublicKey, - delegator_public_key: PublicKey, - balance: Motes, - delegated_amount: Motes, - }, -} - -impl GenesisAccount { - /// Create a system account variant. - pub fn system() -> Self { - Self::System - } - - /// Create a standard account variant. - pub fn account( - public_key: PublicKey, - balance: Motes, - validator: Option, - ) -> Self { - Self::Account { - public_key, - balance, - validator, - } - } - - /// Create a delegator account variant. - pub fn delegator( - validator_public_key: PublicKey, - delegator_public_key: PublicKey, - balance: Motes, - delegated_amount: Motes, - ) -> Self { - Self::Delegator { - validator_public_key, - delegator_public_key, - balance, - delegated_amount, - } - } - - /// The public key (if any) associated with the account. - pub fn public_key(&self) -> PublicKey { - match self { - GenesisAccount::System => PublicKey::System, - GenesisAccount::Account { public_key, .. } => public_key.clone(), - GenesisAccount::Delegator { - delegator_public_key, - .. - } => delegator_public_key.clone(), - } - } - - /// The account hash for the account. 
- pub fn account_hash(&self) -> AccountHash { - match self { - GenesisAccount::System => PublicKey::System.to_account_hash(), - GenesisAccount::Account { public_key, .. } => public_key.to_account_hash(), - GenesisAccount::Delegator { - delegator_public_key, - .. - } => delegator_public_key.to_account_hash(), - } - } - - /// How many motes are to be deposited in the account's main purse. - pub fn balance(&self) -> Motes { - match self { - GenesisAccount::System => Motes::zero(), - GenesisAccount::Account { balance, .. } => *balance, - GenesisAccount::Delegator { balance, .. } => *balance, - } - } - - /// How many motes are to be staked. - /// - /// Staked accounts are either validators with some amount of bonded stake or delgators with - /// some amount of delegated stake. - pub fn staked_amount(&self) -> Motes { - match self { - GenesisAccount::System { .. } - | GenesisAccount::Account { - validator: None, .. - } => Motes::zero(), - GenesisAccount::Account { - validator: Some(genesis_validator), - .. - } => genesis_validator.bonded_amount(), - GenesisAccount::Delegator { - delegated_amount, .. - } => *delegated_amount, - } - } - - /// What is the delegation rate of a validator. - pub fn delegation_rate(&self) -> DelegationRate { - match self { - GenesisAccount::Account { - validator: Some(genesis_validator), - .. - } => genesis_validator.delegation_rate(), - GenesisAccount::System - | GenesisAccount::Account { - validator: None, .. - } - | GenesisAccount::Delegator { .. } => { - // This value represents a delegation rate in invalid state that system is supposed - // to reject if used. - DelegationRate::max_value() - } - } - } - - /// Is this a virtual system account. - pub fn is_system_account(&self) -> bool { - matches!(self, GenesisAccount::System { .. }) - } - - /// Is this a validator account. - pub fn is_validator(&self) -> bool { - match self { - GenesisAccount::Account { - validator: Some(_), .. - } => true, - GenesisAccount::System { .. 
} - | GenesisAccount::Account { - validator: None, .. - } - | GenesisAccount::Delegator { .. } => false, - } - } - - /// Details about the genesis validator. - pub fn validator(&self) -> Option<&GenesisValidator> { - match self { - GenesisAccount::Account { - validator: Some(genesis_validator), - .. - } => Some(genesis_validator), - _ => None, - } - } - - /// Is this a delegator account. - pub fn is_delegator(&self) -> bool { - matches!(self, GenesisAccount::Delegator { .. }) - } - - /// Details about the genesis delegator. - pub fn as_delegator(&self) -> Option<(&PublicKey, &PublicKey, &Motes, &Motes)> { - match self { - GenesisAccount::Delegator { - validator_public_key, - delegator_public_key, - balance, - delegated_amount, - } => Some(( - validator_public_key, - delegator_public_key, - balance, - delegated_amount, - )), - _ => None, - } - } -} - -impl Distribution for Standard { - fn sample(&self, rng: &mut R) -> GenesisAccount { - let mut bytes = [0u8; 32]; - rng.fill_bytes(&mut bytes[..]); - let public_key: PublicKey = SecretKey::ed25519_from_bytes(bytes).unwrap().into(); - let balance = Motes::new(rng.gen()); - let validator = rng.gen(); - - GenesisAccount::account(public_key, balance, validator) - } -} - -impl ToBytes for GenesisAccount { - fn to_bytes(&self) -> Result, bytesrepr::Error> { - let mut buffer = bytesrepr::allocate_buffer(self)?; - match self { - GenesisAccount::System => { - buffer.push(GenesisAccountTag::System as u8); - } - GenesisAccount::Account { - public_key, - balance, - validator, - } => { - buffer.push(GenesisAccountTag::Account as u8); - buffer.extend(public_key.to_bytes()?); - buffer.extend(balance.value().to_bytes()?); - buffer.extend(validator.to_bytes()?); - } - GenesisAccount::Delegator { - validator_public_key, - delegator_public_key, - balance, - delegated_amount, - } => { - buffer.push(GenesisAccountTag::Delegator as u8); - buffer.extend(validator_public_key.to_bytes()?); - buffer.extend(delegator_public_key.to_bytes()?); - 
buffer.extend(balance.value().to_bytes()?); - buffer.extend(delegated_amount.value().to_bytes()?); - } - } - Ok(buffer) - } - - fn serialized_length(&self) -> usize { - match self { - GenesisAccount::System => TAG_LENGTH, - GenesisAccount::Account { - public_key, - balance, - validator, - } => { - public_key.serialized_length() - + balance.value().serialized_length() - + validator.serialized_length() - + TAG_LENGTH - } - GenesisAccount::Delegator { - validator_public_key, - delegator_public_key, - balance, - delegated_amount, - } => { - validator_public_key.serialized_length() - + delegator_public_key.serialized_length() - + balance.value().serialized_length() - + delegated_amount.value().serialized_length() - + TAG_LENGTH - } - } - } -} - -impl FromBytes for GenesisAccount { - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { - let (tag, remainder) = u8::from_bytes(bytes)?; - match tag { - tag if tag == GenesisAccountTag::System as u8 => { - let genesis_account = GenesisAccount::system(); - Ok((genesis_account, remainder)) - } - tag if tag == GenesisAccountTag::Account as u8 => { - let (public_key, remainder) = FromBytes::from_bytes(remainder)?; - let (balance, remainder) = FromBytes::from_bytes(remainder)?; - let (validator, remainder) = FromBytes::from_bytes(remainder)?; - let genesis_account = GenesisAccount::account(public_key, balance, validator); - Ok((genesis_account, remainder)) - } - tag if tag == GenesisAccountTag::Delegator as u8 => { - let (validator_public_key, remainder) = FromBytes::from_bytes(remainder)?; - let (delegator_public_key, remainder) = FromBytes::from_bytes(remainder)?; - let (balance, remainder) = FromBytes::from_bytes(remainder)?; - let (delegated_amount_value, remainder) = FromBytes::from_bytes(remainder)?; - let genesis_account = GenesisAccount::delegator( - validator_public_key, - delegator_public_key, - balance, - Motes::new(delegated_amount_value), - ); - Ok((genesis_account, remainder)) - } - _ => 
Err(bytesrepr::Error::Formatting), - } - } -} - -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct GenesisConfig { - name: String, - timestamp: u64, - protocol_version: ProtocolVersion, - ee_config: ExecConfig, -} - -impl GenesisConfig { - pub fn new( - name: String, - timestamp: u64, - protocol_version: ProtocolVersion, - ee_config: ExecConfig, - ) -> Self { - GenesisConfig { - name, - timestamp, - protocol_version, - ee_config, - } - } - - pub fn name(&self) -> &str { - self.name.as_str() - } - - pub fn timestamp(&self) -> u64 { - self.timestamp - } - - pub fn protocol_version(&self) -> ProtocolVersion { - self.protocol_version - } - - pub fn ee_config(&self) -> &ExecConfig { - &self.ee_config - } - - pub fn ee_config_mut(&mut self) -> &mut ExecConfig { - &mut self.ee_config - } - - pub fn take_ee_config(self) -> ExecConfig { - self.ee_config - } -} - -impl Distribution for Standard { - fn sample(&self, rng: &mut R) -> GenesisConfig { - let count = rng.gen_range(1..1000); - let name = iter::repeat(()) - .map(|_| rng.gen::()) - .take(count) - .collect(); - - let timestamp = rng.gen(); - - let protocol_version = ProtocolVersion::from_parts(rng.gen(), rng.gen(), rng.gen()); - - let ee_config = rng.gen(); - - GenesisConfig { - name, - timestamp, - protocol_version, - ee_config, - } - } -} - -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct ExecConfig { - accounts: Vec, - wasm_config: WasmConfig, - system_config: SystemConfig, - validator_slots: u32, - auction_delay: u64, - locked_funds_period_millis: u64, - round_seigniorage_rate: Ratio, - unbonding_delay: u64, - genesis_timestamp_millis: u64, -} - -impl ExecConfig { - #[allow(clippy::too_many_arguments)] - pub fn new( - accounts: Vec, - wasm_config: WasmConfig, - system_config: SystemConfig, - validator_slots: u32, - auction_delay: u64, - locked_funds_period_millis: u64, - round_seigniorage_rate: Ratio, - unbonding_delay: u64, - genesis_timestamp_millis: u64, - ) -> ExecConfig { - ExecConfig { - accounts, - 
wasm_config, - system_config, - validator_slots, - auction_delay, - locked_funds_period_millis, - round_seigniorage_rate, - unbonding_delay, - genesis_timestamp_millis, - } - } - - pub fn wasm_config(&self) -> &WasmConfig { - &self.wasm_config - } - - pub fn system_config(&self) -> &SystemConfig { - &self.system_config - } - - pub fn get_bonded_validators(&self) -> impl Iterator { - self.accounts - .iter() - .filter(|&genesis_account| genesis_account.is_validator()) - } - - pub fn get_bonded_delegators( - &self, - ) -> impl Iterator { - self.accounts - .iter() - .filter_map(|genesis_account| genesis_account.as_delegator()) - } - - pub fn accounts(&self) -> &[GenesisAccount] { - self.accounts.as_slice() - } - - pub fn push_account(&mut self, account: GenesisAccount) { - self.accounts.push(account) - } - - pub fn validator_slots(&self) -> u32 { - self.validator_slots - } - - pub fn auction_delay(&self) -> u64 { - self.auction_delay - } - - pub fn locked_funds_period_millis(&self) -> u64 { - self.locked_funds_period_millis - } - - pub fn round_seigniorage_rate(&self) -> Ratio { - self.round_seigniorage_rate - } - - pub fn unbonding_delay(&self) -> u64 { - self.unbonding_delay - } - - pub fn genesis_timestamp_millis(&self) -> u64 { - self.genesis_timestamp_millis - } -} - -impl Distribution for Standard { - fn sample(&self, rng: &mut R) -> ExecConfig { - let count = rng.gen_range(1..10); - - let accounts = iter::repeat(()).map(|_| rng.gen()).take(count).collect(); - - let wasm_config = rng.gen(); - - let system_config = rng.gen(); - - let validator_slots = rng.gen(); - - let auction_delay = rng.gen(); - - let locked_funds_period_millis = rng.gen(); - - let round_seigniorage_rate = Ratio::new( - rng.gen_range(1..1_000_000_000), - rng.gen_range(1..1_000_000_000), - ); - - let unbonding_delay = rng.gen(); - - let genesis_timestamp_millis = rng.gen(); - - ExecConfig { - accounts, - wasm_config, - system_config, - validator_slots, - auction_delay, - 
locked_funds_period_millis, - round_seigniorage_rate, - unbonding_delay, - genesis_timestamp_millis, - } - } -} - -#[derive(Clone, Debug)] -pub enum GenesisError { - UnableToCreateRuntime, - InvalidMintKey, - MissingMintContract, - UnexpectedStoredValue, - MissingPublicKey, - ExecutionError(execution::Error), - MintError(mint::Error), - CLValue(String), - OrphanedDelegator { - validator_public_key: PublicKey, - delegator_public_key: PublicKey, - }, - DuplicatedDelegatorEntry { - validator_public_key: PublicKey, - delegator_public_key: PublicKey, - }, - InvalidDelegationRate { - public_key: PublicKey, - delegation_rate: DelegationRate, - }, - InvalidBondAmount { - public_key: PublicKey, - }, - InvalidDelegatedAmount { - public_key: PublicKey, - }, -} - -pub(crate) struct GenesisInstaller -where - S: StateProvider, - S::Error: Into, -{ - genesis_config_hash: Blake2bHash, - virtual_system_account: Account, - protocol_version: ProtocolVersion, - correlation_id: CorrelationId, - exec_config: ExecConfig, - uref_address_generator: Rc>, - hash_address_generator: Rc>, - transfer_address_generator: Rc>, - executor: Executor, - tracking_copy: Rc::Reader>>>, - protocol_data: ProtocolData, - system_module: Module, -} - -impl GenesisInstaller -where - S: StateProvider, - S::Error: Into, -{ - pub(crate) fn new( - genesis_config_hash: Blake2bHash, - protocol_version: ProtocolVersion, - correlation_id: CorrelationId, - engine_config: EngineConfig, - exec_config: ExecConfig, - tracking_copy: Rc::Reader>>>, - system_module: Module, - ) -> Self { - let executor = Executor::new(engine_config); - - let phase = Phase::System; - let genesis_config_hash_bytes = genesis_config_hash.as_ref(); - let uref_address_generator = { - let generator = AddressGenerator::new(genesis_config_hash_bytes, phase); - Rc::new(RefCell::new(generator)) - }; - let hash_address_generator = { - let generator = AddressGenerator::new(genesis_config_hash_bytes, phase); - Rc::new(RefCell::new(generator)) - }; - let 
transfer_address_generator = { - let generator = AddressGenerator::new(genesis_config_hash_bytes, phase); - Rc::new(RefCell::new(generator)) - }; - - let protocol_data = ProtocolData::default(); - - let system_account_addr = PublicKey::System.to_account_hash(); - - let virtual_system_account = { - let named_keys = NamedKeys::new(); - let purse = URef::new(Default::default(), AccessRights::READ_ADD_WRITE); - Account::create(system_account_addr, named_keys, purse) - }; - - let key = Key::Account(system_account_addr); - let value = { StoredValue::Account(virtual_system_account.clone()) }; - - tracking_copy.borrow_mut().write(key, value); - - GenesisInstaller { - genesis_config_hash, - virtual_system_account, - protocol_version, - correlation_id, - exec_config, - uref_address_generator, - hash_address_generator, - transfer_address_generator, - executor, - tracking_copy, - protocol_data, - system_module, - } - } - - pub(crate) fn finalize(self) -> ExecutionEffect { - self.tracking_copy.borrow_mut().effect() - } - - pub(crate) fn create_mint(&mut self) -> Result { - let round_seigniorage_rate_uref = - { - let round_seigniorage_rate_uref = self - .uref_address_generator - .borrow_mut() - .new_uref(AccessRights::READ_ADD_WRITE); - - let (round_seigniorage_rate_numer, round_seigniorage_rate_denom) = - self.exec_config.round_seigniorage_rate().into(); - let round_seigniorage_rate: Ratio = Ratio::new( - round_seigniorage_rate_numer.into(), - round_seigniorage_rate_denom.into(), - ); - - self.tracking_copy.borrow_mut().write( - round_seigniorage_rate_uref.into(), - StoredValue::CLValue(CLValue::from_t(round_seigniorage_rate).map_err( - |_| GenesisError::CLValue(ARG_ROUND_SEIGNIORAGE_RATE.to_string()), - )?), - ); - round_seigniorage_rate_uref - }; - - let total_supply_uref = { - let total_supply_uref = self - .uref_address_generator - .borrow_mut() - .new_uref(AccessRights::READ_ADD_WRITE); - - self.tracking_copy.borrow_mut().write( - total_supply_uref.into(), - 
StoredValue::CLValue( - CLValue::from_t(U512::zero()) - .map_err(|_| GenesisError::CLValue(TOTAL_SUPPLY_KEY.to_string()))?, - ), - ); - total_supply_uref - }; - - let named_keys = { - let mut named_keys = NamedKeys::new(); - named_keys.insert( - ROUND_SEIGNIORAGE_RATE_KEY.to_string(), - round_seigniorage_rate_uref.into(), - ); - named_keys.insert(TOTAL_SUPPLY_KEY.to_string(), total_supply_uref.into()); - named_keys - }; - - let entry_points = self.mint_entry_points(); - - let access_key = self - .uref_address_generator - .borrow_mut() - .new_uref(AccessRights::READ_ADD_WRITE); - - let contract_hash = self.store_system_contract( - SystemContractType::Mint, - access_key, - named_keys, - entry_points, - ); - - self.protocol_data = ProtocolData::partial_with_mint(contract_hash); - - Ok(contract_hash) - } - - pub fn create_handle_payment(&self) -> Result { - let handle_payment_payment_purse = self.create_purse( - U512::zero(), - DeployHash::new(self.genesis_config_hash.value()), - )?; - - let named_keys = { - let mut named_keys = NamedKeys::new(); - let named_key = Key::URef(handle_payment_payment_purse); - named_keys.insert(handle_payment::PAYMENT_PURSE_KEY.to_string(), named_key); - named_keys - }; - - let entry_points = self.handle_payment_entry_points(); - - let access_key = self - .uref_address_generator - .borrow_mut() - .new_uref(AccessRights::READ_ADD_WRITE); - - let contract_hash = self.store_system_contract( - SystemContractType::HandlePayment, - access_key, - named_keys, - entry_points, - ); - - Ok(contract_hash) - } - - pub(crate) fn create_auction(&self) -> Result { - let locked_funds_period_millis = self.exec_config.locked_funds_period_millis(); - let auction_delay: u64 = self.exec_config.auction_delay(); - let genesis_timestamp_millis: u64 = self.exec_config.genesis_timestamp_millis(); - - let mut named_keys = NamedKeys::new(); - - let genesis_validators: Vec<_> = self.exec_config.get_bonded_validators().collect(); - - let genesis_delegators: Vec<_> = 
self.exec_config.get_bonded_delegators().collect(); - - // Make sure all delegators have corresponding genesis validator entries - for (validator_public_key, delegator_public_key, _balance, delegated_amount) in - genesis_delegators.iter() - { - if delegated_amount.is_zero() { - return Err(GenesisError::InvalidDelegatedAmount { - public_key: (*delegator_public_key).clone(), - }); - } - - let orphan_condition = genesis_validators.iter().find(|genesis_validator| { - genesis_validator.public_key() == (*validator_public_key).clone() - }); - - if orphan_condition.is_none() { - return Err(GenesisError::OrphanedDelegator { - validator_public_key: (*validator_public_key).clone(), - delegator_public_key: (*delegator_public_key).clone(), - }); - } - } - - let validators = { - let mut validators = Bids::new(); - - for genesis_validator in genesis_validators { - let public_key = genesis_validator.public_key(); - - let staked_amount = genesis_validator.staked_amount(); - if staked_amount.is_zero() { - return Err(GenesisError::InvalidBondAmount { public_key }); - } - - let delegation_rate = genesis_validator.delegation_rate(); - if delegation_rate > DELEGATION_RATE_DENOMINATOR { - return Err(GenesisError::InvalidDelegationRate { - public_key, - delegation_rate, - }); - } - debug_assert_ne!(public_key, PublicKey::System); - - let purse_uref = self.create_purse( - staked_amount.value(), - DeployHash::new(public_key.to_account_hash().value()), - )?; - let release_timestamp_millis = - genesis_timestamp_millis + locked_funds_period_millis; - let founding_validator = { - let mut bid = Bid::locked( - public_key.clone(), - purse_uref, - staked_amount.value(), - delegation_rate, - release_timestamp_millis, - ); - - // Set up delegator entries attached to genesis validators - for ( - validator_public_key, - delegator_public_key, - _delegator_balance, - &delegator_delegated_amount, - ) in genesis_delegators.iter() - { - if (*validator_public_key).clone() == public_key.clone() { - let 
purse_uref = self.create_purse( - delegator_delegated_amount.value(), - DeployHash::new(delegator_public_key.to_account_hash().value()), - )?; - - let delegator = Delegator::locked( - (*delegator_public_key).clone(), - delegator_delegated_amount.value(), - purse_uref, - (*validator_public_key).clone(), - release_timestamp_millis, - ); - - if bid - .delegators_mut() - .insert((*delegator_public_key).clone(), delegator) - .is_some() - { - return Err(GenesisError::DuplicatedDelegatorEntry { - validator_public_key: (*validator_public_key).clone(), - delegator_public_key: (*delegator_public_key).clone(), - }); - } - } - } - - bid - }; - - validators.insert(public_key, founding_validator); - } - validators - }; - - let initial_seigniorage_recipients = - self.initial_seigniorage_recipients(&validators, auction_delay); - - for (era_id, recipients) in initial_seigniorage_recipients.into_iter() { - self.tracking_copy.borrow_mut().write( - Key::EraValidators(era_id), - StoredValue::EraValidators(recipients), - ) - } - - let era_id_uref = self - .uref_address_generator - .borrow_mut() - .new_uref(AccessRights::READ_ADD_WRITE); - self.tracking_copy.borrow_mut().write( - era_id_uref.into(), - StoredValue::CLValue( - CLValue::from_t(INITIAL_ERA_ID) - .map_err(|_| GenesisError::CLValue(ERA_ID_KEY.to_string()))?, - ), - ); - named_keys.insert(ERA_ID_KEY.into(), era_id_uref.into()); - - let era_end_timestamp_millis_uref = self - .uref_address_generator - .borrow_mut() - .new_uref(AccessRights::READ_ADD_WRITE); - self.tracking_copy.borrow_mut().write( - era_end_timestamp_millis_uref.into(), - StoredValue::CLValue( - CLValue::from_t(INITIAL_ERA_END_TIMESTAMP_MILLIS) - .map_err(|_| GenesisError::CLValue(ERA_END_TIMESTAMP_MILLIS_KEY.to_string()))?, - ), - ); - named_keys.insert( - ERA_END_TIMESTAMP_MILLIS_KEY.into(), - era_end_timestamp_millis_uref.into(), - ); - - for (validator_public_key, bid) in validators.into_iter() { - let validator_account_hash = 
AccountHash::from(&validator_public_key); - self.tracking_copy.borrow_mut().write( - Key::Bid(validator_account_hash), - StoredValue::Bid(Box::new(bid)), - ) - } - - let validator_slots = self.exec_config.validator_slots(); - let validator_slots_uref = self - .uref_address_generator - .borrow_mut() - .new_uref(AccessRights::READ_ADD_WRITE); - self.tracking_copy.borrow_mut().write( - validator_slots_uref.into(), - StoredValue::CLValue( - CLValue::from_t(validator_slots) - .map_err(|_| GenesisError::CLValue(VALIDATOR_SLOTS_KEY.to_string()))?, - ), - ); - named_keys.insert(VALIDATOR_SLOTS_KEY.into(), validator_slots_uref.into()); - - let auction_delay_uref = self - .uref_address_generator - .borrow_mut() - .new_uref(AccessRights::READ_ADD_WRITE); - self.tracking_copy.borrow_mut().write( - auction_delay_uref.into(), - StoredValue::CLValue( - CLValue::from_t(auction_delay) - .map_err(|_| GenesisError::CLValue(AUCTION_DELAY_KEY.to_string()))?, - ), - ); - named_keys.insert(AUCTION_DELAY_KEY.into(), auction_delay_uref.into()); - - let locked_funds_period_uref = self - .uref_address_generator - .borrow_mut() - .new_uref(AccessRights::READ_ADD_WRITE); - self.tracking_copy.borrow_mut().write( - locked_funds_period_uref.into(), - StoredValue::CLValue( - CLValue::from_t(locked_funds_period_millis) - .map_err(|_| GenesisError::CLValue(LOCKED_FUNDS_PERIOD_KEY.to_string()))?, - ), - ); - named_keys.insert( - LOCKED_FUNDS_PERIOD_KEY.into(), - locked_funds_period_uref.into(), - ); - - let unbonding_delay = self.exec_config.unbonding_delay(); - let unbonding_delay_uref = self - .uref_address_generator - .borrow_mut() - .new_uref(AccessRights::READ_ADD_WRITE); - self.tracking_copy.borrow_mut().write( - unbonding_delay_uref.into(), - StoredValue::CLValue( - CLValue::from_t(unbonding_delay) - .map_err(|_| GenesisError::CLValue(UNBONDING_DELAY_KEY.to_string()))?, - ), - ); - named_keys.insert(UNBONDING_DELAY_KEY.into(), unbonding_delay_uref.into()); - - let entry_points = 
self.auction_entry_points(); - - let access_key = self - .uref_address_generator - .borrow_mut() - .new_uref(AccessRights::READ_ADD_WRITE); - - let contract_hash = self.store_system_contract( - SystemContractType::Auction, - access_key, - named_keys, - entry_points, - ); - - Ok(contract_hash) - } - - pub(crate) fn create_standard_payment(&self) -> ContractHash { - let named_keys = NamedKeys::new(); - - let entry_points = self.standard_payment_entry_points(); - - let access_key = self - .uref_address_generator - .borrow_mut() - .new_uref(AccessRights::READ_ADD_WRITE); - - self.store_system_contract( - SystemContractType::StandardPayment, - access_key, - named_keys, - entry_points, - ) - } - - pub(crate) fn create_accounts(&self) -> Result<(), GenesisError> { - let accounts = { - let mut ret: Vec = - self.exec_config.accounts().to_vec().into_iter().collect(); - let system_account = GenesisAccount::system(); - ret.push(system_account); - ret - }; - - for account in accounts { - let account_hash = account.account_hash(); - let main_purse = self.create_purse( - account.balance().value(), - DeployHash::new(account_hash.value()), - )?; - - let key = Key::Account(account_hash); - let stored_value = StoredValue::Account(Account::create( - account_hash, - Default::default(), - main_purse, - )); - - self.tracking_copy.borrow_mut().write(key, stored_value); - } - - Ok(()) - } - - fn initial_seigniorage_recipients( - &self, - validators: &BTreeMap, - auction_delay: u64, - ) -> BTreeMap { - let initial_snapshot_range = INITIAL_ERA_ID.iter_inclusive(auction_delay); - - let mut seigniorage_recipients = SeigniorageRecipients::new(); - for (era_validator, founding_validator) in validators { - seigniorage_recipients.insert( - era_validator.clone(), - SeigniorageRecipient::from(founding_validator), - ); - } - - let mut initial_seigniorage_recipients = SeigniorageRecipientsSnapshot::new(); - for era_id in initial_snapshot_range { - initial_seigniorage_recipients.insert(era_id, 
seigniorage_recipients.clone()); - } - initial_seigniorage_recipients - } - - fn create_purse(&self, amount: U512, deploy_hash: DeployHash) -> Result { - let args = runtime_args! { - ARG_AMOUNT => amount, - }; - - let base_key = Key::Hash(self.protocol_data.mint().value()); - let mint = { - if let StoredValue::Contract(contract) = self - .tracking_copy - .borrow_mut() - .read(self.correlation_id, &base_key) - .map_err(|_| GenesisError::InvalidMintKey)? - .ok_or(GenesisError::MissingMintContract)? - { - contract - } else { - return Err(GenesisError::UnexpectedStoredValue); - } - }; - - let mut named_keys = mint.named_keys().clone(); - - let (_instance, mut runtime) = self - .executor - .create_runtime( - self.system_module.clone(), - EntryPointType::Contract, - args.clone(), - &mut named_keys, - Default::default(), - base_key, - &self.virtual_system_account, - Default::default(), - Default::default(), - deploy_hash, - Gas::new(U512::MAX), - Rc::clone(&self.hash_address_generator), - Rc::clone(&self.uref_address_generator), - Rc::clone(&self.transfer_address_generator), - self.protocol_version, - self.correlation_id, - Rc::clone(&self.tracking_copy), - Phase::System, - self.protocol_data, - Default::default(), - ) - .map_err(|_| GenesisError::UnableToCreateRuntime)?; - - let purse_uref = runtime - .call_contract(self.protocol_data.mint(), METHOD_MINT, args) - .map_err(GenesisError::ExecutionError)? - .into_t::>() - .map_err(|cl_value_error| GenesisError::CLValue(cl_value_error.to_string()))? 
- .map_err(GenesisError::MintError)?; - Ok(purse_uref) - } - - fn store_system_contract( - &self, - contract_type: SystemContractType, - access_key: URef, - named_keys: NamedKeys, - entry_points: EntryPoints, - ) -> ContractHash { - let protocol_version = self.protocol_version; - - let contract_hash = contract_type.into_contract_hash(); - let contract_wasm_hash = - ContractWasmHash::new(self.hash_address_generator.borrow_mut().new_hash_address()); - let contract_package_hash = - ContractPackageHash::new(self.hash_address_generator.borrow_mut().new_hash_address()); - - let contract_wasm = ContractWasm::new(vec![]); - let contract = Contract::new( - contract_package_hash, - contract_wasm_hash, - named_keys, - entry_points, - protocol_version, - ); - - // Genesis contracts can be versioned contracts. - let contract_package = { - let mut contract_package = ContractPackage::new( - access_key, - ContractVersions::default(), - DisabledVersions::default(), - Groups::default(), - ContractPackageStatus::default(), - ); - contract_package.insert_contract_version(protocol_version.value().major, contract_hash); - contract_package - }; - - self.tracking_copy.borrow_mut().write( - contract_wasm_hash.into(), - StoredValue::ContractWasm(contract_wasm), - ); - self.tracking_copy - .borrow_mut() - .write(contract_hash.into(), StoredValue::Contract(contract)); - self.tracking_copy.borrow_mut().write( - contract_package_hash.into(), - StoredValue::ContractPackage(contract_package), - ); - - contract_hash - } - - fn mint_entry_points(&self) -> EntryPoints { - let mut entry_points = EntryPoints::new(); - - let entry_point = EntryPoint::new( - METHOD_MINT, - vec![Parameter::new(ARG_AMOUNT, CLType::U512)], - CLType::Result { - ok: Box::new(CLType::URef), - err: Box::new(CLType::U8), - }, - EntryPointAccess::Public, - EntryPointType::Contract, - ); - entry_points.add_entry_point(entry_point); - - let entry_point = EntryPoint::new( - METHOD_REDUCE_TOTAL_SUPPLY, - 
vec![Parameter::new(ARG_AMOUNT, CLType::U512)], - CLType::Result { - ok: Box::new(CLType::Unit), - err: Box::new(CLType::U8), - }, - EntryPointAccess::Public, - EntryPointType::Contract, - ); - entry_points.add_entry_point(entry_point); - - let entry_point = EntryPoint::new( - METHOD_CREATE, - Parameters::new(), - CLType::URef, - EntryPointAccess::Public, - EntryPointType::Contract, - ); - entry_points.add_entry_point(entry_point); - - let entry_point = EntryPoint::new( - METHOD_BALANCE, - vec![Parameter::new(ARG_PURSE, CLType::URef)], - CLType::Option(Box::new(CLType::U512)), - EntryPointAccess::Public, - EntryPointType::Contract, - ); - entry_points.add_entry_point(entry_point); - - let entry_point = EntryPoint::new( - METHOD_TRANSFER, - vec![ - Parameter::new(ARG_SOURCE, CLType::URef), - Parameter::new(ARG_TARGET, CLType::URef), - Parameter::new(ARG_AMOUNT, CLType::U512), - Parameter::new(ARG_ID, CLType::Option(Box::new(CLType::U64))), - ], - CLType::Result { - ok: Box::new(CLType::Unit), - err: Box::new(CLType::U8), - }, - EntryPointAccess::Public, - EntryPointType::Contract, - ); - entry_points.add_entry_point(entry_point); - - let entry_point = EntryPoint::new( - METHOD_READ_BASE_ROUND_REWARD, - Parameters::new(), - CLType::U512, - EntryPointAccess::Public, - EntryPointType::Contract, - ); - entry_points.add_entry_point(entry_point); - - entry_points - } - - fn handle_payment_entry_points(&self) -> EntryPoints { - let mut entry_points = EntryPoints::new(); - - let get_payment_purse = EntryPoint::new( - METHOD_GET_PAYMENT_PURSE, - vec![], - CLType::URef, - EntryPointAccess::Public, - EntryPointType::Contract, - ); - entry_points.add_entry_point(get_payment_purse); - - let set_refund_purse = EntryPoint::new( - METHOD_SET_REFUND_PURSE, - vec![Parameter::new(ARG_PURSE, CLType::URef)], - CLType::Unit, - EntryPointAccess::Public, - EntryPointType::Contract, - ); - entry_points.add_entry_point(set_refund_purse); - - let get_refund_purse = EntryPoint::new( - 
METHOD_GET_REFUND_PURSE, - vec![], - CLType::Option(Box::new(CLType::URef)), - EntryPointAccess::Public, - EntryPointType::Contract, - ); - entry_points.add_entry_point(get_refund_purse); - - let finalize_payment = EntryPoint::new( - METHOD_FINALIZE_PAYMENT, - vec![ - Parameter::new(ARG_AMOUNT, CLType::U512), - Parameter::new(ARG_ACCOUNT, CLType::ByteArray(32)), - ], - CLType::Unit, - EntryPointAccess::Public, - EntryPointType::Contract, - ); - entry_points.add_entry_point(finalize_payment); - - entry_points - } - - fn auction_entry_points(&self) -> EntryPoints { - let mut entry_points = EntryPoints::new(); - - let entry_point = EntryPoint::new( - METHOD_GET_ERA_VALIDATORS, - vec![], - Option::::cl_type(), - EntryPointAccess::Public, - EntryPointType::Contract, - ); - entry_points.add_entry_point(entry_point); - - let entry_point = EntryPoint::new( - METHOD_ADD_BID, - vec![ - Parameter::new(ARG_PUBLIC_KEY, AccountHash::cl_type()), - Parameter::new(ARG_DELEGATION_RATE, DelegationRate::cl_type()), - Parameter::new(ARG_AMOUNT, U512::cl_type()), - ], - U512::cl_type(), - EntryPointAccess::Public, - EntryPointType::Contract, - ); - entry_points.add_entry_point(entry_point); - - let entry_point = EntryPoint::new( - METHOD_WITHDRAW_BID, - vec![ - Parameter::new(ARG_PUBLIC_KEY, AccountHash::cl_type()), - Parameter::new(ARG_AMOUNT, U512::cl_type()), - ], - U512::cl_type(), - EntryPointAccess::Public, - EntryPointType::Contract, - ); - entry_points.add_entry_point(entry_point); - - let entry_point = EntryPoint::new( - METHOD_DELEGATE, - vec![ - Parameter::new(ARG_DELEGATOR, PublicKey::cl_type()), - Parameter::new(ARG_VALIDATOR, PublicKey::cl_type()), - Parameter::new(ARG_AMOUNT, U512::cl_type()), - ], - U512::cl_type(), - EntryPointAccess::Public, - EntryPointType::Contract, - ); - entry_points.add_entry_point(entry_point); - - let entry_point = EntryPoint::new( - METHOD_UNDELEGATE, - vec![ - Parameter::new(ARG_DELEGATOR, AccountHash::cl_type()), - 
Parameter::new(ARG_VALIDATOR, AccountHash::cl_type()), - Parameter::new(ARG_AMOUNT, U512::cl_type()), - ], - U512::cl_type(), - EntryPointAccess::Public, - EntryPointType::Contract, - ); - entry_points.add_entry_point(entry_point); - - let entry_point = EntryPoint::new( - METHOD_RUN_AUCTION, - vec![Parameter::new(ARG_ERA_END_TIMESTAMP_MILLIS, u64::cl_type())], - CLType::Unit, - EntryPointAccess::Public, - EntryPointType::Contract, - ); - entry_points.add_entry_point(entry_point); - - let entry_point = EntryPoint::new( - METHOD_SLASH, - vec![], - CLType::Unit, - EntryPointAccess::Public, - EntryPointType::Contract, - ); - entry_points.add_entry_point(entry_point); - - let entry_point = EntryPoint::new( - METHOD_DISTRIBUTE, - vec![Parameter::new( - ARG_REWARD_FACTORS, - CLType::Map { - key: Box::new(CLType::PublicKey), - value: Box::new(CLType::U64), - }, - )], - CLType::Unit, - EntryPointAccess::Public, - EntryPointType::Contract, - ); - entry_points.add_entry_point(entry_point); - - let entry_point = EntryPoint::new( - METHOD_READ_ERA_ID, - vec![], - CLType::U64, - EntryPointAccess::Public, - EntryPointType::Contract, - ); - entry_points.add_entry_point(entry_point); - - let entry_point = EntryPoint::new( - METHOD_ACTIVATE_BID, - vec![Parameter::new(ARG_VALIDATOR_PUBLIC_KEY, CLType::PublicKey)], - CLType::Unit, - EntryPointAccess::Public, - EntryPointType::Contract, - ); - entry_points.add_entry_point(entry_point); - - entry_points - } - - fn standard_payment_entry_points(&self) -> EntryPoints { - let mut entry_points = EntryPoints::new(); - - let entry_point = EntryPoint::new( - METHOD_PAY.to_string(), - vec![Parameter::new(ARG_AMOUNT, CLType::U512)], - CLType::Result { - ok: Box::new(CLType::Unit), - err: Box::new(CLType::U32), - }, - EntryPointAccess::Public, - EntryPointType::Session, - ); - entry_points.add_entry_point(entry_point); - - entry_points - } -} - -#[cfg(test)] -mod tests { - use super::*; - use rand::RngCore; - - #[test] - fn bytesrepr_roundtrip() 
{ - let mut rng = rand::thread_rng(); - let genesis_account: GenesisAccount = rng.gen(); - bytesrepr::test_serialization_roundtrip(&genesis_account); - } - - #[test] - fn system_account_bytesrepr_roundtrip() { - let genesis_account = GenesisAccount::system(); - - bytesrepr::test_serialization_roundtrip(&genesis_account); - } - - #[test] - fn account_bytesrepr_roundtrip() { - let mut rng = rand::thread_rng(); - let mut bytes = [0u8; 32]; - rng.fill_bytes(&mut bytes[..]); - let public_key: PublicKey = SecretKey::ed25519_from_bytes(bytes).unwrap().into(); - - let genesis_account_1 = - GenesisAccount::account(public_key.clone(), Motes::new(U512::from(100)), None); - - bytesrepr::test_serialization_roundtrip(&genesis_account_1); - - let genesis_account_2 = - GenesisAccount::account(public_key, Motes::new(U512::from(100)), Some(rng.gen())); - - bytesrepr::test_serialization_roundtrip(&genesis_account_2); - } - - #[test] - fn delegator_bytesrepr_roundtrip() { - let mut rng = rand::thread_rng(); - let mut validator_bytes = [0u8; 32]; - let mut delegator_bytes = [0u8; 32]; - rng.fill_bytes(&mut validator_bytes[..]); - rng.fill_bytes(&mut delegator_bytes[..]); - let validator_public_key = SecretKey::ed25519_from_bytes(validator_bytes) - .unwrap() - .into(); - let delegator_public_key = SecretKey::ed25519_from_bytes(delegator_bytes) - .unwrap() - .into(); - - let genesis_account = GenesisAccount::delegator( - validator_public_key, - delegator_public_key, - Motes::new(U512::from(100)), - Motes::zero(), - ); - - bytesrepr::test_serialization_roundtrip(&genesis_account); - } -} diff --git a/execution_engine/src/core/engine_state/mod.rs b/execution_engine/src/core/engine_state/mod.rs deleted file mode 100644 index 445ac84bd0..0000000000 --- a/execution_engine/src/core/engine_state/mod.rs +++ /dev/null @@ -1,2070 +0,0 @@ -pub mod balance; -pub mod deploy_item; -pub mod engine_config; -pub mod era_validators; -mod error; -pub mod executable_deploy_item; -pub mod execute_request; 
-pub mod execution_effect; -pub mod execution_result; -pub mod genesis; -pub mod op; -pub mod query; -pub mod run_genesis_request; -pub mod step; -pub mod system_contract_cache; -mod transfer; -pub mod upgrade; - -use std::{ - cell::RefCell, - collections::{BTreeMap, BTreeSet}, - convert::TryFrom, - iter::FromIterator, - rc::Rc, -}; - -use num_rational::Ratio; -use once_cell::sync::Lazy; -use tracing::{debug, error}; - -use casper_types::{ - account::AccountHash, - bytesrepr::ToBytes, - contracts::NamedKeys, - system::{ - auction::{ - EraValidators, ARG_ERA_END_TIMESTAMP_MILLIS, ARG_EVICTED_VALIDATORS, - ARG_REWARD_FACTORS, ARG_VALIDATOR_PUBLIC_KEYS, AUCTION_DELAY_KEY, - LOCKED_FUNDS_PERIOD_KEY, UNBONDING_DELAY_KEY, VALIDATOR_SLOTS_KEY, - }, - handle_payment, - mint::{self, ROUND_SEIGNIORAGE_RATE_KEY}, - }, - AccessRights, ApiError, BlockTime, CLValue, Contract, DeployHash, DeployInfo, Key, KeyTag, - Phase, ProtocolVersion, PublicKey, RuntimeArgs, URef, U512, -}; - -pub use self::{ - balance::{BalanceRequest, BalanceResult}, - deploy_item::DeployItem, - engine_config::EngineConfig, - era_validators::{GetEraValidatorsError, GetEraValidatorsRequest}, - error::Error, - executable_deploy_item::ExecutableDeployItem, - execute_request::ExecuteRequest, - execution::Error as ExecError, - execution_result::{ExecutionResult, ExecutionResults, ForcedTransferResult}, - genesis::{ExecConfig, GenesisAccount, GenesisResult}, - query::{GetBidsRequest, GetBidsResult, QueryRequest, QueryResult}, - step::{RewardItem, SlashItem, StepRequest, StepResult}, - system_contract_cache::SystemContractCache, - transfer::{TransferArgs, TransferRuntimeArgsBuilder, TransferTargetMode}, - upgrade::{UpgradeConfig, UpgradeResult}, -}; -use crate::{ - core::{ - engine_state::{ - executable_deploy_item::DeployMetadata, execution_result::ExecutionResultBuilder, - genesis::GenesisInstaller, upgrade::SystemUpgrader, - }, - execution::{self, DirectSystemContractCall, Executor}, - 
tracking_copy::{TrackingCopy, TrackingCopyExt}, - }, - shared::{ - account::Account, - additive_map::AdditiveMap, - gas::Gas, - motes::Motes, - newtypes::{Blake2bHash, CorrelationId}, - stored_value::StoredValue, - transform::Transform, - wasm_prep::Preprocessor, - }, - storage::{ - global_state::{CommitResult, StateProvider}, - protocol_data::ProtocolData, - trie::Trie, - }, -}; - -pub const MAX_PAYMENT_AMOUNT: u64 = 2_500_000_000; -pub static MAX_PAYMENT: Lazy = Lazy::new(|| U512::from(MAX_PAYMENT_AMOUNT)); - -/// Gas/motes conversion rate of wasmless transfer cost is always 1 regardless of what user wants to -/// pay. -pub const WASMLESS_TRANSFER_FIXED_GAS_PRICE: u64 = 1; - -#[derive(Debug)] -pub struct EngineState { - config: EngineConfig, - system_contract_cache: SystemContractCache, - state: S, -} - -impl EngineState -where - S: StateProvider, - S::Error: Into, -{ - pub fn new(state: S, config: EngineConfig) -> EngineState { - let system_contract_cache = Default::default(); - EngineState { - config, - system_contract_cache, - state, - } - } - - pub fn config(&self) -> &EngineConfig { - &self.config - } - - pub fn get_protocol_data( - &self, - protocol_version: ProtocolVersion, - ) -> Result, Error> { - match self.state.get_protocol_data(protocol_version) { - Ok(Some(protocol_data)) => Ok(Some(protocol_data)), - Err(error) => Err(Error::Exec(error.into())), - _ => Ok(None), - } - } - - pub fn commit_genesis( - &self, - correlation_id: CorrelationId, - genesis_config_hash: Blake2bHash, - protocol_version: ProtocolVersion, - ee_config: &ExecConfig, - ) -> Result { - // Preliminaries - let initial_root_hash = self.state.empty_root(); - let system_config = ee_config.system_config(); - - let tracking_copy = match self.tracking_copy(initial_root_hash) { - Ok(Some(tracking_copy)) => Rc::new(RefCell::new(tracking_copy)), - // NOTE: As genesis is ran once per instance condition below is considered programming - // error - Ok(None) => panic!("state has not been 
initialized properly"), - Err(error) => return Err(error), - }; - - let wasm_config = ee_config.wasm_config(); - let preprocessor = Preprocessor::new(*wasm_config); - - let system_module = tracking_copy - .borrow_mut() - .get_system_module(&preprocessor)?; - - let mut genesis_installer: GenesisInstaller = GenesisInstaller::new( - genesis_config_hash, - protocol_version, - correlation_id, - self.config, - ee_config.clone(), - tracking_copy, - system_module, - ); - - // Create mint - let mint_hash = genesis_installer.create_mint()?; - - // Create accounts - genesis_installer.create_accounts()?; - - // Create handle payment - let handle_payment_hash = genesis_installer.create_handle_payment()?; - - // Create auction - let auction_hash = genesis_installer.create_auction()?; - - // Create standard payment - let standard_payment_hash = genesis_installer.create_standard_payment(); - - // Associate given CostTable with given ProtocolVersion. - { - let protocol_data = ProtocolData::new( - *wasm_config, - *system_config, - mint_hash, - handle_payment_hash, - standard_payment_hash, - auction_hash, - ); - - self.state - .put_protocol_data(protocol_version, &protocol_data) - .map_err(Into::into)?; - } - - // Commit the transforms. 
- let execution_effect = genesis_installer.finalize(); - - let commit_result = self - .state - .commit( - correlation_id, - initial_root_hash, - execution_effect.transforms.to_owned(), - ) - .map_err(Into::into)?; - - // Return the result - let genesis_result = GenesisResult::from_commit_result(commit_result, execution_effect); - - Ok(genesis_result) - } - - pub fn commit_upgrade( - &self, - correlation_id: CorrelationId, - upgrade_config: UpgradeConfig, - ) -> Result { - // per specification: - // https://casperlabs.atlassian.net/wiki/spaces/EN/pages/139854367/Upgrading+System+Contracts+Specification - - // 3.1.1.1.1.1 validate pre state hash exists - // 3.1.2.1 get a tracking_copy at the provided pre_state_hash - let pre_state_hash = upgrade_config.pre_state_hash(); - let tracking_copy = match self.tracking_copy(pre_state_hash)? { - Some(tracking_copy) => Rc::new(RefCell::new(tracking_copy)), - None => return Ok(UpgradeResult::RootNotFound), - }; - - // 3.1.1.1.1.2 current protocol version is required - let current_protocol_version = upgrade_config.current_protocol_version(); - let current_protocol_data = match self.state.get_protocol_data(current_protocol_version) { - Ok(Some(protocol_data)) => protocol_data, - Ok(None) => { - return Err(Error::InvalidProtocolVersion(current_protocol_version)); - } - Err(error) => { - return Err(Error::Exec(error.into())); - } - }; - - // 3.1.1.1.1.3 activation point is not currently used by EE; skipping - // 3.1.1.1.1.4 upgrade point protocol version validation - let new_protocol_version = upgrade_config.new_protocol_version(); - - let upgrade_check_result = - current_protocol_version.check_next_version(&new_protocol_version); - - if upgrade_check_result.is_invalid() { - return Err(Error::InvalidProtocolVersion(new_protocol_version)); - } - - // 3.1.1.1.1.5 bump system contract major versions - if upgrade_check_result.is_major_version() { - let system_upgrader: SystemUpgrader = SystemUpgrader::new( - new_protocol_version, - 
current_protocol_data, - tracking_copy.clone(), - ); - - system_upgrader - .upgrade_system_contracts_major_version(correlation_id) - .map_err(Error::ProtocolUpgrade)?; - } - - // 3.1.1.1.1.6 resolve wasm CostTable for new protocol version - let new_wasm_config = match upgrade_config.wasm_config() { - Some(new_wasm_costs) => new_wasm_costs, - None => current_protocol_data.wasm_config(), - }; - - let new_system_config = match upgrade_config.system_config() { - Some(new_system_config) => new_system_config, - None => current_protocol_data.system_config(), - }; - - // 3.1.2.2 persist wasm CostTable - let new_protocol_data = ProtocolData::new( - *new_wasm_config, - *new_system_config, - current_protocol_data.mint(), - current_protocol_data.handle_payment(), - current_protocol_data.standard_payment(), - current_protocol_data.auction(), - ); - - self.state - .put_protocol_data(new_protocol_version, &new_protocol_data) - .map_err(Into::into)?; - - // 3.1.1.1.1.7 new total validator slots is optional - if let Some(new_validator_slots) = upgrade_config.new_validator_slots() { - // 3.1.2.4 if new total validator slots is provided, update auction contract state - let auction_contract = tracking_copy - .borrow_mut() - .get_contract(correlation_id, new_protocol_data.auction())?; - - let validator_slots_key = auction_contract.named_keys()[VALIDATOR_SLOTS_KEY]; - let value = StoredValue::CLValue( - CLValue::from_t(new_validator_slots) - .map_err(|_| Error::Bytesrepr("new_validator_slots".to_string()))?, - ); - tracking_copy.borrow_mut().write(validator_slots_key, value); - } - - if let Some(new_auction_delay) = upgrade_config.new_auction_delay() { - let auction_contract = tracking_copy - .borrow_mut() - .get_contract(correlation_id, new_protocol_data.auction())?; - - let auction_delay_key = auction_contract.named_keys()[AUCTION_DELAY_KEY]; - let value = StoredValue::CLValue( - CLValue::from_t(new_auction_delay) - .map_err(|_| Error::Bytesrepr("new_auction_delay".to_string()))?, - 
); - tracking_copy.borrow_mut().write(auction_delay_key, value); - } - - if let Some(new_locked_funds_period) = upgrade_config.new_locked_funds_period_millis() { - let auction_contract = tracking_copy - .borrow_mut() - .get_contract(correlation_id, new_protocol_data.auction())?; - - let locked_funds_period_key = auction_contract.named_keys()[LOCKED_FUNDS_PERIOD_KEY]; - let value = StoredValue::CLValue( - CLValue::from_t(new_locked_funds_period) - .map_err(|_| Error::Bytesrepr("new_locked_funds_period".to_string()))?, - ); - tracking_copy - .borrow_mut() - .write(locked_funds_period_key, value); - } - - if let Some(new_unbonding_delay) = upgrade_config.new_unbonding_delay() { - let auction_contract = tracking_copy - .borrow_mut() - .get_contract(correlation_id, new_protocol_data.auction())?; - - let unbonding_delay_key = auction_contract.named_keys()[UNBONDING_DELAY_KEY]; - let value = StoredValue::CLValue( - CLValue::from_t(new_unbonding_delay) - .map_err(|_| Error::Bytesrepr("new_unbonding_delay".to_string()))?, - ); - tracking_copy.borrow_mut().write(unbonding_delay_key, value); - } - - if let Some(new_round_seigniorage_rate) = upgrade_config.new_round_seigniorage_rate() { - let new_round_seigniorage_rate: Ratio = { - let (numer, denom) = new_round_seigniorage_rate.into(); - Ratio::new(numer.into(), denom.into()) - }; - - let mint_contract = tracking_copy - .borrow_mut() - .get_contract(correlation_id, new_protocol_data.mint())?; - - let locked_funds_period_key = mint_contract.named_keys()[ROUND_SEIGNIORAGE_RATE_KEY]; - let value = StoredValue::CLValue( - CLValue::from_t(new_round_seigniorage_rate) - .map_err(|_| Error::Bytesrepr("new_round_seigniorage_rate".to_string()))?, - ); - tracking_copy - .borrow_mut() - .write(locked_funds_period_key, value); - } - - // apply the arbitrary modifications - for (key, value) in upgrade_config.global_state_update() { - tracking_copy.borrow_mut().write(*key, value.clone()); - } - - let effects = 
tracking_copy.borrow().effect(); - - // commit - let commit_result = self - .state - .commit( - correlation_id, - pre_state_hash, - effects.transforms.to_owned(), - ) - .map_err(Into::into)?; - - // return result and effects - Ok(UpgradeResult::from_commit_result(commit_result, effects)) - } - - pub fn tracking_copy( - &self, - hash: Blake2bHash, - ) -> Result>, Error> { - match self.state.checkout(hash).map_err(Into::into)? { - Some(tc) => Ok(Some(TrackingCopy::new(tc))), - None => Ok(None), - } - } - - pub fn run_query( - &self, - correlation_id: CorrelationId, - query_request: QueryRequest, - ) -> Result { - let tracking_copy = match self.tracking_copy(query_request.state_hash())? { - Some(tracking_copy) => Rc::new(RefCell::new(tracking_copy)), - None => return Ok(QueryResult::RootNotFound), - }; - - let tracking_copy = tracking_copy.borrow(); - - Ok(tracking_copy - .query(correlation_id, query_request.key(), query_request.path()) - .map_err(|err| Error::Exec(err.into()))? - .into()) - } - - pub fn run_execute( - &self, - correlation_id: CorrelationId, - mut exec_request: ExecuteRequest, - ) -> Result { - let executor = Executor::new(self.config); - - let deploys = exec_request.take_deploys(); - let mut results = ExecutionResults::with_capacity(deploys.len()); - - for deploy_item in deploys { - let result = match deploy_item.session { - ExecutableDeployItem::Transfer { .. 
} => self.transfer( - correlation_id, - &executor, - exec_request.protocol_version, - exec_request.parent_state_hash, - BlockTime::new(exec_request.block_time), - deploy_item, - exec_request.proposer.clone(), - ), - _ => self.deploy( - correlation_id, - &executor, - exec_request.protocol_version, - exec_request.parent_state_hash, - BlockTime::new(exec_request.block_time), - deploy_item, - exec_request.proposer.clone(), - ), - }; - match result { - Ok(result) => results.push_back(result), - Err(error) => { - return Err(error); - } - }; - } - - Ok(results) - } - - fn get_authorized_account( - &self, - correlation_id: CorrelationId, - account_hash: AccountHash, - authorization_keys: &BTreeSet, - tracking_copy: Rc::Reader>>>, - ) -> Result { - let account: Account = match tracking_copy - .borrow_mut() - .get_account(correlation_id, account_hash) - { - Ok(account) => account, - Err(_) => { - return Err(error::Error::Authorization); - } - }; - - // Authorize using provided authorization keys - if !account.can_authorize(authorization_keys) { - return Err(error::Error::Authorization); - } - - // Check total key weight against deploy threshold - if !account.can_deploy_with(authorization_keys) { - return Err(execution::Error::DeploymentAuthorizationFailure.into()); - } - - Ok(account) - } - - pub fn get_purse_balance( - &self, - correlation_id: CorrelationId, - state_hash: Blake2bHash, - purse_uref: URef, - ) -> Result { - let tracking_copy = match self.tracking_copy(state_hash)? 
{ - Some(tracking_copy) => tracking_copy, - None => return Ok(BalanceResult::RootNotFound), - }; - let purse_balance_key = - tracking_copy.get_purse_balance_key(correlation_id, purse_uref.into())?; - let (balance, proof) = - tracking_copy.get_purse_balance_with_proof(correlation_id, purse_balance_key)?; - let proof = Box::new(proof); - let motes = balance.value(); - Ok(BalanceResult::Success { motes, proof }) - } - - #[allow(clippy::too_many_arguments)] - pub fn transfer( - &self, - correlation_id: CorrelationId, - executor: &Executor, - protocol_version: ProtocolVersion, - prestate_hash: Blake2bHash, - blocktime: BlockTime, - deploy_item: DeployItem, - proposer: PublicKey, - ) -> Result { - let protocol_data = match self.state.get_protocol_data(protocol_version) { - Ok(Some(protocol_data)) => protocol_data, - Ok(None) => { - let error = Error::InvalidProtocolVersion(protocol_version); - return Ok(ExecutionResult::precondition_failure(error)); - } - Err(error) => { - return Ok(ExecutionResult::precondition_failure(Error::Exec( - error.into(), - ))); - } - }; - - let tracking_copy = match self.tracking_copy(prestate_hash) { - Err(error) => return Ok(ExecutionResult::precondition_failure(error)), - Ok(None) => return Err(Error::RootNotFound(prestate_hash)), - Ok(Some(tracking_copy)) => Rc::new(RefCell::new(tracking_copy)), - }; - - let preprocessor = { - let wasm_config = protocol_data.wasm_config(); - Preprocessor::new(*wasm_config) - }; - - let system_module = { - match tracking_copy.borrow_mut().get_system_module(&preprocessor) { - Ok(module) => module, - Err(error) => { - return Ok(ExecutionResult::precondition_failure(error.into())); - } - } - }; - - let base_key = Key::Account(deploy_item.address); - - let account_public_key = match base_key.into_account() { - Some(account_addr) => account_addr, - None => { - return Ok(ExecutionResult::precondition_failure( - error::Error::Authorization, - )); - } - }; - - let authorization_keys = 
deploy_item.authorization_keys; - - let account = match self.get_authorized_account( - correlation_id, - account_public_key, - &authorization_keys, - Rc::clone(&tracking_copy), - ) { - Ok(account) => account, - Err(e) => return Ok(ExecutionResult::precondition_failure(e)), - }; - - let proposer_addr = proposer.to_account_hash(); - let proposer_account = match tracking_copy - .borrow_mut() - .get_account(correlation_id, proposer_addr) - { - Ok(proposer) => proposer, - Err(error) => return Ok(ExecutionResult::precondition_failure(Error::Exec(error))), - }; - - let mint_contract = match tracking_copy - .borrow_mut() - .get_contract(correlation_id, protocol_data.mint()) - { - Ok(contract) => contract, - Err(error) => { - return Ok(ExecutionResult::precondition_failure(error.into())); - } - }; - - let mut mint_named_keys = mint_contract.named_keys().to_owned(); - let mut mint_extra_keys: Vec = vec![]; - let mint_base_key = Key::from(protocol_data.mint()); - - let handle_payment_contract = match tracking_copy - .borrow_mut() - .get_contract(correlation_id, protocol_data.handle_payment()) - { - Ok(contract) => contract, - Err(error) => { - return Ok(ExecutionResult::precondition_failure(error.into())); - } - }; - - let mut handle_payment_named_keys = handle_payment_contract.named_keys().to_owned(); - let handle_payment_extra_keys: Vec = vec![]; - let handle_payment_base_key = Key::from(protocol_data.handle_payment()); - - let gas_limit = Gas::new(U512::from(std::u64::MAX)); - - let wasmless_transfer_gas_cost = Gas::new(U512::from( - protocol_data.system_config().wasmless_transfer_cost(), - )); - - let wasmless_transfer_motes = match Motes::from_gas( - wasmless_transfer_gas_cost, - WASMLESS_TRANSFER_FIXED_GAS_PRICE, - ) { - Some(motes) => motes, - None => { - return Ok(ExecutionResult::precondition_failure( - Error::GasConversionOverflow, - )) - } - }; - - let proposer_main_purse_balance_key = { - let proposer_main_purse = proposer_account.main_purse(); - - match 
tracking_copy - .borrow_mut() - .get_purse_balance_key(correlation_id, proposer_main_purse.into()) - { - Ok(balance_key) => balance_key, - Err(error) => return Ok(ExecutionResult::precondition_failure(Error::Exec(error))), - } - }; - - let proposer_purse = proposer_account.main_purse(); - - let account_main_purse = account.main_purse(); - - let account_main_purse_balance_key = match tracking_copy - .borrow_mut() - .get_purse_balance_key(correlation_id, account_main_purse.into()) - { - Ok(balance_key) => balance_key, - Err(error) => return Ok(ExecutionResult::precondition_failure(Error::Exec(error))), - }; - - let account_main_purse_balance = match tracking_copy - .borrow_mut() - .get_purse_balance(correlation_id, account_main_purse_balance_key) - { - Ok(balance_key) => balance_key, - Err(error) => return Ok(ExecutionResult::precondition_failure(Error::Exec(error))), - }; - - if account_main_purse_balance < wasmless_transfer_motes { - // We don't have minimum balance to operate and therefore we can't charge for user - // errors. - return Ok(ExecutionResult::precondition_failure( - Error::InsufficientPayment, - )); - } - - // Function below creates an ExecutionResult with precomputed effects of "finalize_payment". - let make_charged_execution_failure = |error| match ExecutionResult::new_payment_code_error( - error, - wasmless_transfer_motes, - account_main_purse_balance, - wasmless_transfer_gas_cost, - account_main_purse_balance_key, - proposer_main_purse_balance_key, - ) { - Ok(execution_result) => execution_result, - Err(error) => { - let exec_error = ExecError::from(error); - ExecutionResult::precondition_failure(exec_error.into()) - } - }; - - // All wasmless transfer preconditions are met. - // Any error that occurs in logic below this point would result in a charge for user error. 
- - let mut runtime_args_builder = - TransferRuntimeArgsBuilder::new(deploy_item.session.args().clone()); - - match runtime_args_builder.transfer_target_mode(correlation_id, Rc::clone(&tracking_copy)) { - Ok(mode) => match mode { - TransferTargetMode::Unknown | TransferTargetMode::PurseExists(_) => { /* noop */ } - TransferTargetMode::CreateAccount(public_key) => { - let (maybe_uref, execution_result): (Option, ExecutionResult) = executor - .exec_system_contract( - DirectSystemContractCall::CreatePurse, - system_module.clone(), - RuntimeArgs::new(), // mint create takes no arguments - &mut mint_named_keys, - Default::default(), - mint_base_key, - &account, - authorization_keys.clone(), - blocktime, - deploy_item.deploy_hash, - gas_limit, - protocol_version, - correlation_id, - Rc::clone(&tracking_copy), - Phase::Session, - protocol_data, - SystemContractCache::clone(&self.system_contract_cache), - ); - match maybe_uref { - Some(main_purse) => { - let new_account = - Account::create(public_key, Default::default(), main_purse); - mint_extra_keys.push(Key::from(main_purse)); - // write new account - tracking_copy - .borrow_mut() - .write(Key::Account(public_key), StoredValue::Account(new_account)) - } - None => { - // This case implies that the execution_result is a failure variant as - // implemented inside host_exec(). 
- let error = execution_result - .take_error() - .unwrap_or(Error::InsufficientPayment); - return Ok(make_charged_execution_failure(error)); - } - } - } - }, - Err(error) => return Ok(make_charged_execution_failure(error)), - } - - let transfer_args = - match runtime_args_builder.build(&account, correlation_id, Rc::clone(&tracking_copy)) { - Ok(transfer_args) => transfer_args, - Err(error) => return Ok(make_charged_execution_failure(error)), - }; - - // Construct a payment code that will put cost of wasmless payment into payment purse - let payment_result = { - // Check source purses minimum balance - let source_uref = transfer_args.source(); - let source_purse_balance = if source_uref != account_main_purse { - let source_purse_balance_key = match tracking_copy - .borrow_mut() - .get_purse_balance_key(correlation_id, Key::URef(source_uref)) - { - Ok(purse_balance_key) => purse_balance_key, - Err(error) => return Ok(make_charged_execution_failure(Error::Exec(error))), - }; - - match tracking_copy - .borrow_mut() - .get_purse_balance(correlation_id, source_purse_balance_key) - { - Ok(purse_balance) => purse_balance, - Err(error) => return Ok(make_charged_execution_failure(Error::Exec(error))), - } - } else { - // If source purse is main purse then we already have the balance. - account_main_purse_balance - }; - - let transfer_amount_motes = Motes::new(transfer_args.amount()); - - match wasmless_transfer_motes.checked_add(transfer_amount_motes) { - Some(total_amount) if source_purse_balance < total_amount => { - // We can't continue if the minimum funds in source purse are lower than the - // required cost. - return Ok(make_charged_execution_failure(Error::InsufficientPayment)); - } - None => { - // When trying to send too much that could cause an overflow. 
- return Ok(make_charged_execution_failure(Error::InsufficientPayment)); - } - Some(_) => {} - } - - let (payment_uref, get_payment_purse_result): (Option, ExecutionResult) = - executor.exec_system_contract( - DirectSystemContractCall::GetPaymentPurse, - system_module.clone(), - RuntimeArgs::default(), - &mut handle_payment_named_keys, - handle_payment_extra_keys.as_slice(), - handle_payment_base_key, - &account, - authorization_keys.clone(), - blocktime, - deploy_item.deploy_hash, - gas_limit, - protocol_version, - correlation_id, - Rc::clone(&tracking_copy), - Phase::Payment, - protocol_data, - SystemContractCache::clone(&self.system_contract_cache), - ); - - let payment_uref = match payment_uref { - Some(payment_uref) => payment_uref, - None => return Ok(make_charged_execution_failure(Error::InsufficientPayment)), - }; - - if let Some(error) = get_payment_purse_result.take_error() { - return Ok(make_charged_execution_failure(error)); - } - - // Create a new arguments to transfer cost of wasmless transfer into the payment purse. 
- - let new_transfer_args = TransferArgs::new( - transfer_args.to(), - transfer_args.source(), - payment_uref, - wasmless_transfer_motes.value(), - transfer_args.arg_id(), - ); - - let runtime_args = match RuntimeArgs::try_from(new_transfer_args) { - Ok(runtime_args) => runtime_args, - Err(error) => return Ok(make_charged_execution_failure(Error::Exec(error.into()))), - }; - - let (actual_result, payment_result): (Option>, ExecutionResult) = - executor.exec_system_contract( - DirectSystemContractCall::Transfer, - system_module.clone(), - runtime_args, - &mut mint_named_keys, - mint_extra_keys.as_slice(), - mint_base_key, - &account, - authorization_keys.clone(), - blocktime, - deploy_item.deploy_hash, - gas_limit, - protocol_version, - correlation_id, - Rc::clone(&tracking_copy), - Phase::Payment, - protocol_data, - SystemContractCache::clone(&self.system_contract_cache), - ); - - if let Some(error) = payment_result.as_error().cloned() { - return Ok(make_charged_execution_failure(error)); - } - - let transfer_result = match actual_result { - Some(Ok(())) => Ok(()), - Some(Err(mint_error)) => match mint::Error::try_from(mint_error) { - Ok(mint_error) => Err(ApiError::from(mint_error)), - Err(_) => Err(ApiError::Transfer), - }, - None => Err(ApiError::Transfer), - }; - - if let Err(error) = transfer_result { - return Ok(make_charged_execution_failure(Error::Exec( - ExecError::Revert(error), - ))); - } - - let payment_purse_balance = { - let payment_purse_balance_key = match tracking_copy - .borrow_mut() - .get_purse_balance_key(correlation_id, Key::URef(payment_uref)) - { - Ok(payment_purse_balance_key) => payment_purse_balance_key, - Err(error) => return Ok(make_charged_execution_failure(Error::Exec(error))), - }; - - match tracking_copy - .borrow_mut() - .get_purse_balance(correlation_id, payment_purse_balance_key) - { - Ok(payment_purse_balance) => payment_purse_balance, - Err(error) => return Ok(make_charged_execution_failure(Error::Exec(error))), - } - }; - - // 
Wasmless transfer payment code pre & post conditions: - // (a) payment purse should be empty before the payment operation - // (b) after executing payment code it's balance has to be equal to the wasmless gas - // cost price - - let payment_gas = - match Gas::from_motes(payment_purse_balance, WASMLESS_TRANSFER_FIXED_GAS_PRICE) { - Some(gas) => gas, - None => { - return Ok(make_charged_execution_failure(Error::GasConversionOverflow)) - } - }; - - debug_assert_eq!(payment_gas, wasmless_transfer_gas_cost); - - // This assumes the cost incurred is already denominated in gas - - payment_result.with_cost(payment_gas) - }; - - let runtime_args = match RuntimeArgs::try_from(transfer_args) { - Ok(runtime_args) => runtime_args, - Err(error) => { - return Ok(make_charged_execution_failure( - ExecError::from(error).into(), - )) - } - }; - - let (_, mut session_result): (Option>, ExecutionResult) = executor - .exec_system_contract( - DirectSystemContractCall::Transfer, - system_module.clone(), - runtime_args, - &mut mint_named_keys, - mint_extra_keys.as_slice(), - mint_base_key, - &account, - authorization_keys.clone(), - blocktime, - deploy_item.deploy_hash, - gas_limit, - protocol_version, - correlation_id, - Rc::clone(&tracking_copy), - Phase::Session, - protocol_data, - SystemContractCache::clone(&self.system_contract_cache), - ); - - // User is already charged fee for wasmless contract, and we need to make sure we will not - // charge for anything that happens while calling transfer entrypoint. - session_result = session_result.with_cost(Gas::default()); - - let finalize_result = { - let handle_payment_args = { - // Gas spent during payment code execution - let finalize_cost_motes = { - // A case where payment_result.cost() is different than wasmless transfer cost - // is considered a programming error. 
- debug_assert_eq!(payment_result.cost(), wasmless_transfer_gas_cost); - wasmless_transfer_motes - }; - - let account = deploy_item.address; - let maybe_runtime_args = RuntimeArgs::try_new(|args| { - args.insert(handle_payment::ARG_AMOUNT, finalize_cost_motes.value())?; - args.insert(handle_payment::ARG_ACCOUNT, account)?; - args.insert(handle_payment::ARG_TARGET, proposer_purse)?; - Ok(()) - }); - - match maybe_runtime_args { - Ok(runtime_args) => runtime_args, - Err(error) => { - let exec_error = ExecError::from(error); - return Ok(ExecutionResult::precondition_failure(exec_error.into())); - } - } - }; - - let system_account = Account::new( - PublicKey::System.to_account_hash(), - Default::default(), - URef::new(Default::default(), AccessRights::READ_ADD_WRITE), - Default::default(), - Default::default(), - ); - - let tc = tracking_copy.borrow(); - let finalization_tc = Rc::new(RefCell::new(tc.fork())); - - let (_ret, finalize_result): (Option<()>, ExecutionResult) = executor - .exec_system_contract( - DirectSystemContractCall::FinalizePayment, - system_module, - handle_payment_args, - &mut handle_payment_named_keys, - Default::default(), - Key::from(protocol_data.handle_payment()), - &system_account, - authorization_keys, - blocktime, - deploy_item.deploy_hash, - gas_limit, - protocol_version, - correlation_id, - finalization_tc, - Phase::FinalizePayment, - protocol_data, - SystemContractCache::clone(&self.system_contract_cache), - ); - - finalize_result - }; - - // Create + persist deploy info. 
- { - let transfers = session_result.transfers(); - let cost = wasmless_transfer_gas_cost.value(); - let deploy_info = DeployInfo::new( - deploy_item.deploy_hash, - &transfers, - account.account_hash(), - account.main_purse(), - cost, - ); - tracking_copy.borrow_mut().write( - Key::DeployInfo(deploy_item.deploy_hash), - StoredValue::DeployInfo(deploy_info), - ); - } - - if session_result.is_success() { - session_result = session_result.with_effect(tracking_copy.borrow_mut().effect()) - } - - let mut execution_result_builder = ExecutionResultBuilder::new(); - execution_result_builder.set_payment_execution_result(payment_result); - execution_result_builder.set_session_execution_result(session_result); - execution_result_builder.set_finalize_execution_result(finalize_result); - - let execution_result = execution_result_builder - .build(tracking_copy.borrow().reader(), correlation_id) - .expect("ExecutionResultBuilder not initialized properly"); - - Ok(execution_result) - } - - #[allow(clippy::too_many_arguments)] - pub fn deploy( - &self, - correlation_id: CorrelationId, - executor: &Executor, - protocol_version: ProtocolVersion, - prestate_hash: Blake2bHash, - blocktime: BlockTime, - deploy_item: DeployItem, - proposer: PublicKey, - ) -> Result { - // spec: https://casperlabs.atlassian.net/wiki/spaces/EN/pages/123404576/Payment+code+execution+specification - - // Obtain current protocol data for given version - // do this first, as there is no reason to proceed if protocol version is invalid - let protocol_data = match self.state.get_protocol_data(protocol_version) { - Ok(Some(protocol_data)) => protocol_data, - Ok(None) => { - let error = Error::InvalidProtocolVersion(protocol_version); - return Ok(ExecutionResult::precondition_failure(error)); - } - Err(error) => { - return Ok(ExecutionResult::precondition_failure(Error::Exec( - error.into(), - ))); - } - }; - - let preprocessor = { - let wasm_config = protocol_data.wasm_config(); - Preprocessor::new(*wasm_config) 
- }; - - // Create tracking copy (which functions as a deploy context) - // validation_spec_2: prestate_hash check - // do this second; as there is no reason to proceed if the prestate hash is invalid - let tracking_copy = match self.tracking_copy(prestate_hash) { - Err(error) => return Ok(ExecutionResult::precondition_failure(error)), - Ok(None) => return Err(Error::RootNotFound(prestate_hash)), - Ok(Some(tracking_copy)) => Rc::new(RefCell::new(tracking_copy)), - }; - - let system_module = { - match tracking_copy.borrow_mut().get_system_module(&preprocessor) { - Ok(module) => module, - Err(error) => { - return Ok(ExecutionResult::precondition_failure(error.into())); - } - } - }; - - // vestigial system_contract_cache - self.system_contract_cache - .initialize_with_protocol_data(&protocol_data, &system_module); - - let base_key = Key::Account(deploy_item.address); - - // Get addr bytes from `address` (which is actually a Key) - // validation_spec_3: account validity - let account_hash = match base_key.into_account() { - Some(account_addr) => account_addr, - None => { - return Ok(ExecutionResult::precondition_failure( - error::Error::Authorization, - )); - } - }; - - let authorization_keys = deploy_item.authorization_keys; - - // Get account from tracking copy - // validation_spec_3: account validity - let account = match self.get_authorized_account( - correlation_id, - account_hash, - &authorization_keys, - Rc::clone(&tracking_copy), - ) { - Ok(account) => account, - Err(e) => return Ok(ExecutionResult::precondition_failure(e)), - }; - - let session = deploy_item.session; - let payment = deploy_item.payment; - let deploy_hash = deploy_item.deploy_hash; - - // Create session code `A` from provided session bytes - // validation_spec_1: valid wasm bytes - // we do this upfront as there is no reason to continue if session logic is invalid - let session_metadata = match session.get_deploy_metadata( - Rc::clone(&tracking_copy), - &account, - correlation_id, - 
&preprocessor, - &protocol_version, - &protocol_data, - Phase::Session, - ) { - Ok(metadata) => metadata, - Err(error) => { - return Ok(ExecutionResult::precondition_failure(error)); - } - }; - - // Get account main purse balance key - // validation_spec_5: account main purse minimum balance - let account_main_purse_balance_key: Key = { - let account_key = Key::URef(account.main_purse()); - match tracking_copy - .borrow_mut() - .get_purse_balance_key(correlation_id, account_key) - { - Ok(key) => key, - Err(error) => { - return Ok(ExecutionResult::precondition_failure(error.into())); - } - } - }; - - // Get account main purse balance to enforce precondition and in case of forced - // transfer validation_spec_5: account main purse minimum balance - let account_main_purse_balance: Motes = match tracking_copy - .borrow_mut() - .get_purse_balance(correlation_id, account_main_purse_balance_key) - { - Ok(balance) => balance, - Err(error) => return Ok(ExecutionResult::precondition_failure(error.into())), - }; - - let max_payment_cost = Motes::new(*MAX_PAYMENT); - - // Enforce minimum main purse balance validation - // validation_spec_5: account main purse minimum balance - if account_main_purse_balance < max_payment_cost { - return Ok(ExecutionResult::precondition_failure( - Error::InsufficientPayment, - )); - } - - // Finalization is executed by system account (currently genesis account) - // payment_code_spec_5: system executes finalization - let system_account = Account::new( - PublicKey::System.to_account_hash(), - Default::default(), - URef::new(Default::default(), AccessRights::READ_ADD_WRITE), - Default::default(), - Default::default(), - ); - - // [`ExecutionResultBuilder`] handles merging of multiple execution results - let mut execution_result_builder = execution_result::ExecutionResultBuilder::new(); - - // Execute provided payment code - let payment_result = { - // payment_code_spec_1: init pay environment w/ gas limit == (max_payment_cost / - // gas_price) - 
let payment_gas_limit = match Gas::from_motes(max_payment_cost, deploy_item.gas_price) { - Some(gas) => gas, - None => { - return Ok(ExecutionResult::precondition_failure( - Error::GasConversionOverflow, - )) - } - }; - - // Create payment code module from bytes - // validation_spec_1: valid wasm bytes - let phase = Phase::Payment; - let payment_metadata = match payment.get_deploy_metadata( - Rc::clone(&tracking_copy), - &account, - correlation_id, - &preprocessor, - &protocol_version, - &protocol_data, - phase, - ) { - Ok(metadata) => metadata, - Err(error) => { - return Ok(ExecutionResult::precondition_failure(error)); - } - }; - - // payment_code_spec_2: execute payment code - let ( - payment_module, - payment_base_key, - mut payment_named_keys, - payment_package, - payment_entry_point, - is_standard_payment, - ) = match payment_metadata { - DeployMetadata::System { - contract_package, - entry_point, - .. - } => ( - system_module.clone(), - base_key, // this is account key - account.named_keys().clone(), // standard payment uses account keys - contract_package, - entry_point, - true, - ), - DeployMetadata::Session { - module, - contract_package, - entry_point, - } => ( - module, - base_key, // this is account key - account.named_keys().clone(), - contract_package, - entry_point, - false, - ), - DeployMetadata::Contract { - module, - base_key, - contract, - contract_package, - entry_point, - } => ( - module, - base_key, // this is contract key - contract.named_keys().clone(), - contract_package, - entry_point, - false, - ), - }; - - let payment_args = payment.args().clone(); - let system_contract_cache = SystemContractCache::clone(&self.system_contract_cache); - - if is_standard_payment { - executor.exec_standard_payment( - system_module.clone(), - payment_args, - payment_base_key, - &account, - &mut payment_named_keys, - authorization_keys.clone(), - blocktime, - deploy_hash, - payment_gas_limit, - protocol_version, - correlation_id, - Rc::clone(&tracking_copy), 
- phase, - protocol_data, - system_contract_cache, - ) - } else { - executor.exec( - payment_module, - payment_entry_point, - payment_args, - payment_base_key, - &account, - &mut payment_named_keys, - authorization_keys.clone(), - blocktime, - deploy_hash, - payment_gas_limit, - protocol_version, - correlation_id, - Rc::clone(&tracking_copy), - phase, - protocol_data, - system_contract_cache, - &payment_package, - ) - } - }; - - debug!("Payment result: {:?}", payment_result); - - let payment_result_cost = payment_result.cost(); - // payment_code_spec_3: fork based upon payment purse balance and cost of - // payment code execution - let payment_purse_balance: Motes = { - // Get handle payment system contract details - // payment_code_spec_6: system contract validity - let handle_payment_contract = match tracking_copy - .borrow_mut() - .get_contract(correlation_id, protocol_data.handle_payment()) - { - Ok(contract) => contract, - Err(error) => { - return Ok(ExecutionResult::precondition_failure(error.into())); - } - }; - - // Get payment purse Key from handle payment contract - // payment_code_spec_6: system contract validity - let payment_purse_key: Key = match handle_payment_contract - .named_keys() - .get(handle_payment::PAYMENT_PURSE_KEY) - { - Some(key) => *key, - None => return Ok(ExecutionResult::precondition_failure(Error::Deploy)), - }; - - let purse_balance_key = match tracking_copy - .borrow_mut() - .get_purse_balance_key(correlation_id, payment_purse_key) - { - Ok(key) => key, - Err(error) => { - return Ok(ExecutionResult::precondition_failure(error.into())); - } - }; - - match tracking_copy - .borrow_mut() - .get_purse_balance(correlation_id, purse_balance_key) - { - Ok(balance) => balance, - Err(error) => { - return Ok(ExecutionResult::precondition_failure(error.into())); - } - } - }; - - // the proposer of the block this deploy is in receives the gas from this deploy execution - let proposer_purse = { - let proposer_account: Account = match 
tracking_copy - .borrow_mut() - .get_account(correlation_id, AccountHash::from(&proposer)) - { - Ok(account) => account, - Err(error) => { - return Ok(ExecutionResult::precondition_failure(error.into())); - } - }; - proposer_account.main_purse() - }; - - if let Some(forced_transfer) = - payment_result.check_forced_transfer(payment_purse_balance, deploy_item.gas_price) - { - // Get rewards purse balance key - // payment_code_spec_6: system contract validity - let proposer_main_purse_balance_key = { - // Get reward purse Key from handle payment contract - // payment_code_spec_6: system contract validity - match tracking_copy - .borrow_mut() - .get_purse_balance_key(correlation_id, proposer_purse.into()) - { - Ok(key) => key, - Err(error) => { - return Ok(ExecutionResult::precondition_failure(error.into())); - } - } - }; - - let error = match forced_transfer { - ForcedTransferResult::InsufficientPayment => Error::InsufficientPayment, - ForcedTransferResult::GasConversionOverflow => Error::GasConversionOverflow, - ForcedTransferResult::PaymentFailure => payment_result - .take_error() - .unwrap_or(Error::InsufficientPayment), - }; - - let gas_cost = match Gas::from_motes(max_payment_cost, deploy_item.gas_price) { - Some(gas) => gas, - None => { - return Ok(ExecutionResult::precondition_failure( - Error::GasConversionOverflow, - )) - } - }; - - match ExecutionResult::new_payment_code_error( - error, - max_payment_cost, - account_main_purse_balance, - gas_cost, - account_main_purse_balance_key, - proposer_main_purse_balance_key, - ) { - Ok(execution_result) => return Ok(execution_result), - Err(error) => { - let exec_error = ExecError::from(error); - return Ok(ExecutionResult::precondition_failure(exec_error.into())); - } - } - }; - - // Transfer the contents of the rewards purse to block proposer - execution_result_builder.set_payment_execution_result(payment_result); - - // Begin session logic handling - let post_payment_tracking_copy = tracking_copy.borrow(); - let 
session_tracking_copy = Rc::new(RefCell::new(post_payment_tracking_copy.fork())); - - // session_code_spec_2: execute session code - let ( - session_module, - session_base_key, - mut session_named_keys, - session_package, - session_entry_point, - ) = match session_metadata { - DeployMetadata::System { - base_key, - contract, - contract_package, - entry_point, - } => { - ( - system_module.clone(), - base_key, // this is contract key - contract.named_keys().clone(), - contract_package, - entry_point, - ) - } - DeployMetadata::Session { - module, - contract_package, - entry_point, - } => ( - module, - base_key, - account.named_keys().clone(), - contract_package, - entry_point, - ), - DeployMetadata::Contract { - module, - base_key, - contract, - contract_package, - entry_point, - } => ( - module, - base_key, - contract.named_keys().clone(), - contract_package, - entry_point, - ), - }; - - let session_args = session.args().clone(); - let mut session_result = { - // payment_code_spec_3_b_i: if (balance of handle payment pay purse) >= (gas spent - // during payment code execution) * gas_price, yes session - // session_code_spec_1: gas limit = ((balance of handle payment payment purse) / - // gas_price) - // - (gas spent during payment execution) - let session_gas_limit: Gas = - match Gas::from_motes(payment_purse_balance, deploy_item.gas_price) - .and_then(|gas| gas.checked_sub(payment_result_cost)) - { - Some(gas) => gas, - None => { - return Ok(ExecutionResult::precondition_failure( - Error::GasConversionOverflow, - )) - } - }; - let system_contract_cache = SystemContractCache::clone(&self.system_contract_cache); - - executor.exec( - session_module, - session_entry_point, - session_args, - session_base_key, - &account, - &mut session_named_keys, - authorization_keys.clone(), - blocktime, - deploy_hash, - session_gas_limit, - protocol_version, - correlation_id, - Rc::clone(&session_tracking_copy), - Phase::Session, - protocol_data, - system_contract_cache, - 
&session_package, - ) - }; - debug!("Session result: {:?}", session_result); - - // Create + persist deploy info. - { - let transfers = session_result.transfers(); - let cost = payment_result_cost.value() + session_result.cost().value(); - let deploy_info = DeployInfo::new( - deploy_hash, - &transfers, - account.account_hash(), - account.main_purse(), - cost, - ); - session_tracking_copy.borrow_mut().write( - Key::DeployInfo(deploy_hash), - StoredValue::DeployInfo(deploy_info), - ); - } - - let post_session_rc = if session_result.is_failure() { - // If session code fails we do not include its effects, - // so we start again from the post-payment state. - Rc::new(RefCell::new(post_payment_tracking_copy.fork())) - } else { - session_result = session_result.with_effect(session_tracking_copy.borrow().effect()); - session_tracking_copy - }; - - // NOTE: session_code_spec_3: (do not include session execution effects in - // results) is enforced in execution_result_builder.build() - execution_result_builder.set_session_execution_result(session_result); - - // payment_code_spec_5: run finalize process - let finalize_result: ExecutionResult = { - let post_session_tc = post_session_rc.borrow(); - let finalization_tc = Rc::new(RefCell::new(post_session_tc.fork())); - - let handle_payment_args = { - //((gas spent during payment code execution) + (gas spent during session code execution)) * gas_price - let finalize_cost_motes = match Motes::from_gas(execution_result_builder.total_cost(), deploy_item.gas_price) { - Some(motes) => motes, - None => return Ok(ExecutionResult::precondition_failure(Error::GasConversionOverflow)), - }; - - let maybe_runtime_args = RuntimeArgs::try_new(|args| { - args.insert(handle_payment::ARG_AMOUNT, finalize_cost_motes.value())?; - args.insert(handle_payment::ARG_ACCOUNT, account_hash)?; - args.insert(handle_payment::ARG_TARGET, proposer_purse)?; - Ok(()) - }); - match maybe_runtime_args { - Ok(runtime_args) => runtime_args, - Err(error) => { - let 
exec_error = ExecError::from(error); - return Ok(ExecutionResult::precondition_failure(exec_error.into())); - } - } - }; - - // The Handle Payment keys may have changed because of effects during payment and/or - // session, so we need to look them up again from the tracking copy - let handle_payment_contract = match finalization_tc - .borrow_mut() - .get_contract(correlation_id, protocol_data.handle_payment()) - { - Ok(info) => info, - Err(error) => return Ok(ExecutionResult::precondition_failure(error.into())), - }; - - let mut handle_payment_keys = handle_payment_contract.named_keys().to_owned(); - - let gas_limit = Gas::new(U512::from(std::u64::MAX)); - let system_contract_cache = SystemContractCache::clone(&self.system_contract_cache); - - let (_ret, finalize_result): (Option<()>, ExecutionResult) = executor - .exec_system_contract( - DirectSystemContractCall::FinalizePayment, - system_module, - handle_payment_args, - &mut handle_payment_keys, - Default::default(), - Key::from(protocol_data.handle_payment()), - &system_account, - authorization_keys, - blocktime, - deploy_hash, - gas_limit, - protocol_version, - correlation_id, - finalization_tc, - Phase::FinalizePayment, - protocol_data, - system_contract_cache, - ); - - finalize_result - }; - - execution_result_builder.set_finalize_execution_result(finalize_result); - - // We panic here to indicate that the builder was not used properly. 
- let ret = execution_result_builder - .build(tracking_copy.borrow().reader(), correlation_id) - .expect("ExecutionResultBuilder not initialized properly"); - - // NOTE: payment_code_spec_5_a is enforced in execution_result_builder.build() - // payment_code_spec_6: return properly combined set of transforms and - // appropriate error - Ok(ret) - } - - pub fn apply_effect( - &self, - correlation_id: CorrelationId, - pre_state_hash: Blake2bHash, - effects: AdditiveMap, - ) -> Result - where - Error: From, - { - self.state - .commit(correlation_id, pre_state_hash, effects) - .map_err(Error::from) - } - - pub fn read_trie( - &self, - correlation_id: CorrelationId, - trie_key: Blake2bHash, - ) -> Result>, Error> - where - Error: From, - { - self.state - .read_trie(correlation_id, &trie_key) - .map_err(Error::from) - } - - pub fn put_trie_and_find_missing_descendant_trie_keys( - &self, - correlation_id: CorrelationId, - trie: &Trie, - ) -> Result, Error> - where - Error: From, - { - let inserted_trie_key = self.state.put_trie(correlation_id, trie)?; - let missing_descendant_trie_keys = self - .state - .missing_trie_keys(correlation_id, vec![inserted_trie_key])?; - Ok(missing_descendant_trie_keys) - } - - pub fn missing_trie_keys( - &self, - correlation_id: CorrelationId, - trie_keys: Vec, - ) -> Result, Error> - where - Error: From, - { - self.state - .missing_trie_keys(correlation_id, trie_keys) - .map_err(Error::from) - } - - /// Obtains validator weights for given era. - pub fn get_era_validators( - &self, - correlation_id: CorrelationId, - get_era_validators_request: GetEraValidatorsRequest, - ) -> Result { - let protocol_version = get_era_validators_request.protocol_version(); - - let tracking_copy = match self.tracking_copy(get_era_validators_request.state_hash())? { - Some(tracking_copy) => Rc::new(RefCell::new(tracking_copy)), - None => return Err(GetEraValidatorsError::RootNotFound), - }; - - let protocol_data = match self.get_protocol_data(protocol_version)? 
{ - Some(protocol_data) => protocol_data, - None => return Err(Error::InvalidProtocolVersion(protocol_version).into()), - }; - - let wasm_config = protocol_data.wasm_config(); - - let preprocessor = Preprocessor::new(*wasm_config); - - let auction_contract: Contract = tracking_copy - .borrow_mut() - .get_contract(correlation_id, protocol_data.auction()) - .map_err(Error::from)?; - - let system_module = { - tracking_copy - .borrow_mut() - .get_system_module(&preprocessor) - .map_err(Error::from)? - }; - - let executor = Executor::new(self.config); - - let mut named_keys = auction_contract.named_keys().to_owned(); - let base_key = Key::from(protocol_data.auction()); - let gas_limit = Gas::new(U512::from(std::u64::MAX)); - let virtual_system_account = { - let named_keys = NamedKeys::new(); - let purse = URef::new(Default::default(), AccessRights::READ_ADD_WRITE); - Account::create(PublicKey::System.to_account_hash(), named_keys, purse) - }; - let authorization_keys = BTreeSet::from_iter(vec![PublicKey::System.to_account_hash()]); - let blocktime = BlockTime::default(); - let deploy_hash = { - // seeds address generator w/ protocol version - let bytes: Vec = get_era_validators_request - .protocol_version() - .value() - .into_bytes() - .map_err(Error::from)? 
- .to_vec(); - DeployHash::new(Blake2bHash::new(&bytes).value()) - }; - - let (era_validators, execution_result): (Option, ExecutionResult) = executor - .exec_system_contract( - DirectSystemContractCall::GetEraValidators, - system_module, - RuntimeArgs::new(), - &mut named_keys, - Default::default(), - base_key, - &virtual_system_account, - authorization_keys, - blocktime, - deploy_hash, - gas_limit, - protocol_version, - correlation_id, - Rc::clone(&tracking_copy), - Phase::Session, - protocol_data, - SystemContractCache::clone(&self.system_contract_cache), - ); - - if let Some(error) = execution_result.take_error() { - return Err(error.into()); - } - - match era_validators { - None => Err(GetEraValidatorsError::EraValidatorsMissing), - Some(era_validators) => Ok(era_validators), - } - } - - pub fn get_bids( - &self, - correlation_id: CorrelationId, - get_bids_request: GetBidsRequest, - ) -> Result { - let tracking_copy = match self.tracking_copy(get_bids_request.state_hash())? { - Some(tracking_copy) => Rc::new(RefCell::new(tracking_copy)), - None => return Ok(GetBidsResult::RootNotFound), - }; - - let mut tracking_copy = tracking_copy.borrow_mut(); - - let bid_keys = tracking_copy - .get_keys(correlation_id, &KeyTag::Bid) - .map_err(|err| Error::Exec(err.into()))?; - - let mut bids = BTreeMap::new(); - - for key in bid_keys.iter() { - if let Some(StoredValue::Bid(bid)) = - tracking_copy.get(correlation_id, key).map_err(Into::into)? 
- { - bids.insert(bid.validator_public_key().clone(), *bid); - }; - } - - Ok(GetBidsResult::Success { bids }) - } - - pub fn commit_step( - &self, - correlation_id: CorrelationId, - step_request: StepRequest, - ) -> Result { - let protocol_data = match self.state.get_protocol_data(step_request.protocol_version) { - Ok(Some(protocol_data)) => protocol_data, - Ok(None) => { - return Ok(StepResult::InvalidProtocolVersion); - } - Err(error) => return Ok(StepResult::GetProtocolDataError(Error::Exec(error.into()))), - }; - - let tracking_copy = match self.tracking_copy(step_request.pre_state_hash) { - Err(error) => return Ok(StepResult::TrackingCopyError(error)), - Ok(None) => return Ok(StepResult::RootNotFound), - Ok(Some(tracking_copy)) => Rc::new(RefCell::new(tracking_copy)), - }; - - let executor = Executor::new(self.config); - - let preprocessor = { - let wasm_config = protocol_data.wasm_config(); - Preprocessor::new(*wasm_config) - }; - - let auction_hash = protocol_data.auction(); - - let auction_contract = match tracking_copy - .borrow_mut() - .get_contract(correlation_id, auction_hash) - { - Ok(contract) => contract, - Err(error) => { - return Ok(StepResult::GetContractError(error.into())); - } - }; - - let system_module = match tracking_copy.borrow_mut().get_system_module(&preprocessor) { - Ok(module) => module, - Err(error) => { - return Ok(StepResult::GetSystemModuleError(error.into())); - } - }; - - self.system_contract_cache - .initialize_with_protocol_data(&protocol_data, &system_module); - - let system_account_addr = PublicKey::System.to_account_hash(); - - let virtual_system_account = { - let named_keys = NamedKeys::new(); - let purse = URef::new(Default::default(), AccessRights::READ_ADD_WRITE); - Account::create(system_account_addr, named_keys, purse) - }; - let authorization_keys = { - let mut ret = BTreeSet::new(); - ret.insert(system_account_addr); - ret - }; - let mut named_keys = auction_contract.named_keys().to_owned(); - let gas_limit = 
Gas::new(U512::from(std::u64::MAX)); - let deploy_hash = { - // seeds address generator w/ protocol version - let bytes: Vec = step_request.protocol_version.value().into_bytes()?.to_vec(); - DeployHash::new(Blake2bHash::new(&bytes).value()) - }; - - let base_key = Key::from(protocol_data.auction()); - - let reward_factors = match step_request.reward_factors() { - Ok(reward_factors) => reward_factors, - Err(error) => { - error!( - "failed to deserialize reward factors: {}", - error.to_string() - ); - return Ok(StepResult::Serialization(error)); - } - }; - - let reward_args = { - let maybe_runtime_args = RuntimeArgs::try_new(|args| { - args.insert(ARG_REWARD_FACTORS, reward_factors)?; - Ok(()) - }); - - match maybe_runtime_args { - Ok(runtime_args) => runtime_args, - Err(error) => return Ok(StepResult::CLValueError(error)), - } - }; - - let (_, execution_result): (Option<()>, ExecutionResult) = executor.exec_system_contract( - DirectSystemContractCall::DistributeRewards, - system_module.clone(), - reward_args, - &mut named_keys, - Default::default(), - base_key, - &virtual_system_account, - authorization_keys.clone(), - BlockTime::default(), - deploy_hash, - gas_limit, - step_request.protocol_version, - correlation_id, - Rc::clone(&tracking_copy), - Phase::Session, - protocol_data, - SystemContractCache::clone(&self.system_contract_cache), - ); - - if let Some(exec_error) = execution_result.take_error() { - return Ok(StepResult::DistributeError(exec_error)); - } - - let slashed_validators = match step_request.slashed_validators() { - Ok(slashed_validators) => slashed_validators, - Err(error) => { - error!( - "failed to deserialize validator_ids for slashing: {}", - error.to_string() - ); - return Ok(StepResult::Serialization(error)); - } - }; - - let slash_args = { - let mut runtime_args = RuntimeArgs::new(); - runtime_args - .insert(ARG_VALIDATOR_PUBLIC_KEYS, slashed_validators) - .map_err(|e| Error::Exec(e.into()))?; - runtime_args - }; - - let (_, 
execution_result): (Option<()>, ExecutionResult) = executor.exec_system_contract( - DirectSystemContractCall::Slash, - system_module.clone(), - slash_args, - &mut named_keys, - Default::default(), - base_key, - &virtual_system_account, - authorization_keys.clone(), - BlockTime::default(), - deploy_hash, - gas_limit, - step_request.protocol_version, - correlation_id, - Rc::clone(&tracking_copy), - Phase::Session, - protocol_data, - SystemContractCache::clone(&self.system_contract_cache), - ); - - if let Some(exec_error) = execution_result.take_error() { - return Ok(StepResult::SlashingError(exec_error)); - } - - if step_request.run_auction { - let run_auction_args = { - let maybe_runtime_args = RuntimeArgs::try_new(|args| { - args.insert( - ARG_ERA_END_TIMESTAMP_MILLIS, - step_request.era_end_timestamp_millis, - )?; - args.insert( - ARG_EVICTED_VALIDATORS, - step_request - .evict_items - .iter() - .map(|item| item.validator_id.clone()) - .collect::>(), - )?; - Ok(()) - }); - - match maybe_runtime_args { - Ok(runtime_args) => runtime_args, - Err(error) => return Ok(StepResult::CLValueError(error)), - } - }; - - let (_, execution_result): (Option<()>, ExecutionResult) = executor - .exec_system_contract( - DirectSystemContractCall::RunAuction, - system_module, - run_auction_args, - &mut named_keys, - Default::default(), - base_key, - &virtual_system_account, - authorization_keys, - BlockTime::default(), - deploy_hash, - gas_limit, - step_request.protocol_version, - correlation_id, - Rc::clone(&tracking_copy), - Phase::Session, - protocol_data, - SystemContractCache::clone(&self.system_contract_cache), - ); - - if let Some(exec_error) = execution_result.take_error() { - return Ok(StepResult::AuctionError(exec_error)); - } - } - - let effects = tracking_copy.borrow().effect(); - - // commit - let commit_result = self - .state - .commit( - correlation_id, - step_request.pre_state_hash, - effects.transforms, - ) - .map_err(Into::into)?; - - let post_state_hash = match 
commit_result { - CommitResult::Success { state_root } => state_root, - CommitResult::RootNotFound => return Ok(StepResult::RootNotFound), - CommitResult::KeyNotFound(key) => return Ok(StepResult::KeyNotFound(key)), - CommitResult::TypeMismatch(type_mismatch) => { - return Ok(StepResult::TypeMismatch(type_mismatch)) - } - CommitResult::Serialization(bytesrepr_error) => { - return Ok(StepResult::Serialization(bytesrepr_error)) - } - }; - - let next_era_validators = { - let mut era_validators = match self.get_era_validators( - correlation_id, - GetEraValidatorsRequest::new(post_state_hash, step_request.protocol_version), - ) { - Ok(era_validators) => era_validators, - Err(error) => { - return Ok(StepResult::GetEraValidatorsError(error)); - } - }; - - let era_id = &step_request.next_era_id; - match era_validators.remove(era_id) { - Some(validator_weights) => validator_weights, - None => { - return Ok(StepResult::EraValidatorsMissing(*era_id)); - } - } - }; - - Ok(StepResult::Success { - post_state_hash, - next_era_validators, - }) - } -} diff --git a/execution_engine/src/core/engine_state/op.rs b/execution_engine/src/core/engine_state/op.rs deleted file mode 100644 index 4a35007b9c..0000000000 --- a/execution_engine/src/core/engine_state/op.rs +++ /dev/null @@ -1,56 +0,0 @@ -use std::{ - default::Default, - fmt::{self, Display, Formatter}, - ops::{Add, AddAssign}, -}; - -#[derive(PartialEq, Eq, Debug, Clone, Copy)] -pub enum Op { - Read, - Write, - Add, - NoOp, -} - -impl Add for Op { - type Output = Op; - - fn add(self, other: Op) -> Op { - match (self, other) { - (a, Op::NoOp) => a, - (Op::NoOp, b) => b, - (Op::Read, Op::Read) => Op::Read, - (Op::Add, Op::Add) => Op::Add, - _ => Op::Write, - } - } -} - -impl AddAssign for Op { - fn add_assign(&mut self, other: Self) { - *self = *self + other; - } -} - -impl Display for Op { - fn fmt(&self, f: &mut Formatter) -> fmt::Result { - write!(f, "{:?}", self) - } -} - -impl Default for Op { - fn default() -> Self { - 
Op::NoOp - } -} - -impl From<&Op> for casper_types::OpKind { - fn from(op: &Op) -> Self { - match op { - Op::Read => casper_types::OpKind::Read, - Op::Write => casper_types::OpKind::Write, - Op::Add => casper_types::OpKind::Add, - Op::NoOp => casper_types::OpKind::NoOp, - } - } -} diff --git a/execution_engine/src/core/engine_state/query.rs b/execution_engine/src/core/engine_state/query.rs deleted file mode 100644 index 4181daec72..0000000000 --- a/execution_engine/src/core/engine_state/query.rs +++ /dev/null @@ -1,96 +0,0 @@ -use casper_types::{system::auction::Bids, Key}; - -use crate::{ - core::tracking_copy::TrackingCopyQueryResult, - shared::{newtypes::Blake2bHash, stored_value::StoredValue}, - storage::trie::merkle_proof::TrieMerkleProof, -}; - -#[derive(Debug)] -pub enum QueryResult { - RootNotFound, - ValueNotFound(String), - CircularReference(String), - Success { - value: Box, - proofs: Vec>, - }, -} - -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct QueryRequest { - state_hash: Blake2bHash, - key: Key, - path: Vec, -} - -impl QueryRequest { - pub fn new(state_hash: Blake2bHash, key: Key, path: Vec) -> Self { - QueryRequest { - state_hash, - key, - path, - } - } - - pub fn state_hash(&self) -> Blake2bHash { - self.state_hash - } - - pub fn key(&self) -> Key { - self.key - } - - pub fn path(&self) -> &[String] { - &self.path - } -} - -impl From for QueryResult { - fn from(tracking_copy_query_result: TrackingCopyQueryResult) -> Self { - match tracking_copy_query_result { - TrackingCopyQueryResult::ValueNotFound(message) => QueryResult::ValueNotFound(message), - TrackingCopyQueryResult::CircularReference(message) => { - QueryResult::CircularReference(message) - } - TrackingCopyQueryResult::Success { value, proofs } => { - let value = Box::new(value); - QueryResult::Success { value, proofs } - } - } - } -} - -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct GetBidsRequest { - state_hash: Blake2bHash, -} - -impl GetBidsRequest { - pub fn new(state_hash: 
Blake2bHash) -> Self { - GetBidsRequest { state_hash } - } - - pub fn state_hash(&self) -> Blake2bHash { - self.state_hash - } -} - -#[derive(Debug)] -pub enum GetBidsResult { - RootNotFound, - Success { bids: Bids }, -} - -impl GetBidsResult { - pub fn success(bids: Bids) -> Self { - GetBidsResult::Success { bids } - } - - pub fn bids(&self) -> Option<&Bids> { - match self { - GetBidsResult::RootNotFound => None, - GetBidsResult::Success { bids } => Some(bids), - } - } -} diff --git a/execution_engine/src/core/engine_state/run_genesis_request.rs b/execution_engine/src/core/engine_state/run_genesis_request.rs deleted file mode 100644 index f97e7ca452..0000000000 --- a/execution_engine/src/core/engine_state/run_genesis_request.rs +++ /dev/null @@ -1,56 +0,0 @@ -use rand::{ - distributions::{Distribution, Standard}, - Rng, -}; - -use casper_types::ProtocolVersion; - -use super::genesis::ExecConfig; -use crate::shared::newtypes::Blake2bHash; - -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct RunGenesisRequest { - genesis_config_hash: Blake2bHash, - protocol_version: ProtocolVersion, - ee_config: ExecConfig, -} - -impl RunGenesisRequest { - pub fn new( - genesis_config_hash: Blake2bHash, - protocol_version: ProtocolVersion, - ee_config: ExecConfig, - ) -> RunGenesisRequest { - RunGenesisRequest { - genesis_config_hash, - protocol_version, - ee_config, - } - } - - pub fn genesis_config_hash(&self) -> Blake2bHash { - self.genesis_config_hash - } - - pub fn protocol_version(&self) -> ProtocolVersion { - self.protocol_version - } - - pub fn ee_config(&self) -> &ExecConfig { - &self.ee_config - } - - pub fn take_ee_config(self) -> ExecConfig { - self.ee_config - } -} - -impl Distribution for Standard { - fn sample(&self, rng: &mut R) -> RunGenesisRequest { - let input: [u8; 32] = rng.gen(); - let genesis_config_hash = Blake2bHash::new(&input); - let protocol_version = ProtocolVersion::from_parts(rng.gen(), rng.gen(), rng.gen()); - let ee_config = rng.gen(); - 
RunGenesisRequest::new(genesis_config_hash, protocol_version, ee_config) - } -} diff --git a/execution_engine/src/core/engine_state/step.rs b/execution_engine/src/core/engine_state/step.rs deleted file mode 100644 index 51683f8ae4..0000000000 --- a/execution_engine/src/core/engine_state/step.rs +++ /dev/null @@ -1,134 +0,0 @@ -use std::{collections::BTreeMap, fmt::Display, vec::Vec}; - -use core::fmt; -use uint::static_assertions::_core::fmt::Formatter; - -use casper_types::{ - bytesrepr, bytesrepr::ToBytes, CLValueError, EraId, Key, ProtocolVersion, PublicKey, U512, -}; - -use crate::{ - core::engine_state::{Error, GetEraValidatorsError}, - shared::{newtypes::Blake2bHash, TypeMismatch}, -}; - -#[derive(Debug)] -pub struct SlashItem { - pub validator_id: PublicKey, -} - -impl SlashItem { - pub fn new(validator_id: PublicKey) -> Self { - Self { validator_id } - } -} - -#[derive(Debug)] -pub struct RewardItem { - pub validator_id: PublicKey, - pub value: u64, -} - -impl RewardItem { - pub fn new(validator_id: PublicKey, value: u64) -> Self { - Self { - validator_id, - value, - } - } -} - -#[derive(Debug)] -pub struct EvictItem { - pub validator_id: PublicKey, -} - -impl EvictItem { - pub fn new(validator_id: PublicKey) -> Self { - Self { validator_id } - } -} - -#[derive(Debug)] -pub struct StepRequest { - pub pre_state_hash: Blake2bHash, - pub protocol_version: ProtocolVersion, - pub slash_items: Vec, - pub reward_items: Vec, - pub evict_items: Vec, - pub run_auction: bool, - pub next_era_id: EraId, - pub era_end_timestamp_millis: u64, -} - -impl StepRequest { - #[allow(clippy::too_many_arguments)] - pub fn new( - pre_state_hash: Blake2bHash, - protocol_version: ProtocolVersion, - slash_items: Vec, - reward_items: Vec, - evict_items: Vec, - run_auction: bool, - next_era_id: EraId, - era_end_timestamp_millis: u64, - ) -> Self { - Self { - pre_state_hash, - protocol_version, - slash_items, - reward_items, - evict_items, - run_auction, - next_era_id, - 
era_end_timestamp_millis, - } - } - - pub fn slashed_validators(&self) -> Result, bytesrepr::Error> { - let mut ret = vec![]; - for slash_item in &self.slash_items { - let public_key: PublicKey = - bytesrepr::deserialize(slash_item.validator_id.clone().to_bytes()?)?; - ret.push(public_key); - } - Ok(ret) - } - - pub fn reward_factors(&self) -> Result, bytesrepr::Error> { - let mut ret = BTreeMap::new(); - for reward_item in &self.reward_items { - ret.insert(reward_item.validator_id.clone(), reward_item.value); - } - Ok(ret) - } -} - -#[derive(Debug)] -pub enum StepResult { - RootNotFound, - GetProtocolDataError(Error), - TrackingCopyError(Error), - GetContractError(Error), - GetSystemModuleError(Error), - SlashingError(Error), - AuctionError(Error), - DistributeError(Error), - InvalidProtocolVersion, - KeyNotFound(Key), - TypeMismatch(TypeMismatch), - Serialization(bytesrepr::Error), - CLValueError(CLValueError), - GetEraValidatorsError(GetEraValidatorsError), - EraValidatorsMissing(EraId), - Success { - post_state_hash: Blake2bHash, - next_era_validators: BTreeMap, - }, -} - -impl Display for StepResult { - fn fmt(&self, f: &mut Formatter) -> fmt::Result { - write!(f, "{:?}", self) - } -} diff --git a/execution_engine/src/core/engine_state/system_contract_cache.rs b/execution_engine/src/core/engine_state/system_contract_cache.rs deleted file mode 100644 index 653fd3a971..0000000000 --- a/execution_engine/src/core/engine_state/system_contract_cache.rs +++ /dev/null @@ -1,283 +0,0 @@ -use std::{ - collections::HashMap, - sync::{Arc, RwLock}, -}; - -use parity_wasm::elements::Module; - -use crate::storage::protocol_data::ProtocolData; -use casper_types::ContractHash; - -/// A cache of deserialized contracts. -#[derive(Clone, Default, Debug)] -pub struct SystemContractCache(Arc>>); - -impl SystemContractCache { - /// Returns `true` if the cache has a contract corresponding to `contract_hash`. 
- pub fn has(&self, contract_hash: ContractHash) -> bool { - let guarded_map = self.0.read().unwrap(); - guarded_map.contains_key(&contract_hash) - } - - /// Inserts `contract` into the cache under `contract_hash`. - /// - /// If the cache did not have this key present, `None` is returned. - /// - /// If the cache did have this key present, the value is updated, and the old value is returned. - pub fn insert(&self, contract_hash: ContractHash, module: Module) -> Option { - let mut guarded_map = self.0.write().unwrap(); - guarded_map.insert(contract_hash, module) - } - - /// Returns a clone of the contract corresponding to `contract_hash`. - pub fn get(&self, contract_hash: ContractHash) -> Option { - let guarded_map = self.0.read().unwrap(); - guarded_map.get(&contract_hash).cloned() - } - - /// Initializes cache from protocol data. - pub fn initialize_with_protocol_data(&self, protocol_data: &ProtocolData, module: &Module) { - // TODO: the SystemContractCache is vestigial and should be removed. In the meantime, - // a minimal viable wasm module is used as a placeholder to fulfil expectations of - // the runtime. 
- let mint_hash = protocol_data.mint(); - if !self.has(mint_hash) { - self.insert(mint_hash, module.clone()); - } - let auction_hash = protocol_data.auction(); - if !self.has(auction_hash) { - self.insert(auction_hash, module.clone()); - } - let handle_payment_hash = protocol_data.handle_payment(); - if !self.has(handle_payment_hash) { - self.insert(handle_payment_hash, module.clone()); - } - } -} - -#[cfg(test)] -mod tests { - use std::sync::Mutex; - - use once_cell::sync::Lazy; - use parity_wasm::elements::{Module, ModuleNameSubsection, NameSection, Section}; - - use crate::core::{ - engine_state::system_contract_cache::SystemContractCache, - execution::{AddressGenerator, AddressGeneratorBuilder}, - }; - use casper_types::contracts::ContractHash; - - static ADDRESS_GENERATOR: Lazy> = Lazy::new(|| { - Mutex::new( - AddressGeneratorBuilder::new() - .seed_with(b"test_seed") - .build(), - ) - }); - - #[test] - fn should_insert_module() { - let reference = { - let mut address_generator = ADDRESS_GENERATOR.lock().unwrap(); - address_generator.create_address() - }; - let module = Module::default(); - - let cache = SystemContractCache::default(); - - let result = cache.insert(reference.into(), module); - - assert!(result.is_none()) - } - - #[test] - fn should_has_false() { - let cache = SystemContractCache::default(); - let reference = { - let mut address_generator = ADDRESS_GENERATOR.lock().unwrap(); - address_generator.create_address() - } - .into(); - - assert!(!cache.has(reference)) - } - - #[test] - fn should_has_true() { - let cache = SystemContractCache::default(); - let reference = { - let mut address_generator = ADDRESS_GENERATOR.lock().unwrap(); - address_generator.create_address() - } - .into(); - let module = Module::default(); - - cache.insert(reference, module); - - assert!(cache.has(reference)) - } - - #[test] - fn should_has_true_normalized_has() { - let cache = SystemContractCache::default(); - let reference = { - let mut address_generator = 
ADDRESS_GENERATOR.lock().unwrap(); - address_generator.create_address() - } - .into(); - let module = Module::default(); - - cache.insert(reference, module); - - assert!(cache.has(reference)) - } - - #[test] - fn should_has_true_normalized_insert() { - let cache = SystemContractCache::default(); - let reference = { - let mut address_generator = ADDRESS_GENERATOR.lock().unwrap(); - address_generator.create_address() - } - .into(); - let module = Module::default(); - - cache.insert(reference, module); - - assert!(cache.has(reference)) - } - - #[test] - fn should_get_none() { - let reference = { - let mut address_generator = ADDRESS_GENERATOR.lock().unwrap(); - address_generator.create_address() - } - .into(); - let cache = SystemContractCache::default(); - - let result = cache.get(reference); - - assert!(result.is_none()) - } - - #[test] - fn should_get_module() { - let cache = SystemContractCache::default(); - let reference = { - let mut address_generator = ADDRESS_GENERATOR.lock().unwrap(); - address_generator.create_address() - } - .into(); - let module = Module::default(); - - cache.insert(reference, module.clone()); - - let result = cache.get(reference); - - assert_eq!(result, Some(module)) - } - - #[test] - fn should_get_module_normalized_get() { - let cache = SystemContractCache::default(); - let reference = { - let mut address_generator = ADDRESS_GENERATOR.lock().unwrap(); - address_generator.create_address() - } - .into(); - let module = Module::default(); - - cache.insert(reference, module.clone()); - - let result = cache.get(reference); - - assert_eq!(result, Some(module.clone())); - - let result = cache.get(reference); - - assert_eq!(result, Some(module)) - } - - #[test] - fn should_get_module_normalized_insert() { - let cache = SystemContractCache::default(); - let reference: ContractHash = { - let mut address_generator = ADDRESS_GENERATOR.lock().unwrap(); - address_generator.create_address() - } - .into(); - let module = Module::default(); - - 
cache.insert(reference, module.clone()); - - let result = cache.get(reference); - - assert_eq!(result, Some(module.clone())); - - let result = cache.get(reference); - - assert_eq!(result, Some(module)) - } - - #[test] - fn should_update_module() { - let cache = SystemContractCache::default(); - let reference = { - let mut address_generator = ADDRESS_GENERATOR.lock().unwrap(); - address_generator.create_address() - } - .into(); - let initial_module = Module::default(); - let updated_module = { - let section = NameSection::new(Some(ModuleNameSubsection::new("a_mod")), None, None); - let sections = vec![Section::Name(section)]; - Module::new(sections) - }; - - assert_ne!(initial_module, updated_module); - - let result = cache.insert(reference, initial_module.clone()); - - assert!(result.is_none()); - - let result = cache.insert(reference, updated_module.clone()); - - assert_eq!(result, Some(initial_module)); - - let result = cache.get(reference); - - assert_eq!(result, Some(updated_module)) - } - - #[test] - fn should_update_module_normalized() { - let cache = SystemContractCache::default(); - let reference = { - let mut address_generator = ADDRESS_GENERATOR.lock().unwrap(); - address_generator.create_address() - } - .into(); - let initial_module = Module::default(); - let updated_module = { - let section = NameSection::new(Some(ModuleNameSubsection::new("a_mod")), None, None); - let sections = vec![Section::Name(section)]; - Module::new(sections) - }; - - assert_ne!(initial_module, updated_module); - - let result = cache.insert(reference, initial_module.clone()); - - assert!(result.is_none()); - - let result = cache.insert(reference, updated_module.clone()); - - assert_eq!(result, Some(initial_module)); - - let result = cache.get(reference); - - assert_eq!(result, Some(updated_module)) - } -} diff --git a/execution_engine/src/core/engine_state/transfer.rs b/execution_engine/src/core/engine_state/transfer.rs deleted file mode 100644 index 2c4ead33b7..0000000000 --- 
a/execution_engine/src/core/engine_state/transfer.rs +++ /dev/null @@ -1,361 +0,0 @@ -use std::{cell::RefCell, convert::TryFrom, rc::Rc}; - -use casper_types::{ - account::AccountHash, system::mint, AccessRights, ApiError, CLType, CLValueError, Key, - RuntimeArgs, URef, U512, -}; - -use crate::{ - core::{ - engine_state::Error, - execution::Error as ExecError, - tracking_copy::{TrackingCopy, TrackingCopyExt}, - }, - shared::{self, account::Account, newtypes::CorrelationId, stored_value::StoredValue}, - storage::global_state::StateReader, -}; - -#[derive(Copy, Clone, Debug, PartialEq)] -pub enum TransferTargetMode { - Unknown, - PurseExists(URef), - CreateAccount(AccountHash), -} - -#[derive(Debug, Clone, Copy)] -pub struct TransferArgs { - to: Option, - source: URef, - target: URef, - amount: U512, - arg_id: Option, -} - -impl TransferArgs { - pub fn new( - to: Option, - source: URef, - target: URef, - amount: U512, - arg_id: Option, - ) -> Self { - Self { - to, - source, - target, - amount, - arg_id, - } - } - - pub fn to(&self) -> Option { - self.to - } - - pub fn source(&self) -> URef { - self.source - } - - pub fn arg_id(&self) -> Option { - self.arg_id - } - - pub fn amount(&self) -> U512 { - self.amount - } -} - -impl TryFrom for RuntimeArgs { - type Error = CLValueError; - - fn try_from(transfer_args: TransferArgs) -> Result { - let mut runtime_args = RuntimeArgs::new(); - - runtime_args.insert(mint::ARG_TO, transfer_args.to)?; - runtime_args.insert(mint::ARG_SOURCE, transfer_args.source)?; - runtime_args.insert(mint::ARG_TARGET, transfer_args.target)?; - runtime_args.insert(mint::ARG_AMOUNT, transfer_args.amount)?; - runtime_args.insert(mint::ARG_ID, transfer_args.arg_id)?; - - Ok(runtime_args) - } -} - -#[derive(Clone, Debug, PartialEq)] -pub struct TransferRuntimeArgsBuilder { - inner: RuntimeArgs, - transfer_target_mode: TransferTargetMode, - to: Option, -} - -impl TransferRuntimeArgsBuilder { - pub fn new(imputed_runtime_args: RuntimeArgs) -> 
TransferRuntimeArgsBuilder { - TransferRuntimeArgsBuilder { - inner: imputed_runtime_args, - transfer_target_mode: TransferTargetMode::Unknown, - to: None, - } - } - - fn purse_exists( - &self, - uref: URef, - correlation_id: CorrelationId, - tracking_copy: Rc>>, - ) -> bool - where - R: StateReader, - R::Error: Into, - { - let key = match tracking_copy - .borrow_mut() - .get_purse_balance_key(correlation_id, uref.into()) - { - Ok(key) => key, - Err(_) => return false, - }; - tracking_copy - .borrow_mut() - .get_purse_balance(correlation_id, key) - .is_ok() - } - - fn resolve_source_uref( - &self, - account: &Account, - correlation_id: CorrelationId, - tracking_copy: Rc>>, - ) -> Result - where - R: StateReader, - R::Error: Into, - { - let imputed_runtime_args = &self.inner; - let arg_name = mint::ARG_SOURCE; - match imputed_runtime_args.get(arg_name) { - Some(cl_value) if *cl_value.cl_type() == CLType::URef => { - let uref: URef = match cl_value.clone().into_t() { - Ok(uref) => uref, - Err(error) => { - return Err(Error::Exec(ExecError::Revert(error.into()))); - } - }; - if account.main_purse().addr() == uref.addr() { - return Ok(uref); - } - - let normalized_uref = Key::URef(uref).normalize(); - let maybe_named_key = account - .named_keys() - .values() - .find(|&named_key| named_key.normalize() == normalized_uref); - match maybe_named_key { - Some(Key::URef(found_uref)) => { - if found_uref.is_writeable() { - // it is a URef and caller has access but is it a purse URef? 
- if !self.purse_exists( - found_uref.to_owned(), - correlation_id, - tracking_copy, - ) { - return Err(Error::Exec(ExecError::Revert(ApiError::InvalidPurse))); - } - - Ok(uref) - } else { - Err(Error::Exec(ExecError::InvalidAccess { - required: AccessRights::WRITE, - })) - } - } - Some(key) => Err(Error::Exec(ExecError::TypeMismatch( - shared::TypeMismatch::new("Key::URef".to_string(), key.type_string()), - ))), - None => Err(Error::Exec(ExecError::ForgedReference(uref))), - } - } - Some(_) => Err(Error::Exec(ExecError::Revert(ApiError::InvalidArgument))), - None => Ok(account.main_purse()), // if no source purse passed use account main purse - } - } - - fn resolve_transfer_target_mode( - &mut self, - correlation_id: CorrelationId, - tracking_copy: Rc>>, - ) -> Result - where - R: StateReader, - R::Error: Into, - { - let imputed_runtime_args = &self.inner; - let arg_name = mint::ARG_TARGET; - match imputed_runtime_args.get(arg_name) { - Some(cl_value) if *cl_value.cl_type() == CLType::URef => { - let uref: URef = match cl_value.clone().into_t() { - Ok(uref) => uref, - Err(error) => { - return Err(Error::Exec(ExecError::Revert(error.into()))); - } - }; - - if !self.purse_exists(uref, correlation_id, tracking_copy) { - return Err(Error::Exec(ExecError::Revert(ApiError::InvalidPurse))); - } - - Ok(TransferTargetMode::PurseExists(uref)) - } - Some(cl_value) if *cl_value.cl_type() == CLType::ByteArray(32) => { - let account_key: Key = { - let hash: AccountHash = match cl_value.clone().into_t() { - Ok(hash) => hash, - Err(error) => { - return Err(Error::Exec(ExecError::Revert(error.into()))); - } - }; - self.to = Some(hash.to_owned()); - Key::Account(hash) - }; - match account_key.into_account() { - Some(public_key) => { - match tracking_copy - .borrow_mut() - .read_account(correlation_id, public_key) - { - Ok(account) => Ok(TransferTargetMode::PurseExists( - account.main_purse().with_access_rights(AccessRights::ADD), - )), - Err(_) => 
Ok(TransferTargetMode::CreateAccount(public_key)), - } - } - None => Err(Error::Exec(ExecError::Revert(ApiError::Transfer))), - } - } - Some(cl_value) if *cl_value.cl_type() == CLType::Key => { - let account_key: Key = match cl_value.clone().into_t() { - Ok(key) => key, - Err(error) => { - return Err(Error::Exec(ExecError::Revert(error.into()))); - } - }; - match account_key.into_account() { - Some(account_hash) => { - self.to = Some(account_hash.to_owned()); - match tracking_copy - .borrow_mut() - .read_account(correlation_id, account_hash) - { - Ok(account) => Ok(TransferTargetMode::PurseExists( - account.main_purse().with_access_rights(AccessRights::ADD), - )), - Err(_) => Ok(TransferTargetMode::CreateAccount(account_hash)), - } - } - None => Err(Error::Exec(ExecError::Revert(ApiError::Transfer))), - } - } - Some(_) => Err(Error::Exec(ExecError::Revert(ApiError::InvalidArgument))), - None => Err(Error::Exec(ExecError::Revert(ApiError::MissingArgument))), - } - } - - fn resolve_amount(&self) -> Result { - let imputed_runtime_args = &self.inner; - match imputed_runtime_args.get(mint::ARG_AMOUNT) { - Some(amount_value) if *amount_value.cl_type() == CLType::U512 => { - match amount_value.clone().into_t::() { - Ok(amount) => { - if amount == U512::zero() { - Err(Error::Exec(ExecError::Revert(ApiError::Transfer))) - } else { - Ok(amount) - } - } - Err(error) => Err(Error::Exec(ExecError::Revert(error.into()))), - } - } - Some(amount_value) if *amount_value.cl_type() == CLType::U64 => { - match amount_value.clone().into_t::() { - Ok(amount) => match amount { - 0 => Err(Error::Exec(ExecError::Revert(ApiError::Transfer))), - _ => Ok(U512::from(amount)), - }, - Err(error) => Err(Error::Exec(ExecError::Revert(error.into()))), - } - } - Some(_) => Err(Error::Exec(ExecError::Revert(ApiError::InvalidArgument))), - None => Err(Error::Exec(ExecError::Revert(ApiError::MissingArgument))), - } - } - - pub fn transfer_target_mode( - &mut self, - correlation_id: CorrelationId, - 
tracking_copy: Rc>>, - ) -> Result - where - R: StateReader, - R::Error: Into, - { - let mode = self.transfer_target_mode; - if mode != TransferTargetMode::Unknown { - return Ok(mode); - } - match self.resolve_transfer_target_mode(correlation_id, tracking_copy) { - Ok(mode) => { - self.transfer_target_mode = mode; - Ok(mode) - } - Err(error) => Err(error), - } - } - - pub fn build( - mut self, - from: &Account, - correlation_id: CorrelationId, - tracking_copy: Rc>>, - ) -> Result - where - R: StateReader, - R::Error: Into, - { - let to = self.to; - - let target_uref = - match self.resolve_transfer_target_mode(correlation_id, Rc::clone(&tracking_copy))? { - TransferTargetMode::PurseExists(uref) => uref, - _ => { - return Err(Error::Exec(ExecError::Revert(ApiError::Transfer))); - } - }; - - let source_uref = - self.resolve_source_uref(from, correlation_id, Rc::clone(&tracking_copy))?; - - if source_uref.addr() == target_uref.addr() { - return Err(ExecError::Revert(ApiError::InvalidPurse).into()); - } - - let amount = self.resolve_amount()?; - - let id = { - let id_bytes: Result, _> = match self.inner.get(mint::ARG_ID) { - Some(id_bytes) => id_bytes.clone().into_t(), - None => return Err(ExecError::Revert(ApiError::MissingArgument).into()), - }; - match id_bytes { - Ok(id) => id, - Err(err) => return Err(Error::Exec(ExecError::Revert(err.into()))), - } - }; - - Ok(TransferArgs { - to, - source: source_uref, - target: target_uref, - amount, - arg_id: id, - }) - } -} diff --git a/execution_engine/src/core/engine_state/upgrade.rs b/execution_engine/src/core/engine_state/upgrade.rs deleted file mode 100644 index 14ff007cbc..0000000000 --- a/execution_engine/src/core/engine_state/upgrade.rs +++ /dev/null @@ -1,301 +0,0 @@ -use std::{cell::RefCell, collections::BTreeMap, fmt, rc::Rc}; - -use num_rational::Ratio; -use thiserror::Error; - -use casper_types::{ - bytesrepr, - system::{AUCTION, HANDLE_PAYMENT, MINT, STANDARD_PAYMENT}, - ContractHash, EraId, Key, ProtocolVersion, 
-}; - -use crate::{ - core::{engine_state::execution_effect::ExecutionEffect, tracking_copy::TrackingCopy}, - shared::{ - newtypes::{Blake2bHash, CorrelationId}, - stored_value::StoredValue, - system_config::SystemConfig, - wasm_config::WasmConfig, - TypeMismatch, - }, - storage::{ - global_state::{CommitResult, StateProvider}, - protocol_data::ProtocolData, - }, -}; - -#[derive(Debug, Clone)] -pub enum UpgradeResult { - RootNotFound, - KeyNotFound(Key), - TypeMismatch(TypeMismatch), - Serialization(bytesrepr::Error), - Success { - post_state_hash: Blake2bHash, - effect: ExecutionEffect, - }, -} - -impl fmt::Display for UpgradeResult { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { - match self { - UpgradeResult::RootNotFound => write!(f, "Root not found"), - UpgradeResult::KeyNotFound(key) => write!(f, "Key not found: {}", key), - UpgradeResult::TypeMismatch(type_mismatch) => { - write!(f, "Type mismatch: {:?}", type_mismatch) - } - UpgradeResult::Serialization(error) => write!(f, "Serialization error: {:?}", error), - UpgradeResult::Success { - post_state_hash, - effect, - } => write!(f, "Success: {} {:?}", post_state_hash, effect), - } - } -} - -impl UpgradeResult { - pub fn from_commit_result(commit_result: CommitResult, effect: ExecutionEffect) -> Self { - match commit_result { - CommitResult::RootNotFound => UpgradeResult::RootNotFound, - CommitResult::KeyNotFound(key) => UpgradeResult::KeyNotFound(key), - CommitResult::TypeMismatch(type_mismatch) => UpgradeResult::TypeMismatch(type_mismatch), - CommitResult::Serialization(error) => UpgradeResult::Serialization(error), - CommitResult::Success { state_root, .. } => UpgradeResult::Success { - post_state_hash: state_root, - effect, - }, - } - } - - pub fn is_success(&self) -> bool { - matches!(&self, UpgradeResult::Success { .. 
}) - } -} - -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct UpgradeConfig { - pre_state_hash: Blake2bHash, - current_protocol_version: ProtocolVersion, - new_protocol_version: ProtocolVersion, - wasm_config: Option, - system_config: Option, - activation_point: Option, - new_validator_slots: Option, - new_auction_delay: Option, - new_locked_funds_period_millis: Option, - new_round_seigniorage_rate: Option>, - new_unbonding_delay: Option, - global_state_update: BTreeMap, -} - -impl UpgradeConfig { - #[allow(clippy::too_many_arguments)] - pub fn new( - pre_state_hash: Blake2bHash, - current_protocol_version: ProtocolVersion, - new_protocol_version: ProtocolVersion, - wasm_config: Option, - system_config: Option, - activation_point: Option, - new_validator_slots: Option, - new_auction_delay: Option, - new_locked_funds_period_millis: Option, - new_round_seigniorage_rate: Option>, - new_unbonding_delay: Option, - global_state_update: BTreeMap, - ) -> Self { - UpgradeConfig { - pre_state_hash, - current_protocol_version, - new_protocol_version, - wasm_config, - system_config, - activation_point, - new_validator_slots, - new_auction_delay, - new_locked_funds_period_millis, - new_round_seigniorage_rate, - new_unbonding_delay, - global_state_update, - } - } - - pub fn pre_state_hash(&self) -> Blake2bHash { - self.pre_state_hash - } - - pub fn current_protocol_version(&self) -> ProtocolVersion { - self.current_protocol_version - } - - pub fn new_protocol_version(&self) -> ProtocolVersion { - self.new_protocol_version - } - - pub fn wasm_config(&self) -> Option<&WasmConfig> { - self.wasm_config.as_ref() - } - - pub fn system_config(&self) -> Option<&SystemConfig> { - self.system_config.as_ref() - } - - pub fn activation_point(&self) -> Option { - self.activation_point - } - - pub fn new_validator_slots(&self) -> Option { - self.new_validator_slots - } - - pub fn new_auction_delay(&self) -> Option { - self.new_auction_delay - } - - pub fn 
new_locked_funds_period_millis(&self) -> Option { - self.new_locked_funds_period_millis - } - - pub fn new_round_seigniorage_rate(&self) -> Option> { - self.new_round_seigniorage_rate - } - - pub fn new_unbonding_delay(&self) -> Option { - self.new_unbonding_delay - } - - pub fn global_state_update(&self) -> &BTreeMap { - &self.global_state_update - } - - pub fn with_pre_state_hash(&mut self, pre_state_hash: Blake2bHash) { - self.pre_state_hash = pre_state_hash; - } -} - -#[derive(Clone, Error, Debug)] -pub enum ProtocolUpgradeError { - #[error("Invalid upgrade config")] - InvalidUpgradeConfig, - #[error("Unable to retrieve system contract: {0}")] - UnableToRetrieveSystemContract(String), - #[error("Unable to retrieve system contract package: {0}")] - UnableToRetrieveSystemContractPackage(String), - #[error("Failed to disable previous version of system contract: {0}")] - FailedToDisablePreviousVersion(String), -} - -pub(crate) struct SystemUpgrader -where - S: StateProvider, -{ - new_protocol_version: ProtocolVersion, - protocol_data: ProtocolData, - tracking_copy: Rc::Reader>>>, -} - -impl SystemUpgrader -where - S: StateProvider, -{ - pub(crate) fn new( - new_protocol_version: ProtocolVersion, - protocol_data: ProtocolData, - tracking_copy: Rc::Reader>>>, - ) -> Self { - SystemUpgrader { - new_protocol_version, - protocol_data, - tracking_copy, - } - } - - /// Bump major version for system contracts. 
- pub(crate) fn upgrade_system_contracts_major_version( - &self, - correlation_id: CorrelationId, - ) -> Result<(), ProtocolUpgradeError> { - self.store_contract(correlation_id, self.protocol_data.mint(), MINT)?; - self.store_contract(correlation_id, self.protocol_data.auction(), AUCTION)?; - self.store_contract( - correlation_id, - self.protocol_data.handle_payment(), - HANDLE_PAYMENT, - )?; - self.store_contract( - correlation_id, - self.protocol_data.standard_payment(), - STANDARD_PAYMENT, - )?; - - Ok(()) - } - - fn store_contract( - &self, - correlation_id: CorrelationId, - contract_hash: ContractHash, - contract_name: &str, - ) -> Result<(), ProtocolUpgradeError> { - let contract_key = Key::Hash(contract_hash.value()); - - let mut contract = if let StoredValue::Contract(contract) = self - .tracking_copy - .borrow_mut() - .read(correlation_id, &contract_key) - .map_err(|_| { - ProtocolUpgradeError::UnableToRetrieveSystemContract(contract_name.to_string()) - })? - .ok_or_else(|| { - ProtocolUpgradeError::UnableToRetrieveSystemContract(contract_name.to_string()) - })? { - contract - } else { - return Err(ProtocolUpgradeError::UnableToRetrieveSystemContract( - contract_name.to_string(), - )); - }; - - let contract_package_key = Key::Hash(contract.contract_package_hash().value()); - - let mut contract_package = if let StoredValue::ContractPackage(contract_package) = self - .tracking_copy - .borrow_mut() - .read(correlation_id, &contract_package_key) - .map_err(|_| { - ProtocolUpgradeError::UnableToRetrieveSystemContractPackage( - contract_name.to_string(), - ) - })? - .ok_or_else(|| { - ProtocolUpgradeError::UnableToRetrieveSystemContractPackage( - contract_name.to_string(), - ) - })? 
{ - contract_package - } else { - return Err(ProtocolUpgradeError::UnableToRetrieveSystemContractPackage( - contract_name.to_string(), - )); - }; - - contract_package - .disable_contract_version(contract_hash) - .map_err(|_| { - ProtocolUpgradeError::FailedToDisablePreviousVersion(contract_name.to_string()) - })?; - contract.set_protocol_version(self.new_protocol_version); - contract_package - .insert_contract_version(self.new_protocol_version.value().major, contract_hash); - - self.tracking_copy - .borrow_mut() - .write(contract_hash.into(), StoredValue::Contract(contract)); - self.tracking_copy.borrow_mut().write( - contract_package_key, - StoredValue::ContractPackage(contract_package), - ); - - Ok(()) - } -} diff --git a/execution_engine/src/core/execution/address_generator.rs b/execution_engine/src/core/execution/address_generator.rs deleted file mode 100644 index 431a73f407..0000000000 --- a/execution_engine/src/core/execution/address_generator.rs +++ /dev/null @@ -1,123 +0,0 @@ -use blake2::{ - digest::{Update, VariableOutput}, - VarBlake2b, -}; -use rand::{RngCore, SeedableRng}; -use rand_chacha::ChaChaRng; - -use casper_types::{AccessRights, Phase, URef}; - -use crate::core::{Address, ADDRESS_LENGTH}; - -const SEED_LENGTH: usize = 32; - -/// An `AddressGenerator` generates `URef` addresses. -pub struct AddressGenerator(ChaChaRng); - -impl AddressGenerator { - /// Creates an [`AddressGenerator`] from a 32-byte hash digest and [`Phase`]. 
- pub fn new(hash: &[u8], phase: Phase) -> AddressGenerator { - AddressGeneratorBuilder::new() - .seed_with(&hash) - .seed_with(&[phase as u8]) - .build() - } - - pub fn create_address(&mut self) -> Address { - let mut buff = [0u8; ADDRESS_LENGTH]; - self.0.fill_bytes(&mut buff); - buff - } - - pub fn new_hash_address(&mut self) -> Address { - // TODO: this appears to duplicate the logic of AddressGeneratorBuilder::build() - let pre_hash_bytes = self.create_address(); - // NOTE: Unwrap below is assumed safe as output size of `ADDRESS_LENGTH` is a valid value. - let mut hasher = VarBlake2b::new(ADDRESS_LENGTH).unwrap(); - hasher.update(&pre_hash_bytes); - let mut hash_bytes = [0; ADDRESS_LENGTH]; - hasher.finalize_variable(|hash| hash_bytes.clone_from_slice(hash)); - hash_bytes - } - - pub fn new_uref(&mut self, access_rights: AccessRights) -> URef { - let addr = self.create_address(); - URef::new(addr, access_rights) - } -} - -/// A builder for [`AddressGenerator`]. -#[derive(Default)] -pub struct AddressGeneratorBuilder { - data: Vec, -} - -impl AddressGeneratorBuilder { - pub fn new() -> Self { - Default::default() - } - - pub fn seed_with(mut self, bytes: &[u8]) -> Self { - self.data.extend(bytes); - self - } - - pub fn build(self) -> AddressGenerator { - let mut seed: [u8; SEED_LENGTH] = [0u8; SEED_LENGTH]; - // NOTE: Unwrap below is assumed safe as output size of `SEED_LENGTH` is a valid value. 
- let mut hasher = VarBlake2b::new(SEED_LENGTH).unwrap(); - hasher.update(self.data); - hasher.finalize_variable(|hash| seed.clone_from_slice(hash)); - AddressGenerator(ChaChaRng::from_seed(seed)) - } -} - -#[cfg(test)] -mod tests { - use casper_types::Phase; - - use super::AddressGenerator; - - const DEPLOY_HASH_1: [u8; 32] = [1u8; 32]; - const DEPLOY_HASH_2: [u8; 32] = [2u8; 32]; - - #[test] - fn should_generate_different_numbers_for_different_seeds() { - let mut ag_a = AddressGenerator::new(&DEPLOY_HASH_1, Phase::Session); - let mut ag_b = AddressGenerator::new(&DEPLOY_HASH_2, Phase::Session); - let random_a = ag_a.create_address(); - let random_b = ag_b.create_address(); - - assert_ne!(random_a, random_b) - } - - #[test] - fn should_generate_same_numbers_for_same_seed() { - let mut ag_a = AddressGenerator::new(&DEPLOY_HASH_1, Phase::Session); - let mut ag_b = AddressGenerator::new(&DEPLOY_HASH_1, Phase::Session); - let random_a = ag_a.create_address(); - let random_b = ag_b.create_address(); - - assert_eq!(random_a, random_b) - } - - #[test] - fn should_not_generate_same_numbers_for_different_phase() { - let mut ag_a = AddressGenerator::new(&DEPLOY_HASH_1, Phase::Payment); - let mut ag_b = AddressGenerator::new(&DEPLOY_HASH_1, Phase::Session); - let mut ag_c = AddressGenerator::new(&DEPLOY_HASH_1, Phase::FinalizePayment); - let random_a = ag_a.create_address(); - let random_b = ag_b.create_address(); - let random_c = ag_c.create_address(); - - assert_ne!( - random_a, random_b, - "different phase should have different output" - ); - - assert_ne!( - random_a, random_c, - "different phase should have different output" - ); - } -} diff --git a/execution_engine/src/core/execution/error.rs b/execution_engine/src/core/execution/error.rs deleted file mode 100644 index fab2f845bb..0000000000 --- a/execution_engine/src/core/execution/error.rs +++ /dev/null @@ -1,192 +0,0 @@ -use parity_wasm::elements; -use thiserror::Error; - -use casper_types::{ - 
account::{AddKeyFailure, RemoveKeyFailure, SetThresholdFailure, UpdateKeyFailure}, - bytesrepr, system, AccessRights, ApiError, CLType, CLValueError, ContractPackageHash, - ContractVersionKey, Key, URef, -}; - -use crate::{ - core::resolvers::error::ResolverError, - shared::{wasm_prep, TypeMismatch}, - storage, -}; - -#[derive(Error, Debug, Clone)] -pub enum Error { - #[error("Interpreter error: {}", _0)] - Interpreter(String), - #[error("Storage error: {}", _0)] - Storage(storage::error::Error), - #[error("Serialization error: {}", _0)] - BytesRepr(bytesrepr::Error), - #[error("Named key {} not found", _0)] - NamedKeyNotFound(String), - #[error("Key {} not found", _0)] - KeyNotFound(Key), - #[error("Account {:?} not found", _0)] - AccountNotFound(Key), - #[error("{}", _0)] - TypeMismatch(TypeMismatch), - #[error("Invalid access rights: {}", required)] - InvalidAccess { required: AccessRights }, - #[error("Forged reference: {}", _0)] - ForgedReference(URef), - #[error("URef not found: {}", _0)] - URefNotFound(URef), - #[error("Function not found: {}", _0)] - FunctionNotFound(String), - #[error("{}", _0)] - ParityWasm(elements::Error), - #[error("WASM optimizer error")] - WasmOptimizer, - #[error("Out of gas error")] - GasLimit, - #[error("Return")] - Ret(Vec), - #[error("{}", _0)] - Rng(String), - #[error("Resolver error: {}", _0)] - Resolver(ResolverError), - /// Reverts execution with a provided status - #[error("{}", _0)] - Revert(ApiError), - #[error("{}", _0)] - AddKeyFailure(AddKeyFailure), - #[error("{}", _0)] - RemoveKeyFailure(RemoveKeyFailure), - #[error("{}", _0)] - UpdateKeyFailure(UpdateKeyFailure), - #[error("{}", _0)] - SetThresholdFailure(SetThresholdFailure), - #[error("{}", _0)] - SystemContract(system::Error), - #[error("Deployment authorization failure")] - DeploymentAuthorizationFailure, - #[error("Expected return value")] - ExpectedReturnValue, - #[error("Unexpected return value")] - UnexpectedReturnValue, - #[error("Invalid context")] - 
InvalidContext, - #[error("Incompatible protocol major version. Expected version {expected} but actual version is {actual}")] - IncompatibleProtocolMajorVersion { expected: u32, actual: u32 }, - #[error("{0}")] - CLValue(CLValueError), - #[error("Host buffer is empty")] - HostBufferEmpty, - #[error("Unsupported WASM start")] - UnsupportedWasmStart, - #[error("No active contract versions for contract package")] - NoActiveContractVersions(ContractPackageHash), - #[error("Invalid contract version: {}", _0)] - InvalidContractVersion(ContractVersionKey), - #[error("No such method: {}", _0)] - NoSuchMethod(String), - #[error("Wasm preprocessing error: {}", _0)] - WasmPreprocessing(wasm_prep::PreprocessingError), - #[error("Unexpected Key length. Expected length {expected} but actual length is {actual}")] - InvalidKeyLength { expected: usize, actual: usize }, - #[error("Key is not a URef: {}", _0)] - KeyIsNotAURef(Key), - #[error("Unexpected variant of a stored value")] - UnexpectedStoredValueVariant, - #[error("A locked contract cannot be upgraded")] - LockedContract(ContractPackageHash), -} - -impl From for Error { - fn from(error: wasm_prep::PreprocessingError) -> Self { - Error::WasmPreprocessing(error) - } -} - -impl From for Error { - fn from(_optimizer_error: pwasm_utils::OptimizerError) -> Self { - Error::WasmOptimizer - } -} - -impl Error { - pub fn type_mismatch(expected: CLType, found: CLType) -> Error { - Error::TypeMismatch(TypeMismatch { - expected: format!("{:?}", expected), - found: format!("{:?}", found), - }) - } -} - -impl wasmi::HostError for Error {} - -impl From for Error { - fn from(error: wasmi::Error) -> Self { - match error - .as_host_error() - .and_then(|host_error| host_error.downcast_ref::()) - { - Some(error) => error.clone(), - None => Error::Interpreter(error.into()), - } - } -} - -impl From for Error { - fn from(e: storage::error::Error) -> Self { - Error::Storage(e) - } -} - -impl From for Error { - fn from(e: bytesrepr::Error) -> Self { 
- Error::BytesRepr(e) - } -} - -impl From for Error { - fn from(e: elements::Error) -> Self { - Error::ParityWasm(e) - } -} - -impl From for Error { - fn from(err: ResolverError) -> Self { - Error::Resolver(err) - } -} - -impl From for Error { - fn from(err: AddKeyFailure) -> Self { - Error::AddKeyFailure(err) - } -} - -impl From for Error { - fn from(err: RemoveKeyFailure) -> Self { - Error::RemoveKeyFailure(err) - } -} - -impl From for Error { - fn from(err: UpdateKeyFailure) -> Self { - Error::UpdateKeyFailure(err) - } -} - -impl From for Error { - fn from(err: SetThresholdFailure) -> Self { - Error::SetThresholdFailure(err) - } -} - -impl From for Error { - fn from(error: system::Error) -> Self { - Error::SystemContract(error) - } -} - -impl From for Error { - fn from(e: CLValueError) -> Self { - Error::CLValue(e) - } -} diff --git a/execution_engine/src/core/execution/executor.rs b/execution_engine/src/core/execution/executor.rs deleted file mode 100644 index de47d82153..0000000000 --- a/execution_engine/src/core/execution/executor.rs +++ /dev/null @@ -1,755 +0,0 @@ -use std::{cell::RefCell, collections::BTreeSet, rc::Rc}; - -use parity_wasm::elements::Module; -use tracing::warn; -use wasmi::ModuleRef; - -use casper_types::{ - account::AccountHash, - bytesrepr::FromBytes, - contracts::NamedKeys, - system::{auction, handle_payment, mint}, - BlockTime, CLTyped, CLValue, ContractPackage, DeployHash, EntryPoint, EntryPointType, Key, - Phase, ProtocolVersion, RuntimeArgs, -}; - -use crate::{ - core::{ - engine_state::{ - execution_effect::ExecutionEffect, execution_result::ExecutionResult, - system_contract_cache::SystemContractCache, EngineConfig, - }, - execution::{address_generator::AddressGenerator, Error}, - runtime::{extract_access_rights_from_keys, instance_and_memory, Runtime}, - runtime_context::{self, RuntimeContext}, - tracking_copy::TrackingCopy, - }, - shared::{account::Account, gas::Gas, newtypes::CorrelationId, stored_value::StoredValue}, - 
storage::{global_state::StateReader, protocol_data::ProtocolData}, -}; - -macro_rules! on_fail_charge { - ($fn:expr) => { - match $fn { - Ok(res) => res, - Err(e) => { - let exec_err: Error = e.into(); - warn!("Execution failed: {:?}", exec_err); - return ExecutionResult::precondition_failure(exec_err.into()); - } - } - }; - ($fn:expr, $cost:expr, $transfers:expr) => { - match $fn { - Ok(res) => res, - Err(e) => { - let exec_err: Error = e.into(); - warn!("Execution failed: {:?}", exec_err); - return ExecutionResult::Failure { - error: exec_err.into(), - effect: Default::default(), - transfers: $transfers, - cost: $cost, - }; - } - } - }; - ($fn:expr, $cost:expr, $effect:expr, $transfers:expr) => { - match $fn { - Ok(res) => res, - Err(e) => { - let exec_err: Error = e.into(); - warn!("Execution failed: {:?}", exec_err); - return ExecutionResult::Failure { - error: exec_err.into(), - effect: $effect, - transfers: $transfers, - cost: $cost, - }; - } - } - }; -} - -pub struct Executor { - config: EngineConfig, -} - -#[allow(clippy::too_many_arguments)] -impl Executor { - pub fn new(config: EngineConfig) -> Self { - Executor { config } - } - - pub fn config(&self) -> EngineConfig { - self.config - } - - pub fn exec( - &self, - module: Module, - entry_point: EntryPoint, - args: RuntimeArgs, - base_key: Key, - account: &Account, - named_keys: &mut NamedKeys, - authorization_keys: BTreeSet, - blocktime: BlockTime, - deploy_hash: DeployHash, - gas_limit: Gas, - protocol_version: ProtocolVersion, - correlation_id: CorrelationId, - tracking_copy: Rc>>, - phase: Phase, - protocol_data: ProtocolData, - system_contract_cache: SystemContractCache, - contract_package: &ContractPackage, - ) -> ExecutionResult - where - R: StateReader, - R::Error: Into, - { - let entry_point_name = entry_point.name(); - let entry_point_type = entry_point.entry_point_type(); - let entry_point_access = entry_point.access(); - - let (instance, memory) = on_fail_charge!(instance_and_memory( - 
module.clone(), - protocol_version, - protocol_data.wasm_config() - )); - - let access_rights = { - let keys: Vec = named_keys.values().cloned().collect(); - extract_access_rights_from_keys(keys) - }; - - let hash_address_generator = { - let generator = AddressGenerator::new(deploy_hash.as_bytes(), phase); - Rc::new(RefCell::new(generator)) - }; - let uref_address_generator = { - let generator = AddressGenerator::new(deploy_hash.as_bytes(), phase); - Rc::new(RefCell::new(generator)) - }; - let target_address_generator = { - let generator = AddressGenerator::new(deploy_hash.as_bytes(), phase); - Rc::new(RefCell::new(generator)) - }; - let gas_counter: Gas = Gas::default(); - let transfers = Vec::default(); - - // Snapshot of effects before execution, so in case of error - // only nonce update can be returned. - let effects_snapshot = tracking_copy.borrow().effect(); - - let context = RuntimeContext::new( - tracking_copy, - entry_point_type, - named_keys, - access_rights, - args.clone(), - authorization_keys, - &account, - base_key, - blocktime, - deploy_hash, - gas_limit, - gas_counter, - hash_address_generator, - uref_address_generator, - target_address_generator, - protocol_version, - correlation_id, - phase, - protocol_data, - transfers, - ); - - let mut runtime = Runtime::new(self.config, system_contract_cache, memory, module, context); - - let accounts_access_rights = { - let keys: Vec = account.named_keys().values().cloned().collect(); - extract_access_rights_from_keys(keys) - }; - - on_fail_charge!(runtime_context::validate_entry_point_access_with( - &contract_package, - entry_point_access, - |uref| runtime_context::uref_has_access_rights(uref, &accounts_access_rights) - )); - - if runtime.is_mint(base_key) { - match runtime.call_host_mint( - protocol_version, - entry_point.name(), - &mut runtime.context().named_keys().to_owned(), - &args, - Default::default(), - ) { - Ok(_value) => { - return ExecutionResult::Success { - effect: runtime.context().effect(), - 
transfers: runtime.context().transfers().to_owned(), - cost: runtime.context().gas_counter(), - }; - } - Err(error) => { - return ExecutionResult::Failure { - error: error.into(), - effect: effects_snapshot, - transfers: runtime.context().transfers().to_owned(), - cost: runtime.context().gas_counter(), - }; - } - } - } else if runtime.is_handle_payment(base_key) { - match runtime.call_host_handle_payment( - protocol_version, - entry_point.name(), - &mut runtime.context().named_keys().to_owned(), - &args, - Default::default(), - ) { - Ok(_value) => { - return ExecutionResult::Success { - effect: runtime.context().effect(), - transfers: runtime.context().transfers().to_owned(), - cost: runtime.context().gas_counter(), - }; - } - Err(error) => { - return ExecutionResult::Failure { - error: error.into(), - effect: effects_snapshot, - transfers: runtime.context().transfers().to_owned(), - cost: runtime.context().gas_counter(), - }; - } - } - } else if runtime.is_auction(base_key) { - match runtime.call_host_auction( - protocol_version, - entry_point.name(), - &mut runtime.context().named_keys().to_owned(), - &args, - Default::default(), - ) { - Ok(_value) => { - return ExecutionResult::Success { - effect: runtime.context().effect(), - transfers: runtime.context().transfers().to_owned(), - cost: runtime.context().gas_counter(), - } - } - Err(error) => { - return ExecutionResult::Failure { - error: error.into(), - effect: effects_snapshot, - transfers: runtime.context().transfers().to_owned(), - cost: runtime.context().gas_counter(), - } - } - } - } - on_fail_charge!( - instance.invoke_export(entry_point_name, &[], &mut runtime), - runtime.context().gas_counter(), - effects_snapshot, - runtime.context().transfers().to_owned() - ); - - ExecutionResult::Success { - effect: runtime.context().effect(), - transfers: runtime.context().transfers().to_owned(), - cost: runtime.context().gas_counter(), - } - } - - pub fn exec_standard_payment( - &self, - system_module: Module, - 
payment_args: RuntimeArgs, - payment_base_key: Key, - account: &Account, - payment_named_keys: &mut NamedKeys, - authorization_keys: BTreeSet, - blocktime: BlockTime, - deploy_hash: DeployHash, - payment_gas_limit: Gas, - protocol_version: ProtocolVersion, - correlation_id: CorrelationId, - tracking_copy: Rc>>, - phase: Phase, - protocol_data: ProtocolData, - system_contract_cache: SystemContractCache, - ) -> ExecutionResult - where - R: StateReader, - R::Error: Into, - { - // use host side standard payment - let hash_address_generator = { - let generator = AddressGenerator::new(deploy_hash.as_bytes(), phase); - Rc::new(RefCell::new(generator)) - }; - let uref_address_generator = { - let generator = AddressGenerator::new(deploy_hash.as_bytes(), phase); - Rc::new(RefCell::new(generator)) - }; - let transfer_address_generator = { - let generator = AddressGenerator::new(deploy_hash.as_bytes(), phase); - Rc::new(RefCell::new(generator)) - }; - - let mut runtime = match self.create_runtime( - system_module, - EntryPointType::Session, - payment_args, - payment_named_keys, - Default::default(), - payment_base_key, - &account, - authorization_keys, - blocktime, - deploy_hash, - payment_gas_limit, - hash_address_generator, - uref_address_generator, - transfer_address_generator, - protocol_version, - correlation_id, - Rc::clone(&tracking_copy), - phase, - protocol_data, - system_contract_cache, - ) { - Ok((_instance, runtime)) => runtime, - Err(error) => { - return ExecutionResult::Failure { - error: error.into(), - effect: Default::default(), - transfers: Vec::default(), - cost: Gas::default(), - }; - } - }; - - let effects_snapshot = tracking_copy.borrow().effect(); - - match runtime.call_host_standard_payment() { - Ok(()) => ExecutionResult::Success { - effect: runtime.context().effect(), - transfers: runtime.context().transfers().to_owned(), - cost: runtime.context().gas_counter(), - }, - Err(error) => ExecutionResult::Failure { - error: error.into(), - effect: 
effects_snapshot, - transfers: runtime.context().transfers().to_owned(), - cost: runtime.context().gas_counter(), - }, - } - } - - pub fn exec_system_contract( - &self, - direct_system_contract_call: DirectSystemContractCall, - module: Module, - runtime_args: RuntimeArgs, - named_keys: &mut NamedKeys, - extra_keys: &[Key], - base_key: Key, - account: &Account, - authorization_keys: BTreeSet, - blocktime: BlockTime, - deploy_hash: DeployHash, - gas_limit: Gas, - protocol_version: ProtocolVersion, - correlation_id: CorrelationId, - tracking_copy: Rc>>, - phase: Phase, - protocol_data: ProtocolData, - system_contract_cache: SystemContractCache, - ) -> (Option, ExecutionResult) - where - R: StateReader, - R::Error: Into, - T: FromBytes + CLTyped, - { - match direct_system_contract_call { - DirectSystemContractCall::Slash - | DirectSystemContractCall::RunAuction - | DirectSystemContractCall::DistributeRewards => { - if Some(protocol_data.auction().value()) != base_key.into_hash() { - panic!( - "{} should only be called with the auction contract", - direct_system_contract_call.entry_point_name() - ); - } - } - DirectSystemContractCall::FinalizePayment - | DirectSystemContractCall::GetPaymentPurse => { - if Some(protocol_data.handle_payment().value()) != base_key.into_hash() { - panic!( - "{} should only be called with the handle payment contract", - direct_system_contract_call.entry_point_name() - ); - } - } - DirectSystemContractCall::CreatePurse | DirectSystemContractCall::Transfer => { - if Some(protocol_data.mint().value()) != base_key.into_hash() { - panic!( - "{} should only be called with the mint contract", - direct_system_contract_call.entry_point_name() - ); - } - } - DirectSystemContractCall::GetEraValidators => { - if Some(protocol_data.auction().value()) != base_key.into_hash() { - panic!( - "{} should only be called with the auction contract", - direct_system_contract_call.entry_point_name() - ); - } - } - } - - let hash_address_generator = { - let 
generator = AddressGenerator::new(deploy_hash.as_bytes(), phase); - Rc::new(RefCell::new(generator)) - }; - let uref_address_generator = { - let generator = AddressGenerator::new(deploy_hash.as_bytes(), phase); - Rc::new(RefCell::new(generator)) - }; - let transfer_address_generator = { - let generator = AddressGenerator::new(deploy_hash.as_bytes(), phase); - Rc::new(RefCell::new(generator)) - }; - let gas_counter = Gas::default(); // maybe const? - - // Snapshot of effects before execution, so in case of error only nonce update - // can be returned. - let effect_snapshot = tracking_copy.borrow().effect(); - - let transfers = Vec::default(); - - let (_, runtime) = match self.create_runtime( - module, - EntryPointType::Contract, - runtime_args.clone(), - named_keys, - extra_keys, - base_key, - account, - authorization_keys, - blocktime, - deploy_hash, - gas_limit, - hash_address_generator, - uref_address_generator, - transfer_address_generator, - protocol_version, - correlation_id, - tracking_copy, - phase, - protocol_data, - system_contract_cache, - ) { - Ok((instance, runtime)) => (instance, runtime), - Err(error) => { - return ExecutionResult::Failure { - effect: effect_snapshot, - transfers, - cost: gas_counter, - error: error.into(), - } - .take_without_ret() - } - }; - - let mut inner_named_keys = runtime.context().named_keys().clone(); - let ret = direct_system_contract_call.host_exec( - runtime, - protocol_version, - &mut inner_named_keys, - &runtime_args, - extra_keys, - effect_snapshot, - ); - *named_keys = inner_named_keys; - ret - } - - /// Used to execute arbitrary wasm; necessary for running system contract installers / upgraders - /// This is not meant to be used for executing system contracts. 
- pub fn exec_wasm_direct( - &self, - module: Module, - entry_point_name: &str, - args: RuntimeArgs, - account: &mut Account, - authorization_keys: BTreeSet, - blocktime: BlockTime, - deploy_hash: DeployHash, - gas_limit: Gas, - hash_address_generator: Rc>, - uref_address_generator: Rc>, - transfer_address_generator: Rc>, - protocol_version: ProtocolVersion, - correlation_id: CorrelationId, - tracking_copy: Rc>>, - phase: Phase, - protocol_data: ProtocolData, - system_contract_cache: SystemContractCache, - ) -> Result - where - R: StateReader, - R::Error: Into, - T: FromBytes + CLTyped, - { - let mut named_keys: NamedKeys = account.named_keys().clone(); - let base_key = account.account_hash().into(); - - let (instance, mut runtime) = self.create_runtime( - module, - EntryPointType::Session, - args, - &mut named_keys, - Default::default(), - base_key, - account, - authorization_keys, - blocktime, - deploy_hash, - gas_limit, - hash_address_generator, - uref_address_generator, - transfer_address_generator, - protocol_version, - correlation_id, - tracking_copy, - phase, - protocol_data, - system_contract_cache, - )?; - - let error: wasmi::Error = match instance.invoke_export(entry_point_name, &[], &mut runtime) - { - Err(error) => error, - Ok(_) => { - // This duplicates the behavior of runtime sub_call. - // If `instance.invoke_export` returns `Ok` and the `host_buffer` is `None`, the - // contract's execution succeeded but did not explicitly call `runtime::ret()`. - // Treat as though the execution returned the unit type `()` as per Rust - // functions which don't specify a return value. 
- let result = runtime.take_host_buffer().unwrap_or(CLValue::from_t(())?); - let ret = result.into_t()?; - *account.named_keys_mut() = named_keys; - return Ok(ret); - } - }; - - let return_value: CLValue = match error - .as_host_error() - .and_then(|host_error| host_error.downcast_ref::()) - { - Some(Error::Ret(_)) => runtime - .take_host_buffer() - .ok_or(Error::ExpectedReturnValue)?, - Some(Error::Revert(code)) => return Err(Error::Revert(*code)), - Some(error) => return Err(error.clone()), - _ => return Err(Error::Interpreter(error.into())), - }; - - let ret = return_value.into_t()?; - *account.named_keys_mut() = named_keys; - Ok(ret) - } - - pub fn create_runtime<'a, R>( - &self, - module: Module, - entry_point_type: EntryPointType, - runtime_args: RuntimeArgs, - named_keys: &'a mut NamedKeys, - extra_keys: &[Key], - base_key: Key, - account: &'a Account, - authorization_keys: BTreeSet, - blocktime: BlockTime, - deploy_hash: DeployHash, - gas_limit: Gas, - hash_address_generator: Rc>, - uref_address_generator: Rc>, - transfer_address_generator: Rc>, - protocol_version: ProtocolVersion, - correlation_id: CorrelationId, - tracking_copy: Rc>>, - phase: Phase, - protocol_data: ProtocolData, - system_contract_cache: SystemContractCache, - ) -> Result<(ModuleRef, Runtime<'a, R>), Error> - where - R: StateReader, - R::Error: Into, - { - let access_rights = { - let mut keys: Vec = named_keys.values().cloned().collect(); - keys.extend(extra_keys); - extract_access_rights_from_keys(keys) - }; - - let gas_counter = Gas::default(); - let transfers = Vec::default(); - - let runtime_context = RuntimeContext::new( - tracking_copy, - entry_point_type, - named_keys, - access_rights, - runtime_args, - authorization_keys, - account, - base_key, - blocktime, - deploy_hash, - gas_limit, - gas_counter, - hash_address_generator, - uref_address_generator, - transfer_address_generator, - protocol_version, - correlation_id, - phase, - protocol_data, - transfers, - ); - - let (instance, 
memory) = instance_and_memory( - module.clone(), - protocol_version, - protocol_data.wasm_config(), - )?; - - let runtime = Runtime::new( - self.config, - system_contract_cache, - memory, - module, - runtime_context, - ); - - Ok((instance, runtime)) - } -} - -pub enum DirectSystemContractCall { - Slash, - RunAuction, - DistributeRewards, - FinalizePayment, - CreatePurse, - Transfer, - GetEraValidators, - GetPaymentPurse, -} - -impl DirectSystemContractCall { - fn entry_point_name(&self) -> &str { - match self { - DirectSystemContractCall::Slash => auction::METHOD_SLASH, - DirectSystemContractCall::RunAuction => auction::METHOD_RUN_AUCTION, - DirectSystemContractCall::DistributeRewards => auction::METHOD_DISTRIBUTE, - DirectSystemContractCall::FinalizePayment => handle_payment::METHOD_FINALIZE_PAYMENT, - DirectSystemContractCall::CreatePurse => mint::METHOD_CREATE, - DirectSystemContractCall::Transfer => mint::METHOD_TRANSFER, - DirectSystemContractCall::GetEraValidators => auction::METHOD_GET_ERA_VALIDATORS, - DirectSystemContractCall::GetPaymentPurse => handle_payment::METHOD_GET_PAYMENT_PURSE, - } - } - - fn host_exec( - &self, - mut runtime: Runtime, - protocol_version: ProtocolVersion, - named_keys: &mut NamedKeys, - runtime_args: &RuntimeArgs, - extra_keys: &[Key], - execution_effect: ExecutionEffect, - ) -> (Option, ExecutionResult) - where - R: StateReader, - R::Error: Into, - T: FromBytes + CLTyped, - { - let entry_point_name = self.entry_point_name(); - let result = match self { - DirectSystemContractCall::Slash - | DirectSystemContractCall::RunAuction - | DirectSystemContractCall::DistributeRewards => runtime.call_host_auction( - protocol_version, - entry_point_name, - named_keys, - runtime_args, - extra_keys, - ), - DirectSystemContractCall::FinalizePayment => runtime.call_host_handle_payment( - protocol_version, - entry_point_name, - named_keys, - runtime_args, - extra_keys, - ), - DirectSystemContractCall::CreatePurse | 
DirectSystemContractCall::Transfer => runtime - .call_host_mint( - protocol_version, - entry_point_name, - named_keys, - runtime_args, - extra_keys, - ), - DirectSystemContractCall::GetEraValidators => runtime.call_host_auction( - protocol_version, - entry_point_name, - named_keys, - runtime_args, - extra_keys, - ), - - DirectSystemContractCall::GetPaymentPurse => runtime.call_host_handle_payment( - protocol_version, - entry_point_name, - named_keys, - runtime_args, - extra_keys, - ), - }; - - match result { - Ok(value) => match value.into_t() { - Ok(ret) => ExecutionResult::Success { - effect: runtime.context().effect(), - transfers: runtime.context().transfers().to_owned(), - cost: runtime.context().gas_counter(), - } - .take_with_ret(ret), - Err(error) => ExecutionResult::Failure { - error: Error::CLValue(error).into(), - effect: execution_effect, - transfers: runtime.context().transfers().to_owned(), - cost: runtime.context().gas_counter(), - } - .take_without_ret(), - }, - Err(error) => ExecutionResult::Failure { - error: error.into(), - effect: execution_effect, - transfers: runtime.context().transfers().to_owned(), - cost: runtime.context().gas_counter(), - } - .take_without_ret(), - } - } -} diff --git a/execution_engine/src/core/execution/mod.rs b/execution_engine/src/core/execution/mod.rs deleted file mode 100644 index b6103640aa..0000000000 --- a/execution_engine/src/core/execution/mod.rs +++ /dev/null @@ -1,12 +0,0 @@ -mod address_generator; -mod error; -#[macro_use] -mod executor; -#[cfg(test)] -mod tests; - -pub use self::{ - address_generator::{AddressGenerator, AddressGeneratorBuilder}, - error::Error, - executor::{DirectSystemContractCall, Executor}, -}; diff --git a/execution_engine/src/core/execution/tests.rs b/execution_engine/src/core/execution/tests.rs deleted file mode 100644 index d1cc178d43..0000000000 --- a/execution_engine/src/core/execution/tests.rs +++ /dev/null @@ -1,81 +0,0 @@ -use tracing::warn; - -use casper_types::{Key, U512}; - 
-use super::Error; -use crate::{ - core::engine_state::{ - execution_effect::ExecutionEffect, execution_result::ExecutionResult, op::Op, - }, - shared::{gas::Gas, transform::Transform}, -}; - -fn on_fail_charge_test_helper( - f: impl Fn() -> Result, - success_cost: Gas, - error_cost: Gas, -) -> ExecutionResult { - let transfers = Vec::default(); - let _result = on_fail_charge!(f(), error_cost, transfers); - ExecutionResult::Success { - effect: Default::default(), - transfers, - cost: success_cost, - } -} - -#[test] -fn on_fail_charge_ok_test() { - let val = Gas::new(U512::from(123)); - match on_fail_charge_test_helper(|| Ok(()), val, Gas::new(U512::from(456))) { - ExecutionResult::Success { cost, .. } => assert_eq!(cost, val), - ExecutionResult::Failure { .. } => panic!("Should be success"), - } -} - -#[test] -fn on_fail_charge_err_laziness_test() { - let input: Result<(), Error> = Err(Error::GasLimit); - let error_cost = Gas::new(U512::from(456)); - match on_fail_charge_test_helper(|| input.clone(), Gas::new(U512::from(123)), error_cost) { - ExecutionResult::Success { .. } => panic!("Should fail"), - ExecutionResult::Failure { cost, .. } => assert_eq!(cost, error_cost), - } -} - -#[test] -fn on_fail_charge_with_action() { - let f = || { - let input: Result<(), Error> = Err(Error::GasLimit); - let transfers = Vec::default(); - on_fail_charge!( - input, - Gas::new(U512::from(456)), - { - let mut effect = ExecutionEffect::default(); - - effect.ops.insert(Key::Hash([42u8; 32]), Op::Read); - effect - .transforms - .insert(Key::Hash([42u8; 32]), Transform::Identity); - - effect - }, - transfers - ); - ExecutionResult::Success { - effect: Default::default(), - transfers: Vec::default(), - cost: Gas::default(), - } - }; - match f() { - ExecutionResult::Success { .. } => panic!("Should fail"), - ExecutionResult::Failure { cost, effect, .. 
} => { - assert_eq!(cost, Gas::new(U512::from(456))); - // Check if the containers are non-empty - assert_eq!(effect.ops.len(), 1); - assert_eq!(effect.transforms.len(), 1); - } - } -} diff --git a/execution_engine/src/core/resolvers/error.rs b/execution_engine/src/core/resolvers/error.rs deleted file mode 100644 index 5426cb2028..0000000000 --- a/execution_engine/src/core/resolvers/error.rs +++ /dev/null @@ -1,11 +0,0 @@ -use thiserror::Error; - -use casper_types::ProtocolVersion; - -#[derive(Error, Debug, Copy, Clone)] -pub enum ResolverError { - #[error("Unknown protocol version: {}", _0)] - UnknownProtocolVersion(ProtocolVersion), - #[error("No imported memory")] - NoImportedMemory, -} diff --git a/execution_engine/src/core/resolvers/memory_resolver.rs b/execution_engine/src/core/resolvers/memory_resolver.rs deleted file mode 100644 index 5caf1d871b..0000000000 --- a/execution_engine/src/core/resolvers/memory_resolver.rs +++ /dev/null @@ -1,11 +0,0 @@ -use wasmi::MemoryRef; - -use super::error::ResolverError; - -/// This trait takes care of returning an instance of allocated memory. -/// -/// This happens once the WASM program tries to resolve "memory". Whenever -/// contract didn't request a memory this method should return an Error. -pub trait MemoryResolver { - fn memory_ref(&self) -> Result; -} diff --git a/execution_engine/src/core/resolvers/mod.rs b/execution_engine/src/core/resolvers/mod.rs deleted file mode 100644 index cf3d23070f..0000000000 --- a/execution_engine/src/core/resolvers/mod.rs +++ /dev/null @@ -1,47 +0,0 @@ -pub mod error; -pub mod memory_resolver; -pub mod v1_function_index; -mod v1_resolver; - -use wasmi::ModuleImportResolver; - -use casper_types::ProtocolVersion; - -use self::error::ResolverError; -use crate::{core::resolvers::memory_resolver::MemoryResolver, shared::wasm_config::WasmConfig}; - -/// Creates a module resolver for given protocol version. -/// -/// * `protocol_version` Version of the protocol. Can't be lower than 1. 
-pub fn create_module_resolver( - protocol_version: ProtocolVersion, - wasm_config: &WasmConfig, -) -> Result { - // TODO: revisit how protocol_version check here is meant to combine with upgrade - if protocol_version >= ProtocolVersion::V1_0_0 { - return Ok(v1_resolver::RuntimeModuleImportResolver::new( - wasm_config.max_memory, - )); - } - Err(ResolverError::UnknownProtocolVersion(protocol_version)) -} - -#[cfg(test)] -mod tests { - use casper_types::ProtocolVersion; - - use super::*; - use crate::shared::wasm_config::WasmConfig; - - #[test] - fn resolve_invalid_module() { - assert!( - create_module_resolver(ProtocolVersion::default(), &WasmConfig::default()).is_err() - ); - } - - #[test] - fn protocol_version_1_always_resolves() { - assert!(create_module_resolver(ProtocolVersion::V1_0_0, &WasmConfig::default()).is_ok()); - } -} diff --git a/execution_engine/src/core/resolvers/v1_function_index.rs b/execution_engine/src/core/resolvers/v1_function_index.rs deleted file mode 100644 index 46143ef207..0000000000 --- a/execution_engine/src/core/resolvers/v1_function_index.rs +++ /dev/null @@ -1,90 +0,0 @@ -use std::convert::TryFrom; - -use num_derive::{FromPrimitive, ToPrimitive}; -use num_traits::{FromPrimitive, ToPrimitive}; - -#[derive(Debug, PartialEq, FromPrimitive, ToPrimitive, Clone, Copy)] -#[repr(usize)] -pub enum FunctionIndex { - WriteFuncIndex, - ReadFuncIndex, - AddFuncIndex, - NewFuncIndex, - RetFuncIndex, - CallContractFuncIndex, - GetKeyFuncIndex, - GasFuncIndex, - HasKeyFuncIndex, - PutKeyFuncIndex, - IsValidURefFnIndex, - RevertFuncIndex, - AddAssociatedKeyFuncIndex, - RemoveAssociatedKeyFuncIndex, - UpdateAssociatedKeyFuncIndex, - SetActionThresholdFuncIndex, - LoadNamedKeysFuncIndex, - RemoveKeyFuncIndex, - GetCallerIndex, - GetBlocktimeIndex, - CreatePurseIndex, - TransferToAccountIndex, - TransferFromPurseToAccountIndex, - TransferFromPurseToPurseIndex, - GetBalanceIndex, - GetPhaseIndex, - GetSystemContractIndex, - GetMainPurseIndex, - 
ReadHostBufferIndex, - CreateContractPackageAtHash, - AddContractVersion, - DisableContractVersion, - CallVersionedContract, - CreateContractUserGroup, - #[cfg(feature = "test-support")] - PrintIndex, - GetRuntimeArgsizeIndex, - GetRuntimeArgIndex, - RemoveContractUserGroupIndex, - ExtendContractUserGroupURefsIndex, - RemoveContractUserGroupURefsIndex, - Blake2b, - RecordTransfer, - RecordEraInfo, -} - -impl Into for FunctionIndex { - fn into(self) -> usize { - // NOTE: This can't fail as `FunctionIndex` is represented by usize, - // so this serves mostly as a syntax sugar. - self.to_usize().unwrap() - } -} - -impl TryFrom for FunctionIndex { - type Error = &'static str; - fn try_from(value: usize) -> Result { - FromPrimitive::from_usize(value).ok_or("Invalid function index") - } -} - -#[cfg(test)] -mod tests { - use super::FunctionIndex; - use std::convert::TryFrom; - - #[test] - fn primitive_to_enum() { - FunctionIndex::try_from(19).expect("Unable to create enum from number"); - } - - #[test] - fn enum_to_primitive() { - let element = FunctionIndex::UpdateAssociatedKeyFuncIndex; - let _primitive: usize = element.into(); - } - - #[test] - fn invalid_index() { - assert!(FunctionIndex::try_from(123_456_789usize).is_err()); - } -} diff --git a/execution_engine/src/core/runtime/args.rs b/execution_engine/src/core/runtime/args.rs deleted file mode 100644 index 330c92a968..0000000000 --- a/execution_engine/src/core/runtime/args.rs +++ /dev/null @@ -1,232 +0,0 @@ -use wasmi::{FromRuntimeValue, RuntimeArgs, Trap}; - -pub(crate) trait Args -where - Self: Sized, -{ - fn parse(args: RuntimeArgs) -> Result; -} - -impl Args for u32 { - fn parse(args: RuntimeArgs) -> Result { - args.nth_checked(0) - } -} - -impl Args for usize { - fn parse(args: RuntimeArgs) -> Result { - let a0: u32 = args.nth_checked(0)?; - Ok(a0 as usize) - } -} - -impl Args for (T1, T2) -where - T1: FromRuntimeValue + Sized, - T2: FromRuntimeValue + Sized, -{ - fn parse(args: RuntimeArgs) -> Result { - let 
a0: T1 = args.nth_checked(0)?; - let a1: T2 = args.nth_checked(1)?; - Ok((a0, a1)) - } -} - -impl Args for (T1, T2, T3) -where - T1: FromRuntimeValue + Sized, - T2: FromRuntimeValue + Sized, - T3: FromRuntimeValue + Sized, -{ - fn parse(args: RuntimeArgs) -> Result { - let a0: T1 = args.nth_checked(0)?; - let a1: T2 = args.nth_checked(1)?; - let a2: T3 = args.nth_checked(2)?; - Ok((a0, a1, a2)) - } -} - -impl Args for (T1, T2, T3, T4) -where - T1: FromRuntimeValue + Sized, - T2: FromRuntimeValue + Sized, - T3: FromRuntimeValue + Sized, - T4: FromRuntimeValue + Sized, -{ - fn parse(args: RuntimeArgs) -> Result { - let a0: T1 = args.nth_checked(0)?; - let a1: T2 = args.nth_checked(1)?; - let a2: T3 = args.nth_checked(2)?; - let a3: T4 = args.nth_checked(3)?; - Ok((a0, a1, a2, a3)) - } -} - -impl Args for (T1, T2, T3, T4, T5) -where - T1: FromRuntimeValue + Sized, - T2: FromRuntimeValue + Sized, - T3: FromRuntimeValue + Sized, - T4: FromRuntimeValue + Sized, - T5: FromRuntimeValue + Sized, -{ - fn parse(args: RuntimeArgs) -> Result { - let a0: T1 = args.nth_checked(0)?; - let a1: T2 = args.nth_checked(1)?; - let a2: T3 = args.nth_checked(2)?; - let a3: T4 = args.nth_checked(3)?; - let a4: T5 = args.nth_checked(4)?; - Ok((a0, a1, a2, a3, a4)) - } -} - -impl Args for (T1, T2, T3, T4, T5, T6) -where - T1: FromRuntimeValue + Sized, - T2: FromRuntimeValue + Sized, - T3: FromRuntimeValue + Sized, - T4: FromRuntimeValue + Sized, - T5: FromRuntimeValue + Sized, - T6: FromRuntimeValue + Sized, -{ - fn parse(args: RuntimeArgs) -> Result { - let a0: T1 = args.nth_checked(0)?; - let a1: T2 = args.nth_checked(1)?; - let a2: T3 = args.nth_checked(2)?; - let a3: T4 = args.nth_checked(3)?; - let a4: T5 = args.nth_checked(4)?; - let a5: T6 = args.nth_checked(5)?; - Ok((a0, a1, a2, a3, a4, a5)) - } -} - -impl Args for (T1, T2, T3, T4, T5, T6, T7) -where - T1: FromRuntimeValue + Sized, - T2: FromRuntimeValue + Sized, - T3: FromRuntimeValue + Sized, - T4: FromRuntimeValue + Sized, - T5: 
FromRuntimeValue + Sized, - T6: FromRuntimeValue + Sized, - T7: FromRuntimeValue + Sized, -{ - fn parse(args: RuntimeArgs) -> Result { - let a0: T1 = args.nth_checked(0)?; - let a1: T2 = args.nth_checked(1)?; - let a2: T3 = args.nth_checked(2)?; - let a3: T4 = args.nth_checked(3)?; - let a4: T5 = args.nth_checked(4)?; - let a5: T6 = args.nth_checked(5)?; - let a6: T7 = args.nth_checked(6)?; - Ok((a0, a1, a2, a3, a4, a5, a6)) - } -} - -impl Args for (T1, T2, T3, T4, T5, T6, T7, T8) -where - T1: FromRuntimeValue + Sized, - T2: FromRuntimeValue + Sized, - T3: FromRuntimeValue + Sized, - T4: FromRuntimeValue + Sized, - T5: FromRuntimeValue + Sized, - T6: FromRuntimeValue + Sized, - T7: FromRuntimeValue + Sized, - T8: FromRuntimeValue + Sized, -{ - fn parse(args: RuntimeArgs) -> Result { - let a0: T1 = args.nth_checked(0)?; - let a1: T2 = args.nth_checked(1)?; - let a2: T3 = args.nth_checked(2)?; - let a3: T4 = args.nth_checked(3)?; - let a4: T5 = args.nth_checked(4)?; - let a5: T6 = args.nth_checked(5)?; - let a6: T7 = args.nth_checked(6)?; - let a7: T8 = args.nth_checked(7)?; - Ok((a0, a1, a2, a3, a4, a5, a6, a7)) - } -} - -impl Args for (T1, T2, T3, T4, T5, T6, T7, T8, T9) -where - T1: FromRuntimeValue + Sized, - T2: FromRuntimeValue + Sized, - T3: FromRuntimeValue + Sized, - T4: FromRuntimeValue + Sized, - T5: FromRuntimeValue + Sized, - T6: FromRuntimeValue + Sized, - T7: FromRuntimeValue + Sized, - T8: FromRuntimeValue + Sized, - T9: FromRuntimeValue + Sized, -{ - fn parse(args: RuntimeArgs) -> Result { - let a0: T1 = args.nth_checked(0)?; - let a1: T2 = args.nth_checked(1)?; - let a2: T3 = args.nth_checked(2)?; - let a3: T4 = args.nth_checked(3)?; - let a4: T5 = args.nth_checked(4)?; - let a5: T6 = args.nth_checked(5)?; - let a6: T7 = args.nth_checked(6)?; - let a7: T8 = args.nth_checked(7)?; - let a8: T9 = args.nth_checked(8)?; - Ok((a0, a1, a2, a3, a4, a5, a6, a7, a8)) - } -} - -impl Args for (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10) -where - T1: 
FromRuntimeValue + Sized, - T2: FromRuntimeValue + Sized, - T3: FromRuntimeValue + Sized, - T4: FromRuntimeValue + Sized, - T5: FromRuntimeValue + Sized, - T6: FromRuntimeValue + Sized, - T7: FromRuntimeValue + Sized, - T8: FromRuntimeValue + Sized, - T9: FromRuntimeValue + Sized, - T10: FromRuntimeValue + Sized, -{ - fn parse(args: RuntimeArgs) -> Result { - let a0: T1 = args.nth_checked(0)?; - let a1: T2 = args.nth_checked(1)?; - let a2: T3 = args.nth_checked(2)?; - let a3: T4 = args.nth_checked(3)?; - let a4: T5 = args.nth_checked(4)?; - let a5: T6 = args.nth_checked(5)?; - let a6: T7 = args.nth_checked(6)?; - let a7: T8 = args.nth_checked(7)?; - let a8: T9 = args.nth_checked(8)?; - let a9: T10 = args.nth_checked(9)?; - Ok((a0, a1, a2, a3, a4, a5, a6, a7, a8, a9)) - } -} - -impl Args - for (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11) -where - T1: FromRuntimeValue + Sized, - T2: FromRuntimeValue + Sized, - T3: FromRuntimeValue + Sized, - T4: FromRuntimeValue + Sized, - T5: FromRuntimeValue + Sized, - T6: FromRuntimeValue + Sized, - T7: FromRuntimeValue + Sized, - T8: FromRuntimeValue + Sized, - T9: FromRuntimeValue + Sized, - T10: FromRuntimeValue + Sized, - T11: FromRuntimeValue + Sized, -{ - fn parse(args: RuntimeArgs) -> Result { - let a0: T1 = args.nth_checked(0)?; - let a1: T2 = args.nth_checked(1)?; - let a2: T3 = args.nth_checked(2)?; - let a3: T4 = args.nth_checked(3)?; - let a4: T5 = args.nth_checked(4)?; - let a5: T6 = args.nth_checked(5)?; - let a6: T7 = args.nth_checked(6)?; - let a7: T8 = args.nth_checked(7)?; - let a8: T9 = args.nth_checked(8)?; - let a9: T10 = args.nth_checked(9)?; - let a10: T11 = args.nth_checked(10)?; - Ok((a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10)) - } -} diff --git a/execution_engine/src/core/runtime/auction_internal.rs b/execution_engine/src/core/runtime/auction_internal.rs deleted file mode 100644 index 4038ee963a..0000000000 --- a/execution_engine/src/core/runtime/auction_internal.rs +++ /dev/null @@ -1,268 +0,0 @@ 
-use std::collections::BTreeSet; - -use casper_types::{ - account, - account::AccountHash, - bytesrepr::{FromBytes, ToBytes}, - system::auction::{ - AccountProvider, Auction, Bid, EraInfo, Error, MintProvider, RuntimeProvider, - SeigniorageRecipients, StorageProvider, SystemProvider, UnbondingPurse, - }, - CLTyped, CLValue, EraId, Key, KeyTag, TransferredTo, URef, BLAKE2B_DIGEST_LENGTH, U512, -}; - -use super::Runtime; -use crate::{ - core::execution, shared::stored_value::StoredValue, storage::global_state::StateReader, -}; - -impl From for Option { - fn from(exec_error: execution::Error) -> Self { - match exec_error { - // This is used to propagate [`execution::Error::GasLimit`] to make sure [`Auction`] - // contract running natively supports propagating gas limit errors without a panic. - execution::Error::GasLimit => Some(Error::GasLimit), - // There are possibly other exec errors happening but such translation would be lossy. - _ => None, - } - } -} - -impl<'a, R> StorageProvider for Runtime<'a, R> -where - R: StateReader, - R::Error: Into, -{ - fn read(&mut self, uref: URef) -> Result, Error> { - match self.context.read_gs(&uref.into()) { - Ok(Some(StoredValue::CLValue(cl_value))) => { - Ok(Some(cl_value.into_t().map_err(|_| Error::CLValue)?)) - } - Ok(Some(_)) => Err(Error::Storage), - Ok(None) => Ok(None), - Err(execution::Error::BytesRepr(_)) => Err(Error::Serialization), - // NOTE: This extra condition is needed to correctly propagate GasLimit to the user. 
See - // also [`Runtime::reverter`] and [`to_auction_error`] - Err(execution::Error::GasLimit) => Err(Error::GasLimit), - Err(_) => Err(Error::Storage), - } - } - - fn write(&mut self, uref: URef, value: T) -> Result<(), Error> { - let cl_value = CLValue::from_t(value).map_err(|_| Error::CLValue)?; - self.context - .metered_write_gs(uref.into(), StoredValue::CLValue(cl_value)) - .map_err(|exec_error| >::from(exec_error).unwrap_or(Error::Storage)) - } - - fn read_bid(&mut self, account_hash: &AccountHash) -> Result, Error> { - match self.context.read_gs(&Key::Bid(*account_hash)) { - Ok(Some(StoredValue::Bid(bid))) => Ok(Some(*bid)), - Ok(Some(_)) => Err(Error::Storage), - Ok(None) => Ok(None), - Err(execution::Error::BytesRepr(_)) => Err(Error::Serialization), - // NOTE: This extra condition is needed to correctly propagate GasLimit to the user. See - // also [`Runtime::reverter`] and [`to_auction_error`] - Err(execution::Error::GasLimit) => Err(Error::GasLimit), - Err(_) => Err(Error::Storage), - } - } - - fn write_bid(&mut self, account_hash: AccountHash, bid: Bid) -> Result<(), Error> { - self.context - .metered_write_gs_unsafe(Key::Bid(account_hash), StoredValue::Bid(Box::new(bid))) - .map_err(|exec_error| >::from(exec_error).unwrap_or(Error::Storage)) - } - - fn read_withdraw(&mut self, account_hash: &AccountHash) -> Result, Error> { - match self.context.read_gs(&Key::Withdraw(*account_hash)) { - Ok(Some(StoredValue::Withdraw(unbonding_purses))) => Ok(unbonding_purses), - Ok(Some(_)) => Err(Error::Storage), - Ok(None) => Ok(Vec::new()), - Err(execution::Error::BytesRepr(_)) => Err(Error::Serialization), - // NOTE: This extra condition is needed to correctly propagate GasLimit to the user. 
See - // also [`Runtime::reverter`] and [`to_auction_error`] - Err(execution::Error::GasLimit) => Err(Error::GasLimit), - Err(_) => Err(Error::Storage), - } - } - - fn write_withdraw( - &mut self, - account_hash: AccountHash, - unbonding_purses: Vec, - ) -> Result<(), Error> { - self.context - .metered_write_gs_unsafe( - Key::Withdraw(account_hash), - StoredValue::Withdraw(unbonding_purses), - ) - .map_err(|exec_error| >::from(exec_error).unwrap_or(Error::Storage)) - } - - fn read_era_validators( - &mut self, - era_id: EraId, - ) -> Result, Error> { - match self.context.read_gs(&Key::EraValidators(era_id)) { - Ok(Some(StoredValue::EraValidators(recipients))) => Ok(Some(recipients)), - Ok(Some(_)) => Err(Error::Storage), - Ok(None) => Ok(None), - Err(execution::Error::BytesRepr(_)) => Err(Error::Serialization), - // NOTE: This extra condition is needed to correctly propagate GasLimit to the user. See - // also [`Runtime::reverter`] and [`to_auction_error`] - Err(execution::Error::GasLimit) => Err(Error::GasLimit), - Err(_) => Err(Error::Storage), - } - } - - fn write_era_validators( - &mut self, - era_id: EraId, - recipients: SeigniorageRecipients, - ) -> Result<(), Error> { - self.context - .metered_write_gs_unsafe( - Key::EraValidators(era_id), - StoredValue::EraValidators(recipients), - ) - .map_err(|exec_error| >::from(exec_error).unwrap_or(Error::Storage)) - } -} - -impl<'a, R> SystemProvider for Runtime<'a, R> -where - R: StateReader, - R::Error: Into, -{ - fn create_purse(&mut self) -> Result { - Runtime::create_purse(self).map_err(|exec_error| { - >::from(exec_error).unwrap_or(Error::CreatePurseFailed) - }) - } - - fn get_balance(&mut self, purse: URef) -> Result, Error> { - Runtime::get_balance(self, purse) - .map_err(|exec_error| >::from(exec_error).unwrap_or(Error::GetBalance)) - } - - fn transfer_from_purse_to_purse( - &mut self, - source: URef, - target: URef, - amount: U512, - ) -> Result<(), Error> { - let mint_contract_hash = 
self.get_mint_contract(); - match self.mint_transfer(mint_contract_hash, None, source, target, amount, None) { - Ok(Ok(_)) => Ok(()), - // NOTE: Error below is a mint error which is lossy conversion. In calling code we map - // it anyway into more specific error. - Ok(Err(_mint_error)) => Err(Error::Transfer), - Err(exec_error) => Err(>::from(exec_error).unwrap_or(Error::Transfer)), - } - } - - fn record_era_info(&mut self, era_id: EraId, era_info: EraInfo) -> Result<(), Error> { - Runtime::record_era_info(self, era_id, era_info) - .map_err(|exec_error| >::from(exec_error).unwrap_or(Error::RecordEraInfo)) - } -} - -impl<'a, R> RuntimeProvider for Runtime<'a, R> -where - R: StateReader, - R::Error: Into, -{ - fn get_caller(&self) -> AccountHash { - self.context.get_caller() - } - - fn named_keys_get(&self, name: &str) -> Option { - self.context.named_keys_get(name).cloned() - } - - fn get_keys(&mut self, key_tag: &KeyTag) -> Result, Error> { - self.context.get_keys(key_tag).map_err(|_| Error::Storage) - } - - fn blake2b>(&self, data: T) -> [u8; BLAKE2B_DIGEST_LENGTH] { - account::blake2b(data) - } -} - -impl<'a, R> MintProvider for Runtime<'a, R> -where - R: StateReader, - R::Error: Into, -{ - fn transfer_purse_to_account( - &mut self, - source: URef, - target: AccountHash, - amount: U512, - ) -> Result { - match self.transfer_from_purse_to_account(source, target, amount, None) { - Ok(Ok(transferred_to)) => Ok(transferred_to), - Ok(Err(_api_error)) => Err(Error::Transfer), - Err(exec_error) => Err(>::from(exec_error).unwrap_or(Error::Transfer)), - } - } - - fn transfer_purse_to_purse( - &mut self, - source: URef, - target: URef, - amount: U512, - ) -> Result<(), Error> { - let mint_contract_hash = self.get_mint_contract(); - match self.mint_transfer(mint_contract_hash, None, source, target, amount, None) { - Ok(Ok(_)) => Ok(()), - Ok(Err(_mint_error)) => Err(Error::Transfer), - Err(exec_error) => Err(>::from(exec_error).unwrap_or(Error::Transfer)), - } - } - - fn 
balance(&mut self, purse: URef) -> Result, Error> { - self.get_balance(purse) - .map_err(|exec_error| >::from(exec_error).unwrap_or(Error::GetBalance)) - } - - fn read_base_round_reward(&mut self) -> Result { - let mint_contract = self.get_mint_contract(); - self.mint_read_base_round_reward(mint_contract) - .map_err(|exec_error| >::from(exec_error).unwrap_or(Error::MissingValue)) - } - - fn mint(&mut self, amount: U512) -> Result { - let mint_contract = self.get_mint_contract(); - self.mint_mint(mint_contract, amount) - .map_err(|exec_error| >::from(exec_error).unwrap_or(Error::MintReward)) - } - - fn reduce_total_supply(&mut self, amount: U512) -> Result<(), Error> { - let mint_contract = self.get_mint_contract(); - self.mint_reduce_total_supply(mint_contract, amount) - .map_err(|exec_error| >::from(exec_error).unwrap_or(Error::MintReward)) - } -} - -impl<'a, R> AccountProvider for Runtime<'a, R> -where - R: StateReader, - R::Error: Into, -{ - fn get_main_purse(&self) -> Result { - // NOTE: This violates security as system contract is a contract entrypoint and normal - // "get_main_purse" won't work for security reasons. But since we're not running it as a - // WASM contract, and purses are going to be removed anytime soon, we're making this - // exception here. 
- Ok(Runtime::context(self).account().main_purse()) - } -} - -impl<'a, R> Auction for Runtime<'a, R> -where - R: StateReader, - R::Error: Into, -{ -} diff --git a/execution_engine/src/core/runtime/externals.rs b/execution_engine/src/core/runtime/externals.rs deleted file mode 100644 index 002d79b013..0000000000 --- a/execution_engine/src/core/runtime/externals.rs +++ /dev/null @@ -1,973 +0,0 @@ -use std::{collections::BTreeSet, convert::TryFrom}; - -use wasmi::{Externals, RuntimeArgs, RuntimeValue, Trap}; - -use casper_types::{ - account, - account::AccountHash, - api_error, - bytesrepr::{self, ToBytes}, - contracts::{ContractPackageStatus, EntryPoints, NamedKeys}, - system::auction::EraInfo, - ContractHash, ContractPackageHash, ContractVersion, EraId, Group, Key, URef, U512, -}; - -use super::{args::Args, scoped_instrumenter::ScopedInstrumenter, Error, Runtime}; -use crate::{ - core::resolvers::v1_function_index::FunctionIndex, - shared::{gas::Gas, host_function_costs::Cost, stored_value::StoredValue}, - storage::global_state::StateReader, -}; - -impl<'a, R> Externals for Runtime<'a, R> -where - R: StateReader, - R::Error: Into, -{ - fn invoke_index( - &mut self, - index: usize, - args: RuntimeArgs, - ) -> Result, Trap> { - let func = FunctionIndex::try_from(index).expect("unknown function index"); - let mut scoped_instrumenter = ScopedInstrumenter::new(func); - - let host_function_costs = self - .protocol_data() - .wasm_config() - .take_host_function_costs(); - - match func { - FunctionIndex::ReadFuncIndex => { - // args(0) = pointer to key in Wasm memory - // args(1) = size of key in Wasm memory - // args(2) = pointer to output size (output param) - let (key_ptr, key_size, output_size_ptr) = Args::parse(args)?; - self.charge_host_function_call( - &host_function_costs.read_value, - [key_ptr, key_size, output_size_ptr], - )?; - let ret = self.read(key_ptr, key_size, output_size_ptr)?; - Ok(Some(RuntimeValue::I32(api_error::i32_from(ret)))) - } - - 
FunctionIndex::LoadNamedKeysFuncIndex => { - // args(0) = pointer to amount of keys (output) - // args(1) = pointer to amount of serialized bytes (output) - let (total_keys_ptr, result_size_ptr) = Args::parse(args)?; - self.charge_host_function_call( - &host_function_costs.load_named_keys, - [total_keys_ptr, result_size_ptr], - )?; - let ret = self.load_named_keys( - total_keys_ptr, - result_size_ptr, - &mut scoped_instrumenter, - )?; - Ok(Some(RuntimeValue::I32(api_error::i32_from(ret)))) - } - - FunctionIndex::WriteFuncIndex => { - // args(0) = pointer to key in Wasm memory - // args(1) = size of key - // args(2) = pointer to value - // args(3) = size of value - let (key_ptr, key_size, value_ptr, value_size) = Args::parse(args)?; - self.charge_host_function_call( - &host_function_costs.write, - [key_ptr, key_size, value_ptr, value_size], - )?; - scoped_instrumenter.add_property("value_size", value_size); - self.write(key_ptr, key_size, value_ptr, value_size)?; - Ok(None) - } - - FunctionIndex::AddFuncIndex => { - // args(0) = pointer to key in Wasm memory - // args(1) = size of key - // args(2) = pointer to value - // args(3) = size of value - let (key_ptr, key_size, value_ptr, value_size) = Args::parse(args)?; - self.charge_host_function_call( - &host_function_costs.add, - [key_ptr, key_size, value_ptr, value_size], - )?; - self.add(key_ptr, key_size, value_ptr, value_size)?; - Ok(None) - } - - FunctionIndex::NewFuncIndex => { - // args(0) = pointer to uref destination in Wasm memory - // args(1) = pointer to initial value - // args(2) = size of initial value - let (uref_ptr, value_ptr, value_size) = Args::parse(args)?; - self.charge_host_function_call( - &host_function_costs.new_uref, - [uref_ptr, value_ptr, value_size], - )?; - scoped_instrumenter.add_property("value_size", value_size); - self.new_uref(uref_ptr, value_ptr, value_size)?; - Ok(None) - } - - FunctionIndex::RetFuncIndex => { - // args(0) = pointer to value - // args(1) = size of value - let 
(value_ptr, value_size) = Args::parse(args)?; - self.charge_host_function_call(&host_function_costs.ret, [value_ptr, value_size])?; - scoped_instrumenter.add_property("value_size", value_size); - Err(self.ret(value_ptr, value_size as usize, &mut scoped_instrumenter)) - } - - FunctionIndex::GetKeyFuncIndex => { - // args(0) = pointer to key name in Wasm memory - // args(1) = size of key name - // args(2) = pointer to output buffer for serialized key - // args(3) = size of output buffer - // args(4) = pointer to bytes written - let (name_ptr, name_size, output_ptr, output_size, bytes_written) = - Args::parse(args)?; - self.charge_host_function_call( - &host_function_costs.get_key, - [name_ptr, name_size, output_ptr, output_size, bytes_written], - )?; - scoped_instrumenter.add_property("name_size", name_size); - let ret = self.load_key( - name_ptr, - name_size, - output_ptr, - output_size as usize, - bytes_written, - )?; - Ok(Some(RuntimeValue::I32(api_error::i32_from(ret)))) - } - - FunctionIndex::HasKeyFuncIndex => { - // args(0) = pointer to key name in Wasm memory - // args(1) = size of key name - let (name_ptr, name_size) = Args::parse(args)?; - self.charge_host_function_call( - &host_function_costs.has_key, - [name_ptr, name_size], - )?; - scoped_instrumenter.add_property("name_size", name_size); - let result = self.has_key(name_ptr, name_size)?; - Ok(Some(RuntimeValue::I32(result))) - } - - FunctionIndex::PutKeyFuncIndex => { - // args(0) = pointer to key name in Wasm memory - // args(1) = size of key name - // args(2) = pointer to key in Wasm memory - // args(3) = size of key - let (name_ptr, name_size, key_ptr, key_size) = Args::parse(args)?; - self.charge_host_function_call( - &host_function_costs.put_key, - [name_ptr, name_size, key_ptr, key_size], - )?; - scoped_instrumenter.add_property("name_size", name_size); - self.put_key(name_ptr, name_size, key_ptr, key_size)?; - Ok(None) - } - - FunctionIndex::RemoveKeyFuncIndex => { - // args(0) = pointer to key 
name in Wasm memory - // args(1) = size of key name - let (name_ptr, name_size) = Args::parse(args)?; - self.charge_host_function_call( - &host_function_costs.remove_key, - [name_ptr, name_size], - )?; - scoped_instrumenter.add_property("name_size", name_size); - self.remove_key(name_ptr, name_size)?; - Ok(None) - } - - FunctionIndex::GetCallerIndex => { - // args(0) = pointer where a size of serialized bytes will be stored - let output_size = Args::parse(args)?; - self.charge_host_function_call(&host_function_costs.get_caller, [output_size])?; - let ret = self.get_caller(output_size)?; - Ok(Some(RuntimeValue::I32(api_error::i32_from(ret)))) - } - - FunctionIndex::GetBlocktimeIndex => { - // args(0) = pointer to Wasm memory where to write. - let dest_ptr = Args::parse(args)?; - self.charge_host_function_call(&host_function_costs.get_blocktime, [dest_ptr])?; - self.get_blocktime(dest_ptr)?; - Ok(None) - } - - FunctionIndex::GasFuncIndex => { - let gas_arg: u32 = Args::parse(args)?; - // Gas is special cased internal host function and for accounting purposes it isn't - // represented in protocol data. 
- self.gas(Gas::new(gas_arg.into()))?; - Ok(None) - } - - FunctionIndex::IsValidURefFnIndex => { - // args(0) = pointer to value to validate - // args(1) = size of value - let (uref_ptr, uref_size) = Args::parse(args)?; - self.charge_host_function_call( - &host_function_costs.is_valid_uref, - [uref_ptr, uref_size], - )?; - Ok(Some(RuntimeValue::I32(i32::from( - self.is_valid_uref(uref_ptr, uref_size)?, - )))) - } - - FunctionIndex::RevertFuncIndex => { - // args(0) = status u32 - let status = Args::parse(args)?; - self.charge_host_function_call(&host_function_costs.revert, [status])?; - Err(self.revert(status)) - } - - FunctionIndex::AddAssociatedKeyFuncIndex => { - // args(0) = pointer to array of bytes of an account hash - // args(1) = size of an account hash - // args(2) = weight of the key - let (account_hash_ptr, account_hash_size, weight_value) = Args::parse(args)?; - self.charge_host_function_call( - &host_function_costs.add_associated_key, - [account_hash_ptr, account_hash_size, weight_value as Cost], - )?; - let value = self.add_associated_key( - account_hash_ptr, - account_hash_size as usize, - weight_value, - )?; - Ok(Some(RuntimeValue::I32(value))) - } - - FunctionIndex::RemoveAssociatedKeyFuncIndex => { - // args(0) = pointer to array of bytes of an account hash - // args(1) = size of an account hash - let (account_hash_ptr, account_hash_size) = Args::parse(args)?; - self.charge_host_function_call( - &host_function_costs.remove_associated_key, - [account_hash_ptr, account_hash_size], - )?; - let value = - self.remove_associated_key(account_hash_ptr, account_hash_size as usize)?; - Ok(Some(RuntimeValue::I32(value))) - } - - FunctionIndex::UpdateAssociatedKeyFuncIndex => { - // args(0) = pointer to array of bytes of an account hash - // args(1) = size of an account hash - // args(2) = weight of the key - let (account_hash_ptr, account_hash_size, weight_value) = Args::parse(args)?; - self.charge_host_function_call( - 
&host_function_costs.update_associated_key, - [account_hash_ptr, account_hash_size, weight_value as Cost], - )?; - let value = self.update_associated_key( - account_hash_ptr, - account_hash_size as usize, - weight_value, - )?; - Ok(Some(RuntimeValue::I32(value))) - } - - FunctionIndex::SetActionThresholdFuncIndex => { - // args(0) = action type - // args(1) = new threshold - let (action_type_value, threshold_value) = Args::parse(args)?; - self.charge_host_function_call( - &host_function_costs.set_action_threshold, - [action_type_value, threshold_value as Cost], - )?; - let value = self.set_action_threshold(action_type_value, threshold_value)?; - Ok(Some(RuntimeValue::I32(value))) - } - - FunctionIndex::CreatePurseIndex => { - // args(0) = pointer to array for return value - // args(1) = length of array for return value - let (dest_ptr, dest_size) = Args::parse(args)?; - self.charge_host_function_call( - &host_function_costs.create_purse, - [dest_ptr, dest_size], - )?; - let purse = self.create_purse()?; - let purse_bytes = purse.into_bytes().map_err(Error::BytesRepr)?; - assert_eq!(dest_size, purse_bytes.len() as u32); - self.memory - .set(dest_ptr, &purse_bytes) - .map_err(|e| Error::Interpreter(e.into()))?; - Ok(Some(RuntimeValue::I32(0))) - } - - FunctionIndex::TransferToAccountIndex => { - // args(0) = pointer to array of bytes of an account hash - // args(1) = length of array of bytes of an account hash - // args(2) = pointer to array of bytes of an amount - // args(3) = length of array of bytes of an amount - // args(4) = pointer to array of bytes of an id - // args(5) = length of array of bytes of an id - // args(6) = pointer to a value where new value will be set - let (key_ptr, key_size, amount_ptr, amount_size, id_ptr, id_size, result_ptr) = - Args::parse(args)?; - self.charge_host_function_call( - &host_function_costs.transfer_to_account, - [ - key_ptr, - key_size, - amount_ptr, - amount_size, - id_ptr, - id_size, - result_ptr, - ], - )?; - let 
account_hash: AccountHash = { - let bytes = self.bytes_from_mem(key_ptr, key_size as usize)?; - bytesrepr::deserialize(bytes).map_err(Error::BytesRepr)? - }; - let amount: U512 = { - let bytes = self.bytes_from_mem(amount_ptr, amount_size as usize)?; - bytesrepr::deserialize(bytes).map_err(Error::BytesRepr)? - }; - let id: Option = { - let bytes = self.bytes_from_mem(id_ptr, id_size as usize)?; - bytesrepr::deserialize(bytes).map_err(Error::BytesRepr)? - }; - - let ret = match self.transfer_to_account(account_hash, amount, id)? { - Ok(transferred_to) => { - let result_value: u32 = transferred_to as u32; - let result_value_bytes = result_value.to_le_bytes(); - self.memory - .set(result_ptr, &result_value_bytes) - .map_err(|error| Error::Interpreter(error.into()))?; - Ok(()) - } - Err(api_error) => Err(api_error), - }; - Ok(Some(RuntimeValue::I32(api_error::i32_from(ret)))) - } - - FunctionIndex::TransferFromPurseToAccountIndex => { - // args(0) = pointer to array of bytes in Wasm memory of a source purse - // args(1) = length of array of bytes in Wasm memory of a source purse - // args(2) = pointer to array of bytes in Wasm memory of an account hash - // args(3) = length of array of bytes in Wasm memory of an account hash - // args(4) = pointer to array of bytes in Wasm memory of an amount - // args(5) = length of array of bytes in Wasm memory of an amount - // args(6) = pointer to array of bytes in Wasm memory of an id - // args(7) = length of array of bytes in Wasm memory of an id - // args(8) = pointer to a value where value of `TransferredTo` enum will be set - let ( - source_ptr, - source_size, - key_ptr, - key_size, - amount_ptr, - amount_size, - id_ptr, - id_size, - result_ptr, - ) = Args::parse(args)?; - self.charge_host_function_call( - &host_function_costs.transfer_from_purse_to_account, - [ - source_ptr, - source_size, - key_ptr, - key_size, - amount_ptr, - amount_size, - id_ptr, - id_size, - result_ptr, - ], - )?; - let source_purse = { - let bytes = 
self.bytes_from_mem(source_ptr, source_size as usize)?; - bytesrepr::deserialize(bytes).map_err(Error::BytesRepr)? - }; - let account_hash: AccountHash = { - let bytes = self.bytes_from_mem(key_ptr, key_size as usize)?; - bytesrepr::deserialize(bytes).map_err(Error::BytesRepr)? - }; - let amount: U512 = { - let bytes = self.bytes_from_mem(amount_ptr, amount_size as usize)?; - bytesrepr::deserialize(bytes).map_err(Error::BytesRepr)? - }; - let id: Option = { - let bytes = self.bytes_from_mem(id_ptr, id_size as usize)?; - bytesrepr::deserialize(bytes).map_err(Error::BytesRepr)? - }; - let ret = match self.transfer_from_purse_to_account( - source_purse, - account_hash, - amount, - id, - )? { - Ok(transferred_to) => { - let result_value: u32 = transferred_to as u32; - let result_value_bytes = result_value.to_le_bytes(); - self.memory - .set(result_ptr, &result_value_bytes) - .map_err(|error| Error::Interpreter(error.into()))?; - Ok(()) - } - Err(api_error) => Err(api_error), - }; - Ok(Some(RuntimeValue::I32(api_error::i32_from(ret)))) - } - - FunctionIndex::TransferFromPurseToPurseIndex => { - // args(0) = pointer to array of bytes in Wasm memory of a source purse - // args(1) = length of array of bytes in Wasm memory of a source purse - // args(2) = pointer to array of bytes in Wasm memory of a target purse - // args(3) = length of array of bytes in Wasm memory of a target purse - // args(4) = pointer to array of bytes in Wasm memory of an amount - // args(5) = length of array of bytes in Wasm memory of an amount - // args(6) = pointer to array of bytes in Wasm memory of an id - // args(7) = length of array of bytes in Wasm memory of an id - let ( - source_ptr, - source_size, - target_ptr, - target_size, - amount_ptr, - amount_size, - id_ptr, - id_size, - ) = Args::parse(args)?; - self.charge_host_function_call( - &host_function_costs.transfer_from_purse_to_purse, - [ - source_ptr, - source_size, - target_ptr, - target_size, - amount_ptr, - amount_size, - id_ptr, - 
id_size, - ], - )?; - let ret = self.transfer_from_purse_to_purse( - source_ptr, - source_size, - target_ptr, - target_size, - amount_ptr, - amount_size, - id_ptr, - id_size, - )?; - Ok(Some(RuntimeValue::I32(api_error::i32_from(ret)))) - } - - FunctionIndex::GetBalanceIndex => { - // args(0) = pointer to purse input - // args(1) = length of purse - // args(2) = pointer to output size (output) - let (ptr, ptr_size, output_size_ptr) = Args::parse(args)?; - self.charge_host_function_call( - &host_function_costs.get_balance, - [ptr, ptr_size, output_size_ptr], - )?; - let ret = self.get_balance_host_buffer(ptr, ptr_size as usize, output_size_ptr)?; - Ok(Some(RuntimeValue::I32(api_error::i32_from(ret)))) - } - - FunctionIndex::GetPhaseIndex => { - // args(0) = pointer to Wasm memory where to write. - let dest_ptr = Args::parse(args)?; - self.charge_host_function_call(&host_function_costs.get_phase, [dest_ptr])?; - self.get_phase(dest_ptr)?; - Ok(None) - } - - FunctionIndex::GetSystemContractIndex => { - // args(0) = system contract index - // args(1) = dest pointer for storing serialized result - // args(2) = dest pointer size - let (system_contract_index, dest_ptr, dest_size) = Args::parse(args)?; - self.charge_host_function_call( - &host_function_costs.get_system_contract, - [system_contract_index, dest_ptr, dest_size], - )?; - let ret = self.get_system_contract(system_contract_index, dest_ptr, dest_size)?; - Ok(Some(RuntimeValue::I32(api_error::i32_from(ret)))) - } - - FunctionIndex::GetMainPurseIndex => { - // args(0) = pointer to Wasm memory where to write. - let dest_ptr = Args::parse(args)?; - self.charge_host_function_call(&host_function_costs.get_main_purse, [dest_ptr])?; - self.get_main_purse(dest_ptr)?; - Ok(None) - } - - FunctionIndex::ReadHostBufferIndex => { - // args(0) = pointer to Wasm memory where to write size. 
- let (dest_ptr, dest_size, bytes_written_ptr) = Args::parse(args)?; - self.charge_host_function_call( - &host_function_costs.read_host_buffer, - [dest_ptr, dest_size, bytes_written_ptr], - )?; - scoped_instrumenter.add_property("dest_size", dest_size); - let ret = self.read_host_buffer(dest_ptr, dest_size as usize, bytes_written_ptr)?; - Ok(Some(RuntimeValue::I32(api_error::i32_from(ret)))) - } - - FunctionIndex::CreateContractPackageAtHash => { - // args(0) = pointer to wasm memory where to write 32-byte Hash address - // args(1) = pointer to wasm memory where to write 32-byte access key address - // args(2) = boolean flag to determine if the contract can be versioned - let (hash_dest_ptr, access_dest_ptr, is_locked) = Args::parse(args)?; - self.charge_host_function_call( - &host_function_costs.create_contract_package_at_hash, - [hash_dest_ptr, access_dest_ptr], - )?; - let package_status = ContractPackageStatus::new(is_locked); - let (hash_addr, access_addr) = - self.create_contract_package_at_hash(package_status)?; - - self.function_address(hash_addr, hash_dest_ptr)?; - self.function_address(access_addr, access_dest_ptr)?; - Ok(None) - } - - FunctionIndex::CreateContractUserGroup => { - // args(0) = pointer to package key in wasm memory - // args(1) = size of package key in wasm memory - // args(2) = pointer to group label in wasm memory - // args(3) = size of group label in wasm memory - // args(4) = number of new urefs to generate for the group - // args(5) = pointer to existing_urefs in wasm memory - // args(6) = size of existing_urefs in wasm memory - // args(7) = pointer to location to write size of output (written to host buffer) - let ( - package_key_ptr, - package_key_size, - label_ptr, - label_size, - num_new_urefs, - existing_urefs_ptr, - existing_urefs_size, - output_size_ptr, - ) = Args::parse(args)?; - self.charge_host_function_call( - &host_function_costs.create_contract_user_group, - [ - package_key_ptr, - package_key_size, - label_ptr, - 
label_size, - num_new_urefs, - existing_urefs_ptr, - existing_urefs_size, - output_size_ptr, - ], - )?; - scoped_instrumenter - .add_property("existing_urefs_size", existing_urefs_size.to_string()); - scoped_instrumenter.add_property("label_size", label_size.to_string()); - - let contract_package_hash: ContractPackageHash = - self.t_from_mem(package_key_ptr, package_key_size)?; - let label: String = self.t_from_mem(label_ptr, label_size)?; - let existing_urefs: BTreeSet = - self.t_from_mem(existing_urefs_ptr, existing_urefs_size)?; - - let ret = self.create_contract_user_group( - contract_package_hash, - label, - num_new_urefs, - existing_urefs, - output_size_ptr, - )?; - Ok(Some(RuntimeValue::I32(api_error::i32_from(ret)))) - } - - FunctionIndex::AddContractVersion => { - // args(0) = pointer to package key in wasm memory - // args(1) = size of package key in wasm memory - // args(2) = pointer to entrypoints in wasm memory - // args(3) = size of entrypoints in wasm memory - // args(4) = pointer to named keys in wasm memory - // args(5) = size of named keys in wasm memory - // args(6) = pointer to output buffer for serialized key - // args(7) = size of output buffer - // args(8) = pointer to bytes written - let ( - contract_package_hash_ptr, - contract_package_hash_size, - version_ptr, - entry_points_ptr, - entry_points_size, - named_keys_ptr, - named_keys_size, - output_ptr, - output_size, - bytes_written_ptr, - ) = Args::parse(args)?; - self.charge_host_function_call( - &host_function_costs.add_contract_version, - [ - contract_package_hash_ptr, - contract_package_hash_size, - version_ptr, - entry_points_ptr, - entry_points_size, - named_keys_ptr, - named_keys_size, - output_ptr, - output_size, - bytes_written_ptr, - ], - )?; - scoped_instrumenter - .add_property("entry_points_size", entry_points_size.to_string()); - scoped_instrumenter.add_property("named_keys_size", named_keys_size.to_string()); - - let contract_package_hash: ContractPackageHash = - 
self.t_from_mem(contract_package_hash_ptr, contract_package_hash_size)?; - let entry_points: EntryPoints = - self.t_from_mem(entry_points_ptr, entry_points_size)?; - let named_keys: NamedKeys = self.t_from_mem(named_keys_ptr, named_keys_size)?; - let ret = self.add_contract_version( - contract_package_hash, - entry_points, - named_keys, - output_ptr, - output_size as usize, - bytes_written_ptr, - version_ptr, - )?; - Ok(Some(RuntimeValue::I32(api_error::i32_from(ret)))) - } - - FunctionIndex::DisableContractVersion => { - // args(0) = pointer to package hash in wasm memory - // args(1) = size of package hash in wasm memory - // args(2) = pointer to contract hash in wasm memory - // args(3) = size of contract hash in wasm memory - let (package_key_ptr, package_key_size, contract_hash_ptr, contract_hash_size) = - Args::parse(args)?; - self.charge_host_function_call( - &host_function_costs.disable_contract_version, - [ - package_key_ptr, - package_key_size, - contract_hash_ptr, - contract_hash_size, - ], - )?; - let contract_package_hash = self.t_from_mem(package_key_ptr, package_key_size)?; - let contract_hash = self.t_from_mem(contract_hash_ptr, contract_hash_size)?; - - let result = self.disable_contract_version(contract_package_hash, contract_hash)?; - - Ok(Some(RuntimeValue::I32(api_error::i32_from(result)))) - } - - FunctionIndex::CallContractFuncIndex => { - // args(0) = pointer to contract hash where contract is at in global state - // args(1) = size of contract hash - // args(2) = pointer to entry point - // args(3) = size of entry point - // args(4) = pointer to function arguments in Wasm memory - // args(5) = size of arguments - // args(6) = pointer to result size (output) - let ( - contract_hash_ptr, - contract_hash_size, - entry_point_name_ptr, - entry_point_name_size, - args_ptr, - args_size, - result_size_ptr, - ) = Args::parse(args)?; - self.charge_host_function_call( - &host_function_costs.call_contract, - [ - contract_hash_ptr, - contract_hash_size, 
- entry_point_name_ptr, - entry_point_name_size, - args_ptr, - args_size, - result_size_ptr, - ], - )?; - scoped_instrumenter - .add_property("entry_point_name_size", entry_point_name_size.to_string()); - scoped_instrumenter.add_property("args_size", args_size.to_string()); - - let contract_hash: ContractHash = - self.t_from_mem(contract_hash_ptr, contract_hash_size)?; - let entry_point_name: String = - self.t_from_mem(entry_point_name_ptr, entry_point_name_size)?; - let args_bytes: Vec = { - let args_size: u32 = args_size; - self.bytes_from_mem(args_ptr, args_size as usize)? - }; - - let ret = self.call_contract_host_buffer( - contract_hash, - &entry_point_name, - args_bytes, - result_size_ptr, - &mut scoped_instrumenter, - )?; - Ok(Some(RuntimeValue::I32(api_error::i32_from(ret)))) - } - - FunctionIndex::CallVersionedContract => { - // args(0) = pointer to contract_package_hash where contract is at in global state - // args(1) = size of contract_package_hash - // args(2) = pointer to contract version in wasm memory - // args(3) = size of contract version in wasm memory - // args(4) = pointer to method name in wasm memory - // args(5) = size of method name in wasm memory - // args(6) = pointer to function arguments in Wasm memory - // args(7) = size of arguments - // args(8) = pointer to result size (output) - let ( - contract_package_hash_ptr, - contract_package_hash_size, - contract_version_ptr, - contract_package_size, - entry_point_name_ptr, - entry_point_name_size, - args_ptr, - args_size, - result_size_ptr, - ) = Args::parse(args)?; - self.charge_host_function_call( - &host_function_costs.call_versioned_contract, - [ - contract_package_hash_ptr, - contract_package_hash_size, - contract_version_ptr, - contract_package_size, - entry_point_name_ptr, - entry_point_name_size, - args_ptr, - args_size, - result_size_ptr, - ], - )?; - scoped_instrumenter - .add_property("entry_point_name_size", entry_point_name_size.to_string()); - 
scoped_instrumenter.add_property("args_size", args_size.to_string()); - - let contract_package_hash: ContractPackageHash = - self.t_from_mem(contract_package_hash_ptr, contract_package_hash_size)?; - let contract_version: Option = - self.t_from_mem(contract_version_ptr, contract_package_size)?; - let entry_point_name: String = - self.t_from_mem(entry_point_name_ptr, entry_point_name_size)?; - let args_bytes: Vec = { - let args_size: u32 = args_size; - self.bytes_from_mem(args_ptr, args_size as usize)? - }; - - let ret = self.call_versioned_contract_host_buffer( - contract_package_hash, - contract_version, - entry_point_name, - args_bytes, - result_size_ptr, - &mut scoped_instrumenter, - )?; - Ok(Some(RuntimeValue::I32(api_error::i32_from(ret)))) - } - - #[cfg(feature = "test-support")] - FunctionIndex::PrintIndex => { - let (text_ptr, text_size) = Args::parse(args)?; - self.charge_host_function_call(&host_function_costs.print, [text_ptr, text_size])?; - scoped_instrumenter.add_property("text_size", text_size); - self.print(text_ptr, text_size)?; - Ok(None) - } - - FunctionIndex::GetRuntimeArgsizeIndex => { - // args(0) = pointer to name of host runtime arg to load - // args(1) = size of name of the host runtime arg - // args(2) = pointer to a argument size (output) - let (name_ptr, name_size, size_ptr) = Args::parse(args)?; - self.charge_host_function_call( - &host_function_costs.get_named_arg_size, - [name_ptr, name_size, size_ptr], - )?; - scoped_instrumenter.add_property("name_size", name_size.to_string()); - let ret = self.get_named_arg_size(name_ptr, name_size as usize, size_ptr)?; - Ok(Some(RuntimeValue::I32(api_error::i32_from(ret)))) - } - - FunctionIndex::GetRuntimeArgIndex => { - // args(0) = pointer to serialized argument name - // args(1) = size of serialized argument name - // args(2) = pointer to output pointer where host will write argument bytes - // args(3) = size of available data under output pointer - let (name_ptr, name_size, dest_ptr, 
dest_size) = Args::parse(args)?; - self.charge_host_function_call( - &host_function_costs.get_named_arg, - [name_ptr, name_size, dest_ptr, dest_size], - )?; - scoped_instrumenter.add_property("name_size", name_size.to_string()); - scoped_instrumenter.add_property("dest_size", dest_size.to_string()); - let ret = - self.get_named_arg(name_ptr, name_size as usize, dest_ptr, dest_size as usize)?; - Ok(Some(RuntimeValue::I32(api_error::i32_from(ret)))) - } - - FunctionIndex::RemoveContractUserGroupIndex => { - // args(0) = pointer to package key in wasm memory - // args(1) = size of package key in wasm memory - // args(2) = pointer to serialized group label - // args(3) = size of serialized group label - let (package_key_ptr, package_key_size, label_ptr, label_size) = Args::parse(args)?; - self.charge_host_function_call( - &host_function_costs.remove_contract_user_group, - [package_key_ptr, package_key_size, label_ptr, label_size], - )?; - scoped_instrumenter.add_property("label_size", label_size.to_string()); - let package_key = self.t_from_mem(package_key_ptr, package_key_size)?; - let label: Group = self.t_from_mem(label_ptr, label_size)?; - - let ret = self.remove_contract_user_group(package_key, label)?; - Ok(Some(RuntimeValue::I32(api_error::i32_from(ret)))) - } - - FunctionIndex::ExtendContractUserGroupURefsIndex => { - // args(0) = pointer to package key in wasm memory - // args(1) = size of package key in wasm memory - // args(2) = pointer to label name - // args(3) = label size bytes - // args(4) = output of size value of host bytes data - let (package_ptr, package_size, label_ptr, label_size, value_size_ptr) = - Args::parse(args)?; - self.charge_host_function_call( - &host_function_costs.provision_contract_user_group_uref, - [ - package_ptr, - package_size, - label_ptr, - label_size, - value_size_ptr, - ], - )?; - scoped_instrumenter.add_property("label_size", label_size.to_string()); - let ret = self.provision_contract_user_group_uref( - package_ptr, - 
package_size, - label_ptr, - label_size, - value_size_ptr, - )?; - Ok(Some(RuntimeValue::I32(api_error::i32_from(ret)))) - } - - FunctionIndex::RemoveContractUserGroupURefsIndex => { - // args(0) = pointer to package key in wasm memory - // args(1) = size of package key in wasm memory - // args(2) = pointer to label name - // args(3) = label size bytes - // args(4) = pointer to urefs - // args(5) = size of urefs pointer - let (package_ptr, package_size, label_ptr, label_size, urefs_ptr, urefs_size) = - Args::parse(args)?; - self.charge_host_function_call( - &host_function_costs.remove_contract_user_group_urefs, - [ - package_ptr, - package_size, - label_ptr, - label_size, - urefs_ptr, - urefs_size, - ], - )?; - scoped_instrumenter.add_property("label_size", label_size.to_string()); - scoped_instrumenter.add_property("urefs_size", urefs_size.to_string()); - let ret = self.remove_contract_user_group_urefs( - package_ptr, - package_size, - label_ptr, - label_size, - urefs_ptr, - urefs_size, - )?; - Ok(Some(RuntimeValue::I32(api_error::i32_from(ret)))) - } - - FunctionIndex::Blake2b => { - let (in_ptr, in_size, out_ptr, out_size) = Args::parse(args)?; - self.charge_host_function_call( - &host_function_costs.blake2b, - [in_ptr, in_size, out_ptr, out_size], - )?; - scoped_instrumenter.add_property("in_size", in_size.to_string()); - scoped_instrumenter.add_property("out_size", out_size.to_string()); - let input: Vec = self.bytes_from_mem(in_ptr, in_size as usize)?; - let digest = account::blake2b(&input); - if digest.len() != out_size as usize { - let err_value = u32::from(api_error::ApiError::BufferTooSmall) as i32; - return Ok(Some(RuntimeValue::I32(err_value))); - } - self.memory - .set(out_ptr, &digest) - .map_err(|error| Error::Interpreter(error.into()))?; - Ok(Some(RuntimeValue::I32(0))) - } - - FunctionIndex::RecordTransfer => { - // RecordTransfer is a special cased internal host function only callable by the - // mint contract and for accounting purposes it isn't 
represented in protocol data. - let ( - maybe_to_ptr, - maybe_to_size, - source_ptr, - source_size, - target_ptr, - target_size, - amount_ptr, - amount_size, - id_ptr, - id_size, - ): (u32, u32, u32, u32, u32, u32, u32, u32, u32, u32) = Args::parse(args)?; - scoped_instrumenter.add_property("maybe_to_size", maybe_to_size.to_string()); - scoped_instrumenter.add_property("source_size", source_size.to_string()); - scoped_instrumenter.add_property("target_size", target_size.to_string()); - scoped_instrumenter.add_property("amount_size", amount_size.to_string()); - scoped_instrumenter.add_property("id_size", id_size.to_string()); - let maybe_to: Option = self.t_from_mem(maybe_to_ptr, maybe_to_size)?; - let source: URef = self.t_from_mem(source_ptr, source_size)?; - let target: URef = self.t_from_mem(target_ptr, target_size)?; - let amount: U512 = self.t_from_mem(amount_ptr, amount_size)?; - let id: Option = self.t_from_mem(id_ptr, id_size)?; - self.record_transfer(maybe_to, source, target, amount, id)?; - Ok(Some(RuntimeValue::I32(0))) - } - - FunctionIndex::RecordEraInfo => { - // RecordEraInfo is a special cased internal host function only callable by the - // auction contract and for accounting purposes it isn't represented in protocol - // data. 
- let (era_id_ptr, era_id_size, era_info_ptr, era_info_size): (u32, u32, u32, u32) = - Args::parse(args)?; - scoped_instrumenter.add_property("era_id_size", era_id_size.to_string()); - scoped_instrumenter.add_property("era_info_size", era_info_size.to_string()); - let era_id: EraId = self.t_from_mem(era_id_ptr, era_id_size)?; - let era_info: EraInfo = self.t_from_mem(era_info_ptr, era_info_size)?; - self.record_era_info(era_id, era_info)?; - Ok(Some(RuntimeValue::I32(0))) - } - } - } -} diff --git a/execution_engine/src/core/runtime/handle_payment_internal.rs b/execution_engine/src/core/runtime/handle_payment_internal.rs deleted file mode 100644 index c5a5946e38..0000000000 --- a/execution_engine/src/core/runtime/handle_payment_internal.rs +++ /dev/null @@ -1,105 +0,0 @@ -use casper_types::{ - account::AccountHash, - system::handle_payment::{Error, HandlePayment, MintProvider, RuntimeProvider}, - BlockTime, Key, Phase, TransferredTo, URef, U512, -}; - -use crate::{ - core::{execution, runtime::Runtime}, - shared::stored_value::StoredValue, - storage::global_state::StateReader, -}; - -impl From for Option { - fn from(exec_error: execution::Error) -> Self { - match exec_error { - // This is used to propagate [`execution::Error::GasLimit`] to make sure - // [`HandlePayment`] contract running natively supports propagating gas limit - // errors without a panic. - execution::Error::GasLimit => Some(Error::GasLimit), - // There are possibly other exec errors happening but such translation would be lossy. 
- _ => None, - } - } -} - -// TODO: Update MintProvider to better handle errors -impl<'a, R> MintProvider for Runtime<'a, R> -where - R: StateReader, - R::Error: Into, -{ - fn transfer_purse_to_account( - &mut self, - source: URef, - target: AccountHash, - amount: U512, - ) -> Result { - match self.transfer_from_purse_to_account(source, target, amount, None) { - Ok(Ok(transferred_to)) => Ok(transferred_to), - Ok(Err(_mint_error)) => Err(Error::Transfer), - Err(exec_error) => Err(>::from(exec_error).unwrap_or(Error::Transfer)), - } - } - - fn transfer_purse_to_purse( - &mut self, - source: URef, - target: URef, - amount: U512, - ) -> Result<(), Error> { - let mint_contract_key = self.get_mint_contract(); - match self.mint_transfer(mint_contract_key, None, source, target, amount, None) { - Ok(Ok(_)) => Ok(()), - Ok(Err(_mint_error)) => Err(Error::Transfer), - Err(exec_error) => Err(>::from(exec_error).unwrap_or(Error::Transfer)), - } - } - - fn balance(&mut self, purse: URef) -> Result, Error> { - self.get_balance(purse) - .map_err(|exec_error| >::from(exec_error).unwrap_or(Error::GetBalance)) - } -} - -// TODO: Update RuntimeProvider to better handle errors -impl<'a, R> RuntimeProvider for Runtime<'a, R> -where - R: StateReader, - R::Error: Into, -{ - fn get_key(&self, name: &str) -> Option { - self.context.named_keys_get(name).cloned() - } - - fn put_key(&mut self, name: &str, key: Key) -> Result<(), Error> { - self.context - .put_key(name.to_string(), key) - .map_err(|exec_error| >::from(exec_error).unwrap_or(Error::PutKey)) - } - - fn remove_key(&mut self, name: &str) -> Result<(), Error> { - self.context - .remove_key(name) - .map_err(|exec_error| >::from(exec_error).unwrap_or(Error::RemoveKey)) - } - - fn get_phase(&self) -> Phase { - self.context.phase() - } - - fn get_block_time(&self) -> BlockTime { - self.context.get_blocktime() - } - - fn get_caller(&self) -> AccountHash { - self.context.get_caller() - } -} - -impl<'a, R> HandlePayment for Runtime<'a, R> 
-where - R: StateReader, - R::Error: Into, -{ -} diff --git a/execution_engine/src/core/runtime/mint_internal.rs b/execution_engine/src/core/runtime/mint_internal.rs deleted file mode 100644 index 324930236b..0000000000 --- a/execution_engine/src/core/runtime/mint_internal.rs +++ /dev/null @@ -1,142 +0,0 @@ -use casper_types::{ - account::AccountHash, - bytesrepr::{FromBytes, ToBytes}, - system::mint::{Error, Mint, RuntimeProvider, StorageProvider, SystemProvider}, - CLTyped, CLValue, Key, URef, U512, -}; - -use super::Runtime; -use crate::{ - core::execution, shared::stored_value::StoredValue, storage::global_state::StateReader, -}; - -impl From for Option { - fn from(exec_error: execution::Error) -> Self { - match exec_error { - // This is used to propagate [`execution::Error::GasLimit`] to make sure [`Mint`] - // contract running natively supports propagating gas limit errors without a panic. - execution::Error::GasLimit => Some(Error::GasLimit), - // There are possibly other exec errors happening but such translation would be lossy. 
- _ => None, - } - } -} - -impl<'a, R> RuntimeProvider for Runtime<'a, R> -where - R: StateReader, - R::Error: Into, -{ - fn get_caller(&self) -> AccountHash { - self.context.get_caller() - } - - fn put_key(&mut self, name: &str, key: Key) -> Result<(), Error> { - self.context - .put_key(name.to_string(), key) - .map_err(|exec_error| >::from(exec_error).unwrap_or(Error::PutKey)) - } - - fn get_key(&self, name: &str) -> Option { - self.context.named_keys_get(name).cloned() - } -} - -// TODO: update Mint + StorageProvider to better handle errors -impl<'a, R> StorageProvider for Runtime<'a, R> -where - R: StateReader, - R::Error: Into, -{ - fn new_uref(&mut self, init: T) -> Result { - let cl_value: CLValue = CLValue::from_t(init).map_err(|_| Error::CLValue)?; - self.context - .new_uref(StoredValue::CLValue(cl_value)) - .map_err(|exec_error| >::from(exec_error).unwrap_or(Error::NewURef)) - } - - fn read(&mut self, uref: URef) -> Result, Error> { - let maybe_value = self - .context - .read_gs(&Key::URef(uref)) - .map_err(|exec_error| >::from(exec_error).unwrap_or(Error::Storage))?; - match maybe_value { - Some(StoredValue::CLValue(value)) => { - let value = CLValue::into_t(value).map_err(|_| Error::CLValue)?; - Ok(Some(value)) - } - Some(_cl_value) => Err(Error::CLValue), - None => Ok(None), - } - } - - fn write(&mut self, uref: URef, value: T) -> Result<(), Error> { - let cl_value = CLValue::from_t(value).map_err(|_| Error::CLValue)?; - self.context - .metered_write_gs(Key::URef(uref), StoredValue::CLValue(cl_value)) - .map_err(|exec_error| >::from(exec_error).unwrap_or(Error::Storage)) - } - - fn add(&mut self, uref: URef, value: T) -> Result<(), Error> { - let cl_value = CLValue::from_t(value).map_err(|_| Error::CLValue)?; - self.context - .metered_add_gs(uref, cl_value) - .map_err(|exec_error| >::from(exec_error).unwrap_or(Error::Storage)) - } - - fn read_balance(&mut self, uref: URef) -> Result, Error> { - let maybe_value = self - .context - 
.read_gs_direct(&Key::Balance(uref.addr())) - .map_err(|exec_error| >::from(exec_error).unwrap_or(Error::Storage))?; - match maybe_value { - Some(StoredValue::CLValue(value)) => { - let value = CLValue::into_t(value).map_err(|_| Error::CLValue)?; - Ok(Some(value)) - } - Some(_cl_value) => Err(Error::CLValue), - None => Ok(None), - } - } - - fn write_balance(&mut self, uref: URef, balance: U512) -> Result<(), Error> { - let cl_value = CLValue::from_t(balance).map_err(|_| Error::CLValue)?; - self.context - .metered_write_gs_unsafe(Key::Balance(uref.addr()), StoredValue::CLValue(cl_value)) - .map_err(|exec_error| >::from(exec_error).unwrap_or(Error::Storage)) - } - - fn add_balance(&mut self, uref: URef, value: U512) -> Result<(), Error> { - let cl_value = CLValue::from_t(value).map_err(|_| Error::CLValue)?; - self.context - .metered_add_gs_unsafe(Key::Balance(uref.addr()), StoredValue::CLValue(cl_value)) - .map_err(|exec_error| >::from(exec_error).unwrap_or(Error::Storage)) - } -} - -impl<'a, R> SystemProvider for Runtime<'a, R> -where - R: StateReader, - R::Error: Into, -{ - fn record_transfer( - &mut self, - maybe_to: Option, - source: URef, - target: URef, - amount: U512, - id: Option, - ) -> Result<(), Error> { - let result = Runtime::record_transfer(self, maybe_to, source, target, amount, id); - result.map_err(|exec_error| { - >::from(exec_error).unwrap_or(Error::RecordTransferFailure) - }) - } -} - -impl<'a, R> Mint for Runtime<'a, R> -where - R: StateReader, - R::Error: Into, -{ -} diff --git a/execution_engine/src/core/runtime/mod.rs b/execution_engine/src/core/runtime/mod.rs deleted file mode 100644 index 1474913816..0000000000 --- a/execution_engine/src/core/runtime/mod.rs +++ /dev/null @@ -1,3529 +0,0 @@ -mod args; -mod auction_internal; -mod externals; -mod handle_payment_internal; -mod mint_internal; -mod scoped_instrumenter; -mod standard_payment_internal; - -use std::{ - cmp, - collections::{BTreeMap, BTreeSet, HashMap, HashSet}, - convert::TryFrom, - 
iter::IntoIterator, -}; - -use itertools::Itertools; -use parity_wasm::elements::Module; -use wasmi::{ImportsBuilder, MemoryRef, ModuleInstance, ModuleRef, Trap, TrapKind}; - -use casper_types::{ - account::{AccountHash, ActionType, Weight}, - bytesrepr::{self, FromBytes, ToBytes}, - contracts::{ - self, Contract, ContractPackage, ContractPackageStatus, ContractVersion, ContractVersions, - DisabledVersions, EntryPoint, EntryPointAccess, EntryPoints, Group, Groups, NamedKeys, - }, - system::{ - self, - auction::{self, Auction, EraInfo}, - handle_payment::{self, HandlePayment}, - mint::{self, Mint}, - standard_payment::{self, StandardPayment}, - SystemContractType, - }, - AccessRights, ApiError, CLType, CLTyped, CLValue, ContractHash, ContractPackageHash, - ContractVersionKey, ContractWasm, DeployHash, EntryPointType, EraId, Key, Phase, - ProtocolVersion, PublicKey, RuntimeArgs, Transfer, TransferResult, TransferredTo, URef, U128, - U256, U512, -}; - -use crate::{ - core::{ - engine_state::{system_contract_cache::SystemContractCache, EngineConfig}, - execution::{self, Error}, - resolvers::{create_module_resolver, memory_resolver::MemoryResolver}, - runtime::scoped_instrumenter::ScopedInstrumenter, - runtime_context::{self, RuntimeContext}, - Address, - }, - shared::{ - account::Account, - gas::Gas, - host_function_costs::{Cost, HostFunction}, - stored_value::StoredValue, - wasm_config::WasmConfig, - }, - storage::{global_state::StateReader, protocol_data::ProtocolData}, -}; - -pub struct Runtime<'a, R> { - system_contract_cache: SystemContractCache, - config: EngineConfig, - memory: MemoryRef, - module: Module, - host_buffer: Option, - context: RuntimeContext<'a, R>, -} - -pub fn instance_and_memory( - parity_module: Module, - protocol_version: ProtocolVersion, - wasm_config: &WasmConfig, -) -> Result<(ModuleRef, MemoryRef), Error> { - let module = wasmi::Module::from_parity_wasm_module(parity_module)?; - let resolver = create_module_resolver(protocol_version, 
wasm_config)?; - let mut imports = ImportsBuilder::new(); - imports.push_resolver("env", &resolver); - let not_started_module = ModuleInstance::new(&module, &imports)?; - if not_started_module.has_start() { - return Err(Error::UnsupportedWasmStart); - } - let instance = not_started_module.not_started_instance().clone(); - let memory = resolver.memory_ref()?; - Ok((instance, memory)) -} - -/// Turns `key` into a `([u8; 32], AccessRights)` tuple. -/// Returns None if `key` is not `Key::URef` as it wouldn't have `AccessRights` -/// associated with it. Helper function for creating `named_keys` associating -/// addresses and corresponding `AccessRights`. -pub fn key_to_tuple(key: Key) -> Option<([u8; 32], AccessRights)> { - match key { - Key::URef(uref) => Some((uref.addr(), uref.access_rights())), - Key::Account(_) => None, - Key::Hash(_) => None, - Key::Transfer(_) => None, - Key::DeployInfo(_) => None, - Key::EraInfo(_) => None, - Key::Balance(_) => None, - Key::Bid(_) => None, - Key::Withdraw(_) => None, - Key::EraValidators(_) => None, - } -} - -/// Groups a collection of urefs by their addresses and accumulates access -/// rights per key -pub fn extract_access_rights_from_urefs>( - input: I, -) -> HashMap> { - input - .into_iter() - .map(|uref: URef| (uref.addr(), uref.access_rights())) - .group_by(|(key, _)| *key) - .into_iter() - .map(|(key, group)| { - ( - key, - group.map(|(_, x)| x).collect::>(), - ) - }) - .collect() -} - -/// Groups a collection of keys by their address and accumulates access rights -/// per key. 
-pub fn extract_access_rights_from_keys>( - input: I, -) -> HashMap> { - input - .into_iter() - .map(key_to_tuple) - .flatten() - .group_by(|(key, _)| *key) - .into_iter() - .map(|(key, group)| { - ( - key, - group.map(|(_, x)| x).collect::>(), - ) - }) - .collect() -} - -#[allow(clippy::cognitive_complexity)] -fn extract_urefs(cl_value: &CLValue) -> Result, Error> { - match cl_value.cl_type() { - CLType::Bool - | CLType::I32 - | CLType::I64 - | CLType::U8 - | CLType::U32 - | CLType::U64 - | CLType::U128 - | CLType::U256 - | CLType::U512 - | CLType::Unit - | CLType::String - | CLType::PublicKey - | CLType::Any => Ok(vec![]), - CLType::Option(ty) => match **ty { - CLType::URef => { - let opt: Option = cl_value.to_owned().into_t()?; - Ok(opt.into_iter().collect()) - } - CLType::Key => { - let opt: Option = cl_value.to_owned().into_t()?; - Ok(opt.into_iter().flat_map(Key::into_uref).collect()) - } - _ => Ok(vec![]), - }, - CLType::List(ty) => match **ty { - CLType::URef => Ok(cl_value.to_owned().into_t()?), - CLType::Key => { - let keys: Vec = cl_value.to_owned().into_t()?; - Ok(keys.into_iter().filter_map(Key::into_uref).collect()) - } - _ => Ok(vec![]), - }, - CLType::ByteArray(_) => Ok(vec![]), - CLType::Result { ok, err } => match (&**ok, &**err) { - (CLType::URef, CLType::Bool) => { - let res: Result = cl_value.to_owned().into_t()?; - match res { - Ok(uref) => Ok(vec![uref]), - Err(_) => Ok(vec![]), - } - } - (CLType::URef, CLType::I32) => { - let res: Result = cl_value.to_owned().into_t()?; - match res { - Ok(uref) => Ok(vec![uref]), - Err(_) => Ok(vec![]), - } - } - (CLType::URef, CLType::I64) => { - let res: Result = cl_value.to_owned().into_t()?; - match res { - Ok(uref) => Ok(vec![uref]), - Err(_) => Ok(vec![]), - } - } - (CLType::URef, CLType::U8) => { - let res: Result = cl_value.to_owned().into_t()?; - match res { - Ok(uref) => Ok(vec![uref]), - Err(_) => Ok(vec![]), - } - } - (CLType::URef, CLType::U32) => { - let res: Result = 
cl_value.to_owned().into_t()?; - match res { - Ok(uref) => Ok(vec![uref]), - Err(_) => Ok(vec![]), - } - } - (CLType::URef, CLType::U64) => { - let res: Result = cl_value.to_owned().into_t()?; - match res { - Ok(uref) => Ok(vec![uref]), - Err(_) => Ok(vec![]), - } - } - (CLType::URef, CLType::U128) => { - let res: Result = cl_value.to_owned().into_t()?; - match res { - Ok(uref) => Ok(vec![uref]), - Err(_) => Ok(vec![]), - } - } - (CLType::URef, CLType::U256) => { - let res: Result = cl_value.to_owned().into_t()?; - match res { - Ok(uref) => Ok(vec![uref]), - Err(_) => Ok(vec![]), - } - } - (CLType::URef, CLType::U512) => { - let res: Result = cl_value.to_owned().into_t()?; - match res { - Ok(uref) => Ok(vec![uref]), - Err(_) => Ok(vec![]), - } - } - (CLType::URef, CLType::Unit) => { - let res: Result = cl_value.to_owned().into_t()?; - match res { - Ok(uref) => Ok(vec![uref]), - Err(_) => Ok(vec![]), - } - } - (CLType::URef, CLType::String) => { - let res: Result = cl_value.to_owned().into_t()?; - match res { - Ok(uref) => Ok(vec![uref]), - Err(_) => Ok(vec![]), - } - } - (CLType::URef, CLType::Key) => { - let res: Result = cl_value.to_owned().into_t()?; - match res { - Ok(uref) => Ok(vec![uref]), - Err(key) => Ok(key.into_uref().into_iter().collect()), - } - } - (CLType::URef, CLType::URef) => { - let res: Result = cl_value.to_owned().into_t()?; - match res { - Ok(uref) => Ok(vec![uref]), - Err(uref) => Ok(vec![uref]), - } - } - (CLType::Key, CLType::Bool) => { - let res: Result = cl_value.to_owned().into_t()?; - match res { - Ok(key) => Ok(key.into_uref().into_iter().collect()), - Err(_) => Ok(vec![]), - } - } - (CLType::Key, CLType::I32) => { - let res: Result = cl_value.to_owned().into_t()?; - match res { - Ok(key) => Ok(key.into_uref().into_iter().collect()), - Err(_) => Ok(vec![]), - } - } - (CLType::Key, CLType::I64) => { - let res: Result = cl_value.to_owned().into_t()?; - match res { - Ok(key) => Ok(key.into_uref().into_iter().collect()), - Err(_) => 
Ok(vec![]), - } - } - (CLType::Key, CLType::U8) => { - let res: Result = cl_value.to_owned().into_t()?; - match res { - Ok(key) => Ok(key.into_uref().into_iter().collect()), - Err(_) => Ok(vec![]), - } - } - (CLType::Key, CLType::U32) => { - let res: Result = cl_value.to_owned().into_t()?; - match res { - Ok(key) => Ok(key.into_uref().into_iter().collect()), - Err(_) => Ok(vec![]), - } - } - (CLType::Key, CLType::U64) => { - let res: Result = cl_value.to_owned().into_t()?; - match res { - Ok(key) => Ok(key.into_uref().into_iter().collect()), - Err(_) => Ok(vec![]), - } - } - (CLType::Key, CLType::U128) => { - let res: Result = cl_value.to_owned().into_t()?; - match res { - Ok(key) => Ok(key.into_uref().into_iter().collect()), - Err(_) => Ok(vec![]), - } - } - (CLType::Key, CLType::U256) => { - let res: Result = cl_value.to_owned().into_t()?; - match res { - Ok(key) => Ok(key.into_uref().into_iter().collect()), - Err(_) => Ok(vec![]), - } - } - (CLType::Key, CLType::U512) => { - let res: Result = cl_value.to_owned().into_t()?; - match res { - Ok(key) => Ok(key.into_uref().into_iter().collect()), - Err(_) => Ok(vec![]), - } - } - (CLType::Key, CLType::Unit) => { - let res: Result = cl_value.to_owned().into_t()?; - match res { - Ok(key) => Ok(key.into_uref().into_iter().collect()), - Err(_) => Ok(vec![]), - } - } - (CLType::Key, CLType::String) => { - let res: Result = cl_value.to_owned().into_t()?; - match res { - Ok(key) => Ok(key.into_uref().into_iter().collect()), - Err(_) => Ok(vec![]), - } - } - (CLType::Key, CLType::URef) => { - let res: Result = cl_value.to_owned().into_t()?; - match res { - Ok(key) => Ok(key.into_uref().into_iter().collect()), - Err(uref) => Ok(vec![uref]), - } - } - (CLType::Key, CLType::Key) => { - let res: Result = cl_value.to_owned().into_t()?; - match res { - Ok(key) => Ok(key.into_uref().into_iter().collect()), - Err(key) => Ok(key.into_uref().into_iter().collect()), - } - } - (CLType::Bool, CLType::URef) => { - let res: Result = 
cl_value.to_owned().into_t()?; - match res { - Ok(_) => Ok(vec![]), - Err(uref) => Ok(vec![uref]), - } - } - (CLType::I32, CLType::URef) => { - let res: Result = cl_value.to_owned().into_t()?; - match res { - Ok(_) => Ok(vec![]), - Err(uref) => Ok(vec![uref]), - } - } - (CLType::I64, CLType::URef) => { - let res: Result = cl_value.to_owned().into_t()?; - match res { - Ok(_) => Ok(vec![]), - Err(uref) => Ok(vec![uref]), - } - } - (CLType::U8, CLType::URef) => { - let res: Result = cl_value.to_owned().into_t()?; - match res { - Ok(_) => Ok(vec![]), - Err(uref) => Ok(vec![uref]), - } - } - (CLType::U32, CLType::URef) => { - let res: Result = cl_value.to_owned().into_t()?; - match res { - Ok(_) => Ok(vec![]), - Err(uref) => Ok(vec![uref]), - } - } - (CLType::U64, CLType::URef) => { - let res: Result = cl_value.to_owned().into_t()?; - match res { - Ok(_) => Ok(vec![]), - Err(uref) => Ok(vec![uref]), - } - } - (CLType::U128, CLType::URef) => { - let res: Result = cl_value.to_owned().into_t()?; - match res { - Ok(_) => Ok(vec![]), - Err(uref) => Ok(vec![uref]), - } - } - (CLType::U256, CLType::URef) => { - let res: Result = cl_value.to_owned().into_t()?; - match res { - Ok(_) => Ok(vec![]), - Err(uref) => Ok(vec![uref]), - } - } - (CLType::U512, CLType::URef) => { - let res: Result = cl_value.to_owned().into_t()?; - match res { - Ok(_) => Ok(vec![]), - Err(uref) => Ok(vec![uref]), - } - } - (CLType::Unit, CLType::URef) => { - let res: Result<(), URef> = cl_value.to_owned().into_t()?; - match res { - Ok(_) => Ok(vec![]), - Err(uref) => Ok(vec![uref]), - } - } - (CLType::String, CLType::URef) => { - let res: Result = cl_value.to_owned().into_t()?; - match res { - Ok(_) => Ok(vec![]), - Err(uref) => Ok(vec![uref]), - } - } - (CLType::Bool, CLType::Key) => { - let res: Result = cl_value.to_owned().into_t()?; - match res { - Ok(_) => Ok(vec![]), - Err(key) => Ok(key.into_uref().into_iter().collect()), - } - } - (CLType::I32, CLType::Key) => { - let res: Result = 
cl_value.to_owned().into_t()?; - match res { - Ok(_) => Ok(vec![]), - Err(key) => Ok(key.into_uref().into_iter().collect()), - } - } - (CLType::I64, CLType::Key) => { - let res: Result = cl_value.to_owned().into_t()?; - match res { - Ok(_) => Ok(vec![]), - Err(key) => Ok(key.into_uref().into_iter().collect()), - } - } - (CLType::U8, CLType::Key) => { - let res: Result = cl_value.to_owned().into_t()?; - match res { - Ok(_) => Ok(vec![]), - Err(key) => Ok(key.into_uref().into_iter().collect()), - } - } - (CLType::U32, CLType::Key) => { - let res: Result = cl_value.to_owned().into_t()?; - match res { - Ok(_) => Ok(vec![]), - Err(key) => Ok(key.into_uref().into_iter().collect()), - } - } - (CLType::U64, CLType::Key) => { - let res: Result = cl_value.to_owned().into_t()?; - match res { - Ok(_) => Ok(vec![]), - Err(key) => Ok(key.into_uref().into_iter().collect()), - } - } - (CLType::U128, CLType::Key) => { - let res: Result = cl_value.to_owned().into_t()?; - match res { - Ok(_) => Ok(vec![]), - Err(key) => Ok(key.into_uref().into_iter().collect()), - } - } - (CLType::U256, CLType::Key) => { - let res: Result = cl_value.to_owned().into_t()?; - match res { - Ok(_) => Ok(vec![]), - Err(key) => Ok(key.into_uref().into_iter().collect()), - } - } - (CLType::U512, CLType::Key) => { - let res: Result = cl_value.to_owned().into_t()?; - match res { - Ok(_) => Ok(vec![]), - Err(key) => Ok(key.into_uref().into_iter().collect()), - } - } - (CLType::Unit, CLType::Key) => { - let res: Result<(), Key> = cl_value.to_owned().into_t()?; - match res { - Ok(_) => Ok(vec![]), - Err(key) => Ok(key.into_uref().into_iter().collect()), - } - } - (CLType::String, CLType::Key) => { - let res: Result = cl_value.to_owned().into_t()?; - match res { - Ok(_) => Ok(vec![]), - Err(key) => Ok(key.into_uref().into_iter().collect()), - } - } - (_, _) => Ok(vec![]), - }, - CLType::Map { key, value } => match (&**key, &**value) { - (CLType::URef, CLType::Bool) => { - let map: BTreeMap = 
cl_value.to_owned().into_t()?; - Ok(map.keys().cloned().collect()) - } - (CLType::URef, CLType::I32) => { - let map: BTreeMap = cl_value.to_owned().into_t()?; - Ok(map.keys().cloned().collect()) - } - (CLType::URef, CLType::I64) => { - let map: BTreeMap = cl_value.to_owned().into_t()?; - Ok(map.keys().cloned().collect()) - } - (CLType::URef, CLType::U8) => { - let map: BTreeMap = cl_value.to_owned().into_t()?; - Ok(map.keys().cloned().collect()) - } - (CLType::URef, CLType::U32) => { - let map: BTreeMap = cl_value.to_owned().into_t()?; - Ok(map.keys().cloned().collect()) - } - (CLType::URef, CLType::U64) => { - let map: BTreeMap = cl_value.to_owned().into_t()?; - Ok(map.keys().cloned().collect()) - } - (CLType::URef, CLType::U128) => { - let map: BTreeMap = cl_value.to_owned().into_t()?; - Ok(map.keys().cloned().collect()) - } - (CLType::URef, CLType::U256) => { - let map: BTreeMap = cl_value.to_owned().into_t()?; - Ok(map.keys().cloned().collect()) - } - (CLType::URef, CLType::U512) => { - let map: BTreeMap = cl_value.to_owned().into_t()?; - Ok(map.keys().cloned().collect()) - } - (CLType::URef, CLType::Unit) => { - let map: BTreeMap = cl_value.to_owned().into_t()?; - Ok(map.keys().cloned().collect()) - } - (CLType::URef, CLType::String) => { - let map: BTreeMap = cl_value.to_owned().into_t()?; - Ok(map.keys().cloned().collect()) - } - (CLType::URef, CLType::Key) => { - let map: BTreeMap = cl_value.to_owned().into_t()?; - Ok(map - .keys() - .cloned() - .chain(map.values().cloned().filter_map(Key::into_uref)) - .collect()) - } - (CLType::URef, CLType::URef) => { - let map: BTreeMap = cl_value.to_owned().into_t()?; - Ok(map.keys().cloned().chain(map.values().cloned()).collect()) - } - (CLType::Key, CLType::Bool) => { - let map: BTreeMap = cl_value.to_owned().into_t()?; - Ok(map.keys().cloned().filter_map(Key::into_uref).collect()) - } - (CLType::Key, CLType::I32) => { - let map: BTreeMap = cl_value.to_owned().into_t()?; - 
Ok(map.keys().cloned().filter_map(Key::into_uref).collect()) - } - (CLType::Key, CLType::I64) => { - let map: BTreeMap = cl_value.to_owned().into_t()?; - Ok(map.keys().cloned().filter_map(Key::into_uref).collect()) - } - (CLType::Key, CLType::U8) => { - let map: BTreeMap = cl_value.to_owned().into_t()?; - Ok(map.keys().cloned().filter_map(Key::into_uref).collect()) - } - (CLType::Key, CLType::U32) => { - let map: BTreeMap = cl_value.to_owned().into_t()?; - Ok(map.keys().cloned().filter_map(Key::into_uref).collect()) - } - (CLType::Key, CLType::U64) => { - let map: BTreeMap = cl_value.to_owned().into_t()?; - Ok(map.keys().cloned().filter_map(Key::into_uref).collect()) - } - (CLType::Key, CLType::U128) => { - let map: BTreeMap = cl_value.to_owned().into_t()?; - Ok(map.keys().cloned().filter_map(Key::into_uref).collect()) - } - (CLType::Key, CLType::U256) => { - let map: BTreeMap = cl_value.to_owned().into_t()?; - Ok(map.keys().cloned().filter_map(Key::into_uref).collect()) - } - (CLType::Key, CLType::U512) => { - let map: BTreeMap = cl_value.to_owned().into_t()?; - Ok(map.keys().cloned().filter_map(Key::into_uref).collect()) - } - (CLType::Key, CLType::Unit) => { - let map: BTreeMap = cl_value.to_owned().into_t()?; - Ok(map.keys().cloned().filter_map(Key::into_uref).collect()) - } - (CLType::Key, CLType::String) => { - let map: BTreeMap = cl_value.to_owned().into_t()?; - Ok(map.keys().cloned().filter_map(Key::into_uref).collect()) - } - (CLType::Key, CLType::URef) => { - let map: BTreeMap = cl_value.to_owned().into_t()?; - Ok(map - .keys() - .cloned() - .filter_map(Key::into_uref) - .chain(map.values().cloned()) - .collect()) - } - (CLType::Key, CLType::Key) => { - let map: BTreeMap = cl_value.to_owned().into_t()?; - Ok(map - .keys() - .cloned() - .filter_map(Key::into_uref) - .chain(map.values().cloned().filter_map(Key::into_uref)) - .collect()) - } - (CLType::Bool, CLType::URef) => { - let map: BTreeMap = cl_value.to_owned().into_t()?; - 
Ok(map.values().cloned().collect()) - } - (CLType::I32, CLType::URef) => { - let map: BTreeMap = cl_value.to_owned().into_t()?; - Ok(map.values().cloned().collect()) - } - (CLType::I64, CLType::URef) => { - let map: BTreeMap = cl_value.to_owned().into_t()?; - Ok(map.values().cloned().collect()) - } - (CLType::U8, CLType::URef) => { - let map: BTreeMap = cl_value.to_owned().into_t()?; - Ok(map.values().cloned().collect()) - } - (CLType::U32, CLType::URef) => { - let map: BTreeMap = cl_value.to_owned().into_t()?; - Ok(map.values().cloned().collect()) - } - (CLType::U64, CLType::URef) => { - let map: BTreeMap = cl_value.to_owned().into_t()?; - Ok(map.values().cloned().collect()) - } - (CLType::U128, CLType::URef) => { - let map: BTreeMap = cl_value.to_owned().into_t()?; - Ok(map.values().cloned().collect()) - } - (CLType::U256, CLType::URef) => { - let map: BTreeMap = cl_value.to_owned().into_t()?; - Ok(map.values().cloned().collect()) - } - (CLType::U512, CLType::URef) => { - let map: BTreeMap = cl_value.to_owned().into_t()?; - Ok(map.values().cloned().collect()) - } - (CLType::Unit, CLType::URef) => { - let map: BTreeMap<(), URef> = cl_value.to_owned().into_t()?; - Ok(map.values().cloned().collect()) - } - (CLType::String, CLType::URef) => { - let map: BTreeMap = cl_value.to_owned().into_t()?; - Ok(map.values().cloned().collect()) - } - (CLType::PublicKey, CLType::URef) => { - let map: BTreeMap = cl_value.to_owned().into_t()?; - Ok(map.values().cloned().collect()) - } - (CLType::Bool, CLType::Key) => { - let map: BTreeMap = cl_value.to_owned().into_t()?; - Ok(map.values().cloned().filter_map(Key::into_uref).collect()) - } - (CLType::I32, CLType::Key) => { - let map: BTreeMap = cl_value.to_owned().into_t()?; - Ok(map.values().cloned().filter_map(Key::into_uref).collect()) - } - (CLType::I64, CLType::Key) => { - let map: BTreeMap = cl_value.to_owned().into_t()?; - Ok(map.values().cloned().filter_map(Key::into_uref).collect()) - } - (CLType::U8, CLType::Key) => { - let 
map: BTreeMap = cl_value.to_owned().into_t()?; - Ok(map.values().cloned().filter_map(Key::into_uref).collect()) - } - (CLType::U32, CLType::Key) => { - let map: BTreeMap = cl_value.to_owned().into_t()?; - Ok(map.values().cloned().filter_map(Key::into_uref).collect()) - } - (CLType::U64, CLType::Key) => { - let map: BTreeMap = cl_value.to_owned().into_t()?; - Ok(map.values().cloned().filter_map(Key::into_uref).collect()) - } - (CLType::U128, CLType::Key) => { - let map: BTreeMap = cl_value.to_owned().into_t()?; - Ok(map.values().cloned().filter_map(Key::into_uref).collect()) - } - (CLType::U256, CLType::Key) => { - let map: BTreeMap = cl_value.to_owned().into_t()?; - Ok(map.values().cloned().filter_map(Key::into_uref).collect()) - } - (CLType::U512, CLType::Key) => { - let map: BTreeMap = cl_value.to_owned().into_t()?; - Ok(map.values().cloned().filter_map(Key::into_uref).collect()) - } - (CLType::Unit, CLType::Key) => { - let map: BTreeMap<(), Key> = cl_value.to_owned().into_t()?; - Ok(map.values().cloned().filter_map(Key::into_uref).collect()) - } - (CLType::String, CLType::Key) => { - let map: NamedKeys = cl_value.to_owned().into_t()?; - Ok(map.values().cloned().filter_map(Key::into_uref).collect()) - } - (CLType::PublicKey, CLType::Key) => { - let map: BTreeMap = cl_value.to_owned().into_t()?; - Ok(map.values().cloned().filter_map(Key::into_uref).collect()) - } - (_, _) => Ok(vec![]), - }, - CLType::Tuple1([ty]) => match **ty { - CLType::URef => { - let val: (URef,) = cl_value.to_owned().into_t()?; - Ok(vec![val.0]) - } - CLType::Key => { - let val: (Key,) = cl_value.to_owned().into_t()?; - Ok(val.0.into_uref().into_iter().collect()) - } - _ => Ok(vec![]), - }, - CLType::Tuple2([ty1, ty2]) => match (&**ty1, &**ty2) { - (CLType::URef, CLType::Bool) => { - let val: (URef, bool) = cl_value.to_owned().into_t()?; - Ok(vec![val.0]) - } - (CLType::URef, CLType::I32) => { - let val: (URef, i32) = cl_value.to_owned().into_t()?; - Ok(vec![val.0]) - } - (CLType::URef, 
CLType::I64) => { - let val: (URef, i64) = cl_value.to_owned().into_t()?; - Ok(vec![val.0]) - } - (CLType::URef, CLType::U8) => { - let val: (URef, u8) = cl_value.to_owned().into_t()?; - Ok(vec![val.0]) - } - (CLType::URef, CLType::U32) => { - let val: (URef, u32) = cl_value.to_owned().into_t()?; - Ok(vec![val.0]) - } - (CLType::URef, CLType::U64) => { - let val: (URef, u64) = cl_value.to_owned().into_t()?; - Ok(vec![val.0]) - } - (CLType::URef, CLType::U128) => { - let val: (URef, U128) = cl_value.to_owned().into_t()?; - Ok(vec![val.0]) - } - (CLType::URef, CLType::U256) => { - let val: (URef, U256) = cl_value.to_owned().into_t()?; - Ok(vec![val.0]) - } - (CLType::URef, CLType::U512) => { - let val: (URef, U512) = cl_value.to_owned().into_t()?; - Ok(vec![val.0]) - } - (CLType::URef, CLType::Unit) => { - let val: (URef, ()) = cl_value.to_owned().into_t()?; - Ok(vec![val.0]) - } - (CLType::URef, CLType::String) => { - let val: (URef, String) = cl_value.to_owned().into_t()?; - Ok(vec![val.0]) - } - (CLType::URef, CLType::Key) => { - let val: (URef, Key) = cl_value.to_owned().into_t()?; - let mut res = vec![val.0]; - res.extend(val.1.into_uref().into_iter()); - Ok(res) - } - (CLType::URef, CLType::URef) => { - let val: (URef, URef) = cl_value.to_owned().into_t()?; - Ok(vec![val.0, val.1]) - } - (CLType::Key, CLType::Bool) => { - let val: (Key, bool) = cl_value.to_owned().into_t()?; - Ok(val.0.into_uref().into_iter().collect()) - } - (CLType::Key, CLType::I32) => { - let val: (Key, i32) = cl_value.to_owned().into_t()?; - Ok(val.0.into_uref().into_iter().collect()) - } - (CLType::Key, CLType::I64) => { - let val: (Key, i64) = cl_value.to_owned().into_t()?; - Ok(val.0.into_uref().into_iter().collect()) - } - (CLType::Key, CLType::U8) => { - let val: (Key, u8) = cl_value.to_owned().into_t()?; - Ok(val.0.into_uref().into_iter().collect()) - } - (CLType::Key, CLType::U32) => { - let val: (Key, u32) = cl_value.to_owned().into_t()?; - 
Ok(val.0.into_uref().into_iter().collect()) - } - (CLType::Key, CLType::U64) => { - let val: (Key, u64) = cl_value.to_owned().into_t()?; - Ok(val.0.into_uref().into_iter().collect()) - } - (CLType::Key, CLType::U128) => { - let val: (Key, U128) = cl_value.to_owned().into_t()?; - Ok(val.0.into_uref().into_iter().collect()) - } - (CLType::Key, CLType::U256) => { - let val: (Key, U256) = cl_value.to_owned().into_t()?; - Ok(val.0.into_uref().into_iter().collect()) - } - (CLType::Key, CLType::U512) => { - let val: (Key, U512) = cl_value.to_owned().into_t()?; - Ok(val.0.into_uref().into_iter().collect()) - } - (CLType::Key, CLType::Unit) => { - let val: (Key, ()) = cl_value.to_owned().into_t()?; - Ok(val.0.into_uref().into_iter().collect()) - } - (CLType::Key, CLType::String) => { - let val: (Key, String) = cl_value.to_owned().into_t()?; - Ok(val.0.into_uref().into_iter().collect()) - } - (CLType::Key, CLType::URef) => { - let val: (Key, URef) = cl_value.to_owned().into_t()?; - let mut res: Vec = val.0.into_uref().into_iter().collect(); - res.push(val.1); - Ok(res) - } - (CLType::Key, CLType::Key) => { - let val: (Key, Key) = cl_value.to_owned().into_t()?; - Ok(val - .0 - .into_uref() - .into_iter() - .chain(val.1.into_uref().into_iter()) - .collect()) - } - (CLType::Bool, CLType::URef) => { - let val: (bool, URef) = cl_value.to_owned().into_t()?; - Ok(vec![val.1]) - } - (CLType::I32, CLType::URef) => { - let val: (i32, URef) = cl_value.to_owned().into_t()?; - Ok(vec![val.1]) - } - (CLType::I64, CLType::URef) => { - let val: (i64, URef) = cl_value.to_owned().into_t()?; - Ok(vec![val.1]) - } - (CLType::U8, CLType::URef) => { - let val: (u8, URef) = cl_value.to_owned().into_t()?; - Ok(vec![val.1]) - } - (CLType::U32, CLType::URef) => { - let val: (u32, URef) = cl_value.to_owned().into_t()?; - Ok(vec![val.1]) - } - (CLType::U64, CLType::URef) => { - let val: (u64, URef) = cl_value.to_owned().into_t()?; - Ok(vec![val.1]) - } - (CLType::U128, CLType::URef) => { - let val: 
(U128, URef) = cl_value.to_owned().into_t()?; - Ok(vec![val.1]) - } - (CLType::U256, CLType::URef) => { - let val: (U256, URef) = cl_value.to_owned().into_t()?; - Ok(vec![val.1]) - } - (CLType::U512, CLType::URef) => { - let val: (U512, URef) = cl_value.to_owned().into_t()?; - Ok(vec![val.1]) - } - (CLType::Unit, CLType::URef) => { - let val: ((), URef) = cl_value.to_owned().into_t()?; - Ok(vec![val.1]) - } - (CLType::String, CLType::URef) => { - let val: (String, URef) = cl_value.to_owned().into_t()?; - Ok(vec![val.1]) - } - (CLType::Bool, CLType::Key) => { - let val: (bool, Key) = cl_value.to_owned().into_t()?; - Ok(val.1.into_uref().into_iter().collect()) - } - (CLType::I32, CLType::Key) => { - let val: (i32, Key) = cl_value.to_owned().into_t()?; - Ok(val.1.into_uref().into_iter().collect()) - } - (CLType::I64, CLType::Key) => { - let val: (i64, Key) = cl_value.to_owned().into_t()?; - Ok(val.1.into_uref().into_iter().collect()) - } - (CLType::U8, CLType::Key) => { - let val: (u8, Key) = cl_value.to_owned().into_t()?; - Ok(val.1.into_uref().into_iter().collect()) - } - (CLType::U32, CLType::Key) => { - let val: (u32, Key) = cl_value.to_owned().into_t()?; - Ok(val.1.into_uref().into_iter().collect()) - } - (CLType::U64, CLType::Key) => { - let val: (u64, Key) = cl_value.to_owned().into_t()?; - Ok(val.1.into_uref().into_iter().collect()) - } - (CLType::U128, CLType::Key) => { - let val: (U128, Key) = cl_value.to_owned().into_t()?; - Ok(val.1.into_uref().into_iter().collect()) - } - (CLType::U256, CLType::Key) => { - let val: (U256, Key) = cl_value.to_owned().into_t()?; - Ok(val.1.into_uref().into_iter().collect()) - } - (CLType::U512, CLType::Key) => { - let val: (U512, Key) = cl_value.to_owned().into_t()?; - Ok(val.1.into_uref().into_iter().collect()) - } - (CLType::Unit, CLType::Key) => { - let val: ((), Key) = cl_value.to_owned().into_t()?; - Ok(val.1.into_uref().into_iter().collect()) - } - (CLType::String, CLType::Key) => { - let val: (String, Key) = 
cl_value.to_owned().into_t()?; - Ok(val.1.into_uref().into_iter().collect()) - } - (_, _) => Ok(vec![]), - }, - // TODO: nested matches for Tuple3? - CLType::Tuple3(_) => Ok(vec![]), - CLType::Key => { - let key: Key = cl_value.to_owned().into_t()?; // TODO: optimize? - Ok(key.into_uref().into_iter().collect()) - } - CLType::URef => { - let uref: URef = cl_value.to_owned().into_t()?; // TODO: optimize? - Ok(vec![uref]) - } - } -} - -impl<'a, R> Runtime<'a, R> -where - R: StateReader, - R::Error: Into, -{ - pub fn new( - config: EngineConfig, - system_contract_cache: SystemContractCache, - memory: MemoryRef, - module: Module, - context: RuntimeContext<'a, R>, - ) -> Self { - Runtime { - config, - system_contract_cache, - memory, - module, - host_buffer: None, - context, - } - } - - pub fn memory(&self) -> &MemoryRef { - &self.memory - } - - pub fn module(&self) -> &Module { - &self.module - } - - pub fn context(&self) -> &RuntimeContext<'a, R> { - &self.context - } - - pub fn protocol_data(&self) -> &ProtocolData { - self.context.protocol_data() - } - - fn gas(&mut self, amount: Gas) -> Result<(), Error> { - self.context.charge_gas(amount) - } - - fn gas_counter(&self) -> Gas { - self.context.gas_counter() - } - - fn set_gas_counter(&mut self, new_gas_counter: Gas) { - self.context.set_gas_counter(new_gas_counter); - } - - pub(crate) fn charge_system_contract_call(&mut self, amount: T) -> Result<(), Error> - where - T: Into, - { - self.context.charge_system_contract_call(amount) - } - - fn bytes_from_mem(&self, ptr: u32, size: usize) -> Result, Error> { - self.memory.get(ptr, size).map_err(Into::into) - } - - fn t_from_mem(&self, ptr: u32, size: u32) -> Result { - let bytes = self.bytes_from_mem(ptr, size as usize)?; - bytesrepr::deserialize(bytes).map_err(Into::into) - } - - /// Reads key (defined as `key_ptr` and `key_size` tuple) from Wasm memory. 
- fn key_from_mem(&mut self, key_ptr: u32, key_size: u32) -> Result { - let bytes = self.bytes_from_mem(key_ptr, key_size as usize)?; - bytesrepr::deserialize(bytes).map_err(Into::into) - } - - /// Reads `CLValue` (defined as `cl_value_ptr` and `cl_value_size` tuple) from Wasm memory. - fn cl_value_from_mem( - &mut self, - cl_value_ptr: u32, - cl_value_size: u32, - ) -> Result { - let bytes = self.bytes_from_mem(cl_value_ptr, cl_value_size as usize)?; - bytesrepr::deserialize(bytes).map_err(Into::into) - } - - fn string_from_mem(&self, ptr: u32, size: u32) -> Result { - let bytes = self.bytes_from_mem(ptr, size as usize)?; - bytesrepr::deserialize(bytes).map_err(|e| Error::BytesRepr(e).into()) - } - - fn get_module_from_entry_points( - &mut self, - entry_points: &EntryPoints, - ) -> Result, Error> { - let export_section = self - .module - .export_section() - .ok_or_else(|| Error::FunctionNotFound(String::from("Missing Export Section")))?; - - let entry_point_names: Vec<&str> = entry_points.keys().map(|s| s.as_str()).collect(); - - let maybe_missing_name: Option = entry_point_names - .iter() - .find(|name| { - export_section - .entries() - .iter() - .find(|export_entry| export_entry.field() == **name) - .is_none() - }) - .map(|s| String::from(*s)); - - if let Some(missing_name) = maybe_missing_name { - Err(Error::FunctionNotFound(missing_name)) - } else { - let mut module = self.module.clone(); - pwasm_utils::optimize(&mut module, entry_point_names)?; - parity_wasm::serialize(module).map_err(Error::ParityWasm) - } - } - - fn is_valid_uref(&mut self, uref_ptr: u32, uref_size: u32) -> Result { - let bytes = self.bytes_from_mem(uref_ptr, uref_size as usize)?; - let uref: URef = bytesrepr::deserialize(bytes).map_err(Error::BytesRepr)?; - Ok(self.context.validate_uref(&uref).is_ok()) - } - - /// Load the uref known by the given name into the Wasm memory - fn load_key( - &mut self, - name_ptr: u32, - name_size: u32, - output_ptr: u32, - output_size: usize, - 
bytes_written_ptr: u32, - ) -> Result, Trap> { - let name = self.string_from_mem(name_ptr, name_size)?; - - // Get a key and serialize it - let key = match self.context.named_keys_get(&name) { - Some(key) => key, - None => return Ok(Err(ApiError::MissingKey)), - }; - - let key_bytes = match key.to_bytes() { - Ok(bytes) => bytes, - Err(error) => return Ok(Err(error.into())), - }; - - // `output_size` has to be greater or equal to the actual length of serialized Key bytes - if output_size < key_bytes.len() { - return Ok(Err(ApiError::BufferTooSmall)); - } - - // Set serialized Key bytes into the output buffer - if let Err(error) = self.memory.set(output_ptr, &key_bytes) { - return Err(Error::Interpreter(error.into()).into()); - } - - // For all practical purposes following cast is assumed to be safe - let bytes_size = key_bytes.len() as u32; - let size_bytes = bytes_size.to_le_bytes(); // Wasm is little-endian - if let Err(error) = self.memory.set(bytes_written_ptr, &size_bytes) { - return Err(Error::Interpreter(error.into()).into()); - } - - Ok(Ok(())) - } - - fn has_key(&mut self, name_ptr: u32, name_size: u32) -> Result { - let name = self.string_from_mem(name_ptr, name_size)?; - if self.context.named_keys_contains_key(&name) { - Ok(0) - } else { - Ok(1) - } - } - - fn put_key( - &mut self, - name_ptr: u32, - name_size: u32, - key_ptr: u32, - key_size: u32, - ) -> Result<(), Trap> { - let name = self.string_from_mem(name_ptr, name_size)?; - let key = self.key_from_mem(key_ptr, key_size)?; - self.context.put_key(name, key).map_err(Into::into) - } - - fn remove_key(&mut self, name_ptr: u32, name_size: u32) -> Result<(), Trap> { - let name = self.string_from_mem(name_ptr, name_size)?; - self.context.remove_key(&name)?; - Ok(()) - } - - /// Writes runtime context's account main purse to dest_ptr in the Wasm memory. 
- fn get_main_purse(&mut self, dest_ptr: u32) -> Result<(), Trap> { - let purse = self.context.get_main_purse()?; - let purse_bytes = purse.into_bytes().map_err(Error::BytesRepr)?; - self.memory - .set(dest_ptr, &purse_bytes) - .map_err(|e| Error::Interpreter(e.into()).into()) - } - - /// Writes caller (deploy) account public key to dest_ptr in the Wasm - /// memory. - fn get_caller(&mut self, output_size: u32) -> Result, Trap> { - if !self.can_write_to_host_buffer() { - // Exit early if the host buffer is already occupied - return Ok(Err(ApiError::HostBufferFull)); - } - let value = CLValue::from_t(self.context.get_caller()).map_err(Error::CLValue)?; - let value_size = value.inner_bytes().len(); - - // Save serialized public key into host buffer - if let Err(error) = self.write_host_buffer(value) { - return Ok(Err(error)); - } - - // Write output - let output_size_bytes = value_size.to_le_bytes(); // Wasm is little-endian - if let Err(error) = self.memory.set(output_size, &output_size_bytes) { - return Err(Error::Interpreter(error.into()).into()); - } - Ok(Ok(())) - } - - /// Writes runtime context's phase to dest_ptr in the Wasm memory. - fn get_phase(&mut self, dest_ptr: u32) -> Result<(), Trap> { - let phase = self.context.phase(); - let bytes = phase.into_bytes().map_err(Error::BytesRepr)?; - self.memory - .set(dest_ptr, &bytes) - .map_err(|e| Error::Interpreter(e.into()).into()) - } - - /// Writes current blocktime to dest_ptr in Wasm memory. - fn get_blocktime(&self, dest_ptr: u32) -> Result<(), Trap> { - let blocktime = self - .context - .get_blocktime() - .into_bytes() - .map_err(Error::BytesRepr)?; - self.memory - .set(dest_ptr, &blocktime) - .map_err(|e| Error::Interpreter(e.into()).into()) - } - - /// Return some bytes from the memory and terminate the current `sub_call`. Note that the return - /// type is `Trap`, indicating that this function will always kill the current Wasm instance. 
- fn ret( - &mut self, - value_ptr: u32, - value_size: usize, - scoped_instrumenter: &mut ScopedInstrumenter, - ) -> Trap { - const UREF_COUNT: &str = "uref_count"; - self.host_buffer = None; - let mem_get = self - .memory - .get(value_ptr, value_size) - .map_err(|e| Error::Interpreter(e.into())); - match mem_get { - Ok(buf) => { - // Set the result field in the runtime and return the proper element of the `Error` - // enum indicating that the reason for exiting the module was a call to ret. - self.host_buffer = bytesrepr::deserialize(buf).ok(); - - let urefs = match &self.host_buffer { - Some(buf) => extract_urefs(buf), - None => Ok(vec![]), - }; - match urefs { - Ok(urefs) => { - scoped_instrumenter.add_property(UREF_COUNT, urefs.len()); - Error::Ret(urefs).into() - } - Err(e) => { - scoped_instrumenter.add_property(UREF_COUNT, 0); - e.into() - } - } - } - Err(e) => { - scoped_instrumenter.add_property(UREF_COUNT, 0); - e.into() - } - } - } - - pub fn is_mint(&self, key: Key) -> bool { - key.into_hash() == Some(self.protocol_data().mint().value()) - } - - pub fn is_handle_payment(&self, key: Key) -> bool { - key.into_hash() == Some(self.protocol_data().handle_payment().value()) - } - - pub fn is_auction(&self, key: Key) -> bool { - key.into_hash() == Some(self.protocol_data().auction().value()) - } - - fn get_named_argument( - args: &RuntimeArgs, - name: &str, - ) -> Result { - let arg: CLValue = args - .get(name) - .cloned() - .ok_or(Error::Revert(ApiError::MissingArgument))?; - arg.into_t() - .map_err(|_| Error::Revert(ApiError::InvalidArgument)) - } - - fn reverter>(error: T) -> Error { - let api_error: ApiError = error.into(); - // NOTE: This is special casing needed to keep the native system contracts propagate - // GasLimit properly to the user. Once support for wasm system contract will be dropped this - // won't be necessary anymore. 
- match api_error { - ApiError::Mint(mint_error) if mint_error == mint::Error::GasLimit as u8 => { - Error::GasLimit - } - ApiError::AuctionError(auction_error) - if auction_error == auction::Error::GasLimit as u8 => - { - Error::GasLimit - } - ApiError::HandlePayment(handle_payment_error) - if handle_payment_error == handle_payment::Error::GasLimit as u8 => - { - Error::GasLimit - } - api_error => Error::Revert(api_error), - } - } - - pub fn call_host_mint( - &mut self, - protocol_version: ProtocolVersion, - entry_point_name: &str, - named_keys: &mut NamedKeys, - runtime_args: &RuntimeArgs, - extra_keys: &[Key], - ) -> Result { - let access_rights = { - let mut keys: Vec = named_keys.values().cloned().collect(); - keys.extend(extra_keys); - keys.push(self.get_mint_contract().into()); - keys.push(self.get_handle_payment_contract().into()); - extract_access_rights_from_keys(keys) - }; - let authorization_keys = self.context.authorization_keys().to_owned(); - let account = self.context.account(); - let base_key = self.protocol_data().mint().into(); - let blocktime = self.context.get_blocktime(); - let deploy_hash = self.context.get_deploy_hash(); - let gas_limit = self.context.gas_limit(); - let gas_counter = self.context.gas_counter(); - let hash_address_generator = self.context.hash_address_generator(); - let uref_address_generator = self.context.uref_address_generator(); - let transfer_address_generator = self.context.transfer_address_generator(); - let correlation_id = self.context.correlation_id(); - let phase = self.context.phase(); - let protocol_data = self.context.protocol_data(); - let transfers = self.context.transfers().to_owned(); - - let mint_context = RuntimeContext::new( - self.context.state(), - EntryPointType::Contract, - named_keys, - access_rights, - runtime_args.to_owned(), - authorization_keys, - account, - base_key, - blocktime, - deploy_hash, - gas_limit, - gas_counter, - hash_address_generator, - uref_address_generator, - 
transfer_address_generator, - protocol_version, - correlation_id, - phase, - *protocol_data, - transfers, - ); - - let mut mint_runtime = Runtime::new( - self.config, - SystemContractCache::clone(&self.system_contract_cache), - self.memory.clone(), - self.module.clone(), - mint_context, - ); - - let system_config = protocol_data.system_config(); - let mint_costs = system_config.mint_costs(); - - let result = match entry_point_name { - // Type: `fn mint(amount: U512) -> Result` - mint::METHOD_MINT => (|| { - mint_runtime.charge_system_contract_call(mint_costs.mint)?; - - let amount: U512 = Self::get_named_argument(&runtime_args, mint::ARG_AMOUNT)?; - let result: Result = mint_runtime.mint(amount); - if let Err(mint::Error::GasLimit) = result { - return Err(execution::Error::GasLimit); - } - CLValue::from_t(result).map_err(Self::reverter) - })(), - mint::METHOD_REDUCE_TOTAL_SUPPLY => (|| { - mint_runtime.charge_system_contract_call(mint_costs.reduce_total_supply)?; - - let amount: U512 = Self::get_named_argument(&runtime_args, mint::ARG_AMOUNT)?; - let result: Result<(), mint::Error> = mint_runtime.reduce_total_supply(amount); - CLValue::from_t(result).map_err(Self::reverter) - })(), - // Type: `fn create() -> URef` - mint::METHOD_CREATE => (|| { - mint_runtime.charge_system_contract_call(mint_costs.create)?; - - let uref = mint_runtime.mint(U512::zero()).map_err(Self::reverter)?; - CLValue::from_t(uref).map_err(Self::reverter) - })(), - // Type: `fn balance(purse: URef) -> Option` - mint::METHOD_BALANCE => (|| { - mint_runtime.charge_system_contract_call(mint_costs.balance)?; - - let uref: URef = Self::get_named_argument(&runtime_args, mint::ARG_PURSE)?; - let maybe_balance: Option = - mint_runtime.balance(uref).map_err(Self::reverter)?; - CLValue::from_t(maybe_balance).map_err(Self::reverter) - })(), - // Type: `fn transfer(maybe_to: Option, source: URef, target: URef, amount: - // U512, id: Option) -> Result<(), Error>` - mint::METHOD_TRANSFER => (|| { - 
mint_runtime.charge_system_contract_call(mint_costs.transfer)?; - - let maybe_to: Option = - Self::get_named_argument(&runtime_args, mint::ARG_TO)?; - let source: URef = Self::get_named_argument(&runtime_args, mint::ARG_SOURCE)?; - let target: URef = Self::get_named_argument(&runtime_args, mint::ARG_TARGET)?; - let amount: U512 = Self::get_named_argument(&runtime_args, mint::ARG_AMOUNT)?; - let id: Option = Self::get_named_argument(&runtime_args, mint::ARG_ID)?; - let result: Result<(), mint::Error> = - mint_runtime.transfer(maybe_to, source, target, amount, id); - CLValue::from_t(result).map_err(Self::reverter) - })(), - // Type: `fn read_base_round_reward() -> Result` - mint::METHOD_READ_BASE_ROUND_REWARD => (|| { - mint_runtime.charge_system_contract_call(mint_costs.read_base_round_reward)?; - - let result: U512 = mint_runtime - .read_base_round_reward() - .map_err(Self::reverter)?; - CLValue::from_t(result).map_err(Self::reverter) - })(), - - _ => CLValue::from_t(()).map_err(Self::reverter), - }; - - // Charge just for the amount that particular entry point cost - using gas cost from the - // isolated runtime might have a recursive costs whenever system contract calls other system - // contract. - self.gas(mint_runtime.gas_counter() - gas_counter)?; - - // Result still contains a result, but the entrypoints logic does not exit early on errors. 
- let ret = result?; - - let urefs = extract_urefs(&ret)?; - let access_rights = extract_access_rights_from_urefs(urefs); - self.context.access_rights_extend(access_rights); - { - let transfers = self.context.transfers_mut(); - *transfers = mint_runtime.context.transfers().to_owned(); - } - Ok(ret) - } - - pub fn call_host_handle_payment( - &mut self, - protocol_version: ProtocolVersion, - entry_point_name: &str, - named_keys: &mut NamedKeys, - runtime_args: &RuntimeArgs, - extra_keys: &[Key], - ) -> Result { - let access_rights = { - let mut keys: Vec = named_keys.values().cloned().collect(); - keys.extend(extra_keys); - keys.push(self.get_mint_contract().into()); - keys.push(self.get_handle_payment_contract().into()); - extract_access_rights_from_keys(keys) - }; - let authorization_keys = self.context.authorization_keys().to_owned(); - let account = self.context.account(); - let base_key = self.protocol_data().handle_payment().into(); - let blocktime = self.context.get_blocktime(); - let deploy_hash = self.context.get_deploy_hash(); - let gas_limit = self.context.gas_limit(); - let gas_counter = self.context.gas_counter(); - let fn_store_id = self.context.hash_address_generator(); - let address_generator = self.context.uref_address_generator(); - let transfer_address_generator = self.context.transfer_address_generator(); - let correlation_id = self.context.correlation_id(); - let phase = self.context.phase(); - let protocol_data = self.context.protocol_data(); - let transfers = self.context.transfers().to_owned(); - - let runtime_context = RuntimeContext::new( - self.context.state(), - EntryPointType::Contract, - named_keys, - access_rights, - runtime_args.to_owned(), - authorization_keys, - account, - base_key, - blocktime, - deploy_hash, - gas_limit, - gas_counter, - fn_store_id, - address_generator, - transfer_address_generator, - protocol_version, - correlation_id, - phase, - *protocol_data, - transfers, - ); - - let mut runtime = Runtime::new( - self.config, 
- SystemContractCache::clone(&self.system_contract_cache), - self.memory.clone(), - self.module.clone(), - runtime_context, - ); - - let system_config = protocol_data.system_config(); - let handle_payment_costs = system_config.handle_payment_costs(); - - let result = match entry_point_name { - handle_payment::METHOD_GET_PAYMENT_PURSE => (|| { - runtime.charge_system_contract_call(handle_payment_costs.get_payment_purse)?; - - let rights_controlled_purse = - runtime.get_payment_purse().map_err(Self::reverter)?; - CLValue::from_t(rights_controlled_purse).map_err(Self::reverter) - })(), - handle_payment::METHOD_SET_REFUND_PURSE => (|| { - runtime.charge_system_contract_call(handle_payment_costs.set_refund_purse)?; - - let purse: URef = - Self::get_named_argument(&runtime_args, handle_payment::ARG_PURSE)?; - runtime.set_refund_purse(purse).map_err(Self::reverter)?; - CLValue::from_t(()).map_err(Self::reverter) - })(), - handle_payment::METHOD_GET_REFUND_PURSE => (|| { - runtime.charge_system_contract_call(handle_payment_costs.get_refund_purse)?; - - let maybe_purse = runtime.get_refund_purse().map_err(Self::reverter)?; - CLValue::from_t(maybe_purse).map_err(Self::reverter) - })(), - handle_payment::METHOD_FINALIZE_PAYMENT => (|| { - runtime.charge_system_contract_call(handle_payment_costs.finalize_payment)?; - - let amount_spent: U512 = - Self::get_named_argument(&runtime_args, handle_payment::ARG_AMOUNT)?; - let account: AccountHash = - Self::get_named_argument(&runtime_args, handle_payment::ARG_ACCOUNT)?; - let target: URef = - Self::get_named_argument(&runtime_args, handle_payment::ARG_TARGET)?; - runtime - .finalize_payment(amount_spent, account, target) - .map_err(Self::reverter)?; - CLValue::from_t(()).map_err(Self::reverter) - })(), - _ => CLValue::from_t(()).map_err(Self::reverter), - }; - - self.gas(runtime.gas_counter() - gas_counter)?; - - let ret = result?; - let urefs = extract_urefs(&ret)?; - let access_rights = extract_access_rights_from_urefs(urefs); - 
self.context.access_rights_extend(access_rights); - { - let transfers = self.context.transfers_mut(); - *transfers = runtime.context.transfers().to_owned(); - } - Ok(ret) - } - - pub fn call_host_standard_payment(&mut self) -> Result<(), Error> { - // NOTE: This method (unlike other call_host_* methods) already runs on its own runtime - // context. - let gas_counter = self.gas_counter(); - let amount: U512 = - Self::get_named_argument(&self.context.args(), standard_payment::ARG_AMOUNT)?; - let result = self.pay(amount).map_err(Self::reverter); - self.set_gas_counter(gas_counter); - result - } - - pub fn call_host_auction( - &mut self, - protocol_version: ProtocolVersion, - entry_point_name: &str, - named_keys: &mut NamedKeys, - runtime_args: &RuntimeArgs, - extra_keys: &[Key], - ) -> Result { - let access_rights = { - let mut keys: Vec = named_keys.values().cloned().collect(); - keys.extend(extra_keys); - keys.push(self.get_mint_contract().into()); - keys.push(self.get_handle_payment_contract().into()); - extract_access_rights_from_keys(keys) - }; - let authorization_keys = self.context.authorization_keys().to_owned(); - let account = self.context.account(); - let base_key = self.protocol_data().auction().into(); - let blocktime = self.context.get_blocktime(); - let deploy_hash = self.context.get_deploy_hash(); - let gas_limit = self.context.gas_limit(); - let gas_counter = self.context.gas_counter(); - let fn_store_id = self.context.hash_address_generator(); - let address_generator = self.context.uref_address_generator(); - let transfer_address_generator = self.context.transfer_address_generator(); - let correlation_id = self.context.correlation_id(); - let phase = self.context.phase(); - let protocol_data = self.context.protocol_data(); - let transfers = self.context.transfers().to_owned(); - - let runtime_context = RuntimeContext::new( - self.context.state(), - EntryPointType::Contract, - named_keys, - access_rights, - runtime_args.to_owned(), - 
authorization_keys, - account, - base_key, - blocktime, - deploy_hash, - gas_limit, - gas_counter, - fn_store_id, - address_generator, - transfer_address_generator, - protocol_version, - correlation_id, - phase, - *protocol_data, - transfers, - ); - - let mut runtime = Runtime::new( - self.config, - SystemContractCache::clone(&self.system_contract_cache), - self.memory.clone(), - self.module.clone(), - runtime_context, - ); - - let system_config = protocol_data.system_config(); - let auction_costs = system_config.auction_costs(); - - let result = match entry_point_name { - auction::METHOD_GET_ERA_VALIDATORS => (|| { - runtime.charge_system_contract_call(auction_costs.get_era_validators)?; - - let result = runtime.get_era_validators().map_err(Self::reverter)?; - - CLValue::from_t(result).map_err(Self::reverter) - })(), - - auction::METHOD_ADD_BID => (|| { - runtime.charge_system_contract_call(auction_costs.add_bid)?; - - let account_hash = - Self::get_named_argument(&runtime_args, auction::ARG_PUBLIC_KEY)?; - let delegation_rate = - Self::get_named_argument(&runtime_args, auction::ARG_DELEGATION_RATE)?; - let amount = Self::get_named_argument(&runtime_args, auction::ARG_AMOUNT)?; - - let result = runtime - .add_bid(account_hash, delegation_rate, amount) - .map_err(Self::reverter)?; - - CLValue::from_t(result).map_err(Self::reverter) - })(), - - auction::METHOD_WITHDRAW_BID => (|| { - runtime.charge_system_contract_call(auction_costs.withdraw_bid)?; - - let account_hash = - Self::get_named_argument(&runtime_args, auction::ARG_PUBLIC_KEY)?; - let amount = Self::get_named_argument(&runtime_args, auction::ARG_AMOUNT)?; - - let result = runtime - .withdraw_bid(account_hash, amount) - .map_err(Self::reverter)?; - CLValue::from_t(result).map_err(Self::reverter) - })(), - - auction::METHOD_DELEGATE => (|| { - runtime.charge_system_contract_call(auction_costs.delegate)?; - - let delegator = Self::get_named_argument(&runtime_args, auction::ARG_DELEGATOR)?; - let validator = 
Self::get_named_argument(&runtime_args, auction::ARG_VALIDATOR)?; - let amount = Self::get_named_argument(&runtime_args, auction::ARG_AMOUNT)?; - - let result = runtime - .delegate(delegator, validator, amount) - .map_err(Self::reverter)?; - - CLValue::from_t(result).map_err(Self::reverter) - })(), - - auction::METHOD_UNDELEGATE => (|| { - runtime.charge_system_contract_call(auction_costs.undelegate)?; - - let delegator = Self::get_named_argument(&runtime_args, auction::ARG_DELEGATOR)?; - let validator = Self::get_named_argument(&runtime_args, auction::ARG_VALIDATOR)?; - let amount = Self::get_named_argument(&runtime_args, auction::ARG_AMOUNT)?; - - let result = runtime - .undelegate(delegator, validator, amount) - .map_err(Self::reverter)?; - - CLValue::from_t(result).map_err(Self::reverter) - })(), - - auction::METHOD_RUN_AUCTION => (|| { - runtime.charge_system_contract_call(auction_costs.run_auction)?; - - let era_end_timestamp_millis = - Self::get_named_argument(&runtime_args, auction::ARG_ERA_END_TIMESTAMP_MILLIS)?; - let evicted_validators = - Self::get_named_argument(&runtime_args, auction::ARG_EVICTED_VALIDATORS)?; - - runtime - .run_auction(era_end_timestamp_millis, evicted_validators) - .map_err(Self::reverter)?; - - CLValue::from_t(()).map_err(Self::reverter) - })(), - - // Type: `fn slash(validator_account_hashes: &[AccountHash]) -> Result<(), Error>` - auction::METHOD_SLASH => (|| { - runtime.charge_system_contract_call(auction_costs.slash)?; - - let validator_public_keys = - Self::get_named_argument(&runtime_args, auction::ARG_VALIDATOR_PUBLIC_KEYS)?; - runtime - .slash(validator_public_keys) - .map_err(Self::reverter)?; - CLValue::from_t(()).map_err(Self::reverter) - })(), - - // Type: `fn distribute(reward_factors: BTreeMap) -> Result<(), Error>` - auction::METHOD_DISTRIBUTE => (|| { - runtime.charge_system_contract_call(auction_costs.distribute)?; - - let reward_factors: BTreeMap = - Self::get_named_argument(&runtime_args, 
auction::ARG_REWARD_FACTORS)?; - runtime.distribute(reward_factors).map_err(Self::reverter)?; - CLValue::from_t(()).map_err(Self::reverter) - })(), - - // Type: `fn read_era_id() -> Result` - auction::METHOD_READ_ERA_ID => (|| { - runtime.charge_system_contract_call(auction_costs.read_era_id)?; - - let result = runtime.read_era_id().map_err(Self::reverter)?; - CLValue::from_t(result).map_err(Self::reverter) - })(), - - auction::METHOD_ACTIVATE_BID => (|| { - runtime.charge_system_contract_call(auction_costs.read_era_id)?; - - let validator_public_key: PublicKey = - Self::get_named_argument(&runtime_args, auction::ARG_VALIDATOR_PUBLIC_KEY)?; - - runtime - .activate_bid(validator_public_key) - .map_err(Self::reverter)?; - - CLValue::from_t(()).map_err(Self::reverter) - })(), - - _ => CLValue::from_t(()).map_err(Self::reverter), - }; - - // Charge for the gas spent during execution in an isolated runtime. - self.gas(runtime.gas_counter() - gas_counter)?; - - // Result still contains a result, but the entrypoints logic does not exit early on errors. - let ret = result?; - - let urefs = extract_urefs(&ret)?; - let access_rights = extract_access_rights_from_urefs(urefs); - self.context.access_rights_extend(access_rights); - { - let transfers = self.context.transfers_mut(); - *transfers = runtime.context.transfers().to_owned(); - } - - Ok(ret) - } - - /// Calls contract living under a `key`, with supplied `args`. - pub fn call_contract( - &mut self, - contract_hash: ContractHash, - entry_point_name: &str, - args: RuntimeArgs, - ) -> Result { - let key = contract_hash.into(); - let contract = match self.context.read_gs(&key)? 
{ - Some(StoredValue::Contract(contract)) => contract, - Some(_) => { - return Err(Error::FunctionNotFound(format!( - "Value at {:?} is not a contract", - key - ))); - } - None => return Err(Error::KeyNotFound(key)), - }; - - let entry_point = contract - .entry_point(entry_point_name) - .cloned() - .ok_or_else(|| Error::NoSuchMethod(entry_point_name.to_owned()))?; - - let context_key = self.get_context_key_for_contract_call(contract_hash, &entry_point)?; - - self.execute_contract( - key, - context_key, - contract, - args, - entry_point, - self.context.protocol_version(), - ) - } - - /// Calls `version` of the contract living at `key`, invoking `method` with - /// supplied `args`. This function also checks the args conform with the - /// types given in the contract header. - pub fn call_versioned_contract( - &mut self, - contract_package_hash: ContractPackageHash, - contract_version: Option, - entry_point_name: String, - args: RuntimeArgs, - ) -> Result { - let key = contract_package_hash.into(); - - let contract_package = match self.context.read_gs(&key)? { - Some(StoredValue::ContractPackage(contract_package)) => contract_package, - Some(_) => { - return Err(Error::FunctionNotFound(format!( - "Value at {:?} is not a versioned contract", - contract_package_hash - ))); - } - None => return Err(Error::KeyNotFound(key)), - }; - - let contract_version_key = match contract_version { - Some(version) => { - ContractVersionKey::new(self.context.protocol_version().value().major, version) - } - None => match contract_package.current_contract_version() { - Some(v) => v, - None => return Err(Error::NoActiveContractVersions(contract_package_hash)), - }, - }; - - // Get contract entry point hash - let contract_hash = contract_package - .lookup_contract_hash(contract_version_key) - .cloned() - .ok_or(Error::InvalidContractVersion(contract_version_key))?; - - // Get contract data - let contract = match self.context.read_gs(&contract_hash.into())? 
{ - Some(StoredValue::Contract(contract)) => contract, - Some(_) => { - return Err(Error::FunctionNotFound(format!( - "Value at {:?} is not a contract", - contract_package_hash - ))); - } - None => return Err(Error::KeyNotFound(key)), - }; - - let entry_point = contract - .entry_point(&entry_point_name) - .cloned() - .ok_or_else(|| Error::NoSuchMethod(entry_point_name.to_owned()))?; - - self.validate_entry_point_access(&contract_package, entry_point.access())?; - - for (expected, found) in entry_point - .args() - .iter() - .map(|a| a.cl_type()) - .cloned() - .zip(args.to_values().into_iter().map(|v| v.cl_type()).cloned()) - { - if expected != found { - return Err(Error::type_mismatch(expected, found)); - } - } - - let context_key = self.get_context_key_for_contract_call(contract_hash, &entry_point)?; - - self.execute_contract( - context_key, - context_key, - contract, - args, - entry_point, - self.context.protocol_version(), - ) - } - - fn get_context_key_for_contract_call( - &self, - contract_hash: ContractHash, - entry_point: &EntryPoint, - ) -> Result { - match entry_point.entry_point_type() { - EntryPointType::Session - if self.context.entry_point_type() == EntryPointType::Contract => - { - // Session code can't be called from Contract code for security reasons. 
- Err(Error::InvalidContext) - } - EntryPointType::Session => { - assert_eq!(self.context.entry_point_type(), EntryPointType::Session); - // Session code called from session reuses current base key - Ok(self.context.base_key()) - } - EntryPointType::Contract => Ok(contract_hash.into()), - } - } - - fn execute_contract( - &mut self, - key: Key, - base_key: Key, - contract: Contract, - args: RuntimeArgs, - entry_point: EntryPoint, - protocol_version: ProtocolVersion, - ) -> Result { - // Check for major version compatibility before calling - if !contract.is_compatible_protocol_version(protocol_version) { - return Err(Error::IncompatibleProtocolMajorVersion { - actual: contract.protocol_version().value().major, - expected: protocol_version.value().major, - }); - } - - // TODO: should we be using named_keys_mut() instead? - let mut named_keys = match entry_point.entry_point_type() { - EntryPointType::Session => self.context.account().named_keys().clone(), - EntryPointType::Contract => contract.named_keys().clone(), - }; - - let extra_keys = { - let mut extra_keys = vec![]; - // A loop is needed to be able to use the '?' operator - for arg in args.to_values() { - extra_keys.extend( - extract_urefs(arg)? 
- .into_iter() - .map(>::from), - ); - } - for key in &extra_keys { - if let Err(Error::ForgedReference(maybe_forged_uref)) = - self.context.validate_key(key) - { - if !extra_keys.contains(&Key::from(maybe_forged_uref)) { - return Err(Error::ForgedReference(maybe_forged_uref)); - } - } - } - - if self.is_mint(key) { - return self.call_host_mint( - self.context.protocol_version(), - entry_point.name(), - &mut named_keys, - &args, - &extra_keys, - ); - } else if self.is_handle_payment(key) { - return self.call_host_handle_payment( - self.context.protocol_version(), - entry_point.name(), - &mut named_keys, - &args, - &extra_keys, - ); - } else if self.is_auction(key) { - return self.call_host_auction( - self.context.protocol_version(), - entry_point.name(), - &mut named_keys, - &args, - &extra_keys, - ); - } - - extra_keys - }; - - let module = { - let maybe_module = key - .into_hash() - .and_then(|hash_addr| self.system_contract_cache.get(hash_addr.into())); - let wasm_key = contract.contract_wasm_key(); - - let contract_wasm: ContractWasm = match self.context.read_gs(&wasm_key)? 
{ - Some(StoredValue::ContractWasm(contract_wasm)) => contract_wasm, - Some(_) => { - return Err(Error::FunctionNotFound(format!( - "Value at {:?} is not contract wasm", - key - ))); - } - None => return Err(Error::KeyNotFound(key)), - }; - match maybe_module { - Some(module) => module, - None => parity_wasm::deserialize_buffer(contract_wasm.bytes())?, - } - }; - - let entry_point_name = entry_point.name(); - - let (instance, memory) = instance_and_memory( - module.clone(), - protocol_version, - self.protocol_data().wasm_config(), - )?; - - let access_rights = { - let mut keys: Vec = named_keys.values().cloned().collect(); - keys.extend(extra_keys); - keys.push(self.get_mint_contract().into()); - keys.push(self.get_handle_payment_contract().into()); - extract_access_rights_from_keys(keys) - }; - - let system_contract_cache = SystemContractCache::clone(&self.system_contract_cache); - - let config = self.config; - - let host_buffer = None; - - let context = RuntimeContext::new( - self.context.state(), - entry_point.entry_point_type(), - &mut named_keys, - access_rights, - args, - self.context.authorization_keys().clone(), - &self.context.account(), - base_key, - self.context.get_blocktime(), - self.context.get_deploy_hash(), - self.context.gas_limit(), - self.context.gas_counter(), - self.context.hash_address_generator(), - self.context.uref_address_generator(), - self.context.transfer_address_generator(), - protocol_version, - self.context.correlation_id(), - self.context.phase(), - *self.context.protocol_data(), - self.context.transfers().to_owned(), - ); - - let mut runtime = Runtime { - system_contract_cache, - config, - memory, - module, - host_buffer, - context, - }; - - let result = instance.invoke_export(entry_point_name, &[], &mut runtime); - - // The `runtime`'s context was initialized with our counter from before the call and any gas - // charged by the sub-call was added to its counter - so let's copy the correct value of the - // counter from there to 
our counter - self.context.set_gas_counter(runtime.context.gas_counter()); - - { - let transfers = self.context.transfers_mut(); - *transfers = runtime.context.transfers().to_owned(); - } - - let error = match result { - Err(error) => error, - // If `Ok` and the `host_buffer` is `None`, the contract's execution succeeded but did - // not explicitly call `runtime::ret()`. Treat as though the execution - // returned the unit type `()` as per Rust functions which don't specify a - // return value. - Ok(_) => { - if self.context.entry_point_type() == EntryPointType::Session - && runtime.context.entry_point_type() == EntryPointType::Session - { - // Overwrites parent's named keys with child's new named key but only when - // running session code - *self.context.named_keys_mut() = runtime.context.named_keys().clone(); - } - return Ok(runtime.take_host_buffer().unwrap_or(CLValue::from_t(())?)); - } - }; - - if let Some(host_error) = error.as_host_error() { - // If the "error" was in fact a trap caused by calling `ret` then - // this is normal operation and we should return the value captured - // in the Runtime result field. 
- let downcasted_error = host_error.downcast_ref::(); - match downcasted_error { - Some(Error::Ret(ref ret_urefs)) => { - // insert extra urefs returned from call - let ret_urefs_map: HashMap> = - extract_access_rights_from_urefs(ret_urefs.clone()); - self.context.access_rights_extend(ret_urefs_map); - // if ret has not set host_buffer consider it programmer error - if self.context.entry_point_type() == EntryPointType::Session - && runtime.context.entry_point_type() == EntryPointType::Session - { - // Overwrites parent's named keys with child's new named key but only when - // running session code - *self.context.named_keys_mut() = runtime.context.named_keys().clone(); - } - return runtime.take_host_buffer().ok_or(Error::ExpectedReturnValue); - } - Some(error) => return Err(error.clone()), - None => return Err(Error::Interpreter(host_error.to_string())), - } - } - - Err(Error::Interpreter(error.into())) - } - - fn call_contract_host_buffer( - &mut self, - contract_hash: ContractHash, - entry_point_name: &str, - args_bytes: Vec, - result_size_ptr: u32, - scoped_instrumenter: &mut ScopedInstrumenter, - ) -> Result, Error> { - // Exit early if the host buffer is already occupied - if let Err(err) = self.check_host_buffer() { - return Ok(Err(err)); - } - let args: RuntimeArgs = bytesrepr::deserialize(args_bytes)?; - scoped_instrumenter.pause(); - let result = self.call_contract(contract_hash, entry_point_name, args)?; - scoped_instrumenter.unpause(); - self.manage_call_contract_host_buffer(result_size_ptr, result) - } - - fn call_versioned_contract_host_buffer( - &mut self, - contract_package_hash: ContractPackageHash, - contract_version: Option, - entry_point_name: String, - args_bytes: Vec, - result_size_ptr: u32, - scoped_instrumenter: &mut ScopedInstrumenter, - ) -> Result, Error> { - // Exit early if the host buffer is already occupied - if let Err(err) = self.check_host_buffer() { - return Ok(Err(err)); - } - let args: RuntimeArgs = 
bytesrepr::deserialize(args_bytes)?; - scoped_instrumenter.pause(); - let result = self.call_versioned_contract( - contract_package_hash, - contract_version, - entry_point_name, - args, - )?; - scoped_instrumenter.unpause(); - self.manage_call_contract_host_buffer(result_size_ptr, result) - } - - fn check_host_buffer(&mut self) -> Result<(), ApiError> { - if !self.can_write_to_host_buffer() { - Err(ApiError::HostBufferFull) - } else { - Ok(()) - } - } - - fn manage_call_contract_host_buffer( - &mut self, - result_size_ptr: u32, - result: CLValue, - ) -> Result, Error> { - let result_size = result.inner_bytes().len() as u32; // considered to be safe - - // leave the host buffer set to `None` if there's nothing to write there - if result_size != 0 { - if let Err(error) = self.write_host_buffer(result) { - return Ok(Err(error)); - } - } - - let result_size_bytes = result_size.to_le_bytes(); // Wasm is little-endian - if let Err(error) = self.memory.set(result_size_ptr, &result_size_bytes) { - return Err(Error::Interpreter(error.into())); - } - - Ok(Ok(())) - } - - fn load_named_keys( - &mut self, - total_keys_ptr: u32, - result_size_ptr: u32, - scoped_instrumenter: &mut ScopedInstrumenter, - ) -> Result, Trap> { - scoped_instrumenter.add_property( - "names_total_length", - self.context - .named_keys() - .keys() - .map(|name| name.len()) - .sum::(), - ); - - if !self.can_write_to_host_buffer() { - // Exit early if the host buffer is already occupied - return Ok(Err(ApiError::HostBufferFull)); - } - - let total_keys = self.context.named_keys().len() as u32; - let total_keys_bytes = total_keys.to_le_bytes(); - if let Err(error) = self.memory.set(total_keys_ptr, &total_keys_bytes) { - return Err(Error::Interpreter(error.into()).into()); - } - - if total_keys == 0 { - // No need to do anything else, we leave host buffer empty. 
- return Ok(Ok(())); - } - - let named_keys = - CLValue::from_t(self.context.named_keys().clone()).map_err(Error::CLValue)?; - - let length = named_keys.inner_bytes().len() as u32; - if let Err(error) = self.write_host_buffer(named_keys) { - return Ok(Err(error)); - } - - let length_bytes = length.to_le_bytes(); - if let Err(error) = self.memory.set(result_size_ptr, &length_bytes) { - return Err(Error::Interpreter(error.into()).into()); - } - - Ok(Ok(())) - } - - fn create_contract_package( - &mut self, - is_locked: ContractPackageStatus, - ) -> Result<(ContractPackage, URef), Error> { - let access_key = self.context.new_unit_uref()?; - let contract_package = ContractPackage::new( - access_key, - ContractVersions::default(), - DisabledVersions::default(), - Groups::default(), - is_locked, - ); - - Ok((contract_package, access_key)) - } - - fn create_contract_package_at_hash( - &mut self, - lock_status: ContractPackageStatus, - ) -> Result<([u8; 32], [u8; 32]), Error> { - let addr = self.context.new_hash_address()?; - let (contract_package, access_key) = self.create_contract_package(lock_status)?; - self.context - .metered_write_gs_unsafe(addr, contract_package)?; - Ok((addr, access_key.addr())) - } - - fn create_contract_user_group( - &mut self, - contract_package_hash: ContractPackageHash, - label: String, - num_new_urefs: u32, - mut existing_urefs: BTreeSet, - output_size_ptr: u32, - ) -> Result, Error> { - let mut contract_package: ContractPackage = self - .context - .get_validated_contract_package(contract_package_hash)?; - - let groups = contract_package.groups_mut(); - let new_group = Group::new(label); - - // Ensure group does not already exist - if groups.get(&new_group).is_some() { - return Ok(Err(contracts::Error::GroupAlreadyExists.into())); - } - - // Ensure there are not too many groups - if groups.len() >= (contracts::MAX_GROUPS as usize) { - return Ok(Err(contracts::Error::MaxGroupsExceeded.into())); - } - - // Ensure there are not too many urefs - 
let total_urefs: usize = groups.values().map(|urefs| urefs.len()).sum::() - + (num_new_urefs as usize) - + existing_urefs.len(); - if total_urefs > contracts::MAX_TOTAL_UREFS { - let err = contracts::Error::MaxTotalURefsExceeded; - return Ok(Err(ApiError::ContractHeader(err as u8))); - } - - // Proceed with creating user group - let mut new_urefs = Vec::with_capacity(num_new_urefs as usize); - for _ in 0..num_new_urefs { - let u = self.context.new_unit_uref()?; - new_urefs.push(u); - } - - for u in new_urefs.iter().cloned() { - existing_urefs.insert(u); - } - groups.insert(new_group, existing_urefs); - - // check we can write to the host buffer - if let Err(err) = self.check_host_buffer() { - return Ok(Err(err)); - } - // create CLValue for return value - let new_urefs_value = CLValue::from_t(new_urefs)?; - let value_size = new_urefs_value.inner_bytes().len(); - // write return value to buffer - if let Err(err) = self.write_host_buffer(new_urefs_value) { - return Ok(Err(err)); - } - // Write return value size to output location - let output_size_bytes = value_size.to_le_bytes(); // Wasm is little-endian - if let Err(error) = self.memory.set(output_size_ptr, &output_size_bytes) { - return Err(Error::Interpreter(error.into())); - } - - // Write updated package to the global state - self.context - .metered_write_gs_unsafe(contract_package_hash, contract_package)?; - - Ok(Ok(())) - } - - #[allow(clippy::too_many_arguments)] - fn add_contract_version( - &mut self, - contract_package_hash: ContractPackageHash, - entry_points: EntryPoints, - mut named_keys: NamedKeys, - output_ptr: u32, - output_size: usize, - bytes_written_ptr: u32, - version_ptr: u32, - ) -> Result, Error> { - self.context - .validate_key(&Key::from(contract_package_hash))?; - - let mut contract_package: ContractPackage = self - .context - .get_validated_contract_package(contract_package_hash)?; - - let version = contract_package.current_contract_version(); - - // Return an error if the contract is 
locked and has some version associated with it. - if contract_package.is_locked() && version.is_some() { - return Err(Error::LockedContract(contract_package_hash)); - } - - let contract_wasm_hash = self.context.new_hash_address()?; - let contract_wasm = { - let module_bytes = self.get_module_from_entry_points(&entry_points)?; - ContractWasm::new(module_bytes) - }; - - let contract_hash = self.context.new_hash_address()?; - - let protocol_version = self.context.protocol_version(); - let major = protocol_version.value().major; - - // TODO: EE-1032 - Implement different ways of carrying on existing named keys - if let Some(previous_contract_hash) = contract_package.current_contract_hash() { - let previous_contract: Contract = - self.context.read_gs_typed(&previous_contract_hash.into())?; - - let mut previous_named_keys = previous_contract.take_named_keys(); - named_keys.append(&mut previous_named_keys); - } - - let contract = Contract::new( - contract_package_hash, - contract_wasm_hash.into(), - named_keys, - entry_points, - protocol_version, - ); - - let insert_contract_result = - contract_package.insert_contract_version(major, contract_hash.into()); - - self.context - .metered_write_gs_unsafe(contract_wasm_hash, contract_wasm)?; - self.context - .metered_write_gs_unsafe(contract_hash, contract)?; - self.context - .metered_write_gs_unsafe(contract_package_hash, contract_package)?; - - // return contract key to caller - { - let key_bytes = match contract_hash.to_bytes() { - Ok(bytes) => bytes, - Err(error) => return Ok(Err(error.into())), - }; - - // `output_size` must be >= actual length of serialized Key bytes - if output_size < key_bytes.len() { - return Ok(Err(ApiError::BufferTooSmall)); - } - - // Set serialized Key bytes into the output buffer - if let Err(error) = self.memory.set(output_ptr, &key_bytes) { - return Err(Error::Interpreter(error.into())); - } - - // Following cast is assumed to be safe - let bytes_size = key_bytes.len() as u32; - let size_bytes = 
bytes_size.to_le_bytes(); // Wasm is little-endian - if let Err(error) = self.memory.set(bytes_written_ptr, &size_bytes) { - return Err(Error::Interpreter(error.into())); - } - - let version_value: u32 = insert_contract_result.contract_version(); - let version_bytes = version_value.to_le_bytes(); - if let Err(error) = self.memory.set(version_ptr, &version_bytes) { - return Err(Error::Interpreter(error.into())); - } - } - - Ok(Ok(())) - } - - fn disable_contract_version( - &mut self, - contract_package_hash: ContractPackageHash, - contract_hash: ContractHash, - ) -> Result, Error> { - let contract_package_key = contract_package_hash.into(); - self.context.validate_key(&contract_package_key)?; - - let mut contract_package: ContractPackage = self - .context - .get_validated_contract_package(contract_package_hash)?; - - // Return an error in trying to disable the (singular) version of a locked contract. - if contract_package.is_locked() { - return Err(Error::LockedContract(contract_package_hash)); - } - - if let Err(err) = contract_package.disable_contract_version(contract_hash) { - return Ok(Err(err.into())); - } - - self.context - .metered_write_gs_unsafe(contract_package_key, contract_package)?; - - Ok(Ok(())) - } - - /// Writes function address (`hash_bytes`) into the Wasm memory (at - /// `dest_ptr` pointer). - fn function_address(&mut self, hash_bytes: [u8; 32], dest_ptr: u32) -> Result<(), Trap> { - self.memory - .set(dest_ptr, &hash_bytes) - .map_err(|e| Error::Interpreter(e.into()).into()) - } - - /// Generates new unforgable reference and adds it to the context's - /// access_rights set. - fn new_uref(&mut self, uref_ptr: u32, value_ptr: u32, value_size: u32) -> Result<(), Trap> { - let cl_value = self.cl_value_from_mem(value_ptr, value_size)?; // read initial value from memory - let uref = self.context.new_uref(StoredValue::CLValue(cl_value))?; - self.memory - .set(uref_ptr, &uref.into_bytes().map_err(Error::BytesRepr)?) 
- .map_err(|e| Error::Interpreter(e.into()).into()) - } - - /// Writes `value` under `key` in GlobalState. - fn write( - &mut self, - key_ptr: u32, - key_size: u32, - value_ptr: u32, - value_size: u32, - ) -> Result<(), Trap> { - let key = self.key_from_mem(key_ptr, key_size)?; - let cl_value = self.cl_value_from_mem(value_ptr, value_size)?; - self.context - .metered_write_gs(key, cl_value) - .map_err(Into::into) - } - - /// Records a transfer. - fn record_transfer( - &mut self, - maybe_to: Option, - source: URef, - target: URef, - amount: U512, - id: Option, - ) -> Result<(), Error> { - if self.context.base_key() != Key::from(self.protocol_data().mint()) { - return Err(Error::InvalidContext); - } - - if self.context.phase() != Phase::Session { - return Ok(()); - } - - let transfer_addr = self.context.new_transfer_addr()?; - let transfer = { - let deploy_hash: DeployHash = self.context.get_deploy_hash(); - let from: AccountHash = self.context.account().account_hash(); - let fee: U512 = U512::zero(); // TODO - Transfer::new(deploy_hash, from, maybe_to, source, target, amount, fee, id) - }; - { - let transfers = self.context.transfers_mut(); - transfers.push(transfer_addr); - } - self.context - .write_transfer(Key::Transfer(transfer_addr), transfer); - Ok(()) - } - - /// Records given auction info at a given era id - fn record_era_info(&mut self, era_id: EraId, era_info: EraInfo) -> Result<(), Error> { - if self.context.base_key() != Key::from(self.protocol_data().auction()) { - return Err(Error::InvalidContext); - } - - if self.context.phase() != Phase::Session { - return Ok(()); - } - - self.context.write_era_info(Key::EraInfo(era_id), era_info); - - Ok(()) - } - - /// Adds `value` to the cell that `key` points at. 
- fn add( - &mut self, - key_ptr: u32, - key_size: u32, - value_ptr: u32, - value_size: u32, - ) -> Result<(), Trap> { - let key = self.key_from_mem(key_ptr, key_size)?; - let cl_value = self.cl_value_from_mem(value_ptr, value_size)?; - self.context - .metered_add_gs(key, cl_value) - .map_err(Into::into) - } - - /// Reads value from the GS living under key specified by `key_ptr` and - /// `key_size`. Wasm and host communicate through memory that Wasm - /// module exports. If contract wants to pass data to the host, it has - /// to tell it [the host] where this data lives in the exported memory - /// (pass its pointer and length). - fn read( - &mut self, - key_ptr: u32, - key_size: u32, - output_size_ptr: u32, - ) -> Result, Trap> { - if !self.can_write_to_host_buffer() { - // Exit early if the host buffer is already occupied - return Ok(Err(ApiError::HostBufferFull)); - } - - let key = self.key_from_mem(key_ptr, key_size)?; - let cl_value = match self.context.read_gs(&key)? { - Some(stored_value) => CLValue::try_from(stored_value).map_err(Error::TypeMismatch)?, - None => return Ok(Err(ApiError::ValueNotFound)), - }; - - let value_size = cl_value.inner_bytes().len() as u32; - if let Err(error) = self.write_host_buffer(cl_value) { - return Ok(Err(error)); - } - - let value_bytes = value_size.to_le_bytes(); // Wasm is little-endian - if let Err(error) = self.memory.set(output_size_ptr, &value_bytes) { - return Err(Error::Interpreter(error.into()).into()); - } - - Ok(Ok(())) - } - - /// Reverts contract execution with a status specified. 
- fn revert(&mut self, status: u32) -> Trap { - Error::Revert(status.into()).into() - } - - fn add_associated_key( - &mut self, - account_hash_ptr: u32, - account_hash_size: usize, - weight_value: u8, - ) -> Result { - let account_hash = { - // Account hash as serialized bytes - let source_serialized = self.bytes_from_mem(account_hash_ptr, account_hash_size)?; - // Account hash deserialized - let source: AccountHash = - bytesrepr::deserialize(source_serialized).map_err(Error::BytesRepr)?; - source - }; - let weight = Weight::new(weight_value); - - match self.context.add_associated_key(account_hash, weight) { - Ok(_) => Ok(0), - // This relies on the fact that `AddKeyFailure` is represented as - // i32 and first variant start with number `1`, so all other variants - // are greater than the first one, so it's safe to assume `0` is success, - // and any error is greater than 0. - Err(Error::AddKeyFailure(e)) => Ok(e as i32), - // Any other variant just pass as `Trap` - Err(e) => Err(e.into()), - } - } - - fn remove_associated_key( - &mut self, - account_hash_ptr: u32, - account_hash_size: usize, - ) -> Result { - let account_hash = { - // Account hash as serialized bytes - let source_serialized = self.bytes_from_mem(account_hash_ptr, account_hash_size)?; - // Account hash deserialized - let source: AccountHash = - bytesrepr::deserialize(source_serialized).map_err(Error::BytesRepr)?; - source - }; - match self.context.remove_associated_key(account_hash) { - Ok(_) => Ok(0), - Err(Error::RemoveKeyFailure(e)) => Ok(e as i32), - Err(e) => Err(e.into()), - } - } - - fn update_associated_key( - &mut self, - account_hash_ptr: u32, - account_hash_size: usize, - weight_value: u8, - ) -> Result { - let account_hash = { - // Account hash as serialized bytes - let source_serialized = self.bytes_from_mem(account_hash_ptr, account_hash_size)?; - // Account hash deserialized - let source: AccountHash = - bytesrepr::deserialize(source_serialized).map_err(Error::BytesRepr)?; - source - 
}; - let weight = Weight::new(weight_value); - - match self.context.update_associated_key(account_hash, weight) { - Ok(_) => Ok(0), - // This relies on the fact that `UpdateKeyFailure` is represented as - // i32 and first variant start with number `1`, so all other variants - // are greater than the first one, so it's safe to assume `0` is success, - // and any error is greater than 0. - Err(Error::UpdateKeyFailure(e)) => Ok(e as i32), - // Any other variant just pass as `Trap` - Err(e) => Err(e.into()), - } - } - - fn set_action_threshold( - &mut self, - action_type_value: u32, - threshold_value: u8, - ) -> Result { - match ActionType::try_from(action_type_value) { - Ok(action_type) => { - let threshold = Weight::new(threshold_value); - match self.context.set_action_threshold(action_type, threshold) { - Ok(_) => Ok(0), - Err(Error::SetThresholdFailure(e)) => Ok(e as i32), - Err(e) => Err(e.into()), - } - } - Err(_) => Err(Trap::new(TrapKind::Unreachable)), - } - } - - /// Looks up the public mint contract key in the context's protocol data. - /// - /// Returned URef is already attenuated depending on the calling account. - fn get_mint_contract(&self) -> ContractHash { - self.context.protocol_data().mint() - } - - /// Looks up the public handle payment contract key in the context's protocol data. - /// - /// Returned URef is already attenuated depending on the calling account. - fn get_handle_payment_contract(&self) -> ContractHash { - self.context.protocol_data().handle_payment() - } - - /// Looks up the public standard payment contract key in the context's protocol data. - /// - /// Returned URef is already attenuated depending on the calling account. - fn get_standard_payment_contract(&self) -> ContractHash { - self.context.protocol_data().standard_payment() - } - - /// Looks up the public auction contract key in the context's protocol data. - /// - /// Returned URef is already attenuated depending on the calling account. 
- fn get_auction_contract(&self) -> ContractHash { - self.context.protocol_data().auction() - } - - /// Calls the `read_base_round_reward` method on the mint contract at the given mint - /// contract key - fn mint_read_base_round_reward( - &mut self, - mint_contract_hash: ContractHash, - ) -> Result { - let gas_counter = self.gas_counter(); - let call_result = self.call_contract( - mint_contract_hash, - mint::METHOD_READ_BASE_ROUND_REWARD, - RuntimeArgs::default(), - ); - self.set_gas_counter(gas_counter); - - let reward = call_result?.into_t()?; - Ok(reward) - } - - /// Calls the `mint` method on the mint contract at the given mint - /// contract key - fn mint_mint(&mut self, mint_contract_hash: ContractHash, amount: U512) -> Result { - let gas_counter = self.gas_counter(); - let runtime_args = { - let mut runtime_args = RuntimeArgs::new(); - runtime_args.insert(mint::ARG_AMOUNT, amount)?; - runtime_args - }; - let call_result = self.call_contract(mint_contract_hash, mint::METHOD_MINT, runtime_args); - self.set_gas_counter(gas_counter); - - let result: Result = call_result?.into_t()?; - Ok(result.map_err(system::Error::from)?) - } - - /// Calls the `reduce_total_supply` method on the mint contract at the given mint - /// contract key - fn mint_reduce_total_supply( - &mut self, - mint_contract_hash: ContractHash, - amount: U512, - ) -> Result<(), Error> { - let gas_counter = self.gas_counter(); - let runtime_args = { - let mut runtime_args = RuntimeArgs::new(); - runtime_args.insert(mint::ARG_AMOUNT, amount)?; - runtime_args - }; - let call_result = self.call_contract( - mint_contract_hash, - mint::METHOD_REDUCE_TOTAL_SUPPLY, - runtime_args, - ); - self.set_gas_counter(gas_counter); - - let result: Result<(), mint::Error> = call_result?.into_t()?; - Ok(result.map_err(system::Error::from)?) 
- } - - /// Calls the "create" method on the mint contract at the given mint - /// contract key - fn mint_create(&mut self, mint_contract_hash: ContractHash) -> Result { - let gas_counter = self.gas_counter(); - let result = - self.call_contract(mint_contract_hash, mint::METHOD_CREATE, RuntimeArgs::new()); - self.set_gas_counter(gas_counter); - - let purse = result?.into_t()?; - Ok(purse) - } - - fn create_purse(&mut self) -> Result { - self.mint_create(self.get_mint_contract()) - } - - /// Calls the "transfer" method on the mint contract at the given mint - /// contract key - fn mint_transfer( - &mut self, - mint_contract_hash: ContractHash, - to: Option, - source: URef, - target: URef, - amount: U512, - id: Option, - ) -> Result, Error> { - let args_values = { - let mut runtime_args = RuntimeArgs::new(); - runtime_args.insert(mint::ARG_TO, to)?; - runtime_args.insert(mint::ARG_SOURCE, source)?; - runtime_args.insert(mint::ARG_TARGET, target)?; - runtime_args.insert(mint::ARG_AMOUNT, amount)?; - runtime_args.insert(mint::ARG_ID, id)?; - runtime_args - }; - - let gas_counter = self.gas_counter(); - let call_result = - self.call_contract(mint_contract_hash, mint::METHOD_TRANSFER, args_values); - self.set_gas_counter(gas_counter); - - Ok(call_result?.into_t()?) - } - - /// Creates a new account at a given public key, transferring a given amount - /// of motes from the given source purse to the new account's purse. - fn transfer_to_new_account( - &mut self, - source: URef, - target: AccountHash, - amount: U512, - id: Option, - ) -> Result { - let mint_contract_hash = self.get_mint_contract(); - - let target_key = Key::Account(target); - - // A precondition check that verifies that the transfer can be done - // as the source purse has enough funds to cover the transfer. 
- if amount > self.get_balance(source)?.unwrap_or_default() { - return Ok(Err(mint::Error::InsufficientFunds.into())); - } - - let target_purse = self.mint_create(mint_contract_hash)?; - - if source == target_purse { - return Ok(Err(mint::Error::EqualSourceAndTarget.into())); - } - - match self.mint_transfer( - mint_contract_hash, - Some(target), - source, - target_purse.with_access_rights(AccessRights::ADD), - amount, - id, - )? { - Ok(()) => { - let account = Account::create(target, Default::default(), target_purse); - self.context.write_account(target_key, account)?; - Ok(Ok(TransferredTo::NewAccount)) - } - Err(mint_error) => Ok(Err(mint_error.into())), - } - } - - /// Transferring a given amount of motes from the given source purse to the - /// new account's purse. Requires that the [`URef`]s have already - /// been created by the mint contract (or are the genesis account's). - fn transfer_to_existing_account( - &mut self, - to: Option, - source: URef, - target: URef, - amount: U512, - id: Option, - ) -> Result { - let mint_contract_key = self.get_mint_contract(); - - // This appears to be a load-bearing use of `RuntimeContext::insert_uref`. - self.context.insert_uref(target); - - match self.mint_transfer(mint_contract_key, to, source, target, amount, id)? { - Ok(()) => Ok(Ok(TransferredTo::ExistingAccount)), - Err(error) => Ok(Err(error.into())), - } - } - - /// Transfers `amount` of motes from default purse of the account to - /// `target` account. If that account does not exist, creates one. - fn transfer_to_account( - &mut self, - target: AccountHash, - amount: U512, - id: Option, - ) -> Result { - let source = self.context.get_main_purse()?; - self.transfer_from_purse_to_account(source, target, amount, id) - } - - /// Transfers `amount` of motes from `source` purse to `target` account. - /// If that account does not exist, creates one. 
- fn transfer_from_purse_to_account( - &mut self, - source: URef, - target: AccountHash, - amount: U512, - id: Option, - ) -> Result { - let target_key = Key::Account(target); - // Look up the account at the given public key's address - match self.context.read_account(&target_key)? { - None => { - // If no account exists, create a new account and transfer the amount to its - // purse. - self.transfer_to_new_account(source, target, amount, id) - } - Some(StoredValue::Account(account)) => { - let target_uref = account.main_purse_add_only(); - if source.with_access_rights(AccessRights::ADD) == target_uref { - return Ok(Ok(TransferredTo::ExistingAccount)); - } - // If an account exists, transfer the amount to its purse - self.transfer_to_existing_account(Some(target), source, target_uref, amount, id) - } - Some(_) => { - // If some other value exists, return an error - Err(Error::AccountNotFound(target_key)) - } - } - } - - /// Transfers `amount` of motes from `source` purse to `target` purse. - #[allow(clippy::too_many_arguments)] - fn transfer_from_purse_to_purse( - &mut self, - source_ptr: u32, - source_size: u32, - target_ptr: u32, - target_size: u32, - amount_ptr: u32, - amount_size: u32, - id_ptr: u32, - id_size: u32, - ) -> Result, Error> { - let source: URef = { - let bytes = self.bytes_from_mem(source_ptr, source_size as usize)?; - bytesrepr::deserialize(bytes).map_err(Error::BytesRepr)? - }; - - let target: URef = { - let bytes = self.bytes_from_mem(target_ptr, target_size as usize)?; - bytesrepr::deserialize(bytes).map_err(Error::BytesRepr)? - }; - - let amount: U512 = { - let bytes = self.bytes_from_mem(amount_ptr, amount_size as usize)?; - bytesrepr::deserialize(bytes).map_err(Error::BytesRepr)? - }; - - let id: Option = { - let bytes = self.bytes_from_mem(id_ptr, id_size as usize)?; - bytesrepr::deserialize(bytes).map_err(Error::BytesRepr)? 
- }; - - let mint_contract_key = self.get_mint_contract(); - - match self.mint_transfer(mint_contract_key, None, source, target, amount, id)? { - Ok(()) => Ok(Ok(())), - Err(mint_error) => Ok(Err(mint_error)), - } - } - - fn get_balance(&mut self, purse: URef) -> Result, Error> { - let maybe_value = self.context.read_gs_direct(&Key::Balance(purse.addr()))?; - match maybe_value { - Some(StoredValue::CLValue(value)) => { - let value = CLValue::into_t(value)?; - Ok(Some(value)) - } - Some(_) => Err(Error::UnexpectedStoredValueVariant), - None => Ok(None), - } - } - - fn get_balance_host_buffer( - &mut self, - purse_ptr: u32, - purse_size: usize, - output_size_ptr: u32, - ) -> Result, Error> { - if !self.can_write_to_host_buffer() { - // Exit early if the host buffer is already occupied - return Ok(Err(ApiError::HostBufferFull)); - } - - let purse: URef = { - let bytes = self.bytes_from_mem(purse_ptr, purse_size)?; - match bytesrepr::deserialize(bytes) { - Ok(purse) => purse, - Err(error) => return Ok(Err(error.into())), - } - }; - - let balance = match self.get_balance(purse)? 
{ - Some(balance) => balance, - None => return Ok(Err(ApiError::InvalidPurse)), - }; - - let balance_cl_value = match CLValue::from_t(balance) { - Ok(cl_value) => cl_value, - Err(error) => return Ok(Err(error.into())), - }; - - let balance_size = balance_cl_value.inner_bytes().len() as i32; - if let Err(error) = self.write_host_buffer(balance_cl_value) { - return Ok(Err(error)); - } - - let balance_size_bytes = balance_size.to_le_bytes(); // Wasm is little-endian - if let Err(error) = self.memory.set(output_size_ptr, &balance_size_bytes) { - return Err(Error::Interpreter(error.into())); - } - - Ok(Ok(())) - } - - fn get_system_contract( - &mut self, - system_contract_index: u32, - dest_ptr: u32, - _dest_size: u32, - ) -> Result, Trap> { - let contract_hash: ContractHash = match SystemContractType::try_from(system_contract_index) - { - Ok(SystemContractType::Mint) => self.get_mint_contract(), - Ok(SystemContractType::HandlePayment) => self.get_handle_payment_contract(), - Ok(SystemContractType::StandardPayment) => self.get_standard_payment_contract(), - Ok(SystemContractType::Auction) => self.get_auction_contract(), - Err(error) => return Ok(Err(error)), - }; - - match self.memory.set(dest_ptr, contract_hash.as_ref()) { - Ok(_) => Ok(Ok(())), - Err(error) => Err(Error::Interpreter(error.into()).into()), - } - } - - /// If host_buffer set, clears the host_buffer and returns value, else None - pub fn take_host_buffer(&mut self) -> Option { - self.host_buffer.take() - } - - /// Checks if a write to host buffer can happen. - /// - /// This will check if the host buffer is empty. 
- fn can_write_to_host_buffer(&self) -> bool { - self.host_buffer.is_none() - } - - /// Overwrites data in host buffer only if it's in empty state - fn write_host_buffer(&mut self, data: CLValue) -> Result<(), ApiError> { - match self.host_buffer { - Some(_) => return Err(ApiError::HostBufferFull), - None => self.host_buffer = Some(data), - } - Ok(()) - } - - fn read_host_buffer( - &mut self, - dest_ptr: u32, - dest_size: usize, - bytes_written_ptr: u32, - ) -> Result, Error> { - let (_cl_type, serialized_value) = match self.take_host_buffer() { - None => return Ok(Err(ApiError::HostBufferEmpty)), - Some(cl_value) => cl_value.destructure(), - }; - - if serialized_value.len() > u32::max_value() as usize { - return Ok(Err(ApiError::OutOfMemory)); - } - if serialized_value.len() > dest_size { - return Ok(Err(ApiError::BufferTooSmall)); - } - - // Slice data, so if `dest_size` is larger than host_buffer size, it will take host_buffer - // as whole. - let sliced_buf = &serialized_value[..cmp::min(dest_size, serialized_value.len())]; - if let Err(error) = self.memory.set(dest_ptr, sliced_buf) { - return Err(Error::Interpreter(error.into())); - } - - let bytes_written = sliced_buf.len() as u32; - let bytes_written_data = bytes_written.to_le_bytes(); - - if let Err(error) = self.memory.set(bytes_written_ptr, &bytes_written_data) { - return Err(Error::Interpreter(error.into())); - } - - Ok(Ok(())) - } - - #[cfg(feature = "test-support")] - fn print(&mut self, text_ptr: u32, text_size: u32) -> Result<(), Trap> { - let text = self.string_from_mem(text_ptr, text_size)?; - println!("{}", text); - Ok(()) - } - - fn get_named_arg_size( - &mut self, - name_ptr: u32, - name_size: usize, - size_ptr: u32, - ) -> Result, Trap> { - let name_bytes = self.bytes_from_mem(name_ptr, name_size)?; - let name = String::from_utf8_lossy(&name_bytes); - - let arg_size = match self.context.args().get(&name) { - Some(arg) if arg.inner_bytes().len() > u32::max_value() as usize => { - return 
Ok(Err(ApiError::OutOfMemory)); - } - Some(arg) => arg.inner_bytes().len() as u32, - None => return Ok(Err(ApiError::MissingArgument)), - }; - - let arg_size_bytes = arg_size.to_le_bytes(); // Wasm is little-endian - - if let Err(e) = self.memory.set(size_ptr, &arg_size_bytes) { - return Err(Error::Interpreter(e.into()).into()); - } - - Ok(Ok(())) - } - - fn get_named_arg( - &mut self, - name_ptr: u32, - name_size: usize, - output_ptr: u32, - output_size: usize, - ) -> Result, Trap> { - let name_bytes = self.bytes_from_mem(name_ptr, name_size)?; - let name = String::from_utf8_lossy(&name_bytes); - - let arg = match self.context.args().get(&name) { - Some(arg) => arg, - None => return Ok(Err(ApiError::MissingArgument)), - }; - - if arg.inner_bytes().len() > output_size { - return Ok(Err(ApiError::OutOfMemory)); - } - - if let Err(e) = self - .memory - .set(output_ptr, &arg.inner_bytes()[..output_size]) - { - return Err(Error::Interpreter(e.into()).into()); - } - - Ok(Ok(())) - } - - fn validate_entry_point_access( - &self, - package: &ContractPackage, - access: &EntryPointAccess, - ) -> Result<(), Error> { - runtime_context::validate_entry_point_access_with(package, access, |uref| { - self.context.validate_uref(uref).is_ok() - }) - } - - /// Remove a user group from access to a contract - fn remove_contract_user_group( - &mut self, - package_key: ContractPackageHash, - label: Group, - ) -> Result, Error> { - let mut package: ContractPackage = - self.context.get_validated_contract_package(package_key)?; - - let group_to_remove = Group::new(label); - let groups = package.groups_mut(); - - // Ensure group exists in groups - if groups.get(&group_to_remove).is_none() { - return Ok(Err(contracts::Error::GroupDoesNotExist.into())); - } - - // Remove group if it is not referenced by at least one entry_point in active versions. 
- let versions = package.versions(); - for contract_hash in versions.values() { - let entry_points = { - let contract: Contract = self.context.read_gs_typed(&Key::from(*contract_hash))?; - contract.entry_points().clone().take_entry_points() - }; - for entry_point in entry_points { - match entry_point.access() { - EntryPointAccess::Public => { - continue; - } - EntryPointAccess::Groups(groups) => { - if groups.contains(&group_to_remove) { - return Ok(Err(contracts::Error::GroupInUse.into())); - } - } - } - } - } - - if !package.remove_group(&group_to_remove) { - return Ok(Err(contracts::Error::GroupInUse.into())); - } - - // Write updated package to the global state - self.context.metered_write_gs_unsafe(package_key, package)?; - Ok(Ok(())) - } - - #[allow(clippy::too_many_arguments)] - fn provision_contract_user_group_uref( - &mut self, - package_ptr: u32, - package_size: u32, - label_ptr: u32, - label_size: u32, - output_size_ptr: u32, - ) -> Result, Error> { - let contract_package_hash = self.t_from_mem(package_ptr, package_size)?; - let label: String = self.t_from_mem(label_ptr, label_size)?; - let mut contract_package = self - .context - .get_validated_contract_package(contract_package_hash)?; - let groups = contract_package.groups_mut(); - - let group_label = Group::new(label); - - // Ensure there are not too many urefs - let total_urefs: usize = groups.values().map(|urefs| urefs.len()).sum(); - - if total_urefs + 1 > contracts::MAX_TOTAL_UREFS { - return Ok(Err(contracts::Error::MaxTotalURefsExceeded.into())); - } - - // Ensure given group exists and does not exceed limits - let group = match groups.get_mut(&group_label) { - Some(group) if group.len() + 1 > contracts::MAX_GROUPS as usize => { - // Ensures there are not too many groups to fit in amount of new urefs - return Ok(Err(contracts::Error::MaxTotalURefsExceeded.into())); - } - Some(group) => group, - None => return Ok(Err(contracts::Error::GroupDoesNotExist.into())), - }; - - // Proceed with creating 
new URefs - let new_uref = self.context.new_unit_uref()?; - if !group.insert(new_uref) { - return Ok(Err(contracts::Error::URefAlreadyExists.into())); - } - - // check we can write to the host buffer - if let Err(err) = self.check_host_buffer() { - return Ok(Err(err)); - } - // create CLValue for return value - let new_uref_value = CLValue::from_t(new_uref)?; - let value_size = new_uref_value.inner_bytes().len(); - // write return value to buffer - if let Err(err) = self.write_host_buffer(new_uref_value) { - return Ok(Err(err)); - } - // Write return value size to output location - let output_size_bytes = value_size.to_le_bytes(); // Wasm is little-endian - if let Err(error) = self.memory.set(output_size_ptr, &output_size_bytes) { - return Err(Error::Interpreter(error.into())); - } - - // Write updated package to the global state - self.context - .metered_write_gs_unsafe(contract_package_hash, contract_package)?; - - Ok(Ok(())) - } - - #[allow(clippy::too_many_arguments)] - fn remove_contract_user_group_urefs( - &mut self, - package_ptr: u32, - package_size: u32, - label_ptr: u32, - label_size: u32, - urefs_ptr: u32, - urefs_size: u32, - ) -> Result, Error> { - let contract_package_hash: ContractPackageHash = - self.t_from_mem(package_ptr, package_size)?; - let label: String = self.t_from_mem(label_ptr, label_size)?; - let urefs: BTreeSet = self.t_from_mem(urefs_ptr, urefs_size)?; - - let mut contract_package = self - .context - .get_validated_contract_package(contract_package_hash)?; - - let groups = contract_package.groups_mut(); - let group_label = Group::new(label); - - let group = match groups.get_mut(&group_label) { - Some(group) => group, - None => return Ok(Err(contracts::Error::GroupDoesNotExist.into())), - }; - - if urefs.is_empty() { - return Ok(Ok(())); - } - - for uref in urefs { - if !group.remove(&uref) { - return Ok(Err(contracts::Error::UnableToRemoveURef.into())); - } - } - // Write updated package to the global state - self.context - 
.metered_write_gs_unsafe(contract_package_hash, contract_package)?; - - Ok(Ok(())) - } - - /// Calculate gas cost for a host function - fn charge_host_function_call( - &mut self, - host_function: &HostFunction, - weights: T, - ) -> Result<(), Trap> - where - T: AsRef<[Cost]> + Copy, - { - let cost = host_function.calculate_gas_cost(weights); - self.gas(cost)?; - Ok(()) - } -} - -#[cfg(test)] -mod tests { - use std::collections::BTreeMap; - - use proptest::{ - array::uniform32, - collection::{btree_map, vec}, - option, - prelude::*, - result, - }; - - use casper_types::{gens::*, AccessRights, CLType, CLValue, Key, PublicKey, SecretKey, URef}; - - use super::extract_urefs; - - fn cl_value_with_urefs_arb() -> impl Strategy)> { - // If compiler brings you here it most probably means you've added a variant to `CLType` - // enum but forgot to add generator for it. - let stub: Option = None; - if let Some(cl_type) = stub { - match cl_type { - CLType::Bool - | CLType::I32 - | CLType::I64 - | CLType::U8 - | CLType::U32 - | CLType::U64 - | CLType::U128 - | CLType::U256 - | CLType::U512 - | CLType::Unit - | CLType::String - | CLType::Key - | CLType::URef - | CLType::Option(_) - | CLType::List(_) - | CLType::ByteArray(..) - | CLType::Result { .. } - | CLType::Map { .. 
} - | CLType::Tuple1(_) - | CLType::Tuple2(_) - | CLType::Tuple3(_) - | CLType::PublicKey - | CLType::Any => (), - } - }; - - prop_oneof![ - Just((CLValue::from_t(()).expect("should create CLValue"), vec![])), - any::() - .prop_map(|x| (CLValue::from_t(x).expect("should create CLValue"), vec![])), - any::().prop_map(|x| (CLValue::from_t(x).expect("should create CLValue"), vec![])), - any::().prop_map(|x| (CLValue::from_t(x).expect("should create CLValue"), vec![])), - any::().prop_map(|x| (CLValue::from_t(x).expect("should create CLValue"), vec![])), - any::().prop_map(|x| (CLValue::from_t(x).expect("should create CLValue"), vec![])), - any::().prop_map(|x| (CLValue::from_t(x).expect("should create CLValue"), vec![])), - u128_arb().prop_map(|x| (CLValue::from_t(x).expect("should create CLValue"), vec![])), - u256_arb().prop_map(|x| (CLValue::from_t(x).expect("should create CLValue"), vec![])), - u512_arb().prop_map(|x| (CLValue::from_t(x).expect("should create CLValue"), vec![])), - key_arb().prop_map(|x| { - let urefs = x.as_uref().into_iter().cloned().collect(); - (CLValue::from_t(x).expect("should create CLValue"), urefs) - }), - uref_arb().prop_map(|x| (CLValue::from_t(x).expect("should create CLValue"), vec![x])), - ".*".prop_map(|x: String| (CLValue::from_t(x).expect("should create CLValue"), vec![])), - option::of(any::()) - .prop_map(|x| (CLValue::from_t(x).expect("should create CLValue"), vec![])), - option::of(uref_arb()).prop_map(|x| { - let urefs = x.iter().cloned().collect(); - (CLValue::from_t(x).expect("should create CLValue"), urefs) - }), - option::of(key_arb()).prop_map(|x| { - let urefs = x.iter().filter_map(Key::as_uref).cloned().collect(); - (CLValue::from_t(x).expect("should create CLValue"), urefs) - }), - vec(any::(), 0..100) - .prop_map(|x| (CLValue::from_t(x).expect("should create CLValue"), vec![])), - vec(uref_arb(), 0..100).prop_map(|x| ( - CLValue::from_t(x.clone()).expect("should create CLValue"), - x - )), - vec(key_arb(), 
0..100).prop_map(|x| ( - CLValue::from_t(x.clone()).expect("should create CLValue"), - x.into_iter().filter_map(Key::into_uref).collect() - )), - uniform32(any::()) - .prop_map(|x| (CLValue::from_t(x).expect("should create CLValue"), vec![])), - result::maybe_err(key_arb(), ".*").prop_map(|x| { - let urefs = match &x { - Ok(key) => key.as_uref().into_iter().cloned().collect(), - Err(_) => vec![], - }; - (CLValue::from_t(x).expect("should create CLValue"), urefs) - }), - result::maybe_ok(".*", uref_arb()).prop_map(|x| { - let urefs = match &x { - Ok(_) => vec![], - Err(uref) => vec![*uref], - }; - (CLValue::from_t(x).expect("should create CLValue"), urefs) - }), - btree_map(".*", u512_arb(), 0..100) - .prop_map(|x| (CLValue::from_t(x).expect("should create CLValue"), vec![])), - btree_map(uref_arb(), u512_arb(), 0..100).prop_map(|x| { - let urefs = x.keys().cloned().collect(); - (CLValue::from_t(x).expect("should create CLValue"), urefs) - }), - btree_map(".*", uref_arb(), 0..100).prop_map(|x| { - let urefs = x.values().cloned().collect(); - (CLValue::from_t(x).expect("should create CLValue"), urefs) - }), - btree_map(uref_arb(), key_arb(), 0..100).prop_map(|x| { - let mut urefs: Vec = x.keys().cloned().collect(); - urefs.extend(x.values().filter_map(Key::as_uref).cloned()); - (CLValue::from_t(x).expect("should create CLValue"), urefs) - }), - (any::()) - .prop_map(|x| (CLValue::from_t(x).expect("should create CLValue"), vec![])), - (uref_arb()) - .prop_map(|x| (CLValue::from_t(x).expect("should create CLValue"), vec![x])), - (any::(), any::()) - .prop_map(|x| (CLValue::from_t(x).expect("should create CLValue"), vec![])), - (uref_arb(), any::()).prop_map(|x| { - let uref = x.0; - ( - CLValue::from_t(x).expect("should create CLValue"), - vec![uref], - ) - }), - (any::(), key_arb()).prop_map(|x| { - let urefs = x.1.as_uref().into_iter().cloned().collect(); - (CLValue::from_t(x).expect("should create CLValue"), urefs) - }), - (uref_arb(), key_arb()).prop_map(|x| { - 
let mut urefs = vec![x.0]; - urefs.extend(x.1.as_uref().into_iter().cloned()); - (CLValue::from_t(x).expect("should create CLValue"), urefs) - }), - ] - } - - proptest! { - #[test] - fn should_extract_urefs((cl_value, urefs) in cl_value_with_urefs_arb()) { - let extracted_urefs = extract_urefs(&cl_value).unwrap(); - assert_eq!(extracted_urefs, urefs); - } - } - - #[test] - fn extract_from_public_keys_to_urefs_map() { - let uref = URef::new([43; 32], AccessRights::READ_ADD_WRITE); - let mut map = BTreeMap::new(); - map.insert( - PublicKey::from( - SecretKey::ed25519_from_bytes([42; SecretKey::ED25519_LENGTH]).unwrap(), - ), - uref, - ); - let cl_value = CLValue::from_t(map).unwrap(); - assert_eq!(extract_urefs(&cl_value).unwrap(), vec![uref]); - } - - #[test] - fn extract_from_public_keys_to_uref_keys_map() { - let uref = URef::new([43; 32], AccessRights::READ_ADD_WRITE); - let key = Key::from(uref); - let mut map = BTreeMap::new(); - map.insert( - PublicKey::from( - SecretKey::ed25519_from_bytes([42; SecretKey::ED25519_LENGTH]).unwrap(), - ), - key, - ); - let cl_value = CLValue::from_t(map).unwrap(); - assert_eq!(extract_urefs(&cl_value).unwrap(), vec![uref]); - } -} diff --git a/execution_engine/src/core/runtime/scoped_instrumenter.rs b/execution_engine/src/core/runtime/scoped_instrumenter.rs deleted file mode 100644 index 67e35bcee2..0000000000 --- a/execution_engine/src/core/runtime/scoped_instrumenter.rs +++ /dev/null @@ -1,156 +0,0 @@ -use std::{ - collections::BTreeMap, - mem, - time::{Duration, Instant}, -}; - -use crate::{ - core::resolvers::v1_function_index::FunctionIndex, shared::logging::log_host_function_metrics, -}; - -enum PauseState { - NotStarted, - Started(Instant), - Completed(Duration), -} - -impl PauseState { - fn new() -> Self { - PauseState::NotStarted - } - - fn activate(&mut self) { - match self { - PauseState::NotStarted => { - *self = PauseState::Started(Instant::now()); - } - _ => panic!("PauseState must be NotStarted"), - } - } - - fn 
complete(&mut self) { - match self { - PauseState::Started(start) => { - *self = PauseState::Completed(start.elapsed()); - } - _ => panic!("Pause must already be active"), - } - } - - fn duration(&self) -> Duration { - match self { - PauseState::NotStarted => Duration::default(), - PauseState::Completed(duration) => *duration, - PauseState::Started(start) => start.elapsed(), - } - } -} - -pub(super) struct ScopedInstrumenter { - start: Instant, - pause_state: PauseState, - function_index: FunctionIndex, - properties: BTreeMap<&'static str, String>, -} - -impl ScopedInstrumenter { - pub(super) fn new(function_index: FunctionIndex) -> Self { - ScopedInstrumenter { - start: Instant::now(), - pause_state: PauseState::new(), - function_index, - properties: BTreeMap::new(), - } - } - - pub(super) fn add_property(&mut self, key: &'static str, value: T) { - assert!(self.properties.insert(key, value.to_string()).is_none()); - } - - /// Can be called once only to effectively pause the running timer. `unpause` can likewise be - /// called once if the timer has already been paused. 
- pub(super) fn pause(&mut self) { - self.pause_state.activate(); - } - - pub(super) fn unpause(&mut self) { - self.pause_state.complete(); - } - - fn duration(&self) -> Duration { - self.start - .elapsed() - .checked_sub(self.pause_state.duration()) - .unwrap_or_default() - } -} - -impl Drop for ScopedInstrumenter { - fn drop(&mut self) { - let duration = self.duration(); - let host_function = match self.function_index { - FunctionIndex::GasFuncIndex => return, - FunctionIndex::WriteFuncIndex => "host_function_write", - FunctionIndex::ReadFuncIndex => "host_function_read_value", - FunctionIndex::AddFuncIndex => "host_function_add", - FunctionIndex::NewFuncIndex => "host_function_new_uref", - FunctionIndex::RetFuncIndex => "host_function_ret", - FunctionIndex::CallContractFuncIndex => "host_function_call_contract", - FunctionIndex::GetKeyFuncIndex => "host_function_get_key", - FunctionIndex::HasKeyFuncIndex => "host_function_has_key", - FunctionIndex::PutKeyFuncIndex => "host_function_put_key", - FunctionIndex::IsValidURefFnIndex => "host_function_is_valid_uref", - FunctionIndex::RevertFuncIndex => "host_function_revert", - FunctionIndex::AddAssociatedKeyFuncIndex => "host_function_add_associated_key", - FunctionIndex::RemoveAssociatedKeyFuncIndex => "host_function_remove_associated_key", - FunctionIndex::UpdateAssociatedKeyFuncIndex => "host_function_update_associated_key", - FunctionIndex::SetActionThresholdFuncIndex => "host_function_set_action_threshold", - FunctionIndex::LoadNamedKeysFuncIndex => "host_function_load_named_keys", - FunctionIndex::RemoveKeyFuncIndex => "host_function_remove_key", - FunctionIndex::GetCallerIndex => "host_function_get_caller", - FunctionIndex::GetBlocktimeIndex => "host_function_get_blocktime", - FunctionIndex::CreatePurseIndex => "host_function_create_purse", - FunctionIndex::TransferToAccountIndex => "host_function_transfer_to_account", - FunctionIndex::TransferFromPurseToAccountIndex => { - 
"host_function_transfer_from_purse_to_account" - } - FunctionIndex::TransferFromPurseToPurseIndex => { - "host_function_transfer_from_purse_to_purse" - } - FunctionIndex::GetBalanceIndex => "host_function_get_balance", - FunctionIndex::GetPhaseIndex => "host_function_get_phase", - FunctionIndex::GetSystemContractIndex => "host_function_get_system_contract", - FunctionIndex::GetMainPurseIndex => "host_function_get_main_purse", - FunctionIndex::ReadHostBufferIndex => "host_function_read_host_buffer", - FunctionIndex::CreateContractPackageAtHash => { - "host_function_create_contract_package_at_hash" - } - FunctionIndex::AddContractVersion => "host_function_add_contract_version", - FunctionIndex::DisableContractVersion => "host_remove_contract_version", - FunctionIndex::CallVersionedContract => "host_call_versioned_contract", - FunctionIndex::CreateContractUserGroup => "create_contract_user_group", - #[cfg(feature = "test-support")] - FunctionIndex::PrintIndex => "host_function_print", - FunctionIndex::GetRuntimeArgsizeIndex => "host_get_named_arg_size", - FunctionIndex::GetRuntimeArgIndex => "host_get_named_arg", - FunctionIndex::RemoveContractUserGroupIndex => "host_remove_contract_user_group", - FunctionIndex::ExtendContractUserGroupURefsIndex => { - "host_provision_contract_user_group_uref" - } - FunctionIndex::RemoveContractUserGroupURefsIndex => { - "host_remove_contract_user_group_urefs" - } - FunctionIndex::Blake2b => "host_blake2b", - FunctionIndex::RecordTransfer => "host_record_transfer", - FunctionIndex::RecordEraInfo => "host_record_era_info", - }; - - let mut properties = mem::take(&mut self.properties); - properties.insert( - "duration_in_seconds", - format!("{:.06e}", duration.as_secs_f64()), - ); - - log_host_function_metrics(host_function, properties); - } -} diff --git a/execution_engine/src/core/runtime/standard_payment_internal.rs b/execution_engine/src/core/runtime/standard_payment_internal.rs deleted file mode 100644 index a9957dfe83..0000000000 
--- a/execution_engine/src/core/runtime/standard_payment_internal.rs +++ /dev/null @@ -1,95 +0,0 @@ -use casper_types::{ - system::{ - handle_payment, mint, - standard_payment::{AccountProvider, HandlePaymentProvider, MintProvider, StandardPayment}, - }, - ApiError, Key, RuntimeArgs, URef, U512, -}; - -use crate::{ - core::{execution, runtime::Runtime}, - shared::stored_value::StoredValue, - storage::global_state::StateReader, -}; - -pub(crate) const METHOD_GET_PAYMENT_PURSE: &str = "get_payment_purse"; - -impl From for Option { - fn from(exec_error: execution::Error) -> Self { - match exec_error { - // This is used to propagate [`execution::Error::GasLimit`] to make sure - // [`StanadrdPayment`] contract running natively supports propagating gas limit - // errors without a panic. - execution::Error::GasLimit => Some(mint::Error::GasLimit.into()), - // There are possibly other exec errors happening but such translation would be lossy. - _ => None, - } - } -} - -impl<'a, R> AccountProvider for Runtime<'a, R> -where - R: StateReader, - R::Error: Into, -{ - fn get_main_purse(&self) -> Result { - self.context.get_main_purse().map_err(|exec_error| { - >::from(exec_error).unwrap_or(ApiError::InvalidPurse) - }) - } -} - -impl<'a, R> MintProvider for Runtime<'a, R> -where - R: StateReader, - R::Error: Into, -{ - fn transfer_purse_to_purse( - &mut self, - source: URef, - target: URef, - amount: U512, - ) -> Result<(), ApiError> { - let mint_contract_hash = self.get_mint_contract(); - match self.mint_transfer(mint_contract_hash, None, source, target, amount, None) { - Ok(Ok(_)) => Ok(()), - Ok(Err(mint_error)) => Err(mint_error.into()), - Err(exec_error) => { - let maybe_api_error: Option = exec_error.into(); - Err(maybe_api_error.unwrap_or(ApiError::Transfer)) - } - } - } -} - -impl<'a, R> HandlePaymentProvider for Runtime<'a, R> -where - R: StateReader, - R::Error: Into, -{ - fn get_payment_purse(&mut self) -> Result { - let handle_payment_contract_hash = 
self.get_handle_payment_contract(); - - let cl_value = self - .call_contract( - handle_payment_contract_hash, - METHOD_GET_PAYMENT_PURSE, - RuntimeArgs::new(), - ) - .map_err(|exec_error| { - let maybe_api_error: Option = exec_error.into(); - maybe_api_error - .unwrap_or_else(|| handle_payment::Error::PaymentPurseNotFound.into()) - })?; - - let payment_purse_ref: URef = cl_value.into_t()?; - Ok(payment_purse_ref) - } -} - -impl<'a, R> StandardPayment for Runtime<'a, R> -where - R: StateReader, - R::Error: Into, -{ -} diff --git a/execution_engine/src/core/runtime_context/mod.rs b/execution_engine/src/core/runtime_context/mod.rs deleted file mode 100644 index 7e6bb52308..0000000000 --- a/execution_engine/src/core/runtime_context/mod.rs +++ /dev/null @@ -1,1040 +0,0 @@ -use std::{ - cell::RefCell, - collections::{BTreeSet, HashMap, HashSet}, - convert::{TryFrom, TryInto}, - fmt::Debug, - rc::Rc, -}; - -use casper_types::{ - account::{ - AccountHash, ActionType, AddKeyFailure, RemoveKeyFailure, SetThresholdFailure, - UpdateKeyFailure, Weight, - }, - bytesrepr, - bytesrepr::ToBytes, - contracts::NamedKeys, - system::auction::EraInfo, - AccessRights, BlockTime, CLType, CLValue, Contract, ContractPackage, ContractPackageHash, - DeployHash, DeployInfo, EntryPointAccess, EntryPointType, Key, KeyTag, Phase, ProtocolVersion, - PublicKey, RuntimeArgs, Transfer, TransferAddr, URef, KEY_HASH_LENGTH, -}; - -use crate::{ - core::{ - engine_state::execution_effect::ExecutionEffect, - execution::{AddressGenerator, Error}, - tracking_copy::{AddResult, TrackingCopy}, - Address, - }, - shared::{account::Account, gas::Gas, newtypes::CorrelationId, stored_value::StoredValue}, - storage::{global_state::StateReader, protocol_data::ProtocolData}, -}; - -#[cfg(test)] -mod tests; - -/// Checks whether given uref has enough access rights. 
-pub(crate) fn uref_has_access_rights( - uref: &URef, - access_rights: &HashMap>, -) -> bool { - if let Some(known_rights) = access_rights.get(&uref.addr()) { - let new_rights = uref.access_rights(); - // check if we have sufficient access rights - known_rights - .iter() - .any(|right| *right & new_rights == new_rights) - } else { - // URef is not known - false - } -} - -pub fn validate_entry_point_access_with( - contract_package: &ContractPackage, - access: &EntryPointAccess, - validator: impl Fn(&URef) -> bool, -) -> Result<(), Error> { - if let EntryPointAccess::Groups(groups) = access { - if groups.is_empty() { - // Exits early in a special case of empty list of groups regardless of the group - // checking logic below it. - return Err(Error::InvalidContext); - } - - let find_result = groups.iter().find(|g| { - contract_package - .groups() - .get(g) - .and_then(|set| set.iter().find(|u| validator(u))) - .is_some() - }); - - if find_result.is_none() { - return Err(Error::InvalidContext); - } - } - Ok(()) -} - -/// Holds information specific to the deployed contract. 
-pub struct RuntimeContext<'a, R> { - tracking_copy: Rc>>, - // Enables look up of specific uref based on human-readable name - named_keys: &'a mut NamedKeys, - // Used to check uref is known before use (prevents forging urefs) - access_rights: HashMap>, - // Original account for read only tasks taken before execution - account: &'a Account, - args: RuntimeArgs, - authorization_keys: BTreeSet, - // Key pointing to the entity we are currently running - //(could point at an account or contract in the global state) - base_key: Key, - blocktime: BlockTime, - deploy_hash: DeployHash, - gas_limit: Gas, - gas_counter: Gas, - hash_address_generator: Rc>, - uref_address_generator: Rc>, - transfer_address_generator: Rc>, - protocol_version: ProtocolVersion, - correlation_id: CorrelationId, - phase: Phase, - protocol_data: ProtocolData, - entry_point_type: EntryPointType, - transfers: Vec, -} - -impl<'a, R> RuntimeContext<'a, R> -where - R: StateReader, - R::Error: Into, -{ - #[allow(clippy::too_many_arguments)] - pub fn new( - tracking_copy: Rc>>, - entry_point_type: EntryPointType, - named_keys: &'a mut NamedKeys, - access_rights: HashMap>, - runtime_args: RuntimeArgs, - authorization_keys: BTreeSet, - account: &'a Account, - base_key: Key, - blocktime: BlockTime, - deploy_hash: DeployHash, - gas_limit: Gas, - gas_counter: Gas, - hash_address_generator: Rc>, - uref_address_generator: Rc>, - transfer_address_generator: Rc>, - protocol_version: ProtocolVersion, - correlation_id: CorrelationId, - phase: Phase, - protocol_data: ProtocolData, - transfers: Vec, - ) -> Self { - RuntimeContext { - tracking_copy, - entry_point_type, - named_keys, - access_rights, - args: runtime_args, - account, - authorization_keys, - blocktime, - deploy_hash, - base_key, - gas_limit, - gas_counter, - hash_address_generator, - uref_address_generator, - transfer_address_generator, - protocol_version, - correlation_id, - phase, - protocol_data, - transfers, - } - } - - pub fn 
authorization_keys(&self) -> &BTreeSet { - &self.authorization_keys - } - - pub fn named_keys_get(&self, name: &str) -> Option<&Key> { - self.named_keys.get(name) - } - - pub fn named_keys(&self) -> &NamedKeys { - &self.named_keys - } - - pub fn named_keys_mut(&mut self) -> &mut NamedKeys { - &mut self.named_keys - } - - pub fn named_keys_contains_key(&self, name: &str) -> bool { - self.named_keys.contains_key(name) - } - - // Helper function to avoid duplication in `remove_uref`. - fn remove_key_from_contract( - &mut self, - key: Key, - mut contract: Contract, - name: &str, - ) -> Result<(), Error> { - if contract.remove_named_key(name).is_none() { - return Ok(()); - } - self.metered_write_gs_unsafe(key, contract)?; - Ok(()) - } - - /// Remove Key from the `named_keys` map of the current context. - /// It removes both from the ephemeral map (RuntimeContext::named_keys) but - /// also persistable map (one that is found in the - /// TrackingCopy/GlobalState). - pub fn remove_key(&mut self, name: &str) -> Result<(), Error> { - match self.base_key() { - account_hash @ Key::Account(_) => { - let account: Account = { - let mut account: Account = self.read_gs_typed(&account_hash)?; - account.named_keys_mut().remove(name); - account - }; - self.named_keys.remove(name); - let account_value = self.account_to_validated_value(account)?; - self.metered_write_gs_unsafe(account_hash, account_value)?; - Ok(()) - } - contract_uref @ Key::URef(_) => { - let contract: Contract = { - let value: StoredValue = self - .tracking_copy - .borrow_mut() - .read(self.correlation_id, &contract_uref) - .map_err(Into::into)? - .ok_or(Error::KeyNotFound(contract_uref))?; - - value.try_into().map_err(Error::TypeMismatch)? 
- }; - - self.named_keys.remove(name); - self.remove_key_from_contract(contract_uref, contract, name) - } - contract_hash @ Key::Hash(_) => { - let contract: Contract = self.read_gs_typed(&contract_hash)?; - self.named_keys.remove(name); - self.remove_key_from_contract(contract_hash, contract, name) - } - transfer_addr @ Key::Transfer(_) => { - let _transfer: Transfer = self.read_gs_typed(&transfer_addr)?; - self.named_keys.remove(name); - // Users cannot remove transfers from global state - Ok(()) - } - deploy_info_addr @ Key::DeployInfo(_) => { - let _deploy_info: DeployInfo = self.read_gs_typed(&deploy_info_addr)?; - self.named_keys.remove(name); - // Users cannot remove deploy infos from global state - Ok(()) - } - era_info_addr @ Key::EraInfo(_) => { - let _era_info: EraInfo = self.read_gs_typed(&era_info_addr)?; - self.named_keys.remove(name); - // Users cannot remove era infos from global state - Ok(()) - } - Key::Balance(_) => { - self.named_keys.remove(name); - Ok(()) - } - Key::Bid(_) => { - self.named_keys.remove(name); - Ok(()) - } - Key::Withdraw(_) => { - self.named_keys.remove(name); - Ok(()) - } - Key::EraValidators(_) => { - self.named_keys.remove(name); - // Users cannot remove era validators info from global state - Ok(()) - } - } - } - - pub fn get_caller(&self) -> AccountHash { - self.account.account_hash() - } - - pub fn get_blocktime(&self) -> BlockTime { - self.blocktime - } - - pub fn get_deploy_hash(&self) -> DeployHash { - self.deploy_hash - } - - pub fn access_rights_extend(&mut self, access_rights: HashMap>) { - self.access_rights.extend(access_rights); - } - - pub fn access_rights(&self) -> &HashMap> { - &self.access_rights - } - - pub fn account(&self) -> &'a Account { - &self.account - } - - pub fn args(&self) -> &RuntimeArgs { - &self.args - } - - pub fn uref_address_generator(&self) -> Rc> { - Rc::clone(&self.uref_address_generator) - } - - pub fn hash_address_generator(&self) -> Rc> { - Rc::clone(&self.hash_address_generator) - } 
- - pub fn transfer_address_generator(&self) -> Rc> { - Rc::clone(&self.transfer_address_generator) - } - - pub(super) fn state(&self) -> Rc>> { - Rc::clone(&self.tracking_copy) - } - - pub fn gas_limit(&self) -> Gas { - self.gas_limit - } - - pub fn gas_counter(&self) -> Gas { - self.gas_counter - } - - pub fn set_gas_counter(&mut self, new_gas_counter: Gas) { - self.gas_counter = new_gas_counter; - } - - pub fn base_key(&self) -> Key { - self.base_key - } - - pub fn protocol_version(&self) -> ProtocolVersion { - self.protocol_version - } - - pub fn correlation_id(&self) -> CorrelationId { - self.correlation_id - } - - pub fn phase(&self) -> Phase { - self.phase - } - - /// Generates new deterministic hash for uses as an address. - pub fn new_hash_address(&mut self) -> Result<[u8; KEY_HASH_LENGTH], Error> { - Ok(self.hash_address_generator.borrow_mut().new_hash_address()) - } - - pub fn new_uref(&mut self, value: StoredValue) -> Result { - let uref = self - .uref_address_generator - .borrow_mut() - .new_uref(AccessRights::READ_ADD_WRITE); - self.insert_uref(uref); - self.metered_write_gs(Key::URef(uref), value)?; - Ok(uref) - } - - /// Creates a new URef where the value it stores is CLType::Unit. - pub(crate) fn new_unit_uref(&mut self) -> Result { - self.new_uref(StoredValue::CLValue(CLValue::unit())) - } - - pub fn new_transfer_addr(&mut self) -> Result { - let transfer_addr = self - .transfer_address_generator - .borrow_mut() - .create_address(); - Ok(TransferAddr::new(transfer_addr)) - } - - /// Puts `key` to the map of named keys of current context. - pub fn put_key(&mut self, name: String, key: Key) -> Result<(), Error> { - // No need to perform actual validation on the base key because an account or contract (i.e. - // the element stored under `base_key`) is allowed to add new named keys to itself. 
- let named_key_value = StoredValue::CLValue(CLValue::from_t((name.clone(), key))?); - self.validate_value(&named_key_value)?; - self.metered_add_gs_unsafe(self.base_key(), named_key_value)?; - self.insert_key(name, key); - Ok(()) - } - - pub fn read_purse_uref(&mut self, purse_uref: &URef) -> Result, Error> { - match self - .tracking_copy - .borrow_mut() - .read(self.correlation_id, &Key::Hash(purse_uref.addr())) - .map_err(Into::into)? - { - Some(stored_value) => Ok(Some(stored_value.try_into().map_err(Error::TypeMismatch)?)), - None => Ok(None), - } - } - - pub fn write_purse_uref(&mut self, purse_uref: URef, cl_value: CLValue) -> Result<(), Error> { - self.metered_write_gs_unsafe(Key::Hash(purse_uref.addr()), cl_value) - } - - pub fn read_gs(&mut self, key: &Key) -> Result, Error> { - self.validate_readable(key)?; - self.validate_key(key)?; - - self.tracking_copy - .borrow_mut() - .read(self.correlation_id, key) - .map_err(Into::into) - } - - /// DO NOT EXPOSE THIS VIA THE FFI - pub fn read_gs_direct(&mut self, key: &Key) -> Result, Error> { - self.tracking_copy - .borrow_mut() - .read(self.correlation_id, key) - .map_err(Into::into) - } - - /// This method is a wrapper over `read_gs` in the sense that it extracts the type held by a - /// `StoredValue` stored in the global state in a type safe manner. - /// - /// This is useful if you want to get the exact type from global state. - pub fn read_gs_typed(&mut self, key: &Key) -> Result - where - T: TryFrom, - T::Error: Debug, - { - let value = match self.read_gs(&key)? 
{ - None => return Err(Error::KeyNotFound(*key)), - Some(value) => value, - }; - - value.try_into().map_err(|error| { - Error::FunctionNotFound(format!( - "Type mismatch for value under {:?}: {:?}", - key, error - )) - }) - } - - pub fn get_keys(&mut self, key_tag: &KeyTag) -> Result, Error> { - self.tracking_copy - .borrow_mut() - .get_keys(self.correlation_id, key_tag) - .map_err(Into::into) - } - - pub fn read_account(&mut self, key: &Key) -> Result, Error> { - if let Key::Account(_) = key { - self.validate_key(key)?; - self.tracking_copy - .borrow_mut() - .read(self.correlation_id, key) - .map_err(Into::into) - } else { - panic!("Do not use this function for reading from non-account keys") - } - } - - pub fn write_account(&mut self, key: Key, account: Account) -> Result<(), Error> { - if let Key::Account(_) = key { - self.validate_key(&key)?; - let account_value = self.account_to_validated_value(account)?; - self.metered_write_gs_unsafe(key, account_value)?; - Ok(()) - } else { - panic!("Do not use this function for writing non-account keys") - } - } - - pub fn write_transfer(&mut self, key: Key, value: Transfer) { - if let Key::Transfer(_) = key { - self.tracking_copy - .borrow_mut() - .write(key, StoredValue::Transfer(value)); - } else { - panic!("Do not use this function for writing non-transfer keys") - } - } - - pub fn write_era_info(&mut self, key: Key, value: EraInfo) { - if let Key::EraInfo(_) = key { - self.tracking_copy - .borrow_mut() - .write(key, StoredValue::EraInfo(value)); - } else { - panic!("Do not use this function for writing non-era-info keys") - } - } - - pub fn store_function( - &mut self, - contract: StoredValue, - ) -> Result<[u8; KEY_HASH_LENGTH], Error> { - self.validate_value(&contract)?; - self.new_uref(contract).map(|uref| uref.addr()) - } - - pub fn store_function_at_hash( - &mut self, - contract: StoredValue, - ) -> Result<[u8; KEY_HASH_LENGTH], Error> { - let new_hash = self.new_hash_address()?; - 
self.validate_value(&contract)?; - self.metered_write_gs_unsafe(new_hash, contract)?; - Ok(new_hash) - } - - pub fn insert_key(&mut self, name: String, key: Key) { - if let Key::URef(uref) = key { - self.insert_uref(uref); - } - self.named_keys.insert(name, key); - } - - pub fn insert_uref(&mut self, uref: URef) { - let rights = uref.access_rights(); - let entry = self - .access_rights - .entry(uref.addr()) - .or_insert_with(|| std::iter::empty().collect()); - entry.insert(rights); - } - - pub fn effect(&self) -> ExecutionEffect { - self.tracking_copy.borrow_mut().effect() - } - - pub fn transfers(&self) -> &Vec { - &self.transfers - } - - pub fn transfers_mut(&mut self) -> &mut Vec { - &mut self.transfers - } - - /// Validates whether keys used in the `value` are not forged. - fn validate_value(&self, value: &StoredValue) -> Result<(), Error> { - match value { - StoredValue::CLValue(cl_value) => match cl_value.cl_type() { - CLType::Bool - | CLType::I32 - | CLType::I64 - | CLType::U8 - | CLType::U32 - | CLType::U64 - | CLType::U128 - | CLType::U256 - | CLType::U512 - | CLType::Unit - | CLType::String - | CLType::Option(_) - | CLType::List(_) - | CLType::ByteArray(..) - | CLType::Result { .. } - | CLType::Map { .. } - | CLType::Tuple1(_) - | CLType::Tuple3(_) - | CLType::Any - | CLType::PublicKey => Ok(()), - CLType::Key => { - let key: Key = cl_value.to_owned().into_t()?; // TODO: optimize? - self.validate_key(&key) - } - CLType::URef => { - let uref: URef = cl_value.to_owned().into_t()?; // TODO: optimize? - self.validate_uref(&uref) - } - tuple @ CLType::Tuple2(_) if *tuple == casper_types::named_key_type() => { - let (_name, key): (String, Key) = cl_value.to_owned().into_t()?; // TODO: optimize? - self.validate_key(&key) - } - CLType::Tuple2(_) => Ok(()), - }, - StoredValue::Account(account) => { - // This should never happen as accounts can't be created by contracts. - // I am putting this here for the sake of completeness. 
- account - .named_keys() - .values() - .try_for_each(|key| self.validate_key(key)) - } - StoredValue::ContractWasm(_) => Ok(()), - StoredValue::Contract(contract_header) => contract_header - .named_keys() - .values() - .try_for_each(|key| self.validate_key(key)), - // TODO: anything to validate here? - StoredValue::ContractPackage(_) => Ok(()), - StoredValue::Transfer(_) => Ok(()), - StoredValue::DeployInfo(_) => Ok(()), - StoredValue::EraInfo(_) => Ok(()), - StoredValue::Bid(_) => Ok(()), - StoredValue::Withdraw(_) => Ok(()), - StoredValue::EraValidators(_) => Ok(()), - } - } - - /// Validates whether key is not forged (whether it can be found in the - /// `named_keys`) and whether the version of a key that contract wants - /// to use, has access rights that are less powerful than access rights' - /// of the key in the `named_keys`. - pub fn validate_key(&self, key: &Key) -> Result<(), Error> { - let uref = match key { - Key::URef(uref) => uref, - _ => return Ok(()), - }; - self.validate_uref(uref) - } - - pub fn validate_uref(&self, uref: &URef) -> Result<(), Error> { - if self.account.main_purse().addr() == uref.addr() { - // If passed uref matches account's purse then we have to also validate their - // access rights. 
- let rights = self.account.main_purse().access_rights(); - let uref_rights = uref.access_rights(); - // Access rights of the passed uref, and the account's purse should match - if rights & uref_rights == uref_rights { - return Ok(()); - } - } - - // Check if the `key` is known - if uref_has_access_rights(uref, &self.access_rights) { - Ok(()) - } else { - Err(Error::ForgedReference(*uref)) - } - } - - pub fn deserialize_keys(&self, bytes: Vec) -> Result, Error> { - let keys: Vec = bytesrepr::deserialize(bytes)?; - keys.iter().try_for_each(|k| self.validate_key(k))?; - Ok(keys) - } - - pub fn deserialize_urefs(&self, bytes: Vec) -> Result, Error> { - let keys: Vec = bytesrepr::deserialize(bytes)?; - keys.iter().try_for_each(|k| self.validate_uref(k))?; - Ok(keys) - } - - fn validate_readable(&self, key: &Key) -> Result<(), Error> { - if self.is_readable(&key) { - Ok(()) - } else { - Err(Error::InvalidAccess { - required: AccessRights::READ, - }) - } - } - - fn validate_addable(&self, key: &Key) -> Result<(), Error> { - if self.is_addable(&key) { - Ok(()) - } else { - Err(Error::InvalidAccess { - required: AccessRights::ADD, - }) - } - } - - fn validate_writeable(&self, key: &Key) -> Result<(), Error> { - if self.is_writeable(&key) { - Ok(()) - } else { - Err(Error::InvalidAccess { - required: AccessRights::WRITE, - }) - } - } - - /// Tests whether reading from the `key` is valid. - pub fn is_readable(&self, key: &Key) -> bool { - match key { - Key::Account(_) => &self.base_key() == key, - Key::Hash(_) => true, - Key::URef(uref) => uref.is_readable(), - Key::Transfer(_) => true, - Key::DeployInfo(_) => true, - Key::EraInfo(_) => true, - Key::Balance(_) => false, - Key::Bid(_) => true, - Key::Withdraw(_) => true, - Key::EraValidators(_) => true, - } - } - - /// Tests whether addition to `key` is valid. - pub fn is_addable(&self, key: &Key) -> bool { - match key { - Key::Account(_) | Key::Hash(_) => &self.base_key() == key, // ??? 
- Key::URef(uref) => uref.is_addable(), - Key::Transfer(_) => false, - Key::DeployInfo(_) => false, - Key::EraInfo(_) => false, - Key::Balance(_) => false, - Key::Bid(_) => false, - Key::Withdraw(_) => false, - Key::EraValidators(_) => false, - } - } - - /// Tests whether writing to `key` is valid. - pub fn is_writeable(&self, key: &Key) -> bool { - match key { - Key::Account(_) | Key::Hash(_) => false, - Key::URef(uref) => uref.is_writeable(), - Key::Transfer(_) => false, - Key::DeployInfo(_) => false, - Key::EraInfo(_) => false, - Key::Balance(_) => false, - Key::Bid(_) => false, - Key::Withdraw(_) => false, - Key::EraValidators(_) => false, - } - } - - /// Safely charge the specified amount of gas, up to the available gas limit. - /// - /// Returns [`Error::GasLimit`] if gas limit exceeded and `()` if not. - /// Intuition about the return value sense is to answer the question 'are we - /// allowed to continue?' - pub(crate) fn charge_gas(&mut self, amount: Gas) -> Result<(), Error> { - let prev = self.gas_counter(); - let gas_limit = self.gas_limit(); - // gas charge overflow protection - match prev.checked_add(amount) { - None => { - self.set_gas_counter(gas_limit); - Err(Error::GasLimit) - } - Some(val) if val > gas_limit => { - self.set_gas_counter(gas_limit); - Err(Error::GasLimit) - } - Some(val) => { - self.set_gas_counter(val); - Ok(()) - } - } - } - - /// Checks if we are calling a system contract. - pub(crate) fn is_system_contract(&self) -> bool { - if let Some(hash) = self.base_key().into_hash() { - let system_contracts = self.protocol_data().system_contracts(); - if system_contracts.contains(&hash.into()) { - return true; - } - } - false - } - - /// Charges gas for specified amount of bytes used. - fn charge_gas_storage(&mut self, bytes_count: usize) -> Result<(), Error> { - if self.is_system_contract() { - // Don't charge storage used while executing a system contract. 
- return Ok(()); - } - - let storage_costs = self.protocol_data().wasm_config().storage_costs(); - - let gas_cost = storage_costs.calculate_gas_cost(bytes_count); - - self.charge_gas(gas_cost) - } - - /// Charges gas for using a host system contract's entrypoint. - pub(crate) fn charge_system_contract_call(&mut self, call_cost: T) -> Result<(), Error> - where - T: Into, - { - if self.account.account_hash() == PublicKey::System.to_account_hash() { - // Don't try to charge a system account for calling a system contract's entry point. - // This will make sure that (for example) calling a mint's transfer from within auction - // wouldn't try to incur cost to system account. - return Ok(()); - } - let amount: Gas = call_cost.into(); - self.charge_gas(amount) - } - - /// Writes data to global state with a measurement - pub(crate) fn metered_write_gs_unsafe(&mut self, key: K, value: V) -> Result<(), Error> - where - K: Into, - V: Into, - { - let stored_value = value.into(); - - // Charge for amount as measured by serialized length - let bytes_count = stored_value.serialized_length(); - self.charge_gas_storage(bytes_count)?; - - self.tracking_copy - .borrow_mut() - .write(key.into(), stored_value); - Ok(()) - } - - pub fn metered_write_gs(&mut self, key: Key, value: T) -> Result<(), Error> - where - T: Into, - { - let stored_value = value.into(); - self.validate_writeable(&key)?; - self.validate_key(&key)?; - self.validate_value(&stored_value)?; - self.metered_write_gs_unsafe(key, stored_value)?; - Ok(()) - } - - pub(crate) fn metered_add_gs_unsafe( - &mut self, - key: Key, - value: StoredValue, - ) -> Result<(), Error> { - let value_bytes_count = value.serialized_length(); - self.charge_gas_storage(value_bytes_count)?; - - match self - .tracking_copy - .borrow_mut() - .add(self.correlation_id, key, value) - { - Err(storage_error) => Err(storage_error.into()), - Ok(AddResult::Success) => Ok(()), - Ok(AddResult::KeyNotFound(key)) => Err(Error::KeyNotFound(key)), - 
Ok(AddResult::TypeMismatch(type_mismatch)) => Err(Error::TypeMismatch(type_mismatch)), - Ok(AddResult::Serialization(error)) => Err(Error::BytesRepr(error)), - } - } - - /// Adds `value` to the `key`. The premise for being able to `add` value is - /// that the type of it value can be added (is a Monoid). If the - /// values can't be added, either because they're not a Monoid or if the - /// value stored under `key` has different type, then `TypeMismatch` - /// errors is returned. - pub(crate) fn metered_add_gs(&mut self, key: K, value: V) -> Result<(), Error> - where - K: Into, - V: Into, - { - let key = key.into(); - let value = value.into(); - self.validate_addable(&key)?; - self.validate_key(&key)?; - self.validate_value(&value)?; - self.metered_add_gs_unsafe(key, value) - } - - pub fn add_associated_key( - &mut self, - account_hash: AccountHash, - weight: Weight, - ) -> Result<(), Error> { - // Check permission to modify associated keys - if !self.is_valid_context() { - // Exit early with error to avoid mutations - return Err(AddKeyFailure::PermissionDenied.into()); - } - - if !self - .account() - .can_manage_keys_with(&self.authorization_keys) - { - // Exit early if authorization keys weight doesn't exceed required - // key management threshold - return Err(AddKeyFailure::PermissionDenied.into()); - } - - // Converts an account's public key into a URef - let key = Key::Account(self.account().account_hash()); - - // Take an account out of the global state - let account = { - let mut account: Account = self.read_gs_typed(&key)?; - // Exit early in case of error without updating global state - account - .add_associated_key(account_hash, weight) - .map_err(Error::from)?; - account - }; - - let account_value = self.account_to_validated_value(account)?; - - self.metered_write_gs_unsafe(key, account_value)?; - - Ok(()) - } - - pub fn remove_associated_key(&mut self, account_hash: AccountHash) -> Result<(), Error> { - // Check permission to modify associated keys - if 
!self.is_valid_context() { - // Exit early with error to avoid mutations - return Err(RemoveKeyFailure::PermissionDenied.into()); - } - - if !self - .account() - .can_manage_keys_with(&self.authorization_keys) - { - // Exit early if authorization keys weight doesn't exceed required - // key management threshold - return Err(RemoveKeyFailure::PermissionDenied.into()); - } - - // Converts an account's public key into a URef - let key = Key::Account(self.account().account_hash()); - - // Take an account out of the global state - let mut account: Account = self.read_gs_typed(&key)?; - - // Exit early in case of error without updating global state - account - .remove_associated_key(account_hash) - .map_err(Error::from)?; - - let account_value = self.account_to_validated_value(account)?; - - self.metered_write_gs_unsafe(key, account_value)?; - - Ok(()) - } - - pub fn update_associated_key( - &mut self, - account_hash: AccountHash, - weight: Weight, - ) -> Result<(), Error> { - // Check permission to modify associated keys - if !self.is_valid_context() { - // Exit early with error to avoid mutations - return Err(UpdateKeyFailure::PermissionDenied.into()); - } - - if !self - .account() - .can_manage_keys_with(&self.authorization_keys) - { - // Exit early if authorization keys weight doesn't exceed required - // key management threshold - return Err(UpdateKeyFailure::PermissionDenied.into()); - } - - // Converts an account's public key into a URef - let key = Key::Account(self.account().account_hash()); - - // Take an account out of the global state - let mut account: Account = self.read_gs_typed(&key)?; - - // Exit early in case of error without updating global state - account - .update_associated_key(account_hash, weight) - .map_err(Error::from)?; - - let account_value = self.account_to_validated_value(account)?; - - self.metered_write_gs_unsafe(key, account_value)?; - - Ok(()) - } - - pub fn set_action_threshold( - &mut self, - action_type: ActionType, - threshold: 
Weight, - ) -> Result<(), Error> { - // Check permission to modify associated keys - if !self.is_valid_context() { - // Exit early with error to avoid mutations - return Err(SetThresholdFailure::PermissionDeniedError.into()); - } - - if !self - .account() - .can_manage_keys_with(&self.authorization_keys) - { - // Exit early if authorization keys weight doesn't exceed required - // key management threshold - return Err(SetThresholdFailure::PermissionDeniedError.into()); - } - - // Converts an account's public key into a URef - let key = Key::Account(self.account().account_hash()); - - // Take an account out of the global state - let mut account: Account = self.read_gs_typed(&key)?; - - // Exit early in case of error without updating global state - account - .set_action_threshold(action_type, threshold) - .map_err(Error::from)?; - - let account_value = self.account_to_validated_value(account)?; - - self.metered_write_gs_unsafe(key, account_value)?; - - Ok(()) - } - - pub fn protocol_data(&self) -> &ProtocolData { - &self.protocol_data - } - - /// Creates validated instance of `StoredValue` from `account`. - fn account_to_validated_value(&self, account: Account) -> Result { - let value = StoredValue::Account(account); - self.validate_value(&value)?; - Ok(value) - } - - /// Checks if the account context is valid. - fn is_valid_context(&self) -> bool { - self.base_key() == Key::Account(self.account().account_hash()) - } - - /// Gets main purse id - pub fn get_main_purse(&self) -> Result { - if !self.is_valid_context() { - return Err(Error::InvalidContext); - } - Ok(self.account().main_purse()) - } - - /// Gets entry point type. - pub fn entry_point_type(&self) -> EntryPointType { - self.entry_point_type - } - - /// Gets given contract package with its access_key validated against current context. 
- pub(crate) fn get_validated_contract_package( - &mut self, - package_hash: ContractPackageHash, - ) -> Result { - let package_hash_key = Key::from(package_hash); - self.validate_key(&package_hash_key)?; - let contract_package: ContractPackage = self.read_gs_typed(&Key::from(package_hash))?; - self.validate_uref(&contract_package.access_key())?; - Ok(contract_package) - } -} diff --git a/execution_engine/src/core/runtime_context/tests.rs b/execution_engine/src/core/runtime_context/tests.rs deleted file mode 100644 index 733ac7bf9a..0000000000 --- a/execution_engine/src/core/runtime_context/tests.rs +++ /dev/null @@ -1,921 +0,0 @@ -use std::{ - cell::RefCell, - collections::{BTreeSet, HashMap, HashSet}, - iter::{self, FromIterator}, - rc::Rc, -}; - -use once_cell::sync::Lazy; -use rand::RngCore; - -use casper_types::{ - account::{ - AccountHash, ActionType, AddKeyFailure, RemoveKeyFailure, SetThresholdFailure, Weight, - }, - bytesrepr::ToBytes, - contracts::NamedKeys, - AccessRights, BlockTime, CLValue, Contract, DeployHash, EntryPointType, EntryPoints, Key, - Phase, ProtocolVersion, RuntimeArgs, URef, KEY_HASH_LENGTH, U512, -}; - -use super::{Address, Error, RuntimeContext}; -use crate::{ - core::{ - execution::AddressGenerator, runtime::extract_access_rights_from_keys, - tracking_copy::TrackingCopy, - }, - shared::{ - account::{Account, AssociatedKeys}, - additive_map::AdditiveMap, - gas::Gas, - newtypes::CorrelationId, - stored_value::StoredValue, - transform::Transform, - }, - storage::{ - global_state::{ - in_memory::{InMemoryGlobalState, InMemoryGlobalStateView}, - CommitResult, StateProvider, - }, - protocol_data::ProtocolData, - }, -}; - -const DEPLOY_HASH: [u8; 32] = [1u8; 32]; -const PHASE: Phase = Phase::Session; -const GAS_LIMIT: u64 = 500_000_000_000_000u64; - -static TEST_PROTOCOL_DATA: Lazy = Lazy::new(ProtocolData::default); - -fn mock_tracking_copy( - init_key: Key, - init_account: Account, -) -> TrackingCopy { - let correlation_id = 
CorrelationId::new(); - let hist = InMemoryGlobalState::empty().unwrap(); - let root_hash = hist.empty_root_hash; - let transform = Transform::Write(StoredValue::Account(init_account)); - - let mut m = AdditiveMap::new(); - m.insert(init_key, transform); - let commit_result = hist - .commit(correlation_id, root_hash, m) - .expect("Creation of mocked account should be a success."); - - let new_hash = match commit_result { - CommitResult::Success { state_root, .. } => state_root, - other => panic!("Commiting changes to test History failed: {:?}.", other), - }; - - let reader = hist - .checkout(new_hash) - .expect("Checkout should not throw errors.") - .expect("Root hash should exist."); - - TrackingCopy::new(reader) -} - -fn mock_account_with_purse(account_hash: AccountHash, purse: [u8; 32]) -> (Key, Account) { - let associated_keys = AssociatedKeys::new(account_hash, Weight::new(1)); - let account = Account::new( - account_hash, - NamedKeys::new(), - URef::new(purse, AccessRights::READ_ADD_WRITE), - associated_keys, - Default::default(), - ); - let key = Key::Account(account_hash); - - (key, account) -} - -fn mock_account(account_hash: AccountHash) -> (Key, Account) { - mock_account_with_purse(account_hash, [0; 32]) -} - -// create random account key. -fn random_account_key(entropy_source: &mut G) -> Key { - let mut key = [0u8; 32]; - entropy_source.fill_bytes(&mut key); - Key::Account(AccountHash::new(key)) -} - -// create random contract key. -fn random_contract_key(entropy_source: &mut G) -> Key { - let mut key = [0u8; 32]; - entropy_source.fill_bytes(&mut key); - Key::Hash(key) -} - -// Create URef Key. 
-fn create_uref(address_generator: &mut AddressGenerator, rights: AccessRights) -> Key { - let address = address_generator.create_address(); - Key::URef(URef::new(address, rights)) -} - -fn random_hash(entropy_source: &mut G) -> Key { - let mut key = [0u8; KEY_HASH_LENGTH]; - entropy_source.fill_bytes(&mut key); - Key::Hash(key) -} - -fn mock_runtime_context<'a>( - account: &'a Account, - base_key: Key, - named_keys: &'a mut NamedKeys, - access_rights: HashMap>, - hash_address_generator: AddressGenerator, - uref_address_generator: AddressGenerator, - transfer_address_generator: AddressGenerator, -) -> RuntimeContext<'a, InMemoryGlobalStateView> { - let tracking_copy = mock_tracking_copy(base_key, account.clone()); - RuntimeContext::new( - Rc::new(RefCell::new(tracking_copy)), - EntryPointType::Session, - named_keys, - access_rights, - RuntimeArgs::new(), - BTreeSet::from_iter(vec![AccountHash::new([0; 32])]), - &account, - base_key, - BlockTime::new(0), - DeployHash::new([1u8; 32]), - Gas::new(U512::from(GAS_LIMIT)), - Gas::default(), - Rc::new(RefCell::new(hash_address_generator)), - Rc::new(RefCell::new(uref_address_generator)), - Rc::new(RefCell::new(transfer_address_generator)), - ProtocolVersion::V1_0_0, - CorrelationId::new(), - Phase::Session, - *TEST_PROTOCOL_DATA, - Vec::default(), - ) -} - -#[allow(clippy::assertions_on_constants)] -fn assert_forged_reference(result: Result) { - match result { - Err(Error::ForgedReference(_)) => assert!(true), - _ => panic!("Error. Test should have failed with ForgedReference error but didn't."), - } -} - -#[allow(clippy::assertions_on_constants)] -fn assert_invalid_access(result: Result, expecting: AccessRights) { - match result { - Err(Error::InvalidAccess { required }) if required == expecting => assert!(true), - other => panic!( - "Error. 
Test should have failed with InvalidAccess error but didn't: {:?}.", - other - ), - } -} - -fn test(access_rights: HashMap>, query: F) -> Result -where - F: FnOnce(RuntimeContext) -> Result, -{ - let deploy_hash = [1u8; 32]; - let (base_key, account) = mock_account(AccountHash::new([0u8; 32])); - - let mut named_keys = NamedKeys::new(); - let uref_address_generator = AddressGenerator::new(&deploy_hash, Phase::Session); - let hash_address_generator = AddressGenerator::new(&deploy_hash, Phase::Session); - let transfer_address_generator = AddressGenerator::new(&deploy_hash, Phase::Session); - let runtime_context = mock_runtime_context( - &account, - base_key, - &mut named_keys, - access_rights, - hash_address_generator, - uref_address_generator, - transfer_address_generator, - ); - query(runtime_context) -} - -#[test] -fn use_uref_valid() { - // Test fixture - let mut rng = AddressGenerator::new(&DEPLOY_HASH, PHASE); - let uref = create_uref(&mut rng, AccessRights::READ_WRITE); - let access_rights = extract_access_rights_from_keys(vec![uref]); - // Use uref as the key to perform an action on the global state. - // This should succeed because the uref is valid. 
- let value = StoredValue::CLValue(CLValue::from_t(43_i32).unwrap()); - let query_result = test(access_rights, |mut rc| rc.metered_write_gs(uref, value)); - query_result.expect("writing using valid uref should succeed"); -} - -#[test] -fn use_uref_forged() { - // Test fixture - let mut rng = AddressGenerator::new(&DEPLOY_HASH, PHASE); - let uref = create_uref(&mut rng, AccessRights::READ_WRITE); - let access_rights = HashMap::new(); - let value = StoredValue::CLValue(CLValue::from_t(43_i32).unwrap()); - let query_result = test(access_rights, |mut rc| rc.metered_write_gs(uref, value)); - - assert_forged_reference(query_result); -} - -#[test] -fn account_key_not_writeable() { - let mut rng = rand::thread_rng(); - let acc_key = random_account_key(&mut rng); - let query_result = test(HashMap::new(), |mut rc| { - rc.metered_write_gs( - acc_key, - StoredValue::CLValue(CLValue::from_t(1_i32).unwrap()), - ) - }); - assert_invalid_access(query_result, AccessRights::WRITE); -} - -#[test] -fn account_key_readable_valid() { - // Account key is readable if it is a "base" key - current context of the - // execution. - let query_result = test(HashMap::new(), |mut rc| { - let base_key = rc.base_key(); - - let result = rc - .read_gs(&base_key) - .expect("Account key is readable.") - .expect("Account is found in GS."); - - assert_eq!(result, StoredValue::Account(rc.account().clone())); - Ok(()) - }); - - assert!(query_result.is_ok()); -} - -#[test] -fn account_key_readable_invalid() { - // Account key is NOT readable if it is different than the "base" key. - let mut rng = rand::thread_rng(); - let other_acc_key = random_account_key(&mut rng); - - let query_result = test(HashMap::new(), |mut rc| rc.read_gs(&other_acc_key)); - - assert_invalid_access(query_result, AccessRights::READ); -} - -#[test] -fn account_key_addable_valid() { - // Account key is addable if it is a "base" key - current context of the - // execution. 
- let mut rng = AddressGenerator::new(&DEPLOY_HASH, PHASE); - let uref = create_uref(&mut rng, AccessRights::READ); - let access_rights = extract_access_rights_from_keys(vec![uref]); - let query_result = test(access_rights, |mut rc| { - let base_key = rc.base_key(); - let uref_name = "NewURef".to_owned(); - let named_key = StoredValue::CLValue(CLValue::from_t((uref_name.clone(), uref)).unwrap()); - - rc.metered_add_gs(base_key, named_key) - .expect("Adding should work."); - - let named_key_transform = Transform::AddKeys(iter::once((uref_name, uref)).collect()); - - assert_eq!( - *rc.effect().transforms.get(&base_key).unwrap(), - named_key_transform - ); - - Ok(()) - }); - - assert!(query_result.is_ok()); -} - -#[test] -fn account_key_addable_invalid() { - // Account key is NOT addable if it is a "base" key - current context of the - // execution. - let mut rng = rand::thread_rng(); - let other_acc_key = random_account_key(&mut rng); - - let query_result = test(HashMap::new(), |mut rc| { - rc.metered_add_gs( - other_acc_key, - StoredValue::CLValue(CLValue::from_t(1_i32).unwrap()), - ) - }); - - assert_invalid_access(query_result, AccessRights::ADD); -} - -#[test] -fn contract_key_readable_valid() { - // Account key is readable if it is a "base" key - current context of the - // execution. - let mut rng = rand::thread_rng(); - let contract_key = random_contract_key(&mut rng); - let query_result = test(HashMap::new(), |mut rc| rc.read_gs(&contract_key)); - - assert!(query_result.is_ok()); -} - -#[test] -fn contract_key_not_writeable() { - // Account key is readable if it is a "base" key - current context of the - // execution. 
- let mut rng = rand::thread_rng(); - let contract_key = random_contract_key(&mut rng); - let query_result = test(HashMap::new(), |mut rc| { - rc.metered_write_gs( - contract_key, - StoredValue::CLValue(CLValue::from_t(1_i32).unwrap()), - ) - }); - - assert_invalid_access(query_result, AccessRights::WRITE); -} - -#[test] -fn contract_key_addable_valid() { - // Contract key is addable if it is a "base" key - current context of the execution. - let account_hash = AccountHash::new([0u8; 32]); - let (account_key, account) = mock_account(account_hash); - let authorization_keys = BTreeSet::from_iter(vec![account_hash]); - let hash_address_generator = AddressGenerator::new(&DEPLOY_HASH, PHASE); - let mut uref_address_generator = AddressGenerator::new(&DEPLOY_HASH, PHASE); - let transfer_address_generator = AddressGenerator::new(&DEPLOY_HASH, PHASE); - - let mut rng = rand::thread_rng(); - let contract_key = random_contract_key(&mut rng); - let contract = StoredValue::Contract(Contract::default()); - - let tracking_copy = Rc::new(RefCell::new(mock_tracking_copy( - account_key, - account.clone(), - ))); - tracking_copy.borrow_mut().write(contract_key, contract); - - let mut named_keys = NamedKeys::new(); - let uref = create_uref(&mut uref_address_generator, AccessRights::WRITE); - let uref_name = "NewURef".to_owned(); - let named_uref_tuple = - StoredValue::CLValue(CLValue::from_t((uref_name.clone(), uref)).unwrap()); - - let access_rights = extract_access_rights_from_keys(vec![uref]); - - let mut runtime_context = RuntimeContext::new( - Rc::clone(&tracking_copy), - EntryPointType::Session, - &mut named_keys, - access_rights, - RuntimeArgs::new(), - authorization_keys, - &account, - contract_key, - BlockTime::new(0), - DeployHash::new(DEPLOY_HASH), - Gas::new(U512::from(GAS_LIMIT)), - Gas::default(), - Rc::new(RefCell::new(hash_address_generator)), - Rc::new(RefCell::new(uref_address_generator)), - Rc::new(RefCell::new(transfer_address_generator)), - 
ProtocolVersion::V1_0_0, - CorrelationId::new(), - PHASE, - Default::default(), - Vec::default(), - ); - - runtime_context - .metered_add_gs(contract_key, named_uref_tuple) - .expect("Adding should work."); - - let updated_contract = StoredValue::Contract(Contract::new( - [0u8; 32].into(), - [0u8; 32].into(), - iter::once((uref_name, uref)).collect(), - EntryPoints::default(), - ProtocolVersion::V1_0_0, - )); - - assert_eq!( - *tracking_copy - .borrow() - .effect() - .transforms - .get(&contract_key) - .unwrap(), - Transform::Write(updated_contract) - ); -} - -#[test] -fn contract_key_addable_invalid() { - let account_hash = AccountHash::new([0u8; 32]); - let (account_key, account) = mock_account(account_hash); - let authorization_keys = BTreeSet::from_iter(vec![account_hash]); - let hash_address_generator = AddressGenerator::new(&DEPLOY_HASH, PHASE); - let mut uref_address_generator = AddressGenerator::new(&DEPLOY_HASH, PHASE); - let transfer_address_generator = AddressGenerator::new(&DEPLOY_HASH, PHASE); - let mut rng = rand::thread_rng(); - let contract_key = random_contract_key(&mut rng); - - let other_contract_key = random_contract_key(&mut rng); - let contract = StoredValue::Contract(Contract::default()); - let tracking_copy = Rc::new(RefCell::new(mock_tracking_copy( - account_key, - account.clone(), - ))); - - tracking_copy.borrow_mut().write(contract_key, contract); - - let mut named_keys = NamedKeys::new(); - - let uref = create_uref(&mut uref_address_generator, AccessRights::WRITE); - let uref_name = "NewURef".to_owned(); - let named_uref_tuple = StoredValue::CLValue(CLValue::from_t((uref_name, uref)).unwrap()); - - let access_rights = extract_access_rights_from_keys(vec![uref]); - let mut runtime_context = RuntimeContext::new( - Rc::clone(&tracking_copy), - EntryPointType::Session, - &mut named_keys, - access_rights, - RuntimeArgs::new(), - authorization_keys, - &account, - other_contract_key, - BlockTime::new(0), - DeployHash::new(DEPLOY_HASH), - 
Gas::default(), - Gas::default(), - Rc::new(RefCell::new(hash_address_generator)), - Rc::new(RefCell::new(uref_address_generator)), - Rc::new(RefCell::new(transfer_address_generator)), - ProtocolVersion::V1_0_0, - CorrelationId::new(), - PHASE, - Default::default(), - Vec::default(), - ); - - let result = runtime_context.metered_add_gs(contract_key, named_uref_tuple); - - assert_invalid_access(result, AccessRights::ADD); -} - -#[test] -fn uref_key_readable_valid() { - let mut rng = AddressGenerator::new(&DEPLOY_HASH, PHASE); - let uref_key = create_uref(&mut rng, AccessRights::READ); - let access_rights = extract_access_rights_from_keys(vec![uref_key]); - let query_result = test(access_rights, |mut rc| rc.read_gs(&uref_key)); - assert!(query_result.is_ok()); -} - -#[test] -fn uref_key_readable_invalid() { - let mut rng = AddressGenerator::new(&DEPLOY_HASH, PHASE); - let uref_key = create_uref(&mut rng, AccessRights::WRITE); - let access_rights = extract_access_rights_from_keys(vec![uref_key]); - let query_result = test(access_rights, |mut rc| rc.read_gs(&uref_key)); - assert_invalid_access(query_result, AccessRights::READ); -} - -#[test] -fn uref_key_writeable_valid() { - let mut rng = AddressGenerator::new(&DEPLOY_HASH, PHASE); - let uref_key = create_uref(&mut rng, AccessRights::WRITE); - let access_rights = extract_access_rights_from_keys(vec![uref_key]); - let query_result = test(access_rights, |mut rc| { - rc.metered_write_gs( - uref_key, - StoredValue::CLValue(CLValue::from_t(1_i32).unwrap()), - ) - }); - assert!(query_result.is_ok()); -} - -#[test] -fn uref_key_writeable_invalid() { - let mut rng = AddressGenerator::new(&DEPLOY_HASH, PHASE); - let uref_key = create_uref(&mut rng, AccessRights::READ); - let access_rights = extract_access_rights_from_keys(vec![uref_key]); - let query_result = test(access_rights, |mut rc| { - rc.metered_write_gs( - uref_key, - StoredValue::CLValue(CLValue::from_t(1_i32).unwrap()), - ) - }); - assert_invalid_access(query_result, 
AccessRights::WRITE); -} - -#[test] -fn uref_key_addable_valid() { - let mut rng = AddressGenerator::new(&DEPLOY_HASH, PHASE); - let uref_key = create_uref(&mut rng, AccessRights::ADD_WRITE); - let access_rights = extract_access_rights_from_keys(vec![uref_key]); - let query_result = test(access_rights, |mut rc| { - rc.metered_write_gs(uref_key, CLValue::from_t(10_i32).unwrap()) - .expect("Writing to the GlobalState should work."); - rc.metered_add_gs(uref_key, CLValue::from_t(1_i32).unwrap()) - }); - assert!(query_result.is_ok()); -} - -#[test] -fn uref_key_addable_invalid() { - let mut rng = AddressGenerator::new(&DEPLOY_HASH, PHASE); - let uref_key = create_uref(&mut rng, AccessRights::WRITE); - let access_rights = extract_access_rights_from_keys(vec![uref_key]); - let query_result = test(access_rights, |mut rc| { - rc.metered_add_gs( - uref_key, - StoredValue::CLValue(CLValue::from_t(1_i32).unwrap()), - ) - }); - assert_invalid_access(query_result, AccessRights::ADD); -} - -#[test] -fn hash_key_readable() { - // values under hash's are universally readable - let query = |runtime_context: RuntimeContext| { - let mut rng = rand::thread_rng(); - let key = random_hash(&mut rng); - runtime_context.validate_readable(&key) - }; - let query_result = test(HashMap::new(), query); - assert!(query_result.is_ok()) -} - -#[test] -fn hash_key_writeable() { - // values under hash's are immutable - let query = |runtime_context: RuntimeContext| { - let mut rng = rand::thread_rng(); - let key = random_hash(&mut rng); - runtime_context.validate_writeable(&key) - }; - let query_result = test(HashMap::new(), query); - assert!(query_result.is_err()) -} - -#[test] -fn hash_key_addable_invalid() { - // values under hashes are immutable - let query = |runtime_context: RuntimeContext| { - let mut rng = rand::thread_rng(); - let key = random_hash(&mut rng); - runtime_context.validate_addable(&key) - }; - let query_result = test(HashMap::new(), query); - assert!(query_result.is_err()) -} - 
-#[test] -fn manage_associated_keys() { - // Testing a valid case only - successfuly added a key, and successfuly removed, - // making sure `account_dirty` mutated - let access_rights = HashMap::new(); - let query = |mut runtime_context: RuntimeContext| { - let account_hash = AccountHash::new([42; 32]); - let weight = Weight::new(155); - - // Add a key (this doesn't check for all invariants as `add_key` - // is already tested in different place) - runtime_context - .add_associated_key(account_hash, weight) - .expect("Unable to add key"); - - let effect = runtime_context.effect(); - let transform = effect.transforms.get(&runtime_context.base_key()).unwrap(); - let account = match transform { - Transform::Write(StoredValue::Account(account)) => account, - _ => panic!("Invalid transform operation found"), - }; - account - .get_associated_key_weight(account_hash) - .expect("Account hash wasn't added to associated keys"); - - let new_weight = Weight::new(100); - runtime_context - .update_associated_key(account_hash, new_weight) - .expect("Unable to update key"); - - let effect = runtime_context.effect(); - let transform = effect.transforms.get(&runtime_context.base_key()).unwrap(); - let account = match transform { - Transform::Write(StoredValue::Account(account)) => account, - _ => panic!("Invalid transform operation found"), - }; - let value = account - .get_associated_key_weight(account_hash) - .expect("Account hash wasn't added to associated keys"); - - assert_eq!(value, &new_weight, "value was not updated"); - - // Remove a key that was already added - runtime_context - .remove_associated_key(account_hash) - .expect("Unable to remove key"); - - // Verify - let effect = runtime_context.effect(); - let transform = effect.transforms.get(&runtime_context.base_key()).unwrap(); - let account = match transform { - Transform::Write(StoredValue::Account(account)) => account, - _ => panic!("Invalid transform operation found"), - }; - - 
assert!(account.get_associated_key_weight(account_hash).is_none()); - - // Remove a key that was already removed - runtime_context - .remove_associated_key(account_hash) - .expect_err("A non existing key was unexpectedly removed again"); - - Ok(()) - }; - let _ = test(access_rights, query); -} - -#[test] -fn action_thresholds_management() { - // Testing a valid case only - successfuly added a key, and successfuly removed, - // making sure `account_dirty` mutated - let access_rights = HashMap::new(); - let query = |mut runtime_context: RuntimeContext| { - runtime_context - .add_associated_key(AccountHash::new([42; 32]), Weight::new(254)) - .expect("Unable to add associated key with maximum weight"); - runtime_context - .set_action_threshold(ActionType::KeyManagement, Weight::new(253)) - .expect("Unable to set action threshold KeyManagement"); - runtime_context - .set_action_threshold(ActionType::Deployment, Weight::new(252)) - .expect("Unable to set action threshold Deployment"); - - let effect = runtime_context.effect(); - let transform = effect.transforms.get(&runtime_context.base_key()).unwrap(); - let mutated_account = match transform { - Transform::Write(StoredValue::Account(account)) => account, - _ => panic!("Invalid transform operation found"), - }; - - assert_eq!( - mutated_account.action_thresholds().deployment(), - &Weight::new(252) - ); - assert_eq!( - mutated_account.action_thresholds().key_management(), - &Weight::new(253) - ); - - runtime_context - .set_action_threshold(ActionType::Deployment, Weight::new(255)) - .expect_err("Shouldn't be able to set deployment threshold higher than key management"); - - Ok(()) - }; - let _ = test(access_rights, query); -} - -#[test] -fn should_verify_ownership_before_adding_key() { - // Testing a valid case only - successfuly added a key, and successfuly removed, - // making sure `account_dirty` mutated - let access_rights = HashMap::new(); - let query = |mut runtime_context: RuntimeContext| { - // Overwrites a 
`base_key` to a different one before doing any operation as - // account `[0; 32]` - runtime_context.base_key = Key::Hash([1; 32]); - - let err = runtime_context - .add_associated_key(AccountHash::new([84; 32]), Weight::new(123)) - .expect_err("This operation should return error"); - - match err { - Error::AddKeyFailure(AddKeyFailure::PermissionDenied) => {} - e => panic!("Invalid error variant: {:?}", e), - } - - Ok(()) - }; - let _ = test(access_rights, query); -} - -#[test] -fn should_verify_ownership_before_removing_a_key() { - // Testing a valid case only - successfuly added a key, and successfuly removed, - // making sure `account_dirty` mutated - let access_rights = HashMap::new(); - let query = |mut runtime_context: RuntimeContext| { - // Overwrites a `base_key` to a different one before doing any operation as - // account `[0; 32]` - runtime_context.base_key = Key::Hash([1; 32]); - - let err = runtime_context - .remove_associated_key(AccountHash::new([84; 32])) - .expect_err("This operation should return error"); - - match err { - Error::RemoveKeyFailure(RemoveKeyFailure::PermissionDenied) => {} - ref e => panic!("Invalid error variant: {:?}", e), - } - - Ok(()) - }; - let _ = test(access_rights, query); -} - -#[test] -fn should_verify_ownership_before_setting_action_threshold() { - // Testing a valid case only - successfuly added a key, and successfuly removed, - // making sure `account_dirty` mutated - let access_rights = HashMap::new(); - let query = |mut runtime_context: RuntimeContext| { - // Overwrites a `base_key` to a different one before doing any operation as - // account `[0; 32]` - runtime_context.base_key = Key::Hash([1; 32]); - - let err = runtime_context - .set_action_threshold(ActionType::Deployment, Weight::new(123)) - .expect_err("This operation should return error"); - - match err { - Error::SetThresholdFailure(SetThresholdFailure::PermissionDeniedError) => {} - ref e => panic!("Invalid error variant: {:?}", e), - } - - Ok(()) - }; - let 
_ = test(access_rights, query); -} - -#[test] -fn can_roundtrip_key_value_pairs() { - let access_rights = HashMap::new(); - let query = |mut runtime_context: RuntimeContext| { - let deploy_hash = [1u8; 32]; - let mut uref_address_generator = AddressGenerator::new(&deploy_hash, Phase::Session); - let test_uref = create_uref(&mut uref_address_generator, AccessRights::default()) - .as_uref() - .cloned() - .unwrap(); - let test_value = CLValue::from_t("test_value".to_string()).unwrap(); - - runtime_context - .write_purse_uref(test_uref.to_owned(), test_value.clone()) - .expect("should write_ls"); - - let result = runtime_context - .read_purse_uref(&test_uref) - .expect("should read_ls"); - - Ok(result == Some(test_value)) - }; - let query_result = test(access_rights, query).expect("should be ok"); - assert!(query_result) -} - -#[test] -fn remove_uref_works() { - // Test that `remove_uref` removes Key from both ephemeral representation - // which is one of the current RuntimeContext, and also puts that change - // into the `TrackingCopy` so that it's later committed to the GlobalState. 
- - let access_rights = HashMap::new(); - let deploy_hash = [1u8; 32]; - let (base_key, account) = mock_account(AccountHash::new([0u8; 32])); - let hash_address_generator = AddressGenerator::new(&deploy_hash, Phase::Session); - let mut uref_address_generator = AddressGenerator::new(&deploy_hash, Phase::Session); - let transfer_address_generator = AddressGenerator::new(&deploy_hash, Phase::Session); - let uref_name = "Foo".to_owned(); - let uref_key = create_uref(&mut uref_address_generator, AccessRights::READ); - let mut named_keys = iter::once((uref_name.clone(), uref_key)).collect(); - let mut runtime_context = mock_runtime_context( - &account, - base_key, - &mut named_keys, - access_rights, - hash_address_generator, - uref_address_generator, - transfer_address_generator, - ); - - assert!(runtime_context.named_keys_contains_key(&uref_name)); - assert!(runtime_context.remove_key(&uref_name).is_ok()); - assert!(runtime_context.validate_key(&uref_key).is_err()); - assert!(!runtime_context.named_keys_contains_key(&uref_name)); - let effects = runtime_context.effect(); - let transform = effects.transforms.get(&base_key).unwrap(); - let account = match transform { - Transform::Write(StoredValue::Account(account)) => account, - _ => panic!("Invalid transform operation found"), - }; - assert!(!account.named_keys().contains_key(&uref_name)); -} - -#[test] -fn validate_valid_purse_of_an_account() { - // Tests that URef which matches a purse of a given context gets validated - let mock_purse = [42u8; 32]; - let access_rights = HashMap::new(); - let deploy_hash = [1u8; 32]; - let (base_key, account) = mock_account_with_purse(AccountHash::new([0u8; 32]), mock_purse); - let mut named_keys = NamedKeys::new(); - let hash_address_generator = AddressGenerator::new(&deploy_hash, Phase::Session); - let uref_address_generator = AddressGenerator::new(&deploy_hash, Phase::Session); - let transfer_address_generator = AddressGenerator::new(&deploy_hash, Phase::Session); - let 
runtime_context = mock_runtime_context( - &account, - base_key, - &mut named_keys, - access_rights, - hash_address_generator, - uref_address_generator, - transfer_address_generator, - ); - - // URef that has the same id as purse of an account gets validated - // successfully. - let purse = URef::new(mock_purse, AccessRights::READ_ADD_WRITE); - assert!(runtime_context.validate_uref(&purse).is_ok()); - - // URef that has the same id as purse of an account gets validated - // successfully as the passed purse has only subset of the privileges - let purse = URef::new(mock_purse, AccessRights::READ); - assert!(runtime_context.validate_uref(&purse).is_ok()); - let purse = URef::new(mock_purse, AccessRights::ADD); - assert!(runtime_context.validate_uref(&purse).is_ok()); - let purse = URef::new(mock_purse, AccessRights::WRITE); - assert!(runtime_context.validate_uref(&purse).is_ok()); - - // Purse ID that doesn't match account's purse should fail as it's also not - // in known urefs. - let purse = URef::new([53; 32], AccessRights::READ_ADD_WRITE); - assert!(runtime_context.validate_uref(&purse).is_err()); -} - -#[test] -fn should_meter_for_gas_storage_write() { - // Test fixture - let mut rng = AddressGenerator::new(&DEPLOY_HASH, PHASE); - let uref = create_uref(&mut rng, AccessRights::READ_WRITE); - let access_rights = extract_access_rights_from_keys(vec![uref]); - let value = StoredValue::CLValue(CLValue::from_t(43_i32).unwrap()); - let expected_write_cost = TEST_PROTOCOL_DATA - .wasm_config() - .storage_costs() - .calculate_gas_cost(value.serialized_length()); - - let (gas_usage_before, gas_usage_after) = test(access_rights, |mut rc| { - let gas_before = rc.gas_counter(); - rc.metered_write_gs(uref, value).expect("should write"); - let gas_after = rc.gas_counter(); - Ok((gas_before, gas_after)) - }) - .expect("should run test"); - - assert!( - gas_usage_after > gas_usage_before, - "{} <= {}", - gas_usage_after, - gas_usage_before - ); - - assert_eq!(gas_usage_after, 
gas_usage_before + expected_write_cost); -} - -#[test] -fn should_meter_for_gas_storage_add() { - // Test fixture - let mut rng = AddressGenerator::new(&DEPLOY_HASH, PHASE); - let uref = create_uref(&mut rng, AccessRights::ADD_WRITE); - let access_rights = extract_access_rights_from_keys(vec![uref]); - let value = StoredValue::CLValue(CLValue::from_t(43_i32).unwrap()); - let expected_add_cost = TEST_PROTOCOL_DATA - .wasm_config() - .storage_costs() - .calculate_gas_cost(value.serialized_length()); - - let (gas_usage_before, gas_usage_after) = test(access_rights, |mut rc| { - rc.metered_write_gs(uref, value.clone()) - .expect("should write"); - let gas_before = rc.gas_counter(); - rc.metered_add_gs(uref, value).expect("should add"); - let gas_after = rc.gas_counter(); - Ok((gas_before, gas_after)) - }) - .expect("should run test"); - - assert!( - gas_usage_after > gas_usage_before, - "{} <= {}", - gas_usage_after, - gas_usage_before - ); - - assert_eq!(gas_usage_after, gas_usage_before + expected_add_cost); -} diff --git a/execution_engine/src/core/tracking_copy/byte_size.rs b/execution_engine/src/core/tracking_copy/byte_size.rs deleted file mode 100644 index 64cb0bdd1b..0000000000 --- a/execution_engine/src/core/tracking_copy/byte_size.rs +++ /dev/null @@ -1,129 +0,0 @@ -use std::{collections::BTreeMap, mem}; - -use casper_types::{bytesrepr::ToBytes, ContractWasm, Key}; - -use crate::shared::{account::Account, stored_value::StoredValue}; - -/// Returns byte size of the element - both heap size and stack size. 
-pub trait ByteSize { - fn byte_size(&self) -> usize; -} - -impl ByteSize for Key { - fn byte_size(&self) -> usize { - mem::size_of::() + self.heap_size() - } -} - -impl ByteSize for String { - fn byte_size(&self) -> usize { - mem::size_of::() + self.heap_size() - } -} - -impl ByteSize for BTreeMap { - fn byte_size(&self) -> usize { - mem::size_of::>() - + self.heap_size() - + self.len() * (mem::size_of::() + mem::size_of::()) - } -} - -impl ByteSize for StoredValue { - fn byte_size(&self) -> usize { - mem::size_of::() - + match self { - StoredValue::CLValue(cl_value) => cl_value.serialized_length(), - StoredValue::Account(account) => account.serialized_length(), - StoredValue::ContractWasm(contract_wasm) => contract_wasm.serialized_length(), - StoredValue::Contract(contract_header) => contract_header.serialized_length(), - StoredValue::ContractPackage(contract_package) => { - contract_package.serialized_length() - } - StoredValue::DeployInfo(deploy_info) => deploy_info.serialized_length(), - StoredValue::Transfer(transfer) => transfer.serialized_length(), - StoredValue::EraInfo(era_info) => era_info.serialized_length(), - StoredValue::Bid(bid) => bid.serialized_length(), - StoredValue::Withdraw(unbonding_purses) => unbonding_purses.serialized_length(), - StoredValue::EraValidators(recipients) => recipients.serialized_length(), - } - } -} - -/// Returns heap size of the value. -/// Note it's different from [ByteSize] that returns both heap and stack size. -pub trait HeapSizeOf { - fn heap_size(&self) -> usize; -} - -impl HeapSizeOf for Key { - fn heap_size(&self) -> usize { - 0 - } -} - -// TODO: contract has other fields (re a bunch) that are not repr here...on purpose? -impl HeapSizeOf for Account { - fn heap_size(&self) -> usize { - self.named_keys().heap_size() - } -} - -// TODO: contract has other fields (re protocol version) that are not repr here...on purpose? 
-impl HeapSizeOf for ContractWasm { - fn heap_size(&self) -> usize { - self.bytes().len() - } -} - -// NOTE: We're ignoring size of the tree's nodes. -impl HeapSizeOf for BTreeMap { - fn heap_size(&self) -> usize { - self.iter() - .fold(0, |sum, (k, v)| sum + k.heap_size() + v.heap_size()) - } -} - -impl ByteSize for [T] { - fn byte_size(&self) -> usize { - self.iter() - .fold(0, |sum, el| sum + mem::size_of::() + el.heap_size()) - } -} - -impl HeapSizeOf for String { - fn heap_size(&self) -> usize { - self.capacity() - } -} - -#[cfg(test)] -mod tests { - use std::{collections::BTreeMap, mem}; - - use super::ByteSize; - use casper_types::Key; - - fn assert_byte_size(el: T, expected: usize) { - assert_eq!(el.byte_size(), expected) - } - - #[test] - fn byte_size_of_string() { - assert_byte_size("Hello".to_owned(), 5 + mem::size_of::()) - } - - #[test] - fn byte_size_of_map() { - let v = vec![ - (Key::Hash([1u8; 32]), "A".to_string()), - (Key::Hash([2u8; 32]), "B".to_string()), - (Key::Hash([3u8; 32]), "C".to_string()), - (Key::Hash([4u8; 32]), "D".to_string()), - ]; - let it_size: usize = mem::size_of::>() - + 4 * (mem::size_of::() + mem::size_of::() + 1); - let map: BTreeMap = v.into_iter().collect(); - assert_byte_size(map, it_size); - } -} diff --git a/execution_engine/src/core/tracking_copy/ext.rs b/execution_engine/src/core/tracking_copy/ext.rs deleted file mode 100644 index 6f06365cc1..0000000000 --- a/execution_engine/src/core/tracking_copy/ext.rs +++ /dev/null @@ -1,250 +0,0 @@ -use std::convert::TryInto; - -use parity_wasm::elements::Module; - -use casper_types::{ - account::AccountHash, CLValue, Contract, ContractHash, ContractPackage, ContractPackageHash, - ContractWasm, ContractWasmHash, Key, URef, -}; - -use crate::{ - core::{execution, tracking_copy::TrackingCopy}, - shared::{ - account::Account, motes::Motes, newtypes::CorrelationId, stored_value::StoredValue, wasm, - wasm_prep::Preprocessor, TypeMismatch, - }, - storage::{global_state::StateReader, 
trie::merkle_proof::TrieMerkleProof}, -}; - -pub trait TrackingCopyExt { - type Error; - - /// Gets the account at a given account address - fn get_account( - &mut self, - correlation_id: CorrelationId, - account_hash: AccountHash, - ) -> Result; - - /// Reads the account at a given account address - fn read_account( - &mut self, - correlation_id: CorrelationId, - account_hash: AccountHash, - ) -> Result; - - // TODO: make this a static method - /// Gets the purse balance key for a given purse id - fn get_purse_balance_key( - &self, - correlation_id: CorrelationId, - purse_key: Key, - ) -> Result; - - /// Gets the balance at a given balance key - fn get_purse_balance( - &self, - correlation_id: CorrelationId, - balance_key: Key, - ) -> Result; - - /// Gets the purse balance key for a given purse id and provides a Merkle proof - fn get_purse_balance_key_with_proof( - &self, - correlation_id: CorrelationId, - purse_key: Key, - ) -> Result<(Key, TrieMerkleProof), Self::Error>; - - /// Gets the balance at a given balance key and provides a Merkle proof - fn get_purse_balance_with_proof( - &self, - correlation_id: CorrelationId, - balance_key: Key, - ) -> Result<(Motes, TrieMerkleProof), Self::Error>; - - /// Gets a contract by Key - fn get_contract_wasm( - &mut self, - correlation_id: CorrelationId, - contract_wasm_hash: ContractWasmHash, - ) -> Result; - - /// Gets a contract header by Key - fn get_contract( - &mut self, - correlation_id: CorrelationId, - contract_hash: ContractHash, - ) -> Result; - - /// Gets a contract package by Key - fn get_contract_package( - &mut self, - correlation_id: CorrelationId, - contract_package_hash: ContractPackageHash, - ) -> Result; - - fn get_system_module(&mut self, preprocessor: &Preprocessor) -> Result; -} - -impl TrackingCopyExt for TrackingCopy -where - R: StateReader, - R::Error: Into, -{ - type Error = execution::Error; - - fn get_account( - &mut self, - correlation_id: CorrelationId, - account_hash: AccountHash, - ) -> 
Result { - let account_key = Key::Account(account_hash); - match self.get(correlation_id, &account_key).map_err(Into::into)? { - Some(StoredValue::Account(account)) => Ok(account), - Some(other) => Err(execution::Error::TypeMismatch(TypeMismatch::new( - "Account".to_string(), - other.type_name(), - ))), - None => Err(execution::Error::KeyNotFound(account_key)), - } - } - - fn read_account( - &mut self, - correlation_id: CorrelationId, - account_hash: AccountHash, - ) -> Result { - let account_key = Key::Account(account_hash); - match self - .read(correlation_id, &account_key) - .map_err(Into::into)? - { - Some(StoredValue::Account(account)) => Ok(account), - Some(other) => Err(execution::Error::TypeMismatch(TypeMismatch::new( - "Account".to_string(), - other.type_name(), - ))), - None => Err(execution::Error::KeyNotFound(account_key)), - } - } - - fn get_purse_balance_key( - &self, - _correlation_id: CorrelationId, - purse_key: Key, - ) -> Result { - let balance_key: URef = purse_key - .into_uref() - .ok_or(execution::Error::KeyIsNotAURef(purse_key))?; - Ok(Key::Balance(balance_key.addr())) - } - - fn get_purse_balance( - &self, - correlation_id: CorrelationId, - key: Key, - ) -> Result { - let stored_value: StoredValue = self - .read(correlation_id, &key) - .map_err(Into::into)? - .ok_or(execution::Error::KeyNotFound(key))?; - let cl_value: CLValue = stored_value - .try_into() - .map_err(execution::Error::TypeMismatch)?; - let balance = Motes::new(cl_value.into_t()?); - Ok(balance) - } - - fn get_purse_balance_key_with_proof( - &self, - correlation_id: CorrelationId, - purse_key: Key, - ) -> Result<(Key, TrieMerkleProof), Self::Error> { - let balance_key: Key = purse_key - .uref_to_hash() - .ok_or(execution::Error::KeyIsNotAURef(purse_key))?; - let proof: TrieMerkleProof = self - .read_with_proof(correlation_id, &balance_key) // Key::Hash, so no need to normalize - .map_err(Into::into)? 
- .ok_or(execution::Error::KeyNotFound(purse_key))?; - let stored_value_ref: &StoredValue = proof.value(); - let cl_value: CLValue = stored_value_ref - .to_owned() - .try_into() - .map_err(execution::Error::TypeMismatch)?; - let balance_key: Key = cl_value.into_t()?; - Ok((balance_key, proof)) - } - - fn get_purse_balance_with_proof( - &self, - correlation_id: CorrelationId, - key: Key, - ) -> Result<(Motes, TrieMerkleProof), Self::Error> { - let proof: TrieMerkleProof = self - .read_with_proof(correlation_id, &key.normalize()) - .map_err(Into::into)? - .ok_or(execution::Error::KeyNotFound(key))?; - let cl_value: CLValue = proof - .value() - .to_owned() - .try_into() - .map_err(execution::Error::TypeMismatch)?; - let balance = Motes::new(cl_value.into_t()?); - Ok((balance, proof)) - } - - /// Gets a contract wasm by Key - fn get_contract_wasm( - &mut self, - correlation_id: CorrelationId, - contract_wasm_hash: ContractWasmHash, - ) -> Result { - let key = contract_wasm_hash.into(); - match self.get(correlation_id, &key).map_err(Into::into)? { - Some(StoredValue::ContractWasm(contract_wasm)) => Ok(contract_wasm), - Some(other) => Err(execution::Error::TypeMismatch(TypeMismatch::new( - "ContractHeader".to_string(), - other.type_name(), - ))), - None => Err(execution::Error::KeyNotFound(key)), - } - } - - /// Gets a contract header by Key - fn get_contract( - &mut self, - correlation_id: CorrelationId, - contract_hash: ContractHash, - ) -> Result { - let key = contract_hash.into(); - match self.get(correlation_id, &key).map_err(Into::into)? 
{ - Some(StoredValue::Contract(contract)) => Ok(contract), - Some(other) => Err(execution::Error::TypeMismatch(TypeMismatch::new( - "ContractHeader".to_string(), - other.type_name(), - ))), - None => Err(execution::Error::KeyNotFound(key)), - } - } - - fn get_contract_package( - &mut self, - correlation_id: CorrelationId, - contract_package_hash: ContractPackageHash, - ) -> Result { - let key = contract_package_hash.into(); - match self.get(correlation_id, &key).map_err(Into::into)? { - Some(StoredValue::ContractPackage(contract_package)) => Ok(contract_package), - Some(other) => Err(execution::Error::TypeMismatch(TypeMismatch::new( - "ContractPackage".to_string(), - other.type_name(), - ))), - None => Err(execution::Error::KeyNotFound(key)), - } - } - - fn get_system_module(&mut self, preprocessor: &Preprocessor) -> Result { - Ok(wasm::do_nothing_module(preprocessor)?) - } -} diff --git a/execution_engine/src/core/tracking_copy/mod.rs b/execution_engine/src/core/tracking_copy/mod.rs deleted file mode 100644 index 18ed30f89b..0000000000 --- a/execution_engine/src/core/tracking_copy/mod.rs +++ /dev/null @@ -1,672 +0,0 @@ -mod byte_size; -mod ext; -pub(self) mod meter; -#[cfg(test)] -mod tests; - -use std::{ - collections::{BTreeSet, HashMap, HashSet, VecDeque}, - convert::{From, TryInto}, - iter, -}; - -use linked_hash_map::LinkedHashMap; -use thiserror::Error; - -use casper_types::{bytesrepr, CLType, CLValue, CLValueError, Key, KeyTag, Tagged, U512}; - -pub use self::ext::TrackingCopyExt; -use self::meter::{heap_meter::HeapSize, Meter}; -use crate::{ - core::engine_state::{execution_effect::ExecutionEffect, op::Op}, - shared::{ - additive_map::AdditiveMap, - newtypes::{Blake2bHash, CorrelationId}, - stored_value::StoredValue, - transform::{self, Transform}, - TypeMismatch, - }, - storage::{global_state::StateReader, trie::merkle_proof::TrieMerkleProof}, -}; - -#[derive(Debug)] -pub enum TrackingCopyQueryResult { - Success { - value: StoredValue, - proofs: Vec>, - 
}, - ValueNotFound(String), - CircularReference(String), -} - -/// Struct containing state relating to a given query. -struct Query { - /// The key from where the search starts. - base_key: Key, - /// A collection of normalized keys which have been visited during the search. - visited_keys: HashSet, - /// The key currently being processed. - current_key: Key, - /// Path components which have not yet been followed, held in the same order in which they were - /// provided to the `query()` call. - unvisited_names: VecDeque, - /// Path components which have been followed, held in the same order in which they were - /// provided to the `query()` call. - visited_names: Vec, -} - -impl Query { - fn new(base_key: Key, path: &[String]) -> Self { - Query { - base_key, - current_key: base_key.normalize(), - unvisited_names: path.iter().cloned().collect(), - visited_names: Vec::new(), - visited_keys: HashSet::new(), - } - } - - /// Panics if `unvisited_names` is empty. - fn next_name(&mut self) -> &String { - let next_name = self.unvisited_names.pop_front().unwrap(); - self.visited_names.push(next_name); - self.visited_names.last().unwrap() - } - - fn into_not_found_result(self, msg_prefix: &str) -> TrackingCopyQueryResult { - let msg = format!("{} at path: {}", msg_prefix, self.current_path()); - TrackingCopyQueryResult::ValueNotFound(msg) - } - - fn into_circular_ref_result(self) -> TrackingCopyQueryResult { - let msg = format!( - "{:?} has formed a circular reference at path: {}", - self.current_key, - self.current_path() - ); - TrackingCopyQueryResult::CircularReference(msg) - } - - fn current_path(&self) -> String { - let mut path = format!("{:?}", self.base_key); - for name in &self.visited_names { - path.push('/'); - path.push_str(name); - } - path - } -} - -/// Keeps track of already accessed keys. -/// We deliberately separate cached Reads from cached mutations -/// because we want to invalidate Reads' cache so it doesn't grow too fast. 
-pub struct TrackingCopyCache { - max_cache_size: usize, - current_cache_size: usize, - reads_cached: LinkedHashMap, - muts_cached: HashMap, - key_tag_reads_cached: LinkedHashMap>, - key_tag_muts_cached: HashMap>, - meter: M, -} - -impl> TrackingCopyCache { - /// Creates instance of `TrackingCopyCache` with specified `max_cache_size`, - /// above which least-recently-used elements of the cache are invalidated. - /// Measurements of elements' "size" is done with the usage of `Meter` - /// instance. - pub fn new(max_cache_size: usize, meter: M) -> TrackingCopyCache { - TrackingCopyCache { - max_cache_size, - current_cache_size: 0, - reads_cached: LinkedHashMap::new(), - muts_cached: HashMap::new(), - key_tag_reads_cached: LinkedHashMap::new(), - key_tag_muts_cached: HashMap::new(), - meter, - } - } - - /// Inserts `key` and `value` pair to Read cache. - pub fn insert_read(&mut self, key: Key, value: StoredValue) { - let element_size = Meter::measure(&self.meter, &key, &value); - self.reads_cached.insert(key, value); - self.current_cache_size += element_size; - while self.current_cache_size > self.max_cache_size { - match self.reads_cached.pop_front() { - Some((k, v)) => { - let element_size = Meter::measure(&self.meter, &k, &v); - self.current_cache_size -= element_size; - } - None => break, - } - } - } - - /// Inserts a `KeyTag` value and the keys under this prefix into the key reads cache. - pub fn insert_key_tag_read(&mut self, key_tag: KeyTag, keys: BTreeSet) { - let element_size = Meter::measure_keys(&self.meter, &keys); - self.key_tag_reads_cached.insert(key_tag, keys); - self.current_cache_size += element_size; - while self.current_cache_size > self.max_cache_size { - match self.reads_cached.pop_front() { - Some((k, v)) => { - let element_size = Meter::measure(&self.meter, &k, &v); - self.current_cache_size -= element_size; - } - None => break, - } - } - } - - /// Inserts `key` and `value` pair to Write/Add cache. 
- pub fn insert_write(&mut self, key: Key, value: StoredValue) { - self.muts_cached.insert(key, value); - - let key_set = self - .key_tag_muts_cached - .entry(key.tag()) - .or_insert_with(BTreeSet::new); - - key_set.insert(key); - } - - /// Gets value from `key` in the cache. - pub fn get(&mut self, key: &Key) -> Option<&StoredValue> { - if let Some(value) = self.muts_cached.get(&key) { - return Some(value); - }; - - self.reads_cached.get_refresh(key).map(|v| &*v) - } - - pub fn get_key_tag_muts_cached(&mut self, key_tag: &KeyTag) -> Option<&BTreeSet> { - self.key_tag_muts_cached.get(key_tag) - } - - pub fn get_key_tag_reads_cached(&mut self, key_tag: &KeyTag) -> Option<&BTreeSet> { - self.key_tag_reads_cached.get_refresh(key_tag).map(|v| &*v) - } -} - -pub struct TrackingCopy { - reader: R, - cache: TrackingCopyCache, - ops: AdditiveMap, - fns: AdditiveMap, -} - -#[derive(Debug)] -pub enum AddResult { - Success, - KeyNotFound(Key), - TypeMismatch(TypeMismatch), - Serialization(bytesrepr::Error), -} - -impl From for AddResult { - fn from(error: CLValueError) -> Self { - match error { - CLValueError::Serialization(error) => AddResult::Serialization(error), - CLValueError::Type(type_mismatch) => { - let expected = format!("{:?}", type_mismatch.expected); - let found = format!("{:?}", type_mismatch.found); - AddResult::TypeMismatch(TypeMismatch::new(expected, found)) - } - } - } -} - -impl> TrackingCopy { - pub fn new(reader: R) -> TrackingCopy { - TrackingCopy { - reader, - cache: TrackingCopyCache::new(1024 * 16, HeapSize), - /* TODO: Should `max_cache_size` - * be fraction of wasm memory - * limit? */ - ops: AdditiveMap::new(), - fns: AdditiveMap::new(), - } - } - - pub fn reader(&self) -> &R { - &self.reader - } - - /// Creates a new TrackingCopy, using this one (including its mutations) as - /// the base state to read against. 
The intended use case for this - /// function is to "snapshot" the current `TrackingCopy` and produce a - /// new `TrackingCopy` where further changes can be made. This - /// allows isolating a specific set of changes (those in the new - /// `TrackingCopy`) from existing changes. Note that mutations to state - /// caused by new changes (i.e. writes and adds) only impact the new - /// `TrackingCopy`, not this one. Note that currently there is no `join` / - /// `merge` function to bring changes from a fork back to the main - /// `TrackingCopy`. this means the current usage requires repeated - /// forking, however we recognize this is sub-optimal and will revisit - /// in the future. - pub fn fork(&self) -> TrackingCopy<&TrackingCopy> { - TrackingCopy::new(self) - } - - pub fn get( - &mut self, - correlation_id: CorrelationId, - key: &Key, - ) -> Result, R::Error> { - if let Some(value) = self.cache.get(key) { - return Ok(Some(value.to_owned())); - } - if let Some(value) = self.reader.read(correlation_id, key)? { - self.cache.insert_read(*key, value.to_owned()); - Ok(Some(value)) - } else { - Ok(None) - } - } - - pub fn get_keys( - &mut self, - correlation_id: CorrelationId, - key_tag: &KeyTag, - ) -> Result, R::Error> { - let mut ret: BTreeSet = BTreeSet::new(); - match self.cache.get_key_tag_reads_cached(&key_tag) { - Some(keys) => ret.extend(keys), - None => { - let key_tag = key_tag.to_owned(); - let keys = self - .reader - .keys_with_prefix(correlation_id, &[key_tag as u8])?; - ret.extend(keys); - self.cache.insert_key_tag_read(key_tag, ret.to_owned()) - } - } - if let Some(keys) = self.cache.get_key_tag_muts_cached(&key_tag) { - ret.extend(keys) - } - Ok(ret) - } - - pub fn read( - &mut self, - correlation_id: CorrelationId, - key: &Key, - ) -> Result, R::Error> { - let normalized_key = key.normalize(); - if let Some(value) = self.get(correlation_id, &normalized_key)? 
{ - self.ops.insert_add(normalized_key, Op::Read); - self.fns.insert_add(normalized_key, Transform::Identity); - Ok(Some(value)) - } else { - Ok(None) - } - } - - pub fn write(&mut self, key: Key, value: StoredValue) { - let normalized_key = key.normalize(); - self.cache.insert_write(normalized_key, value.clone()); - self.ops.insert_add(normalized_key, Op::Write); - self.fns.insert_add(normalized_key, Transform::Write(value)); - } - - /// Ok(None) represents missing key to which we want to "add" some value. - /// Ok(Some(unit)) represents successful operation. - /// Err(error) is reserved for unexpected errors when accessing global - /// state. - pub fn add( - &mut self, - correlation_id: CorrelationId, - key: Key, - value: StoredValue, - ) -> Result { - let normalized_key = key.normalize(); - let current_value = match self.get(correlation_id, &normalized_key)? { - None => return Ok(AddResult::KeyNotFound(normalized_key)), - Some(current_value) => current_value, - }; - - let type_name = value.type_name(); - let mismatch = || { - Ok(AddResult::TypeMismatch(TypeMismatch::new( - "I32, U64, U128, U256, U512 or (String, Key) tuple".to_string(), - type_name, - ))) - }; - - let transform = match value { - StoredValue::CLValue(cl_value) => match *cl_value.cl_type() { - CLType::I32 => match cl_value.into_t() { - Ok(value) => Transform::AddInt32(value), - Err(error) => return Ok(AddResult::from(error)), - }, - CLType::U64 => match cl_value.into_t() { - Ok(value) => Transform::AddUInt64(value), - Err(error) => return Ok(AddResult::from(error)), - }, - CLType::U128 => match cl_value.into_t() { - Ok(value) => Transform::AddUInt128(value), - Err(error) => return Ok(AddResult::from(error)), - }, - CLType::U256 => match cl_value.into_t() { - Ok(value) => Transform::AddUInt256(value), - Err(error) => return Ok(AddResult::from(error)), - }, - CLType::U512 => match cl_value.into_t() { - Ok(value) => Transform::AddUInt512(value), - Err(error) => return Ok(AddResult::from(error)), - }, 
- _ => { - if *cl_value.cl_type() == casper_types::named_key_type() { - match cl_value.into_t() { - Ok(name_and_key) => { - let map = iter::once(name_and_key).collect(); - Transform::AddKeys(map) - } - Err(error) => return Ok(AddResult::from(error)), - } - } else { - return mismatch(); - } - } - }, - _ => return mismatch(), - }; - - match transform.clone().apply(current_value) { - Ok(new_value) => { - self.cache.insert_write(normalized_key, new_value); - self.ops.insert_add(normalized_key, Op::Add); - self.fns.insert_add(normalized_key, transform); - Ok(AddResult::Success) - } - Err(transform::Error::TypeMismatch(type_mismatch)) => { - Ok(AddResult::TypeMismatch(type_mismatch)) - } - Err(transform::Error::Serialization(error)) => Ok(AddResult::Serialization(error)), - } - } - - pub fn effect(&self) -> ExecutionEffect { - ExecutionEffect::new(self.ops.clone(), self.fns.clone()) - } - - /// Calling `query()` avoids calling into `self.cache`, so this will not return any values - /// written or mutated in this `TrackingCopy` via previous calls to `write()` or `add()`, since - /// these updates are only held in `self.cache`. - /// - /// The intent is that `query()` is only used to satisfy `QueryRequest`s made to the server. - /// Other EE internal use cases should call `read()` or `get()` in order to retrieve cached - /// values. - pub fn query( - &self, - correlation_id: CorrelationId, - base_key: Key, - path: &[String], - ) -> Result { - let mut query = Query::new(base_key, path); - - let mut proofs = Vec::new(); - - loop { - if !query.visited_keys.insert(query.current_key) { - return Ok(query.into_circular_ref_result()); - } - let stored_value = match self - .reader - .read_with_proof(correlation_id, &query.current_key)? 
- { - None => { - return Ok(query.into_not_found_result("Failed to find base key")); - } - Some(stored_value) => stored_value, - }; - - let value = stored_value.value().to_owned(); - - proofs.push(stored_value); - - if query.unvisited_names.is_empty() { - return Ok(TrackingCopyQueryResult::Success { value, proofs }); - } - - let stored_value: &StoredValue = proofs - .last() - .map(|r| r.value()) - .expect("but we just pushed"); - - match stored_value { - StoredValue::Account(account) => { - let name = query.next_name(); - if let Some(key) = account.named_keys().get(name) { - query.current_key = key.normalize(); - } else { - let msg_prefix = format!("Name {} not found in Account", name); - return Ok(query.into_not_found_result(&msg_prefix)); - } - } - StoredValue::CLValue(cl_value) if cl_value.cl_type() == &CLType::Key => { - if let Ok(key) = cl_value.to_owned().into_t::() { - query.current_key = key.normalize(); - } else { - return Ok(query.into_not_found_result("Failed to parse CLValue as Key")); - } - } - StoredValue::CLValue(cl_value) => { - let msg_prefix = format!( - "Query cannot continue as {:?} is not an account, contract nor key to \ - such. 
Value found", - cl_value - ); - return Ok(query.into_not_found_result(&msg_prefix)); - } - StoredValue::Contract(contract) => { - let name = query.next_name(); - if let Some(key) = contract.named_keys().get(name) { - query.current_key = key.normalize(); - } else { - let msg_prefix = format!("Name {} not found in Contract", name); - return Ok(query.into_not_found_result(&msg_prefix)); - } - } - StoredValue::ContractPackage(_) => { - return Ok(query.into_not_found_result(&"ContractPackage value found.")); - } - StoredValue::ContractWasm(_) => { - return Ok(query.into_not_found_result(&"ContractWasm value found.")); - } - StoredValue::Transfer(_) => { - return Ok(query.into_not_found_result(&"Transfer value found.")); - } - StoredValue::DeployInfo(_) => { - return Ok(query.into_not_found_result(&"DeployInfo value found.")); - } - StoredValue::EraInfo(_) => { - return Ok(query.into_not_found_result(&"EraInfo value found.")); - } - StoredValue::Bid(_) => { - return Ok(query.into_not_found_result(&"Bid value found.")); - } - StoredValue::Withdraw(_) => { - return Ok(query.into_not_found_result(&"UnbondingPurses value found.")); - } - StoredValue::EraValidators(_) => { - return Ok(query.into_not_found_result(&"EraValidators value found")); - } - } - } - } -} - -/// The purpose of this implementation is to allow a "snapshot" mechanism for -/// TrackingCopy. The state of a TrackingCopy (including the effects of -/// any transforms it has accumulated) can be read using an immutable -/// reference to that TrackingCopy via this trait implementation. See -/// `TrackingCopy::fork` for more information. -impl> StateReader for &TrackingCopy { - type Error = R::Error; - - fn read( - &self, - correlation_id: CorrelationId, - key: &Key, - ) -> Result, Self::Error> { - if let Some(value) = self.cache.muts_cached.get(key) { - return Ok(Some(value.to_owned())); - } - if let Some(value) = self.reader.read(correlation_id, key)? 
{ - Ok(Some(value)) - } else { - Ok(None) - } - } - - fn read_with_proof( - &self, - correlation_id: CorrelationId, - key: &Key, - ) -> Result>, Self::Error> { - self.reader.read_with_proof(correlation_id, key) - } - - fn keys_with_prefix( - &self, - correlation_id: CorrelationId, - prefix: &[u8], - ) -> Result, Self::Error> { - self.reader.keys_with_prefix(correlation_id, prefix) - } -} - -#[derive(Error, Debug, PartialEq, Eq)] -pub enum ValidationError { - #[error("The path should not have a different length than the proof less one.")] - PathLengthDifferentThanProofLessOne, - - #[error("The provided key does not match the key in the proof.")] - UnexpectedKey, - - #[error("The provided value does not match the value in the proof.")] - UnexpectedValue, - - #[error("The proof hash is invalid.")] - InvalidProofHash, - - #[error("The path went cold.")] - PathCold, - - #[error("Serialization error: {0}")] - BytesRepr(bytesrepr::Error), - - #[error("Key is not a URef")] - KeyIsNotAURef(Key), - - #[error("Failed to convert stored value to key")] - ValueToCLValueConversion, - - #[error("{0}")] - CLValueError(CLValueError), -} - -impl From for ValidationError { - fn from(err: CLValueError) -> Self { - ValidationError::CLValueError(err) - } -} - -impl From for ValidationError { - fn from(error: bytesrepr::Error) -> Self { - Self::BytesRepr(error) - } -} - -pub fn validate_query_proof( - hash: &Blake2bHash, - proofs: &[TrieMerkleProof], - expected_first_key: &Key, - path: &[String], - expected_value: &StoredValue, -) -> Result<(), ValidationError> { - if proofs.len() != path.len() + 1 { - return Err(ValidationError::PathLengthDifferentThanProofLessOne); - } - - let mut proofs_iter = proofs.iter(); - - // length check above means we are safe to unwrap here - let first_proof = proofs_iter.next().unwrap(); - - if first_proof.key() != &expected_first_key.normalize() { - return Err(ValidationError::UnexpectedKey); - } - - if hash != &first_proof.compute_state_hash()? 
{ - return Err(ValidationError::InvalidProofHash); - } - - let mut proof_value = first_proof.value(); - - for (proof, path_component) in proofs_iter.zip(path.iter()) { - let named_keys = match proof_value { - StoredValue::Account(account) => account.named_keys(), - StoredValue::Contract(contract) => contract.named_keys(), - _ => return Err(ValidationError::PathCold), - }; - - let key = match named_keys.get(path_component) { - Some(key) => key, - None => return Err(ValidationError::PathCold), - }; - - if proof.key() != &key.normalize() { - return Err(ValidationError::UnexpectedKey); - } - - if hash != &proof.compute_state_hash()? { - return Err(ValidationError::InvalidProofHash); - } - - proof_value = proof.value(); - } - - if proof_value != expected_value { - return Err(ValidationError::UnexpectedValue); - } - - Ok(()) -} - -pub fn validate_balance_proof( - hash: &Blake2bHash, - balance_proof: &TrieMerkleProof, - expected_purse_key: Key, - expected_motes: &U512, -) -> Result<(), ValidationError> { - let expected_balance_key = expected_purse_key - .into_uref() - .map(|uref| Key::Balance(uref.addr())) - .ok_or_else(|| ValidationError::KeyIsNotAURef(expected_purse_key.to_owned()))?; - - if balance_proof.key() != &expected_balance_key.normalize() { - return Err(ValidationError::UnexpectedKey); - } - - if hash != &balance_proof.compute_state_hash()? 
{ - return Err(ValidationError::InvalidProofHash); - } - - let balance_proof_stored_value = balance_proof.value().to_owned(); - - let balance_proof_clvalue: CLValue = balance_proof_stored_value - .try_into() - .map_err(|_| ValidationError::ValueToCLValueConversion)?; - - let balance_motes: U512 = balance_proof_clvalue.into_t()?; - - if expected_motes != &balance_motes { - return Err(ValidationError::UnexpectedValue); - } - - Ok(()) -} diff --git a/execution_engine/src/core/tracking_copy/tests.rs b/execution_engine/src/core/tracking_copy/tests.rs deleted file mode 100644 index 92e3a0ffea..0000000000 --- a/execution_engine/src/core/tracking_copy/tests.rs +++ /dev/null @@ -1,1002 +0,0 @@ -use std::{cell::Cell, iter, rc::Rc}; - -use assert_matches::assert_matches; -use proptest::prelude::*; - -use casper_types::{ - account::{AccountHash, Weight, ACCOUNT_HASH_LENGTH}, - contracts::NamedKeys, - gens::*, - AccessRights, CLValue, Contract, EntryPoints, Key, KeyTag, ProtocolVersion, URef, U512, -}; - -use super::{ - meter::count_meter::Count, AddResult, TrackingCopy, TrackingCopyCache, TrackingCopyQueryResult, -}; -use crate::{ - core::{engine_state::op::Op, ValidationError}, - shared::{ - account::{Account, AssociatedKeys}, - newtypes::{Blake2bHash, CorrelationId}, - stored_value::{gens::stored_value_arb, StoredValue}, - transform::Transform, - }, - storage::{ - global_state::{in_memory::InMemoryGlobalState, StateProvider, StateReader}, - trie::merkle_proof::TrieMerkleProof, - }, -}; - -struct CountingDb { - count: Rc>, - value: Option, -} - -impl CountingDb { - fn new(counter: Rc>) -> CountingDb { - CountingDb { - count: counter, - value: None, - } - } - - fn new_init(v: StoredValue) -> CountingDb { - CountingDb { - count: Rc::new(Cell::new(0)), - value: Some(v), - } - } -} - -impl StateReader for CountingDb { - type Error = String; - fn read( - &self, - _correlation_id: CorrelationId, - _key: &Key, - ) -> Result, Self::Error> { - let count = self.count.get(); - let value 
= match self.value { - Some(ref v) => v.clone(), - None => StoredValue::CLValue(CLValue::from_t(count).unwrap()), - }; - self.count.set(count + 1); - Ok(Some(value)) - } - - fn read_with_proof( - &self, - _correlation_id: CorrelationId, - _key: &Key, - ) -> Result>, Self::Error> { - Ok(None) - } - - fn keys_with_prefix( - &self, - _correlation_id: CorrelationId, - _prefix: &[u8], - ) -> Result, Self::Error> { - Ok(Vec::new()) - } -} - -#[test] -fn tracking_copy_new() { - let counter = Rc::new(Cell::new(0)); - let db = CountingDb::new(counter); - let tc = TrackingCopy::new(db); - - assert_eq!(tc.ops.is_empty(), true); - assert_eq!(tc.fns.is_empty(), true); -} - -#[test] -fn tracking_copy_caching() { - let correlation_id = CorrelationId::new(); - let counter = Rc::new(Cell::new(0)); - let db = CountingDb::new(Rc::clone(&counter)); - let mut tc = TrackingCopy::new(db); - let k = Key::Hash([0u8; 32]); - - let zero = StoredValue::CLValue(CLValue::from_t(0_i32).unwrap()); - // first read - let value = tc.read(correlation_id, &k).unwrap().unwrap(); - assert_eq!(value, zero); - - // second read; should use cache instead - // of going back to the DB - let value = tc.read(correlation_id, &k).unwrap().unwrap(); - let db_value = counter.get(); - assert_eq!(value, zero); - assert_eq!(db_value, 1); -} - -#[test] -fn tracking_copy_read() { - let correlation_id = CorrelationId::new(); - let counter = Rc::new(Cell::new(0)); - let db = CountingDb::new(Rc::clone(&counter)); - let mut tc = TrackingCopy::new(db); - let k = Key::Hash([0u8; 32]); - - let zero = StoredValue::CLValue(CLValue::from_t(0_i32).unwrap()); - let value = tc.read(correlation_id, &k).unwrap().unwrap(); - // value read correctly - assert_eq!(value, zero); - // read produces an identity transform - assert_eq!(tc.fns.len(), 1); - assert_eq!(tc.fns.get(&k), Some(&Transform::Identity)); - // read does produce an op - assert_eq!(tc.ops.len(), 1); - assert_eq!(tc.ops.get(&k), Some(&Op::Read)); -} - -#[test] -fn 
tracking_copy_write() { - let counter = Rc::new(Cell::new(0)); - let db = CountingDb::new(Rc::clone(&counter)); - let mut tc = TrackingCopy::new(db); - let k = Key::Hash([0u8; 32]); - - let one = StoredValue::CLValue(CLValue::from_t(1_i32).unwrap()); - let two = StoredValue::CLValue(CLValue::from_t(2_i32).unwrap()); - - // writing should work - tc.write(k, one.clone()); - // write does not need to query the DB - let db_value = counter.get(); - assert_eq!(db_value, 0); - // write creates a Transfrom - assert_eq!(tc.fns.len(), 1); - assert_eq!(tc.fns.get(&k), Some(&Transform::Write(one))); - // write creates an Op - assert_eq!(tc.ops.len(), 1); - assert_eq!(tc.ops.get(&k), Some(&Op::Write)); - - // writing again should update the values - tc.write(k, two.clone()); - let db_value = counter.get(); - assert_eq!(db_value, 0); - assert_eq!(tc.fns.len(), 1); - assert_eq!(tc.fns.get(&k), Some(&Transform::Write(two))); - assert_eq!(tc.ops.len(), 1); - assert_eq!(tc.ops.get(&k), Some(&Op::Write)); -} - -#[test] -fn tracking_copy_add_i32() { - let correlation_id = CorrelationId::new(); - let counter = Rc::new(Cell::new(0)); - let db = CountingDb::new(counter); - let mut tc = TrackingCopy::new(db); - let k = Key::Hash([0u8; 32]); - - let three = StoredValue::CLValue(CLValue::from_t(3_i32).unwrap()); - - // adding should work - let add = tc.add(correlation_id, k, three.clone()); - assert_matches!(add, Ok(_)); - - // add creates a Transfrom - assert_eq!(tc.fns.len(), 1); - assert_eq!(tc.fns.get(&k), Some(&Transform::AddInt32(3))); - // add creates an Op - assert_eq!(tc.ops.len(), 1); - assert_eq!(tc.ops.get(&k), Some(&Op::Add)); - - // adding again should update the values - let add = tc.add(correlation_id, k, three); - assert_matches!(add, Ok(_)); - assert_eq!(tc.fns.len(), 1); - assert_eq!(tc.fns.get(&k), Some(&Transform::AddInt32(6))); - assert_eq!(tc.ops.len(), 1); - assert_eq!(tc.ops.get(&k), Some(&Op::Add)); -} - -#[test] -fn tracking_copy_add_named_key() { - let 
zero_account_hash = AccountHash::new([0u8; ACCOUNT_HASH_LENGTH]); - let correlation_id = CorrelationId::new(); - // DB now holds an `Account` so that we can test adding a `NamedKey` - let associated_keys = AssociatedKeys::new(zero_account_hash, Weight::new(1)); - let account = Account::new( - zero_account_hash, - NamedKeys::new(), - URef::new([0u8; 32], AccessRights::READ_ADD_WRITE), - associated_keys, - Default::default(), - ); - let db = CountingDb::new_init(StoredValue::Account(account)); - let mut tc = TrackingCopy::new(db); - let k = Key::Hash([0u8; 32]); - let u1 = Key::URef(URef::new([1u8; 32], AccessRights::READ_WRITE)); - let u2 = Key::URef(URef::new([2u8; 32], AccessRights::READ_WRITE)); - - let name1 = "test".to_string(); - let named_key = StoredValue::CLValue(CLValue::from_t((name1.clone(), u1)).unwrap()); - let name2 = "test2".to_string(); - let other_named_key = StoredValue::CLValue(CLValue::from_t((name2.clone(), u2)).unwrap()); - let mut map = NamedKeys::new(); - map.insert(name1, u1); - - // adding the wrong type should fail - let failed_add = tc.add( - correlation_id, - k, - StoredValue::CLValue(CLValue::from_t(3_i32).unwrap()), - ); - assert_matches!(failed_add, Ok(AddResult::TypeMismatch(_))); - assert_eq!(tc.ops.is_empty(), true); - assert_eq!(tc.fns.is_empty(), true); - - // adding correct type works - let add = tc.add(correlation_id, k, named_key); - assert_matches!(add, Ok(_)); - // add creates a Transfrom - assert_eq!(tc.fns.len(), 1); - assert_eq!(tc.fns.get(&k), Some(&Transform::AddKeys(map.clone()))); - // add creates an Op - assert_eq!(tc.ops.len(), 1); - assert_eq!(tc.ops.get(&k), Some(&Op::Add)); - - // adding again updates the values - map.insert(name2, u2); - let add = tc.add(correlation_id, k, other_named_key); - assert_matches!(add, Ok(_)); - assert_eq!(tc.fns.len(), 1); - assert_eq!(tc.fns.get(&k), Some(&Transform::AddKeys(map))); - assert_eq!(tc.ops.len(), 1); - assert_eq!(tc.ops.get(&k), Some(&Op::Add)); -} - -#[test] -fn 
tracking_copy_rw() { - let correlation_id = CorrelationId::new(); - let counter = Rc::new(Cell::new(0)); - let db = CountingDb::new(counter); - let mut tc = TrackingCopy::new(db); - let k = Key::Hash([0u8; 32]); - - // reading then writing should update the op - let value = StoredValue::CLValue(CLValue::from_t(3_i32).unwrap()); - let _ = tc.read(correlation_id, &k); - tc.write(k, value.clone()); - assert_eq!(tc.fns.len(), 1); - assert_eq!(tc.fns.get(&k), Some(&Transform::Write(value))); - assert_eq!(tc.ops.len(), 1); - assert_eq!(tc.ops.get(&k), Some(&Op::Write)); -} - -#[test] -fn tracking_copy_ra() { - let correlation_id = CorrelationId::new(); - let counter = Rc::new(Cell::new(0)); - let db = CountingDb::new(counter); - let mut tc = TrackingCopy::new(db); - let k = Key::Hash([0u8; 32]); - - // reading then adding should update the op - let value = StoredValue::CLValue(CLValue::from_t(3_i32).unwrap()); - let _ = tc.read(correlation_id, &k); - let _ = tc.add(correlation_id, k, value); - assert_eq!(tc.fns.len(), 1); - assert_eq!(tc.fns.get(&k), Some(&Transform::AddInt32(3))); - assert_eq!(tc.ops.len(), 1); - // this Op is correct because Read+Add = Write - assert_eq!(tc.ops.get(&k), Some(&Op::Write)); -} - -#[test] -fn tracking_copy_aw() { - let correlation_id = CorrelationId::new(); - let counter = Rc::new(Cell::new(0)); - let db = CountingDb::new(counter); - let mut tc = TrackingCopy::new(db); - let k = Key::Hash([0u8; 32]); - - // adding then writing should update the op - let value = StoredValue::CLValue(CLValue::from_t(3_i32).unwrap()); - let write_value = StoredValue::CLValue(CLValue::from_t(7_i32).unwrap()); - let _ = tc.add(correlation_id, k, value); - tc.write(k, write_value.clone()); - assert_eq!(tc.fns.len(), 1); - assert_eq!(tc.fns.get(&k), Some(&Transform::Write(write_value))); - assert_eq!(tc.ops.len(), 1); - assert_eq!(tc.ops.get(&k), Some(&Op::Write)); -} - -proptest! 
{ - #[test] - fn query_empty_path(k in key_arb(), missing_key in key_arb(), v in stored_value_arb()) { - let correlation_id = CorrelationId::new(); - let (gs, root_hash) = InMemoryGlobalState::from_pairs(correlation_id, &[(k, v.to_owned())]).unwrap(); - let view = gs.checkout(root_hash).unwrap().unwrap(); - let tc = TrackingCopy::new(view); - let empty_path = Vec::new(); - if let Ok(TrackingCopyQueryResult::Success { value, .. }) = tc.query(correlation_id, k, &empty_path) { - assert_eq!(v, value); - } else { - panic!("Query failed when it should not have!"); - } - - if missing_key != k { - let result = tc.query(correlation_id, missing_key, &empty_path); - assert_matches!(result, Ok(TrackingCopyQueryResult::ValueNotFound(_))); - } - } - - #[test] - fn query_contract_state( - k in key_arb(), // key state is stored at - v in stored_value_arb(), // value in contract state - name in "\\PC*", // human-readable name for state - missing_name in "\\PC*", - hash in u8_slice_32(), // hash for contract key - ) { - let correlation_id = CorrelationId::new(); - let mut named_keys = NamedKeys::new(); - named_keys.insert(name.clone(), k); - let contract = - StoredValue::Contract(Contract::new( - [2; 32].into(), - [3; 32].into(), - named_keys, - EntryPoints::default(), - ProtocolVersion::V1_0_0, - )); - let contract_key = Key::Hash(hash); - - let (gs, root_hash) = InMemoryGlobalState::from_pairs( - correlation_id, - &[(k, v.to_owned()), (contract_key, contract)] - ).unwrap(); - let view = gs.checkout(root_hash).unwrap().unwrap(); - let tc = TrackingCopy::new(view); - let path = vec!(name.clone()); - if let Ok(TrackingCopyQueryResult::Success { value, .. 
}) = tc.query(correlation_id, contract_key, &path) { - assert_eq!(v, value); - } else { - panic!("Query failed when it should not have!"); - } - - if missing_name != name { - let result = tc.query(correlation_id, contract_key, &[missing_name]); - assert_matches!(result, Ok(TrackingCopyQueryResult::ValueNotFound(_))); - } - } - - - #[test] - fn query_account_state( - k in key_arb(), // key state is stored at - v in stored_value_arb(), // value in account state - name in "\\PC*", // human-readable name for state - missing_name in "\\PC*", - pk in account_hash_arb(), // account hash - address in account_hash_arb(), // address for account hash - ) { - let correlation_id = CorrelationId::new(); - let named_keys = iter::once((name.clone(), k)).collect(); - let purse = URef::new([0u8; 32], AccessRights::READ_ADD_WRITE); - let associated_keys = AssociatedKeys::new(pk, Weight::new(1)); - let account = Account::new( - pk, - named_keys, - purse, - associated_keys, - Default::default(), - ); - let account_key = Key::Account(address); - - let (gs, root_hash) = InMemoryGlobalState::from_pairs( - correlation_id, - &[(k, v.to_owned()), (account_key, StoredValue::Account(account))], - ).unwrap(); - let view = gs.checkout(root_hash).unwrap().unwrap(); - let tc = TrackingCopy::new(view); - let path = vec!(name.clone()); - if let Ok(TrackingCopyQueryResult::Success { value, .. 
}) = tc.query(correlation_id, account_key, &path) { - assert_eq!(v, value); - } else { - panic!("Query failed when it should not have!"); - } - - if missing_name != name { - let result = tc.query(correlation_id, account_key, &[missing_name]); - assert_matches!(result, Ok(TrackingCopyQueryResult::ValueNotFound(_))); - } - } - - #[test] - fn query_path( - k in key_arb(), // key state is stored at - v in stored_value_arb(), // value in contract state - state_name in "\\PC*", // human-readable name for state - contract_name in "\\PC*", // human-readable name for contract - pk in account_hash_arb(), // account hash - address in account_hash_arb(), // address for account hash - hash in u8_slice_32(), // hash for contract key - ) { - let correlation_id = CorrelationId::new(); - // create contract which knows about value - let mut contract_named_keys = NamedKeys::new(); - contract_named_keys.insert(state_name.clone(), k); - let contract = - StoredValue::Contract(Contract::new( - [2; 32].into(), - [3; 32].into(), - contract_named_keys, - EntryPoints::default(), - ProtocolVersion::V1_0_0, - )); - let contract_key = Key::Hash(hash); - - // create account which knows about contract - let mut account_named_keys = NamedKeys::new(); - account_named_keys.insert(contract_name.clone(), contract_key); - let purse = URef::new([0u8; 32], AccessRights::READ_ADD_WRITE); - let associated_keys = AssociatedKeys::new(pk, Weight::new(1)); - let account = Account::new( - pk, - account_named_keys, - purse, - associated_keys, - Default::default(), - ); - let account_key = Key::Account(address); - - let (gs, root_hash) = InMemoryGlobalState::from_pairs(correlation_id, &[ - (k, v.to_owned()), - (contract_key, contract), - (account_key, StoredValue::Account(account)), - ]).unwrap(); - let view = gs.checkout(root_hash).unwrap().unwrap(); - let tc = TrackingCopy::new(view); - let path = vec!(contract_name, state_name); - - let results = tc.query(correlation_id, account_key, &path); - if let 
Ok(TrackingCopyQueryResult::Success { value, .. }) = results { - assert_eq!(v, value); - } else { - panic!("Query failed when it should not have!"); - } - } -} - -#[test] -fn cache_reads_invalidation() { - let mut tc_cache = TrackingCopyCache::new(2, Count); - let (k1, v1) = ( - Key::Hash([1u8; 32]), - StoredValue::CLValue(CLValue::from_t(1_i32).unwrap()), - ); - let (k2, v2) = ( - Key::Hash([2u8; 32]), - StoredValue::CLValue(CLValue::from_t(2_i32).unwrap()), - ); - let (k3, v3) = ( - Key::Hash([3u8; 32]), - StoredValue::CLValue(CLValue::from_t(3_i32).unwrap()), - ); - tc_cache.insert_read(k1, v1); - tc_cache.insert_read(k2, v2.clone()); - tc_cache.insert_read(k3, v3.clone()); - assert!(tc_cache.get(&k1).is_none()); // first entry should be invalidated - assert_eq!(tc_cache.get(&k2), Some(&v2)); // k2 and k3 should be there - assert_eq!(tc_cache.get(&k3), Some(&v3)); -} - -#[test] -fn cache_writes_not_invalidated() { - let mut tc_cache = TrackingCopyCache::new(2, Count); - let (k1, v1) = ( - Key::Hash([1u8; 32]), - StoredValue::CLValue(CLValue::from_t(1_i32).unwrap()), - ); - let (k2, v2) = ( - Key::Hash([2u8; 32]), - StoredValue::CLValue(CLValue::from_t(2_i32).unwrap()), - ); - let (k3, v3) = ( - Key::Hash([3u8; 32]), - StoredValue::CLValue(CLValue::from_t(3_i32).unwrap()), - ); - tc_cache.insert_write(k1, v1.clone()); - tc_cache.insert_read(k2, v2.clone()); - tc_cache.insert_read(k3, v3.clone()); - // Writes are not subject to cache invalidation - assert_eq!(tc_cache.get(&k1), Some(&v1)); - assert_eq!(tc_cache.get(&k2), Some(&v2)); // k2 and k3 should be there - assert_eq!(tc_cache.get(&k3), Some(&v3)); -} - -#[test] -fn query_for_circular_references_should_fail() { - // create self-referential key - let cl_value_key = Key::URef(URef::new([255; 32], AccessRights::READ)); - let cl_value = StoredValue::CLValue(CLValue::from_t(cl_value_key).unwrap()); - let key_name = "key".to_string(); - - // create contract with this self-referential key in its named keys, and 
also a key referring to - // itself in its named keys. - let contract_key = Key::Hash([1; 32]); - let contract_name = "contract".to_string(); - let mut named_keys = NamedKeys::new(); - named_keys.insert(key_name.clone(), cl_value_key); - named_keys.insert(contract_name.clone(), contract_key); - let contract = StoredValue::Contract(Contract::new( - [2; 32].into(), - [3; 32].into(), - named_keys, - EntryPoints::default(), - ProtocolVersion::V1_0_0, - )); - - let correlation_id = CorrelationId::new(); - let (global_state, root_hash) = InMemoryGlobalState::from_pairs( - correlation_id, - &[(cl_value_key, cl_value), (contract_key, contract)], - ) - .unwrap(); - let view = global_state.checkout(root_hash).unwrap().unwrap(); - let tracking_copy = TrackingCopy::new(view); - - // query for the self-referential key (second path element of arbitrary value required to cause - // iteration _into_ the self-referential key) - let path = vec![key_name, String::new()]; - if let Ok(TrackingCopyQueryResult::CircularReference(msg)) = - tracking_copy.query(correlation_id, contract_key, &path) - { - let expected_path_msg = format!("at path: {:?}/{}", contract_key, path[0]); - assert!(msg.contains(&expected_path_msg)); - } else { - panic!("Query didn't fail with a circular reference error"); - } - - // query for itself in its own named keys - let path = vec![contract_name]; - if let Ok(TrackingCopyQueryResult::CircularReference(msg)) = - tracking_copy.query(correlation_id, contract_key, &path) - { - let expected_path_msg = format!("at path: {:?}/{}", contract_key, path[0]); - assert!(msg.contains(&expected_path_msg)); - } else { - panic!("Query didn't fail with a circular reference error"); - } -} - -#[test] -fn validate_query_proof_should_work() { - // create account - let account_hash = AccountHash::new([3; 32]); - let fake_purse = URef::new([4; 32], AccessRights::READ_ADD_WRITE); - let account_value = StoredValue::Account(Account::create( - account_hash, - NamedKeys::default(), - 
fake_purse, - )); - let account_key = Key::Account(account_hash); - - // create contract that refers to that account - let account_name = "account".to_string(); - let named_keys = { - let mut tmp = NamedKeys::new(); - tmp.insert(account_name.clone(), account_key); - tmp - }; - let contract_value = StoredValue::Contract(Contract::new( - [2; 32].into(), - [3; 32].into(), - named_keys, - EntryPoints::default(), - ProtocolVersion::V1_0_0, - )); - let contract_key = Key::Hash([5; 32]); - - // create account that refers to that contract - let account_hash = AccountHash::new([7; 32]); - let fake_purse = URef::new([6; 32], AccessRights::READ_ADD_WRITE); - let contract_name = "contract".to_string(); - let named_keys = { - let mut tmp = NamedKeys::new(); - tmp.insert(contract_name.clone(), contract_key); - tmp - }; - let main_account_value = - StoredValue::Account(Account::create(account_hash, named_keys, fake_purse)); - let main_account_key = Key::Account(account_hash); - - // random value for proof injection attack - let cl_value = CLValue::from_t(U512::zero()).expect("should convert"); - let uref_value = StoredValue::CLValue(cl_value); - let uref_key = Key::URef(URef::new([8; 32], AccessRights::READ_ADD_WRITE)); - - // persist them - let correlation_id = CorrelationId::new(); - let (global_state, root_hash) = InMemoryGlobalState::from_pairs( - correlation_id, - &[ - (account_key, account_value.to_owned()), - (contract_key, contract_value.to_owned()), - (main_account_key, main_account_value.to_owned()), - (uref_key, uref_value), - ], - ) - .unwrap(); - - let view = global_state - .checkout(root_hash) - .expect("should checkout") - .expect("should have view"); - - let tracking_copy = TrackingCopy::new(view); - - let path = &[contract_name, account_name]; - - let result = tracking_copy - .query(correlation_id, main_account_key, path) - .expect("should query"); - - let proofs = if let TrackingCopyQueryResult::Success { proofs, .. 
} = result { - proofs - } else { - panic!("query was not successful: {:?}", result) - }; - - // Happy path - crate::core::validate_query_proof(&root_hash, &proofs, &main_account_key, path, &account_value) - .expect("should validate"); - - // Path should be the same length as the proofs less one (so it should be of length 2) - assert_eq!( - crate::core::validate_query_proof( - &root_hash, - &proofs, - &main_account_key, - &[], - &account_value - ), - Err(ValidationError::PathLengthDifferentThanProofLessOne) - ); - - // Find an unexpected value after tracing the proof - assert_eq!( - crate::core::validate_query_proof( - &root_hash, - &proofs, - &main_account_key, - path, - &main_account_value - ), - Err(ValidationError::UnexpectedValue) - ); - - // Wrong key provided for the first entry in the proof - assert_eq!( - crate::core::validate_query_proof(&root_hash, &proofs, &account_key, path, &account_value), - Err(ValidationError::UnexpectedKey) - ); - - // Bad proof hash - assert_eq!( - crate::core::validate_query_proof( - &Blake2bHash::new(&[]), - &proofs, - &main_account_key, - path, - &account_value - ), - Err(ValidationError::InvalidProofHash) - ); - - // Provided path contains an unexpected key - assert_eq!( - crate::core::validate_query_proof( - &root_hash, - &proofs, - &main_account_key, - &[ - "a non-existent path key 1".to_string(), - "a non-existent path key 2".to_string() - ], - &account_value - ), - Err(ValidationError::PathCold) - ); - - let misfit_result = tracking_copy - .query(correlation_id, uref_key, &[]) - .expect("should query"); - - let misfit_proof = if let TrackingCopyQueryResult::Success { proofs, .. 
} = misfit_result { - proofs[0].to_owned() - } else { - panic!("query was not successful: {:?}", misfit_result) - }; - - // Proof has been subject to an injection - assert_eq!( - crate::core::validate_query_proof( - &root_hash, - &[ - proofs[0].to_owned(), - misfit_proof.to_owned(), - proofs[2].to_owned() - ], - &main_account_key, - path, - &account_value - ), - Err(ValidationError::UnexpectedKey) - ); - - // Proof has been subject to an injection - assert_eq!( - crate::core::validate_query_proof( - &root_hash, - &[ - misfit_proof.to_owned(), - proofs[1].to_owned(), - proofs[2].to_owned() - ], - &uref_key.normalize(), - path, - &account_value - ), - Err(ValidationError::PathCold) - ); - - // Proof has been subject to an injection - assert_eq!( - crate::core::validate_query_proof( - &root_hash, - &[misfit_proof, proofs[1].to_owned(), proofs[2].to_owned()], - &uref_key.normalize(), - path, - &account_value - ), - Err(ValidationError::PathCold) - ); - - let (misfit_global_state, misfit_root_hash) = InMemoryGlobalState::from_pairs( - correlation_id, - &[ - (account_key, account_value.to_owned()), - (contract_key, contract_value), - (main_account_key, main_account_value), - ], - ) - .unwrap(); - - let misfit_view = misfit_global_state - .checkout(misfit_root_hash) - .expect("should checkout") - .expect("should have view"); - - let misfit_tracking_copy = TrackingCopy::new(misfit_view); - - let misfit_result = misfit_tracking_copy - .query(correlation_id, main_account_key, path) - .expect("should query"); - - let misfit_proof = if let TrackingCopyQueryResult::Success { proofs, .. 
} = misfit_result { - proofs[1].to_owned() - } else { - panic!("query was not successful: {:?}", misfit_result) - }; - - // Proof has been subject to an injection - assert_eq!( - crate::core::validate_query_proof( - &root_hash, - &[proofs[0].to_owned(), misfit_proof, proofs[2].to_owned()], - &main_account_key, - path, - &account_value - ), - Err(ValidationError::InvalidProofHash) - ); -} - -#[test] -fn get_keys_should_return_keys_in_the_account_keyspace() { - // account 1 - let account_1_hash = AccountHash::new([1; 32]); - let fake_purse = URef::new([42; 32], AccessRights::READ_ADD_WRITE); - let account_1_value = StoredValue::Account(Account::create( - account_1_hash, - NamedKeys::default(), - fake_purse, - )); - let account_1_key = Key::Account(account_1_hash); - - // account 2 - let account_2_hash = AccountHash::new([2; 32]); - let fake_purse = URef::new([43; 32], AccessRights::READ_ADD_WRITE); - let account_2_value = StoredValue::Account(Account::create( - account_2_hash, - NamedKeys::default(), - fake_purse, - )); - let account_2_key = Key::Account(account_2_hash); - - // random value - let cl_value = CLValue::from_t(U512::zero()).expect("should convert"); - let uref_value = StoredValue::CLValue(cl_value); - let uref_key = Key::URef(URef::new([8; 32], AccessRights::READ_ADD_WRITE)); - - // persist them - let correlation_id = CorrelationId::new(); - let (global_state, root_hash) = InMemoryGlobalState::from_pairs( - correlation_id, - &[ - (account_1_key, account_1_value), - (account_2_key, account_2_value), - (uref_key, uref_value), - ], - ) - .unwrap(); - - let view = global_state - .checkout(root_hash) - .expect("should checkout") - .expect("should have view"); - - let mut tracking_copy = TrackingCopy::new(view); - - let key_set = tracking_copy - .get_keys(correlation_id, &KeyTag::Account) - .unwrap(); - - assert_eq!(key_set.len(), 2); - assert!(key_set.contains(&account_1_key)); - assert!(key_set.contains(&account_2_key)); - 
assert!(!key_set.contains(&uref_key)); -} - -#[test] -fn get_keys_should_return_keys_in_the_uref_keyspace() { - // account - let account_hash = AccountHash::new([1; 32]); - let fake_purse = URef::new([42; 32], AccessRights::READ_ADD_WRITE); - let account_value = StoredValue::Account(Account::create( - account_hash, - NamedKeys::default(), - fake_purse, - )); - let account_key = Key::Account(account_hash); - - // random value 1 - let cl_value = CLValue::from_t(U512::zero()).expect("should convert"); - let uref_1_value = StoredValue::CLValue(cl_value); - let uref_1_key = Key::URef(URef::new([8; 32], AccessRights::READ_ADD_WRITE)); - - // random value 2 - let cl_value = CLValue::from_t(U512::one()).expect("should convert"); - let uref_2_value = StoredValue::CLValue(cl_value); - let uref_2_key = Key::URef(URef::new([9; 32], AccessRights::READ_ADD_WRITE)); - - // persist them - let correlation_id = CorrelationId::new(); - let (global_state, root_hash) = InMemoryGlobalState::from_pairs( - correlation_id, - &[ - (account_key, account_value), - (uref_1_key, uref_1_value), - (uref_2_key, uref_2_value), - ], - ) - .unwrap(); - - let view = global_state - .checkout(root_hash) - .expect("should checkout") - .expect("should have view"); - - let mut tracking_copy = TrackingCopy::new(view); - - let key_set = tracking_copy - .get_keys(correlation_id, &KeyTag::URef) - .unwrap(); - - assert_eq!(key_set.len(), 2); - assert!(key_set.contains(&uref_1_key.normalize())); - assert!(key_set.contains(&uref_2_key.normalize())); - assert!(!key_set.contains(&account_key)); - - // random value 3 - let cl_value = CLValue::from_t(U512::from(2)).expect("should convert"); - let uref_3_value = StoredValue::CLValue(cl_value); - let uref_3_key = Key::URef(URef::new([10; 32], AccessRights::READ_ADD_WRITE)); - tracking_copy.write(uref_3_key, uref_3_value); - - let key_set = tracking_copy - .get_keys(correlation_id, &KeyTag::URef) - .unwrap(); - - assert_eq!(key_set.len(), 3); - 
assert!(key_set.contains(&uref_1_key.normalize())); - assert!(key_set.contains(&uref_2_key.normalize())); - assert!(key_set.contains(&uref_3_key.normalize())); - assert!(!key_set.contains(&account_key)); -} - -#[test] -fn get_keys_should_handle_reads_from_empty_trie() { - let correlation_id = CorrelationId::new(); - let (global_state, root_hash) = InMemoryGlobalState::from_pairs(correlation_id, &[]).unwrap(); - - let view = global_state - .checkout(root_hash) - .expect("should checkout") - .expect("should have view"); - - let mut tracking_copy = TrackingCopy::new(view); - - let key_set = tracking_copy - .get_keys(correlation_id, &KeyTag::URef) - .unwrap(); - - assert_eq!(key_set.len(), 0); - assert!(key_set.is_empty()); - - // persist random value 1 - let cl_value = CLValue::from_t(U512::zero()).expect("should convert"); - let uref_1_value = StoredValue::CLValue(cl_value); - let uref_1_key = Key::URef(URef::new([8; 32], AccessRights::READ_ADD_WRITE)); - tracking_copy.write(uref_1_key, uref_1_value); - - let key_set = tracking_copy - .get_keys(correlation_id, &KeyTag::URef) - .unwrap(); - - assert_eq!(key_set.len(), 1); - assert!(key_set.contains(&uref_1_key.normalize())); - - // persist random value 2 - let cl_value = CLValue::from_t(U512::one()).expect("should convert"); - let uref_2_value = StoredValue::CLValue(cl_value); - let uref_2_key = Key::URef(URef::new([9; 32], AccessRights::READ_ADD_WRITE)); - tracking_copy.write(uref_2_key, uref_2_value); - - let key_set = tracking_copy - .get_keys(correlation_id, &KeyTag::URef) - .unwrap(); - - assert_eq!(key_set.len(), 2); - assert!(key_set.contains(&uref_1_key.normalize())); - assert!(key_set.contains(&uref_2_key.normalize())); - - // persist account - let account_hash = AccountHash::new([1; 32]); - let fake_purse = URef::new([42; 32], AccessRights::READ_ADD_WRITE); - let account_value = StoredValue::Account(Account::create( - account_hash, - NamedKeys::default(), - fake_purse, - )); - let account_key = 
Key::Account(account_hash); - tracking_copy.write(account_key, account_value); - - assert_eq!(key_set.len(), 2); - assert!(key_set.contains(&uref_1_key.normalize())); - assert!(key_set.contains(&uref_2_key.normalize())); - assert!(!key_set.contains(&account_key)); - - // persist random value 3 - let cl_value = CLValue::from_t(U512::from(2)).expect("should convert"); - let uref_3_value = StoredValue::CLValue(cl_value); - let uref_3_key = Key::URef(URef::new([10; 32], AccessRights::READ_ADD_WRITE)); - tracking_copy.write(uref_3_key, uref_3_value); - - let key_set = tracking_copy - .get_keys(correlation_id, &KeyTag::URef) - .unwrap(); - - assert_eq!(key_set.len(), 3); - assert!(key_set.contains(&uref_1_key.normalize())); - assert!(key_set.contains(&uref_2_key.normalize())); - assert!(key_set.contains(&uref_3_key.normalize())); - assert!(!key_set.contains(&account_key)); -} diff --git a/execution_engine/src/engine_state/engine_config.rs b/execution_engine/src/engine_state/engine_config.rs new file mode 100644 index 0000000000..f10318beb6 --- /dev/null +++ b/execution_engine/src/engine_state/engine_config.rs @@ -0,0 +1,514 @@ +//! Support for runtime configuration of the execution engine - as an integral property of the +//! `EngineState` instance. + +use std::collections::BTreeSet; + +use num_rational::Ratio; +use num_traits::One; + +use casper_types::{ + account::AccountHash, FeeHandling, ProtocolVersion, PublicKey, RefundHandling, StorageCosts, + SystemConfig, TimeDiff, WasmConfig, DEFAULT_FEE_HANDLING, DEFAULT_MINIMUM_BID_AMOUNT, + DEFAULT_REFUND_HANDLING, +}; + +/// Default value for a maximum query depth configuration option. +pub const DEFAULT_MAX_QUERY_DEPTH: u64 = 5; +/// Default value for maximum associated keys configuration option. +pub const DEFAULT_MAX_ASSOCIATED_KEYS: u32 = 100; +/// Default value for maximum runtime call stack height configuration option. 
+pub const DEFAULT_MAX_RUNTIME_CALL_STACK_HEIGHT: u32 = 12; +/// Default max serialized size of `StoredValue`s. +#[deprecated( + since = "3.2.0", + note = "not used in `casper-execution-engine` config anymore" +)] +pub const DEFAULT_MAX_STORED_VALUE_SIZE: u32 = 8 * 1024 * 1024; +/// Default value for minimum delegation amount in motes. +pub const DEFAULT_MINIMUM_DELEGATION_AMOUNT: u64 = 500 * 1_000_000_000; +/// Default value for maximum delegation amount in motes. +pub const DEFAULT_MAXIMUM_DELEGATION_AMOUNT: u64 = 1_000_000_000 * 1_000_000_000; +/// Default value for strict argument checking. +pub const DEFAULT_STRICT_ARGUMENT_CHECKING: bool = false; +/// 91 days / 7 days in a week = 13 weeks +/// Length of total vesting schedule in days. +const VESTING_SCHEDULE_LENGTH_DAYS: usize = 91; +const DAY_MILLIS: usize = 24 * 60 * 60 * 1000; +/// Default length of total vesting schedule period expressed in days. +pub const DEFAULT_VESTING_SCHEDULE_LENGTH_MILLIS: u64 = + VESTING_SCHEDULE_LENGTH_DAYS as u64 * DAY_MILLIS as u64; +/// Default maximum number of delegators per validator. +pub const DEFAULT_MAX_DELEGATORS_PER_VALIDATOR: u32 = 1200; +/// Default value for allowing auction bids. +pub const DEFAULT_ALLOW_AUCTION_BIDS: bool = true; +/// Default value for allowing unrestricted transfers. +pub const DEFAULT_ALLOW_UNRESTRICTED_TRANSFERS: bool = true; +/// Default compute rewards. +pub const DEFAULT_COMPUTE_REWARDS: bool = true; +/// Default protocol version. +pub const DEFAULT_PROTOCOL_VERSION: ProtocolVersion = ProtocolVersion::V2_0_0; +/// Default period for balance holds to decay (currently 24 hours). +pub const DEFAULT_BALANCE_HOLD_INTERVAL: TimeDiff = TimeDiff::from_seconds(24 * 60 * 60); + +/// Default entity flag. 
+pub const DEFAULT_ENABLE_ENTITY: bool = false; + +pub(crate) const DEFAULT_TRAP_ON_AMBIGUOUS_ENTITY_VERSION: bool = false; + +/// The runtime configuration of the execution engine +#[derive(Debug, Clone)] +pub struct EngineConfig { + /// Maximum number of associated keys (i.e. map of + /// [`AccountHash`](AccountHash)s to + /// [`Weight`](casper_types::account::Weight)s) for a single account. + max_associated_keys: u32, + max_runtime_call_stack_height: u32, + minimum_delegation_amount: u64, + maximum_delegation_amount: u64, + minimum_bid_amount: u64, + /// This flag indicates if arguments passed to contracts are checked against the defined types. + strict_argument_checking: bool, + /// Vesting schedule period in milliseconds. + vesting_schedule_period_millis: u64, + max_delegators_per_validator: u32, + wasm_config: WasmConfig, + system_config: SystemConfig, + protocol_version: ProtocolVersion, + /// A private network specifies a list of administrative accounts. + pub(crate) administrative_accounts: BTreeSet, + /// Auction entrypoints such as "add_bid" or "delegate" are disabled if this flag is set to + /// `false`. + pub(crate) allow_auction_bids: bool, + /// Allow unrestricted transfers between normal accounts. + /// + /// If set to `true` accounts can transfer tokens between themselves without restrictions. If + /// set to `false` tokens can be transferred only from normal accounts to administrators + /// and administrators to normal accounts but not normal accounts to normal accounts. + pub(crate) allow_unrestricted_transfers: bool, + /// Refund handling config. + pub(crate) refund_handling: RefundHandling, + /// Fee handling. + pub(crate) fee_handling: FeeHandling, + /// Compute auction rewards. 
+ pub(crate) compute_rewards: bool, + pub(crate) enable_entity: bool, + pub(crate) trap_on_ambiguous_entity_version: bool, + storage_costs: StorageCosts, +} + +impl Default for EngineConfig { + fn default() -> Self { + EngineConfig { + max_associated_keys: DEFAULT_MAX_ASSOCIATED_KEYS, + max_runtime_call_stack_height: DEFAULT_MAX_RUNTIME_CALL_STACK_HEIGHT, + minimum_delegation_amount: DEFAULT_MINIMUM_DELEGATION_AMOUNT, + maximum_delegation_amount: DEFAULT_MAXIMUM_DELEGATION_AMOUNT, + minimum_bid_amount: DEFAULT_MINIMUM_BID_AMOUNT, + strict_argument_checking: DEFAULT_STRICT_ARGUMENT_CHECKING, + vesting_schedule_period_millis: DEFAULT_VESTING_SCHEDULE_LENGTH_MILLIS, + max_delegators_per_validator: DEFAULT_MAX_DELEGATORS_PER_VALIDATOR, + wasm_config: WasmConfig::default(), + system_config: SystemConfig::default(), + administrative_accounts: Default::default(), + allow_auction_bids: DEFAULT_ALLOW_AUCTION_BIDS, + allow_unrestricted_transfers: DEFAULT_ALLOW_UNRESTRICTED_TRANSFERS, + refund_handling: DEFAULT_REFUND_HANDLING, + fee_handling: DEFAULT_FEE_HANDLING, + compute_rewards: DEFAULT_COMPUTE_REWARDS, + protocol_version: DEFAULT_PROTOCOL_VERSION, + enable_entity: DEFAULT_ENABLE_ENTITY, + trap_on_ambiguous_entity_version: DEFAULT_TRAP_ON_AMBIGUOUS_ENTITY_VERSION, + storage_costs: Default::default(), + } + } +} + +impl EngineConfig { + /// Returns the current max associated keys config. + pub fn max_associated_keys(&self) -> u32 { + self.max_associated_keys + } + + /// Returns the current max runtime call stack height config. + pub fn max_runtime_call_stack_height(&self) -> u32 { + self.max_runtime_call_stack_height + } + + /// Returns the current wasm config. + pub fn wasm_config(&self) -> &WasmConfig { + &self.wasm_config + } + + /// Returns the current system config. + pub fn system_config(&self) -> &SystemConfig { + &self.system_config + } + + /// Returns the current protocol version. 
+ pub fn protocol_version(&self) -> ProtocolVersion { + self.protocol_version + } + + /// Returns the minimum delegation amount in motes. + pub fn minimum_delegation_amount(&self) -> u64 { + self.minimum_delegation_amount + } + + /// Returns the maximum delegation amount in motes. + pub fn maximum_delegation_amount(&self) -> u64 { + self.maximum_delegation_amount + } + + /// Returns the minimum delegation amount in motes. + pub fn minimum_bid_amount(&self) -> u64 { + self.minimum_bid_amount + } + + /// Get the engine config's strict argument checking flag. + pub fn strict_argument_checking(&self) -> bool { + self.strict_argument_checking + } + + /// Get the vesting schedule period. + pub fn vesting_schedule_period_millis(&self) -> u64 { + self.vesting_schedule_period_millis + } + + /// Get the max delegators per validator + pub fn max_delegators_per_validator(&self) -> u32 { + self.max_delegators_per_validator + } + + /// Returns the engine config's administrative accounts. + pub fn administrative_accounts(&self) -> &BTreeSet { + &self.administrative_accounts + } + + /// Returns true if auction bids are allowed. + pub fn allow_auction_bids(&self) -> bool { + self.allow_auction_bids + } + + /// Returns true if unrestricted transfers are allowed. + pub fn allow_unrestricted_transfers(&self) -> bool { + self.allow_unrestricted_transfers + } + + /// Checks if an account hash is an administrator. + pub(crate) fn is_administrator(&self, account_hash: &AccountHash) -> bool { + self.administrative_accounts.contains(account_hash) + } + + /// Returns the engine config's refund ratio. + pub fn refund_handling(&self) -> RefundHandling { + self.refund_handling + } + + /// Returns the engine config's fee handling strategy. + pub fn fee_handling(&self) -> FeeHandling { + self.fee_handling + } + + /// Returns the engine config's storage_costs. + pub fn storage_costs(&self) -> &StorageCosts { + &self.storage_costs + } + + /// Returns the engine config's compute rewards flag. 
+ pub fn compute_rewards(&self) -> bool { + self.compute_rewards + } + + /// Returns the `trap_on_ambiguous_entity_version` flag. + pub fn trap_on_ambiguous_entity_version(&self) -> bool { + self.trap_on_ambiguous_entity_version + } + + /// Sets the protocol version of the config. + /// + /// NOTE: This is only useful to the WasmTestBuilder for emulating a network upgrade, and hence + /// is subject to change or deletion without notice. + #[doc(hidden)] + pub fn set_protocol_version(&mut self, protocol_version: ProtocolVersion) { + self.protocol_version = protocol_version; + } + + /// Sets the `wasm_config.max_memory` to `new_value`. + #[cfg(feature = "test-support")] + pub fn set_max_memory(&mut self, new_value: u32) { + *self.wasm_config.v1_mut().max_memory_mut() = new_value; + } +} + +/// A builder for an [`EngineConfig`]. +/// +/// Any field that isn't specified will be defaulted. See [the module docs](index.html) for the set +/// of default values. +#[derive(Default, Debug)] +pub struct EngineConfigBuilder { + max_query_depth: Option, + max_associated_keys: Option, + max_runtime_call_stack_height: Option, + minimum_delegation_amount: Option, + maximum_delegation_amount: Option, + minimum_bid_amount: Option, + strict_argument_checking: Option, + vesting_schedule_period_millis: Option, + max_delegators_per_validator: Option, + wasm_config: Option, + system_config: Option, + protocol_version: Option, + administrative_accounts: Option>, + allow_auction_bids: Option, + allow_unrestricted_transfers: Option, + refund_handling: Option, + fee_handling: Option, + compute_rewards: Option, + balance_hold_interval: Option, + enable_entity: Option, + trap_on_ambiguous_entity_version: Option, + storage_costs: Option, +} + +impl EngineConfigBuilder { + /// Creates a new `EngineConfig` builder. + pub fn new() -> Self { + EngineConfigBuilder::default() + } + + /// Sets the max query depth config option. 
+ pub fn with_max_query_depth(mut self, max_query_depth: u64) -> Self { + self.max_query_depth = Some(max_query_depth); + self + } + + /// Sets the max associated keys config option. + pub fn with_max_associated_keys(mut self, max_associated_keys: u32) -> Self { + self.max_associated_keys = Some(max_associated_keys); + self + } + + /// Sets the max runtime call stack height config option. + pub fn with_max_runtime_call_stack_height( + mut self, + max_runtime_call_stack_height: u32, + ) -> Self { + self.max_runtime_call_stack_height = Some(max_runtime_call_stack_height); + self + } + + /// Sets the strict argument checking config option. + pub fn with_strict_argument_checking(mut self, value: bool) -> Self { + self.strict_argument_checking = Some(value); + self + } + + /// Sets the vesting schedule period millis config option. + pub fn with_vesting_schedule_period_millis(mut self, value: u64) -> Self { + self.vesting_schedule_period_millis = Some(value); + self + } + + /// Sets the max delegators per validator config option. + pub fn with_max_delegators_per_validator(mut self, value: u32) -> Self { + self.max_delegators_per_validator = Some(value); + self + } + + /// Sets the wasm config options. + pub fn with_wasm_config(mut self, wasm_config: WasmConfig) -> Self { + self.wasm_config = Some(wasm_config); + self + } + + /// Sets the system config options. + pub fn with_system_config(mut self, system_config: SystemConfig) -> Self { + self.system_config = Some(system_config); + self + } + + /// Sets the protocol version. + pub fn with_protocol_version(mut self, protocol_version: ProtocolVersion) -> Self { + self.protocol_version = Some(protocol_version); + self + } + + /// Sets the maximum wasm stack height config option. 
+ pub fn with_wasm_max_stack_height(mut self, wasm_stack_height: u32) -> Self { + let wasm_config = self.wasm_config.get_or_insert_with(WasmConfig::default); + *wasm_config.v1_mut().max_stack_height_mut() = wasm_stack_height; + self + } + + /// Sets the minimum delegation amount config option. + pub fn with_minimum_delegation_amount(mut self, minimum_delegation_amount: u64) -> Self { + self.minimum_delegation_amount = Some(minimum_delegation_amount); + self + } + + /// Sets the maximum delegation amount config option. + pub fn with_maximum_delegation_amount(mut self, maximum_delegation_amount: u64) -> Self { + self.maximum_delegation_amount = Some(maximum_delegation_amount); + self + } + + /// Sets the minimum bid amount config option. + pub fn with_minimum_bid_amount(mut self, minimum_bid_amount: u64) -> Self { + self.minimum_bid_amount = Some(minimum_bid_amount); + self + } + + /// Sets the administrative accounts. + pub fn with_administrative_accounts( + mut self, + administrator_accounts: BTreeSet, + ) -> Self { + self.administrative_accounts = Some(administrator_accounts); + self + } + + /// Sets the allow auction bids config option. + pub fn with_allow_auction_bids(mut self, allow_auction_bids: bool) -> Self { + self.allow_auction_bids = Some(allow_auction_bids); + self + } + + /// Sets the allow unrestricted transfers config option. + pub fn with_allow_unrestricted_transfers(mut self, allow_unrestricted_transfers: bool) -> Self { + self.allow_unrestricted_transfers = Some(allow_unrestricted_transfers); + self + } + + /// Sets the refund handling config option. 
+ pub fn with_refund_handling(mut self, refund_handling: RefundHandling) -> Self { + match refund_handling { + RefundHandling::Refund { refund_ratio } | RefundHandling::Burn { refund_ratio } => { + debug_assert!( + refund_ratio <= Ratio::one(), + "refund ratio should be in the range of [0, 1]" + ); + } + RefundHandling::NoRefund => { + //noop + } + } + + self.refund_handling = Some(refund_handling); + self + } + + /// Sets fee handling config option. + pub fn with_fee_handling(mut self, fee_handling: FeeHandling) -> Self { + self.fee_handling = Some(fee_handling); + self + } + + /// Sets compute rewards config option. + pub fn with_compute_rewards(mut self, compute_rewards: bool) -> Self { + self.compute_rewards = Some(compute_rewards); + self + } + + /// Sets balance hold interval config option. + pub fn balance_hold_interval(mut self, balance_hold_interval: TimeDiff) -> Self { + self.balance_hold_interval = Some(balance_hold_interval); + self + } + + /// Sets the enable entity flag. + pub fn with_enable_entity(mut self, enable_entity: bool) -> Self { + self.enable_entity = Some(enable_entity); + self + } + + /// Sets the flag if the runtime returns an error on entity version collision. + pub fn with_trap_on_ambiguous_entity_version( + mut self, + trap_on_ambiguous_entity_version: bool, + ) -> Self { + self.trap_on_ambiguous_entity_version = Some(trap_on_ambiguous_entity_version); + self + } + + /// Sets the storage_costs config option. + pub fn with_storage_costs(mut self, storage_costs: StorageCosts) -> Self { + self.storage_costs = Some(storage_costs); + self + } + + /// Builds a new [`EngineConfig`] object. 
+ pub fn build(self) -> EngineConfig { + let max_associated_keys = self + .max_associated_keys + .unwrap_or(DEFAULT_MAX_ASSOCIATED_KEYS); + let max_runtime_call_stack_height = self + .max_runtime_call_stack_height + .unwrap_or(DEFAULT_MAX_RUNTIME_CALL_STACK_HEIGHT); + let minimum_delegation_amount = self + .minimum_delegation_amount + .unwrap_or(DEFAULT_MINIMUM_DELEGATION_AMOUNT); + let maximum_delegation_amount = self + .maximum_delegation_amount + .unwrap_or(DEFAULT_MAXIMUM_DELEGATION_AMOUNT); + let minimum_bid_amount = self + .minimum_bid_amount + .unwrap_or(DEFAULT_MINIMUM_BID_AMOUNT); + let wasm_config = self.wasm_config.unwrap_or_default(); + let system_config = self.system_config.unwrap_or_default(); + let protocol_version = self.protocol_version.unwrap_or(DEFAULT_PROTOCOL_VERSION); + let administrative_accounts = { + self.administrative_accounts + .unwrap_or_default() + .iter() + .map(PublicKey::to_account_hash) + .collect() + }; + let allow_auction_bids = self + .allow_auction_bids + .unwrap_or(DEFAULT_ALLOW_AUCTION_BIDS); + let allow_unrestricted_transfers = self + .allow_unrestricted_transfers + .unwrap_or(DEFAULT_ALLOW_UNRESTRICTED_TRANSFERS); + let refund_handling = self.refund_handling.unwrap_or(DEFAULT_REFUND_HANDLING); + let fee_handling = self.fee_handling.unwrap_or(DEFAULT_FEE_HANDLING); + + let strict_argument_checking = self + .strict_argument_checking + .unwrap_or(DEFAULT_STRICT_ARGUMENT_CHECKING); + let vesting_schedule_period_millis = self + .vesting_schedule_period_millis + .unwrap_or(DEFAULT_VESTING_SCHEDULE_LENGTH_MILLIS); + let max_delegators_per_validator = self + .max_delegators_per_validator + .unwrap_or(DEFAULT_MAX_DELEGATORS_PER_VALIDATOR); + let compute_rewards = self.compute_rewards.unwrap_or(DEFAULT_COMPUTE_REWARDS); + let enable_entity = self.enable_entity.unwrap_or(DEFAULT_ENABLE_ENTITY); + let trap_on_ambiguous_entity_version = self + .trap_on_ambiguous_entity_version + .unwrap_or(DEFAULT_TRAP_ON_AMBIGUOUS_ENTITY_VERSION); + 
let storage_costs = self.storage_costs.unwrap_or_default(); + + EngineConfig { + max_associated_keys, + max_runtime_call_stack_height, + minimum_delegation_amount, + maximum_delegation_amount, + minimum_bid_amount, + wasm_config, + system_config, + protocol_version, + administrative_accounts, + allow_auction_bids, + allow_unrestricted_transfers, + refund_handling, + fee_handling, + strict_argument_checking, + vesting_schedule_period_millis, + max_delegators_per_validator, + compute_rewards, + enable_entity, + trap_on_ambiguous_entity_version, + storage_costs, + } + } +} diff --git a/execution_engine/src/engine_state/error.rs b/execution_engine/src/engine_state/error.rs new file mode 100644 index 0000000000..852c9e01e9 --- /dev/null +++ b/execution_engine/src/engine_state/error.rs @@ -0,0 +1,114 @@ +//! Definition of all the possible outcomes of the operation on an `EngineState` instance. +use datasize::DataSize; +use thiserror::Error; + +use casper_storage::{system::transfer::TransferError, tracking_copy::TrackingCopyError}; +use casper_types::{bytesrepr, system::mint, ApiError, Digest, Key, ProtocolVersion}; + +use super::InvalidRequest; +use crate::{ + execution::ExecError, + runtime::{stack, PreprocessingError}, +}; + +/// Engine state errors. +#[derive(Clone, Error, Debug)] +#[non_exhaustive] +pub enum Error { + /// Specified state root hash is not found. + #[error("Root not found: {0}")] + RootNotFound(Digest), + /// Protocol version used in the deploy is invalid. + #[error("Invalid protocol version: {0}")] + InvalidProtocolVersion(ProtocolVersion), + /// WASM preprocessing error. + #[error("Wasm preprocessing error: {0}")] + WasmPreprocessing(#[from] PreprocessingError), + /// Contract execution error. + #[error(transparent)] + Exec(ExecError), + /// Serialization/deserialization error. + #[error("Bytesrepr error: {0}")] + Bytesrepr(String), + /// Mint error. + #[error("Mint error: {0}")] + Mint(String), + /// Invalid key variant. 
+ #[error("Unsupported key type: {0}")] + InvalidKeyVariant(Key), + /// An attempt to push to the runtime stack while already at the maximum height. + #[error("Runtime stack overflow")] + RuntimeStackOverflow, + /// Storage error. + #[error("Tracking copy error: {0}")] + TrackingCopy(TrackingCopyError), + /// Native transfer error. + #[error("Transfer error: {0}")] + Transfer(TransferError), + /// Could not derive a valid item to execute. + #[error("Invalid executable item: {0}")] + InvalidExecutableItem(#[from] InvalidRequest), +} + +impl Error { + /// Creates an [`enum@Error`] instance of an [`Error::Exec`] variant with an API + /// error-compatible object. + /// + /// This method should be used only by native code that has to mimic logic of a WASM executed + /// code. + pub fn reverter(api_error: impl Into) -> Error { + Error::Exec(ExecError::Revert(api_error.into())) + } +} + +impl From for Error { + fn from(err: TransferError) -> Self { + Error::Transfer(err) + } +} + +impl From for Error { + fn from(error: ExecError) -> Self { + match error { + ExecError::WasmPreprocessing(preprocessing_error) => { + Error::WasmPreprocessing(preprocessing_error) + } + _ => Error::Exec(error), + } + } +} + +impl From for Error { + fn from(error: bytesrepr::Error) -> Self { + Error::Bytesrepr(format!("{}", error)) + } +} + +impl From for Error { + fn from(error: mint::Error) -> Self { + Error::Mint(format!("{}", error)) + } +} + +impl From for Error { + fn from(_: stack::RuntimeStackOverflow) -> Self { + Self::RuntimeStackOverflow + } +} + +impl From for Error { + fn from(e: TrackingCopyError) -> Self { + Error::TrackingCopy(e) + } +} + +impl DataSize for Error { + const IS_DYNAMIC: bool = true; + + const STATIC_HEAP_SIZE: usize = 0; + + #[inline] + fn estimate_heap_size(&self) -> usize { + 12 // TODO: replace with some actual estimation depending on the variant + } +} diff --git a/execution_engine/src/engine_state/execution_kind.rs 
b/execution_engine/src/engine_state/execution_kind.rs new file mode 100644 index 0000000000..b3ed318ec3 --- /dev/null +++ b/execution_engine/src/engine_state/execution_kind.rs @@ -0,0 +1,123 @@ +//! Units of execution. + +use casper_types::{ + bytesrepr::Bytes, + contracts::{NamedKeys, ProtocolVersionMajor}, + AddressableEntityHash, EntityVersion, Key, PackageHash, TransactionInvocationTarget, +}; + +use super::{wasm_v1::SessionKind, Error, ExecutableItem}; +use crate::execution::ExecError; + +/// The type of execution about to be performed. +#[derive(Clone, Debug)] +pub(crate) enum ExecutionKind<'a> { + /// Standard (non-specialized) Wasm bytes related to a transaction of version 1 or later. + Standard(&'a Bytes), + /// Wasm bytes which install or upgrade a stored entity. + InstallerUpgrader(&'a Bytes), + /// Stored contract. + Stored { + /// AddressableEntity's hash. + entity_hash: AddressableEntityHash, + /// Entry point. + entry_point: String, + }, + /// Standard (non-specialized) Wasm bytes related to a `Deploy`. + /// + /// This is equivalent to the `Standard` variant with the exception that this kind will be + /// allowed to install or upgrade stored entities to retain existing (pre-node 2.0) behavior. + Deploy(&'a Bytes), + /// A call to an entity/contract in a package/contract package. + VersionedCall { + package_hash: PackageHash, + entity_version: Option, + protocol_version_major: Option, + /// Entry point. 
+ entry_point: String, + }, +} + +impl<'a> ExecutionKind<'a> { + pub(crate) fn new( + named_keys: &NamedKeys, + executable_item: &'a ExecutableItem, + entry_point: String, + ) -> Result { + match executable_item { + ExecutableItem::Invocation(target) => { + Self::new_direct_invocation(named_keys, target, entry_point) + } + ExecutableItem::PaymentBytes(module_bytes) + | ExecutableItem::SessionBytes { + kind: SessionKind::GenericBytecode, + module_bytes, + } => Ok(ExecutionKind::Standard(module_bytes)), + ExecutableItem::SessionBytes { + kind: SessionKind::InstallUpgradeBytecode, + module_bytes, + } => Ok(ExecutionKind::InstallerUpgrader(module_bytes)), + ExecutableItem::Deploy(module_bytes) => Ok(ExecutionKind::Deploy(module_bytes)), + } + } + + fn new_direct_invocation( + named_keys: &NamedKeys, + target: &TransactionInvocationTarget, + entry_point: String, + ) -> Result { + let entity_hash = match target { + TransactionInvocationTarget::ByHash(addr) => AddressableEntityHash::new(*addr), + TransactionInvocationTarget::ByName(alias) => { + let entity_key = named_keys + .get(alias) + .ok_or_else(|| Error::Exec(ExecError::NamedKeyNotFound(alias.clone())))?; + + match entity_key { + Key::Hash(hash) => AddressableEntityHash::new(*hash), + Key::AddressableEntity(entity_addr) => { + AddressableEntityHash::new(entity_addr.value()) + } + _ => return Err(Error::InvalidKeyVariant(*entity_key)), + } + } + TransactionInvocationTarget::ByPackageHash { + addr, + version, + protocol_version_major, + } => { + let package_hash = PackageHash::from(*addr); + return Ok(Self::VersionedCall { + package_hash, + entity_version: *version, + protocol_version_major: *protocol_version_major, + entry_point, + }); + } + TransactionInvocationTarget::ByPackageName { + name, + version, + protocol_version_major, + } => { + let package_key = named_keys + .get(name) + .ok_or_else(|| Error::Exec(ExecError::NamedKeyNotFound(name.to_string())))?; + + let package_hash = match package_key { + 
Key::Hash(hash) | Key::SmartContract(hash) => PackageHash::new(*hash), + _ => return Err(Error::InvalidKeyVariant(*package_key)), + }; + return Ok(Self::VersionedCall { + package_hash, + entity_version: *version, + protocol_version_major: *protocol_version_major, + entry_point, + }); + } + }; + Ok(ExecutionKind::Stored { + entity_hash, + entry_point, + }) + } +} diff --git a/execution_engine/src/engine_state/mod.rs b/execution_engine/src/engine_state/mod.rs new file mode 100644 index 0000000000..fd8fa3e661 --- /dev/null +++ b/execution_engine/src/engine_state/mod.rs @@ -0,0 +1,197 @@ +//! This module contains all the execution related code. +pub mod engine_config; +mod error; +pub(crate) mod execution_kind; +mod wasm_v1; + +use std::{cell::RefCell, collections::BTreeSet, rc::Rc}; + +use casper_types::{ + account::AccountHash, Gas, InitiatorAddr, Key, Phase, RuntimeArgs, StoredValue, TransactionHash, +}; + +use casper_storage::{ + global_state::{ + error::Error as GlobalStateError, + state::{StateProvider, StateReader}, + }, + tracking_copy::{TrackingCopyEntityExt, TrackingCopyError}, + TrackingCopy, +}; + +use crate::{execution::Executor, runtime::RuntimeStack}; +pub use engine_config::{ + EngineConfig, EngineConfigBuilder, DEFAULT_MAX_QUERY_DEPTH, + DEFAULT_MAX_RUNTIME_CALL_STACK_HEIGHT, +}; +pub use error::Error; +use execution_kind::ExecutionKind; +pub use wasm_v1::{ + BlockInfo, ExecutableItem, InvalidRequest, SessionDataDeploy, SessionDataV1, SessionInputData, + WasmV1Request, WasmV1Result, +}; + +/// Gas/motes conversion rate of wasmless transfer cost is always 1 regardless of what user wants to +/// pay. +pub const WASMLESS_TRANSFER_FIXED_GAS_PRICE: u8 = 1; + +/// The public api of the v1 execution engine, as of protocol version 2.0.0 +#[derive(Debug, Clone, Default)] +pub struct ExecutionEngineV1 { + config: EngineConfig, +} + +impl ExecutionEngineV1 { + /// Creates new execution engine. 
+ pub fn new(config: EngineConfig) -> ExecutionEngineV1 { + ExecutionEngineV1 { config } + } + + /// Returns engine config. + pub fn config(&self) -> &EngineConfig { + &self.config + } + + /// Executes wasm, and that's all. Does not commit or handle payment or anything else. + pub fn execute( + &self, + state_provider: &impl StateProvider, + wasm_v1_request: WasmV1Request, + ) -> WasmV1Result { + let WasmV1Request { + block_info, + transaction_hash, + gas_limit, + initiator_addr, + executable_item, + entry_point, + args, + authorization_keys, + phase, + } = wasm_v1_request; + // NOTE to core engineers: it is intended for the EE to ONLY execute wasm targeting the + // casper v1 virtual machine. it should not handle native behavior, database / global state + // interaction, payment processing, or anything other than its single function. + // A good deal of effort has been put into removing all such behaviors; please do not + // come along and start adding it back. + + let account_hash = initiator_addr.account_hash(); + let protocol_version = self.config.protocol_version(); + let state_hash = block_info.state_hash; + let tc = match state_provider.tracking_copy(state_hash) { + Ok(Some(tracking_copy)) => Rc::new(RefCell::new(tracking_copy)), + Ok(None) => return WasmV1Result::root_not_found(gas_limit, state_hash), + Err(gse) => { + return WasmV1Result::precondition_failure( + gas_limit, + Error::TrackingCopy(TrackingCopyError::Storage(gse)), + ) + } + }; + let (runtime_footprint, entity_addr) = { + match tc.borrow_mut().authorized_runtime_footprint_by_account( + protocol_version, + account_hash, + &authorization_keys, + &self.config().administrative_accounts, + ) { + Ok((runtime_footprint, entity_hash)) => (runtime_footprint, entity_hash), + Err(tce) => { + return WasmV1Result::precondition_failure(gas_limit, Error::TrackingCopy(tce)) + } + } + }; + let mut named_keys = runtime_footprint.named_keys().clone(); + let execution_kind = match ExecutionKind::new(&named_keys, 
&executable_item, entry_point) { + Ok(execution_kind) => execution_kind, + Err(ese) => return WasmV1Result::precondition_failure(gas_limit, ese), + }; + let access_rights = runtime_footprint.extract_access_rights(entity_addr.value()); + Executor::new(self.config().clone()).exec( + execution_kind, + args, + entity_addr, + Rc::new(RefCell::new(runtime_footprint)), + &mut named_keys, + access_rights, + authorization_keys, + account_hash, + block_info, + transaction_hash, + gas_limit, + Rc::clone(&tc), + phase, + RuntimeStack::from_account_hash( + account_hash, + self.config.max_runtime_call_stack_height() as usize, + ), + ) + } + + /// Executes wasm, and that's all. Does not commit or handle payment or anything else. + #[allow(clippy::too_many_arguments)] + pub fn execute_with_tracking_copy( + &self, + tracking_copy: TrackingCopy, + block_info: BlockInfo, + transaction_hash: TransactionHash, + gas_limit: Gas, + initiator_addr: InitiatorAddr, + executable_item: ExecutableItem, + entry_point: String, + args: RuntimeArgs, + authorization_keys: BTreeSet, + phase: Phase, + ) -> WasmV1Result + where + R: StateReader, + { + // NOTE to core engineers: it is intended for the EE to ONLY execute wasm targeting the + // casper v1 virtual machine. it should not handle native behavior, database / global state + // interaction, payment processing, or anything other than its single function. + // A good deal of effort has been put into removing all such behaviors; please do not + // come along and start adding it back. 
+ + let account_hash = initiator_addr.account_hash(); + let protocol_version = self.config.protocol_version(); + let tc = Rc::new(RefCell::new(tracking_copy)); + let (runtime_footprint, entity_addr) = { + match tc.borrow_mut().authorized_runtime_footprint_by_account( + protocol_version, + account_hash, + &authorization_keys, + &self.config().administrative_accounts, + ) { + Ok((addressable_entity, entity_hash)) => (addressable_entity, entity_hash), + Err(tce) => { + return WasmV1Result::precondition_failure(gas_limit, Error::TrackingCopy(tce)) + } + } + }; + let mut named_keys = runtime_footprint.named_keys().clone(); + let execution_kind = match ExecutionKind::new(&named_keys, &executable_item, entry_point) { + Ok(execution_kind) => execution_kind, + Err(ese) => return WasmV1Result::precondition_failure(gas_limit, ese), + }; + let access_rights = runtime_footprint.extract_access_rights(entity_addr.value()); + Executor::new(self.config().clone()).exec( + execution_kind, + args, + entity_addr, + Rc::new(RefCell::new(runtime_footprint)), + &mut named_keys, + access_rights, + authorization_keys, + account_hash, + block_info, + transaction_hash, + gas_limit, + Rc::clone(&tc), + phase, + RuntimeStack::from_account_hash( + account_hash, + self.config.max_runtime_call_stack_height() as usize, + ), + ) + } +} diff --git a/execution_engine/src/engine_state/wasm_v1.rs b/execution_engine/src/engine_state/wasm_v1.rs new file mode 100644 index 0000000000..7d4fcdcbb6 --- /dev/null +++ b/execution_engine/src/engine_state/wasm_v1.rs @@ -0,0 +1,1002 @@ +use std::{collections::BTreeSet, convert::TryFrom}; + +use serde::Serialize; +use thiserror::Error; + +use casper_storage::{data_access_layer::TransferResult, tracking_copy::TrackingCopyCache}; +use casper_types::{ + account::AccountHash, + bytesrepr::Bytes, + contract_messages::Messages, + execution::{Effects, TransformKindV2}, + BlockHash, BlockTime, CLValue, DeployHash, Digest, ExecutableDeployItem, Gas, InitiatorAddr, + Key, 
Phase, PricingMode, ProtocolVersion, RuntimeArgs, TransactionEntryPoint, TransactionHash, + TransactionInvocationTarget, TransactionTarget, TransactionV1Hash, Transfer, URefAddr, U512, +}; + +use crate::engine_state::Error as EngineError; + +const DEFAULT_ENTRY_POINT: &str = "call"; + +/// Structure that needs to be filled with data so the engine can assemble wasm for deploy. +pub struct SessionDataDeploy<'a> { + deploy_hash: &'a DeployHash, + session: &'a ExecutableDeployItem, + initiator_addr: &'a InitiatorAddr, + signers: BTreeSet, + is_standard_payment: bool, +} + +impl<'a> SessionDataDeploy<'a> { + /// Constructor + pub fn new( + deploy_hash: &'a DeployHash, + session: &'a ExecutableDeployItem, + initiator_addr: &'a InitiatorAddr, + signers: BTreeSet, + is_standard_payment: bool, + ) -> Self { + Self { + deploy_hash, + session, + initiator_addr, + signers, + is_standard_payment, + } + } + + /// Deploy hash of the deploy + pub fn deploy_hash(&self) -> &DeployHash { + self.deploy_hash + } + + /// executable item of the deploy + pub fn session(&self) -> &ExecutableDeployItem { + self.session + } + + /// initiator address of the deploy + pub fn initiator_addr(&self) -> &InitiatorAddr { + self.initiator_addr + } + + /// signers of the deploy + pub fn signers(&self) -> BTreeSet { + self.signers.clone() + } +} + +/// Structure that needs to be filled with data so the engine can assemble wasm for v1. 
+pub struct SessionDataV1<'a> { + args: &'a RuntimeArgs, + target: &'a TransactionTarget, + entry_point: &'a TransactionEntryPoint, + is_install_upgrade: bool, + hash: &'a TransactionV1Hash, + pricing_mode: &'a PricingMode, + initiator_addr: &'a InitiatorAddr, + signers: BTreeSet, + is_standard_payment: bool, +} + +impl<'a> SessionDataV1<'a> { + #[allow(clippy::too_many_arguments)] + /// Constructor + pub fn new( + args: &'a RuntimeArgs, + target: &'a TransactionTarget, + entry_point: &'a TransactionEntryPoint, + is_install_upgrade: bool, + hash: &'a TransactionV1Hash, + pricing_mode: &'a PricingMode, + initiator_addr: &'a InitiatorAddr, + signers: BTreeSet, + is_standard_payment: bool, + ) -> Self { + Self { + args, + target, + entry_point, + is_install_upgrade, + hash, + pricing_mode, + initiator_addr, + signers, + is_standard_payment, + } + } + + /// Runtime args passed with the transaction. + pub fn args(&self) -> &RuntimeArgs { + self.args + } + + /// Target of the transaction. + pub fn target(&self) -> &TransactionTarget { + self.target + } + + /// Entry point of the transaction + pub fn entry_point(&self) -> &TransactionEntryPoint { + self.entry_point + } + + /// Should session be allowed to perform install/upgrade operations + pub fn is_install_upgrade(&self) -> bool { + self.is_install_upgrade + } + + /// Hash of the transaction + pub fn hash(&self) -> &TransactionV1Hash { + self.hash + } + + /// initiator address of the transaction + pub fn initiator_addr(&self) -> &InitiatorAddr { + self.initiator_addr + } + + /// signers of the transaction + pub fn signers(&self) -> BTreeSet { + self.signers.clone() + } + + /// Pricing mode of the transaction + pub fn pricing_mode(&self) -> &PricingMode { + self.pricing_mode + } +} + +/// Wrapper enum abstracting data for assmbling WasmV1Requests +pub enum SessionInputData<'a> { + /// Variant for sessions created from deploy transactions + DeploySessionData { + /// Deploy session data + data: SessionDataDeploy<'a>, + }, 
+ /// Variant for sessions created from v1 transactions + SessionDataV1 { + /// v1 session data + data: SessionDataV1<'a>, + }, +} + +impl SessionInputData<'_> { + /// Transaction hash for the session + pub fn transaction_hash(&self) -> TransactionHash { + match self { + SessionInputData::DeploySessionData { data } => { + TransactionHash::Deploy(*data.deploy_hash()) + } + SessionInputData::SessionDataV1 { data } => TransactionHash::V1(*data.hash()), + } + } + + /// Initiator address for the session + pub fn initiator_addr(&self) -> &InitiatorAddr { + match self { + SessionInputData::DeploySessionData { data } => data.initiator_addr(), + SessionInputData::SessionDataV1 { data } => data.initiator_addr(), + } + } + + /// Signers for the session + pub fn signers(&self) -> BTreeSet { + match self { + SessionInputData::DeploySessionData { data } => data.signers(), + SessionInputData::SessionDataV1 { data } => data.signers(), + } + } + + /// determines if the transaction from which this session data was created is a standard payment + pub fn is_standard_payment(&self) -> bool { + match self { + SessionInputData::DeploySessionData { data } => data.is_standard_payment, + SessionInputData::SessionDataV1 { data } => data.is_standard_payment, + } + } + + /// Is install upgrade allowed? + pub fn is_install_upgrade_allowed(&self) -> bool { + match self { + SessionInputData::DeploySessionData { .. } => true, + SessionInputData::SessionDataV1 { data } => data.is_install_upgrade, + } + } +} + +/// Error returned if constructing a new [`WasmV1Request`] fails. +#[derive(Clone, Eq, PartialEq, Error, Serialize, Debug)] +pub enum InvalidRequest { + /// Missing custom payment. + #[error("custom payment not found for {0}")] + CustomPaymentNotFound(TransactionHash), + /// Unexpected variant. + #[error("unexpected variant for {0} attempting {1}")] + UnexpectedVariant(TransactionHash, String), + /// Unsupported mode. 
+ #[error("unsupported mode for {0} attempting {1}")] + UnsupportedMode(TransactionHash, String), + /// Invalid entry point. + #[error("invalid entry point for {0} attempting {1}")] + InvalidEntryPoint(TransactionHash, String), + /// Invalid target. + #[error("invalid target for {0} attempting {1}")] + InvalidTarget(TransactionHash, String), + /// Unsupported category. + #[error("invalid category for {0} attempting {1}")] + InvalidCategory(TransactionHash, String), +} + +#[derive(Debug, Clone)] +pub enum SessionKind { + InstallUpgradeBytecode, + GenericBytecode, +} + +/// The item to be executed. +#[derive(Debug, Clone)] +pub enum ExecutableItem { + /// Deploy model byte code. + Deploy(Bytes), + /// Payment byte code. + PaymentBytes(Bytes), + /// Session byte code. + SessionBytes { + /// The kind of session. + kind: SessionKind, + /// The compiled Wasm. + module_bytes: Bytes, + }, + /// An attempt to invoke a stored entity or package. + Invocation(TransactionInvocationTarget), +} + +impl ExecutableItem { + /// Is install upgrade allowed? + pub fn is_install_upgrade_allowed(&self) -> bool { + match self { + ExecutableItem::Deploy(_) => true, + ExecutableItem::PaymentBytes(_) | ExecutableItem::Invocation(_) => false, + ExecutableItem::SessionBytes { kind, .. } => { + matches!(kind, SessionKind::InstallUpgradeBytecode) + } + } + } +} + +/// Block info. +#[derive(Copy, Clone, Debug)] +pub struct BlockInfo { + /// State root hash of the global state in which the transaction will be executed. + pub state_hash: Digest, + /// Block time represented as a unix timestamp. + pub block_time: BlockTime, + /// Parent block hash + pub parent_block_hash: BlockHash, + /// Block height + pub block_height: u64, + /// Protocol version + pub protocol_version: ProtocolVersion, +} + +impl BlockInfo { + /// A new instance of `[BlockInfo]`. 
+ pub fn new( + state_hash: Digest, + block_time: BlockTime, + parent_block_hash: BlockHash, + block_height: u64, + protocol_version: ProtocolVersion, + ) -> Self { + BlockInfo { + state_hash, + block_time, + parent_block_hash, + block_height, + protocol_version, + } + } + + /// Apply different state hash. + pub fn with_state_hash(&mut self, state_hash: Digest) { + self.state_hash = state_hash; + } + + /// State hash. + pub fn state_hash(&self) -> Digest { + self.state_hash + } + + /// Block time. + pub fn block_time(&self) -> BlockTime { + self.block_time + } + + /// Parent block hash. + pub fn parent_block_hash(&self) -> BlockHash { + self.parent_block_hash + } + + /// Block height. + pub fn block_height(&self) -> u64 { + self.block_height + } + + /// Protocol version. + pub fn protocol_version(&self) -> ProtocolVersion { + self.protocol_version + } +} + +/// A request to execute the given Wasm on the V1 runtime. +#[derive(Debug)] +pub struct WasmV1Request { + /// Block info. + pub block_info: BlockInfo, + /// The hash identifying the transaction. + pub transaction_hash: TransactionHash, + /// The number of Motes per unit of Gas to be paid for execution. + pub gas_limit: Gas, + /// The transaction's initiator. + pub initiator_addr: InitiatorAddr, + /// The executable item. + pub executable_item: ExecutableItem, + /// The entry point to call when executing. + pub entry_point: String, + /// The runtime args. + pub args: RuntimeArgs, + /// The account hashes of the signers of the transaction. + pub authorization_keys: BTreeSet, + /// Execution phase. + pub phase: Phase, +} + +impl WasmV1Request { + /// New from executable deploy item or InvalidRequest error. 
+ pub fn new_from_executable_deploy_item( + block_info: BlockInfo, + gas_limit: Gas, + transaction_hash: TransactionHash, + initiator_addr: InitiatorAddr, + authorization_keys: BTreeSet, + session_item: &ExecutableDeployItem, + ) -> Result { + let executable_info = + build_session_info_for_executable_item(session_item, transaction_hash)?; + Ok(Self::new_from_executable_info( + block_info, + gas_limit, + transaction_hash, + initiator_addr, + authorization_keys, + executable_info, + )) + } + + /// New payment from executable deploy item or InvalidRequest error. + pub fn new_payment_from_executable_deploy_item( + block_info: BlockInfo, + gas_limit: Gas, + transaction_hash: TransactionHash, + initiator_addr: InitiatorAddr, + authorization_keys: BTreeSet, + payment_item: &ExecutableDeployItem, + ) -> Result { + let executable_info = + build_payment_info_for_executable_item(payment_item, transaction_hash)?; + Ok(Self::new_from_executable_info( + block_info, + gas_limit, + transaction_hash, + initiator_addr, + authorization_keys, + executable_info, + )) + } + + pub(crate) fn new_from_executable_info( + block_info: BlockInfo, + gas_limit: Gas, + transaction_hash: TransactionHash, + initiator_addr: InitiatorAddr, + authorization_keys: BTreeSet, + executable_info: impl Executable, + ) -> Self { + let executable_item = executable_info.item(); + Self { + block_info, + transaction_hash, + gas_limit, + initiator_addr, + authorization_keys, + executable_item, + entry_point: executable_info.entry_point().clone(), + args: executable_info.args().clone(), + phase: executable_info.phase(), + } + } + + /// Creates a new request from a transaction for use as the session code. 
+ pub fn new_session( + block_info: BlockInfo, + gas_limit: Gas, + session_input_data: &SessionInputData, + ) -> Result { + let session_info = SessionInfo::try_from(session_input_data)?; + let transaction_hash = session_input_data.transaction_hash(); + let initiator_addr = session_input_data.initiator_addr().clone(); + let authorization_keys = session_input_data.signers().clone(); + Ok(WasmV1Request::new_from_executable_info( + block_info, + gas_limit, + transaction_hash, + initiator_addr, + authorization_keys, + session_info, + )) + } + + /// Creates a new request from a transaction for use as custom payment. + pub fn new_custom_payment( + block_info: BlockInfo, + gas_limit: Gas, + session_input_data: &SessionInputData, + ) -> Result { + let payment_info = PaymentInfo::try_from(session_input_data)?; + let transaction_hash = session_input_data.transaction_hash(); + let initiator_addr = session_input_data.initiator_addr().clone(); + let authorization_keys = session_input_data.signers().clone(); + Ok(WasmV1Request::new_from_executable_info( + block_info, + gas_limit, + transaction_hash, + initiator_addr, + authorization_keys, + payment_info, + )) + } +} + +/// Wasm v1 result. +#[derive(Clone, Debug)] +pub struct WasmV1Result { + /// List of transfers that happened during execution. + transfers: Vec, + /// Gas limit. + limit: Gas, + /// Gas consumed. + consumed: Gas, + /// Execution effects. + effects: Effects, + /// Messages emitted during execution. + messages: Messages, + /// Did the wasm execute successfully? + error: Option, + /// Result captured from a ret call. + ret: Option, + /// Tracking copy cache captured during execution. + cache: Option, +} + +impl WasmV1Result { + /// Creates a new instance. 
+ #[allow(clippy::too_many_arguments)] + pub fn new( + limit: Gas, + consumed: Gas, + effects: Effects, + transfers: Vec, + messages: Messages, + error: Option, + ret: Option, + cache: Option, + ) -> Self { + WasmV1Result { + limit, + consumed, + effects, + transfers, + messages, + error, + ret, + cache, + } + } + + /// Error, if any. + pub fn error(&self) -> Option<&EngineError> { + self.error.as_ref() + } + + /// List of transfers that happened during execution. + pub fn transfers(&self) -> &Vec { + &self.transfers + } + + /// Gas limit. + pub fn limit(&self) -> Gas { + self.limit + } + + /// Gas consumed. + pub fn consumed(&self) -> Gas { + self.consumed + } + + /// Execution effects. + pub fn effects(&self) -> &Effects { + &self.effects + } + + /// Tracking copy cache captured during execution. + pub fn cache(&self) -> Option<&TrackingCopyCache> { + self.cache.as_ref() + } + + /// Messages emitted during execution. + pub fn messages(&self) -> &Messages { + &self.messages + } + + /// Result captured from a ret call. + pub fn ret(&self) -> Option<&CLValue> { + self.ret.as_ref() + } + + /// Root not found. + pub fn root_not_found(gas_limit: Gas, state_hash: Digest) -> Self { + WasmV1Result { + transfers: Vec::default(), + effects: Effects::new(), + messages: Vec::default(), + limit: gas_limit, + consumed: Gas::zero(), + error: Some(EngineError::RootNotFound(state_hash)), + ret: None, + cache: None, + } + } + + /// Precondition failure. + pub fn precondition_failure(gas_limit: Gas, error: EngineError) -> Self { + WasmV1Result { + transfers: Vec::default(), + effects: Effects::new(), + messages: Vec::default(), + limit: gas_limit, + consumed: Gas::zero(), + error: Some(error), + ret: None, + cache: None, + } + } + + /// Failed to transform transaction into an executable item. 
+ pub fn invalid_executable_item(gas_limit: Gas, error: InvalidRequest) -> Self { + WasmV1Result { + transfers: Vec::default(), + effects: Effects::new(), + messages: Vec::default(), + limit: gas_limit, + consumed: Gas::zero(), + error: Some(EngineError::InvalidExecutableItem(error)), + ret: None, + cache: None, + } + } + + /// Returns `true` if this is a precondition failure. + /// + /// Precondition variant is further described as an execution failure which does not have any + /// effects, and has a gas cost of 0. + pub fn has_precondition_failure(&self) -> bool { + self.error.is_some() && self.consumed == Gas::zero() && self.effects.is_empty() + } + + /// Converts a transfer result to an execution result. + pub fn from_transfer_result(transfer_result: TransferResult, consumed: Gas) -> Option { + // NOTE: for native / wasmless operations limit and consumed are always equal, and + // we can get away with simplifying to one or the other here. + // this is NOT true of wasm based operations however. + match transfer_result { + TransferResult::RootNotFound => None, + TransferResult::Success { + transfers, + effects, + cache, + } => Some(WasmV1Result { + transfers, + limit: consumed, + consumed, + effects, + messages: Messages::default(), + error: None, + ret: None, + cache: Some(cache), + }), + TransferResult::Failure(te) => { + Some(WasmV1Result { + transfers: vec![], + limit: consumed, + consumed, + effects: Effects::default(), // currently not returning effects on failure + messages: Messages::default(), + error: Some(EngineError::Transfer(te)), + ret: None, + cache: None, + }) + } + } + } + + /// Checks effects for an AddUInt512 transform to a balance at imputed addr + /// and for exactly the imputed amount. 
+ pub fn balance_increased_by_amount(&self, addr: URefAddr, amount: U512) -> bool { + if self.effects.is_empty() || self.effects.transforms().is_empty() { + return false; + } + + let key = Key::Balance(addr); + if let Some(transform) = self.effects.transforms().iter().find(|x| x.key() == &key) { + if let TransformKindV2::AddUInt512(added) = transform.kind() { + return *added == amount; + } + } + false + } +} + +/// Helper struct to carry item, entry_point, and arg info for a `WasmV1Request`. +struct ExecutableInfo { + item: ExecutableItem, + entry_point: String, + args: RuntimeArgs, +} + +pub(crate) trait Executable { + fn item(&self) -> ExecutableItem; + fn entry_point(&self) -> &String; + fn args(&self) -> &RuntimeArgs; + fn phase(&self) -> Phase; +} + +/// New type for hanging session specific impl's off of. +struct SessionInfo(ExecutableInfo); + +impl Executable for SessionInfo { + fn item(&self) -> ExecutableItem { + self.0.item.clone() + } + + fn entry_point(&self) -> &String { + &self.0.entry_point + } + + fn args(&self) -> &RuntimeArgs { + &self.0.args + } + + fn phase(&self) -> Phase { + Phase::Session + } +} + +impl TryFrom<&SessionInputData<'_>> for PaymentInfo { + type Error = InvalidRequest; + + fn try_from(input_data: &SessionInputData) -> Result { + match input_data { + SessionInputData::DeploySessionData { data } => PaymentInfo::try_from(data), + SessionInputData::SessionDataV1 { data } => PaymentInfo::try_from(data), + } + } +} + +impl TryFrom<&SessionInputData<'_>> for SessionInfo { + type Error = InvalidRequest; + + fn try_from(input_data: &SessionInputData) -> Result { + match input_data { + SessionInputData::DeploySessionData { data } => SessionInfo::try_from(data), + SessionInputData::SessionDataV1 { data } => SessionInfo::try_from(data), + } + } +} + +impl TryFrom<&SessionDataDeploy<'_>> for SessionInfo { + type Error = InvalidRequest; + + fn try_from(deploy_data: &SessionDataDeploy) -> Result { + let transaction_hash = 
TransactionHash::Deploy(*deploy_data.deploy_hash()); + let session_item = deploy_data.session(); + build_session_info_for_executable_item(session_item, transaction_hash) + } +} + +fn build_session_info_for_executable_item( + session_item: &ExecutableDeployItem, + transaction_hash: TransactionHash, +) -> Result { + let session: ExecutableItem; + let session_entry_point: String; + let session_args: RuntimeArgs; + match session_item { + ExecutableDeployItem::ModuleBytes { module_bytes, args } => { + session = ExecutableItem::Deploy(module_bytes.clone()); + session_entry_point = DEFAULT_ENTRY_POINT.to_string(); + session_args = args.clone(); + } + ExecutableDeployItem::StoredContractByHash { + hash, + entry_point, + args, + } => { + session = ExecutableItem::Invocation( + TransactionInvocationTarget::new_invocable_entity((*hash).into()), + ); + session_entry_point = entry_point.clone(); + session_args = args.clone(); + } + ExecutableDeployItem::StoredContractByName { + name, + entry_point, + args, + } => { + session = ExecutableItem::Invocation( + TransactionInvocationTarget::new_invocable_entity_alias(name.clone()), + ); + session_entry_point = entry_point.clone(); + session_args = args.clone(); + } + ExecutableDeployItem::StoredVersionedContractByHash { + hash, + version, + entry_point, + args, + } => { + session = ExecutableItem::Invocation(TransactionInvocationTarget::ByPackageHash { + addr: hash.value(), + version: *version, + protocol_version_major: None, + }); + session_entry_point = entry_point.clone(); + session_args = args.clone(); + } + ExecutableDeployItem::StoredVersionedContractByName { + name, + version, + entry_point, + args, + } => { + session = ExecutableItem::Invocation(TransactionInvocationTarget::ByPackageName { + name: name.to_owned(), + version: *version, + protocol_version_major: None, + }); + session_entry_point = entry_point.clone(); + session_args = args.clone(); + } + ExecutableDeployItem::Transfer { .. 
} => { + return Err(InvalidRequest::UnsupportedMode( + transaction_hash, + session_item.to_string(), + )); + } + } + + Ok(SessionInfo(ExecutableInfo { + item: session, + entry_point: session_entry_point, + args: session_args, + })) +} + +impl TryFrom<&SessionDataV1<'_>> for SessionInfo { + type Error = InvalidRequest; + + fn try_from(v1_txn: &SessionDataV1) -> Result { + let transaction_hash = TransactionHash::V1(*v1_txn.hash()); + let args = v1_txn.args().clone(); + let session = match v1_txn.target() { + TransactionTarget::Native => { + return Err(InvalidRequest::InvalidTarget( + transaction_hash, + v1_txn.target().to_string(), + )); + } + TransactionTarget::Stored { id, .. } => { + let TransactionEntryPoint::Custom(entry_point) = v1_txn.entry_point() else { + return Err(InvalidRequest::InvalidEntryPoint( + transaction_hash, + v1_txn.entry_point().to_string(), + )); + }; + let item = ExecutableItem::Invocation(id.clone()); + ExecutableInfo { + item, + entry_point: entry_point.clone(), + args, + } + } + TransactionTarget::Session { module_bytes, .. } => { + if *v1_txn.entry_point() != TransactionEntryPoint::Call { + return Err(InvalidRequest::InvalidEntryPoint( + transaction_hash, + v1_txn.entry_point().to_string(), + )); + }; + let kind = if v1_txn.is_install_upgrade() { + SessionKind::InstallUpgradeBytecode + } else { + SessionKind::GenericBytecode + }; + let item = ExecutableItem::SessionBytes { + kind, + module_bytes: module_bytes.clone(), + }; + ExecutableInfo { + item, + entry_point: DEFAULT_ENTRY_POINT.to_owned(), + args, + } + } + }; + + Ok(SessionInfo(session)) + } +} +/// New type for hanging payment specific impl's off of. 
+struct PaymentInfo(ExecutableInfo); + +impl Executable for PaymentInfo { + fn item(&self) -> ExecutableItem { + self.0.item.clone() + } + + fn entry_point(&self) -> &String { + &self.0.entry_point + } + + fn args(&self) -> &RuntimeArgs { + &self.0.args + } + + fn phase(&self) -> Phase { + Phase::Payment + } +} + +impl TryFrom<&SessionDataDeploy<'_>> for PaymentInfo { + type Error = InvalidRequest; + + fn try_from(deploy_data: &SessionDataDeploy) -> Result { + let payment_item = deploy_data.session(); + let transaction_hash = TransactionHash::Deploy(*deploy_data.deploy_hash()); + build_payment_info_for_executable_item(payment_item, transaction_hash) + } +} + +fn build_payment_info_for_executable_item( + payment_item: &ExecutableDeployItem, + transaction_hash: TransactionHash, +) -> Result { + match payment_item { + ExecutableDeployItem::ModuleBytes { module_bytes, args } => { + let payment = if module_bytes.is_empty() { + return Err(InvalidRequest::UnsupportedMode( + transaction_hash, + "standard payment is no longer handled by the execution engine".to_string(), + )); + } else { + ExecutableItem::PaymentBytes(module_bytes.clone()) + }; + Ok(PaymentInfo(ExecutableInfo { + item: payment, + entry_point: DEFAULT_ENTRY_POINT.to_string(), + args: args.clone(), + })) + } + ExecutableDeployItem::StoredContractByHash { + hash, + args, + entry_point, + } => Ok(PaymentInfo(ExecutableInfo { + item: ExecutableItem::Invocation(TransactionInvocationTarget::ByHash(hash.value())), + entry_point: entry_point.clone(), + args: args.clone(), + })), + ExecutableDeployItem::StoredContractByName { + name, + args, + entry_point, + } => Ok(PaymentInfo(ExecutableInfo { + item: ExecutableItem::Invocation(TransactionInvocationTarget::ByName(name.clone())), + entry_point: entry_point.clone(), + args: args.clone(), + })), + ExecutableDeployItem::StoredVersionedContractByHash { + args, + hash, + version, + entry_point, + } => Ok(PaymentInfo(ExecutableInfo { + item: 
ExecutableItem::Invocation(TransactionInvocationTarget::ByPackageHash { + addr: hash.value(), + version: *version, + protocol_version_major: None, + }), + entry_point: entry_point.clone(), + args: args.clone(), + })), + ExecutableDeployItem::StoredVersionedContractByName { + name, + version, + args, + entry_point, + } => Ok(PaymentInfo(ExecutableInfo { + item: ExecutableItem::Invocation(TransactionInvocationTarget::ByPackageName { + name: name.clone(), + version: *version, + protocol_version_major: None, + }), + entry_point: entry_point.clone(), + args: args.clone(), + })), + ExecutableDeployItem::Transfer { .. } => Err(InvalidRequest::UnexpectedVariant( + transaction_hash, + "payment item".to_string(), + )), + } +} + +impl TryFrom<&SessionDataV1<'_>> for PaymentInfo { + type Error = InvalidRequest; + + fn try_from(v1_txn: &SessionDataV1) -> Result { + let transaction_hash = TransactionHash::V1(*v1_txn.hash()); + match v1_txn.pricing_mode() { + mode @ PricingMode::PaymentLimited { + standard_payment, .. + } => { + if *standard_payment { + return Err(InvalidRequest::UnsupportedMode( + transaction_hash, + mode.to_string(), + )); + } + } + mode @ PricingMode::Fixed { .. } | mode @ PricingMode::Prepaid { .. } => { + return Err(InvalidRequest::UnsupportedMode( + transaction_hash, + mode.to_string(), + )); + } + }; + + let payment = match v1_txn.target() { + TransactionTarget::Session { module_bytes, .. } => { + if *v1_txn.entry_point() != TransactionEntryPoint::Call { + return Err(InvalidRequest::InvalidEntryPoint( + transaction_hash, + v1_txn.entry_point().to_string(), + )); + }; + let item = ExecutableItem::PaymentBytes(module_bytes.clone()); + ExecutableInfo { + item, + entry_point: DEFAULT_ENTRY_POINT.to_owned(), + args: v1_txn.args().clone(), + } + } + TransactionTarget::Native | TransactionTarget::Stored { .. 
} => { + return Err(InvalidRequest::InvalidTarget( + transaction_hash, + v1_txn.target().to_string(), + )); + } + }; + + Ok(PaymentInfo(payment)) + } +} diff --git a/execution_engine/src/execution/error.rs b/execution_engine/src/execution/error.rs new file mode 100644 index 0000000000..cf25ad7ea4 --- /dev/null +++ b/execution_engine/src/execution/error.rs @@ -0,0 +1,309 @@ +//! Execution error and supporting code. +use std::str::Utf8Error; +use thiserror::Error; + +use casper_storage::{global_state, tracking_copy::TrackingCopyError}; + +use casper_types::{ + account::{AddKeyFailure, RemoveKeyFailure, SetThresholdFailure, UpdateKeyFailure}, + bytesrepr, + execution::TransformError, + system, AccessRights, AddressableEntityHash, ApiError, ByteCodeHash, CLType, CLValueError, + ContractRuntimeTag, EntityVersionKey, Key, PackageHash, StoredValueTypeMismatch, URef, +}; +use casper_wasm::elements; + +use crate::{ + resolvers::error::ResolverError, + runtime::{stack, PreprocessingError}, +}; + +/// Possible execution errors. +#[derive(Error, Debug, Clone)] +#[non_exhaustive] +pub enum Error { + /// WASM interpreter error. + #[error("Interpreter error: {}", _0)] + Interpreter(String), + /// Storage error. + #[error("Storage error: {}", _0)] + Storage(global_state::error::Error), + /// Failed to (de)serialize bytes. + #[error("Serialization error: {}", _0)] + BytesRepr(bytesrepr::Error), + /// Unable to find named key. + #[error("Named key {} not found", _0)] + NamedKeyNotFound(String), + /// Unable to find a key. + #[error("Key {} not found", _0)] + KeyNotFound(Key), + /// Unable to find an account. + #[error("Account {:?} not found", _0)] + AccountNotFound(Key), + /// Type mismatch error. + #[error("{}", _0)] + TypeMismatch(StoredValueTypeMismatch), + /// Invalid access. + #[error("Invalid access rights: {}", required)] + InvalidAccess { + /// Required access rights of the operation. + required: AccessRights, + }, + /// Forged reference error. 
+ #[error("Forged reference: {}", _0)] + ForgedReference(URef), + /// Unable to find a function. + #[error("Function not found: {}", _0)] + FunctionNotFound(String), + /// Parity WASM error. + #[error("{}", _0)] + ParityWasm(elements::Error), + /// Error optimizing WASM. + #[error("WASM optimizer error")] + WasmOptimizer, + /// Execution exceeded the gas limit. + #[error("Out of gas error")] + GasLimit, + /// A stored smart contract called a ret function. + #[error("Return")] + Ret(Vec), + /// Error using WASM host function resolver. + #[error("Resolver error: {}", _0)] + Resolver(ResolverError), + /// Reverts execution with a provided status + #[error("{}", _0)] + Revert(ApiError), + /// Error adding an associated key. + #[error("{}", _0)] + AddKeyFailure(AddKeyFailure), + /// Error removing an associated key. + #[error("{}", _0)] + RemoveKeyFailure(RemoveKeyFailure), + /// Error updating an associated key. + #[error("{}", _0)] + UpdateKeyFailure(UpdateKeyFailure), + /// Error setting threshold on associated key. + #[error("{}", _0)] + SetThresholdFailure(SetThresholdFailure), + /// Error executing system contract. + #[error("{}", _0)] + SystemContract(system::Error), + /// Weight of all used associated keys does not meet account's deploy threshold. + #[error("Deployment authorization failure")] + DeploymentAuthorizationFailure, + /// Host buffer expected a value to be present. + #[error("Expected return value")] + ExpectedReturnValue, + /// Error calling a host function in a wrong context. + #[error("Invalid context")] + InvalidContext, + /// Unable to execute a deploy with invalid major protocol version. + #[error("Incompatible protocol major version. Expected version {expected} but actual version is {actual}")] + IncompatibleProtocolMajorVersion { + /// Expected major version. + expected: u32, + /// Actual major version supplied. + actual: u32, + }, + /// Error converting a CLValue. 
+ #[error("{0}")] + CLValue(CLValueError), + /// WASM bytes contains an unsupported "start" section. + #[error("Unsupported Wasm start")] + UnsupportedWasmStart, + /// Contract package has no active contract versions. + #[error("No active contract versions for contract package")] + NoActiveEntityVersions(PackageHash), + /// Invalid entity version supplied. + #[error("Invalid entity version: {}", _0)] + InvalidEntityVersion(EntityVersionKey), + /// Invalid entity version supplied. + #[error("Disabled entity version: {}", _0)] + DisabledEntityVersion(EntityVersionKey), + /// Invalid entity version supplied. + #[error("Missing entity version: {}", _0)] + MissingEntityVersion(EntityVersionKey), + /// Contract does not have specified entry point. + #[error("No such method: {}", _0)] + NoSuchMethod(String), + /// Contract does + #[error("Error calling a template entry point: {}", _0)] + TemplateMethod(String), + /// Error processing WASM bytes. + #[error("Wasm preprocessing error: {}", _0)] + WasmPreprocessing(PreprocessingError), + /// Unexpected variant of a stored value. + #[error("Unexpected variant of a stored value")] + UnexpectedStoredValueVariant, + /// Error upgrading a locked contract package. + #[error("A locked contract cannot be upgraded")] + LockedEntity(PackageHash), + /// Unable to find a contract by a specified hash address. + #[error("Invalid contract: {}", _0)] + InvalidEntity(AddressableEntityHash), + /// Unable to find the WASM bytes specified by a hash address. + #[error("Invalid contract WASM: {}", _0)] + InvalidByteCode(ByteCodeHash), + /// Error calling a smart contract with a missing argument. + #[error("Missing argument: {name}")] + MissingArgument { + /// Name of the required argument. + name: String, + }, + /// Error writing a dictionary item key which exceeded maximum allowed length. + #[error("Dictionary item key exceeded maximum length")] + DictionaryItemKeyExceedsLength, + /// Missing system contract hash. 
+ #[error("Missing system contract hash: {0}")] + MissingSystemContractHash(String), + /// An attempt to push to the runtime stack which is already at the maximum height. + #[error("Runtime stack overflow")] + RuntimeStackOverflow, + /// The runtime stack is `None`. + #[error("Runtime stack missing")] + MissingRuntimeStack, + /// Contract is disabled. + #[error("Contract is disabled")] + DisabledEntity(AddressableEntityHash), + /// Transform error. + #[error(transparent)] + Transform(TransformError), + /// Invalid key + #[error("Invalid key {0}")] + UnexpectedKeyVariant(Key), + /// Failed to transfer tokens on a private chain. + #[error("Failed to transfer with unrestricted transfers disabled")] + DisabledUnrestrictedTransfers, + /// Storage error. + #[error("Tracking copy error: {0}")] + TrackingCopy(TrackingCopyError), + /// Weight of all used associated keys does not meet entity's upgrade threshold. + #[error("Deployment authorization failure")] + UpgradeAuthorizationFailure, + /// The EntryPoints contains an invalid entry. + #[error("The EntryPoints contains an invalid entry")] + InvalidEntryPointType, + /// Invalid operation. + #[error("The imputed operation is invalid")] + InvalidImputedOperation, + /// Invalid string encoding. + #[error("Invalid UTF-8 string encoding: {0}")] + InvalidUtf8Encoding(Utf8Error), + /// Incompatible transaction runtime. + #[error("Incompatible runtime: {0}")] + IncompatibleRuntime(ContractRuntimeTag), + /// No matching entity version key. + #[error("No matching entity version key")] + NoMatchingEntityVersionKey, + /// Ambiguous entity version and unable to determine entity version key. 
+ #[error("Ambiguous entity version")] + AmbiguousEntityVersion, +} + +impl From for Error { + fn from(error: PreprocessingError) -> Self { + Error::WasmPreprocessing(error) + } +} + +impl From for Error { + fn from(_optimizer_error: casper_wasm_utils::OptimizerError) -> Self { + Error::WasmOptimizer + } +} + +impl Error { + /// Returns new type mismatch error. + pub fn type_mismatch(expected: CLType, found: CLType) -> Error { + Error::TypeMismatch(StoredValueTypeMismatch::new( + format!("{:?}", expected), + format!("{:?}", found), + )) + } +} + +impl casper_wasmi::HostError for Error {} + +impl From for Error { + fn from(error: casper_wasmi::Error) -> Self { + match error + .as_host_error() + .and_then(|host_error| host_error.downcast_ref::()) + { + Some(error) => error.clone(), + None => Error::Interpreter(error.into()), + } + } +} + +impl From for Error { + fn from(e: global_state::error::Error) -> Self { + Error::Storage(e) + } +} + +impl From for Error { + fn from(e: bytesrepr::Error) -> Self { + Error::BytesRepr(e) + } +} + +impl From for Error { + fn from(e: elements::Error) -> Self { + Error::ParityWasm(e) + } +} + +impl From for Error { + fn from(err: ResolverError) -> Self { + Error::Resolver(err) + } +} + +impl From for Error { + fn from(err: AddKeyFailure) -> Self { + Error::AddKeyFailure(err) + } +} + +impl From for Error { + fn from(err: RemoveKeyFailure) -> Self { + Error::RemoveKeyFailure(err) + } +} + +impl From for Error { + fn from(err: UpdateKeyFailure) -> Self { + Error::UpdateKeyFailure(err) + } +} + +impl From for Error { + fn from(err: SetThresholdFailure) -> Self { + Error::SetThresholdFailure(err) + } +} + +impl From for Error { + fn from(error: system::Error) -> Self { + Error::SystemContract(error) + } +} + +impl From for Error { + fn from(e: CLValueError) -> Self { + Error::CLValue(e) + } +} + +impl From for Error { + fn from(_: stack::RuntimeStackOverflow) -> Self { + Error::RuntimeStackOverflow + } +} + +impl From for Error { + fn 
from(e: TrackingCopyError) -> Self { + Error::TrackingCopy(e) + } +} diff --git a/execution_engine/src/execution/executor.rs b/execution_engine/src/execution/executor.rs new file mode 100644 index 0000000000..372fe02188 --- /dev/null +++ b/execution_engine/src/execution/executor.rs @@ -0,0 +1,226 @@ +use std::{cell::RefCell, collections::BTreeSet, rc::Rc}; + +use casper_storage::{ + global_state::{error::Error as GlobalStateError, state::StateReader}, + tracking_copy::TrackingCopy, + AddressGenerator, +}; +use casper_types::{ + account::AccountHash, contract_messages::Messages, contracts::NamedKeys, execution::Effects, + ContextAccessRights, EntityAddr, EntryPointType, Gas, Key, Phase, RuntimeArgs, + RuntimeFootprint, StoredValue, TransactionHash, U512, +}; + +use crate::{ + engine_state::{execution_kind::ExecutionKind, BlockInfo, EngineConfig, WasmV1Result}, + execution::ExecError, + runtime::{Runtime, RuntimeStack}, + runtime_context::{AllowInstallUpgrade, RuntimeContext}, +}; + +const ARG_AMOUNT: &str = "amount"; + +fn try_get_amount(runtime_args: &RuntimeArgs) -> Result { + runtime_args + .try_get_number(ARG_AMOUNT) + .map_err(ExecError::from) +} + +/// Executor object deals with execution of WASM modules. +pub struct Executor { + config: EngineConfig, +} + +impl Executor { + /// Creates new executor object. + pub fn new(config: EngineConfig) -> Self { + Executor { config } + } + + /// Executes a WASM module. + /// + /// This method checks if a given contract hash is a system contract, and then short circuits to + /// a specific native implementation of it. Otherwise, a supplied WASM module is executed. 
+ #[allow(clippy::too_many_arguments)] + pub(crate) fn exec( + &self, + execution_kind: ExecutionKind, + args: RuntimeArgs, + entity_addr: EntityAddr, + runtime_footprint: Rc>, + named_keys: &mut NamedKeys, + access_rights: ContextAccessRights, + authorization_keys: BTreeSet, + account_hash: AccountHash, + block_info: BlockInfo, + txn_hash: TransactionHash, + gas_limit: Gas, + tracking_copy: Rc>>, + phase: Phase, + stack: RuntimeStack, + ) -> WasmV1Result + where + R: StateReader, + { + let spending_limit: U512 = match try_get_amount(&args) { + Ok(spending_limit) => spending_limit, + Err(error) => { + return WasmV1Result::new( + gas_limit, + Gas::zero(), + Effects::default(), + Vec::default(), + Vec::default(), + Some(error.into()), + None, + None, + ); + } + }; + + let address_generator = { + let generator = AddressGenerator::new(txn_hash.as_ref(), phase); + Rc::new(RefCell::new(generator)) + }; + + let context_key = if self.config.enable_entity { + Key::AddressableEntity(entity_addr) + } else { + match entity_addr { + EntityAddr::System(hash) | EntityAddr::SmartContract(hash) => Key::Hash(hash), + EntityAddr::Account(hash) => Key::Account(AccountHash::new(hash)), + } + }; + + let allow_install_upgrade = match execution_kind { + ExecutionKind::InstallerUpgrader(_) + | ExecutionKind::Stored { .. } + | ExecutionKind::VersionedCall { .. 
} + | ExecutionKind::Deploy(_) => AllowInstallUpgrade::Allowed, + ExecutionKind::Standard(_) => AllowInstallUpgrade::Forbidden, + }; + + let context = self.create_runtime_context( + named_keys, + runtime_footprint, + context_key, + authorization_keys, + access_rights, + account_hash, + address_generator, + tracking_copy, + block_info, + txn_hash, + phase, + args.clone(), + gas_limit, + spending_limit, + EntryPointType::Caller, + allow_install_upgrade, + ); + + let mut runtime = Runtime::new(context); + + let result = match execution_kind { + ExecutionKind::Standard(module_bytes) + | ExecutionKind::InstallerUpgrader(module_bytes) + | ExecutionKind::Deploy(module_bytes) => { + runtime.execute_module_bytes(module_bytes, stack) + } + ExecutionKind::Stored { + entity_hash, + entry_point, + } => { + // These args are passed through here as they are required to construct the new + // `Runtime` during the contract's execution (i.e. inside + // `Runtime::execute_contract`). + runtime.call_contract_with_stack(entity_hash, &entry_point, args, stack) + } + ExecutionKind::VersionedCall { + package_hash, + entity_version, + protocol_version_major, + entry_point, + } => runtime.call_package_version_with_stack( + package_hash, + protocol_version_major, + entity_version, + entry_point, + args, + stack, + ), + }; + match result { + Ok(ret) => WasmV1Result::new( + gas_limit, + runtime.context().gas_counter(), + runtime.context().effects(), + runtime.context().transfers().to_owned(), + runtime.context().messages(), + None, + Some(ret), + Some(runtime.context().cache()), + ), + Err(error) => WasmV1Result::new( + gas_limit, + runtime.context().gas_counter(), + Effects::new(), + vec![], + Messages::new(), + Some(error.into()), + None, + None, + ), + } + } + + /// Creates new runtime context. 
+ #[allow(clippy::too_many_arguments)] + fn create_runtime_context<'a, R>( + &self, + named_keys: &'a mut NamedKeys, + runtime_footprint: Rc>, + context_key: Key, + authorization_keys: BTreeSet, + access_rights: ContextAccessRights, + account_hash: AccountHash, + address_generator: Rc>, + tracking_copy: Rc>>, + block_info: BlockInfo, + txn_hash: TransactionHash, + phase: Phase, + runtime_args: RuntimeArgs, + gas_limit: Gas, + remaining_spending_limit: U512, + entry_point_type: EntryPointType, + allow_install_upgrade: AllowInstallUpgrade, + ) -> RuntimeContext<'a, R> + where + R: StateReader, + { + let gas_counter = Gas::default(); + let transfers = Vec::default(); + + RuntimeContext::new( + named_keys, + runtime_footprint, + context_key, + authorization_keys, + access_rights, + account_hash, + address_generator, + tracking_copy, + self.config.clone(), + block_info, + txn_hash, + phase, + runtime_args, + gas_limit, + gas_counter, + transfers, + remaining_spending_limit, + entry_point_type, + allow_install_upgrade, + ) + } +} diff --git a/execution_engine/src/execution/mod.rs b/execution_engine/src/execution/mod.rs new file mode 100644 index 0000000000..b4cf640369 --- /dev/null +++ b/execution_engine/src/execution/mod.rs @@ -0,0 +1,7 @@ +//! Code execution. +mod error; +#[macro_use] +mod executor; + +pub use self::error::Error as ExecError; +pub(crate) use self::executor::Executor; diff --git a/execution_engine/src/lib.rs b/execution_engine/src/lib.rs index c22f2f92ea..fb286737f9 100644 --- a/execution_engine/src/lib.rs +++ b/execution_engine/src/lib.rs @@ -1,10 +1,10 @@ //! The engine which executes smart contracts on the Casper network. 
-#![doc(html_root_url = "https://docs.rs/casper-execution-engine/1.0.0")] +#![doc(html_root_url = "https://docs.rs/casper-execution-engine/8.1.1")] #![doc( - html_favicon_url = "https://raw.githubusercontent.com/CasperLabs/casper-node/master/images/CasperLabs_Logo_Favicon_RGB_50px.png", - html_logo_url = "https://raw.githubusercontent.com/CasperLabs/casper-node/master/images/CasperLabs_Logo_Symbol_RGB.png", - test(attr(forbid(warnings))) + html_favicon_url = "https://raw.githubusercontent.com/casper-network/casper-node/blob/dev/images/Casper_Logo_Favicon_48.png", + html_logo_url = "https://raw.githubusercontent.com/casper-network/casper-node/blob/dev/images/Casper_Logo_Favicon.png", + test(attr(deny(warnings))) )] #![warn( missing_docs, @@ -12,8 +12,10 @@ trivial_numeric_casts, unused_qualifications )] +#![cfg_attr(docsrs, feature(doc_auto_cfg))] -pub mod config; -pub mod core; -pub mod shared; -pub mod storage; +pub mod engine_state; +pub mod execution; +pub mod resolvers; +pub mod runtime; +pub mod runtime_context; diff --git a/execution_engine/src/resolvers/error.rs b/execution_engine/src/resolvers/error.rs new file mode 100644 index 0000000000..ae15d1af28 --- /dev/null +++ b/execution_engine/src/resolvers/error.rs @@ -0,0 +1,16 @@ +//! Errors that may be emitted by a host function resolver. +use thiserror::Error; + +use casper_types::ProtocolVersion; + +/// Error conditions of a host function resolver. +#[derive(Error, Debug, Copy, Clone)] +#[non_exhaustive] +pub enum ResolverError { + /// Unknown protocol version. + #[error("Unknown protocol version: {}", _0)] + UnknownProtocolVersion(ProtocolVersion), + /// WASM module does not export a memory section. + #[error("No imported memory")] + NoImportedMemory, +} diff --git a/execution_engine/src/resolvers/memory_resolver.rs b/execution_engine/src/resolvers/memory_resolver.rs new file mode 100644 index 0000000000..9777997cd4 --- /dev/null +++ b/execution_engine/src/resolvers/memory_resolver.rs @@ -0,0 +1,13 @@ +//! 
This module contains resolver of a memory section of the WASM code. +use casper_wasmi::MemoryRef; + +use super::error::ResolverError; + +/// This trait takes care of returning an instance of allocated memory. +/// +/// This happens once the WASM program tries to resolve "memory". Whenever +/// contract didn't request a memory this method should return an Error. +pub trait MemoryResolver { + /// Returns a memory instance. + fn memory_ref(&self) -> Result; +} diff --git a/execution_engine/src/resolvers/mod.rs b/execution_engine/src/resolvers/mod.rs new file mode 100644 index 0000000000..7ff1a6244b --- /dev/null +++ b/execution_engine/src/resolvers/mod.rs @@ -0,0 +1,49 @@ +//! This module is responsible for resolving host functions from within the WASM engine. +pub mod error; +pub mod memory_resolver; +pub(crate) mod v1_function_index; +mod v1_resolver; + +use casper_wasmi::ModuleImportResolver; + +use casper_types::ProtocolVersion; + +use self::error::ResolverError; +use super::engine_state::EngineConfig; +use crate::resolvers::memory_resolver::MemoryResolver; + +/// Creates a module resolver for given protocol version. +/// +/// * `protocol_version` Version of the protocol. Can't be lower than 1. 
+pub(crate) fn create_module_resolver( + _protocol_version: ProtocolVersion, + engine_config: &EngineConfig, +) -> Result { + Ok(v1_resolver::RuntimeModuleImportResolver::new( + engine_config.wasm_config().v1().max_memory(), + )) + // if in future it is necessary to pick a different resolver + // based on the protocol version, modify this logic accordingly + // if there is an unsupported / unknown protocol version return the following error: + // Err(ResolverError::UnknownProtocolVersion(protocol_version)) +} + +#[cfg(test)] +mod tests { + use casper_types::ProtocolVersion; + + use super::*; + + #[test] + fn resolve_invalid_module() { + // NOTE: we are currently not enforcing underlying logic + assert!( + create_module_resolver(ProtocolVersion::default(), &EngineConfig::default()).is_ok() + ); + } + + #[test] + fn protocol_version_1_always_resolves() { + assert!(create_module_resolver(ProtocolVersion::V1_0_0, &EngineConfig::default()).is_ok()); + } +} diff --git a/execution_engine/src/resolvers/v1_function_index.rs b/execution_engine/src/resolvers/v1_function_index.rs new file mode 100644 index 0000000000..e8fca09b20 --- /dev/null +++ b/execution_engine/src/resolvers/v1_function_index.rs @@ -0,0 +1,105 @@ +//! WASM host function resolver for protocol version 1.x.x. +use num_derive::{FromPrimitive, ToPrimitive}; +use num_traits::{FromPrimitive, ToPrimitive}; + +/// Enum representing unique IDs of host functions supported in major version 1. 
+#[derive(Debug, PartialEq, FromPrimitive, ToPrimitive, Clone, Copy)] +#[repr(usize)] +pub(crate) enum FunctionIndex { + WriteFuncIndex, + ReadFuncIndex, + AddFuncIndex, + NewFuncIndex, + RetFuncIndex, + CallContractFuncIndex, + GetKeyFuncIndex, + GasFuncIndex, + HasKeyFuncIndex, + PutKeyFuncIndex, + IsValidURefFnIndex, + RevertFuncIndex, + AddAssociatedKeyFuncIndex, + RemoveAssociatedKeyFuncIndex, + UpdateAssociatedKeyFuncIndex, + SetActionThresholdFuncIndex, + LoadNamedKeysFuncIndex, + RemoveKeyFuncIndex, + GetCallerIndex, + GetBlocktimeIndex, + CreatePurseIndex, + TransferToAccountIndex, + TransferFromPurseToAccountIndex, + TransferFromPurseToPurseIndex, + GetBalanceIndex, + GetPhaseIndex, + GetSystemContractIndex, + GetMainPurseIndex, + ReadHostBufferIndex, + CreateContractPackageAtHash, + AddContractVersion, + AddContractVersionWithMessageTopics, + AddPackageVersionWithMessageTopics, + DisableContractVersion, + CallVersionedContract, + CreateContractUserGroup, + #[cfg(feature = "test-support")] + PrintIndex, + GetRuntimeArgsizeIndex, + GetRuntimeArgIndex, + RemoveContractUserGroupIndex, + ExtendContractUserGroupURefsIndex, + RemoveContractUserGroupURefsIndex, + Blake2b, + NewDictionaryFuncIndex, + DictionaryGetFuncIndex, + DictionaryPutFuncIndex, + LoadCallStack, + LoadAuthorizationKeys, + RandomBytes, + DictionaryReadFuncIndex, + EnableContractVersion, + ManageMessageTopic, + EmitMessage, + LoadCallerInformation, + GetBlockInfoIndex, + GenericHash, + RecoverSecp256k1, + VerifySignature, + CallPackageVersion, +} + +impl From for usize { + fn from(index: FunctionIndex) -> usize { + // NOTE: This can't fail as `FunctionIndex` is represented by usize, + // so this serves mostly as a syntax sugar. 
+ index.to_usize().unwrap() + } +} + +impl TryFrom for FunctionIndex { + type Error = &'static str; + fn try_from(value: usize) -> Result { + FromPrimitive::from_usize(value).ok_or("Invalid function index") + } +} + +#[cfg(test)] +mod tests { + use super::FunctionIndex; + + #[test] + fn primitive_to_enum() { + FunctionIndex::try_from(19).expect("Unable to create enum from number"); + } + + #[test] + fn enum_to_primitive() { + let element = FunctionIndex::UpdateAssociatedKeyFuncIndex; + let _primitive: usize = element.into(); + } + + #[test] + fn invalid_index() { + assert!(FunctionIndex::try_from(123_456_789usize).is_err()); + } +} diff --git a/execution_engine/src/core/resolvers/v1_resolver.rs b/execution_engine/src/resolvers/v1_resolver.rs similarity index 75% rename from execution_engine/src/core/resolvers/v1_resolver.rs rename to execution_engine/src/resolvers/v1_resolver.rs index 1cae468339..bd90128ccd 100644 --- a/execution_engine/src/core/resolvers/v1_resolver.rs +++ b/execution_engine/src/resolvers/v1_resolver.rs @@ -1,6 +1,6 @@ use std::cell::RefCell; -use wasmi::{ +use casper_wasmi::{ memory_units::Pages, Error as InterpreterError, FuncInstance, FuncRef, MemoryDescriptor, MemoryInstance, MemoryRef, ModuleImportResolver, Signature, ValueType, }; @@ -164,6 +164,14 @@ impl ModuleImportResolver for RuntimeModuleImportResolver { Signature::new(&[ValueType::I32; 10][..], Some(ValueType::I32)), FunctionIndex::AddContractVersion.into(), ), + "casper_add_contract_version_with_message_topics" => FuncInstance::alloc_host( + Signature::new(&[ValueType::I32; 11][..], Some(ValueType::I32)), + FunctionIndex::AddContractVersionWithMessageTopics.into(), + ), + "casper_add_package_version_with_message_topics" => FuncInstance::alloc_host( + Signature::new(&[ValueType::I32; 11][..], Some(ValueType::I32)), + FunctionIndex::AddPackageVersionWithMessageTopics.into(), + ), "casper_disable_contract_version" => FuncInstance::alloc_host( Signature::new(&[ValueType::I32; 4][..], 
Some(ValueType::I32)), FunctionIndex::DisableContractVersion.into(), @@ -200,19 +208,75 @@ impl ModuleImportResolver for RuntimeModuleImportResolver { Signature::new(&[ValueType::I32; 4][..], Some(ValueType::I32)), FunctionIndex::Blake2b.into(), ), - "casper_record_transfer" => FuncInstance::alloc_host( - Signature::new(&[ValueType::I32; 10][..], Some(ValueType::I32)), - FunctionIndex::RecordTransfer.into(), + "casper_load_call_stack" => FuncInstance::alloc_host( + Signature::new(&[ValueType::I32; 2][..], Some(ValueType::I32)), + FunctionIndex::LoadCallStack.into(), ), - "casper_record_era_info" => FuncInstance::alloc_host( - Signature::new(&[ValueType::I32; 4][..], Some(ValueType::I32)), - FunctionIndex::RecordEraInfo.into(), + "casper_load_caller_information" => FuncInstance::alloc_host( + Signature::new(&[ValueType::I32; 3][..], Some(ValueType::I32)), + FunctionIndex::LoadCallerInformation.into(), ), #[cfg(feature = "test-support")] "casper_print" => FuncInstance::alloc_host( Signature::new(&[ValueType::I32; 2][..], None), FunctionIndex::PrintIndex.into(), ), + "casper_dictionary_get" => FuncInstance::alloc_host( + Signature::new(&[ValueType::I32; 5][..], Some(ValueType::I32)), + FunctionIndex::DictionaryGetFuncIndex.into(), + ), + "casper_dictionary_read" => FuncInstance::alloc_host( + Signature::new(&[ValueType::I32; 3][..], Some(ValueType::I32)), + FunctionIndex::DictionaryReadFuncIndex.into(), + ), + "casper_dictionary_put" => FuncInstance::alloc_host( + Signature::new(&[ValueType::I32; 6][..], Some(ValueType::I32)), + FunctionIndex::DictionaryPutFuncIndex.into(), + ), + "casper_new_dictionary" => FuncInstance::alloc_host( + Signature::new(&[ValueType::I32; 1][..], Some(ValueType::I32)), + FunctionIndex::NewDictionaryFuncIndex.into(), + ), + "casper_load_authorization_keys" => FuncInstance::alloc_host( + Signature::new(&[ValueType::I32; 2][..], Some(ValueType::I32)), + FunctionIndex::LoadAuthorizationKeys.into(), + ), + "casper_random_bytes" => 
FuncInstance::alloc_host( + Signature::new(&[ValueType::I32; 2][..], Some(ValueType::I32)), + FunctionIndex::RandomBytes.into(), + ), + "casper_enable_contract_version" => FuncInstance::alloc_host( + Signature::new(&[ValueType::I32; 4][..], Some(ValueType::I32)), + FunctionIndex::EnableContractVersion.into(), + ), + "casper_manage_message_topic" => FuncInstance::alloc_host( + Signature::new(&[ValueType::I32; 4][..], Some(ValueType::I32)), + FunctionIndex::ManageMessageTopic.into(), + ), + "casper_emit_message" => FuncInstance::alloc_host( + Signature::new(&[ValueType::I32; 4][..], Some(ValueType::I32)), + FunctionIndex::EmitMessage.into(), + ), + "casper_get_block_info" => FuncInstance::alloc_host( + Signature::new(&[ValueType::I32; 2][..], None), + FunctionIndex::GetBlockInfoIndex.into(), + ), + "casper_generic_hash" => FuncInstance::alloc_host( + Signature::new(&[ValueType::I32; 5][..], Some(ValueType::I32)), + FunctionIndex::GenericHash.into(), + ), + "casper_recover_secp256k1" => FuncInstance::alloc_host( + Signature::new(&[ValueType::I32; 6][..], Some(ValueType::I32)), + FunctionIndex::RecoverSecp256k1.into(), + ), + "casper_verify_signature" => FuncInstance::alloc_host( + Signature::new(&[ValueType::I32; 6][..], Some(ValueType::I32)), + FunctionIndex::VerifySignature.into(), + ), + "casper_call_package_version" => FuncInstance::alloc_host( + Signature::new(&[ValueType::I32; 11][..], Some(ValueType::I32)), + FunctionIndex::CallPackageVersion.into(), + ), _ => { return Err(InterpreterError::Function(format!( "host module doesn't export function with name {}", diff --git a/execution_engine/src/runtime/args.rs b/execution_engine/src/runtime/args.rs new file mode 100644 index 0000000000..5f27d2ceb2 --- /dev/null +++ b/execution_engine/src/runtime/args.rs @@ -0,0 +1,229 @@ +use casper_wasmi::{FromValue, RuntimeArgs, Trap}; + +pub(crate) trait Args +where + Self: Sized, +{ + fn parse(args: RuntimeArgs) -> Result; +} + +impl Args for (T1,) +where + T1: FromValue + 
Sized, +{ + fn parse(args: RuntimeArgs) -> Result { + let a0: T1 = args.nth_checked(0)?; + Ok((a0,)) + } +} + +impl Args for (T1, T2) +where + T1: FromValue + Sized, + T2: FromValue + Sized, +{ + fn parse(args: RuntimeArgs) -> Result { + let a0: T1 = args.nth_checked(0)?; + let a1: T2 = args.nth_checked(1)?; + Ok((a0, a1)) + } +} + +impl Args for (T1, T2, T3) +where + T1: FromValue + Sized, + T2: FromValue + Sized, + T3: FromValue + Sized, +{ + fn parse(args: RuntimeArgs) -> Result { + let a0: T1 = args.nth_checked(0)?; + let a1: T2 = args.nth_checked(1)?; + let a2: T3 = args.nth_checked(2)?; + Ok((a0, a1, a2)) + } +} + +impl Args for (T1, T2, T3, T4) +where + T1: FromValue + Sized, + T2: FromValue + Sized, + T3: FromValue + Sized, + T4: FromValue + Sized, +{ + fn parse(args: RuntimeArgs) -> Result { + let a0: T1 = args.nth_checked(0)?; + let a1: T2 = args.nth_checked(1)?; + let a2: T3 = args.nth_checked(2)?; + let a3: T4 = args.nth_checked(3)?; + Ok((a0, a1, a2, a3)) + } +} + +impl Args for (T1, T2, T3, T4, T5) +where + T1: FromValue + Sized, + T2: FromValue + Sized, + T3: FromValue + Sized, + T4: FromValue + Sized, + T5: FromValue + Sized, +{ + fn parse(args: RuntimeArgs) -> Result { + let a0: T1 = args.nth_checked(0)?; + let a1: T2 = args.nth_checked(1)?; + let a2: T3 = args.nth_checked(2)?; + let a3: T4 = args.nth_checked(3)?; + let a4: T5 = args.nth_checked(4)?; + Ok((a0, a1, a2, a3, a4)) + } +} + +impl Args for (T1, T2, T3, T4, T5, T6) +where + T1: FromValue + Sized, + T2: FromValue + Sized, + T3: FromValue + Sized, + T4: FromValue + Sized, + T5: FromValue + Sized, + T6: FromValue + Sized, +{ + fn parse(args: RuntimeArgs) -> Result { + let a0: T1 = args.nth_checked(0)?; + let a1: T2 = args.nth_checked(1)?; + let a2: T3 = args.nth_checked(2)?; + let a3: T4 = args.nth_checked(3)?; + let a4: T5 = args.nth_checked(4)?; + let a5: T6 = args.nth_checked(5)?; + Ok((a0, a1, a2, a3, a4, a5)) + } +} + +impl Args for (T1, T2, T3, T4, T5, T6, T7) +where + T1: FromValue + 
Sized, + T2: FromValue + Sized, + T3: FromValue + Sized, + T4: FromValue + Sized, + T5: FromValue + Sized, + T6: FromValue + Sized, + T7: FromValue + Sized, +{ + fn parse(args: RuntimeArgs) -> Result { + let a0: T1 = args.nth_checked(0)?; + let a1: T2 = args.nth_checked(1)?; + let a2: T3 = args.nth_checked(2)?; + let a3: T4 = args.nth_checked(3)?; + let a4: T5 = args.nth_checked(4)?; + let a5: T6 = args.nth_checked(5)?; + let a6: T7 = args.nth_checked(6)?; + Ok((a0, a1, a2, a3, a4, a5, a6)) + } +} + +impl Args for (T1, T2, T3, T4, T5, T6, T7, T8) +where + T1: FromValue + Sized, + T2: FromValue + Sized, + T3: FromValue + Sized, + T4: FromValue + Sized, + T5: FromValue + Sized, + T6: FromValue + Sized, + T7: FromValue + Sized, + T8: FromValue + Sized, +{ + fn parse(args: RuntimeArgs) -> Result { + let a0: T1 = args.nth_checked(0)?; + let a1: T2 = args.nth_checked(1)?; + let a2: T3 = args.nth_checked(2)?; + let a3: T4 = args.nth_checked(3)?; + let a4: T5 = args.nth_checked(4)?; + let a5: T6 = args.nth_checked(5)?; + let a6: T7 = args.nth_checked(6)?; + let a7: T8 = args.nth_checked(7)?; + Ok((a0, a1, a2, a3, a4, a5, a6, a7)) + } +} + +impl Args for (T1, T2, T3, T4, T5, T6, T7, T8, T9) +where + T1: FromValue + Sized, + T2: FromValue + Sized, + T3: FromValue + Sized, + T4: FromValue + Sized, + T5: FromValue + Sized, + T6: FromValue + Sized, + T7: FromValue + Sized, + T8: FromValue + Sized, + T9: FromValue + Sized, +{ + fn parse(args: RuntimeArgs) -> Result { + let a0: T1 = args.nth_checked(0)?; + let a1: T2 = args.nth_checked(1)?; + let a2: T3 = args.nth_checked(2)?; + let a3: T4 = args.nth_checked(3)?; + let a4: T5 = args.nth_checked(4)?; + let a5: T6 = args.nth_checked(5)?; + let a6: T7 = args.nth_checked(6)?; + let a7: T8 = args.nth_checked(7)?; + let a8: T9 = args.nth_checked(8)?; + Ok((a0, a1, a2, a3, a4, a5, a6, a7, a8)) + } +} + +impl Args for (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10) +where + T1: FromValue + Sized, + T2: FromValue + Sized, + T3: FromValue + 
Sized, + T4: FromValue + Sized, + T5: FromValue + Sized, + T6: FromValue + Sized, + T7: FromValue + Sized, + T8: FromValue + Sized, + T9: FromValue + Sized, + T10: FromValue + Sized, +{ + fn parse(args: RuntimeArgs) -> Result { + let a0: T1 = args.nth_checked(0)?; + let a1: T2 = args.nth_checked(1)?; + let a2: T3 = args.nth_checked(2)?; + let a3: T4 = args.nth_checked(3)?; + let a4: T5 = args.nth_checked(4)?; + let a5: T6 = args.nth_checked(5)?; + let a6: T7 = args.nth_checked(6)?; + let a7: T8 = args.nth_checked(7)?; + let a8: T9 = args.nth_checked(8)?; + let a9: T10 = args.nth_checked(9)?; + Ok((a0, a1, a2, a3, a4, a5, a6, a7, a8, a9)) + } +} + +impl Args + for (T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11) +where + T1: FromValue + Sized, + T2: FromValue + Sized, + T3: FromValue + Sized, + T4: FromValue + Sized, + T5: FromValue + Sized, + T6: FromValue + Sized, + T7: FromValue + Sized, + T8: FromValue + Sized, + T9: FromValue + Sized, + T10: FromValue + Sized, + T11: FromValue + Sized, +{ + fn parse(args: RuntimeArgs) -> Result { + let a0: T1 = args.nth_checked(0)?; + let a1: T2 = args.nth_checked(1)?; + let a2: T3 = args.nth_checked(2)?; + let a3: T4 = args.nth_checked(3)?; + let a4: T5 = args.nth_checked(4)?; + let a5: T6 = args.nth_checked(5)?; + let a6: T7 = args.nth_checked(6)?; + let a7: T8 = args.nth_checked(7)?; + let a8: T9 = args.nth_checked(8)?; + let a9: T10 = args.nth_checked(9)?; + let a10: T11 = args.nth_checked(10)?; + Ok((a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10)) + } +} diff --git a/execution_engine/src/runtime/auction_internal.rs b/execution_engine/src/runtime/auction_internal.rs new file mode 100644 index 0000000000..9c7a614267 --- /dev/null +++ b/execution_engine/src/runtime/auction_internal.rs @@ -0,0 +1,541 @@ +use std::collections::BTreeSet; +use tracing::{debug, error}; + +use casper_storage::{ + global_state::{error::Error as GlobalStateError, state::StateReader}, + system::{ + auction::{ + providers::{AccountProvider, MintProvider, 
RuntimeProvider, StorageProvider}, + Auction, + }, + mint::Mint, + }, +}; +use casper_types::{ + account::AccountHash, + bytesrepr::{FromBytes, ToBytes}, + system::{ + auction::{BidAddr, BidKind, EraInfo, Error, Unbond, UnbondEra, UnbondKind}, + mint, + }, + AccessRights, CLTyped, CLValue, Key, KeyTag, PublicKey, RuntimeArgs, StoredValue, URef, U512, +}; + +use super::Runtime; +use crate::execution::ExecError; + +impl From for Option { + fn from(exec_error: ExecError) -> Self { + match exec_error { + // This is used to propagate [`execution::Error::GasLimit`] to make sure [`Auction`] + // contract running natively supports propagating gas limit errors without a panic. + ExecError::GasLimit => Some(Error::GasLimit), + // There are possibly other exec errors happening but such translation would be lossy. + _ => None, + } + } +} + +impl StorageProvider for Runtime<'_, R> +where + R: StateReader, +{ + fn read(&mut self, uref: URef) -> Result, Error> { + match self.context.read_gs(&uref.into()) { + Ok(Some(StoredValue::CLValue(cl_value))) => { + Ok(Some(cl_value.into_t().map_err(|_| Error::CLValue)?)) + } + Ok(Some(_)) => { + error!("StorageProvider::read: unexpected StoredValue variant"); + Err(Error::Storage) + } + Ok(None) => Ok(None), + Err(ExecError::BytesRepr(_)) => Err(Error::Serialization), + // NOTE: This extra condition is needed to correctly propagate GasLimit to the user. 
See + // also [`Runtime::reverter`] and [`to_auction_error`] + Err(ExecError::GasLimit) => Err(Error::GasLimit), + Err(err) => { + error!("StorageProvider::read: {:?}", err); + Err(Error::Storage) + } + } + } + + fn write(&mut self, uref: URef, value: T) -> Result<(), Error> { + let cl_value = CLValue::from_t(value).map_err(|_| Error::CLValue)?; + self.context + .metered_write_gs(uref.into(), StoredValue::CLValue(cl_value)) + .map_err(|exec_error| { + error!("StorageProvider::write: {:?}", exec_error); + >::from(exec_error).unwrap_or(Error::Storage) + }) + } + + fn read_bid(&mut self, key: &Key) -> Result, Error> { + match self.context.read_gs(key) { + Ok(Some(StoredValue::BidKind(bid_kind))) => Ok(Some(bid_kind)), + Ok(Some(_)) => { + error!("StorageProvider::read_bid: unexpected StoredValue variant"); + Err(Error::Storage) + } + Ok(None) => Ok(None), + Err(ExecError::BytesRepr(_)) => Err(Error::Serialization), + // NOTE: This extra condition is needed to correctly propagate GasLimit to the user. 
See + // also [`Runtime::reverter`] and [`to_auction_error`] + Err(ExecError::GasLimit) => Err(Error::GasLimit), + Err(err) => { + error!("StorageProvider::read_bid: {:?}", err); + Err(Error::Storage) + } + } + } + + fn write_bid(&mut self, key: Key, bid_kind: BidKind) -> Result<(), Error> { + self.context + .metered_write_gs_unsafe(key, StoredValue::BidKind(bid_kind)) + .map_err(|exec_error| { + error!("StorageProvider::write_bid: {:?}", exec_error); + >::from(exec_error).unwrap_or(Error::Storage) + }) + } + + fn read_unbond(&mut self, bid_addr: BidAddr) -> Result, Error> { + match self.context.read_gs(&Key::BidAddr(bid_addr)) { + Ok(Some(StoredValue::BidKind(BidKind::Unbond(unbonds)))) => Ok(Some(*unbonds)), + Ok(Some(_)) => { + error!("StorageProvider::read_unbonds: unexpected StoredValue variant"); + Err(Error::Storage) + } + Ok(None) => Ok(None), + Err(ExecError::BytesRepr(_)) => Err(Error::Serialization), + // NOTE: This extra condition is needed to correctly propagate GasLimit to the user. 
See + // also [`Runtime::reverter`] and [`to_auction_error`] + Err(ExecError::GasLimit) => Err(Error::GasLimit), + Err(err) => { + error!("StorageProvider::read_unbonds: {:?}", err); + Err(Error::Storage) + } + } + } + + fn write_unbond(&mut self, bid_addr: BidAddr, unbond: Option) -> Result<(), Error> { + let unbond_key = Key::BidAddr(bid_addr); + match unbond { + Some(unbond) => self + .context + .metered_write_gs_unsafe( + unbond_key, + StoredValue::BidKind(BidKind::Unbond(Box::new(unbond))), + ) + .map_err(|exec_error| { + error!("StorageProvider::write_unbond: {:?}", exec_error); + >::from(exec_error).unwrap_or(Error::Storage) + }), + None => { + self.context.prune_gs_unsafe(unbond_key); + Ok(()) + } + } + } + + fn record_era_info(&mut self, era_info: EraInfo) -> Result<(), Error> { + Runtime::record_era_info(self, era_info) + .map_err(|exec_error| >::from(exec_error).unwrap_or(Error::RecordEraInfo)) + } + + fn prune_bid(&mut self, bid_addr: BidAddr) { + Runtime::prune(self, bid_addr.into()); + } +} + +impl RuntimeProvider for Runtime<'_, R> +where + R: StateReader, +{ + fn get_caller(&self) -> AccountHash { + self.context.get_initiator() + } + + fn is_allowed_session_caller(&self, account_hash: &AccountHash) -> bool { + Runtime::is_allowed_session_caller(self, account_hash) + } + + fn is_valid_uref(&self, uref: URef) -> bool { + self.context.validate_uref(&uref).is_ok() + } + + fn named_keys_get(&self, name: &str) -> Option { + self.context.named_keys_get(name).cloned() + } + + fn get_keys(&mut self, key_tag: &KeyTag) -> Result, Error> { + self.context.get_keys(key_tag).map_err(|err| { + error!(%key_tag, "RuntimeProvider::get_keys: {:?}", err); + Error::Storage + }) + } + + fn get_keys_by_prefix(&mut self, prefix: &[u8]) -> Result, Error> { + self.context + .get_keys_with_prefix(prefix) + .map_err(|exec_error| { + error!("RuntimeProvider::get_keys_by_prefix: {:?}", exec_error); + >::from(exec_error).unwrap_or(Error::Storage) + }) + } + + fn 
delegator_count(&mut self, bid_addr: &BidAddr) -> Result { + let delegated_accounts = { + let prefix = bid_addr.delegated_account_prefix()?; + let keys = self + .context + .get_keys_with_prefix(&prefix) + .map_err(|exec_error| { + error!("RuntimeProvider::delegator_count accounts {:?}", exec_error); + >::from(exec_error).unwrap_or(Error::Storage) + })?; + keys.len() + }; + let delegated_purses = { + let prefix = bid_addr.delegated_purse_prefix()?; + let keys = self + .context + .get_keys_with_prefix(&prefix) + .map_err(|exec_error| { + error!("RuntimeProvider::delegator_count purses {:?}", exec_error); + >::from(exec_error).unwrap_or(Error::Storage) + })?; + keys.len() + }; + Ok(delegated_accounts.saturating_add(delegated_purses)) + } + + fn reservation_count(&mut self, bid_addr: &BidAddr) -> Result { + let reserved_accounts = { + let reservation_prefix = bid_addr.reserved_account_prefix()?; + let reservation_keys = self + .context + .get_keys_with_prefix(&reservation_prefix) + .map_err(|exec_error| { + error!("RuntimeProvider::reservation_count {:?}", exec_error); + >::from(exec_error).unwrap_or(Error::Storage) + })?; + reservation_keys.len() + }; + let reserved_purses = { + let reservation_prefix = bid_addr.reserved_purse_prefix()?; + let reservation_keys = self + .context + .get_keys_with_prefix(&reservation_prefix) + .map_err(|exec_error| { + error!("RuntimeProvider::reservation_count {:?}", exec_error); + >::from(exec_error).unwrap_or(Error::Storage) + })?; + reservation_keys.len() + }; + Ok(reserved_accounts.saturating_add(reserved_purses)) + } + + fn used_reservation_count(&mut self, bid_addr: &BidAddr) -> Result { + let reservation_account_prefix = bid_addr.reserved_account_prefix()?; + let reservation_purse_prefix = bid_addr.reserved_purse_prefix()?; + + let reservation_keys = { + let mut ret = self + .context + .get_keys_with_prefix(&reservation_account_prefix) + .map_err(|exec_error| { + error!("RuntimeProvider::reservation_count {:?}", exec_error); + 
>::from(exec_error).unwrap_or(Error::Storage) + })?; + let purses = self + .context + .get_keys_with_prefix(&reservation_purse_prefix) + .map_err(|exec_error| { + error!("RuntimeProvider::reservation_count {:?}", exec_error); + >::from(exec_error).unwrap_or(Error::Storage) + })?; + ret.extend(purses); + ret + }; + + let mut used = 0; + for reservation_key in reservation_keys { + if let Key::BidAddr(BidAddr::ReservedDelegationAccount { + validator, + delegator, + }) = reservation_key + { + let key_to_check = Key::BidAddr(BidAddr::DelegatedAccount { + validator, + delegator, + }); + if let Ok(Some(_)) = self.context.read_gs(&key_to_check) { + used += 1; + } + } + if let Key::BidAddr(BidAddr::ReservedDelegationPurse { + validator, + delegator, + }) = reservation_key + { + let key_to_check = Key::BidAddr(BidAddr::DelegatedPurse { + validator, + delegator, + }); + if let Ok(Some(_)) = self.context.read_gs(&key_to_check) { + used += 1; + } + } + } + Ok(used) + } + + fn vesting_schedule_period_millis(&self) -> u64 { + self.context + .engine_config() + .vesting_schedule_period_millis() + } + + fn allow_auction_bids(&self) -> bool { + self.context.engine_config().allow_auction_bids() + } + + fn should_compute_rewards(&self) -> bool { + self.context.engine_config().compute_rewards() + } +} + +impl MintProvider for Runtime<'_, R> +where + R: StateReader, +{ + fn unbond(&mut self, unbond_kind: &UnbondKind, unbond_era: &UnbondEra) -> Result<(), Error> { + let is_delegator = unbond_kind.is_delegator(); + let (purse, maybe_account_hash) = match unbond_kind { + UnbondKind::Validator(pk) | UnbondKind::DelegatedPublicKey(pk) => { + let account_hash = pk.to_account_hash(); + let maybe_value = self + .context + .read_gs_unsafe(&Key::Account(account_hash)) + .map_err(|exec_error| { + error!("MintProvider::unbond: {:?}", exec_error); + >::from(exec_error).unwrap_or(Error::Storage) + })?; + + match maybe_value { + Some(StoredValue::Account(account)) => { + (account.main_purse(), 
Some(account_hash)) + } + Some(StoredValue::CLValue(cl_value)) => { + let entity_key: Key = cl_value.into_t().map_err(|_| Error::CLValue)?; + match self.context.read_gs_unsafe(&entity_key) { + Ok(Some(StoredValue::AddressableEntity(entity))) => { + (entity.main_purse(), Some(account_hash)) + } + Ok(Some(StoredValue::CLValue(_))) => { + return Err(Error::CLValue); + } + Ok(Some(_)) => { + return if is_delegator { + Err(Error::DelegatorNotFound) + } else { + Err(Error::ValidatorNotFound) + } + } + Ok(None) => { + return Err(Error::InvalidPublicKey); + } + Err(exec_error) => { + error!("MintProvider::unbond: {:?}", exec_error); + return Err( + >::from(exec_error).unwrap_or(Error::Storage) + ); + } + } + } + Some(_) => return Err(Error::UnexpectedStoredValueVariant), + None => return Err(Error::InvalidPublicKey), + } + } + UnbondKind::DelegatedPurse(addr) => { + let purse = URef::new(*addr, AccessRights::READ_ADD_WRITE); + match self.balance(purse) { + Ok(Some(_)) => (purse, None), + Ok(None) => return Err(Error::MissingPurse), + Err(err) => { + error!("MintProvider::unbond delegated purse: {:?}", err); + return Err(Error::MintError); + } + } + } + }; + + self.mint_transfer_direct( + maybe_account_hash, + *unbond_era.bonding_purse(), + purse, + *unbond_era.amount(), + None, + ) + .map_err(|_| Error::Transfer)? + .map_err(|_| Error::Transfer)?; + Ok(()) + } + + /// Allows optimized auction and mint interaction. + /// Intended to be used only by system contracts to manage staked purses. + /// NOTE: Never expose this through FFI. 
+ fn mint_transfer_direct( + &mut self, + to: Option, + source: URef, + target: URef, + amount: U512, + id: Option, + ) -> Result, Error> { + let is_main_purse_transfer = self + .context + .runtime_footprint() + .borrow() + .main_purse() + .expect("didnt have purse") + .addr() + == source.addr(); + let has_perms = is_main_purse_transfer + || (source.is_writeable() && self.context.validate_uref(&source).is_ok()); + if !(has_perms || self.context.get_initiator() == PublicKey::System.to_account_hash()) { + return Err(Error::InvalidCaller); + } + + let args_values = RuntimeArgs::try_new(|args| { + args.insert(mint::ARG_TO, to)?; + args.insert(mint::ARG_SOURCE, source)?; + args.insert(mint::ARG_TARGET, target)?; + args.insert(mint::ARG_AMOUNT, amount)?; + args.insert(mint::ARG_ID, id)?; + Ok(()) + }) + .map_err(|_| Error::CLValue)?; + + let gas_counter = self.gas_counter(); + + self.context + .access_rights_extend(&[source, target.into_add()]); + + let mint_hash = self.get_mint_hash().map_err(|exec_error| { + >::from(exec_error).unwrap_or(Error::MissingValue) + })?; + + let cl_value = self + .call_contract(mint_hash, mint::METHOD_TRANSFER, args_values) + .map_err(|exec_error| >::from(exec_error).unwrap_or(Error::Transfer))?; + + self.set_gas_counter(gas_counter); + cl_value.into_t().map_err(|_| Error::CLValue) + } + + fn mint_into_existing_purse( + &mut self, + amount: U512, + existing_purse: URef, + ) -> Result<(), Error> { + if self.context.get_initiator() != PublicKey::System.to_account_hash() { + return Err(Error::InvalidCaller); + } + + let args_values = RuntimeArgs::try_new(|args| { + args.insert(mint::ARG_AMOUNT, amount)?; + args.insert(mint::ARG_PURSE, existing_purse)?; + Ok(()) + }) + .map_err(|_| Error::CLValue)?; + + let gas_counter = self.gas_counter(); + + let mint_hash = self.get_mint_hash().map_err(|exec_error| { + >::from(exec_error).unwrap_or(Error::MissingValue) + })?; + + let cl_value = self + .call_contract( + mint_hash, + 
mint::METHOD_MINT_INTO_EXISTING_PURSE, + args_values, + ) + .map_err(|error| >::from(error).unwrap_or(Error::MintError))?; + self.set_gas_counter(gas_counter); + cl_value + .into_t::>() + .map_err(|_| Error::CLValue)? + .map_err(|_| Error::MintError) + } + + fn create_purse(&mut self) -> Result { + Runtime::create_purse(self).map_err(|exec_error| { + >::from(exec_error).unwrap_or(Error::CreatePurseFailed) + }) + } + + fn available_balance(&mut self, purse: URef) -> Result, Error> { + Runtime::available_balance(self, purse) + .map_err(|exec_error| >::from(exec_error).unwrap_or(Error::GetBalance)) + } + + fn read_base_round_reward(&mut self) -> Result { + let mint_hash = self.get_mint_hash().map_err(|exec_error| { + >::from(exec_error).unwrap_or(Error::MissingValue) + })?; + self.mint_read_base_round_reward(mint_hash) + .map_err(|exec_error| >::from(exec_error).unwrap_or(Error::MintReward)) + } + + fn mint(&mut self, amount: U512) -> Result { + let mint_hash = self.get_mint_hash().map_err(|exec_error| { + >::from(exec_error).unwrap_or(Error::MissingValue) + })?; + self.mint_mint(mint_hash, amount) + .map_err(|exec_error| >::from(exec_error).unwrap_or(Error::MintError)) + } + + fn reduce_total_supply(&mut self, amount: U512) -> Result<(), Error> { + let mint_hash = self.get_mint_hash().map_err(|exec_error| { + >::from(exec_error).unwrap_or(Error::MissingValue) + })?; + self.mint_reduce_total_supply(mint_hash, amount) + .map_err(|exec_error| { + >::from(exec_error).unwrap_or(Error::MintReduceTotalSupply) + }) + } +} + +impl AccountProvider for Runtime<'_, R> +where + R: StateReader, +{ + fn get_main_purse(&self) -> Result { + // NOTE: this is used by the system and is not (and should not be made to be) accessible + // from userland. 
+ match Runtime::context(self) + .runtime_footprint() + .borrow() + .main_purse() + { + None => { + debug!("runtime attempt to access non-existent main purse"); + Err(Error::InvalidContext) + } + Some(purse) => Ok(purse), + } + } + + /// Set main purse. + fn set_main_purse(&mut self, purse: URef) { + Runtime::context(self) + .runtime_footprint() + .borrow_mut() + .set_main_purse(purse); + } +} + +impl Auction for Runtime<'_, R> where R: StateReader {} diff --git a/execution_engine/src/runtime/cryptography.rs b/execution_engine/src/runtime/cryptography.rs new file mode 100644 index 0000000000..c846997d18 --- /dev/null +++ b/execution_engine/src/runtime/cryptography.rs @@ -0,0 +1,43 @@ +//! Cryptography module containing hashing functions used internally +//! by the execution engine + +use blake2::{ + digest::{Update, VariableOutput}, + Blake2bVar, +}; +use sha2::{Digest, Sha256}; + +/// The number of bytes in a hash. +/// All hash functions in this module have a digest length of 32. +pub const DIGEST_LENGTH: usize = 32; + +/// The 32-byte digest blake2b hash function +pub fn blake2b>(data: T) -> [u8; DIGEST_LENGTH] { + let mut result = [0; DIGEST_LENGTH]; + // NOTE: Assumed safe as `BLAKE2B_DIGEST_LENGTH` is a valid value for a hasher + let mut hasher = Blake2bVar::new(DIGEST_LENGTH).expect("should create hasher"); + + hasher.update(data.as_ref()); + + // NOTE: This should never fail, because result is exactly DIGEST_LENGTH long + hasher.finalize_variable(&mut result).ok(); + + result +} + +/// The 32-byte digest blake3 hash function +pub fn blake3>(data: T) -> [u8; DIGEST_LENGTH] { + let mut result = [0; DIGEST_LENGTH]; + let mut hasher = blake3::Hasher::new(); + + hasher.update(data.as_ref()); + let hash = hasher.finalize(); + let hash_bytes: &[u8; DIGEST_LENGTH] = hash.as_bytes(); + result.copy_from_slice(hash_bytes); + result +} + +/// The 32-byte digest sha256 hash function +pub fn sha256>(data: T) -> [u8; DIGEST_LENGTH] { + Sha256::digest(data).into() +} diff 
--git a/execution_engine/src/runtime/externals.rs b/execution_engine/src/runtime/externals.rs new file mode 100644 index 0000000000..d801d44850 --- /dev/null +++ b/execution_engine/src/runtime/externals.rs @@ -0,0 +1,1605 @@ +use std::{ + collections::{BTreeMap, BTreeSet}, + convert::TryFrom, +}; + +use casper_wasmi::{Externals, RuntimeArgs, RuntimeValue, Trap}; + +use casper_storage::global_state::{error::Error as GlobalStateError, state::StateReader}; +use casper_types::{ + account::AccountHash, + addressable_entity::{EntityEntryPoint, EntryPoints}, + api_error, + bytesrepr::{self, ToBytes}, + contract_messages::MessageTopicOperation, + contracts::{ + ContractPackageHash, EntryPoints as ContractEntryPoints, NamedKeys, ProtocolVersionMajor, + }, + AddressableEntityHash, ApiError, EntityVersion, Gas, Group, HashAlgorithm, HostFunction, + HostFunctionCost, Key, PackageHash, PackageStatus, PublicKey, Signature, StoredValue, URef, + U512, UREF_SERIALIZED_LENGTH, +}; + +use super::{args::Args, ExecError, Runtime}; +use crate::{resolvers::v1_function_index::FunctionIndex, runtime::cryptography}; + +impl Externals for Runtime<'_, R> +where + R: StateReader, +{ + fn invoke_index( + &mut self, + index: usize, + args: RuntimeArgs, + ) -> Result, Trap> { + let func = FunctionIndex::try_from(index).expect("unknown function index"); + + let host_function_costs = + (*self.context.engine_config().wasm_config().v1()).take_host_function_costs(); + + match func { + FunctionIndex::ReadFuncIndex => { + // args(0) = pointer to key in Wasm memory + // args(1) = size of key in Wasm memory + // args(2) = pointer to output size (output param) + let (key_ptr, key_size, output_size_ptr) = Args::parse(args)?; + self.charge_host_function_call( + &host_function_costs.read_value, + [key_ptr, key_size, output_size_ptr], + )?; + let ret = self.read(key_ptr, key_size, output_size_ptr)?; + Ok(Some(RuntimeValue::I32(api_error::i32_from(ret)))) + } + + FunctionIndex::LoadNamedKeysFuncIndex => { + // 
args(0) = pointer to amount of keys (output) + // args(1) = pointer to amount of serialized bytes (output) + let (total_keys_ptr, result_size_ptr) = Args::parse(args)?; + self.charge_host_function_call( + &host_function_costs.load_named_keys, + [total_keys_ptr, result_size_ptr], + )?; + let ret = self.load_named_keys(total_keys_ptr, result_size_ptr)?; + Ok(Some(RuntimeValue::I32(api_error::i32_from(ret)))) + } + + FunctionIndex::WriteFuncIndex => { + // args(0) = pointer to key in Wasm memory + // args(1) = size of key + // args(2) = pointer to value + // args(3) = size of value + let (key_ptr, key_size, value_ptr, value_size) = Args::parse(args)?; + self.charge_host_function_call( + &host_function_costs.write, + [key_ptr, key_size, value_ptr, value_size], + )?; + self.write(key_ptr, key_size, value_ptr, value_size)?; + Ok(None) + } + + FunctionIndex::AddFuncIndex => { + // args(0) = pointer to key in Wasm memory + // args(1) = size of key + // args(2) = pointer to value + // args(3) = size of value + let (key_ptr, key_size, value_ptr, value_size) = Args::parse(args)?; + self.charge_host_function_call( + &host_function_costs.add, + [key_ptr, key_size, value_ptr, value_size], + )?; + self.add(key_ptr, key_size, value_ptr, value_size)?; + Ok(None) + } + + FunctionIndex::NewFuncIndex => { + // args(0) = pointer to uref destination in Wasm memory + // args(1) = pointer to initial value + // args(2) = size of initial value + let (uref_ptr, value_ptr, value_size) = Args::parse(args)?; + self.charge_host_function_call( + &host_function_costs.new_uref, + [uref_ptr, value_ptr, value_size], + )?; + self.new_uref(uref_ptr, value_ptr, value_size)?; + Ok(None) + } + + FunctionIndex::RetFuncIndex => { + // args(0) = pointer to value + // args(1) = size of value + let (value_ptr, value_size) = Args::parse(args)?; + self.charge_host_function_call(&host_function_costs.ret, [value_ptr, value_size])?; + Err(self.ret(value_ptr, value_size as usize)) + } + + 
FunctionIndex::GetKeyFuncIndex => { + // args(0) = pointer to key name in Wasm memory + // args(1) = size of key name + // args(2) = pointer to output buffer for serialized key + // args(3) = size of output buffer + // args(4) = pointer to bytes written + let (name_ptr, name_size, output_ptr, output_size, bytes_written) = + Args::parse(args)?; + self.charge_host_function_call( + &host_function_costs.get_key, + [name_ptr, name_size, output_ptr, output_size, bytes_written], + )?; + let ret = self.load_key( + name_ptr, + name_size, + output_ptr, + output_size as usize, + bytes_written, + )?; + Ok(Some(RuntimeValue::I32(api_error::i32_from(ret)))) + } + + FunctionIndex::HasKeyFuncIndex => { + // args(0) = pointer to key name in Wasm memory + // args(1) = size of key name + let (name_ptr, name_size) = Args::parse(args)?; + self.charge_host_function_call( + &host_function_costs.has_key, + [name_ptr, name_size], + )?; + let result = self.has_key(name_ptr, name_size)?; + Ok(Some(RuntimeValue::I32(result))) + } + + FunctionIndex::PutKeyFuncIndex => { + // args(0) = pointer to key name in Wasm memory + // args(1) = size of key name + // args(2) = pointer to key in Wasm memory + // args(3) = size of key + let (name_ptr, name_size, key_ptr, key_size) = Args::parse(args)?; + self.charge_host_function_call( + &host_function_costs.put_key, + [name_ptr, name_size, key_ptr, key_size], + )?; + self.put_key(name_ptr, name_size, key_ptr, key_size)?; + Ok(None) + } + + FunctionIndex::RemoveKeyFuncIndex => { + // args(0) = pointer to key name in Wasm memory + // args(1) = size of key name + let (name_ptr, name_size) = Args::parse(args)?; + self.charge_host_function_call( + &host_function_costs.remove_key, + [name_ptr, name_size], + )?; + self.remove_key(name_ptr, name_size)?; + Ok(None) + } + + FunctionIndex::GetCallerIndex => { + // args(0) = pointer where a size of serialized bytes will be stored + let (output_size_ptr,) = Args::parse(args)?; + 
self.charge_host_function_call(&host_function_costs.get_caller, [output_size_ptr])?; + let ret = self.get_caller(output_size_ptr)?; + Ok(Some(RuntimeValue::I32(api_error::i32_from(ret)))) + } + + FunctionIndex::GetBlocktimeIndex => { + // args(0) = pointer to Wasm memory where to write. + let (dest_ptr,) = Args::parse(args)?; + self.charge_host_function_call(&host_function_costs.get_blocktime, [dest_ptr])?; + self.get_blocktime(dest_ptr)?; + Ok(None) + } + + FunctionIndex::GasFuncIndex => { + let (gas_arg,): (u32,) = Args::parse(args)?; + // Gas is special cased internal host function and for accounting purposes it isn't + // represented in protocol data. + self.gas(Gas::new(gas_arg))?; + Ok(None) + } + + FunctionIndex::IsValidURefFnIndex => { + // args(0) = pointer to value to validate + // args(1) = size of value + let (uref_ptr, uref_size) = Args::parse(args)?; + self.charge_host_function_call( + &host_function_costs.is_valid_uref, + [uref_ptr, uref_size], + )?; + Ok(Some(RuntimeValue::I32(i32::from( + self.is_valid_uref(uref_ptr, uref_size)?, + )))) + } + + FunctionIndex::RevertFuncIndex => { + // args(0) = status u32 + let (status,) = Args::parse(args)?; + self.charge_host_function_call(&host_function_costs.revert, [status])?; + Err(self.revert(status)) + } + + FunctionIndex::AddAssociatedKeyFuncIndex => { + // args(0) = pointer to array of bytes of an account hash + // args(1) = size of an account hash + // args(2) = weight of the key + let (account_hash_ptr, account_hash_size, weight_value) = Args::parse(args)?; + self.charge_host_function_call( + &host_function_costs.add_associated_key, + [ + account_hash_ptr, + account_hash_size, + weight_value as HostFunctionCost, + ], + )?; + let value = self.add_associated_key( + account_hash_ptr, + account_hash_size as usize, + weight_value, + )?; + Ok(Some(RuntimeValue::I32(value))) + } + + FunctionIndex::RemoveAssociatedKeyFuncIndex => { + // args(0) = pointer to array of bytes of an account hash + // args(1) = size 
of an account hash + let (account_hash_ptr, account_hash_size) = Args::parse(args)?; + self.charge_host_function_call( + &host_function_costs.remove_associated_key, + [account_hash_ptr, account_hash_size], + )?; + let value = + self.remove_associated_key(account_hash_ptr, account_hash_size as usize)?; + Ok(Some(RuntimeValue::I32(value))) + } + + FunctionIndex::UpdateAssociatedKeyFuncIndex => { + // args(0) = pointer to array of bytes of an account hash + // args(1) = size of an account hash + // args(2) = weight of the key + let (account_hash_ptr, account_hash_size, weight_value) = Args::parse(args)?; + self.charge_host_function_call( + &host_function_costs.update_associated_key, + [ + account_hash_ptr, + account_hash_size, + weight_value as HostFunctionCost, + ], + )?; + let value = self.update_associated_key( + account_hash_ptr, + account_hash_size as usize, + weight_value, + )?; + Ok(Some(RuntimeValue::I32(value))) + } + + FunctionIndex::SetActionThresholdFuncIndex => { + // args(0) = action type + // args(1) = new threshold + let (action_type_value, threshold_value) = Args::parse(args)?; + self.charge_host_function_call( + &host_function_costs.set_action_threshold, + [action_type_value, threshold_value as HostFunctionCost], + )?; + let value = self.set_action_threshold(action_type_value, threshold_value)?; + Ok(Some(RuntimeValue::I32(value))) + } + + FunctionIndex::CreatePurseIndex => { + // args(0) = pointer to array for return value + // args(1) = length of array for return value + let (dest_ptr, dest_size) = Args::parse(args)?; + + self.charge_host_function_call( + &host_function_costs.create_purse, + [dest_ptr, dest_size], + )?; + + let result = if (dest_size as usize) < UREF_SERIALIZED_LENGTH { + Err(ApiError::PurseNotCreated) + } else { + let purse = self.create_purse()?; + let purse_bytes = purse.into_bytes().map_err(ExecError::BytesRepr)?; + self.try_get_memory()? 
+ .set(dest_ptr, &purse_bytes) + .map_err(|e| ExecError::Interpreter(e.into()))?; + Ok(()) + }; + + Ok(Some(RuntimeValue::I32(api_error::i32_from(result)))) + } + + FunctionIndex::TransferToAccountIndex => { + // args(0) = pointer to array of bytes of an account hash + // args(1) = length of array of bytes of an account hash + // args(2) = pointer to array of bytes of an amount + // args(3) = length of array of bytes of an amount + // args(4) = pointer to array of bytes of an id + // args(5) = length of array of bytes of an id + // args(6) = pointer to a value where new value will be set + let (key_ptr, key_size, amount_ptr, amount_size, id_ptr, id_size, result_ptr) = + Args::parse(args)?; + self.charge_host_function_call( + &host_function_costs.transfer_to_account, + [ + key_ptr, + key_size, + amount_ptr, + amount_size, + id_ptr, + id_size, + result_ptr, + ], + )?; + let account_hash: AccountHash = { + let bytes = self.bytes_from_mem(key_ptr, key_size as usize)?; + bytesrepr::deserialize_from_slice(bytes).map_err(ExecError::BytesRepr)? + }; + let amount: U512 = { + let bytes = self.bytes_from_mem(amount_ptr, amount_size as usize)?; + bytesrepr::deserialize_from_slice(bytes).map_err(ExecError::BytesRepr)? + }; + let id: Option = { + let bytes = self.bytes_from_mem(id_ptr, id_size as usize)?; + bytesrepr::deserialize_from_slice(bytes).map_err(ExecError::BytesRepr)? + }; + + let ret = match self.transfer_to_account(account_hash, amount, id)? { + Ok(transferred_to) => { + let result_value: u32 = transferred_to as u32; + let result_value_bytes = result_value.to_le_bytes(); + self.try_get_memory()? 
+ .set(result_ptr, &result_value_bytes) + .map_err(|error| ExecError::Interpreter(error.into()))?; + Ok(()) + } + Err(api_error) => Err(api_error), + }; + Ok(Some(RuntimeValue::I32(api_error::i32_from(ret)))) + } + + FunctionIndex::TransferFromPurseToAccountIndex => { + // args(0) = pointer to array of bytes in Wasm memory of a source purse + // args(1) = length of array of bytes in Wasm memory of a source purse + // args(2) = pointer to array of bytes in Wasm memory of an account hash + // args(3) = length of array of bytes in Wasm memory of an account hash + // args(4) = pointer to array of bytes in Wasm memory of an amount + // args(5) = length of array of bytes in Wasm memory of an amount + // args(6) = pointer to array of bytes in Wasm memory of an id + // args(7) = length of array of bytes in Wasm memory of an id + // args(8) = pointer to a value where value of `TransferredTo` enum will be set + let ( + source_ptr, + source_size, + key_ptr, + key_size, + amount_ptr, + amount_size, + id_ptr, + id_size, + result_ptr, + ) = Args::parse(args)?; + self.charge_host_function_call( + &host_function_costs.transfer_from_purse_to_account, + [ + source_ptr, + source_size, + key_ptr, + key_size, + amount_ptr, + amount_size, + id_ptr, + id_size, + result_ptr, + ], + )?; + let source_purse = { + let bytes = self.bytes_from_mem(source_ptr, source_size as usize)?; + bytesrepr::deserialize_from_slice(bytes).map_err(ExecError::BytesRepr)? + }; + let account_hash: AccountHash = { + let bytes = self.bytes_from_mem(key_ptr, key_size as usize)?; + bytesrepr::deserialize_from_slice(bytes).map_err(ExecError::BytesRepr)? + }; + let amount: U512 = { + let bytes = self.bytes_from_mem(amount_ptr, amount_size as usize)?; + bytesrepr::deserialize_from_slice(bytes).map_err(ExecError::BytesRepr)? + }; + let id: Option = { + let bytes = self.bytes_from_mem(id_ptr, id_size as usize)?; + bytesrepr::deserialize_from_slice(bytes).map_err(ExecError::BytesRepr)? 
+ }; + let ret = match self.transfer_from_purse_to_account_hash( + source_purse, + account_hash, + amount, + id, + )? { + Ok(transferred_to) => { + let result_value: u32 = transferred_to as u32; + let result_value_bytes = result_value.to_le_bytes(); + self.try_get_memory()? + .set(result_ptr, &result_value_bytes) + .map_err(|error| ExecError::Interpreter(error.into()))?; + Ok(()) + } + Err(api_error) => Err(api_error), + }; + Ok(Some(RuntimeValue::I32(api_error::i32_from(ret)))) + } + + FunctionIndex::TransferFromPurseToPurseIndex => { + // args(0) = pointer to array of bytes in Wasm memory of a source purse + // args(1) = length of array of bytes in Wasm memory of a source purse + // args(2) = pointer to array of bytes in Wasm memory of a target purse + // args(3) = length of array of bytes in Wasm memory of a target purse + // args(4) = pointer to array of bytes in Wasm memory of an amount + // args(5) = length of array of bytes in Wasm memory of an amount + // args(6) = pointer to array of bytes in Wasm memory of an id + // args(7) = length of array of bytes in Wasm memory of an id + let ( + source_ptr, + source_size, + target_ptr, + target_size, + amount_ptr, + amount_size, + id_ptr, + id_size, + ) = Args::parse(args)?; + self.charge_host_function_call( + &host_function_costs.transfer_from_purse_to_purse, + [ + source_ptr, + source_size, + target_ptr, + target_size, + amount_ptr, + amount_size, + id_ptr, + id_size, + ], + )?; + + let source: URef = { + let bytes = self.bytes_from_mem(source_ptr, source_size as usize)?; + bytesrepr::deserialize_from_slice(bytes).map_err(ExecError::BytesRepr)? + }; + + let target: URef = { + let bytes = self.bytes_from_mem(target_ptr, target_size as usize)?; + bytesrepr::deserialize_from_slice(bytes).map_err(ExecError::BytesRepr)? + }; + + let amount: U512 = { + let bytes = self.bytes_from_mem(amount_ptr, amount_size as usize)?; + bytesrepr::deserialize_from_slice(bytes).map_err(ExecError::BytesRepr)? 
+ }; + + let id: Option = { + let bytes = self.bytes_from_mem(id_ptr, id_size as usize)?; + bytesrepr::deserialize_from_slice(bytes).map_err(ExecError::BytesRepr)? + }; + + let ret = self.transfer_from_purse_to_purse(source, target, amount, id)?; + Ok(Some(RuntimeValue::I32(api_error::i32_from(ret)))) + } + + FunctionIndex::GetBalanceIndex => { + // args(0) = pointer to purse input + // args(1) = length of purse + // args(2) = pointer to output size (output) + let (ptr, ptr_size, output_size_ptr) = Args::parse(args)?; + self.charge_host_function_call( + &host_function_costs.get_balance, + [ptr, ptr_size, output_size_ptr], + )?; + let ret = self.get_balance_host_buffer(ptr, ptr_size as usize, output_size_ptr)?; + Ok(Some(RuntimeValue::I32(api_error::i32_from(ret)))) + } + + FunctionIndex::GetPhaseIndex => { + // args(0) = pointer to Wasm memory where to write. + let (dest_ptr,) = Args::parse(args)?; + self.charge_host_function_call(&host_function_costs.get_phase, [dest_ptr])?; + self.get_phase(dest_ptr)?; + Ok(None) + } + + FunctionIndex::GetSystemContractIndex => { + // args(0) = system contract index + // args(1) = dest pointer for storing serialized result + // args(2) = dest pointer size + let (system_contract_index, dest_ptr, dest_size) = Args::parse(args)?; + self.charge_host_function_call( + &host_function_costs.get_system_contract, + [system_contract_index, dest_ptr, dest_size], + )?; + let ret = self.get_system_contract(system_contract_index, dest_ptr, dest_size)?; + Ok(Some(RuntimeValue::I32(api_error::i32_from(ret)))) + } + + FunctionIndex::GetMainPurseIndex => { + // args(0) = pointer to Wasm memory where to write. + let (dest_ptr,) = Args::parse(args)?; + self.charge_host_function_call(&host_function_costs.get_main_purse, [dest_ptr])?; + self.get_main_purse(dest_ptr)?; + Ok(None) + } + + FunctionIndex::ReadHostBufferIndex => { + // args(0) = pointer to Wasm memory where to write size. 
+ let (dest_ptr, dest_size, bytes_written_ptr) = Args::parse(args)?; + self.charge_host_function_call( + &host_function_costs.read_host_buffer, + [dest_ptr, dest_size, bytes_written_ptr], + )?; + let ret = self.read_host_buffer(dest_ptr, dest_size as usize, bytes_written_ptr)?; + Ok(Some(RuntimeValue::I32(api_error::i32_from(ret)))) + } + + FunctionIndex::CreateContractPackageAtHash => { + // args(0) = pointer to wasm memory where to write 32-byte Hash address + // args(1) = pointer to wasm memory where to write 32-byte access key address + // args(2) = boolean flag to determine if the contract can be versioned + let (hash_dest_ptr, access_dest_ptr, is_locked) = Args::parse(args)?; + self.charge_host_function_call( + &host_function_costs.create_contract_package_at_hash, + [hash_dest_ptr, access_dest_ptr], + )?; + let package_status = PackageStatus::new(is_locked); + let (hash_addr, access_addr) = + self.create_contract_package_at_hash(package_status)?; + + self.function_address(hash_addr, hash_dest_ptr)?; + self.function_address(access_addr, access_dest_ptr)?; + Ok(None) + } + + FunctionIndex::CreateContractUserGroup => { + // args(0) = pointer to package key in wasm memory + // args(1) = size of package key in wasm memory + // args(2) = pointer to group label in wasm memory + // args(3) = size of group label in wasm memory + // args(4) = number of new urefs to generate for the group + // args(5) = pointer to existing_urefs in wasm memory + // args(6) = size of existing_urefs in wasm memory + // args(7) = pointer to location to write size of output (written to host buffer) + let ( + package_key_ptr, + package_key_size, + label_ptr, + label_size, + num_new_urefs, + existing_urefs_ptr, + existing_urefs_size, + output_size_ptr, + ) = Args::parse(args)?; + + self.charge_host_function_call( + &host_function_costs.create_contract_user_group, + [ + package_key_ptr, + package_key_size, + label_ptr, + label_size, + num_new_urefs, + existing_urefs_ptr, + existing_urefs_size, 
+ output_size_ptr, + ], + )?; + + let contract_package_hash: PackageHash = + self.t_from_mem(package_key_ptr, package_key_size)?; + let label: String = self.t_from_mem(label_ptr, label_size)?; + let existing_urefs: BTreeSet = + self.t_from_mem(existing_urefs_ptr, existing_urefs_size)?; + + let ret = self.create_contract_user_group( + contract_package_hash, + label, + num_new_urefs, + existing_urefs, + output_size_ptr, + )?; + Ok(Some(RuntimeValue::I32(api_error::i32_from(ret)))) + } + + FunctionIndex::AddContractVersion => { + // args(0) = pointer to package key in wasm memory + // args(1) = size of package key in wasm memory + // args(2) = pointer to entrypoints in wasm memory + // args(3) = size of entrypoints in wasm memory + // args(4) = pointer to named keys in wasm memory + // args(5) = size of named keys in wasm memory + // args(6) = pointer to output buffer for serialized key + // args(7) = size of output buffer + // args(8) = pointer to bytes written + let ( + contract_package_hash_ptr, + contract_package_hash_size, + version_ptr, + entry_points_ptr, + entry_points_size, + named_keys_ptr, + named_keys_size, + output_ptr, + output_size, + bytes_written_ptr, + ) = Args::parse(args)?; + self.charge_host_function_call( + &host_function_costs.add_contract_version, + [ + contract_package_hash_ptr, + contract_package_hash_size, + version_ptr, + entry_points_ptr, + entry_points_size, + named_keys_ptr, + named_keys_size, + output_ptr, + output_size, + bytes_written_ptr, + ], + )?; + + let contract_package_hash: ContractPackageHash = + self.t_from_mem(contract_package_hash_ptr, contract_package_hash_size)?; + let package_hash = PackageHash::new(contract_package_hash.value()); + let entry_points: EntryPoints = { + let contract_entry_points: ContractEntryPoints = + self.t_from_mem(entry_points_ptr, entry_points_size)?; + + let points: Vec = contract_entry_points + .take_entry_points() + .into_iter() + .map(EntityEntryPoint::from) + .collect(); + + points.into() + }; + 
let named_keys: NamedKeys = self.t_from_mem(named_keys_ptr, named_keys_size)?; + let ret = self.add_contract_version( + package_hash, + version_ptr, + entry_points, + named_keys, + BTreeMap::new(), + output_ptr, + )?; + Ok(Some(RuntimeValue::I32(api_error::i32_from(ret)))) + } + FunctionIndex::AddContractVersionWithMessageTopics => { + // args(0) = pointer to package hash in wasm memory + // args(1) = size of package hash in wasm memory + // args(2) = pointer to entity version in wasm memory + // args(3) = pointer to entrypoints in wasm memory + // args(4) = size of entrypoints in wasm memory + // args(5) = pointer to named keys in wasm memory + // args(6) = size of named keys in wasm memory + // args(7) = pointer to the new topic names in wasm memory + // args(8) = size of the new topic names in wasm memory + // args(9) = pointer to output buffer for serialized key + // args(10) = size of output buffer + let ( + contract_package_hash_ptr, + contract_package_hash_size, + version_ptr, + entry_points_ptr, + entry_points_size, + named_keys_ptr, + named_keys_size, + message_topics_ptr, + message_topics_size, + output_ptr, + output_size, + ) = Args::parse(args)?; + self.charge_host_function_call( + &host_function_costs.add_contract_version_with_message_topics, + [ + contract_package_hash_ptr, + contract_package_hash_size, + version_ptr, + entry_points_ptr, + entry_points_size, + named_keys_ptr, + named_keys_size, + message_topics_ptr, + message_topics_size, + output_ptr, + output_size, + ], + )?; + + // Exit if unable to return output. 
+ if output_size < 32 { + // `output_size` must be >= actual length of serialized hash bytes + return Ok(Some(RuntimeValue::I32(api_error::i32_from(Err( + ApiError::BufferTooSmall, + ))))); + } + + let package_hash: PackageHash = + self.t_from_mem(contract_package_hash_ptr, contract_package_hash_size)?; + let entry_points: EntryPoints = + self.t_from_mem(entry_points_ptr, entry_points_size)?; + let named_keys: NamedKeys = self.t_from_mem(named_keys_ptr, named_keys_size)?; + let message_topics: BTreeMap = + self.t_from_mem(message_topics_ptr, message_topics_size)?; + + // Check that the names of the topics that are added are within the configured + // limits. + let message_limits = self.context.engine_config().wasm_config().messages_limits(); + for (topic_name, _) in + message_topics + .iter() + .filter(|(_, operation)| match operation { + MessageTopicOperation::Add => true, + }) + { + if topic_name.len() > message_limits.max_topic_name_size() as usize { + return Ok(Some(RuntimeValue::I32(api_error::i32_from(Err( + ApiError::MaxTopicNameSizeExceeded, + ))))); + } + } + + let ret = self.add_contract_version( + package_hash, + version_ptr, + entry_points, + named_keys, + message_topics, + output_ptr, + )?; + Ok(Some(RuntimeValue::I32(api_error::i32_from(ret)))) + } + + FunctionIndex::AddPackageVersionWithMessageTopics => { + // args(0) = pointer to package hash in wasm memory + // args(1) = size of package hash in wasm memory + // args(2) = pointer to entity version in wasm memory + // args(3) = pointer to entrypoints in wasm memory + // args(4) = size of entrypoints in wasm memory + // args(5) = pointer to named keys in wasm memory + // args(6) = size of named keys in wasm memory + // args(7) = pointer to the new topic names in wasm memory + // args(8) = size of the new topic names in wasm memory + // args(9) = pointer to output buffer for serialized key + // args(10) = size of output buffer + let ( + contract_package_hash_ptr, + contract_package_hash_size, + 
version_ptr, + entry_points_ptr, + entry_points_size, + named_keys_ptr, + named_keys_size, + message_topics, + message_topics_size, + output_ptr, + output_size, + ) = Args::parse(args)?; + + self.charge_host_function_call( + &host_function_costs.add_package_version_with_message_topics, + [ + contract_package_hash_ptr, + contract_package_hash_size, + version_ptr, + entry_points_ptr, + entry_points_size, + named_keys_ptr, + named_keys_size, + message_topics, + message_topics_size, + output_ptr, + output_size, + ], + )?; + + // Exit if unable to return output. + if output_size < 32 { + // `output_size` must be >= actual length of serialized hash bytes + return Ok(Some(RuntimeValue::I32(api_error::i32_from(Err( + ApiError::BufferTooSmall, + ))))); + } + + let package_hash: PackageHash = + self.t_from_mem(contract_package_hash_ptr, contract_package_hash_size)?; + let entry_points: EntryPoints = + self.t_from_mem(entry_points_ptr, entry_points_size)?; + let named_keys: NamedKeys = self.t_from_mem(named_keys_ptr, named_keys_size)?; + let message_topics: BTreeMap = + self.t_from_mem(message_topics, message_topics_size)?; + + // Check that the names of the topics that are added are within the configured + // limits. 
+ let message_limits = self.context.engine_config().wasm_config().messages_limits(); + for (topic_name, _) in + message_topics + .iter() + .filter(|(_, operation)| match operation { + MessageTopicOperation::Add => true, + }) + { + if topic_name.len() > message_limits.max_topic_name_size() as usize { + return Ok(Some(RuntimeValue::I32(api_error::i32_from(Err( + ApiError::MaxTopicNameSizeExceeded, + ))))); + } + } + + let ret = self.add_contract_version( + package_hash, + version_ptr, + entry_points, + named_keys, + message_topics, + output_ptr, + )?; + Ok(Some(RuntimeValue::I32(api_error::i32_from(ret)))) + } + + FunctionIndex::DisableContractVersion => { + // args(0) = pointer to package hash in wasm memory + // args(1) = size of package hash in wasm memory + // args(2) = pointer to contract hash in wasm memory + // args(3) = size of contract hash in wasm memory + let (package_key_ptr, package_key_size, contract_hash_ptr, contract_hash_size) = + Args::parse(args)?; + self.charge_host_function_call( + &host_function_costs.disable_contract_version, + [ + package_key_ptr, + package_key_size, + contract_hash_ptr, + contract_hash_size, + ], + )?; + let contract_package_hash = self.t_from_mem(package_key_ptr, package_key_size)?; + let contract_hash = self.t_from_mem(contract_hash_ptr, contract_hash_size)?; + + let result = self.disable_contract_version(contract_package_hash, contract_hash)?; + + Ok(Some(RuntimeValue::I32(api_error::i32_from(result)))) + } + + FunctionIndex::CallContractFuncIndex => { + // args(0) = pointer to contract hash where contract is at in global state + // args(1) = size of contract hash + // args(2) = pointer to entry point + // args(3) = size of entry point + // args(4) = pointer to function arguments in Wasm memory + // args(5) = size of arguments + // args(6) = pointer to result size (output) + let ( + contract_hash_ptr, + contract_hash_size, + entry_point_name_ptr, + entry_point_name_size, + args_ptr, + args_size, + result_size_ptr, + ) = 
Args::parse(args)?; + self.charge_host_function_call( + &host_function_costs.call_contract, + [ + contract_hash_ptr, + contract_hash_size, + entry_point_name_ptr, + entry_point_name_size, + args_ptr, + args_size, + result_size_ptr, + ], + )?; + + let contract_hash: AddressableEntityHash = + self.t_from_mem(contract_hash_ptr, contract_hash_size)?; + let entry_point_name: String = + self.t_from_mem(entry_point_name_ptr, entry_point_name_size)?; + let args_bytes: Vec = { + let args_size: u32 = args_size; + self.bytes_from_mem(args_ptr, args_size as usize)?.to_vec() + }; + + let ret = self.call_contract_host_buffer( + contract_hash, + &entry_point_name, + &args_bytes, + result_size_ptr, + )?; + Ok(Some(RuntimeValue::I32(api_error::i32_from(ret)))) + } + + FunctionIndex::CallVersionedContract => { + // args(0) = pointer to contract_package_hash where contract is at in global state + // args(1) = size of contract_package_hash + // args(2) = pointer to contract version in wasm memory + // args(3) = size of contract version in wasm memory + // args(4) = pointer to method name in wasm memory + // args(5) = size of method name in wasm memory + // args(6) = pointer to function arguments in Wasm memory + // args(7) = size of arguments + // args(8) = pointer to result size (output) + let ( + contract_package_hash_ptr, + contract_package_hash_size, + contract_version_ptr, + contract_package_size, + entry_point_name_ptr, + entry_point_name_size, + args_ptr, + args_size, + result_size_ptr, + ) = Args::parse(args)?; + self.charge_host_function_call( + &host_function_costs.call_versioned_contract, + [ + contract_package_hash_ptr, + contract_package_hash_size, + contract_version_ptr, + contract_package_size, + entry_point_name_ptr, + entry_point_name_size, + args_ptr, + args_size, + result_size_ptr, + ], + )?; + + let contract_package_hash: PackageHash = + self.t_from_mem(contract_package_hash_ptr, contract_package_hash_size)?; + let contract_version: Option = + 
self.t_from_mem(contract_version_ptr, contract_package_size)?; + let entry_point_name: String = + self.t_from_mem(entry_point_name_ptr, entry_point_name_size)?; + let args_bytes: Vec = { + let args_size: u32 = args_size; + self.bytes_from_mem(args_ptr, args_size as usize)?.to_vec() + }; + + let ret = self.call_versioned_contract_host_buffer( + contract_package_hash, + contract_version, + entry_point_name, + &args_bytes, + result_size_ptr, + )?; + Ok(Some(RuntimeValue::I32(api_error::i32_from(ret)))) + } + + #[cfg(feature = "test-support")] + FunctionIndex::PrintIndex => { + let (text_ptr, text_size) = Args::parse(args)?; + self.charge_host_function_call(&host_function_costs.print, [text_ptr, text_size])?; + self.print(text_ptr, text_size)?; + Ok(None) + } + + FunctionIndex::GetRuntimeArgsizeIndex => { + // args(0) = pointer to name of host runtime arg to load + // args(1) = size of name of the host runtime arg + // args(2) = pointer to a argument size (output) + let (name_ptr, name_size, size_ptr) = Args::parse(args)?; + self.charge_host_function_call( + &host_function_costs.get_named_arg_size, + [name_ptr, name_size, size_ptr], + )?; + let ret = self.get_named_arg_size(name_ptr, name_size as usize, size_ptr)?; + Ok(Some(RuntimeValue::I32(api_error::i32_from(ret)))) + } + + FunctionIndex::GetRuntimeArgIndex => { + // args(0) = pointer to serialized argument name + // args(1) = size of serialized argument name + // args(2) = pointer to output pointer where host will write argument bytes + // args(3) = size of available data under output pointer + let (name_ptr, name_size, dest_ptr, dest_size) = Args::parse(args)?; + self.charge_host_function_call( + &host_function_costs.get_named_arg, + [name_ptr, name_size, dest_ptr, dest_size], + )?; + let ret = + self.get_named_arg(name_ptr, name_size as usize, dest_ptr, dest_size as usize)?; + Ok(Some(RuntimeValue::I32(api_error::i32_from(ret)))) + } + + FunctionIndex::RemoveContractUserGroupIndex => { + // args(0) = pointer to 
package key in wasm memory + // args(1) = size of package key in wasm memory + // args(2) = pointer to serialized group label + // args(3) = size of serialized group label + let (package_key_ptr, package_key_size, label_ptr, label_size) = Args::parse(args)?; + self.charge_host_function_call( + &host_function_costs.remove_contract_user_group, + [package_key_ptr, package_key_size, label_ptr, label_size], + )?; + let package_key = self.t_from_mem(package_key_ptr, package_key_size)?; + let label: Group = self.t_from_mem(label_ptr, label_size)?; + + let ret = self.remove_contract_user_group(package_key, label)?; + Ok(Some(RuntimeValue::I32(api_error::i32_from(ret)))) + } + + FunctionIndex::ExtendContractUserGroupURefsIndex => { + // args(0) = pointer to package key in wasm memory + // args(1) = size of package key in wasm memory + // args(2) = pointer to label name + // args(3) = label size bytes + // args(4) = output of size value of host bytes data + let (package_ptr, package_size, label_ptr, label_size, value_size_ptr) = + Args::parse(args)?; + + self.charge_host_function_call( + &host_function_costs.provision_contract_user_group_uref, + [ + package_ptr, + package_size, + label_ptr, + label_size, + value_size_ptr, + ], + )?; + let ret = self.provision_contract_user_group_uref( + package_ptr, + package_size, + label_ptr, + label_size, + value_size_ptr, + )?; + Ok(Some(RuntimeValue::I32(api_error::i32_from(ret)))) + } + + FunctionIndex::RemoveContractUserGroupURefsIndex => { + // args(0) = pointer to package key in wasm memory + // args(1) = size of package key in wasm memory + // args(2) = pointer to label name + // args(3) = label size bytes + // args(4) = pointer to urefs + // args(5) = size of urefs pointer + let (package_ptr, package_size, label_ptr, label_size, urefs_ptr, urefs_size) = + Args::parse(args)?; + self.charge_host_function_call( + &host_function_costs.remove_contract_user_group_urefs, + [ + package_ptr, + package_size, + label_ptr, + label_size, + 
urefs_ptr, + urefs_size, + ], + )?; + let ret = self.remove_contract_user_group_urefs( + package_ptr, + package_size, + label_ptr, + label_size, + urefs_ptr, + urefs_size, + )?; + Ok(Some(RuntimeValue::I32(api_error::i32_from(ret)))) + } + + FunctionIndex::Blake2b => { + let (in_ptr, in_size, out_ptr, out_size) = Args::parse(args)?; + self.charge_host_function_call( + &host_function_costs.blake2b, + [in_ptr, in_size, out_ptr, out_size], + )?; + let digest = + self.checked_memory_slice(in_ptr as usize, in_size as usize, |input| { + cryptography::blake2b(input) + })?; + + let result = if digest.len() != out_size as usize { + Err(ApiError::BufferTooSmall) + } else { + Ok(()) + }; + if result.is_err() { + return Ok(Some(RuntimeValue::I32(api_error::i32_from(result)))); + } + + self.try_get_memory()? + .set(out_ptr, &digest) + .map_err(|error| ExecError::Interpreter(error.into()))?; + Ok(Some(RuntimeValue::I32(0))) + } + + FunctionIndex::NewDictionaryFuncIndex => { + // args(0) = pointer to output size (output param) + let (output_size_ptr,): (u32,) = Args::parse(args)?; + const UREF_LEN: u32 = 33u32; + self.charge_host_function_call(&host_function_costs.new_uref, [0, 0, UREF_LEN])?; + let ret = self.new_dictionary(output_size_ptr)?; + Ok(Some(RuntimeValue::I32(api_error::i32_from(ret)))) + } + + FunctionIndex::DictionaryGetFuncIndex => { + // args(0) = pointer to uref in Wasm memory + // args(1) = size of uref in Wasm memory + // args(2) = pointer to key bytes pointer in Wasm memory + // args(3) = pointer to key bytes size in Wasm memory + // args(4) = pointer to output size (output param) + let (uref_ptr, uref_size, key_bytes_ptr, key_bytes_size, output_size_ptr): ( + _, + u32, + _, + u32, + _, + ) = Args::parse(args)?; + self.charge_host_function_call( + &host_function_costs.dictionary_get, + [key_bytes_ptr, key_bytes_size, output_size_ptr], + )?; + let ret = self.dictionary_get( + uref_ptr, + uref_size, + key_bytes_ptr, + key_bytes_size, + output_size_ptr, + )?; + 
Ok(Some(RuntimeValue::I32(api_error::i32_from(ret)))) + } + + FunctionIndex::DictionaryPutFuncIndex => { + // args(0) = pointer to uref in Wasm memory + // args(1) = size of uref in Wasm memory + // args(2) = pointer to key bytes pointer in Wasm memory + // args(3) = pointer to key bytes size in Wasm memory + // args(4) = pointer to value bytes pointer in Wasm memory + // args(5) = pointer to value bytes size in Wasm memory + let (uref_ptr, uref_size, key_bytes_ptr, key_bytes_size, value_ptr, value_ptr_size): (_, u32, _, u32, _, u32) = Args::parse(args)?; + self.charge_host_function_call( + &host_function_costs.dictionary_put, + [key_bytes_ptr, key_bytes_size, value_ptr, value_ptr_size], + )?; + let ret = self.dictionary_put( + uref_ptr, + uref_size, + key_bytes_ptr, + key_bytes_size, + value_ptr, + value_ptr_size, + )?; + Ok(Some(RuntimeValue::I32(api_error::i32_from(ret)))) + } + + FunctionIndex::DictionaryReadFuncIndex => { + // args(0) = pointer to key in Wasm memory + // args(1) = size of key in Wasm memory + // args(2) = pointer to output size (output param) + let (key_ptr, key_size, output_size_ptr) = Args::parse(args)?; + self.charge_host_function_call( + &host_function_costs.read_value, + [key_ptr, key_size, output_size_ptr], + )?; + let ret = self.dictionary_read(key_ptr, key_size, output_size_ptr)?; + Ok(Some(RuntimeValue::I32(api_error::i32_from(ret)))) + } + + FunctionIndex::LoadCallStack => { + // args(0) (Output) Pointer to number of elements in the call stack. + // args(1) (Output) Pointer to size in bytes of the serialized call stack. 
+ let (call_stack_len_ptr, result_size_ptr) = Args::parse(args)?; + + self.charge_host_function_call( + &HostFunction::fixed(10_000), + [call_stack_len_ptr, result_size_ptr], + )?; + let ret = self.load_call_stack(call_stack_len_ptr, result_size_ptr)?; + Ok(Some(RuntimeValue::I32(api_error::i32_from(ret)))) + } + + FunctionIndex::LoadCallerInformation => { + // args(0) (Input) Type of action + // args(1) (Output) Pointer to number of elements in the call stack. + // args(2) (Output) Pointer to size in bytes of the serialized call stack. + let (action, call_stack_len_ptr, result_size_ptr) = Args::parse(args)?; + self.charge_host_function_call( + &HostFunction::fixed(10_000), + [0, call_stack_len_ptr, result_size_ptr], + )?; + let ret = + self.load_caller_information(action, call_stack_len_ptr, result_size_ptr)?; + Ok(Some(RuntimeValue::I32(api_error::i32_from(ret)))) + } + + FunctionIndex::LoadAuthorizationKeys => { + // args(0) (Output) Pointer to number of authorization keys. + // args(1) (Output) Pointer to size in bytes of the total bytes. + let (len_ptr, result_size_ptr) = Args::parse(args)?; + self.charge_host_function_call( + &HostFunction::fixed(10_000), + [len_ptr, result_size_ptr], + )?; + let ret = self.load_authorization_keys(len_ptr, result_size_ptr)?; + Ok(Some(RuntimeValue::I32(api_error::i32_from(ret)))) + } + + FunctionIndex::RandomBytes => { + let (out_ptr, out_size) = Args::parse(args)?; + self.charge_host_function_call( + &host_function_costs.random_bytes, + [out_ptr, out_size], + )?; + + let random_bytes = self.context.random_bytes()?; + + let result = if random_bytes.len() != out_size as usize { + Err(ApiError::BufferTooSmall) + } else { + Ok(()) + }; + if result.is_err() { + return Ok(Some(RuntimeValue::I32(api_error::i32_from(result)))); + } + + self.try_get_memory()? 
+ .set(out_ptr, &random_bytes) + .map_err(|error| ExecError::Interpreter(error.into()))?; + + Ok(Some(RuntimeValue::I32(0))) + } + + FunctionIndex::EnableContractVersion => { + // args(0) = pointer to package hash in wasm memory + // args(1) = size of package hash in wasm memory + // args(2) = pointer to contract hash in wasm memory + // args(3) = size of contract hash in wasm memory + let (package_key_ptr, package_key_size, contract_hash_ptr, contract_hash_size) = + Args::parse(args)?; + self.charge_host_function_call( + &host_function_costs.enable_contract_version, + [ + package_key_ptr, + package_key_size, + contract_hash_ptr, + contract_hash_size, + ], + )?; + let contract_package_hash = self.t_from_mem(package_key_ptr, package_key_size)?; + let contract_hash = self.t_from_mem(contract_hash_ptr, contract_hash_size)?; + + let result = self.enable_contract_version(contract_package_hash, contract_hash)?; + + Ok(Some(RuntimeValue::I32(api_error::i32_from(result)))) + } + + FunctionIndex::ManageMessageTopic => { + // args(0) = pointer to the serialized topic name string in wasm memory + // args(1) = size of the serialized topic name string in wasm memory + // args(2) = pointer to the operation to be performed for the specified topic + // args(3) = size of the operation + let (topic_name_ptr, topic_name_size, operation_ptr, operation_size) = + Args::parse(args)?; + self.charge_host_function_call( + &host_function_costs.manage_message_topic, + [ + topic_name_ptr, + topic_name_size, + operation_ptr, + operation_size, + ], + )?; + + let limits = self.context.engine_config().wasm_config().messages_limits(); + + if topic_name_size > limits.max_topic_name_size() { + return Ok(Some(RuntimeValue::I32(api_error::i32_from(Err( + ApiError::MaxTopicNameSizeExceeded, + ))))); + } + + let topic_name_bytes = + self.bytes_from_mem(topic_name_ptr, topic_name_size as usize)?; + let topic_name = std::str::from_utf8(&topic_name_bytes) + .map_err(|e| 
Trap::from(ExecError::InvalidUtf8Encoding(e)))?; + + if operation_size as usize > MessageTopicOperation::max_serialized_len() { + return Err(Trap::from(ExecError::InvalidImputedOperation)); + } + let topic_operation = self + .t_from_mem(operation_ptr, operation_size) + .map_err(|_e| Trap::from(ExecError::InvalidImputedOperation))?; + + // only allow managing messages from stored contracts + if !self.context.get_context_key().is_smart_contract_key() { + return Err(Trap::from(ExecError::InvalidContext)); + } + + let result = match topic_operation { + MessageTopicOperation::Add => { + self.add_message_topic(topic_name).map_err(Trap::from)? + } + }; + + Ok(Some(RuntimeValue::I32(api_error::i32_from(result)))) + } + + FunctionIndex::EmitMessage => { + // args(0) = pointer to the serialized topic name string in wasm memory + // args(1) = size of the serialized name string in wasm memory + // args(2) = pointer to the serialized message payload in wasm memory + // args(3) = size of the serialized message payload in wasm memory + let (topic_name_ptr, topic_name_size, message_ptr, message_size) = + Args::parse(args)?; + + // Charge for the call to emit message. This increases for every message emitted + // within an execution so we're not using the static value from the wasm config. + self.context + .charge_gas(Gas::new(self.context.emit_message_cost()))?; + // Charge for parameter weights. 
+ self.charge_host_function_call( + &HostFunction::new(0, host_function_costs.emit_message.arguments()), + &[topic_name_ptr, topic_name_size, message_ptr, message_size], + )?; + + let limits = self.context.engine_config().wasm_config().messages_limits(); + + if topic_name_size > limits.max_topic_name_size() { + return Ok(Some(RuntimeValue::I32(api_error::i32_from(Err( + ApiError::MaxTopicNameSizeExceeded, + ))))); + } + + if message_size > limits.max_message_size() { + return Ok(Some(RuntimeValue::I32(api_error::i32_from(Err( + ApiError::MessageTooLarge, + ))))); + } + + let topic_name_bytes = + self.bytes_from_mem(topic_name_ptr, topic_name_size as usize)?; + let topic_name = std::str::from_utf8(&topic_name_bytes) + .map_err(|e| Trap::from(ExecError::InvalidUtf8Encoding(e)))?; + + let message = self.t_from_mem(message_ptr, message_size)?; + + let result = self.emit_message(topic_name, message)?; + if result.is_ok() { + // Increase the cost for the next call to emit a message. + let new_cost = self + .context + .emit_message_cost() + .checked_add(host_function_costs.cost_increase_per_message.into()) + .ok_or(ExecError::GasLimit)?; + self.context.set_emit_message_cost(new_cost); + } + Ok(Some(RuntimeValue::I32(api_error::i32_from(result)))) + } + + FunctionIndex::GetBlockInfoIndex => { + // args(0) = field selector + // args(1) = pointer to output pointer where host will write argument bytes + let (field_idx, dest_ptr): (u8, u32) = Args::parse(args)?; + + self.charge_host_function_call(&host_function_costs.get_block_info, [0u32, 0u32])?; + self.get_block_info(field_idx, dest_ptr)?; + Ok(None) + } + + FunctionIndex::GenericHash => { + // args(0) = pointer to input in Wasm memory + // args(1) = size of input in Wasm memory + // args(2) = integer representation of HashAlgorithm enum variant + // args(3) = pointer to output pointer in Wasm memory + // args(4) = size of output + let (in_ptr, in_size, hash_algo_type, out_ptr, out_size) = Args::parse(args)?; + 
self.charge_host_function_call( + &host_function_costs.generic_hash, + [in_ptr, in_size, hash_algo_type, out_ptr, out_size], + )?; + let hash_algo_type = match HashAlgorithm::try_from(hash_algo_type as u8) { + Ok(v) => v, + Err(_e) => { + return Ok(Some(RuntimeValue::I32(api_error::i32_from(Err( + ApiError::InvalidArgument, + ))))) + } + }; + + let digest = + self.checked_memory_slice(in_ptr as usize, in_size as usize, |input| { + match hash_algo_type { + HashAlgorithm::Blake2b => cryptography::blake2b(input), + HashAlgorithm::Blake3 => cryptography::blake3(input), + HashAlgorithm::Sha256 => cryptography::sha256(input), + } + })?; + + let result = if digest.len() > out_size as usize { + Err(ApiError::BufferTooSmall) + } else { + Ok(()) + }; + + if result.is_err() { + return Ok(Some(RuntimeValue::I32(api_error::i32_from(result)))); + } + + if self.try_get_memory()?.set(out_ptr, &digest).is_err() { + return Ok(Some(RuntimeValue::I32( + u32::from(ApiError::HostBufferEmpty) as i32, + ))); + } + + Ok(Some(RuntimeValue::I32(0))) + } + + FunctionIndex::RecoverSecp256k1 => { + // args(0) = pointer to input bytes in memory + // args(1) = length of input bytes in memory + // args(2) = pointer to signature bytes in memory + // args(3) = length of signature bytes in memory + // args(4) = pointer to public key buffer in memory (size is fixed) + // args(5) = the recovery id + + let ( + data_ptr, + data_size, + signature_ptr, + signature_size, + public_key_ptr, + recovery_id, + ) = Args::parse(args)?; + + self.charge_host_function_call( + &host_function_costs.recover_secp256k1, + [ + data_ptr, + data_size, + signature_ptr, + signature_size, + public_key_ptr, + recovery_id, + ], + )?; + + if recovery_id >= 4 { + return Ok(Some(RuntimeValue::I32( + u32::from(ApiError::InvalidArgument) as i32, + ))); + } + + let data = self.bytes_from_mem(data_ptr, data_size as usize)?; + let signature: Signature = self.t_from_mem(signature_ptr, signature_size)?; + + let Ok(public_key) = + 
casper_types::crypto::recover_secp256k1(data, &signature, recovery_id as u8) + else { + return Ok(Some(RuntimeValue::I32( + u32::from(ApiError::InvalidArgument) as i32, + ))); + }; + + let Ok(key_bytes) = public_key.to_bytes() else { + return Ok(Some(RuntimeValue::I32( + u32::from(ApiError::OutOfMemory) as i32 + ))); + }; + + if self + .try_get_memory()? + .set(public_key_ptr, &key_bytes) + .is_err() + { + return Ok(Some(RuntimeValue::I32( + u32::from(ApiError::HostBufferEmpty) as i32, + ))); + } + + Ok(Some(RuntimeValue::I32(0))) + } + + FunctionIndex::VerifySignature => { + // args(0) = pointer to message bytes in memory + // args(1) = length of message bytes + // args(2) = pointer to signature bytes in memory + // args(3) = length of signature bytes + // args(4) = pointer to public key bytes in memory + // args(5) = length of public key bytes + let ( + message_ptr, + message_size, + signature_ptr, + signature_size, + public_key_ptr, + public_key_size, + ) = Args::parse(args)?; + + self.charge_host_function_call( + &host_function_costs.verify_signature, + [ + message_ptr, + message_size, + signature_ptr, + signature_size, + public_key_ptr, + public_key_size, + ], + )?; + + let message = self.bytes_from_mem(message_ptr, message_size as usize)?; + let signature: Signature = self.t_from_mem(signature_ptr, signature_size)?; + let public_key: PublicKey = self.t_from_mem(public_key_ptr, public_key_size)?; + + if casper_types::crypto::verify(message, &signature, &public_key).is_err() { + return Ok(Some(RuntimeValue::I32( + u32::from(ApiError::InvalidArgument) as i32, + ))); + } + + Ok(Some(RuntimeValue::I32(0))) + } + FunctionIndex::CallPackageVersion => { + // args(0) = pointer to contract_package_hash where contract is at in global state + // args(1) = size of contract_package_hash + // args(2) = pointer to major version in wasm memory + // args(3) = size of major version in wasm memory + // args(3) = pointer to contract version in wasm memory + // args(4) = size of 
contract version in wasm memory + // args(5) = pointer to method name in wasm memory + // args(6) = size of method name in wasm memory + // args(7) = pointer to function arguments in Wasm memory + // args(8) = size of arguments + // args(9) = pointer to result size (output) + let ( + contract_package_hash_ptr, + contract_package_hash_size, + major_version_ptr, + major_version_size, + contract_version_ptr, + contract_version_size, + entry_point_name_ptr, + entry_point_name_size, + args_ptr, + args_size, + result_size_ptr, + ) = Args::parse(args)?; + self.charge_host_function_call( + &host_function_costs.call_package_version, + [ + contract_package_hash_ptr, + contract_package_hash_size, + major_version_ptr, + major_version_size, + contract_version_ptr, + contract_version_size, + entry_point_name_ptr, + entry_point_name_size, + args_ptr, + args_size, + result_size_ptr, + ], + )?; + + let contract_package_hash: PackageHash = + self.t_from_mem(contract_package_hash_ptr, contract_package_hash_size)?; + let contract_version: Option = + self.t_from_mem(contract_version_ptr, contract_version_size)?; + let major_version: Option = + self.t_from_mem(major_version_ptr, major_version_size)?; + let entry_point_name: String = + self.t_from_mem(entry_point_name_ptr, entry_point_name_size)?; + let args_bytes: Vec = { + let args_size: u32 = args_size; + self.bytes_from_mem(args_ptr, args_size as usize)?.to_vec() + }; + + let ret = self.call_package_version_host_buffer( + contract_package_hash, + major_version, + contract_version, + entry_point_name, + &args_bytes, + result_size_ptr, + )?; + Ok(Some(RuntimeValue::I32(api_error::i32_from(ret)))) + } + } + } +} diff --git a/execution_engine/src/runtime/handle_payment_internal.rs b/execution_engine/src/runtime/handle_payment_internal.rs new file mode 100644 index 0000000000..86debe700a --- /dev/null +++ b/execution_engine/src/runtime/handle_payment_internal.rs @@ -0,0 +1,183 @@ +use casper_storage::global_state::{error::Error as 
GlobalStateError, state::StateReader}; +use std::collections::BTreeSet; + +use casper_types::{ + account::AccountHash, addressable_entity::NamedKeyAddr, system::handle_payment::Error, Account, + CLValue, Contract, FeeHandling, Key, Phase, RefundHandling, StoredValue, TransferredTo, URef, + U512, +}; + +use casper_storage::system::handle_payment::{ + mint_provider::MintProvider, runtime_provider::RuntimeProvider, + storage_provider::StorageProvider, HandlePayment, +}; + +use crate::{execution::ExecError, runtime::Runtime}; + +impl From for Option { + fn from(exec_error: ExecError) -> Self { + match exec_error { + // This is used to propagate [`ExecError::GasLimit`] to make sure + // [`HandlePayment`] contract running natively supports propagating gas limit + // errors without a panic. + ExecError::GasLimit => Some(Error::GasLimit), + // There are possibly other exec errors happening but such translation would be lossy. + _ => None, + } + } +} + +impl MintProvider for Runtime<'_, R> +where + R: StateReader, +{ + fn transfer_purse_to_account( + &mut self, + source: URef, + target: AccountHash, + amount: U512, + ) -> Result { + match self.transfer_from_purse_to_account_hash(source, target, amount, None) { + Ok(Ok(transferred_to)) => Ok(transferred_to), + Ok(Err(_mint_error)) => Err(Error::Transfer), + Err(exec_error) => Err(>::from(exec_error).unwrap_or(Error::Transfer)), + } + } + + fn transfer_purse_to_purse( + &mut self, + source: URef, + target: URef, + amount: U512, + ) -> Result<(), Error> { + let contract_hash = match self.get_mint_hash() { + Ok(mint_hash) => mint_hash, + Err(exec_error) => { + return Err(>::from(exec_error).unwrap_or(Error::Transfer)); + } + }; + match self.mint_transfer(contract_hash, None, source, target, amount, None) { + Ok(Ok(_)) => Ok(()), + Ok(Err(_mint_error)) => Err(Error::Transfer), + Err(exec_error) => Err(>::from(exec_error).unwrap_or(Error::Transfer)), + } + } + + fn available_balance(&mut self, purse: URef) -> Result, Error> { + 
Runtime::available_balance(self, purse) + .map_err(|exec_error| >::from(exec_error).unwrap_or(Error::GetBalance)) + } + + fn reduce_total_supply(&mut self, amount: U512) -> Result<(), Error> { + let contract_hash = match self.get_mint_hash() { + Ok(mint_hash) => mint_hash, + Err(exec_error) => { + return Err(>::from(exec_error).unwrap_or(Error::Transfer)); + } + }; + if let Err(exec_error) = self.mint_reduce_total_supply(contract_hash, amount) { + Err(>::from(exec_error).unwrap_or(Error::ReduceTotalSupply)) + } else { + Ok(()) + } + } +} + +impl RuntimeProvider for Runtime<'_, R> +where + R: StateReader, +{ + fn get_key(&mut self, name: &str) -> Option { + match self.context.named_keys_get(name).cloned() { + None => match self.context.get_context_key() { + Key::AddressableEntity(entity_addr) => { + let key = if let Ok(addr) = + NamedKeyAddr::new_from_string(entity_addr, name.to_string()) + { + Key::NamedKey(addr) + } else { + return None; + }; + if let Ok(Some(StoredValue::NamedKey(value))) = self.context.read_gs(&key) { + value.get_key().ok() + } else { + None + } + } + Key::Hash(_) => { + match self + .context + .read_gs_typed::(&self.context.get_context_key()) + { + Ok(contract) => contract.named_keys().get(name).copied(), + Err(_) => None, + } + } + Key::Account(_) => { + match self + .context + .read_gs_typed::(&self.context.get_context_key()) + { + Ok(account) => account.named_keys().get(name).copied(), + Err(_) => None, + } + } + _ => None, + }, + Some(key) => Some(key), + } + } + + fn put_key(&mut self, name: &str, key: Key) -> Result<(), Error> { + self.context + .put_key(name.to_string(), key) + .map_err(|exec_error| >::from(exec_error).unwrap_or(Error::PutKey)) + } + + fn remove_key(&mut self, name: &str) -> Result<(), Error> { + self.context + .remove_key(name) + .map_err(|exec_error| >::from(exec_error).unwrap_or(Error::RemoveKey)) + } + + fn get_phase(&self) -> Phase { + self.context.phase() + } + + fn get_caller(&self) -> AccountHash { + 
self.context.get_initiator() + } + + fn refund_handling(&self) -> RefundHandling { + self.context.engine_config().refund_handling() + } + + fn fee_handling(&self) -> FeeHandling { + self.context.engine_config().fee_handling() + } + + fn administrative_accounts(&self) -> BTreeSet { + self.context + .engine_config() + .administrative_accounts() + .clone() + } +} + +impl StorageProvider for Runtime<'_, R> +where + R: StateReader, +{ + fn write_balance(&mut self, purse_uref: URef, amount: U512) -> Result<(), Error> { + let cl_amount = CLValue::from_t(amount).map_err(|_| Error::Storage)?; + self.context + .metered_write_gs_unsafe(Key::Balance(purse_uref.addr()), cl_amount) + .map_err(|exec_error| >::from(exec_error).unwrap_or(Error::Storage))?; + Ok(()) + } +} + +impl HandlePayment for Runtime<'_, R> where + R: StateReader +{ +} diff --git a/execution_engine/src/runtime/host_function_flag.rs b/execution_engine/src/runtime/host_function_flag.rs new file mode 100644 index 0000000000..79c486177a --- /dev/null +++ b/execution_engine/src/runtime/host_function_flag.rs @@ -0,0 +1,97 @@ +use std::{cell::Cell, rc::Rc}; + +use tracing::error; + +/// A flag to indicate whether the current runtime call is made within the scope of a host function. +/// +/// The flag is backed by an `Rc>`, meaning that clones will all share state. +#[derive(Default, Clone)] +pub(super) struct HostFunctionFlag { + /// A counter which, if non-zero, indicates that the `HostFunctionFlag` is `true`. + counter: Rc>, +} + +impl HostFunctionFlag { + /// Returns `true` if this `HostFunctionFlag` has entered any number of host function scopes + /// without having exited them all. + pub(super) fn is_in_host_function_scope(&self) -> bool { + self.counter.get() != 0 + } + + /// Must be called when entering a host function scope. + /// + /// The returned `ScopedHostFunctionFlag` must be kept alive for the duration of the host + /// function call. 
While at least one such `ScopedHostFunctionFlag` exists, + /// `is_in_host_function_scope()` returns `true`. + #[must_use] + pub(super) fn enter_host_function_scope(&self) -> ScopedHostFunctionFlag { + let new_count = self.counter.get().checked_add(1).unwrap_or_else(|| { + error!("checked_add failure in host function flag counter"); + debug_assert!(false, "checked_add failure in host function flag counter"); + u64::MAX + }); + self.counter.set(new_count); + ScopedHostFunctionFlag { + counter: self.counter.clone(), + } + } +} + +pub(super) struct ScopedHostFunctionFlag { + counter: Rc>, +} + +impl Drop for ScopedHostFunctionFlag { + fn drop(&mut self) { + let new_count = self.counter.get().checked_sub(1).unwrap_or_else(|| { + error!("checked_sub failure in host function flag counter"); + debug_assert!(false, "checked_sub failure in host function flag counter"); + 0 + }); + self.counter.set(new_count); + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn should_handle_multiple_scopes() { + let flag = HostFunctionFlag::default(); + assert!(!flag.is_in_host_function_scope()); + + { + let _outer_scope = flag.enter_host_function_scope(); + assert_eq!(flag.counter.get(), 1); + assert!(flag.is_in_host_function_scope()); + + { + let _inner_scope = flag.enter_host_function_scope(); + assert_eq!(flag.counter.get(), 2); + assert!(flag.is_in_host_function_scope()); + } + + assert_eq!(flag.counter.get(), 1); + assert!(flag.is_in_host_function_scope()); + + { + let cloned_flag = flag.clone(); + assert_eq!(cloned_flag.counter.get(), 1); + assert!(cloned_flag.is_in_host_function_scope()); + assert!(flag.is_in_host_function_scope()); + + let _inner_scope = cloned_flag.enter_host_function_scope(); + assert_eq!(cloned_flag.counter.get(), 2); + assert!(cloned_flag.is_in_host_function_scope()); + assert!(flag.is_in_host_function_scope()); + } + + assert_eq!(flag.counter.get(), 1); + assert!(flag.is_in_host_function_scope()); + } + + assert_eq!(flag.counter.get(), 0); + 
assert!(!flag.is_in_host_function_scope()); + } +} diff --git a/execution_engine/src/runtime/mint_internal.rs b/execution_engine/src/runtime/mint_internal.rs new file mode 100644 index 0000000000..375197fb06 --- /dev/null +++ b/execution_engine/src/runtime/mint_internal.rs @@ -0,0 +1,206 @@ +use tracing::error; + +use casper_storage::{ + global_state::{error::Error as GlobalStateError, state::StateReader}, + system::{ + error::ProviderError, + mint::{ + runtime_provider::RuntimeProvider, storage_provider::StorageProvider, + system_provider::SystemProvider, Mint, + }, + }, +}; +use casper_types::{ + account::AccountHash, + bytesrepr::{FromBytes, ToBytes}, + system::{mint::Error, Caller}, + CLTyped, CLValue, Key, Phase, RuntimeFootprint, StoredValue, SystemHashRegistry, URef, U512, +}; + +use super::Runtime; +use crate::execution::ExecError; + +impl From for Option { + fn from(exec_error: ExecError) -> Self { + match exec_error { + // This is used to propagate [`ExecError::GasLimit`] to make sure [`Mint`] + // contract running natively supports propagating gas limit errors without a panic. + ExecError::GasLimit => Some(Error::GasLimit), + ExecError::ForgedReference(_) => Some(Error::ForgedReference), + // There are possibly other exec errors happening but such translation would be lossy. 
+ _ => None, + } + } +} + +impl RuntimeProvider for Runtime<'_, R> +where + R: StateReader, +{ + fn get_caller(&self) -> AccountHash { + self.context.get_initiator() + } + + fn get_immediate_caller(&self) -> Option { + Runtime::<'_, R>::get_immediate_caller(self).cloned() + } + + fn is_called_from_standard_payment(&self) -> bool { + self.context.phase() == Phase::Payment && self.module.is_none() + } + + fn get_system_entity_registry(&self) -> Result { + self.context.system_entity_registry().map_err(|err| { + error!(%err, "unable to obtain system entity registry during transfer"); + ProviderError::SystemEntityRegistry + }) + } + + fn runtime_footprint_by_account_hash( + &mut self, + account_hash: AccountHash, + ) -> Result, ProviderError> { + self.context + .runtime_footprint_by_account_hash(account_hash) + .map_err(|err| { + error!(%err, "error getting runtime footprint by account hash"); + ProviderError::AccountHash(account_hash) + }) + } + + fn get_phase(&self) -> Phase { + self.context.phase() + } + + fn get_key(&self, name: &str) -> Option { + self.context.named_keys_get(name).cloned() + } + + fn get_approved_spending_limit(&self) -> U512 { + self.context.remaining_spending_limit() + } + + fn sub_approved_spending_limit(&mut self, transferred: U512) { + // We're ignoring the result here since we always check first + // if there is still enough spending limit left. + self.context.subtract_amount_spent(transferred); + } + + fn get_main_purse(&self) -> Option { + self.context.runtime_footprint().borrow().main_purse() + } + + fn is_administrator(&self, account_hash: &AccountHash) -> bool { + self.context.engine_config().is_administrator(account_hash) + } + + fn allow_unrestricted_transfers(&self) -> bool { + self.context.engine_config().allow_unrestricted_transfers() + } + + /// Validate URef against context access rights. 
+ fn is_valid_uref(&self, uref: &URef) -> bool { + self.context.access_rights().has_access_rights_to_uref(uref) + } +} + +impl StorageProvider for Runtime<'_, R> +where + R: StateReader, +{ + fn new_uref(&mut self, init: T) -> Result { + let cl_value: CLValue = CLValue::from_t(init).map_err(|_| Error::CLValue)?; + self.context + .new_uref(StoredValue::CLValue(cl_value)) + .map_err(|exec_error| >::from(exec_error).unwrap_or(Error::NewURef)) + } + + fn read(&mut self, uref: URef) -> Result, Error> { + let maybe_value = self + .context + .read_gs(&Key::URef(uref)) + .map_err(|exec_error| >::from(exec_error).unwrap_or(Error::Storage))?; + match maybe_value { + Some(StoredValue::CLValue(value)) => { + let value = CLValue::into_t(value).map_err(|_| Error::CLValue)?; + Ok(Some(value)) + } + Some(_cl_value) => Err(Error::CLValue), + None => Ok(None), + } + } + + fn write_amount(&mut self, uref: URef, amount: U512) -> Result<(), Error> { + let cl_value = CLValue::from_t(amount).map_err(|_| Error::CLValue)?; + self.context + .metered_write_gs(Key::URef(uref), StoredValue::CLValue(cl_value)) + .map_err(|exec_error| >::from(exec_error).unwrap_or(Error::Storage)) + } + + fn add(&mut self, uref: URef, value: T) -> Result<(), Error> { + let cl_value = CLValue::from_t(value).map_err(|_| Error::CLValue)?; + self.context + .metered_add_gs(uref, cl_value) + .map_err(|exec_error| >::from(exec_error).unwrap_or(Error::Storage)) + } + + fn total_balance(&mut self, purse: URef) -> Result { + Runtime::total_balance(self, purse) + .map_err(|exec_error| >::from(exec_error).unwrap_or(Error::Storage)) + } + + fn available_balance(&mut self, purse: URef) -> Result, Error> { + Runtime::available_balance(self, purse) + .map_err(|exec_error| >::from(exec_error).unwrap_or(Error::Storage)) + } + + fn write_balance(&mut self, uref: URef, balance: U512) -> Result<(), Error> { + let cl_value = CLValue::from_t(balance).map_err(|_| Error::CLValue)?; + self.context + 
.metered_write_gs_unsafe(Key::Balance(uref.addr()), StoredValue::CLValue(cl_value)) + .map_err(|exec_error| >::from(exec_error).unwrap_or(Error::Storage)) + } + + fn add_balance(&mut self, uref: URef, value: U512) -> Result<(), Error> { + let cl_value = CLValue::from_t(value).map_err(|_| Error::CLValue)?; + self.context + .metered_add_gs_unsafe(Key::Balance(uref.addr()), StoredValue::CLValue(cl_value)) + .map_err(|exec_error| >::from(exec_error).unwrap_or(Error::Storage)) + } +} + +impl SystemProvider for Runtime<'_, R> +where + R: StateReader, +{ + fn record_transfer( + &mut self, + maybe_to: Option, + source: URef, + target: URef, + amount: U512, + id: Option, + ) -> Result<(), Error> { + let result = Runtime::record_transfer(self, maybe_to, source, target, amount, id); + result.map_err(|exec_error| { + >::from(exec_error).unwrap_or(Error::RecordTransferFailure) + }) + } +} + +impl Mint for Runtime<'_, R> +where + R: StateReader, +{ + fn purse_exists(&mut self, uref: URef) -> Result { + let maybe_value = self + .context + .read_gs(&Key::Balance(uref.addr())) + .map_err(|exec_error| >::from(exec_error).unwrap_or(Error::Storage))?; + match maybe_value { + Some(StoredValue::CLValue(value)) => Ok(*value.cl_type() == U512::cl_type()), + Some(_non_cl_value) => Err(Error::CLValue), + None => Ok(false), + } + } +} diff --git a/execution_engine/src/runtime/mod.rs b/execution_engine/src/runtime/mod.rs new file mode 100644 index 0000000000..f75c66b8a0 --- /dev/null +++ b/execution_engine/src/runtime/mod.rs @@ -0,0 +1,4662 @@ +//! This module contains executor state of the WASM code. 
+mod args; +mod auction_internal; +pub mod cryptography; +mod externals; +mod handle_payment_internal; +mod host_function_flag; +mod mint_internal; +pub mod stack; +mod utils; +pub(crate) mod wasm_prep; + +use std::{ + cmp, + collections::{BTreeMap, BTreeSet}, + convert::{TryFrom, TryInto}, + iter::FromIterator, +}; + +use casper_wasm::elements::Module; +use casper_wasmi::{MemoryRef, Trap, TrapCode}; +use tracing::{debug, error, warn}; + +#[cfg(feature = "test-support")] +use casper_wasmi::RuntimeValue; +use itertools::Itertools; +use num_rational::Ratio; + +use casper_storage::{ + global_state::{error::Error as GlobalStateError, state::StateReader}, + system::{auction::Auction, handle_payment::HandlePayment, mint::Mint}, + tracking_copy::TrackingCopyExt, +}; +use casper_types::{ + account::{ + Account, AccountHash, AddKeyFailure, RemoveKeyFailure, SetThresholdFailure, + UpdateKeyFailure, + }, + addressable_entity::{ + self, ActionThresholds, ActionType, AddressableEntity, AddressableEntityHash, + AssociatedKeys, ContractRuntimeTag, EntityEntryPoint, EntryPointAccess, EntryPointType, + EntryPoints, MessageTopicError, MessageTopics, NamedKeyAddr, NamedKeyValue, Parameter, + Weight, DEFAULT_ENTRY_POINT_NAME, + }, + bytesrepr::{self, Bytes, FromBytes, ToBytes}, + contract_messages::{ + Message, MessageAddr, MessagePayload, MessageTopicOperation, MessageTopicSummary, + }, + contracts::{ + ContractHash, ContractPackage, ContractPackageHash, ContractPackageStatus, + ContractVersions, DisabledVersions, NamedKeys, ProtocolVersionMajor, + }, + system::{ + self, + auction::{self, DelegatorKind, EraInfo}, + handle_payment, mint, CallStackElement, Caller, CallerInfo, SystemEntityType, AUCTION, + HANDLE_PAYMENT, MINT, STANDARD_PAYMENT, + }, + AccessRights, ApiError, BlockGlobalAddr, BlockTime, ByteCode, ByteCodeAddr, ByteCodeHash, + ByteCodeKind, CLTyped, CLValue, ContextAccessRights, Contract, ContractWasm, EntityAddr, + EntityKind, EntityVersion, EntityVersionKey, 
EntityVersions, Gas, GrantedAccess, Group, Groups, + HashAddr, HostFunction, HostFunctionCost, InitiatorAddr, Key, NamedArg, Package, PackageHash, + PackageStatus, Phase, PublicKey, RuntimeArgs, RuntimeFootprint, StoredValue, Transfer, + TransferResult, TransferV2, TransferredTo, URef, DICTIONARY_ITEM_KEY_MAX_LENGTH, U512, +}; + +use crate::{ + execution::ExecError, runtime::host_function_flag::HostFunctionFlag, + runtime_context::RuntimeContext, +}; +pub use stack::{RuntimeStack, RuntimeStackFrame, RuntimeStackOverflow}; +pub use wasm_prep::{ + cycles_for_instruction, preprocess, PreprocessingError, WasmValidationError, + DEFAULT_BR_TABLE_MAX_SIZE, DEFAULT_MAX_GLOBALS, DEFAULT_MAX_PARAMETER_COUNT, + DEFAULT_MAX_TABLE_SIZE, +}; + +#[derive(Debug)] +enum CallContractIdentifier { + Contract { + contract_hash: HashAddr, + }, + ContractPackage { + contract_package_hash: HashAddr, + version: Option, + protocol_version_major: Option, + }, +} + +#[repr(u8)] +enum CallerInformation { + Initiator = 0, + Immediate = 1, + FullCallChain = 2, +} + +impl TryFrom for CallerInformation { + type Error = ApiError; + + fn try_from(value: u8) -> Result { + match value { + 0 => Ok(CallerInformation::Initiator), + 1 => Ok(CallerInformation::Immediate), + 2 => Ok(CallerInformation::FullCallChain), + _ => Err(ApiError::InvalidCallerInfoRequest), + } + } +} + +/// Represents the runtime properties of a WASM execution. +pub struct Runtime<'a, R> { + context: RuntimeContext<'a, R>, + memory: Option, + module: Option, + host_buffer: Option, + stack: Option, + host_function_flag: HostFunctionFlag, +} + +impl<'a, R> Runtime<'a, R> +where + R: StateReader, +{ + /// Creates a new runtime instance. 
+ pub(crate) fn new(context: RuntimeContext<'a, R>) -> Self { + Runtime { + context, + memory: None, + module: None, + host_buffer: None, + stack: None, + host_function_flag: HostFunctionFlag::default(), + } + } + + /// Creates a new runtime instance by cloning the config, and host function flag from `self`. + fn new_invocation_runtime( + &self, + context: RuntimeContext<'a, R>, + module: Module, + memory: MemoryRef, + stack: RuntimeStack, + ) -> Self { + Self::check_preconditions(&stack); + Runtime { + context, + memory: Some(memory), + module: Some(module), + host_buffer: None, + stack: Some(stack), + host_function_flag: self.host_function_flag.clone(), + } + } + + /// Creates a new runtime instance with a stack from `self`. + pub(crate) fn new_with_stack( + &self, + context: RuntimeContext<'a, R>, + stack: RuntimeStack, + ) -> Self { + Self::check_preconditions(&stack); + Runtime { + context, + memory: None, + module: None, + host_buffer: None, + stack: Some(stack), + host_function_flag: self.host_function_flag.clone(), + } + } + + /// Preconditions that would render the system inconsistent if violated. Those are strictly + /// programming errors. + fn check_preconditions(stack: &RuntimeStack) { + if stack.is_empty() { + error!("Call stack should not be empty while creating a new Runtime instance"); + debug_assert!(false); + } + + if stack.first_frame().unwrap().contract_hash().is_some() { + error!("First element of the call stack should always represent a Session call"); + debug_assert!(false); + } + } + + /// Returns the context. + pub(crate) fn context(&self) -> &RuntimeContext<'a, R> { + &self.context + } + + fn gas(&mut self, amount: Gas) -> Result<(), ExecError> { + self.context.charge_gas(amount) + } + + /// Returns current gas counter. + fn gas_counter(&self) -> Gas { + self.context.gas_counter() + } + + /// Sets new gas counter value. 
+ fn set_gas_counter(&mut self, new_gas_counter: Gas) { + self.context.set_gas_counter(new_gas_counter); + } + + /// Charge for a system contract call. + /// + /// This method does not charge for system contract calls if the immediate caller is a system + /// contract or if we're currently within the scope of a host function call. This avoids + /// misleading gas charges if one system contract calls other system contract (e.g. auction + /// contract calls into mint to create new purses). + pub(crate) fn charge_system_contract_call(&mut self, amount: T) -> Result<(), ExecError> + where + T: Into, + { + if self.is_system_immediate_caller()? || self.host_function_flag.is_in_host_function_scope() + { + return Ok(()); + } + + self.context.charge_system_contract_call(amount) + } + + fn checked_memory_slice( + &self, + offset: usize, + size: usize, + func: impl FnOnce(&[u8]) -> Ret, + ) -> Result { + // This is mostly copied from a private function `MemoryInstance::checked_memory_region` + // that calls a user defined function with a validated slice of memory. This allows + // usage patterns that does not involve copying data onto heap first i.e. deserialize + // values without copying data first, etc. + // NOTE: Depending on the VM backend used in future, this may change, as not all VMs may + // support direct memory access. + self.try_get_memory()? + .with_direct_access(|buffer| { + let end = offset.checked_add(size).ok_or_else(|| { + casper_wasmi::Error::Memory(format!( + "trying to access memory block of size {} from offset {}", + size, offset + )) + })?; + + if end > buffer.len() { + return Err(casper_wasmi::Error::Memory(format!( + "trying to access region [{}..{}] in memory [0..{}]", + offset, + end, + buffer.len(), + ))); + } + + Ok(func(&buffer[offset..end])) + }) + .map_err(Into::into) + } + + /// Returns bytes from the WASM memory instance. 
+ #[inline] + fn bytes_from_mem(&self, ptr: u32, size: usize) -> Result, ExecError> { + self.checked_memory_slice(ptr as usize, size, |data| data.to_vec()) + } + + /// Returns a deserialized type from the WASM memory instance. + #[inline] + fn t_from_mem(&self, ptr: u32, size: u32) -> Result { + let result = self.checked_memory_slice(ptr as usize, size as usize, |data| { + bytesrepr::deserialize_from_slice(data) + })?; + Ok(result?) + } + + /// Reads key (defined as `key_ptr` and `key_size` tuple) from Wasm memory. + #[inline] + fn key_from_mem(&mut self, key_ptr: u32, key_size: u32) -> Result { + self.t_from_mem(key_ptr, key_size) + } + + /// Reads `CLValue` (defined as `cl_value_ptr` and `cl_value_size` tuple) from Wasm memory. + #[inline] + fn cl_value_from_mem( + &mut self, + cl_value_ptr: u32, + cl_value_size: u32, + ) -> Result { + self.t_from_mem(cl_value_ptr, cl_value_size) + } + + /// Returns a deserialized string from the WASM memory instance. + #[inline] + fn string_from_mem(&self, ptr: u32, size: u32) -> Result { + self.t_from_mem(ptr, size).map_err(Trap::from) + } + + fn get_module_from_entry_points( + &mut self, + entry_points: &EntryPoints, + ) -> Result, ExecError> { + let module = self.try_get_module()?.clone(); + let entry_point_names: Vec<&str> = entry_points.keys().map(|s| s.as_str()).collect(); + let module_bytes = wasm_prep::get_module_from_entry_points(entry_point_names, module)?; + Ok(module_bytes) + } + + #[allow(clippy::wrong_self_convention)] + fn is_valid_uref(&self, uref_ptr: u32, uref_size: u32) -> Result { + let uref: URef = self.t_from_mem(uref_ptr, uref_size)?; + Ok(self.context.validate_uref(&uref).is_ok()) + } + + /// Load the uref known by the given name into the Wasm memory + fn load_key( + &mut self, + name_ptr: u32, + name_size: u32, + output_ptr: u32, + output_size: usize, + bytes_written_ptr: u32, + ) -> Result, Trap> { + let name = self.string_from_mem(name_ptr, name_size)?; + + // Get a key and serialize it + let key = 
match self.context.named_keys_get(&name) { + Some(key) => key, + None => { + return Ok(Err(ApiError::MissingKey)); + } + }; + + let key_bytes = match key.to_bytes() { + Ok(bytes) => bytes, + Err(error) => return Ok(Err(error.into())), + }; + + // `output_size` has to be greater or equal to the actual length of serialized Key bytes + if output_size < key_bytes.len() { + return Ok(Err(ApiError::BufferTooSmall)); + } + + // Set serialized Key bytes into the output buffer + if let Err(error) = self.try_get_memory()?.set(output_ptr, &key_bytes) { + return Err(ExecError::Interpreter(error.into()).into()); + } + + // SAFETY: For all practical purposes following conversion is assumed to be safe + let bytes_size: u32 = key_bytes + .len() + .try_into() + .expect("Keys should not serialize to many bytes"); + let size_bytes = bytes_size.to_le_bytes(); // Wasm is little-endian + if let Err(error) = self.try_get_memory()?.set(bytes_written_ptr, &size_bytes) { + return Err(ExecError::Interpreter(error.into()).into()); + } + + Ok(Ok(())) + } + + fn has_key(&mut self, name_ptr: u32, name_size: u32) -> Result { + let name = self.string_from_mem(name_ptr, name_size)?; + if self.context.named_keys_contains_key(&name) { + Ok(0) + } else { + Ok(1) + } + } + + fn put_key( + &mut self, + name_ptr: u32, + name_size: u32, + key_ptr: u32, + key_size: u32, + ) -> Result<(), Trap> { + let name = self.string_from_mem(name_ptr, name_size)?; + let key = self.key_from_mem(key_ptr, key_size)?; + + if let Some(payment_purse) = self.context.maybe_payment_purse() { + if Key::URef(payment_purse).normalize() == key.normalize() { + warn!("attempt to put_key payment purse"); + return Err(Into::into(ExecError::Revert(ApiError::HandlePayment( + handle_payment::Error::AttemptToPersistPaymentPurse as u8, + )))); + } + } + self.context.put_key(name, key).map_err(Into::into) + } + + fn remove_key(&mut self, name_ptr: u32, name_size: u32) -> Result<(), Trap> { + let name = self.string_from_mem(name_ptr, 
name_size)?; + self.context.remove_key(&name)?; + Ok(()) + } + + /// Writes runtime context's account main purse to dest_ptr in the Wasm memory. + fn get_main_purse(&mut self, dest_ptr: u32) -> Result<(), Trap> { + let purse = self.context.get_main_purse()?; + let purse_bytes = purse.into_bytes().map_err(ExecError::BytesRepr)?; + self.try_get_memory()? + .set(dest_ptr, &purse_bytes) + .map_err(|e| ExecError::Interpreter(e.into()).into()) + } + + /// Writes caller (deploy) account public key to output_size_ptr in the Wasm + /// memory. + fn get_caller(&mut self, output_size_ptr: u32) -> Result, Trap> { + if !self.can_write_to_host_buffer() { + // Exit early if the host buffer is already occupied + return Ok(Err(ApiError::HostBufferFull)); + } + let value = CLValue::from_t(self.context.get_initiator()).map_err(ExecError::CLValue)?; + let value_size = value.inner_bytes().len(); + + // Save serialized public key into host buffer + if let Err(error) = self.write_host_buffer(value) { + return Ok(Err(error)); + } + + // Write output + let output_size_bytes = value_size.to_le_bytes(); // Wasm is little-endian + if let Err(error) = self + .try_get_memory()? + .set(output_size_ptr, &output_size_bytes) + { + return Err(ExecError::Interpreter(error.into()).into()); + } + Ok(Ok(())) + } + + /// Gets the immediate caller of the current execution + fn get_immediate_caller(&self) -> Option<&RuntimeStackFrame> { + self.stack.as_ref().and_then(|stack| stack.previous_frame()) + } + + /// Checks if immediate caller is of session type of the same account as the provided account + /// hash. + fn is_allowed_session_caller(&self, provided_account_hash: &AccountHash) -> bool { + if self.context.get_initiator() == PublicKey::System.to_account_hash() { + return true; + } + + if let Some(Caller::Initiator { account_hash }) = self.get_immediate_caller() { + return account_hash == provided_account_hash; + } + false + } + + /// Writes runtime context's phase to dest_ptr in the Wasm memory. 
+ fn get_phase(&mut self, dest_ptr: u32) -> Result<(), Trap> { + let phase = self.context.phase(); + let bytes = phase.into_bytes().map_err(ExecError::BytesRepr)?; + self.try_get_memory()? + .set(dest_ptr, &bytes) + .map_err(|e| ExecError::Interpreter(e.into()).into()) + } + + /// Writes requested field from runtime context's block info to dest_ptr in the Wasm memory. + fn get_block_info(&self, field_idx: u8, dest_ptr: u32) -> Result<(), Trap> { + if field_idx == 0 { + // original functionality + return self.get_blocktime(dest_ptr); + } + let block_info = self.context.get_block_info(); + + let mut data: Vec = vec![]; + if field_idx == 1 { + data = block_info + .block_height() + .into_bytes() + .map_err(ExecError::BytesRepr)?; + } + if field_idx == 2 { + data = block_info + .parent_block_hash() + .into_bytes() + .map_err(ExecError::BytesRepr)?; + } + if field_idx == 3 { + data = block_info + .state_hash() + .into_bytes() + .map_err(ExecError::BytesRepr)?; + } + if field_idx == 4 { + data = self + .context + .protocol_version() + .into_bytes() + .map_err(ExecError::BytesRepr)?; + } + if field_idx == 5 { + data = self + .context + .engine_config() + .enable_entity + .into_bytes() + .map_err(ExecError::BytesRepr)?; + } + if data.is_empty() { + Err(ExecError::InvalidImputedOperation.into()) + } else { + Ok(self + .try_get_memory()? + .set(dest_ptr, &data) + .map_err(|e| ExecError::Interpreter(e.into()))?) + } + } + + /// Writes current blocktime to dest_ptr in Wasm memory. + fn get_blocktime(&self, dest_ptr: u32) -> Result<(), Trap> { + let block_info = self.context.get_block_info(); + let blocktime = block_info + .block_time() + .into_bytes() + .map_err(ExecError::BytesRepr)?; + self.try_get_memory()? + .set(dest_ptr, &blocktime) + .map_err(|e| ExecError::Interpreter(e.into()).into()) + } + + /// Load the uref known by the given name into the Wasm memory + fn load_call_stack( + &mut self, + // (Output) Pointer to number of elements in the call stack. 
+ call_stack_len_ptr: u32, + // (Output) Pointer to size in bytes of the serialized call stack. + result_size_ptr: u32, + ) -> Result, Trap> { + if !self.can_write_to_host_buffer() { + // Exit early if the host buffer is already occupied + return Ok(Err(ApiError::HostBufferFull)); + } + let call_stack: Vec = match self.try_get_stack() { + Ok(stack) => { + let caller = stack.call_stack_elements(); + caller.iter().map_into().collect_vec() + } + Err(_error) => return Ok(Err(ApiError::Unhandled)), + }; + let call_stack_len: u32 = match call_stack.len().try_into() { + Ok(value) => value, + Err(_) => return Ok(Err(ApiError::OutOfMemory)), + }; + let call_stack_len_bytes = call_stack_len.to_le_bytes(); + + if let Err(error) = self + .try_get_memory()? + .set(call_stack_len_ptr, &call_stack_len_bytes) + { + return Err(ExecError::Interpreter(error.into()).into()); + } + + if call_stack_len == 0 { + return Ok(Ok(())); + } + + let call_stack_cl_value = CLValue::from_t(call_stack).map_err(ExecError::CLValue)?; + + let call_stack_cl_value_bytes_len: u32 = + match call_stack_cl_value.inner_bytes().len().try_into() { + Ok(value) => value, + Err(_) => return Ok(Err(ApiError::OutOfMemory)), + }; + + if let Err(error) = self.write_host_buffer(call_stack_cl_value) { + return Ok(Err(error)); + } + + let call_stack_cl_value_bytes_len_bytes = call_stack_cl_value_bytes_len.to_le_bytes(); + + if let Err(error) = self + .try_get_memory()? + .set(result_size_ptr, &call_stack_cl_value_bytes_len_bytes) + { + return Err(ExecError::Interpreter(error.into()).into()); + } + + Ok(Ok(())) + } + + /// Returns information about the call stack based on a given action. + fn load_caller_information( + &mut self, + information: u8, + // (Output) Pointer to number of elements in the call stack. + call_stack_len_ptr: u32, + // (Output) Pointer to size in bytes of the serialized call stack. 
+ result_size_ptr: u32, + ) -> Result, Trap> { + if !self.can_write_to_host_buffer() { + // Exit early if the host buffer is already occupied + return Ok(Err(ApiError::HostBufferFull)); + } + + let caller_info = match CallerInformation::try_from(information) { + Ok(info) => info, + Err(error) => return Ok(Err(error)), + }; + + let caller = match caller_info { + CallerInformation::Initiator => { + let initiator_account_hash = self.context.get_initiator(); + let caller = Caller::initiator(initiator_account_hash); + match CallerInfo::try_from(caller) { + Ok(caller_info) => { + vec![caller_info] + } + Err(_) => return Ok(Err(ApiError::CLTypeMismatch)), + } + } + CallerInformation::Immediate => match self.get_immediate_caller() { + Some(frame) => match CallerInfo::try_from(*frame) { + Ok(immediate_info) => { + vec![immediate_info] + } + Err(_) => return Ok(Err(ApiError::CLTypeMismatch)), + }, + None => return Ok(Err(ApiError::Unhandled)), + }, + CallerInformation::FullCallChain => match self.try_get_stack() { + Ok(call_stack) => { + let call_stack = call_stack.call_stack_elements().clone(); + + let mut ret = vec![]; + for caller in call_stack { + match CallerInfo::try_from(caller) { + Ok(info) => ret.push(info), + Err(_) => return Ok(Err(ApiError::CLTypeMismatch)), + } + } + ret + } + Err(_) => return Ok(Err(ApiError::Unhandled)), + }, + }; + + let call_stack_len: u32 = match caller.len().try_into() { + Ok(value) => value, + Err(_) => return Ok(Err(ApiError::OutOfMemory)), + }; + let call_stack_len_bytes = call_stack_len.to_le_bytes(); + + if let Err(error) = self + .try_get_memory()? 
+ .set(call_stack_len_ptr, &call_stack_len_bytes) + { + return Err(ExecError::Interpreter(error.into()).into()); + } + + if call_stack_len == 0 { + return Ok(Ok(())); + } + + let call_stack_cl_value = CLValue::from_t(caller).map_err(ExecError::CLValue)?; + + let call_stack_cl_value_bytes_len: u32 = + match call_stack_cl_value.inner_bytes().len().try_into() { + Ok(value) => value, + Err(_) => return Ok(Err(ApiError::OutOfMemory)), + }; + + if let Err(error) = self.write_host_buffer(call_stack_cl_value) { + return Ok(Err(error)); + } + + let call_stack_cl_value_bytes_len_bytes = call_stack_cl_value_bytes_len.to_le_bytes(); + + if let Err(error) = self + .try_get_memory()? + .set(result_size_ptr, &call_stack_cl_value_bytes_len_bytes) + { + return Err(ExecError::Interpreter(error.into()).into()); + } + + Ok(Ok(())) + } + + /// Return some bytes from the memory and terminate the current `sub_call`. Note that the return + /// type is `Trap`, indicating that this function will always kill the current Wasm instance. + fn ret(&mut self, value_ptr: u32, value_size: usize) -> Trap { + self.host_buffer = None; + + let mem_get = + self.checked_memory_slice(value_ptr as usize, value_size, |data| data.to_vec()); + + match mem_get { + Ok(buf) => { + // Set the result field in the runtime and return the proper element of the `Error` + // enum indicating that the reason for exiting the module was a call to ret. + self.host_buffer = bytesrepr::deserialize_from_slice(buf).ok(); + + let urefs = match &self.host_buffer { + Some(buf) => utils::extract_urefs(buf), + None => Ok(vec![]), + }; + match urefs { + Ok(urefs) => { + for uref in &urefs { + if let Err(error) = self.context.validate_uref(uref) { + return Trap::from(error); + } + } + ExecError::Ret(urefs).into() + } + Err(e) => e.into(), + } + } + Err(e) => e.into(), + } + } + + /// Checks if a [`HashAddr`] corresponds to a system contract. 
+ fn is_system_contract(&self, hash_addr: HashAddr) -> Result { + self.context.is_system_addressable_entity(&hash_addr) + } + + fn get_named_argument( + args: &RuntimeArgs, + name: &str, + ) -> Result { + let arg: CLValue = args + .get(name) + .cloned() + .ok_or(ExecError::Revert(ApiError::MissingArgument))?; + arg.into_t() + .map_err(|_| ExecError::Revert(ApiError::InvalidArgument)) + } + + fn try_get_named_argument( + args: &RuntimeArgs, + name: &str, + ) -> Result, ExecError> { + match args.get(name) { + Some(arg) => { + let arg = arg + .clone() + .into_t() + .map_err(|_| ExecError::Revert(ApiError::InvalidArgument))?; + Ok(Some(arg)) + } + None => Ok(None), + } + } + + fn reverter>(error: T) -> ExecError { + let api_error: ApiError = error.into(); + // NOTE: This is special casing needed to keep the native system contracts propagate + // GasLimit properly to the user. Once support for wasm system contract will be dropped this + // won't be necessary anymore. + match api_error { + ApiError::Mint(mint_error) if mint_error == mint::Error::GasLimit as u8 => { + ExecError::GasLimit + } + ApiError::AuctionError(auction_error) + if auction_error == auction::Error::GasLimit as u8 => + { + ExecError::GasLimit + } + ApiError::HandlePayment(handle_payment_error) + if handle_payment_error == handle_payment::Error::GasLimit as u8 => + { + ExecError::GasLimit + } + api_error => ExecError::Revert(api_error), + } + } + + /// Calls host mint contract. 
+ fn call_host_mint( + &mut self, + entry_point_name: &str, + runtime_args: &RuntimeArgs, + access_rights: ContextAccessRights, + stack: RuntimeStack, + ) -> Result { + let gas_counter = self.gas_counter(); + + let mint_hash = self.context.get_system_contract(MINT)?; + let mint_addr = EntityAddr::new_system(mint_hash.value()); + let mint_key = if self.context.engine_config().enable_entity { + Key::AddressableEntity(EntityAddr::System(mint_hash.value())) + } else { + Key::Hash(mint_hash.value()) + }; + + let mint_named_keys = self + .context + .state() + .borrow_mut() + .get_named_keys(mint_addr)?; + + let mut named_keys = mint_named_keys; + + let runtime_context = self.context.new_from_self( + mint_key, + EntryPointType::Called, + &mut named_keys, + access_rights, + runtime_args.to_owned(), + ); + + let mut mint_runtime = self.new_with_stack(runtime_context, stack); + + let engine_config = self.context.engine_config(); + let system_config = engine_config.system_config(); + let mint_costs = system_config.mint_costs(); + + let result = match entry_point_name { + // Type: `fn mint(amount: U512) -> Result` + mint::METHOD_MINT => (|| { + mint_runtime.charge_system_contract_call(mint_costs.mint)?; + + let amount: U512 = Self::get_named_argument(runtime_args, mint::ARG_AMOUNT)?; + let result: Result = mint_runtime.mint(amount); + if let Err(mint::Error::GasLimit) = result { + return Err(ExecError::GasLimit); + } + CLValue::from_t(result).map_err(Self::reverter) + })(), + mint::METHOD_REDUCE_TOTAL_SUPPLY => (|| { + mint_runtime.charge_system_contract_call(mint_costs.reduce_total_supply)?; + + let amount: U512 = Self::get_named_argument(runtime_args, mint::ARG_AMOUNT)?; + let result: Result<(), mint::Error> = mint_runtime.reduce_total_supply(amount); + CLValue::from_t(result).map_err(Self::reverter) + })(), + mint::METHOD_BURN => (|| { + mint_runtime.charge_system_contract_call(mint_costs.burn)?; + + let purse: URef = Self::get_named_argument(runtime_args, 
mint::ARG_PURSE)?; + let amount: U512 = Self::get_named_argument(runtime_args, mint::ARG_AMOUNT)?; + let result: Result<(), mint::Error> = mint_runtime.burn(purse, amount); + CLValue::from_t(result).map_err(Self::reverter) + })(), + // Type: `fn create() -> URef` + mint::METHOD_CREATE => (|| { + mint_runtime.charge_system_contract_call(mint_costs.create)?; + + let uref = mint_runtime.mint(U512::zero()).map_err(Self::reverter)?; + CLValue::from_t(uref).map_err(Self::reverter) + })(), + // Type: `fn balance(purse: URef) -> Option` + mint::METHOD_BALANCE => (|| { + mint_runtime.charge_system_contract_call(mint_costs.balance)?; + + let uref: URef = Self::get_named_argument(runtime_args, mint::ARG_PURSE)?; + + let maybe_balance: Option = + mint_runtime.balance(uref).map_err(Self::reverter)?; + CLValue::from_t(maybe_balance).map_err(Self::reverter) + })(), + // Type: `fn transfer(maybe_to: Option, source: URef, target: URef, amount: + // U512, id: Option) -> Result<(), ExecError>` + mint::METHOD_TRANSFER => (|| { + mint_runtime.charge_system_contract_call(mint_costs.transfer)?; + + let maybe_to: Option = + Self::get_named_argument(runtime_args, mint::ARG_TO)?; + let source: URef = Self::get_named_argument(runtime_args, mint::ARG_SOURCE)?; + let target: URef = Self::get_named_argument(runtime_args, mint::ARG_TARGET)?; + let amount: U512 = Self::get_named_argument(runtime_args, mint::ARG_AMOUNT)?; + let id: Option = Self::get_named_argument(runtime_args, mint::ARG_ID)?; + let result: Result<(), mint::Error> = + mint_runtime.transfer(maybe_to, source, target, amount, id); + + CLValue::from_t(result).map_err(Self::reverter) + })(), + // Type: `fn read_base_round_reward() -> Result` + mint::METHOD_READ_BASE_ROUND_REWARD => (|| { + mint_runtime.charge_system_contract_call(mint_costs.read_base_round_reward)?; + + let result: U512 = mint_runtime + .read_base_round_reward() + .map_err(Self::reverter)?; + CLValue::from_t(result).map_err(Self::reverter) + })(), + 
mint::METHOD_MINT_INTO_EXISTING_PURSE => (|| { + mint_runtime.charge_system_contract_call(mint_costs.mint_into_existing_purse)?; + + let amount: U512 = Self::get_named_argument(runtime_args, mint::ARG_AMOUNT)?; + let existing_purse: URef = Self::get_named_argument(runtime_args, mint::ARG_PURSE)?; + + let result: Result<(), mint::Error> = + mint_runtime.mint_into_existing_purse(existing_purse, amount); + CLValue::from_t(result).map_err(Self::reverter) + })(), + _ => { + // Code should never reach this point as existence of the entrypoint is validated + // before reaching this point. + Ok(CLValue::unit()) + } + }; + + // Charge just for the amount that particular entry point cost - using gas cost from the + // isolated runtime might have a recursive costs whenever system contract calls other system + // contract. + self.gas( + mint_runtime + .gas_counter() + .checked_sub(gas_counter) + .unwrap_or(gas_counter), + )?; + + // Result still contains a result, but the entrypoints logic does not exit early on errors. + let ret = result?; + + // Update outer spending approved limit. + self.context + .set_remaining_spending_limit(mint_runtime.context.remaining_spending_limit()); + + let urefs = utils::extract_urefs(&ret)?; + self.context.access_rights_extend(&urefs); + { + let transfers = self.context.transfers_mut(); + mint_runtime.context.transfers().clone_into(transfers); + } + Ok(ret) + } + + /// Calls host `handle_payment` contract. 
+ fn call_host_handle_payment( + &mut self, + entry_point_name: &str, + runtime_args: &RuntimeArgs, + access_rights: ContextAccessRights, + stack: RuntimeStack, + ) -> Result { + let gas_counter = self.gas_counter(); + + let handle_payment_hash = self.context.get_system_contract(HANDLE_PAYMENT)?; + let handle_payment_key = if self.context.engine_config().enable_entity { + Key::AddressableEntity(EntityAddr::System(handle_payment_hash.value())) + } else { + Key::Hash(handle_payment_hash.value()) + }; + + let handle_payment_named_keys = self + .context + .state() + .borrow_mut() + .get_named_keys(EntityAddr::System(handle_payment_hash.value()))?; + + let mut named_keys = handle_payment_named_keys; + + let runtime_context = self.context.new_from_self( + handle_payment_key, + EntryPointType::Called, + &mut named_keys, + access_rights, + runtime_args.to_owned(), + ); + + let mut runtime = self.new_with_stack(runtime_context, stack); + + let engine_config = self.context.engine_config(); + let system_config = engine_config.system_config(); + let handle_payment_costs = system_config.handle_payment_costs(); + + let result = match entry_point_name { + handle_payment::METHOD_GET_PAYMENT_PURSE => { + runtime.charge_system_contract_call(handle_payment_costs.get_payment_purse)?; + match self.context.maybe_payment_purse() { + Some(payment_purse) => CLValue::from_t(payment_purse).map_err(Self::reverter), + None => { + let payment_purse = runtime.get_payment_purse().map_err(Self::reverter)?; + self.context.set_payment_purse(payment_purse); + CLValue::from_t(payment_purse).map_err(Self::reverter) + } + } + } + handle_payment::METHOD_SET_REFUND_PURSE => (|| { + runtime.charge_system_contract_call(handle_payment_costs.set_refund_purse)?; + + let purse: URef = + Self::get_named_argument(runtime_args, handle_payment::ARG_PURSE)?; + runtime.set_refund_purse(purse).map_err(Self::reverter)?; + CLValue::from_t(()).map_err(Self::reverter) + })(), + handle_payment::METHOD_GET_REFUND_PURSE => 
(|| { + runtime.charge_system_contract_call(handle_payment_costs.get_refund_purse)?; + + let maybe_purse = runtime.get_refund_purse().map_err(Self::reverter)?; + CLValue::from_t(maybe_purse).map_err(Self::reverter) + })(), + _ => { + // Code should never reach here as existence of the entrypoint is validated before + // reaching this point. + Ok(CLValue::unit()) + } + }; + + self.gas( + runtime + .gas_counter() + .checked_sub(gas_counter) + .unwrap_or(gas_counter), + )?; + + let ret = result?; + + let urefs = utils::extract_urefs(&ret)?; + self.context.access_rights_extend(&urefs); + { + let transfers = self.context.transfers_mut(); + runtime.context.transfers().clone_into(transfers); + } + Ok(ret) + } + + /// Calls host auction contract. + fn call_host_auction( + &mut self, + entry_point_name: &str, + runtime_args: &RuntimeArgs, + access_rights: ContextAccessRights, + stack: RuntimeStack, + ) -> Result { + let gas_counter = self.gas_counter(); + + let auction_hash = self.context.get_system_contract(AUCTION)?; + let auction_key = if self.context.engine_config().enable_entity { + Key::AddressableEntity(EntityAddr::System(auction_hash.value())) + } else { + Key::Hash(auction_hash.value()) + }; + + let auction_named_keys = self + .context + .state() + .borrow_mut() + .get_named_keys(EntityAddr::System(auction_hash.value()))?; + + let mut named_keys = auction_named_keys; + + let runtime_context = self.context.new_from_self( + auction_key, + EntryPointType::Called, + &mut named_keys, + access_rights, + runtime_args.to_owned(), + ); + + let mut runtime = self.new_with_stack(runtime_context, stack); + + let engine_config = self.context.engine_config(); + let system_config = engine_config.system_config(); + let auction_costs = system_config.auction_costs(); + + let result = match entry_point_name { + auction::METHOD_GET_ERA_VALIDATORS => (|| { + runtime.charge_system_contract_call::(auction_costs.get_era_validators)?; + + let result = 
runtime.get_era_validators().map_err(Self::reverter)?; + + CLValue::from_t(result).map_err(Self::reverter) + })(), + + auction::METHOD_ADD_BID => (|| { + runtime.charge_system_contract_call(auction_costs.add_bid)?; + let public_key = Self::get_named_argument(runtime_args, auction::ARG_PUBLIC_KEY)?; + let delegation_rate = + Self::get_named_argument(runtime_args, auction::ARG_DELEGATION_RATE)?; + let amount = Self::get_named_argument(runtime_args, auction::ARG_AMOUNT)?; + + let global_minimum_delegation_amount = + self.context.engine_config().minimum_delegation_amount(); + let minimum_delegation_amount = Self::try_get_named_argument( + runtime_args, + auction::ARG_MINIMUM_DELEGATION_AMOUNT, + )? + .unwrap_or(global_minimum_delegation_amount); + + let global_maximum_delegation_amount = + self.context.engine_config().maximum_delegation_amount(); + let maximum_delegation_amount = Self::try_get_named_argument( + runtime_args, + auction::ARG_MAXIMUM_DELEGATION_AMOUNT, + )? + .unwrap_or(global_maximum_delegation_amount); + + if minimum_delegation_amount < global_minimum_delegation_amount + || maximum_delegation_amount > global_maximum_delegation_amount + || minimum_delegation_amount > maximum_delegation_amount + { + return Err(ExecError::Revert(ApiError::InvalidDelegationAmountLimits)); + } + let reserved_slots = + Self::try_get_named_argument(runtime_args, auction::ARG_RESERVED_SLOTS)? 
+ .unwrap_or(0); + + let max_delegators_per_validator = + self.context.engine_config().max_delegators_per_validator(); + + let minimum_bid_amount = self.context().engine_config().minimum_bid_amount(); + + let result = runtime + .add_bid( + public_key, + delegation_rate, + amount, + minimum_delegation_amount, + maximum_delegation_amount, + minimum_bid_amount, + max_delegators_per_validator, + reserved_slots, + ) + .map_err(Self::reverter)?; + + CLValue::from_t(result).map_err(Self::reverter) + })(), + + auction::METHOD_WITHDRAW_BID => (|| { + runtime.charge_system_contract_call(auction_costs.withdraw_bid)?; + + let public_key = Self::get_named_argument(runtime_args, auction::ARG_PUBLIC_KEY)?; + let amount = Self::get_named_argument(runtime_args, auction::ARG_AMOUNT)?; + let min_bid_amount = self.context.engine_config().minimum_bid_amount(); + + let result = runtime + .withdraw_bid(public_key, amount, min_bid_amount) + .map_err(Self::reverter)?; + CLValue::from_t(result).map_err(Self::reverter) + })(), + + auction::METHOD_DELEGATE => (|| { + runtime.charge_system_contract_call(auction_costs.delegate)?; + + let delegator = { + match Self::get_named_argument(runtime_args, auction::ARG_DELEGATOR) { + Ok(pk) => DelegatorKind::PublicKey(pk), + Err(_) => { + let uref: URef = match Self::get_named_argument( + runtime_args, + auction::ARG_DELEGATOR_PURSE, + ) { + Ok(uref) => uref, + Err(err) => { + debug!(%err, "failed to get delegator purse argument"); + return Err(err); + } + }; + DelegatorKind::Purse(uref.addr()) + } + } + }; + let validator = Self::get_named_argument(runtime_args, auction::ARG_VALIDATOR)?; + let amount = Self::get_named_argument(runtime_args, auction::ARG_AMOUNT)?; + + let max_delegators_per_validator = + self.context.engine_config().max_delegators_per_validator(); + + let result = runtime + .delegate(delegator, validator, amount, max_delegators_per_validator) + .map_err(Self::reverter)?; + + CLValue::from_t(result).map_err(Self::reverter) + })(), + + 
auction::METHOD_UNDELEGATE => (|| { + runtime.charge_system_contract_call(auction_costs.undelegate)?; + + let delegator = { + match Self::get_named_argument(runtime_args, auction::ARG_DELEGATOR) { + Ok(pk) => DelegatorKind::PublicKey(pk), + Err(_) => { + let uref: URef = match Self::get_named_argument( + runtime_args, + auction::ARG_DELEGATOR_PURSE, + ) { + Ok(uref) => uref, + Err(err) => { + debug!(%err, "failed to get delegator purse argument"); + return Err(err); + } + }; + DelegatorKind::Purse(uref.addr()) + } + } + }; + let validator = Self::get_named_argument(runtime_args, auction::ARG_VALIDATOR)?; + let amount = Self::get_named_argument(runtime_args, auction::ARG_AMOUNT)?; + + let result = runtime + .undelegate(delegator, validator, amount) + .map_err(Self::reverter)?; + + CLValue::from_t(result).map_err(Self::reverter) + })(), + + auction::METHOD_REDELEGATE => (|| { + runtime.charge_system_contract_call(auction_costs.redelegate)?; + + let delegator = { + match Self::get_named_argument(runtime_args, auction::ARG_DELEGATOR) { + Ok(pk) => DelegatorKind::PublicKey(pk), + Err(_) => { + let uref: URef = match Self::get_named_argument( + runtime_args, + auction::ARG_DELEGATOR_PURSE, + ) { + Ok(uref) => uref, + Err(err) => { + debug!(%err, "failed to get delegator purse argument"); + return Err(err); + } + }; + DelegatorKind::Purse(uref.addr()) + } + } + }; + let validator = Self::get_named_argument(runtime_args, auction::ARG_VALIDATOR)?; + let amount = Self::get_named_argument(runtime_args, auction::ARG_AMOUNT)?; + let new_validator = + Self::get_named_argument(runtime_args, auction::ARG_NEW_VALIDATOR)?; + + let result = runtime + .redelegate(delegator, validator, amount, new_validator) + .map_err(Self::reverter)?; + + CLValue::from_t(result).map_err(Self::reverter) + })(), + + auction::METHOD_RUN_AUCTION => (|| { + runtime.charge_system_contract_call(auction_costs.run_auction)?; + + let era_end_timestamp_millis = + Self::get_named_argument(runtime_args, 
auction::ARG_ERA_END_TIMESTAMP_MILLIS)?; + let evicted_validators = + Self::get_named_argument(runtime_args, auction::ARG_EVICTED_VALIDATORS)?; + + let max_delegators_per_validator = + self.context.engine_config().max_delegators_per_validator(); + let minimum_bid_amount = self.context.engine_config().minimum_bid_amount(); + runtime + .run_auction( + era_end_timestamp_millis, + evicted_validators, + max_delegators_per_validator, + true, + Ratio::new_raw(U512::from(1), U512::from(5)), + minimum_bid_amount, + ) + .map_err(Self::reverter)?; + + CLValue::from_t(()).map_err(Self::reverter) + })(), + + // Type: `fn slash(validator_public_keys: &[PublicKey]) -> Result<(), ExecError>` + auction::METHOD_SLASH => (|| { + runtime.charge_system_contract_call(auction_costs.slash)?; + + let validator_public_keys = + Self::get_named_argument(runtime_args, auction::ARG_VALIDATOR_PUBLIC_KEYS)?; + runtime + .slash(validator_public_keys) + .map_err(Self::reverter)?; + CLValue::from_t(()).map_err(Self::reverter) + })(), + + // Type: `fn distribute(reward_factors: BTreeMap) -> Result<(), + // ExecError>` + auction::METHOD_DISTRIBUTE => (|| { + runtime.charge_system_contract_call(auction_costs.distribute)?; + let rewards = Self::get_named_argument(runtime_args, auction::ARG_REWARDS_MAP)?; + runtime.distribute(rewards).map_err(Self::reverter)?; + CLValue::from_t(()).map_err(Self::reverter) + })(), + + // Type: `fn read_era_id() -> Result` + auction::METHOD_READ_ERA_ID => (|| { + runtime.charge_system_contract_call(auction_costs.read_era_id)?; + + let result = runtime.read_era_id().map_err(Self::reverter)?; + CLValue::from_t(result).map_err(Self::reverter) + })(), + + auction::METHOD_ACTIVATE_BID => (|| { + runtime.charge_system_contract_call(auction_costs.activate_bid)?; + + let validator = Self::get_named_argument(runtime_args, auction::ARG_VALIDATOR)?; + + runtime + .activate_bid(validator, engine_config.minimum_bid_amount()) + .map_err(Self::reverter)?; + + 
CLValue::from_t(()).map_err(Self::reverter) + })(), + auction::METHOD_CHANGE_BID_PUBLIC_KEY => (|| { + runtime.charge_system_contract_call(auction_costs.change_bid_public_key)?; + + let public_key = Self::get_named_argument(runtime_args, auction::ARG_PUBLIC_KEY)?; + let new_public_key = + Self::get_named_argument(runtime_args, auction::ARG_NEW_PUBLIC_KEY)?; + + runtime + .change_bid_public_key(public_key, new_public_key) + .map_err(Self::reverter)?; + + CLValue::from_t(()).map_err(Self::reverter) + })(), + auction::METHOD_ADD_RESERVATIONS => (|| { + runtime.charge_system_contract_call(auction_costs.add_reservations)?; + + let reservations = + Self::get_named_argument(runtime_args, auction::ARG_RESERVATIONS)?; + + runtime + .add_reservations(reservations) + .map_err(Self::reverter)?; + + CLValue::from_t(()).map_err(Self::reverter) + })(), + auction::METHOD_CANCEL_RESERVATIONS => (|| { + runtime.charge_system_contract_call(auction_costs.cancel_reservations)?; + + let validator = Self::get_named_argument(runtime_args, auction::ARG_VALIDATOR)?; + let delegators = Self::get_named_argument(runtime_args, auction::ARG_DELEGATORS)?; + let max_delegators_per_validator = + self.context.engine_config().max_delegators_per_validator(); + + runtime + .cancel_reservations(validator, delegators, max_delegators_per_validator) + .map_err(Self::reverter)?; + + CLValue::from_t(()).map_err(Self::reverter) + })(), + _ => { + // Code should never reach here as existence of the entrypoint is validated before + // reaching this point. + Ok(CLValue::unit()) + } + }; + + // Charge for the gas spent during execution in an isolated runtime. + self.gas( + runtime + .gas_counter() + .checked_sub(gas_counter) + .unwrap_or(gas_counter), + )?; + + // Result still contains a result, but the entrypoints logic does not exit early on errors. 
+ let ret = result?; + + let urefs = utils::extract_urefs(&ret)?; + self.context.access_rights_extend(&urefs); + { + let transfers = self.context.transfers_mut(); + runtime.context.transfers().clone_into(transfers); + } + + Ok(ret) + } + + /// Call a contract by pushing a stack element onto the frame. + pub(crate) fn call_contract_with_stack( + &mut self, + contract_hash: AddressableEntityHash, + entry_point_name: &str, + args: RuntimeArgs, + stack: RuntimeStack, + ) -> Result { + self.stack = Some(stack); + + self.call_contract(contract_hash, entry_point_name, args) + } + + /// Call a version within a package by pushing a stack element onto the frame. + pub fn call_package_version_with_stack( + &mut self, + contract_package_hash: PackageHash, + protocol_version_major: Option, + version: Option, + entry_point_name: String, + args: RuntimeArgs, + stack: RuntimeStack, + ) -> Result { + self.stack = Some(stack); + + self.call_package_version( + contract_package_hash, + protocol_version_major, + version, + entry_point_name, + args, + ) + } + + pub(crate) fn execute_module_bytes( + &mut self, + module_bytes: &Bytes, + stack: RuntimeStack, + ) -> Result { + let protocol_version = self.context.protocol_version(); + let engine_config = self.context.engine_config(); + let wasm_config = engine_config.wasm_config(); + #[cfg(feature = "test-support")] + let max_stack_height = wasm_config.v1().max_stack_height(); + let module = preprocess(*wasm_config, module_bytes)?; + let (instance, memory) = + utils::instance_and_memory(module.clone(), protocol_version, engine_config)?; + self.memory = Some(memory); + self.module = Some(module); + self.stack = Some(stack); + self.context.set_args(utils::attenuate_uref_in_args( + self.context.args().clone(), + self.context + .runtime_footprint() + .borrow() + .main_purse() + .expect("line 1183") + .addr(), + AccessRights::WRITE, + )?); + + let result = instance.invoke_export(DEFAULT_ENTRY_POINT_NAME, &[], self); + + let error = match result { 
+ Err(error) => error, + // If `Ok` and the `host_buffer` is `None`, the contract's execution succeeded but did + // not explicitly call `runtime::ret()`. Treat as though the execution + // returned the unit type `()` as per Rust functions which don't specify a + // return value. + Ok(_) => { + return Ok(self.take_host_buffer().unwrap_or(CLValue::from_t(())?)); + } + }; + + #[cfg(feature = "test-support")] + dump_runtime_stack_info(instance, max_stack_height); + + if let Some(host_error) = error.as_host_error() { + // If the "error" was in fact a trap caused by calling `ret` then + // this is normal operation and we should return the value captured + // in the Runtime result field. + let downcasted_error = host_error.downcast_ref::(); + return match downcasted_error { + Some(ExecError::Ret(ref _ret_urefs)) => self + .take_host_buffer() + .ok_or(ExecError::ExpectedReturnValue), + Some(error) => Err(error.clone()), + None => Err(ExecError::Interpreter(host_error.to_string())), + }; + } + Err(ExecError::Interpreter(error.into())) + } + + /// Calls contract living under a `key`, with supplied `args`. + pub fn call_contract( + &mut self, + contract_hash: AddressableEntityHash, + entry_point_name: &str, + args: RuntimeArgs, + ) -> Result { + let contract_hash = contract_hash.value(); + let identifier = CallContractIdentifier::Contract { contract_hash }; + + self.execute_contract(identifier, entry_point_name, args) + } + + /// Calls `version` of the contract living at `key`, invoking `method` with + /// supplied `args`. This function also checks the args conform with the + /// types given in the contract header. 
+ pub fn call_versioned_contract( + &mut self, + contract_package_hash: PackageHash, + contract_version: Option, + entry_point_name: String, + args: RuntimeArgs, + ) -> Result { + self.call_package_version( + contract_package_hash, + None, + contract_version, + entry_point_name, + args, + ) + } + + /// Calls `version` of the contract living at `key`, invoking `method` with + /// supplied `args`. This function also checks the args conform with the + /// types given in the contract header. + pub fn call_package_version( + &mut self, + contract_package_hash: PackageHash, + protocol_version_major: Option, + version: Option, + entry_point_name: String, + args: RuntimeArgs, + ) -> Result { + /* + m e + - - : pick the highest enabled version, considering the major protocol version first, then the entity version + - + : walk down from the highest major protocol version, so highest..2.+ then 1.+ + If there is only one, its that one (no guessing) + If there are more than one, but the entity version is in the range of only one major version + If there is a collision, + It would be safer to error on this collision, however + we are making a best attempt and picking the highest protocol version + add a chainspec setting to either error or pick the highest in this ambigious case + pick the highest protocol version + + - : pick the highest enabled entity version for the given major + + + : pick the version key based on +.+ + */ + + let contract_package_hash = contract_package_hash.value(); + let identifier = CallContractIdentifier::ContractPackage { + contract_package_hash, + version, + protocol_version_major, + }; + + self.execute_contract(identifier, &entry_point_name, args) + } + + fn get_protocol_version_for_entity_version( + &self, + entity_version: EntityVersion, + package: &Package, + ) -> Result { + let enabled_versions = package.enabled_versions(); + let current_protocol_version_major = self.context.protocol_version().value().major; + + let mut possible_versions = 
vec![]; + + for protocol_version_major in (1..=current_protocol_version_major).rev() { + let entity_version_key = EntityVersionKey::new(protocol_version_major, entity_version); + // If there is a corresponding addr then its an enabled valid entity version key + if enabled_versions.get(&entity_version_key).is_some() { + possible_versions.push(entity_version_key) + } + } + + if possible_versions.is_empty() { + return Err(ExecError::NoMatchingEntityVersionKey); + } + + if possible_versions.len() > 1 + && self + .context + .engine_config() + .trap_on_ambiguous_entity_version + { + return Err(ExecError::AmbiguousEntityVersion); + } + + // If possible versions has more than one, then the element to be popped + // will be the version key which has the same entity version, but the highest protocol + // version If there is only one version key matching the entity version then we will + // correctly pop the singular element in the possible versions. + // This sort is load bearing. + possible_versions.sort(); + // This unwrap is safe as long as we exit early on possible versions being empty + let entity_version_key = possible_versions.pop().unwrap(); + Ok(entity_version_key) + } + + fn get_key_from_entity_addr(&self, entity_addr: EntityAddr) -> Key { + if self.context().engine_config().enable_entity { + Key::AddressableEntity(entity_addr) + } else { + match entity_addr { + EntityAddr::System(system_hash_addr) => Key::Hash(system_hash_addr), + EntityAddr::Account(hash_addr) => Key::Account(AccountHash::new(hash_addr)), + EntityAddr::SmartContract(contract_hash_addr) => Key::Hash(contract_hash_addr), + } + } + } + + fn get_context_key_for_contract_call( + &self, + entity_addr: EntityAddr, + entry_point: &EntityEntryPoint, + ) -> Result { + let current = self.context.entry_point_type(); + let next = entry_point.entry_point_type(); + match (current, next) { + (EntryPointType::Called, EntryPointType::Caller) => { + // Session code can't be called from Contract code for security 
reasons. + Err(ExecError::InvalidContext) + } + (EntryPointType::Factory, EntryPointType::Caller) => { + // Session code can't be called from Installer code for security reasons. + Err(ExecError::InvalidContext) + } + (EntryPointType::Caller, EntryPointType::Caller) => { + // Session code called from session reuses current base key + Ok(self.context.get_context_key()) + } + (EntryPointType::Caller, EntryPointType::Called) + | (EntryPointType::Called, EntryPointType::Called) => { + Ok(self.get_key_from_entity_addr(entity_addr)) + } + _ => { + // Any other combination (installer, normal, etc.) is a contract context. + Ok(self.get_key_from_entity_addr(entity_addr)) + } + } + } + + fn try_get_memory(&self) -> Result<&MemoryRef, ExecError> { + self.memory.as_ref().ok_or(ExecError::WasmPreprocessing( + PreprocessingError::MissingMemorySection, + )) + } + + fn try_get_module(&self) -> Result<&Module, ExecError> { + self.module.as_ref().ok_or(ExecError::WasmPreprocessing( + PreprocessingError::MissingModule, + )) + } + + fn try_get_stack(&self) -> Result<&RuntimeStack, ExecError> { + self.stack.as_ref().ok_or(ExecError::MissingRuntimeStack) + } + + fn maybe_system_type(&self, hash_addr: HashAddr) -> Option { + let is_mint = self.is_mint(hash_addr); + if is_mint.is_some() { + return is_mint; + }; + + let is_auction = self.is_auction(hash_addr); + if is_auction.is_some() { + return is_auction; + }; + let is_handle = self.is_handle_payment(hash_addr); + if is_handle.is_some() { + return is_handle; + }; + + None + } + + fn is_mint(&self, hash_addr: HashAddr) -> Option { + let hash = match self.context.get_system_contract(MINT) { + Ok(hash) => hash, + Err(_) => { + error!("Failed to get system mint contract hash"); + return None; + } + }; + if hash.value() == hash_addr { + Some(SystemEntityType::Mint) + } else { + None + } + } + + /// Checks if current context is the `handle_payment` system contract. 
+ fn is_handle_payment(&self, hash_addr: HashAddr) -> Option { + let hash = match self.context.get_system_contract(HANDLE_PAYMENT) { + Ok(hash) => hash, + Err(_) => { + error!("Failed to get system handle payment contract hash"); + return None; + } + }; + if hash.value() == hash_addr { + Some(SystemEntityType::HandlePayment) + } else { + None + } + } + + /// Checks if given hash is the auction system contract. + fn is_auction(&self, hash_addr: HashAddr) -> Option { + let hash = match self.context.get_system_contract(AUCTION) { + Ok(hash) => hash, + Err(_) => { + error!("Failed to get system auction contract hash"); + return None; + } + }; + + if hash.value() == hash_addr { + Some(SystemEntityType::Auction) + } else { + None + } + } + + fn execute_contract( + &mut self, + identifier: CallContractIdentifier, + entry_point_name: &str, + args: RuntimeArgs, + ) -> Result { + let (footprint, entity_addr, package) = match identifier { + CallContractIdentifier::Contract { contract_hash } => { + let entity_addr = if self.context.is_system_addressable_entity(&contract_hash)? { + EntityAddr::new_system(contract_hash) + } else { + EntityAddr::new_smart_contract(contract_hash) + }; + let footprint = match self.context.read_gs(&Key::Hash(contract_hash))? 
{ + Some(StoredValue::Contract(contract)) => { + if self.context.engine_config().enable_entity { + self.migrate_contract_and_contract_package(contract_hash)?; + }; + + let maybe_system_entity_type = self.maybe_system_type(contract_hash); + + RuntimeFootprint::new_contract_footprint( + ContractHash::new(contract_hash), + contract, + maybe_system_entity_type, + ) + } + Some(_) | None => { + if !self.context.engine_config().enable_entity { + return Err(ExecError::KeyNotFound(Key::Hash(contract_hash))); + } + let key = Key::AddressableEntity(entity_addr); + let entity = self.context.read_gs_typed::(&key)?; + let entity_named_keys = self + .context + .state() + .borrow_mut() + .get_named_keys(entity_addr)?; + let entry_points = self.context.get_casper_vm_v1_entry_point(key)?; + RuntimeFootprint::new_entity_footprint( + entity_addr, + entity, + entity_named_keys, + entry_points, + ) + } + }; + + let package_hash = footprint.package_hash().ok_or(ExecError::InvalidContext)?; + let package: Package = self.context.get_package(package_hash)?; + + // System contract hashes are disabled at upgrade point + let is_calling_system_contract = self.is_system_contract(contract_hash)?; + + let entity_hash = AddressableEntityHash::new(contract_hash); + + // Check if provided contract hash is disabled + let is_contract_enabled = package.is_entity_enabled(&entity_addr); + + if !is_calling_system_contract && !is_contract_enabled { + return Err(ExecError::DisabledEntity(entity_hash)); + } + + (footprint, entity_addr, package) + } + CallContractIdentifier::ContractPackage { + contract_package_hash, + version, + protocol_version_major, + } => { + let package = self.context.get_package(contract_package_hash)?; + let entity_version_key = match (version, protocol_version_major) { + (Some(entity_version), Some(major)) => { + EntityVersionKey::new(major, entity_version) + } + (None, Some(major)) => package.current_entity_version_for(major), + (Some(entity_version), None) => { + match 
self.get_protocol_version_for_entity_version(entity_version, &package) + { + Ok(entity_version_key) => entity_version_key, + Err(err) => { + return Err(err); + } + } + } + (None, None) => match package.current_entity_version() { + Some(v) => v, + None => { + return Err(ExecError::NoActiveEntityVersions( + contract_package_hash.into(), + )); + } + }, + }; + + if package.is_version_missing(entity_version_key) { + return Err(ExecError::MissingEntityVersion(entity_version_key)); + } + + if !package.is_version_enabled(entity_version_key) { + return Err(ExecError::DisabledEntityVersion(entity_version_key)); + } + + let hash_addr = package + .lookup_entity_hash(entity_version_key) + .copied() + .ok_or(ExecError::MissingEntityVersion(entity_version_key))? + .value(); + + let entity_addr = if self.context.is_system_addressable_entity(&hash_addr)? { + EntityAddr::new_system(hash_addr) + } else { + EntityAddr::new_smart_contract(hash_addr) + }; + + let footprint = match self.context.read_gs(&Key::Hash(hash_addr))? 
{ + Some(StoredValue::Contract(contract)) => { + if self.context.engine_config().enable_entity { + self.migrate_contract_and_contract_package(hash_addr)?; + }; + let maybe_system_entity_type = self.maybe_system_type(hash_addr); + RuntimeFootprint::new_contract_footprint( + ContractHash::new(hash_addr), + contract, + maybe_system_entity_type, + ) + } + Some(_) | None => { + if !self.context.engine_config().enable_entity { + return Err(ExecError::KeyNotFound(Key::Hash(hash_addr))); + } + let key = Key::AddressableEntity(entity_addr); + let entity = self.context.read_gs_typed::(&key)?; + let entity_named_keys = self + .context + .state() + .borrow_mut() + .get_named_keys(entity_addr)?; + let entry_points = self.context.get_casper_vm_v1_entry_point(key)?; + RuntimeFootprint::new_entity_footprint( + entity_addr, + entity, + entity_named_keys, + entry_points, + ) + } + }; + + (footprint, entity_addr, package) + } + }; + + if let EntityKind::Account(_) = footprint.entity_kind() { + return Err(ExecError::InvalidContext); + } + + let entry_point = match footprint.entry_points().get(entry_point_name) { + Some(entry_point) => entry_point, + None => { + match footprint.entity_kind() { + EntityKind::System(_) => { + self.charge_system_contract_call( + self.context() + .engine_config() + .system_config() + .no_such_entrypoint(), + )?; + } + EntityKind::Account(_) => {} + EntityKind::SmartContract(_) => {} + } + return Err(ExecError::NoSuchMethod(entry_point_name.to_owned())); + } + }; + + let entry_point_type = entry_point.entry_point_type(); + + if self.context.engine_config().enable_entity && entry_point_type.is_invalid_context() { + return Err(ExecError::InvalidContext); + } + + // Get contract entry point hash + // if public, allowed + // if group, restricted to user group access + // if template, not allowed + self.validate_entry_point_access(&package, entry_point_name, entry_point.access())?; + if self.context.engine_config().strict_argument_checking() { + let 
entry_point_args_lookup: BTreeMap<&str, &Parameter> = entry_point + .args() + .iter() + .map(|param| (param.name(), param)) + .collect(); + + let args_lookup: BTreeMap<&str, &NamedArg> = args + .named_args() + .map(|named_arg| (named_arg.name(), named_arg)) + .collect(); + + // variable ensure args type(s) match defined args of entry point + for (param_name, param) in entry_point_args_lookup { + if let Some(named_arg) = args_lookup.get(param_name) { + if param.cl_type() != named_arg.cl_value().cl_type() { + return Err(ExecError::type_mismatch( + param.cl_type().clone(), + named_arg.cl_value().cl_type().clone(), + )); + } + } else if !param.cl_type().is_option() { + return Err(ExecError::MissingArgument { + name: param.name().to_string(), + }); + } + } + } + + let entity_hash = AddressableEntityHash::new(entity_addr.value()); + + if !self + .context + .engine_config() + .administrative_accounts() + .is_empty() + && !package.is_entity_enabled(&entity_addr) + && !self + .context + .is_system_addressable_entity(&entity_addr.value())? + { + return Err(ExecError::DisabledEntity(entity_hash)); + } + + // if session the caller's context + // else the called contract's context + let context_entity_key = + self.get_context_key_for_contract_call(entity_addr, entry_point)?; + + let context_entity_hash = context_entity_key + .into_entity_hash_addr() + .ok_or(ExecError::UnexpectedKeyVariant(context_entity_key))?; + + let (should_attenuate_urefs, should_validate_urefs) = { + // Determines if this call originated from the system account based on a first + // element of the call stack. + let is_system_account = + self.context.get_initiator() == PublicKey::System.to_account_hash(); + // Is the immediate caller a system contract, such as when the auction calls the mint. + let is_caller_system_contract = + self.is_system_contract(self.context.access_rights().context_key())?; + // Checks if the contract we're about to call is a system contract. 
+ let is_calling_system_contract = self.is_system_contract(context_entity_hash)?; + // uref attenuation is necessary in the following circumstances: + // the originating account (aka the caller) is not the system account and + // the immediate caller is either a normal account or a normal contract and + // the target contract about to be called is a normal contract + let should_attenuate_urefs = + !is_system_account && !is_caller_system_contract && !is_calling_system_contract; + let should_validate_urefs = !is_caller_system_contract || !is_calling_system_contract; + (should_attenuate_urefs, should_validate_urefs) + }; + let runtime_args = if should_attenuate_urefs { + // Main purse URefs should be attenuated only when a non-system contract is executed by + // a non-system account to avoid possible phishing attack scenarios. + utils::attenuate_uref_in_args( + args, + self.context + .runtime_footprint() + .borrow() + .main_purse() + .expect("need purse for attenuation") + .addr(), + AccessRights::WRITE, + )? 
+ } else { + args + }; + + let extended_access_rights = { + let mut all_urefs = vec![]; + for arg in runtime_args.to_values() { + let urefs = utils::extract_urefs(arg)?; + if should_validate_urefs { + for uref in &urefs { + self.context.validate_uref(uref)?; + } + } + all_urefs.extend(urefs); + } + all_urefs + }; + + let (mut named_keys, access_rights) = match entry_point_type { + EntryPointType::Caller => { + let mut access_rights = self + .context + .runtime_footprint() + .borrow() + .extract_access_rights(context_entity_hash); + access_rights.extend(&extended_access_rights); + + let named_keys = self + .context + .runtime_footprint() + .borrow() + .named_keys() + .clone(); + + (named_keys, access_rights) + } + EntryPointType::Called | EntryPointType::Factory => { + let mut access_rights = footprint.extract_access_rights(entity_hash.value()); + access_rights.extend(&extended_access_rights); + let named_keys = footprint.named_keys().clone(); + (named_keys, access_rights) + } + }; + + let stack = { + let mut stack = self.try_get_stack()?.clone(); + + let package_hash = match footprint.package_hash() { + Some(hash) => PackageHash::new(hash), + None => { + return Err(ExecError::UnexpectedStoredValueVariant); + } + }; + + let caller = if self.context.engine_config().enable_entity { + Caller::entity(package_hash, entity_addr) + } else { + Caller::smart_contract( + ContractPackageHash::new(package_hash.value()), + ContractHash::new(entity_addr.value()), + ) + }; + + stack.push(caller)?; + + stack + }; + + if let EntityKind::System(system_contract_type) = footprint.entity_kind() { + let entry_point_name = entry_point.name(); + + match system_contract_type { + SystemEntityType::Mint => { + return self.call_host_mint( + entry_point_name, + &runtime_args, + access_rights, + stack, + ); + } + SystemEntityType::HandlePayment => { + return self.call_host_handle_payment( + entry_point_name, + &runtime_args, + access_rights, + stack, + ); + } + SystemEntityType::Auction => { + 
return self.call_host_auction( + entry_point_name, + &runtime_args, + access_rights, + stack, + ); + } + // Not callable + SystemEntityType::StandardPayment => {} + } + } + + let module: Module = { + let byte_code_addr = footprint.wasm_hash().ok_or(ExecError::InvalidContext)?; + + let byte_code_key = match footprint.entity_kind() { + EntityKind::System(_) | EntityKind::Account(_) => { + Key::ByteCode(ByteCodeAddr::Empty) + } + EntityKind::SmartContract(ContractRuntimeTag::VmCasperV1) => { + if self.context.engine_config().enable_entity { + Key::ByteCode(ByteCodeAddr::new_wasm_addr(byte_code_addr)) + } else { + Key::Hash(byte_code_addr) + } + } + EntityKind::SmartContract(runtime @ ContractRuntimeTag::VmCasperV2) => { + return Err(ExecError::IncompatibleRuntime(runtime)); + } + }; + + let byte_code: ByteCode = match self.context.read_gs(&byte_code_key)? { + Some(StoredValue::ContractWasm(wasm)) => { + ByteCode::new(ByteCodeKind::V1CasperWasm, wasm.take_bytes()) + } + Some(StoredValue::ByteCode(byte_code)) => byte_code, + Some(_) => { + return Err(ExecError::InvalidByteCode(ByteCodeHash::new( + byte_code_addr, + ))) + } + None => return Err(ExecError::KeyNotFound(byte_code_key)), + }; + + casper_wasm::deserialize_buffer(byte_code.bytes())? + }; + + let context = self.context.new_from_self( + context_entity_key, + entry_point.entry_point_type(), + &mut named_keys, + access_rights, + runtime_args, + ); + + let (instance, memory) = utils::instance_and_memory( + module.clone(), + self.context.protocol_version(), + self.context.engine_config(), + )?; + let runtime = &mut Runtime::new_invocation_runtime(self, context, module, memory, stack); + let result = instance.invoke_export(entry_point.name(), &[], runtime); + // The `runtime`'s context was initialized with our counter from before the call and any gas + // charged by the sub-call was added to its counter - so let's copy the correct value of the + // counter from there to our counter. 
Do the same for the message cost tracking. + self.context.set_gas_counter(runtime.context.gas_counter()); + self.context + .set_emit_message_cost(runtime.context.emit_message_cost()); + let transfers = self.context.transfers_mut(); + runtime.context.transfers().clone_into(transfers); + + match result { + Ok(_) => { + // If `Ok` and the `host_buffer` is `None`, the contract's execution succeeded but + // did not explicitly call `runtime::ret()`. Treat as though the + // execution returned the unit type `()` as per Rust functions which + // don't specify a return value. + if self.context.entry_point_type() == EntryPointType::Caller + && runtime.context.entry_point_type() == EntryPointType::Caller + { + // Overwrites parent's named keys with child's new named key but only when + // running session code. + *self.context.named_keys_mut() = runtime.context.named_keys().clone(); + } + self.context + .set_remaining_spending_limit(runtime.context.remaining_spending_limit()); + Ok(runtime.take_host_buffer().unwrap_or(CLValue::from_t(())?)) + } + Err(error) => { + #[cfg(feature = "test-support")] + dump_runtime_stack_info( + instance, + self.context + .engine_config() + .wasm_config() + .v1() + .max_stack_height(), + ); + if let Some(host_error) = error.as_host_error() { + // If the "error" was in fact a trap caused by calling `ret` then this is normal + // operation and we should return the value captured in the Runtime result + // field. + let downcasted_error = host_error.downcast_ref::(); + return match downcasted_error { + Some(ExecError::Ret(ref ret_urefs)) => { + // Insert extra urefs returned from call. + // Those returned URef's are guaranteed to be valid as they were already + // validated in the `ret` call inside context we ret from. 
+ self.context.access_rights_extend(ret_urefs); + if self.context.entry_point_type() == EntryPointType::Caller + && runtime.context.entry_point_type() == EntryPointType::Caller + { + // Overwrites parent's named keys with child's new named key but + // only when running session code. + *self.context.named_keys_mut() = + runtime.context.named_keys().clone(); + } + // Stored contracts are expected to always call a `ret` function, + // otherwise it's an error. + runtime + .take_host_buffer() + .ok_or(ExecError::ExpectedReturnValue) + } + Some(error) => Err(error.clone()), + None => Err(ExecError::Interpreter(host_error.to_string())), + }; + } + Err(ExecError::Interpreter(error.into())) + } + } + } + + fn call_contract_host_buffer( + &mut self, + contract_hash: AddressableEntityHash, + entry_point_name: &str, + args_bytes: &[u8], + result_size_ptr: u32, + ) -> Result, ExecError> { + // Exit early if the host buffer is already occupied + if let Err(err) = self.check_host_buffer() { + return Ok(Err(err)); + } + let args: RuntimeArgs = bytesrepr::deserialize_from_slice(args_bytes)?; + + if let Some(payment_purse) = self.context.maybe_payment_purse() { + for named_arg in args.named_args() { + if utils::extract_urefs(named_arg.cl_value())? 
+ .into_iter() + .any(|uref| uref.remove_access_rights() == payment_purse.remove_access_rights()) + { + warn!("attempt to call_contract with payment purse"); + + return Err(Into::into(ExecError::Revert(ApiError::HandlePayment( + handle_payment::Error::AttemptToPersistPaymentPurse as u8, + )))); + } + } + } + + let result = self.call_contract(contract_hash, entry_point_name, args)?; + self.manage_call_contract_host_buffer(result_size_ptr, result) + } + + fn call_versioned_contract_host_buffer( + &mut self, + contract_package_hash: PackageHash, + contract_version: Option, + entry_point_name: String, + args_bytes: &[u8], + result_size_ptr: u32, + ) -> Result, ExecError> { + // Exit early if the host buffer is already occupied + if let Err(err) = self.check_host_buffer() { + return Ok(Err(err)); + } + let args: RuntimeArgs = bytesrepr::deserialize_from_slice(args_bytes)?; + + if let Some(payment_purse) = self.context.maybe_payment_purse() { + for named_arg in args.named_args() { + if utils::extract_urefs(named_arg.cl_value())? 
+ .into_iter() + .any(|uref| uref.remove_access_rights() == payment_purse.remove_access_rights()) + { + warn!("attempt to call_versioned_contract with payment purse"); + + return Err(Into::into(ExecError::Revert(ApiError::HandlePayment( + handle_payment::Error::AttemptToPersistPaymentPurse as u8, + )))); + } + } + } + + let result = self.call_versioned_contract( + contract_package_hash, + contract_version, + entry_point_name, + args, + )?; + self.manage_call_contract_host_buffer(result_size_ptr, result) + } + + fn call_package_version_host_buffer( + &mut self, + contract_package_hash: PackageHash, + protocol_version_major: Option, + contract_version: Option, + entry_point_name: String, + args_bytes: &[u8], + result_size_ptr: u32, + ) -> Result, ExecError> { + // Exit early if the host buffer is already occupied + if let Err(err) = self.check_host_buffer() { + return Ok(Err(err)); + } + let args: RuntimeArgs = bytesrepr::deserialize_from_slice(args_bytes)?; + + if let Some(payment_purse) = self.context.maybe_payment_purse() { + for named_arg in args.named_args() { + if utils::extract_urefs(named_arg.cl_value())? 
+ .into_iter() + .any(|uref| uref.remove_access_rights() == payment_purse.remove_access_rights()) + { + warn!("attempt to call_versioned_contract with payment purse"); + + return Err(Into::into(ExecError::Revert(ApiError::HandlePayment( + handle_payment::Error::AttemptToPersistPaymentPurse as u8, + )))); + } + } + } + + let result = self.call_package_version( + contract_package_hash, + protocol_version_major, + contract_version, + entry_point_name, + args, + )?; + self.manage_call_contract_host_buffer(result_size_ptr, result) + } + + fn check_host_buffer(&mut self) -> Result<(), ApiError> { + if !self.can_write_to_host_buffer() { + Err(ApiError::HostBufferFull) + } else { + Ok(()) + } + } + + fn manage_call_contract_host_buffer( + &mut self, + result_size_ptr: u32, + result: CLValue, + ) -> Result, ExecError> { + let result_size: u32 = match result.inner_bytes().len().try_into() { + Ok(value) => value, + Err(_) => return Ok(Err(ApiError::OutOfMemory)), + }; + + // leave the host buffer set to `None` if there's nothing to write there + if result_size != 0 { + if let Err(error) = self.write_host_buffer(result) { + return Ok(Err(error)); + } + } + + let result_size_bytes = result_size.to_le_bytes(); // Wasm is little-endian + if let Err(error) = self + .try_get_memory()? + .set(result_size_ptr, &result_size_bytes) + { + return Err(ExecError::Interpreter(error.into())); + } + + Ok(Ok(())) + } + + fn load_named_keys( + &mut self, + total_keys_ptr: u32, + result_size_ptr: u32, + ) -> Result, Trap> { + if !self.can_write_to_host_buffer() { + // Exit early if the host buffer is already occupied + return Ok(Err(ApiError::HostBufferFull)); + } + + let total_keys: u32 = match self.context.named_keys().len().try_into() { + Ok(value) => value, + Err(_) => return Ok(Err(ApiError::OutOfMemory)), + }; + + let total_keys_bytes = total_keys.to_le_bytes(); + if let Err(error) = self + .try_get_memory()? 
+ .set(total_keys_ptr, &total_keys_bytes) + { + return Err(ExecError::Interpreter(error.into()).into()); + } + + if total_keys == 0 { + // No need to do anything else, we leave host buffer empty. + return Ok(Ok(())); + } + + let named_keys = + CLValue::from_t(self.context.named_keys().clone()).map_err(ExecError::CLValue)?; + + let length: u32 = match named_keys.inner_bytes().len().try_into() { + Ok(value) => value, + Err(_) => return Ok(Err(ApiError::BufferTooSmall)), + }; + + if let Err(error) = self.write_host_buffer(named_keys) { + return Ok(Err(error)); + } + + let length_bytes = length.to_le_bytes(); + if let Err(error) = self.try_get_memory()?.set(result_size_ptr, &length_bytes) { + return Err(ExecError::Interpreter(error.into()).into()); + } + + Ok(Ok(())) + } + + fn create_contract_package( + &mut self, + is_locked: PackageStatus, + ) -> Result<(ContractPackage, URef), ExecError> { + let access_key = self.context.new_unit_uref()?; + let package_status = match is_locked { + PackageStatus::Locked => ContractPackageStatus::Locked, + PackageStatus::Unlocked => ContractPackageStatus::Unlocked, + }; + + let contract_package = ContractPackage::new( + access_key, + ContractVersions::default(), + DisabledVersions::default(), + Groups::default(), + package_status, + ); + + Ok((contract_package, access_key)) + } + + fn create_package(&mut self, is_locked: PackageStatus) -> Result<(Package, URef), ExecError> { + let access_key = self.context.new_unit_uref()?; + let contract_package = Package::new( + EntityVersions::new(), + BTreeSet::new(), + Groups::new(), + is_locked, + ); + + Ok((contract_package, access_key)) + } + + fn create_contract_package_at_hash( + &mut self, + lock_status: PackageStatus, + ) -> Result<([u8; 32], [u8; 32]), ExecError> { + let addr = self.context.new_hash_address()?; + let access_key = if self.context.engine_config().enable_entity { + let (package, access_key) = self.create_package(lock_status)?; + self.context + 
.metered_write_gs_unsafe(Key::SmartContract(addr), package)?; + access_key + } else { + let (package, access_key) = self.create_contract_package(lock_status)?; + self.context + .metered_write_gs_unsafe(Key::Hash(addr), package)?; + access_key + }; + Ok((addr, access_key.addr())) + } + + fn create_contract_user_group_by_contract_package( + &mut self, + contract_package_hash: PackageHash, + label: String, + num_new_urefs: u32, + mut existing_urefs: BTreeSet, + output_size_ptr: u32, + ) -> Result, ExecError> { + let mut contract_package: ContractPackage = self + .context + .get_validated_contract_package(contract_package_hash.value())?; + + let groups = contract_package.groups_mut(); + let new_group = Group::new(label); + + // Ensure group does not already exist + if groups.contains(&new_group) { + return Ok(Err(addressable_entity::Error::GroupAlreadyExists.into())); + } + + // Ensure there are not too many groups + if groups.len() >= (addressable_entity::MAX_GROUPS as usize) { + return Ok(Err(addressable_entity::Error::MaxGroupsExceeded.into())); + } + + // Ensure there are not too many urefs + let total_urefs: usize = + groups.total_urefs() + (num_new_urefs as usize) + existing_urefs.len(); + if total_urefs > addressable_entity::MAX_TOTAL_UREFS { + let err = addressable_entity::Error::MaxTotalURefsExceeded; + return Ok(Err(ApiError::ContractHeader(err as u8))); + } + + // Proceed with creating user group + let mut new_urefs = Vec::with_capacity(num_new_urefs as usize); + for _ in 0..num_new_urefs { + let u = self.context.new_unit_uref()?; + new_urefs.push(u); + } + + for u in new_urefs.iter().cloned() { + existing_urefs.insert(u); + } + groups.insert(new_group, existing_urefs); + + // check we can write to the host buffer + if let Err(err) = self.check_host_buffer() { + return Ok(Err(err)); + } + // create CLValue for return value + let new_urefs_value = CLValue::from_t(new_urefs)?; + let value_size = new_urefs_value.inner_bytes().len(); + // write return value to 
buffer + if let Err(err) = self.write_host_buffer(new_urefs_value) { + return Ok(Err(err)); + } + // Write return value size to output location + let output_size_bytes = value_size.to_le_bytes(); // Wasm is little-endian + if let Err(error) = self + .try_get_memory()? + .set(output_size_ptr, &output_size_bytes) + { + return Err(ExecError::Interpreter(error.into())); + } + + // Write updated package to the global state + self.context.metered_write_gs_unsafe( + ContractPackageHash::new(contract_package_hash.value()), + contract_package, + )?; + + Ok(Ok(())) + } + + fn create_contract_user_group( + &mut self, + contract_package_hash: PackageHash, + label: String, + num_new_urefs: u32, + mut existing_urefs: BTreeSet, + output_size_ptr: u32, + ) -> Result, ExecError> { + if !self.context.engine_config().enable_entity { + return self.create_contract_user_group_by_contract_package( + contract_package_hash, + label, + num_new_urefs, + existing_urefs, + output_size_ptr, + ); + }; + + let mut contract_package: Package = + self.context.get_validated_package(contract_package_hash)?; + + let groups = contract_package.groups_mut(); + let new_group = Group::new(label); + + // Ensure group does not already exist + if groups.contains(&new_group) { + return Ok(Err(addressable_entity::Error::GroupAlreadyExists.into())); + } + + // Ensure there are not too many groups + if groups.len() >= (addressable_entity::MAX_GROUPS as usize) { + return Ok(Err(addressable_entity::Error::MaxGroupsExceeded.into())); + } + + // Ensure there are not too many urefs + let total_urefs: usize = + groups.total_urefs() + (num_new_urefs as usize) + existing_urefs.len(); + if total_urefs > addressable_entity::MAX_TOTAL_UREFS { + let err = addressable_entity::Error::MaxTotalURefsExceeded; + return Ok(Err(ApiError::ContractHeader(err as u8))); + } + + // Proceed with creating user group + let mut new_urefs = Vec::with_capacity(num_new_urefs as usize); + for _ in 0..num_new_urefs { + let u = 
self.context.new_unit_uref()?; + new_urefs.push(u); + } + + for u in new_urefs.iter().cloned() { + existing_urefs.insert(u); + } + groups.insert(new_group, existing_urefs); + + // check we can write to the host buffer + if let Err(err) = self.check_host_buffer() { + return Ok(Err(err)); + } + // create CLValue for return value + let new_urefs_value = CLValue::from_t(new_urefs)?; + let value_size = new_urefs_value.inner_bytes().len(); + // write return value to buffer + if let Err(err) = self.write_host_buffer(new_urefs_value) { + return Ok(Err(err)); + } + // Write return value size to output location + let output_size_bytes = value_size.to_le_bytes(); // Wasm is little-endian + if let Err(error) = self + .try_get_memory()? + .set(output_size_ptr, &output_size_bytes) + { + return Err(ExecError::Interpreter(error.into())); + } + + // Write updated package to the global state + self.context + .metered_write_gs_unsafe(contract_package_hash, contract_package)?; + + Ok(Ok(())) + } + + #[allow(clippy::too_many_arguments)] + fn add_contract_version( + &mut self, + package_hash: PackageHash, + version_ptr: u32, + entry_points: EntryPoints, + named_keys: NamedKeys, + message_topics: BTreeMap, + output_ptr: u32, + ) -> Result, ExecError> { + if self.context.engine_config().enable_entity { + self.add_contract_version_by_package( + package_hash, + version_ptr, + entry_points, + named_keys, + message_topics, + output_ptr, + ) + } else { + self.add_contract_version_by_contract_package( + package_hash.value(), + version_ptr, + entry_points, + named_keys, + message_topics, + output_ptr, + ) + } + } + + #[allow(clippy::too_many_arguments)] + fn add_contract_version_by_contract_package( + &mut self, + contract_package_hash: HashAddr, + version_ptr: u32, + entry_points: EntryPoints, + mut named_keys: NamedKeys, + message_topics: BTreeMap, + output_ptr: u32, + ) -> Result, ExecError> { + if !self.context.install_upgrade_allowed() { + // NOTE: This is not a permission check on the 
caller, + // it is enforcing the rule that only legacy standard deploys (which are grandfathered) + // and install / upgrade transactions are allowed to call this method + return Ok(Err(ApiError::NotAllowedToAddContractVersion)); + } + + // if entry_points.contains_stored_session() { + // // As of 2.0 we do not allow stored session logic to be + // // installed or upgraded. Pre-existing stored + // // session logic is still callable. + // return Err(ExecError::InvalidEntryPointType); + // } + + self.context + .validate_key(&Key::Hash(contract_package_hash))?; + + let mut contract_package: ContractPackage = self + .context + .get_validated_contract_package(contract_package_hash)?; + + let version = contract_package.current_contract_version(); + + // Return an error if the contract is locked and has some version associated with it. + if contract_package.is_locked() && version.is_some() { + return Err(ExecError::LockedEntity(PackageHash::new( + contract_package_hash, + ))); + } + + for (_, key) in named_keys.iter() { + self.context.validate_key(key)? 
+ } + + let contract_wasm_hash = self.context.new_hash_address()?; + let contract_wasm = { + let module_bytes = self.get_module_from_entry_points(&entry_points)?; + ContractWasm::new(module_bytes) + }; + + let contract_hash_addr: HashAddr = self.context.new_hash_address()?; + let contract_entity_addr = EntityAddr::SmartContract(contract_hash_addr); + + let protocol_version = self.context.protocol_version(); + let major = protocol_version.value().major; + + let maybe_previous_hash = + if let Some(previous_contract_hash) = contract_package.current_contract_hash() { + let previous_contract: Contract = + self.context.read_gs_typed(&previous_contract_hash.into())?; + + let previous_named_keys = previous_contract.take_named_keys(); + named_keys.append(previous_named_keys); + Some(EntityAddr::SmartContract(previous_contract_hash.value())) + } else { + None + }; + + if let Err(err) = self.carry_forward_message_topics( + maybe_previous_hash, + contract_entity_addr, + message_topics, + )? { + return Ok(Err(err)); + }; + + let contract_package_hash = ContractPackageHash::new(contract_package_hash); + let contract = Contract::new( + contract_package_hash, + contract_wasm_hash.into(), + named_keys, + entry_points.into(), + protocol_version, + ); + + let insert_contract_result = + contract_package.insert_contract_version(major, contract_hash_addr.into()); + + self.context + .metered_write_gs_unsafe(Key::Hash(contract_wasm_hash), contract_wasm)?; + self.context + .metered_write_gs_unsafe(Key::Hash(contract_hash_addr), contract)?; + self.context + .metered_write_gs_unsafe(Key::Hash(contract_package_hash.value()), contract_package)?; + + // set return values to buffer + { + let hash_bytes = match contract_hash_addr.to_bytes() { + Ok(bytes) => bytes, + Err(error) => return Ok(Err(error.into())), + }; + + // Set serialized hash bytes into the output buffer + if let Err(error) = self.try_get_memory()?.set(output_ptr, &hash_bytes) { + return Err(ExecError::Interpreter(error.into())); + 
} + + // Set version into VM shared memory + let version_value: u32 = insert_contract_result.contract_version(); + let version_bytes = version_value.to_le_bytes(); + if let Err(error) = self.try_get_memory()?.set(version_ptr, &version_bytes) { + return Err(ExecError::Interpreter(error.into())); + } + } + + Ok(Ok(())) + } + + #[allow(clippy::too_many_arguments)] + fn add_contract_version_by_package( + &mut self, + package_hash: PackageHash, + version_ptr: u32, + entry_points: EntryPoints, + mut named_keys: NamedKeys, + message_topics: BTreeMap, + output_ptr: u32, + ) -> Result, ExecError> { + if !self.context.install_upgrade_allowed() { + // NOTE: This is not a permission check on the caller, + // it is enforcing the rule that only legacy standard deploys (which are grandfathered) + // and install / upgrade transactions are allowed to call this method + return Ok(Err(ApiError::NotAllowedToAddContractVersion)); + } + + if entry_points.contains_stored_session() { + // As of 2.0 we do not allow stored session logic to be + // installed or upgraded. Pre-existing stored + // session logic is still callable. + return Err(ExecError::InvalidEntryPointType); + } + + let mut package = self.context.get_package(package_hash.value())?; + + // Return an error if the contract is locked and has some version associated with it. + if package.is_locked() { + return Err(ExecError::LockedEntity(package_hash)); + } + + let ( + main_purse, + previous_named_keys, + action_thresholds, + associated_keys, + previous_hash_addr, + ) = self.new_version_entity_parts(&package)?; + + // We generate the byte code hash because a byte code record + // must exist for a contract record to exist. + let byte_code_hash = self.context.new_hash_address()?; + + let hash_addr = self.context.new_hash_address()?; + let entity_addr = EntityAddr::SmartContract(hash_addr); + + if let Err(err) = + self.carry_forward_message_topics(previous_hash_addr, entity_addr, message_topics)? 
+ { + return Ok(Err(err)); + }; + + let protocol_version = self.context.protocol_version(); + + let insert_entity_version_result = + package.insert_entity_version(protocol_version.value().major, entity_addr); + + let byte_code = { + let module_bytes = self.get_module_from_entry_points(&entry_points)?; + ByteCode::new(ByteCodeKind::V1CasperWasm, module_bytes) + }; + + self.context.metered_write_gs_unsafe( + Key::ByteCode(ByteCodeAddr::new_wasm_addr(byte_code_hash)), + byte_code, + )?; + + let entity_addr = EntityAddr::new_smart_contract(hash_addr); + + { + // DO NOT EXTRACT INTO SEPARATE FUNCTION. + for (_, key) in named_keys.iter() { + // Validate all the imputed named keys + // against the installers permissions + self.context.validate_key(key)?; + } + // Carry forward named keys from previous version + // Grant all the imputed named keys + previous named keys. + named_keys.append(previous_named_keys); + for (name, key) in named_keys.iter() { + let named_key_value = + StoredValue::NamedKey(NamedKeyValue::from_concrete_values(*key, name.clone())?); + let named_key_addr = NamedKeyAddr::new_from_string(entity_addr, name.clone())?; + self.context + .metered_write_gs_unsafe(Key::NamedKey(named_key_addr), named_key_value)?; + } + self.context.write_entry_points(entity_addr, entry_points)?; + } + + let entity = AddressableEntity::new( + package_hash, + byte_code_hash.into(), + protocol_version, + main_purse, + associated_keys, + action_thresholds, + EntityKind::SmartContract(ContractRuntimeTag::VmCasperV1), + ); + let entity_key = Key::AddressableEntity(entity_addr); + self.context.metered_write_gs_unsafe(entity_key, entity)?; + self.context + .metered_write_gs_unsafe(package_hash, package)?; + + // set return values to buffer + { + let hash_bytes = match hash_addr.to_bytes() { + Ok(bytes) => bytes, + Err(error) => return Ok(Err(error.into())), + }; + + // Set serialized hash bytes into the output buffer + if let Err(error) = self.try_get_memory()?.set(output_ptr, 
&hash_bytes) { + return Err(ExecError::Interpreter(error.into())); + } + + // Set version into VM shared memory + let version_value: u32 = insert_entity_version_result.entity_version(); + let version_bytes = version_value.to_le_bytes(); + if let Err(error) = self.try_get_memory()?.set(version_ptr, &version_bytes) { + return Err(ExecError::Interpreter(error.into())); + } + } + + Ok(Ok(())) + } + + fn carry_forward_message_topics( + &mut self, + previous_entity_addr: Option, + entity_addr: EntityAddr, + message_topics: BTreeMap, + ) -> Result, ExecError> { + let mut previous_message_topics = match previous_entity_addr { + Some(previous_hash) => self.context.get_message_topics(previous_hash)?, + None => MessageTopics::default(), + }; + + let max_topics_per_contract = self + .context + .engine_config() + .wasm_config() + .messages_limits() + .max_topics_per_contract(); + + let topics_to_add = message_topics + .iter() + .filter(|(_, operation)| match operation { + MessageTopicOperation::Add => true, + }); + // Check if registering the new topics would exceed the limit per contract + if previous_message_topics.len() + topics_to_add.clone().count() + > max_topics_per_contract as usize + { + return Ok(Err(ApiError::from(MessageTopicError::MaxTopicsExceeded))); + } + + // Extend the previous topics with the newly added ones. 
+ for (new_topic, _) in topics_to_add { + let topic_name_hash = cryptography::blake2b(new_topic.as_bytes()).into(); + if let Err(e) = previous_message_topics.add_topic(new_topic.as_str(), topic_name_hash) { + return Ok(Err(e.into())); + } + } + + for (topic_name, topic_hash) in previous_message_topics.iter() { + let topic_key = Key::message_topic(entity_addr, *topic_hash); + let block_time = self.context.get_block_info().block_time(); + let summary = StoredValue::MessageTopic(MessageTopicSummary::new( + 0, + block_time, + topic_name.clone(), + )); + self.context.metered_write_gs_unsafe(topic_key, summary)?; + } + Ok(Ok(())) + } + + fn new_version_entity_parts( + &mut self, + package: &Package, + ) -> Result< + ( + URef, + NamedKeys, + ActionThresholds, + AssociatedKeys, + Option, + ), + ExecError, + > { + if let Some(previous_entity_hash) = package.current_entity_hash() { + let previous_entity_key = Key::AddressableEntity(previous_entity_hash); + let (mut previous_entity, requires_purse_creation) = + self.context.get_contract_entity(previous_entity_key)?; + + let action_thresholds = previous_entity.action_thresholds().clone(); + + let associated_keys = previous_entity.associated_keys().clone(); + // STEP 1: LOAD THE CONTRACT AND CHECK IF CALLER IS IN ASSOCIATED KEYS WITH ENOUGH + // WEIGHT TO UPGRADE (COMPARE TO THE ACTION THRESHOLD FOR UPGRADE + // ACTION). 
STEP 2: IF CALLER IS NOT IN CONTRACTS ASSOCIATED KEYS + // CHECK FOR LEGACY UREFADDR UNDER KEY:HASH(PACKAGEADDR) + // IF FOUND, + // call validate_uref(that uref) + // IF VALID, + // create the new contract version carrying forward previous state including + // associated keys BUT add the caller to the associated keys with + // weight == to the action threshold for upgrade ELSE, error + if !previous_entity.can_upgrade_with(self.context.authorization_keys()) { + // Check if the calling entity must be grandfathered into the new + // addressable entity format + let account_hash = self.context.get_initiator(); + + let access_key = match self + .context + .read_gs(&Key::Hash(previous_entity.package_hash().value()))? + { + Some(StoredValue::ContractPackage(contract_package)) => { + contract_package.access_key() + } + Some(StoredValue::CLValue(cl_value)) => { + let (_key, uref) = cl_value + .into_t::<(Key, URef)>() + .map_err(ExecError::CLValue)?; + uref + } + Some(_other) => return Err(ExecError::UnexpectedStoredValueVariant), + None => { + return Err(ExecError::UpgradeAuthorizationFailure); + } + }; + + let has_access = self.context.validate_uref(&access_key).is_ok(); + + if has_access && !associated_keys.contains_key(&account_hash) { + previous_entity.add_associated_key( + account_hash, + *action_thresholds.upgrade_management(), + )?; + } else { + return Err(ExecError::UpgradeAuthorizationFailure); + } + } + + let main_purse = if requires_purse_creation { + self.create_purse()? 
+ } else { + previous_entity.main_purse() + }; + + let associated_keys = previous_entity.associated_keys().clone(); + + let previous_named_keys = self.context.get_named_keys(previous_entity_key)?; + + return Ok(( + main_purse, + previous_named_keys, + action_thresholds, + associated_keys, + Some(previous_entity_hash), + )); + } + + Ok(( + self.create_purse()?, + NamedKeys::new(), + ActionThresholds::default(), + AssociatedKeys::new(self.context.get_initiator(), Weight::new(1)), + None, + )) + } + + fn disable_contract_version( + &mut self, + contract_package_hash: PackageHash, + contract_hash: AddressableEntityHash, + ) -> Result, ExecError> { + if self.context.engine_config().enable_entity { + let contract_package_key = Key::SmartContract(contract_package_hash.value()); + self.context.validate_key(&contract_package_key)?; + + let mut contract_package: Package = + self.context.get_validated_package(contract_package_hash)?; + + if contract_package.is_locked() { + return Err(ExecError::LockedEntity(contract_package_hash)); + } + + if let Err(err) = contract_package + .disable_entity_version(EntityAddr::SmartContract(contract_hash.value())) + { + return Ok(Err(err.into())); + } + + self.context + .metered_write_gs_unsafe(contract_package_key, contract_package)?; + } else { + let contract_package_key = Key::Hash(contract_package_hash.value()); + self.context.validate_key(&contract_package_key)?; + + let mut contract_package: ContractPackage = self + .context + .get_validated_contract_package(contract_package_hash.value())?; + + if contract_package.is_locked() { + return Err(ExecError::LockedEntity(PackageHash::new( + contract_package_hash.value(), + ))); + } + let contract_hash = ContractHash::new(contract_hash.value()); + + if let Err(err) = contract_package.disable_contract_version(contract_hash) { + return Ok(Err(err.into())); + } + + self.context + .metered_write_gs_unsafe(contract_package_key, contract_package)?; + } + + Ok(Ok(())) + } + + fn 
enable_contract_version( + &mut self, + contract_package_hash: PackageHash, + contract_hash: AddressableEntityHash, + ) -> Result, ExecError> { + if self.context.engine_config().enable_entity { + let contract_package_key = Key::SmartContract(contract_package_hash.value()); + self.context.validate_key(&contract_package_key)?; + + let mut contract_package: Package = + self.context.get_validated_package(contract_package_hash)?; + + if contract_package.is_locked() { + return Err(ExecError::LockedEntity(contract_package_hash)); + } + + if let Err(err) = + contract_package.enable_version(EntityAddr::SmartContract(contract_hash.value())) + { + return Ok(Err(err.into())); + } + + self.context + .metered_write_gs_unsafe(contract_package_key, contract_package)?; + } else { + let contract_package_key = Key::Hash(contract_package_hash.value()); + self.context.validate_key(&contract_package_key)?; + + let mut contract_package: ContractPackage = self + .context + .get_validated_contract_package(contract_package_hash.value())?; + + if contract_package.is_locked() { + return Err(ExecError::LockedEntity(PackageHash::new( + contract_package_hash.value(), + ))); + } + let contract_hash = ContractHash::new(contract_hash.value()); + + if let Err(err) = contract_package.enable_contract_version(contract_hash) { + return Ok(Err(err.into())); + } + + self.context + .metered_write_gs_unsafe(contract_package_key, contract_package)?; + } + + Ok(Ok(())) + } + + /// Writes function address (`hash_bytes`) into the Wasm memory (at + /// `dest_ptr` pointer). + fn function_address(&mut self, hash_bytes: [u8; 32], dest_ptr: u32) -> Result<(), Trap> { + self.try_get_memory()? + .set(dest_ptr, &hash_bytes) + .map_err(|e| ExecError::Interpreter(e.into()).into()) + } + + /// Generates new unforgeable reference and adds it to the context's + /// access_rights set. 
+ fn new_uref(&mut self, uref_ptr: u32, value_ptr: u32, value_size: u32) -> Result<(), Trap> { + let cl_value = self.cl_value_from_mem(value_ptr, value_size)?; // read initial value from memory + let uref = self.context.new_uref(StoredValue::CLValue(cl_value))?; + self.try_get_memory()? + .set(uref_ptr, &uref.into_bytes().map_err(ExecError::BytesRepr)?) + .map_err(|e| ExecError::Interpreter(e.into()).into()) + } + + /// Writes `value` under `key` in GlobalState. + fn write( + &mut self, + key_ptr: u32, + key_size: u32, + value_ptr: u32, + value_size: u32, + ) -> Result<(), Trap> { + let key = self.key_from_mem(key_ptr, key_size)?; + let cl_value = self.cl_value_from_mem(value_ptr, value_size)?; + self.context + .metered_write_gs(key, cl_value) + .map_err(Into::into) + } + + /// Records a transfer. + fn record_transfer( + &mut self, + maybe_to: Option, + source: URef, + target: URef, + amount: U512, + id: Option, + ) -> Result<(), ExecError> { + if self.context.get_context_key() != self.context.get_system_entity_key(MINT)? { + return Err(ExecError::InvalidContext); + } + + if self.context.phase() != Phase::Session { + return Ok(()); + } + + let txn_hash = self.context.get_transaction_hash(); + let from = InitiatorAddr::AccountHash(self.context.get_initiator()); + let fee = Gas::from( + self.context + .engine_config() + .system_config() + .mint_costs() + .transfer, + ); + let transfer = Transfer::V2(TransferV2::new( + txn_hash, from, maybe_to, source, target, amount, fee, id, + )); + self.context.transfers_mut().push(transfer); + Ok(()) + } + + /// Records given auction info at a given era id + fn record_era_info(&mut self, era_info: EraInfo) -> Result<(), ExecError> { + if self.context.get_initiator() != PublicKey::System.to_account_hash() { + return Err(ExecError::InvalidContext); + } + + if self.context.get_context_key() != self.context.get_system_entity_key(AUCTION)? 
{ + return Err(ExecError::InvalidContext); + } + + if self.context.phase() != Phase::Session { + return Ok(()); + } + + self.context.write_era_info(Key::EraSummary, era_info); + + Ok(()) + } + + /// Adds `value` to the cell that `key` points at. + fn add( + &mut self, + key_ptr: u32, + key_size: u32, + value_ptr: u32, + value_size: u32, + ) -> Result<(), Trap> { + let key = self.key_from_mem(key_ptr, key_size)?; + let cl_value = self.cl_value_from_mem(value_ptr, value_size)?; + self.context + .metered_add_gs(key, cl_value) + .map_err(Into::into) + } + + /// Reads value from the GS living under key specified by `key_ptr` and + /// `key_size`. Wasm and host communicate through memory that Wasm + /// module exports. If contract wants to pass data to the host, it has + /// to tell it [the host] where this data lives in the exported memory + /// (pass its pointer and length). + fn read( + &mut self, + key_ptr: u32, + key_size: u32, + output_size_ptr: u32, + ) -> Result, Trap> { + if !self.can_write_to_host_buffer() { + // Exit early if the host buffer is already occupied + return Ok(Err(ApiError::HostBufferFull)); + } + + let key = self.key_from_mem(key_ptr, key_size)?; + let cl_value = match self.context.read_gs(&key)? { + Some(stored_value) => { + CLValue::try_from(stored_value).map_err(ExecError::TypeMismatch)? + } + None => return Ok(Err(ApiError::ValueNotFound)), + }; + + let value_size: u32 = match cl_value.inner_bytes().len().try_into() { + Ok(value) => value, + Err(_) => return Ok(Err(ApiError::BufferTooSmall)), + }; + + if let Err(error) = self.write_host_buffer(cl_value) { + return Ok(Err(error)); + } + + let value_bytes = value_size.to_le_bytes(); // Wasm is little-endian + if let Err(error) = self.try_get_memory()?.set(output_size_ptr, &value_bytes) { + return Err(ExecError::Interpreter(error.into()).into()); + } + + Ok(Ok(())) + } + + /// Reverts contract execution with a status specified. 
+ fn revert(&mut self, status: u32) -> Trap { + ExecError::Revert(status.into()).into() + } + + /// Checks if a caller can manage its own associated keys and thresholds. + /// + /// On some private chains with administrator keys configured this requires that the caller is + /// an admin to be able to manage its own keys. If the caller is not an administrator then the + /// deploy has to be signed by an administrator. + fn can_manage_keys(&self) -> bool { + if self + .context + .engine_config() + .administrative_accounts() + .is_empty() + { + // Public chain + return self + .context + .runtime_footprint() + .borrow() + .can_manage_keys_with(self.context.authorization_keys()); + } + + if self + .context + .engine_config() + .is_administrator(&self.context.get_initiator()) + { + return true; + } + + // If caller is not an admin, check if deploy was co-signed by admin account. + self.context.is_authorized_by_admin() + } + + fn add_associated_key( + &mut self, + account_hash_ptr: u32, + account_hash_size: usize, + weight_value: u8, + ) -> Result { + let account_hash = { + // Account hash as serialized bytes + let source_serialized = self.bytes_from_mem(account_hash_ptr, account_hash_size)?; + // Account hash deserialized + let source: AccountHash = bytesrepr::deserialize_from_slice(source_serialized) + .map_err(ExecError::BytesRepr)?; + source + }; + let weight = Weight::new(weight_value); + + if !self.can_manage_keys() { + return Ok(AddKeyFailure::PermissionDenied as i32); + } + + match self.context.add_associated_key(account_hash, weight) { + Ok(_) => Ok(0), + // This relies on the fact that `AddKeyFailure` is represented as + // i32 and first variant start with number `1`, so all other variants + // are greater than the first one, so it's safe to assume `0` is success, + // and any error is greater than 0. 
+ Err(ExecError::AddKeyFailure(e)) => Ok(e as i32), + // Any other variant just pass as `Trap` + Err(e) => Err(e.into()), + } + } + + fn remove_associated_key( + &mut self, + account_hash_ptr: u32, + account_hash_size: usize, + ) -> Result { + let account_hash = { + // Account hash as serialized bytes + let source_serialized = self.bytes_from_mem(account_hash_ptr, account_hash_size)?; + // Account hash deserialized + let source: AccountHash = bytesrepr::deserialize_from_slice(source_serialized) + .map_err(ExecError::BytesRepr)?; + source + }; + + if !self.can_manage_keys() { + return Ok(RemoveKeyFailure::PermissionDenied as i32); + } + + match self.context.remove_associated_key(account_hash) { + Ok(_) => Ok(0), + Err(ExecError::RemoveKeyFailure(e)) => Ok(e as i32), + Err(e) => Err(e.into()), + } + } + + fn update_associated_key( + &mut self, + account_hash_ptr: u32, + account_hash_size: usize, + weight_value: u8, + ) -> Result { + let account_hash = { + // Account hash as serialized bytes + let source_serialized = self.bytes_from_mem(account_hash_ptr, account_hash_size)?; + // Account hash deserialized + let source: AccountHash = bytesrepr::deserialize_from_slice(source_serialized) + .map_err(ExecError::BytesRepr)?; + source + }; + let weight = Weight::new(weight_value); + + if !self.can_manage_keys() { + return Ok(UpdateKeyFailure::PermissionDenied as i32); + } + + match self.context.update_associated_key(account_hash, weight) { + Ok(_) => Ok(0), + // This relies on the fact that `UpdateKeyFailure` is represented as + // i32 and first variant start with number `1`, so all other variants + // are greater than the first one, so it's safe to assume `0` is success, + // and any error is greater than 0. 
+ Err(ExecError::UpdateKeyFailure(e)) => Ok(e as i32), + // Any other variant just pass as `Trap` + Err(e) => Err(e.into()), + } + } + + fn set_action_threshold( + &mut self, + action_type_value: u32, + threshold_value: u8, + ) -> Result { + if !self.can_manage_keys() { + return Ok(SetThresholdFailure::PermissionDeniedError as i32); + } + + match ActionType::try_from(action_type_value) { + Ok(action_type) => { + let threshold = Weight::new(threshold_value); + match self.context.set_action_threshold(action_type, threshold) { + Ok(_) => Ok(0), + Err(ExecError::SetThresholdFailure(e)) => Ok(e as i32), + Err(error) => Err(error.into()), + } + } + Err(_) => Err(Trap::Code(TrapCode::Unreachable)), + } + } + + /// Looks up the public mint contract key in the context's protocol data. + /// + /// Returned URef is already attenuated depending on the calling account. + fn get_mint_hash(&self) -> Result { + self.context.get_system_contract(MINT) + } + + /// Looks up the public handle payment contract key in the context's protocol data. + /// + /// Returned URef is already attenuated depending on the calling account. + fn get_handle_payment_hash(&self) -> Result { + self.context.get_system_contract(HANDLE_PAYMENT) + } + + /// Looks up the public standard payment contract key in the context's protocol data. + /// + /// Returned URef is already attenuated depending on the calling account. + fn get_standard_payment_hash(&self) -> Result { + self.context.get_system_contract(STANDARD_PAYMENT) + } + + /// Looks up the public auction contract key in the context's protocol data. + /// + /// Returned URef is already attenuated depending on the calling account. 
+ fn get_auction_hash(&self) -> Result { + self.context.get_system_contract(AUCTION) + } + + /// Calls the `read_base_round_reward` method on the mint contract at the given mint + /// contract key + fn mint_read_base_round_reward( + &mut self, + mint_contract_hash: AddressableEntityHash, + ) -> Result { + let gas_counter = self.gas_counter(); + let call_result = self.call_contract( + mint_contract_hash, + mint::METHOD_READ_BASE_ROUND_REWARD, + RuntimeArgs::default(), + ); + self.set_gas_counter(gas_counter); + + let reward = call_result?.into_t()?; + Ok(reward) + } + + /// Calls the `mint` method on the mint contract at the given mint + /// contract key + fn mint_mint( + &mut self, + mint_contract_hash: AddressableEntityHash, + amount: U512, + ) -> Result { + let gas_counter = self.gas_counter(); + let runtime_args = { + let mut runtime_args = RuntimeArgs::new(); + runtime_args.insert(mint::ARG_AMOUNT, amount)?; + runtime_args + }; + let call_result = self.call_contract(mint_contract_hash, mint::METHOD_MINT, runtime_args); + self.set_gas_counter(gas_counter); + + let result: Result = call_result?.into_t()?; + Ok(result.map_err(system::Error::from)?) + } + + /// Calls the `reduce_total_supply` method on the mint contract at the given mint + /// contract key + fn mint_reduce_total_supply( + &mut self, + mint_contract_hash: AddressableEntityHash, + amount: U512, + ) -> Result<(), ExecError> { + let gas_counter = self.gas_counter(); + let runtime_args = { + let mut runtime_args = RuntimeArgs::new(); + runtime_args.insert(mint::ARG_AMOUNT, amount)?; + runtime_args + }; + let call_result = self.call_contract( + mint_contract_hash, + mint::METHOD_REDUCE_TOTAL_SUPPLY, + runtime_args, + ); + self.set_gas_counter(gas_counter); + + let result: Result<(), mint::Error> = call_result?.into_t()?; + Ok(result.map_err(system::Error::from)?) 
+ } + + /// Calls the "create" method on the mint contract at the given mint + /// contract key + fn mint_create( + &mut self, + mint_contract_hash: AddressableEntityHash, + ) -> Result { + let result = + self.call_contract(mint_contract_hash, mint::METHOD_CREATE, RuntimeArgs::new()); + let purse = result?.into_t()?; + Ok(purse) + } + + fn create_purse(&mut self) -> Result { + let _scoped_host_function_flag = self.host_function_flag.enter_host_function_scope(); + self.mint_create(self.get_mint_hash()?) + } + + /// Calls the "transfer" method on the mint contract at the given mint + /// contract key + fn mint_transfer( + &mut self, + mint_contract_hash: AddressableEntityHash, + to: Option, + source: URef, + target: URef, + amount: U512, + id: Option, + ) -> Result, ExecError> { + self.context.validate_uref(&source)?; + + let args_values = { + let mut runtime_args = RuntimeArgs::new(); + runtime_args.insert(mint::ARG_TO, to)?; + runtime_args.insert(mint::ARG_SOURCE, source)?; + runtime_args.insert(mint::ARG_TARGET, target)?; + runtime_args.insert(mint::ARG_AMOUNT, amount)?; + runtime_args.insert(mint::ARG_ID, id)?; + runtime_args + }; + + let gas_counter = self.gas_counter(); + let call_result = + self.call_contract(mint_contract_hash, mint::METHOD_TRANSFER, args_values); + self.set_gas_counter(gas_counter); + + Ok(call_result?.into_t()?) + } + + /// Creates a new account at `target` hash, transferring `amount` + /// of motes from `source` purse to the new account's main purse. 
+ fn transfer_to_new_account( + &mut self, + source: URef, + target: AccountHash, + amount: U512, + id: Option, + ) -> Result { + let mint_contract_hash = self.get_mint_hash()?; + + let allow_unrestricted_transfers = + self.context.engine_config().allow_unrestricted_transfers(); + + if !allow_unrestricted_transfers + && self.context.get_initiator() != PublicKey::System.to_account_hash() + && !self + .context + .engine_config() + .is_administrator(&self.context.get_initiator()) + && !self.context.engine_config().is_administrator(&target) + { + return Err(ExecError::DisabledUnrestrictedTransfers); + } + + // A precondition check that verifies that the transfer can be done + // as the source purse has enough funds to cover the transfer. + if amount > self.available_balance(source)?.unwrap_or_default() { + return Ok(Err(mint::Error::InsufficientFunds.into())); + } + + let target_purse = self.mint_create(mint_contract_hash)?; + + if source == target_purse { + return Ok(Err(mint::Error::EqualSourceAndTarget.into())); + } + + let result = self.mint_transfer( + mint_contract_hash, + Some(target), + source, + target_purse.with_access_rights(AccessRights::ADD), + amount, + id, + ); + + // We granted a temporary access rights bit to newly created main purse as part of + // `mint_create` call, and we need to remove it to avoid leakage of access rights. + + self.context + .remove_access(target_purse.addr(), target_purse.access_rights()); + + match result? 
{ + Ok(()) => { + let main_purse = target_purse; + if !self.context.engine_config().enable_entity { + let account = Account::create(target, NamedKeys::new(), target_purse); + self.context.metered_write_gs_unsafe( + Key::Account(target), + StoredValue::Account(account), + )?; + return Ok(Ok(TransferredTo::NewAccount)); + } + + let protocol_version = self.context.protocol_version(); + let byte_code_hash = ByteCodeHash::default(); + let entity_hash = AddressableEntityHash::new(target.value()); + let package_hash = PackageHash::new(self.context.new_hash_address()?); + + let associated_keys = AssociatedKeys::new(target, Weight::new(1)); + + let entity = AddressableEntity::new( + package_hash, + byte_code_hash, + protocol_version, + main_purse, + associated_keys, + ActionThresholds::default(), + EntityKind::Account(target), + ); + + let package = { + let mut package = Package::new( + EntityVersions::default(), + BTreeSet::default(), + Groups::default(), + PackageStatus::Locked, + ); + package.insert_entity_version( + protocol_version.value().major, + EntityAddr::Account(target.value()), + ); + package + }; + + let entity_key: Key = entity.entity_key(entity_hash); + + self.context + .metered_write_gs_unsafe(entity_key, StoredValue::AddressableEntity(entity))?; + + let contract_package_key: Key = package_hash.into(); + + self.context.metered_write_gs_unsafe( + contract_package_key, + StoredValue::SmartContract(package), + )?; + + let contract_by_account = CLValue::from_t(entity_key)?; + + let target_key = Key::Account(target); + + self.context.metered_write_gs_unsafe( + target_key, + StoredValue::CLValue(contract_by_account), + )?; + + Ok(Ok(TransferredTo::NewAccount)) + } + Err(mint_error) => Ok(Err(mint_error.into())), + } + } + + /// Transferring a given amount of motes from the given source purse to the + /// new account's purse. Requires that the [`URef`]s have already + /// been created by the mint contract (or are the genesis account's). 
+ fn transfer_to_existing_account( + &mut self, + to: Option, + source: URef, + target: URef, + amount: U512, + id: Option, + ) -> Result { + let mint_contract_key = self.get_mint_hash()?; + + match self.mint_transfer(mint_contract_key, to, source, target, amount, id)? { + Ok(()) => Ok(Ok(TransferredTo::ExistingAccount)), + Err(error) => Ok(Err(error.into())), + } + } + + /// Transfers `amount` of motes from default purse of the account to + /// `target` account. If that account does not exist, creates one. + fn transfer_to_account( + &mut self, + target: AccountHash, + amount: U512, + id: Option, + ) -> Result { + let source = self.context.get_main_purse()?; + self.transfer_from_purse_to_account_hash(source, target, amount, id) + } + + /// Transfers `amount` of motes from `source` purse to `target` account's main purse. + /// If that account does not exist, creates one. + fn transfer_from_purse_to_account_hash( + &mut self, + source: URef, + target: AccountHash, + amount: U512, + id: Option, + ) -> Result { + let _scoped_host_function_flag = self.host_function_flag.enter_host_function_scope(); + let target_key = Key::Account(target); + + // Look up the account at the given key + match self.context.read_gs(&target_key)? { + None => { + // If no account exists, create a new account and transfer the amount to its + // main purse. + + self.transfer_to_new_account(source, target, amount, id) + } + Some(StoredValue::CLValue(entity_key_value)) => { + // Attenuate the target main purse + let entity_key = CLValue::into_t::(entity_key_value)?; + let target_uref = if let Some(StoredValue::AddressableEntity(entity)) = + self.context.read_gs(&entity_key)? 
+ { + entity.main_purse_add_only() + } else { + let contract_hash = if let Some(entity_hash) = entity_key + .into_entity_hash_addr() + .map(AddressableEntityHash::new) + { + entity_hash + } else { + return Err(ExecError::UnexpectedKeyVariant(entity_key)); + }; + return Err(ExecError::InvalidEntity(contract_hash)); + }; + + if source.with_access_rights(AccessRights::ADD) == target_uref { + return Ok(Ok(TransferredTo::ExistingAccount)); + } + + // Upsert ADD access to caller on target allowing deposit of motes; this will be + // revoked after the transfer is completed if caller did not already have ADD access + let granted_access = self.context.grant_access(target_uref); + + // If an account exists, transfer the amount to its purse + let transfer_result = self.transfer_to_existing_account( + Some(target), + source, + target_uref, + amount, + id, + ); + + // Remove from caller temporarily granted ADD access on target. + if let GrantedAccess::Granted { + uref_addr, + newly_granted_access_rights, + } = granted_access + { + self.context + .remove_access(uref_addr, newly_granted_access_rights) + } + transfer_result + } + Some(StoredValue::Account(account)) => { + self.transfer_from_purse_to_account(source, &account, amount, id) + } + Some(_) => { + // If some other value exists, return an error + Err(ExecError::AccountNotFound(target_key)) + } + } + } + + fn transfer_from_purse_to_account( + &mut self, + source: URef, + target_account: &Account, + amount: U512, + id: Option, + ) -> Result { + // Attenuate the target main purse + let target_uref = target_account.main_purse_add_only(); + + if source.with_access_rights(AccessRights::ADD) == target_uref { + return Ok(Ok(TransferredTo::ExistingAccount)); + } + + // Grant ADD access to caller on target allowing deposit of motes; this will be + // revoked after the transfer is completed if caller did not already have ADD access + let granted_access = self.context.grant_access(target_uref); + + // If an account exists, transfer 
the amount to its purse + let transfer_result = self.transfer_to_existing_account( + Some(target_account.account_hash()), + source, + target_uref, + amount, + id, + ); + + // Remove from caller temporarily granted ADD access on target. + if let GrantedAccess::Granted { + uref_addr, + newly_granted_access_rights, + } = granted_access + { + self.context + .remove_access(uref_addr, newly_granted_access_rights) + } + transfer_result + } + + /// Transfers `amount` of motes from `source` purse to `target` purse. + fn transfer_from_purse_to_purse( + &mut self, + source: URef, + target: URef, + amount: U512, + id: Option, + ) -> Result, ExecError> { + self.context.validate_uref(&source)?; + let mint_contract_key = self.get_mint_hash()?; + match self.mint_transfer(mint_contract_key, None, source, target, amount, id)? { + Ok(()) => Ok(Ok(())), + Err(mint_error) => Ok(Err(mint_error)), + } + } + + fn total_balance(&mut self, purse: URef) -> Result { + match self.context.total_balance(&purse) { + Ok(motes) => Ok(motes.value()), + Err(err) => Err(err), + } + } + + fn available_balance(&mut self, purse: URef) -> Result, ExecError> { + match self.context.available_balance(&purse) { + Ok(motes) => Ok(Some(motes.value())), + Err(err) => Err(err), + } + } + + fn get_balance_host_buffer( + &mut self, + purse_ptr: u32, + purse_size: usize, + output_size_ptr: u32, + ) -> Result, ExecError> { + if !self.can_write_to_host_buffer() { + // Exit early if the host buffer is already occupied + return Ok(Err(ApiError::HostBufferFull)); + } + + let purse: URef = { + let bytes = self.bytes_from_mem(purse_ptr, purse_size)?; + match bytesrepr::deserialize_from_slice(bytes) { + Ok(purse) => purse, + Err(error) => return Ok(Err(error.into())), + } + }; + + let balance = match self.available_balance(purse)? 
{ + Some(balance) => balance, + None => return Ok(Err(ApiError::InvalidPurse)), + }; + + let balance_cl_value = match CLValue::from_t(balance) { + Ok(cl_value) => cl_value, + Err(error) => return Ok(Err(error.into())), + }; + + let balance_size = balance_cl_value.inner_bytes().len() as i32; + if let Err(error) = self.write_host_buffer(balance_cl_value) { + return Ok(Err(error)); + } + + let balance_size_bytes = balance_size.to_le_bytes(); // Wasm is little-endian + if let Err(error) = self + .try_get_memory()? + .set(output_size_ptr, &balance_size_bytes) + { + return Err(ExecError::Interpreter(error.into())); + } + + Ok(Ok(())) + } + + fn get_system_contract( + &mut self, + system_contract_index: u32, + dest_ptr: u32, + _dest_size: u32, + ) -> Result, Trap> { + let hash: AddressableEntityHash = match SystemEntityType::try_from(system_contract_index) { + Ok(SystemEntityType::Mint) => self.get_mint_hash()?, + Ok(SystemEntityType::HandlePayment) => self.get_handle_payment_hash()?, + Ok(SystemEntityType::StandardPayment) => self.get_standard_payment_hash()?, + Ok(SystemEntityType::Auction) => self.get_auction_hash()?, + Err(error) => return Ok(Err(error)), + }; + + match self.try_get_memory()?.set(dest_ptr, hash.as_ref()) { + Ok(_) => Ok(Ok(())), + Err(error) => Err(ExecError::Interpreter(error.into()).into()), + } + } + + /// If host_buffer set, clears the host_buffer and returns value, else None + pub fn take_host_buffer(&mut self) -> Option { + self.host_buffer.take() + } + + /// Checks if a write to host buffer can happen. + /// + /// This will check if the host buffer is empty. 
+ fn can_write_to_host_buffer(&self) -> bool { + self.host_buffer.is_none() + } + + /// Overwrites data in host buffer only if it's in empty state + fn write_host_buffer(&mut self, data: CLValue) -> Result<(), ApiError> { + match self.host_buffer { + Some(_) => return Err(ApiError::HostBufferFull), + None => self.host_buffer = Some(data), + } + Ok(()) + } + + fn read_host_buffer( + &mut self, + dest_ptr: u32, + dest_size: usize, + bytes_written_ptr: u32, + ) -> Result, ExecError> { + let (_cl_type, serialized_value) = match self.take_host_buffer() { + None => return Ok(Err(ApiError::HostBufferEmpty)), + Some(cl_value) => cl_value.destructure(), + }; + + if serialized_value.len() > u32::MAX as usize { + return Ok(Err(ApiError::OutOfMemory)); + } + if serialized_value.len() > dest_size { + return Ok(Err(ApiError::BufferTooSmall)); + } + + // Slice data, so if `dest_size` is larger than host_buffer size, it will take host_buffer + // as whole. + let sliced_buf = &serialized_value[..cmp::min(dest_size, serialized_value.len())]; + if let Err(error) = self.try_get_memory()?.set(dest_ptr, sliced_buf) { + return Err(ExecError::Interpreter(error.into())); + } + + // Never panics because we check that `serialized_value.len()` fits in `u32`. + let bytes_written: u32 = sliced_buf + .len() + .try_into() + .expect("Size of buffer should fit within limit"); + let bytes_written_data = bytes_written.to_le_bytes(); + + if let Err(error) = self + .try_get_memory()? + .set(bytes_written_ptr, &bytes_written_data) + { + return Err(ExecError::Interpreter(error.into())); + } + + Ok(Ok(())) + } + + #[cfg(feature = "test-support")] + fn print(&mut self, text_ptr: u32, text_size: u32) -> Result<(), Trap> { + let text = self.string_from_mem(text_ptr, text_size)?; + println!("{}", text); // this println! 
is intentional + Ok(()) + } + + fn get_named_arg_size( + &mut self, + name_ptr: u32, + name_size: usize, + size_ptr: u32, + ) -> Result, Trap> { + let name_bytes = self.bytes_from_mem(name_ptr, name_size)?; + let name = String::from_utf8_lossy(&name_bytes); + + let arg_size: u32 = match self.context.args().get(&name) { + Some(arg) if arg.inner_bytes().len() > u32::MAX as usize => { + return Ok(Err(ApiError::OutOfMemory)); + } + Some(arg) => { + // SAFETY: Safe to unwrap as we asserted length above + arg.inner_bytes() + .len() + .try_into() + .expect("Should fit within the range") + } + None => return Ok(Err(ApiError::MissingArgument)), + }; + + let arg_size_bytes = arg_size.to_le_bytes(); // Wasm is little-endian + + if let Err(e) = self.try_get_memory()?.set(size_ptr, &arg_size_bytes) { + return Err(ExecError::Interpreter(e.into()).into()); + } + + Ok(Ok(())) + } + + fn get_named_arg( + &mut self, + name_ptr: u32, + name_size: usize, + output_ptr: u32, + output_size: usize, + ) -> Result, Trap> { + let name_bytes = self.bytes_from_mem(name_ptr, name_size)?; + let name = String::from_utf8_lossy(&name_bytes); + + let arg = match self.context.args().get(&name) { + Some(arg) => arg, + None => return Ok(Err(ApiError::MissingArgument)), + }; + + if arg.inner_bytes().len() > output_size { + return Ok(Err(ApiError::OutOfMemory)); + } + + if let Err(error) = self + .try_get_memory()? + .set(output_ptr, &arg.inner_bytes()[..output_size]) + { + return Err(ExecError::Interpreter(error.into()).into()); + } + + Ok(Ok(())) + } + + /// Enforce group access restrictions (if any) on attempts to call an `EntryPoint`. 
+ fn validate_entry_point_access( + &self, + package: &Package, + name: &str, + access: &EntryPointAccess, + ) -> Result<(), ExecError> { + match access { + EntryPointAccess::Public => Ok(()), + EntryPointAccess::Groups(group_names) => { + if group_names.is_empty() { + // Exits early in a special case of empty list of groups regardless of the group + // checking logic below it. + return Err(ExecError::InvalidContext); + } + + let find_result = group_names.iter().find(|&group_name| { + package + .groups() + .get(group_name) + .and_then(|urefs| { + urefs + .iter() + .find(|&uref| self.context.validate_uref(uref).is_ok()) + }) + .is_some() + }); + + if find_result.is_none() { + return Err(ExecError::InvalidContext); + } + + Ok(()) + } + EntryPointAccess::Template => Err(ExecError::TemplateMethod(name.to_string())), + } + } + + /// Remove a user group from access to a contract + fn remove_contract_user_group( + &mut self, + package_key: PackageHash, + label: Group, + ) -> Result, ExecError> { + if self.context.engine_config().enable_entity { + let mut package: Package = self.context.get_validated_package(package_key)?; + let group_to_remove = Group::new(label); + + // Ensure group exists in groups + if !package.groups().contains(&group_to_remove) { + return Ok(Err(addressable_entity::Error::GroupDoesNotExist.into())); + } + + // Remove group if it is not referenced by at least one entry_point in active versions. + let versions = package.versions(); + for entity_hash in versions.contract_hashes() { + let entry_points = { + self.context + .get_casper_vm_v1_entry_point(Key::AddressableEntity(*entity_hash))? 
+ }; + for entry_point in entry_points.take_entry_points() { + match entry_point.access() { + EntryPointAccess::Public | EntryPointAccess::Template => { + continue; + } + EntryPointAccess::Groups(groups) => { + if groups.contains(&group_to_remove) { + return Ok(Err(addressable_entity::Error::GroupInUse.into())); + } + } + } + } + } + + if !package.remove_group(&group_to_remove) { + return Ok(Err(addressable_entity::Error::GroupInUse.into())); + } + + // Write updated package to the global state + self.context.metered_write_gs_unsafe(package_key, package)?; + } else { + let mut contract_package = self + .context + .get_validated_contract_package(package_key.value())?; + let group_to_remove = Group::new(label); + + // Ensure group exists in groups + if !contract_package.groups().contains(&group_to_remove) { + return Ok(Err(addressable_entity::Error::GroupDoesNotExist.into())); + } + + // Remove group if it is not referenced by at least one entry_point in active versions. + for (_version, contract_hash) in contract_package.versions().iter() { + let entry_points = { + self.context + .get_casper_vm_v1_entry_point(Key::contract_entity_key( + AddressableEntityHash::new(contract_hash.value()), + ))? 
+ }; + for entry_point in entry_points.take_entry_points() { + match entry_point.access() { + EntryPointAccess::Public | EntryPointAccess::Template => { + continue; + } + EntryPointAccess::Groups(groups) => { + if groups.contains(&group_to_remove) { + return Ok(Err(addressable_entity::Error::GroupInUse.into())); + } + } + } + } + } + + if !contract_package.remove_group(&group_to_remove) { + return Ok(Err(addressable_entity::Error::GroupInUse.into())); + } + + // Write updated package to the global state + self.context.metered_write_gs_unsafe( + ContractPackageHash::new(package_key.value()), + contract_package, + )?; + } + Ok(Ok(())) + } + + #[allow(clippy::too_many_arguments)] + fn provision_contract_user_group_uref( + &mut self, + package_ptr: u32, + package_size: u32, + label_ptr: u32, + label_size: u32, + output_size_ptr: u32, + ) -> Result, ExecError> { + let contract_package_hash = self.t_from_mem(package_ptr, package_size)?; + let label: String = self.t_from_mem(label_ptr, label_size)?; + let new_uref = if self.context.engine_config().enable_entity { + let mut contract_package = self.context.get_validated_package(contract_package_hash)?; + let groups = contract_package.groups_mut(); + + let group_label = Group::new(label); + + // Ensure there are not too many urefs + if groups.total_urefs() + 1 > addressable_entity::MAX_TOTAL_UREFS { + return Ok(Err(addressable_entity::Error::MaxTotalURefsExceeded.into())); + } + + // Ensure given group exists and does not exceed limits + let group = match groups.get_mut(&group_label) { + Some(group) if group.len() + 1 > addressable_entity::MAX_GROUPS as usize => { + // Ensures there are not too many groups to fit in amount of new urefs + return Ok(Err(addressable_entity::Error::MaxTotalURefsExceeded.into())); + } + Some(group) => group, + None => return Ok(Err(addressable_entity::Error::GroupDoesNotExist.into())), + }; + + // Proceed with creating new URefs + let new_uref = self.context.new_unit_uref()?; + if 
!group.insert(new_uref) { + return Ok(Err(addressable_entity::Error::URefAlreadyExists.into())); + } + + // Write updated package to the global state + self.context + .metered_write_gs_unsafe(contract_package_hash, contract_package)?; + new_uref + } else { + let mut contract_package = self + .context + .get_validated_contract_package(contract_package_hash.value())?; + let groups = contract_package.groups_mut(); + + let group_label = Group::new(label); + + // Ensure there are not too many urefs + if groups.total_urefs() + 1 > addressable_entity::MAX_TOTAL_UREFS { + return Ok(Err(addressable_entity::Error::MaxTotalURefsExceeded.into())); + } + + // Ensure given group exists and does not exceed limits + let group = match groups.get_mut(&group_label) { + Some(group) if group.len() + 1 > addressable_entity::MAX_GROUPS as usize => { + // Ensures there are not too many groups to fit in amount of new urefs + return Ok(Err(addressable_entity::Error::MaxTotalURefsExceeded.into())); + } + Some(group) => group, + None => return Ok(Err(addressable_entity::Error::GroupDoesNotExist.into())), + }; + + // Proceed with creating new URefs + let new_uref = self.context.new_unit_uref()?; + if !group.insert(new_uref) { + return Ok(Err(addressable_entity::Error::URefAlreadyExists.into())); + } + + // Write updated package to the global state + self.context.metered_write_gs_unsafe( + ContractPackageHash::new(contract_package_hash.value()), + contract_package, + )?; + new_uref + }; + + // check we can write to the host buffer + if let Err(err) = self.check_host_buffer() { + return Ok(Err(err)); + } + // create CLValue for return value + let new_uref_value = CLValue::from_t(new_uref)?; + let value_size = new_uref_value.inner_bytes().len(); + // write return value to buffer + if let Err(err) = self.write_host_buffer(new_uref_value) { + return Ok(Err(err)); + } + // Write return value size to output location + let output_size_bytes = value_size.to_le_bytes(); // Wasm is little-endian + if let 
Err(error) = self + .try_get_memory()? + .set(output_size_ptr, &output_size_bytes) + { + return Err(ExecError::Interpreter(error.into())); + } + + Ok(Ok(())) + } + + #[allow(clippy::too_many_arguments)] + fn remove_contract_user_group_urefs( + &mut self, + package_ptr: u32, + package_size: u32, + label_ptr: u32, + label_size: u32, + urefs_ptr: u32, + urefs_size: u32, + ) -> Result, ExecError> { + let contract_package_hash: PackageHash = self.t_from_mem(package_ptr, package_size)?; + let label: String = self.t_from_mem(label_ptr, label_size)?; + let urefs: BTreeSet = self.t_from_mem(urefs_ptr, urefs_size)?; + + if self.context.engine_config().enable_entity { + let mut contract_package = self.context.get_validated_package(contract_package_hash)?; + + let groups = contract_package.groups_mut(); + let group_label = Group::new(label); + + let group = match groups.get_mut(&group_label) { + Some(group) => group, + None => return Ok(Err(addressable_entity::Error::GroupDoesNotExist.into())), + }; + + if urefs.is_empty() { + return Ok(Ok(())); + } + + for uref in urefs { + if !group.remove(&uref) { + return Ok(Err(addressable_entity::Error::UnableToRemoveURef.into())); + } + } + // Write updated package to the global state + self.context + .metered_write_gs_unsafe(contract_package_hash, contract_package)?; + } else { + let contract_package_hash = ContractPackageHash::new(contract_package_hash.value()); + let mut contract_package = self + .context + .get_validated_contract_package(contract_package_hash.value())?; + + let groups = contract_package.groups_mut(); + let group_label = Group::new(label); + + let group = match groups.get_mut(&group_label) { + Some(group) => group, + None => return Ok(Err(addressable_entity::Error::GroupDoesNotExist.into())), + }; + + if urefs.is_empty() { + return Ok(Ok(())); + } + + for uref in urefs { + if !group.remove(&uref) { + return Ok(Err(addressable_entity::Error::UnableToRemoveURef.into())); + } + } + // Write updated package to the global 
state + self.context + .metered_write_gs_unsafe(contract_package_hash, contract_package)?; + } + + Ok(Ok(())) + } + + /// Calculate gas cost for a host function + fn charge_host_function_call( + &mut self, + host_function: &HostFunction, + weights: T, + ) -> Result<(), Trap> + where + T: AsRef<[HostFunctionCost]> + Copy, + { + let cost = host_function + .calculate_gas_cost(weights) + .ok_or(ExecError::GasLimit)?; // Overflowing gas calculation means gas limit was exceeded + self.gas(cost)?; + Ok(()) + } + + /// Creates a dictionary + fn new_dictionary(&mut self, output_size_ptr: u32) -> Result, ExecError> { + // check we can write to the host buffer + if let Err(err) = self.check_host_buffer() { + return Ok(Err(err)); + } + + // Create new URef + let new_uref = self.context.new_unit_uref()?; + + // create CLValue for return value + let new_uref_value = CLValue::from_t(new_uref)?; + let value_size = new_uref_value.inner_bytes().len(); + // write return value to buffer + if let Err(err) = self.write_host_buffer(new_uref_value) { + return Ok(Err(err)); + } + // Write return value size to output location + let output_size_bytes = value_size.to_le_bytes(); // Wasm is little-endian + if let Err(error) = self + .try_get_memory()? 
+ .set(output_size_ptr, &output_size_bytes) + { + return Err(ExecError::Interpreter(error.into())); + } + + Ok(Ok(())) + } + + /// Reads the `value` under a `key` in a dictionary + fn dictionary_get( + &mut self, + uref_ptr: u32, + uref_size: u32, + dictionary_item_key_bytes_ptr: u32, + dictionary_item_key_bytes_size: u32, + output_size_ptr: u32, + ) -> Result, Trap> { + // check we can write to the host buffer + if let Err(err) = self.check_host_buffer() { + return Ok(Err(err)); + } + + let uref: URef = self.t_from_mem(uref_ptr, uref_size)?; + let dictionary_item_key = self.checked_memory_slice( + dictionary_item_key_bytes_ptr as usize, + dictionary_item_key_bytes_size as usize, + |utf8_bytes| std::str::from_utf8(utf8_bytes).map(ToOwned::to_owned), + )?; + + let dictionary_item_key = if let Ok(item_key) = dictionary_item_key { + item_key + } else { + return Ok(Err(ApiError::InvalidDictionaryItemKey)); + }; + + let cl_value = match self.context.dictionary_get(uref, &dictionary_item_key)? { + Some(cl_value) => cl_value, + None => return Ok(Err(ApiError::ValueNotFound)), + }; + + let value_size: u32 = match cl_value.inner_bytes().len().try_into() { + Ok(value) => value, + Err(_) => return Ok(Err(ApiError::BufferTooSmall)), + }; + + if let Err(error) = self.write_host_buffer(cl_value) { + return Ok(Err(error)); + } + + let value_bytes = value_size.to_le_bytes(); // Wasm is little-endian + if let Err(error) = self.try_get_memory()?.set(output_size_ptr, &value_bytes) { + return Err(ExecError::Interpreter(error.into()).into()); + } + + Ok(Ok(())) + } + + /// Reads the `value` under a `Key::Dictionary`. 
+ fn dictionary_read( + &mut self, + key_ptr: u32, + key_size: u32, + output_size_ptr: u32, + ) -> Result, Trap> { + if !self.can_write_to_host_buffer() { + // Exit early if the host buffer is already occupied + return Ok(Err(ApiError::HostBufferFull)); + } + + let dictionary_key = self.key_from_mem(key_ptr, key_size)?; + let cl_value = match self.context.dictionary_read(dictionary_key)? { + Some(cl_value) => cl_value, + None => return Ok(Err(ApiError::ValueNotFound)), + }; + + let value_size: u32 = match cl_value.inner_bytes().len().try_into() { + Ok(value) => value, + Err(_) => return Ok(Err(ApiError::BufferTooSmall)), + }; + + if let Err(error) = self.write_host_buffer(cl_value) { + return Ok(Err(error)); + } + + let value_bytes = value_size.to_le_bytes(); // Wasm is little-endian + if let Err(error) = self.try_get_memory()?.set(output_size_ptr, &value_bytes) { + return Err(ExecError::Interpreter(error.into()).into()); + } + + Ok(Ok(())) + } + + /// Writes a `key`, `value` pair in a dictionary + fn dictionary_put( + &mut self, + uref_ptr: u32, + uref_size: u32, + key_ptr: u32, + key_size: u32, + value_ptr: u32, + value_size: u32, + ) -> Result, Trap> { + let uref: URef = self.t_from_mem(uref_ptr, uref_size)?; + let dictionary_item_key_bytes = { + if (key_size as usize) > DICTIONARY_ITEM_KEY_MAX_LENGTH { + return Ok(Err(ApiError::DictionaryItemKeyExceedsLength)); + } + self.checked_memory_slice(key_ptr as usize, key_size as usize, |data| { + std::str::from_utf8(data).map(ToOwned::to_owned) + })? + }; + + let dictionary_item_key = if let Ok(item_key) = dictionary_item_key_bytes { + item_key + } else { + return Ok(Err(ApiError::InvalidDictionaryItemKey)); + }; + let cl_value = self.cl_value_from_mem(value_ptr, value_size)?; + if let Err(e) = self + .context + .dictionary_put(uref, &dictionary_item_key, cl_value) + { + return Err(Trap::from(e)); + } + Ok(Ok(())) + } + + /// Checks if immediate caller is a system contract or account. 
+ /// + /// For cases where call stack is only the session code, then this method returns `true` if the + /// caller is system, or `false` otherwise. + fn is_system_immediate_caller(&self) -> Result { + let immediate_caller = match self.get_immediate_caller() { + Some(call_stack_element) => call_stack_element, + None => { + // Immediate caller is assumed to exist at a time this check is run. + return Ok(false); + } + }; + + match immediate_caller { + Caller::Initiator { account_hash } => { + // This case can happen during genesis where we're setting up purses for accounts. + Ok(account_hash == &PublicKey::System.to_account_hash()) + } + Caller::SmartContract { contract_hash, .. } => Ok(self + .context + .is_system_addressable_entity(&contract_hash.value())?), + Caller::Entity { entity_addr, .. } => Ok(self + .context + .is_system_addressable_entity(&entity_addr.value())?), + } + } + + fn load_authorization_keys( + &mut self, + len_ptr: u32, + result_size_ptr: u32, + ) -> Result, Trap> { + if !self.can_write_to_host_buffer() { + // Exit early if the host buffer is already occupied + return Ok(Err(ApiError::HostBufferFull)); + } + + // A set of keys is converted into a vector so it can be written to a host buffer + let authorization_keys = Vec::from_iter(self.context.authorization_keys().clone()); + + let total_keys: u32 = match authorization_keys.len().try_into() { + Ok(value) => value, + Err(_) => return Ok(Err(ApiError::OutOfMemory)), + }; + let total_keys_bytes = total_keys.to_le_bytes(); + if let Err(error) = self.try_get_memory()?.set(len_ptr, &total_keys_bytes) { + return Err(ExecError::Interpreter(error.into()).into()); + } + + if total_keys == 0 { + // No need to do anything else, we leave host buffer empty. 
+ return Ok(Ok(())); + } + + let authorization_keys = CLValue::from_t(authorization_keys).map_err(ExecError::CLValue)?; + + let length: u32 = match authorization_keys.inner_bytes().len().try_into() { + Ok(value) => value, + Err(_) => return Ok(Err(ApiError::OutOfMemory)), + }; + if let Err(error) = self.write_host_buffer(authorization_keys) { + return Ok(Err(error)); + } + + let length_bytes = length.to_le_bytes(); + if let Err(error) = self.try_get_memory()?.set(result_size_ptr, &length_bytes) { + return Err(ExecError::Interpreter(error.into()).into()); + } + + Ok(Ok(())) + } + + fn prune(&mut self, key: Key) { + self.context.prune_gs_unsafe(key); + } + + pub(crate) fn migrate_contract_and_contract_package( + &mut self, + hash_addr: HashAddr, + ) -> Result { + let protocol_version = self.context.protocol_version(); + let contract = self.context.get_contract(ContractHash::new(hash_addr))?; + let package_hash = contract.contract_package_hash(); + self.context + .migrate_package(package_hash, protocol_version)?; + let entity_hash = AddressableEntityHash::new(hash_addr); + let key = Key::contract_entity_key(entity_hash); + self.context.read_gs_typed(&key) + } + + fn add_message_topic(&mut self, topic_name: &str) -> Result, ExecError> { + let topic_hash = cryptography::blake2b(topic_name).into(); + + self.context + .add_message_topic(topic_name, topic_hash) + .map(|ret| ret.map_err(ApiError::from)) + } + + fn emit_message( + &mut self, + topic_name: &str, + message: MessagePayload, + ) -> Result, Trap> { + let entity_addr = self.context.context_key_to_entity_addr()?; + + let topic_name_hash = cryptography::blake2b(topic_name).into(); + let topic_key = Key::Message(MessageAddr::new_topic_addr(entity_addr, topic_name_hash)); + + // Check if the topic exists and get the summary. + let Some(StoredValue::MessageTopic(prev_topic_summary)) = + self.context.read_gs(&topic_key)? 
+ else { + return Ok(Err(ApiError::MessageTopicNotRegistered)); + }; + + let current_blocktime = self.context.get_block_info().block_time(); + let topic_message_index = if prev_topic_summary.blocktime() != current_blocktime { + for index in 1..prev_topic_summary.message_count() { + self.context + .prune_gs_unsafe(Key::message(entity_addr, topic_name_hash, index)); + } + 0 + } else { + prev_topic_summary.message_count() + }; + + let block_message_index: u64 = match self + .context + .read_gs(&Key::BlockGlobal(BlockGlobalAddr::MessageCount))? + { + Some(stored_value) => { + let (prev_block_time, prev_count): (BlockTime, u64) = CLValue::into_t( + CLValue::try_from(stored_value).map_err(ExecError::TypeMismatch)?, + ) + .map_err(ExecError::CLValue)?; + if prev_block_time == current_blocktime { + prev_count + } else { + 0 + } + } + None => 0, + }; + + let Some(topic_message_count) = topic_message_index.checked_add(1) else { + return Ok(Err(ApiError::MessageTopicFull)); + }; + + let Some(block_message_count) = block_message_index.checked_add(1) else { + return Ok(Err(ApiError::MaxMessagesPerBlockExceeded)); + }; + + self.context.metered_emit_message( + topic_key, + current_blocktime, + block_message_count, + topic_message_count, + Message::new( + entity_addr, + message, + topic_name.to_string(), + topic_name_hash, + topic_message_index, + block_message_index, + ), + )?; + Ok(Ok(())) + } +} + +#[cfg(feature = "test-support")] +fn dump_runtime_stack_info(instance: casper_wasmi::ModuleRef, max_stack_height: u32) { + let globals = instance.globals(); + let Some(current_runtime_call_stack_height) = globals.last() else { + return; + }; + + if let RuntimeValue::I32(current_runtime_call_stack_height) = + current_runtime_call_stack_height.get() + { + if current_runtime_call_stack_height > max_stack_height as i32 { + eprintln!("runtime stack overflow, current={current_runtime_call_stack_height}, max={max_stack_height}"); + } + }; +} diff --git 
a/execution_engine/src/runtime/stack.rs b/execution_engine/src/runtime/stack.rs new file mode 100644 index 0000000000..824ecea251 --- /dev/null +++ b/execution_engine/src/runtime/stack.rs @@ -0,0 +1,189 @@ +//! Runtime stacks. +use casper_types::{account::AccountHash, system::Caller}; + +/// A runtime stack frame. +/// +/// Currently it aliases to a [`Caller`]. +/// +/// NOTE: Once we need to add more data to a stack frame we should make this a newtype, rather than +/// change [`Caller`]. +pub type RuntimeStackFrame = Caller; + +/// The runtime stack. +#[derive(Clone, Debug)] +pub struct RuntimeStack { + frames: Vec, + max_height: usize, +} + +/// Error returned on an attempt to pop off an empty stack. +#[cfg(test)] +#[derive(Debug)] +struct RuntimeStackUnderflow; + +/// Error returned on an attempt to push to a stack already at the maximum height. +#[derive(Debug)] +pub struct RuntimeStackOverflow; + +impl RuntimeStack { + /// Creates an empty stack. + pub fn new(max_height: usize) -> Self { + Self { + frames: Vec::with_capacity(max_height), + max_height, + } + } + + /// Creates a stack with one entry. + pub fn new_with_frame(max_height: usize, frame: RuntimeStackFrame) -> Self { + let mut frames = Vec::with_capacity(max_height); + frames.push(frame); + Self { frames, max_height } + } + + /// Is the stack empty? + pub fn is_empty(&self) -> bool { + self.frames.is_empty() + } + + /// The height of the stack. + pub fn len(&self) -> usize { + self.frames.len() + } + + /// The current stack frame. + pub fn current_frame(&self) -> Option<&RuntimeStackFrame> { + self.frames.last() + } + + /// The previous stack frame. + pub fn previous_frame(&self) -> Option<&RuntimeStackFrame> { + self.frames.iter().nth_back(1) + } + + /// The first stack frame. + pub fn first_frame(&self) -> Option<&RuntimeStackFrame> { + self.frames.first() + } + + /// Pops the current frame from the stack. 
+ #[cfg(test)] + fn pop(&mut self) -> Result<(), RuntimeStackUnderflow> { + self.frames.pop().ok_or(RuntimeStackUnderflow)?; + Ok(()) + } + + /// Pushes a frame onto the stack. + pub fn push(&mut self, frame: RuntimeStackFrame) -> Result<(), RuntimeStackOverflow> { + if self.len() < self.max_height { + self.frames.push(frame); + Ok(()) + } else { + Err(RuntimeStackOverflow) + } + } + + // It is here for backwards compatibility only. + /// A view of the stack in the previous stack format. + pub fn call_stack_elements(&self) -> &Vec { + &self.frames + } + + /// Returns a stack with exactly one session element with the associated account hash. + pub fn from_account_hash(account_hash: AccountHash, max_height: usize) -> Self { + RuntimeStack { + frames: vec![Caller::initiator(account_hash)], + max_height, + } + } +} + +#[cfg(test)] +mod test { + use core::convert::TryInto; + + use casper_types::account::{AccountHash, ACCOUNT_HASH_LENGTH}; + + use super::*; + + fn nth_frame(n: usize) -> Caller { + let mut bytes = [0_u8; ACCOUNT_HASH_LENGTH]; + let n: u32 = n.try_into().unwrap(); + bytes[0..4].copy_from_slice(&n.to_le_bytes()); + Caller::initiator(AccountHash::new(bytes)) + } + + #[allow(clippy::redundant_clone)] + #[test] + fn stack_should_respect_max_height_after_clone() { + const MAX_HEIGHT: usize = 3; + let mut stack = RuntimeStack::new(MAX_HEIGHT); + stack.push(nth_frame(1)).unwrap(); + + let mut stack2 = stack.clone(); + stack2.push(nth_frame(2)).unwrap(); + stack2.push(nth_frame(3)).unwrap(); + stack2.push(nth_frame(4)).unwrap_err(); + assert_eq!(stack2.len(), MAX_HEIGHT); + } + + #[test] + fn stack_should_work_as_expected() { + const MAX_HEIGHT: usize = 6; + + let mut stack = RuntimeStack::new(MAX_HEIGHT); + assert!(stack.is_empty()); + assert_eq!(stack.len(), 0); + assert_eq!(stack.current_frame(), None); + assert_eq!(stack.previous_frame(), None); + assert_eq!(stack.first_frame(), None); + + stack.push(nth_frame(0)).unwrap(); + assert!(!stack.is_empty()); + 
assert_eq!(stack.len(), 1); + assert_eq!(stack.current_frame(), Some(&nth_frame(0))); + assert_eq!(stack.previous_frame(), None); + assert_eq!(stack.first_frame(), Some(&nth_frame(0))); + + let mut n: usize = 1; + while stack.push(nth_frame(n)).is_ok() { + n += 1; + assert!(!stack.is_empty()); + assert_eq!(stack.len(), n); + assert_eq!(stack.current_frame(), Some(&nth_frame(n - 1))); + assert_eq!(stack.previous_frame(), Some(&nth_frame(n - 2))); + assert_eq!(stack.first_frame(), Some(&nth_frame(0))); + } + assert!(!stack.is_empty()); + assert_eq!(stack.len(), MAX_HEIGHT); + assert_eq!(stack.current_frame(), Some(&nth_frame(MAX_HEIGHT - 1))); + assert_eq!(stack.previous_frame(), Some(&nth_frame(MAX_HEIGHT - 2))); + assert_eq!(stack.first_frame(), Some(&nth_frame(0))); + + while stack.len() >= 3 { + stack.pop().unwrap(); + n = n.checked_sub(1).unwrap(); + assert!(!stack.is_empty()); + assert_eq!(stack.len(), n); + assert_eq!(stack.current_frame(), Some(&nth_frame(n - 1))); + assert_eq!(stack.previous_frame(), Some(&nth_frame(n - 2))); + assert_eq!(stack.first_frame(), Some(&nth_frame(0))); + } + + stack.pop().unwrap(); + assert!(!stack.is_empty()); + assert_eq!(stack.len(), 1); + assert_eq!(stack.current_frame(), Some(&nth_frame(0))); + assert_eq!(stack.previous_frame(), None); + assert_eq!(stack.first_frame(), Some(&nth_frame(0))); + + stack.pop().unwrap(); + assert!(stack.is_empty()); + assert_eq!(stack.len(), 0); + assert_eq!(stack.current_frame(), None); + assert_eq!(stack.previous_frame(), None); + assert_eq!(stack.first_frame(), None); + + assert!(stack.pop().is_err()); + } +} diff --git a/execution_engine/src/runtime/standard_payment_internal.rs b/execution_engine/src/runtime/standard_payment_internal.rs new file mode 100644 index 0000000000..d3c20903e0 --- /dev/null +++ b/execution_engine/src/runtime/standard_payment_internal.rs @@ -0,0 +1,85 @@ +use casper_storage::global_state::{error::Error as GlobalStateError, state::StateReader}; +use casper_types::{ + 
account::Account, + system::{handle_payment, mint}, + ApiError, Key, RuntimeArgs, StoredValue, TransferredTo, URef, U512, +}; + +use casper_storage::system::standard_payment::{ + account_provider::AccountProvider, handle_payment_provider::HandlePaymentProvider, + mint_provider::MintProvider, StandardPayment, +}; + +use crate::{execution, runtime::Runtime}; + +pub(crate) const METHOD_GET_PAYMENT_PURSE: &str = "get_payment_purse"; + +impl From for Option { + fn from(exec_error: execution::Error) -> Self { + match exec_error { + // This is used to propagate [`execution::Error::GasLimit`] to make sure + // [`StandardPayment`] contract running natively supports propagating gas limit + // errors without a panic. + execution::Error::GasLimit => Some(mint::Error::GasLimit.into()), + // There are possibly other exec errors happening but such translation would be lossy. + _ => None, + } + } +} + +impl<'a, R> AccountProvider for Runtime<'a, R> +where + R: StateReader, +{ + fn get_main_purse(&mut self) -> Result { + self.context.get_main_purse().map_err(|exec_error| { + >::from(exec_error).unwrap_or(ApiError::InvalidPurse) + }) + } +} + +impl<'a, R> MintProvider for Runtime<'a, R> +where + R: StateReader, +{ + fn transfer_purse_to_account( + &mut self, + source: URef, + target_account: &Account, + amount: U512, + ) -> Result<(), ApiError> { + match Runtime::transfer_from_purse_to_account(self, source, target_account, amount, None) { + Ok(Ok(TransferredTo::ExistingAccount)) => Ok(()), + Ok(Ok(TransferredTo::NewAccount)) => Ok(()), + Ok(Err(error)) => Err(error), + Err(_error) => Err(ApiError::Transfer), + } + } +} + +impl<'a, R> HandlePaymentProvider for Runtime<'a, R> +where + R: StateReader, +{ + fn get_payment_purse(&mut self) -> Result { + let hash = self + .get_handle_payment_contract() + .map_err(|_| ApiError::MissingSystemContractHash)?; + + let cl_value = self + .call_contract(hash, METHOD_GET_PAYMENT_PURSE, RuntimeArgs::new()) + .map_err(|exec_error| { + let 
maybe_api_error: Option = exec_error.into(); + maybe_api_error + .unwrap_or_else(|| handle_payment::Error::PaymentPurseNotFound.into()) + })?; + + let payment_purse_ref: URef = cl_value.into_t()?; + Ok(payment_purse_ref) + } +} + +impl<'a, R> StandardPayment for Runtime<'a, R> where + R: StateReader +{ +} diff --git a/execution_engine/src/runtime/utils.rs b/execution_engine/src/runtime/utils.rs new file mode 100644 index 0000000000..c177ebdeae --- /dev/null +++ b/execution_engine/src/runtime/utils.rs @@ -0,0 +1,1418 @@ +use std::collections::BTreeMap; + +use casper_wasm::elements::Module; +use casper_wasmi::{ImportsBuilder, MemoryRef, ModuleInstance, ModuleRef}; + +use casper_types::{ + contracts::NamedKeys, AccessRights, CLType, CLValue, Key, ProtocolVersion, PublicKey, + RuntimeArgs, URef, URefAddr, U128, U256, U512, +}; + +use crate::{ + engine_state::EngineConfig, + execution::ExecError, + resolvers::{self, memory_resolver::MemoryResolver}, +}; + +/// Creates an WASM module instance and a memory instance. +/// +/// This ensures that a memory instance is properly resolved into a pre-allocated memory area, and a +/// host function resolver is attached to the module. +/// +/// The WASM module is also validated to not have a "start" section as we currently don't support +/// running it. +/// +/// Both [`ModuleRef`] and a [`MemoryRef`] are ready to be executed. 
+pub(super) fn instance_and_memory( + parity_module: Module, + protocol_version: ProtocolVersion, + engine_config: &EngineConfig, +) -> Result<(ModuleRef, MemoryRef), ExecError> { + let module = casper_wasmi::Module::from_casper_wasm_module(parity_module)?; + let resolver = resolvers::create_module_resolver(protocol_version, engine_config)?; + let mut imports = ImportsBuilder::new(); + imports.push_resolver("env", &resolver); + let not_started_module = ModuleInstance::new(&module, &imports)?; + if not_started_module.has_start() { + return Err(ExecError::UnsupportedWasmStart); + } + let instance = not_started_module.not_started_instance().clone(); + let memory = resolver.memory_ref()?; + Ok((instance, memory)) +} + +/// Removes `rights_to_disable` from all urefs in `args` matching the address `uref_addr`. +pub(super) fn attenuate_uref_in_args( + mut args: RuntimeArgs, + uref_addr: URefAddr, + rights_to_disable: AccessRights, +) -> Result { + for arg in args.named_args_mut() { + *arg.cl_value_mut() = rewrite_urefs(arg.cl_value().clone(), |uref| { + if uref.addr() == uref_addr { + uref.disable_access_rights(rights_to_disable); + } + })?; + } + + Ok(args) +} + +/// Extracts a copy of every uref able to be deserialized from `cl_value`. +pub(super) fn extract_urefs(cl_value: &CLValue) -> Result, ExecError> { + let mut vec: Vec = Default::default(); + rewrite_urefs(cl_value.clone(), |uref| { + vec.push(*uref); + })?; + Ok(vec) +} + +/// Executes `func` on every uref able to be deserialized from `cl_value` and returns the resulting +/// re-serialized `CLValue`. 
+#[allow(clippy::cognitive_complexity)] +fn rewrite_urefs(cl_value: CLValue, mut func: impl FnMut(&mut URef)) -> Result { + let ret = match cl_value.cl_type() { + CLType::Bool + | CLType::I32 + | CLType::I64 + | CLType::U8 + | CLType::U32 + | CLType::U64 + | CLType::U128 + | CLType::U256 + | CLType::U512 + | CLType::Unit + | CLType::String + | CLType::PublicKey + | CLType::Any => cl_value, + CLType::Option(ty) => match **ty { + CLType::URef => { + let mut opt: Option = cl_value.to_owned().into_t()?; + opt.iter_mut().for_each(func); + CLValue::from_t(opt)? + } + CLType::Key => { + let mut opt: Option = cl_value.to_owned().into_t()?; + opt.iter_mut().filter_map(Key::as_uref_mut).for_each(func); + CLValue::from_t(opt)? + } + _ => cl_value, + }, + CLType::List(ty) => match **ty { + CLType::URef => { + let mut urefs: Vec = cl_value.to_owned().into_t()?; + urefs.iter_mut().for_each(func); + CLValue::from_t(urefs)? + } + CLType::Key => { + let mut keys: Vec = cl_value.to_owned().into_t()?; + keys.iter_mut().filter_map(Key::as_uref_mut).for_each(func); + CLValue::from_t(keys)? + } + _ => cl_value, + }, + CLType::ByteArray(_) => cl_value, + CLType::Result { ok, err } => match (&**ok, &**err) { + (CLType::URef, CLType::Bool) => { + let mut res: Result = cl_value.to_owned().into_t()?; + res.iter_mut().for_each(func); + CLValue::from_t(res)? + } + (CLType::URef, CLType::I32) => { + let mut res: Result = cl_value.to_owned().into_t()?; + res.iter_mut().for_each(func); + CLValue::from_t(res)? + } + (CLType::URef, CLType::I64) => { + let mut res: Result = cl_value.to_owned().into_t()?; + res.iter_mut().for_each(func); + CLValue::from_t(res)? + } + (CLType::URef, CLType::U8) => { + let mut res: Result = cl_value.to_owned().into_t()?; + res.iter_mut().for_each(func); + CLValue::from_t(res)? + } + (CLType::URef, CLType::U32) => { + let mut res: Result = cl_value.to_owned().into_t()?; + res.iter_mut().for_each(func); + CLValue::from_t(res)? 
+ } + (CLType::URef, CLType::U64) => { + let mut res: Result = cl_value.to_owned().into_t()?; + res.iter_mut().for_each(func); + CLValue::from_t(res)? + } + (CLType::URef, CLType::U128) => { + let mut res: Result = cl_value.to_owned().into_t()?; + res.iter_mut().for_each(func); + CLValue::from_t(res)? + } + (CLType::URef, CLType::U256) => { + let mut res: Result = cl_value.to_owned().into_t()?; + res.iter_mut().for_each(func); + CLValue::from_t(res)? + } + (CLType::URef, CLType::U512) => { + let mut res: Result = cl_value.to_owned().into_t()?; + res.iter_mut().for_each(func); + CLValue::from_t(res)? + } + (CLType::URef, CLType::Unit) => { + let mut res: Result = cl_value.to_owned().into_t()?; + res.iter_mut().for_each(func); + CLValue::from_t(res)? + } + (CLType::URef, CLType::String) => { + let mut res: Result = cl_value.to_owned().into_t()?; + res.iter_mut().for_each(func); + CLValue::from_t(res)? + } + (CLType::URef, CLType::Key) => { + let mut res: Result = cl_value.to_owned().into_t()?; + match &mut res { + Ok(uref) => func(uref), + Err(Key::URef(uref)) => func(uref), + Err(_) => {} + } + CLValue::from_t(res)? + } + (CLType::URef, CLType::URef) => { + let mut res: Result = cl_value.to_owned().into_t()?; + match &mut res { + Ok(uref) => func(uref), + Err(uref) => func(uref), + } + CLValue::from_t(res)? + } + (CLType::Key, CLType::Bool) => { + let mut res: Result = cl_value.to_owned().into_t()?; + if let Ok(Key::URef(uref)) = &mut res { + func(uref); + } + CLValue::from_t(res)? + } + (CLType::Key, CLType::I32) => { + let mut res: Result = cl_value.to_owned().into_t()?; + if let Ok(Key::URef(uref)) = &mut res { + func(uref); + } + CLValue::from_t(res)? + } + (CLType::Key, CLType::I64) => { + let mut res: Result = cl_value.to_owned().into_t()?; + if let Ok(Key::URef(uref)) = &mut res { + func(uref); + } + CLValue::from_t(res)? 
+ } + (CLType::Key, CLType::U8) => { + let mut res: Result = cl_value.to_owned().into_t()?; + if let Ok(Key::URef(uref)) = &mut res { + func(uref); + } + CLValue::from_t(res)? + } + (CLType::Key, CLType::U32) => { + let mut res: Result = cl_value.to_owned().into_t()?; + if let Ok(Key::URef(uref)) = &mut res { + func(uref); + } + CLValue::from_t(res)? + } + (CLType::Key, CLType::U64) => { + let mut res: Result = cl_value.to_owned().into_t()?; + if let Ok(Key::URef(uref)) = &mut res { + func(uref); + } + CLValue::from_t(res)? + } + (CLType::Key, CLType::U128) => { + let mut res: Result = cl_value.to_owned().into_t()?; + if let Ok(Key::URef(uref)) = &mut res { + func(uref); + } + CLValue::from_t(res)? + } + (CLType::Key, CLType::U256) => { + let mut res: Result = cl_value.to_owned().into_t()?; + if let Ok(Key::URef(uref)) = &mut res { + func(uref); + } + CLValue::from_t(res)? + } + (CLType::Key, CLType::U512) => { + let mut res: Result = cl_value.to_owned().into_t()?; + if let Ok(Key::URef(uref)) = &mut res { + func(uref); + } + CLValue::from_t(res)? + } + (CLType::Key, CLType::Unit) => { + let mut res: Result = cl_value.to_owned().into_t()?; + if let Ok(Key::URef(uref)) = &mut res { + func(uref); + } + CLValue::from_t(res)? + } + (CLType::Key, CLType::String) => { + let mut res: Result = cl_value.to_owned().into_t()?; + if let Ok(Key::URef(uref)) = &mut res { + func(uref); + } + CLValue::from_t(res)? + } + (CLType::Key, CLType::URef) => { + let mut res: Result = cl_value.to_owned().into_t()?; + match &mut res { + Ok(Key::URef(uref)) => func(uref), + Ok(_) => {} + Err(uref) => func(uref), + } + CLValue::from_t(res)? + } + (CLType::Key, CLType::Key) => { + let mut res: Result = cl_value.to_owned().into_t()?; + match &mut res { + Ok(Key::URef(uref)) => func(uref), + Err(Key::URef(uref)) => func(uref), + Ok(_) | Err(_) => {} + } + CLValue::from_t(res)? 
+ } + (CLType::Bool, CLType::URef) => { + let mut res: Result = cl_value.to_owned().into_t()?; + match &mut res { + Ok(_) => {} + Err(uref) => func(uref), + } + CLValue::from_t(res)? + } + (CLType::I32, CLType::URef) => { + let mut res: Result = cl_value.to_owned().into_t()?; + match &mut res { + Ok(_) => {} + Err(uref) => func(uref), + } + CLValue::from_t(res)? + } + (CLType::I64, CLType::URef) => { + let mut res: Result = cl_value.to_owned().into_t()?; + match &mut res { + Ok(_) => {} + Err(uref) => func(uref), + } + CLValue::from_t(res)? + } + (CLType::U8, CLType::URef) => { + let mut res: Result = cl_value.to_owned().into_t()?; + match &mut res { + Ok(_) => {} + Err(uref) => func(uref), + } + CLValue::from_t(res)? + } + (CLType::U32, CLType::URef) => { + let mut res: Result = cl_value.to_owned().into_t()?; + match &mut res { + Ok(_) => {} + Err(uref) => func(uref), + } + CLValue::from_t(res)? + } + (CLType::U64, CLType::URef) => { + let mut res: Result = cl_value.to_owned().into_t()?; + match &mut res { + Ok(_) => {} + Err(uref) => func(uref), + } + CLValue::from_t(res)? + } + (CLType::U128, CLType::URef) => { + let mut res: Result = cl_value.to_owned().into_t()?; + match &mut res { + Ok(_) => {} + Err(uref) => func(uref), + } + CLValue::from_t(res)? + } + (CLType::U256, CLType::URef) => { + let mut res: Result = cl_value.to_owned().into_t()?; + match &mut res { + Ok(_) => {} + Err(uref) => func(uref), + } + CLValue::from_t(res)? + } + (CLType::U512, CLType::URef) => { + let mut res: Result = cl_value.to_owned().into_t()?; + match &mut res { + Ok(_) => {} + Err(uref) => func(uref), + } + CLValue::from_t(res)? + } + (CLType::Unit, CLType::URef) => { + let mut res: Result<(), URef> = cl_value.to_owned().into_t()?; + match &mut res { + Ok(_) => {} + Err(uref) => func(uref), + } + CLValue::from_t(res)? 
+ } + (CLType::String, CLType::URef) => { + let mut res: Result = cl_value.to_owned().into_t()?; + match &mut res { + Ok(_) => {} + Err(uref) => func(uref), + } + CLValue::from_t(res)? + } + (CLType::Bool, CLType::Key) => { + let mut res: Result = cl_value.to_owned().into_t()?; + match &mut res { + Ok(_) => {} + Err(Key::URef(uref)) => func(uref), + Err(_) => {} + } + CLValue::from_t(res)? + } + (CLType::I32, CLType::Key) => { + let mut res: Result = cl_value.to_owned().into_t()?; + match &mut res { + Ok(_) => {} + Err(Key::URef(uref)) => func(uref), + Err(_) => {} + } + CLValue::from_t(res)? + } + (CLType::I64, CLType::Key) => { + let mut res: Result = cl_value.to_owned().into_t()?; + match &mut res { + Ok(_) => {} + Err(Key::URef(uref)) => func(uref), + Err(_) => {} + } + CLValue::from_t(res)? + } + (CLType::U8, CLType::Key) => { + let mut res: Result = cl_value.to_owned().into_t()?; + match &mut res { + Ok(_) => {} + Err(Key::URef(uref)) => func(uref), + Err(_) => {} + } + CLValue::from_t(res)? + } + (CLType::U32, CLType::Key) => { + let mut res: Result = cl_value.to_owned().into_t()?; + match &mut res { + Ok(_) => {} + Err(Key::URef(uref)) => func(uref), + Err(_) => {} + } + CLValue::from_t(res)? + } + (CLType::U64, CLType::Key) => { + let mut res: Result = cl_value.to_owned().into_t()?; + match &mut res { + Ok(_) => {} + Err(Key::URef(uref)) => func(uref), + Err(_) => {} + } + CLValue::from_t(res)? + } + (CLType::U128, CLType::Key) => { + let mut res: Result = cl_value.to_owned().into_t()?; + match &mut res { + Ok(_) => {} + Err(Key::URef(uref)) => func(uref), + Err(_) => {} + } + CLValue::from_t(res)? + } + (CLType::U256, CLType::Key) => { + let mut res: Result = cl_value.to_owned().into_t()?; + match &mut res { + Ok(_) => {} + Err(Key::URef(uref)) => func(uref), + Err(_) => {} + } + CLValue::from_t(res)? 
+ } + (CLType::U512, CLType::Key) => { + let mut res: Result = cl_value.to_owned().into_t()?; + match &mut res { + Ok(_) => {} + Err(Key::URef(uref)) => func(uref), + Err(_) => {} + } + CLValue::from_t(res)? + } + (CLType::Unit, CLType::Key) => { + let mut res: Result<(), Key> = cl_value.to_owned().into_t()?; + match &mut res { + Ok(_) => {} + Err(Key::URef(uref)) => func(uref), + Err(_) => {} + } + CLValue::from_t(res)? + } + (CLType::String, CLType::Key) => { + let mut res: Result = cl_value.to_owned().into_t()?; + match &mut res { + Ok(_) => {} + Err(Key::URef(uref)) => func(uref), + Err(_) => {} + } + CLValue::from_t(res)? + } + (_, _) => cl_value, + }, + CLType::Map { key, value } => match (&**key, &**value) { + (CLType::URef, CLType::Bool) => { + let mut map: BTreeMap = cl_value.to_owned().into_t()?; + map = map + .into_iter() + .map(|(mut k, v)| { + func(&mut k); + (k, v) + }) + .collect(); + CLValue::from_t(map)? + } + (CLType::URef, CLType::I32) => { + let mut map: BTreeMap = cl_value.to_owned().into_t()?; + map = map + .into_iter() + .map(|(mut k, v)| { + func(&mut k); + (k, v) + }) + .collect(); + CLValue::from_t(map)? + } + (CLType::URef, CLType::I64) => { + let mut map: BTreeMap = cl_value.to_owned().into_t()?; + map = map + .into_iter() + .map(|(mut k, v)| { + func(&mut k); + (k, v) + }) + .collect(); + CLValue::from_t(map)? + } + (CLType::URef, CLType::U8) => { + let mut map: BTreeMap = cl_value.to_owned().into_t()?; + map = map + .into_iter() + .map(|(mut k, v)| { + func(&mut k); + (k, v) + }) + .collect(); + CLValue::from_t(map)? + } + (CLType::URef, CLType::U32) => { + let mut map: BTreeMap = cl_value.to_owned().into_t()?; + map = map + .into_iter() + .map(|(mut k, v)| { + func(&mut k); + (k, v) + }) + .collect(); + CLValue::from_t(map)? + } + (CLType::URef, CLType::U64) => { + let mut map: BTreeMap = cl_value.to_owned().into_t()?; + map = map + .into_iter() + .map(|(mut k, v)| { + func(&mut k); + (k, v) + }) + .collect(); + CLValue::from_t(map)? 
+ } + (CLType::URef, CLType::U128) => { + let mut map: BTreeMap = cl_value.to_owned().into_t()?; + map = map + .into_iter() + .map(|(mut k, v)| { + func(&mut k); + (k, v) + }) + .collect(); + CLValue::from_t(map)? + } + (CLType::URef, CLType::U256) => { + let mut map: BTreeMap = cl_value.to_owned().into_t()?; + map = map + .into_iter() + .map(|(mut k, v)| { + func(&mut k); + (k, v) + }) + .collect(); + CLValue::from_t(map)? + } + (CLType::URef, CLType::U512) => { + let mut map: BTreeMap = cl_value.to_owned().into_t()?; + map = map + .into_iter() + .map(|(mut k, v)| { + func(&mut k); + (k, v) + }) + .collect(); + CLValue::from_t(map)? + } + (CLType::URef, CLType::Unit) => { + let mut map: BTreeMap = cl_value.to_owned().into_t()?; + map = map + .into_iter() + .map(|(mut k, v)| { + func(&mut k); + (k, v) + }) + .collect(); + CLValue::from_t(map)? + } + (CLType::URef, CLType::String) => { + let mut map: BTreeMap = cl_value.to_owned().into_t()?; + map = map + .into_iter() + .map(|(mut k, v)| { + func(&mut k); + (k, v) + }) + .collect(); + CLValue::from_t(map)? + } + (CLType::URef, CLType::Key) => { + let mut map: BTreeMap = cl_value.to_owned().into_t()?; + map = map + .into_iter() + .map(|(mut k, mut v)| { + func(&mut k); + v.as_uref_mut().iter_mut().for_each(|v| func(v)); + (k, v) + }) + .collect(); + CLValue::from_t(map)? + } + (CLType::URef, CLType::URef) => { + let mut map: BTreeMap = cl_value.to_owned().into_t()?; + map = map + .into_iter() + .map(|(mut k, mut v)| { + func(&mut k); + func(&mut v); + (k, v) + }) + .collect(); + CLValue::from_t(map)? + } + (CLType::Key, CLType::Bool) => { + let mut map: BTreeMap = cl_value.to_owned().into_t()?; + map = map + .into_iter() + .map(|(mut k, v)| { + k.as_uref_mut().iter_mut().for_each(|k| func(k)); + (k, v) + }) + .collect(); + CLValue::from_t(map)? 
+ } + (CLType::Key, CLType::I32) => { + let mut map: BTreeMap = cl_value.to_owned().into_t()?; + map = map + .into_iter() + .map(|(mut k, v)| { + k.as_uref_mut().iter_mut().for_each(|k| func(k)); + (k, v) + }) + .collect(); + CLValue::from_t(map)? + } + (CLType::Key, CLType::I64) => { + let mut map: BTreeMap = cl_value.to_owned().into_t()?; + map = map + .into_iter() + .map(|(mut k, v)| { + k.as_uref_mut().iter_mut().for_each(|k| func(k)); + (k, v) + }) + .collect(); + CLValue::from_t(map)? + } + (CLType::Key, CLType::U8) => { + let mut map: BTreeMap = cl_value.to_owned().into_t()?; + map = map + .into_iter() + .map(|(mut k, v)| { + k.as_uref_mut().iter_mut().for_each(|k| func(k)); + (k, v) + }) + .collect(); + CLValue::from_t(map)? + } + (CLType::Key, CLType::U32) => { + let mut map: BTreeMap = cl_value.to_owned().into_t()?; + map = map + .into_iter() + .map(|(mut k, v)| { + k.as_uref_mut().iter_mut().for_each(|k| func(k)); + (k, v) + }) + .collect(); + CLValue::from_t(map)? + } + (CLType::Key, CLType::U64) => { + let mut map: BTreeMap = cl_value.to_owned().into_t()?; + map = map + .into_iter() + .map(|(mut k, v)| { + k.as_uref_mut().iter_mut().for_each(|k| func(k)); + (k, v) + }) + .collect(); + CLValue::from_t(map)? + } + (CLType::Key, CLType::U128) => { + let mut map: BTreeMap = cl_value.to_owned().into_t()?; + map = map + .into_iter() + .map(|(mut k, v)| { + k.as_uref_mut().iter_mut().for_each(|k| func(k)); + (k, v) + }) + .collect(); + CLValue::from_t(map)? + } + (CLType::Key, CLType::U256) => { + let mut map: BTreeMap = cl_value.to_owned().into_t()?; + map = map + .into_iter() + .map(|(mut k, v)| { + k.as_uref_mut().iter_mut().for_each(|k| func(k)); + (k, v) + }) + .collect(); + CLValue::from_t(map)? + } + (CLType::Key, CLType::U512) => { + let mut map: BTreeMap = cl_value.to_owned().into_t()?; + map = map + .into_iter() + .map(|(mut k, v)| { + k.as_uref_mut().iter_mut().for_each(|k| func(k)); + (k, v) + }) + .collect(); + CLValue::from_t(map)? 
+ } + (CLType::Key, CLType::Unit) => { + let mut map: BTreeMap = cl_value.to_owned().into_t()?; + map = map + .into_iter() + .map(|(mut k, v)| { + k.as_uref_mut().iter_mut().for_each(|k| func(k)); + (k, v) + }) + .collect(); + CLValue::from_t(map)? + } + (CLType::Key, CLType::String) => { + let mut map: BTreeMap = cl_value.to_owned().into_t()?; + map = map + .into_iter() + .map(|(mut k, v)| { + k.as_uref_mut().iter_mut().for_each(|k| func(k)); + (k, v) + }) + .collect(); + CLValue::from_t(map)? + } + (CLType::Key, CLType::URef) => { + let mut map: BTreeMap = cl_value.to_owned().into_t()?; + map = map + .into_iter() + .map(|(mut k, mut v)| { + k.as_uref_mut().iter_mut().for_each(|k| func(k)); + func(&mut v); + (k, v) + }) + .collect(); + CLValue::from_t(map)? + } + (CLType::Key, CLType::Key) => { + let mut map: BTreeMap = cl_value.to_owned().into_t()?; + map = map + .into_iter() + .map(|(mut k, mut v)| { + k.as_uref_mut().iter_mut().for_each(|k| func(k)); + v.as_uref_mut().iter_mut().for_each(|v| func(v)); + (k, v) + }) + .collect(); + CLValue::from_t(map)? + } + (CLType::Bool, CLType::URef) => { + let mut map: BTreeMap = cl_value.to_owned().into_t()?; + map.values_mut().for_each(func); + CLValue::from_t(map)? + } + (CLType::I32, CLType::URef) => { + let mut map: BTreeMap = cl_value.to_owned().into_t()?; + map.values_mut().for_each(func); + CLValue::from_t(map)? + } + (CLType::I64, CLType::URef) => { + let mut map: BTreeMap = cl_value.to_owned().into_t()?; + map.values_mut().for_each(func); + CLValue::from_t(map)? + } + (CLType::U8, CLType::URef) => { + let mut map: BTreeMap = cl_value.to_owned().into_t()?; + map.values_mut().for_each(func); + CLValue::from_t(map)? + } + (CLType::U32, CLType::URef) => { + let mut map: BTreeMap = cl_value.to_owned().into_t()?; + map.values_mut().for_each(func); + CLValue::from_t(map)? 
+ } + (CLType::U64, CLType::URef) => { + let mut map: BTreeMap = cl_value.to_owned().into_t()?; + map.values_mut().for_each(func); + CLValue::from_t(map)? + } + (CLType::U128, CLType::URef) => { + let mut map: BTreeMap = cl_value.to_owned().into_t()?; + map.values_mut().for_each(func); + CLValue::from_t(map)? + } + (CLType::U256, CLType::URef) => { + let mut map: BTreeMap = cl_value.to_owned().into_t()?; + map.values_mut().for_each(func); + CLValue::from_t(map)? + } + (CLType::U512, CLType::URef) => { + let mut map: BTreeMap = cl_value.to_owned().into_t()?; + map.values_mut().for_each(func); + CLValue::from_t(map)? + } + (CLType::Unit, CLType::URef) => { + let mut map: BTreeMap<(), URef> = cl_value.to_owned().into_t()?; + map.values_mut().for_each(func); + CLValue::from_t(map)? + } + (CLType::String, CLType::URef) => { + let mut map: BTreeMap = cl_value.to_owned().into_t()?; + map.values_mut().for_each(func); + CLValue::from_t(map)? + } + (CLType::PublicKey, CLType::URef) => { + let mut map: BTreeMap = cl_value.to_owned().into_t()?; + map.values_mut().for_each(func); + CLValue::from_t(map)? + } + (CLType::Bool, CLType::Key) => { + let mut map: BTreeMap = cl_value.to_owned().into_t()?; + map.values_mut().filter_map(Key::as_uref_mut).for_each(func); + CLValue::from_t(map)? + } + (CLType::I32, CLType::Key) => { + let mut map: BTreeMap = cl_value.to_owned().into_t()?; + map.values_mut().filter_map(Key::as_uref_mut).for_each(func); + CLValue::from_t(map)? + } + (CLType::I64, CLType::Key) => { + let mut map: BTreeMap = cl_value.to_owned().into_t()?; + map.values_mut().filter_map(Key::as_uref_mut).for_each(func); + CLValue::from_t(map)? + } + (CLType::U8, CLType::Key) => { + let mut map: BTreeMap = cl_value.to_owned().into_t()?; + map.values_mut().filter_map(Key::as_uref_mut).for_each(func); + CLValue::from_t(map)? 
+ } + (CLType::U32, CLType::Key) => { + let mut map: BTreeMap = cl_value.to_owned().into_t()?; + map.values_mut().filter_map(Key::as_uref_mut).for_each(func); + CLValue::from_t(map)? + } + (CLType::U64, CLType::Key) => { + let mut map: BTreeMap = cl_value.to_owned().into_t()?; + map.values_mut().filter_map(Key::as_uref_mut).for_each(func); + CLValue::from_t(map)? + } + (CLType::U128, CLType::Key) => { + let mut map: BTreeMap = cl_value.to_owned().into_t()?; + map.values_mut().filter_map(Key::as_uref_mut).for_each(func); + CLValue::from_t(map)? + } + (CLType::U256, CLType::Key) => { + let mut map: BTreeMap = cl_value.to_owned().into_t()?; + map.values_mut().filter_map(Key::as_uref_mut).for_each(func); + CLValue::from_t(map)? + } + (CLType::U512, CLType::Key) => { + let mut map: BTreeMap = cl_value.to_owned().into_t()?; + map.values_mut().filter_map(Key::as_uref_mut).for_each(func); + CLValue::from_t(map)? + } + (CLType::Unit, CLType::Key) => { + let mut map: BTreeMap<(), Key> = cl_value.to_owned().into_t()?; + map.values_mut().filter_map(Key::as_uref_mut).for_each(func); + CLValue::from_t(map)? + } + (CLType::String, CLType::Key) => { + let mut map: NamedKeys = cl_value.to_owned().into_t()?; + map.keys_mut().filter_map(Key::as_uref_mut).for_each(func); + CLValue::from_t(map)? + } + (CLType::PublicKey, CLType::Key) => { + let mut map: BTreeMap = cl_value.to_owned().into_t()?; + map.values_mut().filter_map(Key::as_uref_mut).for_each(func); + CLValue::from_t(map)? + } + (_, _) => cl_value, + }, + CLType::Tuple1([ty]) => match **ty { + CLType::URef => { + let mut val: (URef,) = cl_value.to_owned().into_t()?; + func(&mut val.0); + CLValue::from_t(val)? + } + CLType::Key => { + let mut val: (Key,) = cl_value.to_owned().into_t()?; + val.0.as_uref_mut().iter_mut().for_each(|v| func(v)); + CLValue::from_t(val)? 
+ } + _ => cl_value, + }, + CLType::Tuple2([ty1, ty2]) => match (&**ty1, &**ty2) { + (CLType::URef, CLType::Bool) => { + let mut val: (URef, bool) = cl_value.to_owned().into_t()?; + func(&mut val.0); + CLValue::from_t(val)? + } + (CLType::URef, CLType::I32) => { + let mut val: (URef, i32) = cl_value.to_owned().into_t()?; + func(&mut val.0); + CLValue::from_t(val)? + } + (CLType::URef, CLType::I64) => { + let mut val: (URef, i64) = cl_value.to_owned().into_t()?; + func(&mut val.0); + CLValue::from_t(val)? + } + (CLType::URef, CLType::U8) => { + let mut val: (URef, u8) = cl_value.to_owned().into_t()?; + func(&mut val.0); + CLValue::from_t(val)? + } + (CLType::URef, CLType::U32) => { + let mut val: (URef, u32) = cl_value.to_owned().into_t()?; + func(&mut val.0); + CLValue::from_t(val)? + } + (CLType::URef, CLType::U64) => { + let mut val: (URef, u64) = cl_value.to_owned().into_t()?; + func(&mut val.0); + CLValue::from_t(val)? + } + (CLType::URef, CLType::U128) => { + let mut val: (URef, U128) = cl_value.to_owned().into_t()?; + func(&mut val.0); + CLValue::from_t(val)? + } + (CLType::URef, CLType::U256) => { + let mut val: (URef, U256) = cl_value.to_owned().into_t()?; + func(&mut val.0); + CLValue::from_t(val)? + } + (CLType::URef, CLType::U512) => { + let mut val: (URef, U512) = cl_value.to_owned().into_t()?; + func(&mut val.0); + CLValue::from_t(val)? + } + (CLType::URef, CLType::Unit) => { + let mut val: (URef, ()) = cl_value.to_owned().into_t()?; + func(&mut val.0); + CLValue::from_t(val)? + } + (CLType::URef, CLType::String) => { + let mut val: (URef, String) = cl_value.to_owned().into_t()?; + func(&mut val.0); + CLValue::from_t(val)? + } + (CLType::URef, CLType::Key) => { + let mut val: (URef, Key) = cl_value.to_owned().into_t()?; + func(&mut val.0); + val.1.as_uref_mut().iter_mut().for_each(|v| func(v)); + CLValue::from_t(val)? 
+ } + (CLType::URef, CLType::URef) => { + let mut val: (URef, URef) = cl_value.to_owned().into_t()?; + func(&mut val.0); + func(&mut val.1); + CLValue::from_t(val)? + } + (CLType::Key, CLType::Bool) => { + let mut val: (Key, bool) = cl_value.to_owned().into_t()?; + val.0.as_uref_mut().iter_mut().for_each(|v| func(v)); + CLValue::from_t(val)? + } + (CLType::Key, CLType::I32) => { + let mut val: (Key, i32) = cl_value.to_owned().into_t()?; + val.0.as_uref_mut().iter_mut().for_each(|v| func(v)); + CLValue::from_t(val)? + } + (CLType::Key, CLType::I64) => { + let mut val: (Key, i64) = cl_value.to_owned().into_t()?; + val.0.as_uref_mut().iter_mut().for_each(|v| func(v)); + CLValue::from_t(val)? + } + (CLType::Key, CLType::U8) => { + let mut val: (Key, u8) = cl_value.to_owned().into_t()?; + val.0.as_uref_mut().iter_mut().for_each(|v| func(v)); + CLValue::from_t(val)? + } + (CLType::Key, CLType::U32) => { + let mut val: (Key, u32) = cl_value.to_owned().into_t()?; + val.0.as_uref_mut().iter_mut().for_each(|v| func(v)); + CLValue::from_t(val)? + } + (CLType::Key, CLType::U64) => { + let mut val: (Key, u64) = cl_value.to_owned().into_t()?; + val.0.as_uref_mut().iter_mut().for_each(|v| func(v)); + CLValue::from_t(val)? + } + (CLType::Key, CLType::U128) => { + let mut val: (Key, U128) = cl_value.to_owned().into_t()?; + val.0.as_uref_mut().iter_mut().for_each(|v| func(v)); + CLValue::from_t(val)? + } + (CLType::Key, CLType::U256) => { + let mut val: (Key, U256) = cl_value.to_owned().into_t()?; + val.0.as_uref_mut().iter_mut().for_each(|v| func(v)); + CLValue::from_t(val)? + } + (CLType::Key, CLType::U512) => { + let mut val: (Key, U512) = cl_value.to_owned().into_t()?; + val.0.as_uref_mut().iter_mut().for_each(|v| func(v)); + CLValue::from_t(val)? + } + (CLType::Key, CLType::Unit) => { + let mut val: (Key, ()) = cl_value.to_owned().into_t()?; + val.0.as_uref_mut().iter_mut().for_each(|v| func(v)); + CLValue::from_t(val)? 
+ } + (CLType::Key, CLType::String) => { + let mut val: (Key, String) = cl_value.to_owned().into_t()?; + val.0.as_uref_mut().iter_mut().for_each(|v| func(v)); + CLValue::from_t(val)? + } + (CLType::Key, CLType::URef) => { + let mut val: (Key, URef) = cl_value.to_owned().into_t()?; + val.0.as_uref_mut().iter_mut().for_each(|v| func(v)); + func(&mut val.1); + CLValue::from_t(val)? + } + (CLType::Key, CLType::Key) => { + let mut val: (Key, Key) = cl_value.to_owned().into_t()?; + val.0.as_uref_mut().iter_mut().for_each(|v| func(v)); + val.1.as_uref_mut().iter_mut().for_each(|v| func(v)); + CLValue::from_t(val)? + } + (CLType::Bool, CLType::URef) => { + let mut val: (bool, URef) = cl_value.to_owned().into_t()?; + func(&mut val.1); + CLValue::from_t(val)? + } + (CLType::I32, CLType::URef) => { + let mut val: (i32, URef) = cl_value.to_owned().into_t()?; + func(&mut val.1); + CLValue::from_t(val)? + } + (CLType::I64, CLType::URef) => { + let mut val: (i64, URef) = cl_value.to_owned().into_t()?; + func(&mut val.1); + CLValue::from_t(val)? + } + (CLType::U8, CLType::URef) => { + let mut val: (u8, URef) = cl_value.to_owned().into_t()?; + func(&mut val.1); + CLValue::from_t(val)? + } + (CLType::U32, CLType::URef) => { + let mut val: (u32, URef) = cl_value.to_owned().into_t()?; + func(&mut val.1); + CLValue::from_t(val)? + } + (CLType::U64, CLType::URef) => { + let mut val: (u64, URef) = cl_value.to_owned().into_t()?; + func(&mut val.1); + CLValue::from_t(val)? + } + (CLType::U128, CLType::URef) => { + let mut val: (U128, URef) = cl_value.to_owned().into_t()?; + func(&mut val.1); + CLValue::from_t(val)? + } + (CLType::U256, CLType::URef) => { + let mut val: (U256, URef) = cl_value.to_owned().into_t()?; + func(&mut val.1); + CLValue::from_t(val)? + } + (CLType::U512, CLType::URef) => { + let mut val: (U512, URef) = cl_value.to_owned().into_t()?; + func(&mut val.1); + CLValue::from_t(val)? 
+ } + (CLType::Unit, CLType::URef) => { + let mut val: ((), URef) = cl_value.to_owned().into_t()?; + func(&mut val.1); + CLValue::from_t(val)? + } + (CLType::String, CLType::URef) => { + let mut val: (String, URef) = cl_value.to_owned().into_t()?; + func(&mut val.1); + CLValue::from_t(val)? + } + (CLType::Bool, CLType::Key) => { + let mut val: (bool, Key) = cl_value.to_owned().into_t()?; + val.1.as_uref_mut().iter_mut().for_each(|v| func(v)); + CLValue::from_t(val)? + } + (CLType::I32, CLType::Key) => { + let mut val: (i32, Key) = cl_value.to_owned().into_t()?; + val.1.as_uref_mut().iter_mut().for_each(|v| func(v)); + CLValue::from_t(val)? + } + (CLType::I64, CLType::Key) => { + let mut val: (i64, Key) = cl_value.to_owned().into_t()?; + val.1.as_uref_mut().iter_mut().for_each(|v| func(v)); + CLValue::from_t(val)? + } + (CLType::U8, CLType::Key) => { + let mut val: (u8, Key) = cl_value.to_owned().into_t()?; + val.1.as_uref_mut().iter_mut().for_each(|v| func(v)); + CLValue::from_t(val)? + } + (CLType::U32, CLType::Key) => { + let mut val: (u32, Key) = cl_value.to_owned().into_t()?; + val.1.as_uref_mut().iter_mut().for_each(|v| func(v)); + CLValue::from_t(val)? + } + (CLType::U64, CLType::Key) => { + let mut val: (u64, Key) = cl_value.to_owned().into_t()?; + val.1.as_uref_mut().iter_mut().for_each(|v| func(v)); + CLValue::from_t(val)? + } + (CLType::U128, CLType::Key) => { + let mut val: (U128, Key) = cl_value.to_owned().into_t()?; + val.1.as_uref_mut().iter_mut().for_each(|v| func(v)); + CLValue::from_t(val)? + } + (CLType::U256, CLType::Key) => { + let mut val: (U256, Key) = cl_value.to_owned().into_t()?; + val.1.as_uref_mut().iter_mut().for_each(|v| func(v)); + CLValue::from_t(val)? + } + (CLType::U512, CLType::Key) => { + let mut val: (U512, Key) = cl_value.to_owned().into_t()?; + val.1.as_uref_mut().iter_mut().for_each(|v| func(v)); + CLValue::from_t(val)? 
+ } + (CLType::Unit, CLType::Key) => { + let mut val: ((), Key) = cl_value.to_owned().into_t()?; + val.1.as_uref_mut().iter_mut().for_each(|v| func(v)); + CLValue::from_t(val)? + } + (CLType::String, CLType::Key) => { + let mut val: (String, Key) = cl_value.to_owned().into_t()?; + val.1.as_uref_mut().iter_mut().for_each(|v| func(v)); + CLValue::from_t(val)? + } + (_, _) => cl_value, + }, + CLType::Tuple3(_) => cl_value, + CLType::Key => { + let mut key: Key = cl_value.to_t()?; + key.as_uref_mut().iter_mut().for_each(|v| func(v)); + CLValue::from_t(key)? + } + CLType::URef => { + let mut uref: URef = cl_value.to_t()?; + func(&mut uref); + CLValue::from_t(uref)? + } + }; + Ok(ret) +} + +#[cfg(test)] +mod tests { + use std::collections::BTreeMap; + + use proptest::{ + array::uniform32, + collection::{btree_map, vec}, + option, + prelude::*, + result, + }; + + use casper_types::{ + gens::*, runtime_args, AccessRights, CLType, CLValue, Key, PublicKey, SecretKey, URef, + }; + + use super::*; + + fn cl_value_with_urefs_arb() -> impl Strategy)> { + // If compiler brings you here it most probably means you've added a variant to `CLType` + // enum but forgot to add generator for it. + let stub: Option = None; + if let Some(cl_type) = stub { + match cl_type { + CLType::Bool + | CLType::I32 + | CLType::I64 + | CLType::U8 + | CLType::U32 + | CLType::U64 + | CLType::U128 + | CLType::U256 + | CLType::U512 + | CLType::Unit + | CLType::String + | CLType::Key + | CLType::URef + | CLType::Option(_) + | CLType::List(_) + | CLType::ByteArray(..) + | CLType::Result { .. } + | CLType::Map { .. 
} + | CLType::Tuple1(_) + | CLType::Tuple2(_) + | CLType::Tuple3(_) + | CLType::PublicKey + | CLType::Any => (), + } + }; + + prop_oneof![ + Just((CLValue::from_t(()).expect("should create CLValue"), vec![])), + any::() + .prop_map(|x| (CLValue::from_t(x).expect("should create CLValue"), vec![])), + any::().prop_map(|x| (CLValue::from_t(x).expect("should create CLValue"), vec![])), + any::().prop_map(|x| (CLValue::from_t(x).expect("should create CLValue"), vec![])), + any::().prop_map(|x| (CLValue::from_t(x).expect("should create CLValue"), vec![])), + any::().prop_map(|x| (CLValue::from_t(x).expect("should create CLValue"), vec![])), + any::().prop_map(|x| (CLValue::from_t(x).expect("should create CLValue"), vec![])), + u128_arb().prop_map(|x| (CLValue::from_t(x).expect("should create CLValue"), vec![])), + u256_arb().prop_map(|x| (CLValue::from_t(x).expect("should create CLValue"), vec![])), + u512_arb().prop_map(|x| (CLValue::from_t(x).expect("should create CLValue"), vec![])), + key_arb().prop_map(|x| { + let urefs = x.as_uref().into_iter().cloned().collect(); + (CLValue::from_t(x).expect("should create CLValue"), urefs) + }), + uref_arb().prop_map(|x| (CLValue::from_t(x).expect("should create CLValue"), vec![x])), + ".*".prop_map(|x: String| (CLValue::from_t(x).expect("should create CLValue"), vec![])), + option::of(any::()) + .prop_map(|x| (CLValue::from_t(x).expect("should create CLValue"), vec![])), + option::of(uref_arb()).prop_map(|x| { + let urefs = x.iter().cloned().collect(); + (CLValue::from_t(x).expect("should create CLValue"), urefs) + }), + option::of(key_arb()).prop_map(|x| { + let urefs = x.iter().filter_map(Key::as_uref).cloned().collect(); + (CLValue::from_t(x).expect("should create CLValue"), urefs) + }), + vec(any::(), 0..100) + .prop_map(|x| (CLValue::from_t(x).expect("should create CLValue"), vec![])), + vec(uref_arb(), 0..100).prop_map(|x| ( + CLValue::from_t(x.clone()).expect("should create CLValue"), + x + )), + vec(key_arb(), 
0..100).prop_map(|x| ( + CLValue::from_t(x.clone()).expect("should create CLValue"), + x.into_iter().filter_map(Key::into_uref).collect() + )), + uniform32(any::()) + .prop_map(|x| (CLValue::from_t(x).expect("should create CLValue"), vec![])), + result::maybe_err(key_arb(), ".*").prop_map(|x| { + let urefs = match &x { + Ok(key) => key.as_uref().into_iter().cloned().collect(), + Err(_) => vec![], + }; + (CLValue::from_t(x).expect("should create CLValue"), urefs) + }), + result::maybe_ok(".*", uref_arb()).prop_map(|x| { + let urefs = match &x { + Ok(_) => vec![], + Err(uref) => vec![*uref], + }; + (CLValue::from_t(x).expect("should create CLValue"), urefs) + }), + btree_map(".*", u512_arb(), 0..100) + .prop_map(|x| (CLValue::from_t(x).expect("should create CLValue"), vec![])), + btree_map(uref_arb(), u512_arb(), 0..100).prop_map(|x| { + let urefs = x.keys().cloned().collect(); + (CLValue::from_t(x).expect("should create CLValue"), urefs) + }), + btree_map(".*", uref_arb(), 0..100).prop_map(|x| { + let urefs = x.values().cloned().collect(); + (CLValue::from_t(x).expect("should create CLValue"), urefs) + }), + btree_map(uref_arb(), key_arb(), 0..100).prop_map(|x| { + let urefs: Vec = x + .clone() + .into_iter() + .flat_map(|(k, v)| { + vec![Some(k), v.into_uref()] + .into_iter() + .flatten() + .collect::>() + }) + .collect(); + (CLValue::from_t(x).expect("should create CLValue"), urefs) + }), + btree_map(key_arb(), uref_arb(), 0..100).prop_map(|x| { + let urefs: Vec = x + .clone() + .into_iter() + .flat_map(|(k, v)| { + vec![k.into_uref(), Some(v)] + .into_iter() + .flatten() + .collect::>() + }) + .collect(); + (CLValue::from_t(x).expect("should create CLValue"), urefs) + }), + btree_map(key_arb(), key_arb(), 0..100).prop_map(|x| { + let urefs: Vec = x + .clone() + .into_iter() + .flat_map(|(k, v)| { + vec![k.into_uref(), v.into_uref()] + .into_iter() + .flatten() + .collect::>() + }) + .collect(); + (CLValue::from_t(x).expect("should create CLValue"), urefs) + }), + 
(any::()) + .prop_map(|x| (CLValue::from_t(x).expect("should create CLValue"), vec![])), + (uref_arb()) + .prop_map(|x| (CLValue::from_t(x).expect("should create CLValue"), vec![x])), + (any::(), any::()) + .prop_map(|x| (CLValue::from_t(x).expect("should create CLValue"), vec![])), + (uref_arb(), any::()).prop_map(|x| { + let uref = x.0; + ( + CLValue::from_t(x).expect("should create CLValue"), + vec![uref], + ) + }), + (any::(), key_arb()).prop_map(|x| { + let urefs = x.1.as_uref().into_iter().cloned().collect(); + (CLValue::from_t(x).expect("should create CLValue"), urefs) + }), + (uref_arb(), key_arb()).prop_map(|x| { + let mut urefs = vec![x.0]; + urefs.extend(x.1.as_uref().into_iter().cloned()); + (CLValue::from_t(x).expect("should create CLValue"), urefs) + }), + ] + } + + proptest! { + #[test] + fn should_extract_urefs((cl_value, urefs) in cl_value_with_urefs_arb()) { + let extracted_urefs = extract_urefs(&cl_value).unwrap(); + prop_assert_eq!(extracted_urefs, urefs); + } + } + + #[test] + fn extract_from_public_keys_to_urefs_map() { + let uref = URef::new([43; 32], AccessRights::READ_ADD_WRITE); + let mut map = BTreeMap::new(); + map.insert( + PublicKey::from( + &SecretKey::ed25519_from_bytes([42; SecretKey::ED25519_LENGTH]).unwrap(), + ), + uref, + ); + let cl_value = CLValue::from_t(map).unwrap(); + assert_eq!(extract_urefs(&cl_value).unwrap(), vec![uref]); + } + + #[test] + fn extract_from_public_keys_to_uref_keys_map() { + let uref = URef::new([43; 32], AccessRights::READ_ADD_WRITE); + let key = Key::from(uref); + let mut map = BTreeMap::new(); + map.insert( + PublicKey::from( + &SecretKey::ed25519_from_bytes([42; SecretKey::ED25519_LENGTH]).unwrap(), + ), + key, + ); + let cl_value = CLValue::from_t(map).unwrap(); + assert_eq!(extract_urefs(&cl_value).unwrap(), vec![uref]); + } + + #[test] + fn should_modify_urefs() { + let uref_1 = URef::new([1; 32], AccessRights::READ_ADD_WRITE); + let uref_2 = URef::new([2; 32], AccessRights::READ_ADD_WRITE); + let 
uref_3 = URef::new([3; 32], AccessRights::READ_ADD_WRITE); + + let args = runtime_args! { + "uref1" => uref_1, + "uref2" => Some(uref_1), + "uref3" => vec![uref_2, uref_1, uref_3], + "uref4" => vec![Key::from(uref_3), Key::from(uref_2), Key::from(uref_1)], + }; + + let args = attenuate_uref_in_args(args, uref_1.addr(), AccessRights::WRITE).unwrap(); + + let arg = args.get("uref1").unwrap().clone(); + let lhs = arg.into_t::().unwrap(); + let rhs = uref_1.with_access_rights(AccessRights::READ_ADD); + assert_eq!(lhs, rhs); + + let arg = args.get("uref2").unwrap().clone(); + let lhs = arg.into_t::>().unwrap(); + let rhs = uref_1.with_access_rights(AccessRights::READ_ADD); + assert_eq!(lhs, Some(rhs)); + + let arg = args.get("uref3").unwrap().clone(); + let lhs = arg.into_t::>().unwrap(); + let rhs = vec![ + uref_2.with_access_rights(AccessRights::READ_ADD_WRITE), + uref_1.with_access_rights(AccessRights::READ_ADD), + uref_3.with_access_rights(AccessRights::READ_ADD_WRITE), + ]; + assert_eq!(lhs, rhs); + + let arg = args.get("uref4").unwrap().clone(); + let lhs = arg.into_t::>().unwrap(); + let rhs = vec![ + Key::from(uref_3.with_access_rights(AccessRights::READ_ADD_WRITE)), + Key::from(uref_2.with_access_rights(AccessRights::READ_ADD_WRITE)), + Key::from(uref_1.with_access_rights(AccessRights::READ_ADD)), + ]; + assert_eq!(lhs, rhs); + } +} diff --git a/execution_engine/src/runtime/wasm_prep.rs b/execution_engine/src/runtime/wasm_prep.rs new file mode 100644 index 0000000000..773e6a88e8 --- /dev/null +++ b/execution_engine/src/runtime/wasm_prep.rs @@ -0,0 +1,1246 @@ +//! Preprocessing of Wasm modules. 
+use std::{convert::TryInto, num::NonZeroU32}; + +use thiserror::Error; + +use casper_types::{OpcodeCosts, WasmConfig}; +use casper_wasm::elements::{ + self, External, Instruction, Internal, MemorySection, Module, Section, SignExtInstruction, + TableType, Type, +}; +use casper_wasm_utils::{ + self, + rules::{MemoryGrowCost, Rules}, + stack_height, +}; + +use crate::execution::ExecError; + +const ATOMIC_OPCODE_PREFIX: u8 = 0xfe; +const BULK_OPCODE_PREFIX: u8 = 0xfc; +const SIMD_OPCODE_PREFIX: u8 = 0xfd; + +const DEFAULT_GAS_MODULE_NAME: &str = "env"; +/// Name of the internal gas function injected by [`casper_wasm_utils::inject_gas_counter`]. +const INTERNAL_GAS_FUNCTION_NAME: &str = "gas"; + +/// We only allow maximum of 4k function pointers in a table section. +pub const DEFAULT_MAX_TABLE_SIZE: u32 = 4096; +/// Maximum number of elements that can appear as immediate value to the br_table instruction. +pub const DEFAULT_BR_TABLE_MAX_SIZE: u32 = 256; +/// Maximum number of global a module is allowed to declare. +pub const DEFAULT_MAX_GLOBALS: u32 = 256; +/// Maximum number of parameters a function can have. +pub const DEFAULT_MAX_PARAMETER_COUNT: u32 = 256; + +/// An error emitted by the Wasm preprocessor. +#[derive(Debug, Clone, Error)] +#[non_exhaustive] +pub enum WasmValidationError { + /// Initial table size outside allowed bounds. + #[error("initial table size of {actual} exceeds allowed limit of {max}")] + InitialTableSizeExceeded { + /// Allowed maximum table size. + max: u32, + /// Actual initial table size specified in the Wasm. + actual: u32, + }, + /// Maximum table size outside allowed bounds. + #[error("maximum table size of {actual} exceeds allowed limit of {max}")] + MaxTableSizeExceeded { + /// Allowed maximum table size. + max: u32, + /// Actual max table size specified in the Wasm. + actual: u32, + }, + /// Number of the tables in a Wasm must be at most one. 
+ #[error("the number of tables must be at most one")] + MoreThanOneTable, + /// Length of a br_table exceeded the maximum allowed size. + #[error("maximum br_table size of {actual} exceeds allowed limit of {max}")] + BrTableSizeExceeded { + /// Maximum allowed br_table length. + max: u32, + /// Actual size of a br_table in the code. + actual: usize, + }, + /// Declared number of globals exceeds allowed limit. + #[error("declared number of globals ({actual}) exceeds allowed limit of {max}")] + TooManyGlobals { + /// Maximum allowed globals. + max: u32, + /// Actual number of globals declared in the Wasm. + actual: usize, + }, + /// Module declares a function type with too many parameters. + #[error("use of a function type with too many parameters (limit of {max} but function declares {actual})")] + TooManyParameters { + /// Maximum allowed parameters. + max: u32, + /// Actual number of parameters a function has in the Wasm. + actual: usize, + }, + /// Module tries to import a function that the host does not provide. + #[error("module imports a non-existent function")] + MissingHostFunction, + /// Opcode for a global access refers to a non-existing global + #[error("opcode for a global access refers to non-existing global index {index}")] + IncorrectGlobalOperation { + /// Provided index. + index: u32, + }, + /// Missing function index. + #[error("missing function index {index}")] + MissingFunctionIndex { + /// Provided index. + index: u32, + }, + /// Missing function type. + #[error("missing type index {index}")] + MissingFunctionType { + /// Provided index. + index: u32, + }, +} + +/// An error emitted by the Wasm preprocessor. +#[derive(Debug, Clone, Error)] +#[non_exhaustive] +pub enum PreprocessingError { + /// Unable to deserialize Wasm bytes. + #[error("Deserialization error: {0}")] + Deserialize(String), + /// Found opcodes forbidden by gas rules. + #[error( + "Encountered operation forbidden by gas rules. 
Consult instruction -> metering config map" + )] + OperationForbiddenByGasRules, + /// Stack limiter was unable to instrument the binary. + #[error("Stack limiter error")] + StackLimiter, + /// Wasm bytes is missing memory section. + #[error("Memory section should exist")] + MissingMemorySection, + /// The module is missing. + #[error("Missing module")] + MissingModule, + /// Unable to validate wasm bytes. + #[error("Wasm validation error: {0}")] + WasmValidation(#[from] WasmValidationError), +} + +impl From for PreprocessingError { + fn from(error: elements::Error) -> Self { + PreprocessingError::Deserialize(error.to_string()) + } +} + +/// Ensures that all the references to functions and global variables in the wasm bytecode are +/// properly declared. +/// +/// This validates that: +/// +/// - Start function points to a function declared in the Wasm bytecode +/// - All exported functions are pointing to functions declared in the Wasm bytecode +/// - `call` instructions reference a function declared in the Wasm bytecode. +/// - `global.set`, `global.get` instructions are referencing an existing global declared in the +/// Wasm bytecode. +/// - All members of the "elem" section point at functions declared in the Wasm bytecode. 
+fn ensure_valid_access(module: &Module) -> Result<(), WasmValidationError> { + let function_types_count = module + .type_section() + .map(|ts| ts.types().len()) + .unwrap_or_default(); + + let mut function_count = 0_u32; + if let Some(import_section) = module.import_section() { + for import_entry in import_section.entries() { + if let External::Function(function_type_index) = import_entry.external() { + if (*function_type_index as usize) < function_types_count { + function_count = function_count.saturating_add(1); + } else { + return Err(WasmValidationError::MissingFunctionType { + index: *function_type_index, + }); + } + } + } + } + if let Some(function_section) = module.function_section() { + for function_entry in function_section.entries() { + let function_type_index = function_entry.type_ref(); + if (function_type_index as usize) < function_types_count { + function_count = function_count.saturating_add(1); + } else { + return Err(WasmValidationError::MissingFunctionType { + index: function_type_index, + }); + } + } + } + + if let Some(function_index) = module.start_section() { + ensure_valid_function_index(function_index, function_count)?; + } + if let Some(export_section) = module.export_section() { + for export_entry in export_section.entries() { + if let Internal::Function(function_index) = export_entry.internal() { + ensure_valid_function_index(*function_index, function_count)?; + } + } + } + + if let Some(code_section) = module.code_section() { + let global_len = module + .global_section() + .map(|global_section| global_section.entries().len()) + .unwrap_or(0); + + for instr in code_section + .bodies() + .iter() + .flat_map(|body| body.code().elements()) + { + match instr { + Instruction::Call(idx) => { + ensure_valid_function_index(*idx, function_count)?; + } + Instruction::GetGlobal(idx) | Instruction::SetGlobal(idx) + if *idx as usize >= global_len => + { + return Err(WasmValidationError::IncorrectGlobalOperation { index: *idx }); + } + _ => {} + } + } 
+ } + + if let Some(element_section) = module.elements_section() { + for element_segment in element_section.entries() { + for idx in element_segment.members() { + ensure_valid_function_index(*idx, function_count)?; + } + } + } + + Ok(()) +} + +fn ensure_valid_function_index(index: u32, function_count: u32) -> Result<(), WasmValidationError> { + if index >= function_count { + return Err(WasmValidationError::MissingFunctionIndex { index }); + } + Ok(()) +} + +/// Checks if given wasm module contains a non-empty memory section. +fn memory_section(module: &Module) -> Option<&MemorySection> { + for section in module.sections() { + if let Section::Memory(section) = section { + return if section.entries().is_empty() { + None + } else { + Some(section) + }; + } + } + None +} + +/// Ensures (table) section has at most one table entry, and initial, and maximum values are +/// normalized. +/// +/// If a maximum value is not specified it will be defaulted to 4k to prevent OOM. +fn ensure_table_size_limit(mut module: Module, limit: u32) -> Result { + if let Some(sect) = module.table_section_mut() { + // Table section is optional and there can be at most one. + if sect.entries().len() > 1 { + return Err(WasmValidationError::MoreThanOneTable); + } + + if let Some(table_entry) = sect.entries_mut().first_mut() { + let initial = table_entry.limits().initial(); + if initial > limit { + return Err(WasmValidationError::InitialTableSizeExceeded { + max: limit, + actual: initial, + }); + } + + match table_entry.limits().maximum() { + Some(max) => { + if max > limit { + return Err(WasmValidationError::MaxTableSizeExceeded { + max: limit, + actual: max, + }); + } + } + None => { + // rewrite wasm and provide a maximum limit for a table section + *table_entry = TableType::new(initial, Some(limit)) + } + } + } + } + + Ok(module) +} + +/// Ensure that any `br_table` instruction adheres to its immediate value limit. 
+fn ensure_br_table_size_limit(module: &Module, limit: u32) -> Result<(), WasmValidationError> { + let code_section = if let Some(type_section) = module.code_section() { + type_section + } else { + return Ok(()); + }; + for instr in code_section + .bodies() + .iter() + .flat_map(|body| body.code().elements()) + { + if let Instruction::BrTable(br_table_data) = instr { + if br_table_data.table.len() > limit as usize { + return Err(WasmValidationError::BrTableSizeExceeded { + max: limit, + actual: br_table_data.table.len(), + }); + } + } + } + Ok(()) +} + +/// Ensures that module doesn't declare too many globals. +/// +/// Globals are not limited through the `stack_height` as locals are. Neither does +/// the linear memory limit `memory_pages` applies to them. +fn ensure_global_variable_limit(module: &Module, limit: u32) -> Result<(), WasmValidationError> { + if let Some(global_section) = module.global_section() { + let actual = global_section.entries().len(); + if actual > limit as usize { + return Err(WasmValidationError::TooManyGlobals { max: limit, actual }); + } + } + Ok(()) +} + +/// Ensure maximum numbers of parameters a function can have. +/// +/// Those need to be limited to prevent a potentially exploitable interaction with +/// the stack height instrumentation: The costs of executing the stack height +/// instrumentation for an indirectly called function scales linearly with the amount +/// of parameters of this function. Because the stack height instrumentation itself is +/// is not weight metered its costs must be static (via this limit) and included in +/// the costs of the instructions that cause them (call, call_indirect). 
+fn ensure_parameter_limit(module: &Module, limit: u32) -> Result<(), WasmValidationError> { + let type_section = if let Some(type_section) = module.type_section() { + type_section + } else { + return Ok(()); + }; + + for Type::Function(func) in type_section.types() { + let actual = func.params().len(); + if actual > limit as usize { + return Err(WasmValidationError::TooManyParameters { max: limit, actual }); + } + } + + Ok(()) +} + +/// Ensures that Wasm module has valid imports. +fn ensure_valid_imports(module: &Module) -> Result<(), WasmValidationError> { + let import_entries = module + .import_section() + .map(|is| is.entries()) + .unwrap_or(&[]); + + // Gas counter is currently considered an implementation detail. + // + // If a wasm module tries to import it will be rejected. + + for import in import_entries { + if import.module() == DEFAULT_GAS_MODULE_NAME + && import.field() == INTERNAL_GAS_FUNCTION_NAME + { + return Err(WasmValidationError::MissingHostFunction); + } + } + + Ok(()) +} + +/// Preprocesses Wasm bytes and returns a module. +/// +/// This process consists of a few steps: +/// - Validate that the given bytes contain a memory section, and check the memory page limit. +/// - Inject gas counters into the code, which makes it possible for the executed Wasm to be charged +/// for opcodes; this also validates opcodes and ensures that there are no forbidden opcodes in +/// use, such as floating point opcodes. +/// - Ensure that the code has a maximum stack height. +/// +/// In case the preprocessing rules can't be applied, an error is returned. +/// Otherwise, this method returns a valid module ready to be executed safely on the host. 
+pub fn preprocess( + wasm_config: WasmConfig, + module_bytes: &[u8], +) -> Result { + let module = deserialize(module_bytes)?; + + ensure_valid_access(&module)?; + + if memory_section(&module).is_none() { + // `casper_wasm_utils::externalize_mem` expects a non-empty memory section to exist in the + // module, and panics otherwise. + return Err(PreprocessingError::MissingMemorySection); + } + + let module = ensure_table_size_limit(module, DEFAULT_MAX_TABLE_SIZE)?; + ensure_br_table_size_limit(&module, DEFAULT_BR_TABLE_MAX_SIZE)?; + ensure_global_variable_limit(&module, DEFAULT_MAX_GLOBALS)?; + ensure_parameter_limit(&module, DEFAULT_MAX_PARAMETER_COUNT)?; + ensure_valid_imports(&module)?; + + let costs = RuledOpcodeCosts(wasm_config.v1().opcode_costs()); + let module = casper_wasm_utils::externalize_mem(module, None, wasm_config.v1().max_memory()); + let module = casper_wasm_utils::inject_gas_counter(module, &costs, DEFAULT_GAS_MODULE_NAME) + .map_err(|_| PreprocessingError::OperationForbiddenByGasRules)?; + let module = stack_height::inject_limiter(module, wasm_config.v1().max_stack_height()) + .map_err(|_| PreprocessingError::StackLimiter)?; + Ok(module) +} + +/// Returns a parity Module from the given bytes without making modifications or checking limits. 
+pub fn deserialize(module_bytes: &[u8]) -> Result { + casper_wasm::deserialize_buffer::(module_bytes).map_err(|deserialize_error| { + match deserialize_error { + casper_wasm::SerializationError::UnknownOpcode(BULK_OPCODE_PREFIX) => { + PreprocessingError::Deserialize( + "Bulk memory operations are not supported".to_string(), + ) + } + casper_wasm::SerializationError::UnknownOpcode(SIMD_OPCODE_PREFIX) => { + PreprocessingError::Deserialize("SIMD operations are not supported".to_string()) + } + casper_wasm::SerializationError::UnknownOpcode(ATOMIC_OPCODE_PREFIX) => { + PreprocessingError::Deserialize("Atomic operations are not supported".to_string()) + } + casper_wasm::SerializationError::UnknownOpcode(_) => { + PreprocessingError::Deserialize("Encountered an unsupported operation".to_string()) + } + casper_wasm::SerializationError::Other( + "Enable the multi_value feature to deserialize more than one function result", + ) => { + // Due to the way casper-wasm crate works, it's always deserializes opcodes + // from multi_value proposal but if the feature is not enabled, then it will + // error with very specific message (as compared to other extensions). + // + // That's OK since we'd prefer to not inspect deserialized bytecode. We + // can simply replace the error message with a more user friendly one. + PreprocessingError::Deserialize( + "Multi value extension is not supported".to_string(), + ) + } + _ => deserialize_error.into(), + } + }) +} + +/// Creates new wasm module from entry points. 
+pub fn get_module_from_entry_points( + entry_point_names: Vec<&str>, + mut module: Module, +) -> Result, ExecError> { + let export_section = module + .export_section() + .ok_or_else(|| ExecError::FunctionNotFound(String::from("Missing Export Section")))?; + + let maybe_missing_name: Option = entry_point_names + .iter() + .find(|name| { + !export_section + .entries() + .iter() + .any(|export_entry| export_entry.field() == **name) + }) + .map(|s| String::from(*s)); + + match maybe_missing_name { + Some(missing_name) => Err(ExecError::FunctionNotFound(missing_name)), + None => { + casper_wasm_utils::optimize(&mut module, entry_point_names)?; + casper_wasm::serialize(module).map_err(ExecError::ParityWasm) + } + } +} + +/// Returns the cost of executing a single instruction. +/// +/// This is benchmarked on a reference hardware, and calculated based on the multiplies of the +/// cheapest opcode (nop) in the given instruction. +/// +/// For instance, nop will always have cycle cost of 1, and all other opcodes will have a multiple +/// of that. +/// +/// The number of cycles for each instruction correlates, but not directly, to the reference x86_64 +/// CPU cycles it takes to execute the instruction as the interpreter does extra work to invoke an +/// instruction. +pub fn cycles_for_instruction(instruction: &Instruction) -> u32 { + match instruction { + // The following instructions signal the beginning of a block, loop, or if construct. They + // don't have any static cost. Validated in benchmarks. + Instruction::Loop(_) => 1, + Instruction::Block(_) => 1, + Instruction::Else => 1, + Instruction::End => 1, + + Instruction::Unreachable => 1, + Instruction::Nop => 1, + + Instruction::If(_) => 3, + + // These instructions are resuming execution from previously saved location (produced by + // loop or block). 
+ Instruction::Br(_) => 1, + Instruction::BrIf(_) => 3, + Instruction::BrTable(_) => 5, + + Instruction::Return => 1, + + // Call opcodes are charged for each of the opcode individually. Validated in benchmarks. + Instruction::Call(_) => 22, + Instruction::CallIndirect(_, _) => 27, + + Instruction::Drop => 1, + + // Select opcode is validated in benchmarks. + Instruction::Select => 11, + + Instruction::GetLocal(_) | Instruction::SetLocal(_) | Instruction::TeeLocal(_) => 5, + + Instruction::GetGlobal(_) => 7, + Instruction::SetGlobal(_) => 5, + + Instruction::I64Load32S(_, _) + | Instruction::F32Load(_, _) + | Instruction::F64Load(_, _) + | Instruction::I32Load(_, _) + | Instruction::I64Load(_, _) + | Instruction::I32Load8S(_, _) + | Instruction::I64Load32U(_, _) + | Instruction::I64Load8U(_, _) + | Instruction::I64Load8S(_, _) + | Instruction::I32Load8U(_, _) + | Instruction::I64Load16U(_, _) + | Instruction::I32Load16U(_, _) + | Instruction::I64Load16S(_, _) + | Instruction::I32Load16S(_, _) => 8, + + Instruction::I32Store(_, _) + | Instruction::I64Store(_, _) + | Instruction::F32Store(_, _) + | Instruction::F64Store(_, _) + | Instruction::I32Store8(_, _) + | Instruction::I32Store16(_, _) + | Instruction::I64Store8(_, _) + | Instruction::I64Store16(_, _) + | Instruction::I64Store32(_, _) => 4, + + Instruction::CurrentMemory(_) => 5, + Instruction::GrowMemory(_) => 5, + + Instruction::I32Const(_) + | Instruction::I64Const(_) + | Instruction::F32Const(_) + | Instruction::F64Const(_) => 5, + + Instruction::I32Eqz + | Instruction::I32Eq + | Instruction::I32Ne + | Instruction::I32LtS + | Instruction::I32LtU + | Instruction::I32GtS + | Instruction::I32GtU + | Instruction::I32LeS + | Instruction::I32LeU + | Instruction::I32GeS + | Instruction::I32GeU + | Instruction::I64Eqz + | Instruction::I64Eq + | Instruction::I64Ne + | Instruction::I64LtS + | Instruction::I64LtU + | Instruction::I64GtS + | Instruction::I64GtU + | Instruction::I64LeS + | Instruction::I64LeU + | 
Instruction::I64GeS + | Instruction::I64GeU => 5, + + Instruction::F32Eq + | Instruction::F32Ne + | Instruction::F32Lt + | Instruction::F32Gt + | Instruction::F32Le + | Instruction::F32Ge + | Instruction::F64Eq + | Instruction::F64Ne + | Instruction::F64Lt + | Instruction::F64Gt + | Instruction::F64Le + | Instruction::F64Ge => 5, + + Instruction::I32Clz | Instruction::I32Ctz | Instruction::I32Popcnt => 5, + + Instruction::I32Add | Instruction::I32Sub => 5, + + Instruction::I32Mul => 5, + + Instruction::I32DivS + | Instruction::I32DivU + | Instruction::I32RemS + | Instruction::I32RemU => 5, + + Instruction::I32And + | Instruction::I32Or + | Instruction::I32Xor + | Instruction::I32Shl + | Instruction::I32ShrS + | Instruction::I32ShrU + | Instruction::I32Rotl + | Instruction::I32Rotr + | Instruction::I64Clz + | Instruction::I64Ctz + | Instruction::I64Popcnt => 5, + + Instruction::I64Add | Instruction::I64Sub => 5, + Instruction::I64Mul => 5, + + Instruction::I64DivS + | Instruction::I64DivU + | Instruction::I64RemS + | Instruction::I64RemU => 5, + + Instruction::I64And + | Instruction::I64Or + | Instruction::I64Xor + | Instruction::I64Shl + | Instruction::I64ShrS + | Instruction::I64ShrU + | Instruction::I64Rotl + | Instruction::I64Rotr => 5, + + Instruction::F32Abs + | Instruction::F32Neg + | Instruction::F32Ceil + | Instruction::F32Floor + | Instruction::F32Trunc + | Instruction::F32Nearest + | Instruction::F32Sqrt + | Instruction::F32Add + | Instruction::F32Sub + | Instruction::F32Mul + | Instruction::F32Div + | Instruction::F32Min + | Instruction::F32Max + | Instruction::F32Copysign + | Instruction::F64Abs + | Instruction::F64Neg + | Instruction::F64Ceil + | Instruction::F64Floor + | Instruction::F64Trunc + | Instruction::F64Nearest + | Instruction::F64Sqrt + | Instruction::F64Add + | Instruction::F64Sub + | Instruction::F64Mul + | Instruction::F64Div + | Instruction::F64Min + | Instruction::F64Max + | Instruction::F64Copysign => 5, + + Instruction::I32WrapI64 | 
Instruction::I64ExtendSI32 | Instruction::I64ExtendUI32 => 5, + + Instruction::F32ConvertSI32 + | Instruction::F32ConvertUI32 + | Instruction::F32ConvertSI64 + | Instruction::F32ConvertUI64 + | Instruction::F32DemoteF64 + | Instruction::F64ConvertSI32 + | Instruction::F64ConvertUI32 + | Instruction::F64ConvertSI64 + | Instruction::F64ConvertUI64 + | Instruction::F64PromoteF32 => 5, + + // Unsupported reinterpretation operators for floats. + Instruction::I32ReinterpretF32 + | Instruction::I64ReinterpretF64 + | Instruction::F32ReinterpretI32 + | Instruction::F64ReinterpretI64 => 5, + + Instruction::SignExt(SignExtInstruction::I32Extend8S) + | Instruction::SignExt(SignExtInstruction::I32Extend16S) + | Instruction::SignExt(SignExtInstruction::I64Extend8S) + | Instruction::SignExt(SignExtInstruction::I64Extend16S) + | Instruction::SignExt(SignExtInstruction::I64Extend32S) => 5, + + Instruction::I32TruncUF32 | Instruction::I64TruncSF32 => 40, + + Instruction::I32TruncSF32 | Instruction::I64TruncUF32 => 42, + + Instruction::I32TruncSF64 + | Instruction::I32TruncUF64 + | Instruction::I64TruncUF64 + | Instruction::I64TruncSF64 => 195, + } +} + +struct RuledOpcodeCosts(OpcodeCosts); + +impl RuledOpcodeCosts { + /// Returns the cost multiplier of executing a single instruction. + fn instruction_cost_multiplier(&self, instruction: &Instruction) -> Option { + let costs = self.0; + + // Obtain the gas cost multiplier for the instruction. + match instruction { + Instruction::Unreachable => Some(costs.unreachable), + Instruction::Nop => Some(costs.nop), + + // Control flow class of opcodes is charged for each of the opcode individually. 
+ Instruction::Block(_) => Some(costs.control_flow.block), + Instruction::Loop(_) => Some(costs.control_flow.op_loop), + Instruction::If(_) => Some(costs.control_flow.op_if), + Instruction::Else => Some(costs.control_flow.op_else), + Instruction::End => Some(costs.control_flow.end), + Instruction::Br(_) => Some(costs.control_flow.br), + Instruction::BrIf(_) => Some(costs.control_flow.br_if), + Instruction::BrTable(br_table_data) => { + // If we're unable to fit table size in `u32` to measure the cost, then such wasm + // would be rejected. This is unlikely scenario as we impose a limit + // for the amount of targets a `br_table` opcode can contain. + let br_table_size: u32 = br_table_data.table.len().try_into().ok()?; + + let br_table_cost = costs.control_flow.br_table.cost; + + let table_size_part = + br_table_size.checked_mul(costs.control_flow.br_table.size_multiplier)?; + + let br_table_cost = br_table_cost.checked_add(table_size_part)?; + Some(br_table_cost) + } + Instruction::Return => Some(costs.control_flow.op_return), + Instruction::Call(_) => Some(costs.control_flow.call), + Instruction::CallIndirect(_, _) => Some(costs.control_flow.call_indirect), + Instruction::Drop => Some(costs.control_flow.drop), + Instruction::Select => Some(costs.control_flow.select), + + Instruction::GetLocal(_) | Instruction::SetLocal(_) | Instruction::TeeLocal(_) => { + Some(costs.local) + } + Instruction::GetGlobal(_) | Instruction::SetGlobal(_) => Some(costs.global), + + Instruction::I32Load(_, _) + | Instruction::I64Load(_, _) + | Instruction::F32Load(_, _) + | Instruction::F64Load(_, _) + | Instruction::I32Load8S(_, _) + | Instruction::I32Load8U(_, _) + | Instruction::I32Load16S(_, _) + | Instruction::I32Load16U(_, _) + | Instruction::I64Load8S(_, _) + | Instruction::I64Load8U(_, _) + | Instruction::I64Load16S(_, _) + | Instruction::I64Load16U(_, _) + | Instruction::I64Load32S(_, _) + | Instruction::I64Load32U(_, _) => Some(costs.load), + + Instruction::I32Store(_, _) + | 
Instruction::I64Store(_, _) + | Instruction::F32Store(_, _) + | Instruction::F64Store(_, _) + | Instruction::I32Store8(_, _) + | Instruction::I32Store16(_, _) + | Instruction::I64Store8(_, _) + | Instruction::I64Store16(_, _) + | Instruction::I64Store32(_, _) => Some(costs.store), + + Instruction::CurrentMemory(_) => Some(costs.current_memory), + Instruction::GrowMemory(_) => Some(costs.grow_memory), + + Instruction::I32Const(_) | Instruction::I64Const(_) => Some(costs.op_const), + + Instruction::F32Const(_) | Instruction::F64Const(_) => None, // float_const + + Instruction::I32Eqz + | Instruction::I32Eq + | Instruction::I32Ne + | Instruction::I32LtS + | Instruction::I32LtU + | Instruction::I32GtS + | Instruction::I32GtU + | Instruction::I32LeS + | Instruction::I32LeU + | Instruction::I32GeS + | Instruction::I32GeU + | Instruction::I64Eqz + | Instruction::I64Eq + | Instruction::I64Ne + | Instruction::I64LtS + | Instruction::I64LtU + | Instruction::I64GtS + | Instruction::I64GtU + | Instruction::I64LeS + | Instruction::I64LeU + | Instruction::I64GeS + | Instruction::I64GeU => Some(costs.integer_comparison), + + Instruction::F32Eq + | Instruction::F32Ne + | Instruction::F32Lt + | Instruction::F32Gt + | Instruction::F32Le + | Instruction::F32Ge + | Instruction::F64Eq + | Instruction::F64Ne + | Instruction::F64Lt + | Instruction::F64Gt + | Instruction::F64Le + | Instruction::F64Ge => None, // Unsupported comparison operators for floats. 
+ + Instruction::I32Clz | Instruction::I32Ctz | Instruction::I32Popcnt => Some(costs.bit), + + Instruction::I32Add | Instruction::I32Sub => Some(costs.add), + + Instruction::I32Mul => Some(costs.mul), + + Instruction::I32DivS + | Instruction::I32DivU + | Instruction::I32RemS + | Instruction::I32RemU => Some(costs.div), + + Instruction::I32And + | Instruction::I32Or + | Instruction::I32Xor + | Instruction::I32Shl + | Instruction::I32ShrS + | Instruction::I32ShrU + | Instruction::I32Rotl + | Instruction::I32Rotr + | Instruction::I64Clz + | Instruction::I64Ctz + | Instruction::I64Popcnt => Some(costs.bit), + + Instruction::I64Add | Instruction::I64Sub => Some(costs.add), + Instruction::I64Mul => Some(costs.mul), + + Instruction::I64DivS + | Instruction::I64DivU + | Instruction::I64RemS + | Instruction::I64RemU => Some(costs.div), + + Instruction::I64And + | Instruction::I64Or + | Instruction::I64Xor + | Instruction::I64Shl + | Instruction::I64ShrS + | Instruction::I64ShrU + | Instruction::I64Rotl + | Instruction::I64Rotr => Some(costs.bit), + + Instruction::F32Abs + | Instruction::F32Neg + | Instruction::F32Ceil + | Instruction::F32Floor + | Instruction::F32Trunc + | Instruction::F32Nearest + | Instruction::F32Sqrt + | Instruction::F32Add + | Instruction::F32Sub + | Instruction::F32Mul + | Instruction::F32Div + | Instruction::F32Min + | Instruction::F32Max + | Instruction::F32Copysign + | Instruction::F64Abs + | Instruction::F64Neg + | Instruction::F64Ceil + | Instruction::F64Floor + | Instruction::F64Trunc + | Instruction::F64Nearest + | Instruction::F64Sqrt + | Instruction::F64Add + | Instruction::F64Sub + | Instruction::F64Mul + | Instruction::F64Div + | Instruction::F64Min + | Instruction::F64Max + | Instruction::F64Copysign => None, // Unsupported math operators for floats. 
+ + Instruction::I32WrapI64 | Instruction::I64ExtendSI32 | Instruction::I64ExtendUI32 => { + Some(costs.conversion) + } + + Instruction::I32TruncSF32 + | Instruction::I32TruncUF32 + | Instruction::I32TruncSF64 + | Instruction::I32TruncUF64 + | Instruction::I64TruncSF32 + | Instruction::I64TruncUF32 + | Instruction::I64TruncSF64 + | Instruction::I64TruncUF64 + | Instruction::F32ConvertSI32 + | Instruction::F32ConvertUI32 + | Instruction::F32ConvertSI64 + | Instruction::F32ConvertUI64 + | Instruction::F32DemoteF64 + | Instruction::F64ConvertSI32 + | Instruction::F64ConvertUI32 + | Instruction::F64ConvertSI64 + | Instruction::F64ConvertUI64 + | Instruction::F64PromoteF32 => None, // Unsupported conversion operators for floats. + + // Unsupported reinterpretation operators for floats. + Instruction::I32ReinterpretF32 + | Instruction::I64ReinterpretF64 + | Instruction::F32ReinterpretI32 + | Instruction::F64ReinterpretI64 => None, + + Instruction::SignExt(_) => Some(costs.sign), + } + } +} + +impl Rules for RuledOpcodeCosts { + fn instruction_cost(&self, instruction: &Instruction) -> Option { + // The number of cycles for each instruction correlates, but not directly, to the reference + // x86_64 CPU cycles. + let cycles = cycles_for_instruction(instruction); + + // The cost of executing an instruction is the number of cycles times the cost of a nop. 
+ let multiplier = self.instruction_cost_multiplier(instruction)?; + + cycles.checked_mul(multiplier) + } + + fn memory_grow_cost(&self) -> Option { + NonZeroU32::new(self.0.grow_memory).map(MemoryGrowCost::Linear) + } +} + +#[cfg(test)] +mod tests { + use casper_types::addressable_entity::DEFAULT_ENTRY_POINT_NAME; + use casper_wasm::{ + builder, + elements::{CodeSection, Instructions}, + }; + use walrus::{FunctionBuilder, ModuleConfig, ValType}; + + use super::*; + + #[test] + fn should_not_panic_on_empty_memory() { + // These bytes were generated during fuzz testing and are compiled from Wasm which + // deserializes to a `Module` with a memory section containing no entries. + const MODULE_BYTES_WITH_EMPTY_MEMORY: [u8; 61] = [ + 0x00, 0x61, 0x73, 0x6d, 0x01, 0x00, 0x00, 0x00, 0x01, 0x09, 0x02, 0x60, 0x01, 0x7f, + 0x01, 0x7f, 0x60, 0x00, 0x00, 0x03, 0x03, 0x02, 0x00, 0x01, 0x05, 0x01, 0x00, 0x08, + 0x01, 0x01, 0x0a, 0x1d, 0x02, 0x18, 0x00, 0x20, 0x00, 0x41, 0x80, 0x80, 0x82, 0x80, + 0x78, 0x70, 0x41, 0x80, 0x82, 0x80, 0x80, 0x7e, 0x4f, 0x22, 0x00, 0x1a, 0x20, 0x00, + 0x0f, 0x0b, 0x02, 0x00, 0x0b, + ]; + + match preprocess(WasmConfig::default(), &MODULE_BYTES_WITH_EMPTY_MEMORY).unwrap_err() { + PreprocessingError::MissingMemorySection => (), + error => panic!("expected MissingMemorySection, got {:?}", error), + } + } + + #[test] + fn should_not_overflow_in_export_section() { + let module = builder::module() + .function() + .signature() + .build() + .body() + .with_instructions(Instructions::new(vec![Instruction::Nop, Instruction::End])) + .build() + .build() + .export() + .field(DEFAULT_ENTRY_POINT_NAME) + .internal() + .func(u32::MAX) + .build() + // Memory section is mandatory + .memory() + .build() + .build(); + let module_bytes = casper_wasm::serialize(module).expect("should serialize"); + let error = preprocess(WasmConfig::default(), &module_bytes) + .expect_err("should fail with an error"); + assert!( + matches!( + &error, + 
PreprocessingError::WasmValidation(WasmValidationError::MissingFunctionIndex { index: missing_index }) + if *missing_index == u32::MAX + ), + "{:?}", + error, + ); + } + + #[test] + fn should_not_overflow_in_element_section() { + const CALL_FN_IDX: u32 = 0; + + let module = builder::module() + .function() + .signature() + .build() + .body() + .with_instructions(Instructions::new(vec![Instruction::Nop, Instruction::End])) + .build() + .build() + // Export above function + .export() + .field(DEFAULT_ENTRY_POINT_NAME) + .internal() + .func(CALL_FN_IDX) + .build() + .table() + .with_element(u32::MAX, vec![u32::MAX]) + .build() + // Memory section is mandatory + .memory() + .build() + .build(); + let module_bytes = casper_wasm::serialize(module).expect("should serialize"); + let error = preprocess(WasmConfig::default(), &module_bytes) + .expect_err("should fail with an error"); + assert!( + matches!( + &error, + PreprocessingError::WasmValidation(WasmValidationError::MissingFunctionIndex { index: missing_index }) + if *missing_index == u32::MAX + ), + "{:?}", + error, + ); + } + + #[test] + fn should_not_overflow_in_call_opcode() { + let module = builder::module() + .function() + .signature() + .build() + .body() + .with_instructions(Instructions::new(vec![ + Instruction::Call(u32::MAX), + Instruction::End, + ])) + .build() + .build() + // Export above function + .export() + .field(DEFAULT_ENTRY_POINT_NAME) + .build() + // .with_sections(vec![Section::Start(u32::MAX)]) + // Memory section is mandatory + .memory() + .build() + .build(); + let module_bytes = casper_wasm::serialize(module).expect("should serialize"); + let error = preprocess(WasmConfig::default(), &module_bytes) + .expect_err("should fail with an error"); + assert!( + matches!( + &error, + PreprocessingError::WasmValidation(WasmValidationError::MissingFunctionIndex { index: missing_index }) + if *missing_index == u32::MAX + ), + "{:?}", + error, + ); + } + + #[test] + fn 
should_not_overflow_in_start_section_without_code_section() { + let module = builder::module() + .with_section(Section::Start(u32::MAX)) + .memory() + .build() + .build(); + let module_bytes = casper_wasm::serialize(module).expect("should serialize"); + + let error = preprocess(WasmConfig::default(), &module_bytes) + .expect_err("should fail with an error"); + assert!( + matches!( + &error, + PreprocessingError::WasmValidation(WasmValidationError::MissingFunctionIndex { index: missing_index }) + if *missing_index == u32::MAX + ), + "{:?}", + error, + ); + } + + #[test] + fn should_not_overflow_in_start_section_with_code() { + let module = builder::module() + .with_section(Section::Start(u32::MAX)) + .with_section(Section::Code(CodeSection::with_bodies(Vec::new()))) + .memory() + .build() + .build(); + let module_bytes = casper_wasm::serialize(module).expect("should serialize"); + let error = preprocess(WasmConfig::default(), &module_bytes) + .expect_err("should fail with an error"); + assert!( + matches!( + &error, + PreprocessingError::WasmValidation(WasmValidationError::MissingFunctionIndex { index: missing_index }) + if *missing_index == u32::MAX + ), + "{:?}", + error, + ); + } + + #[test] + fn should_not_accept_multi_value_proposal_wasm() { + let module_bytes = { + let mut module = walrus::Module::with_config(ModuleConfig::new()); + + let _memory_id = module.memories.add_local(false, 11, None); + + let mut func_with_locals = + FunctionBuilder::new(&mut module.types, &[], &[ValType::I32, ValType::I64]); + + func_with_locals.func_body().i64_const(0).i32_const(1); + + let func_with_locals = func_with_locals.finish(vec![], &mut module.funcs); + + let mut call_func = FunctionBuilder::new(&mut module.types, &[], &[]); + + call_func.func_body().call(func_with_locals); + + let call = call_func.finish(Vec::new(), &mut module.funcs); + + module.exports.add(DEFAULT_ENTRY_POINT_NAME, call); + + module.emit_wasm() + }; + let error = preprocess(WasmConfig::default(), 
&module_bytes) + .expect_err("should fail with an error"); + assert!( + matches!(&error, PreprocessingError::Deserialize(msg) + if msg == "Multi value extension is not supported"), + "{:?}", + error, + ); + } + + #[test] + fn should_not_accept_atomics_proposal_wasm() { + let module_bytes = { + let mut module = walrus::Module::with_config(ModuleConfig::new()); + + let _memory_id = module.memories.add_local(false, 11, None); + + let mut func_with_atomics = FunctionBuilder::new(&mut module.types, &[], &[]); + + func_with_atomics.func_body().atomic_fence(); + + let func_with_atomics = func_with_atomics.finish(vec![], &mut module.funcs); + + let mut call_func = FunctionBuilder::new(&mut module.types, &[], &[]); + + call_func.func_body().call(func_with_atomics); + + let call = call_func.finish(Vec::new(), &mut module.funcs); + + module.exports.add(DEFAULT_ENTRY_POINT_NAME, call); + + module.emit_wasm() + }; + let error = preprocess(WasmConfig::default(), &module_bytes) + .expect_err("should fail with an error"); + assert!( + matches!(&error, PreprocessingError::Deserialize(msg) + if msg == "Atomic operations are not supported"), + "{:?}", + error, + ); + } + + #[test] + fn should_not_accept_bulk_proposal_wasm() { + let module_bytes = { + let mut module = walrus::Module::with_config(ModuleConfig::new()); + + let memory_id = module.memories.add_local(false, 11, None); + + let mut func_with_bulk = FunctionBuilder::new(&mut module.types, &[], &[]); + + func_with_bulk.func_body().memory_copy(memory_id, memory_id); + + let func_with_bulk = func_with_bulk.finish(vec![], &mut module.funcs); + + let mut call_func = FunctionBuilder::new(&mut module.types, &[], &[]); + + call_func.func_body().call(func_with_bulk); + + let call = call_func.finish(Vec::new(), &mut module.funcs); + + module.exports.add(DEFAULT_ENTRY_POINT_NAME, call); + + module.emit_wasm() + }; + let error = preprocess(WasmConfig::default(), &module_bytes) + .expect_err("should fail with an error"); + assert!( + 
matches!(&error, PreprocessingError::Deserialize(msg) + if msg == "Bulk memory operations are not supported"), + "{:?}", + error, + ); + } + + #[test] + fn should_not_accept_simd_proposal_wasm() { + let module_bytes = { + let mut module = walrus::Module::with_config(ModuleConfig::new()); + + let _memory_id = module.memories.add_local(false, 11, None); + + let mut func_with_simd = FunctionBuilder::new(&mut module.types, &[], &[]); + + func_with_simd.func_body().v128_bitselect(); + + let func_with_simd = func_with_simd.finish(vec![], &mut module.funcs); + + let mut call_func = FunctionBuilder::new(&mut module.types, &[], &[]); + + call_func.func_body().call(func_with_simd); + + let call = call_func.finish(Vec::new(), &mut module.funcs); + + module.exports.add(DEFAULT_ENTRY_POINT_NAME, call); + + module.emit_wasm() + }; + let error = preprocess(WasmConfig::default(), &module_bytes) + .expect_err("should fail with an error"); + assert!( + matches!(&error, PreprocessingError::Deserialize(msg) + if msg == "SIMD operations are not supported"), + "{:?}", + error, + ); + } +} diff --git a/execution_engine/src/runtime_context/mod.rs b/execution_engine/src/runtime_context/mod.rs new file mode 100644 index 0000000000..301432debf --- /dev/null +++ b/execution_engine/src/runtime_context/mod.rs @@ -0,0 +1,1650 @@ +//! The context of execution of WASM code. 
+ +#[cfg(test)] +mod tests; + +use std::{ + cell::RefCell, + collections::BTreeSet, + convert::{TryFrom, TryInto}, + fmt::Debug, + rc::Rc, +}; + +use tracing::error; + +use casper_storage::{ + global_state::{error::Error as GlobalStateError, state::StateReader}, + tracking_copy::{ + AddResult, TrackingCopy, TrackingCopyCache, TrackingCopyEntityExt, TrackingCopyError, + TrackingCopyExt, + }, + AddressGenerator, +}; + +use casper_types::{ + account::{ + Account, AccountHash, AddKeyFailure, RemoveKeyFailure, SetThresholdFailure, + UpdateKeyFailure, + }, + addressable_entity::{ + ActionType, EntityKindTag, MessageTopicError, MessageTopics, NamedKeyAddr, NamedKeyValue, + Weight, + }, + bytesrepr::ToBytes, + contract_messages::{Message, MessageAddr, MessageTopicSummary, Messages, TopicNameHash}, + contracts::{ContractHash, ContractPackage, ContractPackageHash, NamedKeys}, + execution::Effects, + handle_stored_dictionary_value, + system::auction::EraInfo, + AccessRights, AddressableEntity, AddressableEntityHash, BlockTime, CLType, CLValue, + CLValueDictionary, ContextAccessRights, Contract, EntityAddr, EntryPointAddr, EntryPointType, + EntryPointValue, EntryPoints, Gas, GrantedAccess, HashAddr, Key, KeyTag, Motes, Package, + PackageHash, Phase, ProtocolVersion, RuntimeArgs, RuntimeFootprint, StoredValue, + StoredValueTypeMismatch, SystemHashRegistry, TransactionHash, Transfer, URef, URefAddr, + DICTIONARY_ITEM_KEY_MAX_LENGTH, KEY_HASH_LENGTH, U512, +}; + +use crate::{ + engine_state::{BlockInfo, EngineConfig}, + execution::ExecError, +}; + +/// Number of bytes returned from the `random_bytes` function. +pub const RANDOM_BYTES_COUNT: usize = 32; + +/// Whether the execution is permitted to call FFI `casper_add_contract_version()` or not. +#[derive(Copy, Clone, PartialEq, Eq, Debug)] +pub enum AllowInstallUpgrade { + /// Allowed. + Allowed, + /// Forbidden. + Forbidden, +} + +/// Holds information specific to the deployed contract. 
+pub struct RuntimeContext<'a, R> { + tracking_copy: Rc>>, + // Enables look up of specific uref based on human-readable name + named_keys: &'a mut NamedKeys, + // Used to check uref is known before use (prevents forging urefs) + access_rights: ContextAccessRights, + args: RuntimeArgs, + authorization_keys: BTreeSet, + block_info: BlockInfo, + transaction_hash: TransactionHash, + gas_limit: Gas, + gas_counter: Gas, + address_generator: Rc>, + phase: Phase, + engine_config: EngineConfig, + entry_point_type: EntryPointType, + transfers: Vec, + remaining_spending_limit: U512, + + // Original account/contract for read only tasks taken before execution + runtime_footprint: Rc>, + // Key pointing to the account / contract / entity context this instance is tied to + context_key: Key, + account_hash: AccountHash, + emit_message_cost: U512, + allow_install_upgrade: AllowInstallUpgrade, + payment_purse: Option, +} + +impl<'a, R> RuntimeContext<'a, R> +where + R: StateReader, +{ + /// Creates new runtime context where we don't already have one. + /// + /// Where we already have a runtime context, consider using `new_from_self()`. 
+ #[allow(clippy::too_many_arguments)] + pub fn new( + named_keys: &'a mut NamedKeys, + runtime_footprint: Rc>, + context_key: Key, + authorization_keys: BTreeSet, + access_rights: ContextAccessRights, + account_hash: AccountHash, + address_generator: Rc>, + tracking_copy: Rc>>, + engine_config: EngineConfig, + block_info: BlockInfo, + transaction_hash: TransactionHash, + phase: Phase, + args: RuntimeArgs, + gas_limit: Gas, + gas_counter: Gas, + transfers: Vec, + remaining_spending_limit: U512, + entry_point_type: EntryPointType, + allow_install_upgrade: AllowInstallUpgrade, + ) -> Self { + let emit_message_cost = (*engine_config.wasm_config().v1()) + .take_host_function_costs() + .emit_message + .cost() + .into(); + RuntimeContext { + tracking_copy, + entry_point_type, + named_keys, + access_rights, + args, + runtime_footprint, + context_key, + authorization_keys, + account_hash, + block_info, + transaction_hash, + gas_limit, + gas_counter, + address_generator, + phase, + engine_config, + transfers, + remaining_spending_limit, + emit_message_cost, + allow_install_upgrade, + payment_purse: None, + } + } + + /// Creates new runtime context cloning values from self. 
+ #[allow(clippy::too_many_arguments)] + pub fn new_from_self( + &self, + context_key: Key, + entry_point_type: EntryPointType, + named_keys: &'a mut NamedKeys, + access_rights: ContextAccessRights, + runtime_args: RuntimeArgs, + ) -> Self { + let runtime_footprint = self.runtime_footprint.clone(); + let authorization_keys = self.authorization_keys.clone(); + let account_hash = self.account_hash; + + let address_generator = self.address_generator.clone(); + let tracking_copy = self.state(); + let engine_config = self.engine_config.clone(); + + let block_info = self.block_info; + let transaction_hash = self.transaction_hash; + let phase = self.phase; + + let gas_limit = self.gas_limit; + let gas_counter = self.gas_counter; + let remaining_spending_limit = self.remaining_spending_limit(); + + let transfers = self.transfers.clone(); + let payment_purse = self.payment_purse; + + RuntimeContext { + tracking_copy, + entry_point_type, + named_keys, + access_rights, + args: runtime_args, + runtime_footprint, + context_key, + authorization_keys, + account_hash, + block_info, + transaction_hash, + gas_limit, + gas_counter, + address_generator, + phase, + engine_config, + transfers, + remaining_spending_limit, + emit_message_cost: self.emit_message_cost, + allow_install_upgrade: self.allow_install_upgrade, + payment_purse, + } + } + + /// Returns all authorization keys for this deploy. + pub fn authorization_keys(&self) -> &BTreeSet { + &self.authorization_keys + } + + /// Returns a named key by a name if it exists. + pub fn named_keys_get(&self, name: &str) -> Option<&Key> { + self.named_keys.get(name) + } + + /// Returns named keys. + pub fn named_keys(&self) -> &NamedKeys { + self.named_keys + } + + /// Returns a mutable reference to named keys. + pub fn named_keys_mut(&mut self) -> &mut NamedKeys { + self.named_keys + } + + /// Checks if named keys contains a key referenced by name. 
+ pub fn named_keys_contains_key(&self, name: &str) -> bool { + self.named_keys.contains(name) + } + + /// Returns the payment purse, if set. + pub fn maybe_payment_purse(&self) -> Option { + self.payment_purse + } + + /// Sets the payment purse to the imputed uref. + pub fn set_payment_purse(&mut self, uref: URef) { + self.payment_purse = Some(uref); + } + + /// Returns an instance of the engine config. + pub fn engine_config(&self) -> &EngineConfig { + &self.engine_config + } + + /// Helper function to avoid duplication in `remove_uref`. + fn remove_key_from_contract( + &mut self, + key: Key, + mut contract: Contract, + name: &str, + ) -> Result<(), ExecError> { + if contract.remove_named_key(name).is_none() { + return Ok(()); + } + self.metered_write_gs_unsafe(key, contract)?; + Ok(()) + } + + /// Helper function to avoid duplication in `remove_uref`. + fn remove_key_from_entity(&mut self, name: &str) -> Result<(), ExecError> { + let key = self.context_key; + match key { + Key::AddressableEntity(entity_addr) => { + let named_key = + NamedKeyAddr::new_from_string(entity_addr, name.to_string())?.into(); + if let Some(StoredValue::NamedKey(_)) = self.read_gs(&named_key)? { + self.prune_gs_unsafe(named_key); + } + } + account_hash @ Key::Account(_) => { + let account: Account = { + let mut account: Account = self.read_gs_typed(&account_hash)?; + account.named_keys_mut().remove(name); + account + }; + self.named_keys.remove(name); + let account_value = self.account_to_validated_value(account)?; + self.metered_write_gs_unsafe(account_hash, account_value)?; + } + contract_uref @ Key::URef(_) => { + let contract: Contract = { + let value: StoredValue = self + .tracking_copy + .borrow_mut() + .read(&contract_uref)? + .ok_or(ExecError::KeyNotFound(contract_uref))?; + + value.try_into().map_err(ExecError::TypeMismatch)? + }; + + self.named_keys.remove(name); + self.remove_key_from_contract(contract_uref, contract, name)? 
+ } + contract_hash @ Key::Hash(_) => { + let contract: Contract = self.read_gs_typed(&contract_hash)?; + self.named_keys.remove(name); + self.remove_key_from_contract(contract_hash, contract, name)? + } + _ => return Err(ExecError::UnexpectedKeyVariant(key)), + } + Ok(()) + } + + /// Remove Key from the `named_keys` map of the current context. + /// It removes both from the ephemeral map (RuntimeContext::named_keys) but + /// also the to-be-persisted map (in the TrackingCopy/GlobalState). + pub fn remove_key(&mut self, name: &str) -> Result<(), ExecError> { + self.named_keys.remove(name); + self.remove_key_from_entity(name) + } + + /// Returns block info. + pub fn get_block_info(&self) -> BlockInfo { + self.block_info + } + + /// Returns the transaction hash. + pub fn get_transaction_hash(&self) -> TransactionHash { + self.transaction_hash + } + + /// Extends access rights with a new map. + pub fn access_rights_extend(&mut self, urefs: &[URef]) { + self.access_rights.extend(urefs); + } + + /// Returns a mapping of access rights for each [`URef`]s address. + pub fn access_rights(&self) -> &ContextAccessRights { + &self.access_rights + } + + /// Returns footprint of the caller. + pub fn runtime_footprint(&self) -> Rc> { + Rc::clone(&self.runtime_footprint) + } + + /// Returns arguments. + pub fn args(&self) -> &RuntimeArgs { + &self.args + } + + pub(crate) fn set_args(&mut self, args: RuntimeArgs) { + self.args = args + } + + /// Returns new shared instance of an address generator. + pub fn address_generator(&self) -> Rc> { + Rc::clone(&self.address_generator) + } + + /// Returns new shared instance of a tracking copy. + pub(super) fn state(&self) -> Rc>> { + Rc::clone(&self.tracking_copy) + } + + /// Returns the gas limit. + pub fn gas_limit(&self) -> Gas { + self.gas_limit + } + + /// Returns the current gas counter. + pub fn gas_counter(&self) -> Gas { + self.gas_counter + } + + /// Sets the gas counter to a new value. 
+ pub fn set_gas_counter(&mut self, new_gas_counter: Gas) { + self.gas_counter = new_gas_counter; + } + + /// Returns the context key for this instance. + pub fn get_context_key(&self) -> Key { + self.context_key + } + + /// Returns the initiator of the call chain. + pub fn get_initiator(&self) -> AccountHash { + self.account_hash + } + + /// Returns the protocol version. + pub fn protocol_version(&self) -> ProtocolVersion { + self.block_info.protocol_version() + } + + /// Returns the current phase. + pub fn phase(&self) -> Phase { + self.phase + } + + /// Returns `true` if the execution is permitted to call `casper_add_contract_version()`. + pub fn install_upgrade_allowed(&self) -> bool { + self.allow_install_upgrade == AllowInstallUpgrade::Allowed + } + + /// Generates new deterministic hash for uses as an address. + pub fn new_hash_address(&mut self) -> Result<[u8; KEY_HASH_LENGTH], ExecError> { + Ok(self.address_generator.borrow_mut().new_hash_address()) + } + + /// Returns 32 pseudo random bytes. + pub fn random_bytes(&mut self) -> Result<[u8; RANDOM_BYTES_COUNT], ExecError> { + Ok(self.address_generator.borrow_mut().create_address()) + } + + /// Creates new [`URef`] instance. + pub fn new_uref(&mut self, value: StoredValue) -> Result { + let uref = self + .address_generator + .borrow_mut() + .new_uref(AccessRights::READ_ADD_WRITE); + self.insert_uref(uref); + self.metered_write_gs(Key::URef(uref), value)?; + Ok(uref) + } + + /// Creates a new URef where the value it stores is CLType::Unit. + pub(crate) fn new_unit_uref(&mut self) -> Result { + self.new_uref(StoredValue::CLValue(CLValue::unit())) + } + + /// Puts `key` to the map of named keys of current context. + pub fn put_key(&mut self, name: String, key: Key) -> Result<(), ExecError> { + // No need to perform actual validation on the base key because an account or contract (i.e. + // the element stored under `base_key`) is allowed to add new named keys to itself. 
+ match self.get_context_key() { + Key::Account(_) | Key::Hash(_) => { + let named_key_value = StoredValue::CLValue(CLValue::from_t((name.clone(), key))?); + self.validate_value(&named_key_value)?; + self.metered_add_gs_unsafe(self.get_context_key(), named_key_value)?; + self.insert_named_key(name, key); + } + Key::AddressableEntity(entity_addr) => { + let named_key_value = + StoredValue::NamedKey(NamedKeyValue::from_concrete_values(key, name.clone())?); + self.validate_value(&named_key_value)?; + let named_key_addr = NamedKeyAddr::new_from_string(entity_addr, name.clone())?; + self.metered_write_gs_unsafe(Key::NamedKey(named_key_addr), named_key_value)?; + self.insert_named_key(name, key); + } + _ => return Err(ExecError::InvalidContext), + } + + Ok(()) + } + + pub(crate) fn get_message_topics( + &mut self, + hash_addr: EntityAddr, + ) -> Result { + self.tracking_copy + .borrow_mut() + .get_message_topics(hash_addr) + .map_err(Into::into) + } + + pub(crate) fn get_named_keys(&mut self, entity_key: Key) -> Result { + let entity_addr = if let Key::AddressableEntity(entity_addr) = entity_key { + entity_addr + } else { + return Err(ExecError::UnexpectedKeyVariant(entity_key)); + }; + self.tracking_copy + .borrow_mut() + .get_named_keys(entity_addr) + .map_err(Into::into) + } + + pub(crate) fn write_entry_points( + &mut self, + entity_addr: EntityAddr, + entry_points: EntryPoints, + ) -> Result<(), ExecError> { + if entry_points.is_empty() { + return Ok(()); + } + + for entry_point in entry_points.take_entry_points() { + let entry_point_addr = + EntryPointAddr::new_v1_entry_point_addr(entity_addr, entry_point.name())?; + let entry_point_value = + StoredValue::EntryPoint(EntryPointValue::V1CasperVm(entry_point)); + self.metered_write_gs_unsafe(Key::EntryPoint(entry_point_addr), entry_point_value)?; + } + + Ok(()) + } + + pub(crate) fn get_casper_vm_v1_entry_point( + &mut self, + entity_key: Key, + ) -> Result { + let entity_addr = if let 
Key::AddressableEntity(entity_addr) = entity_key { + entity_addr + } else { + return Err(ExecError::UnexpectedKeyVariant(entity_key)); + }; + + self.tracking_copy + .borrow_mut() + .get_v1_entry_points(entity_addr) + .map_err(Into::into) + } + + /// Reads the total balance of a purse [`URef`]. + /// + /// Currently address of a purse [`URef`] is also a hash in the [`Key::Hash`] space. + pub(crate) fn total_balance(&mut self, purse_uref: &URef) -> Result { + let key = Key::URef(*purse_uref); + let total = self + .tracking_copy + .borrow_mut() + .get_total_balance(key) + .map_err(ExecError::TrackingCopy)?; + Ok(total) + } + + /// Reads the available balance of a purse [`URef`]. + /// + /// Currently address of a purse [`URef`] is also a hash in the [`Key::Hash`] space. + pub(crate) fn available_balance(&mut self, purse_uref: &URef) -> Result { + let key = Key::URef(*purse_uref); + self.tracking_copy + .borrow_mut() + .get_available_balance(key) + .map_err(ExecError::TrackingCopy) + } + + /// Read a stored value under a [`Key`]. + pub fn read_gs(&mut self, key: &Key) -> Result, ExecError> { + self.validate_readable(key)?; + self.validate_key(key)?; + + let maybe_stored_value = self.tracking_copy.borrow_mut().read(key)?; + + let stored_value = match maybe_stored_value { + Some(stored_value) => handle_stored_dictionary_value(*key, stored_value)?, + None => return Ok(None), + }; + + Ok(Some(stored_value)) + } + + /// Reads a value from a global state directly. + /// + /// # Usage + /// + /// DO NOT EXPOSE THIS VIA THE FFI - This function bypasses security checks and should be used + /// with caution. + pub fn read_gs_unsafe(&mut self, key: &Key) -> Result, ExecError> { + self.tracking_copy + .borrow_mut() + .read(key) + .map_err(Into::into) + } + + /// This method is a wrapper over `read_gs` in the sense that it extracts the type held by a + /// `StoredValue` stored in the global state in a type safe manner. 
+ /// + /// This is useful if you want to get the exact type from global state. + pub fn read_gs_typed(&mut self, key: &Key) -> Result + where + T: TryFrom, + T::Error: Debug, + { + let value = match self.read_gs(key)? { + None => return Err(ExecError::KeyNotFound(*key)), + Some(value) => value, + }; + + value + .try_into() + .map_err(|error| ExecError::TrackingCopy(TrackingCopyError::TypeMismatch(error))) + } + + /// Returns all keys based on the tag prefix. + pub fn get_keys(&mut self, key_tag: &KeyTag) -> Result, ExecError> { + self.tracking_copy + .borrow_mut() + .get_keys(key_tag) + .map_err(Into::into) + } + + /// Returns all key's that start with prefix, if any. + pub fn get_keys_with_prefix(&mut self, prefix: &[u8]) -> Result, ExecError> { + self.tracking_copy + .borrow_mut() + .reader() + .keys_with_prefix(prefix) + .map_err(Into::into) + } + + /// Write an era info instance to the global state. + pub fn write_era_info(&mut self, key: Key, value: EraInfo) { + if let Key::EraSummary = key { + // Writing an `EraInfo` for 100 validators will not exceed write size limit. + self.tracking_copy + .borrow_mut() + .write(key, StoredValue::EraInfo(value)); + } else { + panic!("Do not use this function for writing non-era-info keys") + } + } + + /// Creates validated instance of `StoredValue` from `account`. + fn account_to_validated_value(&self, account: Account) -> Result { + let value = StoredValue::Account(account); + self.validate_value(&value)?; + Ok(value) + } + + /// Write an account to the global state. + pub fn write_account(&mut self, key: Key, account: Account) -> Result<(), ExecError> { + if let Key::Account(_) = key { + self.validate_key(&key)?; + let account_value = self.account_to_validated_value(account)?; + self.metered_write_gs_unsafe(key, account_value)?; + Ok(()) + } else { + panic!("Do not use this function for writing non-account keys") + } + } + + /// Read an account from the global state. 
+ pub fn read_account(&mut self, key: &Key) -> Result, ExecError> { + if let Key::Account(_) = key { + self.validate_key(key)?; + self.tracking_copy + .borrow_mut() + .read(key) + .map_err(Into::into) + } else { + panic!("Do not use this function for reading from non-account keys") + } + } + + /// Adds a named key. + /// + /// If given `Key` refers to an [`URef`] then it extends the runtime context's access rights + /// with the URef's access rights. + fn insert_named_key(&mut self, name: String, key: Key) { + if let Key::URef(uref) = key { + self.insert_uref(uref); + } + self.named_keys.insert(name, key); + } + + /// Adds a new [`URef`] into the context. + /// + /// Once an [`URef`] is inserted, it's considered a valid [`URef`] in this runtime context. + fn insert_uref(&mut self, uref: URef) { + self.access_rights.extend(&[uref]) + } + + /// Grants access to a [`URef`]; unless access was pre-existing. + pub fn grant_access(&mut self, uref: URef) -> GrantedAccess { + self.access_rights.grant_access(uref) + } + + /// Removes an access right from the current runtime context. + pub fn remove_access(&mut self, uref_addr: URefAddr, access_rights: AccessRights) { + self.access_rights.remove_access(uref_addr, access_rights) + } + + /// Returns a copy of the current effects of a tracking copy. + pub fn effects(&self) -> Effects { + self.tracking_copy.borrow().effects() + } + + /// Returns a copy of the current messages of a tracking copy. + pub fn messages(&self) -> Messages { + self.tracking_copy.borrow().messages() + } + + /// Returns a copy of the current named keys of a tracking copy. + pub fn cache(&self) -> TrackingCopyCache { + self.tracking_copy.borrow().cache() + } + + /// Returns the cost charged for the last emitted message. + pub fn emit_message_cost(&self) -> U512 { + self.emit_message_cost + } + + /// Sets the cost charged for the last emitted message. 
+ pub fn set_emit_message_cost(&mut self, cost: U512) { + self.emit_message_cost = cost + } + + /// Returns list of transfers. + pub fn transfers(&self) -> &Vec { + &self.transfers + } + + /// Returns mutable list of transfers. + pub fn transfers_mut(&mut self) -> &mut Vec { + &mut self.transfers + } + + fn validate_cl_value(&self, cl_value: &CLValue) -> Result<(), ExecError> { + match cl_value.cl_type() { + CLType::Bool + | CLType::I32 + | CLType::I64 + | CLType::U8 + | CLType::U32 + | CLType::U64 + | CLType::U128 + | CLType::U256 + | CLType::U512 + | CLType::Unit + | CLType::String + | CLType::Option(_) + | CLType::List(_) + | CLType::ByteArray(..) + | CLType::Result { .. } + | CLType::Map { .. } + | CLType::Tuple1(_) + | CLType::Tuple3(_) + | CLType::Any + | CLType::PublicKey => Ok(()), + CLType::Key => { + let key: Key = cl_value.to_t()?; + self.validate_key(&key) + } + CLType::URef => { + let uref: URef = cl_value.to_t()?; + self.validate_uref(&uref) + } + tuple @ CLType::Tuple2(_) if *tuple == casper_types::named_key_type() => { + let (_name, key): (String, Key) = cl_value.to_t()?; + self.validate_key(&key) + } + CLType::Tuple2(_) => Ok(()), + } + } + + /// Validates whether keys used in the `value` are not forged. 
+ pub(crate) fn validate_value(&self, value: &StoredValue) -> Result<(), ExecError> { + match value { + StoredValue::CLValue(cl_value) => self.validate_cl_value(cl_value), + StoredValue::NamedKey(named_key_value) => { + self.validate_cl_value(named_key_value.get_key_as_cl_value())?; + self.validate_cl_value(named_key_value.get_name_as_cl_value()) + } + StoredValue::Account(_) + | StoredValue::ByteCode(_) + | StoredValue::Contract(_) + | StoredValue::AddressableEntity(_) + | StoredValue::SmartContract(_) + | StoredValue::Transfer(_) + | StoredValue::DeployInfo(_) + | StoredValue::EraInfo(_) + | StoredValue::Bid(_) + | StoredValue::BidKind(_) + | StoredValue::Withdraw(_) + | StoredValue::Unbonding(_) + | StoredValue::ContractPackage(_) + | StoredValue::ContractWasm(_) + | StoredValue::MessageTopic(_) + | StoredValue::Message(_) + | StoredValue::Prepayment(_) + | StoredValue::EntryPoint(_) + | StoredValue::RawBytes(_) => Ok(()), + } + } + + pub(crate) fn context_key_to_entity_addr(&self) -> Result { + match self.context_key { + Key::Account(account_hash) => Ok(EntityAddr::Account(account_hash.value())), + Key::Hash(hash) => { + if self.is_system_addressable_entity(&hash)? { + Ok(EntityAddr::System(hash)) + } else { + Ok(EntityAddr::SmartContract(hash)) + } + } + Key::AddressableEntity(addr) => Ok(addr), + _ => Err(ExecError::UnexpectedKeyVariant(self.context_key)), + } + } + + /// Validates whether key is not forged (whether it can be found in the + /// `named_keys`) and whether the version of a key that contract wants + /// to use, has access rights that are less powerful than access rights' + /// of the key in the `named_keys`. + pub(crate) fn validate_key(&self, key: &Key) -> Result<(), ExecError> { + let uref = match key { + Key::URef(uref) => uref, + _ => return Ok(()), + }; + self.validate_uref(uref) + } + + /// Validate [`URef`] access rights. + /// + /// Returns unit if [`URef`]s address exists in the context, and has correct access rights bit + /// set. 
+ pub(crate) fn validate_uref(&self, uref: &URef) -> Result<(), ExecError> { + if self.access_rights.has_access_rights_to_uref(uref) { + Ok(()) + } else { + Err(ExecError::ForgedReference(*uref)) + } + } + + /// Validates if a [`Key`] refers to a [`URef`] and has a read bit set. + fn validate_readable(&self, key: &Key) -> Result<(), ExecError> { + if self.is_readable(key) { + Ok(()) + } else { + Err(ExecError::InvalidAccess { + required: AccessRights::READ, + }) + } + } + + /// Validates if a [`Key`] refers to a [`URef`] and has a add bit set. + fn validate_addable(&self, key: &Key) -> Result<(), ExecError> { + if self.is_addable(key) { + Ok(()) + } else { + Err(ExecError::InvalidAccess { + required: AccessRights::ADD, + }) + } + } + + /// Validates if a [`Key`] refers to a [`URef`] and has a write bit set. + pub(crate) fn validate_writeable(&self, key: &Key) -> Result<(), ExecError> { + if self.is_writeable(key) { + Ok(()) + } else { + Err(ExecError::InvalidAccess { + required: AccessRights::WRITE, + }) + } + } + + /// Tests whether reading from the `key` is valid. + pub fn is_readable(&self, key: &Key) -> bool { + match self.context_key_to_entity_addr() { + Ok(entity_addr) => key.is_readable(&entity_addr), + Err(error) => { + error!(?error, "entity_key is unexpected key variant"); + panic!("is_readable: entity_key is unexpected key variant"); + } + } + } + + /// Tests whether addition to `key` is valid. + pub fn is_addable(&self, key: &Key) -> bool { + match self.context_key_to_entity_addr() { + Ok(entity_addr) => key.is_addable(&entity_addr), + Err(error) => { + error!(?error, "entity_key is unexpected key variant"); + panic!("is_addable: entity_key is unexpected key variant"); + } + } + } + + /// Tests whether writing to `key` is valid. 
+ pub fn is_writeable(&self, key: &Key) -> bool { + match self.context_key_to_entity_addr() { + Ok(entity_addr) => key.is_writeable(&entity_addr), + Err(error) => { + error!(?error, "entity_key is unexpected key variant"); + panic!("is_writeable: entity_key is unexpected key variant"); + } + } + } + + /// Safely charge the specified amount of gas, up to the available gas limit. + /// + /// Returns [`Error::GasLimit`] if gas limit exceeded and `()` if not. + /// Intuition about the return value sense is to answer the question 'are we + /// allowed to continue?' + pub(crate) fn charge_gas(&mut self, gas: Gas) -> Result<(), ExecError> { + let prev = self.gas_counter(); + let gas_limit = self.gas_limit(); + // gas charge overflow protection + match prev.checked_add(gas) { + None => { + self.set_gas_counter(gas_limit); + Err(ExecError::GasLimit) + } + Some(val) if val > gas_limit => { + self.set_gas_counter(gas_limit); + Err(ExecError::GasLimit) + } + Some(val) => { + self.set_gas_counter(val); + Ok(()) + } + } + } + + /// Checks if we are calling a system addressable entity. + pub(crate) fn is_system_addressable_entity( + &self, + hash_addr: &HashAddr, + ) -> Result { + Ok(self.system_entity_registry()?.exists(hash_addr)) + } + + /// Charges gas for specified amount of bytes used. + fn charge_gas_storage(&mut self, bytes_count: usize) -> Result<(), ExecError> { + if let Some(hash_addr) = self.get_context_key().into_entity_hash_addr() { + if self.is_system_addressable_entity(&hash_addr)? { + // Don't charge storage used while executing a system contract. + return Ok(()); + } + } + + let storage_costs = self.engine_config.storage_costs(); + + let gas_cost = storage_costs.calculate_gas_cost(bytes_count); + + self.charge_gas(gas_cost) + } + + /// Charges gas for using a host system contract's entrypoint. 
+ pub(crate) fn charge_system_contract_call(&mut self, call_cost: T) -> Result<(), ExecError> + where + T: Into, + { + let amount: Gas = call_cost.into(); + self.charge_gas(amount) + } + + /// Prune a key from the global state. + /// + /// Use with caution - there is no validation done as the key is assumed to be validated + /// already. + pub(crate) fn prune_gs_unsafe(&mut self, key: K) + where + K: Into, + { + self.tracking_copy.borrow_mut().prune(key.into()); + } + + pub(crate) fn migrate_package( + &mut self, + contract_package_hash: ContractPackageHash, + protocol_version: ProtocolVersion, + ) -> Result<(), ExecError> { + self.tracking_copy + .borrow_mut() + .migrate_package(Key::Hash(contract_package_hash.value()), protocol_version) + .map_err(ExecError::TrackingCopy) + } + + /// Writes data to global state with a measurement. + /// + /// Use with caution - there is no validation done as the key is assumed to be validated + /// already. + pub(crate) fn metered_write_gs_unsafe( + &mut self, + key: K, + value: V, + ) -> Result<(), ExecError> + where + K: Into, + V: Into, + { + let stored_value = value.into(); + + // Charge for amount as measured by serialized length + let bytes_count = stored_value.serialized_length(); + self.charge_gas_storage(bytes_count)?; + + self.tracking_copy + .borrow_mut() + .write(key.into(), stored_value); + Ok(()) + } + + /// Emits message and writes message summary to global state with a measurement. 
+ pub(crate) fn metered_emit_message( + &mut self, + topic_key: Key, + block_time: BlockTime, + block_message_count: u64, + topic_message_count: u32, + message: Message, + ) -> Result<(), ExecError> { + let topic_value = StoredValue::MessageTopic(MessageTopicSummary::new( + topic_message_count, + block_time, + message.topic_name().to_owned(), + )); + let message_key = message.message_key(); + let message_value = StoredValue::Message(message.checksum().map_err(ExecError::BytesRepr)?); + + let block_message_count_value = + StoredValue::CLValue(CLValue::from_t((block_time, block_message_count))?); + + // Charge for amount as measured by serialized length + let bytes_count = topic_value.serialized_length() + + message_value.serialized_length() + + block_message_count_value.serialized_length(); + self.charge_gas_storage(bytes_count)?; + + self.tracking_copy.borrow_mut().emit_message( + topic_key, + topic_value, + message_key, + message_value, + block_message_count_value, + message, + ); + Ok(()) + } + + /// Writes data to a global state and charges for bytes stored. + /// + /// This method performs full validation of the key to be written. + pub(crate) fn metered_write_gs(&mut self, key: Key, value: T) -> Result<(), ExecError> + where + T: Into, + { + let stored_value = value.into(); + self.validate_writeable(&key)?; + self.validate_key(&key)?; + self.validate_value(&stored_value)?; + self.metered_write_gs_unsafe(key, stored_value) + } + + /// Adds data to a global state key and charges for bytes stored. 
+ pub(crate) fn metered_add_gs_unsafe( + &mut self, + key: Key, + value: StoredValue, + ) -> Result<(), ExecError> { + let value_bytes_count = value.serialized_length(); + self.charge_gas_storage(value_bytes_count)?; + + match self.tracking_copy.borrow_mut().add(key, value) { + Err(storage_error) => Err(storage_error.into()), + Ok(AddResult::Success) => Ok(()), + Ok(AddResult::KeyNotFound(key)) => Err(ExecError::KeyNotFound(key)), + Ok(AddResult::TypeMismatch(type_mismatch)) => { + Err(ExecError::TypeMismatch(type_mismatch)) + } + Ok(AddResult::Serialization(error)) => Err(ExecError::BytesRepr(error)), + Ok(AddResult::Transform(error)) => Err(ExecError::Transform(error)), + } + } + + /// Adds `value` to the `key`. The premise for being able to `add` value is + /// that the type of it value can be added (is a Monoid). If the + /// values can't be added, either because they're not a Monoid or if the + /// value stored under `key` has different type, then `TypeMismatch` + /// errors is returned. + pub(crate) fn metered_add_gs(&mut self, key: K, value: V) -> Result<(), ExecError> + where + K: Into, + V: Into, + { + let key = key.into(); + let value = value.into(); + self.validate_addable(&key)?; + self.validate_key(&key)?; + self.validate_value(&value)?; + self.metered_add_gs_unsafe(key, value) + } + + /// Adds new associated key. 
+ pub(crate) fn add_associated_key( + &mut self, + account_hash: AccountHash, + weight: Weight, + ) -> Result<(), ExecError> { + let context_key = self.context_key; + let entity_addr = self.context_key_to_entity_addr()?; + + if EntryPointType::Caller == self.entry_point_type + && entity_addr.tag() != EntityKindTag::Account + { + // Exit early with error to avoid mutations + return Err(AddKeyFailure::PermissionDenied.into()); + } + + if self.engine_config.enable_entity { + // Get the current entity record + let entity = { + let mut entity: AddressableEntity = self.read_gs_typed(&context_key)?; + // enforce max keys limit + if entity.associated_keys().len() + >= (self.engine_config.max_associated_keys() as usize) + { + return Err(ExecError::AddKeyFailure(AddKeyFailure::MaxKeysLimit)); + } + + // Exit early in case of error without updating global state + entity + .add_associated_key(account_hash, weight) + .map_err(ExecError::from)?; + entity + }; + + self.metered_write_gs_unsafe( + context_key, + self.addressable_entity_to_validated_value(entity)?, + )?; + } else { + // Take an account out of the global state + let account = { + let mut account: Account = self.read_gs_typed(&context_key)?; + + if account.associated_keys().len() as u32 + >= (self.engine_config.max_associated_keys()) + { + return Err(ExecError::AddKeyFailure(AddKeyFailure::MaxKeysLimit)); + } + + // Exit early in case of error without updating global state + let result = account.add_associated_key( + account_hash, + casper_types::account::Weight::new(weight.value()), + ); + + result.map_err(ExecError::from)?; + account + }; + + let account_value = self.account_to_validated_value(account)?; + + self.metered_write_gs_unsafe(context_key, account_value)?; + } + + Ok(()) + } + + /// Remove associated key. 
+ pub(crate) fn remove_associated_key( + &mut self, + account_hash: AccountHash, + ) -> Result<(), ExecError> { + let context_key = self.context_key; + let entity_addr = self.context_key_to_entity_addr()?; + + if EntryPointType::Caller == self.entry_point_type + && entity_addr.tag() != EntityKindTag::Account + { + // Exit early with error to avoid mutations + return Err(RemoveKeyFailure::PermissionDenied.into()); + } + + if !self + .runtime_footprint() + .borrow() + .can_manage_keys_with(&self.authorization_keys) + { + // Exit early if authorization keys weight doesn't exceed required + // key management threshold + return Err(RemoveKeyFailure::PermissionDenied.into()); + } + + if self.engine_config.enable_entity { + // Get the current entity record + let entity = { + let mut entity: AddressableEntity = self.read_gs_typed(&context_key)?; + + // Exit early in case of error without updating global state + entity + .remove_associated_key(account_hash) + .map_err(ExecError::from)?; + entity + }; + + self.metered_write_gs_unsafe( + context_key, + self.addressable_entity_to_validated_value(entity)?, + )?; + } else { + // Take an account out of the global state + let account = { + let mut account: Account = self.read_gs_typed(&context_key)?; + + // Exit early in case of error without updating global state + account + .remove_associated_key(account_hash) + .map_err(ExecError::from)?; + account + }; + + let account_value = self.account_to_validated_value(account)?; + + self.metered_write_gs_unsafe(context_key, account_value)?; + } + + Ok(()) + } + + /// Update associated key. 
+ pub(crate) fn update_associated_key( + &mut self, + account_hash: AccountHash, + weight: Weight, + ) -> Result<(), ExecError> { + let context_key = self.context_key; + let entity_addr = self.context_key_to_entity_addr()?; + + if EntryPointType::Caller == self.entry_point_type + && entity_addr.tag() != EntityKindTag::Account + { + // Exit early with error to avoid mutations + return Err(UpdateKeyFailure::PermissionDenied.into()); + } + + if !self + .runtime_footprint() + .borrow() + .can_manage_keys_with(&self.authorization_keys) + { + // Exit early if authorization keys weight doesn't exceed required + // key management threshold + return Err(UpdateKeyFailure::PermissionDenied.into()); + } + + if self.engine_config.enable_entity { + // Get the current entity record + let entity = { + let mut entity: AddressableEntity = self.read_gs_typed(&context_key)?; + + // Exit early in case of error without updating global state + entity + .update_associated_key(account_hash, weight) + .map_err(ExecError::from)?; + entity + }; + + self.metered_write_gs_unsafe( + context_key, + self.addressable_entity_to_validated_value(entity)?, + )?; + } else { + // Take an account out of the global state + let account = { + let mut account: Account = self.read_gs_typed(&context_key)?; + + // Exit early in case of error without updating global state + account + .update_associated_key( + account_hash, + casper_types::account::Weight::new(weight.value()), + ) + .map_err(ExecError::from)?; + account + }; + + let account_value = self.account_to_validated_value(account)?; + + self.metered_write_gs_unsafe(context_key, account_value)?; + } + + Ok(()) + } + + pub(crate) fn is_authorized_by_admin(&self) -> bool { + self.engine_config + .administrative_accounts() + .intersection(&self.authorization_keys) + .next() + .is_some() + } + /// Gets given contract package with its access_key validated against current context. 
+ pub(crate) fn get_validated_contract_package( + &mut self, + package_hash: HashAddr, + ) -> Result { + let package_hash_key = Key::Hash(package_hash); + self.validate_key(&package_hash_key)?; + let contract_package: ContractPackage = self.read_gs_typed(&package_hash_key)?; + + if !self.is_authorized_by_admin() { + self.validate_uref(&contract_package.access_key())?; + } + + Ok(contract_package) + } + + /// Set threshold of an associated key. + pub(crate) fn set_action_threshold( + &mut self, + action_type: ActionType, + threshold: Weight, + ) -> Result<(), ExecError> { + let context_key = self.context_key; + let entity_addr = self.context_key_to_entity_addr()?; + + if EntryPointType::Caller == self.entry_point_type + && entity_addr.tag() != EntityKindTag::Account + { + // Exit early with error to avoid mutations + return Err(SetThresholdFailure::PermissionDeniedError.into()); + } + + if self.engine_config.enable_entity { + // Take an addressable entity out of the global state + let mut entity: AddressableEntity = self.read_gs_typed(&context_key)?; + + // Exit early in case of error without updating global state + if self.is_authorized_by_admin() { + entity.set_action_threshold_unchecked(action_type, threshold) + } else { + entity.set_action_threshold(action_type, threshold) + } + .map_err(ExecError::from)?; + + let entity_value = self.addressable_entity_to_validated_value(entity)?; + + self.metered_write_gs_unsafe(context_key, entity_value)?; + } else { + // Converts an account's public key into a URef + let key = Key::Account(AccountHash::new(entity_addr.value())); + + // Take an account out of the global state + let mut account: Account = self.read_gs_typed(&key)?; + + // Exit early in case of error without updating global state + let action_type = match action_type { + ActionType::Deployment => casper_types::account::ActionType::Deployment, + ActionType::KeyManagement => casper_types::account::ActionType::KeyManagement, + ActionType::UpgradeManagement => 
return Err(ExecError::InvalidContext), + }; + + let threshold = casper_types::account::Weight::new(threshold.value()); + + if self.is_authorized_by_admin() { + account.set_action_threshold_unchecked(action_type, threshold) + } else { + account.set_action_threshold(action_type, threshold) + } + .map_err(ExecError::from)?; + + let account_value = self.account_to_validated_value(account)?; + + self.metered_write_gs_unsafe(key, account_value)?; + } + + Ok(()) + } + + fn addressable_entity_to_validated_value( + &self, + entity: AddressableEntity, + ) -> Result { + let value = StoredValue::AddressableEntity(entity); + self.validate_value(&value)?; + Ok(value) + } + + pub(crate) fn runtime_footprint_by_account_hash( + &mut self, + account_hash: AccountHash, + ) -> Result, ExecError> { + if self.engine_config.enable_entity { + match self.read_gs(&Key::Account(account_hash))? { + Some(StoredValue::CLValue(cl_value)) => { + let key: Key = cl_value.into_t().map_err(ExecError::CLValue)?; + match self.read_gs(&key)? { + Some(StoredValue::AddressableEntity(addressable_entity)) => { + let entity_addr = EntityAddr::Account(account_hash.value()); + let named_keys = self.get_named_keys(key)?; + let entry_points = self.get_casper_vm_v1_entry_point(key)?; + let footprint = RuntimeFootprint::new_entity_footprint( + entity_addr, + addressable_entity, + named_keys, + entry_points, + ); + Ok(Some(footprint)) + } + Some(_other_variant_2) => Err(ExecError::UnexpectedStoredValueVariant), + None => Ok(None), + } + } + Some(_other_variant_1) => Err(ExecError::UnexpectedStoredValueVariant), + None => Ok(None), + } + } else { + match self.read_gs(&Key::Account(account_hash))? 
{ + Some(StoredValue::Account(account)) => { + Ok(Some(RuntimeFootprint::new_account_footprint(account))) + } + Some(_other_variant_1) => Err(ExecError::UnexpectedStoredValueVariant), + None => Ok(None), + } + } + } + + /// Gets main purse id + pub fn get_main_purse(&mut self) -> Result { + let main_purse = self + .runtime_footprint() + .borrow() + .main_purse() + .ok_or(ExecError::InvalidContext)?; + Ok(main_purse) + } + + /// Gets entry point type. + pub fn entry_point_type(&self) -> EntryPointType { + self.entry_point_type + } + + /// Gets given contract package with its access_key validated against current context. + pub(crate) fn get_validated_package( + &mut self, + package_hash: PackageHash, + ) -> Result { + let package_hash_key = Key::from(package_hash); + self.validate_key(&package_hash_key)?; + let contract_package = if self.engine_config.enable_entity { + self.read_gs_typed::(&Key::SmartContract(package_hash.value()))? + } else { + let cp = self.read_gs_typed::(&Key::Hash(package_hash.value()))?; + cp.into() + }; + Ok(contract_package) + } + + pub(crate) fn get_package(&mut self, package_hash: HashAddr) -> Result { + self.tracking_copy + .borrow_mut() + .get_package(package_hash) + .map_err(Into::into) + } + + pub(crate) fn get_contract( + &mut self, + contract_hash: ContractHash, + ) -> Result { + self.tracking_copy + .borrow_mut() + .get_contract(contract_hash) + .map_err(Into::into) + } + + pub(crate) fn get_contract_entity( + &mut self, + entity_key: Key, + ) -> Result<(AddressableEntity, bool), ExecError> { + let entity_hash = if let Some(entity_hash) = entity_key.into_entity_hash() { + entity_hash + } else { + return Err(ExecError::UnexpectedKeyVariant(entity_key)); + }; + + let mut tc = self.tracking_copy.borrow_mut(); + + let key = Key::contract_entity_key(entity_hash); + match tc.read(&key)? 
{ + Some(StoredValue::AddressableEntity(entity)) => Ok((entity, false)), + Some(other) => Err(ExecError::TypeMismatch(StoredValueTypeMismatch::new( + "AddressableEntity".to_string(), + other.type_name(), + ))), + None => match tc.read(&Key::Hash(entity_hash.value()))? { + Some(StoredValue::Contract(contract)) => Ok((contract.into(), true)), + Some(other) => Err(ExecError::TypeMismatch(StoredValueTypeMismatch::new( + "Contract".to_string(), + other.type_name(), + ))), + None => Err(TrackingCopyError::KeyNotFound(key).into()), + }, + } + } + + /// Gets a dictionary item key from a dictionary referenced by a `uref`. + pub(crate) fn dictionary_get( + &mut self, + uref: URef, + dictionary_item_key: &str, + ) -> Result, ExecError> { + self.validate_readable(&uref.into())?; + self.validate_key(&uref.into())?; + let dictionary_item_key_bytes = dictionary_item_key.as_bytes(); + + if dictionary_item_key_bytes.len() > DICTIONARY_ITEM_KEY_MAX_LENGTH { + return Err(ExecError::DictionaryItemKeyExceedsLength); + } + + let dictionary_key = Key::dictionary(uref, dictionary_item_key_bytes); + self.dictionary_read(dictionary_key) + } + + /// Gets a dictionary value from a dictionary `Key`. + pub(crate) fn dictionary_read( + &mut self, + dictionary_key: Key, + ) -> Result, ExecError> { + let maybe_stored_value = self + .tracking_copy + .borrow_mut() + .read(&dictionary_key) + .map_err(Into::::into)?; + + if let Some(stored_value) = maybe_stored_value { + let stored_value = handle_stored_dictionary_value(dictionary_key, stored_value)?; + let cl_value = CLValue::try_from(stored_value).map_err(ExecError::TypeMismatch)?; + Ok(Some(cl_value)) + } else { + Ok(None) + } + } + + /// Puts a dictionary item key from a dictionary referenced by a `uref`. 
+ pub fn dictionary_put( + &mut self, + seed_uref: URef, + dictionary_item_key: &str, + cl_value: CLValue, + ) -> Result<(), ExecError> { + let dictionary_item_key_bytes = dictionary_item_key.as_bytes(); + + if dictionary_item_key_bytes.len() > DICTIONARY_ITEM_KEY_MAX_LENGTH { + return Err(ExecError::DictionaryItemKeyExceedsLength); + } + + self.validate_writeable(&seed_uref.into())?; + self.validate_uref(&seed_uref)?; + + self.validate_cl_value(&cl_value)?; + + let wrapped_cl_value = { + let dictionary_value = CLValueDictionary::new( + cl_value, + seed_uref.addr().to_vec(), + dictionary_item_key_bytes.to_vec(), + ); + CLValue::from_t(dictionary_value).map_err(ExecError::from)? + }; + + let dictionary_key = Key::dictionary(seed_uref, dictionary_item_key_bytes); + self.metered_write_gs_unsafe(dictionary_key, wrapped_cl_value)?; + Ok(()) + } + + /// Gets system contract by name. + pub(crate) fn get_system_contract( + &self, + name: &str, + ) -> Result { + let registry = self.system_entity_registry()?; + let hash = registry.get(name).ok_or_else(|| { + error!("Missing system contract hash: {}", name); + ExecError::MissingSystemContractHash(name.to_string()) + })?; + Ok(AddressableEntityHash::new(*hash)) + } + + pub(crate) fn get_system_entity_key(&self, name: &str) -> Result { + let system_entity_hash = self.get_system_contract(name)?; + if self.engine_config.enable_entity { + Ok(Key::addressable_entity_key( + EntityKindTag::System, + system_entity_hash, + )) + } else { + Ok(Key::Hash(system_entity_hash.value())) + } + } + + /// Returns system entity registry by querying the global state. + pub fn system_entity_registry(&self) -> Result { + self.tracking_copy + .borrow_mut() + .get_system_entity_registry() + .map_err(|err| { + error!("Missing system entity registry"); + ExecError::TrackingCopy(err) + }) + } + + pub(super) fn remaining_spending_limit(&self) -> U512 { + self.remaining_spending_limit + } + + /// Subtract spent amount from the main purse spending limit. 
+ pub(crate) fn subtract_amount_spent(&mut self, amount: U512) -> Option { + if let Some(res) = self.remaining_spending_limit.checked_sub(amount) { + self.remaining_spending_limit = res; + Some(self.remaining_spending_limit) + } else { + error!( + limit = %self.remaining_spending_limit, + spent = %amount, + "exceeded main purse spending limit" + ); + self.remaining_spending_limit = U512::zero(); + None + } + } + + /// Sets a new spending limit. + /// Should be called after inner context returns - if tokens were spent there, it must count + /// towards global limit for the whole deploy execution. + pub(crate) fn set_remaining_spending_limit(&mut self, amount: U512) { + self.remaining_spending_limit = amount; + } + + /// Adds new message topic. + pub(crate) fn add_message_topic( + &mut self, + topic_name: &str, + topic_name_hash: TopicNameHash, + ) -> Result, ExecError> { + let entity_addr = self.context_key_to_entity_addr()?; + + // Take the addressable entity out of the global state + { + let mut message_topics = self + .tracking_copy + .borrow_mut() + .get_message_topics(entity_addr)?; + + let max_topics_per_contract = self + .engine_config + .wasm_config() + .messages_limits() + .max_topics_per_contract(); + + if message_topics.len() >= max_topics_per_contract as usize { + return Ok(Err(MessageTopicError::MaxTopicsExceeded)); + } + + if let Err(e) = message_topics.add_topic(topic_name, topic_name_hash) { + return Ok(Err(e)); + } + } + + let topic_key = Key::Message(MessageAddr::new_topic_addr(entity_addr, topic_name_hash)); + let block_time = self.block_info.block_time(); + let summary = StoredValue::MessageTopic(MessageTopicSummary::new( + 0, + block_time, + topic_name.to_string(), + )); + + self.metered_write_gs_unsafe(topic_key, summary)?; + + Ok(Ok(())) + } +} diff --git a/execution_engine/src/runtime_context/tests.rs b/execution_engine/src/runtime_context/tests.rs new file mode 100644 index 0000000000..e47dc7fcbe --- /dev/null +++ 
b/execution_engine/src/runtime_context/tests.rs @@ -0,0 +1,1137 @@ +use std::{cell::RefCell, collections::BTreeSet, convert::TryInto, iter::FromIterator, rc::Rc}; + +use rand::RngCore; + +use casper_storage::{ + global_state::state::lmdb::LmdbGlobalStateView, tracking_copy::new_temporary_tracking_copy, + AddressGenerator, TrackingCopy, +}; + +use super::{AllowInstallUpgrade, ExecError, RuntimeContext}; +use crate::engine_state::{BlockInfo, EngineConfig, EngineConfigBuilder}; +use casper_types::{ + account::{ + AccountHash, AddKeyFailure, RemoveKeyFailure, SetThresholdFailure, ACCOUNT_HASH_LENGTH, + }, + addressable_entity::{ActionType, AssociatedKeys, EntryPoints, Weight}, + bytesrepr::ToBytes, + contracts::NamedKeys, + execution::TransformKindV2, + system::{AUCTION, HANDLE_PAYMENT, MINT, STANDARD_PAYMENT}, + AccessRights, AddressableEntity, AddressableEntityHash, BlockGlobalAddr, BlockHash, BlockTime, + ByteCodeHash, CLValue, ContextAccessRights, Digest, EntityAddr, EntityKind, EntryPointType, + Gas, HashAddr, Key, PackageHash, Phase, ProtocolVersion, PublicKey, RuntimeArgs, + RuntimeFootprint, SecretKey, StoredValue, SystemHashRegistry, Tagged, Timestamp, + TransactionHash, TransactionV1Hash, URef, KEY_HASH_LENGTH, U256, U512, +}; +use tempfile::TempDir; + +const TXN_HASH_RAW: [u8; 32] = [1u8; 32]; +const PHASE: Phase = Phase::Session; +const GAS_LIMIT: u64 = 500_000_000_000_000u64; + +fn test_engine_config() -> EngineConfig { + EngineConfig::default() +} + +fn new_tracking_copy( + account_hash: AccountHash, + init_entity_key: Key, + init_entity: AddressableEntity, +) -> (TrackingCopy, TempDir) { + let entity_key_cl_value = CLValue::from_t(init_entity_key).expect("must convert to cl value"); + + let initial_data = [ + (init_entity_key, StoredValue::AddressableEntity(init_entity)), + ( + Key::Account(account_hash), + StoredValue::CLValue(entity_key_cl_value), + ), + ]; + new_temporary_tracking_copy(initial_data, None, true) +} + +fn 
new_addressable_entity_with_purse( + account_hash: AccountHash, + entity_hash: AddressableEntityHash, + entity_kind: EntityKind, + purse: [u8; 32], +) -> (Key, Key, AddressableEntity) { + let associated_keys = AssociatedKeys::new(account_hash, Weight::new(1)); + let entity = AddressableEntity::new( + PackageHash::default(), + ByteCodeHash::default(), + ProtocolVersion::V2_0_0, + URef::new(purse, AccessRights::READ_ADD_WRITE), + associated_keys, + Default::default(), + entity_kind, + ); + let account_key = Key::Account(account_hash); + let contract_key = Key::addressable_entity_key(entity_kind.tag(), entity_hash); + + (account_key, contract_key, entity) +} + +fn new_addressable_entity( + account_hash: AccountHash, + entity_hash: AddressableEntityHash, +) -> (Key, Key, AddressableEntity) { + new_addressable_entity_with_purse( + account_hash, + entity_hash, + EntityKind::Account(account_hash), + [0; 32], + ) +} + +// create random account key. +fn random_account_key(entropy_source: &mut G) -> Key { + let mut key = [0u8; 32]; + entropy_source.fill_bytes(&mut key); + Key::Account(AccountHash::new(key)) +} + +// create random contract key. +fn random_contract_key(entropy_source: &mut G) -> Key { + let mut key_hash = [0u8; 32]; + entropy_source.fill_bytes(&mut key_hash); + Key::AddressableEntity(EntityAddr::SmartContract(key_hash)) +} + +// Create URef Key. 
+fn create_uref_as_key(address_generator: &mut AddressGenerator, rights: AccessRights) -> Key { + let address = address_generator.create_address(); + Key::URef(URef::new(address, rights)) +} + +fn random_hash(entropy_source: &mut G) -> Key { + let mut key = [0u8; KEY_HASH_LENGTH]; + entropy_source.fill_bytes(&mut key); + Key::Hash(key) +} + +fn new_runtime_context<'a>( + addressable_entity: &'a AddressableEntity, + account_hash: AccountHash, + entity_address: Key, + named_keys: &'a mut NamedKeys, + access_rights: ContextAccessRights, + address_generator: AddressGenerator, +) -> (RuntimeContext<'a, LmdbGlobalStateView>, TempDir) { + let (mut tracking_copy, tempdir) = + new_tracking_copy(account_hash, entity_address, addressable_entity.clone()); + + let mint_hash = HashAddr::default(); + + let default_system_registry = { + let mut registry = SystemHashRegistry::new(); + registry.insert(MINT.to_string(), mint_hash); + registry.insert(HANDLE_PAYMENT.to_string(), HashAddr::default()); + registry.insert(STANDARD_PAYMENT.to_string(), HashAddr::default()); + registry.insert(AUCTION.to_string(), HashAddr::default()); + StoredValue::CLValue(CLValue::from_t(registry).unwrap()) + }; + + tracking_copy.write(Key::SystemEntityRegistry, default_system_registry); + tracking_copy.write( + Key::Account(account_hash), + StoredValue::CLValue(CLValue::from_t(entity_address).expect("must get cl_value")), + ); + tracking_copy.write( + entity_address, + StoredValue::AddressableEntity(addressable_entity.clone()), + ); + + // write block time to gs + let now = Timestamp::now(); + let cl_value = CLValue::from_t(now.millis()).expect("should get cl_value"); + let stored_value = StoredValue::CLValue(cl_value); + tracking_copy.write(Key::BlockGlobal(BlockGlobalAddr::BlockTime), stored_value); + + // write protocol version to gs + let protocol_version = ProtocolVersion::V2_0_0; + let cl_value = CLValue::from_t(protocol_version.destructure()).expect("should get cl_value"); + let stored_value = 
StoredValue::CLValue(cl_value); + tracking_copy.write( + Key::BlockGlobal(BlockGlobalAddr::ProtocolVersion), + stored_value, + ); + + // write the addressable entity flag to gs + let cl_value = CLValue::from_t(false).expect("should get cl_value"); + let stored_value = StoredValue::CLValue(cl_value); + tracking_copy.write( + Key::BlockGlobal(BlockGlobalAddr::AddressableEntity), + stored_value, + ); + + let addr = match entity_address { + Key::AddressableEntity(entity_addr) => entity_addr, + Key::Account(account_hash) => EntityAddr::Account(account_hash.value()), + Key::Hash(hash) => EntityAddr::SmartContract(hash), + _ => panic!("unexpected key"), + }; + + let runtime_footprint = RuntimeFootprint::new_entity_footprint( + addr, + addressable_entity.clone(), + named_keys.clone(), + EntryPoints::new(), + ); + + let engine_config = { + let config_builder = EngineConfigBuilder::new(); + config_builder.with_enable_entity(true).build() + }; + + let runtime_context = RuntimeContext::new( + named_keys, + Rc::new(RefCell::new(runtime_footprint)), + entity_address, + BTreeSet::from_iter(vec![account_hash]), + access_rights, + account_hash, + Rc::new(RefCell::new(address_generator)), + Rc::new(RefCell::new(tracking_copy)), + engine_config, + BlockInfo::new( + Digest::default(), + BlockTime::new(0), + BlockHash::default(), + 0, + ProtocolVersion::V2_0_0, + ), + TransactionHash::V1(TransactionV1Hash::from_raw([1u8; 32])), + Phase::Session, + RuntimeArgs::new(), + Gas::new(U512::from(GAS_LIMIT)), + Gas::default(), + Vec::default(), + U512::MAX, + EntryPointType::Caller, + AllowInstallUpgrade::Forbidden, + ); + + (runtime_context, tempdir) +} + +#[allow(clippy::assertions_on_constants)] +fn assert_forged_reference(result: Result) { + match result { + Err(ExecError::ForgedReference(_)) => assert!(true), + _ => panic!("Error. 
Test should have failed with ForgedReference error but didn't."), + } +} + +#[allow(clippy::assertions_on_constants)] +fn assert_invalid_access( + result: Result, + expecting: AccessRights, +) { + match result { + Err(ExecError::InvalidAccess { required }) if required == expecting => assert!(true), + other => panic!( + "Error. Test should have failed with InvalidAccess error but didn't: {:?}.", + other + ), + } +} + +fn build_runtime_context_and_execute( + mut named_keys: NamedKeys, + functor: F, +) -> Result +where + F: FnOnce(RuntimeContext) -> Result, +{ + let secret_key = SecretKey::ed25519_from_bytes([222; SecretKey::ED25519_LENGTH]) + .expect("should create secret key"); + let public_key = PublicKey::from(&secret_key); + let account_hash = public_key.to_account_hash(); + let entity_hash = AddressableEntityHash::new([10u8; 32]); + let deploy_hash = [1u8; 32]; + let (_, entity_key, addressable_entity) = + new_addressable_entity(public_key.to_account_hash(), entity_hash); + + let address_generator = AddressGenerator::new(&deploy_hash, Phase::Session); + let access_rights = addressable_entity.extract_access_rights(entity_hash, &named_keys); + let (runtime_context, _tempdir) = new_runtime_context( + &addressable_entity, + account_hash, + entity_key, + &mut named_keys, + access_rights, + address_generator, + ); + + functor(runtime_context) +} + +#[track_caller] +fn last_transform_kind_on_addressable_entity( + runtime_context: &RuntimeContext, +) -> TransformKindV2 { + let key = runtime_context.context_key; + runtime_context + .effects() + .transforms() + .iter() + .rev() + .find_map(|transform| (transform.key() == &key).then(|| transform.kind().clone())) + .unwrap() +} + +#[test] +fn use_uref_valid() { + // Test fixture + let mut rng = AddressGenerator::new(&TXN_HASH_RAW, PHASE); + let uref_as_key = create_uref_as_key(&mut rng, AccessRights::READ_WRITE); + let mut named_keys = NamedKeys::new(); + named_keys.insert(String::new(), uref_as_key); + // Use uref as the 
key to perform an action on the global state. + // This should succeed because the uref is valid. + let value = StoredValue::CLValue(CLValue::from_t(43_i32).unwrap()); + let result = build_runtime_context_and_execute(named_keys, |mut rc| { + rc.metered_write_gs(uref_as_key, value) + }); + result.expect("writing using valid uref should succeed"); +} + +#[test] +fn use_uref_forged() { + // Test fixture + let mut rng = AddressGenerator::new(&TXN_HASH_RAW, PHASE); + let uref = create_uref_as_key(&mut rng, AccessRights::READ_WRITE); + let named_keys = NamedKeys::new(); + // named_keys.insert(String::new(), Key::from(uref)); + let value = StoredValue::CLValue(CLValue::from_t(43_i32).unwrap()); + let result = + build_runtime_context_and_execute(named_keys, |mut rc| rc.metered_write_gs(uref, value)); + + assert_forged_reference(result); +} + +#[test] +fn account_key_not_writeable() { + let mut rng = rand::thread_rng(); + let acc_key = random_account_key(&mut rng); + let result = build_runtime_context_and_execute(NamedKeys::new(), |mut rc| { + rc.metered_write_gs( + acc_key, + StoredValue::CLValue(CLValue::from_t(1_i32).unwrap()), + ) + }); + assert_invalid_access(result, AccessRights::WRITE); +} + +#[test] +fn entity_key_readable_valid() { + // Entity key is readable if it is a "base" key - current context of the + // execution. + let result = build_runtime_context_and_execute(NamedKeys::new(), |rc| { + let context_key = rc.get_context_key(); + let runtime_footprint = rc.runtime_footprint(); + + let entity_hash = runtime_footprint.borrow().hash_addr(); + let key_hash = context_key.into_entity_hash_addr().expect("must get hash"); + + assert_eq!(entity_hash, key_hash); + Ok(()) + }); + + assert!(result.is_ok()); +} + +#[test] +fn account_key_addable_returns_type_mismatch() { + // Account key is not addable anymore as we do not store an account underneath they key + // but instead there is a CLValue which acts as an indirection to the corresponding entity. 
+ let mut rng = AddressGenerator::new(&TXN_HASH_RAW, PHASE); + let uref_as_key = create_uref_as_key(&mut rng, AccessRights::READ); + let mut named_keys = NamedKeys::new(); + named_keys.insert(String::new(), uref_as_key); + let result = build_runtime_context_and_execute(named_keys, |mut rc| { + let account_key: Key = rc.account_hash.into(); + let uref_name = "NewURef".to_owned(); + let named_key = StoredValue::CLValue(CLValue::from_t((uref_name, uref_as_key)).unwrap()); + + rc.metered_add_gs(account_key, named_key) + }); + + assert!(result.is_err()); +} + +#[test] +fn account_key_addable_invalid() { + // Account key is NOT addable if it is a "base" key - current context of the + // execution. + let mut rng = rand::thread_rng(); + let other_acc_key = random_account_key(&mut rng); + + let result = build_runtime_context_and_execute(NamedKeys::new(), |mut rc| { + rc.metered_add_gs( + other_acc_key, + StoredValue::CLValue(CLValue::from_t(1_i32).unwrap()), + ) + }); + + assert_invalid_access(result, AccessRights::ADD); +} + +#[test] +fn contract_key_readable_valid() { + // Account key is readable if it is a "base" key - current context of the + // execution. + let mut rng = rand::thread_rng(); + let contract_key = random_contract_key(&mut rng); + let result = + build_runtime_context_and_execute(NamedKeys::new(), |mut rc| rc.read_gs(&contract_key)); + + assert!(result.is_ok()); +} + +#[test] +fn contract_key_not_writeable() { + // Account key is readable if it is a "base" key - current context of the + // execution. + let mut rng = rand::thread_rng(); + let contract_key = random_contract_key(&mut rng); + let result = build_runtime_context_and_execute(NamedKeys::new(), |mut rc| { + rc.metered_write_gs( + contract_key, + StoredValue::CLValue(CLValue::from_t(1_i32).unwrap()), + ) + }); + + assert_invalid_access(result, AccessRights::WRITE); +} + +#[test] +fn contract_key_addable_valid() { + // Contract key is addable if it is a "base" key - current context of the execution. 
+ let account_hash = AccountHash::new([0u8; 32]); + let entity_hash = AddressableEntityHash::new([1u8; 32]); + let (_account_key, entity_key, entity) = new_addressable_entity(account_hash, entity_hash); + let authorization_keys = BTreeSet::from_iter(vec![account_hash]); + let mut address_generator = AddressGenerator::new(&TXN_HASH_RAW, PHASE); + + let mut rng = rand::thread_rng(); + let contract_key = random_contract_key(&mut rng); + let entity_as_stored_value = StoredValue::AddressableEntity(AddressableEntity::default()); + let mut access_rights = entity_as_stored_value + .as_addressable_entity() + .unwrap() + .extract_access_rights(AddressableEntityHash::default(), &NamedKeys::new()); + + let (tracking_copy, _tempdir) = new_tracking_copy(account_hash, entity_key, entity); + let tracking_copy = Rc::new(RefCell::new(tracking_copy)); + tracking_copy + .borrow_mut() + .write(contract_key, entity_as_stored_value.clone()); + + let default_system_registry = { + let mut registry = SystemHashRegistry::new(); + registry.insert(MINT.to_string(), HashAddr::default()); + registry.insert(HANDLE_PAYMENT.to_string(), HashAddr::default()); + registry.insert(STANDARD_PAYMENT.to_string(), HashAddr::default()); + registry.insert(AUCTION.to_string(), HashAddr::default()); + StoredValue::CLValue(CLValue::from_t(registry).unwrap()) + }; + + tracking_copy + .borrow_mut() + .write(Key::SystemEntityRegistry, default_system_registry); + + let uref_as_key = create_uref_as_key(&mut address_generator, AccessRights::WRITE); + let uref_name = "NewURef".to_owned(); + let named_uref_tuple = + StoredValue::CLValue(CLValue::from_t((uref_name.clone(), uref_as_key)).unwrap()); + let mut named_keys = NamedKeys::new(); + named_keys.insert(uref_name, uref_as_key); + + access_rights.extend(&[uref_as_key.into_uref().expect("should be a URef")]); + + let addr = match contract_key { + Key::AddressableEntity(entity_addr) => entity_addr, + Key::Account(account_hash) => 
EntityAddr::Account(account_hash.value()), + Key::Hash(hash) => EntityAddr::SmartContract(hash), + _ => panic!("unexpected key"), + }; + + let runtime_footprint = RuntimeFootprint::new_entity_footprint( + addr, + AddressableEntity::default(), + named_keys.clone(), + EntryPoints::new(), + ); + + let mut runtime_context = RuntimeContext::new( + &mut named_keys, + Rc::new(RefCell::new(runtime_footprint)), + contract_key, + authorization_keys, + access_rights, + account_hash, + Rc::new(RefCell::new(address_generator)), + Rc::clone(&tracking_copy), + EngineConfig::default(), + BlockInfo::new( + Digest::default(), + BlockTime::new(0), + BlockHash::default(), + 0, + ProtocolVersion::V2_0_0, + ), + TransactionHash::V1(TransactionV1Hash::from_raw(TXN_HASH_RAW)), + PHASE, + RuntimeArgs::new(), + Gas::new(U512::from(GAS_LIMIT)), + Gas::default(), + Vec::default(), + U512::zero(), + EntryPointType::Caller, + AllowInstallUpgrade::Forbidden, + ); + + assert!(runtime_context + .metered_add_gs(contract_key, named_uref_tuple) + .is_err()) +} + +#[test] +fn contract_key_addable_invalid() { + let account_hash = AccountHash::new([0u8; 32]); + let entity_hash = AddressableEntityHash::new([1u8; 32]); + let (_, entity_key, entity) = new_addressable_entity(account_hash, entity_hash); + let authorization_keys = BTreeSet::from_iter(vec![account_hash]); + let mut address_generator = AddressGenerator::new(&TXN_HASH_RAW, PHASE); + let mut rng = rand::thread_rng(); + let contract_key = random_contract_key(&mut rng); + + let other_contract_key = random_contract_key(&mut rng); + let contract = StoredValue::AddressableEntity(AddressableEntity::default()); + let mut access_rights = contract + .as_addressable_entity() + .unwrap() + .extract_access_rights(AddressableEntityHash::default(), &NamedKeys::new()); + let (tracking_copy, _tempdir) = new_tracking_copy(account_hash, entity_key, entity.clone()); + let tracking_copy = Rc::new(RefCell::new(tracking_copy)); + + 
tracking_copy.borrow_mut().write(contract_key, contract); + + let uref_as_key = create_uref_as_key(&mut address_generator, AccessRights::WRITE); + let uref_name = "NewURef".to_owned(); + let named_uref_tuple = StoredValue::CLValue(CLValue::from_t((uref_name, uref_as_key)).unwrap()); + + let mut named_keys = NamedKeys::new(); + named_keys.insert(String::new(), uref_as_key); + + access_rights.extend(&[uref_as_key.into_uref().expect("should be a URef")]); + + let addr = match entity_key { + Key::AddressableEntity(entity_addr) => entity_addr, + Key::Account(account_hash) => EntityAddr::Account(account_hash.value()), + Key::Hash(hash) => EntityAddr::SmartContract(hash), + _ => panic!("unexpected key"), + }; + + let runtime_footprint = RuntimeFootprint::new_entity_footprint( + addr, + AddressableEntity::default(), + named_keys.clone(), + EntryPoints::new(), + ); + + let mut runtime_context = RuntimeContext::new( + &mut named_keys, + Rc::new(RefCell::new(runtime_footprint)), + other_contract_key, + authorization_keys, + access_rights, + account_hash, + Rc::new(RefCell::new(address_generator)), + Rc::clone(&tracking_copy), + EngineConfig::default(), + BlockInfo::new( + Digest::default(), + BlockTime::new(0), + BlockHash::default(), + 0, + ProtocolVersion::V2_0_0, + ), + TransactionHash::V1(TransactionV1Hash::from_raw(TXN_HASH_RAW)), + PHASE, + RuntimeArgs::new(), + Gas::new(U512::from(GAS_LIMIT)), + Gas::default(), + Vec::default(), + U512::zero(), + EntryPointType::Caller, + AllowInstallUpgrade::Forbidden, + ); + + let result = runtime_context.metered_add_gs(contract_key, named_uref_tuple); + + assert_invalid_access(result, AccessRights::ADD); +} + +#[test] +fn uref_key_readable_valid() { + let mut rng = AddressGenerator::new(&TXN_HASH_RAW, PHASE); + let uref_key = create_uref_as_key(&mut rng, AccessRights::READ); + + let mut named_keys = NamedKeys::new(); + named_keys.insert(String::new(), uref_key); + + let result = build_runtime_context_and_execute(named_keys, |mut rc| 
rc.read_gs(&uref_key)); + assert!(result.is_ok()); +} + +#[test] +fn uref_key_readable_invalid() { + let mut rng = AddressGenerator::new(&TXN_HASH_RAW, PHASE); + let uref_key = create_uref_as_key(&mut rng, AccessRights::WRITE); + + let mut named_keys = NamedKeys::new(); + named_keys.insert(String::new(), uref_key); + + let result = build_runtime_context_and_execute(named_keys, |mut rc| rc.read_gs(&uref_key)); + assert_invalid_access(result, AccessRights::READ); +} + +#[test] +fn uref_key_writeable_valid() { + let mut rng = AddressGenerator::new(&TXN_HASH_RAW, PHASE); + let uref_key = create_uref_as_key(&mut rng, AccessRights::WRITE); + + let mut named_keys = NamedKeys::new(); + named_keys.insert(String::new(), uref_key); + + let result = build_runtime_context_and_execute(named_keys, |mut rc| { + rc.metered_write_gs( + uref_key, + StoredValue::CLValue(CLValue::from_t(1_i32).unwrap()), + ) + }); + assert!(result.is_ok()); +} + +#[test] +fn uref_key_writeable_invalid() { + let mut rng = AddressGenerator::new(&TXN_HASH_RAW, PHASE); + let uref_key = create_uref_as_key(&mut rng, AccessRights::READ); + + let mut named_keys = NamedKeys::new(); + named_keys.insert(String::new(), uref_key); + + let result = build_runtime_context_and_execute(named_keys, |mut rc| { + rc.metered_write_gs( + uref_key, + StoredValue::CLValue(CLValue::from_t(1_i32).unwrap()), + ) + }); + assert_invalid_access(result, AccessRights::WRITE); +} + +#[test] +fn uref_key_addable_valid() { + let mut rng = AddressGenerator::new(&TXN_HASH_RAW, PHASE); + let uref_key = create_uref_as_key(&mut rng, AccessRights::ADD_WRITE); + + let mut named_keys = NamedKeys::new(); + named_keys.insert(String::new(), uref_key); + + let result = build_runtime_context_and_execute(named_keys, |mut rc| { + rc.metered_write_gs(uref_key, CLValue::from_t(10_i32).unwrap()) + .expect("Writing to the GlobalState should work."); + rc.metered_add_gs(uref_key, CLValue::from_t(1_i32).unwrap()) + }); + assert!(result.is_ok()); +} + 
+#[test] +fn uref_key_addable_invalid() { + let mut rng = AddressGenerator::new(&TXN_HASH_RAW, PHASE); + let uref_key = create_uref_as_key(&mut rng, AccessRights::WRITE); + + let mut named_keys = NamedKeys::new(); + named_keys.insert(String::new(), uref_key); + + let result = build_runtime_context_and_execute(named_keys, |mut rc| { + rc.metered_add_gs( + uref_key, + StoredValue::CLValue(CLValue::from_t(1_i32).unwrap()), + ) + }); + assert_invalid_access(result, AccessRights::ADD); +} + +#[test] +fn hash_key_is_not_writeable() { + // values under hash's are immutable + let functor = |runtime_context: RuntimeContext| { + let mut rng = rand::thread_rng(); + let key = random_hash(&mut rng); + runtime_context.validate_writeable(&key) + }; + let result = build_runtime_context_and_execute(NamedKeys::new(), functor); + assert!(result.is_err()) +} + +#[test] +fn hash_key_is_not_addable() { + // values under hashes are immutable + let functor = |runtime_context: RuntimeContext| { + let mut rng = rand::thread_rng(); + let key = random_hash(&mut rng); + runtime_context.validate_addable(&key) + }; + let result = build_runtime_context_and_execute(NamedKeys::new(), functor); + assert!(result.is_err()) +} + +#[test] +fn manage_associated_keys() { + // Testing a valid case only - successfully added a key, and successfully removed, + // making sure `account_dirty` mutated + let named_keys = NamedKeys::new(); + let functor = |mut runtime_context: RuntimeContext| { + let account_hash = AccountHash::new([42; 32]); + let weight = Weight::new(155); + + // Add a key (this doesn't check for all invariants as `add_key` + // is already tested in different place) + runtime_context + .add_associated_key(account_hash, weight) + .expect("Unable to add key"); + + let transform_kind = last_transform_kind_on_addressable_entity(&runtime_context); + let entity = match transform_kind { + TransformKindV2::Write(StoredValue::AddressableEntity(entity)) => entity, + _ => panic!("Invalid transform 
operation found"), + }; + entity + .associated_keys() + .get(&account_hash) + .expect("Account hash wasn't added to associated keys"); + + let new_weight = Weight::new(100); + runtime_context + .update_associated_key(account_hash, new_weight) + .expect("Unable to update key"); + + let transform_kind = last_transform_kind_on_addressable_entity(&runtime_context); + let entity = match transform_kind { + TransformKindV2::Write(StoredValue::AddressableEntity(entity)) => entity, + _ => panic!("Invalid transform operation found"), + }; + let value = entity + .associated_keys() + .get(&account_hash) + .expect("Account hash wasn't added to associated keys"); + + assert_eq!(value, &new_weight, "value was not updated"); + + // Remove a key that was already added + runtime_context + .remove_associated_key(account_hash) + .expect("Unable to remove key"); + + // Verify + let transform_kind = last_transform_kind_on_addressable_entity(&runtime_context); + let entity = match transform_kind { + TransformKindV2::Write(StoredValue::AddressableEntity(entity)) => entity, + _ => panic!("Invalid transform operation found"), + }; + + let actual = entity.associated_keys().get(&account_hash); + + assert!(actual.is_none()); + + // Remove a key that was already removed + runtime_context + .remove_associated_key(account_hash) + .expect_err("A non existing key was unexpectedly removed again"); + + Ok(()) + }; + let _ = build_runtime_context_and_execute(named_keys, functor); +} + +#[test] +fn action_thresholds_management() { + // Testing a valid case only - successfully added a key, and successfully removed, + // making sure `account_dirty` mutated + let named_keys = NamedKeys::new(); + let functor = |mut runtime_context: RuntimeContext| { + let entity_hash_by_account_hash = + CLValue::from_t(Key::Hash([2; 32])).expect("must convert to cl_value"); + + runtime_context + .metered_write_gs_unsafe( + Key::Account(AccountHash::new([42; 32])), + entity_hash_by_account_hash, + ) + .expect("must write 
key to gs"); + + runtime_context + .add_associated_key(AccountHash::new([42; 32]), Weight::new(254)) + .expect("Unable to add associated key with maximum weight"); + runtime_context + .set_action_threshold(ActionType::KeyManagement, Weight::new(253)) + .expect("Unable to set action threshold KeyManagement"); + runtime_context + .set_action_threshold(ActionType::Deployment, Weight::new(252)) + .expect("Unable to set action threshold Deployment"); + + let transform_kind = last_transform_kind_on_addressable_entity(&runtime_context); + let mutated_entity = match transform_kind { + TransformKindV2::Write(StoredValue::AddressableEntity(entity)) => entity, + _ => panic!("Invalid transform operation found"), + }; + + assert_eq!( + mutated_entity.action_thresholds().deployment(), + &Weight::new(252) + ); + assert_eq!( + mutated_entity.action_thresholds().key_management(), + &Weight::new(253) + ); + + runtime_context + .set_action_threshold(ActionType::Deployment, Weight::new(255)) + .expect_err("Shouldn't be able to set deployment threshold higher than key management"); + + Ok(()) + }; + let _ = build_runtime_context_and_execute(named_keys, functor); +} + +#[test] +fn should_verify_ownership_before_adding_key() { + // Testing a valid case only - successfully added a key, and successfully removed, + // making sure `account_dirty` mutated + let named_keys = NamedKeys::new(); + let functor = |mut runtime_context: RuntimeContext| { + // Overwrites a `context_key` to a different one before doing any operation as + // account `[0; 32]` + let entity_hash_by_account_hash = + CLValue::from_t(Key::Hash([2; 32])).expect("must convert to cl_value"); + + runtime_context + .metered_write_gs_unsafe( + Key::Account(AccountHash::new([84; 32])), + entity_hash_by_account_hash, + ) + .expect("must write key to gs"); + + runtime_context + .metered_write_gs_unsafe(Key::Hash([1; 32]), AddressableEntity::default()) + .expect("must write key to gs"); + + runtime_context.context_key = Key::Hash([1; 
32]); + + let err = runtime_context + .add_associated_key(AccountHash::new([84; 32]), Weight::new(123)) + .expect_err("This operation should return error"); + + match err { + ExecError::UnexpectedKeyVariant(_) => { + // This is the v2.0.0 error as this test is currently using Key::Hash + // instead of Key::AddressableEntity + } + ExecError::AddKeyFailure(AddKeyFailure::PermissionDenied) => {} + e => panic!("Invalid error variant: {:?}", e), + } + + Ok(()) + }; + let _ = build_runtime_context_and_execute(named_keys, functor); +} + +#[test] +fn should_verify_ownership_before_removing_a_key() { + // Testing a valid case only - successfully added a key, and successfully removed, + // making sure `account_dirty` mutated + let named_keys = NamedKeys::new(); + let functor = |mut runtime_context: RuntimeContext| { + // Overwrites a `context_key` to a different one before doing any operation as + // account `[0; 32]` + runtime_context.context_key = Key::Hash([1; 32]); + + let err = runtime_context + .remove_associated_key(AccountHash::new([84; 32])) + .expect_err("This operation should return error"); + + match err { + ExecError::UnexpectedKeyVariant(_) => { + // this is the v2.0 error because this test is currently using + // Key::Hash instead of Key::AddressableEntity + } + ExecError::RemoveKeyFailure(RemoveKeyFailure::PermissionDenied) => {} + ref e => panic!("Invalid error variant: {:?}", e), + } + + Ok(()) + }; + let _ = build_runtime_context_and_execute(named_keys, functor); +} + +#[test] +fn should_verify_ownership_before_setting_action_threshold() { + // Testing a valid case only - successfully added a key, and successfully removed, + // making sure `account_dirty` mutated + let named_keys = NamedKeys::new(); + let functor = |mut runtime_context: RuntimeContext| { + // Overwrites a `context_key` to a different one before doing any operation as + // account `[0; 32]` + runtime_context.context_key = Key::Hash([1; 32]); + + let err = runtime_context + 
.set_action_threshold(ActionType::Deployment, Weight::new(123)) + .expect_err("This operation should return error"); + + match err { + ExecError::UnexpectedKeyVariant(_) => { + // this is what is returned under protocol version 2.0 because Key::Hash(_) is + // deprecated. + } + ExecError::SetThresholdFailure(SetThresholdFailure::PermissionDeniedError) => {} + ref e => panic!("Invalid error variant: {:?}", e), + } + + Ok(()) + }; + let _ = build_runtime_context_and_execute(named_keys, functor); +} + +#[test] +fn remove_uref_works() { + // Test that `remove_uref` removes Key from both ephemeral representation + // which is one of the current RuntimeContext, and also puts that change + // into the `TrackingCopy` so that it's later committed to the GlobalState. + let deploy_hash = [1u8; 32]; + let mut address_generator = AddressGenerator::new(&deploy_hash, Phase::Session); + let uref_name = "Foo".to_owned(); + let uref_key = create_uref_as_key(&mut address_generator, AccessRights::READ); + let account_hash = AccountHash::new([0u8; 32]); + let entity_hash = AddressableEntityHash::new([0u8; 32]); + let mut named_keys = NamedKeys::new(); + named_keys.insert(uref_name.clone(), uref_key); + let (_, entity_key, addressable_entity) = new_addressable_entity(account_hash, entity_hash); + + let access_rights = addressable_entity.extract_access_rights(entity_hash, &named_keys); + + let (mut runtime_context, _tempdir) = new_runtime_context( + &addressable_entity, + account_hash, + entity_key, + &mut named_keys, + access_rights, + address_generator, + ); + + assert!(runtime_context.named_keys_contains_key(&uref_name)); + assert!(runtime_context.remove_key(&uref_name).is_ok()); + // It is valid to retain the access right for the given runtime context + // even if you remove the URef from the named keys. 
+ assert!(runtime_context.validate_key(&uref_key).is_ok()); + assert!(!runtime_context.named_keys_contains_key(&uref_name)); + + let entity_named_keys = runtime_context + .get_named_keys(entity_key) + .expect("must get named keys for entity"); + assert!(!entity_named_keys.contains(&uref_name)); + // The next time the account is used, the access right is gone for the removed + // named key. + + let next_session_access_rights = addressable_entity.extract_access_rights( + AddressableEntityHash::new(account_hash.value()), + &entity_named_keys, + ); + let address_generator = AddressGenerator::new(&deploy_hash, Phase::Session); + + let (runtime_context, _tempdir) = new_runtime_context( + &addressable_entity, + account_hash, + entity_key, + &mut named_keys, + next_session_access_rights, + address_generator, + ); + assert!(runtime_context.validate_key(&uref_key).is_err()); +} + +#[test] +fn an_accounts_access_rights_should_include_main_purse() { + let test_main_purse = URef::new([42u8; 32], AccessRights::READ_ADD_WRITE); + // All other access rights except for main purse are extracted from named keys. 
+ let account_hash = AccountHash::new([0u8; 32]); + let entity_hash = AddressableEntityHash::new([1u8; 32]); + let named_keys = NamedKeys::new(); + let (_context_key, _, entity) = new_addressable_entity_with_purse( + account_hash, + entity_hash, + EntityKind::Account(account_hash), + test_main_purse.addr(), + ); + assert!( + named_keys.is_empty(), + "Named keys does not contain main purse" + ); + let access_rights = entity.extract_access_rights(entity_hash, &named_keys); + assert!( + access_rights.has_access_rights_to_uref(&test_main_purse), + "Main purse should be included in access rights" + ); +} + +#[test] +fn validate_valid_purse_of_an_account() { + // Tests that URef which matches a purse of a given context gets validated + let test_main_purse = URef::new([42u8; 32], AccessRights::READ_ADD_WRITE); + + let mut named_keys = NamedKeys::new(); + named_keys.insert("entry".to_string(), Key::from(test_main_purse)); + + let deploy_hash = [1u8; 32]; + let account_hash = AccountHash::new([0u8; 32]); + let entity_hash = AddressableEntityHash::new([1u8; 32]); + let (context_key, _, entity) = new_addressable_entity_with_purse( + account_hash, + entity_hash, + EntityKind::Account(account_hash), + test_main_purse.addr(), + ); + + let mut access_rights = entity.extract_access_rights(entity_hash, &named_keys); + access_rights.extend(&[test_main_purse]); + + let address_generator = AddressGenerator::new(&deploy_hash, Phase::Session); + let (runtime_context, _tempdir) = new_runtime_context( + &entity, + account_hash, + context_key, + &mut named_keys, + access_rights, + address_generator, + ); + + // URef that has the same id as purse of an account gets validated + // successfully. 
+ assert!(runtime_context.validate_uref(&test_main_purse).is_ok()); + + let purse = test_main_purse.with_access_rights(AccessRights::READ); + assert!(runtime_context.validate_uref(&purse).is_ok()); + let purse = test_main_purse.with_access_rights(AccessRights::ADD); + assert!(runtime_context.validate_uref(&purse).is_ok()); + let purse = test_main_purse.with_access_rights(AccessRights::WRITE); + assert!(runtime_context.validate_uref(&purse).is_ok()); + + // Purse ID that doesn't match account's purse should fail as it's also not + // in known urefs. + let purse = URef::new([53; 32], AccessRights::READ_ADD_WRITE); + assert!(runtime_context.validate_uref(&purse).is_err()); +} + +#[test] +fn should_meter_for_gas_storage_write() { + // Test fixture + let mut rng = AddressGenerator::new(&TXN_HASH_RAW, PHASE); + let uref_as_key = create_uref_as_key(&mut rng, AccessRights::READ_WRITE); + + let mut named_keys = NamedKeys::new(); + named_keys.insert("entry".to_string(), uref_as_key); + + let value = StoredValue::CLValue(CLValue::from_t(43_i32).unwrap()); + let expected_write_cost = test_engine_config() + .storage_costs() + .calculate_gas_cost(value.serialized_length()); + + let (gas_usage_before, gas_usage_after) = + build_runtime_context_and_execute(named_keys, |mut rc| { + let gas_before = rc.gas_counter(); + rc.metered_write_gs(uref_as_key, value) + .expect("should write"); + let gas_after = rc.gas_counter(); + Ok((gas_before, gas_after)) + }) + .expect("should run test"); + + assert!( + gas_usage_after > gas_usage_before, + "{} <= {}", + gas_usage_after, + gas_usage_before + ); + + assert_eq!( + Some(gas_usage_after), + gas_usage_before.checked_add(expected_write_cost) + ); +} + +#[test] +fn should_meter_for_gas_storage_add() { + // Test fixture + let mut rng = AddressGenerator::new(&TXN_HASH_RAW, PHASE); + let uref_as_key = create_uref_as_key(&mut rng, AccessRights::ADD_WRITE); + + let mut named_keys = NamedKeys::new(); + named_keys.insert("entry".to_string(), 
uref_as_key); + + let value = StoredValue::CLValue(CLValue::from_t(43_i32).unwrap()); + let expected_add_cost = test_engine_config() + .storage_costs() + .calculate_gas_cost(value.serialized_length()); + + let (gas_usage_before, gas_usage_after) = + build_runtime_context_and_execute(named_keys, |mut rc| { + rc.metered_write_gs(uref_as_key, value.clone()) + .expect("should write"); + let gas_before = rc.gas_counter(); + rc.metered_add_gs(uref_as_key, value).expect("should add"); + let gas_after = rc.gas_counter(); + Ok((gas_before, gas_after)) + }) + .expect("should run test"); + + assert!( + gas_usage_after > gas_usage_before, + "{} <= {}", + gas_usage_after, + gas_usage_before + ); + + assert_eq!( + Some(gas_usage_after), + gas_usage_before.checked_add(expected_add_cost) + ); +} + +#[test] +fn associated_keys_add_full() { + let final_add_result = build_runtime_context_and_execute(NamedKeys::new(), |mut rc| { + let associated_keys_before = rc.runtime_footprint().borrow().associated_keys().len(); + + for count in 0..(rc.engine_config.max_associated_keys() as usize - associated_keys_before) { + let account_hash = { + let mut addr = [0; ACCOUNT_HASH_LENGTH]; + U256::from(count).to_big_endian(&mut addr); + AccountHash::new(addr) + }; + let weight = Weight::new(count.try_into().unwrap()); + rc.add_associated_key(account_hash, weight) + .unwrap_or_else(|e| panic!("should add key {}: {:?}", count, e)); + } + + rc.add_associated_key(AccountHash::new([42; 32]), Weight::new(42)) + }); + + assert!(matches!( + final_add_result.expect_err("should error out"), + ExecError::AddKeyFailure(AddKeyFailure::MaxKeysLimit) + )); +} diff --git a/execution_engine/src/shared.rs b/execution_engine/src/shared.rs deleted file mode 100644 index c023d171b8..0000000000 --- a/execution_engine/src/shared.rs +++ /dev/null @@ -1,24 +0,0 @@ -#![allow(missing_docs)] - -pub mod additive_map; -#[macro_use] -pub mod gas; -pub mod account; -pub mod host_function_costs; -pub mod logging; -pub mod motes; 
-pub mod newtypes; -pub mod opcode_costs; -pub mod socket; -pub mod storage_costs; -pub mod stored_value; -pub mod system_config; -pub mod test_utils; -pub mod transform; -mod type_mismatch; -pub mod utils; -pub mod wasm; -pub mod wasm_config; -pub mod wasm_prep; - -pub use type_mismatch::TypeMismatch; diff --git a/execution_engine/src/shared/account.rs b/execution_engine/src/shared/account.rs deleted file mode 100644 index 9076243224..0000000000 --- a/execution_engine/src/shared/account.rs +++ /dev/null @@ -1,652 +0,0 @@ -mod action_thresholds; -mod associated_keys; - -use std::collections::BTreeSet; - -use casper_types::{ - account::{ - AccountHash, ActionType, AddKeyFailure, RemoveKeyFailure, SetThresholdFailure, - UpdateKeyFailure, Weight, - }, - bytesrepr::{self, Error, FromBytes, ToBytes}, - contracts::NamedKeys, - AccessRights, URef, -}; - -pub use action_thresholds::ActionThresholds; -pub use associated_keys::AssociatedKeys; - -#[derive(PartialEq, Eq, Clone, Debug)] -pub struct Account { - account_hash: AccountHash, - named_keys: NamedKeys, - main_purse: URef, - associated_keys: AssociatedKeys, - action_thresholds: ActionThresholds, -} - -impl Account { - pub fn new( - account_hash: AccountHash, - named_keys: NamedKeys, - main_purse: URef, - associated_keys: AssociatedKeys, - action_thresholds: ActionThresholds, - ) -> Self { - Account { - account_hash, - named_keys, - main_purse, - associated_keys, - action_thresholds, - } - } - - pub fn create(account: AccountHash, named_keys: NamedKeys, main_purse: URef) -> Self { - let associated_keys = AssociatedKeys::new(account, Weight::new(1)); - let action_thresholds: ActionThresholds = Default::default(); - Account::new( - account, - named_keys, - main_purse, - associated_keys, - action_thresholds, - ) - } - - pub fn named_keys_append(&mut self, keys: &mut NamedKeys) { - self.named_keys.append(keys); - } - - pub fn named_keys(&self) -> &NamedKeys { - &self.named_keys - } - - pub fn named_keys_mut(&mut self) -> 
&mut NamedKeys { - &mut self.named_keys - } - - pub fn account_hash(&self) -> AccountHash { - self.account_hash - } - - pub fn main_purse(&self) -> URef { - self.main_purse - } - - /// Returns an [`AccessRights::ADD`]-only version of the [`URef`]. - pub fn main_purse_add_only(&self) -> URef { - URef::new(self.main_purse.addr(), AccessRights::ADD) - } - - pub fn associated_keys(&self) -> impl Iterator { - self.associated_keys.iter() - } - - pub fn action_thresholds(&self) -> &ActionThresholds { - &self.action_thresholds - } - - pub fn add_associated_key( - &mut self, - account_hash: AccountHash, - weight: Weight, - ) -> Result<(), AddKeyFailure> { - self.associated_keys.add_key(account_hash, weight) - } - - /// Checks if removing given key would properly satisfy thresholds. - fn can_remove_key(&self, account_hash: AccountHash) -> bool { - let total_weight_without = self - .associated_keys - .total_keys_weight_excluding(account_hash); - - // Returns true if the total weight calculated without given public key would be greater or - // equal to all of the thresholds. - total_weight_without >= *self.action_thresholds().deployment() - && total_weight_without >= *self.action_thresholds().key_management() - } - - /// Checks if adding a weight to a sum of all weights excluding the given key would make the - /// resulting value to fall below any of the thresholds on account. - fn can_update_key(&self, account_hash: AccountHash, weight: Weight) -> bool { - // Calculates total weight of all keys excluding the given key - let total_weight = self - .associated_keys - .total_keys_weight_excluding(account_hash); - - // Safely calculate new weight by adding the updated weight - let new_weight = total_weight.value().saturating_add(weight.value()); - - // Returns true if the new weight would be greater or equal to all of - // the thresholds. 
- new_weight >= self.action_thresholds().deployment().value() - && new_weight >= self.action_thresholds().key_management().value() - } - - pub fn remove_associated_key( - &mut self, - account_hash: AccountHash, - ) -> Result<(), RemoveKeyFailure> { - if self.associated_keys.contains_key(&account_hash) { - // Check if removing this weight would fall below thresholds - if !self.can_remove_key(account_hash) { - return Err(RemoveKeyFailure::ThresholdViolation); - } - } - self.associated_keys.remove_key(&account_hash) - } - - pub fn update_associated_key( - &mut self, - account_hash: AccountHash, - weight: Weight, - ) -> Result<(), UpdateKeyFailure> { - if let Some(current_weight) = self.associated_keys.get(&account_hash) { - if weight < *current_weight { - // New weight is smaller than current weight - if !self.can_update_key(account_hash, weight) { - return Err(UpdateKeyFailure::ThresholdViolation); - } - } - } - self.associated_keys.update_key(account_hash, weight) - } - - pub fn get_associated_key_weight(&self, account_hash: AccountHash) -> Option<&Weight> { - self.associated_keys.get(&account_hash) - } - - pub fn set_action_threshold( - &mut self, - action_type: ActionType, - weight: Weight, - ) -> Result<(), SetThresholdFailure> { - // Verify if new threshold weight exceeds total weight of allassociated - // keys. 
- self.can_set_threshold(weight)?; - // Set new weight for given action - self.action_thresholds.set_threshold(action_type, weight) - } - - /// Verifies if user can set action threshold - pub fn can_set_threshold(&self, new_threshold: Weight) -> Result<(), SetThresholdFailure> { - let total_weight = self.associated_keys.total_keys_weight(); - if new_threshold > total_weight { - return Err(SetThresholdFailure::InsufficientTotalWeight); - } - Ok(()) - } - - /// Checks whether all authorization keys are associated with this account - pub fn can_authorize(&self, authorization_keys: &BTreeSet) -> bool { - !authorization_keys.is_empty() - && authorization_keys - .iter() - .all(|e| self.associated_keys.contains_key(e)) - } - - /// Checks whether the sum of the weights of all authorization keys is - /// greater or equal to deploy threshold. - pub fn can_deploy_with(&self, authorization_keys: &BTreeSet) -> bool { - let total_weight = self - .associated_keys - .calculate_keys_weight(authorization_keys); - - total_weight >= *self.action_thresholds().deployment() - } - - /// Checks whether the sum of the weights of all authorization keys is - /// greater or equal to key management threshold. 
- pub fn can_manage_keys_with(&self, authorization_keys: &BTreeSet) -> bool { - let total_weight = self - .associated_keys - .calculate_keys_weight(authorization_keys); - - total_weight >= *self.action_thresholds().key_management() - } -} - -impl ToBytes for Account { - fn to_bytes(&self) -> Result, Error> { - let mut result = bytesrepr::allocate_buffer(self)?; - result.append(&mut self.account_hash.to_bytes()?); - result.append(&mut self.named_keys.to_bytes()?); - result.append(&mut self.main_purse.to_bytes()?); - result.append(&mut self.associated_keys.to_bytes()?); - result.append(&mut self.action_thresholds.to_bytes()?); - Ok(result) - } - - fn serialized_length(&self) -> usize { - self.account_hash.serialized_length() - + self.named_keys.serialized_length() - + self.main_purse.serialized_length() - + self.associated_keys.serialized_length() - + self.action_thresholds.serialized_length() - } -} - -impl FromBytes for Account { - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { - let (account_hash, rem) = AccountHash::from_bytes(bytes)?; - let (named_keys, rem) = NamedKeys::from_bytes(rem)?; - let (main_purse, rem) = URef::from_bytes(rem)?; - let (associated_keys, rem) = AssociatedKeys::from_bytes(rem)?; - let (action_thresholds, rem) = ActionThresholds::from_bytes(rem)?; - Ok(( - Account { - account_hash, - named_keys, - main_purse, - associated_keys, - action_thresholds, - }, - rem, - )) - } -} - -#[cfg(any(feature = "gens", test))] -pub mod gens { - use proptest::prelude::*; - - use casper_types::gens::{account_hash_arb, named_keys_arb, uref_arb}; - - use super::*; - use crate::shared::account::{ - action_thresholds::gens::action_thresholds_arb, associated_keys::gens::associated_keys_arb, - }; - - prop_compose! 
{ - pub fn account_arb()( - account_hash in account_hash_arb(), - urefs in named_keys_arb(3), - purse in uref_arb(), - thresholds in action_thresholds_arb(), - mut associated_keys in associated_keys_arb(), - ) -> Account { - associated_keys.add_key(account_hash, Weight::new(1)).unwrap(); - Account::new( - account_hash, - urefs, - purse, - associated_keys, - thresholds, - ) - } - } -} - -#[cfg(test)] -mod proptests { - use proptest::prelude::*; - - use casper_types::bytesrepr; - - use super::*; - - proptest! { - #[test] - fn test_value_account(acct in gens::account_arb()) { - bytesrepr::test_serialization_roundtrip(&acct); - } - } -} - -#[cfg(test)] -mod tests { - use std::{collections::BTreeSet, iter::FromIterator}; - - use casper_types::{ - account::{ - AccountHash, ActionType, RemoveKeyFailure, SetThresholdFailure, UpdateKeyFailure, - Weight, - }, - AccessRights, URef, - }; - - use super::*; - - #[test] - fn associated_keys_can_authorize_keys() { - let key_1 = AccountHash::new([0; 32]); - let key_2 = AccountHash::new([1; 32]); - let key_3 = AccountHash::new([2; 32]); - let mut keys = AssociatedKeys::default(); - - keys.add_key(key_2, Weight::new(2)) - .expect("should add key_1"); - keys.add_key(key_1, Weight::new(1)) - .expect("should add key_1"); - keys.add_key(key_3, Weight::new(3)) - .expect("should add key_1"); - - let account = Account::new( - AccountHash::new([0u8; 32]), - NamedKeys::new(), - URef::new([0u8; 32], AccessRights::READ_ADD_WRITE), - keys, - // deploy: 33 (3*11) - ActionThresholds::new(Weight::new(33), Weight::new(48)) - .expect("should create thresholds"), - ); - - assert!(account.can_authorize(&BTreeSet::from_iter(vec![key_3, key_2, key_1]))); - assert!(account.can_authorize(&BTreeSet::from_iter(vec![key_1, key_3, key_2]))); - - assert!(account.can_authorize(&BTreeSet::from_iter(vec![key_1, key_2]))); - assert!(account.can_authorize(&BTreeSet::from_iter(vec![key_1]))); - - assert!(!account.can_authorize(&BTreeSet::from_iter(vec![ - key_1, - 
key_2, - AccountHash::new([42; 32]) - ]))); - assert!(!account.can_authorize(&BTreeSet::from_iter(vec![ - AccountHash::new([42; 32]), - key_1, - key_2 - ]))); - assert!(!account.can_authorize(&BTreeSet::from_iter(vec![ - AccountHash::new([43; 32]), - AccountHash::new([44; 32]), - AccountHash::new([42; 32]) - ]))); - assert!(!account.can_authorize(&BTreeSet::new())); - } - - #[test] - fn account_can_deploy_with() { - let associated_keys = { - let mut res = AssociatedKeys::new(AccountHash::new([1u8; 32]), Weight::new(1)); - res.add_key(AccountHash::new([2u8; 32]), Weight::new(11)) - .expect("should add key 1"); - res.add_key(AccountHash::new([3u8; 32]), Weight::new(11)) - .expect("should add key 2"); - res.add_key(AccountHash::new([4u8; 32]), Weight::new(11)) - .expect("should add key 3"); - res - }; - let account = Account::new( - AccountHash::new([0u8; 32]), - NamedKeys::new(), - URef::new([0u8; 32], AccessRights::READ_ADD_WRITE), - associated_keys, - // deploy: 33 (3*11) - ActionThresholds::new(Weight::new(33), Weight::new(48)) - .expect("should create thresholds"), - ); - - // sum: 22, required 33 - can't deploy - assert!(!account.can_deploy_with(&BTreeSet::from_iter(vec![ - AccountHash::new([3u8; 32]), - AccountHash::new([2u8; 32]), - ]))); - - // sum: 33, required 33 - can deploy - assert!(account.can_deploy_with(&BTreeSet::from_iter(vec![ - AccountHash::new([4u8; 32]), - AccountHash::new([3u8; 32]), - AccountHash::new([2u8; 32]), - ]))); - - // sum: 34, required 33 - can deploy - assert!(account.can_deploy_with(&BTreeSet::from_iter(vec![ - AccountHash::new([2u8; 32]), - AccountHash::new([1u8; 32]), - AccountHash::new([4u8; 32]), - AccountHash::new([3u8; 32]), - ]))); - } - - #[test] - fn account_can_manage_keys_with() { - let associated_keys = { - let mut res = AssociatedKeys::new(AccountHash::new([1u8; 32]), Weight::new(1)); - res.add_key(AccountHash::new([2u8; 32]), Weight::new(11)) - .expect("should add key 1"); - res.add_key(AccountHash::new([3u8; 32]), 
Weight::new(11)) - .expect("should add key 2"); - res.add_key(AccountHash::new([4u8; 32]), Weight::new(11)) - .expect("should add key 3"); - res - }; - let account = Account::new( - AccountHash::new([0u8; 32]), - NamedKeys::new(), - URef::new([0u8; 32], AccessRights::READ_ADD_WRITE), - associated_keys, - // deploy: 33 (3*11) - ActionThresholds::new(Weight::new(11), Weight::new(33)) - .expect("should create thresholds"), - ); - - // sum: 22, required 33 - can't manage - assert!(!account.can_manage_keys_with(&BTreeSet::from_iter(vec![ - AccountHash::new([3u8; 32]), - AccountHash::new([2u8; 32]), - ]))); - - // sum: 33, required 33 - can manage - assert!(account.can_manage_keys_with(&BTreeSet::from_iter(vec![ - AccountHash::new([4u8; 32]), - AccountHash::new([3u8; 32]), - AccountHash::new([2u8; 32]), - ]))); - - // sum: 34, required 33 - can manage - assert!(account.can_manage_keys_with(&BTreeSet::from_iter(vec![ - AccountHash::new([2u8; 32]), - AccountHash::new([1u8; 32]), - AccountHash::new([4u8; 32]), - AccountHash::new([3u8; 32]), - ]))); - } - - #[test] - fn set_action_threshold_higher_than_total_weight() { - let identity_key = AccountHash::new([1u8; 32]); - let key_1 = AccountHash::new([2u8; 32]); - let key_2 = AccountHash::new([3u8; 32]); - let key_3 = AccountHash::new([4u8; 32]); - let associated_keys = { - let mut res = AssociatedKeys::new(identity_key, Weight::new(1)); - res.add_key(key_1, Weight::new(2)) - .expect("should add key 1"); - res.add_key(key_2, Weight::new(3)) - .expect("should add key 2"); - res.add_key(key_3, Weight::new(4)) - .expect("should add key 3"); - res - }; - let mut account = Account::new( - AccountHash::new([0u8; 32]), - NamedKeys::new(), - URef::new([0u8; 32], AccessRights::READ_ADD_WRITE), - associated_keys, - // deploy: 33 (3*11) - ActionThresholds::new(Weight::new(33), Weight::new(48)) - .expect("should create thresholds"), - ); - - assert_eq!( - account - .set_action_threshold(ActionType::Deployment, Weight::new(1 + 2 + 3 + 4 + 
1)) - .unwrap_err(), - SetThresholdFailure::InsufficientTotalWeight, - ); - assert_eq!( - account - .set_action_threshold(ActionType::Deployment, Weight::new(1 + 2 + 3 + 4 + 245)) - .unwrap_err(), - SetThresholdFailure::InsufficientTotalWeight, - ) - } - - #[test] - fn remove_key_would_violate_action_thresholds() { - let identity_key = AccountHash::new([1u8; 32]); - let key_1 = AccountHash::new([2u8; 32]); - let key_2 = AccountHash::new([3u8; 32]); - let key_3 = AccountHash::new([4u8; 32]); - let associated_keys = { - let mut res = AssociatedKeys::new(identity_key, Weight::new(1)); - res.add_key(key_1, Weight::new(2)) - .expect("should add key 1"); - res.add_key(key_2, Weight::new(3)) - .expect("should add key 2"); - res.add_key(key_3, Weight::new(4)) - .expect("should add key 3"); - res - }; - let mut account = Account::new( - AccountHash::new([0u8; 32]), - NamedKeys::new(), - URef::new([0u8; 32], AccessRights::READ_ADD_WRITE), - associated_keys, - // deploy: 33 (3*11) - ActionThresholds::new(Weight::new(1 + 2 + 3 + 4), Weight::new(1 + 2 + 3 + 4 + 5)) - .expect("should create thresholds"), - ); - - assert_eq!( - account.remove_associated_key(key_3).unwrap_err(), - RemoveKeyFailure::ThresholdViolation, - ) - } - - #[test] - fn updating_key_would_violate_action_thresholds() { - let identity_key = AccountHash::new([1u8; 32]); - let identity_key_weight = Weight::new(1); - let key_1 = AccountHash::new([2u8; 32]); - let key_1_weight = Weight::new(2); - let key_2 = AccountHash::new([3u8; 32]); - let key_2_weight = Weight::new(3); - let key_3 = AccountHash::new([4u8; 32]); - let key_3_weight = Weight::new(4); - let associated_keys = { - let mut res = AssociatedKeys::new(identity_key, identity_key_weight); - res.add_key(key_1, key_1_weight).expect("should add key 1"); - res.add_key(key_2, key_2_weight).expect("should add key 2"); - res.add_key(key_3, key_3_weight).expect("should add key 3"); - // 1 + 2 + 3 + 4 - res - }; - - let deployment_threshold = Weight::new( - 
identity_key_weight.value() - + key_1_weight.value() - + key_2_weight.value() - + key_3_weight.value(), - ); - let key_management_threshold = Weight::new(deployment_threshold.value() + 1); - let mut account = Account::new( - identity_key, - NamedKeys::new(), - URef::new([0u8; 32], AccessRights::READ_ADD_WRITE), - associated_keys, - // deploy: 33 (3*11) - ActionThresholds::new(deployment_threshold, key_management_threshold) - .expect("should create thresholds"), - ); - - // Decreases by 3 - assert_eq!( - account - .clone() - .update_associated_key(key_3, Weight::new(1)) - .unwrap_err(), - UpdateKeyFailure::ThresholdViolation, - ); - - // increase total weight (12) - account - .update_associated_key(identity_key, Weight::new(3)) - .unwrap(); - - // variant a) decrease total weight by 1 (total 11) - account - .clone() - .update_associated_key(key_3, Weight::new(3)) - .unwrap(); - // variant b) decrease total weight by 3 (total 9) - fail - assert_eq!( - account - .update_associated_key(key_3, Weight::new(1)) - .unwrap_err(), - UpdateKeyFailure::ThresholdViolation - ); - } - - #[test] - fn overflowing_should_allow_removal() { - let identity_key = AccountHash::new([42; 32]); - let key_1 = AccountHash::new([2u8; 32]); - let key_2 = AccountHash::new([3u8; 32]); - - let associated_keys = { - // Identity - let mut res = AssociatedKeys::new(identity_key, Weight::new(1)); - - // Spare key - res.add_key(key_1, Weight::new(2)) - .expect("should add key 1"); - // Big key - res.add_key(key_2, Weight::new(255)) - .expect("should add key 2"); - - res - }; - - let mut account = Account::new( - identity_key, - NamedKeys::new(), - URef::new([0u8; 32], AccessRights::READ_ADD_WRITE), - associated_keys, - ActionThresholds::new(Weight::new(1), Weight::new(254)) - .expect("should create thresholds"), - ); - - account.remove_associated_key(key_1).expect("should work") - } - - #[test] - fn overflowing_should_allow_updating() { - let identity_key = AccountHash::new([1; 32]); - let 
identity_key_weight = Weight::new(1); - let key_1 = AccountHash::new([2u8; 32]); - let key_1_weight = Weight::new(3); - let key_2 = AccountHash::new([3u8; 32]); - let key_2_weight = Weight::new(255); - let deployment_threshold = Weight::new(1); - let key_management_threshold = Weight::new(254); - - let associated_keys = { - // Identity - let mut res = AssociatedKeys::new(identity_key, identity_key_weight); - - // Spare key - res.add_key(key_1, key_1_weight).expect("should add key 1"); - // Big key - res.add_key(key_2, key_2_weight).expect("should add key 2"); - - res - }; - - let mut account = Account::new( - identity_key, - NamedKeys::new(), - URef::new([0u8; 32], AccessRights::READ_ADD_WRITE), - associated_keys, - ActionThresholds::new(deployment_threshold, key_management_threshold) - .expect("should create thresholds"), - ); - - // decrease so total weight would be changed from 1 + 3 + 255 to 1 + 1 + 255 - account - .update_associated_key(key_1, Weight::new(1)) - .expect("should work"); - } -} diff --git a/execution_engine/src/shared/account/action_thresholds.rs b/execution_engine/src/shared/account/action_thresholds.rs deleted file mode 100644 index c70e4d2b81..0000000000 --- a/execution_engine/src/shared/account/action_thresholds.rs +++ /dev/null @@ -1,152 +0,0 @@ -use serde::{Deserialize, Serialize}; - -use casper_types::{ - account::{ActionType, SetThresholdFailure, Weight, WEIGHT_SERIALIZED_LENGTH}, - bytesrepr::{self, Error, FromBytes, ToBytes}, -}; - -/// Thresholds that have to be met when executing an action of a certain type. -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct ActionThresholds { - deployment: Weight, - key_management: Weight, -} - -impl ActionThresholds { - /// Creates new ActionThresholds object with provided weights - /// - /// Requires deployment threshold to be lower than or equal to - /// key management threshold. 
- pub fn new( - deployment: Weight, - key_management: Weight, - ) -> Result { - if deployment > key_management { - return Err(SetThresholdFailure::DeploymentThreshold); - } - Ok(ActionThresholds { - deployment, - key_management, - }) - } - /// Sets new threshold for [ActionType::Deployment]. - /// Should return an error if setting new threshold for `action_type` breaks - /// one of the invariants. Currently, invariant is that - /// `ActionType::Deployment` threshold shouldn't be higher than any - /// other, which should be checked both when increasing `Deployment` - /// threshold and decreasing the other. - pub fn set_deployment_threshold( - &mut self, - new_threshold: Weight, - ) -> Result<(), SetThresholdFailure> { - if new_threshold > self.key_management { - Err(SetThresholdFailure::DeploymentThreshold) - } else { - self.deployment = new_threshold; - Ok(()) - } - } - - /// Sets new threshold for [ActionType::KeyManagement]. - pub fn set_key_management_threshold( - &mut self, - new_threshold: Weight, - ) -> Result<(), SetThresholdFailure> { - if self.deployment > new_threshold { - Err(SetThresholdFailure::KeyManagementThreshold) - } else { - self.key_management = new_threshold; - Ok(()) - } - } - - pub fn deployment(&self) -> &Weight { - &self.deployment - } - - pub fn key_management(&self) -> &Weight { - &self.key_management - } - - /// Unified function that takes an action type, and changes appropriate - /// threshold defined by the [ActionType] variants. 
- pub fn set_threshold( - &mut self, - action_type: ActionType, - new_threshold: Weight, - ) -> Result<(), SetThresholdFailure> { - match action_type { - ActionType::Deployment => self.set_deployment_threshold(new_threshold), - ActionType::KeyManagement => self.set_key_management_threshold(new_threshold), - } - } -} - -impl Default for ActionThresholds { - fn default() -> Self { - ActionThresholds { - deployment: Weight::new(1), - key_management: Weight::new(1), - } - } -} - -impl ToBytes for ActionThresholds { - fn to_bytes(&self) -> Result, Error> { - let mut result = bytesrepr::unchecked_allocate_buffer(self); - result.append(&mut self.deployment.to_bytes()?); - result.append(&mut self.key_management.to_bytes()?); - Ok(result) - } - - fn serialized_length(&self) -> usize { - 2 * WEIGHT_SERIALIZED_LENGTH - } -} - -impl FromBytes for ActionThresholds { - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { - let (deployment, rem) = Weight::from_bytes(&bytes)?; - let (key_management, rem) = Weight::from_bytes(&rem)?; - let ret = ActionThresholds { - deployment, - key_management, - }; - Ok((ret, rem)) - } -} - -#[cfg(any(feature = "gens", test))] -pub mod gens { - use proptest::prelude::*; - - use super::ActionThresholds; - - pub fn action_thresholds_arb() -> impl Strategy { - Just(Default::default()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn should_create_new_action_thresholds() { - let action_thresholds = ActionThresholds::new(Weight::new(1), Weight::new(42)).unwrap(); - assert_eq!(*action_thresholds.deployment(), Weight::new(1)); - assert_eq!(*action_thresholds.key_management(), Weight::new(42)); - } - - #[test] - fn should_not_create_action_thresholds_with_invalid_deployment_threshold() { - // deployment cant be greater than key management - assert!(ActionThresholds::new(Weight::new(5), Weight::new(1)).is_err()); - } - - #[test] - fn serialization_roundtrip() { - let action_thresholds = ActionThresholds::new(Weight::new(1), 
Weight::new(42)).unwrap(); - bytesrepr::test_serialization_roundtrip(&action_thresholds); - } -} diff --git a/execution_engine/src/shared/account/associated_keys.rs b/execution_engine/src/shared/account/associated_keys.rs deleted file mode 100644 index 69eee46378..0000000000 --- a/execution_engine/src/shared/account/associated_keys.rs +++ /dev/null @@ -1,367 +0,0 @@ -use std::collections::{BTreeMap, BTreeSet}; - -use serde::{Deserialize, Serialize}; - -use casper_types::{ - account::{ - AccountHash, AddKeyFailure, RemoveKeyFailure, UpdateKeyFailure, Weight, MAX_ASSOCIATED_KEYS, - }, - bytesrepr::{Error, FromBytes, ToBytes}, -}; - -#[derive(Default, PartialOrd, Ord, PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] -pub struct AssociatedKeys(BTreeMap); - -impl AssociatedKeys { - pub fn new(key: AccountHash, weight: Weight) -> AssociatedKeys { - let mut bt: BTreeMap = BTreeMap::new(); - bt.insert(key, weight); - AssociatedKeys(bt) - } - - /// Adds new AssociatedKey to the set. - /// Returns true if added successfully, false otherwise. - #[allow(clippy::map_entry)] - pub fn add_key(&mut self, key: AccountHash, weight: Weight) -> Result<(), AddKeyFailure> { - if self.0.len() == MAX_ASSOCIATED_KEYS { - Err(AddKeyFailure::MaxKeysLimit) - } else if self.0.contains_key(&key) { - Err(AddKeyFailure::DuplicateKey) - } else { - self.0.insert(key, weight); - Ok(()) - } - } - - /// Removes key from the associated keys set. - /// Returns true if value was found in the set prior to the removal, false - /// otherwise. - pub fn remove_key(&mut self, key: &AccountHash) -> Result<(), RemoveKeyFailure> { - self.0 - .remove(key) - .map(|_| ()) - .ok_or(RemoveKeyFailure::MissingKey) - } - - /// Adds new AssociatedKey to the set. - /// Returns true if added successfully, false otherwise. 
- #[allow(clippy::map_entry)] - pub fn update_key(&mut self, key: AccountHash, weight: Weight) -> Result<(), UpdateKeyFailure> { - if !self.0.contains_key(&key) { - return Err(UpdateKeyFailure::MissingKey); - } - - self.0.insert(key, weight); - Ok(()) - } - - pub fn get(&self, key: &AccountHash) -> Option<&Weight> { - self.0.get(key) - } - - pub fn contains_key(&self, key: &AccountHash) -> bool { - self.0.contains_key(key) - } - - pub fn iter(&self) -> impl Iterator { - self.0.iter() - } - - pub fn len(&self) -> usize { - self.0.len() - } - - pub fn is_empty(&self) -> bool { - self.0.is_empty() - } - - /// Helper method that calculates weight for keys that comes from any - /// source. - /// - /// This method is not concerned about uniqueness of the passed iterable. - /// Uniqueness is determined based on the input collection properties, - /// which is either BTreeSet (in [`AssociatedKeys::calculate_keys_weight`]) - /// or BTreeMap (in [`AssociatedKeys::total_keys_weight`]). - fn calculate_any_keys_weight<'a>(&self, keys: impl Iterator) -> Weight { - let total = keys - .filter_map(|key| self.0.get(key)) - .fold(0u8, |acc, w| acc.saturating_add(w.value())); - - Weight::new(total) - } - - /// Calculates total weight of authorization keys provided by an argument - pub fn calculate_keys_weight(&self, authorization_keys: &BTreeSet) -> Weight { - self.calculate_any_keys_weight(authorization_keys.iter()) - } - - /// Calculates total weight of all authorization keys - pub fn total_keys_weight(&self) -> Weight { - self.calculate_any_keys_weight(self.0.keys()) - } - - /// Calculates total weight of all authorization keys excluding a given key - pub fn total_keys_weight_excluding(&self, account_hash: AccountHash) -> Weight { - self.calculate_any_keys_weight(self.0.keys().filter(|&&element| element != account_hash)) - } -} - -impl From> for AssociatedKeys { - fn from(associated_keys: BTreeMap) -> Self { - Self(associated_keys) - } -} - -impl ToBytes for AssociatedKeys { - fn 
to_bytes(&self) -> Result, Error> { - self.0.to_bytes() - } - - fn serialized_length(&self) -> usize { - self.0.serialized_length() - } -} - -impl FromBytes for AssociatedKeys { - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { - let (num_keys, mut stream) = u32::from_bytes(bytes)?; - if num_keys as usize > MAX_ASSOCIATED_KEYS { - return Err(Error::Formatting); - } - - let mut associated_keys = BTreeMap::new(); - for _ in 0..num_keys { - let (k, rem) = FromBytes::from_bytes(stream)?; - let (v, rem) = FromBytes::from_bytes(rem)?; - associated_keys.insert(k, v); - stream = rem; - } - Ok((AssociatedKeys(associated_keys), stream)) - } -} - -#[cfg(any(feature = "gens", test))] -pub mod gens { - use proptest::prelude::*; - - use casper_types::{ - account::MAX_ASSOCIATED_KEYS, - gens::{account_hash_arb, weight_arb}, - }; - - use super::AssociatedKeys; - - pub fn associated_keys_arb() -> impl Strategy { - proptest::collection::btree_map(account_hash_arb(), weight_arb(), MAX_ASSOCIATED_KEYS - 1) - .prop_map(|keys| { - let mut associated_keys = AssociatedKeys::default(); - keys.into_iter().for_each(|(k, v)| { - associated_keys.add_key(k, v).unwrap(); - }); - associated_keys - }) - } -} - -#[cfg(test)] -mod tests { - use std::{ - collections::{BTreeMap, BTreeSet}, - iter::FromIterator, - }; - - use casper_types::{ - account::{AccountHash, AddKeyFailure, Weight, ACCOUNT_HASH_LENGTH, MAX_ASSOCIATED_KEYS}, - bytesrepr::{self, ToBytes}, - }; - - use super::AssociatedKeys; - - #[test] - fn associated_keys_add() { - let mut keys = - AssociatedKeys::new(AccountHash::new([0u8; ACCOUNT_HASH_LENGTH]), Weight::new(1)); - let new_pk = AccountHash::new([1u8; ACCOUNT_HASH_LENGTH]); - let new_pk_weight = Weight::new(2); - assert!(keys.add_key(new_pk, new_pk_weight).is_ok()); - assert_eq!(keys.get(&new_pk), Some(&new_pk_weight)) - } - - #[test] - fn associated_keys_add_full() { - let map = (0..MAX_ASSOCIATED_KEYS).map(|k| { - ( - AccountHash::new([k as u8; 
ACCOUNT_HASH_LENGTH]), - Weight::new(k as u8), - ) - }); - assert_eq!(map.len(), 10); - let mut keys = { - let mut tmp = AssociatedKeys::default(); - map.for_each(|(key, weight)| assert!(tmp.add_key(key, weight).is_ok())); - tmp - }; - assert_eq!( - keys.add_key( - AccountHash::new([100u8; ACCOUNT_HASH_LENGTH]), - Weight::new(100) - ), - Err(AddKeyFailure::MaxKeysLimit) - ) - } - - #[test] - fn associated_keys_add_duplicate() { - let pk = AccountHash::new([0u8; ACCOUNT_HASH_LENGTH]); - let weight = Weight::new(1); - let mut keys = AssociatedKeys::new(pk, weight); - assert_eq!( - keys.add_key(pk, Weight::new(10)), - Err(AddKeyFailure::DuplicateKey) - ); - assert_eq!(keys.get(&pk), Some(&weight)); - } - - #[test] - fn associated_keys_remove() { - let pk = AccountHash::new([0u8; ACCOUNT_HASH_LENGTH]); - let weight = Weight::new(1); - let mut keys = AssociatedKeys::new(pk, weight); - assert!(keys.remove_key(&pk).is_ok()); - assert!(keys - .remove_key(&AccountHash::new([1u8; ACCOUNT_HASH_LENGTH])) - .is_err()); - } - - #[test] - fn associated_keys_calculate_keys_once() { - let key_1 = AccountHash::new([0; 32]); - let key_2 = AccountHash::new([1; 32]); - let key_3 = AccountHash::new([2; 32]); - let mut keys = AssociatedKeys::default(); - - keys.add_key(key_2, Weight::new(2)) - .expect("should add key_1"); - keys.add_key(key_1, Weight::new(1)) - .expect("should add key_1"); - keys.add_key(key_3, Weight::new(3)) - .expect("should add key_1"); - - assert_eq!( - keys.calculate_keys_weight(&BTreeSet::from_iter(vec![ - key_1, key_2, key_3, key_1, key_2, key_3, - ])), - Weight::new(1 + 2 + 3) - ); - } - - #[test] - fn associated_keys_total_weight() { - let associated_keys = { - let mut res = AssociatedKeys::new(AccountHash::new([1u8; 32]), Weight::new(1)); - res.add_key(AccountHash::new([2u8; 32]), Weight::new(11)) - .expect("should add key 1"); - res.add_key(AccountHash::new([3u8; 32]), Weight::new(12)) - .expect("should add key 2"); - res.add_key(AccountHash::new([4u8; 32]), 
Weight::new(13)) - .expect("should add key 3"); - res - }; - assert_eq!( - associated_keys.total_keys_weight(), - Weight::new(1 + 11 + 12 + 13) - ); - } - - #[test] - fn associated_keys_total_weight_excluding() { - let identity_key = AccountHash::new([1u8; 32]); - let identity_key_weight = Weight::new(1); - - let key_1 = AccountHash::new([2u8; 32]); - let key_1_weight = Weight::new(11); - - let key_2 = AccountHash::new([3u8; 32]); - let key_2_weight = Weight::new(12); - - let key_3 = AccountHash::new([4u8; 32]); - let key_3_weight = Weight::new(13); - - let associated_keys = { - let mut res = AssociatedKeys::new(identity_key, identity_key_weight); - res.add_key(key_1, key_1_weight).expect("should add key 1"); - res.add_key(key_2, key_2_weight).expect("should add key 2"); - res.add_key(key_3, key_3_weight).expect("should add key 3"); - res - }; - assert_eq!( - associated_keys.total_keys_weight_excluding(key_2), - Weight::new(identity_key_weight.value() + key_1_weight.value() + key_3_weight.value()) - ); - } - - #[test] - fn overflowing_keys_weight() { - let identity_key = AccountHash::new([1u8; 32]); - let key_1 = AccountHash::new([2u8; 32]); - let key_2 = AccountHash::new([3u8; 32]); - let key_3 = AccountHash::new([4u8; 32]); - - let identity_key_weight = Weight::new(250); - let weight_1 = Weight::new(1); - let weight_2 = Weight::new(2); - let weight_3 = Weight::new(3); - - let saturated_weight = Weight::new(u8::max_value()); - - let associated_keys = { - let mut res = AssociatedKeys::new(identity_key, identity_key_weight); - - res.add_key(key_1, weight_1).expect("should add key 1"); - res.add_key(key_2, weight_2).expect("should add key 2"); - res.add_key(key_3, weight_3).expect("should add key 3"); - res - }; - - assert_eq!( - associated_keys.calculate_keys_weight(&BTreeSet::from_iter(vec![ - identity_key, // 250 - key_1, // 251 - key_2, // 253 - key_3, // 256 - error - ])), - saturated_weight, - ); - } - - #[test] - fn serialization_roundtrip() { - let mut keys = 
AssociatedKeys::default(); - keys.add_key(AccountHash::new([1; 32]), Weight::new(1)) - .unwrap(); - keys.add_key(AccountHash::new([2; 32]), Weight::new(2)) - .unwrap(); - keys.add_key(AccountHash::new([3; 32]), Weight::new(3)) - .unwrap(); - bytesrepr::test_serialization_roundtrip(&keys); - } - - #[test] - fn should_not_panic_deserializing_malicious_data() { - let malicious_map: BTreeMap = (1usize..=(MAX_ASSOCIATED_KEYS + 1)) - .map(|i| { - let i_bytes = i.to_be_bytes(); - let mut account_hash_bytes = [0u8; 32]; - account_hash_bytes[32 - i_bytes.len()..].copy_from_slice(&i_bytes); - (AccountHash::new(account_hash_bytes), Weight::new(i as u8)) - }) - .collect(); - - let bytes = malicious_map.to_bytes().expect("should serialize"); - - assert_eq!( - bytesrepr::deserialize::(bytes).expect_err("should deserialize"), - bytesrepr::Error::Formatting - ); - } -} diff --git a/execution_engine/src/shared/additive_map.rs b/execution_engine/src/shared/additive_map.rs deleted file mode 100644 index 949dde2ff7..0000000000 --- a/execution_engine/src/shared/additive_map.rs +++ /dev/null @@ -1,169 +0,0 @@ -use std::{ - borrow::Borrow, - collections::{ - hash_map::{IntoIter, Iter, IterMut, Keys, RandomState, Values}, - HashMap, - }, - fmt::{self, Debug, Formatter}, - hash::{BuildHasher, Hash}, - iter::{FromIterator, IntoIterator}, - ops::{AddAssign, Index}, -}; - -#[derive(Clone)] -pub struct AdditiveMap(HashMap); - -impl AdditiveMap { - pub fn new() -> Self { - Self(Default::default()) - } -} - -impl AdditiveMap { - /// Modifies the existing value stored under `key`, or the default value for `V` if none, by - /// adding `value_to_add`. 
- pub fn insert_add(&mut self, key: K, value_to_add: V) { - let current_value = self.0.entry(key).or_default(); - *current_value += value_to_add; - } -} - -impl AdditiveMap { - pub fn keys(&self) -> Keys<'_, K, V> { - self.0.keys() - } - - pub fn values(&self) -> Values<'_, K, V> { - self.0.values() - } - - pub fn iter(&self) -> Iter<'_, K, V> { - self.0.iter() - } - - pub fn len(&self) -> usize { - self.0.len() - } - - pub fn is_empty(&self) -> bool { - self.0.is_empty() - } -} - -impl AdditiveMap { - pub fn get(&self, key: &Q) -> Option<&V> - where - K: Borrow, - Q: Eq + Hash + ?Sized, - { - self.0.get(key) - } - - pub fn insert(&mut self, key: K, value: V) -> Option { - self.0.insert(key, value) - } - - pub fn remove(&mut self, key: &Q) -> Option - where - K: Borrow, - Q: Eq + Hash + ?Sized, - { - self.0.remove(key) - } - - pub fn remove_entry(&mut self, key: &Q) -> Option<(K, V)> - where - K: Borrow, - Q: Eq + Hash + ?Sized, - { - self.0.remove_entry(key) - } -} - -impl Default for AdditiveMap { - fn default() -> Self { - Self(HashMap::with_hasher(Default::default())) - } -} - -impl<'a, K, V, S> IntoIterator for &'a AdditiveMap { - type Item = (&'a K, &'a V); - type IntoIter = Iter<'a, K, V>; - - fn into_iter(self) -> Iter<'a, K, V> { - self.0.iter() - } -} - -impl<'a, K, V, S> IntoIterator for &'a mut AdditiveMap { - type Item = (&'a K, &'a mut V); - type IntoIter = IterMut<'a, K, V>; - - fn into_iter(self) -> IterMut<'a, K, V> { - self.0.iter_mut() - } -} - -impl IntoIterator for AdditiveMap { - type Item = (K, V); - type IntoIter = IntoIter; - - fn into_iter(self) -> IntoIter { - self.0.into_iter() - } -} - -impl FromIterator<(K, V)> for AdditiveMap { - fn from_iter>(iter: T) -> Self { - Self(HashMap::from_iter(iter)) - } -} - -impl Index<&Q> for AdditiveMap -where - K: Eq + Hash + Borrow, - Q: Eq + Hash + ?Sized, - S: BuildHasher, -{ - type Output = V; - - fn index(&self, key: &Q) -> &V { - &self.0[key] - } -} - -impl PartialEq for AdditiveMap { - fn 
eq(&self, other: &AdditiveMap) -> bool { - self.0 == other.0 - } -} - -impl Eq for AdditiveMap {} - -impl Debug for AdditiveMap { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - self.0.fmt(f) - } -} - -#[cfg(test)] -mod tests { - use super::AdditiveMap; - use crate::shared::transform::Transform; - - #[test] - fn insert_add() { - let key = "key"; - let mut int_map = AdditiveMap::new(); - int_map.insert_add(key, 1); - assert_eq!(1, int_map[key]); - int_map.insert_add(key, 2); - assert_eq!(3, int_map[key]); - - let mut transform_map = AdditiveMap::new(); - transform_map.insert_add(key, Transform::AddUInt64(1)); - assert_eq!(Transform::AddUInt64(1), transform_map[key]); - transform_map.insert_add(key, Transform::AddInt32(2)); - assert_eq!(Transform::AddInt32(3), transform_map[key]); - } -} diff --git a/execution_engine/src/shared/gas.rs b/execution_engine/src/shared/gas.rs deleted file mode 100644 index f682f6ad68..0000000000 --- a/execution_engine/src/shared/gas.rs +++ /dev/null @@ -1,216 +0,0 @@ -use std::{fmt, iter::Sum}; - -use num::Zero; - -use casper_types::U512; - -use crate::shared::motes::Motes; - -#[derive(Debug, Default, Copy, Clone, Eq, PartialEq, Ord, PartialOrd)] -pub struct Gas(U512); - -impl Gas { - pub fn new(value: U512) -> Self { - Gas(value) - } - - pub fn value(&self) -> U512 { - self.0 - } - - pub fn from_motes(motes: Motes, conv_rate: u64) -> Option { - motes - .value() - .checked_div(U512::from(conv_rate)) - .map(Self::new) - } - - pub fn checked_add(&self, rhs: Self) -> Option { - self.0.checked_add(rhs.value()).map(Self::new) - } - - pub fn checked_sub(&self, rhs: Self) -> Option { - self.0.checked_sub(rhs.value()).map(Self::new) - } -} - -impl fmt::Display for Gas { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{:?}", self.0) - } -} - -impl std::ops::Add for Gas { - type Output = Gas; - - fn add(self, rhs: Self) -> Self::Output { - let val = self.value() + rhs.value(); - Gas::new(val) - } -} - -impl 
std::ops::Sub for Gas { - type Output = Gas; - - fn sub(self, rhs: Self) -> Self::Output { - let val = self.value() - rhs.value(); - Gas::new(val) - } -} - -impl std::ops::Div for Gas { - type Output = Gas; - - fn div(self, rhs: Self) -> Self::Output { - let val = self.value() / rhs.value(); - Gas::new(val) - } -} - -impl std::ops::Mul for Gas { - type Output = Gas; - - fn mul(self, rhs: Self) -> Self::Output { - let val = self.value() * rhs.value(); - Gas::new(val) - } -} - -impl std::ops::AddAssign for Gas { - fn add_assign(&mut self, rhs: Self) { - self.0 += rhs.0 - } -} - -impl Zero for Gas { - fn zero() -> Self { - Gas::new(U512::zero()) - } - - fn is_zero(&self) -> bool { - self.0.is_zero() - } -} - -impl Sum for Gas { - fn sum>(iter: I) -> Self { - iter.fold(Gas::zero(), std::ops::Add::add) - } -} - -impl From for Gas { - fn from(gas: u32) -> Self { - let gas_u512: U512 = gas.into(); - Gas::new(gas_u512) - } -} - -impl From for Gas { - fn from(gas: u64) -> Self { - let gas_u512: U512 = gas.into(); - Gas::new(gas_u512) - } -} - -#[cfg(test)] -mod tests { - use casper_types::U512; - - use crate::shared::{gas::Gas, motes::Motes}; - - #[test] - fn should_be_able_to_get_instance_of_gas() { - let initial_value = 1; - let gas = Gas::new(U512::from(initial_value)); - assert_eq!( - initial_value, - gas.value().as_u64(), - "should have equal value" - ) - } - - #[test] - fn should_be_able_to_compare_two_instances_of_gas() { - let left_gas = Gas::new(U512::from(1)); - let right_gas = Gas::new(U512::from(1)); - assert_eq!(left_gas, right_gas, "should be equal"); - let right_gas = Gas::new(U512::from(2)); - assert_ne!(left_gas, right_gas, "should not be equal") - } - - #[test] - fn should_be_able_to_add_two_instances_of_gas() { - let left_gas = Gas::new(U512::from(1)); - let right_gas = Gas::new(U512::from(1)); - let expected_gas = Gas::new(U512::from(2)); - assert_eq!((left_gas + right_gas), expected_gas, "should be equal") - } - - #[test] - fn 
should_be_able_to_subtract_two_instances_of_gas() { - let left_gas = Gas::new(U512::from(1)); - let right_gas = Gas::new(U512::from(1)); - let expected_gas = Gas::new(U512::from(0)); - assert_eq!((left_gas - right_gas), expected_gas, "should be equal") - } - - #[test] - fn should_be_able_to_multiply_two_instances_of_gas() { - let left_gas = Gas::new(U512::from(100)); - let right_gas = Gas::new(U512::from(10)); - let expected_gas = Gas::new(U512::from(1000)); - assert_eq!((left_gas * right_gas), expected_gas, "should be equal") - } - - #[test] - fn should_be_able_to_divide_two_instances_of_gas() { - let left_gas = Gas::new(U512::from(1000)); - let right_gas = Gas::new(U512::from(100)); - let expected_gas = Gas::new(U512::from(10)); - assert_eq!((left_gas / right_gas), expected_gas, "should be equal") - } - - #[test] - fn should_be_able_to_convert_from_mote() { - let mote = Motes::new(U512::from(100)); - let gas = Gas::from_motes(mote, 10).expect("should have gas"); - let expected_gas = Gas::new(U512::from(10)); - assert_eq!(gas, expected_gas, "should be equal") - } - - #[test] - fn should_be_able_to_default() { - let gas = Gas::default(); - let expected_gas = Gas::new(U512::from(0)); - assert_eq!(gas, expected_gas, "should be equal") - } - - #[test] - fn should_be_able_to_compare_relative_value() { - let left_gas = Gas::new(U512::from(100)); - let right_gas = Gas::new(U512::from(10)); - assert!(left_gas > right_gas, "should be gt"); - let right_gas = Gas::new(U512::from(100)); - assert!(left_gas >= right_gas, "should be gte"); - assert!(left_gas <= right_gas, "should be lte"); - let left_gas = Gas::new(U512::from(10)); - assert!(left_gas < right_gas, "should be lt"); - } - - #[test] - fn should_default() { - let left_gas = Gas::new(U512::from(0)); - let right_gas = Gas::default(); - assert_eq!(left_gas, right_gas, "should be equal"); - let u512 = U512::zero(); - assert_eq!(left_gas.value(), u512, "should be equal"); - } - - #[test] - fn 
should_support_checked_div_from_motes() { - let motes = Motes::new(U512::zero()); - let conv_rate = 0; - let maybe = Gas::from_motes(motes, conv_rate); - assert!(maybe.is_none(), "should be none due to divide by zero"); - } -} diff --git a/execution_engine/src/shared/host_function_costs.rs b/execution_engine/src/shared/host_function_costs.rs deleted file mode 100644 index 75a0d84de2..0000000000 --- a/execution_engine/src/shared/host_function_costs.rs +++ /dev/null @@ -1,754 +0,0 @@ -use datasize::DataSize; -use rand::{distributions::Standard, prelude::Distribution, Rng}; -use serde::{Deserialize, Serialize}; - -use casper_types::bytesrepr::{self, FromBytes, ToBytes, U32_SERIALIZED_LENGTH}; - -use super::gas::Gas; - -/// Representation of argument's cost. -pub type Cost = u32; - -const COST_SERIALIZED_LENGTH: usize = U32_SERIALIZED_LENGTH; - -/// An identifier that represents an unused argument. -const NOT_USED: Cost = 0; - -/// An arbitrary default fixed cost for host functions that were not researched yet. 
-const DEFAULT_FIXED_COST: Cost = 200; - -const DEFAULT_ADD_ASSOCIATED_KEY_COST: u32 = 9_000; -const DEFAULT_ADD_COST: u32 = 5_800; - -const DEFAULT_CALL_CONTRACT_COST: u32 = 4_500; -const DEFAULT_CALL_CONTRACT_ARGS_SIZE_WEIGHT: u32 = 420; - -const DEFAULT_CREATE_PURSE_COST: u32 = 170_000; -const DEFAULT_GET_BALANCE_COST: u32 = 3_800; -const DEFAULT_GET_BLOCKTIME_COST: u32 = 330; -const DEFAULT_GET_CALLER_COST: u32 = 380; -const DEFAULT_GET_KEY_COST: u32 = 2_000; -const DEFAULT_GET_KEY_NAME_SIZE_WEIGHT: u32 = 440; -const DEFAULT_GET_MAIN_PURSE_COST: u32 = 1_300; -const DEFAULT_GET_PHASE_COST: u32 = 710; -const DEFAULT_GET_SYSTEM_CONTRACT_COST: u32 = 1_100; -const DEFAULT_HAS_KEY_COST: u32 = 1_500; -const DEFAULT_HAS_KEY_NAME_SIZE_WEIGHT: u32 = 840; -const DEFAULT_IS_VALID_UREF_COST: u32 = 760; -const DEFAULT_LOAD_NAMED_KEYS_COST: u32 = 42_000; -const DEFAULT_NEW_UREF_COST: u32 = 17_000; -const DEFAULT_NEW_UREF_VALUE_SIZE_WEIGHT: u32 = 590; - -const DEFAULT_PRINT_COST: u32 = 20_000; -const DEFAULT_PRINT_TEXT_SIZE_WEIGHT: u32 = 4_600; - -const DEFAULT_PUT_KEY_COST: u32 = 38_000; -const DEFAULT_PUT_KEY_NAME_SIZE_WEIGHT: u32 = 1_100; - -const DEFAULT_READ_HOST_BUFFER_COST: u32 = 3_500; -const DEFAULT_READ_HOST_BUFFER_DEST_SIZE_WEIGHT: u32 = 310; - -const DEFAULT_READ_VALUE_COST: u32 = 6_000; -const DEFAULT_READ_VALUE_LOCAL_COST: u32 = 5_500; -const DEFAULT_READ_VALUE_LOCAL_KEY_SIZE_WEIGHT: u32 = 590; - -const DEFAULT_REMOVE_ASSOCIATED_KEY_COST: u32 = 4_200; - -const DEFAULT_REMOVE_KEY_COST: u32 = 61_000; -const DEFAULT_REMOVE_KEY_NAME_SIZE_WEIGHT: u32 = 3_200; - -const DEFAULT_RET_COST: u32 = 23_000; -const DEFAULT_RET_VALUE_SIZE_WEIGHT: u32 = 420; - -const DEFAULT_REVERT_COST: u32 = 500; -const DEFAULT_SET_ACTION_THRESHOLD_COST: u32 = 74_000; -const DEFAULT_TRANSFER_FROM_PURSE_TO_ACCOUNT_COST: u32 = 160_000; -const DEFAULT_TRANSFER_FROM_PURSE_TO_PURSE_COST: u32 = 82_000; -const DEFAULT_TRANSFER_TO_ACCOUNT_COST: u32 = 24_000; -const DEFAULT_UPDATE_ASSOCIATED_KEY_COST: 
u32 = 4_200; - -const DEFAULT_WRITE_COST: u32 = 14_000; -const DEFAULT_WRITE_VALUE_SIZE_WEIGHT: u32 = 980; - -const DEFAULT_WRITE_LOCAL_COST: u32 = 9_500; -const DEFAULT_WRITE_LOCAL_KEY_BYTES_SIZE_WEIGHT: u32 = 1_800; -const DEFAULT_WRITE_LOCAL_VALUE_SIZE_WEIGHT: u32 = 520; - -/// Representation of a host function cost -/// -/// Total gas cost is equal to `cost` + sum of each argument weight multiplied by the byte size of -/// the data. -#[derive(Copy, Clone, PartialEq, Eq, Deserialize, Serialize, Debug, DataSize)] -pub struct HostFunction { - /// How much user is charged for cost only - cost: Cost, - arguments: T, -} - -impl Default for HostFunction -where - T: Default, -{ - fn default() -> Self { - HostFunction::new(DEFAULT_FIXED_COST, Default::default()) - } -} - -impl HostFunction { - pub fn new(cost: Cost, arguments: T) -> Self { - Self { cost, arguments } - } - - pub fn cost(&self) -> Cost { - self.cost - } -} - -impl HostFunction -where - T: Default, -{ - pub fn fixed(cost: Cost) -> Self { - Self { - cost, - ..Default::default() - } - } -} - -impl HostFunction -where - T: AsRef<[Cost]>, -{ - pub fn arguments(&self) -> &[Cost] { - self.arguments.as_ref() - } - - /// Calculate gas cost for a host function - pub fn calculate_gas_cost(&self, weights: T) -> Gas { - let mut gas = Gas::new(self.cost.into()); - for (argument, weight) in self.arguments.as_ref().iter().zip(weights.as_ref()) { - let lhs = Gas::new((*argument).into()); - let rhs = Gas::new((*weight).into()); - gas += lhs * rhs; - } - gas - } -} - -impl Distribution> for Standard -where - Standard: Distribution, - T: AsRef<[Cost]>, -{ - fn sample(&self, rng: &mut R) -> HostFunction { - let cost = rng.gen::(); - let arguments = rng.gen(); - HostFunction::new(cost, arguments) - } -} - -impl ToBytes for HostFunction -where - T: AsRef<[Cost]>, -{ - fn to_bytes(&self) -> Result, bytesrepr::Error> { - let mut ret = bytesrepr::unchecked_allocate_buffer(self); - ret.append(&mut self.cost.to_bytes()?); - for 
value in self.arguments.as_ref().iter() { - ret.append(&mut value.to_bytes()?); - } - Ok(ret) - } - - fn serialized_length(&self) -> usize { - self.cost.serialized_length() + (COST_SERIALIZED_LENGTH * self.arguments.as_ref().len()) - } -} - -impl FromBytes for HostFunction -where - T: Default + AsMut<[Cost]>, -{ - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { - let (cost, mut bytes) = FromBytes::from_bytes(bytes)?; - let mut arguments = T::default(); - let arguments_mut = arguments.as_mut(); - for ith_argument in arguments_mut { - let (cost, rem) = FromBytes::from_bytes(bytes)?; - *ith_argument = cost; - bytes = rem; - } - Ok((Self { cost, arguments }, bytes)) - } -} - -#[derive(Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Debug, DataSize)] -pub struct HostFunctionCosts { - pub read_value: HostFunction<[Cost; 3]>, - pub read_value_local: HostFunction<[Cost; 3]>, - pub write: HostFunction<[Cost; 4]>, - pub write_local: HostFunction<[Cost; 4]>, - pub add: HostFunction<[Cost; 4]>, - pub new_uref: HostFunction<[Cost; 3]>, - pub load_named_keys: HostFunction<[Cost; 2]>, - pub ret: HostFunction<[Cost; 2]>, - pub get_key: HostFunction<[Cost; 5]>, - pub has_key: HostFunction<[Cost; 2]>, - pub put_key: HostFunction<[Cost; 4]>, - pub remove_key: HostFunction<[Cost; 2]>, - pub revert: HostFunction<[Cost; 1]>, - pub is_valid_uref: HostFunction<[Cost; 2]>, - pub add_associated_key: HostFunction<[Cost; 3]>, - pub remove_associated_key: HostFunction<[Cost; 2]>, - pub update_associated_key: HostFunction<[Cost; 3]>, - pub set_action_threshold: HostFunction<[Cost; 2]>, - pub get_caller: HostFunction<[Cost; 1]>, - pub get_blocktime: HostFunction<[Cost; 1]>, - pub create_purse: HostFunction<[Cost; 2]>, - pub transfer_to_account: HostFunction<[Cost; 7]>, - pub transfer_from_purse_to_account: HostFunction<[Cost; 9]>, - pub transfer_from_purse_to_purse: HostFunction<[Cost; 8]>, - pub get_balance: HostFunction<[Cost; 3]>, - pub get_phase: 
HostFunction<[Cost; 1]>, - pub get_system_contract: HostFunction<[Cost; 3]>, - pub get_main_purse: HostFunction<[Cost; 1]>, - pub read_host_buffer: HostFunction<[Cost; 3]>, - pub create_contract_package_at_hash: HostFunction<[Cost; 2]>, - pub create_contract_user_group: HostFunction<[Cost; 8]>, - pub add_contract_version: HostFunction<[Cost; 10]>, - pub disable_contract_version: HostFunction<[Cost; 4]>, - pub call_contract: HostFunction<[Cost; 7]>, - pub call_versioned_contract: HostFunction<[Cost; 9]>, - pub get_named_arg_size: HostFunction<[Cost; 3]>, - pub get_named_arg: HostFunction<[Cost; 4]>, - pub remove_contract_user_group: HostFunction<[Cost; 4]>, - pub provision_contract_user_group_uref: HostFunction<[Cost; 5]>, - pub remove_contract_user_group_urefs: HostFunction<[Cost; 6]>, - pub print: HostFunction<[Cost; 2]>, - pub blake2b: HostFunction<[Cost; 4]>, -} - -impl Default for HostFunctionCosts { - fn default() -> Self { - Self { - read_value: HostFunction::fixed(DEFAULT_READ_VALUE_COST), - read_value_local: HostFunction::new( - DEFAULT_READ_VALUE_LOCAL_COST, - [NOT_USED, DEFAULT_READ_VALUE_LOCAL_KEY_SIZE_WEIGHT, NOT_USED], - ), - write: HostFunction::new( - DEFAULT_WRITE_COST, - [ - NOT_USED, - NOT_USED, - NOT_USED, - DEFAULT_WRITE_VALUE_SIZE_WEIGHT, - ], - ), - write_local: HostFunction::new( - DEFAULT_WRITE_LOCAL_COST, - [ - NOT_USED, - DEFAULT_WRITE_LOCAL_KEY_BYTES_SIZE_WEIGHT, - NOT_USED, - DEFAULT_WRITE_LOCAL_VALUE_SIZE_WEIGHT, - ], - ), - add: HostFunction::fixed(DEFAULT_ADD_COST), - new_uref: HostFunction::new( - DEFAULT_NEW_UREF_COST, - [NOT_USED, NOT_USED, DEFAULT_NEW_UREF_VALUE_SIZE_WEIGHT], - ), - load_named_keys: HostFunction::fixed(DEFAULT_LOAD_NAMED_KEYS_COST), - ret: HostFunction::new(DEFAULT_RET_COST, [NOT_USED, DEFAULT_RET_VALUE_SIZE_WEIGHT]), - get_key: HostFunction::new( - DEFAULT_GET_KEY_COST, - [ - NOT_USED, - DEFAULT_GET_KEY_NAME_SIZE_WEIGHT, - NOT_USED, - NOT_USED, - NOT_USED, - ], - ), - has_key: HostFunction::new( - 
DEFAULT_HAS_KEY_COST, - [NOT_USED, DEFAULT_HAS_KEY_NAME_SIZE_WEIGHT], - ), - put_key: HostFunction::new( - DEFAULT_PUT_KEY_COST, - [ - NOT_USED, - DEFAULT_PUT_KEY_NAME_SIZE_WEIGHT, - NOT_USED, - NOT_USED, - ], - ), - remove_key: HostFunction::new( - DEFAULT_REMOVE_KEY_COST, - [NOT_USED, DEFAULT_REMOVE_KEY_NAME_SIZE_WEIGHT], - ), - revert: HostFunction::fixed(DEFAULT_REVERT_COST), - is_valid_uref: HostFunction::fixed(DEFAULT_IS_VALID_UREF_COST), - add_associated_key: HostFunction::fixed(DEFAULT_ADD_ASSOCIATED_KEY_COST), - remove_associated_key: HostFunction::fixed(DEFAULT_REMOVE_ASSOCIATED_KEY_COST), - update_associated_key: HostFunction::fixed(DEFAULT_UPDATE_ASSOCIATED_KEY_COST), - set_action_threshold: HostFunction::fixed(DEFAULT_SET_ACTION_THRESHOLD_COST), - get_caller: HostFunction::fixed(DEFAULT_GET_CALLER_COST), - get_blocktime: HostFunction::fixed(DEFAULT_GET_BLOCKTIME_COST), - create_purse: HostFunction::fixed(DEFAULT_CREATE_PURSE_COST), - transfer_to_account: HostFunction::fixed(DEFAULT_TRANSFER_TO_ACCOUNT_COST), - transfer_from_purse_to_account: HostFunction::fixed( - DEFAULT_TRANSFER_FROM_PURSE_TO_ACCOUNT_COST, - ), - transfer_from_purse_to_purse: HostFunction::fixed( - DEFAULT_TRANSFER_FROM_PURSE_TO_PURSE_COST, - ), - get_balance: HostFunction::fixed(DEFAULT_GET_BALANCE_COST), - get_phase: HostFunction::fixed(DEFAULT_GET_PHASE_COST), - get_system_contract: HostFunction::fixed(DEFAULT_GET_SYSTEM_CONTRACT_COST), - get_main_purse: HostFunction::fixed(DEFAULT_GET_MAIN_PURSE_COST), - read_host_buffer: HostFunction::new( - DEFAULT_READ_HOST_BUFFER_COST, - [ - NOT_USED, - DEFAULT_READ_HOST_BUFFER_DEST_SIZE_WEIGHT, - NOT_USED, - ], - ), - create_contract_package_at_hash: HostFunction::default(), - create_contract_user_group: HostFunction::default(), - add_contract_version: HostFunction::default(), - disable_contract_version: HostFunction::default(), - call_contract: HostFunction::new( - DEFAULT_CALL_CONTRACT_COST, - [ - NOT_USED, - NOT_USED, - NOT_USED, - 
NOT_USED, - NOT_USED, - DEFAULT_CALL_CONTRACT_ARGS_SIZE_WEIGHT, - NOT_USED, - ], - ), - call_versioned_contract: HostFunction::default(), - get_named_arg_size: HostFunction::default(), - get_named_arg: HostFunction::default(), - remove_contract_user_group: HostFunction::default(), - provision_contract_user_group_uref: HostFunction::default(), - remove_contract_user_group_urefs: HostFunction::default(), - print: HostFunction::new( - DEFAULT_PRINT_COST, - [NOT_USED, DEFAULT_PRINT_TEXT_SIZE_WEIGHT], - ), - blake2b: HostFunction::default(), - } - } -} - -impl ToBytes for HostFunctionCosts { - fn to_bytes(&self) -> Result, bytesrepr::Error> { - let mut ret = bytesrepr::unchecked_allocate_buffer(self); - ret.append(&mut self.read_value.to_bytes()?); - ret.append(&mut self.read_value_local.to_bytes()?); - ret.append(&mut self.write.to_bytes()?); - ret.append(&mut self.write_local.to_bytes()?); - ret.append(&mut self.add.to_bytes()?); - ret.append(&mut self.new_uref.to_bytes()?); - ret.append(&mut self.load_named_keys.to_bytes()?); - ret.append(&mut self.ret.to_bytes()?); - ret.append(&mut self.get_key.to_bytes()?); - ret.append(&mut self.has_key.to_bytes()?); - ret.append(&mut self.put_key.to_bytes()?); - ret.append(&mut self.remove_key.to_bytes()?); - ret.append(&mut self.revert.to_bytes()?); - ret.append(&mut self.is_valid_uref.to_bytes()?); - ret.append(&mut self.add_associated_key.to_bytes()?); - ret.append(&mut self.remove_associated_key.to_bytes()?); - ret.append(&mut self.update_associated_key.to_bytes()?); - ret.append(&mut self.set_action_threshold.to_bytes()?); - ret.append(&mut self.get_caller.to_bytes()?); - ret.append(&mut self.get_blocktime.to_bytes()?); - ret.append(&mut self.create_purse.to_bytes()?); - ret.append(&mut self.transfer_to_account.to_bytes()?); - ret.append(&mut self.transfer_from_purse_to_account.to_bytes()?); - ret.append(&mut self.transfer_from_purse_to_purse.to_bytes()?); - ret.append(&mut self.get_balance.to_bytes()?); - ret.append(&mut 
self.get_phase.to_bytes()?); - ret.append(&mut self.get_system_contract.to_bytes()?); - ret.append(&mut self.get_main_purse.to_bytes()?); - ret.append(&mut self.read_host_buffer.to_bytes()?); - ret.append(&mut self.create_contract_package_at_hash.to_bytes()?); - ret.append(&mut self.create_contract_user_group.to_bytes()?); - ret.append(&mut self.add_contract_version.to_bytes()?); - ret.append(&mut self.disable_contract_version.to_bytes()?); - ret.append(&mut self.call_contract.to_bytes()?); - ret.append(&mut self.call_versioned_contract.to_bytes()?); - ret.append(&mut self.get_named_arg_size.to_bytes()?); - ret.append(&mut self.get_named_arg.to_bytes()?); - ret.append(&mut self.remove_contract_user_group.to_bytes()?); - ret.append(&mut self.provision_contract_user_group_uref.to_bytes()?); - ret.append(&mut self.remove_contract_user_group_urefs.to_bytes()?); - ret.append(&mut self.print.to_bytes()?); - ret.append(&mut self.blake2b.to_bytes()?); - Ok(ret) - } - - fn serialized_length(&self) -> usize { - self.read_value.serialized_length() - + self.read_value_local.serialized_length() - + self.write.serialized_length() - + self.write_local.serialized_length() - + self.add.serialized_length() - + self.new_uref.serialized_length() - + self.load_named_keys.serialized_length() - + self.ret.serialized_length() - + self.get_key.serialized_length() - + self.has_key.serialized_length() - + self.put_key.serialized_length() - + self.remove_key.serialized_length() - + self.revert.serialized_length() - + self.is_valid_uref.serialized_length() - + self.add_associated_key.serialized_length() - + self.remove_associated_key.serialized_length() - + self.update_associated_key.serialized_length() - + self.set_action_threshold.serialized_length() - + self.get_caller.serialized_length() - + self.get_blocktime.serialized_length() - + self.create_purse.serialized_length() - + self.transfer_to_account.serialized_length() - + self.transfer_from_purse_to_account.serialized_length() - + 
self.transfer_from_purse_to_purse.serialized_length() - + self.get_balance.serialized_length() - + self.get_phase.serialized_length() - + self.get_system_contract.serialized_length() - + self.get_main_purse.serialized_length() - + self.read_host_buffer.serialized_length() - + self.create_contract_package_at_hash.serialized_length() - + self.create_contract_user_group.serialized_length() - + self.add_contract_version.serialized_length() - + self.disable_contract_version.serialized_length() - + self.call_contract.serialized_length() - + self.call_versioned_contract.serialized_length() - + self.get_named_arg_size.serialized_length() - + self.get_named_arg.serialized_length() - + self.remove_contract_user_group.serialized_length() - + self.provision_contract_user_group_uref.serialized_length() - + self.remove_contract_user_group_urefs.serialized_length() - + self.print.serialized_length() - + self.blake2b.serialized_length() - } -} - -impl FromBytes for HostFunctionCosts { - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { - let (read_value, rem) = FromBytes::from_bytes(bytes)?; - let (read_value_local, rem) = FromBytes::from_bytes(rem)?; - let (write, rem) = FromBytes::from_bytes(rem)?; - let (write_local, rem) = FromBytes::from_bytes(rem)?; - let (add, rem) = FromBytes::from_bytes(rem)?; - let (new_uref, rem) = FromBytes::from_bytes(rem)?; - let (load_named_keys, rem) = FromBytes::from_bytes(rem)?; - let (ret, rem) = FromBytes::from_bytes(rem)?; - let (get_key, rem) = FromBytes::from_bytes(rem)?; - let (has_key, rem) = FromBytes::from_bytes(rem)?; - let (put_key, rem) = FromBytes::from_bytes(rem)?; - let (remove_key, rem) = FromBytes::from_bytes(rem)?; - let (revert, rem) = FromBytes::from_bytes(rem)?; - let (is_valid_uref, rem) = FromBytes::from_bytes(rem)?; - let (add_associated_key, rem) = FromBytes::from_bytes(rem)?; - let (remove_associated_key, rem) = FromBytes::from_bytes(rem)?; - let (update_associated_key, rem) = 
FromBytes::from_bytes(rem)?; - let (set_action_threshold, rem) = FromBytes::from_bytes(rem)?; - let (get_caller, rem) = FromBytes::from_bytes(rem)?; - let (get_blocktime, rem) = FromBytes::from_bytes(rem)?; - let (create_purse, rem) = FromBytes::from_bytes(rem)?; - let (transfer_to_account, rem) = FromBytes::from_bytes(rem)?; - let (transfer_from_purse_to_account, rem) = FromBytes::from_bytes(rem)?; - let (transfer_from_purse_to_purse, rem) = FromBytes::from_bytes(rem)?; - let (get_balance, rem) = FromBytes::from_bytes(rem)?; - let (get_phase, rem) = FromBytes::from_bytes(rem)?; - let (get_system_contract, rem) = FromBytes::from_bytes(rem)?; - let (get_main_purse, rem) = FromBytes::from_bytes(rem)?; - let (read_host_buffer, rem) = FromBytes::from_bytes(rem)?; - let (create_contract_package_at_hash, rem) = FromBytes::from_bytes(rem)?; - let (create_contract_user_group, rem) = FromBytes::from_bytes(rem)?; - let (add_contract_version, rem) = FromBytes::from_bytes(rem)?; - let (disable_contract_version, rem) = FromBytes::from_bytes(rem)?; - let (call_contract, rem) = FromBytes::from_bytes(rem)?; - let (call_versioned_contract, rem) = FromBytes::from_bytes(rem)?; - let (get_named_arg_size, rem) = FromBytes::from_bytes(rem)?; - let (get_named_arg, rem) = FromBytes::from_bytes(rem)?; - let (remove_contract_user_group, rem) = FromBytes::from_bytes(rem)?; - let (provision_contract_user_group_uref, rem) = FromBytes::from_bytes(rem)?; - let (remove_contract_user_group_urefs, rem) = FromBytes::from_bytes(rem)?; - let (print, rem) = FromBytes::from_bytes(rem)?; - let (blake2b, rem) = FromBytes::from_bytes(rem)?; - Ok(( - HostFunctionCosts { - read_value, - read_value_local, - write, - write_local, - add, - new_uref, - load_named_keys, - ret, - get_key, - has_key, - put_key, - remove_key, - revert, - is_valid_uref, - add_associated_key, - remove_associated_key, - update_associated_key, - set_action_threshold, - get_caller, - get_blocktime, - create_purse, - transfer_to_account, 
- transfer_from_purse_to_account, - transfer_from_purse_to_purse, - get_balance, - get_phase, - get_system_contract, - get_main_purse, - read_host_buffer, - create_contract_package_at_hash, - create_contract_user_group, - add_contract_version, - disable_contract_version, - call_contract, - call_versioned_contract, - get_named_arg_size, - get_named_arg, - remove_contract_user_group, - provision_contract_user_group_uref, - remove_contract_user_group_urefs, - print, - blake2b, - }, - rem, - )) - } -} - -impl Distribution for Standard { - fn sample(&self, rng: &mut R) -> HostFunctionCosts { - HostFunctionCosts { - read_value: rng.gen(), - read_value_local: rng.gen(), - write: rng.gen(), - write_local: rng.gen(), - add: rng.gen(), - new_uref: rng.gen(), - load_named_keys: rng.gen(), - ret: rng.gen(), - get_key: rng.gen(), - has_key: rng.gen(), - put_key: rng.gen(), - remove_key: rng.gen(), - revert: rng.gen(), - is_valid_uref: rng.gen(), - add_associated_key: rng.gen(), - remove_associated_key: rng.gen(), - update_associated_key: rng.gen(), - set_action_threshold: rng.gen(), - get_caller: rng.gen(), - get_blocktime: rng.gen(), - create_purse: rng.gen(), - transfer_to_account: rng.gen(), - transfer_from_purse_to_account: rng.gen(), - transfer_from_purse_to_purse: rng.gen(), - get_balance: rng.gen(), - get_phase: rng.gen(), - get_system_contract: rng.gen(), - get_main_purse: rng.gen(), - read_host_buffer: rng.gen(), - create_contract_package_at_hash: rng.gen(), - create_contract_user_group: rng.gen(), - add_contract_version: rng.gen(), - disable_contract_version: rng.gen(), - call_contract: rng.gen(), - call_versioned_contract: rng.gen(), - get_named_arg_size: rng.gen(), - get_named_arg: rng.gen(), - remove_contract_user_group: rng.gen(), - provision_contract_user_group_uref: rng.gen(), - remove_contract_user_group_urefs: rng.gen(), - print: rng.gen(), - blake2b: rng.gen(), - } - } -} - -#[cfg(any(feature = "gens", test))] -pub mod gens { - use proptest::prelude::*; - - 
use super::{Cost, HostFunction, HostFunctionCosts}; - - pub fn host_function_cost_arb() -> impl Strategy> { - (any::(), any::()).prop_map(|(cost, arguments)| HostFunction::new(cost, arguments)) - } - - prop_compose! { - pub fn host_function_costs_arb() ( - read_value in host_function_cost_arb(), - read_value_local in host_function_cost_arb(), - write in host_function_cost_arb(), - write_local in host_function_cost_arb(), - add in host_function_cost_arb(), - new_uref in host_function_cost_arb(), - load_named_keys in host_function_cost_arb(), - ret in host_function_cost_arb(), - get_key in host_function_cost_arb(), - has_key in host_function_cost_arb(), - put_key in host_function_cost_arb(), - remove_key in host_function_cost_arb(), - revert in host_function_cost_arb(), - is_valid_uref in host_function_cost_arb(), - add_associated_key in host_function_cost_arb(), - remove_associated_key in host_function_cost_arb(), - update_associated_key in host_function_cost_arb(), - set_action_threshold in host_function_cost_arb(), - get_caller in host_function_cost_arb(), - get_blocktime in host_function_cost_arb(), - create_purse in host_function_cost_arb(), - transfer_to_account in host_function_cost_arb(), - transfer_from_purse_to_account in host_function_cost_arb(), - transfer_from_purse_to_purse in host_function_cost_arb(), - get_balance in host_function_cost_arb(), - get_phase in host_function_cost_arb(), - get_system_contract in host_function_cost_arb(), - get_main_purse in host_function_cost_arb(), - read_host_buffer in host_function_cost_arb(), - create_contract_package_at_hash in host_function_cost_arb(), - create_contract_user_group in host_function_cost_arb(), - add_contract_version in host_function_cost_arb(), - disable_contract_version in host_function_cost_arb(), - call_contract in host_function_cost_arb(), - call_versioned_contract in host_function_cost_arb(), - get_named_arg_size in host_function_cost_arb(), - get_named_arg in host_function_cost_arb(), - 
remove_contract_user_group in host_function_cost_arb(), - provision_contract_user_group_uref in host_function_cost_arb(), - remove_contract_user_group_urefs in host_function_cost_arb(), - print in host_function_cost_arb(), - blake2b in host_function_cost_arb(), - ) -> HostFunctionCosts { - HostFunctionCosts { - read_value, - read_value_local, - write, - write_local, - add, - new_uref, - load_named_keys, - ret, - get_key, - has_key, - put_key, - remove_key, - revert, - is_valid_uref, - add_associated_key, - remove_associated_key, - update_associated_key, - set_action_threshold, - get_caller, - get_blocktime, - create_purse, - transfer_to_account, - transfer_from_purse_to_account, - transfer_from_purse_to_purse, - get_balance, - get_phase, - get_system_contract, - get_main_purse, - read_host_buffer, - create_contract_package_at_hash, - create_contract_user_group, - add_contract_version, - disable_contract_version, - call_contract, - call_versioned_contract, - get_named_arg_size, - get_named_arg, - remove_contract_user_group, - provision_contract_user_group_uref, - remove_contract_user_group_urefs, - print, - blake2b, - } - } - } -} - -#[cfg(test)] -mod tests { - use casper_types::U512; - - use super::*; - - const COST: Cost = 42; - const ARGUMENT_COSTS: [Cost; 3] = [123, 456, 789]; - const WEIGHTS: [Cost; 3] = [1000, 1100, 1200]; - - #[test] - fn calculate_gas_cost_for_host_function() { - let host_function = HostFunction::new(COST, ARGUMENT_COSTS); - let expected_cost = COST - + (ARGUMENT_COSTS[0] * WEIGHTS[0]) - + (ARGUMENT_COSTS[1] * WEIGHTS[1]) - + (ARGUMENT_COSTS[2] * WEIGHTS[2]); - assert_eq!( - host_function.calculate_gas_cost(WEIGHTS), - Gas::new(expected_cost.into()) - ); - } - - #[test] - fn calculate_gas_cost_would_overflow() { - let large_value = Cost::max_value(); - - let host_function = HostFunction::new( - large_value, - [large_value, large_value, large_value, large_value], - ); - - let lhs = - host_function.calculate_gas_cost([large_value, large_value, 
large_value, large_value]); - - let large_value = U512::from(large_value); - let rhs = large_value + (U512::from(4) * large_value * large_value); - - assert_eq!(lhs, Gas::new(rhs)); - } -} - -#[cfg(test)] -mod proptests { - use proptest::prelude::*; - - use casper_types::bytesrepr; - - use super::*; - - type Signature = [Cost; 10]; - - proptest! { - #[test] - fn test_host_function(host_function in gens::host_function_cost_arb::()) { - bytesrepr::test_serialization_roundtrip(&host_function); - } - - #[test] - fn test_host_function_costs(host_function_costs in gens::host_function_costs_arb()) { - bytesrepr::test_serialization_roundtrip(&host_function_costs); - } - } -} diff --git a/execution_engine/src/shared/logging/README.md b/execution_engine/src/shared/logging/README.md deleted file mode 100644 index b54beeb649..0000000000 --- a/execution_engine/src/shared/logging/README.md +++ /dev/null @@ -1,50 +0,0 @@ -# Logging - -## General - -The `logging` module provides the ability to log messages from any Casper crate to `stdout` using the canonical -macros from the [`log` crate](https://crates.io/crates/log). - -It also provides functions to allow logging messages with properties attached for the purpose of structured logging and -integration with tools like [Prometheus](https://prometheus.io/). - -Logging can be initialized to support outputting metrics, regardless of the chosen log-level, and can also be set to -display messages in a human-readable format or a hybrid structured one, with each line containing a human-readable -component followed by JSON formatted details. - -## Usage - -#### In libraries - -Libraries should link only to the `log` crate, and use the provided macros to log whatever information will be useful to -downstream consumers. - -#### In executables - -Logging can be initialized using the [`initialize()`][initialize] function, and should be done early in the runtime of -the program. 
- -#### In tests - -Logging can also be initialized early in a test's execution. Note that constructing a -[`TestContextBuilder`][TestContextBuilder] will automatically enable logging at warn-level in the human-readable -format. To avoid this, call [`initialize()`][initialize] with required settings before constructing the first -`TestContextBuilder`. - -Bear in mind that by default tests are run in parallel on multiple threads, so initializing logging might need to be -done in several or all of the tests in a single binary. - -## Metrics - -The structured log messages output via [`log_metric()`][log_metric] or [`log_duration()`][log_duration] can be -parsed and read by the [`casper-engine-metrics-scraper`][scraper]. - -This tool reads from `stdin`, extracts the "time-series-data" from the log messages' properties and makes the values -available via a `GET` endpoint. - - -[initialize]: https://docs.rs/casper-engine-shared/latest/casper_engine_shared/logging/fn.initialize.html -[log_metric]: https://docs.rs/casper-engine-shared/latest/casper_engine_shared/logging/fn.log_metric.html -[log_duration]: https://docs.rs/casper-engine-shared/latest/casper_engine_shared/logging/fn.log_duration.html -[TestContextBuilder]: https://docs.rs/casper-engine-test-support/latest/casper_engine_test_support/struct.TestContextBuilder.html -[scraper]: https://github.com/CasperLabs/CasperLabs/tree/master/execution-engine/engine-metrics-scraper diff --git a/execution_engine/src/shared/logging/mod.rs b/execution_engine/src/shared/logging/mod.rs deleted file mode 100644 index b076c81a4e..0000000000 --- a/execution_engine/src/shared/logging/mod.rs +++ /dev/null @@ -1,69 +0,0 @@ -//! A logger implementation which outputs log messages from Casper crates to the terminal. 
- -mod settings; -mod structured_message; -mod terminal_logger; - -use std::collections::BTreeMap; - -use log::{self, Level, LevelFilter, Log, SetLoggerError}; - -pub use self::terminal_logger::TerminalLogger; -pub use settings::{Settings, Style}; - -#[doc(hidden)] -pub const PAYLOAD_KEY: &str = "payload="; -pub(crate) const METRIC_METADATA_TARGET: &str = "METRIC"; -pub(crate) const CASPER_METADATA_TARGET: &str = "casper_"; -pub(crate) const MESSAGE_TEMPLATE_KEY: &str = "message_template"; -pub(crate) const DEFAULT_MESSAGE_TEMPLATE: &str = "{message}"; -pub(crate) const DEFAULT_MESSAGE_KEY: &str = "message"; - -/// Initializes the global logger using the given settings. -/// -/// The logger will write all log messages from crates prefixed with "casper_" to stdout, and -/// can also log internal metrics generated by the Execution Engine. -/// -/// Returns an error if the global logger has already been set in this process. -pub fn initialize(settings: Settings) -> Result<(), SetLoggerError> { - let logger = Box::new(TerminalLogger::new(&settings)); - initialize_with_logger(logger, settings) -} - -/// This and the `TerminalLogger` are public but undocumented to allow functional testing of this -/// crate, e.g. by passing a logger composed of a `TerminalLogger`. -#[doc(hidden)] -pub fn initialize_with_logger( - logger: Box, - settings: Settings, -) -> Result<(), SetLoggerError> { - if settings.max_level() == LevelFilter::Off && !settings.enable_metrics() { - // No logging required - return Ok(()); - } - - log::set_boxed_logger(logger)?; - log::set_max_level(settings.max_level()); - Ok(()) -} - -/// Logs a message using the given format and properties. 
-/// -/// # Arguments -/// -/// * `log_level` - log level of the message to be logged -/// * `message_format` - a message template to apply over properties by key -/// * `properties` - a collection of machine readable key / value properties which will be logged -#[inline] -pub fn log_details( - _log_level: Level, - _message_format: String, - _properties: BTreeMap<&str, String>, -) { - // TODO: Metrics story https://casperlabs.atlassian.net/browse/NDRS-120 -} - -/// Logs the metrics associated with the specified host function. -pub fn log_host_function_metrics(_host_function: &str, _properties: BTreeMap<&str, String>) { - // TODO: Metrics story https://casperlabs.atlassian.net/browse/NDRS-120 -} diff --git a/execution_engine/src/shared/logging/settings.rs b/execution_engine/src/shared/logging/settings.rs deleted file mode 100644 index 8fa41cafd1..0000000000 --- a/execution_engine/src/shared/logging/settings.rs +++ /dev/null @@ -1,63 +0,0 @@ -use log::LevelFilter; - -/// Settings used to initialize the global logger. -#[derive(Clone, Copy, Debug)] -pub struct Settings { - max_level: LevelFilter, - enable_metrics: bool, - style: Style, -} - -impl Settings { - /// Constructs new `Settings`, where `max_level` sets the verbosity level above which messages - /// will be filtered out. - /// - /// `Off` is the lowest level, through `Error`, `Warn`, `Info`, `Debug` to `Trace` at the - /// highest level. - /// - /// By default, logging of metrics is disabled (see - /// [`with_metrics_enabled()`](Settings::with_metrics_enabled)), and the logging-style is set - /// to [`Style::Structured`]. - pub fn new(max_level: LevelFilter) -> Self { - Settings { - max_level, - enable_metrics: false, - style: Style::Structured, - } - } - - /// If `true`, log messages created via `log_metric()` and - /// `log_duration()` are logged, regardless of the log-level. 
- pub fn with_metrics_enabled(mut self, value: bool) -> Self { - self.enable_metrics = value; - self - } - - /// Sets the logging style to structured or human-readable. - pub fn with_style(mut self, value: Style) -> Self { - self.style = value; - self - } - - pub(crate) fn max_level(&self) -> LevelFilter { - self.max_level - } - - pub(crate) fn enable_metrics(&self) -> bool { - self.enable_metrics - } - - pub(crate) fn style(&self) -> Style { - self.style - } -} - -/// The style of generated log messages. -#[derive(Clone, Copy, Debug)] -pub enum Style { - /// Hybrid structured log-messages, with a human-readable component followed by JSON formatted - /// details. - Structured, - /// Human-readable log-messages. - HumanReadable, -} diff --git a/execution_engine/src/shared/logging/structured_message.rs b/execution_engine/src/shared/logging/structured_message.rs deleted file mode 100644 index eb797e7716..0000000000 --- a/execution_engine/src/shared/logging/structured_message.rs +++ /dev/null @@ -1,455 +0,0 @@ -use std::{ - collections::BTreeMap, - env, - fmt::{self, Display, Formatter}, - process, -}; - -use chrono::{DateTime, SecondsFormat, Utc}; -use log::kv::{self, Key, Value, Visitor}; -use once_cell::sync::Lazy; -use serde::{Serialize, Serializer}; - -use casper_types::SemVer; - -use crate::shared::{ - logging::{DEFAULT_MESSAGE_TEMPLATE, MESSAGE_TEMPLATE_KEY}, - utils, -}; - -static PROCESS_ID: Lazy = Lazy::new(process::id); -static PROCESS_NAME: Lazy = Lazy::new(|| { - env::current_exe() - .ok() - .and_then(|full_path| { - full_path - .file_stem() - .map(|file_stem| file_stem.to_string_lossy().to_string()) - }) - .unwrap_or_else(|| "unknown-process".to_string()) -}); -static HOST_NAME: Lazy = Lazy::new(|| { - hostname::get() - .map(|host_name| host_name.to_string_lossy().to_string()) - .unwrap_or_else(|_| "unknown-host".to_string()) -}); -static MESSAGE_TYPE: Lazy = Lazy::new(|| "ee-structured".to_string()); -static MESSAGE_TYPE_VERSION: Lazy = 
Lazy::new(MessageTypeVersion::default); - -/// container for log message data -#[derive(Clone, Debug, Serialize)] -pub(crate) struct StructuredMessage { - timestamp: TimestampRfc3999, - process_id: u32, - process_name: String, - host_name: String, - log_level: String, - priority: Priority, - message_type: String, - message_type_version: MessageTypeVersion, - message_id: MessageId, - description: String, - properties: MessageProperties, -} - -impl StructuredMessage { - pub fn new(log_level: String, message_id: MessageId, properties: MessageProperties) -> Self { - let timestamp = TimestampRfc3999::default(); - let process_id = *PROCESS_ID; - let process_name = PROCESS_NAME.clone(); - let host_name = HOST_NAME.clone(); - let priority = Priority::from(log_level.as_str()); - let message_type = MESSAGE_TYPE.clone(); - let message_type_version = *MESSAGE_TYPE_VERSION; - let description = properties.get_formatted_message(); - - StructuredMessage { - timestamp, - process_id, - process_name, - host_name, - log_level, - priority, - message_type, - message_type_version, - message_id, - description, - properties, - } - } -} - -impl Display for StructuredMessage { - fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { - let json = utils::jsonify(self, false); - write!( - formatter, - "{timestamp} {loglevel} {priority} {hostname} {facility} payload={payload}", - timestamp = self.timestamp, - loglevel = self.log_level.to_string().to_uppercase(), - priority = self.priority, - hostname = self.host_name, - facility = self.process_name, - payload = json - ) - } -} - -/// newtype to encapsulate log level priority -#[derive(Clone, Copy, Debug, Hash, Serialize)] -struct Priority(u8); - -impl From<&str> for Priority { - fn from(level: &str) -> Self { - match level { - "Error" => Priority(3), - "Warn" => Priority(4), - "Info" => Priority(5), - "Debug" => Priority(6), - "Metric" => Priority(6), - "Trace" => Priority(7), - _ => Priority(255), - } - } -} - -impl Display for Priority { - 
fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { - self.0.fmt(formatter) - } -} - -#[derive(Debug, Copy, Clone)] -struct MessageTypeVersion(SemVer); - -impl Display for MessageTypeVersion { - fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { - self.0.fmt(formatter) - } -} - -impl Default for MessageTypeVersion { - fn default() -> Self { - MessageTypeVersion(SemVer::V1_0_0) - } -} - -impl Serialize for MessageTypeVersion { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - let s = format!("{}.{}.{}", self.0.major, self.0.minor, self.0.patch); - serializer.serialize_str(&s) - } -} - -#[derive(Clone, Copy, Debug, PartialEq, Serialize)] -pub(crate) struct MessageId(usize); - -impl MessageId { - pub fn new(id: usize) -> MessageId { - MessageId(id) - } -} - -/// newtype for Rfc3999 formatted timestamp -#[derive(Clone, Debug, Hash, Serialize)] -pub(crate) struct TimestampRfc3999(String); - -impl Default for TimestampRfc3999 { - fn default() -> Self { - let now: DateTime = Utc::now(); - TimestampRfc3999(now.to_rfc3339_opts(SecondsFormat::Millis, true)) - } -} - -impl Display for TimestampRfc3999 { - fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { - self.0.fmt(formatter) - } -} - -#[derive(Clone, Debug, Hash, Serialize)] -pub(crate) struct MessageProperties(BTreeMap); - -impl MessageProperties { - pub fn new(mut properties: BTreeMap) -> MessageProperties { - // add the default message template ("message_template", "{message}") if the template key - // doesn't already exist. - properties - .entry(MESSAGE_TEMPLATE_KEY.to_string()) - .or_insert_with(|| DEFAULT_MESSAGE_TEMPLATE.to_string()); - MessageProperties(properties) - } - - pub fn insert(&mut self, key: String, value: String) -> Option { - self.0.insert(key, value) - } - - /// strips out brace encased motes in message_template - /// and applies them as candidate keys for the encapsulated collection of - /// message properties. 
the underlying value of any candidate key that - /// has an entry in the collection will be spliced into the output in - /// the place of its corresponding brace encased candidate key - pub fn get_formatted_message(&self) -> String { - let message_template = match self.0.get(MESSAGE_TEMPLATE_KEY) { - Some(message_template) if !message_template.is_empty() => message_template, - _ => return String::new(), - }; - - let mut buf = String::new(); - let mut candidate_key = String::new(); - - let mut key_seek = false; - let properties = &self.0; - - for c in message_template.chars() { - match c { - '{' => { - key_seek = true; - candidate_key.clear(); - } - '}' if key_seek => { - key_seek = false; - if let Some(v) = properties.get(&candidate_key) { - buf.push_str(v); - } - } - '}' => (), - c if key_seek => candidate_key.push(c), - c => buf.push(c), - } - } - buf - } -} - -impl Default for MessageProperties { - fn default() -> Self { - MessageProperties::new(BTreeMap::new()) - } -} - -/// This impl allows us to populate a `MessageProperties` map from a log `Record::key_values()`. -impl<'kvs> Visitor<'kvs> for MessageProperties { - fn visit_pair(&mut self, key: Key<'kvs>, value: Value<'kvs>) -> Result<(), kv::Error> { - // The value was generated via the Debug impl, i.e. it has been wrapped in quotation marks - // and inner chars have been escaped as required. Undo this for passing to `jsonify`, or we - // get double-escaped chars. 
- let value = value - .to_string() - .trim_matches('"') - .replace(r#"\'"#, r#"'"#) - .replace(r#"\""#, r#"""#) - .replace(r#"\\"#, r#"\"#); - self.0.insert(key.to_string(), value); - Ok(()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::shared::logging::DEFAULT_MESSAGE_KEY; - - #[test] - fn should_get_process_id() { - assert!( - *PROCESS_ID != 0, - "PROCESS_ID should not be 0: {}", - *PROCESS_ID - ); - } - - #[test] - fn should_get_process_name() { - assert!(!PROCESS_NAME.is_empty(), "PROCESS_NAME should have chars") - } - - #[test] - fn should_get_host_name() { - assert!(!HOST_NAME.is_empty(), "HOST_NAME should have chars") - } - - #[test] - fn should_format_message_template_default_use_case() { - let mut properties: BTreeMap = BTreeMap::new(); - properties.insert( - DEFAULT_MESSAGE_KEY.to_string(), - "i am a log message".to_string(), - ); - - let props = MessageProperties::new(properties); - - let formatted = props.get_formatted_message(); - - assert_eq!( - formatted, - "i am a log message".to_string(), - "message malformed" - ) - } - - #[test] - fn should_format_message_template_starting_and_ending_with_braces() { - let mut properties: BTreeMap = BTreeMap::new(); - properties.insert( - DEFAULT_MESSAGE_KEY.to_string(), - "i convey meaning".to_string(), - ); - properties.insert("abc".to_string(), "some text".to_string()); - properties.insert("some-hash".to_string(), "A@#$!@#".to_string()); - properties.insert("byz".to_string(), "".to_string()); - let template = - "{abc} i'm a message temp{byz}late some-hash:{some-hash} msg:{message}".to_string(); - properties.insert(MESSAGE_TEMPLATE_KEY.to_string(), template); - - let props = MessageProperties::new(properties); - - let formatted = props.get_formatted_message(); - - assert_eq!( - formatted, - "some text i\'m a message template some-hash:A@#$!@# msg:i convey meaning".to_string(), - "message malformed" - ) - } - - #[test] - fn should_format_message_template_with_escaped_braces() { - let mut 
properties: BTreeMap = BTreeMap::new(); - properties.insert(DEFAULT_MESSAGE_KEY.to_string(), "a message".to_string()); - properties.insert("more-data".to_string(), "some additional data".to_string()); - let template = "this is {{message}} with {{{more-data}}}".to_string(); - properties.insert(MESSAGE_TEMPLATE_KEY.to_string(), template); - - let props = MessageProperties::new(properties); - - let formatted = props.get_formatted_message(); - - assert_eq!( - formatted, - "this is a message with some additional data".to_string(), - "message malformed" - ) - } - - #[test] - fn should_format_message_template_with_no_properties() { - let properties: BTreeMap = BTreeMap::new(); - - let props = MessageProperties::new(properties); - - let formatted = props.get_formatted_message(); - - assert_eq!(formatted, "".to_string(), "message malformed") - } - - #[test] - fn should_format_message_template_with_unclosed_brace() { - let mut properties: BTreeMap = BTreeMap::new(); - let template = "{message".to_string(); - properties.insert(MESSAGE_TEMPLATE_KEY.to_string(), template); - - let props = MessageProperties::new(properties); - - let formatted = props.get_formatted_message(); - - assert_eq!(formatted, "".to_string(), "message malformed") - } - - #[test] - fn should_format_message_template_with_unopened_brace() { - let mut properties: BTreeMap = BTreeMap::new(); - let template = "message}".to_string(); - properties.insert(MESSAGE_TEMPLATE_KEY.to_string(), template); - - let props = MessageProperties::new(properties); - - let formatted = props.get_formatted_message(); - - assert_eq!(formatted, "message".to_string(), "message malformed") - } - - #[test] - fn should_format_message_template_with_mismatched_braces_left() { - let mut properties: BTreeMap = BTreeMap::new(); - let template = "{{message}".to_string(); - properties.insert(MESSAGE_TEMPLATE_KEY.to_string(), template); - - let props = MessageProperties::new(properties); - - let formatted = props.get_formatted_message(); - - 
assert_eq!(formatted, "".to_string(), "message malformed") - } - - #[test] - fn should_format_message_template_with_mismatched_braces_right() { - let mut properties: BTreeMap = BTreeMap::new(); - let template = "{message}}".to_string(); - properties.insert(MESSAGE_TEMPLATE_KEY.to_string(), template); - - let props = MessageProperties::new(properties); - - let formatted = props.get_formatted_message(); - - assert_eq!(formatted, "".to_string(), "message malformed") - } - - #[test] - fn should_validate_log_message() { - let test_msg = "test_message".to_string(); - - let mut properties = MessageProperties::default(); - properties.insert(DEFAULT_MESSAGE_KEY.to_string(), test_msg); - - let l = StructuredMessage::new("Error".to_string(), MessageId::new(1), properties); - - assert!( - should_have_rfc3339_timestamp(&l), - "rfc3339 timestamp required" - ); - - assert!(should_have_log_level(&l), "log level required"); - assert!(should_have_process_id(&l), "process id required"); - assert!(should_have_process_name(&l), "process name required"); - assert!(should_have_host_name(&l), "host name required"); - assert!(should_have_at_least_one_property(&l), "properties required"); - assert!(should_have_description(&l), "description required"); - } - - fn should_have_rfc3339_timestamp(l: &StructuredMessage) -> bool { - // ISO 8601 / RFC 3339 - // rfc3339 = "YYYY-MM-DDTHH:mm:ss+00:00" - match DateTime::parse_from_rfc3339(&l.timestamp.0) { - Ok(_d) => true, - Err(_) => false, - } - } - - fn should_have_log_level(l: &StructuredMessage) -> bool { - !l.log_level.is_empty() - } - - fn should_have_description(l: &StructuredMessage) -> bool { - !l.description.is_empty() - } - - fn should_have_process_id(l: &StructuredMessage) -> bool { - l.process_id > 0 - } - - fn should_have_process_name(l: &StructuredMessage) -> bool { - !l.process_name.is_empty() - } - - fn should_have_host_name(l: &StructuredMessage) -> bool { - !l.host_name.is_empty() - } - - fn should_have_at_least_one_property(l: 
&StructuredMessage) -> bool { - !l.properties.0.is_empty() - } -} diff --git a/execution_engine/src/shared/logging/terminal_logger.rs b/execution_engine/src/shared/logging/terminal_logger.rs deleted file mode 100644 index ef494b2133..0000000000 --- a/execution_engine/src/shared/logging/terminal_logger.rs +++ /dev/null @@ -1,110 +0,0 @@ -use std::sync::atomic::{AtomicUsize, Ordering}; - -use log::{Level, LevelFilter, Log, Metadata, Record}; - -use crate::shared::logging::{ - structured_message::{MessageId, MessageProperties, StructuredMessage, TimestampRfc3999}, - Settings, Style, CASPER_METADATA_TARGET, DEFAULT_MESSAGE_KEY, METRIC_METADATA_TARGET, -}; - -#[doc(hidden)] -/// Logs messages from targets with prefix "casper_" or "METRIC" to stdout. -pub struct TerminalLogger { - max_level: LevelFilter, - metrics_enabled: bool, - style: Style, - next_message_id: AtomicUsize, -} - -impl TerminalLogger { - pub fn new(settings: &Settings) -> Self { - TerminalLogger { - max_level: settings.max_level(), - metrics_enabled: settings.enable_metrics(), - style: settings.style(), - next_message_id: AtomicUsize::new(0), - } - } - - pub fn prepare_log_line(&self, record: &Record) -> Option { - if !self.enabled(&record.metadata()) { - return None; - } - - let mut properties = MessageProperties::default(); - let _ = record.key_values().visit(&mut properties); - - let log_line = match self.style { - Style::Structured => { - if record.key_values().count() == 0 { - properties.insert( - DEFAULT_MESSAGE_KEY.to_string(), - format!("{}", record.args()), - ); - } - - let message_id = - MessageId::new(self.next_message_id.fetch_add(1, Ordering::SeqCst)); - let structured_message = StructuredMessage::new( - level_to_str(record).to_string(), - message_id, - properties, - ); - format!("{}", structured_message) - } - Style::HumanReadable => { - let formatted_properties = properties.get_formatted_message(); - let msg = format!("{}", record.args()); - format!( - "{timestamp} {level} [{file}:{line}] 
{msg}{space}{formatted_properties}", - timestamp = TimestampRfc3999::default(), - level = level_to_str(&record).to_uppercase(), - file = record.file().unwrap_or("unknown-file"), - line = record.line().unwrap_or_default(), - msg = msg, - space = if formatted_properties.is_empty() || msg.is_empty() { - "" - } else { - " " - }, - formatted_properties = formatted_properties - ) - } - }; - - Some(log_line) - } -} - -impl Log for TerminalLogger { - fn enabled(&self, metadata: &Metadata) -> bool { - // If the target starts "casper_" it's either come from a log macro in one of our - // crates, or via `logging::log_details`. In this case, check the level. - (metadata.target().starts_with(CASPER_METADATA_TARGET) - && metadata.level() <= self.max_level) - // Otherwise, check if the target is "METRIC" and if we have metric logging enabled. - || (self.metrics_enabled && metadata.target() == METRIC_METADATA_TARGET) - } - - fn log(&self, record: &Record) { - if let Some(log_line) = self.prepare_log_line(record) { - println!("{}", log_line); - } - } - - fn flush(&self) {} -} - -fn level_to_str<'a>(record: &'a Record) -> &'a str { - if record.target() == METRIC_METADATA_TARGET { - return "Metric"; - } - - match record.level() { - Level::Trace => "Trace", - Level::Debug => "Debug", - Level::Info => "Info", - Level::Warn => "Warn", - Level::Error => "Error", - } -} diff --git a/execution_engine/src/shared/motes.rs b/execution_engine/src/shared/motes.rs deleted file mode 100644 index 07355ca189..0000000000 --- a/execution_engine/src/shared/motes.rs +++ /dev/null @@ -1,231 +0,0 @@ -use std::{fmt, iter::Sum}; - -use datasize::DataSize; -use num::Zero; -use serde::{Deserialize, Serialize}; - -use casper_types::{ - bytesrepr::{self, FromBytes, ToBytes}, - U512, -}; - -use crate::shared::gas::Gas; - -#[derive( - DataSize, Debug, Default, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Serialize, Deserialize, -)] -pub struct Motes(U512); - -impl Motes { - pub fn new(value: U512) -> Motes { - 
Motes(value) - } - - pub fn checked_add(&self, rhs: Self) -> Option { - self.0.checked_add(rhs.value()).map(Self::new) - } - - pub fn value(&self) -> U512 { - self.0 - } - - pub fn from_gas(gas: Gas, conv_rate: u64) -> Option { - gas.value() - .checked_mul(U512::from(conv_rate)) - .map(Self::new) - } -} - -impl fmt::Display for Motes { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{:?}", self.0) - } -} - -impl std::ops::Add for Motes { - type Output = Motes; - - fn add(self, rhs: Self) -> Self::Output { - let val = self.value() + rhs.value(); - Motes::new(val) - } -} - -impl std::ops::Sub for Motes { - type Output = Motes; - - fn sub(self, rhs: Self) -> Self::Output { - let val = self.value() - rhs.value(); - Motes::new(val) - } -} - -impl std::ops::Div for Motes { - type Output = Motes; - - fn div(self, rhs: Self) -> Self::Output { - let val = self.value() / rhs.value(); - Motes::new(val) - } -} - -impl std::ops::Mul for Motes { - type Output = Motes; - - fn mul(self, rhs: Self) -> Self::Output { - let val = self.value() * rhs.value(); - Motes::new(val) - } -} - -impl Zero for Motes { - fn zero() -> Self { - Motes::new(U512::zero()) - } - - fn is_zero(&self) -> bool { - self.0.is_zero() - } -} - -impl Sum for Motes { - fn sum>(iter: I) -> Self { - iter.fold(Motes::zero(), std::ops::Add::add) - } -} - -impl ToBytes for Motes { - fn to_bytes(&self) -> Result, bytesrepr::Error> { - self.0.to_bytes() - } - - fn serialized_length(&self) -> usize { - self.0.serialized_length() - } -} - -impl FromBytes for Motes { - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { - let (value, remainder) = FromBytes::from_bytes(bytes)?; - Ok((Motes::new(value), remainder)) - } -} - -#[cfg(test)] -mod tests { - use casper_types::U512; - - use crate::shared::{gas::Gas, motes::Motes}; - - #[test] - fn should_be_able_to_get_instance_of_motes() { - let initial_value = 1; - let motes = Motes::new(U512::from(initial_value)); - assert_eq!( - 
initial_value, - motes.value().as_u64(), - "should have equal value" - ) - } - - #[test] - fn should_be_able_to_compare_two_instances_of_motes() { - let left_motes = Motes::new(U512::from(1)); - let right_motes = Motes::new(U512::from(1)); - assert_eq!(left_motes, right_motes, "should be equal"); - let right_motes = Motes::new(U512::from(2)); - assert_ne!(left_motes, right_motes, "should not be equal") - } - - #[test] - fn should_be_able_to_add_two_instances_of_motes() { - let left_motes = Motes::new(U512::from(1)); - let right_motes = Motes::new(U512::from(1)); - let expected_motes = Motes::new(U512::from(2)); - assert_eq!( - (left_motes + right_motes), - expected_motes, - "should be equal" - ) - } - - #[test] - fn should_be_able_to_subtract_two_instances_of_motes() { - let left_motes = Motes::new(U512::from(1)); - let right_motes = Motes::new(U512::from(1)); - let expected_motes = Motes::new(U512::from(0)); - assert_eq!( - (left_motes - right_motes), - expected_motes, - "should be equal" - ) - } - - #[test] - fn should_be_able_to_multiply_two_instances_of_motes() { - let left_motes = Motes::new(U512::from(100)); - let right_motes = Motes::new(U512::from(10)); - let expected_motes = Motes::new(U512::from(1000)); - assert_eq!( - (left_motes * right_motes), - expected_motes, - "should be equal" - ) - } - - #[test] - fn should_be_able_to_divide_two_instances_of_motes() { - let left_motes = Motes::new(U512::from(1000)); - let right_motes = Motes::new(U512::from(100)); - let expected_motes = Motes::new(U512::from(10)); - assert_eq!( - (left_motes / right_motes), - expected_motes, - "should be equal" - ) - } - - #[test] - fn should_be_able_to_convert_from_motes() { - let gas = Gas::new(U512::from(100)); - let motes = Motes::from_gas(gas, 10).expect("should have value"); - let expected_motes = Motes::new(U512::from(1000)); - assert_eq!(motes, expected_motes, "should be equal") - } - - #[test] - fn should_be_able_to_default() { - let motes = Motes::default(); - let 
expected_motes = Motes::new(U512::from(0)); - assert_eq!(motes, expected_motes, "should be equal") - } - - #[test] - fn should_be_able_to_compare_relative_value() { - let left_motes = Motes::new(U512::from(100)); - let right_motes = Motes::new(U512::from(10)); - assert!(left_motes > right_motes, "should be gt"); - let right_motes = Motes::new(U512::from(100)); - assert!(left_motes >= right_motes, "should be gte"); - assert!(left_motes <= right_motes, "should be lte"); - let left_motes = Motes::new(U512::from(10)); - assert!(left_motes < right_motes, "should be lt"); - } - - #[test] - fn should_default() { - let left_motes = Motes::new(U512::from(0)); - let right_motes = Motes::default(); - assert_eq!(left_motes, right_motes, "should be equal"); - let u512 = U512::zero(); - assert_eq!(left_motes.value(), u512, "should be equal"); - } - - #[test] - fn should_support_checked_mul_from_gas() { - let gas = Gas::new(U512::MAX); - let conv_rate = 10; - let maybe = Motes::from_gas(gas, conv_rate); - assert!(maybe.is_none(), "should be none due to overflow"); - } -} diff --git a/execution_engine/src/shared/newtypes/blake2b256.rs b/execution_engine/src/shared/newtypes/blake2b256.rs deleted file mode 100644 index 82cc49e58a..0000000000 --- a/execution_engine/src/shared/newtypes/blake2b256.rs +++ /dev/null @@ -1,117 +0,0 @@ -/// The number of bytes in a Blake2b hash -use std::{array::TryFromSliceError, convert::TryFrom}; - -use blake2::{ - digest::{Update, VariableOutput}, - VarBlake2b, -}; -use serde::{Deserialize, Serialize}; - -use casper_types::bytesrepr::{self, FromBytes, ToBytes}; - -/// Represents a 32-byte BLAKE2b hash digest -#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Default, Serialize, Deserialize)] -pub struct Blake2bHash([u8; Blake2bHash::LENGTH]); - -impl Blake2bHash { - pub const LENGTH: usize = 32; - - /// Creates a 32-byte BLAKE2b hash digest from a given a piece of data - pub fn new(data: &[u8]) -> Self { - let mut ret = [0u8; 
Blake2bHash::LENGTH]; - // NOTE: Safe to unwrap here because our digest length is constant and valid - let mut hasher = VarBlake2b::new(Blake2bHash::LENGTH).unwrap(); - hasher.update(data); - hasher.finalize_variable(|hash| ret.clone_from_slice(hash)); - Blake2bHash(ret) - } - - /// Returns the underlying BLKAE2b hash bytes - pub fn value(&self) -> [u8; Blake2bHash::LENGTH] { - self.0 - } - - /// Converts the underlying BLAKE2b hash digest array to a `Vec` - pub fn to_vec(&self) -> Vec { - self.0.to_vec() - } -} - -impl core::fmt::LowerHex for Blake2bHash { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - let hex_string = base16::encode_lower(&self.value()); - if f.alternate() { - write!(f, "0x{}", hex_string) - } else { - write!(f, "{}", hex_string) - } - } -} - -impl core::fmt::UpperHex for Blake2bHash { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - let hex_string = base16::encode_upper(&self.value()); - if f.alternate() { - write!(f, "0x{}", hex_string) - } else { - write!(f, "{}", hex_string) - } - } -} - -impl core::fmt::Display for Blake2bHash { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - write!(f, "Blake2bHash({:#x})", self) - } -} - -impl core::fmt::Debug for Blake2bHash { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - write!(f, "{}", self) - } -} - -impl From<[u8; Blake2bHash::LENGTH]> for Blake2bHash { - fn from(arr: [u8; Blake2bHash::LENGTH]) -> Self { - Blake2bHash(arr) - } -} - -impl<'a> TryFrom<&'a [u8]> for Blake2bHash { - type Error = TryFromSliceError; - - fn try_from(slice: &[u8]) -> Result { - <[u8; Blake2bHash::LENGTH]>::try_from(slice).map(Blake2bHash) - } -} - -impl AsRef<[u8]> for Blake2bHash { - fn as_ref(&self) -> &[u8] { - self.0.as_ref() - } -} - -impl Into<[u8; Blake2bHash::LENGTH]> for Blake2bHash { - fn into(self) -> [u8; Blake2bHash::LENGTH] { - self.0 - } -} - -impl ToBytes for Blake2bHash { - #[inline(always)] - fn to_bytes(&self) -> 
Result, bytesrepr::Error> { - self.0.to_bytes() - } - - #[inline(always)] - fn serialized_length(&self) -> usize { - self.0.serialized_length() - } -} - -impl FromBytes for Blake2bHash { - #[inline(always)] - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { - FromBytes::from_bytes(bytes).map(|(arr, rem)| (Blake2bHash(arr), rem)) - } -} diff --git a/execution_engine/src/shared/newtypes/macros.rs b/execution_engine/src/shared/newtypes/macros.rs deleted file mode 100644 index 6d4131bbb9..0000000000 --- a/execution_engine/src/shared/newtypes/macros.rs +++ /dev/null @@ -1,119 +0,0 @@ -/// Creates an array newtype for given length with special access operators already implemented. -#[macro_export] -macro_rules! make_array_newtype { - ($name:ident, $ty:ty, $len:expr) => { - pub struct $name([$ty; $len]); - - impl $name { - pub fn new(source: [$ty; $len]) -> Self { - $name(source) - } - - pub fn into_inner(self) -> [$ty; $len] { - self.0 - } - } - - impl Clone for $name { - fn clone(&self) -> $name { - let &$name(ref dat) = self; - $name(dat.clone()) - } - } - - impl Copy for $name {} - - impl PartialEq for $name { - fn eq(&self, other: &$name) -> bool { - &self[..] == &other[..] 
- } - } - - impl Eq for $name {} - - impl PartialOrd for $name { - fn partial_cmp(&self, other: &$name) -> Option { - Some(self.cmp(other)) - } - } - - impl Ord for $name { - fn cmp(&self, other: &$name) -> core::cmp::Ordering { - self.0.cmp(&other.0) - } - } - - impl core::ops::Index for $name { - type Output = $ty; - - fn index(&self, index: usize) -> &$ty { - let &$name(ref dat) = self; - &dat[index] - } - } - - impl core::ops::Index> for $name { - type Output = [$ty]; - - fn index(&self, index: core::ops::Range) -> &[$ty] { - let &$name(ref dat) = self; - &dat[index] - } - } - - impl core::ops::Index> for $name { - type Output = [$ty]; - - fn index(&self, index: core::ops::RangeTo) -> &[$ty] { - let &$name(ref dat) = self; - &dat[index] - } - } - - impl core::ops::Index> for $name { - type Output = [$ty]; - - fn index(&self, index: core::ops::RangeFrom) -> &[$ty] { - let &$name(ref dat) = self; - &dat[index] - } - } - - impl core::ops::Index for $name { - type Output = [$ty]; - - fn index(&self, _: core::ops::RangeFull) -> &[$ty] { - let &$name(ref dat) = self; - &dat[..] 
- } - } - - impl core::fmt::Debug for $name { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { - write!(f, "{}([", stringify!($name))?; - write!(f, "{:?}", self.0[0])?; - for item in self.0[1..].iter() { - write!(f, ", {:?}", item)?; - } - write!(f, "])") - } - } - - impl bytesrepr::ToBytes for $name { - fn to_bytes(&self) -> Result, bytesrepr::Error> { - self.0.to_bytes() - } - - fn serialized_length(&self) -> usize { - self.0.serialized_length() - } - } - - impl bytesrepr::FromBytes for $name { - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { - let (dat, rem) = <[$ty; $len]>::from_bytes(bytes)?; - Ok(($name(dat), rem)) - } - } - }; -} diff --git a/execution_engine/src/shared/newtypes/mod.rs b/execution_engine/src/shared/newtypes/mod.rs deleted file mode 100644 index 0fb38addf3..0000000000 --- a/execution_engine/src/shared/newtypes/mod.rs +++ /dev/null @@ -1,140 +0,0 @@ -//! Some newtypes. -mod blake2b256; -mod macros; -use std::fmt::{self, Display, Formatter}; - -pub use blake2b256::Blake2bHash; -use serde::Serialize; -use uuid::Uuid; - -#[derive(Clone, Copy, Debug, Default, Hash, PartialEq, Serialize)] -pub struct CorrelationId(Uuid); - -impl CorrelationId { - pub fn new() -> CorrelationId { - CorrelationId(Uuid::new_v4()) - } - - pub fn is_empty(&self) -> bool { - self.0.is_nil() - } -} - -impl Display for CorrelationId { - fn fmt(&self, f: &mut Formatter) -> fmt::Result { - write!(f, "{:?}", self.0) - } -} - -#[cfg(test)] -mod tests { - use std::hash::{Hash, Hasher}; - - use crate::shared::{newtypes::CorrelationId, utils}; - - #[test] - fn should_be_able_to_generate_correlation_id() { - let correlation_id = CorrelationId::new(); - - assert_ne!( - correlation_id.to_string(), - "00000000-0000-0000-0000-000000000000", - "should not be empty value" - ) - } - - #[test] - fn should_support_to_string() { - let correlation_id = CorrelationId::new(); - - assert!( - !correlation_id.is_empty(), - 
"correlation_id should be produce string" - ) - } - - #[test] - fn should_support_to_string_no_type_encasement() { - let correlation_id = CorrelationId::new(); - - let correlation_id_string = correlation_id.to_string(); - - assert!( - !correlation_id_string.starts_with("CorrelationId"), - "correlation_id should just be the inner value without tuple name" - ) - } - - #[test] - fn should_support_to_json() { - let correlation_id = CorrelationId::new(); - - let correlation_id_json = utils::jsonify(correlation_id, false); - - assert!( - !correlation_id_json.is_empty(), - "correlation_id should be produce json" - ) - } - - #[test] - fn should_support_is_display() { - let correlation_id = CorrelationId::new(); - - let display = format!("{}", correlation_id); - - assert!(!display.is_empty(), "display should not be empty") - } - - #[test] - fn should_support_is_empty() { - let correlation_id = CorrelationId::new(); - - assert!( - !correlation_id.is_empty(), - "correlation_id should not be empty" - ) - } - - #[test] - fn should_create_unique_id_on_new() { - let correlation_id_lhs = CorrelationId::new(); - let correlation_id_rhs = CorrelationId::new(); - - assert_ne!( - correlation_id_lhs, correlation_id_rhs, - "correlation_ids should be distinct" - ); - } - - #[test] - fn should_support_clone() { - let correlation_id = CorrelationId::new(); - - let cloned = correlation_id; - - assert_eq!(correlation_id, cloned, "should be cloneable") - } - - #[test] - fn should_support_copy() { - let correlation_id = CorrelationId::new(); - - let cloned = correlation_id; - - assert_eq!(correlation_id, cloned, "should be cloneable") - } - - #[test] - fn should_support_hash() { - let correlation_id = CorrelationId::new(); - - let mut state = std::collections::hash_map::DefaultHasher::new(); - - correlation_id.hash(&mut state); - - let hash = state.finish(); - - assert!(hash > 0, "should be hashable"); - } -} diff --git a/execution_engine/src/shared/opcode_costs.rs 
b/execution_engine/src/shared/opcode_costs.rs deleted file mode 100644 index 2c42a163b4..0000000000 --- a/execution_engine/src/shared/opcode_costs.rs +++ /dev/null @@ -1,306 +0,0 @@ -use std::collections::BTreeMap; - -use datasize::DataSize; -use pwasm_utils::rules::{InstructionType, Metering, Set}; -use rand::{distributions::Standard, prelude::*, Rng}; -use serde::{Deserialize, Serialize}; - -use casper_types::bytesrepr::{self, FromBytes, ToBytes, U32_SERIALIZED_LENGTH}; - -pub const DEFAULT_BIT_COST: u32 = 300; -pub const DEFAULT_ADD_COST: u32 = 210; -pub const DEFAULT_MUL_COST: u32 = 240; -pub const DEFAULT_DIV_COST: u32 = 320; -pub const DEFAULT_LOAD_COST: u32 = 2_500; -pub const DEFAULT_STORE_COST: u32 = 4_700; -pub const DEFAULT_CONST_COST: u32 = 110; -pub const DEFAULT_LOCAL_COST: u32 = 390; -pub const DEFAULT_GLOBAL_COST: u32 = 390; -pub const DEFAULT_CONTROL_FLOW_COST: u32 = 440; -pub const DEFAULT_INTEGER_COMPARISON_COST: u32 = 250; -pub const DEFAULT_CONVERSION_COST: u32 = 420; -pub const DEFAULT_UNREACHABLE_COST: u32 = 270; -pub const DEFAULT_NOP_COST: u32 = 200; // TODO: This value is not researched -pub const DEFAULT_CURRENT_MEMORY_COST: u32 = 290; -pub const DEFAULT_GROW_MEMORY_COST: u32 = 240_000; -pub const DEFAULT_REGULAR_COST: u32 = 210; - -const NUM_FIELDS: usize = 17; -pub const OPCODE_COSTS_SERIALIZED_LENGTH: usize = NUM_FIELDS * U32_SERIALIZED_LENGTH; - -// Taken (partially) from parity-ethereum -#[derive(Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Debug, DataSize)] -pub struct OpcodeCosts { - /// Bit operations multiplier. - pub bit: u32, - /// Arithmetic add operations multiplier. - pub add: u32, - /// Mul operations multiplier. - pub mul: u32, - /// Div operations multiplier. - pub div: u32, - /// Memory load operation multiplier. - pub load: u32, - /// Memory store operation multiplier. - pub store: u32, - /// Const operation multiplier. - #[serde(rename = "const")] - pub op_const: u32, - /// Local operations multiplier. 
- pub local: u32, - /// Global operations multiplier. - pub global: u32, - /// Control flow operations multiplier. - pub control_flow: u32, - /// Integer operations multiplier. - pub integer_comparison: u32, - /// Conversion operations multiplier. - pub conversion: u32, - /// Unreachable operation multiplier. - pub unreachable: u32, - /// Nop operation multiplier. - pub nop: u32, - /// Get current memory operation multiplier. - pub current_memory: u32, - /// Grow memory cost, per page (64kb) - pub grow_memory: u32, - /// Regular opcode cost - pub regular: u32, -} - -impl OpcodeCosts { - pub(crate) fn to_set(&self) -> Set { - let meterings = { - let mut tmp = BTreeMap::new(); - tmp.insert(InstructionType::Bit, Metering::Fixed(self.bit)); - tmp.insert(InstructionType::Add, Metering::Fixed(self.add)); - tmp.insert(InstructionType::Mul, Metering::Fixed(self.mul)); - tmp.insert(InstructionType::Div, Metering::Fixed(self.div)); - tmp.insert(InstructionType::Load, Metering::Fixed(self.load)); - tmp.insert(InstructionType::Store, Metering::Fixed(self.store)); - tmp.insert(InstructionType::Const, Metering::Fixed(self.op_const)); - tmp.insert(InstructionType::Local, Metering::Fixed(self.local)); - tmp.insert(InstructionType::Global, Metering::Fixed(self.global)); - tmp.insert( - InstructionType::ControlFlow, - Metering::Fixed(self.control_flow), - ); - tmp.insert( - InstructionType::IntegerComparison, - Metering::Fixed(self.integer_comparison), - ); - tmp.insert( - InstructionType::Conversion, - Metering::Fixed(self.conversion), - ); - tmp.insert( - InstructionType::Unreachable, - Metering::Fixed(self.unreachable), - ); - tmp.insert(InstructionType::Nop, Metering::Fixed(self.nop)); - tmp.insert( - InstructionType::CurrentMemory, - Metering::Fixed(self.current_memory), - ); - tmp.insert( - InstructionType::GrowMemory, - Metering::Fixed(self.grow_memory), - ); - - // Instructions Float, FloatComparison, FloatConst, FloatConversion are omitted here - // because we're using 
`with_forbidden_floats` below. - - tmp - }; - Set::new(self.regular, meterings) - .with_grow_cost(self.grow_memory) - .with_forbidden_floats() - } -} - -impl Default for OpcodeCosts { - fn default() -> Self { - OpcodeCosts { - bit: DEFAULT_BIT_COST, - add: DEFAULT_ADD_COST, - mul: DEFAULT_MUL_COST, - div: DEFAULT_DIV_COST, - load: DEFAULT_LOAD_COST, - store: DEFAULT_STORE_COST, - op_const: DEFAULT_CONST_COST, - local: DEFAULT_LOCAL_COST, - global: DEFAULT_GLOBAL_COST, - control_flow: DEFAULT_CONTROL_FLOW_COST, - integer_comparison: DEFAULT_INTEGER_COMPARISON_COST, - conversion: DEFAULT_CONVERSION_COST, - unreachable: DEFAULT_UNREACHABLE_COST, - nop: DEFAULT_NOP_COST, - current_memory: DEFAULT_CURRENT_MEMORY_COST, - grow_memory: DEFAULT_GROW_MEMORY_COST, - regular: DEFAULT_REGULAR_COST, - } - } -} - -impl Distribution for Standard { - fn sample(&self, rng: &mut R) -> OpcodeCosts { - OpcodeCosts { - bit: rng.gen(), - add: rng.gen(), - mul: rng.gen(), - div: rng.gen(), - load: rng.gen(), - store: rng.gen(), - op_const: rng.gen(), - local: rng.gen(), - global: rng.gen(), - control_flow: rng.gen(), - integer_comparison: rng.gen(), - conversion: rng.gen(), - unreachable: rng.gen(), - nop: rng.gen(), - current_memory: rng.gen(), - grow_memory: rng.gen(), - regular: rng.gen(), - } - } -} - -impl ToBytes for OpcodeCosts { - fn to_bytes(&self) -> Result, bytesrepr::Error> { - let mut ret = bytesrepr::unchecked_allocate_buffer(self); - - ret.append(&mut self.bit.to_bytes()?); - ret.append(&mut self.add.to_bytes()?); - ret.append(&mut self.mul.to_bytes()?); - ret.append(&mut self.div.to_bytes()?); - ret.append(&mut self.load.to_bytes()?); - ret.append(&mut self.store.to_bytes()?); - ret.append(&mut self.op_const.to_bytes()?); - ret.append(&mut self.local.to_bytes()?); - ret.append(&mut self.global.to_bytes()?); - ret.append(&mut self.control_flow.to_bytes()?); - ret.append(&mut self.integer_comparison.to_bytes()?); - ret.append(&mut self.conversion.to_bytes()?); - 
ret.append(&mut self.unreachable.to_bytes()?); - ret.append(&mut self.nop.to_bytes()?); - ret.append(&mut self.current_memory.to_bytes()?); - ret.append(&mut self.grow_memory.to_bytes()?); - ret.append(&mut self.regular.to_bytes()?); - - Ok(ret) - } - - fn serialized_length(&self) -> usize { - OPCODE_COSTS_SERIALIZED_LENGTH - } -} - -impl FromBytes for OpcodeCosts { - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { - let (bit, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; - let (add, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; - let (mul, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; - let (div, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; - let (load, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; - let (store, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; - let (const_, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; - let (local, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; - let (global, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; - let (control_flow, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; - let (integer_comparison, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; - let (conversion, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; - let (unreachable, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; - let (nop, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; - let (current_memory, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; - let (grow_memory, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; - let (regular, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; - let opcode_costs = OpcodeCosts { - bit, - add, - mul, - div, - load, - store, - op_const: const_, - local, - global, - control_flow, - integer_comparison, - conversion, - unreachable, - nop, - current_memory, - grow_memory, - regular, - }; - Ok((opcode_costs, bytes)) - } -} - -#[cfg(any(feature = "gens", test))] -pub mod gens { - use proptest::{num, prop_compose}; - - use 
crate::shared::opcode_costs::OpcodeCosts; - - prop_compose! { - pub fn opcode_costs_arb()( - bit in num::u32::ANY, - add in num::u32::ANY, - mul in num::u32::ANY, - div in num::u32::ANY, - load in num::u32::ANY, - store in num::u32::ANY, - op_const in num::u32::ANY, - local in num::u32::ANY, - global in num::u32::ANY, - control_flow in num::u32::ANY, - integer_comparison in num::u32::ANY, - conversion in num::u32::ANY, - unreachable in num::u32::ANY, - nop in num::u32::ANY, - current_memory in num::u32::ANY, - grow_memory in num::u32::ANY, - regular in num::u32::ANY, - ) -> OpcodeCosts { - OpcodeCosts { - bit, - add, - mul, - div, - load, - store, - op_const, - local, - global, - control_flow, - integer_comparison, - conversion, - unreachable, - nop, - current_memory, - grow_memory, - regular, - } - } - } -} - -#[cfg(test)] -mod tests { - use proptest::proptest; - - use casper_types::bytesrepr; - - use super::gens; - - proptest! { - #[test] - fn should_serialize_and_deserialize_with_arbitrary_values( - opcode_costs in gens::opcode_costs_arb() - ) { - bytesrepr::test_serialization_roundtrip(&opcode_costs); - } - } -} diff --git a/execution_engine/src/shared/socket.rs b/execution_engine/src/shared/socket.rs deleted file mode 100644 index ced94cd805..0000000000 --- a/execution_engine/src/shared/socket.rs +++ /dev/null @@ -1,34 +0,0 @@ -use std::{io, path::Path}; - -pub struct Socket(String); - -impl Socket { - pub fn new(socket: String) -> Self { - Socket(socket) - } - - pub fn value(&self) -> String { - self.0.clone() - } - - pub fn as_str(&self) -> &str { - self.0.as_str() - } - - pub fn get_path(&self) -> &Path { - std::path::Path::new(&self.0) - } - - /// Safely removes file pointed out by a path. - /// - /// In practice this tries to remove the file, and if - /// the file does not exist, it ignores it, and propagates - /// any other error. 
- pub fn remove_file(&self) -> io::Result<()> { - let path = self.get_path(); - match std::fs::remove_file(path) { - Err(ref e) if e.kind() == io::ErrorKind::NotFound => Ok(()), - result => result, - } - } -} diff --git a/execution_engine/src/shared/storage_costs.rs b/execution_engine/src/shared/storage_costs.rs deleted file mode 100644 index 7a061ccc91..0000000000 --- a/execution_engine/src/shared/storage_costs.rs +++ /dev/null @@ -1,119 +0,0 @@ -use datasize::DataSize; -use rand::{distributions::Standard, prelude::*, Rng}; -use serde::{Deserialize, Serialize}; - -use casper_types::{ - bytesrepr::{self, FromBytes, ToBytes}, - U512, -}; - -use super::gas::Gas; - -pub const DEFAULT_GAS_PER_BYTE_COST: u32 = 625_000; - -#[derive(Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Debug, DataSize)] -pub struct StorageCosts { - /// Gas charged per byte stored in the global state. - gas_per_byte: u32, -} - -impl StorageCosts { - pub const fn new(gas_per_byte: u32) -> Self { - Self { gas_per_byte } - } - - pub fn gas_per_byte(&self) -> u32 { - self.gas_per_byte - } - - /// Calculates gas cost for storing `bytes`. 
- pub fn calculate_gas_cost(&self, bytes: usize) -> Gas { - let value = U512::from(self.gas_per_byte) * U512::from(bytes); - Gas::new(value) - } -} - -impl Default for StorageCosts { - fn default() -> Self { - Self { - gas_per_byte: DEFAULT_GAS_PER_BYTE_COST, - } - } -} - -impl Distribution for Standard { - fn sample(&self, rng: &mut R) -> StorageCosts { - StorageCosts { - gas_per_byte: rng.gen(), - } - } -} - -impl ToBytes for StorageCosts { - fn to_bytes(&self) -> Result, bytesrepr::Error> { - let mut ret = bytesrepr::unchecked_allocate_buffer(self); - - ret.append(&mut self.gas_per_byte.to_bytes()?); - - Ok(ret) - } - - fn serialized_length(&self) -> usize { - self.gas_per_byte.serialized_length() - } -} - -impl FromBytes for StorageCosts { - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { - let (gas_per_byte, rem) = FromBytes::from_bytes(bytes)?; - - Ok((StorageCosts { gas_per_byte }, rem)) - } -} - -#[cfg(test)] -pub mod tests { - use casper_types::U512; - - use super::*; - - const SMALL_WEIGHT: usize = 123456789; - const LARGE_WEIGHT: usize = usize::max_value(); - - #[test] - fn should_calculate_gas_cost() { - let storage_costs = StorageCosts::default(); - - let cost = storage_costs.calculate_gas_cost(SMALL_WEIGHT); - - let expected_cost = U512::from(DEFAULT_GAS_PER_BYTE_COST) * U512::from(SMALL_WEIGHT); - assert_eq!(cost, Gas::new(expected_cost)); - } - - #[test] - fn should_calculate_big_gas_cost() { - let storage_costs = StorageCosts::default(); - - let cost = storage_costs.calculate_gas_cost(LARGE_WEIGHT); - - let expected_cost = U512::from(DEFAULT_GAS_PER_BYTE_COST) * U512::from(LARGE_WEIGHT); - assert_eq!(cost, Gas::new(expected_cost)); - } -} - -#[cfg(any(feature = "gens", test))] -pub mod gens { - use proptest::{num, prop_compose}; - - use super::StorageCosts; - - prop_compose! 
{ - pub fn storage_costs_arb()( - gas_per_byte in num::u32::ANY, - ) -> StorageCosts { - StorageCosts { - gas_per_byte, - } - } - } -} diff --git a/execution_engine/src/shared/stored_value.rs b/execution_engine/src/shared/stored_value.rs deleted file mode 100644 index e8ab97e8ae..0000000000 --- a/execution_engine/src/shared/stored_value.rs +++ /dev/null @@ -1,423 +0,0 @@ -use std::{convert::TryFrom, fmt::Debug}; - -use serde::{de, ser, Deserialize, Deserializer, Serialize, Serializer}; -use serde_bytes::ByteBuf; - -use casper_types::{ - bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, - contracts::ContractPackage, - system::auction::{Bid, EraInfo, SeigniorageRecipients, UnbondingPurse}, - CLValue, Contract, ContractWasm, DeployInfo, Transfer, -}; - -use crate::shared::{account::Account, TypeMismatch}; - -#[repr(u8)] -enum Tag { - CLValue = 0, - Account = 1, - ContractWasm = 2, - Contract = 3, - ContractPackage = 4, - Transfer = 5, - DeployInfo = 6, - EraInfo = 7, - Bid = 8, - Withdraw = 9, - EraValidators = 10, -} - -#[derive(Eq, PartialEq, Clone, Debug)] -pub enum StoredValue { - CLValue(CLValue), - Account(Account), - ContractWasm(ContractWasm), - Contract(Contract), - ContractPackage(ContractPackage), - Transfer(Transfer), - DeployInfo(DeployInfo), - EraInfo(EraInfo), - Bid(Box), - Withdraw(Vec), - EraValidators(SeigniorageRecipients), -} - -impl StoredValue { - pub fn as_cl_value(&self) -> Option<&CLValue> { - match self { - StoredValue::CLValue(cl_value) => Some(cl_value), - _ => None, - } - } - - pub fn as_account(&self) -> Option<&Account> { - match self { - StoredValue::Account(account) => Some(account), - _ => None, - } - } - - pub fn as_contract(&self) -> Option<&Contract> { - match self { - StoredValue::Contract(contract) => Some(contract), - _ => None, - } - } - - pub fn as_contract_wasm(&self) -> Option<&ContractWasm> { - match self { - StoredValue::ContractWasm(contract_wasm) => Some(contract_wasm), - _ => None, - } - } - - pub fn 
as_contract_package(&self) -> Option<&ContractPackage> { - match self { - StoredValue::ContractPackage(contract_package) => Some(&contract_package), - _ => None, - } - } - - pub fn as_deploy_info(&self) -> Option<&DeployInfo> { - match self { - StoredValue::DeployInfo(deploy_info) => Some(deploy_info), - _ => None, - } - } - - pub fn as_era_info(&self) -> Option<&EraInfo> { - match self { - StoredValue::EraInfo(era_info) => Some(era_info), - _ => None, - } - } - - pub fn as_bid(&self) -> Option<&Bid> { - match self { - StoredValue::Bid(bid) => Some(bid), - _ => None, - } - } - - pub fn as_withdraw(&self) -> Option<&Vec> { - match self { - StoredValue::Withdraw(unbonding_purses) => Some(unbonding_purses), - _ => None, - } - } - - pub fn as_era_validators(&self) -> Option<&SeigniorageRecipients> { - match self { - StoredValue::EraValidators(recipients) => Some(recipients), - _ => None, - } - } - - pub fn type_name(&self) -> String { - match self { - StoredValue::CLValue(cl_value) => format!("{:?}", cl_value.cl_type()), - StoredValue::Account(_) => "Account".to_string(), - StoredValue::ContractWasm(_) => "Contract".to_string(), - StoredValue::Contract(_) => "Contract".to_string(), - StoredValue::ContractPackage(_) => "ContractPackage".to_string(), - StoredValue::Transfer(_) => "Transfer".to_string(), - StoredValue::DeployInfo(_) => "DeployInfo".to_string(), - StoredValue::EraInfo(_) => "EraInfo".to_string(), - StoredValue::Bid(_) => "Bid".to_string(), - StoredValue::Withdraw(_) => "Withdraw".to_string(), - StoredValue::EraValidators(_) => "EraValidators".to_string(), - } - } -} - -impl From for StoredValue { - fn from(value: CLValue) -> StoredValue { - StoredValue::CLValue(value) - } -} -impl From for StoredValue { - fn from(value: Account) -> StoredValue { - StoredValue::Account(value) - } -} -impl From for StoredValue { - fn from(value: ContractWasm) -> StoredValue { - StoredValue::ContractWasm(value) - } -} -impl From for StoredValue { - fn from(value: Contract) -> 
StoredValue { - StoredValue::Contract(value) - } -} -impl From for StoredValue { - fn from(value: ContractPackage) -> StoredValue { - StoredValue::ContractPackage(value) - } -} -impl From for StoredValue { - fn from(bid: Bid) -> StoredValue { - StoredValue::Bid(Box::new(bid)) - } -} - -impl From for StoredValue { - fn from(recipients: SeigniorageRecipients) -> StoredValue { - StoredValue::EraValidators(recipients) - } -} - -impl TryFrom for CLValue { - type Error = TypeMismatch; - - fn try_from(stored_value: StoredValue) -> Result { - match stored_value { - StoredValue::CLValue(cl_value) => Ok(cl_value), - _ => Err(TypeMismatch::new( - "CLValue".to_string(), - stored_value.type_name(), - )), - } - } -} - -impl TryFrom for Account { - type Error = TypeMismatch; - - fn try_from(stored_value: StoredValue) -> Result { - match stored_value { - StoredValue::Account(account) => Ok(account), - _ => Err(TypeMismatch::new( - "Account".to_string(), - stored_value.type_name(), - )), - } - } -} - -impl TryFrom for ContractWasm { - type Error = TypeMismatch; - - fn try_from(stored_value: StoredValue) -> Result { - match stored_value { - StoredValue::ContractWasm(contract_wasm) => Ok(contract_wasm), - _ => Err(TypeMismatch::new( - "ContractWasm".to_string(), - stored_value.type_name(), - )), - } - } -} - -impl TryFrom for ContractPackage { - type Error = TypeMismatch; - - fn try_from(stored_value: StoredValue) -> Result { - match stored_value { - StoredValue::ContractPackage(contract_package) => Ok(contract_package), - _ => Err(TypeMismatch::new( - "ContractPackage".to_string(), - stored_value.type_name(), - )), - } - } -} - -impl TryFrom for Contract { - type Error = TypeMismatch; - - fn try_from(stored_value: StoredValue) -> Result { - match stored_value { - StoredValue::Contract(contract) => Ok(contract), - _ => Err(TypeMismatch::new( - "Contract".to_string(), - stored_value.type_name(), - )), - } - } -} - -impl TryFrom for Transfer { - type Error = TypeMismatch; - - fn 
try_from(value: StoredValue) -> Result { - match value { - StoredValue::Transfer(transfer) => Ok(transfer), - _ => Err(TypeMismatch::new("Transfer".to_string(), value.type_name())), - } - } -} - -impl TryFrom for DeployInfo { - type Error = TypeMismatch; - - fn try_from(value: StoredValue) -> Result { - match value { - StoredValue::DeployInfo(deploy_info) => Ok(deploy_info), - _ => Err(TypeMismatch::new( - "DeployInfo".to_string(), - value.type_name(), - )), - } - } -} - -impl TryFrom for EraInfo { - type Error = TypeMismatch; - - fn try_from(value: StoredValue) -> Result { - match value { - StoredValue::EraInfo(era_info) => Ok(era_info), - _ => Err(TypeMismatch::new("EraInfo".to_string(), value.type_name())), - } - } -} - -impl ToBytes for StoredValue { - fn to_bytes(&self) -> Result, bytesrepr::Error> { - let mut result = bytesrepr::allocate_buffer(self)?; - let (tag, mut serialized_data) = match self { - StoredValue::CLValue(cl_value) => (Tag::CLValue, cl_value.to_bytes()?), - StoredValue::Account(account) => (Tag::Account, account.to_bytes()?), - StoredValue::ContractWasm(contract_wasm) => { - (Tag::ContractWasm, contract_wasm.to_bytes()?) - } - StoredValue::Contract(contract_header) => (Tag::Contract, contract_header.to_bytes()?), - StoredValue::ContractPackage(contract_package) => { - (Tag::ContractPackage, contract_package.to_bytes()?) - } - StoredValue::Transfer(transfer) => (Tag::Transfer, transfer.to_bytes()?), - StoredValue::DeployInfo(deploy_info) => (Tag::DeployInfo, deploy_info.to_bytes()?), - StoredValue::EraInfo(era_info) => (Tag::EraInfo, era_info.to_bytes()?), - StoredValue::Bid(bid) => (Tag::Bid, bid.to_bytes()?), - StoredValue::Withdraw(unbonding_purses) => { - (Tag::Withdraw, unbonding_purses.to_bytes()?) 
- } - StoredValue::EraValidators(recipients) => (Tag::EraValidators, recipients.to_bytes()?), - }; - result.push(tag as u8); - result.append(&mut serialized_data); - Ok(result) - } - - fn serialized_length(&self) -> usize { - U8_SERIALIZED_LENGTH - + match self { - StoredValue::CLValue(cl_value) => cl_value.serialized_length(), - StoredValue::Account(account) => account.serialized_length(), - StoredValue::ContractWasm(contract_wasm) => contract_wasm.serialized_length(), - StoredValue::Contract(contract_header) => contract_header.serialized_length(), - StoredValue::ContractPackage(contract_package) => { - contract_package.serialized_length() - } - StoredValue::Transfer(transfer) => transfer.serialized_length(), - StoredValue::DeployInfo(deploy_info) => deploy_info.serialized_length(), - StoredValue::EraInfo(era_info) => era_info.serialized_length(), - StoredValue::Bid(bid) => bid.serialized_length(), - StoredValue::Withdraw(unbonding_purses) => unbonding_purses.serialized_length(), - StoredValue::EraValidators(recipients) => recipients.serialized_length(), - } - } -} - -impl FromBytes for StoredValue { - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { - let (tag, remainder): (u8, &[u8]) = FromBytes::from_bytes(bytes)?; - match tag { - tag if tag == Tag::CLValue as u8 => CLValue::from_bytes(remainder) - .map(|(cl_value, remainder)| (StoredValue::CLValue(cl_value), remainder)), - tag if tag == Tag::Account as u8 => Account::from_bytes(remainder) - .map(|(account, remainder)| (StoredValue::Account(account), remainder)), - tag if tag == Tag::ContractWasm as u8 => { - ContractWasm::from_bytes(remainder).map(|(contract_wasm, remainder)| { - (StoredValue::ContractWasm(contract_wasm), remainder) - }) - } - tag if tag == Tag::ContractPackage as u8 => { - ContractPackage::from_bytes(remainder).map(|(contract_package, remainder)| { - (StoredValue::ContractPackage(contract_package), remainder) - }) - } - tag if tag == Tag::Contract as u8 => 
Contract::from_bytes(remainder) - .map(|(contract, remainder)| (StoredValue::Contract(contract), remainder)), - tag if tag == Tag::Transfer as u8 => Transfer::from_bytes(remainder) - .map(|(transfer, remainder)| (StoredValue::Transfer(transfer), remainder)), - tag if tag == Tag::DeployInfo as u8 => DeployInfo::from_bytes(remainder) - .map(|(deploy_info, remainder)| (StoredValue::DeployInfo(deploy_info), remainder)), - tag if tag == Tag::EraInfo as u8 => EraInfo::from_bytes(remainder) - .map(|(deploy_info, remainder)| (StoredValue::EraInfo(deploy_info), remainder)), - tag if tag == Tag::Bid as u8 => Bid::from_bytes(remainder) - .map(|(bid, remainder)| (StoredValue::Bid(Box::new(bid)), remainder)), - tag if tag == Tag::Withdraw as u8 => { - Vec::::from_bytes(remainder).map(|(unbonding_purses, remainder)| { - (StoredValue::Withdraw(unbonding_purses), remainder) - }) - } - tag if tag == Tag::EraValidators as u8 => SeigniorageRecipients::from_bytes(remainder) - .map(|(recipients, remainder)| (StoredValue::EraValidators(recipients), remainder)), - _ => Err(bytesrepr::Error::Formatting), - } - } -} - -impl Serialize for StoredValue { - fn serialize(&self, serializer: S) -> Result { - // The JSON representation of a StoredValue is just its bytesrepr - // While this makes it harder to inspect, it makes deterministic representation simple. - let bytes = self - .to_bytes() - .map_err(|error| ser::Error::custom(format!("{:?}", error)))?; - ByteBuf::from(bytes).serialize(serializer) - } -} - -impl<'de> Deserialize<'de> for StoredValue { - fn deserialize>(deserializer: D) -> Result { - let bytes = ByteBuf::deserialize(deserializer)?.into_vec(); - Ok(bytesrepr::deserialize::(bytes) - .map_err(|error| de::Error::custom(format!("{:?}", error)))?) 
- } -} - -#[cfg(any(feature = "gens", test))] -pub mod gens { - use proptest::prelude::*; - - use casper_types::{ - gens::{ - cl_value_arb, contract_arb, contract_package_arb, contract_wasm_arb, deploy_info_arb, - transfer_arb, - }, - system::auction::gens::era_info_arb, - }; - - use super::StoredValue; - use crate::shared::account::gens::account_arb; - - pub fn stored_value_arb() -> impl Strategy { - prop_oneof![ - cl_value_arb().prop_map(StoredValue::CLValue), - account_arb().prop_map(StoredValue::Account), - contract_package_arb().prop_map(StoredValue::ContractPackage), - contract_arb().prop_map(StoredValue::Contract), - contract_wasm_arb().prop_map(StoredValue::ContractWasm), - era_info_arb(1..10).prop_map(StoredValue::EraInfo), - deploy_info_arb().prop_map(StoredValue::DeployInfo), - transfer_arb().prop_map(StoredValue::Transfer) - ] - } -} - -#[cfg(test)] -mod tests { - use proptest::proptest; - - use super::*; - - proptest! { - #[test] - fn serialization_roundtrip(v in gens::stored_value_arb()) { - bytesrepr::test_serialization_roundtrip(&v); - } - } -} diff --git a/execution_engine/src/shared/system_config.rs b/execution_engine/src/shared/system_config.rs deleted file mode 100644 index 77eee75d1a..0000000000 --- a/execution_engine/src/shared/system_config.rs +++ /dev/null @@ -1,167 +0,0 @@ -pub mod auction_costs; -pub mod handle_payment_costs; -pub mod mint_costs; -pub mod standard_payment_costs; - -use datasize::DataSize; -use rand::{distributions::Standard, prelude::*, Rng}; -use serde::{Deserialize, Serialize}; - -use casper_types::bytesrepr::{self, FromBytes, ToBytes}; - -use self::{ - auction_costs::AuctionCosts, handle_payment_costs::HandlePaymentCosts, mint_costs::MintCosts, - standard_payment_costs::StandardPaymentCosts, -}; -use crate::storage::protocol_data::DEFAULT_WASMLESS_TRANSFER_COST; - -#[derive(Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Debug, DataSize)] -pub struct SystemConfig { - /// Wasmless transfer cost expressed in gas. 
- wasmless_transfer_cost: u32, - - /// Configuration of auction entrypoint costs. - auction_costs: AuctionCosts, - - /// Configuration of mint entrypoint costs. - mint_costs: MintCosts, - - /// Configuration of handle payment entrypoint costs. - handle_payment_costs: HandlePaymentCosts, - - /// Configuration of standard payment costs. - standard_payment_costs: StandardPaymentCosts, -} - -impl SystemConfig { - pub fn new( - wasmless_transfer_cost: u32, - auction_costs: AuctionCosts, - mint_costs: MintCosts, - handle_payment_costs: HandlePaymentCosts, - standard_payment_costs: StandardPaymentCosts, - ) -> Self { - Self { - wasmless_transfer_cost, - auction_costs, - mint_costs, - handle_payment_costs, - standard_payment_costs, - } - } - - pub fn wasmless_transfer_cost(&self) -> u32 { - self.wasmless_transfer_cost - } - - pub fn auction_costs(&self) -> &AuctionCosts { - &self.auction_costs - } - - pub fn mint_costs(&self) -> &MintCosts { - &self.mint_costs - } - - pub fn handle_payment_costs(&self) -> &HandlePaymentCosts { - &self.handle_payment_costs - } - - pub fn standard_payment_costs(&self) -> &StandardPaymentCosts { - &self.standard_payment_costs - } -} - -impl Default for SystemConfig { - fn default() -> Self { - Self { - wasmless_transfer_cost: DEFAULT_WASMLESS_TRANSFER_COST, - auction_costs: AuctionCosts::default(), - mint_costs: MintCosts::default(), - handle_payment_costs: HandlePaymentCosts::default(), - standard_payment_costs: StandardPaymentCosts::default(), - } - } -} - -impl Distribution for Standard { - fn sample(&self, rng: &mut R) -> SystemConfig { - SystemConfig { - wasmless_transfer_cost: rng.gen(), - auction_costs: rng.gen(), - mint_costs: rng.gen(), - handle_payment_costs: rng.gen(), - standard_payment_costs: rng.gen(), - } - } -} - -impl ToBytes for SystemConfig { - fn to_bytes(&self) -> Result, casper_types::bytesrepr::Error> { - let mut ret = bytesrepr::unchecked_allocate_buffer(self); - - ret.append(&mut 
self.wasmless_transfer_cost.to_bytes()?); - ret.append(&mut self.auction_costs.to_bytes()?); - ret.append(&mut self.mint_costs.to_bytes()?); - ret.append(&mut self.handle_payment_costs.to_bytes()?); - ret.append(&mut self.standard_payment_costs.to_bytes()?); - - Ok(ret) - } - - fn serialized_length(&self) -> usize { - self.wasmless_transfer_cost.serialized_length() - + self.auction_costs.serialized_length() - + self.mint_costs.serialized_length() - + self.handle_payment_costs.serialized_length() - + self.standard_payment_costs.serialized_length() - } -} - -impl FromBytes for SystemConfig { - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), casper_types::bytesrepr::Error> { - let (wasmless_transfer_cost, rem) = FromBytes::from_bytes(bytes)?; - let (auction_costs, rem) = FromBytes::from_bytes(rem)?; - let (mint_costs, rem) = FromBytes::from_bytes(rem)?; - let (handle_payment_costs, rem) = FromBytes::from_bytes(rem)?; - let (standard_payment_costs, rem) = FromBytes::from_bytes(rem)?; - Ok(( - SystemConfig::new( - wasmless_transfer_cost, - auction_costs, - mint_costs, - handle_payment_costs, - standard_payment_costs, - ), - rem, - )) - } -} - -#[cfg(any(feature = "gens", test))] -pub mod gens { - use proptest::{num, prop_compose}; - - use super::{ - auction_costs::gens::auction_costs_arb, - handle_payment_costs::gens::handle_payment_costs_arb, mint_costs::gens::mint_costs_arb, - standard_payment_costs::gens::standard_payment_costs_arb, SystemConfig, - }; - - prop_compose! 
{ - pub fn system_config_arb()( - wasmless_transfer_cost in num::u32::ANY, - auction_costs in auction_costs_arb(), - mint_costs in mint_costs_arb(), - handle_payment_costs in handle_payment_costs_arb(), - standard_payment_costs in standard_payment_costs_arb(), - ) -> SystemConfig { - SystemConfig { - wasmless_transfer_cost, - auction_costs, - mint_costs, - handle_payment_costs, - standard_payment_costs, - } - } - } -} diff --git a/execution_engine/src/shared/system_config/auction_costs.rs b/execution_engine/src/shared/system_config/auction_costs.rs deleted file mode 100644 index ee426f2c3a..0000000000 --- a/execution_engine/src/shared/system_config/auction_costs.rs +++ /dev/null @@ -1,191 +0,0 @@ -use casper_types::bytesrepr::{self, FromBytes, ToBytes}; -use datasize::DataSize; -use rand::{distributions::Standard, prelude::*, Rng}; -use serde::{Deserialize, Serialize}; - -pub const DEFAULT_GET_ERA_VALIDATORS_COST: u32 = 10_000; -pub const DEFAULT_READ_SEIGNIORAGE_RECIPIENTS_COST: u32 = 10_000; -pub const DEFAULT_ADD_BID_COST: u32 = 10_000; -pub const DEFAULT_WITHDRAW_BID_COST: u32 = 10_000; -pub const DEFAULT_DELEGATE_COST: u32 = 10_000; -pub const DEFAULT_UNDELEGATE_COST: u32 = 10_000; -pub const DEFAULT_RUN_AUCTION_COST: u32 = 10_000; -pub const DEFAULT_SLASH_COST: u32 = 10_000; -pub const DEFAULT_DISTRIBUTE_COST: u32 = 10_000; -pub const DEFAULT_WITHDRAW_DELEGATOR_REWARD_COST: u32 = 10_000; -pub const DEFAULT_WITHDRAW_VALIDATOR_REWARD_COST: u32 = 10_000; -pub const DEFAULT_READ_ERA_ID_COST: u32 = 10_000; -pub const DEFAULT_ACTIVATE_BID_COST: u32 = 10_000; - -/// Description of costs of calling auction entrypoints. 
-#[derive(Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Debug, DataSize)] -pub struct AuctionCosts { - pub get_era_validators: u32, - pub read_seigniorage_recipients: u32, - pub add_bid: u32, - pub withdraw_bid: u32, - pub delegate: u32, - pub undelegate: u32, - pub run_auction: u32, - pub slash: u32, - pub distribute: u32, - pub withdraw_delegator_reward: u32, - pub withdraw_validator_reward: u32, - pub read_era_id: u32, - pub activate_bid: u32, -} - -impl Default for AuctionCosts { - fn default() -> Self { - Self { - get_era_validators: DEFAULT_GET_ERA_VALIDATORS_COST, - read_seigniorage_recipients: DEFAULT_READ_SEIGNIORAGE_RECIPIENTS_COST, - add_bid: DEFAULT_ADD_BID_COST, - withdraw_bid: DEFAULT_WITHDRAW_BID_COST, - delegate: DEFAULT_DELEGATE_COST, - undelegate: DEFAULT_UNDELEGATE_COST, - run_auction: DEFAULT_RUN_AUCTION_COST, - slash: DEFAULT_SLASH_COST, - distribute: DEFAULT_DISTRIBUTE_COST, - withdraw_delegator_reward: DEFAULT_WITHDRAW_DELEGATOR_REWARD_COST, - withdraw_validator_reward: DEFAULT_WITHDRAW_VALIDATOR_REWARD_COST, - read_era_id: DEFAULT_READ_ERA_ID_COST, - activate_bid: DEFAULT_ACTIVATE_BID_COST, - } - } -} - -impl ToBytes for AuctionCosts { - fn to_bytes(&self) -> Result, casper_types::bytesrepr::Error> { - let mut ret = bytesrepr::unchecked_allocate_buffer(self); - - ret.append(&mut self.get_era_validators.to_bytes()?); - ret.append(&mut self.read_seigniorage_recipients.to_bytes()?); - ret.append(&mut self.add_bid.to_bytes()?); - ret.append(&mut self.withdraw_bid.to_bytes()?); - ret.append(&mut self.delegate.to_bytes()?); - ret.append(&mut self.undelegate.to_bytes()?); - ret.append(&mut self.run_auction.to_bytes()?); - ret.append(&mut self.slash.to_bytes()?); - ret.append(&mut self.distribute.to_bytes()?); - ret.append(&mut self.withdraw_delegator_reward.to_bytes()?); - ret.append(&mut self.withdraw_validator_reward.to_bytes()?); - ret.append(&mut self.read_era_id.to_bytes()?); - ret.append(&mut self.activate_bid.to_bytes()?); - - Ok(ret) 
- } - - fn serialized_length(&self) -> usize { - self.get_era_validators.serialized_length() - + self.read_seigniorage_recipients.serialized_length() - + self.add_bid.serialized_length() - + self.withdraw_bid.serialized_length() - + self.delegate.serialized_length() - + self.undelegate.serialized_length() - + self.run_auction.serialized_length() - + self.slash.serialized_length() - + self.distribute.serialized_length() - + self.withdraw_delegator_reward.serialized_length() - + self.withdraw_validator_reward.serialized_length() - + self.read_era_id.serialized_length() - + self.activate_bid.serialized_length() - } -} - -impl FromBytes for AuctionCosts { - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), casper_types::bytesrepr::Error> { - let (get_era_validators, rem) = FromBytes::from_bytes(bytes)?; - let (read_seigniorage_recipients, rem) = FromBytes::from_bytes(rem)?; - let (add_bid, rem) = FromBytes::from_bytes(rem)?; - let (withdraw_bid, rem) = FromBytes::from_bytes(rem)?; - let (delegate, rem) = FromBytes::from_bytes(rem)?; - let (undelegate, rem) = FromBytes::from_bytes(rem)?; - let (run_auction, rem) = FromBytes::from_bytes(rem)?; - let (slash, rem) = FromBytes::from_bytes(rem)?; - let (distribute, rem) = FromBytes::from_bytes(rem)?; - let (withdraw_delegator_reward, rem) = FromBytes::from_bytes(rem)?; - let (withdraw_validator_reward, rem) = FromBytes::from_bytes(rem)?; - let (read_era_id, rem) = FromBytes::from_bytes(rem)?; - let (activate_bid, rem) = FromBytes::from_bytes(rem)?; - Ok(( - Self { - get_era_validators, - read_seigniorage_recipients, - add_bid, - withdraw_bid, - delegate, - undelegate, - run_auction, - slash, - distribute, - withdraw_delegator_reward, - withdraw_validator_reward, - read_era_id, - activate_bid, - }, - rem, - )) - } -} - -impl Distribution for Standard { - fn sample(&self, rng: &mut R) -> AuctionCosts { - AuctionCosts { - get_era_validators: rng.gen(), - read_seigniorage_recipients: rng.gen(), - add_bid: rng.gen(), - 
withdraw_bid: rng.gen(), - delegate: rng.gen(), - undelegate: rng.gen(), - run_auction: rng.gen(), - slash: rng.gen(), - distribute: rng.gen(), - withdraw_delegator_reward: rng.gen(), - withdraw_validator_reward: rng.gen(), - read_era_id: rng.gen(), - activate_bid: rng.gen(), - } - } -} - -#[cfg(any(feature = "gens", test))] -pub mod gens { - use proptest::{num, prop_compose}; - - use super::AuctionCosts; - - prop_compose! { - pub fn auction_costs_arb()( - get_era_validators in num::u32::ANY, - read_seigniorage_recipients in num::u32::ANY, - add_bid in num::u32::ANY, - withdraw_bid in num::u32::ANY, - delegate in num::u32::ANY, - undelegate in num::u32::ANY, - run_auction in num::u32::ANY, - slash in num::u32::ANY, - distribute in num::u32::ANY, - withdraw_delegator_reward in num::u32::ANY, - withdraw_validator_reward in num::u32::ANY, - read_era_id in num::u32::ANY, - activate_bid in num::u32::ANY, - ) -> AuctionCosts { - AuctionCosts { - get_era_validators, - read_seigniorage_recipients, - add_bid, - withdraw_bid, - delegate, - undelegate, - run_auction, - slash, - distribute, - withdraw_delegator_reward, - withdraw_validator_reward, - read_era_id, - activate_bid, - } - } - } -} diff --git a/execution_engine/src/shared/system_config/handle_payment_costs.rs b/execution_engine/src/shared/system_config/handle_payment_costs.rs deleted file mode 100644 index ae7f91fdec..0000000000 --- a/execution_engine/src/shared/system_config/handle_payment_costs.rs +++ /dev/null @@ -1,102 +0,0 @@ -use casper_types::bytesrepr::{self, FromBytes, ToBytes}; -use datasize::DataSize; -use rand::{distributions::Standard, prelude::*, Rng}; -use serde::{Deserialize, Serialize}; - -pub const DEFAULT_GET_PAYMENT_PURSE_COST: u32 = 10_000; -pub const DEFAULT_SET_REFUND_PURSE_COST: u32 = 10_000; -pub const DEFAULT_GET_REFUND_PURSE_COST: u32 = 10_000; -pub const DEFAULT_FINALIZE_PAYMENT_COST: u32 = 10_000; - -/// Description of costs of calling handle payment entrypoints. 
-#[derive(Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Debug, DataSize)] -pub struct HandlePaymentCosts { - pub get_payment_purse: u32, - pub set_refund_purse: u32, - pub get_refund_purse: u32, - pub finalize_payment: u32, -} - -impl Default for HandlePaymentCosts { - fn default() -> Self { - Self { - get_payment_purse: DEFAULT_GET_PAYMENT_PURSE_COST, - set_refund_purse: DEFAULT_SET_REFUND_PURSE_COST, - get_refund_purse: DEFAULT_GET_REFUND_PURSE_COST, - finalize_payment: DEFAULT_FINALIZE_PAYMENT_COST, - } - } -} - -impl ToBytes for HandlePaymentCosts { - fn to_bytes(&self) -> Result, casper_types::bytesrepr::Error> { - let mut ret = bytesrepr::unchecked_allocate_buffer(self); - - ret.append(&mut self.get_payment_purse.to_bytes()?); - ret.append(&mut self.set_refund_purse.to_bytes()?); - ret.append(&mut self.get_refund_purse.to_bytes()?); - ret.append(&mut self.finalize_payment.to_bytes()?); - - Ok(ret) - } - - fn serialized_length(&self) -> usize { - self.get_payment_purse.serialized_length() - + self.set_refund_purse.serialized_length() - + self.get_refund_purse.serialized_length() - + self.finalize_payment.serialized_length() - } -} - -impl FromBytes for HandlePaymentCosts { - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), casper_types::bytesrepr::Error> { - let (get_payment_purse, rem) = FromBytes::from_bytes(bytes)?; - let (set_refund_purse, rem) = FromBytes::from_bytes(rem)?; - let (get_refund_purse, rem) = FromBytes::from_bytes(rem)?; - let (finalize_payment, rem) = FromBytes::from_bytes(rem)?; - - Ok(( - Self { - get_payment_purse, - set_refund_purse, - get_refund_purse, - finalize_payment, - }, - rem, - )) - } -} - -impl Distribution for Standard { - fn sample(&self, rng: &mut R) -> HandlePaymentCosts { - HandlePaymentCosts { - get_payment_purse: rng.gen(), - set_refund_purse: rng.gen(), - get_refund_purse: rng.gen(), - finalize_payment: rng.gen(), - } - } -} - -#[cfg(any(feature = "gens", test))] -pub mod gens { - use proptest::{num, 
prop_compose}; - - use super::HandlePaymentCosts; - - prop_compose! { - pub fn handle_payment_costs_arb()( - get_payment_purse in num::u32::ANY, - set_refund_purse in num::u32::ANY, - get_refund_purse in num::u32::ANY, - finalize_payment in num::u32::ANY, - ) -> HandlePaymentCosts { - HandlePaymentCosts { - get_payment_purse, - set_refund_purse, - get_refund_purse, - finalize_payment, - } - } - } -} diff --git a/execution_engine/src/shared/system_config/mint_costs.rs b/execution_engine/src/shared/system_config/mint_costs.rs deleted file mode 100644 index 7a532c7753..0000000000 --- a/execution_engine/src/shared/system_config/mint_costs.rs +++ /dev/null @@ -1,122 +0,0 @@ -use casper_types::bytesrepr::{self, FromBytes, ToBytes}; -use datasize::DataSize; -use rand::{distributions::Standard, prelude::*, Rng}; -use serde::{Deserialize, Serialize}; - -pub const DEFAULT_MINT_COST: u32 = 10_000; -pub const DEFAULT_REDUCE_TOTAL_SUPPLY_COST: u32 = 10_000; -pub const DEFAULT_CREATE_COST: u32 = 10_000; -pub const DEFAULT_BALANCE_COST: u32 = 10_000; -pub const DEFAULT_TRANSFER_COST: u32 = 10_000; -pub const DEFAULT_READ_BASE_ROUND_REWARD_COST: u32 = 10_000; - -/// Description of costs of calling mint entrypoints. 
-#[derive(Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Debug, DataSize)] -pub struct MintCosts { - pub mint: u32, - pub reduce_total_supply: u32, - pub create: u32, - pub balance: u32, - pub transfer: u32, - pub read_base_round_reward: u32, -} - -impl Default for MintCosts { - fn default() -> Self { - Self { - mint: DEFAULT_MINT_COST, - reduce_total_supply: DEFAULT_REDUCE_TOTAL_SUPPLY_COST, - create: DEFAULT_CREATE_COST, - balance: DEFAULT_BALANCE_COST, - transfer: DEFAULT_TRANSFER_COST, - read_base_round_reward: DEFAULT_READ_BASE_ROUND_REWARD_COST, - } - } -} - -impl ToBytes for MintCosts { - fn to_bytes(&self) -> Result, casper_types::bytesrepr::Error> { - let mut ret = bytesrepr::unchecked_allocate_buffer(self); - - ret.append(&mut self.mint.to_bytes()?); - ret.append(&mut self.reduce_total_supply.to_bytes()?); - ret.append(&mut self.create.to_bytes()?); - ret.append(&mut self.balance.to_bytes()?); - ret.append(&mut self.transfer.to_bytes()?); - ret.append(&mut self.read_base_round_reward.to_bytes()?); - - Ok(ret) - } - - fn serialized_length(&self) -> usize { - self.mint.serialized_length() - + self.reduce_total_supply.serialized_length() - + self.create.serialized_length() - + self.balance.serialized_length() - + self.transfer.serialized_length() - + self.read_base_round_reward.serialized_length() - } -} - -impl FromBytes for MintCosts { - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), casper_types::bytesrepr::Error> { - let (mint, rem) = FromBytes::from_bytes(bytes)?; - let (reduce_total_supply, rem) = FromBytes::from_bytes(rem)?; - let (create, rem) = FromBytes::from_bytes(rem)?; - let (balance, rem) = FromBytes::from_bytes(rem)?; - let (transfer, rem) = FromBytes::from_bytes(rem)?; - let (read_base_round_reward, rem) = FromBytes::from_bytes(rem)?; - - Ok(( - Self { - mint, - reduce_total_supply, - create, - balance, - transfer, - read_base_round_reward, - }, - rem, - )) - } -} - -impl Distribution for Standard { - fn sample(&self, rng: &mut R) 
-> MintCosts { - MintCosts { - mint: rng.gen(), - reduce_total_supply: rng.gen(), - create: rng.gen(), - balance: rng.gen(), - transfer: rng.gen(), - read_base_round_reward: rng.gen(), - } - } -} - -#[cfg(any(feature = "gens", test))] -pub mod gens { - use proptest::{num, prop_compose}; - - use super::MintCosts; - - prop_compose! { - pub fn mint_costs_arb()( - mint in num::u32::ANY, - reduce_total_supply in num::u32::ANY, - create in num::u32::ANY, - balance in num::u32::ANY, - transfer in num::u32::ANY, - read_base_round_reward in num::u32::ANY, - ) -> MintCosts { - MintCosts { - mint, - reduce_total_supply, - create, - balance, - transfer, - read_base_round_reward, - } - } - } -} diff --git a/execution_engine/src/shared/system_config/standard_payment_costs.rs b/execution_engine/src/shared/system_config/standard_payment_costs.rs deleted file mode 100644 index 3773912660..0000000000 --- a/execution_engine/src/shared/system_config/standard_payment_costs.rs +++ /dev/null @@ -1,62 +0,0 @@ -use casper_types::bytesrepr::{self, FromBytes, ToBytes}; -use datasize::DataSize; -use rand::{distributions::Standard, prelude::*, Rng}; -use serde::{Deserialize, Serialize}; - -pub const DEFAULT_PAY_COST: u32 = 10_000; - -/// Description of costs of calling standard payment entrypoints. 
-#[derive(Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Debug, DataSize)] -pub struct StandardPaymentCosts { - pub pay: u32, -} - -impl Default for StandardPaymentCosts { - fn default() -> Self { - Self { - pay: DEFAULT_PAY_COST, - } - } -} - -impl ToBytes for StandardPaymentCosts { - fn to_bytes(&self) -> Result, casper_types::bytesrepr::Error> { - let mut ret = bytesrepr::unchecked_allocate_buffer(self); - ret.append(&mut self.pay.to_bytes()?); - Ok(ret) - } - - fn serialized_length(&self) -> usize { - self.pay.serialized_length() - } -} - -impl FromBytes for StandardPaymentCosts { - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), casper_types::bytesrepr::Error> { - let (pay, rem) = FromBytes::from_bytes(bytes)?; - Ok((Self { pay }, rem)) - } -} - -impl Distribution for Standard { - fn sample(&self, rng: &mut R) -> StandardPaymentCosts { - StandardPaymentCosts { pay: rng.gen() } - } -} - -#[cfg(any(feature = "gens", test))] -pub mod gens { - use proptest::{num, prop_compose}; - - use super::StandardPaymentCosts; - - prop_compose! { - pub fn standard_payment_costs_arb()( - pay in num::u32::ANY, - ) -> StandardPaymentCosts { - StandardPaymentCosts { - pay, - } - } - } -} diff --git a/execution_engine/src/shared/test_utils.rs b/execution_engine/src/shared/test_utils.rs deleted file mode 100644 index 7698199b10..0000000000 --- a/execution_engine/src/shared/test_utils.rs +++ /dev/null @@ -1,12 +0,0 @@ -//! Some functions to use in tests. 
- -use casper_types::{account::AccountHash, contracts::NamedKeys, AccessRights, Key, URef}; - -use crate::shared::{account::Account, stored_value::StoredValue}; - -/// Returns an account value paired with its key -pub fn mocked_account(account_hash: AccountHash) -> Vec<(Key, StoredValue)> { - let purse = URef::new([0u8; 32], AccessRights::READ_ADD_WRITE); - let account = Account::create(account_hash, NamedKeys::new(), purse); - vec![(Key::Account(account_hash), StoredValue::Account(account))] -} diff --git a/execution_engine/src/shared/transform.rs b/execution_engine/src/shared/transform.rs deleted file mode 100644 index 6e2b007f4b..0000000000 --- a/execution_engine/src/shared/transform.rs +++ /dev/null @@ -1,848 +0,0 @@ -use std::{ - any, - convert::TryFrom, - default::Default, - fmt::{self, Display, Formatter}, - ops::{Add, AddAssign}, -}; - -use num::traits::{AsPrimitive, WrappingAdd}; - -use casper_types::{ - bytesrepr::{self, FromBytes, ToBytes}, - contracts::NamedKeys, - CLType, CLTyped, CLValue, CLValueError, U128, U256, U512, -}; - -use crate::shared::{stored_value::StoredValue, TypeMismatch}; - -/// Error type for applying and combining transforms. A `TypeMismatch` -/// occurs when a transform cannot be applied because the types are -/// not compatible (e.g. trying to add a number to a string). An -/// `Overflow` occurs if addition between numbers would result in the -/// value overflowing its size in memory (e.g. if a, b are i32 and a + -/// b > i32::MAX then a `AddInt32(a).apply(Value::Int32(b))` would -/// cause an overflow). 
-#[derive(PartialEq, Eq, Debug, Clone)] -pub enum Error { - Serialization(bytesrepr::Error), - TypeMismatch(TypeMismatch), -} - -impl Display for Error { - fn fmt(&self, f: &mut Formatter) -> fmt::Result { - match self { - Error::Serialization(error) => write!(f, "{}", error), - Error::TypeMismatch(error) => write!(f, "{}", error), - } - } -} - -impl From for Error { - fn from(t: TypeMismatch) -> Error { - Error::TypeMismatch(t) - } -} - -impl From for Error { - fn from(cl_value_error: CLValueError) -> Error { - match cl_value_error { - CLValueError::Serialization(error) => Error::Serialization(error), - CLValueError::Type(cl_type_mismatch) => { - let expected = format!("{:?}", cl_type_mismatch.expected); - let found = format!("{:?}", cl_type_mismatch.found); - let type_mismatch = TypeMismatch { expected, found }; - Error::TypeMismatch(type_mismatch) - } - } - } -} - -#[allow(clippy::large_enum_variant)] -#[derive(PartialEq, Eq, Debug, Clone)] -pub enum Transform { - Identity, - Write(StoredValue), - AddInt32(i32), - AddUInt64(u64), - AddUInt128(U128), - AddUInt256(U256), - AddUInt512(U512), - AddKeys(NamedKeys), - Failure(Error), -} - -macro_rules! from_try_from_impl { - ($type:ty, $variant:ident) => { - impl From<$type> for Transform { - fn from(x: $type) -> Self { - Transform::$variant(x) - } - } - - impl TryFrom for $type { - type Error = String; - - fn try_from(t: Transform) -> Result<$type, String> { - match t { - Transform::$variant(x) => Ok(x), - other => Err(format!("{:?}", other)), - } - } - } - }; -} - -from_try_from_impl!(StoredValue, Write); -from_try_from_impl!(i32, AddInt32); -from_try_from_impl!(u64, AddUInt64); -from_try_from_impl!(U128, AddUInt128); -from_try_from_impl!(U256, AddUInt256); -from_try_from_impl!(U512, AddUInt512); -from_try_from_impl!(NamedKeys, AddKeys); -from_try_from_impl!(Error, Failure); - -/// Attempts a wrapping addition of `to_add` to `stored_value`, assuming `stored_value` is -/// compatible with type `Y`. 
-fn wrapping_addition(stored_value: StoredValue, to_add: Y) -> Result -where - Y: AsPrimitive - + AsPrimitive - + AsPrimitive - + AsPrimitive - + AsPrimitive - + AsPrimitive - + AsPrimitive - + AsPrimitive, -{ - let cl_value = CLValue::try_from(stored_value)?; - - match cl_value.cl_type() { - CLType::I32 => do_wrapping_addition::(cl_value, to_add), - CLType::I64 => do_wrapping_addition::(cl_value, to_add), - CLType::U8 => do_wrapping_addition::(cl_value, to_add), - CLType::U32 => do_wrapping_addition::(cl_value, to_add), - CLType::U64 => do_wrapping_addition::(cl_value, to_add), - CLType::U128 => do_wrapping_addition::(cl_value, to_add), - CLType::U256 => do_wrapping_addition::(cl_value, to_add), - CLType::U512 => do_wrapping_addition::(cl_value, to_add), - other => { - let expected = format!("integral type compatible with {}", any::type_name::()); - let found = format!("{:?}", other); - Err(TypeMismatch::new(expected, found).into()) - } - } -} - -/// Attempts a wrapping addition of `to_add` to the value represented by `cl_value`. 
-fn do_wrapping_addition(cl_value: CLValue, to_add: Y) -> Result -where - X: WrappingAdd + CLTyped + ToBytes + FromBytes + Copy + 'static, - Y: AsPrimitive, -{ - let x: X = cl_value.into_t()?; - let result = x.wrapping_add(&(to_add.as_())); - Ok(StoredValue::CLValue(CLValue::from_t(result)?)) -} - -impl Transform { - pub fn apply(self, stored_value: StoredValue) -> Result { - match self { - Transform::Identity => Ok(stored_value), - Transform::Write(new_value) => Ok(new_value), - Transform::AddInt32(to_add) => wrapping_addition(stored_value, to_add), - Transform::AddUInt64(to_add) => wrapping_addition(stored_value, to_add), - Transform::AddUInt128(to_add) => wrapping_addition(stored_value, to_add), - Transform::AddUInt256(to_add) => wrapping_addition(stored_value, to_add), - Transform::AddUInt512(to_add) => wrapping_addition(stored_value, to_add), - Transform::AddKeys(mut keys) => match stored_value { - StoredValue::Contract(mut contract) => { - contract.named_keys_append(&mut keys); - Ok(StoredValue::Contract(contract)) - } - StoredValue::Account(mut account) => { - account.named_keys_append(&mut keys); - Ok(StoredValue::Account(account)) - } - StoredValue::CLValue(cl_value) => { - let expected = "Contract or Account".to_string(); - let found = format!("{:?}", cl_value.cl_type()); - Err(TypeMismatch::new(expected, found).into()) - } - StoredValue::ContractPackage(_) => { - let expected = "Contract or Account".to_string(); - let found = "ContractPackage".to_string(); - Err(TypeMismatch::new(expected, found).into()) - } - StoredValue::ContractWasm(_) => { - let expected = "Contract or Account".to_string(); - let found = "ContractWasm".to_string(); - Err(TypeMismatch::new(expected, found).into()) - } - StoredValue::Transfer(_) => { - let expected = "Contract or Account".to_string(); - let found = "Transfer".to_string(); - Err(TypeMismatch::new(expected, found).into()) - } - StoredValue::DeployInfo(_) => { - let expected = "Contract or Account".to_string(); - let 
found = "DeployInfo".to_string(); - Err(TypeMismatch::new(expected, found).into()) - } - StoredValue::EraInfo(_) => { - let expected = "Contract or Account".to_string(); - let found = "EraInfo".to_string(); - Err(TypeMismatch::new(expected, found).into()) - } - StoredValue::Bid(_) => { - let expected = "Contract or Account".to_string(); - let found = "Bid".to_string(); - Err(TypeMismatch::new(expected, found).into()) - } - StoredValue::Withdraw(_) => { - let expected = "Contract or Account".to_string(); - let found = "Withdraw".to_string(); - Err(TypeMismatch::new(expected, found).into()) - } - StoredValue::EraValidators(_) => { - let expected = "Contract or Account".to_string(); - let found = "EraValidators".to_string(); - Err(TypeMismatch::new(expected, found).into()) - } - }, - Transform::Failure(error) => Err(error), - } - } -} - -/// Combines numeric `Transform`s into a single `Transform`. This is done by unwrapping the -/// `Transform` to obtain the underlying value, performing the wrapping addition then wrapping up as -/// a `Transform` again. 
-fn wrapped_transform_addition(i: T, b: Transform, expected: &str) -> Transform -where - T: WrappingAdd - + AsPrimitive - + From - + From - + Into - + TryFrom, - i32: AsPrimitive, -{ - if let Transform::AddInt32(j) = b { - i.wrapping_add(&j.as_()).into() - } else if let Transform::AddUInt64(j) = b { - i.wrapping_add(&j.into()).into() - } else { - match T::try_from(b) { - Err(b_type) => Transform::Failure( - TypeMismatch { - expected: String::from(expected), - found: b_type, - } - .into(), - ), - - Ok(j) => i.wrapping_add(&j).into(), - } - } -} - -impl Add for Transform { - type Output = Transform; - - fn add(self, other: Transform) -> Transform { - match (self, other) { - (a, Transform::Identity) => a, - (Transform::Identity, b) => b, - (a @ Transform::Failure(_), _) => a, - (_, b @ Transform::Failure(_)) => b, - (_, b @ Transform::Write(_)) => b, - (Transform::Write(v), b) => { - // second transform changes value being written - match b.apply(v) { - Err(error) => Transform::Failure(error), - Ok(new_value) => Transform::Write(new_value), - } - } - (Transform::AddInt32(i), b) => match b { - Transform::AddInt32(j) => Transform::AddInt32(i.wrapping_add(j)), - Transform::AddUInt64(j) => Transform::AddUInt64(j.wrapping_add(i as u64)), - Transform::AddUInt128(j) => Transform::AddUInt128(j.wrapping_add(&(i.as_()))), - Transform::AddUInt256(j) => Transform::AddUInt256(j.wrapping_add(&(i.as_()))), - Transform::AddUInt512(j) => Transform::AddUInt512(j.wrapping_add(&i.as_())), - other => Transform::Failure( - TypeMismatch::new("AddInt32".to_owned(), format!("{:?}", other)).into(), - ), - }, - (Transform::AddUInt64(i), b) => match b { - Transform::AddInt32(j) => Transform::AddInt32(j.wrapping_add(i as i32)), - Transform::AddUInt64(j) => Transform::AddUInt64(i.wrapping_add(j)), - Transform::AddUInt128(j) => Transform::AddUInt128(j.wrapping_add(&i.into())), - Transform::AddUInt256(j) => Transform::AddUInt256(j.wrapping_add(&i.into())), - Transform::AddUInt512(j) => 
Transform::AddUInt512(j.wrapping_add(&i.into())), - other => Transform::Failure( - TypeMismatch::new("AddUInt64".to_owned(), format!("{:?}", other)).into(), - ), - }, - (Transform::AddUInt128(i), b) => wrapped_transform_addition(i, b, "U128"), - (Transform::AddUInt256(i), b) => wrapped_transform_addition(i, b, "U256"), - (Transform::AddUInt512(i), b) => wrapped_transform_addition(i, b, "U512"), - (Transform::AddKeys(mut ks1), b) => match b { - Transform::AddKeys(mut ks2) => { - ks1.append(&mut ks2); - Transform::AddKeys(ks1) - } - other => Transform::Failure( - TypeMismatch::new("AddKeys".to_owned(), format!("{:?}", other)).into(), - ), - }, - } - } -} - -impl AddAssign for Transform { - fn add_assign(&mut self, other: Self) { - *self = self.clone() + other; - } -} - -impl Display for Transform { - fn fmt(&self, f: &mut Formatter) -> fmt::Result { - write!(f, "{:?}", self) - } -} - -impl Default for Transform { - fn default() -> Self { - Transform::Identity - } -} - -impl From<&Transform> for casper_types::Transform { - fn from(transform: &Transform) -> Self { - match transform { - Transform::Identity => casper_types::Transform::Identity, - Transform::Write(StoredValue::CLValue(cl_value)) => { - casper_types::Transform::WriteCLValue(cl_value.clone()) - } - Transform::Write(StoredValue::Account(account)) => { - casper_types::Transform::WriteAccount(account.account_hash()) - } - Transform::Write(StoredValue::ContractWasm(_)) => { - casper_types::Transform::WriteContractWasm - } - Transform::Write(StoredValue::Contract(_)) => casper_types::Transform::WriteContract, - Transform::Write(StoredValue::ContractPackage(_)) => { - casper_types::Transform::WriteContractPackage - } - Transform::Write(StoredValue::Transfer(transfer)) => { - casper_types::Transform::WriteTransfer(*transfer) - } - Transform::Write(StoredValue::DeployInfo(deploy_info)) => { - casper_types::Transform::WriteDeployInfo(deploy_info.clone()) - } - Transform::Write(StoredValue::EraInfo(era_info)) => { - 
casper_types::Transform::WriteEraInfo(era_info.clone()) - } - Transform::Write(StoredValue::Bid(bid)) => { - casper_types::Transform::WriteBid(bid.clone()) - } - Transform::Write(StoredValue::Withdraw(unbonding_purses)) => { - casper_types::Transform::WriteWithdraw(unbonding_purses.clone()) - } - Transform::Write(StoredValue::EraValidators(recipients)) => { - casper_types::Transform::WriteEraValidators(recipients.clone()) - } - Transform::AddInt32(value) => casper_types::Transform::AddInt32(*value), - Transform::AddUInt64(value) => casper_types::Transform::AddUInt64(*value), - Transform::AddUInt128(value) => casper_types::Transform::AddUInt128(*value), - Transform::AddUInt256(value) => casper_types::Transform::AddUInt256(*value), - Transform::AddUInt512(value) => casper_types::Transform::AddUInt512(*value), - Transform::AddKeys(named_keys) => casper_types::Transform::AddKeys( - named_keys - .iter() - .map(|(name, key)| casper_types::NamedKey { - name: name.clone(), - key: key.to_formatted_string(), - }) - .collect(), - ), - Transform::Failure(error) => casper_types::Transform::Failure(error.to_string()), - } - } -} - -#[cfg(any(feature = "gens", test))] -pub mod gens { - use proptest::{collection::vec, prelude::*}; - - use super::Transform; - use crate::shared::stored_value::gens::stored_value_arb; - - pub fn transform_arb() -> impl Strategy { - prop_oneof![ - Just(Transform::Identity), - stored_value_arb().prop_map(Transform::Write), - any::().prop_map(Transform::AddInt32), - any::().prop_map(Transform::AddUInt64), - any::().prop_map(|u| Transform::AddUInt128(u.into())), - vec(any::(), 32).prop_map(|u| { - let mut buf: [u8; 32] = [0u8; 32]; - buf.copy_from_slice(&u); - Transform::AddUInt256(buf.into()) - }), - vec(any::(), 64).prop_map(|u| { - let mut buf: [u8; 64] = [0u8; 64]; - buf.copy_from_slice(&u); - Transform::AddUInt512(buf.into()) - }), - ] - } -} - -#[cfg(test)] -mod tests { - use num::{Bounded, Num}; - - use casper_types::{ - account::AccountHash, 
bytesrepr::Bytes, AccessRights, ContractWasm, Key, URef, U128, U256, - U512, - }; - - use super::*; - use crate::shared::account::{Account, ActionThresholds, AssociatedKeys}; - use std::collections::BTreeMap; - - const ZERO_ARRAY: [u8; 32] = [0; 32]; - const ZERO_PUBLIC_KEY: AccountHash = AccountHash::new(ZERO_ARRAY); - const TEST_STR: &str = "a"; - const TEST_BOOL: bool = true; - - const ZERO_I32: i32 = 0; - const ONE_I32: i32 = 1; - const NEG_ONE_I32: i32 = -1; - const NEG_TWO_I32: i32 = -2; - const MIN_I32: i32 = i32::min_value(); - const MAX_I32: i32 = i32::max_value(); - - const ZERO_I64: i64 = 0; - const ONE_I64: i64 = 1; - const NEG_ONE_I64: i64 = -1; - const NEG_TWO_I64: i64 = -2; - const MIN_I64: i64 = i64::min_value(); - const MAX_I64: i64 = i64::max_value(); - - const ZERO_U8: u8 = 0; - const ONE_U8: u8 = 1; - const MAX_U8: u8 = u8::max_value(); - - const ZERO_U32: u32 = 0; - const ONE_U32: u32 = 1; - const MAX_U32: u32 = u32::max_value(); - - const ZERO_U64: u64 = 0; - const ONE_U64: u64 = 1; - const MAX_U64: u64 = u64::max_value(); - - const ZERO_U128: U128 = U128([0; 2]); - const ONE_U128: U128 = U128([1, 0]); - const MAX_U128: U128 = U128([MAX_U64; 2]); - - const ZERO_U256: U256 = U256([0; 4]); - const ONE_U256: U256 = U256([1, 0, 0, 0]); - const MAX_U256: U256 = U256([MAX_U64; 4]); - - const ZERO_U512: U512 = U512([0; 8]); - const ONE_U512: U512 = U512([1, 0, 0, 0, 0, 0, 0, 0]); - const MAX_U512: U512 = U512([MAX_U64; 8]); - - #[test] - fn i32_overflow() { - let max = std::i32::MAX; - let min = std::i32::MIN; - - let max_value = StoredValue::CLValue(CLValue::from_t(max).unwrap()); - let min_value = StoredValue::CLValue(CLValue::from_t(min).unwrap()); - - let apply_overflow = Transform::AddInt32(1).apply(max_value.clone()); - let apply_underflow = Transform::AddInt32(-1).apply(min_value.clone()); - - let transform_overflow = Transform::AddInt32(max) + Transform::AddInt32(1); - let transform_underflow = Transform::AddInt32(min) + 
Transform::AddInt32(-1); - - assert_eq!(apply_overflow.expect("Unexpected overflow"), min_value); - assert_eq!(apply_underflow.expect("Unexpected underflow"), max_value); - - assert_eq!(transform_overflow, min.into()); - assert_eq!(transform_underflow, max.into()); - } - - fn uint_overflow_test() - where - T: Num + Bounded + CLTyped + ToBytes + Into + Copy, - { - let max = T::max_value(); - let min = T::min_value(); - let one = T::one(); - let zero = T::zero(); - - let max_value = StoredValue::CLValue(CLValue::from_t(max).unwrap()); - let min_value = StoredValue::CLValue(CLValue::from_t(min).unwrap()); - let zero_value = StoredValue::CLValue(CLValue::from_t(zero).unwrap()); - - let max_transform: Transform = max.into(); - let min_transform: Transform = min.into(); - - let one_transform: Transform = one.into(); - - let apply_overflow = Transform::AddInt32(1).apply(max_value.clone()); - - let apply_overflow_uint = one_transform.clone().apply(max_value.clone()); - let apply_underflow = Transform::AddInt32(-1).apply(min_value); - - let transform_overflow = max_transform.clone() + Transform::AddInt32(1); - let transform_overflow_uint = max_transform + one_transform; - let transform_underflow = min_transform + Transform::AddInt32(-1); - - assert_eq!(apply_overflow, Ok(zero_value.clone())); - assert_eq!(apply_overflow_uint, Ok(zero_value)); - assert_eq!(apply_underflow, Ok(max_value)); - - assert_eq!(transform_overflow, zero.into()); - assert_eq!(transform_overflow_uint, zero.into()); - assert_eq!(transform_underflow, max.into()); - } - - #[test] - fn u128_overflow() { - uint_overflow_test::(); - } - - #[test] - fn u256_overflow() { - uint_overflow_test::(); - } - - #[test] - fn u512_overflow() { - uint_overflow_test::(); - } - - #[test] - fn addition_between_mismatched_types_should_fail() { - fn assert_yields_type_mismatch_error(stored_value: StoredValue) { - match wrapping_addition(stored_value, ZERO_I32) { - Err(Error::TypeMismatch(_)) => (), - _ => panic!("wrapping 
addition should yield TypeMismatch error"), - }; - } - - let contract = StoredValue::ContractWasm(ContractWasm::new(vec![])); - assert_yields_type_mismatch_error(contract); - - let uref = URef::new(ZERO_ARRAY, AccessRights::READ); - let account = StoredValue::Account(Account::new( - ZERO_PUBLIC_KEY, - NamedKeys::new(), - uref, - AssociatedKeys::default(), - ActionThresholds::default(), - )); - assert_yields_type_mismatch_error(account); - - let cl_bool = - StoredValue::CLValue(CLValue::from_t(TEST_BOOL).expect("should create CLValue")); - assert_yields_type_mismatch_error(cl_bool); - - let cl_unit = StoredValue::CLValue(CLValue::from_t(()).expect("should create CLValue")); - assert_yields_type_mismatch_error(cl_unit); - - let cl_string = - StoredValue::CLValue(CLValue::from_t(TEST_STR).expect("should create CLValue")); - assert_yields_type_mismatch_error(cl_string); - - let cl_key = StoredValue::CLValue( - CLValue::from_t(Key::Hash(ZERO_ARRAY)).expect("should create CLValue"), - ); - assert_yields_type_mismatch_error(cl_key); - - let cl_uref = StoredValue::CLValue(CLValue::from_t(uref).expect("should create CLValue")); - assert_yields_type_mismatch_error(cl_uref); - - let cl_option = - StoredValue::CLValue(CLValue::from_t(Some(ZERO_U8)).expect("should create CLValue")); - assert_yields_type_mismatch_error(cl_option); - - let cl_list = StoredValue::CLValue( - CLValue::from_t(Bytes::from(vec![ZERO_U8])).expect("should create CLValue"), - ); - assert_yields_type_mismatch_error(cl_list); - - let cl_fixed_list = - StoredValue::CLValue(CLValue::from_t([ZERO_U8]).expect("should create CLValue")); - assert_yields_type_mismatch_error(cl_fixed_list); - - let cl_result: Result<(), u8> = Err(ZERO_U8); - let cl_result = - StoredValue::CLValue(CLValue::from_t(cl_result).expect("should create CLValue")); - assert_yields_type_mismatch_error(cl_result); - - let cl_map = StoredValue::CLValue( - CLValue::from_t(BTreeMap::::new()).expect("should create CLValue"), - ); - 
assert_yields_type_mismatch_error(cl_map); - - let cl_tuple1 = - StoredValue::CLValue(CLValue::from_t((ZERO_U8,)).expect("should create CLValue")); - assert_yields_type_mismatch_error(cl_tuple1); - - let cl_tuple2 = StoredValue::CLValue( - CLValue::from_t((ZERO_U8, ZERO_U8)).expect("should create CLValue"), - ); - assert_yields_type_mismatch_error(cl_tuple2); - - let cl_tuple3 = StoredValue::CLValue( - CLValue::from_t((ZERO_U8, ZERO_U8, ZERO_U8)).expect("should create CLValue"), - ); - assert_yields_type_mismatch_error(cl_tuple3); - } - - #[test] - #[allow(clippy::cognitive_complexity)] - fn wrapping_addition_should_succeed() { - fn add(current_value: X, to_add: Y) -> X - where - X: CLTyped + ToBytes + FromBytes + PartialEq + fmt::Debug, - Y: AsPrimitive - + AsPrimitive - + AsPrimitive - + AsPrimitive - + AsPrimitive - + AsPrimitive - + AsPrimitive - + AsPrimitive, - { - let current = StoredValue::CLValue( - CLValue::from_t(current_value).expect("should create CLValue"), - ); - let result = - wrapping_addition(current, to_add).expect("wrapping addition should succeed"); - CLValue::try_from(result) - .expect("should be CLValue") - .into_t() - .expect("should parse to X") - } - - // Adding to i32 - assert_eq!(ONE_I32, add(ZERO_I32, ONE_I32)); - assert_eq!(MIN_I32, add(MAX_I32, ONE_I32)); - assert_eq!(NEG_TWO_I32, add(MAX_I32, MAX_I32)); - assert_eq!(ZERO_I32, add(ONE_I32, NEG_ONE_I32)); - assert_eq!(NEG_ONE_I32, add(ZERO_I32, NEG_ONE_I32)); - assert_eq!(MAX_I32, add(NEG_ONE_I32, MIN_I32)); - - assert_eq!(ONE_I32, add(ZERO_I32, ONE_U64)); - assert_eq!(MIN_I32, add(MAX_I32, ONE_U64)); - assert_eq!(NEG_TWO_I32, add(MAX_I32, MAX_I32 as u64)); - - assert_eq!(ONE_I32, add(ZERO_I32, ONE_U128)); - assert_eq!(MIN_I32, add(MAX_I32, ONE_U128)); - assert_eq!(NEG_TWO_I32, add(MAX_I32, U128::from(MAX_I32))); - - assert_eq!(ONE_I32, add(ZERO_I32, ONE_U256)); - assert_eq!(MIN_I32, add(MAX_I32, ONE_U256)); - assert_eq!(NEG_TWO_I32, add(MAX_I32, U256::from(MAX_I32))); - - 
assert_eq!(ONE_I32, add(ZERO_I32, ONE_U512)); - assert_eq!(MIN_I32, add(MAX_I32, ONE_U512)); - assert_eq!(NEG_TWO_I32, add(MAX_I32, U512::from(MAX_I32))); - - // Adding to i64 - assert_eq!(ONE_I64, add(ZERO_I64, ONE_I32)); - assert_eq!(MIN_I64, add(MAX_I64, ONE_I32)); - assert_eq!(ZERO_I64, add(ONE_I64, NEG_ONE_I32)); - assert_eq!(NEG_ONE_I64, add(ZERO_I64, NEG_ONE_I32)); - assert_eq!(MAX_I64, add(MIN_I64, NEG_ONE_I32)); - - assert_eq!(ONE_I64, add(ZERO_I64, ONE_U64)); - assert_eq!(MIN_I64, add(MAX_I64, ONE_U64)); - assert_eq!(NEG_TWO_I64, add(MAX_I64, MAX_I64 as u64)); - - assert_eq!(ONE_I64, add(ZERO_I64, ONE_U128)); - assert_eq!(MIN_I64, add(MAX_I64, ONE_U128)); - assert_eq!(NEG_TWO_I64, add(MAX_I64, U128::from(MAX_I64))); - - assert_eq!(ONE_I64, add(ZERO_I64, ONE_U256)); - assert_eq!(MIN_I64, add(MAX_I64, ONE_U256)); - assert_eq!(NEG_TWO_I64, add(MAX_I64, U256::from(MAX_I64))); - - assert_eq!(ONE_I64, add(ZERO_I64, ONE_U512)); - assert_eq!(MIN_I64, add(MAX_I64, ONE_U512)); - assert_eq!(NEG_TWO_I64, add(MAX_I64, U512::from(MAX_I64))); - - // Adding to u8 - assert_eq!(ONE_U8, add(ZERO_U8, ONE_I32)); - assert_eq!(ZERO_U8, add(MAX_U8, ONE_I32)); - assert_eq!(MAX_U8, add(MAX_U8, 256_i32)); - assert_eq!(ZERO_U8, add(MAX_U8, 257_i32)); - assert_eq!(ZERO_U8, add(ONE_U8, NEG_ONE_I32)); - assert_eq!(MAX_U8, add(ZERO_U8, NEG_ONE_I32)); - assert_eq!(ZERO_U8, add(ZERO_U8, -256_i32)); - assert_eq!(MAX_U8, add(ZERO_U8, -257_i32)); - assert_eq!(MAX_U8, add(ZERO_U8, MAX_I32)); - assert_eq!(ZERO_U8, add(ZERO_U8, MIN_I32)); - - assert_eq!(ONE_U8, add(ZERO_U8, ONE_U64)); - assert_eq!(ZERO_U8, add(MAX_U8, ONE_U64)); - assert_eq!(ONE_U8, add(ZERO_U8, u64::from(MAX_U8) + 2)); - assert_eq!(MAX_U8, add(ZERO_U8, MAX_U64)); - - assert_eq!(ONE_U8, add(ZERO_U8, ONE_U128)); - assert_eq!(ZERO_U8, add(MAX_U8, ONE_U128)); - assert_eq!(ONE_U8, add(ZERO_U8, U128::from(MAX_U8) + 2)); - assert_eq!(MAX_U8, add(ZERO_U8, MAX_U128)); - - assert_eq!(ONE_U8, add(ZERO_U8, ONE_U256)); - 
assert_eq!(ZERO_U8, add(MAX_U8, ONE_U256)); - assert_eq!(ONE_U8, add(ZERO_U8, U256::from(MAX_U8) + 2)); - assert_eq!(MAX_U8, add(ZERO_U8, MAX_U256)); - - assert_eq!(ONE_U8, add(ZERO_U8, ONE_U512)); - assert_eq!(ZERO_U8, add(MAX_U8, ONE_U512)); - assert_eq!(ONE_U8, add(ZERO_U8, U512::from(MAX_U8) + 2)); - assert_eq!(MAX_U8, add(ZERO_U8, MAX_U512)); - - // Adding to u32 - assert_eq!(ONE_U32, add(ZERO_U32, ONE_I32)); - assert_eq!(ZERO_U32, add(MAX_U32, ONE_I32)); - assert_eq!(ZERO_U32, add(ONE_U32, NEG_ONE_I32)); - assert_eq!(MAX_U32, add(ZERO_U32, NEG_ONE_I32)); - assert_eq!(MAX_I32 as u32 + 1, add(ZERO_U32, MIN_I32)); - - assert_eq!(ONE_U32, add(ZERO_U32, ONE_U64)); - assert_eq!(ZERO_U32, add(MAX_U32, ONE_U64)); - assert_eq!(ONE_U32, add(ZERO_U32, u64::from(MAX_U32) + 2)); - assert_eq!(MAX_U32, add(ZERO_U32, MAX_U64)); - - assert_eq!(ONE_U32, add(ZERO_U32, ONE_U128)); - assert_eq!(ZERO_U32, add(MAX_U32, ONE_U128)); - assert_eq!(ONE_U32, add(ZERO_U32, U128::from(MAX_U32) + 2)); - assert_eq!(MAX_U32, add(ZERO_U32, MAX_U128)); - - assert_eq!(ONE_U32, add(ZERO_U32, ONE_U256)); - assert_eq!(ZERO_U32, add(MAX_U32, ONE_U256)); - assert_eq!(ONE_U32, add(ZERO_U32, U256::from(MAX_U32) + 2)); - assert_eq!(MAX_U32, add(ZERO_U32, MAX_U256)); - - assert_eq!(ONE_U32, add(ZERO_U32, ONE_U512)); - assert_eq!(ZERO_U32, add(MAX_U32, ONE_U512)); - assert_eq!(ONE_U32, add(ZERO_U32, U512::from(MAX_U32) + 2)); - assert_eq!(MAX_U32, add(ZERO_U32, MAX_U512)); - - // Adding to u64 - assert_eq!(ONE_U64, add(ZERO_U64, ONE_I32)); - assert_eq!(ZERO_U64, add(MAX_U64, ONE_I32)); - assert_eq!(ZERO_U64, add(ONE_U64, NEG_ONE_I32)); - assert_eq!(MAX_U64, add(ZERO_U64, NEG_ONE_I32)); - - assert_eq!(ONE_U64, add(ZERO_U64, ONE_U64)); - assert_eq!(ZERO_U64, add(MAX_U64, ONE_U64)); - assert_eq!(MAX_U64 - 1, add(MAX_U64, MAX_U64)); - - assert_eq!(ONE_U64, add(ZERO_U64, ONE_U128)); - assert_eq!(ZERO_U64, add(MAX_U64, ONE_U128)); - assert_eq!(ONE_U64, add(ZERO_U64, U128::from(MAX_U64) + 2)); - 
assert_eq!(MAX_U64, add(ZERO_U64, MAX_U128)); - - assert_eq!(ONE_U64, add(ZERO_U64, ONE_U256)); - assert_eq!(ZERO_U64, add(MAX_U64, ONE_U256)); - assert_eq!(ONE_U64, add(ZERO_U64, U256::from(MAX_U64) + 2)); - assert_eq!(MAX_U64, add(ZERO_U64, MAX_U256)); - - assert_eq!(ONE_U64, add(ZERO_U64, ONE_U512)); - assert_eq!(ZERO_U64, add(MAX_U64, ONE_U512)); - assert_eq!(ONE_U64, add(ZERO_U64, U512::from(MAX_U64) + 2)); - assert_eq!(MAX_U64, add(ZERO_U64, MAX_U512)); - - // Adding to U128 - assert_eq!(ONE_U128, add(ZERO_U128, ONE_I32)); - assert_eq!(ZERO_U128, add(MAX_U128, ONE_I32)); - assert_eq!(ZERO_U128, add(ONE_U128, NEG_ONE_I32)); - assert_eq!(MAX_U128, add(ZERO_U128, NEG_ONE_I32)); - - assert_eq!(ONE_U128, add(ZERO_U128, ONE_U64)); - assert_eq!(ZERO_U128, add(MAX_U128, ONE_U64)); - - assert_eq!(ONE_U128, add(ZERO_U128, ONE_U128)); - assert_eq!(ZERO_U128, add(MAX_U128, ONE_U128)); - assert_eq!(MAX_U128 - 1, add(MAX_U128, MAX_U128)); - - assert_eq!(ONE_U128, add(ZERO_U128, ONE_U256)); - assert_eq!(ZERO_U128, add(MAX_U128, ONE_U256)); - assert_eq!( - ONE_U128, - add( - ZERO_U128, - U256::from_dec_str(&MAX_U128.to_string()).unwrap() + 2, - ) - ); - assert_eq!(MAX_U128, add(ZERO_U128, MAX_U256)); - - assert_eq!(ONE_U128, add(ZERO_U128, ONE_U512)); - assert_eq!(ZERO_U128, add(MAX_U128, ONE_U512)); - assert_eq!( - ONE_U128, - add( - ZERO_U128, - U512::from_dec_str(&MAX_U128.to_string()).unwrap() + 2, - ) - ); - assert_eq!(MAX_U128, add(ZERO_U128, MAX_U512)); - - // Adding to U256 - assert_eq!(ONE_U256, add(ZERO_U256, ONE_I32)); - assert_eq!(ZERO_U256, add(MAX_U256, ONE_I32)); - assert_eq!(ZERO_U256, add(ONE_U256, NEG_ONE_I32)); - assert_eq!(MAX_U256, add(ZERO_U256, NEG_ONE_I32)); - - assert_eq!(ONE_U256, add(ZERO_U256, ONE_U64)); - assert_eq!(ZERO_U256, add(MAX_U256, ONE_U64)); - - assert_eq!(ONE_U256, add(ZERO_U256, ONE_U128)); - assert_eq!(ZERO_U256, add(MAX_U256, ONE_U128)); - - assert_eq!(ONE_U256, add(ZERO_U256, ONE_U256)); - assert_eq!(ZERO_U256, add(MAX_U256, 
ONE_U256)); - assert_eq!(MAX_U256 - 1, add(MAX_U256, MAX_U256)); - - assert_eq!(ONE_U256, add(ZERO_U256, ONE_U512)); - assert_eq!(ZERO_U256, add(MAX_U256, ONE_U512)); - assert_eq!( - ONE_U256, - add( - ZERO_U256, - U512::from_dec_str(&MAX_U256.to_string()).unwrap() + 2, - ) - ); - assert_eq!(MAX_U256, add(ZERO_U256, MAX_U512)); - - // Adding to U512 - assert_eq!(ONE_U512, add(ZERO_U512, ONE_I32)); - assert_eq!(ZERO_U512, add(MAX_U512, ONE_I32)); - assert_eq!(ZERO_U512, add(ONE_U512, NEG_ONE_I32)); - assert_eq!(MAX_U512, add(ZERO_U512, NEG_ONE_I32)); - - assert_eq!(ONE_U512, add(ZERO_U512, ONE_U64)); - assert_eq!(ZERO_U512, add(MAX_U512, ONE_U64)); - - assert_eq!(ONE_U512, add(ZERO_U512, ONE_U128)); - assert_eq!(ZERO_U512, add(MAX_U512, ONE_U128)); - - assert_eq!(ONE_U512, add(ZERO_U512, ONE_U256)); - assert_eq!(ZERO_U512, add(MAX_U512, ONE_U256)); - - assert_eq!(ONE_U512, add(ZERO_U512, ONE_U512)); - assert_eq!(ZERO_U512, add(MAX_U512, ONE_U512)); - assert_eq!(MAX_U512 - 1, add(MAX_U512, MAX_U512)); - } -} diff --git a/execution_engine/src/shared/type_mismatch.rs b/execution_engine/src/shared/type_mismatch.rs deleted file mode 100644 index f0f319cfe9..0000000000 --- a/execution_engine/src/shared/type_mismatch.rs +++ /dev/null @@ -1,25 +0,0 @@ -use std::fmt; - -use serde::{Deserialize, Serialize}; - -#[derive(PartialEq, Eq, Debug, Clone, Serialize, Deserialize)] -pub struct TypeMismatch { - pub expected: String, - pub found: String, -} - -impl fmt::Display for TypeMismatch { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { - write!( - f, - "Type mismatch. 
Expected {} but found {}.", - self.expected, self.found - ) - } -} - -impl TypeMismatch { - pub fn new(expected: String, found: String) -> TypeMismatch { - TypeMismatch { expected, found } - } -} diff --git a/execution_engine/src/shared/utils.rs b/execution_engine/src/shared/utils.rs deleted file mode 100644 index 08701971d3..0000000000 --- a/execution_engine/src/shared/utils.rs +++ /dev/null @@ -1,106 +0,0 @@ -use libc::{c_long, sysconf, _SC_PAGESIZE}; -use once_cell::sync::Lazy; -use serde::Serialize; -use tracing::warn; - -/// Sensible default for many if not all systems. -const DEFAULT_PAGE_SIZE: usize = 4096; - -/// OS page size. -pub static OS_PAGE_SIZE: Lazy = Lazy::new(|| { - // https://www.gnu.org/software/libc/manual/html_node/Sysconf.html - let value: c_long = unsafe { sysconf(_SC_PAGESIZE) }; - if value <= 0 { - DEFAULT_PAGE_SIZE - } else { - value as usize - } -}); - -/// Warns if `value` is not a multiple of the OS page size. -pub fn check_multiple_of_page_size(value: usize) { - if value % *OS_PAGE_SIZE != 0 { - warn!( - "maximum size {} is not multiple of system page size {}", - value, *OS_PAGE_SIZE, - ); - } -} - -/// serializes value to json; -/// pretty_print: false = inline -/// pretty_print: true = pretty printed / multiline -pub fn jsonify(value: T, pretty_print: bool) -> String -where - T: Serialize, -{ - let fj = if pretty_print { - serde_json::to_string_pretty - } else { - serde_json::to_string - }; - - match fj(&value) { - Ok(json) => json, - Err(_) => r#"{"error": "encountered error serializing value"}"#.to_owned(), - } -} - -#[cfg(test)] -mod tests { - use super::*; - use serde::{Deserialize, Serialize}; - - #[derive(Clone, Debug, Deserialize, Eq, Serialize)] - struct SerMock { - foo: String, - bar: u32, - } - - impl PartialEq for SerMock { - fn eq(&self, other: &SerMock) -> bool { - self.foo.eq(&other.foo) && self.bar == other.bar - } - } - - #[test] - fn should_ser_to_json() { - let sermock = SerMock { - foo: "foo".to_string(), - bar: 
1, - }; - - let json = jsonify(sermock, false); - - assert_eq!(json, r#"{"foo":"foo","bar":1}"#, "json expected to match"); - } - - #[test] - fn should_ser_to_pretty_json() { - let sermock = SerMock { - foo: "foo".to_string(), - bar: 1, - }; - - let json = jsonify(sermock, true); - - assert!(json.contains('\n'), "json expected to be multiline"); - } - - #[test] - fn should_deser_from_json() { - let sermock = SerMock { - foo: "foo".to_string(), - bar: 1, - }; - - let json = jsonify(&sermock, false); - - let sermock_clone: SerMock = serde_json::from_str(&json).expect("should deser"); - - assert!( - sermock.eq(&sermock_clone), - "instances should contain the same data" - ); - } -} diff --git a/execution_engine/src/shared/wasm.rs b/execution_engine/src/shared/wasm.rs deleted file mode 100644 index cc04450392..0000000000 --- a/execution_engine/src/shared/wasm.rs +++ /dev/null @@ -1,31 +0,0 @@ -use parity_wasm::{builder, elements::Module}; - -use casper_types::contracts::DEFAULT_ENTRY_POINT_NAME; - -use crate::shared::wasm_prep::{PreprocessingError, Preprocessor}; - -/// Creates minimal session code that does nothing -pub fn do_nothing_bytes() -> Vec { - let module = builder::module() - .function() - // A signature with 0 params and no return type - .signature() - .build() - .body() - .build() - .build() - // Export above function - .export() - .field(DEFAULT_ENTRY_POINT_NAME) - .build() - // Memory section is mandatory - .memory() - .build() - .build(); - parity_wasm::serialize(module).expect("should serialize") -} - -pub fn do_nothing_module(preprocessor: &Preprocessor) -> Result { - let do_nothing_bytes = do_nothing_bytes(); - preprocessor.preprocess(&do_nothing_bytes) -} diff --git a/execution_engine/src/shared/wasm_config.rs b/execution_engine/src/shared/wasm_config.rs deleted file mode 100644 index 72bd52db1b..0000000000 --- a/execution_engine/src/shared/wasm_config.rs +++ /dev/null @@ -1,152 +0,0 @@ -use datasize::DataSize; -use rand::{distributions::Standard, 
prelude::*, Rng}; -use serde::{Deserialize, Serialize}; - -use casper_types::bytesrepr::{self, FromBytes, ToBytes}; - -use super::{ - host_function_costs::HostFunctionCosts, opcode_costs::OpcodeCosts, storage_costs::StorageCosts, -}; - -pub const DEFAULT_WASM_MAX_MEMORY: u32 = 64; -pub const DEFAULT_MAX_STACK_HEIGHT: u32 = 64 * 1024; - -#[derive(Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Debug, DataSize)] -pub struct WasmConfig { - /// Maximum amount of a heap memory (represented in 64kb pages) each contract can use. - pub max_memory: u32, - /// Max stack height (native WebAssembly stack limiter) - pub max_stack_height: u32, - /// Wasm opcode costs table - opcode_costs: OpcodeCosts, - /// Storage costs - storage_costs: StorageCosts, - /// Host function costs table - host_function_costs: HostFunctionCosts, -} - -impl WasmConfig { - pub const fn new( - max_memory: u32, - max_stack_height: u32, - opcode_costs: OpcodeCosts, - storage_costs: StorageCosts, - host_function_costs: HostFunctionCosts, - ) -> Self { - Self { - max_memory, - max_stack_height, - opcode_costs, - storage_costs, - host_function_costs, - } - } - - pub fn opcode_costs(&self) -> OpcodeCosts { - self.opcode_costs - } - - pub fn storage_costs(&self) -> StorageCosts { - self.storage_costs - } - - pub fn take_host_function_costs(self) -> HostFunctionCosts { - self.host_function_costs - } -} - -impl Default for WasmConfig { - fn default() -> Self { - Self { - max_memory: DEFAULT_WASM_MAX_MEMORY, - max_stack_height: DEFAULT_MAX_STACK_HEIGHT, - opcode_costs: OpcodeCosts::default(), - storage_costs: StorageCosts::default(), - host_function_costs: HostFunctionCosts::default(), - } - } -} - -impl ToBytes for WasmConfig { - fn to_bytes(&self) -> Result, bytesrepr::Error> { - let mut ret = bytesrepr::unchecked_allocate_buffer(self); - - ret.append(&mut self.max_memory.to_bytes()?); - ret.append(&mut self.max_stack_height.to_bytes()?); - ret.append(&mut self.opcode_costs.to_bytes()?); - ret.append(&mut 
self.storage_costs.to_bytes()?); - ret.append(&mut self.host_function_costs.to_bytes()?); - - Ok(ret) - } - - fn serialized_length(&self) -> usize { - self.max_memory.serialized_length() - + self.max_stack_height.serialized_length() - + self.opcode_costs.serialized_length() - + self.storage_costs.serialized_length() - + self.host_function_costs.serialized_length() - } -} - -impl FromBytes for WasmConfig { - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { - let (max_memory, rem) = FromBytes::from_bytes(bytes)?; - let (max_stack_height, rem) = FromBytes::from_bytes(rem)?; - let (opcode_costs, rem) = FromBytes::from_bytes(rem)?; - let (storage_costs, rem) = FromBytes::from_bytes(rem)?; - let (host_function_costs, rem) = FromBytes::from_bytes(rem)?; - - Ok(( - WasmConfig { - max_memory, - max_stack_height, - opcode_costs, - storage_costs, - host_function_costs, - }, - rem, - )) - } -} - -impl Distribution for Standard { - fn sample(&self, rng: &mut R) -> WasmConfig { - WasmConfig { - max_memory: rng.gen(), - max_stack_height: rng.gen(), - opcode_costs: rng.gen(), - storage_costs: rng.gen(), - host_function_costs: rng.gen(), - } - } -} - -#[cfg(any(feature = "gens", test))] -pub mod gens { - use proptest::{num, prop_compose}; - - use super::WasmConfig; - use crate::shared::{ - host_function_costs::gens::host_function_costs_arb, opcode_costs::gens::opcode_costs_arb, - storage_costs::gens::storage_costs_arb, - }; - - prop_compose! 
{ - pub fn wasm_config_arb() ( - max_memory in num::u32::ANY, - max_stack_height in num::u32::ANY, - opcode_costs in opcode_costs_arb(), - storage_costs in storage_costs_arb(), - host_function_costs in host_function_costs_arb(), - ) -> WasmConfig { - WasmConfig { - max_memory, - max_stack_height, - opcode_costs, - storage_costs, - host_function_costs, - } - } - } -} diff --git a/execution_engine/src/shared/wasm_prep.rs b/execution_engine/src/shared/wasm_prep.rs deleted file mode 100644 index b9a4957430..0000000000 --- a/execution_engine/src/shared/wasm_prep.rs +++ /dev/null @@ -1,80 +0,0 @@ -use std::fmt::{self, Display, Formatter}; - -use parity_wasm::elements::{self, MemorySection, Module, Section}; -use pwasm_utils::{self, stack_height}; -use thiserror::Error; - -use super::wasm_config::WasmConfig; - -const DEFAULT_GAS_MODULE_NAME: &str = "env"; - -#[derive(Debug, Clone, Error)] -pub enum PreprocessingError { - Deserialize(String), - OperationForbiddenByGasRules, - StackLimiter, - MissingMemorySection, -} - -impl From for PreprocessingError { - fn from(error: elements::Error) -> Self { - PreprocessingError::Deserialize(error.to_string()) - } -} - -impl Display for PreprocessingError { - fn fmt(&self, f: &mut Formatter) -> fmt::Result { - match self { - PreprocessingError::Deserialize(error) => write!(f, "Deserialization error: {}", error), - PreprocessingError::OperationForbiddenByGasRules => write!(f, "Encountered operation forbidden by gas rules. Consult instruction -> metering config map"), - PreprocessingError::StackLimiter => write!(f, "Stack limiter error"), - PreprocessingError::MissingMemorySection => write!(f, "Memory section should exist"), - } - } -} - -/// Checks if given wasm module contains a memory section. 
-fn memory_section(module: &Module) -> Option<&MemorySection> { - for section in module.sections() { - if let Section::Memory(section) = section { - return Some(section); - } - } - None -} - -pub struct Preprocessor { - wasm_config: WasmConfig, -} - -impl Preprocessor { - pub fn new(wasm_config: WasmConfig) -> Self { - Self { wasm_config } - } - - pub fn preprocess(&self, module_bytes: &[u8]) -> Result { - let module = deserialize(module_bytes)?; - - if memory_section(&module).is_none() { - // `pwasm_utils::externalize_mem` expects a memory section to exist in the module, and - // panics otherwise. - return Err(PreprocessingError::MissingMemorySection); - } - - let module = pwasm_utils::externalize_mem(module, None, self.wasm_config.max_memory); - let module = pwasm_utils::inject_gas_counter( - module, - &self.wasm_config.opcode_costs().to_set(), - DEFAULT_GAS_MODULE_NAME, - ) - .map_err(|_| PreprocessingError::OperationForbiddenByGasRules)?; - let module = stack_height::inject_limiter(module, self.wasm_config.max_stack_height) - .map_err(|_| PreprocessingError::StackLimiter)?; - Ok(module) - } -} - -// Returns a parity Module from bytes without making modifications or limits -pub fn deserialize(module_bytes: &[u8]) -> Result { - parity_wasm::deserialize_buffer::(module_bytes).map_err(Into::into) -} diff --git a/execution_engine/src/storage.rs b/execution_engine/src/storage.rs deleted file mode 100644 index 7591c83aad..0000000000 --- a/execution_engine/src/storage.rs +++ /dev/null @@ -1,19 +0,0 @@ -#![allow(missing_docs)] - -// modules -pub mod error; -pub mod global_state; -pub mod protocol_data; -pub mod protocol_data_store; -pub mod store; -pub mod transaction_source; -pub mod trie; -pub mod trie_store; - -const MAX_DBS: u32 = 2; - -#[cfg(test)] -pub(crate) const DEFAULT_TEST_MAX_DB_SIZE: usize = 52_428_800; // 50 MiB - -#[cfg(test)] -pub(crate) const DEFAULT_TEST_MAX_READERS: u32 = 512; diff --git a/execution_engine/src/storage/error/in_memory.rs 
b/execution_engine/src/storage/error/in_memory.rs deleted file mode 100644 index 9a87c799ff..0000000000 --- a/execution_engine/src/storage/error/in_memory.rs +++ /dev/null @@ -1,26 +0,0 @@ -use std::sync; - -use thiserror::Error; - -use casper_types::bytesrepr; - -#[derive(Debug, Error, PartialEq, Eq)] -pub enum Error { - #[error("{0}")] - BytesRepr(bytesrepr::Error), - - #[error("Another thread panicked while holding a lock")] - Poison, -} - -impl From for Error { - fn from(error: bytesrepr::Error) -> Self { - Error::BytesRepr(error) - } -} - -impl From> for Error { - fn from(_error: sync::PoisonError) -> Self { - Error::Poison - } -} diff --git a/execution_engine/src/storage/error/lmdb.rs b/execution_engine/src/storage/error/lmdb.rs deleted file mode 100644 index 2f21b443f5..0000000000 --- a/execution_engine/src/storage/error/lmdb.rs +++ /dev/null @@ -1,43 +0,0 @@ -use std::sync; - -use lmdb as lmdb_external; -use thiserror::Error; - -use casper_types::bytesrepr; - -use crate::storage::error::in_memory; - -#[derive(Debug, Clone, Error, PartialEq, Eq)] -pub enum Error { - #[error(transparent)] - Lmdb(#[from] lmdb_external::Error), - - #[error("{0}")] - BytesRepr(bytesrepr::Error), - - #[error("Another thread panicked while holding a lock")] - Poison, -} - -impl wasmi::HostError for Error {} - -impl From for Error { - fn from(error: bytesrepr::Error) -> Self { - Error::BytesRepr(error) - } -} - -impl From> for Error { - fn from(_error: sync::PoisonError) -> Self { - Error::Poison - } -} - -impl From for Error { - fn from(error: in_memory::Error) -> Self { - match error { - in_memory::Error::BytesRepr(error) => Error::BytesRepr(error), - in_memory::Error::Poison => Error::Poison, - } - } -} diff --git a/execution_engine/src/storage/error/mod.rs b/execution_engine/src/storage/error/mod.rs deleted file mode 100644 index 264cb476bb..0000000000 --- a/execution_engine/src/storage/error/mod.rs +++ /dev/null @@ -1,4 +0,0 @@ -pub mod in_memory; -pub mod lmdb; - -pub use 
self::lmdb::Error; diff --git a/execution_engine/src/storage/global_state/in_memory.rs b/execution_engine/src/storage/global_state/in_memory.rs deleted file mode 100644 index 3863946744..0000000000 --- a/execution_engine/src/storage/global_state/in_memory.rs +++ /dev/null @@ -1,458 +0,0 @@ -use std::{ops::Deref, sync::Arc}; - -use crate::shared::{ - additive_map::AdditiveMap, - newtypes::{Blake2bHash, CorrelationId}, - stored_value::StoredValue, - transform::Transform, -}; -use casper_types::{Key, ProtocolVersion}; - -use crate::storage::{ - error::{self, in_memory}, - global_state::{commit, CommitResult, StateProvider, StateReader}, - protocol_data::ProtocolData, - protocol_data_store::in_memory::InMemoryProtocolDataStore, - store::Store, - transaction_source::{ - in_memory::{InMemoryEnvironment, InMemoryReadTransaction, InMemoryReadWriteTransaction}, - Transaction, TransactionSource, - }, - trie::{merkle_proof::TrieMerkleProof, operations::create_hashed_empty_trie, Trie}, - trie_store::{ - in_memory::InMemoryTrieStore, - operations::{ - self, keys_with_prefix, missing_trie_keys, put_trie, read, read_with_proof, ReadResult, - WriteResult, - }, - }, -}; - -pub struct InMemoryGlobalState { - pub environment: Arc, - pub trie_store: Arc, - pub protocol_data_store: Arc, - pub empty_root_hash: Blake2bHash, -} - -/// Represents a "view" of global state at a particular root hash. -pub struct InMemoryGlobalStateView { - pub environment: Arc, - pub store: Arc, - pub root_hash: Blake2bHash, -} - -impl InMemoryGlobalState { - /// Creates an empty state. 
- pub fn empty() -> Result { - let environment = Arc::new(InMemoryEnvironment::new()); - let trie_store = Arc::new(InMemoryTrieStore::new(&environment, None)); - let protocol_data_store = Arc::new(InMemoryProtocolDataStore::new(&environment, None)); - let root_hash: Blake2bHash = { - let (root_hash, root) = create_hashed_empty_trie::()?; - let mut txn = environment.create_read_write_txn()?; - trie_store.put(&mut txn, &root_hash, &root)?; - txn.commit()?; - root_hash - }; - Ok(InMemoryGlobalState::new( - environment, - trie_store, - protocol_data_store, - root_hash, - )) - } - - /// Creates a state from an existing environment, trie_Store, and root_hash. - /// Intended to be used for testing. - pub(crate) fn new( - environment: Arc, - trie_store: Arc, - protocol_data_store: Arc, - empty_root_hash: Blake2bHash, - ) -> Self { - InMemoryGlobalState { - environment, - trie_store, - protocol_data_store, - empty_root_hash, - } - } - - /// Creates a state from a given set of `Key, StoredValue` pairs. - pub fn from_pairs( - correlation_id: CorrelationId, - pairs: &[(Key, StoredValue)], - ) -> Result<(Self, Blake2bHash), error::Error> { - let state = InMemoryGlobalState::empty()?; - let mut current_root = state.empty_root_hash; - { - let mut txn = state.environment.create_read_write_txn()?; - for (key, value) in pairs { - let key = key.normalize(); - match operations::write::<_, _, _, InMemoryTrieStore, in_memory::Error>( - correlation_id, - &mut txn, - &state.trie_store, - ¤t_root, - &key, - value, - )? 
{ - WriteResult::Written(root_hash) => { - current_root = root_hash; - } - WriteResult::AlreadyExists => (), - WriteResult::RootNotFound => panic!("InMemoryGlobalState has invalid root"), - } - } - txn.commit()?; - } - Ok((state, current_root)) - } -} - -impl StateReader for InMemoryGlobalStateView { - type Error = error::Error; - - fn read( - &self, - correlation_id: CorrelationId, - key: &Key, - ) -> Result, Self::Error> { - let txn = self.environment.create_read_txn()?; - let ret = match read::< - Key, - StoredValue, - InMemoryReadTransaction, - InMemoryTrieStore, - Self::Error, - >( - correlation_id, - &txn, - self.store.deref(), - &self.root_hash, - key, - )? { - ReadResult::Found(value) => Some(value), - ReadResult::NotFound => None, - ReadResult::RootNotFound => panic!("InMemoryGlobalState has invalid root"), - }; - txn.commit()?; - Ok(ret) - } - - fn read_with_proof( - &self, - correlation_id: CorrelationId, - key: &Key, - ) -> Result>, Self::Error> { - let txn = self.environment.create_read_txn()?; - let ret = match read_with_proof::< - Key, - StoredValue, - InMemoryReadTransaction, - InMemoryTrieStore, - Self::Error, - >( - correlation_id, - &txn, - self.store.deref(), - &self.root_hash, - key, - )? 
{ - ReadResult::Found(value) => Some(value), - ReadResult::NotFound => None, - ReadResult::RootNotFound => panic!("InMemoryGlobalState has invalid root"), - }; - txn.commit()?; - Ok(ret) - } - - fn keys_with_prefix( - &self, - correlation_id: CorrelationId, - prefix: &[u8], - ) -> Result, Self::Error> { - let txn = self.environment.create_read_txn()?; - let keys_iter = keys_with_prefix::( - correlation_id, - &txn, - self.store.deref(), - &self.root_hash, - prefix, - ); - let mut ret: Vec = Vec::new(); - for result in keys_iter { - match result { - Ok(key) => ret.push(key), - Err(error) => return Err(error.into()), - } - } - txn.commit()?; - Ok(ret) - } -} - -impl StateProvider for InMemoryGlobalState { - type Error = error::Error; - - type Reader = InMemoryGlobalStateView; - - fn checkout(&self, prestate_hash: Blake2bHash) -> Result, Self::Error> { - let txn = self.environment.create_read_txn()?; - let maybe_root: Option> = - self.trie_store.get(&txn, &prestate_hash)?; - let maybe_state = maybe_root.map(|_| InMemoryGlobalStateView { - environment: Arc::clone(&self.environment), - store: Arc::clone(&self.trie_store), - root_hash: prestate_hash, - }); - txn.commit()?; - Ok(maybe_state) - } - - fn commit( - &self, - correlation_id: CorrelationId, - prestate_hash: Blake2bHash, - effects: AdditiveMap, - ) -> Result { - let commit_result = commit::( - &self.environment, - &self.trie_store, - correlation_id, - prestate_hash, - effects, - )?; - Ok(commit_result) - } - - fn put_protocol_data( - &self, - protocol_version: ProtocolVersion, - protocol_data: &ProtocolData, - ) -> Result<(), Self::Error> { - let mut txn = self.environment.create_read_write_txn()?; - self.protocol_data_store - .put(&mut txn, &protocol_version, protocol_data)?; - txn.commit().map_err(Into::into) - } - - fn get_protocol_data( - &self, - protocol_version: ProtocolVersion, - ) -> Result, Self::Error> { - let txn = self.environment.create_read_txn()?; - let result = self.protocol_data_store.get(&txn, 
&protocol_version)?; - txn.commit()?; - Ok(result) - } - - fn empty_root(&self) -> Blake2bHash { - self.empty_root_hash - } - - fn read_trie( - &self, - _correlation_id: CorrelationId, - trie_key: &Blake2bHash, - ) -> Result>, Self::Error> { - let txn = self.environment.create_read_txn()?; - let ret: Option> = self.trie_store.get(&txn, trie_key)?; - txn.commit()?; - Ok(ret) - } - - fn put_trie( - &self, - correlation_id: CorrelationId, - trie: &Trie, - ) -> Result { - let mut txn = self.environment.create_read_write_txn()?; - let trie_hash = put_trie::< - Key, - StoredValue, - InMemoryReadWriteTransaction, - InMemoryTrieStore, - Self::Error, - >(correlation_id, &mut txn, &self.trie_store, trie)?; - txn.commit()?; - Ok(trie_hash) - } - - /// Finds all of the keys of missing descendant `Trie` values - fn missing_trie_keys( - &self, - correlation_id: CorrelationId, - trie_keys: Vec, - ) -> Result, Self::Error> { - let txn = self.environment.create_read_txn()?; - let missing_descendants = - missing_trie_keys::< - Key, - StoredValue, - InMemoryReadTransaction, - InMemoryTrieStore, - Self::Error, - >(correlation_id, &txn, self.trie_store.deref(), trie_keys)?; - txn.commit()?; - Ok(missing_descendants) - } -} - -#[cfg(test)] -mod tests { - use crate::shared::newtypes::Blake2bHash; - use casper_types::{account::AccountHash, CLValue}; - - use super::*; - - #[derive(Debug, Clone)] - struct TestPair { - key: Key, - value: StoredValue, - } - - fn create_test_pairs() -> [TestPair; 2] { - [ - TestPair { - key: Key::Account(AccountHash::new([1_u8; 32])), - value: StoredValue::CLValue(CLValue::from_t(1_i32).unwrap()), - }, - TestPair { - key: Key::Account(AccountHash::new([2_u8; 32])), - value: StoredValue::CLValue(CLValue::from_t(2_i32).unwrap()), - }, - ] - } - - fn create_test_pairs_updated() -> [TestPair; 3] { - [ - TestPair { - key: Key::Account(AccountHash::new([1u8; 32])), - value: StoredValue::CLValue(CLValue::from_t("one".to_string()).unwrap()), - }, - TestPair { - key: 
Key::Account(AccountHash::new([2u8; 32])), - value: StoredValue::CLValue(CLValue::from_t("two".to_string()).unwrap()), - }, - TestPair { - key: Key::Account(AccountHash::new([3u8; 32])), - value: StoredValue::CLValue(CLValue::from_t(3_i32).unwrap()), - }, - ] - } - - fn create_test_state() -> (InMemoryGlobalState, Blake2bHash) { - InMemoryGlobalState::from_pairs( - CorrelationId::new(), - &create_test_pairs() - .iter() - .cloned() - .map(|TestPair { key, value }| (key, value)) - .collect::>(), - ) - .unwrap() - } - - #[test] - fn reads_from_a_checkout_return_expected_values() { - let correlation_id = CorrelationId::new(); - let (state, root_hash) = create_test_state(); - let checkout = state.checkout(root_hash).unwrap().unwrap(); - for TestPair { key, value } in create_test_pairs().iter().cloned() { - assert_eq!(Some(value), checkout.read(correlation_id, &key).unwrap()); - } - } - - #[test] - fn checkout_fails_if_unknown_hash_is_given() { - let (state, _) = create_test_state(); - let fake_hash = Blake2bHash::new(&[1, 2, 3]); - let result = state.checkout(fake_hash).unwrap(); - assert!(result.is_none()); - } - - #[test] - fn commit_updates_state() { - let correlation_id = CorrelationId::new(); - - let test_pairs_updated = create_test_pairs_updated(); - - let (state, root_hash) = create_test_state(); - - let effects: AdditiveMap = test_pairs_updated - .iter() - .cloned() - .map(|TestPair { key, value }| (key, Transform::Write(value))) - .collect(); - - let updated_hash = match state.commit(correlation_id, root_hash, effects).unwrap() { - CommitResult::Success { state_root, .. 
} => state_root, - _ => panic!("commit failed"), - }; - - let updated_checkout = state.checkout(updated_hash).unwrap().unwrap(); - - for TestPair { key, value } in test_pairs_updated.iter().cloned() { - assert_eq!( - Some(value), - updated_checkout.read(correlation_id, &key).unwrap() - ); - } - } - - #[test] - fn commit_updates_state_and_original_state_stays_intact() { - let correlation_id = CorrelationId::new(); - let test_pairs_updated = create_test_pairs_updated(); - - let (state, root_hash) = create_test_state(); - - let effects: AdditiveMap = { - let mut tmp = AdditiveMap::new(); - for TestPair { key, value } in &test_pairs_updated { - tmp.insert(*key, Transform::Write(value.to_owned())); - } - tmp - }; - - let updated_hash = match state.commit(correlation_id, root_hash, effects).unwrap() { - CommitResult::Success { state_root, .. } => state_root, - _ => panic!("commit failed"), - }; - - let updated_checkout = state.checkout(updated_hash).unwrap().unwrap(); - for TestPair { key, value } in test_pairs_updated.iter().cloned() { - assert_eq!( - Some(value), - updated_checkout.read(correlation_id, &key).unwrap() - ); - } - - let original_checkout = state.checkout(root_hash).unwrap().unwrap(); - for TestPair { key, value } in create_test_pairs().iter().cloned() { - assert_eq!( - Some(value), - original_checkout.read(correlation_id, &key).unwrap() - ); - } - assert_eq!( - None, - original_checkout - .read(correlation_id, &test_pairs_updated[2].key) - .unwrap() - ); - } - - #[test] - fn initial_state_has_the_expected_hash() { - let correlation_id = CorrelationId::new(); - let expected_bytes = vec![ - 197, 117, 38, 12, 241, 62, 54, 241, 121, 165, 11, 8, 130, 189, 100, 252, 4, 102, 236, - 210, 91, 221, 123, 200, 135, 102, 194, 204, 46, 76, 13, 254, - ]; - let (_, root_hash) = InMemoryGlobalState::from_pairs(correlation_id, &[]).unwrap(); - assert_eq!(expected_bytes, root_hash.to_vec()) - } -} diff --git a/execution_engine/src/storage/global_state/lmdb.rs 
b/execution_engine/src/storage/global_state/lmdb.rs deleted file mode 100644 index 0a61f50a14..0000000000 --- a/execution_engine/src/storage/global_state/lmdb.rs +++ /dev/null @@ -1,446 +0,0 @@ -use std::{ops::Deref, sync::Arc}; - -use crate::shared::{ - additive_map::AdditiveMap, - newtypes::{Blake2bHash, CorrelationId}, - stored_value::StoredValue, - transform::Transform, -}; -use casper_types::{Key, ProtocolVersion}; - -use crate::storage::{ - error, - global_state::{commit, CommitResult, StateProvider, StateReader}, - protocol_data::ProtocolData, - protocol_data_store::lmdb::LmdbProtocolDataStore, - store::Store, - transaction_source::{lmdb::LmdbEnvironment, Transaction, TransactionSource}, - trie::{merkle_proof::TrieMerkleProof, operations::create_hashed_empty_trie, Trie}, - trie_store::{ - lmdb::LmdbTrieStore, - operations::{ - keys_with_prefix, missing_trie_keys, put_trie, read, read_with_proof, ReadResult, - }, - }, -}; - -pub struct LmdbGlobalState { - pub environment: Arc, - pub trie_store: Arc, - pub protocol_data_store: Arc, - pub empty_root_hash: Blake2bHash, -} - -/// Represents a "view" of global state at a particular root hash. -pub struct LmdbGlobalStateView { - pub environment: Arc, - pub store: Arc, - pub root_hash: Blake2bHash, -} - -impl LmdbGlobalState { - /// Creates an empty state from an existing environment and trie_store. - pub fn empty( - environment: Arc, - trie_store: Arc, - protocol_data_store: Arc, - ) -> Result { - let root_hash: Blake2bHash = { - let (root_hash, root) = create_hashed_empty_trie::()?; - let mut txn = environment.create_read_write_txn()?; - trie_store.put(&mut txn, &root_hash, &root)?; - txn.commit()?; - root_hash - }; - Ok(LmdbGlobalState::new( - environment, - trie_store, - protocol_data_store, - root_hash, - )) - } - - /// Creates a state from an existing environment, store, and root_hash. - /// Intended to be used for testing. 
- pub(crate) fn new( - environment: Arc, - trie_store: Arc, - protocol_data_store: Arc, - empty_root_hash: Blake2bHash, - ) -> Self { - LmdbGlobalState { - environment, - trie_store, - protocol_data_store, - empty_root_hash, - } - } -} - -impl StateReader for LmdbGlobalStateView { - type Error = error::Error; - - fn read( - &self, - correlation_id: CorrelationId, - key: &Key, - ) -> Result, Self::Error> { - let txn = self.environment.create_read_txn()?; - let ret = match read::( - correlation_id, - &txn, - self.store.deref(), - &self.root_hash, - key, - )? { - ReadResult::Found(value) => Some(value), - ReadResult::NotFound => None, - ReadResult::RootNotFound => panic!("LmdbGlobalState has invalid root"), - }; - txn.commit()?; - Ok(ret) - } - - fn read_with_proof( - &self, - correlation_id: CorrelationId, - key: &Key, - ) -> Result>, Self::Error> { - let txn = self.environment.create_read_txn()?; - let ret = match read_with_proof::< - Key, - StoredValue, - lmdb::RoTransaction, - LmdbTrieStore, - Self::Error, - >( - correlation_id, - &txn, - self.store.deref(), - &self.root_hash, - key, - )? 
{ - ReadResult::Found(value) => Some(value), - ReadResult::NotFound => None, - ReadResult::RootNotFound => panic!("LmdbGlobalState has invalid root"), - }; - txn.commit()?; - Ok(ret) - } - - fn keys_with_prefix( - &self, - correlation_id: CorrelationId, - prefix: &[u8], - ) -> Result, Self::Error> { - let txn = self.environment.create_read_txn()?; - let keys_iter = keys_with_prefix::( - correlation_id, - &txn, - self.store.deref(), - &self.root_hash, - prefix, - ); - let mut ret = Vec::new(); - for result in keys_iter { - match result { - Ok(key) => ret.push(key), - Err(error) => return Err(error), - } - } - txn.commit()?; - Ok(ret) - } -} - -impl StateProvider for LmdbGlobalState { - type Error = error::Error; - - type Reader = LmdbGlobalStateView; - - fn checkout(&self, state_hash: Blake2bHash) -> Result, Self::Error> { - let txn = self.environment.create_read_txn()?; - let maybe_root: Option> = self.trie_store.get(&txn, &state_hash)?; - let maybe_state = maybe_root.map(|_| LmdbGlobalStateView { - environment: Arc::clone(&self.environment), - store: Arc::clone(&self.trie_store), - root_hash: state_hash, - }); - txn.commit()?; - Ok(maybe_state) - } - - fn commit( - &self, - correlation_id: CorrelationId, - prestate_hash: Blake2bHash, - effects: AdditiveMap, - ) -> Result { - let commit_result = commit::( - &self.environment, - &self.trie_store, - correlation_id, - prestate_hash, - effects, - )?; - Ok(commit_result) - } - - fn put_protocol_data( - &self, - protocol_version: ProtocolVersion, - protocol_data: &ProtocolData, - ) -> Result<(), Self::Error> { - let mut txn = self.environment.create_read_write_txn()?; - self.protocol_data_store - .put(&mut txn, &protocol_version, protocol_data)?; - txn.commit().map_err(Into::into) - } - - fn get_protocol_data( - &self, - protocol_version: ProtocolVersion, - ) -> Result, Self::Error> { - let txn = self.environment.create_read_txn()?; - let result = self.protocol_data_store.get(&txn, &protocol_version)?; - txn.commit()?; - 
Ok(result) - } - - fn empty_root(&self) -> Blake2bHash { - self.empty_root_hash - } - - fn read_trie( - &self, - _correlation_id: CorrelationId, - trie_key: &Blake2bHash, - ) -> Result>, Self::Error> { - let txn = self.environment.create_read_txn()?; - let ret: Option> = self.trie_store.get(&txn, trie_key)?; - txn.commit()?; - Ok(ret) - } - - fn put_trie( - &self, - correlation_id: CorrelationId, - trie: &Trie, - ) -> Result { - let mut txn = self.environment.create_read_write_txn()?; - let trie_hash = put_trie::< - Key, - StoredValue, - lmdb::RwTransaction, - LmdbTrieStore, - Self::Error, - >(correlation_id, &mut txn, &self.trie_store, trie)?; - txn.commit()?; - Ok(trie_hash) - } - - /// Finds all of the keys of missing descendant `Trie` values - fn missing_trie_keys( - &self, - correlation_id: CorrelationId, - trie_keys: Vec, - ) -> Result, Self::Error> { - let txn = self.environment.create_read_txn()?; - let missing_descendants = - missing_trie_keys::( - correlation_id, - &txn, - self.trie_store.deref(), - trie_keys, - )?; - txn.commit()?; - Ok(missing_descendants) - } -} - -#[cfg(test)] -mod tests { - use lmdb::DatabaseFlags; - use tempfile::tempdir; - - use crate::shared::newtypes::Blake2bHash; - use casper_types::{account::AccountHash, CLValue}; - - use super::*; - use crate::storage::{ - trie_store::operations::{write, WriteResult}, - DEFAULT_TEST_MAX_DB_SIZE, DEFAULT_TEST_MAX_READERS, - }; - - #[derive(Debug, Clone)] - struct TestPair { - key: Key, - value: StoredValue, - } - - fn create_test_pairs() -> [TestPair; 2] { - [ - TestPair { - key: Key::Account(AccountHash::new([1_u8; 32])), - value: StoredValue::CLValue(CLValue::from_t(1_i32).unwrap()), - }, - TestPair { - key: Key::Account(AccountHash::new([2_u8; 32])), - value: StoredValue::CLValue(CLValue::from_t(2_i32).unwrap()), - }, - ] - } - - fn create_test_pairs_updated() -> [TestPair; 3] { - [ - TestPair { - key: Key::Account(AccountHash::new([1u8; 32])), - value: 
StoredValue::CLValue(CLValue::from_t("one".to_string()).unwrap()), - }, - TestPair { - key: Key::Account(AccountHash::new([2u8; 32])), - value: StoredValue::CLValue(CLValue::from_t("two".to_string()).unwrap()), - }, - TestPair { - key: Key::Account(AccountHash::new([3u8; 32])), - value: StoredValue::CLValue(CLValue::from_t(3_i32).unwrap()), - }, - ] - } - - fn create_test_state() -> (LmdbGlobalState, Blake2bHash) { - let correlation_id = CorrelationId::new(); - let temp_dir = tempdir().unwrap(); - let environment = Arc::new( - LmdbEnvironment::new( - &temp_dir.path().to_path_buf(), - DEFAULT_TEST_MAX_DB_SIZE, - DEFAULT_TEST_MAX_READERS, - ) - .unwrap(), - ); - let trie_store = - Arc::new(LmdbTrieStore::new(&environment, None, DatabaseFlags::empty()).unwrap()); - let protocol_data_store = Arc::new( - LmdbProtocolDataStore::new(&environment, None, DatabaseFlags::empty()).unwrap(), - ); - let ret = LmdbGlobalState::empty(environment, trie_store, protocol_data_store).unwrap(); - let mut current_root = ret.empty_root_hash; - { - let mut txn = ret.environment.create_read_write_txn().unwrap(); - - for TestPair { key, value } in &create_test_pairs() { - match write::<_, _, _, LmdbTrieStore, error::Error>( - correlation_id, - &mut txn, - &ret.trie_store, - ¤t_root, - key, - value, - ) - .unwrap() - { - WriteResult::Written(root_hash) => { - current_root = root_hash; - } - WriteResult::AlreadyExists => (), - WriteResult::RootNotFound => panic!("LmdbGlobalState has invalid root"), - } - } - - txn.commit().unwrap(); - } - (ret, current_root) - } - - #[test] - fn reads_from_a_checkout_return_expected_values() { - let correlation_id = CorrelationId::new(); - let (state, root_hash) = create_test_state(); - let checkout = state.checkout(root_hash).unwrap().unwrap(); - for TestPair { key, value } in create_test_pairs().iter().cloned() { - assert_eq!(Some(value), checkout.read(correlation_id, &key).unwrap()); - } - } - - #[test] - fn checkout_fails_if_unknown_hash_is_given() { - let 
(state, _) = create_test_state(); - let fake_hash: Blake2bHash = Blake2bHash::new(&[1u8; 32]); - let result = state.checkout(fake_hash).unwrap(); - assert!(result.is_none()); - } - - #[test] - fn commit_updates_state() { - let correlation_id = CorrelationId::new(); - let test_pairs_updated = create_test_pairs_updated(); - - let (state, root_hash) = create_test_state(); - - let effects: AdditiveMap = { - let mut tmp = AdditiveMap::new(); - for TestPair { key, value } in &test_pairs_updated { - tmp.insert(*key, Transform::Write(value.to_owned())); - } - tmp - }; - - let updated_hash = match state.commit(correlation_id, root_hash, effects).unwrap() { - CommitResult::Success { state_root, .. } => state_root, - _ => panic!("commit failed"), - }; - - let updated_checkout = state.checkout(updated_hash).unwrap().unwrap(); - - for TestPair { key, value } in test_pairs_updated.iter().cloned() { - assert_eq!( - Some(value), - updated_checkout.read(correlation_id, &key).unwrap() - ); - } - } - - #[test] - fn commit_updates_state_and_original_state_stays_intact() { - let correlation_id = CorrelationId::new(); - let test_pairs_updated = create_test_pairs_updated(); - - let (state, root_hash) = create_test_state(); - - let effects: AdditiveMap = { - let mut tmp = AdditiveMap::new(); - for TestPair { key, value } in &test_pairs_updated { - tmp.insert(*key, Transform::Write(value.to_owned())); - } - tmp - }; - - let updated_hash = match state.commit(correlation_id, root_hash, effects).unwrap() { - CommitResult::Success { state_root, .. 
} => state_root, - _ => panic!("commit failed"), - }; - - let updated_checkout = state.checkout(updated_hash).unwrap().unwrap(); - for TestPair { key, value } in test_pairs_updated.iter().cloned() { - assert_eq!( - Some(value), - updated_checkout.read(correlation_id, &key).unwrap() - ); - } - - let original_checkout = state.checkout(root_hash).unwrap().unwrap(); - for TestPair { key, value } in create_test_pairs().iter().cloned() { - assert_eq!( - Some(value), - original_checkout.read(correlation_id, &key).unwrap() - ); - } - assert_eq!( - None, - original_checkout - .read(correlation_id, &test_pairs_updated[2].key) - .unwrap() - ); - } -} diff --git a/execution_engine/src/storage/global_state/mod.rs b/execution_engine/src/storage/global_state/mod.rs deleted file mode 100644 index fe6888450c..0000000000 --- a/execution_engine/src/storage/global_state/mod.rs +++ /dev/null @@ -1,188 +0,0 @@ -pub mod in_memory; -pub mod lmdb; - -use std::{fmt, hash::BuildHasher}; - -use crate::shared::{ - additive_map::AdditiveMap, - newtypes::{Blake2bHash, CorrelationId}, - stored_value::StoredValue, - transform::{self, Transform}, - TypeMismatch, -}; -use casper_types::{bytesrepr, Key, ProtocolVersion}; - -use crate::storage::{ - protocol_data::ProtocolData, - transaction_source::{Transaction, TransactionSource}, - trie::{merkle_proof::TrieMerkleProof, Trie}, - trie_store::{ - operations::{read, write, ReadResult, WriteResult}, - TrieStore, - }, -}; - -/// A reader of state -pub trait StateReader { - /// An error which occurs when reading state - type Error; - - /// Returns the state value from the corresponding key - fn read(&self, correlation_id: CorrelationId, key: &K) -> Result, Self::Error>; - - /// Returns the merkle proof of the state value from the corresponding key - fn read_with_proof( - &self, - correlation_id: CorrelationId, - key: &K, - ) -> Result>, Self::Error>; - - /// Returns the keys in the trie matching `prefix`. 
- fn keys_with_prefix( - &self, - correlation_id: CorrelationId, - prefix: &[u8], - ) -> Result, Self::Error>; -} - -#[derive(Debug)] -pub enum CommitResult { - RootNotFound, - Success { state_root: Blake2bHash }, - KeyNotFound(Key), - TypeMismatch(TypeMismatch), - Serialization(bytesrepr::Error), -} - -impl fmt::Display for CommitResult { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { - match self { - CommitResult::RootNotFound => write!(f, "Root not found"), - CommitResult::Success { state_root } => { - write!(f, "Success: state_root: {}", state_root,) - } - CommitResult::KeyNotFound(key) => write!(f, "Key not found: {}", key), - CommitResult::TypeMismatch(type_mismatch) => { - write!(f, "Type mismatch: {:?}", type_mismatch) - } - CommitResult::Serialization(error) => write!(f, "Serialization: {:?}", error), - } - } -} - -impl From for CommitResult { - fn from(error: transform::Error) -> Self { - match error { - transform::Error::TypeMismatch(type_mismatch) => { - CommitResult::TypeMismatch(type_mismatch) - } - transform::Error::Serialization(error) => CommitResult::Serialization(error), - } - } -} - -pub trait StateProvider { - type Error; - type Reader: StateReader; - - /// Checkouts to the post state of a specific block. - fn checkout(&self, state_hash: Blake2bHash) -> Result, Self::Error>; - - /// Applies changes and returns a new post state hash. - /// block_hash is used for computing a deterministic and unique keys. 
- fn commit( - &self, - correlation_id: CorrelationId, - state_hash: Blake2bHash, - effects: AdditiveMap, - ) -> Result; - - fn put_protocol_data( - &self, - protocol_version: ProtocolVersion, - protocol_data: &ProtocolData, - ) -> Result<(), Self::Error>; - - fn get_protocol_data( - &self, - protocol_version: ProtocolVersion, - ) -> Result, Self::Error>; - - fn empty_root(&self) -> Blake2bHash; - - /// Reads a `Trie` from the state if it is present - fn read_trie( - &self, - correlation_id: CorrelationId, - trie_key: &Blake2bHash, - ) -> Result>, Self::Error>; - - /// Insert a trie node into the trie - fn put_trie( - &self, - correlation_id: CorrelationId, - trie: &Trie, - ) -> Result; - - /// Finds all of the missing or corrupt keys of which are descendants of `trie_key` - fn missing_trie_keys( - &self, - correlation_id: CorrelationId, - trie_keys: Vec, - ) -> Result, Self::Error>; -} - -pub fn commit<'a, R, S, H, E>( - environment: &'a R, - store: &S, - correlation_id: CorrelationId, - prestate_hash: Blake2bHash, - effects: AdditiveMap, -) -> Result -where - R: TransactionSource<'a, Handle = S::Handle>, - S: TrieStore, - S::Error: From, - E: From + From + From, - H: BuildHasher, -{ - let mut txn = environment.create_read_write_txn()?; - let mut state_root = prestate_hash; - - let maybe_root: Option> = store.get(&txn, &state_root)?; - - if maybe_root.is_none() { - return Ok(CommitResult::RootNotFound); - }; - - for (key, transform) in effects.into_iter() { - let read_result = read::<_, _, _, _, E>(correlation_id, &txn, store, &state_root, &key)?; - - let value = match (read_result, transform) { - (ReadResult::NotFound, Transform::Write(new_value)) => new_value, - (ReadResult::NotFound, _) => { - return Ok(CommitResult::KeyNotFound(key)); - } - (ReadResult::Found(current_value), transform) => match transform.apply(current_value) { - Ok(updated_value) => updated_value, - Err(err) => return Ok(err.into()), - }, - _x @ (ReadResult::RootNotFound, _) => 
panic!(stringify!(_x._1)), - }; - - let write_result = - write::<_, _, _, _, E>(correlation_id, &mut txn, store, &state_root, &key, &value)?; - - match write_result { - WriteResult::Written(root_hash) => { - state_root = root_hash; - } - WriteResult::AlreadyExists => (), - _x @ WriteResult::RootNotFound => panic!(stringify!(_x)), - } - } - - txn.commit()?; - - Ok(CommitResult::Success { state_root }) -} diff --git a/execution_engine/src/storage/protocol_data.rs b/execution_engine/src/storage/protocol_data.rs deleted file mode 100644 index 7d7bfffc89..0000000000 --- a/execution_engine/src/storage/protocol_data.rs +++ /dev/null @@ -1,312 +0,0 @@ -use std::collections::BTreeMap; - -use casper_types::{ - bytesrepr::{self, FromBytes, ToBytes}, - ContractHash, HashAddr, -}; - -use crate::shared::{system_config::SystemConfig, wasm_config::WasmConfig}; - -const DEFAULT_ADDRESS: [u8; 32] = [0; 32]; -pub const DEFAULT_WASMLESS_TRANSFER_COST: u32 = 10_000; - -/// Represents a protocol's data. Intended to be associated with a given protocol version. -#[derive(Debug, Copy, Clone, PartialEq, Eq)] -pub struct ProtocolData { - wasm_config: WasmConfig, - system_config: SystemConfig, - mint: ContractHash, - handle_payment: ContractHash, - standard_payment: ContractHash, - auction: ContractHash, -} - -/// Provides a default instance with non existing urefs and empty costs table. -/// -/// Used in contexts where Handle Payment or Mint contract is not ready yet, and handle payment, and -/// mint installers are ran. For use with caution. -impl Default for ProtocolData { - fn default() -> ProtocolData { - ProtocolData { - wasm_config: WasmConfig::default(), - system_config: SystemConfig::default(), - mint: DEFAULT_ADDRESS.into(), - handle_payment: DEFAULT_ADDRESS.into(), - standard_payment: DEFAULT_ADDRESS.into(), - auction: DEFAULT_ADDRESS.into(), - } - } -} - -impl ProtocolData { - /// Creates a new `ProtocolData` value from a given `WasmCosts` value. 
- pub fn new( - wasm_config: WasmConfig, - system_costs: SystemConfig, - mint: ContractHash, - handle_payment: ContractHash, - standard_payment: ContractHash, - auction: ContractHash, - ) -> Self { - ProtocolData { - wasm_config, - system_config: system_costs, - mint, - handle_payment, - standard_payment, - auction, - } - } - - /// Creates a new, partially-valid [`ProtocolData`] value where only the mint URef is known. - /// - /// Used during `commit_genesis` before all system contracts' URefs are known. - pub fn partial_with_mint(mint: ContractHash) -> Self { - ProtocolData { - mint, - ..Default::default() - } - } - - /// Creates a new, partially-valid [`ProtocolData`] value where all but the standard payment - /// uref is known. - /// - /// Used during `commit_genesis` before all system contracts' URefs are known. - pub fn partial_without_standard_payment( - wasm_config: WasmConfig, - mint: ContractHash, - handle_payment: ContractHash, - ) -> Self { - ProtocolData { - wasm_config, - mint, - handle_payment, - ..Default::default() - } - } - - /// Gets the `WasmConfig` value from a given [`ProtocolData`] value. - pub fn wasm_config(&self) -> &WasmConfig { - &self.wasm_config - } - - /// Gets the `SystemConfig` value from a given [`ProtocolData`] value. 
- pub fn system_config(&self) -> &SystemConfig { - &self.system_config - } - - pub fn mint(&self) -> ContractHash { - self.mint - } - - pub fn handle_payment(&self) -> ContractHash { - self.handle_payment - } - - pub fn standard_payment(&self) -> ContractHash { - self.standard_payment - } - - pub fn auction(&self) -> ContractHash { - self.auction - } - - /// Retrieves all valid system contracts stored in protocol version - pub fn system_contracts(&self) -> Vec { - let mut vec = Vec::with_capacity(4); - if self.mint != DEFAULT_ADDRESS.into() { - vec.push(self.mint) - } - if self.handle_payment != DEFAULT_ADDRESS.into() { - vec.push(self.handle_payment) - } - if self.standard_payment != DEFAULT_ADDRESS.into() { - vec.push(self.standard_payment) - } - if self.auction != DEFAULT_ADDRESS.into() { - vec.push(self.auction) - } - vec - } - - pub fn update_from(&mut self, updates: BTreeMap) -> bool { - for (old_hash, new_hash) in updates { - if old_hash == self.mint { - self.mint = new_hash; - } else if old_hash == self.handle_payment { - self.handle_payment = new_hash; - } else if old_hash == self.standard_payment { - self.standard_payment = new_hash; - } else if old_hash == self.auction { - self.auction = new_hash; - } else { - return false; - } - } - true - } -} - -impl ToBytes for ProtocolData { - fn to_bytes(&self) -> Result, bytesrepr::Error> { - let mut ret = bytesrepr::unchecked_allocate_buffer(self); - - ret.append(&mut self.wasm_config.to_bytes()?); - ret.append(&mut self.system_config.to_bytes()?); - ret.append(&mut self.mint.to_bytes()?); - ret.append(&mut self.handle_payment.to_bytes()?); - ret.append(&mut self.standard_payment.to_bytes()?); - ret.append(&mut self.auction.to_bytes()?); - - Ok(ret) - } - - fn serialized_length(&self) -> usize { - self.wasm_config.serialized_length() - + self.system_config.serialized_length() - + self.mint.serialized_length() - + self.handle_payment.serialized_length() - + self.standard_payment.serialized_length() - + 
self.auction.serialized_length() - } -} - -impl FromBytes for ProtocolData { - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { - let (wasm_config, rem) = WasmConfig::from_bytes(bytes)?; - let (system_config, rem) = FromBytes::from_bytes(rem)?; - let (mint, rem) = HashAddr::from_bytes(rem)?; - let (handle_payment, rem) = HashAddr::from_bytes(rem)?; - let (standard_payment, rem) = HashAddr::from_bytes(rem)?; - let (auction, rem) = HashAddr::from_bytes(rem)?; - - Ok(( - ProtocolData { - wasm_config, - mint: mint.into(), - handle_payment: handle_payment.into(), - standard_payment: standard_payment.into(), - auction: auction.into(), - system_config, - }, - rem, - )) - } -} - -#[cfg(test)] -pub(crate) mod gens { - use proptest::prop_compose; - - use crate::shared::{ - system_config::gens::system_config_arb, wasm_config::gens::wasm_config_arb, - }; - use casper_types::gens; - - use super::ProtocolData; - - prop_compose! { - pub fn protocol_data_arb()( - wasm_config in wasm_config_arb(), - system_config in system_config_arb(), - mint in gens::u8_slice_32(), - handle_payment in gens::u8_slice_32(), - standard_payment in gens::u8_slice_32(), - auction in gens::u8_slice_32(), - ) -> ProtocolData { - ProtocolData { - wasm_config, - system_config, - mint: mint.into(), - handle_payment: handle_payment.into(), - standard_payment: standard_payment.into(), - auction: auction.into(), - } - } - } -} - -#[cfg(test)] -mod tests { - use proptest::proptest; - - use crate::shared::{system_config::SystemConfig, wasm_config::WasmConfig}; - use casper_types::{bytesrepr, ContractHash}; - - use super::{gens, ProtocolData}; - - #[test] - fn should_return_all_system_contracts() { - let mint_reference = [1u8; 32].into(); - let handle_payment_reference = [2u8; 32].into(); - let standard_payment_reference = [3u8; 32].into(); - let auction_reference = [4u8; 32].into(); - let protocol_data = { - let wasm_config = WasmConfig::default(); - let system_config = 
SystemConfig::default(); - ProtocolData::new( - wasm_config, - system_config, - mint_reference, - handle_payment_reference, - standard_payment_reference, - auction_reference, - ) - }; - - let actual = { - let mut items = protocol_data.system_contracts(); - items.sort_unstable(); - items - }; - - assert_eq!(actual.len(), 4); - assert_eq!(actual[0], mint_reference); - assert_eq!(actual[1], handle_payment_reference); - assert_eq!(actual[2], standard_payment_reference); - assert_eq!(actual[3], auction_reference); - } - - #[test] - fn should_return_only_valid_system_contracts() { - let expected: Vec = vec![]; - assert_eq!(ProtocolData::default().system_contracts(), expected); - - let mint_reference = [0u8; 32].into(); // <-- invalid addr - let handle_payment_reference = [2u8; 32].into(); - let standard_payment_reference = [3u8; 32].into(); - let auction_reference = [4u8; 32].into(); - let protocol_data = { - let wasm_config = WasmConfig::default(); - let system_config = SystemConfig::default(); - ProtocolData::new( - wasm_config, - system_config, - mint_reference, - handle_payment_reference, - standard_payment_reference, - auction_reference, - ) - }; - - let actual = { - let mut items = protocol_data.system_contracts(); - items.sort_unstable(); - items - }; - - assert_eq!(actual.len(), 3); - assert_eq!(actual[0], handle_payment_reference); - assert_eq!(actual[1], standard_payment_reference); - assert_eq!(actual[2], auction_reference); - } - - proptest! 
{ - #[test] - fn should_serialize_and_deserialize_with_arbitrary_values( - protocol_data in gens::protocol_data_arb() - ) { - bytesrepr::test_serialization_roundtrip(&protocol_data); - } - } -} diff --git a/execution_engine/src/storage/protocol_data_store/in_memory.rs b/execution_engine/src/storage/protocol_data_store/in_memory.rs deleted file mode 100644 index c51be7d65f..0000000000 --- a/execution_engine/src/storage/protocol_data_store/in_memory.rs +++ /dev/null @@ -1,36 +0,0 @@ -use casper_types::ProtocolVersion; - -use crate::storage::{ - error::in_memory::Error, - protocol_data::ProtocolData, - protocol_data_store::{self, ProtocolDataStore}, - store::Store, - transaction_source::in_memory::InMemoryEnvironment, -}; - -/// An in-memory protocol data store -pub struct InMemoryProtocolDataStore { - maybe_name: Option, -} - -impl InMemoryProtocolDataStore { - pub fn new(_env: &InMemoryEnvironment, maybe_name: Option<&str>) -> Self { - let name = maybe_name - .map(|name| format!("{}-{}", protocol_data_store::NAME, name)) - .unwrap_or_else(|| String::from(protocol_data_store::NAME)); - InMemoryProtocolDataStore { - maybe_name: Some(name), - } - } -} - -impl Store for InMemoryProtocolDataStore { - type Error = Error; - type Handle = Option; - - fn handle(&self) -> Self::Handle { - self.maybe_name.to_owned() - } -} - -impl ProtocolDataStore for InMemoryProtocolDataStore {} diff --git a/execution_engine/src/storage/protocol_data_store/lmdb.rs b/execution_engine/src/storage/protocol_data_store/lmdb.rs deleted file mode 100644 index 42f34f61b6..0000000000 --- a/execution_engine/src/storage/protocol_data_store/lmdb.rs +++ /dev/null @@ -1,54 +0,0 @@ -use casper_types::ProtocolVersion; -use lmdb::{Database, DatabaseFlags}; - -use crate::storage::{ - error, - protocol_data::ProtocolData, - protocol_data_store::{self, ProtocolDataStore}, - store::Store, - transaction_source::lmdb::LmdbEnvironment, -}; - -/// An LMDB-backed protocol data store. 
-/// -/// Wraps [`lmdb::Database`]. -#[derive(Debug, Clone)] -pub struct LmdbProtocolDataStore { - db: Database, -} - -impl LmdbProtocolDataStore { - pub fn new( - env: &LmdbEnvironment, - maybe_name: Option<&str>, - flags: DatabaseFlags, - ) -> Result { - let name = Self::name(maybe_name); - let db = env.env().create_db(Some(&name), flags)?; - Ok(LmdbProtocolDataStore { db }) - } - - pub fn open(env: &LmdbEnvironment, maybe_name: Option<&str>) -> Result { - let name = Self::name(maybe_name); - let db = env.env().open_db(Some(&name))?; - Ok(LmdbProtocolDataStore { db }) - } - - fn name(maybe_name: Option<&str>) -> String { - maybe_name - .map(|name| format!("{}-{}", protocol_data_store::NAME, name)) - .unwrap_or_else(|| String::from(protocol_data_store::NAME)) - } -} - -impl Store for LmdbProtocolDataStore { - type Error = error::Error; - - type Handle = Database; - - fn handle(&self) -> Self::Handle { - self.db - } -} - -impl ProtocolDataStore for LmdbProtocolDataStore {} diff --git a/execution_engine/src/storage/protocol_data_store/mod.rs b/execution_engine/src/storage/protocol_data_store/mod.rs deleted file mode 100644 index 2791e6bc66..0000000000 --- a/execution_engine/src/storage/protocol_data_store/mod.rs +++ /dev/null @@ -1,15 +0,0 @@ -//! A store for persisting `ProtocolData` values at their -//! protocol versions. -use casper_types::ProtocolVersion; - -pub mod in_memory; -pub mod lmdb; -#[cfg(test)] -mod tests; - -use crate::storage::{protocol_data::ProtocolData, store::Store}; - -const NAME: &str = "PROTOCOL_DATA_STORE"; - -/// An entity which persists `ProtocolData` values at their protocol versions. 
-pub trait ProtocolDataStore: Store {} diff --git a/execution_engine/src/storage/protocol_data_store/tests/mod.rs b/execution_engine/src/storage/protocol_data_store/tests/mod.rs deleted file mode 100644 index 709a570c34..0000000000 --- a/execution_engine/src/storage/protocol_data_store/tests/mod.rs +++ /dev/null @@ -1 +0,0 @@ -mod proptests; diff --git a/execution_engine/src/storage/protocol_data_store/tests/proptests.rs b/execution_engine/src/storage/protocol_data_store/tests/proptests.rs deleted file mode 100644 index 06e4d63419..0000000000 --- a/execution_engine/src/storage/protocol_data_store/tests/proptests.rs +++ /dev/null @@ -1,65 +0,0 @@ -use std::{collections::BTreeMap, ops::RangeInclusive}; - -use lmdb::DatabaseFlags; -use proptest::{collection, prelude::proptest}; - -use casper_types::{gens as gens_ext, ProtocolVersion}; - -use crate::storage::{ - protocol_data::{gens, ProtocolData}, - protocol_data_store::{in_memory::InMemoryProtocolDataStore, lmdb::LmdbProtocolDataStore}, - store::tests as store_tests, - transaction_source::{in_memory::InMemoryEnvironment, lmdb::LmdbEnvironment}, - DEFAULT_TEST_MAX_DB_SIZE, DEFAULT_TEST_MAX_READERS, -}; - -const DEFAULT_MIN_LENGTH: usize = 1; -const DEFAULT_MAX_LENGTH: usize = 16; - -fn get_range() -> RangeInclusive { - let start = option_env!("CL_PROTOCOL_DATA_STORE_TEST_MAP_MIN_LENGTH") - .and_then(|s| str::parse::(s).ok()) - .unwrap_or(DEFAULT_MIN_LENGTH); - let end = option_env!("CL_PROTOCOL_DATA_STORE_TEST_MAP_MAX_LENGTH") - .and_then(|s| str::parse::(s).ok()) - .unwrap_or(DEFAULT_MAX_LENGTH); - RangeInclusive::new(start, end) -} - -fn in_memory_roundtrip_succeeds(inputs: BTreeMap) -> bool { - let env = InMemoryEnvironment::new(); - let store = InMemoryProtocolDataStore::new(&env, None); - - store_tests::roundtrip_succeeds(&env, &store, inputs).unwrap() -} - -fn lmdb_roundtrip_succeeds(inputs: BTreeMap) -> bool { - let tmp_dir = tempfile::tempdir().unwrap(); - let env = LmdbEnvironment::new( - 
&tmp_dir.path().to_path_buf(), - DEFAULT_TEST_MAX_DB_SIZE, - DEFAULT_TEST_MAX_READERS, - ) - .unwrap(); - let store = LmdbProtocolDataStore::new(&env, None, DatabaseFlags::empty()).unwrap(); - - let ret = store_tests::roundtrip_succeeds(&env, &store, inputs).unwrap(); - tmp_dir.close().unwrap(); - ret -} - -proptest! { - #[test] - fn prop_in_memory_roundtrip_succeeds( - m in collection::btree_map(gens_ext::protocol_version_arb(), gens::protocol_data_arb(), get_range()) - ) { - assert!(in_memory_roundtrip_succeeds(m)) - } - - #[test] - fn prop_lmdb_roundtrip_succeeds( - m in collection::btree_map(gens_ext::protocol_version_arb(), gens::protocol_data_arb(), get_range()) - ) { - assert!(lmdb_roundtrip_succeeds(m)) - } -} diff --git a/execution_engine/src/storage/store/mod.rs b/execution_engine/src/storage/store/mod.rs deleted file mode 100644 index b074c84850..0000000000 --- a/execution_engine/src/storage/store/mod.rs +++ /dev/null @@ -1,45 +0,0 @@ -mod store_ext; -#[cfg(test)] -pub(crate) mod tests; - -use casper_types::bytesrepr::{self, FromBytes, ToBytes}; - -pub use self::store_ext::StoreExt; -use crate::storage::transaction_source::{Readable, Writable}; - -pub trait Store { - type Error: From; - - type Handle; - - fn handle(&self) -> Self::Handle; - - fn get(&self, txn: &T, key: &K) -> Result, Self::Error> - where - T: Readable, - K: ToBytes, - V: FromBytes, - Self::Error: From, - { - let handle = self.handle(); - match txn.read(handle, &key.to_bytes()?)? { - None => Ok(None), - Some(value_bytes) => { - let value = bytesrepr::deserialize(value_bytes.into())?; - Ok(Some(value)) - } - } - } - - fn put(&self, txn: &mut T, key: &K, value: &V) -> Result<(), Self::Error> - where - T: Writable, - K: ToBytes, - V: ToBytes, - Self::Error: From, - { - let handle = self.handle(); - txn.write(handle, &key.to_bytes()?, &value.to_bytes()?) 
- .map_err(Into::into) - } -} diff --git a/execution_engine/src/storage/store/store_ext.rs b/execution_engine/src/storage/store/store_ext.rs deleted file mode 100644 index 29703e3763..0000000000 --- a/execution_engine/src/storage/store/store_ext.rs +++ /dev/null @@ -1,46 +0,0 @@ -use casper_types::bytesrepr::{FromBytes, ToBytes}; - -use crate::storage::{ - store::Store, - transaction_source::{Readable, Writable}, -}; - -pub trait StoreExt: Store { - fn get_many<'a, T>( - &self, - txn: &T, - keys: impl Iterator, - ) -> Result>, Self::Error> - where - T: Readable, - K: ToBytes + 'a, - V: FromBytes, - Self::Error: From, - { - let mut ret: Vec> = Vec::new(); - for key in keys { - let result = self.get(txn, key)?; - ret.push(result) - } - Ok(ret) - } - - fn put_many<'a, T>( - &self, - txn: &mut T, - pairs: impl Iterator, - ) -> Result<(), Self::Error> - where - T: Writable, - K: ToBytes + 'a, - V: ToBytes + 'a, - Self::Error: From, - { - for (key, value) in pairs { - self.put(txn, key, value)?; - } - Ok(()) - } -} - -impl> StoreExt for T {} diff --git a/execution_engine/src/storage/store/tests.rs b/execution_engine/src/storage/store/tests.rs deleted file mode 100644 index de8f5dd38f..0000000000 --- a/execution_engine/src/storage/store/tests.rs +++ /dev/null @@ -1,49 +0,0 @@ -use std::collections::BTreeMap; - -use casper_types::bytesrepr::{FromBytes, ToBytes}; - -use crate::storage::{ - store::{Store, StoreExt}, - transaction_source::{Transaction, TransactionSource}, -}; - -// should be moved to the `store` module -fn roundtrip<'a, K, V, X, S>( - transaction_source: &'a X, - store: &S, - items: &BTreeMap, -) -> Result>, S::Error> -where - K: ToBytes, - V: ToBytes + FromBytes, - X: TransactionSource<'a, Handle = S::Handle>, - S: Store, - S::Error: From, -{ - let mut txn: X::ReadWriteTransaction = transaction_source.create_read_write_txn()?; - store.put_many(&mut txn, items.iter())?; - let result = store.get_many(&txn, items.keys()); - txn.commit()?; - result -} - -// 
should be moved to the `store` module -pub fn roundtrip_succeeds<'a, K, V, X, S>( - transaction_source: &'a X, - store: &S, - items: BTreeMap, -) -> Result -where - K: ToBytes, - V: ToBytes + FromBytes + Clone + PartialEq, - X: TransactionSource<'a, Handle = S::Handle>, - S: Store, - S::Error: From, -{ - let maybe_values: Vec> = roundtrip(transaction_source, store, &items)?; - let values = match maybe_values.into_iter().collect::>>() { - Some(values) => values, - None => return Ok(false), - }; - Ok(Iterator::eq(items.values(), values.iter())) -} diff --git a/execution_engine/src/storage/transaction_source/in_memory.rs b/execution_engine/src/storage/transaction_source/in_memory.rs deleted file mode 100644 index 5f0077b443..0000000000 --- a/execution_engine/src/storage/transaction_source/in_memory.rs +++ /dev/null @@ -1,159 +0,0 @@ -use std::{ - collections::HashMap, - sync::{self, Arc, Mutex, MutexGuard}, -}; - -use casper_types::bytesrepr::Bytes; - -use crate::storage::{ - error::in_memory::Error, - transaction_source::{Readable, Transaction, TransactionSource, Writable}, -}; - -/// A marker for use in a mutex which represents the capability to perform a -/// write transaction. -struct WriteCapability; - -type WriteLock<'a> = MutexGuard<'a, WriteCapability>; - -type BytesMap = HashMap; - -type PoisonError<'a> = sync::PoisonError, BytesMap>>>; - -/// A read transaction for the in-memory trie store. 
-pub struct InMemoryReadTransaction { - view: HashMap, BytesMap>, -} - -impl InMemoryReadTransaction { - pub fn new(store: &InMemoryEnvironment) -> Result { - let view = { - let db_ref = Arc::clone(&store.data); - let view_lock = db_ref.lock()?; - view_lock.to_owned() - }; - Ok(InMemoryReadTransaction { view }) - } -} - -impl Transaction for InMemoryReadTransaction { - type Error = Error; - - type Handle = Option; - - fn commit(self) -> Result<(), Self::Error> { - Ok(()) - } -} - -impl Readable for InMemoryReadTransaction { - fn read(&self, handle: Self::Handle, key: &[u8]) -> Result, Self::Error> { - let sub_view = match self.view.get(&handle) { - Some(view) => view, - None => return Ok(None), - }; - Ok(sub_view.get(&Bytes::from(key)).cloned()) - } -} - -/// A read-write transaction for the in-memory trie store. -pub struct InMemoryReadWriteTransaction<'a> { - view: HashMap, BytesMap>, - store_ref: Arc, BytesMap>>>, - _write_lock: WriteLock<'a>, -} - -impl<'a> InMemoryReadWriteTransaction<'a> { - pub fn new(store: &'a InMemoryEnvironment) -> Result, Error> { - let store_ref = Arc::clone(&store.data); - let view = { - let view_lock = store_ref.lock()?; - view_lock.to_owned() - }; - let _write_lock = store.write_mutex.lock()?; - Ok(InMemoryReadWriteTransaction { - view, - store_ref, - _write_lock, - }) - } -} - -impl<'a> Transaction for InMemoryReadWriteTransaction<'a> { - type Error = Error; - - type Handle = Option; - - fn commit(self) -> Result<(), Self::Error> { - let mut store_ref_lock = self.store_ref.lock()?; - store_ref_lock.extend(self.view); - Ok(()) - } -} - -impl<'a> Readable for InMemoryReadWriteTransaction<'a> { - fn read(&self, handle: Self::Handle, key: &[u8]) -> Result, Self::Error> { - let sub_view = match self.view.get(&handle) { - Some(view) => view, - None => return Ok(None), - }; - Ok(sub_view.get(&Bytes::from(key)).cloned()) - } -} - -impl<'a> Writable for InMemoryReadWriteTransaction<'a> { - fn write(&mut self, handle: Self::Handle, key: 
&[u8], value: &[u8]) -> Result<(), Self::Error> { - let sub_view = self.view.entry(handle).or_default(); - sub_view.insert(Bytes::from(key), Bytes::from(value)); - Ok(()) - } -} - -/// An environment for the in-memory trie store. -pub struct InMemoryEnvironment { - data: Arc, BytesMap>>>, - write_mutex: Arc>, -} - -impl Default for InMemoryEnvironment { - fn default() -> Self { - let data = { - let mut initial_map = HashMap::new(); - initial_map.insert(None, Default::default()); - Arc::new(Mutex::new(initial_map)) - }; - let write_mutex = Arc::new(Mutex::new(WriteCapability)); - InMemoryEnvironment { data, write_mutex } - } -} - -impl InMemoryEnvironment { - pub fn new() -> Self { - Default::default() - } - - pub fn data(&self, name: Option<&str>) -> Result, PoisonError> { - let data = self.data.lock()?; - let name = name.map(ToString::to_string); - let ret = data.get(&name).cloned(); - Ok(ret) - } -} - -impl<'a> TransactionSource<'a> for InMemoryEnvironment { - type Error = Error; - - type Handle = Option; - - type ReadTransaction = InMemoryReadTransaction; - - type ReadWriteTransaction = InMemoryReadWriteTransaction<'a>; - - fn create_read_txn(&'a self) -> Result { - InMemoryReadTransaction::new(self).map_err(Into::into) - } - - fn create_read_write_txn(&'a self) -> Result, Self::Error> { - InMemoryReadWriteTransaction::new(self).map_err(Into::into) - } -} diff --git a/execution_engine/src/storage/transaction_source/lmdb.rs b/execution_engine/src/storage/transaction_source/lmdb.rs deleted file mode 100644 index 32894fb012..0000000000 --- a/execution_engine/src/storage/transaction_source/lmdb.rs +++ /dev/null @@ -1,109 +0,0 @@ -use std::path::Path; - -use casper_types::bytesrepr::Bytes; -use lmdb::{ - self, Database, Environment, EnvironmentFlags, RoTransaction, RwTransaction, WriteFlags, -}; - -use crate::storage::{ - error, - transaction_source::{Readable, Transaction, TransactionSource, Writable}, - MAX_DBS, -}; - -/// Filename for the LMDB database created by 
the EE. -const EE_DB_FILENAME: &str = "data.lmdb"; - -impl<'a> Transaction for RoTransaction<'a> { - type Error = lmdb::Error; - - type Handle = Database; - - fn commit(self) -> Result<(), Self::Error> { - lmdb::Transaction::commit(self) - } -} - -impl<'a> Readable for RoTransaction<'a> { - fn read(&self, handle: Self::Handle, key: &[u8]) -> Result, Self::Error> { - match lmdb::Transaction::get(self, handle, &key) { - Ok(bytes) => Ok(Some(Bytes::from(bytes))), - Err(lmdb::Error::NotFound) => Ok(None), - Err(e) => Err(e), - } - } -} - -impl<'a> Transaction for RwTransaction<'a> { - type Error = lmdb::Error; - - type Handle = Database; - - fn commit(self) -> Result<(), Self::Error> { - as lmdb::Transaction>::commit(self) - } -} - -impl<'a> Readable for RwTransaction<'a> { - fn read(&self, handle: Self::Handle, key: &[u8]) -> Result, Self::Error> { - match lmdb::Transaction::get(self, handle, &key) { - Ok(bytes) => Ok(Some(Bytes::from(bytes))), - Err(lmdb::Error::NotFound) => Ok(None), - Err(e) => Err(e), - } - } -} - -impl<'a> Writable for RwTransaction<'a> { - fn write(&mut self, handle: Self::Handle, key: &[u8], value: &[u8]) -> Result<(), Self::Error> { - self.put(handle, &key, &value, WriteFlags::empty()) - .map_err(Into::into) - } -} - -/// The environment for an LMDB-backed trie store. -/// -/// Wraps [`lmdb::Environment`]. -#[derive(Debug)] -pub struct LmdbEnvironment { - env: Environment, -} - -impl LmdbEnvironment { - pub fn new>( - path: P, - map_size: usize, - max_readers: u32, - ) -> Result { - let env = Environment::new() - // Set the flag to manage our own directory like in the storage component. 
- .set_flags(EnvironmentFlags::NO_SUB_DIR) - .set_max_dbs(MAX_DBS) - .set_map_size(map_size) - .set_max_readers(max_readers) - .open(&path.as_ref().join(EE_DB_FILENAME))?; - Ok(LmdbEnvironment { env }) - } - - pub fn env(&self) -> &Environment { - &self.env - } -} - -impl<'a> TransactionSource<'a> for LmdbEnvironment { - type Error = lmdb::Error; - - type Handle = Database; - - type ReadTransaction = RoTransaction<'a>; - - type ReadWriteTransaction = RwTransaction<'a>; - - fn create_read_txn(&'a self) -> Result, Self::Error> { - self.env.begin_ro_txn() - } - - fn create_read_write_txn(&'a self) -> Result, Self::Error> { - self.env.begin_rw_txn() - } -} diff --git a/execution_engine/src/storage/transaction_source/mod.rs b/execution_engine/src/storage/transaction_source/mod.rs deleted file mode 100644 index 48bcaf703d..0000000000 --- a/execution_engine/src/storage/transaction_source/mod.rs +++ /dev/null @@ -1,60 +0,0 @@ -use casper_types::bytesrepr::Bytes; - -pub mod in_memory; -pub mod lmdb; - -/// A transaction which can be committed or aborted. -pub trait Transaction: Sized { - /// An error which can occur while reading or writing during a transaction, - /// or committing the transaction. - type Error; - - /// An entity which is being read from or written to during a transaction. - type Handle; - - /// Commits the transaction. - fn commit(self) -> Result<(), Self::Error>; - - /// Aborts the transaction. - /// - /// Any pending operations will not be saved. - fn abort(self) { - unimplemented!("Abort operations should be performed in Drop implementations.") - } -} - -/// A transaction with the capability to read from a given [`Handle`](Transaction::Handle). -pub trait Readable: Transaction { - /// Returns the value from the corresponding key from a given [`Transaction::Handle`]. - fn read(&self, handle: Self::Handle, key: &[u8]) -> Result, Self::Error>; -} - -/// A transaction with the capability to write to a given [`Handle`](Transaction::Handle). 
-pub trait Writable: Transaction { - /// Inserts a key-value pair into a given [`Transaction::Handle`]. - fn write(&mut self, handle: Self::Handle, key: &[u8], value: &[u8]) -> Result<(), Self::Error>; -} - -/// A source of transactions e.g. values that implement [`Readable`] -/// and/or [`Writable`]. -pub trait TransactionSource<'a> { - /// An error which can occur while creating a read or read-write - /// transaction. - type Error; - - /// An entity which is being read from or written to during a transaction. - type Handle; - - /// Represents the type of read transactions. - type ReadTransaction: Readable; - - /// Represents the type of read-write transactions. - type ReadWriteTransaction: Readable - + Writable; - - /// Creates a read transaction. - fn create_read_txn(&'a self) -> Result; - - /// Creates a read-write transaction. - fn create_read_write_txn(&'a self) -> Result; -} diff --git a/execution_engine/src/storage/trie/gens.rs b/execution_engine/src/storage/trie/gens.rs deleted file mode 100644 index df1139b43f..0000000000 --- a/execution_engine/src/storage/trie/gens.rs +++ /dev/null @@ -1,43 +0,0 @@ -use proptest::{collection::vec, option, prelude::*}; - -use crate::shared::{ - newtypes::Blake2bHash, - stored_value::{gens::stored_value_arb, StoredValue}, -}; -use casper_types::{gens::key_arb, Key}; - -use super::{Pointer, PointerBlock, Trie}; - -pub fn blake2b_hash_arb() -> impl Strategy { - vec(any::(), 0..1000).prop_map(|b| Blake2bHash::new(&b)) -} - -pub fn trie_pointer_arb() -> impl Strategy { - prop_oneof![ - blake2b_hash_arb().prop_map(Pointer::LeafPointer), - blake2b_hash_arb().prop_map(Pointer::NodePointer) - ] -} - -pub fn trie_pointer_block_arb() -> impl Strategy { - vec(option::of(trie_pointer_arb()), 256).prop_map(|vec| { - let mut ret: [Option; 256] = [Default::default(); 256]; - ret.clone_from_slice(vec.as_slice()); - ret.into() - }) -} - -pub fn trie_arb() -> impl Strategy> { - prop_oneof![ - (key_arb(), stored_value_arb()).prop_map(|(key, 
value)| Trie::Leaf { key, value }), - trie_pointer_block_arb().prop_map(|pointer_block| Trie::Node { - pointer_block: Box::new(pointer_block) - }), - (vec(any::(), 0..32), trie_pointer_arb()).prop_map(|(affix, pointer)| { - Trie::Extension { - affix: affix.into(), - pointer, - } - }) - ] -} diff --git a/execution_engine/src/storage/trie/merkle_proof.rs b/execution_engine/src/storage/trie/merkle_proof.rs deleted file mode 100644 index 8b97d44f5c..0000000000 --- a/execution_engine/src/storage/trie/merkle_proof.rs +++ /dev/null @@ -1,307 +0,0 @@ -use std::collections::VecDeque; - -use casper_types::bytesrepr::{self, Bytes, FromBytes, ToBytes}; - -use crate::{ - shared::newtypes::Blake2bHash, - storage::trie::{Pointer, Trie, RADIX}, -}; - -const TRIE_MERKLE_PROOF_STEP_NODE_ID: u8 = 0; -const TRIE_MERKLE_PROOF_STEP_EXTENSION_ID: u8 = 1; - -/// A component of a proof that an entry exists in the Merkle trie. -#[derive(Debug, Clone, PartialEq, Eq)] -pub enum TrieMerkleProofStep { - /// Corresponds to [`Trie::Node`] - Node { - hole_index: u8, - indexed_pointers_with_hole: Vec<(u8, Pointer)>, - }, - /// Corresponds to [`Trie::Extension`] - Extension { affix: Bytes }, -} - -impl TrieMerkleProofStep { - /// Constructor for [`TrieMerkleProofStep::Node`] - pub fn node(hole_index: u8, indexed_pointers_with_hole: Vec<(u8, Pointer)>) -> Self { - Self::Node { - hole_index, - indexed_pointers_with_hole, - } - } - - /// Constructor for [`TrieMerkleProofStep::Extension`] - pub fn extension(affix: Vec) -> Self { - Self::Extension { - affix: affix.into(), - } - } -} - -impl ToBytes for TrieMerkleProofStep { - fn to_bytes(&self) -> Result, bytesrepr::Error> { - let mut ret: Vec = bytesrepr::allocate_buffer(self)?; - match self { - TrieMerkleProofStep::Node { - hole_index, - indexed_pointers_with_hole, - } => { - ret.push(TRIE_MERKLE_PROOF_STEP_NODE_ID); - ret.push(*hole_index); - ret.append(&mut indexed_pointers_with_hole.to_bytes()?) 
- } - TrieMerkleProofStep::Extension { affix } => { - ret.push(TRIE_MERKLE_PROOF_STEP_EXTENSION_ID); - ret.append(&mut affix.to_bytes()?) - } - }; - Ok(ret) - } - - fn serialized_length(&self) -> usize { - std::mem::size_of::() - + match self { - TrieMerkleProofStep::Node { - hole_index, - indexed_pointers_with_hole, - } => { - (*hole_index).serialized_length() - + (*indexed_pointers_with_hole).serialized_length() - } - TrieMerkleProofStep::Extension { affix } => affix.serialized_length(), - } - } -} - -impl FromBytes for TrieMerkleProofStep { - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { - let (tag, rem): (u8, &[u8]) = FromBytes::from_bytes(bytes)?; - match tag { - TRIE_MERKLE_PROOF_STEP_NODE_ID => { - let (hole_index, rem): (u8, &[u8]) = FromBytes::from_bytes(rem)?; - let (indexed_pointers_with_hole, rem): (Vec<(u8, Pointer)>, &[u8]) = - FromBytes::from_bytes(rem)?; - Ok(( - TrieMerkleProofStep::Node { - hole_index, - indexed_pointers_with_hole, - }, - rem, - )) - } - TRIE_MERKLE_PROOF_STEP_EXTENSION_ID => { - let (affix, rem): (_, &[u8]) = FromBytes::from_bytes(rem)?; - Ok((TrieMerkleProofStep::Extension { affix }, rem)) - } - _ => Err(bytesrepr::Error::Formatting), - } - } -} - -/// A proof that a node with a specified `key` and `value` is present in the Merkle trie. -/// Given a state hash `x`, one can validate a proof `p` by checking `x == p.compute_state_hash()`. 
-#[derive(Debug, Clone, PartialEq, Eq)] -pub struct TrieMerkleProof { - key: K, - value: V, - proof_steps: VecDeque, -} - -impl TrieMerkleProof { - /// Constructor for [`TrieMerkleProof`] - pub fn new(key: K, value: V, proof_steps: VecDeque) -> Self { - TrieMerkleProof { - key, - value, - proof_steps, - } - } - - /// Getter for the key in [`TrieMerkleProof`] - pub fn key(&self) -> &K { - &self.key - } - - /// Getter for the value in [`TrieMerkleProof`] - pub fn value(&self) -> &V { - &self.value - } - - /// Getter for the proof steps in [`TrieMerkleProof`] - pub fn proof_steps(&self) -> &VecDeque { - &self.proof_steps - } - - /// Transforms a [`TrieMerkleProof`] into the value it contains - pub fn into_value(self) -> V { - self.value - } -} - -impl TrieMerkleProof -where - K: ToBytes + Copy + Clone, - V: ToBytes + Clone, -{ - /// Recomputes a state root hash from a [`TrieMerkleProof`]. - /// This is done in the following steps: - /// - /// 1. Using [`TrieMerkleProof::key`] and [`TrieMerkleProof::value`], construct a - /// [`Trie::Leaf`] and compute a hash for that leaf. - /// - /// 2. We then iterate over [`TrieMerkleProof::proof_steps`] left to right, using the hash from - /// the previous step combined with the next step to compute a new hash. - /// - /// 3. When there are no more steps, we return the final hash we have computed. - /// - /// The steps in this function reflect `operations::rehash`. 
- pub fn compute_state_hash(&self) -> Result { - let mut hash = { - let leaf_bytes = Trie::leaf(self.key, self.value.to_owned()).to_bytes()?; - Blake2bHash::new(&leaf_bytes) - }; - - for (proof_step_index, proof_step) in self.proof_steps.iter().enumerate() { - let pointer = if proof_step_index == 0 { - Pointer::LeafPointer(hash) - } else { - Pointer::NodePointer(hash) - }; - let proof_step_bytes = match proof_step { - TrieMerkleProofStep::Node { - hole_index, - indexed_pointers_with_hole, - } => { - let hole_index = *hole_index; - assert!(hole_index as usize <= RADIX, "hole_index exceeded RADIX"); - let mut indexed_pointers = indexed_pointers_with_hole.to_owned(); - indexed_pointers.push((hole_index, pointer)); - Trie::::node(&indexed_pointers).to_bytes()? - } - TrieMerkleProofStep::Extension { affix } => { - Trie::::extension(affix.clone().into(), pointer).to_bytes()? - } - }; - hash = Blake2bHash::new(&proof_step_bytes); - } - Ok(hash) - } -} - -impl ToBytes for TrieMerkleProof -where - K: ToBytes, - V: ToBytes, -{ - fn to_bytes(&self) -> Result, bytesrepr::Error> { - let mut ret: Vec = bytesrepr::allocate_buffer(self)?; - ret.append(&mut self.key.to_bytes()?); - ret.append(&mut self.value.to_bytes()?); - ret.append(&mut self.proof_steps.to_bytes()?); - Ok(ret) - } - - fn serialized_length(&self) -> usize { - self.key.serialized_length() - + self.value.serialized_length() - + self.proof_steps.serialized_length() - } -} - -impl FromBytes for TrieMerkleProof -where - K: FromBytes, - V: FromBytes, -{ - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { - let (key, rem): (K, &[u8]) = FromBytes::from_bytes(bytes)?; - let (value, rem): (V, &[u8]) = FromBytes::from_bytes(rem)?; - let (proof_steps, rem): (VecDeque, &[u8]) = - FromBytes::from_bytes(rem)?; - Ok(( - TrieMerkleProof { - key, - value, - proof_steps, - }, - rem, - )) - } -} - -#[cfg(test)] -mod gens { - use proptest::{collection::vec, prelude::*}; - - use casper_types::{gens::key_arb, 
Key}; - - use crate::{ - shared::stored_value::{gens::stored_value_arb, StoredValue}, - storage::trie::{ - gens::trie_pointer_arb, - merkle_proof::{TrieMerkleProof, TrieMerkleProofStep}, - RADIX, - }, - }; - - const POINTERS_SIZE: usize = RADIX / 8; - const AFFIX_SIZE: usize = 6; - const STEPS_SIZE: usize = 6; - - pub fn trie_merkle_proof_step_arb() -> impl Strategy { - prop_oneof![ - ( - ::arbitrary(), - vec((::arbitrary(), trie_pointer_arb()), POINTERS_SIZE) - ) - .prop_map(|(hole_index, indexed_pointers_with_hole)| { - TrieMerkleProofStep::Node { - hole_index, - indexed_pointers_with_hole, - } - }), - vec(::arbitrary(), AFFIX_SIZE).prop_map(|affix| { - TrieMerkleProofStep::Extension { - affix: affix.into(), - } - }) - ] - } - - pub fn trie_merkle_proof_arb() -> impl Strategy> { - ( - key_arb(), - stored_value_arb(), - vec(trie_merkle_proof_step_arb(), STEPS_SIZE), - ) - .prop_map(|(key, value, proof_steps)| { - TrieMerkleProof::new(key, value, proof_steps.into()) - }) - } -} - -#[cfg(test)] -mod tests { - use proptest::prelude::*; - - use casper_types::bytesrepr; - - use super::gens; - - proptest! { - #[test] - fn trie_merkle_proof_step_serialization_is_correct( - step in gens::trie_merkle_proof_step_arb() - ) { - bytesrepr::test_serialization_roundtrip(&step) - } - - #[test] - fn trie_merkle_proof_serialization_is_correct( - proof in gens::trie_merkle_proof_arb() - ) { - bytesrepr::test_serialization_roundtrip(&proof) - } - } -} diff --git a/execution_engine/src/storage/trie/mod.rs b/execution_engine/src/storage/trie/mod.rs deleted file mode 100644 index 2343138ca1..0000000000 --- a/execution_engine/src/storage/trie/mod.rs +++ /dev/null @@ -1,457 +0,0 @@ -//! 
Core types for a Merkle Trie - -use std::{ - convert::TryInto, - fmt::{self, Debug, Display, Formatter}, - mem::MaybeUninit, -}; - -use serde::{ - de::{self, MapAccess, Visitor}, - ser::SerializeMap, - Deserialize, Deserializer, Serialize, Serializer, -}; - -use crate::shared::newtypes::Blake2bHash; -use casper_types::bytesrepr::{self, Bytes, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}; - -#[cfg(test)] -pub mod gens; - -pub mod merkle_proof; -#[cfg(test)] -mod tests; - -pub const USIZE_EXCEEDS_U8: &str = "usize exceeds u8"; -pub const RADIX: usize = 256; - -/// A parent is represented as a pair of a child index and a node or extension. -pub type Parents = Vec<(u8, Trie)>; - -/// Represents a pointer to the next object in a Merkle Trie -#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub enum Pointer { - LeafPointer(Blake2bHash), - NodePointer(Blake2bHash), -} - -impl Pointer { - pub fn hash(&self) -> &Blake2bHash { - match self { - Pointer::LeafPointer(hash) => hash, - Pointer::NodePointer(hash) => hash, - } - } - - pub fn into_hash(self) -> Blake2bHash { - match self { - Pointer::LeafPointer(hash) => hash, - Pointer::NodePointer(hash) => hash, - } - } - - pub fn update(&self, hash: Blake2bHash) -> Self { - match self { - Pointer::LeafPointer(_) => Pointer::LeafPointer(hash), - Pointer::NodePointer(_) => Pointer::NodePointer(hash), - } - } - - fn tag(&self) -> u8 { - match self { - Pointer::LeafPointer(_) => 0, - Pointer::NodePointer(_) => 1, - } - } -} - -impl ToBytes for Pointer { - fn to_bytes(&self) -> Result, bytesrepr::Error> { - let mut ret = bytesrepr::unchecked_allocate_buffer(self); - ret.push(self.tag()); - ret.extend_from_slice(self.hash().as_ref()); - Ok(ret) - } - - #[inline(always)] - fn serialized_length(&self) -> usize { - U8_SERIALIZED_LENGTH + Blake2bHash::LENGTH - } -} - -impl FromBytes for Pointer { - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { - let (tag, rem) = u8::from_bytes(bytes)?; - match 
tag { - 0 => { - let (hash, rem) = Blake2bHash::from_bytes(rem)?; - Ok((Pointer::LeafPointer(hash), rem)) - } - 1 => { - let (hash, rem) = Blake2bHash::from_bytes(rem)?; - Ok((Pointer::NodePointer(hash), rem)) - } - _ => Err(bytesrepr::Error::Formatting), - } - } -} - -pub type PointerBlockValue = Option; -pub type PointerBlockArray = [PointerBlockValue; RADIX]; - -/// Represents the underlying structure of a node in a Merkle Trie -#[derive(Copy, Clone)] -pub struct PointerBlock(PointerBlockArray); - -impl Serialize for PointerBlock { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - // We are going to use the sparse representation of pointer blocks - // non-None entries and their indices will be output - - // Create the sequence serializer, reserving the necessary number of slots - let elements_count = self.0.iter().filter(|element| element.is_some()).count(); - let mut map = serializer.serialize_map(Some(elements_count))?; - - // Store the non-None entries with their indices - for (index, maybe_pointer_block) in self.0.iter().enumerate() { - if let Some(pointer_block_value) = maybe_pointer_block { - map.serialize_entry(&(index as u8), pointer_block_value)?; - } - } - map.end() - } -} - -impl<'de> Deserialize<'de> for PointerBlock { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - struct PointerBlockDeserializer; - - impl<'de> Visitor<'de> for PointerBlockDeserializer { - type Value = PointerBlock; - - fn expecting(&self, formatter: &mut Formatter) -> fmt::Result { - formatter.write_str("sparse representation of a PointerBlock") - } - - fn visit_map(self, mut access: M) -> Result - where - M: MapAccess<'de>, - { - let mut pointer_block = PointerBlock::new(); - - // Unpack the sparse representation - while let Some((index, pointer_block_value)) = access.next_entry::()? 
{ - let element = pointer_block.0.get_mut(usize::from(index)).ok_or_else(|| { - de::Error::custom(format!("invalid index {} in pointer block value", index)) - })?; - *element = Some(pointer_block_value); - } - - Ok(pointer_block) - } - } - deserializer.deserialize_map(PointerBlockDeserializer) - } -} - -impl PointerBlock { - pub fn new() -> Self { - Default::default() - } - - pub fn from_indexed_pointers(indexed_pointers: &[(u8, Pointer)]) -> Self { - let mut ret = PointerBlock::new(); - for (idx, ptr) in indexed_pointers.iter() { - ret[*idx as usize] = Some(*ptr); - } - ret - } - - pub fn to_indexed_pointers(&self) -> impl Iterator + '_ { - self.0 - .iter() - .enumerate() - .filter_map(|(index, maybe_pointer)| { - maybe_pointer - .map(|value| (index.try_into().expect(USIZE_EXCEEDS_U8), value.to_owned())) - }) - } - - pub fn child_count(&self) -> usize { - self.to_indexed_pointers().count() - } -} - -impl From for PointerBlock { - fn from(src: PointerBlockArray) -> Self { - PointerBlock(src) - } -} - -impl PartialEq for PointerBlock { - #[inline] - fn eq(&self, other: &PointerBlock) -> bool { - self.0[..] == other.0[..] - } -} - -impl Eq for PointerBlock {} - -impl Default for PointerBlock { - fn default() -> Self { - PointerBlock([Default::default(); RADIX]) - } -} - -impl ToBytes for PointerBlock { - fn to_bytes(&self) -> Result, bytesrepr::Error> { - let mut result = bytesrepr::allocate_buffer(self)?; - for pointer in self.0.iter() { - result.append(&mut pointer.to_bytes()?); - } - Ok(result) - } - - fn serialized_length(&self) -> usize { - self.0.iter().map(ToBytes::serialized_length).sum() - } -} - -impl FromBytes for PointerBlock { - fn from_bytes(mut bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { - let pointer_block_array = { - // With MaybeUninit here we can avoid default initialization of result array below. 
- let mut result: MaybeUninit = MaybeUninit::uninit(); - let result_ptr = result.as_mut_ptr() as *mut PointerBlockValue; - for i in 0..RADIX { - let (t, remainder) = match FromBytes::from_bytes(bytes) { - Ok(success) => success, - Err(error) => { - for j in 0..i { - unsafe { result_ptr.add(j).drop_in_place() } - } - return Err(error); - } - }; - unsafe { result_ptr.add(i).write(t) }; - bytes = remainder; - } - unsafe { result.assume_init() } - }; - Ok((PointerBlock(pointer_block_array), bytes)) - } -} - -impl core::ops::Index for PointerBlock { - type Output = PointerBlockValue; - - #[inline] - fn index(&self, index: usize) -> &Self::Output { - let PointerBlock(dat) = self; - &dat[index] - } -} - -impl core::ops::IndexMut for PointerBlock { - #[inline] - fn index_mut(&mut self, index: usize) -> &mut Self::Output { - let PointerBlock(dat) = self; - &mut dat[index] - } -} - -impl core::ops::Index> for PointerBlock { - type Output = [PointerBlockValue]; - - #[inline] - fn index(&self, index: core::ops::Range) -> &[PointerBlockValue] { - let &PointerBlock(ref dat) = self; - &dat[index] - } -} - -impl core::ops::Index> for PointerBlock { - type Output = [PointerBlockValue]; - - #[inline] - fn index(&self, index: core::ops::RangeTo) -> &[PointerBlockValue] { - let &PointerBlock(ref dat) = self; - &dat[index] - } -} - -impl core::ops::Index> for PointerBlock { - type Output = [PointerBlockValue]; - - #[inline] - fn index(&self, index: core::ops::RangeFrom) -> &[PointerBlockValue] { - let &PointerBlock(ref dat) = self; - &dat[index] - } -} - -impl core::ops::Index for PointerBlock { - type Output = [PointerBlockValue]; - - #[inline] - fn index(&self, index: core::ops::RangeFull) -> &[PointerBlockValue] { - let &PointerBlock(ref dat) = self; - &dat[index] - } -} - -impl ::std::fmt::Debug for PointerBlock { - #[allow(clippy::assertions_on_constants)] - fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { - assert!(RADIX > 1, "RADIX must be > 1"); - write!(f, 
"{}([", stringify!(PointerBlock))?; - write!(f, "{:?}", self.0[0])?; - for item in self.0[1..].iter() { - write!(f, ", {:?}", item)?; - } - write!(f, "])") - } -} - -/// Represents a Merkle Trie -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub enum Trie { - Leaf { key: K, value: V }, - Node { pointer_block: Box }, - Extension { affix: Bytes, pointer: Pointer }, -} - -impl Display for Trie -where - K: Debug, - V: Debug, -{ - fn fmt(&self, f: &mut Formatter) -> fmt::Result { - write!(f, "{:?}", self) - } -} - -impl Trie { - fn tag(&self) -> u8 { - match self { - Trie::Leaf { .. } => 0, - Trie::Node { .. } => 1, - Trie::Extension { .. } => 2, - } - } - - /// Constructs a [`Trie::Leaf`] from a given key and value. - pub fn leaf(key: K, value: V) -> Self { - Trie::Leaf { key, value } - } - - /// Constructs a [`Trie::Node`] from a given slice of indexed pointers. - pub fn node(indexed_pointers: &[(u8, Pointer)]) -> Self { - let pointer_block = PointerBlock::from_indexed_pointers(indexed_pointers); - let pointer_block = Box::new(pointer_block); - Trie::Node { pointer_block } - } - - /// Constructs a [`Trie::Extension`] from a given affix and pointer. - pub fn extension(affix: Vec, pointer: Pointer) -> Self { - Trie::Extension { - affix: affix.into(), - pointer, - } - } - - pub fn key(&self) -> Option<&K> { - match self { - Trie::Leaf { key, .. 
} => Some(key), - _ => None, - } - } -} - -impl ToBytes for Trie -where - K: ToBytes, - V: ToBytes, -{ - fn to_bytes(&self) -> Result, bytesrepr::Error> { - let mut ret = bytesrepr::allocate_buffer(self)?; - ret.push(self.tag()); - - match self { - Trie::Leaf { key, value } => { - ret.append(&mut key.to_bytes()?); - ret.append(&mut value.to_bytes()?); - } - Trie::Node { pointer_block } => { - ret.append(&mut pointer_block.to_bytes()?); - } - Trie::Extension { affix, pointer } => { - ret.append(&mut affix.to_bytes()?); - ret.append(&mut pointer.to_bytes()?); - } - } - Ok(ret) - } - - fn serialized_length(&self) -> usize { - U8_SERIALIZED_LENGTH - + match self { - Trie::Leaf { key, value } => key.serialized_length() + value.serialized_length(), - Trie::Node { pointer_block } => pointer_block.serialized_length(), - Trie::Extension { affix, pointer } => { - affix.serialized_length() + pointer.serialized_length() - } - } - } -} - -impl FromBytes for Trie { - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { - let (tag, rem) = u8::from_bytes(bytes)?; - match tag { - 0 => { - let (key, rem) = K::from_bytes(rem)?; - let (value, rem) = V::from_bytes(rem)?; - Ok((Trie::Leaf { key, value }, rem)) - } - 1 => { - let (pointer_block, rem) = PointerBlock::from_bytes(rem)?; - Ok(( - Trie::Node { - pointer_block: Box::new(pointer_block), - }, - rem, - )) - } - 2 => { - let (affix, rem) = FromBytes::from_bytes(rem)?; - let (pointer, rem) = Pointer::from_bytes(rem)?; - Ok((Trie::Extension { affix, pointer }, rem)) - } - _ => Err(bytesrepr::Error::Formatting), - } - } -} - -pub(crate) mod operations { - use casper_types::bytesrepr::{self, ToBytes}; - - use crate::{shared::newtypes::Blake2bHash, storage::trie::Trie}; - - /// Creates a tuple containing an empty root hash and an empty root (a node - /// with an empty pointer block) - pub fn create_hashed_empty_trie( - ) -> Result<(Blake2bHash, Trie), bytesrepr::Error> { - let root: Trie = Trie::Node { - 
pointer_block: Default::default(), - }; - let root_bytes: Vec = root.to_bytes()?; - Ok((Blake2bHash::new(&root_bytes), root)) - } -} diff --git a/execution_engine/src/storage/trie/tests.rs b/execution_engine/src/storage/trie/tests.rs deleted file mode 100644 index 64c3abf6ef..0000000000 --- a/execution_engine/src/storage/trie/tests.rs +++ /dev/null @@ -1,127 +0,0 @@ -#[test] -fn radix_is_256() { - assert_eq!( - super::RADIX, - 256, - "Changing RADIX alone might cause things to break" - ); -} - -mod pointer_block { - use crate::storage::trie::*; - - /// A defense against changes to [`RADIX`](history::trie::RADIX). - #[test] - fn debug_formatter_succeeds() { - let _ = format!("{:?}", PointerBlock::new()); - } - - #[test] - fn assignment_and_indexing() { - let test_hash = Blake2bHash::new(b"TrieTrieAgain"); - let leaf_pointer = Some(Pointer::LeafPointer(test_hash)); - let mut pointer_block = PointerBlock::new(); - pointer_block[0] = leaf_pointer; - pointer_block[RADIX - 1] = leaf_pointer; - assert_eq!(leaf_pointer, pointer_block[0]); - assert_eq!(leaf_pointer, pointer_block[RADIX - 1]); - assert_eq!(None, pointer_block[1]); - assert_eq!(None, pointer_block[RADIX - 2]); - } - - #[test] - #[should_panic] - fn assignment_off_end() { - let test_hash = Blake2bHash::new(b"TrieTrieAgain"); - let leaf_pointer = Some(Pointer::LeafPointer(test_hash)); - let mut pointer_block = PointerBlock::new(); - pointer_block[RADIX] = leaf_pointer; - } - - #[test] - #[should_panic] - fn indexing_off_end() { - let pointer_block = PointerBlock::new(); - let _val = pointer_block[RADIX]; - } -} - -mod proptests { - use proptest::prelude::proptest; - - use casper_types::{bytesrepr, gens::key_arb, Key}; - - use crate::{ - shared::stored_value::StoredValue, - storage::trie::{gens::*, PointerBlock, Trie}, - }; - - proptest! 
{ - #[test] - fn roundtrip_blake2b_hash(hash in blake2b_hash_arb()) { - bytesrepr::test_serialization_roundtrip(&hash); - } - - #[test] - fn roundtrip_trie_pointer(pointer in trie_pointer_arb()) { - bytesrepr::test_serialization_roundtrip(&pointer); - } - - #[test] - fn roundtrip_trie_pointer_block(pointer_block in trie_pointer_block_arb()) { - bytesrepr::test_serialization_roundtrip(&pointer_block); - } - - #[test] - fn roundtrip_trie(trie in trie_arb()) { - bytesrepr::test_serialization_roundtrip(&trie); - } - - #[test] - fn roundtrip_key(key in key_arb()) { - bytesrepr::test_serialization_roundtrip(&key); - } - - #[test] - fn serde_roundtrip_trie_pointer_block(pointer_block in trie_pointer_block_arb()) { - let json_str = serde_json::to_string(&pointer_block)?; - let deserialized_pointer_block: PointerBlock = serde_json::from_str(&json_str)?; - assert_eq!(pointer_block, deserialized_pointer_block) - } - - #[test] - fn serde_roundtrip_trie(trie in trie_arb()) { - let json_str = serde_json::to_string(&trie)?; - let deserialized_trie: Trie = serde_json::from_str(&json_str)?; - assert_eq!(trie, deserialized_trie) - } - - #[test] - fn bincode_roundtrip_trie(trie in trie_arb()) { - let bincode_bytes = bincode::serialize(&trie)?; - let deserialized_trie = bincode::deserialize(&bincode_bytes)?; - assert_eq!(trie, deserialized_trie) - } - - #[test] - fn bincode_roundtrip_trie_pointer_block(pointer_block in trie_pointer_block_arb()) { - let bincode_bytes = bincode::serialize(&pointer_block)?; - let deserialized_pointer_block = bincode::deserialize(&bincode_bytes)?; - assert_eq!(pointer_block, deserialized_pointer_block) - } - - #[test] - fn bincode_roundtrip_key(key in key_arb()) { - let bincode_bytes = bincode::serialize(&key)?; - let deserialized_key = bincode::deserialize(&bincode_bytes)?; - assert_eq!(key, deserialized_key) - } - - #[test] - fn serde_roundtrip_key(key in key_arb()) { - let json_str = serde_json::to_string(&key)?; - let deserialized_key = 
serde_json::from_str(&json_str)?; - assert_eq!(key, deserialized_key) - } - } -} diff --git a/execution_engine/src/storage/trie_store/in_memory.rs b/execution_engine/src/storage/trie_store/in_memory.rs deleted file mode 100644 index cabf2d5f48..0000000000 --- a/execution_engine/src/storage/trie_store/in_memory.rs +++ /dev/null @@ -1,129 +0,0 @@ -//! An in-memory trie store, intended to be used for testing. -//! -//! # Usage -//! -//! ``` -//! use casper_execution_engine::storage::store::Store; -//! use casper_execution_engine::storage::transaction_source::{Transaction, TransactionSource}; -//! use casper_execution_engine::storage::transaction_source::in_memory::InMemoryEnvironment; -//! use casper_execution_engine::storage::trie::{Pointer, PointerBlock, Trie}; -//! use casper_execution_engine::storage::trie_store::in_memory::InMemoryTrieStore; -//! use casper_execution_engine::shared::newtypes::Blake2bHash; -//! use casper_types::bytesrepr::{ToBytes, Bytes}; -//! -//! // Create some leaves -//! let leaf_1 = Trie::Leaf { key: Bytes::from(vec![0u8, 0, 0]), value: Bytes::from(b"val_1".to_vec()) }; -//! let leaf_2 = Trie::Leaf { key: Bytes::from(vec![1u8, 0, 0]), value: Bytes::from(b"val_2".to_vec()) }; -//! -//! // Get their hashes -//! let leaf_1_hash = Blake2bHash::new(&leaf_1.to_bytes().unwrap()); -//! let leaf_2_hash = Blake2bHash::new(&leaf_2.to_bytes().unwrap()); -//! -//! // Create a node -//! let node: Trie = { -//! let mut pointer_block = PointerBlock::new(); -//! pointer_block[0] = Some(Pointer::LeafPointer(leaf_1_hash)); -//! pointer_block[1] = Some(Pointer::LeafPointer(leaf_2_hash)); -//! let pointer_block = Box::new(pointer_block); -//! Trie::Node { pointer_block } -//! }; -//! -//! // Get its hash -//! let node_hash = Blake2bHash::new(&node.to_bytes().unwrap()); -//! -//! // Create the environment and the store. For both the in-memory and -//! // LMDB-backed implementations, the environment is the source of -//! // transactions. -//! 
let env = InMemoryEnvironment::new(); -//! let store = InMemoryTrieStore::new(&env, None); -//! -//! // First let's create a read-write transaction, persist the values, but -//! // forget to commit the transaction. -//! { -//! // Create a read-write transaction -//! let mut txn = env.create_read_write_txn().unwrap(); -//! -//! // Put the values in the store -//! store.put(&mut txn, &leaf_1_hash, &leaf_1).unwrap(); -//! store.put(&mut txn, &leaf_2_hash, &leaf_2).unwrap(); -//! store.put(&mut txn, &node_hash, &node).unwrap(); -//! -//! // Here we forget to commit the transaction before it goes out of scope -//! } -//! -//! // Now let's check to see if the values were stored -//! { -//! // Create a read transaction -//! let txn = env.create_read_txn().unwrap(); -//! -//! // Observe that nothing has been persisted to the store -//! for hash in vec![&leaf_1_hash, &leaf_2_hash, &node_hash].iter() { -//! // We need to use a type annotation here to help the compiler choose -//! // a suitable FromBytes instance -//! let maybe_trie: Option> = store.get(&txn, hash).unwrap(); -//! assert!(maybe_trie.is_none()); -//! } -//! -//! // Commit the read transaction. Not strictly necessary, but better to be hygienic. -//! txn.commit().unwrap(); -//! } -//! -//! // Now let's try that again, remembering to commit the transaction this time -//! { -//! // Create a read-write transaction -//! let mut txn = env.create_read_write_txn().unwrap(); -//! -//! // Put the values in the store -//! store.put(&mut txn, &leaf_1_hash, &leaf_1).unwrap(); -//! store.put(&mut txn, &leaf_2_hash, &leaf_2).unwrap(); -//! store.put(&mut txn, &node_hash, &node).unwrap(); -//! -//! // Commit the transaction. -//! txn.commit().unwrap(); -//! } -//! -//! // Now let's check to see if the values were stored again -//! { -//! // Create a read transaction -//! let txn = env.create_read_txn().unwrap(); -//! -//! // Get the values in the store -//! assert_eq!(Some(leaf_1), store.get(&txn, &leaf_1_hash).unwrap()); -//! 
assert_eq!(Some(leaf_2), store.get(&txn, &leaf_2_hash).unwrap()); -//! assert_eq!(Some(node), store.get(&txn, &node_hash).unwrap()); -//! -//! // Commit the read transaction. -//! txn.commit().unwrap(); -//! } -//! ``` - -use super::{Blake2bHash, Store, Trie, TrieStore, NAME}; -use crate::storage::{error::in_memory::Error, transaction_source::in_memory::InMemoryEnvironment}; - -/// An in-memory trie store. -pub struct InMemoryTrieStore { - maybe_name: Option, -} - -impl InMemoryTrieStore { - pub fn new(_env: &InMemoryEnvironment, maybe_name: Option<&str>) -> Self { - let name = maybe_name - .map(|name| format!("{}-{}", NAME, name)) - .unwrap_or_else(|| String::from(NAME)); - InMemoryTrieStore { - maybe_name: Some(name), - } - } -} - -impl Store> for InMemoryTrieStore { - type Error = Error; - - type Handle = Option; - - fn handle(&self) -> Self::Handle { - self.maybe_name.to_owned() - } -} - -impl TrieStore for InMemoryTrieStore {} diff --git a/execution_engine/src/storage/trie_store/lmdb.rs b/execution_engine/src/storage/trie_store/lmdb.rs deleted file mode 100644 index 99f33fa3f9..0000000000 --- a/execution_engine/src/storage/trie_store/lmdb.rs +++ /dev/null @@ -1,161 +0,0 @@ -//! An LMDB-backed trie store. -//! -//! # Usage -//! -//! ``` -//! use casper_execution_engine::storage::store::Store; -//! use casper_execution_engine::storage::transaction_source::{Transaction, TransactionSource}; -//! use casper_execution_engine::storage::transaction_source::lmdb::LmdbEnvironment; -//! use casper_execution_engine::storage::trie::{Pointer, PointerBlock, Trie}; -//! use casper_execution_engine::storage::trie_store::lmdb::LmdbTrieStore; -//! use casper_execution_engine::shared::newtypes::Blake2bHash; -//! use casper_types::bytesrepr::{ToBytes, Bytes}; -//! use lmdb::DatabaseFlags; -//! use tempfile::tempdir; -//! -//! // Create some leaves -//! let leaf_1 = Trie::Leaf { key: Bytes::from(vec![0u8, 0, 0]), value: Bytes::from(b"val_1".to_vec()) }; -//! 
let leaf_2 = Trie::Leaf { key: Bytes::from(vec![1u8, 0, 0]), value: Bytes::from(b"val_2".to_vec()) }; -//! -//! // Get their hashes -//! let leaf_1_hash = Blake2bHash::new(&leaf_1.to_bytes().unwrap()); -//! let leaf_2_hash = Blake2bHash::new(&leaf_2.to_bytes().unwrap()); -//! -//! // Create a node -//! let node: Trie = { -//! let mut pointer_block = PointerBlock::new(); -//! pointer_block[0] = Some(Pointer::LeafPointer(leaf_1_hash)); -//! pointer_block[1] = Some(Pointer::LeafPointer(leaf_2_hash)); -//! let pointer_block = Box::new(pointer_block); -//! Trie::Node { pointer_block } -//! }; -//! -//! // Get its hash -//! let node_hash = Blake2bHash::new(&node.to_bytes().unwrap()); -//! -//! // Create the environment and the store. For both the in-memory and -//! // LMDB-backed implementations, the environment is the source of -//! // transactions. -//! let tmp_dir = tempdir().unwrap(); -//! let map_size = 4096 * 2560; // map size should be a multiple of OS page size -//! let max_readers = 512; -//! let env = LmdbEnvironment::new(&tmp_dir.path().to_path_buf(), map_size, max_readers).unwrap(); -//! let store = LmdbTrieStore::new(&env, None, DatabaseFlags::empty()).unwrap(); -//! -//! // First let's create a read-write transaction, persist the values, but -//! // forget to commit the transaction. -//! { -//! // Create a read-write transaction -//! let mut txn = env.create_read_write_txn().unwrap(); -//! -//! // Put the values in the store -//! store.put(&mut txn, &leaf_1_hash, &leaf_1).unwrap(); -//! store.put(&mut txn, &leaf_2_hash, &leaf_2).unwrap(); -//! store.put(&mut txn, &node_hash, &node).unwrap(); -//! -//! // Here we forget to commit the transaction before it goes out of scope -//! } -//! -//! // Now let's check to see if the values were stored -//! { -//! // Create a read transaction -//! let txn = env.create_read_txn().unwrap(); -//! -//! // Observe that nothing has been persisted to the store -//! 
for hash in vec![&leaf_1_hash, &leaf_2_hash, &node_hash].iter() { -//! // We need to use a type annotation here to help the compiler choose -//! // a suitable FromBytes instance -//! let maybe_trie: Option> = store.get(&txn, hash).unwrap(); -//! assert!(maybe_trie.is_none()); -//! } -//! -//! // Commit the read transaction. Not strictly necessary, but better to be hygienic. -//! txn.commit().unwrap(); -//! } -//! -//! // Now let's try that again, remembering to commit the transaction this time -//! { -//! // Create a read-write transaction -//! let mut txn = env.create_read_write_txn().unwrap(); -//! -//! // Put the values in the store -//! store.put(&mut txn, &leaf_1_hash, &leaf_1).unwrap(); -//! store.put(&mut txn, &leaf_2_hash, &leaf_2).unwrap(); -//! store.put(&mut txn, &node_hash, &node).unwrap(); -//! -//! // Commit the transaction. -//! txn.commit().unwrap(); -//! } -//! -//! // Now let's check to see if the values were stored again -//! { -//! // Create a read transaction -//! let txn = env.create_read_txn().unwrap(); -//! -//! // Get the values in the store -//! assert_eq!(Some(leaf_1), store.get(&txn, &leaf_1_hash).unwrap()); -//! assert_eq!(Some(leaf_2), store.get(&txn, &leaf_2_hash).unwrap()); -//! assert_eq!(Some(node), store.get(&txn, &node_hash).unwrap()); -//! -//! // Commit the read transaction. -//! txn.commit().unwrap(); -//! } -//! -//! tmp_dir.close().unwrap(); -//! ``` - -use lmdb::{Database, DatabaseFlags}; - -use crate::shared::newtypes::Blake2bHash; - -use crate::storage::{ - error, - store::Store, - transaction_source::lmdb::LmdbEnvironment, - trie::Trie, - trie_store::{self, TrieStore}, -}; - -/// An LMDB-backed trie store. -/// -/// Wraps [`lmdb::Database`]. 
-#[derive(Debug, Clone)] -pub struct LmdbTrieStore { - db: Database, -} - -impl LmdbTrieStore { - pub fn new( - env: &LmdbEnvironment, - maybe_name: Option<&str>, - flags: DatabaseFlags, - ) -> Result { - let name = Self::name(maybe_name); - let db = env.env().create_db(Some(&name), flags)?; - Ok(LmdbTrieStore { db }) - } - - pub fn open(env: &LmdbEnvironment, maybe_name: Option<&str>) -> Result { - let name = Self::name(maybe_name); - let db = env.env().open_db(Some(&name))?; - Ok(LmdbTrieStore { db }) - } - - fn name(maybe_name: Option<&str>) -> String { - maybe_name - .map(|name| format!("{}-{}", trie_store::NAME, name)) - .unwrap_or_else(|| String::from(trie_store::NAME)) - } -} - -impl Store> for LmdbTrieStore { - type Error = error::Error; - - type Handle = Database; - - fn handle(&self) -> Self::Handle { - self.db - } -} - -impl TrieStore for LmdbTrieStore {} diff --git a/execution_engine/src/storage/trie_store/mod.rs b/execution_engine/src/storage/trie_store/mod.rs deleted file mode 100644 index 74a2501010..0000000000 --- a/execution_engine/src/storage/trie_store/mod.rs +++ /dev/null @@ -1,18 +0,0 @@ -//! A store for persisting `Trie` values at their hashes. -//! -//! See the [in_memory](in_memory/index.html#usage) and -//! [lmdb](lmdb/index.html#usage) modules for usage examples. -pub mod in_memory; -pub mod lmdb; -pub(crate) mod operations; -#[cfg(test)] -mod tests; - -use crate::shared::newtypes::Blake2bHash; - -use crate::storage::{store::Store, trie::Trie}; - -const NAME: &str = "TRIE_STORE"; - -/// An entity which persists [`Trie`] values at their hashes. 
-pub trait TrieStore: Store> {} diff --git a/execution_engine/src/storage/trie_store/operations/mod.rs b/execution_engine/src/storage/trie_store/operations/mod.rs deleted file mode 100644 index c7e67553fc..0000000000 --- a/execution_engine/src/storage/trie_store/operations/mod.rs +++ /dev/null @@ -1,1259 +0,0 @@ -#[cfg(test)] -mod tests; - -use std::{ - cmp, - collections::{HashSet, VecDeque}, - convert::TryInto, - mem, -}; - -use tracing::warn; - -use casper_types::bytesrepr::{self, FromBytes, ToBytes}; - -use crate::{ - shared::newtypes::{Blake2bHash, CorrelationId}, - storage::{ - transaction_source::{Readable, Writable}, - trie::{ - merkle_proof::{TrieMerkleProof, TrieMerkleProofStep}, - Parents, Pointer, PointerBlock, Trie, RADIX, USIZE_EXCEEDS_U8, - }, - trie_store::TrieStore, - }, -}; - -#[derive(Debug, PartialEq, Eq)] -pub enum ReadResult { - Found(V), - NotFound, - RootNotFound, -} - -impl ReadResult { - #[cfg(test)] - pub fn is_found(&self) -> bool { - matches!(self, ReadResult::Found(_)) - } -} - -/// Returns a value from the corresponding key at a given root in a given store -pub fn read( - _correlation_id: CorrelationId, - txn: &T, - store: &S, - root: &Blake2bHash, - key: &K, -) -> Result, E> -where - K: ToBytes + FromBytes + Eq + std::fmt::Debug, - V: ToBytes + FromBytes, - T: Readable, - S: TrieStore, - S::Error: From, - E: From + From, -{ - let path: Vec = key.to_bytes()?; - - let mut depth: usize = 0; - let mut current: Trie = match store.get(txn, root)? 
{ - Some(root) => root, - None => return Ok(ReadResult::RootNotFound), - }; - - loop { - match current { - Trie::Leaf { - key: leaf_key, - value: leaf_value, - } => { - let result = if *key == leaf_key { - ReadResult::Found(leaf_value) - } else { - // Keys may not match in the case of a compressed path from - // a Node directly to a Leaf - ReadResult::NotFound - }; - return Ok(result); - } - Trie::Node { pointer_block } => { - let index: usize = { - assert!(depth < path.len(), "depth must be < {}", path.len()); - path[depth].into() - }; - let maybe_pointer: Option = { - assert!(index < RADIX, "key length must be < {}", RADIX); - pointer_block[index] - }; - match maybe_pointer { - Some(pointer) => match store.get(txn, pointer.hash())? { - Some(next) => { - depth += 1; - current = next; - } - None => { - panic!( - "No trie value at key: {:?} (reading from key: {:?})", - pointer.hash(), - key - ); - } - }, - None => { - return Ok(ReadResult::NotFound); - } - } - } - Trie::Extension { affix, pointer } => { - let sub_path = &path[depth..depth + affix.len()]; - if sub_path == affix.as_slice() { - match store.get(txn, pointer.hash())? { - Some(next) => { - depth += affix.len(); - current = next; - } - None => { - panic!( - "No trie value at key: {:?} (reading from key: {:?})", - pointer.hash(), - key - ); - } - } - } else { - return Ok(ReadResult::NotFound); - } - } - } - } -} - -/// Same as [`read`], except that a [`TrieMerkleProof`] is generated and returned along with the key -/// and the value given the root and store. -pub fn read_with_proof( - _correlation_id: CorrelationId, - txn: &T, - store: &S, - root: &Blake2bHash, - key: &K, -) -> Result>, E> -where - K: ToBytes + FromBytes + Eq + std::fmt::Debug, - V: ToBytes + FromBytes, - T: Readable, - S: TrieStore, - S::Error: From, - E: From + From, -{ - let mut proof_steps = VecDeque::new(); - let path: Vec = key.to_bytes()?; - - let mut depth: usize = 0; - let mut current: Trie = match store.get(txn, root)? 
{ - Some(root) => root, - None => return Ok(ReadResult::RootNotFound), - }; - loop { - match current { - Trie::Leaf { - key: leaf_key, - value, - } => { - if *key != leaf_key { - return Ok(ReadResult::NotFound); - } - let key = leaf_key; - return Ok(ReadResult::Found(TrieMerkleProof::new( - key, - value, - proof_steps, - ))); - } - Trie::Node { pointer_block } => { - let hole_index: usize = { - assert!(depth < path.len(), "depth must be < {}", path.len()); - path[depth].into() - }; - let pointer: Pointer = { - assert!(hole_index < RADIX, "key length must be < {}", RADIX); - match pointer_block[hole_index] { - Some(pointer) => pointer, - None => return Ok(ReadResult::NotFound), - } - }; - let indexed_pointers_with_hole = pointer_block - .to_indexed_pointers() - .filter(|(index, _)| *index as usize != hole_index) - .collect(); - let next = match store.get(txn, pointer.hash())? { - Some(next) => next, - None => { - panic!( - "No trie value at key: {:?} (reading from key: {:?})", - pointer.hash(), - key - ); - } - }; - depth += 1; - current = next; - let hole_index: u8 = hole_index.try_into().expect(USIZE_EXCEEDS_U8); - proof_steps.push_front(TrieMerkleProofStep::node( - hole_index, - indexed_pointers_with_hole, - )); - } - Trie::Extension { affix, pointer } => { - let sub_path = &path[depth..depth + affix.len()]; - if sub_path != affix.as_slice() { - return Ok(ReadResult::NotFound); - }; - - let next = match store.get(txn, pointer.hash())? { - Some(next) => next, - None => { - panic!( - "No trie value at key: {:?} (reading from key: {:?})", - pointer.hash(), - key - ); - } - }; - depth += affix.len(); - current = next; - proof_steps.push_front(TrieMerkleProofStep::extension(affix.into())); - } - } - } -} - -/// Given a root hash, find any try keys that are descendant from it that are: -/// 1. referenced but not present in the database -/// 2. 
referenced and present but whose values' hashes do not equal their keys (ie, corrupted) -// TODO: We only need to check one trie key at a time -pub fn missing_trie_keys( - _correlation_id: CorrelationId, - txn: &T, - store: &S, - mut trie_keys_to_visit: Vec, -) -> Result, E> -where - K: ToBytes + FromBytes + Eq + std::fmt::Debug, - V: ToBytes + FromBytes + std::fmt::Debug, - T: Readable, - S: TrieStore, - S::Error: From, - E: From + From, -{ - let mut missing_descendants = Vec::new(); - let mut visited = HashSet::new(); - while let Some(trie_key) = trie_keys_to_visit.pop() { - if !visited.insert(trie_key) { - continue; - } - let maybe_retrieved_trie: Option> = store.get(txn, &trie_key)?; - if let Some(trie_value) = &maybe_retrieved_trie { - let hash_of_trie_value = { - let node_bytes = trie_value.to_bytes()?; - Blake2bHash::new(&node_bytes) - }; - if trie_key != hash_of_trie_value { - warn!( - "Trie key {:?} has corrupted value {:?} (hash of value is {:?}); \ - adding to list of missing nodes", - trie_key, trie_value, hash_of_trie_value - ); - missing_descendants.push(trie_key); - continue; - } - } - match maybe_retrieved_trie { - // If we can't find the trie_key; it is missing and we'll return it - None => { - missing_descendants.push(trie_key); - } - // If we could retrieve the node and it is a leaf, the search can move on - Some(Trie::Leaf { .. }) => (), - // If we hit a pointer block, queue up all of the nodes it points to - Some(Trie::Node { pointer_block }) => { - for (_, pointer) in pointer_block.to_indexed_pointers() { - match pointer { - Pointer::LeafPointer(descendant_leaf_trie_key) => { - trie_keys_to_visit.push(descendant_leaf_trie_key) - } - Pointer::NodePointer(descendant_node_trie_key) => { - trie_keys_to_visit.push(descendant_node_trie_key) - } - } - } - } - // If we hit an extension block, add its pointer to the queue - Some(Trie::Extension { pointer, .. 
}) => trie_keys_to_visit.push(pointer.into_hash()), - } - } - Ok(missing_descendants) -} - -#[cfg(test)] -pub fn check_integrity( - _correlation_id: CorrelationId, - txn: &T, - store: &S, - trie_keys_to_visit: Vec, -) -> Result<(), E> -where - K: ToBytes + FromBytes + Eq + std::fmt::Debug, - V: ToBytes + FromBytes + std::fmt::Debug, - T: Readable, - S: TrieStore, - S::Error: From, - E: From + From, -{ - for state_root in &trie_keys_to_visit { - match store.get(txn, &state_root)? { - Some(Trie::Node { .. }) => {} - _ => panic!("Should have a pointer block node as state root"), - } - } - let mut trie_keys_to_visit: Vec<(Vec, Blake2bHash)> = trie_keys_to_visit - .into_iter() - .map(|blake2b_hash| (Vec::new(), blake2b_hash)) - .collect(); - let mut visited = HashSet::new(); - while let Some((mut path, trie_key)) = trie_keys_to_visit.pop() { - if !visited.insert(trie_key) { - continue; - } - let maybe_retrieved_trie: Option> = store.get(txn, &trie_key)?; - if let Some(trie_value) = &maybe_retrieved_trie { - let hash_of_trie_value = { - let node_bytes = trie_value.to_bytes()?; - Blake2bHash::new(&node_bytes) - }; - if trie_key != hash_of_trie_value { - panic!( - "Trie key {:?} has corrupted value {:?} (hash of value is {:?})", - trie_key, trie_value, hash_of_trie_value - ); - } - } - match maybe_retrieved_trie { - // If we can't find the trie_key; it is missing and we'll return it - None => { - panic!("Missing trie key: {:?}", trie_key) - } - // If we could retrieve the node and it is a leaf, the search can move on - Some(Trie::Leaf { key, .. }) => { - let key_bytes = key.to_bytes()?; - if !key_bytes.starts_with(&path) { - panic!( - "Trie key {:?} belongs to a leaf with a corrupted affix. 
Key bytes: {:?}, Path: {:?}.", - trie_key, key_bytes, path - ); - } - } - // If we hit a pointer block, queue up all of the nodes it points to - Some(Trie::Node { pointer_block }) => { - for (byte, pointer) in pointer_block.to_indexed_pointers() { - let mut new_path = path.clone(); - new_path.push(byte); - match pointer { - Pointer::LeafPointer(descendant_leaf_trie_key) => { - trie_keys_to_visit.push((new_path, descendant_leaf_trie_key)) - } - Pointer::NodePointer(descendant_node_trie_key) => { - trie_keys_to_visit.push((new_path, descendant_node_trie_key)) - } - } - } - } - // If we hit an extension block, add its pointer to the queue - Some(Trie::Extension { pointer, affix }) => { - path.extend_from_slice(affix.as_slice()); - trie_keys_to_visit.push((path, pointer.into_hash())) - } - } - } - Ok(()) -} - -struct TrieScan { - tip: Trie, - parents: Parents, -} - -impl TrieScan { - fn new(tip: Trie, parents: Parents) -> Self { - TrieScan { tip, parents } - } -} - -/// Returns a [`TrieScan`] from the given key at a given root in a given store. -/// A scan consists of the deepest trie variant found at that key, a.k.a. the -/// "tip", along the with the parents of that variant. Parents are ordered by -/// their depth from the root (shallow to deep). -fn scan( - _correlation_id: CorrelationId, - txn: &T, - store: &S, - key_bytes: &[u8], - root: &Trie, -) -> Result, E> -where - K: ToBytes + FromBytes + Clone, - V: ToBytes + FromBytes + Clone, - T: Readable, - S: TrieStore, - S::Error: From, - E: From + From, -{ - let path = key_bytes; - - let mut current = root.to_owned(); - let mut depth: usize = 0; - let mut acc: Parents = Vec::new(); - - loop { - match current { - leaf @ Trie::Leaf { .. 
} => { - return Ok(TrieScan::new(leaf, acc)); - } - Trie::Node { pointer_block } => { - let index = { - assert!(depth < path.len(), "depth must be < {}", path.len()); - path[depth] - }; - let maybe_pointer: Option = { - let index: usize = index.into(); - assert!(index < RADIX, "index must be < {}", RADIX); - pointer_block[index] - }; - let pointer = match maybe_pointer { - Some(pointer) => pointer, - None => { - return Ok(TrieScan::new(Trie::Node { pointer_block }, acc)); - } - }; - match store.get(txn, pointer.hash())? { - Some(next) => { - current = next; - depth += 1; - acc.push((index, Trie::Node { pointer_block })) - } - None => { - panic!( - "No trie value at key: {:?} (reading from path: {:?})", - pointer.hash(), - path - ); - } - } - } - Trie::Extension { affix, pointer } => { - let sub_path = &path[depth..depth + affix.len()]; - if sub_path != affix.as_slice() { - return Ok(TrieScan::new(Trie::Extension { affix, pointer }, acc)); - } - match store.get(txn, pointer.hash())? { - Some(next) => { - let index = { - assert!(depth < path.len(), "depth must be < {}", path.len()); - path[depth] - }; - current = next; - depth += affix.len(); - acc.push((index, Trie::Extension { affix, pointer })) - } - None => { - panic!( - "No trie value at key: {:?} (reading from path: {:?})", - pointer.hash(), - path - ); - } - } - } - } - } -} - -#[derive(Debug, PartialEq, Eq)] -pub enum DeleteResult { - Deleted(Blake2bHash), - DoesNotExist, - RootNotFound, -} - -#[allow(unused)] -fn delete( - correlation_id: CorrelationId, - txn: &mut T, - store: &S, - root: &Blake2bHash, - key_to_delete: &K, -) -> Result -where - K: ToBytes + FromBytes + Clone + PartialEq + std::fmt::Debug, - V: ToBytes + FromBytes + Clone, - T: Readable + Writable, - S: TrieStore, - S::Error: From, - E: From + From, -{ - let root_trie = match store.get(txn, root)? 
{ - None => return Ok(DeleteResult::RootNotFound), - Some(root_trie) => root_trie, - }; - - let key_bytes = key_to_delete.to_bytes()?; - let TrieScan { tip, mut parents } = - scan::<_, _, _, _, E>(correlation_id, txn, store, &key_bytes, &root_trie)?; - - // Check that tip is a leaf - match tip { - Trie::Leaf { key, .. } if key == *key_to_delete => {} - _ => return Ok(DeleteResult::DoesNotExist), - } - - let mut new_elements: Vec<(Blake2bHash, Trie)> = Vec::new(); - - while let Some((idx, parent)) = parents.pop() { - match (new_elements.last_mut(), parent) { - (_, Trie::Leaf { .. }) => panic!("Should not find leaf"), - (None, Trie::Extension { .. }) => panic!("Extension node should never end in leaf"), - (Some((_, Trie::Leaf { .. })), _) => panic!("New elements should never contain a leaf"), - // The parent is the node which pointed to the leaf we deleted, and that leaf had - // multiple siblings. - (None, Trie::Node { mut pointer_block }) if pointer_block.child_count() > 2 => { - let trie_node: Trie = { - pointer_block[idx as usize] = None; - Trie::Node { pointer_block } - }; - let trie_key = Blake2bHash::new(&trie_node.to_bytes()?); - new_elements.push((trie_key, trie_node)) - } - // The parent is the node which pointed to the leaf we deleted, and that leaf had one or - // zero siblings. - (None, Trie::Node { mut pointer_block }) => { - let (sibling_idx, sibling_pointer) = match pointer_block - .to_indexed_pointers() - .find(|(jdx, _)| idx != *jdx) - { - // There are zero siblings. Elsewhere we maintain the invariant that only the - // root node can contain a single leaf. Therefore the parent is the root node. - // The resulting output is just the empty node and nothing else. 
- None => { - let trie_node = Trie::Node { - pointer_block: Box::new(PointerBlock::new()), - }; - let trie_key = Blake2bHash::new(&trie_node.to_bytes()?); - new_elements.push((trie_key, trie_node)); - break; - } - Some((sibling_idx, pointer)) => (sibling_idx, pointer), - }; - // There is one sibling. - match (sibling_pointer, parents.pop()) { - (_, Some((_, Trie::Leaf { .. }))) => panic!("Should not have leaf in scan"), - // There is no grandparent. Therefore the parent is the root node. Output the - // root node with the index zeroed out. - (_, None) => { - pointer_block[idx as usize] = None; - let trie_node = Trie::Node { pointer_block }; - let trie_key = Blake2bHash::new(&trie_node.to_bytes()?); - new_elements.push((trie_key, trie_node)); - break; - } - // The sibling is a leaf and the grandparent is a node. Reseat the single leaf - // sibling into the grandparent. - (Pointer::LeafPointer(..), Some((idx, Trie::Node { mut pointer_block }))) => { - pointer_block[idx as usize] = Some(sibling_pointer); - let trie_node = Trie::Node { pointer_block }; - let trie_key = Blake2bHash::new(&trie_node.to_bytes()?); - new_elements.push((trie_key, trie_node)) - } - // The sibling is a leaf and the grandparent is an extension. - (Pointer::LeafPointer(..), Some((_, Trie::Extension { .. }))) => { - match parents.pop() { - None => panic!("Root node cannot be an extension node"), - Some((_, Trie::Leaf { .. })) => panic!("Should not find leaf"), - Some((_, Trie::Extension { .. })) => { - panic!("Extension cannot extend to an extension") - } - // The great-grandparent is a node. Reseat the single leaf sibling into - // the position the grandparent was in. 
- Some((idx, Trie::Node { mut pointer_block })) => { - pointer_block[idx as usize] = Some(sibling_pointer); - let trie_node = Trie::Node { pointer_block }; - let trie_key = Blake2bHash::new(&trie_node.to_bytes()?); - new_elements.push((trie_key, trie_node)) - } - } - } - // The single sibling is a node or an extension, and a grandparent exists. - // Therefore the parent is not the root - (Pointer::NodePointer(sibling_trie_key), Some((idx, grandparent))) => { - // Push the grandparent back onto the parents so it may be processed later. - parents.push((idx, grandparent)); - // Elsewhere we maintain the invariant that all trie keys have corresponding - // trie values. - let sibling_trie = store - .get(txn, &sibling_trie_key)? - .expect("should have sibling"); - match sibling_trie { - Trie::Leaf { .. } => { - panic!("Node pointer should not point to leaf") - } - // The single sibling is a node, and there exists a grandparent. - // Therefore the parent is not the root. We output an extension to - // replace the parent, with a single byte corresponding to the sibling - // index. In the next loop iteration, we will handle the case where - // this extension might need to be combined with a grandparent - // extension. - Trie::Node { .. } => { - let new_extension: Trie = Trie::Extension { - affix: vec![sibling_idx].into(), - pointer: sibling_pointer, - }; - let trie_key = Blake2bHash::new(&new_extension.to_bytes()?); - new_elements.push((trie_key, new_extension)) - } - // The single sibling is a extension. We output an extension to replace - // the parent, prepending the sibling index to the sibling's affix. In - // the next loop iteration, we will handle the case where this extension - // might need to be combined with a grandparent extension. 
- Trie::Extension { - affix: extension_affix, - pointer, - } => { - let mut new_affix = vec![sibling_idx]; - new_affix.extend(Vec::::from(extension_affix)); - let new_extension: Trie = Trie::Extension { - affix: new_affix.into(), - pointer, - }; - let trie_key = Blake2bHash::new(&new_extension.to_bytes()?); - new_elements.push((trie_key, new_extension)) - } - } - } - } - } - // The parent is a pointer block, and we are propagating a node or extension upwards. - // It is impossible to propagate a leaf upwards. Reseat the thing we are propagating - // into the parent. - (Some((trie_key, _)), Trie::Node { mut pointer_block }) => { - let trie_node: Trie = { - pointer_block[idx as usize] = Some(Pointer::NodePointer(*trie_key)); - Trie::Node { pointer_block } - }; - let trie_key = Blake2bHash::new(&trie_node.to_bytes()?); - new_elements.push((trie_key, trie_node)) - } - // The parent is an extension, and we are outputting an extension. Prepend the parent - // affix to affix of the output extension, mutating the output in place. This is the - // only mutate-in-place. - ( - Some(( - trie_key, - Trie::Extension { - affix: child_affix, - pointer, - }, - )), - Trie::Extension { affix, .. }, - ) => { - let mut new_affix: Vec = affix.into(); - new_affix.extend_from_slice(child_affix.as_slice()); - *child_affix = new_affix.into(); - *trie_key = { - let new_extension: Trie = Trie::Extension { - affix: child_affix.to_owned(), - pointer: pointer.to_owned(), - }; - Blake2bHash::new(&new_extension.to_bytes()?) - } - } - // The parent is an extension and the new element is a pointer block. The next element - // we add will be an extension to the pointer block we are going to add. - (Some((trie_key, Trie::Node { .. })), Trie::Extension { affix, .. 
}) => { - let pointer = Pointer::NodePointer(*trie_key); - let trie_extension = Trie::Extension { affix, pointer }; - let trie_key = Blake2bHash::new(&trie_extension.to_bytes()?); - new_elements.push((trie_key, trie_extension)) - } - } - } - for (hash, element) in new_elements.iter() { - store.put(txn, hash, element)?; - } - // The hash of the final trie in the new elements is the new root - let new_root = new_elements - .pop() - .map(|(hash, _)| hash) - .unwrap_or_else(|| root.to_owned()); - Ok(DeleteResult::Deleted(new_root)) -} - -#[allow(clippy::type_complexity)] -fn rehash( - mut tip: Trie, - parents: Parents, -) -> Result)>, bytesrepr::Error> -where - K: ToBytes + Clone, - V: ToBytes + Clone, -{ - let mut ret: Vec<(Blake2bHash, Trie)> = Vec::new(); - let mut tip_hash = { - let node_bytes = tip.to_bytes()?; - Blake2bHash::new(&node_bytes) - }; - ret.push((tip_hash, tip.to_owned())); - - for (index, parent) in parents.into_iter().rev() { - match parent { - Trie::Leaf { .. } => { - panic!("parents should not contain any leaves"); - } - Trie::Node { mut pointer_block } => { - tip = { - let pointer = match tip { - Trie::Leaf { .. } => Pointer::LeafPointer(tip_hash), - Trie::Node { .. } => Pointer::NodePointer(tip_hash), - Trie::Extension { .. 
} => Pointer::NodePointer(tip_hash), - }; - pointer_block[index.into()] = Some(pointer); - Trie::Node { pointer_block } - }; - tip_hash = { - let node_bytes = tip.to_bytes()?; - Blake2bHash::new(&node_bytes) - }; - ret.push((tip_hash, tip.to_owned())) - } - Trie::Extension { affix, pointer } => { - tip = { - let pointer = pointer.update(tip_hash); - Trie::Extension { affix, pointer } - }; - tip_hash = { - let extension_bytes = tip.to_bytes()?; - Blake2bHash::new(&extension_bytes) - }; - ret.push((tip_hash, tip.to_owned())) - } - } - } - Ok(ret) -} - -fn common_prefix(ls: &[A], rs: &[A]) -> Vec { - ls.iter() - .zip(rs.iter()) - .take_while(|(l, r)| l == r) - .map(|(l, _)| l.to_owned()) - .collect() -} - -fn get_parents_path(parents: &[(u8, Trie)]) -> Vec { - let mut ret = Vec::new(); - for (index, element) in parents.iter() { - if let Trie::Extension { affix, .. } = element { - ret.extend(affix); - } else { - ret.push(index.to_owned()); - } - } - ret -} - -/// Takes a path to a leaf, that leaf's parent node, and the parents of that -/// node, and adds the node to the parents. -/// -/// This function will panic if the the path to the leaf and the path to its -/// parent node do not share a common prefix. -fn add_node_to_parents( - path_to_leaf: &[u8], - new_parent_node: Trie, - mut parents: Parents, -) -> Parents -where - K: ToBytes, - V: ToBytes, -{ - // TODO: add is_node() method to Trie - match new_parent_node { - Trie::Node { .. } => (), - _ => panic!("new_parent must be a node"), - } - // The current depth will be the length of the path to the new parent node. 
- let depth: usize = { - // Get the path to this node - let path_to_node: Vec = get_parents_path(&parents); - // Check that the path to the node is a prefix of the current path - let current_path = common_prefix(&path_to_leaf, &path_to_node); - assert_eq!(current_path, path_to_node); - // Get the length - path_to_node.len() - }; - // Index path by current depth; - let index = { - assert!( - depth < path_to_leaf.len(), - "depth must be < {}", - path_to_leaf.len() - ); - path_to_leaf[depth] - }; - // Add node to parents, along with index to modify - parents.push((index, new_parent_node)); - parents -} - -/// Takes paths to a new leaf and an existing leaf that share a common prefix, -/// along with the parents of the existing leaf. Creates a new node (adding a -/// possible parent extension for it to parents) which contains the existing -/// leaf. Returns the new node and parents, so that they can be used by -/// [`add_node_to_parents`]. -#[allow(clippy::type_complexity)] -fn reparent_leaf( - new_leaf_path: &[u8], - existing_leaf_path: &[u8], - parents: Parents, -) -> Result<(Trie, Parents), bytesrepr::Error> -where - K: ToBytes, - V: ToBytes, -{ - let mut parents = parents; - let (child_index, parent) = parents.pop().expect("parents should not be empty"); - let pointer_block = match parent { - Trie::Node { pointer_block } => pointer_block, - _ => panic!("A leaf should have a node for its parent"), - }; - // Get the path that the new leaf and existing leaf share - let shared_path = common_prefix(&new_leaf_path, &existing_leaf_path); - // Assemble a new node to hold the existing leaf. The new leaf will - // be added later during the add_parent_node and rehash phase. 
- let new_node = { - let index = existing_leaf_path[shared_path.len()]; - let existing_leaf_pointer = - pointer_block[::from(child_index)].expect("parent has lost the existing leaf"); - Trie::node(&[(index, existing_leaf_pointer)]) - }; - // Re-add the parent node to parents - parents.push((child_index, Trie::Node { pointer_block })); - // Create an affix for a possible extension node - let affix = { - let parents_path = get_parents_path(&parents); - &shared_path[parents_path.len()..] - }; - // If the affix is non-empty, create an extension node and add it - // to parents. - if !affix.is_empty() { - let new_node_bytes = new_node.to_bytes()?; - let new_node_hash = Blake2bHash::new(&new_node_bytes); - let new_extension = Trie::extension(affix.to_vec(), Pointer::NodePointer(new_node_hash)); - parents.push((child_index, new_extension)); - } - Ok((new_node, parents)) -} - -struct SplitResult { - new_node: Trie, - parents: Parents, - maybe_hashed_child_extension: Option<(Blake2bHash, Trie)>, -} - -/// Takes a path to a new leaf, an existing extension that leaf collides with, -/// and the parents of that extension. Creates a new node and possible parent -/// and child extensions. The node pointer contained in the existing extension -/// is repositioned in the new node or the possible child extension. The -/// possible parent extension is added to parents. Returns the new node, -/// parents, and the the possible child extension (paired with its hash). -/// The new node and parents can be used by [`add_node_to_parents`], and the -/// new hashed child extension can be added to the list of new trie elements. 
-fn split_extension( - new_leaf_path: &[u8], - existing_extension: Trie, - mut parents: Parents, -) -> Result, bytesrepr::Error> -where - K: ToBytes + Clone, - V: ToBytes + Clone, -{ - // TODO: add is_extension() method to Trie - let (affix, pointer) = match existing_extension { - Trie::Extension { affix, pointer } => (affix, pointer), - _ => panic!("existing_extension must be an extension"), - }; - let parents_path = get_parents_path(&parents); - // Get the path to the existing extension node - let existing_extension_path: Vec = - parents_path.iter().chain(affix.iter()).cloned().collect(); - // Get the path that the new leaf and existing leaf share - let shared_path = common_prefix(&new_leaf_path, &existing_extension_path); - // Create an affix for a possible parent extension above the new - // node. - let parent_extension_affix = shared_path[parents_path.len()..].to_vec(); - // Create an affix for a possible child extension between the new - // node and the node that the existing extension pointed to. - let child_extension_affix = affix[parent_extension_affix.len() + 1..].to_vec(); - // Create a child extension (paired with its hash) if necessary - let maybe_hashed_child_extension: Option<(Blake2bHash, Trie)> = - if child_extension_affix.is_empty() { - None - } else { - let child_extension = Trie::extension(child_extension_affix.to_vec(), pointer); - let child_extension_bytes = child_extension.to_bytes()?; - let child_extension_hash = Blake2bHash::new(&child_extension_bytes); - Some((child_extension_hash, child_extension)) - }; - // Assemble a new node. 
- let new_node: Trie = { - let index = existing_extension_path[shared_path.len()]; - let pointer = maybe_hashed_child_extension - .to_owned() - .map_or(pointer, |(hash, _)| Pointer::NodePointer(hash)); - Trie::node(&[(index, pointer)]) - }; - // Create a parent extension if necessary - if !parent_extension_affix.is_empty() { - let new_node_bytes = new_node.to_bytes()?; - let new_node_hash = Blake2bHash::new(&new_node_bytes); - let parent_extension = Trie::extension( - parent_extension_affix.to_vec(), - Pointer::NodePointer(new_node_hash), - ); - parents.push((parent_extension_affix[0], parent_extension)); - } - Ok(SplitResult { - new_node, - parents, - maybe_hashed_child_extension, - }) -} - -#[derive(Debug, PartialEq, Eq)] -pub enum WriteResult { - Written(Blake2bHash), - AlreadyExists, - RootNotFound, -} - -pub fn write( - correlation_id: CorrelationId, - txn: &mut T, - store: &S, - root: &Blake2bHash, - key: &K, - value: &V, -) -> Result -where - K: ToBytes + FromBytes + Clone + Eq + std::fmt::Debug, - V: ToBytes + FromBytes + Clone + Eq, - T: Readable + Writable, - S: TrieStore, - S::Error: From, - E: From + From, -{ - match store.get(txn, root)? { - None => Ok(WriteResult::RootNotFound), - Some(current_root) => { - let new_leaf = Trie::Leaf { - key: key.to_owned(), - value: value.to_owned(), - }; - let path: Vec = key.to_bytes()?; - let TrieScan { tip, parents } = - scan::(correlation_id, txn, store, &path, ¤t_root)?; - let new_elements: Vec<(Blake2bHash, Trie)> = match tip { - // If the "tip" is the same as the new leaf, then the leaf - // is already in the Trie. - Trie::Leaf { .. } if new_leaf == tip => Vec::new(), - // If the "tip" is an existing leaf with the same key as the - // new leaf, but the existing leaf and new leaf have different - // values, then we are in the situation where we are "updating" - // an existing leaf. 
- Trie::Leaf { - key: ref leaf_key, - value: ref leaf_value, - } if key == leaf_key && value != leaf_value => rehash(new_leaf, parents)?, - // If the "tip" is an existing leaf with a different key than - // the new leaf, then we are in a situation where the new leaf - // shares some common prefix with the existing leaf. - Trie::Leaf { - key: ref existing_leaf_key, - .. - } if key != existing_leaf_key => { - let existing_leaf_path = existing_leaf_key.to_bytes()?; - let (new_node, parents) = reparent_leaf(&path, &existing_leaf_path, parents)?; - let parents = add_node_to_parents(&path, new_node, parents); - rehash(new_leaf, parents)? - } - // This case is unreachable, but the compiler can't figure - // that out. - Trie::Leaf { .. } => unreachable!(), - // If the "tip" is an existing node, then we can add a pointer - // to the new leaf to the node's pointer block. - node @ Trie::Node { .. } => { - let parents = add_node_to_parents(&path, node, parents); - rehash(new_leaf, parents)? - } - // If the "tip" is an extension node, then we must modify or - // replace it, adding a node where necessary. - extension @ Trie::Extension { .. } => { - let SplitResult { - new_node, - parents, - maybe_hashed_child_extension, - } = split_extension(&path, extension, parents)?; - let parents = add_node_to_parents(&path, new_node, parents); - if let Some(hashed_extension) = maybe_hashed_child_extension { - let mut ret = vec![hashed_extension]; - ret.extend(rehash(new_leaf, parents)?); - ret - } else { - rehash(new_leaf, parents)? 
- } - } - }; - if new_elements.is_empty() { - return Ok(WriteResult::AlreadyExists); - } - let mut root_hash = root.to_owned(); - for (hash, element) in new_elements.iter() { - store.put(txn, hash, element)?; - root_hash = *hash; - } - Ok(WriteResult::Written(root_hash)) - } - } -} - -pub fn put_trie( - _correlation_id: CorrelationId, - txn: &mut T, - store: &S, - trie: &Trie, -) -> Result -where - K: ToBytes + FromBytes + Clone + Eq + std::fmt::Debug, - V: ToBytes + FromBytes + Clone + Eq, - T: Readable + Writable, - S: TrieStore, - S::Error: From, - E: From + From, -{ - let trie_hash = { - let node_bytes = trie.to_bytes()?; - Blake2bHash::new(&node_bytes) - }; - store.put(txn, &trie_hash, trie)?; - Ok(trie_hash) -} - -enum KeysIteratorState> { - /// Iterate normally - Ok, - /// Return the error and stop iterating - #[allow(dead_code)] // Return variant alone is used in testing. - ReturnError(S::Error), - /// Already failed, only return None - Failed, -} - -struct VisitedTrieNode { - trie: Trie, - maybe_index: Option, - path: Vec, -} - -pub struct KeysIterator<'a, 'b, K, V, T, S: TrieStore> { - initial_descend: VecDeque, - visited: Vec>, - store: &'a S, - txn: &'b T, - state: KeysIteratorState, -} - -impl<'a, 'b, K, V, T, S> Iterator for KeysIterator<'a, 'b, K, V, T, S> -where - K: ToBytes + FromBytes + Clone + Eq + std::fmt::Debug, - V: ToBytes + FromBytes + Clone + Eq + std::fmt::Debug, - T: Readable, - S: TrieStore, - S::Error: From + From, -{ - type Item = Result; - - fn next(&mut self) -> Option { - match mem::replace(&mut self.state, KeysIteratorState::Ok) { - KeysIteratorState::Ok => (), - KeysIteratorState::ReturnError(e) => { - self.state = KeysIteratorState::Failed; - return Some(Err(e)); - } - KeysIteratorState::Failed => { - return None; - } - } - while let Some(VisitedTrieNode { - trie, - maybe_index, - mut path, - }) = self.visited.pop() - { - let mut maybe_next_trie: Option> = None; - - match trie { - Trie::Leaf { key, .. 
} => { - let key_bytes = match key.to_bytes() { - Ok(bytes) => bytes, - Err(e) => { - self.state = KeysIteratorState::Failed; - return Some(Err(e.into())); - } - }; - debug_assert!(key_bytes.starts_with(&path)); - // only return the leaf if it matches the initial descend path - path.extend(&self.initial_descend); - if key_bytes.starts_with(&path) { - return Some(Ok(key)); - } - } - Trie::Node { ref pointer_block } => { - // if we are still initially descending (and initial_descend is not empty), take - // the first index we should descend to, otherwise take maybe_index from the - // visited stack - let mut index: usize = self - .initial_descend - .front() - .map(|i| *i as usize) - .or(maybe_index) - .unwrap_or_default(); - while index < RADIX { - if let Some(ref pointer) = pointer_block[index] { - maybe_next_trie = match self.store.get(self.txn, pointer.hash()) { - Ok(trie) => trie, - Err(e) => { - self.state = KeysIteratorState::Failed; - return Some(Err(e)); - } - }; - debug_assert!(maybe_next_trie.is_some()); - if self.initial_descend.pop_front().is_none() { - self.visited.push(VisitedTrieNode { - trie, - maybe_index: Some(index + 1), - path: path.clone(), - }); - } - path.push(index as u8); - break; - } - // only continue the loop if we are not initially descending; - // if we are descending and we land here, it means that there is no subtrie - // along the descend path and we will return no results - if !self.initial_descend.is_empty() { - break; - } - index += 1; - } - } - Trie::Extension { affix, pointer } => { - let descend_len = cmp::min(self.initial_descend.len(), affix.len()); - let check_prefix = self - .initial_descend - .drain(..descend_len) - .collect::>(); - // if we are initially descending, we only want to continue if the affix - // matches the descend path - // if we are not, the check_prefix will be empty, so we will enter the if - // anyway - if affix.starts_with(&check_prefix) { - maybe_next_trie = match self.store.get(self.txn, 
pointer.hash()) { - Ok(trie) => trie, - Err(e) => { - self.state = KeysIteratorState::Failed; - return Some(Err(e)); - } - }; - debug_assert!({ matches!(&maybe_next_trie, Some(Trie::Node { .. })) }); - path.extend(affix); - } - } - } - - if let Some(next_trie) = maybe_next_trie { - self.visited.push(VisitedTrieNode { - trie: next_trie, - maybe_index: None, - path, - }); - } - } - None - } -} - -/// Returns the iterator over the keys at a given root hash. -/// -/// The root should be the apex of the trie. -#[cfg(test)] -pub fn keys<'a, 'b, K, V, T, S>( - correlation_id: CorrelationId, - txn: &'b T, - store: &'a S, - root: &Blake2bHash, -) -> KeysIterator<'a, 'b, K, V, T, S> -where - K: ToBytes + FromBytes + Clone + Eq + std::fmt::Debug, - V: ToBytes + FromBytes + Clone + Eq + std::fmt::Debug, - T: Readable, - S: TrieStore, - S::Error: From, -{ - keys_with_prefix(correlation_id, txn, store, root, &[]) -} - -/// Returns the iterator over the keys in the subtrie matching `prefix`. -/// -/// The root should be the apex of the trie. 
-pub fn keys_with_prefix<'a, 'b, K, V, T, S>( - _correlation_id: CorrelationId, - txn: &'b T, - store: &'a S, - root: &Blake2bHash, - prefix: &[u8], -) -> KeysIterator<'a, 'b, K, V, T, S> -where - K: ToBytes + FromBytes + Clone + Eq + std::fmt::Debug, - V: ToBytes + FromBytes + Clone + Eq + std::fmt::Debug, - T: Readable, - S: TrieStore, - S::Error: From, -{ - let (visited, init_state): (Vec>, _) = match store.get(txn, root) { - Ok(None) => (vec![], KeysIteratorState::Ok), - Err(e) => (vec![], KeysIteratorState::ReturnError(e)), - Ok(Some(current_root)) => ( - vec![VisitedTrieNode { - trie: current_root, - maybe_index: None, - path: vec![], - }], - KeysIteratorState::Ok, - ), - }; - - KeysIterator { - initial_descend: prefix.iter().cloned().collect(), - visited, - store, - txn, - state: init_state, - } -} diff --git a/execution_engine/src/storage/trie_store/operations/tests/delete.rs b/execution_engine/src/storage/trie_store/operations/tests/delete.rs deleted file mode 100644 index 9d178af71a..0000000000 --- a/execution_engine/src/storage/trie_store/operations/tests/delete.rs +++ /dev/null @@ -1,455 +0,0 @@ -use super::*; -use crate::storage::{transaction_source::Writable, trie_store::operations::DeleteResult}; - -fn checked_delete( - correlation_id: CorrelationId, - txn: &mut T, - store: &S, - root: &Blake2bHash, - key_to_delete: &K, -) -> Result -where - K: ToBytes + FromBytes + Clone + std::fmt::Debug + Eq, - V: ToBytes + FromBytes + Clone + std::fmt::Debug, - T: Readable + Writable, - S: TrieStore, - S::Error: From, - E: From + From, -{ - let delete_result = - operations::delete::(correlation_id, txn, store, root, key_to_delete)?; - if let DeleteResult::Deleted(new_root) = delete_result { - operations::check_integrity::(correlation_id, txn, store, vec![new_root])?; - } - Ok(delete_result) -} - -mod partial_tries { - use super::*; - use crate::storage::trie_store::operations::DeleteResult; - - fn delete_from_partial_trie_had_expected_results<'a, K, V, R, S, E>( 
- correlation_id: CorrelationId, - environment: &'a R, - store: &S, - root: &Blake2bHash, - key_to_delete: &K, - expected_root_after_delete: &Blake2bHash, - expected_tries_after_delete: &[HashedTrie], - ) -> Result<(), E> - where - K: ToBytes + FromBytes + Clone + Eq + std::fmt::Debug, - V: ToBytes + FromBytes + Clone + Eq + std::fmt::Debug, - R: TransactionSource<'a, Handle = S::Handle>, - S: TrieStore, - S::Error: From, - E: From + From + From, - { - let mut txn = environment.create_read_write_txn()?; - // The assert below only works with partial tries - assert_eq!(store.get(&txn, &expected_root_after_delete)?, None); - let root_after_delete = match checked_delete::( - correlation_id, - &mut txn, - store, - root, - key_to_delete, - )? { - DeleteResult::Deleted(root_after_delete) => root_after_delete, - DeleteResult::DoesNotExist => panic!("key did not exist"), - DeleteResult::RootNotFound => panic!("root should be found"), - }; - assert_eq!(root_after_delete, *expected_root_after_delete); - for HashedTrie { hash, trie } in expected_tries_after_delete { - assert_eq!(store.get(&txn, &hash)?, Some(trie.clone())); - } - Ok(()) - } - - #[test] - fn lmdb_delete_from_partial_trie_had_expected_results() { - for i in 0..TEST_LEAVES_LENGTH { - let correlation_id = CorrelationId::new(); - let (initial_root_hash, initial_tries) = TEST_TRIE_GENERATORS[i + 1]().unwrap(); - let (updated_root_hash, updated_tries) = TEST_TRIE_GENERATORS[i]().unwrap(); - let key_to_delete = &TEST_LEAVES[i]; - let context = LmdbTestContext::new(&initial_tries).unwrap(); - - delete_from_partial_trie_had_expected_results::( - correlation_id, - &context.environment, - &context.store, - &initial_root_hash, - key_to_delete.key().unwrap(), - &updated_root_hash, - updated_tries.as_slice(), - ) - .unwrap(); - } - } - - #[test] - fn in_memory_delete_from_partial_trie_had_expected_results() { - for i in 0..TEST_LEAVES_LENGTH { - let correlation_id = CorrelationId::new(); - let (initial_root_hash, 
initial_tries) = TEST_TRIE_GENERATORS[i + 1]().unwrap(); - let (updated_root_hash, updated_tries) = TEST_TRIE_GENERATORS[i]().unwrap(); - let key_to_delete = &TEST_LEAVES[i]; - let context = InMemoryTestContext::new(&initial_tries).unwrap(); - - delete_from_partial_trie_had_expected_results::( - correlation_id, - &context.environment, - &context.store, - &initial_root_hash, - key_to_delete.key().unwrap(), - &updated_root_hash, - updated_tries.as_slice(), - ) - .unwrap(); - } - } - - fn delete_non_existent_key_from_partial_trie_should_return_does_not_exist<'a, K, V, R, S, E>( - correlation_id: CorrelationId, - environment: &'a R, - store: &S, - root: &Blake2bHash, - key_to_delete: &K, - ) -> Result<(), E> - where - K: ToBytes + FromBytes + Clone + Eq + std::fmt::Debug, - V: ToBytes + FromBytes + Clone + Eq + std::fmt::Debug, - R: TransactionSource<'a, Handle = S::Handle>, - S: TrieStore, - S::Error: From, - E: From + From + From, - { - let mut txn = environment.create_read_write_txn()?; - match checked_delete::(correlation_id, &mut txn, store, root, key_to_delete)? 
- { - DeleteResult::Deleted(_) => panic!("should not delete"), - DeleteResult::DoesNotExist => Ok(()), - DeleteResult::RootNotFound => panic!("root should be found"), - } - } - - #[test] - fn lmdb_delete_non_existent_key_from_partial_trie_should_return_does_not_exist() { - for i in 0..TEST_LEAVES_LENGTH { - let correlation_id = CorrelationId::new(); - let (initial_root_hash, initial_tries) = TEST_TRIE_GENERATORS[i]().unwrap(); - let key_to_delete = &TEST_LEAVES_ADJACENTS[i]; - let context = LmdbTestContext::new(&initial_tries).unwrap(); - - delete_non_existent_key_from_partial_trie_should_return_does_not_exist::< - TestKey, - TestValue, - _, - _, - error::Error, - >( - correlation_id, - &context.environment, - &context.store, - &initial_root_hash, - key_to_delete.key().unwrap(), - ) - .unwrap(); - } - } - - #[test] - fn in_memory_delete_non_existent_key_from_partial_trie_should_return_does_not_exist() { - for i in 0..TEST_LEAVES_LENGTH { - let correlation_id = CorrelationId::new(); - let (initial_root_hash, initial_tries) = TEST_TRIE_GENERATORS[i]().unwrap(); - let key_to_delete = &TEST_LEAVES_ADJACENTS[i]; - let context = InMemoryTestContext::new(&initial_tries).unwrap(); - - delete_non_existent_key_from_partial_trie_should_return_does_not_exist::< - TestKey, - TestValue, - _, - _, - error::Error, - >( - correlation_id, - &context.environment, - &context.store, - &initial_root_hash, - key_to_delete.key().unwrap(), - ) - .unwrap(); - } - } -} - -mod full_tries { - use std::ops::RangeInclusive; - - use proptest::{collection, proptest}; - - use casper_types::{ - bytesrepr::{self, FromBytes, ToBytes}, - gens::colliding_key_arb, - Key, - }; - - use crate::{ - shared::{ - newtypes::{Blake2bHash, CorrelationId}, - stored_value::{gens::stored_value_arb, StoredValue}, - }, - storage::{ - error, - transaction_source::TransactionSource, - trie_store::{ - operations::{ - delete, - tests::{ - InMemoryTestContext, LmdbTestContext, TestKey, TestValue, - TEST_TRIE_GENERATORS, - 
}, - write, DeleteResult, WriteResult, - }, - TrieStore, - }, - }, - }; - - fn serially_insert_and_delete<'a, K, V, R, S, E>( - correlation_id: CorrelationId, - environment: &'a R, - store: &S, - root: &Blake2bHash, - pairs: &[(K, V)], - ) -> Result<(), E> - where - K: ToBytes + FromBytes + Clone + Eq + std::fmt::Debug, - V: ToBytes + FromBytes + Clone + Eq + std::fmt::Debug, - R: TransactionSource<'a, Handle = S::Handle>, - S: TrieStore, - S::Error: From, - E: From + From + From, - { - let mut txn = environment.create_read_write_txn()?; - let mut roots = Vec::new(); - // Insert the key-value pairs, keeping track of the roots as we go - for (key, value) in pairs { - if let WriteResult::Written(new_root) = write::( - correlation_id, - &mut txn, - store, - roots.last().unwrap_or(&root), - key, - value, - )? { - roots.push(new_root); - } else { - panic!("Could not write pair") - } - } - // Delete the key-value pairs, checking the resulting roots as we go - let mut current_root = roots.pop().unwrap_or_else(|| root.to_owned()); - for (key, _value) in pairs.iter().rev() { - if let DeleteResult::Deleted(new_root) = - delete::(correlation_id, &mut txn, store, ¤t_root, key)? 
- { - current_root = roots.pop().unwrap_or_else(|| root.to_owned()); - assert_eq!(new_root, current_root); - } else { - panic!("Could not delete") - } - } - Ok(()) - } - - #[test] - fn lmdb_serially_insert_and_delete() { - let correlation_id = CorrelationId::new(); - let (empty_root_hash, empty_trie) = TEST_TRIE_GENERATORS[0]().unwrap(); - let context = LmdbTestContext::new(&empty_trie).unwrap(); - - serially_insert_and_delete::( - correlation_id, - &context.environment, - &context.store, - &empty_root_hash, - &[ - (TestKey([1u8; 7]), TestValue([1u8; 6])), - (TestKey([0u8; 7]), TestValue([0u8; 6])), - (TestKey([0u8, 1, 1, 1, 1, 1, 1]), TestValue([2u8; 6])), - (TestKey([2u8; 7]), TestValue([2u8; 6])), - ], - ) - .unwrap(); - } - - #[test] - fn in_memory_serially_insert_and_delete() { - let correlation_id = CorrelationId::new(); - let (empty_root_hash, empty_trie) = TEST_TRIE_GENERATORS[0]().unwrap(); - let context = InMemoryTestContext::new(&empty_trie).unwrap(); - - serially_insert_and_delete::( - correlation_id, - &context.environment, - &context.store, - &empty_root_hash, - &[ - (TestKey([1u8; 7]), TestValue([1u8; 6])), - (TestKey([0u8; 7]), TestValue([0u8; 6])), - (TestKey([0u8, 1, 1, 1, 1, 1, 1]), TestValue([2u8; 6])), - (TestKey([2u8; 7]), TestValue([2u8; 6])), - ], - ) - .unwrap(); - } - - const INTERLEAVED_INSERT_AND_DELETE_TEST_LEAVES_1: [(TestKey, TestValue); 3] = [ - (TestKey([1u8; 7]), TestValue([1u8; 6])), - (TestKey([0u8; 7]), TestValue([0u8; 6])), - (TestKey([0u8, 1, 1, 1, 1, 1, 1]), TestValue([2u8; 6])), - ]; - - const INTERLEAVED_DELETE_TEST_KEYS_1: [TestKey; 1] = [TestKey([1u8; 7])]; - - fn interleaved_insert_and_delete<'a, K, V, R, S, E>( - correlation_id: CorrelationId, - environment: &'a R, - store: &S, - root: &Blake2bHash, - pairs_to_insert: &[(K, V)], - keys_to_delete: &[K], - ) -> Result<(), E> - where - K: ToBytes + FromBytes + Clone + Eq + std::fmt::Debug, - V: ToBytes + FromBytes + Clone + Eq + std::fmt::Debug, - R: TransactionSource<'a, 
Handle = S::Handle>, - S: TrieStore, - S::Error: From, - E: From + From + From, - { - let mut txn = environment.create_read_write_txn()?; - let mut expected_root = *root; - // Insert the key-value pairs, keeping track of the roots as we go - for (key, value) in pairs_to_insert.iter() { - if let WriteResult::Written(new_root) = - write::(correlation_id, &mut txn, store, &expected_root, key, value)? - { - expected_root = new_root; - } else { - panic!("Could not write pair") - } - } - for key in keys_to_delete.iter() { - match delete::(correlation_id, &mut txn, store, &expected_root, key)? { - DeleteResult::Deleted(new_root) => { - expected_root = new_root; - } - DeleteResult::DoesNotExist => {} - DeleteResult::RootNotFound => panic!("should find root"), - } - } - - let pairs_to_insert_less_deleted: Vec<(K, V)> = pairs_to_insert - .iter() - .rev() - .cloned() - .filter(|(key, _value)| !keys_to_delete.contains(key)) - .collect(); - - let mut actual_root = *root; - for (key, value) in pairs_to_insert_less_deleted.iter() { - if let WriteResult::Written(new_root) = - write::(correlation_id, &mut txn, store, &actual_root, key, value)? 
- { - actual_root = new_root; - } else { - panic!("Could not write pair") - } - } - - assert_eq!(expected_root, actual_root, "Expected did not match actual"); - - Ok(()) - } - - #[test] - fn lmdb_interleaved_insert_and_delete() { - let correlation_id = CorrelationId::new(); - let (empty_root_hash, empty_trie) = TEST_TRIE_GENERATORS[0]().unwrap(); - let context = LmdbTestContext::new(&empty_trie).unwrap(); - - interleaved_insert_and_delete::( - correlation_id, - &context.environment, - &context.store, - &empty_root_hash, - &INTERLEAVED_INSERT_AND_DELETE_TEST_LEAVES_1, - &INTERLEAVED_DELETE_TEST_KEYS_1, - ) - .unwrap(); - } - - #[test] - fn in_memory_interleaved_insert_and_delete() { - let correlation_id = CorrelationId::new(); - let (empty_root_hash, empty_trie) = TEST_TRIE_GENERATORS[0]().unwrap(); - let context = InMemoryTestContext::new(&empty_trie).unwrap(); - - interleaved_insert_and_delete::( - correlation_id, - &context.environment, - &context.store, - &empty_root_hash, - &INTERLEAVED_INSERT_AND_DELETE_TEST_LEAVES_1, - &INTERLEAVED_DELETE_TEST_KEYS_1, - ) - .unwrap(); - } - - const DEFAULT_MIN_LENGTH: usize = 1; - - const DEFAULT_MAX_LENGTH: usize = 6; - - fn get_range() -> RangeInclusive { - let start = option_env!("CL_TRIE_TEST_VECTOR_MIN_LENGTH") - .and_then(|s| str::parse::(s).ok()) - .unwrap_or(DEFAULT_MIN_LENGTH); - let end = option_env!("CL_TRIE_TEST_VECTOR_MAX_LENGTH") - .and_then(|s| str::parse::(s).ok()) - .unwrap_or(DEFAULT_MAX_LENGTH); - RangeInclusive::new(start, end) - } - - proptest! 
{ - #[test] - fn prop_in_memory_interleaved_insert_and_delete( - pairs_to_insert in collection::vec((colliding_key_arb(), stored_value_arb()), get_range()) - ) { - let correlation_id = CorrelationId::new(); - let (empty_root_hash, empty_trie) = TEST_TRIE_GENERATORS[0]().unwrap(); - let context = InMemoryTestContext::new(&empty_trie).unwrap(); - - let keys_to_delete = { - let mut tmp = Vec::new(); - for i in (0..pairs_to_insert.len()).step_by(2) { - tmp.push(pairs_to_insert[i].0) - } - tmp - }; - - interleaved_insert_and_delete::( - correlation_id, - &context.environment, - &context.store, - &empty_root_hash, - &pairs_to_insert, - &keys_to_delete, - ) - .unwrap(); - } - } -} diff --git a/execution_engine/src/storage/trie_store/operations/tests/ee_699.rs b/execution_engine/src/storage/trie_store/operations/tests/ee_699.rs deleted file mode 100644 index 21916d2b3e..0000000000 --- a/execution_engine/src/storage/trie_store/operations/tests/ee_699.rs +++ /dev/null @@ -1,410 +0,0 @@ -use proptest::{arbitrary, array, collection, prop_oneof, strategy::Strategy}; - -use crate::{make_array_newtype, newtypes::Blake2bHash}; -use casper_types::{ - bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, - gens, URef, -}; - -use super::{HashedTrie, TestValue}; -use crate::trie::Trie; - -pub const BASIC_LENGTH: usize = 4; -pub const SIMILAR_LENGTH: usize = 4; -pub const FANCY_LENGTH: usize = 5; -pub const LONG_LENGTH: usize = 8; - -const PUBLIC_KEY_BASIC_ID: u8 = 0; -const PUBLIC_KEY_SIMILAR_ID: u8 = 1; -const PUBLIC_KEY_FANCY_ID: u8 = 2; -const PUBLIC_KEY_LONG_ID: u8 = 3; - -pub const KEY_HASH_LENGTH: usize = 32; - -const KEY_ACCOUNT_ID: u8 = 0; -const KEY_HASH_ID: u8 = 1; -const KEY_UREF_ID: u8 = 2; - -make_array_newtype!(Basic, u8, BASIC_LENGTH); -make_array_newtype!(Similar, u8, SIMILAR_LENGTH); -make_array_newtype!(Fancy, u8, FANCY_LENGTH); -make_array_newtype!(Long, u8, LONG_LENGTH); - -macro_rules! 
impl_distribution_for_array_newtype { - ($name:ident, $ty:ty, $len:expr) => { - impl rand::distributions::Distribution<$name> for rand::distributions::Standard { - fn sample(&self, rng: &mut R) -> $name { - let mut dat = [0u8; $len]; - rng.fill_bytes(dat.as_mut()); - $name(dat) - } - } - }; -} - -impl_distribution_for_array_newtype!(Basic, u8, BASIC_LENGTH); -impl_distribution_for_array_newtype!(Similar, u8, SIMILAR_LENGTH); -impl_distribution_for_array_newtype!(Fancy, u8, FANCY_LENGTH); -impl_distribution_for_array_newtype!(Long, u8, LONG_LENGTH); - -macro_rules! make_array_newtype_arb { - ($name:ident, $ty:ty, $len:expr, $fn_name:ident) => { - fn $fn_name() -> impl Strategy { - collection::vec(arbitrary::any::<$ty>(), $len).prop_map(|values| { - let mut dat = [0u8; $len]; - dat.copy_from_slice(values.as_slice()); - $name(dat) - }) - } - }; -} - -make_array_newtype_arb!(Basic, u8, BASIC_LENGTH, basic_arb); -make_array_newtype_arb!(Similar, u8, SIMILAR_LENGTH, similar_arb); -make_array_newtype_arb!(Fancy, u8, FANCY_LENGTH, fancy_arb); -make_array_newtype_arb!(Long, u8, LONG_LENGTH, long_arb); - -#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)] -pub enum PublicKey { - Basic(Basic), - Similar(Similar), - Fancy(Fancy), - Long(Long), -} - -impl ToBytes for PublicKey { - fn to_bytes(&self) -> Result, bytesrepr::Error> { - let mut ret = bytesrepr::allocate_buffer(self)?; - match self { - PublicKey::Basic(key) => { - ret.push(PUBLIC_KEY_BASIC_ID); - ret.extend(key.to_bytes()?) - } - PublicKey::Similar(key) => { - ret.push(PUBLIC_KEY_SIMILAR_ID); - ret.extend(key.to_bytes()?) - } - PublicKey::Fancy(key) => { - ret.push(PUBLIC_KEY_FANCY_ID); - ret.extend(key.to_bytes()?) - } - PublicKey::Long(key) => { - ret.push(PUBLIC_KEY_LONG_ID); - ret.extend(key.to_bytes()?) 
- } - }; - Ok(ret) - } - - fn serialized_length(&self) -> usize { - U8_SERIALIZED_LENGTH - + match self { - PublicKey::Basic(key) => key.serialized_length(), - PublicKey::Similar(key) => key.serialized_length(), - PublicKey::Fancy(key) => key.serialized_length(), - PublicKey::Long(key) => key.serialized_length(), - } - } -} - -impl FromBytes for PublicKey { - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { - let (id, rem): (u8, &[u8]) = FromBytes::from_bytes(bytes)?; - match id { - PUBLIC_KEY_BASIC_ID => { - let (key, rem): (Basic, &[u8]) = FromBytes::from_bytes(rem)?; - Ok((PublicKey::Basic(key), rem)) - } - PUBLIC_KEY_SIMILAR_ID => { - let (key, rem): (Similar, &[u8]) = FromBytes::from_bytes(rem)?; - Ok((PublicKey::Similar(key), rem)) - } - PUBLIC_KEY_FANCY_ID => { - let (key, rem): (Fancy, &[u8]) = FromBytes::from_bytes(rem)?; - Ok((PublicKey::Fancy(key), rem)) - } - PUBLIC_KEY_LONG_ID => { - let (key, rem): (Long, &[u8]) = FromBytes::from_bytes(rem)?; - Ok((PublicKey::Long(key), rem)) - } - _ => Err(bytesrepr::Error::Formatting), - } - } -} - -fn public_key_arb() -> impl Strategy { - prop_oneof![ - basic_arb().prop_map(PublicKey::Basic), - similar_arb().prop_map(PublicKey::Similar), - fancy_arb().prop_map(PublicKey::Fancy), - long_arb().prop_map(PublicKey::Long) - ] -} - -#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)] -pub enum TestKey { - Account(PublicKey), - Hash([u8; KEY_HASH_LENGTH]), - URef(URef), -} - -impl ToBytes for TestKey { - fn to_bytes(&self) -> Result, bytesrepr::Error> { - let mut ret = Vec::with_capacity(self.serialized_length()); - match self { - TestKey::Account(public_key) => { - ret.push(KEY_ACCOUNT_ID); - ret.extend(&public_key.to_bytes()?) - } - TestKey::Hash(hash) => { - ret.push(KEY_HASH_ID); - ret.extend(&hash.to_bytes()?) - } - TestKey::URef(uref) => { - ret.push(KEY_UREF_ID); - ret.extend(&uref.to_bytes()?) 
- } - } - Ok(ret) - } - - fn serialized_length(&self) -> usize { - U8_SERIALIZED_LENGTH - + match self { - TestKey::Account(public_key) => public_key.serialized_length(), - TestKey::Hash(hash) => hash.serialized_length(), - TestKey::URef(uref) => uref.serialized_length(), - } - } -} - -impl FromBytes for TestKey { - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { - let (id, rem): (u8, &[u8]) = FromBytes::from_bytes(bytes)?; - match id { - KEY_ACCOUNT_ID => { - let (public_key, rem): (PublicKey, &[u8]) = FromBytes::from_bytes(rem)?; - Ok((TestKey::Account(public_key), rem)) - } - KEY_HASH_ID => { - let (hash, rem): ([u8; KEY_HASH_LENGTH], &[u8]) = FromBytes::from_bytes(rem)?; - Ok((TestKey::Hash(hash), rem)) - } - KEY_UREF_ID => { - let (uref, rem): (URef, &[u8]) = FromBytes::from_bytes(rem)?; - Ok((TestKey::URef(uref), rem)) - } - _ => Err(bytesrepr::Error::Formatting), - } - } -} - -fn test_key_arb() -> impl Strategy { - prop_oneof![ - public_key_arb().prop_map(TestKey::Account), - gens::u8_slice_32().prop_map(TestKey::Hash), - gens::uref_arb().prop_map(TestKey::URef), - ] -} - -#[allow(clippy::unnecessary_operation)] -mod basics { - use proptest::proptest; - - use super::*; - - #[test] - fn random_key_generation_works_as_expected() { - use rand::Rng; - let mut rng = rand::thread_rng(); - let a: Basic = rng.gen(); - let b: Basic = rng.gen(); - assert_ne!(a, b) - } - - proptest! { - #[test] - fn key_should_roundtrip(key in test_key_arb()) { - bytesrepr::test_serialization_roundtrip(&key) - } - } -} - -type TestTrie = Trie; - -const TEST_LEAVES_LENGTH: usize = 6; - -/// Keys have been chosen deliberately and the `create_` functions below depend -/// on these exact definitions. Values are arbitrary. 
-const TEST_LEAVES: [TestTrie; TEST_LEAVES_LENGTH] = [ - Trie::Leaf { - key: TestKey::Account(PublicKey::Basic(Basic([0u8, 0, 0, 0]))), - value: TestValue(*b"value0"), - }, - Trie::Leaf { - key: TestKey::Account(PublicKey::Basic(Basic([0u8, 0, 0, 1]))), - value: TestValue(*b"value1"), - }, - Trie::Leaf { - key: TestKey::Account(PublicKey::Similar(Similar([0u8, 0, 0, 1]))), - value: TestValue(*b"value3"), - }, - Trie::Leaf { - key: TestKey::Account(PublicKey::Fancy(Fancy([0u8, 0, 0, 1, 0]))), - value: TestValue(*b"value4"), - }, - Trie::Leaf { - key: TestKey::Account(PublicKey::Long(Long([0u8, 0, 0, 1, 0, 0, 0, 0]))), - value: TestValue(*b"value5"), - }, - Trie::Leaf { - key: TestKey::Hash([0u8; 32]), - value: TestValue(*b"value6"), - }, -]; - -fn create_0_leaf_trie( -) -> Result<(Blake2bHash, Vec>), bytesrepr::Error> { - let root = HashedTrie::new(Trie::node(&[]))?; - - let root_hash: Blake2bHash = root.hash; - - let parents: Vec> = vec![root]; - - let tries: Vec> = { - let mut ret = Vec::new(); - ret.extend(parents); - ret - }; - - Ok((root_hash, tries)) -} - -mod empty_tries { - use casper_types::newtypes::CorrelationId; - - use super::*; - use crate::{ - error::in_memory, - trie_store::operations::tests::{self, InMemoryTestContext}, - }; - - #[test] - fn in_memory_writes_to_n_leaf_empty_trie_had_expected_results() { - let correlation_id = CorrelationId::new(); - let (root_hash, tries) = create_0_leaf_trie().unwrap(); - let context = InMemoryTestContext::new(&tries).unwrap(); - let initial_states = vec![root_hash]; - - let _states = tests::writes_to_n_leaf_empty_trie_had_expected_results::< - _, - _, - _, - _, - in_memory::Error, - >( - correlation_id, - &context.environment, - &context.store, - &initial_states, - &TEST_LEAVES, - ) - .unwrap(); - } -} - -mod proptests { - use proptest::{collection::vec, proptest}; - - use casper_types::newtypes::CorrelationId; - - const DEFAULT_MIN_LENGTH: usize = 0; - const DEFAULT_MAX_LENGTH: usize = 100; - - fn get_range() -> 
RangeInclusive { - let start = option_env!("CL_TRIE_TEST_VECTOR_MIN_LENGTH") - .and_then(|s| str::parse::(s).ok()) - .unwrap_or(DEFAULT_MIN_LENGTH); - let end = option_env!("CL_TRIE_TEST_VECTOR_MAX_LENGTH") - .and_then(|s| str::parse::(s).ok()) - .unwrap_or(DEFAULT_MAX_LENGTH); - RangeInclusive::new(start, end) - } - - use super::*; - use crate::{ - error::{self, in_memory}, - trie_store::operations::tests::{self, InMemoryTestContext, LmdbTestContext}, - }; - use std::ops::RangeInclusive; - - fn lmdb_roundtrip_succeeds(pairs: &[(TestKey, TestValue)]) -> bool { - let correlation_id = CorrelationId::new(); - let (root_hash, tries) = create_0_leaf_trie().unwrap(); - let context = LmdbTestContext::new(&tries).unwrap(); - let mut states_to_check = vec![]; - - let root_hashes = tests::write_pairs::<_, _, _, _, error::Error>( - correlation_id, - &context.environment, - &context.store, - &root_hash, - pairs, - ) - .unwrap(); - - states_to_check.extend(root_hashes); - - tests::check_pairs::<_, _, _, _, error::Error>( - correlation_id, - &context.environment, - &context.store, - &states_to_check, - &pairs, - ) - .unwrap() - } - - fn in_memory_roundtrip_succeeds(pairs: &[(TestKey, TestValue)]) -> bool { - let correlation_id = CorrelationId::new(); - let (root_hash, tries) = create_0_leaf_trie().unwrap(); - let context = InMemoryTestContext::new(&tries).unwrap(); - let mut states_to_check = vec![]; - - let root_hashes = tests::write_pairs::<_, _, _, _, in_memory::Error>( - correlation_id, - &context.environment, - &context.store, - &root_hash, - pairs, - ) - .unwrap(); - - states_to_check.extend(root_hashes); - - tests::check_pairs::<_, _, _, _, in_memory::Error>( - correlation_id, - &context.environment, - &context.store, - &states_to_check, - &pairs, - ) - .unwrap() - } - - fn test_value_arb() -> impl Strategy { - array::uniform6(arbitrary::any::()).prop_map(TestValue) - } - - proptest! 
{ - #[test] - fn prop_in_memory_roundtrip_succeeds(inputs in vec((test_key_arb(), test_value_arb()), get_range())) { - assert!(in_memory_roundtrip_succeeds(&inputs)); - } - - #[test] - fn prop_lmdb_roundtrip_succeeds(inputs in vec((test_key_arb(), test_value_arb()), get_range())) { - assert!(lmdb_roundtrip_succeeds(&inputs)); - } - } -} diff --git a/execution_engine/src/storage/trie_store/operations/tests/keys.rs b/execution_engine/src/storage/trie_store/operations/tests/keys.rs deleted file mode 100644 index 473b4a9877..0000000000 --- a/execution_engine/src/storage/trie_store/operations/tests/keys.rs +++ /dev/null @@ -1,309 +0,0 @@ -mod partial_tries { - use crate::shared::newtypes::CorrelationId; - - use crate::storage::{ - transaction_source::{Transaction, TransactionSource}, - trie::Trie, - trie_store::operations::{ - self, - tests::{ - InMemoryTestContext, LmdbTestContext, TestKey, TestValue, TEST_LEAVES, - TEST_TRIE_GENERATORS, - }, - }, - }; - - #[test] - fn lmdb_keys_from_n_leaf_partial_trie_had_expected_results() { - for (num_leaves, generator) in TEST_TRIE_GENERATORS.iter().enumerate() { - let correlation_id = CorrelationId::new(); - let (root_hash, tries) = generator().unwrap(); - let context = LmdbTestContext::new(&tries).unwrap(); - let test_leaves = TEST_LEAVES; - let (used, _) = test_leaves.split_at(num_leaves); - - let expected = { - let mut tmp = used - .iter() - .filter_map(Trie::key) - .cloned() - .collect::>(); - tmp.sort(); - tmp - }; - let actual = { - let txn = context.environment.create_read_txn().unwrap(); - let mut tmp = operations::keys::( - correlation_id, - &txn, - &context.store, - &root_hash, - ) - .filter_map(Result::ok) - .collect::>(); - txn.commit().unwrap(); - tmp.sort(); - tmp - }; - assert_eq!(actual, expected); - } - } - - #[test] - fn in_memory_keys_from_n_leaf_partial_trie_had_expected_results() { - for (num_leaves, generator) in TEST_TRIE_GENERATORS.iter().enumerate() { - let correlation_id = CorrelationId::new(); - let 
(root_hash, tries) = generator().unwrap(); - let context = InMemoryTestContext::new(&tries).unwrap(); - let test_leaves = TEST_LEAVES; - let (used, _) = test_leaves.split_at(num_leaves); - - let expected = { - let mut tmp = used - .iter() - .filter_map(Trie::key) - .cloned() - .collect::>(); - tmp.sort(); - tmp - }; - let actual = { - let txn = context.environment.create_read_txn().unwrap(); - let mut tmp = operations::keys::( - correlation_id, - &txn, - &context.store, - &root_hash, - ) - .filter_map(Result::ok) - .collect::>(); - txn.commit().unwrap(); - tmp.sort(); - tmp - }; - assert_eq!(actual, expected); - } - } -} - -mod full_tries { - use crate::shared::newtypes::{Blake2bHash, CorrelationId}; - - use crate::storage::{ - transaction_source::{Transaction, TransactionSource}, - trie::Trie, - trie_store::operations::{ - self, - tests::{ - InMemoryTestContext, TestKey, TestValue, EMPTY_HASHED_TEST_TRIES, TEST_LEAVES, - TEST_TRIE_GENERATORS, - }, - }, - }; - - #[test] - fn in_memory_keys_from_n_leaf_full_trie_had_expected_results() { - let correlation_id = CorrelationId::new(); - let context = InMemoryTestContext::new(EMPTY_HASHED_TEST_TRIES).unwrap(); - let mut states: Vec = Vec::new(); - - for (state_index, generator) in TEST_TRIE_GENERATORS.iter().enumerate() { - let (root_hash, tries) = generator().unwrap(); - context.update(&tries).unwrap(); - states.push(root_hash); - - for (num_leaves, state) in states[..state_index].iter().enumerate() { - let test_leaves = TEST_LEAVES; - let (used, _unused) = test_leaves.split_at(num_leaves); - - let expected = { - let mut tmp = used - .iter() - .filter_map(Trie::key) - .cloned() - .collect::>(); - tmp.sort(); - tmp - }; - let actual = { - let txn = context.environment.create_read_txn().unwrap(); - let mut tmp = operations::keys::( - correlation_id, - &txn, - &context.store, - &state, - ) - .filter_map(Result::ok) - .collect::>(); - txn.commit().unwrap(); - tmp.sort(); - tmp - }; - assert_eq!(actual, expected); - } - } - 
} -} - -#[cfg(debug_assertions)] -mod keys_iterator { - use crate::shared::newtypes::{Blake2bHash, CorrelationId}; - use casper_types::bytesrepr; - - use crate::storage::{ - transaction_source::TransactionSource, - trie::{Pointer, Trie}, - trie_store::operations::{ - self, - tests::{ - hash_test_tries, HashedTestTrie, HashedTrie, InMemoryTestContext, TestKey, - TestValue, TEST_LEAVES, - }, - }, - }; - - fn create_invalid_extension_trie( - ) -> Result<(Blake2bHash, Vec), bytesrepr::Error> { - let leaves = hash_test_tries(&TEST_LEAVES[2..3])?; - let ext_1 = HashedTrie::new(Trie::extension( - vec![0u8, 0], - Pointer::NodePointer(leaves[0].hash), - ))?; - - let root = HashedTrie::new(Trie::node(&[(0, Pointer::NodePointer(ext_1.hash))]))?; - let root_hash = root.hash; - - let tries = vec![root, ext_1, leaves[0].clone()]; - - Ok((root_hash, tries)) - } - - fn create_invalid_path_trie() -> Result<(Blake2bHash, Vec), bytesrepr::Error> { - let leaves = hash_test_tries(&TEST_LEAVES[..1])?; - - let root = HashedTrie::new(Trie::node(&[(1, Pointer::NodePointer(leaves[0].hash))]))?; - let root_hash = root.hash; - - let tries = vec![root, leaves[0].clone()]; - - Ok((root_hash, tries)) - } - - fn create_invalid_hash_trie() -> Result<(Blake2bHash, Vec), bytesrepr::Error> { - let leaves = hash_test_tries(&TEST_LEAVES[..2])?; - - let root = HashedTrie::new(Trie::node(&[(0, Pointer::NodePointer(leaves[1].hash))]))?; - let root_hash = root.hash; - - let tries = vec![root, leaves[0].clone()]; - - Ok((root_hash, tries)) - } - - macro_rules! 
return_on_err { - ($x:expr) => { - match $x { - Ok(result) => result, - Err(_) => { - return; // we expect the test to panic, so this will cause a test failure - } - } - }; - } - - fn test_trie(root_hash: Blake2bHash, tries: Vec) { - let correlation_id = CorrelationId::new(); - let context = return_on_err!(InMemoryTestContext::new(&tries)); - let txn = return_on_err!(context.environment.create_read_txn()); - let _tmp = operations::keys::( - correlation_id, - &txn, - &context.store, - &root_hash, - ) - .collect::>(); - } - - #[test] - #[should_panic] - fn should_panic_on_leaf_after_extension() { - let (root_hash, tries) = return_on_err!(create_invalid_extension_trie()); - test_trie(root_hash, tries); - } - - #[test] - #[should_panic] - fn should_panic_when_key_not_matching_path() { - let (root_hash, tries) = return_on_err!(create_invalid_path_trie()); - test_trie(root_hash, tries); - } - - #[test] - #[should_panic] - fn should_panic_on_pointer_to_nonexisting_hash() { - let (root_hash, tries) = return_on_err!(create_invalid_hash_trie()); - test_trie(root_hash, tries); - } -} - -mod keys_with_prefix_iterator { - use crate::shared::newtypes::CorrelationId; - - use crate::storage::{ - transaction_source::TransactionSource, - trie::Trie, - trie_store::operations::{ - self, - tests::{create_6_leaf_trie, InMemoryTestContext, TestKey, TestValue, TEST_LEAVES}, - }, - }; - - fn expected_keys(prefix: &[u8]) -> Vec { - let mut tmp = TEST_LEAVES - .iter() - .filter_map(Trie::key) - .filter(|key| key.0.starts_with(prefix)) - .cloned() - .collect::>(); - tmp.sort(); - tmp - } - - fn test_prefix(prefix: &[u8]) { - let correlation_id = CorrelationId::new(); - let (root_hash, tries) = create_6_leaf_trie().expect("should create a trie"); - let context = InMemoryTestContext::new(&tries).expect("should create a new context"); - let txn = context - .environment - .create_read_txn() - .expect("should create a read txn"); - let expected = expected_keys(prefix); - let mut actual = 
operations::keys_with_prefix::( - correlation_id, - &txn, - &context.store, - &root_hash, - prefix, - ) - .filter_map(Result::ok) - .collect::>(); - actual.sort(); - assert_eq!(expected, actual); - } - - #[test] - fn test_prefixes() { - test_prefix(&[]); // 6 leaves - test_prefix(&[0]); // 6 leaves - test_prefix(&[0, 1]); // 1 leaf - test_prefix(&[0, 1, 0]); // 1 leaf - test_prefix(&[0, 1, 1]); // 0 leaves - test_prefix(&[0, 0]); // 5 leaves - test_prefix(&[0, 0, 1]); // 0 leaves - test_prefix(&[0, 0, 2]); // 1 leaf - test_prefix(&[0, 0, 0, 0]); // 3 leaves, prefix points to an Extension - test_prefix(&[0, 0, 0, 0, 0]); // 3 leaves - test_prefix(&[0, 0, 0, 0, 0, 0]); // 2 leaves - test_prefix(&[0, 0, 0, 0, 0, 0, 1]); // 1 leaf - } -} diff --git a/execution_engine/src/storage/trie_store/operations/tests/mod.rs b/execution_engine/src/storage/trie_store/operations/tests/mod.rs deleted file mode 100644 index b8e19b186b..0000000000 --- a/execution_engine/src/storage/trie_store/operations/tests/mod.rs +++ /dev/null @@ -1,1012 +0,0 @@ -mod delete; -mod keys; -mod proptests; -mod read; -mod scan; -mod synchronize; -mod write; - -use std::{collections::HashMap, convert}; - -use lmdb::DatabaseFlags; -use tempfile::{tempdir, TempDir}; - -use crate::shared::newtypes::{Blake2bHash, CorrelationId}; -use casper_types::bytesrepr::{self, FromBytes, ToBytes}; - -use crate::storage::{ - error::{self, in_memory}, - transaction_source::{ - in_memory::InMemoryEnvironment, lmdb::LmdbEnvironment, Readable, Transaction, - TransactionSource, - }, - trie::{merkle_proof::TrieMerkleProof, Pointer, Trie}, - trie_store::{ - self, - in_memory::InMemoryTrieStore, - lmdb::LmdbTrieStore, - operations::{self, read, read_with_proof, write, ReadResult, WriteResult}, - TrieStore, - }, - DEFAULT_TEST_MAX_DB_SIZE, DEFAULT_TEST_MAX_READERS, -}; -use std::ops::Not; - -const TEST_KEY_LENGTH: usize = 7; - -/// A short key type for tests. 
-#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)] -struct TestKey([u8; TEST_KEY_LENGTH]); - -impl ToBytes for TestKey { - fn to_bytes(&self) -> Result, bytesrepr::Error> { - Ok(self.0.to_vec()) - } - - fn serialized_length(&self) -> usize { - TEST_KEY_LENGTH - } -} - -impl FromBytes for TestKey { - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { - let (key, rem) = bytes.split_at(TEST_KEY_LENGTH); - let mut ret = [0u8; TEST_KEY_LENGTH]; - ret.copy_from_slice(key); - Ok((TestKey(ret), rem)) - } -} - -const TEST_VAL_LENGTH: usize = 6; - -/// A short value type for tests. -#[derive(Debug, Copy, Clone, PartialEq, Eq)] -struct TestValue([u8; TEST_VAL_LENGTH]); - -impl ToBytes for TestValue { - fn to_bytes(&self) -> Result, bytesrepr::Error> { - Ok(self.0.to_vec()) - } - - fn serialized_length(&self) -> usize { - TEST_VAL_LENGTH - } -} - -impl FromBytes for TestValue { - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { - let (key, rem) = bytes.split_at(TEST_VAL_LENGTH); - let mut ret = [0u8; TEST_VAL_LENGTH]; - ret.copy_from_slice(key); - Ok((TestValue(ret), rem)) - } -} - -type TestTrie = Trie; - -type HashedTestTrie = HashedTrie; - -/// A pairing of a trie element and its hash. -#[derive(Debug, Clone, PartialEq, Eq)] -struct HashedTrie { - hash: Blake2bHash, - trie: Trie, -} - -impl HashedTrie { - pub fn new(trie: Trie) -> Result { - let trie_bytes = trie.to_bytes()?; - let hash = Blake2bHash::new(&trie_bytes); - Ok(HashedTrie { hash, trie }) - } -} - -const EMPTY_HASHED_TEST_TRIES: &[HashedTestTrie] = &[]; - -const TEST_LEAVES_LENGTH: usize = 6; - -/// Keys have been chosen deliberately and the `create_` functions below depend -/// on these exact definitions. Values are arbitrary. 
-const TEST_LEAVES: [TestTrie; TEST_LEAVES_LENGTH] = [ - Trie::Leaf { - key: TestKey([0u8, 0, 0, 0, 0, 0, 0]), - value: TestValue(*b"value0"), - }, - Trie::Leaf { - key: TestKey([0u8, 0, 0, 0, 0, 0, 1]), - value: TestValue(*b"value1"), - }, - Trie::Leaf { - key: TestKey([0u8, 0, 0, 2, 0, 0, 0]), - value: TestValue(*b"value2"), - }, - Trie::Leaf { - key: TestKey([0u8, 0, 0, 0, 0, 255, 0]), - value: TestValue(*b"value3"), - }, - Trie::Leaf { - key: TestKey([0u8, 1, 0, 0, 0, 0, 0]), - value: TestValue(*b"value4"), - }, - Trie::Leaf { - key: TestKey([0u8, 0, 2, 0, 0, 0, 0]), - value: TestValue(*b"value5"), - }, -]; - -const TEST_LEAVES_UPDATED: [TestTrie; TEST_LEAVES_LENGTH] = [ - Trie::Leaf { - key: TestKey([0u8, 0, 0, 0, 0, 0, 0]), - value: TestValue(*b"valueA"), - }, - Trie::Leaf { - key: TestKey([0u8, 0, 0, 0, 0, 0, 1]), - value: TestValue(*b"valueB"), - }, - Trie::Leaf { - key: TestKey([0u8, 0, 0, 2, 0, 0, 0]), - value: TestValue(*b"valueC"), - }, - Trie::Leaf { - key: TestKey([0u8, 0, 0, 0, 0, 255, 0]), - value: TestValue(*b"valueD"), - }, - Trie::Leaf { - key: TestKey([0u8, 1, 0, 0, 0, 0, 0]), - value: TestValue(*b"valueE"), - }, - Trie::Leaf { - key: TestKey([0u8, 0, 2, 0, 0, 0, 0]), - value: TestValue(*b"valueF"), - }, -]; - -const TEST_LEAVES_NON_COLLIDING: [TestTrie; TEST_LEAVES_LENGTH] = [ - Trie::Leaf { - key: TestKey([0u8, 0, 0, 0, 0, 0, 0]), - value: TestValue(*b"valueA"), - }, - Trie::Leaf { - key: TestKey([1u8, 0, 0, 0, 0, 0, 0]), - value: TestValue(*b"valueB"), - }, - Trie::Leaf { - key: TestKey([2u8, 0, 0, 0, 0, 0, 0]), - value: TestValue(*b"valueC"), - }, - Trie::Leaf { - key: TestKey([3u8, 0, 0, 0, 0, 0, 0]), - value: TestValue(*b"valueD"), - }, - Trie::Leaf { - key: TestKey([4u8, 0, 0, 0, 0, 0, 0]), - value: TestValue(*b"valueE"), - }, - Trie::Leaf { - key: TestKey([5u8, 0, 0, 0, 0, 0, 0]), - value: TestValue(*b"valueF"), - }, -]; - -const TEST_LEAVES_ADJACENTS: [TestTrie; TEST_LEAVES_LENGTH] = [ - Trie::Leaf { - key: TestKey([0u8, 0, 0, 0, 0, 0, 
2]), - value: TestValue(*b"valueA"), - }, - Trie::Leaf { - key: TestKey([0u8, 0, 0, 0, 0, 0, 3]), - value: TestValue(*b"valueB"), - }, - Trie::Leaf { - key: TestKey([0u8, 0, 0, 3, 0, 0, 0]), - value: TestValue(*b"valueC"), - }, - Trie::Leaf { - key: TestKey([0u8, 0, 0, 0, 0, 1, 0]), - value: TestValue(*b"valueD"), - }, - Trie::Leaf { - key: TestKey([0u8, 2, 0, 0, 0, 0, 0]), - value: TestValue(*b"valueE"), - }, - Trie::Leaf { - key: TestKey([0u8, 0, 3, 0, 0, 0, 0]), - value: TestValue(*b"valueF"), - }, -]; - -type TrieGenerator = fn() -> Result<(Blake2bHash, Vec>), bytesrepr::Error>; - -const TEST_TRIE_GENERATORS_LENGTH: usize = 7; - -const TEST_TRIE_GENERATORS: [TrieGenerator; TEST_TRIE_GENERATORS_LENGTH] = [ - create_0_leaf_trie, - create_1_leaf_trie, - create_2_leaf_trie, - create_3_leaf_trie, - create_4_leaf_trie, - create_5_leaf_trie, - create_6_leaf_trie, -]; - -fn hash_test_tries(tries: &[TestTrie]) -> Result, bytesrepr::Error> { - tries - .iter() - .map(|trie| HashedTestTrie::new(trie.to_owned())) - .collect() -} - -fn create_0_leaf_trie() -> Result<(Blake2bHash, Vec), bytesrepr::Error> { - let root = HashedTrie::new(Trie::node(&[]))?; - - let root_hash: Blake2bHash = root.hash; - - let parents: Vec = vec![root]; - - let tries: Vec = { - let mut ret = Vec::new(); - ret.extend(parents); - ret - }; - - Ok((root_hash, tries)) -} - -fn create_1_leaf_trie() -> Result<(Blake2bHash, Vec), bytesrepr::Error> { - let leaves = hash_test_tries(&TEST_LEAVES[..1])?; - - let root = HashedTrie::new(Trie::node(&[(0, Pointer::LeafPointer(leaves[0].hash))]))?; - - let root_hash: Blake2bHash = root.hash; - - let parents: Vec = vec![root]; - - let tries: Vec = { - let mut ret = Vec::new(); - ret.extend(leaves); - ret.extend(parents); - ret - }; - - Ok((root_hash, tries)) -} - -fn create_2_leaf_trie() -> Result<(Blake2bHash, Vec), bytesrepr::Error> { - let leaves = hash_test_tries(&TEST_LEAVES[..2])?; - - let node = HashedTrie::new(Trie::node(&[ - (0, 
Pointer::LeafPointer(leaves[0].hash)), - (1, Pointer::LeafPointer(leaves[1].hash)), - ]))?; - - let ext = HashedTrie::new(Trie::extension( - vec![0u8, 0, 0, 0, 0], - Pointer::NodePointer(node.hash), - ))?; - - let root = HashedTrie::new(Trie::node(&[(0, Pointer::NodePointer(ext.hash))]))?; - - let root_hash = root.hash; - - let parents: Vec = vec![root, ext, node]; - - let tries: Vec = { - let mut ret = Vec::new(); - ret.extend(leaves); - ret.extend(parents); - ret - }; - - Ok((root_hash, tries)) -} - -fn create_3_leaf_trie() -> Result<(Blake2bHash, Vec), bytesrepr::Error> { - let leaves = hash_test_tries(&TEST_LEAVES[..3])?; - - let node_1 = HashedTrie::new(Trie::node(&[ - (0, Pointer::LeafPointer(leaves[0].hash)), - (1, Pointer::LeafPointer(leaves[1].hash)), - ]))?; - - let ext_1 = HashedTrie::new(Trie::extension( - vec![0u8, 0], - Pointer::NodePointer(node_1.hash), - ))?; - - let node_2 = HashedTrie::new(Trie::node(&[ - (0, Pointer::NodePointer(ext_1.hash)), - (2, Pointer::LeafPointer(leaves[2].hash)), - ]))?; - - let ext_2 = HashedTrie::new(Trie::extension( - vec![0u8, 0], - Pointer::NodePointer(node_2.hash), - ))?; - - let root = HashedTrie::new(Trie::node(&[(0, Pointer::NodePointer(ext_2.hash))]))?; - - let root_hash = root.hash; - - let parents: Vec = vec![root, ext_2, node_2, ext_1, node_1]; - - let tries: Vec = { - let mut ret = Vec::new(); - ret.extend(leaves); - ret.extend(parents); - ret - }; - - Ok((root_hash, tries)) -} - -fn create_4_leaf_trie() -> Result<(Blake2bHash, Vec), bytesrepr::Error> { - let leaves = hash_test_tries(&TEST_LEAVES[..4])?; - - let node_1 = HashedTrie::new(Trie::node(&[ - (0, Pointer::LeafPointer(leaves[0].hash)), - (1, Pointer::LeafPointer(leaves[1].hash)), - ]))?; - - let node_2 = HashedTrie::new(Trie::node(&[ - (0, Pointer::NodePointer(node_1.hash)), - (255, Pointer::LeafPointer(leaves[3].hash)), - ]))?; - - let ext_1 = HashedTrie::new(Trie::extension( - vec![0u8], - Pointer::NodePointer(node_2.hash), - ))?; - - let node_3 = 
HashedTrie::new(Trie::node(&[ - (0, Pointer::NodePointer(ext_1.hash)), - (2, Pointer::LeafPointer(leaves[2].hash)), - ]))?; - - let ext_2 = HashedTrie::new(Trie::extension( - vec![0u8, 0], - Pointer::NodePointer(node_3.hash), - ))?; - - let root = HashedTrie::new(Trie::node(&[(0, Pointer::NodePointer(ext_2.hash))]))?; - - let root_hash = root.hash; - - let parents: Vec = vec![root, ext_2, node_3, ext_1, node_2, node_1]; - - let tries: Vec = { - let mut ret = Vec::new(); - ret.extend(leaves); - ret.extend(parents); - ret - }; - - Ok((root_hash, tries)) -} - -fn create_5_leaf_trie() -> Result<(Blake2bHash, Vec), bytesrepr::Error> { - let leaves = hash_test_tries(&TEST_LEAVES[..5])?; - - let node_1 = HashedTrie::new(Trie::node(&[ - (0, Pointer::LeafPointer(leaves[0].hash)), - (1, Pointer::LeafPointer(leaves[1].hash)), - ]))?; - - let node_2 = HashedTrie::new(Trie::node(&[ - (0, Pointer::NodePointer(node_1.hash)), - (255, Pointer::LeafPointer(leaves[3].hash)), - ]))?; - - let ext_1 = HashedTrie::new(Trie::extension( - vec![0u8], - Pointer::NodePointer(node_2.hash), - ))?; - - let node_3 = HashedTrie::new(Trie::node(&[ - (0, Pointer::NodePointer(ext_1.hash)), - (2, Pointer::LeafPointer(leaves[2].hash)), - ]))?; - - let ext_2 = HashedTrie::new(Trie::extension( - vec![0u8], - Pointer::NodePointer(node_3.hash), - ))?; - - let node_4 = HashedTrie::new(Trie::node(&[ - (0, Pointer::NodePointer(ext_2.hash)), - (1, Pointer::LeafPointer(leaves[4].hash)), - ]))?; - - let root = HashedTrie::new(Trie::node(&[(0, Pointer::NodePointer(node_4.hash))]))?; - - let root_hash = root.hash; - - let parents: Vec = vec![root, node_4, ext_2, node_3, ext_1, node_2, node_1]; - - let tries: Vec = { - let mut ret = Vec::new(); - ret.extend(leaves); - ret.extend(parents); - ret - }; - - Ok((root_hash, tries)) -} - -fn create_6_leaf_trie() -> Result<(Blake2bHash, Vec), bytesrepr::Error> { - let leaves = hash_test_tries(&TEST_LEAVES)?; - - let node_1 = HashedTrie::new(Trie::node(&[ - (0, 
Pointer::LeafPointer(leaves[0].hash)), - (1, Pointer::LeafPointer(leaves[1].hash)), - ]))?; - - let node_2 = HashedTrie::new(Trie::node(&[ - (0, Pointer::NodePointer(node_1.hash)), - (255, Pointer::LeafPointer(leaves[3].hash)), - ]))?; - - let ext = HashedTrie::new(Trie::extension( - vec![0u8], - Pointer::NodePointer(node_2.hash), - ))?; - - let node_3 = HashedTrie::new(Trie::node(&[ - (0, Pointer::NodePointer(ext.hash)), - (2, Pointer::LeafPointer(leaves[2].hash)), - ]))?; - - let node_4 = HashedTrie::new(Trie::node(&[ - (0, Pointer::NodePointer(node_3.hash)), - (2, Pointer::LeafPointer(leaves[5].hash)), - ]))?; - - let node_5 = HashedTrie::new(Trie::node(&[ - (0, Pointer::NodePointer(node_4.hash)), - (1, Pointer::LeafPointer(leaves[4].hash)), - ]))?; - - let root = HashedTrie::new(Trie::node(&[(0, Pointer::NodePointer(node_5.hash))]))?; - - let root_hash = root.hash; - - let parents: Vec = vec![root, node_5, node_4, node_3, ext, node_2, node_1]; - - let tries: Vec = { - let mut ret = Vec::new(); - ret.extend(leaves); - ret.extend(parents); - ret - }; - - Ok((root_hash, tries)) -} - -fn create_6_leaf_corrupt_trie() -> Result<(Blake2bHash, Vec), bytesrepr::Error> { - let leaves = hash_test_tries(&TEST_LEAVES)?; - - let node_1 = HashedTrie::new(Trie::node(&[ - (0, Pointer::LeafPointer(leaves[0].hash)), - (1, Pointer::LeafPointer(leaves[1].hash)), - ]))?; - - let node_2 = HashedTrie::new(Trie::node(&[ - (0, Pointer::NodePointer(node_1.hash)), - (255, Pointer::LeafPointer(leaves[3].hash)), - ]))?; - - let ext = HashedTrie::new(Trie::extension( - vec![0u8], - Pointer::NodePointer(node_2.hash), - ))?; - - let node_3: HashedTestTrie = HashedTrie::new(Trie::node(&[ - (0, Pointer::NodePointer(ext.hash)), - (2, Pointer::LeafPointer(leaves[2].hash)), - ]))?; - - let node_4 = HashedTrie::new(Trie::node(&[ - (0, Pointer::NodePointer(node_3.hash)), - (2, Pointer::LeafPointer(leaves[5].hash)), - ]))?; - - let node_5 = HashedTrie::new(Trie::node(&[ - (0, 
Pointer::NodePointer(node_4.hash)), - (1, Pointer::LeafPointer(leaves[4].hash)), - ]))?; - - let root = HashedTrie::new(Trie::node(&[(0, Pointer::NodePointer(node_5.hash))]))?; - - let root_hash = root.hash; - - let mut corrupt_node_3: HashedTestTrie = HashedTrie::new(Trie::node(&[ - (0, Pointer::NodePointer(Blake2bHash::new(b"yep"))), - (2, Pointer::LeafPointer(leaves[2].hash)), - ]))?; - - corrupt_node_3.hash = node_3.hash; - - let parents: Vec = - vec![root, node_5, node_4, corrupt_node_3, ext, node_2, node_1]; - - let tries: Vec = { - let mut ret = Vec::new(); - ret.extend(leaves); - ret.extend(parents); - ret - }; - - Ok((root_hash, tries)) -} - -fn put_tries<'a, K, V, R, S, E>( - environment: &'a R, - store: &S, - tries: &[HashedTrie], -) -> Result<(), E> -where - K: ToBytes, - V: ToBytes, - R: TransactionSource<'a, Handle = S::Handle>, - S: TrieStore, - S::Error: From, - E: From + From + From, -{ - if tries.is_empty() { - return Ok(()); - } - let mut txn = environment.create_read_write_txn()?; - for HashedTrie { hash, trie } in tries.iter() { - store.put(&mut txn, hash, trie)?; - } - txn.commit()?; - Ok(()) -} - -// A context for holding lmdb-based test resources -struct LmdbTestContext { - _temp_dir: TempDir, - environment: LmdbEnvironment, - store: LmdbTrieStore, -} - -impl LmdbTestContext { - fn new(tries: &[HashedTrie]) -> anyhow::Result - where - K: FromBytes + ToBytes, - V: FromBytes + ToBytes, - { - let _temp_dir = tempdir()?; - let environment = LmdbEnvironment::new( - &_temp_dir.path().to_path_buf(), - DEFAULT_TEST_MAX_DB_SIZE, - DEFAULT_TEST_MAX_READERS, - )?; - let store = LmdbTrieStore::new(&environment, None, DatabaseFlags::empty())?; - put_tries::<_, _, _, _, error::Error>(&environment, &store, tries)?; - Ok(LmdbTestContext { - _temp_dir, - environment, - store, - }) - } - - fn update(&self, tries: &[HashedTrie]) -> anyhow::Result<()> - where - K: ToBytes, - V: ToBytes, - { - put_tries::<_, _, _, _, error::Error>(&self.environment, &self.store, 
tries)?; - Ok(()) - } -} - -// A context for holding in-memory test resources -struct InMemoryTestContext { - environment: InMemoryEnvironment, - store: InMemoryTrieStore, -} - -impl InMemoryTestContext { - fn new(tries: &[HashedTrie]) -> anyhow::Result - where - K: ToBytes, - V: ToBytes, - { - let environment = InMemoryEnvironment::new(); - let store = InMemoryTrieStore::new(&environment, None); - put_tries::<_, _, _, _, in_memory::Error>(&environment, &store, tries)?; - Ok(InMemoryTestContext { environment, store }) - } - - fn update(&self, tries: &[HashedTrie]) -> anyhow::Result<()> - where - K: ToBytes, - V: ToBytes, - { - put_tries::<_, _, _, _, in_memory::Error>(&self.environment, &self.store, tries)?; - Ok(()) - } -} - -fn check_leaves_exist( - correlation_id: CorrelationId, - txn: &T, - store: &S, - root: &Blake2bHash, - leaves: &[Trie], -) -> Result, E> -where - K: ToBytes + FromBytes + Eq + std::fmt::Debug, - V: ToBytes + FromBytes + Eq + Copy, - T: Readable, - S: TrieStore, - S::Error: From, - E: From + From, -{ - let mut ret = Vec::new(); - - for leaf in leaves { - if let Trie::Leaf { key, value } = leaf { - let maybe_value: ReadResult = - read::<_, _, _, _, E>(correlation_id, txn, store, root, key)?; - ret.push(ReadResult::Found(*value) == maybe_value) - } else { - panic!("leaves should only contain leaves") - } - } - Ok(ret) -} - -/// For a given vector of leaves check the merkle proofs exist and are correct -fn check_merkle_proofs( - correlation_id: CorrelationId, - txn: &T, - store: &S, - root: &Blake2bHash, - leaves: &[Trie], -) -> Result, E> -where - K: ToBytes + FromBytes + Eq + std::fmt::Debug + Copy, - V: ToBytes + FromBytes + Eq + Copy, - T: Readable, - S: TrieStore, - S::Error: From, - E: From + From, -{ - let mut ret = Vec::new(); - - for leaf in leaves { - if let Trie::Leaf { key, value } = leaf { - let maybe_proof: ReadResult> = - read_with_proof::<_, _, _, _, E>(correlation_id, txn, store, root, key)?; - match maybe_proof { - 
ReadResult::Found(proof) => { - let hash = proof.compute_state_hash()?; - ret.push(hash == *root && proof.value() == value); - } - ReadResult::NotFound => { - ret.push(false); - } - ReadResult::RootNotFound => panic!("Root not found!"), - }; - } else { - panic!("leaves should only contain leaves") - } - } - Ok(ret) -} - -fn check_keys( - correlation_id: CorrelationId, - txn: &T, - store: &S, - root: &Blake2bHash, - leaves: &[Trie], -) -> bool -where - K: ToBytes + FromBytes + Eq + std::fmt::Debug + Clone + Ord, - V: ToBytes + FromBytes + Eq + std::fmt::Debug + Copy, - T: Readable, - S: TrieStore, - S::Error: From, - E: From + From, -{ - let expected = { - let mut tmp = leaves - .iter() - .filter_map(Trie::key) - .cloned() - .collect::>(); - tmp.sort(); - tmp - }; - let actual = { - let mut tmp = operations::keys::<_, _, _, _>(correlation_id, txn, store, root) - .filter_map(Result::ok) - .collect::>(); - tmp.sort(); - tmp - }; - expected == actual -} - -fn check_leaves<'a, K, V, R, S, E>( - correlation_id: CorrelationId, - environment: &'a R, - store: &S, - root: &Blake2bHash, - present: &[Trie], - absent: &[Trie], -) -> Result<(), E> -where - K: ToBytes + FromBytes + Eq + std::fmt::Debug + Copy + Clone + Ord, - V: ToBytes + FromBytes + Eq + std::fmt::Debug + Copy, - R: TransactionSource<'a, Handle = S::Handle>, - S: TrieStore, - S::Error: From, - E: From + From + From, -{ - let txn: R::ReadTransaction = environment.create_read_txn()?; - - assert!( - check_leaves_exist::<_, _, _, _, E>(correlation_id, &txn, store, root, present)? - .into_iter() - .all(convert::identity) - ); - - assert!( - check_merkle_proofs::<_, _, _, _, E>(correlation_id, &txn, store, root, present)? - .into_iter() - .all(convert::identity) - ); - - assert!( - check_leaves_exist::<_, _, _, _, E>(correlation_id, &txn, store, root, absent)? - .into_iter() - .all(bool::not) - ); - - assert!( - check_merkle_proofs::<_, _, _, _, E>(correlation_id, &txn, store, root, absent)? 
- .into_iter() - .all(bool::not) - ); - - assert!(check_keys::<_, _, _, _, E>( - correlation_id, - &txn, - store, - root, - present, - )); - - txn.commit()?; - Ok(()) -} - -fn write_leaves<'a, K, V, R, S, E>( - correlation_id: CorrelationId, - environment: &'a R, - store: &S, - root_hash: &Blake2bHash, - leaves: &[Trie], -) -> Result, E> -where - K: ToBytes + FromBytes + Clone + Eq + std::fmt::Debug, - V: ToBytes + FromBytes + Clone + Eq, - R: TransactionSource<'a, Handle = S::Handle>, - S: TrieStore, - S::Error: From, - E: From + From + From, -{ - let mut results = Vec::new(); - if leaves.is_empty() { - return Ok(results); - } - let mut root_hash = root_hash.to_owned(); - let mut txn = environment.create_read_write_txn()?; - - for leaf in leaves.iter() { - if let Trie::Leaf { key, value } = leaf { - let write_result = - write::<_, _, _, _, E>(correlation_id, &mut txn, store, &root_hash, key, value)?; - match write_result { - WriteResult::Written(hash) => { - root_hash = hash; - } - WriteResult::AlreadyExists => (), - WriteResult::RootNotFound => panic!("write_leaves given an invalid root"), - }; - results.push(write_result); - } else { - panic!("leaves should contain only leaves"); - } - } - txn.commit()?; - Ok(results) -} - -fn check_pairs_proofs<'a, K, V, R, S, E>( - correlation_id: CorrelationId, - environment: &'a R, - store: &S, - root_hashes: &[Blake2bHash], - pairs: &[(K, V)], -) -> Result -where - K: ToBytes + FromBytes + Eq + std::fmt::Debug + Copy + Clone + Ord, - V: ToBytes + FromBytes + Eq + std::fmt::Debug + Copy, - R: TransactionSource<'a, Handle = S::Handle>, - S: TrieStore, - S::Error: From, - E: From + From + From, -{ - let txn = environment.create_read_txn()?; - for (index, root_hash) in root_hashes.iter().enumerate() { - for (key, value) in &pairs[..=index] { - let maybe_proof = - read_with_proof::<_, _, _, _, E>(correlation_id, &txn, store, root_hash, key)?; - match maybe_proof { - ReadResult::Found(proof) => { - let hash = 
proof.compute_state_hash()?; - if hash != *root_hash || proof.value() != value { - return Ok(false); - } - } - ReadResult::NotFound => return Ok(false), - ReadResult::RootNotFound => panic!("Root not found!"), - }; - } - } - Ok(true) -} - -fn check_pairs<'a, K, V, R, S, E>( - correlation_id: CorrelationId, - environment: &'a R, - store: &S, - root_hashes: &[Blake2bHash], - pairs: &[(K, V)], -) -> Result -where - K: ToBytes + FromBytes + Eq + std::fmt::Debug + Clone + Ord, - V: ToBytes + FromBytes + Eq + std::fmt::Debug + Copy, - R: TransactionSource<'a, Handle = S::Handle>, - S: TrieStore, - S::Error: From, - E: From + From + From, -{ - let txn = environment.create_read_txn()?; - for (index, root_hash) in root_hashes.iter().enumerate() { - for (key, value) in &pairs[..=index] { - let result = read::<_, _, _, _, E>(correlation_id, &txn, store, root_hash, key)?; - if ReadResult::Found(*value) != result { - return Ok(false); - } - } - let expected = { - let mut tmp = pairs[..=index] - .iter() - .map(|(k, _)| k) - .cloned() - .collect::>(); - tmp.sort(); - tmp - }; - let actual = { - let mut tmp = operations::keys::<_, _, _, _>(correlation_id, &txn, store, root_hash) - .filter_map(Result::ok) - .collect::>(); - tmp.sort(); - tmp - }; - if expected != actual { - return Ok(false); - } - } - Ok(true) -} - -fn write_pairs<'a, K, V, R, S, E>( - correlation_id: CorrelationId, - environment: &'a R, - store: &S, - root_hash: &Blake2bHash, - pairs: &[(K, V)], -) -> Result, E> -where - K: ToBytes + FromBytes + Clone + Eq + std::fmt::Debug, - V: ToBytes + FromBytes + Clone + Eq, - R: TransactionSource<'a, Handle = S::Handle>, - S: TrieStore, - S::Error: From, - E: From + From + From, -{ - let mut results = Vec::new(); - if pairs.is_empty() { - return Ok(results); - } - let mut root_hash = root_hash.to_owned(); - let mut txn = environment.create_read_write_txn()?; - - for (key, value) in pairs.iter() { - match write::<_, _, _, _, E>(correlation_id, &mut txn, store, &root_hash, 
key, value)? { - WriteResult::Written(hash) => { - root_hash = hash; - } - WriteResult::AlreadyExists => (), - WriteResult::RootNotFound => panic!("write_leaves given an invalid root"), - }; - results.push(root_hash); - } - txn.commit()?; - Ok(results) -} - -fn writes_to_n_leaf_empty_trie_had_expected_results<'a, K, V, R, S, E>( - correlation_id: CorrelationId, - environment: &'a R, - store: &S, - states: &[Blake2bHash], - test_leaves: &[Trie], -) -> Result, E> -where - K: ToBytes + FromBytes + Clone + Eq + std::fmt::Debug + Copy + Ord, - V: ToBytes + FromBytes + Clone + Eq + std::fmt::Debug + Copy, - R: TransactionSource<'a, Handle = S::Handle>, - S: TrieStore, - S::Error: From, - E: From + From + From, -{ - let mut states = states.to_vec(); - - // Write set of leaves to the trie - let hashes = write_leaves::<_, _, _, _, E>( - correlation_id, - environment, - store, - states.last().unwrap(), - &test_leaves, - )? - .into_iter() - .map(|result| match result { - WriteResult::Written(root_hash) => root_hash, - _ => panic!("write_leaves resulted in non-write"), - }) - .collect::>(); - - states.extend(hashes); - - // Check that the expected set of leaves is in the trie at every - // state, and that the set of other leaves is not. 
- for (num_leaves, state) in states.iter().enumerate() { - let (used, unused) = test_leaves.split_at(num_leaves); - check_leaves::<_, _, _, _, E>(correlation_id, environment, store, state, used, unused)?; - } - - Ok(states) -} - -impl InMemoryEnvironment { - pub fn dump( - &self, - maybe_name: Option<&str>, - ) -> Result>, in_memory::Error> - where - K: FromBytes, - V: FromBytes, - { - let name = maybe_name - .map(|name| format!("{}-{}", trie_store::NAME, name)) - .unwrap_or_else(|| trie_store::NAME.to_string()); - let data = self.data(Some(&name))?.unwrap(); - data.into_iter() - .map(|(hash_bytes, trie_bytes)| { - let hash: Blake2bHash = bytesrepr::deserialize(hash_bytes.to_vec())?; - let trie: Trie = bytesrepr::deserialize(trie_bytes.to_vec())?; - Ok((hash, trie)) - }) - .collect::>, bytesrepr::Error>>() - .map_err(Into::into) - } -} diff --git a/execution_engine/src/storage/trie_store/operations/tests/proptests.rs b/execution_engine/src/storage/trie_store/operations/tests/proptests.rs deleted file mode 100644 index 9b8ae201d4..0000000000 --- a/execution_engine/src/storage/trie_store/operations/tests/proptests.rs +++ /dev/null @@ -1,115 +0,0 @@ -use std::ops::RangeInclusive; - -use proptest::{ - array, - collection::vec, - prelude::{any, proptest, Strategy}, -}; - -use super::*; - -const DEFAULT_MIN_LENGTH: usize = 0; - -const DEFAULT_MAX_LENGTH: usize = 100; - -fn get_range() -> RangeInclusive { - let start = option_env!("CL_TRIE_TEST_VECTOR_MIN_LENGTH") - .and_then(|s| str::parse::(s).ok()) - .unwrap_or(DEFAULT_MIN_LENGTH); - let end = option_env!("CL_TRIE_TEST_VECTOR_MAX_LENGTH") - .and_then(|s| str::parse::(s).ok()) - .unwrap_or(DEFAULT_MAX_LENGTH); - RangeInclusive::new(start, end) -} - -fn lmdb_roundtrip_succeeds(pairs: &[(TestKey, TestValue)]) -> bool { - let correlation_id = CorrelationId::new(); - let (root_hash, tries) = TEST_TRIE_GENERATORS[0]().unwrap(); - let context = LmdbTestContext::new(&tries).unwrap(); - let mut states_to_check = vec![]; - - let 
root_hashes = write_pairs::<_, _, _, _, error::Error>( - correlation_id, - &context.environment, - &context.store, - &root_hash, - pairs, - ) - .unwrap(); - - states_to_check.extend(root_hashes); - - check_pairs::<_, _, _, _, error::Error>( - correlation_id, - &context.environment, - &context.store, - &states_to_check, - &pairs, - ) - .unwrap(); - - check_pairs_proofs::<_, _, _, _, error::Error>( - correlation_id, - &context.environment, - &context.store, - &states_to_check, - &pairs, - ) - .unwrap() -} - -fn in_memory_roundtrip_succeeds(pairs: &[(TestKey, TestValue)]) -> bool { - let correlation_id = CorrelationId::new(); - let (root_hash, tries) = TEST_TRIE_GENERATORS[0]().unwrap(); - let context = InMemoryTestContext::new(&tries).unwrap(); - let mut states_to_check = vec![]; - - let root_hashes = write_pairs::<_, _, _, _, in_memory::Error>( - correlation_id, - &context.environment, - &context.store, - &root_hash, - pairs, - ) - .unwrap(); - - states_to_check.extend(root_hashes); - - check_pairs::<_, _, _, _, in_memory::Error>( - correlation_id, - &context.environment, - &context.store, - &states_to_check, - &pairs, - ) - .unwrap(); - - check_pairs_proofs::<_, _, _, _, in_memory::Error>( - correlation_id, - &context.environment, - &context.store, - &states_to_check, - &pairs, - ) - .unwrap() -} - -fn test_key_arb() -> impl Strategy { - array::uniform7(any::()).prop_map(TestKey) -} - -fn test_value_arb() -> impl Strategy { - array::uniform6(any::()).prop_map(TestValue) -} - -proptest! 
{ - #[test] - fn prop_in_memory_roundtrip_succeeds(inputs in vec((test_key_arb(), test_value_arb()), get_range())) { - assert!(in_memory_roundtrip_succeeds(&inputs)); - } - - #[test] - fn prop_lmdb_roundtrip_succeeds(inputs in vec((test_key_arb(), test_value_arb()), get_range())) { - assert!(lmdb_roundtrip_succeeds(&inputs)); - } -} diff --git a/execution_engine/src/storage/trie_store/operations/tests/read.rs b/execution_engine/src/storage/trie_store/operations/tests/read.rs deleted file mode 100644 index aaec29ebd0..0000000000 --- a/execution_engine/src/storage/trie_store/operations/tests/read.rs +++ /dev/null @@ -1,128 +0,0 @@ -//! This module contains tests for [`StateReader::read`]. -//! -//! Our primary goal here is to test this functionality in isolation. -//! Therefore, we manually construct test tries from a well-known set of -//! leaves called [`TEST_LEAVES`](super::TEST_LEAVES), each of which represents a value we are -//! trying to store in the trie at a given key. -//! -//! We use two strategies for testing. See the [`partial_tries`] and -//! [`full_tries`] modules for more info. - -use super::*; -use crate::storage::error::{self, in_memory}; - -mod partial_tries { - //! Here we construct 6 separate "partial" tries, increasing in size - //! from 0 to 5 leaves. Each of these tries contains no past history, - //! only a single a root to read from. The tests check that we can read - //! only the expected set of leaves from the trie from this single root. 
- - use super::*; - - #[test] - fn lmdb_reads_from_n_leaf_partial_trie_had_expected_results() { - for (num_leaves, generator) in TEST_TRIE_GENERATORS.iter().enumerate() { - let correlation_id = CorrelationId::new(); - let (root_hash, tries) = generator().unwrap(); - let context = LmdbTestContext::new(&tries).unwrap(); - let test_leaves = TEST_LEAVES; - let (used, unused) = test_leaves.split_at(num_leaves); - - check_leaves::<_, _, _, _, error::Error>( - correlation_id, - &context.environment, - &context.store, - &root_hash, - used, - unused, - ) - .unwrap(); - } - } - - #[test] - fn in_memory_reads_from_n_leaf_partial_trie_had_expected_results() { - for (num_leaves, generator) in TEST_TRIE_GENERATORS.iter().enumerate() { - let correlation_id = CorrelationId::new(); - let (root_hash, tries) = generator().unwrap(); - let context = InMemoryTestContext::new(&tries).unwrap(); - let test_leaves = TEST_LEAVES; - let (used, unused) = test_leaves.split_at(num_leaves); - - check_leaves::<_, _, _, _, in_memory::Error>( - correlation_id, - &context.environment, - &context.store, - &root_hash, - used, - unused, - ) - .unwrap(); - } - } -} - -mod full_tries { - //! Here we construct a series of 6 "full" tries, increasing in size - //! from 0 to 5 leaves. Each trie contains the history from preceding - //! tries in this series, and past history can be read from the roots of - //! each preceding trie. The tests check that we can read only the - //! expected set of leaves from the trie at the current root and all past - //! roots. 
- - use super::*; - - #[test] - fn lmdb_reads_from_n_leaf_full_trie_had_expected_results() { - let correlation_id = CorrelationId::new(); - let context = LmdbTestContext::new(EMPTY_HASHED_TEST_TRIES).unwrap(); - let mut states: Vec = Vec::new(); - - for (state_index, generator) in TEST_TRIE_GENERATORS.iter().enumerate() { - let (root_hash, tries) = generator().unwrap(); - context.update(&tries).unwrap(); - states.push(root_hash); - - for (num_leaves, state) in states[..state_index].iter().enumerate() { - let test_leaves = TEST_LEAVES; - let (used, unused) = test_leaves.split_at(num_leaves); - check_leaves::<_, _, _, _, error::Error>( - correlation_id, - &context.environment, - &context.store, - state, - used, - unused, - ) - .unwrap(); - } - } - } - - #[test] - fn in_memory_reads_from_n_leaf_full_trie_had_expected_results() { - let correlation_id = CorrelationId::new(); - let context = InMemoryTestContext::new(EMPTY_HASHED_TEST_TRIES).unwrap(); - let mut states: Vec = Vec::new(); - - for (state_index, generator) in TEST_TRIE_GENERATORS.iter().enumerate() { - let (root_hash, tries) = generator().unwrap(); - context.update(&tries).unwrap(); - states.push(root_hash); - - for (num_leaves, state) in states[..state_index].iter().enumerate() { - let test_leaves = TEST_LEAVES; - let (used, unused) = test_leaves.split_at(num_leaves); - check_leaves::<_, _, _, _, in_memory::Error>( - correlation_id, - &context.environment, - &context.store, - state, - used, - unused, - ) - .unwrap(); - } - } - } -} diff --git a/execution_engine/src/storage/trie_store/operations/tests/scan.rs b/execution_engine/src/storage/trie_store/operations/tests/scan.rs deleted file mode 100644 index 331007bada..0000000000 --- a/execution_engine/src/storage/trie_store/operations/tests/scan.rs +++ /dev/null @@ -1,160 +0,0 @@ -use crate::shared::newtypes::{Blake2bHash, CorrelationId}; - -use super::*; -use crate::storage::{ - error::{self, in_memory}, - trie_store::operations::{scan, TrieScan}, -}; - -fn 
check_scan<'a, R, S, E>( - correlation_id: CorrelationId, - environment: &'a R, - store: &S, - root_hash: &Blake2bHash, - key: &[u8], -) -> Result<(), E> -where - R: TransactionSource<'a, Handle = S::Handle>, - S: TrieStore, - S::Error: From + std::fmt::Debug, - E: From + From + From, -{ - let txn: R::ReadTransaction = environment.create_read_txn()?; - let root = store - .get(&txn, &root_hash)? - .expect("check_scan received an invalid root hash"); - let TrieScan { mut tip, parents } = scan::( - correlation_id, - &txn, - store, - key, - &root, - )?; - - for (index, parent) in parents.into_iter().rev() { - let expected_tip_hash = { - let tip_bytes = tip.to_bytes().unwrap(); - Blake2bHash::new(&tip_bytes) - }; - match parent { - Trie::Leaf { .. } => panic!("parents should not contain any leaves"), - Trie::Node { pointer_block } => { - let pointer_tip_hash = pointer_block[::from(index)].map(|ptr| *ptr.hash()); - assert_eq!(Some(expected_tip_hash), pointer_tip_hash); - tip = Trie::Node { pointer_block }; - } - Trie::Extension { affix, pointer } => { - let pointer_tip_hash = pointer.hash().to_owned(); - assert_eq!(expected_tip_hash, pointer_tip_hash); - tip = Trie::Extension { affix, pointer }; - } - } - } - assert_eq!(root, tip); - txn.commit()?; - Ok(()) -} - -mod partial_tries { - use super::*; - - #[test] - fn lmdb_scans_from_n_leaf_partial_trie_had_expected_results() { - for generator in &TEST_TRIE_GENERATORS { - let correlation_id = CorrelationId::new(); - let (root_hash, tries) = generator().unwrap(); - let context = LmdbTestContext::new(&tries).unwrap(); - - for leaf in TEST_LEAVES.iter() { - let leaf_bytes = leaf.to_bytes().unwrap(); - check_scan::<_, _, error::Error>( - correlation_id, - &context.environment, - &context.store, - &root_hash, - &leaf_bytes, - ) - .unwrap() - } - } - } - - #[test] - fn in_memory_scans_from_n_leaf_partial_trie_had_expected_results() { - for generator in &TEST_TRIE_GENERATORS { - let correlation_id = CorrelationId::new(); - let 
(root_hash, tries) = generator().unwrap(); - let context = InMemoryTestContext::new(&tries).unwrap(); - - for leaf in TEST_LEAVES.iter() { - let leaf_bytes = leaf.to_bytes().unwrap(); - check_scan::<_, _, in_memory::Error>( - correlation_id, - &context.environment, - &context.store, - &root_hash, - &leaf_bytes, - ) - .unwrap() - } - } - } -} - -mod full_tries { - use super::*; - - #[test] - fn lmdb_scans_from_n_leaf_full_trie_had_expected_results() { - let correlation_id = CorrelationId::new(); - let context = LmdbTestContext::new(EMPTY_HASHED_TEST_TRIES).unwrap(); - let mut states: Vec = Vec::new(); - - for (state_index, generator) in TEST_TRIE_GENERATORS.iter().enumerate() { - let (root_hash, tries) = generator().unwrap(); - context.update(&tries).unwrap(); - states.push(root_hash); - - for state in &states[..state_index] { - for leaf in TEST_LEAVES.iter() { - let leaf_bytes = leaf.to_bytes().unwrap(); - check_scan::<_, _, error::Error>( - correlation_id, - &context.environment, - &context.store, - state, - &leaf_bytes, - ) - .unwrap() - } - } - } - } - - #[test] - fn in_memory_scans_from_n_leaf_full_trie_had_expected_results() { - let correlation_id = CorrelationId::new(); - let context = InMemoryTestContext::new(EMPTY_HASHED_TEST_TRIES).unwrap(); - let mut states: Vec = Vec::new(); - - for (state_index, generator) in TEST_TRIE_GENERATORS.iter().enumerate() { - let (root_hash, tries) = generator().unwrap(); - context.update(&tries).unwrap(); - states.push(root_hash); - - for state in &states[..state_index] { - for leaf in TEST_LEAVES.iter() { - let leaf_bytes = leaf.to_bytes().unwrap(); - check_scan::<_, _, in_memory::Error>( - correlation_id, - &context.environment, - &context.store, - state, - &leaf_bytes, - ) - .unwrap() - } - } - } - } -} diff --git a/execution_engine/src/storage/trie_store/operations/tests/synchronize.rs b/execution_engine/src/storage/trie_store/operations/tests/synchronize.rs deleted file mode 100644 index cffd6b1ae3..0000000000 --- 
a/execution_engine/src/storage/trie_store/operations/tests/synchronize.rs +++ /dev/null @@ -1,303 +0,0 @@ -use num_traits::{One, Zero}; - -use casper_types::bytesrepr::{self, FromBytes, ToBytes}; - -use crate::{ - shared::newtypes::{Blake2bHash, CorrelationId}, - storage::{ - error, - error::in_memory, - transaction_source::{Transaction, TransactionSource}, - trie_store::{ - operations::{ - self, - tests::{InMemoryTestContext, LmdbTestContext, TestKey, TestValue}, - ReadResult, - }, - TrieStore, - }, - }, -}; - -fn copy_state<'a, K, V, R, S, E>( - correlation_id: CorrelationId, - source_environment: &'a R, - source_store: &S, - target_environment: &'a R, - target_store: &S, - root: &Blake2bHash, -) -> Result<(), E> -where - K: ToBytes + FromBytes + Eq + std::fmt::Debug + Copy + Clone + Ord, - V: ToBytes + FromBytes + Eq + std::fmt::Debug + Copy, - R: TransactionSource<'a, Handle = S::Handle>, - S: TrieStore, - S::Error: From, - E: From + From + From, -{ - // Make sure no missing nodes in source - { - let txn: R::ReadTransaction = source_environment.create_read_txn()?; - let missing_from_source = operations::missing_trie_keys::<_, _, _, _, E>( - correlation_id, - &txn, - source_store, - vec![root.to_owned()], - )?; - assert_eq!(missing_from_source, Vec::new()); - txn.commit()?; - } - - // Copy source to target - { - let source_txn: R::ReadTransaction = source_environment.create_read_txn()?; - let mut target_txn: R::ReadWriteTransaction = target_environment.create_read_write_txn()?; - // Copy source to destination - let mut queue = vec![root.to_owned()]; - while let Some(trie_key) = queue.pop() { - let trie_to_insert = source_store - .get(&source_txn, &trie_key)? 
- .expect("should have trie"); - operations::put_trie::<_, _, _, _, E>( - correlation_id, - &mut target_txn, - target_store, - &trie_to_insert, - )?; - - // Now that we've added in `trie_to_insert`, queue up its children - let new_keys = operations::missing_trie_keys::<_, _, _, _, E>( - correlation_id, - &target_txn, - target_store, - vec![trie_key], - )?; - - queue.extend(new_keys); - } - source_txn.commit()?; - target_txn.commit()?; - } - - // After the copying process above there should be no missing entries in the target - { - let target_txn: R::ReadWriteTransaction = target_environment.create_read_write_txn()?; - let missing_from_target = operations::missing_trie_keys::<_, _, _, _, E>( - correlation_id, - &target_txn, - target_store, - vec![root.to_owned()], - )?; - assert_eq!(missing_from_target, Vec::new()); - target_txn.commit()?; - } - - // Make sure all of the target keys under the root hash are in the source - { - let source_txn: R::ReadTransaction = source_environment.create_read_txn()?; - let target_txn: R::ReadTransaction = target_environment.create_read_txn()?; - let target_keys = - operations::keys::<_, _, _, _>(correlation_id, &target_txn, target_store, root) - .collect::, S::Error>>()?; - for key in target_keys { - let maybe_value: ReadResult = operations::read::<_, _, _, _, E>( - correlation_id, - &source_txn, - source_store, - &root, - &key, - )?; - assert!(maybe_value.is_found()) - } - source_txn.commit()?; - target_txn.commit()?; - } - - // Make sure all of the target keys under the root hash are in the source - { - let source_txn: R::ReadTransaction = source_environment.create_read_txn()?; - let target_txn: R::ReadTransaction = target_environment.create_read_txn()?; - let soruce_keys = - operations::keys::<_, _, _, _>(correlation_id, &source_txn, source_store, root) - .collect::, S::Error>>()?; - for key in soruce_keys { - let maybe_value: ReadResult = operations::read::<_, _, _, _, E>( - correlation_id, - &target_txn, - target_store, - 
&root, - &key, - )?; - assert!(maybe_value.is_found()) - } - source_txn.commit()?; - target_txn.commit()?; - } - - Ok(()) -} - -#[test] -fn lmdb_copy_state() { - let correlation_id = CorrelationId::new(); - let (root_hash, tries) = super::create_6_leaf_trie().unwrap(); - let source = LmdbTestContext::new(&tries).unwrap(); - let target = LmdbTestContext::new::(&[]).unwrap(); - - copy_state::( - correlation_id, - &source.environment, - &source.store, - &target.environment, - &target.store, - &root_hash, - ) - .unwrap(); -} - -#[test] -fn in_memory_copy_state() { - let correlation_id = CorrelationId::new(); - let (root_hash, tries) = super::create_6_leaf_trie().unwrap(); - let source = InMemoryTestContext::new(&tries).unwrap(); - let target = InMemoryTestContext::new::(&[]).unwrap(); - - copy_state::( - correlation_id, - &source.environment, - &source.store, - &target.environment, - &target.store, - &root_hash, - ) - .unwrap(); -} - -fn missing_trie_keys_should_find_key_of_corrupt_value<'a, K, V, R, S, E>( - correlation_id: CorrelationId, - source_environment: &'a R, - source_store: &S, - target_environment: &'a R, - target_store: &S, - root: &Blake2bHash, -) -> Result<(), E> -where - K: ToBytes + FromBytes + Eq + std::fmt::Debug + Copy + Clone + Ord, - V: ToBytes + FromBytes + Eq + std::fmt::Debug + Copy, - R: TransactionSource<'a, Handle = S::Handle>, - S: TrieStore, - S::Error: From, - E: From + From + From, -{ - let bad_key = { - let txn: R::ReadTransaction = target_environment.create_read_txn()?; - let missing_from_target = operations::missing_trie_keys::<_, _, _, _, E>( - correlation_id, - &txn, - target_store, - vec![root.to_owned()], - )?; - txn.commit()?; - assert_eq!(missing_from_target.len(), usize::one()); - missing_from_target[usize::zero()] - }; - - let bad_value_hash = { - let txn: R::ReadTransaction = target_environment.create_read_txn()?; - let bad_trie = target_store.get(&txn, &bad_key)?.expect("should have trie"); - txn.commit()?; - 
Blake2bHash::new(&bad_trie.to_bytes()?) - }; - - assert_ne!(bad_key, bad_value_hash); - - // Fix target store now - { - let source_txn: R::ReadTransaction = source_environment.create_read_txn()?; - let mut target_txn: R::ReadWriteTransaction = target_environment.create_read_write_txn()?; - - let mut queue = vec![bad_key]; - while let Some(trie_key) = queue.pop() { - let trie_to_insert = source_store - .get(&source_txn, &trie_key)? - .expect("should have trie"); - - operations::put_trie::<_, _, _, _, E>( - correlation_id, - &mut target_txn, - target_store, - &trie_to_insert, - )?; - - // Now that we've added in `trie_to_insert`, queue up its children - let new_keys = operations::missing_trie_keys::<_, _, _, _, E>( - correlation_id, - &target_txn, - target_store, - vec![trie_key], - )?; - - queue.extend(new_keys); - } - - source_txn.commit()?; - target_txn.commit()?; - } - - // Should be no missing now in target store - { - let txn: R::ReadTransaction = target_environment.create_read_txn()?; - let missing_from_target = operations::missing_trie_keys::<_, _, _, _, E>( - correlation_id, - &txn, - target_store, - vec![root.to_owned()], - )?; - txn.commit()?; - assert_eq!(missing_from_target, Vec::new()); - } - - Ok(()) -} - -#[test] -fn lmdb_missing_trie_keys_should_find_key_of_corrupt_value() { - let correlation_id = CorrelationId::new(); - let (clean_root_hash, clean_tries) = super::create_6_leaf_trie().unwrap(); - let (corrupt_root_hash, corrupt_tries) = super::create_6_leaf_corrupt_trie().unwrap(); - let clean_context = LmdbTestContext::new(&clean_tries).unwrap(); - let corrupt_context = LmdbTestContext::new(&corrupt_tries).unwrap(); - - assert_eq!(clean_root_hash, corrupt_root_hash); - - missing_trie_keys_should_find_key_of_corrupt_value::( - correlation_id, - &clean_context.environment, - &clean_context.store, - &corrupt_context.environment, - &corrupt_context.store, - &clean_root_hash, - ) - .unwrap(); -} - -#[test] -fn 
in_memory_missing_trie_keys_should_find_key_of_corrupt_value() { - let correlation_id = CorrelationId::new(); - let (clean_root_hash, clean_tries) = super::create_6_leaf_trie().unwrap(); - let (corrupt_root_hash, corrupt_tries) = super::create_6_leaf_corrupt_trie().unwrap(); - let clean_context = InMemoryTestContext::new(&clean_tries).unwrap(); - let corrupt_context = InMemoryTestContext::new(&corrupt_tries).unwrap(); - - assert_eq!(clean_root_hash, corrupt_root_hash); - - missing_trie_keys_should_find_key_of_corrupt_value::( - correlation_id, - &clean_context.environment, - &clean_context.store, - &corrupt_context.environment, - &corrupt_context.store, - &clean_root_hash, - ) - .unwrap(); -} diff --git a/execution_engine/src/storage/trie_store/operations/tests/write.rs b/execution_engine/src/storage/trie_store/operations/tests/write.rs deleted file mode 100644 index b67216bb78..0000000000 --- a/execution_engine/src/storage/trie_store/operations/tests/write.rs +++ /dev/null @@ -1,657 +0,0 @@ -use super::*; - -mod empty_tries { - use std::collections::HashMap; - - use super::*; - - #[test] - fn lmdb_non_colliding_writes_to_n_leaf_empty_trie_had_expected_results() { - for num_leaves in 1..=TEST_LEAVES_LENGTH { - let correlation_id = CorrelationId::new(); - let (root_hash, tries) = TEST_TRIE_GENERATORS[0]().unwrap(); - let context = LmdbTestContext::new(&tries).unwrap(); - let initial_states = vec![root_hash]; - - writes_to_n_leaf_empty_trie_had_expected_results::<_, _, _, _, error::Error>( - correlation_id, - &context.environment, - &context.store, - &initial_states, - &TEST_LEAVES_NON_COLLIDING[..num_leaves], - ) - .unwrap(); - } - } - - #[test] - fn in_memory_non_colliding_writes_to_n_leaf_empty_trie_had_expected_results() { - for num_leaves in 1..=TEST_LEAVES_LENGTH { - let correlation_id = CorrelationId::new(); - let (root_hash, tries) = TEST_TRIE_GENERATORS[0]().unwrap(); - let context = InMemoryTestContext::new(&tries).unwrap(); - let initial_states = 
vec![root_hash]; - - writes_to_n_leaf_empty_trie_had_expected_results::<_, _, _, _, in_memory::Error>( - correlation_id, - &context.environment, - &context.store, - &initial_states, - &TEST_LEAVES_NON_COLLIDING[..num_leaves], - ) - .unwrap(); - } - } - - #[test] - fn lmdb_writes_to_n_leaf_empty_trie_had_expected_results() { - for num_leaves in 1..=TEST_LEAVES_LENGTH { - let correlation_id = CorrelationId::new(); - let (root_hash, tries) = TEST_TRIE_GENERATORS[0]().unwrap(); - let context = LmdbTestContext::new(&tries).unwrap(); - let initial_states = vec![root_hash]; - - writes_to_n_leaf_empty_trie_had_expected_results::<_, _, _, _, error::Error>( - correlation_id, - &context.environment, - &context.store, - &initial_states, - &TEST_LEAVES[..num_leaves], - ) - .unwrap(); - } - } - - #[test] - fn in_memory_writes_to_n_leaf_empty_trie_had_expected_results() { - for num_leaves in 1..=TEST_LEAVES_LENGTH { - let correlation_id = CorrelationId::new(); - let (root_hash, tries) = TEST_TRIE_GENERATORS[0]().unwrap(); - let context = InMemoryTestContext::new(&tries).unwrap(); - let initial_states = vec![root_hash]; - - writes_to_n_leaf_empty_trie_had_expected_results::<_, _, _, _, in_memory::Error>( - correlation_id, - &context.environment, - &context.store, - &initial_states, - &TEST_LEAVES[..num_leaves], - ) - .unwrap(); - } - } - - #[test] - fn in_memory_writes_to_n_leaf_empty_trie_had_expected_store_contents() { - let expected_contents: HashMap = { - let mut ret = HashMap::new(); - for generator in &TEST_TRIE_GENERATORS { - let (_, tries) = generator().unwrap(); - for HashedTestTrie { hash, trie } in tries { - ret.insert(hash, trie); - } - } - ret - }; - - let actual_contents: HashMap = { - let correlation_id = CorrelationId::new(); - let (root_hash, tries) = TEST_TRIE_GENERATORS[0]().unwrap(); - let context = InMemoryTestContext::new(&tries).unwrap(); - - write_leaves::<_, _, _, _, in_memory::Error>( - correlation_id, - &context.environment, - &context.store, - 
&root_hash, - &TEST_LEAVES, - ) - .unwrap(); - - context.environment.dump(None).unwrap() - }; - - assert_eq!(expected_contents, actual_contents) - } -} - -mod partial_tries { - use super::*; - - fn noop_writes_to_n_leaf_partial_trie_had_expected_results<'a, R, S, E>( - correlation_id: CorrelationId, - environment: &'a R, - store: &S, - states: &[Blake2bHash], - num_leaves: usize, - ) -> Result<(), E> - where - R: TransactionSource<'a, Handle = S::Handle>, - S: TrieStore, - S::Error: From, - E: From + From + From, - { - // Check that the expected set of leaves is in the trie - check_leaves::<_, _, _, _, E>( - correlation_id, - environment, - store, - &states[0], - &TEST_LEAVES[..num_leaves], - &[], - )?; - - // Rewrite that set of leaves - let write_results = write_leaves::<_, _, _, _, E>( - correlation_id, - environment, - store, - &states[0], - &TEST_LEAVES[..num_leaves], - )?; - - assert!(write_results - .iter() - .all(|result| *result == WriteResult::AlreadyExists)); - - // Check that the expected set of leaves is in the trie - check_leaves::<_, _, _, _, E>( - correlation_id, - environment, - store, - &states[0], - &TEST_LEAVES[..num_leaves], - &[], - ) - } - - #[test] - fn lmdb_noop_writes_to_n_leaf_partial_trie_had_expected_results() { - for (num_leaves, generator) in TEST_TRIE_GENERATORS.iter().enumerate() { - let correlation_id = CorrelationId::new(); - let (root_hash, tries) = generator().unwrap(); - let context = LmdbTestContext::new(&tries).unwrap(); - let states = vec![root_hash]; - - noop_writes_to_n_leaf_partial_trie_had_expected_results::<_, _, error::Error>( - correlation_id, - &context.environment, - &context.store, - &states, - num_leaves, - ) - .unwrap() - } - } - - #[test] - fn in_memory_noop_writes_to_n_leaf_partial_trie_had_expected_results() { - for (num_leaves, generator) in TEST_TRIE_GENERATORS.iter().enumerate() { - let correlation_id = CorrelationId::new(); - let (root_hash, tries) = generator().unwrap(); - let context = 
InMemoryTestContext::new(&tries).unwrap(); - let states = vec![root_hash]; - - noop_writes_to_n_leaf_partial_trie_had_expected_results::<_, _, in_memory::Error>( - correlation_id, - &context.environment, - &context.store, - &states, - num_leaves, - ) - .unwrap(); - } - } - - fn update_writes_to_n_leaf_partial_trie_had_expected_results<'a, R, S, E>( - correlation_id: CorrelationId, - environment: &'a R, - store: &S, - states: &[Blake2bHash], - num_leaves: usize, - ) -> Result<(), E> - where - R: TransactionSource<'a, Handle = S::Handle>, - S: TrieStore, - S::Error: From, - E: From + From + From, - { - let mut states = states.to_owned(); - - // Check that the expected set of leaves is in the trie - check_leaves::<_, _, _, _, E>( - correlation_id, - environment, - store, - &states[0], - &TEST_LEAVES[..num_leaves], - &[], - )?; - - // Update and check leaves - for (n, leaf) in TEST_LEAVES_UPDATED[..num_leaves].iter().enumerate() { - let expected_leaves: Vec = { - let n = n + 1; - TEST_LEAVES_UPDATED[..n] - .iter() - .chain(&TEST_LEAVES[n..num_leaves]) - .map(ToOwned::to_owned) - .collect() - }; - - let root_hash = { - let current_root = states.last().unwrap(); - let results = write_leaves::<_, _, _, _, E>( - correlation_id, - environment, - store, - ¤t_root, - &[leaf.to_owned()], - )?; - assert_eq!(1, results.len()); - match results[0] { - WriteResult::Written(root_hash) => root_hash, - _ => panic!("value not written"), - } - }; - - states.push(root_hash); - - // Check that the expected set of leaves is in the trie - check_leaves::<_, _, _, _, E>( - correlation_id, - environment, - store, - states.last().unwrap(), - &expected_leaves, - &[], - )?; - } - - Ok(()) - } - - #[test] - fn lmdb_update_writes_to_n_leaf_partial_trie_had_expected_results() { - for (num_leaves, generator) in TEST_TRIE_GENERATORS.iter().enumerate() { - let correlation_id = CorrelationId::new(); - let (root_hash, tries) = generator().unwrap(); - let context = LmdbTestContext::new(&tries).unwrap(); - 
let initial_states = vec![root_hash]; - - update_writes_to_n_leaf_partial_trie_had_expected_results::<_, _, error::Error>( - correlation_id, - &context.environment, - &context.store, - &initial_states, - num_leaves, - ) - .unwrap() - } - } - - #[test] - fn in_memory_update_writes_to_n_leaf_partial_trie_had_expected_results() { - for (num_leaves, generator) in TEST_TRIE_GENERATORS.iter().enumerate() { - let correlation_id = CorrelationId::new(); - let (root_hash, tries) = generator().unwrap(); - let context = InMemoryTestContext::new(&tries).unwrap(); - let states = vec![root_hash]; - - update_writes_to_n_leaf_partial_trie_had_expected_results::<_, _, in_memory::Error>( - correlation_id, - &context.environment, - &context.store, - &states, - num_leaves, - ) - .unwrap() - } - } -} - -mod full_tries { - use super::*; - - fn noop_writes_to_n_leaf_full_trie_had_expected_results<'a, R, S, E>( - correlation_id: CorrelationId, - environment: &'a R, - store: &S, - states: &[Blake2bHash], - index: usize, - ) -> Result<(), E> - where - R: TransactionSource<'a, Handle = S::Handle>, - S: TrieStore, - S::Error: From, - E: From + From + From, - { - // Check that the expected set of leaves is in the trie at every state reference - for (num_leaves, state) in states[..index].iter().enumerate() { - check_leaves::<_, _, _, _, E>( - correlation_id, - environment, - store, - state, - &TEST_LEAVES[..num_leaves], - &[], - )?; - } - - // Rewrite that set of leaves - let write_results = write_leaves::<_, _, _, _, E>( - correlation_id, - environment, - store, - states.last().unwrap(), - &TEST_LEAVES[..index], - )?; - - assert!(write_results - .iter() - .all(|result| *result == WriteResult::AlreadyExists)); - - // Check that the expected set of leaves is in the trie at every state reference - for (num_leaves, state) in states[..index].iter().enumerate() { - check_leaves::<_, _, _, _, E>( - correlation_id, - environment, - store, - state, - &TEST_LEAVES[..num_leaves], - &[], - )? 
- } - - Ok(()) - } - - #[test] - fn lmdb_noop_writes_to_n_leaf_full_trie_had_expected_results() { - let correlation_id = CorrelationId::new(); - let context = LmdbTestContext::new(EMPTY_HASHED_TEST_TRIES).unwrap(); - let mut states: Vec = Vec::new(); - - for (index, generator) in TEST_TRIE_GENERATORS.iter().enumerate() { - let (root_hash, tries) = generator().unwrap(); - context.update(&tries).unwrap(); - states.push(root_hash); - - noop_writes_to_n_leaf_full_trie_had_expected_results::<_, _, error::Error>( - correlation_id, - &context.environment, - &context.store, - &states, - index, - ) - .unwrap(); - } - } - - #[test] - fn in_memory_noop_writes_to_n_leaf_full_trie_had_expected_results() { - let correlation_id = CorrelationId::new(); - let context = InMemoryTestContext::new(EMPTY_HASHED_TEST_TRIES).unwrap(); - let mut states: Vec = Vec::new(); - - for (index, generator) in TEST_TRIE_GENERATORS.iter().enumerate() { - let (root_hash, tries) = generator().unwrap(); - context.update(&tries).unwrap(); - states.push(root_hash); - - noop_writes_to_n_leaf_full_trie_had_expected_results::<_, _, in_memory::Error>( - correlation_id, - &context.environment, - &context.store, - &states, - index, - ) - .unwrap(); - } - } - - fn update_writes_to_n_leaf_full_trie_had_expected_results<'a, R, S, E>( - correlation_id: CorrelationId, - environment: &'a R, - store: &S, - states: &[Blake2bHash], - num_leaves: usize, - ) -> Result<(), E> - where - R: TransactionSource<'a, Handle = S::Handle>, - S: TrieStore, - S::Error: From, - E: From + From + From, - { - let mut states = states.to_vec(); - - // Check that the expected set of leaves is in the trie at every state reference - for (state_index, state) in states.iter().enumerate() { - check_leaves::<_, _, _, _, E>( - correlation_id, - environment, - store, - state, - &TEST_LEAVES[..state_index], - &[], - )?; - } - - // Write set of leaves to the trie - let hashes = write_leaves::<_, _, _, _, E>( - correlation_id, - environment, - store, 
- states.last().unwrap(), - &TEST_LEAVES_UPDATED[..num_leaves], - )? - .iter() - .map(|result| match result { - WriteResult::Written(root_hash) => *root_hash, - _ => panic!("write_leaves resulted in non-write"), - }) - .collect::>(); - - states.extend(hashes); - - let expected: Vec> = { - let mut ret = vec![vec![]]; - if num_leaves > 0 { - for i in 1..=num_leaves { - ret.push(TEST_LEAVES[..i].to_vec()) - } - for i in 1..=num_leaves { - ret.push( - TEST_LEAVES[i..num_leaves] - .iter() - .chain(&TEST_LEAVES_UPDATED[..i]) - .map(ToOwned::to_owned) - .collect::>(), - ) - } - } - ret - }; - - assert_eq!(states.len(), expected.len()); - - // Check that the expected set of leaves is in the trie at every state reference - for (state_index, state) in states.iter().enumerate() { - check_leaves::<_, _, _, _, E>( - correlation_id, - environment, - store, - state, - &expected[state_index], - &[], - )?; - } - - Ok(()) - } - - #[test] - fn lmdb_update_writes_to_n_leaf_full_trie_had_expected_results() { - let correlation_id = CorrelationId::new(); - let context = LmdbTestContext::new(EMPTY_HASHED_TEST_TRIES).unwrap(); - let mut states: Vec = Vec::new(); - - for (num_leaves, generator) in TEST_TRIE_GENERATORS.iter().enumerate() { - let (root_hash, tries) = generator().unwrap(); - context.update(&tries).unwrap(); - states.push(root_hash); - - update_writes_to_n_leaf_full_trie_had_expected_results::<_, _, error::Error>( - correlation_id, - &context.environment, - &context.store, - &states, - num_leaves, - ) - .unwrap() - } - } - - #[test] - fn in_memory_update_writes_to_n_leaf_full_trie_had_expected_results() { - let correlation_id = CorrelationId::new(); - let context = InMemoryTestContext::new(EMPTY_HASHED_TEST_TRIES).unwrap(); - let mut states: Vec = Vec::new(); - - for (num_leaves, generator) in TEST_TRIE_GENERATORS.iter().enumerate() { - let (root_hash, tries) = generator().unwrap(); - context.update(&tries).unwrap(); - states.push(root_hash); - - 
update_writes_to_n_leaf_full_trie_had_expected_results::<_, _, in_memory::Error>( - correlation_id, - &context.environment, - &context.store, - &states, - num_leaves, - ) - .unwrap() - } - } - - fn node_writes_to_5_leaf_full_trie_had_expected_results<'a, R, S, E>( - correlation_id: CorrelationId, - environment: &'a R, - store: &S, - states: &[Blake2bHash], - ) -> Result<(), E> - where - R: TransactionSource<'a, Handle = S::Handle>, - S: TrieStore, - S::Error: From, - E: From + From + From, - { - let mut states = states.to_vec(); - let num_leaves = TEST_LEAVES_LENGTH; - - // Check that the expected set of leaves is in the trie at every state reference - for (state_index, state) in states.iter().enumerate() { - check_leaves::<_, _, _, _, E>( - correlation_id, - environment, - store, - state, - &TEST_LEAVES[..state_index], - &[], - )?; - } - - // Write set of leaves to the trie - let hashes = write_leaves::<_, _, _, _, E>( - correlation_id, - environment, - store, - states.last().unwrap(), - &TEST_LEAVES_ADJACENTS, - )? 
- .iter() - .map(|result| match result { - WriteResult::Written(root_hash) => *root_hash, - _ => panic!("write_leaves resulted in non-write"), - }) - .collect::>(); - - states.extend(hashes); - - let expected: Vec> = { - let mut ret = vec![vec![]]; - if num_leaves > 0 { - for i in 1..=num_leaves { - ret.push(TEST_LEAVES[..i].to_vec()) - } - for i in 1..=num_leaves { - ret.push( - TEST_LEAVES - .iter() - .chain(&TEST_LEAVES_ADJACENTS[..i]) - .map(ToOwned::to_owned) - .collect::>(), - ) - } - } - ret - }; - - assert_eq!(states.len(), expected.len()); - - // Check that the expected set of leaves is in the trie at every state reference - for (state_index, state) in states.iter().enumerate() { - check_leaves::<_, _, _, _, E>( - correlation_id, - environment, - store, - state, - &expected[state_index], - &[], - )?; - } - Ok(()) - } - - #[test] - fn lmdb_node_writes_to_5_leaf_full_trie_had_expected_results() { - let correlation_id = CorrelationId::new(); - let context = LmdbTestContext::new(EMPTY_HASHED_TEST_TRIES).unwrap(); - let mut states: Vec = Vec::new(); - - for generator in &TEST_TRIE_GENERATORS { - let (root_hash, tries) = generator().unwrap(); - context.update(&tries).unwrap(); - states.push(root_hash); - } - - node_writes_to_5_leaf_full_trie_had_expected_results::<_, _, error::Error>( - correlation_id, - &context.environment, - &context.store, - &states, - ) - .unwrap() - } - - #[test] - fn in_memory_node_writes_to_5_leaf_full_trie_had_expected_results() { - let correlation_id = CorrelationId::new(); - let context = InMemoryTestContext::new(EMPTY_HASHED_TEST_TRIES).unwrap(); - let mut states: Vec = Vec::new(); - - for generator in &TEST_TRIE_GENERATORS { - let (root_hash, tries) = generator().unwrap(); - context.update(&tries).unwrap(); - states.push(root_hash); - } - - node_writes_to_5_leaf_full_trie_had_expected_results::<_, _, in_memory::Error>( - correlation_id, - &context.environment, - &context.store, - &states, - ) - .unwrap() - } -} diff --git 
a/execution_engine/src/storage/trie_store/tests/concurrent.rs b/execution_engine/src/storage/trie_store/tests/concurrent.rs deleted file mode 100644 index cd723e4d77..0000000000 --- a/execution_engine/src/storage/trie_store/tests/concurrent.rs +++ /dev/null @@ -1,128 +0,0 @@ -use std::{ - sync::{Arc, Barrier}, - thread, -}; - -use casper_types::bytesrepr::Bytes; -use tempfile::tempdir; - -use super::TestData; -use crate::storage::{ - store::Store, - transaction_source::{ - in_memory::InMemoryEnvironment, lmdb::LmdbEnvironment, Transaction, TransactionSource, - }, - trie::Trie, - trie_store::{in_memory::InMemoryTrieStore, lmdb::LmdbTrieStore}, - DEFAULT_TEST_MAX_DB_SIZE, DEFAULT_TEST_MAX_READERS, -}; - -#[test] -fn lmdb_writer_mutex_does_not_collide_with_readers() { - let dir = tempdir().unwrap(); - let env = Arc::new( - LmdbEnvironment::new( - &dir.path().to_path_buf(), - DEFAULT_TEST_MAX_DB_SIZE, - DEFAULT_TEST_MAX_READERS, - ) - .unwrap(), - ); - let store = Arc::new(LmdbTrieStore::new(&env, None, Default::default()).unwrap()); - let num_threads = 10; - let barrier = Arc::new(Barrier::new(num_threads + 1)); - let mut handles = Vec::new(); - let TestData(ref leaf_1_hash, ref leaf_1) = &super::create_data()[0..1][0]; - - for _ in 0..num_threads { - let reader_env = env.clone(); - let reader_store = store.clone(); - let reader_barrier = barrier.clone(); - let leaf_1_hash = *leaf_1_hash; - #[allow(clippy::clone_on_copy)] - let leaf_1 = leaf_1.clone(); - - handles.push(thread::spawn(move || { - { - let txn = reader_env.create_read_txn().unwrap(); - let result: Option> = - reader_store.get(&txn, &leaf_1_hash).unwrap(); - assert_eq!(result, None); - txn.commit().unwrap(); - } - // wait for other reader threads to read and the main thread to - // take a read-write transaction - reader_barrier.wait(); - // wait for main thread to put and commit - reader_barrier.wait(); - { - let txn = reader_env.create_read_txn().unwrap(); - let result: Option> = - reader_store.get(&txn, 
&leaf_1_hash).unwrap(); - txn.commit().unwrap(); - result.unwrap() == leaf_1 - } - })); - } - - let mut txn = env.create_read_write_txn().unwrap(); - // wait for reader threads to read - barrier.wait(); - store.put(&mut txn, &leaf_1_hash, &leaf_1).unwrap(); - txn.commit().unwrap(); - // sync with reader threads - barrier.wait(); - - assert!(handles.into_iter().all(|b| b.join().unwrap())) -} - -#[test] -fn in_memory_writer_mutex_does_not_collide_with_readers() { - let env = Arc::new(InMemoryEnvironment::new()); - let store = Arc::new(InMemoryTrieStore::new(&env, None)); - let num_threads = 10; - let barrier = Arc::new(Barrier::new(num_threads + 1)); - let mut handles = Vec::new(); - let TestData(ref leaf_1_hash, ref leaf_1) = &super::create_data()[0..1][0]; - - for _ in 0..num_threads { - let reader_env = env.clone(); - let reader_store = store.clone(); - let reader_barrier = barrier.clone(); - let leaf_1_hash = *leaf_1_hash; - #[allow(clippy::clone_on_copy)] - let leaf_1 = leaf_1.clone(); - - handles.push(thread::spawn(move || { - { - let txn = reader_env.create_read_txn().unwrap(); - let result: Option> = - reader_store.get(&txn, &leaf_1_hash).unwrap(); - assert_eq!(result, None); - txn.commit().unwrap(); - } - // wait for other reader threads to read and the main thread to - // take a read-write transaction - reader_barrier.wait(); - // wait for main thread to put and commit - reader_barrier.wait(); - { - let txn = reader_env.create_read_txn().unwrap(); - let result: Option> = - reader_store.get(&txn, &leaf_1_hash).unwrap(); - txn.commit().unwrap(); - result.unwrap() == leaf_1 - } - })); - } - - let mut txn = env.create_read_write_txn().unwrap(); - // wait for reader threads to read - barrier.wait(); - store.put(&mut txn, &leaf_1_hash, &leaf_1).unwrap(); - txn.commit().unwrap(); - // sync with reader threads - barrier.wait(); - - assert!(handles.into_iter().all(|b| b.join().unwrap())) -} diff --git a/execution_engine/src/storage/trie_store/tests/mod.rs 
b/execution_engine/src/storage/trie_store/tests/mod.rs deleted file mode 100644 index 0d8ffa5e60..0000000000 --- a/execution_engine/src/storage/trie_store/tests/mod.rs +++ /dev/null @@ -1,78 +0,0 @@ -mod concurrent; -mod proptests; -mod simple; - -use casper_types::bytesrepr::{Bytes, ToBytes}; - -use crate::{ - shared::newtypes::Blake2bHash, - storage::trie::{Pointer, PointerBlock, Trie}, -}; - -#[derive(Clone)] -struct TestData(Blake2bHash, Trie); - -impl<'a, K, V> Into<(&'a Blake2bHash, &'a Trie)> for &'a TestData { - fn into(self) -> (&'a Blake2bHash, &'a Trie) { - (&self.0, &self.1) - } -} - -fn create_data() -> Vec> { - let leaf_1 = Trie::Leaf { - key: Bytes::from(vec![0u8, 0, 0]), - value: Bytes::from(b"val_1".to_vec()), - }; - let leaf_2 = Trie::Leaf { - key: Bytes::from(vec![1u8, 0, 0]), - value: Bytes::from(b"val_2".to_vec()), - }; - let leaf_3 = Trie::Leaf { - key: Bytes::from(vec![1u8, 0, 1]), - value: Bytes::from(b"val_3".to_vec()), - }; - - let leaf_1_hash = Blake2bHash::new(&leaf_1.to_bytes().unwrap()); - let leaf_2_hash = Blake2bHash::new(&leaf_2.to_bytes().unwrap()); - let leaf_3_hash = Blake2bHash::new(&leaf_3.to_bytes().unwrap()); - - let node_2: Trie = { - let mut pointer_block = PointerBlock::new(); - pointer_block[0] = Some(Pointer::LeafPointer(leaf_2_hash)); - pointer_block[1] = Some(Pointer::LeafPointer(leaf_3_hash)); - let pointer_block = Box::new(pointer_block); - Trie::Node { pointer_block } - }; - - let node_2_hash = Blake2bHash::new(&node_2.to_bytes().unwrap()); - - let ext_node: Trie = { - let affix = vec![1u8, 0]; - let pointer = Pointer::NodePointer(node_2_hash); - Trie::Extension { - affix: affix.into(), - pointer, - } - }; - - let ext_node_hash = Blake2bHash::new(&ext_node.to_bytes().unwrap()); - - let node_1: Trie = { - let mut pointer_block = PointerBlock::new(); - pointer_block[0] = Some(Pointer::LeafPointer(leaf_1_hash)); - pointer_block[1] = Some(Pointer::NodePointer(ext_node_hash)); - let pointer_block = 
Box::new(pointer_block); - Trie::Node { pointer_block } - }; - - let node_1_hash = Blake2bHash::new(&node_1.to_bytes().unwrap()); - - vec![ - TestData(leaf_1_hash, leaf_1), - TestData(leaf_2_hash, leaf_2), - TestData(leaf_3_hash, leaf_3), - TestData(node_1_hash, node_1), - TestData(node_2_hash, node_2), - TestData(ext_node_hash, ext_node), - ] -} diff --git a/execution_engine/src/storage/trie_store/tests/proptests.rs b/execution_engine/src/storage/trie_store/tests/proptests.rs deleted file mode 100644 index cbf82217d0..0000000000 --- a/execution_engine/src/storage/trie_store/tests/proptests.rs +++ /dev/null @@ -1,80 +0,0 @@ -use std::{collections::BTreeMap, ops::RangeInclusive}; - -use lmdb::DatabaseFlags; -use proptest::{collection::vec, prelude::proptest}; -use tempfile::tempdir; - -use crate::shared::{newtypes::Blake2bHash, stored_value::StoredValue}; -use casper_types::{bytesrepr::ToBytes, Key}; - -use crate::storage::{ - store::tests as store_tests, - trie::{gens::trie_arb, Trie}, - DEFAULT_TEST_MAX_DB_SIZE, DEFAULT_TEST_MAX_READERS, -}; - -const DEFAULT_MIN_LENGTH: usize = 1; -const DEFAULT_MAX_LENGTH: usize = 4; - -fn get_range() -> RangeInclusive { - let start = option_env!("CL_TRIE_STORE_TEST_VECTOR_MIN_LENGTH") - .and_then(|s| str::parse::(s).ok()) - .unwrap_or(DEFAULT_MIN_LENGTH); - let end = option_env!("CL_TRIE_STORE_TEST_VECTOR_MAX_LENGTH") - .and_then(|s| str::parse::(s).ok()) - .unwrap_or(DEFAULT_MAX_LENGTH); - RangeInclusive::new(start, end) -} - -fn in_memory_roundtrip_succeeds(inputs: Vec>) -> bool { - use crate::storage::{ - transaction_source::in_memory::InMemoryEnvironment, - trie_store::in_memory::InMemoryTrieStore, - }; - - let env = InMemoryEnvironment::new(); - let store = InMemoryTrieStore::new(&env, None); - - let inputs: BTreeMap> = inputs - .into_iter() - .map(|trie| (Blake2bHash::new(&trie.to_bytes().unwrap()), trie)) - .collect(); - - store_tests::roundtrip_succeeds(&env, &store, inputs).unwrap() -} - -fn 
lmdb_roundtrip_succeeds(inputs: Vec>) -> bool { - use crate::storage::{ - transaction_source::lmdb::LmdbEnvironment, trie_store::lmdb::LmdbTrieStore, - }; - - let tmp_dir = tempdir().unwrap(); - let env = LmdbEnvironment::new( - &tmp_dir.path().to_path_buf(), - DEFAULT_TEST_MAX_DB_SIZE, - DEFAULT_TEST_MAX_READERS, - ) - .unwrap(); - let store = LmdbTrieStore::new(&env, None, DatabaseFlags::empty()).unwrap(); - - let inputs: BTreeMap> = inputs - .into_iter() - .map(|trie| (Blake2bHash::new(&trie.to_bytes().unwrap()), trie)) - .collect(); - - let ret = store_tests::roundtrip_succeeds(&env, &store, inputs).unwrap(); - tmp_dir.close().unwrap(); - ret -} - -proptest! { - #[test] - fn prop_in_memory_roundtrip_succeeds(v in vec(trie_arb(), get_range())) { - assert!(in_memory_roundtrip_succeeds(v)) - } - - #[test] - fn prop_lmdb_roundtrip_succeeds(v in vec(trie_arb(), get_range())) { - assert!(lmdb_roundtrip_succeeds(v)) - } -} diff --git a/execution_engine/src/storage/trie_store/tests/simple.rs b/execution_engine/src/storage/trie_store/tests/simple.rs deleted file mode 100644 index b725d507f1..0000000000 --- a/execution_engine/src/storage/trie_store/tests/simple.rs +++ /dev/null @@ -1,602 +0,0 @@ -use lmdb::DatabaseFlags; -use tempfile::tempdir; - -use casper_types::bytesrepr::{self, Bytes, FromBytes, ToBytes}; - -use super::TestData; -use crate::storage::{ - error::{self, in_memory}, - store::StoreExt, - transaction_source::{ - in_memory::InMemoryEnvironment, lmdb::LmdbEnvironment, Transaction, TransactionSource, - }, - trie::Trie, - trie_store::{in_memory::InMemoryTrieStore, lmdb::LmdbTrieStore, TrieStore}, - DEFAULT_TEST_MAX_DB_SIZE, DEFAULT_TEST_MAX_READERS, -}; - -fn put_succeeds<'a, K, V, S, X, E>( - store: &S, - transaction_source: &'a X, - items: &[TestData], -) -> Result<(), E> -where - K: ToBytes, - V: ToBytes, - S: TrieStore, - X: TransactionSource<'a, Handle = S::Handle>, - S::Error: From, - E: From + From, -{ - let mut txn: X::ReadWriteTransaction = 
transaction_source.create_read_write_txn()?; - let items = items.iter().map(Into::into); - store.put_many(&mut txn, items)?; - txn.commit()?; - Ok(()) -} - -#[test] -fn in_memory_put_succeeds() { - let env = InMemoryEnvironment::new(); - let store = InMemoryTrieStore::new(&env, None); - let data = &super::create_data()[0..1]; - - assert!(put_succeeds::<_, _, _, _, in_memory::Error>(&store, &env, data).is_ok()); -} - -#[test] -fn lmdb_put_succeeds() { - let tmp_dir = tempdir().unwrap(); - let env = LmdbEnvironment::new( - &tmp_dir.path().to_path_buf(), - DEFAULT_TEST_MAX_DB_SIZE, - DEFAULT_TEST_MAX_READERS, - ) - .unwrap(); - let store = LmdbTrieStore::new(&env, None, DatabaseFlags::empty()).unwrap(); - let data = &super::create_data()[0..1]; - - assert!(put_succeeds::<_, _, _, _, error::Error>(&store, &env, data).is_ok()); - - tmp_dir.close().unwrap(); -} - -fn put_get_succeeds<'a, K, V, S, X, E>( - store: &S, - transaction_source: &'a X, - items: &[TestData], -) -> Result>>, E> -where - K: ToBytes + FromBytes, - V: ToBytes + FromBytes, - S: TrieStore, - X: TransactionSource<'a, Handle = S::Handle>, - S::Error: From, - E: From + From, -{ - let mut txn: X::ReadWriteTransaction = transaction_source.create_read_write_txn()?; - let items = items.iter().map(Into::into); - store.put_many(&mut txn, items.clone())?; - let keys = items.map(|(k, _)| k); - let ret = store.get_many(&txn, keys)?; - txn.commit()?; - Ok(ret) -} - -#[test] -fn in_memory_put_get_succeeds() { - let env = InMemoryEnvironment::new(); - let store = InMemoryTrieStore::new(&env, None); - let data = &super::create_data()[0..1]; - - let expected: Vec> = - data.to_vec().into_iter().map(|TestData(_, v)| v).collect(); - - assert_eq!( - expected, - put_get_succeeds::<_, _, _, _, in_memory::Error>(&store, &env, data) - .expect("put_get_succeeds failed") - .into_iter() - .collect::>>>() - .expect("one of the outputs was empty") - ) -} - -#[test] -fn lmdb_put_get_succeeds() { - let tmp_dir = tempdir().unwrap(); - 
let env = LmdbEnvironment::new( - &tmp_dir.path().to_path_buf(), - DEFAULT_TEST_MAX_DB_SIZE, - DEFAULT_TEST_MAX_READERS, - ) - .unwrap(); - let store = LmdbTrieStore::new(&env, None, DatabaseFlags::empty()).unwrap(); - let data = &super::create_data()[0..1]; - - let expected: Vec> = - data.to_vec().into_iter().map(|TestData(_, v)| v).collect(); - - assert_eq!( - expected, - put_get_succeeds::<_, _, _, _, error::Error>(&store, &env, data) - .expect("put_get_succeeds failed") - .into_iter() - .collect::>>>() - .expect("one of the outputs was empty") - ); - - tmp_dir.close().unwrap(); -} - -#[test] -fn in_memory_put_get_many_succeeds() { - let env = InMemoryEnvironment::new(); - let store = InMemoryTrieStore::new(&env, None); - let data = super::create_data(); - - let expected: Vec> = - data.to_vec().into_iter().map(|TestData(_, v)| v).collect(); - - assert_eq!( - expected, - put_get_succeeds::<_, _, _, _, in_memory::Error>(&store, &env, &data) - .expect("put_get failed") - .into_iter() - .collect::>>>() - .expect("one of the outputs was empty") - ) -} - -#[test] -fn lmdb_put_get_many_succeeds() { - let tmp_dir = tempdir().unwrap(); - let env = LmdbEnvironment::new( - &tmp_dir.path().to_path_buf(), - DEFAULT_TEST_MAX_DB_SIZE, - DEFAULT_TEST_MAX_READERS, - ) - .unwrap(); - let store = LmdbTrieStore::new(&env, None, DatabaseFlags::empty()).unwrap(); - let data = super::create_data(); - - let expected: Vec> = - data.to_vec().into_iter().map(|TestData(_, v)| v).collect(); - - assert_eq!( - expected, - put_get_succeeds::<_, _, _, _, error::Error>(&store, &env, &data) - .expect("put_get failed") - .into_iter() - .collect::>>>() - .expect("one of the outputs was empty") - ); - - tmp_dir.close().unwrap(); -} - -fn uncommitted_read_write_txn_does_not_persist<'a, K, V, S, X, E>( - store: &S, - transaction_source: &'a X, - items: &[TestData], -) -> Result>>, E> -where - K: ToBytes + FromBytes, - V: ToBytes + FromBytes, - S: TrieStore, - X: TransactionSource<'a, Handle = 
S::Handle>, - S::Error: From, - E: From + From, -{ - { - let mut txn: X::ReadWriteTransaction = transaction_source.create_read_write_txn()?; - let items = items.iter().map(Into::into); - store.put_many(&mut txn, items)?; - } - { - let txn: X::ReadTransaction = transaction_source.create_read_txn()?; - let keys = items.iter().map(|TestData(k, _)| k); - let ret = store.get_many(&txn, keys)?; - txn.commit()?; - Ok(ret) - } -} - -#[test] -fn in_memory_uncommitted_read_write_txn_does_not_persist() { - let env = InMemoryEnvironment::new(); - let store = InMemoryTrieStore::new(&env, None); - let data = super::create_data(); - - assert_eq!( - None, - uncommitted_read_write_txn_does_not_persist::<_, _, _, _, in_memory::Error>( - &store, &env, &data, - ) - .expect("uncommitted_read_write_txn_does_not_persist failed") - .into_iter() - .collect::>>>() - ) -} - -#[test] -fn lmdb_uncommitted_read_write_txn_does_not_persist() { - let tmp_dir = tempdir().unwrap(); - let env = LmdbEnvironment::new( - &tmp_dir.path().to_path_buf(), - DEFAULT_TEST_MAX_DB_SIZE, - DEFAULT_TEST_MAX_READERS, - ) - .unwrap(); - let store = LmdbTrieStore::new(&env, None, DatabaseFlags::empty()).unwrap(); - let data = super::create_data(); - - assert_eq!( - None, - uncommitted_read_write_txn_does_not_persist::<_, _, _, _, error::Error>( - &store, &env, &data, - ) - .expect("uncommitted_read_write_txn_does_not_persist failed") - .into_iter() - .collect::>>>() - ); - - tmp_dir.close().unwrap(); -} - -fn read_write_transaction_does_not_block_read_transaction<'a, X, E>( - transaction_source: &'a X, -) -> Result<(), E> -where - X: TransactionSource<'a>, - E: From, -{ - let read_write_txn = transaction_source.create_read_write_txn()?; - let read_txn = transaction_source.create_read_txn()?; - read_write_txn.commit()?; - read_txn.commit()?; - Ok(()) -} - -#[test] -fn in_memory_read_write_transaction_does_not_block_read_transaction() { - let env = InMemoryEnvironment::new(); - - assert!( - 
read_write_transaction_does_not_block_read_transaction::<_, in_memory::Error>(&env).is_ok() - ) -} - -#[test] -fn lmdb_read_write_transaction_does_not_block_read_transaction() { - let dir = tempdir().unwrap(); - let env = LmdbEnvironment::new( - &dir.path().to_path_buf(), - DEFAULT_TEST_MAX_DB_SIZE, - DEFAULT_TEST_MAX_READERS, - ) - .unwrap(); - - assert!(read_write_transaction_does_not_block_read_transaction::<_, error::Error>(&env).is_ok()) -} - -fn reads_are_isolated<'a, S, X, E>(store: &S, env: &'a X) -> Result<(), E> -where - S: TrieStore, - X: TransactionSource<'a, Handle = S::Handle>, - S::Error: From, - E: From + From + From, -{ - let TestData(leaf_1_hash, leaf_1) = &super::create_data()[0..1][0]; - - { - let read_txn_1 = env.create_read_txn()?; - let result = store.get(&read_txn_1, &leaf_1_hash)?; - assert_eq!(result, None); - - { - let mut write_txn = env.create_read_write_txn()?; - store.put(&mut write_txn, &leaf_1_hash, &leaf_1)?; - write_txn.commit()?; - } - - let result = store.get(&read_txn_1, &leaf_1_hash)?; - read_txn_1.commit()?; - assert_eq!(result, None); - } - - { - let read_txn_2 = env.create_read_txn()?; - let result = store.get(&read_txn_2, &leaf_1_hash)?; - read_txn_2.commit()?; - assert_eq!(result, Some(leaf_1.to_owned())); - } - - Ok(()) -} - -#[test] -fn in_memory_reads_are_isolated() { - let env = InMemoryEnvironment::new(); - let store = InMemoryTrieStore::new(&env, None); - - assert!(reads_are_isolated::<_, _, in_memory::Error>(&store, &env).is_ok()) -} - -#[test] -fn lmdb_reads_are_isolated() { - let dir = tempdir().unwrap(); - let env = LmdbEnvironment::new( - &dir.path().to_path_buf(), - DEFAULT_TEST_MAX_DB_SIZE, - DEFAULT_TEST_MAX_READERS, - ) - .unwrap(); - let store = LmdbTrieStore::new(&env, None, DatabaseFlags::empty()).unwrap(); - - assert!(reads_are_isolated::<_, _, error::Error>(&store, &env).is_ok()) -} - -fn reads_are_isolated_2<'a, S, X, E>(store: &S, env: &'a X) -> Result<(), E> -where - S: TrieStore, - X: 
TransactionSource<'a, Handle = S::Handle>, - S::Error: From, - E: From + From + From, -{ - let data = super::create_data(); - let TestData(ref leaf_1_hash, ref leaf_1) = data[0]; - let TestData(ref leaf_2_hash, ref leaf_2) = data[1]; - - { - let mut write_txn = env.create_read_write_txn()?; - store.put(&mut write_txn, leaf_1_hash, leaf_1)?; - write_txn.commit()?; - } - - { - let read_txn_1 = env.create_read_txn()?; - { - let mut write_txn = env.create_read_write_txn()?; - store.put(&mut write_txn, leaf_2_hash, leaf_2)?; - write_txn.commit()?; - } - let result = store.get(&read_txn_1, leaf_1_hash)?; - read_txn_1.commit()?; - assert_eq!(result, Some(leaf_1.to_owned())); - } - - { - let read_txn_2 = env.create_read_txn()?; - let result = store.get(&read_txn_2, leaf_2_hash)?; - read_txn_2.commit()?; - assert_eq!(result, Some(leaf_2.to_owned())); - } - - Ok(()) -} - -#[test] -fn in_memory_reads_are_isolated_2() { - let env = InMemoryEnvironment::new(); - let store = InMemoryTrieStore::new(&env, None); - - assert!(reads_are_isolated_2::<_, _, in_memory::Error>(&store, &env).is_ok()) -} - -#[test] -fn lmdb_reads_are_isolated_2() { - let dir = tempdir().unwrap(); - let env = LmdbEnvironment::new( - &dir.path().to_path_buf(), - DEFAULT_TEST_MAX_DB_SIZE, - DEFAULT_TEST_MAX_READERS, - ) - .unwrap(); - let store = LmdbTrieStore::new(&env, None, DatabaseFlags::empty()).unwrap(); - - assert!(reads_are_isolated_2::<_, _, error::Error>(&store, &env).is_ok()) -} - -fn dbs_are_isolated<'a, S, X, E>(env: &'a X, store_a: &S, store_b: &S) -> Result<(), E> -where - S: TrieStore, - X: TransactionSource<'a, Handle = S::Handle>, - S::Error: From, - E: From + From + From, -{ - let data = super::create_data(); - let TestData(ref leaf_1_hash, ref leaf_1) = data[0]; - let TestData(ref leaf_2_hash, ref leaf_2) = data[1]; - - { - let mut write_txn = env.create_read_write_txn()?; - store_a.put(&mut write_txn, leaf_1_hash, leaf_1)?; - write_txn.commit()?; - } - - { - let mut write_txn = 
env.create_read_write_txn()?; - store_b.put(&mut write_txn, leaf_2_hash, leaf_2)?; - write_txn.commit()?; - } - - { - let read_txn = env.create_read_txn()?; - let result = store_a.get(&read_txn, leaf_1_hash)?; - assert_eq!(result, Some(leaf_1.to_owned())); - let result = store_a.get(&read_txn, leaf_2_hash)?; - assert_eq!(result, None); - read_txn.commit()?; - } - - { - let read_txn = env.create_read_txn()?; - let result = store_b.get(&read_txn, leaf_1_hash)?; - assert_eq!(result, None); - let result = store_b.get(&read_txn, leaf_2_hash)?; - assert_eq!(result, Some(leaf_2.to_owned())); - read_txn.commit()?; - } - - Ok(()) -} - -#[test] -fn in_memory_dbs_are_isolated() { - let env = InMemoryEnvironment::new(); - let store_a = InMemoryTrieStore::new(&env, Some("a")); - let store_b = InMemoryTrieStore::new(&env, Some("b")); - - assert!(dbs_are_isolated::<_, _, in_memory::Error>(&env, &store_a, &store_b).is_ok()) -} - -#[test] -fn lmdb_dbs_are_isolated() { - let dir = tempdir().unwrap(); - let env = LmdbEnvironment::new( - &dir.path().to_path_buf(), - DEFAULT_TEST_MAX_DB_SIZE, - DEFAULT_TEST_MAX_READERS, - ) - .unwrap(); - let store_a = LmdbTrieStore::new(&env, Some("a"), DatabaseFlags::empty()).unwrap(); - let store_b = LmdbTrieStore::new(&env, Some("b"), DatabaseFlags::empty()).unwrap(); - - assert!(dbs_are_isolated::<_, _, error::Error>(&env, &store_a, &store_b).is_ok()) -} - -fn transactions_can_be_used_across_sub_databases<'a, S, X, E>( - env: &'a X, - store_a: &S, - store_b: &S, -) -> Result<(), E> -where - S: TrieStore, - X: TransactionSource<'a, Handle = S::Handle>, - S::Error: From, - E: From + From + From, -{ - let data = super::create_data(); - let TestData(ref leaf_1_hash, ref leaf_1) = data[0]; - let TestData(ref leaf_2_hash, ref leaf_2) = data[1]; - - { - let mut write_txn = env.create_read_write_txn()?; - store_a.put(&mut write_txn, leaf_1_hash, leaf_1)?; - store_b.put(&mut write_txn, leaf_2_hash, leaf_2)?; - write_txn.commit()?; - } - - { - let read_txn 
= env.create_read_txn()?; - let result = store_a.get(&read_txn, leaf_1_hash)?; - assert_eq!(result, Some(leaf_1.to_owned())); - let result = store_b.get(&read_txn, leaf_2_hash)?; - assert_eq!(result, Some(leaf_2.to_owned())); - read_txn.commit()?; - } - - Ok(()) -} - -#[test] -fn in_memory_transactions_can_be_used_across_sub_databases() { - let env = InMemoryEnvironment::new(); - let store_a = InMemoryTrieStore::new(&env, Some("a")); - let store_b = InMemoryTrieStore::new(&env, Some("b")); - - assert!( - transactions_can_be_used_across_sub_databases::<_, _, in_memory::Error>( - &env, &store_a, &store_b, - ) - .is_ok() - ); -} - -#[test] -fn lmdb_transactions_can_be_used_across_sub_databases() { - let dir = tempdir().unwrap(); - let env = LmdbEnvironment::new( - &dir.path().to_path_buf(), - DEFAULT_TEST_MAX_DB_SIZE, - DEFAULT_TEST_MAX_READERS, - ) - .unwrap(); - let store_a = LmdbTrieStore::new(&env, Some("a"), DatabaseFlags::empty()).unwrap(); - let store_b = LmdbTrieStore::new(&env, Some("b"), DatabaseFlags::empty()).unwrap(); - - assert!( - transactions_can_be_used_across_sub_databases::<_, _, error::Error>( - &env, &store_a, &store_b, - ) - .is_ok() - ) -} - -fn uncommitted_transactions_across_sub_databases_do_not_persist<'a, S, X, E>( - env: &'a X, - store_a: &S, - store_b: &S, -) -> Result<(), E> -where - S: TrieStore, - X: TransactionSource<'a, Handle = S::Handle>, - S::Error: From, - E: From + From + From, -{ - let data = super::create_data(); - let TestData(ref leaf_1_hash, ref leaf_1) = data[0]; - let TestData(ref leaf_2_hash, ref leaf_2) = data[1]; - - { - let mut write_txn = env.create_read_write_txn()?; - store_a.put(&mut write_txn, leaf_1_hash, leaf_1)?; - store_b.put(&mut write_txn, leaf_2_hash, leaf_2)?; - } - - { - let read_txn = env.create_read_txn()?; - let result = store_a.get(&read_txn, leaf_1_hash)?; - assert_eq!(result, None); - let result = store_b.get(&read_txn, leaf_2_hash)?; - assert_eq!(result, None); - read_txn.commit()?; - } - - Ok(()) 
-} - -#[test] -fn in_memory_uncommitted_transactions_across_sub_databases_do_not_persist() { - let env = InMemoryEnvironment::new(); - let store_a = InMemoryTrieStore::new(&env, Some("a")); - let store_b = InMemoryTrieStore::new(&env, Some("b")); - - assert!( - uncommitted_transactions_across_sub_databases_do_not_persist::<_, _, in_memory::Error>( - &env, &store_a, &store_b, - ) - .is_ok() - ); -} - -#[test] -fn lmdb_uncommitted_transactions_across_sub_databases_do_not_persist() { - let dir = tempdir().unwrap(); - let env = LmdbEnvironment::new( - &dir.path().to_path_buf(), - DEFAULT_TEST_MAX_DB_SIZE, - DEFAULT_TEST_MAX_READERS, - ) - .unwrap(); - let store_a = LmdbTrieStore::new(&env, Some("a"), DatabaseFlags::empty()).unwrap(); - let store_b = LmdbTrieStore::new(&env, Some("b"), DatabaseFlags::empty()).unwrap(); - - assert!( - uncommitted_transactions_across_sub_databases_do_not_persist::<_, _, error::Error>( - &env, &store_a, &store_b, - ) - .is_ok() - ) -} diff --git a/execution_engine_testing/cargo_casper/.gitignore b/execution_engine_testing/cargo_casper/.gitignore deleted file mode 100644 index 212aaac968..0000000000 --- a/execution_engine_testing/cargo_casper/.gitignore +++ /dev/null @@ -1 +0,0 @@ -wasm/* diff --git a/execution_engine_testing/cargo_casper/Cargo.toml b/execution_engine_testing/cargo_casper/Cargo.toml deleted file mode 100644 index 1966021eb3..0000000000 --- a/execution_engine_testing/cargo_casper/Cargo.toml +++ /dev/null @@ -1,26 +0,0 @@ -[package] -name = "cargo-casper" -version = "1.0.0" -authors = ["Fraser Hutchison "] -edition = "2018" -description = "Command line tool for creating a Wasm smart contract and tests for use on the Casper network." 
-readme = "README.md" -documentation = "https://docs.rs/cargo-casper" -homepage = "https://casperlabs.io" -repository = "https://github.com/CasperLabs/casper-node/tree/master/execution_engine_testing/cargo_casper" -license-file = "../../LICENSE" -include = [ - "src/*.rs", - "Cargo.lock", - "Cargo.toml", -] - -[dependencies] -clap = "2" -colour = "0.6" -once_cell = "1.5.2" - -[dev-dependencies] -assert_cmd = "1" -tempfile = "3" -toml = "0.5.7" diff --git a/execution_engine_testing/cargo_casper/README.md b/execution_engine_testing/cargo_casper/README.md deleted file mode 100644 index ac8a1805a3..0000000000 --- a/execution_engine_testing/cargo_casper/README.md +++ /dev/null @@ -1,92 +0,0 @@ -# `cargo-casper` - -[![LOGO](https://raw.githubusercontent.com/CasperLabs/casper-node/master/images/CasperLabs_Logo_Horizontal_RGB.png)](https://casperlabs.io/) - -[![Build Status](https://drone-auto.casperlabs.io/api/badges/CasperLabs/casper-node/status.svg?branch=master)](http://drone-auto.casperlabs.io/CasperLabs/casper-node) -[![Crates.io](https://img.shields.io/crates/v/cargo-casper)](https://crates.io/crates/cargo-casper) -[![Documentation](https://docs.rs/cargo-casper/badge.svg)](https://docs.rs/cargo-casper) -[![License](https://img.shields.io/badge/license-COSL-blue.svg)](https://github.com/CasperLabs/casper-node/blob/master/LICENSE) - -A command line tool for creating a Wasm smart contract and tests for use on the Casper network. - -## License - -Licensed under the [CasperLabs Open Source License (COSL)](https://github.com/CasperLabs/casper-node/blob/master/LICENSE). - ---- - -## Installation - -`cargo casper` is a Cargo subcommand which can be installed via `cargo install`: - -``` -cargo install cargo-casper -``` - -To install from the latest `dev` branch: - -``` -git clone https://github.com/CasperLabs/casper-node -cd casper-node/execution_engine_testing/cargo_casper -cargo install cargo-casper --path=. 
-``` - -## Usage - -To create a folder "my_project" containing an example contract and a separate test crate for the contract: - -``` -cargo casper my_project -``` - -This creates the following files: - -``` -my_project/ -├── contract -│   ├── .cargo -│   │   └── config -│   ├── Cargo.toml -│   ├── rust-toolchain -│   └── src -│   └── main.rs -├── tests -│   ├── build.rs -│   ├── Cargo.toml -│   ├── rust-toolchain -│   ├── src -│   │   └── integration_tests.rs -└── .travis.yml -``` - -### Building the contract - -To build the contract, the correct version of Rust must be installed along with the Wasm target: - -``` -cd my_project/contract -rustup install $(cat rust-toolchain) -rustup target add --toolchain=$(cat rust-toolchain) wasm32-unknown-unknown -``` - -The contract can now be built using: - -``` -cargo build --release -``` - -and will be built to `my_project/contract/target/wasm32-unknown-unknown/release/contract.wasm`. - -### Testing the contract - -Running the test will automatically build the contract in release mode, copy it to the "tests/wasm" folder, then build -and run the test: - -``` -cd my_project/tests -cargo test -``` - -## License - -Licensed under the [CasperLabs Open Source License (COSL)](../../LICENSE). diff --git a/execution_engine_testing/cargo_casper/src/common.rs b/execution_engine_testing/cargo_casper/src/common.rs deleted file mode 100644 index f4a126f9ef..0000000000 --- a/execution_engine_testing/cargo_casper/src/common.rs +++ /dev/null @@ -1,170 +0,0 @@ -use std::{ - fs::{self, OpenOptions}, - io::Write, - path::Path, - process::{self, Command}, - str, -}; - -use colour::e_red; -use once_cell::sync::Lazy; - -use crate::{dependency::Dependency, ARGS, FAILURE_EXIT_CODE}; - -pub static CL_CONTRACT: Lazy = - Lazy::new(|| Dependency::new("casper-contract", "1.0.0", "smart_contracts/contract")); -pub static CL_TYPES: Lazy = - Lazy::new(|| Dependency::new("casper-types", "1.0.0", "types")); - -pub fn print_error_and_exit(msg: &str) -> ! 
{ - e_red!("error"); - eprintln!("{}", msg); - process::exit(FAILURE_EXIT_CODE) -} - -pub fn run_cargo_new(package_name: &str) { - let mut command = Command::new("cargo"); - command - .args(&["new", "--vcs", "none"]) - .arg(package_name) - .current_dir(ARGS.root_path()); - - let output = match command.output() { - Ok(output) => output, - Err(error) => print_error_and_exit(&format!(": failed to run '{:?}': {}", command, error)), - }; - - if !output.status.success() { - let stdout = str::from_utf8(&output.stdout).expect("should be valid UTF8"); - let stderr = str::from_utf8(&output.stderr).expect("should be valid UTF8"); - print_error_and_exit(&format!( - ": failed to run '{:?}':\n{}\n{}\n", - command, stdout, stderr - )); - } -} - -pub fn create_dir_all>(path: P) { - if let Err(error) = fs::create_dir_all(path.as_ref()) { - print_error_and_exit(&format!( - ": failed to create '{}': {}", - path.as_ref().display(), - error - )); - } -} - -pub fn write_file, C: AsRef<[u8]>>(path: P, contents: C) { - if let Err(error) = fs::write(path.as_ref(), contents) { - print_error_and_exit(&format!( - ": failed to write to '{}': {}", - path.as_ref().display(), - error - )); - } -} - -pub fn append_to_file, C: AsRef<[u8]>>(path: P, contents: C) { - let mut file = match OpenOptions::new().append(true).open(path.as_ref()) { - Ok(file) => file, - Err(error) => { - print_error_and_exit(&format!( - ": failed to open '{}': {}", - path.as_ref().display(), - error - )); - } - }; - if let Err(error) = file.write_all(contents.as_ref()) { - print_error_and_exit(&format!( - ": failed to append to '{}': {}", - path.as_ref().display(), - error - )); - } -} - -pub fn remove_file>(path: P) { - if let Err(error) = fs::remove_file(path.as_ref()) { - print_error_and_exit(&format!( - ": failed to remove '{}': {}", - path.as_ref().display(), - error - )); - } -} - -pub fn copy_file, D: AsRef>(source: S, destination: D) { - if let Err(error) = fs::copy(source.as_ref(), destination.as_ref()) { - 
print_error_and_exit(&format!( - ": failed to copy '{}' to '{}': {}", - source.as_ref().display(), - destination.as_ref().display(), - error - )); - } -} - -#[cfg(test)] -pub mod tests { - use std::{env, fs}; - - use toml::Value; - - use super::*; - - const CL_CONTRACT_TOML_PATH: &str = "smart_contracts/contract/Cargo.toml"; - const CL_TYPES_TOML_PATH: &str = "types/Cargo.toml"; - const PACKAGE_FIELD_NAME: &str = "package"; - const VERSION_FIELD_NAME: &str = "version"; - const PATH_PREFIX: &str = "/execution_engine_testing/cargo_casper"; - - /// Returns the absolute path of `relative_path` where this is relative to "casper-node". - /// Panics if the current working directory is not within "casper-node". - pub fn full_path_from_path_relative_to_workspace(relative_path: &str) -> String { - let mut full_path = env::current_dir().unwrap().display().to_string(); - let index = full_path.find(PATH_PREFIX).unwrap_or_else(|| { - panic!( - "test should be run from within casper-node workspace: {} relative path: {}", - full_path, relative_path, - ) - }); - full_path.replace_range(index + 1.., relative_path); - full_path - } - - /// Checks the version of the package specified by the Cargo.toml at `toml_path` is equal to - /// the hard-coded one specified in `dep.version()`. - pub fn check_package_version(dep: &Dependency, toml_path: &str) { - let toml_path = full_path_from_path_relative_to_workspace(toml_path); - - let raw_toml_contents = - fs::read(&toml_path).unwrap_or_else(|_| panic!("should read {}", toml_path)); - let toml_contents = String::from_utf8_lossy(&raw_toml_contents).to_string(); - let toml = toml_contents.parse::().unwrap(); - - let expected_version = toml[PACKAGE_FIELD_NAME][VERSION_FIELD_NAME] - .as_str() - .unwrap(); - // If this fails, ensure `dep.version()` is updated to match the value in the Cargo.toml at - // `toml_path`. 
- assert_eq!( - expected_version, - dep.version(), - "\n\nEnsure local version of {:?} is updated to {} as defined in {}\n\n", - dep, - expected_version, - toml_path - ); - } - - #[test] - fn check_cl_contract_version() { - check_package_version(&*CL_CONTRACT, CL_CONTRACT_TOML_PATH); - } - - #[test] - fn check_cl_types_version() { - check_package_version(&*CL_TYPES, CL_TYPES_TOML_PATH); - } -} diff --git a/execution_engine_testing/cargo_casper/src/contract_package.rs b/execution_engine_testing/cargo_casper/src/contract_package.rs deleted file mode 100644 index 22518731a4..0000000000 --- a/execution_engine_testing/cargo_casper/src/contract_package.rs +++ /dev/null @@ -1,106 +0,0 @@ -//! Consts and functions used to generate the files comprising the "contract" package when running -//! the tool. - -use std::path::PathBuf; - -use once_cell::sync::Lazy; - -use crate::{ - common::{self, CL_CONTRACT, CL_TYPES}, - ARGS, TOOLCHAIN, -}; - -const PACKAGE_NAME: &str = "contract"; - -const MAIN_RS_CONTENTS: &str = r#"#![cfg_attr( - not(target_arch = "wasm32"), - crate_type = "target arch should be wasm32" -)] -#![no_main] - -use casper_contract::{ - contract_api::{runtime, storage}, -}; -use casper_types::{Key, URef}; - -const KEY: &str = "special_value"; -const ARG_MESSAGE: &str = "message"; - -fn store(value: String) { - // Store `value` under a new unforgeable reference. - let value_ref: URef = storage::new_uref(value); - - // Wrap the unforgeable reference in a value of type `Key`. - let value_key: Key = value_ref.into(); - - // Store this key under the name "special_value" in context-local storage. - runtime::put_key(KEY, value_key); -} - -// All session code must have a `call` entrypoint. -#[no_mangle] -pub extern "C" fn call() { - // Get the optional first argument supplied to the argument. 
- let value: String = runtime::get_named_arg(ARG_MESSAGE); - store(value); -} -"#; - -const CONFIG_TOML_CONTENTS: &str = r#"[build] -target = "wasm32-unknown-unknown" -"#; - -static CARGO_TOML: Lazy = - Lazy::new(|| ARGS.root_path().join(PACKAGE_NAME).join("Cargo.toml")); -static RUST_TOOLCHAIN: Lazy = - Lazy::new(|| ARGS.root_path().join(PACKAGE_NAME).join("rust-toolchain")); -static MAIN_RS: Lazy = - Lazy::new(|| ARGS.root_path().join(PACKAGE_NAME).join("src/main.rs")); -static CONFIG_TOML: Lazy = Lazy::new(|| { - ARGS.root_path() - .join(PACKAGE_NAME) - .join(".cargo/config.toml") -}); -static CARGO_TOML_ADDITIONAL_CONTENTS: Lazy = Lazy::new(|| { - format!( - r#"{} -{} - -[[bin]] -name = "{}" -path = "src/main.rs" -bench = false -doctest = false -test = false - -[features] -default = ["casper-contract/std", "casper-types/std", "casper-contract/test-support"] - -[profile.release] -lto = true -"#, - *CL_CONTRACT, *CL_TYPES, PACKAGE_NAME - ) -}); - -pub fn run_cargo_new() { - common::run_cargo_new(PACKAGE_NAME); -} - -pub fn update_cargo_toml() { - common::append_to_file(&*CARGO_TOML, &*CARGO_TOML_ADDITIONAL_CONTENTS); -} - -pub fn add_rust_toolchain() { - common::write_file(&*RUST_TOOLCHAIN, format!("{}\n", TOOLCHAIN)); -} - -pub fn update_main_rs() { - common::write_file(&*MAIN_RS, MAIN_RS_CONTENTS); -} - -pub fn add_config_toml() { - let folder = CONFIG_TOML.parent().expect("should have parent"); - common::create_dir_all(folder); - common::write_file(&*CONFIG_TOML, CONFIG_TOML_CONTENTS); -} diff --git a/execution_engine_testing/cargo_casper/src/dependency.rs b/execution_engine_testing/cargo_casper/src/dependency.rs deleted file mode 100644 index 3a385de468..0000000000 --- a/execution_engine_testing/cargo_casper/src/dependency.rs +++ /dev/null @@ -1,48 +0,0 @@ -use std::fmt::{Display, Formatter, Result}; - -use crate::ARGS; - -/// Used to hold the information about the Casper dependencies which will be required by the -/// generated Cargo.toml files. 
-/// -/// The information is output in a form suitable for injection into Cargo.toml via implementing the -/// `std::fmt::Display` trait. -#[derive(Debug)] -pub struct Dependency { - name: String, - version: String, - /// Path relative to "casper-node" - relative_path: String, -} - -impl Dependency { - pub fn new(name: &str, version: &str, relative_path: &str) -> Self { - Dependency { - name: name.to_string(), - version: version.to_string(), - relative_path: relative_path.to_string(), - } - } - - #[cfg(test)] - pub fn version(&self) -> &str { - &self.version - } -} - -impl Display for Dependency { - fn fmt(&self, formatter: &mut Formatter) -> Result { - if let Some(workspace_path) = ARGS.workspace_path() { - write!( - formatter, - r#"{} = {{ version = "{}", path = "{}/{}" }}"#, - self.name, - self.version, - workspace_path.display(), - self.relative_path - ) - } else { - write!(formatter, r#"{} = "{}""#, self.name, self.version) - } - } -} diff --git a/execution_engine_testing/cargo_casper/src/main.rs b/execution_engine_testing/cargo_casper/src/main.rs deleted file mode 100644 index f73c51ce2b..0000000000 --- a/execution_engine_testing/cargo_casper/src/main.rs +++ /dev/null @@ -1,167 +0,0 @@ -//! Command line tool for creating a Wasm contract and tests for use on the Casper Platform. 
- -#![deny(warnings)] - -use std::{ - env, - path::{Path, PathBuf}, -}; - -use clap::{crate_version, App, Arg}; -use once_cell::sync::Lazy; - -pub mod common; -mod contract_package; -pub mod dependency; -mod tests_package; -mod travis_yml; - -const APP_NAME: &str = "cargo-casper"; -const ABOUT: &str = - "A command line tool for creating a Wasm contract and tests at for use on the \ - Casper Platform."; -const TOOLCHAIN: &str = "nightly-2020-12-16"; - -const ROOT_PATH_ARG_NAME: &str = "path"; -const ROOT_PATH_ARG_VALUE_NAME: &str = "path"; -const ROOT_PATH_ARG_HELP: &str = "Path to new folder for contract and tests"; - -const WORKSPACE_PATH_ARG_NAME: &str = "workspace-path"; -const WORKSPACE_PATH_ARG_LONG: &str = "workspace-path"; - -const FAILURE_EXIT_CODE: i32 = 101; - -static USAGE: Lazy = Lazy::new(|| { - format!( - r#"cargo casper [FLAGS] - rustup install {0} - rustup target add --toolchain {0} wasm32-unknown-unknown - cd /tests - cargo test"#, - TOOLCHAIN - ) -}); -static ARGS: Lazy = Lazy::new(Args::new); - -#[derive(Debug)] -struct Args { - root_path: PathBuf, - workspace_path: Option, -} - -impl Args { - fn new() -> Self { - // If run normally, the args passed are 'cargo-casper', ''. However, if run as - // a cargo subcommand (i.e. cargo casper ), then cargo injects a new arg: - // 'cargo-casper', 'casper', ''. We need to filter this extra arg out. - // - // This yields the situation where if the binary receives args of 'cargo-casper', 'casper' - // then it might be a valid call (not a cargo subcommand - the user entered - // 'cargo-casper casper' meaning to create a target dir called 'casper') or it might be an - // invalid call (the user entered 'cargo casper' with no target dir specified). The latter - // case is assumed as being more likely. 
- let filtered_args_iter = env::args().enumerate().filter_map(|(index, value)| { - if index == 1 && value.as_str() == "casper" { - None - } else { - Some(value) - } - }); - - let root_path_arg = Arg::with_name(ROOT_PATH_ARG_NAME) - .required(true) - .value_name(ROOT_PATH_ARG_VALUE_NAME) - .help(ROOT_PATH_ARG_HELP); - - let workspace_path_arg = Arg::with_name(WORKSPACE_PATH_ARG_NAME) - .long(WORKSPACE_PATH_ARG_LONG) - .takes_value(true) - .hidden(true); - - let arg_matches = App::new(APP_NAME) - .version(crate_version!()) - .about(ABOUT) - .usage(USAGE.as_str()) - .arg(root_path_arg) - .arg(workspace_path_arg) - .get_matches_from(filtered_args_iter); - - let root_path = arg_matches - .value_of(ROOT_PATH_ARG_NAME) - .expect("expected path") - .into(); - - let workspace_path = arg_matches - .value_of(WORKSPACE_PATH_ARG_NAME) - .map(PathBuf::from); - - Args { - root_path, - workspace_path, - } - } - - pub fn root_path(&self) -> &Path { - &self.root_path - } - - pub fn workspace_path(&self) -> Option<&Path> { - self.workspace_path.as_deref() - } -} - -fn main() { - if ARGS.root_path().exists() { - common::print_error_and_exit(&format!( - ": destination '{}' already exists", - ARGS.root_path().display() - )); - } - - common::create_dir_all(ARGS.root_path()); - - contract_package::run_cargo_new(); - contract_package::update_cargo_toml(); - contract_package::add_rust_toolchain(); - contract_package::update_main_rs(); - contract_package::add_config_toml(); - - tests_package::run_cargo_new(); - tests_package::update_cargo_toml(); - tests_package::add_rust_toolchain(); - tests_package::add_build_rs(); - tests_package::replace_main_rs(); - - travis_yml::create(); -} - -#[cfg(test)] -mod tests { - use std::{env, fs}; - - use super::TOOLCHAIN; - - const PATH_PREFIX: &str = "/execution_engine_testing/cargo_casper"; - - #[test] - fn check_toolchain_version() { - let mut toolchain_path = env::current_dir().unwrap().display().to_string(); - let index = 
toolchain_path.find(PATH_PREFIX).unwrap_or_else(|| { - panic!( - "test should be run from within casper-node workspace: {}", - toolchain_path - ) - }); - toolchain_path.replace_range(index.., "/rust-toolchain"); - - let toolchain_contents = - fs::read(&toolchain_path).unwrap_or_else(|_| panic!("should read {}", toolchain_path)); - let expected_toolchain_value = String::from_utf8_lossy(&toolchain_contents) - .trim() - .to_string(); - - // If this fails, ensure `TOOLCHAIN` is updated to match the value in - // "casper-node/rust-toolchain". - assert_eq!(&*expected_toolchain_value, TOOLCHAIN); - } -} diff --git a/execution_engine_testing/cargo_casper/src/tests_package.rs b/execution_engine_testing/cargo_casper/src/tests_package.rs deleted file mode 100644 index 7098bcad47..0000000000 --- a/execution_engine_testing/cargo_casper/src/tests_package.rs +++ /dev/null @@ -1,171 +0,0 @@ -//! Consts and functions used to generate the files comprising the "tests" package when running the -//! tool. - -use std::path::PathBuf; - -use once_cell::sync::Lazy; - -use crate::{ - common::{self, CL_CONTRACT, CL_TYPES}, - dependency::Dependency, - ARGS, TOOLCHAIN, -}; - -const PACKAGE_NAME: &str = "tests"; - -const INTEGRATION_TESTS_RS_CONTENTS: &str = r#"#[cfg(test)] -mod tests { - use casper_engine_test_support::{ - Code, Error, SessionBuilder, TestContextBuilder, Value, - }; - use casper_types::{runtime_args, RuntimeArgs, U512, account::AccountHash, PublicKey, SecretKey, AsymmetricType}; - - const MY_ACCOUNT: [u8; 32] = [7u8; 32]; - // define KEY constant to match that in the contract - const KEY: &str = "special_value"; - const VALUE: &str = "hello world"; - const ARG_MESSAGE: &str = "message"; - - #[test] - fn should_store_hello_world() { - let public_key: PublicKey = SecretKey::ed25519_from_bytes(MY_ACCOUNT).unwrap().into(); - let account_addr = AccountHash::from(&public_key); - - let mut context = TestContextBuilder::new() - .with_public_key(public_key, 
U512::from(500_000_000_000_000_000u64)) - .build(); - - // The test framework checks for compiled Wasm files in '/wasm'. Paths - // relative to the current working dir (e.g. 'wasm/contract.wasm') can also be used, as can - // absolute paths. - let session_code = Code::from("contract.wasm"); - let session_args = runtime_args! { - ARG_MESSAGE => VALUE, - }; - let session = SessionBuilder::new(session_code, session_args) - .with_address(account_addr) - .with_authorization_keys(&[account_addr]) - .build(); - - let result_of_query: Result = - context.run(session).query(account_addr, &[KEY.to_string()]); - - let returned_value = result_of_query.expect("should be a value"); - - let expected_value = Value::from_t(VALUE.to_string()).expect("should construct Value"); - assert_eq!(expected_value, returned_value); - } -} - -fn main() { - panic!("Execute \"cargo test\" to test the contract, not \"cargo run\"."); -} -"#; - -const BUILD_RS_CONTENTS: &str = r#"use std::{env, fs, path::PathBuf, process::Command}; - -const CONTRACT_ROOT: &str = "../contract"; -const CONTRACT_CARGO_TOML: &str = "../contract/Cargo.toml"; -const CONTRACT_MAIN_RS: &str = "../contract/src/main.rs"; -const BUILD_ARGS: [&str; 2] = ["build", "--release"]; -const WASM_FILENAME: &str = "contract.wasm"; -const ORIGINAL_WASM_DIR: &str = "../contract/target/wasm32-unknown-unknown/release"; -const NEW_WASM_DIR: &str = "wasm"; - -fn main() { - // Watch contract source files for changes. - println!("cargo:rerun-if-changed={}", CONTRACT_CARGO_TOML); - println!("cargo:rerun-if-changed={}", CONTRACT_MAIN_RS); - - // Build the contract. - let output = Command::new("cargo") - .current_dir(CONTRACT_ROOT) - .args(&BUILD_ARGS) - .output() - .expect("Expected to build Wasm contracts"); - assert!( - output.status.success(), - "Failed to build Wasm contracts:\n{:?}", - output - ); - - // Move the compiled Wasm file to our own build folder ("wasm/contract.wasm"). 
- let new_wasm_dir = env::current_dir().unwrap().join(NEW_WASM_DIR); - let _ = fs::create_dir(&new_wasm_dir); - - let original_wasm_file = PathBuf::from(ORIGINAL_WASM_DIR).join(WASM_FILENAME); - let copied_wasm_file = new_wasm_dir.join(WASM_FILENAME); - fs::copy(original_wasm_file, copied_wasm_file).unwrap(); -} -"#; - -static CARGO_TOML: Lazy = - Lazy::new(|| ARGS.root_path().join(PACKAGE_NAME).join("Cargo.toml")); -static RUST_TOOLCHAIN: Lazy = - Lazy::new(|| ARGS.root_path().join(PACKAGE_NAME).join("rust-toolchain")); -static BUILD_RS: Lazy = Lazy::new(|| ARGS.root_path().join(PACKAGE_NAME).join("build.rs")); -static MAIN_RS: Lazy = - Lazy::new(|| ARGS.root_path().join(PACKAGE_NAME).join("src/main.rs")); -static INTEGRATION_TESTS_RS: Lazy = Lazy::new(|| { - ARGS.root_path() - .join(PACKAGE_NAME) - .join("src/integration_tests.rs") -}); -static ENGINE_TEST_SUPPORT: Lazy = Lazy::new(|| { - Dependency::new( - "casper-engine-test-support", - "1.0.0", - "execution_engine_testing/test_support", - ) -}); -static CARGO_TOML_ADDITIONAL_CONTENTS: Lazy = Lazy::new(|| { - format!( - r#" -[dev-dependencies] -{} -{} -{} - -[[bin]] -name = "integration-tests" -path = "src/integration_tests.rs" - -[features] -default = ["casper-contract/std", "casper-types/std", "casper-engine-test-support/test-support", "casper-contract/test-support""#, - *CL_CONTRACT, *CL_TYPES, *ENGINE_TEST_SUPPORT, - ) -}); - -pub fn run_cargo_new() { - common::run_cargo_new(PACKAGE_NAME); -} - -pub fn update_cargo_toml() { - let cargo_toml_additional_contents = format!("{}{}\n", &*CARGO_TOML_ADDITIONAL_CONTENTS, "]"); - common::append_to_file(&*CARGO_TOML, cargo_toml_additional_contents); -} - -pub fn add_rust_toolchain() { - common::write_file(&*RUST_TOOLCHAIN, format!("{}\n", TOOLCHAIN)); -} - -pub fn add_build_rs() { - common::write_file(&*BUILD_RS, BUILD_RS_CONTENTS); -} - -pub fn replace_main_rs() { - common::remove_file(&*MAIN_RS); - common::write_file(&*INTEGRATION_TESTS_RS, 
INTEGRATION_TESTS_RS_CONTENTS); -} - -#[cfg(test)] -pub mod tests { - use super::*; - - const ENGINE_TEST_SUPPORT_TOML_PATH: &str = "execution_engine_testing/test_support/Cargo.toml"; - - #[test] - fn check_engine_test_support_version() { - common::tests::check_package_version(&*ENGINE_TEST_SUPPORT, ENGINE_TEST_SUPPORT_TOML_PATH); - } -} diff --git a/execution_engine_testing/cargo_casper/src/travis_yml.rs b/execution_engine_testing/cargo_casper/src/travis_yml.rs deleted file mode 100644 index 473f43c2e4..0000000000 --- a/execution_engine_testing/cargo_casper/src/travis_yml.rs +++ /dev/null @@ -1,12 +0,0 @@ -use crate::{common, ARGS}; - -const FILENAME: &str = ".travis.yml"; -const CONTENTS: &str = r#"language: rust -script: - - cd tests && cargo build - - cd tests && cargo test -"#; - -pub fn create() { - common::write_file(ARGS.root_path().join(FILENAME), CONTENTS); -} diff --git a/execution_engine_testing/cargo_casper/tests/integration_tests.rs b/execution_engine_testing/cargo_casper/tests/integration_tests.rs deleted file mode 100644 index 10f9234d6e..0000000000 --- a/execution_engine_testing/cargo_casper/tests/integration_tests.rs +++ /dev/null @@ -1,76 +0,0 @@ -use std::{fs, process::Output}; - -use assert_cmd::Command; -use once_cell::sync::Lazy; - -const FAILURE_EXIT_CODE: i32 = 101; -const SUCCESS_EXIT_CODE: i32 = 0; -const TEST_PATH: &str = "test"; - -static WORKSPACE_PATH_ARG: Lazy = - Lazy::new(|| format!("--workspace-path={}/../../", env!("CARGO_MANIFEST_DIR"))); - -#[test] -fn should_fail_when_target_path_already_exists() { - let test_dir = tempfile::tempdir().unwrap().into_path(); - let output_error = Command::cargo_bin(env!("CARGO_PKG_NAME")) - .unwrap() - .arg(&test_dir) - .unwrap_err(); - - let exit_code = output_error.as_output().unwrap().status.code().unwrap(); - assert_eq!(FAILURE_EXIT_CODE, exit_code); - - let stderr: String = String::from_utf8_lossy(&output_error.as_output().unwrap().stderr).into(); - let expected_msg_fragment = format!(": 
destination '{}' already exists", test_dir.display()); - assert!(stderr.contains(&expected_msg_fragment)); - assert!(stderr.contains("error")); - - fs::remove_dir_all(&test_dir).unwrap(); -} - -/// Runs `cmd` and returns the `Output` if successful, or panics on failure. -fn output_from_command(mut command: Command) -> Output { - match command.ok() { - Ok(output) => output, - Err(error) => { - panic!( - "\nFailed to execute {:?}\n===== stderr begin =====\n{}\n===== stderr end =====\n", - command, - String::from_utf8_lossy(&error.as_output().unwrap().stderr) - ); - } - } -} - -fn run_tool_and_resulting_tests() { - let temp_dir = tempfile::tempdir().unwrap().into_path(); - - // Run 'cargo-casper / --workspace-path=' - let subdir = TEST_PATH; - let test_dir = temp_dir.join(subdir); - let mut tool_cmd = Command::cargo_bin(env!("CARGO_PKG_NAME")).unwrap(); - tool_cmd.arg(&test_dir); - tool_cmd.arg(&*WORKSPACE_PATH_ARG); - - // The CI environment doesn't have a Git user configured, so we can set the env var `USER` for - // use by 'cargo new' which is called as a subprocess of 'cargo-casper'. - tool_cmd.env("USER", "tester"); - let tool_output = output_from_command(tool_cmd); - assert_eq!(SUCCESS_EXIT_CODE, tool_output.status.code().unwrap()); - - // Run 'cargo test' in the 'tests' folder of the generated project. This builds the Wasm - // contract as well as the tests. - let mut test_cmd = Command::new(env!("CARGO")); - test_cmd.arg("test").current_dir(test_dir.join("tests")); - let test_output = output_from_command(test_cmd); - assert_eq!(SUCCESS_EXIT_CODE, test_output.status.code().unwrap()); - - // Cleans up temporary directory, but leaves it otherwise if the test failed. 
- fs::remove_dir_all(&temp_dir).unwrap(); -} - -#[test] -fn should_run_casperlabs_node() { - run_tool_and_resulting_tests(); -} diff --git a/execution_engine_testing/test_support/CHANGELOG.md b/execution_engine_testing/test_support/CHANGELOG.md new file mode 100644 index 0000000000..a02cecbc57 --- /dev/null +++ b/execution_engine_testing/test_support/CHANGELOG.md @@ -0,0 +1,215 @@ +# Changelog + +All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog]. + +[comment]: <> (Added: new features) +[comment]: <> (Changed: changes in existing functionality) +[comment]: <> (Deprecated: soon-to-be removed features) +[comment]: <> (Removed: now removed features) +[comment]: <> (Fixed: any bug fixes) +[comment]: <> (Security: in case of vulnerabilities) + + + +## 7.0.1 + +### Added +* Provide `from_chainspec_path` and `max_associated_keys` helper methods on `ChainspecConfig`. +* Provide functions for converting from `ChainspecConfig` to `EngineConfig`. +* Provide `try_exec` method on `WasmTestBuilder` for fallible contract execution. +* Provide `PRODUCTION_CHAINSPEC_PATH`: a lazy static defining the path to the production chainspec.toml file. + + + +## 7.0.0 + +### Added +* Provide `calculate_refund_amount` on `WasmTestBuilder`. +* Provide `upgrade_with_upgrade_request_and_config` on `WasmTestBuilder`. + + + +## 6.0.0 + +### Changed +* Update `casper-execution-engine` dependency. +* Handle evict items in the `WasmTestBuilder` when advancing eras or calling `step`. + + + +## 5.0.0 + +### Added +* Add `WasmTestBuilder::get_execution_journals` method for returning execution journals for all test runs. +* Add support to load values from a given Chainspec. +* Add static and constants that represent Casper-mainnet chainspec values. These values will change as new ProtocolVersions are added. The current values reflect ones used in the 1.5.0 ProtocolVersion. 
+* Add `WasmTestBuilder::advance_era`, `WasmTestBuilder::advance_eras_by`, and `WasmTestBuilder::advance_eras_by_default_auction_delay` to advance chain and run auction contract in test environment. + +### Changed +* `WasmTestBuilder::get_transforms` is deprecated in favor of `WasmTestBuilder::get_execution_journals`. +* `deploy_hash` field is now defaulted to a random value rather than zeros in `DeployItemBuilder`. + + + +## 4.0.0 + +### Changed +* Update dependencies (in particular `casper-types` to v2.0.0 due to additional `Key` variant, requiring a major version bump here). + + + +## 3.1.1 + +### Changed +* Update chainspec values used in `PRODUCTION_RUN_GENESIS_REQUEST` to match those of Mainnet protocol version 1.4.15. + + + +## 3.1.0 + +### Added +* Add support for `commit_prune` of `casper-execution-engine`. + + + +## 3.0.0 + +### Changed +* Version bump only to match major version bump of `casper-execution-engine` dependency. + + + +## 2.3.0 [YANKED] + +### Added +* Add `ChainspecConfig` to support parsing a chainspec. + + + +## 2.2.0 + +### Added +* Add some auction and transfer test support functions for reuse among benchmarks and unit tests. + +### Deprecated +* Deprecated the `DEFAULT_RUN_GENESIS_REQUEST` in favor of `PRODUCTION_RUN_GENESIS_REQUEST`. + + + +## 2.1.0 + +### Added +* Add further helper methods to `WasmTestBuilder`. + + + +## 2.0.3 - 2021-12-06 + +### Added +* Added `WasmTestBuilder::get_balance_keys` function. + + + +## 2.0.2 - 2021-11-24 + +### Changed +* Revert the change to the path detection logic applied in v2.0.1. + + + +## [2.0.1] - 2021-11-4 + +### Changed +* Change the path detection logic for compiled Wasm as used by the casper-node monorepo. + +### Deprecated +* Deprecate the `test-support` feature. It had and continues to have no effect when enabled. 
+ + + +## [2.0.0] - 2021-11-01 + +### Added +* Provide fine-grained support for testing all aspects of smart contract execution, including: + * `WasmTestBuilder` for building and running a test to exercise a smart contract + * `DeployItemBuilder` for building a `DeployItem` from a smart contract + * `ExecuteRequestBuilder` for building an `ExecuteRequest` to execute a given smart contract + * `AdditiveMapDiff` to allow easy comparison of two AdditiveMaps + * `StepRequestBuilder` for building a `StepRequest` (generally only used by the execution engine itself) + * `UpgradeRequestBuilder` for building an `UpgradeRequest` (generally only used by the execution engine itself) +* Provide `LmdbWasmTestBuilder` can be used where global state needs to be persisted after execution of a smart contract +* Provide several helper functions in `utils` module +* Provide several default consts and statics useful across many test scenarios + +### Removed +* Remove coarse-grained support and newtypes for testing smart contracts, including removal of: + * `Account` + * `AccountHash` + * `Error` + * `Session` + * `SessionBuilder` + * `SessionTransferInfo` + * `TestContext` + * `TestContextBuilder` + * `Value` +* Remove `InMemoryWasmTestBuilder`. + + + +## [1.4.0] - 2021-10-04 + +### Changed +* Support building and testing using stable Rust. + + + +## [1.3.0] - 2021-07-19 + +### Changed +* Update pinned version of Rust to `nightly-2021-06-17`. + + + +## [1.2.0] - 2021-05-28 + +### Changed +* Change to Apache 2.0 license. + + + +## [1.1.1] - 2021-04-19 + +No changes. + + + +## [1.1.0] - 2021-04-13 [YANKED] + +No changes. + + + +## [1.0.1] - 2021-04-08 + +No changes. + + + +## [1.0.0] - 2021-03-30 + +### Added +* Initial release of execution-engine test support framework compatible with Casper mainnet. 
+ + + +[Keep a Changelog]: https://keepachangelog.com/en/1.0.0 +[unreleased]: https://github.com/casper-network/casper-node/compare/04f48a467...dev +[2.0.1]: https://github.com/casper-network/casper-node/compare/13585abcf...04f48a467 +[2.0.0]: https://github.com/casper-network/casper-node/compare/v1.4.0...13585abcf +[1.4.0]: https://github.com/casper-network/casper-node/compare/v1.3.0...v1.4.0 +[1.3.0]: https://github.com/casper-network/casper-node/compare/v1.2.0...v1.3.0 +[1.2.0]: https://github.com/casper-network/casper-node/compare/v1.1.1...v1.2.0 +[1.1.1]: https://github.com/casper-network/casper-node/compare/v1.0.1...v1.1.1 +[1.1.0]: https://github.com/casper-network/casper-node/compare/v1.0.1...v1.1.1 +[1.0.1]: https://github.com/casper-network/casper-node/compare/v1.0.0...v1.0.1 +[1.0.0]: https://github.com/casper-network/casper-node/releases/tag/v1.0.0 diff --git a/execution_engine_testing/test_support/Cargo.toml b/execution_engine_testing/test_support/Cargo.toml index bb7765444b..ceb8a5eb4e 100644 --- a/execution_engine_testing/test_support/Cargo.toml +++ b/execution_engine_testing/test_support/Cargo.toml @@ -1,30 +1,41 @@ [package] name = "casper-engine-test-support" -version = "1.0.0" # when updating, also update 'html_root_url' in lib.rs -authors = ["Fraser Hutchison "] -edition = "2018" +version = "8.1.1" # when updating, also update 'html_root_url' in lib.rs +authors = ["Fraser Hutchison ", "Michał Papierski "] +edition = "2021" description = "Library to support testing of Wasm smart contracts for use on the Casper network." 
-readme = "README.md" documentation = "https://docs.rs/casper-engine-test-support" -homepage = "https://casperlabs.io" -repository = "https://github.com/CasperLabs/casper-node/tree/master/execution_engine_testing/test_support" -license-file = "../../LICENSE" +readme = "README.md" +homepage = "https://casper.network" +repository = "https://github.com/casper-network/casper-node/tree/master/execution_engine_testing/test_support" +license = "Apache-2.0" [dependencies] -casper-contract = { version = "1.0.0", path = "../../smart_contracts/contract", features = ["std"] } -casper-execution-engine = { version = "1.0.0", path = "../../execution_engine", features = ["gens"] } -casper-types = { version = "1.0.0", path = "../../types", features = ["std"] } -lmdb = "0.8.0" -log = "0.4.8" +blake2 = "0.9.0" +casper-storage = { version = "2.1.1", path = "../../storage" } +casper-types = { version = "6.0.1", path = "../../types" } +env_logger = "0.10.0" +casper-execution-engine = { version = "8.1.1", path = "../../execution_engine", features = ["test-support"] } +humantime = "2" +filesize = "0.2.0" +lmdb-rkv = "0.14" +log = "0.4.14" num-rational = "0.4.0" -num-traits = "0.2.10" -once_cell = "1.5.2" -rand = "0.8.3" +num-traits = { workspace = true } +once_cell = "1.8.0" +rand = "0.8.4" +serde = { version = "1", features = ["derive", "rc"] } +tempfile = "3.4.0" +toml = "0.5.6" [dev-dependencies] -version-sync = "0.9" +casper-types = { version = "6.0.1", path = "../../types", features = ["std"] } +version-sync = "0.9.3" -[features] -use-as-wasm = [] -test-support = ["casper-contract/test-support"] +[build-dependencies] +toml_edit = "=0.21.0" +humantime = "2" +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] diff --git a/execution_engine_testing/test_support/README.md b/execution_engine_testing/test_support/README.md index 73795bdfb1..1d9509a4e9 100644 --- a/execution_engine_testing/test_support/README.md +++ 
b/execution_engine_testing/test_support/README.md @@ -1,14 +1,13 @@ # `casper-engine-test-support` -[![LOGO](https://raw.githubusercontent.com/CasperLabs/casper-node/master/images/CasperLabs_Logo_Horizontal_RGB.png)](https://casperlabs.io/) +[![LOGO](https://raw.githubusercontent.com/casper-network/casper-node/master/images/casper-association-logo-primary.svg)](https://casper.network/) -[![Build Status](https://drone-auto.casperlabs.io/api/badges/CasperLabs/casper-node/status.svg?branch=master)](http://drone-auto.casperlabs.io/CasperLabs/casper-node) [![Crates.io](https://img.shields.io/crates/v/casper-engine-test-support)](https://crates.io/crates/casper-engine-test-support) [![Documentation](https://docs.rs/casper-engine-test-support/badge.svg)](https://docs.rs/casper-engine-test-support) -[![License](https://img.shields.io/badge/license-COSL-blue.svg)](https://github.com/CasperLabs/casper-node/blob/master/LICENSE) +[![License](https://img.shields.io/badge/license-Apache-blue)](https://github.com/casper-network/casper-node/blob/master/LICENSE) -A library to support testing of Wasm smart contracts for use on the CasperLabs network. +A library to support testing of Wasm smart contracts for use on the Casper network. ## License -Licensed under the [CasperLabs Open Source License (COSL)](https://github.com/CasperLabs/casper-node/blob/master/LICENSE). +Licensed under the [Apache License Version 2.0](../../LICENSE). 
diff --git a/execution_engine_testing/test_support/build.rs b/execution_engine_testing/test_support/build.rs new file mode 100644 index 0000000000..fd15dbb995 --- /dev/null +++ b/execution_engine_testing/test_support/build.rs @@ -0,0 +1,28 @@ +use humantime::format_rfc3339; +use std::{ + env, fs, + path::Path, + time::{Duration, SystemTime}, +}; +use toml_edit::{value, Document}; + +fn main() { + let manifest_dir = env::var("CARGO_MANIFEST_DIR").unwrap(); + let input_chainspec = Path::new(&manifest_dir) + .join("resources") + .join("chainspec.toml.in"); + let output_chainspec = Path::new(&manifest_dir) + .join("resources") + .join("chainspec.toml"); + + println!("cargo:rerun-if-changed={}", input_chainspec.display()); + + let toml = fs::read_to_string(input_chainspec).expect("could not read chainspec.toml.in"); + let mut doc = toml + .parse::() + .expect("invalid document in chainspec.toml.in"); + let activation_point = SystemTime::now() + Duration::from_secs(40); + doc["protocol"]["activation_point"] = value(format_rfc3339(activation_point).to_string()); + + fs::write(output_chainspec, doc.to_string()).expect("could not write chainspec.toml"); +} diff --git a/execution_engine_testing/test_support/resources/chainspec.toml.in b/execution_engine_testing/test_support/resources/chainspec.toml.in new file mode 120000 index 0000000000..6c89c598eb --- /dev/null +++ b/execution_engine_testing/test_support/resources/chainspec.toml.in @@ -0,0 +1 @@ +../../../resources/local/chainspec.toml.in \ No newline at end of file diff --git a/execution_engine_testing/test_support/src/account.rs b/execution_engine_testing/test_support/src/account.rs deleted file mode 100644 index 320a6adb08..0000000000 --- a/execution_engine_testing/test_support/src/account.rs +++ /dev/null @@ -1,51 +0,0 @@ -use std::convert::TryFrom; - -use casper_execution_engine::{shared, shared::stored_value::StoredValue}; -use casper_types::{account::AccountHash, contracts::NamedKeys, URef}; - -use crate::{Error, 
Result}; - -/// An `Account` instance. -#[derive(Eq, PartialEq, Clone, Debug)] -pub struct Account { - inner: shared::account::Account, -} - -impl Account { - /// creates a new Account instance. - pub(crate) fn new(account: shared::account::Account) -> Self { - Account { inner: account } - } - - /// Returns the public_key. - pub fn account_hash(&self) -> AccountHash { - self.inner.account_hash() - } - - /// Returns the named_keys. - pub fn named_keys(&self) -> &NamedKeys { - self.inner.named_keys() - } - - /// Returns the main_purse. - pub fn main_purse(&self) -> URef { - self.inner.main_purse() - } -} - -impl From for Account { - fn from(value: shared::account::Account) -> Self { - Account::new(value) - } -} - -impl TryFrom for Account { - type Error = Error; - - fn try_from(value: StoredValue) -> Result { - match value { - StoredValue::Account(account) => Ok(Account::new(account)), - _ => Err(Error::from(String::from("StoredValue is not an Account"))), - } - } -} diff --git a/execution_engine_testing/test_support/src/chainspec_config.rs b/execution_engine_testing/test_support/src/chainspec_config.rs new file mode 100644 index 0000000000..7ba4c6ab82 --- /dev/null +++ b/execution_engine_testing/test_support/src/chainspec_config.rs @@ -0,0 +1,347 @@ +use std::{ + convert::TryFrom, + fs, io, + path::{Path, PathBuf}, +}; + +use log::error; +use once_cell::sync::Lazy; +use serde::Deserialize; + +use casper_execution_engine::engine_state::{EngineConfig, EngineConfigBuilder}; +use casper_storage::data_access_layer::GenesisRequest; +use casper_types::{ + system::auction::VESTING_SCHEDULE_LENGTH_MILLIS, CoreConfig, FeeHandling, GenesisAccount, + GenesisConfig, MintCosts, PricingHandling, ProtocolVersion, RefundHandling, StorageCosts, + SystemConfig, TimeDiff, WasmConfig, +}; + +use crate::{ + GenesisConfigBuilder, DEFAULT_ACCOUNTS, DEFAULT_CHAINSPEC_REGISTRY, + DEFAULT_GENESIS_CONFIG_HASH, DEFAULT_GENESIS_TIMESTAMP_MILLIS, DEFAULT_MAX_QUERY_DEPTH, +}; + +/// The name of 
the chainspec file on disk. +pub const CHAINSPEC_NAME: &str = "chainspec.toml"; + +/// Symlink to chainspec. +pub static CHAINSPEC_SYMLINK: Lazy = Lazy::new(|| { + PathBuf::from(env!("CARGO_MANIFEST_DIR")) + .join("resources/") + .join(CHAINSPEC_NAME) +}); + +#[derive(Debug)] +#[allow(clippy::enum_variant_names)] +pub enum Error { + FailedToLoadChainspec { + /// Path that failed to be read. + path: PathBuf, + /// The underlying OS error. + error: io::Error, + }, + FailedToParseChainspec(toml::de::Error), + Validation, +} + +/// This struct can be parsed from a TOML-encoded chainspec file. It means that as the +/// chainspec format changes over versions, as long as we maintain the core config in this form +/// in the chainspec file, it can continue to be parsed as an `ChainspecConfig`. +#[derive(Deserialize, Clone, Default, Debug)] +pub struct ChainspecConfig { + /// CoreConfig + #[serde(rename = "core")] + pub core_config: CoreConfig, + /// WasmConfig. + #[serde(rename = "wasm")] + pub wasm_config: WasmConfig, + /// SystemConfig + #[serde(rename = "system_costs")] + pub system_costs_config: SystemConfig, + /// Storage costs. + pub storage_costs: StorageCosts, +} + +impl ChainspecConfig { + fn from_bytes(bytes: &[u8]) -> Result { + let chainspec_config: ChainspecConfig = + toml::from_slice(bytes).map_err(Error::FailedToParseChainspec)?; + + if !chainspec_config.is_valid() { + return Err(Error::Validation); + } + + Ok(chainspec_config) + } + + fn from_path>(path: P) -> Result { + let path = path.as_ref(); + let bytes = fs::read(path).map_err(|error| Error::FailedToLoadChainspec { + path: path.to_path_buf(), + error, + })?; + ChainspecConfig::from_bytes(&bytes) + } + + /// Load from path. 
+ pub fn from_chainspec_path>(filename: P) -> Result { + Self::from_path(filename) + } + + fn is_valid(&self) -> bool { + if self.core_config.vesting_schedule_period + > TimeDiff::from_millis(VESTING_SCHEDULE_LENGTH_MILLIS) + { + error!( + "vesting schedule period too long (actual {}; maximum {})", + self.core_config.vesting_schedule_period.millis(), + VESTING_SCHEDULE_LENGTH_MILLIS, + ); + return false; + } + + true + } + + pub(crate) fn create_genesis_request_from_chainspec>( + filename: P, + genesis_accounts: Vec, + protocol_version: ProtocolVersion, + ) -> Result { + ChainspecConfig::from_path(filename)? + .create_genesis_request(genesis_accounts, protocol_version) + } + + /// Create genesis request from self. + pub fn create_genesis_request( + &self, + genesis_accounts: Vec, + protocol_version: ProtocolVersion, + ) -> Result { + // if you get a compilation error here, make sure to update the builder below accordingly + let ChainspecConfig { + core_config, + wasm_config, + system_costs_config, + storage_costs, + } = self; + let CoreConfig { + validator_slots, + auction_delay, + locked_funds_period, + unbonding_delay, + round_seigniorage_rate, + enable_addressable_entity, + .. 
+ } = core_config; + + let genesis_config = GenesisConfigBuilder::new() + .with_accounts(genesis_accounts) + .with_wasm_config(*wasm_config) + .with_system_config(*system_costs_config) + .with_validator_slots(*validator_slots) + .with_auction_delay(*auction_delay) + .with_locked_funds_period_millis(locked_funds_period.millis()) + .with_round_seigniorage_rate(*round_seigniorage_rate) + .with_unbonding_delay(*unbonding_delay) + .with_genesis_timestamp_millis(DEFAULT_GENESIS_TIMESTAMP_MILLIS) + .with_enable_addressable_entity(*enable_addressable_entity) + .with_storage_costs(*storage_costs) + .build(); + + Ok(GenesisRequest::new( + DEFAULT_GENESIS_CONFIG_HASH, + protocol_version, + genesis_config, + DEFAULT_CHAINSPEC_REGISTRY.clone(), + )) + } + + /// Create a `RunGenesisRequest` using values from the local `chainspec.toml`. + pub fn create_genesis_request_from_local_chainspec( + genesis_accounts: Vec, + protocol_version: ProtocolVersion, + ) -> Result { + Self::create_genesis_request_from_chainspec( + &*CHAINSPEC_SYMLINK, + genesis_accounts, + protocol_version, + ) + } + + /// Sets the vesting schedule period millis config option. + pub fn with_max_associated_keys(&mut self, value: u32) -> &mut Self { + self.core_config.max_associated_keys = value; + self + } + + /// Sets the vesting schedule period millis config option. + pub fn with_vesting_schedule_period_millis(mut self, value: u64) -> Self { + self.core_config.vesting_schedule_period = TimeDiff::from_millis(value); + self + } + + /// Sets the max delegators per validator config option. + pub fn with_max_delegators_per_validator(mut self, value: u32) -> Self { + self.core_config.max_delegators_per_validator = value; + self + } + + /// Sets the minimum delegation amount config option. + pub fn with_minimum_delegation_amount(mut self, minimum_delegation_amount: u64) -> Self { + self.core_config.minimum_delegation_amount = minimum_delegation_amount; + self + } + + /// Sets fee handling config option. 
+ pub fn with_fee_handling(mut self, fee_handling: FeeHandling) -> Self { + self.core_config.fee_handling = fee_handling; + self + } + + /// Sets wasm config option. + pub fn with_wasm_config(mut self, wasm_config: WasmConfig) -> Self { + self.wasm_config = wasm_config; + self + } + + /// Sets mint costs. + pub fn with_mint_costs(self, mint_costs: MintCosts) -> Self { + self.system_costs_config.with_mint_costs(mint_costs); + self + } + + /// Sets wasm max stack height. + pub fn with_wasm_max_stack_height(mut self, max_stack_height: u32) -> Self { + *self.wasm_config.v1_mut().max_stack_height_mut() = max_stack_height; + self + } + + /// Sets refund handling config option. + pub fn with_refund_handling(mut self, refund_handling: RefundHandling) -> Self { + self.core_config.refund_handling = refund_handling; + self + } + + /// Sets pricing handling config option. + pub fn with_pricing_handling(mut self, pricing_handling: PricingHandling) -> Self { + self.core_config.pricing_handling = pricing_handling; + self + } + + /// Sets strict argument checking. + pub fn with_strict_argument_checking(mut self, strict_argument_checking: bool) -> Self { + self.core_config.strict_argument_checking = strict_argument_checking; + self + } + + /// Sets the enable addressable entity flag. + pub fn with_enable_addressable_entity(mut self, enable_addressable_entity: bool) -> Self { + self.core_config.enable_addressable_entity = enable_addressable_entity; + self + } + + /// Returns the `max_associated_keys` setting from the core config. + pub fn max_associated_keys(&self) -> u32 { + self.core_config.max_associated_keys + } + + /// Returns an engine config. 
+ pub fn engine_config(&self) -> EngineConfig { + EngineConfigBuilder::new() + .with_max_query_depth(DEFAULT_MAX_QUERY_DEPTH) + .with_max_associated_keys(self.core_config.max_associated_keys) + .with_max_runtime_call_stack_height(self.core_config.max_runtime_call_stack_height) + .with_minimum_delegation_amount(self.core_config.minimum_delegation_amount) + .with_strict_argument_checking(self.core_config.strict_argument_checking) + .with_vesting_schedule_period_millis(self.core_config.vesting_schedule_period.millis()) + .with_max_delegators_per_validator(self.core_config.max_delegators_per_validator) + .with_wasm_config(self.wasm_config) + .with_system_config(self.system_costs_config) + .with_administrative_accounts(self.core_config.administrators.clone()) + .with_allow_auction_bids(self.core_config.allow_auction_bids) + .with_allow_unrestricted_transfers(self.core_config.allow_unrestricted_transfers) + .with_refund_handling(self.core_config.refund_handling) + .with_fee_handling(self.core_config.fee_handling) + .with_enable_entity(self.core_config.enable_addressable_entity) + .with_storage_costs(self.storage_costs) + .build() + } +} + +impl From for EngineConfig { + fn from(chainspec_config: ChainspecConfig) -> Self { + EngineConfigBuilder::new() + .with_max_query_depth(DEFAULT_MAX_QUERY_DEPTH) + .with_max_associated_keys(chainspec_config.core_config.max_associated_keys) + .with_max_runtime_call_stack_height( + chainspec_config.core_config.max_runtime_call_stack_height, + ) + .with_minimum_delegation_amount(chainspec_config.core_config.minimum_delegation_amount) + .with_strict_argument_checking(chainspec_config.core_config.strict_argument_checking) + .with_vesting_schedule_period_millis( + chainspec_config + .core_config + .vesting_schedule_period + .millis(), + ) + .with_max_delegators_per_validator( + chainspec_config.core_config.max_delegators_per_validator, + ) + .with_wasm_config(chainspec_config.wasm_config) + 
.with_system_config(chainspec_config.system_costs_config) + .with_enable_entity(chainspec_config.core_config.enable_addressable_entity) + .build() + } +} + +impl TryFrom for GenesisConfig { + type Error = Error; + + fn try_from(chainspec_config: ChainspecConfig) -> Result { + Ok(GenesisConfigBuilder::new() + .with_accounts(DEFAULT_ACCOUNTS.clone()) + .with_wasm_config(chainspec_config.wasm_config) + .with_system_config(chainspec_config.system_costs_config) + .with_validator_slots(chainspec_config.core_config.validator_slots) + .with_auction_delay(chainspec_config.core_config.auction_delay) + .with_locked_funds_period_millis( + chainspec_config.core_config.locked_funds_period.millis(), + ) + .with_round_seigniorage_rate(chainspec_config.core_config.round_seigniorage_rate) + .with_unbonding_delay(chainspec_config.core_config.unbonding_delay) + .with_genesis_timestamp_millis(DEFAULT_GENESIS_TIMESTAMP_MILLIS) + .with_storage_costs(chainspec_config.storage_costs) + .with_enable_addressable_entity(chainspec_config.core_config.enable_addressable_entity) + .build()) + } +} + +#[cfg(test)] +mod tests { + use std::{convert::TryFrom, path::PathBuf}; + + use casper_types::GenesisConfig; + use once_cell::sync::Lazy; + + use super::{ChainspecConfig, CHAINSPEC_NAME}; + + pub static LOCAL_PATH: Lazy = + Lazy::new(|| PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("../../resources/local/")); + + #[test] + fn should_load_chainspec_config_from_chainspec() { + let path = &LOCAL_PATH.join(CHAINSPEC_NAME); + let chainspec_config = ChainspecConfig::from_chainspec_path(path).unwrap(); + // Check that the loaded values matches values present in the local chainspec. 
+ assert_eq!(chainspec_config.core_config.auction_delay, 1); + } + + #[test] + fn should_get_exec_config_from_chainspec_values() { + let path = &LOCAL_PATH.join(CHAINSPEC_NAME); + let chainspec_config = ChainspecConfig::from_chainspec_path(path).unwrap(); + let config = GenesisConfig::try_from(chainspec_config).unwrap(); + assert_eq!(config.auction_delay(), 1) + } +} diff --git a/execution_engine_testing/test_support/src/code.rs b/execution_engine_testing/test_support/src/code.rs deleted file mode 100644 index 0aac14854a..0000000000 --- a/execution_engine_testing/test_support/src/code.rs +++ /dev/null @@ -1,34 +0,0 @@ -use std::path::{Path, PathBuf}; - -use crate::Hash; - -/// Represents the types of session or payment code. -pub enum Code { - /// The filesystem path of compiled Wasm code. - Path(PathBuf), - /// A named key providing the location of a stored contract. - NamedKey(String, String), - /// A hash providing the location of a stored contract. - Hash(Hash, String), -} - -// Note: can't just `impl> From for Code` because the compiler complains about -// a conflicting implementation of `From` - as URef could be made `AsRef` in the future - -impl<'a> From<&'a str> for Code { - fn from(path: &'a str) -> Code { - Code::Path(path.into()) - } -} - -impl<'a> From<&'a Path> for Code { - fn from(path: &'a Path) -> Code { - Code::Path(path.into()) - } -} - -impl From for Code { - fn from(path: PathBuf) -> Code { - Code::Path(path) - } -} diff --git a/execution_engine_testing/test_support/src/deploy_item.rs b/execution_engine_testing/test_support/src/deploy_item.rs new file mode 100644 index 0000000000..707528cea5 --- /dev/null +++ b/execution_engine_testing/test_support/src/deploy_item.rs @@ -0,0 +1,124 @@ +//! Units of account-triggered execution. 
+ +use std::collections::BTreeSet; + +use casper_execution_engine::engine_state::{BlockInfo, InvalidRequest, WasmV1Request}; +use casper_types::{ + account::AccountHash, Deploy, DeployHash, ExecutableDeployItem, Gas, InitiatorAddr, + TransactionHash, +}; + +/// Definition of a deploy with all the details that make it possible to execute it. +/// Corresponds to the similarly-named IPC protobuf message. +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct DeployItem { + /// Address that created and signed this deploy. This address will be used as a context for + /// executing session code. + pub address: AccountHash, + /// Session code. + pub session: ExecutableDeployItem, + /// Payment code. + pub payment: ExecutableDeployItem, + /// Gas price specified for this deploy by the user. + pub gas_price: u8, + /// List of accounts that signed this deploy. + pub authorization_keys: BTreeSet, + /// A unique identifier of the deploy. + /// Currently it is the hash of the deploy header (see `DeployHeader` in the `types` crate). + pub deploy_hash: DeployHash, +} + +impl DeployItem { + /// Creates a [`DeployItem`]. + pub fn new( + address: AccountHash, + session: ExecutableDeployItem, + payment: ExecutableDeployItem, + gas_price: u8, + authorization_keys: BTreeSet, + deploy_hash: DeployHash, + ) -> Self { + DeployItem { + address, + session, + payment, + gas_price, + authorization_keys, + deploy_hash, + } + } + + /// Is this a native transfer? + pub fn is_native_transfer(&self) -> bool { + matches!(self.session, ExecutableDeployItem::Transfer { .. }) + } + + /// Creates a new request from a deploy item for use as the session code. 
+ pub fn new_session_from_deploy_item( + &self, + block_info: BlockInfo, + gas_limit: Gas, + ) -> Result { + let address = &self.address; + let session = &self.session; + let authorization_keys = &self.authorization_keys; + let deploy_hash = &self.deploy_hash; + + let transaction_hash = TransactionHash::Deploy(*deploy_hash); + let initiator_addr = InitiatorAddr::AccountHash(*address); + let authorization_keys = authorization_keys.clone(); + WasmV1Request::new_from_executable_deploy_item( + block_info, + gas_limit, + transaction_hash, + initiator_addr, + authorization_keys, + session, + ) + } + + /// Creates a new request from a deploy item for use as custom payment. + pub fn new_custom_payment_from_deploy_item( + &self, + block_info: BlockInfo, + gas_limit: Gas, + ) -> Result { + let address = &self.address; + let payment = &self.payment; + let authorization_keys = &self.authorization_keys; + let deploy_hash = &self.deploy_hash; + + let transaction_hash = TransactionHash::Deploy(*deploy_hash); + let initiator_addr = InitiatorAddr::AccountHash(*address); + let authorization_keys = authorization_keys.clone(); + + WasmV1Request::new_payment_from_executable_deploy_item( + block_info, + gas_limit, + transaction_hash, + initiator_addr, + authorization_keys, + payment, + ) + } +} + +impl From for DeployItem { + fn from(deploy: Deploy) -> Self { + let address = deploy.header().account().to_account_hash(); + let authorization_keys = deploy + .approvals() + .iter() + .map(|approval| approval.signer().to_account_hash()) + .collect(); + + DeployItem::new( + address, + deploy.session().clone(), + deploy.payment().clone(), + deploy.header().gas_price() as u8, + authorization_keys, + DeployHash::new(*deploy.hash().inner()), + ) + } +} diff --git a/execution_engine_testing/test_support/src/deploy_item_builder.rs b/execution_engine_testing/test_support/src/deploy_item_builder.rs new file mode 100644 index 0000000000..ac0988f05e --- /dev/null +++ 
b/execution_engine_testing/test_support/src/deploy_item_builder.rs @@ -0,0 +1,317 @@ +use std::{collections::BTreeSet, path::Path}; + +use rand::Rng; + +use casper_types::{ + account::AccountHash, bytesrepr::Bytes, contracts::ContractPackageHash, AddressableEntityHash, + DeployHash, EntityVersion, ExecutableDeployItem, HashAddr, PackageHash, RuntimeArgs, +}; + +use crate::{deploy_item::DeployItem, utils, DEFAULT_GAS_PRICE}; + +#[derive(Default)] +struct DeployItemData { + pub address: Option, + pub payment_code: Option, + pub session_code: Option, + pub gas_price: u8, + pub authorization_keys: BTreeSet, + pub deploy_hash: Option, +} + +/// Builds a [`DeployItem`]. +pub struct DeployItemBuilder { + deploy_item: DeployItemData, +} + +impl DeployItemBuilder { + /// Returns a new [`DeployItemBuilder`] struct. + pub fn new() -> Self { + Default::default() + } + + /// Sets the address of the deploy. + pub fn with_address(mut self, address: AccountHash) -> Self { + self.deploy_item.address = Some(address); + self + } + + /// Sets the payment bytes for the deploy. + pub fn with_payment_bytes>( + mut self, + module_bytes: T, + args: RuntimeArgs, + ) -> Self { + self.deploy_item.payment_code = Some(ExecutableDeployItem::ModuleBytes { + module_bytes: module_bytes.into(), + args, + }); + self + } + + /// Sets the payment bytes of the deploy to an empty Vec. + pub fn with_standard_payment(self, args: RuntimeArgs) -> Self { + self.with_payment_bytes(vec![], args) + } + + /// Sets the payment bytes of a deploy by reading a file and passing [`RuntimeArgs`]. + pub fn with_payment_code>(self, file_name: T, args: RuntimeArgs) -> Self { + let module_bytes = utils::read_wasm_file(file_name); + self.with_payment_bytes(module_bytes, args) + } + + /// Sets payment code of the deploy with contract hash. 
+ pub fn with_stored_payment_hash( + mut self, + hash: AddressableEntityHash, + entry_point: &str, + args: RuntimeArgs, + ) -> Self { + self.deploy_item.payment_code = Some(ExecutableDeployItem::StoredContractByHash { + hash: hash.into(), + entry_point: entry_point.into(), + args, + }); + self + } + + /// Sets the payment code of the deploy with a named key. + pub fn with_stored_payment_named_key( + mut self, + uref_name: &str, + entry_point_name: &str, + args: RuntimeArgs, + ) -> Self { + self.deploy_item.payment_code = Some(ExecutableDeployItem::StoredContractByName { + name: uref_name.to_owned(), + entry_point: entry_point_name.into(), + args, + }); + self + } + + /// Sets the payment code of the deploy with a contract package hash. + pub fn with_stored_versioned_payment_hash( + mut self, + package_hash: PackageHash, + entry_point: &str, + args: RuntimeArgs, + ) -> Self { + self.deploy_item.payment_code = Some(ExecutableDeployItem::StoredVersionedContractByHash { + hash: ContractPackageHash::new(package_hash.value()), + version: None, + entry_point: entry_point.into(), + args, + }); + self + } + + /// Sets the payment code of the deploy with versioned contract stored under a named key. + pub fn with_stored_versioned_payment_named_key( + mut self, + uref_name: &str, + entry_point_name: &str, + args: RuntimeArgs, + ) -> Self { + self.deploy_item.payment_code = Some(ExecutableDeployItem::StoredVersionedContractByName { + name: uref_name.to_owned(), + version: None, + entry_point: entry_point_name.into(), + args, + }); + self + } + + /// Sets the session bytes for the deploy. + pub fn with_session_bytes>( + mut self, + module_bytes: T, + args: RuntimeArgs, + ) -> Self { + self.deploy_item.session_code = Some(ExecutableDeployItem::ModuleBytes { + module_bytes: module_bytes.into(), + args, + }); + self + } + + /// Sets the session code for the deploy using a wasm file. 
+ pub fn with_session_code>(self, file_name: T, args: RuntimeArgs) -> Self { + let module_bytes = utils::read_wasm_file(file_name); + self.with_session_bytes(module_bytes, args) + } + + /// Sets the session code of the deploy as a native transfer. + pub fn with_transfer_args(mut self, args: RuntimeArgs) -> Self { + self.deploy_item.session_code = Some(ExecutableDeployItem::Transfer { args }); + self + } + + /// Sets the session code for the deploy with a stored contract hash, entrypoint and runtime + /// arguments. + pub fn with_stored_session_hash( + mut self, + hash: AddressableEntityHash, + entry_point: &str, + args: RuntimeArgs, + ) -> Self { + self.deploy_item.session_code = Some(ExecutableDeployItem::StoredContractByHash { + hash: hash.into(), + entry_point: entry_point.into(), + args, + }); + self + } + + /// Sets the session code of the deploy by using a contract stored under a named key. + pub fn with_stored_session_named_key( + mut self, + name: &str, + entry_point: &str, + args: RuntimeArgs, + ) -> Self { + self.deploy_item.session_code = Some(ExecutableDeployItem::StoredContractByName { + name: name.to_owned(), + entry_point: entry_point.into(), + args, + }); + self + } + + /// Sets the session code of the deploy with a versioned contract stored under a named key. + pub fn with_stored_versioned_contract_by_name( + mut self, + name: &str, + version: Option, + entry_point: &str, + args: RuntimeArgs, + ) -> Self { + self.deploy_item.session_code = Some(ExecutableDeployItem::StoredVersionedContractByName { + name: name.to_owned(), + version, + entry_point: entry_point.to_owned(), + args, + }); + self + } + + /// Sets the session code of the deploy with a stored, versioned contract by contract hash. 
+ pub fn with_stored_versioned_contract_by_hash( + mut self, + hash: HashAddr, + version: Option, + entry_point: &str, + args: RuntimeArgs, + ) -> Self { + self.deploy_item.session_code = Some(ExecutableDeployItem::StoredVersionedContractByHash { + hash: hash.into(), + version, + entry_point: entry_point.to_owned(), + args, + }); + self + } + + /// Sets the payment code of the deploy with a versioned contract stored under a named key. + pub fn with_stored_versioned_payment_contract_by_name( + mut self, + key_name: &str, + version: Option, + entry_point: &str, + args: RuntimeArgs, + ) -> Self { + self.deploy_item.payment_code = Some(ExecutableDeployItem::StoredVersionedContractByName { + name: key_name.to_owned(), + version, + entry_point: entry_point.to_owned(), + args, + }); + self + } + + /// Sets the payment code of the deploy using a stored versioned contract by contract hash. + pub fn with_stored_versioned_payment_contract_by_hash( + mut self, + hash: HashAddr, + version: Option, + entry_point: &str, + args: RuntimeArgs, + ) -> Self { + self.deploy_item.payment_code = Some(ExecutableDeployItem::StoredVersionedContractByHash { + hash: hash.into(), + version, + entry_point: entry_point.to_owned(), + args, + }); + self + } + + /// Sets authorization keys for the deploy. + pub fn with_authorization_keys(mut self, authorization_keys: &[AccountHash]) -> Self { + self.deploy_item.authorization_keys = authorization_keys.iter().copied().collect(); + self + } + + /// Sets the gas price for the deploy. + pub fn with_gas_price(mut self, gas_price: u8) -> Self { + self.deploy_item.gas_price = gas_price; + self + } + + /// Sets the hash of the deploy. + pub fn with_deploy_hash(mut self, hash: [u8; 32]) -> Self { + self.deploy_item.deploy_hash = Some(DeployHash::from_raw(hash)); + self + } + + /// Consumes self and returns a [`DeployItem`]. 
+ pub fn build(self) -> DeployItem { + DeployItem { + address: self + .deploy_item + .address + .unwrap_or_else(|| AccountHash::new([0u8; 32])), + session: self + .deploy_item + .session_code + .expect("should have session code"), + payment: self + .deploy_item + .payment_code + .expect("should have payment code"), + gas_price: self.deploy_item.gas_price, + authorization_keys: self.deploy_item.authorization_keys, + deploy_hash: self + .deploy_item + .deploy_hash + .unwrap_or_else(|| DeployHash::from_raw(rand::thread_rng().gen())), + } + } +} + +impl Default for DeployItemBuilder { + fn default() -> Self { + let deploy_item = DeployItemData { + gas_price: DEFAULT_GAS_PRICE, + ..Default::default() + }; + DeployItemBuilder { deploy_item } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn should_not_default_deploy_hash_to_zeros_if_not_specified() { + let address = AccountHash::new([42; 32]); + let deploy = DeployItemBuilder::new() + .with_address(address) + .with_authorization_keys(&[address]) + .with_session_bytes(Vec::new(), RuntimeArgs::new()) + .with_payment_bytes(Vec::new(), RuntimeArgs::new()) + .build(); + assert_ne!(deploy.deploy_hash, DeployHash::default()); + } +} diff --git a/execution_engine_testing/test_support/src/error.rs b/execution_engine_testing/test_support/src/error.rs deleted file mode 100644 index dab07b9125..0000000000 --- a/execution_engine_testing/test_support/src/error.rs +++ /dev/null @@ -1,35 +0,0 @@ -use std::result; - -use casper_execution_engine::shared::TypeMismatch; -use casper_types::CLValueError; - -/// The error type returned by any casper-engine-test-support operation. 
-#[derive(Eq, PartialEq, Ord, PartialOrd, Clone, Hash, Debug)] -pub struct Error { - inner: String, -} - -impl From for Error { - fn from(error: String) -> Self { - Error { inner: error } - } -} - -impl From for Error { - fn from(error: CLValueError) -> Self { - Error { - inner: format!("{:?}", error), - } - } -} - -impl From for Error { - fn from(error: TypeMismatch) -> Self { - Error { - inner: format!("{:?}", error), - } - } -} - -/// A specialized `std::result::Result` for this crate. -pub type Result = result::Result; diff --git a/execution_engine_testing/test_support/src/execute_request_builder.rs b/execution_engine_testing/test_support/src/execute_request_builder.rs new file mode 100644 index 0000000000..55fba7eded --- /dev/null +++ b/execution_engine_testing/test_support/src/execute_request_builder.rs @@ -0,0 +1,517 @@ +use std::collections::BTreeSet; + +use casper_execution_engine::engine_state::{ + BlockInfo, ExecutableItem, SessionDataV1, SessionInputData, WasmV1Request, +}; +use casper_types::{ + account::AccountHash, addressable_entity::DEFAULT_ENTRY_POINT_NAME, + contracts::ProtocolVersionMajor, runtime_args, AddressableEntityHash, BlockHash, BlockTime, + Digest, EntityVersion, Gas, InitiatorAddr, PackageHash, Phase, PricingMode, ProtocolVersion, + RuntimeArgs, TransactionEntryPoint, TransactionHash, TransactionInvocationTarget, + TransactionRuntimeParams, TransactionTarget, TransactionV1Hash, +}; + +use crate::{ + deploy_item::DeployItem, DeployItemBuilder, ARG_AMOUNT, DEFAULT_BLOCK_TIME, DEFAULT_PAYMENT, + DEFAULT_PROTOCOL_VERSION, +}; + +/// A request comprising a [`WasmV1Request`] for use as session code, and an optional custom +/// payment `WasmV1Request`. +#[derive(Debug)] +pub struct ExecuteRequest { + /// The session request. + pub session: WasmV1Request, + /// The optional custom payment request. + pub custom_payment: Option, +} + +impl ExecuteRequest { + /// Is install upgrade allowed? 
+ pub fn is_install_upgrade_allowed(&self) -> bool { + self.session.executable_item.is_install_upgrade_allowed() + } +} + +/// Builds an [`ExecuteRequest`]. +#[derive(Debug)] +pub struct ExecuteRequestBuilder { + state_hash: Digest, + block_time: BlockTime, + block_height: u64, + parent_block_hash: BlockHash, + protocol_version: ProtocolVersion, + transaction_hash: TransactionHash, + initiator_addr: InitiatorAddr, + payment: Option, + payment_gas_limit: Gas, + payment_entry_point: String, + payment_args: RuntimeArgs, + session: ExecutableItem, + session_gas_limit: Gas, + session_entry_point: String, + session_args: RuntimeArgs, + authorization_keys: BTreeSet, +} + +const DEFAULT_GAS_LIMIT: u64 = 5_000_u64 * 10u64.pow(9); + +impl ExecuteRequestBuilder { + /// The default value used for `WasmV1Request::state_hash`. + pub const DEFAULT_STATE_HASH: Digest = Digest::from_raw([1; 32]); + /// The default value used for `WasmV1Request::transaction_hash`. + pub const DEFAULT_TRANSACTION_HASH: TransactionHash = + TransactionHash::V1(TransactionV1Hash::from_raw([2; 32])); + /// The default value used for `WasmV1Request::entry_point`. + pub const DEFAULT_ENTRY_POINT: &'static str = "call"; + /// The default protocol version stored in the BlockInfo + pub const DEFAULT_PROTOCOL_VERSION: ProtocolVersion = ProtocolVersion::V2_0_0; + + /// Converts a `SessionInputData` into an `ExecuteRequestBuilder`. 
+ pub fn from_session_input_data_for_protocol_version( + session_input_data: &SessionInputData, + protocol_version: ProtocolVersion, + ) -> Self { + let block_info = BlockInfo::new( + Self::DEFAULT_STATE_HASH, + BlockTime::new(DEFAULT_BLOCK_TIME), + BlockHash::default(), + 0, + protocol_version, + ); + let authorization_keys = session_input_data.signers(); + let session = + WasmV1Request::new_session(block_info, Gas::new(DEFAULT_GAS_LIMIT), session_input_data) + .unwrap(); + + let payment: Option; + let payment_gas_limit: Gas; + let payment_entry_point: String; + let payment_args: RuntimeArgs; + if session_input_data.is_standard_payment() { + payment = None; + payment_gas_limit = Gas::zero(); + payment_entry_point = DEFAULT_ENTRY_POINT_NAME.to_string(); + payment_args = RuntimeArgs::new(); + } else { + let block_info = BlockInfo::new( + Self::DEFAULT_STATE_HASH, + BlockTime::new(DEFAULT_BLOCK_TIME), + BlockHash::default(), + 0, + protocol_version, + ); + let request = WasmV1Request::new_custom_payment( + block_info, + Gas::new(DEFAULT_GAS_LIMIT), + session_input_data, + ) + .unwrap(); + payment = Some(request.executable_item); + payment_gas_limit = request.gas_limit; + payment_entry_point = request.entry_point; + payment_args = request.args; + } + + ExecuteRequestBuilder { + state_hash: session.block_info.state_hash, + block_time: session.block_info.block_time, + block_height: session.block_info.block_height, + parent_block_hash: session.block_info.parent_block_hash, + protocol_version: session.block_info.protocol_version, + transaction_hash: session.transaction_hash, + initiator_addr: session.initiator_addr, + payment, + payment_gas_limit, + payment_entry_point, + payment_args, + session: session.executable_item, + session_gas_limit: session.gas_limit, + session_entry_point: session.entry_point, + session_args: session.args, + authorization_keys, + } + } + + /// Converts a `SessionInputData` into an `ExecuteRequestBuilder`. 
+ pub fn from_session_input_data(session_input_data: &SessionInputData) -> Self { + Self::from_session_input_data_for_protocol_version( + session_input_data, + DEFAULT_PROTOCOL_VERSION, + ) + } + + /// Converts a `DeployItem` into an `ExecuteRequestBuilder`. + pub fn from_deploy_item(deploy_item: &DeployItem) -> Self { + Self::from_deploy_item_for_protocol_version(deploy_item, DEFAULT_PROTOCOL_VERSION) + } + + /// Converts a `DeployItem` into an `ExecuteRequestBuilder`. + pub fn from_deploy_item_for_protocol_version( + deploy_item: &DeployItem, + protocol_version: ProtocolVersion, + ) -> Self { + let authorization_keys = deploy_item.authorization_keys.clone(); + let block_info = BlockInfo::new( + Self::DEFAULT_STATE_HASH, + BlockTime::new(DEFAULT_BLOCK_TIME), + BlockHash::default(), + 0, + protocol_version, + ); + let session = deploy_item + .new_session_from_deploy_item(block_info, Gas::new(DEFAULT_GAS_LIMIT)) + .unwrap(); + + let payment: Option; + let payment_gas_limit: Gas; + let payment_entry_point: String; + let payment_args: RuntimeArgs; + if deploy_item.payment.is_standard_payment(Phase::Payment) { + payment = None; + payment_gas_limit = Gas::zero(); + payment_entry_point = DEFAULT_ENTRY_POINT_NAME.to_string(); + payment_args = RuntimeArgs::new(); + } else { + let block_info = BlockInfo::new( + Self::DEFAULT_STATE_HASH, + BlockTime::new(DEFAULT_BLOCK_TIME), + BlockHash::default(), + 0, + DEFAULT_PROTOCOL_VERSION, + ); + let request = deploy_item + .new_custom_payment_from_deploy_item(block_info, Gas::new(DEFAULT_GAS_LIMIT)) + .unwrap(); + payment = Some(request.executable_item); + payment_gas_limit = request.gas_limit; + payment_entry_point = request.entry_point; + payment_args = request.args; + } + + ExecuteRequestBuilder { + state_hash: session.block_info.state_hash, + block_time: session.block_info.block_time, + block_height: session.block_info.block_height, + parent_block_hash: session.block_info.parent_block_hash, + protocol_version: 
session.block_info.protocol_version, + transaction_hash: session.transaction_hash, + initiator_addr: session.initiator_addr, + payment, + payment_gas_limit, + payment_entry_point, + payment_args, + session: session.executable_item, + session_gas_limit: session.gas_limit, + session_entry_point: session.entry_point, + session_args: session.args, + authorization_keys, + } + } + + /// Returns an [`ExecuteRequest`] derived from a deploy with standard dependencies. + pub fn standard( + account_hash: AccountHash, + session_file: &str, + session_args: RuntimeArgs, + ) -> Self { + Self::standard_with_protocol_version( + account_hash, + session_file, + session_args, + DEFAULT_PROTOCOL_VERSION, + ) + } + + /// Returns an [`ExecuteRequest`] derived from a deploy with standard dependencies. + pub fn standard_with_protocol_version( + account_hash: AccountHash, + session_file: &str, + session_args: RuntimeArgs, + protocol_version: ProtocolVersion, + ) -> Self { + let deploy_item = DeployItemBuilder::new() + .with_address(account_hash) + .with_session_code(session_file, session_args) + .with_standard_payment(runtime_args! { + ARG_AMOUNT => *DEFAULT_PAYMENT + }) + .with_authorization_keys(&[account_hash]) + .build(); + Self::from_deploy_item_for_protocol_version(&deploy_item, protocol_version) + } + + /// Returns an [`ExecuteRequest`] derived from a deploy with session module bytes. + pub fn module_bytes( + account_hash: AccountHash, + module_bytes: Vec, + session_args: RuntimeArgs, + ) -> Self { + let deploy_item = DeployItemBuilder::new() + .with_address(account_hash) + .with_session_bytes(module_bytes, session_args) + .with_standard_payment(runtime_args! { + ARG_AMOUNT => *DEFAULT_PAYMENT + }) + .with_authorization_keys(&[account_hash]) + .build(); + Self::from_deploy_item(&deploy_item) + } + + /// Returns an [`ExecuteRequest`] derived from a deploy with a session item that will call a + /// stored contract by hash. 
+ pub fn contract_call_by_hash( + sender: AccountHash, + contract_hash: AddressableEntityHash, + entry_point: &str, + args: RuntimeArgs, + ) -> Self { + let deploy_item = DeployItemBuilder::new() + .with_address(sender) + .with_stored_session_hash(contract_hash, entry_point, args) + .with_standard_payment(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, }) + .with_authorization_keys(&[sender]) + .build(); + Self::from_deploy_item(&deploy_item) + } + + /// Returns an [`ExecuteRequest`] derived from a deploy with a session item that will call a + /// stored contract by name. + pub fn contract_call_by_name( + sender: AccountHash, + contract_name: &str, + entry_point: &str, + args: RuntimeArgs, + ) -> Self { + let deploy_item = DeployItemBuilder::new() + .with_address(sender) + .with_stored_session_named_key(contract_name, entry_point, args) + .with_standard_payment(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, }) + .with_authorization_keys(&[sender]) + .build(); + Self::from_deploy_item(&deploy_item) + } + + /// Returns an [`ExecuteRequest`] derived from a deploy with a session item that will call a + /// versioned stored contract by hash. 
+ pub fn contract_call_by_hash_versioned_with_major( + sender: AccountHash, + contract_package_hash: PackageHash, + version: Option, + protocol_version_major: Option, + entry_point_name: &str, + args: RuntimeArgs, + ) -> Self { + let initiator_addr = InitiatorAddr::AccountHash(sender); + let target = TransactionTarget::Stored { + id: TransactionInvocationTarget::ByPackageHash { + addr: contract_package_hash.value(), + version, + protocol_version_major, + }, + runtime: TransactionRuntimeParams::VmCasperV1, + }; + let entry_point = TransactionEntryPoint::Custom(entry_point_name.to_owned()); + let hash = TransactionV1Hash::from_raw([1; 32]); + let pricing_mode = PricingMode::PaymentLimited { + payment_amount: DEFAULT_PAYMENT.as_u64(), + gas_price_tolerance: 1, + standard_payment: true, + }; + let mut signers = BTreeSet::new(); + signers.insert(sender); + let session_input_data = SessionInputData::SessionDataV1 { + data: SessionDataV1::new( + &args, + &target, + &entry_point, + false, + &hash, + &pricing_mode, + &initiator_addr, + signers, + pricing_mode.is_standard_payment(), + ), + }; + Self::from_session_input_data(&session_input_data) + } + + /// Returns an [`ExecuteRequest`] derived from a deploy with a session item that will call a + /// versioned stored contract by hash. + pub fn versioned_contract_call_by_hash( + sender: AccountHash, + contract_package_hash: PackageHash, + version: Option, + entry_point_name: &str, + args: RuntimeArgs, + ) -> Self { + let deploy_item = DeployItemBuilder::new() + .with_address(sender) + .with_stored_versioned_contract_by_hash( + contract_package_hash.value(), + version, + entry_point_name, + args, + ) + .with_standard_payment(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, }) + .with_authorization_keys(&[sender]) + .build(); + Self::from_deploy_item(&deploy_item) + } + + /// Returns an [`ExecuteRequest`] derived from a deploy with a session item that will call a + /// versioned stored contract by name. 
+ pub fn contract_call_by_name_versioned_with_major( + sender: AccountHash, + contract_name: &str, + version: Option, + protocol_version_major: Option, + entry_point_name: &str, + args: RuntimeArgs, + ) -> Self { + let initiator_addr = InitiatorAddr::AccountHash(sender); + let target = TransactionTarget::Stored { + id: TransactionInvocationTarget::ByPackageName { + name: contract_name.to_owned(), + version, + protocol_version_major, + }, + runtime: TransactionRuntimeParams::VmCasperV1, + }; + let entry_point = TransactionEntryPoint::Custom(entry_point_name.to_owned()); + let hash = TransactionV1Hash::from_raw([1; 32]); + let pricing_mode = PricingMode::PaymentLimited { + payment_amount: DEFAULT_PAYMENT.as_u64(), + gas_price_tolerance: 1, + standard_payment: true, + }; + let mut signers = BTreeSet::new(); + signers.insert(sender); + let session_input_data = SessionInputData::SessionDataV1 { + data: SessionDataV1::new( + &args, + &target, + &entry_point, + false, + &hash, + &pricing_mode, + &initiator_addr, + signers, + pricing_mode.is_standard_payment(), + ), + }; + Self::from_session_input_data(&session_input_data) + } + + /// Returns an [`ExecuteRequest`] derived from a deploy with a session item that will call a + /// versioned stored contract by name. + pub fn versioned_contract_call_by_name( + sender: AccountHash, + contract_name: &str, + version: Option, + entry_point_name: &str, + args: RuntimeArgs, + ) -> Self { + let deploy_item = DeployItemBuilder::new() + .with_address(sender) + .with_stored_versioned_contract_by_name(contract_name, version, entry_point_name, args) + .with_standard_payment(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, }) + .with_authorization_keys(&[sender]) + .build(); + Self::from_deploy_item(&deploy_item) + } + + /// Sets the block time of the [`WasmV1Request`]s. + pub fn with_block_time>(mut self, block_time: T) -> Self { + self.block_time = block_time.into(); + self + } + + /// Sets the block height of the [`WasmV1Request`]s. 
+ pub fn with_block_height(mut self, block_height: u64) -> Self { + self.block_height = block_height; + self + } + + /// Sets the parent block hash of the [`WasmV1Request`]s. + pub fn with_parent_block_hash(mut self, parent_block_hash: BlockHash) -> Self { + self.parent_block_hash = parent_block_hash; + self + } + + /// Sets the parent block hash of the [`WasmV1Request`]s. + pub fn with_state_hash(mut self, state_hash: Digest) -> Self { + self.state_hash = state_hash; + self + } + + /// Sets the authorization keys used by the [`WasmV1Request`]s. + pub fn with_authorization_keys(mut self, authorization_keys: BTreeSet) -> Self { + self.authorization_keys = authorization_keys; + self + } + + /// Sets the protocol version for the execution request + pub fn with_protocol_version(mut self, protocol_version: ProtocolVersion) -> Self { + self.protocol_version = protocol_version; + self + } + + /// Consumes self and returns an `ExecuteRequest`. + pub fn build(self) -> ExecuteRequest { + let ExecuteRequestBuilder { + state_hash, + block_time, + block_height, + parent_block_hash, + protocol_version, + transaction_hash, + initiator_addr, + payment, + payment_gas_limit, + payment_entry_point, + payment_args, + session, + session_gas_limit, + session_entry_point, + session_args, + authorization_keys, + } = self; + + let block_info = BlockInfo::new( + state_hash, + block_time, + parent_block_hash, + block_height, + protocol_version, + ); + let maybe_custom_payment = payment.map(|executable_item| WasmV1Request { + block_info, + transaction_hash, + gas_limit: payment_gas_limit, + initiator_addr: initiator_addr.clone(), + executable_item, + entry_point: payment_entry_point, + args: payment_args, + authorization_keys: authorization_keys.clone(), + phase: Phase::Payment, + }); + + let session = WasmV1Request { + block_info, + transaction_hash, + gas_limit: session_gas_limit, + initiator_addr, + executable_item: session, + entry_point: session_entry_point, + args: session_args, + 
authorization_keys, + phase: Phase::Session, + }; + + ExecuteRequest { + session, + custom_payment: maybe_custom_payment, + } + } +} diff --git a/execution_engine_testing/test_support/src/genesis_config_builder.rs b/execution_engine_testing/test_support/src/genesis_config_builder.rs new file mode 100644 index 0000000000..31572d9c9c --- /dev/null +++ b/execution_engine_testing/test_support/src/genesis_config_builder.rs @@ -0,0 +1,131 @@ +//! A builder for an [`GenesisConfig`]. +use casper_execution_engine::engine_state::engine_config::DEFAULT_ENABLE_ENTITY; +use casper_types::{ + GenesisAccount, GenesisConfig, HoldBalanceHandling, StorageCosts, SystemConfig, WasmConfig, +}; +use num_rational::Ratio; + +use crate::{ + DEFAULT_AUCTION_DELAY, DEFAULT_GAS_HOLD_BALANCE_HANDLING, DEFAULT_GAS_HOLD_INTERVAL_MILLIS, + DEFAULT_GENESIS_TIMESTAMP_MILLIS, DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS, + DEFAULT_ROUND_SEIGNIORAGE_RATE, DEFAULT_UNBONDING_DELAY, DEFAULT_VALIDATOR_SLOTS, +}; + +/// A builder for an [`GenesisConfig`]. +/// +/// Any field that isn't specified will be defaulted. See [the module docs](index.html) for the set +/// of default values. +#[derive(Default, Debug)] +pub struct GenesisConfigBuilder { + accounts: Option>, + wasm_config: Option, + system_config: Option, + validator_slots: Option, + auction_delay: Option, + locked_funds_period_millis: Option, + round_seigniorage_rate: Option>, + unbonding_delay: Option, + genesis_timestamp_millis: Option, + gas_hold_balance_handling: Option, + gas_hold_interval_millis: Option, + enable_addressable_entity: Option, + storage_costs: Option, +} + +impl GenesisConfigBuilder { + /// Creates a new `ExecConfig` builder. + pub fn new() -> Self { + GenesisConfigBuilder::default() + } + + /// Sets the genesis accounts. + pub fn with_accounts(mut self, accounts: Vec) -> Self { + self.accounts = Some(accounts); + self + } + + /// Sets the Wasm config options. 
+ pub fn with_wasm_config(mut self, wasm_config: WasmConfig) -> Self { + self.wasm_config = Some(wasm_config); + self + } + + /// Sets the system config options. + pub fn with_system_config(mut self, system_config: SystemConfig) -> Self { + self.system_config = Some(system_config); + self + } + + /// Sets the validator slots config option. + pub fn with_validator_slots(mut self, validator_slots: u32) -> Self { + self.validator_slots = Some(validator_slots); + self + } + + /// Sets the auction delay config option. + pub fn with_auction_delay(mut self, auction_delay: u64) -> Self { + self.auction_delay = Some(auction_delay); + self + } + + /// Sets the locked funds period config option. + pub fn with_locked_funds_period_millis(mut self, locked_funds_period_millis: u64) -> Self { + self.locked_funds_period_millis = Some(locked_funds_period_millis); + self + } + + /// Sets the round seigniorage rate config option. + pub fn with_round_seigniorage_rate(mut self, round_seigniorage_rate: Ratio) -> Self { + self.round_seigniorage_rate = Some(round_seigniorage_rate); + self + } + + /// Sets the unbonding delay config option. + pub fn with_unbonding_delay(mut self, unbonding_delay: u64) -> Self { + self.unbonding_delay = Some(unbonding_delay); + self + } + + /// Sets the genesis timestamp config option. + pub fn with_genesis_timestamp_millis(mut self, genesis_timestamp_millis: u64) -> Self { + self.genesis_timestamp_millis = Some(genesis_timestamp_millis); + self + } + + /// Sets the enable addressable entity flag. + pub fn with_enable_addressable_entity(mut self, enable_addressable_entity: bool) -> Self { + self.enable_addressable_entity = Some(enable_addressable_entity); + self + } + + /// Sets the storage_costs handling. + pub fn with_storage_costs(mut self, storage_costs: StorageCosts) -> Self { + self.storage_costs = Some(storage_costs); + self + } + + /// Builds a new [`GenesisConfig`] object. 
+ pub fn build(self) -> GenesisConfig { + GenesisConfig::new( + self.accounts.unwrap_or_default(), + self.wasm_config.unwrap_or_default(), + self.system_config.unwrap_or_default(), + self.validator_slots.unwrap_or(DEFAULT_VALIDATOR_SLOTS), + self.auction_delay.unwrap_or(DEFAULT_AUCTION_DELAY), + self.locked_funds_period_millis + .unwrap_or(DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS), + self.round_seigniorage_rate + .unwrap_or(DEFAULT_ROUND_SEIGNIORAGE_RATE), + self.unbonding_delay.unwrap_or(DEFAULT_UNBONDING_DELAY), + self.genesis_timestamp_millis + .unwrap_or(DEFAULT_GENESIS_TIMESTAMP_MILLIS), + self.gas_hold_balance_handling + .unwrap_or(DEFAULT_GAS_HOLD_BALANCE_HANDLING), + self.gas_hold_interval_millis + .unwrap_or(DEFAULT_GAS_HOLD_INTERVAL_MILLIS), + self.enable_addressable_entity + .unwrap_or(DEFAULT_ENABLE_ENTITY), + self.storage_costs.unwrap_or_default(), + ) + } +} diff --git a/execution_engine_testing/test_support/src/internal/additive_map_diff.rs b/execution_engine_testing/test_support/src/internal/additive_map_diff.rs deleted file mode 100644 index 6185cf5da3..0000000000 --- a/execution_engine_testing/test_support/src/internal/additive_map_diff.rs +++ /dev/null @@ -1,193 +0,0 @@ -use casper_execution_engine::shared::{additive_map::AdditiveMap, transform::Transform}; -use casper_types::Key; - -/// Represents the difference between two `AdditiveMap`s. -#[derive(Debug, Default, PartialEq, Eq)] -pub struct AdditiveMapDiff { - left: AdditiveMap, - both: AdditiveMap, - right: AdditiveMap, -} - -impl AdditiveMapDiff { - /// Creates a diff from two `AdditiveMap`s. - pub fn new( - mut left: AdditiveMap, - mut right: AdditiveMap, - ) -> Self { - let mut both = AdditiveMap::new(); - for key in left.keys().copied().collect::>() { - // Safe to unwrap here since we're iterating `left` keys, so `left.remove` must succeed. 
- let left_value = left.remove(&key).unwrap(); - if let Some(right_value) = right.remove(&key) { - if left_value == right_value { - both.insert(key, left_value); - } else { - left.insert(key, left_value); - right.insert(key, right_value); - } - } else { - left.insert(key, left_value); - } - } - - AdditiveMapDiff { left, both, right } - } - - /// Returns the entries that are unique to the `left` input. - pub fn left(&self) -> &AdditiveMap { - &self.left - } - - /// Returns the entries that are unique to the `right` input. - pub fn right(&self) -> &AdditiveMap { - &self.right - } - - /// Returns the entries shared by both inputs. - pub fn both(&self) -> &AdditiveMap { - &self.both - } -} - -#[cfg(test)] -mod tests { - use once_cell::sync::Lazy; - use rand::{self, Rng}; - - use casper_types::{AccessRights, Key, URef, BLAKE2B_DIGEST_LENGTH}; - - use super::*; - - const MIN_ELEMENTS: u8 = 1; - const MAX_ELEMENTS: u8 = 10; - - static LEFT_ONLY: Lazy> = Lazy::new(|| { - let mut map = AdditiveMap::new(); - for i in 0..random_element_count() { - map.insert( - Key::URef(URef::new( - [i; BLAKE2B_DIGEST_LENGTH], - AccessRights::READ_ADD_WRITE, - )), - Transform::AddInt32(i.into()), - ); - } - map - }); - static BOTH: Lazy> = Lazy::new(|| { - let mut map = AdditiveMap::new(); - for i in 0..random_element_count() { - map.insert( - Key::URef(URef::new( - [i + MAX_ELEMENTS; BLAKE2B_DIGEST_LENGTH], - AccessRights::READ_ADD_WRITE, - )), - Transform::Identity, - ); - } - map - }); - static RIGHT_ONLY: Lazy> = Lazy::new(|| { - let mut map = AdditiveMap::new(); - for i in 0..random_element_count() { - map.insert( - Key::URef(URef::new( - [i; BLAKE2B_DIGEST_LENGTH], - AccessRights::READ_ADD_WRITE, - )), - Transform::AddUInt512(i.into()), - ); - } - map - }); - - fn random_element_count() -> u8 { - rand::thread_rng().gen_range(MIN_ELEMENTS..=MAX_ELEMENTS) - } - - struct TestFixture { - expected: AdditiveMapDiff, - } - - impl TestFixture { - fn new( - left_only: AdditiveMap, - both: 
AdditiveMap, - right_only: AdditiveMap, - ) -> Self { - TestFixture { - expected: AdditiveMapDiff { - left: left_only, - both, - right: right_only, - }, - } - } - - fn left(&self) -> AdditiveMap { - self.expected - .left - .iter() - .chain(self.expected.both.iter()) - .map(|(key, transform)| (*key, transform.clone())) - .collect() - } - - fn right(&self) -> AdditiveMap { - self.expected - .right - .iter() - .chain(self.expected.both.iter()) - .map(|(key, transform)| (*key, transform.clone())) - .collect() - } - - fn run(&self) { - let diff = AdditiveMapDiff::new(self.left(), self.right()); - assert_eq!(self.expected, diff); - } - } - - #[test] - fn should_create_diff_where_left_is_subset_of_right() { - let fixture = TestFixture::new(AdditiveMap::new(), BOTH.clone(), RIGHT_ONLY.clone()); - fixture.run(); - } - - #[test] - fn should_create_diff_where_right_is_subset_of_left() { - let fixture = TestFixture::new(LEFT_ONLY.clone(), BOTH.clone(), AdditiveMap::new()); - fixture.run(); - } - - #[test] - fn should_create_diff_where_no_intersection() { - let fixture = TestFixture::new(LEFT_ONLY.clone(), AdditiveMap::new(), RIGHT_ONLY.clone()); - fixture.run(); - } - - #[test] - fn should_create_diff_where_both_equal() { - let fixture = TestFixture::new(AdditiveMap::new(), BOTH.clone(), AdditiveMap::new()); - fixture.run(); - } - - #[test] - fn should_create_diff_where_left_is_empty() { - let fixture = TestFixture::new(AdditiveMap::new(), AdditiveMap::new(), RIGHT_ONLY.clone()); - fixture.run(); - } - - #[test] - fn should_create_diff_where_right_is_empty() { - let fixture = TestFixture::new(LEFT_ONLY.clone(), AdditiveMap::new(), AdditiveMap::new()); - fixture.run(); - } - - #[test] - fn should_create_diff_where_both_are_empty() { - let fixture = TestFixture::new(AdditiveMap::new(), AdditiveMap::new(), AdditiveMap::new()); - fixture.run(); - } -} diff --git a/execution_engine_testing/test_support/src/internal/deploy_item_builder.rs 
b/execution_engine_testing/test_support/src/internal/deploy_item_builder.rs deleted file mode 100644 index 02e4ef7f66..0000000000 --- a/execution_engine_testing/test_support/src/internal/deploy_item_builder.rs +++ /dev/null @@ -1,237 +0,0 @@ -use std::{collections::BTreeSet, path::Path}; - -use casper_execution_engine::{ - core::engine_state::{deploy_item::DeployItem, executable_deploy_item::ExecutableDeployItem}, - shared::newtypes::Blake2bHash, -}; -use casper_types::{ - account::AccountHash, ContractHash, ContractVersion, DeployHash, HashAddr, RuntimeArgs, -}; - -use crate::internal::{utils, DEFAULT_GAS_PRICE}; - -#[derive(Default)] -struct DeployItemData { - pub address: Option, - pub payment_code: Option, - pub session_code: Option, - pub gas_price: u64, - pub authorization_keys: BTreeSet, - pub deploy_hash: DeployHash, -} - -pub struct DeployItemBuilder { - deploy_item: DeployItemData, -} - -impl DeployItemBuilder { - pub fn new() -> Self { - Default::default() - } - - pub fn with_address(mut self, address: AccountHash) -> Self { - self.deploy_item.address = Some(address); - self - } - - pub fn with_payment_bytes(mut self, module_bytes: Vec, args: RuntimeArgs) -> Self { - self.deploy_item.payment_code = Some(ExecutableDeployItem::ModuleBytes { - module_bytes: module_bytes.into(), - args, - }); - self - } - - pub fn with_empty_payment_bytes(self, args: RuntimeArgs) -> Self { - self.with_payment_bytes(vec![], args) - } - - pub fn with_payment_code>(self, file_name: T, args: RuntimeArgs) -> Self { - let module_bytes = utils::read_wasm_file_bytes(file_name); - self.with_payment_bytes(module_bytes, args) - } - - pub fn with_stored_payment_hash( - mut self, - hash: ContractHash, - entry_point: &str, - args: RuntimeArgs, - ) -> Self { - self.deploy_item.payment_code = Some(ExecutableDeployItem::StoredContractByHash { - hash, - entry_point: entry_point.into(), - args, - }); - self - } - - pub fn with_stored_payment_named_key( - mut self, - uref_name: &str, - 
entry_point_name: &str, - args: RuntimeArgs, - ) -> Self { - self.deploy_item.payment_code = Some(ExecutableDeployItem::StoredContractByName { - name: uref_name.to_owned(), - entry_point: entry_point_name.into(), - args, - }); - self - } - - pub fn with_session_bytes(mut self, module_bytes: Vec, args: RuntimeArgs) -> Self { - self.deploy_item.session_code = Some(ExecutableDeployItem::ModuleBytes { - module_bytes: module_bytes.into(), - args, - }); - self - } - - pub fn with_session_code>(self, file_name: T, args: RuntimeArgs) -> Self { - let module_bytes = utils::read_wasm_file_bytes(file_name); - self.with_session_bytes(module_bytes, args) - } - - pub fn with_transfer_args(mut self, args: RuntimeArgs) -> Self { - self.deploy_item.session_code = Some(ExecutableDeployItem::Transfer { args }); - self - } - - pub fn with_stored_session_hash( - mut self, - hash: ContractHash, - entry_point: &str, - args: RuntimeArgs, - ) -> Self { - self.deploy_item.session_code = Some(ExecutableDeployItem::StoredContractByHash { - hash, - entry_point: entry_point.into(), - args, - }); - self - } - - pub fn with_stored_session_named_key( - mut self, - name: &str, - entry_point: &str, - args: RuntimeArgs, - ) -> Self { - self.deploy_item.session_code = Some(ExecutableDeployItem::StoredContractByName { - name: name.to_owned(), - entry_point: entry_point.into(), - args, - }); - self - } - - pub fn with_stored_versioned_contract_by_name( - mut self, - name: &str, - version: Option, - entry_point: &str, - args: RuntimeArgs, - ) -> Self { - self.deploy_item.session_code = Some(ExecutableDeployItem::StoredVersionedContractByName { - name: name.to_owned(), - version, - entry_point: entry_point.to_owned(), - args, - }); - self - } - - pub fn with_stored_versioned_contract_by_hash( - mut self, - hash: HashAddr, - version: Option, - entry_point: &str, - args: RuntimeArgs, - ) -> Self { - self.deploy_item.session_code = Some(ExecutableDeployItem::StoredVersionedContractByHash { - hash: 
hash.into(), - version, - entry_point: entry_point.to_owned(), - args, - }); - self - } - - pub fn with_stored_versioned_payment_contract_by_name( - mut self, - key_name: &str, - version: Option, - entry_point: &str, - args: RuntimeArgs, - ) -> Self { - self.deploy_item.payment_code = Some(ExecutableDeployItem::StoredVersionedContractByName { - name: key_name.to_owned(), - version, - entry_point: entry_point.to_owned(), - args, - }); - self - } - - pub fn with_stored_versioned_payment_contract_by_hash( - mut self, - hash: HashAddr, - version: Option, - entry_point: &str, - args: RuntimeArgs, - ) -> Self { - self.deploy_item.payment_code = Some(ExecutableDeployItem::StoredVersionedContractByHash { - hash: hash.into(), - version, - entry_point: entry_point.to_owned(), - args, - }); - self - } - - pub fn with_authorization_keys(mut self, authorization_keys: &[AccountHash]) -> Self { - self.deploy_item.authorization_keys = authorization_keys.iter().copied().collect(); - self - } - - pub fn with_gas_price(mut self, gas_price: u64) -> Self { - self.deploy_item.gas_price = gas_price; - self - } - - pub fn with_deploy_hash(mut self, hash: [u8; 32]) -> Self { - let digest: Blake2bHash = hash.into(); - self.deploy_item.deploy_hash = DeployHash::new(digest.value()); - self - } - - pub fn build(self) -> DeployItem { - DeployItem { - address: self - .deploy_item - .address - .unwrap_or_else(|| AccountHash::new([0u8; 32])), - session: self - .deploy_item - .session_code - .expect("should have session code"), - payment: self - .deploy_item - .payment_code - .expect("should have payment code"), - gas_price: self.deploy_item.gas_price, - authorization_keys: self.deploy_item.authorization_keys, - deploy_hash: self.deploy_item.deploy_hash, - } - } -} - -impl Default for DeployItemBuilder { - fn default() -> Self { - let deploy_item = DeployItemData { - gas_price: DEFAULT_GAS_PRICE, - ..Default::default() - }; - DeployItemBuilder { deploy_item } - } -} diff --git 
a/execution_engine_testing/test_support/src/internal/exec_with_return.rs b/execution_engine_testing/test_support/src/internal/exec_with_return.rs deleted file mode 100644 index 85cefec716..0000000000 --- a/execution_engine_testing/test_support/src/internal/exec_with_return.rs +++ /dev/null @@ -1,184 +0,0 @@ -use std::{cell::RefCell, collections::BTreeSet, rc::Rc}; - -use casper_execution_engine::{ - core::{ - engine_state, - engine_state::{ - executable_deploy_item::ExecutableDeployItem, execution_effect::ExecutionEffect, - EngineConfig, - }, - execution::{self, AddressGenerator}, - runtime::{self, Runtime}, - runtime_context::RuntimeContext, - }, - shared::{gas::Gas, newtypes::CorrelationId, wasm_prep::Preprocessor}, - storage::{global_state::StateProvider, protocol_data::ProtocolData}, -}; -use casper_types::{ - account::AccountHash, bytesrepr::FromBytes, BlockTime, CLTyped, DeployHash, EntryPointType, - Key, Phase, ProtocolVersion, RuntimeArgs, URef, U512, -}; - -use crate::internal::{utils, WasmTestBuilder, DEFAULT_WASM_CONFIG}; - -use super::DEFAULT_SYSTEM_CONFIG; - -/// This function allows executing the contract stored in the given `wasm_file`, while capturing the -/// output. It is essentially the same functionality as `Executor::exec`, but the return value of -/// the contract is returned along with the effects. The purpose of this function is to test -/// installer contracts used in the new genesis process. 
-#[allow(clippy::too_many_arguments)] -pub fn exec( - config: EngineConfig, - builder: &mut WasmTestBuilder, - address: AccountHash, - wasm_file: &str, - block_time: u64, - deploy_hash: DeployHash, - entry_point_name: &str, - args: RuntimeArgs, - extra_urefs: Vec, -) -> Option<(T, Vec, ExecutionEffect)> -where - S: StateProvider, - engine_state::Error: From, - S::Error: Into, - T: FromBytes + CLTyped, -{ - let prestate = builder.get_post_state_hash(); - - let tracking_copy = Rc::new(RefCell::new( - builder - .get_engine_state() - .tracking_copy(prestate) - .unwrap() - .expect("should be able to checkout tracking copy"), - )); - - let phase = Phase::Session; - let address_generator = { - let address_generator = AddressGenerator::new(deploy_hash.as_bytes(), phase); - Rc::new(RefCell::new(address_generator)) - }; - let transfer_address_generator = { - let address_generator = AddressGenerator::new(deploy_hash.as_bytes(), phase); - Rc::new(RefCell::new(address_generator)) - }; - let gas_counter = Gas::default(); - let fn_store_id = { - let fn_store_id = AddressGenerator::new(deploy_hash.as_bytes(), phase); - Rc::new(RefCell::new(fn_store_id)) - }; - let gas_limit = Gas::new(U512::from(std::u64::MAX)); - let protocol_version = ProtocolVersion::V1_0_0; - let correlation_id = CorrelationId::new(); - let base_key = Key::Account(address); - - let account = builder.get_account(address).expect("should find account"); - - let mut named_keys = account.named_keys().clone(); - - let access_rights = { - let mut ret = runtime::extract_access_rights_from_keys(named_keys.values().cloned()); - let extras = runtime::extract_access_rights_from_urefs(extra_urefs.into_iter()); - ret.extend(extras.into_iter()); - ret - }; - - let protocol_data = { - let mint = builder.get_mint_contract_hash(); - let handle_payment = builder.get_mint_contract_hash(); - let standard_payment = builder.get_standard_payment_contract_hash(); - let auction = builder.get_auction_contract_hash(); - 
ProtocolData::new( - *DEFAULT_WASM_CONFIG, - *DEFAULT_SYSTEM_CONFIG, - mint, - handle_payment, - standard_payment, - auction, - ) - }; - - let transfers = Vec::default(); - - let context = RuntimeContext::new( - Rc::clone(&tracking_copy), - EntryPointType::Session, // Is it always? - &mut named_keys, - access_rights, - args, - BTreeSet::new(), - &account, - base_key, - BlockTime::new(block_time), - deploy_hash, - gas_limit, - gas_counter, - fn_store_id, - address_generator, - transfer_address_generator, - protocol_version, - correlation_id, - phase, - protocol_data, - transfers, - ); - - let wasm_bytes = utils::read_wasm_file_bytes(wasm_file); - let deploy_item = ExecutableDeployItem::ModuleBytes { - module_bytes: wasm_bytes.into(), - args: RuntimeArgs::new(), - }; - - let wasm_config = *DEFAULT_WASM_CONFIG; - - let preprocessor = Preprocessor::new(wasm_config); - let parity_module = deploy_item - .get_deploy_metadata( - tracking_copy, - &account, - correlation_id, - &preprocessor, - &protocol_version, - &protocol_data, - phase, - ) - .expect("should get wasm module"); - - let module = parity_module.take_module().expect("should have module"); - - let (instance, memory) = - runtime::instance_and_memory(module.clone(), protocol_version, &wasm_config) - .expect("should be able to make wasm instance from module"); - - let mut runtime = Runtime::new(config, Default::default(), memory, module, context); - - match instance.invoke_export(entry_point_name, &[], &mut runtime) { - Ok(_) => None, - Err(e) => { - if let Some(host_error) = e.as_host_error() { - // `ret` Trap is a success; downcast and attempt to extract result - let downcasted_error = host_error.downcast_ref::().unwrap(); - match downcasted_error { - execution::Error::Ret(ref ret_urefs) => { - let effect = runtime.context().effect(); - let urefs = ret_urefs.clone(); - - let value: T = runtime - .take_host_buffer() - .expect("should have return value in the host_buffer") - .into_t() - .expect("should deserialize 
return value"); - - Some((value, urefs, effect)) - } - - _ => None, - } - } else { - None - } - } - } -} diff --git a/execution_engine_testing/test_support/src/internal/execute_request_builder.rs b/execution_engine_testing/test_support/src/internal/execute_request_builder.rs deleted file mode 100644 index 8200078435..0000000000 --- a/execution_engine_testing/test_support/src/internal/execute_request_builder.rs +++ /dev/null @@ -1,150 +0,0 @@ -use std::convert::TryInto; - -use rand::Rng; - -use casper_execution_engine::core::engine_state::{ - deploy_item::DeployItem, execute_request::ExecuteRequest, -}; -use casper_types::{ - account::AccountHash, runtime_args, ContractHash, ContractVersion, ProtocolVersion, RuntimeArgs, -}; - -use crate::internal::{ - DeployItemBuilder, DEFAULT_BLOCK_TIME, DEFAULT_PAYMENT, DEFAULT_PROPOSER_PUBLIC_KEY, -}; - -const ARG_AMOUNT: &str = "amount"; - -#[derive(Debug)] -pub struct ExecuteRequestBuilder { - execute_request: ExecuteRequest, -} - -impl ExecuteRequestBuilder { - pub fn new() -> Self { - Default::default() - } - - pub fn from_deploy_item(deploy_item: DeployItem) -> Self { - ExecuteRequestBuilder::new().push_deploy(deploy_item) - } - - pub fn push_deploy(mut self, deploy: DeployItem) -> Self { - self.execute_request.deploys.push(deploy); - self - } - - pub fn with_pre_state_hash(mut self, pre_state_hash: &[u8]) -> Self { - self.execute_request.parent_state_hash = pre_state_hash.try_into().unwrap(); - self - } - - pub fn with_block_time(mut self, block_time: u64) -> Self { - self.execute_request.block_time = block_time; - self - } - - pub fn with_protocol_version(mut self, protocol_version: ProtocolVersion) -> Self { - self.execute_request.protocol_version = protocol_version; - self - } - - pub fn with_proposer(mut self, proposer: casper_types::PublicKey) -> Self { - self.execute_request.proposer = proposer; - self - } - - pub fn build(self) -> ExecuteRequest { - self.execute_request - } - - pub fn standard( - account_hash: 
AccountHash, - session_file: &str, - session_args: RuntimeArgs, - ) -> Self { - let mut rng = rand::thread_rng(); - let deploy_hash: [u8; 32] = rng.gen(); - - let deploy = DeployItemBuilder::new() - .with_address(account_hash) - .with_session_code(session_file, session_args) - .with_empty_payment_bytes(runtime_args! { - ARG_AMOUNT => *DEFAULT_PAYMENT - }) - .with_authorization_keys(&[account_hash]) - .with_deploy_hash(deploy_hash) - .build(); - - ExecuteRequestBuilder::new().push_deploy(deploy) - } - - pub fn contract_call_by_hash( - sender: AccountHash, - contract_hash: ContractHash, - entry_point: &str, - args: RuntimeArgs, - ) -> Self { - let mut rng = rand::thread_rng(); - let deploy_hash = rng.gen(); - - let deploy = DeployItemBuilder::new() - .with_address(sender) - .with_stored_session_hash(contract_hash, entry_point, args) - .with_empty_payment_bytes(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, }) - .with_authorization_keys(&[sender]) - .with_deploy_hash(deploy_hash) - .build(); - - ExecuteRequestBuilder::new().push_deploy(deploy) - } - - /// Calls a versioned contract from contract package hash key_name - pub fn versioned_contract_call_by_hash_key_name( - sender: AccountHash, - hash_key_name: &str, - version: Option, - entry_point_name: &str, - args: RuntimeArgs, - ) -> Self { - let mut rng = rand::thread_rng(); - let deploy_hash = rng.gen(); - - let deploy = DeployItemBuilder::new() - .with_address(sender) - .with_stored_versioned_contract_by_name(hash_key_name, version, entry_point_name, args) - .with_empty_payment_bytes(runtime_args! 
{ ARG_AMOUNT => *DEFAULT_PAYMENT, }) - .with_authorization_keys(&[sender]) - .with_deploy_hash(deploy_hash) - .build(); - - ExecuteRequestBuilder::new().push_deploy(deploy) - } - - pub fn transfer(sender: AccountHash, transfer_args: RuntimeArgs) -> Self { - let mut rng = rand::thread_rng(); - let deploy_hash = rng.gen(); - - let deploy_item = DeployItemBuilder::new() - .with_address(sender) - .with_empty_payment_bytes(runtime_args! {}) - .with_transfer_args(transfer_args) - .with_authorization_keys(&[sender]) - .with_deploy_hash(deploy_hash) - .build(); - - ExecuteRequestBuilder::from_deploy_item(deploy_item) - } -} - -impl Default for ExecuteRequestBuilder { - fn default() -> Self { - let execute_request = ExecuteRequest { - block_time: DEFAULT_BLOCK_TIME, - protocol_version: ProtocolVersion::V1_0_0, - proposer: DEFAULT_PROPOSER_PUBLIC_KEY.clone(), - ..Default::default() - }; - ExecuteRequestBuilder { execute_request } - } -} diff --git a/execution_engine_testing/test_support/src/internal/mod.rs b/execution_engine_testing/test_support/src/internal/mod.rs deleted file mode 100644 index cdcd6cacb9..0000000000 --- a/execution_engine_testing/test_support/src/internal/mod.rs +++ /dev/null @@ -1,129 +0,0 @@ -mod additive_map_diff; -mod deploy_item_builder; -pub mod exec_with_return; -mod execute_request_builder; -mod step_request_builder; -mod upgrade_request_builder; -pub mod utils; -mod wasm_test_builder; - -use num_rational::Ratio; -use once_cell::sync::Lazy; - -use casper_execution_engine::{ - core::engine_state::{ - genesis::{ExecConfig, GenesisAccount, GenesisConfig}, - run_genesis_request::RunGenesisRequest, - }, - shared::{ - motes::Motes, newtypes::Blake2bHash, system_config::SystemConfig, wasm_config::WasmConfig, - }, -}; -use casper_types::{account::AccountHash, ProtocolVersion, PublicKey, SecretKey, U512}; - -use super::DEFAULT_ACCOUNT_INITIAL_BALANCE; - -pub use additive_map_diff::AdditiveMapDiff; -pub use deploy_item_builder::DeployItemBuilder; -pub use 
execute_request_builder::ExecuteRequestBuilder; -pub use step_request_builder::StepRequestBuilder; -pub use upgrade_request_builder::UpgradeRequestBuilder; -pub use wasm_test_builder::{ - InMemoryWasmTestBuilder, LmdbWasmTestBuilder, WasmTestBuilder, WasmTestResult, -}; - -pub const DEFAULT_VALIDATOR_SLOTS: u32 = 5; -pub const DEFAULT_AUCTION_DELAY: u64 = 3; -/// Default lock-in period of 90 days -pub const DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS: u64 = 90 * 24 * 60 * 60 * 1000; -/// Default number of eras that need to pass to be able to withdraw unbonded funds. -pub const DEFAULT_UNBONDING_DELAY: u64 = 14; - -/// Default round seigniorage rate represented as a fractional number. -/// -/// Annual issuance: 2% -/// Minimum round exponent: 14 -/// Ticks per year: 31536000000 -/// -/// (1+0.02)^((2^14)/31536000000)-1 is expressed as a fraction below. -pub const DEFAULT_ROUND_SEIGNIORAGE_RATE: Ratio = Ratio::new_raw(6414, 623437335209); - -pub const DEFAULT_CHAIN_NAME: &str = "gerald"; -pub const DEFAULT_GENESIS_TIMESTAMP_MILLIS: u64 = 0; -pub const DEFAULT_BLOCK_TIME: u64 = 0; -pub const DEFAULT_GAS_PRICE: u64 = 1; -pub const MOCKED_ACCOUNT_ADDRESS: AccountHash = AccountHash::new([48u8; 32]); - -pub const ARG_AMOUNT: &str = "amount"; - -pub const TIMESTAMP_MILLIS_INCREMENT: u64 = 30000; // 30 seconds - -// NOTE: Those values could be constants but are kept as once_cell::sync::Lazy to avoid changes of -// `*FOO` into `FOO` back and forth. -pub static DEFAULT_GENESIS_CONFIG_HASH: Lazy = Lazy::new(|| [42; 32].into()); -pub static DEFAULT_ACCOUNT_PUBLIC_KEY: Lazy = Lazy::new(|| { - SecretKey::ed25519_from_bytes([199; SecretKey::ED25519_LENGTH]) - .unwrap() - .into() -}); -pub static DEFAULT_ACCOUNT_ADDR: Lazy = - Lazy::new(|| AccountHash::from(&*DEFAULT_ACCOUNT_PUBLIC_KEY)); -// Declaring DEFAULT_ACCOUNT_KEY as *DEFAULT_ACCOUNT_ADDR causes tests to stall. 
-pub static DEFAULT_ACCOUNT_KEY: Lazy = - Lazy::new(|| AccountHash::from(&*DEFAULT_ACCOUNT_PUBLIC_KEY)); -pub static DEFAULT_PROPOSER_PUBLIC_KEY: Lazy = Lazy::new(|| { - SecretKey::ed25519_from_bytes([198; SecretKey::ED25519_LENGTH]) - .unwrap() - .into() -}); -pub static DEFAULT_PROPOSER_ADDR: Lazy = - Lazy::new(|| AccountHash::from(&*DEFAULT_PROPOSER_PUBLIC_KEY)); -pub static DEFAULT_ACCOUNTS: Lazy> = Lazy::new(|| { - let mut ret = Vec::new(); - let genesis_account = GenesisAccount::account( - DEFAULT_ACCOUNT_PUBLIC_KEY.clone(), - Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE.into()), - None, - ); - ret.push(genesis_account); - let proposer_account = GenesisAccount::account( - DEFAULT_PROPOSER_PUBLIC_KEY.clone(), - Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE.into()), - None, - ); - ret.push(proposer_account); - ret -}); -pub static DEFAULT_PROTOCOL_VERSION: Lazy = Lazy::new(|| ProtocolVersion::V1_0_0); -pub static DEFAULT_PAYMENT: Lazy = Lazy::new(|| U512::from(1_500_000_000_000u64)); -pub static DEFAULT_WASM_CONFIG: Lazy = Lazy::new(WasmConfig::default); -pub static DEFAULT_SYSTEM_CONFIG: Lazy = Lazy::new(SystemConfig::default); -pub static DEFAULT_EXEC_CONFIG: Lazy = Lazy::new(|| { - ExecConfig::new( - DEFAULT_ACCOUNTS.clone(), - *DEFAULT_WASM_CONFIG, - *DEFAULT_SYSTEM_CONFIG, - DEFAULT_VALIDATOR_SLOTS, - DEFAULT_AUCTION_DELAY, - DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS, - DEFAULT_ROUND_SEIGNIORAGE_RATE, - DEFAULT_UNBONDING_DELAY, - DEFAULT_GENESIS_TIMESTAMP_MILLIS, - ) -}); -pub static DEFAULT_GENESIS_CONFIG: Lazy = Lazy::new(|| { - GenesisConfig::new( - DEFAULT_CHAIN_NAME.to_string(), - DEFAULT_GENESIS_TIMESTAMP_MILLIS, - *DEFAULT_PROTOCOL_VERSION, - DEFAULT_EXEC_CONFIG.clone(), - ) -}); -pub static DEFAULT_RUN_GENESIS_REQUEST: Lazy = Lazy::new(|| { - RunGenesisRequest::new( - *DEFAULT_GENESIS_CONFIG_HASH, - *DEFAULT_PROTOCOL_VERSION, - DEFAULT_EXEC_CONFIG.clone(), - ) -}); -pub static SYSTEM_ADDR: Lazy = Lazy::new(|| PublicKey::System.to_account_hash()); diff --git 
a/execution_engine_testing/test_support/src/internal/step_request_builder.rs b/execution_engine_testing/test_support/src/internal/step_request_builder.rs deleted file mode 100644 index 4ec41745b9..0000000000 --- a/execution_engine_testing/test_support/src/internal/step_request_builder.rs +++ /dev/null @@ -1,94 +0,0 @@ -use casper_execution_engine::{ - core::engine_state::{ - step::{EvictItem, RewardItem, SlashItem}, - StepRequest, - }, - shared::newtypes::Blake2bHash, -}; -use casper_types::{EraId, ProtocolVersion}; - -#[derive(Debug)] -pub struct StepRequestBuilder { - parent_state_hash: Blake2bHash, - protocol_version: ProtocolVersion, - slash_items: Vec, - reward_items: Vec, - evict_items: Vec, - run_auction: bool, - next_era_id: EraId, - era_end_timestamp_millis: u64, -} - -impl StepRequestBuilder { - pub fn new() -> Self { - Default::default() - } - - pub fn with_parent_state_hash(mut self, parent_state_hash: Blake2bHash) -> Self { - self.parent_state_hash = parent_state_hash; - self - } - - pub fn with_protocol_version(mut self, protocol_version: ProtocolVersion) -> Self { - self.protocol_version = protocol_version; - self - } - - pub fn with_slash_item(mut self, slash_item: SlashItem) -> Self { - self.slash_items.push(slash_item); - self - } - - pub fn with_reward_item(mut self, reward_item: RewardItem) -> Self { - self.reward_items.push(reward_item); - self - } - - pub fn with_evict_item(mut self, evict_item: EvictItem) -> Self { - self.evict_items.push(evict_item); - self - } - - pub fn with_run_auction(mut self, run_auction: bool) -> Self { - self.run_auction = run_auction; - self - } - - pub fn with_next_era_id(mut self, next_era_id: EraId) -> Self { - self.next_era_id = next_era_id; - self - } - - pub fn with_era_end_timestamp_millis(mut self, era_end_timestamp_millis: u64) -> Self { - self.era_end_timestamp_millis = era_end_timestamp_millis; - self - } - - pub fn build(self) -> StepRequest { - StepRequest::new( - self.parent_state_hash, - 
self.protocol_version, - self.slash_items, - self.reward_items, - self.evict_items, - self.run_auction, - self.next_era_id, - self.era_end_timestamp_millis, - ) - } -} - -impl Default for StepRequestBuilder { - fn default() -> Self { - StepRequestBuilder { - parent_state_hash: Default::default(), - protocol_version: Default::default(), - slash_items: Default::default(), - reward_items: Default::default(), - evict_items: Default::default(), - run_auction: true, //<-- run_auction by default - next_era_id: Default::default(), - era_end_timestamp_millis: Default::default(), - } - } -} diff --git a/execution_engine_testing/test_support/src/internal/upgrade_request_builder.rs b/execution_engine_testing/test_support/src/internal/upgrade_request_builder.rs deleted file mode 100644 index a78ea52157..0000000000 --- a/execution_engine_testing/test_support/src/internal/upgrade_request_builder.rs +++ /dev/null @@ -1,116 +0,0 @@ -use std::collections::BTreeMap; - -use num_rational::Ratio; - -use casper_execution_engine::{ - core::engine_state::UpgradeConfig, - shared::{ - newtypes::Blake2bHash, stored_value::StoredValue, system_config::SystemConfig, - wasm_config::WasmConfig, - }, -}; -use casper_types::{EraId, Key, ProtocolVersion}; - -#[derive(Default)] -pub struct UpgradeRequestBuilder { - pre_state_hash: Blake2bHash, - current_protocol_version: ProtocolVersion, - new_protocol_version: ProtocolVersion, - new_wasm_config: Option, - new_system_config: Option, - activation_point: Option, - new_validator_slots: Option, - new_auction_delay: Option, - new_locked_funds_period_millis: Option, - new_round_seigniorage_rate: Option>, - new_unbonding_delay: Option, - global_state_update: BTreeMap, -} - -impl UpgradeRequestBuilder { - pub fn new() -> Self { - Default::default() - } - - pub fn with_pre_state_hash(mut self, pre_state_hash: Blake2bHash) -> Self { - self.pre_state_hash = pre_state_hash; - self - } - - pub fn with_current_protocol_version(mut self, protocol_version: 
ProtocolVersion) -> Self { - self.current_protocol_version = protocol_version; - self - } - - pub fn with_new_protocol_version(mut self, protocol_version: ProtocolVersion) -> Self { - self.new_protocol_version = protocol_version; - self - } - - pub fn with_new_validator_slots(mut self, new_validator_slots: u32) -> Self { - self.new_validator_slots = Some(new_validator_slots); - self - } - - pub fn with_new_wasm_config(mut self, opcode_costs: WasmConfig) -> Self { - self.new_wasm_config = Some(opcode_costs); - self - } - pub fn with_new_auction_delay(mut self, new_auction_delay: u64) -> Self { - self.new_auction_delay = Some(new_auction_delay); - self - } - - pub fn with_new_locked_funds_period_millis( - mut self, - new_locked_funds_period_millis: u64, - ) -> Self { - self.new_locked_funds_period_millis = Some(new_locked_funds_period_millis); - self - } - - pub fn with_new_round_seigniorage_rate(mut self, rate: Ratio) -> Self { - self.new_round_seigniorage_rate = Some(rate); - self - } - - pub fn with_new_unbonding_delay(mut self, unbonding_delay: u64) -> Self { - self.new_unbonding_delay = Some(unbonding_delay); - self - } - - pub fn with_new_system_config(mut self, new_system_config: SystemConfig) -> Self { - self.new_system_config = Some(new_system_config); - self - } - - pub fn with_global_state_update( - mut self, - global_state_update: BTreeMap, - ) -> Self { - self.global_state_update = global_state_update; - self - } - - pub fn with_activation_point(mut self, activation_point: EraId) -> Self { - self.activation_point = Some(activation_point); - self - } - - pub fn build(self) -> UpgradeConfig { - UpgradeConfig::new( - self.pre_state_hash, - self.current_protocol_version, - self.new_protocol_version, - self.new_wasm_config, - self.new_system_config, - self.activation_point, - self.new_validator_slots, - self.new_auction_delay, - self.new_locked_funds_period_millis, - self.new_round_seigniorage_rate, - self.new_unbonding_delay, - self.global_state_update, - ) 
- } -} diff --git a/execution_engine_testing/test_support/src/internal/utils.rs b/execution_engine_testing/test_support/src/internal/utils.rs deleted file mode 100644 index 8fa2e7c4b0..0000000000 --- a/execution_engine_testing/test_support/src/internal/utils.rs +++ /dev/null @@ -1,223 +0,0 @@ -use std::{ - env, fs, - path::{Path, PathBuf}, - rc::Rc, -}; - -use once_cell::sync::Lazy; - -use casper_execution_engine::{ - core::engine_state::{ - execution_result::ExecutionResult, - genesis::{ExecConfig, GenesisAccount, GenesisConfig}, - run_genesis_request::RunGenesisRequest, - Error, - }, - shared::{ - account::Account, additive_map::AdditiveMap, gas::Gas, stored_value::StoredValue, - transform::Transform, - }, -}; -use casper_types::Key; - -use super::{DEFAULT_ROUND_SEIGNIORAGE_RATE, DEFAULT_SYSTEM_CONFIG, DEFAULT_UNBONDING_DELAY}; -use crate::internal::{ - DEFAULT_AUCTION_DELAY, DEFAULT_CHAIN_NAME, DEFAULT_GENESIS_CONFIG_HASH, - DEFAULT_GENESIS_TIMESTAMP_MILLIS, DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS, DEFAULT_PROTOCOL_VERSION, - DEFAULT_VALIDATOR_SLOTS, DEFAULT_WASM_CONFIG, -}; - -static RUST_WORKSPACE_PATH: Lazy = Lazy::new(|| { - let path = Path::new(env!("CARGO_MANIFEST_DIR")) - .parent() - .and_then(Path::parent) - .expect("CARGO_MANIFEST_DIR should have parent"); - assert!( - path.exists(), - "Workspace path {} does not exists", - path.display() - ); - path.to_path_buf() -}); -// The location of compiled Wasm files if compiled from the Rust sources within the casper-node -// repo, i.e. 'casper-node/target/wasm32-unknown-unknown/release/'. -static RUST_WORKSPACE_WASM_PATH: Lazy = Lazy::new(|| { - let path = RUST_WORKSPACE_PATH - .join("target") - .join("wasm32-unknown-unknown") - .join("release"); - assert!( - path.exists() || RUST_TOOL_WASM_PATH.exists(), - "Rust Wasm path {} does not exists", - path.display() - ); - path -}); -// The location of compiled Wasm files if running from within the 'tests' crate generated by the -// cargo-casper tool, i.e. 'wasm/'. 
-static RUST_TOOL_WASM_PATH: Lazy = Lazy::new(|| { - env::current_dir() - .expect("should get current working dir") - .join("wasm") -}); -// The location of compiled Wasm files if compiled from the Rust sources within the casper-node -// repo where `CARGO_TARGET_DIR` is set, i.e. -// '/wasm32-unknown-unknown/release/'. -static MAYBE_CARGO_TARGET_DIR_WASM_PATH: Lazy> = Lazy::new(|| { - let maybe_target = std::env::var("CARGO_TARGET_DIR").ok(); - maybe_target.as_ref().map(|path| { - Path::new(path) - .join("wasm32-unknown-unknown") - .join("release") - }) -}); -// The location of compiled Wasm files if compiled from the Rust sources within the casper-node -// repo, i.e. 'casper-node/target/wasm32-unknown-unknown/release/'. -#[cfg(feature = "use-as-wasm")] -static ASSEMBLY_SCRIPT_WORKSPACE_WASM_PATH: Lazy = Lazy::new(|| { - let path = RUST_WORKSPACE_PATH.join("target_as"); - - assert!( - path.exists(), - "AssemblyScript WASM path {} does not exist.", - path.display() - ); - path -}); -static WASM_PATHS: Lazy> = Lazy::new(get_compiled_wasm_paths); - -/// Constructs a list of paths that should be considered while looking for a compiled wasm file. 
-fn get_compiled_wasm_paths() -> Vec { - let mut ret = vec![ - // Contracts compiled with typescript are tried first - #[cfg(feature = "use-as-wasm")] - ASSEMBLY_SCRIPT_WORKSPACE_WASM_PATH.clone(), - RUST_WORKSPACE_WASM_PATH.clone(), - RUST_TOOL_WASM_PATH.clone(), - ]; - if let Some(cargo_target_dir_wasm_path) = &*MAYBE_CARGO_TARGET_DIR_WASM_PATH { - ret.push(cargo_target_dir_wasm_path.clone()); - }; - ret -} - -/// Reads a given compiled contract file based on path -pub fn read_wasm_file_bytes>(contract_file: T) -> Vec { - let mut attempted_paths = vec![]; - - if contract_file.as_ref().is_relative() { - // Find first path to a given file found in a list of paths - for wasm_path in WASM_PATHS.iter() { - let mut filename = wasm_path.clone(); - filename.push(contract_file.as_ref()); - if let Ok(wasm_bytes) = fs::read(&filename) { - return wasm_bytes; - } - attempted_paths.push(filename); - } - } - // Try just opening in case the arg is a valid path relative to current working dir, or is a - // valid absolute path. - if let Ok(wasm_bytes) = fs::read(contract_file.as_ref()) { - return wasm_bytes; - } - attempted_paths.push(contract_file.as_ref().to_owned()); - - let mut error_msg = - "\nFailed to open compiled Wasm file. 
Tried the following locations:\n".to_string(); - for attempted_path in attempted_paths { - error_msg = format!("{} - {}\n", error_msg, attempted_path.display()); - } - - panic!("{}\n", error_msg); -} - -pub fn create_exec_config(accounts: Vec) -> ExecConfig { - let wasm_config = *DEFAULT_WASM_CONFIG; - let system_config = *DEFAULT_SYSTEM_CONFIG; - let validator_slots = DEFAULT_VALIDATOR_SLOTS; - let auction_delay = DEFAULT_AUCTION_DELAY; - let locked_funds_period_millis = DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS; - let round_seigniorage_rate = DEFAULT_ROUND_SEIGNIORAGE_RATE; - let unbonding_delay = DEFAULT_UNBONDING_DELAY; - let genesis_timestamp_millis = DEFAULT_GENESIS_TIMESTAMP_MILLIS; - ExecConfig::new( - accounts, - wasm_config, - system_config, - validator_slots, - auction_delay, - locked_funds_period_millis, - round_seigniorage_rate, - unbonding_delay, - genesis_timestamp_millis, - ) -} - -pub fn create_genesis_config(accounts: Vec) -> GenesisConfig { - let name = DEFAULT_CHAIN_NAME.to_string(); - let timestamp = DEFAULT_GENESIS_TIMESTAMP_MILLIS; - let protocol_version = *DEFAULT_PROTOCOL_VERSION; - let exec_config = create_exec_config(accounts); - - GenesisConfig::new(name, timestamp, protocol_version, exec_config) -} - -pub fn create_run_genesis_request(accounts: Vec) -> RunGenesisRequest { - let exec_config = create_exec_config(accounts); - RunGenesisRequest::new( - *DEFAULT_GENESIS_CONFIG_HASH, - *DEFAULT_PROTOCOL_VERSION, - exec_config, - ) -} - -pub fn get_exec_costs, I: IntoIterator>( - exec_response: I, -) -> Vec { - exec_response - .into_iter() - .map(|res| res.as_ref().cost()) - .collect() -} - -pub fn get_success_result(response: &[Rc]) -> &ExecutionResult { - &*response.get(0).expect("should have a result") -} - -pub fn get_precondition_failure(response: &[Rc]) -> &Error { - let result = response.get(0).expect("should have a result"); - assert!( - result.has_precondition_failure(), - "should be a precondition failure" - ); - 
result.as_error().expect("should have an error") -} - -pub fn get_error_message, I: IntoIterator>( - execution_result: I, -) -> String { - let errors = execution_result - .into_iter() - .enumerate() - .filter_map(|(i, result)| { - if let ExecutionResult::Failure { error, .. } = result.as_ref() { - Some(format!("{}: {:?}", i, error)) - } else { - None - } - }) - .collect::>(); - errors.join("\n") -} - -#[allow(clippy::implicit_hasher)] -pub fn get_account(transforms: &AdditiveMap, account: &Key) -> Option { - transforms.get(account).and_then(|transform| { - if let Transform::Write(StoredValue::Account(account)) = transform { - Some(account.to_owned()) - } else { - None - } - }) -} diff --git a/execution_engine_testing/test_support/src/internal/wasm_test_builder.rs b/execution_engine_testing/test_support/src/internal/wasm_test_builder.rs deleted file mode 100644 index 860ebb4cb6..0000000000 --- a/execution_engine_testing/test_support/src/internal/wasm_test_builder.rs +++ /dev/null @@ -1,957 +0,0 @@ -use std::{ - collections::BTreeMap, - convert::{TryFrom, TryInto}, - ffi::OsStr, - fs, - ops::Deref, - path::PathBuf, - rc::Rc, - sync::Arc, -}; - -use lmdb::DatabaseFlags; -use log::LevelFilter; - -use bytesrepr::FromBytes; -use casper_execution_engine::{ - core::{ - engine_state, - engine_state::{ - era_validators::GetEraValidatorsRequest, - execute_request::ExecuteRequest, - execution_result::ExecutionResult, - run_genesis_request::RunGenesisRequest, - step::{StepRequest, StepResult}, - BalanceResult, EngineConfig, EngineState, GenesisResult, GetBidsRequest, QueryRequest, - QueryResult, UpgradeConfig, UpgradeResult, - }, - execution, - }, - shared::{ - account::Account, - additive_map::AdditiveMap, - gas::Gas, - logging::{self, Settings, Style}, - newtypes::{Blake2bHash, CorrelationId}, - stored_value::StoredValue, - transform::Transform, - utils::OS_PAGE_SIZE, - }, - storage::{ - global_state::{ - in_memory::InMemoryGlobalState, lmdb::LmdbGlobalState, CommitResult, 
StateProvider, - StateReader, - }, - protocol_data_store::lmdb::LmdbProtocolDataStore, - transaction_source::lmdb::LmdbEnvironment, - trie::merkle_proof::TrieMerkleProof, - trie_store::lmdb::LmdbTrieStore, - }, -}; -use casper_types::{ - account::AccountHash, - bytesrepr::{self}, - runtime_args, - system::{ - auction::{ - Bids, EraValidators, SeigniorageRecipientsSnapshot, UnbondingPurses, ValidatorWeights, - ARG_ERA_END_TIMESTAMP_MILLIS, ARG_EVICTED_VALIDATORS, AUCTION_DELAY_KEY, ERA_ID_KEY, - METHOD_RUN_AUCTION, - }, - mint::TOTAL_SUPPLY_KEY, - }, - CLTyped, CLValue, Contract, ContractHash, ContractPackage, ContractPackageHash, ContractWasm, - DeployHash, DeployInfo, EraId, Key, KeyTag, PublicKey, RuntimeArgs, Transfer, TransferAddr, - URef, U512, -}; - -use crate::internal::{ - utils, ExecuteRequestBuilder, DEFAULT_PROPOSER_ADDR, DEFAULT_PROTOCOL_VERSION, SYSTEM_ADDR, -}; - -/// LMDB initial map size is calculated based on DEFAULT_LMDB_PAGES and systems page size. -/// -/// This default value should give 50MiB initial map size by default. -const DEFAULT_LMDB_PAGES: usize = 128_000; - -/// LDMB max readers -/// -/// The default value is chosen to be the same as the node itself. -const DEFAULT_MAX_READERS: u32 = 512; - -/// This is appended to the data dir path provided to the `LmdbWasmTestBuilder`". -const GLOBAL_STATE_DIR: &str = "global_state"; - -pub type InMemoryWasmTestBuilder = WasmTestBuilder; -pub type LmdbWasmTestBuilder = WasmTestBuilder; - -/// Builder for simple WASM test -pub struct WasmTestBuilder { - /// [`EngineState`] is wrapped in [`Rc`] to work around a missing [`Clone`] implementation - engine_state: Rc>, - /// [`ExecutionResult`] is wrapped in [`Rc`] to work around a missing [`Clone`] implementation - exec_results: Vec>>, - upgrade_results: Vec>, - genesis_hash: Option, - post_state_hash: Option, - /// Cached transform maps after subsequent successful runs i.e. `transforms[0]` is for first - /// exec call etc. 
- transforms: Vec>, - /// Cached genesis transforms - genesis_account: Option, - /// Genesis transforms - genesis_transforms: Option>, - /// Mint contract key - mint_contract_hash: Option, - /// Handle payment contract key - handle_payment_contract_hash: Option, - /// Standard payment contract key - standard_payment_hash: Option, - /// Auction contract key - auction_contract_hash: Option, -} - -impl WasmTestBuilder { - fn initialize_logging() { - let log_settings = Settings::new(LevelFilter::Error).with_style(Style::HumanReadable); - let _ = logging::initialize(log_settings); - } -} - -impl Default for InMemoryWasmTestBuilder { - fn default() -> Self { - Self::initialize_logging(); - let engine_config = EngineConfig::new(); - - let global_state = InMemoryGlobalState::empty().expect("should create global state"); - let engine_state = EngineState::new(global_state, engine_config); - - WasmTestBuilder { - engine_state: Rc::new(engine_state), - exec_results: Vec::new(), - upgrade_results: Vec::new(), - genesis_hash: None, - post_state_hash: None, - transforms: Vec::new(), - genesis_account: None, - genesis_transforms: None, - mint_contract_hash: None, - handle_payment_contract_hash: None, - standard_payment_hash: None, - auction_contract_hash: None, - } - } -} - -// TODO: Deriving `Clone` for `WasmTestBuilder` doesn't work correctly (unsure why), so -// implemented by hand here. Try to derive in the future with a different compiler version. 
-impl Clone for WasmTestBuilder { - fn clone(&self) -> Self { - WasmTestBuilder { - engine_state: Rc::clone(&self.engine_state), - exec_results: self.exec_results.clone(), - upgrade_results: self.upgrade_results.clone(), - genesis_hash: self.genesis_hash, - post_state_hash: self.post_state_hash, - transforms: self.transforms.clone(), - genesis_account: self.genesis_account.clone(), - genesis_transforms: self.genesis_transforms.clone(), - mint_contract_hash: self.mint_contract_hash, - handle_payment_contract_hash: self.handle_payment_contract_hash, - standard_payment_hash: self.standard_payment_hash, - auction_contract_hash: self.auction_contract_hash, - } - } -} - -/// A wrapper type to disambiguate builder from an actual result -#[derive(Clone)] -pub struct WasmTestResult(WasmTestBuilder); - -impl WasmTestResult { - /// Access the builder - pub fn builder(&self) -> &WasmTestBuilder { - &self.0 - } -} - -impl InMemoryWasmTestBuilder { - pub fn new( - global_state: InMemoryGlobalState, - engine_config: EngineConfig, - post_state_hash: Blake2bHash, - ) -> Self { - Self::initialize_logging(); - let engine_state = EngineState::new(global_state, engine_config); - WasmTestBuilder { - engine_state: Rc::new(engine_state), - genesis_hash: Some(post_state_hash), - post_state_hash: Some(post_state_hash), - ..Default::default() - } - } -} - -impl LmdbWasmTestBuilder { - pub fn new_with_config + ?Sized>( - data_dir: &T, - engine_config: EngineConfig, - ) -> Self { - Self::initialize_logging(); - let page_size = *OS_PAGE_SIZE; - let global_state_dir = Self::create_and_get_global_state_dir(data_dir); - let environment = Arc::new( - LmdbEnvironment::new( - &global_state_dir, - page_size * DEFAULT_LMDB_PAGES, - DEFAULT_MAX_READERS, - ) - .expect("should create LmdbEnvironment"), - ); - let trie_store = Arc::new( - LmdbTrieStore::new(&environment, None, DatabaseFlags::empty()) - .expect("should create LmdbTrieStore"), - ); - let protocol_data_store = Arc::new( - 
LmdbProtocolDataStore::new(&environment, None, DatabaseFlags::empty()) - .expect("should create LmdbProtocolDataStore"), - ); - let global_state = LmdbGlobalState::empty(environment, trie_store, protocol_data_store) - .expect("should create LmdbGlobalState"); - let engine_state = EngineState::new(global_state, engine_config); - WasmTestBuilder { - engine_state: Rc::new(engine_state), - exec_results: Vec::new(), - upgrade_results: Vec::new(), - genesis_hash: None, - post_state_hash: None, - transforms: Vec::new(), - genesis_account: None, - genesis_transforms: None, - mint_contract_hash: None, - handle_payment_contract_hash: None, - standard_payment_hash: None, - auction_contract_hash: None, - } - } - - pub fn new + ?Sized>(data_dir: &T) -> Self { - Self::new_with_config(data_dir, Default::default()) - } - - /// Creates new instance of builder and applies values only which allows the engine state to be - /// swapped with a new one, possibly after running genesis once and reusing existing database - /// (i.e. LMDB). - pub fn new_with_config_and_result + ?Sized>( - data_dir: &T, - engine_config: EngineConfig, - result: &WasmTestResult, - ) -> Self { - let mut builder = Self::new_with_config(data_dir, engine_config); - // Applies existing properties from gi - builder.genesis_hash = result.0.genesis_hash; - builder.post_state_hash = result.0.post_state_hash; - builder.mint_contract_hash = result.0.mint_contract_hash; - builder.handle_payment_contract_hash = result.0.handle_payment_contract_hash; - builder - } - - /// Creates a new instance of builder using the supplied configurations, opening wrapped LMDBs - /// (e.g. in the Trie and Data stores) rather than creating them. 
- pub fn open + ?Sized>( - data_dir: &T, - engine_config: EngineConfig, - post_state_hash: Blake2bHash, - ) -> Self { - Self::initialize_logging(); - let page_size = *OS_PAGE_SIZE; - let global_state_dir = Self::create_and_get_global_state_dir(data_dir); - let environment = Arc::new( - LmdbEnvironment::new( - &global_state_dir, - page_size * DEFAULT_LMDB_PAGES, - DEFAULT_MAX_READERS, - ) - .expect("should create LmdbEnvironment"), - ); - let trie_store = - Arc::new(LmdbTrieStore::open(&environment, None).expect("should open LmdbTrieStore")); - let protocol_data_store = Arc::new( - LmdbProtocolDataStore::open(&environment, None) - .expect("should open LmdbProtocolDataStore"), - ); - let global_state = LmdbGlobalState::empty(environment, trie_store, protocol_data_store) - .expect("should create LmdbGlobalState"); - let engine_state = EngineState::new(global_state, engine_config); - WasmTestBuilder { - engine_state: Rc::new(engine_state), - exec_results: Vec::new(), - upgrade_results: Vec::new(), - genesis_hash: None, - post_state_hash: Some(post_state_hash), - transforms: Vec::new(), - genesis_account: None, - genesis_transforms: None, - mint_contract_hash: None, - handle_payment_contract_hash: None, - standard_payment_hash: None, - auction_contract_hash: None, - } - } - - fn create_and_get_global_state_dir + ?Sized>(data_dir: &T) -> PathBuf { - let global_state_path = { - let mut path = PathBuf::from(data_dir); - path.push(GLOBAL_STATE_DIR); - path - }; - fs::create_dir_all(&global_state_path) - .unwrap_or_else(|_| panic!("Expected to create {}", global_state_path.display())); - global_state_path - } -} - -impl WasmTestBuilder -where - S: StateProvider, - engine_state::Error: From, - S::Error: Into, -{ - /// Carries on attributes from TestResult for further executions - pub fn from_result(result: WasmTestResult) -> Self { - WasmTestBuilder { - engine_state: result.0.engine_state, - exec_results: Vec::new(), - upgrade_results: Vec::new(), - genesis_hash: 
result.0.genesis_hash, - post_state_hash: result.0.post_state_hash, - transforms: Vec::new(), - genesis_account: result.0.genesis_account, - mint_contract_hash: result.0.mint_contract_hash, - handle_payment_contract_hash: result.0.handle_payment_contract_hash, - standard_payment_hash: result.0.standard_payment_hash, - auction_contract_hash: result.0.auction_contract_hash, - genesis_transforms: result.0.genesis_transforms, - } - } - - pub fn run_genesis(&mut self, run_genesis_request: &RunGenesisRequest) -> &mut Self { - let system_account = Key::Account(PublicKey::System.to_account_hash()); - - let genesis_result = self - .engine_state - .commit_genesis( - CorrelationId::new(), - run_genesis_request.genesis_config_hash(), - run_genesis_request.protocol_version(), - run_genesis_request.ee_config(), - ) - .expect("Unable to get genesis response"); - - if let GenesisResult::Success { - post_state_hash, - effect, - } = genesis_result - { - let state_root_hash = post_state_hash; - - let transforms = effect.transforms; - - let genesis_account = utils::get_account(&transforms, &system_account) - .expect("Unable to get system account"); - - let maybe_protocol_data = self - .engine_state - .get_protocol_data(run_genesis_request.protocol_version()) - .expect("should read protocol data"); - let protocol_data = maybe_protocol_data.expect("should have protocol data stored"); - - self.genesis_hash = Some(state_root_hash); - self.post_state_hash = Some(state_root_hash); - self.mint_contract_hash = Some(protocol_data.mint()); - self.handle_payment_contract_hash = Some(protocol_data.handle_payment()); - self.standard_payment_hash = Some(protocol_data.standard_payment()); - self.auction_contract_hash = Some(protocol_data.auction()); - self.genesis_account = Some(genesis_account); - self.genesis_transforms = Some(transforms); - return self; - } - - panic!("genesis failure: {:?}", genesis_result); - } - - pub fn query( - &self, - maybe_post_state: Option, - base_key: Key, - path: 
&[String], - ) -> Result { - let post_state = maybe_post_state - .or(self.post_state_hash) - .expect("builder must have a post-state hash"); - - let query_request = QueryRequest::new(post_state, base_key, path.to_vec()); - - let query_result = self - .engine_state - .run_query(CorrelationId::new(), query_request) - .expect("should get query response"); - - if let QueryResult::Success { value, .. } = query_result { - return Ok(value.deref().clone()); - } - - Err(format!("{:?}", query_result)) - } - - pub fn query_with_proof( - &self, - maybe_post_state: Option, - base_key: Key, - path: &[String], - ) -> Result<(StoredValue, Vec>), String> { - let post_state = maybe_post_state - .or(self.post_state_hash) - .expect("builder must have a post-state hash"); - - let path_vec: Vec = path.to_vec(); - - let query_request = QueryRequest::new(post_state, base_key, path_vec); - - let query_result = self - .engine_state - .run_query(CorrelationId::new(), query_request) - .expect("should get query response"); - - if let QueryResult::Success { value, proofs } = query_result { - return Ok((value.deref().clone(), proofs)); - } - - panic! 
{query_result}; - } - - pub fn total_supply(&self, maybe_post_state: Option) -> U512 { - let mint_key: Key = self - .mint_contract_hash - .expect("should have mint_contract_hash") - .into(); - - let result = self.query(maybe_post_state, mint_key, &[TOTAL_SUPPLY_KEY.to_string()]); - - let total_supply: U512 = if let Ok(StoredValue::CLValue(total_supply)) = result { - total_supply.into_t().expect("total supply should be U512") - } else { - panic!("mint should track total supply"); - }; - - total_supply - } - - pub fn exec(&mut self, mut exec_request: ExecuteRequest) -> &mut Self { - let exec_request = { - let hash = self - .post_state_hash - .clone() - .expect("expected post_state_hash"); - exec_request.parent_state_hash = hash; - exec_request - }; - let maybe_exec_results = self - .engine_state - .run_execute(CorrelationId::new(), exec_request); - assert!(maybe_exec_results.is_ok()); - // Parse deploy results - let execution_results = maybe_exec_results.as_ref().unwrap(); - // Cache transformations - self.transforms.extend( - execution_results - .iter() - .map(|res| res.effect().transforms.clone()), - ); - self.exec_results.push( - maybe_exec_results - .unwrap() - .into_iter() - .map(Rc::new) - .collect(), - ); - self - } - - /// Commit effects of previous exec call on the latest post-state hash. - pub fn commit(&mut self) -> &mut Self { - let prestate_hash = self - .post_state_hash - .clone() - .expect("Should have genesis hash"); - - let effects = self.transforms.last().cloned().unwrap_or_default(); - - self.commit_effects(prestate_hash, effects) - } - - /// Applies effects to global state. - pub fn commit_transforms( - &self, - pre_state_hash: Blake2bHash, - effects: AdditiveMap, - ) -> CommitResult { - self.engine_state - .apply_effect(CorrelationId::new(), pre_state_hash, effects) - .expect("should commit") - } - - /// Runs a commit request, expects a successful response, and - /// overwrites existing cached post state hash with a new one. 
- pub fn commit_effects( - &mut self, - prestate_hash: Blake2bHash, - effects: AdditiveMap, - ) -> &mut Self { - let commit_result = self.commit_transforms(prestate_hash, effects); - - if let CommitResult::Success { state_root } = commit_result { - self.post_state_hash = Some(state_root); - return self; - } - panic!( - "Expected commit success but received a failure instead: {:?}", - commit_result - ); - } - - pub fn upgrade_with_upgrade_request( - &mut self, - upgrade_config: &mut UpgradeConfig, - ) -> &mut Self { - let pre_state_hash = self.post_state_hash.expect("should have state hash"); - upgrade_config.with_pre_state_hash(pre_state_hash); - - let result = self - .engine_state - .commit_upgrade(CorrelationId::new(), upgrade_config.clone()); - - if let Ok(UpgradeResult::Success { - post_state_hash, .. - }) = result - { - self.post_state_hash = Some(post_state_hash); - } - - self.upgrade_results.push(result); - self - } - - pub fn run_auction( - &mut self, - era_end_timestamp_millis: u64, - evicted_validators: Vec, - ) -> &mut Self { - let auction = self.get_auction_contract_hash(); - let run_request = ExecuteRequestBuilder::contract_call_by_hash( - *SYSTEM_ADDR, - auction, - METHOD_RUN_AUCTION, - runtime_args! { - ARG_ERA_END_TIMESTAMP_MILLIS => era_end_timestamp_millis, - ARG_EVICTED_VALIDATORS => evicted_validators, - }, - ) - .build(); - self.exec(run_request).commit().expect_success() - } - - pub fn step(&mut self, step_request: StepRequest) -> &mut Self { - let result = self - .engine_state - .commit_step(CorrelationId::new(), step_request) - .expect("should step"); - - if let StepResult::Success { - post_state_hash, .. 
- } = result - { - self.post_state_hash = Some(post_state_hash); - self - } else { - panic!( - "Expected successful step result, but instead got error: {:?}", - result, - ) - } - } - - /// Expects a successful run and caches transformations - pub fn expect_success(&mut self) -> &mut Self { - // Check first result, as only first result is interesting for a simple test - let exec_results = self - .exec_results - .last() - .expect("Expected to be called after run()"); - let exec_result = exec_results - .get(0) - .expect("Unable to get first deploy result"); - - if exec_result.is_failure() { - panic!( - "Expected successful execution result, but instead got: {:?}", - exec_results, - ); - } - self - } - - pub fn is_error(&self) -> bool { - let exec_results = self - .exec_results - .last() - .expect("Expected to be called after run()"); - let exec_result = exec_results - .get(0) - .expect("Unable to get first execution result"); - exec_result.is_failure() - } - - /// Gets the transform map that's cached between runs - pub fn get_transforms(&self) -> Vec> { - self.transforms.clone() - } - - /// Gets genesis account (if present) - pub fn get_genesis_account(&self) -> &Account { - self.genesis_account - .as_ref() - .expect("Unable to obtain genesis account. Please run genesis first.") - } - - pub fn get_mint_contract_hash(&self) -> ContractHash { - self.mint_contract_hash - .expect("Unable to obtain mint contract. Please run genesis first.") - } - - pub fn get_handle_payment_contract_hash(&self) -> ContractHash { - self.handle_payment_contract_hash - .expect("Unable to obtain handle payment contract. Please run genesis first.") - } - - pub fn get_standard_payment_contract_hash(&self) -> ContractHash { - self.standard_payment_hash - .expect("Unable to obtain standard payment contract. Please run genesis first.") - } - - pub fn get_auction_contract_hash(&self) -> ContractHash { - self.auction_contract_hash - .expect("Unable to obtain auction contract. 
Please run genesis first.") - } - - pub fn get_genesis_transforms(&self) -> &AdditiveMap { - &self - .genesis_transforms - .as_ref() - .expect("should have genesis transforms") - } - - pub fn get_genesis_hash(&self) -> Blake2bHash { - self.genesis_hash - .clone() - .expect("Genesis hash should be present. Should be called after run_genesis.") - } - - pub fn get_post_state_hash(&self) -> Blake2bHash { - self.post_state_hash - .clone() - .expect("Should have post-state hash.") - } - - pub fn get_engine_state(&self) -> &EngineState { - &self.engine_state - } - - pub fn get_exec_results(&self) -> &Vec>> { - &self.exec_results - } - - pub fn get_exec_result(&self, index: usize) -> Option<&Vec>> { - self.exec_results.get(index) - } - - pub fn get_exec_results_count(&self) -> usize { - self.exec_results.len() - } - - pub fn get_upgrade_result( - &self, - index: usize, - ) -> Option<&Result> { - self.upgrade_results.get(index) - } - - pub fn expect_upgrade_success(&mut self) -> &mut Self { - // Check first result, as only first result is interesting for a simple test - let result = self - .upgrade_results - .last() - .expect("Expected to be called after a system upgrade.") - .as_ref(); - - result.unwrap_or_else(|_| panic!("Expected success, got: {:?}", result)); - - self - } - - pub fn finish(&self) -> WasmTestResult { - WasmTestResult(self.clone()) - } - - pub fn get_handle_payment_contract(&self) -> Contract { - let handle_payment_contract: Key = self - .handle_payment_contract_hash - .expect("should have handle payment contract uref") - .into(); - self.query(None, handle_payment_contract, &[]) - .and_then(|v| v.try_into().map_err(|error| format!("{:?}", error))) - .expect("should find handle payment URef") - } - - pub fn get_purse_balance(&self, purse: URef) -> U512 { - let base_key = Key::Balance(purse.addr()); - self.query(None, base_key, &[]) - .and_then(|v| CLValue::try_from(v).map_err(|error| format!("{:?}", error))) - .and_then(|cl_value| 
cl_value.into_t().map_err(|error| format!("{:?}", error))) - .expect("should parse balance into a U512") - } - - pub fn get_purse_balance_result(&self, purse: URef) -> BalanceResult { - let correlation_id = CorrelationId::new(); - let state_root_hash: Blake2bHash = - self.post_state_hash.expect("should have post_state_hash"); - self.engine_state - .get_purse_balance(correlation_id, state_root_hash, purse) - .expect("should get purse balance") - } - - pub fn get_proposer_purse_balance(&self) -> U512 { - let proposer_account = self - .get_account(*DEFAULT_PROPOSER_ADDR) - .expect("proposer account should exist"); - self.get_purse_balance(proposer_account.main_purse()) - } - - pub fn get_account(&self, account_hash: AccountHash) -> Option { - match self.query(None, Key::Account(account_hash), &[]) { - Ok(account_value) => match account_value { - StoredValue::Account(account) => Some(account), - _ => None, - }, - Err(_) => None, - } - } - - pub fn get_contract(&self, contract_hash: ContractHash) -> Option { - let contract_value: StoredValue = self - .query(None, contract_hash.into(), &[]) - .expect("should have contract value"); - - if let StoredValue::Contract(contract) = contract_value { - Some(contract) - } else { - None - } - } - - pub fn get_contract_wasm(&self, contract_hash: ContractHash) -> Option { - let contract_value: StoredValue = self - .query(None, contract_hash.into(), &[]) - .expect("should have contract value"); - - if let StoredValue::ContractWasm(contract_wasm) = contract_value { - Some(contract_wasm) - } else { - None - } - } - - pub fn get_contract_package( - &self, - contract_package_hash: ContractPackageHash, - ) -> Option { - let contract_value: StoredValue = self - .query(None, contract_package_hash.into(), &[]) - .expect("should have package value"); - - if let StoredValue::ContractPackage(package) = contract_value { - Some(package) - } else { - None - } - } - - pub fn get_transfer(&self, transfer: TransferAddr) -> Option { - let 
transfer_value: StoredValue = self - .query(None, Key::Transfer(transfer), &[]) - .expect("should have transfer value"); - - if let StoredValue::Transfer(transfer) = transfer_value { - Some(transfer) - } else { - None - } - } - - pub fn get_deploy_info(&self, deploy_hash: DeployHash) -> Option { - let deploy_info_value: StoredValue = self - .query(None, Key::DeployInfo(deploy_hash), &[]) - .expect("should have deploy info value"); - - if let StoredValue::DeployInfo(deploy_info) = deploy_info_value { - Some(deploy_info) - } else { - None - } - } - - pub fn exec_costs(&self, index: usize) -> Vec { - let exec_results = self - .get_exec_result(index) - .expect("should have exec response"); - utils::get_exec_costs(exec_results) - } - - pub fn last_exec_gas_cost(&self) -> Gas { - let exec_results = self - .exec_results - .last() - .expect("Expected to be called after run()"); - let exec_result = exec_results.get(0).expect("should have result"); - exec_result.cost() - } - - pub fn exec_error_message(&self, index: usize) -> Option { - let response = self.get_exec_result(index)?; - Some(utils::get_error_message(response)) - } - - pub fn exec_commit_finish(&mut self, execute_request: ExecuteRequest) -> WasmTestResult { - self.exec(execute_request) - .expect_success() - .commit() - .finish() - } - - pub fn get_era_validators(&mut self) -> EraValidators { - let correlation_id = CorrelationId::new(); - let state_hash = self.get_post_state_hash(); - let request = GetEraValidatorsRequest::new(state_hash, *DEFAULT_PROTOCOL_VERSION); - self.engine_state - .get_era_validators(correlation_id, request) - .expect("get era validators should not error") - } - - pub fn get_validator_weights(&mut self, era_id: EraId) -> Option { - let mut result = self.get_era_validators(); - result.remove(&era_id) - } - - pub fn get_bids(&mut self) -> Bids { - let get_bids_request = GetBidsRequest::new(self.get_post_state_hash()); - - let get_bids_result = self - .engine_state - 
.get_bids(CorrelationId::new(), get_bids_request) - .unwrap(); - - get_bids_result.bids().cloned().unwrap() - } - - pub fn get_withdraws(&mut self) -> UnbondingPurses { - let correlation_id = CorrelationId::new(); - let state_root_hash = self.get_post_state_hash(); - - let tracking_copy = self - .engine_state - .tracking_copy(state_root_hash) - .unwrap() - .unwrap(); - - let reader = tracking_copy.reader(); - - let withdraws_keys = reader - .keys_with_prefix(correlation_id, &[KeyTag::Withdraw as u8]) - .unwrap_or_default(); - - let mut ret = BTreeMap::new(); - - for key in withdraws_keys.into_iter() { - let read_result = reader.read(correlation_id, &key); - if let ( - Key::Withdraw(account_hash), - Ok(Some(StoredValue::Withdraw(unbonding_purses))), - ) = (key, read_result) - { - ret.insert(account_hash, unbonding_purses); - } - } - - ret - } - - pub fn get_seigniorage_recipients_snapshot(&mut self) -> SeigniorageRecipientsSnapshot { - let correlation_id = CorrelationId::new(); - let state_root_hash = self.get_post_state_hash(); - - let tracking_copy = self - .engine_state - .tracking_copy(state_root_hash) - .unwrap() - .unwrap(); - - let reader = tracking_copy.reader(); - - let era_ids = reader - .keys_with_prefix(correlation_id, &[KeyTag::EraValidators as u8]) - .unwrap_or_default(); - - let mut ret = BTreeMap::new(); - - for era_id in era_ids.into_iter() { - let read_result = reader.read(correlation_id, &era_id); - if let (Key::EraValidators(era_id), Ok(Some(StoredValue::EraValidators(recipients)))) = - (era_id, read_result) - { - ret.insert(era_id, recipients); - } - } - - ret - } - - pub fn get_value(&mut self, contract_hash: ContractHash, name: &str) -> T - where - T: FromBytes + CLTyped, - { - let contract = self - .get_contract(contract_hash) - .expect("should have contract"); - let key = contract - .named_keys() - .get(name) - .expect("should have named key"); - let stored_value = self.query(None, *key, &[]).expect("should query"); - let cl_value = 
stored_value - .as_cl_value() - .cloned() - .expect("should be cl value"); - let result: T = cl_value.into_t().expect("should convert"); - result - } - - pub fn get_era(&mut self) -> EraId { - let auction_contract = self.get_auction_contract_hash(); - self.get_value(auction_contract, ERA_ID_KEY) - } - - pub fn get_auction_delay(&mut self) -> u64 { - let auction_contract = self.get_auction_contract_hash(); - self.get_value(auction_contract, AUCTION_DELAY_KEY) - } -} diff --git a/execution_engine_testing/test_support/src/lib.rs b/execution_engine_testing/test_support/src/lib.rs index 849674639f..96106522c7 100644 --- a/execution_engine_testing/test_support/src/lib.rs +++ b/execution_engine_testing/test_support/src/lib.rs @@ -1,98 +1,243 @@ //! A library to support testing of Wasm smart contracts for use on the Casper Platform. -//! -//! # Example -//! Consider a contract held in "contract.wasm" which stores an arbitrary `String` under a `Key` -//! named "special_value": -//! ```no_run -//! use casper_contract::contract_api::{runtime, storage}; -//! use casper_types::Key; -//! const KEY: &str = "special_value"; -//! const ARG_VALUE: &str = "value"; -//! -//! #[no_mangle] -//! pub extern "C" fn call() { -//! let value: String = runtime::get_named_arg(ARG_VALUE); -//! let value_ref = storage::new_uref(value); -//! let value_key: Key = value_ref.into(); -//! runtime::put_key(KEY, value_key); -//! } -//! ``` -//! -//! The test could be written as follows: -//! ```no_run -//! use casper_engine_test_support::{Code, Error, SessionBuilder, TestContextBuilder, Value}; -//! use casper_types::{U512, RuntimeArgs, runtime_args, PublicKey, account::AccountHash, SecretKey}; -//! -//! const MY_ACCOUNT: [u8; 32] = [7u8; 32]; -//! const MY_ADDR: [u8; 32] = [8u8; 32]; -//! const KEY: &str = "special_value"; -//! const VALUE: &str = "hello world"; -//! const ARG_MESSAGE: &str = "message"; -//! -//! 
let public_key: PublicKey = SecretKey::ed25519_from_bytes(MY_ACCOUNT).unwrap().into(); -//! let account_addr = AccountHash::new(MY_ADDR); -//! -//! let mut context = TestContextBuilder::new() -//! .with_public_key(public_key, U512::from(128_000_000_000_000u64)) -//! .build(); -//! -//! // The test framework checks for compiled Wasm files in '/wasm'. Paths -//! // relative to the current working dir (e.g. 'wasm/contract.wasm') can also be used, as can -//! // absolute paths. -//! let session_code = Code::from("contract.wasm"); -//! let session_args = runtime_args! { -//! ARG_MESSAGE => VALUE, -//! }; -//! let session = SessionBuilder::new(session_code, session_args) -//! .with_address(account_addr) -//! .with_authorization_keys(&[account_addr]) -//! .build(); -//! -//! let result_of_query: Result = context.run(session).query(account_addr, &[KEY.to_string()]); -//! -//! let returned_value = result_of_query.expect("should be a value"); -//! -//! let expected_value = Value::from_t(VALUE.to_string()).expect("should construct Value"); -//! assert_eq!(expected_value, returned_value); -//! ``` - -#![doc(html_root_url = "https://docs.rs/casper-engine-test-support/1.0.0")] + +#![doc(html_root_url = "https://docs.rs/casper-engine-test-support/8.1.1")] #![doc( - html_favicon_url = "https://raw.githubusercontent.com/CasperLabs/casper-node/master/images/CasperLabs_Logo_Favicon_RGB_50px.png", - html_logo_url = "https://raw.githubusercontent.com/CasperLabs/casper-node/master/images/CasperLabs_Logo_Symbol_RGB.png", - test(attr(forbid(warnings))) + html_favicon_url = "https://raw.githubusercontent.com/casper-network/casper-node/blob/dev/images/Casper_Logo_Favicon_48.png", + html_logo_url = "https://raw.githubusercontent.com/casper-network/casper-node/blob/dev/images/Casper_Logo_Favicon.png", + test(attr(deny(warnings))) )] #![warn(missing_docs)] -mod account; -mod code; -mod error; -// This module is not intended to be used by third party crates. 
-#[doc(hidden)] -pub mod internal; -mod session; -mod test_context; -mod value; - -pub use account::Account; -pub use casper_types::account::AccountHash; -pub use code::Code; -pub use error::{Error, Result}; -pub use session::{Session, SessionBuilder, SessionTransferInfo}; -pub use test_context::{TestContext, TestContextBuilder}; -pub use value::Value; - -/// The address of a [`URef`](casper_types::URef) (unforgeable reference) on the network. -pub type URefAddr = [u8; 32]; - -/// The hash of a smart contract stored on the network, which can be used to reference the contract. -pub type Hash = [u8; 32]; +mod chainspec_config; +pub mod deploy_item; +mod deploy_item_builder; +mod execute_request_builder; +pub mod genesis_config_builder; +mod step_request_builder; +mod transfer_request_builder; +mod upgrade_request_builder; +pub mod utils; +mod wasm_test_builder; -/// Default test account address. -pub use crate::internal::DEFAULT_ACCOUNT_ADDR; +pub(crate) use genesis_config_builder::GenesisConfigBuilder; +use num_rational::Ratio; +use once_cell::sync::Lazy; -/// Default initial balance of a test account in motes. 
-pub const DEFAULT_ACCOUNT_INITIAL_BALANCE: u64 = 100_000_000_000_000_000u64; +use casper_storage::data_access_layer::GenesisRequest; +use casper_types::{ + account::AccountHash, testing::TestRng, ChainspecRegistry, Digest, GenesisAccount, + GenesisConfig, HoldBalanceHandling, Motes, ProtocolVersion, PublicKey, SecretKey, StorageCosts, + SystemConfig, WasmConfig, WasmV1Config, U512, +}; + +pub use chainspec_config::{ChainspecConfig, CHAINSPEC_SYMLINK}; +pub use deploy_item_builder::DeployItemBuilder; +pub use execute_request_builder::{ExecuteRequest, ExecuteRequestBuilder}; +pub use step_request_builder::StepRequestBuilder; +pub use transfer_request_builder::TransferRequestBuilder; +pub use upgrade_request_builder::UpgradeRequestBuilder; +pub use wasm_test_builder::{EntityWithNamedKeys, LmdbWasmTestBuilder, WasmTestBuilder}; + +/// Default number of validator slots. +pub const DEFAULT_VALIDATOR_SLOTS: u32 = 5; +/// Default auction delay. +pub const DEFAULT_AUCTION_DELAY: u64 = 1; +/// Default lock-in period is currently zero. +pub const DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS: u64 = 0; +/// Default length of total vesting schedule is currently zero. +pub const DEFAULT_VESTING_SCHEDULE_PERIOD_MILLIS: u64 = 0; + +/// Default number of eras that need to pass to be able to withdraw unbonded funds. +pub const DEFAULT_UNBONDING_DELAY: u64 = 7; + +/// Round seigniorage rate represented as a fraction of the total supply. +/// +/// Annual issuance: 8% +/// Minimum round length: 2^14 ms +/// Ticks per year: 31536000000 +/// +/// (1+0.08)^((2^14)/31536000000)-1 is expressed as a fractional number below. +pub const DEFAULT_ROUND_SEIGNIORAGE_RATE: Ratio = Ratio::new_raw(1, 4200000000000000000); + +/// Default chain name. +pub const DEFAULT_CHAIN_NAME: &str = "casper-execution-engine-testing"; +/// Default genesis timestamp in milliseconds. +pub const DEFAULT_GENESIS_TIMESTAMP_MILLIS: u64 = 0; +/// Default block time. +pub const DEFAULT_BLOCK_TIME: u64 = 0; +/// Default gas price. 
+pub const DEFAULT_GAS_PRICE: u8 = 1; +/// Amount named argument. +pub const ARG_AMOUNT: &str = "amount"; +/// Timestamp increment in milliseconds. +pub const TIMESTAMP_MILLIS_INCREMENT: u64 = 30_000; // 30 seconds +/// Default gas hold balance handling. +pub const DEFAULT_GAS_HOLD_BALANCE_HANDLING: HoldBalanceHandling = HoldBalanceHandling::Accrued; +/// Default gas hold interval in milliseconds. +pub const DEFAULT_GAS_HOLD_INTERVAL_MILLIS: u64 = 24 * 60 * 60 * 60; + +/// Default value for maximum associated keys configuration option. +pub const DEFAULT_MAX_ASSOCIATED_KEYS: u32 = 100; + +/// Default value for a maximum query depth configuration option. +pub const DEFAULT_MAX_QUERY_DEPTH: u64 = 5; +/// Default value for maximum runtime call stack height configuration option. +pub const DEFAULT_MAX_RUNTIME_CALL_STACK_HEIGHT: u32 = 12; +/// Default value for minimum delegation amount in motes. +pub const DEFAULT_MINIMUM_DELEGATION_AMOUNT: u64 = 500 * 1_000_000_000; +/// Default value for maximum delegation amount in motes. +pub const DEFAULT_MAXIMUM_DELEGATION_AMOUNT: u64 = 1_000_000_000 * 1_000_000_000; + +/// Default genesis config hash. +pub const DEFAULT_GENESIS_CONFIG_HASH: Digest = Digest::from_raw([42; 32]); +/// Default account secret key. +pub static DEFAULT_ACCOUNT_SECRET_KEY: Lazy = + Lazy::new(|| SecretKey::ed25519_from_bytes([199; SecretKey::ED25519_LENGTH]).unwrap()); +/// Default account public key. +pub static DEFAULT_ACCOUNT_PUBLIC_KEY: Lazy = + Lazy::new(|| PublicKey::from(&*DEFAULT_ACCOUNT_SECRET_KEY)); +/// Default test account address. +pub static DEFAULT_ACCOUNT_ADDR: Lazy = + Lazy::new(|| AccountHash::from(&*DEFAULT_ACCOUNT_PUBLIC_KEY)); +// NOTE: declaring DEFAULT_ACCOUNT_KEY as *DEFAULT_ACCOUNT_ADDR causes tests to stall. +/// Default account key. +pub static DEFAULT_ACCOUNT_KEY: Lazy = + Lazy::new(|| AccountHash::from(&*DEFAULT_ACCOUNT_PUBLIC_KEY)); +/// Default initial balance of a test account in motes. 
+pub const DEFAULT_ACCOUNT_INITIAL_BALANCE: u64 = 10_000_000_000_000_000_000_u64; /// Minimal amount for a transfer that creates new accounts. -pub const MINIMUM_ACCOUNT_CREATION_BALANCE: u64 = 7_500_000_000_000_000u64; +pub const MINIMUM_ACCOUNT_CREATION_BALANCE: u64 = 7_500_000_000_000_000_u64; +/// Default proposer public key. +pub static DEFAULT_PROPOSER_PUBLIC_KEY: Lazy = Lazy::new(|| { + let secret_key = SecretKey::ed25519_from_bytes([198; SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) +}); +/// Default proposer address. +pub static DEFAULT_PROPOSER_ADDR: Lazy = + Lazy::new(|| AccountHash::from(&*DEFAULT_PROPOSER_PUBLIC_KEY)); +/// Default accounts. +pub static DEFAULT_ACCOUNTS: Lazy> = Lazy::new(|| { + let mut ret = Vec::new(); + let genesis_account = GenesisAccount::account( + DEFAULT_ACCOUNT_PUBLIC_KEY.clone(), + Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE), + None, + ); + ret.push(genesis_account); + let proposer_account = GenesisAccount::account( + DEFAULT_PROPOSER_PUBLIC_KEY.clone(), + Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE), + None, + ); + ret.push(proposer_account); + let rng = &mut TestRng::new(); + for _ in 0..10 { + let filler_account = GenesisAccount::account( + PublicKey::random(rng), + Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE), + None, + ); + ret.push(filler_account); + } + ret +}); +/// Default [`ProtocolVersion`]. +pub const DEFAULT_PROTOCOL_VERSION: ProtocolVersion = ProtocolVersion::V2_0_0; +/// Default payment. +pub static DEFAULT_PAYMENT: Lazy = Lazy::new(|| U512::from(10_000_000_000_000u64)); +/// Default [`WasmConfig`]. +pub static DEFAULT_WASM_CONFIG: Lazy = Lazy::new(WasmConfig::default); +/// Default [`WasmV1Config`]. +pub static DEFAULT_WASM_V1_CONFIG: Lazy = Lazy::new(WasmV1Config::default); +/// Default [`SystemConfig`]. +pub static DEFAULT_SYSTEM_CONFIG: Lazy = Lazy::new(SystemConfig::default); +/// Default [`StorageCosts`]. 
+pub static DEFAULT_STORAGE_COSTS: Lazy = Lazy::new(StorageCosts::default); + +/// Default [`GenesisConfig`]. +pub static DEFAULT_EXEC_CONFIG: Lazy = Lazy::new(|| { + GenesisConfigBuilder::default() + .with_accounts(DEFAULT_ACCOUNTS.clone()) + .with_wasm_config(*DEFAULT_WASM_CONFIG) + .with_system_config(*DEFAULT_SYSTEM_CONFIG) + .with_validator_slots(DEFAULT_VALIDATOR_SLOTS) + .with_auction_delay(DEFAULT_AUCTION_DELAY) + .with_locked_funds_period_millis(DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS) + .with_round_seigniorage_rate(DEFAULT_ROUND_SEIGNIORAGE_RATE) + .with_unbonding_delay(DEFAULT_UNBONDING_DELAY) + .with_genesis_timestamp_millis(DEFAULT_GENESIS_TIMESTAMP_MILLIS) + .with_storage_costs(*DEFAULT_STORAGE_COSTS) + .build() +}); + +/// Default [`ChainspecRegistry`]. +pub static DEFAULT_CHAINSPEC_REGISTRY: Lazy = + Lazy::new(|| ChainspecRegistry::new_with_genesis(&[1, 2, 3], &[4, 5, 6])); + +/// A [`GenesisRequest`] using cost tables matching those used in Casper Mainnet. +pub static LOCAL_GENESIS_REQUEST: Lazy = Lazy::new(|| { + ChainspecConfig::create_genesis_request_from_local_chainspec( + DEFAULT_ACCOUNTS.clone(), + DEFAULT_PROTOCOL_VERSION, + ) + .expect("must create the request") +}); +/// Round seigniorage rate from the production chainspec. +pub static PRODUCTION_ROUND_SEIGNIORAGE_RATE: Lazy> = Lazy::new(|| { + let chainspec = ChainspecConfig::from_chainspec_path(&*CHAINSPEC_SYMLINK) + .expect("must create chainspec_config"); + chainspec.core_config.round_seigniorage_rate +}); +/// System address. +pub static SYSTEM_ADDR: Lazy = Lazy::new(|| PublicKey::System.to_account_hash()); + +#[cfg(test)] +mod tests { + use super::*; + use casper_types::MessageLimits; + + #[test] + fn defaults_should_match_production_chainspec_values() { + let production = ChainspecConfig::from_chainspec_path(&*CHAINSPEC_SYMLINK).unwrap(); + // No need to test `CoreConfig::validator_slots`. 
+ assert_eq!(production.core_config.auction_delay, DEFAULT_AUCTION_DELAY); + assert_eq!( + production.core_config.locked_funds_period.millis(), + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS + ); + assert_eq!( + production.core_config.unbonding_delay, + DEFAULT_UNBONDING_DELAY + ); + assert_eq!( + production.core_config.round_seigniorage_rate.reduced(), + DEFAULT_ROUND_SEIGNIORAGE_RATE.reduced() + ); + assert_eq!( + production.core_config.max_associated_keys, + DEFAULT_MAX_ASSOCIATED_KEYS + ); + assert_eq!( + production.core_config.max_runtime_call_stack_height, + DEFAULT_MAX_RUNTIME_CALL_STACK_HEIGHT + ); + assert_eq!( + production.core_config.minimum_delegation_amount, + DEFAULT_MINIMUM_DELEGATION_AMOUNT + ); + assert_eq!( + production.core_config.maximum_delegation_amount, + DEFAULT_MAXIMUM_DELEGATION_AMOUNT + ); + + assert_eq!( + production.wasm_config.messages_limits(), + MessageLimits::default() + ); + + assert_eq!(production.wasm_config.v1(), &WasmV1Config::default()); + + assert_eq!(production.system_costs_config, SystemConfig::default()); + } +} diff --git a/execution_engine_testing/test_support/src/session.rs b/execution_engine_testing/test_support/src/session.rs deleted file mode 100644 index 494bb6d00b..0000000000 --- a/execution_engine_testing/test_support/src/session.rs +++ /dev/null @@ -1,166 +0,0 @@ -use rand::Rng; - -use casper_execution_engine::core::engine_state::execute_request::ExecuteRequest; -use casper_types::{runtime_args, ProtocolVersion, RuntimeArgs, URef, U512}; - -use crate::{ - internal::{DeployItemBuilder, ExecuteRequestBuilder, DEFAULT_PAYMENT}, - AccountHash, Code, -}; - -const ARG_AMOUNT: &str = "amount"; - -/// Transfer Information for validating a transfer including gas usage from source -pub struct SessionTransferInfo { - pub(crate) source_purse: URef, - pub(crate) maybe_target_purse: Option, - pub(crate) transfer_amount: U512, -} - -impl SessionTransferInfo { - /// Constructs a new `SessionTransferInfo` containing information for 
validating a transfer - /// when `test_context.run()` occurs. - /// - /// Assertion will be made that `source_purse` is debited `transfer_amount` with gas costs - /// handled. If given, assertion will be made that `maybe_target_purse` is credited - /// `transfer_amount` - pub fn new( - source_purse: URef, - maybe_target_purse: Option, - transfer_amount: U512, - ) -> Self { - SessionTransferInfo { - source_purse, - maybe_target_purse, - transfer_amount, - } - } -} - -/// A single session, i.e. a single request to execute a single deploy within the test context. -pub struct Session { - pub(crate) inner: ExecuteRequest, - pub(crate) expect_success: bool, - pub(crate) check_transfer_success: Option, - pub(crate) commit: bool, -} - -/// Builder for a [`Session`]. -pub struct SessionBuilder { - er_builder: ExecuteRequestBuilder, - di_builder: DeployItemBuilder, - expect_failure: bool, - check_transfer_success: Option, - without_commit: bool, -} - -impl SessionBuilder { - /// Constructs a new `SessionBuilder` containing a deploy with the provided session code and - /// session args, and with default values for the account address, payment code args, gas price, - /// authorization keys and protocol version. - pub fn new(session_code: Code, session_args: RuntimeArgs) -> Self { - let di_builder = DeployItemBuilder::new() - .with_empty_payment_bytes(runtime_args! 
{ ARG_AMOUNT => *DEFAULT_PAYMENT }); - let di_builder = match session_code { - Code::Path(path) => di_builder.with_session_code(path, session_args), - Code::NamedKey(name, entry_point) => { - di_builder.with_stored_session_named_key(&name, &entry_point, session_args) - } - Code::Hash(hash, entry_point) => { - di_builder.with_stored_session_hash(hash.into(), &entry_point, session_args) - } - }; - let expect_failure = false; - let check_transfer_success = None; - let without_commit = false; - Self { - er_builder: Default::default(), - di_builder, - expect_failure, - check_transfer_success, - without_commit, - } - } - - /// Returns `self` with the provided account address set. - pub fn with_address(mut self, address: AccountHash) -> Self { - self.di_builder = self.di_builder.with_address(address); - self - } - - /// Returns `self` with the provided payment code and args set. - pub fn with_payment_code(mut self, code: Code, args: RuntimeArgs) -> Self { - self.di_builder = match code { - Code::Path(path) => self.di_builder.with_payment_code(path, args), - Code::NamedKey(name, entry_point) => { - self.di_builder - .with_stored_payment_named_key(&name, &entry_point, args) - } - Code::Hash(hash, entry_point) => { - self.di_builder - .with_stored_payment_hash(hash.into(), &entry_point, args) - } - }; - self - } - - /// Returns `self` with the provided block time set. - pub fn with_block_time(mut self, block_time: u64) -> Self { - self.er_builder = self.er_builder.with_block_time(block_time); - self - } - - /// Returns `self` with the provided gas price set. - pub fn with_gas_price(mut self, price: u64) -> Self { - self.di_builder = self.di_builder.with_gas_price(price); - self - } - - /// Returns `self` with the provided authorization keys set. - pub fn with_authorization_keys(mut self, keys: &[AccountHash]) -> Self { - self.di_builder = self.di_builder.with_authorization_keys(keys); - self - } - - /// Returns `self` with the provided protocol version set. 
- pub fn with_protocol_version(mut self, version: ProtocolVersion) -> Self { - self.er_builder = self.er_builder.with_protocol_version(version); - self - } - - /// Will disable the expect_success call during Text_Context.run() method when expected to fail. - pub fn without_expect_success(mut self) -> Self { - self.expect_failure = true; - self - } - - /// Provide SessionTransferInfo to validate transfer including gas used from source account - pub fn with_check_transfer_success( - mut self, - session_transfer_info: SessionTransferInfo, - ) -> Self { - self.check_transfer_success = Some(session_transfer_info); - self - } - - /// Do not perform commit within the ['TestContext'].['run'] method. - pub fn without_commit(mut self) -> Self { - self.without_commit = true; - self - } - - /// Builds the [`Session`]. - pub fn build(self) -> Session { - let mut rng = rand::thread_rng(); - let execute_request = self - .er_builder - .push_deploy(self.di_builder.with_deploy_hash(rng.gen()).build()) - .build(); - Session { - inner: execute_request, - expect_success: !self.expect_failure, - check_transfer_success: self.check_transfer_success, - commit: !self.without_commit, - } - } -} diff --git a/execution_engine_testing/test_support/src/step_request_builder.rs b/execution_engine_testing/test_support/src/step_request_builder.rs new file mode 100644 index 0000000000..f306856f41 --- /dev/null +++ b/execution_engine_testing/test_support/src/step_request_builder.rs @@ -0,0 +1,121 @@ +use casper_storage::{ + data_access_layer::{EvictItem, RewardItem, SlashItem, StepRequest}, + system::runtime_native::{Config, TransferConfig}, +}; +use casper_types::{Digest, EraId, ProtocolVersion}; + +/// Builder for creating a [`StepRequest`]. 
+#[derive(Debug, Clone)] +pub struct StepRequestBuilder { + runtime_config: Config, + parent_state_hash: Digest, + protocol_version: ProtocolVersion, + slash_items: Vec, + reward_items: Vec, + evict_items: Vec, + run_auction: bool, + next_era_id: EraId, + era_end_timestamp_millis: u64, +} + +impl StepRequestBuilder { + /// Returns a new `StepRequestBuilder`. + pub fn new() -> Self { + Default::default() + } + + /// Sets config. + pub fn with_runtime_config(mut self, runtime_config: Config) -> Self { + self.runtime_config = runtime_config; + self + } + + /// Sets `transfer_config` to the imputed value. + pub fn with_transfer_config(mut self, transfer_config: TransferConfig) -> Self { + self.runtime_config = self.runtime_config.set_transfer_config(transfer_config); + self + } + + /// Sets `parent_state_hash` to the given [`Digest`]. + pub fn with_parent_state_hash(mut self, parent_state_hash: Digest) -> Self { + self.parent_state_hash = parent_state_hash; + self + } + + /// Sets `protocol_version` to the given [`ProtocolVersion`]. + pub fn with_protocol_version(mut self, protocol_version: ProtocolVersion) -> Self { + self.protocol_version = protocol_version; + self + } + + /// Pushes the given [`SlashItem`] into `slash_items`. + pub fn with_slash_item(mut self, slash_item: SlashItem) -> Self { + self.slash_items.push(slash_item); + self + } + + /// Pushes the given [`RewardItem`] into `reward_items`. + pub fn with_reward_item(mut self, reward_item: RewardItem) -> Self { + self.reward_items.push(reward_item); + self + } + + /// Pushes the given [`EvictItem`] into `evict_items`. + pub fn with_evict_item(mut self, evict_item: EvictItem) -> Self { + self.evict_items.push(evict_item); + self + } + + /// Pushes the given vector of [`EvictItem`] into `evict_items`. + pub fn with_evict_items(mut self, evict_items: impl IntoIterator) -> Self { + self.evict_items.extend(evict_items); + self + } + + /// Sets `run_auction`. 
+ pub fn with_run_auction(mut self, run_auction: bool) -> Self { + self.run_auction = run_auction; + self + } + + /// Sets `next_era_id` to the given [`EraId`]. + pub fn with_next_era_id(mut self, next_era_id: EraId) -> Self { + self.next_era_id = next_era_id; + self + } + + /// Sets `era_end_timestamp_millis`. + pub fn with_era_end_timestamp_millis(mut self, era_end_timestamp_millis: u64) -> Self { + self.era_end_timestamp_millis = era_end_timestamp_millis; + self + } + + /// Consumes the [`StepRequestBuilder`] and returns a [`StepRequest`]. + pub fn build(self) -> StepRequest { + StepRequest::new( + self.runtime_config, + self.parent_state_hash, + self.protocol_version, + self.slash_items, + self.evict_items, + self.next_era_id, + self.era_end_timestamp_millis, + ) + } +} + +impl Default for StepRequestBuilder { + fn default() -> Self { + StepRequestBuilder { + runtime_config: Default::default(), + parent_state_hash: Default::default(), + protocol_version: Default::default(), + slash_items: Default::default(), + evict_items: Default::default(), + run_auction: true, //<-- run_auction by default + next_era_id: Default::default(), + era_end_timestamp_millis: Default::default(), + reward_items: Default::default(), + } + } +} diff --git a/execution_engine_testing/test_support/src/test_context.rs b/execution_engine_testing/test_support/src/test_context.rs deleted file mode 100644 index 82f710fddb..0000000000 --- a/execution_engine_testing/test_support/src/test_context.rs +++ /dev/null @@ -1,194 +0,0 @@ -use casper_execution_engine::{ - core::engine_state::{ - genesis::{GenesisAccount, GenesisConfig}, - run_genesis_request::RunGenesisRequest, - }, - shared::motes::Motes, -}; -use casper_types::{AccessRights, Key, PublicKey, URef, U512}; - -use crate::{ - internal::{InMemoryWasmTestBuilder, DEFAULT_GENESIS_CONFIG, DEFAULT_GENESIS_CONFIG_HASH}, - Account, AccountHash, Error, Result, Session, URefAddr, Value, -}; - -/// Context in which to run a test of a Wasm smart 
contract. -pub struct TestContext { - inner: InMemoryWasmTestBuilder, -} - -impl TestContext { - fn maybe_purse_balance(&self, purse_uref: Option) -> Option { - match purse_uref { - None => None, - Some(purse_uref) => { - let purse_balance = self.get_balance(purse_uref.addr()); - Some(Motes::new(purse_balance)) - } - } - } - - /// Runs the supplied [`Session`] checking specified expectations of the execution and - /// subsequent commit of transforms are met. - /// - /// If `session` was built without - /// [`without_expect_success()`](crate::SessionBuilder::without_expect_success) (the default) - /// then `run()` will panic if execution of the deploy fails. - /// - /// If `session` was built with - /// [`with_check_transfer_success()`](crate::SessionBuilder::with_check_transfer_success), (not - /// the default) then `run()` will verify transfer balances including gas used. - /// - /// If `session` was built without - /// [`without_commit()`](crate::SessionBuilder::without_commit) (the default), then `run()` will - /// commit the resulting transforms. 
- pub fn run(&mut self, session: Session) -> &mut Self { - match session.check_transfer_success { - Some(session_transfer_info) => { - let source_initial_balance = self - .maybe_purse_balance(Some(session_transfer_info.source_purse)) - .expect("source purse balance"); - - let proposer_reward_starting_balance = self.inner.get_proposer_purse_balance(); - - let maybe_target_initial_balance = - self.maybe_purse_balance(session_transfer_info.maybe_target_purse); - - let builder = self.inner.exec(session.inner); - - if session.expect_success { - builder.expect_success(); - } - if session.commit { - builder.commit(); - } - - let transaction_fee = - builder.get_proposer_purse_balance() - proposer_reward_starting_balance; - - match maybe_target_initial_balance { - None => (), - Some(target_initial_balance) => { - let target_ending_balance = self - .maybe_purse_balance(session_transfer_info.maybe_target_purse) - .expect("target ending balance"); - let expected_target_ending_balance = target_initial_balance - + Motes::new(session_transfer_info.transfer_amount); - if expected_target_ending_balance != target_ending_balance { - panic!( - "target ending balance does not match; expected: {} actual: {}", - expected_target_ending_balance, target_ending_balance - ); - } - } - } - - let expected_source_ending_balance = source_initial_balance - - Motes::new(session_transfer_info.transfer_amount) - - Motes::new(transaction_fee); - - let actual_source_ending_balance = self - .maybe_purse_balance(Some(session_transfer_info.source_purse)) - .expect("source ending balance"); - if expected_source_ending_balance != actual_source_ending_balance { - panic!( - "source ending balance does not match; expected: {} actual: {}", - expected_source_ending_balance, actual_source_ending_balance - ); - } - } - None => { - let builder = self.inner.exec(session.inner); - if session.expect_success { - builder.expect_success(); - } - if session.commit { - builder.commit(); - } - } - } - self - } - - /// 
Queries for a [`Value`] stored under the given `key` and `path`. - /// - /// Returns an [`Error`] if not found. - pub fn query(&self, key: AccountHash, path: &[String]) -> Result { - self.inner - .query(None, Key::Account(key), path) - .map(Value::new) - .map_err(Error::from) - } - - /// Gets the balance of the purse under the given [`URefAddr`]. - /// - /// Note that this requires performing an earlier query to retrieve `purse_addr`. - pub fn get_balance(&self, purse_addr: URefAddr) -> U512 { - let purse = URef::new(purse_addr, AccessRights::READ); - self.inner.get_purse_balance(purse) - } - - /// Gets the main purse [`URef`] from an [`Account`] stored under a [`PublicKey`], or `None`. - pub fn main_purse_address(&self, account_key: AccountHash) -> Option { - match self.inner.get_account(account_key) { - Some(account) => Some(account.main_purse()), - None => None, - } - } - - // TODO: Remove this once test can use query - /// Gets an [`Account`] stored under a [`PublicKey`], or `None`. - pub fn get_account(&self, account_key: AccountHash) -> Option { - match self.inner.get_account(account_key) { - Some(account) => Some(account.into()), - None => None, - } - } -} - -/// Builder for a [`TestContext`]. -pub struct TestContextBuilder { - genesis_config: GenesisConfig, -} - -impl TestContextBuilder { - /// Constructs a new `TestContextBuilder` initialised with default values for an account, i.e. - /// an account at [`DEFAULT_ACCOUNT_ADDR`](static@crate::DEFAULT_ACCOUNT_ADDR) with an initial - /// balance of [`DEFAULT_ACCOUNT_INITIAL_BALANCE`](crate::DEFAULT_ACCOUNT_INITIAL_BALANCE) - /// which will be added to the Genesis block. - pub fn new() -> Self { - TestContextBuilder { - genesis_config: DEFAULT_GENESIS_CONFIG.clone(), - } - } - - /// Returns `self` with the provided account's details added to existing ones, for inclusion in - /// the Genesis block. - /// - /// Note: `initial_balance` represents the number of motes. 
- pub fn with_public_key(mut self, public_key: PublicKey, initial_balance: U512) -> Self { - let new_account = GenesisAccount::account(public_key, Motes::new(initial_balance), None); - self.genesis_config - .ee_config_mut() - .push_account(new_account); - self - } - - /// Builds the [`TestContext`]. - pub fn build(self) -> TestContext { - let mut inner = InMemoryWasmTestBuilder::default(); - let run_genesis_request = RunGenesisRequest::new( - *DEFAULT_GENESIS_CONFIG_HASH, - self.genesis_config.protocol_version(), - self.genesis_config.take_ee_config(), - ); - inner.run_genesis(&run_genesis_request); - TestContext { inner } - } -} - -impl Default for TestContextBuilder { - fn default() -> Self { - TestContextBuilder::new() - } -} diff --git a/execution_engine_testing/test_support/src/transfer_request_builder.rs b/execution_engine_testing/test_support/src/transfer_request_builder.rs new file mode 100644 index 0000000000..341c58d5ff --- /dev/null +++ b/execution_engine_testing/test_support/src/transfer_request_builder.rs @@ -0,0 +1,237 @@ +use std::{ + collections::{BTreeMap, BTreeSet}, + iter, +}; + +use blake2::{ + digest::{Update, VariableOutput}, + VarBlake2b, +}; +use casper_execution_engine::engine_state::engine_config::DEFAULT_ENABLE_ENTITY; +use num_rational::Ratio; + +use casper_storage::{ + data_access_layer::TransferRequest, + system::runtime_native::{Config as NativeRuntimeConfig, TransferConfig}, +}; +use casper_types::{ + account::AccountHash, + bytesrepr::ToBytes, + system::mint::{ARG_AMOUNT, ARG_ID, ARG_SOURCE, ARG_TARGET}, + BlockTime, CLValue, Digest, FeeHandling, Gas, InitiatorAddr, ProtocolVersion, RefundHandling, + RuntimeArgs, TransactionHash, TransactionV1Hash, TransferTarget, URef, + DEFAULT_GAS_HOLD_INTERVAL, U512, +}; + +use crate::{ + DEFAULT_ACCOUNT_ADDR, DEFAULT_ACCOUNT_PUBLIC_KEY, DEFAULT_BLOCK_TIME, DEFAULT_PROTOCOL_VERSION, +}; + +/// Builds a [`TransferRequest`]. 
+#[derive(Debug)] +pub struct TransferRequestBuilder { + config: NativeRuntimeConfig, + state_hash: Digest, + block_time: BlockTime, + protocol_version: ProtocolVersion, + transaction_hash: Option, + initiator: InitiatorAddr, + authorization_keys: BTreeSet, + args: BTreeMap, + gas: Gas, +} + +impl TransferRequestBuilder { + /// The default value used for `TransferRequest::config`. + pub const DEFAULT_CONFIG: NativeRuntimeConfig = NativeRuntimeConfig::new( + TransferConfig::Unadministered, + FeeHandling::PayToProposer, + RefundHandling::Refund { + refund_ratio: Ratio::new_raw(99, 100), + }, + 0, + true, + true, + 0, + 500_000_000_000, + 500_000_000_000, + DEFAULT_GAS_HOLD_INTERVAL.millis(), + false, + Ratio::new_raw(U512::zero(), U512::zero()), + DEFAULT_ENABLE_ENTITY, + 2_500_000_000, + ); + /// The default value used for `TransferRequest::state_hash`. + pub const DEFAULT_STATE_HASH: Digest = Digest::from_raw([1; 32]); + /// The default value used for `TransferRequest::gas`. + pub const DEFAULT_GAS: u64 = 2_500_000_000; + + /// Constructs a new `TransferRequestBuilder`. 
+ pub fn new, T: Into>(amount: A, target: T) -> Self { + let mut args = BTreeMap::new(); + let _ = args.insert( + ARG_AMOUNT.to_string(), + CLValue::from_t(amount.into()).unwrap(), + ); + let _ = args.insert( + ARG_ID.to_string(), + CLValue::from_t(Option::::None).unwrap(), + ); + let target_value = match target.into() { + TransferTarget::PublicKey(public_key) => CLValue::from_t(public_key), + TransferTarget::AccountHash(account_hash) => CLValue::from_t(account_hash), + TransferTarget::URef(uref) => CLValue::from_t(uref), + } + .unwrap(); + let _ = args.insert(ARG_TARGET.to_string(), target_value); + TransferRequestBuilder { + config: Self::DEFAULT_CONFIG, + state_hash: Self::DEFAULT_STATE_HASH, + block_time: BlockTime::new(DEFAULT_BLOCK_TIME), + protocol_version: DEFAULT_PROTOCOL_VERSION, + transaction_hash: None, + initiator: InitiatorAddr::PublicKey(DEFAULT_ACCOUNT_PUBLIC_KEY.clone()), + authorization_keys: iter::once(*DEFAULT_ACCOUNT_ADDR).collect(), + args, + gas: Gas::new(Self::DEFAULT_GAS), + } + } + + /// Sets the native runtime config of the [`TransferRequest`]. + pub fn with_native_runtime_config(mut self, config: NativeRuntimeConfig) -> Self { + self.config = config; + self + } + + /// Sets the block time of the [`TransferRequest`]. + pub fn with_block_time(mut self, block_time: u64) -> Self { + self.block_time = BlockTime::new(block_time); + self + } + + /// Sets the protocol version used by the [`TransferRequest`]. + pub fn with_protocol_version(mut self, protocol_version: ProtocolVersion) -> Self { + self.protocol_version = protocol_version; + self + } + + /// Sets the transaction hash used by the [`TransferRequest`]. + pub fn with_transaction_hash(mut self, transaction_hash: TransactionHash) -> Self { + self.transaction_hash = Some(transaction_hash); + self + } + + /// Sets the initiator used by the [`TransferRequest`], and adds its account hash to the set of + /// authorization keys. 
+ pub fn with_initiator>(mut self, initiator: T) -> Self { + self.initiator = initiator.into(); + let _ = self + .authorization_keys + .insert(self.initiator.account_hash()); + self + } + + /// Sets the authorization keys used by the [`TransferRequest`]. + pub fn with_authorization_keys>( + mut self, + authorization_keys: T, + ) -> Self { + self.authorization_keys = authorization_keys.into_iter().collect(); + self + } + + /// Adds the "source" runtime arg, replacing the existing one if it exists. + pub fn with_source(mut self, source: URef) -> Self { + let value = CLValue::from_t(source).unwrap(); + let _ = self.args.insert(ARG_SOURCE.to_string(), value); + self + } + + /// Adds the "id" runtime arg, replacing the existing one if it exists.. + pub fn with_transfer_id(mut self, id: u64) -> Self { + let value = CLValue::from_t(Some(id)).unwrap(); + let _ = self.args.insert(ARG_ID.to_string(), value); + self + } + + /// Consumes self and returns a `TransferRequest`. + /// + /// If a transaction hash was not provided, the blake2b hash of the contents of the other fields + /// will be calculated, so that different requests will have different transaction hashes. Note + /// that this generated hash is not the same as what would have been generated on an actual + /// `Transaction` for an equivalent request. 
+ pub fn build(self) -> TransferRequest { + let txn_hash = match self.transaction_hash { + Some(txn_hash) => txn_hash, + None => { + let mut result = [0; 32]; + let mut hasher = VarBlake2b::new(32).unwrap(); + + match &self.config.transfer_config() { + TransferConfig::Administered { + administrative_accounts, + allow_unrestricted_transfers, + } => hasher.update( + (administrative_accounts, allow_unrestricted_transfers) + .to_bytes() + .unwrap(), + ), + TransferConfig::Unadministered => { + hasher.update([1]); + } + } + hasher.update(self.config.fee_handling().to_bytes().unwrap()); + hasher.update(self.config.refund_handling().to_bytes().unwrap()); + hasher.update( + self.config + .vesting_schedule_period_millis() + .to_bytes() + .unwrap(), + ); + hasher.update(self.config.allow_auction_bids().to_bytes().unwrap()); + hasher.update(self.config.compute_rewards().to_bytes().unwrap()); + hasher.update( + self.config + .max_delegators_per_validator() + .to_bytes() + .unwrap(), + ); + hasher.update(self.config.minimum_delegation_amount().to_bytes().unwrap()); + hasher.update(self.state_hash); + hasher.update(self.block_time.to_bytes().unwrap()); + hasher.update(self.protocol_version.to_bytes().unwrap()); + hasher.update(self.initiator.to_bytes().unwrap()); + hasher.update(self.authorization_keys.to_bytes().unwrap()); + hasher.update(self.args.to_bytes().unwrap()); + hasher.update(self.gas.to_bytes().unwrap()); + hasher.finalize_variable(|slice| { + result.copy_from_slice(slice); + }); + TransactionHash::V1(TransactionV1Hash::from_raw(result)) + } + }; + + TransferRequest::with_runtime_args( + self.config, + self.state_hash, + self.protocol_version, + txn_hash, + self.initiator, + self.authorization_keys, + RuntimeArgs::from(self.args), + ) + } + + /// Sets the runtime args used by the [`TransferRequest`]. + /// + /// NOTE: This is not generally useful for creating a valid `TransferRequest`, and hence is + /// subject to change or deletion without notice. 
+ #[doc(hidden)] + pub fn with_args(mut self, args: RuntimeArgs) -> Self { + self.args = args + .named_args() + .map(|named_arg| (named_arg.name().to_string(), named_arg.cl_value().clone())) + .collect(); + self + } +} diff --git a/execution_engine_testing/test_support/src/upgrade_request_builder.rs b/execution_engine_testing/test_support/src/upgrade_request_builder.rs new file mode 100644 index 0000000000..207ecc6c3c --- /dev/null +++ b/execution_engine_testing/test_support/src/upgrade_request_builder.rs @@ -0,0 +1,200 @@ +use std::collections::BTreeMap; + +use num_rational::Ratio; + +use casper_types::{ + ChainspecRegistry, Digest, EraId, FeeHandling, HoldBalanceHandling, Key, ProtocolUpgradeConfig, + ProtocolVersion, StoredValue, +}; + +/// Builds an `UpgradeConfig`. +pub struct UpgradeRequestBuilder { + pre_state_hash: Digest, + current_protocol_version: ProtocolVersion, + new_protocol_version: ProtocolVersion, + activation_point: Option, + new_gas_hold_handling: Option, + new_gas_hold_interval: Option, + new_validator_slots: Option, + new_auction_delay: Option, + new_locked_funds_period_millis: Option, + new_round_seigniorage_rate: Option>, + new_unbonding_delay: Option, + global_state_update: BTreeMap, + chainspec_registry: ChainspecRegistry, + fee_handling: FeeHandling, + validator_minimum_bid_amount: u64, + maximum_delegation_amount: u64, + minimum_delegation_amount: u64, + enable_addressable_entity: bool, +} + +impl UpgradeRequestBuilder { + /// Returns a new `UpgradeRequestBuilder`. + pub fn new() -> Self { + Default::default() + } + + /// Sets a pre-state hash using a [`Digest`]. + pub fn with_pre_state_hash(mut self, pre_state_hash: Digest) -> Self { + self.pre_state_hash = pre_state_hash; + self + } + + /// Sets `current_protocol_version` to the given [`ProtocolVersion`]. 
+ pub fn with_current_protocol_version(mut self, protocol_version: ProtocolVersion) -> Self { + self.current_protocol_version = protocol_version; + self + } + + /// Sets `new_protocol_version` to the given [`ProtocolVersion`]. + pub fn with_new_protocol_version(mut self, protocol_version: ProtocolVersion) -> Self { + self.new_protocol_version = protocol_version; + self + } + + /// Sets `with_new_gas_hold_handling`. + pub fn with_new_gas_hold_handling(mut self, gas_hold_handling: HoldBalanceHandling) -> Self { + self.new_gas_hold_handling = Some(gas_hold_handling); + self + } + + /// Sets `with_new_gas_hold_interval`. + pub fn with_new_gas_hold_interval(mut self, gas_hold_interval: u64) -> Self { + self.new_gas_hold_interval = Some(gas_hold_interval); + self + } + + /// Sets `new_validator_slots`. + pub fn with_new_validator_slots(mut self, new_validator_slots: u32) -> Self { + self.new_validator_slots = Some(new_validator_slots); + self + } + + /// Sets `new_auction_delay`. + pub fn with_new_auction_delay(mut self, new_auction_delay: u64) -> Self { + self.new_auction_delay = Some(new_auction_delay); + self + } + + /// Sets `new_locked_funds_period_millis`. + pub fn with_new_locked_funds_period_millis( + mut self, + new_locked_funds_period_millis: u64, + ) -> Self { + self.new_locked_funds_period_millis = Some(new_locked_funds_period_millis); + self + } + + /// Sets `new_round_seigniorage_rate`. + pub fn with_new_round_seigniorage_rate(mut self, rate: Ratio) -> Self { + self.new_round_seigniorage_rate = Some(rate); + self + } + + /// Sets `new_unbonding_delay`. + pub fn with_new_unbonding_delay(mut self, unbonding_delay: u64) -> Self { + self.new_unbonding_delay = Some(unbonding_delay); + self + } + + /// Sets `global_state_update`. + pub fn with_global_state_update( + mut self, + global_state_update: BTreeMap, + ) -> Self { + self.global_state_update = global_state_update; + self + } + + /// Sets `activation_point`. 
+ pub fn with_activation_point(mut self, activation_point: EraId) -> Self { + self.activation_point = Some(activation_point); + self + } + + /// Sets the Chainspec registry. + pub fn with_chainspec_registry(mut self, chainspec_registry: ChainspecRegistry) -> Self { + self.chainspec_registry = chainspec_registry; + self + } + + /// Sets the fee handling. + pub fn with_fee_handling(mut self, fee_handling: FeeHandling) -> Self { + self.fee_handling = fee_handling; + self + } + + /// Set the validator minimum bid amount. + pub fn with_validator_minimum_bid_amount(mut self, validator_minimum_bid_amount: u64) -> Self { + self.validator_minimum_bid_amount = validator_minimum_bid_amount; + self + } + + /// Sets the maximum delegation for the validators bid during migration. + pub fn with_maximum_delegation_amount(mut self, maximum_delegation_amount: u64) -> Self { + self.maximum_delegation_amount = maximum_delegation_amount; + self + } + + /// Sets the minimum delegation for the validators bid during migration. + pub fn with_minimum_delegation_amount(mut self, minimum_delegation_amount: u64) -> Self { + self.minimum_delegation_amount = minimum_delegation_amount; + self + } + + /// Sets the enable entity flag. + pub fn with_enable_addressable_entity(mut self, enable_entity: bool) -> Self { + self.enable_addressable_entity = enable_entity; + self + } + + /// Consumes the `UpgradeRequestBuilder` and returns an [`ProtocolUpgradeConfig`]. 
+ pub fn build(self) -> ProtocolUpgradeConfig { + ProtocolUpgradeConfig::new( + self.pre_state_hash, + self.current_protocol_version, + self.new_protocol_version, + self.activation_point, + self.new_gas_hold_handling, + self.new_gas_hold_interval, + self.new_validator_slots, + self.new_auction_delay, + self.new_locked_funds_period_millis, + self.new_round_seigniorage_rate, + self.new_unbonding_delay, + self.global_state_update, + self.chainspec_registry, + self.fee_handling, + self.validator_minimum_bid_amount, + self.maximum_delegation_amount, + self.minimum_delegation_amount, + self.enable_addressable_entity, + ) + } +} + +impl Default for UpgradeRequestBuilder { + fn default() -> UpgradeRequestBuilder { + UpgradeRequestBuilder { + pre_state_hash: Default::default(), + current_protocol_version: Default::default(), + new_protocol_version: Default::default(), + activation_point: None, + new_gas_hold_handling: None, + new_gas_hold_interval: None, + new_validator_slots: None, + new_auction_delay: None, + new_locked_funds_period_millis: None, + new_round_seigniorage_rate: None, + new_unbonding_delay: None, + global_state_update: Default::default(), + chainspec_registry: ChainspecRegistry::new_with_optional_global_state(&[], None), + fee_handling: FeeHandling::default(), + validator_minimum_bid_amount: 2_500_000_000_000u64, + maximum_delegation_amount: u64::MAX, + minimum_delegation_amount: 0, + enable_addressable_entity: false, + } + } +} diff --git a/execution_engine_testing/test_support/src/utils.rs b/execution_engine_testing/test_support/src/utils.rs new file mode 100644 index 0000000000..a6e2b6a7fe --- /dev/null +++ b/execution_engine_testing/test_support/src/utils.rs @@ -0,0 +1,195 @@ +//! Utility types and functions for working with execution engine tests. 
+ +use std::{ + env, fs, + path::{Path, PathBuf}, +}; + +use once_cell::sync::Lazy; + +use casper_execution_engine::engine_state::{Error, WasmV1Result}; +use casper_storage::data_access_layer::GenesisRequest; +use casper_types::{bytesrepr::Bytes, GenesisAccount, GenesisConfig}; + +use super::{ + ChainspecConfig, DEFAULT_ROUND_SEIGNIORAGE_RATE, DEFAULT_SYSTEM_CONFIG, DEFAULT_UNBONDING_DELAY, +}; +use crate::{ + GenesisConfigBuilder, DEFAULT_AUCTION_DELAY, DEFAULT_CHAINSPEC_REGISTRY, + DEFAULT_GENESIS_CONFIG_HASH, DEFAULT_GENESIS_TIMESTAMP_MILLIS, + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS, DEFAULT_PROTOCOL_VERSION, DEFAULT_STORAGE_COSTS, + DEFAULT_VALIDATOR_SLOTS, DEFAULT_WASM_CONFIG, +}; + +static RUST_WORKSPACE_PATH: Lazy = Lazy::new(|| { + let path = Path::new(env!("CARGO_MANIFEST_DIR")) + .parent() + .and_then(Path::parent) + .expect("CARGO_MANIFEST_DIR should have parent"); + assert!( + path.exists(), + "Workspace path {} does not exists", + path.display() + ); + path.to_path_buf() +}); +// The location of compiled Wasm files if compiled from the Rust sources within the casper-node +// repo, i.e. 'casper-node/target/wasm32-unknown-unknown/release/'. +static RUST_WORKSPACE_WASM_PATH: Lazy = Lazy::new(|| { + let path = RUST_WORKSPACE_PATH + .join("target") + .join("wasm32-unknown-unknown") + .join("release"); + assert!( + path.exists() || RUST_TOOL_WASM_PATH.exists(), + "Rust Wasm path {} does not exists", + path.display() + ); + path +}); +// The location of compiled Wasm files if running from within the 'tests' crate generated by the +// cargo_casper tool, i.e. 'wasm/'. +static RUST_TOOL_WASM_PATH: Lazy = Lazy::new(|| { + env::current_dir() + .expect("should get current working dir") + .join("wasm") +}); +// The location of compiled Wasm files if compiled from the Rust sources within the casper-node +// repo where `CARGO_TARGET_DIR` is set, i.e. +// '/wasm32-unknown-unknown/release/'. 
+static MAYBE_CARGO_TARGET_DIR_WASM_PATH: Lazy> = Lazy::new(|| { + let maybe_target = std::env::var("CARGO_TARGET_DIR").ok(); + maybe_target.as_ref().map(|path| { + Path::new(path) + .join("wasm32-unknown-unknown") + .join("release") + }) +}); +static WASM_PATHS: Lazy> = Lazy::new(get_compiled_wasm_paths); + +/// Constructs a list of paths that should be considered while looking for a compiled wasm file. +fn get_compiled_wasm_paths() -> Vec { + let mut ret = vec![ + RUST_WORKSPACE_WASM_PATH.clone(), + RUST_TOOL_WASM_PATH.clone(), + ]; + if let Some(cargo_target_dir_wasm_path) = &*MAYBE_CARGO_TARGET_DIR_WASM_PATH { + ret.push(cargo_target_dir_wasm_path.clone()); + }; + ret +} + +/// Reads a given compiled contract file based on path +pub fn read_wasm_file>(contract_file: T) -> Bytes { + let mut attempted_paths = vec![]; + + if contract_file.as_ref().is_relative() { + // Find first path to a given file found in a list of paths + for wasm_path in WASM_PATHS.iter() { + let mut filename = wasm_path.clone(); + filename.push(contract_file.as_ref()); + if let Ok(wasm_bytes) = fs::read(&filename) { + return Bytes::from(wasm_bytes); + } + attempted_paths.push(filename); + } + } + // Try just opening in case the arg is a valid path relative to current working dir, or is a + // valid absolute path. + if let Ok(wasm_bytes) = fs::read(contract_file.as_ref()) { + return Bytes::from(wasm_bytes); + } + attempted_paths.push(contract_file.as_ref().to_owned()); + + let mut error_msg = + "\nFailed to open compiled Wasm file. Tried the following locations:\n".to_string(); + for attempted_path in attempted_paths { + error_msg = format!("{} - {}\n", error_msg, attempted_path.display()); + } + + panic!("{}\n", error_msg); +} + +/// Returns an [`GenesisConfig`]. 
+pub fn create_genesis_config(accounts: Vec) -> GenesisConfig { + let wasm_config = *DEFAULT_WASM_CONFIG; + let system_config = *DEFAULT_SYSTEM_CONFIG; + let validator_slots = DEFAULT_VALIDATOR_SLOTS; + let auction_delay = DEFAULT_AUCTION_DELAY; + let locked_funds_period_millis = DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS; + let round_seigniorage_rate = DEFAULT_ROUND_SEIGNIORAGE_RATE; + let unbonding_delay = DEFAULT_UNBONDING_DELAY; + let genesis_timestamp_millis = DEFAULT_GENESIS_TIMESTAMP_MILLIS; + let storage_costs = *DEFAULT_STORAGE_COSTS; + + GenesisConfigBuilder::default() + .with_accounts(accounts) + .with_wasm_config(wasm_config) + .with_system_config(system_config) + .with_validator_slots(validator_slots) + .with_auction_delay(auction_delay) + .with_locked_funds_period_millis(locked_funds_period_millis) + .with_round_seigniorage_rate(round_seigniorage_rate) + .with_unbonding_delay(unbonding_delay) + .with_genesis_timestamp_millis(genesis_timestamp_millis) + .with_storage_costs(storage_costs) + .build() +} + +/// Returns an [`GenesisConfig`] using a given chainspec config. +pub fn create_genesis_config_with_chainspec( + accounts: Vec, + chainspec: ChainspecConfig, +) -> GenesisConfig { + GenesisConfigBuilder::default() + .with_accounts(accounts) + .with_wasm_config(chainspec.wasm_config) + .with_system_config(chainspec.system_costs_config) + .with_validator_slots(chainspec.core_config.validator_slots) + .with_auction_delay(chainspec.core_config.auction_delay) + .with_locked_funds_period_millis(chainspec.core_config.locked_funds_period.millis()) + .with_round_seigniorage_rate(chainspec.core_config.round_seigniorage_rate) + .with_unbonding_delay(chainspec.core_config.unbonding_delay) + .with_genesis_timestamp_millis(DEFAULT_GENESIS_TIMESTAMP_MILLIS) + .with_storage_costs(chainspec.storage_costs) + .with_enable_addressable_entity(chainspec.core_config.enable_addressable_entity) + .build() +} + +/// Returns a [`GenesisRequest`]. 
+pub fn create_run_genesis_request(accounts: Vec) -> GenesisRequest { + let config = create_genesis_config(accounts); + GenesisRequest::new( + DEFAULT_GENESIS_CONFIG_HASH, + DEFAULT_PROTOCOL_VERSION, + config, + DEFAULT_CHAINSPEC_REGISTRY.clone(), + ) +} + +/// Returns a [`GenesisRequest`] using a given chainspec config. +pub fn create_run_genesis_request_with_chainspec_config( + accounts: Vec, + chainspec_config: ChainspecConfig, +) -> GenesisRequest { + let config = create_genesis_config_with_chainspec(accounts, chainspec_config); + GenesisRequest::new( + DEFAULT_GENESIS_CONFIG_HASH, + DEFAULT_PROTOCOL_VERSION, + config, + DEFAULT_CHAINSPEC_REGISTRY.clone(), + ) +} + +/// Returns an error if the `ExecutionResult` has an error. +/// +/// # Panics +/// * Panics if the result does not have a precondition failure. +/// * Panics if result.as_error() is `None`. +pub fn get_precondition_failure(exec_result: &WasmV1Result) -> &Error { + assert!( + exec_result.has_precondition_failure(), + "should be a precondition failure" + ); + exec_result.error().expect("should have an error") +} diff --git a/execution_engine_testing/test_support/src/value.rs b/execution_engine_testing/test_support/src/value.rs deleted file mode 100644 index 05b6d8e026..0000000000 --- a/execution_engine_testing/test_support/src/value.rs +++ /dev/null @@ -1,41 +0,0 @@ -use std::convert::{TryFrom, TryInto}; - -use casper_execution_engine::shared::stored_value::StoredValue; -use casper_types::{ - bytesrepr::{FromBytes, ToBytes}, - CLTyped, CLValue, -}; - -use crate::{Account, Result}; - -/// A value stored under a given key on the network. -#[derive(Eq, PartialEq, Clone, Debug)] -pub struct Value { - inner: StoredValue, -} - -impl Value { - pub(crate) fn new(stored_value: StoredValue) -> Self { - Value { - inner: stored_value, - } - } - - /// Constructs a `Value` from `t`. 
- pub fn from_t(t: T) -> Result { - let cl_value = CLValue::from_t(t)?; - let inner = StoredValue::CLValue(cl_value); - Ok(Value { inner }) - } - - /// Consumes and converts `self` back into its underlying type. - pub fn into_t(self) -> Result { - let cl_value = CLValue::try_from(self.inner)?; - Ok(cl_value.into_t()?) - } - - /// Consumes and converts `self` into an `Account` or errors. - pub fn into_account(self) -> Result { - self.inner.try_into() - } -} diff --git a/execution_engine_testing/test_support/src/wasm_test_builder.rs b/execution_engine_testing/test_support/src/wasm_test_builder.rs new file mode 100644 index 0000000000..dfdda887db --- /dev/null +++ b/execution_engine_testing/test_support/src/wasm_test_builder.rs @@ -0,0 +1,2137 @@ +use std::{ + collections::{BTreeMap, BTreeSet}, + convert::TryFrom, + ffi::OsStr, + fs, + iter::{self, FromIterator}, + ops::Deref, + path::{Path, PathBuf}, + rc::Rc, + sync::Arc, +}; + +use filesize::PathExt; +use lmdb::DatabaseFlags; +use num_rational::Ratio; +use num_traits::{CheckedMul, Zero}; +use tempfile::TempDir; + +use casper_execution_engine::engine_state::{ + EngineConfig, Error, ExecutionEngineV1, WasmV1Request, WasmV1Result, DEFAULT_MAX_QUERY_DEPTH, +}; +use casper_storage::{ + data_access_layer::{ + balance::BalanceHandling, AuctionMethod, BalanceIdentifier, BalanceRequest, BalanceResult, + BiddingRequest, BiddingResult, BidsRequest, BlockRewardsRequest, BlockRewardsResult, + BlockStore, DataAccessLayer, EraValidatorsRequest, EraValidatorsResult, FeeRequest, + FeeResult, FlushRequest, FlushResult, GenesisRequest, GenesisResult, HandleFeeMode, + HandleFeeRequest, HandleFeeResult, MessageTopicsRequest, MessageTopicsResult, + ProofHandling, ProtocolUpgradeRequest, ProtocolUpgradeResult, PruneRequest, PruneResult, + QueryRequest, QueryResult, RoundSeigniorageRateRequest, RoundSeigniorageRateResult, + StepRequest, StepResult, SystemEntityRegistryPayload, SystemEntityRegistryRequest, + SystemEntityRegistryResult, 
SystemEntityRegistrySelector, TotalSupplyRequest, + TotalSupplyResult, TransferRequest, TrieRequest, + }, + global_state::{ + state::{ + lmdb::LmdbGlobalState, scratch::ScratchGlobalState, CommitProvider, ScratchProvider, + StateProvider, StateReader, + }, + transaction_source::lmdb::LmdbEnvironment, + trie::Trie, + trie_store::lmdb::LmdbTrieStore, + }, + system::runtime_native::{Config as NativeRuntimeConfig, TransferConfig}, + tracking_copy::{TrackingCopyEntityExt, TrackingCopyExt}, + AddressGenerator, +}; + +use casper_types::{ + account::AccountHash, + addressable_entity::{EntityKindTag, MessageTopics, NamedKeyAddr}, + bytesrepr::{self, FromBytes}, + contracts::{ContractHash, NamedKeys}, + execution::Effects, + global_state::TrieMerkleProof, + runtime_args, + system::{ + auction::{ + BidAddrTag, BidKind, EraValidators, Unbond, UnbondKind, UnbondingPurse, ValidatorBid, + ValidatorWeights, WithdrawPurses, ARG_ERA_END_TIMESTAMP_MILLIS, ARG_EVICTED_VALIDATORS, + AUCTION_DELAY_KEY, ERA_ID_KEY, METHOD_RUN_AUCTION, UNBONDING_DELAY_KEY, + }, + mint::{MINT_GAS_HOLD_HANDLING_KEY, MINT_GAS_HOLD_INTERVAL_KEY}, + AUCTION, HANDLE_PAYMENT, MINT, STANDARD_PAYMENT, + }, + AccessRights, Account, AddressableEntity, AddressableEntityHash, AuctionCosts, BlockGlobalAddr, + BlockTime, ByteCode, ByteCodeAddr, ByteCodeHash, CLTyped, CLValue, Contract, Digest, + EntityAddr, EntryPoints, EraId, FeeHandling, Gas, HandlePaymentCosts, HoldBalanceHandling, + InitiatorAddr, Key, KeyTag, MintCosts, Motes, Package, PackageHash, Phase, + ProtocolUpgradeConfig, ProtocolVersion, PublicKey, RefundHandling, StoredValue, + SystemHashRegistry, TransactionHash, TransactionV1Hash, URef, OS_PAGE_SIZE, U512, +}; + +use crate::{ + chainspec_config::{ChainspecConfig, CHAINSPEC_SYMLINK}, + ExecuteRequest, ExecuteRequestBuilder, StepRequestBuilder, DEFAULT_GAS_PRICE, + DEFAULT_PROPOSER_ADDR, DEFAULT_PROTOCOL_VERSION, SYSTEM_ADDR, +}; + +/// LMDB initial map size is calculated based on DEFAULT_LMDB_PAGES and 
systems page size. +pub(crate) const DEFAULT_LMDB_PAGES: usize = 256_000_000; + +/// LMDB max readers +/// +/// The default value is chosen to be the same as the node itself. +pub(crate) const DEFAULT_MAX_READERS: u32 = 512; + +/// This is appended to the data dir path provided to the `LmdbWasmTestBuilder`". +const GLOBAL_STATE_DIR: &str = "global_state"; + +/// A wrapper structure that groups an entity alongside its namedkeys. +#[derive(Debug)] +pub struct EntityWithNamedKeys { + entity: AddressableEntity, + named_keys: NamedKeys, +} + +impl EntityWithNamedKeys { + /// Creates a new instance of an Entity with its NamedKeys. + pub fn new(entity: AddressableEntity, named_keys: NamedKeys) -> Self { + Self { entity, named_keys } + } + + /// Returns a reference to the Entity. + pub fn entity(&self) -> AddressableEntity { + self.entity.clone() + } + + /// Returns a reference to the main purse for the inner entity. + pub fn main_purse(&self) -> URef { + self.entity.main_purse() + } + + /// Returns a reference to the NamedKeys. + pub fn named_keys(&self) -> &NamedKeys { + &self.named_keys + } +} + +/// Builder for simple WASM test +pub struct WasmTestBuilder { + /// Data access layer. + data_access_layer: Arc, + /// [`ExecutionEngineV1`] is wrapped in [`Rc`] to work around a missing [`Clone`] + /// implementation. + execution_engine: Rc, + /// The chainspec. + chainspec: ChainspecConfig, + exec_results: Vec, + upgrade_results: Vec, + prune_results: Vec, + genesis_hash: Option, + /// Post state hash. + post_state_hash: Option, + /// Cached effects after successful runs i.e. `effects[0]` is the collection of effects for + /// first exec call, etc. + effects: Vec, + /// Genesis effects. + genesis_effects: Option, + /// Cached system account. + system_account: Option, + /// Scratch global state used for in-memory execution and commit optimization. + scratch_global_state: Option, + /// Global state dir, for implementations that define one. 
+ global_state_dir: Option, + /// Temporary directory, for implementation that uses one. + temp_dir: Option>, +} + +impl WasmTestBuilder { + /// Commit scratch to global state, and reset the scratch cache. + pub fn write_scratch_to_db(&mut self) -> &mut Self { + let prestate_hash = self.post_state_hash.expect("Should have genesis hash"); + if let Some(scratch) = self.scratch_global_state.take() { + let new_state_root = self + .data_access_layer + .write_scratch_to_db(prestate_hash, scratch) + .unwrap(); + self.post_state_hash = Some(new_state_root); + } + self + } + /// Flushes the LMDB environment to disk. + pub fn flush_environment(&self) { + let request = FlushRequest::new(); + if let FlushResult::Failure(gse) = self.data_access_layer.flush(request) { + panic!("flush failed: {:?}", gse) + } + } + + /// Execute and commit transforms from an ExecuteRequest into a scratch global state. + /// You MUST call write_scratch_to_lmdb to flush these changes to LmdbGlobalState. + #[allow(deprecated)] + pub fn scratch_exec_and_commit(&mut self, mut exec_request: WasmV1Request) -> &mut Self { + if self.scratch_global_state.is_none() { + self.scratch_global_state = Some(self.data_access_layer.get_scratch_global_state()); + } + + let cached_state = self + .scratch_global_state + .as_ref() + .expect("scratch state should exist"); + + let state_hash = self.post_state_hash.expect("expected post_state_hash"); + exec_request.block_info.with_state_hash(state_hash); + + // First execute the request against our scratch global state. + let execution_result = self.execution_engine.execute(cached_state, exec_request); + let _post_state_hash = cached_state + .commit_effects( + self.post_state_hash.expect("requires a post_state_hash"), + execution_result.effects().clone(), + ) + .expect("should commit"); + + // Save transforms and execution results for WasmTestBuilder. 
+ self.effects.push(execution_result.effects().clone()); + self.exec_results.push(execution_result); + self + } +} + +impl Clone for WasmTestBuilder { + fn clone(&self) -> Self { + WasmTestBuilder { + data_access_layer: Arc::clone(&self.data_access_layer), + execution_engine: Rc::clone(&self.execution_engine), + chainspec: self.chainspec.clone(), + exec_results: self.exec_results.clone(), + upgrade_results: self.upgrade_results.clone(), + prune_results: self.prune_results.clone(), + genesis_hash: self.genesis_hash, + post_state_hash: self.post_state_hash, + effects: self.effects.clone(), + genesis_effects: self.genesis_effects.clone(), + system_account: self.system_account.clone(), + scratch_global_state: None, + global_state_dir: self.global_state_dir.clone(), + temp_dir: self.temp_dir.clone(), + } + } +} + +#[derive(Copy, Clone, Debug)] +enum GlobalStateMode { + /// Creates empty lmdb database with specified flags + Create(DatabaseFlags), + /// Opens existing database + Open(Digest), +} + +impl GlobalStateMode { + fn post_state_hash(self) -> Option { + match self { + GlobalStateMode::Create(_) => None, + GlobalStateMode::Open(post_state_hash) => Some(post_state_hash), + } + } +} + +/// Wasm test builder where state is held in LMDB. +pub type LmdbWasmTestBuilder = WasmTestBuilder>; + +impl Default for LmdbWasmTestBuilder { + fn default() -> Self { + Self::new_temporary_with_chainspec(&*CHAINSPEC_SYMLINK) + } +} + +impl LmdbWasmTestBuilder { + /// Upgrades the execution engine using the scratch trie. 
+ pub fn upgrade_using_scratch( + &mut self, + upgrade_config: &mut ProtocolUpgradeConfig, + ) -> &mut Self { + let pre_state_hash = self.post_state_hash.expect("should have state hash"); + upgrade_config.with_pre_state_hash(pre_state_hash); + + let scratch_state = self.data_access_layer.get_scratch_global_state(); + let pre_state_hash = upgrade_config.pre_state_hash(); + let req = ProtocolUpgradeRequest::new(upgrade_config.clone()); + let result = { + let result = scratch_state.protocol_upgrade(req); + if let ProtocolUpgradeResult::Success { effects, .. } = result { + let post_state_hash = self + .data_access_layer + .write_scratch_to_db(pre_state_hash, scratch_state) + .unwrap(); + self.post_state_hash = Some(post_state_hash); + let mut engine_config = self.chainspec.engine_config(); + let new_protocol_version = upgrade_config.new_protocol_version(); + engine_config.set_protocol_version(new_protocol_version); + self.execution_engine = Rc::new(ExecutionEngineV1::new(engine_config)); + ProtocolUpgradeResult::Success { + post_state_hash, + effects, + } + } else { + result + } + }; + self.upgrade_results.push(result); + self + } + + /// Returns an [`LmdbWasmTestBuilder`] with configuration. 
+ pub fn new_with_config + ?Sized>( + data_dir: &T, + chainspec: ChainspecConfig, + ) -> Self { + let _ = env_logger::try_init(); + let page_size = *OS_PAGE_SIZE; + let global_state_dir = Self::global_state_dir(data_dir); + Self::create_global_state_dir(&global_state_dir); + let environment = Arc::new( + LmdbEnvironment::new( + &global_state_dir, + page_size * DEFAULT_LMDB_PAGES, + DEFAULT_MAX_READERS, + true, + ) + .expect("should create LmdbEnvironment"), + ); + let trie_store = Arc::new( + LmdbTrieStore::new(&environment, None, DatabaseFlags::empty()) + .expect("should create LmdbTrieStore"), + ); + + let max_query_depth = DEFAULT_MAX_QUERY_DEPTH; + let enable_addressable_entity = chainspec.core_config.enable_addressable_entity; + let global_state = LmdbGlobalState::empty( + environment, + trie_store, + max_query_depth, + enable_addressable_entity, + ) + .expect("should create LmdbGlobalState"); + + let data_access_layer = Arc::new(DataAccessLayer { + block_store: BlockStore::new(), + state: global_state, + max_query_depth, + enable_addressable_entity, + }); + + let engine_config = chainspec.engine_config(); + let engine_state = ExecutionEngineV1::new(engine_config); + + WasmTestBuilder { + data_access_layer, + execution_engine: Rc::new(engine_state), + chainspec, + exec_results: Vec::new(), + upgrade_results: Vec::new(), + prune_results: Vec::new(), + genesis_hash: None, + post_state_hash: None, + effects: Vec::new(), + system_account: None, + genesis_effects: None, + scratch_global_state: None, + global_state_dir: Some(global_state_dir), + temp_dir: None, + } + } + + fn create_or_open>( + global_state_dir: T, + chainspec: ChainspecConfig, + protocol_version: ProtocolVersion, + mode: GlobalStateMode, + ) -> Self { + let _ = env_logger::try_init(); + let page_size = *OS_PAGE_SIZE; + + match mode { + GlobalStateMode::Create(_database_flags) => {} + GlobalStateMode::Open(_post_state_hash) => { + Self::create_global_state_dir(&global_state_dir) + } + } + + let 
environment = LmdbEnvironment::new( + &global_state_dir, + page_size * DEFAULT_LMDB_PAGES, + DEFAULT_MAX_READERS, + true, + ) + .expect("should create LmdbEnvironment"); + + let max_query_depth = DEFAULT_MAX_QUERY_DEPTH; + + let enable_addressable_entity = chainspec.core_config.enable_addressable_entity; + let global_state = match mode { + GlobalStateMode::Create(database_flags) => { + let trie_store = LmdbTrieStore::new(&environment, None, database_flags) + .expect("should open LmdbTrieStore"); + LmdbGlobalState::empty( + Arc::new(environment), + Arc::new(trie_store), + max_query_depth, + enable_addressable_entity, + ) + .expect("should create LmdbGlobalState") + } + GlobalStateMode::Open(post_state_hash) => { + let trie_store = + LmdbTrieStore::open(&environment, None).expect("should open LmdbTrieStore"); + LmdbGlobalState::new( + Arc::new(environment), + Arc::new(trie_store), + post_state_hash, + max_query_depth, + enable_addressable_entity, + ) + } + }; + + let data_access_layer = Arc::new(DataAccessLayer { + block_store: BlockStore::new(), + state: global_state, + max_query_depth, + enable_addressable_entity, + }); + let mut engine_config = chainspec.engine_config(); + engine_config.set_protocol_version(protocol_version); + let engine_state = ExecutionEngineV1::new(engine_config); + + let post_state_hash = mode.post_state_hash(); + + let builder = WasmTestBuilder { + data_access_layer, + execution_engine: Rc::new(engine_state), + chainspec, + exec_results: Vec::new(), + upgrade_results: Vec::new(), + prune_results: Vec::new(), + genesis_hash: None, + post_state_hash, + effects: Vec::new(), + genesis_effects: None, + system_account: None, + scratch_global_state: None, + global_state_dir: Some(global_state_dir.as_ref().to_path_buf()), + temp_dir: None, + }; + + builder + } + + /// Returns an [`LmdbWasmTestBuilder`] with configuration and values from + /// a given chainspec. 
+ pub fn new_with_chainspec + ?Sized, P: AsRef>( + data_dir: &T, + chainspec_path: P, + ) -> Self { + let chainspec_config = ChainspecConfig::from_chainspec_path(chainspec_path) + .expect("must build chainspec configuration"); + + Self::new_with_config(data_dir, chainspec_config) + } + + /// Returns an [`LmdbWasmTestBuilder`] with configuration and values from + /// the production chainspec. + pub fn new_with_production_chainspec + ?Sized>(data_dir: &T) -> Self { + Self::new_with_chainspec(data_dir, &*CHAINSPEC_SYMLINK) + } + + /// Returns a new [`LmdbWasmTestBuilder`]. + pub fn new + ?Sized>(data_dir: &T) -> Self { + Self::new_with_config(data_dir, Default::default()) + } + + /// Creates a new instance of builder using the supplied configurations, opening wrapped LMDBs + /// (e.g. in the Trie and Data stores) rather than creating them. + pub fn open + ?Sized>( + data_dir: &T, + chainspec: ChainspecConfig, + protocol_version: ProtocolVersion, + post_state_hash: Digest, + ) -> Self { + let global_state_path = Self::global_state_dir(data_dir); + Self::open_raw( + global_state_path, + chainspec, + protocol_version, + post_state_hash, + ) + } + + /// Creates a new instance of builder using the supplied configurations, opening wrapped LMDBs + /// (e.g. in the Trie and Data stores) rather than creating them. + /// Differs from `open` in that it doesn't append `GLOBAL_STATE_DIR` to the supplied path. + pub fn open_raw>( + global_state_dir: T, + chainspec: ChainspecConfig, + protocol_version: ProtocolVersion, + post_state_hash: Digest, + ) -> Self { + Self::create_or_open( + global_state_dir, + chainspec, + protocol_version, + GlobalStateMode::Open(post_state_hash), + ) + } + + /// Creates new temporary lmdb builder with an engine config instance. + /// + /// Once [`LmdbWasmTestBuilder`] instance goes out of scope a global state directory will be + /// removed as well. 
+ pub fn new_temporary_with_config(chainspec: ChainspecConfig) -> Self { + let temp_dir = tempfile::tempdir().unwrap(); + + let database_flags = DatabaseFlags::default(); + + let mut builder = Self::create_or_open( + temp_dir.path(), + chainspec, + DEFAULT_PROTOCOL_VERSION, + GlobalStateMode::Create(database_flags), + ); + + builder.temp_dir = Some(Rc::new(temp_dir)); + + builder + } + + /// Creates new temporary lmdb builder with a path to a chainspec to load. + /// + /// Once [`LmdbWasmTestBuilder`] instance goes out of scope a global state directory will be + /// removed as well. + pub fn new_temporary_with_chainspec>(chainspec_path: P) -> Self { + let chainspec = ChainspecConfig::from_chainspec_path(chainspec_path) + .expect("must build chainspec configuration"); + + Self::new_temporary_with_config(chainspec) + } + + fn create_global_state_dir>(global_state_path: T) { + fs::create_dir_all(&global_state_path).unwrap_or_else(|_| { + panic!( + "Expected to create {}", + global_state_path.as_ref().display() + ) + }); + } + + fn global_state_dir + ?Sized>(data_dir: &T) -> PathBuf { + let mut path = PathBuf::from(data_dir); + path.push(GLOBAL_STATE_DIR); + path + } + + /// Returns the file size on disk of the backing lmdb file behind LmdbGlobalState. + pub fn lmdb_on_disk_size(&self) -> Option { + if let Some(path) = self.global_state_dir.as_ref() { + let mut path = path.clone(); + path.push("data.lmdb"); + return path.as_path().size_on_disk().ok(); + } + None + } + + /// run step against scratch global state. 
+ pub fn step_with_scratch(&mut self, step_request: StepRequest) -> &mut Self { + if self.scratch_global_state.is_none() { + self.scratch_global_state = Some(self.data_access_layer.get_scratch_global_state()); + } + + let cached_state = self + .scratch_global_state + .as_ref() + .expect("scratch state should exist"); + + match cached_state.step(step_request) { + StepResult::RootNotFound => { + panic!("Root not found") + } + StepResult::Failure(err) => { + panic!("{:?}", err) + } + StepResult::Success { .. } => {} + } + self + } + + /// Runs a [`TransferRequest`] and commits the resulting effects. + pub fn transfer_and_commit(&mut self, mut transfer_request: TransferRequest) -> &mut Self { + let pre_state_hash = self.post_state_hash.expect("expected post_state_hash"); + transfer_request.set_state_hash_and_config(pre_state_hash, self.native_runtime_config()); + let transfer_result = self.data_access_layer.transfer(transfer_request); + let gas = Gas::new(self.chainspec.system_costs_config.mint_costs().transfer); + let execution_result = WasmV1Result::from_transfer_result(transfer_result, gas) + .expect("transfer result should map to wasm v1 result"); + let effects = execution_result.effects().clone(); + self.effects.push(effects.clone()); + self.exec_results.push(execution_result); + self.commit_transforms(pre_state_hash, effects); + self + } +} + +impl WasmTestBuilder +where + S: StateProvider + CommitProvider, +{ + /// Takes a [`GenesisRequest`], executes the request and returns Self. 
+ pub fn run_genesis(&mut self, request: GenesisRequest) -> &mut Self { + match self.data_access_layer.genesis(request) { + GenesisResult::Fatal(msg) => { + panic!("{}", msg); + } + GenesisResult::Failure(err) => { + panic!("{:?}", err); + } + GenesisResult::Success { + post_state_hash, + effects, + } => { + self.genesis_hash = Some(post_state_hash); + self.post_state_hash = Some(post_state_hash); + self.system_account = self.get_entity_by_account_hash(*SYSTEM_ADDR); + self.genesis_effects = Some(effects); + } + } + self + } + + fn query_system_entity_registry( + &self, + post_state_hash: Option, + ) -> Option { + match self.query(post_state_hash, Key::SystemEntityRegistry, &[]) { + Ok(StoredValue::CLValue(cl_registry)) => { + let system_entity_registry = + CLValue::into_t::(cl_registry).unwrap(); + Some(system_entity_registry) + } + Ok(_) => None, + Err(_) => None, + } + } + + /// Queries state for a [`StoredValue`]. + pub fn query( + &self, + maybe_post_state: Option, + base_key: Key, + path: &[String], + ) -> Result { + let post_state = maybe_post_state + .or(self.post_state_hash) + .expect("builder must have a post-state hash"); + + let query_request = QueryRequest::new(post_state, base_key, path.to_vec()); + + let query_result = self.data_access_layer.query(query_request); + if let QueryResult::Success { value, .. } = query_result { + return Ok(value.deref().clone()); + } + + Err(format!("{:?}", query_result)) + } + + /// Retrieves the message topics for the given hash addr. 
+ pub fn message_topics( + &self, + maybe_post_state: Option, + entity_addr: EntityAddr, + ) -> Result { + let post_state = maybe_post_state + .or(self.post_state_hash) + .expect("builder must have a post-state hash"); + + let request = MessageTopicsRequest::new(post_state, entity_addr); + let result = self.data_access_layer.message_topics(request); + if let MessageTopicsResult::Success { message_topics } = result { + return Ok(message_topics); + } + + Err(format!("{:?}", result)) + } + + /// Query a named key in global state by account hash. + pub fn query_named_key_by_account_hash( + &self, + maybe_post_state: Option, + account_hash: AccountHash, + name: &str, + ) -> Result { + let entity_addr = self + .get_entity_hash_by_account_hash(account_hash) + .map(|entity_hash| EntityAddr::new_account(entity_hash.value())) + .expect("must get EntityAddr"); + self.query_named_key(maybe_post_state, entity_addr, name) + } + + /// Query a named key. + pub fn query_named_key( + &self, + maybe_post_state: Option, + entity_addr: EntityAddr, + name: &str, + ) -> Result { + let named_key_addr = NamedKeyAddr::new_from_string(entity_addr, name.to_string()) + .expect("could not create named key address"); + let empty_path: Vec = vec![]; + let maybe_stored_value = self + .query(maybe_post_state, Key::NamedKey(named_key_addr), &empty_path) + .expect("no stored value found"); + let key = maybe_stored_value + .as_cl_value() + .map(|cl_val| CLValue::into_t::(cl_val.clone())) + .expect("must be cl_value") + .expect("must get key"); + self.query(maybe_post_state, key, &[]) + } + + /// Queries state for a dictionary item. 
+ pub fn query_dictionary_item( + &self, + maybe_post_state: Option, + dictionary_seed_uref: URef, + dictionary_item_key: &str, + ) -> Result { + let dictionary_address = + Key::dictionary(dictionary_seed_uref, dictionary_item_key.as_bytes()); + let empty_path: Vec = vec![]; + self.query(maybe_post_state, dictionary_address, &empty_path) + } + + /// Queries for a [`StoredValue`] and returns the [`StoredValue`] and a Merkle proof. + pub fn query_with_proof( + &self, + maybe_post_state: Option, + base_key: Key, + path: &[String], + ) -> Result<(StoredValue, Vec>), String> { + let post_state = maybe_post_state + .or(self.post_state_hash) + .expect("builder must have a post-state hash"); + + let path_vec: Vec = path.to_vec(); + + let query_request = QueryRequest::new(post_state, base_key, path_vec); + + let query_result = self.data_access_layer.query(query_request); + + if let QueryResult::Success { value, proofs } = query_result { + return Ok((value.deref().clone(), proofs)); + } + + panic! {"{:?}", query_result}; + } + + /// Queries for the total supply of token. + /// # Panics + /// Panics if the total supply can't be found. + pub fn total_supply( + &self, + protocol_version: ProtocolVersion, + maybe_post_state: Option, + ) -> U512 { + let post_state = maybe_post_state + .or(self.post_state_hash) + .expect("builder must have a post-state hash"); + let result = self + .data_access_layer + .total_supply(TotalSupplyRequest::new(post_state, protocol_version)); + if let TotalSupplyResult::Success { total_supply } = result { + total_supply + } else { + panic!("total supply should exist at every root hash {:?}", result); + } + } + + /// Queries for the round seigniorage rate. + /// # Panics + /// Panics if the total supply or seigniorage rate can't be found. 
+ pub fn round_seigniorage_rate( + &mut self, + maybe_post_state: Option, + protocol_version: ProtocolVersion, + ) -> Ratio { + let post_state = maybe_post_state + .or(self.post_state_hash) + .expect("builder must have a post-state hash"); + let result = + self.data_access_layer + .round_seigniorage_rate(RoundSeigniorageRateRequest::new( + post_state, + protocol_version, + )); + if let RoundSeigniorageRateResult::Success { rate } = result { + rate + } else { + panic!( + "round seigniorage rate should exist at every root hash {:?}", + result + ); + } + } + + /// Queries for the base round reward. + /// # Panics + /// Panics if the total supply or seigniorage rate can't be found. + pub fn base_round_reward( + &mut self, + maybe_post_state: Option, + protocol_version: ProtocolVersion, + ) -> U512 { + let post_state = maybe_post_state + .or(self.post_state_hash) + .expect("builder must have a post-state hash"); + let total_supply = self.total_supply(protocol_version, Some(post_state)); + let rate = self.round_seigniorage_rate(Some(post_state), protocol_version); + rate.checked_mul(&Ratio::from(total_supply)) + .map(|ratio| ratio.to_integer()) + .expect("must get base round reward") + } + + /// Direct auction interactions for stake management. 
+ pub fn bidding( + &mut self, + maybe_post_state: Option, + protocol_version: ProtocolVersion, + initiator: InitiatorAddr, + auction_method: AuctionMethod, + ) -> BiddingResult { + let post_state = maybe_post_state + .or(self.post_state_hash) + .expect("builder must have a post-state hash"); + + let transaction_hash = TransactionHash::V1(TransactionV1Hash::default()); + let authorization_keys = BTreeSet::from_iter(iter::once(initiator.account_hash())); + + let config = &self.chainspec; + let fee_handling = config.core_config.fee_handling; + let refund_handling = config.core_config.refund_handling; + let vesting_schedule_period_millis = config.core_config.vesting_schedule_period.millis(); + let allow_auction_bids = config.core_config.allow_auction_bids; + let compute_rewards = config.core_config.compute_rewards; + let max_delegators_per_validator = config.core_config.max_delegators_per_validator; + let minimum_bid_amount = config.core_config.minimum_bid_amount; + let minimum_delegation_amount = config.core_config.minimum_delegation_amount; + let balance_hold_interval = config.core_config.gas_hold_interval.millis(); + let include_credits = config.core_config.fee_handling == FeeHandling::NoFee; + let credit_cap = Ratio::new_raw( + U512::from(*config.core_config.validator_credit_cap.numer()), + U512::from(*config.core_config.validator_credit_cap.denom()), + ); + let enable_addressable_entity = config.core_config.enable_addressable_entity; + let native_runtime_config = casper_storage::system::runtime_native::Config::new( + TransferConfig::Unadministered, + fee_handling, + refund_handling, + vesting_schedule_period_millis, + allow_auction_bids, + compute_rewards, + max_delegators_per_validator, + minimum_bid_amount, + minimum_delegation_amount, + balance_hold_interval, + include_credits, + credit_cap, + enable_addressable_entity, + config.system_costs_config.mint_costs().transfer, + ); + + let bidding_req = BiddingRequest::new( + native_runtime_config, + post_state, + 
protocol_version, + transaction_hash, + initiator, + authorization_keys, + auction_method, + ); + self.data_access_layer().bidding(bidding_req) + } + + /// Runs an optional custom payment [`WasmV1Request`] and a session `WasmV1Request`. + /// + /// If the custom payment is `Some` and its execution fails, the session request is not + /// attempted. + pub fn exec_wasm_v1(&mut self, mut request: WasmV1Request) -> &mut Self { + let state_hash = self.post_state_hash.expect("expected post_state_hash"); + request.block_info.with_state_hash(state_hash); + let result = self + .execution_engine + .execute(self.data_access_layer.as_ref(), request); + let effects = result.effects().clone(); + self.exec_results.push(result); + self.effects.push(effects); + self + } + + /// Runs an [`ExecuteRequest`]. + pub fn exec(&mut self, mut exec_request: ExecuteRequest) -> &mut Self { + let mut effects = Effects::new(); + if let Some(mut payment) = exec_request.custom_payment { + let state_hash = self.post_state_hash.expect("expected post_state_hash"); + payment.block_info.with_state_hash(state_hash); + let payment_result = self + .execution_engine + .execute(self.data_access_layer.as_ref(), payment); + // If executing payment code failed, record this and exit without attempting session + // execution. 
+ effects = payment_result.effects().clone(); + let payment_failed = payment_result.error().is_some(); + self.exec_results.push(payment_result); + if payment_failed { + self.effects.push(effects); + return self; + } + } + let state_hash = self.post_state_hash.expect("expected post_state_hash"); + exec_request.session.block_info.with_state_hash(state_hash); + + let session_result = self + .execution_engine + .execute(self.data_access_layer.as_ref(), exec_request.session); + // Cache transformations + effects.append(session_result.effects().clone()); + self.effects.push(effects); + self.exec_results.push(session_result); + self + } + + /// Commit effects of previous exec call on the latest post-state hash. + pub fn commit(&mut self) -> &mut Self { + let prestate_hash = self.post_state_hash.expect("Should have genesis hash"); + + let effects = self.effects.last().cloned().unwrap_or_default(); + + self.commit_transforms(prestate_hash, effects) + } + + /// Runs a commit request, expects a successful response, and + /// overwrites existing cached post state hash with a new one. + pub fn commit_transforms(&mut self, pre_state_hash: Digest, effects: Effects) -> &mut Self { + let post_state_hash = self + .data_access_layer + .commit_effects(pre_state_hash, effects) + .expect("should commit"); + self.post_state_hash = Some(post_state_hash); + self + } + + /// Upgrades the execution engine. + pub fn upgrade(&mut self, upgrade_config: &mut ProtocolUpgradeConfig) -> &mut Self { + let pre_state_hash = self.post_state_hash.expect("should have state hash"); + upgrade_config.with_pre_state_hash(pre_state_hash); + + let req = ProtocolUpgradeRequest::new(upgrade_config.clone()); + + let result = self.data_access_layer.protocol_upgrade(req); + + if let ProtocolUpgradeResult::Success { + post_state_hash, .. 
+ } = result + { + let mut engine_config = self.chainspec.engine_config(); + engine_config.set_protocol_version(upgrade_config.new_protocol_version()); + self.execution_engine = Rc::new(ExecutionEngineV1::new(engine_config)); + self.post_state_hash = Some(post_state_hash); + } + + self.upgrade_results.push(result); + self + } + + /// Executes a request to call the system auction contract. + /// This ONLY executes the run_auction logic of the auction. If you are testing + /// specifically that function, this is sufficient. However, to match the standard + /// end of era auction behavior the comprehensive `step` function should be used instead. + pub fn run_auction( + &mut self, + era_end_timestamp_millis: u64, + evicted_validators: Vec, + ) -> &mut Self { + let auction = self.get_auction_contract_hash(); + let exec_request = ExecuteRequestBuilder::contract_call_by_hash( + *SYSTEM_ADDR, + auction, + METHOD_RUN_AUCTION, + runtime_args! { + ARG_ERA_END_TIMESTAMP_MILLIS => era_end_timestamp_millis, + ARG_EVICTED_VALIDATORS => evicted_validators, + }, + ) + .build(); + self.exec(exec_request).expect_success().commit() + } + + /// Increments engine state at end of era (rewards, auction, unbond, etc.). + pub fn step(&mut self, step_request: StepRequest) -> StepResult { + let step_result = self.data_access_layer.step(step_request); + + if let StepResult::Success { + post_state_hash, .. 
+ } = step_result + { + self.post_state_hash = Some(post_state_hash); + } + + step_result + } + + fn native_runtime_config(&self) -> NativeRuntimeConfig { + let administrators: BTreeSet = self + .chainspec + .core_config + .administrators + .iter() + .map(|x| x.to_account_hash()) + .collect(); + let allow_unrestricted = self.chainspec.core_config.allow_unrestricted_transfers; + let transfer_config = TransferConfig::new(administrators, allow_unrestricted); + let include_credits = self.chainspec.core_config.fee_handling == FeeHandling::NoFee; + let credit_cap = Ratio::new_raw( + U512::from(*self.chainspec.core_config.validator_credit_cap.numer()), + U512::from(*self.chainspec.core_config.validator_credit_cap.denom()), + ); + NativeRuntimeConfig::new( + transfer_config, + self.chainspec.core_config.fee_handling, + self.chainspec.core_config.refund_handling, + self.chainspec.core_config.vesting_schedule_period.millis(), + self.chainspec.core_config.allow_auction_bids, + self.chainspec.core_config.compute_rewards, + self.chainspec.core_config.max_delegators_per_validator, + self.chainspec.core_config.minimum_bid_amount, + self.chainspec.core_config.minimum_delegation_amount, + self.chainspec.core_config.gas_hold_interval.millis(), + include_credits, + credit_cap, + self.chainspec.core_config.enable_addressable_entity, + self.chainspec.system_costs_config.mint_costs().transfer, + ) + } + + /// Distribute fees. + pub fn distribute_fees( + &mut self, + pre_state_hash: Option, + protocol_version: ProtocolVersion, + block_time: u64, + ) -> FeeResult { + let native_runtime_config = self.native_runtime_config(); + + let pre_state_hash = pre_state_hash.or(self.post_state_hash).unwrap(); + let fee_req = FeeRequest::new( + native_runtime_config, + pre_state_hash, + protocol_version, + block_time.into(), + ); + let fee_result = self.data_access_layer.distribute_fees(fee_req); + + if let FeeResult::Success { + post_state_hash, .. 
+ } = fee_result + { + self.post_state_hash = Some(post_state_hash); + } + + fee_result + } + + /// Distributes the rewards. + pub fn distribute( + &mut self, + pre_state_hash: Option, + protocol_version: ProtocolVersion, + rewards: BTreeMap>, + block_time: u64, + ) -> BlockRewardsResult { + let pre_state_hash = pre_state_hash.or(self.post_state_hash).unwrap(); + let native_runtime_config = self.native_runtime_config(); + let distribute_req = BlockRewardsRequest::new( + native_runtime_config, + pre_state_hash, + protocol_version, + BlockTime::new(block_time), + rewards, + ); + let distribute_block_rewards_result = self + .data_access_layer + .distribute_block_rewards(distribute_req); + + if let BlockRewardsResult::Success { + post_state_hash, .. + } = distribute_block_rewards_result + { + self.post_state_hash = Some(post_state_hash); + } + + distribute_block_rewards_result + } + + /// Finalizes payment for a transaction + pub fn handle_fee( + &mut self, + pre_state_hash: Option, + protocol_version: ProtocolVersion, + transaction_hash: TransactionHash, + handle_fee_mode: HandleFeeMode, + ) -> HandleFeeResult { + let pre_state_hash = pre_state_hash.or(self.post_state_hash).unwrap(); + let native_runtime_config = self.native_runtime_config(); + let handle_fee_request = HandleFeeRequest::new( + native_runtime_config, + pre_state_hash, + protocol_version, + transaction_hash, + handle_fee_mode, + ); + let handle_fee_result = self.data_access_layer.handle_fee(handle_fee_request); + if let HandleFeeResult::Success { effects, .. 
} = &handle_fee_result { + self.commit_transforms(pre_state_hash, effects.clone()); + } + + handle_fee_result + } + + /// Expects a successful run + #[track_caller] + pub fn expect_success(&mut self) -> &mut Self { + let exec_result = self + .get_last_exec_result() + .expect("Expected to be called after exec()"); + if exec_result.error().is_some() { + panic!( + "Expected successful execution result, but instead got: {:#?}", + exec_result, + ); + } + self + } + + /// Expects a failed run + pub fn expect_failure(&mut self) -> &mut Self { + let exec_result = self + .get_last_exec_result() + .expect("Expected to be called after exec()"); + if exec_result.error().is_none() { + panic!( + "Expected failed execution result, but instead got: {:?}", + exec_result, + ); + } + self + } + + /// Returns `true` if the last exec had an error, otherwise returns false. + #[track_caller] + pub fn is_error(&self) -> bool { + self.get_last_exec_result() + .expect("Expected to be called after exec()") + .error() + .is_some() + } + + /// Returns an `engine_state::Error` if the last exec had an error, otherwise `None`. + #[track_caller] + pub fn get_error(&self) -> Option { + self.get_last_exec_result() + .expect("Expected to be called after exec()") + .error() + .cloned() + } + + /// Returns the error message of the last exec. + #[track_caller] + pub fn get_error_message(&self) -> Option { + self.get_last_exec_result() + .expect("Expected to be called after exec()") + .error() + .map(|error| error.to_string()) + } + + /// Gets `Effects` of all previous runs. + #[track_caller] + pub fn get_effects(&self) -> Vec { + self.effects.clone() + } + + /// Gets genesis account (if present) + pub fn get_genesis_account(&self) -> &AddressableEntity { + self.system_account + .as_ref() + .expect("Unable to obtain genesis account. Please run genesis first.") + } + + /// Returns the [`AddressableEntityHash`] of the mint, panics if it can't be found. 
+ pub fn get_mint_contract_hash(&self) -> AddressableEntityHash { + self.get_system_entity_hash(MINT) + .expect("Unable to obtain mint contract. Please run genesis first.") + } + + /// Returns the [`AddressableEntityHash`] of the "handle payment" contract, panics if it can't + /// be found. + pub fn get_handle_payment_contract_hash(&self) -> AddressableEntityHash { + self.get_system_entity_hash(HANDLE_PAYMENT) + .expect("Unable to obtain handle payment contract. Please run genesis first.") + } + + /// Returns the [`AddressableEntityHash`] of the "standard payment" contract, panics if it can't + /// be found. + pub fn get_standard_payment_contract_hash(&self) -> AddressableEntityHash { + self.get_system_entity_hash(STANDARD_PAYMENT) + .expect("Unable to obtain standard payment contract. Please run genesis first.") + } + + fn get_system_entity_hash(&self, contract_name: &str) -> Option { + self.query_system_entity_registry(self.post_state_hash)? + .get(contract_name) + .map(|hash| AddressableEntityHash::new(*hash)) + } + + /// Returns the [`AddressableEntityHash`] of the "auction" contract, panics if it can't be + /// found. + pub fn get_auction_contract_hash(&self) -> AddressableEntityHash { + self.get_system_entity_hash(AUCTION) + .expect("Unable to obtain auction contract. Please run genesis first.") + } + + /// Returns genesis effects, panics if there aren't any. + pub fn get_genesis_effects(&self) -> &Effects { + self.genesis_effects + .as_ref() + .expect("should have genesis transforms") + } + + /// Returns the genesis hash, panics if it can't be found. + pub fn get_genesis_hash(&self) -> Digest { + self.genesis_hash + .expect("Genesis hash should be present. Should be called after run_genesis.") + } + + /// Returns the post state hash, panics if it can't be found. + pub fn get_post_state_hash(&self) -> Digest { + self.post_state_hash.expect("Should have post-state hash.") + } + + /// The chainspec configured settings for this builder. 
+ pub fn chainspec(&self) -> &ChainspecConfig { + &self.chainspec + } + + /// The current engine config + pub fn engine_config(&self) -> &EngineConfig { + self.execution_engine.config() + } + + /// Update chainspec + pub fn with_chainspec(&mut self, chainspec: ChainspecConfig) -> &mut Self { + self.chainspec = chainspec; + self.execution_engine = Rc::new(ExecutionEngineV1::new(self.chainspec.engine_config())); + self + } + + /// Update the engine config of the builder. + pub fn with_engine_config(&mut self, engine_config: EngineConfig) -> &mut Self { + self.execution_engine = Rc::new(ExecutionEngineV1::new(engine_config)); + self + } + + /// Sets blocktime into global state. + pub fn with_block_time(&mut self, block_time: BlockTime) -> &mut Self { + if let Some(state_root_hash) = self.post_state_hash { + let mut tracking_copy = self + .data_access_layer + .tracking_copy(state_root_hash) + .expect("should not error on checkout") + .expect("should checkout tracking copy"); + + let cl_value = CLValue::from_t(block_time.value()).expect("should get cl value"); + tracking_copy.write( + Key::BlockGlobal(BlockGlobalAddr::BlockTime), + StoredValue::CLValue(cl_value), + ); + self.commit_transforms(state_root_hash, tracking_copy.effects()); + } + + self + } + + /// Writes a set of keys and values to global state. + pub fn write_data_and_commit( + &mut self, + data: impl Iterator, + ) -> &mut Self { + if let Some(state_root_hash) = self.post_state_hash { + let mut tracking_copy = self + .data_access_layer + .tracking_copy(state_root_hash) + .expect("should not error on checkout") + .expect("should checkout tracking copy"); + + for (key, val) in data { + tracking_copy.write(key, val); + } + + self.commit_transforms(state_root_hash, tracking_copy.effects()); + } + self + } + + /// Sets gas hold config into global state. 
+ pub fn with_gas_hold_config( + &mut self, + handling: HoldBalanceHandling, + interval: u64, + ) -> &mut Self { + if let Some(state_root_hash) = self.post_state_hash { + let mut tracking_copy = self + .data_access_layer + .tracking_copy(state_root_hash) + .expect("should not error on checkout") + .expect("should checkout tracking copy"); + + let registry = tracking_copy + .get_system_entity_registry() + .expect("should have registry"); + let mint = *registry.get("mint").expect("should have mint"); + let mint_addr = EntityAddr::new_system(mint); + let named_keys = tracking_copy + .get_named_keys(mint_addr) + .expect("should have named keys"); + + let mut address_generator = + AddressGenerator::new(state_root_hash.as_ref(), Phase::System); + + // gas handling + let uref = address_generator.new_uref(AccessRights::READ_ADD_WRITE); + let stored_value = StoredValue::CLValue( + CLValue::from_t(handling.tag()).expect("should turn handling tag into CLValue"), + ); + + tracking_copy + .upsert_uref_to_named_keys( + mint_addr, + MINT_GAS_HOLD_HANDLING_KEY, + &named_keys, + uref, + stored_value, + ) + .expect("should upsert gas handling"); + + // gas interval + let uref = address_generator.new_uref(AccessRights::READ_ADD_WRITE); + let stored_value = StoredValue::CLValue( + CLValue::from_t(interval).expect("should turn gas interval into CLValue"), + ); + + tracking_copy + .upsert_uref_to_named_keys( + mint_addr, + MINT_GAS_HOLD_INTERVAL_KEY, + &named_keys, + uref, + stored_value, + ) + .expect("should upsert gas interval"); + + self.commit_transforms(state_root_hash, tracking_copy.effects()); + } + self + } + + /// Returns the engine state. + pub fn get_engine_state(&self) -> &ExecutionEngineV1 { + &self.execution_engine + } + + /// Returns the engine state. + pub fn data_access_layer(&self) -> &S { + &self.data_access_layer + } + + /// Returns the last results execs. 
+ pub fn get_last_exec_result(&self) -> Option { + self.exec_results.last().cloned() + } + + /// Returns the owned results of a specific exec. + pub fn get_exec_result_owned(&self, index: usize) -> Option { + self.exec_results.get(index).cloned() + } + + /// Returns a count of exec results. + pub fn get_exec_results_count(&self) -> usize { + self.exec_results.len() + } + + /// Returns a `Result` containing an [`ProtocolUpgradeResult`]. + pub fn get_upgrade_result(&self, index: usize) -> Option<&ProtocolUpgradeResult> { + self.upgrade_results.get(index) + } + + /// Expects upgrade success. + pub fn expect_upgrade_success(&mut self) -> &mut Self { + // Check first result, as only first result is interesting for a simple test + let result = self + .upgrade_results + .last() + .expect("Expected to be called after a system upgrade."); + + assert!(result.is_success(), "Expected success, got: {:?}", result); + + self + } + + /// Expect failure of the protocol upgrade. + pub fn expect_upgrade_failure(&mut self) -> &mut Self { + // Check first result, as only first result is interesting for a simple test + let result = self + .upgrade_results + .last() + .expect("Expected to be called after a system upgrade."); + + assert!(result.is_err(), "Expected Failure got {:?}", result); + + self + } + + /// Returns the `Account` if present. + pub fn get_account(&self, account_hash: AccountHash) -> Option { + let stored_value = self + .query(None, Key::Account(account_hash), &[]) + .expect("must have stored value"); + + stored_value.into_account() + } + + /// Returns the "handle payment" contract, panics if it can't be found. 
+ pub fn get_handle_payment_contract(&self) -> EntityWithNamedKeys { + let hash = self + .get_system_entity_hash(HANDLE_PAYMENT) + .expect("should have handle payment contract"); + + let handle_payment_contract = if self.chainspec.core_config.enable_addressable_entity { + Key::addressable_entity_key(EntityKindTag::System, hash) + } else { + Key::Hash(hash.value()) + }; + let stored_value = self + .query(None, handle_payment_contract, &[]) + .expect("must have stored value"); + match stored_value { + StoredValue::Contract(contract) => { + let named_keys = contract.named_keys().clone(); + let entity = AddressableEntity::from(contract); + EntityWithNamedKeys::new(entity, named_keys) + } + StoredValue::AddressableEntity(entity) => { + let named_keys = self.get_named_keys(EntityAddr::System(hash.value())); + EntityWithNamedKeys::new(entity, named_keys) + } + _ => panic!("unhandled stored value"), + } + } + + /// Returns the balance of a purse, panics if the balance can't be parsed into a `U512`. + pub fn get_purse_balance(&self, purse: URef) -> U512 { + let base_key = Key::Balance(purse.addr()); + self.query(None, base_key, &[]) + .and_then(|v| CLValue::try_from(v).map_err(|error| format!("{:?}", error))) + .and_then(|cl_value| cl_value.into_t().map_err(|error| format!("{:?}", error))) + .expect("should parse balance into a U512") + } + + /// Returns a `BalanceResult` for a purse, panics if the balance can't be found. 
+ pub fn get_purse_balance_result_with_proofs( + &self, + protocol_version: ProtocolVersion, + balance_identifier: BalanceIdentifier, + ) -> BalanceResult { + let balance_handling = BalanceHandling::Available; + let proof_handling = ProofHandling::Proofs; + let state_root_hash: Digest = self.post_state_hash.expect("should have post_state_hash"); + let request = BalanceRequest::new( + state_root_hash, + protocol_version, + balance_identifier, + balance_handling, + proof_handling, + ); + self.data_access_layer.balance(request) + } + + /// Returns a `BalanceResult` for a purse using a `PublicKey`. + pub fn get_public_key_balance_result_with_proofs( + &self, + protocol_version: ProtocolVersion, + public_key: PublicKey, + ) -> BalanceResult { + let state_root_hash: Digest = self.post_state_hash.expect("should have post_state_hash"); + let balance_handling = BalanceHandling::Available; + let proof_handling = ProofHandling::Proofs; + let request = BalanceRequest::from_public_key( + state_root_hash, + protocol_version, + public_key, + balance_handling, + proof_handling, + ); + self.data_access_layer.balance(request) + } + + /// Gets the purse balance of a proposer. + pub fn get_proposer_purse_balance(&self) -> U512 { + let proposer_contract = self + .get_entity_by_account_hash(*DEFAULT_PROPOSER_ADDR) + .expect("proposer account should exist"); + self.get_purse_balance(proposer_contract.main_purse()) + } + + /// Gets the contract hash associated with a given account hash. 
+ pub fn get_entity_hash_by_account_hash( + &self, + account_hash: AccountHash, + ) -> Option { + match self.query(None, Key::Account(account_hash), &[]).ok() { + Some(StoredValue::Account(_)) => Some(AddressableEntityHash::new(account_hash.value())), + Some(StoredValue::CLValue(cl_value)) => { + let entity_key = CLValue::into_t::(cl_value).expect("must have contract hash"); + entity_key.into_entity_hash() + } + Some(_) | None => None, + } + } + + /// Returns an Entity alongside its named keys queried by its account hash. + pub fn get_entity_with_named_keys_by_account_hash( + &self, + account_hash: AccountHash, + ) -> Option { + if let Some(entity) = self.get_entity_by_account_hash(account_hash) { + let entity_named_keys = self.get_named_keys_by_account_hash(account_hash); + return Some(EntityWithNamedKeys::new(entity, entity_named_keys)); + }; + + None + } + + /// Returns an Entity alongside its named keys queried by its entity hash. + pub fn get_entity_with_named_keys_by_entity_hash( + &self, + entity_hash: AddressableEntityHash, + ) -> Option { + match self.get_addressable_entity(entity_hash) { + Some(entity) => { + let named_keys = self.get_named_keys(entity.entity_addr(entity_hash)); + Some(EntityWithNamedKeys::new(entity, named_keys)) + } + None => None, + } + } + + /// Queries for an `Account`. + pub fn get_entity_by_account_hash( + &self, + account_hash: AccountHash, + ) -> Option { + match self.query(None, Key::Account(account_hash), &[]).ok() { + Some(StoredValue::Account(account)) => Some(AddressableEntity::from(account)), + Some(StoredValue::CLValue(cl_value)) => { + let entity_key = CLValue::into_t::(cl_value).expect("must have entity key"); + match self.query(None, entity_key, &[]) { + Ok(StoredValue::AddressableEntity(entity)) => Some(entity), + Ok(_) | Err(_) => None, + } + } + Some(_other_variant) => None, + None => None, + } + } + + /// Queries for an `AddressableEntity` and panics if it can't be found. 
+ pub fn get_expected_addressable_entity_by_account_hash( + &self, + account_hash: AccountHash, + ) -> AddressableEntity { + self.get_entity_by_account_hash(account_hash) + .expect("account to exist") + } + + /// Queries for an addressable entity by `AddressableEntityHash`. + pub fn get_addressable_entity( + &self, + entity_hash: AddressableEntityHash, + ) -> Option { + if !self.chainspec.core_config.enable_addressable_entity { + let contract_hash = ContractHash::new(entity_hash.value()); + return self + .get_contract(contract_hash) + .map(AddressableEntity::from); + } + + let entity_key = Key::addressable_entity_key(EntityKindTag::SmartContract, entity_hash); + + let value: StoredValue = match self.query(None, entity_key, &[]) { + Ok(stored_value) => stored_value, + Err(_) => self + .query( + None, + Key::addressable_entity_key(EntityKindTag::System, entity_hash), + &[], + ) + .ok()?, + }; + + if let StoredValue::AddressableEntity(entity) = value { + Some(entity) + } else { + None + } + } + + /// Retrieve a Contract from global state. + pub fn get_contract(&self, contract_hash: ContractHash) -> Option { + let contract_value: StoredValue = self + .query(None, contract_hash.into(), &[]) + .expect("should have contract value"); + + if let StoredValue::Contract(contract) = contract_value { + Some(contract) + } else { + None + } + } + + /// Queries for byte code by `ByteCodeAddr` and returns an `Option`. + pub fn get_byte_code(&self, byte_code_hash: ByteCodeHash) -> Option { + let byte_code_key = Key::byte_code_key(ByteCodeAddr::new_wasm_addr(byte_code_hash.value())); + + let byte_code_value: StoredValue = self + .query(None, byte_code_key, &[]) + .expect("should have contract value"); + + if let StoredValue::ByteCode(byte_code) = byte_code_value { + Some(byte_code) + } else { + None + } + } + + /// Queries for a contract package by `PackageHash`. 
+ pub fn get_package(&self, package_hash: PackageHash) -> Option { + let key = if self.chainspec.core_config.enable_addressable_entity { + Key::SmartContract(package_hash.value()) + } else { + Key::Hash(package_hash.value()) + }; + let contract_value: StoredValue = self + .query(None, key, &[]) + .expect("should have package value"); + + match contract_value { + StoredValue::ContractPackage(contract_package) => Some(contract_package.into()), + StoredValue::SmartContract(package) => Some(package), + _ => None, + } + } + + /// Returns how much gas execution consumed / used. + pub fn exec_consumed(&self, index: usize) -> Gas { + self.exec_results + .get(index) + .map(WasmV1Result::consumed) + .unwrap() + } + + /// Returns the `Gas` cost of the last exec. + pub fn last_exec_gas_consumed(&self) -> Gas { + self.exec_results + .last() + .map(WasmV1Result::consumed) + .unwrap() + } + + /// Assert that last error is the expected one. + /// + /// NOTE: we're using string-based representation for checking equality + /// as the `Error` type does not implement `Eq` (many of its subvariants don't). + pub fn assert_error(&self, expected_error: Error) { + match self.get_error() { + Some(error) => assert_eq!(format!("{:?}", expected_error), format!("{:?}", error)), + None => panic!("expected error ({:?}) got success", expected_error), + } + } + + /// Gets [`EraValidators`]. + pub fn get_era_validators(&mut self) -> EraValidators { + let state_hash = self.get_post_state_hash(); + let request = EraValidatorsRequest::new(state_hash); + let result = self.data_access_layer.era_validators(request); + + if let EraValidatorsResult::Success { era_validators } = result { + era_validators + } else { + panic!("get era validators should be available"); + } + } + + /// Gets [`ValidatorWeights`] for a given [`EraId`]. + pub fn get_validator_weights(&mut self, era_id: EraId) -> Option { + let mut result = self.get_era_validators(); + result.remove(&era_id) + } + + /// Gets [`Vec`]. 
+ pub fn get_bids(&mut self) -> Vec { + let get_bids_request = BidsRequest::new(self.get_post_state_hash()); + + let get_bids_result = self.data_access_layer.bids(get_bids_request); + + get_bids_result.into_option().unwrap() + } + + /// Returns named keys for an account entity by its account hash. + pub fn get_named_keys_by_account_hash(&self, account_hash: AccountHash) -> NamedKeys { + let entity_hash = self + .get_entity_hash_by_account_hash(account_hash) + .expect("must have entity hash"); + let entity_addr = EntityAddr::new_account(entity_hash.value()); + self.get_named_keys(entity_addr) + } + + /// Returns the named keys for a system contract. + pub fn get_named_keys_for_system_contract( + &self, + system_entity_hash: AddressableEntityHash, + ) -> NamedKeys { + self.get_named_keys(EntityAddr::System(system_entity_hash.value())) + } + + /// Returns the named keys for a system contract. + pub fn get_named_keys_for_contract(&self, contract_hash: AddressableEntityHash) -> NamedKeys { + self.get_named_keys(EntityAddr::SmartContract(contract_hash.value())) + } + + /// Get the named keys for an entity. + pub fn get_named_keys(&self, entity_addr: EntityAddr) -> NamedKeys { + let state_root_hash = self.get_post_state_hash(); + + let tracking_copy = self + .data_access_layer + .tracking_copy(state_root_hash) + .unwrap() + .unwrap(); + + tracking_copy + .get_named_keys(entity_addr) + .expect("should have named keys") + } + + /// Gets [`BTreeMap`]. 
+ pub fn get_unbonds(&mut self) -> BTreeMap> { + let state_root_hash = self.get_post_state_hash(); + + let tracking_copy = self + .data_access_layer + .tracking_copy(state_root_hash) + .unwrap() + .unwrap(); + + let reader = tracking_copy.reader(); + + let unbond_keys = reader + .keys_with_prefix(&[KeyTag::BidAddr as u8]) + .unwrap_or_default(); + + let mut ret = BTreeMap::new(); + + for key in unbond_keys.into_iter() { + if let Ok(Some(StoredValue::BidKind(BidKind::Unbond(unbond)))) = reader.read(&key) { + let unbond_kind = unbond.unbond_kind(); + match ret.get_mut(unbond_kind) { + None => { + let _ = ret.insert(unbond_kind.clone(), vec![*unbond]); + } + Some(unbonds) => unbonds.push(*unbond), + }; + } + } + + ret + } + + /// Retrieve the bid for a validator by their public key. + pub fn get_validator_bid(&mut self, validator_public_key: PublicKey) -> Option { + let state_root_hash = self.get_post_state_hash(); + + let tracking_copy = self + .data_access_layer + .tracking_copy(state_root_hash) + .unwrap() + .unwrap(); + + let reader = tracking_copy.reader(); + + let validator_keys = reader + .keys_with_prefix(&[KeyTag::BidAddr as u8, BidAddrTag::Validator as u8]) + .unwrap_or_default(); + + for key in validator_keys.into_iter() { + if let Ok(Some(StoredValue::BidKind(BidKind::Validator(bid)))) = reader.read(&key) { + if bid.validator_public_key() == &validator_public_key { + return Some(*bid); + } + } + } + + None + } + + /// Gets [`BTreeMap>`]. 
+ pub fn get_unbonding_purses(&mut self) -> BTreeMap> { + let state_root_hash = self.get_post_state_hash(); + + let tracking_copy = self + .data_access_layer + .tracking_copy(state_root_hash) + .unwrap() + .unwrap(); + + let reader = tracking_copy.reader(); + + let unbond_keys = reader + .keys_with_prefix(&[KeyTag::Unbond as u8]) + .unwrap_or_default(); + + let mut ret = BTreeMap::new(); + + for key in unbond_keys.into_iter() { + let read_result = reader.read(&key); + if let (Key::Unbond(account_hash), Ok(Some(StoredValue::Unbonding(unbonding_purses)))) = + (key, read_result) + { + ret.insert(account_hash, unbonding_purses); + } + } + + ret + } + + /// Gets [`WithdrawPurses`]. + pub fn get_withdraw_purses(&mut self) -> WithdrawPurses { + let state_root_hash = self.get_post_state_hash(); + + let tracking_copy = self + .data_access_layer + .tracking_copy(state_root_hash) + .unwrap() + .unwrap(); + + let reader = tracking_copy.reader(); + + let withdraws_keys = reader + .keys_with_prefix(&[KeyTag::Withdraw as u8]) + .unwrap_or_default(); + + let mut ret = BTreeMap::new(); + + for key in withdraws_keys.into_iter() { + let read_result = reader.read(&key); + if let (Key::Withdraw(account_hash), Ok(Some(StoredValue::Withdraw(withdraw_purses)))) = + (key, read_result) + { + ret.insert(account_hash, withdraw_purses); + } + } + + ret + } + + /// Gets all `[Key::Balance]`s in global state. + pub fn get_balance_keys(&self) -> Vec { + self.get_keys(KeyTag::Balance).unwrap_or_default() + } + + /// Gets all keys in global state by a prefix. 
+ pub fn get_keys( + &self, + tag: KeyTag, + ) -> Result, casper_storage::global_state::error::Error> { + let state_root_hash = self.get_post_state_hash(); + + let tracking_copy = self + .data_access_layer + .tracking_copy(state_root_hash) + .unwrap() + .unwrap(); + + let reader = tracking_copy.reader(); + + reader.keys_with_prefix(&[tag as u8]) + } + + /// Gets all entry points for a given entity + pub fn get_entry_points(&self, entity_addr: EntityAddr) -> EntryPoints { + let state_root_hash = self.get_post_state_hash(); + + let tracking_copy = self + .data_access_layer + .tracking_copy(state_root_hash) + .unwrap() + .unwrap(); + + tracking_copy + .get_v1_entry_points(entity_addr) + .expect("must get entry points") + } + + /// Gets a stored value from a contract's named keys. + pub fn get_value(&mut self, entity_addr: EntityAddr, name: &str) -> T + where + T: FromBytes + CLTyped, + { + let named_keys = self.get_named_keys(entity_addr); + + let key = named_keys.get(name).expect("should have named key"); + let stored_value = self.query(None, *key, &[]).expect("should query"); + let cl_value = stored_value.into_cl_value().expect("should be cl value"); + let result: T = cl_value.into_t().expect("should convert"); + result + } + + /// Gets an [`EraId`]. + pub fn get_era(&mut self) -> EraId { + let auction_contract = self.get_auction_contract_hash(); + self.get_value(EntityAddr::System(auction_contract.value()), ERA_ID_KEY) + } + + /// Gets the auction delay. 
+ pub fn get_auction_delay(&mut self) -> u64 { + let auction_contract = self.get_auction_contract_hash(); + self.get_value( + EntityAddr::System(auction_contract.value()), + AUCTION_DELAY_KEY, + ) + } + + /// Gets the unbonding delay + pub fn get_unbonding_delay(&mut self) -> u64 { + let auction_contract = self.get_auction_contract_hash(); + self.get_value( + EntityAddr::System(auction_contract.value()), + UNBONDING_DELAY_KEY, + ) + } + + fn system_entity_key(&self, request: SystemEntityRegistryRequest) -> Key { + let result = self.data_access_layer.system_entity_registry(request); + if let SystemEntityRegistryResult::Success { payload, .. } = result { + match payload { + SystemEntityRegistryPayload::All(_) => { + panic!("asked for auction, got entire registry"); + } + SystemEntityRegistryPayload::EntityKey(key) => key, + } + } else { + panic!("{:?}", result) + } + } + + /// Gets the [`AddressableEntityHash`] of the system auction contract, panics if it can't be + /// found. + pub fn get_system_auction_hash(&self) -> AddressableEntityHash { + let state_root_hash = self.get_post_state_hash(); + let request = SystemEntityRegistryRequest::new( + state_root_hash, + ProtocolVersion::V2_0_0, + SystemEntityRegistrySelector::auction(), + self.chainspec.core_config.enable_addressable_entity, + ); + self.system_entity_key(request) + .into_entity_hash() + .expect("should downcast") + } + + /// Gets the [`AddressableEntityHash`] of the system mint contract, panics if it can't be found. 
+ pub fn get_system_mint_hash(&self) -> AddressableEntityHash { + let state_root_hash = self.get_post_state_hash(); + let request = SystemEntityRegistryRequest::new( + state_root_hash, + ProtocolVersion::V2_0_0, + SystemEntityRegistrySelector::mint(), + self.chainspec.core_config.enable_addressable_entity, + ); + self.system_entity_key(request) + .into_entity_hash() + .expect("should downcast") + } + + /// Gets the [`AddressableEntityHash`] of the system handle payment contract, panics if it can't + /// be found. + pub fn get_system_handle_payment_hash( + &self, + protocol_version: ProtocolVersion, + ) -> AddressableEntityHash { + let state_root_hash = self.get_post_state_hash(); + let request = SystemEntityRegistryRequest::new( + state_root_hash, + protocol_version, + SystemEntityRegistrySelector::handle_payment(), + self.chainspec.core_config.enable_addressable_entity, + ); + self.system_entity_key(request) + .into_entity_hash() + .expect("should downcast") + } + + /// Resets the `exec_results`, `upgrade_results` and `transform` fields. + pub fn clear_results(&mut self) -> &mut Self { + self.exec_results = Vec::new(); + self.upgrade_results = Vec::new(); + self.effects = Vec::new(); + self + } + + /// Advances eras by num_eras + pub fn advance_eras_by(&mut self, num_eras: u64) { + let step_request_builder = StepRequestBuilder::new() + .with_protocol_version(ProtocolVersion::V2_0_0) + .with_runtime_config(self.native_runtime_config()) + .with_run_auction(true); + + for _ in 0..num_eras { + let state_hash = self.get_post_state_hash(); + let step_request = step_request_builder + .clone() + .with_parent_state_hash(state_hash) + .with_next_era_id(self.get_era().successor()) + .build(); + + match self.step(step_request) { + StepResult::RootNotFound => panic!("Root not found {:?}", state_hash), + StepResult::Failure(err) => panic!("{:?}", err), + StepResult::Success { .. 
} => { + // noop + } + } + } + } + + /// Advances eras by configured amount + pub fn advance_eras_by_default_auction_delay(&mut self) { + let auction_delay = self.get_auction_delay(); + self.advance_eras_by(auction_delay + 1); + } + + /// Advances by a single era. + pub fn advance_era(&mut self) { + self.advance_eras_by(1); + } + + /// Returns an initialized step request builder. + pub fn step_request_builder(&mut self) -> StepRequestBuilder { + StepRequestBuilder::new() + .with_parent_state_hash(self.get_post_state_hash()) + .with_protocol_version(ProtocolVersion::V2_0_0) + .with_runtime_config(self.native_runtime_config()) + } + + /// Returns a trie by hash. + pub fn get_trie(&mut self, state_hash: Digest) -> Option> { + let req = TrieRequest::new(state_hash, None); + self.data_access_layer() + .trie(req) + .into_raw() + .unwrap() + .map(|bytes| bytesrepr::deserialize(bytes.into_inner().into()).unwrap()) + } + + /// Returns the costs related to interacting with the auction system contract. + pub fn get_auction_costs(&self) -> AuctionCosts { + *self.chainspec.system_costs_config.auction_costs() + } + + /// Returns the costs related to interacting with the mint system contract. + pub fn get_mint_costs(&self) -> MintCosts { + *self.chainspec.system_costs_config.mint_costs() + } + + /// Returns the costs related to interacting with the handle payment system contract. + pub fn get_handle_payment_costs(&self) -> HandlePaymentCosts { + *self.chainspec.system_costs_config.handle_payment_costs() + } + + /// Commits a prune of leaf nodes from the tip of the merkle trie. 
+ pub fn commit_prune(&mut self, prune_config: PruneRequest) -> &mut Self { + let result = self.data_access_layer.prune(prune_config); + + if let PruneResult::Success { + post_state_hash, + effects, + } = &result + { + self.post_state_hash = Some(*post_state_hash); + self.effects.push(effects.clone()); + } + + self.prune_results.push(result); + self + } + + /// Returns a `Result` containing a [`PruneResult`]. + pub fn get_prune_result(&self, index: usize) -> Option<&PruneResult> { + self.prune_results.get(index) + } + + /// Expects a prune success. + pub fn expect_prune_success(&mut self) -> &mut Self { + // Check first result, as only first result is interesting for a simple test + let result = self + .prune_results + .last() + .expect("Expected to be called after a system upgrade."); + + match result { + PruneResult::RootNotFound => panic!("Root not found"), + PruneResult::MissingKey => panic!("Does not exists"), + PruneResult::Failure(tce) => { + panic!("{:?}", tce); + } + PruneResult::Success { .. } => {} + } + + self + } + + /// Calculates refunded amount from a last execution request. + pub fn calculate_refund_amount(&self, payment_amount: U512) -> U512 { + let gas_amount = Motes::from_gas(self.last_exec_gas_consumed(), DEFAULT_GAS_PRICE) + .expect("should create motes from gas"); + + let refund_ratio = match self.chainspec.core_config.refund_handling { + RefundHandling::Refund { refund_ratio } | RefundHandling::Burn { refund_ratio } => { + refund_ratio + } + RefundHandling::NoRefund => Ratio::zero(), + }; + + let (numer, denom) = refund_ratio.into(); + let refund_ratio = Ratio::new_raw(U512::from(numer), U512::from(denom)); + + // amount declared to be paid in payment code MINUS gas spent in last execution. 
+ let refundable_amount = Ratio::from(payment_amount) - Ratio::from(gas_amount.value()); + (refundable_amount * refund_ratio).to_integer() + } +} diff --git a/execution_engine_testing/tests/Cargo.toml b/execution_engine_testing/tests/Cargo.toml index f6df69788e..7822ea36e8 100644 --- a/execution_engine_testing/tests/Cargo.toml +++ b/execution_engine_testing/tests/Cargo.toml @@ -1,66 +1,43 @@ [package] name = "casper-engine-tests" version = "0.1.0" -authors = ["Ed Hastings , Henry Till "] -edition = "2018" +authors = ["Ed Hastings , Henry Till "] +edition = "2021" [dependencies] base16 = "0.2.1" -casper-contract = { path = "../../smart_contracts/contract" } casper-engine-test-support = { path = "../test_support" } -casper-execution-engine = { path = "../../execution_engine" } -casper-types = { path = "../../types", features = ["std"] } +casper-execution-engine = { path = "../../execution_engine", features = ["test-support"] } +casper-storage = { path = "../../storage" } +casper-types = { path = "../../types", default-features = false, features = ["datasize", "json-schema"] } +ed25519-dalek = { version = "2.1.1", default-features = false, features = ["alloc", "zeroize"] } +casper-wasm = "1.0.0" clap = "2" -crossbeam-channel = "0.5.0" -env_logger = "0.8.1" +fs_extra = "1.2.0" log = "0.4.8" rand = "0.8.3" +serde = "1" serde_json = "1" -parity-wasm = "0.41.0" +tempfile = "3.4.0" +wat = "1.219.1" +wasmprinter = "0.219.0" +walrus = "0.20.2" [dev-dependencies] assert_matches = "1.3.0" -criterion = "0.3.0" +criterion = { version = "0.5.1", features = ["html_reports"]} +dictionary = { path = "../../smart_contracts/contracts/test/dictionary", default-features = false } +dictionary-call = { path = "../../smart_contracts/contracts/test/dictionary-call", default-features = false } +get-call-stack-recursive-subcall = { path = "../../smart_contracts/contracts/test/get-call-stack-recursive-subcall", default-features = false } +gh-1470-regression = { path = 
"../../smart_contracts/contracts/test/gh-1470-regression", default-features = false } +gh-1470-regression-call = { path = "../../smart_contracts/contracts/test/gh-1470-regression-call", default-features = false } +lmdb-rkv = "0.14" num-rational = "0.4.0" -num-traits = "0.2.10" +num-traits = { workspace = true } once_cell = "1.5.2" -serde_json = "1" -tempfile = "3" +regex = "1.5.4" +walrus = "0.20.2" +wat = "1.0.47" [features] -default = [ - "casper-contract/std", - "casper-contract/test-support", - "casper-execution-engine/test-support", - "casper-engine-test-support/test-support" -] -use-as-wasm = ["casper-engine-test-support/use-as-wasm"] - -[lib] -bench = false - -[[bench]] -name = "transfer_bench" -harness = false - -[[bin]] -name = "state-initializer" -path = "src/profiling/state_initializer.rs" -test = false -bench = false - -[[bin]] -name = "simple-transfer" -path = "src/profiling/simple_transfer.rs" -test = false -bench = false - -[[bin]] -name = "host-function-metrics" -path = "src/profiling/host_function_metrics.rs" -test = false -bench = false - -[[test]] -name = "metrics" -path = "src/logging/metrics.rs" +fixture-generators = [] diff --git a/execution_engine_testing/tests/benches/transfer_bench.rs b/execution_engine_testing/tests/benches/transfer_bench.rs deleted file mode 100644 index 28ddfea648..0000000000 --- a/execution_engine_testing/tests/benches/transfer_bench.rs +++ /dev/null @@ -1,328 +0,0 @@ -use std::{path::Path, time::Duration}; - -use criterion::{ - criterion_group, criterion_main, measurement::WallTime, BenchmarkGroup, Criterion, Throughput, -}; -use tempfile::TempDir; - -use casper_engine_test_support::{ - internal::{ - DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, - DEFAULT_PAYMENT, DEFAULT_RUN_GENESIS_REQUEST, - }, - MINIMUM_ACCOUNT_CREATION_BALANCE, -}; -use casper_execution_engine::core::engine_state::EngineConfig; -use casper_types::{account::AccountHash, runtime_args, Key, RuntimeArgs, URef, 
U512}; - -const CONTRACT_CREATE_ACCOUNTS: &str = "create_accounts.wasm"; -const CONTRACT_CREATE_PURSES: &str = "create_purses.wasm"; -const CONTRACT_TRANSFER_TO_EXISTING_ACCOUNT: &str = "transfer_to_existing_account.wasm"; -const CONTRACT_TRANSFER_TO_PURSE: &str = "transfer_to_purse.wasm"; - -/// Size of batch used in multiple execs benchmark, and multiple deploys per exec cases. -const TRANSFER_BATCH_SIZE: u64 = 3; -const TARGET_ADDR: AccountHash = AccountHash::new([127; 32]); -const ARG_AMOUNT: &str = "amount"; -const ARG_ACCOUNTS: &str = "accounts"; -const ARG_SEED_AMOUNT: &str = "seed_amount"; -const ARG_TOTAL_PURSES: &str = "total_purses"; -const ARG_TARGET: &str = "target"; -const ARG_TARGET_PURSE: &str = "target_purse"; - -/// Converts an integer into an array of type [u8; 32] by converting integer -/// into its big endian representation and embedding it at the end of the -/// range. -fn make_deploy_hash(i: u64) -> [u8; 32] { - let mut result = [128; 32]; - result[32 - 8..].copy_from_slice(&i.to_be_bytes()); - result -} - -fn bootstrap(data_dir: &Path, accounts: Vec, amount: U512) -> LmdbWasmTestBuilder { - let exec_request = ExecuteRequestBuilder::standard( - *DEFAULT_ACCOUNT_ADDR, - CONTRACT_CREATE_ACCOUNTS, - runtime_args! { ARG_ACCOUNTS => accounts, ARG_SEED_AMOUNT => amount }, - ) - .build(); - - let engine_config = EngineConfig::new(); - - let mut builder = LmdbWasmTestBuilder::new_with_config(data_dir, engine_config); - - builder - .run_genesis(&DEFAULT_RUN_GENESIS_REQUEST) - .exec(exec_request) - .expect_success() - .commit(); - - builder -} - -fn create_purses( - builder: &mut LmdbWasmTestBuilder, - source: AccountHash, - total_purses: u64, - purse_amount: U512, -) -> Vec { - let exec_request = ExecuteRequestBuilder::standard( - source, - CONTRACT_CREATE_PURSES, - runtime_args! 
{ ARG_TOTAL_PURSES => total_purses, ARG_SEED_AMOUNT => purse_amount }, - ) - .build(); - - builder.exec(exec_request).expect_success().commit(); - - // Return creates purses for given account by filtering named keys - let query_result = builder - .query(None, Key::Account(source), &[]) - .expect("should query target"); - let account = query_result - .as_account() - .unwrap_or_else(|| panic!("result should be account but received {:?}", query_result)); - - (0..total_purses) - .map(|index| { - let purse_lookup_key = format!("purse:{}", index); - let purse_uref = account - .named_keys() - .get(&purse_lookup_key) - .and_then(Key::as_uref) - .unwrap_or_else(|| panic!("should get named key {} as uref", purse_lookup_key)); - *purse_uref - }) - .collect() -} - -/// Uses multiple exec requests with a single deploy to transfer tokens. Executes all transfers in -/// batch determined by value of TRANSFER_BATCH_SIZE. -fn transfer_to_account_multiple_execs( - builder: &mut LmdbWasmTestBuilder, - account: AccountHash, - should_commit: bool, -) { - let amount = U512::one(); - - for _ in 0..TRANSFER_BATCH_SIZE { - let exec_request = ExecuteRequestBuilder::standard( - *DEFAULT_ACCOUNT_ADDR, - CONTRACT_TRANSFER_TO_EXISTING_ACCOUNT, - runtime_args! { - ARG_TARGET => account, - ARG_AMOUNT => amount, - }, - ) - .build(); - - let builder = builder.exec(exec_request).expect_success(); - if should_commit { - builder.commit(); - } - } -} - -/// Executes multiple deploys per single exec with based on TRANSFER_BATCH_SIZE. -fn transfer_to_account_multiple_deploys( - builder: &mut LmdbWasmTestBuilder, - account: AccountHash, - should_commit: bool, -) { - let mut exec_builder = ExecuteRequestBuilder::new(); - - for i in 0..TRANSFER_BATCH_SIZE { - let deploy = DeployItemBuilder::default() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_empty_payment_bytes(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT }) - .with_session_code( - CONTRACT_TRANSFER_TO_EXISTING_ACCOUNT, - runtime_args! 
{ - ARG_TARGET => account, - ARG_AMOUNT => U512::one(), - }, - ) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) - .with_deploy_hash(make_deploy_hash(i)) // deploy_hash - .build(); - exec_builder = exec_builder.push_deploy(deploy); - } - - let exec_request = exec_builder.build(); - - let builder = builder.exec(exec_request).expect_success(); - if should_commit { - builder.commit(); - } -} - -/// Uses multiple exec requests with a single deploy to transfer tokens from purse to purse. -/// Executes all transfers in batch determined by value of TRANSFER_BATCH_SIZE. -fn transfer_to_purse_multiple_execs( - builder: &mut LmdbWasmTestBuilder, - purse: URef, - should_commit: bool, -) { - let amount = U512::one(); - - for _ in 0..TRANSFER_BATCH_SIZE { - let exec_request = ExecuteRequestBuilder::standard( - TARGET_ADDR, - CONTRACT_TRANSFER_TO_PURSE, - runtime_args! { ARG_TARGET_PURSE => purse, ARG_AMOUNT => amount }, - ) - .build(); - - let builder = builder.exec(exec_request).expect_success(); - if should_commit { - builder.commit(); - } - } -} - -/// Executes multiple deploys per single exec with based on TRANSFER_BATCH_SIZE. -fn transfer_to_purse_multiple_deploys( - builder: &mut LmdbWasmTestBuilder, - purse: URef, - should_commit: bool, -) { - let mut exec_builder = ExecuteRequestBuilder::new(); - - for i in 0..TRANSFER_BATCH_SIZE { - let deploy = DeployItemBuilder::default() - .with_address(TARGET_ADDR) - .with_empty_payment_bytes(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, }) - .with_session_code( - CONTRACT_TRANSFER_TO_PURSE, - runtime_args! 
{ ARG_TARGET_PURSE => purse, ARG_AMOUNT => U512::one() }, - ) - .with_authorization_keys(&[TARGET_ADDR]) - .with_deploy_hash(make_deploy_hash(i)) // deploy_hash - .build(); - exec_builder = exec_builder.push_deploy(deploy); - } - - let exec_request = exec_builder.build(); - - let builder = builder.exec(exec_request).expect_success(); - if should_commit { - builder.commit(); - } -} - -pub fn transfer_to_existing_accounts(group: &mut BenchmarkGroup, should_commit: bool) { - let target_account = TARGET_ADDR; - let bootstrap_accounts = vec![target_account]; - - let data_dir = TempDir::new().expect("should create temp dir"); - let mut builder = bootstrap(data_dir.path(), bootstrap_accounts.clone(), U512::one()); - - group.bench_function( - format!( - "transfer_to_existing_account_multiple_execs/{}/{}", - TRANSFER_BATCH_SIZE, should_commit - ), - |b| { - b.iter(|| { - // Execute multiple deploys with multiple exec requests - transfer_to_account_multiple_execs(&mut builder, target_account, should_commit) - }) - }, - ); - - let data_dir = TempDir::new().expect("should create temp dir"); - let mut builder = bootstrap(data_dir.path(), bootstrap_accounts, U512::one()); - - group.bench_function( - format!( - "transfer_to_existing_account_multiple_deploys_per_exec/{}/{}", - TRANSFER_BATCH_SIZE, should_commit - ), - |b| { - b.iter(|| { - // Execute multiple deploys with a single exec request - transfer_to_account_multiple_deploys(&mut builder, target_account, should_commit) - }) - }, - ); -} - -pub fn transfer_to_existing_purses(group: &mut BenchmarkGroup, should_commit: bool) { - let target_account = TARGET_ADDR; - let bootstrap_accounts = vec![target_account]; - - let data_dir = TempDir::new().expect("should create temp dir"); - let mut builder = bootstrap( - data_dir.path(), - bootstrap_accounts.clone(), - U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE), - ); - - let purse_amount = U512::one(); - let purses = create_purses(&mut builder, target_account, 1, purse_amount); - - 
group.bench_function( - format!( - "transfer_to_purse_multiple_execs/{}/{}", - TRANSFER_BATCH_SIZE, should_commit - ), - |b| { - let target_purse = purses[0]; - b.iter(|| { - // Execute multiple deploys with mutliple exec request - transfer_to_purse_multiple_execs(&mut builder, target_purse, should_commit) - }) - }, - ); - - let data_dir = TempDir::new().expect("should create temp dir"); - let mut builder = bootstrap( - data_dir.path(), - bootstrap_accounts, - U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE) * 10, - ); - let purses = create_purses(&mut builder, TARGET_ADDR, 1, U512::one()); - - group.bench_function( - format!( - "transfer_to_purse_multiple_deploys_per_exec/{}/{}", - TRANSFER_BATCH_SIZE, should_commit - ), - |b| { - let target_purse = purses[0]; - b.iter(|| { - // Execute multiple deploys with a single exec request - transfer_to_purse_multiple_deploys(&mut builder, target_purse, should_commit) - }) - }, - ); -} - -pub fn transfer_bench(c: &mut Criterion) { - let mut group = c.benchmark_group("tps"); - - // Minimum number of samples and measurement times to decrease the total time of this benchmark. - // This may or may not decrease the quality of the numbers. 
- group.sample_size(10); - group.measurement_time(Duration::from_secs(10)); - - // Measure by elements where one element per second is one transaction per second - group.throughput(Throughput::Elements(TRANSFER_BATCH_SIZE)); - - // Transfers to existing accounts, no commits - transfer_to_existing_accounts(&mut group, false); - - // Transfers to existing purses, no commits - transfer_to_existing_purses(&mut group, false); - - // Transfers to existing accounts, with commits - transfer_to_existing_accounts(&mut group, true); - - // Transfers to existing purses, with commits - transfer_to_existing_purses(&mut group, true); - - group.finish(); -} - -criterion_group!(benches, transfer_bench); -criterion_main!(benches); diff --git a/execution_engine_testing/tests/fixtures/call_stack_fixture/global_state/data.lmdb b/execution_engine_testing/tests/fixtures/call_stack_fixture/global_state/data.lmdb new file mode 100644 index 0000000000..d88a9698d8 Binary files /dev/null and b/execution_engine_testing/tests/fixtures/call_stack_fixture/global_state/data.lmdb differ diff --git a/execution_engine_testing/tests/fixtures/call_stack_fixture/global_state/data.lmdb-lock b/execution_engine_testing/tests/fixtures/call_stack_fixture/global_state/data.lmdb-lock new file mode 100644 index 0000000000..3cf9716815 Binary files /dev/null and b/execution_engine_testing/tests/fixtures/call_stack_fixture/global_state/data.lmdb-lock differ diff --git a/execution_engine_testing/tests/fixtures/call_stack_fixture/state.json b/execution_engine_testing/tests/fixtures/call_stack_fixture/state.json new file mode 100644 index 0000000000..a2df3baa43 --- /dev/null +++ b/execution_engine_testing/tests/fixtures/call_stack_fixture/state.json @@ -0,0 +1,6 @@ +{ + "genesis_request": { + "protocol_version": "1.0.0" + }, + "post_state_hash": "8d90f686f4d3906ca63ea4ac6b0b72348605c95069e946c1f5b32496907d7fec" +} \ No newline at end of file diff --git 
a/execution_engine_testing/tests/fixtures/counter_contract/global_state/data.lmdb b/execution_engine_testing/tests/fixtures/counter_contract/global_state/data.lmdb new file mode 100644 index 0000000000..39c5ed751e Binary files /dev/null and b/execution_engine_testing/tests/fixtures/counter_contract/global_state/data.lmdb differ diff --git a/execution_engine_testing/tests/fixtures/counter_contract/global_state/data.lmdb-lock b/execution_engine_testing/tests/fixtures/counter_contract/global_state/data.lmdb-lock new file mode 100644 index 0000000000..55ec3e38d3 Binary files /dev/null and b/execution_engine_testing/tests/fixtures/counter_contract/global_state/data.lmdb-lock differ diff --git a/execution_engine_testing/tests/fixtures/counter_contract/state.json b/execution_engine_testing/tests/fixtures/counter_contract/state.json new file mode 100644 index 0000000000..e61cec95dc --- /dev/null +++ b/execution_engine_testing/tests/fixtures/counter_contract/state.json @@ -0,0 +1,6 @@ +{ + "genesis_request": { + "protocol_version": "1.0.0" + }, + "post_state_hash": "e8ac57060d30935c297b7565fcb17b1591edb4f2a9b9738682d493b9556e32cf" +} \ No newline at end of file diff --git a/execution_engine_testing/tests/fixtures/disabled_versions/global_state/data.lmdb b/execution_engine_testing/tests/fixtures/disabled_versions/global_state/data.lmdb new file mode 100644 index 0000000000..cc875b0509 Binary files /dev/null and b/execution_engine_testing/tests/fixtures/disabled_versions/global_state/data.lmdb differ diff --git a/execution_engine_testing/tests/fixtures/disabled_versions/global_state/data.lmdb-lock b/execution_engine_testing/tests/fixtures/disabled_versions/global_state/data.lmdb-lock new file mode 100644 index 0000000000..2644a2afc2 Binary files /dev/null and b/execution_engine_testing/tests/fixtures/disabled_versions/global_state/data.lmdb-lock differ diff --git a/execution_engine_testing/tests/fixtures/disabled_versions/state.json 
b/execution_engine_testing/tests/fixtures/disabled_versions/state.json new file mode 100644 index 0000000000..918eeab05b --- /dev/null +++ b/execution_engine_testing/tests/fixtures/disabled_versions/state.json @@ -0,0 +1,6 @@ +{ + "genesis_request": { + "protocol_version": "1.0.0" + }, + "post_state_hash": "b06fd206cd3719a18c212bef2f21adbef24086982c92250a8368929625693841" +} \ No newline at end of file diff --git a/execution_engine_testing/tests/fixtures/gh_3208/global_state/data.lmdb b/execution_engine_testing/tests/fixtures/gh_3208/global_state/data.lmdb new file mode 100644 index 0000000000..3ed2631b7a Binary files /dev/null and b/execution_engine_testing/tests/fixtures/gh_3208/global_state/data.lmdb differ diff --git a/execution_engine_testing/tests/fixtures/gh_3208/state.json b/execution_engine_testing/tests/fixtures/gh_3208/state.json new file mode 100644 index 0000000000..a9abfc7905 --- /dev/null +++ b/execution_engine_testing/tests/fixtures/gh_3208/state.json @@ -0,0 +1,481 @@ +{ + "description": "Default proposer account is also a genesis validator with a vesting schedule already initialized", + "genesis_request": { + "chainspec_registry": { + "chainspec_raw_hash": "11c0e79b71c3976ccd0c02d1310e2516c08edc9d8b6f57ccd680d63a4d8e72da", + "genesis_accounts_raw_hash": "0afd4a04d7720da9922f2b40249989faf4ff8096e1ed49bee615bb6cb1ee4f7d", + "global_state_raw_hash": null + }, + "ee_config": { + "accounts": [ + { + "Account": { + "balance": "100000000000000000", + "public_key": "01d5af25e204ad03d0a26e236996404f1be51a60948bcc026cd084a83690b756d3", + "validator": null + } + }, + { + "Account": { + "balance": "100000000000000000", + "public_key": "01bb47d33d777b4559bb917d1825827421c4a6b1b9737f12e1c58ea4305af88b74", + "validator": { + "bonded_amount": "1000000000000", + "delegation_rate": 15 + } + } + } + ], + "auction_delay": 3, + "genesis_timestamp_millis": 0, + "locked_funds_period_millis": 7776000000, + "round_seigniorage_rate": [ + 6414, + 623437335209 + ], + 
"system_config": { + "auction_costs": { + "activate_bid": 10000, + "add_bid": 10000, + "delegate": 10000, + "distribute": 10000, + "get_era_validators": 10000, + "read_era_id": 10000, + "read_seigniorage_recipients": 10000, + "run_auction": 10000, + "slash": 10000, + "undelegate": 10000, + "withdraw_bid": 10000, + "withdraw_delegator_reward": 10000, + "withdraw_validator_reward": 10000 + }, + "handle_payment_costs": { + "finalize_payment": 10000, + "get_payment_purse": 10000, + "get_refund_purse": 10000, + "set_refund_purse": 10000 + }, + "mint_costs": { + "balance": 10000, + "create": 2500000000, + "mint": 2500000000, + "read_base_round_reward": 10000, + "reduce_total_supply": 10000, + "transfer": 10000 + }, + "standard_payment_costs": { + "pay": 10000 + }, + "wasmless_transfer_cost": 100000000 + }, + "unbonding_delay": 14, + "validator_slots": 5, + "wasm_config": { + "host_function_costs": { + "add": { + "arguments": [ + 0, + 0, + 0, + 0 + ], + "cost": 5800 + }, + "add_associated_key": { + "arguments": [ + 0, + 0, + 0 + ], + "cost": 9000 + }, + "add_contract_version": { + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "cost": 200 + }, + "blake2b": { + "arguments": [ + 0, + 0, + 0, + 0 + ], + "cost": 200 + }, + "call_contract": { + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 420, + 0 + ], + "cost": 4500 + }, + "call_versioned_contract": { + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "cost": 200 + }, + "create_contract_package_at_hash": { + "arguments": [ + 0, + 0 + ], + "cost": 200 + }, + "create_contract_user_group": { + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "cost": 200 + }, + "create_purse": { + "arguments": [ + 0, + 0 + ], + "cost": 2500000000 + }, + "dictionary_get": { + "arguments": [ + 0, + 590, + 0 + ], + "cost": 5500 + }, + "dictionary_put": { + "arguments": [ + 0, + 1800, + 0, + 520 + ], + "cost": 9500 + }, + "disable_contract_version": { + "arguments": [ + 0, + 0, + 0, + 0 + ], + "cost": 
200 + }, + "get_balance": { + "arguments": [ + 0, + 0, + 0 + ], + "cost": 3800 + }, + "get_blocktime": { + "arguments": [ + 0 + ], + "cost": 330 + }, + "get_caller": { + "arguments": [ + 0 + ], + "cost": 380 + }, + "get_key": { + "arguments": [ + 0, + 440, + 0, + 0, + 0 + ], + "cost": 2000 + }, + "get_main_purse": { + "arguments": [ + 0 + ], + "cost": 1300 + }, + "get_named_arg": { + "arguments": [ + 0, + 0, + 0, + 0 + ], + "cost": 200 + }, + "get_named_arg_size": { + "arguments": [ + 0, + 0, + 0 + ], + "cost": 200 + }, + "get_phase": { + "arguments": [ + 0 + ], + "cost": 710 + }, + "get_system_contract": { + "arguments": [ + 0, + 0, + 0 + ], + "cost": 1100 + }, + "has_key": { + "arguments": [ + 0, + 840 + ], + "cost": 1500 + }, + "is_valid_uref": { + "arguments": [ + 0, + 0 + ], + "cost": 760 + }, + "load_named_keys": { + "arguments": [ + 0, + 0 + ], + "cost": 42000 + }, + "new_uref": { + "arguments": [ + 0, + 0, + 590 + ], + "cost": 17000 + }, + "print": { + "arguments": [ + 0, + 4600 + ], + "cost": 20000 + }, + "provision_contract_user_group_uref": { + "arguments": [ + 0, + 0, + 0, + 0, + 0 + ], + "cost": 200 + }, + "put_key": { + "arguments": [ + 0, + 1100, + 0, + 0 + ], + "cost": 38000 + }, + "random_bytes": { + "arguments": [ + 0, + 0 + ], + "cost": 200 + }, + "read_host_buffer": { + "arguments": [ + 0, + 310, + 0 + ], + "cost": 3500 + }, + "read_value": { + "arguments": [ + 0, + 0, + 0 + ], + "cost": 6000 + }, + "remove_associated_key": { + "arguments": [ + 0, + 0 + ], + "cost": 4200 + }, + "remove_contract_user_group": { + "arguments": [ + 0, + 0, + 0, + 0 + ], + "cost": 200 + }, + "remove_contract_user_group_urefs": { + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 0 + ], + "cost": 200 + }, + "remove_key": { + "arguments": [ + 0, + 3200 + ], + "cost": 61000 + }, + "ret": { + "arguments": [ + 0, + 420 + ], + "cost": 23000 + }, + "revert": { + "arguments": [ + 0 + ], + "cost": 500 + }, + "set_action_threshold": { + "arguments": [ + 0, + 0 + ], + "cost": 74000 + 
}, + "transfer_from_purse_to_account": { + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "cost": 2500000000 + }, + "transfer_from_purse_to_purse": { + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "cost": 82000 + }, + "transfer_to_account": { + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "cost": 2500000000 + }, + "update_associated_key": { + "arguments": [ + 0, + 0, + 0 + ], + "cost": 4200 + }, + "write": { + "arguments": [ + 0, + 0, + 0, + 980 + ], + "cost": 14000 + } + }, + "max_memory": 64, + "max_stack_height": 188, + "opcode_costs": { + "add": 210, + "bit": 300, + "const": 110, + "control_flow": 440, + "conversion": 420, + "current_memory": 290, + "div": 320, + "global": 390, + "grow_memory": 240000, + "integer_comparison": 250, + "load": 2500, + "local": 390, + "mul": 240, + "nop": 200, + "regular": 210, + "store": 4700, + "unreachable": 270 + }, + "storage_costs": { + "gas_per_byte": 625000 + } + } + }, + "genesis_config_hash": "2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a", + "protocol_version": "1.0.0" + }, + "post_state_hash": "a180ca1ca4cb5f9a6a1430886c89689584d5b69a2d1cd862cd458cd459d788c0" +} diff --git a/execution_engine_testing/tests/fixtures/gh_3710/global_state/data.lmdb b/execution_engine_testing/tests/fixtures/gh_3710/global_state/data.lmdb new file mode 100644 index 0000000000..d24665206b Binary files /dev/null and b/execution_engine_testing/tests/fixtures/gh_3710/global_state/data.lmdb differ diff --git a/execution_engine_testing/tests/fixtures/gh_3710/state.json b/execution_engine_testing/tests/fixtures/gh_3710/state.json new file mode 100644 index 0000000000..746fc5c667 --- /dev/null +++ b/execution_engine_testing/tests/fixtures/gh_3710/state.json @@ -0,0 +1,481 @@ +{ + "genesis_request": { + "ee_config": { + "accounts": [ + { + "Account": { + "balance": "100000000000000000", + "public_key": "01d5af25e204ad03d0a26e236996404f1be51a60948bcc026cd084a83690b756d3", + "validator": 
null + } + }, + { + "Account": { + "balance": "100000000000000000", + "public_key": "01bb47d33d777b4559bb917d1825827421c4a6b1b9737f12e1c58ea4305af88b74", + "validator": null + } + } + ], + "auction_delay": 1, + "genesis_timestamp_millis": 0, + "locked_funds_period_millis": 7776000000, + "round_seigniorage_rate": [ + 7, + 87535408 + ], + "system_config": { + "auction_costs": { + "activate_bid": 10000, + "add_bid": 2500000000, + "delegate": 2500000000, + "distribute": 10000, + "get_era_validators": 10000, + "read_era_id": 10000, + "read_seigniorage_recipients": 10000, + "run_auction": 10000, + "slash": 10000, + "undelegate": 2500000000, + "withdraw_bid": 2500000000, + "withdraw_delegator_reward": 10000, + "withdraw_validator_reward": 10000 + }, + "handle_payment_costs": { + "finalize_payment": 10000, + "get_payment_purse": 10000, + "get_refund_purse": 10000, + "set_refund_purse": 10000 + }, + "mint_costs": { + "balance": 10000, + "create": 2500000000, + "mint": 2500000000, + "read_base_round_reward": 10000, + "reduce_total_supply": 10000, + "transfer": 10000 + }, + "standard_payment_costs": { + "pay": 10000 + }, + "wasmless_transfer_cost": 100000000 + }, + "unbonding_delay": 7, + "validator_slots": 100, + "wasm_config": { + "host_function_costs": { + "add": { + "arguments": [ + 0, + 0, + 0, + 0 + ], + "cost": 5800 + }, + "add_associated_key": { + "arguments": [ + 0, + 0, + 0 + ], + "cost": 9000 + }, + "add_contract_version": { + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "cost": 200 + }, + "blake2b": { + "arguments": [ + 0, + 0, + 0, + 0 + ], + "cost": 200 + }, + "call_contract": { + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 420, + 0 + ], + "cost": 4500 + }, + "call_versioned_contract": { + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "cost": 200 + }, + "create_contract_package_at_hash": { + "arguments": [ + 0, + 0 + ], + "cost": 200 + }, + "create_contract_user_group": { + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 0, 
+ 0, + 0 + ], + "cost": 200 + }, + "create_purse": { + "arguments": [ + 0, + 0 + ], + "cost": 2500000000 + }, + "dictionary_get": { + "arguments": [ + 0, + 590, + 0 + ], + "cost": 5500 + }, + "dictionary_put": { + "arguments": [ + 0, + 1800, + 0, + 520 + ], + "cost": 9500 + }, + "disable_contract_version": { + "arguments": [ + 0, + 0, + 0, + 0 + ], + "cost": 200 + }, + "get_balance": { + "arguments": [ + 0, + 0, + 0 + ], + "cost": 3800 + }, + "get_blocktime": { + "arguments": [ + 0 + ], + "cost": 330 + }, + "get_caller": { + "arguments": [ + 0 + ], + "cost": 380 + }, + "get_key": { + "arguments": [ + 0, + 440, + 0, + 0, + 0 + ], + "cost": 2000 + }, + "get_main_purse": { + "arguments": [ + 0 + ], + "cost": 1300 + }, + "get_named_arg": { + "arguments": [ + 0, + 0, + 0, + 0 + ], + "cost": 200 + }, + "get_named_arg_size": { + "arguments": [ + 0, + 0, + 0 + ], + "cost": 200 + }, + "get_phase": { + "arguments": [ + 0 + ], + "cost": 710 + }, + "get_system_contract": { + "arguments": [ + 0, + 0, + 0 + ], + "cost": 1100 + }, + "has_key": { + "arguments": [ + 0, + 840 + ], + "cost": 1500 + }, + "is_valid_uref": { + "arguments": [ + 0, + 0 + ], + "cost": 760 + }, + "load_named_keys": { + "arguments": [ + 0, + 0 + ], + "cost": 42000 + }, + "new_uref": { + "arguments": [ + 0, + 0, + 590 + ], + "cost": 17000 + }, + "print": { + "arguments": [ + 0, + 4600 + ], + "cost": 20000 + }, + "provision_contract_user_group_uref": { + "arguments": [ + 0, + 0, + 0, + 0, + 0 + ], + "cost": 200 + }, + "put_key": { + "arguments": [ + 0, + 1100, + 0, + 0 + ], + "cost": 38000 + }, + "read_host_buffer": { + "arguments": [ + 0, + 310, + 0 + ], + "cost": 3500 + }, + "read_value": { + "arguments": [ + 0, + 0, + 0 + ], + "cost": 6000 + }, + "remove_associated_key": { + "arguments": [ + 0, + 0 + ], + "cost": 4200 + }, + "remove_contract_user_group": { + "arguments": [ + 0, + 0, + 0, + 0 + ], + "cost": 200 + }, + "remove_contract_user_group_urefs": { + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 0 + ], + 
"cost": 200 + }, + "remove_key": { + "arguments": [ + 0, + 3200 + ], + "cost": 61000 + }, + "ret": { + "arguments": [ + 0, + 420000 + ], + "cost": 23000 + }, + "revert": { + "arguments": [ + 0 + ], + "cost": 500 + }, + "set_action_threshold": { + "arguments": [ + 0, + 0 + ], + "cost": 74000 + }, + "transfer_from_purse_to_account": { + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "cost": 2500000000 + }, + "transfer_from_purse_to_purse": { + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "cost": 82000 + }, + "transfer_to_account": { + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "cost": 2500000000 + }, + "update_associated_key": { + "arguments": [ + 0, + 0, + 0 + ], + "cost": 4200 + }, + "write": { + "arguments": [ + 0, + 0, + 0, + 980 + ], + "cost": 14000 + } + }, + "max_memory": 64, + "max_stack_height": 188, + "opcode_costs": { + "add": 210, + "bit": 300, + "const": 110, + "control_flow": { + "block": 440, + "br": 440000, + "br_if": 440000, + "br_table": { + "cost": 440000, + "size_multiplier": 100 + }, + "call": 440, + "call_indirect": 440, + "drop": 440, + "else": 440, + "end": 440, + "if": 440, + "loop": 440, + "return": 440, + "select": 440 + }, + "conversion": 420, + "current_memory": 290, + "div": 320, + "global": 390, + "grow_memory": 240000, + "integer_comparison": 250, + "load": 2500, + "local": 390, + "mul": 240, + "nop": 200, + "store": 4700, + "unreachable": 270 + }, + "storage_costs": { + "gas_per_byte": 630000 + } + } + }, + "genesis_config_hash": "2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a", + "protocol_version": "1.0.0" + }, + "post_state_hash": "ca42d66dd3ca95adfb92e56afc96353af5a888a258220d3c6b439e02c7b66306" +} \ No newline at end of file diff --git a/execution_engine_testing/tests/fixtures/groups/global_state/data.lmdb b/execution_engine_testing/tests/fixtures/groups/global_state/data.lmdb new file mode 100644 index 0000000000..fa9461bbda Binary files /dev/null and 
b/execution_engine_testing/tests/fixtures/groups/global_state/data.lmdb differ diff --git a/execution_engine_testing/tests/fixtures/groups/global_state/data.lmdb-lock b/execution_engine_testing/tests/fixtures/groups/global_state/data.lmdb-lock new file mode 100644 index 0000000000..d2acc3858b Binary files /dev/null and b/execution_engine_testing/tests/fixtures/groups/global_state/data.lmdb-lock differ diff --git a/execution_engine_testing/tests/fixtures/groups/state.json b/execution_engine_testing/tests/fixtures/groups/state.json new file mode 100644 index 0000000000..5184d93184 --- /dev/null +++ b/execution_engine_testing/tests/fixtures/groups/state.json @@ -0,0 +1,6 @@ +{ + "genesis_request": { + "protocol_version": "2.0.0" + }, + "post_state_hash": "b899bb0ccee3c734859a075fff3a71196a84eca1366e6bc3c04a2253fec1fb3c" +} \ No newline at end of file diff --git a/execution_engine_testing/tests/fixtures/release_1_2_0/global_state/data.lmdb b/execution_engine_testing/tests/fixtures/release_1_2_0/global_state/data.lmdb new file mode 100644 index 0000000000..548e671d5c Binary files /dev/null and b/execution_engine_testing/tests/fixtures/release_1_2_0/global_state/data.lmdb differ diff --git a/execution_engine_testing/tests/fixtures/release_1_2_0/global_state/data.lmdb-lock b/execution_engine_testing/tests/fixtures/release_1_2_0/global_state/data.lmdb-lock new file mode 100644 index 0000000000..b312f98d7a Binary files /dev/null and b/execution_engine_testing/tests/fixtures/release_1_2_0/global_state/data.lmdb-lock differ diff --git a/execution_engine_testing/tests/fixtures/release_1_2_0/state.json b/execution_engine_testing/tests/fixtures/release_1_2_0/state.json new file mode 100644 index 0000000000..a6b41a0585 --- /dev/null +++ b/execution_engine_testing/tests/fixtures/release_1_2_0/state.json @@ -0,0 +1,465 @@ +{ + "genesis_request": { + "genesis_config_hash": "2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a", + "protocol_version": "1.2.0", + 
"ee_config": { + "accounts": [ + { + "Account": { + "public_key": "01d5af25e204ad03d0a26e236996404f1be51a60948bcc026cd084a83690b756d3", + "balance": "100000000000000000", + "validator": null + } + }, + { + "Account": { + "public_key": "01bb47d33d777b4559bb917d1825827421c4a6b1b9737f12e1c58ea4305af88b74", + "balance": "100000000000000000", + "validator": null + } + } + ], + "wasm_config": { + "max_memory": 64, + "max_stack_height": 65536, + "opcode_costs": { + "bit": 300, + "add": 210, + "mul": 240, + "div": 320, + "load": 2500, + "store": 4700, + "const": 110, + "local": 390, + "global": 390, + "control_flow": 440, + "integer_comparison": 250, + "conversion": 420, + "unreachable": 270, + "nop": 200, + "current_memory": 290, + "grow_memory": 240000, + "regular": 210 + }, + "storage_costs": { + "gas_per_byte": 625000 + }, + "host_function_costs": { + "read_value": { + "cost": 6000, + "arguments": [ + 0, + 0, + 0 + ] + }, + "read_value_local": { + "cost": 5500, + "arguments": [ + 0, + 590, + 0 + ] + }, + "write": { + "cost": 14000, + "arguments": [ + 0, + 0, + 0, + 980 + ] + }, + "write_local": { + "cost": 9500, + "arguments": [ + 0, + 1800, + 0, + 520 + ] + }, + "add": { + "cost": 5800, + "arguments": [ + 0, + 0, + 0, + 0 + ] + }, + "new_uref": { + "cost": 17000, + "arguments": [ + 0, + 0, + 590 + ] + }, + "load_named_keys": { + "cost": 42000, + "arguments": [ + 0, + 0 + ] + }, + "ret": { + "cost": 23000, + "arguments": [ + 0, + 420 + ] + }, + "get_key": { + "cost": 2000, + "arguments": [ + 0, + 440, + 0, + 0, + 0 + ] + }, + "has_key": { + "cost": 1500, + "arguments": [ + 0, + 840 + ] + }, + "put_key": { + "cost": 38000, + "arguments": [ + 0, + 1100, + 0, + 0 + ] + }, + "remove_key": { + "cost": 61000, + "arguments": [ + 0, + 3200 + ] + }, + "revert": { + "cost": 500, + "arguments": [ + 0 + ] + }, + "is_valid_uref": { + "cost": 760, + "arguments": [ + 0, + 0 + ] + }, + "add_associated_key": { + "cost": 9000, + "arguments": [ + 0, + 0, + 0 + ] + }, + 
"remove_associated_key": { + "cost": 4200, + "arguments": [ + 0, + 0 + ] + }, + "update_associated_key": { + "cost": 4200, + "arguments": [ + 0, + 0, + 0 + ] + }, + "set_action_threshold": { + "cost": 74000, + "arguments": [ + 0, + 0 + ] + }, + "get_caller": { + "cost": 380, + "arguments": [ + 0 + ] + }, + "get_blocktime": { + "cost": 330, + "arguments": [ + 0 + ] + }, + "create_purse": { + "cost": 170000, + "arguments": [ + 0, + 0 + ] + }, + "transfer_to_account": { + "cost": 24000, + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ] + }, + "transfer_from_purse_to_account": { + "cost": 160000, + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ] + }, + "transfer_from_purse_to_purse": { + "cost": 82000, + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ] + }, + "get_balance": { + "cost": 3800, + "arguments": [ + 0, + 0, + 0 + ] + }, + "get_phase": { + "cost": 710, + "arguments": [ + 0 + ] + }, + "get_system_contract": { + "cost": 1100, + "arguments": [ + 0, + 0, + 0 + ] + }, + "get_main_purse": { + "cost": 1300, + "arguments": [ + 0 + ] + }, + "read_host_buffer": { + "cost": 3500, + "arguments": [ + 0, + 310, + 0 + ] + }, + "create_contract_package_at_hash": { + "cost": 200, + "arguments": [ + 0, + 0 + ] + }, + "create_contract_user_group": { + "cost": 200, + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ] + }, + "add_contract_version": { + "cost": 200, + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ] + }, + "disable_contract_version": { + "cost": 200, + "arguments": [ + 0, + 0, + 0, + 0 + ] + }, + "call_contract": { + "cost": 4500, + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 420, + 0 + ] + }, + "call_versioned_contract": { + "cost": 200, + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ] + }, + "get_named_arg_size": { + "cost": 200, + "arguments": [ + 0, + 0, + 0 + ] + }, + "get_named_arg": { + "cost": 200, + "arguments": [ + 0, + 0, + 0, + 0 + ] + }, + "remove_contract_user_group": { + "cost": 
200, + "arguments": [ + 0, + 0, + 0, + 0 + ] + }, + "provision_contract_user_group_uref": { + "cost": 200, + "arguments": [ + 0, + 0, + 0, + 0, + 0 + ] + }, + "remove_contract_user_group_urefs": { + "cost": 200, + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 0 + ] + }, + "print": { + "cost": 20000, + "arguments": [ + 0, + 4600 + ] + }, + "blake2b": { + "cost": 200, + "arguments": [ + 0, + 0, + 0, + 0 + ] + } + } + }, + "system_config": { + "wasmless_transfer_cost": 10000, + "auction_costs": { + "get_era_validators": 10000, + "read_seigniorage_recipients": 10000, + "add_bid": 10000, + "withdraw_bid": 10000, + "delegate": 10000, + "undelegate": 10000, + "run_auction": 10000, + "slash": 10000, + "distribute": 10000, + "withdraw_delegator_reward": 10000, + "withdraw_validator_reward": 10000, + "read_era_id": 10000, + "activate_bid": 10000 + }, + "mint_costs": { + "mint": 10000, + "reduce_total_supply": 10000, + "create": 10000, + "balance": 10000, + "transfer": 10000, + "read_base_round_reward": 10000 + }, + "handle_payment_costs": { + "get_payment_purse": 10000, + "set_refund_purse": 10000, + "get_refund_purse": 10000, + "finalize_payment": 10000 + }, + "standard_payment_costs": { + "pay": 10000 + } + }, + "validator_slots": 5, + "auction_delay": 3, + "locked_funds_period_millis": 7776000000, + "round_seigniorage_rate": [ + 6414, + 623437335209 + ], + "unbonding_delay": 14, + "genesis_timestamp_millis": 0 + } + }, + "post_state_hash": "483aa1dc35286904ac958f38b71080b78d0904465ef9596b364506996dd5f0cc" +} \ No newline at end of file diff --git a/execution_engine_testing/tests/fixtures/release_1_3_1/global_state/data.lmdb b/execution_engine_testing/tests/fixtures/release_1_3_1/global_state/data.lmdb new file mode 100644 index 0000000000..5f03445c46 Binary files /dev/null and b/execution_engine_testing/tests/fixtures/release_1_3_1/global_state/data.lmdb differ diff --git a/execution_engine_testing/tests/fixtures/release_1_3_1/global_state/data.lmdb-lock 
b/execution_engine_testing/tests/fixtures/release_1_3_1/global_state/data.lmdb-lock new file mode 100644 index 0000000000..0b21633590 Binary files /dev/null and b/execution_engine_testing/tests/fixtures/release_1_3_1/global_state/data.lmdb-lock differ diff --git a/execution_engine_testing/tests/fixtures/release_1_3_1/state.json b/execution_engine_testing/tests/fixtures/release_1_3_1/state.json new file mode 100644 index 0000000000..2133bee5c0 --- /dev/null +++ b/execution_engine_testing/tests/fixtures/release_1_3_1/state.json @@ -0,0 +1,465 @@ +{ + "genesis_request": { + "ee_config": { + "accounts": [ + { + "Account": { + "balance": "100000000000000000", + "public_key": "01d5af25e204ad03d0a26e236996404f1be51a60948bcc026cd084a83690b756d3", + "validator": null + } + }, + { + "Account": { + "balance": "100000000000000000", + "public_key": "01bb47d33d777b4559bb917d1825827421c4a6b1b9737f12e1c58ea4305af88b74", + "validator": null + } + } + ], + "auction_delay": 3, + "genesis_timestamp_millis": 0, + "locked_funds_period_millis": 7776000000, + "round_seigniorage_rate": [ + 6414, + 623437335209 + ], + "system_config": { + "auction_costs": { + "activate_bid": 10000, + "add_bid": 10000, + "delegate": 10000, + "distribute": 10000, + "get_era_validators": 10000, + "read_era_id": 10000, + "read_seigniorage_recipients": 10000, + "run_auction": 10000, + "slash": 10000, + "undelegate": 10000, + "withdraw_bid": 10000, + "withdraw_delegator_reward": 10000, + "withdraw_validator_reward": 10000 + }, + "handle_payment_costs": { + "finalize_payment": 10000, + "get_payment_purse": 10000, + "get_refund_purse": 10000, + "set_refund_purse": 10000 + }, + "mint_costs": { + "balance": 10000, + "create": 10000, + "mint": 10000, + "read_base_round_reward": 10000, + "reduce_total_supply": 10000, + "transfer": 10000 + }, + "standard_payment_costs": { + "pay": 10000 + }, + "wasmless_transfer_cost": 10000 + }, + "unbonding_delay": 14, + "validator_slots": 5, + "wasm_config": { + 
"host_function_costs": { + "add": { + "arguments": [ + 0, + 0, + 0, + 0 + ], + "cost": 5800 + }, + "add_associated_key": { + "arguments": [ + 0, + 0, + 0 + ], + "cost": 9000 + }, + "add_contract_version": { + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "cost": 200 + }, + "blake2b": { + "arguments": [ + 0, + 0, + 0, + 0 + ], + "cost": 200 + }, + "call_contract": { + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 420, + 0 + ], + "cost": 4500 + }, + "call_versioned_contract": { + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "cost": 200 + }, + "create_contract_package_at_hash": { + "arguments": [ + 0, + 0 + ], + "cost": 200 + }, + "create_contract_user_group": { + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "cost": 200 + }, + "create_purse": { + "arguments": [ + 0, + 0 + ], + "cost": 170000 + }, + "dictionary_get": { + "arguments": [ + 0, + 590, + 0 + ], + "cost": 5500 + }, + "dictionary_put": { + "arguments": [ + 0, + 1800, + 0, + 520 + ], + "cost": 9500 + }, + "disable_contract_version": { + "arguments": [ + 0, + 0, + 0, + 0 + ], + "cost": 200 + }, + "get_balance": { + "arguments": [ + 0, + 0, + 0 + ], + "cost": 3800 + }, + "get_blocktime": { + "arguments": [ + 0 + ], + "cost": 330 + }, + "get_caller": { + "arguments": [ + 0 + ], + "cost": 380 + }, + "get_key": { + "arguments": [ + 0, + 440, + 0, + 0, + 0 + ], + "cost": 2000 + }, + "get_main_purse": { + "arguments": [ + 0 + ], + "cost": 1300 + }, + "get_named_arg": { + "arguments": [ + 0, + 0, + 0, + 0 + ], + "cost": 200 + }, + "get_named_arg_size": { + "arguments": [ + 0, + 0, + 0 + ], + "cost": 200 + }, + "get_phase": { + "arguments": [ + 0 + ], + "cost": 710 + }, + "get_system_contract": { + "arguments": [ + 0, + 0, + 0 + ], + "cost": 1100 + }, + "has_key": { + "arguments": [ + 0, + 840 + ], + "cost": 1500 + }, + "is_valid_uref": { + "arguments": [ + 0, + 0 + ], + "cost": 760 + }, + "load_named_keys": { + "arguments": [ + 0, + 0 + ], + "cost": 42000 + }, 
+ "new_uref": { + "arguments": [ + 0, + 0, + 590 + ], + "cost": 17000 + }, + "print": { + "arguments": [ + 0, + 4600 + ], + "cost": 20000 + }, + "provision_contract_user_group_uref": { + "arguments": [ + 0, + 0, + 0, + 0, + 0 + ], + "cost": 200 + }, + "put_key": { + "arguments": [ + 0, + 1100, + 0, + 0 + ], + "cost": 38000 + }, + "read_host_buffer": { + "arguments": [ + 0, + 310, + 0 + ], + "cost": 3500 + }, + "read_value": { + "arguments": [ + 0, + 0, + 0 + ], + "cost": 6000 + }, + "remove_associated_key": { + "arguments": [ + 0, + 0 + ], + "cost": 4200 + }, + "remove_contract_user_group": { + "arguments": [ + 0, + 0, + 0, + 0 + ], + "cost": 200 + }, + "remove_contract_user_group_urefs": { + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 0 + ], + "cost": 200 + }, + "remove_key": { + "arguments": [ + 0, + 3200 + ], + "cost": 61000 + }, + "ret": { + "arguments": [ + 0, + 420 + ], + "cost": 23000 + }, + "revert": { + "arguments": [ + 0 + ], + "cost": 500 + }, + "set_action_threshold": { + "arguments": [ + 0, + 0 + ], + "cost": 74000 + }, + "transfer_from_purse_to_account": { + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "cost": 160000 + }, + "transfer_from_purse_to_purse": { + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "cost": 82000 + }, + "transfer_to_account": { + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "cost": 24000 + }, + "update_associated_key": { + "arguments": [ + 0, + 0, + 0 + ], + "cost": 4200 + }, + "write": { + "arguments": [ + 0, + 0, + 0, + 980 + ], + "cost": 14000 + } + }, + "max_memory": 64, + "max_stack_height": 65536, + "opcode_costs": { + "add": 210, + "bit": 300, + "const": 110, + "control_flow": 440, + "conversion": 420, + "current_memory": 290, + "div": 320, + "global": 390, + "grow_memory": 240000, + "integer_comparison": 250, + "load": 2500, + "local": 390, + "mul": 240, + "nop": 200, + "regular": 210, + "store": 4700, + "unreachable": 270 + }, + "storage_costs": { + "gas_per_byte": 625000 + } 
+ } + }, + "genesis_config_hash": "2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a", + "protocol_version": "1.0.0" + }, + "post_state_hash": "cedf51e7a23890fc14873d6c4da7076d80d79ed95038dd80710d45dbab4c822b" +} \ No newline at end of file diff --git a/execution_engine_testing/tests/fixtures/release_1_4_2/global_state/data.lmdb b/execution_engine_testing/tests/fixtures/release_1_4_2/global_state/data.lmdb new file mode 100644 index 0000000000..463becdb18 Binary files /dev/null and b/execution_engine_testing/tests/fixtures/release_1_4_2/global_state/data.lmdb differ diff --git a/execution_engine_testing/tests/fixtures/release_1_4_2/global_state/data.lmdb-lock b/execution_engine_testing/tests/fixtures/release_1_4_2/global_state/data.lmdb-lock new file mode 100644 index 0000000000..beaef36656 Binary files /dev/null and b/execution_engine_testing/tests/fixtures/release_1_4_2/global_state/data.lmdb-lock differ diff --git a/execution_engine_testing/tests/fixtures/release_1_4_2/state.json b/execution_engine_testing/tests/fixtures/release_1_4_2/state.json new file mode 100644 index 0000000000..7417214797 --- /dev/null +++ b/execution_engine_testing/tests/fixtures/release_1_4_2/state.json @@ -0,0 +1,479 @@ +{ + "genesis_request": { + "ee_config": { + "accounts": [ + { + "Account": { + "balance": "100000000000000000", + "public_key": "01d5AF25e204AD03D0a26e236996404F1be51a60948bcc026cD084a83690B756d3", + "validator": null + } + }, + { + "Account": { + "balance": "100000000000000000", + "public_key": "01BB47d33d777B4559Bb917d1825827421C4a6B1b9737F12e1C58EA4305aF88b74", + "validator": null + } + }, + { + "Account": { + "balance": "7500000000000000", + "public_key": "01cA57eEd30e4a7274Ef4C648F56F58F880B20D2CA25725D9e5C13C83C08c09aEB", + "validator": null + } + }, + { + "Account": { + "balance": "7500000000000000", + "public_key": "0153840868cB293a6e0a636B1f2245BEfBe1988fB287Bd6A9D84dF5Df4A519dd11", + "validator": null + } + } + ], + "auction_delay": 3, + 
"genesis_timestamp_millis": 0, + "locked_funds_period_millis": 7776000000, + "round_seigniorage_rate": [ + 6414, + 623437335209 + ], + "system_config": { + "auction_costs": { + "activate_bid": 10000, + "add_bid": 10000, + "delegate": 10000, + "distribute": 10000, + "get_era_validators": 10000, + "read_era_id": 10000, + "read_seigniorage_recipients": 10000, + "run_auction": 10000, + "slash": 10000, + "undelegate": 10000, + "withdraw_bid": 10000, + "withdraw_delegator_reward": 10000, + "withdraw_validator_reward": 10000 + }, + "handle_payment_costs": { + "finalize_payment": 10000, + "get_payment_purse": 10000, + "get_refund_purse": 10000, + "set_refund_purse": 10000 + }, + "mint_costs": { + "balance": 10000, + "create": 2500000000, + "mint": 2500000000, + "read_base_round_reward": 10000, + "reduce_total_supply": 10000, + "transfer": 10000 + }, + "standard_payment_costs": { + "pay": 10000 + }, + "wasmless_transfer_cost": 100000000 + }, + "unbonding_delay": 14, + "validator_slots": 5, + "wasm_config": { + "host_function_costs": { + "add": { + "arguments": [ + 0, + 0, + 0, + 0 + ], + "cost": 5800 + }, + "add_associated_key": { + "arguments": [ + 0, + 0, + 0 + ], + "cost": 9000 + }, + "add_contract_version": { + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "cost": 200 + }, + "blake2b": { + "arguments": [ + 0, + 0, + 0, + 0 + ], + "cost": 200 + }, + "call_contract": { + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 420, + 0 + ], + "cost": 4500 + }, + "call_versioned_contract": { + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "cost": 200 + }, + "create_contract_package_at_hash": { + "arguments": [ + 0, + 0 + ], + "cost": 200 + }, + "create_contract_user_group": { + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "cost": 200 + }, + "create_purse": { + "arguments": [ + 0, + 0 + ], + "cost": 2500000000 + }, + "dictionary_get": { + "arguments": [ + 0, + 590, + 0 + ], + "cost": 5500 + }, + "dictionary_put": { + 
"arguments": [ + 0, + 1800, + 0, + 520 + ], + "cost": 9500 + }, + "disable_contract_version": { + "arguments": [ + 0, + 0, + 0, + 0 + ], + "cost": 200 + }, + "get_balance": { + "arguments": [ + 0, + 0, + 0 + ], + "cost": 3800 + }, + "get_blocktime": { + "arguments": [ + 0 + ], + "cost": 330 + }, + "get_caller": { + "arguments": [ + 0 + ], + "cost": 380 + }, + "get_key": { + "arguments": [ + 0, + 440, + 0, + 0, + 0 + ], + "cost": 2000 + }, + "get_main_purse": { + "arguments": [ + 0 + ], + "cost": 1300 + }, + "get_named_arg": { + "arguments": [ + 0, + 0, + 0, + 0 + ], + "cost": 200 + }, + "get_named_arg_size": { + "arguments": [ + 0, + 0, + 0 + ], + "cost": 200 + }, + "get_phase": { + "arguments": [ + 0 + ], + "cost": 710 + }, + "get_system_contract": { + "arguments": [ + 0, + 0, + 0 + ], + "cost": 1100 + }, + "has_key": { + "arguments": [ + 0, + 840 + ], + "cost": 1500 + }, + "is_valid_uref": { + "arguments": [ + 0, + 0 + ], + "cost": 760 + }, + "load_named_keys": { + "arguments": [ + 0, + 0 + ], + "cost": 42000 + }, + "new_uref": { + "arguments": [ + 0, + 0, + 590 + ], + "cost": 17000 + }, + "print": { + "arguments": [ + 0, + 4600 + ], + "cost": 20000 + }, + "provision_contract_user_group_uref": { + "arguments": [ + 0, + 0, + 0, + 0, + 0 + ], + "cost": 200 + }, + "put_key": { + "arguments": [ + 0, + 1100, + 0, + 0 + ], + "cost": 38000 + }, + "read_host_buffer": { + "arguments": [ + 0, + 310, + 0 + ], + "cost": 3500 + }, + "read_value": { + "arguments": [ + 0, + 0, + 0 + ], + "cost": 6000 + }, + "remove_associated_key": { + "arguments": [ + 0, + 0 + ], + "cost": 4200 + }, + "remove_contract_user_group": { + "arguments": [ + 0, + 0, + 0, + 0 + ], + "cost": 200 + }, + "remove_contract_user_group_urefs": { + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 0 + ], + "cost": 200 + }, + "remove_key": { + "arguments": [ + 0, + 3200 + ], + "cost": 61000 + }, + "ret": { + "arguments": [ + 0, + 420 + ], + "cost": 23000 + }, + "revert": { + "arguments": [ + 0 + ], + "cost": 500 + }, 
+ "set_action_threshold": { + "arguments": [ + 0, + 0 + ], + "cost": 74000 + }, + "transfer_from_purse_to_account": { + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "cost": 2500000000 + }, + "transfer_from_purse_to_purse": { + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "cost": 82000 + }, + "transfer_to_account": { + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "cost": 2500000000 + }, + "update_associated_key": { + "arguments": [ + 0, + 0, + 0 + ], + "cost": 4200 + }, + "write": { + "arguments": [ + 0, + 0, + 0, + 980 + ], + "cost": 14000 + } + }, + "max_memory": 64, + "max_stack_height": 65536, + "opcode_costs": { + "add": 210, + "bit": 300, + "const": 110, + "control_flow": 440, + "conversion": 420, + "current_memory": 290, + "div": 320, + "global": 390, + "grow_memory": 240000, + "integer_comparison": 250, + "load": 2500, + "local": 390, + "mul": 240, + "nop": 200, + "regular": 210, + "store": 4700, + "unreachable": 270 + }, + "storage_costs": { + "gas_per_byte": 625000 + } + } + }, + "genesis_config_hash": "2a2a2A2A2a2a2a2A2a2A2A2A2a2A2a2a2A2a2A2A2a2A2A2a2a2A2a2a2A2A2a2a", + "protocol_version": "1.0.0" + }, + "post_state_hash": "AaF255056ED8966704422AfaC3B9c5F95c9Cc8Fc90E777939c82c31451e826C9" +} \ No newline at end of file diff --git a/execution_engine_testing/tests/fixtures/release_1_4_3/global_state/data.lmdb b/execution_engine_testing/tests/fixtures/release_1_4_3/global_state/data.lmdb new file mode 100644 index 0000000000..3506acaaac Binary files /dev/null and b/execution_engine_testing/tests/fixtures/release_1_4_3/global_state/data.lmdb differ diff --git a/execution_engine_testing/tests/fixtures/release_1_4_3/global_state/data.lmdb-lock b/execution_engine_testing/tests/fixtures/release_1_4_3/global_state/data.lmdb-lock new file mode 100644 index 0000000000..60c57a47ee Binary files /dev/null and b/execution_engine_testing/tests/fixtures/release_1_4_3/global_state/data.lmdb-lock differ diff --git 
a/execution_engine_testing/tests/fixtures/release_1_4_3/state.json b/execution_engine_testing/tests/fixtures/release_1_4_3/state.json new file mode 100644 index 0000000000..33b719ebaf --- /dev/null +++ b/execution_engine_testing/tests/fixtures/release_1_4_3/state.json @@ -0,0 +1,506 @@ +{ + "genesis_request": { + "ee_config": { + "accounts": [ + { + "Account": { + "balance": "100000000000000000", + "public_key": "01d5AF25e204AD03D0a26e236996404F1be51a60948bcc026cD084a83690B756d3", + "validator": null + } + }, + { + "Account": { + "balance": "100000000000000000", + "public_key": "01BB47d33d777B4559Bb917d1825827421C4a6B1b9737F12e1C58EA4305aF88b74", + "validator": null + } + }, + { + "Account": { + "balance": "7500000000000000", + "public_key": "0197fFc883c80Bee7237EF95d9B9B703d4AD63e60A21e605867682B75b8b3f4303", + "validator": { + "bonded_amount": "100000000", + "delegation_rate": 0 + } + } + }, + { + "Account": { + "balance": "7500000000000000", + "public_key": "01466627D52773c9D5cDA6C8FD28ea31eC7b94e68Aa8d42E2AD31A75dc8d24ed07", + "validator": { + "bonded_amount": "200000000", + "delegation_rate": 0 + } + } + }, + { + "Account": { + "balance": "7500000000000000", + "public_key": "01cA57eEd30e4a7274Ef4C648F56F58F880B20D2CA25725D9e5C13C83C08c09aEB", + "validator": null + } + }, + { + "Account": { + "balance": "7500000000000000", + "public_key": "0153840868cB293a6e0a636B1f2245BEfBe1988fB287Bd6A9D84dF5Df4A519dd11", + "validator": null + } + }, + { + "Account": { + "balance": "7500000000000000", + "public_key": "01fC947730F49eB01427a66e050733294d9e520e545c7a27125A780634e0860a27", + "validator": null + } + } + ], + "auction_delay": 3, + "genesis_timestamp_millis": 0, + "locked_funds_period_millis": 7776000000, + "round_seigniorage_rate": [ + 6414, + 623437335209 + ], + "system_config": { + "auction_costs": { + "activate_bid": 10000, + "add_bid": 10000, + "delegate": 10000, + "distribute": 10000, + "get_era_validators": 10000, + "read_era_id": 10000, + 
"read_seigniorage_recipients": 10000, + "run_auction": 10000, + "slash": 10000, + "undelegate": 10000, + "withdraw_bid": 10000, + "withdraw_delegator_reward": 10000, + "withdraw_validator_reward": 10000 + }, + "handle_payment_costs": { + "finalize_payment": 10000, + "get_payment_purse": 10000, + "get_refund_purse": 10000, + "set_refund_purse": 10000 + }, + "mint_costs": { + "balance": 10000, + "create": 2500000000, + "mint": 2500000000, + "read_base_round_reward": 10000, + "reduce_total_supply": 10000, + "transfer": 10000 + }, + "standard_payment_costs": { + "pay": 10000 + }, + "wasmless_transfer_cost": 100000000 + }, + "unbonding_delay": 14, + "validator_slots": 5, + "wasm_config": { + "host_function_costs": { + "add": { + "arguments": [ + 0, + 0, + 0, + 0 + ], + "cost": 5800 + }, + "add_associated_key": { + "arguments": [ + 0, + 0, + 0 + ], + "cost": 9000 + }, + "add_contract_version": { + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "cost": 200 + }, + "blake2b": { + "arguments": [ + 0, + 0, + 0, + 0 + ], + "cost": 200 + }, + "call_contract": { + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 420, + 0 + ], + "cost": 4500 + }, + "call_versioned_contract": { + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "cost": 200 + }, + "create_contract_package_at_hash": { + "arguments": [ + 0, + 0 + ], + "cost": 200 + }, + "create_contract_user_group": { + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "cost": 200 + }, + "create_purse": { + "arguments": [ + 0, + 0 + ], + "cost": 2500000000 + }, + "dictionary_get": { + "arguments": [ + 0, + 590, + 0 + ], + "cost": 5500 + }, + "dictionary_put": { + "arguments": [ + 0, + 1800, + 0, + 520 + ], + "cost": 9500 + }, + "disable_contract_version": { + "arguments": [ + 0, + 0, + 0, + 0 + ], + "cost": 200 + }, + "get_balance": { + "arguments": [ + 0, + 0, + 0 + ], + "cost": 3800 + }, + "get_blocktime": { + "arguments": [ + 0 + ], + "cost": 330 + }, + "get_caller": { + "arguments": [ 
+ 0 + ], + "cost": 380 + }, + "get_key": { + "arguments": [ + 0, + 440, + 0, + 0, + 0 + ], + "cost": 2000 + }, + "get_main_purse": { + "arguments": [ + 0 + ], + "cost": 1300 + }, + "get_named_arg": { + "arguments": [ + 0, + 0, + 0, + 0 + ], + "cost": 200 + }, + "get_named_arg_size": { + "arguments": [ + 0, + 0, + 0 + ], + "cost": 200 + }, + "get_phase": { + "arguments": [ + 0 + ], + "cost": 710 + }, + "get_system_contract": { + "arguments": [ + 0, + 0, + 0 + ], + "cost": 1100 + }, + "has_key": { + "arguments": [ + 0, + 840 + ], + "cost": 1500 + }, + "is_valid_uref": { + "arguments": [ + 0, + 0 + ], + "cost": 760 + }, + "load_named_keys": { + "arguments": [ + 0, + 0 + ], + "cost": 42000 + }, + "new_uref": { + "arguments": [ + 0, + 0, + 590 + ], + "cost": 17000 + }, + "print": { + "arguments": [ + 0, + 4600 + ], + "cost": 20000 + }, + "provision_contract_user_group_uref": { + "arguments": [ + 0, + 0, + 0, + 0, + 0 + ], + "cost": 200 + }, + "put_key": { + "arguments": [ + 0, + 1100, + 0, + 0 + ], + "cost": 38000 + }, + "read_host_buffer": { + "arguments": [ + 0, + 310, + 0 + ], + "cost": 3500 + }, + "read_value": { + "arguments": [ + 0, + 0, + 0 + ], + "cost": 6000 + }, + "remove_associated_key": { + "arguments": [ + 0, + 0 + ], + "cost": 4200 + }, + "remove_contract_user_group": { + "arguments": [ + 0, + 0, + 0, + 0 + ], + "cost": 200 + }, + "remove_contract_user_group_urefs": { + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 0 + ], + "cost": 200 + }, + "remove_key": { + "arguments": [ + 0, + 3200 + ], + "cost": 61000 + }, + "ret": { + "arguments": [ + 0, + 420 + ], + "cost": 23000 + }, + "revert": { + "arguments": [ + 0 + ], + "cost": 500 + }, + "set_action_threshold": { + "arguments": [ + 0, + 0 + ], + "cost": 74000 + }, + "transfer_from_purse_to_account": { + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "cost": 2500000000 + }, + "transfer_from_purse_to_purse": { + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "cost": 82000 + }, + 
"transfer_to_account": { + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "cost": 2500000000 + }, + "update_associated_key": { + "arguments": [ + 0, + 0, + 0 + ], + "cost": 4200 + }, + "write": { + "arguments": [ + 0, + 0, + 0, + 980 + ], + "cost": 14000 + } + }, + "max_memory": 64, + "max_stack_height": 65536, + "opcode_costs": { + "add": 210, + "bit": 300, + "const": 110, + "control_flow": 440, + "conversion": 420, + "current_memory": 290, + "div": 320, + "global": 390, + "grow_memory": 240000, + "integer_comparison": 250, + "load": 2500, + "local": 390, + "mul": 240, + "nop": 200, + "regular": 210, + "store": 4700, + "unreachable": 270 + }, + "storage_costs": { + "gas_per_byte": 625000 + } + } + }, + "genesis_config_hash": "2a2a2A2A2a2a2a2A2a2A2A2A2a2A2a2a2A2a2A2A2a2A2A2a2a2A2a2a2A2A2a2a", + "protocol_version": "1.0.0" + }, + "post_state_hash": "916dCD18De4C08947b2C85Da4d26FEf5C92E27Cb559EdfD23A4D2f4780a3f422" +} \ No newline at end of file diff --git a/execution_engine_testing/tests/fixtures/release_1_4_4/global_state/data.lmdb b/execution_engine_testing/tests/fixtures/release_1_4_4/global_state/data.lmdb new file mode 100644 index 0000000000..3e992f0dca Binary files /dev/null and b/execution_engine_testing/tests/fixtures/release_1_4_4/global_state/data.lmdb differ diff --git a/execution_engine_testing/tests/fixtures/release_1_4_4/global_state/data.lmdb-lock b/execution_engine_testing/tests/fixtures/release_1_4_4/global_state/data.lmdb-lock new file mode 100644 index 0000000000..e57a959744 Binary files /dev/null and b/execution_engine_testing/tests/fixtures/release_1_4_4/global_state/data.lmdb-lock differ diff --git a/execution_engine_testing/tests/fixtures/release_1_4_4/state.json b/execution_engine_testing/tests/fixtures/release_1_4_4/state.json new file mode 100644 index 0000000000..5f98b1e2f1 --- /dev/null +++ b/execution_engine_testing/tests/fixtures/release_1_4_4/state.json @@ -0,0 +1,465 @@ +{ + "genesis_request": { + "ee_config": { + 
"accounts": [ + { + "Account": { + "balance": "100000000000000000", + "public_key": "01d5af25e204ad03d0a26e236996404f1be51a60948bcc026cd084a83690b756d3", + "validator": null + } + }, + { + "Account": { + "balance": "100000000000000000", + "public_key": "01bb47d33d777b4559bb917d1825827421c4a6b1b9737f12e1c58ea4305af88b74", + "validator": null + } + } + ], + "auction_delay": 3, + "genesis_timestamp_millis": 0, + "locked_funds_period_millis": 7776000000, + "round_seigniorage_rate": [ + 6414, + 623437335209 + ], + "system_config": { + "auction_costs": { + "activate_bid": 10000, + "add_bid": 10000, + "delegate": 10000, + "distribute": 10000, + "get_era_validators": 10000, + "read_era_id": 10000, + "read_seigniorage_recipients": 10000, + "run_auction": 10000, + "slash": 10000, + "undelegate": 10000, + "withdraw_bid": 10000, + "withdraw_delegator_reward": 10000, + "withdraw_validator_reward": 10000 + }, + "handle_payment_costs": { + "finalize_payment": 10000, + "get_payment_purse": 10000, + "get_refund_purse": 10000, + "set_refund_purse": 10000 + }, + "mint_costs": { + "balance": 10000, + "create": 2500000000, + "mint": 2500000000, + "read_base_round_reward": 10000, + "reduce_total_supply": 10000, + "transfer": 10000 + }, + "standard_payment_costs": { + "pay": 10000 + }, + "wasmless_transfer_cost": 100000000 + }, + "unbonding_delay": 14, + "validator_slots": 5, + "wasm_config": { + "host_function_costs": { + "add": { + "arguments": [ + 0, + 0, + 0, + 0 + ], + "cost": 5800 + }, + "add_associated_key": { + "arguments": [ + 0, + 0, + 0 + ], + "cost": 9000 + }, + "add_contract_version": { + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "cost": 200 + }, + "blake2b": { + "arguments": [ + 0, + 0, + 0, + 0 + ], + "cost": 200 + }, + "call_contract": { + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 420, + 0 + ], + "cost": 4500 + }, + "call_versioned_contract": { + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "cost": 200 + }, + 
"create_contract_package_at_hash": { + "arguments": [ + 0, + 0 + ], + "cost": 200 + }, + "create_contract_user_group": { + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "cost": 200 + }, + "create_purse": { + "arguments": [ + 0, + 0 + ], + "cost": 2500000000 + }, + "dictionary_get": { + "arguments": [ + 0, + 590, + 0 + ], + "cost": 5500 + }, + "dictionary_put": { + "arguments": [ + 0, + 1800, + 0, + 520 + ], + "cost": 9500 + }, + "disable_contract_version": { + "arguments": [ + 0, + 0, + 0, + 0 + ], + "cost": 200 + }, + "get_balance": { + "arguments": [ + 0, + 0, + 0 + ], + "cost": 3800 + }, + "get_blocktime": { + "arguments": [ + 0 + ], + "cost": 330 + }, + "get_caller": { + "arguments": [ + 0 + ], + "cost": 380 + }, + "get_key": { + "arguments": [ + 0, + 440, + 0, + 0, + 0 + ], + "cost": 2000 + }, + "get_main_purse": { + "arguments": [ + 0 + ], + "cost": 1300 + }, + "get_named_arg": { + "arguments": [ + 0, + 0, + 0, + 0 + ], + "cost": 200 + }, + "get_named_arg_size": { + "arguments": [ + 0, + 0, + 0 + ], + "cost": 200 + }, + "get_phase": { + "arguments": [ + 0 + ], + "cost": 710 + }, + "get_system_contract": { + "arguments": [ + 0, + 0, + 0 + ], + "cost": 1100 + }, + "has_key": { + "arguments": [ + 0, + 840 + ], + "cost": 1500 + }, + "is_valid_uref": { + "arguments": [ + 0, + 0 + ], + "cost": 760 + }, + "load_named_keys": { + "arguments": [ + 0, + 0 + ], + "cost": 42000 + }, + "new_uref": { + "arguments": [ + 0, + 0, + 590 + ], + "cost": 17000 + }, + "print": { + "arguments": [ + 0, + 4600 + ], + "cost": 20000 + }, + "provision_contract_user_group_uref": { + "arguments": [ + 0, + 0, + 0, + 0, + 0 + ], + "cost": 200 + }, + "put_key": { + "arguments": [ + 0, + 1100, + 0, + 0 + ], + "cost": 38000 + }, + "read_host_buffer": { + "arguments": [ + 0, + 310, + 0 + ], + "cost": 3500 + }, + "read_value": { + "arguments": [ + 0, + 0, + 0 + ], + "cost": 6000 + }, + "remove_associated_key": { + "arguments": [ + 0, + 0 + ], + "cost": 4200 + }, + 
"remove_contract_user_group": { + "arguments": [ + 0, + 0, + 0, + 0 + ], + "cost": 200 + }, + "remove_contract_user_group_urefs": { + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 0 + ], + "cost": 200 + }, + "remove_key": { + "arguments": [ + 0, + 3200 + ], + "cost": 61000 + }, + "ret": { + "arguments": [ + 0, + 420 + ], + "cost": 23000 + }, + "revert": { + "arguments": [ + 0 + ], + "cost": 500 + }, + "set_action_threshold": { + "arguments": [ + 0, + 0 + ], + "cost": 74000 + }, + "transfer_from_purse_to_account": { + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "cost": 2500000000 + }, + "transfer_from_purse_to_purse": { + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "cost": 82000 + }, + "transfer_to_account": { + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "cost": 2500000000 + }, + "update_associated_key": { + "arguments": [ + 0, + 0, + 0 + ], + "cost": 4200 + }, + "write": { + "arguments": [ + 0, + 0, + 0, + 980 + ], + "cost": 14000 + } + }, + "max_memory": 64, + "max_stack_height": 188, + "opcode_costs": { + "add": 210, + "bit": 300, + "const": 110, + "control_flow": 440, + "conversion": 420, + "current_memory": 290, + "div": 320, + "global": 390, + "grow_memory": 240000, + "integer_comparison": 250, + "load": 2500, + "local": 390, + "mul": 240, + "nop": 200, + "regular": 210, + "store": 4700, + "unreachable": 270 + }, + "storage_costs": { + "gas_per_byte": 625000 + } + } + }, + "genesis_config_hash": "2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a", + "protocol_version": "1.0.0" + }, + "post_state_hash": "f3047c087de9e9bac49de080e061e3313c7227ddc9ce22b8c9c617c397569987" +} \ No newline at end of file diff --git a/execution_engine_testing/tests/fixtures/release_1_4_5/global_state/data.lmdb b/execution_engine_testing/tests/fixtures/release_1_4_5/global_state/data.lmdb new file mode 100644 index 0000000000..e1154de2a0 Binary files /dev/null and 
b/execution_engine_testing/tests/fixtures/release_1_4_5/global_state/data.lmdb differ diff --git a/execution_engine_testing/tests/fixtures/release_1_4_5/global_state/data.lmdb-lock b/execution_engine_testing/tests/fixtures/release_1_4_5/global_state/data.lmdb-lock new file mode 100644 index 0000000000..7b865f6900 Binary files /dev/null and b/execution_engine_testing/tests/fixtures/release_1_4_5/global_state/data.lmdb-lock differ diff --git a/execution_engine_testing/tests/fixtures/release_1_4_5/state.json b/execution_engine_testing/tests/fixtures/release_1_4_5/state.json new file mode 100644 index 0000000000..7eae13adfd --- /dev/null +++ b/execution_engine_testing/tests/fixtures/release_1_4_5/state.json @@ -0,0 +1,465 @@ +{ + "genesis_request": { + "ee_config": { + "accounts": [ + { + "Account": { + "balance": "100000000000000000", + "public_key": "01d5af25e204ad03d0a26e236996404f1be51a60948bcc026cd084a83690b756d3", + "validator": null + } + }, + { + "Account": { + "balance": "100000000000000000", + "public_key": "01bb47d33d777b4559bb917d1825827421c4a6b1b9737f12e1c58ea4305af88b74", + "validator": null + } + } + ], + "auction_delay": 3, + "genesis_timestamp_millis": 0, + "locked_funds_period_millis": 7776000000, + "round_seigniorage_rate": [ + 6414, + 623437335209 + ], + "system_config": { + "auction_costs": { + "activate_bid": 10000, + "add_bid": 10000, + "delegate": 10000, + "distribute": 10000, + "get_era_validators": 10000, + "read_era_id": 10000, + "read_seigniorage_recipients": 10000, + "run_auction": 10000, + "slash": 10000, + "undelegate": 10000, + "withdraw_bid": 10000, + "withdraw_delegator_reward": 10000, + "withdraw_validator_reward": 10000 + }, + "handle_payment_costs": { + "finalize_payment": 10000, + "get_payment_purse": 10000, + "get_refund_purse": 10000, + "set_refund_purse": 10000 + }, + "mint_costs": { + "balance": 10000, + "create": 2500000000, + "mint": 2500000000, + "read_base_round_reward": 10000, + "reduce_total_supply": 10000, + "transfer": 
10000 + }, + "standard_payment_costs": { + "pay": 10000 + }, + "wasmless_transfer_cost": 100000000 + }, + "unbonding_delay": 14, + "validator_slots": 5, + "wasm_config": { + "host_function_costs": { + "add": { + "arguments": [ + 0, + 0, + 0, + 0 + ], + "cost": 5800 + }, + "add_associated_key": { + "arguments": [ + 0, + 0, + 0 + ], + "cost": 9000 + }, + "add_contract_version": { + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "cost": 200 + }, + "blake2b": { + "arguments": [ + 0, + 0, + 0, + 0 + ], + "cost": 200 + }, + "call_contract": { + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 420, + 0 + ], + "cost": 4500 + }, + "call_versioned_contract": { + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "cost": 200 + }, + "create_contract_package_at_hash": { + "arguments": [ + 0, + 0 + ], + "cost": 200 + }, + "create_contract_user_group": { + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "cost": 200 + }, + "create_purse": { + "arguments": [ + 0, + 0 + ], + "cost": 2500000000 + }, + "dictionary_get": { + "arguments": [ + 0, + 590, + 0 + ], + "cost": 5500 + }, + "dictionary_put": { + "arguments": [ + 0, + 1800, + 0, + 520 + ], + "cost": 9500 + }, + "disable_contract_version": { + "arguments": [ + 0, + 0, + 0, + 0 + ], + "cost": 200 + }, + "get_balance": { + "arguments": [ + 0, + 0, + 0 + ], + "cost": 3800 + }, + "get_blocktime": { + "arguments": [ + 0 + ], + "cost": 330 + }, + "get_caller": { + "arguments": [ + 0 + ], + "cost": 380 + }, + "get_key": { + "arguments": [ + 0, + 440, + 0, + 0, + 0 + ], + "cost": 2000 + }, + "get_main_purse": { + "arguments": [ + 0 + ], + "cost": 1300 + }, + "get_named_arg": { + "arguments": [ + 0, + 0, + 0, + 0 + ], + "cost": 200 + }, + "get_named_arg_size": { + "arguments": [ + 0, + 0, + 0 + ], + "cost": 200 + }, + "get_phase": { + "arguments": [ + 0 + ], + "cost": 710 + }, + "get_system_contract": { + "arguments": [ + 0, + 0, + 0 + ], + "cost": 1100 + }, + "has_key": { + "arguments": [ + 0, 
+ 840 + ], + "cost": 1500 + }, + "is_valid_uref": { + "arguments": [ + 0, + 0 + ], + "cost": 760 + }, + "load_named_keys": { + "arguments": [ + 0, + 0 + ], + "cost": 42000 + }, + "new_uref": { + "arguments": [ + 0, + 0, + 590 + ], + "cost": 17000 + }, + "print": { + "arguments": [ + 0, + 4600 + ], + "cost": 20000 + }, + "provision_contract_user_group_uref": { + "arguments": [ + 0, + 0, + 0, + 0, + 0 + ], + "cost": 200 + }, + "put_key": { + "arguments": [ + 0, + 1100, + 0, + 0 + ], + "cost": 38000 + }, + "read_host_buffer": { + "arguments": [ + 0, + 310, + 0 + ], + "cost": 3500 + }, + "read_value": { + "arguments": [ + 0, + 0, + 0 + ], + "cost": 6000 + }, + "remove_associated_key": { + "arguments": [ + 0, + 0 + ], + "cost": 4200 + }, + "remove_contract_user_group": { + "arguments": [ + 0, + 0, + 0, + 0 + ], + "cost": 200 + }, + "remove_contract_user_group_urefs": { + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 0 + ], + "cost": 200 + }, + "remove_key": { + "arguments": [ + 0, + 3200 + ], + "cost": 61000 + }, + "ret": { + "arguments": [ + 0, + 420 + ], + "cost": 23000 + }, + "revert": { + "arguments": [ + 0 + ], + "cost": 500 + }, + "set_action_threshold": { + "arguments": [ + 0, + 0 + ], + "cost": 74000 + }, + "transfer_from_purse_to_account": { + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "cost": 2500000000 + }, + "transfer_from_purse_to_purse": { + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "cost": 82000 + }, + "transfer_to_account": { + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "cost": 2500000000 + }, + "update_associated_key": { + "arguments": [ + 0, + 0, + 0 + ], + "cost": 4200 + }, + "write": { + "arguments": [ + 0, + 0, + 0, + 980 + ], + "cost": 14000 + } + }, + "max_memory": 64, + "max_stack_height": 188, + "opcode_costs": { + "add": 210, + "bit": 300, + "const": 110, + "control_flow": 440, + "conversion": 420, + "current_memory": 290, + "div": 320, + "global": 390, + "grow_memory": 240000, + 
"integer_comparison": 250, + "load": 2500, + "local": 390, + "mul": 240, + "nop": 200, + "regular": 210, + "store": 4700, + "unreachable": 270 + }, + "storage_costs": { + "gas_per_byte": 625000 + } + } + }, + "genesis_config_hash": "2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a", + "protocol_version": "1.0.0" + }, + "post_state_hash": "805c44ea515eb44273ab2368201283baf598db5d4ad1f416669201a7a390918b" +} \ No newline at end of file diff --git a/execution_engine_testing/tests/fixtures/release_1_5_8/global_state/data.lmdb b/execution_engine_testing/tests/fixtures/release_1_5_8/global_state/data.lmdb new file mode 100644 index 0000000000..955837b578 Binary files /dev/null and b/execution_engine_testing/tests/fixtures/release_1_5_8/global_state/data.lmdb differ diff --git a/execution_engine_testing/tests/fixtures/release_1_5_8/global_state/data.lmdb-lock b/execution_engine_testing/tests/fixtures/release_1_5_8/global_state/data.lmdb-lock new file mode 100644 index 0000000000..eaa1d7b105 Binary files /dev/null and b/execution_engine_testing/tests/fixtures/release_1_5_8/global_state/data.lmdb-lock differ diff --git a/execution_engine_testing/tests/fixtures/release_1_5_8/state.json b/execution_engine_testing/tests/fixtures/release_1_5_8/state.json new file mode 100644 index 0000000000..023061284a --- /dev/null +++ b/execution_engine_testing/tests/fixtures/release_1_5_8/state.json @@ -0,0 +1,6 @@ +{ + "genesis_request": { + "protocol_version": "1.0.0" + }, + "post_state_hash": "29bbd3e40c68462422db2a7bb144e71e53607a1b7d9bcbdacecef22c998de8e3" +} \ No newline at end of file diff --git a/execution_engine_testing/tests/fixtures/three_version_fixture/global_state/data.lmdb b/execution_engine_testing/tests/fixtures/three_version_fixture/global_state/data.lmdb new file mode 100644 index 0000000000..55b292e123 Binary files /dev/null and b/execution_engine_testing/tests/fixtures/three_version_fixture/global_state/data.lmdb differ diff --git 
a/execution_engine_testing/tests/fixtures/three_version_fixture/global_state/data.lmdb-lock b/execution_engine_testing/tests/fixtures/three_version_fixture/global_state/data.lmdb-lock new file mode 100644 index 0000000000..42154f3b3d Binary files /dev/null and b/execution_engine_testing/tests/fixtures/three_version_fixture/global_state/data.lmdb-lock differ diff --git a/execution_engine_testing/tests/fixtures/three_version_fixture/state.json b/execution_engine_testing/tests/fixtures/three_version_fixture/state.json new file mode 100644 index 0000000000..542c11c45a --- /dev/null +++ b/execution_engine_testing/tests/fixtures/three_version_fixture/state.json @@ -0,0 +1,6 @@ +{ + "genesis_request": { + "protocol_version": "1.0.0" + }, + "post_state_hash": "30da4f641f4297d77600cb0319153c6e3f3353b0905231954a19a726896b28e5" +} \ No newline at end of file diff --git a/execution_engine_testing/tests/fixtures/upgrade_thresholds/global_state/data.lmdb b/execution_engine_testing/tests/fixtures/upgrade_thresholds/global_state/data.lmdb new file mode 100644 index 0000000000..2b508cbc02 Binary files /dev/null and b/execution_engine_testing/tests/fixtures/upgrade_thresholds/global_state/data.lmdb differ diff --git a/execution_engine_testing/tests/fixtures/upgrade_thresholds/global_state/data.lmdb-lock b/execution_engine_testing/tests/fixtures/upgrade_thresholds/global_state/data.lmdb-lock new file mode 100644 index 0000000000..35e8d70a41 Binary files /dev/null and b/execution_engine_testing/tests/fixtures/upgrade_thresholds/global_state/data.lmdb-lock differ diff --git a/execution_engine_testing/tests/fixtures/upgrade_thresholds/state.json b/execution_engine_testing/tests/fixtures/upgrade_thresholds/state.json new file mode 100644 index 0000000000..81c929d4ef --- /dev/null +++ b/execution_engine_testing/tests/fixtures/upgrade_thresholds/state.json @@ -0,0 +1,495 @@ +{ + "genesis_request": { + "genesis_config_hash": 
"2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a", + "protocol_version": "1.0.0", + "ee_config": { + "accounts": [ + { + "Account": { + "public_key": "01d5af25e204ad03d0a26e236996404f1be51a60948bcc026cd084a83690b756d3", + "balance": "100000000000000000", + "validator": null + } + }, + { + "Account": { + "public_key": "01bb47d33d777b4559bb917d1825827421c4a6b1b9737f12e1c58ea4305af88b74", + "balance": "100000000000000000", + "validator": null + } + } + ], + "wasm_config": { + "max_memory": 64, + "max_stack_height": 500, + "opcode_costs": { + "bit": 300, + "add": 210, + "mul": 240, + "div": 320, + "load": 2500, + "store": 4700, + "const": 110, + "local": 390, + "global": 390, + "integer_comparison": 250, + "conversion": 420, + "unreachable": 270, + "nop": 200, + "current_memory": 290, + "grow_memory": 240000, + "control_flow": { + "block": 440, + "loop": 440, + "if": 440, + "else": 440, + "end": 440, + "br": 440000, + "br_if": 440000, + "return": 440, + "call": 140000, + "call_indirect": 140000, + "drop": 440, + "select": 440, + "br_table": { + "cost": 440000, + "size_multiplier": 100 + } + } + }, + "storage_costs": { + "gas_per_byte": 630000 + }, + "host_function_costs": { + "read_value": { + "cost": 6000, + "arguments": [ + 0, + 0, + 0 + ] + }, + "dictionary_get": { + "cost": 5500, + "arguments": [ + 0, + 590, + 0 + ] + }, + "write": { + "cost": 14000, + "arguments": [ + 0, + 0, + 0, + 980 + ] + }, + "dictionary_put": { + "cost": 9500, + "arguments": [ + 0, + 1800, + 0, + 520 + ] + }, + "add": { + "cost": 5800, + "arguments": [ + 0, + 0, + 0, + 0 + ] + }, + "new_uref": { + "cost": 17000, + "arguments": [ + 0, + 0, + 590 + ] + }, + "load_named_keys": { + "cost": 42000, + "arguments": [ + 0, + 0 + ] + }, + "ret": { + "cost": 23000, + "arguments": [ + 0, + 420000 + ] + }, + "get_key": { + "cost": 2000, + "arguments": [ + 0, + 440, + 0, + 0, + 0 + ] + }, + "has_key": { + "cost": 1500, + "arguments": [ + 0, + 840 + ] + }, + "put_key": { + "cost": 38000, + 
"arguments": [ + 0, + 1100, + 0, + 0 + ] + }, + "remove_key": { + "cost": 61000, + "arguments": [ + 0, + 3200 + ] + }, + "revert": { + "cost": 500, + "arguments": [ + 0 + ] + }, + "is_valid_uref": { + "cost": 760, + "arguments": [ + 0, + 0 + ] + }, + "add_associated_key": { + "cost": 9000, + "arguments": [ + 0, + 0, + 0 + ] + }, + "remove_associated_key": { + "cost": 4200, + "arguments": [ + 0, + 0 + ] + }, + "update_associated_key": { + "cost": 4200, + "arguments": [ + 0, + 0, + 0 + ] + }, + "set_action_threshold": { + "cost": 74000, + "arguments": [ + 0, + 0 + ] + }, + "get_caller": { + "cost": 380, + "arguments": [ + 0 + ] + }, + "get_blocktime": { + "cost": 330, + "arguments": [ + 0 + ] + }, + "create_purse": { + "cost": 2500000000, + "arguments": [ + 0, + 0 + ] + }, + "transfer_to_account": { + "cost": 2500000000, + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ] + }, + "transfer_from_purse_to_account": { + "cost": 2500000000, + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ] + }, + "transfer_from_purse_to_purse": { + "cost": 82000, + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ] + }, + "get_balance": { + "cost": 3800, + "arguments": [ + 0, + 0, + 0 + ] + }, + "get_phase": { + "cost": 710, + "arguments": [ + 0 + ] + }, + "get_system_contract": { + "cost": 1100, + "arguments": [ + 0, + 0, + 0 + ] + }, + "get_main_purse": { + "cost": 1300, + "arguments": [ + 0 + ] + }, + "read_host_buffer": { + "cost": 3500, + "arguments": [ + 0, + 310, + 0 + ] + }, + "create_contract_package_at_hash": { + "cost": 200, + "arguments": [ + 0, + 0 + ] + }, + "create_contract_user_group": { + "cost": 200, + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ] + }, + "add_contract_version": { + "cost": 200, + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ] + }, + "disable_contract_version": { + "cost": 200, + "arguments": [ + 0, + 0, + 0, + 0 + ] + }, + "call_contract": { + "cost": 4500, + "arguments": [ + 0, + 0, + 0, + 0, + 
0, + 420, + 0 + ] + }, + "call_versioned_contract": { + "cost": 4500, + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 420, + 0 + ] + }, + "get_named_arg_size": { + "cost": 200, + "arguments": [ + 0, + 0, + 0 + ] + }, + "get_named_arg": { + "cost": 200, + "arguments": [ + 0, + 0, + 0, + 0 + ] + }, + "remove_contract_user_group": { + "cost": 200, + "arguments": [ + 0, + 0, + 0, + 0 + ] + }, + "provision_contract_user_group_uref": { + "cost": 200, + "arguments": [ + 0, + 0, + 0, + 0, + 0 + ] + }, + "remove_contract_user_group_urefs": { + "cost": 200, + "arguments": [ + 0, + 0, + 0, + 0, + 0, + 0 + ] + }, + "print": { + "cost": 20000, + "arguments": [ + 0, + 4600 + ] + }, + "blake2b": { + "cost": 200, + "arguments": [ + 0, + 0, + 0, + 0 + ] + }, + "random_bytes": { + "cost": 200, + "arguments": [ + 0, + 0 + ] + } + } + }, + "system_config": { + "wasmless_transfer_cost": 100000000, + "auction_costs": { + "get_era_validators": 10000, + "read_seigniorage_recipients": 10000, + "add_bid": 2500000000, + "withdraw_bid": 2500000000, + "delegate": 2500000000, + "undelegate": 2500000000, + "run_auction": 10000, + "slash": 10000, + "distribute": 10000, + "withdraw_delegator_reward": 10000, + "withdraw_validator_reward": 10000, + "read_era_id": 10000, + "activate_bid": 10000, + "redelegate": 2500000000 + }, + "mint_costs": { + "mint": 2500000000, + "reduce_total_supply": 10000, + "create": 2500000000, + "balance": 10000, + "transfer": 10000, + "read_base_round_reward": 10000, + "mint_into_existing_purse": 2500000000 + }, + "handle_payment_costs": { + "get_payment_purse": 10000, + "set_refund_purse": 10000, + "get_refund_purse": 10000, + "finalize_payment": 10000 + }, + "standard_payment_costs": { + "pay": 10000 + } + }, + "validator_slots": 100, + "auction_delay": 1, + "locked_funds_period_millis": 7776000000, + "round_seigniorage_rate": [ + 7, + 87535408 + ], + "unbonding_delay": 7, + "genesis_timestamp_millis": 0 + }, + "chainspec_registry": { + "chainspec_raw_hash": 
"11c0e79b71c3976ccd0c02d1310e2516c08edc9d8b6f57ccd680d63a4d8e72da", + "genesis_accounts_raw_hash": "0afd4a04d7720da9922f2b40249989faf4ff8096e1ed49bee615bb6cb1ee4f7d", + "global_state_raw_hash": null + } + }, + "post_state_hash": "059d4fbbc1048314fd58111bbb6e733626de760b771793a181822e984fdd72a9" +} \ No newline at end of file diff --git a/execution_engine_testing/tests/fixtures/validator_minimum_bid/global_state/data.lmdb b/execution_engine_testing/tests/fixtures/validator_minimum_bid/global_state/data.lmdb new file mode 100644 index 0000000000..25746586ee Binary files /dev/null and b/execution_engine_testing/tests/fixtures/validator_minimum_bid/global_state/data.lmdb differ diff --git a/execution_engine_testing/tests/fixtures/validator_minimum_bid/global_state/data.lmdb-lock b/execution_engine_testing/tests/fixtures/validator_minimum_bid/global_state/data.lmdb-lock new file mode 100644 index 0000000000..6cfbfde98a Binary files /dev/null and b/execution_engine_testing/tests/fixtures/validator_minimum_bid/global_state/data.lmdb-lock differ diff --git a/execution_engine_testing/tests/fixtures/validator_minimum_bid/state.json b/execution_engine_testing/tests/fixtures/validator_minimum_bid/state.json new file mode 100644 index 0000000000..d52514ffcd --- /dev/null +++ b/execution_engine_testing/tests/fixtures/validator_minimum_bid/state.json @@ -0,0 +1,6 @@ +{ + "genesis_request": { + "protocol_version": "1.0.0" + }, + "post_state_hash": "b39980efe0651ef956d8a9af5c197bc6a11af2f164d7f3ebe583a6a3172f57a0" +} \ No newline at end of file diff --git a/execution_engine_testing/tests/src/lib.rs b/execution_engine_testing/tests/src/lib.rs index 090944ac86..b4ef9c5012 100644 --- a/execution_engine_testing/tests/src/lib.rs +++ b/execution_engine_testing/tests/src/lib.rs @@ -1,3 +1,6 @@ -pub mod profiling; +pub mod lmdb_fixture; +pub mod wasm_utils; +pub use casper_engine_test_support::genesis_config_builder::GenesisConfigBuilder; + #[cfg(test)] mod test; diff --git 
a/execution_engine_testing/tests/src/lmdb_fixture.rs b/execution_engine_testing/tests/src/lmdb_fixture.rs new file mode 100644 index 0000000000..e29e70f280 --- /dev/null +++ b/execution_engine_testing/tests/src/lmdb_fixture.rs @@ -0,0 +1,175 @@ +use std::{ + env, + fs::File, + io::Write, + path::{Path, PathBuf}, +}; + +use fs_extra::dir; +use serde::{Deserialize, Serialize}; +use serde_json::json; +use tempfile::TempDir; + +use casper_engine_test_support::{ChainspecConfig, LmdbWasmTestBuilder}; +use casper_storage::data_access_layer::GenesisRequest; +#[cfg(test)] +use casper_types::{AccessRights, Key, URef}; +use casper_types::{Digest, ProtocolVersion}; + +pub const RELEASE_1_2_0: &str = "release_1_2_0"; +pub const RELEASE_1_3_1: &str = "release_1_3_1"; +pub const RELEASE_1_4_2: &str = "release_1_4_2"; +pub const RELEASE_1_4_3: &str = "release_1_4_3"; +pub const RELEASE_1_4_4: &str = "release_1_4_4"; +pub const RELEASE_1_4_5: &str = "release_1_4_5"; +pub const RELEASE_1_5_8: &str = "release_1_5_8"; +const STATE_JSON_FILE: &str = "state.json"; +const FIXTURES_DIRECTORY: &str = "fixtures"; +const GENESIS_PROTOCOL_VERSION_FIELD: &str = "protocol_version"; + +#[cfg(test)] +const RUN_FIXTURE_GENERATORS_ENV: &str = "RUN_FIXTURE_GENERATORS"; + +#[cfg(test)] +pub(crate) fn is_fixture_generator_enabled() -> bool { + env::var_os(RUN_FIXTURE_GENERATORS_ENV).is_some() +} + +/// This is a special place in the global state where fixture contains a registry. +#[cfg(test)] +pub(crate) const ENTRY_REGISTRY_SPECIAL_ADDRESS: Key = + Key::URef(URef::new([0u8; 32], AccessRights::all())); + +fn path_to_lmdb_fixtures() -> PathBuf { + Path::new(env!("CARGO_MANIFEST_DIR")).join(FIXTURES_DIRECTORY) +} + +/// Contains serialized genesis config. +#[derive(Serialize, Deserialize)] +pub struct LmdbFixtureState { + /// Serializes as unstructured JSON value because [`GenesisRequest`] might change over time + /// and likely old fixture might not deserialize cleanly in the future. 
+ pub genesis_request: serde_json::Value, + pub post_state_hash: Digest, +} + +impl LmdbFixtureState { + pub fn genesis_protocol_version(&self) -> ProtocolVersion { + serde_json::from_value( + self.genesis_request + .get(GENESIS_PROTOCOL_VERSION_FIELD) + .cloned() + .unwrap(), + ) + .expect("should have protocol version field") + } +} + +/// Creates a [`LmdbWasmTestBuilder`] from a named fixture directory. +/// +/// As part of this process a new temporary directory will be created to store LMDB files from given +/// fixture, and a builder will be created using it. +/// +/// This function returns a triple of the builder, a [`LmdbFixtureState`] which contains serialized +/// genesis request for given fixture, and a temporary directory which has to be kept in scope. +pub fn builder_from_global_state_fixture( + fixture_name: &str, +) -> (LmdbWasmTestBuilder, LmdbFixtureState, TempDir) { + let source = path_to_lmdb_fixtures().join(fixture_name); + let to = tempfile::tempdir().expect("should create temp dir"); + fs_extra::copy_items(&[source], &to, &dir::CopyOptions::default()) + .expect("should copy global state fixture"); + + let path_to_state = to.path().join(fixture_name).join(STATE_JSON_FILE); + let lmdb_fixture_state: LmdbFixtureState = + serde_json::from_reader(File::open(path_to_state).unwrap()).unwrap(); + let path_to_gs = to.path().join(fixture_name); + + ( + LmdbWasmTestBuilder::open( + &path_to_gs, + ChainspecConfig::default(), + lmdb_fixture_state.genesis_protocol_version(), + lmdb_fixture_state.post_state_hash, + ), + lmdb_fixture_state, + to, + ) +} + +pub fn builder_from_global_state_fixture_with_enable_ae( + fixture_name: &str, + enable_addressable_entity: bool, +) -> (LmdbWasmTestBuilder, LmdbFixtureState, TempDir) { + let source = path_to_lmdb_fixtures().join(fixture_name); + let to = tempfile::tempdir().expect("should create temp dir"); + fs_extra::copy_items(&[source], &to, &dir::CopyOptions::default()) + .expect("should copy global state fixture"); 
+ + let path_to_state = to.path().join(fixture_name).join(STATE_JSON_FILE); + let lmdb_fixture_state: LmdbFixtureState = + serde_json::from_reader(File::open(path_to_state).unwrap()).unwrap(); + let path_to_gs = to.path().join(fixture_name); + + ( + LmdbWasmTestBuilder::open( + &path_to_gs, + ChainspecConfig::default().with_enable_addressable_entity(enable_addressable_entity), + lmdb_fixture_state.genesis_protocol_version(), + lmdb_fixture_state.post_state_hash, + ), + lmdb_fixture_state, + to, + ) +} + +/// Creates a new fixture with a name. +/// +/// This process is currently manual. The process to do this is to check out a release branch, call +/// this function to generate (i.e. `generate_fixture("release_1_3_0")`) and persist it in version +/// control. +pub fn generate_fixture( + name: &str, + genesis_request: GenesisRequest, + post_genesis_setup: impl FnOnce(&mut LmdbWasmTestBuilder), +) -> Result<(), Box> { + let lmdb_fixtures_root = path_to_lmdb_fixtures(); + let fixture_root = lmdb_fixtures_root.join(name); + + let path_to_data_lmdb = fixture_root.join("global_state").join("data.lmdb"); + if path_to_data_lmdb.exists() { + eprintln!( + "Lmdb fixture located at {} already exists. If you need to re-generate a fixture to ensure a serialization \ + changes are backwards compatible please make sure you are running a specific version, or a past commit. \ + Skipping.", + path_to_data_lmdb.display() + ); + return Ok(()); + } + + let chainspec = ChainspecConfig::default(); + let mut builder = LmdbWasmTestBuilder::new_with_config(&fixture_root, chainspec); + + builder.run_genesis(genesis_request.clone()); + + // You can customize the fixture post genesis with a callable. 
+ post_genesis_setup(&mut builder); + + let post_state_hash = builder.get_post_state_hash(); + + let genesis_request_json = json!({ + GENESIS_PROTOCOL_VERSION_FIELD: genesis_request.protocol_version(), + }); + + let state = LmdbFixtureState { + genesis_request: genesis_request_json, + post_state_hash, + }; + let serialized_state = serde_json::to_string_pretty(&state)?; + + let path_to_state_file = fixture_root.join(STATE_JSON_FILE); + + let mut f = File::create(path_to_state_file)?; + f.write_all(serialized_state.as_bytes())?; + Ok(()) +} diff --git a/execution_engine_testing/tests/src/logging/metrics.rs b/execution_engine_testing/tests/src/logging/metrics.rs deleted file mode 100644 index ffe1bf809d..0000000000 --- a/execution_engine_testing/tests/src/logging/metrics.rs +++ /dev/null @@ -1,24 +0,0 @@ -use std::sync::{Arc, Mutex}; - -use log::{Metadata, Record}; - -use casper_execution_engine::shared::logging::TerminalLogger; - -struct Logger { - terminal_logger: TerminalLogger, - log_lines: Arc>>, -} - -impl log::Log for Logger { - fn enabled(&self, metadata: &Metadata) -> bool { - self.terminal_logger.enabled(metadata) - } - - fn log(&self, record: &Record) { - if let Some(log_line) = self.terminal_logger.prepare_log_line(record) { - self.log_lines.lock().unwrap().push(log_line); - } - } - - fn flush(&self) {} -} diff --git a/execution_engine_testing/tests/src/profiling/README.md b/execution_engine_testing/tests/src/profiling/README.md deleted file mode 100644 index fcd86ea78e..0000000000 --- a/execution_engine_testing/tests/src/profiling/README.md +++ /dev/null @@ -1,91 +0,0 @@ -# Overview - -This directory contains executable targets to allow for profiling code used to execute a transfer contract. - -# `state-initializer` - -This is used to initialize global state in preparation for running one of the other executables. It allows them to avoid taking into account the cost of installing the Handle Payment and Mint contracts. 
- -It takes a single optional command line argument to specify the directory in which to store the persistent data and outputs the post-state hash from the commit response. This hash will be used as an input to other profiling executables. - ---- - -# `simple-transfer` - -This runs a single transfer via the `LmdbWasmTestBuilder` and is designed to be used along with `perf` to analyse the performance data. - -First, run `state-initializer` to set up a persistent global state, then the `simple-transfer` executable will make use of that state, and can be profiled. - -For more details on each, run the executable with `--help`. - -## Example usage - -To profile `simple-transfer` using `perf` and open the flamegraph in Firefox, follow these steps: - -* Install `perf` (see [this askubuntu answer](https://askubuntu.com/a/578618/75096)) -* Clone and add [Flamegraph](https://github.com/brendangregg/FlameGraph) to your path -* Run: - ```bash - cd casper-node/ - make build-contracts-rs - cd execution_engine_testing/tests/ - cargo build --release --bin state-initializer - cargo build --release --bin simple-transfer - ../../target/release/state-initializer --data-dir=../../target | perf record -g --call-graph dwarf ../../target/release/simple-transfer --data-dir=../../target - perf script | stackcollapse-perf.pl | flamegraph.pl > flame.svg - firefox flame.svg - ``` - - -## Troubleshooting - -Due to kernel hardening, `perf` may need some or all of the following changes to be made in order to run properly: - - -### Error message about `perf_event_paranoid`: - -See [this superuser answer](https://superuser.com/a/980757/463043) for details. In summary, to temporarily fix the issue: - -```bash -sudo sysctl -w kernel.perf_event_paranoid=-1 -``` - -and to permanently fix it: - -```bash -sudo sh -c 'echo kernel.perf_event_paranoid=-1 >> /etc/sysctl.d/99-my-settings-local.conf' -sysctl -p /etc/sysctl.conf -``` - - -### Error message about `kptr_restrict`: - -See [this S.O. 
answer](https://stackoverflow.com/a/36263349/2556117) for details. In summary, to temporarily fix the issue: - -```bash -sudo sysctl -w kernel.kptr_restrict=0 -``` - -and to permanently fix it: - -```bash -sudo sh -c 'echo kernel.kptr_restrict=0 >> /etc/sysctl.d/99-my-settings-local.conf' -sysctl -p /etc/sysctl.conf -``` - ---- - -# `host-function-metrics` - -This tool generates CSV files containing metrics for the host functions callable by Wasm smart contracts and which are currently unmetered. - -Note that running the tool with the default 10,000 repetitions can take in excess of half an hour to complete. - -```bash -cd casper-node/ -make build-contracts-rs -cd execution_engine_testing/tests/ -cargo build --release --bin state-initializer -cargo build --release --bin host-function-metrics -../../target/release/state-initializer --data-dir=../../target | ../../target/release/host-function-metrics --data-dir=../../target --output-dir=../../target/host-function-metrics -``` diff --git a/execution_engine_testing/tests/src/profiling/host_function_metrics.rs b/execution_engine_testing/tests/src/profiling/host_function_metrics.rs deleted file mode 100644 index f9b73c92b6..0000000000 --- a/execution_engine_testing/tests/src/profiling/host_function_metrics.rs +++ /dev/null @@ -1,334 +0,0 @@ -//! This executable is for outputting metrics on each of the EE host functions. -//! -//! In order to set up the required global state, the `state-initializer` should have been run -//! first. 
- -use std::{ - collections::BTreeMap, - env, - fs::{self, File}, - io::{self, Write}, - iter, - path::{Path, PathBuf}, - process::Command, - str::FromStr, -}; - -use clap::{crate_version, App, Arg}; -use log::LevelFilter; -use rand::{self, Rng}; -use serde_json::Value; - -use casper_engine_test_support::internal::{ - DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder, -}; -use casper_execution_engine::{ - core::engine_state::EngineConfig, - shared::logging::{self, Settings}, -}; -use casper_types::{runtime_args, ApiError, RuntimeArgs}; - -use casper_engine_tests::profiling; -use casper_execution_engine::shared::newtypes::Blake2bHash; - -const ABOUT: &str = - "Executes a contract which logs metrics for all host functions. Note that the \ - 'state-initializer' executable should be run first to set up the required global state."; - -const EXECUTE_AS_SUBPROCESS_ARG: &str = "execute-as-subprocess"; - -const ROOT_HASH_ARG_NAME: &str = "root-hash"; -const ROOT_HASH_ARG_VALUE_NAME: &str = "HEX-ENCODED HASH"; -const ROOT_HASH_ARG_HELP: &str = - "Initial root hash; the output of running the 'state-initializer' executable"; - -const REPETITIONS_ARG_NAME: &str = "repetitions"; -const REPETITIONS_ARG_SHORT: &str = "r"; -const REPETITIONS_ARG_DEFAULT: &str = "10000"; -const REPETITIONS_ARG_VALUE_NAME: &str = "NUM"; -const REPETITIONS_ARG_HELP: &str = "Number of repetitions of each host function call"; - -const OUTPUT_DIR_ARG_NAME: &str = "output-dir"; -const OUTPUT_DIR_ARG_SHORT: &str = "o"; -const OUTPUT_DIR_ARG_VALUE_NAME: &str = "DIR"; -const OUTPUT_DIR_ARG_HELP: &str = - "Path to output directory. It will be created if it doesn't exist. 
If unspecified, the \ - current working directory will be used"; - -const HOST_FUNCTION_METRICS_CONTRACT: &str = "host_function_metrics.wasm"; -const PAYMENT_AMOUNT: u64 = profiling::ACCOUNT_1_INITIAL_AMOUNT - 1_000_000_000; -const EXPECTED_REVERT_VALUE: u16 = 10; -const CSV_HEADER: &str = "args,n_exec,total_elapsed_time"; -const ARG_AMOUNT: &str = "amount"; -const ARG_SEED: &str = "seed"; -const ARG_OTHERS: &str = "others"; - -fn execute_as_subprocess_arg() -> Arg<'static, 'static> { - Arg::with_name(EXECUTE_AS_SUBPROCESS_ARG) - .long(EXECUTE_AS_SUBPROCESS_ARG) - .hidden(true) -} - -fn root_hash_arg() -> Arg<'static, 'static> { - Arg::with_name(ROOT_HASH_ARG_NAME) - .value_name(ROOT_HASH_ARG_VALUE_NAME) - .help(ROOT_HASH_ARG_HELP) -} - -fn repetitions_arg() -> Arg<'static, 'static> { - Arg::with_name(REPETITIONS_ARG_NAME) - .long(REPETITIONS_ARG_NAME) - .short(REPETITIONS_ARG_SHORT) - .default_value(REPETITIONS_ARG_DEFAULT) - .value_name(REPETITIONS_ARG_VALUE_NAME) - .help(REPETITIONS_ARG_HELP) -} - -fn output_dir_arg() -> Arg<'static, 'static> { - Arg::with_name(OUTPUT_DIR_ARG_NAME) - .long(OUTPUT_DIR_ARG_NAME) - .short(OUTPUT_DIR_ARG_SHORT) - .value_name(OUTPUT_DIR_ARG_VALUE_NAME) - .help(OUTPUT_DIR_ARG_HELP) -} - -#[derive(Debug)] -struct Args { - execute_as_subprocess: bool, - root_hash: Option, - repetitions: usize, - output_dir: PathBuf, - data_dir: PathBuf, -} - -impl Args { - fn new() -> Self { - let exe_name = profiling::exe_name(); - let data_dir_arg = profiling::data_dir_arg(); - let arg_matches = App::new(&exe_name) - .version(crate_version!()) - .about(ABOUT) - .arg(execute_as_subprocess_arg()) - .arg(root_hash_arg()) - .arg(repetitions_arg()) - .arg(output_dir_arg()) - .arg(data_dir_arg) - .get_matches(); - let execute_as_subprocess = arg_matches.is_present(EXECUTE_AS_SUBPROCESS_ARG); - let root_hash = arg_matches - .value_of(ROOT_HASH_ARG_NAME) - .map(ToString::to_string); - let repetitions = arg_matches - .value_of(REPETITIONS_ARG_NAME) - 
.map(profiling::parse_count) - .expect("should have repetitions"); - let output_dir = match arg_matches.value_of(OUTPUT_DIR_ARG_NAME) { - Some(dir) => PathBuf::from_str(dir).expect("Expected a valid unicode path"), - None => env::current_dir().expect("Expected to be able to access current working dir"), - }; - let data_dir = profiling::data_dir(&arg_matches); - Args { - execute_as_subprocess, - root_hash, - repetitions, - output_dir, - data_dir, - } - } -} - -/// Executes the host-function-metrics contract repeatedly to generate metrics in stdout. -fn run_test(root_hash: Vec, repetitions: usize, data_dir: &Path) { - let log_settings = Settings::new(LevelFilter::Warn).with_metrics_enabled(true); - let _ = logging::initialize(log_settings); - - let account_1_account_hash = profiling::account_1_account_hash(); - let account_2_account_hash = profiling::account_2_account_hash(); - - let engine_config = EngineConfig::new(); - - let mut test_builder = - LmdbWasmTestBuilder::open(data_dir, engine_config, Blake2bHash::new(&root_hash)); - - let mut rng = rand::thread_rng(); - - for _ in 0..repetitions { - let seed: u64 = rng.gen(); - let random_bytes_length: usize = rng.gen_range(0..10_000); - let mut random_bytes = vec![0_u8; random_bytes_length]; - rng.fill(random_bytes.as_mut_slice()); - - let deploy = DeployItemBuilder::new() - .with_address(account_1_account_hash) - .with_deploy_hash(rng.gen()) - .with_session_code( - HOST_FUNCTION_METRICS_CONTRACT, - runtime_args! { - ARG_SEED => seed, - ARG_OTHERS => (random_bytes, account_1_account_hash, account_2_account_hash), - }, - ) - .with_empty_payment_bytes(runtime_args! { ARG_AMOUNT => PAYMENT_AMOUNT }) - .with_authorization_keys(&[account_1_account_hash]) - .build(); - let exec_request = ExecuteRequestBuilder::new() - .push_deploy(deploy.clone()) - .build(); - - test_builder.exec(exec_request); - // Should revert with User error 10. 
- let error_msg = test_builder - .exec_error_message(0) - .expect("should have error message"); - assert!( - error_msg.contains(&format!("{:?}", ApiError::User(EXPECTED_REVERT_VALUE))), - error_msg - ); - } -} - -#[derive(Debug)] -struct Metrics { - duration: String, - others: BTreeMap, -} - -fn gather_metrics(stdout: String) -> BTreeMap> { - const PAYLOAD_KEY: &str = "payload="; - const DESCRIPTION_KEY: &str = "description"; - const HOST_FUNCTION_PREFIX: &str = "host_function_"; - const PROPERTIES_KEY: &str = "properties"; - const DURATION_KEY: &str = "duration_in_seconds"; - const MESSAGE_KEY: &str = "message"; - const MESSAGE_TEMPLATE_KEY: &str = "message_template"; - - let mut result = BTreeMap::new(); - - for line in stdout.lines() { - if let Some(index) = line.find(PAYLOAD_KEY) { - let (_, payload_slice) = line.split_at(index + PAYLOAD_KEY.len()); - let mut payload = - serde_json::from_str::(payload_slice).expect("payload should parse as JSON"); - - let description = payload - .get_mut(DESCRIPTION_KEY) - .expect("payload should have description field") - .take(); - let function_id = description - .as_str() - .expect("description field should parse as string") - .split(' ') - .next() - .expect("description field should consist of function name followed by a space"); - if !function_id.starts_with(HOST_FUNCTION_PREFIX) { - continue; - } - let function_name = function_id - .split_at(HOST_FUNCTION_PREFIX.len()) - .1 - .to_string(); - - let metrics_vec = result.entry(function_name).or_insert_with(Vec::new); - - let mut properties: BTreeMap = serde_json::from_value( - payload - .get_mut(PROPERTIES_KEY) - .expect("payload should have properties field") - .take(), - ) - .expect("properties should parse as pairs of strings"); - - let duration = properties - .remove(DURATION_KEY) - .expect("properties should have a duration entry"); - let _ = properties.remove(MESSAGE_KEY); - let _ = properties.remove(MESSAGE_TEMPLATE_KEY); - - let metrics = Metrics { - duration, - 
others: properties, - }; - metrics_vec.push(metrics); - } - } - - result -} - -fn generate_csv(function_name: String, metrics_vec: Vec, output_dir: &Path) { - let file_path = output_dir.join(format!("{}.csv", function_name)); - let mut file = File::create(&file_path) - .unwrap_or_else(|_| panic!("should create {}", file_path.display())); - - writeln!(file, "{}", CSV_HEADER) - .unwrap_or_else(|_| panic!("should write to {}", file_path.display())); - - for metrics in metrics_vec { - write!(file, "\"(").unwrap_or_else(|_| panic!("should write to {}", file_path.display())); - for (_metric_key, metric_value) in metrics.others { - write!(file, "{},", metric_value) - .unwrap_or_else(|_| panic!("should write to {}", file_path.display())); - } - writeln!(file, ")\",1,{}", metrics.duration) - .unwrap_or_else(|_| panic!("should write to {}", file_path.display())); - } -} - -fn main() { - let args = Args::new(); - - // If the required initial root hash wasn't passed as a command line arg, expect to read it in - // from stdin to allow for it to be piped from the output of 'state-initializer'. - let (root_hash, root_hash_read_from_stdin) = match args.root_hash { - Some(root_hash) => (root_hash, false), - None => { - let mut input = String::new(); - let _ = io::stdin().read_line(&mut input); - (input.trim_end().to_string(), true) - } - }; - - // We're running as a subprocess - execute the test to output the metrics to stdout. - if args.execute_as_subprocess { - return run_test( - profiling::parse_hash(&root_hash), - args.repetitions, - &args.data_dir, - ); - } - - // We're running as the top-level process - invoke the current exe as a subprocess to capture - // its stdout. 
- let subprocess_flag = format!("--{}", EXECUTE_AS_SUBPROCESS_ARG); - let mut subprocess_args = env::args().chain(iter::once(subprocess_flag)); - let mut subprocess = Command::new( - subprocess_args - .next() - .expect("should get current executable's full path"), - ); - subprocess.args(subprocess_args); - if root_hash_read_from_stdin { - subprocess.arg(root_hash); - } - - let subprocess_output = subprocess - .output() - .expect("should run current executable as a subprocess"); - - let stdout = String::from_utf8(subprocess_output.stdout).expect("should be valid UTF-8"); - if !subprocess_output.status.success() { - let stderr = String::from_utf8(subprocess_output.stderr).expect("should be valid UTF-8"); - panic!( - "\nFailed to execute as subprocess:\n{}\n\n{}\n\n", - stdout, stderr - ); - } - - let all_metrics = gather_metrics(stdout); - let output_dir = &args.output_dir; - fs::create_dir_all(output_dir) - .unwrap_or_else(|_| panic!("should create {}", output_dir.display())); - for (function_id, metrics_vec) in all_metrics { - generate_csv(function_id, metrics_vec, &args.output_dir); - } -} diff --git a/execution_engine_testing/tests/src/profiling/mod.rs b/execution_engine_testing/tests/src/profiling/mod.rs deleted file mode 100644 index 7442493508..0000000000 --- a/execution_engine_testing/tests/src/profiling/mod.rs +++ /dev/null @@ -1,77 +0,0 @@ -use std::{env, path::PathBuf, str::FromStr}; - -use clap::{Arg, ArgMatches}; - -use casper_engine_test_support::DEFAULT_ACCOUNT_INITIAL_BALANCE; -use casper_types::{account::AccountHash, U512}; - -const DATA_DIR_ARG_NAME: &str = "data-dir"; -const DATA_DIR_ARG_SHORT: &str = "d"; -const DATA_DIR_ARG_LONG: &str = "data-dir"; -const DATA_DIR_ARG_VALUE_NAME: &str = "PATH"; -const DATA_DIR_ARG_HELP: &str = "Directory in which persistent data is stored [default: current \ - working directory]"; - -const ACCOUNT_1_ADDR: AccountHash = AccountHash::new([1u8; 32]); -pub const ACCOUNT_1_INITIAL_AMOUNT: u64 = 
DEFAULT_ACCOUNT_INITIAL_BALANCE - 1_000_000_000; -const ACCOUNT_2_ADDR: AccountHash = AccountHash::new([2u8; 32]); - -pub enum TransferMode { - WASM, - WASMLESS, -} - -pub fn exe_name() -> String { - env::current_exe() - .expect("Expected to read current executable's name") - .file_stem() - .expect("Expected a file name for the current executable") - .to_str() - .expect("Expected valid unicode for the current executable's name") - .to_string() -} - -pub fn data_dir_arg() -> Arg<'static, 'static> { - Arg::with_name(DATA_DIR_ARG_NAME) - .short(DATA_DIR_ARG_SHORT) - .long(DATA_DIR_ARG_LONG) - .value_name(DATA_DIR_ARG_VALUE_NAME) - .help(DATA_DIR_ARG_HELP) - .takes_value(true) -} - -pub fn data_dir(arg_matches: &ArgMatches) -> PathBuf { - match arg_matches.value_of(DATA_DIR_ARG_NAME) { - Some(dir) => PathBuf::from_str(dir).expect("Expected a valid unicode path"), - None => env::current_dir().expect("Expected to be able to access current working dir"), - } -} - -pub fn parse_hash(encoded_hash: &str) -> Vec { - base16::decode(encoded_hash).expect("Expected a valid, hex-encoded hash") -} - -pub fn parse_count(count_as_str: &str) -> usize { - let count: usize = count_as_str.parse().expect("Expected an integral count"); - assert!(count > 0, "Expected count > 0"); - count -} - -pub fn parse_transfer_mode(transfer_mode: &str) -> TransferMode { - match transfer_mode { - "WASM" => TransferMode::WASM, - _ => TransferMode::WASMLESS, - } -} - -pub fn account_1_account_hash() -> AccountHash { - ACCOUNT_1_ADDR -} - -pub fn account_1_initial_amount() -> U512 { - ACCOUNT_1_INITIAL_AMOUNT.into() -} - -pub fn account_2_account_hash() -> AccountHash { - ACCOUNT_2_ADDR -} diff --git a/execution_engine_testing/tests/src/profiling/perf.sh b/execution_engine_testing/tests/src/profiling/perf.sh deleted file mode 100755 index 75207d4c74..0000000000 --- a/execution_engine_testing/tests/src/profiling/perf.sh +++ /dev/null @@ -1,54 +0,0 @@ -#!/usr/bin/env bash - -set -eu -o pipefail - 
-RED='\033[0;31m' -CYAN='\033[0;36m' -NO_COLOR='\033[0m' -EE_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../../.." >/dev/null 2>&1 && pwd)" -TEMP_DIR=$(mktemp -d -t simple-transfer-perf-XXXXX) - -check_for_perf() { - if ! [[ -x "$(command -v perf)" ]]; then - printf "${RED}perf not installed${NO_COLOR}\n\n" - printf "For Debian, try:\n" - printf "${CYAN}sudo apt install linux-tools-common linux-tools-generic linux-tools-$(uname -r)${NO_COLOR}\n\n" - printf "For Redhat, try:\n" - printf "${CYAN}sudo yum install perf${NO_COLOR}\n\n" - exit 127 - fi -} - -run_perf() { - cd $EE_DIR - make build-contracts - cd engine-tests/ - cargo build --release --bin state-initializer - cargo build --release --bin simple-transfer - TARGET_DIR="${EE_DIR}/target/release" - DATA_DIR_ARG="--data-dir=../target" - ${TARGET_DIR}/state-initializer ${DATA_DIR_ARG} | perf record -g --call-graph dwarf ${TARGET_DIR}/simple-transfer ${DATA_DIR_ARG} - mv perf.data ${TEMP_DIR} -} - -check_or_clone_flamegraph() { - FLAMEGRAPH_DIR="/tmp/FlameGraph" - export PATH=${FLAMEGRAPH_DIR}:${PATH} - if ! [[ -x "$(command -v stackcollapse-perf.pl)" ]] || ! [[ -x "$(command -v flamegraph.pl)" ]]; then - rm -rf ${FLAMEGRAPH_DIR} - git clone --depth=1 https://github.com/brendangregg/FlameGraph ${FLAMEGRAPH_DIR} - fi -} - -create_flamegraph() { - FLAMEGRAPH="${TEMP_DIR}/flame.svg" - printf "Creating flamegraph at ${FLAMEGRAPH}\n" - cd ${TEMP_DIR} - perf script | stackcollapse-perf.pl | flamegraph.pl > ${FLAMEGRAPH} - x-www-browser ${FLAMEGRAPH} -} - -check_for_perf -run_perf -check_or_clone_flamegraph -create_flamegraph diff --git a/execution_engine_testing/tests/src/profiling/simple_transfer.rs b/execution_engine_testing/tests/src/profiling/simple_transfer.rs deleted file mode 100644 index 076a3f5162..0000000000 --- a/execution_engine_testing/tests/src/profiling/simple_transfer.rs +++ /dev/null @@ -1,121 +0,0 @@ -//! This executable is designed to be used to profile a single execution of a simple contract which -//! 
transfers an amount between two accounts. -//! -//! In order to set up the required global state for the transfer, the `state-initializer` should -//! have been run first. -//! -//! By avoiding setting up global state as part of this executable, it will allow profiling to be -//! done only on meaningful code, rather than including test setup effort in the profile results. - -use std::{env, io, path::PathBuf}; - -use clap::{crate_version, App, Arg}; - -use casper_engine_test_support::internal::{ - DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_PAYMENT, -}; -use casper_execution_engine::{core::engine_state::EngineConfig, shared::newtypes::Blake2bHash}; -use casper_types::{runtime_args, RuntimeArgs, U512}; - -use casper_engine_tests::profiling; - -const ABOUT: &str = "Executes a simple contract which transfers an amount between two accounts. \ - Note that the 'state-initializer' executable should be run first to set up the required \ - global state."; - -const ROOT_HASH_ARG_NAME: &str = "root-hash"; -const ROOT_HASH_ARG_VALUE_NAME: &str = "HEX-ENCODED HASH"; -const ROOT_HASH_ARG_HELP: &str = - "Initial root hash; the output of running the 'state-initializer' executable"; - -const VERBOSE_ARG_NAME: &str = "verbose"; -const VERBOSE_ARG_SHORT: &str = "v"; -const VERBOSE_ARG_LONG: &str = "verbose"; -const VERBOSE_ARG_HELP: &str = "Display the transforms resulting from the contract execution"; - -const TRANSFER_AMOUNT: u64 = 1; - -fn root_hash_arg() -> Arg<'static, 'static> { - Arg::with_name(ROOT_HASH_ARG_NAME) - .value_name(ROOT_HASH_ARG_VALUE_NAME) - .help(ROOT_HASH_ARG_HELP) -} - -fn verbose_arg() -> Arg<'static, 'static> { - Arg::with_name(VERBOSE_ARG_NAME) - .short(VERBOSE_ARG_SHORT) - .long(VERBOSE_ARG_LONG) - .help(VERBOSE_ARG_HELP) -} - -#[derive(Debug)] -struct Args { - root_hash: Option>, - data_dir: PathBuf, - verbose: bool, -} - -impl Args { - fn new() -> Self { - let exe_name = profiling::exe_name(); - let data_dir_arg = 
profiling::data_dir_arg(); - let arg_matches = App::new(&exe_name) - .version(crate_version!()) - .about(ABOUT) - .arg(root_hash_arg()) - .arg(data_dir_arg) - .arg(verbose_arg()) - .get_matches(); - let root_hash = arg_matches - .value_of(ROOT_HASH_ARG_NAME) - .map(profiling::parse_hash); - let data_dir = profiling::data_dir(&arg_matches); - let verbose = arg_matches.is_present(VERBOSE_ARG_NAME); - Args { - root_hash, - data_dir, - verbose, - } - } -} - -fn main() { - let args = Args::new(); - - // If the required initial root hash wasn't passed as a command line arg, expect to read it in - // from stdin to allow for it to be piped from the output of 'state-initializer'. - let root_hash = args.root_hash.unwrap_or_else(|| { - let mut input = String::new(); - let _ = io::stdin().read_line(&mut input); - profiling::parse_hash(input.trim_end()) - }); - - let account_1_account_hash = profiling::account_1_account_hash(); - let account_2_account_hash = profiling::account_2_account_hash(); - - let exec_request = { - let deploy = DeployItemBuilder::new() - .with_address(account_1_account_hash) - .with_deploy_hash([1; 32]) - .with_session_code( - "simple_transfer.wasm", - runtime_args! { "target" =>account_2_account_hash, "amount" => U512::from(TRANSFER_AMOUNT) }, - ) - .with_empty_payment_bytes( runtime_args! 
{ "amount" => *DEFAULT_PAYMENT}) - .with_authorization_keys(&[account_1_account_hash]) - .build(); - - ExecuteRequestBuilder::new().push_deploy(deploy).build() - }; - - let engine_config = EngineConfig::new(); - - let mut test_builder = - LmdbWasmTestBuilder::open(&args.data_dir, engine_config, Blake2bHash::new(&root_hash)); - - test_builder.exec(exec_request).expect_success().commit(); - - if args.verbose { - println!("{:#?}", test_builder.get_transforms()); - } -} diff --git a/execution_engine_testing/tests/src/profiling/state_initializer.rs b/execution_engine_testing/tests/src/profiling/state_initializer.rs deleted file mode 100644 index 293b4d9ac7..0000000000 --- a/execution_engine_testing/tests/src/profiling/state_initializer.rs +++ /dev/null @@ -1,94 +0,0 @@ -//! This executable is designed to be run to set up global state in preparation for running other -//! standalone test executable(s). This will allow profiling to be done on executables running only -//! meaningful code, rather than including test setup effort in the profile results. - -use std::{env, path::PathBuf}; - -use clap::{crate_version, App}; - -use casper_engine_test_support::internal::{ - DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder, ARG_AMOUNT, DEFAULT_ACCOUNTS, - DEFAULT_ACCOUNT_ADDR, DEFAULT_AUCTION_DELAY, DEFAULT_GENESIS_CONFIG_HASH, - DEFAULT_GENESIS_TIMESTAMP_MILLIS, DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS, DEFAULT_PAYMENT, - DEFAULT_PROTOCOL_VERSION, DEFAULT_ROUND_SEIGNIORAGE_RATE, DEFAULT_SYSTEM_CONFIG, - DEFAULT_UNBONDING_DELAY, DEFAULT_VALIDATOR_SLOTS, DEFAULT_WASM_CONFIG, -}; -use casper_engine_tests::profiling; -use casper_execution_engine::core::engine_state::{ - engine_config::EngineConfig, genesis::ExecConfig, run_genesis_request::RunGenesisRequest, -}; -use casper_types::{runtime_args, RuntimeArgs}; - -const ABOUT: &str = "Initializes global state in preparation for profiling runs. 
Outputs the root \ - hash from the commit response."; -const STATE_INITIALIZER_CONTRACT: &str = "state_initializer.wasm"; -const ARG_ACCOUNT1_HASH: &str = "account_1_account_hash"; -const ARG_ACCOUNT1_AMOUNT: &str = "account_1_amount"; -const ARG_ACCOUNT2_HASH: &str = "account_2_account_hash"; - -fn data_dir() -> PathBuf { - let exe_name = profiling::exe_name(); - let data_dir_arg = profiling::data_dir_arg(); - let arg_matches = App::new(&exe_name) - .version(crate_version!()) - .about(ABOUT) - .arg(data_dir_arg) - .get_matches(); - profiling::data_dir(&arg_matches) -} - -fn main() { - let data_dir = data_dir(); - - let genesis_account_hash = *DEFAULT_ACCOUNT_ADDR; - let account_1_account_hash = profiling::account_1_account_hash(); - let account_1_initial_amount = profiling::account_1_initial_amount(); - let account_2_account_hash = profiling::account_2_account_hash(); - - let exec_request = { - let deploy = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_deploy_hash([1; 32]) - .with_session_code( - STATE_INITIALIZER_CONTRACT, - runtime_args! { - ARG_ACCOUNT1_HASH => account_1_account_hash, - ARG_ACCOUNT1_AMOUNT => account_1_initial_amount, - ARG_ACCOUNT2_HASH => account_2_account_hash, - }, - ) - .with_empty_payment_bytes(runtime_args! 
{ ARG_AMOUNT => *DEFAULT_PAYMENT, }) - .with_authorization_keys(&[genesis_account_hash]) - .build(); - - ExecuteRequestBuilder::new().push_deploy(deploy).build() - }; - - let engine_config = EngineConfig::new(); - let mut builder = LmdbWasmTestBuilder::new_with_config(&data_dir, engine_config); - - let exec_config = ExecConfig::new( - DEFAULT_ACCOUNTS.clone(), - *DEFAULT_WASM_CONFIG, - *DEFAULT_SYSTEM_CONFIG, - DEFAULT_VALIDATOR_SLOTS, - DEFAULT_AUCTION_DELAY, - DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS, - DEFAULT_ROUND_SEIGNIORAGE_RATE, - DEFAULT_UNBONDING_DELAY, - DEFAULT_GENESIS_TIMESTAMP_MILLIS, - ); - let run_genesis_request = RunGenesisRequest::new( - *DEFAULT_GENESIS_CONFIG_HASH, - *DEFAULT_PROTOCOL_VERSION, - exec_config, - ); - - let post_state_hash = builder - .run_genesis(&run_genesis_request) - .exec(exec_request) - .expect_success() - .commit() - .get_post_state_hash(); - println!("{}", base16::encode_lower(&post_state_hash)); -} diff --git a/execution_engine_testing/tests/src/test/calling_packages_by_version_query.rs b/execution_engine_testing/tests/src/test/calling_packages_by_version_query.rs new file mode 100644 index 0000000000..ec064ed8fe --- /dev/null +++ b/execution_engine_testing/tests/src/test/calling_packages_by_version_query.rs @@ -0,0 +1,665 @@ +use std::collections::BTreeSet; + +/// This test assumes that the provided fixture has v1 and v2 installed in protocol version 1 +use casper_engine_test_support::{ + ExecuteRequestBuilder, LmdbWasmTestBuilder, UpgradeRequestBuilder, DEFAULT_ACCOUNT_ADDR, + DEFAULT_EXEC_CONFIG, DEFAULT_GENESIS_CONFIG_HASH, DEFAULT_PROTOCOL_VERSION, +}; +use casper_execution_engine::{ + engine_state::{EngineConfigBuilder, Error, SessionDataV1, SessionInputData}, + execution::ExecError, +}; +use casper_storage::data_access_layer::GenesisRequest; +use casper_types::{ + contracts::ProtocolVersionMajor, runtime_args, AddressableEntityHash, ChainspecRegistry, + EntityVersion, EntityVersionKey, EraId, HashAddr, 
HoldBalanceHandling, Key, NamedKeys, + PackageHash, PricingMode, ProtocolVersion, RuntimeArgs, StoredValue, Timestamp, + TransactionEntryPoint, TransactionInvocationTarget, TransactionRuntimeParams, + TransactionTarget, TransactionV1Hash, +}; +use once_cell::sync::Lazy; +use rand::Rng; + +static V3_0_0: Lazy = Lazy::new(|| ProtocolVersion::from_parts(3, 0, 0)); + +const DISABLE_CONTRACT: &str = "disable_contract.wasm"; + +static CURRENT_PROTOCOL_MAJOR: Lazy = Lazy::new(|| DEFAULT_PROTOCOL_VERSION.value().major); + +const CONTRACT_WASM: &str = "key_putter.wasm"; + +#[ignore] +#[test] +fn should_call_package_hash_by_exact_version() { + let mut builder = prepare_v1_builder(); + install(&mut builder, CONTRACT_WASM, ProtocolVersion::V1_0_0); + install(&mut builder, CONTRACT_WASM, ProtocolVersion::V1_0_0); + + exec_put_key_by_package_hash(&mut builder, Some(1), Some(1), ProtocolVersion::V1_0_0); + + let hash_of_1_1 = get_contract_hash_for_specific_version(&mut builder, 1, 1).unwrap(); + let value = get_value_of_named_key_for_contract_hash_as_str( + &mut builder, + hash_of_1_1, + "key_placeholder", + ) + .unwrap(); + assert_eq!(value, "key_putter_v1"); +} + +#[ignore] +#[test] +fn should_call_package_name_by_exact_version() { + let mut builder = prepare_v1_builder(); + install(&mut builder, CONTRACT_WASM, ProtocolVersion::V1_0_0); + install(&mut builder, CONTRACT_WASM, ProtocolVersion::V1_0_0); + + exec_put_key_by_package_name(&mut builder, Some(1), Some(1), ProtocolVersion::V1_0_0); + + let hash_of_1_1 = get_contract_hash_for_specific_version(&mut builder, 1, 1).unwrap(); + let value = get_value_of_named_key_for_contract_hash_as_str( + &mut builder, + hash_of_1_1, + "key_placeholder", + ) + .unwrap(); + assert_eq!(value, "key_putter_v1"); +} + +#[ignore] +#[test] +fn should_call_package_hash_by_exact_version_after_protocol_version_change() { + let mut builder = prepare_v1_builder(); + install(&mut builder, CONTRACT_WASM, ProtocolVersion::V1_0_0); + install(&mut builder, 
CONTRACT_WASM, ProtocolVersion::V1_0_0); + + upgrade_version(&mut builder, 2, false); + install(&mut builder, CONTRACT_WASM, ProtocolVersion::V2_0_0); + + exec_put_key_by_package_hash(&mut builder, Some(1), Some(1), ProtocolVersion::V2_0_0); + + let hash_of_1_1 = get_contract_hash_for_specific_version(&mut builder, 1, 1).unwrap(); + let value = get_value_of_named_key_for_contract_hash_as_str( + &mut builder, + hash_of_1_1, + "key_placeholder", + ) + .unwrap(); + assert_eq!(value, "key_putter_v1"); +} + +#[ignore] +#[test] +fn should_call_package_name_by_exact_version_after_protocol_version_change() { + let mut builder = prepare_v1_builder(); + install(&mut builder, CONTRACT_WASM, ProtocolVersion::V1_0_0); + install(&mut builder, CONTRACT_WASM, ProtocolVersion::V1_0_0); + upgrade_version(&mut builder, 2, false); + + exec_put_key_by_package_name(&mut builder, Some(1), Some(1), ProtocolVersion::V2_0_0); + + let hash_of_1_1 = get_contract_hash_for_specific_version(&mut builder, 1, 1).unwrap(); + let value = get_value_of_named_key_for_contract_hash_as_str( + &mut builder, + hash_of_1_1, + "key_placeholder", + ) + .unwrap(); + assert_eq!(value, "key_putter_v1"); +} + +#[ignore] +#[test] +fn should_call_by_hash_newest_version_when_only_major_specified() { + let mut builder = prepare_v1_builder(); + install(&mut builder, CONTRACT_WASM, ProtocolVersion::V1_0_0); + install(&mut builder, CONTRACT_WASM, ProtocolVersion::V1_0_0); + upgrade_version(&mut builder, 2, false); + install(&mut builder, CONTRACT_WASM, ProtocolVersion::V2_0_0); + + exec_put_key_by_package_hash(&mut builder, Some(1), None, ProtocolVersion::V2_0_0); + + let hash_of_1_2 = get_contract_hash_for_specific_version(&mut builder, 1, 2).unwrap(); + let value = get_value_of_named_key_for_contract_hash_as_str( + &mut builder, + hash_of_1_2, + "key_placeholder", + ) + .unwrap(); + assert_eq!(value, "key_putter_v2"); + + disable_contract_version(&mut builder, 1, 2); + // After disabling 1.2, selecting by major 
protocol 1 should point to 1.1 + + exec_put_key_by_package_hash(&mut builder, Some(1), None, ProtocolVersion::V2_0_0); + + let hash_of_1_1 = get_contract_hash_for_specific_version(&mut builder, 1, 1).unwrap(); + let value = get_value_of_named_key_for_contract_hash_as_str( + &mut builder, + hash_of_1_1, + "key_placeholder", + ) + .unwrap(); + assert_eq!(value, "key_putter_v1"); +} + +#[ignore] +#[test] +fn should_call_by_name_newest_version_when_only_major_specified() { + let mut builder = prepare_v1_builder(); + install(&mut builder, CONTRACT_WASM, ProtocolVersion::V1_0_0); + install(&mut builder, CONTRACT_WASM, ProtocolVersion::V1_0_0); + upgrade_version(&mut builder, 2, false); + install(&mut builder, CONTRACT_WASM, ProtocolVersion::V2_0_0); + + exec_put_key_by_package_name(&mut builder, Some(1), None, ProtocolVersion::V2_0_0); + + let hash_of_1_2 = get_contract_hash_for_specific_version(&mut builder, 1, 2).unwrap(); + let value = get_value_of_named_key_for_contract_hash_as_str( + &mut builder, + hash_of_1_2, + "key_placeholder", + ) + .unwrap(); + assert_eq!(value, "key_putter_v2"); + + disable_contract_version(&mut builder, 1, 2); + // After disabling 1.2, selecting by major protocol 1 should point to 1.1 + + exec_put_key_by_package_name(&mut builder, Some(1), None, ProtocolVersion::V2_0_0); + + let hash_of_1_1 = get_contract_hash_for_specific_version(&mut builder, 1, 1).unwrap(); + let value = get_value_of_named_key_for_contract_hash_as_str( + &mut builder, + hash_of_1_1, + "key_placeholder", + ) + .unwrap(); + assert_eq!(value, "key_putter_v1"); +} + +#[ignore] +#[test] +fn should_call_by_hash_the_newest_version() { + let mut builder = prepare_v1_builder(); + install(&mut builder, CONTRACT_WASM, ProtocolVersion::V1_0_0); + install(&mut builder, CONTRACT_WASM, ProtocolVersion::V1_0_0); + upgrade_version(&mut builder, 2, false); + + exec_put_key_by_package_hash(&mut builder, None, None, ProtocolVersion::V1_0_0); + + let hash_of_1_2 = 
get_contract_hash_for_specific_version(&mut builder, 1, 2).unwrap(); + let value = get_value_of_named_key_for_contract_hash_as_str( + &mut builder, + hash_of_1_2, + "key_placeholder", + ) + .unwrap(); + assert_eq!(value, "key_putter_v2"); + + upgrade_version(&mut builder, 2, false); + install(&mut builder, CONTRACT_WASM, ProtocolVersion::V2_0_0); + install(&mut builder, CONTRACT_WASM, ProtocolVersion::V2_0_0); + + exec_put_key_by_package_hash(&mut builder, None, None, ProtocolVersion::V2_0_0); + + let hash_of_2_2 = + get_contract_hash_for_specific_version(&mut builder, *CURRENT_PROTOCOL_MAJOR, 2).unwrap(); + let value = get_value_of_named_key_for_contract_hash_as_str( + &mut builder, + hash_of_2_2, + "key_placeholder", + ) + .unwrap(); + assert_eq!(value, "key_putter_v4"); + + disable_contract_version(&mut builder, 2, 2); + // After disabling 2.2, selecting newest should point to 2.1 + + exec_put_key_by_package_hash(&mut builder, None, None, ProtocolVersion::V2_0_0); + + let hash_of_2_1 = get_contract_hash_for_specific_version(&mut builder, 2, 1).unwrap(); + let value = get_value_of_named_key_for_contract_hash_as_str( + &mut builder, + hash_of_2_1, + "key_placeholder", + ) + .unwrap(); + assert_eq!(value, "key_putter_v3"); +} + +#[ignore] +#[test] +fn should_call_by_name_the_newest_version() { + let mut builder = prepare_v1_builder(); + install(&mut builder, CONTRACT_WASM, ProtocolVersion::V1_0_0); + install(&mut builder, CONTRACT_WASM, ProtocolVersion::V1_0_0); + + exec_put_key_by_package_name(&mut builder, None, None, ProtocolVersion::V1_0_0); + + let hash_of_1_2 = get_contract_hash_for_specific_version(&mut builder, 1, 2).unwrap(); + let value = get_value_of_named_key_for_contract_hash_as_str( + &mut builder, + hash_of_1_2, + "key_placeholder", + ) + .unwrap(); + assert_eq!(value, "key_putter_v2"); + + upgrade_version(&mut builder, 2, false); + install(&mut builder, CONTRACT_WASM, ProtocolVersion::V2_0_0); + install(&mut builder, CONTRACT_WASM, 
ProtocolVersion::V2_0_0); + + exec_put_key_by_package_name(&mut builder, None, None, ProtocolVersion::V2_0_0); + + let hash_of_2_2 = + get_contract_hash_for_specific_version(&mut builder, *CURRENT_PROTOCOL_MAJOR, 2).unwrap(); + let value = get_value_of_named_key_for_contract_hash_as_str( + &mut builder, + hash_of_2_2, + "key_placeholder", + ) + .unwrap(); + assert_eq!(value, "key_putter_v4"); + + disable_contract_version(&mut builder, 2, 2); + // After disabling 2.2, selecting newest should point to 2.1 + + exec_put_key_by_package_name(&mut builder, None, None, ProtocolVersion::V2_0_0); + + let hash_of_2_1 = get_contract_hash_for_specific_version(&mut builder, 2, 1).unwrap(); + let value = get_value_of_named_key_for_contract_hash_as_str( + &mut builder, + hash_of_2_1, + "key_placeholder", + ) + .unwrap(); + assert_eq!(value, "key_putter_v3"); +} + +#[ignore] +#[test] +fn when_disamiguous_calls_are_enabled_should_call_by_hash_querying_by_version() { + let mut builder = prepare_v1_builder(); + install(&mut builder, CONTRACT_WASM, ProtocolVersion::V1_0_0); + install(&mut builder, CONTRACT_WASM, ProtocolVersion::V1_0_0); + + exec_put_key_by_package_hash(&mut builder, None, Some(1), ProtocolVersion::V1_0_0); + + let hash_of_1_1 = get_contract_hash_for_specific_version(&mut builder, 1, 1).unwrap(); + let value = get_value_of_named_key_for_contract_hash_as_str( + &mut builder, + hash_of_1_1, + "key_placeholder", + ) + .unwrap(); + assert_eq!(value, "key_putter_v1"); + + upgrade_version(&mut builder, 2, false); + install(&mut builder, CONTRACT_WASM, ProtocolVersion::V2_0_0); + install(&mut builder, CONTRACT_WASM, ProtocolVersion::V2_0_0); + + exec_put_key_by_package_hash(&mut builder, None, Some(1), ProtocolVersion::V2_0_0); + + let hash_of_2_1 = get_contract_hash_for_specific_version(&mut builder, 2, 1).unwrap(); + let value = get_value_of_named_key_for_contract_hash_as_str( + &mut builder, + hash_of_2_1, + "key_placeholder", + ) + .unwrap(); + assert_eq!(value, 
"key_putter_v3"); + + disable_contract_version(&mut builder, *CURRENT_PROTOCOL_MAJOR, 1); + exec_put_key_by_package_hash(&mut builder, None, Some(1), ProtocolVersion::V2_0_0); + + let hash_of_1_1 = get_contract_hash_for_specific_version(&mut builder, 1, 1).unwrap(); + let value = get_value_of_named_key_for_contract_hash_as_str( + &mut builder, + hash_of_1_1, + "key_placeholder", + ) + .unwrap(); + assert_eq!(value, "key_putter_v1"); +} + +#[ignore] +#[test] +fn when_disamiguous_calls_are_disabled_then_ambiguous_call_by_hash_will_fail() { + let mut builder = prepare_v1_builder(); + install(&mut builder, CONTRACT_WASM, ProtocolVersion::V1_0_0); + install(&mut builder, CONTRACT_WASM, ProtocolVersion::V1_0_0); + upgrade_version(&mut builder, 2, true); + install(&mut builder, CONTRACT_WASM, ProtocolVersion::V2_0_0); + install(&mut builder, CONTRACT_WASM, ProtocolVersion::V2_0_0); + + let package_hash = get_package_hash(&mut builder); + let target = TransactionInvocationTarget::ByPackageHash { + addr: package_hash, + version: Some(1), + protocol_version_major: None, + }; + let request = builder_for_calling_entrypoint( + "put_key".to_owned(), + target, + RuntimeArgs::default(), + ProtocolVersion::V2_0_0, + ); + builder.exec(request).expect_failure().commit(); + let error = builder + .get_last_exec_result() + .unwrap() + .error() + .unwrap() + .clone(); + assert!(matches!( + error, + Error::Exec(ExecError::AmbiguousEntityVersion) + )) +} + +#[ignore] +#[test] +fn when_disamiguous_calls_are_disabled_then_ambiguous_call_by_name_will_fail() { + let mut builder = prepare_v1_builder(); + install(&mut builder, CONTRACT_WASM, ProtocolVersion::V1_0_0); + install(&mut builder, CONTRACT_WASM, ProtocolVersion::V1_0_0); + upgrade_version(&mut builder, 2, true); + install(&mut builder, CONTRACT_WASM, ProtocolVersion::V2_0_0); + install(&mut builder, CONTRACT_WASM, ProtocolVersion::V2_0_0); + + let target = TransactionInvocationTarget::ByPackageName { + name: "package_name".to_owned(), 
+ version: Some(1), + protocol_version_major: None, + }; + let request = builder_for_calling_entrypoint( + "put_key".to_owned(), + target, + RuntimeArgs::default(), + ProtocolVersion::V2_0_0, + ); + builder.exec(request).expect_failure().commit(); + let error = builder + .get_last_exec_result() + .unwrap() + .error() + .unwrap() + .clone(); + assert!(matches!( + error, + Error::Exec(ExecError::AmbiguousEntityVersion) + )) +} + +#[ignore] +#[test] +fn calling_by_package_hash_should_work_when_more_then_two_protocol_versions() { + let mut builder = prepare_v1_builder(); + install(&mut builder, CONTRACT_WASM, ProtocolVersion::V1_0_0); + install(&mut builder, CONTRACT_WASM, ProtocolVersion::V1_0_0); + upgrade_version(&mut builder, 2, false); + upgrade_version(&mut builder, 3, false); + install(&mut builder, CONTRACT_WASM, *V3_0_0); + + exec_put_key_by_package_hash(&mut builder, None, Some(1), *V3_0_0); + assert_contract_version_hash_placeholder_value(&mut builder, 3, 1, "key_putter_v3"); + + install(&mut builder, CONTRACT_WASM, *V3_0_0); + exec_put_key_by_package_hash(&mut builder, None, None, *V3_0_0); + assert_contract_version_hash_placeholder_value(&mut builder, 3, 2, "key_putter_v4"); + + exec_put_key_by_package_hash(&mut builder, Some(1), None, *V3_0_0); + assert_contract_version_hash_placeholder_value(&mut builder, 1, 2, "key_putter_v2"); +} + +fn assert_contract_version_hash_placeholder_value( + builder: &mut LmdbWasmTestBuilder, + protocol_version: ProtocolVersionMajor, + entity_version: EntityVersion, + expected_value: &str, +) { + let hash_of_contract = + get_contract_hash_for_specific_version(builder, protocol_version, entity_version).unwrap(); + let value = get_value_of_named_key_for_contract_hash_as_str( + builder, + hash_of_contract, + "key_placeholder", + ) + .unwrap(); + assert_eq!(value, expected_value); +} + +fn install(builder: &mut LmdbWasmTestBuilder, file_name: &str, protocol_version: ProtocolVersion) { + let install_request = 
ExecuteRequestBuilder::standard_with_protocol_version( + *DEFAULT_ACCOUNT_ADDR, + file_name, + RuntimeArgs::default(), + protocol_version, + ) + .build(); + builder.exec(install_request).expect_success().commit(); +} + +fn get_value_of_named_key_for_contract_hash_as_str( + builder: &mut LmdbWasmTestBuilder, + hash: HashAddr, + key_name: &str, +) -> Option { + let get_named_keys_for_contract_hash = get_named_keys_for_contract_hash(builder, hash); + get_named_keys_for_contract_hash + .get(key_name) + .and_then(|key| match builder.query(None, *key, &[]) { + Ok(v) => match v { + StoredValue::CLValue(cl_value) => cl_value.into_t().ok(), + _ => panic!("Unexpected stored value kind"), + }, + Err(_) => None, + }) +} + +fn disable_contract_version( + builder: &mut LmdbWasmTestBuilder, + protocol_version_major: ProtocolVersionMajor, + version: EntityVersion, +) { + let package_hash = get_package_hash(builder); + let hash = + get_contract_hash_for_specific_version(builder, protocol_version_major, version).unwrap(); + let stored_entity_hash = AddressableEntityHash::new(hash); + let disable_request = { + let session_args = runtime_args! 
{ + "contract_package_hash" => package_hash, + "contract_hash" => stored_entity_hash, + }; + + ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, DISABLE_CONTRACT, session_args) + .build() + }; + builder.exec(disable_request).expect_success().commit(); +} + +fn get_named_keys_for_contract_hash( + builder: &mut LmdbWasmTestBuilder, + hash: HashAddr, +) -> NamedKeys { + builder.get_named_keys_for_contract(AddressableEntityHash::new(hash)) +} + +fn call_contract_entrypoint( + builder: &mut LmdbWasmTestBuilder, + entry_point: String, + id: TransactionInvocationTarget, + args: RuntimeArgs, + protocol_version: ProtocolVersion, +) { + let request = builder_for_calling_entrypoint(entry_point, id, args, protocol_version); + builder.exec(request).expect_success().commit(); +} + +fn builder_for_calling_entrypoint( + entry_point: String, + id: TransactionInvocationTarget, + args: RuntimeArgs, + protocol_version: ProtocolVersion, +) -> casper_engine_test_support::ExecuteRequest { + let target = TransactionTarget::Stored { + id, + runtime: TransactionRuntimeParams::VmCasperV1, + }; + let entry_point = TransactionEntryPoint::Custom(entry_point); + let v1_hash = TransactionV1Hash::from_raw([5; 32]); + let mut signers = BTreeSet::new(); + signers.insert(*DEFAULT_ACCOUNT_ADDR); + let pricing_mode = PricingMode::PaymentLimited { + payment_amount: 2_500_000, + gas_price_tolerance: 1, + standard_payment: true, + }; + let initiator_addr = casper_types::InitiatorAddr::AccountHash(*DEFAULT_ACCOUNT_ADDR); + let session_data_v1 = SessionDataV1::new( + &args, + &target, + &entry_point, + true, + &v1_hash, + &pricing_mode, + &initiator_addr, + signers, + true, + ); + let session_input_data = SessionInputData::SessionDataV1 { + data: session_data_v1, + }; + ExecuteRequestBuilder::from_session_input_data_for_protocol_version( + &session_input_data, + protocol_version, + ) + .build() +} + +fn get_package_hash(builder: &mut LmdbWasmTestBuilder) -> [u8; 32] { + let account = 
builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let get = account.named_keys().get("package_name"); + let package_key = get.unwrap(); + let package_hash = match package_key { + Key::Hash(hash) => hash, + _ => { + panic!("COULDN'T HANLDE") + } + }; + *package_hash +} + +fn get_contract_hash_for_specific_version( + builder: &mut LmdbWasmTestBuilder, + protocol_version_major: ProtocolVersionMajor, + version: EntityVersion, +) -> Option { + let maybe_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR); + let account = maybe_account.unwrap(); + let get = account.named_keys().get("package_name"); + let package_key = get.unwrap(); + let package_hash = match package_key { + Key::Hash(hash) => hash, + _ => { + panic!("COULDN'T HANLDE THE KEY") + } + }; + let package = builder + .get_package(PackageHash::new(*package_hash)) + .unwrap(); + let key = EntityVersionKey::new(protocol_version_major, version); + package.versions().get(&key).map(|x| x.value()) +} + +fn exec_put_key_by_package_name( + builder: &mut LmdbWasmTestBuilder, + protocol_version_major: Option, + version: Option, + protocol_version: ProtocolVersion, +) { + let target = TransactionInvocationTarget::ByPackageName { + name: "package_name".to_owned(), + version, + protocol_version_major, + }; + call_contract_entrypoint( + builder, + "put_key".to_owned(), + target, + RuntimeArgs::default(), + protocol_version, + ) +} + +fn exec_put_key_by_package_hash( + builder: &mut LmdbWasmTestBuilder, + protocol_version_major: Option, + version: Option, + protocol_version: ProtocolVersion, +) { + let package_hash = get_package_hash(builder); + let target = TransactionInvocationTarget::ByPackageHash { + addr: package_hash, + version, + protocol_version_major, + }; + call_contract_entrypoint( + builder, + "put_key".to_owned(), + target, + RuntimeArgs::default(), + protocol_version, + ) +} + +fn upgrade_version( + builder: &mut LmdbWasmTestBuilder, + new_protocol_version_major: ProtocolVersionMajor, + 
should_trap_on_ambiguous_entity_version: bool, +) { + if new_protocol_version_major <= 1 { + panic!("Can't upgrade to 1 or 0 major version"); + } + let current_protocol_version = + ProtocolVersion::from_parts(new_protocol_version_major - 1, 0, 0); + let new_protocol_version = ProtocolVersion::from_parts(new_protocol_version_major, 0, 0); + + let activation_point = EraId::new(0u64); + + let mut upgrade_request = UpgradeRequestBuilder::new() + .with_current_protocol_version(current_protocol_version) + .with_new_protocol_version(new_protocol_version) + .with_activation_point(activation_point) + .with_new_gas_hold_handling(HoldBalanceHandling::Accrued) + .with_new_gas_hold_interval(24 * 60 * 60 * 60) + .with_enable_addressable_entity(false) + .build(); + let config = EngineConfigBuilder::new() + .with_trap_on_ambiguous_entity_version(should_trap_on_ambiguous_entity_version) + .build(); + builder + .with_block_time(Timestamp::now().into()) + .upgrade_using_scratch(&mut upgrade_request) + .expect_upgrade_success(); + builder.with_engine_config(config); +} + +fn prepare_v1_builder() -> LmdbWasmTestBuilder { + let mut rng = rand::thread_rng(); + let chainspec_bytes = rng.gen::<[u8; 32]>(); + let genesis_account = rng.gen::<[u8; 32]>(); + let chainspec_registry = + ChainspecRegistry::new_with_genesis(&chainspec_bytes, &genesis_account); + + let run_genesis_request = GenesisRequest::new( + DEFAULT_GENESIS_CONFIG_HASH, + ProtocolVersion::V1_0_0, + DEFAULT_EXEC_CONFIG.clone(), + chainspec_registry, + ); + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(run_genesis_request); + builder +} diff --git a/execution_engine_testing/tests/src/test/chainspec_registry.rs b/execution_engine_testing/tests/src/test/chainspec_registry.rs new file mode 100644 index 0000000000..e675064800 --- /dev/null +++ b/execution_engine_testing/tests/src/test/chainspec_registry.rs @@ -0,0 +1,188 @@ +use rand::Rng; +use tempfile::TempDir; + +use casper_engine_test_support::{ + 
LmdbWasmTestBuilder, UpgradeRequestBuilder, DEFAULT_EXEC_CONFIG, DEFAULT_GENESIS_CONFIG_HASH, + DEFAULT_PROTOCOL_VERSION, LOCAL_GENESIS_REQUEST, +}; +use casper_storage::data_access_layer::GenesisRequest; +use casper_types::{ChainspecRegistry, Digest, EraId, Key, ProtocolVersion}; + +use crate::lmdb_fixture; + +const DEFAULT_ACTIVATION_POINT: EraId = EraId::new(1); + +const OLD_PROTOCOL_VERSION: ProtocolVersion = DEFAULT_PROTOCOL_VERSION; +const NEW_PROTOCOL_VERSION: ProtocolVersion = ProtocolVersion::from_parts( + OLD_PROTOCOL_VERSION.value().major, + OLD_PROTOCOL_VERSION.value().minor, + OLD_PROTOCOL_VERSION.value().patch + 1, +); + +#[ignore] +#[test] +fn should_commit_chainspec_registry_during_genesis() { + let mut rng = rand::thread_rng(); + let chainspec_bytes = rng.gen::<[u8; 32]>(); + let genesis_account = rng.gen::<[u8; 32]>(); + let chainspec_bytes_hash = Digest::hash(chainspec_bytes); + let genesis_account_hash = Digest::hash(genesis_account); + + let chainspec_registry = + ChainspecRegistry::new_with_genesis(&chainspec_bytes, &genesis_account); + + let run_genesis_request = GenesisRequest::new( + DEFAULT_GENESIS_CONFIG_HASH, + DEFAULT_PROTOCOL_VERSION, + DEFAULT_EXEC_CONFIG.clone(), + chainspec_registry, + ); + + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(run_genesis_request); + + let queried_registry = builder + .query(None, Key::ChainspecRegistry, &[]) + .expect("must have entry under Key::ChainspecRegistry") + .as_cl_value() + .expect("must have underlying cl_value") + .to_owned() + .into_t::() + .expect("must convert to chainspec registry"); + + let queried_chainspec_hash = queried_registry.chainspec_raw_hash(); + assert_eq!(*queried_chainspec_hash, chainspec_bytes_hash); + + let queried_accounts_hash = queried_registry + .genesis_accounts_raw_hash() + .expect("must have entry for genesis accounts"); + assert_eq!(*queried_accounts_hash, genesis_account_hash); +} + +#[ignore] +#[test] +#[should_panic] +fn 
should_fail_to_commit_genesis_when_missing_genesis_accounts_hash() { + let mut rng = rand::thread_rng(); + let chainspec_bytes = rng.gen::<[u8; 32]>(); + + let incomplete_chainspec_registry = + ChainspecRegistry::new_with_optional_global_state(&chainspec_bytes, None); + + let run_genesis_request = GenesisRequest::new( + DEFAULT_GENESIS_CONFIG_HASH, + DEFAULT_PROTOCOL_VERSION, + DEFAULT_EXEC_CONFIG.clone(), + incomplete_chainspec_registry, + ); + + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(run_genesis_request); +} + +#[derive(Copy, Clone, Eq, PartialEq, Debug)] +struct TestConfig { + with_global_state_bytes: bool, + from_v1_4_4: bool, +} + +fn should_upgrade_chainspec_registry(cfg: TestConfig) { + let mut rng = rand::thread_rng(); + let data_dir = TempDir::new().expect("should create temp dir"); + + let mut builder = if cfg.from_v1_4_4 { + let (builder, _lmdb_fixture_state, _temp_dir) = + lmdb_fixture::builder_from_global_state_fixture(lmdb_fixture::RELEASE_1_4_4); + builder + } else { + let mut builder = LmdbWasmTestBuilder::new(data_dir.path()); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + builder + }; + + let chainspec_bytes = rng.gen::<[u8; 32]>(); + let global_state_bytes = rng.gen::<[u8; 32]>(); + let chainspec_bytes_hash = Digest::hash(chainspec_bytes); + let global_state_bytes_hash = Digest::hash(global_state_bytes); + + let upgraded_chainspec_registry = ChainspecRegistry::new_with_optional_global_state( + &chainspec_bytes, + cfg.with_global_state_bytes + .then_some(global_state_bytes.as_slice()), + ); + + let mut upgrade_request = { + UpgradeRequestBuilder::new() + .with_current_protocol_version(OLD_PROTOCOL_VERSION) + .with_new_protocol_version(NEW_PROTOCOL_VERSION) + .with_activation_point(DEFAULT_ACTIVATION_POINT) + .with_chainspec_registry(upgraded_chainspec_registry) + .build() + }; + + builder + .upgrade(&mut upgrade_request) + .expect_upgrade_success(); + + let queried_registry = builder + .query(None, 
Key::ChainspecRegistry, &[]) + .expect("must have entry under Key::ChainspecRegistry") + .as_cl_value() + .expect("must have underlying cl_value") + .to_owned() + .into_t::() + .expect("must convert to chainspec registry"); + + // There should be no entry for the genesis accounts once the upgrade has completed. + assert!(queried_registry.genesis_accounts_raw_hash().is_none()); + + let queried_chainspec_hash = queried_registry.chainspec_raw_hash(); + assert_eq!(*queried_chainspec_hash, chainspec_bytes_hash); + + if cfg.with_global_state_bytes { + let queried_global_state_toml_hash = queried_registry.global_state_raw_hash().unwrap(); + assert_eq!(*queried_global_state_toml_hash, global_state_bytes_hash); + } else { + assert!(queried_registry.global_state_raw_hash().is_none()); + } +} + +#[ignore] +#[test] +fn should_upgrade_chainspec_registry_with_global_state_hash() { + let cfg = TestConfig { + with_global_state_bytes: true, + from_v1_4_4: false, + }; + should_upgrade_chainspec_registry(cfg) +} + +#[ignore] +#[test] +fn should_upgrade_chainspec_registry_without_global_state_hash() { + let cfg = TestConfig { + with_global_state_bytes: false, + from_v1_4_4: false, + }; + should_upgrade_chainspec_registry(cfg) +} + +#[ignore] +#[test] +fn should_upgrade_chainspec_registry_with_global_state_hash_from_v1_4_4() { + let cfg = TestConfig { + with_global_state_bytes: true, + from_v1_4_4: true, + }; + should_upgrade_chainspec_registry(cfg) +} + +#[ignore] +#[test] +fn should_upgrade_chainspec_registry_without_global_state_hash_from_v1_4_4() { + let cfg = TestConfig { + with_global_state_bytes: false, + from_v1_4_4: true, + }; + should_upgrade_chainspec_registry(cfg) +} diff --git a/execution_engine_testing/tests/src/test/check_transfer_success.rs b/execution_engine_testing/tests/src/test/check_transfer_success.rs index c26c0fa90d..d208e45297 100644 --- a/execution_engine_testing/tests/src/test/check_transfer_success.rs +++ 
b/execution_engine_testing/tests/src/test/check_transfer_success.rs @@ -1,210 +1,229 @@ -use casper_types::{runtime_args, RuntimeArgs, U512}; -use core::convert::TryFrom; +use std::path::PathBuf; use casper_engine_test_support::{ - internal::DEFAULT_ACCOUNT_PUBLIC_KEY, Code, SessionBuilder, SessionTransferInfo, - TestContextBuilder, DEFAULT_ACCOUNT_ADDR, DEFAULT_ACCOUNT_INITIAL_BALANCE, + utils::create_genesis_config, DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder, + DEFAULT_ACCOUNTS, DEFAULT_ACCOUNT_ADDR, DEFAULT_ACCOUNT_INITIAL_BALANCE, + DEFAULT_ACCOUNT_PUBLIC_KEY, DEFAULT_CHAINSPEC_REGISTRY, DEFAULT_GENESIS_CONFIG_HASH, + DEFAULT_PAYMENT, DEFAULT_PROTOCOL_VERSION, }; +use casper_storage::data_access_layer::GenesisRequest; +use casper_types::{runtime_args, GenesisAccount, Key, Motes, U512}; const ARG_AMOUNT: &str = "amount"; const ARG_DESTINATION: &str = "destination"; -const DESTINATION_PURSE_ONE: &str = "destination_purse_one"; -const DESTINATION_PURSE_TWO: &str = "destination_purse_two"; -const TRANSFER_AMOUNT_ONE: &str = "transfer_amount_one"; -const TRANSFER_AMOUNT_TWO: &str = "transfer_amount_two"; const TRANSFER_WASM: &str = "transfer_main_purse_to_new_purse.wasm"; -const TRANSFER_TO_TWO_PURSES: &str = "transfer_main_purse_to_two_purses.wasm"; const NEW_PURSE_NAME: &str = "test_purse"; -const SECOND_PURSE_NAME: &str = "second_purse"; const FIRST_TRANSFER_AMOUNT: u64 = 142; const SECOND_TRANSFER_AMOUNT: u64 = 250; #[ignore] #[test] fn test_check_transfer_success_with_source_only() { - let mut test_context = TestContextBuilder::new() - .with_public_key( - DEFAULT_ACCOUNT_PUBLIC_KEY.clone(), - U512::from(DEFAULT_ACCOUNT_INITIAL_BALANCE), - ) - .build(); + // create a genesis account. 
+ let genesis_account = GenesisAccount::account( + DEFAULT_ACCOUNT_PUBLIC_KEY.clone(), + Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE), + None, + ); - // Getting main purse URef to verify transfer - let source_purse = test_context - .main_purse_address(*DEFAULT_ACCOUNT_ADDR) - .expect("main purse address"); - // Target purse doesn't exist yet, so only verifying removal from source - let maybe_target_purse = None; - let transfer_amount = U512::try_from(FIRST_TRANSFER_AMOUNT).expect("U512 from u64"); - let source_only_session_transfer_info = - SessionTransferInfo::new(source_purse, maybe_target_purse, transfer_amount); + // add the account to the genesis config. + let mut accounts = vec![genesis_account]; + accounts.extend((*DEFAULT_ACCOUNTS).clone()); + let genesis_config = create_genesis_config(accounts); + let genesis_request = GenesisRequest::new( + DEFAULT_GENESIS_CONFIG_HASH, + DEFAULT_PROTOCOL_VERSION, + genesis_config, + DEFAULT_CHAINSPEC_REGISTRY.clone(), + ); // Doing a transfer from main purse to create new purse and store URef under NEW_PURSE_NAME. - let session_code = Code::from(TRANSFER_WASM); + let transfer_amount = U512::from(FIRST_TRANSFER_AMOUNT); + let path = PathBuf::from(TRANSFER_WASM); let session_args = runtime_args! { ARG_DESTINATION => NEW_PURSE_NAME, ARG_AMOUNT => transfer_amount }; - let session = SessionBuilder::new(session_code, session_args) + + // build the deploy. + let deploy_item = DeployItemBuilder::new() + .with_standard_payment(runtime_args! {ARG_AMOUNT => *DEFAULT_PAYMENT}) + .with_session_code(path, session_args) .with_address(*DEFAULT_ACCOUNT_ADDR) .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) - .with_check_transfer_success(source_only_session_transfer_info) + .with_deploy_hash([42; 32]) .build(); - test_context.run(session); + + // build a request to execute the deploy. 
+ let exec_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); + + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(genesis_request).commit(); + + // we need this to figure out what the transfer fee is. + let proposer_starting_balance = builder.get_proposer_purse_balance(); + + // Getting main purse URef to verify transfer + let source_purse = builder + .get_expected_addressable_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .main_purse(); + + builder.exec(exec_request).commit().expect_success(); + + let transaction_fee = builder.get_proposer_purse_balance() - proposer_starting_balance; + let expected_source_ending_balance = Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE) + .checked_sub(Motes::new(transfer_amount)) + .unwrap() + .checked_sub(Motes::new(transaction_fee)) + .unwrap(); + let actual_source_ending_balance = Motes::new(builder.get_purse_balance(source_purse)); + + assert_eq!(expected_source_ending_balance, actual_source_ending_balance); } #[ignore] #[test] -#[should_panic] fn test_check_transfer_success_with_source_only_errors() { - let mut test_context = TestContextBuilder::new() - .with_public_key( - DEFAULT_ACCOUNT_PUBLIC_KEY.clone(), - U512::from(DEFAULT_ACCOUNT_INITIAL_BALANCE), - ) - .build(); + let genesis_account = GenesisAccount::account( + DEFAULT_ACCOUNT_PUBLIC_KEY.clone(), + Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE), + None, + ); - // Getting main purse Uref to verify transfer - let source_purse = test_context - .main_purse_address(*DEFAULT_ACCOUNT_ADDR) - .expect("main purse address"); - let maybe_target_purse = None; - // Setup mismatch between transfer_amount performed and given to trigger assertion. 
- let transfer_amount = U512::try_from(FIRST_TRANSFER_AMOUNT).expect("U512 from u64"); - let wrong_transfer_amount = transfer_amount - U512::try_from(100u64).expect("U512 from 64"); - let source_only_session_transfer_info = - SessionTransferInfo::new(source_purse, maybe_target_purse, transfer_amount); + let mut accounts = vec![genesis_account]; + accounts.extend((*DEFAULT_ACCOUNTS).clone()); + let genesis_config = create_genesis_config(accounts); + let genesis_request = GenesisRequest::new( + DEFAULT_GENESIS_CONFIG_HASH, + DEFAULT_PROTOCOL_VERSION, + genesis_config, + DEFAULT_CHAINSPEC_REGISTRY.clone(), + ); // Doing a transfer from main purse to create new purse and store Uref under NEW_PURSE_NAME. - let session_code = Code::from(TRANSFER_WASM); + let transfer_amount = U512::from(FIRST_TRANSFER_AMOUNT); + // Setup mismatch between transfer_amount performed and given to trigger assertion. + let wrong_transfer_amount = transfer_amount - U512::from(100u64); + + let path = PathBuf::from(TRANSFER_WASM); let session_args = runtime_args! { ARG_DESTINATION => NEW_PURSE_NAME, ARG_AMOUNT => wrong_transfer_amount }; - // Handle expected assertion fail. - let session = SessionBuilder::new(session_code, session_args) + + let deploy_item = DeployItemBuilder::new() + .with_standard_payment(runtime_args! {ARG_AMOUNT => *DEFAULT_PAYMENT}) + .with_session_code(path, session_args) .with_address(*DEFAULT_ACCOUNT_ADDR) .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) - .with_check_transfer_success(source_only_session_transfer_info) + .with_deploy_hash([42; 32]) .build(); - test_context.run(session); // will panic if transfer does not work + + let exec_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); + + // Set up test builder and run genesis. + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(genesis_request).commit(); + + // compare proposer balance before and after the transaction to get the tx fee. 
+ let proposer_starting_balance = builder.get_proposer_purse_balance(); + let source_purse = builder + .get_expected_addressable_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .main_purse(); + + builder.exec(exec_request).commit().expect_success(); + + let transaction_fee = builder.get_proposer_purse_balance() - proposer_starting_balance; + let expected_source_ending_balance = Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE) + .checked_sub(Motes::new(transfer_amount)) + .unwrap() + .checked_sub(Motes::new(transaction_fee)) + .unwrap(); + let actual_source_ending_balance = Motes::new(builder.get_purse_balance(source_purse)); + + assert!(expected_source_ending_balance != actual_source_ending_balance); } #[ignore] #[test] fn test_check_transfer_success_with_source_and_target() { - let mut test_context = TestContextBuilder::new() - .with_public_key( - DEFAULT_ACCOUNT_PUBLIC_KEY.clone(), - U512::from(DEFAULT_ACCOUNT_INITIAL_BALANCE), - ) - .build(); - - // Getting main purse URef to verify transfer - let source_purse = test_context - .main_purse_address(*DEFAULT_ACCOUNT_ADDR) - .expect("main purse address"); + let genesis_account = GenesisAccount::account( + DEFAULT_ACCOUNT_PUBLIC_KEY.clone(), + Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE), + None, + ); - let maybe_target_purse = None; - let transfer_amount = U512::try_from(SECOND_TRANSFER_AMOUNT).expect("U512 from u64"); - let source_and_target_session_transfer_info = - SessionTransferInfo::new(source_purse, maybe_target_purse, transfer_amount); + let mut accounts = vec![genesis_account]; + accounts.extend((*DEFAULT_ACCOUNTS).clone()); + let genesis_config = create_genesis_config(accounts); + let genesis_request = GenesisRequest::new( + DEFAULT_GENESIS_CONFIG_HASH, + DEFAULT_PROTOCOL_VERSION, + genesis_config, + DEFAULT_CHAINSPEC_REGISTRY.clone(), + ); + let transfer_amount = U512::from(SECOND_TRANSFER_AMOUNT); // Doing a transfer from main purse to create new purse and store URef under NEW_PURSE_NAME. 
- let session_code = Code::from(TRANSFER_WASM); + let path = PathBuf::from(TRANSFER_WASM); let session_args = runtime_args! { ARG_DESTINATION => NEW_PURSE_NAME, ARG_AMOUNT => transfer_amount }; - let session = SessionBuilder::new(session_code, session_args) + let deploy_item = DeployItemBuilder::new() + .with_standard_payment(runtime_args! {ARG_AMOUNT => *DEFAULT_PAYMENT}) + .with_session_code(path, session_args) .with_address(*DEFAULT_ACCOUNT_ADDR) .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) - .with_check_transfer_success(source_and_target_session_transfer_info) + .with_deploy_hash([42; 32]) .build(); - test_context.run(session); - // retrieve newly created purse URef - test_context - .query(*DEFAULT_ACCOUNT_ADDR, &[NEW_PURSE_NAME.to_string()]) - .expect("new purse should exist"); -} + let exec_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); -#[ignore] -#[test] -#[should_panic] -fn test_check_transfer_success_with_target_error() { - let mut test_context = TestContextBuilder::new() - .with_public_key( - DEFAULT_ACCOUNT_PUBLIC_KEY.clone(), - U512::from(DEFAULT_ACCOUNT_INITIAL_BALANCE), - ) - .build(); + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(genesis_request).commit(); + + // we need this to figure out what the transfer fee is. + let proposer_starting_balance = builder.get_proposer_purse_balance(); // Getting main purse URef to verify transfer - let source_purse = test_context - .main_purse_address(*DEFAULT_ACCOUNT_ADDR) - .expect("main purse address"); - let maybe_target_purse = None; - - // Contract will transfer from main purse twice, into two different purses - // This call will create the purses, so we can get the URef to destination purses. 
- let transfer_one_amount = U512::try_from(FIRST_TRANSFER_AMOUNT).expect("U512 from u64"); - let transfer_two_amount = U512::try_from(SECOND_TRANSFER_AMOUNT).expect("U512 from u64"); - let main_purse_transfer_from_amount = transfer_one_amount + transfer_two_amount; - let source_only_session_transfer_info = SessionTransferInfo::new( - source_purse, - maybe_target_purse, - main_purse_transfer_from_amount, - ); + let source_purse = builder + .get_expected_addressable_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .main_purse(); - // Will create two purses NEW_PURSE_NAME and SECOND_PURSE_NAME - let session_code = Code::from(TRANSFER_TO_TWO_PURSES); - let session_args = runtime_args! { - DESTINATION_PURSE_ONE => NEW_PURSE_NAME, - TRANSFER_AMOUNT_ONE => transfer_one_amount, - DESTINATION_PURSE_TWO => SECOND_PURSE_NAME, - TRANSFER_AMOUNT_TWO => transfer_two_amount, - }; - let session = SessionBuilder::new(session_code, session_args) - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) - .with_check_transfer_success(source_only_session_transfer_info) - .build(); - test_context.run(session); + builder.exec(exec_request).commit().expect_success(); + + let transaction_fee = builder.get_proposer_purse_balance() - proposer_starting_balance; + let expected_source_ending_balance = Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE) + .checked_sub(Motes::new(transfer_amount)) + .unwrap() + .checked_sub(Motes::new(transaction_fee)) + .unwrap(); + let actual_source_ending_balance = Motes::new(builder.get_purse_balance(source_purse)); + + assert_eq!(expected_source_ending_balance, actual_source_ending_balance); + + // retrieve newly created purse URef + builder + .query( + None, + Key::Account(*DEFAULT_ACCOUNT_ADDR), + &[NEW_PURSE_NAME.to_string()], + ) + .expect("new purse should exist"); - // get account purse by name via get_account() - let account = test_context - .get_account(*DEFAULT_ACCOUNT_ADDR) - .expect("account"); + // let target_purse = 
builder - let new_purse_address = account - .named_keys() + let named_keys = builder.get_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR); + + let target_purse = named_keys .get(NEW_PURSE_NAME) .expect("value") .into_uref() .expect("uref"); - let maybe_target_purse = Some(new_purse_address); // TODO: Put valid URef here - let source_and_target_session_transfer_info = SessionTransferInfo::new( - source_purse, - maybe_target_purse, - main_purse_transfer_from_amount, - ); + let expected_balance = U512::from(SECOND_TRANSFER_AMOUNT); + let target_balance = builder.get_purse_balance(target_purse); - // Same transfer as before, but with maybe_target_purse active for validating amount into purse - // The test for total pulled from main purse should not assert. - // The test for total into NEW_PURSE_NAME is only part of transfer and should assert. - let session_code = Code::from(TRANSFER_TO_TWO_PURSES); - let session_args = runtime_args! { - DESTINATION_PURSE_ONE => NEW_PURSE_NAME, - TRANSFER_AMOUNT_ONE => transfer_one_amount, - DESTINATION_PURSE_TWO => SECOND_PURSE_NAME, - TRANSFER_AMOUNT_TWO => transfer_two_amount, - }; - let session = SessionBuilder::new(session_code, session_args) - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) - .with_check_transfer_success(source_and_target_session_transfer_info) - .build(); - test_context.run(session); // will panic because maybe_target_purse balance isn't correct + assert_eq!(expected_balance, target_balance); } diff --git a/execution_engine_testing/tests/src/test/contract_api/account/associated_keys.rs b/execution_engine_testing/tests/src/test/contract_api/account/associated_keys.rs index e6643b28b0..0462c0093f 100644 --- a/execution_engine_testing/tests/src/test/contract_api/account/associated_keys.rs +++ b/execution_engine_testing/tests/src/test/contract_api/account/associated_keys.rs @@ -1,15 +1,15 @@ use once_cell::sync::Lazy; use casper_engine_test_support::{ - internal::{ - 
ExecuteRequestBuilder, InMemoryWasmTestBuilder, DEFAULT_PAYMENT, - DEFAULT_RUN_GENESIS_REQUEST, - }, - DEFAULT_ACCOUNT_ADDR, + ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, DEFAULT_PAYMENT, + LOCAL_GENESIS_REQUEST, +}; +use casper_execution_engine::{ + engine_state::{EngineConfigBuilder, Error}, + execution::ExecError, }; use casper_types::{ - account::{AccountHash, Weight}, - runtime_args, RuntimeArgs, U512, + account::AccountHash, addressable_entity::Weight, runtime_args, ApiError, U512, }; const CONTRACT_ADD_UPDATE_ASSOCIATED_KEY: &str = "add_update_associated_key.wasm"; @@ -25,7 +25,7 @@ static ACCOUNT_1_INITIAL_FUND: Lazy = Lazy::new(|| *DEFAULT_PAYMENT * 10); fn should_manage_associated_key() { // for a given account, should be able to add a new associated key and update // that key - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); let exec_request_1 = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, @@ -40,22 +40,21 @@ fn should_manage_associated_key() { ) .build(); - builder - .run_genesis(&DEFAULT_RUN_GENESIS_REQUEST) - .exec(exec_request_1) - .expect_success() - .commit(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()).commit(); + + builder.exec(exec_request_1).expect_success().commit(); builder.exec(exec_request_2).expect_success().commit(); let genesis_key = *DEFAULT_ACCOUNT_ADDR; - let account_1 = builder - .get_account(ACCOUNT_1_ADDR) + let contract_1 = builder + .get_entity_by_account_hash(ACCOUNT_1_ADDR) .expect("should have account"); - let gen_weight = account_1 - .get_associated_key_weight(genesis_key) + let gen_weight = contract_1 + .associated_keys() + .get(&genesis_key) .expect("weight"); let expected_weight = Weight::new(2); @@ -70,14 +69,72 @@ fn should_manage_associated_key() { builder.exec(exec_request_3).expect_success().commit(); - let account_1 = builder - .get_account(ACCOUNT_1_ADDR) + let contract_1 = builder + 
.get_entity_by_account_hash(ACCOUNT_1_ADDR) .expect("should have account"); - let new_weight = account_1.get_associated_key_weight(genesis_key); + let new_weight = contract_1.associated_keys().get(&genesis_key); assert_eq!(new_weight, None, "key should be removed"); let is_error = builder.is_error(); assert!(!is_error); } + +#[ignore] +#[test] +fn should_remove_associated_key_when_at_max_allowed_cap() { + let mut builder = LmdbWasmTestBuilder::default(); + + let engine_config = EngineConfigBuilder::new() + .with_max_associated_keys(2) + .build(); + + builder + .with_engine_config(engine_config) + .run_genesis(LOCAL_GENESIS_REQUEST.clone()) + .commit(); + + assert_eq!(builder.get_engine_state().config().max_associated_keys(), 2); + + let exec_request_1 = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_PURSE_TO_ACCOUNT, + runtime_args! { "target" => ACCOUNT_1_ADDR, "amount" => *ACCOUNT_1_INITIAL_FUND }, + ) + .build(); + let exec_request_2 = ExecuteRequestBuilder::standard( + ACCOUNT_1_ADDR, + CONTRACT_ADD_UPDATE_ASSOCIATED_KEY, + runtime_args! { "account" => *DEFAULT_ACCOUNT_ADDR, }, + ) + .build(); + + builder.exec(exec_request_1).expect_success().commit(); + builder.exec(exec_request_2).expect_success().commit(); + + let exec_request_3 = ExecuteRequestBuilder::standard( + ACCOUNT_1_ADDR, + CONTRACT_ADD_UPDATE_ASSOCIATED_KEY, + runtime_args! { "account" => *DEFAULT_ACCOUNT_ADDR, }, + ) + .build(); + + builder.exec(exec_request_3).expect_failure(); + + let error = builder.get_error().expect("we asserted the failure"); + + assert!(matches!( + error, + Error::Exec(ExecError::Revert(ApiError::MaxKeysLimit)) + )); + + let exec_request_4 = ExecuteRequestBuilder::standard( + ACCOUNT_1_ADDR, + CONTRACT_REMOVE_ASSOCIATED_KEY, + runtime_args! 
{ ARG_ACCOUNT => *DEFAULT_ACCOUNT_ADDR, }, + ) + .build(); + + builder.exec(exec_request_4).expect_success().commit(); +} diff --git a/execution_engine_testing/tests/src/test/contract_api/account/authorized_keys.rs b/execution_engine_testing/tests/src/test/contract_api/account/authorized_keys.rs index 0b04fec3b2..6b1d72896a 100644 --- a/execution_engine_testing/tests/src/test/contract_api/account/authorized_keys.rs +++ b/execution_engine_testing/tests/src/test/contract_api/account/authorized_keys.rs @@ -1,28 +1,24 @@ use casper_engine_test_support::{ - internal::{ - DeployItemBuilder, ExecuteRequestBuilder, InMemoryWasmTestBuilder, ARG_AMOUNT, - DEFAULT_PAYMENT, DEFAULT_RUN_GENESIS_REQUEST, - }, - DEFAULT_ACCOUNT_ADDR, + DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder, TransferRequestBuilder, + ARG_AMOUNT, DEFAULT_ACCOUNT_ADDR, DEFAULT_PAYMENT, LOCAL_GENESIS_REQUEST, }; -use casper_execution_engine::core::{ +use casper_execution_engine::{ engine_state::{self, Error}, - execution, -}; -use casper_types::{ - account::{AccountHash, Weight}, - runtime_args, - system::mint, - RuntimeArgs, U512, + execution::ExecError, }; +use casper_storage::{system::transfer::TransferError, tracking_copy::TrackingCopyError}; +use casper_types::{account::AccountHash, addressable_entity::Weight, runtime_args, U512}; +const CONTRACT_ADD_ASSOCIATED_KEY: &str = "add_associated_key.wasm"; const CONTRACT_ADD_UPDATE_ASSOCIATED_KEY: &str = "add_update_associated_key.wasm"; -const CONTRACT_AUTHORIZED_KEYS: &str = "authorized_keys.wasm"; +const CONTRACT_SET_ACTION_THRESHOLDS: &str = "set_action_thresholds.wasm"; const ARG_KEY_MANAGEMENT_THRESHOLD: &str = "key_management_threshold"; const ARG_DEPLOY_THRESHOLD: &str = "deploy_threshold"; const ARG_ACCOUNT: &str = "account"; +const ARG_WEIGHT: &str = "weight"; const KEY_1: AccountHash = AccountHash::new([254; 32]); const KEY_2: AccountHash = AccountHash::new([253; 32]); +const KEY_2_WEIGHT: Weight = Weight::new(100); const KEY_3: 
AccountHash = AccountHash::new([252; 32]); #[ignore] @@ -30,7 +26,7 @@ const KEY_3: AccountHash = AccountHash::new([252; 32]); fn should_deploy_with_authorized_identity_key() { let exec_request = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, - CONTRACT_AUTHORIZED_KEYS, + CONTRACT_SET_ACTION_THRESHOLDS, runtime_args! { ARG_KEY_MANAGEMENT_THRESHOLD => Weight::new(1), ARG_DEPLOY_THRESHOLD => Weight::new(1), @@ -38,8 +34,8 @@ fn should_deploy_with_authorized_identity_key() { ) .build(); // Basic deploy with single key - InMemoryWasmTestBuilder::default() - .run_genesis(&DEFAULT_RUN_GENESIS_REQUEST) + LmdbWasmTestBuilder::default() + .run_genesis(LOCAL_GENESIS_REQUEST.clone()) .exec(exec_request) .commit() .expect_success(); @@ -52,45 +48,46 @@ fn should_raise_auth_failure_with_invalid_key() { // Error::Authorization assert_ne!(*DEFAULT_ACCOUNT_ADDR, KEY_1); - let exec_request = { - let deploy = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_empty_payment_bytes(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, }) - .with_session_code( - CONTRACT_AUTHORIZED_KEYS, - runtime_args! { - ARG_KEY_MANAGEMENT_THRESHOLD => Weight::new(1), - ARG_DEPLOY_THRESHOLD => Weight::new(1) - }, - ) - .with_deploy_hash([1u8; 32]) - .with_authorization_keys(&[KEY_1]) - .build(); - ExecuteRequestBuilder::from_deploy_item(deploy).build() - }; + let deploy = DeployItemBuilder::new() + .with_address(*DEFAULT_ACCOUNT_ADDR) + .with_standard_payment(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, }) + .with_session_code( + CONTRACT_SET_ACTION_THRESHOLDS, + runtime_args! 
{ + ARG_KEY_MANAGEMENT_THRESHOLD => Weight::new(1), + ARG_DEPLOY_THRESHOLD => Weight::new(1) + }, + ) + .with_deploy_hash([1u8; 32]) + .with_authorization_keys(&[KEY_1]) + .build(); + let exec_request = ExecuteRequestBuilder::from_deploy_item(&deploy).build(); // Basic deploy with single key - let result = InMemoryWasmTestBuilder::default() - .run_genesis(&DEFAULT_RUN_GENESIS_REQUEST) + let mut builder = LmdbWasmTestBuilder::default(); + builder + .run_genesis(LOCAL_GENESIS_REQUEST.clone()) .exec(exec_request) - .commit() - .finish(); + .commit(); - let deploy_result = result - .builder() - .get_exec_result(0) - .expect("should have exec response") - .get(0) - .expect("should have at least one deploy result"); + let deploy_result = builder + .get_exec_result_owned(0) + .expect("should have exec response"); assert!( deploy_result.has_precondition_failure(), "{:?}", deploy_result ); - let message = format!("{}", deploy_result.as_error().unwrap()); + let message = format!("{}", deploy_result.error().unwrap()); - assert_eq!(message, format!("{}", engine_state::Error::Authorization)) + assert_eq!( + message, + format!( + "{}", + engine_state::Error::TrackingCopy(TrackingCopyError::Authorization) + ) + ) } #[ignore] @@ -102,41 +99,42 @@ fn should_raise_auth_failure_with_invalid_keys() { assert_ne!(*DEFAULT_ACCOUNT_ADDR, KEY_2); assert_ne!(*DEFAULT_ACCOUNT_ADDR, KEY_3); - let exec_request = { - let deploy = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_empty_payment_bytes(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, }) - .with_session_code( - CONTRACT_AUTHORIZED_KEYS, - runtime_args! 
{ - ARG_KEY_MANAGEMENT_THRESHOLD => Weight::new(1), - ARG_DEPLOY_THRESHOLD => Weight::new(1) - }, - ) - .with_deploy_hash([1u8; 32]) - .with_authorization_keys(&[KEY_2, KEY_1, KEY_3]) - .build(); - ExecuteRequestBuilder::from_deploy_item(deploy).build() - }; + let deploy = DeployItemBuilder::new() + .with_address(*DEFAULT_ACCOUNT_ADDR) + .with_standard_payment(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, }) + .with_session_code( + CONTRACT_SET_ACTION_THRESHOLDS, + runtime_args! { + ARG_KEY_MANAGEMENT_THRESHOLD => Weight::new(1), + ARG_DEPLOY_THRESHOLD => Weight::new(1) + }, + ) + .with_deploy_hash([1u8; 32]) + .with_authorization_keys(&[KEY_2, KEY_1, KEY_3]) + .build(); + let exec_request = ExecuteRequestBuilder::from_deploy_item(&deploy).build(); // Basic deploy with single key - let result = InMemoryWasmTestBuilder::default() - .run_genesis(&DEFAULT_RUN_GENESIS_REQUEST) + let mut builder = LmdbWasmTestBuilder::default(); + builder + .run_genesis(LOCAL_GENESIS_REQUEST.clone()) .exec(exec_request) - .commit() - .finish(); + .commit(); - let deploy_result = result - .builder() - .get_exec_result(0) - .expect("should have exec response") - .get(0) - .expect("should have at least one deploy result"); + let deploy_result = builder + .get_exec_result_owned(0) + .expect("should have exec response"); assert!(deploy_result.has_precondition_failure()); - let message = format!("{}", deploy_result.as_error().unwrap()); + let message = format!("{}", deploy_result.error().unwrap()); - assert_eq!(message, format!("{}", engine_state::Error::Authorization)) + assert_eq!( + message, + format!( + "{}", + engine_state::Error::TrackingCopy(TrackingCopyError::Authorization) + ) + ) } #[ignore] @@ -172,7 +170,7 @@ fn should_raise_deploy_authorization_failure() { // a key with weight=2. let exec_request_4 = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, - CONTRACT_AUTHORIZED_KEYS, + CONTRACT_SET_ACTION_THRESHOLDS, runtime_args! 
{ ARG_KEY_MANAGEMENT_THRESHOLD => Weight::new(4), ARG_DEPLOY_THRESHOLD => Weight::new(3) @@ -180,8 +178,9 @@ fn should_raise_deploy_authorization_failure() { ) .build(); // Basic deploy with single key - let result1 = InMemoryWasmTestBuilder::default() - .run_genesis(&DEFAULT_RUN_GENESIS_REQUEST) + let mut builder = LmdbWasmTestBuilder::default(); + builder + .run_genesis(LOCAL_GENESIS_REQUEST.clone()) // Reusing a test contract that would add new key .exec(exec_request_1) .expect_success() @@ -192,143 +191,117 @@ fn should_raise_deploy_authorization_failure() { .exec(exec_request_3) .expect_success() .commit() - // This should execute successfuly - change deploy and key management + // This should execute successfully - change deploy and key management // thresholds. .exec(exec_request_4) .expect_success() - .commit() - .finish(); - - let exec_request_5 = { - let deploy = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_empty_payment_bytes(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, }) - // Next deploy will see deploy threshold == 4, keymgmnt == 5 - .with_session_code( - CONTRACT_AUTHORIZED_KEYS, - runtime_args! { - ARG_KEY_MANAGEMENT_THRESHOLD => Weight::new(5), - ARG_DEPLOY_THRESHOLD => Weight::new(4) - }, //args - ) - .with_deploy_hash([5u8; 32]) - .with_authorization_keys(&[KEY_1]) - .build(); - ExecuteRequestBuilder::from_deploy_item(deploy).build() - }; + .commit(); + + let deploy = DeployItemBuilder::new() + .with_address(*DEFAULT_ACCOUNT_ADDR) + .with_standard_payment(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, }) + // Next deploy will see deploy threshold == 4, keymgmnt == 5 + .with_session_code( + CONTRACT_SET_ACTION_THRESHOLDS, + runtime_args! 
{ + ARG_KEY_MANAGEMENT_THRESHOLD => Weight::new(5), + ARG_DEPLOY_THRESHOLD => Weight::new(4) + }, //args + ) + .with_deploy_hash([5u8; 32]) + .with_authorization_keys(&[KEY_1]) + .build(); + let exec_request_5 = ExecuteRequestBuilder::from_deploy_item(&deploy).build(); // With deploy threshold == 3 using single secondary key // with weight == 2 should raise deploy authorization failure. - let result2 = InMemoryWasmTestBuilder::from_result(result1) - .exec(exec_request_5) - .commit() - .finish(); + builder.clear_results().exec(exec_request_5).commit(); { - let deploy_result = result2 - .builder() - .get_exec_result(0) - .expect("should have exec response") - .get(0) - .expect("should have at least one deploy result"); + let deploy_result = builder + .get_exec_result_owned(0) + .expect("should have exec response"); assert!(deploy_result.has_precondition_failure()); - let message = format!("{}", deploy_result.as_error().unwrap()); - assert!(message.contains(&format!( - "{}", - execution::Error::DeploymentAuthorizationFailure - ))) + let message = format!("{}", deploy_result.error().unwrap()); + assert!(message.contains(&format!("{}", ExecError::DeploymentAuthorizationFailure))) } - let exec_request_6 = { - let deploy = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_empty_payment_bytes(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, }) - // change deployment threshold to 4 - .with_session_code( - CONTRACT_AUTHORIZED_KEYS, - runtime_args! { - ARG_KEY_MANAGEMENT_THRESHOLD => Weight::new(6), - ARG_DEPLOY_THRESHOLD => Weight::new(5) - }, - ) - .with_deploy_hash([6u8; 32]) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR, KEY_1, KEY_2, KEY_3]) - .build(); - ExecuteRequestBuilder::from_deploy_item(deploy).build() - }; + let deploy = DeployItemBuilder::new() + .with_address(*DEFAULT_ACCOUNT_ADDR) + .with_standard_payment(runtime_args! 
{ ARG_AMOUNT => *DEFAULT_PAYMENT, }) + // change deployment threshold to 4 + .with_session_code( + CONTRACT_SET_ACTION_THRESHOLDS, + runtime_args! { + ARG_KEY_MANAGEMENT_THRESHOLD => Weight::new(6), + ARG_DEPLOY_THRESHOLD => Weight::new(5) + }, + ) + .with_deploy_hash([6u8; 32]) + .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR, KEY_1, KEY_2, KEY_3]) + .build(); + let exec_request_6 = ExecuteRequestBuilder::from_deploy_item(&deploy).build(); // identity key (w: 1) and KEY_1 (w: 2) passes threshold of 3 - let result3 = InMemoryWasmTestBuilder::from_result(result2) + builder + .clear_results() .exec(exec_request_6) .expect_success() - .commit() - .finish(); - - let exec_request_7 = { - let deploy = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_empty_payment_bytes(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, }) - // change deployment threshold to 4 - .with_session_code( - CONTRACT_AUTHORIZED_KEYS, - runtime_args! { - ARG_KEY_MANAGEMENT_THRESHOLD => Weight::new(0), - ARG_DEPLOY_THRESHOLD => Weight::new(0) - }, //args - ) - .with_deploy_hash([6u8; 32]) - .with_authorization_keys(&[KEY_2, KEY_1]) - .build(); - ExecuteRequestBuilder::from_deploy_item(deploy).build() - }; + .commit(); + + let deploy = DeployItemBuilder::new() + .with_address(*DEFAULT_ACCOUNT_ADDR) + .with_standard_payment(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, }) + // change deployment threshold to 4 + .with_session_code( + CONTRACT_SET_ACTION_THRESHOLDS, + runtime_args! 
{ + ARG_KEY_MANAGEMENT_THRESHOLD => Weight::new(0), + ARG_DEPLOY_THRESHOLD => Weight::new(0) + }, //args + ) + .with_deploy_hash([6u8; 32]) + .with_authorization_keys(&[KEY_2, KEY_1]) + .build(); + let exec_request_7 = ExecuteRequestBuilder::from_deploy_item(&deploy).build(); // deployment threshold is now 4 // failure: KEY_2 weight + KEY_1 weight < deployment threshold - let result4 = InMemoryWasmTestBuilder::from_result(result3) - .exec(exec_request_7) - .commit() - .finish(); + // let result4 = builder.clear_results() + builder.clear_results().exec(exec_request_7).commit(); { - let deploy_result = result4 - .builder() - .get_exec_result(0) - .expect("should have exec response") - .get(0) - .expect("should have at least one deploy result"); + let deploy_result = builder + .get_exec_result_owned(0) + .expect("should have exec response"); assert!(deploy_result.has_precondition_failure()); - let message = format!("{}", deploy_result.as_error().unwrap()); - assert!(message.contains(&format!( - "{}", - execution::Error::DeploymentAuthorizationFailure - ))) + let message = format!("{}", deploy_result.error().unwrap()); + assert!(message.contains(&format!("{}", ExecError::DeploymentAuthorizationFailure))) } - let exec_request_8 = { - let deploy = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_empty_payment_bytes(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, }) - // change deployment threshold to 4 - .with_session_code( - CONTRACT_AUTHORIZED_KEYS, - runtime_args! { - ARG_KEY_MANAGEMENT_THRESHOLD => Weight::new(0), - ARG_DEPLOY_THRESHOLD => Weight::new(0) - }, //args - ) - .with_deploy_hash([8u8; 32]) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR, KEY_1, KEY_2, KEY_3]) - .build(); - ExecuteRequestBuilder::from_deploy_item(deploy).build() - }; + let deploy = DeployItemBuilder::new() + .with_address(*DEFAULT_ACCOUNT_ADDR) + .with_standard_payment(runtime_args! 
{ ARG_AMOUNT => *DEFAULT_PAYMENT, }) + // change deployment threshold to 4 + .with_session_code( + CONTRACT_SET_ACTION_THRESHOLDS, + runtime_args! { + ARG_KEY_MANAGEMENT_THRESHOLD => Weight::new(0), + ARG_DEPLOY_THRESHOLD => Weight::new(0) + }, //args + ) + .with_deploy_hash([8u8; 32]) + .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR, KEY_1, KEY_2, KEY_3]) + .build(); + let exec_request_8 = ExecuteRequestBuilder::from_deploy_item(&deploy).build(); // success: identity key weight + KEY_1 weight + KEY_2 weight >= deployment // threshold - InMemoryWasmTestBuilder::from_result(result4) + builder + .clear_results() .exec(exec_request_8) .commit() - .expect_success() - .finish(); + .expect_success(); } #[ignore] @@ -352,40 +325,35 @@ fn should_authorize_deploy_with_multiple_keys() { ) .build(); // Basic deploy with single key - let result1 = InMemoryWasmTestBuilder::default() - .run_genesis(&DEFAULT_RUN_GENESIS_REQUEST) + let mut builder = LmdbWasmTestBuilder::default(); + builder + .run_genesis(LOCAL_GENESIS_REQUEST.clone()) // Reusing a test contract that would add new key .exec(exec_request_1) .expect_success() .commit() .exec(exec_request_2) .expect_success() - .commit() - .finish(); + .commit(); // KEY_1 (w: 2) KEY_2 (w: 2) each passes default threshold of 1 - let exec_request_3 = { - let deploy = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_empty_payment_bytes(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, }) - .with_session_code( - CONTRACT_AUTHORIZED_KEYS, - runtime_args! 
{ - ARG_KEY_MANAGEMENT_THRESHOLD => Weight::new(0), - ARG_DEPLOY_THRESHOLD => Weight::new(0), - }, - ) - .with_deploy_hash([36; 32]) - .with_authorization_keys(&[KEY_2, KEY_1]) - .build(); - ExecuteRequestBuilder::from_deploy_item(deploy).build() - }; - - InMemoryWasmTestBuilder::from_result(result1) - .exec(exec_request_3) - .expect_success() - .commit(); + let deploy = DeployItemBuilder::new() + .with_address(*DEFAULT_ACCOUNT_ADDR) + .with_standard_payment(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, }) + .with_session_code( + CONTRACT_SET_ACTION_THRESHOLDS, + runtime_args! { + ARG_KEY_MANAGEMENT_THRESHOLD => Weight::new(0), + ARG_DEPLOY_THRESHOLD => Weight::new(0), + }, + ) + .with_deploy_hash([36; 32]) + .with_authorization_keys(&[KEY_2, KEY_1]) + .build(); + let exec_request_3 = ExecuteRequestBuilder::from_deploy_item(&deploy).build(); + + builder.exec(exec_request_3).expect_success().commit(); } #[ignore] @@ -405,7 +373,17 @@ fn should_not_authorize_deploy_with_duplicated_keys() { let exec_request_2 = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, - CONTRACT_AUTHORIZED_KEYS, + CONTRACT_ADD_ASSOCIATED_KEY, + runtime_args! { + ARG_ACCOUNT => KEY_2, + ARG_WEIGHT => KEY_2_WEIGHT, + }, + ) + .build(); + + let exec_request_3 = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_SET_ACTION_THRESHOLDS, runtime_args! 
{ ARG_KEY_MANAGEMENT_THRESHOLD => Weight::new(4), ARG_DEPLOY_THRESHOLD => Weight::new(3) @@ -413,57 +391,51 @@ fn should_not_authorize_deploy_with_duplicated_keys() { ) .build(); // Basic deploy with single key - let result1 = InMemoryWasmTestBuilder::default() - .run_genesis(&DEFAULT_RUN_GENESIS_REQUEST) + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + builder // Reusing a test contract that would add new key .exec(exec_request_1) .expect_success() - .commit() - .exec(exec_request_2) - .expect_success() - .commit() - .finish(); - - let exec_request_3 = { - let deploy = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_empty_payment_bytes(runtime_args! { - ARG_AMOUNT => *DEFAULT_PAYMENT, - }) - .with_session_code( - CONTRACT_AUTHORIZED_KEYS, - runtime_args! { - ARG_KEY_MANAGEMENT_THRESHOLD => Weight::new(0), - ARG_DEPLOY_THRESHOLD => Weight::new(0) - }, - ) - .with_deploy_hash([3u8; 32]) - .with_authorization_keys(&[ - KEY_1, KEY_1, KEY_1, KEY_1, KEY_1, KEY_1, KEY_1, KEY_1, KEY_1, KEY_1, - ]) - .build(); - ExecuteRequestBuilder::from_deploy_item(deploy).build() - }; - let final_result = InMemoryWasmTestBuilder::from_result(result1) - .exec(exec_request_3) - .commit() - .finish(); - let deploy_result = final_result - .builder() - .get_exec_result(0) - .expect("should have exec response") - .get(0) - .expect("should have at least one deploy result"); + .commit(); + + builder.exec(exec_request_2).expect_success().commit(); + + builder.exec(exec_request_3).expect_success().commit(); + + let deploy = DeployItemBuilder::new() + .with_address(*DEFAULT_ACCOUNT_ADDR) + .with_standard_payment(runtime_args! { + ARG_AMOUNT => *DEFAULT_PAYMENT, + }) + .with_session_code( + CONTRACT_SET_ACTION_THRESHOLDS, + runtime_args! 
{ + ARG_KEY_MANAGEMENT_THRESHOLD => Weight::new(0), + ARG_DEPLOY_THRESHOLD => Weight::new(0) + }, + ) + .with_deploy_hash([3u8; 32]) + .with_authorization_keys(&[ + KEY_1, KEY_1, KEY_1, KEY_1, KEY_1, KEY_1, KEY_1, KEY_1, KEY_1, KEY_1, + ]) + .build(); + let exec_request_3 = ExecuteRequestBuilder::from_deploy_item(&deploy).build(); + builder.clear_results().exec(exec_request_3).commit(); + let deploy_result = builder + .get_exec_result_owned(0) + .expect("should have exec response"); assert!( deploy_result.has_precondition_failure(), "{:?}", deploy_result ); - let message = format!("{}", deploy_result.as_error().unwrap()); + let message = format!("{}", deploy_result.error().unwrap()); assert!(message.contains(&format!( "{}", - execution::Error::DeploymentAuthorizationFailure + TrackingCopyError::DeploymentAuthorizationFailure ))) } @@ -491,7 +463,7 @@ fn should_not_authorize_transfer_without_deploy_key_threshold() { .build(); let update_thresholds_request = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, - CONTRACT_AUTHORIZED_KEYS, + CONTRACT_SET_ACTION_THRESHOLDS, runtime_args! { ARG_KEY_MANAGEMENT_THRESHOLD => Weight::new(5), ARG_DEPLOY_THRESHOLD => Weight::new(5), @@ -500,10 +472,10 @@ fn should_not_authorize_transfer_without_deploy_key_threshold() { .build(); // Basic deploy with single key - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); builder - .run_genesis(&DEFAULT_RUN_GENESIS_REQUEST) + .run_genesis(LOCAL_GENESIS_REQUEST.clone()) // Reusing a test contract that would add new key .exec(add_key_1_request) .expect_success() @@ -517,53 +489,29 @@ fn should_not_authorize_transfer_without_deploy_key_threshold() { .commit(); // KEY_1 (w: 2) DEFAULT_ACCOUNT (w: 1) does not pass deploy threshold of 5 - let id: Option = None; - - let transfer_request_1 = { - let deploy = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_empty_payment_bytes(runtime_args! 
{ ARG_AMOUNT => *DEFAULT_PAYMENT, }) - .with_transfer_args(runtime_args! { - mint::ARG_TARGET => KEY_2, - mint::ARG_AMOUNT => transfer_amount, - mint::ARG_ID => id, - }) - .with_deploy_hash([36; 32]) - .with_authorization_keys(&[KEY_1, *DEFAULT_ACCOUNT_ADDR]) - .build(); - ExecuteRequestBuilder::from_deploy_item(deploy).build() - }; - - builder.exec(transfer_request_1).commit(); + let transfer_request_1 = TransferRequestBuilder::new(transfer_amount, KEY_2) + .with_authorization_keys([KEY_1, *DEFAULT_ACCOUNT_ADDR]) + .build(); + + builder.transfer_and_commit(transfer_request_1); let response = builder - .get_exec_result(3) - .expect("should have response") - .first() - .expect("should have first result"); - let error = response.as_error().expect("should have error"); + .get_exec_result_owned(3) + .expect("should have response"); + let error = response.error().expect("should have error"); assert!(matches!( error, - Error::Exec(execution::Error::DeploymentAuthorizationFailure) + Error::Transfer(TransferError::TrackingCopy( + TrackingCopyError::DeploymentAuthorizationFailure + )) )); // KEY_1 (w: 2) KEY_2 (w: 2) DEFAULT_ACCOUNT_ADDR (w: 1) each passes threshold of 5 - let id: Option = None; - - let transfer_request = { - let deploy = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_empty_payment_bytes(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, }) - .with_transfer_args(runtime_args! 
{ - mint::ARG_TARGET => KEY_2, - mint::ARG_AMOUNT => transfer_amount, - mint::ARG_ID => id, - }) - .with_deploy_hash([37; 32]) - .with_authorization_keys(&[KEY_2, KEY_1, *DEFAULT_ACCOUNT_ADDR]) - .build(); - ExecuteRequestBuilder::from_deploy_item(deploy).build() - }; - - builder.exec(transfer_request).expect_success().commit(); + let transfer_request = TransferRequestBuilder::new(transfer_amount, KEY_2) + .with_authorization_keys([KEY_2, KEY_1, *DEFAULT_ACCOUNT_ADDR]) + .build(); + + builder + .transfer_and_commit(transfer_request) + .expect_success(); } diff --git a/execution_engine_testing/tests/src/test/contract_api/account/key_management_thresholds.rs b/execution_engine_testing/tests/src/test/contract_api/account/key_management_thresholds.rs index 3b1474f1ba..ee2e8f734b 100644 --- a/execution_engine_testing/tests/src/test/contract_api/account/key_management_thresholds.rs +++ b/execution_engine_testing/tests/src/test/contract_api/account/key_management_thresholds.rs @@ -1,11 +1,8 @@ use casper_engine_test_support::{ - internal::{ - DeployItemBuilder, ExecuteRequestBuilder, InMemoryWasmTestBuilder, ARG_AMOUNT, - DEFAULT_PAYMENT, DEFAULT_RUN_GENESIS_REQUEST, - }, - DEFAULT_ACCOUNT_ADDR, + DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder, ARG_AMOUNT, + DEFAULT_ACCOUNT_ADDR, DEFAULT_PAYMENT, LOCAL_GENESIS_REQUEST, }; -use casper_types::{account::AccountHash, runtime_args, RuntimeArgs}; +use casper_types::{account::AccountHash, runtime_args}; const CONTRACT_KEY_MANAGEMENT_THRESHOLDS: &str = "key_management_thresholds.wasm"; @@ -26,8 +23,8 @@ fn should_verify_key_management_permission_with_low_weight() { runtime_args! 
{ ARG_STAGE => String::from("test-permission-denied") }, ) .build(); - InMemoryWasmTestBuilder::default() - .run_genesis(&DEFAULT_RUN_GENESIS_REQUEST) + LmdbWasmTestBuilder::default() + .run_genesis(LOCAL_GENESIS_REQUEST.clone()) .exec(exec_request_1) .expect_success() .commit() @@ -45,26 +42,24 @@ fn should_verify_key_management_permission_with_sufficient_weight() { runtime_args! { ARG_STAGE => String::from("init") }, ) .build(); - let exec_request_2 = { - let deploy = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_empty_payment_bytes(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, }) - // This test verifies that all key management operations succeed - .with_session_code( - "key_management_thresholds.wasm", - runtime_args! { ARG_STAGE => String::from("test-key-mgmnt-succeed") }, - ) - .with_deploy_hash([2u8; 32]) - .with_authorization_keys(&[ - *DEFAULT_ACCOUNT_ADDR, - // Key [42; 32] is created in init stage - AccountHash::new([42; 32]), - ]) - .build(); - ExecuteRequestBuilder::from_deploy_item(deploy).build() - }; - InMemoryWasmTestBuilder::default() - .run_genesis(&DEFAULT_RUN_GENESIS_REQUEST) + let deploy = DeployItemBuilder::new() + .with_address(*DEFAULT_ACCOUNT_ADDR) + .with_standard_payment(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, }) + // This test verifies that all key management operations succeed + .with_session_code( + "key_management_thresholds.wasm", + runtime_args! 
{ ARG_STAGE => String::from("test-key-mgmnt-succeed") }, + ) + .with_deploy_hash([2u8; 32]) + .with_authorization_keys(&[ + *DEFAULT_ACCOUNT_ADDR, + // Key [42; 32] is created in init stage + AccountHash::new([42; 32]), + ]) + .build(); + let exec_request_2 = ExecuteRequestBuilder::from_deploy_item(&deploy).build(); + LmdbWasmTestBuilder::default() + .run_genesis(LOCAL_GENESIS_REQUEST.clone()) .exec(exec_request_1) .expect_success() .commit() diff --git a/execution_engine_testing/tests/src/test/contract_api/account/mod.rs b/execution_engine_testing/tests/src/test/contract_api/account/mod.rs index 0bdf371daa..053ab3aeff 100644 --- a/execution_engine_testing/tests/src/test/contract_api/account/mod.rs +++ b/execution_engine_testing/tests/src/test/contract_api/account/mod.rs @@ -2,3 +2,4 @@ mod associated_keys; mod authorized_keys; mod key_management_thresholds; mod named_keys; +mod named_keys_stored; diff --git a/execution_engine_testing/tests/src/test/contract_api/account/named_keys.rs b/execution_engine_testing/tests/src/test/contract_api/account/named_keys.rs index eefa12dcd6..7ecf477239 100644 --- a/execution_engine_testing/tests/src/test/contract_api/account/named_keys.rs +++ b/execution_engine_testing/tests/src/test/contract_api/account/named_keys.rs @@ -1,10 +1,7 @@ -use std::convert::TryFrom; - use casper_engine_test_support::{ - internal::{ExecuteRequestBuilder, InMemoryWasmTestBuilder, DEFAULT_RUN_GENESIS_REQUEST}, - DEFAULT_ACCOUNT_ADDR, + ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST, }; -use casper_types::{bytesrepr::FromBytes, runtime_args, CLTyped, CLValue, Key, RuntimeArgs, U512}; +use casper_types::{bytesrepr::FromBytes, runtime_args, CLTyped, CLValue, Key, U512}; const CONTRACT_NAMED_KEYS: &str = "named_keys.wasm"; const EXPECTED_UREF_VALUE: u64 = 123_456_789u64; @@ -22,21 +19,17 @@ const COMMAND_INCREASE_UREF2: &str = "increase-uref2"; const COMMAND_OVERWRITE_UREF2: &str = "overwrite-uref2"; const 
ARG_COMMAND: &str = "command"; -fn run_command(builder: &mut InMemoryWasmTestBuilder, command: &str) { +fn run_command(builder: &mut LmdbWasmTestBuilder, command: &str) { let exec_request = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, CONTRACT_NAMED_KEYS, runtime_args! { ARG_COMMAND => command }, ) .build(); - builder - .exec(exec_request) - .commit() - .expect_success() - .finish(); + builder.exec(exec_request).commit().expect_success(); } -fn read_value(builder: &mut InMemoryWasmTestBuilder, key: Key) -> T { +fn read_value(builder: &mut LmdbWasmTestBuilder, key: Key) -> T { CLValue::try_from(builder.query(None, key, &[]).expect("should have value")) .expect("should have CLValue") .into_t() @@ -46,25 +39,23 @@ fn read_value(builder: &mut InMemoryWasmTestBuilder, key #[ignore] #[test] fn should_run_named_keys_contract() { - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); run_command(&mut builder, COMMAND_CREATE_UREF1); - let account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) - .expect("should have account"); - assert!(account.named_keys().contains_key(KEY1)); - assert!(!account.named_keys().contains_key(KEY2)); + let named_keys = builder.get_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR); + + assert!(named_keys.contains(KEY1)); + assert!(!named_keys.contains(KEY2)); run_command(&mut builder, COMMAND_CREATE_UREF2); - let account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) - .expect("should have account"); - let uref1 = *account.named_keys().get(KEY1).expect("should have key"); - let uref2 = *account.named_keys().get(KEY2).expect("should have key"); + let named_keys = builder.get_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR); + + let uref1 = *named_keys.get(KEY1).expect("should have key"); + let uref2 = *named_keys.get(KEY2).expect("should have key"); let value1: String = 
read_value(&mut builder, uref1); let value2: U512 = read_value(&mut builder, uref2); assert_eq!(value1, "Hello, world!"); @@ -74,37 +65,33 @@ fn should_run_named_keys_contract() { run_command(&mut builder, COMMAND_REMOVE_UREF1); - let account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) - .expect("should have account"); - assert!(!account.named_keys().contains_key(KEY1)); - assert!(account.named_keys().contains_key(KEY2)); + let named_keys = builder.get_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR); + + assert!(!named_keys.contains(KEY1)); + assert!(named_keys.contains(KEY2)); run_command(&mut builder, COMMAND_TEST_READ_UREF2); run_command(&mut builder, COMMAND_INCREASE_UREF2); - let account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) - .expect("should have account"); - let uref2 = *account.named_keys().get(KEY2).expect("should have key"); + let named_keys = builder.get_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR); + + let uref2 = *named_keys.get(KEY2).expect("should have key"); let value2: U512 = read_value(&mut builder, uref2); assert_eq!(value2, U512::zero()); run_command(&mut builder, COMMAND_OVERWRITE_UREF2); - let account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) - .expect("should have account"); - let uref2 = *account.named_keys().get(KEY2).expect("should have key"); + let named_keys = builder.get_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR); + + let uref2 = *named_keys.get(KEY2).expect("should have key"); let value2: U512 = read_value(&mut builder, uref2); assert_eq!(value2, U512::from(EXPECTED_UREF_VALUE)); run_command(&mut builder, COMMAND_REMOVE_UREF2); - let account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) - .expect("should have account"); - assert!(!account.named_keys().contains_key(KEY1)); - assert!(!account.named_keys().contains_key(KEY2)); + let named_keys = builder.get_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR); + + assert!(!named_keys.contains(KEY1)); + assert!(!named_keys.contains(KEY2)); } diff --git 
a/execution_engine_testing/tests/src/test/contract_api/account/named_keys_stored.rs b/execution_engine_testing/tests/src/test/contract_api/account/named_keys_stored.rs new file mode 100644 index 0000000000..2fc1c1fd8d --- /dev/null +++ b/execution_engine_testing/tests/src/test/contract_api/account/named_keys_stored.rs @@ -0,0 +1,106 @@ +use casper_engine_test_support::{ + ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST, +}; +use casper_execution_engine::execution::ExecError; +use casper_types::{runtime_args, ApiError, RuntimeArgs}; + +const CONTRACT_HASH_NAME: &str = "contract_stored"; +const ENTRY_POINT_CONTRACT: &str = "named_keys_contract"; +const ENTRY_POINT_SESSION: &str = "named_keys_session"; +const ENTRY_POINT_CONTRACT_TO_CONTRACT: &str = "named_keys_contract_to_contract"; + +#[ignore] +#[test] +fn should_run_stored_named_keys_contract() { + let mut builder = setup(); + let exec_request_1 = ExecuteRequestBuilder::contract_call_by_name( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_HASH_NAME, + ENTRY_POINT_CONTRACT, + RuntimeArgs::default(), + ) + .build(); + + builder.exec(exec_request_1).expect_success().commit(); +} + +#[ignore] +#[test] +fn should_run_stored_named_keys_session() { + let mut builder = setup(); + + let exec_request_1 = ExecuteRequestBuilder::contract_call_by_name( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_HASH_NAME, + ENTRY_POINT_SESSION, + RuntimeArgs::default(), + ) + .build(); + + builder.exec(exec_request_1).expect_failure(); + + let expected_error = + casper_execution_engine::engine_state::Error::Exec(ExecError::Revert(ApiError::User(0))); + + builder.assert_error(expected_error) +} + +#[ignore] +#[test] +fn should_run_stored_named_keys_contract_to_contract() { + let mut builder = setup(); + let exec_request_1 = ExecuteRequestBuilder::contract_call_by_name( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_HASH_NAME, + ENTRY_POINT_CONTRACT_TO_CONTRACT, + RuntimeArgs::default(), + ) + .build(); + + 
builder.exec(exec_request_1).expect_success().commit(); +} + +#[ignore] +#[test] +fn should_run_stored_named_keys_module_bytes_to_contract() { + let mut builder = setup(); + let exec_request_1 = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + "named_keys_stored_call.wasm", + runtime_args! { + "entry_point" => ENTRY_POINT_CONTRACT, + }, + ) + .build(); + + builder.exec(exec_request_1).expect_success().commit(); +} + +#[ignore] +#[test] +fn should_run_stored_named_keys_module_bytes_to_contract_to_contract() { + let mut builder = setup(); + let exec_request_1 = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + "named_keys_stored_call.wasm", + runtime_args! { + "entry_point" => ENTRY_POINT_CONTRACT_TO_CONTRACT, + }, + ) + .build(); + + builder.exec(exec_request_1).expect_success().commit(); +} + +fn setup() -> LmdbWasmTestBuilder { + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + let exec_request_1 = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + "named_keys_stored.wasm", + RuntimeArgs::default(), + ) + .build(); + builder.exec(exec_request_1).expect_success().commit(); + builder +} diff --git a/execution_engine_testing/tests/src/test/contract_api/add_contract_version.rs b/execution_engine_testing/tests/src/test/contract_api/add_contract_version.rs new file mode 100644 index 0000000000..baf2446da7 --- /dev/null +++ b/execution_engine_testing/tests/src/test/contract_api/add_contract_version.rs @@ -0,0 +1,254 @@ +use std::collections::{BTreeMap, BTreeSet}; + +use crate::lmdb_fixture; +use casper_engine_test_support::{ + utils, ExecuteRequestBuilder, LmdbWasmTestBuilder, UpgradeRequestBuilder, DEFAULT_ACCOUNT_ADDR, + DEFAULT_ACCOUNT_SECRET_KEY, LOCAL_GENESIS_REQUEST, +}; +use casper_execution_engine::{ + engine_state::{Error as StateError, SessionDataV1, SessionInputData}, + execution::ExecError, +}; +use casper_types::{ + bytesrepr::{Bytes, ToBytes}, + ApiError, BlockTime, 
Digest, EraId, InitiatorAddr, Key, PricingMode, ProtocolVersion, + PublicKey, RuntimeArgs, SecretKey, TimeDiff, Timestamp, Transaction, TransactionArgs, + TransactionEntryPoint, TransactionRuntimeParams, TransactionScheduling, TransactionTarget, + TransactionV1, TransactionV1Payload, +}; + +const CONTRACT: &str = "do_nothing_stored.wasm"; +const CHAIN_NAME: &str = "a"; +const BLOCK_TIME: BlockTime = BlockTime::new(10); + +pub(crate) const ARGS_MAP_KEY: u16 = 0; +pub(crate) const TARGET_MAP_KEY: u16 = 1; +pub(crate) const ENTRY_POINT_MAP_KEY: u16 = 2; +pub(crate) const SCHEDULING_MAP_KEY: u16 = 3; + +#[ignore] +#[test] +fn should_allow_add_contract_version_via_deploy() { + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()).commit(); + + let deploy_request = + ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, CONTRACT, RuntimeArgs::new()) + .build(); + + builder.exec(deploy_request).expect_success().commit(); +} + +fn try_add_contract_version( + is_install_upgrade: bool, + should_succeed: bool, + mut builder: LmdbWasmTestBuilder, +) { + let module_bytes = utils::read_wasm_file(CONTRACT); + + let txn = new_transaction_v1_session( + is_install_upgrade, + module_bytes, + TransactionRuntimeParams::VmCasperV1, + &DEFAULT_ACCOUNT_SECRET_KEY, + ); + + let txn_request = { + let wrapped = Transaction::from(txn.clone()).clone(); + let initiator_addr = txn.initiator_addr(); + let is_standard_payment = if let PricingMode::PaymentLimited { + standard_payment, .. 
+ } = txn.pricing_mode() + { + *standard_payment + } else { + true + }; + let tx_args = txn + .deserialize_field::(ARGS_MAP_KEY) + .unwrap(); + let args = tx_args.as_named().unwrap(); + let target = txn + .deserialize_field::(TARGET_MAP_KEY) + .unwrap(); + let entry_point = txn + .deserialize_field::(ENTRY_POINT_MAP_KEY) + .unwrap(); + let session_input_data = to_v1_session_input_data( + is_standard_payment, + initiator_addr, + args, + &target, + &entry_point, + &wrapped, + ); + assert_eq!( + session_input_data.is_install_upgrade_allowed(), + is_install_upgrade, + "session_input_data should match imputed arg" + ); + ExecuteRequestBuilder::from_session_input_data(&session_input_data) + .with_block_time(BLOCK_TIME) + .build() + }; + assert_eq!( + txn_request.is_install_upgrade_allowed(), + is_install_upgrade, + "txn_request should match imputed arg" + ); + builder.exec(txn_request); + + if should_succeed { + builder.expect_success(); + } else { + builder.assert_error(StateError::Exec(ExecError::Revert( + ApiError::NotAllowedToAddContractVersion, + ))) + } +} + +pub fn new_transaction_v1_session( + is_install_upgrade: bool, + module_bytes: Bytes, + runtime: TransactionRuntimeParams, + secret_key: &SecretKey, +) -> TransactionV1 { + let timestamp = Timestamp::now(); + + let target = TransactionTarget::Session { + is_install_upgrade, + module_bytes, + runtime, + }; + let args = TransactionArgs::Named(RuntimeArgs::new()); + let entry_point = TransactionEntryPoint::Call; + let scheduling = TransactionScheduling::Standard; + let mut fields: BTreeMap = BTreeMap::new(); + + fields.insert(ARGS_MAP_KEY, args.to_bytes().unwrap().into()); + fields.insert(TARGET_MAP_KEY, target.to_bytes().unwrap().into()); + fields.insert(ENTRY_POINT_MAP_KEY, entry_point.to_bytes().unwrap().into()); + fields.insert(SCHEDULING_MAP_KEY, scheduling.to_bytes().unwrap().into()); + + let public_key = PublicKey::from(secret_key); + let initiator_addr = InitiatorAddr::from(public_key); + 
build_transaction( + CHAIN_NAME.to_string(), + timestamp, + TimeDiff::from_millis(30 * 60 * 1_000), + PricingMode::Fixed { + gas_price_tolerance: 5, + additional_computation_factor: 0, + }, + fields, + initiator_addr, + secret_key, + ) +} + +fn build_transaction( + chain_name: String, + timestamp: Timestamp, + ttl: TimeDiff, + pricing_mode: PricingMode, + fields: BTreeMap, + initiator_addr: InitiatorAddr, + secret_key: &SecretKey, +) -> TransactionV1 { + let transaction_v1_payload = TransactionV1Payload::new( + chain_name, + timestamp, + ttl, + pricing_mode, + initiator_addr, + fields, + ); + let hash = Digest::hash( + transaction_v1_payload + .to_bytes() + .unwrap_or_else(|error| panic!("should serialize body: {}", error)), + ); + let mut transaction = TransactionV1::new(hash.into(), transaction_v1_payload, BTreeSet::new()); + transaction.sign(secret_key); + transaction +} + +fn to_v1_session_input_data<'a>( + is_standard_payment: bool, + initiator_addr: &'a InitiatorAddr, + args: &'a RuntimeArgs, + target: &'a TransactionTarget, + entry_point: &'a TransactionEntryPoint, + txn: &'a Transaction, +) -> SessionInputData<'a> { + let is_install_upgrade = match target { + TransactionTarget::Session { + is_install_upgrade, .. 
+ } => *is_install_upgrade, + _ => false, + }; + match txn { + Transaction::Deploy(_) => panic!("unexpected deploy transaction"), + Transaction::V1(transaction_v1) => { + let data = SessionDataV1::new( + args, + target, + entry_point, + is_install_upgrade, + transaction_v1.hash(), + transaction_v1.pricing_mode(), + initiator_addr, + txn.signers().clone(), + is_standard_payment, + ); + SessionInputData::SessionDataV1 { data } + } + } +} + +#[ignore] +#[test] +fn should_allow_add_contract_version_via_transaction_v1_installer_upgrader() { + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()).commit(); + try_add_contract_version(true, true, builder) +} + +#[ignore] +#[test] +fn should_disallow_add_contract_version_via_transaction_v1_standard() { + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()).commit(); + try_add_contract_version(false, false, builder) +} + +#[ignore] +#[test] +fn should_allow_1x_user_to_add_contract_version_via_transaction_v1_installer_upgrader() { + let (mut builder, lmdb_fixture_state, _temp_dir) = + lmdb_fixture::builder_from_global_state_fixture_with_enable_ae( + lmdb_fixture::RELEASE_1_5_8, + true, + ); + let old_protocol_version = lmdb_fixture_state.genesis_protocol_version(); + + let mut upgrade_request = UpgradeRequestBuilder::new() + .with_current_protocol_version(old_protocol_version) + .with_new_protocol_version(ProtocolVersion::from_parts(2, 0, 0)) + .with_activation_point(EraId::new(1)) + .with_enable_addressable_entity(true) + .build(); + + builder + .upgrade(&mut upgrade_request) + .expect_upgrade_success(); + + let account_as_1x = builder + .query(None, Key::Account(*DEFAULT_ACCOUNT_ADDR), &[]) + .expect("must have stored value") + .as_account() + .is_some(); + + assert!(account_as_1x); + try_add_contract_version(true, true, builder) +} diff --git a/execution_engine_testing/tests/src/test/contract_api/blake2b.rs 
b/execution_engine_testing/tests/src/test/contract_api/blake2b.rs deleted file mode 100644 index 077f2ed30d..0000000000 --- a/execution_engine_testing/tests/src/test/contract_api/blake2b.rs +++ /dev/null @@ -1,62 +0,0 @@ -use rand::Rng; - -use casper_engine_test_support::{ - internal::{ExecuteRequestBuilder, InMemoryWasmTestBuilder, DEFAULT_RUN_GENESIS_REQUEST}, - DEFAULT_ACCOUNT_ADDR, -}; -use casper_types::{account, runtime_args, RuntimeArgs, BLAKE2B_DIGEST_LENGTH}; - -const BLAKE2B_WASM: &str = "blake2b.wasm"; -const ARG_BYTES: &str = "bytes"; -const HASH_RESULT: &str = "hash_result"; - -fn get_digest(builder: &InMemoryWasmTestBuilder) -> [u8; BLAKE2B_DIGEST_LENGTH] { - let account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) - .expect("should have account"); - - let uref = account - .named_keys() - .get(HASH_RESULT) - .expect("should have value"); - - builder - .query(None, *uref, &[]) - .expect("should query") - .as_cl_value() - .cloned() - .expect("should be CLValue") - .into_t() - .expect("should convert") -} - -#[ignore] -#[test] -fn should_hash() { - const INPUT_LENGTH: usize = 32; - const RUNS: usize = 100; - - let mut rng = rand::thread_rng(); - let mut builder = InMemoryWasmTestBuilder::default(); - - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); - - for _ in 0..RUNS { - let input: [u8; INPUT_LENGTH] = rng.gen(); - - let exec_request = ExecuteRequestBuilder::standard( - *DEFAULT_ACCOUNT_ADDR, - BLAKE2B_WASM, - runtime_args! 
{ - ARG_BYTES => input - }, - ) - .build(); - - builder.exec(exec_request).commit().expect_success(); - - let digest = get_digest(&builder); - let expected_digest = account::blake2b(&input); - assert_eq!(digest, expected_digest); - } -} diff --git a/execution_engine_testing/tests/src/test/contract_api/create_purse.rs b/execution_engine_testing/tests/src/test/contract_api/create_purse.rs index aa0cfc902c..1abb7fadad 100644 --- a/execution_engine_testing/tests/src/test/contract_api/create_purse.rs +++ b/execution_engine_testing/tests/src/test/contract_api/create_purse.rs @@ -1,12 +1,10 @@ use once_cell::sync::Lazy; use casper_engine_test_support::{ - internal::{ - ExecuteRequestBuilder, WasmTestBuilder, DEFAULT_PAYMENT, DEFAULT_RUN_GENESIS_REQUEST, - }, - DEFAULT_ACCOUNT_ADDR, + ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, DEFAULT_PAYMENT, + LOCAL_GENESIS_REQUEST, }; -use casper_types::{account::AccountHash, runtime_args, RuntimeArgs, U512}; +use casper_types::{account::AccountHash, runtime_args, U512}; const CONTRACT_CREATE_PURSE_01: &str = "create_purse_01.wasm"; const CONTRACT_TRANSFER_PURSE_TO_ACCOUNT: &str = "transfer_purse_to_account.wasm"; @@ -33,21 +31,21 @@ fn should_insert_account_into_named_keys() { ) .build(); - let mut builder = WasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); builder.exec(exec_request_1).expect_success().commit(); builder.exec(exec_request_2).expect_success().commit(); - let account_1 = builder - .get_account(ACCOUNT_1_ADDR) + let contract_1 = builder + .get_entity_with_named_keys_by_account_hash(ACCOUNT_1_ADDR) .expect("should have account"); assert!( - account_1.named_keys().contains_key(TEST_PURSE_NAME), - "account_1 named_keys should include test purse" + contract_1.named_keys().contains(TEST_PURSE_NAME), + "contract_1 named_keys should include test purse" ); } @@ -67,29 
+65,28 @@ fn should_create_usable_purse() { runtime_args! { ARG_PURSE_NAME => TEST_PURSE_NAME }, ) .build(); - let result = WasmTestBuilder::default() - .run_genesis(&DEFAULT_RUN_GENESIS_REQUEST) + let mut builder = LmdbWasmTestBuilder::default(); + builder + .run_genesis(LOCAL_GENESIS_REQUEST.clone()) .exec(exec_request_1) .expect_success() .commit() .exec(exec_request_2) .expect_success() - .commit() - .finish(); + .commit(); - let account_1 = result - .builder() - .get_account(ACCOUNT_1_ADDR) + let contract_1 = builder + .get_entity_with_named_keys_by_account_hash(ACCOUNT_1_ADDR) .expect("should have account"); - let purse = account_1 + let purse = contract_1 .named_keys() .get(TEST_PURSE_NAME) .expect("should have known key") .into_uref() .expect("should have uref"); - let purse_balance = result.builder().get_purse_balance(purse); + let purse_balance = builder.get_purse_balance(purse); assert!( purse_balance.is_zero(), "when created directly a purse has 0 balance" diff --git a/execution_engine_testing/tests/src/test/contract_api/dictionary.rs b/execution_engine_testing/tests/src/test/contract_api/dictionary.rs new file mode 100644 index 0000000000..e17a37c0d5 --- /dev/null +++ b/execution_engine_testing/tests/src/test/contract_api/dictionary.rs @@ -0,0 +1,704 @@ +use std::{convert::TryFrom, path::PathBuf}; + +use casper_engine_test_support::{ + utils::create_genesis_config, DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder, + TransferRequestBuilder, ARG_AMOUNT, DEFAULT_ACCOUNTS, DEFAULT_ACCOUNT_ADDR, + DEFAULT_ACCOUNT_INITIAL_BALANCE, DEFAULT_ACCOUNT_PUBLIC_KEY, DEFAULT_CHAINSPEC_REGISTRY, + DEFAULT_GENESIS_CONFIG_HASH, DEFAULT_PAYMENT, DEFAULT_PROTOCOL_VERSION, LOCAL_GENESIS_REQUEST, + MINIMUM_ACCOUNT_CREATION_BALANCE, +}; +use casper_execution_engine::{engine_state::Error as EngineError, execution::ExecError}; +use casper_storage::data_access_layer::GenesisRequest; +use casper_types::{ + account::AccountHash, addressable_entity::EntityKindTag, 
runtime_args, AccessRights, + AddressableEntityHash, ApiError, CLType, CLValue, GenesisAccount, Key, Motes, RuntimeArgs, + StoredValue, +}; + +use dictionary_call::{NEW_DICTIONARY_ITEM_KEY, NEW_DICTIONARY_VALUE}; + +const DICTIONARY_WASM: &str = "dictionary.wasm"; +const DICTIONARY_CALL_WASM: &str = "dictionary_call.wasm"; +const DICTIONARY_ITEM_KEY_CHECK: &str = "dictionary-item-key-check.wasm"; +const DICTIONARY_READ: &str = "dictionary_read.wasm"; +const READ_FROM_KEY: &str = "read_from_key.wasm"; +const ACCOUNT_1_ADDR: AccountHash = AccountHash::new([1u8; 32]); + +fn setup() -> (LmdbWasmTestBuilder, AddressableEntityHash) { + let mut builder = LmdbWasmTestBuilder::default(); + + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let fund_request = + TransferRequestBuilder::new(MINIMUM_ACCOUNT_CREATION_BALANCE, ACCOUNT_1_ADDR).build(); + + let install_contract_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + DICTIONARY_WASM, + RuntimeArgs::default(), + ) + .build(); + + builder.transfer_and_commit(fund_request).expect_success(); + + builder + .exec(install_contract_request) + .commit() + .expect_success(); + + let default_account_entity = builder + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .expect("should have default account"); + + assert!(default_account_entity + .named_keys() + .contains(dictionary::MALICIOUS_KEY_NAME)); + assert!(default_account_entity + .named_keys() + .contains(dictionary::DICTIONARY_REF)); + + let entity_hash = default_account_entity + .named_keys() + .get(dictionary::CONTRACT_HASH_NAME) + .cloned() + .and_then(Key::into_entity_hash) + .expect("should have hash"); + + (builder, entity_hash) +} + +fn query_dictionary_item( + builder: &LmdbWasmTestBuilder, + key: Key, + dictionary_name: Option, + dictionary_item_key: String, +) -> Result { + let empty_path = vec![]; + let dictionary_key_bytes = dictionary_item_key.as_bytes(); + let address = match key { + Key::Hash(_) => { + if 
dictionary_name.is_none() { + return Err("No dictionary name was provided".to_string()); + } + let name = dictionary_name.unwrap(); + let named_keys = builder + .query(None, key, &[])? + .as_contract() + .expect("must get contract") + .named_keys() + .clone(); + + let dictionary_uref = named_keys + .get(&name) + .and_then(Key::as_uref) + .ok_or_else(|| "No dictionary uref was found in named keys".to_string())?; + + Key::dictionary(*dictionary_uref, dictionary_key_bytes) + } + Key::Account(_) => { + if dictionary_name.is_none() { + return Err("No dictionary name was provided".to_string()); + } + let stored_value = builder.query(None, key, &[])?; + match stored_value { + StoredValue::CLValue(cl_value) => { + let entity_hash: AddressableEntityHash = CLValue::into_t::(cl_value) + .expect("must convert to contract hash") + .into_entity_hash() + .expect("must convert to contract hash"); + + let entity_key = + Key::addressable_entity_key(EntityKindTag::Account, entity_hash); + + return query_dictionary_item( + builder, + entity_key, + dictionary_name, + dictionary_item_key, + ); + } + StoredValue::Account(account) => { + if let Some(name) = dictionary_name { + let dictionary_uref = account + .named_keys() + .get(&name) + .and_then(Key::as_uref) + .ok_or_else(|| { + "No dictionary uref was found in named keys".to_string() + })?; + + Key::dictionary(*dictionary_uref, dictionary_key_bytes) + } else { + return Err("No dictionary name was provided".to_string()); + } + } + _ => return Err("Unhandled stored value".to_string()), + } + } + Key::AddressableEntity(entity_addr) => { + if let Some(name) = dictionary_name { + let stored_value = builder.query(None, key, &[])?; + + match &stored_value { + StoredValue::AddressableEntity(_) => {} + _ => { + return Err( + "Provided base key is nether an account or a contract".to_string() + ); + } + }; + + let named_keys = builder.get_named_keys(entity_addr); + + let dictionary_uref = named_keys + .get(&name) + .and_then(Key::as_uref) + 
.ok_or_else(|| "No dictionary uref was found in named keys".to_string())?; + + Key::dictionary(*dictionary_uref, dictionary_key_bytes) + } else { + return Err("No dictionary name was provided".to_string()); + } + } + Key::URef(uref) => Key::dictionary(uref, dictionary_key_bytes), + Key::Dictionary(address) => Key::Dictionary(address), + _ => return Err("Unsupported key type for a query to a dictionary item".to_string()), + }; + builder.query(None, address, &empty_path) +} + +#[ignore] +#[test] +fn should_modify_with_owned_access_rights() { + let (mut builder, contract_hash) = setup(); + + let modify_write_request_1 = ExecuteRequestBuilder::contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + contract_hash, + dictionary::MODIFY_WRITE_ENTRYPOINT, + RuntimeArgs::default(), + ) + .build(); + let modify_write_request_2 = ExecuteRequestBuilder::contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + contract_hash, + dictionary::MODIFY_WRITE_ENTRYPOINT, + RuntimeArgs::default(), + ) + .build(); + + let contract = builder + .get_entity_with_named_keys_by_entity_hash(contract_hash) + .expect("should have account"); + + let stored_dictionary_key = contract + .named_keys() + .get(dictionary::DICTIONARY_NAME) + .expect("dictionary"); + let dictionary_seed_uref = stored_dictionary_key.into_uref().expect("should be uref"); + + let key_bytes = dictionary::DICTIONARY_PUT_KEY.as_bytes(); + let dictionary_key = Key::dictionary(dictionary_seed_uref, key_bytes); + + builder + .exec(modify_write_request_1) + .commit() + .expect_success(); + + let stored_value = builder + .query(None, dictionary_seed_uref.into(), &[]) + .expect("should have value"); + let dictionary_uref_value = stored_value + .as_cl_value() + .cloned() + .expect("should have cl value"); + assert_eq!( + dictionary_uref_value.cl_type(), + &CLType::Unit, + "created dictionary uref should be unit" + ); + + let stored_value = builder + .query(None, dictionary_key, &[]) + .expect("should have value"); + let dictionary_value = 
stored_value + .as_cl_value() + .cloned() + .expect("should have cl value"); + + let value: String = dictionary_value.into_t().expect("should be a string"); + assert_eq!(value, "Hello, world!"); + + builder + .exec(modify_write_request_2) + .commit() + .expect_success(); + + let stored_value = builder + .query(None, dictionary_key, &[]) + .expect("should have value"); + let dictionary_value = stored_value + .as_cl_value() + .cloned() + .expect("should have cl value"); + + let value: String = dictionary_value.into_t().expect("should be a string"); + assert_eq!(value, "Hello, world! Hello, world!"); +} + +#[ignore] +#[test] +fn should_not_write_with_read_access_rights() { + let (mut builder, contract_hash) = setup(); + + let call_request = ExecuteRequestBuilder::standard( + ACCOUNT_1_ADDR, + DICTIONARY_CALL_WASM, + runtime_args! { + dictionary_call::ARG_OPERATION => dictionary_call::OP_WRITE, + dictionary_call::ARG_SHARE_UREF_ENTRYPOINT => dictionary::SHARE_RO_ENTRYPOINT, + dictionary_call::ARG_CONTRACT_HASH => contract_hash, + }, + ) + .build(); + + builder.exec(call_request).commit(); + + let exec_result = builder.get_last_exec_result().expect("should have results"); + let error = exec_result.error().expect("should have error"); + assert!( + matches!( + error, + EngineError::Exec(ExecError::InvalidAccess { + required: AccessRights::WRITE + }) + ), + "Received error {:?}", + error + ); +} + +#[ignore] +#[test] +fn should_read_with_read_access_rights() { + let (mut builder, contract_hash) = setup(); + + let call_request = ExecuteRequestBuilder::standard( + ACCOUNT_1_ADDR, + DICTIONARY_CALL_WASM, + runtime_args! 
{ + dictionary_call::ARG_OPERATION => dictionary_call::OP_READ, + dictionary_call::ARG_SHARE_UREF_ENTRYPOINT => dictionary::SHARE_RO_ENTRYPOINT, + dictionary_call::ARG_CONTRACT_HASH => contract_hash, + }, + ) + .build(); + + builder.exec(call_request).expect_success().commit(); +} + +#[ignore] +#[test] +fn should_not_read_with_write_access_rights() { + let (mut builder, contract_hash) = setup(); + + let call_request = ExecuteRequestBuilder::standard( + ACCOUNT_1_ADDR, + DICTIONARY_CALL_WASM, + runtime_args! { + dictionary_call::ARG_OPERATION => dictionary_call::OP_READ, + dictionary_call::ARG_SHARE_UREF_ENTRYPOINT => dictionary::SHARE_W_ENTRYPOINT, + dictionary_call::ARG_CONTRACT_HASH => contract_hash, + }, + ) + .build(); + + builder.exec(call_request).commit(); + + let exec_result = builder.get_last_exec_result().expect("should have results"); + let error = exec_result.error().expect("should have error"); + assert!( + matches!( + error, + EngineError::Exec(ExecError::InvalidAccess { + required: AccessRights::READ + }) + ), + "Received error {:?}", + error + ); +} + +#[ignore] +#[test] +fn should_write_with_write_access_rights() { + let (mut builder, contract_hash) = setup(); + + let call_request = ExecuteRequestBuilder::standard( + ACCOUNT_1_ADDR, + DICTIONARY_CALL_WASM, + runtime_args! 
{ + dictionary_call::ARG_OPERATION => dictionary_call::OP_WRITE, + dictionary_call::ARG_SHARE_UREF_ENTRYPOINT => dictionary::SHARE_W_ENTRYPOINT, + dictionary_call::ARG_CONTRACT_HASH => contract_hash, + }, + ) + .build(); + + builder.exec(call_request).commit(); + + let contract = builder + .get_entity_with_named_keys_by_entity_hash(contract_hash) + .expect("should have account"); + + let stored_dictionary_key = contract + .named_keys() + .get(dictionary::DICTIONARY_NAME) + .expect("dictionary"); + let dictionary_root_uref = stored_dictionary_key.into_uref().expect("should be uref"); + + let dictionary_key = Key::dictionary(dictionary_root_uref, NEW_DICTIONARY_ITEM_KEY.as_bytes()); + + let result = builder + .query(None, dictionary_key, &[]) + .expect("should query"); + let value = result.as_cl_value().cloned().expect("should have cl value"); + let value: String = value.into_t().expect("should get string"); + assert_eq!(value, NEW_DICTIONARY_VALUE); +} + +#[ignore] +#[test] +fn should_not_write_with_forged_uref() { + let (mut builder, contract_hash) = setup(); + + let contract = builder + .get_entity_with_named_keys_by_entity_hash(contract_hash) + .expect("should have account"); + + let stored_dictionary_key = contract + .named_keys() + .get(dictionary::DICTIONARY_NAME) + .expect("dictionary"); + let dictionary_root_uref = stored_dictionary_key.into_uref().expect("should be uref"); + + // Do some extra forging on the uref + let forged_uref = dictionary_root_uref.into_read_add_write(); + + let call_request = ExecuteRequestBuilder::standard( + ACCOUNT_1_ADDR, + DICTIONARY_CALL_WASM, + runtime_args! 
{ + dictionary_call::ARG_OPERATION => dictionary_call::OP_FORGED_UREF_WRITE, + dictionary_call::ARG_FORGED_UREF => forged_uref, + }, + ) + .build(); + + builder.exec(call_request).commit(); + + let exec_result = builder.get_last_exec_result().expect("should have results"); + let error = exec_result.error().expect("should have error"); + assert!( + matches!( + error, + EngineError::Exec(ExecError::ForgedReference(uref)) + if *uref == forged_uref + ), + "Received error {:?}", + error + ); +} + +#[ignore] +#[test] +fn should_fail_put_with_invalid_dictionary_item_key() { + let (mut builder, contract_hash) = setup(); + let contract = builder + .get_entity_with_named_keys_by_entity_hash(contract_hash) + .expect("should have account"); + + let _stored_dictionary_key = contract + .named_keys() + .get(dictionary::DICTIONARY_NAME) + .expect("dictionary"); + + let call_request = ExecuteRequestBuilder::standard( + ACCOUNT_1_ADDR, + DICTIONARY_CALL_WASM, + runtime_args! { + dictionary_call::ARG_OPERATION => dictionary_call::OP_INVALID_PUT_DICTIONARY_ITEM_KEY, + dictionary_call::ARG_CONTRACT_HASH => contract_hash + }, + ) + .build(); + + builder.exec(call_request).commit(); + let exec_result = builder.get_last_exec_result().expect("should have results"); + let error = exec_result.error().expect("should have error"); + assert!( + matches!( + error, + EngineError::Exec(ExecError::Revert(ApiError::InvalidDictionaryItemKey)) + ), + "Received error {:?}", + error + ); +} + +#[ignore] +#[test] +fn should_fail_get_with_invalid_dictionary_item_key() { + let (mut builder, contract_hash) = setup(); + let contract = builder + .get_entity_with_named_keys_by_entity_hash(contract_hash) + .expect("should have account"); + + let _stored_dictionary_key = contract + .named_keys() + .get(dictionary::DICTIONARY_NAME) + .expect("dictionary"); + + let call_request = ExecuteRequestBuilder::standard( + ACCOUNT_1_ADDR, + DICTIONARY_CALL_WASM, + runtime_args! 
{ + dictionary_call::ARG_OPERATION => dictionary_call::OP_INVALID_GET_DICTIONARY_ITEM_KEY, + dictionary_call::ARG_CONTRACT_HASH => contract_hash + }, + ) + .build(); + + builder.exec(call_request).commit(); + let exec_result = builder.get_last_exec_result().expect("should have results"); + let error = exec_result.error().expect("should have error"); + assert!( + matches!( + error, + EngineError::Exec(ExecError::Revert(ApiError::InvalidDictionaryItemKey)) + ), + "Received error {:?}", + error + ); +} + +#[ignore] +#[test] +fn dictionary_put_should_fail_with_large_item_key() { + let mut builder = LmdbWasmTestBuilder::default(); + + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let fund_request = + TransferRequestBuilder::new(MINIMUM_ACCOUNT_CREATION_BALANCE, ACCOUNT_1_ADDR).build(); + + let install_contract_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + DICTIONARY_ITEM_KEY_CHECK, + runtime_args! { + "dictionary-operation" => "put" + }, + ) + .build(); + + builder.transfer_and_commit(fund_request).expect_success(); + builder.exec(install_contract_request).commit(); + let exec_result = builder.get_last_exec_result().expect("should have results"); + let error = exec_result.error().expect("should have error"); + assert!( + matches!( + error, + EngineError::Exec(ExecError::Revert(ApiError::DictionaryItemKeyExceedsLength)) + ), + "Received error {:?}", + error + ); +} + +#[ignore] +#[test] +fn dictionary_get_should_fail_with_large_item_key() { + let mut builder = LmdbWasmTestBuilder::default(); + + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let fund_request = + TransferRequestBuilder::new(MINIMUM_ACCOUNT_CREATION_BALANCE, ACCOUNT_1_ADDR).build(); + + let install_contract_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + DICTIONARY_ITEM_KEY_CHECK, + runtime_args! 
{ + "dictionary-operation" => "get" + }, + ) + .build(); + + builder.transfer_and_commit(fund_request).expect_success(); + builder.exec(install_contract_request).commit(); + let exec_result = builder.get_last_exec_result().expect("should have results"); + let error = exec_result.error().expect("should have error"); + assert!( + matches!( + error, + EngineError::Exec(ExecError::Revert(ApiError::DictionaryItemKeyExceedsLength)) + ), + "Received error {:?}", + error + ); +} + +#[ignore] +#[test] +fn should_query_dictionary_items_with_test_builder() { + let genesis_account = GenesisAccount::account( + DEFAULT_ACCOUNT_PUBLIC_KEY.clone(), + Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE), + None, + ); + + let mut accounts = vec![genesis_account]; + accounts.extend((*DEFAULT_ACCOUNTS).clone()); + let genesis_config = create_genesis_config(accounts); + let genesis_request = GenesisRequest::new( + DEFAULT_GENESIS_CONFIG_HASH, + DEFAULT_PROTOCOL_VERSION, + genesis_config, + DEFAULT_CHAINSPEC_REGISTRY.clone(), + ); + + let dictionary_code = PathBuf::from(DICTIONARY_WASM); + let deploy_item = DeployItemBuilder::new() + .with_standard_payment(runtime_args! 
{ARG_AMOUNT => *DEFAULT_PAYMENT}) + .with_session_code(dictionary_code, RuntimeArgs::new()) + .with_address(*DEFAULT_ACCOUNT_ADDR) + .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) + .with_deploy_hash([42; 32]) + .build(); + + let exec_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); + + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(genesis_request).commit(); + + builder.exec(exec_request).commit().expect_success(); + + let default_account = builder + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .expect("should have account"); + + let entity_hash = default_account + .named_keys() + .get(dictionary::CONTRACT_HASH_NAME) + .expect("should have contract") + .into_entity_hash() + .expect("should have hash"); + + let dictionary_uref = default_account + .named_keys() + .get(dictionary::DICTIONARY_REF) + .expect("should have dictionary uref") + .into_uref() + .expect("should have URef"); + + { + // Query through account's named keys + let queried_value = query_dictionary_item( + &builder, + Key::from(*DEFAULT_ACCOUNT_ADDR), + Some(dictionary::DICTIONARY_REF.to_string()), + dictionary::DEFAULT_DICTIONARY_NAME.to_string(), + ) + .expect("should query"); + let value = CLValue::try_from(queried_value).expect("should have cl value"); + let value: String = value.into_t().expect("should be string"); + assert_eq!(value, dictionary::DEFAULT_DICTIONARY_VALUE); + } + + { + // Query through account's named keys + let queried_value = query_dictionary_item( + &builder, + Key::from(*DEFAULT_ACCOUNT_ADDR), + Some(dictionary::DICTIONARY_REF.to_string()), + dictionary::DEFAULT_DICTIONARY_NAME.to_string(), + ) + .expect("should query"); + let value = CLValue::try_from(queried_value).expect("should have cl value"); + let value: String = value.into_t().expect("should be string"); + assert_eq!(value, dictionary::DEFAULT_DICTIONARY_VALUE); + } + + { + // Query through contract's named keys + let queried_value = 
query_dictionary_item( + &builder, + Key::Hash(entity_hash.value()), + Some(dictionary::DICTIONARY_NAME.to_string()), + dictionary::DEFAULT_DICTIONARY_NAME.to_string(), + ) + .expect("should query"); + let value = CLValue::try_from(queried_value).expect("should have cl value"); + let value: String = value.into_t().expect("should be string"); + assert_eq!(value, dictionary::DEFAULT_DICTIONARY_VALUE); + } + + { + // Query through dictionary URef itself + let queried_value = query_dictionary_item( + &builder, + Key::from(dictionary_uref), + None, + dictionary::DEFAULT_DICTIONARY_NAME.to_string(), + ) + .expect("should query"); + let value = CLValue::try_from(queried_value).expect("should have cl value"); + let value: String = value.into_t().expect("should be string"); + assert_eq!(value, dictionary::DEFAULT_DICTIONARY_VALUE); + } + + { + // Query by computed dictionary item key + let dictionary_item_name = dictionary::DEFAULT_DICTIONARY_NAME.as_bytes(); + let dictionary_item_key = Key::dictionary(dictionary_uref, dictionary_item_name); + + let queried_value = + query_dictionary_item(&builder, dictionary_item_key, None, String::new()) + .expect("should query"); + let value = CLValue::try_from(queried_value).expect("should have cl value"); + let value: String = value.into_t().expect("should be string"); + assert_eq!(value, dictionary::DEFAULT_DICTIONARY_VALUE); + } +} + +#[ignore] +#[test] +fn should_be_able_to_perform_dictionary_read() { + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let dictionary_session_call = + ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, DICTIONARY_READ, RuntimeArgs::new()) + .build(); + + builder + .exec(dictionary_session_call) + .expect_success() + .commit(); +} + +#[ignore] +#[test] +fn should_be_able_to_perform_read_from_key() { + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let read_from_key_session_call = + 
ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, READ_FROM_KEY, RuntimeArgs::new()) + .build(); + + builder + .exec(read_from_key_session_call) + .expect_success() + .commit(); +} diff --git a/execution_engine_testing/tests/src/test/contract_api/generic_hash.rs b/execution_engine_testing/tests/src/test/contract_api/generic_hash.rs new file mode 100644 index 0000000000..2fcb5883c5 --- /dev/null +++ b/execution_engine_testing/tests/src/test/contract_api/generic_hash.rs @@ -0,0 +1,69 @@ +use casper_engine_test_support::{ + ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST, +}; +use casper_types::{runtime_args, HashAlgorithm}; + +const GENERIC_HASH_WASM: &str = "generic_hash.wasm"; + +#[ignore] +#[test] +fn should_run_generic_hash_blake2() { + LmdbWasmTestBuilder::default() + .run_genesis(LOCAL_GENESIS_REQUEST.clone()) + .exec( + ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + GENERIC_HASH_WASM, + runtime_args! { + "data" => "blake2 hash test", + "algorithm" => HashAlgorithm::Blake2b as u8, + "expected" => [0x0A, 0x24, 0xA2, 0xDF, 0x30, 0x46, 0x1F, 0xA9, 0x69, 0x36, 0x67, 0x97, 0xE4, 0xD4, 0x30, 0xA1, 0x13, 0xC6, 0xCE, 0xE2, 0x78, 0xB5, 0xEF, 0x63, 0xBD, 0x5D, 0x00, 0xA0, 0xA6, 0x61, 0x1E, 0x29] + }, + ) + .build(), + ) + .expect_success() + .commit(); +} + +#[ignore] +#[test] +fn should_run_generic_hash_blake3() { + LmdbWasmTestBuilder::default() + .run_genesis(LOCAL_GENESIS_REQUEST.clone()) + .exec( + ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + GENERIC_HASH_WASM, + runtime_args! 
{ + "data" => "blake3 hash test", + "algorithm" => HashAlgorithm::Blake3 as u8, + "expected" => [0x01, 0x65, 0x7D, 0x50, 0x0C, 0x51, 0x9B, 0xB6, 0x8D, 0x01, 0x26, 0x53, 0x66, 0xE2, 0x72, 0x2E, 0x1A, 0x05, 0x65, 0x2E, 0xD7, 0x0C, 0x77, 0xB0, 0x06, 0x80, 0xF8, 0xE8, 0x9E, 0xF9, 0x0F, 0xA1] + }, + ) + .build(), + ) + .expect_success() + .commit(); +} + +#[ignore] +#[test] +fn should_run_generic_hash_sha256() { + LmdbWasmTestBuilder::default() + .run_genesis(LOCAL_GENESIS_REQUEST.clone()) + .exec( + ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + GENERIC_HASH_WASM, + runtime_args! { + "data" => "sha256 hash test", + "algorithm" => HashAlgorithm::Sha256 as u8, + "expected" => [0x29, 0xD2, 0xC7, 0x7B, 0x39, 0x7F, 0xF6, 0x9E, 0x25, 0x0D, 0x81, 0xA3, 0xBA, 0xBB, 0x32, 0xDE, 0xFF, 0x3C, 0x2D, 0x06, 0xC9, 0x8E, 0x5E, 0x73, 0x60, 0x54, 0x3C, 0xE4, 0x91, 0xAC, 0x81, 0xCA] + }, + ) + .build(), + ) + .expect_success() + .commit(); +} diff --git a/execution_engine_testing/tests/src/test/contract_api/get_arg.rs b/execution_engine_testing/tests/src/test/contract_api/get_arg.rs index 5a1b7629d3..112f0fc9a5 100644 --- a/execution_engine_testing/tests/src/test/contract_api/get_arg.rs +++ b/execution_engine_testing/tests/src/test/contract_api/get_arg.rs @@ -1,8 +1,5 @@ use casper_engine_test_support::{ - internal::{ - utils, ExecuteRequestBuilder, InMemoryWasmTestBuilder, DEFAULT_RUN_GENESIS_REQUEST, - }, - DEFAULT_ACCOUNT_ADDR, + ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST, }; use casper_types::{runtime_args, ApiError, RuntimeArgs, U512}; @@ -17,22 +14,17 @@ const ARG_VALUE1: &str = "value1"; fn call_get_arg(args: RuntimeArgs) -> Result<(), String> { let exec_request = ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, CONTRACT_GET_ARG, args).build(); - let result = InMemoryWasmTestBuilder::default() - .run_genesis(&DEFAULT_RUN_GENESIS_REQUEST) + let mut builder = LmdbWasmTestBuilder::default(); + builder + 
.run_genesis(LOCAL_GENESIS_REQUEST.clone()) .exec(exec_request) - .commit() - .finish(); + .commit(); - if !result.builder().is_error() { + if !builder.is_error() { return Ok(()); } - let response = result - .builder() - .get_exec_result(0) - .expect("should have a response"); - - let error_message = utils::get_error_message(response); + let error_message = builder.get_error_message().expect("should have a result"); Err(error_message) } @@ -44,7 +36,7 @@ fn should_use_passed_argument() { ARG_VALUE0 => ARG0_VALUE, ARG_VALUE1 => U512::from(ARG1_VALUE), }; - call_get_arg(args).expect("Should successfuly call get_arg with 2 valid args"); + call_get_arg(args).expect("Should successfully call get_arg with 2 valid args"); } #[ignore] diff --git a/execution_engine_testing/tests/src/test/contract_api/get_block_info.rs b/execution_engine_testing/tests/src/test/contract_api/get_block_info.rs new file mode 100644 index 0000000000..4dd3576eb2 --- /dev/null +++ b/execution_engine_testing/tests/src/test/contract_api/get_block_info.rs @@ -0,0 +1,165 @@ +use casper_engine_test_support::{ + ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST, +}; +use casper_types::{bytesrepr::ToBytes, runtime_args, BlockHash, ProtocolVersion}; + +const CONTRACT_GET_BLOCKINFO: &str = "get_blockinfo.wasm"; +const ARG_FIELD_IDX: &str = "field_idx"; + +const FIELD_IDX_BLOCK_TIME: u8 = 0; +const ARG_KNOWN_BLOCK_TIME: &str = "known_block_time"; + +#[ignore] +#[test] +fn should_run_get_block_time() { + let block_time: u64 = 42; + + let exec_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_GET_BLOCKINFO, + runtime_args! 
{ + ARG_FIELD_IDX => FIELD_IDX_BLOCK_TIME, + ARG_KNOWN_BLOCK_TIME => block_time + }, + ) + .with_block_time(block_time) + .build(); + LmdbWasmTestBuilder::default() + .run_genesis(LOCAL_GENESIS_REQUEST.clone()) + .exec(exec_request) + .commit() + .expect_success(); +} + +const FIELD_IDX_BLOCK_HEIGHT: u8 = 1; +const ARG_KNOWN_BLOCK_HEIGHT: &str = "known_block_height"; + +#[ignore] +#[test] +fn should_run_get_block_height() { + let block_height: u64 = 1; + + let exec_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_GET_BLOCKINFO, + runtime_args! { + ARG_FIELD_IDX => FIELD_IDX_BLOCK_HEIGHT, + ARG_KNOWN_BLOCK_HEIGHT => block_height + }, + ) + .with_block_height(block_height) + .build(); + LmdbWasmTestBuilder::default() + .run_genesis(LOCAL_GENESIS_REQUEST.clone()) + .exec(exec_request) + .expect_success() + .commit(); +} + +const FIELD_IDX_PARENT_BLOCK_HASH: u8 = 2; +const ARG_KNOWN_BLOCK_PARENT_HASH: &str = "known_block_parent_hash"; + +#[ignore] +#[test] +fn should_run_get_block_parent_hash() { + let block_hash = BlockHash::default(); + let digest = block_hash.inner(); + let digest_bytes = digest.to_bytes().expect("should serialize"); + let bytes = casper_types::bytesrepr::Bytes::from(digest_bytes); + + let exec_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_GET_BLOCKINFO, + runtime_args! 
{ + ARG_FIELD_IDX => FIELD_IDX_PARENT_BLOCK_HASH, + ARG_KNOWN_BLOCK_PARENT_HASH => bytes + }, + ) + .with_parent_block_hash(block_hash) + .build(); + LmdbWasmTestBuilder::default() + .run_genesis(LOCAL_GENESIS_REQUEST.clone()) + .exec(exec_request) + .expect_success() + .commit(); +} + +const FIELD_IDX_STATE_HASH: u8 = 3; +const ARG_KNOWN_STATE_HASH: &str = "known_state_hash"; + +#[ignore] +#[test] +fn should_run_get_state_hash() { + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let state_hash = builder.get_post_state_hash(); + let digest_bytes = state_hash.to_bytes().expect("should serialize"); + let bytes = casper_types::bytesrepr::Bytes::from(digest_bytes); + + let exec_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_GET_BLOCKINFO, + runtime_args! { + ARG_FIELD_IDX => FIELD_IDX_STATE_HASH, + ARG_KNOWN_STATE_HASH => bytes + }, + ) + .with_state_hash(state_hash) + .build(); + + builder.exec(exec_request).expect_success().commit(); +} + +const FIELD_IDX_PROTOCOL_VERSION: u8 = 4; +const ARG_KNOWN_PROTOCOL_VERSION: &str = "known_protocol_version"; + +#[ignore] +#[test] +fn should_run_get_protocol_version() { + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let protocol_version = ProtocolVersion::V2_0_0; + let protocol_version_bytes = protocol_version.to_bytes().expect("should_serialize"); + let bytes = casper_types::bytesrepr::Bytes::from(protocol_version_bytes); + + let exec_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_GET_BLOCKINFO, + runtime_args! 
{ + ARG_FIELD_IDX => FIELD_IDX_PROTOCOL_VERSION, + ARG_KNOWN_PROTOCOL_VERSION => bytes + }, + ) + .build(); + + builder.exec(exec_request).expect_success().commit(); +} + +const FIELD_IDX_ADDRESSABLE_ENTITY: u8 = 5; +const ARG_KNOWN_ADDRESSABLE_ENTITY: &str = "known_addressable_entity"; + +#[ignore] +#[test] +fn should_run_get_addressable_entity() { + let addressable_entity: bool = false; + let addressable_entity_bytes = addressable_entity.to_bytes().expect("should_serialize"); + let bytes = casper_types::bytesrepr::Bytes::from(addressable_entity_bytes); + + let exec_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_GET_BLOCKINFO, + runtime_args! { + ARG_FIELD_IDX => FIELD_IDX_ADDRESSABLE_ENTITY, + ARG_KNOWN_ADDRESSABLE_ENTITY => bytes + }, + ) + .build(); + + LmdbWasmTestBuilder::default() + .run_genesis(LOCAL_GENESIS_REQUEST.clone()) + .exec(exec_request) + .commit() + .expect_success(); +} diff --git a/execution_engine_testing/tests/src/test/contract_api/get_blocktime.rs b/execution_engine_testing/tests/src/test/contract_api/get_blocktime.rs index 0499a5cf08..0b65a29dca 100644 --- a/execution_engine_testing/tests/src/test/contract_api/get_blocktime.rs +++ b/execution_engine_testing/tests/src/test/contract_api/get_blocktime.rs @@ -1,8 +1,7 @@ use casper_engine_test_support::{ - internal::{ExecuteRequestBuilder, InMemoryWasmTestBuilder, DEFAULT_RUN_GENESIS_REQUEST}, - DEFAULT_ACCOUNT_ADDR, + ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST, }; -use casper_types::{runtime_args, RuntimeArgs}; +use casper_types::runtime_args; const CONTRACT_GET_BLOCKTIME: &str = "get_blocktime.wasm"; const ARG_KNOWN_BLOCK_TIME: &str = "known_block_time"; @@ -19,8 +18,8 @@ fn should_run_get_blocktime_contract() { ) .with_block_time(block_time) .build(); - InMemoryWasmTestBuilder::default() - .run_genesis(&DEFAULT_RUN_GENESIS_REQUEST) + LmdbWasmTestBuilder::default() + .run_genesis(LOCAL_GENESIS_REQUEST.clone()) 
.exec(exec_request) .commit() .expect_success(); diff --git a/execution_engine_testing/tests/src/test/contract_api/get_call_stack.rs b/execution_engine_testing/tests/src/test/contract_api/get_call_stack.rs new file mode 100644 index 0000000000..cd3f234e67 --- /dev/null +++ b/execution_engine_testing/tests/src/test/contract_api/get_call_stack.rs @@ -0,0 +1,3628 @@ +use num_traits::One; + +use casper_engine_test_support::{ + ExecuteRequest, ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, + LOCAL_GENESIS_REQUEST, +}; +use casper_execution_engine::{engine_state::Error as CoreError, execution::ExecError}; +use casper_types::{ + account::{Account, AccountHash}, + contracts::{ContractHash, ContractPackageHash}, + runtime_args, + system::{Caller, CallerInfo}, + CLValue, EntityAddr, EntryPointType, HashAddr, Key, PackageHash, StoredValue, U512, +}; + +use get_call_stack_recursive_subcall::{ + Call, ContractAddress, ARG_CALLS, ARG_CURRENT_DEPTH, METHOD_FORWARDER_CONTRACT_NAME, + METHOD_FORWARDER_SESSION_NAME, +}; + +const CONTRACT_RECURSIVE_SUBCALL: &str = "get_call_stack_recursive_subcall.wasm"; +const CONTRACT_CALL_RECURSIVE_SUBCALL: &str = "get_call_stack_call_recursive_subcall.wasm"; + +const CONTRACT_PACKAGE_NAME: &str = "forwarder"; +const CONTRACT_NAME: &str = "our_contract_name"; + +const CONTRACT_FORWARDER_ENTRYPOINT_CONTRACT: &str = METHOD_FORWARDER_CONTRACT_NAME; +const CONTRACT_FORWARDER_ENTRYPOINT_SESSION: &str = METHOD_FORWARDER_SESSION_NAME; + +fn stored_session(contract_hash: ContractHash) -> Call { + Call { + contract_address: ContractAddress::ContractHash(contract_hash), + target_method: CONTRACT_FORWARDER_ENTRYPOINT_SESSION.to_string(), + entry_point_type: EntryPointType::Caller, + } +} + +fn stored_versioned_session(contract_package_hash: ContractPackageHash) -> Call { + Call { + contract_address: ContractAddress::ContractPackageHash(contract_package_hash), + target_method: CONTRACT_FORWARDER_ENTRYPOINT_SESSION.to_string(), + 
entry_point_type: EntryPointType::Caller, + } +} + +fn stored_contract(contract_hash: ContractHash) -> Call { + Call { + contract_address: ContractAddress::ContractHash(contract_hash), + target_method: CONTRACT_FORWARDER_ENTRYPOINT_CONTRACT.to_string(), + entry_point_type: EntryPointType::Called, + } +} + +fn stored_versioned_contract(contract_package_hash: ContractPackageHash) -> Call { + Call { + contract_address: ContractAddress::ContractPackageHash(contract_package_hash), + target_method: CONTRACT_FORWARDER_ENTRYPOINT_CONTRACT.to_string(), + entry_point_type: EntryPointType::Called, + } +} + +fn store_contract(builder: &mut LmdbWasmTestBuilder, session_filename: &str) { + let store_contract_request = + ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, session_filename, runtime_args! {}) + .build(); + builder + .exec(store_contract_request) + .commit() + .expect_success(); +} + +fn execute_and_assert_result( + call_depth: usize, + builder: &mut LmdbWasmTestBuilder, + execute_request: ExecuteRequest, + is_invalid_context: bool, +) { + if call_depth == 0 { + builder.exec(execute_request).commit().expect_success(); + } else if is_invalid_context { + builder.exec(execute_request).commit().expect_failure(); + let error = builder.get_error().expect("must have an error"); + assert!(matches!( + error, + // Call chains have stored contract trying to call stored session which we don't + // support and is an actual error. + CoreError::Exec(ExecError::InvalidContext) + )); + } +} + +// Constant from the contracts used in the tests below. +const LARGE_AMOUNT: u64 = 1_500_000_000_000; + +// In the payment or session phase, this test will try to transfer `len + 1` times +// a fixed amount of `1_500_000_000_000` from the main purse of the account. +// We need to provide an explicit approval via passing that as an `amount` argument. 
+pub fn approved_amount(idx: usize) -> U512 { + U512::from(LARGE_AMOUNT * (idx + 1) as u64) +} + +trait AccountExt { + fn get_hash(&self, key: &str) -> HashAddr; +} + +impl AccountExt for Account { + fn get_hash(&self, key: &str) -> HashAddr { + self.named_keys() + .get(key) + .cloned() + .and_then(Key::into_hash_addr) + .unwrap() + } +} + +trait BuilderExt { + fn get_call_stack_from_session_context(&mut self, stored_call_stack_key: &str) -> Vec; + + fn get_call_stack_from_contract_context( + &mut self, + stored_call_stack_key: &str, + contract_package_hash: HashAddr, + ) -> Vec; +} + +impl BuilderExt for LmdbWasmTestBuilder { + fn get_call_stack_from_session_context(&mut self, stored_call_stack_key: &str) -> Vec { + let cl_value = self + .query( + None, + (*DEFAULT_ACCOUNT_ADDR).into(), + &[stored_call_stack_key.to_string()], + ) + .unwrap(); + + let caller_info = cl_value + .into_cl_value() + .map(CLValue::into_t::>) + .unwrap() + .unwrap(); + + let mut callers = vec![]; + + for info in caller_info { + let kind = info.kind(); + match kind { + 0 => { + let account_hash = info + .get_field_by_index(0) + .map(|val| { + val.to_t::>() + .expect("must convert out of cl_value") + }) + .expect("must have index 0 in fields") + .expect("account hash must be some"); + callers.push(Caller::Initiator { account_hash }); + } + 3 => { + let package_hash = info + .get_field_by_index(1) + .map(|val| { + val.to_t::>() + .expect("must convert out of cl_value") + }) + .expect("must have index 1 in fields") + .expect("package hash must be some"); + let entity_addr = info + .get_field_by_index(3) + .map(|val| { + val.to_t::>() + .expect("must convert out of cl_value") + }) + .expect("must have index 3 in fields") + .expect("entity addr must be some"); + callers.push(Caller::Entity { + package_hash, + entity_addr, + }); + } + 4 => { + let contract_package_hash = info + .get_field_by_index(2) + .map(|val| { + val.to_t::>() + .expect("must convert out of cl_value") + }) + .expect("must 
have index 2 in fields") + .expect("contract package hash must be some"); + let contract_hash = info + .get_field_by_index(4) + .map(|val| { + val.to_t::>() + .expect("must convert out of cl_value") + }) + .expect("must have index 4 in fields") + .expect("contract hash must be some"); + callers.push(Caller::SmartContract { + contract_package_hash, + contract_hash, + }); + } + _ => panic!("unhandled kind"), + } + } + + callers + } + + fn get_call_stack_from_contract_context( + &mut self, + stored_call_stack_key: &str, + contract_package_hash: HashAddr, + ) -> Vec { + let value = self + .query(None, Key::Hash(contract_package_hash), &[]) + .unwrap(); + + let contract_package = match value { + StoredValue::ContractPackage(package) => package, + _ => panic!("unreachable"), + }; + + let current_contract_hash = contract_package.current_contract_hash().unwrap(); + + let cl_value = self + .query( + None, + current_contract_hash.into(), + &[stored_call_stack_key.to_string()], + ) + .unwrap(); + + let stack_elements = cl_value + .into_cl_value() + .map(CLValue::into_t::>) + .unwrap() + .unwrap(); + + let mut callers = vec![]; + + for info in stack_elements { + let kind = info.kind(); + match kind { + 0 => { + let account_hash = info + .get_field_by_index(0) + .map(|val| { + val.to_t::>() + .expect("must convert out of cl_value") + }) + .expect("must have index 0 in fields") + .expect("account hash must be some"); + callers.push(Caller::Initiator { account_hash }); + } + 3 => { + let package_hash = info + .get_field_by_index(1) + .map(|val| { + val.to_t::>() + .expect("must convert out of cl_value") + }) + .expect("must have index 1 in fields") + .expect("package hash must be some"); + let entity_addr = info + .get_field_by_index(3) + .map(|val| { + val.to_t::>() + .expect("must convert out of cl_value") + }) + .expect("must have index 3 in fields") + .expect("entity addr must be some"); + callers.push(Caller::Entity { + package_hash, + entity_addr, + }); + } + 4 => { + let 
contract_package_hash = info + .get_field_by_index(2) + .map(|val| { + val.to_t::>() + .expect("must convert out of cl_value") + }) + .expect("must have index 2 in fields") + .expect("contract package hash must be some"); + let contract_hash = info + .get_field_by_index(4) + .map(|val| { + val.to_t::>() + .expect("must convert out of cl_value") + }) + .expect("must have index 4 in fields") + .expect("contract hash must be some"); + callers.push(Caller::SmartContract { + contract_package_hash, + contract_hash, + }); + } + _ => panic!("unhandled kind"), + } + } + + callers + } +} + +fn setup() -> LmdbWasmTestBuilder { + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + store_contract(&mut builder, CONTRACT_RECURSIVE_SUBCALL); + builder +} + +fn assert_each_context_has_correct_call_stack_info( + builder: &mut LmdbWasmTestBuilder, + top_level_call: Call, + mut subcalls: Vec, + current_contract_package_hash: HashAddr, +) { + let mut calls = vec![top_level_call]; + calls.append(&mut subcalls); + + // query for and verify that all the elements in the call stack match their + // pre-defined Call element + for (i, call) in calls.iter().enumerate() { + let stored_call_stack_key = format!("call_stack-{}", i); + // we need to know where to look for the call stack information + let call_stack = match call.entry_point_type { + EntryPointType::Called | EntryPointType::Factory => builder + .get_call_stack_from_contract_context( + &stored_call_stack_key, + current_contract_package_hash, + ), + EntryPointType::Caller => { + builder.get_call_stack_from_session_context(&stored_call_stack_key) + } + }; + assert_eq!( + call_stack.len(), + i + 2, + "call stack len was an unexpected size {}, should be {} {:#?}", + call_stack.len(), + i + 2, + call_stack, + ); + let (head, rest) = call_stack.split_at(usize::one()); + + assert_eq!( + head, + [Caller::Initiator { + account_hash: *DEFAULT_ACCOUNT_ADDR, + }], + ); + 
assert_call_stack_matches_calls(rest.to_vec(), &calls); + } +} + +fn assert_invalid_context(builder: &mut LmdbWasmTestBuilder, depth: usize) { + if depth == 0 { + builder.expect_success(); + } else { + let error = builder.get_error().unwrap(); + assert!(matches!( + error, + casper_execution_engine::engine_state::Error::Exec(ExecError::InvalidContext) + )); + } +} + +fn assert_each_context_has_correct_call_stack_info_module_bytes( + builder: &mut LmdbWasmTestBuilder, + subcalls: Vec, + current_contract_package_hash: HashAddr, +) { + let stored_call_stack_key = format!("call_stack-{}", 0); + let call_stack = builder.get_call_stack_from_session_context(&stored_call_stack_key); + let (head, _) = call_stack.split_at(usize::one()); + assert_eq!( + head, + [Caller::Initiator { + account_hash: *DEFAULT_ACCOUNT_ADDR, + }], + ); + + for (i, call) in (1..=subcalls.len()).zip(subcalls.iter()) { + let stored_call_stack_key = format!("call_stack-{}", i); + // we need to know where to look for the call stack information + let call_stack = match call.entry_point_type { + EntryPointType::Called | EntryPointType::Factory => builder + .get_call_stack_from_contract_context( + &stored_call_stack_key, + current_contract_package_hash, + ), + EntryPointType::Caller => { + builder.get_call_stack_from_session_context(&stored_call_stack_key) + } + }; + let (head, rest) = call_stack.split_at(usize::one()); + assert_eq!( + head, + [Caller::Initiator { + account_hash: *DEFAULT_ACCOUNT_ADDR, + }], + ); + assert_call_stack_matches_calls(rest.to_vec(), &subcalls); + } +} + +fn assert_call_stack_matches_calls(call_stack: Vec, calls: &[Call]) { + for (index, expected_call_stack_element) in call_stack.iter().enumerate() { + let maybe_call = calls.get(index); + match (maybe_call, expected_call_stack_element) { + // Versioned Call with EntryPointType::Contract + ( + Some(Call { + entry_point_type, + contract_address: + ContractAddress::ContractPackageHash(current_contract_package_hash), + .. 
+ }), + Caller::SmartContract { + contract_package_hash, + .. + }, + ) if *entry_point_type == EntryPointType::Called + && contract_package_hash.value() == current_contract_package_hash.value() => {} + + // Unversioned Call with EntryPointType::Called + ( + Some(Call { + entry_point_type, + contract_address: ContractAddress::ContractHash(current_contract_hash), + .. + }), + Caller::SmartContract { contract_hash, .. }, + ) if *entry_point_type == EntryPointType::Called + && contract_hash.value() == current_contract_hash.value() => {} + + // Versioned Call with EntryPointType::Session + ( + Some(Call { + entry_point_type, + contract_address: + ContractAddress::ContractPackageHash(current_contract_package_hash), + .. + }), + Caller::SmartContract { + contract_package_hash, + .. + }, + ) if *entry_point_type == EntryPointType::Caller + && *contract_package_hash == *current_contract_package_hash => {} + + // Unversioned Call with EntryPointType::Session + ( + Some(Call { + entry_point_type, + contract_address: ContractAddress::ContractHash(current_contract_hash), + .. + }), + Caller::SmartContract { contract_hash, .. 
}, + ) if *entry_point_type == EntryPointType::Caller + && contract_hash.value() == current_contract_hash.value() => {} + + _ => panic!( + "call stack element {:#?} didn't match expected call {:#?} at index {}, {:#?}", + expected_call_stack_element, maybe_call, index, call_stack, + ), + } + } +} + +mod session { + + use casper_engine_test_support::{ExecuteRequestBuilder, DEFAULT_ACCOUNT_ADDR}; + use casper_types::{execution::TransformKindV2, runtime_args, system::mint, Key}; + + use super::{ + approved_amount, AccountExt, ARG_CALLS, ARG_CURRENT_DEPTH, CONTRACT_CALL_RECURSIVE_SUBCALL, + CONTRACT_FORWARDER_ENTRYPOINT_CONTRACT, CONTRACT_FORWARDER_ENTRYPOINT_SESSION, + CONTRACT_NAME, CONTRACT_PACKAGE_NAME, + }; + + // DEPTHS should not contain 1, as it will eliminate the initial element from the subcalls + // vector + const DEPTHS: &[usize] = &[0, 2, 5, 10]; + + // Session + recursive subcall + + #[ignore] + #[test] + fn session_bytes_to_stored_versioned_contract() { + for len in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + + let subcalls = + vec![super::stored_versioned_contract(current_contract_package_hash.into()); *len]; + + let execute_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_CALL_RECURSIVE_SUBCALL, + runtime_args! 
{ + ARG_CALLS => subcalls.clone(), + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(*len), + }, + ) + .build(); + + builder.exec(execute_request).commit().expect_success(); + + super::assert_each_context_has_correct_call_stack_info_module_bytes( + &mut builder, + subcalls, + current_contract_package_hash, + ); + } + } + + #[ignore] + #[test] + fn session_bytes_to_stored_contract() { + for len in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let subcalls = vec![super::stored_contract(current_contract_hash.into()); *len]; + + let execute_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_CALL_RECURSIVE_SUBCALL, + runtime_args! { + ARG_CALLS => subcalls.clone(), + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(*len), + }, + ) + .build(); + + builder.exec(execute_request).commit().expect_success(); + + super::assert_each_context_has_correct_call_stack_info_module_bytes( + &mut builder, + subcalls, + current_contract_package_hash, + ); + } + } + + #[ignore] + #[test] + fn session_bytes_to_stored_versioned_contract_to_stored_contract() { + for len in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let mut subcalls = + vec![ + super::stored_versioned_contract(current_contract_package_hash.into()); + len.saturating_sub(1) + ]; + if *len > 0 { + subcalls.push(super::stored_contract(current_contract_hash.into())); + } + + let execute_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_CALL_RECURSIVE_SUBCALL, + 
runtime_args! { + ARG_CALLS => subcalls.clone(), + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(*len), + }, + ) + .build(); + + builder.exec(execute_request).commit().expect_success(); + + super::assert_each_context_has_correct_call_stack_info_module_bytes( + &mut builder, + subcalls, + current_contract_package_hash, + ); + } + } + + #[ignore] + #[test] + fn session_bytes_to_stored_contract_to_stored_versioned_contract() { + for len in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let mut subcalls = + vec![super::stored_contract(current_contract_hash.into()); len.saturating_sub(1)]; + if *len > 0 { + subcalls.push(super::stored_versioned_contract( + current_contract_package_hash.into(), + )); + } + + let execute_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_CALL_RECURSIVE_SUBCALL, + runtime_args! 
{ + ARG_CALLS => subcalls.clone(), + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(*len), + }, + ) + .build(); + + builder.exec(execute_request).commit().expect_success(); + + super::assert_each_context_has_correct_call_stack_info_module_bytes( + &mut builder, + subcalls, + current_contract_package_hash, + ); + } + } + + #[ignore] + #[test] + fn session_bytes_to_stored_versioned_session_to_stored_versioned_contract() { + for len in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + println!("{:?}", default_account); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + + let mut subcalls = + vec![ + super::stored_versioned_session(current_contract_package_hash.into()); + len.saturating_sub(1) + ]; + if *len > 0 { + subcalls.push(super::stored_versioned_contract( + current_contract_package_hash.into(), + )); + } + + let execute_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_CALL_RECURSIVE_SUBCALL, + runtime_args! 
{ + ARG_CALLS => subcalls.clone(), + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(*len), + }, + ) + .build(); + + builder.exec(execute_request).commit().expect_success(); + + super::assert_each_context_has_correct_call_stack_info_module_bytes( + &mut builder, + subcalls, + current_contract_package_hash, + ); + } + } + + #[ignore] + #[test] + fn session_bytes_to_stored_versioned_session_to_stored_contract() { + for len in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let mut subcalls = + vec![ + super::stored_versioned_session(current_contract_package_hash.into()); + len.saturating_sub(1) + ]; + if *len > 0 { + subcalls.push(super::stored_contract(current_contract_hash.into())); + } + + let execute_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_CALL_RECURSIVE_SUBCALL, + runtime_args! 
{ + ARG_CALLS => subcalls.clone(), + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(*len), + }, + ) + .build(); + + builder.exec(execute_request).commit().expect_success(); + + super::assert_each_context_has_correct_call_stack_info_module_bytes( + &mut builder, + subcalls, + current_contract_package_hash, + ); + } + } + + #[ignore] + #[test] + fn session_bytes_to_stored_session_to_stored_versioned_contract() { + for len in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let mut subcalls = + vec![super::stored_session(current_contract_hash.into()); len.saturating_sub(1)]; + if *len > 0 { + subcalls.push(super::stored_versioned_contract( + current_contract_package_hash.into(), + )); + } + + let execute_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_CALL_RECURSIVE_SUBCALL, + runtime_args! 
{ + ARG_CALLS => subcalls.clone(), + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(*len), + }, + ) + .build(); + + builder.exec(execute_request).commit().expect_success(); + + super::assert_each_context_has_correct_call_stack_info_module_bytes( + &mut builder, + subcalls, + current_contract_package_hash, + ); + } + } + + #[ignore] + #[test] + fn session_bytes_to_stored_session_to_stored_contract() { + for len in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let mut subcalls = + vec![super::stored_session(current_contract_hash.into()); len.saturating_sub(1)]; + if *len > 0 { + subcalls.push(super::stored_contract(current_contract_hash.into())); + } + + let execute_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_CALL_RECURSIVE_SUBCALL, + runtime_args! { + ARG_CALLS => subcalls.clone(), + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(*len), + }, + ) + .build(); + + builder.exec(execute_request).commit().expect_success(); + + super::assert_each_context_has_correct_call_stack_info_module_bytes( + &mut builder, + subcalls, + current_contract_package_hash, + ); + } + } + + #[ignore] + #[test] + fn session_bytes_to_stored_versioned_session() { + for len in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + + let subcalls = + vec![super::stored_versioned_session(current_contract_package_hash.into()); *len]; + + let execute_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_CALL_RECURSIVE_SUBCALL, + runtime_args! 
{ + ARG_CALLS => subcalls.clone(), + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(*len), + }, + ) + .build(); + + builder.exec(execute_request).commit().expect_success(); + + super::assert_each_context_has_correct_call_stack_info_module_bytes( + &mut builder, + subcalls, + current_contract_package_hash, + ); + } + } + + #[ignore] + #[test] + fn session_bytes_to_stored_versioned_session_to_stored_session() { + for len in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let mut subcalls = + vec![ + super::stored_versioned_session(current_contract_package_hash.into()); + len.saturating_sub(1) + ]; + if *len > 0 { + subcalls.push(super::stored_session(current_contract_hash.into())); + } + + let execute_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_CALL_RECURSIVE_SUBCALL, + runtime_args! 
{ + ARG_CALLS => subcalls.clone(), + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(*len), + }, + ) + .build(); + + builder.exec(execute_request).commit().expect_success(); + + super::assert_each_context_has_correct_call_stack_info_module_bytes( + &mut builder, + subcalls, + current_contract_package_hash, + ); + } + } + + #[ignore] + #[test] + fn session_bytes_to_stored_session_to_stored_versioned_session() { + for len in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let mut subcalls = + vec![super::stored_session(current_contract_hash.into()); len.saturating_sub(1)]; + if *len > 0 { + subcalls.push(super::stored_versioned_session( + current_contract_package_hash.into(), + )); + } + + let execute_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_CALL_RECURSIVE_SUBCALL, + runtime_args! { + ARG_CALLS => subcalls.clone(), + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(*len), + }, + ) + .build(); + + builder.exec(execute_request).commit().expect_success(); + + super::assert_each_context_has_correct_call_stack_info_module_bytes( + &mut builder, + subcalls, + current_contract_package_hash, + ); + } + } + + #[ignore] + #[test] + fn session_bytes_to_stored_session() { + for len in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let subcalls = vec![super::stored_session(current_contract_hash.into()); *len]; + + let execute_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_CALL_RECURSIVE_SUBCALL, + runtime_args! 
{ + ARG_CALLS => subcalls.clone(), + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(*len), + }, + ) + .build(); + + builder.exec(execute_request).commit().expect_success(); + + super::assert_each_context_has_correct_call_stack_info_module_bytes( + &mut builder, + subcalls, + current_contract_package_hash, + ); + } + } + + // Session + recursive subcall failure cases + + #[ignore] + #[test] + fn session_bytes_to_stored_versioned_contract_to_stored_versioned_session_should_fail() { + for len in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + + let mut subcalls = + vec![ + super::stored_versioned_contract(current_contract_package_hash.into()); + len.saturating_sub(1) + ]; + if *len > 0 { + subcalls.push(super::stored_versioned_session( + current_contract_package_hash.into(), + )); + } + + let execute_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_CALL_RECURSIVE_SUBCALL, + runtime_args! 
{ + ARG_CALLS => subcalls.clone(), + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(*len), + }, + ) + .build(); + + builder.exec(execute_request).commit(); + + super::assert_invalid_context(&mut builder, *len); + } + } + + #[ignore] + #[test] + fn session_bytes_to_stored_versioned_contract_to_stored_session_should_fail() { + for len in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let mut subcalls = + vec![ + super::stored_versioned_contract(current_contract_package_hash.into()); + len.saturating_sub(1) + ]; + if *len > 0 { + subcalls.push(super::stored_session(current_contract_hash.into())); + } + + let execute_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_CALL_RECURSIVE_SUBCALL, + runtime_args! { + ARG_CALLS => subcalls.clone(), + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(*len), + }, + ) + .build(); + + builder.exec(execute_request).commit(); + + super::assert_invalid_context(&mut builder, *len); + } + } + + #[ignore] + #[test] + fn session_bytes_to_stored_contract_to_stored_versioned_session_should_fail() { + for len in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let mut subcalls = + vec![super::stored_contract(current_contract_hash.into()); len.saturating_sub(1)]; + if *len > 0 { + subcalls.push(super::stored_versioned_session( + current_contract_package_hash.into(), + )); + } + + let execute_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_CALL_RECURSIVE_SUBCALL, + runtime_args! 
{ + ARG_CALLS => subcalls.clone(), + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(*len), + }, + ) + .build(); + + builder.exec(execute_request).commit(); + + super::assert_invalid_context(&mut builder, *len); + } + } + + #[ignore] + #[test] + fn session_bytes_to_stored_contract_to_stored_session_should_fail() { + for len in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let mut subcalls = + vec![super::stored_contract(current_contract_hash.into()); len.saturating_sub(1)]; + if *len > 0 { + subcalls.push(super::stored_session(current_contract_hash.into())); + } + + let execute_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_CALL_RECURSIVE_SUBCALL, + runtime_args! { + ARG_CALLS => subcalls.clone(), + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(*len), + }, + ) + .build(); + + builder.exec(execute_request).commit(); + + super::assert_invalid_context(&mut builder, *len); + } + } + + // Stored contract + recursive subcall + + #[ignore] + #[test] + fn stored_versioned_contract_by_name_to_stored_versioned_contract() { + for len in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + + let subcalls = + vec![super::stored_versioned_contract(current_contract_package_hash.into()); *len]; + + let execute_request = ExecuteRequestBuilder::versioned_contract_call_by_name( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_PACKAGE_NAME, + None, + CONTRACT_FORWARDER_ENTRYPOINT_CONTRACT, + runtime_args! 
{ + ARG_CALLS => subcalls.clone(), + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(*len), + }, + ) + .build(); + + builder.exec(execute_request).commit().expect_success(); + + super::assert_each_context_has_correct_call_stack_info( + &mut builder, + super::stored_versioned_contract(current_contract_package_hash.into()), + subcalls, + current_contract_package_hash, + ); + } + } + + #[ignore] + #[test] + fn stored_versioned_contract_by_hash_to_stored_versioned_contract() { + for len in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + + let subcalls = + vec![super::stored_versioned_contract(current_contract_package_hash.into()); *len]; + + let execute_request = ExecuteRequestBuilder::versioned_contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + current_contract_package_hash.into(), + None, + CONTRACT_FORWARDER_ENTRYPOINT_CONTRACT, + runtime_args! { + ARG_CALLS => subcalls.clone(), + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(*len), + }, + ) + .build(); + + builder.exec(execute_request).commit().expect_success(); + + let effects = builder.get_effects().last().unwrap().clone(); + + let key = if builder.chainspec().core_config.enable_addressable_entity { + Key::SmartContract(current_contract_package_hash) + } else { + Key::Hash(current_contract_package_hash) + }; + + assert!( + effects + .transforms() + .iter() + .any(|transform| transform.key() == &key + && transform.kind() == &TransformKindV2::Identity), + "Missing `Identity` transform for a contract package being called." 
+ ); + + super::assert_each_context_has_correct_call_stack_info( + &mut builder, + super::stored_versioned_contract(current_contract_package_hash.into()), + subcalls, + current_contract_package_hash, + ); + } + } + + #[ignore] + #[test] + fn stored_versioned_contract_by_name_to_stored_contract() { + for len in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let subcalls = vec![super::stored_contract(current_contract_hash.into()); *len]; + + let execute_request = ExecuteRequestBuilder::versioned_contract_call_by_name( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_PACKAGE_NAME, + None, + CONTRACT_FORWARDER_ENTRYPOINT_CONTRACT, + runtime_args! { + ARG_CALLS => subcalls.clone(), + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(*len), + }, + ) + .build(); + + builder.exec(execute_request).commit().expect_success(); + + super::assert_each_context_has_correct_call_stack_info( + &mut builder, + super::stored_versioned_contract(current_contract_package_hash.into()), + subcalls, + current_contract_package_hash, + ); + } + } + + #[ignore] + #[test] + fn stored_versioned_contract_by_hash_to_stored_contract() { + for len in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let subcalls = vec![super::stored_contract(current_contract_hash.into()); *len]; + + let execute_request = ExecuteRequestBuilder::versioned_contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + current_contract_package_hash.into(), + None, + CONTRACT_FORWARDER_ENTRYPOINT_CONTRACT, + runtime_args! 
{ + ARG_CALLS => subcalls.clone(), + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(*len), + }, + ) + .build(); + + builder.exec(execute_request).commit().expect_success(); + + super::assert_each_context_has_correct_call_stack_info( + &mut builder, + super::stored_versioned_contract(current_contract_package_hash.into()), + subcalls, + current_contract_package_hash, + ); + } + } + + #[ignore] + #[test] + fn stored_contract_by_name_to_stored_versioned_contract() { + for len in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let subcalls = + vec![super::stored_versioned_contract(current_contract_package_hash.into()); *len]; + + let execute_request = ExecuteRequestBuilder::contract_call_by_name( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_NAME, + CONTRACT_FORWARDER_ENTRYPOINT_CONTRACT, + runtime_args! 
{ + ARG_CALLS => subcalls.clone(), + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(*len), + }, + ) + .build(); + + builder.exec(execute_request).commit().expect_success(); + + super::assert_each_context_has_correct_call_stack_info( + &mut builder, + super::stored_contract(current_contract_hash.into()), + subcalls, + current_contract_package_hash, + ); + } + } + + #[ignore] + #[test] + fn stored_contract_by_hash_to_stored_versioned_contract() { + for len in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let subcalls = + vec![super::stored_versioned_contract(current_contract_package_hash.into()); *len]; + + let execute_request = ExecuteRequestBuilder::contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + current_contract_hash.into(), + CONTRACT_FORWARDER_ENTRYPOINT_CONTRACT, + runtime_args! { + ARG_CALLS => subcalls.clone(), + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(*len), + }, + ) + .build(); + + builder.exec(execute_request).commit().expect_success(); + + let effects = builder.get_effects().last().unwrap().clone(); + + assert!( + effects.transforms().iter().any(|transform| transform.key() + == &Key::Hash(current_contract_hash) + && transform.kind() == &TransformKindV2::Identity), + "Missing `Identity` transform for a contract being called." 
+ ); + + super::assert_each_context_has_correct_call_stack_info( + &mut builder, + super::stored_versioned_contract(current_contract_package_hash.into()), + subcalls, + current_contract_package_hash, + ); + } + } + + #[ignore] + #[test] + fn stored_contract_by_name_to_stored_contract() { + for len in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let subcalls = vec![super::stored_contract(current_contract_hash.into()); *len]; + + let execute_request = ExecuteRequestBuilder::contract_call_by_name( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_NAME, + CONTRACT_FORWARDER_ENTRYPOINT_CONTRACT, + runtime_args! { + ARG_CALLS => subcalls.clone(), + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(*len), + }, + ) + .build(); + + builder.exec(execute_request).commit().expect_success(); + + super::assert_each_context_has_correct_call_stack_info( + &mut builder, + super::stored_contract(current_contract_hash.into()), + subcalls, + current_contract_package_hash, + ); + } + } + + #[ignore] + #[test] + fn stored_contract_by_hash_to_stored_contract() { + for len in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let subcalls = vec![super::stored_contract(current_contract_hash.into()); *len]; + + let execute_request = ExecuteRequestBuilder::contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + current_contract_hash.into(), + CONTRACT_FORWARDER_ENTRYPOINT_CONTRACT, + runtime_args! 
{ + ARG_CALLS => subcalls.clone(), + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(*len), + }, + ) + .build(); + + builder.exec(execute_request).commit().expect_success(); + + super::assert_each_context_has_correct_call_stack_info( + &mut builder, + super::stored_contract(current_contract_hash.into()), + subcalls, + current_contract_package_hash, + ); + } + } + + // Stored contract + recursive subcall failure cases + + #[ignore] + #[test] + fn stored_versioned_contract_by_name_to_stored_versioned_session_should_fail() { + for len in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + + let subcalls = + vec![super::stored_versioned_session(current_contract_package_hash.into()); *len]; + + let execute_request = ExecuteRequestBuilder::versioned_contract_call_by_name( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_PACKAGE_NAME, + None, + CONTRACT_FORWARDER_ENTRYPOINT_CONTRACT, + runtime_args! { + ARG_CALLS => subcalls.clone(), + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(*len), + }, + ) + .build(); + + builder.exec(execute_request).commit(); + + super::assert_invalid_context(&mut builder, *len); + } + } + + #[ignore] + #[test] + fn stored_versioned_contract_by_hash_to_stored_versioned_session_should_fail() { + for len in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + + let subcalls = + vec![super::stored_versioned_session(current_contract_package_hash.into()); *len]; + + let execute_request = ExecuteRequestBuilder::versioned_contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + current_contract_package_hash.into(), + None, + CONTRACT_FORWARDER_ENTRYPOINT_CONTRACT, + runtime_args! 
{ + ARG_CALLS => subcalls.clone(), + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(*len), + }, + ) + .build(); + + builder.exec(execute_request).commit(); + + super::assert_invalid_context(&mut builder, *len); + } + } + + #[ignore] + #[test] + fn stored_versioned_contract_by_name_to_stored_session_should_fail() { + for len in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let subcalls = vec![super::stored_session(current_contract_hash.into()); *len]; + + let execute_request = ExecuteRequestBuilder::versioned_contract_call_by_name( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_PACKAGE_NAME, + None, + CONTRACT_FORWARDER_ENTRYPOINT_CONTRACT, + runtime_args! { + ARG_CALLS => subcalls.clone(), + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(*len), + }, + ) + .build(); + + builder.exec(execute_request).commit(); + + super::assert_invalid_context(&mut builder, *len); + } + } + + #[ignore] + #[test] + fn stored_versioned_contract_by_hash_to_stored_session_should_fail() { + for len in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let subcalls = vec![super::stored_session(current_contract_hash.into()); *len]; + + let execute_request = ExecuteRequestBuilder::versioned_contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + current_contract_package_hash.into(), + None, + CONTRACT_FORWARDER_ENTRYPOINT_CONTRACT, + runtime_args! 
{ + ARG_CALLS => subcalls.clone(), + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(*len), + }, + ) + .build(); + + builder.exec(execute_request).commit(); + + super::assert_invalid_context(&mut builder, *len); + } + } + + #[ignore] + #[test] + fn stored_contract_by_name_to_stored_versioned_session_should_fail() { + for len in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + + let subcalls = + vec![super::stored_versioned_session(current_contract_package_hash.into()); *len]; + + let execute_request = ExecuteRequestBuilder::contract_call_by_name( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_NAME, + CONTRACT_FORWARDER_ENTRYPOINT_CONTRACT, + runtime_args! { + ARG_CALLS => subcalls.clone(), + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(*len), + }, + ) + .build(); + + builder.exec(execute_request).commit(); + + super::assert_invalid_context(&mut builder, *len); + } + } + + #[ignore] + #[test] + fn stored_contract_by_hash_to_stored_versioned_session_should_fail() { + for len in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let subcalls = + vec![super::stored_versioned_session(current_contract_package_hash.into()); *len]; + + let execute_request = ExecuteRequestBuilder::contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + current_contract_hash.into(), + CONTRACT_FORWARDER_ENTRYPOINT_CONTRACT, + runtime_args! 
{ + ARG_CALLS => subcalls.clone(), + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(*len), + }, + ) + .build(); + + builder.exec(execute_request).commit(); + + super::assert_invalid_context(&mut builder, *len); + } + } + + #[ignore] + #[test] + fn stored_contract_by_name_to_stored_session_should_fail() { + for len in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let subcalls = vec![super::stored_session(current_contract_hash.into()); *len]; + + let execute_request = ExecuteRequestBuilder::contract_call_by_name( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_NAME, + CONTRACT_FORWARDER_ENTRYPOINT_CONTRACT, + runtime_args! { + ARG_CALLS => subcalls.clone(), + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(*len), + }, + ) + .build(); + + builder.exec(execute_request).commit(); + + super::assert_invalid_context(&mut builder, *len); + } + } + + #[ignore] + #[test] + fn stored_contract_by_hash_to_stored_session_should_fail() { + for len in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let subcalls = vec![super::stored_session(current_contract_hash.into()); *len]; + + let execute_request = ExecuteRequestBuilder::contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + current_contract_hash.into(), + CONTRACT_FORWARDER_ENTRYPOINT_CONTRACT, + runtime_args! 
{ + ARG_CALLS => subcalls.clone(), + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(*len), + }, + ) + .build(); + + builder.exec(execute_request).commit(); + + super::assert_invalid_context(&mut builder, *len); + } + } + + #[ignore] + #[test] + fn stored_versioned_contract_by_name_to_stored_versioned_contract_to_stored_versioned_session_should_fail( + ) { + for len in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + + let mut subcalls = + vec![ + super::stored_versioned_contract(current_contract_package_hash.into()); + len.saturating_sub(1) + ]; + if *len > 0 { + subcalls.push(super::stored_versioned_session( + current_contract_package_hash.into(), + )) + } + + let execute_request = ExecuteRequestBuilder::versioned_contract_call_by_name( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_PACKAGE_NAME, + None, + CONTRACT_FORWARDER_ENTRYPOINT_CONTRACT, + runtime_args! 
{ + ARG_CALLS => subcalls.clone(), + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(*len), + }, + ) + .build(); + + builder.exec(execute_request).commit(); + + super::assert_invalid_context(&mut builder, *len); + } + } + + #[ignore] + #[test] + fn stored_versioned_contract_by_hash_to_stored_versioned_contract_to_stored_session_should_fail( + ) { + for len in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let mut subcalls = + vec![ + super::stored_versioned_contract(current_contract_package_hash.into()); + len.saturating_sub(1) + ]; + if *len > 0 { + subcalls.push(super::stored_session(current_contract_hash.into())) + } + + let execute_request = ExecuteRequestBuilder::versioned_contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + current_contract_package_hash.into(), + None, + CONTRACT_FORWARDER_ENTRYPOINT_CONTRACT, + runtime_args! 
{ + ARG_CALLS => subcalls.clone(), + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(*len), + }, + ) + .build(); + + builder.exec(execute_request).commit(); + + super::assert_invalid_context(&mut builder, *len); + } + } + + #[ignore] + #[test] + fn stored_versioned_contract_by_name_to_stored_contract_to_stored_versioned_session_should_fail( + ) { + for len in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let mut subcalls = + vec![super::stored_contract(current_contract_hash.into()); len.saturating_sub(1)]; + if *len > 0 { + subcalls.push(super::stored_versioned_session( + current_contract_package_hash.into(), + )) + } + + let execute_request = ExecuteRequestBuilder::versioned_contract_call_by_name( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_PACKAGE_NAME, + None, + CONTRACT_FORWARDER_ENTRYPOINT_CONTRACT, + runtime_args! 
{ + ARG_CALLS => subcalls.clone(), + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(*len), + }, + ) + .build(); + + builder.exec(execute_request).commit(); + + super::assert_invalid_context(&mut builder, *len); + } + } + + #[ignore] + #[test] + fn stored_versioned_contract_by_hash_to_stored_contract_to_stored_session_should_fail() { + for len in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let mut subcalls = + vec![super::stored_contract(current_contract_hash.into()); len.saturating_sub(1)]; + if *len > 0 { + subcalls.push(super::stored_session(current_contract_hash.into())) + } + + let execute_request = ExecuteRequestBuilder::versioned_contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + current_contract_package_hash.into(), + None, + CONTRACT_FORWARDER_ENTRYPOINT_CONTRACT, + runtime_args! 
{ + ARG_CALLS => subcalls.clone(), + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(*len), + }, + ) + .build(); + + builder.exec(execute_request).commit(); + + super::assert_invalid_context(&mut builder, *len); + } + } + + #[ignore] + #[test] + fn stored_contract_by_name_to_stored_versioned_contract_to_stored_versioned_session_should_fail( + ) { + for len in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + + let mut subcalls = + vec![ + super::stored_versioned_contract(current_contract_package_hash.into()); + len.saturating_sub(1) + ]; + if *len > 0 { + subcalls.push(super::stored_versioned_session( + current_contract_package_hash.into(), + )) + } + + let execute_request = ExecuteRequestBuilder::contract_call_by_name( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_NAME, + CONTRACT_FORWARDER_ENTRYPOINT_CONTRACT, + runtime_args! 
{ + ARG_CALLS => subcalls.clone(), + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(*len), + }, + ) + .build(); + + builder.exec(execute_request).commit(); + + super::assert_invalid_context(&mut builder, *len); + } + } + + #[ignore] + #[test] + fn stored_contract_by_hash_to_stored_versioned_contract_to_stored_session_should_fail() { + for len in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let mut subcalls = + vec![ + super::stored_versioned_contract(current_contract_package_hash.into()); + len.saturating_sub(1) + ]; + if *len > 0 { + subcalls.push(super::stored_session(current_contract_hash.into())) + } + + let execute_request = ExecuteRequestBuilder::contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + current_contract_hash.into(), + CONTRACT_FORWARDER_ENTRYPOINT_CONTRACT, + runtime_args! 
{ + ARG_CALLS => subcalls.clone(), + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(*len), + }, + ) + .build(); + + builder.exec(execute_request).commit(); + + super::assert_invalid_context(&mut builder, *len); + } + } + + #[ignore] + #[test] + fn stored_contract_by_name_to_stored_contract_to_stored_versioned_session_should_fail() { + for len in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let mut subcalls = + vec![super::stored_contract(current_contract_hash.into()); len.saturating_sub(1)]; + if *len > 0 { + subcalls.push(super::stored_versioned_session( + current_contract_package_hash.into(), + )) + } + + let execute_request = ExecuteRequestBuilder::contract_call_by_name( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_NAME, + CONTRACT_FORWARDER_ENTRYPOINT_CONTRACT, + runtime_args! { + ARG_CALLS => subcalls.clone(), + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(*len), + }, + ) + .build(); + + builder.exec(execute_request).commit(); + + super::assert_invalid_context(&mut builder, *len); + } + } + + #[ignore] + #[test] + fn stored_contract_by_hash_to_stored_contract_to_stored_session_should_fail() { + for len in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let mut subcalls = + vec![super::stored_contract(current_contract_hash.into()); len.saturating_sub(1)]; + if *len > 0 { + subcalls.push(super::stored_session(current_contract_hash.into())) + } + + let execute_request = ExecuteRequestBuilder::contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + current_contract_hash.into(), + CONTRACT_FORWARDER_ENTRYPOINT_CONTRACT, + runtime_args! 
{ + ARG_CALLS => subcalls.clone(), + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(*len), + }, + ) + .build(); + + builder.exec(execute_request).commit(); + + super::assert_invalid_context(&mut builder, *len); + } + } + + // Stored session + recursive subcall + + #[ignore] + #[test] + fn stored_versioned_session_by_name_to_stored_versioned_session() { + for len in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + + let subcalls = + vec![super::stored_versioned_session(current_contract_package_hash.into()); *len]; + + let execute_request = ExecuteRequestBuilder::versioned_contract_call_by_name( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_PACKAGE_NAME, + None, + CONTRACT_FORWARDER_ENTRYPOINT_SESSION, + runtime_args! { + ARG_CALLS => subcalls.clone(), + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(*len), + }, + ) + .build(); + + builder.exec(execute_request).commit().expect_success(); + + super::assert_each_context_has_correct_call_stack_info( + &mut builder, + super::stored_versioned_session(current_contract_package_hash.into()), + subcalls, + current_contract_package_hash, + ); + } + } + + #[ignore] + #[test] + fn stored_versioned_session_by_hash_to_stored_versioned_session() { + for len in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + + let subcalls = + vec![super::stored_versioned_session(current_contract_package_hash.into()); *len]; + + let execute_request = ExecuteRequestBuilder::versioned_contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + current_contract_package_hash.into(), + None, + CONTRACT_FORWARDER_ENTRYPOINT_SESSION, + runtime_args! 
{ + ARG_CALLS => subcalls.clone(), + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(*len), + }, + ) + .build(); + + builder.exec(execute_request).commit().expect_success(); + + super::assert_each_context_has_correct_call_stack_info( + &mut builder, + super::stored_versioned_session(current_contract_package_hash.into()), + subcalls, + current_contract_package_hash, + ); + } + } + + #[ignore] + #[test] + fn stored_versioned_session_by_name_to_stored_session() { + for len in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let subcalls = vec![super::stored_session(current_contract_hash.into()); *len]; + + let execute_request = ExecuteRequestBuilder::versioned_contract_call_by_name( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_PACKAGE_NAME, + None, + CONTRACT_FORWARDER_ENTRYPOINT_SESSION, + runtime_args! 
{ + ARG_CALLS => subcalls.clone(), + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(*len), + }, + ) + .build(); + + builder.exec(execute_request).commit().expect_success(); + + super::assert_each_context_has_correct_call_stack_info( + &mut builder, + super::stored_versioned_session(current_contract_package_hash.into()), + subcalls, + current_contract_package_hash, + ); + } + } + + #[ignore] + #[test] + fn stored_versioned_session_by_hash_to_stored_session() { + for len in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let subcalls = vec![super::stored_session(current_contract_hash.into()); *len]; + + let execute_request = ExecuteRequestBuilder::versioned_contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + current_contract_package_hash.into(), + None, + CONTRACT_FORWARDER_ENTRYPOINT_SESSION, + runtime_args! 
{ + ARG_CALLS => subcalls.clone(), + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(*len), + }, + ) + .build(); + + builder.exec(execute_request).commit().expect_success(); + + super::assert_each_context_has_correct_call_stack_info( + &mut builder, + super::stored_versioned_session(current_contract_package_hash.into()), + subcalls, + current_contract_package_hash, + ); + } + } + + #[ignore] + #[test] + fn stored_session_by_name_to_stored_versioned_session() { + for len in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let subcalls = + vec![super::stored_versioned_session(current_contract_package_hash.into()); *len]; + + let execute_request = ExecuteRequestBuilder::contract_call_by_name( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_NAME, + CONTRACT_FORWARDER_ENTRYPOINT_SESSION, + runtime_args! 
{ + ARG_CALLS => subcalls.clone(), + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(*len), + }, + ) + .build(); + + builder.exec(execute_request).commit().expect_success(); + + super::assert_each_context_has_correct_call_stack_info( + &mut builder, + super::stored_session(current_contract_hash.into()), + subcalls, + current_contract_package_hash, + ); + } + } + + #[ignore] + #[test] + fn stored_session_by_hash_to_stored_versioned_session() { + for len in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let subcalls = + vec![super::stored_versioned_session(current_contract_package_hash.into()); *len]; + + let execute_request = ExecuteRequestBuilder::contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + current_contract_hash.into(), + CONTRACT_FORWARDER_ENTRYPOINT_SESSION, + runtime_args! 
{ + ARG_CALLS => subcalls.clone(), + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(*len), + }, + ) + .build(); + + builder.exec(execute_request).commit().expect_success(); + + super::assert_each_context_has_correct_call_stack_info( + &mut builder, + super::stored_versioned_session(current_contract_package_hash.into()), + subcalls, + current_contract_package_hash, + ); + } + } + + #[ignore] + #[test] + fn stored_session_by_name_to_stored_session() { + for len in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let subcalls = vec![super::stored_session(current_contract_hash.into()); *len]; + + let execute_request = ExecuteRequestBuilder::contract_call_by_name( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_NAME, + CONTRACT_FORWARDER_ENTRYPOINT_SESSION, + runtime_args! 
{ + ARG_CALLS => subcalls.clone(), + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(*len), + }, + ) + .build(); + + builder.exec(execute_request).commit().expect_success(); + + super::assert_each_context_has_correct_call_stack_info( + &mut builder, + super::stored_session(current_contract_hash.into()), + subcalls, + current_contract_package_hash, + ); + } + } + + #[ignore] + #[test] + fn stored_session_by_hash_to_stored_session() { + for len in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let subcalls = vec![super::stored_session(current_contract_hash.into()); *len]; + + let execute_request = ExecuteRequestBuilder::contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + current_contract_hash.into(), + CONTRACT_FORWARDER_ENTRYPOINT_SESSION, + runtime_args! 
{ + ARG_CALLS => subcalls.clone(), + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(*len), + }, + ) + .build(); + + builder.exec(execute_request).commit().expect_success(); + + super::assert_each_context_has_correct_call_stack_info( + &mut builder, + super::stored_session(current_contract_hash.into()), + subcalls, + current_contract_package_hash, + ); + } + } + + #[ignore] + #[test] + fn stored_versioned_session_by_name_to_stored_versioned_contract() { + for len in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + + let subcalls = + vec![super::stored_versioned_contract(current_contract_package_hash.into()); *len]; + + let execute_request = ExecuteRequestBuilder::versioned_contract_call_by_name( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_PACKAGE_NAME, + None, + CONTRACT_FORWARDER_ENTRYPOINT_SESSION, + runtime_args! { + ARG_CALLS => subcalls.clone(), + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(*len), + }, + ) + .build(); + + builder.exec(execute_request).commit().expect_success(); + + super::assert_each_context_has_correct_call_stack_info( + &mut builder, + super::stored_versioned_session(current_contract_package_hash.into()), + subcalls, + current_contract_package_hash, + ); + } + } + + #[ignore] + #[test] + fn stored_versioned_session_by_hash_to_stored_versioned_contract() { + for len in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + + let subcalls = + vec![super::stored_versioned_contract(current_contract_package_hash.into()); *len]; + + let execute_request = ExecuteRequestBuilder::versioned_contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + current_contract_package_hash.into(), + None, + 
CONTRACT_FORWARDER_ENTRYPOINT_SESSION, + runtime_args! { + ARG_CALLS => subcalls.clone(), + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(*len), + }, + ) + .build(); + + builder.exec(execute_request).commit().expect_success(); + + super::assert_each_context_has_correct_call_stack_info( + &mut builder, + super::stored_versioned_session(current_contract_package_hash.into()), + subcalls, + current_contract_package_hash, + ); + } + } + + #[ignore] + #[test] + fn stored_versioned_session_by_name_to_stored_contract() { + for len in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let subcalls = vec![super::stored_contract(current_contract_hash.into()); *len]; + + let execute_request = ExecuteRequestBuilder::versioned_contract_call_by_name( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_PACKAGE_NAME, + None, + CONTRACT_FORWARDER_ENTRYPOINT_SESSION, + runtime_args! 
{ + ARG_CALLS => subcalls.clone(), + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(*len), + }, + ) + .build(); + + builder.exec(execute_request).commit().expect_success(); + + super::assert_each_context_has_correct_call_stack_info( + &mut builder, + super::stored_versioned_session(current_contract_package_hash.into()), + subcalls, + current_contract_package_hash, + ); + } + } + + #[ignore] + #[test] + fn stored_versioned_session_by_hash_to_stored_contract() { + for len in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let subcalls = vec![super::stored_contract(current_contract_hash.into()); *len]; + + let execute_request = ExecuteRequestBuilder::versioned_contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + current_contract_package_hash.into(), + None, + CONTRACT_FORWARDER_ENTRYPOINT_SESSION, + runtime_args! 
{ + ARG_CALLS => subcalls.clone(), + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(*len), + }, + ) + .build(); + + builder.exec(execute_request).commit().expect_success(); + + super::assert_each_context_has_correct_call_stack_info( + &mut builder, + super::stored_versioned_session(current_contract_package_hash.into()), + subcalls, + current_contract_package_hash, + ); + } + } + + #[ignore] + #[test] + fn stored_session_by_name_to_stored_versioned_contract() { + for len in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let subcalls = + vec![super::stored_versioned_contract(current_contract_package_hash.into()); *len]; + + let execute_request = ExecuteRequestBuilder::contract_call_by_name( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_NAME, + CONTRACT_FORWARDER_ENTRYPOINT_SESSION, + runtime_args! 
{ + ARG_CALLS => subcalls.clone(), + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(*len), + }, + ) + .build(); + + builder.exec(execute_request).commit().expect_success(); + + super::assert_each_context_has_correct_call_stack_info( + &mut builder, + super::stored_session(current_contract_hash.into()), + subcalls, + current_contract_package_hash, + ); + } + } + + #[ignore] + #[test] + fn stored_session_by_hash_to_stored_versioned_contract() { + for len in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let subcalls = + vec![super::stored_versioned_contract(current_contract_package_hash.into()); *len]; + + let execute_request = ExecuteRequestBuilder::contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + current_contract_hash.into(), + CONTRACT_FORWARDER_ENTRYPOINT_SESSION, + runtime_args! 
{ + ARG_CALLS => subcalls.clone(), + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(*len), + }, + ) + .build(); + + builder.exec(execute_request).commit().expect_success(); + + super::assert_each_context_has_correct_call_stack_info( + &mut builder, + super::stored_session(current_contract_hash.into()), + subcalls, + current_contract_package_hash, + ); + } + } + + #[ignore] + #[test] + fn stored_session_by_name_to_stored_contract() { + for len in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let subcalls = vec![super::stored_contract(current_contract_hash.into()); *len]; + + let execute_request = ExecuteRequestBuilder::contract_call_by_name( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_NAME, + CONTRACT_FORWARDER_ENTRYPOINT_SESSION, + runtime_args! 
{ + ARG_CALLS => subcalls.clone(), + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(*len), + }, + ) + .build(); + + builder.exec(execute_request).commit().expect_success(); + + super::assert_each_context_has_correct_call_stack_info( + &mut builder, + super::stored_session(current_contract_hash.into()), + subcalls, + current_contract_package_hash, + ); + } + } + + #[ignore] + #[test] + fn stored_session_by_hash_to_stored_contract() { + for len in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let subcalls = vec![super::stored_contract(current_contract_hash.into()); *len]; + + let execute_request = ExecuteRequestBuilder::contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + current_contract_hash.into(), + CONTRACT_FORWARDER_ENTRYPOINT_SESSION, + runtime_args! 
{ + ARG_CALLS => subcalls.clone(), + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(*len), + }, + ) + .build(); + + builder.exec(execute_request).commit().expect_success(); + + super::assert_each_context_has_correct_call_stack_info( + &mut builder, + super::stored_session(current_contract_hash.into()), + subcalls, + current_contract_package_hash, + ); + } + } + + // Stored session + recursive subcall failure cases + + #[ignore] + #[test] + fn stored_versioned_session_by_name_to_stored_versioned_contract_to_stored_versioned_session_should_fail( + ) { + for len in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + + let mut subcalls = + vec![ + super::stored_versioned_contract(current_contract_package_hash.into()); + len.saturating_sub(1) + ]; + if *len > 0 { + subcalls.push(super::stored_versioned_session( + current_contract_package_hash.into(), + )) + } + + let execute_request = ExecuteRequestBuilder::versioned_contract_call_by_name( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_PACKAGE_NAME, + None, + CONTRACT_FORWARDER_ENTRYPOINT_SESSION, + runtime_args! 
{ + ARG_CALLS => subcalls.clone(), + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(*len), + }, + ) + .build(); + + builder.exec(execute_request).commit(); + + super::assert_invalid_context(&mut builder, *len); + } + } + + #[ignore] + #[test] + fn stored_versioned_session_by_hash_to_stored_versioned_contract_to_stored_session_should_fail() + { + for len in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let mut subcalls = + vec![ + super::stored_versioned_contract(current_contract_package_hash.into()); + len.saturating_sub(1) + ]; + if *len > 0 { + subcalls.push(super::stored_session(current_contract_hash.into())) + } + + let execute_request = ExecuteRequestBuilder::versioned_contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + current_contract_package_hash.into(), + None, + CONTRACT_FORWARDER_ENTRYPOINT_SESSION, + runtime_args! 
{ + ARG_CALLS => subcalls.clone(), + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(*len), + }, + ) + .build(); + + builder.exec(execute_request).commit(); + + super::assert_invalid_context(&mut builder, *len); + } + } + + #[ignore] + #[test] + fn stored_versioned_session_by_name_to_stored_contract_to_stored_versioned_session_should_fail() + { + for len in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let mut subcalls = + vec![super::stored_contract(current_contract_hash.into()); len.saturating_sub(1)]; + if *len > 0 { + subcalls.push(super::stored_versioned_session( + current_contract_package_hash.into(), + )) + } + + let execute_request = ExecuteRequestBuilder::versioned_contract_call_by_name( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_PACKAGE_NAME, + None, + CONTRACT_FORWARDER_ENTRYPOINT_SESSION, + runtime_args! 
{ + ARG_CALLS => subcalls.clone(), + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(*len), + }, + ) + .build(); + + builder.exec(execute_request).commit(); + + super::assert_invalid_context(&mut builder, *len); + } + } + + #[ignore] + #[test] + fn stored_versioned_session_by_hash_to_stored_contract_to_stored_session_should_fail() { + for len in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let mut subcalls = + vec![super::stored_contract(current_contract_hash.into()); len.saturating_sub(1)]; + if *len > 0 { + subcalls.push(super::stored_session(current_contract_hash.into())) + } + + let execute_request = ExecuteRequestBuilder::versioned_contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + current_contract_package_hash.into(), + None, + CONTRACT_FORWARDER_ENTRYPOINT_SESSION, + runtime_args! 
{ + ARG_CALLS => subcalls.clone(), + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(*len), + }, + ) + .build(); + + builder.exec(execute_request).commit(); + + super::assert_invalid_context(&mut builder, *len); + } + } + + #[ignore] + #[test] + fn stored_session_by_name_to_stored_versioned_contract_to_stored_versioned_session_should_fail() + { + for len in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + + let mut subcalls = + vec![ + super::stored_versioned_contract(current_contract_package_hash.into()); + len.saturating_sub(1) + ]; + if *len > 0 { + subcalls.push(super::stored_versioned_session( + current_contract_package_hash.into(), + )) + } + + let execute_request = ExecuteRequestBuilder::contract_call_by_name( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_NAME, + CONTRACT_FORWARDER_ENTRYPOINT_SESSION, + runtime_args! 
{ + ARG_CALLS => subcalls.clone(), + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(*len), + }, + ) + .build(); + + builder.exec(execute_request).commit(); + + super::assert_invalid_context(&mut builder, *len); + } + } + + #[ignore] + #[test] + fn stored_session_by_hash_to_stored_versioned_contract_to_stored_session_should_fail() { + for len in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let mut subcalls = + vec![ + super::stored_versioned_contract(current_contract_package_hash.into()); + len.saturating_sub(1) + ]; + if *len > 0 { + subcalls.push(super::stored_session(current_contract_hash.into())) + } + + let execute_request = ExecuteRequestBuilder::contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + current_contract_hash.into(), + CONTRACT_FORWARDER_ENTRYPOINT_SESSION, + runtime_args! 
{ + ARG_CALLS => subcalls.clone(), + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(*len), + }, + ) + .build(); + + builder.exec(execute_request).commit(); + + super::assert_invalid_context(&mut builder, *len); + } + } + + #[ignore] + #[test] + fn stored_session_by_name_to_stored_contract_to_stored_versioned_session_should_fail() { + for len in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let mut subcalls = + vec![super::stored_contract(current_contract_hash.into()); len.saturating_sub(1)]; + if *len > 0 { + subcalls.push(super::stored_versioned_session( + current_contract_package_hash.into(), + )) + } + + let execute_request = ExecuteRequestBuilder::contract_call_by_name( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_NAME, + CONTRACT_FORWARDER_ENTRYPOINT_SESSION, + runtime_args! { + ARG_CALLS => subcalls.clone(), + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(*len), + }, + ) + .build(); + + builder.exec(execute_request).commit(); + + super::assert_invalid_context(&mut builder, *len); + } + } + + #[ignore] + #[test] + fn stored_session_by_hash_to_stored_contract_to_stored_session_should_fail() { + for len in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let mut subcalls = + vec![super::stored_contract(current_contract_hash.into()); len.saturating_sub(1)]; + if *len > 0 { + subcalls.push(super::stored_session(current_contract_hash.into())) + } + + let execute_request = ExecuteRequestBuilder::contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + current_contract_hash.into(), + CONTRACT_FORWARDER_ENTRYPOINT_SESSION, + runtime_args! 
{ + ARG_CALLS => subcalls.clone(), + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(*len), + }, + ) + .build(); + + builder.exec(execute_request).commit(); + + super::assert_invalid_context(&mut builder, *len); + } + } +} + +mod payment { + use std::iter; + + use rand::Rng; + + use casper_engine_test_support::{ + DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, + }; + use casper_types::{runtime_args, system::mint, HashAddr, RuntimeArgs}; + use get_call_stack_recursive_subcall::Call; + + use crate::wasm_utils; + + use super::{ + approved_amount, AccountExt, ARG_CALLS, ARG_CURRENT_DEPTH, CONTRACT_CALL_RECURSIVE_SUBCALL, + CONTRACT_FORWARDER_ENTRYPOINT_SESSION, CONTRACT_NAME, CONTRACT_PACKAGE_NAME, + }; + + // DEPTHS should not contain 1, as it will eliminate the initial element from the subcalls + // vector. Going further than 6 will hit the gas limit. + const DEPTHS: &[usize] = &[0, 6, 10]; + + fn execute( + builder: &mut LmdbWasmTestBuilder, + call_depth: usize, + subcalls: Vec, + is_invalid_context: bool, + ) { + let execute_request = { + let mut rng = rand::thread_rng(); + let deploy_hash = rng.gen(); + let sender = *DEFAULT_ACCOUNT_ADDR; + let args = runtime_args! 
{ + ARG_CALLS => subcalls, + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(call_depth), + }; + let deploy = DeployItemBuilder::new() + .with_address(sender) + .with_payment_code(CONTRACT_CALL_RECURSIVE_SUBCALL, args) + .with_session_bytes(wasm_utils::do_nothing_bytes(), RuntimeArgs::default()) + .with_authorization_keys(&[sender]) + .with_deploy_hash(deploy_hash) + .build(); + ExecuteRequestBuilder::from_deploy_item(&deploy).build() + }; + + super::execute_and_assert_result(call_depth, builder, execute_request, is_invalid_context); + } + + fn execute_stored_payment_by_package_name( + builder: &mut LmdbWasmTestBuilder, + call_depth: usize, + subcalls: Vec, + ) { + let execute_request = { + let mut rng = rand::thread_rng(); + let deploy_hash = rng.gen(); + + let sender = *DEFAULT_ACCOUNT_ADDR; + + let args = runtime_args! { + ARG_CALLS => subcalls, + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(call_depth), + }; + + let deploy = DeployItemBuilder::new() + .with_address(sender) + .with_stored_versioned_payment_contract_by_name( + CONTRACT_PACKAGE_NAME, + None, + CONTRACT_FORWARDER_ENTRYPOINT_SESSION, + args, + ) + .with_session_bytes(wasm_utils::do_nothing_bytes(), RuntimeArgs::default()) + .with_authorization_keys(&[sender]) + .with_deploy_hash(deploy_hash) + .build(); + + ExecuteRequestBuilder::from_deploy_item(&deploy).build() + }; + + super::execute_and_assert_result(call_depth, builder, execute_request, false); + } + + fn execute_stored_payment_by_package_hash( + builder: &mut LmdbWasmTestBuilder, + call_depth: usize, + subcalls: Vec, + current_contract_package_hash: HashAddr, + ) { + let execute_request = { + let mut rng = rand::thread_rng(); + let deploy_hash = rng.gen(); + let sender = *DEFAULT_ACCOUNT_ADDR; + let args = runtime_args! 
{ + ARG_CALLS => subcalls, + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(call_depth), + }; + let deploy = DeployItemBuilder::new() + .with_address(sender) + .with_stored_versioned_payment_contract_by_hash( + current_contract_package_hash, + None, + CONTRACT_FORWARDER_ENTRYPOINT_SESSION, + args, + ) + .with_session_bytes(wasm_utils::do_nothing_bytes(), RuntimeArgs::default()) + .with_authorization_keys(&[sender]) + .with_deploy_hash(deploy_hash) + .build(); + ExecuteRequestBuilder::from_deploy_item(&deploy).build() + }; + + super::execute_and_assert_result(call_depth, builder, execute_request, false); + } + + fn execute_stored_payment_by_contract_name( + builder: &mut LmdbWasmTestBuilder, + call_depth: usize, + subcalls: Vec, + ) { + let execute_request = { + let mut rng = rand::thread_rng(); + let deploy_hash = rng.gen(); + + let sender = *DEFAULT_ACCOUNT_ADDR; + + let args = runtime_args! { + ARG_CALLS => subcalls, + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(call_depth), + }; + + let deploy = DeployItemBuilder::new() + .with_address(sender) + .with_stored_payment_named_key( + CONTRACT_NAME, + CONTRACT_FORWARDER_ENTRYPOINT_SESSION, + args, + ) + .with_session_bytes(wasm_utils::do_nothing_bytes(), RuntimeArgs::default()) + .with_authorization_keys(&[sender]) + .with_deploy_hash(deploy_hash) + .build(); + + ExecuteRequestBuilder::from_deploy_item(&deploy).build() + }; + + super::execute_and_assert_result(call_depth, builder, execute_request, false); + } + + fn execute_stored_payment_by_contract_hash( + builder: &mut LmdbWasmTestBuilder, + call_depth: usize, + subcalls: Vec, + current_contract_hash: HashAddr, + ) { + let execute_request = { + let mut rng = rand::thread_rng(); + let deploy_hash = rng.gen(); + let sender = *DEFAULT_ACCOUNT_ADDR; + let args = runtime_args! 
{ + ARG_CALLS => subcalls, + ARG_CURRENT_DEPTH => 0u8, + mint::ARG_AMOUNT => approved_amount(call_depth), + }; + let deploy = DeployItemBuilder::new() + .with_address(sender) + .with_stored_payment_hash( + current_contract_hash.into(), + CONTRACT_FORWARDER_ENTRYPOINT_SESSION, + args, + ) + .with_session_bytes(wasm_utils::do_nothing_bytes(), RuntimeArgs::default()) + .with_authorization_keys(&[sender]) + .with_deploy_hash(deploy_hash) + .build(); + ExecuteRequestBuilder::from_deploy_item(&deploy).build() + }; + + super::execute_and_assert_result(call_depth, builder, execute_request, false); + } + + // Session + recursive subcall + + #[ignore] + #[test] + fn payment_bytes_to_stored_versioned_session_to_stored_versioned_contract() { + for call_depth in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + + let mut subcalls = + vec![ + super::stored_versioned_session(current_contract_package_hash.into()); + call_depth.saturating_sub(1) + ]; + if *call_depth > 0 { + subcalls.push(super::stored_versioned_contract( + current_contract_package_hash.into(), + )); + } + + execute(&mut builder, *call_depth, subcalls, false); + } + } + + #[ignore] + #[test] + fn payment_bytes_to_stored_versioned_session_to_stored_contract() { + for call_depth in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let mut subcalls = + vec![ + super::stored_versioned_session(current_contract_package_hash.into()); + call_depth.saturating_sub(1) + ]; + if *call_depth > 0 { + subcalls.push(super::stored_contract(current_contract_hash.into())); + } + + execute(&mut builder, *call_depth, subcalls, false); + } + } + + 
#[ignore] + #[test] + fn payment_bytes_to_stored_session_to_stored_versioned_contract() { + for call_depth in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let mut subcalls = vec![ + super::stored_session(current_contract_hash.into()); + call_depth.saturating_sub(1) + ]; + if *call_depth > 0 { + subcalls.push(super::stored_versioned_contract( + current_contract_package_hash.into(), + )); + } + + execute(&mut builder, *call_depth, subcalls, false) + } + } + + // Payment logic is tethered to a low gas amount. It is not forbidden to attempt to do calls + // however they are expensive and if you exceed the gas limit it should fail with a + // GasLimit error. + #[ignore] + #[test] + fn payment_bytes_to_stored_contract_to_stored_session() { + let call_depth = 5usize; + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let subcalls = vec![ + super::stored_contract(current_contract_hash.into()), + super::stored_session(current_contract_hash.into()), + ]; + execute(&mut builder, call_depth, subcalls, true) + } + + #[ignore] + #[test] + fn payment_bytes_to_stored_session_to_stored_contract_() { + let call_depth = 5usize; + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let subcalls = iter::repeat_with(|| { + [ + super::stored_session(current_contract_hash.into()), + super::stored_contract(current_contract_hash.into()), + ] + }) + .take(call_depth) + .flatten(); + execute(&mut builder, call_depth, subcalls.collect(), false) + } + + // Session + recursive subcall failure 
cases + + #[ignore] + #[test] + fn payment_bytes_to_stored_versioned_contract_to_stored_versioned_session_should_fail() { + for call_depth in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + + let mut subcalls = + vec![ + super::stored_versioned_contract(current_contract_package_hash.into()); + call_depth.saturating_sub(1) + ]; + if *call_depth > 0 { + subcalls.push(super::stored_versioned_session( + current_contract_package_hash.into(), + )); + } + + execute(&mut builder, *call_depth, subcalls, true) + } + } + + #[ignore] + #[test] + fn payment_bytes_to_stored_versioned_contract_to_stored_session_should_fail() { + for call_depth in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let mut subcalls = + vec![ + super::stored_versioned_contract(current_contract_package_hash.into()); + call_depth.saturating_sub(1) + ]; + if *call_depth > 0 { + subcalls.push(super::stored_session(current_contract_hash.into())); + } + + execute(&mut builder, *call_depth, subcalls, true) + } + } + + #[ignore] + #[test] + fn payment_bytes_to_stored_contract_to_stored_versioned_session_should_fail() { + for call_depth in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let mut subcalls = vec![ + super::stored_contract(current_contract_hash.into()); + call_depth.saturating_sub(1) + ]; + if *call_depth > 0 { + subcalls.push(super::stored_versioned_session( + 
current_contract_package_hash.into(), + )); + } + + execute(&mut builder, *call_depth, subcalls, true) + } + } + + #[ignore] + #[test] + fn payment_bytes_to_stored_contract_to_stored_session_should_fail() { + for call_depth in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let mut subcalls = vec![ + super::stored_contract(current_contract_hash.into()); + call_depth.saturating_sub(1) + ]; + if *call_depth > 0 { + subcalls.push(super::stored_session(current_contract_hash.into())); + } + + execute(&mut builder, *call_depth, subcalls, true) + } + } + + // Stored session + recursive subcall + + #[ignore] + #[test] + fn stored_versioned_payment_by_name_to_stored_versioned_session() { + for call_depth in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + + let subcalls = + vec![ + super::stored_versioned_session(current_contract_package_hash.into()); + *call_depth + ]; + + execute_stored_payment_by_package_name(&mut builder, *call_depth, subcalls); + } + } + + #[ignore] + #[test] + fn stored_versioned_payment_by_hash_to_stored_versioned_session() { + for call_depth in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + + let subcalls = + vec![ + super::stored_versioned_session(current_contract_package_hash.into()); + *call_depth + ]; + + execute_stored_payment_by_package_hash( + &mut builder, + *call_depth, + subcalls, + current_contract_package_hash, + ) + } + } + + #[ignore] + #[test] + fn stored_versioned_payment_by_name_to_stored_session() { + for call_depth in DEPTHS { + let mut builder = super::setup(); + 
let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let subcalls = vec![super::stored_session(current_contract_hash.into()); *call_depth]; + + execute_stored_payment_by_package_name(&mut builder, *call_depth, subcalls) + } + } + + #[ignore] + #[test] + fn stored_versioned_payment_by_hash_to_stored_session() { + for call_depth in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let subcalls = vec![super::stored_session(current_contract_hash.into()); *call_depth]; + + execute_stored_payment_by_package_hash( + &mut builder, + *call_depth, + subcalls, + current_contract_package_hash, + ) + } + } + + #[ignore] + #[test] + fn stored_payment_by_name_to_stored_versioned_session() { + for call_depth in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + + let subcalls = + vec![ + super::stored_versioned_session(current_contract_package_hash.into()); + *call_depth + ]; + + execute_stored_payment_by_contract_name(&mut builder, *call_depth, subcalls) + } + } + + #[ignore] + #[test] + fn stored_payment_by_hash_to_stored_versioned_session() { + for call_depth in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let subcalls = + vec![ + super::stored_versioned_session(current_contract_package_hash.into()); + *call_depth + ]; + + 
execute_stored_payment_by_contract_hash( + &mut builder, + *call_depth, + subcalls, + current_contract_hash, + ) + } + } + + #[ignore] + #[test] + fn stored_payment_by_name_to_stored_session() { + for call_depth in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let subcalls = vec![super::stored_session(current_contract_hash.into()); *call_depth]; + + execute_stored_payment_by_contract_name(&mut builder, *call_depth, subcalls) + } + } + + #[ignore] + #[test] + fn stored_payment_by_hash_to_stored_session() { + for call_depth in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let subcalls = vec![super::stored_session(current_contract_hash.into()); *call_depth]; + + execute_stored_payment_by_contract_hash( + &mut builder, + *call_depth, + subcalls, + current_contract_hash, + ) + } + } + + #[ignore] + #[test] + fn stored_versioned_payment_by_name_to_stored_versioned_contract() { + for call_depth in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + + let subcalls = + vec![ + super::stored_versioned_contract(current_contract_package_hash.into()); + *call_depth + ]; + + execute_stored_payment_by_contract_name(&mut builder, *call_depth, subcalls) + } + } + + #[ignore] + #[test] + fn stored_versioned_payment_by_hash_to_stored_versioned_contract() { + for call_depth in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + + let subcalls = + vec![ + 
super::stored_versioned_contract(current_contract_package_hash.into()); + *call_depth + ]; + + execute_stored_payment_by_package_hash( + &mut builder, + *call_depth, + subcalls, + current_contract_package_hash, + ) + } + } + + #[ignore] + #[test] + fn stored_versioned_payment_by_name_to_stored_contract() { + for call_depth in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let subcalls = vec![super::stored_contract(current_contract_hash.into()); *call_depth]; + + execute_stored_payment_by_package_name(&mut builder, *call_depth, subcalls) + } + } + + #[ignore] + #[test] + fn stored_versioned_payment_by_hash_to_stored_contract() { + for call_depth in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let subcalls = vec![super::stored_contract(current_contract_hash.into()); *call_depth]; + + execute_stored_payment_by_package_hash( + &mut builder, + *call_depth, + subcalls, + current_contract_package_hash, + ) + } + } + + #[ignore] + #[test] + fn stored_payment_by_name_to_stored_versioned_contract() { + for call_depth in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + + let subcalls = + vec![ + super::stored_versioned_contract(current_contract_package_hash.into()); + *call_depth + ]; + + execute_stored_payment_by_contract_name(&mut builder, *call_depth, subcalls) + } + } + + #[ignore] + #[test] + fn stored_payment_by_hash_to_stored_versioned_contract() { + for call_depth in DEPTHS { + let mut builder = super::setup(); + let 
default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let subcalls = + vec![ + super::stored_versioned_contract(current_contract_package_hash.into()); + *call_depth + ]; + + execute_stored_payment_by_contract_hash( + &mut builder, + *call_depth, + subcalls, + current_contract_hash, + ) + } + } + + #[ignore] + #[test] + fn stored_payment_by_name_to_stored_contract() { + for call_depth in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let subcalls = vec![super::stored_contract(current_contract_hash.into()); *call_depth]; + + execute_stored_payment_by_contract_name(&mut builder, *call_depth, subcalls) + } + } + + #[ignore] + #[test] + fn stored_payment_by_hash_to_stored_contract() { + for call_depth in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + println!("DA {:?}", default_account); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let subcalls = vec![super::stored_contract(current_contract_hash.into()); *call_depth]; + + execute_stored_payment_by_contract_hash( + &mut builder, + *call_depth, + subcalls, + current_contract_hash, + ) + } + } + + // Stored session + recursive subcall failure cases + + #[ignore] + #[test] + fn stored_versioned_payment_by_name_to_stored_versioned_contract_to_stored_versioned_session_should_fail( + ) { + for call_depth in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + + let mut subcalls = + vec![ + 
super::stored_versioned_contract(current_contract_package_hash.into()); + call_depth.saturating_sub(1) + ]; + if *call_depth > 0 { + subcalls.push(super::stored_versioned_session( + current_contract_package_hash.into(), + )) + } + + execute_stored_payment_by_package_name(&mut builder, *call_depth, subcalls) + } + } + + #[ignore] + #[test] + fn stored_versioned_payment_by_hash_to_stored_versioned_contract_to_stored_session_should_fail() + { + for call_depth in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let mut subcalls = + vec![ + super::stored_versioned_contract(current_contract_package_hash.into()); + call_depth.saturating_sub(1) + ]; + if *call_depth > 0 { + subcalls.push(super::stored_session(current_contract_hash.into())) + } + + execute_stored_payment_by_package_hash( + &mut builder, + *call_depth, + subcalls, + current_contract_package_hash, + ) + } + } + + #[ignore] + #[test] + fn stored_versioned_payment_by_name_to_stored_contract_to_stored_versioned_session_should_fail() + { + for call_depth in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let mut subcalls = vec![ + super::stored_contract(current_contract_hash.into()); + call_depth.saturating_sub(1) + ]; + if *call_depth > 0 { + subcalls.push(super::stored_versioned_session( + current_contract_package_hash.into(), + )) + } + + execute_stored_payment_by_package_name(&mut builder, *call_depth, subcalls) + } + } + + #[ignore] + #[test] + fn stored_versioned_payment_by_hash_to_stored_contract_to_stored_session_should_fail() { + for 
call_depth in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let mut subcalls = vec![ + super::stored_contract(current_contract_hash.into()); + call_depth.saturating_sub(1) + ]; + if *call_depth > 0 { + subcalls.push(super::stored_session(current_contract_hash.into())) + } + + execute_stored_payment_by_package_hash( + &mut builder, + *call_depth, + subcalls, + current_contract_package_hash, + ) + } + } + + #[ignore] + #[test] + fn stored_payment_by_name_to_stored_versioned_contract_to_stored_versioned_session_should_fail() + { + for call_depth in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + + let mut subcalls = + vec![ + super::stored_versioned_contract(current_contract_package_hash.into()); + call_depth.saturating_sub(1) + ]; + if *call_depth > 0 { + subcalls.push(super::stored_versioned_session( + current_contract_package_hash.into(), + )) + } + + execute_stored_payment_by_contract_name(&mut builder, *call_depth, subcalls) + } + } + + #[ignore] + #[test] + fn stored_session_by_hash_to_stored_versioned_contract_to_stored_session_should_fail() { + for call_depth in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let mut subcalls = + vec![ + super::stored_versioned_contract(current_contract_package_hash.into()); + call_depth.saturating_sub(1) + ]; + if *call_depth > 0 { + subcalls.push(super::stored_session(current_contract_hash.into())) + 
} + + execute_stored_payment_by_contract_hash( + &mut builder, + *call_depth, + subcalls, + current_contract_hash, + ) + } + } + + #[ignore] + #[test] + fn stored_payment_by_name_to_stored_contract_to_stored_versioned_session_should_fail() { + for call_depth in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_package_hash = default_account.get_hash(CONTRACT_PACKAGE_NAME); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let mut subcalls = vec![ + super::stored_contract(current_contract_hash.into()); + call_depth.saturating_sub(1) + ]; + if *call_depth > 0 { + subcalls.push(super::stored_versioned_session( + current_contract_package_hash.into(), + )) + } + + execute_stored_payment_by_contract_name(&mut builder, *call_depth, subcalls) + } + } + + #[ignore] + #[test] + fn stored_payment_by_name_to_stored_contract_to_stored_session_should_fail() { + for call_depth in DEPTHS { + let mut builder = super::setup(); + let default_account = builder.get_account(*DEFAULT_ACCOUNT_ADDR).unwrap(); + let current_contract_hash = default_account.get_hash(CONTRACT_NAME); + + let mut subcalls = vec![ + super::stored_contract(current_contract_hash.into()); + call_depth.saturating_sub(1) + ]; + if *call_depth > 0 { + subcalls.push(super::stored_session(current_contract_hash.into())) + } + + execute_stored_payment_by_contract_name(&mut builder, *call_depth, subcalls) + } + } +} diff --git a/execution_engine_testing/tests/src/test/contract_api/get_caller.rs b/execution_engine_testing/tests/src/test/contract_api/get_caller.rs index 4dec3b91a1..bd66b54972 100644 --- a/execution_engine_testing/tests/src/test/contract_api/get_caller.rs +++ b/execution_engine_testing/tests/src/test/contract_api/get_caller.rs @@ -1,22 +1,28 @@ use casper_engine_test_support::{ - internal::{ - ExecuteRequestBuilder, InMemoryWasmTestBuilder, DEFAULT_PAYMENT, - DEFAULT_RUN_GENESIS_REQUEST, - }, 
- DEFAULT_ACCOUNT_ADDR, + ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, DEFAULT_PAYMENT, + LOCAL_GENESIS_REQUEST, +}; +use casper_types::{ + account::AccountHash, + contracts::{ContractHash, ContractPackageHash}, + runtime_args, + system::{Caller, CallerInfo}, + CLValue, EntityAddr, }; -use casper_types::{account::AccountHash, runtime_args, RuntimeArgs}; const CONTRACT_GET_CALLER: &str = "get_caller.wasm"; const CONTRACT_GET_CALLER_SUBCALL: &str = "get_caller_subcall.wasm"; const CONTRACT_TRANSFER_PURSE_TO_ACCOUNT: &str = "transfer_purse_to_account.wasm"; +const LOAD_CALLER_INFORMATION: &str = "load_caller_info.wasm"; const ACCOUNT_1_ADDR: AccountHash = AccountHash::new([1u8; 32]); +const LOAD_CALLER_INFO_HASH: &str = "load_caller_info_contract_hash"; +const LOAD_CALLER_INFO_PACKAGE_HASH: &str = "load_caller_info_package"; #[ignore] #[test] fn should_run_get_caller_contract() { - InMemoryWasmTestBuilder::default() - .run_genesis(&DEFAULT_RUN_GENESIS_REQUEST) + LmdbWasmTestBuilder::default() + .run_genesis(LOCAL_GENESIS_REQUEST.clone()) .exec( ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, @@ -32,9 +38,9 @@ fn should_run_get_caller_contract() { #[ignore] #[test] fn should_run_get_caller_contract_other_account() { - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); builder .exec( @@ -65,8 +71,8 @@ fn should_run_get_caller_contract_other_account() { #[test] fn should_run_get_caller_subcall_contract() { { - let mut builder = InMemoryWasmTestBuilder::default(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); builder .exec( @@ -81,9 +87,9 @@ fn should_run_get_caller_subcall_contract() { .commit(); } - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = 
LmdbWasmTestBuilder::default(); builder - .run_genesis(&DEFAULT_RUN_GENESIS_REQUEST) + .run_genesis(LOCAL_GENESIS_REQUEST.clone()) .exec( ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, @@ -106,3 +112,129 @@ fn should_run_get_caller_subcall_contract() { .expect_success() .commit(); } + +#[ignore] +#[test] +fn should_load_caller_information_based_on_action() { + let mut builder = LmdbWasmTestBuilder::default(); + builder + .run_genesis(LOCAL_GENESIS_REQUEST.clone()) + .exec( + ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + LOAD_CALLER_INFORMATION, + runtime_args! {}, + ) + .build(), + ) + .expect_success() + .commit(); + + let caller_info_entity_hash = builder + .get_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .get(LOAD_CALLER_INFO_HASH) + .expect("must have caller info entity key") + .into_entity_hash() + .expect("must get addressable entity hash"); + + let initiator_call_request = ExecuteRequestBuilder::contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + caller_info_entity_hash, + "initiator", + runtime_args! {}, + ) + .build(); + + builder + .exec(initiator_call_request) + .expect_success() + .commit(); + + let immediate_call_request = ExecuteRequestBuilder::contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + caller_info_entity_hash, + "get_immediate_caller", + runtime_args! {}, + ) + .build(); + + builder + .exec(immediate_call_request) + .expect_success() + .commit(); + + let initiator_call_request = ExecuteRequestBuilder::contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + caller_info_entity_hash, + "get_full_stack", + runtime_args! 
{}, + ) + .build(); + + builder + .exec(initiator_call_request) + .expect_success() + .commit(); + + let info_named_keys = + builder.get_named_keys(EntityAddr::SmartContract(caller_info_entity_hash.value())); + + let initiator = *info_named_keys + .get("initiator") + .expect("must have key entry for initiator"); + + let initiator_account_hash = builder + .query(None, initiator, &[]) + .expect("must have stored value") + .as_cl_value() + .map(|cl_val| CLValue::into_t(cl_val.clone())) + .expect("must have cl value") + .expect("must get account hash"); + + assert_eq!(*DEFAULT_ACCOUNT_ADDR, initiator_account_hash); + + let immediate = *info_named_keys + .get("immediate") + .expect("must have key entry for initiator"); + + let caller: CallerInfo = builder + .query(None, immediate, &[]) + .expect("must have stored value") + .as_cl_value() + .map(|cl_val| CLValue::into_t(cl_val.clone())) + .expect("must have cl value") + .expect("must get caller"); + + let expected_caller = CallerInfo::try_from(Caller::initiator(*DEFAULT_ACCOUNT_ADDR)) + .expect("must get caller info"); + + assert_eq!(expected_caller, caller); + + let full = *info_named_keys + .get("full") + .expect("must have key entry for full call stack"); + + let full_call_stack: Vec = builder + .query(None, full, &[]) + .expect("must have stored value") + .as_cl_value() + .map(|cl_val| CLValue::into_t(cl_val.clone())) + .expect("must have cl value") + .expect("must get full call stack"); + + let package_hash = builder + .get_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .get(LOAD_CALLER_INFO_PACKAGE_HASH) + .expect("must get package key") + .into_hash_addr() + .map(ContractPackageHash::new) + .expect("must get package hash"); + + let frame = CallerInfo::try_from(Caller::smart_contract( + package_hash, + ContractHash::new(caller_info_entity_hash.value()), + )) + .expect("must get frame"); + let expected_stack = vec![expected_caller, frame]; + assert_eq!(expected_stack, full_call_stack); +} diff --git 
a/execution_engine_testing/tests/src/test/contract_api/get_phase.rs b/execution_engine_testing/tests/src/test/contract_api/get_phase.rs index fae341206d..e433d7479c 100644 --- a/execution_engine_testing/tests/src/test/contract_api/get_phase.rs +++ b/execution_engine_testing/tests/src/test/contract_api/get_phase.rs @@ -1,11 +1,8 @@ use casper_engine_test_support::{ - internal::{ - DeployItemBuilder, ExecuteRequestBuilder, InMemoryWasmTestBuilder, DEFAULT_PAYMENT, - DEFAULT_RUN_GENESIS_REQUEST, - }, - DEFAULT_ACCOUNT_ADDR, + DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, + DEFAULT_PAYMENT, LOCAL_GENESIS_REQUEST, }; -use casper_types::{runtime_args, Phase, RuntimeArgs}; +use casper_types::{runtime_args, Phase}; const ARG_PHASE: &str = "phase"; const ARG_AMOUNT: &str = "amount"; @@ -15,29 +12,27 @@ const ARG_AMOUNT: &str = "amount"; fn should_run_get_phase_contract() { let default_account = *DEFAULT_ACCOUNT_ADDR; - let exec_request = { - let deploy = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_deploy_hash([1; 32]) - .with_session_code( - "get_phase.wasm", - runtime_args! { ARG_PHASE => Phase::Session }, - ) - .with_payment_code( - "get_phase_payment.wasm", - runtime_args! { - ARG_PHASE => Phase::Payment, - ARG_AMOUNT => *DEFAULT_PAYMENT - }, - ) - .with_authorization_keys(&[default_account]) - .build(); + let deploy_item = DeployItemBuilder::new() + .with_address(*DEFAULT_ACCOUNT_ADDR) + .with_deploy_hash([1; 32]) + .with_session_code( + "get_phase.wasm", + runtime_args! { ARG_PHASE => Phase::Session }, + ) + .with_payment_code( + "get_phase_payment.wasm", + runtime_args! 
{ + ARG_PHASE => Phase::Payment, + ARG_AMOUNT => *DEFAULT_PAYMENT + }, + ) + .with_authorization_keys(&[default_account]) + .build(); - ExecuteRequestBuilder::new().push_deploy(deploy).build() - }; + let exec_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); - InMemoryWasmTestBuilder::default() - .run_genesis(&DEFAULT_RUN_GENESIS_REQUEST) + LmdbWasmTestBuilder::default() + .run_genesis(LOCAL_GENESIS_REQUEST.clone()) .exec(exec_request) .commit() .expect_success(); diff --git a/execution_engine_testing/tests/src/test/contract_api/list_authorization_keys.rs b/execution_engine_testing/tests/src/test/contract_api/list_authorization_keys.rs new file mode 100644 index 0000000000..2341af92d1 --- /dev/null +++ b/execution_engine_testing/tests/src/test/contract_api/list_authorization_keys.rs @@ -0,0 +1,158 @@ +use casper_engine_test_support::{ + DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder, TransferRequestBuilder, + DEFAULT_ACCOUNT_ADDR, DEFAULT_PAYMENT, LOCAL_GENESIS_REQUEST, MINIMUM_ACCOUNT_CREATION_BALANCE, +}; +use casper_execution_engine::{engine_state::Error, execution::ExecError}; +use casper_types::{ + account::AccountHash, addressable_entity::Weight, runtime_args, + system::standard_payment::ARG_AMOUNT, ApiError, PublicKey, SecretKey, +}; +use once_cell::sync::Lazy; + +const ARG_ACCOUNT: &str = "account"; +const ARG_WEIGHT: &str = "weight"; +const DEFAULT_WEIGHT: Weight = Weight::new(1); + +const CONTRACT_ADD_ASSOCIATED_KEY: &str = "add_associated_key.wasm"; + +const CONTRACT_LIST_AUTHORIZATION_KEYS: &str = "list_authorization_keys.wasm"; +const ARG_EXPECTED_AUTHORIZATION_KEYS: &str = "expected_authorization_keys"; + +static ACCOUNT_1_SECRET_KEY: Lazy = + Lazy::new(|| SecretKey::secp256k1_from_bytes([234u8; 32]).unwrap()); +static ACCOUNT_1_PUBLIC_KEY: Lazy = + Lazy::new(|| PublicKey::from(&*ACCOUNT_1_SECRET_KEY)); +static ACCOUNT_1_ADDR: Lazy = Lazy::new(|| ACCOUNT_1_PUBLIC_KEY.to_account_hash()); + +static 
ACCOUNT_2_SECRET_KEY: Lazy = + Lazy::new(|| SecretKey::secp256k1_from_bytes([243u8; 32]).unwrap()); +static ACCOUNT_2_PUBLIC_KEY: Lazy = + Lazy::new(|| PublicKey::from(&*ACCOUNT_2_SECRET_KEY)); +static ACCOUNT_2_ADDR: Lazy = Lazy::new(|| ACCOUNT_2_PUBLIC_KEY.to_account_hash()); + +const USER_ERROR_ASSERTION: u16 = 0; + +#[ignore] +#[test] +fn should_list_authorization_keys() { + assert!( + test_match( + *DEFAULT_ACCOUNT_ADDR, + vec![*DEFAULT_ACCOUNT_ADDR], + vec![*DEFAULT_ACCOUNT_ADDR], + ), + "one signature should match the expected authorization key" + ); + assert!( + !test_match( + *DEFAULT_ACCOUNT_ADDR, + vec![*ACCOUNT_2_ADDR, *DEFAULT_ACCOUNT_ADDR], + vec![*DEFAULT_ACCOUNT_ADDR, *ACCOUNT_1_ADDR], + ), + "two signatures are off by one" + ); + assert!( + test_match( + *DEFAULT_ACCOUNT_ADDR, + vec![*ACCOUNT_2_ADDR, *DEFAULT_ACCOUNT_ADDR], + vec![*DEFAULT_ACCOUNT_ADDR, *ACCOUNT_2_ADDR], + ), + "two signatures should match the expected list" + ); + assert!( + test_match( + *ACCOUNT_1_ADDR, + vec![*ACCOUNT_1_ADDR], + vec![*ACCOUNT_1_ADDR], + ), + "one signature should match the output for non-default account" + ); + + assert!( + test_match( + *DEFAULT_ACCOUNT_ADDR, + vec![*ACCOUNT_2_ADDR, *DEFAULT_ACCOUNT_ADDR, *ACCOUNT_1_ADDR], + vec![*ACCOUNT_1_ADDR, *ACCOUNT_2_ADDR, *DEFAULT_ACCOUNT_ADDR], + ), + "multisig matches expected list" + ); + assert!( + !test_match( + *DEFAULT_ACCOUNT_ADDR, + vec![*ACCOUNT_2_ADDR, *DEFAULT_ACCOUNT_ADDR, *ACCOUNT_1_ADDR], + vec![], + ), + "multisig is not empty" + ); + assert!( + !test_match( + *DEFAULT_ACCOUNT_ADDR, + vec![*ACCOUNT_2_ADDR, *DEFAULT_ACCOUNT_ADDR, *ACCOUNT_1_ADDR], + vec![*ACCOUNT_2_ADDR, *ACCOUNT_1_ADDR], + ), + "multisig does not include caller account" + ); +} + +fn test_match( + caller: AccountHash, + signatures: Vec, + expected_authorization_keys: Vec, +) -> bool { + let mut builder = setup(); + let session_args = runtime_args! 
{ + ARG_EXPECTED_AUTHORIZATION_KEYS => expected_authorization_keys + }; + let deploy_hash = [42; 32]; + + let deploy_item = DeployItemBuilder::new() + .with_address(caller) + .with_session_code(CONTRACT_LIST_AUTHORIZATION_KEYS, session_args) + .with_standard_payment(runtime_args! { + ARG_AMOUNT => *DEFAULT_PAYMENT + }) + .with_authorization_keys(&signatures) + .with_deploy_hash(deploy_hash) + .build(); + let exec_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); + builder.exec(exec_request).commit(); + + match builder.get_error() { + Some(Error::Exec(ExecError::Revert(ApiError::User(USER_ERROR_ASSERTION)))) => false, + Some(error) => panic!("Unexpected error {:?}", error), + None => { + // Success + true + } + } +} + +fn setup() -> LmdbWasmTestBuilder { + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + for account in [*ACCOUNT_1_ADDR, *ACCOUNT_2_ADDR] { + let add_key_request = { + let session_args = runtime_args! 
{ + ARG_ACCOUNT => account, + ARG_WEIGHT => DEFAULT_WEIGHT, + }; + ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_ADD_ASSOCIATED_KEY, + session_args, + ) + .build() + }; + + let transfer_request = + TransferRequestBuilder::new(MINIMUM_ACCOUNT_CREATION_BALANCE, account).build(); + + builder.exec(add_key_request).expect_success().commit(); + builder + .transfer_and_commit(transfer_request) + .expect_success(); + } + + builder +} diff --git a/execution_engine_testing/tests/src/test/contract_api/list_named_keys.rs b/execution_engine_testing/tests/src/test/contract_api/list_named_keys.rs index 2b8e98b1ef..08bbd7e98d 100644 --- a/execution_engine_testing/tests/src/test/contract_api/list_named_keys.rs +++ b/execution_engine_testing/tests/src/test/contract_api/list_named_keys.rs @@ -1,8 +1,7 @@ use casper_engine_test_support::{ - internal::{ExecuteRequestBuilder, InMemoryWasmTestBuilder, DEFAULT_RUN_GENESIS_REQUEST}, - DEFAULT_ACCOUNT_ADDR, + ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST, }; -use casper_types::{account::AccountHash, contracts::NamedKeys, runtime_args, Key, RuntimeArgs}; +use casper_types::{account::AccountHash, contracts::NamedKeys, runtime_args, Key}; const CONTRACT_LIST_NAMED_KEYS: &str = "list_named_keys.wasm"; const NEW_NAME_ACCOUNT: &str = "Account"; @@ -13,8 +12,8 @@ const ARG_NEW_NAMED_KEYS: &str = "new_named_keys"; #[ignore] #[test] fn should_list_named_keys() { - let mut builder = InMemoryWasmTestBuilder::default(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); let initial_named_keys: NamedKeys = NamedKeys::new(); diff --git a/execution_engine_testing/tests/src/test/contract_api/main_purse.rs b/execution_engine_testing/tests/src/test/contract_api/main_purse.rs index 2e6e5dfef0..330ec9a014 100644 --- a/execution_engine_testing/tests/src/test/contract_api/main_purse.rs +++ 
b/execution_engine_testing/tests/src/test/contract_api/main_purse.rs @@ -1,12 +1,8 @@ use casper_engine_test_support::{ - internal::{ - ExecuteRequestBuilder, InMemoryWasmTestBuilder, DEFAULT_PAYMENT, - DEFAULT_RUN_GENESIS_REQUEST, - }, - DEFAULT_ACCOUNT_ADDR, + ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, DEFAULT_PAYMENT, + LOCAL_GENESIS_REQUEST, }; -use casper_execution_engine::shared::stored_value::StoredValue; -use casper_types::{account::AccountHash, runtime_args, Key, RuntimeArgs}; +use casper_types::{account::AccountHash, runtime_args}; const CONTRACT_MAIN_PURSE: &str = "main_purse.wasm"; const CONTRACT_TRANSFER_PURSE_TO_ACCOUNT: &str = "transfer_purse_to_account.wasm"; @@ -17,17 +13,13 @@ const ARG_AMOUNT: &str = "amount"; #[ignore] #[test] fn should_run_main_purse_contract_default_account() { - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - let builder = builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + let builder = builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); - let default_account = if let Ok(StoredValue::Account(account)) = - builder.query(None, Key::Account(*DEFAULT_ACCOUNT_ADDR), &[]) - { - account - } else { - panic!("could not get account") - }; + let default_account = builder + .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .expect("must have contract for default account"); let exec_request = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, @@ -42,7 +34,7 @@ fn should_run_main_purse_contract_default_account() { #[ignore] #[test] fn should_run_main_purse_contract_account_1() { - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); let exec_request_1 = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, @@ -52,13 +44,13 @@ fn should_run_main_purse_contract_account_1() { .build(); let builder = builder - .run_genesis(&DEFAULT_RUN_GENESIS_REQUEST) + .run_genesis(LOCAL_GENESIS_REQUEST.clone()) 
.exec(exec_request_1) .expect_success() .commit(); let account_1 = builder - .get_account(ACCOUNT_1_ADDR) + .get_entity_by_account_hash(ACCOUNT_1_ADDR) .expect("should get account"); let exec_request_2 = ExecuteRequestBuilder::standard( diff --git a/execution_engine_testing/tests/src/test/contract_api/mint_purse.rs b/execution_engine_testing/tests/src/test/contract_api/mint_purse.rs index 41181178d4..347246ceb1 100644 --- a/execution_engine_testing/tests/src/test/contract_api/mint_purse.rs +++ b/execution_engine_testing/tests/src/test/contract_api/mint_purse.rs @@ -1,6 +1,6 @@ use casper_engine_test_support::{ - internal::{ExecuteRequestBuilder, WasmTestBuilder, DEFAULT_RUN_GENESIS_REQUEST, SYSTEM_ADDR}, - DEFAULT_ACCOUNT_ADDR, MINIMUM_ACCOUNT_CREATION_BALANCE, + ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST, + MINIMUM_ACCOUNT_CREATION_BALANCE, SYSTEM_ADDR, }; use casper_types::{runtime_args, RuntimeArgs, U512}; @@ -21,9 +21,9 @@ fn should_run_mint_purse_contract() { ExecuteRequestBuilder::standard(*SYSTEM_ADDR, CONTRACT_MINT_PURSE, RuntimeArgs::default()) .build(); - let mut builder = WasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); builder.exec(exec_request_1).commit().expect_success(); builder.exec(exec_request_2).commit().expect_success(); @@ -39,8 +39,8 @@ fn should_not_allow_non_system_accounts_to_mint() { ) .build(); - assert!(WasmTestBuilder::default() - .run_genesis(&DEFAULT_RUN_GENESIS_REQUEST) + assert!(LmdbWasmTestBuilder::default() + .run_genesis(LOCAL_GENESIS_REQUEST.clone()) .exec(exec_request) .commit() .is_error()); diff --git a/execution_engine_testing/tests/src/test/contract_api/mod.rs b/execution_engine_testing/tests/src/test/contract_api/mod.rs index d73da496f0..2db086c25c 100644 --- a/execution_engine_testing/tests/src/test/contract_api/mod.rs +++ 
b/execution_engine_testing/tests/src/test/contract_api/mod.rs @@ -1,17 +1,24 @@ mod account; -mod blake2b; +mod add_contract_version; mod create_purse; +mod dictionary; +mod generic_hash; mod get_arg; +mod get_block_info; mod get_blocktime; +mod get_call_stack; mod get_caller; mod get_phase; +mod list_authorization_keys; mod list_named_keys; mod main_purse; mod mint_purse; +mod multisig_authorization; +mod named_dictionaries; +mod recover_secp256k1; mod revert; +mod runtime; mod subcall; mod transfer; -mod transfer_purse_to_account; -mod transfer_purse_to_purse; -mod transfer_stored; -mod transfer_u512_stored; +mod transfer_cached; +mod verify_signature; diff --git a/execution_engine_testing/tests/src/test/contract_api/multisig_authorization.rs b/execution_engine_testing/tests/src/test/contract_api/multisig_authorization.rs new file mode 100644 index 0000000000..77375da334 --- /dev/null +++ b/execution_engine_testing/tests/src/test/contract_api/multisig_authorization.rs @@ -0,0 +1,204 @@ +use casper_engine_test_support::{ + DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, + DEFAULT_PAYMENT, LOCAL_GENESIS_REQUEST, +}; +use casper_execution_engine::{engine_state::Error, execution::ExecError}; +use casper_types::{ + account::AccountHash, addressable_entity::Weight, runtime_args, ApiError, RuntimeArgs, +}; + +const CONTRACT_ADD_ASSOCIATED_KEY: &str = "add_associated_key.wasm"; +const CONTRACT_MULTISIG_AUTHORIZATION: &str = "multisig_authorization.wasm"; +const CONTRACT_KEY: &str = "contract"; + +const ARG_AMOUNT: &str = "amount"; +const ARG_ACCOUNT: &str = "account"; +const ARG_WEIGHT: &str = "weight"; +const DEFAULT_WEIGHT: Weight = Weight::new(1); +const ENTRYPOINT_A: &str = "entrypoint_a"; +const ENTRYPOINT_B: &str = "entrypoint_b"; + +const ROLE_A_KEYS: [AccountHash; 3] = [ + AccountHash::new([1; 32]), + AccountHash::new([2; 32]), + AccountHash::new([3; 32]), +]; + +const ROLE_B_KEYS: [AccountHash; 3] = [ + AccountHash::new([4; 
32]), + AccountHash::new([5; 32]), + AccountHash::new([6; 32]), +]; + +const USER_ERROR_PERMISSION_DENIED: u16 = 0; + +#[ignore] +#[test] +fn should_verify_multisig_authorization_key_roles() { + // Role A tests + assert!( + !test_multisig_auth( + *DEFAULT_ACCOUNT_ADDR, + ENTRYPOINT_A, + &[*DEFAULT_ACCOUNT_ADDR,] + ), + "entrypoint A does not work with identity key" + ); + assert!( + test_multisig_auth( + *DEFAULT_ACCOUNT_ADDR, + ENTRYPOINT_A, + &[*DEFAULT_ACCOUNT_ADDR, ROLE_A_KEYS[0],] + ), + "entrypoint A works with addional role A keys" + ); + assert!( + !test_multisig_auth( + *DEFAULT_ACCOUNT_ADDR, + ENTRYPOINT_A, + &[*DEFAULT_ACCOUNT_ADDR, ROLE_B_KEYS[0],] + ), + "entrypoint A does not allow role B key" + ); + assert!( + !test_multisig_auth( + *DEFAULT_ACCOUNT_ADDR, + ENTRYPOINT_A, + &[ + *DEFAULT_ACCOUNT_ADDR, + ROLE_B_KEYS[2], + ROLE_B_KEYS[1], + ROLE_B_KEYS[0], + ] + ), + "entrypoint A does not allow role B keys" + ); + assert!( + test_multisig_auth( + *DEFAULT_ACCOUNT_ADDR, + ENTRYPOINT_A, + &[ + *DEFAULT_ACCOUNT_ADDR, + ROLE_A_KEYS[2], + ROLE_A_KEYS[1], + ROLE_A_KEYS[0], + ] + ), + "entrypoint A works with all role A keys" + ); + + // Role B tests + assert!( + !test_multisig_auth( + *DEFAULT_ACCOUNT_ADDR, + ENTRYPOINT_B, + &[*DEFAULT_ACCOUNT_ADDR,] + ), + "entrypoint B does not work with identity key" + ); + assert!( + test_multisig_auth( + *DEFAULT_ACCOUNT_ADDR, + ENTRYPOINT_B, + &[*DEFAULT_ACCOUNT_ADDR, ROLE_B_KEYS[0],] + ), + "entrypoint B works with addional role A keys" + ); + assert!( + !test_multisig_auth( + *DEFAULT_ACCOUNT_ADDR, + ENTRYPOINT_B, + &[*DEFAULT_ACCOUNT_ADDR, ROLE_A_KEYS[0],] + ), + "entrypoint B does not allow role B key" + ); + assert!( + !test_multisig_auth( + *DEFAULT_ACCOUNT_ADDR, + ENTRYPOINT_B, + &[ + *DEFAULT_ACCOUNT_ADDR, + ROLE_A_KEYS[2], + ROLE_A_KEYS[1], + ROLE_A_KEYS[0], + ] + ), + "entrypoint B does not allow role B keys" + ); + assert!( + test_multisig_auth( + *DEFAULT_ACCOUNT_ADDR, + ENTRYPOINT_B, + &[ + 
*DEFAULT_ACCOUNT_ADDR, + ROLE_B_KEYS[2], + ROLE_B_KEYS[1], + ROLE_B_KEYS[0], + ] + ), + "entrypoint B works with all role B keys" + ); +} + +fn test_multisig_auth( + caller: AccountHash, + entry_point: &str, + authorization_keys: &[AccountHash], +) -> bool { + let mut builder = setup(); + let session_args = runtime_args! {}; + let payment_args = runtime_args! { + ARG_AMOUNT => *DEFAULT_PAYMENT + }; + let deploy_hash = [42; 32]; + let deploy_item = DeployItemBuilder::new() + .with_address(caller) + .with_stored_session_named_key(CONTRACT_KEY, entry_point, session_args) + .with_standard_payment(payment_args) + .with_authorization_keys(authorization_keys) + .with_deploy_hash(deploy_hash) + .build(); + let exec_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); + builder.exec(exec_request).commit(); + + match builder.get_error() { + Some(Error::Exec(ExecError::Revert(ApiError::User(USER_ERROR_PERMISSION_DENIED)))) => false, + Some(error) => panic!("Unexpected error {:?}", error), + None => { + // Success + true + } + } +} + +fn setup() -> LmdbWasmTestBuilder { + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + for account in ROLE_A_KEYS.iter().chain(&ROLE_B_KEYS) { + let add_key_request = { + let session_args = runtime_args! 
{ + ARG_ACCOUNT => *account, + ARG_WEIGHT => DEFAULT_WEIGHT, + }; + ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_ADD_ASSOCIATED_KEY, + session_args, + ) + .build() + }; + + builder.exec(add_key_request).expect_success().commit(); + } + let install_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_MULTISIG_AUTHORIZATION, + RuntimeArgs::default(), + ) + .build(); + + builder.exec(install_request).expect_success().commit(); + + builder +} diff --git a/execution_engine_testing/tests/src/test/contract_api/named_dictionaries.rs b/execution_engine_testing/tests/src/test/contract_api/named_dictionaries.rs new file mode 100644 index 0000000000..41764f61b0 --- /dev/null +++ b/execution_engine_testing/tests/src/test/contract_api/named_dictionaries.rs @@ -0,0 +1,33 @@ +use casper_engine_test_support::{ + ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST, +}; +use casper_types::runtime_args; +use rand::{rngs::StdRng, Rng, SeedableRng}; + +#[ignore] +#[test] +fn named_dictionaries_should_work_as_expected() { + // Types from `smart_contracts/contracts/test/named-dictionary-test/src/main.rs`. + type DictIndex = u8; + type KeySeed = u8; + type Value = u8; + + let mut rng = StdRng::seed_from_u64(0); + + let puts: Vec<(DictIndex, KeySeed, Value)> = (0..1_000) + .map(|_| (rng.gen_range(0..9), rng.gen_range(0..20), rng.gen())) + .collect(); + + let builder = &mut LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + builder + .exec( + ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + "named-dictionary-test.wasm", + runtime_args! 
{ "puts" => puts }, + ) + .build(), + ) + .expect_success(); +} diff --git a/execution_engine_testing/tests/src/test/contract_api/recover_secp256k1.rs b/execution_engine_testing/tests/src/test/contract_api/recover_secp256k1.rs new file mode 100644 index 0000000000..459d46a8d1 --- /dev/null +++ b/execution_engine_testing/tests/src/test/contract_api/recover_secp256k1.rs @@ -0,0 +1,116 @@ +use casper_engine_test_support::{ + ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST, +}; +use casper_types::{ + bytesrepr::{Bytes, ToBytes}, + runtime_args, PublicKey, SecretKey, Signature, +}; + +const RECOVER_SECP256K1_WASM: &str = "recover_secp256k1.wasm"; + +#[ignore] +#[test] +fn should_recover_secp256k1() { + let message = String::from("Recovery test"); + let message_bytes = message.as_bytes(); + let signing_key = SecretKey::generate_secp256k1().unwrap(); + let public_key = PublicKey::from(&signing_key); + + let (signature, recovery_id) = match signing_key { + SecretKey::Secp256k1(signing_key) => signing_key.sign_recoverable(message_bytes).unwrap(), + _ => panic!("PK recovery mechanism only works with Secp256k1 keys"), + }; + + let signature = Signature::Secp256k1(signature); + let signature_bytes: Bytes = signature.to_bytes().unwrap().into(); + let recovery_id = recovery_id.to_byte(); + + LmdbWasmTestBuilder::default() + .run_genesis(LOCAL_GENESIS_REQUEST.clone()) + .exec( + ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + RECOVER_SECP256K1_WASM, + runtime_args! 
{ + "message" => message, + "signature_bytes" => signature_bytes, + "recovery_id" => recovery_id, + "expected" => public_key + }, + ) + .build(), + ) + .expect_success() + .commit(); +} + +#[ignore] +#[test] +fn should_fail_recover_secp256k1_recovery_id_out_of_range() { + let message = String::from("Recovery test"); + let message_bytes = message.as_bytes(); + let signing_key = SecretKey::generate_secp256k1().unwrap(); + let public_key = PublicKey::from(&signing_key); + + let (signature, _) = match signing_key { + SecretKey::Secp256k1(signing_key) => signing_key.sign_recoverable(message_bytes).unwrap(), + _ => panic!("PK recovery mechanism only works with Secp256k1 keys"), + }; + + let signature = Signature::Secp256k1(signature); + let signature_bytes: Bytes = signature.to_bytes().unwrap().into(); + let recovery_id = 4; + + LmdbWasmTestBuilder::default() + .run_genesis(LOCAL_GENESIS_REQUEST.clone()) + .exec( + ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + RECOVER_SECP256K1_WASM, + runtime_args! { + "message" => message, + "signature_bytes" => signature_bytes, + "recovery_id" => recovery_id, + "expected" => public_key + }, + ) + .build(), + ) + .expect_failure() + .commit(); +} + +#[ignore] +#[test] +fn should_fail_recover_secp256k1_pk_mismatch() { + let message = String::from("Recovery test"); + let message_bytes = message.as_bytes(); + let signing_key = SecretKey::generate_secp256k1().unwrap(); + + let (signature, _) = match signing_key { + SecretKey::Secp256k1(signing_key) => signing_key.sign_recoverable(message_bytes).unwrap(), + _ => panic!("PK recovery mechanism only works with Secp256k1 keys"), + }; + + let signature = Signature::Secp256k1(signature); + let signature_bytes: Bytes = signature.to_bytes().unwrap().into(); + let recovery_id = 4; + + LmdbWasmTestBuilder::default() + .run_genesis(LOCAL_GENESIS_REQUEST.clone()) + .exec( + ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + RECOVER_SECP256K1_WASM, + runtime_args! 
{ + "message" => message, + "signature_bytes" => signature_bytes, + "recovery_id" => recovery_id, + "expected" => PublicKey::System + }, + ) + .build(), + ) + .expect_failure() + .commit(); +} diff --git a/execution_engine_testing/tests/src/test/contract_api/revert.rs b/execution_engine_testing/tests/src/test/contract_api/revert.rs index c2c19757c7..56ab34bedb 100644 --- a/execution_engine_testing/tests/src/test/contract_api/revert.rs +++ b/execution_engine_testing/tests/src/test/contract_api/revert.rs @@ -1,6 +1,5 @@ use casper_engine_test_support::{ - internal::{ExecuteRequestBuilder, InMemoryWasmTestBuilder, DEFAULT_RUN_GENESIS_REQUEST}, - DEFAULT_ACCOUNT_ADDR, + ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST, }; use casper_types::RuntimeArgs; @@ -12,8 +11,8 @@ fn should_revert() { let exec_request = ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, REVERT_WASM, RuntimeArgs::default()) .build(); - InMemoryWasmTestBuilder::default() - .run_genesis(&DEFAULT_RUN_GENESIS_REQUEST) + LmdbWasmTestBuilder::default() + .run_genesis(LOCAL_GENESIS_REQUEST.clone()) .exec(exec_request) .commit() .is_error(); diff --git a/execution_engine_testing/tests/src/test/contract_api/runtime.rs b/execution_engine_testing/tests/src/test/contract_api/runtime.rs new file mode 100644 index 0000000000..3c24218691 --- /dev/null +++ b/execution_engine_testing/tests/src/test/contract_api/runtime.rs @@ -0,0 +1,133 @@ +use std::collections::HashSet; + +use rand::Rng; + +use casper_engine_test_support::{ + DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, + DEFAULT_PAYMENT, LOCAL_GENESIS_REQUEST, +}; +use casper_execution_engine::{ + runtime::{cryptography, cryptography::DIGEST_LENGTH}, + runtime_context::RANDOM_BYTES_COUNT, +}; +use casper_storage::address_generator::ADDRESS_LENGTH; +use casper_types::runtime_args; + +const ARG_BYTES: &str = "bytes"; +const ARG_AMOUNT: &str = "amount"; + +const BLAKE2B_WASM: &str = 
"blake2b.wasm"; +const HASH_RESULT: &str = "hash_result"; + +const RANDOM_BYTES_WASM: &str = "random_bytes.wasm"; +const RANDOM_BYTES_RESULT: &str = "random_bytes_result"; + +const RANDOM_BYTES_PAYMENT_WASM: &str = "random_bytes_payment.wasm"; +const RANDOM_BYTES_PAYMENT_RESULT: &str = "random_bytes_payment_result"; + +fn get_value(builder: &LmdbWasmTestBuilder, result: &str) -> [u8; COUNT] { + let account = builder + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .expect("should have account"); + + let uref = account.named_keys().get(result).expect("should have value"); + + builder + .query(None, *uref, &[]) + .expect("should query") + .into_cl_value() + .expect("should be CLValue") + .into_t() + .expect("should convert") +} + +#[ignore] +#[test] +fn should_return_different_random_bytes_on_different_phases() { + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let mut rng = rand::thread_rng(); + let deploy_hash = rng.gen(); + let address = *DEFAULT_ACCOUNT_ADDR; + let deploy_item = DeployItemBuilder::new() + .with_address(address) + .with_session_code(RANDOM_BYTES_WASM, runtime_args! {}) + .with_payment_code( + RANDOM_BYTES_PAYMENT_WASM, + runtime_args! 
{ + ARG_AMOUNT => *DEFAULT_PAYMENT + }, + ) + .with_authorization_keys(&[address]) + .with_deploy_hash(deploy_hash) + .build(); + let execute_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); + + builder.exec(execute_request).commit().expect_success(); + + let session_generated_bytes = get_value::(&builder, RANDOM_BYTES_RESULT); + let payment_generated_bytes = + get_value::(&builder, RANDOM_BYTES_PAYMENT_RESULT); + + assert_ne!(session_generated_bytes, payment_generated_bytes) +} + +#[ignore] +#[test] +fn should_return_different_random_bytes_on_each_call() { + const RUNS: usize = 10; + + let mut builder = LmdbWasmTestBuilder::default(); + + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let all_addresses: HashSet<_> = (0..RUNS) + .map(|_| { + let exec_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + RANDOM_BYTES_WASM, + runtime_args! {}, + ) + .build(); + + builder.exec(exec_request).commit().expect_success(); + + get_value::(&builder, RANDOM_BYTES_RESULT) + }) + .collect(); + + // Assert that each address is unique. + assert_eq!(all_addresses.len(), RUNS) +} + +#[ignore] +#[test] +fn should_hash() { + const INPUT_LENGTH: usize = 32; + const RUNS: usize = 100; + + let mut rng = rand::thread_rng(); + let mut builder = LmdbWasmTestBuilder::default(); + + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + for _ in 0..RUNS { + let input: [u8; INPUT_LENGTH] = rng.gen(); + + let exec_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + BLAKE2B_WASM, + runtime_args! 
{ + ARG_BYTES => input + }, + ) + .build(); + + builder.exec(exec_request).commit().expect_success(); + + let digest = get_value::(&builder, HASH_RESULT); + let expected_digest = cryptography::blake2b(input); + assert_eq!(digest, expected_digest); + } +} diff --git a/execution_engine_testing/tests/src/test/contract_api/subcall.rs b/execution_engine_testing/tests/src/test/contract_api/subcall.rs index 9c4cbe6ee6..2dab61f172 100644 --- a/execution_engine_testing/tests/src/test/contract_api/subcall.rs +++ b/execution_engine_testing/tests/src/test/contract_api/subcall.rs @@ -1,10 +1,9 @@ use num_traits::cast::AsPrimitive; use casper_engine_test_support::{ - internal::{ExecuteRequestBuilder, InMemoryWasmTestBuilder, DEFAULT_RUN_GENESIS_REQUEST}, - DEFAULT_ACCOUNT_ADDR, + ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST, }; -use casper_types::{contracts::CONTRACT_INITIAL_VERSION, runtime_args, RuntimeArgs, U512}; +use casper_types::{runtime_args, RuntimeArgs, StorageCosts, ENTITY_INITIAL_VERSION, U512}; const ARG_TARGET: &str = "target_contract"; const ARG_GAS_AMOUNT: &str = "gas_amount"; @@ -12,7 +11,7 @@ const ARG_METHOD_NAME: &str = "method_name"; #[ignore] #[test] -fn should_charge_gas_for_subcall() { +fn should_enforce_subcall_consumption() { const CONTRACT_NAME: &str = "measure_gas_subcall.wasm"; const DO_NOTHING: &str = "do-nothing"; const DO_SOMETHING: &str = "do-something"; @@ -39,9 +38,9 @@ fn should_charge_gas_for_subcall() { ) .build(); - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); builder.exec(do_nothing_request).expect_success().commit(); @@ -49,42 +48,41 @@ fn should_charge_gas_for_subcall() { builder.exec(no_subcall_request).expect_success().commit(); - let do_nothing_cost = builder.exec_costs(0)[0]; + let do_nothing_consumed = 
builder.exec_consumed(0); - let do_something_cost = builder.exec_costs(1)[0]; + let do_something_consumed = builder.exec_consumed(1); - let no_subcall_cost = builder.exec_costs(2)[0]; + let no_subcall_consumed = builder.exec_consumed(2); assert_ne!( - do_nothing_cost, do_something_cost, - "should have different costs" + do_nothing_consumed, do_something_consumed, + "should have different consumeds" ); assert_ne!( - no_subcall_cost, do_something_cost, - "should have different costs" + no_subcall_consumed, do_something_consumed, + "should have different consumeds" ); assert!( - do_nothing_cost < do_something_cost, - "should cost more to do something via subcall" + do_nothing_consumed < do_something_consumed, + "should consume more to do something via subcall" ); assert!( - no_subcall_cost < do_nothing_cost, - "do nothing in a subcall should cost more than no subcall" + no_subcall_consumed < do_nothing_consumed, + "do nothing in a subcall should consume more than no subcall" ); } #[ignore] #[test] -fn should_add_all_gas_for_subcall() { +fn should_add_all_gas_consumed_for_subcall() { const CONTRACT_NAME: &str = "add_gas_subcall.wasm"; const ADD_GAS_FROM_SESSION: &str = "add-gas-from-session"; const ADD_GAS_VIA_SUBCALL: &str = "add-gas-via-subcall"; - // Use 90% of the standard test contract's balance - let gas_to_add: U512 = U512::from(u32::max_value()); + let gas_to_add: U512 = U512::from(1024); let gas_to_add_as_arg: u32 = gas_to_add.as_(); @@ -128,9 +126,9 @@ fn should_add_all_gas_for_subcall() { ) .build(); - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); builder .exec(add_zero_gas_from_session_request) @@ -147,30 +145,25 @@ fn should_add_all_gas_for_subcall() { builder .exec(add_some_gas_via_subcall_request) .expect_success() - .commit() - .finish(); - - let add_zero_gas_from_session_cost = 
builder.exec_costs(0)[0]; - let add_some_gas_from_session_cost = builder.exec_costs(1)[0]; - let add_zero_gas_via_subcall_cost = builder.exec_costs(2)[0]; - let add_some_gas_via_subcall_cost = builder.exec_costs(3)[0]; - - assert!(add_some_gas_from_session_cost.value() > gas_to_add); - assert_eq!( - add_some_gas_from_session_cost.value(), - gas_to_add + add_zero_gas_from_session_cost.value() - ); + .commit(); + + let add_zero_gas_from_session_consumed = builder.exec_consumed(0); + let add_some_gas_from_session_consumed = builder.exec_consumed(1); + let add_zero_gas_via_subcall_consumed = builder.exec_consumed(2); + let add_some_gas_via_subcall_consumed = builder.exec_consumed(3); - assert!(add_some_gas_via_subcall_cost.value() > gas_to_add); - assert_eq!( - add_some_gas_via_subcall_cost.value(), - gas_to_add + add_zero_gas_via_subcall_cost.value() + let expected_gas = U512::from(StorageCosts::default().gas_per_byte()) * gas_to_add; + assert!( + add_zero_gas_from_session_consumed.value() < add_zero_gas_via_subcall_consumed.value(), + "subcall expected to consume more gas due to storing contract" ); + assert!(add_some_gas_from_session_consumed.value() > expected_gas); + assert!(add_some_gas_via_subcall_consumed.value() > expected_gas); } #[ignore] #[test] -fn expensive_subcall_should_cost_more() { +fn expensive_subcall_should_consume_more() { const DO_NOTHING: &str = "do_nothing_stored.wasm"; const EXPENSIVE_CALCULATION: &str = "expensive_calculation.wasm"; const DO_NOTHING_PACKAGE_HASH_KEY_NAME: &str = "do_nothing_package_hash"; @@ -188,10 +181,10 @@ fn expensive_subcall_should_cost_more() { ) .build(); - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); // store the contracts first - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); builder .exec(store_do_nothing_request) @@ -201,26 +194,25 @@ fn expensive_subcall_should_cost_more() { builder 
.exec(store_calculation_request) .expect_success() - .commit() - .finish(); + .commit(); let account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) .expect("should get account"); let expensive_calculation_contract_hash = account .named_keys() .get(EXPENSIVE_CALCULATION_KEY) .expect("should get expensive_calculation contract hash") - .into_hash() + .into_entity_hash() .expect("should get hash"); // execute the contracts via subcalls - let call_do_nothing_request = ExecuteRequestBuilder::versioned_contract_call_by_hash_key_name( + let call_do_nothing_request = ExecuteRequestBuilder::versioned_contract_call_by_name( *DEFAULT_ACCOUNT_ADDR, DO_NOTHING_PACKAGE_HASH_KEY_NAME, - Some(CONTRACT_INITIAL_VERSION), + Some(ENTITY_INITIAL_VERSION), ENTRY_FUNCTION_NAME, RuntimeArgs::new(), ) @@ -228,7 +220,7 @@ fn expensive_subcall_should_cost_more() { let call_expensive_calculation_request = ExecuteRequestBuilder::contract_call_by_hash( *DEFAULT_ACCOUNT_ADDR, - expensive_calculation_contract_hash.into(), + expensive_calculation_contract_hash, "calculate", RuntimeArgs::default(), ) @@ -244,12 +236,12 @@ fn expensive_subcall_should_cost_more() { .expect_success() .commit(); - let do_nothing_cost = builder.exec_costs(2)[0]; + let do_nothing_consumed = builder.exec_consumed(2); - let expensive_calculation_cost = builder.exec_costs(3)[0]; + let expensive_calculation_consumed = builder.exec_consumed(3); assert!( - do_nothing_cost < expensive_calculation_cost, - "calculation cost should be higher than doing nothing cost" + do_nothing_consumed < expensive_calculation_consumed, + "calculation consumed should be higher than doing nothing consumed" ); } diff --git a/execution_engine_testing/tests/src/test/contract_api/transfer.rs b/execution_engine_testing/tests/src/test/contract_api/transfer.rs index d321afc5f0..0487d26302 100644 --- a/execution_engine_testing/tests/src/test/contract_api/transfer.rs +++ 
b/execution_engine_testing/tests/src/test/contract_api/transfer.rs @@ -2,34 +2,49 @@ use assert_matches::assert_matches; use once_cell::sync::Lazy; use casper_engine_test_support::{ - internal::{ - ExecuteRequestBuilder, InMemoryWasmTestBuilder, DEFAULT_PAYMENT, - DEFAULT_RUN_GENESIS_REQUEST, - }, - DEFAULT_ACCOUNT_ADDR, DEFAULT_ACCOUNT_INITIAL_BALANCE, MINIMUM_ACCOUNT_CREATION_BALANCE, + ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, + DEFAULT_ACCOUNT_INITIAL_BALANCE, DEFAULT_PAYMENT, LOCAL_GENESIS_REQUEST, + MINIMUM_ACCOUNT_CREATION_BALANCE, }; -use casper_execution_engine::core::{engine_state::Error as EngineError, execution::Error}; +use casper_execution_engine::{engine_state::Error as EngineError, execution::ExecError}; use casper_types::{ account::AccountHash, runtime_args, system::{handle_payment, mint}, - ApiError, RuntimeArgs, U512, + ApiError, PublicKey, SecretKey, U512, }; const CONTRACT_TRANSFER_PURSE_TO_ACCOUNT: &str = "transfer_purse_to_account.wasm"; const CONTRACT_TRANSFER_TO_ACCOUNT: &str = "transfer_to_account_u512.wasm"; +const CONTRACT_TRANSFER_TO_PUBLIC_KEY: &str = "transfer_to_public_key.wasm"; +const CONTRACT_TRANSFER_PURSE_TO_PUBLIC_KEY: &str = "transfer_purse_to_public_key.wasm"; +const CONTRACT_TRANSFER_TO_NAMED_PURSE: &str = "transfer_to_named_purse.wasm"; static TRANSFER_1_AMOUNT: Lazy = Lazy::new(|| U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE) + 1000); static TRANSFER_2_AMOUNT: Lazy = Lazy::new(|| U512::from(750)); static TRANSFER_2_AMOUNT_WITH_ADV: Lazy = Lazy::new(|| *DEFAULT_PAYMENT + *TRANSFER_2_AMOUNT); -static TRANSFER_TOO_MUCH: Lazy = Lazy::new(|| U512::from(u64::max_value())); -static ACCOUNT_1_INITIAL_BALANCE: Lazy = Lazy::new(|| *DEFAULT_PAYMENT); +static TRANSFER_TOO_MUCH: Lazy = Lazy::new(|| U512::from(u64::MAX)); +static ACCOUNT_1_INITIAL_BALANCE: Lazy = + Lazy::new(|| U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE)); + +static ACCOUNT_1_SECRET_KEY: Lazy = + Lazy::new(|| 
SecretKey::secp256k1_from_bytes([234u8; 32]).unwrap()); +static ACCOUNT_1_PUBLIC_KEY: Lazy = + Lazy::new(|| PublicKey::from(&*ACCOUNT_1_SECRET_KEY)); +static ACCOUNT_1_ADDR: Lazy = Lazy::new(|| ACCOUNT_1_PUBLIC_KEY.to_account_hash()); + +static ACCOUNT_2_SECRET_KEY: Lazy = + Lazy::new(|| SecretKey::secp256k1_from_bytes([210u8; 32]).unwrap()); +static ACCOUNT_2_PUBLIC_KEY: Lazy = + Lazy::new(|| PublicKey::from(&*ACCOUNT_2_SECRET_KEY)); +static ACCOUNT_2_ADDR: Lazy = Lazy::new(|| ACCOUNT_2_PUBLIC_KEY.to_account_hash()); -const ACCOUNT_1_ADDR: AccountHash = AccountHash::new([1u8; 32]); -const ACCOUNT_2_ADDR: AccountHash = AccountHash::new([2u8; 32]); const ARG_TARGET: &str = "target"; const ARG_AMOUNT: &str = "amount"; +const ARG_SOURCE_PURSE: &str = "source_purse"; +const ARG_PURSE_NAME: &str = "purse_name"; +const TEST_PURSE: &str = "test_purse"; #[ignore] #[test] @@ -37,12 +52,12 @@ fn should_transfer_to_account() { let transfer_amount: U512 = *TRANSFER_1_AMOUNT; // Run genesis - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - let builder = builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); let default_account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) + .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR) .expect("should get account"); let default_account_purse = default_account.main_purse(); @@ -55,7 +70,7 @@ fn should_transfer_to_account() { let exec_request_1 = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, CONTRACT_TRANSFER_TO_ACCOUNT, - runtime_args! { ARG_TARGET => ACCOUNT_1_ADDR, ARG_AMOUNT => *TRANSFER_1_AMOUNT }, + runtime_args! 
{ ARG_TARGET => *ACCOUNT_1_ADDR, ARG_AMOUNT => *TRANSFER_1_AMOUNT }, ) .build(); @@ -75,13 +90,143 @@ fn should_transfer_to_account() { ); let handle_payment = builder.get_handle_payment_contract(); - let payment_purse = handle_payment + let payment_purse = (*handle_payment .named_keys() .get(handle_payment::PAYMENT_PURSE_KEY) + .unwrap()) + .into_uref() + .unwrap(); + assert_eq!(builder.get_purse_balance(payment_purse), U512::zero()); +} + +#[ignore] +#[test] +fn should_transfer_to_public_key() { + let transfer_amount: U512 = *TRANSFER_1_AMOUNT; + + // Run genesis + let mut builder = LmdbWasmTestBuilder::default(); + + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let default_account = builder + .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .expect("should get account"); + + let default_account_purse = default_account.main_purse(); + + // Check genesis account balance + let initial_account_balance = builder.get_purse_balance(default_account_purse); + + // Exec transfer contract + + let exec_request_1 = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_PUBLIC_KEY, + runtime_args! 
{ ARG_TARGET => ACCOUNT_1_PUBLIC_KEY.clone(), ARG_AMOUNT => *TRANSFER_1_AMOUNT }, + ) + .build(); + + let proposer_reward_starting_balance = builder.get_proposer_purse_balance(); + + builder.exec(exec_request_1).expect_success().commit(); + + // Check genesis account balance + + let modified_balance = builder.get_purse_balance(default_account_purse); + + let transaction_fee = builder.get_proposer_purse_balance() - proposer_reward_starting_balance; + + assert_eq!( + modified_balance, + initial_account_balance - transaction_fee - transfer_amount + ); + + let handle_payment = builder.get_handle_payment_contract(); + let payment_purse = (*handle_payment + .named_keys() + .get(handle_payment::PAYMENT_PURSE_KEY) + .unwrap()) + .into_uref() + .unwrap(); + assert_eq!(builder.get_purse_balance(payment_purse), U512::zero()); +} + +#[ignore] +#[test] +fn should_transfer_from_purse_to_public_key() { + // Run genesis + let mut builder = LmdbWasmTestBuilder::default(); + + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + // Create a funded a purse, and store it in named keys + let exec_request_1 = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_NAMED_PURSE, + runtime_args! 
{ + ARG_PURSE_NAME => TEST_PURSE, + ARG_AMOUNT => *TRANSFER_1_AMOUNT, + }, + ) + .build(); + + builder.exec(exec_request_1).expect_success().commit(); + + let default_account = builder + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .expect("should get account"); + let default_account_purse = default_account.entity().main_purse(); + + // Check genesis account balance + let initial_account_balance = builder.get_purse_balance(default_account_purse); + + let test_purse = default_account + .named_keys() + .get(TEST_PURSE) .unwrap() - .clone() .into_uref() - .unwrap(); + .expect("should have test purse"); + + let test_purse_balanace_before = builder.get_purse_balance(test_purse); + + // Exec transfer contract + let exec_request_2 = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_PURSE_TO_PUBLIC_KEY, + runtime_args! { + ARG_SOURCE_PURSE => test_purse, + ARG_TARGET => ACCOUNT_1_PUBLIC_KEY.clone(), + ARG_AMOUNT => *TRANSFER_1_AMOUNT, + }, + ) + .build(); + + let proposer_reward_starting_balance = builder.get_proposer_purse_balance(); + + builder.exec(exec_request_2).expect_success().commit(); + + // Check genesis account balance + + let modified_balance = builder.get_purse_balance(default_account_purse); + + let transaction_fee = builder.get_proposer_purse_balance() - proposer_reward_starting_balance; + + assert_eq!(modified_balance, initial_account_balance - transaction_fee); + + let test_purse_balanace_after = builder.get_purse_balance(test_purse); + assert_eq!( + test_purse_balanace_after, + test_purse_balanace_before - *TRANSFER_1_AMOUNT + ); + + let handle_payment = builder.get_handle_payment_contract(); + let payment_purse = (*handle_payment + .named_keys() + .get(handle_payment::PAYMENT_PURSE_KEY) + .unwrap()) + .into_uref() + .unwrap(); assert_eq!(builder.get_purse_balance(payment_purse), U512::zero()); } @@ -93,12 +238,12 @@ fn should_transfer_from_account_to_account() { let transfer_2_amount: U512 = 
*TRANSFER_2_AMOUNT; // Run genesis - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - let builder = builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + let builder = builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); let default_account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) + .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR) .expect("should get account"); let default_account_purse = default_account.main_purse(); @@ -113,7 +258,7 @@ fn should_transfer_from_account_to_account() { let exec_request_1 = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, CONTRACT_TRANSFER_TO_ACCOUNT, - runtime_args! { ARG_TARGET => ACCOUNT_1_ADDR, ARG_AMOUNT => *TRANSFER_1_AMOUNT }, + runtime_args! { ARG_TARGET => *ACCOUNT_1_ADDR, ARG_AMOUNT => *TRANSFER_1_AMOUNT }, ) .build(); @@ -132,7 +277,7 @@ fn should_transfer_from_account_to_account() { // Check account 1 balance let account_1 = builder - .get_account(ACCOUNT_1_ADDR) + .get_entity_by_account_hash(*ACCOUNT_1_ADDR) .expect("should have account 1"); let account_1_purse = account_1.main_purse(); let account_1_balance = builder.get_purse_balance(account_1_purse); @@ -142,9 +287,9 @@ fn should_transfer_from_account_to_account() { // Exec transfer 2 contract let exec_request_2 = ExecuteRequestBuilder::standard( - ACCOUNT_1_ADDR, + *ACCOUNT_1_ADDR, CONTRACT_TRANSFER_TO_ACCOUNT, - runtime_args! { ARG_TARGET => ACCOUNT_2_ADDR, ARG_AMOUNT => *TRANSFER_2_AMOUNT }, + runtime_args! 
{ ARG_TARGET => *ACCOUNT_2_ADDR, ARG_AMOUNT => *TRANSFER_2_AMOUNT }, ) .build(); @@ -156,7 +301,7 @@ fn should_transfer_from_account_to_account() { builder.get_proposer_purse_balance() - proposer_reward_starting_balance_2; let account_2 = builder - .get_account(ACCOUNT_2_ADDR) + .get_entity_by_account_hash(*ACCOUNT_2_ADDR) .expect("should have account 2"); let account_2_purse = account_2.main_purse(); @@ -183,12 +328,12 @@ fn should_transfer_to_existing_account() { let transfer_2_amount: U512 = *TRANSFER_2_AMOUNT; // Run genesis - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - let builder = builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + let builder = builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); let default_account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) + .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR) .expect("should get account"); let default_account_purse = default_account.main_purse(); @@ -203,7 +348,7 @@ fn should_transfer_to_existing_account() { let exec_request_1 = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, CONTRACT_TRANSFER_TO_ACCOUNT, - runtime_args! { ARG_TARGET => ACCOUNT_1_ADDR, ARG_AMOUNT => *TRANSFER_1_AMOUNT }, + runtime_args! { ARG_TARGET => *ACCOUNT_1_ADDR, ARG_AMOUNT => *TRANSFER_1_AMOUNT }, ) .build(); @@ -214,7 +359,7 @@ fn should_transfer_to_existing_account() { // Exec transfer contract let account_1 = builder - .get_account(ACCOUNT_1_ADDR) + .get_entity_by_account_hash(*ACCOUNT_1_ADDR) .expect("should get account"); let account_1_purse = account_1.main_purse(); @@ -240,9 +385,9 @@ fn should_transfer_to_existing_account() { // Exec transfer contract let exec_request_2 = ExecuteRequestBuilder::standard( - ACCOUNT_1_ADDR, + *ACCOUNT_1_ADDR, CONTRACT_TRANSFER_TO_ACCOUNT, - runtime_args! { ARG_TARGET => ACCOUNT_2_ADDR, ARG_AMOUNT => *TRANSFER_2_AMOUNT }, + runtime_args! 
{ ARG_TARGET => *ACCOUNT_2_ADDR, ARG_AMOUNT => *TRANSFER_2_AMOUNT }, ) .build(); @@ -251,7 +396,7 @@ fn should_transfer_to_existing_account() { builder.exec(exec_request_2).expect_success().commit(); let account_2 = builder - .get_account(ACCOUNT_2_ADDR) + .get_entity_by_account_hash(*ACCOUNT_2_ADDR) .expect("should get account"); let account_2_purse = account_2.main_purse(); @@ -283,25 +428,26 @@ fn should_fail_when_insufficient_funds() { let exec_request_1 = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, CONTRACT_TRANSFER_TO_ACCOUNT, - runtime_args! { ARG_TARGET => ACCOUNT_1_ADDR, ARG_AMOUNT => *TRANSFER_1_AMOUNT }, + runtime_args! { ARG_TARGET => *ACCOUNT_1_ADDR, ARG_AMOUNT => *TRANSFER_1_AMOUNT }, ) .build(); let exec_request_2 = ExecuteRequestBuilder::standard( - ACCOUNT_1_ADDR, + *ACCOUNT_1_ADDR, CONTRACT_TRANSFER_TO_ACCOUNT, - runtime_args! { ARG_TARGET => ACCOUNT_2_ADDR, ARG_AMOUNT => *TRANSFER_2_AMOUNT_WITH_ADV }, + runtime_args! { ARG_TARGET => *ACCOUNT_2_ADDR, ARG_AMOUNT => *TRANSFER_2_AMOUNT_WITH_ADV }, ) .build(); let exec_request_3 = ExecuteRequestBuilder::standard( - ACCOUNT_1_ADDR, + *ACCOUNT_1_ADDR, CONTRACT_TRANSFER_TO_ACCOUNT, - runtime_args! { ARG_TARGET => ACCOUNT_2_ADDR, ARG_AMOUNT => *TRANSFER_TOO_MUCH }, + runtime_args! 
{ ARG_TARGET => *ACCOUNT_2_ADDR, ARG_AMOUNT => *TRANSFER_TOO_MUCH }, ) .build(); - let result = InMemoryWasmTestBuilder::default() - .run_genesis(&DEFAULT_RUN_GENESIS_REQUEST) + let mut builder = LmdbWasmTestBuilder::default(); + builder + .run_genesis(LOCAL_GENESIS_REQUEST.clone()) // Exec transfer contract .exec(exec_request_1) .expect_success() @@ -312,44 +458,117 @@ fn should_fail_when_insufficient_funds() { .commit() // Exec transfer contract .exec(exec_request_3) - .commit() - .finish(); + .commit(); - let exec_results = result - .builder() - .get_exec_result(2) + let exec_result = builder + .get_exec_result_owned(2) .expect("should have exec response"); - assert_eq!(exec_results.len(), 1); - let exec_result = exec_results[0].as_error().expect("should have error"); - let error = assert_matches!(exec_result, EngineError::Exec(Error::Revert(e)) => *e, "{:?}", exec_result); - assert_eq!(error, ApiError::from(mint::Error::InsufficientFunds)); + let exec_result = exec_result.error().expect("should have error"); + let error = assert_matches!(exec_result, EngineError::Exec(ExecError::Revert(e)) => e, "{:?}", exec_result); + assert_eq!(*error, ApiError::from(mint::Error::InsufficientFunds)); } #[ignore] +#[allow(unused)] #[test] fn should_transfer_total_amount() { - let mut builder = InMemoryWasmTestBuilder::default(); + // NOTE: as of protocol version 2.0.0 the execution engine is no longer responsible + for payment, refund, or fee handling...thus + // full transactions executed via the node are subject to payment, fee, refund, + // etc based upon chainspec settings, but when using the EE directly as is done + // in this test, there is no charge and all transfers are at face value.
+ fn balance_checker(bldr: &mut LmdbWasmTestBuilder, account_hash: AccountHash) -> U512 { + let entity = bldr + .get_entity_by_account_hash(account_hash) + .expect("should have account entity"); + let entity_main_purse = entity.main_purse(); + bldr.get_purse_balance(entity_main_purse) + } + fn commit(bldr: &mut LmdbWasmTestBuilder, req_bldr: ExecuteRequestBuilder) { + let req = req_bldr.build(); + bldr.exec(req).expect_success().commit(); + } + fn genesis() -> LmdbWasmTestBuilder { + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + builder + } + + let mut builder = genesis(); + + let balance_x_initial = balance_checker(&mut builder, *DEFAULT_ACCOUNT_ADDR); + let amount_to_fund = *ACCOUNT_1_INITIAL_BALANCE; + + // fund account 1 from default account + commit( + &mut builder, + ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_PURSE_TO_ACCOUNT, + runtime_args! { "target" => *ACCOUNT_1_ADDR, "amount" => amount_to_fund }, + ), + ); + let balance_x_out = balance_checker(&mut builder, *DEFAULT_ACCOUNT_ADDR); + assert_eq!( + balance_x_initial - amount_to_fund, + balance_x_out, + "funded amount should be deducted from funder's balance" + ); + let balance_y_initial = balance_checker(&mut builder, *ACCOUNT_1_ADDR); + assert_eq!( + amount_to_fund, balance_y_initial, + "receiving account's balance should match funding amount" + ); + let diff = balance_x_initial - balance_y_initial; + assert_eq!( + diff, balance_x_out, + "funder's balance difference should equal funded amount" + ); - let exec_request_1 = ExecuteRequestBuilder::standard( - *DEFAULT_ACCOUNT_ADDR, - CONTRACT_TRANSFER_PURSE_TO_ACCOUNT, - runtime_args! { "target" => ACCOUNT_1_ADDR, "amount" => *ACCOUNT_1_INITIAL_BALANCE }, - ) - .build(); + // transfer it to a different account + commit( + &mut builder, + ExecuteRequestBuilder::standard( + *ACCOUNT_1_ADDR, + CONTRACT_TRANSFER_PURSE_TO_ACCOUNT, + runtime_args! 
{ "target" => *ACCOUNT_2_ADDR, "amount" => balance_y_initial }, + ), + ); + let balance_y_out = balance_checker(&mut builder, *ACCOUNT_1_ADDR); + assert_eq!( + balance_y_initial - amount_to_fund, + balance_y_out, + "funded amount should be deducted from funder's balance" + ); + let balance_z_initial = balance_checker(&mut builder, *ACCOUNT_2_ADDR); + assert_eq!( + amount_to_fund, balance_z_initial, + "receiving account's balance should match funding amount" + ); + let diff = balance_y_initial - balance_z_initial; + assert_eq!( + diff, balance_y_out, + "funder's balance difference should equal funded amount" + ); - let exec_request_2 = ExecuteRequestBuilder::standard( - ACCOUNT_1_ADDR, - CONTRACT_TRANSFER_PURSE_TO_ACCOUNT, - runtime_args! { "target" => ACCOUNT_2_ADDR, "amount" => *ACCOUNT_1_INITIAL_BALANCE }, - ) - .build(); - builder - .run_genesis(&DEFAULT_RUN_GENESIS_REQUEST) - .exec(exec_request_1) - .expect_success() - .commit() - .exec(exec_request_2) - .commit() - .expect_success() - .finish(); + // transfer it back to originator + commit( + &mut builder, + ExecuteRequestBuilder::standard( + *ACCOUNT_2_ADDR, + CONTRACT_TRANSFER_PURSE_TO_ACCOUNT, + runtime_args! 
{ "target" => *DEFAULT_ACCOUNT_ADDR, "amount" => balance_z_initial }, + ), + ); + let balance_x_in = balance_checker(&mut builder, *DEFAULT_ACCOUNT_ADDR); + let balance_z_out = balance_checker(&mut builder, *ACCOUNT_2_ADDR); + assert_eq!( + U512::zero(), + balance_z_out, + "trampoline account should be zero'd" + ); + assert_eq!( + balance_x_initial, balance_x_in, + "original balance should be restored" + ); } diff --git a/execution_engine_testing/tests/src/test/contract_api/transfer_cached.rs b/execution_engine_testing/tests/src/test/contract_api/transfer_cached.rs new file mode 100644 index 0000000000..ece72ef170 --- /dev/null +++ b/execution_engine_testing/tests/src/test/contract_api/transfer_cached.rs @@ -0,0 +1,114 @@ +use once_cell::sync::Lazy; +use tempfile::TempDir; + +use casper_engine_test_support::{ + LmdbWasmTestBuilder, TransferRequestBuilder, DEFAULT_ACCOUNT_ADDR, + DEFAULT_ACCOUNT_INITIAL_BALANCE, LOCAL_GENESIS_REQUEST, +}; +use casper_types::{account::AccountHash, MintCosts, PublicKey, SecretKey, U512}; + +/// The maximum amount of motes that payment code execution can cost. 
+const TRANSFER_MOTES_AMOUNT: u64 = 2_500_000_000; + +static TRANSFER_AMOUNT: Lazy = Lazy::new(|| U512::from(TRANSFER_MOTES_AMOUNT)); + +static ACCOUNT_1_SECRET_KEY: Lazy = + Lazy::new(|| SecretKey::secp256k1_from_bytes([234u8; 32]).unwrap()); +static ACCOUNT_1_PUBLIC_KEY: Lazy = + Lazy::new(|| PublicKey::from(&*ACCOUNT_1_SECRET_KEY)); +static ACCOUNT_1_ADDR: Lazy = Lazy::new(|| ACCOUNT_1_PUBLIC_KEY.to_account_hash()); + +static ACCOUNT_2_SECRET_KEY: Lazy = + Lazy::new(|| SecretKey::secp256k1_from_bytes([210u8; 32]).unwrap()); +static ACCOUNT_2_PUBLIC_KEY: Lazy = + Lazy::new(|| PublicKey::from(&*ACCOUNT_2_SECRET_KEY)); +static ACCOUNT_2_ADDR: Lazy = Lazy::new(|| ACCOUNT_2_PUBLIC_KEY.to_account_hash()); + +#[ignore] +#[test] +fn should_transfer_to_account() { + let data_dir = TempDir::new().expect("should create temp dir"); + let mut builder = LmdbWasmTestBuilder::new(data_dir.path()); + + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let pre_state_hash = builder.get_post_state_hash(); + + // Default account to account 1 + let transfer_request = TransferRequestBuilder::new(1, *ACCOUNT_1_ADDR).build(); + builder + .transfer_and_commit(transfer_request) + .expect_success(); + + assert_ne!( + pre_state_hash, + builder.get_post_state_hash(), + "post state hash didn't change..." 
+ ); + + let default_account = builder + .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .expect("should get default account"); + + let account1 = builder + .get_entity_by_account_hash(*ACCOUNT_1_ADDR) + .expect("should get account 1"); + + let default_account_balance = builder.get_purse_balance(default_account.main_purse()); + let default_expected_balance = U512::from(DEFAULT_ACCOUNT_INITIAL_BALANCE) - (U512::one()); + assert_eq!( + default_account_balance, default_expected_balance, + "default account balance should reflect the transfer", + ); + + let account_1_balance = builder.get_purse_balance(account1.main_purse()); + assert_eq!( + account_1_balance, + U512::one(), + "account 1 balance should have been exactly one (1)" + ); +} + +#[ignore] +#[test] +fn should_transfer_multiple_times() { + let data_dir = TempDir::new().expect("should create temp dir"); + let mut builder = LmdbWasmTestBuilder::new(data_dir.path()); + + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let pre_state_hash = builder.get_post_state_hash(); + + // Default account to account 1 + // We must first transfer the amount account 1 will transfer to account 2, along with the fee + // account 1 will need to pay for that transfer. 
+ let transfer_request = TransferRequestBuilder::new( + *TRANSFER_AMOUNT + MintCosts::default().transfer, + *ACCOUNT_1_ADDR, + ) + .build(); + builder + .transfer_and_commit(transfer_request) + .expect_success(); + + let transfer_request = TransferRequestBuilder::new(*TRANSFER_AMOUNT, *ACCOUNT_2_ADDR) + .with_initiator(*ACCOUNT_1_ADDR) + .build(); + builder + .transfer_and_commit(transfer_request) + .expect_success(); + + // Double spend test for account 1 + let transfer_request = TransferRequestBuilder::new(*TRANSFER_AMOUNT, *ACCOUNT_2_ADDR) + .with_initiator(*ACCOUNT_1_ADDR) + .build(); + builder + .transfer_and_commit(transfer_request) + .expect_failure(); + + assert_ne!( + pre_state_hash, + builder.get_post_state_hash(), + "post state hash didn't change..." + ); +} diff --git a/execution_engine_testing/tests/src/test/contract_api/transfer_purse_to_account.rs b/execution_engine_testing/tests/src/test/contract_api/transfer_purse_to_account.rs deleted file mode 100644 index bbdfb38747..0000000000 --- a/execution_engine_testing/tests/src/test/contract_api/transfer_purse_to_account.rs +++ /dev/null @@ -1,121 +0,0 @@ -use std::convert::TryFrom; - -use once_cell::sync::Lazy; - -use casper_engine_test_support::{ - internal::{ - ExecuteRequestBuilder, InMemoryWasmTestBuilder, DEFAULT_PAYMENT, - DEFAULT_RUN_GENESIS_REQUEST, - }, - DEFAULT_ACCOUNT_ADDR, DEFAULT_ACCOUNT_INITIAL_BALANCE, -}; -use casper_types::{ - account::AccountHash, - runtime_args, - system::{ - auction::ARG_AMOUNT, - mint::{self, ARG_TARGET}, - }, - ApiError, CLValue, RuntimeArgs, U512, -}; - -const CONTRACT_TRANSFER_PURSE_TO_ACCOUNT: &str = "transfer_purse_to_account.wasm"; -const ACCOUNT_1_ADDR: AccountHash = AccountHash::new([42u8; 32]); - -static ACCOUNT_1_INITIAL_FUND: Lazy = Lazy::new(|| *DEFAULT_PAYMENT + 42); - -#[ignore] -#[test] -fn should_run_purse_to_account_transfer() { - let mut builder = InMemoryWasmTestBuilder::default(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); - - let 
account_1_account_hash = ACCOUNT_1_ADDR; - assert!( - builder.get_account(account_1_account_hash).is_none(), - "new account shouldn't exist yet" - ); - - let exec_request_1 = ExecuteRequestBuilder::standard( - *DEFAULT_ACCOUNT_ADDR, - CONTRACT_TRANSFER_PURSE_TO_ACCOUNT, - runtime_args! { ARG_TARGET => account_1_account_hash, ARG_AMOUNT => *ACCOUNT_1_INITIAL_FUND }, - ) - .build(); - - builder.exec(exec_request_1).expect_success().commit(); - - let new_account = builder - .get_account(account_1_account_hash) - .expect("new account should exist now"); - - let balance = builder.get_purse_balance(new_account.main_purse()); - - assert_eq!( - balance, *ACCOUNT_1_INITIAL_FUND, - "balance should equal transferred amount" - ); -} - -#[ignore] -#[test] -fn should_fail_when_sending_too_much_from_purse_to_account() { - let account_1_key = ACCOUNT_1_ADDR; - - let exec_request_1 = ExecuteRequestBuilder::standard( - *DEFAULT_ACCOUNT_ADDR, - CONTRACT_TRANSFER_PURSE_TO_ACCOUNT, - runtime_args! { "target" => account_1_key, "amount" => U512::max_value() }, - ) - .build(); - - let mut builder = InMemoryWasmTestBuilder::default(); - - builder - .run_genesis(&DEFAULT_RUN_GENESIS_REQUEST) - .exec(exec_request_1) - .expect_success() - .commit() - .finish(); - - // Get transforms output for genesis account - let default_account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) - .expect("should get genesis account"); - - // Obtain main purse's balance - let final_balance_key = default_account.named_keys()["final_balance"].normalize(); - let final_balance = CLValue::try_from( - builder - .query(None, final_balance_key, &[]) - .expect("should have final balance"), - ) - .expect("should be a CLValue") - .into_t::() - .expect("should be U512"); - // When trying to send too much coins the balance is left unchanged - assert_eq!( - final_balance, - U512::from(DEFAULT_ACCOUNT_INITIAL_BALANCE) - *DEFAULT_PAYMENT, - "final balance incorrect" - ); - - // Get the `transfer_result` for a given account - 
let transfer_result_key = default_account.named_keys()["transfer_result"].normalize(); - let transfer_result = CLValue::try_from( - builder - .query(None, transfer_result_key, &[]) - .expect("should have transfer result"), - ) - .expect("should be a CLValue") - .into_t::() - .expect("should be String"); - - // Main assertion for the result of `transfer_from_purse_to_purse` - let expected_error: ApiError = mint::Error::InsufficientFunds.into(); - assert_eq!( - transfer_result, - format!("{:?}", Result::<(), _>::Err(expected_error)), - "Transfer Error incorrect" - ); -} diff --git a/execution_engine_testing/tests/src/test/contract_api/transfer_purse_to_purse.rs b/execution_engine_testing/tests/src/test/contract_api/transfer_purse_to_purse.rs deleted file mode 100644 index e81a86b7a0..0000000000 --- a/execution_engine_testing/tests/src/test/contract_api/transfer_purse_to_purse.rs +++ /dev/null @@ -1,156 +0,0 @@ -use std::convert::TryFrom; - -use casper_types::{runtime_args, system::mint, ApiError, CLValue, RuntimeArgs, U512}; - -use casper_engine_test_support::{ - internal::{ - ExecuteRequestBuilder, InMemoryWasmTestBuilder, DEFAULT_PAYMENT, - DEFAULT_RUN_GENESIS_REQUEST, - }, - DEFAULT_ACCOUNT_ADDR, DEFAULT_ACCOUNT_INITIAL_BALANCE, -}; - -const CONTRACT_TRANSFER_PURSE_TO_PURSE: &str = "transfer_purse_to_purse.wasm"; -const PURSE_TO_PURSE_AMOUNT: u64 = 42; -const ARG_SOURCE: &str = "source"; -const ARG_TARGET: &str = "target"; -const ARG_AMOUNT: &str = "amount"; - -#[ignore] -#[test] -fn should_run_purse_to_purse_transfer() { - let source = "purse:main".to_string(); - let target = "purse:secondary".to_string(); - - let exec_request_1 = ExecuteRequestBuilder::standard( - *DEFAULT_ACCOUNT_ADDR, - CONTRACT_TRANSFER_PURSE_TO_PURSE, - runtime_args! 
{ - ARG_SOURCE => source, - ARG_TARGET => target, - ARG_AMOUNT => U512::from(PURSE_TO_PURSE_AMOUNT) - }, - ) - .build(); - - let mut builder = InMemoryWasmTestBuilder::default(); - builder - .run_genesis(&DEFAULT_RUN_GENESIS_REQUEST) - .exec(exec_request_1) - .expect_success() - .commit() - .finish(); - - let default_account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) - .expect("should get genesis account"); - - // Get the `purse_transfer_result` for a given - let purse_transfer_result_key = - default_account.named_keys()["purse_transfer_result"].normalize(); - let purse_transfer_result = CLValue::try_from( - builder - .query(None, purse_transfer_result_key, &[]) - .expect("should have purse transfer result"), - ) - .expect("should be a CLValue") - .into_t::() - .expect("should be String"); - // Main assertion for the result of `transfer_from_purse_to_purse` - assert_eq!( - purse_transfer_result, - format!("{:?}", Result::<_, ApiError>::Ok(()),) - ); - - let main_purse_balance_key = default_account.named_keys()["main_purse_balance"].normalize(); - let main_purse_balance = CLValue::try_from( - builder - .query(None, main_purse_balance_key, &[]) - .expect("should have main purse balance"), - ) - .expect("should be a CLValue") - .into_t::() - .expect("should be U512"); - - // Assert secondary purse value after successful transfer - let purse_secondary_key = default_account.named_keys()["purse:secondary"]; - let purse_secondary_uref = purse_secondary_key.into_uref().unwrap(); - let purse_secondary_balance = builder.get_purse_balance(purse_secondary_uref); - - // Final balance of the destination purse - assert_eq!(purse_secondary_balance, U512::from(PURSE_TO_PURSE_AMOUNT)); - assert_eq!( - main_purse_balance, - U512::from(DEFAULT_ACCOUNT_INITIAL_BALANCE) - *DEFAULT_PAYMENT - PURSE_TO_PURSE_AMOUNT - ); -} - -#[ignore] -#[test] -fn should_run_purse_to_purse_transfer_with_error() { - // This test runs a contract that's after every call extends the same key with - // 
more data - let source = "purse:main".to_string(); - let target = "purse:secondary".to_string(); - let exec_request_1 = ExecuteRequestBuilder::standard( - *DEFAULT_ACCOUNT_ADDR, - CONTRACT_TRANSFER_PURSE_TO_PURSE, - runtime_args! { ARG_SOURCE => source, ARG_TARGET => target, ARG_AMOUNT => U512::from(DEFAULT_ACCOUNT_INITIAL_BALANCE + 1) }, - ) - .build(); - let mut builder = InMemoryWasmTestBuilder::default(); - builder - .run_genesis(&DEFAULT_RUN_GENESIS_REQUEST) - .exec(exec_request_1) - .expect_success() - .commit() - .finish(); - - let default_account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) - .expect("should get genesis account"); - - // Get the `purse_transfer_result` for a given - let purse_transfer_result_key = - default_account.named_keys()["purse_transfer_result"].normalize(); - let purse_transfer_result = CLValue::try_from( - builder - .query(None, purse_transfer_result_key, &[]) - .expect("should have purse transfer result"), - ) - .expect("should be a CLValue") - .into_t::() - .expect("should be String"); - // Main assertion for the result of `transfer_from_purse_to_purse` - assert_eq!( - purse_transfer_result, - format!( - "{:?}", - Result::<(), ApiError>::Err(mint::Error::InsufficientFunds.into()) - ), - ); - - // Obtain main purse's balance - let main_purse_balance_key = default_account.named_keys()["main_purse_balance"].normalize(); - let main_purse_balance = CLValue::try_from( - builder - .query(None, main_purse_balance_key, &[]) - .expect("should have main purse balance"), - ) - .expect("should be a CLValue") - .into_t::() - .expect("should be U512"); - - // Assert secondary purse value after successful transfer - let purse_secondary_key = default_account.named_keys()["purse:secondary"]; - let purse_secondary_uref = purse_secondary_key.into_uref().unwrap(); - let purse_secondary_balance = builder.get_purse_balance(purse_secondary_uref); - - // Final balance of the destination purse equals to 0 as this purse is created - // as new. 
- assert_eq!(purse_secondary_balance, U512::from(0)); - assert_eq!( - main_purse_balance, - U512::from(DEFAULT_ACCOUNT_INITIAL_BALANCE) - *DEFAULT_PAYMENT - ); -} diff --git a/execution_engine_testing/tests/src/test/contract_api/transfer_stored.rs b/execution_engine_testing/tests/src/test/contract_api/transfer_stored.rs deleted file mode 100644 index 32610d0478..0000000000 --- a/execution_engine_testing/tests/src/test/contract_api/transfer_stored.rs +++ /dev/null @@ -1,101 +0,0 @@ -use casper_engine_test_support::{ - internal::{ - DeployItemBuilder, ExecuteRequestBuilder, InMemoryWasmTestBuilder, DEFAULT_ACCOUNT_KEY, - DEFAULT_PAYMENT, DEFAULT_RUN_GENESIS_REQUEST, - }, - DEFAULT_ACCOUNT_ADDR, DEFAULT_ACCOUNT_INITIAL_BALANCE, -}; -use casper_types::{account::AccountHash, runtime_args, RuntimeArgs, U512}; - -const CONTRACT_TRANSFER_TO_ACCOUNT_NAME: &str = "transfer_to_account"; -const ACCOUNT_1_ADDR: AccountHash = AccountHash::new([1u8; 32]); -const TRANSFER_ENTRYPOINT: &str = "transfer"; -const ARG_AMOUNT: &str = "amount"; -const ARG_TARGET: &str = "target"; - -#[ignore] -#[test] -fn should_transfer_to_account_stored() { - let mut builder = InMemoryWasmTestBuilder::default(); - // first, store transfer contract - let exec_request = ExecuteRequestBuilder::standard( - *DEFAULT_ACCOUNT_ADDR, - &format!("{}_stored.wasm", CONTRACT_TRANSFER_TO_ACCOUNT_NAME), - RuntimeArgs::default(), - ) - .build(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); - - let proposer_reward_starting_balance_alpha = builder.get_proposer_purse_balance(); - - builder.exec_commit_finish(exec_request); - - let transaction_fee_alpha = - builder.get_proposer_purse_balance() - proposer_reward_starting_balance_alpha; - - let default_account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) - .expect("should have account"); - - let contract_hash = default_account - .named_keys() - .get(CONTRACT_TRANSFER_TO_ACCOUNT_NAME) - .expect("contract_hash should exist") - .into_hash() - .expect("should be a 
hash"); - - let modified_balance_alpha: U512 = builder.get_purse_balance(default_account.main_purse()); - - let transferred_amount: u64 = 1; - let payment_purse_amount = *DEFAULT_PAYMENT; - - // next make another deploy that USES stored payment logic - let exec_request = { - let deploy = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_stored_session_hash( - contract_hash.into(), - TRANSFER_ENTRYPOINT, - runtime_args! { ARG_TARGET => ACCOUNT_1_ADDR, ARG_AMOUNT => transferred_amount }, - ) - .with_empty_payment_bytes(runtime_args! { - ARG_AMOUNT => payment_purse_amount, - }) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_KEY]) - .with_deploy_hash([2; 32]) - .build(); - - ExecuteRequestBuilder::new().push_deploy(deploy).build() - }; - - let proposer_reward_starting_balance_bravo = builder.get_proposer_purse_balance(); - - builder.exec_commit_finish(exec_request); - - let modified_balance_bravo: U512 = builder.get_purse_balance(default_account.main_purse()); - - let initial_balance: U512 = U512::from(DEFAULT_ACCOUNT_INITIAL_BALANCE); - - let transaction_fee_bravo = - builder.get_proposer_purse_balance() - proposer_reward_starting_balance_bravo; - - let tally = transaction_fee_alpha - + U512::from(transferred_amount) - + transaction_fee_bravo - + modified_balance_bravo; - - assert!( - modified_balance_alpha < initial_balance, - "balance should be less than initial balance" - ); - - assert!( - modified_balance_bravo < modified_balance_alpha, - "second modified balance should be less than first modified balance" - ); - - assert_eq!( - initial_balance, tally, - "no net resources should be gained or lost post-distribution" - ); -} diff --git a/execution_engine_testing/tests/src/test/contract_api/transfer_u512_stored.rs b/execution_engine_testing/tests/src/test/contract_api/transfer_u512_stored.rs deleted file mode 100644 index bac9a45cde..0000000000 --- a/execution_engine_testing/tests/src/test/contract_api/transfer_u512_stored.rs +++ /dev/null @@ 
-1,102 +0,0 @@ -use casper_engine_test_support::{ - internal::{ - DeployItemBuilder, ExecuteRequestBuilder, InMemoryWasmTestBuilder, DEFAULT_ACCOUNT_KEY, - DEFAULT_PAYMENT, DEFAULT_RUN_GENESIS_REQUEST, - }, - DEFAULT_ACCOUNT_ADDR, DEFAULT_ACCOUNT_INITIAL_BALANCE, -}; -use casper_types::{account::AccountHash, runtime_args, RuntimeArgs, U512}; - -const FUNCTION_NAME: &str = "transfer"; -const CONTRACT_KEY_NAME: &str = "transfer_to_account"; -const CONTRACT_TRANSFER_TO_ACCOUNT_NAME: &str = "transfer_to_account_u512"; -const ACCOUNT_1_ADDR: AccountHash = AccountHash::new([1u8; 32]); -const TRANSFER_AMOUNT: u64 = 1; -const ARG_AMOUNT: &str = "amount"; -const ARG_TARGET: &str = "target"; - -#[ignore] -#[test] -fn should_transfer_to_account_stored() { - let mut builder = InMemoryWasmTestBuilder::default(); - - // first, store transfer contract - let exec_request = ExecuteRequestBuilder::standard( - *DEFAULT_ACCOUNT_ADDR, - &format!("{}_stored.wasm", CONTRACT_TRANSFER_TO_ACCOUNT_NAME), - RuntimeArgs::default(), - ) - .build(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); - - let proposer_reward_starting_balance_alpha = builder.get_proposer_purse_balance(); - - builder.exec_commit_finish(exec_request); - - let transaction_fee_alpha = - builder.get_proposer_purse_balance() - proposer_reward_starting_balance_alpha; - - let default_account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) - .expect("should have account"); - - let contract_hash = default_account - .named_keys() - .get(CONTRACT_KEY_NAME) - .expect("contract_hash should exist") - .into_hash() - .expect("should be a hash"); - - let modified_balance_alpha: U512 = builder.get_purse_balance(default_account.main_purse()); - - let transferred_amount: U512 = U512::from(TRANSFER_AMOUNT); - let payment_purse_amount = *DEFAULT_PAYMENT; - - // next make another deploy that USES stored payment logic - let exec_request = { - let deploy = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - 
.with_stored_session_hash( - contract_hash.into(), - FUNCTION_NAME, - runtime_args! { ARG_TARGET => ACCOUNT_1_ADDR, ARG_AMOUNT => transferred_amount }, - ) - .with_empty_payment_bytes(runtime_args! { - ARG_AMOUNT => payment_purse_amount, - }) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_KEY]) - .with_deploy_hash([2; 32]) - .build(); - - ExecuteRequestBuilder::new().push_deploy(deploy).build() - }; - - let proposer_reward_starting_balance_alpha = builder.get_proposer_purse_balance(); - - builder.exec_commit_finish(exec_request); - - let transaction_fee_bravo = - builder.get_proposer_purse_balance() - proposer_reward_starting_balance_alpha; - - let modified_balance_bravo: U512 = builder.get_purse_balance(default_account.main_purse()); - - let initial_balance: U512 = U512::from(DEFAULT_ACCOUNT_INITIAL_BALANCE); - - let tally = - transaction_fee_alpha + transaction_fee_bravo + transferred_amount + modified_balance_bravo; - - assert!( - modified_balance_alpha < initial_balance, - "balance should be less than initial balance" - ); - - assert!( - modified_balance_bravo < modified_balance_alpha, - "second modified balance should be less than first modified balance" - ); - - assert_eq!( - initial_balance, tally, - "no net resources should be gained or lost post-distribution" - ); -} diff --git a/execution_engine_testing/tests/src/test/contract_api/verify_signature.rs b/execution_engine_testing/tests/src/test/contract_api/verify_signature.rs new file mode 100644 index 0000000000..0f8c2b9a1a --- /dev/null +++ b/execution_engine_testing/tests/src/test/contract_api/verify_signature.rs @@ -0,0 +1,146 @@ +use casper_engine_test_support::{ + ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST, +}; +use casper_types::{ + bytesrepr::{Bytes, ToBytes}, + runtime_args, PublicKey, SecretKey, Signature, +}; +use ed25519_dalek::Signer; + +const VERIFY_SIGNATURE_WASM: &str = "verify_signature.wasm"; + +#[ignore] +#[test] +fn 
should_verify_secp256k1_signature() { + let message = String::from("Recovery test"); + let message_bytes = message.as_bytes(); + let signing_key = SecretKey::generate_secp256k1().unwrap(); + let public_key = PublicKey::from(&signing_key); + + let (signature, _) = match signing_key { + SecretKey::Secp256k1(signing_key) => signing_key.sign_recoverable(message_bytes).unwrap(), + _ => panic!("Expected a Secp256k1 key"), + }; + + let signature = Signature::Secp256k1(signature); + let signature_bytes: Bytes = signature.to_bytes().unwrap().into(); + + LmdbWasmTestBuilder::default() + .run_genesis(LOCAL_GENESIS_REQUEST.clone()) + .exec( + ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + VERIFY_SIGNATURE_WASM, + runtime_args! { + "message" => message, + "signature_bytes" => signature_bytes, + "public_key" => public_key, + }, + ) + .build(), + ) + .expect_success() + .commit(); +} + +#[ignore] +#[test] +fn should_verify_ed25519_signature() { + let message = String::from("Recovery test"); + let message_bytes = message.as_bytes(); + let signing_key = SecretKey::generate_ed25519().unwrap(); + let public_key = PublicKey::from(&signing_key); + + let signature = match signing_key { + SecretKey::Ed25519(signing_key) => signing_key.sign(message_bytes), + _ => panic!("Expected an Ed25519 key"), + }; + + let signature = Signature::Ed25519(signature); + let signature_bytes: Bytes = signature.to_bytes().unwrap().into(); + + LmdbWasmTestBuilder::default() + .run_genesis(LOCAL_GENESIS_REQUEST.clone()) + .exec( + ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + VERIFY_SIGNATURE_WASM, + runtime_args! 
{ + "message" => message, + "signature_bytes" => signature_bytes, + "public_key" => public_key, + }, + ) + .build(), + ) + .expect_success() + .commit(); +} + +#[ignore] +#[test] +fn should_fail_verify_secp256k1_signature() { + let message = String::from("Recovery test"); + let message_bytes = message.as_bytes(); + let signing_key = SecretKey::generate_secp256k1().unwrap(); + let unrelated_key = PublicKey::from(&SecretKey::generate_secp256k1().unwrap()); + + let (signature, _) = match signing_key { + SecretKey::Secp256k1(signing_key) => signing_key.sign_recoverable(message_bytes).unwrap(), + _ => panic!("Expected a Secp256k1 key"), + }; + + let signature = Signature::Secp256k1(signature); + let signature_bytes: Bytes = signature.to_bytes().unwrap().into(); + + LmdbWasmTestBuilder::default() + .run_genesis(LOCAL_GENESIS_REQUEST.clone()) + .exec( + ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + VERIFY_SIGNATURE_WASM, + runtime_args! { + "message" => message, + "signature_bytes" => signature_bytes, + "public_key" => unrelated_key, + }, + ) + .build(), + ) + .expect_failure() + .commit(); +} + +#[ignore] +#[test] +fn should_fail_verify_ed25519_signature() { + let message = String::from("Recovery test"); + let message_bytes = message.as_bytes(); + let signing_key = SecretKey::generate_ed25519().unwrap(); + let unrelated_key = PublicKey::from(&SecretKey::generate_ed25519().unwrap()); + + let signature = match signing_key { + SecretKey::Ed25519(signing_key) => signing_key.sign(message_bytes), + _ => panic!("Expected an Ed25519 key"), + }; + + let signature = Signature::Ed25519(signature); + let signature_bytes: Bytes = signature.to_bytes().unwrap().into(); + + LmdbWasmTestBuilder::default() + .run_genesis(LOCAL_GENESIS_REQUEST.clone()) + .exec( + ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + VERIFY_SIGNATURE_WASM, + runtime_args! 
{ + "message" => message, + "signature_bytes" => signature_bytes, + "public_key" => unrelated_key, + }, + ) + .build(), + ) + .expect_failure() + .commit(); +} diff --git a/execution_engine_testing/tests/src/test/contract_context.rs b/execution_engine_testing/tests/src/test/contract_context.rs index 28a4b0000d..d39974d1ff 100644 --- a/execution_engine_testing/tests/src/test/contract_context.rs +++ b/execution_engine_testing/tests/src/test/contract_context.rs @@ -1,24 +1,18 @@ -use assert_matches::assert_matches; use casper_engine_test_support::{ - internal::{ - DeployItemBuilder, ExecuteRequestBuilder, InMemoryWasmTestBuilder, DEFAULT_PAYMENT, - DEFAULT_RUN_GENESIS_REQUEST, - }, - DEFAULT_ACCOUNT_ADDR, + ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST, }; -use casper_execution_engine::core::{engine_state::Error, execution}; -use casper_types::{contracts::CONTRACT_INITIAL_VERSION, runtime_args, Key, RuntimeArgs}; + +use casper_types::{runtime_args, Key, RuntimeArgs, ENTITY_INITIAL_VERSION}; const CONTRACT_HEADERS: &str = "contract_context.wasm"; const PACKAGE_HASH_KEY: &str = "package_hash_key"; const PACKAGE_ACCESS_KEY: &str = "package_access_key"; const CONTRACT_HASH_KEY: &str = "contract_hash_key"; -const SESSION_CODE_TEST: &str = "session_code_test"; + const CONTRACT_CODE_TEST: &str = "contract_code_test"; -const ADD_NEW_KEY_AS_SESSION: &str = "add_new_key_as_session"; + const NEW_KEY: &str = "new_key"; -const SESSION_CODE_CALLER_AS_CONTRACT: &str = "session_code_caller_as_contract"; -const ARG_AMOUNT: &str = "amount"; + const CONTRACT_VERSION: &str = "contract_version"; #[ignore] @@ -32,77 +26,26 @@ fn should_enforce_intended_execution_contexts() { ) .build(); - let exec_request_2 = { - let args = runtime_args! 
{}; - let deploy = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_stored_versioned_contract_by_name( - PACKAGE_HASH_KEY, - Some(CONTRACT_INITIAL_VERSION), - SESSION_CODE_TEST, - args, - ) - .with_empty_payment_bytes(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, }) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) - .with_deploy_hash([3; 32]) - .build(); - - ExecuteRequestBuilder::new().push_deploy(deploy).build() - }; - - let exec_request_3 = { - let args = runtime_args! {}; - let deploy = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_stored_versioned_contract_by_name( - PACKAGE_HASH_KEY, - Some(CONTRACT_INITIAL_VERSION), - CONTRACT_CODE_TEST, - args, - ) - .with_empty_payment_bytes(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, }) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) - .with_deploy_hash([3; 32]) - .build(); - - ExecuteRequestBuilder::new().push_deploy(deploy).build() - }; - - let exec_request_4 = { - let args = runtime_args! {}; - let deploy = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_stored_versioned_contract_by_name( - PACKAGE_HASH_KEY, - Some(CONTRACT_INITIAL_VERSION), - ADD_NEW_KEY_AS_SESSION, - args, - ) - .with_empty_payment_bytes(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, }) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) - .with_deploy_hash([4; 32]) - .build(); - - ExecuteRequestBuilder::new().push_deploy(deploy).build() - }; - let mut builder = InMemoryWasmTestBuilder::default(); - - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + let exec_request_3 = ExecuteRequestBuilder::versioned_contract_call_by_name( + *DEFAULT_ACCOUNT_ADDR, + PACKAGE_HASH_KEY, + Some(ENTITY_INITIAL_VERSION), + CONTRACT_CODE_TEST, + runtime_args! 
{}, + ) + .build(); - builder.exec(exec_request_1).expect_success().commit(); + let mut builder = LmdbWasmTestBuilder::default(); - builder.exec(exec_request_2).expect_success().commit(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); - builder.exec(exec_request_3).expect_success().commit(); + builder.exec(exec_request_1).expect_success().commit(); - builder.exec(exec_request_4).expect_success().commit(); + builder.exec(exec_request_3).expect_success().commit(); let account = builder - .query(None, Key::Account(*DEFAULT_ACCOUNT_ADDR), &[]) - .expect("should query account") - .as_account() - .cloned() - .expect("should be account"); + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .expect("must have contract"); let _package_hash = account .named_keys() @@ -113,10 +56,7 @@ fn should_enforce_intended_execution_contexts() { .get(PACKAGE_ACCESS_KEY) .expect("should have package hash"); - let _new_key = account - .named_keys() - .get(NEW_KEY) - .expect("new key should be there"); + assert!(account.named_keys().get(NEW_KEY).is_none()); // Check version @@ -144,62 +84,25 @@ fn should_enforce_intended_execution_context_direct_by_name() { ) .build(); - let exec_request_2 = { - let args = runtime_args! {}; - let deploy = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_stored_session_named_key(CONTRACT_HASH_KEY, SESSION_CODE_TEST, args) - .with_empty_payment_bytes(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, }) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) - .with_deploy_hash([3; 32]) - .build(); - - ExecuteRequestBuilder::new().push_deploy(deploy).build() - }; - - let exec_request_3 = { - let args = runtime_args! {}; - let deploy = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_stored_session_named_key(CONTRACT_HASH_KEY, CONTRACT_CODE_TEST, args) - .with_empty_payment_bytes(runtime_args! 
{ ARG_AMOUNT => *DEFAULT_PAYMENT, }) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) - .with_deploy_hash([3; 32]) - .build(); - - ExecuteRequestBuilder::new().push_deploy(deploy).build() - }; - - let exec_request_4 = { - let args = runtime_args! {}; - let deploy = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_stored_session_named_key(CONTRACT_HASH_KEY, ADD_NEW_KEY_AS_SESSION, args) - .with_empty_payment_bytes(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, }) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) - .with_deploy_hash([4; 32]) - .build(); - - ExecuteRequestBuilder::new().push_deploy(deploy).build() - }; - let mut builder = InMemoryWasmTestBuilder::default(); - - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + let exec_request_3 = ExecuteRequestBuilder::contract_call_by_name( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_HASH_KEY, + CONTRACT_CODE_TEST, + runtime_args! {}, + ) + .build(); + + let mut builder = LmdbWasmTestBuilder::default(); - builder.exec(exec_request_1).expect_success().commit(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); - builder.exec(exec_request_2).expect_success().commit(); + builder.exec(exec_request_1).expect_success().commit(); builder.exec(exec_request_3).expect_success().commit(); - builder.exec(exec_request_4).expect_success().commit(); - let account = builder - .query(None, Key::Account(*DEFAULT_ACCOUNT_ADDR), &[]) - .expect("should query account") - .as_account() - .cloned() - .expect("should be account"); + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .expect("must have contract"); let _package_hash = account .named_keys() @@ -210,10 +113,7 @@ fn should_enforce_intended_execution_context_direct_by_name() { .get(PACKAGE_ACCESS_KEY) .expect("should have package hash"); - let _new_key = account - .named_keys() - .get(NEW_KEY) - .expect("new key should be there"); + assert!(account.named_keys().get(NEW_KEY).is_none()); } #[ignore] @@ -227,77 +127,37 @@ fn 
should_enforce_intended_execution_context_direct_by_hash() { ) .build(); - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); builder.exec(exec_request_1).expect_success().commit(); let account = builder - .query(None, Key::Account(*DEFAULT_ACCOUNT_ADDR), &[]) - .expect("should query account") - .as_account() - .cloned() - .expect("should be account"); + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .expect("should have default account"); let contract_hash = account .named_keys() .get(CONTRACT_HASH_KEY) .expect("should have contract hash") - .into_hash() - .expect("should have hash"); - - let exec_request_2 = { - let args = runtime_args! {}; - let deploy = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_stored_session_hash(contract_hash.into(), SESSION_CODE_TEST, args) - .with_empty_payment_bytes(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, }) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) - .with_deploy_hash([3; 32]) - .build(); - - ExecuteRequestBuilder::new().push_deploy(deploy).build() - }; - - let exec_request_3 = { - let args = runtime_args! {}; - let deploy = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_stored_session_hash(contract_hash.into(), CONTRACT_CODE_TEST, args) - .with_empty_payment_bytes(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, }) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) - .with_deploy_hash([3; 32]) - .build(); - - ExecuteRequestBuilder::new().push_deploy(deploy).build() - }; - - let exec_request_4 = { - let args = runtime_args! {}; - let deploy = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_stored_session_hash(contract_hash.into(), ADD_NEW_KEY_AS_SESSION, args) - .with_empty_payment_bytes(runtime_args! 
{ ARG_AMOUNT => *DEFAULT_PAYMENT, }) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) - .with_deploy_hash([4; 32]) - .build(); - - ExecuteRequestBuilder::new().push_deploy(deploy).build() - }; - - builder.exec(exec_request_2).expect_success().commit(); + .into_entity_hash(); - builder.exec(exec_request_3).expect_success().commit(); + let contract_hash = contract_hash.unwrap(); + + let exec_request_3 = ExecuteRequestBuilder::contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + contract_hash, + CONTRACT_CODE_TEST, + runtime_args! {}, + ) + .build(); - builder.exec(exec_request_4).expect_success().commit(); + builder.exec(exec_request_3).expect_success().commit(); let account = builder - .query(None, Key::Account(*DEFAULT_ACCOUNT_ADDR), &[]) - .expect("should query account") - .as_account() - .cloned() - .expect("should be account"); + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .expect("must have contract"); let _package_hash = account .named_keys() @@ -308,70 +168,5 @@ fn should_enforce_intended_execution_context_direct_by_hash() { .get(PACKAGE_ACCESS_KEY) .expect("should have package hash"); - let _new_key = account - .named_keys() - .get(NEW_KEY) - .expect("new key should be there"); -} - -#[ignore] -#[test] -fn should_not_call_session_from_contract() { - // This test runs a contract that extends the same key with more data after every call. 
- let exec_request_1 = ExecuteRequestBuilder::standard( - *DEFAULT_ACCOUNT_ADDR, - CONTRACT_HEADERS, - RuntimeArgs::default(), - ) - .build(); - - let mut builder = InMemoryWasmTestBuilder::default(); - - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); - - builder.exec(exec_request_1).expect_success().commit(); - - let account = builder - .query(None, Key::Account(*DEFAULT_ACCOUNT_ADDR), &[]) - .expect("should query account") - .as_account() - .cloned() - .expect("should be account"); - - let contract_package_hash = account - .named_keys() - .get(PACKAGE_HASH_KEY) - .cloned() - .expect("should have contract package"); - - let exec_request_2 = { - let args = runtime_args! { - PACKAGE_HASH_KEY => contract_package_hash, - }; - let deploy = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_stored_versioned_contract_by_name( - PACKAGE_HASH_KEY, - Some(CONTRACT_INITIAL_VERSION), - SESSION_CODE_CALLER_AS_CONTRACT, - args, - ) - .with_empty_payment_bytes(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, }) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) - .with_deploy_hash([3; 32]) - .build(); - - ExecuteRequestBuilder::new().push_deploy(deploy).build() - }; - - builder.exec(exec_request_2).commit(); - - let response = builder - .get_exec_results() - .last() - .expect("should have last response"); - assert_eq!(response.len(), 1); - let exec_response = response.last().expect("should have response"); - let error = exec_response.as_error().expect("should have error"); - assert_matches!(error, Error::Exec(execution::Error::InvalidContext)); + assert!(account.named_keys().get(NEW_KEY).is_none()) } diff --git a/execution_engine_testing/tests/src/test/contract_headers.rs b/execution_engine_testing/tests/src/test/contract_headers.rs deleted file mode 100644 index d4db6b34bf..0000000000 --- a/execution_engine_testing/tests/src/test/contract_headers.rs +++ /dev/null @@ -1,125 +0,0 @@ -use casper_engine_test_support::{ - internal::{ - DeployItemBuilder, 
ExecuteRequestBuilder, InMemoryWasmTestBuilder, DEFAULT_PAYMENT, - DEFAULT_RUN_GENESIS_REQUEST, - }, - DEFAULT_ACCOUNT_ADDR, -}; -use casper_types::{runtime_args, Key, RuntimeArgs, SemVer}; - -const CONTRACT_HEADERS: &str = "contract_headers.wasm"; -const PACKAGE_HASH_KEY: &str = "package_hash_key"; -const PACKAGE_ACCESS_KEY: &str = "package_access_key"; -const STEP_1: i32 = 5; -const STEP_2: i32 = 6; -const STEP_3: i32 = 42; - -#[ignore] -#[test] -fn should_enforce_intended_execution_contexts() { - // This test runs a contract that's after every call extends the same key with - // more data - let exec_request_1 = ExecuteRequestBuilder::standard( - *DEFAULT_ACCOUNT_ADDR, - CONTRACT_HEADERS, - RuntimeArgs::default(), - ) - .build(); - - let exec_request_2 = { - let args = runtime_args! {}; - let deploy = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_stored_versioned_contract_by_name( - PACKAGE_HASH_KEY, - SemVer::V1_0_0, - "session_code_test", - args, - ) - .with_empty_payment_bytes(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, }) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) - .with_deploy_hash([3; 32]) - .build(); - - ExecuteRequestBuilder::new().push_deploy(deploy).build() - }; - - let exec_request_3 = { - let args = runtime_args! {}; - let deploy = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_stored_versioned_contract_by_name( - PACKAGE_HASH_KEY, - SemVer::V1_0_0, - "contract_code_test", - args, - ) - .with_empty_payment_bytes(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, }) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) - .with_deploy_hash([3; 32]) - .build(); - - ExecuteRequestBuilder::new().push_deploy(deploy).build() - }; - - let exec_request_4 = { - let args = runtime_args! 
{}; - let deploy = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_stored_versioned_contract_by_name( - PACKAGE_HASH_KEY, - SemVer::V1_0_0, - "add_new_key_as_session", - args, - ) - .with_empty_payment_bytes(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, }) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) - .with_deploy_hash([4; 32]) - .build(); - - ExecuteRequestBuilder::new().push_deploy(deploy).build() - }; - let mut builder = InMemoryWasmTestBuilder::default(); - - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); - - builder.exec(exec_request_1).expect_success().commit(); - - builder.exec(exec_request_2).expect_success().commit(); - - builder.exec(exec_request_3).expect_success().commit(); - - builder.exec(exec_request_4).expect_success().commit(); - - let account = builder - .query(None, Key::Account(*DEFAULT_ACCOUNT_ADDR), &[]) - .expect("should query account") - .as_account() - .cloned() - .expect("should be account"); - - let package_hash = account - .named_keys() - .get(PACKAGE_HASH_KEY) - .expect("should have contract package"); - let access_uref = account - .named_keys() - .get(PACKAGE_ACCESS_KEY) - .expect("should have package hash"); - - let _foo = builder - .get_exec_result(3) - .expect("should have exec response"); - - let account = builder - .query(None, Key::Account(*DEFAULT_ACCOUNT_ADDR), &[]) - .expect("should query account") - .as_account() - .cloned() - .expect("should be account"); - - let new_key = account - .named_keys() - .get("new_key") - .expect("new key should be there"); -} diff --git a/execution_engine_testing/tests/src/test/contract_messages.rs b/execution_engine_testing/tests/src/test/contract_messages.rs new file mode 100644 index 0000000000..d37ab1c835 --- /dev/null +++ b/execution_engine_testing/tests/src/test/contract_messages.rs @@ -0,0 +1,1224 @@ +use num_traits::Zero; +use std::cell::RefCell; + +use casper_execution_engine::runtime::cryptography; + +use casper_engine_test_support::{ + 
ChainspecConfig, ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, + DEFAULT_BLOCK_TIME, LOCAL_GENESIS_REQUEST, +}; + +use casper_types::{ + addressable_entity::MessageTopics, + bytesrepr::ToBytes, + contract_messages::{MessageChecksum, MessagePayload, MessageTopicSummary, TopicNameHash}, + runtime_args, AddressableEntityHash, BlockGlobalAddr, BlockTime, CLValue, CoreConfig, Digest, + EntityAddr, HostFunction, HostFunctionCostsV1, HostFunctionCostsV2, Key, MessageLimits, + OpcodeCosts, RuntimeArgs, StorageCosts, StoredValue, SystemConfig, WasmConfig, WasmV1Config, + WasmV2Config, DEFAULT_MAX_STACK_HEIGHT, DEFAULT_WASM_MAX_MEMORY, U512, +}; + +const MESSAGE_EMITTER_INSTALLER_WASM: &str = "contract_messages_emitter.wasm"; +const MESSAGE_EMITTER_UPGRADER_WASM: &str = "contract_messages_upgrader.wasm"; +const MESSAGE_EMITTER_FROM_ACCOUNT: &str = "contract_messages_from_account.wasm"; +const MESSAGE_EMITTER_PACKAGE_HASH_KEY_NAME: &str = "messages_emitter_package_hash"; +const MESSAGE_EMITTER_GENERIC_TOPIC: &str = "generic_messages"; +const MESSAGE_EMITTER_UPGRADED_TOPIC: &str = "new_topic_after_upgrade"; +const ENTRY_POINT_EMIT_MESSAGE: &str = "emit_message"; +const ENTRY_POINT_EMIT_MULTIPLE_MESSAGES: &str = "emit_multiple_messages"; +const ENTRY_POINT_EMIT_MESSAGE_FROM_EACH_VERSION: &str = "emit_message_from_each_version"; +const ARG_NUM_MESSAGES_TO_EMIT: &str = "num_messages_to_emit"; +const ARG_TOPIC_NAME: &str = "topic_name"; +const ENTRY_POINT_ADD_TOPIC: &str = "add_topic"; +const ARG_MESSAGE_SUFFIX_NAME: &str = "message_suffix"; +const ARG_REGISTER_DEFAULT_TOPIC_WITH_INIT: &str = "register_default_topic_with_init"; + +const EMITTER_MESSAGE_PREFIX: &str = "generic message: "; + +// Number of messages that will be emitted when calling `ENTRY_POINT_EMIT_MESSAGE_FROM_EACH_VERSION` +const EMIT_MESSAGE_FROM_EACH_VERSION_NUM_MESSAGES: u32 = 3; + +fn install_messages_emitter_contract( + builder: &RefCell, + use_initializer: bool, +) -> 
AddressableEntityHash { + // Request to install the contract that will be emitting messages. + let install_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + MESSAGE_EMITTER_INSTALLER_WASM, + runtime_args! { + ARG_REGISTER_DEFAULT_TOPIC_WITH_INIT => use_initializer, + }, + ) + .build(); + + // Execute the request to install the message emitting contract. + // This will also register a topic for the contract to emit messages on. + builder + .borrow_mut() + .exec(install_request) + .expect_success() + .commit(); + + // Get the contract package for the messages_emitter. + let query_result = builder + .borrow_mut() + .query( + None, + Key::from(*DEFAULT_ACCOUNT_ADDR), + &[MESSAGE_EMITTER_PACKAGE_HASH_KEY_NAME.into()], + ) + .expect("should query"); + + let message_emitter_package = if let StoredValue::ContractPackage(package) = query_result { + package + } else { + panic!("Stored value is not a contract package: {:?}", query_result); + }; + + // Get the contract hash of the messages_emitter contract. + message_emitter_package + .versions() + .values() + .last() + .map(|contract_hash| AddressableEntityHash::new(contract_hash.value())) + .expect("Should have contract hash") +} + +fn upgrade_messages_emitter_contract( + builder: &RefCell, + use_initializer: bool, + expect_failure: bool, +) -> AddressableEntityHash { + let upgrade_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + MESSAGE_EMITTER_UPGRADER_WASM, + runtime_args! { + ARG_REGISTER_DEFAULT_TOPIC_WITH_INIT => use_initializer, + }, + ) + .build(); + + // let new_topics = BTreeMap::from([( + // MESSAGE_EMITTER_GENERIC_TOPIC.to_string(), + // MessageTopicOperation::Add, + // )]); + + // println!("{}", new_topics.into_bytes().unwrap().len()); + + // Execute the request to upgrade the message emitting contract. + // This will also register a new topic for the contract to emit messages on. 
+ if expect_failure { + builder + .borrow_mut() + .exec(upgrade_request) + .expect_failure() + .commit(); + } else { + builder + .borrow_mut() + .exec(upgrade_request) + .expect_success() + .commit(); + } + + // Get the contract package for the upgraded messages emitter contract. + let query_result = builder + .borrow_mut() + .query( + None, + Key::from(*DEFAULT_ACCOUNT_ADDR), + &[MESSAGE_EMITTER_PACKAGE_HASH_KEY_NAME.into()], + ) + .expect("should query"); + + let message_emitter_package = if let StoredValue::ContractPackage(package) = query_result { + package + } else { + panic!("Stored value is not a contract package: {:?}", query_result); + }; + + // Get the contract hash of the latest version of the messages emitter contract. + message_emitter_package + .versions() + .values() + .last() + .map(|contract_hash| AddressableEntityHash::new(contract_hash.value())) + .expect("Should have contract hash") +} + +fn emit_message_with_suffix( + builder: &RefCell, + suffix: &str, + contract_hash: &AddressableEntityHash, + block_time: u64, +) { + let emit_message_request = ExecuteRequestBuilder::contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + *contract_hash, + ENTRY_POINT_EMIT_MESSAGE, + runtime_args! 
{ + ARG_MESSAGE_SUFFIX_NAME => suffix, + }, + ) + .with_block_time(block_time) + .build(); + + builder + .borrow_mut() + .exec(emit_message_request) + .expect_success() + .commit(); +} + +struct ContractQueryView<'a> { + builder: &'a RefCell, + contract_hash: AddressableEntityHash, +} + +impl<'a> ContractQueryView<'a> { + fn new( + builder: &'a RefCell, + contract_hash: AddressableEntityHash, + ) -> Self { + Self { + builder, + contract_hash, + } + } + + fn message_topics(&self) -> MessageTopics { + let message_topics_result = self + .builder + .borrow_mut() + .message_topics(None, EntityAddr::SmartContract(self.contract_hash.value())) + .expect("must get message topics"); + + message_topics_result + } + + fn message_topic(&self, topic_name_hash: TopicNameHash) -> MessageTopicSummary { + let query_result = self + .builder + .borrow_mut() + .query( + None, + Key::message_topic( + EntityAddr::SmartContract(self.contract_hash.value()), + topic_name_hash, + ), + &[], + ) + .expect("should query"); + + match query_result { + StoredValue::MessageTopic(summary) => summary, + _ => { + panic!( + "Stored value is not a message topic summary: {:?}", + query_result + ); + } + } + } + + fn message_summary( + &self, + topic_name_hash: TopicNameHash, + message_index: u32, + state_hash: Option, + ) -> Result { + let query_result = self.builder.borrow_mut().query( + state_hash, + Key::message( + EntityAddr::SmartContract(self.contract_hash.value()), + topic_name_hash, + message_index, + ), + &[], + )?; + + match query_result { + StoredValue::Message(summary) => Ok(summary), + _ => panic!("Stored value is not a message summary: {:?}", query_result), + } + } +} + +#[ignore] +#[test] +fn should_emit_messages() { + let builder = RefCell::new(LmdbWasmTestBuilder::default()); + builder + .borrow_mut() + .run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let contract_hash = install_messages_emitter_contract(&builder, true); + let query_view = ContractQueryView::new(&builder, contract_hash); 
+ + let message_topics = query_view.message_topics(); + + let (topic_name, message_topic_hash) = message_topics + .iter() + .next() + .expect("should have at least one topic"); + + assert_eq!(topic_name, &MESSAGE_EMITTER_GENERIC_TOPIC.to_string()); + // Check that the topic exists for the installed contract. + assert_eq!( + query_view + .message_topic(*message_topic_hash) + .message_count(), + 0 + ); + + // Now call the entry point to emit some messages. + emit_message_with_suffix(&builder, "test", &contract_hash, DEFAULT_BLOCK_TIME); + let expected_message = MessagePayload::from(format!("{}{}", EMITTER_MESSAGE_PREFIX, "test")); + let expected_message_hash = cryptography::blake2b( + [ + 0u64.to_bytes().unwrap(), + expected_message.to_bytes().unwrap(), + ] + .concat(), + ); + let queried_message_summary = query_view + .message_summary(*message_topic_hash, 0, None) + .expect("should have value") + .value(); + assert_eq!(expected_message_hash, queried_message_summary); + assert_eq!( + query_view + .message_topic(*message_topic_hash) + .message_count(), + 1 + ); + + // call again to emit a new message and check that the index in the topic incremented. + emit_message_with_suffix(&builder, "test", &contract_hash, DEFAULT_BLOCK_TIME); + let expected_message_hash = cryptography::blake2b( + [ + 1u64.to_bytes().unwrap(), + expected_message.to_bytes().unwrap(), + ] + .concat(), + ); + let queried_message_summary = query_view + .message_summary(*message_topic_hash, 1, None) + .expect("should have value") + .value(); + assert_eq!(expected_message_hash, queried_message_summary); + assert_eq!( + query_view + .message_topic(*message_topic_hash) + .message_count(), + 2 + ); + + let first_block_state_hash = builder.borrow().get_post_state_hash(); + + // call to emit a new message but in another block. 
+ emit_message_with_suffix( + &builder, + "new block time", + &contract_hash, + DEFAULT_BLOCK_TIME + 1, + ); + let expected_message = + MessagePayload::from(format!("{}{}", EMITTER_MESSAGE_PREFIX, "new block time")); + let expected_message_hash = cryptography::blake2b( + [ + 0u64.to_bytes().unwrap(), + expected_message.to_bytes().unwrap(), + ] + .concat(), + ); + let queried_message_summary = query_view + .message_summary(*message_topic_hash, 0, None) + .expect("should have value") + .value(); + assert_eq!(expected_message_hash, queried_message_summary); + assert_eq!( + query_view + .message_topic(*message_topic_hash) + .message_count(), + 1 + ); + + // old messages should be pruned from tip and inaccessible at the latest state hash. + assert!(query_view + .message_summary(*message_topic_hash, 1, None) + .is_err()); + + // old messages should still be discoverable at a state hash before pruning. + assert!(query_view + .message_summary(*message_topic_hash, 1, Some(first_block_state_hash)) + .is_ok()); +} + +#[ignore] +#[test] +fn should_emit_message_on_empty_topic_in_new_block() { + let builder = RefCell::new(LmdbWasmTestBuilder::default()); + builder + .borrow_mut() + .run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let contract_hash = install_messages_emitter_contract(&builder, true); + let query_view = ContractQueryView::new(&builder, contract_hash); + + let message_topics = query_view.message_topics(); + + let (_, message_topic_hash) = message_topics + .iter() + .next() + .expect("should have at least one topic"); + + assert_eq!( + query_view + .message_topic(*message_topic_hash) + .message_count(), + 0 + ); + + emit_message_with_suffix( + &builder, + "new block time", + &contract_hash, + DEFAULT_BLOCK_TIME + 1, + ); + assert_eq!( + query_view + .message_topic(*message_topic_hash) + .message_count(), + 1 + ); +} + +#[ignore] +#[test] +fn should_add_topics() { + let builder = RefCell::new(LmdbWasmTestBuilder::default()); + builder + .borrow_mut() + 
.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + let contract_hash = install_messages_emitter_contract(&builder, true); + let query_view = ContractQueryView::new(&builder, contract_hash); + + let add_topic_request = ExecuteRequestBuilder::contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + contract_hash, + ENTRY_POINT_ADD_TOPIC, + runtime_args! { + ARG_TOPIC_NAME => "topic_1", + }, + ) + .build(); + + builder + .borrow_mut() + .exec(add_topic_request) + .expect_success() + .commit(); + + let topic_1_hash = *query_view + .message_topics() + .get("topic_1") + .expect("should have added topic `topic_1"); + assert_eq!(query_view.message_topic(topic_1_hash).message_count(), 0); + + let add_topic_request = ExecuteRequestBuilder::contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + contract_hash, + ENTRY_POINT_ADD_TOPIC, + runtime_args! { + ARG_TOPIC_NAME => "topic_2", + }, + ) + .build(); + + builder + .borrow_mut() + .exec(add_topic_request) + .expect_success() + .commit(); + + let topic_2_hash = *query_view + .message_topics() + .get("topic_2") + .expect("should have added topic `topic_2"); + + assert!(query_view.message_topics().get("topic_1").is_some()); + assert_eq!(query_view.message_topic(topic_1_hash).message_count(), 0); + assert_eq!(query_view.message_topic(topic_2_hash).message_count(), 0); +} + +#[ignore] +#[test] +fn should_not_add_duplicate_topics() { + let builder = RefCell::new(LmdbWasmTestBuilder::default()); + builder + .borrow_mut() + .run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let contract_hash = install_messages_emitter_contract(&builder, true); + let query_view = ContractQueryView::new(&builder, contract_hash); + let message_topics = query_view.message_topics(); + let (first_topic_name, _) = message_topics + .iter() + .next() + .expect("should have at least one topic"); + + let add_topic_request = ExecuteRequestBuilder::contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + contract_hash, + ENTRY_POINT_ADD_TOPIC, + runtime_args! 
{ + ARG_TOPIC_NAME => first_topic_name, + }, + ) + .build(); + + builder + .borrow_mut() + .exec(add_topic_request) + .expect_failure() + .commit(); +} + +#[ignore] +#[test] +fn should_not_exceed_configured_limits() { + let chainspec = { + let default_wasm_v1_config = WasmV1Config::default(); + let default_wasm_v2_config = WasmV2Config::default(); + let wasm_v1_config = WasmV1Config::new( + default_wasm_v1_config.max_memory(), + default_wasm_v1_config.max_stack_height(), + default_wasm_v1_config.opcode_costs(), + default_wasm_v1_config.take_host_function_costs(), + ); + let wasm_v2_config = WasmV2Config::new( + default_wasm_v2_config.max_memory(), + default_wasm_v2_config.opcode_costs(), + default_wasm_v2_config.take_host_function_costs(), + ); + let wasm_config = WasmConfig::new( + MessageLimits { + max_topic_name_size: 32, + max_message_size: 100, + max_topics_per_contract: 2, + }, + wasm_v1_config, + wasm_v2_config, + ); + ChainspecConfig { + system_costs_config: SystemConfig::default(), + core_config: CoreConfig::default(), + wasm_config, + storage_costs: StorageCosts::default(), + } + }; + + let builder = RefCell::new(LmdbWasmTestBuilder::new_temporary_with_config(chainspec)); + builder + .borrow_mut() + .run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let contract_hash = install_messages_emitter_contract(&builder, true); + + // if the topic larger than the limit, registering should fail. + // string is 33 bytes > limit established above + let too_large_topic_name = std::str::from_utf8(&[0x4du8; 33]).unwrap(); + let add_topic_request = ExecuteRequestBuilder::contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + contract_hash, + ENTRY_POINT_ADD_TOPIC, + runtime_args! { + ARG_TOPIC_NAME => too_large_topic_name, + }, + ) + .build(); + + builder + .borrow_mut() + .exec(add_topic_request) + .expect_failure() + .commit(); + + // if the topic name is equal to the limit, registering should work. 
+ // string is 32 bytes == limit established above + let topic_name_at_limit = std::str::from_utf8(&[0x4du8; 32]).unwrap(); + let add_topic_request = ExecuteRequestBuilder::contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + contract_hash, + ENTRY_POINT_ADD_TOPIC, + runtime_args! { + ARG_TOPIC_NAME => topic_name_at_limit, + }, + ) + .build(); + + builder + .borrow_mut() + .exec(add_topic_request) + .expect_success() + .commit(); + + // Check that the max number of topics limit is enforced. + // 2 topics are already registered, so registering another topic should + // fail since the limit is already reached. + let add_topic_request = ExecuteRequestBuilder::contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + contract_hash, + ENTRY_POINT_ADD_TOPIC, + runtime_args! { + ARG_TOPIC_NAME => "topic_1", + }, + ) + .build(); + + builder + .borrow_mut() + .exec(add_topic_request) + .expect_failure() + .commit(); + + // Check message size limit + let large_message = std::str::from_utf8(&[0x4du8; 128]).unwrap(); + let emit_message_request = ExecuteRequestBuilder::contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + contract_hash, + ENTRY_POINT_EMIT_MESSAGE, + runtime_args! 
{ + ARG_MESSAGE_SUFFIX_NAME => large_message, + }, + ) + .build(); + + builder + .borrow_mut() + .exec(emit_message_request) + .expect_failure() + .commit(); +} + +fn should_carry_message_topics_on_upgraded_contract(use_initializer: bool) { + let builder = RefCell::new(LmdbWasmTestBuilder::default()); + builder + .borrow_mut() + .run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let _ = install_messages_emitter_contract(&builder, true); + let contract_hash = upgrade_messages_emitter_contract(&builder, use_initializer, false); + let query_view = ContractQueryView::new(&builder, contract_hash); + + let message_topics = query_view.message_topics(); + assert_eq!(message_topics.len(), 2); + let mut expected_topic_names = 0; + for (topic_name, topic_hash) in message_topics.iter() { + if topic_name == MESSAGE_EMITTER_GENERIC_TOPIC + || topic_name == MESSAGE_EMITTER_UPGRADED_TOPIC + { + expected_topic_names += 1; + } + + assert_eq!(query_view.message_topic(*topic_hash).message_count(), 0); + } + assert_eq!(expected_topic_names, 2); +} + +#[ignore] +#[test] +fn should_carry_message_topics_on_upgraded_contract_with_initializer() { + should_carry_message_topics_on_upgraded_contract(true); +} + +#[ignore] +#[test] +fn should_carry_message_topics_on_upgraded_contract_without_initializer() { + should_carry_message_topics_on_upgraded_contract(false); +} + +#[ignore] +#[test] +fn should_not_emit_messages_from_account() { + let builder = RefCell::new(LmdbWasmTestBuilder::default()); + builder + .borrow_mut() + .run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + // Request to run a deploy that tries to register a message topic without a stored contract. + let install_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + MESSAGE_EMITTER_FROM_ACCOUNT, + RuntimeArgs::default(), + ) + .build(); + + // Expect to fail since topics can only be registered by stored contracts. 
+ builder + .borrow_mut() + .exec(install_request) + .expect_failure() + .commit(); +} + +#[ignore] +#[test] +fn should_charge_expected_gas_for_storage() { + const GAS_PER_BYTE_COST: u32 = 100; + + let chainspec = { + let wasm_v1_config = WasmV1Config::new( + DEFAULT_WASM_MAX_MEMORY, + DEFAULT_MAX_STACK_HEIGHT, + OpcodeCosts::zero(), + HostFunctionCostsV1::zero(), + ); + let wasm_v2_config = WasmV2Config::new( + DEFAULT_WASM_MAX_MEMORY, + OpcodeCosts::zero(), + HostFunctionCostsV2::zero(), + ); + let wasm_config = WasmConfig::new(MessageLimits::default(), wasm_v1_config, wasm_v2_config); + ChainspecConfig { + wasm_config, + core_config: CoreConfig::default(), + system_costs_config: SystemConfig::default(), + storage_costs: StorageCosts::new(GAS_PER_BYTE_COST), + } + }; + + let builder = RefCell::new(LmdbWasmTestBuilder::new_temporary_with_config(chainspec)); + builder + .borrow_mut() + .run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let contract_hash = install_messages_emitter_contract(&builder, true); + + let topic_name = "consume_topic"; + + // check the consume of adding a new topic + let add_topic_request = ExecuteRequestBuilder::contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + contract_hash, + ENTRY_POINT_ADD_TOPIC, + runtime_args! 
{ + ARG_TOPIC_NAME => topic_name, + }, + ) + .build(); + + builder + .borrow_mut() + .exec(add_topic_request) + .expect_success() + .commit(); + + let add_topic_consumed = builder.borrow().last_exec_gas_consumed().value(); + + let default_topic_summary = + MessageTopicSummary::new(0, BlockTime::new(0), topic_name.to_string()); + let written_size_expected = + StoredValue::MessageTopic(default_topic_summary.clone()).serialized_length(); + assert_eq!( + U512::from(written_size_expected * GAS_PER_BYTE_COST as usize), + add_topic_consumed + ); + + let message_topic = + MessageTopicSummary::new(0, BlockTime::new(0), "generic_messages".to_string()); + emit_message_with_suffix(&builder, "test", &contract_hash, DEFAULT_BLOCK_TIME); + // check that the storage consume charged is variable since the message topic hash a variable + // string field with message size that is emitted. + let written_size_expected = StoredValue::Message(MessageChecksum([0; 32])).serialized_length() + + StoredValue::MessageTopic(message_topic).serialized_length() + + StoredValue::CLValue(CLValue::from_t((BlockTime::new(0), 0u64)).unwrap()) + .serialized_length(); + let emit_message_gas_consumed = builder.borrow().last_exec_gas_consumed().value(); + assert_eq!( + U512::from(written_size_expected * GAS_PER_BYTE_COST as usize), + emit_message_gas_consumed + ); + + emit_message_with_suffix(&builder, "test 12345", &contract_hash, DEFAULT_BLOCK_TIME); + let written_size_expected = StoredValue::Message(MessageChecksum([0; 32])).serialized_length() + + StoredValue::MessageTopic(MessageTopicSummary::new( + 0, + BlockTime::new(0), + "generic_messages".to_string(), + )) + .serialized_length() + + StoredValue::CLValue(CLValue::from_t((BlockTime::new(0), 0u64)).unwrap()) + .serialized_length(); + let emit_message_gas_consumed = builder.borrow().last_exec_gas_consumed().value(); + assert_eq!( + U512::from(written_size_expected * GAS_PER_BYTE_COST as usize), + emit_message_gas_consumed + ); + + // emitting messages 
in a different block will also prune the old entries so check the consumed. + emit_message_with_suffix( + &builder, + "message in different block", + &contract_hash, + DEFAULT_BLOCK_TIME + 1, + ); + let emit_message_gas_consumed = builder.borrow().last_exec_gas_consumed().value(); + assert_eq!( + U512::from(written_size_expected * GAS_PER_BYTE_COST as usize), + emit_message_gas_consumed + ); +} + +#[ignore] +#[test] +fn should_charge_increasing_gas_consumed_for_multiple_messages_emitted() { + const FIRST_MESSAGE_EMIT_COST: u32 = 100; + const COST_INCREASE_PER_MESSAGE: u32 = 50; + const fn emit_consumed_per_execution(num_messages: u32) -> u32 { + FIRST_MESSAGE_EMIT_COST * num_messages + + (num_messages - 1) * num_messages / 2 * COST_INCREASE_PER_MESSAGE + } + + const MESSAGES_TO_EMIT: u32 = 4; + const EMIT_MULTIPLE_EXPECTED_COST: u32 = emit_consumed_per_execution(MESSAGES_TO_EMIT); + const EMIT_MESSAGES_FROM_MULTIPLE_CONTRACTS: u32 = + emit_consumed_per_execution(EMIT_MESSAGE_FROM_EACH_VERSION_NUM_MESSAGES); + let chainspec = { + let wasm_v1_config = WasmV1Config::new( + DEFAULT_WASM_MAX_MEMORY, + DEFAULT_MAX_STACK_HEIGHT, + OpcodeCosts::zero(), + HostFunctionCostsV1 { + emit_message: HostFunction::fixed(FIRST_MESSAGE_EMIT_COST), + cost_increase_per_message: COST_INCREASE_PER_MESSAGE, + ..Zero::zero() + }, + ); + let wasm_v2_config = WasmV2Config::new( + DEFAULT_WASM_MAX_MEMORY, + OpcodeCosts::zero(), + HostFunctionCostsV2::default(), + ); + let wasm_config = WasmConfig::new(MessageLimits::default(), wasm_v1_config, wasm_v2_config); + ChainspecConfig { + wasm_config, + core_config: CoreConfig::default(), + system_costs_config: SystemConfig::default(), + storage_costs: StorageCosts::zero(), + } + }; + + let builder = RefCell::new(LmdbWasmTestBuilder::new_temporary_with_config(chainspec)); + builder + .borrow_mut() + .run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let contract_hash = install_messages_emitter_contract(&builder, true); + + // Emit one message in this 
execution. Cost should be `FIRST_MESSAGE_EMIT_COST`. + emit_message_with_suffix(&builder, "test", &contract_hash, DEFAULT_BLOCK_TIME); + let emit_message_gas_consume = builder.borrow().last_exec_gas_consumed().value(); + assert_eq!(emit_message_gas_consume, FIRST_MESSAGE_EMIT_COST.into()); + + // Emit multiple messages in this execution. Cost should increase for each message emitted. + let emit_messages_request = ExecuteRequestBuilder::contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + contract_hash, + ENTRY_POINT_EMIT_MULTIPLE_MESSAGES, + runtime_args! { + ARG_NUM_MESSAGES_TO_EMIT => MESSAGES_TO_EMIT, + }, + ) + .build(); + builder + .borrow_mut() + .exec(emit_messages_request) + .expect_success() + .commit(); + + let emit_multiple_messages_consume = builder.borrow().last_exec_gas_consumed().value(); + assert_eq!( + emit_multiple_messages_consume, + EMIT_MULTIPLE_EXPECTED_COST.into() + ); + + // Try another execution where we emit a single message. + // Cost should be `FIRST_MESSAGE_EMIT_COST` + emit_message_with_suffix(&builder, "test", &contract_hash, DEFAULT_BLOCK_TIME); + let emit_message_gas_consume = builder.borrow().last_exec_gas_consumed().value(); + assert_eq!(emit_message_gas_consume, FIRST_MESSAGE_EMIT_COST.into()); + + // Check gas consume when multiple messages are emitted from different contracts. + let contract_hash = upgrade_messages_emitter_contract(&builder, true, false); + let emit_message_request = ExecuteRequestBuilder::contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + contract_hash, + ENTRY_POINT_EMIT_MESSAGE_FROM_EACH_VERSION, + runtime_args! 
{ + ARG_MESSAGE_SUFFIX_NAME => "test message", + }, + ) + .build(); + + builder + .borrow_mut() + .exec(emit_message_request) + .expect_success() + .commit(); + + // 3 messages are emitted by this execution so the consume would be: + // `EMIT_MESSAGES_FROM_MULTIPLE_CONTRACTS` + let emit_message_gas_consume = builder.borrow().last_exec_gas_consumed().value(); + assert_eq!( + emit_message_gas_consume, + U512::from(EMIT_MESSAGES_FROM_MULTIPLE_CONTRACTS) + ); +} + +#[ignore] +#[test] +fn should_register_topic_on_contract_creation() { + let builder = RefCell::new(LmdbWasmTestBuilder::default()); + builder + .borrow_mut() + .run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let contract_hash = install_messages_emitter_contract(&builder, false); + let query_view = ContractQueryView::new(&builder, contract_hash); + + let message_topics = query_view.message_topics(); + let (topic_name, message_topic_hash) = message_topics + .iter() + .next() + .expect("should have at least one topic"); + + assert_eq!(topic_name, &MESSAGE_EMITTER_GENERIC_TOPIC.to_string()); + // Check that the topic exists for the installed contract. 
+ assert_eq!( + query_view + .message_topic(*message_topic_hash) + .message_count(), + 0 + ); +} + +#[ignore] +#[test] +fn should_not_exceed_configured_topic_name_limits_on_contract_upgrade_no_init() { + let chainspec = { + let default_wasm_v1_config = WasmV1Config::default(); + let default_wasm_v2_config = WasmV2Config::default(); + let wasm_v1_config = WasmV1Config::new( + default_wasm_v1_config.max_memory(), + default_wasm_v1_config.max_stack_height(), + default_wasm_v1_config.opcode_costs(), + default_wasm_v1_config.take_host_function_costs(), + ); + let wasm_v2_config = WasmV2Config::new( + default_wasm_v2_config.max_memory(), + default_wasm_v2_config.opcode_costs(), + default_wasm_v2_config.take_host_function_costs(), + ); + let wasm_config = WasmConfig::new( + MessageLimits { + max_topic_name_size: 16, //length of MESSAGE_EMITTER_GENERIC_TOPIC + max_message_size: 100, + max_topics_per_contract: 3, + }, + wasm_v1_config, + wasm_v2_config, + ); + ChainspecConfig { + wasm_config, + core_config: CoreConfig::default(), + system_costs_config: SystemConfig::default(), + storage_costs: StorageCosts::default(), + } + }; + + let builder = RefCell::new(LmdbWasmTestBuilder::new_temporary_with_config(chainspec)); + builder + .borrow_mut() + .run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let _ = install_messages_emitter_contract(&builder, false); + let _ = upgrade_messages_emitter_contract(&builder, false, true); +} + +#[ignore] +#[test] +fn should_not_exceed_configured_max_topics_per_contract_upgrade_no_init() { + let chainspec = { + let default_wasm_v1_config = WasmV1Config::default(); + let wasm_v1_config = WasmV1Config::new( + default_wasm_v1_config.max_memory(), + default_wasm_v1_config.max_stack_height(), + default_wasm_v1_config.opcode_costs(), + default_wasm_v1_config.take_host_function_costs(), + ); + let default_wasm_v2_config = WasmV2Config::default(); + let wasm_v2_config = WasmV2Config::new( + default_wasm_v2_config.max_memory(), + 
default_wasm_v2_config.opcode_costs(), + default_wasm_v2_config.take_host_function_costs(), + ); + let wasm_config = WasmConfig::new( + MessageLimits { + max_topic_name_size: 32, + max_message_size: 100, + max_topics_per_contract: 1, /* only allow 1 topic. Since on upgrade previous + * topics carry over, the upgrade should fail. */ + }, + wasm_v1_config, + wasm_v2_config, + ); + ChainspecConfig { + wasm_config, + system_costs_config: SystemConfig::default(), + core_config: CoreConfig::default(), + storage_costs: StorageCosts::default(), + } + }; + + let builder = RefCell::new(LmdbWasmTestBuilder::new_temporary_with_config(chainspec)); + builder + .borrow_mut() + .run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let _ = install_messages_emitter_contract(&builder, false); + let _ = upgrade_messages_emitter_contract(&builder, false, true); +} + +#[ignore] +#[test] +fn should_produce_per_block_message_ordering() { + let builder = RefCell::new(LmdbWasmTestBuilder::default()); + builder + .borrow_mut() + .run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let emitter_contract_hash = install_messages_emitter_contract(&builder, true); + let query_view = ContractQueryView::new(&builder, emitter_contract_hash); + + let message_topics = query_view.message_topics(); + let (_, message_topic_hash) = message_topics + .iter() + .next() + .expect("should have at least one topic"); + + let assert_last_message_block_index = |expected_index: u64| { + assert_eq!( + builder + .borrow() + .get_last_exec_result() + .unwrap() + .messages() + .first() + .unwrap() + .block_index(), + expected_index + ) + }; + + let query_message_count = || -> Option<(BlockTime, u64)> { + let query_result = + builder + .borrow_mut() + .query(None, Key::BlockGlobal(BlockGlobalAddr::MessageCount), &[]); + + match query_result { + Ok(StoredValue::CLValue(cl_value)) => Some(cl_value.into_t().unwrap()), + Err(_) => None, + _ => panic!("Stored value is not a CLvalue: {:?}", query_result), + } + }; + + // Emit the first 
message in the block. It should have block index 0. + emit_message_with_suffix( + &builder, + "test 0", + &emitter_contract_hash, + DEFAULT_BLOCK_TIME, + ); + assert_last_message_block_index(0); + assert_eq!( + query_message_count(), + Some((BlockTime::new(DEFAULT_BLOCK_TIME), 1)) + ); + + let expected_message = MessagePayload::from(format!("{}{}", EMITTER_MESSAGE_PREFIX, "test 0")); + let expected_message_hash = cryptography::blake2b( + [ + 0u64.to_bytes().unwrap(), + expected_message.to_bytes().unwrap(), + ] + .concat(), + ); + let queried_message_summary = query_view + .message_summary(*message_topic_hash, 0, None) + .expect("should have value") + .value(); + assert_eq!(expected_message_hash, queried_message_summary); + + // Emit the second message in the same block. It should have block index 1. + emit_message_with_suffix( + &builder, + "test 1", + &emitter_contract_hash, + DEFAULT_BLOCK_TIME, + ); + assert_last_message_block_index(1); + assert_eq!( + query_message_count(), + Some((BlockTime::new(DEFAULT_BLOCK_TIME), 2)) + ); + + let expected_message = MessagePayload::from(format!("{}{}", EMITTER_MESSAGE_PREFIX, "test 1")); + let expected_message_hash = cryptography::blake2b( + [ + 1u64.to_bytes().unwrap(), + expected_message.to_bytes().unwrap(), + ] + .concat(), + ); + let queried_message_summary = query_view + .message_summary(*message_topic_hash, 1, None) + .expect("should have value") + .value(); + assert_eq!(expected_message_hash, queried_message_summary); + + // Upgrade the message emitter contract end emit a message from this contract in the same block + // as before. The block index of the message should be 2 since the block hasn't changed. 
+ let upgraded_contract_hash = upgrade_messages_emitter_contract(&builder, true, false); + let upgraded_contract_query_view = ContractQueryView::new(&builder, upgraded_contract_hash); + + let upgraded_topics = upgraded_contract_query_view.message_topics(); + let upgraded_message_topic_hash = upgraded_topics + .get(MESSAGE_EMITTER_UPGRADED_TOPIC) + .expect("should have upgraded topic"); + + let emit_message_request = ExecuteRequestBuilder::contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + upgraded_contract_hash, + "upgraded_emit_message", + runtime_args! { + ARG_MESSAGE_SUFFIX_NAME => "test 2", + }, + ) + .with_block_time(DEFAULT_BLOCK_TIME) + .build(); + + builder + .borrow_mut() + .exec(emit_message_request) + .expect_success() + .commit(); + assert_last_message_block_index(2); + assert_eq!( + query_message_count(), + Some((BlockTime::new(DEFAULT_BLOCK_TIME), 3)) + ); + + let expected_message = MessagePayload::from(format!("{}{}", EMITTER_MESSAGE_PREFIX, "test 2")); + let expected_message_hash = cryptography::blake2b( + [ + 2u64.to_bytes().unwrap(), + expected_message.to_bytes().unwrap(), + ] + .concat(), + ); + let queried_message_summary = upgraded_contract_query_view + .message_summary(*upgraded_message_topic_hash, 0, None) + .expect("should have value") + .value(); + assert_eq!(expected_message_hash, queried_message_summary); + + // Now emit a message in a different block. The block index should be 0 since it's the first + // message in the new block. 
+ emit_message_with_suffix( + &builder, + "test 3", + &emitter_contract_hash, + DEFAULT_BLOCK_TIME + 1, + ); + assert_last_message_block_index(0); + assert_eq!( + query_message_count(), + Some((BlockTime::new(DEFAULT_BLOCK_TIME + 1), 1)) + ); + let expected_message = MessagePayload::from(format!("{}{}", EMITTER_MESSAGE_PREFIX, "test 3")); + let expected_message_hash = cryptography::blake2b( + [ + 0u64.to_bytes().unwrap(), + expected_message.to_bytes().unwrap(), + ] + .concat(), + ); + let queried_message_summary = query_view + .message_summary(*message_topic_hash, 0, None) + .expect("should have value") + .value(); + assert_eq!(expected_message_hash, queried_message_summary); +} + +#[ignore] +#[test] +fn emit_message_should_consume_variable_gas_based_on_topic_and_message_size() { + const MESSAGE_EMIT_COST: u32 = 1_000_000; + + const COST_PER_MESSAGE_TOPIC_NAME_SIZE: u32 = 2; + const COST_PER_MESSAGE_LENGTH: u32 = 1_000; + const MESSAGE_SUFFIX: &str = "test"; + + let chainspec = { + let wasm_v1_config = WasmV1Config::new( + DEFAULT_WASM_MAX_MEMORY, + DEFAULT_MAX_STACK_HEIGHT, + OpcodeCosts::zero(), + HostFunctionCostsV1 { + emit_message: HostFunction::new( + MESSAGE_EMIT_COST, + [ + 0, + COST_PER_MESSAGE_TOPIC_NAME_SIZE, + 0, + COST_PER_MESSAGE_LENGTH, + ], + ), + ..Zero::zero() + }, + ); + let wasm_v2_config = WasmV2Config::new( + DEFAULT_WASM_MAX_MEMORY, + OpcodeCosts::zero(), + HostFunctionCostsV2::default(), + ); + let wasm_config = WasmConfig::new(MessageLimits::default(), wasm_v1_config, wasm_v2_config); + ChainspecConfig { + wasm_config, + core_config: CoreConfig::default(), + system_costs_config: SystemConfig::default(), + storage_costs: StorageCosts::zero(), + } + }; + + let builder = RefCell::new(LmdbWasmTestBuilder::new_temporary_with_config(chainspec)); + builder + .borrow_mut() + .run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let contract_hash = install_messages_emitter_contract(&builder, true); + + // Emit one message in this execution. 
Cost should be consume of the call to emit message + + // consume charged for message topic name length + consume for message payload size. + emit_message_with_suffix(&builder, MESSAGE_SUFFIX, &contract_hash, DEFAULT_BLOCK_TIME); + let emit_message_gas_consume = builder.borrow().last_exec_gas_consumed().value(); + let payload: MessagePayload = format!("{}{}", EMITTER_MESSAGE_PREFIX, MESSAGE_SUFFIX).into(); + let expected_consume = MESSAGE_EMIT_COST + + COST_PER_MESSAGE_TOPIC_NAME_SIZE * MESSAGE_EMITTER_GENERIC_TOPIC.len() as u32 + + COST_PER_MESSAGE_LENGTH * payload.serialized_length() as u32; + assert_eq!(emit_message_gas_consume, expected_consume.into()); +} diff --git a/execution_engine_testing/tests/src/test/counter.rs b/execution_engine_testing/tests/src/test/counter.rs deleted file mode 100644 index cb6ec50868..0000000000 --- a/execution_engine_testing/tests/src/test/counter.rs +++ /dev/null @@ -1,213 +0,0 @@ -use casper_engine_test_support::{ - internal::{ExecuteRequestBuilder, InMemoryWasmTestBuilder, DEFAULT_RUN_GENESIS_REQUEST}, - DEFAULT_ACCOUNT_ADDR, -}; -use casper_types::{runtime_args, Key, RuntimeArgs}; - -const CONTRACT_COUNTER_DEFINE: &str = "counter_define.wasm"; -const HASH_KEY_NAME: &str = "counter_package_hash"; -const COUNTER_VALUE_UREF: &str = "counter"; -const ENTRYPOINT_COUNTER: &str = "counter"; -const ENTRYPOINT_SESSION: &str = "session"; -const COUNTER_CONTRACT_HASH_KEY_NAME: &str = "counter_contract_hash"; -const ARG_COUNTER_METHOD: &str = "method"; -const METHOD_INC: &str = "inc"; - -#[ignore] -#[test] -fn should_run_counter_example_contract() { - let mut builder = InMemoryWasmTestBuilder::default(); - - let exec_request_1 = ExecuteRequestBuilder::standard( - *DEFAULT_ACCOUNT_ADDR, - CONTRACT_COUNTER_DEFINE, - RuntimeArgs::new(), - ) - .build(); - - builder - .run_genesis(&DEFAULT_RUN_GENESIS_REQUEST) - .exec(exec_request_1) - .expect_success() - .commit(); - - let account = builder - .query(None, Key::Account(*DEFAULT_ACCOUNT_ADDR), 
&[]) - .expect("should query account") - .as_account() - .expect("should be account") - .clone(); - - let counter_contract_hash_key = *account - .named_keys() - .get(COUNTER_CONTRACT_HASH_KEY_NAME) - .expect("should have counter contract hash key"); - - let exec_request_2 = ExecuteRequestBuilder::versioned_contract_call_by_hash_key_name( - *DEFAULT_ACCOUNT_ADDR, - HASH_KEY_NAME, - None, - ENTRYPOINT_SESSION, - runtime_args! { COUNTER_CONTRACT_HASH_KEY_NAME => counter_contract_hash_key }, - ) - .build(); - - builder.exec(exec_request_2).expect_success().commit(); - - let value: i32 = builder - .query( - None, - counter_contract_hash_key, - &[COUNTER_VALUE_UREF.to_string()], - ) - .expect("should have counter value") - .as_cl_value() - .expect("should be CLValue") - .clone() - .into_t() - .expect("should cast CLValue to integer"); - - assert_eq!(value, 1); - - let exec_request_3 = ExecuteRequestBuilder::versioned_contract_call_by_hash_key_name( - *DEFAULT_ACCOUNT_ADDR, - HASH_KEY_NAME, - None, - ENTRYPOINT_SESSION, - runtime_args! 
{ COUNTER_CONTRACT_HASH_KEY_NAME => counter_contract_hash_key }, - ) - .build(); - - builder.exec(exec_request_3).expect_success().commit(); - - let value: i32 = builder - .query( - None, - counter_contract_hash_key, - &[COUNTER_VALUE_UREF.to_string()], - ) - .expect("should have counter value") - .as_cl_value() - .expect("should be CLValue") - .clone() - .into_t() - .expect("should cast CLValue to integer"); - - assert_eq!(value, 2); -} - -#[ignore] -#[test] -fn should_default_contract_hash_arg() { - let mut builder = InMemoryWasmTestBuilder::default(); - - // This test runs a contract that's after every call extends the same key with - // more data - let exec_request_1 = ExecuteRequestBuilder::standard( - *DEFAULT_ACCOUNT_ADDR, - CONTRACT_COUNTER_DEFINE, - RuntimeArgs::new(), - ) - .build(); - - builder - .run_genesis(&DEFAULT_RUN_GENESIS_REQUEST) - .exec(exec_request_1) - .expect_success() - .commit(); - - let exec_request_2 = ExecuteRequestBuilder::versioned_contract_call_by_hash_key_name( - *DEFAULT_ACCOUNT_ADDR, - HASH_KEY_NAME, - None, - ENTRYPOINT_SESSION, - RuntimeArgs::new(), - ) - .build(); - - builder.exec(exec_request_2).expect_success().commit(); - - let value: i32 = { - let counter_contract_hash_key = *builder - .query(None, Key::Account(*DEFAULT_ACCOUNT_ADDR), &[]) - .expect("should query account") - .as_account() - .expect("should be account") - .clone() - .named_keys() - .get(COUNTER_CONTRACT_HASH_KEY_NAME) - .expect("should have counter contract hash key"); - - builder - .query( - None, - counter_contract_hash_key, - &[COUNTER_VALUE_UREF.to_string()], - ) - .expect("should have counter value") - .as_cl_value() - .expect("should be CLValue") - .clone() - .into_t() - .expect("should cast CLValue to integer") - }; - - assert_eq!(value, 1); -} - -#[ignore] -#[test] -fn should_call_counter_contract_directly() { - let mut builder = InMemoryWasmTestBuilder::default(); - - let exec_request_1 = ExecuteRequestBuilder::standard( - *DEFAULT_ACCOUNT_ADDR, - 
CONTRACT_COUNTER_DEFINE, - RuntimeArgs::new(), - ) - .build(); - - builder - .run_genesis(&DEFAULT_RUN_GENESIS_REQUEST) - .exec(exec_request_1) - .expect_success() - .commit(); - - let exec_request_2 = ExecuteRequestBuilder::versioned_contract_call_by_hash_key_name( - *DEFAULT_ACCOUNT_ADDR, - HASH_KEY_NAME, - None, - ENTRYPOINT_COUNTER, - runtime_args! { ARG_COUNTER_METHOD => METHOD_INC }, - ) - .build(); - - builder.exec(exec_request_2).expect_success().commit(); - - let value: i32 = { - let counter_contract_hash_key = *builder - .query(None, Key::Account(*DEFAULT_ACCOUNT_ADDR), &[]) - .expect("should query account") - .as_account() - .expect("should be account") - .clone() - .named_keys() - .get(COUNTER_CONTRACT_HASH_KEY_NAME) - .expect("should have counter contract hash key"); - - builder - .query( - None, - counter_contract_hash_key, - &[COUNTER_VALUE_UREF.to_string()], - ) - .expect("should have counter value") - .as_cl_value() - .expect("should be CLValue") - .clone() - .into_t() - .expect("should cast CLValue to integer") - }; - - assert_eq!(value, 1); -} diff --git a/execution_engine_testing/tests/src/test/counter_factory.rs b/execution_engine_testing/tests/src/test/counter_factory.rs new file mode 100644 index 0000000000..3689ecbb2a --- /dev/null +++ b/execution_engine_testing/tests/src/test/counter_factory.rs @@ -0,0 +1,285 @@ +use std::{collections::BTreeSet, iter::FromIterator}; + +use crate::wasm_utils; +use casper_engine_test_support::{ + ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST, +}; +use casper_execution_engine::{engine_state::Error, execution::ExecError}; +use casper_types::{ + addressable_entity::{EntityKindTag, DEFAULT_ENTRY_POINT_NAME}, + runtime_args, AddressableEntityHash, ByteCodeAddr, Key, RuntimeArgs, U512, +}; + +const CONTRACT_COUNTER_FACTORY: &str = "counter_factory.wasm"; +const CONTRACT_FACTORY_DEFAULT_ENTRY_POINT: &str = "contract_factory_default"; +const CONTRACT_FACTORY_ENTRY_POINT: &str 
= "contract_factory"; +const DECREASE_ENTRY_POINT: &str = "decrement"; +const INCREASE_ENTRY_POINT: &str = "increment"; +const ARG_INITIAL_VALUE: &str = "initial_value"; +const ARG_NAME: &str = "name"; +const NEW_COUNTER_1_NAME: &str = "new-counter-1"; +const NEW_COUNTER_2_NAME: &str = "new-counter-2"; + +#[ignore] +#[test] +fn should_not_call_undefined_entrypoints_on_factory() { + let (mut builder, contract_hash) = setup(); + + let exec_request_1 = ExecuteRequestBuilder::contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + contract_hash, + DEFAULT_ENTRY_POINT_NAME, // should not be able to call "call" entry point + RuntimeArgs::new(), + ) + .build(); + + builder.exec(exec_request_1).commit(); + + let no_such_method_1 = builder.get_error().expect("should have error"); + + assert!( + matches!(no_such_method_1, Error::Exec(ExecError::NoSuchMethod(function_name)) if function_name == DEFAULT_ENTRY_POINT_NAME) + ); + + // Can't call abstract entry point "increase" on the factory. + + let exec_request_2 = ExecuteRequestBuilder::contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + contract_hash, + INCREASE_ENTRY_POINT, + RuntimeArgs::new(), + ) + .build(); + + builder.exec(exec_request_2).commit(); + + let no_such_method_2 = builder.get_error().expect("should have error"); + + assert!( + matches!(&no_such_method_2, Error::Exec(ExecError::TemplateMethod(function_name)) if function_name == INCREASE_ENTRY_POINT), + "{:?}", + &no_such_method_2 + ); + + // Can't call abstract entry point "decrease" on the factory. 
+ + let exec_request_3 = ExecuteRequestBuilder::contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + contract_hash, + DECREASE_ENTRY_POINT, + RuntimeArgs::new(), + ) + .build(); + + builder.exec(exec_request_3).commit(); + + let no_such_method_3 = builder.get_error().expect("should have error"); + + assert!( + matches!(&no_such_method_3, Error::Exec(ExecError::TemplateMethod(function_name)) if function_name == DECREASE_ENTRY_POINT), + "{:?}", + &no_such_method_3 + ); +} + +#[ignore] +#[test] +fn contract_factory_wasm_should_have_expected_exports() { + let (builder, contract_hash) = setup(); + + let enable_entity = builder.chainspec().core_config.enable_addressable_entity; + + let bytes = if enable_entity { + let factory_contract = builder + .query( + None, + Key::addressable_entity_key(EntityKindTag::SmartContract, contract_hash), + &[], + ) + .expect("should have contract") + .as_addressable_entity() + .cloned() + .expect("should be contract"); + + let factory_contract_byte_code_key = Key::byte_code_key(ByteCodeAddr::new_wasm_addr( + factory_contract.byte_code_addr(), + )); + + let factory_contract_wasm = builder + .query(None, factory_contract_byte_code_key, &[]) + .expect("should have contract wasm") + .as_byte_code() + .cloned() + .expect("should have wasm"); + factory_contract_wasm.take_bytes() + } else { + let factory_contract = builder + .query(None, Key::Hash(contract_hash.value()), &[]) + .expect("should have contract") + .as_contract() + .cloned() + .expect("should be contract"); + + let factory_contract_byte_code_key = + Key::Hash(factory_contract.contract_wasm_hash().value()); + + let factory_contract_wasm = builder + .query(None, factory_contract_byte_code_key, &[]) + .expect("should have contract wasm") + .as_contract_wasm() + .cloned() + .expect("should have wasm"); + + factory_contract_wasm.take_bytes() + }; + + let factory_wasm_exports = wasm_utils::get_wasm_exports(&bytes); + let expected_entrypoints = BTreeSet::from_iter([ + 
INCREASE_ENTRY_POINT.to_string(), + DECREASE_ENTRY_POINT.to_string(), + CONTRACT_FACTORY_ENTRY_POINT.to_string(), + CONTRACT_FACTORY_DEFAULT_ENTRY_POINT.to_string(), + ]); + assert_eq!(factory_wasm_exports, expected_entrypoints); +} + +#[ignore] +#[test] +fn should_install_and_use_factory_pattern() { + let (mut builder, contract_hash) = setup(); + + // Call a factory entrypoint + let exec_request_1 = ExecuteRequestBuilder::contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + contract_hash, + CONTRACT_FACTORY_ENTRY_POINT, + runtime_args! { + ARG_NAME => NEW_COUNTER_1_NAME, + ARG_INITIAL_VALUE => U512::one(), + }, + ) + .build(); + + builder.exec(exec_request_1).commit().expect_success(); + + // Call a different factory entrypoint that accepts different set of arguments + let exec_request_2 = ExecuteRequestBuilder::contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + contract_hash, + CONTRACT_FACTORY_DEFAULT_ENTRY_POINT, + runtime_args! { + ARG_NAME => NEW_COUNTER_2_NAME, + }, + ) + .build(); + + builder.exec(exec_request_2).commit().expect_success(); + + let counter_factory_contract = builder + .get_entity_with_named_keys_by_entity_hash(contract_hash) + .expect("should have contract hash"); + + let new_counter_1 = counter_factory_contract + .named_keys() + .get(NEW_COUNTER_1_NAME) + .expect("new counter should exist") + .into_entity_hash() + .unwrap(); + + let new_counter_1_contract = builder + .get_addressable_entity(new_counter_1) + .expect("should have contract instance"); + + let new_counter_2 = counter_factory_contract + .named_keys() + .get(NEW_COUNTER_2_NAME) + .expect("new counter should exist") + .into_entity_hash() + .unwrap(); + + let _new_counter_2_contract = builder + .get_addressable_entity(new_counter_2) + .expect("should have contract instance"); + + let counter_1_wasm = if builder.chainspec().core_config.enable_addressable_entity { + builder + .query( + None, + Key::byte_code_key(ByteCodeAddr::new_wasm_addr( + new_counter_1_contract.byte_code_addr(), + 
)), + &[], + ) + .expect("should have contract wasm") + .as_byte_code() + .cloned() + .expect("should have wasm") + .take_bytes() + } else { + builder + .query( + None, + Key::Hash(new_counter_1_contract.byte_code_addr()), + &[], + ) + .expect("should have contract wasm") + .as_contract_wasm() + .cloned() + .expect("should have wasm") + .take_bytes() + }; + + let new_counter_1_exports = wasm_utils::get_wasm_exports(&counter_1_wasm); + assert_eq!( + new_counter_1_exports, + BTreeSet::from_iter([ + INCREASE_ENTRY_POINT.to_string(), + DECREASE_ENTRY_POINT.to_string() + ]) + ); + + let increment_request = ExecuteRequestBuilder::contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + new_counter_1, + INCREASE_ENTRY_POINT, + RuntimeArgs::new(), + ) + .build(); + + builder.exec(increment_request).commit().expect_success(); + + let decrement_request = ExecuteRequestBuilder::contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + new_counter_1, + DECREASE_ENTRY_POINT, + RuntimeArgs::new(), + ) + .build(); + + builder.exec(decrement_request).commit().expect_success(); +} + +fn setup() -> (LmdbWasmTestBuilder, AddressableEntityHash) { + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let exec_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_COUNTER_FACTORY, + RuntimeArgs::new(), + ) + .build(); + + builder.exec(exec_request).commit().expect_success(); + + let account = builder + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .expect("should have entity for account"); + + let contract_hash_key = account + .named_keys() + .get("factory_hash") + .expect("should have factory hash"); + + (builder, contract_hash_key.into_entity_hash().unwrap()) +} diff --git a/execution_engine_testing/tests/src/test/deploy/context_association.rs b/execution_engine_testing/tests/src/test/deploy/context_association.rs index a36e8935e5..109e2986d0 100644 --- 
a/execution_engine_testing/tests/src/test/deploy/context_association.rs +++ b/execution_engine_testing/tests/src/test/deploy/context_association.rs @@ -1,15 +1,11 @@ use casper_engine_test_support::{ - internal::{ - DeployItemBuilder, ExecuteRequestBuilder, InMemoryWasmTestBuilder, DEFAULT_ACCOUNT_KEY, - DEFAULT_PAYMENT, DEFAULT_RUN_GENESIS_REQUEST, - }, - DEFAULT_ACCOUNT_ADDR, + DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, + DEFAULT_ACCOUNT_KEY, DEFAULT_PAYMENT, LOCAL_GENESIS_REQUEST, }; use casper_types::{ runtime_args, system::{AUCTION, HANDLE_PAYMENT, MINT}, - RuntimeArgs, }; const SYSTEM_CONTRACT_HASHES_WASM: &str = "system_contract_hashes.wasm"; @@ -19,53 +15,61 @@ const ARG_AMOUNT: &str = "amount"; #[test] fn should_put_system_contract_hashes_to_account_context() { let payment_purse_amount = *DEFAULT_PAYMENT; - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - let request = { - let deploy = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_session_code(SYSTEM_CONTRACT_HASHES_WASM, runtime_args! {}) - .with_empty_payment_bytes(runtime_args! { ARG_AMOUNT => payment_purse_amount}) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_KEY]) - .with_deploy_hash([1; 32]) - .build(); + let deploy_item = DeployItemBuilder::new() + .with_address(*DEFAULT_ACCOUNT_ADDR) + .with_session_code(SYSTEM_CONTRACT_HASHES_WASM, runtime_args! {}) + .with_standard_payment(runtime_args! 
{ ARG_AMOUNT => payment_purse_amount}) + .with_authorization_keys(&[*DEFAULT_ACCOUNT_KEY]) + .with_deploy_hash([1; 32]) + .build(); - ExecuteRequestBuilder::new().push_deploy(deploy).build() - }; + let request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); builder - .run_genesis(&DEFAULT_RUN_GENESIS_REQUEST) + .run_genesis(LOCAL_GENESIS_REQUEST.clone()) .exec(request) .expect_success() .commit(); let account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) .expect("account should exist"); let named_keys = account.named_keys(); - assert!(named_keys.contains_key(MINT), "should contain mint"); + assert!(named_keys.contains(MINT), "should contain mint"); assert!( - named_keys.contains_key(HANDLE_PAYMENT), + named_keys.contains(HANDLE_PAYMENT), "should contain handle payment" ); - assert!(named_keys.contains_key(AUCTION), "should contain auction"); + assert!(named_keys.contains(AUCTION), "should contain auction"); assert_eq!( - named_keys[MINT].into_hash().expect("should be a hash"), + named_keys + .get(MINT) + .unwrap() + .into_entity_hash_addr() + .expect("should be a hash"), builder.get_mint_contract_hash().value(), "mint_contract_hash should match" ); assert_eq!( - named_keys[HANDLE_PAYMENT] - .into_hash() + named_keys + .get(HANDLE_PAYMENT) + .unwrap() + .into_entity_hash_addr() .expect("should be a hash"), builder.get_handle_payment_contract_hash().value(), "handle_payment_contract_hash should match" ); assert_eq!( - named_keys[AUCTION].into_hash().expect("should be a hash"), + named_keys + .get(AUCTION) + .unwrap() + .into_entity_hash_addr() + .expect("should be a hash"), builder.get_auction_contract_hash().value(), "auction_contract_hash should match" ); diff --git a/execution_engine_testing/tests/src/test/deploy/non_standard_payment.rs b/execution_engine_testing/tests/src/test/deploy/non_standard_payment.rs index 54fada5f57..566a2137f9 100644 --- 
a/execution_engine_testing/tests/src/test/deploy/non_standard_payment.rs +++ b/execution_engine_testing/tests/src/test/deploy/non_standard_payment.rs @@ -1,16 +1,20 @@ use casper_engine_test_support::{ - internal::{ - DeployItemBuilder, ExecuteRequestBuilder, InMemoryWasmTestBuilder, DEFAULT_PAYMENT, - DEFAULT_RUN_GENESIS_REQUEST, - }, - DEFAULT_ACCOUNT_ADDR, MINIMUM_ACCOUNT_CREATION_BALANCE, + DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, + DEFAULT_PAYMENT, DEFAULT_PROTOCOL_VERSION, LOCAL_GENESIS_REQUEST, + MINIMUM_ACCOUNT_CREATION_BALANCE, +}; +use casper_execution_engine::{engine_state::BlockInfo, execution::ExecError}; +use casper_storage::data_access_layer::BalanceIdentifier; +use casper_types::{ + account::AccountHash, runtime_args, ApiError, BlockHash, Digest, Gas, RuntimeArgs, Timestamp, + U512, }; -use casper_types::{account::AccountHash, runtime_args, RuntimeArgs, U512}; const ACCOUNT_1_ADDR: AccountHash = AccountHash::new([42u8; 32]); const DO_NOTHING_WASM: &str = "do_nothing.wasm"; const CONTRACT_TRANSFER_TO_ACCOUNT: &str = "transfer_to_account_u512.wasm"; const TRANSFER_MAIN_PURSE_TO_NEW_PURSE_WASM: &str = "transfer_main_purse_to_new_purse.wasm"; +const PAYMENT_PURSE_PERSIST_WASM: &str = "payment_purse_persist.wasm"; const NAMED_PURSE_PAYMENT_WASM: &str = "named_purse_payment.wasm"; const ARG_TARGET: &str = "target"; const ARG_AMOUNT: &str = "amount"; @@ -25,11 +29,10 @@ fn should_charge_non_main_purse() { const TEST_PURSE_NAME: &str = "test-purse"; let account_1_account_hash = ACCOUNT_1_ADDR; - let payment_purse_amount = *DEFAULT_PAYMENT; let account_1_funding_amount = U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE); let account_1_purse_funding_amount = *DEFAULT_PAYMENT; - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); let setup_exec_request = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, @@ -43,67 +46,209 @@ fn should_charge_non_main_purse() { 
TRANSFER_MAIN_PURSE_TO_NEW_PURSE_WASM, runtime_args! { ARG_DESTINATION => TEST_PURSE_NAME, ARG_AMOUNT => account_1_purse_funding_amount }, ) - .build(); + .build(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); - builder.exec(setup_exec_request).expect_success().commit(); builder + .exec(setup_exec_request) + .expect_success() + .commit() .exec(create_purse_exec_request) .expect_success() .commit(); - let transfer_result = builder.finish(); // get account_1 - let account_1 = transfer_result - .builder() - .get_account(ACCOUNT_1_ADDR) + let account_1 = builder + .get_entity_with_named_keys_by_account_hash(ACCOUNT_1_ADDR) .expect("should have account"); // get purse - let purse_key = account_1.named_keys()[TEST_PURSE_NAME]; + let purse_key = account_1.named_keys().get(TEST_PURSE_NAME).unwrap(); let purse = purse_key.into_uref().expect("should have uref"); - let purse_starting_balance = builder.get_purse_balance(purse); assert_eq!( purse_starting_balance, account_1_purse_funding_amount, - "purse should be funded with expected amount" + "purse should be funded with expected amount, which in this case is also == to the amount to be paid" ); + // in this test, we're just going to pay everything in the purse to + // keep the math easy. + let amount_to_be_paid = account_1_purse_funding_amount; // should be able to pay for exec using new purse - let account_payment_exec_request = { - let deploy = DeployItemBuilder::new() - .with_address(ACCOUNT_1_ADDR) - .with_session_code(DO_NOTHING_WASM, RuntimeArgs::default()) - .with_payment_code( - NAMED_PURSE_PAYMENT_WASM, - runtime_args! 
{ - ARG_PURSE_NAME => TEST_PURSE_NAME, - ARG_AMOUNT => payment_purse_amount - }, - ) - .with_authorization_keys(&[account_1_account_hash]) - .with_deploy_hash([3; 32]) - .build(); - - ExecuteRequestBuilder::new().push_deploy(deploy).build() - }; - - let proposer_reward_starting_balance = builder.get_proposer_purse_balance(); - + let deploy_item = DeployItemBuilder::new() + .with_address(ACCOUNT_1_ADDR) + .with_session_code(DO_NOTHING_WASM, RuntimeArgs::default()) + .with_payment_code( + NAMED_PURSE_PAYMENT_WASM, + runtime_args! { + ARG_PURSE_NAME => TEST_PURSE_NAME, + ARG_AMOUNT => amount_to_be_paid + }, + ) + .with_authorization_keys(&[account_1_account_hash]) + .with_deploy_hash([3; 32]) + .build(); + + let block_time = Timestamp::now().millis(); + let parent_block_hash = BlockHash::default(); + let block_info = BlockInfo::new( + Digest::default(), + block_time.into(), + parent_block_hash, + 1, + DEFAULT_PROTOCOL_VERSION, + ); builder - .exec(account_payment_exec_request) + .exec_wasm_v1( + deploy_item + .new_custom_payment_from_deploy_item(block_info, Gas::from(12_500_000_000_u64)) + .expect("should be valid req"), + ) .expect_success() .commit(); - let transaction_fee = builder.get_proposer_purse_balance() - proposer_reward_starting_balance; + let payment_purse_balance = builder + .get_purse_balance_result_with_proofs(DEFAULT_PROTOCOL_VERSION, BalanceIdentifier::Payment); - let expected_resting_balance = account_1_purse_funding_amount - transaction_fee; + assert!( + payment_purse_balance.is_success(), + "payment purse balance check should succeed" + ); - let purse_final_balance = builder.get_purse_balance(purse); + let paid_amount = *payment_purse_balance + .available_balance() + .expect("should have payment amount"); assert_eq!( - purse_final_balance, expected_resting_balance, + paid_amount, amount_to_be_paid, "purse resting balance should equal funding amount minus exec costs" ); + + let purse_final_balance = builder.get_purse_balance(purse); + + assert_eq!( 
+ purse_final_balance, + U512::zero(), + "since we zero'd out the paying purse, the final balance should be zero" + ); +} + +const ARG_METHOD: &str = "method"; + +#[ignore] +#[test] +fn should_not_allow_custom_payment_purse_persistence_1() { + let mut builder = LmdbWasmTestBuilder::default(); + + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let account_hash = *DEFAULT_ACCOUNT_ADDR; + + let deploy_item = DeployItemBuilder::new() + .with_address(account_hash) + .with_session_code(DO_NOTHING_WASM, RuntimeArgs::default()) + .with_payment_code( + PAYMENT_PURSE_PERSIST_WASM, + runtime_args! {ARG_AMOUNT => *DEFAULT_PAYMENT, ARG_METHOD => "put_key"}, + ) + .with_deploy_hash([1; 32]) + .with_authorization_keys(&[account_hash]) + .build(); + let block_info = BlockInfo::new( + Digest::default(), + Timestamp::now().millis().into(), + BlockHash::default(), + 1, + DEFAULT_PROTOCOL_VERSION, + ); + let limit = Gas::from(12_500_000_000_u64); + + let request = deploy_item + .new_custom_payment_from_deploy_item(block_info, limit) + .expect("should be valid req"); + + builder.exec_wasm_v1(request).expect_failure(); + + builder.assert_error(casper_execution_engine::engine_state::Error::Exec( + ExecError::Revert(ApiError::HandlePayment(40)), + )); +} + +#[ignore] +#[test] +fn should_not_allow_custom_payment_purse_persistence_2() { + let mut builder = LmdbWasmTestBuilder::default(); + + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let account_hash = *DEFAULT_ACCOUNT_ADDR; + + let deploy_item = DeployItemBuilder::new() + .with_address(account_hash) + .with_session_code(DO_NOTHING_WASM, RuntimeArgs::default()) + .with_payment_code( + PAYMENT_PURSE_PERSIST_WASM, + runtime_args! 
{ARG_AMOUNT => *DEFAULT_PAYMENT, ARG_METHOD => "call_contract"}, + ) + .with_deploy_hash([1; 32]) + .with_authorization_keys(&[account_hash]) + .build(); + let block_info = BlockInfo::new( + Digest::default(), + Timestamp::now().millis().into(), + BlockHash::default(), + 1, + DEFAULT_PROTOCOL_VERSION, + ); + let limit = Gas::from(12_500_000_000_u64); + + let request = deploy_item + .new_custom_payment_from_deploy_item(block_info, limit) + .expect("should be valid req"); + + builder.exec_wasm_v1(request).expect_failure(); + + builder.assert_error(casper_execution_engine::engine_state::Error::Exec( + ExecError::Revert(ApiError::HandlePayment(40)), + )); +} + +#[ignore] +#[test] +fn should_not_allow_custom_payment_purse_persistence_3() { + let mut builder = LmdbWasmTestBuilder::default(); + + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let account_hash = *DEFAULT_ACCOUNT_ADDR; + + let deploy_item = DeployItemBuilder::new() + .with_address(account_hash) + .with_session_code(DO_NOTHING_WASM, RuntimeArgs::default()) + .with_payment_code( + PAYMENT_PURSE_PERSIST_WASM, + runtime_args! 
{ARG_AMOUNT => *DEFAULT_PAYMENT, ARG_METHOD => "call_versioned_contract"}, + ) + .with_deploy_hash([1; 32]) + .with_authorization_keys(&[account_hash]) + .build(); + let block_info = BlockInfo::new( + Digest::default(), + Timestamp::now().millis().into(), + BlockHash::default(), + 1, + DEFAULT_PROTOCOL_VERSION, + ); + let limit = Gas::from(12_500_000_000_u64); + + let request = deploy_item + .new_custom_payment_from_deploy_item(block_info, limit) + .expect("should be valid req"); + + builder.exec_wasm_v1(request).expect_failure(); + + builder.assert_error(casper_execution_engine::engine_state::Error::Exec( + ExecError::Revert(ApiError::HandlePayment(40)), + )); } diff --git a/execution_engine_testing/tests/src/test/deploy/preconditions.rs b/execution_engine_testing/tests/src/test/deploy/preconditions.rs index 3d850d2c0e..c26274f1db 100644 --- a/execution_engine_testing/tests/src/test/deploy/preconditions.rs +++ b/execution_engine_testing/tests/src/test/deploy/preconditions.rs @@ -1,13 +1,11 @@ use assert_matches::assert_matches; use casper_engine_test_support::{ - internal::{ - utils, DeployItemBuilder, ExecuteRequestBuilder, InMemoryWasmTestBuilder, - DEFAULT_RUN_GENESIS_REQUEST, - }, - DEFAULT_ACCOUNT_ADDR, + utils, DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, + LOCAL_GENESIS_REQUEST, }; -use casper_execution_engine::core::engine_state::Error; +use casper_execution_engine::engine_state::Error; +use casper_storage::tracking_copy::TrackingCopyError; use casper_types::{account::AccountHash, runtime_args, RuntimeArgs, U512}; const ACCOUNT_1_ADDR: AccountHash = AccountHash::new([42u8; 32]); @@ -21,65 +19,67 @@ fn should_raise_precondition_authorization_failure_invalid_account() { let payment_purse_amount = 10_000_000; let transferred_amount = 1; - let exec_request = { - let deploy = DeployItemBuilder::new() + let deploy_item = DeployItemBuilder::new() .with_address(*DEFAULT_ACCOUNT_ADDR) .with_deploy_hash([1; 32]) 
.with_session_code( "transfer_purse_to_account.wasm", runtime_args! { "target" =>account_1_account_hash, "amount" => U512::from(transferred_amount) }, ) - .with_address(nonexistent_account_addr) - .with_empty_payment_bytes(runtime_args! { ARG_AMOUNT => U512::from(payment_purse_amount) }) + // .with_address(nonexistent_account_addr) + .with_standard_payment(runtime_args! { ARG_AMOUNT => U512::from(payment_purse_amount) }) .with_authorization_keys(&[nonexistent_account_addr]) .build(); - ExecuteRequestBuilder::new().push_deploy(deploy).build() - }; + let exec_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); - let transfer_result = InMemoryWasmTestBuilder::default() - .run_genesis(&DEFAULT_RUN_GENESIS_REQUEST) + let mut builder = LmdbWasmTestBuilder::default(); + builder + .run_genesis(LOCAL_GENESIS_REQUEST.clone()) .exec(exec_request) - .finish(); + .commit(); - let response = transfer_result - .builder() - .get_exec_result(0) + let response = builder + .get_exec_result_owned(0) .expect("there should be a response"); - let precondition_failure = utils::get_precondition_failure(response); - assert_matches!(precondition_failure, Error::Authorization); + let precondition_failure = utils::get_precondition_failure(&response); + assert_matches!( + precondition_failure, + Error::TrackingCopy(TrackingCopyError::Authorization) + ); } #[ignore] #[test] fn should_raise_precondition_authorization_failure_empty_authorized_keys() { let empty_keys: [AccountHash; 0] = []; - let exec_request = { - let deploy = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_session_code("do_nothing.wasm", RuntimeArgs::default()) - .with_empty_payment_bytes(RuntimeArgs::default()) - .with_deploy_hash([1; 32]) - // empty authorization keys to force error - .with_authorization_keys(&empty_keys) - .build(); - - ExecuteRequestBuilder::new().push_deploy(deploy).build() - }; - - let transfer_result = InMemoryWasmTestBuilder::default() - 
.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST) + let deploy_item = DeployItemBuilder::new() + .with_address(*DEFAULT_ACCOUNT_ADDR) + .with_session_code("do_nothing.wasm", RuntimeArgs::default()) + .with_standard_payment(RuntimeArgs::default()) + .with_deploy_hash([1; 32]) + // empty authorization keys to force error + .with_authorization_keys(&empty_keys) + .build(); + + let exec_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); + + let mut builder = LmdbWasmTestBuilder::default(); + builder + .run_genesis(LOCAL_GENESIS_REQUEST.clone()) .exec(exec_request) - .finish(); + .commit(); - let response = transfer_result - .builder() - .get_exec_result(0) + let response = builder + .get_exec_result_owned(0) .expect("there should be a response"); - let precondition_failure = utils::get_precondition_failure(response); - assert_matches!(precondition_failure, Error::Authorization); + let precondition_failure = utils::get_precondition_failure(&response); + assert_matches!( + precondition_failure, + Error::TrackingCopy(TrackingCopyError::Authorization) + ); } #[ignore] @@ -90,32 +90,33 @@ fn should_raise_precondition_authorization_failure_invalid_authorized_keys() { let payment_purse_amount = 10_000_000; let transferred_amount = 1; - let exec_request = { - let deploy = DeployItemBuilder::new() + let deploy_item = DeployItemBuilder::new() .with_address(*DEFAULT_ACCOUNT_ADDR) .with_deploy_hash([1; 32]) .with_session_code( "transfer_purse_to_account.wasm", runtime_args! { "target" =>account_1_account_hash, "amount" => U512::from(transferred_amount) }, ) - .with_empty_payment_bytes(runtime_args! { ARG_AMOUNT => U512::from(payment_purse_amount) }) + .with_standard_payment(runtime_args! 
{ ARG_AMOUNT => U512::from(payment_purse_amount) }) // invalid authorization key to force error .with_authorization_keys(&[nonexistent_account_addr]) .build(); - ExecuteRequestBuilder::new().push_deploy(deploy).build() - }; + let exec_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); - let transfer_result = InMemoryWasmTestBuilder::default() - .run_genesis(&DEFAULT_RUN_GENESIS_REQUEST) + let mut builder = LmdbWasmTestBuilder::default(); + builder + .run_genesis(LOCAL_GENESIS_REQUEST.clone()) .exec(exec_request) - .finish(); + .commit(); - let response = transfer_result - .builder() - .get_exec_result(0) + let response = builder + .get_exec_result_owned(0) .expect("there should be a response"); - let precondition_failure = utils::get_precondition_failure(response); - assert_matches!(precondition_failure, Error::Authorization); + let precondition_failure = utils::get_precondition_failure(&response); + assert_matches!( + precondition_failure, + Error::TrackingCopy(TrackingCopyError::Authorization) + ); } diff --git a/execution_engine_testing/tests/src/test/deploy/receipts.rs b/execution_engine_testing/tests/src/test/deploy/receipts.rs index ce9a9ebda3..ac4ad122b2 100644 --- a/execution_engine_testing/tests/src/test/deploy/receipts.rs +++ b/execution_engine_testing/tests/src/test/deploy/receipts.rs @@ -3,13 +3,12 @@ use std::collections::{BTreeMap, BTreeSet}; use once_cell::sync::Lazy; use casper_engine_test_support::{ - internal::{ExecuteRequestBuilder, InMemoryWasmTestBuilder, DEFAULT_RUN_GENESIS_REQUEST}, - DEFAULT_ACCOUNT_ADDR, + ExecuteRequestBuilder, LmdbWasmTestBuilder, TransferRequestBuilder, DEFAULT_ACCOUNT_ADDR, + LOCAL_GENESIS_REQUEST, }; -use casper_execution_engine::storage::protocol_data::DEFAULT_WASMLESS_TRANSFER_COST; use casper_types::{ - account::AccountHash, runtime_args, AccessRights, DeployHash, PublicKey, RuntimeArgs, - SecretKey, Transfer, TransferAddr, U512, + account::AccountHash, runtime_args, system::mint, AccessRights, 
Gas, InitiatorAddr, PublicKey, + SecretKey, Transfer, TransferV2, U512, }; const CONTRACT_TRANSFER_PURSE_TO_ACCOUNT: &str = "transfer_purse_to_account.wasm"; @@ -19,18 +18,26 @@ const TRANSFER_ARG_AMOUNT: &str = "amount"; const TRANSFER_ARG_ID: &str = "id"; const CONTRACT_TRANSFER_PURSE_TO_ACCOUNTS: &str = "transfer_purse_to_accounts.wasm"; -const TRANSFER_ARG_SOURCE: &str = "source"; const TRANSFER_ARG_TARGETS: &str = "targets"; const CONTRACT_TRANSFER_PURSE_TO_ACCOUNTS_STORED: &str = "transfer_purse_to_accounts_stored.wasm"; const CONTRACT_TRANSFER_PURSE_TO_ACCOUNTS_SUBCALL: &str = "transfer_purse_to_accounts_subcall.wasm"; -static ALICE_KEY: Lazy = - Lazy::new(|| SecretKey::ed25519_from_bytes([3; 32]).unwrap().into()); -static BOB_KEY: Lazy = - Lazy::new(|| SecretKey::ed25519_from_bytes([5; 32]).unwrap().into()); -static CAROL_KEY: Lazy = - Lazy::new(|| SecretKey::ed25519_from_bytes([7; 32]).unwrap().into()); +const HASH_KEY_NAME: &str = "transfer_purse_to_accounts_hash"; +const PURSE_NAME: &str = "purse"; + +static ALICE_KEY: Lazy = Lazy::new(|| { + let secret_key = SecretKey::ed25519_from_bytes([3; 32]).unwrap(); + PublicKey::from(&secret_key) +}); +static BOB_KEY: Lazy = Lazy::new(|| { + let secret_key = SecretKey::ed25519_from_bytes([5; 32]).unwrap(); + PublicKey::from(&secret_key) +}); +static CAROL_KEY: Lazy = Lazy::new(|| { + let secret_key = SecretKey::ed25519_from_bytes([7; 32]).unwrap(); + PublicKey::from(&secret_key) +}); static ALICE_ADDR: Lazy = Lazy::new(|| AccountHash::from(&*ALICE_KEY)); static BOB_ADDR: Lazy = Lazy::new(|| AccountHash::from(&*BOB_KEY)); @@ -43,76 +50,71 @@ static TRANSFER_AMOUNT_3: Lazy = Lazy::new(|| U512::from(300_100_000)); #[ignore] #[test] fn should_record_wasmless_transfer() { - let mut builder = InMemoryWasmTestBuilder::default(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); - let id = Some(0); + let id = 0; - 
let transfer_request = ExecuteRequestBuilder::transfer( - *DEFAULT_ACCOUNT_ADDR, - runtime_args! { - TRANSFER_ARG_TARGET => *ALICE_ADDR, - TRANSFER_ARG_AMOUNT => *TRANSFER_AMOUNT_1, - TRANSFER_ARG_ID => id - }, - ) - .build(); + let transfer_request = TransferRequestBuilder::new(*TRANSFER_AMOUNT_1, *ALICE_ADDR) + .with_transfer_id(id) + .build(); - let deploy_hash = { - let deploy_items: Vec = transfer_request - .deploys() - .iter() - .map(|deploy_item| deploy_item.deploy_hash) - .collect(); - deploy_items[0] - }; + let txn_hash = transfer_request.transaction_hash(); - builder.exec(transfer_request).commit().expect_success(); + builder + .transfer_and_commit(transfer_request) + .expect_success(); let default_account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) + .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR) .expect("should have default account"); let alice_account = builder - .get_account(*ALICE_ADDR) + .get_entity_by_account_hash(*ALICE_ADDR) .expect("should have Alice's account"); let alice_attenuated_main_purse = alice_account .main_purse() .with_access_rights(AccessRights::ADD); - let deploy_info = builder - .get_deploy_info(deploy_hash) - .expect("should have deploy info"); - - assert_eq!(deploy_info.deploy_hash, deploy_hash); - assert_eq!(deploy_info.from, *DEFAULT_ACCOUNT_ADDR); - assert_eq!(deploy_info.source, default_account.main_purse()); + let execution_result = builder + .get_last_exec_result() + .expect("Expected execution results."); - assert_eq!(deploy_info.gas, U512::from(DEFAULT_WASMLESS_TRANSFER_COST)); - - let transfers = deploy_info.transfers; + let transfers = execution_result.transfers(); assert_eq!(transfers.len(), 1); - let transfer = builder - .get_transfer(transfers[0]) - .expect("should have transfer"); + let Transfer::V2(transfer) = transfers[0].clone() else { + panic!("wrong transfer variant"); + }; - assert_eq!(transfer.deploy_hash, deploy_hash); - assert_eq!(transfer.from, *DEFAULT_ACCOUNT_ADDR); + 
assert_eq!(transfer.transaction_hash, txn_hash); + assert_eq!( + transfer.from, + InitiatorAddr::AccountHash(*DEFAULT_ACCOUNT_ADDR) + ); assert_eq!(transfer.to, Some(*ALICE_ADDR)); assert_eq!(transfer.source, default_account.main_purse()); assert_eq!(transfer.target, alice_attenuated_main_purse); assert_eq!(transfer.amount, *TRANSFER_AMOUNT_1); - assert_eq!(transfer.gas, U512::zero()); - assert_eq!(transfer.id, id); + assert_eq!( + transfer.gas, + Gas::from( + builder + .chainspec() + .system_costs_config + .mint_costs() + .transfer + ) + ); + assert_eq!(transfer.id, Some(id)); } #[ignore] #[test] fn should_record_wasm_transfer() { - let mut builder = InMemoryWasmTestBuilder::default(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); let transfer_request = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, @@ -124,58 +126,59 @@ fn should_record_wasm_transfer() { ) .build(); - let deploy_hash = { - let deploy_items: Vec = transfer_request - .deploys() - .iter() - .map(|deploy_item| deploy_item.deploy_hash) - .collect(); - deploy_items[0] - }; + let txn_hash = transfer_request.session.transaction_hash; builder.exec(transfer_request).commit().expect_success(); let default_account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) + .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR) .expect("should have default account"); let alice_account = builder - .get_account(*ALICE_ADDR) + .get_entity_by_account_hash(*ALICE_ADDR) .expect("should have Alice's account"); let alice_attenuated_main_purse = alice_account .main_purse() .with_access_rights(AccessRights::ADD); - let deploy_info = builder - .get_deploy_info(deploy_hash) - .expect("should have deploy info"); + let execution_result = builder + .get_last_exec_result() + .expect("Expected execution results."); - assert_eq!(deploy_info.deploy_hash, deploy_hash); - assert_eq!(deploy_info.from, *DEFAULT_ACCOUNT_ADDR); 
- assert_eq!(deploy_info.source, default_account.main_purse()); - assert_ne!(deploy_info.gas, U512::zero()); - - let transfers = deploy_info.transfers; + assert_ne!(execution_result.consumed(), Gas::zero()); + let transfers = execution_result.transfers(); assert_eq!(transfers.len(), 1); - let transfer = builder - .get_transfer(transfers[0]) - .expect("should have transfer"); + let Transfer::V2(transfer) = transfers[0].clone() else { + panic!("wrong transfer variant"); + }; - assert_eq!(transfer.deploy_hash, deploy_hash); - assert_eq!(transfer.from, *DEFAULT_ACCOUNT_ADDR); + assert_eq!(transfer.transaction_hash, txn_hash); + assert_eq!( + transfer.from, + InitiatorAddr::AccountHash(*DEFAULT_ACCOUNT_ADDR) + ); assert_eq!(transfer.source, default_account.main_purse()); assert_eq!(transfer.target, alice_attenuated_main_purse); assert_eq!(transfer.amount, *TRANSFER_AMOUNT_1); - assert_eq!(transfer.gas, U512::zero()) // TODO + assert_eq!( + transfer.gas, + Gas::from( + builder + .chainspec() + .system_costs_config + .mint_costs() + .transfer + ) + ) } #[ignore] #[test] fn should_record_wasm_transfer_with_id() { - let mut builder = InMemoryWasmTestBuilder::default(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); let id = Some(0); @@ -190,63 +193,60 @@ fn should_record_wasm_transfer_with_id() { ) .build(); - let deploy_hash = { - let deploy_items: Vec = transfer_request - .deploys() - .iter() - .map(|deploy_item| deploy_item.deploy_hash) - .collect(); - deploy_items[0] - }; + let txn_hash = transfer_request.session.transaction_hash; builder.exec(transfer_request).commit().expect_success(); let default_account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) + .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR) .expect("should have default account"); let alice_account = builder - .get_account(*ALICE_ADDR) + .get_entity_by_account_hash(*ALICE_ADDR) .expect("should 
have Alice's account"); let alice_attenuated_main_purse = alice_account .main_purse() .with_access_rights(AccessRights::ADD); - let deploy_info = builder - .get_deploy_info(deploy_hash) - .expect("should have deploy info"); - - assert_eq!(deploy_info.deploy_hash, deploy_hash); - assert_eq!(deploy_info.from, *DEFAULT_ACCOUNT_ADDR); - assert_eq!(deploy_info.source, default_account.main_purse()); - assert_ne!(deploy_info.gas, U512::zero()); + let execution_result = builder + .get_last_exec_result() + .expect("Expected execution results."); - let transfers = deploy_info.transfers; + assert_ne!(execution_result.consumed(), Gas::zero()); + let transfers = execution_result.transfers(); assert_eq!(transfers.len(), 1); - let transfer = builder - .get_transfer(transfers[0]) - .expect("should have transfer"); + let Transfer::V2(transfer) = transfers[0].clone() else { + panic!("wrong transfer variant"); + }; - assert_eq!(transfer.deploy_hash, deploy_hash); - assert_eq!(transfer.from, *DEFAULT_ACCOUNT_ADDR); + assert_eq!(transfer.transaction_hash, txn_hash); + assert_eq!( + transfer.from, + InitiatorAddr::AccountHash(*DEFAULT_ACCOUNT_ADDR) + ); assert_eq!(transfer.source, default_account.main_purse()); assert_eq!(transfer.target, alice_attenuated_main_purse); assert_eq!(transfer.amount, *TRANSFER_AMOUNT_1); - assert_eq!(transfer.gas, U512::zero()); // TODO + assert_eq!( + transfer.gas, + Gas::from( + builder + .chainspec() + .system_costs_config + .mint_costs() + .transfer + ) + ); assert_eq!(transfer.id, id); } #[ignore] #[test] fn should_record_wasm_transfers() { - let mut builder = InMemoryWasmTestBuilder::default(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); - - let default_account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) - .expect("should have default account"); + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); let alice_id = Some(0); let bob_id = Some(1); @@ -264,37 +264,30 @@ fn 
should_record_wasm_transfers() { *DEFAULT_ACCOUNT_ADDR, CONTRACT_TRANSFER_PURSE_TO_ACCOUNTS, runtime_args! { - TRANSFER_ARG_SOURCE => default_account.main_purse(), + mint::ARG_AMOUNT => *TRANSFER_AMOUNT_1 + *TRANSFER_AMOUNT_2 + *TRANSFER_AMOUNT_3, TRANSFER_ARG_TARGETS => targets, }, ) .build(); - let deploy_hash = { - let deploy_items: Vec = transfer_request - .deploys() - .iter() - .map(|deploy_item| deploy_item.deploy_hash) - .collect(); - deploy_items[0] - }; + let txn_hash = transfer_request.session.transaction_hash; builder.exec(transfer_request).commit().expect_success(); let default_account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) + .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR) .expect("should have default account"); let alice_account = builder - .get_account(*ALICE_ADDR) + .get_entity_by_account_hash(*ALICE_ADDR) .expect("should have Alice's account"); let bob_account = builder - .get_account(*BOB_ADDR) + .get_entity_by_account_hash(*BOB_ADDR) .expect("should have Bob's account"); let carol_account = builder - .get_account(*CAROL_ADDR) + .get_entity_by_account_hash(*CAROL_ADDR) .expect("should have Carol's account"); let alice_attenuated_main_purse = alice_account @@ -309,92 +302,103 @@ fn should_record_wasm_transfers() { .main_purse() .with_access_rights(AccessRights::ADD); - let deploy_info = builder - .get_deploy_info(deploy_hash) - .expect("should have deploy info"); - - assert_eq!(deploy_info.deploy_hash, deploy_hash); - assert_eq!(deploy_info.from, *DEFAULT_ACCOUNT_ADDR); - assert_eq!(deploy_info.source, default_account.main_purse()); - assert_ne!(deploy_info.gas, U512::zero()); + let execution_result = builder + .get_last_exec_result() + .expect("Expected execution results."); + assert_ne!(execution_result.consumed(), Gas::zero()); const EXPECTED_LENGTH: usize = 3; - let transfer_addrs = deploy_info.transfers; - assert_eq!(transfer_addrs.len(), EXPECTED_LENGTH); + assert_eq!(execution_result.transfers().len(), EXPECTED_LENGTH); 
assert_eq!( - transfer_addrs + execution_result + .transfers() .iter() .cloned() - .collect::>() + .collect::>() .len(), EXPECTED_LENGTH ); let transfers: BTreeSet = { let mut tmp = BTreeSet::new(); - for transfer_addr in transfer_addrs { - let transfer = builder - .get_transfer(transfer_addr) - .expect("should have transfer"); - tmp.insert(transfer); + for transfer in execution_result.transfers() { + tmp.insert(transfer.clone()); } tmp }; assert_eq!(transfers.len(), EXPECTED_LENGTH); - assert!(transfers.contains(&Transfer { - deploy_hash, - from: *DEFAULT_ACCOUNT_ADDR, + assert!(transfers.contains(&Transfer::V2(TransferV2 { + transaction_hash: txn_hash, + from: InitiatorAddr::AccountHash(*DEFAULT_ACCOUNT_ADDR), to: Some(*ALICE_ADDR), source: default_account.main_purse(), target: alice_attenuated_main_purse, amount: *TRANSFER_AMOUNT_1, - gas: U512::zero(), + gas: Gas::from( + builder + .chainspec() + .system_costs_config + .mint_costs() + .transfer + ), id: alice_id, - })); + }))); - assert!(transfers.contains(&Transfer { - deploy_hash, - from: *DEFAULT_ACCOUNT_ADDR, + assert!(transfers.contains(&Transfer::V2(TransferV2 { + transaction_hash: txn_hash, + from: InitiatorAddr::AccountHash(*DEFAULT_ACCOUNT_ADDR), to: Some(*BOB_ADDR), source: default_account.main_purse(), target: bob_attenuated_main_purse, amount: *TRANSFER_AMOUNT_2, - gas: U512::zero(), + gas: Gas::from( + builder + .chainspec() + .system_costs_config + .mint_costs() + .transfer + ), id: bob_id, - })); + }))); - assert!(transfers.contains(&Transfer { - deploy_hash, - from: *DEFAULT_ACCOUNT_ADDR, + assert!(transfers.contains(&Transfer::V2(TransferV2 { + transaction_hash: txn_hash, + from: InitiatorAddr::AccountHash(*DEFAULT_ACCOUNT_ADDR), to: Some(*CAROL_ADDR), source: default_account.main_purse(), target: carol_attenuated_main_purse, amount: *TRANSFER_AMOUNT_3, - gas: U512::zero(), + gas: Gas::from( + builder + .chainspec() + .system_costs_config + .mint_costs() + .transfer + ), id: carol_id, - })); + 
}))); } #[ignore] #[test] fn should_record_wasm_transfers_with_subcall() { - let mut builder = InMemoryWasmTestBuilder::default(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); let alice_id = Some(0); let bob_id = Some(1); let carol_id = Some(2); - let default_account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) - .expect("should have default account"); + let total_transfer_amount = *TRANSFER_AMOUNT_1 + *TRANSFER_AMOUNT_2 + *TRANSFER_AMOUNT_3; let store_request = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, CONTRACT_TRANSFER_PURSE_TO_ACCOUNTS_STORED, - runtime_args! {}, + runtime_args! { + mint::ARG_AMOUNT => total_transfer_amount, + }, ) .build(); @@ -410,34 +414,49 @@ fn should_record_wasm_transfers_with_subcall() { *DEFAULT_ACCOUNT_ADDR, CONTRACT_TRANSFER_PURSE_TO_ACCOUNTS_SUBCALL, runtime_args! { - TRANSFER_ARG_SOURCE => default_account.main_purse(), + mint::ARG_AMOUNT => total_transfer_amount, TRANSFER_ARG_TARGETS => targets, }, ) .build(); - let transfer_deploy_hash = { - let deploy_items: Vec = transfer_request - .deploys() - .iter() - .map(|deploy_item| deploy_item.deploy_hash) - .collect(); - deploy_items[0] - }; + let transfer_txn_hash = transfer_request.session.transaction_hash; builder.exec(store_request).commit().expect_success(); builder.exec(transfer_request).commit().expect_success(); + let default_account = builder + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .expect("should have default account"); + + let entity_hash = default_account + .named_keys() + .get(HASH_KEY_NAME) + .unwrap() + .into_entity_hash() + .expect("should have contract hash"); + + let contract = builder + .get_entity_with_named_keys_by_entity_hash(entity_hash) + .expect("should have stored contract"); + + let contract_purse = contract + .named_keys() + .get(PURSE_NAME) + .unwrap() + .into_uref() + .expect("should have purse"); + 
let alice_account = builder - .get_account(*ALICE_ADDR) + .get_entity_by_account_hash(*ALICE_ADDR) .expect("should have Alice's account"); let bob_account = builder - .get_account(*BOB_ADDR) + .get_entity_by_account_hash(*BOB_ADDR) .expect("should have Bob's account"); let carol_account = builder - .get_account(*CAROL_ADDR) + .get_entity_by_account_hash(*CAROL_ADDR) .expect("should have Carol's account"); let alice_attenuated_main_purse = alice_account @@ -452,73 +471,175 @@ fn should_record_wasm_transfers_with_subcall() { .main_purse() .with_access_rights(AccessRights::ADD); - let deploy_info = builder - .get_deploy_info(transfer_deploy_hash) - .expect("should have deploy info"); + let execution_result = builder + .get_last_exec_result() + .expect("Expected execution results."); - assert_eq!(deploy_info.deploy_hash, transfer_deploy_hash); - assert_eq!(deploy_info.from, *DEFAULT_ACCOUNT_ADDR); - assert_eq!(deploy_info.source, default_account.main_purse()); - assert_ne!(deploy_info.gas, U512::zero()); + /* + assert_eq!(txn_info.transaction_hash, transfer_txn_hash); + assert_eq!( + txn_info.from, + InitiatorAddr::AccountHash(*DEFAULT_ACCOUNT_ADDR) + ); + assert_eq!(txn_info.source, default_account.main_purse()); + */ + assert_ne!(execution_result.consumed(), Gas::zero()); const EXPECTED_LENGTH: usize = 6; - let transfer_addrs = deploy_info.transfers; - assert_eq!(transfer_addrs.len(), EXPECTED_LENGTH); + assert_eq!(execution_result.transfers().len(), EXPECTED_LENGTH); assert_eq!( - transfer_addrs + execution_result + .transfers() .iter() .cloned() - .collect::>() + .collect::>() .len(), EXPECTED_LENGTH ); let transfer_counts: BTreeMap = { let mut tmp = BTreeMap::new(); - for transfer_addr in transfer_addrs { - let transfer = builder - .get_transfer(transfer_addr) - .expect("should have transfer"); - tmp.entry(transfer).and_modify(|i| *i += 1).or_insert(1); + for transfer in execution_result.transfers() { + tmp.entry(transfer.clone()) + .and_modify(|i| *i += 1) + 
.or_insert(1); } tmp }; - let expected_alice = Transfer { - deploy_hash: transfer_deploy_hash, - from: *DEFAULT_ACCOUNT_ADDR, + let session_expected_alice = Transfer::V2(TransferV2 { + transaction_hash: transfer_txn_hash, + from: InitiatorAddr::AccountHash(*DEFAULT_ACCOUNT_ADDR), to: Some(*ALICE_ADDR), source: default_account.main_purse(), target: alice_attenuated_main_purse, amount: *TRANSFER_AMOUNT_1, - gas: U512::zero(), + gas: Gas::from( + builder + .chainspec() + .system_costs_config + .mint_costs() + .transfer, + ), id: alice_id, - }; + }); - let expected_bob = Transfer { - deploy_hash: transfer_deploy_hash, - from: *DEFAULT_ACCOUNT_ADDR, + let session_expected_bob = Transfer::V2(TransferV2 { + transaction_hash: transfer_txn_hash, + from: InitiatorAddr::AccountHash(*DEFAULT_ACCOUNT_ADDR), to: Some(*BOB_ADDR), source: default_account.main_purse(), target: bob_attenuated_main_purse, amount: *TRANSFER_AMOUNT_2, - gas: U512::zero(), + gas: Gas::from( + builder + .chainspec() + .system_costs_config + .mint_costs() + .transfer, + ), id: bob_id, - }; + }); - let expected_carol = Transfer { - deploy_hash: transfer_deploy_hash, - from: *DEFAULT_ACCOUNT_ADDR, + let session_expected_carol = Transfer::V2(TransferV2 { + transaction_hash: transfer_txn_hash, + from: InitiatorAddr::AccountHash(*DEFAULT_ACCOUNT_ADDR), to: Some(*CAROL_ADDR), source: default_account.main_purse(), target: carol_attenuated_main_purse, amount: *TRANSFER_AMOUNT_3, - gas: U512::zero(), + gas: Gas::from( + builder + .chainspec() + .system_costs_config + .mint_costs() + .transfer, + ), id: carol_id, - }; + }); + + const SESSION_EXPECTED_COUNT: Option = Some(1); + for (i, expected) in [ + session_expected_alice, + session_expected_bob, + session_expected_carol, + ] + .iter() + .enumerate() + { + assert_eq!( + transfer_counts.get(expected).cloned(), + SESSION_EXPECTED_COUNT, + "transfer {} has unexpected value", + i + ); + } + + let stored_expected_alice = Transfer::V2(TransferV2 { + transaction_hash: 
transfer_txn_hash, + from: InitiatorAddr::AccountHash(*DEFAULT_ACCOUNT_ADDR), + to: Some(*ALICE_ADDR), + source: contract_purse, + target: alice_attenuated_main_purse, + amount: *TRANSFER_AMOUNT_1, + gas: Gas::from( + builder + .chainspec() + .system_costs_config + .mint_costs() + .transfer, + ), + id: alice_id, + }); - const EXPECTED_COUNT: Option = Some(2); - for expected in &[expected_alice, expected_bob, expected_carol] { - assert_eq!(transfer_counts.get(&expected).cloned(), EXPECTED_COUNT); + let stored_expected_bob = Transfer::V2(TransferV2 { + transaction_hash: transfer_txn_hash, + from: InitiatorAddr::AccountHash(*DEFAULT_ACCOUNT_ADDR), + to: Some(*BOB_ADDR), + source: contract_purse, + target: bob_attenuated_main_purse, + amount: *TRANSFER_AMOUNT_2, + gas: Gas::from( + builder + .chainspec() + .system_costs_config + .mint_costs() + .transfer, + ), + id: bob_id, + }); + + let stored_expected_carol = Transfer::V2(TransferV2 { + transaction_hash: transfer_txn_hash, + from: InitiatorAddr::AccountHash(*DEFAULT_ACCOUNT_ADDR), + to: Some(*CAROL_ADDR), + source: contract_purse, + target: carol_attenuated_main_purse, + amount: *TRANSFER_AMOUNT_3, + gas: Gas::from( + builder + .chainspec() + .system_costs_config + .mint_costs() + .transfer, + ), + id: carol_id, + }); + + const STORED_EXPECTED_COUNT: Option = Some(1); + for (i, expected) in [ + stored_expected_alice, + stored_expected_bob, + stored_expected_carol, + ] + .iter() + .enumerate() + { + assert_eq!( + transfer_counts.get(expected).cloned(), + STORED_EXPECTED_COUNT, + "transfer {} has unexpected value", + i + ); } } diff --git a/execution_engine_testing/tests/src/test/deploy/stored_contracts.rs b/execution_engine_testing/tests/src/test/deploy/stored_contracts.rs index eefda83a15..36ac1ea299 100644 --- a/execution_engine_testing/tests/src/test/deploy/stored_contracts.rs +++ b/execution_engine_testing/tests/src/test/deploy/stored_contracts.rs @@ -1,21 +1,13 @@ -use std::collections::BTreeMap; - +use 
assert_matches::assert_matches; use casper_engine_test_support::{ - internal::{ - AdditiveMapDiff, DeployItemBuilder, ExecuteRequestBuilder, InMemoryWasmTestBuilder, - UpgradeRequestBuilder, WasmTestBuilder, DEFAULT_ACCOUNT_KEY, DEFAULT_PAYMENT, - DEFAULT_RUN_GENESIS_REQUEST, - }, - DEFAULT_ACCOUNT_ADDR, DEFAULT_ACCOUNT_INITIAL_BALANCE, -}; -use casper_execution_engine::{ - shared::{account::Account, stored_value::StoredValue, transform::Transform}, - storage::global_state::in_memory::InMemoryGlobalState, + DeployItemBuilder, EntityWithNamedKeys, ExecuteRequestBuilder, LmdbWasmTestBuilder, + UpgradeRequestBuilder, DEFAULT_ACCOUNT_ADDR, DEFAULT_ACCOUNT_INITIAL_BALANCE, + DEFAULT_ACCOUNT_KEY, DEFAULT_PAYMENT, LOCAL_GENESIS_REQUEST, }; +use casper_execution_engine::{engine_state::Error, execution::ExecError}; use casper_types::{ - account::AccountHash, - contracts::{ContractVersion, CONTRACT_INITIAL_VERSION, DEFAULT_ENTRY_POINT_NAME}, - runtime_args, ContractHash, EraId, Key, ProtocolVersion, RuntimeArgs, U512, + account::AccountHash, runtime_args, EntityVersion, EraId, HashAddr, ProtocolVersion, + RuntimeArgs, ENTITY_INITIAL_VERSION, U512, }; const ACCOUNT_1_ADDR: AccountHash = AccountHash::new([42u8; 32]); @@ -23,20 +15,14 @@ const DEFAULT_ACTIVATION_POINT: EraId = EraId::new(1); const DO_NOTHING_NAME: &str = "do_nothing"; const DO_NOTHING_CONTRACT_PACKAGE_HASH_NAME: &str = "do_nothing_package_hash"; const DO_NOTHING_CONTRACT_HASH_NAME: &str = "do_nothing_hash"; -const INITIAL_VERSION: ContractVersion = CONTRACT_INITIAL_VERSION; +const INITIAL_VERSION: EntityVersion = ENTITY_INITIAL_VERSION; const ENTRY_FUNCTION_NAME: &str = "delegate"; -const PROTOCOL_VERSION: ProtocolVersion = ProtocolVersion::V1_0_0; +const PROTOCOL_VERSION: ProtocolVersion = ProtocolVersion::V2_0_0; const STORED_PAYMENT_CONTRACT_NAME: &str = "test_payment_stored.wasm"; const STORED_PAYMENT_CONTRACT_HASH_NAME: &str = "test_payment_hash"; const STORED_PAYMENT_CONTRACT_PACKAGE_HASH_NAME: &str = 
"test_payment_package_hash"; -const PAY: &str = "pay"; -const TRANSFER: &str = "transfer"; +const PAY_ENTRYPOINT: &str = "pay"; const TRANSFER_PURSE_TO_ACCOUNT_CONTRACT_NAME: &str = "transfer_purse_to_account"; -const TRANSFER_PURSE_TO_ACCOUNT_STORED_HASH_KEY_NAME: &str = "transfer_purse_to_account_hash"; -// Currently Error enum that holds this variant is private and can't be used otherwise to compare -// message -const EXPECTED_ERROR_MESSAGE: &str = "IncompatibleProtocolMajorVersion { expected: 2, actual: 1 }"; -const EXPECTED_VERSION_ERROR_MESSAGE: &str = "InvalidContractVersion(ContractVersionKey(2, 1))"; const ARG_TARGET: &str = "target"; const ARG_AMOUNT: &str = "amount"; @@ -49,9 +35,9 @@ fn make_upgrade_request(new_protocol_version: ProtocolVersion) -> UpgradeRequest .with_activation_point(DEFAULT_ACTIVATION_POINT) } -fn store_payment_to_account_context( - builder: &mut WasmTestBuilder, -) -> (Account, ContractHash) { +fn install_custom_payment( + builder: &mut LmdbWasmTestBuilder, +) -> (EntityWithNamedKeys, HashAddr, U512) { // store payment contract let exec_request = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, @@ -60,22 +46,23 @@ fn store_payment_to_account_context( ) .build(); - builder.exec_commit_finish(exec_request); + builder.exec(exec_request).commit(); let default_account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) .expect("should have account"); // check account named keys - let hash = default_account + let package_hash = default_account .named_keys() .get(STORED_PAYMENT_CONTRACT_PACKAGE_HASH_NAME) .expect("key should exist") - .into_hash() - .expect("should be a hash") - .into(); + .into_hash_addr() + .expect("should be a hash"); - (default_account, hash) + let exec_cost = builder.get_last_exec_result().unwrap().consumed().value(); + + (default_account, package_hash, exec_cost) } #[ignore] @@ -88,28 +75,26 @@ fn should_exec_non_stored_code() { let 
payment_purse_amount = *DEFAULT_PAYMENT; let transferred_amount = 1; - let exec_request = { - let deploy = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_session_code( - &format!("{}.wasm", TRANSFER_PURSE_TO_ACCOUNT_CONTRACT_NAME), - runtime_args! { - ARG_TARGET => account_1_account_hash, - ARG_AMOUNT => U512::from(transferred_amount) - }, - ) - .with_empty_payment_bytes(runtime_args! { - ARG_AMOUNT => payment_purse_amount, - }) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_KEY]) - .with_deploy_hash([1; 32]) - .build(); - - ExecuteRequestBuilder::new().push_deploy(deploy).build() - }; - - let mut builder = InMemoryWasmTestBuilder::default(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + let deploy_item = DeployItemBuilder::new() + .with_address(*DEFAULT_ACCOUNT_ADDR) + .with_session_code( + format!("{}.wasm", TRANSFER_PURSE_TO_ACCOUNT_CONTRACT_NAME), + runtime_args! { + ARG_TARGET => account_1_account_hash, + ARG_AMOUNT => U512::from(transferred_amount) + }, + ) + .with_standard_payment(runtime_args! 
{ + ARG_AMOUNT => payment_purse_amount, + }) + .with_authorization_keys(&[*DEFAULT_ACCOUNT_KEY]) + .with_deploy_hash([1; 32]) + .build(); + + let exec_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); + + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); let proposer_reward_starting_balance = builder.get_proposer_purse_balance(); @@ -118,7 +103,7 @@ fn should_exec_non_stored_code() { let transaction_fee = builder.get_proposer_purse_balance() - proposer_reward_starting_balance; let default_account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) + .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR) .expect("should get genesis account"); let modified_balance: U512 = builder.get_purse_balance(default_account.main_purse()); @@ -139,210 +124,132 @@ fn should_exec_non_stored_code() { #[ignore] #[test] -fn should_exec_stored_code_by_hash() { +fn should_fail_if_calling_non_existent_entry_point() { let payment_purse_amount = *DEFAULT_PAYMENT; - // genesis - let mut builder = InMemoryWasmTestBuilder::default(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); - - // store payment - let proposer_reward_starting_balance_alpha = builder.get_proposer_purse_balance(); - - let (default_account, hash) = store_payment_to_account_context(&mut builder); - - // verify stored contract functions as expected by checking all the maths - - let (motes_alpha, modified_balance_alpha) = { - // get modified balance - let modified_balance_alpha: U512 = builder.get_purse_balance(default_account.main_purse()); - - let transaction_fee_alpha = - builder.get_proposer_purse_balance() - proposer_reward_starting_balance_alpha; - (transaction_fee_alpha, modified_balance_alpha) - }; - - let transferred_amount = 1; - - // next make another deploy that USES stored payment logic - - let proposer_reward_starting_balance_bravo = builder.get_proposer_purse_balance(); + let mut builder = LmdbWasmTestBuilder::default(); + 
builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); - { - let exec_request_stored_payment = { - let account_1_account_hash = ACCOUNT_1_ADDR; - let deploy = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_session_code( - &format!("{}.wasm", TRANSFER_PURSE_TO_ACCOUNT_CONTRACT_NAME), - runtime_args! { ARG_TARGET => account_1_account_hash, ARG_AMOUNT => U512::from(transferred_amount) }, - ) - .with_stored_versioned_payment_contract_by_hash( - hash.value(), - Some(CONTRACT_INITIAL_VERSION), - PAY, - runtime_args! { - ARG_AMOUNT => payment_purse_amount, - }, - ) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_KEY]) - .with_deploy_hash([2; 32]) - .build(); - - ExecuteRequestBuilder::new().push_deploy(deploy).build() - }; - - builder.exec_commit_finish(exec_request_stored_payment); - } + // first, store payment contract with entry point named "pay" + let exec_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + STORED_PAYMENT_CONTRACT_NAME, + RuntimeArgs::default(), + ) + .build(); - let (motes_bravo, modified_balance_bravo) = { - let modified_balance_bravo: U512 = builder.get_purse_balance(default_account.main_purse()); + builder.exec(exec_request).commit(); - let transaction_fee_bravo = - builder.get_proposer_purse_balance() - proposer_reward_starting_balance_bravo; + let default_account = builder + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .expect("must have contract associated with default account"); + let stored_payment_contract_hash = default_account + .named_keys() + .get(STORED_PAYMENT_CONTRACT_HASH_NAME) + .expect("should have standard_payment named key") + .into_entity_hash_addr() + .expect("standard_payment should be an uref"); - (transaction_fee_bravo, modified_balance_bravo) - }; + // next make another deploy that attempts to use the stored payment logic + // but passing the name for an entry point that does not exist. 
+ let deploy_item = DeployItemBuilder::new() + .with_address(*DEFAULT_ACCOUNT_ADDR) + .with_session_code(format!("{}.wasm", DO_NOTHING_NAME), RuntimeArgs::default()) + .with_stored_payment_hash( + stored_payment_contract_hash.into(), + "electric-boogaloo", + runtime_args! { ARG_AMOUNT => payment_purse_amount }, + ) + .with_authorization_keys(&[*DEFAULT_ACCOUNT_KEY]) + .with_deploy_hash([1; 32]) + .build(); - let initial_balance: U512 = U512::from(DEFAULT_ACCOUNT_INITIAL_BALANCE); + let exec_request_stored_payment = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); - assert!( - modified_balance_alpha < initial_balance, - "balance should be less than initial balance" - ); + builder.exec(exec_request_stored_payment).commit(); assert!( - modified_balance_bravo < modified_balance_alpha, - "second modified balance should be less than first modified balance" + builder.is_error(), + "calling a non-existent entry point should not work" ); - let tally = motes_alpha + motes_bravo + U512::from(transferred_amount) + modified_balance_bravo; + let expected_error = Error::Exec(ExecError::NoSuchMethod("electric-boogaloo".to_string())); - assert_eq!( - initial_balance, tally, - "no net resources should be gained or lost post-distribution" - ); + builder.assert_error(expected_error); } #[ignore] #[test] -fn should_exec_stored_code_by_named_hash() { - let payment_purse_amount = *DEFAULT_PAYMENT; +fn should_exec_stored_code_by_hash() { + let default_payment = *DEFAULT_PAYMENT; // genesis - let mut builder = InMemoryWasmTestBuilder::default(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); // store payment - let proposer_reward_starting_balance_alpha = builder.get_proposer_purse_balance(); - - let (default_account, _) = store_payment_to_account_context(&mut builder); - - // verify stored contract functions as expected by checking all the maths - - let 
(motes_alpha, modified_balance_alpha) = { - // get modified balance - let modified_balance_alpha: U512 = builder.get_purse_balance(default_account.main_purse()); - - // get cost - let transaction_fee_alpha = - builder.get_proposer_purse_balance() - proposer_reward_starting_balance_alpha; + let (_, custom_payment_package_hash, _) = install_custom_payment(&mut builder); - (transaction_fee_alpha, modified_balance_alpha) - }; - - let transferred_amount = 1; + let transferred_amount = U512::one(); // next make another deploy that USES stored payment logic - let proposer_reward_starting_balance_bravo = builder.get_proposer_purse_balance(); { - let exec_request_stored_payment = { - let account_1_account_hash = ACCOUNT_1_ADDR; - let deploy = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_session_code( - &format!("{}.wasm", TRANSFER_PURSE_TO_ACCOUNT_CONTRACT_NAME), - runtime_args! { ARG_TARGET => account_1_account_hash, ARG_AMOUNT => U512::from(transferred_amount) }, - ) - .with_stored_versioned_payment_contract_by_name( - STORED_PAYMENT_CONTRACT_PACKAGE_HASH_NAME, - Some(CONTRACT_INITIAL_VERSION), - PAY, - runtime_args! { - ARG_AMOUNT => payment_purse_amount, - }, - ) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_KEY]) - .with_deploy_hash([2; 32]) - .build(); + let deploy_item = DeployItemBuilder::new() + .with_address(*DEFAULT_ACCOUNT_ADDR) + .with_stored_versioned_payment_contract_by_hash( + custom_payment_package_hash, + Some(ENTITY_INITIAL_VERSION), + PAY_ENTRYPOINT, + runtime_args! { + ARG_AMOUNT => default_payment, + }, + ) + .with_session_code( + format!("{}.wasm", TRANSFER_PURSE_TO_ACCOUNT_CONTRACT_NAME), + runtime_args! 
{ ARG_TARGET => ACCOUNT_1_ADDR, ARG_AMOUNT => transferred_amount }, + ) + .with_authorization_keys(&[*DEFAULT_ACCOUNT_KEY]) + .with_deploy_hash([2; 32]) + .build(); - ExecuteRequestBuilder::new().push_deploy(deploy).build() - }; + let transfer_using_stored_payment = + ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); - builder.exec_commit_finish(exec_request_stored_payment); + builder.exec(transfer_using_stored_payment).expect_failure(); } - let (motes_bravo, modified_balance_bravo) = { - let modified_balance_bravo: U512 = builder.get_purse_balance(default_account.main_purse()); - - let transaction_fee_bravo = - builder.get_proposer_purse_balance() - proposer_reward_starting_balance_bravo; - - (transaction_fee_bravo, modified_balance_bravo) - }; - - let initial_balance: U512 = U512::from(DEFAULT_ACCOUNT_INITIAL_BALANCE); - - assert!( - modified_balance_alpha < initial_balance, - "balance should be less than initial balance" - ); - - assert!( - modified_balance_bravo < modified_balance_alpha, - "second modified balance should be less than first modified balance" - ); + let error = builder.get_error().unwrap(); - let tally = motes_alpha + motes_bravo + U512::from(transferred_amount) + modified_balance_bravo; - - assert_eq!( - initial_balance, tally, - "no net resources should be gained or lost post-distribution" - ); + assert_matches!(error, Error::Exec(ExecError::ForgedReference(_))) } #[ignore] #[test] -fn should_exec_payment_and_session_stored_code() { +fn should_not_transfer_above_balance_using_stored_payment_code_by_hash() { let payment_purse_amount = *DEFAULT_PAYMENT; // genesis - let mut builder = InMemoryWasmTestBuilder::default(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); // store payment - // get cost - let proposer_reward_starting_balance_alpha = builder.get_proposer_purse_balance(); - - store_payment_to_account_context(&mut 
builder); + let (default_account, hash, _) = install_custom_payment(&mut builder); + let starting_balance = builder.get_purse_balance(default_account.main_purse()); - // verify stored contract functions as expected by checking all the maths - - let motes_alpha = builder.get_proposer_purse_balance() - proposer_reward_starting_balance_alpha; + let transferred_amount = starting_balance - *DEFAULT_PAYMENT + U512::one(); - // next store transfer contract - let exec_request_store_transfer = { - let deploy = DeployItemBuilder::new() + let account_1_account_hash = ACCOUNT_1_ADDR; + let deploy_item = DeployItemBuilder::new() .with_address(*DEFAULT_ACCOUNT_ADDR) .with_session_code( - &format!("{}_stored.wasm", TRANSFER_PURSE_TO_ACCOUNT_CONTRACT_NAME), - RuntimeArgs::default(), + format!("{}.wasm", TRANSFER_PURSE_TO_ACCOUNT_CONTRACT_NAME), + runtime_args! { ARG_TARGET => account_1_account_hash, ARG_AMOUNT => transferred_amount }, ) - .with_stored_versioned_payment_contract_by_name( - STORED_PAYMENT_CONTRACT_PACKAGE_HASH_NAME, - Some(CONTRACT_INITIAL_VERSION), - PAY, + .with_stored_versioned_payment_contract_by_hash( + hash, + Some(ENTITY_INITIAL_VERSION), + PAY_ENTRYPOINT, runtime_args! 
{ ARG_AMOUNT => payment_purse_amount, }, @@ -351,430 +258,112 @@ fn should_exec_payment_and_session_stored_code() { .with_deploy_hash([2; 32]) .build(); - ExecuteRequestBuilder::new().push_deploy(deploy).build() - }; - - let proposer_reward_starting_balance_bravo = builder.get_proposer_purse_balance(); + let exec_request_stored_payment = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); builder - .exec(exec_request_store_transfer) - .commit() - .expect_success(); - - let motes_bravo = builder.get_proposer_purse_balance() - proposer_reward_starting_balance_bravo; - - let transferred_amount = 1; - - // next make another deploy that USES stored payment logic & stored transfer - // logic - let exec_request_stored_only = { - let deploy = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_stored_versioned_contract_by_name( - TRANSFER_PURSE_TO_ACCOUNT_CONTRACT_NAME, - Some(CONTRACT_INITIAL_VERSION), - TRANSFER, - runtime_args! { - ARG_TARGET => ACCOUNT_1_ADDR, - ARG_AMOUNT => U512::from(transferred_amount), - }, - ) - .with_stored_versioned_payment_contract_by_name( - STORED_PAYMENT_CONTRACT_PACKAGE_HASH_NAME, - Some(CONTRACT_INITIAL_VERSION), - PAY, - runtime_args! 
{ - ARG_AMOUNT => payment_purse_amount, - }, - ) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_KEY]) - .with_deploy_hash([3; 32]) - .build(); - - ExecuteRequestBuilder::new().push_deploy(deploy).build() - }; - - let proposer_reward_starting_balance = builder.get_proposer_purse_balance(); - - builder - .exec(exec_request_stored_only) - .commit() - .expect_success(); - - let motes_charlie = builder.get_proposer_purse_balance() - proposer_reward_starting_balance; - - let modified_balance: U512 = { - let default_account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) - .expect("should get genesis account"); - builder.get_purse_balance(default_account.main_purse()) - }; - - let initial_balance: U512 = U512::from(DEFAULT_ACCOUNT_INITIAL_BALANCE); + .exec(exec_request_stored_payment) + .expect_failure() + .commit(); - let tally = motes_alpha - + motes_bravo - + motes_charlie - + U512::from(transferred_amount) - + modified_balance; + let error = builder.get_error().unwrap(); - assert_eq!( - initial_balance, tally, - "no net resources should be gained or lost post-distribution" - ); + assert_matches!(error, Error::Exec(ExecError::ForgedReference(_))) } #[ignore] +#[allow(unused)] #[test] -fn should_have_equivalent_transforms_with_stored_contract_pointers() { - let account_1_account_hash = ACCOUNT_1_ADDR; +fn should_empty_account_using_stored_payment_code_by_hash() { let payment_purse_amount = *DEFAULT_PAYMENT; - let transferred_amount = 1; - let stored_transforms = { - let mut builder = InMemoryWasmTestBuilder::default(); - - let exec_request_1 = { - let store_transfer = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_session_code( - &format!("{}_stored.wasm", TRANSFER_PURSE_TO_ACCOUNT_CONTRACT_NAME), - RuntimeArgs::default(), - ) - .with_empty_payment_bytes(runtime_args! 
{ - ARG_AMOUNT => payment_purse_amount, - }) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_KEY]) - .with_deploy_hash([1; 32]) - .build(); - - ExecuteRequestBuilder::new() - .push_deploy(store_transfer) - .build() - }; + // genesis + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); - let exec_request_2 = { - let store_transfer = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_session_code(STORED_PAYMENT_CONTRACT_NAME, RuntimeArgs::default()) - .with_empty_payment_bytes(runtime_args! { - ARG_AMOUNT => payment_purse_amount, - }) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_KEY]) - .with_deploy_hash([2; 32]) - .build(); + // store payment - ExecuteRequestBuilder::new() - .push_deploy(store_transfer) - .build() - }; + let (default_account, hash, _) = install_custom_payment(&mut builder); + let starting_balance = builder.get_purse_balance(default_account.main_purse()); - builder - .run_genesis(&DEFAULT_RUN_GENESIS_REQUEST) - .exec(exec_request_1) - .expect_success() - .commit(); + // verify stored contract functions as expected by checking all the maths - builder.exec(exec_request_2).expect_success().commit(); + let transferred_amount = starting_balance - *DEFAULT_PAYMENT; - let call_stored_request = { - let deploy = DeployItemBuilder::new() + { + let account_1_account_hash = ACCOUNT_1_ADDR; + let deploy_item = DeployItemBuilder::new() .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_stored_session_named_key( - TRANSFER_PURSE_TO_ACCOUNT_STORED_HASH_KEY_NAME, - TRANSFER, - runtime_args! { ARG_TARGET => account_1_account_hash, ARG_AMOUNT => U512::from(transferred_amount) }, + .with_session_code( + format!("{}.wasm", TRANSFER_PURSE_TO_ACCOUNT_CONTRACT_NAME), + runtime_args! 
{ ARG_TARGET => account_1_account_hash, ARG_AMOUNT => transferred_amount }, ) - .with_stored_payment_named_key( - STORED_PAYMENT_CONTRACT_HASH_NAME, - PAY, + .with_stored_versioned_payment_contract_by_hash( + hash, + Some(ENTITY_INITIAL_VERSION), + PAY_ENTRYPOINT, runtime_args! { ARG_AMOUNT => payment_purse_amount, }, ) .with_authorization_keys(&[*DEFAULT_ACCOUNT_KEY]) - .with_deploy_hash([3; 32]) - .build(); - - ExecuteRequestBuilder::new().push_deploy(deploy).build() - }; - - builder - .exec(call_stored_request) - .expect_success() - .commit() - .get_transforms()[2] - .to_owned() - }; - - let provided_transforms = { - let do_nothing_request = |deploy_hash: [u8; 32]| { - let deploy = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_session_code(&format!("{}.wasm", DO_NOTHING_NAME), RuntimeArgs::default()) - .with_empty_payment_bytes(runtime_args! { ARG_AMOUNT => payment_purse_amount, }) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_KEY]) - .with_deploy_hash(deploy_hash) + .with_deploy_hash([2; 32]) .build(); - ExecuteRequestBuilder::new().push_deploy(deploy).build() - }; - - let provided_request = { - let deploy = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_session_code( - &format!("{}.wasm", TRANSFER_PURSE_TO_ACCOUNT_CONTRACT_NAME), - runtime_args! { ARG_TARGET => account_1_account_hash, ARG_AMOUNT => U512::from(transferred_amount) }, - ) - .with_empty_payment_bytes( - runtime_args! 
{ - ARG_AMOUNT => payment_purse_amount, - }, - ) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_KEY]) - .with_deploy_hash([3; 32]) - .build(); + let exec_request_stored_payment = + ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); - ExecuteRequestBuilder::new().push_deploy(deploy).build() - }; - - let mut builder = InMemoryWasmTestBuilder::default(); - - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); - - builder - .exec(do_nothing_request([1; 32])) - .expect_success() - .commit(); - builder - .exec(do_nothing_request([2; 32])) - .expect_success() - .commit(); - - builder - .exec(provided_request) - .expect_success() - .get_transforms()[2] - .to_owned() - }; - - let diff = AdditiveMapDiff::new(provided_transforms, stored_transforms); - - let left: BTreeMap<&Key, &Transform> = diff.left().iter().collect(); - let right: BTreeMap<&Key, &Transform> = diff.right().iter().collect(); - - // The diff contains the same keys... - assert!(Iterator::eq(left.keys(), right.keys())); - - // ...but a few different values - for lr in left.values().zip(right.values()) { - match lr { - ( - Transform::Write(StoredValue::CLValue(l_value)), - Transform::Write(StoredValue::CLValue(r_value)), - ) => { - // differing refunds and balances - let _ = l_value.to_owned().into_t::().expect("should be U512"); - let _ = r_value.to_owned().into_t::().expect("should be U512"); - } - ( - Transform::Write(StoredValue::Account(la)), - Transform::Write(StoredValue::Account(ra)), - ) => { - assert_eq!(la.account_hash(), ra.account_hash()); - assert_eq!(la.main_purse(), ra.main_purse()); - assert_eq!(la.action_thresholds(), ra.action_thresholds()); - - assert!(Iterator::eq(la.associated_keys(), ra.associated_keys(),)); - - // la has stored contracts under named urefs - assert_ne!(la.named_keys(), ra.named_keys()); - } - ( - Transform::Write(StoredValue::Transfer(l_value)), - Transform::Write(StoredValue::Transfer(r_value)), - ) => assert_eq!(l_value, r_value), - ( - 
Transform::Write(StoredValue::DeployInfo(l_value)), - Transform::Write(StoredValue::DeployInfo(r_value)), - ) => { - assert_eq!(l_value.deploy_hash, r_value.deploy_hash); - assert_eq!(l_value.from, r_value.from); - assert_eq!(l_value.source, r_value.source); - assert_eq!(l_value.transfers, r_value.transfers); - assert_ne!(l_value.gas, r_value.gas); - } - (Transform::AddUInt512(_), Transform::AddUInt512(_)) => { - // differing payment - } - _ => { - println!("lr: {:?}", lr); - panic!("unexpected diff"); - } - } + builder.exec(exec_request_stored_payment).expect_failure(); } -} - -#[ignore] -#[test] -fn should_fail_payment_stored_at_named_key_with_incompatible_major_version() { - let payment_purse_amount = *DEFAULT_PAYMENT; - - // first, store payment contract - let exec_request = ExecuteRequestBuilder::standard( - *DEFAULT_ACCOUNT_ADDR, - STORED_PAYMENT_CONTRACT_NAME, - RuntimeArgs::default(), - ) - .build(); - - let mut builder = InMemoryWasmTestBuilder::default(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); - - builder.exec_commit_finish(exec_request); - - let query_result = builder - .query(None, Key::Account(*DEFAULT_ACCOUNT_ADDR), &[]) - .expect("should query default account"); - let default_account = query_result - .as_account() - .expect("query result should be an account"); - - assert!( - default_account - .named_keys() - .contains_key(STORED_PAYMENT_CONTRACT_HASH_NAME), - "standard_payment should be present" - ); - // - // upgrade with new wasm costs with modified mint for given version to avoid missing wasm costs - // table that's queried early - // - let sem_ver = PROTOCOL_VERSION.value(); - let new_protocol_version = - ProtocolVersion::from_parts(sem_ver.major + 1, sem_ver.minor, sem_ver.patch); - - let mut upgrade_request = make_upgrade_request(new_protocol_version).build(); - - builder - .upgrade_with_upgrade_request(&mut upgrade_request) - .expect_upgrade_success(); - - // next make another deploy that USES stored payment logic - let 
exec_request_stored_payment = { - let deploy = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_session_code(&format!("{}.wasm", DO_NOTHING_NAME), RuntimeArgs::default()) - .with_stored_payment_named_key( - STORED_PAYMENT_CONTRACT_HASH_NAME, - PAY, - runtime_args! { - ARG_AMOUNT => payment_purse_amount, - }, - ) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_KEY]) - .with_deploy_hash([2; 32]) - .build(); + let error = builder.get_error().expect("must have error"); - ExecuteRequestBuilder::new() - .push_deploy(deploy) - .with_protocol_version(new_protocol_version) - .build() - }; - - let test_result = builder.exec(exec_request_stored_payment).commit(); - - assert!( - test_result.is_error(), - "calling a payment module with increased major protocol version should be error" - ); - let error_message = builder - .exec_error_message(1) - .expect("should have exec error"); - assert!( - error_message.contains(EXPECTED_ERROR_MESSAGE), - "{:?}", - error_message - ); + assert_matches!(error, Error::Exec(ExecError::ForgedReference(_))) } #[ignore] #[test] -fn should_fail_payment_stored_at_hash_with_incompatible_major_version() { +fn should_exec_stored_code_by_named_hash() { let payment_purse_amount = *DEFAULT_PAYMENT; - // first, store payment contract - let exec_request = ExecuteRequestBuilder::standard( - *DEFAULT_ACCOUNT_ADDR, - STORED_PAYMENT_CONTRACT_NAME, - RuntimeArgs::default(), - ) - .build(); - - let mut builder = InMemoryWasmTestBuilder::default(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + // genesis + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); - builder.exec_commit_finish(exec_request); + install_custom_payment(&mut builder); - let query_result = builder - .query(None, Key::Account(*DEFAULT_ACCOUNT_ADDR), &[]) - .expect("should query default account"); - let default_account = query_result - .as_account() - .expect("query result should be an account"); - let 
stored_payment_contract_hash = default_account - .named_keys() - .get(STORED_PAYMENT_CONTRACT_HASH_NAME) - .expect("should have standard_payment named key") - .into_hash() - .expect("standard_payment should be an uref"); - - // - // upgrade with new wasm costs with modified mint for given version to avoid missing wasm costs - // table that's queried early - // - let sem_ver = PROTOCOL_VERSION.value(); - let new_protocol_version = - ProtocolVersion::from_parts(sem_ver.major + 1, sem_ver.minor, sem_ver.patch); + // verify stored contract functions as expected by checking all the maths - let mut upgrade_request = make_upgrade_request(new_protocol_version).build(); + let transferred_amount = 1; - builder - .upgrade_with_upgrade_request(&mut upgrade_request) - .expect_upgrade_success(); + { + let account_1_account_hash = ACCOUNT_1_ADDR; + let deploy_item = DeployItemBuilder::new() + .with_address(*DEFAULT_ACCOUNT_ADDR) + .with_session_code( + format!("{}.wasm", TRANSFER_PURSE_TO_ACCOUNT_CONTRACT_NAME), + runtime_args! { ARG_TARGET => account_1_account_hash, ARG_AMOUNT => U512::from(transferred_amount) }, + ) + .with_stored_versioned_payment_contract_by_name( + STORED_PAYMENT_CONTRACT_PACKAGE_HASH_NAME, + Some(ENTITY_INITIAL_VERSION), + PAY_ENTRYPOINT, + runtime_args! { + ARG_AMOUNT => payment_purse_amount, + }, + ) + .with_authorization_keys(&[*DEFAULT_ACCOUNT_KEY]) + .with_deploy_hash([2; 32]) + .build(); - // next make another deploy that USES stored payment logic - let exec_request_stored_payment = { - let deploy = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_session_code(&format!("{}.wasm", DO_NOTHING_NAME), RuntimeArgs::default()) - .with_stored_payment_hash( - stored_payment_contract_hash.into(), - DEFAULT_ENTRY_POINT_NAME, - runtime_args! 
{ ARG_AMOUNT => payment_purse_amount }, - ) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_KEY]) - .with_deploy_hash([2; 32]) - .build(); + let exec_request_stored_payment = + ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); - ExecuteRequestBuilder::new() - .push_deploy(deploy) - .with_protocol_version(new_protocol_version) - .build() - }; + builder.exec(exec_request_stored_payment).expect_failure(); - let test_result = builder.exec(exec_request_stored_payment).commit(); + let error = builder.get_error().unwrap(); - assert!( - test_result.is_error(), - "calling a payment module with increased major protocol version should be error" - ); - let error_message = builder - .exec_error_message(1) - .expect("should have exec error"); - assert!(error_message.contains(EXPECTED_ERROR_MESSAGE)); + assert_matches!(error, Error::Exec(ExecError::ForgedReference(_))) + } } #[ignore] @@ -782,6 +371,9 @@ fn should_fail_payment_stored_at_hash_with_incompatible_major_version() { fn should_fail_session_stored_at_named_key_with_incompatible_major_version() { let payment_purse_amount = *DEFAULT_PAYMENT; + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + // first, store payment contract for v1.0.0 let exec_request_1 = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, @@ -790,110 +382,36 @@ fn should_fail_session_stored_at_named_key_with_incompatible_major_version() { ) .build(); - let mut builder = InMemoryWasmTestBuilder::default(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); - - builder.exec_commit_finish(exec_request_1); - - let query_result = builder - .query(None, Key::Account(*DEFAULT_ACCOUNT_ADDR), &[]) - .expect("should query default account"); - let default_account = query_result - .as_account() - .expect("query result should be an account"); - assert!( - default_account - .named_keys() - .contains_key(DO_NOTHING_CONTRACT_HASH_NAME), - "do_nothing should be present in named keys" - ); - - // - 
// upgrade with new wasm costs with modified mint for given version - // - let sem_ver = PROTOCOL_VERSION.value(); - let new_protocol_version = - ProtocolVersion::from_parts(sem_ver.major + 1, sem_ver.minor, sem_ver.patch); - - let mut upgrade_request = make_upgrade_request(new_protocol_version).build(); - - builder - .upgrade_with_upgrade_request(&mut upgrade_request) - .expect_upgrade_success(); - - // Call stored session code - - let exec_request_stored_payment = { - let deploy = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_stored_session_named_key( - DO_NOTHING_CONTRACT_HASH_NAME, - ENTRY_FUNCTION_NAME, - RuntimeArgs::new(), - ) - .with_payment_code( - STORED_PAYMENT_CONTRACT_NAME, - runtime_args! { - ARG_AMOUNT => payment_purse_amount, - }, - ) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_KEY]) - .with_deploy_hash([2; 32]) - .build(); - - ExecuteRequestBuilder::new() - .push_deploy(deploy) - .with_protocol_version(new_protocol_version) - .build() - }; - - let test_result = builder.exec(exec_request_stored_payment).commit(); - - assert!( - test_result.is_error(), - "calling a session module with increased major protocol version should be error", - ); - let error_message = builder - .exec_error_message(1) - .expect("should have exec error"); - assert!( - error_message.contains(EXPECTED_ERROR_MESSAGE), - "{:?}", - error_message - ); -} + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); -#[ignore] -#[test] -fn should_fail_session_stored_at_named_key_with_missing_new_major_version() { - let payment_purse_amount = *DEFAULT_PAYMENT; + builder.exec(exec_request_1).commit(); - // first, store payment contract for v1.0.0 - let exec_request_1 = ExecuteRequestBuilder::standard( + let exec_request = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, - &format!("{}_stored.wasm", DO_NOTHING_NAME), + STORED_PAYMENT_CONTRACT_NAME, RuntimeArgs::default(), ) .build(); - let mut builder = 
InMemoryWasmTestBuilder::default(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); - - builder.exec_commit_finish(exec_request_1); + builder.exec(exec_request).commit(); - let query_result = builder - .query(None, Key::Account(*DEFAULT_ACCOUNT_ADDR), &[]) - .expect("should query default account"); - let default_account = query_result - .as_account() - .expect("query result should be an account"); + let default_account = builder + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .expect("must have contract associated with default account"); assert!( default_account .named_keys() - .contains_key(DO_NOTHING_CONTRACT_HASH_NAME), + .contains(DO_NOTHING_CONTRACT_HASH_NAME), "do_nothing should be present in named keys" ); + let stored_payment_contract_hash = default_account + .named_keys() + .get(STORED_PAYMENT_CONTRACT_HASH_NAME) + .expect("should have standard_payment named key") + .into_entity_hash_addr() + .expect("standard_payment should be an uref"); // // upgrade with new wasm costs with modified mint for given version // @@ -904,123 +422,44 @@ fn should_fail_session_stored_at_named_key_with_missing_new_major_version() { let mut upgrade_request = make_upgrade_request(new_protocol_version).build(); builder - .upgrade_with_upgrade_request(&mut upgrade_request) - .expect_upgrade_success(); - - // Call stored session code - - let exec_request_stored_payment = { - let deploy = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_stored_versioned_contract_by_name( - DO_NOTHING_CONTRACT_PACKAGE_HASH_NAME, - Some(INITIAL_VERSION), - ENTRY_FUNCTION_NAME, - RuntimeArgs::new(), - ) - .with_payment_code( - STORED_PAYMENT_CONTRACT_NAME, - runtime_args! 
{ - ARG_AMOUNT => payment_purse_amount, - }, - ) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_KEY]) - .with_deploy_hash([2; 32]) - .build(); - - ExecuteRequestBuilder::new() - .push_deploy(deploy) - .with_protocol_version(new_protocol_version) - .build() - }; - - let test_result = builder.exec(exec_request_stored_payment).commit(); - - assert!( - test_result.is_error(), - "calling a session module with increased major protocol version should be error", - ); - let error_message = builder - .exec_error_message(1) - .expect("should have exec error"); - assert!( - error_message.contains(EXPECTED_VERSION_ERROR_MESSAGE), - "{:?}", - error_message - ); -} - -#[ignore] -#[test] -fn should_fail_session_stored_at_hash_with_incompatible_major_version() { - let payment_purse_amount = *DEFAULT_PAYMENT; - - // first, store payment contract for v1.0.0 - let exec_request_1 = ExecuteRequestBuilder::standard( - *DEFAULT_ACCOUNT_ADDR, - &format!("{}_stored.wasm", DO_NOTHING_NAME), - RuntimeArgs::default(), - ) - .build(); - - let mut builder = InMemoryWasmTestBuilder::default(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); - - builder.exec_commit_finish(exec_request_1); - - // - // upgrade with new wasm costs with modified mint for given version - // - let sem_ver = PROTOCOL_VERSION.value(); - let new_protocol_version = - ProtocolVersion::from_parts(sem_ver.major + 1, sem_ver.minor, sem_ver.patch); - - let mut upgrade_request = make_upgrade_request(new_protocol_version).build(); - - builder - .upgrade_with_upgrade_request(&mut upgrade_request) + .upgrade(&mut upgrade_request) .expect_upgrade_success(); // Call stored session code - let exec_request_stored_payment = { - let deploy = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_stored_session_named_key( - DO_NOTHING_CONTRACT_HASH_NAME, - ENTRY_FUNCTION_NAME, - RuntimeArgs::new(), - ) - .with_payment_code( - STORED_PAYMENT_CONTRACT_NAME, - runtime_args! 
{ - ARG_AMOUNT => payment_purse_amount, - }, - ) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_KEY]) - .with_deploy_hash([2; 32]) - .build(); - - ExecuteRequestBuilder::new() - .push_deploy(deploy) - .with_protocol_version(new_protocol_version) - .build() - }; - - let test_result = builder.exec(exec_request_stored_payment).commit(); + let deploy_item = DeployItemBuilder::new() + .with_address(*DEFAULT_ACCOUNT_ADDR) + .with_stored_session_named_key( + DO_NOTHING_CONTRACT_HASH_NAME, + ENTRY_FUNCTION_NAME, + RuntimeArgs::new(), + ) + .with_stored_payment_hash( + stored_payment_contract_hash.into(), + PAY_ENTRYPOINT, + runtime_args! { ARG_AMOUNT => payment_purse_amount }, + ) + .with_authorization_keys(&[*DEFAULT_ACCOUNT_KEY]) + .with_deploy_hash([2; 32]) + .build(); + + let exec_request_stored_payment = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); + + builder.exec(exec_request_stored_payment).commit(); assert!( - test_result.is_error(), + builder.is_error(), "calling a session module with increased major protocol version should be error", ); - let error_message = builder - .exec_error_message(1) - .expect("should have exec error"); - assert!( - error_message.contains(EXPECTED_ERROR_MESSAGE), - "{:?}", - error_message - ); + let _error = builder.get_error().expect("must have error"); + // println!("error {:?}", error); + // assert!(matches!( + // error, + // Error::Exec(ExecError::IncompatibleProtocolMajorVersion { + // expected: 3, + // actual: 2 + // }) + // )) } #[ignore] @@ -1028,8 +467,8 @@ fn should_fail_session_stored_at_hash_with_incompatible_major_version() { fn should_execute_stored_payment_and_session_code_with_new_major_version() { let payment_purse_amount = *DEFAULT_PAYMENT; - let mut builder = InMemoryWasmTestBuilder::default(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); // // upgrade with new wasm costs with modified mint 
for given version @@ -1041,7 +480,7 @@ fn should_execute_stored_payment_and_session_code_with_new_major_version() { let mut upgrade_request = make_upgrade_request(new_protocol_version).build(); builder - .upgrade_with_upgrade_request(&mut upgrade_request) + .upgrade(&mut upgrade_request) .expect_upgrade_success(); // first, store payment contract for v2.0.0 @@ -1051,7 +490,6 @@ fn should_execute_stored_payment_and_session_code_with_new_major_version() { STORED_PAYMENT_CONTRACT_NAME, RuntimeArgs::default(), ) - .with_protocol_version(new_protocol_version) .build(); let exec_request_2 = ExecuteRequestBuilder::standard( @@ -1059,59 +497,315 @@ fn should_execute_stored_payment_and_session_code_with_new_major_version() { &format!("{}_stored.wasm", DO_NOTHING_NAME), RuntimeArgs::default(), ) - .with_protocol_version(new_protocol_version) .build(); // store both contracts builder.exec(exec_request_1).expect_success().commit(); - let test_result = builder - .exec(exec_request_2) - .expect_success() - .commit() - .finish(); + builder.exec(exec_request_2).expect_success().commit(); // query both stored contracts by their named keys - let query_result = test_result - .builder() - .query(None, Key::Account(*DEFAULT_ACCOUNT_ADDR), &[]) - .expect("should query default account"); - let default_account = query_result - .as_account() - .expect("query result should be an account"); + let default_account = builder + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .expect("must have contract"); let test_payment_stored_hash = default_account .named_keys() .get(STORED_PAYMENT_CONTRACT_HASH_NAME) .expect("standard_payment should be present in named keys") - .into_hash() + .into_entity_hash_addr() .expect("standard_payment named key should be hash"); - let exec_request_stored_payment = { - let deploy = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_stored_versioned_contract_by_name( - DO_NOTHING_CONTRACT_PACKAGE_HASH_NAME, - 
Some(INITIAL_VERSION), - ENTRY_FUNCTION_NAME, - RuntimeArgs::new(), - ) - .with_stored_payment_hash( - test_payment_stored_hash.into(), - "pay", - runtime_args! { ARG_AMOUNT => payment_purse_amount }, - ) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_KEY]) - .with_deploy_hash([3; 32]) - .build(); - - ExecuteRequestBuilder::new() - .push_deploy(deploy) - .with_protocol_version(new_protocol_version) - .build() - }; + let deploy_item = DeployItemBuilder::new() + .with_address(*DEFAULT_ACCOUNT_ADDR) + .with_stored_versioned_contract_by_name( + DO_NOTHING_CONTRACT_PACKAGE_HASH_NAME, + Some(INITIAL_VERSION), + ENTRY_FUNCTION_NAME, + RuntimeArgs::new(), + ) + .with_stored_payment_hash( + test_payment_stored_hash.into(), + PAY_ENTRYPOINT, + runtime_args! { ARG_AMOUNT => payment_purse_amount }, + ) + .with_authorization_keys(&[*DEFAULT_ACCOUNT_KEY]) + .with_deploy_hash([3; 32]) + .build(); + + let exec_request_stored_payment = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); - InMemoryWasmTestBuilder::from_result(test_result) + builder + .clear_results() .exec(exec_request_stored_payment) - .expect_success() - .commit(); + .expect_failure(); + + let error = builder.get_error().unwrap(); + + assert_matches!(error, Error::Exec(ExecError::ForgedReference(_))) } + +// We are currently not enforcing major version compliance to permit optimistic retro-compatibility +// if we start enforcing this in the future, the following tests should be restored and patched up +// to whatever the relevant protocol versions are at that time. 
+// #[ignore] +// #[test] +// fn should_fail_payment_stored_at_hash_with_incompatible_major_version() { +// let payment_purse_amount = *DEFAULT_PAYMENT; +// +// let default_account_hash = *DEFAULT_ACCOUNT_ADDR; +// // first, store payment contract +// let exec_request = ExecuteRequestBuilder::standard( +// default_account_hash, +// STORED_PAYMENT_CONTRACT_NAME, +// RuntimeArgs::default(), +// ) +// .build(); +// +// let mut builder = LmdbWasmTestBuilder::default(); +// builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); +// +// builder.exec(exec_request).expect_success().commit(); +// +// let default_account = builder +// .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) +// .expect("must have contract associated with default account"); +// +// let stored_payment_key = *default_account +// .named_keys() +// .get(STORED_PAYMENT_CONTRACT_HASH_NAME) +// .expect("should have stored payment key"); +// +// let _stored_payment = builder +// .query(None, stored_payment_key, &[]) +// .expect("should have stored payement"); +// +// let stored_payment_contract_hash = stored_payment_key +// .into_entity_hash_addr() +// .expect("standard_payment should be an uref"); +// +// // +// // upgrade with new wasm costs with modified mint for given version to avoid missing wasm +// costs // table that's queried early +// // +// let sem_ver = PROTOCOL_VERSION.value(); +// let new_protocol_version = +// ProtocolVersion::from_parts(sem_ver.major + 1, sem_ver.minor, sem_ver.patch); +// +// let mut upgrade_request = make_upgrade_request(new_protocol_version).build(); +// +// builder +// .upgrade(&mut upgrade_request) +// .expect_upgrade_success(); +// +// // next make another deploy that USES stored payment logic +// let deploy_item = DeployItemBuilder::new() +// .with_address(*DEFAULT_ACCOUNT_ADDR) +// .with_session_code(format!("{}.wasm", DO_NOTHING_NAME), RuntimeArgs::default()) +// .with_stored_payment_hash( +// stored_payment_contract_hash.into(), +// PAY_ENTRYPOINT, 
+// runtime_args! { ARG_AMOUNT => payment_purse_amount }, +// ) +// .with_authorization_keys(&[*DEFAULT_ACCOUNT_KEY]) +// .with_deploy_hash([2; 32]) +// .build(); +// +// let exec_request_stored_payment = +// ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); +// +// builder.exec(exec_request_stored_payment).commit(); +// +// assert!( +// builder.is_error(), +// "calling a payment module with increased major protocol version should be error" +// ); +// +// let expected_error = Error::Exec(ExecError::IncompatibleProtocolMajorVersion { +// expected: 3, +// actual: 2, +// }); +// +// builder.assert_error(expected_error); +// } + +// #[ignore] +// #[test] +// fn should_fail_session_stored_at_named_key_with_missing_new_major_version() { +// let payment_purse_amount = *DEFAULT_PAYMENT; +// +// let mut builder = LmdbWasmTestBuilder::default(); +// builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); +// +// // first, store payment contract for v1.0.0 +// let exec_request_1 = ExecuteRequestBuilder::standard( +// *DEFAULT_ACCOUNT_ADDR, +// &format!("{}_stored.wasm", DO_NOTHING_NAME), +// RuntimeArgs::default(), +// ) +// .build(); +// let exec_request_2 = ExecuteRequestBuilder::standard( +// *DEFAULT_ACCOUNT_ADDR, +// STORED_PAYMENT_CONTRACT_NAME, +// RuntimeArgs::default(), +// ) +// .build(); +// +// builder.exec(exec_request_1).commit(); +// builder.exec(exec_request_2).commit(); +// +// let default_account = builder +// .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) +// .expect("must have contract"); +// assert!( +// default_account +// .named_keys() +// .contains(DO_NOTHING_CONTRACT_HASH_NAME), +// "do_nothing should be present in named keys" +// ); +// +// // +// // upgrade with new wasm costs with modified mint for given version +// // +// let sem_ver = PROTOCOL_VERSION.value(); +// let new_protocol_version = +// ProtocolVersion::from_parts(sem_ver.major + 1, sem_ver.minor, sem_ver.patch); +// +// let mut upgrade_request = 
make_upgrade_request(new_protocol_version).build(); +// +// builder +// .upgrade(&mut upgrade_request) +// .expect_upgrade_success(); +// +// // Call stored session code +// +// let deploy_item = DeployItemBuilder::new() +// .with_address(*DEFAULT_ACCOUNT_ADDR) +// .with_stored_versioned_contract_by_name( +// DO_NOTHING_CONTRACT_PACKAGE_HASH_NAME, +// Some(INITIAL_VERSION), +// ENTRY_FUNCTION_NAME, +// RuntimeArgs::new(), +// ) +// .with_stored_versioned_payment_contract_by_name( +// STORED_PAYMENT_CONTRACT_PACKAGE_HASH_NAME, +// Some(INITIAL_VERSION), +// PAY_ENTRYPOINT, +// runtime_args! { +// ARG_AMOUNT => payment_purse_amount, +// }, +// ) +// .with_authorization_keys(&[*DEFAULT_ACCOUNT_KEY]) +// .with_deploy_hash([2; 32]) +// .build(); +// +// let exec_request_stored_payment = +// ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); +// +// builder.exec(exec_request_stored_payment).commit(); +// +// assert!( +// builder.is_error(), +// "calling a session module with increased major protocol version should be error", +// ); +// +// let entity_version_key = EntityVersionKey::new(3, 1); +// +// let expected_error = Error::Exec(ExecError::MissingEntityVersion(entity_version_key)); +// +// builder.assert_error(expected_error); +// } +// +// #[ignore] +// #[test] +// fn should_fail_session_stored_at_hash_with_incompatible_major_version() { +// let payment_purse_amount = *DEFAULT_PAYMENT; +// +// let mut builder = LmdbWasmTestBuilder::default(); +// builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); +// +// // first, store payment contract for v1.0.0 +// let exec_request_1 = ExecuteRequestBuilder::standard( +// *DEFAULT_ACCOUNT_ADDR, +// &format!("{}_stored.wasm", DO_NOTHING_NAME), +// RuntimeArgs::default(), +// ) +// .build(); +// +// let mut builder = LmdbWasmTestBuilder::default(); +// builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); +// +// builder.exec(exec_request_1).commit(); +// +// let exec_request = ExecuteRequestBuilder::standard( +// 
*DEFAULT_ACCOUNT_ADDR, +// STORED_PAYMENT_CONTRACT_NAME, +// RuntimeArgs::default(), +// ) +// .build(); +// +// builder.exec(exec_request).commit(); +// +// // +// // upgrade with new wasm costs with modified mint for given version +// // +// let sem_ver = PROTOCOL_VERSION.value(); +// let new_protocol_version = +// ProtocolVersion::from_parts(sem_ver.major + 1, sem_ver.minor, sem_ver.patch); +// +// let mut upgrade_request = make_upgrade_request(new_protocol_version).build(); +// +// builder +// .upgrade(&mut upgrade_request) +// .expect_upgrade_success(); +// +// // Call stored session code +// +// // query both stored contracts by their named keys +// let default_account = builder +// .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) +// .expect("must have contract"); +// let test_payment_stored_hash = default_account +// .named_keys() +// .get(STORED_PAYMENT_CONTRACT_HASH_NAME) +// .expect("standard_payment should be present in named keys") +// .into_entity_hash_addr() +// .expect("standard_payment named key should be hash"); +// +// let deploy_item = DeployItemBuilder::new() +// .with_address(*DEFAULT_ACCOUNT_ADDR) +// .with_stored_session_named_key( +// DO_NOTHING_CONTRACT_HASH_NAME, +// ENTRY_FUNCTION_NAME, +// RuntimeArgs::new(), +// ) +// .with_stored_payment_hash( +// test_payment_stored_hash.into(), +// PAY_ENTRYPOINT, +// runtime_args! 
{ ARG_AMOUNT => payment_purse_amount }, +// ) +// .with_authorization_keys(&[*DEFAULT_ACCOUNT_KEY]) +// .with_deploy_hash([2; 32]) +// .build(); +// +// let exec_request_stored_payment = +// ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); +// +// builder.exec(exec_request_stored_payment).commit(); +// +// assert!( +// builder.is_error(), +// "calling a session module with increased major protocol version should be error", +// ); +// let error = builder.get_error().expect("must have error"); +// assert!( +// matches!( +// error, +// Error::Exec(ExecError::IncompatibleProtocolMajorVersion { +// expected: 3, +// actual: 2 +// }), +// ), +// "Error does not match: {:?}", +// error +// ) +// } diff --git a/execution_engine_testing/tests/src/test/explorer/faucet.rs b/execution_engine_testing/tests/src/test/explorer/faucet.rs index 576779bf4b..905134bf82 100644 --- a/execution_engine_testing/tests/src/test/explorer/faucet.rs +++ b/execution_engine_testing/tests/src/test/explorer/faucet.rs @@ -1,76 +1,822 @@ +use num_rational::Ratio; + +use casper_execution_engine::{engine_state, execution::ExecError}; + use casper_engine_test_support::{ - internal::{ExecuteRequestBuilder, InMemoryWasmTestBuilder, DEFAULT_RUN_GENESIS_REQUEST}, - DEFAULT_ACCOUNT_ADDR, + ChainspecConfig, DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder, + TransferRequestBuilder, CHAINSPEC_SYMLINK, DEFAULT_PAYMENT, LOCAL_GENESIS_REQUEST, +}; +use casper_types::{ + account::AccountHash, runtime_args, ApiError, FeeHandling, Key, PricingHandling, PublicKey, + RefundHandling, SecretKey, Transfer, U512, +}; + +// test constants. 
+use super::{ + faucet_test_helpers::{ + get_faucet_entity_hash, get_faucet_purse, query_stored_value, FaucetDeployHelper, + FaucetInstallSessionRequestBuilder, FundAccountRequestBuilder, + }, + ARG_AMOUNT, ARG_AVAILABLE_AMOUNT, ARG_DISTRIBUTIONS_PER_INTERVAL, ARG_ID, ARG_TARGET, + ARG_TIME_INTERVAL, AUTHORIZED_ACCOUNT_NAMED_KEY, AVAILABLE_AMOUNT_NAMED_KEY, + DISTRIBUTIONS_PER_INTERVAL_NAMED_KEY, ENTRY_POINT_FAUCET, ENTRY_POINT_SET_VARIABLES, + FAUCET_CONTRACT_NAMED_KEY, FAUCET_FUND_AMOUNT, FAUCET_ID, FAUCET_INSTALLER_SESSION, + FAUCET_PURSE_NAMED_KEY, FAUCET_TIME_INTERVAL, INSTALLER_ACCOUNT, INSTALLER_FUND_AMOUNT, + INSTALLER_NAMED_KEY, LAST_DISTRIBUTION_TIME_NAMED_KEY, REMAINING_REQUESTS_NAMED_KEY, + TIME_INTERVAL_NAMED_KEY, TWO_HOURS_AS_MILLIS, }; -use casper_types::{account::AccountHash, runtime_args, ApiError, RuntimeArgs, U512}; -const FAUCET_CONTRACT: &str = "faucet.wasm"; -const NEW_ACCOUNT_ADDR: AccountHash = AccountHash::new([99u8; 32]); +/// User error variant defined in the faucet contract. 
+const FAUCET_CALL_BY_USER_WITH_AUTHORIZED_ACCOUNT_SET: u16 = 25; + +#[ignore] +#[test] +fn should_install_faucet_contract() { + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let fund_installer_account_request = FundAccountRequestBuilder::new() + .with_target_account(INSTALLER_ACCOUNT) + .with_fund_amount(U512::from(INSTALLER_FUND_AMOUNT)) + .build(); + + builder + .transfer_and_commit(fund_installer_account_request) + .expect_success(); + + builder + .exec(FaucetInstallSessionRequestBuilder::new().build()) + .expect_success() + .commit(); + + let installer_named_keys = builder + .get_entity_with_named_keys_by_account_hash(INSTALLER_ACCOUNT) + .expect("must have entity") + .named_keys() + .clone(); + + assert!(installer_named_keys + .get(&format!("{}_{}", FAUCET_CONTRACT_NAMED_KEY, FAUCET_ID)) + .is_some()); -const ARG_TARGET: &str = "target"; -const ARG_AMOUNT: &str = "amount"; + let faucet_purse_id = format!("{}_{}", FAUCET_PURSE_NAMED_KEY, FAUCET_ID); + assert!(installer_named_keys.get(&faucet_purse_id).is_some()); + + let faucet_named_key = Key::Hash( + installer_named_keys + .get(&format!("{}_{}", FAUCET_CONTRACT_NAMED_KEY, FAUCET_ID)) + .expect("failed to find faucet named key") + .into_entity_hash_addr() + .expect("must get hash addr"), + ); + + // check installer is set. 
+ builder + .query(None, faucet_named_key, &[INSTALLER_NAMED_KEY.to_string()]) + .expect("failed to find installer named key"); + + // check time interval + builder + .query( + None, + faucet_named_key, + &[TIME_INTERVAL_NAMED_KEY.to_string()], + ) + .expect("failed to find time interval named key"); + + // check last distribution time + builder + .query( + None, + faucet_named_key, + &[LAST_DISTRIBUTION_TIME_NAMED_KEY.to_string()], + ) + .expect("failed to find last distribution named key"); + + // check faucet purse + builder + .query( + None, + faucet_named_key, + &[FAUCET_PURSE_NAMED_KEY.to_string()], + ) + .expect("failed to find faucet purse named key"); + + // check available amount + builder + .query( + None, + faucet_named_key, + &[AVAILABLE_AMOUNT_NAMED_KEY.to_string()], + ) + .expect("failed to find available amount named key"); + + // check remaining requests + builder + .query( + None, + faucet_named_key, + &[REMAINING_REQUESTS_NAMED_KEY.to_string()], + ) + .expect("failed to find remaining requests named key"); + + builder + .query( + None, + faucet_named_key, + &[AUTHORIZED_ACCOUNT_NAMED_KEY.to_string()], + ) + .expect("failed to find authorized account named key"); +} #[ignore] #[test] -fn should_get_funds_from_faucet() { - let amount = U512::from(1000); - let exec_request = ExecuteRequestBuilder::standard( - *DEFAULT_ACCOUNT_ADDR, - FAUCET_CONTRACT, - runtime_args! 
{ ARG_TARGET => NEW_ACCOUNT_ADDR, ARG_AMOUNT => amount }, +fn should_allow_installer_to_set_variables() { + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let mut helper = FaucetDeployHelper::new() + .with_installer_account(INSTALLER_ACCOUNT) + .with_installer_fund_amount(U512::from(INSTALLER_FUND_AMOUNT)) + .with_faucet_purse_fund_amount(U512::from(FAUCET_FUND_AMOUNT)) + .with_faucet_available_amount(Some(U512::from(FAUCET_FUND_AMOUNT))) + .with_faucet_distributions_per_interval(Some(2)) + .with_faucet_time_interval(Some(FAUCET_TIME_INTERVAL)); + + builder + .transfer_and_commit(helper.fund_installer_request()) + .expect_success(); + + builder + .exec(helper.faucet_install_request()) + .expect_success() + .commit(); + + let faucet_contract_hash = helper.query_and_set_faucet_contract_hash(&builder); + let faucet_entity_key = Key::Hash(faucet_contract_hash.value()); + + assert_eq!( + helper.query_faucet_purse_balance(&builder), + helper.faucet_purse_fund_amount() + ); + + let available_amount: U512 = query_stored_value( + &mut builder, + faucet_entity_key, + vec![AVAILABLE_AMOUNT_NAMED_KEY.to_string()], + ); + + // the available amount per interval will be zero until the installer calls + // the set_variable entrypoint to finish setup. + assert_eq!(available_amount, U512::zero()); + + let time_interval: u64 = query_stored_value( + &mut builder, + faucet_entity_key, + vec![TIME_INTERVAL_NAMED_KEY.to_string()], + ); + + // defaults to around two hours. 
+ assert_eq!(time_interval, TWO_HOURS_AS_MILLIS); + + let distributions_per_interval: u64 = query_stored_value( + &mut builder, + faucet_entity_key, + vec![DISTRIBUTIONS_PER_INTERVAL_NAMED_KEY.to_string()], + ); + + assert_eq!(distributions_per_interval, 0u64); + + builder + .exec(helper.faucet_config_request()) + .expect_success() + .commit(); + + let available_amount: U512 = query_stored_value( + &mut builder, + faucet_entity_key, + vec![AVAILABLE_AMOUNT_NAMED_KEY.to_string()], + ); + + assert_eq!(available_amount, helper.faucet_purse_fund_amount()); + + let time_interval: u64 = query_stored_value( + &mut builder, + faucet_entity_key, + vec![TIME_INTERVAL_NAMED_KEY.to_string()], + ); + + assert_eq!(time_interval, helper.faucet_time_interval().unwrap()); + + let distributions_per_interval: u64 = query_stored_value( + &mut builder, + faucet_entity_key, + vec![DISTRIBUTIONS_PER_INTERVAL_NAMED_KEY.to_string()], + ); + + assert_eq!( + distributions_per_interval, + helper.faucet_distributions_per_interval().unwrap() + ); +} + +#[ignore] +#[test] +fn should_fund_new_account() { + let mut builder = LmdbWasmTestBuilder::default(); + + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let faucet_purse_fund_amount = U512::from(9_000_000_000u64); + let faucet_distributions_per_interval = 3; + + let mut helper = FaucetDeployHelper::default() + .with_faucet_purse_fund_amount(faucet_purse_fund_amount) + .with_faucet_available_amount(Some(faucet_purse_fund_amount)) + .with_faucet_distributions_per_interval(Some(faucet_distributions_per_interval)); + + builder + .transfer_and_commit(helper.fund_installer_request()) + .expect_success(); + + builder + .exec(helper.faucet_install_request()) + .expect_success() + .commit(); + + helper.query_and_set_faucet_contract_hash(&builder); + + builder + .exec(helper.faucet_config_request()) + .expect_success() + .commit(); + + let new_account = AccountHash::new([7u8; 32]); + + let new_account_fund_amount = U512::from(5_000_000_000u64); + 
let fund_new_account_request = helper + .new_faucet_fund_request_builder() + .with_installer_account(helper.installer_account()) + .with_arg_target(new_account) + .with_arg_fund_amount(new_account_fund_amount) + .build(); + + let faucet_purse_uref = helper.query_faucet_purse(&builder); + let faucet_purse_balance_before = builder.get_purse_balance(faucet_purse_uref); + + builder + .exec(fund_new_account_request) + .expect_success() + .commit(); + + let faucet_purse_balance_after = builder.get_purse_balance(faucet_purse_uref); + + assert_eq!( + faucet_purse_balance_after, + faucet_purse_balance_before - new_account_fund_amount + ); + + let new_account_actual_purse_balance = builder.get_purse_balance( + builder + .get_expected_addressable_entity_by_account_hash(new_account) + .main_purse(), + ); + + assert_eq!(new_account_actual_purse_balance, new_account_fund_amount); +} + +#[ignore] +#[test] +fn should_fund_existing_account() { + let user_account = AccountHash::new([7u8; 32]); + + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let faucet_purse_fund_amount = U512::from(9_000_000_000u64); + let faucet_distributions_per_interval = 3; + + let mut helper = FaucetDeployHelper::default() + .with_faucet_purse_fund_amount(faucet_purse_fund_amount) + .with_faucet_available_amount(Some(faucet_purse_fund_amount)) + .with_faucet_distributions_per_interval(Some(faucet_distributions_per_interval)); + + builder + .transfer_and_commit(helper.fund_installer_request()) + .expect_success(); + + let user_account_initial_balance = U512::from(15_000_000_000u64); + + let fund_user_request = FundAccountRequestBuilder::new() + .with_target_account(user_account) + .with_fund_amount(user_account_initial_balance) + .build(); + + builder + .transfer_and_commit(fund_user_request) + .expect_success(); + + builder + .exec(helper.faucet_install_request()) + .expect_success() + .commit(); + + 
helper.query_and_set_faucet_contract_hash(&builder); + + builder + .exec(helper.faucet_config_request()) + .expect_success() + .commit(); + + builder + .exec( + helper + .new_faucet_fund_request_builder() + .with_user_account(user_account) + .with_payment_amount(user_account_initial_balance) + .build(), + ) + .expect_success() + .commit(); + + let exec_result = builder + .get_last_exec_result() + .expect("must have last exec result"); + let transfer = exec_result.transfers().first().expect("must have transfer"); + + let one_distribution = Ratio::new( + faucet_purse_fund_amount, + faucet_distributions_per_interval.into(), ) - .build(); + .to_integer(); + assert!( + matches!(transfer, Transfer::V2(v2) if v2.amount == one_distribution), + "{:?}", + transfer + ); +} + +#[ignore] +#[test] +fn should_allow_installer_to_fund_freely() { + let installer_account = AccountHash::new([1u8; 32]); + let user_account = AccountHash::new([2u8; 32]); + + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let faucet_fund_amount = U512::from(200_000_000_000u64); + let half_of_faucet_fund_amount = faucet_fund_amount / 2; + let assigned_distributions_per_time_interval = 2u64; + let mut helper = FaucetDeployHelper::new() + .with_installer_account(installer_account) + .with_installer_fund_amount(INSTALLER_FUND_AMOUNT.into()) + .with_faucet_purse_fund_amount(faucet_fund_amount) + .with_faucet_available_amount(Some(half_of_faucet_fund_amount)) + .with_faucet_distributions_per_interval(Some(assigned_distributions_per_time_interval)) + .with_faucet_time_interval(Some(10_000u64)); + + builder + .transfer_and_commit(helper.fund_installer_request()) + .expect_success(); + + builder + .exec(helper.faucet_install_request()) + .expect_success() + .commit(); + + helper.query_and_set_faucet_contract_hash(&builder); + + let faucet_contract_hash = get_faucet_entity_hash(&builder, installer_account); + let faucet_entity_key = 
Key::Hash(faucet_contract_hash.value()); + let faucet_purse = get_faucet_purse(&builder, installer_account); + + let faucet_purse_balance = builder.get_purse_balance(faucet_purse); + assert_eq!(faucet_purse_balance, faucet_fund_amount); + + let available_amount = query_stored_value::( + &mut builder, + faucet_entity_key, + [AVAILABLE_AMOUNT_NAMED_KEY.to_string()].into(), + ); + + // the available amount per interval should be zero until the installer calls + // the set_variable entrypoint to finish setup. + assert_eq!(available_amount, U512::zero()); - let mut builder = InMemoryWasmTestBuilder::default(); builder - .run_genesis(&*DEFAULT_RUN_GENESIS_REQUEST) - .exec(exec_request) + .exec(helper.faucet_config_request()) .expect_success() .commit(); - let account = builder - .get_account(NEW_ACCOUNT_ADDR) - .expect("should get account"); + let available_amount = query_stored_value::( + &mut builder, + faucet_entity_key, + [AVAILABLE_AMOUNT_NAMED_KEY.to_string()].into(), + ); + + assert_eq!(available_amount, half_of_faucet_fund_amount); + + let user_fund_amount = U512::from(3_000_000_000u64); + // This would only allow other callers to fund twice in this interval, + // but the installer can fund as many times as they want. 
+ let num_funds = 3; + + for _ in 0..num_funds { + let faucet_call_by_installer = helper + .new_faucet_fund_request_builder() + .with_installer_account(helper.installer_account()) + .with_arg_fund_amount(user_fund_amount) + .with_arg_target(user_account) + .build(); + + builder + .exec(faucet_call_by_installer) + .expect_success() + .commit(); + } - let account_purse = account.main_purse(); - let account_balance = builder.get_purse_balance(account_purse); + let faucet_purse_balance = builder.get_purse_balance(faucet_purse); assert_eq!( - account_balance, amount, - "faucet should have created account with requested amount" + faucet_purse_balance, + faucet_fund_amount - user_fund_amount * num_funds, + "faucet purse balance must match expected amount after {} faucet calls", + num_funds ); + + // check the balance of the user's main purse + let user_main_purse_balance_after = builder.get_purse_balance( + builder + .get_expected_addressable_entity_by_account_hash(user_account) + .main_purse(), + ); + + assert_eq!(user_main_purse_balance_after, user_fund_amount * num_funds); } #[ignore] #[test] -fn should_fail_if_already_funded() { - let amount = U512::from(1000); - let exec_request_1 = ExecuteRequestBuilder::standard( - *DEFAULT_ACCOUNT_ADDR, - FAUCET_CONTRACT, - runtime_args! 
{ ARG_TARGET => NEW_ACCOUNT_ADDR, ARG_AMOUNT => amount }, +fn should_not_fund_if_zero_distributions_per_interval() { + let installer_account = AccountHash::new([1u8; 32]); + let user_account = AccountHash::new([2u8; 32]); + + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + // Fund installer account + let fund_installer_account_request = FundAccountRequestBuilder::new() + .with_target_account(installer_account) + .with_fund_amount(INSTALLER_FUND_AMOUNT.into()) + .build(); + + builder + .transfer_and_commit(fund_installer_account_request) + .expect_success(); + + let faucet_fund_amount = U512::from(400_000_000_000_000u64); + + let installer_session_request = ExecuteRequestBuilder::standard( + installer_account, + FAUCET_INSTALLER_SESSION, + runtime_args! {ARG_ID => FAUCET_ID, ARG_AMOUNT => faucet_fund_amount}, ) .build(); - let exec_request_2 = ExecuteRequestBuilder::standard( - *DEFAULT_ACCOUNT_ADDR, - FAUCET_CONTRACT, - runtime_args! { ARG_TARGET => NEW_ACCOUNT_ADDR, ARG_AMOUNT => amount }, + + builder + .exec(installer_session_request) + .expect_success() + .commit(); + + let installer_call_faucet_request = ExecuteRequestBuilder::contract_call_by_name( + installer_account, + &format!("{}_{}", FAUCET_CONTRACT_NAMED_KEY, FAUCET_ID), + ENTRY_POINT_FAUCET, + runtime_args! 
{ARG_TARGET => user_account}, ) .build(); - let mut builder = InMemoryWasmTestBuilder::default(); + builder + .exec(installer_call_faucet_request) + .expect_failure() + .commit(); +} + +#[ignore] +#[test] +fn should_allow_funding_by_an_authorized_account() { + let installer_account = AccountHash::new([1u8; 32]); + + let authorized_account_public_key = { + let secret_key = + SecretKey::ed25519_from_bytes([2u8; 32]).expect("failed to construct secret key"); + PublicKey::from(&secret_key) + }; + + let authorized_account = authorized_account_public_key.to_account_hash(); + let user_account = AccountHash::new([3u8; 32]); + let faucet_fund_amount = U512::from(400_000_000_000_000u64); + let half_of_faucet_fund_amount = faucet_fund_amount / 2; + + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let mut helper = FaucetDeployHelper::new() + .with_installer_account(installer_account) + .with_installer_fund_amount(INSTALLER_FUND_AMOUNT.into()) + .with_faucet_purse_fund_amount(faucet_fund_amount) + .with_faucet_available_amount(Some(half_of_faucet_fund_amount)) + .with_faucet_distributions_per_interval(Some(2u64)) + .with_faucet_time_interval(Some(10_000u64)); + + builder + .transfer_and_commit(helper.fund_installer_request()) + .expect_success(); builder - .run_genesis(&*DEFAULT_RUN_GENESIS_REQUEST) - .exec(exec_request_1) + .exec(helper.faucet_install_request()) .expect_success() - .commit() - .exec(exec_request_2); // should fail + .commit(); - let error_msg = builder - .exec_error_message(1) - .expect("should have error message"); + helper.query_and_set_faucet_contract_hash(&builder); + + builder + .exec(helper.faucet_config_request()) + .expect_success() + .commit(); + + let installer_named_keys = builder + .get_entity_with_named_keys_by_account_hash(installer_account) + .expect("must have entity") + .named_keys() + .clone(); + + let faucet_named_key = installer_named_keys + .get(&format!("{}_{}", 
FAUCET_CONTRACT_NAMED_KEY, FAUCET_ID)) + .expect("failed to find faucet named key"); + + let hash = faucet_named_key.into_entity_hash().expect( + "must convert to entity hash\ + ", + ); + let key = Key::Hash(hash.value()); + + let maybe_authorized_account_public_key = builder + .query(None, key, &[AUTHORIZED_ACCOUNT_NAMED_KEY.to_string()]) + .expect("failed to find authorized account named key") + .as_cl_value() + .expect("failed to convert into cl value") + .clone() + .into_t::>() + .expect("failed to convert into optional public key"); + + assert_eq!(maybe_authorized_account_public_key, None::); + + let faucet_authorize_account_request = helper + .new_faucet_authorize_account_request_builder() + .with_authorized_user_public_key(Some(authorized_account_public_key.clone())) + .build(); + + builder + .exec(faucet_authorize_account_request) + .expect_success() + .commit(); + + let maybe_authorized_account_public_key = builder + .query(None, key, &[AUTHORIZED_ACCOUNT_NAMED_KEY.to_string()]) + .expect("failed to find authorized account named key") + .as_cl_value() + .expect("failed to convert into cl value") + .clone() + .into_t::>() + .expect("failed to convert into optional public key"); + + assert_eq!( + maybe_authorized_account_public_key, + Some(authorized_account_public_key.clone()) + ); + + let authorized_account_fund_amount = U512::from(10_000_000_000u64); + let faucet_fund_authorized_account_by_installer_request = helper + .new_faucet_fund_request_builder() + .with_arg_fund_amount(authorized_account_fund_amount) + .with_arg_target(authorized_account_public_key.to_account_hash()) + .build(); + + builder + .exec(faucet_fund_authorized_account_by_installer_request) + .expect_success() + .commit(); + + let user_fund_amount = U512::from(10_000_000_000u64); + let faucet_fund_user_by_authorized_account_request = helper + .new_faucet_fund_request_builder() + .with_authorized_account(authorized_account) + .with_arg_fund_amount(user_fund_amount) + 
.with_arg_target(user_account) + .with_payment_amount(user_fund_amount) + .build(); + + builder + .exec(faucet_fund_user_by_authorized_account_request) + .expect_success() + .commit(); + + let user_main_purse_balance_after = builder.get_purse_balance( + builder + .get_expected_addressable_entity_by_account_hash(user_account) + .main_purse(), + ); + assert_eq!(user_main_purse_balance_after, user_fund_amount); + + // A user cannot fund themselves if there is an authorized account. + let faucet_fund_by_user_request = helper + .new_faucet_fund_request_builder() + .with_user_account(user_account) + .with_payment_amount(user_fund_amount) + .build(); + + builder + .exec(faucet_fund_by_user_request) + .expect_failure() + .commit(); + + let exec_result = builder + .get_last_exec_result() + .expect("failed to get exec results"); + + let error = exec_result.error().unwrap(); assert!( - error_msg.contains(&format!("{:?}", ApiError::User(1))), - error_msg + matches!( + error, + engine_state::Error::Exec(ExecError::Revert(ApiError::User( + FAUCET_CALL_BY_USER_WITH_AUTHORIZED_ACCOUNT_SET + ))) + ), + "{:?}", + error, ); } + +#[ignore] +#[test] +fn faucet_costs() { + // This test will fail if execution costs vary. The expected costs should not be updated + // without understanding why the cost has changed. If the costs do change, it should be + // reflected in the "Costs by Entry Point" section of the faucet crate's README.md. 
+ const EXPECTED_FAUCET_INSTALL_COST: u64 = 160_442_504_927; + const EXPECTED_FAUCET_INSTALL_COST_ALT: u64 = 149_230_872_143; + + const EXPECTED_FAUCET_SET_VARIABLES_COST: u64 = 79_455_975; + + const EXPECTED_FAUCET_CALL_BY_INSTALLER_COST: u64 = 2_652_626_533; + + const EXPECTED_FAUCET_CALL_BY_USER_COST: u64 = 2_558_318_531; + + let installer_account = AccountHash::new([1u8; 32]); + let user_account: AccountHash = AccountHash::new([2u8; 32]); + + let chainspec = ChainspecConfig::from_chainspec_path(&*CHAINSPEC_SYMLINK) + .expect("must build chainspec configuration"); + let chainspec_config = chainspec + .with_fee_handling(FeeHandling::NoFee) + .with_refund_handling(RefundHandling::NoRefund) + .with_pricing_handling(PricingHandling::Fixed); + LmdbWasmTestBuilder::new_temporary_with_config(chainspec_config); + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let fund_installer_account_request = + TransferRequestBuilder::new(INSTALLER_FUND_AMOUNT, installer_account).build(); + + builder + .transfer_and_commit(fund_installer_account_request) + .expect_success(); + + let faucet_fund_amount = U512::from(400_000_000_000_000u64); + let installer_session_request = ExecuteRequestBuilder::standard( + installer_account, + FAUCET_INSTALLER_SESSION, + runtime_args! {ARG_ID => FAUCET_ID, ARG_AMOUNT => faucet_fund_amount }, + ) + .build(); + + builder + .exec(installer_session_request) + .expect_success() + .commit(); + + let faucet_install_cost = builder.last_exec_gas_consumed(); + + let assigned_time_interval = 10_000u64; + let assigned_distributions_per_interval = 2u64; + let deploy_item = DeployItemBuilder::new() + .with_address(installer_account) + .with_authorization_keys(&[installer_account]) + .with_stored_session_named_key( + &format!("{}_{}", FAUCET_CONTRACT_NAMED_KEY, FAUCET_ID), + ENTRY_POINT_SET_VARIABLES, + runtime_args! 
{ + ARG_AVAILABLE_AMOUNT => Some(faucet_fund_amount), + ARG_TIME_INTERVAL => Some(assigned_time_interval), + ARG_DISTRIBUTIONS_PER_INTERVAL => Some(assigned_distributions_per_interval) + }, + ) + .with_standard_payment(runtime_args! {ARG_AMOUNT => *DEFAULT_PAYMENT}) + .with_deploy_hash([3; 32]) + .build(); + + let installer_set_variable_request = + ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); + + builder + .exec(installer_set_variable_request) + .expect_success() + .commit(); + + let faucet_set_variables_cost = builder.last_exec_gas_consumed(); + + let user_fund_amount = U512::from(10_000_000_000u64); + + let deploy_item = DeployItemBuilder::new() + .with_address(installer_account) + .with_authorization_keys(&[installer_account]) + .with_stored_session_named_key( + &format!("{}_{}", FAUCET_CONTRACT_NAMED_KEY, FAUCET_ID), + ENTRY_POINT_FAUCET, + runtime_args! {ARG_TARGET => user_account, ARG_AMOUNT => user_fund_amount, ARG_ID => >::None}, + ) + .with_standard_payment(runtime_args! {ARG_AMOUNT => *DEFAULT_PAYMENT}) + .with_deploy_hash([4; 32]) + .build(); + + let faucet_call_by_installer = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); + + builder + .exec(faucet_call_by_installer) + .expect_success() + .commit(); + + let faucet_call_by_installer_cost = builder.last_exec_gas_consumed(); + + let faucet_contract_hash = get_faucet_entity_hash(&builder, installer_account); + + let deploy_item = DeployItemBuilder::new() + .with_address(user_account) + .with_authorization_keys(&[user_account]) + .with_stored_session_hash( + faucet_contract_hash, + ENTRY_POINT_FAUCET, + runtime_args! {ARG_TARGET => user_account, ARG_ID => >::None}, + ) + .with_standard_payment(runtime_args! 
{ARG_AMOUNT => user_fund_amount}) + .with_deploy_hash([4; 32]) + .build(); + + let faucet_call_by_user_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); + + builder + .exec(faucet_call_by_user_request) + .expect_success() + .commit(); + + let faucet_call_by_user_cost = builder.last_exec_gas_consumed(); + + let mut costs_as_expected = true; + let cost_64 = faucet_install_cost.value().as_u64(); + if cost_64 != EXPECTED_FAUCET_INSTALL_COST && cost_64 != EXPECTED_FAUCET_INSTALL_COST_ALT { + costs_as_expected = false; + eprintln!( + "faucet_install_cost wrong: expected: {}, got: {}", + EXPECTED_FAUCET_INSTALL_COST, + faucet_install_cost.value().as_u64() + ); + } + + if faucet_set_variables_cost.value().as_u64() != EXPECTED_FAUCET_SET_VARIABLES_COST { + costs_as_expected = false; + eprintln!( + "faucet_set_variables_cost wrong: expected: {}, got: {}", + EXPECTED_FAUCET_SET_VARIABLES_COST, + faucet_set_variables_cost.value().as_u64() + ); + } + + if faucet_call_by_installer_cost.value().as_u64() != EXPECTED_FAUCET_CALL_BY_INSTALLER_COST { + costs_as_expected = false; + eprintln!( + "faucet_call_by_installer_cost wrong: expected: {}, got: {}", + EXPECTED_FAUCET_CALL_BY_INSTALLER_COST, + faucet_call_by_installer_cost.value().as_u64() + ); + } + + if faucet_call_by_user_cost.value().as_u64() != EXPECTED_FAUCET_CALL_BY_USER_COST { + costs_as_expected = false; + eprintln!( + "faucet_call_by_user_cost wrong: expected: {}, got: {}", + EXPECTED_FAUCET_CALL_BY_USER_COST, + faucet_call_by_user_cost.value().as_u64() + ); + } + assert!(costs_as_expected); +} diff --git a/execution_engine_testing/tests/src/test/explorer/faucet_stored.rs b/execution_engine_testing/tests/src/test/explorer/faucet_stored.rs deleted file mode 100644 index bab350e804..0000000000 --- a/execution_engine_testing/tests/src/test/explorer/faucet_stored.rs +++ /dev/null @@ -1,122 +0,0 @@ -use casper_engine_test_support::{ - internal::{ - utils, ExecuteRequestBuilder, InMemoryWasmTestBuilder, 
DEFAULT_RUN_GENESIS_REQUEST, - }, - DEFAULT_ACCOUNT_ADDR, -}; -use casper_types::{account::AccountHash, runtime_args, ApiError, RuntimeArgs, U512}; - -const FAUCET: &str = "faucet"; -const CALL_FAUCET: &str = "call_faucet"; -const NEW_ACCOUNT_ADDR: AccountHash = AccountHash::new([99u8; 32]); -const ARG_TARGET: &str = "target"; -const ARG_AMOUNT: &str = "amount"; - -fn get_builder() -> InMemoryWasmTestBuilder { - let mut builder = InMemoryWasmTestBuilder::default(); - { - // first, store contract - let store_request = ExecuteRequestBuilder::standard( - *DEFAULT_ACCOUNT_ADDR, - &format!("{}_stored.wasm", FAUCET), - runtime_args! {}, - ) - .build(); - - builder.run_genesis(&*DEFAULT_RUN_GENESIS_REQUEST); - builder.exec_commit_finish(store_request); - } - builder -} - -#[ignore] -#[test] -fn should_get_funds_from_faucet_stored() { - let mut builder = get_builder(); - - let default_account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) - .expect("should have account"); - - let contract_hash = default_account - .named_keys() - .get(FAUCET) - .expect("contract_hash should exist") - .into_hash() - .expect("should be a hash"); - - let amount = U512::from(1000); - - // call stored faucet - let exec_request = ExecuteRequestBuilder::contract_call_by_hash( - *DEFAULT_ACCOUNT_ADDR, - contract_hash.into(), - CALL_FAUCET, - runtime_args! 
{ ARG_TARGET => NEW_ACCOUNT_ADDR, ARG_AMOUNT => amount }, - ) - .build(); - builder.exec(exec_request).expect_success().commit(); - - let account = builder - .get_account(NEW_ACCOUNT_ADDR) - .expect("should get account"); - - let account_purse = account.main_purse(); - let account_balance = builder.get_purse_balance(account_purse); - assert_eq!( - account_balance, amount, - "faucet should have created account with requested amount" - ); -} - -#[ignore] -#[test] -fn should_fail_if_already_funded() { - let mut builder = get_builder(); - - let default_account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) - .expect("should have account"); - - let contract_hash = default_account - .named_keys() - .get(FAUCET) - .expect("contract_hash should exist") - .into_hash() - .expect("should be a hash"); - - let amount = U512::from(1000); - - // call stored faucet - let exec_request_1 = ExecuteRequestBuilder::contract_call_by_hash( - *DEFAULT_ACCOUNT_ADDR, - contract_hash.into(), - CALL_FAUCET, - runtime_args! { ARG_TARGET => NEW_ACCOUNT_ADDR, ARG_AMOUNT => amount }, - ) - .build(); - - builder.exec(exec_request_1).expect_success().commit(); - - // call stored faucet again; should error - let exec_request_2 = ExecuteRequestBuilder::contract_call_by_hash( - *DEFAULT_ACCOUNT_ADDR, - contract_hash.into(), - CALL_FAUCET, - runtime_args! 
{ ARG_TARGET => NEW_ACCOUNT_ADDR, ARG_AMOUNT => amount }, - ) - .build(); - - builder.exec(exec_request_2); - - let exec_response = builder - .get_exec_result(2) - .expect("Expected to be called after run()"); - - let error_message = utils::get_error_message(exec_response); - assert!( - error_message.contains(&format!("{:?}", ApiError::User(1))), - "should have reverted with user error 1 (already funded) but received {}", - error_message, - ); -} diff --git a/execution_engine_testing/tests/src/test/explorer/faucet_test_helpers.rs b/execution_engine_testing/tests/src/test/explorer/faucet_test_helpers.rs new file mode 100644 index 0000000000..16f4c7ceb5 --- /dev/null +++ b/execution_engine_testing/tests/src/test/explorer/faucet_test_helpers.rs @@ -0,0 +1,573 @@ +use rand::Rng; + +use casper_engine_test_support::{ + DeployItemBuilder, EntityWithNamedKeys, ExecuteRequest, ExecuteRequestBuilder, + LmdbWasmTestBuilder, TransferRequestBuilder, DEFAULT_PAYMENT, +}; +use casper_storage::data_access_layer::TransferRequest; +use casper_types::{ + account::AccountHash, bytesrepr::FromBytes, runtime_args, AddressableEntityHash, CLTyped, Key, + PublicKey, URef, U512, +}; + +use super::{ + ARG_AMOUNT, ARG_AVAILABLE_AMOUNT, ARG_DISTRIBUTIONS_PER_INTERVAL, ARG_ID, ARG_TARGET, + ARG_TIME_INTERVAL, ENTRY_POINT_AUTHORIZE_TO, ENTRY_POINT_FAUCET, ENTRY_POINT_SET_VARIABLES, + FAUCET_CONTRACT_NAMED_KEY, FAUCET_FUND_AMOUNT, FAUCET_ID, FAUCET_INSTALLER_SESSION, + FAUCET_PURSE_NAMED_KEY, INSTALLER_ACCOUNT, INSTALLER_FUND_AMOUNT, +}; + +#[derive(Clone, Copy, Debug)] +pub struct FundAccountRequestBuilder { + target_account: AccountHash, + fund_amount: U512, + fund_id: Option, +} + +impl FundAccountRequestBuilder { + pub fn new() -> Self { + Self::default() + } + + pub fn with_target_account(mut self, account_hash: AccountHash) -> Self { + self.target_account = account_hash; + self + } + + pub fn with_fund_amount(mut self, fund_amount: U512) -> Self { + self.fund_amount = fund_amount; + self + 
} + + pub fn with_fund_id(mut self, fund_id: Option) -> Self { + self.fund_id = fund_id; + self + } + + pub fn build(&self) -> TransferRequest { + let mut builder = TransferRequestBuilder::new(self.fund_amount, self.target_account); + if let Some(id) = self.fund_id { + builder = builder.with_transfer_id(id); + } + builder.build() + } +} + +impl Default for FundAccountRequestBuilder { + fn default() -> Self { + Self { + target_account: INSTALLER_ACCOUNT, + fund_amount: U512::from(INSTALLER_FUND_AMOUNT), + fund_id: None, + } + } +} + +#[derive(Clone, Debug)] +pub struct FaucetInstallSessionRequestBuilder { + installer_account: AccountHash, + faucet_installer_session: String, + faucet_id: u64, + faucet_fund_amount: U512, +} + +impl FaucetInstallSessionRequestBuilder { + pub fn new() -> Self { + Self::default() + } + + pub fn with_installer_account(mut self, installer_account: AccountHash) -> Self { + self.installer_account = installer_account; + self + } + + pub fn with_faucet_installer_session(mut self, installer_session: &str) -> Self { + self.faucet_installer_session = installer_session.to_string(); + self + } + + pub fn with_faucet_id(mut self, faucet_id: u64) -> Self { + self.faucet_id = faucet_id; + self + } + + pub fn with_faucet_fund_amount(mut self, faucet_fund_amount: U512) -> Self { + self.faucet_fund_amount = faucet_fund_amount; + self + } + + pub fn build(&self) -> ExecuteRequest { + ExecuteRequestBuilder::standard( + self.installer_account, + &self.faucet_installer_session, + runtime_args! 
{ + ARG_ID => self.faucet_id, + ARG_AMOUNT => self.faucet_fund_amount + }, + ) + .build() + } +} + +impl Default for FaucetInstallSessionRequestBuilder { + fn default() -> Self { + Self { + installer_account: INSTALLER_ACCOUNT, + faucet_installer_session: FAUCET_INSTALLER_SESSION.to_string(), + faucet_id: FAUCET_ID, + faucet_fund_amount: FAUCET_FUND_AMOUNT.into(), + } + } +} + +#[derive(Debug, Copy, Clone)] +pub struct FaucetConfigRequestBuilder { + installer_account: AccountHash, + faucet_contract_hash: Option, + available_amount: Option, + time_interval: Option, + distributions_per_interval: Option, +} + +impl FaucetConfigRequestBuilder { + pub fn with_installer_account(mut self, installer_account: AccountHash) -> Self { + self.installer_account = installer_account; + self + } + + pub fn with_faucet_contract_hash(mut self, contract_hash: AddressableEntityHash) -> Self { + self.faucet_contract_hash = Some(contract_hash); + self + } + + pub fn with_available_amount(mut self, available_amount: Option) -> Self { + self.available_amount = available_amount; + self + } + + pub fn with_time_interval(mut self, time_interval: Option) -> Self { + self.time_interval = time_interval; + self + } + + pub fn with_distributions_per_interval( + mut self, + distributions_per_interval: Option, + ) -> Self { + self.distributions_per_interval = distributions_per_interval; + self + } + + pub fn build(&self) -> ExecuteRequest { + ExecuteRequestBuilder::contract_call_by_hash( + self.installer_account, + self.faucet_contract_hash + .expect("must supply faucet contract hash"), + ENTRY_POINT_SET_VARIABLES, + runtime_args! 
{ + ARG_AVAILABLE_AMOUNT => self.available_amount, + ARG_TIME_INTERVAL => self.time_interval, + ARG_DISTRIBUTIONS_PER_INTERVAL => self.distributions_per_interval + }, + ) + .build() + } +} + +impl Default for FaucetConfigRequestBuilder { + fn default() -> Self { + Self { + installer_account: INSTALLER_ACCOUNT, + faucet_contract_hash: None, + available_amount: None, + time_interval: None, + distributions_per_interval: None, + } + } +} + +pub struct FaucetAuthorizeAccountRequestBuilder { + installer_account: AccountHash, + authorized_account_public_key: Option, + faucet_contract_hash: Option, +} + +impl FaucetAuthorizeAccountRequestBuilder { + pub fn new() -> FaucetAuthorizeAccountRequestBuilder { + FaucetAuthorizeAccountRequestBuilder::default() + } + + pub fn with_faucet_contract_hash( + mut self, + faucet_contract_hash: Option, + ) -> Self { + self.faucet_contract_hash = faucet_contract_hash; + self + } + + pub fn with_installer_account(mut self, installer_account: AccountHash) -> Self { + self.installer_account = installer_account; + self + } + + pub fn with_authorized_user_public_key( + mut self, + authorized_account_public_key: Option, + ) -> Self { + self.authorized_account_public_key = authorized_account_public_key; + self + } + + pub fn build(self) -> ExecuteRequest { + ExecuteRequestBuilder::contract_call_by_hash( + self.installer_account, + self.faucet_contract_hash + .expect("must supply faucet contract hash"), + ENTRY_POINT_AUTHORIZE_TO, + runtime_args! 
{ARG_TARGET => self.authorized_account_public_key}, + ) + .build() + } +} + +impl Default for FaucetAuthorizeAccountRequestBuilder { + fn default() -> Self { + Self { + installer_account: INSTALLER_ACCOUNT, + authorized_account_public_key: None, + faucet_contract_hash: None, + } + } +} + +enum FaucetCallerAccount { + Installer(AccountHash), + Authorized(AccountHash), + User(AccountHash), +} + +impl FaucetCallerAccount { + pub fn account_hash(&self) -> AccountHash { + match self { + FaucetCallerAccount::Installer(account_hash) + | FaucetCallerAccount::Authorized(account_hash) + | FaucetCallerAccount::User(account_hash) => *account_hash, + } + } +} + +pub struct FaucetFundRequestBuilder { + faucet_contract_hash: Option, + caller_account: FaucetCallerAccount, + arg_target: Option, + arg_fund_amount: Option, + arg_id: Option, + payment_amount: U512, + block_time: Option, +} + +impl FaucetFundRequestBuilder { + pub fn new() -> Self { + Self::default() + } + + pub fn with_installer_account(mut self, installer_account: AccountHash) -> Self { + self.caller_account = FaucetCallerAccount::Installer(installer_account); + self + } + + pub fn with_authorized_account(mut self, authorized_account: AccountHash) -> Self { + self.caller_account = FaucetCallerAccount::Authorized(authorized_account); + self + } + + pub fn with_user_account(mut self, user_account: AccountHash) -> Self { + self.caller_account = FaucetCallerAccount::User(user_account); + self + } + + pub fn with_arg_fund_amount(mut self, fund_amount: U512) -> Self { + self.arg_fund_amount = Some(fund_amount); + self + } + + pub fn with_arg_target(mut self, target: AccountHash) -> Self { + self.arg_target = Some(target); + self + } + + pub fn with_faucet_contract_hash( + mut self, + faucet_contract_hash: AddressableEntityHash, + ) -> Self { + self.faucet_contract_hash = Some(faucet_contract_hash); + self + } + + pub fn with_payment_amount(mut self, payment_amount: U512) -> Self { + self.payment_amount = payment_amount; + 
self + } + + pub fn build(self) -> ExecuteRequest { + let mut rng = rand::thread_rng(); + + let deploy_item = DeployItemBuilder::new() + .with_address(self.caller_account.account_hash()) + .with_authorization_keys(&[self.caller_account.account_hash()]) + .with_stored_session_hash( + self.faucet_contract_hash + .expect("must supply faucet contract hash"), + ENTRY_POINT_FAUCET, + match self.caller_account { + FaucetCallerAccount::Installer(_) + | FaucetCallerAccount::Authorized(_) => runtime_args! { + ARG_TARGET => self.arg_target.expect("must supply arg target when calling as installer or authorized account"), + ARG_AMOUNT => self.arg_fund_amount.expect("must supply arg amount when calling as installer or authorized account"), + ARG_ID => self.arg_id + }, + FaucetCallerAccount::User(_) => runtime_args! { + ARG_ID => self.arg_id + }, + }, + ) + .with_standard_payment(runtime_args! {ARG_AMOUNT => self.payment_amount}) + .with_deploy_hash(rng.gen()) + .build(); + + match self.block_time { + Some(block_time) => ExecuteRequestBuilder::from_deploy_item(&deploy_item) + .with_block_time(block_time) + .build(), + None => ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(), + } + } +} + +impl Default for FaucetFundRequestBuilder { + fn default() -> Self { + Self { + arg_fund_amount: None, + payment_amount: *DEFAULT_PAYMENT, + faucet_contract_hash: None, + caller_account: FaucetCallerAccount::Installer(INSTALLER_ACCOUNT), + arg_target: None, + arg_id: None, + block_time: None, + } + } +} + +pub fn query_stored_value( + builder: &mut LmdbWasmTestBuilder, + base_key: Key, + path: Vec, +) -> T { + builder + .query(None, base_key, &path) + .expect("must have stored value") + .as_cl_value() + .cloned() + .expect("must have cl value") + .into_t::() + .expect("must get value") +} + +pub fn get_faucet_entity_hash( + builder: &LmdbWasmTestBuilder, + installer_account: AccountHash, +) -> AddressableEntityHash { + builder + 
.get_entity_with_named_keys_by_account_hash(installer_account) + .unwrap() + .named_keys() + .get(&format!("{}_{}", FAUCET_CONTRACT_NAMED_KEY, FAUCET_ID)) + .cloned() + .and_then(Key::into_entity_hash_addr) + .map(AddressableEntityHash::new) + .expect("failed to find faucet contract") +} + +pub fn get_faucet_entity( + builder: &LmdbWasmTestBuilder, + installer_account: AccountHash, +) -> EntityWithNamedKeys { + builder + .get_entity_with_named_keys_by_entity_hash(get_faucet_entity_hash( + builder, + installer_account, + )) + .expect("failed to find faucet contract") +} + +pub fn get_faucet_purse(builder: &LmdbWasmTestBuilder, installer_account: AccountHash) -> URef { + get_faucet_entity(builder, installer_account) + .named_keys() + .get(FAUCET_PURSE_NAMED_KEY) + .cloned() + .and_then(Key::into_uref) + .expect("failed to find faucet purse") +} + +pub struct FaucetDeployHelper { + installer_account: AccountHash, + installer_fund_amount: U512, + installer_fund_id: Option, + authorized_user_public_key: Option, + faucet_purse_fund_amount: U512, + faucet_installer_session: String, + faucet_id: u64, + faucet_contract_hash: Option, + faucet_distributions_per_interval: Option, + faucet_available_amount: Option, + faucet_time_interval: Option, + fund_account_request_builder: FundAccountRequestBuilder, + pub faucet_install_session_request_builder: FaucetInstallSessionRequestBuilder, + pub faucet_config_request_builder: FaucetConfigRequestBuilder, +} + +impl FaucetDeployHelper { + pub fn new() -> Self { + Self::default() + } + + pub fn installer_account(&self) -> AccountHash { + self.installer_account + } + + pub fn with_installer_account(mut self, installer_account: AccountHash) -> Self { + self.installer_account = installer_account; + self + } + + pub fn with_installer_fund_amount(mut self, installer_fund_amount: U512) -> Self { + self.installer_fund_amount = installer_fund_amount; + self + } + + pub fn with_faucet_purse_fund_amount(mut self, faucet_purse_fund_amount: U512) 
-> Self { + self.faucet_purse_fund_amount = faucet_purse_fund_amount; + self + } + + pub fn with_faucet_available_amount(mut self, available_amount: Option) -> Self { + self.faucet_available_amount = available_amount; + self + } + + pub fn with_faucet_distributions_per_interval( + mut self, + distributions_per_interval: Option, + ) -> Self { + self.faucet_distributions_per_interval = distributions_per_interval; + self + } + + pub fn with_faucet_time_interval(mut self, time_interval_ms: Option) -> Self { + self.faucet_time_interval = time_interval_ms; + self + } + + pub fn query_and_set_faucet_contract_hash( + &mut self, + builder: &LmdbWasmTestBuilder, + ) -> AddressableEntityHash { + let contract_hash = get_faucet_entity_hash(builder, self.installer_account()); + self.faucet_contract_hash = Some(contract_hash); + + contract_hash + } + + pub fn query_faucet_purse(&self, builder: &LmdbWasmTestBuilder) -> URef { + get_faucet_purse(builder, self.installer_account()) + } + + pub fn query_faucet_purse_balance(&self, builder: &LmdbWasmTestBuilder) -> U512 { + let faucet_purse = self.query_faucet_purse(builder); + builder.get_purse_balance(faucet_purse) + } + + pub fn faucet_purse_fund_amount(&self) -> U512 { + self.faucet_purse_fund_amount + } + + pub fn faucet_contract_hash(&self) -> Option { + self.faucet_contract_hash + } + + pub fn faucet_distributions_per_interval(&self) -> Option { + self.faucet_distributions_per_interval + } + + pub fn faucet_time_interval(&self) -> Option { + self.faucet_time_interval + } + + pub fn fund_installer_request(&self) -> TransferRequest { + self.fund_account_request_builder + .with_target_account(self.installer_account) + .with_fund_amount(self.installer_fund_amount) + .with_fund_id(self.installer_fund_id) + .build() + } + + pub fn faucet_install_request(&self) -> ExecuteRequest { + self.faucet_install_session_request_builder + .clone() + .with_installer_account(self.installer_account) + .with_faucet_id(self.faucet_id) + 
.with_faucet_fund_amount(self.faucet_purse_fund_amount) + .with_faucet_installer_session(&self.faucet_installer_session) + .build() + } + + pub fn faucet_config_request(&self) -> ExecuteRequest { + self.faucet_config_request_builder + .with_installer_account(self.installer_account()) + .with_faucet_contract_hash( + self.faucet_contract_hash() + .expect("must supply faucet contract hash"), + ) + .with_distributions_per_interval(self.faucet_distributions_per_interval) + .with_available_amount(self.faucet_available_amount) + .with_time_interval(self.faucet_time_interval) + .build() + } + + pub fn new_faucet_fund_request_builder(&self) -> FaucetFundRequestBuilder { + FaucetFundRequestBuilder::new().with_faucet_contract_hash( + self.faucet_contract_hash() + .expect("must supply faucet contract hash"), + ) + } + + pub fn new_faucet_authorize_account_request_builder( + &self, + ) -> FaucetAuthorizeAccountRequestBuilder { + FaucetAuthorizeAccountRequestBuilder::new() + .with_installer_account(self.installer_account) + .with_authorized_user_public_key(self.authorized_user_public_key.clone()) + .with_faucet_contract_hash(self.faucet_contract_hash) + } +} + +impl Default for FaucetDeployHelper { + fn default() -> Self { + Self { + installer_fund_amount: U512::from(INSTALLER_FUND_AMOUNT), + installer_account: INSTALLER_ACCOUNT, + installer_fund_id: None, + authorized_user_public_key: None, + faucet_installer_session: FAUCET_INSTALLER_SESSION.to_string(), + faucet_id: FAUCET_ID, + faucet_purse_fund_amount: U512::from(FAUCET_FUND_AMOUNT), + faucet_contract_hash: None, + faucet_distributions_per_interval: None, + faucet_available_amount: None, + faucet_time_interval: None, + fund_account_request_builder: Default::default(), + faucet_install_session_request_builder: Default::default(), + faucet_config_request_builder: Default::default(), + } + } +} diff --git a/execution_engine_testing/tests/src/test/explorer/mod.rs b/execution_engine_testing/tests/src/test/explorer/mod.rs index 
56fbcfc078..f37fa133be 100644 --- a/execution_engine_testing/tests/src/test/explorer/mod.rs +++ b/execution_engine_testing/tests/src/test/explorer/mod.rs @@ -1,2 +1,35 @@ mod faucet; -mod faucet_stored; +pub mod faucet_test_helpers; + +use casper_types::account::AccountHash; + +// Test constants. +pub const FAUCET_INSTALLER_SESSION: &str = "faucet_stored.wasm"; +pub const FAUCET_CONTRACT_NAMED_KEY: &str = "faucet"; +pub const INSTALLER_FUND_AMOUNT: u64 = 500_000_000_000_000; +pub const TWO_HOURS_AS_MILLIS: u64 = 7_200_000; +pub const FAUCET_ID: u64 = 1337; +pub const INSTALLER_ACCOUNT: AccountHash = AccountHash::new([1u8; 32]); +pub const FAUCET_FUND_AMOUNT: u64 = 500_000u64; +pub const FAUCET_TIME_INTERVAL: u64 = 10_000; + +// contract args and entry points. +pub const ARG_TARGET: &str = "target"; +pub const ARG_AMOUNT: &str = "amount"; +pub const ARG_ID: &str = "id"; +pub const ARG_AVAILABLE_AMOUNT: &str = "available_amount"; +pub const ARG_TIME_INTERVAL: &str = "time_interval"; +pub const ARG_DISTRIBUTIONS_PER_INTERVAL: &str = "distributions_per_interval"; +pub const ENTRY_POINT_FAUCET: &str = "call_faucet"; +pub const ENTRY_POINT_SET_VARIABLES: &str = "set_variables"; +pub const ENTRY_POINT_AUTHORIZE_TO: &str = "authorize_to"; + +// stored contract named keys. 
+pub const AVAILABLE_AMOUNT_NAMED_KEY: &str = "available_amount"; +pub const TIME_INTERVAL_NAMED_KEY: &str = "time_interval"; +pub const LAST_DISTRIBUTION_TIME_NAMED_KEY: &str = "last_distribution_time"; +pub const FAUCET_PURSE_NAMED_KEY: &str = "faucet_purse"; +pub const INSTALLER_NAMED_KEY: &str = "installer"; +pub const DISTRIBUTIONS_PER_INTERVAL_NAMED_KEY: &str = "distributions_per_interval"; +pub const REMAINING_REQUESTS_NAMED_KEY: &str = "remaining_requests"; +pub const AUTHORIZED_ACCOUNT_NAMED_KEY: &str = "authorized_account"; diff --git a/execution_engine_testing/tests/src/test/gas_counter.rs b/execution_engine_testing/tests/src/test/gas_counter.rs deleted file mode 100644 index 515209a7bf..0000000000 --- a/execution_engine_testing/tests/src/test/gas_counter.rs +++ /dev/null @@ -1,261 +0,0 @@ -use assert_matches::assert_matches; -use parity_wasm::{ - builder, - elements::{BlockType, Instruction, Instructions}, -}; - -use casper_engine_test_support::{ - internal::{ - DeployItemBuilder, ExecuteRequestBuilder, InMemoryWasmTestBuilder, ARG_AMOUNT, - DEFAULT_PAYMENT, DEFAULT_RUN_GENESIS_REQUEST, DEFAULT_WASM_CONFIG, - }, - DEFAULT_ACCOUNT_ADDR, -}; -use casper_execution_engine::{ - core::engine_state::Error, - shared::{gas::Gas, wasm_prep::PreprocessingError}, -}; -use casper_types::{contracts::DEFAULT_ENTRY_POINT_NAME, runtime_args, RuntimeArgs}; - -/// Creates minimal session code that does nothing -fn make_minimal_do_nothing() -> Vec { - let module = builder::module() - .function() - // A signature with 0 params and no return type - .signature() - .build() - .body() - .build() - .build() - // Export above function - .export() - .field(DEFAULT_ENTRY_POINT_NAME) - .build() - // Memory section is mandatory - .memory() - .build() - .build(); - parity_wasm::serialize(module).expect("should serialize") -} - -/// Prepare malicious payload with amount of opcodes that could potentially overflow injected gas -/// counter. 
-fn make_gas_counter_overflow() -> Vec { - let opcode_costs = DEFAULT_WASM_CONFIG.opcode_costs(); - - // Create a lot of `nop` opcodes to potentially overflow gas injector's batching counter. - let upper_bound = (u32::max_value() as usize / opcode_costs.nop as usize) + 1; - - let instructions = { - let mut instructions = vec![Instruction::Nop; upper_bound]; - instructions.push(Instruction::End); - Instructions::new(instructions) - }; - - let module = builder::module() - .function() - // A signature with 0 params and no return type - .signature() - .build() - .body() - // Generated instructions for our entrypoint - .with_instructions(instructions) - .build() - .build() - // Export above function - .export() - .field(DEFAULT_ENTRY_POINT_NAME) - .build() - // Memory section is mandatory - .memory() - .build() - .build(); - parity_wasm::serialize(module).expect("should serialize") -} - -/// Creates session code with opcodes -fn make_session_code_with(instructions: Vec) -> Vec { - let module = builder::module() - .function() - // A signature with 0 params and no return type - .signature() - .build() - .body() - // Generated instructions for our entrypoint - .with_instructions(Instructions::new(instructions)) - .build() - .build() - // Export above function - .export() - .field(DEFAULT_ENTRY_POINT_NAME) - .build() - // Memory section is mandatory - .memory() - .build() - .build(); - parity_wasm::serialize(module).expect("should serialize") -} - -#[ignore] -#[test] -fn should_fail_to_overflow_gas_counter() { - let mut builder = InMemoryWasmTestBuilder::default(); - - let session_bytes = make_gas_counter_overflow(); - - let exec_request = { - let deploy_item = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_session_bytes(session_bytes, RuntimeArgs::default()) - .with_empty_payment_bytes(runtime_args! 
{ - ARG_AMOUNT => *DEFAULT_PAYMENT - }) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) - .with_deploy_hash([42; 32]) - .build(); - ExecuteRequestBuilder::from_deploy_item(deploy_item).build() - }; - - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); - - builder.exec(exec_request).commit(); - - let responses = builder.get_exec_result(0).expect("should have response"); - let response = responses.get(0).expect("should have first element"); - - let lhs = response.as_error().expect("should have error"); - assert_matches!( - lhs, - Error::WasmPreprocessing(PreprocessingError::OperationForbiddenByGasRules) - ); -} - -#[ignore] -#[test] -fn should_correctly_measure_gas_for_opcodes() { - let opcode_costs = DEFAULT_WASM_CONFIG.opcode_costs(); - - const GROW_PAGES: u32 = 1; - - // A vector of expected cost of given WASM instruction. - // First element of the tuple represents and option where Some case represents metered - // instruction and None an instruction that's not accounted for. - // - // The idea here is to execute hand written WASM and compare the execution result's gas counter - // with the expected gathered from here. 
- let opcodes = vec![ - (Some(opcode_costs.nop), Instruction::Nop), - ( - Some(opcode_costs.current_memory), - Instruction::CurrentMemory(0), - ), // Push size to stack - (Some(opcode_costs.op_const), Instruction::I32Const(10)), - (Some(opcode_costs.mul), Instruction::I32Mul), // memory.size * 10 - (Some(opcode_costs.op_const), Instruction::I32Const(11)), - (Some(opcode_costs.add), Instruction::I32Add), - (Some(opcode_costs.op_const), Instruction::I32Const(12)), - (Some(opcode_costs.add), Instruction::I32Sub), - (Some(opcode_costs.op_const), Instruction::I32Const(13)), - (Some(opcode_costs.div), Instruction::I32DivU), - (Some(opcode_costs.op_const), Instruction::I32Const(3)), - (Some(opcode_costs.bit), Instruction::I32Shl), // x<<3 == x*(2*3) - // Store computation - (Some(opcode_costs.op_const), Instruction::I32Const(0)), // offset - (Some(opcode_costs.store), Instruction::I32Store(0, 4)), /* Store `memory.size * 10` on - * the heap */ - // Grow by N pages - ( - Some(opcode_costs.op_const), - Instruction::I32Const(GROW_PAGES as i32), - ), - // memory.grow is metered by the number of pages - ( - Some(opcode_costs.grow_memory * (GROW_PAGES + 1)), - Instruction::GrowMemory(0), - ), - (Some(opcode_costs.op_const), Instruction::I32Const(0)), - (Some(opcode_costs.store), Instruction::I32Store(0, 4)), /* Store `grow_memory` result - * whatever it is */ - // if 0 { nop } else { nop; nop; } - (Some(opcode_costs.op_const), Instruction::I32Const(0)), - ( - Some(opcode_costs.control_flow), - Instruction::If(BlockType::NoResult), - ), - (None, Instruction::Nop), - (None, Instruction::Else), - // else clause is accounted for only - (Some(opcode_costs.nop), Instruction::Nop), - (Some(opcode_costs.nop), Instruction::Nop), - (None, Instruction::End), - // 0 == 1 - (Some(opcode_costs.op_const), Instruction::I32Const(0)), - (Some(opcode_costs.op_const), Instruction::I32Const(1)), - (Some(opcode_costs.integer_comparison), Instruction::I32Eqz), - (Some(opcode_costs.store), 
Instruction::I32Store(0, 4)), /* Store `eqz` result - * whatever it is */ - // i32 -> i64 - (Some(opcode_costs.op_const), Instruction::I32Const(123)), - (Some(opcode_costs.conversion), Instruction::I64ExtendSI32), - (Some(opcode_costs.control_flow), Instruction::Drop), /* Discard the result */ - // Sentinel instruction that's required to be present but it's not accounted for - (None, Instruction::End), - ]; - - let instructions = opcodes.iter().map(|(_, instr)| instr.clone()).collect(); - let accounted_opcodes: Vec<_> = opcodes.iter().filter_map(|(cost, _)| *cost).collect(); - - let session_bytes = make_session_code_with(instructions); - - let exec_request = { - // NOTE: We use computed "do nothing" WASM module because it turns out "do_nothing" in - // AssemblyScript actually does "nop" which really "does something": (func (;10;) - // (type 4) nop) - - let do_nothing_bytes = make_minimal_do_nothing(); - - let deploy_item = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_session_bytes(do_nothing_bytes, RuntimeArgs::default()) - .with_empty_payment_bytes(runtime_args! { - ARG_AMOUNT => *DEFAULT_PAYMENT - }) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) - .with_deploy_hash([43; 32]) - .build(); - ExecuteRequestBuilder::from_deploy_item(deploy_item).build() - }; - - let mut builder = InMemoryWasmTestBuilder::default(); - - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); - - let payment_cost = { - let mut forked_builder = builder.clone(); - forked_builder.exec(exec_request).commit().expect_success(); - forked_builder.last_exec_gas_cost() - }; - - let exec_request = { - let deploy_item = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_session_bytes(session_bytes, RuntimeArgs::default()) - .with_empty_payment_bytes(runtime_args! 
{ - ARG_AMOUNT => *DEFAULT_PAYMENT - }) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) - .with_deploy_hash([42; 32]) - .build(); - ExecuteRequestBuilder::from_deploy_item(deploy_item).build() - }; - - builder.exec(exec_request).commit().expect_success(); - - let gas_cost = builder.last_exec_gas_cost() - payment_cost; - let expected_cost = accounted_opcodes.clone().into_iter().map(Gas::from).sum(); - assert_eq!( - gas_cost, expected_cost, - "accounted costs {:?}", - accounted_opcodes - ); -} diff --git a/execution_engine_testing/tests/src/test/get_balance.rs b/execution_engine_testing/tests/src/test/get_balance.rs index 65030eafa6..0266c1f88f 100644 --- a/execution_engine_testing/tests/src/test/get_balance.rs +++ b/execution_engine_testing/tests/src/test/get_balance.rs @@ -1,23 +1,20 @@ use once_cell::sync::Lazy; use casper_engine_test_support::{ - internal::{ExecuteRequestBuilder, InMemoryWasmTestBuilder, DEFAULT_RUN_GENESIS_REQUEST}, - DEFAULT_ACCOUNT_ADDR, + LmdbWasmTestBuilder, TransferRequestBuilder, LOCAL_GENESIS_REQUEST, +}; +use casper_storage::{ + data_access_layer::BalanceIdentifier, + tracking_copy::{self, ValidationError}, }; -use casper_execution_engine::{core, core::ValidationError, shared::newtypes::Blake2bHash}; use casper_types::{ - account::AccountHash, runtime_args, AccessRights, Key, PublicKey, RuntimeArgs, SecretKey, URef, + account::AccountHash, AccessRights, Digest, Key, ProtocolVersion, PublicKey, SecretKey, URef, U512, }; -const TRANSFER_ARG_TARGET: &str = "target"; -const TRANSFER_ARG_AMOUNT: &str = "amount"; -const TRANSFER_ARG_ID: &str = "id"; - static ALICE_KEY: Lazy = Lazy::new(|| { - SecretKey::ed25519_from_bytes([3; SecretKey::ED25519_LENGTH]) - .unwrap() - .into() + let secret_key = SecretKey::ed25519_from_bytes([3; SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) }); static ALICE_ADDR: Lazy = Lazy::new(|| AccountHash::from(&*ALICE_KEY)); @@ -26,31 +23,127 @@ static TRANSFER_AMOUNT_1: Lazy = Lazy::new(|| 
U512::from(100_000_000)); #[ignore] #[test] fn get_balance_should_work() { - let mut builder = InMemoryWasmTestBuilder::default(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); - - let transfer_request = ExecuteRequestBuilder::transfer( - *DEFAULT_ACCOUNT_ADDR, - runtime_args! { - TRANSFER_ARG_TARGET => *ALICE_ADDR, - TRANSFER_ARG_AMOUNT => *TRANSFER_AMOUNT_1, - TRANSFER_ARG_ID => >::None, - }, + let protocol_version = ProtocolVersion::V2_0_0; + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let block_time = 1_000_000; + let transfer_request = TransferRequestBuilder::new(*TRANSFER_AMOUNT_1, *ALICE_ADDR) + .with_block_time(block_time) + .build(); + + builder + .transfer_and_commit(transfer_request) + .expect_success(); + + let alice_account = builder + .get_entity_by_account_hash(*ALICE_ADDR) + .expect("should have Alice's account"); + + let alice_main_purse = alice_account.main_purse(); + + let alice_balance_result = builder.get_purse_balance_result_with_proofs( + protocol_version, + BalanceIdentifier::Purse(alice_main_purse), + ); + + let alice_balance = alice_balance_result + .available_balance() + .cloned() + .expect("should have motes"); + + assert_eq!(alice_balance, *TRANSFER_AMOUNT_1); + + let state_root_hash = builder.get_post_state_hash(); + + let proofs_result = alice_balance_result + .proofs_result() + .expect("should have proofs result"); + let balance_proof = proofs_result + .total_balance_proof() + .expect("should have proofs") + .clone(); + + assert!(tracking_copy::validate_balance_proof( + &state_root_hash, + &balance_proof, + alice_main_purse.into(), + &alice_balance, ) - .build(); + .is_ok()); - builder.exec(transfer_request).commit().expect_success(); + let bogus_key = Key::Hash([1u8; 32]); + assert_eq!( + tracking_copy::validate_balance_proof( + &state_root_hash, + &balance_proof, + bogus_key.to_owned(), + &alice_balance, + ), + Err(ValidationError::KeyIsNotAURef(bogus_key)) + ); 
+ + let bogus_uref: Key = Key::URef(URef::new([3u8; 32], AccessRights::READ_ADD_WRITE)); + assert_eq!( + tracking_copy::validate_balance_proof( + &state_root_hash, + &balance_proof, + bogus_uref, + &alice_balance, + ), + Err(ValidationError::UnexpectedKey) + ); + + let bogus_hash = Digest::hash([5u8; 32]); + assert_eq!( + tracking_copy::validate_balance_proof( + &bogus_hash, + &balance_proof, + alice_main_purse.into(), + &alice_balance, + ), + Err(ValidationError::InvalidProofHash) + ); + + let bogus_motes = U512::from(1337); + assert_eq!( + tracking_copy::validate_balance_proof( + &state_root_hash, + &balance_proof, + alice_main_purse.into(), + &bogus_motes, + ), + Err(ValidationError::UnexpectedValue) + ); +} + +#[ignore] +#[test] +fn get_balance_using_public_key_should_work() { + let protocol_version = ProtocolVersion::V2_0_0; + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let block_time = 1_000_000; + let transfer_request = TransferRequestBuilder::new(*TRANSFER_AMOUNT_1, *ALICE_ADDR) + .with_block_time(block_time) + .build(); + + builder + .transfer_and_commit(transfer_request) + .expect_success(); let alice_account = builder - .get_account(*ALICE_ADDR) + .get_entity_by_account_hash(*ALICE_ADDR) .expect("should have Alice's account"); let alice_main_purse = alice_account.main_purse(); - let alice_balance_result = builder.get_purse_balance_result(alice_main_purse); + let alice_balance_result = + builder.get_public_key_balance_result_with_proofs(protocol_version, ALICE_KEY.clone()); let alice_balance = alice_balance_result - .motes() + .available_balance() .cloned() .expect("should have motes"); @@ -58,9 +151,15 @@ fn get_balance_should_work() { let state_root_hash = builder.get_post_state_hash(); - let balance_proof = alice_balance_result.proof().expect("should have proofs"); + let proofs_result = alice_balance_result + .proofs_result() + .expect("should have proofs result"); + let balance_proof = 
proofs_result + .total_balance_proof() + .expect("should have proofs") + .clone(); - assert!(core::validate_balance_proof( + assert!(tracking_copy::validate_balance_proof( &state_root_hash, &balance_proof, alice_main_purse.into(), @@ -70,7 +169,7 @@ fn get_balance_should_work() { let bogus_key = Key::Hash([1u8; 32]); assert_eq!( - core::validate_balance_proof( + tracking_copy::validate_balance_proof( &state_root_hash, &balance_proof, bogus_key.to_owned(), @@ -81,13 +180,18 @@ fn get_balance_should_work() { let bogus_uref: Key = Key::URef(URef::new([3u8; 32], AccessRights::READ_ADD_WRITE)); assert_eq!( - core::validate_balance_proof(&state_root_hash, &balance_proof, bogus_uref, &alice_balance,), + tracking_copy::validate_balance_proof( + &state_root_hash, + &balance_proof, + bogus_uref, + &alice_balance, + ), Err(ValidationError::UnexpectedKey) ); - let bogus_hash = Blake2bHash::new(&[5u8; 32]); + let bogus_hash = Digest::hash([5u8; 32]); assert_eq!( - core::validate_balance_proof( + tracking_copy::validate_balance_proof( &bogus_hash, &balance_proof, alice_main_purse.into(), @@ -98,7 +202,7 @@ fn get_balance_should_work() { let bogus_motes = U512::from(1337); assert_eq!( - core::validate_balance_proof( + tracking_copy::validate_balance_proof( &state_root_hash, &balance_proof, alice_main_purse.into(), diff --git a/execution_engine_testing/tests/src/test/groups.rs b/execution_engine_testing/tests/src/test/groups.rs index b231b1201b..12fe892620 100644 --- a/execution_engine_testing/tests/src/test/groups.rs +++ b/execution_engine_testing/tests/src/test/groups.rs @@ -1,18 +1,22 @@ +#![allow(deprecated)] + use assert_matches::assert_matches; use once_cell::sync::Lazy; use casper_engine_test_support::{ - internal::{ - DeployItemBuilder, ExecuteRequestBuilder, InMemoryWasmTestBuilder, DEFAULT_PAYMENT, - DEFAULT_RUN_GENESIS_REQUEST, - }, - DEFAULT_ACCOUNT_ADDR, MINIMUM_ACCOUNT_CREATION_BALANCE, + DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder, 
UpgradeRequestBuilder, + DEFAULT_ACCOUNT_ADDR, DEFAULT_PAYMENT, DEFAULT_PROTOCOL_VERSION, LOCAL_GENESIS_REQUEST, + MINIMUM_ACCOUNT_CREATION_BALANCE, }; -use casper_execution_engine::core::{engine_state::Error, execution}; +use casper_execution_engine::{engine_state::Error, execution::ExecError}; use casper_types::{ - account::AccountHash, contracts::CONTRACT_INITIAL_VERSION, runtime_args, Key, RuntimeArgs, U512, + account::AccountHash, + contracts::{ContractPackageHash, CONTRACT_INITIAL_VERSION}, + runtime_args, Key, PackageHash, RuntimeArgs, U512, }; +use crate::wasm_utils; + const CONTRACT_GROUPS: &str = "groups.wasm"; const PACKAGE_HASH_KEY: &str = "package_hash_key"; const PACKAGE_ACCESS_KEY: &str = "package_access_key"; @@ -33,30 +37,31 @@ const ARG_TARGET: &str = "target"; static TRANSFER_1_AMOUNT: Lazy = Lazy::new(|| U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE) + 1000); -#[ignore] -#[test] -fn should_call_group_restricted_session() { - // This test runs a contract that's after every call extends the same key with - // more data +fn setup_from_lmdb_fixture() -> LmdbWasmTestBuilder { + // let (mut builder, _, _) = lmdb_fixture::builder_from_global_state_fixture(GROUPS_FIXTURE); + // builder.with_block_time(Timestamp::now().into()); + // builder.with_gas_hold_config(HoldBalanceHandling::default(), 1200u64); + + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); let exec_request_1 = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, CONTRACT_GROUPS, RuntimeArgs::default(), ) .build(); - - let mut builder = InMemoryWasmTestBuilder::default(); - - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); - builder.exec(exec_request_1).expect_success().commit(); + builder +} + +#[ignore] +#[test] +fn should_call_group_restricted_session() { + let mut builder = setup_from_lmdb_fixture(); let account = builder - .query(None, Key::Account(*DEFAULT_ACCOUNT_ADDR), &[]) - .expect("should query account") - .as_account() - 
.cloned() - .expect("should be account"); + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .expect("must have default contract"); let _package_hash = account .named_keys() @@ -67,61 +72,29 @@ fn should_call_group_restricted_session() { .get(PACKAGE_ACCESS_KEY) .expect("should have package hash"); - let exec_request_2 = { - // This inserts package as an argument because this test - // can work from different accounts which might not have the same keys in their session - // code. - let args = runtime_args! {}; - let deploy = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_stored_versioned_contract_by_name( - PACKAGE_HASH_KEY, - Some(CONTRACT_INITIAL_VERSION), - RESTRICTED_SESSION, - args, - ) - .with_empty_payment_bytes(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, }) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) - .with_deploy_hash([3; 32]) - .build(); - - ExecuteRequestBuilder::new().push_deploy(deploy).build() - }; + // This inserts package as an argument because this test + // can work from different accounts which might not have the same keys in their session + // code. + let exec_request_2 = ExecuteRequestBuilder::versioned_contract_call_by_name( + *DEFAULT_ACCOUNT_ADDR, + PACKAGE_HASH_KEY, + None, + RESTRICTED_SESSION, + runtime_args! 
{}, + ) + .build(); builder.exec(exec_request_2).expect_success().commit(); - - let _account = builder - .query(None, Key::Account(*DEFAULT_ACCOUNT_ADDR), &[]) - .expect("should query account") - .as_account() - .cloned() - .expect("should be account"); } #[ignore] #[test] fn should_call_group_restricted_session_caller() { - // This test runs a contract that's after every call extends the same key with - // more data - let exec_request_1 = ExecuteRequestBuilder::standard( - *DEFAULT_ACCOUNT_ADDR, - CONTRACT_GROUPS, - RuntimeArgs::default(), - ) - .build(); - - let mut builder = InMemoryWasmTestBuilder::default(); - - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); - - builder.exec(exec_request_1).expect_success().commit(); + let mut builder = setup_from_lmdb_fixture(); let account = builder - .query(None, Key::Account(*DEFAULT_ACCOUNT_ADDR), &[]) - .expect("should query account") - .as_account() - .cloned() - .expect("should be account"); + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .expect("must have default contract package"); let package_hash = account .named_keys() @@ -132,47 +105,26 @@ fn should_call_group_restricted_session_caller() { .get(PACKAGE_ACCESS_KEY) .expect("should have package hash"); - let exec_request_2 = { - let args = runtime_args! { - PACKAGE_HASH_ARG => package_hash.into_hash(), - }; - let deploy = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_stored_versioned_contract_by_name( - PACKAGE_HASH_KEY, - Some(CONTRACT_INITIAL_VERSION), - RESTRICTED_SESSION_CALLER, - args, - ) - .with_empty_payment_bytes(runtime_args! 
{ ARG_AMOUNT => *DEFAULT_PAYMENT, }) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) - .with_deploy_hash([3; 32]) - .build(); - - ExecuteRequestBuilder::new().push_deploy(deploy).build() - }; - builder.exec(exec_request_2).expect_success().commit(); + // This inserts package as an argument because this test + // can work from different accounts which might not have the same keys in their session + // code. + let exec_request_2 = ExecuteRequestBuilder::versioned_contract_call_by_name( + *DEFAULT_ACCOUNT_ADDR, + PACKAGE_HASH_KEY, + None, + RESTRICTED_SESSION, + runtime_args! { + PACKAGE_HASH_ARG => package_hash.into_package_hash() + }, + ) + .build(); - let _account = builder - .query(None, Key::Account(*DEFAULT_ACCOUNT_ADDR), &[]) - .expect("should query account") - .as_account() - .cloned() - .expect("should be account"); + builder.exec(exec_request_2).expect_success().commit(); } #[test] #[ignore] fn should_not_call_restricted_session_from_wrong_account() { - // This test runs a contract that's after every call extends the same key with - // more data - let exec_request_1 = ExecuteRequestBuilder::standard( - *DEFAULT_ACCOUNT_ADDR, - CONTRACT_GROUPS, - RuntimeArgs::default(), - ) - .build(); - let exec_request_2 = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, CONTRACT_TRANSFER_TO_ACCOUNT, @@ -180,21 +132,13 @@ fn should_not_call_restricted_session_from_wrong_account() { ) .build(); - let mut builder = InMemoryWasmTestBuilder::default(); - - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); - - builder.exec(exec_request_1).expect_success().commit(); + let mut builder = setup_from_lmdb_fixture(); builder.exec(exec_request_2).expect_success().commit(); let account = builder - .query(None, Key::Account(*DEFAULT_ACCOUNT_ADDR), &[]) - .expect("should query account") - .as_account() - .cloned() - .expect("should be account"); - + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .expect("must have default contract package"); let 
package_hash = account .named_keys() .get(PACKAGE_HASH_KEY) @@ -204,55 +148,38 @@ fn should_not_call_restricted_session_from_wrong_account() { .get(PACKAGE_ACCESS_KEY) .expect("should have package hash"); - let exec_request_3 = { - let args = runtime_args! {}; - let deploy = DeployItemBuilder::new() - .with_address(ACCOUNT_1_ADDR) - .with_stored_versioned_contract_by_hash( - package_hash.into_hash().expect("should be hash"), - Some(CONTRACT_INITIAL_VERSION), - RESTRICTED_SESSION, - args, - ) - .with_empty_payment_bytes(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, }) - .with_authorization_keys(&[ACCOUNT_1_ADDR]) - .with_deploy_hash([3; 32]) - .build(); - - ExecuteRequestBuilder::new().push_deploy(deploy).build() - }; + let args = runtime_args! {}; + let deploy_item = DeployItemBuilder::new() + .with_address(ACCOUNT_1_ADDR) + .with_stored_versioned_contract_by_hash( + package_hash.into_package_addr().expect("should be hash"), + None, + RESTRICTED_SESSION, + args, + ) + .with_standard_payment(runtime_args! 
{ ARG_AMOUNT => *DEFAULT_PAYMENT, }) + .with_authorization_keys(&[ACCOUNT_1_ADDR]) + .with_deploy_hash([3; 32]) + .build(); + + let exec_request_3 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); builder.exec(exec_request_3).commit(); let _account = builder .query(None, Key::Account(*DEFAULT_ACCOUNT_ADDR), &[]) - .expect("should query account") - .as_account() - .cloned() - .expect("should be account"); + .expect("should query account"); let response = builder - .get_exec_results() - .last() + .get_last_exec_result() .expect("should have last response"); - assert_eq!(response.len(), 1); - let exec_response = response.last().expect("should have response"); - let error = exec_response.as_error().expect("should have error"); - assert_matches!(error, Error::Exec(execution::Error::InvalidContext)); + let error = response.error().expect("should have error"); + assert_matches!(error, Error::Exec(ExecError::InvalidContext)); } #[test] #[ignore] fn should_not_call_restricted_session_caller_from_wrong_account() { - // This test runs a contract that's after every call extends the same key with - // more data - let exec_request_1 = ExecuteRequestBuilder::standard( - *DEFAULT_ACCOUNT_ADDR, - CONTRACT_GROUPS, - RuntimeArgs::default(), - ) - .build(); - let exec_request_2 = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, CONTRACT_TRANSFER_TO_ACCOUNT, @@ -260,22 +187,15 @@ fn should_not_call_restricted_session_caller_from_wrong_account() { ) .build(); - let mut builder = InMemoryWasmTestBuilder::default(); - - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); - - builder.exec(exec_request_1).expect_success().commit(); + let mut builder = setup_from_lmdb_fixture(); builder.exec(exec_request_2).expect_success().commit(); let account = builder - .query(None, Key::Account(*DEFAULT_ACCOUNT_ADDR), &[]) - .expect("should query account") - .as_account() - .cloned() - .expect("should be account"); + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) 
+ .expect("must have contract"); - let package_hash = account + let package_hash = *account .named_keys() .get(PACKAGE_HASH_KEY) .expect("should have contract package"); @@ -284,69 +204,60 @@ fn should_not_call_restricted_session_caller_from_wrong_account() { .get(PACKAGE_ACCESS_KEY) .expect("should have package hash"); - let exec_request_3 = { - let args = runtime_args! { - "package_hash" => *package_hash, - }; - let deploy = DeployItemBuilder::new() - .with_address(ACCOUNT_1_ADDR) - .with_stored_versioned_contract_by_hash( - package_hash.into_hash().expect("should be hash"), - Some(CONTRACT_INITIAL_VERSION), - RESTRICTED_SESSION_CALLER, - args, - ) - .with_empty_payment_bytes(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, }) - .with_authorization_keys(&[ACCOUNT_1_ADDR]) - .with_deploy_hash([3; 32]) - .build(); - - ExecuteRequestBuilder::new().push_deploy(deploy).build() + let package_hash = package_hash + .into_package_hash() + .map(|package_hash| Key::Hash(package_hash.value())) + .expect("must get Key::Hash"); + + let args = runtime_args! { + "package_hash" => package_hash, }; - builder.exec(exec_request_3).commit(); + let deploy_item = DeployItemBuilder::new() + .with_address(ACCOUNT_1_ADDR) + .with_stored_versioned_contract_by_hash( + package_hash.into_package_addr().expect("should be hash"), + None, + RESTRICTED_SESSION_CALLER, + args, + ) + .with_standard_payment(runtime_args! 
{ ARG_AMOUNT => *DEFAULT_PAYMENT, }) + .with_authorization_keys(&[ACCOUNT_1_ADDR]) + .with_deploy_hash([3; 32]) + .build(); + + let exec_request_3 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); + + builder.exec(exec_request_3).expect_failure(); let _account = builder .query(None, Key::Account(*DEFAULT_ACCOUNT_ADDR), &[]) - .expect("should query account") - .as_account() - .cloned() - .expect("should be account"); + .expect("should query account"); let response = builder - .get_exec_results() - .last() + .get_last_exec_result() .expect("should have last response"); - assert_eq!(response.len(), 1); - let exec_response = response.last().expect("should have response"); - let error = exec_response.as_error().expect("should have error"); - assert_matches!(error, Error::Exec(execution::Error::InvalidContext)); + let error = response.error().expect("should have error"); + assert_matches!(error, Error::Exec(ExecError::InvalidContext)); } #[ignore] #[test] fn should_call_group_restricted_contract() { - // This test runs a contract that's after every call extends the same key with - // more data - let exec_request_1 = ExecuteRequestBuilder::standard( - *DEFAULT_ACCOUNT_ADDR, - CONTRACT_GROUPS, - RuntimeArgs::default(), - ) - .build(); - - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = setup_from_lmdb_fixture(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + let mut upgrade_request = { + UpgradeRequestBuilder::new() + .with_new_protocol_version(DEFAULT_PROTOCOL_VERSION) + .with_enable_addressable_entity(false) + .build() + }; - builder.exec(exec_request_1).expect_success().commit(); + builder.upgrade(&mut upgrade_request); let account = builder - .query(None, Key::Account(*DEFAULT_ACCOUNT_ADDR), &[]) - .expect("should query account") - .as_account() - .cloned() - .expect("should be account"); + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .expect("must have default contract package"); let package_hash 
= account .named_keys() @@ -357,50 +268,32 @@ fn should_call_group_restricted_contract() { .get(PACKAGE_ACCESS_KEY) .expect("should have package hash"); - let exec_request_2 = { - // This inserts package as an argument because this test - // can work from different accounts which might not have the same keys in their session - // code. - let args = runtime_args! { - PACKAGE_HASH_ARG => package_hash.into_hash(), - }; - let deploy = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_stored_versioned_contract_by_name( - PACKAGE_HASH_KEY, - Some(CONTRACT_INITIAL_VERSION), - RESTRICTED_CONTRACT, - args, - ) - .with_empty_payment_bytes(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, }) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) - .with_deploy_hash([3; 32]) - .build(); - - ExecuteRequestBuilder::new().push_deploy(deploy).build() + // This inserts package as an argument because this test + // can work from different accounts which might not have the same keys in their session + // code. + let args = runtime_args! { + PACKAGE_HASH_ARG => *package_hash, }; + let deploy_item = DeployItemBuilder::new() + .with_address(*DEFAULT_ACCOUNT_ADDR) + .with_stored_versioned_contract_by_name(PACKAGE_HASH_KEY, None, RESTRICTED_CONTRACT, args) + .with_standard_payment(runtime_args! 
{ ARG_AMOUNT => *DEFAULT_PAYMENT, }) + .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) + .with_deploy_hash([3; 32]) + .build(); + + let exec_request_2 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); builder.exec(exec_request_2).expect_success().commit(); let _account = builder .query(None, Key::Account(*DEFAULT_ACCOUNT_ADDR), &[]) - .expect("should query account") - .as_account() - .cloned() - .expect("should be account"); + .expect("should query account"); } #[ignore] #[test] fn should_not_call_group_restricted_contract_from_wrong_account() { - // This test runs a contract that's after every call extends the same key with - // more data - let exec_request_1 = ExecuteRequestBuilder::standard( - *DEFAULT_ACCOUNT_ADDR, - CONTRACT_GROUPS, - RuntimeArgs::default(), - ) - .build(); let exec_request_2 = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, CONTRACT_TRANSFER_TO_ACCOUNT, @@ -408,19 +301,12 @@ fn should_not_call_group_restricted_contract_from_wrong_account() { ) .build(); - let mut builder = InMemoryWasmTestBuilder::default(); - - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); - - builder.exec(exec_request_1).expect_success().commit(); + let mut builder = setup_from_lmdb_fixture(); builder.exec(exec_request_2).expect_success().commit(); let account = builder - .query(None, Key::Account(*DEFAULT_ACCOUNT_ADDR), &[]) - .expect("should query account") - .as_account() - .cloned() - .expect("should be account"); + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .expect("must have default contract package"); let package_hash = account .named_keys() @@ -431,65 +317,44 @@ fn should_not_call_group_restricted_contract_from_wrong_account() { .get(PACKAGE_ACCESS_KEY) .expect("should have package hash"); - let exec_request_3 = { - // This inserts package as an argument because this test - // can work from different accounts which might not have the same keys in their session - // code. - let args = runtime_args! 
{ - PACKAGE_HASH_ARG => package_hash.into_hash(), - }; - let deploy = DeployItemBuilder::new() - .with_address(ACCOUNT_1_ADDR) - .with_stored_versioned_contract_by_hash( - package_hash.into_hash().expect("should be hash"), - Some(CONTRACT_INITIAL_VERSION), - RESTRICTED_CONTRACT, - args, - ) - .with_empty_payment_bytes(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, }) - .with_authorization_keys(&[ACCOUNT_1_ADDR]) - .with_deploy_hash([3; 32]) - .build(); - - ExecuteRequestBuilder::new().push_deploy(deploy).build() + // This inserts package as an argument because this test + // can work from different accounts which might not have the same keys in their session + // code. + let args = runtime_args! { + PACKAGE_HASH_ARG => *package_hash, }; + let deploy_item = DeployItemBuilder::new() + .with_address(ACCOUNT_1_ADDR) + .with_stored_versioned_contract_by_hash( + package_hash.into_package_addr().expect("should be hash"), + None, + RESTRICTED_CONTRACT, + args, + ) + .with_standard_payment(runtime_args! 
{ ARG_AMOUNT => *DEFAULT_PAYMENT, }) + .with_authorization_keys(&[ACCOUNT_1_ADDR]) + .with_deploy_hash([3; 32]) + .build(); + + let exec_request_3 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); builder.exec(exec_request_3).commit(); let response = builder - .get_exec_results() - .last() + .get_last_exec_result() .expect("should have last response"); - assert_eq!(response.len(), 1); - let exec_response = response.last().expect("should have response"); - let error = exec_response.as_error().expect("should have error"); - assert_matches!(error, Error::Exec(execution::Error::InvalidContext)); + let error = response.error().expect("should have error"); + assert_matches!(error, Error::Exec(ExecError::InvalidContext)); } #[ignore] #[test] fn should_call_group_unrestricted_contract_caller() { - // This test runs a contract that's after every call extends the same key with - // more data - let exec_request_1 = ExecuteRequestBuilder::standard( - *DEFAULT_ACCOUNT_ADDR, - CONTRACT_GROUPS, - RuntimeArgs::default(), - ) - .build(); - - let mut builder = InMemoryWasmTestBuilder::default(); - - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); - - builder.exec(exec_request_1).expect_success().commit(); + let mut builder = setup_from_lmdb_fixture(); let account = builder - .query(None, Key::Account(*DEFAULT_ACCOUNT_ADDR), &[]) - .expect("should query account") - .as_account() - .cloned() - .expect("should be account"); + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .expect("must have default contract package"); let package_hash = account .named_keys() @@ -500,66 +365,53 @@ fn should_call_group_unrestricted_contract_caller() { .get(PACKAGE_ACCESS_KEY) .expect("should have package hash"); - let exec_request_2 = { - let args = runtime_args! 
{ - PACKAGE_HASH_ARG => package_hash.into_hash(), - }; - let deploy = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_stored_versioned_contract_by_name( - PACKAGE_HASH_KEY, - Some(CONTRACT_INITIAL_VERSION), - UNRESTRICTED_CONTRACT_CALLER, - args, - ) - .with_empty_payment_bytes(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, }) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) - .with_deploy_hash([3; 32]) - .build(); - - ExecuteRequestBuilder::new().push_deploy(deploy).build() + let package_hash = package_hash + .into_package_hash() + .map(|package_hash| ContractPackageHash::new(package_hash.value())) + .expect("must get Key::Hash"); + + let args = runtime_args! { + PACKAGE_HASH_ARG => package_hash, }; + let deploy_item = DeployItemBuilder::new() + .with_address(*DEFAULT_ACCOUNT_ADDR) + .with_stored_versioned_contract_by_name( + PACKAGE_HASH_KEY, + None, + UNRESTRICTED_CONTRACT_CALLER, + args, + ) + .with_standard_payment(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, }) + .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) + .with_deploy_hash([3; 32]) + .build(); + + let exec_request_2 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); builder.exec(exec_request_2).expect_success().commit(); let _account = builder .query(None, Key::Account(*DEFAULT_ACCOUNT_ADDR), &[]) - .expect("should query account") - .as_account() - .cloned() - .expect("should be account"); + .expect("should query account"); } #[ignore] #[test] fn should_call_unrestricted_contract_caller_from_different_account() { + let mut builder = setup_from_lmdb_fixture(); + let exec_request_1 = ExecuteRequestBuilder::standard( - *DEFAULT_ACCOUNT_ADDR, - CONTRACT_GROUPS, - RuntimeArgs::default(), - ) - .build(); - let exec_request_2 = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, CONTRACT_TRANSFER_TO_ACCOUNT, runtime_args! 
{ ARG_TARGET => ACCOUNT_1_ADDR, ARG_AMOUNT => *TRANSFER_1_AMOUNT }, ) .build(); - - let mut builder = InMemoryWasmTestBuilder::default(); - - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); - builder.exec(exec_request_1).expect_success().commit(); - builder.exec(exec_request_2).expect_success().commit(); let account = builder - .query(None, Key::Account(*DEFAULT_ACCOUNT_ADDR), &[]) - .expect("should query account") - .as_account() - .cloned() - .expect("should be account"); + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .expect("must have contract"); - let package_hash = account + let package_hash = *account .named_keys() .get(PACKAGE_HASH_KEY) .expect("should have contract package"); @@ -568,43 +420,28 @@ fn should_call_unrestricted_contract_caller_from_different_account() { .get(PACKAGE_ACCESS_KEY) .expect("should have package hash"); - let exec_request_3 = { - // This inserts package as an argument because this test - // can work from different accounts which might not have the same keys in their session - // code. - let args = runtime_args! { - PACKAGE_HASH_ARG => package_hash.into_hash(), - }; - let deploy = DeployItemBuilder::new() - .with_address(ACCOUNT_1_ADDR) - .with_stored_versioned_contract_by_hash( - package_hash.into_hash().expect("should be hash"), - Some(CONTRACT_INITIAL_VERSION), - UNRESTRICTED_CONTRACT_CALLER, - args, - ) - .with_empty_payment_bytes(runtime_args! 
{ ARG_AMOUNT => *DEFAULT_PAYMENT, }) - .with_authorization_keys(&[ACCOUNT_1_ADDR]) - .with_deploy_hash([3; 32]) - .build(); - - ExecuteRequestBuilder::new().push_deploy(deploy).build() - }; + let package_hash = package_hash + .into_package_hash() + .map(|package_hash| ContractPackageHash::new(package_hash.value())) + .expect("must get Key::Hash"); - builder.exec(exec_request_3).expect_success().commit(); + let exec_request_2 = ExecuteRequestBuilder::versioned_contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + PackageHash::new(package_hash.value()), + None, + UNRESTRICTED_CONTRACT_CALLER, + runtime_args! { + PACKAGE_HASH_ARG => package_hash, + }, + ) + .build(); + + builder.exec(exec_request_2).expect_success().commit(); } #[ignore] #[test] fn should_call_group_restricted_contract_as_session() { - // This test runs a contract that's after every call extends the same key with - // more data - let exec_request_1 = ExecuteRequestBuilder::standard( - *DEFAULT_ACCOUNT_ADDR, - CONTRACT_GROUPS, - RuntimeArgs::default(), - ) - .build(); let exec_request_2 = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, CONTRACT_TRANSFER_TO_ACCOUNT, @@ -612,21 +449,14 @@ fn should_call_group_restricted_contract_as_session() { ) .build(); - let mut builder = InMemoryWasmTestBuilder::default(); - - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); - - builder.exec(exec_request_1).expect_success().commit(); + let mut builder = setup_from_lmdb_fixture(); builder.exec(exec_request_2).expect_success().commit(); let account = builder - .query(None, Key::Account(*DEFAULT_ACCOUNT_ADDR), &[]) - .expect("should query account") - .as_account() - .cloned() - .expect("should be account"); + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .expect("must have default contract package"); - let package_hash = account + let package_hash = *account .named_keys() .get(PACKAGE_HASH_KEY) .expect("should have contract package"); @@ -635,28 +465,24 @@ fn 
should_call_group_restricted_contract_as_session() { .get(PACKAGE_ACCESS_KEY) .expect("should have package hash"); - let exec_request_3 = { - // This inserts package as an argument because this test - // can work from different accounts which might not have the same keys in their session - // code. - let args = runtime_args! { - PACKAGE_HASH_ARG => package_hash.into_hash(), - }; - let deploy = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_stored_versioned_contract_by_hash( - package_hash.into_hash().expect("should be hash"), - Some(CONTRACT_INITIAL_VERSION), - RESTRICTED_CONTRACT_CALLER_AS_SESSION, - args, - ) - .with_empty_payment_bytes(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, }) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) - .with_deploy_hash([4; 32]) - .build(); - - ExecuteRequestBuilder::new().push_deploy(deploy).build() - }; + let package_hash = package_hash + .into_package_hash() + .map(|package_hash| ContractPackageHash::new(package_hash.value())) + .expect("must get Key::Hash"); + + // This inserts package as an argument because this test + // can work from different accounts which might not have the same keys in their session + // code. + let exec_request_3 = ExecuteRequestBuilder::versioned_contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + PackageHash::new(package_hash.value()), + None, + RESTRICTED_CONTRACT_CALLER_AS_SESSION, + runtime_args! 
{ + PACKAGE_HASH_ARG => package_hash, + }, + ) + .build(); builder.exec(exec_request_3).expect_success().commit(); } @@ -664,14 +490,6 @@ fn should_call_group_restricted_contract_as_session() { #[ignore] #[test] fn should_call_group_restricted_contract_as_session_from_wrong_account() { - // This test runs a contract that's after every call extends the same key with - // more data - let exec_request_1 = ExecuteRequestBuilder::standard( - *DEFAULT_ACCOUNT_ADDR, - CONTRACT_GROUPS, - RuntimeArgs::default(), - ) - .build(); let exec_request_2 = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, CONTRACT_TRANSFER_TO_ACCOUNT, @@ -679,21 +497,14 @@ fn should_call_group_restricted_contract_as_session_from_wrong_account() { ) .build(); - let mut builder = InMemoryWasmTestBuilder::default(); - - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); - - builder.exec(exec_request_1).expect_success().commit(); + let mut builder = setup_from_lmdb_fixture(); builder.exec(exec_request_2).expect_success().commit(); let account = builder - .query(None, Key::Account(*DEFAULT_ACCOUNT_ADDR), &[]) - .expect("should query account") - .as_account() - .cloned() - .expect("should be account"); + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .expect("must have contract"); - let package_hash = account + let package_hash = *account .named_keys() .get(PACKAGE_HASH_KEY) .expect("should have contract package"); @@ -702,67 +513,116 @@ fn should_call_group_restricted_contract_as_session_from_wrong_account() { .get(PACKAGE_ACCESS_KEY) .expect("should have package hash"); - let exec_request_3 = { - // This inserts package as an argument because this test - // can work from different accounts which might not have the same keys in their session - // code. - let args = runtime_args! 
{ - PACKAGE_HASH_ARG => package_hash.into_hash(), - }; - let deploy = DeployItemBuilder::new() - .with_address(ACCOUNT_1_ADDR) - .with_stored_versioned_contract_by_hash( - package_hash.into_hash().expect("should be hash"), - Some(CONTRACT_INITIAL_VERSION), - RESTRICTED_CONTRACT_CALLER_AS_SESSION, - args, - ) - .with_empty_payment_bytes(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, }) - .with_authorization_keys(&[ACCOUNT_1_ADDR]) - .with_deploy_hash([4; 32]) - .build(); - - ExecuteRequestBuilder::new().push_deploy(deploy).build() - }; + let hash = package_hash + .into_package_hash() + .expect("must convert to package hash"); + + let package_key = package_hash + .into_package_hash() + .map(|package_hash| ContractPackageHash::new(package_hash.value())) + .expect("must get Key::Hash"); + + // This inserts package as an argument because this test + // can work from different accounts which might not have the same keys in their session + // code. + let exec_request_3 = ExecuteRequestBuilder::versioned_contract_call_by_hash( + ACCOUNT_1_ADDR, + hash, + Some(CONTRACT_INITIAL_VERSION), + RESTRICTED_CONTRACT_CALLER_AS_SESSION, + runtime_args! 
{ + PACKAGE_HASH_ARG => package_key, + }, + ) + .build(); - builder.exec(exec_request_3).commit(); + builder.exec(exec_request_3).expect_failure(); let response = builder - .get_exec_results() - .last() + .get_last_exec_result() .expect("should have last response"); - assert_eq!(response.len(), 1); - let exec_response = response.last().expect("should have response"); - let error = exec_response.as_error().expect("should have error"); - assert_matches!(error, Error::Exec(execution::Error::InvalidContext)); + let error = response.error().expect("should have error"); + assert_matches!(error, Error::Exec(ExecError::InvalidContext)); } #[ignore] #[test] fn should_not_call_uncallable_contract_from_deploy() { - // This test runs a contract that's after every call extends the same key with - // more data - let exec_request_1 = ExecuteRequestBuilder::standard( - *DEFAULT_ACCOUNT_ADDR, - CONTRACT_GROUPS, - RuntimeArgs::default(), - ) - .build(); + let mut builder = setup_from_lmdb_fixture(); + + let account = builder + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .expect("must have default contract package"); + + let package_hash = *account + .named_keys() + .get(PACKAGE_HASH_KEY) + .expect("should have contract package"); + let _access_uref = account + .named_keys() + .get(PACKAGE_ACCESS_KEY) + .expect("should have package hash"); - let mut builder = InMemoryWasmTestBuilder::default(); + let package_hash = package_hash + .into_package_hash() + .map(|package_hash| Key::Hash(package_hash.value())) + .expect("must get Key::Hash"); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + // This inserts package as an argument because this test + // can work from different accounts which might not have the same keys in their session + // code. - builder.exec(exec_request_1).expect_success().commit(); + let args = runtime_args! 
{ + PACKAGE_HASH_ARG => package_hash, + }; + let deploy_item = DeployItemBuilder::new() + .with_address(*DEFAULT_ACCOUNT_ADDR) + .with_stored_versioned_contract_by_name(PACKAGE_HASH_KEY, None, UNCALLABLE_SESSION, args) + .with_standard_payment(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, }) + .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) + .with_deploy_hash([3; 32]) + .build(); + + let exec_request_2 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); + + builder.exec(exec_request_2).commit(); + let response = builder + .get_last_exec_result() + .expect("should have last response"); + let error = response.error().expect("should have error"); + assert_matches!(error, Error::Exec(ExecError::InvalidContext)); + + let args = runtime_args! { + PACKAGE_HASH_ARG => package_hash, + }; + let deploy_item = DeployItemBuilder::new() + .with_address(*DEFAULT_ACCOUNT_ADDR) + .with_stored_versioned_contract_by_name( + PACKAGE_HASH_KEY, + None, + CALL_RESTRICTED_ENTRY_POINTS, + args, + ) + .with_standard_payment(runtime_args! 
{ ARG_AMOUNT => *DEFAULT_PAYMENT, }) + .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) + .with_deploy_hash([6; 32]) + .build(); + + let exec_request_3 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); + + builder.exec(exec_request_3).expect_success().commit(); +} + +#[ignore] +#[test] +fn should_not_call_uncallable_session_from_deploy() { + let mut builder = setup_from_lmdb_fixture(); let account = builder - .query(None, Key::Account(*DEFAULT_ACCOUNT_ADDR), &[]) - .expect("should query account") - .as_account() - .cloned() - .expect("should be account"); + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .expect("must have contract"); - let package_hash = account + let package_hash = *account .named_keys() .get(PACKAGE_HASH_KEY) .expect("should have contract package"); @@ -771,86 +631,73 @@ fn should_not_call_uncallable_contract_from_deploy() { .get(PACKAGE_ACCESS_KEY) .expect("should have package hash"); - let exec_request_2 = { - // This inserts package as an argument because this test - // can work from different accounts which might not have the same keys in their session - // code. - let args = runtime_args! { - PACKAGE_HASH_ARG => package_hash.into_hash(), - }; - let deploy = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_stored_versioned_contract_by_name( - PACKAGE_HASH_KEY, - Some(CONTRACT_INITIAL_VERSION), - UNCALLABLE_SESSION, - args, - ) - .with_empty_payment_bytes(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, }) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) - .with_deploy_hash([3; 32]) - .build(); - - ExecuteRequestBuilder::new().push_deploy(deploy).build() + let package_hash = package_hash + .into_package_hash() + .map(|package_hash| Key::Hash(package_hash.value())) + .expect("must get Key::Hash"); + + // This inserts package as an argument because this test + // can work from different accounts which might not have the same keys in their session + // code. 
+ let args = runtime_args! { + PACKAGE_HASH_ARG => package_hash, }; + let deploy_item = DeployItemBuilder::new() + .with_address(*DEFAULT_ACCOUNT_ADDR) + .with_stored_versioned_contract_by_name(PACKAGE_HASH_KEY, None, UNCALLABLE_CONTRACT, args) + .with_standard_payment(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, }) + .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) + .with_deploy_hash([3; 32]) + .build(); + + let exec_request_2 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); builder.exec(exec_request_2).commit(); let response = builder - .get_exec_results() - .last() + .get_last_exec_result() .expect("should have last response"); - assert_eq!(response.len(), 1); - let exec_response = response.last().expect("should have response"); - let error = exec_response.as_error().expect("should have error"); - assert_matches!(error, Error::Exec(execution::Error::InvalidContext)); - - let exec_request_3 = { - let args = runtime_args! { - PACKAGE_HASH_ARG => package_hash.into_hash(), - }; - let deploy = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_stored_versioned_contract_by_name( - PACKAGE_HASH_KEY, - Some(CONTRACT_INITIAL_VERSION), - CALL_RESTRICTED_ENTRY_POINTS, - args, - ) - .with_empty_payment_bytes(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, }) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) - .with_deploy_hash([6; 32]) - .build(); - - ExecuteRequestBuilder::new().push_deploy(deploy).build() - }; + let error = response.error().expect("should have error"); + assert_matches!(error, Error::Exec(ExecError::InvalidContext)); + let args = runtime_args! { + PACKAGE_HASH_ARG => package_hash, + }; + let deploy_item = DeployItemBuilder::new() + .with_address(*DEFAULT_ACCOUNT_ADDR) + .with_stored_versioned_contract_by_name( + PACKAGE_HASH_KEY, + None, + CALL_RESTRICTED_ENTRY_POINTS, + args, + ) + .with_standard_payment(runtime_args! 
{ ARG_AMOUNT => *DEFAULT_PAYMENT, }) + .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) + .with_deploy_hash([6; 32]) + .build(); + + let exec_request_3 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); builder.exec(exec_request_3).expect_success().commit(); } #[ignore] #[test] -fn should_not_call_uncallable_session_from_deploy() { - // This test runs a contract that's after every call extends the same key with - // more data - let exec_request_1 = ExecuteRequestBuilder::standard( +fn should_not_call_group_restricted_stored_payment_code_from_invalid_account() { + // This test calls a stored payment code that is restricted with a group access using an account + // that does not have any of the group urefs in context. + + let exec_request_2 = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, - CONTRACT_GROUPS, - RuntimeArgs::default(), + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { ARG_TARGET => ACCOUNT_1_ADDR, ARG_AMOUNT => *TRANSFER_1_AMOUNT }, ) .build(); + let mut builder = setup_from_lmdb_fixture(); - let mut builder = InMemoryWasmTestBuilder::default(); - - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); - - builder.exec(exec_request_1).expect_success().commit(); + builder.exec(exec_request_2).expect_success().commit(); let account = builder - .query(None, Key::Account(*DEFAULT_ACCOUNT_ADDR), &[]) - .expect("should query account") - .as_account() - .cloned() - .expect("should be account"); + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .expect("must have default contract package"); let package_hash = account .named_keys() @@ -861,57 +708,89 @@ fn should_not_call_uncallable_session_from_deploy() { .get(PACKAGE_ACCESS_KEY) .expect("should have package hash"); - let exec_request_2 = { - // This inserts package as an argument because this test - // can work from different accounts which might not have the same keys in their session - // code. - let args = runtime_args! 
{ - PACKAGE_HASH_ARG => package_hash.into_hash(), - }; - let deploy = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_stored_versioned_contract_by_name( - PACKAGE_HASH_KEY, - Some(CONTRACT_INITIAL_VERSION), - UNCALLABLE_CONTRACT, - args, - ) - .with_empty_payment_bytes(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, }) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) - .with_deploy_hash([3; 32]) - .build(); - - ExecuteRequestBuilder::new().push_deploy(deploy).build() + let args = runtime_args! { + "amount" => *DEFAULT_PAYMENT, }; + let deploy_item = DeployItemBuilder::new() + .with_address(ACCOUNT_1_ADDR) + .with_session_bytes(wasm_utils::do_nothing_bytes(), RuntimeArgs::default()) + .with_stored_versioned_payment_contract_by_hash( + package_hash + .into_package_addr() + .expect("must have created package hash"), + None, + "restricted_standard_payment", + args, + ) + .with_authorization_keys(&[ACCOUNT_1_ADDR]) + .with_deploy_hash([3; 32]) + .build(); + + let exec_request_3 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); + + builder.exec(exec_request_3).commit(); + + let _account = builder + .query(None, Key::Account(*DEFAULT_ACCOUNT_ADDR), &[]) + .expect("should query account"); - builder.exec(exec_request_2).commit(); let response = builder - .get_exec_results() - .last() + .get_last_exec_result() .expect("should have last response"); - assert_eq!(response.len(), 1); - let exec_response = response.last().expect("should have response"); - let error = exec_response.as_error().expect("should have error"); - assert_matches!(error, Error::Exec(execution::Error::InvalidContext)); - - let exec_request_3 = { - let args = runtime_args! 
{ - PACKAGE_HASH_ARG => package_hash.into_hash(), - }; - let deploy = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_stored_versioned_contract_by_name( - PACKAGE_HASH_KEY, - Some(CONTRACT_INITIAL_VERSION), - CALL_RESTRICTED_ENTRY_POINTS, - args, - ) - .with_empty_payment_bytes(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, }) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) - .with_deploy_hash([6; 32]) - .build(); - - ExecuteRequestBuilder::new().push_deploy(deploy).build() + let error = response.error().expect("should have error"); + assert_matches!(error, Error::Exec(ExecError::InvalidContext)); +} + +#[ignore] +#[test] +fn should_call_group_restricted_stored_payment_code() { + // This test calls a stored payment code that is restricted with a group access using an account + // that contains urefs from the group. + + let exec_request_2 = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { ARG_TARGET => ACCOUNT_1_ADDR, ARG_AMOUNT => *TRANSFER_1_AMOUNT }, + ) + .build(); + + let mut builder = setup_from_lmdb_fixture(); + + builder.exec(exec_request_2).expect_success().commit(); + + let account = builder + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .expect("must have default contract package"); + + let package_hash = account + .named_keys() + .get(PACKAGE_HASH_KEY) + .expect("should have contract package"); + let _access_uref = account + .named_keys() + .get(PACKAGE_ACCESS_KEY) + .expect("should have package hash"); + + let args = runtime_args! 
{ + "amount" => *DEFAULT_PAYMENT, }; + let deploy_item = DeployItemBuilder::new() + .with_address(*DEFAULT_ACCOUNT_ADDR) + .with_session_bytes(wasm_utils::do_nothing_bytes(), RuntimeArgs::default()) + // .with_stored_versioned_contract_by_name(name, version, entry_point, args) + .with_stored_versioned_payment_contract_by_hash( + package_hash + .into_package_addr() + .expect("must have created package hash"), + None, + "restricted_standard_payment", + args, + ) + .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) + .with_deploy_hash([3; 32]) + .build(); + + let exec_request_3 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); + builder.exec(exec_request_3).expect_success().commit(); } diff --git a/execution_engine_testing/tests/src/test/host_function_costs.rs b/execution_engine_testing/tests/src/test/host_function_costs.rs index 57243ad2d3..a29f6a3884 100644 --- a/execution_engine_testing/tests/src/test/host_function_costs.rs +++ b/execution_engine_testing/tests/src/test/host_function_costs.rs @@ -1,15 +1,13 @@ use casper_engine_test_support::{ - internal::{ExecuteRequestBuilder, InMemoryWasmTestBuilder, DEFAULT_RUN_GENESIS_REQUEST}, - DEFAULT_ACCOUNT_ADDR, + ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST, }; -use casper_types::{bytesrepr::Bytes, runtime_args, ContractHash, RuntimeArgs}; +use casper_types::{bytesrepr::Bytes, runtime_args, AddressableEntityHash, RuntimeArgs}; const HOST_FUNCTION_COSTS_NAME: &str = "host_function_costs.wasm"; const CONTRACT_KEY_NAME: &str = "contract"; const DO_NOTHING_NAME: &str = "do_nothing"; const DO_SOMETHING_NAME: &str = "do_something"; -const ACCOUNT_FUNCTION_NAME: &str = "account_function"; const CALLS_DO_NOTHING_LEVEL1_NAME: &str = "calls_do_nothing_level1"; const CALLS_DO_NOTHING_LEVEL2_NAME: &str = "calls_do_nothing_level2"; const ARG_BYTES: &str = "bytes"; @@ -21,7 +19,7 @@ const ARG_SIZE_FUNCTION_CALL_100_NAME: &str = "arg_size_function_call_100"; fn 
should_measure_gas_cost() { // This test runs a contract that's after every call extends the same key with // more data - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); let exec_request_1 = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, @@ -31,19 +29,19 @@ fn should_measure_gas_cost() { .build(); // Create Accounts - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); builder.exec(exec_request_1).expect_success().commit(); let account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) .expect("should have account"); - let contract_hash: ContractHash = account + let contract_hash: AddressableEntityHash = account .named_keys() .get(CONTRACT_KEY_NAME) .expect("contract hash") - .into_hash() + .into_entity_hash_addr() .expect("should be hash") .into(); @@ -61,7 +59,7 @@ fn should_measure_gas_cost() { builder.exec(exec_request_2).expect_success().commit(); - let do_nothing_cost = builder.last_exec_gas_cost().value(); + let do_nothing_cost = builder.last_exec_gas_consumed().value(); // // Measure opcodes (doing something) @@ -76,39 +74,12 @@ fn should_measure_gas_cost() { builder.exec(exec_request_2).expect_success().commit(); - let do_something_cost = builder.last_exec_gas_cost().value(); + let do_something_cost = builder.last_exec_gas_consumed().value(); assert!( !do_something_cost.is_zero(), "executing nothing should cost zero" ); assert!(do_something_cost > do_nothing_cost); - - // - // Measure host functions - // - let exec_request_3 = ExecuteRequestBuilder::contract_call_by_hash( - *DEFAULT_ACCOUNT_ADDR, - contract_hash, - ACCOUNT_FUNCTION_NAME, - runtime_args! 
{ - "source_account" => *DEFAULT_ACCOUNT_ADDR, - }, - ) - .build(); - - let account_1_funds_before = builder.get_purse_balance(account.main_purse()); - - builder.exec(exec_request_3).expect_success().commit(); - - let account_1_funds_after = builder.get_purse_balance(account.main_purse()); - - let do_host_function_calls = account_1_funds_before - account_1_funds_after; - assert!( - !do_host_function_calls.is_zero(), - "executing nothing should cost zero" - ); - assert!(do_host_function_calls > do_something_cost); - assert!(do_host_function_calls > do_nothing_cost); } #[ignore] @@ -116,7 +87,7 @@ fn should_measure_gas_cost() { fn should_measure_nested_host_function_call_cost() { // This test runs a contract that's after every call extends the same key with // more data - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); let exec_request_1 = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, @@ -126,19 +97,19 @@ fn should_measure_nested_host_function_call_cost() { .build(); // Create Accounts - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); builder.exec(exec_request_1).expect_success().commit(); let account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) .expect("should have account"); - let contract_hash: ContractHash = account + let contract_hash: AddressableEntityHash = account .named_keys() .get(CONTRACT_KEY_NAME) .expect("contract hash") - .into_hash() + .into_entity_hash_addr() .expect("should be hash") .into(); @@ -155,7 +126,7 @@ fn should_measure_nested_host_function_call_cost() { .build(); builder.exec(exec_request_2).expect_success().commit(); - let level_1_cost = builder.last_exec_gas_cost().value(); + let level_1_cost = builder.last_exec_gas_consumed().value(); assert!( !level_1_cost.is_zero(), @@ -175,7 +146,7 @@ fn should_measure_nested_host_function_call_cost() { 
.build(); builder.exec(exec_request_3).expect_success().commit(); - let level_2_cost = builder.last_exec_gas_cost().value(); + let level_2_cost = builder.last_exec_gas_consumed().value(); assert!( !level_2_cost.is_zero(), @@ -194,7 +165,7 @@ fn should_measure_nested_host_function_call_cost() { #[test] fn should_measure_argument_size_in_host_function_call() { // Checks if calling a contract with large arguments affects costs - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); let exec_request_1 = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, @@ -204,19 +175,19 @@ fn should_measure_argument_size_in_host_function_call() { .build(); // Create Accounts - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); builder.exec(exec_request_1).expect_success().commit(); let account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) .expect("should have account"); - let contract_hash: ContractHash = account + let contract_hash: AddressableEntityHash = account .named_keys() .get(CONTRACT_KEY_NAME) .expect("contract hash") - .into_hash() + .into_entity_hash_addr() .expect("should be hash") .into(); @@ -234,7 +205,7 @@ fn should_measure_argument_size_in_host_function_call() { .build(); builder.exec(exec_request_2).expect_success().commit(); - let call_1_cost = builder.last_exec_gas_cost().value(); + let call_1_cost = builder.last_exec_gas_consumed().value(); assert!( !call_1_cost.is_zero(), @@ -254,7 +225,7 @@ fn should_measure_argument_size_in_host_function_call() { .build(); builder.exec(exec_request_3).expect_success().commit(); - let call_2_cost = builder.last_exec_gas_cost().value(); + let call_2_cost = builder.last_exec_gas_consumed().value(); assert!( call_2_cost > call_1_cost, diff --git a/execution_engine_testing/tests/src/test/manage_groups.rs 
b/execution_engine_testing/tests/src/test/manage_groups.rs index 06d9234131..daf0c4485b 100644 --- a/execution_engine_testing/tests/src/test/manage_groups.rs +++ b/execution_engine_testing/tests/src/test/manage_groups.rs @@ -1,19 +1,16 @@ -use std::{collections::BTreeSet, iter::FromIterator}; +use std::collections::BTreeSet; use assert_matches::assert_matches; use once_cell::sync::Lazy; use casper_engine_test_support::{ - internal::{ - DeployItemBuilder, ExecuteRequestBuilder, InMemoryWasmTestBuilder, DEFAULT_PAYMENT, - DEFAULT_RUN_GENESIS_REQUEST, - }, - DEFAULT_ACCOUNT_ADDR, + DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, + DEFAULT_PAYMENT, LOCAL_GENESIS_REQUEST, }; -use casper_execution_engine::core::{engine_state::Error, execution}; +use casper_execution_engine::{engine_state::Error, execution::ExecError}; use casper_types::{ - contracts::{self, CONTRACT_INITIAL_VERSION, MAX_GROUPS}, - runtime_args, Group, Key, RuntimeArgs, + addressable_entity::{self, MAX_GROUPS}, + runtime_args, Group, RuntimeArgs, ENTITY_INITIAL_VERSION, }; const CONTRACT_GROUPS: &str = "manage_groups.wasm"; @@ -24,12 +21,12 @@ const REMOVE_GROUP: &str = "remove_group"; const EXTEND_GROUP_UREFS: &str = "extend_group_urefs"; const REMOVE_GROUP_UREFS: &str = "remove_group_urefs"; const GROUP_NAME_ARG: &str = "group_name"; -const UREFS_ARG: &str = "urefs"; const NEW_UREFS_COUNT: u64 = 3; const GROUP_1_NAME: &str = "Group 1"; const TOTAL_NEW_UREFS_ARG: &str = "total_new_urefs"; const TOTAL_EXISTING_UREFS_ARG: &str = "total_existing_urefs"; const ARG_AMOUNT: &str = "amount"; +const ARG_UREF_INDICES: &str = "uref_indices"; static DEFAULT_CREATE_GROUP_ARGS: Lazy = Lazy::new(|| { runtime_args! 
{ @@ -51,24 +48,21 @@ fn should_create_and_remove_group() { ) .build(); - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); builder.exec(exec_request_1).expect_success().commit(); - let account = builder - .query(None, Key::Account(*DEFAULT_ACCOUNT_ADDR), &[]) - .expect("should query account") - .as_account() - .cloned() - .expect("should be account"); + let entity = builder + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .expect("must have entity"); - let package_hash = account + let package_hash = entity .named_keys() .get(PACKAGE_HASH_KEY) - .expect("should have contract package"); - let _access_uref = account + .expect("should have package"); + let _access_uref = entity .named_keys() .get(PACKAGE_ACCESS_KEY) .expect("should have package hash"); @@ -81,16 +75,16 @@ fn should_create_and_remove_group() { .with_address(*DEFAULT_ACCOUNT_ADDR) .with_stored_versioned_contract_by_name( PACKAGE_HASH_KEY, - Some(CONTRACT_INITIAL_VERSION), + Some(ENTITY_INITIAL_VERSION), CREATE_GROUP, DEFAULT_CREATE_GROUP_ARGS.clone(), ) - .with_empty_payment_bytes(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT }) + .with_standard_payment(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT }) .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) .with_deploy_hash([3; 32]) .build(); - ExecuteRequestBuilder::new().push_deploy(deploy).build() + ExecuteRequestBuilder::from_deploy_item(&deploy).build() }; builder.exec(exec_request_2).expect_success().commit(); @@ -119,16 +113,16 @@ fn should_create_and_remove_group() { .with_address(*DEFAULT_ACCOUNT_ADDR) .with_stored_versioned_contract_by_name( PACKAGE_HASH_KEY, - Some(CONTRACT_INITIAL_VERSION), + Some(ENTITY_INITIAL_VERSION), REMOVE_GROUP, args, ) - .with_empty_payment_bytes(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT }) + .with_standard_payment(runtime_args! 
{ ARG_AMOUNT => *DEFAULT_PAYMENT }) .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) .with_deploy_hash([3; 32]) .build(); - ExecuteRequestBuilder::new().push_deploy(deploy).build() + ExecuteRequestBuilder::from_deploy_item(&deploy).build() }; builder.exec(exec_request_3).expect_success().commit(); @@ -157,18 +151,15 @@ fn should_create_and_extend_user_group() { ) .build(); - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); builder.exec(exec_request_1).expect_success().commit(); let account = builder - .query(None, Key::Account(*DEFAULT_ACCOUNT_ADDR), &[]) - .expect("should query account") - .as_account() - .cloned() - .expect("should be account"); + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .expect("must have contract"); let package_hash = account .named_keys() @@ -187,16 +178,16 @@ fn should_create_and_extend_user_group() { .with_address(*DEFAULT_ACCOUNT_ADDR) .with_stored_versioned_contract_by_name( PACKAGE_HASH_KEY, - Some(CONTRACT_INITIAL_VERSION), + Some(ENTITY_INITIAL_VERSION), CREATE_GROUP, DEFAULT_CREATE_GROUP_ARGS.clone(), ) - .with_empty_payment_bytes(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT }) + .with_standard_payment(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT }) .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) .with_deploy_hash([5; 32]) .build(); - ExecuteRequestBuilder::new().push_deploy(deploy).build() + ExecuteRequestBuilder::from_deploy_item(&deploy).build() }; builder.exec(exec_request_2).expect_success().commit(); @@ -226,16 +217,16 @@ fn should_create_and_extend_user_group() { .with_address(*DEFAULT_ACCOUNT_ADDR) .with_stored_versioned_contract_by_name( PACKAGE_HASH_KEY, - Some(CONTRACT_INITIAL_VERSION), + Some(ENTITY_INITIAL_VERSION), EXTEND_GROUP_UREFS, args, ) - .with_empty_payment_bytes(runtime_args! 
{ ARG_AMOUNT => *DEFAULT_PAYMENT }) + .with_standard_payment(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT }) .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) .with_deploy_hash([3; 32]) .build(); - ExecuteRequestBuilder::new().push_deploy(deploy).build() + ExecuteRequestBuilder::from_deploy_item(&deploy).build() }; builder.exec(exec_request_3).expect_success().commit(); @@ -252,7 +243,7 @@ fn should_create_and_extend_user_group() { .expect("should have group"); assert!(group_1_extended.len() > group_1.len()); // Calculates how many new urefs were created - let new_urefs: BTreeSet<_> = group_1_extended.difference(&group_1).collect(); + let new_urefs: BTreeSet<_> = group_1_extended.difference(group_1).collect(); assert_eq!(new_urefs.len(), NEW_UREFS_COUNT as usize); } @@ -268,19 +259,15 @@ fn should_create_and_remove_urefs_from_group() { ) .build(); - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); builder.exec(exec_request_1).expect_success().commit(); let account = builder - .query(None, Key::Account(*DEFAULT_ACCOUNT_ADDR), &[]) - .expect("should query account") - .as_account() - .cloned() - .expect("should be account"); - + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .expect("must have contract"); let package_hash = account .named_keys() .get(PACKAGE_HASH_KEY) @@ -298,16 +285,16 @@ fn should_create_and_remove_urefs_from_group() { .with_address(*DEFAULT_ACCOUNT_ADDR) .with_stored_versioned_contract_by_name( PACKAGE_HASH_KEY, - Some(CONTRACT_INITIAL_VERSION), + Some(ENTITY_INITIAL_VERSION), CREATE_GROUP, DEFAULT_CREATE_GROUP_ARGS.clone(), ) - .with_empty_payment_bytes(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT }) + .with_standard_payment(runtime_args! 
{ ARG_AMOUNT => *DEFAULT_PAYMENT }) .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) .with_deploy_hash([3; 32]) .build(); - ExecuteRequestBuilder::new().push_deploy(deploy).build() + ExecuteRequestBuilder::from_deploy_item(&deploy).build() }; builder.exec(exec_request_2).expect_success().commit(); @@ -325,30 +312,30 @@ fn should_create_and_remove_urefs_from_group() { .expect("should have group"); assert_eq!(group_1.len(), 2); - let urefs_to_remove = Vec::from_iter(group_1.to_owned()); - let exec_request_3 = { - // This inserts package as an argument because this test - // can work from different accounts which might not have the same keys in their session - // code. + // This inserts package as an argument because this test can work from different accounts + // which might not have the same keys in their session code. let args = runtime_args! { GROUP_NAME_ARG => GROUP_1_NAME, - UREFS_ARG => urefs_to_remove, + // We're passing indices of urefs inside a group rather than URef values as group urefs + // aren't part of the access rights. This test will read a ContractPackage instance, get + // the group by its name, and remove URefs by their indices. + ARG_UREF_INDICES => vec![0u64, 1u64], }; let deploy = DeployItemBuilder::new() .with_address(*DEFAULT_ACCOUNT_ADDR) .with_stored_versioned_contract_by_name( PACKAGE_HASH_KEY, - Some(CONTRACT_INITIAL_VERSION), + Some(ENTITY_INITIAL_VERSION), REMOVE_GROUP_UREFS, args, ) - .with_empty_payment_bytes(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT }) + .with_standard_payment(runtime_args! 
{ ARG_AMOUNT => *DEFAULT_PAYMENT }) .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) .with_deploy_hash([3; 32]) .build(); - ExecuteRequestBuilder::new().push_deploy(deploy).build() + ExecuteRequestBuilder::from_deploy_item(&deploy).build() }; builder.exec(exec_request_3).expect_success().commit(); @@ -378,19 +365,15 @@ fn should_limit_max_urefs_while_extending() { ) .build(); - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); builder.exec(exec_request_1).expect_success().commit(); let account = builder - .query(None, Key::Account(*DEFAULT_ACCOUNT_ADDR), &[]) - .expect("should query account") - .as_account() - .cloned() - .expect("should be account"); - + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .expect("must have contract"); let package_hash = account .named_keys() .get(PACKAGE_HASH_KEY) @@ -408,16 +391,16 @@ fn should_limit_max_urefs_while_extending() { .with_address(*DEFAULT_ACCOUNT_ADDR) .with_stored_versioned_contract_by_name( PACKAGE_HASH_KEY, - Some(CONTRACT_INITIAL_VERSION), + Some(ENTITY_INITIAL_VERSION), CREATE_GROUP, DEFAULT_CREATE_GROUP_ARGS.clone(), ) - .with_empty_payment_bytes(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT }) + .with_standard_payment(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT }) .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) .with_deploy_hash([3; 32]) .build(); - ExecuteRequestBuilder::new().push_deploy(deploy).build() + ExecuteRequestBuilder::from_deploy_item(&deploy).build() }; builder.exec(exec_request_2).expect_success().commit(); @@ -447,16 +430,16 @@ fn should_limit_max_urefs_while_extending() { .with_address(*DEFAULT_ACCOUNT_ADDR) .with_stored_versioned_contract_by_name( PACKAGE_HASH_KEY, - Some(CONTRACT_INITIAL_VERSION), + Some(ENTITY_INITIAL_VERSION), EXTEND_GROUP_UREFS, args, ) - .with_empty_payment_bytes(runtime_args! 
{ ARG_AMOUNT => *DEFAULT_PAYMENT }) + .with_standard_payment(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT }) .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) .with_deploy_hash([5; 32]) .build(); - ExecuteRequestBuilder::new().push_deploy(deploy).build() + ExecuteRequestBuilder::from_deploy_item(&deploy).build() }; let exec_request_4 = { @@ -472,16 +455,16 @@ fn should_limit_max_urefs_while_extending() { .with_address(*DEFAULT_ACCOUNT_ADDR) .with_stored_versioned_contract_by_name( PACKAGE_HASH_KEY, - Some(CONTRACT_INITIAL_VERSION), + Some(ENTITY_INITIAL_VERSION), EXTEND_GROUP_UREFS, args, ) - .with_empty_payment_bytes(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT }) + .with_standard_payment(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT }) .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) .with_deploy_hash([32; 32]) .build(); - ExecuteRequestBuilder::new().push_deploy(deploy).build() + ExecuteRequestBuilder::from_deploy_item(&deploy).build() }; builder.exec(exec_request_3).expect_success().commit(); @@ -501,13 +484,13 @@ fn should_limit_max_urefs_while_extending() { // Tries to exceed the limit by 1 builder.exec(exec_request_4).commit(); - let response = builder - .get_exec_results() - .last() + let exec_response = builder + .get_last_exec_result() .expect("should have last response"); - assert_eq!(response.len(), 1); - let exec_response = response.last().expect("should have response"); - let error = exec_response.as_error().expect("should have error"); - let error = assert_matches!(error, Error::Exec(execution::Error::Revert(e)) => e); - assert_eq!(error, &contracts::Error::MaxTotalURefsExceeded.into()); + let error = exec_response.error().expect("should have error"); + let error = assert_matches!(error, Error::Exec(ExecError::Revert(e)) => e); + assert_eq!( + error, + &addressable_entity::Error::MaxTotalURefsExceeded.into() + ); } diff --git a/execution_engine_testing/tests/src/test/mod.rs b/execution_engine_testing/tests/src/test/mod.rs index 
37cc3a6fa3..b0702b649c 100644 --- a/execution_engine_testing/tests/src/test/mod.rs +++ b/execution_engine_testing/tests/src/test/mod.rs @@ -1,18 +1,37 @@ +mod calling_packages_by_version_query; +mod chainspec_registry; mod check_transfer_success; mod contract_api; mod contract_context; -mod counter; +mod contract_messages; +mod counter_factory; mod deploy; mod explorer; -mod gas_counter; mod get_balance; mod groups; mod host_function_costs; mod manage_groups; +mod private_chain; mod regression; +mod stack_overflow; mod step; mod storage_costs; mod system_contracts; mod system_costs; +mod tutorial; mod upgrade; mod wasmless_transfer; + +// NOTE: the original execution engine also handled charging for gas costs +// and these integration tests commonly would, in addition to other behavior being tested, +// also check that expected payment handling was being done. +// As of 2.0 compliant execution engines no longer handle payment... +// all payment handling is done in the node prior to engaging native logic or an execution target +// and all testing of payment handling occurs within the node tests. +// Thus these ee integration tests cannot (and should not) test changes to balances related +// to costs as they once did. Instead they should (and only can) test that gas limits are +// correctly applied and enforced and that non-cost transfers work properly. +// Because many tests included balance checks with expectations around payment handling in +// addition to whatever else they were testing, they required adjustment. +// In some cases the names of the tests included terms such as 'should_charge_' or 'should_cost_' +// which is no longer true and require the name of the test be adjusted to reflect the new reality. 
diff --git a/execution_engine_testing/tests/src/test/private_chain.rs b/execution_engine_testing/tests/src/test/private_chain.rs new file mode 100644 index 0000000000..65a5b9b899 --- /dev/null +++ b/execution_engine_testing/tests/src/test/private_chain.rs @@ -0,0 +1,244 @@ +mod fees_accumulation; +pub mod management; +mod restricted_auction; +mod unrestricted_transfers; + +use casper_engine_test_support::{ + genesis_config_builder::GenesisConfigBuilder, ChainspecConfig, LmdbWasmTestBuilder, + DEFAULT_ACCOUNT_INITIAL_BALANCE, DEFAULT_AUCTION_DELAY, DEFAULT_CHAINSPEC_REGISTRY, + DEFAULT_GENESIS_CONFIG_HASH, DEFAULT_GENESIS_TIMESTAMP_MILLIS, + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS, DEFAULT_PROPOSER_PUBLIC_KEY, DEFAULT_PROTOCOL_VERSION, + DEFAULT_ROUND_SEIGNIORAGE_RATE, DEFAULT_STORAGE_COSTS, DEFAULT_SYSTEM_CONFIG, + DEFAULT_UNBONDING_DELAY, DEFAULT_VALIDATOR_SLOTS, DEFAULT_WASM_CONFIG, +}; +use num_rational::Ratio; +use once_cell::sync::Lazy; +use std::collections::{BTreeMap, BTreeSet}; + +use casper_storage::data_access_layer::GenesisRequest; +use casper_types::{ + account::AccountHash, system::auction::DELEGATION_RATE_DENOMINATOR, AdministratorAccount, + CoreConfig, FeeHandling, GenesisAccount, GenesisConfig, GenesisValidator, HostFunction, + HostFunctionCostsV1, MessageLimits, Motes, OpcodeCosts, PublicKey, RefundHandling, SecretKey, + StorageCosts, WasmConfig, WasmV1Config, WasmV2Config, DEFAULT_MAX_STACK_HEIGHT, + DEFAULT_WASM_MAX_MEMORY, U512, +}; +use tempfile::TempDir; + +static VALIDATOR_1_SECRET_KEY: Lazy = + Lazy::new(|| SecretKey::secp256k1_from_bytes([244; 32]).unwrap()); +static VALIDATOR_1_PUBLIC_KEY: Lazy = + Lazy::new(|| PublicKey::from(&*VALIDATOR_1_SECRET_KEY)); + +const DEFAULT_VALIDATOR_BONDED_AMOUNT: U512 = U512([u64::MAX, 0, 0, 0, 0, 0, 0, 0]); + +static DEFAULT_ADMIN_ACCOUNT_SECRET_KEY: Lazy = + Lazy::new(|| SecretKey::secp256k1_from_bytes([250; 32]).unwrap()); +static DEFAULT_ADMIN_ACCOUNT_PUBLIC_KEY: Lazy = + Lazy::new(|| 
PublicKey::from(&*DEFAULT_ADMIN_ACCOUNT_SECRET_KEY)); +static DEFAULT_ADMIN_ACCOUNT_ADDR: Lazy = + Lazy::new(|| DEFAULT_ADMIN_ACCOUNT_PUBLIC_KEY.to_account_hash()); + +static ADMIN_1_ACCOUNT_SECRET_KEY: Lazy = + Lazy::new(|| SecretKey::secp256k1_from_bytes([240; 32]).unwrap()); +static ADMIN_1_ACCOUNT_PUBLIC_KEY: Lazy = + Lazy::new(|| PublicKey::from(&*ADMIN_1_ACCOUNT_SECRET_KEY)); +static ADMIN_1_ACCOUNT_ADDR: Lazy = + Lazy::new(|| ADMIN_1_ACCOUNT_PUBLIC_KEY.to_account_hash()); + +static ACCOUNT_1_SECRET_KEY: Lazy = + Lazy::new(|| SecretKey::secp256k1_from_bytes([251; 32]).unwrap()); +static ACCOUNT_1_PUBLIC_KEY: Lazy = + Lazy::new(|| PublicKey::from(&*ACCOUNT_1_SECRET_KEY)); +static ACCOUNT_1_ADDR: Lazy = Lazy::new(|| ACCOUNT_1_PUBLIC_KEY.to_account_hash()); + +static ACCOUNT_2_SECRET_KEY: Lazy = + Lazy::new(|| SecretKey::secp256k1_from_bytes([241; 32]).unwrap()); +static ACCOUNT_2_PUBLIC_KEY: Lazy = + Lazy::new(|| PublicKey::from(&*ACCOUNT_2_SECRET_KEY)); +static ACCOUNT_2_ADDR: Lazy = Lazy::new(|| ACCOUNT_2_PUBLIC_KEY.to_account_hash()); + +const ADMIN_ACCOUNT_INITIAL_BALANCE: U512 = U512([100_000_000_000_000_000u64, 0, 0, 0, 0, 0, 0, 0]); + +const PRIVATE_CHAIN_ALLOW_AUCTION_BIDS: bool = false; +const PRIVATE_CHAIN_ALLOW_UNRESTRICTED_TRANSFERS: bool = false; + +static PRIVATE_CHAIN_GENESIS_ADMIN_ACCOUNTS: Lazy> = Lazy::new(|| { + let default_admin = AdministratorAccount::new( + DEFAULT_ADMIN_ACCOUNT_PUBLIC_KEY.clone(), + Motes::new(ADMIN_ACCOUNT_INITIAL_BALANCE), + ); + let admin_1 = AdministratorAccount::new( + ADMIN_1_ACCOUNT_PUBLIC_KEY.clone(), + Motes::new(ADMIN_ACCOUNT_INITIAL_BALANCE), + ); + vec![default_admin, admin_1] +}); + +static PRIVATE_CHAIN_GENESIS_ADMIN_SET: Lazy> = Lazy::new(|| { + PRIVATE_CHAIN_GENESIS_ADMIN_ACCOUNTS + .iter() + .map(|admin| admin.public_key().clone()) + .collect() +}); + +static PRIVATE_CHAIN_GENESIS_VALIDATORS: Lazy> = + Lazy::new(|| { + let public_key = VALIDATOR_1_PUBLIC_KEY.clone(); + let genesis_validator_1 = 
GenesisValidator::new( + Motes::new(DEFAULT_VALIDATOR_BONDED_AMOUNT), + DELEGATION_RATE_DENOMINATOR, + ); + let mut genesis_validators = BTreeMap::new(); + genesis_validators.insert(public_key, genesis_validator_1); + genesis_validators + }); + +static PRIVATE_CHAIN_DEFAULT_ACCOUNTS: Lazy> = Lazy::new(|| { + let mut default_accounts = Vec::new(); + + let proposer_account = + GenesisAccount::account(DEFAULT_PROPOSER_PUBLIC_KEY.clone(), Motes::zero(), None); + default_accounts.push(proposer_account); + + // One normal account that starts at genesis + default_accounts.push(GenesisAccount::account( + ACCOUNT_1_PUBLIC_KEY.clone(), + Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE), + None, + )); + + // Set up genesis validators + { + let public_key = VALIDATOR_1_PUBLIC_KEY.clone(); + let genesis_validator = PRIVATE_CHAIN_GENESIS_VALIDATORS[&public_key]; + default_accounts.push(GenesisAccount::Account { + public_key, + // Genesis validators for a private network doesn't have balances, but they are part of + // fixed set of validators + balance: Motes::zero(), + validator: Some(genesis_validator), + }); + } + + let admin_accounts = PRIVATE_CHAIN_GENESIS_ADMIN_ACCOUNTS.clone(); + let genesis_admins = admin_accounts.into_iter().map(GenesisAccount::from); + default_accounts.extend(genesis_admins); + + default_accounts +}); + +const PRIVATE_CHAIN_REFUND_HANDLING: RefundHandling = RefundHandling::Refund { + refund_ratio: Ratio::new_raw(1, 1), +}; +const PRIVATE_CHAIN_FEE_HANDLING: FeeHandling = FeeHandling::Accumulate; +const PRIVATE_CHAIN_COMPUTE_REWARDS: bool = false; + +static DEFUALT_PRIVATE_CHAIN_EXEC_CONFIG: Lazy = Lazy::new(|| { + GenesisConfigBuilder::default() + .with_accounts(PRIVATE_CHAIN_DEFAULT_ACCOUNTS.clone()) + .with_wasm_config(*DEFAULT_WASM_CONFIG) + .with_system_config(*DEFAULT_SYSTEM_CONFIG) + .with_validator_slots(DEFAULT_VALIDATOR_SLOTS) + .with_auction_delay(DEFAULT_AUCTION_DELAY) + .with_locked_funds_period_millis(DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS) + 
.with_round_seigniorage_rate(DEFAULT_ROUND_SEIGNIORAGE_RATE) + .with_unbonding_delay(DEFAULT_UNBONDING_DELAY) + .with_genesis_timestamp_millis(DEFAULT_GENESIS_TIMESTAMP_MILLIS) + .with_storage_costs(*DEFAULT_STORAGE_COSTS) + .build() +}); + +static DEFAULT_PRIVATE_CHAIN_GENESIS: Lazy = Lazy::new(|| { + GenesisRequest::new( + DEFAULT_GENESIS_CONFIG_HASH, + DEFAULT_PROTOCOL_VERSION, + DEFUALT_PRIVATE_CHAIN_EXEC_CONFIG.clone(), + DEFAULT_CHAINSPEC_REGISTRY.clone(), + ) +}); + +fn custom_setup_genesis_only( + allow_auction_bids: bool, + allow_unrestricted_transfers: bool, + refund_handling: RefundHandling, + fee_handling: FeeHandling, + compute_rewards: bool, +) -> LmdbWasmTestBuilder { + let engine_config = make_private_chain_config( + allow_auction_bids, + allow_unrestricted_transfers, + refund_handling, + fee_handling, + compute_rewards, + ); + let data_dir = TempDir::new().expect("should create temp dir"); + let mut builder = LmdbWasmTestBuilder::new_with_config(data_dir.as_ref(), engine_config); + builder.run_genesis(DEFAULT_PRIVATE_CHAIN_GENESIS.clone()); + builder +} + +fn setup_genesis_only() -> LmdbWasmTestBuilder { + custom_setup_genesis_only( + PRIVATE_CHAIN_ALLOW_AUCTION_BIDS, + PRIVATE_CHAIN_ALLOW_UNRESTRICTED_TRANSFERS, + PRIVATE_CHAIN_REFUND_HANDLING, + PRIVATE_CHAIN_FEE_HANDLING, + PRIVATE_CHAIN_COMPUTE_REWARDS, + ) +} + +fn make_wasm_config() -> WasmConfig { + let host_functions = HostFunctionCostsV1 { + // Required for non-standard payment that transfers to a system account. + // Depends on a bug filled to lower transfer host functions to be able to freely transfer + // funds inside payment code. 
+ transfer_from_purse_to_account: HostFunction::fixed(0), + ..HostFunctionCostsV1::default() + }; + let wasm_v1_config = WasmV1Config::new( + DEFAULT_WASM_MAX_MEMORY, + DEFAULT_MAX_STACK_HEIGHT, + OpcodeCosts::default(), + host_functions, + ); + let wasm_v2_config = WasmV2Config::default(); + WasmConfig::new(MessageLimits::default(), wasm_v1_config, wasm_v2_config) +} + +fn make_private_chain_config( + allow_auction_bids: bool, + allow_unrestricted_transfers: bool, + refund_handling: RefundHandling, + fee_handling: FeeHandling, + compute_rewards: bool, +) -> ChainspecConfig { + let administrators = PRIVATE_CHAIN_GENESIS_ADMIN_SET.clone(); + let core_config = CoreConfig { + administrators, + allow_auction_bids, + allow_unrestricted_transfers, + refund_handling, + fee_handling, + compute_rewards, + ..Default::default() + }; + let wasm_config = make_wasm_config(); + let storage_costs = StorageCosts::default(); + ChainspecConfig { + core_config, + wasm_config, + system_costs_config: Default::default(), + storage_costs, + } +} + +fn private_chain_setup() -> LmdbWasmTestBuilder { + custom_setup_genesis_only( + PRIVATE_CHAIN_ALLOW_AUCTION_BIDS, + PRIVATE_CHAIN_ALLOW_UNRESTRICTED_TRANSFERS, + PRIVATE_CHAIN_REFUND_HANDLING, + PRIVATE_CHAIN_FEE_HANDLING, + PRIVATE_CHAIN_COMPUTE_REWARDS, + ) +} diff --git a/execution_engine_testing/tests/src/test/private_chain/fees_accumulation.rs b/execution_engine_testing/tests/src/test/private_chain/fees_accumulation.rs new file mode 100644 index 0000000000..886dc71f62 --- /dev/null +++ b/execution_engine_testing/tests/src/test/private_chain/fees_accumulation.rs @@ -0,0 +1,303 @@ +use std::collections::BTreeSet; + +use casper_engine_test_support::{ + ExecuteRequestBuilder, LmdbWasmTestBuilder, TransferRequestBuilder, UpgradeRequestBuilder, + DEFAULT_ACCOUNT_ADDR, DEFAULT_BLOCK_TIME, DEFAULT_PROPOSER_ADDR, DEFAULT_PROTOCOL_VERSION, + LOCAL_GENESIS_REQUEST, MINIMUM_ACCOUNT_CREATION_BALANCE, +}; +use casper_types::{ + account::AccountHash, 
system::handle_payment::ACCUMULATION_PURSE_KEY, EntityAddr, EraId, + FeeHandling, Key, ProtocolVersion, RuntimeArgs, U512, +}; + +use crate::{ + lmdb_fixture, + test::private_chain::{self, ACCOUNT_1_ADDR, DEFAULT_ADMIN_ACCOUNT_ADDR}, + wasm_utils, +}; + +const OLD_PROTOCOL_VERSION: ProtocolVersion = DEFAULT_PROTOCOL_VERSION; +const NEW_PROTOCOL_VERSION: ProtocolVersion = ProtocolVersion::from_parts( + OLD_PROTOCOL_VERSION.value().major, + OLD_PROTOCOL_VERSION.value().minor, + OLD_PROTOCOL_VERSION.value().patch + 1, +); + +#[ignore] +#[test] +fn default_genesis_config_should_not_have_rewards_purse() { + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let handle_payment = builder.get_handle_payment_contract_hash(); + let handle_payment_contract = + builder.get_named_keys(EntityAddr::System(handle_payment.value())); + + assert!( + handle_payment_contract.contains(ACCUMULATION_PURSE_KEY), + "Did not find rewards purse in handle payment's named keys {:?}", + handle_payment_contract + ); +} + +#[ignore] +#[test] +fn should_finalize_and_accumulate_rewards_purse() { + let mut builder = private_chain::setup_genesis_only(); + + let handle_payment = builder.get_handle_payment_contract_hash(); + let handle_payment_1 = builder.get_named_keys(EntityAddr::System(handle_payment.value())); + + let rewards_purse_key = handle_payment_1 + .get(ACCUMULATION_PURSE_KEY) + .expect("should have rewards purse"); + let rewards_purse_uref = rewards_purse_key.into_uref().expect("should be uref"); + assert_eq!(builder.get_purse_balance(rewards_purse_uref), U512::zero()); + + let exec_request_1 = ExecuteRequestBuilder::module_bytes( + *DEFAULT_ADMIN_ACCOUNT_ADDR, + wasm_utils::do_minimum_bytes(), + RuntimeArgs::default(), + ) + .build(); + + builder.exec(exec_request_1).expect_success().commit(); + + let handle_payment_2 = builder.get_named_keys(EntityAddr::System(handle_payment.value())); + + assert_eq!( + handle_payment_1, 
handle_payment_2, + "none of the named keys should change before and after execution" + ); + + let _transfer_request = + TransferRequestBuilder::new(MINIMUM_ACCOUNT_CREATION_BALANCE, *ACCOUNT_1_ADDR) + .with_initiator(*DEFAULT_ADMIN_ACCOUNT_ADDR) + .build(); +} + +#[ignore] +#[allow(unused)] +// #[test] +fn should_accumulate_deploy_fees() { + let mut builder = super::private_chain_setup(); + + // Check handle payments has rewards purse + let handle_payment_hash = builder.get_handle_payment_contract_hash(); + let handle_payment_contract = + builder.get_named_keys(EntityAddr::System(handle_payment_hash.value())); + + let rewards_purse = handle_payment_contract + .get(ACCUMULATION_PURSE_KEY) + .unwrap() + .into_uref() + .expect("should be uref"); + + // At this point rewards purse balance is not zero as the `private_chain_setup` executes bunch + // of deploys before + let rewards_balance_before = builder.get_purse_balance(rewards_purse); + + let exec_request = ExecuteRequestBuilder::module_bytes( + *DEFAULT_ADMIN_ACCOUNT_ADDR, + wasm_utils::do_minimum_bytes(), + RuntimeArgs::default(), + ) + .build(); + + // let exec_request_proposer = exec_request.proposer.clone(); + + builder.exec(exec_request).expect_success().commit(); + + let handle_payment_after = + builder.get_named_keys(EntityAddr::System(handle_payment_hash.value())); + + assert_eq!( + handle_payment_after.get(ACCUMULATION_PURSE_KEY), + handle_payment_contract.get(ACCUMULATION_PURSE_KEY), + "keys should not change before and after deploy has been processed", + ); + + let rewards_purse = handle_payment_contract + .get(ACCUMULATION_PURSE_KEY) + .unwrap() + .into_uref() + .expect("should be uref"); + let rewards_balance_after = builder.get_purse_balance(rewards_purse); + assert!( + rewards_balance_after > rewards_balance_before, + "rewards balance should increase" + ); + + // // Ensures default proposer didn't receive any funds + // let proposer_account = builder + // 
.get_entity_by_account_hash(exec_request_proposer.to_account_hash()) + // .expect("should have proposer account"); + // + // assert_eq!( + // builder.get_purse_balance(proposer_account.main_purse()), + // U512::zero() + // ); +} + +#[ignore] +#[allow(unused)] +// #[test] +fn should_distribute_accumulated_fees_to_admins() { + let mut builder = super::private_chain_setup(); + + let handle_payment_hash = builder.get_handle_payment_contract_hash(); + let handle_payment = builder.get_named_keys(EntityAddr::System(handle_payment_hash.value())); + + let accumulation_purse = handle_payment + .get(ACCUMULATION_PURSE_KEY) + .expect("handle payment should have named key") + .into_uref() + .expect("accumulation purse should be an uref"); + + let exec_request_1 = ExecuteRequestBuilder::module_bytes( + *DEFAULT_ADMIN_ACCOUNT_ADDR, + wasm_utils::do_minimum_bytes(), + RuntimeArgs::default(), + ) + .build(); + + let accumulated_purse_balance_before_exec = builder.get_purse_balance(accumulation_purse); + assert!(accumulated_purse_balance_before_exec.is_zero()); + + builder.exec(exec_request_1).expect_success().commit(); + + // At this point rewards purse balance is not zero as the `private_chain_setup` executes bunch + // of deploys before + let accumulated_purse_balance_after_exec = builder.get_purse_balance(accumulation_purse); + assert!(!accumulated_purse_balance_after_exec.is_zero()); + + let admin = builder + .get_entity_by_account_hash(*DEFAULT_ADMIN_ACCOUNT_ADDR) + .expect("should have admin account"); + let admin_balance_before = builder.get_purse_balance(admin.main_purse()); + + let mut administrative_accounts: BTreeSet<AccountHash> = BTreeSet::new(); + administrative_accounts.insert(*DEFAULT_ADMIN_ACCOUNT_ADDR); + + let result = builder.distribute_fees(None, DEFAULT_PROTOCOL_VERSION, DEFAULT_BLOCK_TIME); + + assert!(result.is_success(), "expected success not: {:?}", result); + + let accumulated_purse_balance_after_distribute = builder.get_purse_balance(accumulation_purse); + + assert!( 
+ accumulated_purse_balance_after_distribute < accumulated_purse_balance_after_exec, + "accumulated purse balance should be distributed ({} >= {})", + accumulated_purse_balance_after_distribute, + accumulated_purse_balance_after_exec + ); + + let admin_balance_after = builder.get_purse_balance(admin.main_purse()); + + assert!( + admin_balance_after > admin_balance_before, + "admin balance should grow after distributing accumulated purse" + ); +} + +#[ignore] +#[allow(unused)] +// #[test] +fn should_accumulate_fees_after_upgrade() { + let (mut builder, _lmdb_fixture_state, _temp_dir) = + lmdb_fixture::builder_from_global_state_fixture(lmdb_fixture::RELEASE_1_4_5); + + // Ensures default proposer didn't receive any funds + let proposer_account = builder + .query(None, Key::Account(*DEFAULT_PROPOSER_ADDR), &[]) + .expect("should have proposer account") + .into_account() + .expect("should have legacy Account under the Key::Account variant"); + + let proposer_balance_before = builder.get_purse_balance(proposer_account.main_purse()); + + // Check handle payments has rewards purse + let handle_payment_hash = builder.get_handle_payment_contract_hash(); + + let handle_payment_contract = builder + .query(None, Key::Hash(handle_payment_hash.value()), &[]) + .expect("should have handle payment contract") + .into_contract() + .expect("should have legacy Contract under the Key::Contract variant"); + + assert!( + handle_payment_contract + .named_keys() + .get(ACCUMULATION_PURSE_KEY) + .is_none(), + "should not have accumulation purse in a persisted state" + ); + + let mut upgrade_request = { + UpgradeRequestBuilder::new() + .with_current_protocol_version(OLD_PROTOCOL_VERSION) + .with_new_protocol_version(NEW_PROTOCOL_VERSION) + .with_activation_point(EraId::default()) + .with_fee_handling(FeeHandling::Accumulate) + .build() + }; + + let updated_chainspec = builder + .chainspec() + .clone() + .with_fee_handling(FeeHandling::Accumulate); + + 
builder.with_chainspec(updated_chainspec); + + builder + .upgrade(&mut upgrade_request) + .expect_upgrade_success(); + // Check handle payments has rewards purse + let handle_payment_hash = builder.get_handle_payment_contract_hash(); + let handle_payment_contract = + builder.get_named_keys(EntityAddr::System(handle_payment_hash.value())); + let rewards_purse = handle_payment_contract + .get(ACCUMULATION_PURSE_KEY) + .expect("should have accumulation purse") + .into_uref() + .expect("should be uref"); + + // At this point rewards purse balance is not zero as the `private_chain_setup` executes bunch + // of deploys before + let rewards_balance_before = builder.get_purse_balance(rewards_purse); + + let exec_request = ExecuteRequestBuilder::module_bytes( + *DEFAULT_ACCOUNT_ADDR, + wasm_utils::do_minimum_bytes(), + RuntimeArgs::default(), + ) + .build(); + + builder.exec(exec_request).expect_success().commit(); + + let handle_payment_after = + builder.get_named_keys(EntityAddr::System(handle_payment_hash.value())); + + assert_eq!( + handle_payment_after.get(ACCUMULATION_PURSE_KEY), + handle_payment_contract.get(ACCUMULATION_PURSE_KEY), + "keys should not change before and after deploy has been processed", + ); + + let rewards_purse = handle_payment_contract + .get(ACCUMULATION_PURSE_KEY) + .unwrap() + .into_uref() + .expect("should be uref"); + let rewards_balance_after = builder.get_purse_balance(rewards_purse); + assert!( + rewards_balance_after > rewards_balance_before, + "rewards balance should increase" + ); + + let proposer_balance_after = builder.get_purse_balance(proposer_account.main_purse()); + assert_eq!( + proposer_balance_before, proposer_balance_after, + "proposer should not receive any more funds after switching to accumulation" + ); +} diff --git a/execution_engine_testing/tests/src/test/private_chain/management.rs b/execution_engine_testing/tests/src/test/private_chain/management.rs new file mode 100644 index 0000000000..03a53aa90a --- /dev/null +++ 
b/execution_engine_testing/tests/src/test/private_chain/management.rs @@ -0,0 +1,914 @@ +use casper_engine_test_support::{ + ChainspecConfig, DeployItemBuilder, ExecuteRequest, ExecuteRequestBuilder, LmdbWasmTestBuilder, + TransferRequestBuilder, DEFAULT_AUCTION_DELAY, DEFAULT_CHAINSPEC_REGISTRY, + DEFAULT_GENESIS_CONFIG_HASH, DEFAULT_GENESIS_TIMESTAMP_MILLIS, + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS, DEFAULT_PAYMENT, DEFAULT_PROTOCOL_VERSION, + DEFAULT_ROUND_SEIGNIORAGE_RATE, DEFAULT_STORAGE_COSTS, DEFAULT_SYSTEM_CONFIG, + DEFAULT_UNBONDING_DELAY, DEFAULT_VALIDATOR_SLOTS, DEFAULT_WASM_CONFIG, +}; +use casper_execution_engine::{engine_state::Error, execution::ExecError}; +use casper_storage::{data_access_layer::GenesisRequest, tracking_copy::TrackingCopyError}; +use casper_types::{ + account::{AccountHash, Weight}, + bytesrepr::ToBytes, + runtime_args, + system::{ + auction::{self, DelegationRate}, + mint, + standard_payment::{self, ARG_AMOUNT}, + }, + AddressableEntityHash, ApiError, CLType, CLValue, CoreConfig, EntityAddr, GenesisAccount, Key, + Package, PackageHash, RuntimeArgs, U512, +}; +use tempfile::TempDir; + +use crate::{ + test::private_chain::{ + self, ACCOUNT_2_ADDR, ADMIN_1_ACCOUNT_ADDR, PRIVATE_CHAIN_ALLOW_AUCTION_BIDS, + PRIVATE_CHAIN_COMPUTE_REWARDS, VALIDATOR_1_PUBLIC_KEY, + }, + wasm_utils, GenesisConfigBuilder, +}; + +use super::{ + ACCOUNT_1_ADDR, ACCOUNT_1_PUBLIC_KEY, DEFAULT_ADMIN_ACCOUNT_ADDR, + PRIVATE_CHAIN_DEFAULT_ACCOUNTS, PRIVATE_CHAIN_FEE_HANDLING, + PRIVATE_CHAIN_GENESIS_ADMIN_ACCOUNTS, PRIVATE_CHAIN_GENESIS_ADMIN_SET, + PRIVATE_CHAIN_REFUND_HANDLING, +}; + +const ADD_ASSOCIATED_KEY_CONTRACT: &str = "add_associated_key.wasm"; +const REMOVE_ASSOCIATED_KEY_CONTRACT: &str = "remove_associated_key.wasm"; +const SET_ACTION_THRESHOLDS_CONTRACT: &str = "set_action_thresholds.wasm"; +const UPDATE_ASSOCIATED_KEY_CONTRACT: &str = "update_associated_key.wasm"; +const DISABLE_CONTRACT: &str = "disable_contract.wasm"; +const ENABLE_CONTRACT: &str = 
"enable_contract.wasm"; +const TRANSFER_TO_ACCOUNT_CONTRACT: &str = "transfer_to_account.wasm"; +const ARG_CONTRACT_PACKAGE_HASH: &str = "contract_package_hash"; +const ARG_CONTRACT_HASH: &str = "contract_hash"; + +const ARG_ACCOUNT: &str = "account"; +const ARG_WEIGHT: &str = "weight"; + +const ARG_KEY_MANAGEMENT_THRESHOLD: &str = "key_management_threshold"; +const ARG_DEPLOY_THRESHOLD: &str = "deploy_threshold"; +const DO_NOTHING_HASH_NAME: &str = "do_nothing_hash"; + +const DO_NOTHING_STORED_CONTRACT: &str = "do_nothing_stored.wasm"; +const CALL_CONTRACT_PROXY: &str = "call_contract.wasm"; +const DELEGATE_ENTRYPOINT: &str = "delegate"; + +const TEST_PAYMENT_STORED_CONTRACT: &str = "test_payment_stored.wasm"; +const TEST_PAYMENT_STORED_HASH_NAME: &str = "test_payment_hash"; +const PAY_ENTRYPOINT: &str = "pay"; + +#[should_panic(expected = "DuplicatedAdministratorEntry")] +#[ignore] +#[test] +fn should_not_run_genesis_with_duplicated_administrator_accounts() { + let core_config = CoreConfig { + administrators: PRIVATE_CHAIN_GENESIS_ADMIN_SET.clone(), + ..Default::default() + }; + let chainspec = ChainspecConfig { + core_config, + wasm_config: Default::default(), + system_costs_config: Default::default(), + storage_costs: Default::default(), + }; + + let data_dir = TempDir::new().expect("should create temp dir"); + let mut builder = LmdbWasmTestBuilder::new_with_config(data_dir.as_ref(), chainspec); + + let duplicated_administrator_accounts = { + let mut accounts = PRIVATE_CHAIN_DEFAULT_ACCOUNTS.clone(); + + let genesis_admins = PRIVATE_CHAIN_GENESIS_ADMIN_ACCOUNTS + .clone() + .into_iter() + .map(GenesisAccount::from); + accounts.extend(genesis_admins); + accounts + }; + + let genesis_config = GenesisConfigBuilder::default() + .with_accounts(duplicated_administrator_accounts) + .with_wasm_config(*DEFAULT_WASM_CONFIG) + .with_system_config(*DEFAULT_SYSTEM_CONFIG) + .with_validator_slots(DEFAULT_VALIDATOR_SLOTS) + .with_auction_delay(DEFAULT_AUCTION_DELAY) + 
.with_locked_funds_period_millis(DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS) + .with_round_seigniorage_rate(DEFAULT_ROUND_SEIGNIORAGE_RATE) + .with_unbonding_delay(DEFAULT_UNBONDING_DELAY) + .with_genesis_timestamp_millis(DEFAULT_GENESIS_TIMESTAMP_MILLIS) + .with_storage_costs(*DEFAULT_STORAGE_COSTS) + .build(); + + let modified_genesis_request = GenesisRequest::new( + DEFAULT_GENESIS_CONFIG_HASH, + DEFAULT_PROTOCOL_VERSION, + genesis_config, + DEFAULT_CHAINSPEC_REGISTRY.clone(), + ); + + builder.run_genesis(modified_genesis_request); +} + +#[ignore] +#[test] +fn genesis_accounts_should_not_update_key_weight() { + let mut builder = super::private_chain_setup(); + + let exec_request_1 = { + let session_args = runtime_args! { + ARG_ACCOUNT => *ACCOUNT_1_ADDR, + ARG_WEIGHT => Weight::MAX, + }; + ExecuteRequestBuilder::standard( + *ACCOUNT_1_ADDR, + UPDATE_ASSOCIATED_KEY_CONTRACT, + session_args, + ) + .build() + }; + + builder.exec(exec_request_1).expect_failure().commit(); + + let error = builder.get_error().expect("should have error"); + assert!( + matches!( + error, + Error::Exec(ExecError::Revert(ApiError::PermissionDenied)) + ), + "{:?}", + error + ); + + let exec_request_2 = { + let session_args = runtime_args! { + ARG_ACCOUNT => *DEFAULT_ADMIN_ACCOUNT_ADDR, + ARG_WEIGHT => Weight::new(1), + }; + ExecuteRequestBuilder::standard( + *ACCOUNT_1_ADDR, + UPDATE_ASSOCIATED_KEY_CONTRACT, + session_args, + ) + .build() + }; + + builder.exec(exec_request_2).expect_failure().commit(); +} + +#[ignore] +#[test] +fn genesis_accounts_should_not_modify_action_thresholds() { + let mut builder = super::private_chain_setup(); + + let exec_request = { + let session_args = runtime_args! 
{ + ARG_DEPLOY_THRESHOLD => Weight::new(1), + ARG_KEY_MANAGEMENT_THRESHOLD => Weight::new(1), + }; + ExecuteRequestBuilder::standard( + *ACCOUNT_1_ADDR, + SET_ACTION_THRESHOLDS_CONTRACT, + session_args, + ) + .build() + }; + + builder.exec(exec_request).expect_failure().commit(); + let error = builder.get_error().expect("should have error"); + assert!( + matches!( + error, + Error::Exec(ExecError::Revert(ApiError::PermissionDenied)) + ), + "{:?}", + error + ); +} + +#[ignore] +#[test] +fn genesis_accounts_should_not_add_associated_keys() { + let secondary_account_hash = AccountHash::new([55; 32]); + + let mut builder = super::private_chain_setup(); + + let exec_request = { + let session_args = runtime_args! { + ARG_ACCOUNT => secondary_account_hash, + ARG_WEIGHT => Weight::MAX, + }; + ExecuteRequestBuilder::standard(*ACCOUNT_1_ADDR, ADD_ASSOCIATED_KEY_CONTRACT, session_args) + .build() + }; + + builder.exec(exec_request).expect_failure().commit(); + + let error = builder.get_error().expect("should have error"); + assert!( + matches!( + error, + Error::Exec(ExecError::Revert(ApiError::PermissionDenied)) + ), + "{:?}", + error + ); +} + +#[ignore] +#[test] +fn genesis_accounts_should_not_remove_associated_keys() { + let secondary_account_hash = AccountHash::new([55; 32]); + + let mut builder = super::private_chain_setup(); + + let session_args = runtime_args! { + ARG_ACCOUNT => secondary_account_hash, + ARG_WEIGHT => Weight::MAX, + }; + + let account_hash = *ACCOUNT_1_ADDR; + let deploy_hash: [u8; 32] = [55; 32]; + + let deploy_item = DeployItemBuilder::new() + .with_address(account_hash) + .with_session_code(ADD_ASSOCIATED_KEY_CONTRACT, session_args) + .with_standard_payment(runtime_args! 
{ + ARG_AMOUNT => *DEFAULT_PAYMENT + }) + .with_authorization_keys(&[*ADMIN_1_ACCOUNT_ADDR]) + .with_deploy_hash(deploy_hash) + .build(); + + let add_associated_key_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); + + builder + .exec(add_associated_key_request) + .expect_success() + .commit(); + + let remove_associated_key_request = { + let session_args = runtime_args! { + ARG_ACCOUNT => secondary_account_hash, + }; + ExecuteRequestBuilder::standard( + *ACCOUNT_1_ADDR, + REMOVE_ASSOCIATED_KEY_CONTRACT, + session_args, + ) + .build() + }; + + builder + .exec(remove_associated_key_request) + .expect_failure() + .commit(); + + let error = builder.get_error().expect("should have error"); + assert!( + matches!( + error, + Error::Exec(ExecError::Revert(ApiError::PermissionDenied)) + ), + "{:?}", + error + ); +} + +#[ignore] +#[test] +fn administrator_account_should_disable_any_account() { + let mut builder = super::private_chain_setup(); + + let account_1_genesis = builder + .get_entity_by_account_hash(*ACCOUNT_1_ADDR) + .expect("should have account 1 after genesis"); + + // Account 1 can deploy after genesis + let exec_request_1 = ExecuteRequestBuilder::module_bytes( + *ACCOUNT_1_ADDR, + wasm_utils::do_minimum_bytes(), + RuntimeArgs::default(), + ) + .build(); + builder.exec(exec_request_1).expect_success().commit(); + + // Disable account 1 + let session_args = runtime_args! { + ARG_DEPLOY_THRESHOLD => Weight::MAX, + ARG_KEY_MANAGEMENT_THRESHOLD => Weight::MAX, + }; + + let sender = *ACCOUNT_1_ADDR; + let deploy_hash = [54; 32]; + + // Here, deploy is sent as an account, but signed by an administrator. + let deploy_item = DeployItemBuilder::new() + .with_address(sender) + .with_session_code(SET_ACTION_THRESHOLDS_CONTRACT, session_args) + .with_standard_payment(runtime_args! 
{ ARG_AMOUNT => *DEFAULT_PAYMENT, }) + .with_authorization_keys(&[*DEFAULT_ADMIN_ACCOUNT_ADDR]) + .with_deploy_hash(deploy_hash) + .build(); + + let disable_request_1 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); + + builder.exec(disable_request_1).expect_success().commit(); + // Account 1 can not deploy after freezing + let exec_request_2 = ExecuteRequestBuilder::module_bytes( + *ACCOUNT_1_ADDR, + wasm_utils::do_minimum_bytes(), + RuntimeArgs::default(), + ) + .build(); + builder.exec(exec_request_2).expect_failure().commit(); + + let error = builder.get_error().expect("should have error"); + assert!(matches!( + error, + Error::TrackingCopy(TrackingCopyError::DeploymentAuthorizationFailure) + )); + + let account_1_disabled = builder + .get_entity_by_account_hash(*ACCOUNT_1_ADDR) + .expect("should have account 1 after genesis"); + assert_ne!( + account_1_genesis, account_1_disabled, + "account 1 should be modified" + ); + + // Unfreeze account 1 + let session_args = runtime_args! { + ARG_DEPLOY_THRESHOLD => Weight::new(1), + ARG_KEY_MANAGEMENT_THRESHOLD => Weight::new(0), + }; + + let sender = *ACCOUNT_1_ADDR; + let deploy_hash = [53; 32]; + + // Here, deploy is sent as an account, but signed by an administrator. + let deploy_item = DeployItemBuilder::new() + .with_address(sender) + .with_session_code(SET_ACTION_THRESHOLDS_CONTRACT, session_args) + .with_standard_payment(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, }) + .with_authorization_keys(&[*DEFAULT_ADMIN_ACCOUNT_ADDR]) + .with_deploy_hash(deploy_hash) + .build(); + + let enable_request_1 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); + + let session_args = runtime_args! { + ARG_DEPLOY_THRESHOLD => Weight::new(0), + ARG_KEY_MANAGEMENT_THRESHOLD => Weight::new(1), + }; + + let sender = *ACCOUNT_1_ADDR; + let deploy_hash = [52; 32]; + + // Here, deploy is sent as an account, but signed by an administrator. 
+ let deploy_item = DeployItemBuilder::new() + .with_address(sender) + .with_session_code(SET_ACTION_THRESHOLDS_CONTRACT, session_args) + .with_standard_payment(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, }) + .with_authorization_keys(&[*DEFAULT_ADMIN_ACCOUNT_ADDR]) + .with_deploy_hash(deploy_hash) + .build(); + + let enable_request_2 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); + + builder.exec(enable_request_1).expect_success().commit(); + builder.exec(enable_request_2).expect_success().commit(); + + // Account 1 can deploy after unfreezing + let exec_request_3 = ExecuteRequestBuilder::module_bytes( + *ACCOUNT_1_ADDR, + wasm_utils::do_minimum_bytes(), + RuntimeArgs::default(), + ) + .build(); + builder.exec(exec_request_3).expect_success().commit(); + + let account_1_unfrozen = builder + .get_entity_by_account_hash(*ACCOUNT_1_ADDR) + .expect("should have account 1 after genesis"); + assert_eq!( + account_1_genesis, account_1_unfrozen, + "account 1 should be modified back to genesis state" + ); +} + +#[ignore] +#[test] +fn native_transfer_should_create_new_private_account() { + let mut builder = super::private_chain_setup(); + + // Account 1 can deploy after genesis + let transfer_request = TransferRequestBuilder::new(1, *ACCOUNT_2_ADDR) + .with_initiator(*DEFAULT_ADMIN_ACCOUNT_ADDR) + .build(); + + builder + .transfer_and_commit(transfer_request) + .expect_success(); + + let _account_2 = builder + .get_entity_by_account_hash(*ACCOUNT_2_ADDR) + .expect("should have account 1 after transfer"); +} + +#[ignore] +#[test] +fn wasm_transfer_should_create_new_private_account() { + let mut builder = super::private_chain_setup(); + + // Account 1 can deploy after genesis + let transfer_args = runtime_args! 
{ + mint::ARG_TARGET => *ACCOUNT_2_ADDR, + mint::ARG_AMOUNT => 1u64, + }; + let transfer_request = ExecuteRequestBuilder::standard( + *DEFAULT_ADMIN_ACCOUNT_ADDR, + TRANSFER_TO_ACCOUNT_CONTRACT, + transfer_args, + ) + .build(); + + builder.exec(transfer_request).expect_success().commit(); + + let _account_2 = builder + .get_entity_by_account_hash(*ACCOUNT_2_ADDR) + .expect("should have account 1 after genesis"); +} + +#[ignore] +#[test] +fn administrator_account_should_disable_any_contract_used_as_session() { + let mut builder = super::private_chain_setup(); + + let store_contract_request = ExecuteRequestBuilder::standard( + *ACCOUNT_1_ADDR, + DO_NOTHING_STORED_CONTRACT, + RuntimeArgs::default(), + ) + .build(); + + builder + .exec(store_contract_request) + .expect_success() + .commit(); + + let account_1_genesis = builder + .get_entity_with_named_keys_by_account_hash(*ACCOUNT_1_ADDR) + .expect("should have account 1 after genesis"); + + let stored_entity_key = account_1_genesis + .named_keys() + .get(DO_NOTHING_HASH_NAME) + .unwrap(); + + let stored_entity_hash = stored_entity_key + .into_entity_hash_addr() + .map(AddressableEntityHash::new) + .expect("should have stored contract hash"); + + let do_nothing_contract_package_key = { + let addressable_entity = builder + .get_addressable_entity(stored_entity_hash) + .expect("should be entity"); + Key::Hash(addressable_entity.package_hash().value()) + }; + + let contract_package_before = Package::try_from( + builder + .query(None, do_nothing_contract_package_key, &[]) + .expect("should query"), + ) + .expect("should be contract package"); + + let stored_entity_addr = stored_entity_key + .into_hash_addr() + .map(EntityAddr::SmartContract) + .expect("must get entity addr"); + + assert!( + contract_package_before.is_entity_enabled(&stored_entity_addr), + "newly stored contract should be enabled" + ); + + // Account 1 can deploy after genesis + let exec_request_1 = ExecuteRequestBuilder::contract_call_by_name( + 
*ACCOUNT_1_ADDR, + DO_NOTHING_HASH_NAME, + DELEGATE_ENTRYPOINT, + RuntimeArgs::default(), + ) + .build(); + builder.exec(exec_request_1).expect_success().commit(); + + let do_nothing_contract_package_hash = + PackageHash::new(do_nothing_contract_package_key.into_hash_addr().unwrap()); + + // Disable stored contract + let disable_request = { + let session_args = runtime_args! { + ARG_CONTRACT_PACKAGE_HASH => do_nothing_contract_package_hash, + ARG_CONTRACT_HASH => stored_entity_hash, + }; + + ExecuteRequestBuilder::standard(*DEFAULT_ADMIN_ACCOUNT_ADDR, DISABLE_CONTRACT, session_args) + .build() + }; + + builder.exec(disable_request).expect_success().commit(); + + let contract_package_after_disable = Package::try_from( + builder + .query(None, do_nothing_contract_package_key, &[]) + .expect("should query"), + ) + .expect("should be contract package"); + + assert_ne!( + contract_package_before, contract_package_after_disable, + "contract package should be disabled" + ); + assert!(!contract_package_after_disable.is_entity_enabled(&stored_entity_addr),); + + let call_delegate_requests_1 = { + // Unable to call disabled stored contract directly + let call_delegate_by_name = ExecuteRequestBuilder::contract_call_by_name( + *ACCOUNT_1_ADDR, + DO_NOTHING_HASH_NAME, + DELEGATE_ENTRYPOINT, + RuntimeArgs::default(), + ) + .build(); + + let call_delegate_by_hash = ExecuteRequestBuilder::contract_call_by_hash( + *ACCOUNT_1_ADDR, + stored_entity_hash, + DELEGATE_ENTRYPOINT, + RuntimeArgs::default(), + ) + .build(); + + let call_delegate_from_wasm = make_call_contract_session_request( + *ACCOUNT_1_ADDR, + stored_entity_hash, + DELEGATE_ENTRYPOINT, + RuntimeArgs::default(), + ); + + vec![ + call_delegate_by_name, + call_delegate_by_hash, + call_delegate_from_wasm, + ] + }; + + for call_delegate_request in call_delegate_requests_1 { + builder + .exec(call_delegate_request) + .expect_failure() + .commit(); + let error = builder.get_error().expect("should have error"); + assert!( + 
matches!( + error, + Error::Exec(ExecError::DisabledEntity(disabled_contract_hash)) + if disabled_contract_hash == stored_entity_hash + ), + "expected disabled contract error, found {:?}", + error + ); + } + + // Enable stored contract + let enable_request = { + let session_args = runtime_args! { + ARG_CONTRACT_PACKAGE_HASH => do_nothing_contract_package_hash, + ARG_CONTRACT_HASH => stored_entity_hash, + }; + + ExecuteRequestBuilder::standard(*DEFAULT_ADMIN_ACCOUNT_ADDR, ENABLE_CONTRACT, session_args) + .build() + }; + + builder.exec(enable_request).expect_success().commit(); + + let call_delegate_requests_2 = { + // Unable to call disabled stored contract directly + let call_delegate_by_name = ExecuteRequestBuilder::contract_call_by_name( + *ACCOUNT_1_ADDR, + DO_NOTHING_HASH_NAME, + DELEGATE_ENTRYPOINT, + RuntimeArgs::default(), + ) + .build(); + + let call_delegate_by_hash = ExecuteRequestBuilder::contract_call_by_hash( + *ACCOUNT_1_ADDR, + stored_entity_hash, + DELEGATE_ENTRYPOINT, + RuntimeArgs::default(), + ) + .build(); + + let call_delegate_from_wasm = make_call_contract_session_request( + *ACCOUNT_1_ADDR, + stored_entity_hash, + DELEGATE_ENTRYPOINT, + RuntimeArgs::default(), + ); + + vec![ + call_delegate_by_name, + call_delegate_by_hash, + call_delegate_from_wasm, + ] + }; + + for exec_request in call_delegate_requests_2 { + builder.exec(exec_request).expect_success().commit(); + } +} + +#[ignore] +#[test] +fn administrator_account_should_disable_any_contract_used_as_payment() { + // We'll simulate enabled unrestricted transfers here to test if stored payment contract is + // disabled. 
+ let mut builder = private_chain::custom_setup_genesis_only( + PRIVATE_CHAIN_ALLOW_AUCTION_BIDS, + true, + PRIVATE_CHAIN_REFUND_HANDLING, + PRIVATE_CHAIN_FEE_HANDLING, + PRIVATE_CHAIN_COMPUTE_REWARDS, + ); + + let store_contract_request = ExecuteRequestBuilder::standard( + *ACCOUNT_1_ADDR, + TEST_PAYMENT_STORED_CONTRACT, + RuntimeArgs::default(), + ) + .build(); + + builder + .exec(store_contract_request) + .expect_success() + .commit(); + + let account_1_genesis = builder + .get_entity_with_named_keys_by_account_hash(*ACCOUNT_1_ADDR) + .expect("should have account 1 after genesis"); + + let stored_entity_key = account_1_genesis + .named_keys() + .get(TEST_PAYMENT_STORED_HASH_NAME) + .unwrap(); + + let stored_entity_hash = stored_entity_key + .into_entity_hash_addr() + .map(AddressableEntityHash::new) + .expect("should have stored entity hash"); + + let addressable_entity = builder + .get_addressable_entity(stored_entity_hash) + .expect("should be addressable entity"); + let test_payment_stored_package_key = { Key::Hash(addressable_entity.package_hash().value()) }; + + let test_payment_stored_package_hash = + PackageHash::new(addressable_entity.package_hash().value()); + + let contract_package_before = Package::try_from( + builder + .query(None, test_payment_stored_package_key, &[]) + .expect("should query"), + ) + .expect("should be contract package"); + let stored_entity_addr = stored_entity_key + .into_entity_addr() + .expect("must get entity addr"); + assert!( + contract_package_before.is_entity_enabled(&stored_entity_addr), + "newly stored contract should be enabled" + ); + + // Account 1 can deploy after genesis + let sender = *ACCOUNT_1_ADDR; + let deploy_hash = [100; 32]; + + let payment_args = runtime_args! 
{ + standard_payment::ARG_AMOUNT => *DEFAULT_PAYMENT, + }; + let session_args = RuntimeArgs::default(); + + let deploy_item = DeployItemBuilder::new() + .with_address(sender) + .with_session_bytes(wasm_utils::do_minimum_bytes(), session_args) + .with_stored_payment_named_key(TEST_PAYMENT_STORED_HASH_NAME, PAY_ENTRYPOINT, payment_args) + .with_authorization_keys(&[sender]) + .with_deploy_hash(deploy_hash) + .build(); + let exec_request_1 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); + + builder.exec(exec_request_1).expect_failure(); + + // Disable payment contract + let disable_request = { + let session_args = runtime_args! { + ARG_CONTRACT_PACKAGE_HASH => test_payment_stored_package_hash, + ARG_CONTRACT_HASH => stored_entity_hash, + }; + + ExecuteRequestBuilder::standard(*DEFAULT_ADMIN_ACCOUNT_ADDR, DISABLE_CONTRACT, session_args) + .build() + }; + + builder.exec(disable_request).expect_success().commit(); + + let contract_package_after_disable = Package::try_from( + builder + .query(None, test_payment_stored_package_key, &[]) + .expect("should query"), + ) + .expect("should be contract package"); + + assert_ne!( + contract_package_before, contract_package_after_disable, + "contract package should be disabled" + ); + assert!(!contract_package_after_disable.is_entity_enabled(&stored_entity_addr),); + + let payment_args = runtime_args! 
{ + standard_payment::ARG_AMOUNT => *DEFAULT_PAYMENT, + }; + let session_args = RuntimeArgs::default(); + + let sender = *ACCOUNT_1_ADDR; + let deploy_hash = [100; 32]; + + let deploy_item = DeployItemBuilder::new() + .with_address(sender) + .with_session_bytes(wasm_utils::do_minimum_bytes(), session_args.clone()) + .with_stored_payment_named_key( + TEST_PAYMENT_STORED_HASH_NAME, + PAY_ENTRYPOINT, + payment_args.clone(), + ) + .with_authorization_keys(&[sender]) + .with_deploy_hash(deploy_hash) + .build(); + let call_by_name = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); + + let deploy_item = DeployItemBuilder::new() + .with_address(sender) + .with_session_bytes(wasm_utils::do_minimum_bytes(), session_args) + .with_stored_payment_hash(stored_entity_hash, PAY_ENTRYPOINT, payment_args) + .with_authorization_keys(&[sender]) + .with_deploy_hash(deploy_hash) + .build(); + let call_by_hash = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); + + for execute_request in [call_by_name, call_by_hash] { + builder.exec(execute_request).expect_failure().commit(); + let error = builder.get_error().expect("should have error"); + assert!( + matches!( + error, + Error::Exec(ExecError::DisabledEntity(disabled_contract_hash)) + if disabled_contract_hash == stored_entity_hash + ), + "expected disabled contract error, found {:?}", + error + ); + } + + // Enable stored contract + let enable_request = { + let session_args = runtime_args! { + ARG_CONTRACT_PACKAGE_HASH => test_payment_stored_package_hash, + ARG_CONTRACT_HASH => stored_entity_hash, + }; + + ExecuteRequestBuilder::standard(*DEFAULT_ADMIN_ACCOUNT_ADDR, ENABLE_CONTRACT, session_args) + .build() + }; + + builder.exec(enable_request).expect_success().commit(); + + let payment_args = runtime_args! 
{ ARG_AMOUNT => *DEFAULT_PAYMENT }; + let session_args = RuntimeArgs::default(); + let sender = *ACCOUNT_1_ADDR; + let deploy_hash = [100; 32]; + let deploy_item = DeployItemBuilder::new() + .with_address(sender) + .with_session_bytes(wasm_utils::do_minimum_bytes(), session_args.clone()) + .with_stored_payment_named_key( + TEST_PAYMENT_STORED_HASH_NAME, + PAY_ENTRYPOINT, + payment_args.clone(), + ) + .with_authorization_keys(&[sender]) + .with_deploy_hash(deploy_hash) + .build(); + let call_by_name = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); + + let deploy_item = DeployItemBuilder::new() + .with_address(sender) + .with_session_bytes(wasm_utils::do_minimum_bytes(), session_args) + .with_stored_payment_hash(stored_entity_hash, PAY_ENTRYPOINT, payment_args) + .with_authorization_keys(&[sender]) + .with_deploy_hash(deploy_hash) + .build(); + let call_by_hash = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); + + for exec_request in [call_by_name, call_by_hash] { + builder.exec(exec_request).expect_failure(); + } +} + +#[ignore] +#[test] +fn should_not_allow_add_bid_on_private_chain() { + let mut builder = super::private_chain_setup(); + + let delegation_rate: DelegationRate = 4; + let session_args = runtime_args! { + auction::ARG_PUBLIC_KEY => ACCOUNT_1_PUBLIC_KEY.clone(), + auction::ARG_AMOUNT => U512::one(), + auction::ARG_DELEGATION_RATE => delegation_rate, + }; + + let exec_request = + ExecuteRequestBuilder::standard(*ACCOUNT_1_ADDR, "add_bid.wasm", session_args).build(); + + builder.exec(exec_request).expect_failure().commit(); + + let error = builder.get_error().expect("should have error"); + + assert!( + matches!( + error, + Error::Exec(ExecError::Revert(api_error)) + if api_error == auction::Error::AuctionBidsDisabled.into(), + ), + "{:?}", + error, + ); +} + +#[ignore] +#[test] +fn should_not_allow_delegate_on_private_chain() { + let mut builder = super::private_chain_setup(); + + let session_args = runtime_args! 
{ + auction::ARG_DELEGATOR => ACCOUNT_1_PUBLIC_KEY.clone(), + auction::ARG_VALIDATOR => VALIDATOR_1_PUBLIC_KEY.clone(), + auction::ARG_AMOUNT => U512::one(), + }; + + let exec_request = + ExecuteRequestBuilder::standard(*ACCOUNT_1_ADDR, "delegate.wasm", session_args).build(); + + builder.exec(exec_request).expect_failure().commit(); + + let error = builder.get_error().expect("should have error"); + + assert!( + matches!( + error, + Error::Exec(ExecError::Revert(api_error)) + if api_error == auction::Error::AuctionBidsDisabled.into() + ), + "{:?}", + error + ); + // Redelegation would not work since delegate, and add_bid are disabled on private chains + // therefore there is nothing to test. +} + +fn make_call_contract_session_request( + account_hash: AccountHash, + contract_hash: AddressableEntityHash, + entrypoint: &str, + arguments: RuntimeArgs, +) -> ExecuteRequest { + let arguments_any = { + let arg_bytes = arguments.to_bytes().unwrap(); + CLValue::from_components(CLType::Any, arg_bytes) + }; + + let mut session_args = runtime_args! 
{ + "entrypoint" => entrypoint, + "contract_hash" => contract_hash, + }; + session_args.insert_cl_value("arguments", arguments_any); + + ExecuteRequestBuilder::standard(account_hash, CALL_CONTRACT_PROXY, session_args).build() +} diff --git a/execution_engine_testing/tests/src/test/private_chain/restricted_auction.rs b/execution_engine_testing/tests/src/test/private_chain/restricted_auction.rs new file mode 100644 index 0000000000..89517723b0 --- /dev/null +++ b/execution_engine_testing/tests/src/test/private_chain/restricted_auction.rs @@ -0,0 +1,110 @@ +use casper_engine_test_support::{ + StepRequestBuilder, DEFAULT_BLOCK_TIME, DEFAULT_GENESIS_TIMESTAMP_MILLIS, + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS, DEFAULT_PROTOCOL_VERSION, TIMESTAMP_MILLIS_INCREMENT, +}; +use casper_storage::data_access_layer::RewardItem; +use casper_types::{system::auction::SeigniorageAllocation, Key, U512}; + +use crate::test::private_chain::{PRIVATE_CHAIN_GENESIS_VALIDATORS, VALIDATOR_1_PUBLIC_KEY}; + +#[ignore] +#[test] +fn should_not_distribute_rewards_but_compute_next_set() { + const VALIDATOR_1_REWARD_FACTOR: u64 = 0; + + let mut timestamp_millis = + DEFAULT_GENESIS_TIMESTAMP_MILLIS + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS; + + let mut builder = super::private_chain_setup(); + + let protocol_version = DEFAULT_PROTOCOL_VERSION; + // initial token supply + let initial_supply = builder.total_supply(protocol_version, None); + + for _ in 0..3 { + builder.distribute( + None, + DEFAULT_PROTOCOL_VERSION, + IntoIterator::into_iter([(VALIDATOR_1_PUBLIC_KEY.clone(), vec![U512::from(0)])]) + .collect(), + DEFAULT_BLOCK_TIME, + ); + let step_request = StepRequestBuilder::new() + .with_parent_state_hash(builder.get_post_state_hash()) + .with_protocol_version(DEFAULT_PROTOCOL_VERSION) + .with_next_era_id(builder.get_era().successor()) + .with_era_end_timestamp_millis(timestamp_millis) + .with_run_auction(true) + .with_reward_item(RewardItem::new( + VALIDATOR_1_PUBLIC_KEY.clone(), + VALIDATOR_1_REWARD_FACTOR, 
+ )) + .build(); + assert!( + builder.step(step_request).is_success(), + "should execute step" + ); + timestamp_millis += TIMESTAMP_MILLIS_INCREMENT; + } + + let last_trusted_era = builder.get_era(); + + builder.distribute( + None, + DEFAULT_PROTOCOL_VERSION, + IntoIterator::into_iter([(VALIDATOR_1_PUBLIC_KEY.clone(), vec![U512::from(0)])]).collect(), + DEFAULT_BLOCK_TIME, + ); + + let step_request = StepRequestBuilder::new() + .with_parent_state_hash(builder.get_post_state_hash()) + .with_protocol_version(DEFAULT_PROTOCOL_VERSION) + .with_reward_item(RewardItem::new( + VALIDATOR_1_PUBLIC_KEY.clone(), + VALIDATOR_1_REWARD_FACTOR, + )) + .with_next_era_id(last_trusted_era.successor()) + .with_era_end_timestamp_millis(timestamp_millis) + .with_run_auction(true) + .build(); + + assert!( + builder.step(step_request).is_success(), + "should execute step" + ); + + let era_info = { + let era_info_value = builder + .query(None, Key::EraSummary, &[]) + .expect("should have value"); + + era_info_value + .as_era_info() + .cloned() + .expect("should be era info") + }; + + const EXPECTED_VALIDATOR_1_PAYOUT: U512 = U512::zero(); + + assert_eq!( + era_info.seigniorage_allocations().len(), + PRIVATE_CHAIN_GENESIS_VALIDATORS.len(), + "running auction should not increase number of validators", + ); + + assert!( + matches!( + era_info.select(VALIDATOR_1_PUBLIC_KEY.clone()).next(), + Some(SeigniorageAllocation::Validator { validator_public_key, amount }) + if *validator_public_key == *VALIDATOR_1_PUBLIC_KEY && *amount == EXPECTED_VALIDATOR_1_PAYOUT + ), + "era info is {:?}", + era_info + ); + + let total_supply_after_distribution = builder.total_supply(protocol_version, None); + assert_eq!( + initial_supply, total_supply_after_distribution, + "total supply of tokens should not increase after an auction is ran" + ) +} diff --git a/execution_engine_testing/tests/src/test/private_chain/unrestricted_transfers.rs 
b/execution_engine_testing/tests/src/test/private_chain/unrestricted_transfers.rs new file mode 100644 index 0000000000..e95b9853ae --- /dev/null +++ b/execution_engine_testing/tests/src/test/private_chain/unrestricted_transfers.rs @@ -0,0 +1,683 @@ +use casper_engine_test_support::{ + DeployItemBuilder, ExecuteRequestBuilder, TransferRequestBuilder, DEFAULT_PAYMENT, + MINIMUM_ACCOUNT_CREATION_BALANCE, +}; +use casper_execution_engine::{engine_state::Error, execution::ExecError}; +use casper_storage::system::transfer::TransferError; +use casper_types::{ + account::AccountHash, + runtime_args, + system::{mint, standard_payment}, + Key, PublicKey, RuntimeArgs, StoredValue, URef, U512, +}; + +use crate::{test::private_chain::ADMIN_1_ACCOUNT_ADDR, wasm_utils}; + +use super::{ACCOUNT_1_ADDR, ACCOUNT_2_ADDR, DEFAULT_ADMIN_ACCOUNT_ADDR}; + +const TRANSFER_TO_ACCOUNT_U512_CONTRACT: &str = "transfer_to_account_u512.wasm"; +const TRANSFER_TO_NAMED_PURSE_CONTRACT: &str = "transfer_to_named_purse.wasm"; + +const TEST_PURSE: &str = "test"; +const ARG_PURSE_NAME: &str = "purse_name"; +const ARG_AMOUNT: &str = "amount"; + +const TEST_PAYMENT_STORED_CONTRACT: &str = "test_payment_stored.wasm"; +const TEST_PAYMENT_STORED_HASH_NAME: &str = "test_payment_hash"; + +#[ignore] +#[test] +fn should_restrict_native_transfer_to_from_non_administrators() { + let mut builder = super::private_chain_setup(); + + let fund_transfer_1 = + TransferRequestBuilder::new(MINIMUM_ACCOUNT_CREATION_BALANCE, *ACCOUNT_1_ADDR) + .with_initiator(*DEFAULT_ADMIN_ACCOUNT_ADDR) + .build(); + + // Admin can transfer funds to create new account. 
+ builder + .transfer_and_commit(fund_transfer_1) + .expect_success(); + + let transfer_request_1 = TransferRequestBuilder::new(1, *ACCOUNT_2_ADDR) + .with_initiator(*ACCOUNT_1_ADDR) + .build(); + + // User can't transfer funds to a non-administrator (it doesn't matter if this would create a + // new account or not — the receiver must be an EXISTING administrator account). + builder + .transfer_and_commit(transfer_request_1) + .expect_failure(); + + let error = builder.get_error().expect("should have error"); + assert!( + matches!( + error, + Error::Transfer(TransferError::RestrictedTransferAttempted) + ), + "expected RestrictedTransferAttempted error, found {:?}", + error + ); + + let transfer_request_2 = TransferRequestBuilder::new(1, *DEFAULT_ADMIN_ACCOUNT_ADDR) + .with_initiator(*ACCOUNT_1_ADDR) + .build(); + + // User can transfer funds back to admin. + builder + .transfer_and_commit(transfer_request_2) + .expect_success(); +} + +#[ignore] +#[test] +fn should_restrict_wasm_transfer_to_from_non_administrators() { + let mut builder = super::private_chain_setup(); + + let fund_transfer_1 = ExecuteRequestBuilder::standard( + *DEFAULT_ADMIN_ACCOUNT_ADDR, + TRANSFER_TO_ACCOUNT_U512_CONTRACT, + runtime_args! { + mint::ARG_TARGET => *ACCOUNT_1_ADDR, + mint::ARG_AMOUNT => U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE), + }, + ) + .build(); + + // Admin can transfer funds to create new account. + builder.exec(fund_transfer_1).expect_success().commit(); + + let transfer_request_1 = ExecuteRequestBuilder::standard( + *ACCOUNT_1_ADDR, + TRANSFER_TO_ACCOUNT_U512_CONTRACT, + runtime_args! { + mint::ARG_TARGET => *ACCOUNT_2_ADDR, + mint::ARG_AMOUNT => U512::one(), + }, + ) + .build(); + + // User can't transfer funds to create new account.
+ builder.exec(transfer_request_1).expect_failure().commit(); + + let error = builder.get_error().expect("should have error"); + assert!( + matches!(error, Error::Exec(ExecError::DisabledUnrestrictedTransfers)), + "expected DisabledUnrestrictedTransfers error, found {:?}", + error + ); + + let transfer_request_2 = ExecuteRequestBuilder::standard( + *ACCOUNT_1_ADDR, + TRANSFER_TO_ACCOUNT_U512_CONTRACT, + runtime_args! { + mint::ARG_TARGET => *DEFAULT_ADMIN_ACCOUNT_ADDR, + mint::ARG_AMOUNT => U512::one(), + }, + ) + .build(); + + // User can transfer funds back to admin. + builder.exec(transfer_request_2).expect_success().commit(); +} + +#[ignore] +#[test] +fn should_noop_self_transfer() { + let mut builder = super::private_chain_setup(); + + let session_args = runtime_args! { + ARG_PURSE_NAME => TEST_PURSE, + ARG_AMOUNT => U512::zero(), // create empty purse without transfer + }; + let create_purse_request = ExecuteRequestBuilder::standard( + *ACCOUNT_1_ADDR, + TRANSFER_TO_NAMED_PURSE_CONTRACT, + session_args, + ) + .build(); + builder.exec(create_purse_request).expect_success().commit(); + + let mint_contract_hash = builder.get_mint_contract_hash(); + + let account = builder + .get_entity_with_named_keys_by_account_hash(*ACCOUNT_1_ADDR) + .expect("should have account"); + let maybe_to: Option = None; + let source: URef = account.main_purse(); + let target: URef = account + .named_keys() + .get(TEST_PURSE) + .unwrap() + .into_uref() + .expect("should be uref"); + let amount: U512 = U512::one(); + let id: Option = None; + + let session_args = runtime_args! 
{ + mint::ARG_TO => maybe_to, + mint::ARG_SOURCE => source, + mint::ARG_TARGET => target, + mint::ARG_AMOUNT => amount, + mint::ARG_ID => id, + }; + + let exec_request = ExecuteRequestBuilder::contract_call_by_hash( + *ACCOUNT_1_ADDR, + mint_contract_hash, + mint::METHOD_TRANSFER, + session_args, + ) + .build(); + builder.exec(exec_request).expect_success().commit(); + + // Transfer technically succeeded but the result of mint::Error was discarded so we have to + // ensure that purse has 0 balance. + let value = builder + .query(None, Key::Balance(target.addr()), &[]) + .unwrap(); + let value: U512 = if let StoredValue::CLValue(cl_value) = value { + cl_value.into_t().unwrap() + } else { + panic!("should be a CLValue"); + }; + assert_eq!(value, U512::zero()); +} + +#[ignore] +#[test] +fn should_allow_admin_to_native_transfer_from_own_purse() { + let mut builder = super::private_chain_setup(); + + let session_args = runtime_args! { + ARG_PURSE_NAME => TEST_PURSE, + ARG_AMOUNT => U512::zero(), // create empty purse without transfer + }; + let create_purse_request = ExecuteRequestBuilder::standard( + *DEFAULT_ADMIN_ACCOUNT_ADDR, + TRANSFER_TO_NAMED_PURSE_CONTRACT, + session_args, + ) + .build(); + builder.exec(create_purse_request).expect_success().commit(); + + let mint_contract_hash = builder.get_mint_contract_hash(); + + let account = builder + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ADMIN_ACCOUNT_ADDR) + .expect("should have account"); + let maybe_to: Option = None; + let source: URef = account.main_purse(); + let target: URef = account + .named_keys() + .get(TEST_PURSE) + .unwrap() + .into_uref() + .expect("should be uref"); + let amount: U512 = U512::one(); + let id: Option = None; + + let session_args = runtime_args! 
{ + mint::ARG_TO => maybe_to, + mint::ARG_SOURCE => source, + mint::ARG_TARGET => target, + mint::ARG_AMOUNT => amount, + mint::ARG_ID => id, + }; + + let exec_request = ExecuteRequestBuilder::contract_call_by_hash( + *DEFAULT_ADMIN_ACCOUNT_ADDR, + mint_contract_hash, + mint::METHOD_TRANSFER, + session_args, + ) + .build(); + builder.exec(exec_request).expect_success().commit(); + + // Transfer technically succeeded but the result of mint::Error was discarded so we have to + // ensure that purse has 0 balance. + let value = builder + .query(None, Key::Balance(target.addr()), &[]) + .unwrap(); + let value: U512 = if let StoredValue::CLValue(cl_value) = value { + cl_value.into_t().unwrap() + } else { + panic!("should be a CLValue"); + }; + assert_eq!(value, amount); +} + +#[ignore] +#[test] +fn should_not_allow_wasm_transfer_from_non_administrator_to_misc_purse() { + let mut builder = super::private_chain_setup(); + + let session_args = runtime_args! { + ARG_PURSE_NAME => TEST_PURSE, + ARG_AMOUNT => U512::one(), + }; + let create_purse_request = ExecuteRequestBuilder::standard( + *ACCOUNT_1_ADDR, + TRANSFER_TO_NAMED_PURSE_CONTRACT, + session_args, + ) + .build(); + builder.exec(create_purse_request).expect_failure().commit(); + let error = builder.get_error().expect("should have error"); + assert!( + matches!( + error, + Error::Exec(ExecError::Revert(revert)) if revert == mint::Error::DisabledUnrestrictedTransfers.into() + ), + "expected DisabledUnrestrictedTransfers error, found {:?}", + error + ) +} + +#[ignore] +#[test] +fn should_allow_wasm_transfer_from_administrator() { + let mut builder = super::private_chain_setup(); + + let session_args = runtime_args! 
{ + ARG_PURSE_NAME => TEST_PURSE, + ARG_AMOUNT => U512::one(), + }; + let create_purse_request = ExecuteRequestBuilder::standard( + *DEFAULT_ADMIN_ACCOUNT_ADDR, + TRANSFER_TO_NAMED_PURSE_CONTRACT, + session_args, + ) + .build(); + builder.exec(create_purse_request).expect_success().commit(); +} + +#[ignore] +#[test] +fn should_not_allow_native_transfer_from_non_administrator_to_misc_purse() { + let mut builder = super::private_chain_setup(); + + let session_args = runtime_args! { + ARG_PURSE_NAME => TEST_PURSE, + ARG_AMOUNT => U512::zero(), // we can't transfer in private chain mode, so we'll just create empty valid purse + }; + let create_purse_request = ExecuteRequestBuilder::standard( + *ACCOUNT_1_ADDR, + TRANSFER_TO_NAMED_PURSE_CONTRACT, + session_args, + ) + .build(); + builder.exec(create_purse_request).expect_success().commit(); + + let account = builder + .get_entity_with_named_keys_by_account_hash(*ACCOUNT_1_ADDR) + .expect("should have account"); + let source = account.main_purse(); + let target = account + .named_keys() + .get(TEST_PURSE) + .unwrap() + .into_uref() + .expect("should be uref"); + + let transfer_request = TransferRequestBuilder::new(1, target) + .with_initiator(*ACCOUNT_1_ADDR) + .with_source(source) + .build(); + + builder + .transfer_and_commit(transfer_request) + .expect_failure(); + + let error = builder.get_error().expect("should have error"); + assert!( + matches!( + error, + Error::Transfer(TransferError::UnableToVerifyTargetIsAdmin) + ), + "expected UnableToVerifyTargetIsAdmin error, found {:?}", + error + ); +} + +#[ignore] +#[test] +fn should_allow_native_transfer_to_administrator_from_misc_purse() { + let mut builder = super::private_chain_setup(); + + let session_args = runtime_args! 
{ + ARG_PURSE_NAME => TEST_PURSE, + ARG_AMOUNT => U512::zero(), // we can't transfer in private chain mode, so we'll just create empty valid purse + }; + let create_purse_request = ExecuteRequestBuilder::standard( + *DEFAULT_ADMIN_ACCOUNT_ADDR, + TRANSFER_TO_NAMED_PURSE_CONTRACT, + session_args, + ) + .build(); + builder.exec(create_purse_request).expect_success().commit(); + + let account = builder + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ADMIN_ACCOUNT_ADDR) + .expect("should have account"); + let source = account.main_purse(); + let target = account + .named_keys() + .get(TEST_PURSE) + .unwrap() + .into_uref() + .expect("should be uref"); + + let transfer_request = TransferRequestBuilder::new(1, target) + .with_initiator(*DEFAULT_ADMIN_ACCOUNT_ADDR) + .with_source(source) + .build(); + + builder + .transfer_and_commit(transfer_request) + .expect_success(); +} + +#[ignore] +#[test] +fn should_not_allow_wasm_transfer_from_non_administrator_to_known_purse() { + let mut builder = super::private_chain_setup(); + + let store_contract_request = ExecuteRequestBuilder::standard( + *ACCOUNT_1_ADDR, + "contract_funds.wasm", + RuntimeArgs::default(), + ) + .build(); + + builder + .exec(store_contract_request) + .expect_success() + .commit(); + + let transfer_request = ExecuteRequestBuilder::standard( + *ACCOUNT_1_ADDR, + "contract_funds_call.wasm", + runtime_args! 
{ + ARG_AMOUNT => U512::one(), + }, + ) + .build(); + + builder.exec(transfer_request).expect_failure().commit(); + + let error = builder.get_error().expect("should have error"); + assert!( + matches!( + error, + Error::Exec(ExecError::Revert(revert)) if revert == mint::Error::DisabledUnrestrictedTransfers.into() + ), + "expected DisabledUnrestrictedTransfers error, found {:?}", + error + ); +} + +#[ignore] +#[allow(unused)] +#[test] +fn should_not_allow_payment_to_purse_in_stored_payment() { + // This effectively disables any custom payment code + let mut builder = super::private_chain_setup(); + + let store_contract_request = ExecuteRequestBuilder::standard( + *ACCOUNT_1_ADDR, + TEST_PAYMENT_STORED_CONTRACT, + RuntimeArgs::default(), + ) + .build(); + + builder + .exec(store_contract_request) + .expect_success() + .commit(); + + // Account 1 can deploy after genesis + let sender = *ACCOUNT_1_ADDR; + let deploy_hash = [100; 32]; + + let payment_args = runtime_args! { + standard_payment::ARG_AMOUNT => *DEFAULT_PAYMENT, + }; + let session_args = RuntimeArgs::default(); + + const PAY_ENTRYPOINT: &str = "pay"; + let deploy_item = DeployItemBuilder::new() + .with_address(sender) + .with_session_bytes(wasm_utils::do_minimum_bytes(), session_args) + .with_stored_payment_named_key(TEST_PAYMENT_STORED_HASH_NAME, PAY_ENTRYPOINT, payment_args) + .with_authorization_keys(&[sender]) + .with_deploy_hash(deploy_hash) + .build(); + let exec_request_1 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); + + builder.exec(exec_request_1).expect_failure().commit(); + + let error = builder.get_error().expect("should have error"); + assert!( + matches!(error, Error::Exec(ExecError::ForgedReference(_))), + "expected ForgedReference error, found {:?}", + error + ); +} + +#[ignore] +#[test] +fn should_not_allow_direct_mint_transfer_with_system_addr_specified() { + // This test executes mint's transfer entrypoint with a SYSTEM_ADDR as to field in attempt to + // avoid 
 restrictions. + let mut builder = super::private_chain_setup(); + + let fund_transfer_1 = ExecuteRequestBuilder::standard( + *ACCOUNT_1_ADDR, + "mint_transfer_proxy.wasm", + runtime_args! { + "to" => Some(PublicKey::System.to_account_hash()), + "amount" => U512::from(1u64), + }, + ) + .build(); + + // should fail because the supplied to arg is not valid if it is PublicKey::System in this flow + builder.exec(fund_transfer_1).expect_failure().commit(); + + let error = builder.get_error().expect("should have error"); + assert!( + matches!(error, Error::Exec(ExecError::Revert(revert)) if revert == mint::Error::DisabledUnrestrictedTransfers.into()), + "expected DisabledUnrestrictedTransfers error, found {:?}", + error + ); +} + +#[ignore] +#[test] +fn should_not_allow_direct_mint_transfer_with_an_admin_in_to_field() { + // This test executes mint's transfer entrypoint with an admin account as the to field in an + // attempt to avoid restrictions. + let mut builder = super::private_chain_setup(); + + let fund_transfer_1 = ExecuteRequestBuilder::standard( + *ACCOUNT_1_ADDR, + "mint_transfer_proxy.wasm", + runtime_args! { + "to" => Some(*ADMIN_1_ACCOUNT_ADDR), + "amount" => U512::from(1u64), + }, + ) + .build(); + + // A non-admin transfer should fail even with an admin account in the to field. + builder.exec(fund_transfer_1).expect_failure().commit(); + + let error = builder.get_error().expect("should have error"); + assert!( + matches!(error, Error::Exec(ExecError::Revert(revert)) if revert == mint::Error::DisabledUnrestrictedTransfers.into()), + "expected DisabledUnrestrictedTransfers error, found {:?}", + error + ); +} + +#[ignore] +#[test] +fn should_allow_mint_transfer_without_to_field_from_admin() { + // This test executes mint's transfer entrypoint without a to field to verify that admins are + // not restricted. + let mut builder = super::private_chain_setup(); + + let fund_transfer_1 = ExecuteRequestBuilder::standard( + *ADMIN_1_ACCOUNT_ADDR, + "mint_transfer_proxy.wasm", + runtime_args!
 { + "to" => None::, + "amount" => U512::from(1u64), + }, + ) + .build(); + + // Admin can transfer funds even without a to field. + builder.exec(fund_transfer_1).expect_success().commit(); +} + +#[ignore] +#[test] +fn should_not_allow_transfer_without_to_field_from_non_admin() { + // This test executes mint's transfer entrypoint without a to field in an attempt to avoid + // restrictions. + let mut builder = super::private_chain_setup(); + + let fund_transfer_1 = ExecuteRequestBuilder::standard( + *ACCOUNT_1_ADDR, + "mint_transfer_proxy.wasm", + runtime_args! { + "to" => None::, + "amount" => U512::from(1u64), + }, + ) + .build(); + + // A non-admin transfer without a to field should still fail. + builder.exec(fund_transfer_1).expect_failure().commit(); + + let error = builder.get_error().expect("should have error"); + assert!( + matches!(error, Error::Exec(ExecError::Revert(revert)) if revert == mint::Error::DisabledUnrestrictedTransfers.into()), + "expected DisabledUnrestrictedTransfers error, found {:?}", + error + ); +} + +// #[ignore] +// #[allow(unused)] +// #[test] +// fn should_not_allow_custom_payment() { +// let mut builder = super::private_chain_setup(); +// +// // Account 1 can deploy after genesis +// let sender = *ACCOUNT_1_ADDR; +// let deploy_hash = [100; 32]; +// +// let payment_amount = *DEFAULT_PAYMENT + U512::from(1u64); +// +// let payment_args = runtime_args!
{ +// standard_payment::ARG_AMOUNT => payment_amount, +// }; +// let session_args = RuntimeArgs::default(); +// +// let deploy_item = DeployItemBuilder::new() +// .with_address(sender) +// .with_session_bytes(wasm_utils::do_minimum_bytes(), session_args) +// .with_payment_code("non_standard_payment.wasm", payment_args) +// .with_authorization_keys(&[sender]) +// .with_deploy_hash(deploy_hash) +// .build(); +// let exec_request_1 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); +// +// builder.exec(exec_request_1).expect_failure(); +// } +// +// #[ignore] +// #[test] +// fn should_allow_wasm_transfer_to_system() { +// let mut builder = super::private_chain_setup(); +// +// // Account 1 can deploy after genesis +// let sender = *ACCOUNT_1_ADDR; +// let deploy_hash = [100; 32]; +// +// let payment_amount = *DEFAULT_PAYMENT + U512::from(1u64); +// +// let payment_args = runtime_args! { +// standard_payment::ARG_AMOUNT => payment_amount, +// }; +// let session_args = runtime_args! 
{ +// "target" => *SYSTEM_ADDR, +// "amount" => U512::one(), +// }; +// +// let deploy_item = DeployItemBuilder::new() +// .with_address(sender) +// .with_session_code("transfer_to_account_u512.wasm", session_args) +// .with_standard_payment(payment_args) +// .with_authorization_keys(&[sender]) +// .with_deploy_hash(deploy_hash) +// .build(); +// let exec_request_1 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); +// +// builder.exec(exec_request_1).expect_success().commit(); +// +// let handle_payment_contract = builder.get_named_keys(EntityAddr::System( +// builder.get_handle_payment_contract_hash().value(), +// )); +// let payment_purse_key = handle_payment_contract +// .get(handle_payment::PAYMENT_PURSE_KEY) +// .unwrap(); +// let payment_purse_uref = payment_purse_key.into_uref().unwrap(); +// println!("payment uref: {payment_purse_uref}"); +// assert_eq!( +// builder.get_purse_balance(payment_purse_uref), +// U512::zero(), +// "after finalizing a private chain a payment purse should be empty" +// ); +// } +// +// #[ignore] +// #[test] +// fn should_allow_native_transfer_to_administrator() { +// let mut builder = super::private_chain_setup(); +// +// let payment_purse_uref = { +// let handle_payment_contract = builder.get_named_keys(EntityAddr::System( +// builder.get_handle_payment_contract_hash().value(), +// )); +// let payment_purse_key = handle_payment_contract +// .get(handle_payment::PAYMENT_PURSE_KEY) +// .unwrap(); +// payment_purse_key.into_uref().unwrap() +// }; +// +// assert_eq!( +// builder.get_purse_balance(payment_purse_uref), +// U512::zero(), +// "payment purse should be empty" +// ); +// +// let fund_transfer_1 = +// TransferRequestBuilder::new(MINIMUM_ACCOUNT_CREATION_BALANCE, *SYSTEM_ADDR) +// .with_initiator(*DEFAULT_ADMIN_ACCOUNT_ADDR) +// .build(); +// +// builder +// .transfer_and_commit(fund_transfer_1) +// .expect_success(); +// +// assert_eq!( +// builder.get_purse_balance(payment_purse_uref), +// U512::zero(), +// 
"after finalizing a private chain a payment purse should be empty" +// ); +// } diff --git a/execution_engine_testing/tests/src/test/regression/eco_863.rs b/execution_engine_testing/tests/src/test/regression/eco_863.rs deleted file mode 100644 index 4eaaba5fad..0000000000 --- a/execution_engine_testing/tests/src/test/regression/eco_863.rs +++ /dev/null @@ -1,229 +0,0 @@ -use assert_matches::assert_matches; -use once_cell::sync::Lazy; - -use casper_engine_test_support::{ - internal::{utils, ExecuteRequestBuilder, InMemoryWasmTestBuilder, DEFAULT_ACCOUNTS}, - DEFAULT_ACCOUNT_ADDR, DEFAULT_ACCOUNT_INITIAL_BALANCE, MINIMUM_ACCOUNT_CREATION_BALANCE, -}; -use casper_execution_engine::{ - core::{ - engine_state::{self, genesis::GenesisAccount}, - execution, - }, - shared::motes::Motes, -}; -use casper_types::{ - account::AccountHash, runtime_args, ApiError, Key, PublicKey, RuntimeArgs, SecretKey, U512, -}; - -const CONTRACT_FAUCET_STORED: &str = "faucet_stored.wasm"; -const CONTRACT_FAUCET_ENTRYPOINT: &str = "call_faucet"; -const ARG_TARGET: &str = "target"; -const ARG_AMOUNT: &str = "amount"; - -const FAUCET_REQUEST_AMOUNT: u64 = 333_333_333; - -static FAUCET: Lazy = Lazy::new(|| { - SecretKey::ed25519_from_bytes([1; SecretKey::ED25519_LENGTH]) - .unwrap() - .into() -}); -static ALICE: Lazy = Lazy::new(|| { - SecretKey::ed25519_from_bytes([2; SecretKey::ED25519_LENGTH]) - .unwrap() - .into() -}); - -static FAUCET_ADDR: Lazy = Lazy::new(|| AccountHash::from(&*FAUCET)); -static ALICE_ADDR: Lazy = Lazy::new(|| AccountHash::from(&*ALICE)); - -#[ignore] -#[test] -fn faucet_should_create_account() { - let accounts = { - let faucet_account = GenesisAccount::account( - FAUCET.clone(), - Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE.into()), - None, - ); - let mut tmp: Vec = DEFAULT_ACCOUNTS.clone(); - tmp.push(faucet_account); - tmp - }; - - let run_genesis_request = utils::create_run_genesis_request(accounts); - - let mut builder = InMemoryWasmTestBuilder::default(); - 
builder.run_genesis(&run_genesis_request); - - let store_faucet_request = ExecuteRequestBuilder::standard( - *FAUCET_ADDR, - CONTRACT_FAUCET_STORED, - RuntimeArgs::default(), - ) - .build(); - - builder.exec(store_faucet_request).expect_success().commit(); - - let faucet_account = { - let tmp = builder - .query(None, Key::Account(*FAUCET_ADDR), &[]) - .unwrap(); - tmp.as_account().cloned().unwrap() - }; - - let faucet_hash = { - let faucet_key = faucet_account.named_keys().get("faucet").cloned().unwrap(); - faucet_key.into_hash().unwrap() - }; - - let faucet_request_amount = U512::from(FAUCET_REQUEST_AMOUNT); - - let faucet_request = ExecuteRequestBuilder::contract_call_by_hash( - *DEFAULT_ACCOUNT_ADDR, - faucet_hash.into(), - CONTRACT_FAUCET_ENTRYPOINT, - runtime_args! { - ARG_TARGET => *ALICE_ADDR, - ARG_AMOUNT => faucet_request_amount, - }, - ) - .build(); - - builder.exec(faucet_request).commit().expect_success(); - - let alice_account = { - let tmp = builder.query(None, Key::Account(*ALICE_ADDR), &[]).unwrap(); - tmp.as_account().cloned().unwrap() - }; - - let balance = builder.get_purse_balance(alice_account.main_purse()); - - assert_eq!(balance, faucet_request_amount); - - let faucet_request = ExecuteRequestBuilder::contract_call_by_hash( - *DEFAULT_ACCOUNT_ADDR, - faucet_hash.into(), - CONTRACT_FAUCET_ENTRYPOINT, - runtime_args! 
{ - ARG_TARGET => *ALICE_ADDR, - ARG_AMOUNT => faucet_request_amount, - }, - ) - .build(); - - builder.exec(faucet_request).commit(); - - let error = { - let response = builder - .get_exec_results() - .last() - .expect("should have last exec result"); - let exec_response = response.last().expect("should have response"); - exec_response.as_error().expect("should have error") - }; - assert_matches!( - error, - engine_state::Error::Exec(execution::Error::Revert(ApiError::User(1))) - ); -} - -#[ignore] -#[test] -fn faucet_should_transfer_to_existing_account() { - let accounts = { - let faucet_account = GenesisAccount::account( - FAUCET.clone(), - Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE.into()), - None, - ); - let alice_account = GenesisAccount::account( - ALICE.clone(), - Motes::new(MINIMUM_ACCOUNT_CREATION_BALANCE.into()), - None, - ); - let mut tmp: Vec = DEFAULT_ACCOUNTS.clone(); - tmp.push(faucet_account); - tmp.push(alice_account); - tmp - }; - - let run_genesis_request = utils::create_run_genesis_request(accounts); - - let mut builder = InMemoryWasmTestBuilder::default(); - builder.run_genesis(&run_genesis_request); - - let store_faucet_request = ExecuteRequestBuilder::standard( - *FAUCET_ADDR, - CONTRACT_FAUCET_STORED, - RuntimeArgs::default(), - ) - .build(); - - builder.exec(store_faucet_request).expect_success().commit(); - - let faucet_account = { - let tmp = builder - .query(None, Key::Account(*FAUCET_ADDR), &[]) - .unwrap(); - tmp.as_account().cloned().unwrap() - }; - - let faucet_hash = { - let faucet_key = faucet_account.named_keys().get("faucet").cloned().unwrap(); - faucet_key.into_hash().unwrap() - }; - - let faucet_request_amount = U512::from(FAUCET_REQUEST_AMOUNT); - - let faucet_request = ExecuteRequestBuilder::contract_call_by_hash( - *FAUCET_ADDR, - faucet_hash.into(), - CONTRACT_FAUCET_ENTRYPOINT, - runtime_args! 
{ - ARG_TARGET => *ALICE_ADDR, - ARG_AMOUNT => faucet_request_amount, - }, - ) - .build(); - - builder.exec(faucet_request).commit().expect_success(); - - let alice_account = { - let tmp = builder.query(None, Key::Account(*ALICE_ADDR), &[]).unwrap(); - tmp.as_account().cloned().unwrap() - }; - - let balance = builder.get_purse_balance(alice_account.main_purse()); - - assert_eq!( - balance, - faucet_request_amount + MINIMUM_ACCOUNT_CREATION_BALANCE - ); - - let faucet_request = ExecuteRequestBuilder::contract_call_by_hash( - *FAUCET_ADDR, - faucet_hash.into(), - CONTRACT_FAUCET_ENTRYPOINT, - runtime_args! { - ARG_TARGET => *ALICE_ADDR, - ARG_AMOUNT => faucet_request_amount, - }, - ) - .build(); - - builder.exec(faucet_request).commit(); - - let error = { - let response = builder - .get_exec_results() - .last() - .expect("should have last exec result"); - let exec_response = response.last().expect("should have response"); - exec_response.as_error().expect("should have error") - }; - assert_matches!( - error, - engine_state::Error::Exec(execution::Error::Revert(ApiError::User(1))) - ); -} diff --git a/execution_engine_testing/tests/src/test/regression/ee_1045.rs b/execution_engine_testing/tests/src/test/regression/ee_1045.rs index 90254e7e39..310f6d0e5e 100644 --- a/execution_engine_testing/tests/src/test/regression/ee_1045.rs +++ b/execution_engine_testing/tests/src/test/regression/ee_1045.rs @@ -2,21 +2,14 @@ use num_traits::Zero; use std::collections::BTreeSet; use casper_engine_test_support::{ - internal::{ - utils, ExecuteRequestBuilder, InMemoryWasmTestBuilder, DEFAULT_ACCOUNTS, - DEFAULT_AUCTION_DELAY, DEFAULT_GENESIS_TIMESTAMP_MILLIS, - DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS, SYSTEM_ADDR, TIMESTAMP_MILLIS_INCREMENT, - }, - DEFAULT_ACCOUNT_ADDR, MINIMUM_ACCOUNT_CREATION_BALANCE, -}; -use casper_execution_engine::{ - core::engine_state::genesis::{GenesisAccount, GenesisValidator}, - shared::motes::Motes, + utils, ExecuteRequestBuilder, LmdbWasmTestBuilder, 
DEFAULT_ACCOUNTS, DEFAULT_ACCOUNT_ADDR, + DEFAULT_AUCTION_DELAY, DEFAULT_GENESIS_TIMESTAMP_MILLIS, DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS, + MINIMUM_ACCOUNT_CREATION_BALANCE, SYSTEM_ADDR, TIMESTAMP_MILLIS_INCREMENT, }; use casper_types::{ runtime_args, system::auction::{DelegationRate, ARG_VALIDATOR_PUBLIC_KEYS, INITIAL_ERA_ID, METHOD_SLASH}, - PublicKey, RuntimeArgs, SecretKey, U512, + GenesisAccount, GenesisValidator, Motes, PublicKey, SecretKey, U512, }; use once_cell::sync::Lazy; @@ -27,33 +20,29 @@ const ARG_AMOUNT: &str = "amount"; const TRANSFER_AMOUNT: u64 = MINIMUM_ACCOUNT_CREATION_BALANCE + 1000; static ACCOUNT_1_PK: Lazy = Lazy::new(|| { - SecretKey::ed25519_from_bytes([200; SecretKey::ED25519_LENGTH]) - .unwrap() - .into() + let secret_key = SecretKey::ed25519_from_bytes([200; SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) }); const ACCOUNT_1_BALANCE: u64 = MINIMUM_ACCOUNT_CREATION_BALANCE; const ACCOUNT_1_BOND: u64 = 100_000; static ACCOUNT_2_PK: Lazy = Lazy::new(|| { - SecretKey::ed25519_from_bytes([202; SecretKey::ED25519_LENGTH]) - .unwrap() - .into() + let secret_key = SecretKey::ed25519_from_bytes([202; SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) }); const ACCOUNT_2_BALANCE: u64 = MINIMUM_ACCOUNT_CREATION_BALANCE; const ACCOUNT_2_BOND: u64 = 200_000; static ACCOUNT_3_PK: Lazy = Lazy::new(|| { - SecretKey::ed25519_from_bytes([204; SecretKey::ED25519_LENGTH]) - .unwrap() - .into() + let secret_key = SecretKey::ed25519_from_bytes([204; SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) }); const ACCOUNT_3_BALANCE: u64 = MINIMUM_ACCOUNT_CREATION_BALANCE; const ACCOUNT_3_BOND: u64 = 200_000; static ACCOUNT_4_PK: Lazy = Lazy::new(|| { - SecretKey::ed25519_from_bytes([206; SecretKey::ED25519_LENGTH]) - .unwrap() - .into() + let secret_key = SecretKey::ed25519_from_bytes([206; SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) }); const ACCOUNT_4_BALANCE: u64 = 
MINIMUM_ACCOUNT_CREATION_BALANCE; const ACCOUNT_4_BOND: u64 = 200_000; @@ -63,33 +52,33 @@ const ACCOUNT_4_BOND: u64 = 200_000; fn should_run_ee_1045_squash_validators() { let account_1 = GenesisAccount::account( ACCOUNT_1_PK.clone(), - Motes::new(ACCOUNT_1_BALANCE.into()), + Motes::new(ACCOUNT_1_BALANCE), Some(GenesisValidator::new( - Motes::new(ACCOUNT_1_BOND.into()), + Motes::new(ACCOUNT_1_BOND), DelegationRate::zero(), )), ); let account_2 = GenesisAccount::account( ACCOUNT_2_PK.clone(), - Motes::new(ACCOUNT_2_BALANCE.into()), + Motes::new(ACCOUNT_2_BALANCE), Some(GenesisValidator::new( - Motes::new(ACCOUNT_2_BOND.into()), + Motes::new(ACCOUNT_2_BOND), DelegationRate::zero(), )), ); let account_3 = GenesisAccount::account( ACCOUNT_3_PK.clone(), - Motes::new(ACCOUNT_3_BALANCE.into()), + Motes::new(ACCOUNT_3_BALANCE), Some(GenesisValidator::new( - Motes::new(ACCOUNT_3_BOND.into()), + Motes::new(ACCOUNT_3_BOND), DelegationRate::zero(), )), ); let account_4 = GenesisAccount::account( ACCOUNT_4_PK.clone(), - Motes::new(ACCOUNT_4_BALANCE.into()), + Motes::new(ACCOUNT_4_BALANCE), Some(GenesisValidator::new( - Motes::new(ACCOUNT_4_BOND.into()), + Motes::new(ACCOUNT_4_BOND), DelegationRate::zero(), )), ); @@ -120,9 +109,9 @@ fn should_run_ee_1045_squash_validators() { ) .build(); - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - builder.run_genesis(&run_genesis_request); + builder.run_genesis(run_genesis_request); let genesis_validator_weights = builder .get_validator_weights(INITIAL_ERA_ID) diff --git a/execution_engine_testing/tests/src/test/regression/ee_1071.rs b/execution_engine_testing/tests/src/test/regression/ee_1071.rs index ceba8a9328..5981580cd7 100644 --- a/execution_engine_testing/tests/src/test/regression/ee_1071.rs +++ b/execution_engine_testing/tests/src/test/regression/ee_1071.rs @@ -1,8 +1,7 @@ use casper_engine_test_support::{ - internal::{ExecuteRequestBuilder, InMemoryWasmTestBuilder, 
DEFAULT_RUN_GENESIS_REQUEST}, - DEFAULT_ACCOUNT_ADDR, + ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST, }; -use casper_types::RuntimeArgs; +use casper_types::{EntityAddr, RuntimeArgs}; const CONTRACT_EE_1071_REGRESSION: &str = "ee_1071_regression.wasm"; const CONTRACT_HASH_NAME: &str = "contract"; @@ -18,24 +17,23 @@ fn should_run_ee_1071_regression() { ) .build(); - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); builder.exec(exec_request_1).expect_success().commit(); let account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) .expect("should have account"); - let contract_hash = account + let contract_hash = (*account .named_keys() .get(CONTRACT_HASH_NAME) - .expect("should have hash") - .clone() - .into_hash() - .expect("should be hash") - .into(); + .expect("should have hash")) + .into_entity_hash_addr() + .expect("should be hash") + .into(); let exec_request_2 = ExecuteRequestBuilder::contract_call_by_hash( *DEFAULT_ACCOUNT_ADDR, @@ -45,15 +43,12 @@ fn should_run_ee_1071_regression() { ) .build(); - let contract_before = builder - .get_contract(contract_hash) - .expect("should have account"); + let contract_before = builder.get_named_keys(EntityAddr::SmartContract(contract_hash.value())); builder.exec(exec_request_2).expect_success().commit(); - let contract_after = builder - .get_contract(contract_hash) - .expect("should have account"); + let contract_after = builder.get_named_keys(EntityAddr::SmartContract(contract_hash.value())); + assert_ne!( contract_after, contract_before, "contract object should be modified" diff --git a/execution_engine_testing/tests/src/test/regression/ee_1103.rs b/execution_engine_testing/tests/src/test/regression/ee_1103.rs index 0d8b38e2e3..7af45c976b 100644 --- 
a/execution_engine_testing/tests/src/test/regression/ee_1103.rs +++ b/execution_engine_testing/tests/src/test/regression/ee_1103.rs @@ -2,22 +2,15 @@ use num_traits::Zero; use once_cell::sync::Lazy; use casper_engine_test_support::{ - internal::{ - utils, ExecuteRequestBuilder, InMemoryWasmTestBuilder, DEFAULT_ACCOUNTS, - DEFAULT_GENESIS_TIMESTAMP_MILLIS, DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS, SYSTEM_ADDR, - TIMESTAMP_MILLIS_INCREMENT, - }, - MINIMUM_ACCOUNT_CREATION_BALANCE, -}; -use casper_execution_engine::{ - core::engine_state::{genesis::GenesisValidator, GenesisAccount}, - shared::motes::Motes, + utils, ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNTS, + DEFAULT_GENESIS_TIMESTAMP_MILLIS, DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS, + MINIMUM_ACCOUNT_CREATION_BALANCE, SYSTEM_ADDR, TIMESTAMP_MILLIS_INCREMENT, }; use casper_types::{ account::AccountHash, runtime_args, system::auction::{DelegationRate, ARG_DELEGATOR, ARG_VALIDATOR}, - PublicKey, RuntimeArgs, SecretKey, U512, + GenesisAccount, GenesisValidator, Motes, PublicKey, SecretKey, U512, }; const ARG_TARGET: &str = "target"; @@ -28,39 +21,32 @@ const CONTRACT_DELEGATE: &str = "delegate.wasm"; const TRANSFER_AMOUNT: u64 = MINIMUM_ACCOUNT_CREATION_BALANCE; static FAUCET: Lazy = Lazy::new(|| { - SecretKey::ed25519_from_bytes([1; SecretKey::ED25519_LENGTH]) - .unwrap() - .into() + let secret_key = SecretKey::ed25519_from_bytes([1; SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) }); static VALIDATOR_1: Lazy = Lazy::new(|| { - SecretKey::ed25519_from_bytes([3; SecretKey::ED25519_LENGTH]) - .unwrap() - .into() + let secret_key = SecretKey::ed25519_from_bytes([3; SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) }); static VALIDATOR_2: Lazy = Lazy::new(|| { - SecretKey::ed25519_from_bytes([5; SecretKey::ED25519_LENGTH]) - .unwrap() - .into() + let secret_key = SecretKey::ed25519_from_bytes([5; SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) }); static 
VALIDATOR_3: Lazy = Lazy::new(|| { - SecretKey::ed25519_from_bytes([7; SecretKey::ED25519_LENGTH]) - .unwrap() - .into() + let secret_key = SecretKey::ed25519_from_bytes([7; SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) }); static DELEGATOR_1: Lazy = Lazy::new(|| { - SecretKey::ed25519_from_bytes([203; SecretKey::ED25519_LENGTH]) - .unwrap() - .into() + let secret_key = SecretKey::ed25519_from_bytes([203; SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) }); static DELEGATOR_2: Lazy = Lazy::new(|| { - SecretKey::ed25519_from_bytes([205; SecretKey::ED25519_LENGTH]) - .unwrap() - .into() + let secret_key = SecretKey::ed25519_from_bytes([205; SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) }); static DELEGATOR_3: Lazy = Lazy::new(|| { - SecretKey::ed25519_from_bytes([207; SecretKey::ED25519_LENGTH]) - .unwrap() - .into() + let secret_key = SecretKey::ed25519_from_bytes([207; SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) }); // These values were chosen to correspond to the values in accounts.toml @@ -171,10 +157,10 @@ fn validator_scores_should_reflect_delegates() { let mut timestamp_millis = DEFAULT_GENESIS_TIMESTAMP_MILLIS + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS; - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); let run_genesis_request = utils::create_run_genesis_request(accounts); - builder.run_genesis(&run_genesis_request); + builder.run_genesis(run_genesis_request); for request in post_genesis_requests { builder.exec(request).commit().expect_success(); diff --git a/execution_engine_testing/tests/src/test/regression/ee_1119.rs b/execution_engine_testing/tests/src/test/regression/ee_1119.rs index d06dedc0a3..93301454f2 100644 --- a/execution_engine_testing/tests/src/test/regression/ee_1119.rs +++ b/execution_engine_testing/tests/src/test/regression/ee_1119.rs @@ -2,28 +2,22 @@ use num_traits::Zero; use once_cell::sync::Lazy; 
use casper_engine_test_support::{ - internal::{ - utils, ExecuteRequestBuilder, InMemoryWasmTestBuilder, DEFAULT_ACCOUNTS, - DEFAULT_ACCOUNT_PUBLIC_KEY, DEFAULT_GENESIS_TIMESTAMP_MILLIS, - DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS, SYSTEM_ADDR, - }, - DEFAULT_ACCOUNT_ADDR, DEFAULT_ACCOUNT_INITIAL_BALANCE, MINIMUM_ACCOUNT_CREATION_BALANCE, -}; -use casper_execution_engine::{ - core::engine_state::genesis::{GenesisAccount, GenesisValidator}, - shared::motes::Motes, + utils, ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNTS, DEFAULT_ACCOUNT_ADDR, + DEFAULT_ACCOUNT_INITIAL_BALANCE, DEFAULT_ACCOUNT_PUBLIC_KEY, DEFAULT_GENESIS_TIMESTAMP_MILLIS, + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS, MINIMUM_ACCOUNT_CREATION_BALANCE, SYSTEM_ADDR, }; +use casper_execution_engine::engine_state::engine_config::DEFAULT_MINIMUM_DELEGATION_AMOUNT; use casper_types::{ account::AccountHash, runtime_args, system::{ auction::{ - Bids, DelegationRate, UnbondingPurses, ARG_DELEGATOR, ARG_VALIDATOR, + BidsExt, DelegationRate, UnbondKind, ARG_DELEGATOR, ARG_VALIDATOR, ARG_VALIDATOR_PUBLIC_KEYS, METHOD_SLASH, }, mint::TOTAL_SUPPLY_KEY, }, - PublicKey, RuntimeArgs, SecretKey, U512, + EntityAddr, GenesisAccount, GenesisValidator, Motes, PublicKey, SecretKey, U512, }; const CONTRACT_TRANSFER_TO_ACCOUNT: &str = "transfer_to_account_u512.wasm"; @@ -31,7 +25,7 @@ const CONTRACT_WITHDRAW_BID: &str = "withdraw_bid.wasm"; const CONTRACT_DELEGATE: &str = "delegate.wasm"; const CONTRACT_UNDELEGATE: &str = "undelegate.wasm"; -const DELEGATE_AMOUNT_1: u64 = 95_000; +const DELEGATE_AMOUNT_1: u64 = 95_000 + DEFAULT_MINIMUM_DELEGATION_AMOUNT; const UNDELEGATE_AMOUNT_1: u64 = 17_000; const TRANSFER_AMOUNT: u64 = MINIMUM_ACCOUNT_CREATION_BALANCE; @@ -40,9 +34,8 @@ const ARG_AMOUNT: &str = "amount"; const ARG_PUBLIC_KEY: &str = "public_key"; static VALIDATOR_1: Lazy = Lazy::new(|| { - SecretKey::ed25519_from_bytes([3; SecretKey::ED25519_LENGTH]) - .unwrap() - .into() + let secret_key = SecretKey::ed25519_from_bytes([3; 
SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) }); static VALIDATOR_1_ADDR: Lazy = Lazy::new(|| AccountHash::from(&*VALIDATOR_1)); const VALIDATOR_1_STAKE: u64 = 250_000; @@ -51,13 +44,13 @@ const VESTING_WEEKS: u64 = 14; #[ignore] #[test] -fn should_run_ee_1119_dont_slash_delegated_validators() { +fn should_slash_validator_and_their_delegators() { let accounts = { let validator_1 = GenesisAccount::account( VALIDATOR_1.clone(), - Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE.into()), + Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE), Some(GenesisValidator::new( - Motes::new(VALIDATOR_1_STAKE.into()), + Motes::new(VALIDATOR_1_STAKE), DelegationRate::zero(), )), ); @@ -68,8 +61,8 @@ fn should_run_ee_1119_dont_slash_delegated_validators() { }; let run_genesis_request = utils::create_run_genesis_request(accounts); - let mut builder = InMemoryWasmTestBuilder::default(); - builder.run_genesis(&run_genesis_request); + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(run_genesis_request); let fund_system_exec_request = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, @@ -108,15 +101,15 @@ fn should_run_ee_1119_dont_slash_delegated_validators() { .expect_success() .commit(); - let bids: Bids = builder.get_bids(); - let validator_1_bid = bids.get(&VALIDATOR_1).expect("should have bid"); + let bids = builder.get_bids(); + let validator_1_bid = bids.validator_bid(&VALIDATOR_1).expect("should have bid"); let bid_purse = validator_1_bid.bonding_purse(); assert_eq!( builder.get_purse_balance(*bid_purse), U512::from(VALIDATOR_1_STAKE), ); - let unbond_purses: UnbondingPurses = builder.get_withdraws(); + let unbond_purses = builder.get_unbonds(); assert_eq!(unbond_purses.len(), 0); // @@ -149,7 +142,7 @@ fn should_run_ee_1119_dont_slash_delegated_validators() { .expect_success(); // - // Other genesis validator withdraws withdraws his bid + // Other genesis validator withdraws his bid // let withdraw_bid_request = 
ExecuteRequestBuilder::standard( @@ -164,36 +157,30 @@ fn should_run_ee_1119_dont_slash_delegated_validators() { builder.exec(withdraw_bid_request).expect_success().commit(); - let unbond_purses: UnbondingPurses = builder.get_withdraws(); - assert_eq!(unbond_purses.len(), 1); + let unbond_purses = builder.get_unbonds(); + assert_eq!(unbond_purses.len(), 2); - let unbond_list = unbond_purses - .get(&VALIDATOR_1_ADDR) + let unbond_kind = UnbondKind::Validator(VALIDATOR_1.clone()); + + let unbonds = unbond_purses + .get(&unbond_kind) .cloned() .expect("should have unbond"); - assert_eq!(unbond_list.len(), 2); // two entries in order: undelegate, and withdraw bid - - // undelegate entry - - assert_eq!(unbond_list[0].validator_public_key(), &*VALIDATOR_1,); + assert_eq!(unbonds.len(), 1); + let unbond = unbonds.first().expect("must get unbond"); + assert_eq!(unbond.eras().len(), 1); + assert_eq!(unbond.validator_public_key(), &*VALIDATOR_1,); assert_eq!( - unbond_list[0].unbonder_public_key(), - &*DEFAULT_ACCOUNT_PUBLIC_KEY, + unbond.unbond_kind(), + &UnbondKind::Validator(VALIDATOR_1.clone()), ); - assert!(!unbond_list[0].is_validator()); - - // - // withdraw_bid entry - // - - assert_eq!(unbond_list[1].validator_public_key(), &*VALIDATOR_1,); - assert_eq!(unbond_list[1].unbonder_public_key(), &*VALIDATOR_1,); - assert!(unbond_list[1].is_validator()); - assert_eq!(unbond_list[1].amount(), &unbond_amount); + assert!(unbond.is_validator()); + let era = unbond.eras().first().expect("should have eras"); + assert_eq!(era.amount(), &unbond_amount); assert!( - !unbond_purses.contains_key(&*DEFAULT_ACCOUNT_ADDR), - "should not be part of unbonds" + unbond_purses.contains_key(&unbond_kind), + "should be part of unbonds" ); let slash_request_1 = ExecuteRequestBuilder::contract_call_by_hash( @@ -208,21 +195,23 @@ fn should_run_ee_1119_dont_slash_delegated_validators() { builder.exec(slash_request_1).expect_success().commit(); - let unbond_purses_noop: UnbondingPurses = 
builder.get_withdraws(); + let unbond_purses_noop = builder.get_unbonds(); assert_eq!( unbond_purses, unbond_purses_noop, "slashing default validator should be noop because no unbonding was done" ); - let bids: Bids = builder.get_bids(); + let bids = builder.get_bids(); assert!(!bids.is_empty()); - assert!(bids.contains_key(&VALIDATOR_1)); // still bid upon + bids.validator_bid(&VALIDATOR_1).expect("bids should exist"); // // Slash - only `withdraw_bid` amount is slashed // - let total_supply_before_slashing: U512 = - builder.get_value(builder.get_mint_contract_hash(), TOTAL_SUPPLY_KEY); + let total_supply_before_slashing: U512 = builder.get_value( + EntityAddr::System(builder.get_mint_contract_hash().value()), + TOTAL_SUPPLY_KEY, + ); let slash_request_2 = ExecuteRequestBuilder::contract_call_by_hash( *SYSTEM_ADDR, @@ -236,22 +225,19 @@ fn should_run_ee_1119_dont_slash_delegated_validators() { builder.exec(slash_request_2).expect_success().commit(); - let unbond_purses: UnbondingPurses = builder.get_withdraws(); - assert_eq!(unbond_purses.len(), 1); - - assert!(!unbond_purses.contains_key(&*DEFAULT_ACCOUNT_ADDR)); + let unbond_purses = builder.get_unbonds(); + assert_eq!(unbond_purses.len(), 0); - assert!(unbond_purses.get(&VALIDATOR_1_ADDR).unwrap().is_empty()); + let bids = builder.get_bids(); + assert!(bids.validator_bid(&VALIDATOR_1).is_none()); - let bids: Bids = builder.get_bids(); - let validator_1_bid = bids.get(&VALIDATOR_1).unwrap(); - assert!(validator_1_bid.inactive()); - assert!(validator_1_bid.staked_amount().is_zero()); + let total_supply_after_slashing: U512 = builder.get_value( + EntityAddr::System(builder.get_mint_contract_hash().value()), + TOTAL_SUPPLY_KEY, + ); - let total_supply_after_slashing: U512 = - builder.get_value(builder.get_mint_contract_hash(), TOTAL_SUPPLY_KEY); assert_eq!( - total_supply_before_slashing - total_supply_after_slashing, - U512::from(VALIDATOR_1_STAKE + UNDELEGATE_AMOUNT_1), + total_supply_after_slashing + 
VALIDATOR_1_STAKE + DELEGATE_AMOUNT_1, + total_supply_before_slashing, ); } diff --git a/execution_engine_testing/tests/src/test/regression/ee_1120.rs b/execution_engine_testing/tests/src/test/regression/ee_1120.rs index 7c3a79591c..04ab958332 100644 --- a/execution_engine_testing/tests/src/test/regression/ee_1120.rs +++ b/execution_engine_testing/tests/src/test/regression/ee_1120.rs @@ -4,57 +4,48 @@ use num_traits::Zero; use once_cell::sync::Lazy; use casper_engine_test_support::{ - internal::{ - utils, ExecuteRequestBuilder, InMemoryWasmTestBuilder, DEFAULT_ACCOUNTS, SYSTEM_ADDR, - }, - DEFAULT_ACCOUNT_ADDR, DEFAULT_ACCOUNT_INITIAL_BALANCE, MINIMUM_ACCOUNT_CREATION_BALANCE, -}; -use casper_execution_engine::{ - core::engine_state::genesis::{GenesisAccount, GenesisValidator}, - shared::motes::Motes, + utils, ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNTS, DEFAULT_ACCOUNT_ADDR, + DEFAULT_ACCOUNT_INITIAL_BALANCE, MINIMUM_ACCOUNT_CREATION_BALANCE, SYSTEM_ADDR, }; +use casper_execution_engine::engine_state::engine_config::DEFAULT_MINIMUM_DELEGATION_AMOUNT; use casper_types::{ account::AccountHash, runtime_args, system::auction::{ - Bids, DelegationRate, UnbondingPurses, ARG_DELEGATOR, ARG_VALIDATOR, + BidKind, BidsExt, DelegationRate, DelegatorKind, UnbondKind, ARG_DELEGATOR, ARG_VALIDATOR, ARG_VALIDATOR_PUBLIC_KEYS, METHOD_SLASH, }, - PublicKey, RuntimeArgs, SecretKey, U512, + GenesisAccount, GenesisValidator, Motes, PublicKey, SecretKey, U512, }; const CONTRACT_TRANSFER_TO_ACCOUNT: &str = "transfer_to_account_u512.wasm"; const CONTRACT_DELEGATE: &str = "delegate.wasm"; const CONTRACT_UNDELEGATE: &str = "undelegate.wasm"; -const DELEGATE_AMOUNT_1: u64 = 95_000; -const DELEGATE_AMOUNT_2: u64 = 42_000; -const DELEGATE_AMOUNT_3: u64 = 13_000; -const UNDELEGATE_AMOUNT_1: u64 = 17_000; -const UNDELEGATE_AMOUNT_2: u64 = 24_500; -const UNDELEGATE_AMOUNT_3: u64 = 7_500; +const DELEGATE_AMOUNT_1: u64 = 1 + DEFAULT_MINIMUM_DELEGATION_AMOUNT; +const 
DELEGATE_AMOUNT_2: u64 = 2 + DEFAULT_MINIMUM_DELEGATION_AMOUNT; +const DELEGATE_AMOUNT_3: u64 = 3 + DEFAULT_MINIMUM_DELEGATION_AMOUNT; +const UNDELEGATE_AMOUNT_1: u64 = 1; +const UNDELEGATE_AMOUNT_2: u64 = 2; +const UNDELEGATE_AMOUNT_3: u64 = 3; const TRANSFER_AMOUNT: u64 = MINIMUM_ACCOUNT_CREATION_BALANCE; const ARG_AMOUNT: &str = "amount"; static VALIDATOR_1: Lazy = Lazy::new(|| { - SecretKey::ed25519_from_bytes([3; SecretKey::ED25519_LENGTH]) - .unwrap() - .into() + let secret_key = SecretKey::ed25519_from_bytes([3; SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) }); static VALIDATOR_2: Lazy = Lazy::new(|| { - SecretKey::ed25519_from_bytes([4; SecretKey::ED25519_LENGTH]) - .unwrap() - .into() + let secret_key = SecretKey::ed25519_from_bytes([4; SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) }); static DELEGATOR_1: Lazy = Lazy::new(|| { - SecretKey::ed25519_from_bytes([5; SecretKey::ED25519_LENGTH]) - .unwrap() - .into() + let secret_key = SecretKey::ed25519_from_bytes([5; SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) }); -static VALIDATOR_1_ADDR: Lazy = Lazy::new(|| AccountHash::from(&*VALIDATOR_1)); static VALIDATOR_2_ADDR: Lazy = Lazy::new(|| AccountHash::from(&*VALIDATOR_2)); static DELEGATOR_1_ADDR: Lazy = Lazy::new(|| AccountHash::from(&*DELEGATOR_1)); @@ -67,17 +58,17 @@ fn should_run_ee_1120_slash_delegators() { let accounts = { let validator_1 = GenesisAccount::account( VALIDATOR_1.clone(), - Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE.into()), + Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE), Some(GenesisValidator::new( - Motes::new(VALIDATOR_1_STAKE.into()), + Motes::new(VALIDATOR_1_STAKE), DelegationRate::zero(), )), ); let validator_2 = GenesisAccount::account( VALIDATOR_2.clone(), - Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE.into()), + Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE), Some(GenesisValidator::new( - Motes::new(VALIDATOR_2_STAKE.into()), + Motes::new(VALIDATOR_2_STAKE), 
DelegationRate::zero(), )), ); @@ -89,8 +80,8 @@ fn should_run_ee_1120_slash_delegators() { }; let run_genesis_request = utils::create_run_genesis_request(accounts); - let mut builder = InMemoryWasmTestBuilder::default(); - builder.run_genesis(&run_genesis_request); + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(run_genesis_request); let transfer_request_1 = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, @@ -169,13 +160,15 @@ fn should_run_ee_1120_slash_delegators() { .commit(); // Ensure that initial bid entries exist for validator 1 and validator 2 - let initial_bids: Bids = builder.get_bids(); + let initial_bids = builder.get_bids(); + let key_map = initial_bids.delegator_map(); + let initial_bids_keys = key_map.keys().cloned().collect::>(); assert_eq!( - initial_bids.keys().cloned().collect::>(), + initial_bids_keys, BTreeSet::from_iter(vec![VALIDATOR_2.clone(), VALIDATOR_1.clone()]) ); - let initial_unbond_purses: UnbondingPurses = builder.get_withdraws(); + let initial_unbond_purses = builder.get_unbonds(); assert_eq!(initial_unbond_purses.len(), 0); // DELEGATOR_1 partially unbonds from VALIDATOR_1 @@ -214,74 +207,74 @@ fn should_run_ee_1120_slash_delegators() { ) .build(); - builder.exec(undelegate_request_1).commit().expect_success(); - builder.exec(undelegate_request_2).commit().expect_success(); - builder.exec(undelegate_request_3).commit().expect_success(); + let expected_unbond_keys = (&*DELEGATOR_1, &*VALIDATOR_2); + builder.exec(undelegate_request_1).expect_success().commit(); + builder.exec(undelegate_request_2).expect_success().commit(); + builder.exec(undelegate_request_3).expect_success().commit(); // Check unbonding purses before slashing - - let unbond_purses_before: UnbondingPurses = builder.get_withdraws(); - assert_eq!(unbond_purses_before.len(), 2); - - let validator_1_unbond_list_before = unbond_purses_before - .get(&VALIDATOR_1_ADDR) - .cloned() - .expect("should have unbond"); - 
assert_eq!(validator_1_unbond_list_before.len(), 2); // two entries in order: undelegate, and withdraw bid - - // Added through `undelegate_request_1` - assert_eq!( - validator_1_unbond_list_before[0].validator_public_key(), - &*VALIDATOR_1 - ); + let unbond_purses_before = builder.get_unbonds(); + // should be an unbonding purse for each distinct undelegator + unbond_purses_before.contains_key(&UnbondKind::Validator(expected_unbond_keys.1.clone())); + let delegator_unbond = unbond_purses_before + .get(&UnbondKind::DelegatedPublicKey( + expected_unbond_keys.0.clone(), + )) + .expect("should have entry"); + println!("du {:?}", delegator_unbond); assert_eq!( - validator_1_unbond_list_before[0].unbonder_public_key(), - &*DELEGATOR_1 + delegator_unbond.len(), + 2, + "this entity undelegated from 2 different validators" ); + let undelegate_from_v1 = delegator_unbond[1] + .eras() + .first() + .expect("should have entry"); + assert_eq!(undelegate_from_v1.amount().as_u64(), UNDELEGATE_AMOUNT_1); + let undelegate_from_v2 = delegator_unbond[0] + .eras() + .first() + .expect("should have entry"); + assert_eq!(undelegate_from_v2.amount().as_u64(), UNDELEGATE_AMOUNT_2); + + let dual_role_unbond = unbond_purses_before + .get(&UnbondKind::DelegatedPublicKey(expected_unbond_keys.1.clone())) + .expect("should have entry for entity that is both a validator and has also delegated to a different validator then unbonded from that other validator"); assert_eq!( - validator_1_unbond_list_before[0].amount(), - &U512::from(UNDELEGATE_AMOUNT_1) + dual_role_unbond.len(), + 1, + "this entity undelegated from 1 validator" ); + let undelegate_from_v1 = dual_role_unbond[0] + .eras() + .first() + .expect("should have entry"); + assert_eq!(undelegate_from_v1.amount().as_u64(), UNDELEGATE_AMOUNT_3); - // Added through `undelegate_request_3` - assert_eq!( - validator_1_unbond_list_before[1].validator_public_key(), - &*VALIDATOR_1 - ); - assert_eq!( - 
validator_1_unbond_list_before[1].unbonder_public_key(), - &*VALIDATOR_2 - ); - assert_eq!( - validator_1_unbond_list_before[1].amount(), - &U512::from(UNDELEGATE_AMOUNT_3) - ); + // Check bids before slashing - let validator_2_unbond_list = unbond_purses_before - .get(&*VALIDATOR_2_ADDR) + let bids_before: Vec = builder + .get_bids() + .into_iter() + .filter(|bid| !bid.is_unbond()) + .collect(); + /* + There should be 5 total bids at this point: + VALIDATOR1 and VALIDATOR2 each have a validator bid + DELEGATOR1 is delegated to each of them for 2 more bids + VALIDATOR2 is also delegated to VALIDATOR1 for 1 more bid + */ + assert_eq!(bids_before.len(), 5); + let bids_before_keys = bids_before + .delegator_map() + .keys() .cloned() - .expect("should have unbond"); - - assert_eq!(validator_2_unbond_list.len(), 1); // one entry: undelegate - assert_eq!( - validator_2_unbond_list[0].validator_public_key(), - &*VALIDATOR_2 - ); - assert_eq!( - validator_2_unbond_list[0].unbonder_public_key(), - &*DELEGATOR_1 - ); - assert_eq!( - validator_2_unbond_list[0].amount(), - &U512::from(UNDELEGATE_AMOUNT_2), - ); - - // Check bids before slashing + .collect::>(); - let bids_before: Bids = builder.get_bids(); assert_eq!( - bids_before.keys().collect::>(), - initial_bids.keys().collect::>() + bids_before_keys, initial_bids_keys, + "prior to taking action, keys should match initial keys" ); let slash_request_1 = ExecuteRequestBuilder::contract_call_by_hash( @@ -297,48 +290,40 @@ fn should_run_ee_1120_slash_delegators() { builder.exec(slash_request_1).expect_success().commit(); // Compare bids after slashing validator 2 - let bids_after: Bids = builder.get_bids(); + let bids_after: Vec = builder + .get_bids() + .into_iter() + .filter(|bid| !bid.is_unbond()) + .collect(); assert_ne!(bids_before, bids_after); - assert_eq!(bids_after.len(), 2); - let validator_2_bid = bids_after.get(&VALIDATOR_2).unwrap(); - assert!(validator_2_bid.inactive()); - 
assert!(validator_2_bid.staked_amount().is_zero()); - - assert!(bids_after.contains_key(&VALIDATOR_1)); - assert_eq!(bids_after[&VALIDATOR_1].delegators().len(), 2); - - // validator 2's delegation bid on validator 1 was not slashed. - assert!(bids_after[&VALIDATOR_1] - .delegators() - .contains_key(&VALIDATOR_2)); - assert!(bids_after[&VALIDATOR_1] - .delegators() - .contains_key(&DELEGATOR_1)); - - let unbond_purses_after: UnbondingPurses = builder.get_withdraws(); + /* + there should be 3 total bids at this point: + VALIDATOR1 was not slashed, and their bid remains + DELEGATOR1 is still delegated to VALIDATOR1 and their bid remains + VALIDATOR2's validator bid was slashed (and removed), but they are + also delegated to VALIDATOR1 and that delegation bid remains + */ + assert_eq!(bids_after.len(), 3); + assert!(bids_after.validator_bid(&VALIDATOR_2).is_none()); + + let validator_1_bid = bids_after + .validator_bid(&VALIDATOR_1) + .expect("should have validator1 bid"); + let delegators = bids_after + .delegators_by_validator_public_key(validator_1_bid.validator_public_key()) + .expect("should have delegators"); + assert_eq!(delegators.len(), 2); + + bids_after.delegator_by_kind(&VALIDATOR_1, &DelegatorKind::PublicKey(VALIDATOR_2.clone())).expect("the delegation record from VALIDATOR2 should exist on VALIDATOR1, in this particular and unusual edge case"); + bids_after + .delegator_by_kind(&VALIDATOR_1, &DelegatorKind::PublicKey(DELEGATOR_1.clone())) + .expect("the delegation record from DELEGATOR_1 should exist on VALIDATOR1"); + + let unbond_purses_after = builder.get_unbonds(); assert_ne!(unbond_purses_before, unbond_purses_after); - - let validator_1_unbond_list_after = unbond_purses_after - .get(&VALIDATOR_1_ADDR) - .expect("should have validator 1 entry"); - assert_eq!(validator_1_unbond_list_after.len(), 2); - assert_eq!( - validator_1_unbond_list_after[0].unbonder_public_key(), - &*DELEGATOR_1 - ); - - // validator 2's delegation unbond from validator 1 was 
not slashed - assert_eq!( - validator_1_unbond_list_after[1].unbonder_public_key(), - &*VALIDATOR_2 - ); - - // delegator 1 had a delegation unbond slashed for validator 2's behavior. - // delegator 1 still has an active delegation unbond from validator 2. - assert_eq!( - validator_1_unbond_list_after, - &validator_1_unbond_list_before - ); + assert!(!unbond_purses_after.contains_key(&UnbondKind::Validator(VALIDATOR_1.clone()))); + assert!(unbond_purses_after.contains_key(&UnbondKind::DelegatedPublicKey(DELEGATOR_1.clone()))); + assert!(unbond_purses_after.contains_key(&UnbondKind::DelegatedPublicKey(VALIDATOR_2.clone()))); // slash validator 1 to clear remaining bids and unbonding purses let slash_request_2 = ExecuteRequestBuilder::contract_call_by_hash( @@ -353,19 +338,17 @@ fn should_run_ee_1120_slash_delegators() { builder.exec(slash_request_2).expect_success().commit(); - let bids_after: Bids = builder.get_bids(); - assert_eq!(bids_after.len(), 2); - let validator_1_bid = bids_after.get(&VALIDATOR_1).unwrap(); - assert!(validator_1_bid.inactive()); - assert!(validator_1_bid.staked_amount().is_zero()); - - let unbond_purses_after: UnbondingPurses = builder.get_withdraws(); - assert!(unbond_purses_after - .get(&VALIDATOR_1_ADDR) - .unwrap() - .is_empty()); - assert!(unbond_purses_after - .get(&VALIDATOR_2_ADDR) - .unwrap() - .is_empty()); + let bids_after = builder.get_bids(); + assert_eq!( + bids_after.len(), + 0, + "we slashed everybody so there should be no bids remaining" + ); + + let unbond_purses_after = builder.get_unbonds(); + assert_eq!( + unbond_purses_after.len(), + 0, + "we slashed everybody currently unbonded so there should be no unbonds remaining" + ); } diff --git a/execution_engine_testing/tests/src/test/regression/ee_1129.rs b/execution_engine_testing/tests/src/test/regression/ee_1129.rs index 8ea8fb6adf..3d9e3ecf62 100644 --- a/execution_engine_testing/tests/src/test/regression/ee_1129.rs +++ 
b/execution_engine_testing/tests/src/test/regression/ee_1129.rs @@ -1,32 +1,27 @@ +use casper_wasm::builder; use num_traits::Zero; use once_cell::sync::Lazy; -use parity_wasm::builder; + +use casper_types::{GenesisAccount, GenesisValidator, Key}; use casper_engine_test_support::{ - internal::{ - utils, DeployItemBuilder, ExecuteRequestBuilder, InMemoryWasmTestBuilder, DEFAULT_ACCOUNTS, - DEFAULT_ACCOUNT_PUBLIC_KEY, DEFAULT_PAYMENT, DEFAULT_RUN_GENESIS_REQUEST, - }, - DEFAULT_ACCOUNT_ADDR, DEFAULT_ACCOUNT_INITIAL_BALANCE, + utils, DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNTS, + DEFAULT_ACCOUNT_ADDR, DEFAULT_ACCOUNT_INITIAL_BALANCE, DEFAULT_ACCOUNT_PUBLIC_KEY, + DEFAULT_PAYMENT, LOCAL_GENESIS_REQUEST, }; use casper_execution_engine::{ - core::{ - engine_state::{ - genesis::{GenesisAccount, GenesisValidator}, - Error, - }, - execution, - }, - shared::{motes::Motes, wasm::do_nothing_bytes, wasm_prep::PreprocessingError}, + engine_state::Error, execution::ExecError, runtime::PreprocessingError, }; use casper_types::{ account::AccountHash, - contracts::DEFAULT_ENTRY_POINT_NAME, + addressable_entity::DEFAULT_ENTRY_POINT_NAME, runtime_args, system::auction::{self, DelegationRate}, - PublicKey, RuntimeArgs, SecretKey, U512, + Motes, PublicKey, RuntimeArgs, SecretKey, DEFAULT_DELEGATE_COST, U512, }; +use crate::wasm_utils; + const ENTRY_POINT_NAME: &str = "create_purse"; const CONTRACT_KEY: &str = "contract"; const ACCESS_KEY: &str = "access"; @@ -35,24 +30,27 @@ const CONTRACT_EE_1129_REGRESSION: &str = "ee_1129_regression.wasm"; const ARG_AMOUNT: &str = "amount"; static VALIDATOR_1: Lazy = Lazy::new(|| { - SecretKey::ed25519_from_bytes([3; SecretKey::ED25519_LENGTH]) - .unwrap() - .into() + let secret_key = SecretKey::ed25519_from_bytes([3; SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) }); static VALIDATOR_1_ADDR: Lazy = Lazy::new(|| AccountHash::from(&*VALIDATOR_1)); const VALIDATOR_1_STAKE: u64 = 250_000; -static 
UNDERFUNDED_PAYMENT_AMOUNT: Lazy = Lazy::new(|| U512::from(10_001)); +static UNDERFUNDED_DELEGATE_AMOUNT: Lazy = Lazy::new(|| U512::from(1)); +static UNDERFUNDED_ADD_BID_AMOUNT: Lazy = Lazy::new(|| U512::from(1)); static CALL_STORED_CONTRACT_OVERHEAD: Lazy = Lazy::new(|| U512::from(10_001)); #[ignore] -#[test] +#[allow(unused)] +// #[test] fn should_run_ee_1129_underfunded_delegate_call() { + assert!(U512::from(DEFAULT_DELEGATE_COST) > *UNDERFUNDED_DELEGATE_AMOUNT); + let accounts = { let validator_1 = GenesisAccount::account( VALIDATOR_1.clone(), - Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE.into()), + Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE), Some(GenesisValidator::new( - Motes::new(VALIDATOR_1_STAKE.into()), + Motes::new(VALIDATOR_1_STAKE), DelegationRate::zero(), )), ); @@ -64,12 +62,12 @@ fn should_run_ee_1129_underfunded_delegate_call() { let run_genesis_request = utils::create_run_genesis_request(accounts); - let mut builder = InMemoryWasmTestBuilder::default(); - builder.run_genesis(&run_genesis_request); + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(run_genesis_request); let auction = builder.get_auction_contract_hash(); - let bid_amount = U512::one(); + let bid_amount = U512::from(100_000_000_000_000u64); let deploy_hash = [42; 32]; @@ -79,43 +77,42 @@ fn should_run_ee_1129_underfunded_delegate_call() { auction::ARG_AMOUNT => bid_amount, }; - let deploy = DeployItemBuilder::new() + let deploy_item = DeployItemBuilder::new() .with_address(*DEFAULT_ACCOUNT_ADDR) .with_stored_session_hash(auction, auction::METHOD_DELEGATE, args) - .with_empty_payment_bytes(runtime_args! { - ARG_AMOUNT => *UNDERFUNDED_PAYMENT_AMOUNT, // underfunded deploy + .with_standard_payment(runtime_args! 
{ + ARG_AMOUNT => *UNDERFUNDED_DELEGATE_AMOUNT, // underfunded deploy }) .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) .with_deploy_hash(deploy_hash) .build(); - let exec_request = ExecuteRequestBuilder::new().push_deploy(deploy).build(); + let exec_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); builder.exec(exec_request).commit(); let error = builder - .get_exec_results() - .last() + .get_last_exec_result() .expect("should have results") - .get(0) - .expect("should have first result") - .as_error() + .error() + .cloned() .expect("should have error"); assert!( - matches!(error, Error::Exec(execution::Error::GasLimit)), + matches!(error, Error::Exec(ExecError::GasLimit)), "Unexpected error {:?}", error ); } #[ignore] -#[test] +#[allow(unused)] +// #[test] fn should_run_ee_1129_underfunded_add_bid_call() { let accounts = { let validator_1 = GenesisAccount::account( VALIDATOR_1.clone(), - Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE.into()), + Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE), None, ); @@ -126,59 +123,56 @@ fn should_run_ee_1129_underfunded_add_bid_call() { let run_genesis_request = utils::create_run_genesis_request(accounts); - let mut builder = InMemoryWasmTestBuilder::default(); - builder.run_genesis(&run_genesis_request); + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(run_genesis_request); let auction = builder.get_auction_contract_hash(); - let amount = U512::one(); - let deploy_hash = [42; 32]; let delegation_rate: DelegationRate = 10; let args = runtime_args! { auction::ARG_PUBLIC_KEY => VALIDATOR_1.clone(), - auction::ARG_AMOUNT => amount, + auction::ARG_AMOUNT => *UNDERFUNDED_ADD_BID_AMOUNT, auction::ARG_DELEGATION_RATE => delegation_rate, }; - let deploy = DeployItemBuilder::new() + let deploy_item = DeployItemBuilder::new() .with_address(*VALIDATOR_1_ADDR) .with_stored_session_hash(auction, auction::METHOD_ADD_BID, args) - .with_empty_payment_bytes(runtime_args! 
{ - ARG_AMOUNT => *UNDERFUNDED_PAYMENT_AMOUNT, + .with_standard_payment(runtime_args! { + ARG_AMOUNT => *UNDERFUNDED_DELEGATE_AMOUNT, }) .with_authorization_keys(&[*VALIDATOR_1_ADDR]) .with_deploy_hash(deploy_hash) .build(); - let exec_request = ExecuteRequestBuilder::new().push_deploy(deploy).build(); + let exec_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); builder.exec(exec_request).commit(); let error = builder - .get_exec_results() - .last() + .get_last_exec_result() .expect("should have results") - .get(0) - .expect("should have first result") - .as_error() + .error() + .cloned() .expect("should have error"); assert!( - matches!(error, Error::Exec(execution::Error::GasLimit)), + matches!(error, Error::Exec(ExecError::GasLimit)), "Unexpected error {:?}", error ); } #[ignore] -#[test] +#[allow(unused)] +// #[test] fn should_run_ee_1129_underfunded_mint_contract_call() { - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - builder.run_genesis(&*DEFAULT_RUN_GENESIS_REQUEST); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); let install_exec_request = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, @@ -187,35 +181,31 @@ fn should_run_ee_1129_underfunded_mint_contract_call() { ) .build(); - let exec_request = { - let deploy = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_stored_session_named_key(CONTRACT_KEY, ENTRY_POINT_NAME, RuntimeArgs::default()) - .with_empty_payment_bytes(runtime_args! { - ARG_AMOUNT => *CALL_STORED_CONTRACT_OVERHEAD, - }) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) - .with_deploy_hash([42; 32]) - .build(); - - ExecuteRequestBuilder::new().push_deploy(deploy).build() - }; + let deploy_item = DeployItemBuilder::new() + .with_address(*DEFAULT_ACCOUNT_ADDR) + .with_stored_session_named_key(CONTRACT_KEY, ENTRY_POINT_NAME, RuntimeArgs::default()) + .with_standard_payment(runtime_args! 
{ + ARG_AMOUNT => *CALL_STORED_CONTRACT_OVERHEAD, + }) + .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) + .with_deploy_hash([42; 32]) + .build(); + + let exec_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); builder.exec(install_exec_request).expect_success().commit(); builder.exec(exec_request).commit(); let error = builder - .get_exec_results() - .last() + .get_last_exec_result() .expect("should have results") - .get(0) - .expect("should have first result") - .as_error() + .error() + .cloned() .expect("should have error"); assert!( - matches!(error, Error::Exec(execution::Error::GasLimit)), + matches!(error, Error::Exec(ExecError::GasLimit)), "Unexpected error {:?}", error ); @@ -224,9 +214,9 @@ fn should_run_ee_1129_underfunded_mint_contract_call() { #[ignore] #[test] fn should_not_panic_when_calling_session_contract_by_uref() { - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - builder.run_genesis(&*DEFAULT_RUN_GENESIS_REQUEST); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); let install_exec_request = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, @@ -235,35 +225,31 @@ fn should_not_panic_when_calling_session_contract_by_uref() { ) .build(); - let exec_request = { - let deploy = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_stored_session_named_key(ACCESS_KEY, ENTRY_POINT_NAME, RuntimeArgs::default()) - .with_empty_payment_bytes(runtime_args! { - ARG_AMOUNT => *CALL_STORED_CONTRACT_OVERHEAD, - }) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) - .with_deploy_hash([42; 32]) - .build(); - - ExecuteRequestBuilder::new().push_deploy(deploy).build() - }; + let deploy_item = DeployItemBuilder::new() + .with_address(*DEFAULT_ACCOUNT_ADDR) + .with_stored_session_named_key(ACCESS_KEY, ENTRY_POINT_NAME, RuntimeArgs::default()) + .with_standard_payment(runtime_args! 
{ + ARG_AMOUNT => *CALL_STORED_CONTRACT_OVERHEAD, + }) + .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) + .with_deploy_hash([42; 32]) + .build(); + + let exec_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); builder.exec(install_exec_request).expect_success().commit(); builder.exec(exec_request).commit(); let error = builder - .get_exec_results() - .last() + .get_last_exec_result() .expect("should have results") - .get(0) - .expect("should have first result") - .as_error() + .error() + .cloned() .expect("should have error"); assert!( - matches!(error, Error::InvalidKeyVariant), + matches!(error, Error::InvalidKeyVariant(Key::URef(_))), "Unexpected error {:?}", error ); @@ -272,9 +258,9 @@ fn should_not_panic_when_calling_session_contract_by_uref() { #[ignore] #[test] fn should_not_panic_when_calling_payment_contract_by_uref() { - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - builder.run_genesis(&*DEFAULT_RUN_GENESIS_REQUEST); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); let install_exec_request = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, @@ -283,33 +269,29 @@ fn should_not_panic_when_calling_payment_contract_by_uref() { ) .build(); - let exec_request = { - let deploy = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_session_bytes(do_nothing_bytes(), RuntimeArgs::new()) - .with_stored_payment_named_key(ACCESS_KEY, ENTRY_POINT_NAME, RuntimeArgs::new()) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) - .with_deploy_hash([42; 32]) - .build(); + let deploy_item = DeployItemBuilder::new() + .with_address(*DEFAULT_ACCOUNT_ADDR) + .with_session_bytes(wasm_utils::do_nothing_bytes(), RuntimeArgs::new()) + .with_stored_payment_named_key(ACCESS_KEY, ENTRY_POINT_NAME, RuntimeArgs::new()) + .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) + .with_deploy_hash([42; 32]) + .build(); - ExecuteRequestBuilder::new().push_deploy(deploy).build() 
- }; + let exec_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); builder.exec(install_exec_request).expect_success().commit(); builder.exec(exec_request).commit(); let error = builder - .get_exec_results() - .last() + .get_last_exec_result() .expect("should have results") - .get(0) - .expect("should have first result") - .as_error() + .error() + .cloned() .expect("should have error"); assert!( - matches!(error, Error::InvalidKeyVariant), + matches!(error, Error::InvalidKeyVariant(Key::URef(_))), "Unexpected error {:?}", error ); @@ -318,9 +300,9 @@ fn should_not_panic_when_calling_payment_contract_by_uref() { #[ignore] #[test] fn should_not_panic_when_calling_contract_package_by_uref() { - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - builder.run_genesis(&*DEFAULT_RUN_GENESIS_REQUEST); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); let install_exec_request = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, @@ -329,40 +311,36 @@ fn should_not_panic_when_calling_contract_package_by_uref() { ) .build(); - let exec_request = { - let deploy = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_stored_versioned_contract_by_name( - ACCESS_KEY, - None, - ENTRY_POINT_NAME, - RuntimeArgs::default(), - ) - .with_empty_payment_bytes(runtime_args! { - ARG_AMOUNT => *CALL_STORED_CONTRACT_OVERHEAD, - }) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) - .with_deploy_hash([42; 32]) - .build(); - - ExecuteRequestBuilder::new().push_deploy(deploy).build() - }; + let deploy_item = DeployItemBuilder::new() + .with_address(*DEFAULT_ACCOUNT_ADDR) + .with_stored_versioned_contract_by_name( + ACCESS_KEY, + None, + ENTRY_POINT_NAME, + RuntimeArgs::default(), + ) + .with_standard_payment(runtime_args! 
{ + ARG_AMOUNT => *CALL_STORED_CONTRACT_OVERHEAD, + }) + .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) + .with_deploy_hash([42; 32]) + .build(); + + let exec_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); builder.exec(install_exec_request).expect_success().commit(); builder.exec(exec_request).commit(); let error = builder - .get_exec_results() - .last() + .get_last_exec_result() .expect("should have results") - .get(0) - .expect("should have first result") - .as_error() + .error() + .cloned() .expect("should have error"); assert!( - matches!(error, Error::InvalidKeyVariant), + matches!(error, Error::InvalidKeyVariant(Key::URef(_))), "Unexpected error {:?}", error ); @@ -371,9 +349,9 @@ fn should_not_panic_when_calling_contract_package_by_uref() { #[ignore] #[test] fn should_not_panic_when_calling_payment_versioned_contract_by_uref() { - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - builder.run_genesis(&*DEFAULT_RUN_GENESIS_REQUEST); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); let install_exec_request = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, @@ -382,37 +360,33 @@ fn should_not_panic_when_calling_payment_versioned_contract_by_uref() { ) .build(); - let exec_request = { - let deploy = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_session_bytes(do_nothing_bytes(), RuntimeArgs::new()) - .with_stored_versioned_payment_contract_by_name( - ACCESS_KEY, - None, - ENTRY_POINT_NAME, - RuntimeArgs::new(), - ) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) - .with_deploy_hash([42; 32]) - .build(); - - ExecuteRequestBuilder::new().push_deploy(deploy).build() - }; + let deploy_item = DeployItemBuilder::new() + .with_address(*DEFAULT_ACCOUNT_ADDR) + .with_session_bytes(wasm_utils::do_nothing_bytes(), RuntimeArgs::new()) + .with_stored_versioned_payment_contract_by_name( + ACCESS_KEY, + None, + ENTRY_POINT_NAME, + RuntimeArgs::new(), 
+ ) + .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) + .with_deploy_hash([42; 32]) + .build(); + + let exec_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); builder.exec(install_exec_request).expect_success().commit(); builder.exec(exec_request).commit(); let error = builder - .get_exec_results() - .last() + .get_last_exec_result() .expect("should have results") - .get(0) - .expect("should have first result") - .as_error() + .error() + .cloned() .expect("should have error"); assert!( - matches!(error, Error::InvalidKeyVariant), + matches!(error, Error::InvalidKeyVariant(Key::URef(_))), "Unexpected error {:?}", error ); @@ -432,39 +406,35 @@ fn do_nothing_without_memory() -> Vec { .field(DEFAULT_ENTRY_POINT_NAME) .build() .build(); - parity_wasm::serialize(module).expect("should serialize") + casper_wasm::serialize(module).expect("should serialize") } #[ignore] #[test] fn should_not_panic_when_calling_module_without_memory() { - let mut builder = InMemoryWasmTestBuilder::default(); - - builder.run_genesis(&*DEFAULT_RUN_GENESIS_REQUEST); - - let exec_request = { - let deploy = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_session_bytes(do_nothing_without_memory(), RuntimeArgs::new()) - .with_empty_payment_bytes(runtime_args! { - ARG_AMOUNT => *DEFAULT_PAYMENT, - }) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) - .with_deploy_hash([42; 32]) - .build(); - - ExecuteRequestBuilder::new().push_deploy(deploy).build() - }; + let mut builder = LmdbWasmTestBuilder::default(); + + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let deploy_item = DeployItemBuilder::new() + .with_address(*DEFAULT_ACCOUNT_ADDR) + .with_session_bytes(do_nothing_without_memory(), RuntimeArgs::new()) + .with_standard_payment(runtime_args! 
{ + ARG_AMOUNT => *DEFAULT_PAYMENT, + }) + .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) + .with_deploy_hash([42; 32]) + .build(); + + let exec_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); builder.exec(exec_request).commit(); let error = builder - .get_exec_results() - .last() + .get_last_exec_result() .expect("should have results") - .get(0) - .expect("should have first result") - .as_error() + .error() + .cloned() .expect("should have error"); assert!( diff --git a/execution_engine_testing/tests/src/test/regression/ee_1152.rs b/execution_engine_testing/tests/src/test/regression/ee_1152.rs index 094f7b1490..c435de9f77 100644 --- a/execution_engine_testing/tests/src/test/regression/ee_1152.rs +++ b/execution_engine_testing/tests/src/test/regression/ee_1152.rs @@ -2,22 +2,17 @@ use num_traits::Zero; use once_cell::sync::Lazy; use casper_engine_test_support::{ - internal::{ - utils, ExecuteRequestBuilder, InMemoryWasmTestBuilder, StepRequestBuilder, - DEFAULT_ACCOUNTS, DEFAULT_GENESIS_TIMESTAMP_MILLIS, DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS, - TIMESTAMP_MILLIS_INCREMENT, - }, - AccountHash, DEFAULT_ACCOUNT_ADDR, DEFAULT_ACCOUNT_INITIAL_BALANCE, - MINIMUM_ACCOUNT_CREATION_BALANCE, -}; -use casper_execution_engine::{ - core::engine_state::{genesis::GenesisValidator, GenesisAccount, RewardItem}, - shared::motes::Motes, + utils, ExecuteRequestBuilder, LmdbWasmTestBuilder, StepRequestBuilder, DEFAULT_ACCOUNTS, + DEFAULT_ACCOUNT_ADDR, DEFAULT_ACCOUNT_INITIAL_BALANCE, DEFAULT_GENESIS_TIMESTAMP_MILLIS, + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS, MINIMUM_ACCOUNT_CREATION_BALANCE, + TIMESTAMP_MILLIS_INCREMENT, }; +use casper_execution_engine::engine_state::engine_config::DEFAULT_MINIMUM_DELEGATION_AMOUNT; use casper_types::{ + account::AccountHash, runtime_args, - system::auction::{self, DelegationRate, BLOCK_REWARD, INITIAL_ERA_ID}, - ProtocolVersion, PublicKey, RuntimeArgs, SecretKey, U512, + system::auction::{self, DelegationRate, INITIAL_ERA_ID}, + 
GenesisAccount, GenesisValidator, Motes, ProtocolVersion, PublicKey, SecretKey, U512, }; const CONTRACT_TRANSFER_TO_ACCOUNT: &str = "transfer_to_account_u512.wasm"; @@ -34,7 +29,7 @@ static DELEGATOR_1: Lazy = Lazy::new(|| PublicKey::from(&*DELEGATOR_1 static DELEGATOR_1_ADDR: Lazy = Lazy::new(|| AccountHash::from(&*DELEGATOR_1)); const VALIDATOR_STAKE: u64 = 1_000_000_000; -const DELEGATE_AMOUNT: u64 = 1_234_567; +const DELEGATE_AMOUNT: u64 = 1_234_567 + DEFAULT_MINIMUM_DELEGATION_AMOUNT; #[ignore] #[test] @@ -42,17 +37,17 @@ fn should_run_ee_1152_regression_test() { let accounts = { let validator_1 = GenesisAccount::account( VALIDATOR_1.clone(), - Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE.into()), + Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE), Some(GenesisValidator::new( - Motes::new(VALIDATOR_STAKE.into()), + Motes::new(VALIDATOR_STAKE), DelegationRate::zero(), )), ); let validator_2 = GenesisAccount::account( DELEGATOR_1.clone(), - Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE.into()), + Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE), Some(GenesisValidator::new( - Motes::new(VALIDATOR_STAKE.into()), + Motes::new(VALIDATOR_STAKE), DelegationRate::zero(), )), ); @@ -81,9 +76,9 @@ fn should_run_ee_1152_regression_test() { ) .build(); - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - builder.run_genesis(&run_genesis_request); + builder.run_genesis(run_genesis_request); builder.exec(fund_request_1).commit().expect_success(); builder.exec(fund_request_2).commit().expect_success(); @@ -137,7 +132,7 @@ fn should_run_ee_1152_regression_test() { assert!(!era_validators.is_empty()); - let (era_id, trusted_era_validators) = era_validators + let (era_id, _) = era_validators .into_iter() .last() .expect("should have last element"); @@ -145,17 +140,13 @@ fn should_run_ee_1152_regression_test() { builder.exec(undelegate_request).expect_success().commit(); - let mut step_request = StepRequestBuilder::new() + let 
step_request = StepRequestBuilder::new() .with_parent_state_hash(builder.get_post_state_hash()) .with_protocol_version(ProtocolVersion::V1_0_0) // Next era id is used for returning future era validators, which we don't need to inspect // in this test. - .with_next_era_id(era_id); - - for (public_key, _stake) in trusted_era_validators.clone().into_iter() { - let reward_amount = BLOCK_REWARD / trusted_era_validators.len() as u64; - step_request = step_request.with_reward_item(RewardItem::new(public_key, reward_amount)); - } + .with_next_era_id(era_id) + .with_era_end_timestamp_millis(timestamp_millis); builder.step(step_request.build()); diff --git a/execution_engine_testing/tests/src/test/regression/ee_1160.rs b/execution_engine_testing/tests/src/test/regression/ee_1160.rs index 259f8c237b..e7e977a499 100644 --- a/execution_engine_testing/tests/src/test/regression/ee_1160.rs +++ b/execution_engine_testing/tests/src/test/regression/ee_1160.rs @@ -1,70 +1,38 @@ use casper_engine_test_support::{ - internal::{ - DeployItemBuilder, ExecuteRequestBuilder, InMemoryWasmTestBuilder, - DEFAULT_RUN_GENESIS_REQUEST, - }, - AccountHash, DEFAULT_ACCOUNT_ADDR, DEFAULT_ACCOUNT_INITIAL_BALANCE, + LmdbWasmTestBuilder, TransferRequestBuilder, DEFAULT_ACCOUNT_ADDR, + DEFAULT_ACCOUNT_INITIAL_BALANCE, LOCAL_GENESIS_REQUEST, }; -use casper_execution_engine::{ - core::engine_state::WASMLESS_TRANSFER_FIXED_GAS_PRICE, - shared::{gas::Gas, motes::Motes}, - storage::protocol_data::DEFAULT_WASMLESS_TRANSFER_COST, -}; -use casper_types::{runtime_args, system::mint, RuntimeArgs, U512}; +use casper_types::{account::AccountHash, U512}; const ACCOUNT_1_ADDR: AccountHash = AccountHash::new([1u8; 32]); #[ignore] #[test] fn ee_1160_wasmless_transfer_should_empty_account() { - let wasmless_transfer_gas_cost = Gas::from(DEFAULT_WASMLESS_TRANSFER_COST); - let wasmless_transfer_cost = Motes::from_gas( - wasmless_transfer_gas_cost, - WASMLESS_TRANSFER_FIXED_GAS_PRICE, - ) - .expect("gas overflow"); - - let 
transfer_amount = - U512::from(DEFAULT_ACCOUNT_INITIAL_BALANCE) - wasmless_transfer_cost.value(); + let transfer_amount = U512::from(DEFAULT_ACCOUNT_INITIAL_BALANCE); - let mut builder = InMemoryWasmTestBuilder::default(); - builder.run_genesis(&*DEFAULT_RUN_GENESIS_REQUEST); + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); let default_account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) + .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR) .expect("should get default_account"); - let no_wasm_transfer_request_1 = { - let wasmless_transfer_args = runtime_args! { - mint::ARG_TARGET => ACCOUNT_1_ADDR, - mint::ARG_AMOUNT => transfer_amount, - mint::ARG_ID => >::None - }; - - let deploy_item = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_empty_payment_bytes(runtime_args! {}) - .with_transfer_args(wasmless_transfer_args) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) - .build(); - ExecuteRequestBuilder::from_deploy_item(deploy_item).build() - }; - + let no_wasm_transfer_request_1 = + TransferRequestBuilder::new(transfer_amount, ACCOUNT_1_ADDR).build(); builder - .exec(no_wasm_transfer_request_1) - .expect_success() - .commit(); + .transfer_and_commit(no_wasm_transfer_request_1) + .expect_success(); - let last_result = builder.get_exec_result(0).unwrap().clone(); - let last_result = &last_result[0]; + let last_result = builder.get_exec_result_owned(0).unwrap(); - assert!(last_result.as_error().is_none(), "{:?}", last_result); + assert!(last_result.error().is_none(), "{:?}", last_result); assert!(!last_result.transfers().is_empty()); let default_account_balance_after = builder.get_purse_balance(default_account.main_purse()); let account_1 = builder - .get_account(ACCOUNT_1_ADDR) + .get_entity_by_account_hash(ACCOUNT_1_ADDR) .expect("should get default_account"); let account_1_balance = builder.get_purse_balance(account_1.main_purse()); @@ -76,56 +44,20 @@ fn 
ee_1160_wasmless_transfer_should_empty_account() { #[test] fn ee_1160_transfer_larger_than_balance_should_fail() { let transfer_amount = U512::from(DEFAULT_ACCOUNT_INITIAL_BALANCE) - - U512::from(DEFAULT_WASMLESS_TRANSFER_COST) // One above the available balance to transfer should raise an InsufficientPayment already + U512::one(); - let mut builder = InMemoryWasmTestBuilder::default(); - builder.run_genesis(&*DEFAULT_RUN_GENESIS_REQUEST); + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); - let default_account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) - .expect("should get default_account"); + let no_wasm_transfer_request_1 = + TransferRequestBuilder::new(transfer_amount, ACCOUNT_1_ADDR).build(); + builder.transfer_and_commit(no_wasm_transfer_request_1); - let balance_before = builder.get_purse_balance(default_account.main_purse()); - - let no_wasm_transfer_request_1 = { - let wasmless_transfer_args = runtime_args! { - mint::ARG_TARGET => ACCOUNT_1_ADDR, - mint::ARG_AMOUNT => transfer_amount, - mint::ARG_ID => >::None - }; - - let deploy_item = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_empty_payment_bytes(runtime_args! 
{}) - .with_transfer_args(wasmless_transfer_args) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) - .build(); - ExecuteRequestBuilder::from_deploy_item(deploy_item).build() - }; - - builder.exec(no_wasm_transfer_request_1).commit(); - - let balance_after = builder.get_purse_balance(default_account.main_purse()); - - let wasmless_transfer_gas_cost = Gas::from(DEFAULT_WASMLESS_TRANSFER_COST); - let wasmless_transfer_motes = Motes::from_gas( - wasmless_transfer_gas_cost, - WASMLESS_TRANSFER_FIXED_GAS_PRICE, - ) - .expect("gas overflow"); - - let last_result = builder.get_exec_result(0).unwrap().clone(); - let last_result = &last_result[0]; - assert_eq!( - balance_before - wasmless_transfer_motes.value(), - balance_after - ); - assert_eq!(last_result.cost(), wasmless_transfer_gas_cost); + let last_result = builder.get_exec_result_owned(0).unwrap(); assert!( - last_result.as_error().is_some(), + last_result.error().is_some(), "Expected error but last result is {:?}", last_result ); @@ -140,53 +72,17 @@ fn ee_1160_transfer_larger_than_balance_should_fail() { fn ee_1160_large_wasmless_transfer_should_avoid_overflow() { let transfer_amount = U512::max_value(); - let mut builder = InMemoryWasmTestBuilder::default(); - builder.run_genesis(&*DEFAULT_RUN_GENESIS_REQUEST); + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); - let default_account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) - .expect("should get default_account"); - - let balance_before = builder.get_purse_balance(default_account.main_purse()); - - let no_wasm_transfer_request_1 = { - let wasmless_transfer_args = runtime_args! { - mint::ARG_TARGET => ACCOUNT_1_ADDR, - mint::ARG_AMOUNT => transfer_amount, - mint::ARG_ID => >::None - }; - - let deploy_item = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_empty_payment_bytes(runtime_args! 
{}) - .with_transfer_args(wasmless_transfer_args) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) - .build(); - ExecuteRequestBuilder::from_deploy_item(deploy_item).build() - }; - - builder.exec(no_wasm_transfer_request_1).commit(); - - let balance_after = builder.get_purse_balance(default_account.main_purse()); - - let wasmless_transfer_gas_cost = Gas::from(DEFAULT_WASMLESS_TRANSFER_COST); - let wasmless_transfer_motes = Motes::from_gas( - wasmless_transfer_gas_cost, - WASMLESS_TRANSFER_FIXED_GAS_PRICE, - ) - .expect("gas overflow"); - - assert_eq!( - balance_before - wasmless_transfer_motes.value(), - balance_after - ); + let no_wasm_transfer_request_1 = + TransferRequestBuilder::new(transfer_amount, ACCOUNT_1_ADDR).build(); + builder.transfer_and_commit(no_wasm_transfer_request_1); - let last_result = builder.get_exec_result(0).unwrap().clone(); - let last_result = &last_result[0]; - assert_eq!(last_result.cost(), wasmless_transfer_gas_cost); + let last_result = builder.get_exec_result_owned(0).unwrap(); assert!( - last_result.as_error().is_some(), + last_result.error().is_some(), "Expected error but last result is {:?}", last_result ); diff --git a/execution_engine_testing/tests/src/test/regression/ee_1163.rs b/execution_engine_testing/tests/src/test/regression/ee_1163.rs index 3f89a1b300..5ca0eee208 100644 --- a/execution_engine_testing/tests/src/test/regression/ee_1163.rs +++ b/execution_engine_testing/tests/src/test/regression/ee_1163.rs @@ -1,69 +1,35 @@ use casper_engine_test_support::{ - internal::{ - DeployItemBuilder, ExecuteRequestBuilder, InMemoryWasmTestBuilder, DEFAULT_GAS_PRICE, - DEFAULT_RUN_GENESIS_REQUEST, - }, - AccountHash, DEFAULT_ACCOUNT_ADDR, -}; -use casper_execution_engine::{ - core::{ - engine_state::{Error, ExecuteRequest, WASMLESS_TRANSFER_FIXED_GAS_PRICE}, - execution, - }, - shared::{gas::Gas, motes::Motes}, - storage::protocol_data::DEFAULT_WASMLESS_TRANSFER_COST, + LmdbWasmTestBuilder, TransferRequestBuilder, 
DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST, }; +use casper_execution_engine::engine_state::Error; +use casper_storage::{data_access_layer::TransferRequest, system::transfer::TransferError}; use casper_types::{ - runtime_args, - system::{handle_payment, mint}, - ApiError, RuntimeArgs, U512, + account::AccountHash, system::handle_payment, Gas, MintCosts, Motes, RuntimeArgs, SystemConfig, + U512, }; -const PRIORITIZED_GAS_PRICE: u64 = DEFAULT_GAS_PRICE * 7; const ACCOUNT_1_ADDR: AccountHash = AccountHash::new([1u8; 32]); -fn setup() -> InMemoryWasmTestBuilder { - let mut builder = InMemoryWasmTestBuilder::default(); - builder.run_genesis(&*DEFAULT_RUN_GENESIS_REQUEST); +fn setup() -> LmdbWasmTestBuilder { + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); builder } -fn should_charge_for_user_error( - builder: &mut InMemoryWasmTestBuilder, - request: ExecuteRequest, +fn should_enforce_limit_for_user_error( + builder: &mut LmdbWasmTestBuilder, + request: TransferRequest, ) -> Error { - let transfer_cost = Gas::from(DEFAULT_WASMLESS_TRANSFER_COST); - let transfer_cost_motes = - Motes::from_gas(transfer_cost, WASMLESS_TRANSFER_FIXED_GAS_PRICE).expect("gas overflow"); + let transfer_cost = Gas::from(SystemConfig::default().mint_costs().transfer); - let default_account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) - .expect("should have default account"); - let main_purse = default_account.main_purse(); - let purse_balance_before = builder.get_purse_balance(main_purse); - let proposer_purse_balance_before = builder.get_proposer_purse_balance(); - - builder.exec(request).commit(); - - let purse_balance_after = builder.get_purse_balance(main_purse); - let proposer_purse_balance_after = builder.get_proposer_purse_balance(); + builder.transfer_and_commit(request); let response = builder - .get_exec_result(0) - .expect("should have result") - .get(0) - .expect("should have first result"); - assert_eq!(response.cost(), 
transfer_cost); - assert_eq!( - purse_balance_before - transfer_cost_motes.value(), - purse_balance_after - ); - assert_eq!( - proposer_purse_balance_before + transfer_cost_motes.value(), - proposer_purse_balance_after - ); + .get_exec_result_owned(0) + .expect("should have result"); - // Verify handle payment postconditions + assert_eq!(response.limit(), transfer_cost); + assert_eq!(response.consumed(), transfer_cost); let handle_payment = builder.get_handle_payment_contract(); let payment_purse = handle_payment @@ -76,151 +42,79 @@ fn should_charge_for_user_error( assert_eq!(payment_purse_balance, U512::zero()); - response.as_error().cloned().expect("should have error") + response.error().cloned().expect("should have error") } #[ignore] #[test] -fn shouldnt_consider_gas_price_when_calculating_minimum_balance() { - let id: Option = None; - - let create_account_request = { - let transfer_amount = Motes::new(U512::from(DEFAULT_WASMLESS_TRANSFER_COST) + U512::one()); - - let transfer_args = runtime_args! { - - mint::ARG_TARGET => ACCOUNT_1_ADDR, - mint::ARG_AMOUNT => transfer_amount.value(), - mint::ARG_ID => id, - }; - - ExecuteRequestBuilder::transfer(*DEFAULT_ACCOUNT_ADDR, transfer_args).build() - }; - - let transfer_request = { - let transfer_amount = Motes::new(U512::one()); - - let transfer_args = runtime_args! { - mint::ARG_TARGET => *DEFAULT_ACCOUNT_ADDR, - mint::ARG_AMOUNT => transfer_amount.value(), - mint::ARG_ID => id, - }; - - let deploy_item = DeployItemBuilder::new() - .with_address(ACCOUNT_1_ADDR) - .with_empty_payment_bytes(runtime_args! 
{}) - .with_transfer_args(transfer_args) - .with_authorization_keys(&[ACCOUNT_1_ADDR]) - .with_deploy_hash([42; 32]) - .with_gas_price(PRIORITIZED_GAS_PRICE) - .build(); - ExecuteRequestBuilder::from_deploy_item(deploy_item).build() - }; - - let mut builder = setup(); - builder - .exec(create_account_request) - .expect_success() - .commit(); - builder.exec(transfer_request).expect_success().commit(); -} - -#[ignore] -#[test] -fn should_properly_charge_fixed_cost_with_nondefault_gas_price() { - let transfer_cost = Gas::from(DEFAULT_WASMLESS_TRANSFER_COST); +fn should_enforce_system_host_gas_limit() { // implies 1:1 gas/motes conversion rate regardless of gas price - let transfer_cost_motes = Motes::new(U512::from(DEFAULT_WASMLESS_TRANSFER_COST)); - let transfer_amount = Motes::new(U512::one()); - let id: Option = None; - - let transfer_args = runtime_args! { - mint::ARG_TARGET => ACCOUNT_1_ADDR, - mint::ARG_AMOUNT => transfer_amount.value(), - mint::ARG_ID => id, - }; - let transfer_request = { - let deploy_item = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_empty_payment_bytes(runtime_args! 
{}) - .with_transfer_args(transfer_args) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) - .with_deploy_hash([42; 32]) - .with_gas_price(PRIORITIZED_GAS_PRICE) - .build(); - ExecuteRequestBuilder::from_deploy_item(deploy_item).build() - }; + let transfer_request = TransferRequestBuilder::new(transfer_amount.value(), ACCOUNT_1_ADDR) + .with_initiator(*DEFAULT_ACCOUNT_ADDR) + .build(); let mut builder = setup(); let default_account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) + .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR) .expect("should have default account"); let main_purse = default_account.main_purse(); let purse_balance_before = builder.get_purse_balance(main_purse); - let proposer_purse_balance_before = builder.get_proposer_purse_balance(); - builder.exec(transfer_request).commit(); + builder + .transfer_and_commit(transfer_request) + .expect_success(); let purse_balance_after = builder.get_purse_balance(main_purse); - let proposer_purse_balance_after = builder.get_proposer_purse_balance(); + let transfer_cost = Gas::from(MintCosts::default().transfer); let response = builder - .get_exec_result(0) - .expect("should have result") - .get(0) - .expect("should have first result"); - assert_eq!(response.cost(), transfer_cost); + .get_exec_result_owned(0) + .expect("should have result"); assert_eq!( - purse_balance_before - transfer_cost_motes.value() - transfer_amount.value(), - purse_balance_after + response.limit(), + transfer_cost, + "expected actual limit is {}", + transfer_cost ); assert_eq!( - proposer_purse_balance_before + transfer_cost_motes.value(), - proposer_purse_balance_after + purse_balance_before - transfer_amount.value(), + purse_balance_after ); } #[ignore] #[test] -fn should_charge_for_wasmless_transfer_missing_args() { +fn should_detect_wasmless_transfer_missing_args() { let transfer_args = RuntimeArgs::new(); - let transfer_request = - ExecuteRequestBuilder::transfer(*DEFAULT_ACCOUNT_ADDR, transfer_args).build(); + let 
transfer_request = TransferRequestBuilder::new(1, AccountHash::default()) + .with_args(transfer_args) + .build(); let mut builder = setup(); - let error = should_charge_for_user_error(&mut builder, transfer_request); + let error = should_enforce_limit_for_user_error(&mut builder, transfer_request); assert!(matches!( error, - Error::Exec(execution::Error::Revert(ApiError::MissingArgument)) + Error::Transfer(TransferError::MissingArgument) )); } #[ignore] #[test] -fn should_charge_for_wasmless_transfer_invalid_purse() { +fn should_detect_wasmless_transfer_invalid_purse() { let mut builder = setup(); let default_account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) + .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR) .expect("should have default account"); let main_purse = default_account.main_purse(); - let id: Option = None; - - let transfer_args = runtime_args! { - mint::ARG_TARGET => main_purse, - mint::ARG_AMOUNT => U512::one(), - mint::ARG_ID => id, - }; - - let transfer_request = - ExecuteRequestBuilder::transfer(*DEFAULT_ACCOUNT_ADDR, transfer_args).build(); + let transfer_request = TransferRequestBuilder::new(1, main_purse).build(); - let error = should_charge_for_user_error(&mut builder, transfer_request); + let error = should_enforce_limit_for_user_error(&mut builder, transfer_request); assert!(matches!( error, - Error::Exec(execution::Error::Revert(ApiError::InvalidPurse)) + Error::Transfer(TransferError::InvalidPurse) )); } diff --git a/execution_engine_testing/tests/src/test/regression/ee_1174.rs b/execution_engine_testing/tests/src/test/regression/ee_1174.rs index a6e3ffbae2..496f3f7f89 100644 --- a/execution_engine_testing/tests/src/test/regression/ee_1174.rs +++ b/execution_engine_testing/tests/src/test/regression/ee_1174.rs @@ -1,19 +1,16 @@ use casper_engine_test_support::{ - internal::{ - ExecuteRequestBuilder, InMemoryWasmTestBuilder, DEFAULT_ACCOUNT_PUBLIC_KEY, - DEFAULT_RUN_GENESIS_REQUEST, - }, - DEFAULT_ACCOUNT_ADDR, + ExecuteRequestBuilder, 
LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, DEFAULT_ACCOUNT_PUBLIC_KEY, + LOCAL_GENESIS_REQUEST, }; -use casper_execution_engine::core::{engine_state::Error, execution}; +use casper_execution_engine::{engine_state::Error, execution::ExecError}; use casper_types::{ runtime_args, system::{ self, auction::{self, DelegationRate}, }, - ApiError, RuntimeArgs, U512, + ApiError, DEFAULT_MINIMUM_BID_AMOUNT, U512, }; const LARGE_DELEGATION_RATE: DelegationRate = 101; @@ -21,10 +18,10 @@ const LARGE_DELEGATION_RATE: DelegationRate = 101; #[ignore] #[test] fn should_run_ee_1174_delegation_rate_too_high() { - let bid_amount = U512::one(); + let bid_amount = U512::from(DEFAULT_MINIMUM_BID_AMOUNT); - let mut builder = InMemoryWasmTestBuilder::default(); - builder.run_genesis(&*DEFAULT_RUN_GENESIS_REQUEST); + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); let auction = builder.get_auction_contract_hash(); @@ -45,15 +42,13 @@ fn should_run_ee_1174_delegation_rate_too_high() { builder.exec(add_bid_request).commit(); let error = builder - .get_exec_results() - .last() + .get_last_exec_result() .expect("should have results") - .get(0) - .expect("should have first result") - .as_error() + .error() + .cloned() .expect("should have error"); assert!(matches!( error, - Error::Exec(execution::Error::Revert(ApiError::AuctionError(auction_error))) if *auction_error == system::auction::Error::DelegationRateTooLarge as u8)); + Error::Exec(ExecError::Revert(ApiError::AuctionError(auction_error))) if auction_error == system::auction::Error::DelegationRateTooLarge as u8)); } diff --git a/execution_engine_testing/tests/src/test/regression/ee_1217.rs b/execution_engine_testing/tests/src/test/regression/ee_1217.rs new file mode 100644 index 0000000000..1e57438c13 --- /dev/null +++ b/execution_engine_testing/tests/src/test/regression/ee_1217.rs @@ -0,0 +1,707 @@ +use casper_engine_test_support::{ + ExecuteRequestBuilder, LmdbWasmTestBuilder, 
DEFAULT_ACCOUNT_ADDR, DEFAULT_ACCOUNT_PUBLIC_KEY, + LOCAL_GENESIS_REQUEST, MINIMUM_ACCOUNT_CREATION_BALANCE, +}; +use casper_execution_engine::{ + engine_state::{engine_config::DEFAULT_MINIMUM_DELEGATION_AMOUNT, Error as CoreError}, + execution::ExecError, +}; +use casper_types::{ + runtime_args, system::auction, ApiError, PublicKey, SecretKey, DEFAULT_MINIMUM_BID_AMOUNT, U512, +}; +use once_cell::sync::Lazy; + +const CONTRACT_REGRESSION: &str = "ee_1217_regression.wasm"; +const CONTRACT_ADD_BID: &str = "add_bid.wasm"; +const CONTRACT_WITHDRAW_BID: &str = "withdraw_bid.wasm"; + +const PACKAGE_NAME: &str = "call_auction"; +const CONTRACT_ADD_BID_ENTRYPOINT_SESSION: &str = "add_bid_session"; +const CONTRACT_ADD_BID_ENTRYPOINT_CONTRACT: &str = "add_bid_contract"; +const CONTRACT_WITHDRAW_BID_ENTRYPOINT_SESSION: &str = "withdraw_bid_session"; +const CONTRACT_WITHDRAW_BID_ENTRYPOINT_CONTRACT: &str = "withdraw_bid_contract"; +const CONTRACT_DELEGATE_ENTRYPOINT_SESSION: &str = "delegate_session"; +const CONTRACT_DELEGATE_ENTRYPOINT_CONTRACT: &str = "delegate_contract"; +const CONTRACT_UNDELEGATE_ENTRYPOINT_SESSION: &str = "undelegate_session"; +const CONTRACT_UNDELEGATE_ENTRYPOINT_CONTRACT: &str = "undelegate_contract"; +const CONTRACT_ACTIVATE_BID_ENTRYPOINT_CONTRACT: &str = "activate_bid_contract"; +const CONTRACT_ACTIVATE_BID_ENTRYPOINT_SESSION: &str = "activate_bid_session"; + +static VALIDATOR_PUBLIC_KEY: Lazy = Lazy::new(|| { + let secret_key = SecretKey::ed25519_from_bytes([33; SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) +}); + +#[ignore] +#[test] +fn should_fail_to_add_bid_from_stored_session_code() { + let default_public_key_arg = DEFAULT_ACCOUNT_PUBLIC_KEY.clone(); + + let store_call_auction_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_REGRESSION, + runtime_args! 
{}, + ) + .build(); + + let add_bid_request = ExecuteRequestBuilder::versioned_contract_call_by_name( + *DEFAULT_ACCOUNT_ADDR, + PACKAGE_NAME, + None, + CONTRACT_ADD_BID_ENTRYPOINT_SESSION, + runtime_args! { + auction::ARG_PUBLIC_KEY => default_public_key_arg, + }, + ) + .build(); + + let mut builder = LmdbWasmTestBuilder::default(); + + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + builder + .exec(store_call_auction_request) + .commit() + .expect_success(); + + builder.exec(add_bid_request); + + let error = builder.get_error().expect("should have returned an error"); + assert!( + matches!(error, CoreError::Exec(ExecError::Revert(ApiError::AuctionError( + auction_error, + ))) if auction_error == auction::Error::InvalidContext as u8) + ); +} + +#[ignore] +#[test] +fn should_fail_to_add_bid_from_stored_contract_code() { + let default_public_key_arg = DEFAULT_ACCOUNT_PUBLIC_KEY.clone(); + + let store_call_auction_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_REGRESSION, + runtime_args! {}, + ) + .build(); + + let add_bid_request = ExecuteRequestBuilder::versioned_contract_call_by_name( + *DEFAULT_ACCOUNT_ADDR, + PACKAGE_NAME, + None, + CONTRACT_ADD_BID_ENTRYPOINT_CONTRACT, + runtime_args! 
{ + auction::ARG_PUBLIC_KEY => default_public_key_arg, + }, + ) + .build(); + + let mut builder = LmdbWasmTestBuilder::default(); + + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + builder + .exec(store_call_auction_request) + .commit() + .expect_success(); + + builder.exec(add_bid_request); + + let error = builder.get_error().expect("should have returned an error"); + assert!( + matches!(error, CoreError::Exec(ExecError::Revert(ApiError::AuctionError( + auction_error, + ))) if auction_error == auction::Error::InvalidContext as u8) + ); +} + +#[ignore] +#[test] +fn should_fail_to_withdraw_bid_from_stored_session_code() { + let default_public_key_arg = DEFAULT_ACCOUNT_PUBLIC_KEY.clone(); + + let add_bid_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_ADD_BID, + runtime_args! { + auction::ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_BID_AMOUNT), // smaller amount results in Error::BondTooSmall + auction::ARG_PUBLIC_KEY => default_public_key_arg.clone(), + auction::ARG_DELEGATION_RATE => 0u8, + }, + ) + .build(); + + let store_call_auction_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_REGRESSION, + runtime_args! {}, + ) + .build(); + + let withdraw_bid_request = ExecuteRequestBuilder::versioned_contract_call_by_name( + *DEFAULT_ACCOUNT_ADDR, + PACKAGE_NAME, + None, + CONTRACT_WITHDRAW_BID_ENTRYPOINT_SESSION, + runtime_args! 
{ + auction::ARG_PUBLIC_KEY => default_public_key_arg, + }, + ) + .build(); + + let mut builder = LmdbWasmTestBuilder::default(); + + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + builder.exec(add_bid_request).commit().expect_success(); + + builder + .exec(store_call_auction_request) + .commit() + .expect_success(); + + builder.exec(withdraw_bid_request); + + let error = builder.get_error().expect("should have returned an error"); + assert!( + matches!(error, CoreError::Exec(ExecError::Revert(ApiError::AuctionError( + auction_error, + ))) if auction_error == auction::Error::InvalidContext as u8) + ); +} + +#[ignore] +#[test] +fn should_fail_to_withdraw_bid_from_stored_contract_code() { + let default_public_key_arg = DEFAULT_ACCOUNT_PUBLIC_KEY.clone(); + + let add_bid_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_ADD_BID, + runtime_args! { + auction::ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_BID_AMOUNT), // smaller amount results in Error::BondTooSmall + auction::ARG_PUBLIC_KEY => default_public_key_arg.clone(), + auction::ARG_DELEGATION_RATE => 0u8, + }, + ) + .build(); + + let store_call_auction_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_REGRESSION, + runtime_args! {}, + ) + .build(); + + let withdraw_bid_request = ExecuteRequestBuilder::versioned_contract_call_by_name( + *DEFAULT_ACCOUNT_ADDR, + PACKAGE_NAME, + None, + CONTRACT_WITHDRAW_BID_ENTRYPOINT_CONTRACT, + runtime_args! 
{ + auction::ARG_PUBLIC_KEY => default_public_key_arg, + }, + ) + .build(); + + let mut builder = LmdbWasmTestBuilder::default(); + + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + builder.exec(add_bid_request).commit().expect_success(); + + builder + .exec(store_call_auction_request) + .commit() + .expect_success(); + + builder.exec(withdraw_bid_request); + + let error = builder.get_error().expect("should have returned an error"); + assert!( + matches!(error, CoreError::Exec(ExecError::Revert(ApiError::AuctionError( + auction_error, + ))) if auction_error == auction::Error::InvalidContext as u8) + ); +} + +#[ignore] +#[test] +fn should_fail_to_delegate_from_stored_session_code() { + let default_public_key_arg = DEFAULT_ACCOUNT_PUBLIC_KEY.clone(); + + let validator_public_key_arg = VALIDATOR_PUBLIC_KEY.clone(); + let validator_addr = VALIDATOR_PUBLIC_KEY.to_account_hash(); + + let validator_fund_request = { + const CONTRACT_TRANSFER_TO_ACCOUNT: &str = "transfer_to_account_u512.wasm"; + const ARG_AMOUNT: &str = "amount"; + const ARG_TARGET: &str = "target"; + + ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => validator_addr, + ARG_AMOUNT => U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE) + }, + ) + .build() + }; + + let add_bid_request = ExecuteRequestBuilder::standard( + VALIDATOR_PUBLIC_KEY.to_account_hash(), + CONTRACT_ADD_BID, + runtime_args! { + auction::ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_BID_AMOUNT), // smaller amount results in Error::BondTooSmall + auction::ARG_PUBLIC_KEY => validator_public_key_arg.clone(), + auction::ARG_DELEGATION_RATE => 0u8, + }, + ) + .build(); + + let store_call_auction_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_REGRESSION, + runtime_args! 
{}, + ) + .build(); + + let delegate_request = ExecuteRequestBuilder::versioned_contract_call_by_name( + *DEFAULT_ACCOUNT_ADDR, + PACKAGE_NAME, + None, + CONTRACT_DELEGATE_ENTRYPOINT_SESSION, + runtime_args! { + auction::ARG_DELEGATOR => default_public_key_arg, + auction::ARG_VALIDATOR => validator_public_key_arg, + }, + ) + .build(); + + let mut builder = LmdbWasmTestBuilder::default(); + + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + builder + .exec(validator_fund_request) + .commit() + .expect_success(); + + builder.exec(add_bid_request).commit().expect_success(); + + builder + .exec(store_call_auction_request) + .commit() + .expect_success(); + + builder.exec(delegate_request); + + let error = builder.get_error().expect("should have returned an error"); + assert!( + matches!(error, CoreError::Exec(ExecError::Revert(ApiError::AuctionError( + auction_error, + ))) if auction_error == auction::Error::InvalidContext as u8) + ); +} + +#[ignore] +#[test] +fn should_fail_to_delegate_from_stored_contract_code() { + let default_public_key_arg = DEFAULT_ACCOUNT_PUBLIC_KEY.clone(); + + let validator_public_key_arg = VALIDATOR_PUBLIC_KEY.clone(); + let validator_addr = VALIDATOR_PUBLIC_KEY.to_account_hash(); + + let validator_fund_request = { + const CONTRACT_TRANSFER_TO_ACCOUNT: &str = "transfer_to_account_u512.wasm"; + const ARG_AMOUNT: &str = "amount"; + const ARG_TARGET: &str = "target"; + + ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => validator_addr, + ARG_AMOUNT => U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE) + }, + ) + .build() + }; + + let add_bid_request = ExecuteRequestBuilder::standard( + VALIDATOR_PUBLIC_KEY.to_account_hash(), + CONTRACT_ADD_BID, + runtime_args! 
{ + auction::ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_BID_AMOUNT), // smaller amount results in Error::BondTooSmall + auction::ARG_PUBLIC_KEY => validator_public_key_arg.clone(), + auction::ARG_DELEGATION_RATE => 0u8, + }, + ) + .build(); + + let store_call_auction_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_REGRESSION, + runtime_args! {}, + ) + .build(); + + let delegate_request = ExecuteRequestBuilder::versioned_contract_call_by_name( + *DEFAULT_ACCOUNT_ADDR, + PACKAGE_NAME, + None, + CONTRACT_DELEGATE_ENTRYPOINT_CONTRACT, + runtime_args! { + auction::ARG_DELEGATOR => default_public_key_arg, + auction::ARG_VALIDATOR => validator_public_key_arg, + }, + ) + .build(); + + let mut builder = LmdbWasmTestBuilder::default(); + + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + builder + .exec(validator_fund_request) + .commit() + .expect_success(); + + builder.exec(add_bid_request).commit().expect_success(); + + builder + .exec(store_call_auction_request) + .commit() + .expect_success(); + + builder.exec(delegate_request); + + let error = builder.get_error().expect("should have returned an error"); + assert!( + matches!(error, CoreError::Exec(ExecError::Revert(ApiError::AuctionError( + auction_error, + ))) if auction_error == auction::Error::InvalidContext as u8) + ); +} + +#[ignore] +#[test] +fn should_fail_to_undelegate_from_stored_session_code() { + let default_public_key_arg = DEFAULT_ACCOUNT_PUBLIC_KEY.clone(); + + let validator_public_key_arg = VALIDATOR_PUBLIC_KEY.clone(); + let validator_addr = VALIDATOR_PUBLIC_KEY.to_account_hash(); + + let validator_fund_request = { + const CONTRACT_TRANSFER_TO_ACCOUNT: &str = "transfer_to_account_u512.wasm"; + const ARG_AMOUNT: &str = "amount"; + const ARG_TARGET: &str = "target"; + + ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! 
{ + ARG_TARGET => validator_addr, + ARG_AMOUNT => U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE) + }, + ) + .build() + }; + + let add_bid_request = ExecuteRequestBuilder::standard( + VALIDATOR_PUBLIC_KEY.to_account_hash(), + CONTRACT_ADD_BID, + runtime_args! { + auction::ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_BID_AMOUNT), // smaller amount results in Error::BondTooSmall + auction::ARG_PUBLIC_KEY => validator_public_key_arg.clone(), + auction::ARG_DELEGATION_RATE => 0u8, + }, + ) + .build(); + + let store_call_auction_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_REGRESSION, + runtime_args! {}, + ) + .build(); + + let mut builder = LmdbWasmTestBuilder::default(); + + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let delegate_request = ExecuteRequestBuilder::contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + builder.get_auction_contract_hash(), + auction::METHOD_DELEGATE, + runtime_args! { + auction::ARG_DELEGATOR => default_public_key_arg.clone(), + auction::ARG_VALIDATOR => validator_public_key_arg.clone(), + auction::ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_DELEGATION_AMOUNT), + }, + ) + .build(); + + let undelegate_request = ExecuteRequestBuilder::versioned_contract_call_by_name( + *DEFAULT_ACCOUNT_ADDR, + PACKAGE_NAME, + None, + CONTRACT_UNDELEGATE_ENTRYPOINT_SESSION, + runtime_args! 
{ + auction::ARG_DELEGATOR => default_public_key_arg, + auction::ARG_VALIDATOR => validator_public_key_arg, + }, + ) + .build(); + + builder + .exec(validator_fund_request) + .commit() + .expect_success(); + + builder.exec(add_bid_request).commit().expect_success(); + + builder + .exec(store_call_auction_request) + .commit() + .expect_success(); + + builder.exec(delegate_request).commit().expect_success(); + + builder.exec(undelegate_request).commit(); + + let error = builder.get_error().expect("should have returned an error"); + assert!( + matches!(error, CoreError::Exec(ExecError::Revert(ApiError::AuctionError( + auction_error, + ))) if auction_error == auction::Error::InvalidContext as u8) + ); +} + +#[ignore] +#[test] +fn should_fail_to_undelegate_from_stored_contract_code() { + let default_public_key_arg = DEFAULT_ACCOUNT_PUBLIC_KEY.clone(); + + let validator_public_key_arg = VALIDATOR_PUBLIC_KEY.clone(); + let validator_addr = VALIDATOR_PUBLIC_KEY.to_account_hash(); + + let validator_fund_request = { + const CONTRACT_TRANSFER_TO_ACCOUNT: &str = "transfer_to_account_u512.wasm"; + const ARG_AMOUNT: &str = "amount"; + const ARG_TARGET: &str = "target"; + + ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => validator_addr, + ARG_AMOUNT => U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE) + }, + ) + .build() + }; + + let add_bid_request = ExecuteRequestBuilder::standard( + VALIDATOR_PUBLIC_KEY.to_account_hash(), + CONTRACT_ADD_BID, + runtime_args! { + auction::ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_BID_AMOUNT), // smaller amount results in Error::BondTooSmall + auction::ARG_PUBLIC_KEY => validator_public_key_arg.clone(), + auction::ARG_DELEGATION_RATE => 0u8, + }, + ) + .build(); + + let store_call_auction_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_REGRESSION, + runtime_args! 
{}, + ) + .build(); + + let mut builder = LmdbWasmTestBuilder::default(); + + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let delegate_request = ExecuteRequestBuilder::contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + builder.get_auction_contract_hash(), + auction::METHOD_DELEGATE, + runtime_args! { + auction::ARG_DELEGATOR => default_public_key_arg.clone(), + auction::ARG_VALIDATOR => validator_public_key_arg.clone(), + auction::ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_DELEGATION_AMOUNT), + }, + ) + .build(); + + let undelegate_request = ExecuteRequestBuilder::versioned_contract_call_by_name( + *DEFAULT_ACCOUNT_ADDR, + PACKAGE_NAME, + None, + CONTRACT_UNDELEGATE_ENTRYPOINT_CONTRACT, + runtime_args! { + auction::ARG_DELEGATOR => default_public_key_arg, + auction::ARG_VALIDATOR => validator_public_key_arg, + }, + ) + .build(); + + builder + .exec(validator_fund_request) + .commit() + .expect_success(); + + builder.exec(add_bid_request).commit().expect_success(); + + builder + .exec(store_call_auction_request) + .commit() + .expect_success(); + + builder.exec(delegate_request).commit().expect_success(); + + builder.exec(undelegate_request); + + let error = builder.get_error().expect("should have returned an error"); + assert!( + matches!(error, CoreError::Exec(ExecError::Revert(ApiError::AuctionError( + auction_error, + ))) if auction_error == auction::Error::InvalidContext as u8) + ); +} + +#[ignore] +#[test] +fn should_fail_to_activate_bid_from_stored_session_code() { + let default_public_key_arg = DEFAULT_ACCOUNT_PUBLIC_KEY.clone(); + + let add_bid_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_ADD_BID, + runtime_args! 
{ + auction::ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_BID_AMOUNT), // smaller amount results in Error::BondTooSmall + auction::ARG_PUBLIC_KEY => default_public_key_arg.clone(), + auction::ARG_DELEGATION_RATE => 0u8, + }, + ) + .build(); + + let withdraw_bid_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_WITHDRAW_BID, + runtime_args! { + auction::ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_BID_AMOUNT), // smaller amount results in Error::BondTooSmall + auction::ARG_PUBLIC_KEY => default_public_key_arg.clone(), + }, + ) + .build(); + + let store_call_auction_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_REGRESSION, + runtime_args! {}, + ) + .build(); + + let mut builder = LmdbWasmTestBuilder::default(); + + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + builder.exec(add_bid_request).commit().expect_success(); + builder.exec(withdraw_bid_request).commit().expect_success(); + + builder + .exec(store_call_auction_request) + .commit() + .expect_success(); + + let activate_bid_request = ExecuteRequestBuilder::versioned_contract_call_by_name( + *DEFAULT_ACCOUNT_ADDR, + PACKAGE_NAME, + None, + CONTRACT_ACTIVATE_BID_ENTRYPOINT_SESSION, + runtime_args! { + auction::ARG_VALIDATOR => default_public_key_arg, + }, + ) + .build(); + + builder.exec(activate_bid_request); + + let error = builder.get_error().expect("should have returned an error"); + assert!( + matches!(error, CoreError::Exec(ExecError::Revert(ApiError::AuctionError( + auction_error, + ))) if auction_error == auction::Error::InvalidContext as u8) + ); +} + +#[ignore] +#[test] +fn should_fail_to_activate_bid_from_stored_contract_code() { + let default_public_key_arg = DEFAULT_ACCOUNT_PUBLIC_KEY.clone(); + + let add_bid_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_ADD_BID, + runtime_args! 
{ + auction::ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_BID_AMOUNT), // smaller amount results in Error::BondTooSmall + auction::ARG_PUBLIC_KEY => default_public_key_arg.clone(), + auction::ARG_DELEGATION_RATE => 0u8, + }, + ) + .build(); + + let withdraw_bid_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_WITHDRAW_BID, + runtime_args! { + auction::ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_BID_AMOUNT), // smaller amount results in Error::BondTooSmall + auction::ARG_PUBLIC_KEY => default_public_key_arg.clone(), + }, + ) + .build(); + + let store_call_auction_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_REGRESSION, + runtime_args! {}, + ) + .build(); + + let mut builder = LmdbWasmTestBuilder::default(); + + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + builder.exec(add_bid_request).commit().expect_success(); + builder.exec(withdraw_bid_request).commit().expect_success(); + + builder + .exec(store_call_auction_request) + .commit() + .expect_success(); + + let activate_bid_request = ExecuteRequestBuilder::versioned_contract_call_by_name( + *DEFAULT_ACCOUNT_ADDR, + PACKAGE_NAME, + None, + CONTRACT_ACTIVATE_BID_ENTRYPOINT_CONTRACT, + runtime_args! 
{ + auction::ARG_VALIDATOR => default_public_key_arg, + }, + ) + .build(); + + builder.exec(activate_bid_request); + + let error = builder.get_error().expect("should have returned an error"); + assert!( + matches!(error, CoreError::Exec(ExecError::Revert(ApiError::AuctionError( + auction_error, + ))) if auction_error == auction::Error::InvalidContext as u8) + ); +} diff --git a/execution_engine_testing/tests/src/test/regression/ee_1225.rs b/execution_engine_testing/tests/src/test/regression/ee_1225.rs new file mode 100644 index 0000000000..aac90cd7a8 --- /dev/null +++ b/execution_engine_testing/tests/src/test/regression/ee_1225.rs @@ -0,0 +1,36 @@ +use casper_engine_test_support::{ + DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, + DEFAULT_PAYMENT, LOCAL_GENESIS_REQUEST, +}; + +use casper_types::{runtime_args, RuntimeArgs}; + +const ARG_AMOUNT: &str = "amount"; +const EE_1225_REGRESSION_CONTRACT: &str = "ee_1225_regression.wasm"; +const DO_NOTHING_CONTRACT: &str = "do_nothing.wasm"; + +#[should_panic(expected = "Finalization")] +#[ignore] +#[allow(unused)] +// #[test] +fn should_run_ee_1225_verify_finalize_payment_invariants() { + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let deploy = DeployItemBuilder::new() + .with_address(*DEFAULT_ACCOUNT_ADDR) + .with_payment_code( + EE_1225_REGRESSION_CONTRACT, + runtime_args! 
{ + ARG_AMOUNT => *DEFAULT_PAYMENT, + }, + ) + .with_session_code(DO_NOTHING_CONTRACT, RuntimeArgs::default()) + .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) + .with_deploy_hash([2; 32]) + .build(); + + let exec_request = ExecuteRequestBuilder::from_deploy_item(&deploy).build(); + + builder.exec(exec_request).expect_success().commit(); +} diff --git a/execution_engine_testing/tests/src/test/regression/ee_221.rs b/execution_engine_testing/tests/src/test/regression/ee_221.rs index bac8bf27b9..8dde2173f0 100644 --- a/execution_engine_testing/tests/src/test/regression/ee_221.rs +++ b/execution_engine_testing/tests/src/test/regression/ee_221.rs @@ -1,6 +1,5 @@ use casper_engine_test_support::{ - internal::{ExecuteRequestBuilder, InMemoryWasmTestBuilder, DEFAULT_RUN_GENESIS_REQUEST}, - DEFAULT_ACCOUNT_ADDR, + ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST, }; use casper_types::RuntimeArgs; @@ -18,10 +17,10 @@ fn should_run_ee_221_get_uref_regression_test() { ) .build(); - let _result = InMemoryWasmTestBuilder::default() - .run_genesis(&DEFAULT_RUN_GENESIS_REQUEST) + let mut builder = LmdbWasmTestBuilder::default(); + builder + .run_genesis(LOCAL_GENESIS_REQUEST.clone()) .exec(exec_request) .expect_success() - .commit() - .finish(); + .commit(); } diff --git a/execution_engine_testing/tests/src/test/regression/ee_401.rs b/execution_engine_testing/tests/src/test/regression/ee_401.rs index 684bc16613..1f3c6dca04 100644 --- a/execution_engine_testing/tests/src/test/regression/ee_401.rs +++ b/execution_engine_testing/tests/src/test/regression/ee_401.rs @@ -1,6 +1,5 @@ use casper_engine_test_support::{ - internal::{ExecuteRequestBuilder, InMemoryWasmTestBuilder, DEFAULT_RUN_GENESIS_REQUEST}, - DEFAULT_ACCOUNT_ADDR, + ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST, }; use casper_types::RuntimeArgs; @@ -24,8 +23,8 @@ fn should_execute_contracts_which_provide_extra_urefs() { ) .build(); - let 
mut builder = InMemoryWasmTestBuilder::default(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); builder.exec(exec_request_1).expect_success().commit(); builder.exec(exec_request_2).expect_success().commit(); diff --git a/execution_engine_testing/tests/src/test/regression/ee_441.rs b/execution_engine_testing/tests/src/test/regression/ee_441.rs index d523696843..b0ee4393e5 100644 --- a/execution_engine_testing/tests/src/test/regression/ee_441.rs +++ b/execution_engine_testing/tests/src/test/regression/ee_441.rs @@ -1,11 +1,8 @@ use casper_engine_test_support::{ - internal::{ - DeployItemBuilder, ExecuteRequestBuilder, InMemoryWasmTestBuilder, ARG_AMOUNT, - DEFAULT_PAYMENT, DEFAULT_RUN_GENESIS_REQUEST, - }, - DEFAULT_ACCOUNT_ADDR, + DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder, ARG_AMOUNT, + DEFAULT_ACCOUNT_ADDR, DEFAULT_PAYMENT, LOCAL_GENESIS_REQUEST, }; -use casper_types::{runtime_args, Key, RuntimeArgs, URef}; +use casper_types::{runtime_args, Key, URef}; const EE_441_RNG_STATE: &str = "ee_441_rng_state.wasm"; @@ -19,36 +16,34 @@ fn get_uref(key: Key) -> URef { fn do_pass(pass: &str) -> (URef, URef) { // This test runs a contract that's after every call extends the same key with // more data - let exec_request = { - let deploy = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_empty_payment_bytes(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, }) - .with_session_code( - EE_441_RNG_STATE, - runtime_args! { - "flag" => pass, - }, - ) - .with_deploy_hash([1u8; 32]) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) - .build(); - ExecuteRequestBuilder::from_deploy_item(deploy).build() - }; + let deploy = DeployItemBuilder::new() + .with_address(*DEFAULT_ACCOUNT_ADDR) + .with_standard_payment(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, }) + .with_session_code( + EE_441_RNG_STATE, + runtime_args! 
{ + "flag" => pass, + }, + ) + .with_deploy_hash([1u8; 32]) + .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) + .build(); + let exec_request = ExecuteRequestBuilder::from_deploy_item(&deploy).build(); - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); builder - .run_genesis(&DEFAULT_RUN_GENESIS_REQUEST) + .run_genesis(LOCAL_GENESIS_REQUEST.clone()) .exec(exec_request) .expect_success() .commit(); let account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) .expect("should have account"); ( - get_uref(account.named_keys()["uref1"]), - get_uref(account.named_keys()["uref2"]), + get_uref(*account.named_keys().get("uref1").unwrap()), + get_uref(*account.named_keys().get("uref2").unwrap()), ) } diff --git a/execution_engine_testing/tests/src/test/regression/ee_460.rs b/execution_engine_testing/tests/src/test/regression/ee_460.rs index ce6fa240f2..78f88dd242 100644 --- a/execution_engine_testing/tests/src/test/regression/ee_460.rs +++ b/execution_engine_testing/tests/src/test/regression/ee_460.rs @@ -1,9 +1,9 @@ use casper_engine_test_support::{ - internal::{ExecuteRequestBuilder, InMemoryWasmTestBuilder, DEFAULT_RUN_GENESIS_REQUEST}, - DEFAULT_ACCOUNT_ADDR, + ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST, +}; +use casper_types::{ + addressable_entity::EntityKindTag, execution::TransformKindV2, runtime_args, Key, U512, }; -use casper_execution_engine::shared::transform::Transform; -use casper_types::{runtime_args, RuntimeArgs, U512}; const CONTRACT_EE_460_REGRESSION: &str = "ee_460_regression.wasm"; @@ -18,24 +18,27 @@ fn should_run_ee_460_no_side_effects_on_error_regression() { runtime_args! 
{ ARG_AMOUNT => U512::max_value() }, ) .build(); - let result = InMemoryWasmTestBuilder::default() - .run_genesis(&DEFAULT_RUN_GENESIS_REQUEST) + let mut builder = LmdbWasmTestBuilder::default(); + builder + .run_genesis(LOCAL_GENESIS_REQUEST.clone()) .exec(exec_request_1) .expect_success() - .commit() - .finish(); + .commit(); // In this regression test it is verified that no new urefs are created on the // mint uref, which should mean no new purses are created in case of // transfer error. This is considered sufficient cause to confirm that the // mint uref is left untouched. - let mint_contract_uref = result.builder().get_mint_contract_hash(); + let mint_entity_key = + Key::addressable_entity_key(EntityKindTag::System, builder.get_mint_contract_hash()); - let transforms = &result.builder().get_transforms()[0]; - let mint_transforms = transforms - .get(&mint_contract_uref.into()) + let effects = &builder.get_effects()[0]; + let mint_transforms = effects + .transforms() + .iter() + .find(|transform| transform.key() == &mint_entity_key) // Skips the Identity writes introduced since payment code execution for brevity of the // check - .filter(|&v| v != &Transform::Identity); + .filter(|transform| transform.kind() != &TransformKindV2::Identity); assert!(mint_transforms.is_none()); } diff --git a/execution_engine_testing/tests/src/test/regression/ee_468.rs b/execution_engine_testing/tests/src/test/regression/ee_468.rs index 64c1820afc..44e83f46f9 100644 --- a/execution_engine_testing/tests/src/test/regression/ee_468.rs +++ b/execution_engine_testing/tests/src/test/regression/ee_468.rs @@ -1,8 +1,8 @@ use casper_engine_test_support::{ - internal::{ExecuteRequestBuilder, InMemoryWasmTestBuilder, DEFAULT_RUN_GENESIS_REQUEST}, - DEFAULT_ACCOUNT_ADDR, + ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST, }; -use casper_types::RuntimeArgs; +use casper_execution_engine::{engine_state::Error, execution::ExecError}; +use 
casper_types::{bytesrepr, RuntimeArgs}; const CONTRACT_DESERIALIZE_ERROR: &str = "deserialize_error.wasm"; @@ -15,11 +15,21 @@ fn should_not_fail_deserializing() { RuntimeArgs::new(), ) .build(); - let is_error = InMemoryWasmTestBuilder::default() - .run_genesis(&DEFAULT_RUN_GENESIS_REQUEST) + + let error = LmdbWasmTestBuilder::default() + .run_genesis(LOCAL_GENESIS_REQUEST.clone()) .exec(exec_request) .commit() - .is_error(); + .get_error(); - assert!(is_error); + assert!( + matches!( + error, + Some(Error::Exec(ExecError::BytesRepr( + bytesrepr::Error::EarlyEndOfStream + ))) + ), + "{:?}", + error + ); } diff --git a/execution_engine_testing/tests/src/test/regression/ee_470.rs b/execution_engine_testing/tests/src/test/regression/ee_470.rs index e41e0c99ba..14ff7d5578 100644 --- a/execution_engine_testing/tests/src/test/regression/ee_470.rs +++ b/execution_engine_testing/tests/src/test/regression/ee_470.rs @@ -1,16 +1,23 @@ +use std::sync::Arc; + use casper_engine_test_support::{ - internal::{ExecuteRequestBuilder, InMemoryWasmTestBuilder, DEFAULT_RUN_GENESIS_REQUEST}, - DEFAULT_ACCOUNT_ADDR, + ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST, +}; +use casper_execution_engine::engine_state::engine_config::DEFAULT_ENABLE_ENTITY; +use casper_storage::global_state::{ + state::{lmdb::LmdbGlobalState, StateProvider}, + transaction_source::lmdb::LmdbEnvironment, + trie_store::lmdb::LmdbTrieStore, }; -use casper_execution_engine::storage::global_state::in_memory::InMemoryGlobalState; use casper_types::RuntimeArgs; +use lmdb::DatabaseFlags; const CONTRACT_DO_NOTHING: &str = "do_nothing.wasm"; #[ignore] #[test] fn regression_test_genesis_hash_mismatch() { - let mut builder_base = InMemoryWasmTestBuilder::default(); + let mut builder_base = LmdbWasmTestBuilder::default(); let exec_request_1 = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, @@ -20,22 +27,37 @@ fn regression_test_genesis_hash_mismatch() { .build(); // Step 1. 
- let builder = builder_base.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + let builder = builder_base.run_genesis(LOCAL_GENESIS_REQUEST.clone()); // This is trie's post state hash after calling run_genesis endpoint. // Step 1a) let genesis_run_hash = builder.get_genesis_hash(); - let genesis_transforms = builder.get_genesis_transforms().clone(); + let genesis_transforms = builder.get_genesis_effects().clone(); let empty_root_hash = { - let gs = InMemoryGlobalState::empty().expect("Empty GlobalState."); - gs.empty_root_hash + let gs = { + let tempdir = tempfile::tempdir().expect("should create tempdir"); + let lmdb_environment = LmdbEnvironment::new(tempdir.path(), 1024 * 1024, 32, false) + .expect("should create lmdb environment"); + let lmdb_trie_store = + LmdbTrieStore::new(&lmdb_environment, None, DatabaseFlags::default()) + .expect("should create lmdb trie store"); + + LmdbGlobalState::empty( + Arc::new(lmdb_environment), + Arc::new(lmdb_trie_store), + 6, + DEFAULT_ENABLE_ENTITY, + ) + .expect("Empty GlobalState.") + }; + gs.empty_root() }; // This is trie's post state hash after committing genesis effects on top of // empty trie. Step 1b) let genesis_transforms_hash = builder - .commit_effects(empty_root_hash, genesis_transforms) + .commit_transforms(empty_root_hash, genesis_transforms) .get_post_state_hash(); // They should match. @@ -46,15 +68,15 @@ fn regression_test_genesis_hash_mismatch() { // No step 3. // Step 4. 
- builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); // Step 4a) let second_genesis_run_hash = builder.get_genesis_hash(); - let second_genesis_transforms = builder.get_genesis_transforms().clone(); + let second_genesis_transforms = builder.get_genesis_effects().clone(); // Step 4b) let second_genesis_transforms_hash = builder - .commit_effects(empty_root_hash, second_genesis_transforms) + .commit_transforms(empty_root_hash, second_genesis_transforms) .get_post_state_hash(); assert_eq!(second_genesis_run_hash, second_genesis_transforms_hash); diff --git a/execution_engine_testing/tests/src/test/regression/ee_532.rs b/execution_engine_testing/tests/src/test/regression/ee_532.rs index 6c64bcdb98..6998c3425f 100644 --- a/execution_engine_testing/tests/src/test/regression/ee_532.rs +++ b/execution_engine_testing/tests/src/test/regression/ee_532.rs @@ -1,7 +1,8 @@ -use casper_engine_test_support::internal::{ - ExecuteRequestBuilder, InMemoryWasmTestBuilder, DEFAULT_RUN_GENESIS_REQUEST, +use casper_engine_test_support::{ + ExecuteRequestBuilder, LmdbWasmTestBuilder, LOCAL_GENESIS_REQUEST, }; -use casper_execution_engine::core::engine_state::Error; +use casper_execution_engine::engine_state::Error; +use casper_storage::tracking_copy::TrackingCopyError; use casper_types::{account::AccountHash, RuntimeArgs}; const CONTRACT_EE_532_REGRESSION: &str = "ee_532_regression.wasm"; @@ -9,10 +10,7 @@ const UNKNOWN_ADDR: AccountHash = AccountHash::new([42u8; 32]); #[ignore] #[test] -fn should_run_ee_532_get_uref_regression_test() { - // This test runs a contract that's after every call extends the same key with - // more data - +fn should_run_ee_532_non_existent_account_regression_test() { let exec_request = ExecuteRequestBuilder::standard( UNKNOWN_ADDR, CONTRACT_EE_532_REGRESSION, @@ -20,28 +18,28 @@ fn should_run_ee_532_get_uref_regression_test() { ) .build(); - let result = InMemoryWasmTestBuilder::default() - 
.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST) + let mut builder = LmdbWasmTestBuilder::default(); + builder + .run_genesis(LOCAL_GENESIS_REQUEST.clone()) .exec(exec_request) - .commit() - .finish(); + .commit(); - let deploy_result = result - .builder() - .get_exec_result(0) - .expect("should have exec response") - .get(0) - .expect("should have at least one deploy result"); + let deploy_result = builder + .get_exec_result_owned(0) + .expect("should have exec response"); assert!( deploy_result.has_precondition_failure(), "expected precondition failure" ); - let message = deploy_result.as_error().map(|err| format!("{}", err)); + let message = deploy_result.error().map(|err| format!("{}", err)); assert_eq!( message, - Some(format!("{}", Error::Authorization)), + Some(format!( + "{}", + Error::TrackingCopy(TrackingCopyError::KeyNotFound(UNKNOWN_ADDR.into())) + )), "expected Error::Authorization" ) } diff --git a/execution_engine_testing/tests/src/test/regression/ee_536.rs b/execution_engine_testing/tests/src/test/regression/ee_536.rs index 2b6c5ad122..8761d8667c 100644 --- a/execution_engine_testing/tests/src/test/regression/ee_536.rs +++ b/execution_engine_testing/tests/src/test/regression/ee_536.rs @@ -1,6 +1,5 @@ use casper_engine_test_support::{ - internal::{ExecuteRequestBuilder, InMemoryWasmTestBuilder, DEFAULT_RUN_GENESIS_REQUEST}, - DEFAULT_ACCOUNT_ADDR, + ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST, }; use casper_types::RuntimeArgs; @@ -8,9 +7,7 @@ const CONTRACT_EE_536_REGRESSION: &str = "ee_536_regression.wasm"; #[ignore] #[test] -fn should_run_ee_536_get_uref_regression_test() { - // This test runs a contract that's after every call extends the same key with - // more data +fn should_run_ee_536_associated_account_management_regression() { let exec_request = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, CONTRACT_EE_536_REGRESSION, @@ -18,10 +15,10 @@ fn should_run_ee_536_get_uref_regression_test() { ) 
.build(); - let _result = InMemoryWasmTestBuilder::default() - .run_genesis(&DEFAULT_RUN_GENESIS_REQUEST) + let mut builder = LmdbWasmTestBuilder::default(); + builder + .run_genesis(LOCAL_GENESIS_REQUEST.clone()) .exec(exec_request) .expect_success() - .commit() - .finish(); + .commit(); } diff --git a/execution_engine_testing/tests/src/test/regression/ee_539.rs b/execution_engine_testing/tests/src/test/regression/ee_539.rs index 6e945c6969..f6a0a58424 100644 --- a/execution_engine_testing/tests/src/test/regression/ee_539.rs +++ b/execution_engine_testing/tests/src/test/regression/ee_539.rs @@ -1,8 +1,7 @@ use casper_engine_test_support::{ - internal::{ExecuteRequestBuilder, InMemoryWasmTestBuilder, DEFAULT_RUN_GENESIS_REQUEST}, - DEFAULT_ACCOUNT_ADDR, + ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST, }; -use casper_types::{account::Weight, runtime_args, RuntimeArgs}; +use casper_types::{addressable_entity::Weight, runtime_args}; const CONTRACT_EE_539_REGRESSION: &str = "ee_539_regression.wasm"; const ARG_KEY_MANAGEMENT_THRESHOLD: &str = "key_management_threshold"; @@ -20,10 +19,10 @@ fn should_run_ee_539_serialize_action_thresholds_regression() { ) .build(); - let _result = InMemoryWasmTestBuilder::default() - .run_genesis(&DEFAULT_RUN_GENESIS_REQUEST) + let mut builder = LmdbWasmTestBuilder::default(); + builder + .run_genesis(LOCAL_GENESIS_REQUEST.clone()) .exec(exec_request) .expect_success() - .commit() - .finish(); + .commit(); } diff --git a/execution_engine_testing/tests/src/test/regression/ee_549.rs b/execution_engine_testing/tests/src/test/regression/ee_549.rs index 033179ce97..df1743d9da 100644 --- a/execution_engine_testing/tests/src/test/regression/ee_549.rs +++ b/execution_engine_testing/tests/src/test/regression/ee_549.rs @@ -1,6 +1,5 @@ use casper_engine_test_support::{ - internal::{ExecuteRequestBuilder, InMemoryWasmTestBuilder, DEFAULT_RUN_GENESIS_REQUEST}, - DEFAULT_ACCOUNT_ADDR, + ExecuteRequestBuilder, 
LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST, }; use casper_types::RuntimeArgs; @@ -16,10 +15,10 @@ fn should_run_ee_549_set_refund_regression() { ) .build(); - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); builder - .run_genesis(&DEFAULT_RUN_GENESIS_REQUEST) + .run_genesis(LOCAL_GENESIS_REQUEST.clone()) .exec(exec_request); // Execution should encounter an error because set_refund diff --git a/execution_engine_testing/tests/src/test/regression/ee_550.rs b/execution_engine_testing/tests/src/test/regression/ee_550.rs index a2ecbe7e1a..0e869dc6f6 100644 --- a/execution_engine_testing/tests/src/test/regression/ee_550.rs +++ b/execution_engine_testing/tests/src/test/regression/ee_550.rs @@ -1,11 +1,8 @@ use casper_engine_test_support::{ - internal::{ - DeployItemBuilder, ExecuteRequestBuilder, InMemoryWasmTestBuilder, ARG_AMOUNT, - DEFAULT_PAYMENT, DEFAULT_RUN_GENESIS_REQUEST, - }, - DEFAULT_ACCOUNT_ADDR, + DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder, ARG_AMOUNT, + DEFAULT_ACCOUNT_ADDR, DEFAULT_PAYMENT, LOCAL_GENESIS_REQUEST, }; -use casper_types::{account::AccountHash, runtime_args, RuntimeArgs}; +use casper_types::{account::AccountHash, runtime_args}; const PASS_INIT_REMOVE: &str = "init_remove"; const PASS_TEST_REMOVE: &str = "test_remove"; @@ -27,25 +24,23 @@ fn should_run_ee_550_remove_with_saturated_threshold_regression() { ) .build(); - let exec_request_2 = { - let deploy_item = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_session_code( - CONTRACT_EE_550_REGRESSION, - runtime_args! { ARG_PASS => String::from(PASS_TEST_REMOVE) }, - ) - .with_empty_payment_bytes(runtime_args! 
{ ARG_AMOUNT => *DEFAULT_PAYMENT, }) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR, AccountHash::new(KEY_2_ADDR)]) - .with_deploy_hash(DEPLOY_HASH) - .build(); + let deploy_item = DeployItemBuilder::new() + .with_address(*DEFAULT_ACCOUNT_ADDR) + .with_session_code( + CONTRACT_EE_550_REGRESSION, + runtime_args! { ARG_PASS => String::from(PASS_TEST_REMOVE) }, + ) + .with_standard_payment(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, }) + .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR, AccountHash::new(KEY_2_ADDR)]) + .with_deploy_hash(DEPLOY_HASH) + .build(); - ExecuteRequestBuilder::from_deploy_item(deploy_item).build() - }; + let exec_request_2 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); builder - .run_genesis(&DEFAULT_RUN_GENESIS_REQUEST) + .run_genesis(LOCAL_GENESIS_REQUEST.clone()) .exec(exec_request_1) .expect_success() .commit() @@ -64,25 +59,23 @@ fn should_run_ee_550_update_with_saturated_threshold_regression() { ) .build(); - let exec_request_2 = { - let deploy_item = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_session_code( - CONTRACT_EE_550_REGRESSION, - runtime_args! { ARG_PASS => String::from(PASS_TEST_UPDATE) }, - ) - .with_empty_payment_bytes(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, }) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR, AccountHash::new(KEY_2_ADDR)]) - .with_deploy_hash(DEPLOY_HASH) - .build(); + let deploy_item = DeployItemBuilder::new() + .with_address(*DEFAULT_ACCOUNT_ADDR) + .with_session_code( + CONTRACT_EE_550_REGRESSION, + runtime_args! { ARG_PASS => String::from(PASS_TEST_UPDATE) }, + ) + .with_standard_payment(runtime_args! 
{ ARG_AMOUNT => *DEFAULT_PAYMENT, }) + .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR, AccountHash::new(KEY_2_ADDR)]) + .with_deploy_hash(DEPLOY_HASH) + .build(); - ExecuteRequestBuilder::from_deploy_item(deploy_item).build() - }; + let exec_request_2 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); builder - .run_genesis(&DEFAULT_RUN_GENESIS_REQUEST) + .run_genesis(LOCAL_GENESIS_REQUEST.clone()) .exec(exec_request_1) .expect_success() .commit() diff --git a/execution_engine_testing/tests/src/test/regression/ee_572.rs b/execution_engine_testing/tests/src/test/regression/ee_572.rs index 549481363a..bcb05d48ed 100644 --- a/execution_engine_testing/tests/src/test/regression/ee_572.rs +++ b/execution_engine_testing/tests/src/test/regression/ee_572.rs @@ -1,12 +1,8 @@ use casper_engine_test_support::{ - internal::{ - utils, ExecuteRequestBuilder, InMemoryWasmTestBuilder, DEFAULT_PAYMENT, - DEFAULT_RUN_GENESIS_REQUEST, - }, - DEFAULT_ACCOUNT_ADDR, + ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, DEFAULT_PAYMENT, + LOCAL_GENESIS_REQUEST, }; -use casper_execution_engine::shared::stored_value::StoredValue; -use casper_types::{account::AccountHash, runtime_args, Key, RuntimeArgs, U512}; +use casper_types::{account::AccountHash, runtime_args, Key, U512}; const CONTRACT_CREATE: &str = "ee_572_regression_create.wasm"; const CONTRACT_ESCALATE: &str = "ee_572_regression_escalate.wasm"; @@ -31,7 +27,7 @@ fn should_run_ee_572_regression() { // This test runs a contract that's after every call extends the same key with // more data - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); let exec_request_1 = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, @@ -52,7 +48,7 @@ fn should_run_ee_572_regression() { // Create Accounts builder - .run_genesis(&DEFAULT_RUN_GENESIS_REQUEST) + 
.run_genesis(LOCAL_GENESIS_REQUEST.clone()) .exec(exec_request_1) .expect_success() .commit(); @@ -63,33 +59,36 @@ fn should_run_ee_572_regression() { builder.exec(exec_request_3).expect_success().commit(); let contract: Key = { - let account = match builder.query(None, Key::Account(ACCOUNT_1_ADDR), &[]) { - Ok(StoredValue::Account(account)) => account, - _ => panic!("Could not find account at: {:?}", ACCOUNT_1_ADDR), - }; + let account = builder + .get_entity_with_named_keys_by_account_hash(ACCOUNT_1_ADDR) + .expect("must have default contract package"); *account .named_keys() .get(CREATE) .expect("Could not find contract pointer") }; + // Attempt to forge a new URef with escalated privileges let exec_request_4 = ExecuteRequestBuilder::standard( ACCOUNT_2_ADDR, CONTRACT_ESCALATE, runtime_args! { - "contract_hash" => contract.into_hash().expect("should be hash"), + "contract_hash" => contract.into_entity_hash_addr().expect("should be hash"), }, ) .build(); // Attempt to forge a new URef with escalated privileges - let response = builder + let _ = builder .exec(exec_request_4) - .get_exec_result(3) - .expect("should have a response") - .to_owned(); + .get_exec_result_owned(3) + .expect("should have a response"); - let error_message = utils::get_error_message(response); + let error_message = builder.get_error_message().unwrap(); - assert!(error_message.contains("ForgedReference"), error_message); + assert!( + error_message.contains("Forged reference"), + "{}", + error_message + ); } diff --git a/execution_engine_testing/tests/src/test/regression/ee_584.rs b/execution_engine_testing/tests/src/test/regression/ee_584.rs index 277bf74482..f2d532e8d0 100644 --- a/execution_engine_testing/tests/src/test/regression/ee_584.rs +++ b/execution_engine_testing/tests/src/test/regression/ee_584.rs @@ -1,14 +1,13 @@ use casper_engine_test_support::{ - internal::{ExecuteRequestBuilder, InMemoryWasmTestBuilder, DEFAULT_RUN_GENESIS_REQUEST}, - DEFAULT_ACCOUNT_ADDR, + 
ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST, }; -use casper_execution_engine::shared::{stored_value::StoredValue, transform::Transform}; -use casper_types::RuntimeArgs; +use casper_types::{execution::TransformKindV2, RuntimeArgs, StoredValue}; const CONTRACT_EE_584_REGRESSION: &str = "ee_584_regression.wasm"; #[ignore] -#[test] +#[allow(unused)] +// #[test] fn should_run_ee_584_no_errored_session_transforms() { let exec_request = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, @@ -17,24 +16,21 @@ fn should_run_ee_584_no_errored_session_transforms() { ) .build(); - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); builder - .run_genesis(&DEFAULT_RUN_GENESIS_REQUEST) + .run_genesis(LOCAL_GENESIS_REQUEST.clone()) .exec(exec_request); assert!(builder.is_error()); - let transforms = builder.get_transforms(); + let effects = &builder.get_effects()[0]; - assert!(transforms[0] - .iter() - .find( - |(_, t)| if let Transform::Write(StoredValue::CLValue(cl_value)) = t { - cl_value.to_owned().into_t::().unwrap_or_default() == "Hello, World!" - } else { - false - } - ) - .is_none()); + assert!(!effects.transforms().iter().any(|transform| { + if let TransformKindV2::Write(StoredValue::CLValue(cl_value)) = transform.kind() { + cl_value.to_owned().into_t::().unwrap_or_default() == "Hello, World!" 
+ } else { + false + } + })); } diff --git a/execution_engine_testing/tests/src/test/regression/ee_597.rs b/execution_engine_testing/tests/src/test/regression/ee_597.rs index 193ff3be34..0d59bfcd09 100644 --- a/execution_engine_testing/tests/src/test/regression/ee_597.rs +++ b/execution_engine_testing/tests/src/test/regression/ee_597.rs @@ -1,20 +1,19 @@ use once_cell::sync::Lazy; use casper_engine_test_support::{ - internal::{utils, ExecuteRequestBuilder, InMemoryWasmTestBuilder, DEFAULT_ACCOUNTS}, + utils, ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNTS, MINIMUM_ACCOUNT_CREATION_BALANCE, }; -use casper_execution_engine::{core::engine_state::GenesisAccount, shared::motes::Motes}; use casper_types::{ - account::AccountHash, system::auction, ApiError, PublicKey, RuntimeArgs, SecretKey, + account::AccountHash, system::auction, ApiError, GenesisAccount, Motes, PublicKey, RuntimeArgs, + SecretKey, }; const CONTRACT_EE_597_REGRESSION: &str = "ee_597_regression.wasm"; static VALID_PUBLIC_KEY: Lazy = Lazy::new(|| { - SecretKey::ed25519_from_bytes([42; SecretKey::ED25519_LENGTH]) - .unwrap() - .into() + let secret_key = SecretKey::ed25519_from_bytes([42; SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) }); static VALID_ADDR: Lazy = Lazy::new(|| AccountHash::from(&*VALID_PUBLIC_KEY)); const VALID_BALANCE: u64 = MINIMUM_ACCOUNT_CREATION_BALANCE; @@ -24,11 +23,8 @@ const VALID_BALANCE: u64 = MINIMUM_ACCOUNT_CREATION_BALANCE; fn should_fail_when_bonding_amount_is_zero_ee_597_regression() { let accounts = { let mut tmp: Vec = DEFAULT_ACCOUNTS.clone(); - let account = GenesisAccount::account( - VALID_PUBLIC_KEY.clone(), - Motes::new(VALID_BALANCE.into()), - None, - ); + let account = + GenesisAccount::account(VALID_PUBLIC_KEY.clone(), Motes::new(VALID_BALANCE), None); tmp.push(account); tmp }; @@ -42,19 +38,13 @@ fn should_fail_when_bonding_amount_is_zero_ee_597_regression() { ) .build(); - let result = InMemoryWasmTestBuilder::default() - 
.run_genesis(&run_genesis_request) + let mut builder = LmdbWasmTestBuilder::default(); + builder + .run_genesis(run_genesis_request) .exec(exec_request) - .commit() - .finish(); + .commit(); - let response = result - .builder() - .get_exec_result(0) - .expect("should have a response") - .to_owned(); - - let error_message = utils::get_error_message(response); + let error_message = builder.get_error_message().expect("should have a result"); // Error::BondTooSmall => 5, assert!( @@ -62,6 +52,7 @@ fn should_fail_when_bonding_amount_is_zero_ee_597_regression() { "{:?}", ApiError::from(auction::Error::BondTooSmall) )), + "{}", error_message ); } diff --git a/execution_engine_testing/tests/src/test/regression/ee_598.rs b/execution_engine_testing/tests/src/test/regression/ee_598.rs index 5d750a6f78..ae5501d3c3 100644 --- a/execution_engine_testing/tests/src/test/regression/ee_598.rs +++ b/execution_engine_testing/tests/src/test/regression/ee_598.rs @@ -2,20 +2,12 @@ use num_traits::Zero; use once_cell::sync::Lazy; use casper_engine_test_support::{ - internal::{ - utils, DeployItemBuilder, ExecuteRequestBuilder, InMemoryWasmTestBuilder, DEFAULT_ACCOUNTS, - }, + utils, DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNTS, DEFAULT_ACCOUNT_ADDR, }; -use casper_execution_engine::{ - core::engine_state::genesis::{GenesisAccount, GenesisValidator}, - shared::motes::Motes, -}; use casper_types::{ - account::AccountHash, - runtime_args, - system::auction::{self, DelegationRate}, - ApiError, PublicKey, RuntimeArgs, SecretKey, U512, + account::AccountHash, runtime_args, system::auction::DelegationRate, GenesisAccount, + GenesisValidator, Motes, PublicKey, SecretKey, U512, }; const ARG_AMOUNT: &str = "amount"; @@ -26,31 +18,31 @@ const ARG_ACCOUNT_HASH: &str = "account_hash"; const CONTRACT_AUCTION_BIDDING: &str = "auction_bidding.wasm"; static ACCOUNT_1_PK: Lazy = Lazy::new(|| { - SecretKey::ed25519_from_bytes([4; SecretKey::ED25519_LENGTH]) - .unwrap() - 
.into() + let secret_key = SecretKey::ed25519_from_bytes([4; SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) }); const GENESIS_VALIDATOR_STAKE: u64 = 50_000; static ACCOUNT_1_ADDR: Lazy = Lazy::new(|| AccountHash::from(&*ACCOUNT_1_PK)); -static ACCOUNT_1_FUND: Lazy = Lazy::new(|| U512::from(1_500_000_000_000u64)); +static ACCOUNT_1_FUND: Lazy = Lazy::new(|| U512::from(10_000_000_000_000u64)); static ACCOUNT_1_BALANCE: Lazy = Lazy::new(|| *ACCOUNT_1_FUND + 100_000); static ACCOUNT_1_BOND: Lazy = Lazy::new(|| U512::from(25_000)); #[ignore] #[test] -fn should_fail_unbonding_more_than_it_was_staked_ee_598_regression() { - let public_key: PublicKey = SecretKey::ed25519_from_bytes([42; SecretKey::ED25519_LENGTH]) - .unwrap() - .into(); +fn should_handle_unbond_for_more_than_stake_as_full_unbond_of_stake_ee_598_regression() { + let secret_key = SecretKey::ed25519_from_bytes([42; SecretKey::ED25519_LENGTH]).unwrap(); + let public_key = PublicKey::from(&secret_key); let accounts = { let mut tmp: Vec = DEFAULT_ACCOUNTS.clone(); let account = GenesisAccount::account( public_key, - Motes::new(GENESIS_VALIDATOR_STAKE.into()) * Motes::new(2.into()), + Motes::new(GENESIS_VALIDATOR_STAKE) + .checked_mul(Motes::new(2)) + .unwrap(), Some(GenesisValidator::new( - Motes::new(GENESIS_VALIDATOR_STAKE.into()), + Motes::new(GENESIS_VALIDATOR_STAKE), DelegationRate::zero(), )), ); @@ -60,7 +52,7 @@ fn should_fail_unbonding_more_than_it_was_staked_ee_598_regression() { let run_genesis_request = utils::create_run_genesis_request(accounts); - let exec_request_1 = ExecuteRequestBuilder::standard( + let seed_request = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, CONTRACT_AUCTION_BIDDING, runtime_args! { @@ -70,43 +62,28 @@ fn should_fail_unbonding_more_than_it_was_staked_ee_598_regression() { }, ) .build(); - let exec_request_2 = { - let deploy = DeployItemBuilder::new() - .with_address(*ACCOUNT_1_ADDR) - .with_empty_payment_bytes(runtime_args! 
{ ARG_AMOUNT => *ACCOUNT_1_FUND }) - .with_session_code( - "ee_598_regression.wasm", - runtime_args! { - ARG_AMOUNT => *ACCOUNT_1_BOND, - ARG_PUBLIC_KEY => ACCOUNT_1_PK.clone(), - }, - ) - .with_deploy_hash([2u8; 32]) - .with_authorization_keys(&[*ACCOUNT_1_ADDR]) - .build(); - ExecuteRequestBuilder::from_deploy_item(deploy).build() - }; - - let mut builder = InMemoryWasmTestBuilder::default(); - builder.run_genesis(&run_genesis_request); - - builder.exec(exec_request_1).expect_success().commit(); + let deploy = DeployItemBuilder::new() + .with_address(*ACCOUNT_1_ADDR) + .with_standard_payment(runtime_args! { ARG_AMOUNT => *ACCOUNT_1_FUND }) + .with_session_code( + "ee_598_regression.wasm", + runtime_args! { + ARG_AMOUNT => *ACCOUNT_1_BOND, + ARG_PUBLIC_KEY => ACCOUNT_1_PK.clone(), + }, + ) + .with_deploy_hash([2u8; 32]) + .with_authorization_keys(&[*ACCOUNT_1_ADDR]) + .build(); + let combined_bond_and_unbond_request = ExecuteRequestBuilder::from_deploy_item(&deploy).build(); - let result = builder.exec(exec_request_2).commit().finish(); + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(run_genesis_request); - let response = result - .builder() - .get_exec_result(1) - .expect("should have a response") - .to_owned(); - let error_message = utils::get_error_message(response); + builder.exec(seed_request).expect_success().commit(); - // Error::UnbondTooLarge, - assert!( - error_message.contains(&format!( - "{:?}", - ApiError::from(auction::Error::UnbondTooLarge) - )), - error_message - ); + builder + .exec(combined_bond_and_unbond_request) + .expect_success() + .commit(); } diff --git a/execution_engine_testing/tests/src/test/regression/ee_599.rs b/execution_engine_testing/tests/src/test/regression/ee_599.rs index c6816a54c5..82e468bbdb 100644 --- a/execution_engine_testing/tests/src/test/regression/ee_599.rs +++ b/execution_engine_testing/tests/src/test/regression/ee_599.rs @@ -1,24 +1,21 @@ use once_cell::sync::Lazy; use 
casper_engine_test_support::{ - internal::{ - ExecuteRequestBuilder, InMemoryWasmTestBuilder, DEFAULT_PAYMENT, - DEFAULT_RUN_GENESIS_REQUEST, - }, - DEFAULT_ACCOUNT_ADDR, + ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, DEFAULT_PAYMENT, + LOCAL_GENESIS_REQUEST, }; -use casper_types::{account::AccountHash, runtime_args, RuntimeArgs, U512}; +use casper_types::{account::AccountHash, runtime_args, U512}; const CONTRACT_EE_599_REGRESSION: &str = "ee_599_regression.wasm"; const CONTRACT_TRANSFER_TO_ACCOUNT: &str = "transfer_to_account_u512.wasm"; const DONATION_PURSE_COPY_KEY: &str = "donation_purse_copy"; -const EXPECTED_ERROR: &str = "InvalidContext"; +const EXPECTED_ERROR: &str = "Forged reference"; const TRANSFER_FUNDS_KEY: &str = "transfer_funds"; const VICTIM_ADDR: AccountHash = AccountHash::new([42; 32]); static VICTIM_INITIAL_FUNDS: Lazy = Lazy::new(|| *DEFAULT_PAYMENT * 10); -fn setup() -> InMemoryWasmTestBuilder { +fn setup() -> LmdbWasmTestBuilder { // Creates victim account let exec_request_1 = { let args = runtime_args! 
{ @@ -38,17 +35,16 @@ fn setup() -> InMemoryWasmTestBuilder { .build() }; - let result = InMemoryWasmTestBuilder::default() - .run_genesis(&DEFAULT_RUN_GENESIS_REQUEST) + let mut ctx = LmdbWasmTestBuilder::default(); + ctx.run_genesis(LOCAL_GENESIS_REQUEST.clone()) .exec(exec_request_1) .expect_success() .commit() .exec(exec_request_2) .expect_success() .commit() - .finish(); - - InMemoryWasmTestBuilder::from_result(result) + .clear_results(); + ctx } #[ignore] @@ -57,11 +53,11 @@ fn should_not_be_able_to_transfer_funds_with_transfer_purse_to_purse() { let mut builder = setup(); let victim_account = builder - .get_account(VICTIM_ADDR) + .get_entity_by_account_hash(VICTIM_ADDR) .expect("should have victim account"); let default_account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) .expect("should have default account"); let transfer_funds = default_account .named_keys() @@ -79,7 +75,7 @@ fn should_not_be_able_to_transfer_funds_with_transfer_purse_to_purse() { let exec_request_3 = { let args = runtime_args! 
{ "method" => "call", - "contract_key" => transfer_funds.into_hash().expect("should be hash"), + "contract_key" => transfer_funds.into_entity_hash_addr().expect("should be hash"), "sub_contract_method_fwd" => "transfer_from_purse_to_purse_ext", }; ExecuteRequestBuilder::standard(VICTIM_ADDR, CONTRACT_EE_599_REGRESSION, args).build() @@ -87,33 +83,25 @@ fn should_not_be_able_to_transfer_funds_with_transfer_purse_to_purse() { let proposer_reward_starting_balance = builder.get_proposer_purse_balance(); - let result_2 = builder.exec(exec_request_3).commit().finish(); + builder.exec(exec_request_3).commit(); let transaction_fee = builder.get_proposer_purse_balance() - proposer_reward_starting_balance; - let error_msg = result_2 - .builder() - .exec_error_message(0) - .expect("should have error"); + let error_msg = builder.get_error_message().expect("should have error"); assert!( error_msg.contains(EXPECTED_ERROR), "Got error: {}", error_msg ); - let victim_balance_after = result_2 - .builder() - .get_purse_balance(victim_account.main_purse()); + let victim_balance_after = builder.get_purse_balance(victim_account.main_purse()); assert_eq!( *VICTIM_INITIAL_FUNDS - transaction_fee, victim_balance_after ); - assert_eq!( - result_2.builder().get_purse_balance(donation_purse_copy), - U512::zero(), - ); + assert_eq!(builder.get_purse_balance(donation_purse_copy), U512::zero(),); } #[ignore] @@ -122,11 +110,11 @@ fn should_not_be_able_to_transfer_funds_with_transfer_from_purse_to_account() { let mut builder = setup(); let victim_account = builder - .get_account(VICTIM_ADDR) + .get_entity_by_account_hash(VICTIM_ADDR) .expect("should have victim account"); let default_account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) .expect("should have default account"); let default_account_balance = builder.get_purse_balance(default_account.main_purse()); @@ -147,7 +135,7 @@ fn 
should_not_be_able_to_transfer_funds_with_transfer_from_purse_to_account() { let exec_request_3 = { let args = runtime_args! { "method" => "call".to_string(), - "contract_key" => transfer_funds.into_hash().expect("should get key"), + "contract_key" => transfer_funds.into_entity_hash_addr().expect("should get key"), "sub_contract_method_fwd" => "transfer_from_purse_to_account_ext", }; ExecuteRequestBuilder::standard(VICTIM_ADDR, CONTRACT_EE_599_REGRESSION, args).build() @@ -159,7 +147,7 @@ fn should_not_be_able_to_transfer_funds_with_transfer_from_purse_to_account() { let transaction_fee = builder.get_proposer_purse_balance() - proposer_reward_starting_balance; - let error_msg = builder.exec_error_message(0).expect("should have error"); + let error_msg = builder.get_error_message().expect("should have error"); assert!( error_msg.contains(EXPECTED_ERROR), "Got error: {}", @@ -190,11 +178,11 @@ fn should_not_be_able_to_transfer_funds_with_transfer_to_account() { let mut builder = setup(); let victim_account = builder - .get_account(VICTIM_ADDR) + .get_entity_by_account_hash(VICTIM_ADDR) .expect("should have victim account"); let default_account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) .expect("should have default account"); let default_account_balance = builder.get_purse_balance(default_account.main_purse()); @@ -215,7 +203,7 @@ fn should_not_be_able_to_transfer_funds_with_transfer_to_account() { let exec_request_3 = { let args = runtime_args! 
{ "method" => "call", - "contract_key" => transfer_funds.into_hash().expect("should be hash"), + "contract_key" => transfer_funds.into_entity_hash_addr().expect("should be hash"), "sub_contract_method_fwd" => "transfer_to_account_ext", }; ExecuteRequestBuilder::standard(VICTIM_ADDR, CONTRACT_EE_599_REGRESSION, args).build() @@ -223,23 +211,18 @@ fn should_not_be_able_to_transfer_funds_with_transfer_to_account() { let proposer_reward_starting_balance = builder.get_proposer_purse_balance(); - let result_2 = builder.exec(exec_request_3).commit().finish(); + builder.exec(exec_request_3).commit(); let transaction_fee = builder.get_proposer_purse_balance() - proposer_reward_starting_balance; - let error_msg = result_2 - .builder() - .exec_error_message(0) - .expect("should have error"); + let error_msg = builder.get_error_message().expect("should have error"); assert!( error_msg.contains(EXPECTED_ERROR), "Got error: {}", error_msg ); - let victim_balance_after = result_2 - .builder() - .get_purse_balance(victim_account.main_purse()); + let victim_balance_after = builder.get_purse_balance(victim_account.main_purse()); assert_eq!( *VICTIM_INITIAL_FUNDS - transaction_fee, @@ -247,15 +230,10 @@ fn should_not_be_able_to_transfer_funds_with_transfer_to_account() { ); // In this variant of test `donation_purse` is left unchanged i.e. 
zero balance - assert_eq!( - result_2.builder().get_purse_balance(donation_purse_copy), - U512::zero(), - ); + assert_eq!(builder.get_purse_balance(donation_purse_copy), U512::zero(),); // Verify that default account's balance didn't change - let updated_default_account_balance = result_2 - .builder() - .get_purse_balance(default_account.main_purse()); + let updated_default_account_balance = builder.get_purse_balance(default_account.main_purse()); assert_eq!( updated_default_account_balance - default_account_balance, @@ -265,15 +243,15 @@ fn should_not_be_able_to_transfer_funds_with_transfer_to_account() { #[ignore] #[test] -fn should_not_be_able_to_get_main_purse_in_invalid_context() { +fn should_not_be_able_to_get_main_purse_in_invalid_builder() { let mut builder = setup(); let victim_account = builder - .get_account(VICTIM_ADDR) + .get_entity_with_named_keys_by_account_hash(VICTIM_ADDR) .expect("should have victim account"); let default_account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) .expect("should have default account"); let transfer_funds = default_account @@ -285,7 +263,7 @@ fn should_not_be_able_to_get_main_purse_in_invalid_context() { let exec_request_3 = { let args = runtime_args! 
{ "method" => "call".to_string(), - "contract_key" => transfer_funds.into_hash().expect("should be hash"), + "contract_key" => transfer_funds.into_entity_hash_addr().expect("should be hash"), "sub_contract_method_fwd" => "transfer_to_account_ext", }; ExecuteRequestBuilder::standard(VICTIM_ADDR, CONTRACT_EE_599_REGRESSION, args).build() @@ -295,23 +273,18 @@ fn should_not_be_able_to_get_main_purse_in_invalid_context() { let proposer_reward_starting_balance = builder.get_proposer_purse_balance(); - let result_2 = builder.exec(exec_request_3).commit().finish(); + builder.exec(exec_request_3).commit(); let transaction_fee = builder.get_proposer_purse_balance() - proposer_reward_starting_balance; - let error_msg = result_2 - .builder() - .exec_error_message(0) - .expect("should have error"); + let error_msg = builder.get_error_message().expect("should have error"); assert!( error_msg.contains(EXPECTED_ERROR), "Got error: {}", error_msg ); - let victim_balance_after = result_2 - .builder() - .get_purse_balance(victim_account.main_purse()); + let victim_balance_after = builder.get_purse_balance(victim_account.main_purse()); assert_eq!( victim_balance_before - transaction_fee, diff --git a/execution_engine_testing/tests/src/test/regression/ee_601.rs b/execution_engine_testing/tests/src/test/regression/ee_601.rs index 17ab79712d..3bc23ec435 100644 --- a/execution_engine_testing/tests/src/test/regression/ee_601.rs +++ b/execution_engine_testing/tests/src/test/regression/ee_601.rs @@ -1,12 +1,8 @@ use casper_engine_test_support::{ - internal::{ - DeployItemBuilder, ExecuteRequestBuilder, InMemoryWasmTestBuilder, DEFAULT_PAYMENT, - DEFAULT_RUN_GENESIS_REQUEST, - }, - DEFAULT_ACCOUNT_ADDR, + DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, + DEFAULT_PAYMENT, LOCAL_GENESIS_REQUEST, }; -use casper_execution_engine::shared::{stored_value::StoredValue, transform::Transform}; -use casper_types::{runtime_args, CLValue, Key, RuntimeArgs}; +use 
casper_types::{runtime_args, CLValue, EntityAddr, RuntimeArgs, StoredValue}; const ARG_AMOUNT: &str = "amount"; @@ -15,58 +11,45 @@ const ARG_AMOUNT: &str = "amount"; fn should_run_ee_601_pay_session_new_uref_collision() { let genesis_account_hash = *DEFAULT_ACCOUNT_ADDR; - let exec_request = { - let deploy = DeployItemBuilder::new() - .with_deploy_hash([1; 32]) - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_payment_code( - "ee_601_regression.wasm", - runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT }, - ) - .with_session_code("ee_601_regression.wasm", RuntimeArgs::default()) - .with_authorization_keys(&[genesis_account_hash]) - .build(); + let deploy_item = DeployItemBuilder::new() + .with_deploy_hash([1; 32]) + .with_address(*DEFAULT_ACCOUNT_ADDR) + .with_payment_code( + "ee_601_regression.wasm", + runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT }, + ) + .with_session_code("ee_601_regression.wasm", RuntimeArgs::default()) + .with_authorization_keys(&[genesis_account_hash]) + .build(); - ExecuteRequestBuilder::new().push_deploy(deploy).build() - }; + let exec_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); builder - .run_genesis(&DEFAULT_RUN_GENESIS_REQUEST) - .exec(exec_request); + .run_genesis(LOCAL_GENESIS_REQUEST.clone()) + .exec(exec_request) + .expect_success() + .commit(); - let transforms = builder.get_transforms(); - let transform = &transforms[0]; + let hash = *DEFAULT_ACCOUNT_ADDR; + let named_keys = builder.get_named_keys(EntityAddr::Account(hash.value())); - let add_keys = if let Some(Transform::AddKeys(keys)) = - transform.get(&Key::Account(*DEFAULT_ACCOUNT_ADDR)) - { - keys - } else { - panic!( - "expected AddKeys transform for given key but received {:?}", - transforms[0] - ); - }; - - let pay_uref = add_keys + let payment_uref = *named_keys .get("new_uref_result-payment") .expect("payment uref should exist"); - let 
session_uref = add_keys + let session_uref = *named_keys .get("new_uref_result-session") .expect("session uref should exist"); assert_ne!( - pay_uref, session_uref, + payment_uref, session_uref, "payment and session code should not create same uref" ); - builder.commit(); - let payment_value: StoredValue = builder - .query(None, *pay_uref, &[]) + .query(None, payment_uref, &[]) .expect("should find payment value"); assert_eq!( @@ -76,7 +59,7 @@ fn should_run_ee_601_pay_session_new_uref_collision() { ); let session_value: StoredValue = builder - .query(None, *session_uref, &[]) + .query(None, session_uref, &[]) .expect("should find session value"); assert_eq!( diff --git a/execution_engine_testing/tests/src/test/regression/ee_771.rs b/execution_engine_testing/tests/src/test/regression/ee_771.rs index 8b4bfaa8e8..8786daaab1 100644 --- a/execution_engine_testing/tests/src/test/regression/ee_771.rs +++ b/execution_engine_testing/tests/src/test/regression/ee_771.rs @@ -1,6 +1,5 @@ use casper_engine_test_support::{ - internal::{ExecuteRequestBuilder, InMemoryWasmTestBuilder, DEFAULT_RUN_GENESIS_REQUEST}, - DEFAULT_ACCOUNT_ADDR, + ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST, }; use casper_types::RuntimeArgs; @@ -16,19 +15,17 @@ fn should_run_ee_771_regression() { ) .build(); - let result = InMemoryWasmTestBuilder::default() - .run_genesis(&DEFAULT_RUN_GENESIS_REQUEST) + let mut builder = LmdbWasmTestBuilder::default(); + builder + .run_genesis(LOCAL_GENESIS_REQUEST.clone()) .exec(exec_request) - .commit() - .finish(); + .commit(); - let response = result - .builder() - .get_exec_result(0) - .expect("should have a response") - .to_owned(); + let exec_result = builder + .get_exec_result_owned(0) + .expect("should have a response"); - let error = response[0].as_error().expect("should have error"); + let error = exec_result.error().expect("should have error"); assert_eq!( format!("{}", error), "Function not found: 
functiondoesnotexist" diff --git a/execution_engine_testing/tests/src/test/regression/ee_890.rs b/execution_engine_testing/tests/src/test/regression/ee_890.rs index 39599b4637..ce7be9a44e 100644 --- a/execution_engine_testing/tests/src/test/regression/ee_890.rs +++ b/execution_engine_testing/tests/src/test/regression/ee_890.rs @@ -1,13 +1,10 @@ -use parity_wasm::{self, builder}; +use casper_wasm::{self, builder}; use casper_engine_test_support::{ - internal::{ - DeployItemBuilder, ExecuteRequestBuilder, InMemoryWasmTestBuilder, ARG_AMOUNT, - DEFAULT_PAYMENT, DEFAULT_RUN_GENESIS_REQUEST, - }, - DEFAULT_ACCOUNT_ADDR, + DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder, ARG_AMOUNT, + DEFAULT_ACCOUNT_ADDR, DEFAULT_PAYMENT, LOCAL_GENESIS_REQUEST, }; -use casper_types::{contracts::DEFAULT_ENTRY_POINT_NAME, runtime_args, RuntimeArgs}; +use casper_types::{addressable_entity::DEFAULT_ENTRY_POINT_NAME, runtime_args, RuntimeArgs}; const DO_NOTHING_WASM: &str = "do_nothing.wasm"; @@ -35,7 +32,7 @@ fn make_do_nothing_with_start() -> Vec { .build() .build(); - parity_wasm::serialize(module).expect("should serialize") + casper_wasm::serialize(module).expect("should serialize") } #[ignore] @@ -43,24 +40,24 @@ fn make_do_nothing_with_start() -> Vec { fn should_run_ee_890_gracefully_reject_start_node_in_session() { let wasm_binary = make_do_nothing_with_start(); - let deploy_1 = DeployItemBuilder::new() + let deploy_item = DeployItemBuilder::new() .with_address(*DEFAULT_ACCOUNT_ADDR) .with_session_bytes(wasm_binary, RuntimeArgs::new()) - .with_empty_payment_bytes(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, }) + .with_standard_payment(runtime_args! 
{ ARG_AMOUNT => *DEFAULT_PAYMENT, }) .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) .with_deploy_hash([123; 32]) .build(); - let exec_request_1 = ExecuteRequestBuilder::new().push_deploy(deploy_1).build(); + let exec_request_1 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); - let result = InMemoryWasmTestBuilder::default() - .run_genesis(&DEFAULT_RUN_GENESIS_REQUEST) + let mut builder = LmdbWasmTestBuilder::default(); + builder + .run_genesis(LOCAL_GENESIS_REQUEST.clone()) .exec(exec_request_1) - .commit() - .finish(); - let message = result.builder().exec_error_message(0).expect("should fail"); + .commit(); + let message = builder.get_error_message().expect("should fail"); assert!( - message.contains("UnsupportedWasmStart"), + message.contains("Unsupported Wasm start"), "Error message {:?} does not contain expected pattern", message ); @@ -71,7 +68,7 @@ fn should_run_ee_890_gracefully_reject_start_node_in_session() { fn should_run_ee_890_gracefully_reject_start_node_in_payment() { let wasm_binary = make_do_nothing_with_start(); - let deploy_1 = DeployItemBuilder::new() + let deploy_item = DeployItemBuilder::new() .with_address(*DEFAULT_ACCOUNT_ADDR) .with_session_code(DO_NOTHING_WASM, RuntimeArgs::new()) .with_payment_bytes(wasm_binary, RuntimeArgs::new()) @@ -79,16 +76,16 @@ fn should_run_ee_890_gracefully_reject_start_node_in_payment() { .with_deploy_hash([123; 32]) .build(); - let exec_request_1 = ExecuteRequestBuilder::new().push_deploy(deploy_1).build(); + let exec_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); - let result = InMemoryWasmTestBuilder::default() - .run_genesis(&DEFAULT_RUN_GENESIS_REQUEST) - .exec(exec_request_1) - .commit() - .finish(); - let message = result.builder().exec_error_message(0).expect("should fail"); + let mut builder = LmdbWasmTestBuilder::default(); + builder + .run_genesis(LOCAL_GENESIS_REQUEST.clone()) + .exec(exec_request) + .commit(); + let message = 
builder.get_error_message().expect("should fail"); assert!( - message.contains("UnsupportedWasmStart"), + message.contains("Unsupported Wasm start"), "Error message {:?} does not contain expected pattern", message ); diff --git a/execution_engine_testing/tests/src/test/regression/ee_966.rs b/execution_engine_testing/tests/src/test/regression/ee_966.rs index 9d6b8bd3c2..2b0656151e 100644 --- a/execution_engine_testing/tests/src/test/regression/ee_966.rs +++ b/execution_engine_testing/tests/src/test/regression/ee_966.rs @@ -1,29 +1,18 @@ use assert_matches::assert_matches; +use casper_wasm::builder; use once_cell::sync::Lazy; -use parity_wasm::builder; use casper_engine_test_support::{ - internal::{ - DeployItemBuilder, ExecuteRequestBuilder, InMemoryWasmTestBuilder, UpgradeRequestBuilder, - ARG_AMOUNT, DEFAULT_PAYMENT, DEFAULT_PROTOCOL_VERSION, DEFAULT_RUN_GENESIS_REQUEST, - }, - DEFAULT_ACCOUNT_ADDR, -}; -use casper_execution_engine::{ - core::{ - engine_state::{Error, ExecuteRequest}, - execution::Error as ExecError, - }, - shared::{ - host_function_costs::HostFunctionCosts, - opcode_costs::OpcodeCosts, - storage_costs::StorageCosts, - wasm_config::{WasmConfig, DEFAULT_MAX_STACK_HEIGHT, DEFAULT_WASM_MAX_MEMORY}, - }, + DeployItemBuilder, ExecuteRequest, ExecuteRequestBuilder, LmdbWasmTestBuilder, + UpgradeRequestBuilder, ARG_AMOUNT, DEFAULT_ACCOUNT_ADDR, DEFAULT_PAYMENT, + DEFAULT_PROTOCOL_VERSION, LOCAL_GENESIS_REQUEST, }; +use casper_execution_engine::{engine_state::Error, execution::ExecError}; use casper_types::{ - contracts::DEFAULT_ENTRY_POINT_NAME, runtime_args, ApiError, EraId, ProtocolVersion, - RuntimeArgs, + addressable_entity::DEFAULT_ENTRY_POINT_NAME, runtime_args, ApiError, EraId, + HostFunctionCostsV1, HostFunctionCostsV2, MessageLimits, OpcodeCosts, ProtocolVersion, + RuntimeArgs, WasmConfig, WasmV1Config, WasmV2Config, DEFAULT_MAX_STACK_HEIGHT, + DEFAULT_WASM_MAX_MEMORY, }; const CONTRACT_EE_966_REGRESSION: &str = "ee_966_regression.wasm"; @@ 
-31,21 +20,24 @@ const MINIMUM_INITIAL_MEMORY: u32 = 16; const DEFAULT_ACTIVATION_POINT: EraId = EraId::new(0); static DOUBLED_WASM_MEMORY_LIMIT: Lazy = Lazy::new(|| { - WasmConfig::new( + let wasm_v1_config = WasmV1Config::new( DEFAULT_WASM_MAX_MEMORY * 2, DEFAULT_MAX_STACK_HEIGHT, OpcodeCosts::default(), - StorageCosts::default(), - HostFunctionCosts::default(), - ) -}); -static NEW_PROTOCOL_VERSION: Lazy = Lazy::new(|| { - ProtocolVersion::from_parts( - DEFAULT_PROTOCOL_VERSION.value().major, - DEFAULT_PROTOCOL_VERSION.value().minor, - DEFAULT_PROTOCOL_VERSION.value().patch + 1, - ) + HostFunctionCostsV1::default(), + ); + let wasm_v2_config = WasmV2Config::new( + DEFAULT_WASM_MAX_MEMORY * 2, + OpcodeCosts::default(), + HostFunctionCostsV2::default(), + ); + WasmConfig::new(MessageLimits::default(), wasm_v1_config, wasm_v2_config) }); +const NEW_PROTOCOL_VERSION: ProtocolVersion = ProtocolVersion::from_parts( + DEFAULT_PROTOCOL_VERSION.value().major, + DEFAULT_PROTOCOL_VERSION.value().minor, + DEFAULT_PROTOCOL_VERSION.value().patch + 1, +); fn make_session_code_with_memory_pages(initial_pages: u32, max_pages: Option) -> Vec { let module = builder::module() @@ -67,21 +59,21 @@ fn make_session_code_with_memory_pages(initial_pages: u32, max_pages: Option) -> ExecuteRequest { - let deploy = DeployItemBuilder::new() + let deploy_item = DeployItemBuilder::new() .with_address(*DEFAULT_ACCOUNT_ADDR) .with_session_bytes(session_code, RuntimeArgs::new()) - .with_empty_payment_bytes(runtime_args! { + .with_standard_payment(runtime_args! 
{ ARG_AMOUNT => *DEFAULT_PAYMENT }) .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) .with_deploy_hash([42; 32]) .build(); - ExecuteRequestBuilder::new().push_deploy(deploy).build() + ExecuteRequestBuilder::from_deploy_item(&deploy_item).build() } #[ignore] @@ -92,9 +84,9 @@ fn should_run_ee_966_with_zero_min_and_zero_max_memory() { let exec_request = make_request_with_session_bytes(session_code); - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - builder.run_genesis(&*DEFAULT_RUN_GENESIS_REQUEST); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); builder.exec(exec_request).commit().expect_success(); } @@ -106,16 +98,16 @@ fn should_run_ee_966_cant_have_too_much_initial_memory() { let exec_request = make_request_with_session_bytes(session_code); - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - builder.run_genesis(&*DEFAULT_RUN_GENESIS_REQUEST); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); builder.exec(exec_request).commit(); - let exec_response = &builder - .get_exec_result(0) - .expect("should have exec response")[0]; - let error = exec_response.as_error().expect("should have error"); + let exec_result = &builder + .get_exec_result_owned(0) + .expect("should have exec response"); + let error = exec_result.error().expect("should have error"); assert_matches!(error, Error::Exec(ExecError::Interpreter(_))); } @@ -127,9 +119,9 @@ fn should_run_ee_966_should_request_exactly_maximum() { let exec_request = make_request_with_session_bytes(session_code); - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - builder.run_genesis(&*DEFAULT_RUN_GENESIS_REQUEST); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); builder.exec(exec_request).commit().expect_success(); } @@ -141,9 +133,9 @@ fn should_run_ee_966_should_request_exactly_maximum_as_initial() { let exec_request = 
make_request_with_session_bytes(session_code); - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - builder.run_genesis(&*DEFAULT_RUN_GENESIS_REQUEST); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); builder.exec(exec_request).commit().expect_success(); } @@ -158,16 +150,16 @@ fn should_run_ee_966_cant_have_too_much_max_memory() { let exec_request = make_request_with_session_bytes(session_code); - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - builder.run_genesis(&*DEFAULT_RUN_GENESIS_REQUEST); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); builder.exec(exec_request).commit(); - let exec_response = &builder - .get_exec_result(0) - .expect("should have exec response")[0]; - let error = exec_response.as_error().expect("should have error"); + let exec_result = &builder + .get_exec_result_owned(0) + .expect("should have exec response"); + let error = exec_result.error().expect("should have error"); assert_matches!(error, Error::Exec(ExecError::Interpreter(_))); } @@ -181,16 +173,16 @@ fn should_run_ee_966_cant_have_way_too_much_max_memory() { let exec_request = make_request_with_session_bytes(session_code); - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - builder.run_genesis(&*DEFAULT_RUN_GENESIS_REQUEST); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); builder.exec(exec_request).commit(); - let exec_response = &builder - .get_exec_result(0) - .expect("should have exec response")[0]; - let error = exec_response.as_error().expect("should have error"); + let exec_result = &builder + .get_exec_result_owned(0) + .expect("should have exec response"); + let error = exec_result.error().expect("should have error"); assert_matches!(error, Error::Exec(ExecError::Interpreter(_))); } @@ -202,16 +194,16 @@ fn should_run_ee_966_cant_have_larger_initial_than_max_memory() { let exec_request 
= make_request_with_session_bytes(session_code); - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - builder.run_genesis(&*DEFAULT_RUN_GENESIS_REQUEST); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); builder.exec(exec_request).commit(); - let exec_response = &builder - .get_exec_result(0) - .expect("should have exec response")[0]; - let error = exec_response.as_error().expect("should have error"); + let exec_result = &builder + .get_exec_result_owned(0) + .expect("should have exec response"); + let error = exec_result.error().expect("should have error"); assert_matches!(error, Error::Exec(ExecError::Interpreter(_))); } @@ -225,16 +217,16 @@ fn should_run_ee_966_regression_fail_when_growing_mem_past_max() { ) .build(); - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - builder.run_genesis(&*DEFAULT_RUN_GENESIS_REQUEST); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); builder.exec(exec_request).commit(); - let results = &builder - .get_exec_result(0) - .expect("should have exec response")[0]; - let error = results.as_error().expect("should have error"); + let exec_result = &builder + .get_exec_result_owned(0) + .expect("should have exec response"); + let error = exec_result.error().expect("should have error"); assert_matches!(error, Error::Exec(ExecError::Revert(ApiError::OutOfMemory))); } @@ -248,9 +240,9 @@ fn should_run_ee_966_regression_when_growing_mem_after_upgrade() { ) .build(); - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - builder.run_genesis(&*DEFAULT_RUN_GENESIS_REQUEST); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); builder.exec(exec_request_1).commit(); @@ -258,10 +250,10 @@ fn should_run_ee_966_regression_when_growing_mem_after_upgrade() { // This request should fail - as it's exceeding default memory limit // - let results = &builder - 
.get_exec_result(0) - .expect("should have exec response")[0]; - let error = results.as_error().expect("should have error"); + let exec_result = &builder + .get_exec_result_owned(0) + .expect("should have exec response"); + let error = exec_result.error().expect("should have error"); assert_matches!(error, Error::Exec(ExecError::Revert(ApiError::OutOfMemory))); // @@ -269,13 +261,19 @@ fn should_run_ee_966_regression_when_growing_mem_after_upgrade() { // let mut upgrade_request = UpgradeRequestBuilder::new() - .with_current_protocol_version(*DEFAULT_PROTOCOL_VERSION) - .with_new_protocol_version(*NEW_PROTOCOL_VERSION) + .with_current_protocol_version(DEFAULT_PROTOCOL_VERSION) + .with_new_protocol_version(NEW_PROTOCOL_VERSION) .with_activation_point(DEFAULT_ACTIVATION_POINT) - .with_new_wasm_config(*DOUBLED_WASM_MEMORY_LIMIT) .build(); - builder.upgrade_with_upgrade_request(&mut upgrade_request); + let updated_chainspec = builder + .chainspec() + .clone() + .with_wasm_config(*DOUBLED_WASM_MEMORY_LIMIT); + + builder + .with_chainspec(updated_chainspec) + .upgrade(&mut upgrade_request); // // Now this request is working as the maximum memory limit is doubled. 
@@ -286,7 +284,6 @@ fn should_run_ee_966_regression_when_growing_mem_after_upgrade() { CONTRACT_EE_966_REGRESSION, RuntimeArgs::default(), ) - .with_protocol_version(*NEW_PROTOCOL_VERSION) .build(); builder.exec(exec_request_2).commit().expect_success(); diff --git a/execution_engine_testing/tests/src/test/regression/gh_1470.rs b/execution_engine_testing/tests/src/test/regression/gh_1470.rs new file mode 100644 index 0000000000..fc85c9831c --- /dev/null +++ b/execution_engine_testing/tests/src/test/regression/gh_1470.rs @@ -0,0 +1,926 @@ +use std::collections::BTreeMap; + +use casper_engine_test_support::{ + ExecuteRequestBuilder, LmdbWasmTestBuilder, TransferRequestBuilder, UpgradeRequestBuilder, + DEFAULT_ACCOUNT_ADDR, DEFAULT_ACCOUNT_PUBLIC_KEY, LOCAL_GENESIS_REQUEST, + MINIMUM_ACCOUNT_CREATION_BALANCE, +}; +use casper_execution_engine::{engine_state::Error, execution::ExecError}; +use casper_types::{ + account::AccountHash, + runtime_args, + system::{auction, auction::DelegationRate}, + AccessRights, AddressableEntityHash, CLTyped, CLValue, Digest, EraId, HoldBalanceHandling, Key, + PackageHash, ProtocolVersion, RuntimeArgs, StoredValue, StoredValueTypeMismatch, + SystemHashRegistry, Timestamp, URef, U512, +}; + +use crate::lmdb_fixture; + +const ACCOUNT_1_ADDR: AccountHash = AccountHash::new([1u8; 32]); +const GH_1470_REGRESSION: &str = "gh_1470_regression.wasm"; +const GH_1470_REGRESSION_CALL: &str = "gh_1470_regression_call.wasm"; +const DEFAULT_ACTIVATION_POINT: EraId = EraId::new(1); + +const CONTRACT_ADD_BID: &str = "add_bid.wasm"; +const BOND_AMOUNT: u64 = 42; +const BID_DELEGATION_RATE: DelegationRate = auction::DELEGATION_RATE_DENOMINATOR; + +const CONTRACT_TRANSFER_TO_ACCOUNT: &str = "transfer_to_account_u512.wasm"; +const ARG_AMOUNT: &str = "amount"; +const ARG_TARGET: &str = "target"; + +const PROTOCOL_VERSION: ProtocolVersion = ProtocolVersion::V1_0_0; + +fn setup() -> LmdbWasmTestBuilder { + let mut builder = LmdbWasmTestBuilder::default(); + 
builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let transfer = TransferRequestBuilder::new(MINIMUM_ACCOUNT_CREATION_BALANCE, ACCOUNT_1_ADDR) + .with_transfer_id(42) + .build(); + + builder.transfer_and_commit(transfer).expect_success(); + + let sem_ver = PROTOCOL_VERSION.value(); + let new_protocol_version = + ProtocolVersion::from_parts(sem_ver.major, sem_ver.minor, sem_ver.patch + 1); + + let updated_chainspec = builder + .chainspec() + .clone() + .with_strict_argument_checking(true); + + let mut upgrade_request = { + UpgradeRequestBuilder::new() + .with_current_protocol_version(PROTOCOL_VERSION) + .with_new_protocol_version(new_protocol_version) + .with_activation_point(DEFAULT_ACTIVATION_POINT) + .build() + }; + + builder + .with_chainspec(updated_chainspec) + .upgrade(&mut upgrade_request) + .expect_upgrade_success(); + + builder +} + +fn apply_global_state_update( + builder: &LmdbWasmTestBuilder, + post_state_hash: Digest, +) -> BTreeMap { + let key = URef::new([0u8; 32], AccessRights::all()).into(); + + let system_contract_hashes = builder + .query(Some(post_state_hash), key, &Vec::new()) + .expect("Must have stored system contract hashes") + .as_cl_value() + .expect("must be CLValue") + .clone() + .into_t::() + .expect("must convert to btree map"); + + let mut global_state_update = BTreeMap::::new(); + let registry = CLValue::from_t(system_contract_hashes) + .expect("must convert to StoredValue") + .into(); + + global_state_update.insert(Key::SystemEntityRegistry, registry); + + global_state_update +} + +#[ignore] +#[test] +fn gh_1470_call_contract_should_verify_group_access() { + let mut builder = setup(); + + let exec_request_1 = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + GH_1470_REGRESSION, + RuntimeArgs::new(), + ) + .build(); + + builder.exec(exec_request_1).expect_success().commit(); + + let account = builder + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .expect("must have default contract package"); + 
+ let entity_hash_key = account + .named_keys() + .get(gh_1470_regression::CONTRACT_HASH_NAME) + .cloned() + .unwrap(); + let entity_hash = entity_hash_key + .into_entity_hash_addr() + .map(AddressableEntityHash::new) + .unwrap(); + let package_hash_key = account + .named_keys() + .get(gh_1470_regression::PACKAGE_HASH_NAME) + .cloned() + .unwrap(); + let package_hash = package_hash_key + .into_package_addr() + .map(PackageHash::new) + .unwrap(); + + let call_contract_request = { + let args = runtime_args! { + gh_1470_regression_call::ARG_TEST_METHOD => gh_1470_regression_call::METHOD_CALL_DO_NOTHING, + gh_1470_regression_call::ARG_CONTRACT_HASH => entity_hash, + }; + ExecuteRequestBuilder::standard(ACCOUNT_1_ADDR, GH_1470_REGRESSION_CALL, args).build() + }; + + builder.exec(call_contract_request).commit(); + + let exec_result = builder + .get_last_exec_result() + .expect("should have last response"); + let call_contract_error = exec_result.error().cloned().expect("should have error"); + + let call_versioned_contract_request = { + let args = runtime_args! 
{ + gh_1470_regression_call::ARG_TEST_METHOD => gh_1470_regression_call::METHOD_CALL_VERSIONED_DO_NOTHING, + gh_1470_regression_call::ARG_CONTRACT_PACKAGE_HASH => package_hash, + }; + ExecuteRequestBuilder::standard(ACCOUNT_1_ADDR, GH_1470_REGRESSION_CALL, args).build() + }; + + builder.exec(call_versioned_contract_request).commit(); + + let exec_result = builder + .get_last_exec_result() + .expect("should have last response"); + let call_versioned_contract_error = exec_result.error().expect("should have error"); + + match (&call_contract_error, &call_versioned_contract_error) { + (Error::Exec(ExecError::InvalidContext), Error::Exec(ExecError::InvalidContext)) => (), + _ => panic!("Both variants should raise same error."), + } + + assert!(matches!( + call_versioned_contract_error, + Error::Exec(ExecError::InvalidContext) + )); + assert!(matches!( + call_contract_error, + Error::Exec(ExecError::InvalidContext) + )); +} + +// #[ignore] +// #[test] +// fn gh_1470_call_contract_should_verify_invalid_arguments_length() { +// let mut builder = setup(); + +// let exec_request_1 = ExecuteRequestBuilder::standard( +// *DEFAULT_ACCOUNT_ADDR, +// GH_1470_REGRESSION, +// RuntimeArgs::new(), +// ) +// .build(); + +// builder.exec(exec_request_1).expect_success().commit(); + +// let account_stored_value = builder +// .query(None, Key::Account(*DEFAULT_ACCOUNT_ADDR), &[]) +// .unwrap(); +// let account = account_stored_value.as_account().cloned().unwrap(); + +// let contract_hash_key = account +// .named_keys() +// .get(gh_1470_regression::CONTRACT_HASH_NAME) +// .cloned() +// .unwrap(); +// let contract_hash = contract_hash_key +// .into_hash() +// .map(ContractHash::new) +// .unwrap(); +// let contract_package_hash_key = account +// .named_keys() +// .get(gh_1470_regression::CONTRACT_PACKAGE_HASH_NAME) +// .cloned() +// .unwrap(); +// let contract_package_hash = contract_package_hash_key +// .into_hash() +// .map(ContractPackageHash::new) +// .unwrap(); + +// let 
call_contract_request = { +// let args = runtime_args! { +// gh_1470_regression_call::ARG_TEST_METHOD => +// gh_1470_regression_call::METHOD_CALL_DO_NOTHING_NO_ARGS, +// gh_1470_regression_call::ARG_CONTRACT_HASH => contract_hash, }; +// ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, GH_1470_REGRESSION_CALL, args) +// .build() +// }; + +// builder.exec(call_contract_request).commit(); + +// let response = builder +// .get_last_exec_result() +// .expect("should have last response"); +// assert_eq!(response.len(), 1); +// let exec_response = response.last().expect("should have response"); +// let call_contract_error = exec_response +// .as_error() +// .cloned() +// .expect("should have error"); + +// let call_versioned_contract_request = { +// let args = runtime_args! { +// gh_1470_regression_call::ARG_TEST_METHOD => +// gh_1470_regression_call::METHOD_CALL_VERSIONED_DO_NOTHING_NO_ARGS, +// gh_1470_regression_call::ARG_CONTRACT_PACKAGE_HASH => contract_package_hash, }; +// ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, GH_1470_REGRESSION_CALL, args) +// .build() +// }; + +// builder.exec(call_versioned_contract_request).commit(); + +// let response = builder +// .get_last_exec_result() +// .expect("should have last response"); +// assert_eq!(response.len(), 1); +// let exec_response = response.last().expect("should have response"); +// let call_versioned_contract_error = exec_response.as_error().expect("should have error"); + +// match (&call_contract_error, &call_versioned_contract_error) { +// ( +// Error::Exec(ExecError::MissingArgument { name: lhs_name }), +// Error::Exec(ExecError::MissingArgument { name: rhs_name }), +// ) if lhs_name == rhs_name => (), +// _ => panic!( +// "Both variants should raise same error: lhs={:?} rhs={:?}", +// call_contract_error, call_versioned_contract_error +// ), +// } + +// assert!( +// matches!( +// &call_versioned_contract_error, +// Error::Exec(ExecError::MissingArgument { +// name, +// }) +// if name == 
gh_1470_regression::ARG1 +// ), +// "{:?}", +// call_versioned_contract_error +// ); +// assert!( +// matches!( +// &call_contract_error, +// Error::Exec(ExecError::MissingArgument { +// name, +// }) +// if name == gh_1470_regression::ARG1 +// ), +// "{:?}", +// call_contract_error +// ); +// } + +#[ignore] +#[test] +fn gh_1470_call_contract_should_ignore_optional_args() { + let mut builder = setup(); + + let exec_request_1 = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + GH_1470_REGRESSION, + RuntimeArgs::new(), + ) + .build(); + + builder.exec(exec_request_1).expect_success().commit(); + + let account = builder + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .expect("must have default contract package"); + + let contract_hash_key = account + .named_keys() + .get(gh_1470_regression::CONTRACT_HASH_NAME) + .cloned() + .unwrap(); + let entity_hash = contract_hash_key + .into_entity_hash_addr() + .map(AddressableEntityHash::new) + .unwrap(); + let package_hash_key = account + .named_keys() + .get(gh_1470_regression::PACKAGE_HASH_NAME) + .cloned() + .unwrap(); + let package_hash = package_hash_key + .into_package_addr() + .map(PackageHash::new) + .unwrap(); + + let call_contract_request = { + let args = runtime_args! { + gh_1470_regression_call::ARG_TEST_METHOD => gh_1470_regression_call::METHOD_CALL_DO_NOTHING_NO_OPTIONALS, + gh_1470_regression_call::ARG_CONTRACT_HASH => entity_hash, + }; + ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, GH_1470_REGRESSION_CALL, args) + .build() + }; + + builder + .exec(call_contract_request) + .expect_success() + .commit(); + + let call_versioned_contract_request = { + let args = runtime_args! 
{ + gh_1470_regression_call::ARG_TEST_METHOD => gh_1470_regression_call::METHOD_CALL_VERSIONED_DO_NOTHING_NO_OPTIONALS, + gh_1470_regression_call::ARG_CONTRACT_PACKAGE_HASH => package_hash, + }; + ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, GH_1470_REGRESSION_CALL, args) + .build() + }; + + builder + .exec(call_versioned_contract_request) + .expect_success() + .commit(); +} + +#[ignore] +#[test] +fn gh_1470_call_contract_should_not_accept_extra_args() { + let mut builder = setup(); + + let exec_request_1 = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + GH_1470_REGRESSION, + RuntimeArgs::new(), + ) + .build(); + + builder.exec(exec_request_1).expect_success().commit(); + + let account = builder + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .expect("must have default contract package"); + + let contract_hash_key = account + .named_keys() + .get(gh_1470_regression::CONTRACT_HASH_NAME) + .cloned() + .unwrap(); + let entity_hash = contract_hash_key + .into_entity_hash_addr() + .map(AddressableEntityHash::new) + .unwrap(); + let package_hash_key = account + .named_keys() + .get(gh_1470_regression::PACKAGE_HASH_NAME) + .cloned() + .unwrap(); + let package_hash = package_hash_key + .into_package_addr() + .map(PackageHash::new) + .unwrap(); + + let call_contract_request = { + let args = runtime_args! { + gh_1470_regression_call::ARG_TEST_METHOD => gh_1470_regression_call::METHOD_CALL_DO_NOTHING_EXTRA, + gh_1470_regression_call::ARG_CONTRACT_HASH => entity_hash, + }; + ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, GH_1470_REGRESSION_CALL, args) + .build() + }; + + builder + .exec(call_contract_request) + .expect_success() + .commit(); + + let call_versioned_contract_request = { + let args = runtime_args! 
{ + gh_1470_regression_call::ARG_TEST_METHOD => gh_1470_regression_call::METHOD_CALL_VERSIONED_DO_NOTHING_EXTRA, + gh_1470_regression_call::ARG_CONTRACT_PACKAGE_HASH => package_hash, + }; + ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, GH_1470_REGRESSION_CALL, args) + .build() + }; + + builder + .exec(call_versioned_contract_request) + .expect_success() + .commit(); +} + +#[ignore] +#[test] +fn gh_1470_call_contract_should_verify_wrong_argument_types() { + let mut builder = setup(); + + let exec_request_1 = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + GH_1470_REGRESSION, + RuntimeArgs::new(), + ) + .build(); + + builder.exec(exec_request_1).expect_success().commit(); + + let account = builder + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .expect("must have contract"); + + let entity_hash_key = account + .named_keys() + .get(gh_1470_regression::CONTRACT_HASH_NAME) + .cloned() + .unwrap(); + let entity_hash = entity_hash_key + .into_entity_hash_addr() + .map(AddressableEntityHash::new) + .unwrap(); + let package_hash_key = account + .named_keys() + .get(gh_1470_regression::PACKAGE_HASH_NAME) + .cloned() + .unwrap(); + let package_hash = package_hash_key + .into_package_addr() + .map(PackageHash::new) + .unwrap(); + + let call_contract_request = { + let args = runtime_args! { + gh_1470_regression_call::ARG_TEST_METHOD => + gh_1470_regression_call::METHOD_CALL_DO_NOTHING_TYPE_MISMATCH, + gh_1470_regression_call::ARG_CONTRACT_HASH => entity_hash, }; + ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, GH_1470_REGRESSION_CALL, args) + .build() + }; + + builder.exec(call_contract_request).commit(); + + let exec_result = builder + .get_last_exec_result() + .expect("should have last response"); + let call_contract_error = exec_result.error().cloned().expect("should have error"); + + let call_versioned_contract_request = { + let args = runtime_args! 
{ + gh_1470_regression_call::ARG_TEST_METHOD => + gh_1470_regression_call::METHOD_CALL_VERSIONED_DO_NOTHING_TYPE_MISMATCH, + gh_1470_regression_call::ARG_CONTRACT_PACKAGE_HASH => package_hash, }; + ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, GH_1470_REGRESSION_CALL, args) + .build() + }; + + builder.exec(call_versioned_contract_request).commit(); + + let exec_result = builder + .get_last_exec_result() + .expect("should have last response"); + let call_versioned_contract_error = exec_result.error().expect("should have error"); + + let expected = gh_1470_regression::Arg1Type::cl_type(); + let found = gh_1470_regression::Arg3Type::cl_type(); + + let expected_type_mismatch = + StoredValueTypeMismatch::new(format!("{:?}", expected), format!("{:?}", found)); + + match (&call_contract_error, &call_versioned_contract_error) { + ( + Error::Exec(ExecError::TypeMismatch(lhs_type_mismatch)), + Error::Exec(ExecError::TypeMismatch(rhs_type_mismatch)), + ) if lhs_type_mismatch == &expected_type_mismatch + && rhs_type_mismatch == &expected_type_mismatch => {} + _ => panic!( + "Both variants should raise same error: lhs={:?} rhs={:?}", + call_contract_error, call_versioned_contract_error + ), + } + + assert!(matches!( + call_versioned_contract_error, + Error::Exec(ExecError::TypeMismatch(type_mismatch)) + if *type_mismatch == expected_type_mismatch + )); + assert!(matches!( + call_contract_error, + Error::Exec(ExecError::TypeMismatch(type_mismatch)) + if type_mismatch == expected_type_mismatch + )); +} + +#[ignore] +#[test] +fn gh_1470_call_contract_should_verify_wrong_optional_argument_types() { + let mut builder = setup(); + + let exec_request_1 = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + GH_1470_REGRESSION, + RuntimeArgs::new(), + ) + .build(); + + builder.exec(exec_request_1).expect_success().commit(); + + let account = builder + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .expect("must have default contract package"); + + 
let entity_hash_key = account + .named_keys() + .get(gh_1470_regression::CONTRACT_HASH_NAME) + .cloned() + .unwrap(); + let entity_hash = entity_hash_key + .into_entity_hash_addr() + .map(AddressableEntityHash::new) + .unwrap(); + let package_hash_key = account + .named_keys() + .get(gh_1470_regression::PACKAGE_HASH_NAME) + .cloned() + .unwrap(); + let package_hash = package_hash_key + .into_package_addr() + .map(PackageHash::new) + .unwrap(); + + let call_contract_request = { + let args = runtime_args! { + gh_1470_regression_call::ARG_TEST_METHOD => + gh_1470_regression_call::METHOD_CALL_DO_NOTHING_OPTIONAL_TYPE_MISMATCH, + gh_1470_regression_call::ARG_CONTRACT_HASH => entity_hash, + }; + ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, GH_1470_REGRESSION_CALL, args) + .build() + }; + + builder + .exec(call_contract_request) + .expect_failure() + .commit(); + + let exec_result = builder + .get_last_exec_result() + .expect("should have last response"); + let call_contract_error = exec_result.error().cloned().expect("should have error"); + + let call_versioned_contract_request = { + let args = runtime_args! 
{ + gh_1470_regression_call::ARG_TEST_METHOD => gh_1470_regression_call::METHOD_CALL_VERSIONED_DO_NOTHING_OPTIONAL_TYPE_MISMATCH, + gh_1470_regression_call::ARG_CONTRACT_PACKAGE_HASH => package_hash, + }; + ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, GH_1470_REGRESSION_CALL, args) + .build() + }; + + builder.exec(call_versioned_contract_request).commit(); + + let exec_result = builder + .get_last_exec_result() + .expect("should have last response"); + let call_versioned_contract_error = exec_result.error().expect("should have error"); + + let expected = gh_1470_regression::Arg3Type::cl_type(); + let found = gh_1470_regression::Arg4Type::cl_type(); + + let expected_type_mismatch = + StoredValueTypeMismatch::new(format!("{:?}", expected), format!("{:?}", found)); + + match (&call_contract_error, &call_versioned_contract_error) { + ( + Error::Exec(ExecError::TypeMismatch(lhs_type_mismatch)), + Error::Exec(ExecError::TypeMismatch(rhs_type_mismatch)), + ) if lhs_type_mismatch == &expected_type_mismatch + && rhs_type_mismatch == &expected_type_mismatch => {} + _ => panic!( + "Both variants should raise same error: lhs={:?} rhs={:?}", + call_contract_error, call_versioned_contract_error + ), + } + + assert!(matches!( + call_versioned_contract_error, + Error::Exec(ExecError::TypeMismatch(type_mismatch)) + if *type_mismatch == expected_type_mismatch + )); + assert!(matches!( + call_contract_error, + Error::Exec(ExecError::TypeMismatch(type_mismatch)) + if type_mismatch == expected_type_mismatch + )); +} + +#[ignore] +#[test] +fn should_transfer_after_major_version_bump_from_1_2_0() { + let (mut builder, lmdb_fixture_state, _temp_dir) = + lmdb_fixture::builder_from_global_state_fixture(lmdb_fixture::RELEASE_1_3_1); + + let previous_protocol_version = lmdb_fixture_state.genesis_protocol_version(); + + let current_protocol_version = lmdb_fixture_state.genesis_protocol_version(); + + let new_protocol_version = + 
ProtocolVersion::from_parts(current_protocol_version.value().major + 1, 0, 0); + + let global_state_update = + apply_global_state_update(&builder, lmdb_fixture_state.post_state_hash); + + let mut upgrade_request = { + UpgradeRequestBuilder::new() + .with_current_protocol_version(previous_protocol_version) + .with_new_protocol_version(new_protocol_version) + .with_activation_point(DEFAULT_ACTIVATION_POINT) + .with_global_state_update(global_state_update) + .with_new_gas_hold_handling(HoldBalanceHandling::Accrued) + .with_new_gas_hold_interval(1200u64) + .build() + }; + + builder + .with_block_time(Timestamp::now().into()) + .upgrade(&mut upgrade_request) + .expect_upgrade_success(); + + let transfer = TransferRequestBuilder::new(1, AccountHash::new([3; 32])) + .with_transfer_id(1) + .build(); + + builder.transfer_and_commit(transfer).expect_success(); +} + +#[ignore] +#[test] +fn should_transfer_after_minor_version_bump_from_1_2_0() { + let (mut builder, lmdb_fixture_state, _temp_dir) = + lmdb_fixture::builder_from_global_state_fixture(lmdb_fixture::RELEASE_1_3_1); + + let current_protocol_version = lmdb_fixture_state.genesis_protocol_version(); + + let new_protocol_version = ProtocolVersion::from_parts( + current_protocol_version.value().major, + current_protocol_version.value().minor + 1, + 0, + ); + + let global_state_update = + apply_global_state_update(&builder, lmdb_fixture_state.post_state_hash); + + let mut upgrade_request = { + UpgradeRequestBuilder::new() + .with_current_protocol_version(current_protocol_version) + .with_new_protocol_version(new_protocol_version) + .with_activation_point(DEFAULT_ACTIVATION_POINT) + .with_global_state_update(global_state_update) + .with_new_gas_hold_handling(HoldBalanceHandling::Accrued) + .with_new_gas_hold_interval(1200u64) + .build() + }; + + builder + .with_block_time(Timestamp::now().into()) + .upgrade(&mut upgrade_request) + .expect_upgrade_success(); + + let transfer = TransferRequestBuilder::new(1, 
AccountHash::new([3; 32])) + .with_transfer_id(1) + .build(); + builder.transfer_and_commit(transfer).expect_success(); +} + +#[ignore] +#[test] +fn should_add_bid_after_major_bump() { + let (mut builder, lmdb_fixture_state, _temp_dir) = + lmdb_fixture::builder_from_global_state_fixture(lmdb_fixture::RELEASE_1_3_1); + + let current_protocol_version = lmdb_fixture_state.genesis_protocol_version(); + + let new_protocol_version = + ProtocolVersion::from_parts(current_protocol_version.value().major + 1, 0, 0); + + let global_state_update = + apply_global_state_update(&builder, lmdb_fixture_state.post_state_hash); + + let mut upgrade_request = { + UpgradeRequestBuilder::new() + .with_current_protocol_version(current_protocol_version) + .with_new_protocol_version(new_protocol_version) + .with_activation_point(DEFAULT_ACTIVATION_POINT) + .with_global_state_update(global_state_update) + .with_new_gas_hold_handling(HoldBalanceHandling::Accrued) + .with_new_gas_hold_interval(1200u64) + .build() + }; + + builder + .with_block_time(Timestamp::now().into()) + .upgrade(&mut upgrade_request) + .expect_upgrade_success(); + + let add_bid_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_ADD_BID, + runtime_args! 
{ + auction::ARG_PUBLIC_KEY => DEFAULT_ACCOUNT_PUBLIC_KEY.clone(), + auction::ARG_AMOUNT => U512::from(BOND_AMOUNT), + auction::ARG_DELEGATION_RATE => BID_DELEGATION_RATE, + }, + ) + .build(); + + builder.exec(add_bid_request).expect_success().commit(); + + let _default_account = builder + .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .expect("should have default account"); +} + +#[ignore] +#[test] +fn should_add_bid_after_minor_bump() { + let (mut builder, lmdb_fixture_state, _temp_dir) = + lmdb_fixture::builder_from_global_state_fixture(lmdb_fixture::RELEASE_1_3_1); + + let current_protocol_version = lmdb_fixture_state.genesis_protocol_version(); + + let new_protocol_version = ProtocolVersion::from_parts( + current_protocol_version.value().major, + current_protocol_version.value().minor + 1, + 0, + ); + + let global_state_update = + apply_global_state_update(&builder, lmdb_fixture_state.post_state_hash); + + let mut upgrade_request = { + UpgradeRequestBuilder::new() + .with_current_protocol_version(current_protocol_version) + .with_new_protocol_version(new_protocol_version) + .with_activation_point(DEFAULT_ACTIVATION_POINT) + .with_global_state_update(global_state_update) + .with_new_gas_hold_handling(HoldBalanceHandling::Accrued) + .with_new_gas_hold_interval(1200u64) + .build() + }; + + builder + .with_block_time(Timestamp::now().into()) + .upgrade(&mut upgrade_request) + .expect_upgrade_success(); + + let add_bid_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_ADD_BID, + runtime_args! 
{ + auction::ARG_PUBLIC_KEY => DEFAULT_ACCOUNT_PUBLIC_KEY.clone(), + auction::ARG_AMOUNT => U512::from(BOND_AMOUNT), + auction::ARG_DELEGATION_RATE => BID_DELEGATION_RATE, + }, + ) + .build(); + + builder.exec(add_bid_request).expect_success().commit(); + + let _default_account = builder + .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .expect("should have default account"); +} + +#[ignore] +#[test] +fn should_wasm_transfer_after_major_bump() { + let (mut builder, lmdb_fixture_state, _temp_dir) = + lmdb_fixture::builder_from_global_state_fixture(lmdb_fixture::RELEASE_1_3_1); + + let current_protocol_version = lmdb_fixture_state.genesis_protocol_version(); + + let new_protocol_version = + ProtocolVersion::from_parts(current_protocol_version.value().major + 1, 0, 0); + + let global_state_update = + apply_global_state_update(&builder, lmdb_fixture_state.post_state_hash); + + let mut upgrade_request = { + UpgradeRequestBuilder::new() + .with_current_protocol_version(current_protocol_version) + .with_new_protocol_version(new_protocol_version) + .with_activation_point(DEFAULT_ACTIVATION_POINT) + .with_global_state_update(global_state_update) + .with_new_gas_hold_handling(HoldBalanceHandling::Accrued) + .with_new_gas_hold_interval(1200u64) + .build() + }; + + builder + .with_block_time(Timestamp::now().into()) + .upgrade(&mut upgrade_request) + .expect_upgrade_success(); + + let wasm_transfer = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! 
{ + ARG_AMOUNT => U512::one(), + ARG_TARGET => AccountHash::new([1; 32]), + }, + ) + .build(); + + builder.exec(wasm_transfer).expect_success().commit(); + + let _default_account = builder + .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .expect("should have default account"); +} + +#[ignore] +#[test] +fn should_wasm_transfer_after_minor_bump() { + let (mut builder, lmdb_fixture_state, _temp_dir) = + lmdb_fixture::builder_from_global_state_fixture(lmdb_fixture::RELEASE_1_3_1); + + let current_protocol_version = lmdb_fixture_state.genesis_protocol_version(); + + let new_protocol_version = ProtocolVersion::from_parts( + current_protocol_version.value().major, + current_protocol_version.value().minor + 1, + 0, + ); + + let global_state_update = + apply_global_state_update(&builder, lmdb_fixture_state.post_state_hash); + + let mut upgrade_request = { + UpgradeRequestBuilder::new() + .with_current_protocol_version(current_protocol_version) + .with_new_protocol_version(new_protocol_version) + .with_activation_point(DEFAULT_ACTIVATION_POINT) + .with_global_state_update(global_state_update) + .with_new_gas_hold_handling(HoldBalanceHandling::Accrued) + .with_new_gas_hold_interval(1200u64) + .build() + }; + + builder + .with_block_time(Timestamp::now().into()) + .upgrade(&mut upgrade_request) + .expect_upgrade_success(); + + let wasm_transfer = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! 
{ + ARG_AMOUNT => U512::one(), + ARG_TARGET => AccountHash::new([1; 32]), + }, + ) + .build(); + + builder.exec(wasm_transfer).expect_success().commit(); + + let _default_account = builder + .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .expect("should have default account"); +} + +#[ignore] +#[test] +fn should_upgrade_from_1_3_1_rel_fixture() { + let (mut builder, lmdb_fixture_state, _temp_dir) = + lmdb_fixture::builder_from_global_state_fixture(lmdb_fixture::RELEASE_1_3_1); + + let previous_protocol_version = lmdb_fixture_state.genesis_protocol_version(); + + let new_protocol_version = ProtocolVersion::from_parts( + previous_protocol_version.value().major, + previous_protocol_version.value().minor + 1, + 0, + ); + + let global_state_update = + apply_global_state_update(&builder, lmdb_fixture_state.post_state_hash); + + let mut upgrade_request = { + UpgradeRequestBuilder::new() + .with_current_protocol_version(previous_protocol_version) + .with_new_protocol_version(new_protocol_version) + .with_activation_point(DEFAULT_ACTIVATION_POINT) + .with_global_state_update(global_state_update) + .build() + }; + + builder + .upgrade(&mut upgrade_request) + .expect_upgrade_success(); +} diff --git a/execution_engine_testing/tests/src/test/regression/gh_1688.rs b/execution_engine_testing/tests/src/test/regression/gh_1688.rs new file mode 100644 index 0000000000..a96fe129da --- /dev/null +++ b/execution_engine_testing/tests/src/test/regression/gh_1688.rs @@ -0,0 +1,156 @@ +use casper_engine_test_support::{ + deploy_item::DeployItem, DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder, + DEFAULT_ACCOUNT_ADDR, DEFAULT_PAYMENT, LOCAL_GENESIS_REQUEST, +}; +use casper_types::{ + runtime_args, system::standard_payment::ARG_AMOUNT, AddressableEntityHash, PackageHash, + RuntimeArgs, +}; + +const GH_1688_REGRESSION: &str = "gh_1688_regression.wasm"; + +const METHOD_PUT_KEY: &str = "put_key"; +const NEW_KEY_NAME: &str = "Hello"; +const PACKAGE_KEY: &str = 
"contract_package"; +const CONTRACT_HASH_KEY: &str = "contract_hash"; + +fn setup() -> (LmdbWasmTestBuilder, PackageHash, AddressableEntityHash) { + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let install_contract_request_1 = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + GH_1688_REGRESSION, + runtime_args! {}, + ) + .build(); + + builder + .exec(install_contract_request_1) + .expect_success() + .commit(); + + let account = builder + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .unwrap(); + + let package_hash_key = account + .named_keys() + .get(PACKAGE_KEY) + .expect("should have package hash"); + + let entity_hash_key = account + .named_keys() + .get(CONTRACT_HASH_KEY) + .expect("should have hash"); + + let contract_package_hash = package_hash_key + .into_hash_addr() + .map(PackageHash::new) + .expect("should be hash"); + + let entity_hash = entity_hash_key + .into_entity_hash_addr() + .map(AddressableEntityHash::new) + .expect("should be hash"); + + (builder, contract_package_hash, entity_hash) +} + +fn test(deploy_item_builder: impl FnOnce(PackageHash, AddressableEntityHash) -> DeployItem) { + let (mut builder, contract_package_hash, contract_hash) = setup(); + + let deploy_item = deploy_item_builder(contract_package_hash, contract_hash); + let exec_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); + builder.exec(exec_request).expect_success().commit(); + + let account = builder + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .unwrap(); + let contract = builder + .get_entity_with_named_keys_by_entity_hash(contract_hash) + .expect("should have contract"); + + assert!( + contract.named_keys().contains(NEW_KEY_NAME), + "expected {} in {:?}", + NEW_KEY_NAME, + contract.named_keys() + ); + assert!( + !account.named_keys().contains(NEW_KEY_NAME), + "unexpected {} in {:?}", + NEW_KEY_NAME, + contract.named_keys() + ); +} + 
+#[ignore] +#[test] +fn should_run_gh_1688_regression_stored_versioned_contract_by_hash() { + test(|contract_package_hash, _contract_hash| { + DeployItemBuilder::new() + .with_address(*DEFAULT_ACCOUNT_ADDR) + .with_stored_versioned_contract_by_hash( + contract_package_hash.value(), + None, + METHOD_PUT_KEY, + RuntimeArgs::default(), + ) + .with_standard_payment(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, }) + .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) + .with_deploy_hash([42; 32]) + .build() + }); +} + +#[ignore] +#[test] +fn should_run_gh_1688_regression_stored_versioned_contract_by_name() { + test(|_contract_package_hash, _contract_hash| { + DeployItemBuilder::new() + .with_address(*DEFAULT_ACCOUNT_ADDR) + .with_stored_versioned_contract_by_name( + PACKAGE_KEY, + None, + METHOD_PUT_KEY, + RuntimeArgs::default(), + ) + .with_standard_payment(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, }) + .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) + .with_deploy_hash([42; 32]) + .build() + }); +} + +#[ignore] +#[test] +fn should_run_gh_1688_regression_stored_contract_by_hash() { + test(|_contract_package_hash, contract_hash| { + DeployItemBuilder::new() + .with_address(*DEFAULT_ACCOUNT_ADDR) + .with_stored_session_hash(contract_hash, METHOD_PUT_KEY, RuntimeArgs::default()) + .with_standard_payment(runtime_args! { ARG_AMOUNT => *DEFAULT_PAYMENT, }) + .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) + .with_deploy_hash([42; 32]) + .build() + }); +} + +#[ignore] +#[test] +fn should_run_gh_1688_regression_stored_contract_by_name() { + test(|_contract_package_hash, _contract_hash| { + DeployItemBuilder::new() + .with_address(*DEFAULT_ACCOUNT_ADDR) + .with_stored_session_named_key( + CONTRACT_HASH_KEY, + METHOD_PUT_KEY, + RuntimeArgs::default(), + ) + .with_standard_payment(runtime_args! 
{ ARG_AMOUNT => *DEFAULT_PAYMENT, }) + .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) + .with_deploy_hash([42; 32]) + .build() + }); +} diff --git a/execution_engine_testing/tests/src/test/regression/gh_1902.rs b/execution_engine_testing/tests/src/test/regression/gh_1902.rs new file mode 100644 index 0000000000..a443fad01a --- /dev/null +++ b/execution_engine_testing/tests/src/test/regression/gh_1902.rs @@ -0,0 +1,188 @@ +use num_rational::Ratio; +use once_cell::sync::Lazy; + +use casper_engine_test_support::{ + DeployItemBuilder, ExecuteRequest, ExecuteRequestBuilder, LmdbWasmTestBuilder, + TransferRequestBuilder, DEFAULT_ACCOUNT_ADDR, DEFAULT_ACCOUNT_PUBLIC_KEY, + LOCAL_GENESIS_REQUEST, MINIMUM_ACCOUNT_CREATION_BALANCE, +}; +use casper_execution_engine::engine_state::engine_config::DEFAULT_MINIMUM_DELEGATION_AMOUNT; +use casper_types::{ + account::AccountHash, + runtime_args, + system::{ + auction::{self, DelegationRate}, + standard_payment, + }, + FeeHandling, Gas, PublicKey, RefundHandling, SecretKey, U512, +}; + +const BOND_AMOUNT: u64 = 42; +const DELEGATE_AMOUNT: u64 = 100 + DEFAULT_MINIMUM_DELEGATION_AMOUNT; +const DELEGATION_RATE: DelegationRate = 10; + +static ACCOUNT_1_SECRET_KEY: Lazy = + Lazy::new(|| SecretKey::secp256k1_from_bytes([99; 32]).unwrap()); +static ACCOUNT_1_PUBLIC_KEY: Lazy = + Lazy::new(|| PublicKey::from(&*ACCOUNT_1_SECRET_KEY)); +static ACCOUNT_1_ADDR: Lazy = Lazy::new(|| AccountHash::from(&*ACCOUNT_1_PUBLIC_KEY)); + +fn setup() -> LmdbWasmTestBuilder { + let mut builder = LmdbWasmTestBuilder::default(); + + let chainspec = builder + .chainspec() + .clone() + .with_refund_handling(RefundHandling::Refund { + refund_ratio: Ratio::new(1, 1), + }) + .with_fee_handling(FeeHandling::PayToProposer); + builder.with_chainspec(chainspec); + + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let transfer_request = + TransferRequestBuilder::new(MINIMUM_ACCOUNT_CREATION_BALANCE, *ACCOUNT_1_ADDR).build(); + builder + 
.transfer_and_commit(transfer_request) + .expect_success(); + builder +} + +fn exec_and_assert_costs( + builder: &mut LmdbWasmTestBuilder, + exec_request: ExecuteRequest, + expected_gas_cost: Gas, +) { + builder.exec(exec_request).expect_success().commit(); + assert_eq!(builder.last_exec_gas_consumed(), expected_gas_cost); +} + +#[ignore] +#[test] +fn should_not_charge_for_create_purse_in_first_time_bond() { + let mut builder = setup(); + + let bond_amount = U512::from(BOND_AMOUNT); + // This amount should be enough to make first time add_bid call. + let add_bid_cost = builder.get_auction_costs().add_bid; + + let pay_cost = builder + .chainspec() + .system_costs_config + .standard_payment_costs() + .pay; + + let add_bid_payment_amount = U512::from(add_bid_cost + pay_cost as u64) * 2; + + let sender = *DEFAULT_ACCOUNT_ADDR; + let contract_hash = builder.get_auction_contract_hash(); + let entry_point = auction::METHOD_ADD_BID; + let payment_args = runtime_args! { standard_payment::ARG_AMOUNT => add_bid_payment_amount, }; + let session_args = runtime_args! 
{ + auction::ARG_PUBLIC_KEY => DEFAULT_ACCOUNT_PUBLIC_KEY.clone(), + auction::ARG_AMOUNT => bond_amount, + auction::ARG_DELEGATION_RATE => DELEGATION_RATE, + }; + + let deploy_item = DeployItemBuilder::new() + .with_address(sender) + .with_stored_session_hash(contract_hash, entry_point, session_args) + .with_standard_payment(payment_args) + .with_authorization_keys(&[sender]) + .with_deploy_hash([43; 32]) + .build(); + + let add_bid_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); + + exec_and_assert_costs(&mut builder, add_bid_request, Gas::from(add_bid_cost)); + + let delegate_cost = builder.get_auction_costs().delegate; + let delegate_payment_amount = U512::from(delegate_cost); + let delegate_amount = U512::from(DELEGATE_AMOUNT); + + let sender = *ACCOUNT_1_ADDR; + let contract_hash = builder.get_auction_contract_hash(); + let entry_point = auction::METHOD_DELEGATE; + let payment_args = runtime_args! { + standard_payment::ARG_AMOUNT => delegate_payment_amount, + }; + let session_args = runtime_args! 
{ + auction::ARG_DELEGATOR => ACCOUNT_1_PUBLIC_KEY.clone(), + auction::ARG_VALIDATOR => DEFAULT_ACCOUNT_PUBLIC_KEY.clone(), + auction::ARG_AMOUNT => delegate_amount, + }; + let deploy_hash = [55; 32]; + + let deploy_item = DeployItemBuilder::new() + .with_address(sender) + .with_stored_session_hash(contract_hash, entry_point, session_args) + .with_standard_payment(payment_args) + .with_authorization_keys(&[sender]) + .with_deploy_hash(deploy_hash) + .build(); + + let delegate_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); + + exec_and_assert_costs(&mut builder, delegate_request, Gas::from(delegate_cost)); + + let undelegate_cost = builder.get_auction_costs().undelegate; + let undelegate_payment_amount = U512::from(undelegate_cost); + let undelegate_amount = delegate_amount; + + let sender = *ACCOUNT_1_ADDR; + let contract_hash = builder.get_auction_contract_hash(); + let entry_point = auction::METHOD_UNDELEGATE; + let payment_args = runtime_args! { + standard_payment::ARG_AMOUNT => undelegate_payment_amount, + }; + let session_args = runtime_args! { + auction::ARG_DELEGATOR => ACCOUNT_1_PUBLIC_KEY.clone(), + auction::ARG_VALIDATOR => DEFAULT_ACCOUNT_PUBLIC_KEY.clone(), + auction::ARG_AMOUNT => undelegate_amount, + }; + let deploy_hash = [56; 32]; + + let deploy_item = DeployItemBuilder::new() + .with_address(sender) + .with_stored_session_hash(contract_hash, entry_point, session_args) + .with_standard_payment(payment_args) + .with_authorization_keys(&[sender]) + .with_deploy_hash(deploy_hash) + .build(); + + let undelegate_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); + + exec_and_assert_costs(&mut builder, undelegate_request, Gas::from(undelegate_cost)); + + let unbond_amount = bond_amount; + // This amount should be enough to make first time add_bid call. 
+ let withdraw_bid_cost = builder.get_auction_costs().withdraw_bid; + let withdraw_bid_payment_amount = U512::from(withdraw_bid_cost); + + let sender = *DEFAULT_ACCOUNT_ADDR; + let contract_hash = builder.get_auction_contract_hash(); + let entry_point = auction::METHOD_WITHDRAW_BID; + let payment_args = + runtime_args! { standard_payment::ARG_AMOUNT => withdraw_bid_payment_amount, }; + let session_args = runtime_args! { + auction::ARG_PUBLIC_KEY => DEFAULT_ACCOUNT_PUBLIC_KEY.clone(), + auction::ARG_AMOUNT => unbond_amount, + }; + + let deploy_item = DeployItemBuilder::new() + .with_address(sender) + .with_stored_session_hash(contract_hash, entry_point, session_args) + .with_standard_payment(payment_args) + .with_authorization_keys(&[sender]) + .with_deploy_hash([58; 32]) + .build(); + + let withdraw_bid_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); + + exec_and_assert_costs( + &mut builder, + withdraw_bid_request, + Gas::from(withdraw_bid_cost), + ); +} diff --git a/execution_engine_testing/tests/src/test/regression/gh_1931.rs b/execution_engine_testing/tests/src/test/regression/gh_1931.rs new file mode 100644 index 0000000000..ae2f2b5942 --- /dev/null +++ b/execution_engine_testing/tests/src/test/regression/gh_1931.rs @@ -0,0 +1,35 @@ +use casper_engine_test_support::{ + ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST, +}; +use casper_types::{RuntimeArgs, StoredValue}; + +const CONTRACT_NAME: &str = "do_nothing_stored.wasm"; +const CONTRACT_PACKAGE_NAMED_KEY: &str = "do_nothing_package_hash"; + +#[ignore] +#[test] +fn should_query_contract_package() { + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()).commit(); + + let install_request = + ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, CONTRACT_NAME, RuntimeArgs::new()) + .build(); + + builder.exec(install_request).expect_success().commit(); + + let contract_package_hash = builder + 
.get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .unwrap() + .named_keys() + .clone() + .get(CONTRACT_PACKAGE_NAMED_KEY) + .expect("failed to get contract package named key.") + .to_owned(); + + let contract_package = builder + .query(None, contract_package_hash, &[]) + .expect("failed to find contract package"); + + assert!(matches!(contract_package, StoredValue::ContractPackage(_))); +} diff --git a/execution_engine_testing/tests/src/test/regression/gh_2280.rs b/execution_engine_testing/tests/src/test/regression/gh_2280.rs new file mode 100644 index 0000000000..083193613f --- /dev/null +++ b/execution_engine_testing/tests/src/test/regression/gh_2280.rs @@ -0,0 +1,689 @@ +use once_cell::sync::Lazy; + +use casper_engine_test_support::{ + DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder, UpgradeRequestBuilder, + DEFAULT_ACCOUNT_ADDR, DEFAULT_PROTOCOL_VERSION, LOCAL_GENESIS_REQUEST, + MINIMUM_ACCOUNT_CREATION_BALANCE, +}; +use casper_types::{ + account::AccountHash, runtime_args, system::mint, AddressableEntityHash, EraId, Gas, + HostFunction, HostFunctionCost, HostFunctionCostsV1, Key, MintCosts, Motes, + ProtocolUpgradeConfig, ProtocolVersion, PublicKey, SecretKey, WasmConfig, WasmV1Config, + WasmV2Config, DEFAULT_MAX_STACK_HEIGHT, DEFAULT_WASM_MAX_MEMORY, U512, +}; + +const TRANSFER_TO_ACCOUNT_CONTRACT: &str = "transfer_to_account.wasm"; +const TRANSFER_PURSE_TO_ACCOUNT_CONTRACT: &str = "transfer_purse_to_account.wasm"; +const GH_2280_REGRESSION_CONTRACT: &str = "gh_2280_regression.wasm"; +const GH_2280_REGRESSION_CALL_CONTRACT: &str = "gh_2280_regression_call.wasm"; +const CREATE_PURSE_01_CONTRACT: &str = "create_purse_01.wasm"; +const FAUCET_NAME: &str = "faucet"; + +static ACCOUNT_1_PK: Lazy = Lazy::new(|| { + let secret_key = SecretKey::ed25519_from_bytes([200; SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) +}); +static ACCOUNT_1_ADDR: Lazy = Lazy::new(|| ACCOUNT_1_PK.to_account_hash()); + +static 
ACCOUNT_2_PK: Lazy<PublicKey> = Lazy::new(|| {
+    let secret_key = SecretKey::ed25519_from_bytes([202; SecretKey::ED25519_LENGTH]).unwrap();
+    PublicKey::from(&secret_key)
+});
+static ACCOUNT_2_ADDR: Lazy<AccountHash> = Lazy::new(|| ACCOUNT_2_PK.to_account_hash());
+
+static ACCOUNT_3_PK: Lazy<PublicKey> = Lazy::new(|| {
+    let secret_key = SecretKey::ed25519_from_bytes([204; SecretKey::ED25519_LENGTH]).unwrap();
+    PublicKey::from(&secret_key)
+});
+static ACCOUNT_3_ADDR: Lazy<AccountHash> = Lazy::new(|| ACCOUNT_3_PK.to_account_hash());
+
+const ARG_TARGET: &str = "target";
+const ARG_AMOUNT: &str = "amount";
+
+const TOKEN_AMOUNT: u64 = 1_000_000;
+
+const ARG_PURSE_NAME: &str = "purse_name";
+const TEST_PURSE_NAME: &str = "test";
+
+const OLD_PROTOCOL_VERSION: ProtocolVersion = DEFAULT_PROTOCOL_VERSION;
+const NEW_PROTOCOL_VERSION: ProtocolVersion = ProtocolVersion::from_parts(
+    OLD_PROTOCOL_VERSION.value().major,
+    OLD_PROTOCOL_VERSION.value().minor,
+    OLD_PROTOCOL_VERSION.value().patch + 1,
+);
+const DEFAULT_ACTIVATION_POINT: EraId = EraId::new(1);
+
+const HOST_FUNCTION_COST_CHANGE: HostFunctionCost = 13_730_593; // random prime number
+
+const ARG_FAUCET_FUNDS: &str = "faucet_initial_balance";
+const HASH_KEY_NAME: &str = "gh_2280_hash";
+const ARG_CONTRACT_HASH: &str = "contract_hash";
+
+#[ignore]
+#[test]
+fn gh_2280_transfer_should_always_cost_the_same_gas() {
+    let session_file = TRANSFER_TO_ACCOUNT_CONTRACT;
+    let account_hash = *DEFAULT_ACCOUNT_ADDR;
+
+    let (mut builder, _) = setup();
+
+    let faucet_args_1 = runtime_args! {
+        ARG_TARGET => *ACCOUNT_1_ADDR,
+        ARG_AMOUNT => TOKEN_AMOUNT,
+    };
+
+    let fund_request_1 =
+        ExecuteRequestBuilder::standard(account_hash, session_file, faucet_args_1).build();
+    builder.exec(fund_request_1).expect_success().commit();
+
+    let gas_cost_1 = builder.last_exec_gas_consumed();
+
+    // Next time pay exactly the amount that was reported which should be also the minimum you
+    // should be able to pay next time.
+ let payment_amount = Motes::from_gas(gas_cost_1, 1).unwrap(); + + let deploy_hash: [u8; 32] = [55; 32]; + let faucet_args_2 = runtime_args! { + ARG_TARGET => *ACCOUNT_2_ADDR, + ARG_AMOUNT => TOKEN_AMOUNT, + }; + + let deploy_item = DeployItemBuilder::new() + .with_address(account_hash) + .with_session_code(session_file, faucet_args_2) + // + default_create_purse_cost + .with_standard_payment(runtime_args! { + ARG_AMOUNT => payment_amount.value() + }) + .with_authorization_keys(&[account_hash]) + .with_deploy_hash(deploy_hash) + .build(); + + let fund_request_2 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); + builder.exec(fund_request_2).expect_success().commit(); + + let gas_cost_2 = builder.last_exec_gas_consumed(); + + assert_eq!(gas_cost_1, gas_cost_2); + + // Increase "transfer_to_account" host function call exactly by X, so we can assert that + // transfer cost increased by exactly X without hidden fees. + let default_host_function_costs = HostFunctionCostsV1::default(); + + let default_transfer_to_account_cost = default_host_function_costs.transfer_to_account.cost(); + let new_transfer_to_account_cost = default_transfer_to_account_cost + .checked_add(HOST_FUNCTION_COST_CHANGE) + .expect("should add without overflow"); + let new_transfer_to_account = HostFunction::fixed(new_transfer_to_account_cost); + + let new_host_function_costs = HostFunctionCostsV1 { + transfer_to_account: new_transfer_to_account, + ..default_host_function_costs + }; + + let new_wasm_config = + make_wasm_config(new_host_function_costs, builder.chainspec().wasm_config); + + // Inflate affected system contract entry point cost to the maximum + let new_mint_create_cost = u32::MAX; + let new_mint_costs = MintCosts { + create: new_mint_create_cost, + ..Default::default() + }; + + let updated_chainspec = builder + .chainspec() + .clone() + .with_wasm_config(new_wasm_config) + .with_mint_costs(new_mint_costs); + + builder.with_chainspec(updated_chainspec); + + let mut 
upgrade_request = make_upgrade_request(); + builder.upgrade(&mut upgrade_request); + + let deploy_hash: [u8; 32] = [77; 32]; + let faucet_args_3 = runtime_args! { + ARG_TARGET => *ACCOUNT_3_ADDR, + ARG_AMOUNT => TOKEN_AMOUNT, + }; + + let deploy_item = DeployItemBuilder::new() + .with_address(account_hash) + .with_session_code(session_file, faucet_args_3) + .with_standard_payment(runtime_args! { + ARG_AMOUNT => payment_amount.value() + HOST_FUNCTION_COST_CHANGE + }) + .with_authorization_keys(&[account_hash]) + .with_deploy_hash(deploy_hash) + .build(); + + let fund_request_3 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); + + builder.exec(fund_request_3).expect_success().commit(); + + let gas_cost_3 = builder.last_exec_gas_consumed(); + + assert!(gas_cost_3 > gas_cost_1); + assert!(gas_cost_3 > gas_cost_2); +} + +#[ignore] +#[test] +fn gh_2280_create_purse_should_always_cost_the_same_gas() { + let account_hash = *DEFAULT_ACCOUNT_ADDR; + let session_file = CREATE_PURSE_01_CONTRACT; + + let (mut builder, _) = setup(); + + let create_purse_args_1 = runtime_args! { + ARG_PURSE_NAME => TEST_PURSE_NAME + }; + + let fund_request_1 = + ExecuteRequestBuilder::standard(account_hash, session_file, create_purse_args_1).build(); + builder.exec(fund_request_1).expect_success().commit(); + + let gas_cost_1 = builder.last_exec_gas_consumed(); + + // Next time pay exactly the amount that was reported which should be also the minimum you + // should be able to pay next time. + let payment_amount = Motes::from_gas(gas_cost_1, 1).unwrap(); + + let deploy_hash: [u8; 32] = [55; 32]; + let create_purse_args_2 = runtime_args! { + ARG_PURSE_NAME => TEST_PURSE_NAME, + }; + + let deploy_item = DeployItemBuilder::new() + .with_address(account_hash) + .with_session_code(session_file, create_purse_args_2) + // + default_create_purse_cost + .with_standard_payment(runtime_args! 
{ + ARG_AMOUNT => payment_amount.value() + }) + .with_authorization_keys(&[account_hash]) + .with_deploy_hash(deploy_hash) + .build(); + + let fund_request_2 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); + builder.exec(fund_request_2).expect_success().commit(); + + let gas_cost_2 = builder.last_exec_gas_consumed(); + + assert_eq!(gas_cost_1, gas_cost_2); + + let mut upgrade_request = make_upgrade_request(); + + // Increase "transfer_to_account" host function call exactly by X, so we can assert that + // transfer cost increased by exactly X without hidden fees. + let host_function_costs = builder + .chainspec() + .wasm_config + .v1() + .take_host_function_costs(); + + let default_create_purse_cost = host_function_costs.create_purse.cost(); + let new_create_purse_cost = default_create_purse_cost + .checked_add(HOST_FUNCTION_COST_CHANGE) + .expect("should add without overflow"); + let new_create_purse = HostFunction::fixed(new_create_purse_cost); + + let new_host_function_costs = HostFunctionCostsV1 { + create_purse: new_create_purse, + ..host_function_costs + }; + + let new_wasm_config = + make_wasm_config(new_host_function_costs, builder.chainspec().wasm_config); + + // Inflate affected system contract entry point cost to the maximum + let new_mint_create_cost = u32::MAX; + let new_mint_costs = MintCosts { + create: new_mint_create_cost, + ..Default::default() + }; + + let updated_chainspec = builder + .chainspec() + .clone() + .with_wasm_config(new_wasm_config) + .with_mint_costs(new_mint_costs); + + builder + .with_chainspec(updated_chainspec) + .upgrade(&mut upgrade_request) + .expect_upgrade_success(); + + let deploy_hash: [u8; 32] = [77; 32]; + let create_purse_args_3 = runtime_args! { + ARG_PURSE_NAME => TEST_PURSE_NAME, + }; + + let deploy_item = DeployItemBuilder::new() + .with_address(account_hash) + .with_session_code(session_file, create_purse_args_3) + .with_standard_payment(runtime_args! 
{ + ARG_AMOUNT => payment_amount.value() + HOST_FUNCTION_COST_CHANGE + }) + .with_authorization_keys(&[account_hash]) + .with_deploy_hash(deploy_hash) + .build(); + + let fund_request_3 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); + + builder.exec(fund_request_3).expect_success().commit(); + + let gas_cost_3 = builder.last_exec_gas_consumed(); + + assert!(gas_cost_3 > gas_cost_1); + assert!(gas_cost_3 > gas_cost_2); + + let gas_cost_diff = gas_cost_3.checked_sub(gas_cost_2).unwrap_or_default(); + assert_eq!( + gas_cost_diff, + Gas::new(U512::from(HOST_FUNCTION_COST_CHANGE)) + ); +} + +#[ignore] +#[test] +fn gh_2280_transfer_purse_to_account_should_always_cost_the_same_gas() { + let account_hash = *DEFAULT_ACCOUNT_ADDR; + let session_file = TRANSFER_PURSE_TO_ACCOUNT_CONTRACT; + + let (mut builder, _) = setup(); + + let faucet_args_1 = runtime_args! { + ARG_TARGET => *ACCOUNT_1_ADDR, + ARG_AMOUNT => U512::from(TOKEN_AMOUNT), + }; + + let fund_request_1 = + ExecuteRequestBuilder::standard(account_hash, session_file, faucet_args_1).build(); + builder.exec(fund_request_1).expect_success().commit(); + + let gas_cost_1 = builder.last_exec_gas_consumed(); + + // Next time pay exactly the amount that was reported which should be also the minimum you + // should be able to pay next time. + let payment_amount = Motes::from_gas(gas_cost_1, 1).unwrap(); + + let deploy_hash: [u8; 32] = [55; 32]; + let faucet_args_2 = runtime_args! { + ARG_TARGET => *ACCOUNT_2_ADDR, + ARG_AMOUNT => U512::from(TOKEN_AMOUNT), + }; + + let deploy_item = DeployItemBuilder::new() + .with_address(account_hash) + .with_session_code(TRANSFER_PURSE_TO_ACCOUNT_CONTRACT, faucet_args_2) + // + default_create_purse_cost + .with_standard_payment(runtime_args! 
{ + ARG_AMOUNT => payment_amount.value() + }) + .with_authorization_keys(&[account_hash]) + .with_deploy_hash(deploy_hash) + .build(); + + let fund_request_2 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); + builder.exec(fund_request_2).expect_success().commit(); + + let gas_cost_2 = builder.last_exec_gas_consumed(); + + assert_eq!(gas_cost_1, gas_cost_2); + + let mut upgrade_request = make_upgrade_request(); + + // Increase "transfer_to_account" host function call exactly by X, so we can assert that + // transfer cost increased by exactly X without hidden fees. + let default_host_function_costs = HostFunctionCostsV1::default(); + + let default_transfer_from_purse_to_account_cost = default_host_function_costs + .transfer_from_purse_to_account + .cost(); + let new_transfer_from_purse_to_account_cost = default_transfer_from_purse_to_account_cost + .checked_add(HOST_FUNCTION_COST_CHANGE) + .expect("should add without overflow"); + let new_transfer_from_purse_to_account = + HostFunction::fixed(new_transfer_from_purse_to_account_cost); + + let new_host_function_costs = HostFunctionCostsV1 { + transfer_from_purse_to_account: new_transfer_from_purse_to_account, + ..default_host_function_costs + }; + + let new_wasm_config = + make_wasm_config(new_host_function_costs, builder.chainspec().wasm_config); + + // Inflate affected system contract entry point cost to the maximum + let new_mint_create_cost = u32::MAX; + let new_mint_costs = MintCosts { + create: new_mint_create_cost, + ..Default::default() + }; + + let updated_chainspec = builder + .chainspec() + .clone() + .with_wasm_config(new_wasm_config) + .with_mint_costs(new_mint_costs); + + builder + .with_chainspec(updated_chainspec) + .upgrade(&mut upgrade_request); + + let deploy_hash: [u8; 32] = [77; 32]; + let faucet_args_3 = runtime_args! 
{ + ARG_TARGET => *ACCOUNT_3_ADDR, + ARG_AMOUNT => U512::from(TOKEN_AMOUNT), + }; + + let deploy_item = DeployItemBuilder::new() + .with_address(account_hash) + .with_session_code(session_file, faucet_args_3) + .with_standard_payment(runtime_args! { + ARG_AMOUNT => payment_amount.value() + HOST_FUNCTION_COST_CHANGE + }) + .with_authorization_keys(&[account_hash]) + .with_deploy_hash(deploy_hash) + .build(); + + let fund_request_3 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); + + builder.exec(fund_request_3).expect_success().commit(); + + let gas_cost_3 = builder.last_exec_gas_consumed(); + + assert!(gas_cost_3 > gas_cost_1); + assert!(gas_cost_3 > gas_cost_2); +} + +#[ignore] +#[test] +fn gh_2280_stored_transfer_to_account_should_always_cost_the_same_gas() { + let account_hash = *DEFAULT_ACCOUNT_ADDR; + let entry_point = FAUCET_NAME; + + let (mut builder, TestContext { gh_2280_regression }) = setup(); + + let faucet_args_1 = runtime_args! { + ARG_TARGET => *ACCOUNT_1_ADDR, + }; + + let fund_request_1 = ExecuteRequestBuilder::contract_call_by_hash( + account_hash, + gh_2280_regression, + entry_point, + faucet_args_1, + ) + .build(); + builder.exec(fund_request_1).expect_success().commit(); + + let gas_cost_1 = builder.last_exec_gas_consumed(); + + // Next time pay exactly the amount that was reported which should be also the minimum you + // should be able to pay next time. + let payment_amount = Motes::from_gas(gas_cost_1, 1).unwrap(); + + let deploy_hash: [u8; 32] = [55; 32]; + let faucet_args_2 = runtime_args! { + ARG_TARGET => *ACCOUNT_2_ADDR, + }; + + let deploy_item = DeployItemBuilder::new() + .with_address(account_hash) + .with_stored_session_hash(gh_2280_regression, entry_point, faucet_args_2) + // + default_create_purse_cost + .with_standard_payment(runtime_args! 
{ + ARG_AMOUNT => payment_amount.value() + }) + .with_authorization_keys(&[account_hash]) + .with_deploy_hash(deploy_hash) + .build(); + + let fund_request_2 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); + builder.exec(fund_request_2).expect_success().commit(); + + let gas_cost_2 = builder.last_exec_gas_consumed(); + + assert_eq!(gas_cost_1, gas_cost_2); + + let mut upgrade_request = make_upgrade_request(); + + // Increase "transfer_to_account" host function call exactly by X, so we can assert that + // transfer cost increased by exactly X without hidden fees. + let default_host_function_costs = HostFunctionCostsV1::default(); + + let default_transfer_from_purse_to_account_cost = default_host_function_costs + .transfer_from_purse_to_account + .cost(); + let new_transfer_from_purse_to_account_cost = default_transfer_from_purse_to_account_cost + .checked_add(HOST_FUNCTION_COST_CHANGE) + .expect("should add without overflow"); + let new_transfer_from_purse_to_account = + HostFunction::fixed(new_transfer_from_purse_to_account_cost); + + let new_host_function_costs = HostFunctionCostsV1 { + transfer_from_purse_to_account: new_transfer_from_purse_to_account, + ..default_host_function_costs + }; + + let new_wasm_config = + make_wasm_config(new_host_function_costs, builder.chainspec().wasm_config); + + // Inflate affected system contract entry point cost to the maximum + let new_mint_create_cost = u32::MAX; + let new_mint_costs = MintCosts { + create: new_mint_create_cost, + ..Default::default() + }; + + let updated_chainspec = builder + .chainspec() + .clone() + .with_wasm_config(new_wasm_config) + .with_mint_costs(new_mint_costs); + + builder + .with_chainspec(updated_chainspec) + .upgrade(&mut upgrade_request); + + let deploy_hash: [u8; 32] = [77; 32]; + let faucet_args_3 = runtime_args! 
{ + ARG_TARGET => *ACCOUNT_3_ADDR, + }; + + let deploy_item = DeployItemBuilder::new() + .with_address(account_hash) + .with_stored_session_hash(gh_2280_regression, entry_point, faucet_args_3) + .with_standard_payment(runtime_args! { + ARG_AMOUNT => payment_amount.value() + HOST_FUNCTION_COST_CHANGE + }) + .with_authorization_keys(&[account_hash]) + .with_deploy_hash(deploy_hash) + .build(); + + let fund_request_3 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); + + builder.exec(fund_request_3).expect_success().commit(); + + let gas_cost_3 = builder.last_exec_gas_consumed(); + + assert!(gas_cost_3 > gas_cost_1, "{} <= {}", gas_cost_3, gas_cost_1); + assert!(gas_cost_3 > gas_cost_2); +} + +#[ignore] +#[test] +fn gh_2280_stored_faucet_call_should_cost_the_same() { + let session_file = GH_2280_REGRESSION_CALL_CONTRACT; + let account_hash = *DEFAULT_ACCOUNT_ADDR; + + let (mut builder, TestContext { gh_2280_regression }) = setup(); + + let faucet_args_1 = runtime_args! { + ARG_CONTRACT_HASH => gh_2280_regression, + ARG_TARGET => *ACCOUNT_1_ADDR, + }; + + let fund_request_1 = + ExecuteRequestBuilder::standard(account_hash, session_file, faucet_args_1).build(); + builder.exec(fund_request_1).expect_success().commit(); + + let gas_cost_1 = builder.last_exec_gas_consumed(); + + // Next time pay exactly the amount that was reported which should be also the minimum you + // should be able to pay next time. + let payment_amount = Motes::from_gas(gas_cost_1, 1).unwrap(); + + let deploy_hash: [u8; 32] = [55; 32]; + let faucet_args_2 = runtime_args! { + ARG_CONTRACT_HASH => gh_2280_regression, + ARG_TARGET => *ACCOUNT_2_ADDR, + }; + + let deploy_item = DeployItemBuilder::new() + .with_address(account_hash) + .with_session_code(session_file, faucet_args_2) + // + default_create_purse_cost + .with_standard_payment(runtime_args! 
{ + ARG_AMOUNT => payment_amount.value() + }) + .with_authorization_keys(&[account_hash]) + .with_deploy_hash(deploy_hash) + .build(); + + let fund_request_2 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); + builder.exec(fund_request_2).expect_success().commit(); + + let gas_cost_2 = builder.last_exec_gas_consumed(); + + assert_eq!(gas_cost_1, gas_cost_2); + + let mut upgrade_request = make_upgrade_request(); + + // Increase "transfer_to_account" host function call exactly by X, so we can assert that + // transfer cost increased by exactly X without hidden fees. + let default_host_function_costs = HostFunctionCostsV1::default(); + + let default_transfer_from_purse_to_account_cost = default_host_function_costs + .transfer_from_purse_to_account + .cost(); + let new_transfer_from_purse_to_account_cost = default_transfer_from_purse_to_account_cost + .checked_add(HOST_FUNCTION_COST_CHANGE) + .expect("should add without overflow"); + let new_transfer_from_purse_to_account = + HostFunction::fixed(new_transfer_from_purse_to_account_cost); + + let new_host_function_costs = HostFunctionCostsV1 { + transfer_from_purse_to_account: new_transfer_from_purse_to_account, + ..default_host_function_costs + }; + + let new_wasm_config = + make_wasm_config(new_host_function_costs, builder.chainspec().wasm_config); + + // Inflate affected system contract entry point cost to the maximum + let new_mint_create_cost = u32::MAX; + let new_mint_costs = MintCosts { + create: new_mint_create_cost, + ..Default::default() + }; + + let updated_chainspec = builder + .chainspec() + .clone() + .with_wasm_config(new_wasm_config) + .with_mint_costs(new_mint_costs); + + builder + .with_chainspec(updated_chainspec) + .upgrade(&mut upgrade_request); + + let deploy_hash: [u8; 32] = [77; 32]; + let faucet_args_3 = runtime_args! 
{ + ARG_CONTRACT_HASH => gh_2280_regression, + ARG_TARGET => *ACCOUNT_3_ADDR, + }; + + let deploy_item = DeployItemBuilder::new() + .with_address(account_hash) + .with_session_code(session_file, faucet_args_3) + .with_standard_payment(runtime_args! { + ARG_AMOUNT => payment_amount.value() + HOST_FUNCTION_COST_CHANGE + }) + .with_authorization_keys(&[account_hash]) + .with_deploy_hash(deploy_hash) + .build(); + + let fund_request_3 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); + + builder.exec(fund_request_3).expect_success().commit(); + + let gas_cost_3 = builder.last_exec_gas_consumed(); + + assert!(gas_cost_3 > gas_cost_1, "{} <= {}", gas_cost_3, gas_cost_1); + assert!(gas_cost_3 > gas_cost_2); +} + +struct TestContext { + gh_2280_regression: AddressableEntityHash, +} + +fn setup() -> (LmdbWasmTestBuilder, TestContext) { + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let session_args = runtime_args! { + mint::ARG_AMOUNT => U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE), + ARG_FAUCET_FUNDS => U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE), + }; + let install_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + GH_2280_REGRESSION_CONTRACT, + session_args, + ) + .build(); + + builder.exec(install_request).expect_success().commit(); + + let account = builder + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .expect("should have account"); + let gh_2280_regression = account + .named_keys() + .get(HASH_KEY_NAME) + .cloned() + .and_then(Key::into_entity_hash_addr) + .map(AddressableEntityHash::new) + .expect("should have key"); + + (builder, TestContext { gh_2280_regression }) +} + +fn make_wasm_config( + new_host_function_costs: HostFunctionCostsV1, + old_wasm_config: WasmConfig, +) -> WasmConfig { + let wasm_v1_config = WasmV1Config::new( + DEFAULT_WASM_MAX_MEMORY, + DEFAULT_MAX_STACK_HEIGHT, + old_wasm_config.v1().opcode_costs(), + 
new_host_function_costs, + ); + let wasm_v2_config = WasmV2Config::default(); + WasmConfig::new( + old_wasm_config.messages_limits(), + wasm_v1_config, + wasm_v2_config, + ) +} + +fn make_upgrade_request() -> ProtocolUpgradeConfig { + UpgradeRequestBuilder::new() + .with_current_protocol_version(OLD_PROTOCOL_VERSION) + .with_new_protocol_version(NEW_PROTOCOL_VERSION) + .with_activation_point(DEFAULT_ACTIVATION_POINT) + .build() +} diff --git a/execution_engine_testing/tests/src/test/regression/gh_3097.rs b/execution_engine_testing/tests/src/test/regression/gh_3097.rs new file mode 100644 index 0000000000..5b7521a82e --- /dev/null +++ b/execution_engine_testing/tests/src/test/regression/gh_3097.rs @@ -0,0 +1,442 @@ +use casper_engine_test_support::{ + ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST, +}; +use casper_execution_engine::{engine_state::Error, execution::ExecError}; +use casper_types::{ + runtime_args, AddressableEntityHash, EntityVersionKey, PackageHash, RuntimeArgs, +}; +use gh_1470_regression::PACKAGE_HASH_NAME; + +const GH_3097_REGRESSION_WASM: &str = "gh_3097_regression.wasm"; +const GH_3097_REGRESSION_CALL_WASM: &str = "gh_3097_regression_call.wasm"; +const DO_SOMETHING_ENTRYPOINT: &str = "do_something"; +const DISABLED_CONTRACT_HASH_KEY: &str = "disabled_contract_hash"; +const ENABLED_CONTRACT_HASH_KEY: &str = "enabled_contract_hash"; +const CONTRACT_PACKAGE_HASH_KEY: &str = "contract_package_hash"; +const ARG_METHOD: &str = "method"; +const ARG_CONTRACT_HASH_KEY: &str = "contract_hash_key"; +const ARG_MAJOR_VERSION: &str = "major_version"; +const ARG_CONTRACT_VERSION: &str = "contract_version"; +const METHOD_CALL_CONTRACT: &str = "call_contract"; +const METHOD_CALL_VERSIONED_CONTRACT: &str = "call_versioned_contract"; + +#[ignore] +#[test] +fn should_run_regression() { + // This test runs a contract that's after every call extends the same key with + // more data + let exec_request = 
ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + GH_3097_REGRESSION_WASM, + RuntimeArgs::default(), + ) + .build(); + + let mut builder = LmdbWasmTestBuilder::default(); + builder + .run_genesis(LOCAL_GENESIS_REQUEST.clone()) + .exec(exec_request) + .expect_success() + .commit(); + + let account = builder + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .expect("should have account"); + let disabled_contract_hash = account + .named_keys() + .get(DISABLED_CONTRACT_HASH_KEY) + .unwrap() + .into_entity_hash_addr() + .map(AddressableEntityHash::new) + .unwrap(); + let enabled_contract_hash = account + .named_keys() + .get(ENABLED_CONTRACT_HASH_KEY) + .unwrap() + .into_entity_hash_addr() + .map(AddressableEntityHash::new) + .unwrap(); + let contract_package_hash = account + .named_keys() + .get(CONTRACT_PACKAGE_HASH_KEY) + .unwrap() + .into_package_addr() + .map(PackageHash::new) + .unwrap(); + + // Versioned contract calls by name + + let direct_call_latest_request = ExecuteRequestBuilder::versioned_contract_call_by_name( + *DEFAULT_ACCOUNT_ADDR, + PACKAGE_HASH_NAME, + None, + DO_SOMETHING_ENTRYPOINT, + RuntimeArgs::new(), + ) + .build(); + + let direct_call_v2_request = ExecuteRequestBuilder::versioned_contract_call_by_name( + *DEFAULT_ACCOUNT_ADDR, + PACKAGE_HASH_NAME, + Some(2), + DO_SOMETHING_ENTRYPOINT, + RuntimeArgs::new(), + ) + .build(); + let direct_call_v2_request_with_major = + ExecuteRequestBuilder::contract_call_by_name_versioned_with_major( + *DEFAULT_ACCOUNT_ADDR, + PACKAGE_HASH_NAME, + Some(2), + Some(2), + DO_SOMETHING_ENTRYPOINT, + RuntimeArgs::new(), + ) + .build(); + + let direct_call_v1_request = ExecuteRequestBuilder::versioned_contract_call_by_name( + *DEFAULT_ACCOUNT_ADDR, + PACKAGE_HASH_NAME, + Some(1), + DO_SOMETHING_ENTRYPOINT, + RuntimeArgs::new(), + ) + .build(); + + let direct_call_v1_request_with_major = + ExecuteRequestBuilder::contract_call_by_name_versioned_with_major( + *DEFAULT_ACCOUNT_ADDR, + 
PACKAGE_HASH_NAME, + Some(1), + Some(2), + DO_SOMETHING_ENTRYPOINT, + RuntimeArgs::new(), + ) + .build(); + + builder + .exec(direct_call_latest_request) + .expect_success() + .commit(); + + builder + .exec(direct_call_v2_request) + .expect_success() + .commit(); + + builder + .exec(direct_call_v2_request_with_major) + .expect_success() + .commit(); + + builder + .exec(direct_call_v1_request) + .expect_failure() + .commit(); + + builder + .exec(direct_call_v1_request_with_major) + .expect_failure() + .commit(); + + let error = builder.get_error().expect("should have error"); + assert!( + matches!( + error, + Error::Exec( + ExecError::DisabledEntityVersion(version) + ) + if version == EntityVersionKey::new(2, 1), + ), + "Expected disabled contract version, found {:?}", + error, + ); + + // Versioned contract calls by hash + + let direct_call_latest_request = ExecuteRequestBuilder::versioned_contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + contract_package_hash, + None, + DO_SOMETHING_ENTRYPOINT, + RuntimeArgs::new(), + ) + .build(); + + let direct_call_v2_request = ExecuteRequestBuilder::versioned_contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + contract_package_hash, + Some(2), + DO_SOMETHING_ENTRYPOINT, + RuntimeArgs::new(), + ) + .build(); + let direct_call_v2_request_with_major = + ExecuteRequestBuilder::contract_call_by_hash_versioned_with_major( + *DEFAULT_ACCOUNT_ADDR, + contract_package_hash, + Some(2), + Some(2), + DO_SOMETHING_ENTRYPOINT, + RuntimeArgs::new(), + ) + .build(); + + let direct_call_v1_request = ExecuteRequestBuilder::versioned_contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + contract_package_hash, + Some(1), + DO_SOMETHING_ENTRYPOINT, + RuntimeArgs::new(), + ) + .build(); + + let direct_call_v1_request_with_major = + ExecuteRequestBuilder::contract_call_by_hash_versioned_with_major( + *DEFAULT_ACCOUNT_ADDR, + contract_package_hash, + Some(1), + Some(2), + DO_SOMETHING_ENTRYPOINT, + RuntimeArgs::new(), + ) + .build(); + + builder + 
.exec(direct_call_latest_request) + .expect_success() + .commit(); + + builder + .exec(direct_call_v2_request) + .expect_success() + .commit(); + + builder + .exec(direct_call_v2_request_with_major) + .expect_success() + .commit(); + + builder + .exec(direct_call_v1_request) + .expect_failure() + .commit(); + + builder + .exec(direct_call_v1_request_with_major) + .expect_failure() + .commit(); + + let error = builder.get_error().expect("should have error"); + assert!( + matches!( + error, + Error::Exec( + ExecError::DisabledEntityVersion(version) + ) + if version == EntityVersionKey::new(2, 1), + ), + "Expected disabled contract version, found {:?}", + error, + ); + + // Versioned call from a session wasm + + let session_call_v1_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + GH_3097_REGRESSION_CALL_WASM, + runtime_args! { + ARG_METHOD => METHOD_CALL_VERSIONED_CONTRACT, + ARG_MAJOR_VERSION => 2u32, + ARG_CONTRACT_VERSION => Some(1u32), + }, + ) + .build(); + + let session_call_v2_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + GH_3097_REGRESSION_CALL_WASM, + runtime_args! { + ARG_METHOD => METHOD_CALL_VERSIONED_CONTRACT, + ARG_MAJOR_VERSION => 2u32, + ARG_CONTRACT_VERSION => Some(2u32), + }, + ) + .build(); + + let session_call_latest_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + GH_3097_REGRESSION_CALL_WASM, + runtime_args! 
{
+            ARG_METHOD => METHOD_CALL_VERSIONED_CONTRACT,
+            ARG_MAJOR_VERSION => 2u32,
+            ARG_CONTRACT_VERSION => Option::<u32>::None,
+        },
+    )
+    .build();
+
+    builder
+        .exec(session_call_latest_request)
+        .expect_success()
+        .commit();
+
+    builder
+        .exec(session_call_v2_request)
+        .expect_success()
+        .commit();
+
+    builder
+        .exec(session_call_v1_request)
+        .expect_failure()
+        .commit();
+
+    let error = builder.get_error().expect("should have error");
+    assert!(
+        matches!(
+            error,
+            Error::Exec(
+                ExecError::DisabledEntityVersion(version)
+            )
+            if version == EntityVersionKey::new(2, 1),
+        ),
+        "Expected disabled contract version, found {:?}",
+        error,
+    );
+
+    // Call by contract hashes
+
+    let call_by_hash_v2_request = ExecuteRequestBuilder::contract_call_by_hash(
+        *DEFAULT_ACCOUNT_ADDR,
+        enabled_contract_hash,
+        DO_SOMETHING_ENTRYPOINT,
+        RuntimeArgs::new(),
+    )
+    .build();
+
+    let call_by_hash_v2_request_with_major = ExecuteRequestBuilder::contract_call_by_hash(
+        *DEFAULT_ACCOUNT_ADDR,
+        enabled_contract_hash,
+        DO_SOMETHING_ENTRYPOINT,
+        RuntimeArgs::new(),
+    )
+    .build();
+
+    builder
+        .exec(call_by_hash_v2_request)
+        .expect_success()
+        .commit();
+
+    builder
+        .exec(call_by_hash_v2_request_with_major)
+        .expect_success()
+        .commit();
+
+    let call_by_name_v2_request = ExecuteRequestBuilder::contract_call_by_name(
+        *DEFAULT_ACCOUNT_ADDR,
+        ENABLED_CONTRACT_HASH_KEY,
+        DO_SOMETHING_ENTRYPOINT,
+        RuntimeArgs::new(),
+    )
+    .build();
+    builder
+        .exec(call_by_name_v2_request)
+        .expect_success()
+        .commit();
+
+    // This direct contract by name/hash should fail
+    let call_by_hash_v1_request = ExecuteRequestBuilder::contract_call_by_hash(
+        *DEFAULT_ACCOUNT_ADDR,
+        disabled_contract_hash,
+        DO_SOMETHING_ENTRYPOINT,
+        RuntimeArgs::new(),
+    )
+    .build();
+    builder
+        .exec(call_by_hash_v1_request)
+        .expect_failure()
+        .commit();
+
+    let error = builder.get_error().expect("should have error");
+    assert!(
+        matches!(
+            error,
+            Error::Exec(
+                
ExecError::DisabledEntity(contract_hash) + ) + if contract_hash == disabled_contract_hash + ), + "Expected invalid contract version, found {:?}", + error, + ); + + // This direct contract by name/hash should fail + let call_by_name_v1_request = ExecuteRequestBuilder::contract_call_by_name( + *DEFAULT_ACCOUNT_ADDR, + DISABLED_CONTRACT_HASH_KEY, + DO_SOMETHING_ENTRYPOINT, + RuntimeArgs::new(), + ) + .build(); + builder + .exec(call_by_name_v1_request) + .expect_failure() + .commit(); + + let error = builder.get_error().expect("should have error"); + assert!( + matches!( + error, + Error::Exec( + ExecError::DisabledEntity(contract_hash) + ) + if contract_hash == disabled_contract_hash + ), + "Expected invalid contract version, found {:?}", + error, + ); + + // Session calls into hashes + + let session_call_hash_v1_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + GH_3097_REGRESSION_CALL_WASM, + runtime_args! { + ARG_METHOD => METHOD_CALL_CONTRACT, + ARG_CONTRACT_HASH_KEY => DISABLED_CONTRACT_HASH_KEY, + }, + ) + .build(); + + let session_call_hash_v2_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + GH_3097_REGRESSION_CALL_WASM, + runtime_args! 
{
+            ARG_METHOD => METHOD_CALL_CONTRACT,
+            ARG_CONTRACT_HASH_KEY => ENABLED_CONTRACT_HASH_KEY,
+        },
+    )
+    .build();
+
+    builder
+        .exec(session_call_hash_v1_request)
+        .expect_failure()
+        .commit();
+
+    let error = builder.get_error().expect("should have error");
+    assert!(
+        matches!(
+            error,
+            Error::Exec(
+                ExecError::DisabledEntity(contract_hash)
+            )
+            if contract_hash == disabled_contract_hash
+        ),
+        "Expected invalid contract version, found {:?}",
+        error,
+    );
+
+    builder
+        .exec(session_call_hash_v2_request)
+        .expect_success()
+        .commit();
+}
diff --git a/execution_engine_testing/tests/src/test/regression/gh_3208.rs b/execution_engine_testing/tests/src/test/regression/gh_3208.rs
new file mode 100644
index 0000000000..b6baafadce
--- /dev/null
+++ b/execution_engine_testing/tests/src/test/regression/gh_3208.rs
@@ -0,0 +1,402 @@
+use once_cell::sync::Lazy;
+
+use casper_engine_test_support::{
+    genesis_config_builder::GenesisConfigBuilder, utils, ChainspecConfig, DeployItemBuilder,
+    ExecuteRequestBuilder, LmdbWasmTestBuilder, StepRequestBuilder, DEFAULT_ACCOUNT_ADDR,
+    DEFAULT_ACCOUNT_INITIAL_BALANCE, DEFAULT_ACCOUNT_PUBLIC_KEY, DEFAULT_CHAINSPEC_REGISTRY,
+    DEFAULT_GENESIS_CONFIG_HASH, DEFAULT_GENESIS_TIMESTAMP_MILLIS,
+    DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS, DEFAULT_PAYMENT, DEFAULT_PROPOSER_ADDR,
+    DEFAULT_PROPOSER_PUBLIC_KEY, DEFAULT_PROTOCOL_VERSION, DEFAULT_VESTING_SCHEDULE_PERIOD_MILLIS,
+};
+use casper_execution_engine::{
+    engine_state::{self},
+    execution::ExecError,
+};
+use casper_storage::data_access_layer::GenesisRequest;
+use casper_types::{
+    runtime_args,
+    system::{
+        auction::{self, BidAddr, DelegationRate},
+        standard_payment,
+    },
+    ApiError, GenesisAccount, GenesisValidator, Key, Motes, StoredValue,
+    DEFAULT_MINIMUM_BID_AMOUNT, U512,
+};
+
+use crate::lmdb_fixture;
+
+static DEFAULT_PROPOSER_ACCOUNT_INITIAL_STAKE: Lazy<U512> =
+    Lazy::new(|| U512::from(1_000_000_000_000u64));
+
+static ACCOUNTS_WITH_GENESIS_VALIDATORS: Lazy<Vec<GenesisAccount>> = Lazy::new(|| {
+    
vec![ + GenesisAccount::account( + DEFAULT_ACCOUNT_PUBLIC_KEY.clone(), + Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE), + None, + ), + GenesisAccount::account( + DEFAULT_PROPOSER_PUBLIC_KEY.clone(), + Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE), + Some(GenesisValidator::new( + Motes::new(*DEFAULT_PROPOSER_ACCOUNT_INITIAL_STAKE), + 15, + )), + ), + ] +}); +const DAY_MILLIS: usize = 24 * 60 * 60 * 1000; +const DAYS_IN_WEEK: usize = 7; +const WEEK_MILLIS: usize = DAYS_IN_WEEK * DAY_MILLIS; +const VESTING_SCHEDULE_LENGTH_DAYS: usize = 91; +const LOCKED_AMOUNTS_LENGTH: usize = (VESTING_SCHEDULE_LENGTH_DAYS / DAYS_IN_WEEK) + 1; + +const LMDB_FIXTURE_NAME: &str = "gh_3208"; + +#[ignore] +#[test] +fn should_run_regression_with_already_initialized_fixed_schedule() { + let (builder, _lmdb_fixture_state, _temp_dir) = + lmdb_fixture::builder_from_global_state_fixture(LMDB_FIXTURE_NAME); + + let bid_key = Key::Bid(*DEFAULT_PROPOSER_ADDR); + + let stored_value = builder.query(None, bid_key, &[]).unwrap(); + if let StoredValue::Bid(bid) = stored_value { + assert!( + bid.is_locked_with_vesting_schedule(7776000000, DEFAULT_VESTING_SCHEDULE_PERIOD_MILLIS) + ); + let vesting_schedule = bid + .vesting_schedule() + .expect("should have a schedule initialized already"); + + let initial_stake = *DEFAULT_PROPOSER_ACCOUNT_INITIAL_STAKE; + + let total_vested_amounts = { + let mut total_vested_amounts = U512::zero(); + + for i in 0..LOCKED_AMOUNTS_LENGTH { + let timestamp = + vesting_schedule.initial_release_timestamp_millis() + (WEEK_MILLIS * i) as u64; + if let Some(locked_amount) = vesting_schedule.locked_amount(timestamp) { + let current_vested_amount = + initial_stake - locked_amount - total_vested_amounts; + total_vested_amounts += current_vested_amount + } + } + + total_vested_amounts + }; + + assert_eq!(total_vested_amounts, initial_stake); + } else { + panic!("unexpected StoredValue variant.") + } +} + +#[ignore] +#[test] +fn should_initialize_default_vesting_schedule() { + let 
genesis_request = + utils::create_run_genesis_request(ACCOUNTS_WITH_GENESIS_VALIDATORS.clone()); + + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(genesis_request); + + let bid_addr = BidAddr::from(*DEFAULT_PROPOSER_ADDR); + let stored_value_before = builder + .query(None, bid_addr.into(), &[]) + .expect("should query proposers bid"); + + let bid_before = if let StoredValue::BidKind(bid) = stored_value_before { + bid + } else { + panic!("Expected a bid variant in the global state"); + }; + + let bid_vesting_schedule = bid_before + .vesting_schedule() + .expect("genesis validator should have vesting schedule"); + + assert!( + bid_vesting_schedule.locked_amounts().is_none(), + "initial funds release is not yet processed" + ); + + let mut era_end_timestamp_millis = DEFAULT_GENESIS_TIMESTAMP_MILLIS; + + era_end_timestamp_millis += DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS; + + assert!( + builder + .step( + StepRequestBuilder::default() + .with_era_end_timestamp_millis(era_end_timestamp_millis) + .with_parent_state_hash(builder.get_post_state_hash()) + .with_protocol_version(DEFAULT_PROTOCOL_VERSION) + .build(), + ) + .is_success(), + "should run step to initialize a schedule" + ); + + let stored_value_after = builder + .query(None, bid_addr.into(), &[]) + .expect("should query proposers bid"); + + let bid_after = if let StoredValue::BidKind(bid) = stored_value_after { + bid + } else { + panic!("Expected a bid variant in the global state"); + }; + + let bid_vesting_schedule = bid_after + .vesting_schedule() + .expect("genesis validator should have vesting schedule"); + + assert!( + bid_vesting_schedule.locked_amounts().is_some(), + "initial funds release is initialized" + ); +} + +#[ignore] +#[test] +fn should_immediatelly_unbond_genesis_validator_with_zero_day_vesting_schedule() { + let vesting_schedule_period_millis = 0; + + let exec_config = { + let accounts = ACCOUNTS_WITH_GENESIS_VALIDATORS.clone(); + 
GenesisConfigBuilder::new().with_accounts(accounts).build() + }; + + let genesis_request = GenesisRequest::new( + DEFAULT_GENESIS_CONFIG_HASH, + DEFAULT_PROTOCOL_VERSION, + exec_config, + DEFAULT_CHAINSPEC_REGISTRY.clone(), + ); + + let engine_config = ChainspecConfig::default() + .with_vesting_schedule_period_millis(vesting_schedule_period_millis); + + let mut builder = LmdbWasmTestBuilder::new_temporary_with_config(engine_config); + builder.run_genesis(genesis_request); + + let add_bid_request = ExecuteRequestBuilder::contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + builder.get_auction_contract_hash(), + auction::METHOD_ADD_BID, + runtime_args! { + auction::ARG_PUBLIC_KEY => DEFAULT_ACCOUNT_PUBLIC_KEY.clone(), + auction::ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_BID_AMOUNT), + auction::ARG_DELEGATION_RATE => 10 as DelegationRate, + }, + ) + .build(); + + builder.exec(add_bid_request).expect_success().commit(); + + let sender = *DEFAULT_PROPOSER_ADDR; + let contract_hash = builder.get_auction_contract_hash(); + let entry_point = auction::METHOD_WITHDRAW_BID; + let payment_args = runtime_args! { standard_payment::ARG_AMOUNT => *DEFAULT_PAYMENT, }; + let session_args = runtime_args! { + auction::ARG_PUBLIC_KEY => DEFAULT_PROPOSER_PUBLIC_KEY.clone(), + auction::ARG_AMOUNT => *DEFAULT_PROPOSER_ACCOUNT_INITIAL_STAKE, + }; + + let deploy_item = DeployItemBuilder::new() + .with_address(sender) + .with_stored_session_hash(contract_hash, entry_point, session_args) + .with_standard_payment(payment_args) + .with_authorization_keys(&[sender]) + .with_deploy_hash([58; 32]) + .build(); + + let withdraw_bid_request_1 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); + + let sender = *DEFAULT_PROPOSER_ADDR; + let contract_hash = builder.get_auction_contract_hash(); + let entry_point = auction::METHOD_WITHDRAW_BID; + let payment_args = runtime_args! { standard_payment::ARG_AMOUNT => *DEFAULT_PAYMENT, }; + let session_args = runtime_args! 
{ + auction::ARG_PUBLIC_KEY => DEFAULT_PROPOSER_PUBLIC_KEY.clone(), + auction::ARG_AMOUNT => *DEFAULT_PROPOSER_ACCOUNT_INITIAL_STAKE, + }; + + let deploy_item = DeployItemBuilder::new() + .with_address(sender) + .with_stored_session_hash(contract_hash, entry_point, session_args) + .with_standard_payment(payment_args) + .with_authorization_keys(&[sender]) + .with_deploy_hash([59; 32]) + .build(); + + let withdraw_bid_request_2 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); + + builder + .exec(withdraw_bid_request_1) + .expect_failure() + .commit(); + + let error = builder.get_error().expect("should have error"); + assert!( + matches!(error, engine_state::Error::Exec(ExecError::Revert(ApiError::AuctionError(auction_error))) if auction_error == auction::Error::ValidatorFundsLocked as u8), + "vesting schedule is not yet initialized" + ); + + let mut era_end_timestamp_millis = DEFAULT_GENESIS_TIMESTAMP_MILLIS; + + assert!( + builder + .step( + StepRequestBuilder::default() + .with_era_end_timestamp_millis(era_end_timestamp_millis) + .with_parent_state_hash(builder.get_post_state_hash()) + .with_protocol_version(DEFAULT_PROTOCOL_VERSION) + .with_run_auction(true) + .build(), + ) + .is_success(), + "should run step to initialize a schedule" + ); + + era_end_timestamp_millis += DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS; + + assert!( + builder + .step( + StepRequestBuilder::default() + .with_era_end_timestamp_millis(era_end_timestamp_millis) + .with_parent_state_hash(builder.get_post_state_hash()) + .with_protocol_version(DEFAULT_PROTOCOL_VERSION) + .with_run_auction(true) + .build(), + ) + .is_success(), + "should run step to initialize a schedule" + ); + + builder + .exec(withdraw_bid_request_2) + .expect_success() + .commit(); +} + +#[ignore] +#[test] +fn should_immediatelly_unbond_genesis_validator_with_zero_day_vesting_schedule_and_zero_day_lock() { + let vesting_schedule_period_millis = 0; + let locked_funds_period_millis = 0; + + let exec_config = { + let 
accounts = ACCOUNTS_WITH_GENESIS_VALIDATORS.clone(); + GenesisConfigBuilder::new() + .with_accounts(accounts) + .with_locked_funds_period_millis(locked_funds_period_millis) + .build() + }; + + let genesis_request = GenesisRequest::new( + DEFAULT_GENESIS_CONFIG_HASH, + DEFAULT_PROTOCOL_VERSION, + exec_config, + DEFAULT_CHAINSPEC_REGISTRY.clone(), + ); + + let chainspec = ChainspecConfig::default() + .with_vesting_schedule_period_millis(vesting_schedule_period_millis); + + let mut builder = LmdbWasmTestBuilder::new_temporary_with_config(chainspec); + builder.run_genesis(genesis_request); + + let add_bid_request = ExecuteRequestBuilder::contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + builder.get_auction_contract_hash(), + auction::METHOD_ADD_BID, + runtime_args! { + auction::ARG_PUBLIC_KEY => DEFAULT_ACCOUNT_PUBLIC_KEY.clone(), + auction::ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_BID_AMOUNT), + auction::ARG_DELEGATION_RATE => 10 as DelegationRate, + }, + ) + .build(); + + builder.exec(add_bid_request).expect_success().commit(); + + let era_end_timestamp_millis = DEFAULT_GENESIS_TIMESTAMP_MILLIS; + + assert!( + builder + .step( + StepRequestBuilder::default() + .with_era_end_timestamp_millis(era_end_timestamp_millis) + .with_parent_state_hash(builder.get_post_state_hash()) + .with_protocol_version(DEFAULT_PROTOCOL_VERSION) + .with_run_auction(true) + .build(), + ) + .is_success(), + "should run step to initialize a schedule" + ); + + let sender = *DEFAULT_PROPOSER_ADDR; + let contract_hash = builder.get_auction_contract_hash(); + let entry_point = auction::METHOD_WITHDRAW_BID; + let payment_args = runtime_args! { standard_payment::ARG_AMOUNT => *DEFAULT_PAYMENT, }; + let session_args = runtime_args! 
{ + auction::ARG_PUBLIC_KEY => DEFAULT_PROPOSER_PUBLIC_KEY.clone(), + auction::ARG_AMOUNT => *DEFAULT_PROPOSER_ACCOUNT_INITIAL_STAKE, + }; + + let deploy_item = DeployItemBuilder::new() + .with_address(sender) + .with_stored_session_hash(contract_hash, entry_point, session_args) + .with_standard_payment(payment_args) + .with_authorization_keys(&[sender]) + .with_deploy_hash([58; 32]) + .build(); + + let withdraw_bid_request_1 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); + + builder + .exec(withdraw_bid_request_1) + .expect_success() + .commit(); +} + +#[cfg(feature = "fixture-generators")] +mod fixture { + use casper_engine_test_support::{ + utils, StepRequestBuilder, DEFAULT_GENESIS_TIMESTAMP_MILLIS, + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS, DEFAULT_PROTOCOL_VERSION, + }; + + use crate::lmdb_fixture; + + use super::{ACCOUNTS_WITH_GENESIS_VALIDATORS, LMDB_FIXTURE_NAME}; + + #[ignore] + #[test] + fn generate_gh_3208_fixture() { + let genesis_request = + utils::create_run_genesis_request(ACCOUNTS_WITH_GENESIS_VALIDATORS.clone()); + + lmdb_fixture::generate_fixture(LMDB_FIXTURE_NAME, genesis_request, |builder| { + let era_end_timestamp_millis = + DEFAULT_GENESIS_TIMESTAMP_MILLIS + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS; + + // Move forward the clock and initialize vesting schedule with 13 weeks after initial 90 + // days lock up. 
+ builder.step( + StepRequestBuilder::default() + .with_era_end_timestamp_millis(era_end_timestamp_millis) + .with_parent_state_hash(builder.get_post_state_hash()) + .with_protocol_version(DEFAULT_PROTOCOL_VERSION) + .build(), + ); + }) + .unwrap(); + } +} diff --git a/execution_engine_testing/tests/src/test/regression/gh_3710.rs b/execution_engine_testing/tests/src/test/regression/gh_3710.rs new file mode 100644 index 0000000000..57a9b6e77a --- /dev/null +++ b/execution_engine_testing/tests/src/test/regression/gh_3710.rs @@ -0,0 +1,366 @@ +use std::{collections::BTreeSet, convert::TryInto, iter::FromIterator}; + +use casper_engine_test_support::{ + ExecuteRequestBuilder, LmdbWasmTestBuilder, StepRequestBuilder, WasmTestBuilder, + DEFAULT_ACCOUNT_PUBLIC_KEY, DEFAULT_PROPOSER_PUBLIC_KEY, LOCAL_GENESIS_REQUEST, +}; +use casper_storage::{ + data_access_layer::{PruneRequest, PruneResult}, + global_state::state::{CommitProvider, StateProvider}, +}; +use casper_types::{ + runtime_args, + system::auction::{self, DelegationRate}, + Digest, EraId, Key, KeyTag, ProtocolVersion, PublicKey, U512, +}; + +use crate::lmdb_fixture; + +const FIXTURE_N_ERAS: usize = 10; + +const GH_3710_FIXTURE: &str = "gh_3710"; + +#[ignore] +#[test] +fn gh_3710_commit_prune_with_empty_keys_should_be_noop() { + let (mut builder, _lmdb_fixture_state, _temp_dir) = + lmdb_fixture::builder_from_global_state_fixture(GH_3710_FIXTURE); + + let prune_config = PruneRequest::new(builder.get_post_state_hash(), Vec::new()); + + builder.commit_prune(prune_config).expect_prune_success(); +} + +#[ignore] +#[test] +fn gh_3710_commit_prune_should_validate_state_root_hash() { + let (mut builder, _lmdb_fixture_state, _temp_dir) = + lmdb_fixture::builder_from_global_state_fixture(GH_3710_FIXTURE); + + let prune_config = PruneRequest::new(Digest::hash("foobar"), Vec::new()); + + builder.commit_prune(prune_config); + + let prune_result = builder + .get_prune_result(0) + .expect("should have prune result"); + 
assert!(builder.get_prune_result(1).is_none()); + + assert!( + matches!(prune_result, PruneResult::RootNotFound), + "{:?}", + prune_result + ); +} + +#[ignore] +#[test] +fn gh_3710_commit_prune_should_delete_values() { + let (mut builder, lmdb_fixture_state, _temp_dir) = + lmdb_fixture::builder_from_global_state_fixture(GH_3710_FIXTURE); + + let auction_delay: u64 = lmdb_fixture_state + .genesis_request + .get("ee_config") + .expect("should have ee_config") + .get("auction_delay") + .expect("should have auction delay") + .as_i64() + .expect("auction delay should be integer") + .try_into() + .expect("auction delay should be positive"); + + let keys_before_prune = builder + .get_keys(KeyTag::EraInfo) + .expect("should obtain all given keys"); + + assert_eq!( + keys_before_prune.len(), + FIXTURE_N_ERAS + 1 + auction_delay as usize + ); + + let batch_1: Vec = (0..FIXTURE_N_ERAS) + .map(|i| EraId::new(i.try_into().unwrap())) + .map(Key::EraInfo) + .collect(); + + let batch_2: Vec = (FIXTURE_N_ERAS..FIXTURE_N_ERAS + 1 + auction_delay as usize) + .map(|i| EraId::new(i.try_into().unwrap())) + .map(Key::EraInfo) + .collect(); + + assert_eq!( + BTreeSet::from_iter(batch_1.iter()) + .union(&BTreeSet::from_iter(batch_2.iter())) + .collect::>() + .len(), + keys_before_prune.len(), + "sanity check" + ); + + // Process prune of first batch + let pre_state_hash = builder.get_post_state_hash(); + + let prune_config_1 = PruneRequest::new(pre_state_hash, batch_1); + + builder.commit_prune(prune_config_1).expect_prune_success(); + let post_state_hash_batch_1 = builder.get_post_state_hash(); + assert_ne!(pre_state_hash, post_state_hash_batch_1); + + let keys_after_batch_1_prune = builder + .get_keys(KeyTag::EraInfo) + .expect("should obtain all given keys"); + + assert_eq!(keys_after_batch_1_prune.len(), 2); + + // Process prune of second batch + let pre_state_hash = builder.get_post_state_hash(); + + let prune_config_2 = PruneRequest::new(pre_state_hash, batch_2); + 
builder.commit_prune(prune_config_2).expect_prune_success(); + let post_state_hash_batch_2 = builder.get_post_state_hash(); + assert_ne!(pre_state_hash, post_state_hash_batch_2); + + let keys_after_batch_2_prune = builder + .get_keys(KeyTag::EraInfo) + .expect("should obtain all given keys"); + + assert_eq!(keys_after_batch_2_prune.len(), 0); +} + +const DEFAULT_REWARD_AMOUNT: u64 = 1_000_000; + +fn add_validator_and_wait_for_rotation(builder: &mut WasmTestBuilder, public_key: &PublicKey) +where + S: StateProvider + CommitProvider, +{ + const DELEGATION_RATE: DelegationRate = 10; + + let args = runtime_args! { + auction::ARG_PUBLIC_KEY => public_key.clone(), + auction::ARG_DELEGATION_RATE => DELEGATION_RATE, + auction::ARG_AMOUNT => U512::from(DEFAULT_REWARD_AMOUNT), + }; + + let add_bid_request = ExecuteRequestBuilder::contract_call_by_hash( + public_key.to_account_hash(), + builder.get_auction_contract_hash(), + auction::METHOD_ADD_BID, + args, + ) + .build(); + + builder.exec(add_bid_request).expect_success().commit(); + + // compute N eras + + let current_era_id = builder.get_era(); + + // eras current..=delay + 1 without rewards (default genesis validator is not a + // validator yet) + for era_counter in current_era_id.iter(builder.get_auction_delay() + 1) { + let step_request = StepRequestBuilder::new() + .with_parent_state_hash(builder.get_post_state_hash()) + .with_protocol_version(ProtocolVersion::V1_0_0) + .with_next_era_id(era_counter) + // no rewards as default validator is not a validator yet + .build(); + builder.step(step_request); + } +} + +fn distribute_rewards( + builder: &mut WasmTestBuilder, + block_height: u64, + proposer: &PublicKey, + amount: U512, +) where + S: StateProvider + CommitProvider, +{ + builder.distribute( + None, + ProtocolVersion::V1_0_0, + IntoIterator::into_iter([(proposer.clone(), vec![amount])]).collect(), + block_height, + ); +} + +#[ignore] +#[test] +fn gh_3710_should_produce_era_summary_in_a_step() { + let mut builder = 
LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + add_validator_and_wait_for_rotation(&mut builder, &DEFAULT_ACCOUNT_PUBLIC_KEY); + distribute_rewards(&mut builder, 1, &DEFAULT_ACCOUNT_PUBLIC_KEY, 0.into()); + + let era_info_keys = builder.get_keys(KeyTag::EraInfo).unwrap(); + assert_eq!(era_info_keys, Vec::new()); + + let era_summary_1 = builder + .query(None, Key::EraSummary, &[]) + .expect("should query era summary"); + + let era_summary_1 = era_summary_1.as_era_info().expect("era summary"); + + // Reward another validator to observe that the summary changes. + add_validator_and_wait_for_rotation(&mut builder, &DEFAULT_PROPOSER_PUBLIC_KEY); + distribute_rewards(&mut builder, 2, &DEFAULT_PROPOSER_PUBLIC_KEY, 1.into()); + + let era_summary_2 = builder + .query(None, Key::EraSummary, &[]) + .expect("should query era summary"); + + let era_summary_2 = era_summary_2.as_era_info().expect("era summary"); + + assert_ne!(era_summary_1, era_summary_2); + + let era_info_keys = builder.get_keys(KeyTag::EraInfo).unwrap(); + assert_eq!(era_info_keys, Vec::new()); + + // As a sanity check ensure there's just a single era summary per tip + assert_eq!( + builder + .get_keys(KeyTag::EraSummary) + .expect("should get all era summary keys") + .len(), + 1 + ); +} + +mod fixture { + use std::collections::BTreeMap; + + use casper_engine_test_support::{ + ExecuteRequestBuilder, DEFAULT_ACCOUNT_ADDR, DEFAULT_ACCOUNT_PUBLIC_KEY, + LOCAL_GENESIS_REQUEST, + }; + use casper_types::{ + runtime_args, + system::auction::{EraInfo, SeigniorageAllocation}, + EraId, Key, KeyTag, StoredValue, U512, + }; + + use super::{FIXTURE_N_ERAS, GH_3710_FIXTURE}; + use crate::lmdb_fixture; + + #[ignore = "RUN_FIXTURE_GENERATORS env var should be enabled"] + #[test] + fn generate_call_stack_fixture() { + const CALL_STACK_FIXTURE: &str = "call_stack_fixture"; + const CONTRACT_RECURSIVE_SUBCALL: &str = "get_call_stack_recursive_subcall.wasm"; + + if 
!lmdb_fixture::is_fixture_generator_enabled() { + println!("Enable the RUN_FIXTURE_GENERATORS variable"); + return; + } + + let genesis_request = LOCAL_GENESIS_REQUEST.clone(); + + lmdb_fixture::generate_fixture(CALL_STACK_FIXTURE, genesis_request, |builder| { + let execute_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_RECURSIVE_SUBCALL, + runtime_args! {}, + ) + .build(); + + builder.exec(execute_request).expect_success().commit(); + }) + .unwrap(); + } + + #[ignore = "RUN_FIXTURE_GENERATORS env var should be enabled"] + #[test] + fn generate_groups_fixture() { + const GROUPS_FIXTURE: &str = "groups"; + const GROUPS_WASM: &str = "groups.wasm"; + + if !lmdb_fixture::is_fixture_generator_enabled() { + println!("Enable the RUN_FIXTURE_GENERATORS variable"); + return; + } + + let genesis_request = LOCAL_GENESIS_REQUEST.clone(); + + lmdb_fixture::generate_fixture(GROUPS_FIXTURE, genesis_request, |builder| { + let execute_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + GROUPS_WASM, + runtime_args! {}, + ) + .build(); + + builder.exec(execute_request).expect_success().commit(); + }) + .unwrap(); + } + + #[ignore = "RUN_FIXTURE_GENERATORS env var should be enabled"] + #[test] + fn generate_era_info_bloat_fixture() { + if !lmdb_fixture::is_fixture_generator_enabled() { + println!("Enable the RUN_FIXTURE_GENERATORS variable"); + return; + } + // To generate this fixture again you have to re-run this code release-1.4.13. 
+ let genesis_request = LOCAL_GENESIS_REQUEST.clone(); + lmdb_fixture::generate_fixture(GH_3710_FIXTURE, genesis_request, |builder| { + super::add_validator_and_wait_for_rotation(builder, &DEFAULT_ACCOUNT_PUBLIC_KEY); + + // N more eras that pays out rewards + super::distribute_rewards(builder, 0, &DEFAULT_ACCOUNT_PUBLIC_KEY, 0.into()); + + let last_era_info = EraId::new(builder.get_auction_delay() + FIXTURE_N_ERAS as u64); + let last_era_info_key = Key::EraInfo(last_era_info); + + let keys = builder.get_keys(KeyTag::EraInfo).unwrap(); + let mut keys_lookup = BTreeMap::new(); + for key in &keys { + keys_lookup.insert(key, ()); + } + + assert!(keys_lookup.contains_key(&last_era_info_key)); + assert_eq!(keys_lookup.keys().last().copied(), Some(&last_era_info_key)); + + // all era infos should have unique rewards that are in increasing order + let stored_values: Vec = keys_lookup + .keys() + .map(|key| builder.query(None, **key, &[]).unwrap()) + .collect(); + + let era_infos: Vec<&EraInfo> = stored_values + .iter() + .filter_map(StoredValue::as_era_info) + .collect(); + + let rewards: Vec<&U512> = era_infos + .iter() + .flat_map(|era_info| era_info.seigniorage_allocations()) + .map(|seigniorage| match seigniorage { + SeigniorageAllocation::Validator { + validator_public_key, + amount, + } if validator_public_key == &*DEFAULT_ACCOUNT_PUBLIC_KEY => amount, + SeigniorageAllocation::Validator { .. } => panic!("Unexpected validator"), + SeigniorageAllocation::Delegator { .. } + | SeigniorageAllocation::DelegatorKind { .. 
} => panic!("No delegators"), + }) + .collect(); + + let sorted_rewards = { + let mut vec = rewards.clone(); + vec.sort(); + vec + }; + assert_eq!(rewards, sorted_rewards); + + assert!( + rewards.first().unwrap() < rewards.last().unwrap(), + "{:?}", + rewards + ); + }) + .unwrap(); + } +} diff --git a/execution_engine_testing/tests/src/test/regression/gh_4898.rs b/execution_engine_testing/tests/src/test/regression/gh_4898.rs new file mode 100644 index 0000000000..cd6be1e7c8 --- /dev/null +++ b/execution_engine_testing/tests/src/test/regression/gh_4898.rs @@ -0,0 +1,30 @@ +use casper_engine_test_support::{ + utils, ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST, +}; + +use casper_types::runtime_args; + +const ARG_DATA: &str = "data"; +const GH_4898_REGRESSION_WASM: &str = "gh_4898_regression.wasm"; + +#[ignore] +#[test] +fn should_not_contain_f64_opcodes() { + let module_bytes = utils::read_wasm_file(GH_4898_REGRESSION_WASM); + let wat = wasmprinter::print_bytes(module_bytes).expect("WASM parse error"); + assert!(!wat.contains("f64."), "WASM contains f64 opcodes"); + + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let exec_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + GH_4898_REGRESSION_WASM, + runtime_args! 
{ + ARG_DATA => "account-hash-2c4a11c062a8a337bfc97e27fd66291caeb2c65865dcb5d3ef3759c4c97efecb" + }, + ) + .build(); + + builder.exec(exec_request).commit(); +} diff --git a/execution_engine_testing/tests/src/test/regression/gov_116.rs b/execution_engine_testing/tests/src/test/regression/gov_116.rs new file mode 100644 index 0000000000..1bd3902622 --- /dev/null +++ b/execution_engine_testing/tests/src/test/regression/gov_116.rs @@ -0,0 +1,341 @@ +use std::{collections::BTreeSet, iter::FromIterator}; + +use num_traits::Zero; +use once_cell::sync::Lazy; + +use casper_engine_test_support::{ + genesis_config_builder::GenesisConfigBuilder, utils, ChainspecConfig, ExecuteRequestBuilder, + LmdbWasmTestBuilder, TransferRequestBuilder, DEFAULT_ACCOUNTS, DEFAULT_ACCOUNT_ADDR, + DEFAULT_ACCOUNT_PUBLIC_KEY, DEFAULT_CHAINSPEC_REGISTRY, DEFAULT_GENESIS_CONFIG_HASH, + DEFAULT_GENESIS_TIMESTAMP_MILLIS, DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS, DEFAULT_PROTOCOL_VERSION, + DEFAULT_VALIDATOR_SLOTS, MINIMUM_ACCOUNT_CREATION_BALANCE, +}; +use casper_storage::data_access_layer::GenesisRequest; +use casper_types::{ + runtime_args, + system::auction::{self, DelegationRate, EraValidators, VESTING_SCHEDULE_LENGTH_MILLIS}, + GenesisAccount, GenesisValidator, Motes, PublicKey, SecretKey, DEFAULT_MINIMUM_BID_AMOUNT, + U256, U512, +}; + +const MINIMUM_BONDED_AMOUNT: u64 = 1_000; + +/// Validator with smallest stake will withdraw most of his stake to ensure we did move time forward +/// to unlock his whole vesting schedule. +const WITHDRAW_AMOUNT: u64 = MINIMUM_BONDED_AMOUNT - DEFAULT_MINIMUM_BID_AMOUNT; + +/// Initial lockup period +const VESTING_BASE: u64 = DEFAULT_GENESIS_TIMESTAMP_MILLIS + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS; + +const DAY_MILLIS: u64 = 24 * 60 * 60 * 1000; +const WEEK_MILLIS: u64 = 7 * DAY_MILLIS; +const DELEGATION_RATE: DelegationRate = 0; + +/// Simplified vesting weeks for testing purposes. Each element is used as an argument to +/// run_auction call. 
+const VESTING_WEEKS: [u64; 3] = [ + // Passes the vesting schedule (aka initial lockup + schedule length) + VESTING_BASE + VESTING_SCHEDULE_LENGTH_MILLIS, + // One week after + VESTING_BASE + VESTING_SCHEDULE_LENGTH_MILLIS + WEEK_MILLIS, + // Two weeks after + VESTING_BASE + VESTING_SCHEDULE_LENGTH_MILLIS + (2 * WEEK_MILLIS), +]; + +static GENESIS_VALIDATOR_PUBLIC_KEYS: Lazy> = Lazy::new(|| { + let mut set = BTreeSet::new(); + for i in 1..=DEFAULT_VALIDATOR_SLOTS { + let mut secret_key_bytes = [255u8; 32]; + U256::from(i).to_big_endian(&mut secret_key_bytes); + let secret_key = SecretKey::secp256k1_from_bytes(secret_key_bytes).unwrap(); + let public_key = PublicKey::from(&secret_key); + set.insert(public_key); + } + set +}); + +static GENESIS_VALIDATORS: Lazy> = Lazy::new(|| { + let mut vec = Vec::with_capacity(GENESIS_VALIDATOR_PUBLIC_KEYS.len()); + + for (index, public_key) in GENESIS_VALIDATOR_PUBLIC_KEYS.iter().enumerate() { + let bond = MINIMUM_BONDED_AMOUNT + index as u64; + let account = GenesisAccount::account( + public_key.clone(), + Motes::new(MINIMUM_ACCOUNT_CREATION_BALANCE), + Some(GenesisValidator::new( + Motes::new(bond), + DelegationRate::zero(), + )), + ); + vec.push(account); + } + + vec +}); + +static LOWEST_STAKE_VALIDATOR: Lazy = Lazy::new(|| { + let mut genesis_accounts: Vec<&GenesisAccount> = GENESIS_ACCOUNTS.iter().collect(); + genesis_accounts.sort_by_key(|genesis_account| genesis_account.staked_amount()); + + // Finds a genesis validator with lowest stake + let genesis_account = genesis_accounts + .into_iter() + .find(|genesis_account| { + genesis_account.is_validator() && genesis_account.staked_amount() > Motes::zero() + }) + .unwrap(); + + assert_eq!( + genesis_account.staked_amount(), + Motes::new(MINIMUM_BONDED_AMOUNT) + ); + + genesis_account.public_key() +}); + +static GENESIS_ACCOUNTS: Lazy> = Lazy::new(|| { + let mut tmp: Vec = DEFAULT_ACCOUNTS.clone(); + tmp.append(&mut GENESIS_VALIDATORS.clone()); + tmp +}); + +fn 
initialize_builder() -> LmdbWasmTestBuilder { + let mut builder = LmdbWasmTestBuilder::default(); + + let run_genesis_request = utils::create_run_genesis_request(GENESIS_ACCOUNTS.clone()); + builder.run_genesis(run_genesis_request); + + let fund_request = TransferRequestBuilder::new( + MINIMUM_ACCOUNT_CREATION_BALANCE, + PublicKey::System.to_account_hash(), + ) + .build(); + + builder.transfer_and_commit(fund_request).expect_success(); + + builder +} + +#[ignore] +#[test] +fn should_not_retain_genesis_validator_slot_protection_after_vesting_period_elapsed() { + let lowest_stake_validator_addr = LOWEST_STAKE_VALIDATOR.to_account_hash(); + + let mut builder = initialize_builder(); + + // Unlock all funds of genesis validator + builder.run_auction(VESTING_WEEKS[0], Vec::new()); + + let era_validators_1: EraValidators = builder.get_era_validators(); + + let (last_era_1, weights_1) = era_validators_1.iter().last().unwrap(); + let genesis_validator_stake_1 = weights_1.get(&LOWEST_STAKE_VALIDATOR).unwrap(); + let next_validator_set_1 = BTreeSet::from_iter(weights_1.keys().cloned()); + assert_eq!( + next_validator_set_1, + GENESIS_VALIDATOR_PUBLIC_KEYS.clone(), + "expected validator set should be unchanged" + ); + + let withdraw_bid_request = { + let auction_hash = builder.get_auction_contract_hash(); + let session_args = runtime_args! 
{ + auction::ARG_PUBLIC_KEY => LOWEST_STAKE_VALIDATOR.clone(), + auction::ARG_AMOUNT => U512::from(WITHDRAW_AMOUNT), + }; + ExecuteRequestBuilder::contract_call_by_hash( + lowest_stake_validator_addr, + auction_hash, + auction::METHOD_WITHDRAW_BID, + session_args, + ) + .build() + }; + + builder.exec(withdraw_bid_request).expect_success().commit(); + + builder.run_auction(VESTING_WEEKS[1], Vec::new()); + + let era_validators_2: EraValidators = builder.get_era_validators(); + + let (last_era_2, weights_2) = era_validators_2.iter().last().unwrap(); + assert!(last_era_2 > last_era_1); + let genesis_validator_stake_2 = weights_2.get(&LOWEST_STAKE_VALIDATOR).unwrap(); + + let next_validator_set_2 = BTreeSet::from_iter(weights_2.keys().cloned()); + assert_eq!(next_validator_set_2, GENESIS_VALIDATOR_PUBLIC_KEYS.clone()); + + assert!( + genesis_validator_stake_1 > genesis_validator_stake_2, + "stake should decrease in future era" + ); + + let stake_diff = if genesis_validator_stake_1 > genesis_validator_stake_2 { + genesis_validator_stake_1 - genesis_validator_stake_2 + } else { + genesis_validator_stake_2 - genesis_validator_stake_1 + }; + + assert_eq!(stake_diff, U512::from(WITHDRAW_AMOUNT)); + + // Add nonfounding validator higher than `unbonding_account` has after unlocking & withdrawing + + // New validator bids with the original stake of unbonding_account to take his place in future + // era. We know that unbonding_account has now smaller stake than before. + + let add_bid_request = ExecuteRequestBuilder::contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + builder.get_auction_contract_hash(), + auction::METHOD_ADD_BID, + runtime_args! 
{ + auction::ARG_PUBLIC_KEY => DEFAULT_ACCOUNT_PUBLIC_KEY.clone(), + auction::ARG_AMOUNT => *genesis_validator_stake_1, + auction::ARG_DELEGATION_RATE => DELEGATION_RATE, + }, + ) + .build(); + + builder.exec(add_bid_request).expect_success().commit(); + + builder.run_auction(VESTING_WEEKS[2], Vec::new()); + + let era_validators_3: EraValidators = builder.get_era_validators(); + let (last_era_3, weights_3) = era_validators_3.iter().last().unwrap(); + assert!(last_era_3 > last_era_2); + + assert_eq!( + weights_3.len(), + DEFAULT_VALIDATOR_SLOTS as usize, + "auction incorrectly computed more than slots than available" + ); + + assert!( + weights_3.contains_key(&*DEFAULT_ACCOUNT_PUBLIC_KEY), + "new non-genesis validator should replace a genesis validator with smaller stake" + ); + + assert!( + !weights_3.contains_key(&LOWEST_STAKE_VALIDATOR), + "unbonded account should be out of the set" + ); + + let next_validator_set_3 = BTreeSet::from_iter(weights_3.keys().cloned()); + let expected_validators = { + let mut pks = GENESIS_VALIDATOR_PUBLIC_KEYS.clone(); + pks.remove(&LOWEST_STAKE_VALIDATOR); + pks.insert(DEFAULT_ACCOUNT_PUBLIC_KEY.clone()); + pks + }; + assert_eq!( + next_validator_set_3, expected_validators, + "actual next validator set does not match expected validator set" + ); +} + +#[ignore] +#[test] +fn should_retain_genesis_validator_slot_protection() { + const CASPER_VESTING_SCHEDULE_PERIOD_MILLIS: u64 = 91 * DAY_MILLIS; + const CASPER_LOCKED_FUNDS_PERIOD_MILLIS: u64 = 90 * DAY_MILLIS; + const CASPER_VESTING_BASE: u64 = + DEFAULT_GENESIS_TIMESTAMP_MILLIS + CASPER_LOCKED_FUNDS_PERIOD_MILLIS; + + let mut builder = { + let chainspec = ChainspecConfig::default() + .with_vesting_schedule_period_millis(CASPER_VESTING_SCHEDULE_PERIOD_MILLIS); + + let run_genesis_request = { + let accounts = GENESIS_ACCOUNTS.clone(); + let exec_config = GenesisConfigBuilder::default() + .with_accounts(accounts) + .with_locked_funds_period_millis(CASPER_LOCKED_FUNDS_PERIOD_MILLIS) + 
.build(); + + GenesisRequest::new( + DEFAULT_GENESIS_CONFIG_HASH, + DEFAULT_PROTOCOL_VERSION, + exec_config, + DEFAULT_CHAINSPEC_REGISTRY.clone(), + ) + }; + + let mut builder = LmdbWasmTestBuilder::new_temporary_with_config(chainspec); + builder.run_genesis(run_genesis_request); + + let fund_request = TransferRequestBuilder::new( + MINIMUM_ACCOUNT_CREATION_BALANCE, + PublicKey::System.to_account_hash(), + ) + .build(); + + builder.transfer_and_commit(fund_request).expect_success(); + + builder + }; + + let era_validators_1: EraValidators = builder.get_era_validators(); + + let (last_era_1, weights_1) = era_validators_1.iter().last().unwrap(); + let genesis_validator_stake_1 = weights_1.get(&LOWEST_STAKE_VALIDATOR).unwrap(); + // One higher than the lowest stake + let winning_stake = *genesis_validator_stake_1 + U512::one(); + let next_validator_set_1 = BTreeSet::from_iter(weights_1.keys().cloned()); + assert_eq!( + next_validator_set_1, + GENESIS_VALIDATOR_PUBLIC_KEYS.clone(), + "expected validator set should be unchanged" + ); + + builder.run_auction(CASPER_VESTING_BASE, Vec::new()); + + let era_validators_2: EraValidators = builder.get_era_validators(); + + let (last_era_2, weights_2) = era_validators_2.iter().last().unwrap(); + assert!(last_era_2 > last_era_1); + let next_validator_set_2 = BTreeSet::from_iter(weights_2.keys().cloned()); + assert_eq!(next_validator_set_2, GENESIS_VALIDATOR_PUBLIC_KEYS.clone()); + + let add_bid_request = ExecuteRequestBuilder::contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + builder.get_auction_contract_hash(), + auction::METHOD_ADD_BID, + runtime_args! 
{ + auction::ARG_PUBLIC_KEY => DEFAULT_ACCOUNT_PUBLIC_KEY.clone(), + auction::ARG_AMOUNT => winning_stake, + auction::ARG_DELEGATION_RATE => DELEGATION_RATE, + }, + ) + .build(); + + builder.exec(add_bid_request).expect_success().commit(); + + builder.run_auction(CASPER_VESTING_BASE + WEEK_MILLIS, Vec::new()); + + // All genesis validator slots are protected after ~1 week + let era_validators_3: EraValidators = builder.get_era_validators(); + let (last_era_3, weights_3) = era_validators_3.iter().last().unwrap(); + assert!(last_era_3 > last_era_2); + let next_validator_set_3 = BTreeSet::from_iter(weights_3.keys().cloned()); + assert_eq!(next_validator_set_3, GENESIS_VALIDATOR_PUBLIC_KEYS.clone()); + + // After 13 weeks ~ 91 days lowest stake validator is dropped and replaced with higher bid + builder.run_auction( + CASPER_VESTING_BASE + VESTING_SCHEDULE_LENGTH_MILLIS, + Vec::new(), + ); + + let era_validators_4: EraValidators = builder.get_era_validators(); + let (last_era_4, weights_4) = era_validators_4.iter().last().unwrap(); + assert!(last_era_4 > last_era_3); + let next_validator_set_4 = BTreeSet::from_iter(weights_4.keys().cloned()); + let expected_validators = { + let mut pks = GENESIS_VALIDATOR_PUBLIC_KEYS.clone(); + pks.remove(&LOWEST_STAKE_VALIDATOR); + pks.insert(DEFAULT_ACCOUNT_PUBLIC_KEY.clone()); + pks + }; + assert_eq!( + next_validator_set_4, expected_validators, + "actual next validator set does not match expected validator set" + ); +} diff --git a/execution_engine_testing/tests/src/test/regression/gov_42.rs b/execution_engine_testing/tests/src/test/regression/gov_42.rs new file mode 100644 index 0000000000..e34ac67ce4 --- /dev/null +++ b/execution_engine_testing/tests/src/test/regression/gov_42.rs @@ -0,0 +1,219 @@ +// This test focuses on testing whether we charge for +// WASM files that are malformed (unparseable). + +// If we're provided with malformed file, we should charge. 
+// The exception is the "empty wasm" when send as +// a payment, because in such case we use the "default payment" +// instead. + +// For increased security, we also test some other cases in this test +// like gas overflow (which is a runtime error). + +// Other potential test cases: +// 1. Wasm with unsupported "start" section - tested in `ee_890` (but without asserting the +// charge) + +use casper_engine_test_support::{ + DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, + DEFAULT_PAYMENT, LOCAL_GENESIS_REQUEST, +}; +use casper_types::{runtime_args, Gas, RuntimeArgs}; + +use crate::{ + test::regression::test_utils::{ + make_gas_counter_overflow, make_module_with_start_section, + make_module_without_memory_section, + }, + wasm_utils, +}; + +const ARG_AMOUNT: &str = "amount"; + +#[derive(Copy, Clone, Debug)] +enum ExecutionPhase { + Payment, + Session, +} + +fn run_test_case(input_wasm_bytes: &[u8], expected_error: &str, execution_phase: ExecutionPhase) { + let payment_amount = *DEFAULT_PAYMENT; + + let account_hash = *DEFAULT_ACCOUNT_ADDR; + let session_args = RuntimeArgs::default(); + let deploy_hash = [42; 32]; + + let (deploy_item_builder, expected_error_message) = match execution_phase { + ExecutionPhase::Payment => ( + DeployItemBuilder::new() + .with_payment_bytes( + input_wasm_bytes.to_vec(), + runtime_args! {ARG_AMOUNT => payment_amount,}, + ) + .with_session_bytes(wasm_utils::do_nothing_bytes(), session_args), + expected_error, + ), + ExecutionPhase::Session => ( + DeployItemBuilder::new() + .with_session_bytes(input_wasm_bytes.to_vec(), session_args) + .with_standard_payment(runtime_args! 
{ARG_AMOUNT => payment_amount,}), + expected_error, + ), + }; + let deploy_item = deploy_item_builder + .with_address(account_hash) + .with_authorization_keys(&[account_hash]) + .with_deploy_hash(deploy_hash) + .build(); + let do_minimum_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); + + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let empty_wasm_in_payment = match execution_phase { + ExecutionPhase::Payment => input_wasm_bytes.is_empty(), + ExecutionPhase::Session => false, + }; + + if empty_wasm_in_payment { + // Special case: We expect success, since default payment will be used instead. + builder.exec(do_minimum_request).expect_success().commit(); + } else { + builder.exec(do_minimum_request).expect_failure().commit(); + + let actual_error = builder.get_error().expect("should have error").to_string(); + assert!(actual_error.contains(expected_error_message)); + + let gas = builder.last_exec_gas_consumed(); + assert_eq!(gas, Gas::zero()); + } +} + +#[ignore] +#[test] +fn should_charge_payment_with_incorrect_wasm_file_invalid_magic_number() { + const WASM_BYTES: &[u8] = &[1, 2, 3, 4, 5]; // Correct WASM magic bytes are: 0x00 0x61 0x73 0x6d ("\0asm") + let execution_phase = ExecutionPhase::Payment; + let expected_error = " Invalid magic number at start of file"; + run_test_case(WASM_BYTES, expected_error, execution_phase) +} + +#[ignore] +#[test] +fn should_charge_session_with_incorrect_wasm_file_invalid_magic_number() { + const WASM_BYTES: &[u8] = &[1, 2, 3, 4, 5]; // Correct WASM magic bytes are: 0x00 0x61 0x73 0x6d ("\0asm") + let execution_phase = ExecutionPhase::Session; + let expected_error = "Invalid magic number at start of file"; + run_test_case(WASM_BYTES, expected_error, execution_phase) +} + +#[ignore] +#[test] +fn should_fail_to_charge_payment_with_incorrect_wasm_file_empty_bytes() { + const WASM_BYTES: &[u8] = &[]; + let execution_phase = ExecutionPhase::Payment; 
+ let expected_error = "I/O Error: UnexpectedEof"; + run_test_case(WASM_BYTES, expected_error, execution_phase) +} + +#[ignore] +#[test] +fn should_charge_session_with_incorrect_wasm_file_empty_bytes() { + const WASM_BYTES: &[u8] = &[]; + let execution_phase = ExecutionPhase::Session; + let expected_error = "I/O Error: UnexpectedEof"; + run_test_case(WASM_BYTES, expected_error, execution_phase) +} + +#[ignore] +#[test] +fn should_charge_payment_with_incorrect_wasm_correct_magic_number_incomplete_module() { + const WASM_BYTES: &[u8] = &[ + 0x00, 0x61, 0x73, 0x6D, 0x01, 0x00, 0x00, 0x00, 0x01, 0x35, 0x09, 0x60, 0x02, 0x7F, 0x7F, + 0x01, 0x7F, 0x60, 0x03, 0x7F, 0x7F, 0x7F, 0x01, 0x7F, 0x60, 0x01, 0x7F, 0x00, 0x60, 0x00, + 0x00, 0x60, 0x01, 0x7F, 0x01, 0x7F, 0x60, 0x03, 0x7F, 0x7F, 0x7F, 0x00, 0x60, 0x05, 0x7F, + 0x7F, 0x7F, 0x7F, 0x7F, 0x01, 0x7F, 0x60, 0x02, 0x7F, 0x7F, 0x00, 0x60, 0x04, 0x7F, 0x7F, + 0x7F, 0x7F, 0x00, 0x02, 0x50, 0x03, 0x03, 0x65, 0x6E, 0x76, 0x16, 0x63, 0x61, 0x73, 0x70, + 0x65, 0x72, 0x5F, 0x6C, 0x6F, 0x61, 0x64, 0x5F, 0x6E, 0x61, 0x6D, 0x65, 0x64, 0x5F, 0x6B, + 0x65, 0x79, 0x73, 0x00, 0x00, 0x03, 0x65, 0x6E, 0x76, 0x17, 0x63, 0x61, 0x73, 0x70, 0x65, + 0x72, 0x5F, 0x72, 0x65, 0x61, 0x64, 0x5F, 0x68, 0x6F, 0x73, 0x74, 0x5F, 0x62, 0x75, 0x66, + 0x66, 0x65, 0x72, 0x00, 0x01, 0x03, 0x65, 0x6E, 0x76, 0x0D, 0x63, 0x61, 0x73, 0x70, 0x65, + 0x72, 0x5F, 0x72, 0x65, 0x76, 0x65, 0x72, 0x74, 0x00, + ]; + let execution_phase = ExecutionPhase::Payment; + let expected_error = "I/O Error: UnexpectedEof"; + run_test_case(WASM_BYTES, expected_error, execution_phase) +} + +#[ignore] +#[test] +fn should_charge_session_with_incorrect_wasm_correct_magic_number_incomplete_module() { + const WASM_BYTES: &[u8] = &[ + 0x00, 0x61, 0x73, 0x6D, 0x01, 0x00, 0x00, 0x00, 0x01, 0x35, 0x09, 0x60, 0x02, 0x7F, 0x7F, + 0x01, 0x7F, 0x60, 0x03, 0x7F, 0x7F, 0x7F, 0x01, 0x7F, 0x60, 0x01, 0x7F, 0x00, 0x60, 0x00, + 0x00, 0x60, 0x01, 0x7F, 0x01, 0x7F, 0x60, 0x03, 0x7F, 0x7F, 0x7F, 0x00, 
0x60, 0x05, 0x7F, + 0x7F, 0x7F, 0x7F, 0x7F, 0x01, 0x7F, 0x60, 0x02, 0x7F, 0x7F, 0x00, 0x60, 0x04, 0x7F, 0x7F, + 0x7F, 0x7F, 0x00, 0x02, 0x50, 0x03, 0x03, 0x65, 0x6E, 0x76, 0x16, 0x63, 0x61, 0x73, 0x70, + 0x65, 0x72, 0x5F, 0x6C, 0x6F, 0x61, 0x64, 0x5F, 0x6E, 0x61, 0x6D, 0x65, 0x64, 0x5F, 0x6B, + 0x65, 0x79, 0x73, 0x00, 0x00, 0x03, 0x65, 0x6E, 0x76, 0x17, 0x63, 0x61, 0x73, 0x70, 0x65, + 0x72, 0x5F, 0x72, 0x65, 0x61, 0x64, 0x5F, 0x68, 0x6F, 0x73, 0x74, 0x5F, 0x62, 0x75, 0x66, + 0x66, 0x65, 0x72, 0x00, 0x01, 0x03, 0x65, 0x6E, 0x76, 0x0D, 0x63, 0x61, 0x73, 0x70, 0x65, + 0x72, 0x5F, 0x72, 0x65, 0x76, 0x65, 0x72, 0x74, 0x00, + ]; + let execution_phase = ExecutionPhase::Session; + let expected_error = "I/O Error: UnexpectedEof"; + run_test_case(WASM_BYTES, expected_error, execution_phase) +} + +#[ignore] +#[test] +fn should_charge_payment_with_incorrect_wasm_gas_counter_overflow() { + let wasm_bytes = make_gas_counter_overflow(); + let execution_phase = ExecutionPhase::Payment; + let expected_error = "Encountered operation forbidden by gas rules"; + run_test_case(&wasm_bytes, expected_error, execution_phase) +} + +#[ignore] +#[test] +fn should_charge_session_with_incorrect_wasm_gas_counter_overflow() { + let wasm_bytes = make_gas_counter_overflow(); + let execution_phase = ExecutionPhase::Session; + let expected_error = "Encountered operation forbidden by gas rules"; + run_test_case(&wasm_bytes, expected_error, execution_phase) +} + +#[ignore] +#[test] +fn should_charge_payment_with_incorrect_wasm_no_memory_section() { + let wasm_bytes = make_module_without_memory_section(); + let execution_phase = ExecutionPhase::Payment; + let expected_error = "Memory section should exist"; + run_test_case(&wasm_bytes, expected_error, execution_phase) +} + +#[ignore] +#[test] +fn should_charge_session_with_incorrect_wasm_no_memory_section() { + let wasm_bytes = make_module_without_memory_section(); + let execution_phase = ExecutionPhase::Session; + let expected_error = "Memory section 
should exist"; + run_test_case(&wasm_bytes, expected_error, execution_phase) +} + +#[ignore] +#[test] +fn should_charge_payment_with_incorrect_wasm_start_section() { + let wasm_bytes = make_module_with_start_section(); + let execution_phase = ExecutionPhase::Payment; + let expected_error = "Unsupported Wasm start"; + run_test_case(&wasm_bytes, expected_error, execution_phase) +} + +#[ignore] +#[test] +fn should_charge_session_with_incorrect_wasm_start_section() { + let wasm_bytes = make_module_with_start_section(); + let execution_phase = ExecutionPhase::Session; + let expected_error = "Unsupported Wasm start"; + run_test_case(&wasm_bytes, expected_error, execution_phase) +} diff --git a/execution_engine_testing/tests/src/test/regression/gov_427.rs b/execution_engine_testing/tests/src/test/regression/gov_427.rs new file mode 100644 index 0000000000..80f86f0fc0 --- /dev/null +++ b/execution_engine_testing/tests/src/test/regression/gov_427.rs @@ -0,0 +1,113 @@ +use casper_engine_test_support::{ + ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, DEFAULT_WASM_V1_CONFIG, + LOCAL_GENESIS_REQUEST, +}; +use casper_execution_engine::{engine_state::Error, execution::ExecError}; +use casper_types::{addressable_entity::DEFAULT_ENTRY_POINT_NAME, RuntimeArgs}; +use walrus::{ir::Value, FunctionBuilder, Module, ModuleConfig, ValType}; + +/// Creates a wasm with a function that contains local section with types in `repeated_pattern` +/// repeated `repeat_count` times with additional `extra_types` appended at the end of local group. 
+fn make_arbitrary_local_count( + repeat_count: usize, + repeat_pattern: &[ValType], + extra_types: &[ValType], +) -> Vec { + let mut module = Module::with_config(ModuleConfig::new()); + + let _memory_id = module.memories.add_local(false, 11, None); + + let mut func_with_locals = FunctionBuilder::new(&mut module.types, &[], &[]); + + let mut locals = Vec::new(); + for _ in 0..repeat_count { + for val_type in repeat_pattern { + let local = module.locals.add(*val_type); + locals.push((local, *val_type)); + } + } + + for extra_type in extra_types { + let local = module.locals.add(*extra_type); + locals.push((local, *extra_type)); + } + + for (i, (local, val_type)) in locals.into_iter().enumerate() { + let value = match val_type { + ValType::I32 => Value::I32(i.try_into().unwrap()), + ValType::I64 => Value::I64(i.try_into().unwrap()), + ValType::F32 => Value::F32(i as f32), + ValType::F64 => Value::F64(i as f64), + ValType::V128 => Value::V128(i.try_into().unwrap()), + ValType::Externref | ValType::Funcref => todo!("{:?}", val_type), + }; + func_with_locals.func_body().const_(value).local_set(local); + } + + let func_with_locals = func_with_locals.finish(vec![], &mut module.funcs); + + let mut call_func = FunctionBuilder::new(&mut module.types, &[], &[]); + + call_func.func_body().call(func_with_locals); + + let call = call_func.finish(Vec::new(), &mut module.funcs); + + module.exports.add(DEFAULT_ENTRY_POINT_NAME, call); + + module.emit_wasm() +} + +#[ignore] +#[test] +fn too_many_locals_should_exceed_stack_height() { + const CALL_COST: usize = 1; + let extra_types = [ValType::I32]; + let repeat_pattern = [ValType::I64]; + let max_stack_height = DEFAULT_WASM_V1_CONFIG.max_stack_height() as usize; + + let success_wasm_bytes: Vec = make_arbitrary_local_count( + max_stack_height - extra_types.len() - CALL_COST - 1, + &repeat_pattern, + &extra_types, + ); + + let failing_wasm_bytes: Vec = make_arbitrary_local_count( + max_stack_height - extra_types.len() - CALL_COST, + 
&repeat_pattern, + &extra_types, + ); + + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let success_request = ExecuteRequestBuilder::module_bytes( + *DEFAULT_ACCOUNT_ADDR, + success_wasm_bytes, + RuntimeArgs::default(), + ) + .build(); + + builder.exec(success_request).expect_success().commit(); + + let failing_request = ExecuteRequestBuilder::module_bytes( + *DEFAULT_ACCOUNT_ADDR, + failing_wasm_bytes, + RuntimeArgs::default(), + ) + .build(); + + builder.exec(failing_request).expect_failure().commit(); + + let error = builder.get_error().expect("should have error"); + + // Here we pass the preprocess stage, but we fail at stack height limiter as we do have very + // restrictive default stack height. + assert!( + matches!( + &error, + Error::Exec(ExecError::Interpreter(s)) if s.contains("Unreachable") + ), + "{:?}", + error + ); +} diff --git a/execution_engine_testing/tests/src/test/regression/gov_74.rs b/execution_engine_testing/tests/src/test/regression/gov_74.rs new file mode 100644 index 0000000000..6efd99f023 --- /dev/null +++ b/execution_engine_testing/tests/src/test/regression/gov_74.rs @@ -0,0 +1,157 @@ +use casper_engine_test_support::{ + ExecuteRequestBuilder, LmdbWasmTestBuilder, UpgradeRequestBuilder, DEFAULT_ACCOUNT_ADDR, + DEFAULT_PROTOCOL_VERSION, LOCAL_GENESIS_REQUEST, +}; +use casper_execution_engine::{ + engine_state::Error, + execution::ExecError, + runtime::{PreprocessingError, WasmValidationError, DEFAULT_MAX_PARAMETER_COUNT}, +}; +use casper_types::{EraId, ProtocolVersion, RuntimeArgs, WasmV1Config}; + +use crate::wasm_utils; + +const ARITY_INTERPRETER_LIMIT: usize = DEFAULT_MAX_PARAMETER_COUNT as usize; +const DEFAULT_ACTIVATION_POINT: EraId = EraId::new(1); +const I32_WAT_TYPE: &str = "i64"; +const NEW_WASM_STACK_HEIGHT: u32 = 16; +const OLD_PROTOCOL_VERSION: ProtocolVersion = DEFAULT_PROTOCOL_VERSION; +const NEW_PROTOCOL_VERSION: ProtocolVersion = ProtocolVersion::from_parts( + 
OLD_PROTOCOL_VERSION.value().major, + OLD_PROTOCOL_VERSION.value().minor, + OLD_PROTOCOL_VERSION.value().patch + 1, +); + +fn initialize_builder() -> LmdbWasmTestBuilder { + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + builder +} + +#[ignore] +#[test] +fn should_pass_max_parameter_count() { + let mut builder = initialize_builder(); + + // This runs out of the interpreter stack limit + let module_bytes = wasm_utils::make_n_arg_call_bytes(ARITY_INTERPRETER_LIMIT, I32_WAT_TYPE) + .expect("should make wasm bytes"); + + let exec = ExecuteRequestBuilder::module_bytes( + *DEFAULT_ACCOUNT_ADDR, + module_bytes, + RuntimeArgs::default(), + ) + .build(); + + builder.exec(exec).expect_success().commit(); + + let module_bytes = wasm_utils::make_n_arg_call_bytes(ARITY_INTERPRETER_LIMIT + 1, I32_WAT_TYPE) + .expect("should make wasm bytes"); + + let exec = ExecuteRequestBuilder::module_bytes( + *DEFAULT_ACCOUNT_ADDR, + module_bytes, + RuntimeArgs::default(), + ) + .build(); + + builder.exec(exec).expect_failure().commit(); + let error = builder.get_error().expect("should have error"); + + assert!( + matches!( + error, + Error::WasmPreprocessing(PreprocessingError::WasmValidation( + WasmValidationError::TooManyParameters { + max: 256, + actual: 257 + } + )) + ), + "{:?}", + error + ); +} + +#[ignore] +#[test] +fn should_observe_stack_height_limit() { + let mut builder = initialize_builder(); + + assert!(WasmV1Config::default().max_stack_height() > NEW_WASM_STACK_HEIGHT); + + // This runs out of the interpreter stack limit + let exec_request_1 = { + let module_bytes = + wasm_utils::make_n_arg_call_bytes(NEW_WASM_STACK_HEIGHT as usize, I32_WAT_TYPE) + .expect("should make wasm bytes"); + + ExecuteRequestBuilder::module_bytes( + *DEFAULT_ACCOUNT_ADDR, + module_bytes, + RuntimeArgs::default(), + ) + .build() + }; + + builder.exec(exec_request_1).expect_success().commit(); + + { + let updated_chainspec = builder + 
.chainspec() + .clone() + .with_wasm_max_stack_height(NEW_WASM_STACK_HEIGHT); + + builder.with_chainspec(updated_chainspec); + + let mut upgrade_request = UpgradeRequestBuilder::new() + .with_current_protocol_version(OLD_PROTOCOL_VERSION) + .with_new_protocol_version(NEW_PROTOCOL_VERSION) + .with_activation_point(DEFAULT_ACTIVATION_POINT) + .build(); + + builder.upgrade(&mut upgrade_request); + } + + // This runs out of the interpreter stack limit. + // An amount of args equal to the new limit fails because there's overhead of `fn call` that + // adds 1 to the height. + let exec_request_2 = { + let module_bytes = + wasm_utils::make_n_arg_call_bytes(NEW_WASM_STACK_HEIGHT as usize, I32_WAT_TYPE) + .expect("should make wasm bytes"); + + ExecuteRequestBuilder::module_bytes( + *DEFAULT_ACCOUNT_ADDR, + module_bytes, + RuntimeArgs::default(), + ) + .build() + }; + + builder.exec(exec_request_2).expect_failure().commit(); + + let error = builder.get_error().expect("should have error"); + assert!( + matches!(&error, Error::Exec(ExecError::Interpreter(s)) if s.contains("Unreachable")), + "{:?}", + error + ); + + // But new limit minus one runs fine + let exec_request_3 = { + let module_bytes = + wasm_utils::make_n_arg_call_bytes(NEW_WASM_STACK_HEIGHT as usize - 1, I32_WAT_TYPE) + .expect("should make wasm bytes"); + + ExecuteRequestBuilder::module_bytes( + *DEFAULT_ACCOUNT_ADDR, + module_bytes, + RuntimeArgs::default(), + ) + .build() + }; + + builder.exec(exec_request_3).expect_success().commit(); +} diff --git a/execution_engine_testing/tests/src/test/regression/gov_89_regression.rs b/execution_engine_testing/tests/src/test/regression/gov_89_regression.rs new file mode 100644 index 0000000000..1bb327acf5 --- /dev/null +++ b/execution_engine_testing/tests/src/test/regression/gov_89_regression.rs @@ -0,0 +1,201 @@ +use std::{ + collections::BTreeSet, + convert::TryInto, + time::{Duration, SystemTime, UNIX_EPOCH}, +}; + +use num_traits::Zero; +use once_cell::sync::Lazy; + 
+use casper_engine_test_support::{ + utils, LmdbWasmTestBuilder, StepRequestBuilder, DEFAULT_ACCOUNTS, +}; +use casper_storage::data_access_layer::{SlashItem, StepResult}; +use casper_types::{ + execution::TransformKindV2, + system::auction::{ + BidsExt, DelegationRate, SeigniorageRecipientsSnapshotV2, + SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY, + }, + CLValue, EntityAddr, EraId, GenesisAccount, GenesisValidator, Key, Motes, ProtocolVersion, + PublicKey, SecretKey, StoredValue, U512, +}; + +static ACCOUNT_1_PUBLIC_KEY: Lazy = Lazy::new(|| { + let secret_key = SecretKey::ed25519_from_bytes([200; SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) +}); +const ACCOUNT_1_BALANCE: u64 = 100_000_000; +const ACCOUNT_1_BOND: u64 = 100_000_000; + +static ACCOUNT_2_PUBLIC_KEY: Lazy = Lazy::new(|| { + let secret_key = SecretKey::ed25519_from_bytes([202; SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) +}); +const ACCOUNT_2_BALANCE: u64 = 200_000_000; +const ACCOUNT_2_BOND: u64 = 200_000_000; + +fn initialize_builder() -> LmdbWasmTestBuilder { + let mut builder = LmdbWasmTestBuilder::default(); + + let accounts = { + let mut tmp: Vec = DEFAULT_ACCOUNTS.clone(); + let account_1 = GenesisAccount::account( + ACCOUNT_1_PUBLIC_KEY.clone(), + Motes::new(ACCOUNT_1_BALANCE), + Some(GenesisValidator::new( + Motes::new(ACCOUNT_1_BOND), + DelegationRate::zero(), + )), + ); + let account_2 = GenesisAccount::account( + ACCOUNT_2_PUBLIC_KEY.clone(), + Motes::new(ACCOUNT_2_BALANCE), + Some(GenesisValidator::new( + Motes::new(ACCOUNT_2_BOND), + DelegationRate::zero(), + )), + ); + tmp.push(account_1); + tmp.push(account_2); + tmp + }; + let run_genesis_request = utils::create_run_genesis_request(accounts); + builder.run_genesis(run_genesis_request); + builder +} + +#[ignore] +#[test] +fn should_not_create_any_purse() { + let mut builder = initialize_builder(); + let auction_hash = builder.get_auction_contract_hash(); + + let mut now = SystemTime::now(); + let 
eras_end_timestamp_millis_1 = now.duration_since(UNIX_EPOCH).expect("Time went backwards"); + + now += Duration::from_secs(60 * 60); + let eras_end_timestamp_millis_2 = now.duration_since(UNIX_EPOCH).expect("Time went backwards"); + + assert!(eras_end_timestamp_millis_2 > eras_end_timestamp_millis_1); + + let step_request_1 = StepRequestBuilder::new() + .with_parent_state_hash(builder.get_post_state_hash()) + .with_protocol_version(ProtocolVersion::V1_0_0) + .with_slash_item(SlashItem::new(ACCOUNT_1_PUBLIC_KEY.clone())) + .with_next_era_id(EraId::from(1)) + .with_era_end_timestamp_millis(eras_end_timestamp_millis_1.as_millis().try_into().unwrap()) + .build(); + + let before_auction_seigniorage: SeigniorageRecipientsSnapshotV2 = builder.get_value( + EntityAddr::System(auction_hash.value()), + SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY, + ); + + let bids_before_slashing = builder.get_bids(); + assert!( + bids_before_slashing.contains_validator_public_key(&ACCOUNT_1_PUBLIC_KEY), + "should have entry in the genesis bids table {:?}", + bids_before_slashing + ); + + let effects_1 = match builder.step(step_request_1) { + StepResult::Failure(_) => { + panic!("step_request_1: Failure") + } + StepResult::RootNotFound => { + panic!("step_request_1: RootNotFound") + } + StepResult::Success { effects, .. 
} => effects, + }; + + assert!( + builder + .query( + None, + Key::Unbond(ACCOUNT_1_PUBLIC_KEY.to_account_hash()), + &[], + ) + .is_err(), + "slash does not unbond" + ); + + let bids_after_slashing = builder.get_bids(); + assert!( + !bids_after_slashing.contains_validator_public_key(&ACCOUNT_1_PUBLIC_KEY), + "should not have entry after slashing {:?}", + bids_after_slashing + ); + + let bids_after_slashing = builder.get_bids(); + let account_1_bid = bids_after_slashing.validator_bid(&ACCOUNT_1_PUBLIC_KEY); + assert!(account_1_bid.is_none()); + + let bids_after_slashing = builder.get_bids(); + assert_ne!( + bids_before_slashing, bids_after_slashing, + "bids table should be different before and after slashing" + ); + + // seigniorage snapshot should have changed after auction + let after_auction_seigniorage: SeigniorageRecipientsSnapshotV2 = builder.get_value( + EntityAddr::System(auction_hash.value()), + SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY, + ); + assert!( + !after_auction_seigniorage + .keys() + .all(|key| before_auction_seigniorage.contains_key(key)), + "run auction should have changed seigniorage keys" + ); + + let step_request_2 = StepRequestBuilder::new() + .with_parent_state_hash(builder.get_post_state_hash()) + .with_protocol_version(ProtocolVersion::V1_0_0) + .with_slash_item(SlashItem::new(ACCOUNT_1_PUBLIC_KEY.clone())) + .with_next_era_id(EraId::from(2)) + .with_era_end_timestamp_millis(eras_end_timestamp_millis_2.as_millis().try_into().unwrap()) + .build(); + + let effects_2 = match builder.step(step_request_2) { + StepResult::RootNotFound | StepResult::Failure(_) => { + panic!("step_request_2: failed to step") + } + StepResult::Success { effects, .. 
} => effects, + }; + + let cl_u512_zero = CLValue::from_t(U512::zero()).unwrap(); + + let balances_1: BTreeSet = effects_1 + .transforms() + .iter() + .filter_map(|transform| match transform.kind() { + TransformKindV2::Write(StoredValue::CLValue(cl_value)) + if transform.key().as_balance().is_some() && cl_value == &cl_u512_zero => + { + Some(*transform.key()) + } + _ => None, + }) + .collect(); + + assert_eq!(balances_1.len(), 0, "distribute should not create purses"); + + let balances_2: BTreeSet = effects_2 + .transforms() + .iter() + .filter_map(|transform| match transform.kind() { + TransformKindV2::Write(StoredValue::CLValue(cl_value)) + if transform.key().as_balance().is_some() && cl_value == &cl_u512_zero => + { + Some(*transform.key()) + } + _ => None, + }) + .collect(); + + assert_eq!(balances_2.len(), 0, "distribute should not create purses"); + + let common_keys: BTreeSet<_> = balances_1.intersection(&balances_2).collect(); + assert_eq!(common_keys.len(), 0, "there should be no commmon Key::Balance keys with Transfer::Write(0) in two distinct step requests"); +} diff --git a/execution_engine_testing/tests/src/test/regression/host_function_metrics_size_and_gas_cost.rs b/execution_engine_testing/tests/src/test/regression/host_function_metrics_size_and_gas_cost.rs new file mode 100644 index 0000000000..fbe3e5fef6 --- /dev/null +++ b/execution_engine_testing/tests/src/test/regression/host_function_metrics_size_and_gas_cost.rs @@ -0,0 +1,135 @@ +use casper_engine_test_support::{ + utils, DeployItemBuilder, ExecuteRequest, ExecuteRequestBuilder, LmdbWasmTestBuilder, + DEFAULT_ACCOUNT_ADDR, DEFAULT_PAYMENT, LOCAL_GENESIS_REQUEST, MINIMUM_ACCOUNT_CREATION_BALANCE, +}; +use casper_execution_engine::{engine_state, execution::ExecError}; +use casper_types::{ + account::{AccountHash, ACCOUNT_HASH_LENGTH}, + bytesrepr::Bytes, + runtime_args, + system::standard_payment, + ApiError, U512, +}; + +const CONTRACT_HOST_FUNCTION_METRICS: &str = "host_function_metrics.wasm"; 
+const CONTRACT_TRANSFER_TO_ACCOUNT_U512: &str = "transfer_to_account_u512.wasm"; + +// This value is not systemic, as code is added the size of WASM will increase, +// you can change this value to reflect the increase in WASM size. +const HOST_FUNCTION_METRICS_STANDARD_SIZE: usize = 160_000; +const HOST_FUNCTION_METRICS_STANDARD_GAS_COST: u64 = 475_000_000_000; + +/// Acceptable size regression/improvement in percentage. +const SIZE_MARGIN: usize = 5; +/// Acceptable gas cost regression/improvement in percentage. +const GAS_COST_MARGIN: u64 = 5; + +const HOST_FUNCTION_METRICS_MAX_SIZE: usize = + HOST_FUNCTION_METRICS_STANDARD_SIZE * (100 + SIZE_MARGIN) / 100; +const HOST_FUNCTION_METRICS_MAX_GAS_COST: u64 = + HOST_FUNCTION_METRICS_STANDARD_GAS_COST * (100 + GAS_COST_MARGIN) / 100; + +const ACCOUNT0_ADDR: AccountHash = AccountHash::new([42; ACCOUNT_HASH_LENGTH]); +const ACCOUNT1_ADDR: AccountHash = AccountHash::new([43; ACCOUNT_HASH_LENGTH]); + +const ARG_TARGET: &str = "target"; +const ARG_AMOUNT: &str = "amount"; + +const ARG_SEED: &str = "seed"; +const ARG_OTHERS: &str = "others"; +const EXPECTED_REVERT_VALUE: u16 = 9; +const SEED_VALUE: u64 = 821_577_831_833_715_345; +const TRANSFER_FROM_MAIN_PURSE_AMOUNT: u64 = 2_000_000_u64; + +#[ignore] +#[test] +fn host_function_metrics_has_acceptable_size() { + let size = utils::read_wasm_file(CONTRACT_HOST_FUNCTION_METRICS).len(); + assert!( + size <= HOST_FUNCTION_METRICS_MAX_SIZE, + "Performance regression: contract host-function-metrics became {} bytes long; up to {} bytes long would be acceptable.", + size, + HOST_FUNCTION_METRICS_MAX_SIZE + ); + println!( + "contract host-function-metrics byte size: {}, ubound: {}", + size, HOST_FUNCTION_METRICS_MAX_SIZE + ) +} + +fn create_account_exec_request(address: AccountHash) -> ExecuteRequest { + ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT_U512, + runtime_args! 
{ + ARG_TARGET => address, + ARG_AMOUNT => U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE), + }, + ) + .build() +} + +#[ignore] +#[test] +fn host_function_metrics_has_acceptable_gas_cost() { + let mut builder = setup(); + + let seed: u64 = SEED_VALUE; + let random_bytes = { + let mut random_bytes = vec![0_u8; 10_000]; + for (i, byte) in random_bytes.iter_mut().enumerate() { + *byte = i.checked_rem(256).unwrap().try_into().unwrap(); + } + random_bytes + }; + + let deploy_item = DeployItemBuilder::new() + .with_address(ACCOUNT0_ADDR) + .with_deploy_hash([55; 32]) + .with_session_code( + CONTRACT_HOST_FUNCTION_METRICS, + runtime_args! { + ARG_SEED => seed, + ARG_OTHERS => (Bytes::from(random_bytes), ACCOUNT0_ADDR, ACCOUNT1_ADDR), + ARG_AMOUNT => TRANSFER_FROM_MAIN_PURSE_AMOUNT, + }, + ) + .with_standard_payment(runtime_args! { standard_payment::ARG_AMOUNT => *DEFAULT_PAYMENT }) + .with_authorization_keys(&[ACCOUNT0_ADDR]) + .build(); + let exec_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); + + builder.exec(exec_request); + + let error = builder.get_error().expect("should have error"); + assert!( + matches!( + error, + engine_state::Error::Exec(ExecError::Revert(ApiError::User(user_error))) + if user_error == EXPECTED_REVERT_VALUE + ), + "Expected revert but actual error is {:?}", + error + ); + + let gas_cost = builder.last_exec_gas_consumed().value(); + assert!( + gas_cost <= U512::from(HOST_FUNCTION_METRICS_MAX_GAS_COST), + "Performance regression: contract host-function-metrics used {} gas; it should use no more than {} gas.", + gas_cost, + HOST_FUNCTION_METRICS_MAX_GAS_COST + ); +} + +fn setup() -> LmdbWasmTestBuilder { + let mut builder = LmdbWasmTestBuilder::default(); + builder + .run_genesis(LOCAL_GENESIS_REQUEST.clone()) + .exec(create_account_exec_request(ACCOUNT0_ADDR)) + .expect_success() + .commit() + .exec(create_account_exec_request(ACCOUNT1_ADDR)) + .expect_success() + .commit(); + builder +} diff --git 
a/execution_engine_testing/tests/src/test/regression/mod.rs b/execution_engine_testing/tests/src/test/regression/mod.rs index c1faa11c93..6a1ea76739 100644 --- a/execution_engine_testing/tests/src/test/regression/mod.rs +++ b/execution_engine_testing/tests/src/test/regression/mod.rs @@ -1,5 +1,3 @@ -mod eco_863; - mod ee_1045; mod ee_1071; mod ee_1103; @@ -10,6 +8,8 @@ mod ee_1152; mod ee_1160; mod ee_1163; mod ee_1174; +mod ee_1217; +mod ee_1225; mod ee_221; mod ee_401; mod ee_441; @@ -30,3 +30,39 @@ mod ee_601; mod ee_771; mod ee_890; mod ee_966; +mod gh_1470; +mod gh_1688; +mod gh_1902; +mod gh_1931; +mod gh_2280; +mod gh_3097; +mod gh_3208; +mod gh_3710; +mod gh_4898; +mod gov_116; +mod gov_42; +mod gov_427; +mod gov_74; +mod gov_89_regression; +mod host_function_metrics_size_and_gas_cost; +mod regression_20210707; +mod regression_20210831; +mod regression_20210924; +mod regression_20211110; +mod regression_20220119; +mod regression_20220204; +mod regression_20220207; +mod regression_20220208; +mod regression_20220211; +mod regression_20220217; +mod regression_20220221; +mod regression_20220222; +mod regression_20220223; +mod regression_20220224; +mod regression_20220303; +mod regression_20220727; +mod regression_20240105; +mod regression_20250812; +mod slow_input; +pub(crate) mod test_utils; +mod transforms_must_be_ordered; diff --git a/execution_engine_testing/tests/src/test/regression/regression_20210707.rs b/execution_engine_testing/tests/src/test/regression/regression_20210707.rs new file mode 100644 index 0000000000..d2ab4daa1f --- /dev/null +++ b/execution_engine_testing/tests/src/test/regression/regression_20210707.rs @@ -0,0 +1,506 @@ +use once_cell::sync::Lazy; + +use casper_engine_test_support::{ + DeployItemBuilder, EntityWithNamedKeys, ExecuteRequest, ExecuteRequestBuilder, + LmdbWasmTestBuilder, TransferRequestBuilder, DEFAULT_ACCOUNT_ADDR, DEFAULT_PAYMENT, + LOCAL_GENESIS_REQUEST, MINIMUM_ACCOUNT_CREATION_BALANCE, +}; +use 
casper_execution_engine::{engine_state::Error as CoreError, execution::ExecError}; +use casper_storage::{data_access_layer::TransferRequest, system::transfer::TransferError}; +use casper_types::{ + account::AccountHash, runtime_args, system::mint, AccessRights, AddressableEntityHash, + PublicKey, RuntimeArgs, SecretKey, URef, U512, +}; + +use crate::wasm_utils; + +const HARDCODED_UREF: URef = URef::new([42; 32], AccessRights::READ_ADD_WRITE); +const CONTRACT_HASH_NAME: &str = "contract_hash"; + +const METHOD_SEND_TO_ACCOUNT: &str = "send_to_account"; +const METHOD_SEND_TO_PURSE: &str = "send_to_purse"; +const METHOD_HARDCODED_PURSE_SRC: &str = "hardcoded_purse_src"; +const METHOD_STORED_PAYMENT: &str = "stored_payment"; +const METHOD_HARDCODED_PAYMENT: &str = "hardcoded_payment"; + +const ARG_SOURCE: &str = "source"; +const ARG_RECIPIENT: &str = "recipient"; +const ARG_AMOUNT: &str = "amount"; +const ARG_TARGET: &str = "target"; + +const REGRESSION_20210707: &str = "regression_20210707.wasm"; + +static ALICE_KEY: Lazy = Lazy::new(|| { + let secret_key = SecretKey::ed25519_from_bytes([3; SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) +}); +static ALICE_ADDR: Lazy = Lazy::new(|| AccountHash::from(&*ALICE_KEY)); + +static BOB_KEY: Lazy = Lazy::new(|| { + let secret_key = SecretKey::ed25519_from_bytes([4; SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) +}); +static BOB_ADDR: Lazy = Lazy::new(|| AccountHash::from(&*BOB_KEY)); + +fn setup_regression_contract() -> ExecuteRequest { + ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + REGRESSION_20210707, + runtime_args! 
{ + mint::ARG_AMOUNT => U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE), + }, + ) + .build() +} + +fn transfer(sender: AccountHash, target: AccountHash, amount: u64) -> TransferRequest { + TransferRequestBuilder::new(amount, target) + .with_initiator(sender) + .build() +} + +fn get_account_entity_hash(entity: &EntityWithNamedKeys) -> AddressableEntityHash { + entity + .named_keys() + .get(CONTRACT_HASH_NAME) + .cloned() + .expect("should have contract hash") + .into_entity_hash_addr() + .map(AddressableEntityHash::new) + .unwrap() +} + +fn assert_forged_uref_error(error: CoreError, forged_uref: URef) { + assert!( + matches!(error, CoreError::Exec(ExecError::ForgedReference(uref)) if uref == forged_uref), + "Expected forged uref {:?} but received {:?}", + forged_uref, + error + ); +} + +#[ignore] +#[test] +fn should_transfer_funds_from_contract_to_new_account() { + let mut builder = LmdbWasmTestBuilder::default(); + + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let store_request = setup_regression_contract(); + + let fund_request = transfer( + *DEFAULT_ACCOUNT_ADDR, + *ALICE_ADDR, + MINIMUM_ACCOUNT_CREATION_BALANCE, + ); + + builder.exec(store_request).commit().expect_success(); + builder.transfer_and_commit(fund_request).expect_success(); + + let account = builder + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .unwrap(); + + let contract_hash = get_account_entity_hash(&account); + + assert!(builder.get_entity_by_account_hash(*BOB_ADDR).is_none()); + + let call_request = ExecuteRequestBuilder::contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + contract_hash, + METHOD_SEND_TO_ACCOUNT, + runtime_args! 
{ + ARG_RECIPIENT => *BOB_ADDR, + ARG_AMOUNT => U512::from(700_000_000_000u64), + }, + ) + .build(); + + builder.exec(call_request).commit().expect_success(); +} + +#[ignore] +#[test] +fn should_transfer_funds_from_contract_to_existing_account() { + let mut builder = LmdbWasmTestBuilder::default(); + + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let store_request = setup_regression_contract(); + + let fund_request_1 = transfer( + *DEFAULT_ACCOUNT_ADDR, + *ALICE_ADDR, + MINIMUM_ACCOUNT_CREATION_BALANCE, + ); + + let fund_request_2 = transfer( + *DEFAULT_ACCOUNT_ADDR, + *BOB_ADDR, + MINIMUM_ACCOUNT_CREATION_BALANCE, + ); + + builder.exec(store_request).commit().expect_success(); + builder.transfer_and_commit(fund_request_1).expect_success(); + builder.transfer_and_commit(fund_request_2).expect_success(); + + let account = builder + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .unwrap(); + + let contract_hash = get_account_entity_hash(&account); + + let call_request = ExecuteRequestBuilder::contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + contract_hash, + METHOD_SEND_TO_ACCOUNT, + runtime_args! 
{ + ARG_RECIPIENT => *BOB_ADDR, + ARG_AMOUNT => U512::from(700_000_000_000u64), + }, + ) + .build(); + + builder.exec(call_request).expect_success().commit(); +} + +#[ignore] +#[test] +fn should_not_transfer_funds_from_forged_purse_to_account_native_transfer() { + let mut builder = LmdbWasmTestBuilder::default(); + + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let store_request = setup_regression_contract(); + + let fund_request = transfer( + *DEFAULT_ACCOUNT_ADDR, + *ALICE_ADDR, + MINIMUM_ACCOUNT_CREATION_BALANCE, + ); + + builder.exec(store_request).commit().expect_success(); + builder.transfer_and_commit(fund_request).expect_success(); + + let take_from = builder.get_expected_addressable_entity_by_account_hash(*ALICE_ADDR); + let alice_main_purse = take_from.main_purse(); + + let transfer_request = TransferRequestBuilder::new(700_000_000_000_u64, *BOB_ADDR) + .with_source(alice_main_purse) + .build(); + + builder.transfer_and_commit(transfer_request); + + let error = builder.get_error().expect("should have error"); + + assert!( + matches!(error, CoreError::Transfer(TransferError::ForgedReference(uref)) if uref == alice_main_purse), + "Expected forged uref {:?} but received {:?}", + alice_main_purse, + error + ); +} + +#[ignore] +#[test] +fn should_not_transfer_funds_from_forged_purse_to_owned_purse() { + let mut builder = LmdbWasmTestBuilder::default(); + + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let store_request = setup_regression_contract(); + + let fund_request_1 = transfer( + *DEFAULT_ACCOUNT_ADDR, + *ALICE_ADDR, + MINIMUM_ACCOUNT_CREATION_BALANCE, + ); + + let fund_request_2 = transfer( + *DEFAULT_ACCOUNT_ADDR, + *BOB_ADDR, + MINIMUM_ACCOUNT_CREATION_BALANCE, + ); + + builder.exec(store_request).commit().expect_success(); + builder.transfer_and_commit(fund_request_1).expect_success(); + builder.transfer_and_commit(fund_request_2).expect_success(); + + let account = builder + 
.get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .unwrap(); + + let bob = builder + .get_entity_with_named_keys_by_account_hash(*BOB_ADDR) + .unwrap(); + let bob_main_purse = bob.main_purse(); + + let contract_hash = get_account_entity_hash(&account); + + let call_request = ExecuteRequestBuilder::contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + contract_hash, + METHOD_SEND_TO_PURSE, + runtime_args! { + ARG_TARGET => bob_main_purse, + ARG_AMOUNT => U512::from(700_000_000_000u64), + }, + ) + .build(); + + builder.exec(call_request).commit(); + + let error = builder.get_error().expect("should have error"); + + assert_forged_uref_error(error, bob_main_purse); +} + +#[ignore] +#[test] +fn should_not_transfer_funds_into_bob_purse() { + let mut builder = LmdbWasmTestBuilder::default(); + + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let store_request = setup_regression_contract(); + + let fund_request_1 = transfer( + *DEFAULT_ACCOUNT_ADDR, + *BOB_ADDR, + MINIMUM_ACCOUNT_CREATION_BALANCE, + ); + + builder.exec(store_request).commit().expect_success(); + builder.transfer_and_commit(fund_request_1).expect_success(); + + let account = builder + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .unwrap(); + + let bob = builder.get_expected_addressable_entity_by_account_hash(*BOB_ADDR); + let bob_main_purse = bob.main_purse(); + + let contract_hash = get_account_entity_hash(&account); + + let call_request = ExecuteRequestBuilder::contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + contract_hash, + METHOD_SEND_TO_PURSE, + runtime_args! 
{ + ARG_TARGET => bob_main_purse, + ARG_AMOUNT => U512::from(700_000_000_000u64), + }, + ) + .build(); + + builder.exec(call_request).commit(); + + let error = builder.get_error().expect("should have error"); + + assert_forged_uref_error(error, bob_main_purse); +} + +#[ignore] +#[test] +fn should_not_transfer_from_hardcoded_purse() { + let mut builder = LmdbWasmTestBuilder::default(); + + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let store_request = setup_regression_contract(); + + let fund_request_1 = transfer( + *DEFAULT_ACCOUNT_ADDR, + *BOB_ADDR, + MINIMUM_ACCOUNT_CREATION_BALANCE, + ); + + builder.exec(store_request).commit().expect_success(); + builder.transfer_and_commit(fund_request_1).expect_success(); + + let account = builder + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .unwrap(); + + let contract_hash = get_account_entity_hash(&account); + + let call_request = ExecuteRequestBuilder::contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + contract_hash, + METHOD_HARDCODED_PURSE_SRC, + runtime_args! 
{ + ARG_AMOUNT => U512::from(700_000_000_000u64), + }, + ) + .build(); + + builder.exec(call_request).commit(); + + let error = builder.get_error().expect("should have error"); + + assert_forged_uref_error(error, HARDCODED_UREF); +} + +#[ignore] +#[allow(unused)] +//#[test] +fn should_not_refund_to_bob_and_charge_alice() { + let mut builder = LmdbWasmTestBuilder::default(); + + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let store_request = setup_regression_contract(); + + let fund_request_1 = transfer( + *DEFAULT_ACCOUNT_ADDR, + *ALICE_ADDR, + MINIMUM_ACCOUNT_CREATION_BALANCE, + ); + + let fund_request_2 = transfer( + *DEFAULT_ACCOUNT_ADDR, + *BOB_ADDR, + MINIMUM_ACCOUNT_CREATION_BALANCE, + ); + + builder.exec(store_request).commit().expect_success(); + builder.transfer_and_commit(fund_request_1).expect_success(); + builder.transfer_and_commit(fund_request_2).expect_success(); + + let account = builder + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .unwrap(); + + let bob = builder.get_expected_addressable_entity_by_account_hash(*BOB_ADDR); + let bob_main_purse = bob.main_purse(); + + let contract_hash = get_account_entity_hash(&account); + + let args = runtime_args! 
{ + ARG_SOURCE => bob_main_purse, + ARG_AMOUNT => *DEFAULT_PAYMENT, + }; + let deploy_item = DeployItemBuilder::new() + .with_address(*DEFAULT_ACCOUNT_ADDR) + .with_session_bytes(wasm_utils::do_nothing_bytes(), RuntimeArgs::default()) + .with_stored_payment_hash(contract_hash, METHOD_STORED_PAYMENT, args) + .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) + .with_deploy_hash([77; 32]) + .build(); + + let call_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); + + builder.exec(call_request).commit(); + + let error = builder.get_error().expect("should have error"); + + assert_forged_uref_error(error, bob_main_purse); +} + +#[ignore] +#[test] +fn should_not_charge_alice_for_execution() { + let mut builder = LmdbWasmTestBuilder::default(); + + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let store_request = setup_regression_contract(); + + let fund_request_1 = transfer( + *DEFAULT_ACCOUNT_ADDR, + *ALICE_ADDR, + MINIMUM_ACCOUNT_CREATION_BALANCE, + ); + + let fund_request_2 = transfer( + *DEFAULT_ACCOUNT_ADDR, + *BOB_ADDR, + MINIMUM_ACCOUNT_CREATION_BALANCE, + ); + + builder.exec(store_request).commit().expect_success(); + builder.transfer_and_commit(fund_request_1).expect_success(); + builder.transfer_and_commit(fund_request_2).expect_success(); + + let account = builder + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .unwrap(); + + let bob = builder.get_expected_addressable_entity_by_account_hash(*BOB_ADDR); + let bob_main_purse = bob.main_purse(); + + let contract_hash = get_account_entity_hash(&account); + + let args = runtime_args! 
{ + ARG_SOURCE => bob_main_purse, + ARG_AMOUNT => *DEFAULT_PAYMENT, + }; + let deploy_item = DeployItemBuilder::new() + .with_address(*DEFAULT_ACCOUNT_ADDR) + // Just do nothing if ever we'd get into session execution + .with_session_bytes(wasm_utils::do_nothing_bytes(), RuntimeArgs::default()) + .with_stored_payment_hash(contract_hash, METHOD_STORED_PAYMENT, args) + .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) + .with_deploy_hash([77; 32]) + .build(); + + let call_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); + + builder.exec(call_request).commit(); + + let error = builder.get_error().expect("should have error"); + + assert_forged_uref_error(error, bob_main_purse); +} + +#[ignore] +#[test] +fn should_not_charge_for_execution_from_hardcoded_purse() { + let mut builder = LmdbWasmTestBuilder::default(); + + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let store_request = setup_regression_contract(); + + let fund_request_1 = transfer( + *DEFAULT_ACCOUNT_ADDR, + *ALICE_ADDR, + MINIMUM_ACCOUNT_CREATION_BALANCE, + ); + + let fund_request_2 = transfer( + *DEFAULT_ACCOUNT_ADDR, + *BOB_ADDR, + MINIMUM_ACCOUNT_CREATION_BALANCE, + ); + + builder.exec(store_request).commit().expect_success(); + builder.transfer_and_commit(fund_request_1).expect_success(); + builder.transfer_and_commit(fund_request_2).expect_success(); + + let account = builder + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .unwrap(); + + let contract_hash = get_account_entity_hash(&account); + + let args = runtime_args! 
{ + ARG_AMOUNT => *DEFAULT_PAYMENT, + }; + let deploy_item = DeployItemBuilder::new() + .with_address(*DEFAULT_ACCOUNT_ADDR) + // Just do nothing if ever we'd get into session execution + .with_session_bytes(wasm_utils::do_nothing_bytes(), RuntimeArgs::default()) + .with_stored_payment_hash(contract_hash, METHOD_HARDCODED_PAYMENT, args) + .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) + .with_deploy_hash([77; 32]) + .build(); + + let call_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); + + builder.exec(call_request).commit(); + + let error = builder.get_error().expect("should have error"); + + assert_forged_uref_error(error, HARDCODED_UREF); +} diff --git a/execution_engine_testing/tests/src/test/regression/regression_20210831.rs b/execution_engine_testing/tests/src/test/regression/regression_20210831.rs new file mode 100644 index 0000000000..9fe1e74bf1 --- /dev/null +++ b/execution_engine_testing/tests/src/test/regression/regression_20210831.rs @@ -0,0 +1,487 @@ +use once_cell::sync::Lazy; + +use casper_engine_test_support::{ + ExecuteRequestBuilder, LmdbWasmTestBuilder, TransferRequestBuilder, DEFAULT_ACCOUNT_ADDR, + DEFAULT_ACCOUNT_PUBLIC_KEY, LOCAL_GENESIS_REQUEST, MINIMUM_ACCOUNT_CREATION_BALANCE, +}; +use casper_execution_engine::{ + engine_state::{engine_config::DEFAULT_MINIMUM_DELEGATION_AMOUNT, Error as CoreError}, + execution::ExecError, +}; +use casper_types::{ + account::AccountHash, + runtime_args, + system::auction::{self, BidsExt, DelegationRate}, + ApiError, PublicKey, RuntimeArgs, SecretKey, U512, +}; + +static ACCOUNT_1_SECRET_KEY: Lazy = + Lazy::new(|| SecretKey::secp256k1_from_bytes([57; 32]).unwrap()); +static ACCOUNT_1_PUBLIC_KEY: Lazy = + Lazy::new(|| PublicKey::from(&*ACCOUNT_1_SECRET_KEY)); +static ACCOUNT_1_ADDR: Lazy = Lazy::new(|| AccountHash::from(&*ACCOUNT_1_PUBLIC_KEY)); + +static ACCOUNT_2_SECRET_KEY: Lazy = + Lazy::new(|| SecretKey::secp256k1_from_bytes([75; 32]).unwrap()); +static ACCOUNT_2_PUBLIC_KEY: 
Lazy = + Lazy::new(|| PublicKey::from(&*ACCOUNT_2_SECRET_KEY)); +static ACCOUNT_2_ADDR: Lazy = Lazy::new(|| AccountHash::from(&*ACCOUNT_2_PUBLIC_KEY)); + +const CONTRACT_REGRESSION_20210831: &str = "regression_20210831.wasm"; + +const METHOD_ADD_BID_PROXY_CALL: &str = "add_bid_proxy_call"; +const METHOD_WITHDRAW_PROXY_CALL: &str = "withdraw_proxy_call"; +const METHOD_DELEGATE_PROXY_CALL: &str = "delegate_proxy_call"; +const METHOD_UNDELEGATE_PROXY_CALL: &str = "undelegate_proxy_call"; +const METHOD_ACTIVATE_BID_CALL: &str = "activate_bid_proxy_call"; + +const CONTRACT_HASH_NAME: &str = "contract_hash"; + +const BID_DELEGATION_RATE: DelegationRate = 42; +static BID_AMOUNT: Lazy = Lazy::new(|| U512::from(1_000_000)); +static DELEGATE_AMOUNT: Lazy = Lazy::new(|| U512::from(500_000)); + +fn setup() -> LmdbWasmTestBuilder { + let mut builder = LmdbWasmTestBuilder::default(); + + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let transfer_request_1 = + TransferRequestBuilder::new(MINIMUM_ACCOUNT_CREATION_BALANCE, *ACCOUNT_1_ADDR).build(); + + builder + .transfer_and_commit(transfer_request_1) + .expect_success(); + + let transfer_request_2 = + TransferRequestBuilder::new(MINIMUM_ACCOUNT_CREATION_BALANCE, *ACCOUNT_2_ADDR).build(); + + builder + .transfer_and_commit(transfer_request_2) + .expect_success(); + + let mut builder = LmdbWasmTestBuilder::default(); + + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let transfer_request_1 = + TransferRequestBuilder::new(MINIMUM_ACCOUNT_CREATION_BALANCE, *ACCOUNT_1_ADDR).build(); + + builder + .transfer_and_commit(transfer_request_1) + .expect_success(); + + let transfer_request_2 = + TransferRequestBuilder::new(MINIMUM_ACCOUNT_CREATION_BALANCE, *ACCOUNT_2_ADDR).build(); + + builder + .transfer_and_commit(transfer_request_2) + .expect_success(); + + let install_request_1 = ExecuteRequestBuilder::standard( + *ACCOUNT_2_ADDR, + CONTRACT_REGRESSION_20210831, + RuntimeArgs::default(), + ) + .build(); + + 
builder.exec(install_request_1).expect_success().commit(); + + builder +} + +#[ignore] +#[test] +fn regression_20210831_should_fail_to_add_bid() { + let mut builder = setup(); + + let sender = *ACCOUNT_2_ADDR; + let add_bid_args = runtime_args! { + auction::ARG_PUBLIC_KEY => ACCOUNT_1_PUBLIC_KEY.clone(), + auction::ARG_AMOUNT => *BID_AMOUNT, + auction::ARG_DELEGATION_RATE => BID_DELEGATION_RATE, + }; + + let add_bid_request_1 = ExecuteRequestBuilder::contract_call_by_hash( + sender, + builder.get_auction_contract_hash(), + auction::METHOD_ADD_BID, + add_bid_args.clone(), + ) + .build(); + + builder.exec(add_bid_request_1); + + let error_1 = builder + .get_error() + .expect("attempt 1 should raise invalid context"); + assert!( + matches!(error_1, CoreError::Exec(ExecError::Revert(ApiError::AuctionError(error_code))) if error_code == auction::Error::InvalidContext as u8), + "{:?}", + error_1 + ); + + // ACCOUNT_2 unbonds ACCOUNT_1 through a proxy + let add_bid_request_2 = ExecuteRequestBuilder::contract_call_by_name( + sender, + CONTRACT_HASH_NAME, + METHOD_ADD_BID_PROXY_CALL, + add_bid_args, + ) + .build(); + + builder.exec(add_bid_request_2).commit(); + + let error_2 = builder + .get_error() + .expect("attempt 2 should raise invalid context"); + assert!( + matches!(error_2, CoreError::Exec(ExecError::Revert(ApiError::AuctionError(error_code))) if error_code == auction::Error::InvalidContext as u8), + "{:?}", + error_2 + ); +} + +#[ignore] +#[test] +fn regression_20210831_should_fail_to_delegate() { + let mut builder = setup(); + + let add_bid_request = ExecuteRequestBuilder::contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + builder.get_auction_contract_hash(), + auction::METHOD_ADD_BID, + runtime_args! 
{ + auction::ARG_PUBLIC_KEY => DEFAULT_ACCOUNT_PUBLIC_KEY.clone(), + auction::ARG_AMOUNT => *BID_AMOUNT, + auction::ARG_DELEGATION_RATE => BID_DELEGATION_RATE, + }, + ) + .build(); + + builder.exec(add_bid_request).expect_success().commit(); + + let sender = *ACCOUNT_2_ADDR; + let delegate_args = runtime_args! { + auction::ARG_VALIDATOR => DEFAULT_ACCOUNT_PUBLIC_KEY.clone(), + auction::ARG_DELEGATOR => ACCOUNT_1_PUBLIC_KEY.clone(), + auction::ARG_AMOUNT => *DELEGATE_AMOUNT, + }; + + let delegate_request_1 = ExecuteRequestBuilder::contract_call_by_hash( + sender, + builder.get_auction_contract_hash(), + auction::METHOD_DELEGATE, + delegate_args.clone(), + ) + .build(); + + builder.exec(delegate_request_1); + + let error_1 = builder + .get_error() + .expect("attempt 1 should raise invalid context"); + assert!( + matches!(error_1, CoreError::Exec(ExecError::Revert(ApiError::AuctionError(error_code))) if error_code == auction::Error::InvalidContext as u8), + "{:?}", + error_1 + ); + + // ACCOUNT_2 unbonds ACCOUNT_1 through a proxy + let delegate_request_2 = ExecuteRequestBuilder::contract_call_by_name( + sender, + CONTRACT_HASH_NAME, + METHOD_DELEGATE_PROXY_CALL, + delegate_args, + ) + .build(); + + builder.exec(delegate_request_2).commit(); + + let error_2 = builder + .get_error() + .expect("attempt 2 should raise invalid context"); + assert!( + matches!(error_2, CoreError::Exec(ExecError::Revert(ApiError::AuctionError(error_code))) if error_code == auction::Error::InvalidContext as u8), + "{:?}", + error_2 + ); +} + +#[ignore] +#[test] +fn regression_20210831_should_fail_to_withdraw_bid() { + let mut builder = setup(); + + let add_bid_request = ExecuteRequestBuilder::contract_call_by_hash( + *ACCOUNT_1_ADDR, + builder.get_auction_contract_hash(), + auction::METHOD_ADD_BID, + runtime_args! 
{ + auction::ARG_PUBLIC_KEY => ACCOUNT_1_PUBLIC_KEY.clone(), + auction::ARG_AMOUNT => *BID_AMOUNT, + auction::ARG_DELEGATION_RATE => BID_DELEGATION_RATE, + }, + ) + .build(); + + builder.exec(add_bid_request).expect_success().commit(); + + let bids = builder.get_bids(); + let account_1_bid_before = bids + .validator_bid(&ACCOUNT_1_PUBLIC_KEY) + .expect("validator bid should exist"); + assert_eq!( + builder.get_purse_balance(*account_1_bid_before.bonding_purse()), + *BID_AMOUNT, + ); + assert!( + !account_1_bid_before.inactive(), + "newly added bid should be active" + ); + + let sender = *ACCOUNT_2_ADDR; + let withdraw_bid_args = runtime_args! { + auction::ARG_PUBLIC_KEY => ACCOUNT_1_PUBLIC_KEY.clone(), + auction::ARG_AMOUNT => *BID_AMOUNT, + }; + + // ACCOUNT_2 unbonds ACCOUNT_1 by a direct auction contract call + let exec_request_1 = ExecuteRequestBuilder::contract_call_by_hash( + sender, + builder.get_auction_contract_hash(), + auction::METHOD_WITHDRAW_BID, + withdraw_bid_args.clone(), + ) + .build(); + + builder.exec(exec_request_1).commit(); + + let error_1 = builder + .get_error() + .expect("attempt 1 should raise invalid context"); + assert!( + matches!(error_1, CoreError::Exec(ExecError::Revert(ApiError::AuctionError(error_code))) if error_code == auction::Error::InvalidContext as u8), + "{:?}", + error_1 + ); + + // ACCOUNT_2 unbonds ACCOUNT_1 through a proxy + let exec_request_2 = ExecuteRequestBuilder::contract_call_by_name( + sender, + CONTRACT_HASH_NAME, + METHOD_WITHDRAW_PROXY_CALL, + withdraw_bid_args, + ) + .build(); + + builder.exec(exec_request_2).commit(); + + let error_2 = builder + .get_error() + .expect("attempt 2 should raise invalid context"); + assert!( + matches!(error_2, CoreError::Exec(ExecError::Revert(ApiError::AuctionError(error_code))) if error_code == auction::Error::InvalidContext as u8), + "{:?}", + error_2 + ); + + let bids = builder.get_bids(); + let account_1_bid_after = bids + .validator_bid(&ACCOUNT_1_PUBLIC_KEY) + 
.expect("after bid should exist"); + + assert_eq!( + account_1_bid_after, account_1_bid_before, + "bids before and after malicious attempt should be equal" + ); +} + +#[ignore] +#[test] +fn regression_20210831_should_fail_to_undelegate_bid() { + let mut builder = setup(); + + let add_bid_request = ExecuteRequestBuilder::contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + builder.get_auction_contract_hash(), + auction::METHOD_ADD_BID, + runtime_args! { + auction::ARG_PUBLIC_KEY => DEFAULT_ACCOUNT_PUBLIC_KEY.clone(), + auction::ARG_AMOUNT => *BID_AMOUNT, + auction::ARG_DELEGATION_RATE => BID_DELEGATION_RATE, + }, + ) + .build(); + + let delegate_request = ExecuteRequestBuilder::contract_call_by_hash( + *ACCOUNT_1_ADDR, + builder.get_auction_contract_hash(), + auction::METHOD_DELEGATE, + runtime_args! { + auction::ARG_VALIDATOR => DEFAULT_ACCOUNT_PUBLIC_KEY.clone(), + auction::ARG_DELEGATOR => ACCOUNT_1_PUBLIC_KEY.clone(), + auction::ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_DELEGATION_AMOUNT), + }, + ) + .build(); + + builder.exec(add_bid_request).expect_success().commit(); + builder.exec(delegate_request).expect_success().commit(); + + let bids = builder.get_bids(); + let default_account_bid_before = bids + .validator_bid(&DEFAULT_ACCOUNT_PUBLIC_KEY) + .expect("should have bid"); + assert_eq!( + builder.get_purse_balance(*default_account_bid_before.bonding_purse()), + *BID_AMOUNT, + ); + assert!( + !default_account_bid_before.inactive(), + "newly added bid should be active" + ); + + let sender = *ACCOUNT_2_ADDR; + let undelegate_args = runtime_args! 
{ + auction::ARG_VALIDATOR => DEFAULT_ACCOUNT_PUBLIC_KEY.clone(), + auction::ARG_DELEGATOR => ACCOUNT_1_PUBLIC_KEY.clone(), + auction::ARG_AMOUNT => *BID_AMOUNT, + }; + + // ACCOUNT_2 undelegates ACCOUNT_1 by a direct auction contract call + let exec_request_1 = ExecuteRequestBuilder::contract_call_by_hash( + sender, + builder.get_auction_contract_hash(), + auction::METHOD_UNDELEGATE, + undelegate_args.clone(), + ) + .build(); + + builder.exec(exec_request_1).commit(); + + let error_1 = builder + .get_error() + .expect("attempt 1 should raise invalid context"); + assert!( + matches!(error_1, CoreError::Exec(ExecError::Revert(ApiError::AuctionError(error_code))) if error_code == auction::Error::InvalidContext as u8), + "{:?}", + error_1 + ); + + // ACCOUNT_2 undelegates ACCOUNT_1 through a proxy + let exec_request_2 = ExecuteRequestBuilder::contract_call_by_name( + sender, + CONTRACT_HASH_NAME, + METHOD_UNDELEGATE_PROXY_CALL, + undelegate_args, + ) + .build(); + + builder.exec(exec_request_2).commit(); + + let error_2 = builder + .get_error() + .expect("attempt 2 should raise invalid context"); + assert!( + matches!(error_2, CoreError::Exec(ExecError::Revert(ApiError::AuctionError(error_code))) if error_code == auction::Error::InvalidContext as u8), + "{:?}", + error_2 + ); + + let bids = builder.get_bids(); + let default_account_bid_after = bids + .validator_bid(&DEFAULT_ACCOUNT_PUBLIC_KEY) + .expect("should have bid"); + + assert_eq!( + default_account_bid_after, default_account_bid_before, + "bids before and after malicious attempt should be equal" + ); +} + +#[ignore] +#[test] +fn regression_20210831_should_fail_to_activate_bid() { + let mut builder = setup(); + + let add_bid_request = ExecuteRequestBuilder::contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + builder.get_auction_contract_hash(), + auction::METHOD_ADD_BID, + runtime_args! 
{ + auction::ARG_PUBLIC_KEY => DEFAULT_ACCOUNT_PUBLIC_KEY.clone(), + auction::ARG_AMOUNT => *BID_AMOUNT, + auction::ARG_DELEGATION_RATE => BID_DELEGATION_RATE, + }, + ) + .build(); + + builder.exec(add_bid_request).expect_success().commit(); + + let bids = builder.get_bids(); + let bid = bids + .validator_bid(&DEFAULT_ACCOUNT_PUBLIC_KEY) + .expect("should have bid"); + assert!(!bid.inactive()); + + let withdraw_bid_request = ExecuteRequestBuilder::contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + builder.get_auction_contract_hash(), + auction::METHOD_WITHDRAW_BID, + runtime_args! { + auction::ARG_PUBLIC_KEY => DEFAULT_ACCOUNT_PUBLIC_KEY.clone(), + auction::ARG_AMOUNT => *BID_AMOUNT, + }, + ) + .build(); + + builder.exec(withdraw_bid_request).expect_success().commit(); + + let bids = builder.get_bids(); + let bid = bids.validator_bid(&DEFAULT_ACCOUNT_PUBLIC_KEY); + assert!(bid.is_none()); + + let sender = *ACCOUNT_2_ADDR; + let activate_bid_args = runtime_args! { + auction::ARG_VALIDATOR => DEFAULT_ACCOUNT_PUBLIC_KEY.clone(), + }; + + let activate_bid_request_1 = ExecuteRequestBuilder::contract_call_by_hash( + sender, + builder.get_auction_contract_hash(), + auction::METHOD_ACTIVATE_BID, + activate_bid_args.clone(), + ) + .build(); + + builder.exec(activate_bid_request_1); + + let error_1 = builder + .get_error() + .expect("attempt 1 should raise invalid context"); + assert!( + matches!(error_1, CoreError::Exec(ExecError::Revert(ApiError::AuctionError(error_code))) if error_code == auction::Error::InvalidContext as u8), + "{:?}", + error_1 + ); + + let activate_bid_request_2 = ExecuteRequestBuilder::contract_call_by_name( + sender, + CONTRACT_HASH_NAME, + METHOD_ACTIVATE_BID_CALL, + activate_bid_args, + ) + .build(); + + builder.exec(activate_bid_request_2).commit(); + + let error_2 = builder + .get_error() + .expect("attempt 2 should raise invalid context"); + assert!( + matches!(error_2, CoreError::Exec(ExecError::Revert(ApiError::AuctionError(error_code))) if 
error_code == auction::Error::InvalidContext as u8), + "{:?}", + error_2 + ); +} diff --git a/execution_engine_testing/tests/src/test/regression/regression_20210924.rs b/execution_engine_testing/tests/src/test/regression/regression_20210924.rs new file mode 100644 index 0000000000..3d8fe69b09 --- /dev/null +++ b/execution_engine_testing/tests/src/test/regression/regression_20210924.rs @@ -0,0 +1,105 @@ +use casper_engine_test_support::{ + DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, + LOCAL_GENESIS_REQUEST, +}; +use casper_types::{runtime_args, Gas, RuntimeArgs, DEFAULT_NOP_COST, U512}; + +use crate::wasm_utils; + +const ARG_AMOUNT: &str = "amount"; + +#[ignore] +#[test] +fn should_charge_minimum_for_do_nothing_session() { + let minimum_deploy_payment = U512::from(0); + + let account_hash = *DEFAULT_ACCOUNT_ADDR; + let session_args = RuntimeArgs::default(); + let deploy_hash = [42; 32]; + + let deploy_item = DeployItemBuilder::new() + .with_address(account_hash) + .with_session_bytes(wasm_utils::do_nothing_bytes(), session_args) + .with_standard_payment(runtime_args! 
{ + ARG_AMOUNT => minimum_deploy_payment, + }) + .with_authorization_keys(&[account_hash]) + .with_deploy_hash(deploy_hash) + .build(); + + let do_nothing_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); + + let mut builder = LmdbWasmTestBuilder::default(); + + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + builder.exec(do_nothing_request).commit(); + + let gas = builder.last_exec_gas_consumed(); + assert_eq!(gas, Gas::zero()); +} + +#[ignore] +#[test] +fn should_execute_do_minimum_session() { + let minimum_deploy_payment = U512::from(DEFAULT_NOP_COST); + + let account_hash = *DEFAULT_ACCOUNT_ADDR; + let session_args = RuntimeArgs::default(); + let deploy_hash = [42; 32]; + + let deploy_item = DeployItemBuilder::new() + .with_address(account_hash) + .with_session_bytes(wasm_utils::do_minimum_bytes(), session_args) + .with_standard_payment(runtime_args! { + ARG_AMOUNT => minimum_deploy_payment, + }) + .with_authorization_keys(&[account_hash]) + .with_deploy_hash(deploy_hash) + .build(); + + let do_minimum_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); + + let mut builder = LmdbWasmTestBuilder::default(); + + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + builder.exec(do_minimum_request).expect_success().commit(); + + let gas = builder.last_exec_gas_consumed(); + assert_eq!(gas, Gas::from(DEFAULT_NOP_COST)); +} + +#[ignore] +#[test] +fn should_charge_minimum_for_do_nothing_payment() { + let minimum_deploy_payment = U512::from(0); + + let account_hash = *DEFAULT_ACCOUNT_ADDR; + let session_args = RuntimeArgs::default(); + let deploy_hash = [42; 32]; + + let deploy_item = DeployItemBuilder::new() + .with_address(account_hash) + .with_session_bytes(wasm_utils::do_nothing_bytes(), session_args) + .with_payment_bytes( + wasm_utils::do_nothing_bytes(), + runtime_args! 
{ + ARG_AMOUNT => minimum_deploy_payment, + }, + ) + .with_authorization_keys(&[account_hash]) + .with_deploy_hash(deploy_hash) + .build(); + + let do_nothing_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); + + let mut builder = LmdbWasmTestBuilder::default(); + + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + builder.exec(do_nothing_request).commit(); + + let gas = builder.last_exec_gas_consumed(); + assert_eq!(gas, Gas::zero()); +} diff --git a/execution_engine_testing/tests/src/test/regression/regression_20211110.rs b/execution_engine_testing/tests/src/test/regression/regression_20211110.rs new file mode 100644 index 0000000000..f0172ea895 --- /dev/null +++ b/execution_engine_testing/tests/src/test/regression/regression_20211110.rs @@ -0,0 +1,83 @@ +use casper_engine_test_support::{ + DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder, TransferRequestBuilder, + LOCAL_GENESIS_REQUEST, +}; +use casper_execution_engine::{engine_state::Error as CoreError, execution::ExecError}; +use casper_types::{ + account::AccountHash, runtime_args, system::standard_payment, AddressableEntityHash, Key, U512, +}; + +const RECURSE_ENTRYPOINT: &str = "recurse"; +const ARG_TARGET: &str = "target"; +const CONTRACT_HASH_NAME: &str = "regression-contract-hash"; +const REGRESSION_20211110_CONTRACT: &str = "regression_20211110.wasm"; + +const ACCOUNT_1_ADDR: AccountHash = AccountHash::new([111; 32]); +const INSTALL_COST: u64 = 40_000_000_000; +const STARTING_BALANCE: u64 = 100_000_000_000; + +#[ignore] +#[test] +fn regression_20211110() { + let mut funds: u64 = STARTING_BALANCE; + + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let transfer_request = TransferRequestBuilder::new(funds, ACCOUNT_1_ADDR).build(); + + let session_args = runtime_args! {}; + let payment_args = runtime_args! 
{ + standard_payment::ARG_AMOUNT => U512::from(INSTALL_COST) + }; + let deploy_item = DeployItemBuilder::new() + .with_address(ACCOUNT_1_ADDR) + .with_standard_payment(payment_args) + .with_session_code(REGRESSION_20211110_CONTRACT, session_args) + .with_authorization_keys(&[ACCOUNT_1_ADDR]) + .with_deploy_hash([42; 32]) + .build(); + + let install_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); + + builder + .transfer_and_commit(transfer_request) + .expect_success(); + builder.exec(install_request).expect_success().commit(); + + funds = funds.checked_sub(INSTALL_COST).unwrap(); + + let contract_hash = match builder + .get_entity_with_named_keys_by_account_hash(ACCOUNT_1_ADDR) + .unwrap() + .named_keys() + .get(CONTRACT_HASH_NAME) + .unwrap() + { + Key::AddressableEntity(entity_addr) => AddressableEntityHash::new(entity_addr.value()), + _ => panic!("Couldn't find regression contract."), + }; + + let payment_args = runtime_args! { + standard_payment::ARG_AMOUNT => U512::from(funds), + }; + let session_args = runtime_args! 
{ + ARG_TARGET => contract_hash + }; + let deploy_item = DeployItemBuilder::new() + .with_address(ACCOUNT_1_ADDR) + .with_standard_payment(payment_args) + .with_stored_session_hash(contract_hash, RECURSE_ENTRYPOINT, session_args) + .with_authorization_keys(&[ACCOUNT_1_ADDR]) + .with_deploy_hash([43; 32]) + .build(); + let recurse_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); + + builder.exec(recurse_request).expect_failure(); + + let error = builder.get_error().expect("should have returned an error"); + assert!(matches!( + error, + CoreError::Exec(ExecError::RuntimeStackOverflow) + )); +} diff --git a/execution_engine_testing/tests/src/test/regression/regression_20220119.rs b/execution_engine_testing/tests/src/test/regression/regression_20220119.rs new file mode 100644 index 0000000000..ee48e68bb1 --- /dev/null +++ b/execution_engine_testing/tests/src/test/regression/regression_20220119.rs @@ -0,0 +1,22 @@ +use casper_engine_test_support::{ + ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST, +}; +use casper_types::RuntimeArgs; + +const REGRESSION_20220119_CONTRACT: &str = "regression_20220119.wasm"; + +#[ignore] +#[test] +fn should_create_purse() { + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let exec_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + REGRESSION_20220119_CONTRACT, + RuntimeArgs::default(), + ) + .build(); + + builder.exec(exec_request).expect_success().commit(); +} diff --git a/execution_engine_testing/tests/src/test/regression/regression_20220204.rs b/execution_engine_testing/tests/src/test/regression/regression_20220204.rs new file mode 100644 index 0000000000..02cb831d74 --- /dev/null +++ b/execution_engine_testing/tests/src/test/regression/regression_20220204.rs @@ -0,0 +1,259 @@ +use casper_engine_test_support::{ + ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, 
LOCAL_GENESIS_REQUEST, +}; +use casper_execution_engine::{engine_state, execution::ExecError}; +use casper_types::{runtime_args, AccessRights, RuntimeArgs}; + +const REGRESSION_20220204_CONTRACT: &str = "regression_20220204.wasm"; +const REGRESSION_20220204_CALL_CONTRACT: &str = "regression_20220204_call.wasm"; +const REGRESSION_20220204_NONTRIVIAL_CONTRACT: &str = "regression_20220204_nontrivial.wasm"; + +const NONTRIVIAL_ARG_AS_CONTRACT: &str = "nontrivial_arg_as_contract"; +const ARG_ENTRYPOINT: &str = "entrypoint"; +const ARG_PURSE: &str = "purse"; +const ARG_NEW_ACCESS_RIGHTS: &str = "new_access_rights"; +const TRANSFER_AS_CONTRACT: &str = "transfer_as_contract"; + +const CONTRACT_HASH_NAME: &str = "regression-contract-hash"; + +#[ignore] +#[test] +fn regression_20220204_as_contract() { + let contract = REGRESSION_20220204_CALL_CONTRACT; + let entrypoint = TRANSFER_AS_CONTRACT; + let new_access_rights = AccessRights::READ_ADD_WRITE; + let expected = AccessRights::READ_ADD_WRITE; + let mut builder = setup(); + let exec_request_2 = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + contract, + runtime_args! 
{ + ARG_NEW_ACCESS_RIGHTS => new_access_rights.bits(), + ARG_ENTRYPOINT => entrypoint, + }, + ) + .build(); + builder.exec(exec_request_2).commit(); + let account = builder + .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .expect("should have account"); + let main_purse = account.main_purse().with_access_rights(expected); + let error = builder.get_error().expect("should have returned an error"); + assert!( + matches!( + error, + engine_state::Error::Exec(ExecError::ForgedReference(forged_uref)) + if forged_uref == main_purse + ), + "Expected revert but received {:?}", + error + ); +} + +#[ignore] +#[test] +fn regression_20220204_as_contract_attenuated() { + let contract = REGRESSION_20220204_CALL_CONTRACT; + let entrypoint = TRANSFER_AS_CONTRACT; + let new_access_rights = AccessRights::READ; + let expected = AccessRights::READ_ADD_WRITE; + let mut builder = setup(); + let exec_request_2 = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + contract, + runtime_args! { + ARG_NEW_ACCESS_RIGHTS => new_access_rights.bits(), + ARG_ENTRYPOINT => entrypoint, + }, + ) + .build(); + builder.exec(exec_request_2).commit(); + let account = builder + .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .expect("should have account"); + let main_purse = account.main_purse().with_access_rights(expected); + let error = builder.get_error().expect("should have returned an error"); + assert!( + matches!( + error, + engine_state::Error::Exec(ExecError::ForgedReference(forged_uref)) + if forged_uref == main_purse + ), + "Expected revert but received {:?}", + error + ); + let contract = REGRESSION_20220204_CALL_CONTRACT; + let entrypoint = TRANSFER_AS_CONTRACT; + let new_access_rights = AccessRights::WRITE; + let expected = AccessRights::READ_ADD_WRITE; + let mut builder = setup(); + let exec_request_2 = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + contract, + runtime_args! 
{ + ARG_NEW_ACCESS_RIGHTS => new_access_rights.bits(), + ARG_ENTRYPOINT => entrypoint, + }, + ) + .build(); + builder.exec(exec_request_2).commit(); + let account = builder + .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .expect("should have account"); + let main_purse = account.main_purse().with_access_rights(expected); + let error = builder.get_error().expect("should have returned an error"); + assert!( + matches!( + error, + engine_state::Error::Exec(ExecError::ForgedReference(forged_uref)) + if forged_uref == main_purse + ), + "Expected revert but received {:?}", + error + ); +} + +#[ignore] +#[test] +fn regression_20220204_as_contract_by_hash() { + let entrypoint = TRANSFER_AS_CONTRACT; + let new_access_rights = AccessRights::READ_ADD_WRITE; + let expected = AccessRights::READ_ADD_WRITE; + let mut builder = setup(); + let account = builder + .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .expect("should have account"); + let main_purse = account.main_purse(); + let exec_request = ExecuteRequestBuilder::contract_call_by_name( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_HASH_NAME, + entrypoint, + runtime_args! { + ARG_NEW_ACCESS_RIGHTS => new_access_rights.bits(), + ARG_PURSE => main_purse, + }, + ) + .build(); + builder.exec(exec_request).commit(); + let error = builder.get_error().expect("should have returned an error"); + assert!( + matches!( + error, + engine_state::Error::Exec(ExecError::ForgedReference(forged_uref)) + if forged_uref == main_purse.with_access_rights(expected) + ), + "Expected revert but received {:?}", + error + ); +} + +#[ignore] +#[test] +fn regression_20220204_nontrivial_arg_as_contract() { + let contract = REGRESSION_20220204_NONTRIVIAL_CONTRACT; + let entrypoint = NONTRIVIAL_ARG_AS_CONTRACT; + let new_access_rights = AccessRights::READ_ADD_WRITE; + let expected = AccessRights::READ_ADD_WRITE; + let mut builder = setup(); + let exec_request_2 = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + contract, + runtime_args! 
{ + ARG_NEW_ACCESS_RIGHTS => new_access_rights.bits(), + ARG_ENTRYPOINT => entrypoint, + }, + ) + .build(); + builder.exec(exec_request_2).commit(); + let account = builder + .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .expect("should have account"); + let main_purse = account.main_purse().with_access_rights(expected); + let error = builder.get_error().expect("should have returned an error"); + assert!( + matches!( + error, + engine_state::Error::Exec(ExecError::ForgedReference(forged_uref)) + if forged_uref == main_purse + ), + "Expected revert but received {:?}", + error + ); +} + +#[ignore] +#[test] +fn regression_20220204_as_contract_by_hash_attenuated() { + let entrypoint = TRANSFER_AS_CONTRACT; + let new_access_rights = AccessRights::READ; + let expected = AccessRights::READ_ADD_WRITE; + let mut builder = setup(); + let account = builder + .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .expect("should have account"); + let main_purse = account.main_purse(); + let exec_request = ExecuteRequestBuilder::contract_call_by_name( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_HASH_NAME, + entrypoint, + runtime_args! 
{ + ARG_NEW_ACCESS_RIGHTS => new_access_rights.bits(), + ARG_PURSE => main_purse, + }, + ) + .build(); + builder.exec(exec_request).commit(); + let error = builder.get_error().expect("should have returned an error"); + assert!( + matches!( + error, + engine_state::Error::Exec(ExecError::ForgedReference(forged_uref)) + if forged_uref == main_purse.with_access_rights(expected) + ), + "Expected revert but received {:?}", + error + ); + let entrypoint = TRANSFER_AS_CONTRACT; + let new_access_rights = AccessRights::WRITE; + let expected = AccessRights::READ_ADD_WRITE; + let mut builder = setup(); + let account = builder + .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .expect("should have account"); + let main_purse = account.main_purse(); + let exec_request = ExecuteRequestBuilder::contract_call_by_name( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_HASH_NAME, + entrypoint, + runtime_args! { + ARG_NEW_ACCESS_RIGHTS => new_access_rights.bits(), + ARG_PURSE => main_purse, + }, + ) + .build(); + builder.exec(exec_request).commit(); + let error = builder.get_error().expect("should have returned an error"); + assert!( + matches!( + error, + engine_state::Error::Exec(ExecError::ForgedReference(forged_uref)) + if forged_uref == main_purse.with_access_rights(expected) + ), + "Expected revert but received {:?}", + error + ); +} + +fn setup() -> LmdbWasmTestBuilder { + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + let install_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + REGRESSION_20220204_CONTRACT, + RuntimeArgs::default(), + ) + .build(); + builder.exec(install_request).expect_success().commit(); + + builder +} diff --git a/execution_engine_testing/tests/src/test/regression/regression_20220207.rs b/execution_engine_testing/tests/src/test/regression/regression_20220207.rs new file mode 100644 index 0000000000..7a1c49ac02 --- /dev/null +++ 
b/execution_engine_testing/tests/src/test/regression/regression_20220207.rs @@ -0,0 +1,75 @@ +use casper_engine_test_support::{ + ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST, +}; +use casper_execution_engine::{engine_state::Error, execution::ExecError}; +use casper_types::{account::AccountHash, runtime_args, system::mint, ApiError, U512}; + +const REGRESSION_20220207_CONTRACT: &str = "regression_20220207.wasm"; +const ARG_AMOUNT_TO_SEND: &str = "amount_to_send"; +const ACCOUNT_1_ADDR: AccountHash = AccountHash::new([111; 32]); + +const UNAPPROVED_SPENDING_AMOUNT_ERR: Error = Error::Exec(ExecError::Revert(ApiError::Mint( + mint::Error::UnapprovedSpendingAmount as u8, +))); + +#[ignore] +#[test] +fn should_not_transfer_above_approved_limit() { + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let args = runtime_args! { + mint::ARG_AMOUNT => U512::from(1000u64), // What we approved. + ARG_AMOUNT_TO_SEND => U512::from(1100u64), // What contract is trying to send. + mint::ARG_TARGET => ACCOUNT_1_ADDR, + }; + + let exec_request = + ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, REGRESSION_20220207_CONTRACT, args) + .build(); + + builder + .exec(exec_request) + .assert_error(UNAPPROVED_SPENDING_AMOUNT_ERR); +} + +#[ignore] +#[test] +fn should_transfer_within_approved_limit() { + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let args = runtime_args! 
{ + mint::ARG_AMOUNT => U512::from(1000u64), + ARG_AMOUNT_TO_SEND => U512::from(100u64), + mint::ARG_TARGET => ACCOUNT_1_ADDR, + }; + + let exec_request = + ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, REGRESSION_20220207_CONTRACT, args) + .build(); + + builder.exec(exec_request).expect_success(); +} + +#[ignore] +#[test] +fn should_fail_without_amount_arg() { + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let args = runtime_args! { + // If `amount` arg is absent, host assumes that limit is 0. + // This should fail then. + ARG_AMOUNT_TO_SEND => U512::from(100u64), + mint::ARG_TARGET => ACCOUNT_1_ADDR, + }; + + let exec_request = + ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, REGRESSION_20220207_CONTRACT, args) + .build(); + + builder + .exec(exec_request) + .assert_error(UNAPPROVED_SPENDING_AMOUNT_ERR); +} diff --git a/execution_engine_testing/tests/src/test/regression/regression_20220208.rs b/execution_engine_testing/tests/src/test/regression/regression_20220208.rs new file mode 100644 index 0000000000..f919ed508a --- /dev/null +++ b/execution_engine_testing/tests/src/test/regression/regression_20220208.rs @@ -0,0 +1,64 @@ +use casper_engine_test_support::{ + ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST, +}; +use casper_execution_engine::{engine_state::Error, execution::ExecError}; +use casper_types::{account::AccountHash, runtime_args, system::mint, ApiError, U512}; + +const REGRESSION_20220208_CONTRACT: &str = "regression_20220208.wasm"; +const ARG_AMOUNT_PART_1: &str = "amount_part_1"; +const ARG_AMOUNT_PART_2: &str = "amount_part_2"; +const ACCOUNT_1_ADDR: AccountHash = AccountHash::new([111; 32]); + +const UNAPPROVED_SPENDING_AMOUNT_ERR: Error = Error::Exec(ExecError::Revert(ApiError::Mint( + mint::Error::UnapprovedSpendingAmount as u8, +))); + +#[ignore] +#[test] +fn should_transfer_within_approved_limit_multiple_transfers() { + let 
mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let part_1 = U512::from(100u64); + let part_2 = U512::from(100u64); + let transfers_limit = part_1 + part_2; + + let args = runtime_args! { + ARG_AMOUNT_PART_1 => part_1, + ARG_AMOUNT_PART_2 => part_2, + mint::ARG_AMOUNT => transfers_limit, + mint::ARG_TARGET => ACCOUNT_1_ADDR, + }; + + let exec_request = + ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, REGRESSION_20220208_CONTRACT, args) + .build(); + + builder.exec(exec_request).expect_success(); +} + +#[ignore] +#[test] +fn should_not_transfer_above_approved_limit_multiple_transfers() { + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let part_1 = U512::from(100u64); + let part_2 = U512::from(100u64); + let transfers_limit = part_1 + part_2 - U512::one(); + + let args = runtime_args! { + ARG_AMOUNT_PART_1 => part_1, + ARG_AMOUNT_PART_2 => part_2, + mint::ARG_AMOUNT => transfers_limit, + mint::ARG_TARGET => ACCOUNT_1_ADDR, + }; + + let exec_request = + ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, REGRESSION_20220208_CONTRACT, args) + .build(); + + builder + .exec(exec_request) + .assert_error(UNAPPROVED_SPENDING_AMOUNT_ERR); +} diff --git a/execution_engine_testing/tests/src/test/regression/regression_20220211.rs b/execution_engine_testing/tests/src/test/regression/regression_20220211.rs new file mode 100644 index 0000000000..7611764a08 --- /dev/null +++ b/execution_engine_testing/tests/src/test/regression/regression_20220211.rs @@ -0,0 +1,120 @@ +use casper_engine_test_support::{ + ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST, +}; +use casper_execution_engine::{engine_state, execution::ExecError}; +use casper_types::{runtime_args, AccessRights, RuntimeArgs, URef}; + +const REGRESSION_20220211_CONTRACT: &str = "regression_20220211.wasm"; +const REGRESSION_20220211_CALL_CONTRACT: &str = 
"regression_20220211_call.wasm"; +const RET_AS_CONTRACT: &str = "ret_as_contract"; +const RET_AS_SESSION: &str = "ret_as_contract"; +const PUT_KEY_AS_SESSION: &str = "put_key_as_session"; +const PUT_KEY_AS_CONTRACT: &str = "put_key_as_contract"; +const READ_AS_SESSION: &str = "read_as_session"; +const READ_AS_CONTRACT: &str = "read_as_contract"; +const WRITE_AS_SESSION: &str = "write_as_session"; +const WRITE_AS_CONTRACT: &str = "write_as_contract"; +const ADD_AS_SESSION: &str = "add_as_session"; +const ADD_AS_CONTRACT: &str = "add_as_contract"; +const ARG_ENTRYPOINT: &str = "entrypoint"; + +#[ignore] +#[test] +fn regression_20220211_ret_as_contract() { + test(RET_AS_CONTRACT); +} + +#[ignore] +#[test] +fn regression_20220211_ret_as_session() { + test(RET_AS_SESSION); +} + +fn setup() -> LmdbWasmTestBuilder { + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + let install_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + REGRESSION_20220211_CONTRACT, + RuntimeArgs::default(), + ) + .build(); + builder.exec(install_request).expect_success().commit(); + + builder +} + +fn test(entrypoint: &str) { + let mut builder = setup(); + + let expected_forged_uref = URef::default().with_access_rights(AccessRights::READ_ADD_WRITE); + + let exec_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + REGRESSION_20220211_CALL_CONTRACT, + runtime_args! 
{ + ARG_ENTRYPOINT => entrypoint, + }, + ) + .build(); + builder.exec(exec_request).commit(); + + let error = builder.get_error().expect("should have returned an error"); + assert!( + matches!( + error, + engine_state::Error::Exec(ExecError::ForgedReference(forged_uref)) + if forged_uref == expected_forged_uref + ), + "Expected revert but received {:?}", + error + ); +} + +#[ignore] +#[test] +fn regression_20220211_put_key_as_session() { + test(PUT_KEY_AS_SESSION); +} + +#[ignore] +#[test] +fn regression_20220211_put_key_as_contract() { + test(PUT_KEY_AS_CONTRACT); +} + +#[ignore] +#[test] +fn regression_20220211_read_as_session() { + test(READ_AS_SESSION); +} + +#[ignore] +#[test] +fn regression_20220211_read_as_contract() { + test(READ_AS_CONTRACT); +} + +#[ignore] +#[test] +fn regression_20220211_write_as_session() { + test(WRITE_AS_SESSION); +} + +#[ignore] +#[test] +fn regression_20220211_write_as_contract() { + test(WRITE_AS_CONTRACT); +} + +#[ignore] +#[test] +fn regression_20220211_add_as_session() { + test(ADD_AS_SESSION); +} + +#[ignore] +#[test] +fn regression_20220211_add_as_contract() { + test(ADD_AS_CONTRACT); +} diff --git a/execution_engine_testing/tests/src/test/regression/regression_20220217.rs b/execution_engine_testing/tests/src/test/regression/regression_20220217.rs new file mode 100644 index 0000000000..9599e141c7 --- /dev/null +++ b/execution_engine_testing/tests/src/test/regression/regression_20220217.rs @@ -0,0 +1,425 @@ +use casper_engine_test_support::{ + ExecuteRequestBuilder, LmdbWasmTestBuilder, TransferRequestBuilder, DEFAULT_ACCOUNT_ADDR, + LOCAL_GENESIS_REQUEST, MINIMUM_ACCOUNT_CREATION_BALANCE, +}; +use casper_execution_engine::{engine_state, execution::ExecError}; +use casper_types::{account::AccountHash, runtime_args, system::mint, AccessRights, URef, U512}; + +const TRANSFER_TO_NAMED_PURSE_CONTRACT: &str = "transfer_to_named_purse.wasm"; + +const ACCOUNT_1_ADDR: AccountHash = AccountHash::new([1u8; 32]); +const ARG_PURSE_NAME: 
&str = "purse_name"; +const ARG_AMOUNT: &str = "amount"; +const DEFAULT_PURSE_BALANCE: u64 = 1_000_000_000; +const PURSE_1: &str = "purse_1"; +const PURSE_2: &str = "purse_2"; +const ACCOUNT_1_PURSE: &str = "purse_3"; + +#[ignore] +#[test] +fn regression_20220217_transfer_mint_by_hash_from_main_purse() { + let mut builder = setup(); + + let default_account = builder + .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .expect("should have account"); + let default_purse = default_account.main_purse(); + let account_1 = builder + .get_entity_by_account_hash(ACCOUNT_1_ADDR) + .expect("should have account"); + let account_1_purse = account_1.main_purse(); + + let mint_hash = builder.get_mint_contract_hash(); + + let exec_request_2 = ExecuteRequestBuilder::contract_call_by_hash( + ACCOUNT_1_ADDR, + mint_hash, + mint::METHOD_TRANSFER, + runtime_args! { + mint::ARG_TO => Some(ACCOUNT_1_ADDR), + mint::ARG_SOURCE => default_purse, + mint::ARG_TARGET => account_1_purse, + mint::ARG_AMOUNT => U512::one(), + mint::ARG_ID => Some(1u64), + }, + ) + .build(); + builder.exec(exec_request_2).expect_failure().commit(); + + let error = builder.get_error().expect("should have returned an error"); + assert!( + matches!( + error, + engine_state::Error::Exec(ExecError::ForgedReference(forged_uref)) + if forged_uref == default_purse.with_access_rights(AccessRights::READ_ADD_WRITE), + ), + "Expected {:?} revert but received {:?}", + default_purse, + error + ); +} + +#[ignore] +#[test] +fn regression_20220217_transfer_mint_by_package_hash_from_main_purse() { + let mut builder = setup(); + + let default_account = builder + .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .expect("should have account"); + let default_purse = default_account.main_purse(); + let account_1 = builder + .get_entity_by_account_hash(ACCOUNT_1_ADDR) + .expect("should have account"); + let account_1_purse = account_1.main_purse(); + + let mint_hash = builder.get_mint_contract_hash(); + + let mint = builder + 
.get_addressable_entity(mint_hash) + .expect("should have mint contract"); + let mint_package_hash = mint.package_hash(); + + let exec_request = ExecuteRequestBuilder::versioned_contract_call_by_hash( + ACCOUNT_1_ADDR, + mint_package_hash, + None, + mint::METHOD_TRANSFER, + runtime_args! { + mint::ARG_TO => Some(ACCOUNT_1_ADDR), + mint::ARG_SOURCE => default_purse, + mint::ARG_TARGET => account_1_purse, + mint::ARG_AMOUNT => U512::one(), + mint::ARG_ID => Some(1u64), + }, + ) + .build(); + builder.exec(exec_request).expect_failure().commit(); + + let error = builder.get_error().expect("should have returned an error"); + assert!( + matches!( + error, + engine_state::Error::Exec(ExecError::ForgedReference(forged_uref)) + if forged_uref == default_purse.with_access_rights(AccessRights::READ_ADD_WRITE), + ), + "Expected {:?} revert but received {:?}", + default_purse, + error + ); +} + +#[ignore] +#[test] +fn regression_20220217_mint_by_hash_transfer_from_other_purse() { + let mut builder = setup(); + + let account = builder + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .expect("should have account"); + let purse_1 = account + .named_keys() + .get(PURSE_1) + .unwrap() + .into_uref() + .expect("should have purse 1"); + let purse_2 = account + .named_keys() + .get(PURSE_2) + .unwrap() + .into_uref() + .expect("should have purse 2"); + + let mint_hash = builder.get_mint_contract_hash(); + + let exec_request = ExecuteRequestBuilder::contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + mint_hash, + mint::METHOD_TRANSFER, + runtime_args! 
{ + mint::ARG_TO => Some(ACCOUNT_1_ADDR), + mint::ARG_SOURCE => purse_1, + mint::ARG_TARGET => purse_2, + mint::ARG_AMOUNT => U512::one(), + mint::ARG_ID => Some(1u64), + }, + ) + .build(); + + builder.exec(exec_request).expect_success().commit(); +} + +#[ignore] +#[test] +fn regression_20220217_mint_by_hash_transfer_from_someones_purse() { + let mut builder = setup(); + + let account_1 = builder + .get_entity_with_named_keys_by_account_hash(ACCOUNT_1_ADDR) + .expect("should have account"); + let account_1_purse = account_1 + .named_keys() + .get(ACCOUNT_1_PURSE) + .unwrap() + .into_uref() + .expect("should have account main purse"); + + let default_account = builder + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .expect("should have account"); + + let purse_1 = default_account + .named_keys() + .get(PURSE_1) + .unwrap() + .into_uref() + .expect("should have purse 1"); + + let mint_hash = builder.get_mint_contract_hash(); + + let exec_request = ExecuteRequestBuilder::contract_call_by_hash( + ACCOUNT_1_ADDR, + mint_hash, + mint::METHOD_TRANSFER, + runtime_args! 
{ + mint::ARG_SOURCE => purse_1, + mint::ARG_AMOUNT => U512::one(), + mint::ARG_TARGET => account_1_purse, + mint::ARG_TO => Some(ACCOUNT_1_ADDR), + }, + ) + .build(); + + builder.exec(exec_request).commit(); + let error = builder.get_error().expect("should have error"); + assert!( + matches!( + error, + engine_state::Error::Exec(ExecError::ForgedReference(forged_uref)) + if forged_uref == purse_1 + ), + "Expected forged uref but received {:?}", + error, + ); +} + +#[ignore] +#[test] +fn regression_20220217_should_not_transfer_funds_on_unrelated_purses() { + let mut builder = setup(); + + let account = builder + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .expect("should have account"); + let purse_1 = account + .named_keys() + .get(PURSE_1) + .unwrap() + .into_uref() + .expect("should have purse 1"); + let purse_2 = account + .named_keys() + .get(PURSE_2) + .unwrap() + .into_uref() + .expect("should have purse 2"); + + let mint_hash = builder.get_mint_contract_hash(); + + let exec_request = ExecuteRequestBuilder::contract_call_by_hash( + ACCOUNT_1_ADDR, + mint_hash, + mint::METHOD_TRANSFER, + runtime_args! 
{ + mint::ARG_TO => Some(*DEFAULT_ACCOUNT_ADDR), + mint::ARG_SOURCE => purse_1, + mint::ARG_TARGET => purse_2, + mint::ARG_AMOUNT => U512::one(), + mint::ARG_ID => Some(1u64), + }, + ) + .build(); + builder.exec(exec_request).expect_failure().commit(); + + let error = builder.get_error().expect("should have error"); + + assert!( + matches!( + error, + engine_state::Error::Exec(ExecError::ForgedReference(forged_uref)) + if forged_uref == purse_1 + ), + "Expected forged uref but received {:?}", + error, + ); +} + +fn setup() -> LmdbWasmTestBuilder { + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let fund_account_1_request = + TransferRequestBuilder::new(MINIMUM_ACCOUNT_CREATION_BALANCE, ACCOUNT_1_ADDR).build(); + let fund_purse_1_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + TRANSFER_TO_NAMED_PURSE_CONTRACT, + runtime_args! { + ARG_PURSE_NAME => PURSE_1, + ARG_AMOUNT => U512::from(DEFAULT_PURSE_BALANCE), + }, + ) + .build(); + let fund_purse_2_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + TRANSFER_TO_NAMED_PURSE_CONTRACT, + runtime_args! { + ARG_PURSE_NAME => PURSE_2, + ARG_AMOUNT => U512::from(DEFAULT_PURSE_BALANCE), + }, + ) + .build(); + let fund_purse_3_request = ExecuteRequestBuilder::standard( + ACCOUNT_1_ADDR, + TRANSFER_TO_NAMED_PURSE_CONTRACT, + runtime_args! 
{ + ARG_PURSE_NAME => ACCOUNT_1_PURSE, + ARG_AMOUNT => U512::from(DEFAULT_PURSE_BALANCE), + }, + ) + .build(); + + builder + .transfer_and_commit(fund_account_1_request) + .expect_success(); + builder.exec(fund_purse_1_request).expect_success().commit(); + builder.exec(fund_purse_2_request).expect_success().commit(); + builder.exec(fund_purse_3_request).expect_success().commit(); + builder +} + +#[ignore] +#[test] +fn regression_20220217_auction_add_bid_directly() { + let mut builder = setup(); + + let default_account = builder + .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .expect("should have account"); + let default_purse = default_account.main_purse(); + let account_1 = builder + .get_entity_by_account_hash(ACCOUNT_1_ADDR) + .expect("should have account"); + let account_1_purse = account_1.main_purse(); + + let mint_hash = builder.get_mint_contract_hash(); + + let exec_request_2 = ExecuteRequestBuilder::contract_call_by_hash( + ACCOUNT_1_ADDR, + mint_hash, + mint::METHOD_TRANSFER, + runtime_args! 
{ + mint::ARG_TO => Some(ACCOUNT_1_ADDR), + mint::ARG_SOURCE => default_purse, + mint::ARG_TARGET => account_1_purse, + mint::ARG_AMOUNT => U512::one(), + mint::ARG_ID => Some(1u64), + }, + ) + .build(); + builder.exec(exec_request_2).expect_failure().commit(); + + let error = builder.get_error().expect("should have returned an error"); + assert!( + matches!( + error, + engine_state::Error::Exec(ExecError::ForgedReference(forged_uref)) + if forged_uref == default_purse.with_access_rights(AccessRights::READ_ADD_WRITE), + ), + "Expected {:?} revert but received {:?}", + default_purse, + error + ); +} + +#[ignore] +#[test] +fn regression_20220217_() { + let mut builder = setup(); + + let default_account = builder + .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .expect("should have account"); + let default_purse = default_account.main_purse(); + let account_1 = builder + .get_entity_by_account_hash(ACCOUNT_1_ADDR) + .expect("should have account"); + let account_1_purse = account_1.main_purse(); + + let mint_hash = builder.get_mint_contract_hash(); + + let exec_request_2 = ExecuteRequestBuilder::contract_call_by_hash( + ACCOUNT_1_ADDR, + mint_hash, + mint::METHOD_TRANSFER, + runtime_args! 
{ + mint::ARG_TO => Some(ACCOUNT_1_ADDR), + mint::ARG_SOURCE => default_purse, + mint::ARG_TARGET => account_1_purse, + mint::ARG_AMOUNT => U512::one(), + mint::ARG_ID => Some(1u64), + }, + ) + .build(); + builder.exec(exec_request_2).expect_failure().commit(); + + let error = builder.get_error().expect("should have returned an error"); + assert!( + matches!( + error, + engine_state::Error::Exec(ExecError::ForgedReference(forged_uref)) + if forged_uref == default_purse.with_access_rights(AccessRights::READ_ADD_WRITE), + ), + "Expected {:?} revert but received {:?}", + default_purse, + error + ); +} + +#[ignore] +#[test] +fn mint_by_hash_transfer_should_fail_because_lack_of_target_uref_access() { + let mut builder = LmdbWasmTestBuilder::default(); + + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let default_account = builder + .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .expect("should have account"); + + let mint_hash = builder.get_mint_contract_hash(); + + let source = default_account.main_purse(); + // This could be any URef to which the caller has no access rights. + let target = URef::default(); + + let id = Some(0u64); + + let transfer_request = ExecuteRequestBuilder::contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + mint_hash, + mint::METHOD_TRANSFER, + runtime_args! { + mint::ARG_TO => Some(*DEFAULT_ACCOUNT_ADDR), + mint::ARG_SOURCE => source, + mint::ARG_TARGET => target, + mint::ARG_AMOUNT => U512::one(), + mint::ARG_ID => id, + }, + ) + .build(); + + // Previously we would allow deposit in this flow to a purse without explicit ADD access. We + // still allow that in some other flows, but due to code complexity, this is no longer + // supported. 
+ builder.exec(transfer_request).expect_failure(); +} diff --git a/execution_engine_testing/tests/src/test/regression/regression_20220221.rs b/execution_engine_testing/tests/src/test/regression/regression_20220221.rs new file mode 100644 index 0000000000..b49012ce78 --- /dev/null +++ b/execution_engine_testing/tests/src/test/regression/regression_20220221.rs @@ -0,0 +1,133 @@ +use casper_engine_test_support::{ + ExecuteRequestBuilder, LmdbWasmTestBuilder, StepRequestBuilder, TransferRequestBuilder, + UpgradeRequestBuilder, DEFAULT_AUCTION_DELAY, DEFAULT_GENESIS_TIMESTAMP_MILLIS, + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS, DEFAULT_PROTOCOL_VERSION, LOCAL_GENESIS_REQUEST, + MINIMUM_ACCOUNT_CREATION_BALANCE, TIMESTAMP_MILLIS_INCREMENT, +}; +use casper_execution_engine::engine_state::DEFAULT_MAX_RUNTIME_CALL_STACK_HEIGHT; +use casper_types::{ + runtime_args, + system::auction::{self, DelegationRate, INITIAL_ERA_ID}, + EraId, ProtocolVersion, PublicKey, SecretKey, U256, U512, +}; + +const VALIDATOR_STAKE: u64 = 1_000_000_000; + +const DEFAULT_ACTIVATION_POINT: EraId = EraId::new(1); + +const OLD_PROTOCOL_VERSION: ProtocolVersion = DEFAULT_PROTOCOL_VERSION; +const NEW_PROTOCOL_VERSION: ProtocolVersion = ProtocolVersion::from_parts( + OLD_PROTOCOL_VERSION.value().major, + OLD_PROTOCOL_VERSION.value().minor, + OLD_PROTOCOL_VERSION.value().patch + 1, +); + +fn generate_secret_keys() -> impl Iterator { + (1u64..).map(|i| { + let u256 = U256::from(i); + let mut u256_bytes = [0u8; 32]; + u256.to_big_endian(&mut u256_bytes); + SecretKey::ed25519_from_bytes(u256_bytes).expect("should create secret key") + }) +} + +fn generate_public_keys() -> impl Iterator { + generate_secret_keys().map(|secret_key| PublicKey::from(&secret_key)) +} + +#[ignore] +#[test] +fn regression_20220221_should_distribute_to_many_validators() { + // distribute funds in a scenario where validator slots is greater than or equal to max runtime + // stack height + + let mut public_keys = generate_public_keys(); + + 
let fund_request = + TransferRequestBuilder::new(MINIMUM_ACCOUNT_CREATION_BALANCE, PublicKey::System).build(); + + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let mut upgrade_request = UpgradeRequestBuilder::default() + .with_new_validator_slots(DEFAULT_MAX_RUNTIME_CALL_STACK_HEIGHT + 1) + .with_pre_state_hash(builder.get_post_state_hash()) + .with_current_protocol_version(OLD_PROTOCOL_VERSION) + .with_new_protocol_version(NEW_PROTOCOL_VERSION) + .with_activation_point(DEFAULT_ACTIVATION_POINT) + .build(); + + builder.upgrade(&mut upgrade_request); + + builder.transfer_and_commit(fund_request).expect_success(); + + // Add validators + for _ in 0..DEFAULT_MAX_RUNTIME_CALL_STACK_HEIGHT { + let public_key = public_keys.next().unwrap(); + + let transfer_request = TransferRequestBuilder::new( + MINIMUM_ACCOUNT_CREATION_BALANCE / 10, + public_key.to_account_hash(), + ) + .build(); + + builder + .transfer_and_commit(transfer_request) + .expect_success(); + + let delegation_rate: DelegationRate = 10; + + let session_args = runtime_args! 
{ + auction::ARG_PUBLIC_KEY => public_key.clone(), + auction::ARG_AMOUNT => U512::from(VALIDATOR_STAKE), + auction::ARG_DELEGATION_RATE => delegation_rate, + }; + + let execute_request = ExecuteRequestBuilder::contract_call_by_hash( + public_key.to_account_hash(), + builder.get_auction_contract_hash(), + auction::METHOD_ADD_BID, + session_args, + ) + .build(); + + builder.exec(execute_request).expect_success().commit(); + } + + let mut timestamp_millis = + DEFAULT_GENESIS_TIMESTAMP_MILLIS + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS; + + for _ in 0..=DEFAULT_AUCTION_DELAY { + builder.run_auction(timestamp_millis, Vec::new()); + timestamp_millis += TIMESTAMP_MILLIS_INCREMENT; + } + + let era_validators = builder.get_era_validators(); + + assert!(!era_validators.is_empty()); + + let (era_id, trusted_era_validators) = era_validators + .into_iter() + .last() + .expect("should have last element"); + assert!(era_id > INITIAL_ERA_ID, "{}", era_id); + + let step_request = StepRequestBuilder::new() + .with_parent_state_hash(builder.get_post_state_hash()) + .with_protocol_version(NEW_PROTOCOL_VERSION) + // Next era id is used for returning future era validators, which we don't need to inspect + // in this test. 
+ .with_next_era_id(era_id) + .with_era_end_timestamp_millis(timestamp_millis); + + assert_eq!( + trusted_era_validators.len(), + DEFAULT_MAX_RUNTIME_CALL_STACK_HEIGHT as usize + ); + + let step_request = step_request.build(); + + assert!(builder.step(step_request).is_success(), "should run step"); + + builder.run_auction(timestamp_millis, Vec::new()); +} diff --git a/execution_engine_testing/tests/src/test/regression/regression_20220222.rs b/execution_engine_testing/tests/src/test/regression/regression_20220222.rs new file mode 100644 index 0000000000..696b6bac89 --- /dev/null +++ b/execution_engine_testing/tests/src/test/regression/regression_20220222.rs @@ -0,0 +1,58 @@ +use casper_engine_test_support::{ + ExecuteRequestBuilder, LmdbWasmTestBuilder, TransferRequestBuilder, DEFAULT_ACCOUNT_ADDR, + LOCAL_GENESIS_REQUEST, MINIMUM_ACCOUNT_CREATION_BALANCE, +}; +use casper_execution_engine::{engine_state, execution::ExecError}; +use casper_types::{account::AccountHash, runtime_args, U512}; + +const ALICE_ADDR: AccountHash = AccountHash::new([42; 32]); + +#[ignore] +#[test] +fn regression_20220222_escalate() { + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let transfer_request = + TransferRequestBuilder::new(MINIMUM_ACCOUNT_CREATION_BALANCE, ALICE_ADDR).build(); + + builder + .transfer_and_commit(transfer_request) + .expect_success(); + + let alice = builder + .get_entity_by_account_hash(ALICE_ADDR) + .expect("should have account"); + + let alice_main_purse = alice.main_purse(); + + // Getting main purse URef to verify transfer + let _source_purse = builder + .get_expected_addressable_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .main_purse(); + + let session_args = runtime_args! 
{ + "alice_purse_addr" => alice_main_purse.addr(), + "amount" => U512::MAX, + }; + + let exec_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + "regression_20220222.wasm", + session_args, + ) + .build(); + builder.exec(exec_request).expect_failure(); + + let error = builder.get_error().expect("should have error"); + + assert!( + matches!( + error, + engine_state::Error::Exec(ExecError::ForgedReference(forged_uref)) + if forged_uref == alice_main_purse.into_add() + ), + "Expected revert but received {:?}", + error + ); +} diff --git a/execution_engine_testing/tests/src/test/regression/regression_20220223.rs b/execution_engine_testing/tests/src/test/regression/regression_20220223.rs new file mode 100644 index 0000000000..9f443ad844 --- /dev/null +++ b/execution_engine_testing/tests/src/test/regression/regression_20220223.rs @@ -0,0 +1,310 @@ +use casper_types::system::mint; +use once_cell::sync::Lazy; + +use casper_engine_test_support::{ + ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, DEFAULT_ACCOUNT_PUBLIC_KEY, + LOCAL_GENESIS_REQUEST, MINIMUM_ACCOUNT_CREATION_BALANCE, +}; +use casper_execution_engine::{ + engine_state, engine_state::engine_config::DEFAULT_MINIMUM_DELEGATION_AMOUNT, + execution::ExecError, +}; +use casper_types::{ + self, + account::AccountHash, + api_error::ApiError, + runtime_args, + system::auction::{ + DelegationRate, ARG_AMOUNT, ARG_DELEGATION_RATE, ARG_DELEGATOR, ARG_PUBLIC_KEY, + ARG_VALIDATOR, + }, + PublicKey, SecretKey, U512, +}; + +const CONTRACT_TRANSFER_TO_ACCOUNT: &str = "transfer_to_account_u512.wasm"; +const CONTRACT_TRANSFER_TO_NAMED_PURSE: &str = "transfer_to_named_purse.wasm"; + +const CONTRACT_REGRESSION_ADD_BID: &str = "regression_add_bid.wasm"; +const CONTRACT_ADD_BID: &str = "add_bid.wasm"; + +const CONTRACT_REGRESSION_DELEGATE: &str = "regression_delegate.wasm"; +const CONTRACT_DELEGATE: &str = "delegate.wasm"; + +const CONTRACT_REGRESSION_TRANSFER: &str = "regression_transfer.wasm"; + 
+const ARG_TARGET: &str = "target"; +const ARG_PURSE_NAME: &str = "purse_name"; +const TEST_PURSE: &str = "test_purse"; +const TRANSFER_AMOUNT: u64 = MINIMUM_ACCOUNT_CREATION_BALANCE + 1000; +const ADD_BID_AMOUNT_1: u64 = 95_000; +const ADD_BID_DELEGATION_RATE_1: DelegationRate = 10; +const DELEGATE_AMOUNT_1: u64 = 125_000 + DEFAULT_MINIMUM_DELEGATION_AMOUNT; + +static VALIDATOR_1_PUBLIC_KEY: Lazy = Lazy::new(|| { + let secret_key = SecretKey::ed25519_from_bytes([3; SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) +}); +static VALIDATOR_1_ADDR: Lazy = + Lazy::new(|| AccountHash::from(&*VALIDATOR_1_PUBLIC_KEY)); + +#[ignore] +#[test] +fn should_fail_to_add_new_bid_over_the_approved_amount() { + let mut builder = setup(); + + let validator_1_add_bid_request = ExecuteRequestBuilder::standard( + *VALIDATOR_1_ADDR, + CONTRACT_REGRESSION_ADD_BID, + runtime_args! { + ARG_PUBLIC_KEY => VALIDATOR_1_PUBLIC_KEY.clone(), + ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1), + ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1, + }, + ) + .build(); + + builder.exec(validator_1_add_bid_request).expect_failure(); + + let error = builder.get_error().expect("should have returned an error"); + assert!( + matches!( + error, + engine_state::Error::Exec(ExecError::Revert(ApiError::Mint(mint_error))) + if mint_error == mint::Error::UnapprovedSpendingAmount as u8 + ), + "Expected unapproved spending amount error but received {:?}", + error + ); +} + +#[ignore] +#[test] +fn should_fail_to_add_into_existing_bid_over_the_approved_amount() { + let mut builder = setup(); + + let validator_1_add_bid_request_1 = ExecuteRequestBuilder::standard( + *VALIDATOR_1_ADDR, + CONTRACT_ADD_BID, + runtime_args! 
{ + ARG_PUBLIC_KEY => VALIDATOR_1_PUBLIC_KEY.clone(), + ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1), + ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1, + }, + ) + .build(); + + let validator_1_add_bid_request_2 = ExecuteRequestBuilder::standard( + *VALIDATOR_1_ADDR, + CONTRACT_REGRESSION_ADD_BID, + runtime_args! { + ARG_PUBLIC_KEY => VALIDATOR_1_PUBLIC_KEY.clone(), + ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1), + ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1, + }, + ) + .build(); + + builder + .exec(validator_1_add_bid_request_1) + .expect_success() + .commit(); + builder.exec(validator_1_add_bid_request_2).expect_failure(); + + let error = builder.get_error().expect("should have returned an error"); + assert!( + matches!( + error, + engine_state::Error::Exec(ExecError::Revert(ApiError::Mint(mint_error))) + if mint_error == mint::Error::UnapprovedSpendingAmount as u8 + ), + "Expected unapproved spending amount error but received {:?}", + error + ); +} + +#[ignore] +#[test] +fn should_fail_to_add_new_delegator_over_the_approved_amount() { + let mut builder = setup(); + + let validator_1_add_bid_request = ExecuteRequestBuilder::standard( + *VALIDATOR_1_ADDR, + CONTRACT_ADD_BID, + runtime_args! { + ARG_PUBLIC_KEY => VALIDATOR_1_PUBLIC_KEY.clone(), + ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1), + ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1, + }, + ) + .build(); + + builder + .exec(validator_1_add_bid_request) + .expect_success() + .commit(); + + let delegator_1_delegate_requestr = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_REGRESSION_DELEGATE, + runtime_args! 
{ + ARG_AMOUNT => U512::from(DELEGATE_AMOUNT_1), + ARG_VALIDATOR => VALIDATOR_1_PUBLIC_KEY.clone(), + ARG_DELEGATOR => DEFAULT_ACCOUNT_PUBLIC_KEY.clone(), + }, + ) + .build(); + + builder + .exec(delegator_1_delegate_requestr) + .expect_failure() + .commit(); + + let error = builder.get_error().expect("should have returned an error"); + assert!( + matches!( + error, + engine_state::Error::Exec(ExecError::Revert(ApiError::Mint(mint_error))) + if mint_error == mint::Error::UnapprovedSpendingAmount as u8 + ), + "Expected unapproved spending amount error but received {:?}", + error + ); +} + +#[ignore] +#[test] +fn should_fail_to_update_existing_delegator_over_the_approved_amount() { + let mut builder = setup(); + + let validator_1_add_bid_request = ExecuteRequestBuilder::standard( + *VALIDATOR_1_ADDR, + CONTRACT_ADD_BID, + runtime_args! { + ARG_PUBLIC_KEY => VALIDATOR_1_PUBLIC_KEY.clone(), + ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1), + ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1, + }, + ) + .build(); + + let delegator_1_delegate_request_1 = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_DELEGATE, + runtime_args! { + ARG_AMOUNT => U512::from(DELEGATE_AMOUNT_1), + ARG_VALIDATOR => VALIDATOR_1_PUBLIC_KEY.clone(), + ARG_DELEGATOR => DEFAULT_ACCOUNT_PUBLIC_KEY.clone(), + }, + ) + .build(); + + let delegator_1_delegate_request_2 = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_REGRESSION_DELEGATE, + runtime_args! 
{ + ARG_AMOUNT => U512::from(DELEGATE_AMOUNT_1), + ARG_VALIDATOR => VALIDATOR_1_PUBLIC_KEY.clone(), + ARG_DELEGATOR => DEFAULT_ACCOUNT_PUBLIC_KEY.clone(), + }, + ) + .build(); + + builder + .exec(validator_1_add_bid_request) + .expect_success() + .commit(); + + builder + .exec(delegator_1_delegate_request_1) + .expect_success() + .commit(); + + builder + .exec(delegator_1_delegate_request_2) + .expect_failure() + .commit(); + + let error = builder.get_error().expect("should have returned an error"); + assert!( + matches!( + error, + engine_state::Error::Exec(ExecError::Revert(ApiError::Mint(mint_error))) + if mint_error == mint::Error::UnapprovedSpendingAmount as u8 + ), + "Expected unapproved spending amount error but received {:?}", + error + ); +} + +#[ignore] +#[test] +fn should_fail_to_mint_transfer_over_the_limit() { + let mut builder = setup(); + + let default_account = builder + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .expect("should have default account"); + + let test_purse_2 = default_account + .named_keys() + .get(TEST_PURSE) + .unwrap() + .into_uref() + .expect("should have test purse 2"); + + let args = runtime_args! 
{ + mint::ARG_TO => Option::::None, + mint::ARG_TARGET => test_purse_2, + mint::ARG_AMOUNT => U512::one(), + mint::ARG_ID => Some(1u64), + }; + let transfer_request_1 = + ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, CONTRACT_REGRESSION_TRANSFER, args) + .build(); + + builder.exec(transfer_request_1).expect_failure().commit(); + + let error = builder.get_error().expect("should have returned an error"); + assert!( + matches!( + error, + engine_state::Error::Exec(ExecError::Revert(ApiError::Mint(mint_error))) + if mint_error == mint::Error::UnapprovedSpendingAmount as u8 + ), + "Expected unapproved spending amount error but received {:?}", + error + ); +} + +fn setup() -> LmdbWasmTestBuilder { + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + let validator_1_fund_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *VALIDATOR_1_ADDR, + ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) + }, + ) + .build(); + + let create_purse_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_NAMED_PURSE, + runtime_args! 
{ + ARG_PURSE_NAME => TEST_PURSE, + ARG_AMOUNT => U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE), + }, + ) + .build(); + + builder + .exec(validator_1_fund_request) + .expect_success() + .commit(); + + builder.exec(create_purse_request).expect_success().commit(); + + builder +} diff --git a/execution_engine_testing/tests/src/test/regression/regression_20220224.rs b/execution_engine_testing/tests/src/test/regression/regression_20220224.rs new file mode 100644 index 0000000000..f6e6e34b12 --- /dev/null +++ b/execution_engine_testing/tests/src/test/regression/regression_20220224.rs @@ -0,0 +1,46 @@ +use casper_engine_test_support::{ + DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, + DEFAULT_PAYMENT, LOCAL_GENESIS_REQUEST, +}; +use casper_execution_engine::{engine_state, execution::ExecError}; +use casper_types::{runtime_args, system::mint, ApiError, RuntimeArgs}; + +const CONTRACT_REGRESSION_PAYMENT: &str = "regression_payment.wasm"; +const CONTRACT_REVERT: &str = "revert.wasm"; + +#[ignore] +#[test] +fn should_not_transfer_above_approved_limit_in_payment_code() { + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let account_hash = *DEFAULT_ACCOUNT_ADDR; + let deploy_hash: [u8; 32] = [42; 32]; + let payment_args = runtime_args! 
{ + "amount" => *DEFAULT_PAYMENT, + }; + let session_args = RuntimeArgs::default(); + + let deploy_item = DeployItemBuilder::new() + .with_address(account_hash) + .with_session_code(CONTRACT_REVERT, session_args) + .with_payment_code(CONTRACT_REGRESSION_PAYMENT, payment_args) + .with_authorization_keys(&[account_hash]) + .with_deploy_hash(deploy_hash) + .build(); + + let exec_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); + + builder.exec(exec_request).expect_failure().commit(); + + let error = builder.get_error().expect("should have returned an error"); + assert!( + matches!( + error, + engine_state::Error::Exec(ExecError::Revert(ApiError::Mint(mint_error))) + if mint_error == mint::Error::UnapprovedSpendingAmount as u8 + ), + "Expected unapproved spending amount error but received {:?}", + error + ); +} diff --git a/execution_engine_testing/tests/src/test/regression/regression_20220303.rs b/execution_engine_testing/tests/src/test/regression/regression_20220303.rs new file mode 100644 index 0000000000..1c17bdfdc7 --- /dev/null +++ b/execution_engine_testing/tests/src/test/regression/regression_20220303.rs @@ -0,0 +1,149 @@ +use std::{ + collections::BTreeMap, + time::{Duration, Instant}, +}; + +use casper_engine_test_support::{LmdbWasmTestBuilder, UpgradeRequestBuilder}; +use casper_types::{ + contracts::ContractHash, + system::{self, mint}, + AccessRights, CLValue, Digest, EntityAddr, EntryPoints, EraId, Key, ProtocolVersion, + StoredValue, SystemHashRegistry, URef, +}; +use rand::Rng; + +use crate::lmdb_fixture::{self, ENTRY_REGISTRY_SPECIAL_ADDRESS}; + +const DEFAULT_ACTIVATION_POINT: EraId = EraId::new(1); + +#[ignore] +#[test] +fn should_update_contract_metadata_at_upgrade_with_major_bump() { + test_upgrade(1, 0, 0, 0); +} + +#[ignore] +#[test] +fn should_update_contract_metadata_at_upgrade_with_minor_bump() { + test_upgrade(0, 1, 0, 0); +} + +#[ignore] +#[test] +fn should_update_contract_metadata_at_upgrade_with_patch_bump() { + 
test_upgrade(0, 0, 1, 0); +} + +#[ignore] +#[test] +fn test_upgrade_with_global_state_update_entries() { + test_upgrade(0, 0, 1, 20000); +} + +fn test_upgrade(major_bump: u32, minor_bump: u32, patch_bump: u32, upgrade_entries: u32) { + let (mut builder, lmdb_fixture_state, _temp_dir) = + lmdb_fixture::builder_from_global_state_fixture(lmdb_fixture::RELEASE_1_3_1); + let mint_contract_hash = { + let stored_value: StoredValue = builder + .query(None, ENTRY_REGISTRY_SPECIAL_ADDRESS, &[]) + .expect("should query system entity registry"); + let cl_value = stored_value + .as_cl_value() + .cloned() + .expect("should have cl value"); + let registry: SystemHashRegistry = cl_value.into_t().expect("should have system registry"); + registry + .get(system::MINT) + .cloned() + .expect("should contract hash") + }; + let old_protocol_version = lmdb_fixture_state.genesis_protocol_version(); + + let legacy_mint_hash = ContractHash::new(mint_contract_hash); + + let old_mint_contract = builder + .get_contract(legacy_mint_hash) + .expect("should have mint contract"); + assert_eq!(old_mint_contract.protocol_version(), old_protocol_version); + let new_protocol_version = ProtocolVersion::from_parts( + old_protocol_version.value().major + major_bump, + old_protocol_version.value().minor + minor_bump, + old_protocol_version.value().patch + patch_bump, + ); + let mut global_state_update = + apply_global_state_update(&builder, lmdb_fixture_state.post_state_hash); + + let mut rng = casper_types::testing::TestRng::new(); + if upgrade_entries > 0 { + for _ in 0..upgrade_entries { + global_state_update.insert( + Key::URef(URef::new(rng.gen(), AccessRights::empty())), + StoredValue::CLValue(CLValue::from_t(rng.gen::()).unwrap()), + ); + } + } + + let mut upgrade_request = { + UpgradeRequestBuilder::new() + .with_current_protocol_version(old_protocol_version) + .with_new_protocol_version(new_protocol_version) + .with_activation_point(DEFAULT_ACTIVATION_POINT) + 
.with_global_state_update(global_state_update) + .build() + }; + let start = Instant::now(); + builder + .upgrade(&mut upgrade_request) + .expect_upgrade_success(); + let elapsed = start.elapsed(); + assert!( + elapsed < Duration::from_secs(40), + "upgrade took too long! {} (millis)", + elapsed.as_millis() + ); + let new_contract = builder + .get_addressable_entity(mint_contract_hash.into()) + .expect("should have mint contract"); + assert_eq!( + old_mint_contract.contract_package_hash().value(), + new_contract.package_hash().value() + ); + assert_eq!( + old_mint_contract.contract_wasm_hash().value(), + new_contract.byte_code_hash().value() + ); + let new_entry_points = builder.get_entry_points(EntityAddr::System(mint_contract_hash)); + let old_entry_points = EntryPoints::from(old_mint_contract.entry_points().clone()); + assert_ne!(&old_entry_points, &new_entry_points); + assert_eq!( + &new_entry_points, + &mint::mint_entry_points(), + "should have new entrypoints written" + ); + assert_eq!(new_contract.protocol_version(), new_protocol_version); +} + +fn apply_global_state_update( + builder: &LmdbWasmTestBuilder, + post_state_hash: Digest, +) -> BTreeMap { + let key = URef::new([0u8; 32], AccessRights::all()).into(); + + let system_contract_hashes = builder + .query(Some(post_state_hash), key, &Vec::new()) + .expect("Must have stored system contract hashes") + .as_cl_value() + .expect("must be CLValue") + .clone() + .into_t::() + .expect("must convert to btree map"); + + let mut global_state_update = BTreeMap::::new(); + let registry = CLValue::from_t(system_contract_hashes) + .expect("must convert to StoredValue") + .into(); + + global_state_update.insert(Key::SystemEntityRegistry, registry); + + global_state_update +} diff --git a/execution_engine_testing/tests/src/test/regression/regression_20220727.rs b/execution_engine_testing/tests/src/test/regression/regression_20220727.rs new file mode 100644 index 0000000000..f89f15d187 --- /dev/null +++ 
b/execution_engine_testing/tests/src/test/regression/regression_20220727.rs @@ -0,0 +1,742 @@ +use std::fmt::Write; + +use casper_wasm::{ + builder, + elements::{Instruction, Instructions}, +}; + +use casper_engine_test_support::{ + ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST, +}; +use casper_execution_engine::{ + engine_state, + execution::ExecError, + runtime::{ + PreprocessingError, WasmValidationError, DEFAULT_BR_TABLE_MAX_SIZE, DEFAULT_MAX_GLOBALS, + DEFAULT_MAX_PARAMETER_COUNT, DEFAULT_MAX_TABLE_SIZE, + }, +}; +use casper_types::{addressable_entity::DEFAULT_ENTRY_POINT_NAME, RuntimeArgs}; + +use crate::wasm_utils; + +const OOM_INIT: (u32, Option) = (2805655325, None); +const FAILURE_ONE_ABOVE_LIMIT: (u32, Option) = (DEFAULT_MAX_TABLE_SIZE + 1, None); +const FAILURE_MAX_ABOVE_LIMIT: (u32, Option) = (DEFAULT_MAX_TABLE_SIZE, Some(u32::MAX)); +const FAILURE_INIT_ABOVE_LIMIT: (u32, Option) = + (DEFAULT_MAX_TABLE_SIZE, Some(DEFAULT_MAX_TABLE_SIZE + 1)); +const ALLOWED_NO_MAX: (u32, Option) = (DEFAULT_MAX_TABLE_SIZE, None); +const ALLOWED_LIMITS: (u32, Option) = (DEFAULT_MAX_TABLE_SIZE, Some(DEFAULT_MAX_TABLE_SIZE)); +// Anything larger than that fails wasmi interpreter with a runtime stack overflow. 
+const FAILING_BR_TABLE_SIZE: usize = DEFAULT_BR_TABLE_MAX_SIZE as usize + 1; +const FAILING_GLOBALS_SIZE: usize = DEFAULT_MAX_PARAMETER_COUNT as usize + 1; +const FAILING_PARAMS_COUNT: usize = DEFAULT_MAX_PARAMETER_COUNT as usize + 1; + +fn make_oom_payload(initial: u32, maximum: Option) -> Vec { + let mut bounds = initial.to_string(); + if let Some(max) = maximum { + bounds += " "; + bounds += &max.to_string(); + } + + let wat = format!( + r#"(module + (table (;0;) {} funcref) + (memory (;0;) 0) + (export "call" (func $call)) + (func $call)) + "#, + bounds + ); + wat::parse_str(wat).expect("should parse wat") +} + +#[ignore] +#[test] +fn should_not_oom() { + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let initial_size_exceeded = vec![OOM_INIT, FAILURE_ONE_ABOVE_LIMIT]; + + let max_size_exceeded = vec![FAILURE_MAX_ABOVE_LIMIT, FAILURE_INIT_ABOVE_LIMIT]; + + for (initial, maximum) in initial_size_exceeded { + let module_bytes = make_oom_payload(initial, maximum); + let exec_request = ExecuteRequestBuilder::module_bytes( + *DEFAULT_ACCOUNT_ADDR, + module_bytes, + RuntimeArgs::default(), + ) + .build(); + + builder.exec(exec_request).expect_failure().commit(); + + let error = builder.get_error().unwrap(); + + assert!( + matches!( + error, + engine_state::Error::WasmPreprocessing(PreprocessingError::WasmValidation(WasmValidationError::InitialTableSizeExceeded { max, actual })) + if max == DEFAULT_MAX_TABLE_SIZE && actual == initial + ), + "{:?}", + error + ); + } + + for (initial, maximum) in max_size_exceeded { + let module_bytes = make_oom_payload(initial, maximum); + let exec_request = ExecuteRequestBuilder::module_bytes( + *DEFAULT_ACCOUNT_ADDR, + module_bytes, + RuntimeArgs::default(), + ) + .build(); + + builder.exec(exec_request).expect_failure().commit(); + + let error = builder.get_error().unwrap(); + + assert!( + matches!( + error, + 
engine_state::Error::WasmPreprocessing(PreprocessingError::WasmValidation(WasmValidationError::MaxTableSizeExceeded { max, actual })) + if max == DEFAULT_MAX_TABLE_SIZE && Some(actual) == maximum + ), + "{initial} {maximum:?} {:?}", + error + ); + } +} + +#[ignore] +#[test] +fn should_pass_table_validation() { + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let passing_test_cases = vec![ALLOWED_NO_MAX, ALLOWED_LIMITS]; + + for (initial, maximum) in passing_test_cases { + let module_bytes = make_oom_payload(initial, maximum); + let exec_request = ExecuteRequestBuilder::module_bytes( + *DEFAULT_ACCOUNT_ADDR, + module_bytes, + RuntimeArgs::default(), + ) + .build(); + + builder.exec(exec_request).expect_success().commit(); + } +} + +#[ignore] +#[test] +fn should_pass_elem_section() { + // more functions than elements - wasmi doesn't allocate + let elem_does_not_fit_err = test_element_section(0, None, DEFAULT_MAX_TABLE_SIZE); + assert!( + matches!( + elem_does_not_fit_err, + Some(engine_state::Error::Exec(ExecError::Interpreter(ref msg))) + if msg == "elements segment does not fit" + ), + "{:?}", + elem_does_not_fit_err + ); + + // wasmi assumes table size and function pointers are equal + assert!(test_element_section( + DEFAULT_MAX_TABLE_SIZE, + Some(DEFAULT_MAX_TABLE_SIZE), + DEFAULT_MAX_TABLE_SIZE + ) + .is_none()); +} + +fn test_element_section( + table_init: u32, + table_max: Option, + function_count: u32, +) -> Option { + // Ensures proper initialization of table elements for different number of function pointers + // + // This should ensure there's no hidden lazy allocation and initialization that might still + // overallocate memory, burn cpu cycles allocating etc. 
+ + // (module + // (table 0 1 anyfunc) + // (memory $0 1) + // (export "memory" (memory $0)) + // (export "foo1" (func $foo1)) + // (export "foo2" (func $foo2)) + // (export "foo3" (func $foo3)) + // (export "main" (func $main)) + // (func $foo1 (; 0 ;) + // ) + // (func $foo2 (; 1 ;) + // ) + // (func $foo3 (; 2 ;) + // ) + // (func $main (; 3 ;) (result i32) + // (i32.const 0) + // ) + // (elem (i32.const 0) $foo1 $foo2 $foo3) + // ) + + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let mut wat = String::new(); + + if let Some(max) = table_max { + writeln!( + wat, + r#"(module + (table {table_init} {max} funcref)"# + ) + .unwrap(); + } else { + writeln!( + wat, + r#"(module + (table {table_init} funcref)"# + ) + .unwrap(); + } + + wat += "(memory $0 1)\n"; + wat += r#"(export "memory" (memory $0))"#; + wat += "\n"; + wat += r#"(export "call" (func $call))"#; + wat += "\n"; + for i in 0..function_count { + writeln!(wat, r#"(export "foo{i}" (func $foo{i}))"#).unwrap(); + } + for i in 0..function_count { + writeln!(wat, "(func $foo{i} (; 0 ;))").unwrap(); + } + wat += "(func $call)\n"; + wat += "\n"; + wat += "(elem (i32.const 0) "; + for i in 0..function_count { + write!(wat, "$foo{i} ").unwrap(); + } + wat += ")\n"; + wat += ")"; + + std::fs::write("/tmp/elem.wat", &wat).unwrap(); + + let module_bytes = wat::parse_str(wat).unwrap(); + let exec_request = ExecuteRequestBuilder::module_bytes( + *DEFAULT_ACCOUNT_ADDR, + module_bytes, + RuntimeArgs::default(), + ) + .build(); + + builder.exec(exec_request).commit(); + + builder.get_error() +} + +#[ignore] +#[test] +fn should_not_allow_more_than_one_table() { + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let module = builder::module() + // table 1 + .table() + .with_min(0) + .build() + // table 2 + .table() + .with_min(1) + .build() + .function() + // A signature with 0 params and no return type 
+ .signature() + .build() + .body() + // Generated instructions for our entrypoint + .with_instructions(Instructions::new(vec![Instruction::Nop, Instruction::End])) + .build() + .build() + // Export above function + .export() + .field(DEFAULT_ENTRY_POINT_NAME) + .build() + // Memory section is mandatory + .memory() + .build() + .build(); + let module_bytes = casper_wasm::serialize(module).expect("should serialize"); + + let exec_request = ExecuteRequestBuilder::module_bytes( + *DEFAULT_ACCOUNT_ADDR, + module_bytes, + RuntimeArgs::default(), + ) + .build(); + + builder.exec(exec_request).expect_failure().commit(); + + let error = builder.get_error().unwrap(); + + assert!( + matches!( + error, + engine_state::Error::WasmPreprocessing(PreprocessingError::WasmValidation( + WasmValidationError::MoreThanOneTable + )) + ), + "{:?}", + error + ); +} + +/// Generates arbitrary length br_table opcode trying to exploit memory allocation in the wasm +/// parsing code. +fn make_arbitrary_br_table(size: usize) -> Result, Box> { + // (module + // (type (;0;) (func (param i32) (result i32))) + // (type (;1;) (func)) + // (func (;0;) (type 0) (param i32) (result i32) + // block ;; label = @1 + // block ;; label = @2 + // block ;; label = @3 + // block ;; label = @4 + // local.get 0 + // br_table 2 (;@2;) 1 (;@3;) 0 (;@4;) 3 (;@1;) + // end + // i32.const 100 + // return + // end + // i32.const 101 + // return + // end + // i32.const 102 + // return + // end + // i32.const 103 + // return) + // (func (;1;) (type 1) + // i32.const 0 + // call 0 + // drop) + // (memory (;0;) 0) + // (export "call" (func 1))) + + let mut src = String::new(); + writeln!(src, "(module")?; + writeln!(src, "(memory (;0;) 0)")?; + writeln!(src, r#"(export "call" (func $call))"#)?; + writeln!(src, r#"(func $switch_like (param $p i32) (result i32)"#)?; + + let mut bottom = ";;\n(local.get $p)\n".to_string(); + bottom += "(br_table\n"; + + for (br_table_offset, n) in (0..=size - 1).rev().enumerate() { + 
writeln!(bottom, " {n} ;; param == {br_table_offset} => (br {n})")?; // p == 0 => (br n) + } + writeln!(bottom, "{size})) ;; else => (br {size})")?; + + bottom += ";;"; + + for n in 0..=size { + let mut wrap = String::new(); + writeln!(wrap, "(block")?; + writeln!(wrap, "{bottom}")?; + writeln!(wrap, "(i32.const {val})", val = 100 + n)?; + writeln!(wrap, "(return))")?; + bottom = wrap; + } + + writeln!(src, "{bottom}")?; + + writeln!( + src, + r#"(func $call (drop (call $switch_like (i32.const 0))))"# + )?; + + writeln!(src, ")")?; + + let module_bytes = wat::parse_str(&src)?; + Ok(module_bytes) +} + +#[ignore] +#[test] +fn should_allow_large_br_table() { + // Anything larger than that fails wasmi interpreter with a runtime stack overflow. + let module_bytes = make_arbitrary_br_table(DEFAULT_BR_TABLE_MAX_SIZE as usize) + .expect("should create module bytes"); + + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let exec_request = ExecuteRequestBuilder::module_bytes( + *DEFAULT_ACCOUNT_ADDR, + module_bytes, + RuntimeArgs::default(), + ) + .build(); + + builder.exec(exec_request).expect_success().commit(); +} + +#[ignore] +#[test] +fn should_not_allow_large_br_table() { + let module_bytes = + make_arbitrary_br_table(FAILING_BR_TABLE_SIZE).expect("should create module bytes"); + + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let exec_request = ExecuteRequestBuilder::module_bytes( + *DEFAULT_ACCOUNT_ADDR, + module_bytes, + RuntimeArgs::default(), + ) + .build(); + + builder.exec(exec_request).expect_failure().commit(); + + let error = builder.get_error().expect("should fail"); + + assert!( + matches!( + error, + engine_state::Error::WasmPreprocessing(PreprocessingError::WasmValidation(WasmValidationError::BrTableSizeExceeded { max, actual })) + if max == DEFAULT_BR_TABLE_MAX_SIZE && actual == FAILING_BR_TABLE_SIZE + ), + "{:?}", + error, + ); +} + 
+fn make_arbitrary_global(size: usize) -> Result, Box> { + // (module + // (memory $0 1) + // (global $global0 i32 (i32.const 1)) + // (global $global1 i32 (i32.const 2)) + // (global $global2 i32 (i32.const 3)) + // (func (export "call") + // global.get $global0 + // global.get $global1 + // global.get $global2 + // i32.add + // i32.add + // drop + // ) + // ) + let mut src = String::new(); + writeln!(src, "(module")?; + writeln!(src, " (memory $0 1)")?; + + for i in 0..size { + writeln!( + src, + " (global $global{i} i32 (i32.const {value}))", + value = i + 1 + )?; + } + + writeln!(src, r#" (func (export "call")"#)?; + debug_assert!(size >= 2); + writeln!(src, " global.get $global{last}", last = size - 2)?; + writeln!(src, " global.get $global{last}", last = size - 1)?; + writeln!(src, " i32.add")?; + writeln!(src, " drop")?; // drop the result + writeln!(src, " )")?; + writeln!(src, ")")?; + let module_bytes = wat::parse_str(&src)?; + Ok(module_bytes) +} + +#[ignore] +#[test] +fn should_allow_multiple_globals() { + let module_bytes = + make_arbitrary_global(DEFAULT_MAX_GLOBALS as usize).expect("should make arbitrary global"); + + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let exec_request = ExecuteRequestBuilder::module_bytes( + *DEFAULT_ACCOUNT_ADDR, + module_bytes, + RuntimeArgs::default(), + ) + .build(); + + builder.exec(exec_request).expect_success().commit(); +} + +#[ignore] +#[test] +fn should_not_allow_too_many_globals() { + let module_bytes = + make_arbitrary_global(FAILING_GLOBALS_SIZE).expect("should make arbitrary global"); + + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let exec_request = ExecuteRequestBuilder::module_bytes( + *DEFAULT_ACCOUNT_ADDR, + module_bytes, + RuntimeArgs::default(), + ) + .build(); + + builder.exec(exec_request).expect_failure().commit(); + + let error = builder.get_error().expect("should fail"); + 
+ assert!( + matches!( + error, + engine_state::Error::WasmPreprocessing(PreprocessingError::WasmValidation(WasmValidationError::TooManyGlobals { max, actual })) + if max == DEFAULT_MAX_GLOBALS && actual == FAILING_GLOBALS_SIZE + ), + "{:?}", + error, + ); +} + +#[ignore] +#[test] +fn should_verify_max_param_count() { + let module_bytes_max_params = + wasm_utils::make_n_arg_call_bytes(DEFAULT_MAX_PARAMETER_COUNT as usize, "i32") + .expect("should create wasm bytes"); + + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let exec_request = ExecuteRequestBuilder::module_bytes( + *DEFAULT_ACCOUNT_ADDR, + module_bytes_max_params, + RuntimeArgs::default(), + ) + .build(); + + builder.exec(exec_request).expect_success().commit(); + + let module_bytes_100_params = + wasm_utils::make_n_arg_call_bytes(100, "i32").expect("should create wasm bytes"); + + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let exec_request = ExecuteRequestBuilder::module_bytes( + *DEFAULT_ACCOUNT_ADDR, + module_bytes_100_params, + RuntimeArgs::default(), + ) + .build(); + + builder.exec(exec_request).expect_success().commit(); +} + +#[ignore] +#[test] +fn should_not_allow_too_many_params() { + let module_bytes = wasm_utils::make_n_arg_call_bytes(FAILING_PARAMS_COUNT, "i32") + .expect("should create wasm bytes"); + + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let exec_request = ExecuteRequestBuilder::module_bytes( + *DEFAULT_ACCOUNT_ADDR, + module_bytes, + RuntimeArgs::default(), + ) + .build(); + + builder.exec(exec_request).expect_failure().commit(); + + let error = builder.get_error().expect("should fail"); + + assert!( + matches!( + error, + engine_state::Error::WasmPreprocessing(PreprocessingError::WasmValidation(WasmValidationError::TooManyParameters { max, actual })) + if max == DEFAULT_MAX_PARAMETER_COUNT && 
actual == FAILING_PARAMS_COUNT + ), + "{:?}", + error, + ); +} + +#[ignore] +#[test] +fn should_not_allow_to_import_gas_function() { + let module_bytes = wat::parse_str( + r#"(module + (func $gas (import "env" "gas") (param i32)) + (memory $0 1) + )"#, + ) + .unwrap(); + + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let exec_request = ExecuteRequestBuilder::module_bytes( + *DEFAULT_ACCOUNT_ADDR, + module_bytes, + RuntimeArgs::default(), + ) + .build(); + + builder.exec(exec_request).expect_failure().commit(); + + let error = builder.get_error().expect("should fail"); + + assert!( + matches!( + error, + engine_state::Error::WasmPreprocessing(PreprocessingError::WasmValidation( + WasmValidationError::MissingHostFunction + )) + ), + "{:?}", + error, + ); +} + +#[ignore] +#[test] +fn should_not_get_non_existing_global() { + let get_undeclared_global = r#"(module + (memory $memory 16) + (export "call" (func $call_fn)) + (func $call_fn + global.get 0 + drop + ) + )"#; + + test_non_existing_global(get_undeclared_global, 0); +} + +#[ignore] +#[test] +fn should_not_get_global_above_declared_range() { + let get_undeclared_global = r#"(module + (memory $memory 16) + (export "call" (func $call_fn)) + (func $call_fn + global.get 3 + drop + ) + (global $global0 i32 (i32.const 0)) + (global $global1 i32 (i32.const 1)) + (global $global256 i32 (i32.const 2)) + )"#; + + test_non_existing_global(get_undeclared_global, 3); +} + +#[ignore] +#[test] +fn should_not_set_non_existing_global() { + let set_undeclared_global = r#"(module + (memory $memory 16) + (export "call" (func $call_fn)) + (func $call_fn + i32.const 123 + global.set 0 + drop + ) + )"#; + + test_non_existing_global(set_undeclared_global, 0); +} + +#[ignore] +#[test] +fn should_not_set_non_existing_global_u32_max() { + let set_undeclared_global = format!( + r#"(module + (memory $memory 16) + (export "call" (func $call_fn)) + (func $call_fn + i32.const 0 + 
global.set {index} + ) + (global $global0 (mut i32) (i32.const 0)) + (global $global1 (mut i32) (i32.const 1)) + (global $global256 (mut i32) (i32.const 2)) + )"#, + index = u32::MAX + ); + + test_non_existing_global(&set_undeclared_global, u32::MAX); +} + +#[ignore] +#[test] +fn should_not_get_non_existing_global_u32_max() { + let set_undeclared_global = format!( + r#"(module + (memory $memory 16) + (export "call" (func $call_fn)) + (func $call_fn + global.get {index} + drop + ) + (global $global0 (mut i32) (i32.const 0)) + )"#, + index = u32::MAX + ); + + test_non_existing_global(&set_undeclared_global, u32::MAX); +} + +#[ignore] +#[test] +fn should_not_set_non_existing_global_above_declared_range() { + let set_undeclared_global = r#"(module + (memory $memory 16) + (export "call" (func $call_fn)) + (func $call_fn + i32.const 0 + global.set 123 + ) + (global $global0 (mut i32) (i32.const 0)) + (global $global1 (mut i32) (i32.const 1)) + (global $global256 (mut i32) (i32.const 2)) + )"#; + + test_non_existing_global(set_undeclared_global, 123); +} + +fn test_non_existing_global(module_wat: &str, index: u32) { + let module_bytes = wat::parse_str(module_wat).unwrap(); + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + let exec_request = ExecuteRequestBuilder::module_bytes( + *DEFAULT_ACCOUNT_ADDR, + module_bytes, + RuntimeArgs::default(), + ) + .build(); + builder.exec(exec_request).expect_failure().commit(); + let error = builder.get_error().expect("should fail"); + assert!( + matches!( + error, + engine_state::Error::WasmPreprocessing(PreprocessingError::WasmValidation(WasmValidationError::IncorrectGlobalOperation { index: incorrect_index })) + if incorrect_index == index + ), + "{:?}", + error, + ); +} diff --git a/execution_engine_testing/tests/src/test/regression/regression_20240105.rs b/execution_engine_testing/tests/src/test/regression/regression_20240105.rs new file mode 100644 index 
0000000000..d2ee4f0931 --- /dev/null +++ b/execution_engine_testing/tests/src/test/regression/regression_20240105.rs @@ -0,0 +1,1367 @@ +mod repeated_ffi_call_should_gas_out_quickly { + use std::{ + env, + sync::mpsc::{self, RecvTimeoutError}, + thread, + time::{Duration, Instant}, + }; + + use casper_execution_engine::{engine_state::Error, execution::ExecError}; + use rand::Rng; + use tempfile::TempDir; + + use casper_engine_test_support::{ + ChainspecConfig, DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder, + CHAINSPEC_SYMLINK, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST, + }; + use casper_types::{ + account::AccountHash, runtime_args, testing::TestRng, ProtocolVersion, RuntimeArgs, + DICTIONARY_ITEM_KEY_MAX_LENGTH, U512, + }; + + const CONTRACT: &str = "regression_20240105.wasm"; + const TIMEOUT: Duration = Duration::from_secs(4); + const PAYMENT_AMOUNT: u64 = 1_000_000_000_000_u64; + + fn production_max_associated_keys() -> u8 { + let chainspec_config = ChainspecConfig::from_chainspec_path(&*CHAINSPEC_SYMLINK).unwrap(); + chainspec_config.max_associated_keys().try_into().unwrap() + } + + struct Fixture { + builder: LmdbWasmTestBuilder, + data_dir: TempDir, + rng: TestRng, + } + + impl Fixture { + fn new() -> Self { + let data_dir = TempDir::new().unwrap(); + let mut builder = LmdbWasmTestBuilder::new_with_production_chainspec(data_dir.path()); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()).commit(); + let rng = TestRng::new(); + Fixture { + builder, + data_dir, + rng, + } + } + + /// Calls regression_20240105.wasm with some setup function. Execution is expected to + /// succeed. + fn execute_setup(&mut self, session_args: RuntimeArgs) { + let deploy = DeployItemBuilder::new() + .with_address(*DEFAULT_ACCOUNT_ADDR) + .with_session_code(CONTRACT, session_args) + .with_standard_payment(runtime_args! 
{ "amount" => U512::from(PAYMENT_AMOUNT * 4) }) + .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) + .with_deploy_hash(self.rng.gen()) + .build(); + let exec_request = ExecuteRequestBuilder::from_deploy_item(&deploy).build(); + self.builder.exec(exec_request).expect_success().commit(); + } + + /// Calls regression_20240105.wasm with expectation of execution failure due to running out + /// of gas within the duration specified in `TIMEOUT`. + fn execute_with_timeout(self, session_args: RuntimeArgs, extra_auth_keys: u8) { + if cfg!(debug_assertions) { + println!("not testing in debug mode"); + return; + } + let (tx, rx) = mpsc::channel(); + let Fixture { + builder, + data_dir, + mut rng, + } = self; + let post_state_hash = builder.get_post_state_hash(); + let mut auth_keys = Vec::new(); + auth_keys.push(*DEFAULT_ACCOUNT_ADDR); + for i in 1..=extra_auth_keys { + auth_keys.push(AccountHash::new([i; 32])); + } + let executor = thread::spawn(move || { + let deploy = DeployItemBuilder::new() + .with_address(*DEFAULT_ACCOUNT_ADDR) + .with_session_code(CONTRACT, session_args) + .with_standard_payment(runtime_args! { "amount" => U512::from(PAYMENT_AMOUNT) }) + .with_authorization_keys(&auth_keys) + .with_deploy_hash(rng.gen()) + .build(); + let exec_request = ExecuteRequestBuilder::from_deploy_item(&deploy).build(); + + let mut chainspec_config = + ChainspecConfig::from_chainspec_path(&*CHAINSPEC_SYMLINK).unwrap(); + // Increase the `max_memory` available in order to avoid hitting unreachable + // instruction during execution. 
+ *chainspec_config.wasm_config.v1_mut().max_memory_mut() = 10_000; + let mut builder = LmdbWasmTestBuilder::open( + data_dir.path(), + chainspec_config, + ProtocolVersion::V2_0_0, + post_state_hash, + ); + + builder.exec(exec_request); + let error = builder.get_error().unwrap(); + let _ = tx.send(error); + }); + + let timeout = if let Ok(value) = env::var("CL_TEST_TIMEOUT_SECS") { + Duration::from_secs(value.parse().expect("should parse as u64")) + } else { + TIMEOUT + }; + let start = Instant::now(); + let receiver_result = rx.recv_timeout(timeout); + executor.join().unwrap(); + match receiver_result { + Ok(error) => { + assert!( + matches!(error, Error::Exec(ExecError::GasLimit)), + "expected gas limit error, but got {:?}", + error + ); + } + Err(RecvTimeoutError::Timeout) => { + panic!( + "execution should take less than {} seconds, but took {} seconds ", + timeout.as_secs_f32(), + start.elapsed().as_secs_f32(), + ) + } + Err(RecvTimeoutError::Disconnected) => unreachable!(), + } + } + } + + #[ignore] + #[test] + fn write_small() { + let session_args = runtime_args! { + "fn" => "write", + "len" => 1_u32, + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn write_large() { + let session_args = runtime_args! { + "fn" => "write", + "len" => 100_000_u32, + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn read_missing() { + let session_args = runtime_args! { + "fn" => "read", + "len" => Option::::None, + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn read_small() { + let session_args = runtime_args! { + "fn" => "read", + "len" => Some(1_u32), + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn read_large() { + let session_args = runtime_args! 
{ + "fn" => "read", + "len" => Some(100_000_u32), + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn add_small() { + let session_args = runtime_args! { + "fn" => "add", + "large" => false + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn add_large() { + let session_args = runtime_args! { + "fn" => "add", + "large" => true + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn new_uref_small() { + let session_args = runtime_args! { + "fn" => "new", + "len" => 1_u32, + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn new_uref_large() { + let session_args = runtime_args! { + "fn" => "new", + "len" => 1_000_u32, + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn call_contract_small_runtime_args() { + let session_args = runtime_args! { + "fn" => "call_contract", + "args_len" => 1_u32 + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn call_contract_large_runtime_args() { + let session_args = runtime_args! { + "fn" => "call_contract", + "args_len" => 1_024_u32 + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn get_key_small_name_missing_key() { + let session_args = runtime_args! { + "fn" => "get_key", + "large_name" => false, + "large_key" => Option::::None + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn get_key_small_name_small_key() { + let session_args = runtime_args! { + "fn" => "get_key", + "large_name" => false, + "large_key" => Some(false) + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn get_key_small_name_large_key() { + let session_args = runtime_args! 
{ + "fn" => "get_key", + "large_name" => false, + "large_key" => Some(true) + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn get_key_large_name_small_key() { + let session_args = runtime_args! { + "fn" => "get_key", + "large_name" => true, + "large_key" => Some(false) + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn get_key_large_name_large_key() { + let session_args = runtime_args! { + "fn" => "get_key", + "large_name" => true, + "large_key" => Some(true) + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn has_key_small_name_missing_key() { + let session_args = runtime_args! { + "fn" => "has_key", + "large_name" => false, + "key_exists" => false + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn has_key_small_name_existing_key() { + let session_args = runtime_args! { + "fn" => "has_key", + "large_name" => false, + "key_exists" => true + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn has_key_large_name_missing_key() { + let session_args = runtime_args! { + "fn" => "has_key", + "large_name" => true, + "key_exists" => false + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn has_key_large_name_existing_key() { + let session_args = runtime_args! { + "fn" => "has_key", + "large_name" => true, + "key_exists" => true + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn put_key_small_name_small_key() { + let session_args = runtime_args! { + "fn" => "put_key", + "large_name" => false, + "large_key" => false, + "num_keys" => Option::::None + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn put_key_small_name_large_key() { + let session_args = runtime_args! 
{ + "fn" => "put_key", + "large_name" => false, + "large_key" => true, + "num_keys" => Option::::None + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn put_key_large_name_small_key() { + let session_args = runtime_args! { + "fn" => "put_key", + "large_name" => true, + "large_key" => false, + "num_keys" => Option::::None + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn put_key_large_name_large_key() { + let session_args = runtime_args! { + "fn" => "put_key", + "large_name" => true, + "large_key" => true, + "num_keys" => Option::::None + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn is_valid_uref_for_invalid() { + let session_args = runtime_args! { + "fn" => "is_valid_uref", + "valid" => false + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn is_valid_uref_for_valid() { + let session_args = runtime_args! { + "fn" => "is_valid_uref", + "valid" => true + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn add_and_remove_associated_key() { + let session_args = runtime_args! { + "fn" => "add_associated_key", + "remove_after_adding" => true, + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn add_associated_key_duplicated() { + let session_args = runtime_args! { + "fn" => "add_associated_key", + "remove_after_adding" => false, + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn remove_associated_key_non_existent() { + let session_args = runtime_args! { "fn" => "remove_associated_key" }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn update_associated_key_non_existent() { + let session_args = runtime_args! 
{ + "fn" => "update_associated_key", + "exists" => false + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn update_associated_key_existing() { + let session_args = runtime_args! { + "fn" => "update_associated_key", + "exists" => true + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn set_action_threshold() { + let session_args = runtime_args! { "fn" => "set_action_threshold" }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn load_named_keys_empty() { + let session_args = runtime_args! { + "fn" => "load_named_keys", + "num_keys" => 0_u32, + "large_name" => true + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn load_named_keys_one_key_small_name() { + let num_keys = 1_u32; + let mut fixture = Fixture::new(); + let session_args = runtime_args! { + "fn" => "put_key", + "large_name" => false, + "large_key" => true, + "num_keys" => Some(num_keys), + }; + fixture.execute_setup(session_args); + let session_args = runtime_args! { + "fn" => "load_named_keys", + "num_keys" => num_keys + }; + fixture.execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn load_named_keys_one_key_large_name() { + let num_keys = 1_u32; + let mut fixture = Fixture::new(); + let session_args = runtime_args! { + "fn" => "put_key", + "large_name" => true, + "large_key" => true, + "num_keys" => Some(num_keys), + }; + fixture.execute_setup(session_args); + let session_args = runtime_args! { + "fn" => "load_named_keys", + "num_keys" => num_keys, + }; + fixture.execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn load_named_keys_many_keys_small_name() { + let num_keys = 1_000_u32; + let mut fixture = Fixture::new(); + let session_args = runtime_args! 
{ + "fn" => "put_key", + "large_name" => false, + "large_key" => true, + "num_keys" => Some(num_keys), + }; + fixture.execute_setup(session_args); + let session_args = runtime_args! { + "fn" => "load_named_keys", + "num_keys" => num_keys, + }; + fixture.execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn load_named_keys_many_keys_large_name() { + let num_keys = 10_u32; + let mut fixture = Fixture::new(); + let session_args = runtime_args! { + "fn" => "put_key", + "large_name" => true, + "large_key" => true, + "num_keys" => Some(num_keys), + }; + fixture.execute_setup(session_args); + let session_args = runtime_args! { + "fn" => "load_named_keys", + "num_keys" => num_keys, + }; + fixture.execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn remove_key_small_name() { + let session_args = runtime_args! { + "fn" => "remove_key", + "large_name" => false + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn remove_key_large_name() { + let session_args = runtime_args! { + "fn" => "remove_key", + "large_name" => true + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn get_caller() { + let session_args = runtime_args! { "fn" => "get_caller" }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn get_blocktime() { + let session_args = runtime_args! { "fn" => "get_blocktime" }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn create_purse() { + let session_args = runtime_args! { "fn" => "create_purse" }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn transfer_to_account_create_account() { + let session_args = runtime_args! 
{ + "fn" => "transfer_to_account", + "account_exists" => false, + "amount" => U512::MAX + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn transfer_to_account_existing_account() { + let session_args = runtime_args! { + "fn" => "transfer_to_account", + "account_exists" => true, + "amount" => U512::MAX + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn transfer_from_purse_to_account_create_account() { + let session_args = runtime_args! { + "fn" => "transfer_from_purse_to_account", + "account_exists" => false, + "amount" => U512::MAX + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn transfer_from_purse_to_account_existing_account() { + let session_args = runtime_args! { + "fn" => "transfer_from_purse_to_account", + "account_exists" => true, + "amount" => U512::MAX + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn transfer_from_purse_to_purse() { + let session_args = runtime_args! { + "fn" => "transfer_from_purse_to_purse", + "amount" => U512::MAX + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn get_balance_non_existent_purse() { + let session_args = runtime_args! { + "fn" => "get_balance", + "purse_exists" => false + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn get_balance_existing_purse() { + let session_args = runtime_args! { + "fn" => "get_balance", + "purse_exists" => true + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn get_phase() { + let session_args = runtime_args! { "fn" => "get_phase" }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn get_system_contract() { + let session_args = runtime_args! 
{ "fn" => "get_system_contract" }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn get_main_purse() { + let session_args = runtime_args! { "fn" => "get_main_purse" }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn read_host_buffer_empty() { + let session_args = runtime_args! { "fn" => "read_host_buffer" }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn create_contract_package_at_hash() { + let session_args = runtime_args! { "fn" => "create_contract_package_at_hash" }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn add_contract_version_no_entry_points_no_named_keys() { + let session_args = runtime_args! { + "fn" => "add_contract_version", + "entry_points_len" => 0_u32, + "named_keys_len" => 0_u32, + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn add_contract_version_small_entry_points_small_named_keys() { + let session_args = runtime_args! { + "fn" => "add_contract_version", + "entry_points_len" => 1_u32, + "named_keys_len" => 1_u32, + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn add_contract_version_small_entry_points_large_named_keys() { + let session_args = runtime_args! { + "fn" => "add_contract_version", + "entry_points_len" => 1_u32, + "named_keys_len" => 100_u32, + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn add_contract_version_large_entry_points_small_named_keys() { + let session_args = runtime_args! { + "fn" => "add_contract_version", + "entry_points_len" => 100_u32, + "named_keys_len" => 1_u32, + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn add_contract_version_large_entry_points_large_named_keys() { + let session_args = runtime_args! 
{ + "fn" => "add_contract_version", + "entry_points_len" => 100_u32, + "named_keys_len" => 100_u32, + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn disable_contract_version() { + let session_args = runtime_args! { "fn" => "disable_contract_version" }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn call_versioned_contract_small_runtime_args() { + let session_args = runtime_args! { + "fn" => "call_versioned_contract", + "args_len" => 1_u32 + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn call_versioned_contract_large_runtime_args() { + let session_args = runtime_args! { + "fn" => "call_versioned_contract", + "args_len" => 1_024_u32 + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn create_contract_user_group_small_label_no_new_urefs_no_existing_urefs() { + let session_args = runtime_args! { + "fn" => "create_contract_user_group", + "label_len" => 1_u32, + "num_new_urefs" => 0_u8, + "num_existing_urefs" => 0_u8, + "allow_exceeding_max_groups" => false + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn create_contract_user_group_small_label_no_new_urefs_few_existing_urefs() { + let session_args = runtime_args! { + "fn" => "create_contract_user_group", + "label_len" => 1_u32, + "num_new_urefs" => 0_u8, + "num_existing_urefs" => 1_u8, + "allow_exceeding_max_groups" => false + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn create_contract_user_group_small_label_no_new_urefs_many_existing_urefs() { + let session_args = runtime_args! 
{ + "fn" => "create_contract_user_group", + "label_len" => 1_u32, + "num_new_urefs" => 0_u8, + "num_existing_urefs" => 10_u8, + "allow_exceeding_max_groups" => false + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn create_contract_user_group_small_label_few_new_urefs_no_existing_urefs() { + let session_args = runtime_args! { + "fn" => "create_contract_user_group", + "label_len" => 1_u32, + "num_new_urefs" => 1_u8, + "num_existing_urefs" => 0_u8, + "allow_exceeding_max_groups" => false + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn create_contract_user_group_small_label_few_new_urefs_few_existing_urefs() { + let session_args = runtime_args! { + "fn" => "create_contract_user_group", + "label_len" => 1_u32, + "num_new_urefs" => 1_u8, + "num_existing_urefs" => 1_u8, + "allow_exceeding_max_groups" => false + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn create_contract_user_group_small_label_few_new_urefs_many_existing_urefs() { + let session_args = runtime_args! { + "fn" => "create_contract_user_group", + "label_len" => 1_u32, + "num_new_urefs" => 1_u8, + "num_existing_urefs" => 5_u8, + "allow_exceeding_max_groups" => false + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn create_contract_user_group_small_label_many_new_urefs_no_existing_urefs() { + let session_args = runtime_args! { + "fn" => "create_contract_user_group", + "label_len" => 1_u32, + "num_new_urefs" => 5_u8, + "num_existing_urefs" => 0_u8, + "allow_exceeding_max_groups" => false + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn create_contract_user_group_small_label_many_new_urefs_few_existing_urefs() { + let session_args = runtime_args! 
{ + "fn" => "create_contract_user_group", + "label_len" => 1_u32, + "num_new_urefs" => 5_u8, + "num_existing_urefs" => 1_u8, + "allow_exceeding_max_groups" => false + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn create_contract_user_group_small_label_many_new_urefs_many_existing_urefs() { + let session_args = runtime_args! { + "fn" => "create_contract_user_group", + "label_len" => 1_u32, + "num_new_urefs" => 5_u8, + "num_existing_urefs" => 5_u8, + "allow_exceeding_max_groups" => false + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn create_contract_user_group_large_label_no_new_urefs_no_existing_urefs() { + let session_args = runtime_args! { + "fn" => "create_contract_user_group", + "label_len" => 1_000_000_u32, + "num_new_urefs" => 0_u8, + "num_existing_urefs" => 0_u8, + "allow_exceeding_max_groups" => false + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn create_contract_user_group_large_label_no_new_urefs_few_existing_urefs() { + let session_args = runtime_args! { + "fn" => "create_contract_user_group", + "label_len" => 1_000_000_u32, + "num_new_urefs" => 0_u8, + "num_existing_urefs" => 1_u8, + "allow_exceeding_max_groups" => false + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn create_contract_user_group_large_label_no_new_urefs_many_existing_urefs() { + let session_args = runtime_args! { + "fn" => "create_contract_user_group", + "label_len" => 1_000_000_u32, + "num_new_urefs" => 0_u8, + "num_existing_urefs" => 10_u8, + "allow_exceeding_max_groups" => false + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn create_contract_user_group_large_label_few_new_urefs_no_existing_urefs() { + let session_args = runtime_args! 
{ + "fn" => "create_contract_user_group", + "label_len" => 1_000_000_u32, + "num_new_urefs" => 1_u8, + "num_existing_urefs" => 0_u8, + "allow_exceeding_max_groups" => false + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn create_contract_user_group_large_label_few_new_urefs_few_existing_urefs() { + let session_args = runtime_args! { + "fn" => "create_contract_user_group", + "label_len" => 1_000_000_u32, + "num_new_urefs" => 1_u8, + "num_existing_urefs" => 1_u8, + "allow_exceeding_max_groups" => false + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn create_contract_user_group_large_label_few_new_urefs_many_existing_urefs() { + let session_args = runtime_args! { + "fn" => "create_contract_user_group", + "label_len" => 1_000_000_u32, + "num_new_urefs" => 1_u8, + "num_existing_urefs" => 5_u8, + "allow_exceeding_max_groups" => false + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn create_contract_user_group_large_label_many_new_urefs_no_existing_urefs() { + let session_args = runtime_args! { + "fn" => "create_contract_user_group", + "label_len" => 1_000_000_u32, + "num_new_urefs" => 5_u8, + "num_existing_urefs" => 0_u8, + "allow_exceeding_max_groups" => false + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn create_contract_user_group_large_label_many_new_urefs_few_existing_urefs() { + let session_args = runtime_args! { + "fn" => "create_contract_user_group", + "label_len" => 1_000_000_u32, + "num_new_urefs" => 5_u8, + "num_existing_urefs" => 1_u8, + "allow_exceeding_max_groups" => false + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn create_contract_user_group_large_label_many_new_urefs_many_existing_urefs() { + let session_args = runtime_args! 
{ + "fn" => "create_contract_user_group", + "label_len" => 1_000_000_u32, + "num_new_urefs" => 5_u8, + "num_existing_urefs" => 5_u8, + "allow_exceeding_max_groups" => false + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn create_contract_user_group_failure_max_urefs_exceeded() { + let session_args = runtime_args! { + "fn" => "create_contract_user_group", + "label_len" => 1_u32, + "num_new_urefs" => u8::MAX, + "num_existing_urefs" => 0_u8, + "allow_exceeding_max_groups" => false + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn create_contract_user_group_failure_max_groups_exceeded() { + let session_args = runtime_args! { + "fn" => "create_contract_user_group", + "label_len" => 1_u32, + "num_new_urefs" => 0_u8, + "num_existing_urefs" => 0_u8, + "allow_exceeding_max_groups" => true + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + // #[ignore] + // #[test] + // fn print_small() { + // let session_args = runtime_args! { + // "fn" => "print", + // "num_chars" => 1_u32 + // }; + // Fixture::new().execute_with_timeout(session_args, 0) + // } + // + // #[ignore] + // #[test] + // fn print_large() { + // let session_args = runtime_args! { + // "fn" => "print", + // "num_chars" => 1_000_000_u32 + // }; + // Fixture::new().execute_with_timeout(session_args, 0) + // } + + #[ignore] + #[test] + fn get_runtime_arg_size_zero() { + let session_args = runtime_args! { + "fn" => "get_runtime_arg_size", + "arg" => () + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn get_runtime_arg_size_small() { + let session_args = runtime_args! { + "fn" => "get_runtime_arg_size", + "arg" => 1_u8 + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn get_runtime_arg_size_large() { + let session_args = runtime_args! 
{ + "fn" => "get_runtime_arg_size", + "arg" => [1_u8; 1_000_000] + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn get_runtime_arg_zero_size() { + let session_args = runtime_args! { + "fn" => "get_runtime_arg", + "arg" => () + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn get_runtime_arg_small_size() { + let session_args = runtime_args! { + "fn" => "get_runtime_arg", + "arg" => 1_u8 + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn get_runtime_arg_large_size() { + let session_args = runtime_args! { + "fn" => "get_runtime_arg", + "arg" => [1_u8; 1_000_000] + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn remove_contract_user_group() { + let session_args = runtime_args! { "fn" => "remove_contract_user_group" }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn extend_contract_user_group_urefs_and_remove_as_required() { + let session_args = runtime_args! { + "fn" => "extend_contract_user_group_urefs", + "allow_exceeding_max_urefs" => false + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn extend_contract_user_group_urefs_failure_max_urefs_exceeded() { + let session_args = runtime_args! { + "fn" => "extend_contract_user_group_urefs", + "allow_exceeding_max_urefs" => true + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn remove_contract_user_group_urefs() { + let session_args = runtime_args! { "fn" => "remove_contract_user_group_urefs" }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn blake2b_small() { + let session_args = runtime_args! { + "fn" => "blake2b", + "len" => 1_u32 + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn blake2b_large() { + let session_args = runtime_args! 
{ + "fn" => "blake2b", + "len" => 1_000_000_u32 + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn new_dictionary() { + let session_args = runtime_args! { "fn" => "new_dictionary" }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn dictionary_get_small_name_small_value() { + let session_args = runtime_args! { + "fn" => "dictionary_get", + "name_len" => 1_u32, + "value_len" => 1_u32, + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn dictionary_get_small_name_large_value() { + let session_args = runtime_args! { + "fn" => "dictionary_get", + "name_len" => 1_u32, + "value_len" => 1_000_u32, + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn dictionary_get_large_name_small_value() { + let session_args = runtime_args! { + "fn" => "dictionary_get", + "name_len" => DICTIONARY_ITEM_KEY_MAX_LENGTH as u32 - 4, + "value_len" => 1_u32, + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn dictionary_get_large_name_large_value() { + let session_args = runtime_args! { + "fn" => "dictionary_get", + "name_len" => DICTIONARY_ITEM_KEY_MAX_LENGTH as u32 - 4, + "value_len" => 1_000_u32, + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn dictionary_put_small_name_small_value() { + let session_args = runtime_args! { + "fn" => "dictionary_put", + "name_len" => 1_u32, + "value_len" => 1_u32, + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn dictionary_put_small_name_large_value() { + let session_args = runtime_args! { + "fn" => "dictionary_put", + "name_len" => 1_u32, + "value_len" => 1_000_u32, + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn dictionary_put_large_name_small_value() { + let session_args = runtime_args! 
{ + "fn" => "dictionary_put", + "name_len" => DICTIONARY_ITEM_KEY_MAX_LENGTH as u32 - 4, + "value_len" => 1_u32, + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn dictionary_put_large_name_large_value() { + let session_args = runtime_args! { + "fn" => "dictionary_put", + "name_len" => DICTIONARY_ITEM_KEY_MAX_LENGTH as u32 - 4, + "value_len" => 1_000_u32, + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn load_call_stack() { + let session_args = runtime_args! { "fn" => "load_call_stack" }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn load_authorization_keys_small() { + let session_args = runtime_args! { + "fn" => "load_authorization_keys", + "setup" => false, + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn load_authorization_keys_large() { + let session_args = runtime_args! { + "fn" => "load_authorization_keys", + "setup" => true, + }; + let mut fixture = Fixture::new(); + fixture.execute_setup(session_args); + let session_args = runtime_args! { + "fn" => "load_authorization_keys", + "setup" => false, + }; + fixture.execute_with_timeout(session_args, production_max_associated_keys() - 1) + } + + #[ignore] + #[test] + fn random_bytes() { + let session_args = runtime_args! { "fn" => "random_bytes" }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn dictionary_read_small_name_small_value() { + let session_args = runtime_args! { + "fn" => "dictionary_read", + "name_len" => 1_u32, + "value_len" => 1_u32, + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn dictionary_read_small_name_large_value() { + let session_args = runtime_args! 
{ + "fn" => "dictionary_read", + "name_len" => 1_u32, + "value_len" => 1_000_u32, + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn dictionary_read_large_name_small_value() { + let session_args = runtime_args! { + "fn" => "dictionary_read", + "name_len" => DICTIONARY_ITEM_KEY_MAX_LENGTH as u32 - 4, + "value_len" => 1_u32, + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn dictionary_read_large_name_large_value() { + let session_args = runtime_args! { + "fn" => "dictionary_read", + "name_len" => DICTIONARY_ITEM_KEY_MAX_LENGTH as u32 - 4, + "value_len" => 1_000_u32, + }; + Fixture::new().execute_with_timeout(session_args, 0) + } + + #[ignore] + #[test] + fn enable_contract_version() { + let session_args = runtime_args! { "fn" => "enable_contract_version" }; + Fixture::new().execute_with_timeout(session_args, 0) + } +} diff --git a/execution_engine_testing/tests/src/test/regression/regression_20250812.rs b/execution_engine_testing/tests/src/test/regression/regression_20250812.rs new file mode 100644 index 0000000000..4af904d619 --- /dev/null +++ b/execution_engine_testing/tests/src/test/regression/regression_20250812.rs @@ -0,0 +1,38 @@ +use casper_engine_test_support::{ + utils::create_run_genesis_request_with_chainspec_config, ChainspecConfig, + ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNTS, DEFAULT_ACCOUNT_ADDR, +}; +use casper_types::RuntimeArgs; + +const DO_NOTHING_CONTRACT: &str = "do_nothing_stored.wasm"; + +#[ignore] +#[test] +fn should_correctly_install_and_add_contract_version_with_ae_turned_on() { + let chainspec = ChainspecConfig::default().with_enable_addressable_entity(true); + + let mut builder = LmdbWasmTestBuilder::new_temporary_with_config(chainspec.clone()); + builder + .run_genesis(create_run_genesis_request_with_chainspec_config( + DEFAULT_ACCOUNTS.to_vec(), + chainspec, + )) + .commit(); + + let install_request_1 = ExecuteRequestBuilder::standard( 
+ *DEFAULT_ACCOUNT_ADDR, + DO_NOTHING_CONTRACT, + RuntimeArgs::default(), + ) + .build(); + + let install_request_2 = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + DO_NOTHING_CONTRACT, + RuntimeArgs::default(), + ) + .build(); + + builder.exec(install_request_1).expect_success().commit(); + builder.exec(install_request_2).expect_success().commit(); +} diff --git a/execution_engine_testing/tests/src/test/regression/slow_input.rs b/execution_engine_testing/tests/src/test/regression/slow_input.rs new file mode 100644 index 0000000000..8eaf42b200 --- /dev/null +++ b/execution_engine_testing/tests/src/test/regression/slow_input.rs @@ -0,0 +1,250 @@ +use std::mem; + +use casper_engine_test_support::{ + ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST, +}; +use casper_types::{ + addressable_entity::DEFAULT_ENTRY_POINT_NAME, Gas, RuntimeArgs, + DEFAULT_CONTROL_FLOW_BR_TABLE_MULTIPLIER, +}; + +use walrus::{ir::BinaryOp, FunctionBuilder, InstrSeqBuilder, Module, ModuleConfig, ValType}; + +#[ignore] +#[test] +fn should_charge_extra_per_amount_of_br_table_elements() { + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + const FIXED_BLOCK_AMOUNT: usize = 256; + const N_ELEMENTS: u32 = 5; + const M_ELEMENTS: u32 = 168; + + let br_table_min_elements = fixed_cost_br_table(FIXED_BLOCK_AMOUNT, N_ELEMENTS); + let br_table_max_elements = fixed_cost_br_table(FIXED_BLOCK_AMOUNT, M_ELEMENTS); + + assert_ne!(&br_table_min_elements, &br_table_max_elements); + + let exec_request_1 = ExecuteRequestBuilder::module_bytes( + *DEFAULT_ACCOUNT_ADDR, + br_table_min_elements, + RuntimeArgs::default(), + ) + .build(); + + let exec_request_2 = ExecuteRequestBuilder::module_bytes( + *DEFAULT_ACCOUNT_ADDR, + br_table_max_elements, + RuntimeArgs::default(), + ) + .build(); + + builder.exec(exec_request_1).expect_success().commit(); + + let gas_cost_1 = builder.last_exec_gas_consumed(); + + 
builder.exec(exec_request_2).expect_success().commit(); + + let gas_cost_2 = builder.last_exec_gas_consumed(); + + assert!( + gas_cost_2 > gas_cost_1, + "larger br_table should cost more gas" + ); + + let br_table_cycles = 5; + + assert_eq!( + gas_cost_2.checked_sub(gas_cost_1), + Some(Gas::from( + (M_ELEMENTS - N_ELEMENTS) * DEFAULT_CONTROL_FLOW_BR_TABLE_MULTIPLIER * br_table_cycles + )), + "the cost difference should equal to exactly the size of br_table difference " + ); +} + +#[allow(dead_code)] +fn cpu_burner_br_if(iterations: i64) -> Vec { + let mut module = Module::with_config(ModuleConfig::new()); + + let _memory_id = module.memories.add_local(false, 11, None); + + let mut loop_func = FunctionBuilder::new(&mut module.types, &[ValType::I64], &[]); + + let var_counter = module.locals.add(ValType::I64); + let var_i = module.locals.add(ValType::I64); + + loop_func + .func_body() + // i := 0 + .i64_const(0) + .local_set(var_i) + .loop_(None, |loop_| { + let loop_id = loop_.id(); + loop_. 
// loop: + // i += 1 + local_get(var_i) + .i64_const(1) + .binop(BinaryOp::I64Add) + // if i < iterations { + .local_tee(var_i) + .local_get(var_counter) + .binop(BinaryOp::I64LtU) + // goto loop + // } + .br_if(loop_id); + }); + + let loop_func = loop_func.finish(vec![var_counter], &mut module.funcs); + + let mut call_func = FunctionBuilder::new(&mut module.types, &[], &[]); + call_func.func_body().i64_const(iterations).call(loop_func); + + let call_func = call_func.finish(Vec::new(), &mut module.funcs); + + module.exports.add(DEFAULT_ENTRY_POINT_NAME, call_func); + + module.emit_wasm() +} + +#[allow(dead_code)] +fn cpu_burner_br_table(iterations: i64) -> Vec { + let mut module = Module::with_config(ModuleConfig::new()); + + let _memory_id = module.memories.add_local(false, 11, None); + + let mut loop_func = FunctionBuilder::new(&mut module.types, &[ValType::I64], &[]); + + let param_iterations = module.locals.add(ValType::I64); + let local_i = module.locals.add(ValType::I64); + + loop_func + .func_body() + // i := 0 + .i64_const(0) + .local_set(local_i) + .block(None, |loop_break| { + let loop_break_id = loop_break.id(); + + loop_break.loop_(None, |while_loop| { + let while_loop_id = while_loop.id(); // loop: + + while_loop + .block(None, |while_loop_inner| { + let while_loop_inner_id = while_loop_inner.id(); + // counter += 1 + while_loop_inner + .local_get(local_i) + .i64_const(1) + .binop(BinaryOp::I64Add) + // switch (i < counter) { + .local_tee(local_i) + .local_get(param_iterations) + .binop(BinaryOp::I64LtU) + .br_table( + vec![ + // case 0: break; + loop_break_id, + // case 1: continue; (goto while_loop) + while_loop_id, + ] + .into(), + // default: throw() + while_loop_inner_id, + ); + }) + // the "throw" + .unreachable(); + }); + }); + + let loop_func = loop_func.finish(vec![param_iterations], &mut module.funcs); + + let mut call_func = FunctionBuilder::new(&mut module.types, &[], &[]); + call_func.func_body().i64_const(iterations).call(loop_func); + + 
let call_func = call_func.finish(Vec::new(), &mut module.funcs); + + module.exports.add(DEFAULT_ENTRY_POINT_NAME, call_func); + + module.emit_wasm() +} + +/// Creates Wasm bytes with fixed amount of `block`s but with a `br_table` of a variable size. +/// +/// Gas cost of executing `fixed_cost_br_table(n + m)` should be greater than +/// `fixed_cost_br_table(n)` by exactly `br_table.entry_cost * m` iff m > 0. +fn fixed_cost_br_table(total_labels: usize, br_table_element_size: u32) -> Vec { + assert!((br_table_element_size as usize) < total_labels); + + let mut module = Module::with_config(ModuleConfig::new()); + + let _memory_id = module.memories.add_local(false, 11, None); + + let mut br_table_func = FunctionBuilder::new(&mut module.types, &[ValType::I32], &[]); + + let param_jump_label = module.locals.add(ValType::I32); + + fn recursive_block_generator( + current_block: &mut InstrSeqBuilder, + mut recursive_step_fn: impl FnMut(&mut InstrSeqBuilder) -> bool, + ) { + if !recursive_step_fn(current_block) { + current_block.block(None, |nested_block| { + recursive_block_generator(nested_block, recursive_step_fn); + }); + } + } + + br_table_func.func_body().block(None, |outer_block| { + // Outer block becames the "default" jump label for `br_table`. + let outer_block_id = outer_block.id(); + + // Count of recursive iterations left + let mut counter = total_labels; + + // Labels are extended with newly generated labels at each recursive step + let mut labels = Vec::new(); + + // Generates nested blocks + recursive_block_generator(outer_block, |step| { + // Save current nested block in labels. + labels.push(step.id()); + + if counter == 0 { + // At the tail of this recursive generator we'll create a `br_table` with variable + // amount of labels depending on this function parameter. 
+ let labels = mem::take(&mut labels); + let sliced_labels = labels.as_slice()[..br_table_element_size as usize].to_vec(); + + // Code at the tail block + step.local_get(param_jump_label) + .br_table(sliced_labels.into(), outer_block_id); + + // True means this is a tail call, and we won't go deeper + true + } else { + counter -= 1; + // Go deeper + false + } + }) + }); + + let br_table_func = br_table_func.finish(vec![param_jump_label], &mut module.funcs); + + let mut call_func = FunctionBuilder::new(&mut module.types, &[], &[]); + call_func + .func_body() + // Call `br_table_func` with 0 as the jump label, + // Specific value does not change the cost, so as long as it will generate valid wasm it's + // ok. + .i32_const(0) + .call(br_table_func); + + let call_func = call_func.finish(Vec::new(), &mut module.funcs); + + module.exports.add(DEFAULT_ENTRY_POINT_NAME, call_func); + + module.emit_wasm() +} diff --git a/execution_engine_testing/tests/src/test/regression/test_utils.rs b/execution_engine_testing/tests/src/test/regression/test_utils.rs new file mode 100644 index 0000000000..aac1724539 --- /dev/null +++ b/execution_engine_testing/tests/src/test/regression/test_utils.rs @@ -0,0 +1,83 @@ +use casper_engine_test_support::DEFAULT_WASM_V1_CONFIG; +use casper_types::addressable_entity::DEFAULT_ENTRY_POINT_NAME; +use casper_wasm::{ + builder, + elements::{Instruction, Instructions}, +}; + +/// Prepare malicious payload with amount of opcodes that could potentially overflow injected gas +/// counter. +pub(crate) fn make_gas_counter_overflow() -> Vec { + let opcode_costs = DEFAULT_WASM_V1_CONFIG.opcode_costs(); + + // Create a lot of `nop` opcodes to potentially overflow gas injector's batching counter. 
+ let upper_bound = (u32::MAX as usize / opcode_costs.nop as usize) + 1; + + let instructions = { + let mut instructions = vec![Instruction::Nop; upper_bound]; + instructions.push(Instruction::End); + Instructions::new(instructions) + }; + + let module = builder::module() + .function() + // A signature with 0 params and no return type + .signature() + .build() + .body() + // Generated instructions for our entrypoint + .with_instructions(instructions) + .build() + .build() + // Export above function + .export() + .field(DEFAULT_ENTRY_POINT_NAME) + .build() + // Memory section is mandatory + .memory() + .build() + .build(); + casper_wasm::serialize(module).expect("should serialize") +} + +/// Prepare malicious payload in a form of a wasm module without memory section. +pub(crate) fn make_module_without_memory_section() -> Vec { + // Create some opcodes. + let upper_bound = 10; + + let instructions = { + let mut instructions = vec![Instruction::Nop; upper_bound]; + instructions.push(Instruction::End); + Instructions::new(instructions) + }; + + let module = builder::module() + .function() + // A signature with 0 params and no return type + .signature() + .build() + .body() + // Generated instructions for our entrypoint + .with_instructions(instructions) + .build() + .build() + // Export above function + .export() + .field(DEFAULT_ENTRY_POINT_NAME) + .build() + .build(); + casper_wasm::serialize(module).expect("should serialize") +} + +/// Prepare malicious payload in a form of a wasm module with forbidden start section. 
+pub(crate) fn make_module_with_start_section() -> Vec { + let module = r#" + (module + (memory 1) + (start 0) + (func (export "call") + ) + ) + "#; + wat::parse_str(module).expect("should parse wat") +} diff --git a/execution_engine_testing/tests/src/test/regression/transforms_must_be_ordered.rs b/execution_engine_testing/tests/src/test/regression/transforms_must_be_ordered.rs new file mode 100644 index 0000000000..a7054db06b --- /dev/null +++ b/execution_engine_testing/tests/src/test/regression/transforms_must_be_ordered.rs @@ -0,0 +1,133 @@ +//! Tests whether transforms produced by contracts appear ordered in the effects. +use core::convert::TryInto; + +use rand::{rngs::StdRng, Rng, SeedableRng}; + +use casper_engine_test_support::{ + DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, + LOCAL_GENESIS_REQUEST, +}; +use casper_types::{ + execution::TransformKindV2, runtime_args, system::standard_payment, AddressableEntityHash, Key, + URef, U512, +}; + +#[ignore] +#[test] +fn contract_transforms_should_be_ordered_in_the_effects() { + // This many URefs will be created in the contract. + const N_UREFS: u32 = 100; + // This many operations will be scattered among these URefs. + const N_OPS: usize = 1000; + + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let mut rng = StdRng::seed_from_u64(0); + + let execution_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + "ordered-transforms.wasm", + runtime_args! { "n" => N_UREFS }, + ) + .build(); + + // Installs the contract and creates the URefs, all initialized to `0_i32`. 
+ builder.exec(execution_request).expect_success().commit(); + + let contract_hash = match builder + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .unwrap() + .named_keys() + .get("ordered-transforms-contract-hash") + .unwrap() + { + Key::AddressableEntity(entity_addr) => AddressableEntityHash::new(entity_addr.value()), + _ => panic!("Couldn't find ordered-transforms contract."), + }; + + // List of operations to be performed by the contract. + // An operation is a tuple (t, i, v) where: + // * `t` is the operation type: 0 for reading, 1 for writing and 2 for adding; + // * `i` is the URef index; + // * `v` is the value to write or add (always zero for reads). + let operations: Vec<(u8, u32, i32)> = (0..N_OPS) + .map(|_| { + let t: u8 = rng.gen_range(0..3); + let i: u32 = rng.gen_range(0..N_UREFS); + if t == 0 { + (t, i, 0) + } else { + (t, i, rng.gen()) + } + }) + .collect(); + + builder + .exec( + ExecuteRequestBuilder::from_deploy_item( + &DeployItemBuilder::new() + .with_address(*DEFAULT_ACCOUNT_ADDR) + .with_standard_payment(runtime_args! { + standard_payment::ARG_AMOUNT => U512::from(150_000_000_000_u64), + }) + .with_stored_session_hash( + contract_hash, + "perform_operations", + runtime_args! 
{ + "operations" => operations.clone(), + }, + ) + .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) + .with_deploy_hash(rng.gen()) + .build(), + ) + .build(), + ) + .expect_success() + .commit(); + + let exec_result = builder.get_exec_result_owned(1).unwrap(); + let effects = exec_result.effects(); + + let contract = builder + .get_entity_with_named_keys_by_entity_hash(contract_hash) + .unwrap(); + let urefs: Vec = (0..N_UREFS) + .map( + |i| match contract.named_keys().get(&format!("uref-{}", i)).unwrap() { + Key::URef(uref) => *uref, + _ => panic!("Expected a URef."), + }, + ) + .collect(); + + assert!(effects + .transforms() + .iter() + .filter_map(|transform| { + let uref = match transform.key() { + Key::URef(uref) => uref, + _ => return None, + }; + let uref_index: u32 = match urefs + .iter() + .enumerate() + .find(|(_, u)| u.addr() == uref.addr()) + { + Some((i, _)) => i.try_into().unwrap(), + None => return None, + }; + let (type_index, value): (u8, i32) = match transform.kind() { + TransformKindV2::Identity => (0, 0), + TransformKindV2::Write(sv) => { + let v: i32 = sv.as_cl_value().unwrap().clone().into_t().unwrap(); + (1, v) + } + TransformKindV2::AddInt32(v) => (2, *v), + _ => panic!("Invalid transform."), + }; + Some((type_index, uref_index, value)) + }) + .eq(operations.into_iter())); +} diff --git a/execution_engine_testing/tests/src/test/stack_overflow.rs b/execution_engine_testing/tests/src/test/stack_overflow.rs new file mode 100644 index 0000000000..09ebadac76 --- /dev/null +++ b/execution_engine_testing/tests/src/test/stack_overflow.rs @@ -0,0 +1,40 @@ +use casper_engine_test_support::{ + ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST, +}; +use casper_execution_engine::{engine_state::Error, execution::ExecError}; +use casper_types::RuntimeArgs; + +#[ignore] +#[test] +fn runtime_stack_overflow_should_cause_unreachable_error() { + // Create an unconstrained recursive call + let wat = r#"(module + (func 
$call (call $call)) + (export "call" (func $call)) + (memory $memory 1) + )"#; + + let module_bytes = wat::parse_str(wat).unwrap(); + + let do_stack_overflow_request = ExecuteRequestBuilder::module_bytes( + *DEFAULT_ACCOUNT_ADDR, + module_bytes, + RuntimeArgs::default(), + ) + .build(); + + let mut builder = LmdbWasmTestBuilder::default(); + + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + builder + .exec(do_stack_overflow_request) + .expect_failure() + .commit(); + + let error = builder.get_error().expect("should have error"); + assert!( + matches!(&error, Error::Exec(ExecError::Interpreter(s)) if s.contains("Unreachable")), + "{:?}", + error + ); +} diff --git a/execution_engine_testing/tests/src/test/step.rs b/execution_engine_testing/tests/src/test/step.rs index e65de4c17c..66deb59a93 100644 --- a/execution_engine_testing/tests/src/test/step.rs +++ b/execution_engine_testing/tests/src/test/step.rs @@ -1,74 +1,61 @@ -use std::convert::TryFrom; - use num_traits::Zero; use once_cell::sync::Lazy; -use casper_engine_test_support::internal::{ - utils, InMemoryWasmTestBuilder, StepRequestBuilder, WasmTestBuilder, DEFAULT_ACCOUNTS, -}; -use casper_execution_engine::{ - core::engine_state::{ - genesis::{GenesisAccount, GenesisValidator}, - RewardItem, SlashItem, - }, - shared::motes::Motes, - storage::global_state::in_memory::InMemoryGlobalState, +use casper_engine_test_support::{ + utils, LmdbWasmTestBuilder, StepRequestBuilder, DEFAULT_ACCOUNTS, }; +use casper_storage::data_access_layer::SlashItem; use casper_types::{ system::{ - auction::{Bids, DelegationRate, SeigniorageRecipientsSnapshot, BLOCK_REWARD}, + auction::{ + BidsExt, DelegationRate, SeigniorageRecipientsSnapshotV2, + SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY, + }, mint::TOTAL_SUPPLY_KEY, }, - CLValue, ContractHash, EraId, Key, ProtocolVersion, PublicKey, SecretKey, U512, + CLValue, EntityAddr, EraId, GenesisAccount, GenesisValidator, Key, Motes, ProtocolVersion, + PublicKey, SecretKey, U512, }; static 
ACCOUNT_1_PK: Lazy = Lazy::new(|| { - SecretKey::ed25519_from_bytes([200; SecretKey::ED25519_LENGTH]) - .unwrap() - .into() + let secret_key = SecretKey::ed25519_from_bytes([200; SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) }); const ACCOUNT_1_BALANCE: u64 = 100_000_000; const ACCOUNT_1_BOND: u64 = 100_000_000; static ACCOUNT_2_PK: Lazy = Lazy::new(|| { - SecretKey::ed25519_from_bytes([202; SecretKey::ED25519_LENGTH]) - .unwrap() - .into() + let secret_key = SecretKey::ed25519_from_bytes([202; SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) }); const ACCOUNT_2_BALANCE: u64 = 200_000_000; const ACCOUNT_2_BOND: u64 = 200_000_000; -fn get_named_key( - builder: &mut InMemoryWasmTestBuilder, - contract_hash: ContractHash, - name: &str, -) -> Key { +fn get_named_key(builder: &mut LmdbWasmTestBuilder, entity_hash: EntityAddr, name: &str) -> Key { *builder - .get_contract(contract_hash) - .expect("should have contract") - .named_keys() + .get_named_keys(entity_hash) .get(name) .expect("should have bid purses") } -fn initialize_builder() -> WasmTestBuilder { - let mut builder = InMemoryWasmTestBuilder::default(); +fn initialize_builder() -> LmdbWasmTestBuilder { + let mut builder = LmdbWasmTestBuilder::default(); let accounts = { let mut tmp: Vec = DEFAULT_ACCOUNTS.clone(); let account_1 = GenesisAccount::account( ACCOUNT_1_PK.clone(), - Motes::new(ACCOUNT_1_BALANCE.into()), + Motes::new(ACCOUNT_1_BALANCE), Some(GenesisValidator::new( - Motes::new(ACCOUNT_1_BOND.into()), + Motes::new(ACCOUNT_1_BOND), DelegationRate::zero(), )), ); let account_2 = GenesisAccount::account( ACCOUNT_2_PK.clone(), - Motes::new(ACCOUNT_2_BALANCE.into()), + Motes::new(ACCOUNT_2_BALANCE), Some(GenesisValidator::new( - Motes::new(ACCOUNT_2_BOND.into()), + Motes::new(ACCOUNT_2_BOND), DelegationRate::zero(), )), ); @@ -77,7 +64,7 @@ fn initialize_builder() -> WasmTestBuilder { tmp }; let run_genesis_request = utils::create_run_genesis_request(accounts); - 
builder.run_genesis(&run_genesis_request); + builder.run_genesis(run_genesis_request); builder } @@ -86,53 +73,51 @@ fn initialize_builder() -> WasmTestBuilder { #[test] fn should_step() { let mut builder = initialize_builder(); + let step_request_builder = builder.step_request_builder(); - let step_request = StepRequestBuilder::new() - .with_parent_state_hash(builder.get_post_state_hash()) - .with_protocol_version(ProtocolVersion::V1_0_0) + let step_request = step_request_builder .with_slash_item(SlashItem::new(ACCOUNT_1_PK.clone())) - .with_reward_item(RewardItem::new(ACCOUNT_1_PK.clone(), BLOCK_REWARD / 2)) - .with_reward_item(RewardItem::new(ACCOUNT_2_PK.clone(), BLOCK_REWARD / 2)) .with_next_era_id(EraId::from(1)) .build(); - let before_auction_seigniorage: SeigniorageRecipientsSnapshot = - builder.get_seigniorage_recipients_snapshot(); + let auction_hash = builder.get_auction_contract_hash(); - let bids_before_slashing: Bids = builder.get_bids(); - assert!( - bids_before_slashing.contains_key(&ACCOUNT_1_PK), - "should have entry in the genesis bids table {:?}", - bids_before_slashing + let before_auction_seigniorage: SeigniorageRecipientsSnapshotV2 = builder.get_value( + EntityAddr::System(auction_hash.value()), + SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY, ); - let bids_before_slashing: Bids = builder.get_bids(); + let bids_before_slashing = builder.get_bids(); + let account_1_bid = bids_before_slashing + .validator_bid(&ACCOUNT_1_PK) + .expect("should have account1 bid"); + assert!(!account_1_bid.inactive(), "bid should not be inactive"); assert!( - bids_before_slashing.contains_key(&ACCOUNT_1_PK), - "should have entry in bids table before slashing {:?}", - bids_before_slashing + !account_1_bid.staked_amount().is_zero(), + "bid amount should not be 0" ); - builder.step(step_request); + assert!(builder.step(step_request).is_success(), "should step"); - let bids_after_slashing: Bids = builder.get_bids(); - let account_1_bid = 
bids_after_slashing.get(&ACCOUNT_1_PK).unwrap(); - assert!(account_1_bid.inactive()); - assert!(account_1_bid.staked_amount().is_zero()); + let bids_after_slashing = builder.get_bids(); + assert!(bids_after_slashing.validator_bid(&ACCOUNT_1_PK).is_none()); - let bids_after_slashing: Bids = builder.get_bids(); assert_ne!( bids_before_slashing, bids_after_slashing, "bids table should be different before and after slashing" ); // seigniorage snapshot should have changed after auction - let after_auction_seigniorage: SeigniorageRecipientsSnapshot = - builder.get_seigniorage_recipients_snapshot(); - - assert!(before_auction_seigniorage - .keys() - .ne(after_auction_seigniorage.keys())) + let after_auction_seigniorage: SeigniorageRecipientsSnapshotV2 = builder.get_value( + EntityAddr::System(auction_hash.value()), + SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY, + ); + assert!( + !after_auction_seigniorage + .keys() + .all(|key| before_auction_seigniorage.contains_key(key)), + "run auction should have changed seigniorage keys" + ); } /// Should be able to step slashing, rewards, and run auction. 
@@ -145,9 +130,13 @@ fn should_adjust_total_supply() { let mint_hash = builder.get_mint_contract_hash(); // should check total supply before step - let total_supply_key = get_named_key(&mut builder, mint_hash, TOTAL_SUPPLY_KEY) - .into_uref() - .expect("should be uref"); + let total_supply_key = get_named_key( + &mut builder, + EntityAddr::System(mint_hash.value()), + TOTAL_SUPPLY_KEY, + ) + .into_uref() + .expect("should be uref"); let starting_total_supply = CLValue::try_from( builder @@ -164,12 +153,11 @@ fn should_adjust_total_supply() { .with_protocol_version(ProtocolVersion::V1_0_0) .with_slash_item(SlashItem::new(ACCOUNT_1_PK.clone())) .with_slash_item(SlashItem::new(ACCOUNT_2_PK.clone())) - .with_reward_item(RewardItem::new(ACCOUNT_1_PK.clone(), 0)) - .with_reward_item(RewardItem::new(ACCOUNT_2_PK.clone(), BLOCK_REWARD / 2)) .with_next_era_id(EraId::from(1)) .build(); - builder.step(step_request); + assert!(builder.step(step_request).is_success(), "should step"); + let maybe_post_state_hash = Some(builder.get_post_state_hash()); // should check total supply after step diff --git a/execution_engine_testing/tests/src/test/storage_costs.rs b/execution_engine_testing/tests/src/test/storage_costs.rs index 1b0884a72a..d77420e30f 100644 --- a/execution_engine_testing/tests/src/test/storage_costs.rs +++ b/execution_engine_testing/tests/src/test/storage_costs.rs @@ -1,43 +1,30 @@ +use std::collections::{BTreeMap, BTreeSet}; + +use num_rational::Ratio; +use num_traits::Zero; use once_cell::sync::Lazy; -#[cfg(not(feature = "use-as-wasm"))] -use casper_engine_test_support::internal::DEFAULT_ACCOUNT_PUBLIC_KEY; use casper_engine_test_support::{ - internal::{ - ExecuteRequestBuilder, InMemoryWasmTestBuilder, UpgradeRequestBuilder, - DEFAULT_PROTOCOL_VERSION, DEFAULT_RUN_GENESIS_REQUEST, - }, - DEFAULT_ACCOUNT_ADDR, -}; -#[cfg(not(feature = "use-as-wasm"))] -use casper_execution_engine::shared::system_config::auction_costs::DEFAULT_ADD_BID_COST; -use 
casper_execution_engine::shared::{ - host_function_costs::{HostFunction, HostFunctionCosts}, - opcode_costs::OpcodeCosts, - storage_costs::StorageCosts, - stored_value::StoredValue, - wasm_config::{WasmConfig, DEFAULT_MAX_STACK_HEIGHT, DEFAULT_WASM_MAX_MEMORY}, + ExecuteRequestBuilder, LmdbWasmTestBuilder, UpgradeRequestBuilder, DEFAULT_ACCOUNT_ADDR, + DEFAULT_ACCOUNT_PUBLIC_KEY, DEFAULT_PROTOCOL_VERSION, LOCAL_GENESIS_REQUEST, }; use casper_types::{ bytesrepr::{Bytes, ToBytes}, - CLValue, ContractHash, EraId, ProtocolVersion, RuntimeArgs, U512, -}; -#[cfg(not(feature = "use-as-wasm"))] -use casper_types::{ + contracts::{ContractHash, ContractPackage, ContractVersionKey}, runtime_args, system::{ auction::{self, DelegationRate}, AUCTION, }, + AddressableEntityHash, BrTableCost, CLValue, ControlFlowCosts, EraId, Gas, Group, Groups, + HostFunctionCostsV1, HostFunctionCostsV2, Key, MessageLimits, OpcodeCosts, ProtocolVersion, + RuntimeArgs, StorageCosts, StoredValue, URef, WasmConfig, WasmV1Config, WasmV2Config, + DEFAULT_ADD_BID_COST, DEFAULT_MAX_STACK_HEIGHT, DEFAULT_WASM_MAX_MEMORY, U512, }; -use num_rational::Ratio; - const DEFAULT_ACTIVATION_POINT: EraId = EraId::new(0); const STORAGE_COSTS_NAME: &str = "storage_costs.wasm"; -#[cfg(not(feature = "use-as-wasm"))] const SYSTEM_CONTRACT_HASHES_NAME: &str = "system_contract_hashes.wasm"; -#[cfg(not(feature = "use-as-wasm"))] const DO_NOTHING_WASM: &str = "do_nothing.wasm"; const CONTRACT_KEY_NAME: &str = "contract"; @@ -58,7 +45,7 @@ const WRITE_SMALL_VALUE: &[u8] = b"1"; const WRITE_LARGE_VALUE: &[u8] = b"1111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111"; const ADD_SMALL_VALUE: u64 = 1; -const ADD_LARGE_VALUE: u64 = u64::max_value(); +const ADD_LARGE_VALUE: u64 = u64::MAX; const NEW_OPCODE_COSTS: OpcodeCosts = OpcodeCosts { bit: 0, @@ -70,69 +57,50 @@ const NEW_OPCODE_COSTS: OpcodeCosts = OpcodeCosts { op_const: 0, local: 0, global: 0, - control_flow: 0, + 
control_flow: ControlFlowCosts { + block: 0, + op_loop: 0, + op_if: 0, + op_else: 0, + end: 0, + br: 0, + br_if: 0, + br_table: BrTableCost { + cost: 0, + size_multiplier: 0, + }, + op_return: 0, + call: 0, + call_indirect: 0, + drop: 0, + select: 0, + }, integer_comparison: 0, conversion: 0, unreachable: 0, nop: 0, current_memory: 0, grow_memory: 0, - regular: 0, + sign: 0, }; -static NEW_HOST_FUNCTION_COSTS: Lazy = Lazy::new(|| HostFunctionCosts { - read_value: HostFunction::fixed(0), - read_value_local: HostFunction::fixed(0), - write: HostFunction::fixed(0), - write_local: HostFunction::fixed(0), - add: HostFunction::fixed(0), - new_uref: HostFunction::fixed(0), - load_named_keys: HostFunction::fixed(0), - ret: HostFunction::fixed(0), - get_key: HostFunction::fixed(0), - has_key: HostFunction::fixed(0), - put_key: HostFunction::fixed(0), - remove_key: HostFunction::fixed(0), - revert: HostFunction::fixed(0), - is_valid_uref: HostFunction::fixed(0), - add_associated_key: HostFunction::fixed(0), - remove_associated_key: HostFunction::fixed(0), - update_associated_key: HostFunction::fixed(0), - set_action_threshold: HostFunction::fixed(0), - get_caller: HostFunction::fixed(0), - get_blocktime: HostFunction::fixed(0), - create_purse: HostFunction::fixed(0), - transfer_to_account: HostFunction::fixed(0), - transfer_from_purse_to_account: HostFunction::fixed(0), - transfer_from_purse_to_purse: HostFunction::fixed(0), - get_balance: HostFunction::fixed(0), - get_phase: HostFunction::fixed(0), - get_system_contract: HostFunction::fixed(0), - get_main_purse: HostFunction::fixed(0), - read_host_buffer: HostFunction::fixed(0), - create_contract_package_at_hash: HostFunction::fixed(0), - create_contract_user_group: HostFunction::fixed(0), - add_contract_version: HostFunction::fixed(0), - disable_contract_version: HostFunction::fixed(0), - call_contract: HostFunction::fixed(0), - call_versioned_contract: HostFunction::fixed(0), - get_named_arg_size: HostFunction::fixed(0), 
- get_named_arg: HostFunction::fixed(0), - remove_contract_user_group: HostFunction::fixed(0), - provision_contract_user_group_uref: HostFunction::fixed(0), - remove_contract_user_group_urefs: HostFunction::fixed(0), - print: HostFunction::fixed(0), - blake2b: HostFunction::fixed(0), -}); -static STORAGE_COSTS_ONLY: Lazy = Lazy::new(|| { - WasmConfig::new( +static NEW_HOST_FUNCTION_COSTS: Lazy = Lazy::new(HostFunctionCostsV1::zero); +static NEW_HOST_FUNCTION_COSTS_V2: Lazy = Lazy::new(HostFunctionCostsV2::zero); +static NO_COSTS_WASM_CONFIG: Lazy = Lazy::new(|| { + let wasm_v1_config = WasmV1Config::new( DEFAULT_WASM_MAX_MEMORY, DEFAULT_MAX_STACK_HEIGHT, NEW_OPCODE_COSTS, - StorageCosts::default(), *NEW_HOST_FUNCTION_COSTS, - ) + ); + let wasm_v2_config = WasmV2Config::new( + DEFAULT_WASM_MAX_MEMORY, + NEW_OPCODE_COSTS, + *NEW_HOST_FUNCTION_COSTS_V2, + ); + WasmConfig::new(MessageLimits::default(), wasm_v1_config, wasm_v2_config) }); + static NEW_PROTOCOL_VERSION: Lazy = Lazy::new(|| { ProtocolVersion::from_parts( DEFAULT_PROTOCOL_VERSION.value().major, @@ -141,28 +109,45 @@ static NEW_PROTOCOL_VERSION: Lazy = Lazy::new(|| { ) }); -fn initialize_isolated_storage_costs() -> InMemoryWasmTestBuilder { +/* +NOTE: in this test suite, to isolate specific micro functions, +we are using specific costs that are not indicative of production values + +Do not interpret statements in this test suite as global statements of fact; +rather, they are self-reflective. + +For instance, "should not charge for x" does not mean production usage would allow zero +cost host interaction. It only means in this controlled setup we have isolated that value +for fine grained testing. 
+*/ + +fn initialize_isolated_storage_costs() -> LmdbWasmTestBuilder { // This test runs a contract that's after every call extends the same key with // more data - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); // // Isolate storage costs without host function costs, and without opcode costs // - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); let mut upgrade_request = UpgradeRequestBuilder::new() - .with_current_protocol_version(*DEFAULT_PROTOCOL_VERSION) + .with_current_protocol_version(DEFAULT_PROTOCOL_VERSION) .with_new_protocol_version(*NEW_PROTOCOL_VERSION) .with_activation_point(DEFAULT_ACTIVATION_POINT) - .with_new_wasm_config(*STORAGE_COSTS_ONLY) .build(); - builder.upgrade_with_upgrade_request(&mut upgrade_request); + let updated_chainspec = builder + .chainspec() + .clone() + .with_wasm_config(*NO_COSTS_WASM_CONFIG); + + builder + .with_chainspec(updated_chainspec) + .upgrade(&mut upgrade_request); builder } -#[cfg(not(feature = "use-as-wasm"))] #[ignore] #[test] fn should_verify_isolate_host_side_payment_code_is_free() { @@ -173,11 +158,10 @@ fn should_verify_isolate_host_side_payment_code_is_free() { DO_NOTHING_WASM, RuntimeArgs::default(), ) - .with_protocol_version(*NEW_PROTOCOL_VERSION) .build(); let account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) + .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR) .expect("should have account"); let balance_before = builder.get_purse_balance(account.main_purse()); @@ -194,10 +178,9 @@ fn should_verify_isolate_host_side_payment_code_is_free() { balance_before - transaction_fee, "balance before and after should match" ); - assert_eq!(builder.last_exec_gas_cost().value(), U512::zero()); + assert_eq!(builder.last_exec_gas_consumed().value(), U512::zero()); } -#[cfg(not(feature = "use-as-wasm"))] #[ignore] #[test] fn should_verify_isolated_auction_storage_is_free() { @@ -211,12 +194,11 @@ fn 
should_verify_isolated_auction_storage_is_free() { SYSTEM_CONTRACT_HASHES_NAME, RuntimeArgs::default(), ) - .with_protocol_version(*NEW_PROTOCOL_VERSION) .build(); builder.exec(exec_request).expect_success().commit(); let account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) .expect("should have account"); let bond_amount = U512::from(BOND_AMOUNT); @@ -227,7 +209,7 @@ fn should_verify_isolated_auction_storage_is_free() { .named_keys() .get(AUCTION) .unwrap() - .into_hash() + .into_entity_hash_addr() .unwrap() .into(), auction::METHOD_ADD_BID, @@ -237,7 +219,6 @@ fn should_verify_isolated_auction_storage_is_free() { auction::ARG_DELEGATION_RATE => DELEGATION_RATE, }, ) - .with_protocol_version(*NEW_PROTOCOL_VERSION) .build(); let balance_before = builder.get_purse_balance(account.main_purse()); @@ -259,7 +240,7 @@ fn should_verify_isolated_auction_storage_is_free() { expected - balance_after ); assert_eq!( - builder.last_exec_gas_cost().value(), + builder.last_exec_gas_consumed().value(), U512::from(DEFAULT_ADD_BID_COST) ); } @@ -276,24 +257,22 @@ fn should_measure_gas_cost_for_storage_usage_write() { STORAGE_COSTS_NAME, RuntimeArgs::default(), ) - .with_protocol_version(*NEW_PROTOCOL_VERSION) .build(); builder.exec(install_exec_request).expect_success().commit(); - assert!(!builder.last_exec_gas_cost().value().is_zero()); + assert!(!builder.last_exec_gas_consumed().value().is_zero()); let account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) .expect("should have account"); - let contract_hash: ContractHash = account + let contract_hash: AddressableEntityHash = account .named_keys() .get(CONTRACT_KEY_NAME) .expect("contract hash") - .into_hash() - .expect("should be hash") - .into(); + .into_entity_hash() + .expect("should be hash"); // // Measure small write @@ -308,7 +287,6 @@ fn 
should_measure_gas_cost_for_storage_usage_write() { WRITE_FUNCTION_SMALL_NAME, RuntimeArgs::default(), ) - .with_protocol_version(*NEW_PROTOCOL_VERSION) .build(); builder_a @@ -316,7 +294,7 @@ fn should_measure_gas_cost_for_storage_usage_write() { .expect_success() .commit(); - builder_a.last_exec_gas_cost() + builder_a.last_exec_gas_consumed() }; let expected_small_write_data = @@ -349,7 +327,6 @@ fn should_measure_gas_cost_for_storage_usage_write() { WRITE_FUNCTION_LARGE_NAME, RuntimeArgs::default(), ) - .with_protocol_version(*NEW_PROTOCOL_VERSION) .build(); builder_b @@ -357,7 +334,7 @@ fn should_measure_gas_cost_for_storage_usage_write() { .expect_success() .commit(); - builder_b.last_exec_gas_cost() + builder_b.last_exec_gas_consumed() }; let expected_large_write_data = @@ -383,8 +360,8 @@ fn should_measure_gas_cost_for_storage_usage_write() { fn should_measure_unisolated_gas_cost_for_storage_usage_write() { let cost_per_byte = U512::from(StorageCosts::default().gas_per_byte()); - let mut builder = InMemoryWasmTestBuilder::default(); - builder.run_genesis(&*DEFAULT_RUN_GENESIS_REQUEST); + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); let install_exec_request = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, @@ -396,16 +373,15 @@ fn should_measure_unisolated_gas_cost_for_storage_usage_write() { builder.exec(install_exec_request).expect_success().commit(); let account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) .expect("should have account"); - let contract_hash: ContractHash = account + let contract_hash: AddressableEntityHash = account .named_keys() .get(CONTRACT_KEY_NAME) .expect("contract hash") - .into_hash() - .expect("should be hash") - .into(); + .into_entity_hash() + .expect("should be hash"); // // Measure small write @@ -427,7 +403,7 @@ fn should_measure_unisolated_gas_cost_for_storage_usage_write() { 
.expect_success() .commit(); - builder_a.last_exec_gas_cost() + builder_a.last_exec_gas_consumed() }; let expected_small_write_data = @@ -467,7 +443,7 @@ fn should_measure_unisolated_gas_cost_for_storage_usage_write() { .expect_success() .commit(); - builder_b.last_exec_gas_cost() + builder_b.last_exec_gas_consumed() }; let expected_large_write_data = @@ -500,7 +476,6 @@ fn should_measure_gas_cost_for_storage_usage_add() { STORAGE_COSTS_NAME, RuntimeArgs::default(), ) - .with_protocol_version(*NEW_PROTOCOL_VERSION) .build(); builder.exec(install_exec_request).expect_success().commit(); @@ -508,16 +483,15 @@ fn should_measure_gas_cost_for_storage_usage_add() { // let mut builder_a = builder.clone(); let account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) .expect("should have account"); - let contract_hash: ContractHash = account + let contract_hash: AddressableEntityHash = account .named_keys() .get(CONTRACT_KEY_NAME) .expect("contract hash") - .into_hash() - .expect("should be hash") - .into(); + .into_entity_hash() + .expect("should be hash"); // // Measure small add @@ -532,7 +506,6 @@ fn should_measure_gas_cost_for_storage_usage_add() { ADD_FUNCTION_SMALL_NAME, RuntimeArgs::default(), ) - .with_protocol_version(*NEW_PROTOCOL_VERSION) .build(); builder_a @@ -540,7 +513,7 @@ fn should_measure_gas_cost_for_storage_usage_add() { .expect_success() .commit(); - builder_a.last_exec_gas_cost() + builder_a.last_exec_gas_consumed() }; let expected_small_add_data = @@ -573,7 +546,6 @@ fn should_measure_gas_cost_for_storage_usage_add() { ADD_FUNCTION_LARGE_NAME, RuntimeArgs::default(), ) - .with_protocol_version(*NEW_PROTOCOL_VERSION) .build(); builder_b @@ -581,7 +553,7 @@ fn should_measure_gas_cost_for_storage_usage_add() { .expect_success() .commit(); - builder_b.last_exec_gas_cost() + builder_b.last_exec_gas_consumed() }; let expected_large_write_data = @@ -609,8 +581,8 @@ fn 
should_measure_gas_cost_for_storage_usage_add() { fn should_measure_unisolated_gas_cost_for_storage_usage_add() { let cost_per_byte = U512::from(StorageCosts::default().gas_per_byte()); - let mut builder = InMemoryWasmTestBuilder::default(); - builder.run_genesis(&*DEFAULT_RUN_GENESIS_REQUEST); + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); let install_exec_request = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, @@ -621,19 +593,16 @@ fn should_measure_unisolated_gas_cost_for_storage_usage_add() { builder.exec(install_exec_request).expect_success().commit(); - // let mut builder_a = builder.clone(); - let account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) .expect("should have account"); - let contract_hash: ContractHash = account + let contract_hash: AddressableEntityHash = account .named_keys() .get(CONTRACT_KEY_NAME) .expect("contract hash") - .into_hash() - .expect("should be hash") - .into(); + .into_entity_hash() + .expect("should be hash"); // // Measure small add @@ -655,7 +624,7 @@ fn should_measure_unisolated_gas_cost_for_storage_usage_add() { .expect_success() .commit(); - builder_a.last_exec_gas_cost() + builder_a.last_exec_gas_consumed() }; let expected_small_add_data = @@ -695,7 +664,7 @@ fn should_measure_unisolated_gas_cost_for_storage_usage_add() { .expect_success() .commit(); - builder_b.last_exec_gas_cost() + builder_b.last_exec_gas_consumed() }; let expected_large_write_data = @@ -720,7 +689,7 @@ fn should_measure_unisolated_gas_cost_for_storage_usage_add() { #[ignore] #[test] -fn should_verify_new_uref_is_charging_for_storage() { +fn should_verify_new_uref_storage_cost() { let mut builder = initialize_isolated_storage_costs(); let install_exec_request = ExecuteRequestBuilder::standard( @@ -728,24 +697,20 @@ fn should_verify_new_uref_is_charging_for_storage() { STORAGE_COSTS_NAME, RuntimeArgs::default(), ) 
- .with_protocol_version(*NEW_PROTOCOL_VERSION) .build(); builder.exec(install_exec_request).expect_success().commit(); let account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) .expect("should have account"); - let balance_before = builder.get_purse_balance(account.main_purse()); - - let contract_hash: ContractHash = account + let contract_hash: AddressableEntityHash = account .named_keys() .get(CONTRACT_KEY_NAME) .expect("contract hash") - .into_hash() - .expect("should be hash") - .into(); + .into_entity_hash() + .expect("should be hash"); let exec_request = ExecuteRequestBuilder::contract_call_by_hash( *DEFAULT_ACCOUNT_ADDR, @@ -753,14 +718,18 @@ fn should_verify_new_uref_is_charging_for_storage() { NEW_UREF_FUNCTION, RuntimeArgs::default(), ) - .with_protocol_version(*NEW_PROTOCOL_VERSION) .build(); builder.exec(exec_request).expect_success().commit(); - let balance_after = builder.get_purse_balance(account.main_purse()); - - assert!(balance_after < balance_before); + assert_eq!( + // should charge for storage of a u64 behind a URef + builder.last_exec_gas_consumed(), + StorageCosts::default().calculate_gas_cost( + StoredValue::CLValue(CLValue::from_t(0u64).expect("should create CLValue")) + .serialized_length() + ) + ) } #[ignore] @@ -773,24 +742,20 @@ fn should_verify_put_key_is_charging_for_storage() { STORAGE_COSTS_NAME, RuntimeArgs::default(), ) - .with_protocol_version(*NEW_PROTOCOL_VERSION) .build(); builder.exec(install_exec_request).expect_success().commit(); let account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) .expect("should have account"); - let balance_before = builder.get_purse_balance(account.main_purse()); - - let contract_hash: ContractHash = account + let contract_hash: AddressableEntityHash = account .named_keys() .get(CONTRACT_KEY_NAME) .expect("contract hash") - .into_hash() - .expect("should be 
hash") - .into(); + .into_entity_hash() + .expect("should be hash"); let exec_request = ExecuteRequestBuilder::contract_call_by_hash( *DEFAULT_ACCOUNT_ADDR, @@ -798,19 +763,25 @@ fn should_verify_put_key_is_charging_for_storage() { PUT_KEY_FUNCTION, RuntimeArgs::default(), ) - .with_protocol_version(*NEW_PROTOCOL_VERSION) .build(); builder.exec(exec_request).expect_success().commit(); - let balance_after = builder.get_purse_balance(account.main_purse()); - - assert!(balance_after < balance_before); + assert_eq!( + // should charge for storage of a named key + builder.last_exec_gas_consumed(), + StorageCosts::default().calculate_gas_cost( + StoredValue::CLValue( + CLValue::from_t(("new_key".to_string(), Key::Hash([0u8; 32]))).unwrap() + ) + .serialized_length() + ), + ) } #[ignore] #[test] -fn should_verify_remove_key_is_charging_for_storage() { +fn should_verify_remove_key_is_not_charging_for_storage() { let mut builder = initialize_isolated_storage_costs(); let install_exec_request = ExecuteRequestBuilder::standard( @@ -818,24 +789,20 @@ fn should_verify_remove_key_is_charging_for_storage() { STORAGE_COSTS_NAME, RuntimeArgs::default(), ) - .with_protocol_version(*NEW_PROTOCOL_VERSION) .build(); builder.exec(install_exec_request).expect_success().commit(); let account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) .expect("should have account"); - let balance_before = builder.get_purse_balance(account.main_purse()); - - let contract_hash: ContractHash = account + let contract_hash: AddressableEntityHash = account .named_keys() .get(CONTRACT_KEY_NAME) .expect("contract hash") - .into_hash() - .expect("should be hash") - .into(); + .into_entity_hash() + .expect("should be hash"); let exec_request = ExecuteRequestBuilder::contract_call_by_hash( *DEFAULT_ACCOUNT_ADDR, @@ -843,14 +810,19 @@ fn should_verify_remove_key_is_charging_for_storage() { REMOVE_KEY_FUNCTION, RuntimeArgs::default(), ) - 
.with_protocol_version(*NEW_PROTOCOL_VERSION) .build(); builder.exec(exec_request).expect_success().commit(); - let balance_after = builder.get_purse_balance(account.main_purse()); - - assert!(balance_after < balance_before); + if builder.chainspec().core_config.enable_addressable_entity { + assert_eq!( + // should charge zero, because we do not charge for storage when removing a key + builder.last_exec_gas_consumed(), + StorageCosts::default().calculate_gas_cost(0), + ) + } else { + assert!(builder.last_exec_gas_consumed() > Gas::zero()) + } } #[ignore] @@ -863,24 +835,20 @@ fn should_verify_create_contract_at_hash_is_charging_for_storage() { STORAGE_COSTS_NAME, RuntimeArgs::default(), ) - .with_protocol_version(*NEW_PROTOCOL_VERSION) .build(); builder.exec(install_exec_request).expect_success().commit(); let account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) .expect("should have account"); - let balance_before = builder.get_purse_balance(account.main_purse()); - - let contract_hash: ContractHash = account + let contract_hash: AddressableEntityHash = account .named_keys() .get(CONTRACT_KEY_NAME) .expect("contract hash") - .into_hash() - .expect("should be hash") - .into(); + .into_entity_hash() + .expect("should be hash"); let exec_request = ExecuteRequestBuilder::contract_call_by_hash( *DEFAULT_ACCOUNT_ADDR, @@ -888,14 +856,18 @@ fn should_verify_create_contract_at_hash_is_charging_for_storage() { CREATE_CONTRACT_PACKAGE_AT_HASH_FUNCTION, RuntimeArgs::default(), ) - .with_protocol_version(*NEW_PROTOCOL_VERSION) .build(); builder.exec(exec_request).expect_success().commit(); - let balance_after = builder.get_purse_balance(account.main_purse()); - - assert!(balance_after < balance_before); + assert_eq!( + // should charge at least enough for storage of a package and unit CLValue (for a URef) + builder.last_exec_gas_consumed(), + StorageCosts::default().calculate_gas_cost( + 
StoredValue::ContractPackage(ContractPackage::default()).serialized_length() + + StoredValue::CLValue(CLValue::unit()).serialized_length() + ) + ) } #[ignore] @@ -908,24 +880,20 @@ fn should_verify_create_contract_user_group_is_charging_for_storage() { STORAGE_COSTS_NAME, RuntimeArgs::default(), ) - .with_protocol_version(*NEW_PROTOCOL_VERSION) .build(); builder.exec(install_exec_request).expect_success().commit(); let account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) .expect("should have account"); - let balance_before = builder.get_purse_balance(account.main_purse()); - - let contract_hash: ContractHash = account + let contract_hash: AddressableEntityHash = account .named_keys() .get(CONTRACT_KEY_NAME) .expect("contract hash") - .into_hash() - .expect("should be hash") - .into(); + .into_entity_hash() + .expect("should be hash"); let exec_request = ExecuteRequestBuilder::contract_call_by_hash( *DEFAULT_ACCOUNT_ADDR, @@ -933,16 +901,30 @@ fn should_verify_create_contract_user_group_is_charging_for_storage() { CREATE_CONTRACT_USER_GROUP_FUNCTION_FUNCTION, RuntimeArgs::default(), ) - .with_protocol_version(*NEW_PROTOCOL_VERSION) .build(); builder.exec(exec_request).expect_success().commit(); - let balance_after = builder.get_purse_balance(account.main_purse()); - - assert!(balance_after < balance_before); + let mut groups = Groups::new(); + groups.insert(Group::new("Label"), BTreeSet::new()); + + let mut package = ContractPackage::new( + URef::default(), + [(ContractVersionKey::new(2, 1), ContractHash::new([0u8; 32]))] + .iter() + .cloned() + .collect::>(), + Default::default(), + groups, + Default::default(), + ); - let balance_before = balance_after; + assert_eq!( + // should charge for storage of the new package + builder.last_exec_gas_consumed(), + StorageCosts::default() + .calculate_gas_cost(StoredValue::ContractPackage(package.clone()).serialized_length()), + ); let exec_request = 
ExecuteRequestBuilder::contract_call_by_hash( *DEFAULT_ACCOUNT_ADDR, @@ -950,16 +932,20 @@ fn should_verify_create_contract_user_group_is_charging_for_storage() { PROVISION_UREFS_FUNCTION, RuntimeArgs::default(), ) - .with_protocol_version(*NEW_PROTOCOL_VERSION) .build(); builder.exec(exec_request).expect_success().commit(); - let balance_after = builder.get_purse_balance(account.main_purse()); + package + .groups_mut() + .get_mut(&Group::new("Label")) + .unwrap() + .insert(URef::new([0u8; 32], Default::default())); - assert!(balance_after < balance_before); - - let balance_before = balance_after; + assert!( + // should charge for storage of the new package and a unit CLValue (for a URef) + builder.last_exec_gas_consumed() > Gas::zero() + ); let exec_request = ExecuteRequestBuilder::contract_call_by_hash( *DEFAULT_ACCOUNT_ADDR, @@ -967,14 +953,16 @@ fn should_verify_create_contract_user_group_is_charging_for_storage() { REMOVE_CONTRACT_USER_GROUP_FUNCTION, RuntimeArgs::default(), ) - .with_protocol_version(*NEW_PROTOCOL_VERSION) .build(); builder.exec(exec_request).expect_success().commit(); - let balance_after = builder.get_purse_balance(account.main_purse()); + package.remove_group(&Group::new("Label")); - assert!(balance_after < balance_before); + assert!( + // should charge for storage of the new package + builder.last_exec_gas_consumed() > Gas::zero() + ) } #[ignore] @@ -987,24 +975,20 @@ fn should_verify_subcall_new_uref_is_charging_for_storage() { STORAGE_COSTS_NAME, RuntimeArgs::default(), ) - .with_protocol_version(*NEW_PROTOCOL_VERSION) .build(); builder.exec(install_exec_request).expect_success().commit(); let account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) .expect("should have account"); - let balance_before = builder.get_purse_balance(account.main_purse()); - - let contract_hash: ContractHash = account + let contract_hash: AddressableEntityHash = account .named_keys() 
.get(CONTRACT_KEY_NAME) .expect("contract hash") - .into_hash() - .expect("should be hash") - .into(); + .into_entity_hash() + .expect("should be hash"); let exec_request = ExecuteRequestBuilder::contract_call_by_hash( *DEFAULT_ACCOUNT_ADDR, @@ -1016,12 +1000,6 @@ fn should_verify_subcall_new_uref_is_charging_for_storage() { builder.exec(exec_request).expect_success().commit(); - let balance_after = builder.get_purse_balance(account.main_purse()); - - assert!(balance_after < balance_before); - - let balance_before = balance_after; - let exec_request = ExecuteRequestBuilder::contract_call_by_hash( *DEFAULT_ACCOUNT_ADDR, contract_hash, @@ -1032,12 +1010,6 @@ fn should_verify_subcall_new_uref_is_charging_for_storage() { builder.exec(exec_request).expect_success().commit(); - let balance_after = builder.get_purse_balance(account.main_purse()); - - assert!(balance_after < balance_before); - - let balance_before = balance_after; - let exec_request = ExecuteRequestBuilder::contract_call_by_hash( *DEFAULT_ACCOUNT_ADDR, contract_hash, @@ -1048,7 +1020,12 @@ fn should_verify_subcall_new_uref_is_charging_for_storage() { builder.exec(exec_request).expect_success().commit(); - let balance_after = builder.get_purse_balance(account.main_purse()); - - assert!(balance_after < balance_before); + assert_eq!( + // should charge for storage of a u64 behind a URef + builder.last_exec_gas_consumed(), + StorageCosts::default().calculate_gas_cost( + StoredValue::CLValue(CLValue::from_t(0u64).expect("should create CLValue")) + .serialized_length() + ) + ) } diff --git a/execution_engine_testing/tests/src/test/system_contracts/auction/bids.rs b/execution_engine_testing/tests/src/test/system_contracts/auction/bids.rs index 4bb1bdb05d..61ff8b5617 100644 --- a/execution_engine_testing/tests/src/test/system_contracts/auction/bids.rs +++ b/execution_engine_testing/tests/src/test/system_contracts/auction/bids.rs @@ -1,42 +1,43 @@ -use std::{collections::BTreeSet, iter::FromIterator}; - use 
assert_matches::assert_matches; use num_traits::{One, Zero}; use once_cell::sync::Lazy; +use std::{ + collections::{BTreeMap, BTreeSet}, + iter::FromIterator, +}; +use tempfile::TempDir; use casper_engine_test_support::{ - internal::{ - utils, ExecuteRequestBuilder, InMemoryWasmTestBuilder, DEFAULT_ACCOUNTS, - DEFAULT_AUCTION_DELAY, DEFAULT_GENESIS_TIMESTAMP_MILLIS, - DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS, DEFAULT_RUN_GENESIS_REQUEST, DEFAULT_UNBONDING_DELAY, - SYSTEM_ADDR, TIMESTAMP_MILLIS_INCREMENT, - }, - DEFAULT_ACCOUNT_ADDR, DEFAULT_ACCOUNT_INITIAL_BALANCE, MINIMUM_ACCOUNT_CREATION_BALANCE, + genesis_config_builder::GenesisConfigBuilder, utils, ChainspecConfig, ExecuteRequestBuilder, + LmdbWasmTestBuilder, StepRequestBuilder, UpgradeRequestBuilder, DEFAULT_ACCOUNTS, + DEFAULT_ACCOUNT_ADDR, DEFAULT_ACCOUNT_INITIAL_BALANCE, DEFAULT_AUCTION_DELAY, + DEFAULT_CHAINSPEC_REGISTRY, DEFAULT_EXEC_CONFIG, DEFAULT_GENESIS_CONFIG_HASH, + DEFAULT_GENESIS_TIMESTAMP_MILLIS, DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS, + DEFAULT_MAXIMUM_DELEGATION_AMOUNT, DEFAULT_PROTOCOL_VERSION, DEFAULT_ROUND_SEIGNIORAGE_RATE, + DEFAULT_UNBONDING_DELAY, LOCAL_GENESIS_REQUEST, MINIMUM_ACCOUNT_CREATION_BALANCE, SYSTEM_ADDR, + TIMESTAMP_MILLIS_INCREMENT, }; use casper_execution_engine::{ - core::{ - engine_state::{ - self, - genesis::{GenesisAccount, GenesisValidator}, - }, - execution, - }, - shared::motes::Motes, + engine_state::{engine_config::DEFAULT_MINIMUM_DELEGATION_AMOUNT, Error}, + execution::ExecError, }; +use casper_storage::data_access_layer::{GenesisRequest, HandleFeeMode}; + +use crate::lmdb_fixture; use casper_types::{ self, account::AccountHash, api_error::ApiError, runtime_args, - system::{ - self, - auction::{ - self, Bids, DelegationRate, EraValidators, UnbondingPurses, ValidatorWeights, - ARG_AMOUNT, ARG_DELEGATION_RATE, ARG_DELEGATOR, ARG_PUBLIC_KEY, ARG_VALIDATOR, - ERA_ID_KEY, INITIAL_ERA_ID, - }, + system::auction::{ + self, BidKind, BidsExt, DelegationRate, DelegatorKind, 
EraValidators, + Error as AuctionError, UnbondKind, ValidatorWeights, ARG_AMOUNT, ARG_DELEGATION_RATE, + ARG_DELEGATOR, ARG_ENTRY_POINT, ARG_MAXIMUM_DELEGATION_AMOUNT, + ARG_MINIMUM_DELEGATION_AMOUNT, ARG_NEW_PUBLIC_KEY, ARG_NEW_VALIDATOR, ARG_PUBLIC_KEY, + ARG_REWARDS_MAP, ARG_VALIDATOR, ERA_ID_KEY, INITIAL_ERA_ID, METHOD_DISTRIBUTE, }, - EraId, PublicKey, RuntimeArgs, SecretKey, U512, + EntityAddr, EraId, GenesisAccount, GenesisValidator, HoldBalanceHandling, Key, Motes, + ProtocolVersion, PublicKey, SecretKey, TransactionHash, DEFAULT_MINIMUM_BID_AMOUNT, U256, U512, }; const ARG_TARGET: &str = "target"; @@ -47,102 +48,106 @@ const CONTRACT_ADD_BID: &str = "add_bid.wasm"; const CONTRACT_WITHDRAW_BID: &str = "withdraw_bid.wasm"; const CONTRACT_DELEGATE: &str = "delegate.wasm"; const CONTRACT_UNDELEGATE: &str = "undelegate.wasm"; +const CONTRACT_REDELEGATE: &str = "redelegate.wasm"; +const CONTRACT_CHANGE_BID_PUBLIC_KEY: &str = "change_bid_public_key.wasm"; const TRANSFER_AMOUNT: u64 = MINIMUM_ACCOUNT_CREATION_BALANCE + 1000; const ADD_BID_AMOUNT_1: u64 = 95_000; const ADD_BID_AMOUNT_2: u64 = 47_500; +const ADD_BID_AMOUNT_3: u64 = 200_000; const ADD_BID_DELEGATION_RATE_1: DelegationRate = 10; const BID_AMOUNT_2: u64 = 5_000; const ADD_BID_DELEGATION_RATE_2: DelegationRate = 15; const WITHDRAW_BID_AMOUNT_2: u64 = 15_000; +const ADD_BID_DELEGATION_RATE_3: DelegationRate = 20; -const DELEGATE_AMOUNT_1: u64 = 125_000; -const DELEGATE_AMOUNT_2: u64 = 15_000; +const DELEGATE_AMOUNT_1: u64 = 125_000 + DEFAULT_MINIMUM_DELEGATION_AMOUNT; +const DELEGATE_AMOUNT_2: u64 = 15_000 + DEFAULT_MINIMUM_DELEGATION_AMOUNT; const UNDELEGATE_AMOUNT_1: u64 = 35_000; +const UNDELEGATE_AMOUNT_2: u64 = 5_000; const SYSTEM_TRANSFER_AMOUNT: u64 = MINIMUM_ACCOUNT_CREATION_BALANCE; const WEEK_MILLIS: u64 = 7 * 24 * 60 * 60 * 1000; static NON_FOUNDER_VALIDATOR_1_PK: Lazy = Lazy::new(|| { - SecretKey::ed25519_from_bytes([3; SecretKey::ED25519_LENGTH]) - .unwrap() - .into() + let secret_key = 
SecretKey::ed25519_from_bytes([3; SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) }); static NON_FOUNDER_VALIDATOR_1_ADDR: Lazy = Lazy::new(|| AccountHash::from(&*NON_FOUNDER_VALIDATOR_1_PK)); static NON_FOUNDER_VALIDATOR_2_PK: Lazy = Lazy::new(|| { - SecretKey::ed25519_from_bytes([4; SecretKey::ED25519_LENGTH]) - .unwrap() - .into() + let secret_key = SecretKey::ed25519_from_bytes([4; SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) }); static NON_FOUNDER_VALIDATOR_2_ADDR: Lazy = Lazy::new(|| AccountHash::from(&*NON_FOUNDER_VALIDATOR_2_PK)); +static NON_FOUNDER_VALIDATOR_3_PK: Lazy = Lazy::new(|| { + let secret_key = SecretKey::ed25519_from_bytes([5; SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) +}); +static NON_FOUNDER_VALIDATOR_3_ADDR: Lazy = + Lazy::new(|| AccountHash::from(&*NON_FOUNDER_VALIDATOR_3_PK)); + static ACCOUNT_1_PK: Lazy = Lazy::new(|| { - SecretKey::ed25519_from_bytes([200; SecretKey::ED25519_LENGTH]) - .unwrap() - .into() + let secret_key = SecretKey::ed25519_from_bytes([200; SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) }); static ACCOUNT_1_ADDR: Lazy = Lazy::new(|| AccountHash::from(&*ACCOUNT_1_PK)); const ACCOUNT_1_BALANCE: u64 = MINIMUM_ACCOUNT_CREATION_BALANCE; const ACCOUNT_1_BOND: u64 = 100_000; static ACCOUNT_2_PK: Lazy = Lazy::new(|| { - SecretKey::ed25519_from_bytes([202; SecretKey::ED25519_LENGTH]) - .unwrap() - .into() + let secret_key = SecretKey::ed25519_from_bytes([202; SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) }); static ACCOUNT_2_ADDR: Lazy = Lazy::new(|| AccountHash::from(&*ACCOUNT_2_PK)); const ACCOUNT_2_BALANCE: u64 = MINIMUM_ACCOUNT_CREATION_BALANCE; const ACCOUNT_2_BOND: u64 = 200_000; static BID_ACCOUNT_1_PK: Lazy = Lazy::new(|| { - SecretKey::ed25519_from_bytes([204; SecretKey::ED25519_LENGTH]) - .unwrap() - .into() + let secret_key = SecretKey::ed25519_from_bytes([204; SecretKey::ED25519_LENGTH]).unwrap(); + 
PublicKey::from(&secret_key) }); static BID_ACCOUNT_1_ADDR: Lazy = Lazy::new(|| AccountHash::from(&*BID_ACCOUNT_1_PK)); const BID_ACCOUNT_1_BALANCE: u64 = MINIMUM_ACCOUNT_CREATION_BALANCE; +const BID_ACCOUNT_1_BOND: u64 = 200_000; static BID_ACCOUNT_2_PK: Lazy = Lazy::new(|| { - SecretKey::ed25519_from_bytes([206; SecretKey::ED25519_LENGTH]) - .unwrap() - .into() + let secret_key = SecretKey::ed25519_from_bytes([206; SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) }); static BID_ACCOUNT_2_ADDR: Lazy = Lazy::new(|| AccountHash::from(&*BID_ACCOUNT_2_PK)); const BID_ACCOUNT_2_BALANCE: u64 = MINIMUM_ACCOUNT_CREATION_BALANCE; static VALIDATOR_1: Lazy = Lazy::new(|| { - SecretKey::ed25519_from_bytes([3; SecretKey::ED25519_LENGTH]) - .unwrap() - .into() + let secret_key = SecretKey::ed25519_from_bytes([3; SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) }); static DELEGATOR_1: Lazy = Lazy::new(|| { - SecretKey::ed25519_from_bytes([205; SecretKey::ED25519_LENGTH]) - .unwrap() - .into() + let secret_key = SecretKey::ed25519_from_bytes([205; SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) }); static DELEGATOR_2: Lazy = Lazy::new(|| { - SecretKey::ed25519_from_bytes([206; SecretKey::ED25519_LENGTH]) - .unwrap() - .into() + let secret_key = SecretKey::ed25519_from_bytes([207; SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) }); static VALIDATOR_1_ADDR: Lazy = Lazy::new(|| AccountHash::from(&*VALIDATOR_1)); static DELEGATOR_1_ADDR: Lazy = Lazy::new(|| AccountHash::from(&*DELEGATOR_1)); static DELEGATOR_2_ADDR: Lazy = Lazy::new(|| AccountHash::from(&*DELEGATOR_2)); const VALIDATOR_1_STAKE: u64 = 1_000_000; -const DELEGATOR_1_STAKE: u64 = 1_500_000; +const DELEGATOR_1_STAKE: u64 = 1_500_000 + DEFAULT_MINIMUM_DELEGATION_AMOUNT; const DELEGATOR_1_BALANCE: u64 = DEFAULT_ACCOUNT_INITIAL_BALANCE; -const DELEGATOR_2_STAKE: u64 = 2_000_000; +const DELEGATOR_2_STAKE: u64 = 2_000_000 + 
DEFAULT_MINIMUM_DELEGATION_AMOUNT; const DELEGATOR_2_BALANCE: u64 = DEFAULT_ACCOUNT_INITIAL_BALANCE; const VALIDATOR_1_DELEGATION_RATE: DelegationRate = 0; const EXPECTED_INITIAL_RELEASE_TIMESTAMP_MILLIS: u64 = - DEFAULT_GENESIS_TIMESTAMP_MILLIS + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS; + DEFAULT_GENESIS_TIMESTAMP_MILLIS + CASPER_LOCKED_FUNDS_PERIOD_MILLIS; const WEEK_TIMESTAMPS: [u64; 14] = [ EXPECTED_INITIAL_RELEASE_TIMESTAMP_MILLIS, @@ -161,14 +166,18 @@ const WEEK_TIMESTAMPS: [u64; 14] = [ EXPECTED_INITIAL_RELEASE_TIMESTAMP_MILLIS + (WEEK_MILLIS * 13), ]; +const DAY_MILLIS: u64 = 24 * 60 * 60 * 1000; +const CASPER_VESTING_SCHEDULE_PERIOD_MILLIS: u64 = 91 * DAY_MILLIS; +const CASPER_LOCKED_FUNDS_PERIOD_MILLIS: u64 = 90 * DAY_MILLIS; + #[ignore] #[test] -fn should_run_add_bid() { +fn should_add_new_bid() { let accounts = { let mut tmp: Vec = DEFAULT_ACCOUNTS.clone(); let account_1 = GenesisAccount::account( BID_ACCOUNT_1_PK.clone(), - Motes::new(BID_ACCOUNT_1_BALANCE.into()), + Motes::new(BID_ACCOUNT_1_BALANCE), None, ); tmp.push(account_1); @@ -177,9 +186,9 @@ fn should_run_add_bid() { let run_genesis_request = utils::create_run_genesis_request(accounts); - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - builder.run_genesis(&run_genesis_request); + builder.run_genesis(run_genesis_request); let exec_request_1 = ExecuteRequestBuilder::standard( *BID_ACCOUNT_1_ADDR, @@ -192,18 +201,112 @@ fn should_run_add_bid() { ) .build(); - builder.exec(exec_request_1).commit().expect_success(); + builder.exec(exec_request_1).expect_success().commit(); - let bids: Bids = builder.get_bids(); + let bids = builder.get_bids(); assert_eq!(bids.len(), 1); + let active_bid = bids.validator_bid(&BID_ACCOUNT_1_PK.clone()).unwrap(); + assert_eq!( + builder.get_purse_balance(*active_bid.bonding_purse()), + U512::from(ADD_BID_AMOUNT_1) + ); + assert_eq!(*active_bid.delegation_rate(), ADD_BID_DELEGATION_RATE_1); +} + +#[ignore] 
+#[test] +fn should_add_new_bid_with_limits() { + let accounts = { + let mut tmp: Vec = DEFAULT_ACCOUNTS.clone(); + let account_1 = GenesisAccount::account( + BID_ACCOUNT_1_PK.clone(), + Motes::new(BID_ACCOUNT_1_BALANCE), + None, + ); + tmp.push(account_1); + tmp + }; + + let run_genesis_request = utils::create_run_genesis_request(accounts); + + let mut builder = LmdbWasmTestBuilder::default(); - let active_bid = bids.get(&BID_ACCOUNT_1_PK.clone()).unwrap(); + builder.run_genesis(run_genesis_request); + + let exec_request_0 = ExecuteRequestBuilder::standard( + *BID_ACCOUNT_1_ADDR, + CONTRACT_ADD_BID, + runtime_args! { + ARG_PUBLIC_KEY => BID_ACCOUNT_1_PK.clone(), + ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1), + ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1, + // Below global minimum. + ARG_MINIMUM_DELEGATION_AMOUNT => 1_000_000_000u64, + }, + ) + .build(); + + builder.exec(exec_request_0).expect_failure(); + + let exec_request_1 = ExecuteRequestBuilder::standard( + *BID_ACCOUNT_1_ADDR, + CONTRACT_ADD_BID, + runtime_args! 
{ + ARG_PUBLIC_KEY => BID_ACCOUNT_1_PK.clone(), + ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1), + ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1, + ARG_MINIMUM_DELEGATION_AMOUNT => 600_000_000_000u64, + ARG_MAXIMUM_DELEGATION_AMOUNT => 900_000_000_000u64, + }, + ) + .build(); + + builder.exec(exec_request_1).expect_success().commit(); + + let bids = builder.get_bids(); + + assert_eq!(bids.len(), 1); + let active_bid = bids.validator_bid(&BID_ACCOUNT_1_PK.clone()).unwrap(); assert_eq!( builder.get_purse_balance(*active_bid.bonding_purse()), U512::from(ADD_BID_AMOUNT_1) ); assert_eq!(*active_bid.delegation_rate(), ADD_BID_DELEGATION_RATE_1); +} + +#[ignore] +#[test] +fn should_increase_existing_bid() { + let accounts = { + let mut tmp: Vec = DEFAULT_ACCOUNTS.clone(); + let account_1 = GenesisAccount::account( + BID_ACCOUNT_1_PK.clone(), + Motes::new(BID_ACCOUNT_1_BALANCE), + None, + ); + tmp.push(account_1); + tmp + }; + + let run_genesis_request = utils::create_run_genesis_request(accounts); + + let mut builder = LmdbWasmTestBuilder::default(); + + builder.run_genesis(run_genesis_request); + + let exec_request_1 = ExecuteRequestBuilder::standard( + *BID_ACCOUNT_1_ADDR, + CONTRACT_ADD_BID, + runtime_args! 
{ + ARG_PUBLIC_KEY => BID_ACCOUNT_1_PK.clone(), + ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1), + ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1, + }, + ) + .build(); + + builder.exec(exec_request_1).expect_success().commit(); // 2nd bid top-up let exec_request_2 = ExecuteRequestBuilder::standard( @@ -217,21 +320,54 @@ fn should_run_add_bid() { ) .build(); - builder.exec(exec_request_2).commit().expect_success(); + builder.exec(exec_request_2).expect_success().commit(); - let bids: Bids = builder.get_bids(); + let bids = builder.get_bids(); assert_eq!(bids.len(), 1); - let active_bid = bids.get(&BID_ACCOUNT_1_PK.clone()).unwrap(); + let active_bid = bids.validator_bid(&BID_ACCOUNT_1_PK.clone()).unwrap(); assert_eq!( builder.get_purse_balance(*active_bid.bonding_purse()), U512::from(ADD_BID_AMOUNT_1 + BID_AMOUNT_2) ); assert_eq!(*active_bid.delegation_rate(), ADD_BID_DELEGATION_RATE_2); +} - // 3. withdraw some amount - let exec_request_3 = ExecuteRequestBuilder::standard( +#[ignore] +#[test] +fn should_decrease_existing_bid() { + let accounts = { + let mut tmp: Vec = DEFAULT_ACCOUNTS.clone(); + let account_1 = GenesisAccount::account( + BID_ACCOUNT_1_PK.clone(), + Motes::new(BID_ACCOUNT_1_BALANCE), + None, + ); + tmp.push(account_1); + tmp + }; + + let run_genesis_request = utils::create_run_genesis_request(accounts); + + let mut builder = LmdbWasmTestBuilder::default(); + + builder.run_genesis(run_genesis_request); + + let bid_request = ExecuteRequestBuilder::standard( + *BID_ACCOUNT_1_ADDR, + CONTRACT_ADD_BID, + runtime_args! { + ARG_PUBLIC_KEY => BID_ACCOUNT_1_PK.clone(), + ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1), + ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1, + }, + ) + .build(); + builder.exec(bid_request).expect_success().commit(); + + // withdraw some amount + let withdraw_request = ExecuteRequestBuilder::standard( *BID_ACCOUNT_1_ADDR, CONTRACT_WITHDRAW_BID, runtime_args! 
{ @@ -240,30 +376,34 @@ fn should_run_add_bid() { }, ) .build(); - builder.exec(exec_request_3).commit().expect_success(); + builder.exec(withdraw_request).commit().expect_success(); - let bids: Bids = builder.get_bids(); + let bids: Vec<_> = builder + .get_bids() + .into_iter() + .filter(|bid| !bid.is_unbond()) + .collect(); assert_eq!(bids.len(), 1); - let active_bid = bids.get(&BID_ACCOUNT_1_PK.clone()).unwrap(); + let active_bid = bids.validator_bid(&BID_ACCOUNT_1_PK.clone()).unwrap(); assert_eq!( builder.get_purse_balance(*active_bid.bonding_purse()), // Since we don't pay out immediately `WITHDRAW_BID_AMOUNT_2` is locked in unbonding queue - U512::from(ADD_BID_AMOUNT_1 + BID_AMOUNT_2) + U512::from(ADD_BID_AMOUNT_1) ); - let unbonding_purses: UnbondingPurses = builder.get_withdraws(); - let unbond_list = unbonding_purses - .get(&BID_ACCOUNT_1_ADDR) - .expect("should have unbond"); - assert_eq!(unbond_list.len(), 1); - assert_eq!(unbond_list[0].unbonder_public_key(), &*BID_ACCOUNT_1_PK); - assert_eq!(unbond_list[0].validator_public_key(), &*BID_ACCOUNT_1_PK); + let unbonds = builder.get_unbonds(); + let unbond_kind = UnbondKind::Validator(BID_ACCOUNT_1_PK.clone()); + let unbonds = unbonds.get(&unbond_kind).expect("should have unbonded"); + let unbond = unbonds.first().expect("must have at least an unbond"); + assert_eq!(unbond.eras().len(), 1); + assert_eq!(unbond.unbond_kind(), &unbond_kind); + assert_eq!(unbond.validator_public_key(), &*BID_ACCOUNT_1_PK); + + let era = unbond.eras().first().expect("should have era"); // `WITHDRAW_BID_AMOUNT_2` is in unbonding list - - assert_eq!(unbond_list[0].amount(), &U512::from(WITHDRAW_BID_AMOUNT_2),); - - assert_eq!(unbond_list[0].era_of_creation(), INITIAL_ERA_ID,); + assert_eq!(era.amount(), &U512::from(WITHDRAW_BID_AMOUNT_2),); + assert_eq!(era.era_of_creation(), INITIAL_ERA_ID,); } #[ignore] @@ -273,7 +413,7 @@ fn should_run_delegate_and_undelegate() { let mut tmp: Vec = DEFAULT_ACCOUNTS.clone(); let account_1 = 
GenesisAccount::account( BID_ACCOUNT_1_PK.clone(), - Motes::new(BID_ACCOUNT_1_BALANCE.into()), + Motes::new(BID_ACCOUNT_1_BALANCE), None, ); tmp.push(account_1); @@ -282,9 +422,9 @@ fn should_run_delegate_and_undelegate() { let run_genesis_request = utils::create_run_genesis_request(accounts); - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - builder.run_genesis(&run_genesis_request); + builder.run_genesis(run_genesis_request); let transfer_request_1 = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, @@ -318,23 +458,29 @@ fn should_run_delegate_and_undelegate() { ) .build(); - builder.exec(transfer_request_1).commit().expect_success(); - builder.exec(transfer_request_2).commit().expect_success(); - builder.exec(add_bid_request_1).commit().expect_success(); + builder.exec(transfer_request_1).expect_success().commit(); + builder.exec(transfer_request_2).expect_success().commit(); + builder.exec(add_bid_request_1).expect_success().commit(); let auction_hash = builder.get_auction_contract_hash(); - let bids: Bids = builder.get_bids(); + let bids: Vec = builder + .get_bids() + .into_iter() + .filter(|bid| !bid.is_unbond()) + .collect(); assert_eq!(bids.len(), 1); - let active_bid = bids.get(&NON_FOUNDER_VALIDATOR_1_PK).unwrap(); + let active_bid = bids.validator_bid(&NON_FOUNDER_VALIDATOR_1_PK).unwrap(); assert_eq!( builder.get_purse_balance(*active_bid.bonding_purse()), U512::from(ADD_BID_AMOUNT_1) ); assert_eq!(*active_bid.delegation_rate(), ADD_BID_DELEGATION_RATE_1); + let auction_key = Key::Hash(auction_hash.value()); + let auction_stored_value = builder - .query(None, auction_hash.into(), &[]) + .query(None, auction_key, &[]) .expect("should query auction hash"); let _auction = auction_stored_value .as_contract() @@ -354,11 +500,23 @@ fn should_run_delegate_and_undelegate() { builder.exec(exec_request_1).commit().expect_success(); - let bids: Bids = builder.get_bids(); - assert_eq!(bids.len(), 1); 
- let delegators = bids[&NON_FOUNDER_VALIDATOR_1_PK].delegators(); + let bids: Vec<_> = builder + .get_bids() + .into_iter() + .filter(|bid| !bid.is_unbond()) + .collect(); + assert_eq!(bids.len(), 2); + let delegators = bids + .delegators_by_validator_public_key(&NON_FOUNDER_VALIDATOR_1_PK) + .expect("should have delegators"); assert_eq!(delegators.len(), 1); - let delegated_amount_1 = *delegators[&BID_ACCOUNT_1_PK].staked_amount(); + let delegator = bids + .delegator_by_kind( + &NON_FOUNDER_VALIDATOR_1_PK, + &DelegatorKind::PublicKey(BID_ACCOUNT_1_PK.clone()), + ) + .expect("should have account1 delegation"); + let delegated_amount_1 = delegator.staked_amount(); assert_eq!(delegated_amount_1, U512::from(DELEGATE_AMOUNT_1)); // 2nd bid top-up @@ -375,11 +533,23 @@ fn should_run_delegate_and_undelegate() { builder.exec(exec_request_2).commit().expect_success(); - let bids: Bids = builder.get_bids(); - assert_eq!(bids.len(), 1); - let delegators = bids[&NON_FOUNDER_VALIDATOR_1_PK].delegators(); + let bids: Vec<_> = builder + .get_bids() + .into_iter() + .filter(|bid| !bid.is_unbond()) + .collect(); + assert_eq!(bids.len(), 2); + let delegators = bids + .delegators_by_validator_public_key(&NON_FOUNDER_VALIDATOR_1_PK) + .expect("should have delegators"); assert_eq!(delegators.len(), 1); - let delegated_amount_1 = *delegators[&BID_ACCOUNT_1_PK].staked_amount(); + let delegator = bids + .delegator_by_kind( + &NON_FOUNDER_VALIDATOR_1_PK, + &DelegatorKind::PublicKey(BID_ACCOUNT_1_PK.clone()), + ) + .expect("should have account1 delegation"); + let delegated_amount_1 = delegator.staked_amount(); assert_eq!( delegated_amount_1, U512::from(DELEGATE_AMOUNT_1 + DELEGATE_AMOUNT_2) @@ -395,87 +565,68 @@ fn should_run_delegate_and_undelegate() { }, ) .build(); - builder.exec(exec_request_3).commit().expect_success(); + builder.exec(exec_request_3).expect_success().commit(); - let bids: Bids = builder.get_bids(); - assert_eq!(bids.len(), 1); - let delegators = 
bids[&NON_FOUNDER_VALIDATOR_1_PK].delegators(); + let bids: Vec<_> = builder + .get_bids() + .into_iter() + .filter(|bid| !bid.is_unbond()) + .collect(); + assert_eq!(bids.len(), 2); + let delegators = bids + .delegators_by_validator_public_key(&NON_FOUNDER_VALIDATOR_1_PK) + .expect("should have delegators"); assert_eq!(delegators.len(), 1); - let delegated_amount_1 = *delegators[&BID_ACCOUNT_1_PK].staked_amount(); + let delegator = bids + .delegator_by_kind( + &NON_FOUNDER_VALIDATOR_1_PK, + &DelegatorKind::PublicKey(BID_ACCOUNT_1_PK.clone()), + ) + .expect("should have account1 delegation"); + let delegated_amount_1 = delegator.staked_amount(); assert_eq!( delegated_amount_1, U512::from(DELEGATE_AMOUNT_1 + DELEGATE_AMOUNT_2 - UNDELEGATE_AMOUNT_1) ); - let unbonding_purses: UnbondingPurses = builder.get_withdraws(); + let unbonding_purses = builder.get_unbonds(); assert_eq!(unbonding_purses.len(), 1); - let unbond_list = unbonding_purses - .get(&NON_FOUNDER_VALIDATOR_1_ADDR) + let unbond_kind = UnbondKind::DelegatedPublicKey(BID_ACCOUNT_1_PK.clone()); + let unbond = unbonding_purses + .get(&unbond_kind) .expect("should have unbonding purse for non founder validator"); - assert_eq!(unbond_list.len(), 1); - assert_eq!( - unbond_list[0].validator_public_key(), - &*NON_FOUNDER_VALIDATOR_1_PK - ); - assert_eq!(unbond_list[0].unbonder_public_key(), &*BID_ACCOUNT_1_PK); - assert_eq!(unbond_list[0].amount(), &U512::from(UNDELEGATE_AMOUNT_1)); - assert!(!unbond_list[0].is_validator()); - - assert_eq!(unbond_list[0].era_of_creation(), INITIAL_ERA_ID); + let unbond = unbond.first().expect("must get unbond"); + assert_eq!(unbond.eras().len(), 1); + assert_eq!(unbond.validator_public_key(), &*NON_FOUNDER_VALIDATOR_1_PK); + assert_eq!(unbond.unbond_kind(), &unbond_kind); + assert!(!unbond.is_validator()); + let era = unbond.eras().first().expect("should have era"); + assert_eq!(era.amount(), &U512::from(UNDELEGATE_AMOUNT_1)); + assert_eq!(era.era_of_creation(), INITIAL_ERA_ID); 
} #[ignore] #[test] -fn should_calculate_era_validators() { - assert_ne!(*ACCOUNT_1_ADDR, *ACCOUNT_2_ADDR,); - assert_ne!(*ACCOUNT_2_ADDR, *BID_ACCOUNT_1_ADDR,); - assert_ne!(*ACCOUNT_2_ADDR, *DEFAULT_ACCOUNT_ADDR,); +fn should_run_delegate_with_delegation_amount_limits() { let accounts = { let mut tmp: Vec = DEFAULT_ACCOUNTS.clone(); let account_1 = GenesisAccount::account( - ACCOUNT_1_PK.clone(), - Motes::new(ACCOUNT_1_BALANCE.into()), - Some(GenesisValidator::new( - Motes::new(ACCOUNT_1_BOND.into()), - DelegationRate::zero(), - )), - ); - let account_2 = GenesisAccount::account( - ACCOUNT_2_PK.clone(), - Motes::new(ACCOUNT_2_BALANCE.into()), - Some(GenesisValidator::new( - Motes::new(ACCOUNT_2_BOND.into()), - DelegationRate::zero(), - )), - ); - let account_3 = GenesisAccount::account( BID_ACCOUNT_1_PK.clone(), - Motes::new(BID_ACCOUNT_1_BALANCE.into()), + Motes::new(BID_ACCOUNT_1_BALANCE), None, ); tmp.push(account_1); - tmp.push(account_2); - tmp.push(account_3); tmp }; let run_genesis_request = utils::create_run_genesis_request(accounts); - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - builder.run_genesis(&run_genesis_request); + builder.run_genesis(run_genesis_request); - let transfer_request_1 = ExecuteRequestBuilder::standard( - *DEFAULT_ACCOUNT_ADDR, - CONTRACT_TRANSFER_TO_ACCOUNT, - runtime_args! { - ARG_TARGET => *SYSTEM_ADDR, - ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) - }, - ) - .build(); - let transfer_request_2 = ExecuteRequestBuilder::standard( + let transfer_request = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, CONTRACT_TRANSFER_TO_ACCOUNT, runtime_args! 
{ @@ -485,280 +636,300 @@ fn should_calculate_era_validators() { ) .build(); - let auction_hash = builder.get_auction_contract_hash(); - let bids: Bids = builder.get_bids(); - assert_eq!(bids.len(), 2, "founding validators {:?}", bids); - - // Verify first era validators - let first_validator_weights: ValidatorWeights = builder - .get_validator_weights(INITIAL_ERA_ID) - .expect("should have first era validator weights"); - assert_eq!( - first_validator_weights - .keys() - .cloned() - .collect::>(), - BTreeSet::from_iter(vec![ACCOUNT_1_PK.clone(), ACCOUNT_2_PK.clone()]) - ); - - builder.exec(transfer_request_1).commit().expect_success(); - builder.exec(transfer_request_2).commit().expect_success(); - // non-founding validator request let add_bid_request_1 = ExecuteRequestBuilder::standard( - *BID_ACCOUNT_1_ADDR, + *NON_FOUNDER_VALIDATOR_1_ADDR, CONTRACT_ADD_BID, runtime_args! { - ARG_PUBLIC_KEY => BID_ACCOUNT_1_PK.clone(), + ARG_PUBLIC_KEY => NON_FOUNDER_VALIDATOR_1_PK.clone(), ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1), ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1, + ARG_MINIMUM_DELEGATION_AMOUNT => DELEGATE_AMOUNT_1, + ARG_MAXIMUM_DELEGATION_AMOUNT => DELEGATE_AMOUNT_1, }, ) .build(); - builder.exec(add_bid_request_1).commit().expect_success(); - - let pre_era_id: EraId = builder.get_value(auction_hash, ERA_ID_KEY); - assert_eq!(pre_era_id, EraId::from(0)); + builder.exec(transfer_request).expect_success().commit(); + builder.exec(add_bid_request_1).expect_success().commit(); - builder.run_auction( - DEFAULT_GENESIS_TIMESTAMP_MILLIS + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS, - Vec::new(), + let bids = builder.get_bids(); + assert_eq!(bids.len(), 1); + let active_bid = bids.validator_bid(&NON_FOUNDER_VALIDATOR_1_PK).unwrap(); + assert_eq!( + builder.get_purse_balance(*active_bid.bonding_purse()), + U512::from(ADD_BID_AMOUNT_1) ); + assert_eq!(*active_bid.delegation_rate(), ADD_BID_DELEGATION_RATE_1); - let post_era_id: EraId = builder.get_value(auction_hash, 
ERA_ID_KEY); - assert_eq!(post_era_id, EraId::from(1)); - - let era_validators: EraValidators = builder.get_era_validators(); + let exec_request_0 = ExecuteRequestBuilder::standard( + *BID_ACCOUNT_1_ADDR, + CONTRACT_DELEGATE, + runtime_args! { + ARG_AMOUNT => U512::from(DELEGATE_AMOUNT_1 - 1), + ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(), + ARG_DELEGATOR => BID_ACCOUNT_1_PK.clone(), + }, + ) + .build(); - // Check if there are no missing eras after the calculation, but we don't care about what the - // elements are - let eras: Vec<_> = era_validators.keys().copied().collect(); - assert!(!era_validators.is_empty()); - assert!(era_validators.len() >= DEFAULT_AUCTION_DELAY as usize); // definetely more than 1 element - let (first_era, _) = era_validators.iter().min().unwrap(); - let (last_era, _) = era_validators.iter().max().unwrap(); - let expected_eras: Vec = { - let lo: u64 = (*first_era).into(); - let hi: u64 = (*last_era).into(); - (lo..=hi).map(EraId::from).collect() - }; - assert_eq!(eras, expected_eras, "Eras {:?}", eras); + builder.exec(exec_request_0).expect_failure(); - assert!(post_era_id > EraId::from(0)); - let consensus_next_era_id: EraId = post_era_id + DEFAULT_AUCTION_DELAY + 1; + let exec_request_1 = ExecuteRequestBuilder::standard( + *BID_ACCOUNT_1_ADDR, + CONTRACT_DELEGATE, + runtime_args! { + ARG_AMOUNT => U512::from(DELEGATE_AMOUNT_1 + 1), + ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(), + ARG_DELEGATOR => BID_ACCOUNT_1_PK.clone(), + }, + ) + .build(); - let snapshot_size = DEFAULT_AUCTION_DELAY as usize + 1; + builder.exec(exec_request_1).expect_failure(); - assert_eq!( - era_validators.len(), - snapshot_size, - "era_id={} {:?}", - consensus_next_era_id, - era_validators - ); // eraindex==1 - ran once + let exec_request_2 = ExecuteRequestBuilder::standard( + *BID_ACCOUNT_1_ADDR, + CONTRACT_DELEGATE, + runtime_args! 
{ + ARG_AMOUNT => U512::from(DELEGATE_AMOUNT_1), + ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(), + ARG_DELEGATOR => BID_ACCOUNT_1_PK.clone(), + }, + ) + .build(); - let lookup_era_id = consensus_next_era_id - 1; - - let validator_weights = era_validators - .get(&lookup_era_id) // indexed from 0 - .unwrap_or_else(|| { - panic!( - "should have era_index=={} entry {:?}", - consensus_next_era_id, era_validators - ) - }); - assert_eq!( - validator_weights.len(), - 3, - "{:?} {:?}", - era_validators, - validator_weights - ); //2 genesis validators "winners" - assert_eq!( - validator_weights - .get(&BID_ACCOUNT_1_PK) - .expect("should have bid account in this era"), - &U512::from(ADD_BID_AMOUNT_1) - ); - - // Check validator weights using the API - let era_validators_result = builder - .get_validator_weights(lookup_era_id) - .expect("should have validator weights"); - assert_eq!(era_validators_result, *validator_weights); - - // Make sure looked up era validators are different than initial era validators - assert_ne!(era_validators_result, first_validator_weights); -} + builder.exec(exec_request_2).expect_success().commit(); +} #[ignore] #[test] -fn should_get_first_seigniorage_recipients() { - let accounts = { - let mut tmp: Vec = DEFAULT_ACCOUNTS.clone(); - let account_1 = GenesisAccount::account( - ACCOUNT_1_PK.clone(), - Motes::new(ACCOUNT_1_BALANCE.into()), - Some(GenesisValidator::new( - Motes::new(ACCOUNT_1_BOND.into()), - DelegationRate::zero(), - )), - ); - let account_2 = GenesisAccount::account( - ACCOUNT_2_PK.clone(), - Motes::new(ACCOUNT_2_BALANCE.into()), - Some(GenesisValidator::new( - Motes::new(ACCOUNT_2_BOND.into()), - DelegationRate::zero(), - )), - ); - tmp.push(account_1); - tmp.push(account_2); - tmp - }; - - let run_genesis_request = utils::create_run_genesis_request(accounts); +fn should_forcibly_undelegate_after_setting_validator_limits() { + let system_fund_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + 
CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *SYSTEM_ADDR, + ARG_AMOUNT => U512::from(SYSTEM_TRANSFER_AMOUNT) + }, + ) + .build(); - let mut builder = InMemoryWasmTestBuilder::default(); + let validator_1_fund_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *NON_FOUNDER_VALIDATOR_1_ADDR, + ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) + }, + ) + .build(); - builder.run_genesis(&run_genesis_request); + let delegator_1_fund_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *DELEGATOR_1_ADDR, + ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) + }, + ) + .build(); - let transfer_request_1 = ExecuteRequestBuilder::standard( + let delegator_2_fund_request = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, CONTRACT_TRANSFER_TO_ACCOUNT, runtime_args! { - ARG_TARGET => *SYSTEM_ADDR, + ARG_TARGET => *DELEGATOR_2_ADDR, ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) }, ) .build(); - let bids: Bids = builder.get_bids(); - assert_eq!(bids.len(), 2); + let validator_1_add_bid_request = ExecuteRequestBuilder::standard( + *NON_FOUNDER_VALIDATOR_1_ADDR, + CONTRACT_ADD_BID, + runtime_args! { + ARG_PUBLIC_KEY => NON_FOUNDER_VALIDATOR_1_PK.clone(), + ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1), + ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1, + }, + ) + .build(); - let founding_validator_1 = bids.get(&ACCOUNT_1_PK).expect("should have account 1 pk"); - assert_eq!( - founding_validator_1 - .vesting_schedule() - .map(|vesting_schedule| vesting_schedule.initial_release_timestamp_millis()), - Some(DEFAULT_GENESIS_TIMESTAMP_MILLIS + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS) - ); + let delegator_1_validator_1_delegate_request = ExecuteRequestBuilder::standard( + *DELEGATOR_1_ADDR, + CONTRACT_DELEGATE, + runtime_args! 
{ + ARG_AMOUNT => U512::from(DELEGATE_AMOUNT_1), + ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(), + ARG_DELEGATOR => DELEGATOR_1.clone(), + }, + ) + .build(); + + let delegator_2_validator_1_delegate_request = ExecuteRequestBuilder::standard( + *DELEGATOR_2_ADDR, + CONTRACT_DELEGATE, + runtime_args! { + ARG_AMOUNT => U512::from(DELEGATE_AMOUNT_2), + ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(), + ARG_DELEGATOR => DELEGATOR_2.clone(), + }, + ) + .build(); + + let post_genesis_requests = vec![ + system_fund_request, + validator_1_fund_request, + delegator_1_fund_request, + delegator_2_fund_request, + validator_1_add_bid_request, + delegator_1_validator_1_delegate_request, + delegator_2_validator_1_delegate_request, + ]; + + let mut builder = LmdbWasmTestBuilder::default(); + + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + for request in post_genesis_requests { + builder.exec(request).commit().expect_success(); + } + + // builder.advance_eras_by_default_auction_delay(); - let founding_validator_2 = bids.get(&ACCOUNT_2_PK).expect("should have account 2 pk"); + let bids: Vec<_> = builder + .get_bids() + .into_iter() + .filter(|bid| !bid.is_unbond()) + .collect(); + assert_eq!(bids.len(), 3); + + let auction_delay = builder.get_auction_delay(); + // new_era is the first era in the future where new era validator weights will be calculated + let new_era = INITIAL_ERA_ID + auction_delay + 1; + assert!(builder.get_validator_weights(new_era).is_none()); assert_eq!( - founding_validator_2 - .vesting_schedule() - .map(|vesting_schedule| vesting_schedule.initial_release_timestamp_millis()), - Some(DEFAULT_GENESIS_TIMESTAMP_MILLIS + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS) + builder.get_validator_weights(new_era - 1).unwrap(), + builder.get_validator_weights(INITIAL_ERA_ID).unwrap() ); - builder.exec(transfer_request_1).commit().expect_success(); - - // run_auction should be executed first builder.run_auction( DEFAULT_GENESIS_TIMESTAMP_MILLIS + 
DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS, Vec::new(), ); - let mut era_validators: EraValidators = builder.get_era_validators(); - let snapshot_size = DEFAULT_AUCTION_DELAY as usize + 1; + let validator_weights: ValidatorWeights = builder + .get_validator_weights(new_era) + .expect("should have first era validator weights"); - assert_eq!(era_validators.len(), snapshot_size, "{:?}", era_validators); // eraindex==1 - ran once + assert_eq!( + *validator_weights.get(&NON_FOUNDER_VALIDATOR_1_PK).unwrap(), + U512::from(ADD_BID_AMOUNT_1 + DELEGATE_AMOUNT_1 + DELEGATE_AMOUNT_2) + ); - assert!(era_validators.contains_key(&(EraId::from(DEFAULT_AUCTION_DELAY).successor()))); + // set delegation limits + let validator_1_add_bid_request = ExecuteRequestBuilder::standard( + *NON_FOUNDER_VALIDATOR_1_ADDR, + CONTRACT_ADD_BID, + runtime_args! { + ARG_PUBLIC_KEY => NON_FOUNDER_VALIDATOR_1_PK.clone(), + ARG_AMOUNT => U512::from(1_000), + ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1, + ARG_MINIMUM_DELEGATION_AMOUNT => DELEGATE_AMOUNT_2 + 1_000, + ARG_MAXIMUM_DELEGATION_AMOUNT => DELEGATE_AMOUNT_1 - 1_000, + }, + ) + .build(); - let era_id = EraId::from(DEFAULT_AUCTION_DELAY) - 1; + builder + .exec(validator_1_add_bid_request) + .expect_success() + .commit(); + + let bids: Vec<_> = builder + .get_bids() + .into_iter() + .filter(|bid| !bid.is_unbond()) + .collect(); + assert_eq!(bids.len(), 2); - let validator_weights = era_validators.remove(&era_id).unwrap_or_else(|| { - panic!( - "should have era_index=={} entry {:?}", - era_id, era_validators - ) - }); - // 2 genesis validators "winners" with non-zero bond - assert_eq!(validator_weights.len(), 2, "{:?}", validator_weights); - assert_eq!( - validator_weights.get(&ACCOUNT_1_PK).unwrap(), - &U512::from(ACCOUNT_1_BOND) - ); - assert_eq!( - validator_weights.get(&ACCOUNT_2_PK).unwrap(), - &U512::from(ACCOUNT_2_BOND) - ); + let bids: Vec<_> = builder + .get_bids() + .into_iter() + .filter(|bid| !bid.is_unbond()) + .collect(); + 
assert_eq!(bids.len(), 2); - let first_validator_weights = builder - .get_validator_weights(era_id) - .expect("should have validator weights"); - assert_eq!(first_validator_weights, validator_weights); -} + assert!(builder.get_validator_weights(new_era + 1).is_none()); -#[ignore] -#[test] -fn should_release_founder_stake() { - // ACCOUNT_1_BOND / 14 = 7_142 - const EXPECTED_WEEKLY_RELEASE: u64 = 7_142; + builder.run_auction( + DEFAULT_GENESIS_TIMESTAMP_MILLIS + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS, + Vec::new(), + ); - const EXPECTED_REMAINDER: u64 = 12; + let validator_weights: ValidatorWeights = builder + .get_validator_weights(new_era + 1) + .expect("should have first era validator weights"); - const EXPECTED_LOCKED_AMOUNTS: [u64; 14] = [ - 92858, 85716, 78574, 71432, 64290, 57148, 50006, 42864, 35722, 28580, 21438, 14296, 7154, 0, - ]; + assert_eq!( + *validator_weights.get(&NON_FOUNDER_VALIDATOR_1_PK).unwrap(), + // The validator has now bid ADD_BID_AMOUNT_1 + 1_000. + // Delegator 1's delegation has been decreased to the maximum of DELEGATE_AMOUNT_1 - 1_000. + // Delegator 2's delegation was below minimum, so it has been completely unbonded. + U512::from(ADD_BID_AMOUNT_1 + 1_000 + DELEGATE_AMOUNT_1 - 1_000) + ); - let expected_locked_amounts: Vec = EXPECTED_LOCKED_AMOUNTS - .iter() - .cloned() - .map(U512::from) - .collect(); + let unbonding_purses = builder.get_unbonds(); + let unbond_kind = UnbondKind::DelegatedPublicKey(DELEGATOR_1.clone()); + let delegator_1 = unbonding_purses + .get(&unbond_kind) + .expect("should have delegator_1") + .first() + .expect("must get unbond"); - let expect_unbond_success = |builder: &mut InMemoryWasmTestBuilder, amount: u64| { - let partial_unbond = ExecuteRequestBuilder::standard( - *ACCOUNT_1_ADDR, - CONTRACT_WITHDRAW_BID, - runtime_args! 
{ - ARG_PUBLIC_KEY => ACCOUNT_1_PK.clone(), - ARG_AMOUNT => U512::from(amount), - }, - ) - .build(); + let delegator_1_unbonding = delegator_1 + .eras() + .first() + .expect("should have delegator_1 unbonding"); - builder.exec(partial_unbond).commit().expect_success(); - }; + let overage = 1_000; - let expect_unbond_failure = |builder: &mut InMemoryWasmTestBuilder, amount: u64| { - let full_unbond = ExecuteRequestBuilder::standard( - *ACCOUNT_1_ADDR, - CONTRACT_WITHDRAW_BID, - runtime_args! { - ARG_PUBLIC_KEY => ACCOUNT_1_PK.clone(), - ARG_AMOUNT => U512::from(amount), - }, - ) - .build(); + assert_eq!( + delegator_1_unbonding.amount(), + &U512::from(overage), + "expected delegator_1 amount to match" + ); - builder.exec(full_unbond).commit(); + let unbond_kind = UnbondKind::DelegatedPublicKey(DELEGATOR_2.clone()); + let delegator_2 = unbonding_purses + .get(&unbond_kind) + .expect("should have delegator_2"); - let error = { - let response = builder - .get_exec_results() - .last() - .expect("should have last exec result"); - let exec_response = response.last().expect("should have response"); - exec_response.as_error().expect("should have error") - }; - assert_matches!( - error, - engine_state::Error::Exec(execution::Error::Revert(ApiError::AuctionError(15))) - ); - }; + let delegator_2_unbonding = delegator_2 + .first() + .expect("must have unbond") + .eras() + .first() + .expect("should have era"); + + assert_eq!( + delegator_2_unbonding.amount(), + &U512::from(DELEGATE_AMOUNT_2), + "expected delegator_2 amount to match" + ); +} +#[ignore] +#[test] +fn should_not_allow_delegator_stake_range_during_vesting() { let accounts = { let mut tmp: Vec = DEFAULT_ACCOUNTS.clone(); let account_1 = GenesisAccount::account( - ACCOUNT_1_PK.clone(), - Motes::new(ACCOUNT_1_BALANCE.into()), + BID_ACCOUNT_1_PK.clone(), + Motes::new(BID_ACCOUNT_1_BALANCE), Some(GenesisValidator::new( - Motes::new(ACCOUNT_1_BOND.into()), + Motes::new(BID_ACCOUNT_1_BOND), DelegationRate::zero(), )), ); 
@@ -766,129 +937,58 @@ fn should_release_founder_stake() { tmp }; - let run_genesis_request = utils::create_run_genesis_request(accounts); + let genesis_config = GenesisConfigBuilder::new() + .with_accounts(accounts) + .with_locked_funds_period_millis(1) + .build(); + + let run_genesis_request = GenesisRequest::new( + DEFAULT_GENESIS_CONFIG_HASH, + DEFAULT_PROTOCOL_VERSION, + genesis_config, + DEFAULT_CHAINSPEC_REGISTRY.clone(), + ); - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - builder.run_genesis(&run_genesis_request); + builder.run_genesis(run_genesis_request); + // need to step past genesis era + builder.advance_era(); - let fund_system_account = ExecuteRequestBuilder::standard( - *DEFAULT_ACCOUNT_ADDR, - CONTRACT_TRANSFER_TO_ACCOUNT, + // attempt to change delegation limits + let validator_1_add_bid_request = ExecuteRequestBuilder::standard( + *BID_ACCOUNT_1_ADDR, + CONTRACT_ADD_BID, runtime_args! { - ARG_TARGET => *SYSTEM_ADDR, - ARG_AMOUNT => U512::from(DEFAULT_ACCOUNT_INITIAL_BALANCE / 10) + ARG_PUBLIC_KEY => BID_ACCOUNT_1_PK.clone(), + ARG_AMOUNT => U512::from(ACCOUNT_1_BOND), + ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1, + ARG_MINIMUM_DELEGATION_AMOUNT => DEFAULT_MINIMUM_DELEGATION_AMOUNT, + ARG_MAXIMUM_DELEGATION_AMOUNT => DEFAULT_MAXIMUM_DELEGATION_AMOUNT - 2, }, ) .build(); - builder.exec(fund_system_account).commit().expect_success(); - - // Check bid and its vesting schedule - { - let bids: Bids = builder.get_bids(); - assert_eq!(bids.len(), 1); - - let entry = bids.get(&ACCOUNT_1_PK).unwrap(); - let vesting_schedule = entry.vesting_schedule().unwrap(); - - let initial_release = vesting_schedule.initial_release_timestamp_millis(); - assert_eq!(initial_release, EXPECTED_INITIAL_RELEASE_TIMESTAMP_MILLIS); - - let locked_amounts = vesting_schedule.locked_amounts().map(|arr| arr.to_vec()); - assert!(locked_amounts.is_none()); - } - - 
builder.run_auction(DEFAULT_GENESIS_TIMESTAMP_MILLIS, Vec::new()); - - { - // Attempt unbond of one mote - expect_unbond_failure(&mut builder, u64::one()); - } - - builder.run_auction(WEEK_TIMESTAMPS[0], Vec::new()); - - // Check bid and its vesting schedule - { - let bids: Bids = builder.get_bids(); - assert_eq!(bids.len(), 1); - - let entry = bids.get(&ACCOUNT_1_PK).unwrap(); - let vesting_schedule = entry.vesting_schedule().unwrap(); - - let initial_release = vesting_schedule.initial_release_timestamp_millis(); - assert_eq!(initial_release, EXPECTED_INITIAL_RELEASE_TIMESTAMP_MILLIS); - - let locked_amounts = vesting_schedule.locked_amounts().map(|arr| arr.to_vec()); - assert_eq!(locked_amounts, Some(expected_locked_amounts)); - } - - let mut total_unbonded = 0; - - { - // Attempt full unbond - expect_unbond_failure(&mut builder, ACCOUNT_1_BOND); - - // Attempt unbond of released amount - expect_unbond_success(&mut builder, EXPECTED_WEEKLY_RELEASE); - - total_unbonded += EXPECTED_WEEKLY_RELEASE; - - assert_eq!(ACCOUNT_1_BOND - total_unbonded, EXPECTED_LOCKED_AMOUNTS[0]) - } - - for i in 1..13 { - // Run auction forward by almost a week - builder.run_auction(WEEK_TIMESTAMPS[i] - 1, Vec::new()); - - // Attempt unbond of 1 mote - expect_unbond_failure(&mut builder, u64::one()); - - // Run auction forward by one millisecond - builder.run_auction(WEEK_TIMESTAMPS[i], Vec::new()); - - // Attempt unbond of more than weekly release - expect_unbond_failure(&mut builder, EXPECTED_WEEKLY_RELEASE + 1); - - // Attempt unbond of released amount - expect_unbond_success(&mut builder, EXPECTED_WEEKLY_RELEASE); - - total_unbonded += EXPECTED_WEEKLY_RELEASE; - - assert_eq!(ACCOUNT_1_BOND - total_unbonded, EXPECTED_LOCKED_AMOUNTS[i]) - } - - { - // Run auction forward by almost a week - builder.run_auction(WEEK_TIMESTAMPS[13] - 1, Vec::new()); - - // Attempt unbond of 1 mote - expect_unbond_failure(&mut builder, u64::one()); - - // Run auction forward by one millisecond - 
builder.run_auction(WEEK_TIMESTAMPS[13], Vec::new()); - - // Attempt unbond of released amount + remainder - expect_unbond_success(&mut builder, EXPECTED_WEEKLY_RELEASE + EXPECTED_REMAINDER); - - total_unbonded += EXPECTED_WEEKLY_RELEASE + EXPECTED_REMAINDER; - - assert_eq!(ACCOUNT_1_BOND - total_unbonded, EXPECTED_LOCKED_AMOUNTS[13]) - } + builder.exec(validator_1_add_bid_request).expect_failure(); - assert_eq!(ACCOUNT_1_BOND, total_unbonded); + let error = builder.get_error().expect("must have error"); + let err_str = format!("{}", error); + assert!( + err_str.starts_with("ApiError::AuctionError(VestingLockout)"), + "should get vesting lockout error" + ); } #[ignore] #[test] -fn should_fail_to_get_era_validators() { +fn should_allow_delegator_stake_range_change_if_no_vesting() { let accounts = { let mut tmp: Vec = DEFAULT_ACCOUNTS.clone(); let account_1 = GenesisAccount::account( - ACCOUNT_1_PK.clone(), - Motes::new(ACCOUNT_1_BALANCE.into()), + BID_ACCOUNT_1_PK.clone(), + Motes::new(BID_ACCOUNT_1_BALANCE), Some(GenesisValidator::new( - Motes::new(ACCOUNT_1_BOND.into()), + Motes::new(BID_ACCOUNT_1_BOND), DelegationRate::zero(), )), ); @@ -896,57 +996,47 @@ fn should_fail_to_get_era_validators() { tmp }; - let run_genesis_request = utils::create_run_genesis_request(accounts); - - let mut builder = InMemoryWasmTestBuilder::default(); - - builder.run_genesis(&run_genesis_request); + let genesis_config = GenesisConfigBuilder::new() + .with_accounts(accounts) + .with_locked_funds_period_millis(0) + .build(); - assert_eq!( - builder.get_validator_weights(EraId::MAX), - None, - "should not have era validators for invalid era" + let run_genesis_request = GenesisRequest::new( + DEFAULT_GENESIS_CONFIG_HASH, + DEFAULT_PROTOCOL_VERSION, + genesis_config, + DEFAULT_CHAINSPEC_REGISTRY.clone(), ); -} - -#[ignore] -#[test] -fn should_use_era_validators_endpoint_for_first_era() { - let extra_accounts = vec![GenesisAccount::account( - ACCOUNT_1_PK.clone(), - 
Motes::new(ACCOUNT_1_BALANCE.into()), - Some(GenesisValidator::new( - Motes::new(ACCOUNT_1_BOND.into()), - DelegationRate::zero(), - )), - )]; - - let accounts = { - let mut tmp: Vec = DEFAULT_ACCOUNTS.clone(); - tmp.extend(extra_accounts); - tmp - }; - - let run_genesis_request = utils::create_run_genesis_request(accounts); - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - builder.run_genesis(&run_genesis_request); - - let validator_weights = builder - .get_validator_weights(INITIAL_ERA_ID) - .expect("should have validator weights for era 0"); + builder.run_genesis(run_genesis_request); + // need to step past genesis era + builder.advance_era(); - assert_eq!(validator_weights.len(), 1); - assert_eq!(validator_weights[&ACCOUNT_1_PK], ACCOUNT_1_BOND.into()); + // attempt to change delegation limits + let validator_1_add_bid_request = ExecuteRequestBuilder::standard( + *BID_ACCOUNT_1_ADDR, + CONTRACT_ADD_BID, + runtime_args! { + ARG_PUBLIC_KEY => BID_ACCOUNT_1_PK.clone(), + ARG_AMOUNT => U512::from(ACCOUNT_1_BOND), + ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1, + ARG_MINIMUM_DELEGATION_AMOUNT => DEFAULT_MINIMUM_DELEGATION_AMOUNT, + ARG_MAXIMUM_DELEGATION_AMOUNT => DEFAULT_MAXIMUM_DELEGATION_AMOUNT - 2, + }, + ) + .build(); - let era_validators: EraValidators = builder.get_era_validators(); - assert_eq!(era_validators[&EraId::from(0)], validator_weights); + builder + .exec(validator_1_add_bid_request) + .expect_success() + .commit(); } #[ignore] #[test] -fn should_calculate_era_validators_multiple_new_bids() { +fn should_calculate_era_validators() { assert_ne!(*ACCOUNT_1_ADDR, *ACCOUNT_2_ADDR,); assert_ne!(*ACCOUNT_2_ADDR, *BID_ACCOUNT_1_ADDR,); assert_ne!(*ACCOUNT_2_ADDR, *DEFAULT_ACCOUNT_ADDR,); @@ -954,80 +1044,74 @@ fn should_calculate_era_validators_multiple_new_bids() { let mut tmp: Vec = DEFAULT_ACCOUNTS.clone(); let account_1 = GenesisAccount::account( ACCOUNT_1_PK.clone(), - 
Motes::new(ACCOUNT_1_BALANCE.into()), + Motes::new(ACCOUNT_1_BALANCE), Some(GenesisValidator::new( - Motes::new(ACCOUNT_1_BOND.into()), + Motes::new(ACCOUNT_1_BOND), DelegationRate::zero(), )), ); let account_2 = GenesisAccount::account( ACCOUNT_2_PK.clone(), - Motes::new(ACCOUNT_2_BALANCE.into()), + Motes::new(ACCOUNT_2_BALANCE), Some(GenesisValidator::new( - Motes::new(ACCOUNT_2_BOND.into()), + Motes::new(ACCOUNT_2_BOND), DelegationRate::zero(), )), ); let account_3 = GenesisAccount::account( BID_ACCOUNT_1_PK.clone(), - Motes::new(BID_ACCOUNT_1_BALANCE.into()), - None, - ); - let account_4 = GenesisAccount::account( - BID_ACCOUNT_2_PK.clone(), - Motes::new(BID_ACCOUNT_2_BALANCE.into()), + Motes::new(BID_ACCOUNT_1_BALANCE), None, ); tmp.push(account_1); tmp.push(account_2); tmp.push(account_3); - tmp.push(account_4); tmp }; let run_genesis_request = utils::create_run_genesis_request(accounts); - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - builder.run_genesis(&run_genesis_request); + builder.run_genesis(run_genesis_request); - let genesis_validator_weights = builder - .get_validator_weights(INITIAL_ERA_ID) - .expect("should have genesis validators for initial era"); + let transfer_request_1 = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *SYSTEM_ADDR, + ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) + }, + ) + .build(); + let transfer_request_2 = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! 
{ + ARG_TARGET => *NON_FOUNDER_VALIDATOR_1_ADDR, + ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) + }, + ) + .build(); - // new_era is the first era in the future where new era validator weights will be calculated - let new_era = INITIAL_ERA_ID + DEFAULT_AUCTION_DELAY + 1; - assert!(builder.get_validator_weights(new_era).is_none()); - assert_eq!( - builder.get_validator_weights(new_era - 1).unwrap(), - builder.get_validator_weights(INITIAL_ERA_ID).unwrap() - ); + let auction_hash = builder.get_auction_contract_hash(); + let bids = builder.get_bids(); + assert_eq!(bids.len(), 2, "founding validators {:?}", bids); + // Verify first era validators + let first_validator_weights: ValidatorWeights = builder + .get_validator_weights(INITIAL_ERA_ID) + .expect("should have first era validator weights"); assert_eq!( - genesis_validator_weights + first_validator_weights .keys() .cloned() .collect::>(), BTreeSet::from_iter(vec![ACCOUNT_1_PK.clone(), ACCOUNT_2_PK.clone()]) ); - // Fund additional accounts - for target in &[ - *SYSTEM_ADDR, - *NON_FOUNDER_VALIDATOR_1_ADDR, - *NON_FOUNDER_VALIDATOR_2_ADDR, - ] { - let transfer_request_1 = ExecuteRequestBuilder::standard( - *DEFAULT_ACCOUNT_ADDR, - CONTRACT_TRANSFER_TO_ACCOUNT, - runtime_args! { - ARG_TARGET => *target, - ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) - }, - ) - .build(); - builder.exec(transfer_request_1).commit().expect_success(); - } + builder.exec(transfer_request_1).commit().expect_success(); + builder.exec(transfer_request_2).commit().expect_success(); // non-founding validator request let add_bid_request_1 = ExecuteRequestBuilder::standard( @@ -1040,562 +1124,625 @@ fn should_calculate_era_validators_multiple_new_bids() { }, ) .build(); - let add_bid_request_2 = ExecuteRequestBuilder::standard( - *BID_ACCOUNT_2_ADDR, - CONTRACT_ADD_BID, - runtime_args! 
{ - ARG_PUBLIC_KEY => BID_ACCOUNT_2_PK.clone(), - ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_2), - ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_2, - }, - ) - .build(); builder.exec(add_bid_request_1).commit().expect_success(); - builder.exec(add_bid_request_2).commit().expect_success(); - // run auction and compute validators for new era + let pre_era_id: EraId = builder.get_value(EntityAddr::System(auction_hash.value()), ERA_ID_KEY); + assert_eq!(pre_era_id, EraId::from(0)); + builder.run_auction( DEFAULT_GENESIS_TIMESTAMP_MILLIS + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS, Vec::new(), ); - // Verify first era validators - let new_validator_weights: ValidatorWeights = builder - .get_validator_weights(new_era) - .expect("should have first era validator weights"); - // check that the new computed era has exactly the state we expect - let lhs = new_validator_weights - .keys() - .cloned() - .collect::>(); + let post_era_id: EraId = + builder.get_value(EntityAddr::System(auction_hash.value()), ERA_ID_KEY); + assert_eq!(post_era_id, EraId::from(1)); - let rhs = BTreeSet::from_iter(vec![ - ACCOUNT_1_PK.clone(), - ACCOUNT_2_PK.clone(), - BID_ACCOUNT_1_PK.clone(), - BID_ACCOUNT_2_PK.clone(), - ]); + let era_validators: EraValidators = builder.get_era_validators(); - assert_eq!(lhs, rhs); + // Check if there are no missing eras after the calculation, but we don't care about what the + // elements are + let auction_delay = builder.get_auction_delay(); + let eras: Vec<_> = era_validators.keys().copied().collect(); + assert!(!era_validators.is_empty()); + assert!(era_validators.len() >= auction_delay as usize); // definitely more than 1 element + let (first_era, _) = era_validators.iter().min().unwrap(); + let (last_era, _) = era_validators.iter().max().unwrap(); + let expected_eras: Vec = { + let lo: u64 = (*first_era).into(); + let hi: u64 = (*last_era).into(); + (lo..=hi).map(EraId::from).collect() + }; + assert_eq!(eras, expected_eras, "Eras {:?}", eras); - // make sure that new 
validators are exactly those that were part of add_bid requests - let new_validators: BTreeSet<_> = rhs - .difference(&genesis_validator_weights.keys().cloned().collect()) - .cloned() - .collect(); + assert!(post_era_id > EraId::from(0)); + let consensus_next_era_id: EraId = post_era_id + auction_delay + 1; + + let snapshot_size = auction_delay as usize + 2; assert_eq!( - new_validators, - BTreeSet::from_iter(vec![BID_ACCOUNT_1_PK.clone(), BID_ACCOUNT_2_PK.clone(),]) - ); + era_validators.len(), + snapshot_size, + "era_id={} {:?}", + consensus_next_era_id, + era_validators + ); // eraindex==1 - ran once + + let lookup_era_id = consensus_next_era_id - 1; + + let validator_weights = era_validators + .get(&lookup_era_id) // indexed from 0 + .unwrap_or_else(|| { + panic!( + "should have era_index=={} entry {:?}", + consensus_next_era_id, era_validators + ) + }); + assert_eq!( + validator_weights.len(), + 3, + "{:?} {:?}", + era_validators, + validator_weights + ); //2 genesis validators "winners" + assert_eq!( + validator_weights + .get(&BID_ACCOUNT_1_PK) + .expect("should have bid account in this era"), + &U512::from(ADD_BID_AMOUNT_1) + ); + + // Check validator weights using the API + let era_validators_result = builder + .get_validator_weights(lookup_era_id) + .expect("should have validator weights"); + assert_eq!(era_validators_result, *validator_weights); + + // Make sure looked up era validators are different than initial era validators + assert_ne!(era_validators_result, first_validator_weights); } #[ignore] #[test] -fn undelegated_funds_should_be_released() { - let system_fund_request = ExecuteRequestBuilder::standard( - *DEFAULT_ACCOUNT_ADDR, - CONTRACT_TRANSFER_TO_ACCOUNT, - runtime_args! 
{ - ARG_TARGET => *SYSTEM_ADDR, - ARG_AMOUNT => U512::from(SYSTEM_TRANSFER_AMOUNT) - }, - ) - .build(); +fn should_get_first_seigniorage_recipients() { + let accounts = { + let mut tmp: Vec = DEFAULT_ACCOUNTS.clone(); + let account_1 = GenesisAccount::account( + ACCOUNT_1_PK.clone(), + Motes::new(ACCOUNT_1_BALANCE), + Some(GenesisValidator::new( + Motes::new(ACCOUNT_1_BOND), + DelegationRate::zero(), + )), + ); + let account_2 = GenesisAccount::account( + ACCOUNT_2_PK.clone(), + Motes::new(ACCOUNT_2_BALANCE), + Some(GenesisValidator::new( + Motes::new(ACCOUNT_2_BOND), + DelegationRate::zero(), + )), + ); + tmp.push(account_1); + tmp.push(account_2); + tmp + }; - let validator_1_fund_request = ExecuteRequestBuilder::standard( - *DEFAULT_ACCOUNT_ADDR, - CONTRACT_TRANSFER_TO_ACCOUNT, - runtime_args! { - ARG_TARGET => *NON_FOUNDER_VALIDATOR_1_ADDR, - ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) - }, - ) - .build(); + // We can't use `utils::create_run_genesis_request` as the snapshot used an auction delay of 3. + let auction_delay = 3; + let exec_config = GenesisConfigBuilder::new() + .with_accounts(accounts) + .with_auction_delay(auction_delay) + .with_locked_funds_period_millis(CASPER_LOCKED_FUNDS_PERIOD_MILLIS) + .build(); + let run_genesis_request = GenesisRequest::new( + DEFAULT_GENESIS_CONFIG_HASH, + DEFAULT_PROTOCOL_VERSION, + exec_config, + DEFAULT_CHAINSPEC_REGISTRY.clone(), + ); - let delegator_1_fund_request = ExecuteRequestBuilder::standard( + let mut builder = LmdbWasmTestBuilder::default(); + + builder.run_genesis(run_genesis_request); + + let transfer_request_1 = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, CONTRACT_TRANSFER_TO_ACCOUNT, runtime_args! { - ARG_TARGET => *BID_ACCOUNT_1_ADDR, + ARG_TARGET => *SYSTEM_ADDR, ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) }, ) .build(); - let validator_1_add_bid_request = ExecuteRequestBuilder::standard( - *NON_FOUNDER_VALIDATOR_1_ADDR, - CONTRACT_ADD_BID, - runtime_args! 
{ - ARG_PUBLIC_KEY => NON_FOUNDER_VALIDATOR_1_PK.clone(), - ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1), - ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1, - }, - ) - .build(); - - let delegator_1_validator_1_delegate_request = ExecuteRequestBuilder::standard( - *BID_ACCOUNT_1_ADDR, - CONTRACT_DELEGATE, - runtime_args! { - ARG_AMOUNT => U512::from(DELEGATE_AMOUNT_1), - ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(), - ARG_DELEGATOR => BID_ACCOUNT_1_PK.clone(), - }, - ) - .build(); - - let post_genesis_requests = vec![ - system_fund_request, - delegator_1_fund_request, - validator_1_fund_request, - validator_1_add_bid_request, - delegator_1_validator_1_delegate_request, - ]; - - let mut timestamp_millis = - DEFAULT_GENESIS_TIMESTAMP_MILLIS + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS; - - let mut builder = InMemoryWasmTestBuilder::default(); - - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + let bids = builder.get_bids(); + assert_eq!(bids.len(), 2); - for request in post_genesis_requests { - builder.exec(request).commit().expect_success(); - } + let founding_validator_1 = bids + .validator_bid(&ACCOUNT_1_PK) + .expect("should have account 1 pk"); + assert_eq!( + founding_validator_1 + .vesting_schedule() + .map(|vesting_schedule| vesting_schedule.initial_release_timestamp_millis()), + Some(DEFAULT_GENESIS_TIMESTAMP_MILLIS + CASPER_LOCKED_FUNDS_PERIOD_MILLIS) + ); - for _ in 0..5 { - builder.run_auction(timestamp_millis, Vec::new()); - timestamp_millis += TIMESTAMP_MILLIS_INCREMENT; - } + let founding_validator_2 = bids + .validator_bid(&ACCOUNT_2_PK) + .expect("should have account 2 pk"); + assert_eq!( + founding_validator_2 + .vesting_schedule() + .map(|vesting_schedule| vesting_schedule.initial_release_timestamp_millis()), + Some(DEFAULT_GENESIS_TIMESTAMP_MILLIS + CASPER_LOCKED_FUNDS_PERIOD_MILLIS) + ); - let delegator_1_undelegate_purse = builder - .get_account(*BID_ACCOUNT_1_ADDR) - .expect("should have default account") - .main_purse(); + 
builder.exec(transfer_request_1).commit().expect_success(); - let delegator_1_undelegate_request = ExecuteRequestBuilder::standard( - *BID_ACCOUNT_1_ADDR, - CONTRACT_UNDELEGATE, - runtime_args! { - ARG_AMOUNT => U512::from(UNDELEGATE_AMOUNT_1), - ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(), - ARG_DELEGATOR => BID_ACCOUNT_1_PK.clone(), - }, - ) - .build(); + // run_auction should be executed first + builder.run_auction( + DEFAULT_GENESIS_TIMESTAMP_MILLIS + CASPER_LOCKED_FUNDS_PERIOD_MILLIS, + Vec::new(), + ); - builder - .exec(delegator_1_undelegate_request) - .commit() - .expect_success(); + let mut era_validators: EraValidators = builder.get_era_validators(); + let auction_delay = builder.get_auction_delay(); + let snapshot_size = auction_delay as usize + 2; - let delegator_1_purse_balance_before = builder.get_purse_balance(delegator_1_undelegate_purse); + assert_eq!(era_validators.len(), snapshot_size, "{:?}", era_validators); // eraindex==1 - ran once - for _ in 0..=DEFAULT_UNBONDING_DELAY { - let delegator_1_undelegate_purse_balance = - builder.get_purse_balance(delegator_1_undelegate_purse); - assert_eq!( - delegator_1_purse_balance_before, - delegator_1_undelegate_purse_balance - ); + assert!(era_validators.contains_key(&(EraId::from(auction_delay).successor()))); - builder.run_auction(timestamp_millis, Vec::new()); - timestamp_millis += TIMESTAMP_MILLIS_INCREMENT; - } + let era_id = EraId::from(auction_delay); - let delegator_1_undelegate_purse_balance = - builder.get_purse_balance(delegator_1_undelegate_purse); + let validator_weights = era_validators.remove(&era_id).unwrap_or_else(|| { + panic!( + "should have era_index=={} entry {:?}", + era_id, era_validators + ) + }); + // 2 genesis validators "winners" with non-zero bond + assert_eq!(validator_weights.len(), 2, "{:?}", validator_weights); assert_eq!( - delegator_1_undelegate_purse_balance, - delegator_1_purse_balance_before + U512::from(UNDELEGATE_AMOUNT_1) - ) + 
validator_weights.get(&ACCOUNT_1_PK).unwrap(), + &U512::from(ACCOUNT_1_BOND) + ); + assert_eq!( + validator_weights.get(&ACCOUNT_2_PK).unwrap(), + &U512::from(ACCOUNT_2_BOND) + ); + + let first_validator_weights = builder + .get_validator_weights(era_id) + .expect("should have validator weights"); + assert_eq!(first_validator_weights, validator_weights); } #[ignore] #[test] -fn fully_undelegated_funds_should_be_released() { - const SYSTEM_TRANSFER_AMOUNT: u64 = MINIMUM_ACCOUNT_CREATION_BALANCE; +fn should_release_founder_stake() { + const NEW_MINIMUM_DELEGATION_AMOUNT: u64 = 0; - let system_fund_request = ExecuteRequestBuilder::standard( - *DEFAULT_ACCOUNT_ADDR, - CONTRACT_TRANSFER_TO_ACCOUNT, - runtime_args! { - ARG_TARGET => *SYSTEM_ADDR, - ARG_AMOUNT => U512::from(SYSTEM_TRANSFER_AMOUNT) - }, - ) - .build(); + // ACCOUNT_1_BOND / 14 = 7_142 + const EXPECTED_WEEKLY_RELEASE: u64 = 7_142; - let validator_1_fund_request = ExecuteRequestBuilder::standard( - *DEFAULT_ACCOUNT_ADDR, - CONTRACT_TRANSFER_TO_ACCOUNT, - runtime_args! { - ARG_TARGET => *NON_FOUNDER_VALIDATOR_1_ADDR, - ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) - }, - ) - .build(); + const EXPECTED_REMAINDER: u64 = 12; - let delegator_1_fund_request = ExecuteRequestBuilder::standard( - *DEFAULT_ACCOUNT_ADDR, - CONTRACT_TRANSFER_TO_ACCOUNT, - runtime_args! { - ARG_TARGET => *BID_ACCOUNT_1_ADDR, - ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) - }, - ) - .build(); + const EXPECTED_LOCKED_AMOUNTS: [u64; 14] = [ + 92858, 85716, 78574, 71432, 64290, 57148, 50006, 42864, 35722, 28580, 21438, 14296, 7154, 0, + ]; - let validator_1_add_bid_request = ExecuteRequestBuilder::standard( - *NON_FOUNDER_VALIDATOR_1_ADDR, - CONTRACT_ADD_BID, - runtime_args! 
{ - ARG_PUBLIC_KEY => NON_FOUNDER_VALIDATOR_1_PK.clone(), - ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1), - ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1, - }, - ) - .build(); + let expected_locked_amounts: Vec = EXPECTED_LOCKED_AMOUNTS + .iter() + .cloned() + .map(U512::from) + .collect(); - let delegator_1_validator_1_delegate_request = ExecuteRequestBuilder::standard( - *BID_ACCOUNT_1_ADDR, - CONTRACT_DELEGATE, - runtime_args! { - ARG_AMOUNT => U512::from(DELEGATE_AMOUNT_1), - ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(), - ARG_DELEGATOR => BID_ACCOUNT_1_PK.clone(), - }, - ) - .build(); - - let post_genesis_requests = vec![ - system_fund_request, - delegator_1_fund_request, - validator_1_fund_request, - validator_1_add_bid_request, - delegator_1_validator_1_delegate_request, - ]; - - let mut timestamp_millis = - DEFAULT_GENESIS_TIMESTAMP_MILLIS + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS; - - let mut builder = InMemoryWasmTestBuilder::default(); - - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); - - for request in post_genesis_requests { - builder.exec(request).commit().expect_success(); - } - - for _ in 0..5 { - builder.run_auction(timestamp_millis, Vec::new()); - timestamp_millis += TIMESTAMP_MILLIS_INCREMENT; - } + let expect_unbond_success = |builder: &mut LmdbWasmTestBuilder, amount: u64| { + let partial_unbond = ExecuteRequestBuilder::standard( + *ACCOUNT_1_ADDR, + CONTRACT_WITHDRAW_BID, + runtime_args! { + ARG_PUBLIC_KEY => ACCOUNT_1_PK.clone(), + ARG_AMOUNT => U512::from(amount), + }, + ) + .build(); - let delegator_1_undelegate_purse = builder - .get_account(*BID_ACCOUNT_1_ADDR) - .expect("should have default account") - .main_purse(); + builder.exec(partial_unbond).commit().expect_success(); + }; - let delegator_1_undelegate_request = ExecuteRequestBuilder::standard( - *BID_ACCOUNT_1_ADDR, - CONTRACT_UNDELEGATE, - runtime_args! 
{ - ARG_AMOUNT => U512::from(DELEGATE_AMOUNT_1), - ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(), - ARG_DELEGATOR => BID_ACCOUNT_1_PK.clone(), - }, - ) - .build(); + let expect_unbond_failure = |builder: &mut LmdbWasmTestBuilder, amount: u64| { + let full_unbond = ExecuteRequestBuilder::standard( + *ACCOUNT_1_ADDR, + CONTRACT_WITHDRAW_BID, + runtime_args! { + ARG_PUBLIC_KEY => ACCOUNT_1_PK.clone(), + ARG_AMOUNT => U512::from(amount), + }, + ) + .build(); - builder - .exec(delegator_1_undelegate_request) - .commit() - .expect_success(); + builder.exec(full_unbond).commit(); - let delegator_1_purse_balance_before = builder.get_purse_balance(delegator_1_undelegate_purse); + let error = builder + .get_last_exec_result() + .expect("should have last exec result") + .error() + .cloned() + .expect("should have error"); + assert_matches!( + error, + Error::Exec(ExecError::Revert(ApiError::AuctionError(15))) + ); + }; - for _ in 0..=DEFAULT_UNBONDING_DELAY { - let delegator_1_undelegate_purse_balance = - builder.get_purse_balance(delegator_1_undelegate_purse); - assert_eq!( - delegator_1_undelegate_purse_balance, - delegator_1_purse_balance_before + let accounts = { + let mut tmp: Vec = DEFAULT_ACCOUNTS.clone(); + let account_1 = GenesisAccount::account( + ACCOUNT_1_PK.clone(), + Motes::new(ACCOUNT_1_BALANCE), + Some(GenesisValidator::new( + Motes::new(ACCOUNT_1_BOND), + DelegationRate::zero(), + )), ); - builder.run_auction(timestamp_millis, Vec::new()); - timestamp_millis += TIMESTAMP_MILLIS_INCREMENT; - } + tmp.push(account_1); + tmp + }; - let delegator_1_undelegate_purse_after = - builder.get_purse_balance(delegator_1_undelegate_purse); + //let run_genesis_request = utils::create_run_genesis_request(accounts); + let run_genesis_request = { + let exec_config = GenesisConfigBuilder::default() + .with_accounts(accounts) + .with_locked_funds_period_millis(CASPER_LOCKED_FUNDS_PERIOD_MILLIS) + .build(); + + GenesisRequest::new( + DEFAULT_GENESIS_CONFIG_HASH, + 
DEFAULT_PROTOCOL_VERSION, + exec_config, + DEFAULT_CHAINSPEC_REGISTRY.clone(), + ) + }; - assert_eq!( - delegator_1_undelegate_purse_after - delegator_1_purse_balance_before, - U512::from(DELEGATE_AMOUNT_1) - ) -} + let chainspec = ChainspecConfig::default() + .with_minimum_delegation_amount(NEW_MINIMUM_DELEGATION_AMOUNT) + .with_vesting_schedule_period_millis(CASPER_VESTING_SCHEDULE_PERIOD_MILLIS); -#[ignore] -#[test] -fn should_undelegate_delegators_when_validator_unbonds() { - const VALIDATOR_1_REMAINING_BID: u64 = 1; - const VALIDATOR_1_WITHDRAW_AMOUNT: u64 = VALIDATOR_1_STAKE - VALIDATOR_1_REMAINING_BID; + let mut builder = LmdbWasmTestBuilder::new_temporary_with_config(chainspec); - let system_fund_request = ExecuteRequestBuilder::standard( + builder.run_genesis(run_genesis_request); + + let fund_system_account = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, CONTRACT_TRANSFER_TO_ACCOUNT, runtime_args! { ARG_TARGET => *SYSTEM_ADDR, - ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) + ARG_AMOUNT => U512::from(DEFAULT_ACCOUNT_INITIAL_BALANCE / 10) }, ) .build(); - let validator_1_fund_request = ExecuteRequestBuilder::standard( - *DEFAULT_ACCOUNT_ADDR, - CONTRACT_TRANSFER_TO_ACCOUNT, - runtime_args! { - ARG_TARGET => *VALIDATOR_1_ADDR, - ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) - }, - ) - .build(); + builder.exec(fund_system_account).commit().expect_success(); - let delegator_1_fund_request = ExecuteRequestBuilder::standard( - *DEFAULT_ACCOUNT_ADDR, - CONTRACT_TRANSFER_TO_ACCOUNT, - runtime_args! { - ARG_TARGET => *DELEGATOR_1_ADDR, - ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) - }, - ) - .build(); + // Check bid and its vesting schedule + { + let bids = builder.get_bids(); + assert_eq!(bids.len(), 1); - let delegator_2_fund_request = ExecuteRequestBuilder::standard( - *DEFAULT_ACCOUNT_ADDR, - CONTRACT_TRANSFER_TO_ACCOUNT, - runtime_args! 
{ - ARG_TARGET => *DELEGATOR_2_ADDR, - ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) - }, - ) - .build(); + let entry = bids.validator_bid(&ACCOUNT_1_PK).unwrap(); + let vesting_schedule = entry.vesting_schedule().unwrap(); - let validator_1_add_bid_request = ExecuteRequestBuilder::standard( - *VALIDATOR_1_ADDR, - CONTRACT_ADD_BID, - runtime_args! { - ARG_AMOUNT => U512::from(VALIDATOR_1_STAKE), - ARG_DELEGATION_RATE => VALIDATOR_1_DELEGATION_RATE, - ARG_PUBLIC_KEY => VALIDATOR_1.clone(), - }, - ) - .build(); + let initial_release = vesting_schedule.initial_release_timestamp_millis(); + assert_eq!(initial_release, EXPECTED_INITIAL_RELEASE_TIMESTAMP_MILLIS); - let delegator_1_delegate_request = ExecuteRequestBuilder::standard( - *DELEGATOR_1_ADDR, - CONTRACT_DELEGATE, - runtime_args! { - ARG_AMOUNT => U512::from(DELEGATOR_1_STAKE), - ARG_VALIDATOR => VALIDATOR_1.clone(), - ARG_DELEGATOR => DELEGATOR_1.clone(), - }, - ) - .build(); + let locked_amounts = vesting_schedule.locked_amounts().map(|arr| arr.to_vec()); + assert!(locked_amounts.is_none()); + } - let delegator_2_delegate_request = ExecuteRequestBuilder::standard( - *DELEGATOR_2_ADDR, - CONTRACT_DELEGATE, - runtime_args! { - ARG_AMOUNT => U512::from(DELEGATOR_2_STAKE), - ARG_VALIDATOR => VALIDATOR_1.clone(), - ARG_DELEGATOR => DELEGATOR_2.clone(), - }, - ) - .build(); + builder.run_auction(DEFAULT_GENESIS_TIMESTAMP_MILLIS, Vec::new()); - let validator_1_partial_withdraw_bid = ExecuteRequestBuilder::standard( - *VALIDATOR_1_ADDR, - CONTRACT_WITHDRAW_BID, - runtime_args! 
{ - ARG_PUBLIC_KEY => VALIDATOR_1.clone(), - ARG_AMOUNT => U512::from(VALIDATOR_1_WITHDRAW_AMOUNT), - }, - ) - .build(); + { + // Attempt unbond of one mote + expect_unbond_failure(&mut builder, u64::one()); + } - let post_genesis_requests = vec![ - system_fund_request, - validator_1_fund_request, - delegator_1_fund_request, - delegator_2_fund_request, - validator_1_add_bid_request, - delegator_1_delegate_request, - delegator_2_delegate_request, - validator_1_partial_withdraw_bid, - ]; + builder.run_auction(WEEK_TIMESTAMPS[0], Vec::new()); - let mut timestamp_millis = - DEFAULT_GENESIS_TIMESTAMP_MILLIS + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS; + // Check bid and its vesting schedule + { + let bids = builder.get_bids(); + assert_eq!(bids.len(), 1); - let mut builder = InMemoryWasmTestBuilder::default(); + let entry = bids.validator_bid(&ACCOUNT_1_PK).unwrap(); + let vesting_schedule = entry.vesting_schedule().unwrap(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + let initial_release = vesting_schedule.initial_release_timestamp_millis(); + assert_eq!(initial_release, EXPECTED_INITIAL_RELEASE_TIMESTAMP_MILLIS); - for request in post_genesis_requests { - builder.exec(request).commit().expect_success(); + let locked_amounts = vesting_schedule.locked_amounts().map(|arr| arr.to_vec()); + assert_eq!(locked_amounts, Some(expected_locked_amounts)); } - for _ in 0..5 { - builder.run_auction(timestamp_millis, Vec::new()); - timestamp_millis += TIMESTAMP_MILLIS_INCREMENT; - } + let mut total_unbonded = 0; - let bids_before: Bids = builder.get_bids(); - let validator_1_bid = bids_before - .get(&*VALIDATOR_1) - .expect("should have validator 1 bid"); - assert_eq!( - validator_1_bid - .delegators() - .keys() - .cloned() - .collect::>(), - BTreeSet::from_iter(vec![DELEGATOR_1.clone(), DELEGATOR_2.clone()]) - ); + { + // Attempt full unbond + expect_unbond_failure(&mut builder, ACCOUNT_1_BOND); - // Validator partially unbonds and only one entry is present - let 
unbonding_purses_before: UnbondingPurses = builder.get_withdraws(); - assert_eq!(unbonding_purses_before[&*VALIDATOR_1_ADDR].len(), 1); - assert_eq!( - unbonding_purses_before[&*VALIDATOR_1_ADDR][0].unbonder_public_key(), - &*VALIDATOR_1 - ); + // Attempt unbond of released amount + expect_unbond_success(&mut builder, EXPECTED_WEEKLY_RELEASE); - let validator_1_withdraw_bid = ExecuteRequestBuilder::standard( - *VALIDATOR_1_ADDR, - CONTRACT_WITHDRAW_BID, - runtime_args! { - ARG_PUBLIC_KEY => VALIDATOR_1.clone(), - ARG_AMOUNT => U512::from(VALIDATOR_1_REMAINING_BID), - }, - ) - .build(); + total_unbonded += EXPECTED_WEEKLY_RELEASE; - builder - .exec(validator_1_withdraw_bid) - .commit() - .expect_success(); + assert_eq!(ACCOUNT_1_BOND - total_unbonded, EXPECTED_LOCKED_AMOUNTS[0]) + } - let bids_after: Bids = builder.get_bids(); - let validator_1_bid = bids_after.get(&VALIDATOR_1).unwrap(); - assert!(validator_1_bid.inactive()); - assert!(validator_1_bid.staked_amount().is_zero()); + for i in 1..13 { + // Run auction forward by almost a week + builder.run_auction(WEEK_TIMESTAMPS[i] - 1, Vec::new()); - let unbonding_purses_after: UnbondingPurses = builder.get_withdraws(); - assert_ne!(unbonding_purses_after, unbonding_purses_before); + // Attempt unbond of 1 mote + expect_unbond_failure(&mut builder, u64::one()); - let validator_1_unbonding_purse = unbonding_purses_after - .get(&VALIDATOR_1_ADDR) - .expect("should have unbonding purse entry"); - assert_eq!(validator_1_unbonding_purse.len(), 4); // validator1, validator1, delegator1, delegator2 + // Run auction forward by one millisecond + builder.run_auction(WEEK_TIMESTAMPS[i], Vec::new()); - let delegator_1_unbonding_purse = validator_1_unbonding_purse - .iter() - .find(|unbonding_purse| { - ( - unbonding_purse.validator_public_key(), - unbonding_purse.unbonder_public_key(), - ) == (&*VALIDATOR_1, &*DELEGATOR_1) - }) - .expect("should have delegator 1 entry"); - assert_eq!( - delegator_1_unbonding_purse.amount(), - 
&U512::from(DELEGATOR_1_STAKE) - ); + // Attempt unbond of more than weekly release + expect_unbond_failure(&mut builder, EXPECTED_WEEKLY_RELEASE + 1); - let delegator_2_unbonding_purse = validator_1_unbonding_purse - .iter() - .find(|unbonding_purse| { - ( - unbonding_purse.validator_public_key(), - unbonding_purse.unbonder_public_key(), - ) == (&*VALIDATOR_1, &*DELEGATOR_2) - }) - .expect("should have delegator 2 entry"); - assert_eq!( - delegator_2_unbonding_purse.amount(), - &U512::from(DELEGATOR_2_STAKE) - ); + // Attempt unbond of released amount + expect_unbond_success(&mut builder, EXPECTED_WEEKLY_RELEASE); - let validator_1_unbonding_purse: Vec<_> = validator_1_unbonding_purse - .iter() - .filter(|unbonding_purse| { - ( - unbonding_purse.validator_public_key(), - unbonding_purse.unbonder_public_key(), - ) == (&*VALIDATOR_1, &*VALIDATOR_1) - }) - .collect(); + total_unbonded += EXPECTED_WEEKLY_RELEASE; + + assert_eq!(ACCOUNT_1_BOND - total_unbonded, EXPECTED_LOCKED_AMOUNTS[i]) + } + + { + // Run auction forward by almost a week + builder.run_auction(WEEK_TIMESTAMPS[13] - 1, Vec::new()); + + // Attempt unbond of 1 mote + expect_unbond_failure(&mut builder, u64::one()); + + // Run auction forward by one millisecond + builder.run_auction(WEEK_TIMESTAMPS[13], Vec::new()); + + // Attempt unbond of released amount + remainder + expect_unbond_success(&mut builder, EXPECTED_WEEKLY_RELEASE + EXPECTED_REMAINDER); + + total_unbonded += EXPECTED_WEEKLY_RELEASE + EXPECTED_REMAINDER; + + assert_eq!(ACCOUNT_1_BOND - total_unbonded, EXPECTED_LOCKED_AMOUNTS[13]) + } + + assert_eq!(ACCOUNT_1_BOND, total_unbonded); +} + +#[ignore] +#[test] +fn should_fail_to_get_era_validators() { + let accounts = { + let mut tmp: Vec = DEFAULT_ACCOUNTS.clone(); + let account_1 = GenesisAccount::account( + ACCOUNT_1_PK.clone(), + Motes::new(ACCOUNT_1_BALANCE), + Some(GenesisValidator::new( + Motes::new(ACCOUNT_1_BOND), + DelegationRate::zero(), + )), + ); + tmp.push(account_1); + tmp + }; + + 
let run_genesis_request = utils::create_run_genesis_request(accounts); + + let mut builder = LmdbWasmTestBuilder::default(); + + builder.run_genesis(run_genesis_request); assert_eq!( - validator_1_unbonding_purse[0].amount(), - &U512::from(VALIDATOR_1_WITHDRAW_AMOUNT) - ); - assert_eq!( - validator_1_unbonding_purse[1].amount(), - &U512::from(VALIDATOR_1_REMAINING_BID) + builder.get_validator_weights(EraId::MAX), + None, + "should not have era validators for invalid era" ); +} - // Process unbonding requests to verify delegators recevied their stakes - let validator_1 = builder - .get_account(*VALIDATOR_1_ADDR) - .expect("should have validator 1 account"); - let validator_1_balance_before = builder.get_purse_balance(validator_1.main_purse()); +#[ignore] +#[test] +fn should_use_era_validators_endpoint_for_first_era() { + let extra_accounts = vec![GenesisAccount::account( + ACCOUNT_1_PK.clone(), + Motes::new(ACCOUNT_1_BALANCE), + Some(GenesisValidator::new( + Motes::new(ACCOUNT_1_BOND), + DelegationRate::zero(), + )), + )]; - let delegator_1 = builder - .get_account(*DELEGATOR_1_ADDR) - .expect("should have delegator 1 account"); - let delegator_1_balance_before = builder.get_purse_balance(delegator_1.main_purse()); + let accounts = { + let mut tmp: Vec = DEFAULT_ACCOUNTS.clone(); + tmp.extend(extra_accounts); + tmp + }; - let delegator_2 = builder - .get_account(*DELEGATOR_2_ADDR) - .expect("should have delegator 1 account"); - let delegator_2_balance_before = builder.get_purse_balance(delegator_2.main_purse()); + let run_genesis_request = utils::create_run_genesis_request(accounts); - for _ in 0..=DEFAULT_UNBONDING_DELAY { - builder.run_auction(timestamp_millis, Vec::new()); - timestamp_millis += TIMESTAMP_MILLIS_INCREMENT; - } + let mut builder = LmdbWasmTestBuilder::default(); - let validator_1_balance_after = builder.get_purse_balance(validator_1.main_purse()); - let delegator_1_balance_after = builder.get_purse_balance(delegator_1.main_purse()); - let 
delegator_2_balance_after = builder.get_purse_balance(delegator_2.main_purse()); + builder.run_genesis(run_genesis_request); + + let validator_weights = builder + .get_validator_weights(INITIAL_ERA_ID) + .expect("should have validator weights for era 0"); + + assert_eq!(validator_weights.len(), 1); + assert_eq!(validator_weights[&ACCOUNT_1_PK], ACCOUNT_1_BOND.into()); + + let era_validators: EraValidators = builder.get_era_validators(); + assert_eq!(era_validators[&EraId::from(0)], validator_weights); +} + +#[ignore] +#[test] +fn should_calculate_era_validators_multiple_new_bids() { + assert_ne!(*ACCOUNT_1_ADDR, *ACCOUNT_2_ADDR,); + assert_ne!(*ACCOUNT_2_ADDR, *BID_ACCOUNT_1_ADDR,); + assert_ne!(*ACCOUNT_2_ADDR, *DEFAULT_ACCOUNT_ADDR,); + let accounts = { + let mut tmp: Vec = DEFAULT_ACCOUNTS.clone(); + let account_1 = GenesisAccount::account( + ACCOUNT_1_PK.clone(), + Motes::new(ACCOUNT_1_BALANCE), + Some(GenesisValidator::new( + Motes::new(ACCOUNT_1_BOND), + DelegationRate::zero(), + )), + ); + let account_2 = GenesisAccount::account( + ACCOUNT_2_PK.clone(), + Motes::new(ACCOUNT_2_BALANCE), + Some(GenesisValidator::new( + Motes::new(ACCOUNT_2_BOND), + DelegationRate::zero(), + )), + ); + let account_3 = GenesisAccount::account( + BID_ACCOUNT_1_PK.clone(), + Motes::new(BID_ACCOUNT_1_BALANCE), + None, + ); + let account_4 = GenesisAccount::account( + BID_ACCOUNT_2_PK.clone(), + Motes::new(BID_ACCOUNT_2_BALANCE), + None, + ); + tmp.push(account_1); + tmp.push(account_2); + tmp.push(account_3); + tmp.push(account_4); + tmp + }; + + let run_genesis_request = utils::create_run_genesis_request(accounts); + + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(run_genesis_request); + + let genesis_validator_weights = builder + .get_validator_weights(INITIAL_ERA_ID) + .expect("should have genesis validators for initial era"); + let auction_delay = builder.get_auction_delay(); + // new_era is the first era in the future where new era validator weights 
will be calculated + let new_era = INITIAL_ERA_ID + auction_delay + 1; + assert!(builder.get_validator_weights(new_era).is_none()); assert_eq!( - validator_1_balance_before + U512::from(VALIDATOR_1_STAKE), - validator_1_balance_after + builder.get_validator_weights(new_era - 1).unwrap(), + builder.get_validator_weights(INITIAL_ERA_ID).unwrap() ); + assert_eq!( - delegator_1_balance_before + U512::from(DELEGATOR_1_STAKE), - delegator_1_balance_after + genesis_validator_weights + .keys() + .cloned() + .collect::>(), + BTreeSet::from_iter(vec![ACCOUNT_1_PK.clone(), ACCOUNT_2_PK.clone()]) + ); + + // Fund additional accounts + for target in &[ + *SYSTEM_ADDR, + *NON_FOUNDER_VALIDATOR_1_ADDR, + *NON_FOUNDER_VALIDATOR_2_ADDR, + ] { + let transfer_request_1 = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *target, + ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) + }, + ) + .build(); + builder.exec(transfer_request_1).commit().expect_success(); + } + + // non-founding validator request + let add_bid_request_1 = ExecuteRequestBuilder::standard( + *BID_ACCOUNT_1_ADDR, + CONTRACT_ADD_BID, + runtime_args! { + ARG_PUBLIC_KEY => BID_ACCOUNT_1_PK.clone(), + ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1), + ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1, + }, + ) + .build(); + let add_bid_request_2 = ExecuteRequestBuilder::standard( + *BID_ACCOUNT_2_ADDR, + CONTRACT_ADD_BID, + runtime_args! 
{ + ARG_PUBLIC_KEY => BID_ACCOUNT_2_PK.clone(), + ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_2), + ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_2, + }, + ) + .build(); + + builder.exec(add_bid_request_1).commit().expect_success(); + builder.exec(add_bid_request_2).commit().expect_success(); + + // run auction and compute validators for new era + builder.run_auction( + DEFAULT_GENESIS_TIMESTAMP_MILLIS + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS, + Vec::new(), ); + // Verify first era validators + let new_validator_weights: ValidatorWeights = builder + .get_validator_weights(new_era) + .expect("should have first era validator weights"); + + // check that the new computed era has exactly the state we expect + let lhs = new_validator_weights + .keys() + .cloned() + .collect::>(); + + let rhs = BTreeSet::from_iter(vec![ + ACCOUNT_1_PK.clone(), + ACCOUNT_2_PK.clone(), + BID_ACCOUNT_1_PK.clone(), + BID_ACCOUNT_2_PK.clone(), + ]); + + assert_eq!(lhs, rhs); + + // make sure that new validators are exactly those that were part of add_bid requests + let new_validators: BTreeSet<_> = rhs + .difference(&genesis_validator_weights.keys().cloned().collect()) + .cloned() + .collect(); assert_eq!( - delegator_2_balance_before + U512::from(DELEGATOR_2_STAKE), - delegator_2_balance_after + new_validators, + BTreeSet::from_iter(vec![BID_ACCOUNT_1_PK.clone(), BID_ACCOUNT_2_PK.clone()]) ); } #[ignore] #[test] -fn should_undelegate_delegators_when_validator_fully_unbonds() { +fn undelegated_funds_should_be_released() { let system_fund_request = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, CONTRACT_TRANSFER_TO_ACCOUNT, runtime_args! { ARG_TARGET => *SYSTEM_ADDR, - ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) + ARG_AMOUNT => U512::from(SYSTEM_TRANSFER_AMOUNT) }, ) .build(); @@ -1604,7 +1751,7 @@ fn should_undelegate_delegators_when_validator_fully_unbonds() { *DEFAULT_ACCOUNT_ADDR, CONTRACT_TRANSFER_TO_ACCOUNT, runtime_args! 
{ - ARG_TARGET => *VALIDATOR_1_ADDR, + ARG_TARGET => *NON_FOUNDER_VALIDATOR_1_ADDR, ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) }, ) @@ -1614,1064 +1761,3741 @@ fn should_undelegate_delegators_when_validator_fully_unbonds() { *DEFAULT_ACCOUNT_ADDR, CONTRACT_TRANSFER_TO_ACCOUNT, runtime_args! { - ARG_TARGET => *DELEGATOR_1_ADDR, + ARG_TARGET => *BID_ACCOUNT_1_ADDR, ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) }, ) .build(); - let delegator_2_fund_request = ExecuteRequestBuilder::standard( - *DEFAULT_ACCOUNT_ADDR, - CONTRACT_TRANSFER_TO_ACCOUNT, + let validator_1_add_bid_request = ExecuteRequestBuilder::standard( + *NON_FOUNDER_VALIDATOR_1_ADDR, + CONTRACT_ADD_BID, runtime_args! { - ARG_TARGET => *DELEGATOR_2_ADDR, - ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) - }, - ) - .build(); - - let validator_1_add_bid_request = ExecuteRequestBuilder::standard( - *VALIDATOR_1_ADDR, - CONTRACT_ADD_BID, - runtime_args! { - ARG_AMOUNT => U512::from(VALIDATOR_1_STAKE), - ARG_DELEGATION_RATE => VALIDATOR_1_DELEGATION_RATE, - ARG_PUBLIC_KEY => VALIDATOR_1.clone(), - }, - ) - .build(); - - let delegator_1_delegate_request = ExecuteRequestBuilder::standard( - *DELEGATOR_1_ADDR, - CONTRACT_DELEGATE, - runtime_args! { - ARG_AMOUNT => U512::from(DELEGATOR_1_STAKE), - ARG_VALIDATOR => VALIDATOR_1.clone(), - ARG_DELEGATOR => DELEGATOR_1.clone(), + ARG_PUBLIC_KEY => NON_FOUNDER_VALIDATOR_1_PK.clone(), + ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1), + ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1, }, ) .build(); - let delegator_2_delegate_request = ExecuteRequestBuilder::standard( - *DELEGATOR_2_ADDR, + let delegator_1_validator_1_delegate_request = ExecuteRequestBuilder::standard( + *BID_ACCOUNT_1_ADDR, CONTRACT_DELEGATE, runtime_args! 
{ - ARG_AMOUNT => U512::from(DELEGATOR_2_STAKE), - ARG_VALIDATOR => VALIDATOR_1.clone(), - ARG_DELEGATOR => DELEGATOR_2.clone(), + ARG_AMOUNT => U512::from(DELEGATE_AMOUNT_1), + ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(), + ARG_DELEGATOR => BID_ACCOUNT_1_PK.clone(), }, ) .build(); let post_genesis_requests = vec![ system_fund_request, - validator_1_fund_request, delegator_1_fund_request, - delegator_2_fund_request, + validator_1_fund_request, validator_1_add_bid_request, - delegator_1_delegate_request, - delegator_2_delegate_request, + delegator_1_validator_1_delegate_request, ]; let mut timestamp_millis = DEFAULT_GENESIS_TIMESTAMP_MILLIS + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS; - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); for request in post_genesis_requests { builder.exec(request).commit().expect_success(); } - // Fully unbond - let validator_1_withdraw_bid = ExecuteRequestBuilder::standard( - *VALIDATOR_1_ADDR, - CONTRACT_WITHDRAW_BID, + for _ in 0..5 { + builder.run_auction(timestamp_millis, Vec::new()); + timestamp_millis += TIMESTAMP_MILLIS_INCREMENT; + } + + let delegator_1_undelegate_purse = builder + .get_entity_by_account_hash(*BID_ACCOUNT_1_ADDR) + .expect("should have default account") + .main_purse(); + + let delegator_1_undelegate_request = ExecuteRequestBuilder::standard( + *BID_ACCOUNT_1_ADDR, + CONTRACT_UNDELEGATE, runtime_args! 
{ - ARG_PUBLIC_KEY => VALIDATOR_1.clone(), - ARG_AMOUNT => U512::from(VALIDATOR_1_STAKE), + ARG_AMOUNT => U512::from(UNDELEGATE_AMOUNT_1), + ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(), + ARG_DELEGATOR => BID_ACCOUNT_1_PK.clone(), }, ) .build(); builder - .exec(validator_1_withdraw_bid) + .exec(delegator_1_undelegate_request) .commit() .expect_success(); - let bids_after: Bids = builder.get_bids(); - let validator_1_bid = bids_after.get(&VALIDATOR_1).unwrap(); - assert!(validator_1_bid.inactive()); - assert!(validator_1_bid.staked_amount().is_zero()); - - let unbonding_purses_before: UnbondingPurses = builder.get_withdraws(); - - let validator_1_unbonding_purse = unbonding_purses_before - .get(&VALIDATOR_1_ADDR) - .expect("should have unbonding purse entry"); - assert_eq!(validator_1_unbonding_purse.len(), 3); // validator1, delegator1, delegator2 - - let delegator_1_unbonding_purse = validator_1_unbonding_purse - .iter() - .find(|unbonding_purse| { - ( - unbonding_purse.validator_public_key(), - unbonding_purse.unbonder_public_key(), - ) == (&*VALIDATOR_1, &*DELEGATOR_1) - }) - .expect("should have delegator 1 entry"); - assert_eq!( - delegator_1_unbonding_purse.amount(), - &U512::from(DELEGATOR_1_STAKE) - ); - - let delegator_2_unbonding_purse = validator_1_unbonding_purse - .iter() - .find(|unbonding_purse| { - ( - unbonding_purse.validator_public_key(), - unbonding_purse.unbonder_public_key(), - ) == (&*VALIDATOR_1, &*DELEGATOR_2) - }) - .expect("should have delegator 2 entry"); - assert_eq!( - delegator_2_unbonding_purse.amount(), - &U512::from(DELEGATOR_2_STAKE) - ); - - // Process unbonding requests to verify delegators recevied their stakes - let validator_1 = builder - .get_account(*VALIDATOR_1_ADDR) - .expect("should have validator 1 account"); - let validator_1_balance_before = builder.get_purse_balance(validator_1.main_purse()); + let delegator_1_purse_balance_before = builder.get_purse_balance(delegator_1_undelegate_purse); - let delegator_1 = 
builder - .get_account(*DELEGATOR_1_ADDR) - .expect("should have delegator 1 account"); - let delegator_1_balance_before = builder.get_purse_balance(delegator_1.main_purse()); + let unbonding_delay = builder.get_unbonding_delay(); - let delegator_2 = builder - .get_account(*DELEGATOR_2_ADDR) - .expect("should have delegator 1 account"); - let delegator_2_balance_before = builder.get_purse_balance(delegator_2.main_purse()); + for _ in 0..=unbonding_delay { + let delegator_1_undelegate_purse_balance = + builder.get_purse_balance(delegator_1_undelegate_purse); + assert_eq!( + delegator_1_purse_balance_before, + delegator_1_undelegate_purse_balance + ); - for _ in 0..=DEFAULT_UNBONDING_DELAY { builder.run_auction(timestamp_millis, Vec::new()); timestamp_millis += TIMESTAMP_MILLIS_INCREMENT; } - let validator_1_balance_after = builder.get_purse_balance(validator_1.main_purse()); - let delegator_1_balance_after = builder.get_purse_balance(delegator_1.main_purse()); - let delegator_2_balance_after = builder.get_purse_balance(delegator_2.main_purse()); - - assert_eq!( - validator_1_balance_before + U512::from(VALIDATOR_1_STAKE), - validator_1_balance_after - ); - assert_eq!( - delegator_1_balance_before + U512::from(DELEGATOR_1_STAKE), - delegator_1_balance_after - ); + let delegator_1_undelegate_purse_balance = + builder.get_purse_balance(delegator_1_undelegate_purse); assert_eq!( - delegator_2_balance_before + U512::from(DELEGATOR_2_STAKE), - delegator_2_balance_after - ); + delegator_1_undelegate_purse_balance, + delegator_1_purse_balance_before + U512::from(UNDELEGATE_AMOUNT_1) + ) } #[ignore] #[test] -fn should_handle_evictions() { - let activate_bid = |builder: &mut InMemoryWasmTestBuilder, validator_public_key: PublicKey| { - const ARG_VALIDATOR_PUBLIC_KEY: &str = "validator_public_key"; - let run_request = ExecuteRequestBuilder::standard( - AccountHash::from(&validator_public_key), - CONTRACT_ACTIVATE_BID, - runtime_args! 
{ - ARG_VALIDATOR_PUBLIC_KEY => validator_public_key, - }, - ) - .build(); - builder.exec(run_request).commit().expect_success(); - }; - - let latest_validators = |builder: &mut InMemoryWasmTestBuilder| { - let era_validators: EraValidators = builder.get_era_validators(); - let validators = era_validators - .iter() - .rev() - .next() - .map(|(_era_id, validators)| validators) - .expect("should have validators"); - validators.keys().cloned().collect::>() - }; - - let accounts = { - let mut tmp: Vec = DEFAULT_ACCOUNTS.clone(); - let account_1 = GenesisAccount::account( - ACCOUNT_1_PK.clone(), - Motes::new(ACCOUNT_1_BALANCE.into()), - Some(GenesisValidator::new( - Motes::new(ACCOUNT_1_BOND.into()), - DelegationRate::zero(), - )), - ); - let account_2 = GenesisAccount::account( - ACCOUNT_2_PK.clone(), - Motes::new(ACCOUNT_2_BALANCE.into()), - Some(GenesisValidator::new( - Motes::new(ACCOUNT_2_BOND.into()), - DelegationRate::zero(), - )), - ); - let account_3 = GenesisAccount::account( - BID_ACCOUNT_1_PK.clone(), - Motes::new(BID_ACCOUNT_1_BALANCE.into()), - Some(GenesisValidator::new( - Motes::new(300_000.into()), - DelegationRate::zero(), - )), - ); - let account_4 = GenesisAccount::account( - BID_ACCOUNT_2_PK.clone(), - Motes::new(BID_ACCOUNT_2_BALANCE.into()), - Some(GenesisValidator::new( - Motes::new(400_000.into()), - DelegationRate::zero(), - )), - ); - tmp.push(account_1); - tmp.push(account_2); - tmp.push(account_3); - tmp.push(account_4); - tmp - }; +fn fully_undelegated_funds_should_be_released() { + const SYSTEM_TRANSFER_AMOUNT: u64 = MINIMUM_ACCOUNT_CREATION_BALANCE; let system_fund_request = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, CONTRACT_TRANSFER_TO_ACCOUNT, runtime_args! 
{ - "target" => *SYSTEM_ADDR, + ARG_TARGET => *SYSTEM_ADDR, ARG_AMOUNT => U512::from(SYSTEM_TRANSFER_AMOUNT) }, ) .build(); - let mut timestamp = DEFAULT_GENESIS_TIMESTAMP_MILLIS; + let validator_1_fund_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *NON_FOUNDER_VALIDATOR_1_ADDR, + ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) + }, + ) + .build(); - let run_genesis_request = utils::create_run_genesis_request(accounts); + let delegator_1_fund_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *BID_ACCOUNT_1_ADDR, + ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) + }, + ) + .build(); - let mut builder = InMemoryWasmTestBuilder::default(); + let validator_1_add_bid_request = ExecuteRequestBuilder::standard( + *NON_FOUNDER_VALIDATOR_1_ADDR, + CONTRACT_ADD_BID, + runtime_args! { + ARG_PUBLIC_KEY => NON_FOUNDER_VALIDATOR_1_PK.clone(), + ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1), + ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1, + }, + ) + .build(); - builder.run_genesis(&run_genesis_request); + let delegator_1_validator_1_delegate_request = ExecuteRequestBuilder::standard( + *BID_ACCOUNT_1_ADDR, + CONTRACT_DELEGATE, + runtime_args! 
{ + ARG_AMOUNT => U512::from(DELEGATE_AMOUNT_1), + ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(), + ARG_DELEGATOR => BID_ACCOUNT_1_PK.clone(), + }, + ) + .build(); - builder.exec(system_fund_request).commit().expect_success(); + let post_genesis_requests = vec![ + system_fund_request, + delegator_1_fund_request, + validator_1_fund_request, + validator_1_add_bid_request, + delegator_1_validator_1_delegate_request, + ]; - // No evictions - builder.run_auction(timestamp, Vec::new()); - timestamp += WEEK_MILLIS; + let mut timestamp_millis = + DEFAULT_GENESIS_TIMESTAMP_MILLIS + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS; - assert_eq!( - latest_validators(&mut builder), - BTreeSet::from_iter(vec![ - ACCOUNT_1_PK.clone(), - ACCOUNT_2_PK.clone(), - BID_ACCOUNT_1_PK.clone(), - BID_ACCOUNT_2_PK.clone() - ]) - ); + let mut builder = LmdbWasmTestBuilder::default(); - // Evict BID_ACCOUNT_1_PK and BID_ACCOUNT_2_PK - builder.run_auction( - timestamp, - vec![BID_ACCOUNT_1_PK.clone(), BID_ACCOUNT_2_PK.clone()], - ); - timestamp += WEEK_MILLIS; + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); - assert_eq!( - latest_validators(&mut builder), - BTreeSet::from_iter(vec![ACCOUNT_1_PK.clone(), ACCOUNT_2_PK.clone(),]) - ); + for request in post_genesis_requests { + builder.exec(request).commit().expect_success(); + } - // Activate BID_ACCOUNT_1_PK - activate_bid(&mut builder, BID_ACCOUNT_1_PK.clone()); - builder.run_auction(timestamp, Vec::new()); - timestamp += WEEK_MILLIS; + for _ in 0..5 { + builder.run_auction(timestamp_millis, Vec::new()); + timestamp_millis += TIMESTAMP_MILLIS_INCREMENT; + } - assert_eq!( - latest_validators(&mut builder), - BTreeSet::from_iter(vec![ - ACCOUNT_1_PK.clone(), - ACCOUNT_2_PK.clone(), - BID_ACCOUNT_1_PK.clone() - ]) - ); + let delegator_1_undelegate_purse = builder + .get_entity_by_account_hash(*BID_ACCOUNT_1_ADDR) + .expect("should have default account") + .main_purse(); - // Activate BID_ACCOUNT_2_PK - activate_bid(&mut builder, 
BID_ACCOUNT_2_PK.clone()); - builder.run_auction(timestamp, Vec::new()); - timestamp += WEEK_MILLIS; + let delegator_1_undelegate_request = ExecuteRequestBuilder::standard( + *BID_ACCOUNT_1_ADDR, + CONTRACT_UNDELEGATE, + runtime_args! { + ARG_AMOUNT => U512::from(DELEGATE_AMOUNT_1), + ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(), + ARG_DELEGATOR => BID_ACCOUNT_1_PK.clone(), + }, + ) + .build(); + + builder + .exec(delegator_1_undelegate_request) + .commit() + .expect_success(); + + let delegator_1_purse_balance_before = builder.get_purse_balance(delegator_1_undelegate_purse); + + let unbonding_delay = builder.get_unbonding_delay(); + + for _ in 0..=unbonding_delay { + let delegator_1_undelegate_purse_balance = + builder.get_purse_balance(delegator_1_undelegate_purse); + assert_eq!( + delegator_1_undelegate_purse_balance, + delegator_1_purse_balance_before + ); + builder.run_auction(timestamp_millis, Vec::new()); + timestamp_millis += TIMESTAMP_MILLIS_INCREMENT; + } + + let delegator_1_undelegate_purse_after = + builder.get_purse_balance(delegator_1_undelegate_purse); assert_eq!( - latest_validators(&mut builder), + delegator_1_undelegate_purse_after - delegator_1_purse_balance_before, + U512::from(DELEGATE_AMOUNT_1) + ) +} + +#[ignore] +#[test] +fn should_undelegate_delegators_when_validator_unbonds() { + const VALIDATOR_1_REMAINING_BID: u64 = DEFAULT_MINIMUM_BID_AMOUNT; + const VALIDATOR_1_WITHDRAW_AMOUNT: u64 = VALIDATOR_1_STAKE - VALIDATOR_1_REMAINING_BID; + + let system_fund_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *SYSTEM_ADDR, + ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) + }, + ) + .build(); + + let validator_1_fund_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! 
{ + ARG_TARGET => *VALIDATOR_1_ADDR, + ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) + }, + ) + .build(); + + let delegator_1_fund_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *DELEGATOR_1_ADDR, + ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) + }, + ) + .build(); + + let delegator_2_fund_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *DELEGATOR_2_ADDR, + ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) + }, + ) + .build(); + + let validator_1_add_bid_request = ExecuteRequestBuilder::standard( + *VALIDATOR_1_ADDR, + CONTRACT_ADD_BID, + runtime_args! { + ARG_AMOUNT => U512::from(VALIDATOR_1_STAKE), + ARG_DELEGATION_RATE => VALIDATOR_1_DELEGATION_RATE, + ARG_PUBLIC_KEY => VALIDATOR_1.clone(), + }, + ) + .build(); + + let delegator_1_delegate_request = ExecuteRequestBuilder::standard( + *DELEGATOR_1_ADDR, + CONTRACT_DELEGATE, + runtime_args! { + ARG_AMOUNT => U512::from(DELEGATOR_1_STAKE), + ARG_VALIDATOR => VALIDATOR_1.clone(), + ARG_DELEGATOR => DELEGATOR_1.clone(), + }, + ) + .build(); + + let delegator_2_delegate_request = ExecuteRequestBuilder::standard( + *DELEGATOR_2_ADDR, + CONTRACT_DELEGATE, + runtime_args! { + ARG_AMOUNT => U512::from(DELEGATOR_2_STAKE), + ARG_VALIDATOR => VALIDATOR_1.clone(), + ARG_DELEGATOR => DELEGATOR_2.clone(), + }, + ) + .build(); + + let validator_1_partial_withdraw_bid = ExecuteRequestBuilder::standard( + *VALIDATOR_1_ADDR, + CONTRACT_WITHDRAW_BID, + runtime_args! 
{ + ARG_PUBLIC_KEY => VALIDATOR_1.clone(), + ARG_AMOUNT => U512::from(VALIDATOR_1_WITHDRAW_AMOUNT), + }, + ) + .build(); + + let post_genesis_requests = vec![ + system_fund_request, + validator_1_fund_request, + delegator_1_fund_request, + delegator_2_fund_request, + validator_1_add_bid_request, + delegator_1_delegate_request, + delegator_2_delegate_request, + validator_1_partial_withdraw_bid, + ]; + + let mut timestamp_millis = + DEFAULT_GENESIS_TIMESTAMP_MILLIS + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS; + + let mut builder = LmdbWasmTestBuilder::default(); + + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + for request in post_genesis_requests { + builder.exec(request).commit().expect_success(); + } + + for _ in 0..5 { + builder.run_auction(timestamp_millis, Vec::new()); + timestamp_millis += TIMESTAMP_MILLIS_INCREMENT; + } + + let bids_before = builder.get_bids(); + let validator_1_bid = bids_before + .validator_bid(&VALIDATOR_1) + .expect("should have validator 1 bid"); + let delegators = bids_before + .delegators_by_validator_public_key(validator_1_bid.validator_public_key()) + .expect("should have delegators"); + let delegator_kinds = delegators + .iter() + .map(|x| x.delegator_kind()) + .cloned() + .collect::>(); + assert_eq!( + delegator_kinds, BTreeSet::from_iter(vec![ - ACCOUNT_1_PK.clone(), - ACCOUNT_2_PK.clone(), - BID_ACCOUNT_1_PK.clone(), - BID_ACCOUNT_2_PK.clone() + DelegatorKind::PublicKey(DELEGATOR_1.clone()), + DelegatorKind::PublicKey(DELEGATOR_2.clone()) ]) ); - // Evict all validators - builder.run_auction( - timestamp, - vec![ - ACCOUNT_1_PK.clone(), - ACCOUNT_2_PK.clone(), - BID_ACCOUNT_1_PK.clone(), - BID_ACCOUNT_2_PK.clone(), - ], + // Validator partially unbonds and only one entry is present + let unbonding_purses_before = builder.get_unbonds(); + let unbond_kind = UnbondKind::Validator(VALIDATOR_1.clone()); + let unbond = unbonding_purses_before[&unbond_kind] + .first() + .expect("must get unbond"); + assert_eq!(unbond.eras().len(), 
1); + let unbond = &unbonding_purses_before[&unbond_kind] + .first() + .expect("must have unbond"); + assert_eq!( + unbond.unbond_kind(), + &UnbondKind::Validator(VALIDATOR_1.clone()) ); - timestamp += WEEK_MILLIS; - assert_eq!(latest_validators(&mut builder), BTreeSet::new()); + let validator_1_withdraw_bid = ExecuteRequestBuilder::standard( + *VALIDATOR_1_ADDR, + CONTRACT_WITHDRAW_BID, + runtime_args! { + ARG_PUBLIC_KEY => VALIDATOR_1.clone(), + ARG_AMOUNT => U512::from(VALIDATOR_1_REMAINING_BID), + }, + ) + .build(); - // Activate all validators - for validator in &[ - ACCOUNT_1_PK.clone(), - ACCOUNT_2_PK.clone(), - BID_ACCOUNT_1_PK.clone(), - BID_ACCOUNT_2_PK.clone(), - ] { - activate_bid(&mut builder, validator.clone()); + builder + .exec(validator_1_withdraw_bid) + .commit() + .expect_success(); + + let bids_after = builder.get_bids(); + assert!(bids_after.validator_bid(&VALIDATOR_1).is_none()); + + let unbonding_purses_after = builder.get_unbonds(); + assert_ne!(unbonding_purses_after, unbonding_purses_before); + + let unbond_kind = UnbondKind::Validator(VALIDATOR_1.clone()); + let validator1 = unbonding_purses_after + .get(&unbond_kind) + .expect("should have validator1") + .first() + .expect("must have unbond"); + + let validator1_unbonding = validator1.eras().first().expect("should have eras"); + + assert_eq!( + validator1_unbonding.amount(), + &U512::from(VALIDATOR_1_WITHDRAW_AMOUNT), + "expected validator1 amount to match" + ); + + let unbond_kind = UnbondKind::DelegatedPublicKey(DELEGATOR_1.clone()); + let delegator1 = unbonding_purses_after + .get(&unbond_kind) + .expect("should have delegator1") + .first() + .expect("must have unbond"); + + let delegator1_unbonding = delegator1.eras().first().expect("should have eras"); + + assert_eq!( + delegator1_unbonding.amount(), + &U512::from(DELEGATOR_1_STAKE), + "expected delegator1 amount to match" + ); + + let unbond_kind = UnbondKind::DelegatedPublicKey(DELEGATOR_2.clone()); + let delegator2 = 
unbonding_purses_after + .get(&unbond_kind) + .expect("should have delegator2") + .first() + .expect("must have unbond"); + + let delegator2_unbonding = delegator2.eras().first().expect("should have eras"); + + assert_eq!( + delegator2_unbonding.amount(), + &U512::from(DELEGATOR_2_STAKE), + "expected delegator2 amount to match" + ); + + // Process unbonding requests to verify delegators recevied their stakes + let validator_1 = builder + .get_entity_by_account_hash(*VALIDATOR_1_ADDR) + .expect("should have validator 1 account"); + let validator_1_balance_before = builder.get_purse_balance(validator_1.main_purse()); + + let delegator_1 = builder + .get_entity_by_account_hash(*DELEGATOR_1_ADDR) + .expect("should have delegator 1 account"); + let delegator_1_balance_before = builder.get_purse_balance(delegator_1.main_purse()); + + let delegator_2 = builder + .get_entity_by_account_hash(*DELEGATOR_2_ADDR) + .expect("should have delegator 1 account"); + let delegator_2_balance_before = builder.get_purse_balance(delegator_2.main_purse()); + + for _ in 0..=DEFAULT_UNBONDING_DELAY { + builder.run_auction(timestamp_millis, Vec::new()); + timestamp_millis += TIMESTAMP_MILLIS_INCREMENT; } - builder.run_auction(timestamp, Vec::new()); + + let validator_1_balance_after = builder.get_purse_balance(validator_1.main_purse()); + let delegator_1_balance_after = builder.get_purse_balance(delegator_1.main_purse()); + let delegator_2_balance_after = builder.get_purse_balance(delegator_2.main_purse()); assert_eq!( - latest_validators(&mut builder), - BTreeSet::from_iter(vec![ - ACCOUNT_1_PK.clone(), - ACCOUNT_2_PK.clone(), - BID_ACCOUNT_1_PK.clone(), - BID_ACCOUNT_2_PK.clone() - ]) + validator_1_balance_before + U512::from(VALIDATOR_1_STAKE), + validator_1_balance_after + ); + assert_eq!( + delegator_1_balance_before + U512::from(DELEGATOR_1_STAKE), + delegator_1_balance_after + ); + assert_eq!( + delegator_2_balance_before + U512::from(DELEGATOR_2_STAKE), + delegator_2_balance_after + 
); +} + +#[ignore] +#[test] +fn should_undelegate_delegators_when_validator_fully_unbonds() { + let system_fund_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *SYSTEM_ADDR, + ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) + }, + ) + .build(); + + let validator_1_fund_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *VALIDATOR_1_ADDR, + ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) + }, + ) + .build(); + + let delegator_1_fund_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *DELEGATOR_1_ADDR, + ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) + }, + ) + .build(); + + let delegator_2_fund_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *DELEGATOR_2_ADDR, + ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) + }, + ) + .build(); + + let validator_1_add_bid_request = ExecuteRequestBuilder::standard( + *VALIDATOR_1_ADDR, + CONTRACT_ADD_BID, + runtime_args! { + ARG_AMOUNT => U512::from(VALIDATOR_1_STAKE), + ARG_DELEGATION_RATE => VALIDATOR_1_DELEGATION_RATE, + ARG_PUBLIC_KEY => VALIDATOR_1.clone(), + }, + ) + .build(); + + let delegator_1_delegate_request = ExecuteRequestBuilder::standard( + *DELEGATOR_1_ADDR, + CONTRACT_DELEGATE, + runtime_args! { + ARG_AMOUNT => U512::from(DELEGATOR_1_STAKE), + ARG_VALIDATOR => VALIDATOR_1.clone(), + ARG_DELEGATOR => DELEGATOR_1.clone(), + }, + ) + .build(); + + let delegator_2_delegate_request = ExecuteRequestBuilder::standard( + *DELEGATOR_2_ADDR, + CONTRACT_DELEGATE, + runtime_args! 
{ + ARG_AMOUNT => U512::from(DELEGATOR_2_STAKE), + ARG_VALIDATOR => VALIDATOR_1.clone(), + ARG_DELEGATOR => DELEGATOR_2.clone(), + }, + ) + .build(); + + let post_genesis_requests = vec![ + system_fund_request, + validator_1_fund_request, + delegator_1_fund_request, + delegator_2_fund_request, + validator_1_add_bid_request, + delegator_1_delegate_request, + delegator_2_delegate_request, + ]; + + let mut timestamp_millis = + DEFAULT_GENESIS_TIMESTAMP_MILLIS + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS; + + let mut builder = LmdbWasmTestBuilder::default(); + + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + for request in post_genesis_requests { + builder.exec(request).commit().expect_success(); + } + + // Fully unbond + let validator_1_withdraw_bid = ExecuteRequestBuilder::standard( + *VALIDATOR_1_ADDR, + CONTRACT_WITHDRAW_BID, + runtime_args! { + ARG_PUBLIC_KEY => VALIDATOR_1.clone(), + ARG_AMOUNT => U512::from(VALIDATOR_1_STAKE), + }, + ) + .build(); + + builder + .exec(validator_1_withdraw_bid) + .commit() + .expect_success(); + + let bids_after = builder.get_bids(); + assert!(bids_after.validator_bid(&VALIDATOR_1).is_none()); + + let unbonding_purses_before = builder.get_unbonds(); + + let unbond_kind = UnbondKind::Validator(VALIDATOR_1.clone()); + let validator_1_era = unbonding_purses_before + .get(&unbond_kind) + .expect("should have unbonding purse") + .first() + .expect("must have unbond") + .eras() + .first() + .expect("should have era"); + + let unbond_kind = UnbondKind::DelegatedPublicKey(DELEGATOR_1.clone()); + let delegator_1_unbonding_purse = unbonding_purses_before + .get(&unbond_kind) + .expect("should have unbonding purse entry") + .first() + .expect("must have unbond") + .eras() + .first() + .expect("should have unbonding purse"); + + let unbond_kind = UnbondKind::DelegatedPublicKey(DELEGATOR_2.clone()); + let delegator_2_unbonding_purse = unbonding_purses_before + .get(&unbond_kind) + .expect("should have unbonding purse entry") + .first() + 
.expect("must have unbond") + .eras() + .first() + .expect("should have unbonding purse"); + + assert_eq!(validator_1_era.amount(), &U512::from(VALIDATOR_1_STAKE)); + assert_eq!( + delegator_1_unbonding_purse.amount(), + &U512::from(DELEGATOR_1_STAKE) + ); + assert_eq!( + delegator_2_unbonding_purse.amount(), + &U512::from(DELEGATOR_2_STAKE) + ); + + // Process unbonding requests to verify delegators received their stakes + let validator_1 = builder + .get_entity_by_account_hash(*VALIDATOR_1_ADDR) + .expect("should have validator 1 account"); + let validator_1_balance_before = builder.get_purse_balance(validator_1.main_purse()); + + let delegator_1 = builder + .get_entity_by_account_hash(*DELEGATOR_1_ADDR) + .expect("should have delegator 1 account"); + let delegator_1_balance_before = builder.get_purse_balance(delegator_1.main_purse()); + + let delegator_2 = builder + .get_entity_by_account_hash(*DELEGATOR_2_ADDR) + .expect("should have delegator 1 account"); + let delegator_2_balance_before = builder.get_purse_balance(delegator_2.main_purse()); + + for _ in 0..=DEFAULT_UNBONDING_DELAY { + builder.run_auction(timestamp_millis, Vec::new()); + timestamp_millis += TIMESTAMP_MILLIS_INCREMENT; + } + + let validator_1_balance_after = builder.get_purse_balance(validator_1.main_purse()); + let delegator_1_balance_after = builder.get_purse_balance(delegator_1.main_purse()); + let delegator_2_balance_after = builder.get_purse_balance(delegator_2.main_purse()); + + assert_eq!( + validator_1_balance_before + U512::from(VALIDATOR_1_STAKE), + validator_1_balance_after + ); + assert_eq!( + delegator_1_balance_before + U512::from(DELEGATOR_1_STAKE), + delegator_1_balance_after + ); + assert_eq!( + delegator_2_balance_before + U512::from(DELEGATOR_2_STAKE), + delegator_2_balance_after + ); +} + +#[ignore] +#[test] +fn should_undelegate_delegators_when_validator_unbonds_below_minimum_bid_amount() { + const VALIDATOR_1_REMAINING_BID: u64 = DEFAULT_MINIMUM_BID_AMOUNT - 1; + const 
VALIDATOR_1_WITHDRAW_AMOUNT: u64 = VALIDATOR_1_STAKE - VALIDATOR_1_REMAINING_BID; + + let system_fund_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *SYSTEM_ADDR, + ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) + }, + ) + .build(); + + let validator_1_fund_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *VALIDATOR_1_ADDR, + ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) + }, + ) + .build(); + + let delegator_1_fund_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *DELEGATOR_1_ADDR, + ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) + }, + ) + .build(); + + let delegator_2_fund_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *DELEGATOR_2_ADDR, + ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) + }, + ) + .build(); + + let validator_1_add_bid_request = ExecuteRequestBuilder::standard( + *VALIDATOR_1_ADDR, + CONTRACT_ADD_BID, + runtime_args! { + ARG_AMOUNT => U512::from(VALIDATOR_1_STAKE), + ARG_DELEGATION_RATE => VALIDATOR_1_DELEGATION_RATE, + ARG_PUBLIC_KEY => VALIDATOR_1.clone(), + }, + ) + .build(); + + let delegator_1_delegate_request = ExecuteRequestBuilder::standard( + *DELEGATOR_1_ADDR, + CONTRACT_DELEGATE, + runtime_args! { + ARG_AMOUNT => U512::from(DELEGATOR_1_STAKE), + ARG_VALIDATOR => VALIDATOR_1.clone(), + ARG_DELEGATOR => DELEGATOR_1.clone(), + }, + ) + .build(); + + let delegator_2_delegate_request = ExecuteRequestBuilder::standard( + *DELEGATOR_2_ADDR, + CONTRACT_DELEGATE, + runtime_args! 
{ + ARG_AMOUNT => U512::from(DELEGATOR_2_STAKE), + ARG_VALIDATOR => VALIDATOR_1.clone(), + ARG_DELEGATOR => DELEGATOR_2.clone(), + }, + ) + .build(); + + let post_genesis_requests = vec![ + system_fund_request, + validator_1_fund_request, + delegator_1_fund_request, + delegator_2_fund_request, + validator_1_add_bid_request, + delegator_1_delegate_request, + delegator_2_delegate_request, + ]; + + let mut timestamp_millis = DEFAULT_GENESIS_TIMESTAMP_MILLIS; + let mut builder = LmdbWasmTestBuilder::default(); + + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + for request in post_genesis_requests { + builder.exec(request).commit().expect_success(); + } + + // Try to unbond partially. Stake would fall below minimum bid which should force a full + // unbonding. + let validator_1_withdraw_bid = ExecuteRequestBuilder::standard( + *VALIDATOR_1_ADDR, + CONTRACT_WITHDRAW_BID, + runtime_args! { + ARG_PUBLIC_KEY => VALIDATOR_1.clone(), + ARG_AMOUNT => U512::from(VALIDATOR_1_WITHDRAW_AMOUNT), + }, + ) + .build(); + + builder + .exec(validator_1_withdraw_bid) + .commit() + .expect_success(); + + let bids_after = builder.get_bids(); + assert!(bids_after.validator_bid(&VALIDATOR_1).is_none()); + + let unbonding_purses_before = builder.get_unbonds(); + + let unbond_kind = UnbondKind::Validator(VALIDATOR_1.clone()); + let validator_1_era = unbonding_purses_before + .get(&unbond_kind) + .expect("should have unbonding purse") + .first() + .expect("must have unbond") + .eras() + .first() + .expect("should have era"); + + let unbond_kind = UnbondKind::DelegatedPublicKey(DELEGATOR_1.clone()); + let delegator_1_unbonding_purse = unbonding_purses_before + .get(&unbond_kind) + .expect("should have unbonding purse entry") + .first() + .expect("must have unbond") + .eras() + .first() + .expect("should have unbonding purse"); + + let unbond_kind = UnbondKind::DelegatedPublicKey(DELEGATOR_2.clone()); + let delegator_2_unbonding_purse = unbonding_purses_before + .get(&unbond_kind) + 
.expect("should have unbonding purse entry") + .first() + .expect("must have unbond") + .eras() + .first() + .expect("should have unbonding purse"); + + assert_eq!(validator_1_era.amount(), &U512::from(VALIDATOR_1_STAKE)); + assert_eq!( + delegator_1_unbonding_purse.amount(), + &U512::from(DELEGATOR_1_STAKE) + ); + assert_eq!( + delegator_2_unbonding_purse.amount(), + &U512::from(DELEGATOR_2_STAKE) + ); + + // Process unbonding requests to verify delegators received their stakes + let validator_1 = builder + .get_entity_by_account_hash(*VALIDATOR_1_ADDR) + .expect("should have validator 1 account"); + let validator_1_balance_before = builder.get_purse_balance(validator_1.main_purse()); + + let delegator_1 = builder + .get_entity_by_account_hash(*DELEGATOR_1_ADDR) + .expect("should have delegator 1 account"); + let delegator_1_balance_before = builder.get_purse_balance(delegator_1.main_purse()); + + let delegator_2 = builder + .get_entity_by_account_hash(*DELEGATOR_2_ADDR) + .expect("should have delegator 1 account"); + let delegator_2_balance_before = builder.get_purse_balance(delegator_2.main_purse()); + + for _ in 0..=DEFAULT_UNBONDING_DELAY { + builder.run_auction(timestamp_millis, Vec::new()); + timestamp_millis += TIMESTAMP_MILLIS_INCREMENT; + } + + let validator_1_balance_after = builder.get_purse_balance(validator_1.main_purse()); + let delegator_1_balance_after = builder.get_purse_balance(delegator_1.main_purse()); + let delegator_2_balance_after = builder.get_purse_balance(delegator_2.main_purse()); + + assert_eq!( + validator_1_balance_before + U512::from(VALIDATOR_1_STAKE), + validator_1_balance_after + ); + assert_eq!( + delegator_1_balance_before + U512::from(DELEGATOR_1_STAKE), + delegator_1_balance_after + ); + assert_eq!( + delegator_2_balance_before + U512::from(DELEGATOR_2_STAKE), + delegator_2_balance_after + ); +} + +#[ignore] +#[test] +fn should_handle_evictions() { + let activate_bid = |builder: &mut LmdbWasmTestBuilder, validator_public_key: 
PublicKey| { + const ARG_VALIDATOR: &str = "validator"; + let run_request = ExecuteRequestBuilder::standard( + AccountHash::from(&validator_public_key), + CONTRACT_ACTIVATE_BID, + runtime_args! { + ARG_VALIDATOR => validator_public_key, + }, + ) + .build(); + builder.exec(run_request).expect_success().commit(); + }; + + let latest_validators = |builder: &mut LmdbWasmTestBuilder| { + let era_validators: EraValidators = builder.get_era_validators(); + let validators = era_validators + .iter() + .next_back() + .map(|(_era_id, validators)| validators) + .expect("should have validators"); + validators.keys().cloned().collect::>() + }; + + let accounts = { + let mut tmp: Vec = DEFAULT_ACCOUNTS.clone(); + let account_1 = GenesisAccount::account( + ACCOUNT_1_PK.clone(), + Motes::new(ACCOUNT_1_BALANCE), + Some(GenesisValidator::new( + Motes::new(ACCOUNT_1_BOND), + DelegationRate::zero(), + )), + ); + let account_2 = GenesisAccount::account( + ACCOUNT_2_PK.clone(), + Motes::new(ACCOUNT_2_BALANCE), + Some(GenesisValidator::new( + Motes::new(ACCOUNT_2_BOND), + DelegationRate::zero(), + )), + ); + let account_3 = GenesisAccount::account( + BID_ACCOUNT_1_PK.clone(), + Motes::new(BID_ACCOUNT_1_BALANCE), + Some(GenesisValidator::new( + Motes::new(300_000), + DelegationRate::zero(), + )), + ); + let account_4 = GenesisAccount::account( + BID_ACCOUNT_2_PK.clone(), + Motes::new(BID_ACCOUNT_2_BALANCE), + Some(GenesisValidator::new( + Motes::new(400_000), + DelegationRate::zero(), + )), + ); + tmp.push(account_1); + tmp.push(account_2); + tmp.push(account_3); + tmp.push(account_4); + tmp + }; + + let system_fund_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! 
{ + "target" => *SYSTEM_ADDR, + ARG_AMOUNT => U512::from(SYSTEM_TRANSFER_AMOUNT) + }, + ) + .build(); + + let mut timestamp = DEFAULT_GENESIS_TIMESTAMP_MILLIS; + + let run_genesis_request = utils::create_run_genesis_request(accounts); + + let mut builder = LmdbWasmTestBuilder::default(); + + builder.run_genesis(run_genesis_request); + + builder.exec(system_fund_request).expect_success().commit(); + + // No evictions + builder.run_auction(timestamp, Vec::new()); + timestamp += WEEK_MILLIS; + + assert_eq!( + latest_validators(&mut builder), + BTreeSet::from_iter(vec![ + ACCOUNT_1_PK.clone(), + ACCOUNT_2_PK.clone(), + BID_ACCOUNT_1_PK.clone(), + BID_ACCOUNT_2_PK.clone(), + ]) + ); + + // Evict BID_ACCOUNT_1_PK and BID_ACCOUNT_2_PK + builder.run_auction( + timestamp, + vec![BID_ACCOUNT_1_PK.clone(), BID_ACCOUNT_2_PK.clone()], + ); + timestamp += WEEK_MILLIS; + + assert_eq!( + latest_validators(&mut builder), + BTreeSet::from_iter(vec![ACCOUNT_1_PK.clone(), ACCOUNT_2_PK.clone()]) + ); + + // Activate BID_ACCOUNT_1_PK + activate_bid(&mut builder, BID_ACCOUNT_1_PK.clone()); + builder.run_auction(timestamp, Vec::new()); + timestamp += WEEK_MILLIS; + + assert_eq!( + latest_validators(&mut builder), + BTreeSet::from_iter(vec![ + ACCOUNT_1_PK.clone(), + ACCOUNT_2_PK.clone(), + BID_ACCOUNT_1_PK.clone(), + ]) + ); + + // Activate BID_ACCOUNT_2_PK + activate_bid(&mut builder, BID_ACCOUNT_2_PK.clone()); + builder.run_auction(timestamp, Vec::new()); + timestamp += WEEK_MILLIS; + + assert_eq!( + latest_validators(&mut builder), + BTreeSet::from_iter(vec![ + ACCOUNT_1_PK.clone(), + ACCOUNT_2_PK.clone(), + BID_ACCOUNT_1_PK.clone(), + BID_ACCOUNT_2_PK.clone(), + ]) + ); + + // Evict all validators + builder.run_auction( + timestamp, + vec![ + ACCOUNT_1_PK.clone(), + ACCOUNT_2_PK.clone(), + BID_ACCOUNT_1_PK.clone(), + BID_ACCOUNT_2_PK.clone(), + ], + ); + timestamp += WEEK_MILLIS; + + assert_eq!(latest_validators(&mut builder), BTreeSet::new()); + + // Activate all validators + for 
validator in &[ + ACCOUNT_1_PK.clone(), + ACCOUNT_2_PK.clone(), + BID_ACCOUNT_1_PK.clone(), + BID_ACCOUNT_2_PK.clone(), + ] { + activate_bid(&mut builder, validator.clone()); + } + builder.run_auction(timestamp, Vec::new()); + + assert_eq!( + latest_validators(&mut builder), + BTreeSet::from_iter(vec![ + ACCOUNT_1_PK.clone(), + ACCOUNT_2_PK.clone(), + BID_ACCOUNT_1_PK.clone(), + BID_ACCOUNT_2_PK.clone(), + ]) ); } #[should_panic(expected = "OrphanedDelegator")] #[ignore] #[test] -fn should_validate_orphaned_genesis_delegators() { - let missing_validator = SecretKey::ed25519_from_bytes([123; 32]).unwrap().into(); +fn should_validate_orphaned_genesis_delegators() { + let missing_validator_secret_key = SecretKey::ed25519_from_bytes([123; 32]).unwrap(); + let missing_validator = PublicKey::from(&missing_validator_secret_key); + + let accounts = { + let mut tmp: Vec = DEFAULT_ACCOUNTS.clone(); + let account_1 = GenesisAccount::account( + ACCOUNT_1_PK.clone(), + Motes::new(ACCOUNT_1_BALANCE), + Some(GenesisValidator::new( + Motes::new(ACCOUNT_1_BOND), + DelegationRate::zero(), + )), + ); + let account_2 = GenesisAccount::account( + ACCOUNT_2_PK.clone(), + Motes::new(ACCOUNT_2_BALANCE), + Some(GenesisValidator::new( + Motes::new(ACCOUNT_2_BOND), + DelegationRate::zero(), + )), + ); + let delegator_1 = GenesisAccount::delegator( + ACCOUNT_1_PK.clone(), + DELEGATOR_1.clone(), + Motes::new(DELEGATOR_1_BALANCE), + Motes::new(DELEGATOR_1_STAKE), + ); + let orphaned_delegator = GenesisAccount::delegator( + missing_validator, + DELEGATOR_1.clone(), + Motes::new(DELEGATOR_1_BALANCE), + Motes::new(DELEGATOR_1_STAKE), + ); + tmp.push(account_1); + tmp.push(account_2); + tmp.push(delegator_1); + tmp.push(orphaned_delegator); + tmp + }; + + let run_genesis_request = utils::create_run_genesis_request(accounts); + + let mut builder = LmdbWasmTestBuilder::default(); + + builder.run_genesis(run_genesis_request); +} + +#[should_panic(expected = "DuplicatedDelegatorEntry")] +#[ignore] 
+#[test] +fn should_validate_duplicated_genesis_delegators() { + let accounts = { + let mut tmp: Vec = DEFAULT_ACCOUNTS.clone(); + let account_1 = GenesisAccount::account( + ACCOUNT_1_PK.clone(), + Motes::new(ACCOUNT_1_BALANCE), + Some(GenesisValidator::new( + Motes::new(ACCOUNT_1_BOND), + DelegationRate::zero(), + )), + ); + let account_2 = GenesisAccount::account( + ACCOUNT_2_PK.clone(), + Motes::new(ACCOUNT_2_BALANCE), + Some(GenesisValidator::new( + Motes::new(ACCOUNT_2_BOND), + DelegationRate::zero(), + )), + ); + let delegator_1 = GenesisAccount::delegator( + ACCOUNT_1_PK.clone(), + DELEGATOR_1.clone(), + Motes::new(DELEGATOR_1_BALANCE), + Motes::new(DELEGATOR_1_STAKE), + ); + let duplicated_delegator_1 = GenesisAccount::delegator( + ACCOUNT_1_PK.clone(), + DELEGATOR_1.clone(), + Motes::new(DELEGATOR_1_BALANCE), + Motes::new(DELEGATOR_1_STAKE), + ); + let duplicated_delegator_2 = GenesisAccount::delegator( + ACCOUNT_1_PK.clone(), + DELEGATOR_2.clone(), + Motes::new(DELEGATOR_2_BALANCE), + Motes::new(DELEGATOR_2_STAKE), + ); + tmp.push(account_1); + tmp.push(account_2); + tmp.push(delegator_1); + tmp.push(duplicated_delegator_1); + tmp.push(duplicated_delegator_2); + tmp + }; + + let run_genesis_request = utils::create_run_genesis_request(accounts); + + let mut builder = LmdbWasmTestBuilder::default(); + + builder.run_genesis(run_genesis_request); +} + +#[should_panic(expected = "InvalidDelegationRate")] +#[ignore] +#[test] +fn should_validate_delegation_rate_of_genesis_validator() { + let accounts = { + let mut tmp: Vec = DEFAULT_ACCOUNTS.clone(); + let account_1 = GenesisAccount::account( + ACCOUNT_1_PK.clone(), + Motes::new(ACCOUNT_1_BALANCE), + Some(GenesisValidator::new( + Motes::new(ACCOUNT_1_BOND), + DelegationRate::MAX, + )), + ); + tmp.push(account_1); + tmp + }; + + let run_genesis_request = utils::create_run_genesis_request(accounts); + + let mut builder = LmdbWasmTestBuilder::default(); + + builder.run_genesis(run_genesis_request); +} + 
+#[should_panic(expected = "InvalidBondAmount")] +#[ignore] +#[test] +fn should_validate_bond_amount_of_genesis_validator() { + let accounts = { + let mut tmp: Vec = DEFAULT_ACCOUNTS.clone(); + let account_1 = GenesisAccount::account( + ACCOUNT_1_PK.clone(), + Motes::new(ACCOUNT_1_BALANCE), + Some(GenesisValidator::new(Motes::zero(), DelegationRate::zero())), + ); + tmp.push(account_1); + tmp + }; + + let run_genesis_request = utils::create_run_genesis_request(accounts); + + let mut builder = LmdbWasmTestBuilder::default(); + + builder.run_genesis(run_genesis_request); +} + +#[ignore] +#[test] +fn should_setup_genesis_delegators() { + let accounts = { + let mut tmp: Vec = DEFAULT_ACCOUNTS.clone(); + let account_1 = GenesisAccount::account( + ACCOUNT_1_PK.clone(), + Motes::new(ACCOUNT_1_BALANCE), + Some(GenesisValidator::new(Motes::new(ACCOUNT_1_BOND), 80)), + ); + let account_2 = GenesisAccount::account( + ACCOUNT_2_PK.clone(), + Motes::new(ACCOUNT_2_BALANCE), + Some(GenesisValidator::new( + Motes::new(ACCOUNT_2_BOND), + DelegationRate::zero(), + )), + ); + let delegator_1 = GenesisAccount::delegator( + ACCOUNT_1_PK.clone(), + DELEGATOR_1.clone(), + Motes::new(DELEGATOR_1_BALANCE), + Motes::new(DELEGATOR_1_STAKE), + ); + tmp.push(account_1); + tmp.push(account_2); + tmp.push(delegator_1); + tmp + }; + + let run_genesis_request = utils::create_run_genesis_request(accounts); + + let mut builder = LmdbWasmTestBuilder::default(); + + builder.run_genesis(run_genesis_request); + + let _account_1 = builder + .get_entity_by_account_hash(*ACCOUNT_1_ADDR) + .expect("should install account 1"); + let _account_2 = builder + .get_entity_by_account_hash(*ACCOUNT_2_ADDR) + .expect("should install account 2"); + + let delegator_1 = builder + .get_entity_by_account_hash(*DELEGATOR_1_ADDR) + .expect("should install delegator 1"); + assert_eq!( + builder.get_purse_balance(delegator_1.main_purse()), + U512::from(DELEGATOR_1_BALANCE) + ); + + let bids = builder.get_bids(); + let 
key_map = bids.delegator_map(); + let validator_keys = key_map.keys().cloned().collect::>(); + assert_eq!( + validator_keys, + BTreeSet::from_iter(vec![ACCOUNT_1_PK.clone(), ACCOUNT_2_PK.clone()]) + ); + + let account_1_bid_entry = bids + .validator_bid(&ACCOUNT_1_PK) + .expect("should have account 1 bid"); + assert_eq!(*account_1_bid_entry.delegation_rate(), 80); + let delegators = bids + .delegators_by_validator_public_key(&ACCOUNT_1_PK) + .expect("should have delegators"); + assert_eq!(delegators.len(), 1); + let delegator = delegators.first().expect("should have delegator"); + assert_eq!( + delegator.delegator_kind(), + &DelegatorKind::PublicKey(DELEGATOR_1.clone()), + "should be DELEGATOR_1" + ); + assert_eq!(delegator.staked_amount(), U512::from(DELEGATOR_1_STAKE)); +} + +#[ignore] +#[test] +fn should_not_partially_undelegate_uninitialized_vesting_schedule() { + let accounts = { + let mut tmp: Vec = DEFAULT_ACCOUNTS.clone(); + let validator_1 = GenesisAccount::account( + VALIDATOR_1.clone(), + Motes::new(VALIDATOR_1_STAKE), + Some(GenesisValidator::new( + Motes::new(VALIDATOR_1_STAKE), + DelegationRate::zero(), + )), + ); + let delegator_1 = GenesisAccount::delegator( + VALIDATOR_1.clone(), + DELEGATOR_1.clone(), + Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE), + Motes::new(DELEGATOR_1_STAKE), + ); + tmp.push(validator_1); + tmp.push(delegator_1); + tmp + }; + + let run_genesis_request = utils::create_run_genesis_request(accounts); + + let mut builder = LmdbWasmTestBuilder::default(); + + builder.run_genesis(run_genesis_request); + + let fund_delegator_account = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! 
{ + ARG_TARGET => *DELEGATOR_1_ADDR, + ARG_AMOUNT => U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE) + }, + ) + .build(); + builder + .exec(fund_delegator_account) + .commit() + .expect_success(); + + let partial_undelegate = ExecuteRequestBuilder::standard( + *DELEGATOR_1_ADDR, + CONTRACT_UNDELEGATE, + runtime_args! { + ARG_VALIDATOR => VALIDATOR_1.clone(), + ARG_DELEGATOR => DELEGATOR_1.clone(), + ARG_AMOUNT => U512::from(DELEGATOR_1_STAKE - 1), + }, + ) + .build(); + + builder.exec(partial_undelegate).commit(); + let error = builder + .get_last_exec_result() + .expect("should have last exec result") + .error() + .cloned() + .expect("should have error"); + + assert!(matches!( + error, + Error::Exec(ExecError::Revert(ApiError::AuctionError(auction_error))) + if auction_error == auction::Error::DelegatorFundsLocked as u8 + )); +} + +#[ignore] +#[test] +fn should_not_fully_undelegate_uninitialized_vesting_schedule() { + let accounts = { + let mut tmp: Vec = DEFAULT_ACCOUNTS.clone(); + let validator_1 = GenesisAccount::account( + VALIDATOR_1.clone(), + Motes::new(VALIDATOR_1_STAKE), + Some(GenesisValidator::new( + Motes::new(VALIDATOR_1_STAKE), + DelegationRate::zero(), + )), + ); + let delegator_1 = GenesisAccount::delegator( + VALIDATOR_1.clone(), + DELEGATOR_1.clone(), + Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE), + Motes::new(DELEGATOR_1_STAKE), + ); + tmp.push(validator_1); + tmp.push(delegator_1); + tmp + }; + + let run_genesis_request = utils::create_run_genesis_request(accounts); + + let mut builder = LmdbWasmTestBuilder::default(); + + builder.run_genesis(run_genesis_request); + + let fund_delegator_account = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! 
{ + ARG_TARGET => *DELEGATOR_1_ADDR, + ARG_AMOUNT => U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE) + }, + ) + .build(); + builder + .exec(fund_delegator_account) + .commit() + .expect_success(); + + let full_undelegate = ExecuteRequestBuilder::standard( + *DELEGATOR_1_ADDR, + CONTRACT_UNDELEGATE, + runtime_args! { + ARG_VALIDATOR => VALIDATOR_1.clone(), + ARG_DELEGATOR => DELEGATOR_1.clone(), + ARG_AMOUNT => U512::from(DELEGATOR_1_STAKE), + }, + ) + .build(); + + builder.exec(full_undelegate).commit(); + let error = builder + .get_last_exec_result() + .expect("should have last exec result") + .error() + .cloned() + .expect("should have error"); + + assert!(matches!( + error, + Error::Exec(ExecError::Revert(ApiError::AuctionError(auction_error))) + if auction_error == auction::Error::DelegatorFundsLocked as u8 + )); +} + +#[ignore] +#[test] +fn should_not_undelegate_vfta_holder_stake() { + let accounts = { + let mut tmp: Vec = DEFAULT_ACCOUNTS.clone(); + let validator_1 = GenesisAccount::account( + VALIDATOR_1.clone(), + Motes::new(VALIDATOR_1_STAKE), + Some(GenesisValidator::new( + Motes::new(VALIDATOR_1_STAKE), + DelegationRate::zero(), + )), + ); + let delegator_1 = GenesisAccount::delegator( + VALIDATOR_1.clone(), + DELEGATOR_1.clone(), + Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE), + Motes::new(DELEGATOR_1_STAKE), + ); + tmp.push(validator_1); + tmp.push(delegator_1); + tmp + }; + + let run_genesis_request = { + let exec_config = GenesisConfigBuilder::default() + .with_accounts(accounts) + .with_locked_funds_period_millis(CASPER_LOCKED_FUNDS_PERIOD_MILLIS) + .build(); + + GenesisRequest::new( + DEFAULT_GENESIS_CONFIG_HASH, + DEFAULT_PROTOCOL_VERSION, + exec_config, + DEFAULT_CHAINSPEC_REGISTRY.clone(), + ) + }; + let chainspec = ChainspecConfig::default() + .with_vesting_schedule_period_millis(CASPER_VESTING_SCHEDULE_PERIOD_MILLIS); + + let mut builder = LmdbWasmTestBuilder::new_temporary_with_config(chainspec); + + builder.run_genesis(run_genesis_request); + + 
let post_genesis_requests = { + let fund_delegator_account = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *DELEGATOR_1_ADDR, + ARG_AMOUNT => U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE) + }, + ) + .build(); + + let fund_system_account = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *SYSTEM_ADDR, + ARG_AMOUNT => U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE) + }, + ) + .build(); + + vec![fund_system_account, fund_delegator_account] + }; + + for post_genesis_request in post_genesis_requests { + builder.exec(post_genesis_request).commit().expect_success(); + } + + { + let bids = builder.get_bids(); + let delegator = bids + .delegator_by_kind(&VALIDATOR_1, &DelegatorKind::PublicKey(DELEGATOR_1.clone())) + .expect("should have delegator"); + let vesting_schedule = delegator + .vesting_schedule() + .expect("should have delegator vesting schedule"); + assert!( + vesting_schedule.locked_amounts().is_none(), + "should not be locked" + ); + } + + builder.run_auction(WEEK_TIMESTAMPS[0], Vec::new()); + + { + let bids = builder.get_bids(); + let delegator = bids + .delegator_by_kind(&VALIDATOR_1, &DelegatorKind::PublicKey(DELEGATOR_1.clone())) + .expect("should have delegator"); + let vesting_schedule = delegator + .vesting_schedule() + .expect("should have vesting schedule"); + assert!( + vesting_schedule.locked_amounts().is_some(), + "should be locked" + ); + } + + let partial_unbond = ExecuteRequestBuilder::standard( + *DELEGATOR_1_ADDR, + CONTRACT_UNDELEGATE, + runtime_args! 
{ + ARG_VALIDATOR => VALIDATOR_1.clone(), + ARG_DELEGATOR => DELEGATOR_1.clone(), + ARG_AMOUNT => U512::from(DELEGATOR_1_STAKE - 1), + }, + ) + .build(); + builder.exec(partial_unbond).commit(); + let error = builder + .get_last_exec_result() + .expect("should have last exec result") + .error() + .cloned() + .expect("should have error"); + + assert!(matches!( + error, + Error::Exec(ExecError::Revert(ApiError::AuctionError(auction_error))) + if auction_error == auction::Error::DelegatorFundsLocked as u8 + )); +} + +#[ignore] +#[test] +fn should_release_vfta_holder_stake() { + const EXPECTED_WEEKLY_RELEASE: u64 = + (DELEGATOR_1_STAKE - DEFAULT_MINIMUM_DELEGATION_AMOUNT) / 14; + const DELEGATOR_VFTA_STAKE: u64 = DELEGATOR_1_STAKE - DEFAULT_MINIMUM_DELEGATION_AMOUNT; + const EXPECTED_REMAINDER: u64 = 12; + const NEW_MINIMUM_DELEGATION_AMOUNT: u64 = 0; + const EXPECTED_LOCKED_AMOUNTS: [u64; 14] = [ + 1392858, 1285716, 1178574, 1071432, 964290, 857148, 750006, 642864, 535722, 428580, 321438, + 214296, 107154, 0, + ]; + + let expected_locked_amounts: Vec = EXPECTED_LOCKED_AMOUNTS + .iter() + .cloned() + .map(U512::from) + .collect(); + + let expect_undelegate_success = |builder: &mut LmdbWasmTestBuilder, amount: u64| { + let partial_unbond = ExecuteRequestBuilder::standard( + *DELEGATOR_1_ADDR, + CONTRACT_UNDELEGATE, + runtime_args! { + ARG_VALIDATOR => ACCOUNT_1_PK.clone(), + ARG_DELEGATOR => DELEGATOR_1.clone(), + ARG_AMOUNT => U512::from(amount), + }, + ) + .build(); + + builder.exec(partial_unbond).commit().expect_success(); + }; + + let expect_undelegate_failure = |builder: &mut LmdbWasmTestBuilder, amount: u64| { + let full_undelegate = ExecuteRequestBuilder::standard( + *DELEGATOR_1_ADDR, + CONTRACT_UNDELEGATE, + runtime_args! 
{ + ARG_VALIDATOR => ACCOUNT_1_PK.clone(), + ARG_DELEGATOR => DELEGATOR_1.clone(), + ARG_AMOUNT => U512::from(amount), + }, + ) + .build(); + + builder.exec(full_undelegate).commit(); + + let error = builder + .get_last_exec_result() + .expect("should have last exec result") + .error() + .cloned() + .expect("should have error"); + + assert!( + matches!( + error, + Error::Exec(ExecError::Revert(ApiError::AuctionError(auction_error))) + if auction_error == auction::Error::DelegatorFundsLocked as u8 + ), + "{:?}", + error + ); + }; + + let accounts = { + let mut tmp: Vec = DEFAULT_ACCOUNTS.clone(); + let account_1 = GenesisAccount::account( + ACCOUNT_1_PK.clone(), + Motes::new(ACCOUNT_1_BALANCE), + Some(GenesisValidator::new( + Motes::new(ACCOUNT_1_BOND), + DelegationRate::zero(), + )), + ); + let delegator_1 = GenesisAccount::delegator( + ACCOUNT_1_PK.clone(), + DELEGATOR_1.clone(), + Motes::new(DELEGATOR_1_BALANCE), + Motes::new(DELEGATOR_VFTA_STAKE), + ); + tmp.push(account_1); + tmp.push(delegator_1); + tmp + }; + + let run_genesis_request = { + let genesis_config = GenesisConfigBuilder::default() + .with_accounts(accounts) + .with_locked_funds_period_millis(CASPER_LOCKED_FUNDS_PERIOD_MILLIS) + .build(); + + GenesisRequest::new( + DEFAULT_GENESIS_CONFIG_HASH, + DEFAULT_PROTOCOL_VERSION, + genesis_config, + DEFAULT_CHAINSPEC_REGISTRY.clone(), + ) + }; + + let chainspec = ChainspecConfig::default() + .with_vesting_schedule_period_millis(CASPER_VESTING_SCHEDULE_PERIOD_MILLIS) + .with_minimum_delegation_amount(NEW_MINIMUM_DELEGATION_AMOUNT); + + let mut builder = LmdbWasmTestBuilder::new_temporary_with_config(chainspec); + + builder.run_genesis(run_genesis_request); + + let fund_delegator_account = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! 
{ + ARG_TARGET => *DELEGATOR_1_ADDR, + ARG_AMOUNT => U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE) + }, + ) + .build(); + builder + .exec(fund_delegator_account) + .commit() + .expect_success(); + + let fund_system_account = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *SYSTEM_ADDR, + ARG_AMOUNT => U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE) + }, + ) + .build(); + + builder.exec(fund_system_account).commit().expect_success(); + + // Check bid and its vesting schedule + { + let bids = builder.get_bids(); + assert_eq!(bids.len(), 2); + let delegator = bids + .delegator_by_kind( + &ACCOUNT_1_PK, + &DelegatorKind::PublicKey(DELEGATOR_1.clone()), + ) + .expect("should have delegator"); + + let vesting_schedule = delegator + .vesting_schedule() + .expect("should have delegator vesting schedule"); + + let initial_release = vesting_schedule.initial_release_timestamp_millis(); + assert_eq!(initial_release, EXPECTED_INITIAL_RELEASE_TIMESTAMP_MILLIS); + + let locked_amounts = vesting_schedule.locked_amounts().map(|arr| arr.to_vec()); + assert!(locked_amounts.is_none()); + } + + builder.run_auction(DEFAULT_GENESIS_TIMESTAMP_MILLIS, Vec::new()); + + { + // Attempt unbond of one mote + expect_undelegate_failure(&mut builder, u64::one()); + } + + builder.run_auction(WEEK_TIMESTAMPS[0], Vec::new()); + + // Check bid and its vesting schedule + { + let bids = builder.get_bids(); + assert_eq!(bids.len(), 2); + let delegator = bids + .delegator_by_kind( + &ACCOUNT_1_PK, + &DelegatorKind::PublicKey(DELEGATOR_1.clone()), + ) + .expect("should have delegator"); + + let vesting_schedule = delegator + .vesting_schedule() + .expect("should have delegator vesting schedule"); + + let initial_release = vesting_schedule.initial_release_timestamp_millis(); + assert_eq!(initial_release, EXPECTED_INITIAL_RELEASE_TIMESTAMP_MILLIS); + + let locked_amounts = vesting_schedule.locked_amounts().map(|arr| arr.to_vec()); + 
assert_eq!(locked_amounts, Some(expected_locked_amounts)); + } + + let mut total_unbonded = 0; + + { + // Attempt full unbond + expect_undelegate_failure(&mut builder, DELEGATOR_VFTA_STAKE); + + // Attempt unbond of released amount + expect_undelegate_success(&mut builder, EXPECTED_WEEKLY_RELEASE); + + total_unbonded += EXPECTED_WEEKLY_RELEASE; + + assert_eq!( + DELEGATOR_VFTA_STAKE - total_unbonded, + EXPECTED_LOCKED_AMOUNTS[0] + ) + } + + for i in 1..13 { + // Run auction forward by almost a week + builder.run_auction(WEEK_TIMESTAMPS[i] - 1, Vec::new()); + + // Attempt unbond of 1 mote + expect_undelegate_failure(&mut builder, u64::one()); + + // Run auction forward by one millisecond + builder.run_auction(WEEK_TIMESTAMPS[i], Vec::new()); + + // Attempt unbond of more than weekly release + expect_undelegate_failure(&mut builder, EXPECTED_WEEKLY_RELEASE + 1); + + // Attempt unbond of released amount + expect_undelegate_success(&mut builder, EXPECTED_WEEKLY_RELEASE); + + total_unbonded += EXPECTED_WEEKLY_RELEASE; + + assert_eq!( + DELEGATOR_VFTA_STAKE - total_unbonded, + EXPECTED_LOCKED_AMOUNTS[i] + ) + } + + { + // Run auction forward by almost a week + builder.run_auction(WEEK_TIMESTAMPS[13] - 1, Vec::new()); + + // Attempt unbond of 1 mote + expect_undelegate_failure(&mut builder, u64::one()); + + // Run auction forward by one millisecond + builder.run_auction(WEEK_TIMESTAMPS[13], Vec::new()); + + // Attempt unbond of released amount + remainder + expect_undelegate_success(&mut builder, EXPECTED_WEEKLY_RELEASE + EXPECTED_REMAINDER); + + total_unbonded += EXPECTED_WEEKLY_RELEASE + EXPECTED_REMAINDER; + + assert_eq!( + DELEGATOR_VFTA_STAKE - total_unbonded, + EXPECTED_LOCKED_AMOUNTS[13] + ) + } + + assert_eq!(DELEGATOR_VFTA_STAKE, total_unbonded); +} + +#[ignore] +#[test] +fn should_reset_delegators_stake_after_slashing() { + let system_fund_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! 
{ + ARG_TARGET => *SYSTEM_ADDR, + ARG_AMOUNT => U512::from(SYSTEM_TRANSFER_AMOUNT) + }, + ) + .build(); + + let validator_1_fund_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *NON_FOUNDER_VALIDATOR_1_ADDR, + ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) + }, + ) + .build(); + + let validator_2_fund_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *NON_FOUNDER_VALIDATOR_2_ADDR, + ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) + }, + ) + .build(); + + let delegator_1_fund_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *BID_ACCOUNT_1_ADDR, + ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) + }, + ) + .build(); + + let delegator_2_fund_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *BID_ACCOUNT_2_ADDR, + ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) + }, + ) + .build(); + + let validator_1_add_bid_request = ExecuteRequestBuilder::standard( + *NON_FOUNDER_VALIDATOR_1_ADDR, + CONTRACT_ADD_BID, + runtime_args! { + ARG_PUBLIC_KEY => NON_FOUNDER_VALIDATOR_1_PK.clone(), + ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1), + ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1, + }, + ) + .build(); + + let validator_2_add_bid_request = ExecuteRequestBuilder::standard( + *NON_FOUNDER_VALIDATOR_2_ADDR, + CONTRACT_ADD_BID, + runtime_args! { + ARG_PUBLIC_KEY => NON_FOUNDER_VALIDATOR_2_PK.clone(), + ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_2), + ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_2, + }, + ) + .build(); + + let delegator_1_validator_1_delegate_request = ExecuteRequestBuilder::standard( + *BID_ACCOUNT_1_ADDR, + CONTRACT_DELEGATE, + runtime_args! 
{ + ARG_AMOUNT => U512::from(DELEGATE_AMOUNT_1), + ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(), + ARG_DELEGATOR => BID_ACCOUNT_1_PK.clone(), + }, + ) + .build(); + + let delegator_1_validator_2_delegate_request = ExecuteRequestBuilder::standard( + *BID_ACCOUNT_1_ADDR, + CONTRACT_DELEGATE, + runtime_args! { + ARG_AMOUNT => U512::from(DELEGATE_AMOUNT_1), + ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_2_PK.clone(), + ARG_DELEGATOR => BID_ACCOUNT_1_PK.clone(), + }, + ) + .build(); + + let delegator_2_validator_1_delegate_request = ExecuteRequestBuilder::standard( + *BID_ACCOUNT_2_ADDR, + CONTRACT_DELEGATE, + runtime_args! { + ARG_AMOUNT => U512::from(DELEGATE_AMOUNT_2), + ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(), + ARG_DELEGATOR => BID_ACCOUNT_2_PK.clone(), + }, + ) + .build(); + + let delegator_2_validator_2_delegate_request = ExecuteRequestBuilder::standard( + *BID_ACCOUNT_2_ADDR, + CONTRACT_DELEGATE, + runtime_args! { + ARG_AMOUNT => U512::from(DELEGATE_AMOUNT_2), + ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_2_PK.clone(), + ARG_DELEGATOR => BID_ACCOUNT_2_PK.clone(), + }, + ) + .build(); + + let post_genesis_requests = vec![ + system_fund_request, + delegator_1_fund_request, + delegator_2_fund_request, + validator_1_fund_request, + validator_2_fund_request, + validator_1_add_bid_request, + validator_2_add_bid_request, + delegator_1_validator_1_delegate_request, + delegator_1_validator_2_delegate_request, + delegator_2_validator_1_delegate_request, + delegator_2_validator_2_delegate_request, + ]; + + let mut builder = LmdbWasmTestBuilder::default(); + + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + for request in post_genesis_requests { + builder.exec(request).expect_success().commit(); + } + + let auction_hash = builder.get_auction_contract_hash(); + + // Check bids before slashing + + let bids_1 = builder.get_bids(); + let _ = bids_1 + .validator_total_stake(&NON_FOUNDER_VALIDATOR_1_PK) + .expect("should have total stake"); + + let 
validator_1_delegator_stakes_1 = { + match bids_1.delegators_by_validator_public_key(&NON_FOUNDER_VALIDATOR_1_PK) { + None => U512::zero(), + Some(delegators) => delegators.iter().map(|x| x.staked_amount()).sum(), + } + }; + + assert!(validator_1_delegator_stakes_1 > U512::zero()); + + let validator_2_delegator_stakes_1 = { + match bids_1.delegators_by_validator_public_key(&NON_FOUNDER_VALIDATOR_2_PK) { + None => U512::zero(), + Some(delegators) => delegators.iter().map(|x| x.staked_amount()).sum(), + } + }; + assert!(validator_2_delegator_stakes_1 > U512::zero()); + + let slash_request_1 = ExecuteRequestBuilder::contract_call_by_hash( + *SYSTEM_ADDR, + auction_hash, + auction::METHOD_SLASH, + runtime_args! { + auction::ARG_VALIDATOR_PUBLIC_KEYS => vec![ + NON_FOUNDER_VALIDATOR_2_PK.clone(), + ] + }, + ) + .build(); + + builder.exec(slash_request_1).expect_success().commit(); + + // Compare bids after slashing validator 2 + let bids_2 = builder.get_bids(); + assert_ne!(bids_1, bids_2); + + let _ = bids_2 + .validator_bid(&NON_FOUNDER_VALIDATOR_1_PK) + .expect("should have bids"); + let validator_1_delegator_stakes_2 = { + match bids_1.delegators_by_validator_public_key(&NON_FOUNDER_VALIDATOR_1_PK) { + None => U512::zero(), + Some(delegators) => delegators.iter().map(|x| x.staked_amount()).sum(), + } + }; + assert!(validator_1_delegator_stakes_2 > U512::zero()); + + assert!(bids_2.validator_bid(&NON_FOUNDER_VALIDATOR_2_PK).is_none()); + + // Validator 1 total delegated stake did not change + assert_eq!( + validator_1_delegator_stakes_2, + validator_1_delegator_stakes_1 + ); + + let slash_request_2 = ExecuteRequestBuilder::contract_call_by_hash( + *SYSTEM_ADDR, + auction_hash, + auction::METHOD_SLASH, + runtime_args! 
{ + auction::ARG_VALIDATOR_PUBLIC_KEYS => vec![ + NON_FOUNDER_VALIDATOR_1_PK.clone(), + ] + }, + ) + .build(); + + builder.exec(slash_request_2).expect_success().commit(); + + // Compare bids after slashing validator 2 + let bids_3 = builder.get_bids(); + assert_ne!(bids_3, bids_2); + assert_ne!(bids_3, bids_1); + + assert!(bids_3.validator_bid(&NON_FOUNDER_VALIDATOR_1_PK).is_none()); + let validator_1_delegator_stakes_3 = { + match bids_3.delegators_by_validator_public_key(&NON_FOUNDER_VALIDATOR_1_PK) { + None => U512::zero(), + Some(delegators) => delegators.iter().map(|x| x.staked_amount()).sum(), + } + }; + + assert_ne!( + validator_1_delegator_stakes_3, + validator_1_delegator_stakes_1 + ); + assert_ne!( + validator_1_delegator_stakes_3, + validator_1_delegator_stakes_2 + ); + + // Validator 1 total delegated stake is set to 0 + assert_eq!(validator_1_delegator_stakes_3, U512::zero()); +} + +#[should_panic(expected = "InvalidDelegatedAmount")] +#[ignore] +#[test] +fn should_validate_genesis_delegators_bond_amount() { + let accounts = { + let mut tmp: Vec = DEFAULT_ACCOUNTS.clone(); + let account_1 = GenesisAccount::account( + ACCOUNT_1_PK.clone(), + Motes::new(ACCOUNT_1_BALANCE), + Some(GenesisValidator::new(Motes::new(ACCOUNT_1_BOND), 80)), + ); + let account_2 = GenesisAccount::account( + ACCOUNT_2_PK.clone(), + Motes::new(ACCOUNT_2_BALANCE), + Some(GenesisValidator::new( + Motes::new(ACCOUNT_2_BOND), + DelegationRate::zero(), + )), + ); + let delegator_1 = GenesisAccount::delegator( + ACCOUNT_1_PK.clone(), + DELEGATOR_1.clone(), + Motes::new(DELEGATOR_1_BALANCE), + Motes::zero(), + ); + tmp.push(account_1); + tmp.push(account_2); + tmp.push(delegator_1); + tmp + }; + + let run_genesis_request = utils::create_run_genesis_request(accounts); + + let mut builder = LmdbWasmTestBuilder::default(); + + builder.run_genesis(run_genesis_request); +} + +fn check_validator_slots_for_accounts(accounts: usize) { + let accounts = { + let range = 1..=accounts; + + let mut 
tmp: Vec = Vec::with_capacity(accounts); + + for count in range.map(U256::from) { + let secret_key = { + let mut secret_key_bytes = [0; 32]; + count.to_big_endian(&mut secret_key_bytes); + SecretKey::ed25519_from_bytes(secret_key_bytes).expect("should create ed25519 key") + }; + + let public_key = PublicKey::from(&secret_key); + + let account = GenesisAccount::account( + public_key, + Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE), + Some(GenesisValidator::new(Motes::new(ACCOUNT_1_BOND), 80)), + ); + + tmp.push(account) + } + + tmp + }; + + let run_genesis_request = utils::create_run_genesis_request(accounts); + + let mut builder = LmdbWasmTestBuilder::default(); + + builder.run_genesis(run_genesis_request); +} + +#[should_panic(expected = "InvalidValidatorSlots")] +#[ignore] +#[test] +fn should_fail_with_more_accounts_than_slots() { + check_validator_slots_for_accounts(DEFAULT_EXEC_CONFIG.validator_slots() as usize + 1); +} + +#[ignore] +#[test] +fn should_run_genesis_with_exact_validator_slots() { + check_validator_slots_for_accounts(DEFAULT_EXEC_CONFIG.validator_slots() as usize); +} + +#[ignore] +#[test] +fn should_delegate_and_redelegate() { + let system_fund_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *SYSTEM_ADDR, + ARG_AMOUNT => U512::from(SYSTEM_TRANSFER_AMOUNT) + }, + ) + .build(); + + let validator_1_fund_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *NON_FOUNDER_VALIDATOR_1_ADDR, + ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) + }, + ) + .build(); + + let validator_2_fund_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! 
{ + ARG_TARGET => *NON_FOUNDER_VALIDATOR_2_ADDR, + ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) + }, + ) + .build(); + + let delegator_1_fund_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *BID_ACCOUNT_1_ADDR, + ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) + }, + ) + .build(); + + let validator_1_add_bid_request = ExecuteRequestBuilder::standard( + *NON_FOUNDER_VALIDATOR_1_ADDR, + CONTRACT_ADD_BID, + runtime_args! { + ARG_PUBLIC_KEY => NON_FOUNDER_VALIDATOR_1_PK.clone(), + ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1), + ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1, + }, + ) + .build(); + + let validator_2_add_bid_request = ExecuteRequestBuilder::standard( + *NON_FOUNDER_VALIDATOR_2_ADDR, + CONTRACT_ADD_BID, + runtime_args! { + ARG_PUBLIC_KEY => NON_FOUNDER_VALIDATOR_2_PK.clone(), + ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_2), + ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1, + }, + ) + .build(); + + let delegator_1_validator_1_delegate_request = ExecuteRequestBuilder::standard( + *BID_ACCOUNT_1_ADDR, + CONTRACT_DELEGATE, + runtime_args! 
{ + ARG_AMOUNT => U512::from(DELEGATE_AMOUNT_1), + ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(), + ARG_DELEGATOR => BID_ACCOUNT_1_PK.clone(), + }, + ) + .build(); + + let post_genesis_requests = vec![ + system_fund_request, + delegator_1_fund_request, + validator_1_fund_request, + validator_2_fund_request, + validator_1_add_bid_request, + validator_2_add_bid_request, + delegator_1_validator_1_delegate_request, + ]; + + let mut builder = LmdbWasmTestBuilder::default(); + + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + for request in post_genesis_requests { + builder.exec(request).commit().expect_success(); + } + + builder.advance_eras_by_default_auction_delay(); + + let delegator_1_undelegate_purse = builder + .get_entity_by_account_hash(*BID_ACCOUNT_1_ADDR) + .expect("should have default account") + .main_purse(); + + let delegator_1_redelegate_request = ExecuteRequestBuilder::standard( + *BID_ACCOUNT_1_ADDR, + CONTRACT_REDELEGATE, + runtime_args! { + ARG_AMOUNT => U512::from(UNDELEGATE_AMOUNT_1 + DEFAULT_MINIMUM_DELEGATION_AMOUNT), + ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(), + ARG_DELEGATOR => BID_ACCOUNT_1_PK.clone(), + ARG_NEW_VALIDATOR => NON_FOUNDER_VALIDATOR_2_PK.clone() + }, + ) + .build(); + + builder + .exec(delegator_1_redelegate_request) + .commit() + .expect_success(); + + let unbond_kind = UnbondKind::DelegatedPublicKey(BID_ACCOUNT_1_PK.clone()); + let after_redelegation = builder + .get_unbonds() + .get(&unbond_kind) + .expect("must have unbond") + .first() + .expect("must have an entry for the unbond") + .eras() + .len(); + + assert_eq!(1, after_redelegation); + + let delegator_1_purse_balance_before = builder.get_purse_balance(delegator_1_undelegate_purse); + + for _ in 0..=DEFAULT_UNBONDING_DELAY { + let delegator_1_redelegate_purse_balance = + builder.get_purse_balance(delegator_1_undelegate_purse); + assert_eq!( + delegator_1_purse_balance_before, + delegator_1_redelegate_purse_balance + ); + builder.advance_era() + } 
+ + // Since a redelegation has been processed no funds should have transferred back to the purse. + let delegator_1_purse_balance_after = builder.get_purse_balance(delegator_1_undelegate_purse); + + assert_eq!( + delegator_1_purse_balance_before, + delegator_1_purse_balance_after + ); + + let bids = builder.get_bids(); + assert_eq!(bids.len(), 3); + + assert!( + bids.delegators_by_validator_public_key(&NON_FOUNDER_VALIDATOR_1_PK) + .is_none(), + "fully unbonded" + ); + + let delegators = bids + .delegators_by_validator_public_key(&NON_FOUNDER_VALIDATOR_2_PK) + .expect("should have delegators"); + assert_eq!(delegators.len(), 1); + let delegator = bids + .delegator_by_kind( + &NON_FOUNDER_VALIDATOR_2_PK, + &DelegatorKind::PublicKey(BID_ACCOUNT_1_PK.clone()), + ) + .expect("should have delegator"); + let redelegated_amount_1 = delegator.staked_amount(); + assert_eq!( + redelegated_amount_1, + U512::from(DELEGATE_AMOUNT_1), + "expected full unbond" + ); +} + +#[ignore] +#[test] +fn should_handle_redelegation_to_inactive_validator() { + let validator_1_fund_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *NON_FOUNDER_VALIDATOR_1_ADDR, + ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) + }, + ) + .build(); + + let validator_2_fund_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *NON_FOUNDER_VALIDATOR_2_ADDR, + ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) + }, + ) + .build(); + + let delegator_1_fund_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *DELEGATOR_1_ADDR, + ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) + }, + ) + .build(); + + let delegator_2_fund_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! 
{ + ARG_TARGET => *DELEGATOR_2_ADDR, + ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) + }, + ) + .build(); + + let validator_1_add_bid_request = ExecuteRequestBuilder::standard( + *NON_FOUNDER_VALIDATOR_1_ADDR, + CONTRACT_ADD_BID, + runtime_args! { + ARG_PUBLIC_KEY => NON_FOUNDER_VALIDATOR_1_PK.clone(), + ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1), + ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1, + }, + ) + .build(); + + let validator_2_add_bid_request = ExecuteRequestBuilder::standard( + *NON_FOUNDER_VALIDATOR_2_ADDR, + CONTRACT_ADD_BID, + runtime_args! { + ARG_PUBLIC_KEY => NON_FOUNDER_VALIDATOR_2_PK.clone(), + ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_2), + ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1, + }, + ) + .build(); + + let delegator_1_validator_1_delegate_request = ExecuteRequestBuilder::standard( + *DELEGATOR_1_ADDR, + CONTRACT_DELEGATE, + runtime_args! { + ARG_AMOUNT => U512::from(DELEGATE_AMOUNT_1), + ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(), + ARG_DELEGATOR => DELEGATOR_1.clone(), + }, + ) + .build(); + + let delegator_2_validator_1_delegate_request = ExecuteRequestBuilder::standard( + *DELEGATOR_2_ADDR, + CONTRACT_DELEGATE, + runtime_args! 
{ + ARG_AMOUNT => U512::from(DELEGATE_AMOUNT_1), + ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(), + ARG_DELEGATOR => DELEGATOR_2.clone(), + }, + ) + .build(); + + let post_genesis_requests = vec![ + validator_1_fund_request, + validator_2_fund_request, + delegator_1_fund_request, + delegator_2_fund_request, + validator_1_add_bid_request, + validator_2_add_bid_request, + delegator_1_validator_1_delegate_request, + delegator_2_validator_1_delegate_request, + ]; + + let mut builder = LmdbWasmTestBuilder::default(); + + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + for request in post_genesis_requests { + builder.exec(request).commit().expect_success(); + } + + builder.advance_eras_by_default_auction_delay(); + + let invalid_redelegate_request = ExecuteRequestBuilder::standard( + *DELEGATOR_1_ADDR, + CONTRACT_REDELEGATE, + runtime_args! { + ARG_AMOUNT => U512::from(UNDELEGATE_AMOUNT_1 + DEFAULT_MINIMUM_DELEGATION_AMOUNT), + ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(), + ARG_DELEGATOR => DELEGATOR_1.clone(), + ARG_NEW_VALIDATOR => BID_ACCOUNT_1_PK.clone() + }, + ) + .build(); + + builder.exec(invalid_redelegate_request).expect_failure(); + + let error = builder.get_error().expect("expected error"); + let str = format!("{}", error); + assert!( + str.starts_with("ApiError::AuctionError(RedelegationValidatorNotFound)"), + "expected RedelegationValidatorNotFound" + ) +} + +#[ignore] +#[test] +fn should_enforce_minimum_delegation_amount() { + let mut builder = LmdbWasmTestBuilder::default(); + + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let transfer_to_validator_1 = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *NON_FOUNDER_VALIDATOR_1_ADDR, + ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) + }, + ) + .build(); + + let transfer_to_delegator_1 = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! 
{ + ARG_TARGET => *BID_ACCOUNT_1_ADDR, + ARG_AMOUNT => U512::from(BID_ACCOUNT_1_BALANCE) + }, + ) + .build(); + + let post_genesis_request = vec![transfer_to_validator_1, transfer_to_delegator_1]; + + for request in post_genesis_request { + builder.exec(request).expect_success().commit(); + } + + let add_bid_request_1 = ExecuteRequestBuilder::standard( + *NON_FOUNDER_VALIDATOR_1_ADDR, + CONTRACT_ADD_BID, + runtime_args! { + ARG_PUBLIC_KEY => NON_FOUNDER_VALIDATOR_1_PK.clone(), + ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1), + ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1, + }, + ) + .build(); + + builder.exec(add_bid_request_1).expect_success().commit(); + + for _ in 0..=builder.get_auction_delay() { + let step_request = StepRequestBuilder::new() + .with_parent_state_hash(builder.get_post_state_hash()) + .with_protocol_version(ProtocolVersion::V1_0_0) + .with_next_era_id(builder.get_era().successor()) + .with_run_auction(true) + .build(); + assert!( + builder.step(step_request).is_success(), + "must execute step request" + ); + } + + let delegation_request_1 = ExecuteRequestBuilder::standard( + *BID_ACCOUNT_1_ADDR, + CONTRACT_DELEGATE, + runtime_args! { + ARG_AMOUNT => U512::from(100u64), + ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(), + ARG_DELEGATOR => BID_ACCOUNT_1_PK.clone(), + }, + ) + .build(); + + // The delegation amount is below the default value of 500 CSPR, + // therefore the delegation should not succeed. 
+ builder.exec(delegation_request_1).expect_failure(); + + let error = builder.get_error().expect("must get error"); + assert!(matches!( + error, + Error::Exec(ExecError::Revert(ApiError::AuctionError(auction_error))) + if auction_error == AuctionError::DelegationAmountTooSmall as u8)); +} + +#[ignore] +#[test] +fn should_allow_delegations_with_minimal_floor_amount() { + let mut builder = LmdbWasmTestBuilder::default(); + + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let transfer_to_validator_1 = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *NON_FOUNDER_VALIDATOR_1_ADDR, + ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) + }, + ) + .build(); + + let transfer_to_delegator_1 = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *BID_ACCOUNT_1_ADDR, + ARG_AMOUNT => U512::from(BID_ACCOUNT_1_BALANCE) + }, + ) + .build(); + + let transfer_to_delegator_2 = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *BID_ACCOUNT_2_ADDR, + ARG_AMOUNT => U512::from(BID_ACCOUNT_1_BALANCE) + }, + ) + .build(); + + let post_genesis_request = vec![ + transfer_to_validator_1, + transfer_to_delegator_1, + transfer_to_delegator_2, + ]; + + for request in post_genesis_request { + builder.exec(request).expect_success().commit(); + } + + let add_bid_request_1 = ExecuteRequestBuilder::standard( + *NON_FOUNDER_VALIDATOR_1_ADDR, + CONTRACT_ADD_BID, + runtime_args! 
{ + ARG_PUBLIC_KEY => NON_FOUNDER_VALIDATOR_1_PK.clone(), + ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1), + ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1, + }, + ) + .build(); + + builder.exec(add_bid_request_1).expect_success().commit(); + + for _ in 0..=builder.get_auction_delay() { + let step_request = StepRequestBuilder::new() + .with_parent_state_hash(builder.get_post_state_hash()) + .with_protocol_version(ProtocolVersion::V1_0_0) + .with_next_era_id(builder.get_era().successor()) + .with_run_auction(true) + .build(); + + assert!( + builder.step(step_request).is_success(), + "must execute step request" + ); + } + + let delegation_request_1 = ExecuteRequestBuilder::standard( + *BID_ACCOUNT_1_ADDR, + CONTRACT_DELEGATE, + runtime_args! { + ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_DELEGATION_AMOUNT - 1), + ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(), + ARG_DELEGATOR => BID_ACCOUNT_1_PK.clone(), + }, + ) + .build(); + + // The delegation amount is below the default value of 500 CSPR, + // therefore the delegation should not succeed. + builder.exec(delegation_request_1).expect_failure(); + + let error = builder.get_error().expect("must get error"); + + assert!(matches!( + error, + Error::Exec(ExecError::Revert(ApiError::AuctionError(auction_error))) + if auction_error == AuctionError::DelegationAmountTooSmall as u8)); + + let delegation_request_2 = ExecuteRequestBuilder::standard( + *BID_ACCOUNT_2_ADDR, + CONTRACT_DELEGATE, + runtime_args! 
{ + ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_DELEGATION_AMOUNT), + ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(), + ARG_DELEGATOR => BID_ACCOUNT_2_PK.clone(), + }, + ) + .build(); + + builder.exec(delegation_request_2).expect_success().commit(); +} + +#[ignore] +#[test] +fn should_enforce_max_delegators_per_validator_cap() { + let chainspec = ChainspecConfig::default().with_max_delegators_per_validator(2u32); - let accounts = { - let mut tmp: Vec = DEFAULT_ACCOUNTS.clone(); - let account_1 = GenesisAccount::account( - ACCOUNT_1_PK.clone(), - Motes::new(ACCOUNT_1_BALANCE.into()), - Some(GenesisValidator::new( - Motes::new(ACCOUNT_1_BOND.into()), - DelegationRate::zero(), - )), - ); - let account_2 = GenesisAccount::account( - ACCOUNT_2_PK.clone(), - Motes::new(ACCOUNT_2_BALANCE.into()), - Some(GenesisValidator::new( - Motes::new(ACCOUNT_2_BOND.into()), - DelegationRate::zero(), - )), - ); - let delegator_1 = GenesisAccount::delegator( - ACCOUNT_1_PK.clone(), - DELEGATOR_1.clone(), - Motes::new(DELEGATOR_1_BALANCE.into()), - Motes::new(DELEGATOR_1_STAKE.into()), - ); - let orphaned_delegator = GenesisAccount::delegator( - missing_validator, - DELEGATOR_1.clone(), - Motes::new(DELEGATOR_1_BALANCE.into()), - Motes::new(DELEGATOR_1_STAKE.into()), + let data_dir = TempDir::new().expect("should create temp dir"); + let mut builder = LmdbWasmTestBuilder::new_with_config(data_dir.path(), chainspec); + + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let transfer_to_validator_1 = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *NON_FOUNDER_VALIDATOR_1_ADDR, + ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) + }, + ) + .build(); + + let transfer_to_delegator_1 = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! 
{ + ARG_TARGET => *BID_ACCOUNT_1_ADDR, + ARG_AMOUNT => U512::from(BID_ACCOUNT_1_BALANCE) + }, + ) + .build(); + + let transfer_to_delegator_2 = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *BID_ACCOUNT_2_ADDR, + ARG_AMOUNT => U512::from(BID_ACCOUNT_1_BALANCE) + }, + ) + .build(); + + let transfer_to_delegator_3 = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *DELEGATOR_1_ADDR, + ARG_AMOUNT => U512::from(BID_ACCOUNT_1_BALANCE) + }, + ) + .build(); + + let post_genesis_request = vec![ + transfer_to_validator_1, + transfer_to_delegator_1, + transfer_to_delegator_2, + transfer_to_delegator_3, + ]; + + for request in post_genesis_request { + builder.exec(request).expect_success().commit(); + } + + let add_bid_request_1 = ExecuteRequestBuilder::standard( + *NON_FOUNDER_VALIDATOR_1_ADDR, + CONTRACT_ADD_BID, + runtime_args! { + ARG_PUBLIC_KEY => NON_FOUNDER_VALIDATOR_1_PK.clone(), + ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1), + ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1, + }, + ) + .build(); + + builder.exec(add_bid_request_1).expect_success().commit(); + + for _ in 0..=builder.get_auction_delay() { + let step_request = StepRequestBuilder::new() + .with_parent_state_hash(builder.get_post_state_hash()) + .with_protocol_version(ProtocolVersion::V1_0_0) + .with_next_era_id(builder.get_era().successor()) + .with_run_auction(true) + .build(); + + assert!( + builder.step(step_request).is_success(), + "must execute step request" ); - tmp.push(account_1); - tmp.push(account_2); - tmp.push(delegator_1); - tmp.push(orphaned_delegator); - tmp + } + + let delegation_request_1 = ExecuteRequestBuilder::standard( + *BID_ACCOUNT_1_ADDR, + CONTRACT_DELEGATE, + runtime_args! 
{ + ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_DELEGATION_AMOUNT), + ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(), + ARG_DELEGATOR => BID_ACCOUNT_1_PK.clone(), + }, + ) + .build(); + + let delegation_request_2 = ExecuteRequestBuilder::standard( + *BID_ACCOUNT_2_ADDR, + CONTRACT_DELEGATE, + runtime_args! { + ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_DELEGATION_AMOUNT), + ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(), + ARG_DELEGATOR => BID_ACCOUNT_2_PK.clone(), + }, + ) + .build(); + + let delegation_requests = [delegation_request_1, delegation_request_2]; + + for request in delegation_requests { + builder.exec(request).expect_success().commit(); + } + + let delegation_request_3 = ExecuteRequestBuilder::standard( + *DELEGATOR_1_ADDR, + CONTRACT_DELEGATE, + runtime_args! { + ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_DELEGATION_AMOUNT), + ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(), + ARG_DELEGATOR => DELEGATOR_1.clone(), + }, + ) + .build(); + + builder.exec(delegation_request_3).expect_failure(); + + let error = builder.get_error().expect("must get error"); + + assert!(matches!( + error, + Error::Exec(ExecError::Revert(ApiError::AuctionError(auction_error))) + if auction_error == AuctionError::ExceededDelegatorSizeLimit as u8)); + + let delegator_2_staked_amount = { + let bids = builder.get_bids(); + let delegator = bids + .delegator_by_kind( + &NON_FOUNDER_VALIDATOR_1_PK, + &DelegatorKind::PublicKey(BID_ACCOUNT_2_PK.clone()), + ) + .expect("should have delegator bid"); + delegator.staked_amount() }; - let run_genesis_request = utils::create_run_genesis_request(accounts); + let undelegation_request = ExecuteRequestBuilder::standard( + *BID_ACCOUNT_2_ADDR, + CONTRACT_UNDELEGATE, + runtime_args! 
{ + ARG_AMOUNT => delegator_2_staked_amount, + ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(), + ARG_DELEGATOR => BID_ACCOUNT_2_PK.clone(), + }, + ) + .build(); + + builder.exec(undelegation_request).expect_success().commit(); + + let bids = builder.get_bids(); + + let current_delegator_count = bids + .delegators_by_validator_public_key(&NON_FOUNDER_VALIDATOR_1_PK) + .expect("must have bid record") + .iter() + .filter(|x| x.staked_amount() > U512::zero()) + .collect::>() + .len(); + + assert_eq!(current_delegator_count, 1); + + let delegation_request_3 = ExecuteRequestBuilder::standard( + *DELEGATOR_1_ADDR, + CONTRACT_DELEGATE, + runtime_args! { + ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_DELEGATION_AMOUNT), + ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(), + ARG_DELEGATOR => DELEGATOR_1.clone(), + }, + ) + .build(); + + builder.exec(delegation_request_3).expect_success().commit(); + + let bids = builder.get_bids(); + let current_delegator_count = bids + .delegators_by_validator_public_key(&NON_FOUNDER_VALIDATOR_1_PK) + .expect("must have bid record") + .len(); + + assert_eq!(current_delegator_count, 2); +} + +#[ignore] +#[test] +fn should_transfer_to_main_purse_in_case_of_redelegation_past_max_delegation_cap() { + let validator_1_fund_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *NON_FOUNDER_VALIDATOR_1_ADDR, + ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) + }, + ) + .build(); + + let validator_2_fund_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *NON_FOUNDER_VALIDATOR_2_ADDR, + ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) + }, + ) + .build(); + + let transfer_to_delegator_1 = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! 
{ + ARG_TARGET => *BID_ACCOUNT_1_ADDR, + ARG_AMOUNT => U512::from(BID_ACCOUNT_1_BALANCE) + }, + ) + .build(); + + let transfer_to_delegator_2 = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *BID_ACCOUNT_2_ADDR, + ARG_AMOUNT => U512::from(BID_ACCOUNT_1_BALANCE) + }, + ) + .build(); + + let validator_1_add_bid_request = ExecuteRequestBuilder::standard( + *NON_FOUNDER_VALIDATOR_1_ADDR, + CONTRACT_ADD_BID, + runtime_args! { + ARG_PUBLIC_KEY => NON_FOUNDER_VALIDATOR_1_PK.clone(), + ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1), + ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1, + }, + ) + .build(); + + let validator_2_add_bid_request = ExecuteRequestBuilder::standard( + *NON_FOUNDER_VALIDATOR_2_ADDR, + CONTRACT_ADD_BID, + runtime_args! { + ARG_PUBLIC_KEY => NON_FOUNDER_VALIDATOR_2_PK.clone(), + ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_2), + ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1, + }, + ) + .build(); + + let delegator_1_validator_1_delegate_request = ExecuteRequestBuilder::standard( + *BID_ACCOUNT_1_ADDR, + CONTRACT_DELEGATE, + runtime_args! { + ARG_AMOUNT => U512::from(DELEGATE_AMOUNT_1), + ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(), + ARG_DELEGATOR => BID_ACCOUNT_1_PK.clone(), + }, + ) + .build(); + + let delegator_1_validator_2_delegate_request = ExecuteRequestBuilder::standard( + *BID_ACCOUNT_2_ADDR, + CONTRACT_DELEGATE, + runtime_args! 
{ + ARG_AMOUNT => U512::from(DELEGATE_AMOUNT_1), + ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_2_PK.clone(), + ARG_DELEGATOR => BID_ACCOUNT_2_PK.clone(), + }, + ) + .build(); - let mut builder = InMemoryWasmTestBuilder::default(); + let post_genesis_requests = vec![ + validator_1_fund_request, + validator_2_fund_request, + transfer_to_delegator_1, + transfer_to_delegator_2, + validator_1_add_bid_request, + validator_2_add_bid_request, + delegator_1_validator_1_delegate_request, + delegator_1_validator_2_delegate_request, + ]; - builder.run_genesis(&run_genesis_request); -} + let chainspec = ChainspecConfig::default().with_max_delegators_per_validator(1u32); -#[should_panic(expected = "DuplicatedDelegatorEntry")] -#[ignore] -#[test] -fn should_validate_duplicated_genesis_delegators() { - let accounts = { - let mut tmp: Vec = DEFAULT_ACCOUNTS.clone(); - let account_1 = GenesisAccount::account( - ACCOUNT_1_PK.clone(), - Motes::new(ACCOUNT_1_BALANCE.into()), - Some(GenesisValidator::new( - Motes::new(ACCOUNT_1_BOND.into()), - DelegationRate::zero(), - )), - ); - let account_2 = GenesisAccount::account( - ACCOUNT_2_PK.clone(), - Motes::new(ACCOUNT_2_BALANCE.into()), - Some(GenesisValidator::new( - Motes::new(ACCOUNT_2_BOND.into()), - DelegationRate::zero(), - )), - ); - let delegator_1 = GenesisAccount::delegator( - ACCOUNT_1_PK.clone(), - DELEGATOR_1.clone(), - Motes::new(DELEGATOR_1_BALANCE.into()), - Motes::new(DELEGATOR_1_STAKE.into()), - ); - let duplicated_delegator_1 = GenesisAccount::delegator( - ACCOUNT_1_PK.clone(), - DELEGATOR_1.clone(), - Motes::new(DELEGATOR_1_BALANCE.into()), - Motes::new(DELEGATOR_1_STAKE.into()), - ); - let duplicated_delegator_2 = GenesisAccount::delegator( - ACCOUNT_1_PK.clone(), - DELEGATOR_2.clone(), - Motes::new(DELEGATOR_2_BALANCE.into()), - Motes::new(DELEGATOR_2_STAKE.into()), - ); - tmp.push(account_1); - tmp.push(account_2); - tmp.push(delegator_1); - tmp.push(duplicated_delegator_1); - tmp.push(duplicated_delegator_2); - tmp - }; + 
let data_dir = TempDir::new().expect("should create temp dir"); + let mut builder = LmdbWasmTestBuilder::new_with_config(data_dir.path(), chainspec); - let run_genesis_request = utils::create_run_genesis_request(accounts); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); - let mut builder = InMemoryWasmTestBuilder::default(); + for request in post_genesis_requests { + builder.exec(request).expect_success().commit(); + } - builder.run_genesis(&run_genesis_request); -} + builder.advance_eras_by_default_auction_delay(); -#[should_panic(expected = "InvalidDelegationRate")] -#[ignore] -#[test] -fn should_validate_delegation_rate_of_genesis_validator() { - let accounts = { - let mut tmp: Vec = DEFAULT_ACCOUNTS.clone(); - let account_1 = GenesisAccount::account( - ACCOUNT_1_PK.clone(), - Motes::new(ACCOUNT_1_BALANCE.into()), - Some(GenesisValidator::new( - Motes::new(ACCOUNT_1_BOND.into()), - DelegationRate::max_value(), - )), - ); - tmp.push(account_1); - tmp - }; + let delegator_1_main_purse = builder + .get_entity_by_account_hash(*BID_ACCOUNT_1_ADDR) + .expect("should have default account") + .main_purse(); - let run_genesis_request = utils::create_run_genesis_request(accounts); + let delegator_1_redelegate_request = ExecuteRequestBuilder::standard( + *BID_ACCOUNT_1_ADDR, + CONTRACT_REDELEGATE, + runtime_args! 
{ + ARG_AMOUNT => U512::from(UNDELEGATE_AMOUNT_1 + DEFAULT_MINIMUM_DELEGATION_AMOUNT), + ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(), + ARG_DELEGATOR => BID_ACCOUNT_1_PK.clone(), + ARG_NEW_VALIDATOR => NON_FOUNDER_VALIDATOR_2_PK.clone() + }, + ) + .build(); - let mut builder = InMemoryWasmTestBuilder::default(); + builder + .exec(delegator_1_redelegate_request) + .commit() + .expect_success(); - builder.run_genesis(&run_genesis_request); -} + let unbond_kind = UnbondKind::DelegatedPublicKey(BID_ACCOUNT_1_PK.clone()); + let after_redelegation = builder + .get_unbonds() + .get(&unbond_kind) + .expect("must have unbond") + .first() + .expect("must have at least one entry") + .eras() + .len(); -#[should_panic(expected = "InvalidBondAmount")] -#[ignore] -#[test] -fn should_validate_bond_amount_of_genesis_validator() { - let accounts = { - let mut tmp: Vec = DEFAULT_ACCOUNTS.clone(); - let account_1 = GenesisAccount::account( - ACCOUNT_1_PK.clone(), - Motes::new(ACCOUNT_1_BALANCE.into()), - Some(GenesisValidator::new(Motes::zero(), DelegationRate::zero())), - ); - tmp.push(account_1); - tmp - }; + assert_eq!(1, after_redelegation); - let run_genesis_request = utils::create_run_genesis_request(accounts); + let delegator_1_purse_balance_before = builder.get_purse_balance(delegator_1_main_purse); - let mut builder = InMemoryWasmTestBuilder::default(); + for _ in 0..=DEFAULT_UNBONDING_DELAY { + let delegator_1_redelegate_purse_balance = + builder.get_purse_balance(delegator_1_main_purse); + assert_eq!( + delegator_1_purse_balance_before, + delegator_1_redelegate_purse_balance + ); - builder.run_genesis(&run_genesis_request); + builder.advance_era(); + } } #[ignore] #[test] -fn should_setup_genesis_delegators() { - let accounts = { - let mut tmp: Vec = DEFAULT_ACCOUNTS.clone(); - let account_1 = GenesisAccount::account( - ACCOUNT_1_PK.clone(), - Motes::new(ACCOUNT_1_BALANCE.into()), - Some(GenesisValidator::new(Motes::new(ACCOUNT_1_BOND.into()), 80)), - ); - let 
account_2 = GenesisAccount::account( - ACCOUNT_2_PK.clone(), - Motes::new(ACCOUNT_2_BALANCE.into()), - Some(GenesisValidator::new( - Motes::new(ACCOUNT_2_BOND.into()), - DelegationRate::zero(), - )), - ); - let delegator_1 = GenesisAccount::delegator( - ACCOUNT_1_PK.clone(), - DELEGATOR_1.clone(), - Motes::new(DELEGATOR_1_BALANCE.into()), - Motes::new(DELEGATOR_1_STAKE.into()), - ); - tmp.push(account_1); - tmp.push(account_2); - tmp.push(delegator_1); - tmp - }; +fn should_delegate_and_redelegate_with_eviction_regression_test() { + let system_fund_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *SYSTEM_ADDR, + ARG_AMOUNT => U512::from(SYSTEM_TRANSFER_AMOUNT) + }, + ) + .build(); - let run_genesis_request = utils::create_run_genesis_request(accounts); + let validator_1_fund_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *NON_FOUNDER_VALIDATOR_1_ADDR, + ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) + }, + ) + .build(); - let mut builder = InMemoryWasmTestBuilder::default(); + let validator_2_fund_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *NON_FOUNDER_VALIDATOR_2_ADDR, + ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) + }, + ) + .build(); - builder.run_genesis(&run_genesis_request); + let delegator_1_fund_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! 
{ + ARG_TARGET => *BID_ACCOUNT_1_ADDR, + ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) + }, + ) + .build(); - let _account_1 = builder - .get_account(*ACCOUNT_1_ADDR) - .expect("should install account 1"); - let _account_2 = builder - .get_account(*ACCOUNT_2_ADDR) - .expect("should install account 2"); + let validator_1_add_bid_request = ExecuteRequestBuilder::standard( + *NON_FOUNDER_VALIDATOR_1_ADDR, + CONTRACT_ADD_BID, + runtime_args! { + ARG_PUBLIC_KEY => NON_FOUNDER_VALIDATOR_1_PK.clone(), + ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1), + ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1, + }, + ) + .build(); - let delegator_1 = builder - .get_account(*DELEGATOR_1_ADDR) - .expect("should install delegator 1"); - assert_eq!( - builder.get_purse_balance(delegator_1.main_purse()), - U512::from(DELEGATOR_1_BALANCE) - ); + let validator_2_add_bid_request = ExecuteRequestBuilder::standard( + *NON_FOUNDER_VALIDATOR_2_ADDR, + CONTRACT_ADD_BID, + runtime_args! { + ARG_PUBLIC_KEY => NON_FOUNDER_VALIDATOR_2_PK.clone(), + ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_2), + ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1, + }, + ) + .build(); - let bids: Bids = builder.get_bids(); - assert_eq!( - bids.keys().cloned().collect::>(), - BTreeSet::from_iter(vec![ACCOUNT_1_PK.clone(), ACCOUNT_2_PK.clone(),]) - ); + let delegator_1_validator_1_delegate_request = ExecuteRequestBuilder::standard( + *BID_ACCOUNT_1_ADDR, + CONTRACT_DELEGATE, + runtime_args! 
{ + ARG_AMOUNT => U512::from(DELEGATE_AMOUNT_1), + ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(), + ARG_DELEGATOR => BID_ACCOUNT_1_PK.clone(), + }, + ) + .build(); - let account_1_bid_entry = bids.get(&*ACCOUNT_1_PK).expect("should have account 1 bid"); - assert_eq!(*account_1_bid_entry.delegation_rate(), 80); - assert_eq!(account_1_bid_entry.delegators().len(), 1); + let post_genesis_requests = vec![ + system_fund_request, + delegator_1_fund_request, + validator_1_fund_request, + validator_2_fund_request, + validator_1_add_bid_request, + validator_2_add_bid_request, + delegator_1_validator_1_delegate_request, + ]; - let account_1_delegator_1_entry = account_1_bid_entry - .delegators() - .get(&*DELEGATOR_1) - .expect("account 1 should have delegator 1"); - assert_eq!( - *account_1_delegator_1_entry.staked_amount(), - U512::from(DELEGATOR_1_STAKE) - ); -} + let mut builder = LmdbWasmTestBuilder::default(); -#[ignore] -#[test] -fn should_not_partially_undelegate_uninitialized_vesting_schedule() { - let accounts = { - let mut tmp: Vec = DEFAULT_ACCOUNTS.clone(); - let validator_1 = GenesisAccount::account( - VALIDATOR_1.clone(), - Motes::new(VALIDATOR_1_STAKE.into()), - Some(GenesisValidator::new( - Motes::new(VALIDATOR_1_STAKE.into()), - DelegationRate::zero(), - )), - ); - let delegator_1 = GenesisAccount::delegator( - VALIDATOR_1.clone(), - DELEGATOR_1.clone(), - Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE.into()), - Motes::new(DELEGATOR_1_STAKE.into()), - ); - tmp.push(validator_1); - tmp.push(delegator_1); - tmp - }; + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + for request in post_genesis_requests { + builder.exec(request).commit().expect_success(); + } + + let delegator_1_redelegate_request = ExecuteRequestBuilder::standard( + *BID_ACCOUNT_1_ADDR, + CONTRACT_REDELEGATE, + runtime_args! 
{ + ARG_AMOUNT => U512::from(DELEGATE_AMOUNT_1), + ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(), + ARG_DELEGATOR => BID_ACCOUNT_1_PK.clone(), + ARG_NEW_VALIDATOR => NON_FOUNDER_VALIDATOR_2_PK.clone() + }, + ) + .build(); + + builder + .exec(delegator_1_redelegate_request) + .commit() + .expect_success(); - let run_genesis_request = utils::create_run_genesis_request(accounts); + builder.advance_eras_by(DEFAULT_UNBONDING_DELAY); - let mut builder = InMemoryWasmTestBuilder::default(); + // Advance one more era, this is the point where the redelegate request is processed (era >= + // unbonding_delay + 1) + builder.advance_era(); - builder.run_genesis(&run_genesis_request); + let bids = builder.get_bids(); + assert!(bids + .delegator_by_kind( + &NON_FOUNDER_VALIDATOR_1_PK, + &DelegatorKind::PublicKey(BID_ACCOUNT_1_PK.clone()), + ) + .is_none()); + assert!(bids + .delegator_by_kind( + &NON_FOUNDER_VALIDATOR_2_PK, + &DelegatorKind::PublicKey(BID_ACCOUNT_1_PK.clone()), + ) + .is_some()); +} - let fund_delegator_account = ExecuteRequestBuilder::standard( +#[ignore] +#[test] +fn should_increase_existing_delegation_when_limit_exceeded() { + let chainspec = ChainspecConfig::default().with_max_delegators_per_validator(2); + + let data_dir = TempDir::new().expect("should create temp dir"); + let mut builder = LmdbWasmTestBuilder::new_with_config(data_dir.path(), chainspec); + + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let transfer_to_validator_1 = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, CONTRACT_TRANSFER_TO_ACCOUNT, runtime_args! 
{ - ARG_TARGET => *DELEGATOR_1_ADDR, - ARG_AMOUNT => U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE) + ARG_TARGET => *NON_FOUNDER_VALIDATOR_1_ADDR, + ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) }, ) .build(); - builder - .exec(fund_delegator_account) - .commit() - .expect_success(); - let partial_undelegate = ExecuteRequestBuilder::standard( - *DELEGATOR_1_ADDR, - CONTRACT_UNDELEGATE, + let transfer_to_delegator_1 = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, runtime_args! { - auction::ARG_VALIDATOR => VALIDATOR_1.clone(), - auction::ARG_DELEGATOR => DELEGATOR_1.clone(), - ARG_AMOUNT => U512::from(DELEGATOR_1_STAKE - 1), + ARG_TARGET => *BID_ACCOUNT_1_ADDR, + ARG_AMOUNT => U512::from(BID_ACCOUNT_1_BALANCE) }, ) .build(); - builder.exec(partial_undelegate).commit(); - let error = { - let response = builder - .get_exec_results() - .last() - .expect("should have last exec result"); - let exec_response = response.last().expect("should have response"); - exec_response.as_error().expect("should have error") - }; + let transfer_to_delegator_2 = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *BID_ACCOUNT_2_ADDR, + ARG_AMOUNT => U512::from(BID_ACCOUNT_1_BALANCE) + }, + ) + .build(); - assert!(matches!( - error, - engine_state::Error::Exec(execution::Error::Revert(ApiError::AuctionError(auction_error))) - if *auction_error == system::auction::Error::DelegatorFundsLocked as u8 - )); -} + let transfer_to_delegator_3 = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! 
{ + ARG_TARGET => *DELEGATOR_1_ADDR, + ARG_AMOUNT => U512::from(BID_ACCOUNT_1_BALANCE) + }, + ) + .build(); -#[ignore] -#[test] -fn should_not_fully_undelegate_uninitialized_vesting_schedule() { - let accounts = { - let mut tmp: Vec = DEFAULT_ACCOUNTS.clone(); - let validator_1 = GenesisAccount::account( - VALIDATOR_1.clone(), - Motes::new(VALIDATOR_1_STAKE.into()), - Some(GenesisValidator::new( - Motes::new(VALIDATOR_1_STAKE.into()), - DelegationRate::zero(), - )), - ); - let delegator_1 = GenesisAccount::delegator( - VALIDATOR_1.clone(), - DELEGATOR_1.clone(), - Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE.into()), - Motes::new(DELEGATOR_1_STAKE.into()), - ); - tmp.push(validator_1); - tmp.push(delegator_1); - tmp - }; + let post_genesis_request = vec![ + transfer_to_validator_1, + transfer_to_delegator_1, + transfer_to_delegator_2, + transfer_to_delegator_3, + ]; - let run_genesis_request = utils::create_run_genesis_request(accounts); + for request in post_genesis_request { + builder.exec(request).expect_success().commit(); + } + + let add_bid_request_1 = ExecuteRequestBuilder::standard( + *NON_FOUNDER_VALIDATOR_1_ADDR, + CONTRACT_ADD_BID, + runtime_args! 
{ + ARG_PUBLIC_KEY => NON_FOUNDER_VALIDATOR_1_PK.clone(), + ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1), + ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1, + }, + ) + .build(); - let mut builder = InMemoryWasmTestBuilder::default(); + builder.exec(add_bid_request_1).expect_success().commit(); - builder.run_genesis(&run_genesis_request); + for _ in 0..=builder.get_auction_delay() { + let step_request = StepRequestBuilder::new() + .with_parent_state_hash(builder.get_post_state_hash()) + .with_protocol_version(ProtocolVersion::V1_0_0) + .with_next_era_id(builder.get_era().successor()) + .with_run_auction(true) + .build(); - let fund_delegator_account = ExecuteRequestBuilder::standard( - *DEFAULT_ACCOUNT_ADDR, - CONTRACT_TRANSFER_TO_ACCOUNT, + assert!( + builder.step(step_request).is_success(), + "must execute step request" + ); + } + + let delegation_request_1 = ExecuteRequestBuilder::standard( + *BID_ACCOUNT_1_ADDR, + CONTRACT_DELEGATE, runtime_args! { - ARG_TARGET => *DELEGATOR_1_ADDR, - ARG_AMOUNT => U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE) + ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_DELEGATION_AMOUNT), + ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(), + ARG_DELEGATOR => BID_ACCOUNT_1_PK.clone(), }, ) .build(); - builder - .exec(fund_delegator_account) - .commit() - .expect_success(); - let full_undelegate = ExecuteRequestBuilder::standard( + let delegation_request_2 = ExecuteRequestBuilder::standard( + *BID_ACCOUNT_2_ADDR, + CONTRACT_DELEGATE, + runtime_args! { + ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_DELEGATION_AMOUNT), + ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(), + ARG_DELEGATOR => BID_ACCOUNT_2_PK.clone(), + }, + ) + .build(); + + let delegation_requests = [delegation_request_1, delegation_request_2]; + + for request in delegation_requests { + builder.exec(request).expect_success().commit(); + } + + let delegation_request_3 = ExecuteRequestBuilder::standard( *DELEGATOR_1_ADDR, - CONTRACT_UNDELEGATE, + CONTRACT_DELEGATE, runtime_args! 
{ - auction::ARG_VALIDATOR => VALIDATOR_1.clone(), - auction::ARG_DELEGATOR => DELEGATOR_1.clone(), - ARG_AMOUNT => U512::from(DELEGATOR_1_STAKE), + ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_DELEGATION_AMOUNT), + ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(), + ARG_DELEGATOR => DELEGATOR_1.clone(), }, ) .build(); - builder.exec(full_undelegate).commit(); - let error = { - let response = builder - .get_exec_results() - .last() - .expect("should have last exec result"); - let exec_response = response.last().expect("should have response"); - exec_response.as_error().expect("should have error") - }; + builder.exec(delegation_request_3).expect_failure(); + + let error = builder.get_error().expect("must get error"); assert!(matches!( error, - engine_state::Error::Exec(execution::Error::Revert(ApiError::AuctionError(auction_error))) - if *auction_error == system::auction::Error::DelegatorFundsLocked as u8 - )); + Error::Exec(ExecError::Revert(ApiError::AuctionError(auction_error))) + if auction_error == AuctionError::ExceededDelegatorSizeLimit as u8)); + + // The validator already has the maximum number of delegators allowed. However, this is a + // delegator that already delegated, so their bid should just be increased. + let delegation_request_2_repeat = ExecuteRequestBuilder::standard( + *BID_ACCOUNT_2_ADDR, + CONTRACT_DELEGATE, + runtime_args! 
{ + ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_DELEGATION_AMOUNT), + ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(), + ARG_DELEGATOR => BID_ACCOUNT_2_PK.clone(), + }, + ) + .build(); + + builder + .exec(delegation_request_2_repeat) + .expect_success() + .commit(); } #[ignore] #[test] -fn should_not_undelegate_vfta_holder_stake() { - let accounts = { - let mut tmp: Vec = DEFAULT_ACCOUNTS.clone(); - let validator_1 = GenesisAccount::account( - VALIDATOR_1.clone(), - Motes::new(VALIDATOR_1_STAKE.into()), - Some(GenesisValidator::new( - Motes::new(VALIDATOR_1_STAKE.into()), - DelegationRate::zero(), - )), - ); - let delegator_1 = GenesisAccount::delegator( - VALIDATOR_1.clone(), - DELEGATOR_1.clone(), - Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE.into()), - Motes::new(DELEGATOR_1_STAKE.into()), - ); - tmp.push(validator_1); - tmp.push(delegator_1); - tmp - }; - - let run_genesis_request = utils::create_run_genesis_request(accounts); +fn should_fail_bid_public_key_change_if_conflicting_validator_bid_exists() { + let system_fund_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *SYSTEM_ADDR, + ARG_AMOUNT => U512::from(SYSTEM_TRANSFER_AMOUNT) + }, + ) + .build(); - let mut builder = InMemoryWasmTestBuilder::default(); + let validator_1_fund_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *NON_FOUNDER_VALIDATOR_1_ADDR, + ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) + }, + ) + .build(); - builder.run_genesis(&run_genesis_request); + let validator_2_fund_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! 
{ + ARG_TARGET => *NON_FOUNDER_VALIDATOR_2_ADDR, + ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) + }, + ) + .build(); - let post_genesis_requests = { - let fund_delegator_account = ExecuteRequestBuilder::standard( - *DEFAULT_ACCOUNT_ADDR, - CONTRACT_TRANSFER_TO_ACCOUNT, - runtime_args! { - ARG_TARGET => *DELEGATOR_1_ADDR, - ARG_AMOUNT => U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE) - }, - ) - .build(); + let delegator_1_fund_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *BID_ACCOUNT_1_ADDR, + ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) + }, + ) + .build(); - let fund_system_account = ExecuteRequestBuilder::standard( - *DEFAULT_ACCOUNT_ADDR, - CONTRACT_TRANSFER_TO_ACCOUNT, - runtime_args! { - ARG_TARGET => *SYSTEM_ADDR, - ARG_AMOUNT => U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE) - }, - ) - .build(); + let validator_1_add_bid_request = ExecuteRequestBuilder::standard( + *NON_FOUNDER_VALIDATOR_1_ADDR, + CONTRACT_ADD_BID, + runtime_args! { + ARG_PUBLIC_KEY => NON_FOUNDER_VALIDATOR_1_PK.clone(), + ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1), + ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1, + }, + ) + .build(); - vec![fund_system_account, fund_delegator_account] - }; + let validator_2_add_bid_request = ExecuteRequestBuilder::standard( + *NON_FOUNDER_VALIDATOR_2_ADDR, + CONTRACT_ADD_BID, + runtime_args! { + ARG_PUBLIC_KEY => NON_FOUNDER_VALIDATOR_2_PK.clone(), + ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_2), + ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1, + }, + ) + .build(); - for post_genesis_request in post_genesis_requests { - builder.exec(post_genesis_request).commit().expect_success(); - } + let delegator_1_validator_1_delegate_request = ExecuteRequestBuilder::standard( + *BID_ACCOUNT_1_ADDR, + CONTRACT_DELEGATE, + runtime_args! 
{ + ARG_AMOUNT => U512::from(DELEGATE_AMOUNT_1), + ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(), + ARG_DELEGATOR => BID_ACCOUNT_1_PK.clone(), + }, + ) + .build(); - { - let bids: Bids = builder.get_bids(); - let delegator = bids - .get(&*VALIDATOR_1) - .expect("should have validator") - .delegators() - .get(&*DELEGATOR_1) - .expect("should have delegator"); - let vesting_schedule = delegator - .vesting_schedule() - .expect("should have vesting schedule"); - assert_eq!(vesting_schedule.locked_amounts(), None); + let post_genesis_requests = vec![ + system_fund_request, + delegator_1_fund_request, + validator_1_fund_request, + validator_2_fund_request, + validator_1_add_bid_request, + validator_2_add_bid_request, + delegator_1_validator_1_delegate_request, + ]; + + let mut builder = LmdbWasmTestBuilder::default(); + + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + for request in post_genesis_requests { + builder.exec(request).commit().expect_success(); } - builder.run_auction(WEEK_TIMESTAMPS[0], Vec::new()); + builder.advance_eras_by_default_auction_delay(); - let partial_unbond = ExecuteRequestBuilder::standard( - *DELEGATOR_1_ADDR, - CONTRACT_UNDELEGATE, + let change_bid_public_key_request = ExecuteRequestBuilder::standard( + *NON_FOUNDER_VALIDATOR_1_ADDR, + CONTRACT_CHANGE_BID_PUBLIC_KEY, runtime_args! 
{ - auction::ARG_VALIDATOR => VALIDATOR_1.clone(), - auction::ARG_DELEGATOR => DELEGATOR_1.clone(), - ARG_AMOUNT => U512::from(DELEGATOR_1_STAKE - 1), + ARG_PUBLIC_KEY => NON_FOUNDER_VALIDATOR_1_PK.clone(), + ARG_NEW_PUBLIC_KEY => NON_FOUNDER_VALIDATOR_2_PK.clone() }, ) .build(); - { - let bids: Bids = builder.get_bids(); - let delegator = bids - .get(&*VALIDATOR_1) - .expect("should have validator") - .delegators() - .get(&*DELEGATOR_1) - .expect("should have delegator"); - let vesting_schedule = delegator - .vesting_schedule() - .expect("should have vesting schedule"); - assert!(matches!(vesting_schedule.locked_amounts(), Some(_))); - } - - builder.exec(partial_unbond).commit(); - let error = { - let response = builder - .get_exec_results() - .last() - .expect("should have last exec result"); - let exec_response = response.last().expect("should have response"); - exec_response.as_error().expect("should have error") - }; + builder.exec(change_bid_public_key_request).expect_failure(); + let error = builder.get_error().expect("must get error"); assert!(matches!( error, - engine_state::Error::Exec(execution::Error::Revert(ApiError::AuctionError(auction_error))) - if *auction_error == system::auction::Error::DelegatorFundsLocked as u8 - )); + Error::Exec(ExecError::Revert(ApiError::AuctionError(auction_error))) + if auction_error == AuctionError::ValidatorBidExistsAlready as u8)); } #[ignore] #[test] -fn should_release_vfta_holder_stake() { - const EXPECTED_WEEKLY_RELEASE: u64 = DELEGATOR_1_STAKE / 14; - - const EXPECTED_REMAINDER: u64 = 12; - - const EXPECTED_LOCKED_AMOUNTS: [u64; 14] = [ - 1392858, 1285716, 1178574, 1071432, 964290, 857148, 750006, 642864, 535722, 428580, 321438, - 214296, 107154, 0, - ]; - - let expected_locked_amounts: Vec = EXPECTED_LOCKED_AMOUNTS - .iter() - .cloned() - .map(U512::from) - .collect(); - - let expect_undelegate_success = |builder: &mut InMemoryWasmTestBuilder, amount: u64| { - let partial_unbond = ExecuteRequestBuilder::standard( 
- *DELEGATOR_1_ADDR, - CONTRACT_UNDELEGATE, - runtime_args! { - auction::ARG_VALIDATOR => ACCOUNT_1_PK.clone(), - auction::ARG_DELEGATOR => DELEGATOR_1.clone(), - ARG_AMOUNT => U512::from(amount), - }, - ) - .build(); - - builder.exec(partial_unbond).commit().expect_success(); - }; - - let expect_undelegate_failure = |builder: &mut InMemoryWasmTestBuilder, amount: u64| { - let full_undelegate = ExecuteRequestBuilder::standard( - *DELEGATOR_1_ADDR, - CONTRACT_UNDELEGATE, - runtime_args! { - auction::ARG_VALIDATOR => ACCOUNT_1_PK.clone(), - auction::ARG_DELEGATOR => DELEGATOR_1.clone(), - ARG_AMOUNT => U512::from(amount), - }, - ) - .build(); - - builder.exec(full_undelegate).commit(); - - let error = { - let response = builder - .get_exec_results() - .last() - .expect("should have last exec result"); - let exec_response = response.last().expect("should have response"); - exec_response.as_error().expect("should have error") - }; - - assert!( - matches!( - error, - engine_state::Error::Exec(execution::Error::Revert(ApiError::AuctionError(auction_error))) - if *auction_error == system::auction::Error::DelegatorFundsLocked as u8 - ), - "{:?}", - error - ); - }; - - let accounts = { - let mut tmp: Vec = DEFAULT_ACCOUNTS.clone(); - let account_1 = GenesisAccount::account( - ACCOUNT_1_PK.clone(), - Motes::new(ACCOUNT_1_BALANCE.into()), - Some(GenesisValidator::new( - Motes::new(ACCOUNT_1_BOND.into()), - DelegationRate::zero(), - )), - ); - let delegator_1 = GenesisAccount::delegator( - ACCOUNT_1_PK.clone(), - DELEGATOR_1.clone(), - Motes::new(DELEGATOR_1_BALANCE.into()), - Motes::new(DELEGATOR_1_STAKE.into()), - ); - tmp.push(account_1); - tmp.push(delegator_1); - tmp - }; - - let run_genesis_request = utils::create_run_genesis_request(accounts); - - let mut builder = InMemoryWasmTestBuilder::default(); - - builder.run_genesis(&run_genesis_request); - - let fund_delegator_account = ExecuteRequestBuilder::standard( +fn should_change_validator_bid_public_key() { + let 
system_fund_request = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, CONTRACT_TRANSFER_TO_ACCOUNT, runtime_args! { - ARG_TARGET => *DELEGATOR_1_ADDR, - ARG_AMOUNT => U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE) + ARG_TARGET => *SYSTEM_ADDR, + ARG_AMOUNT => U512::from(SYSTEM_TRANSFER_AMOUNT) }, ) .build(); - builder - .exec(fund_delegator_account) - .commit() - .expect_success(); - let fund_system_account = ExecuteRequestBuilder::standard( + let validator_1_fund_request = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, CONTRACT_TRANSFER_TO_ACCOUNT, runtime_args! { - ARG_TARGET => *SYSTEM_ADDR, - ARG_AMOUNT => U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE) + ARG_TARGET => *NON_FOUNDER_VALIDATOR_1_ADDR, + ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) }, ) .build(); - builder.exec(fund_system_account).commit().expect_success(); - - // Check bid and its vesting schedule - { - let bids: Bids = builder.get_bids(); - assert_eq!(bids.len(), 1); - - let bid_entry = bids.get(&ACCOUNT_1_PK).unwrap(); - let entry = bid_entry.delegators().get(&*DELEGATOR_1).unwrap(); + let validator_2_fund_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *NON_FOUNDER_VALIDATOR_2_ADDR, + ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) + }, + ) + .build(); - let vesting_schedule = entry.vesting_schedule().unwrap(); + let validator_3_fund_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *NON_FOUNDER_VALIDATOR_3_ADDR, + ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) + }, + ) + .build(); - let initial_release = vesting_schedule.initial_release_timestamp_millis(); - assert_eq!(initial_release, EXPECTED_INITIAL_RELEASE_TIMESTAMP_MILLIS); + let delegator_1_fund_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! 
{ + ARG_TARGET => *BID_ACCOUNT_1_ADDR, + ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) + }, + ) + .build(); - let locked_amounts = vesting_schedule.locked_amounts().map(|arr| arr.to_vec()); - assert!(locked_amounts.is_none()); - } + let delegator_2_fund_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *BID_ACCOUNT_2_ADDR, + ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) + }, + ) + .build(); - builder.run_auction(DEFAULT_GENESIS_TIMESTAMP_MILLIS, Vec::new()); + let validator_1_add_bid_request = ExecuteRequestBuilder::standard( + *NON_FOUNDER_VALIDATOR_1_ADDR, + CONTRACT_ADD_BID, + runtime_args! { + ARG_PUBLIC_KEY => NON_FOUNDER_VALIDATOR_1_PK.clone(), + ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1), + ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1, + }, + ) + .build(); - { - // Attempt unbond of one mote - expect_undelegate_failure(&mut builder, u64::one()); - } + let validator_3_add_bid_request = ExecuteRequestBuilder::standard( + *NON_FOUNDER_VALIDATOR_3_ADDR, + CONTRACT_ADD_BID, + runtime_args! { + ARG_PUBLIC_KEY => NON_FOUNDER_VALIDATOR_3_PK.clone(), + ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_3), + ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_3, + }, + ) + .build(); - builder.run_auction(WEEK_TIMESTAMPS[0], Vec::new()); + let delegator_1_validator_1_delegate_request = ExecuteRequestBuilder::standard( + *BID_ACCOUNT_1_ADDR, + CONTRACT_DELEGATE, + runtime_args! { + ARG_AMOUNT => U512::from(DELEGATE_AMOUNT_1), + ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(), + ARG_DELEGATOR => BID_ACCOUNT_1_PK.clone(), + }, + ) + .build(); - // Check bid and its vesting schedule - { - let bids: Bids = builder.get_bids(); - assert_eq!(bids.len(), 1); + let delegator_2_validator_1_delegate_request = ExecuteRequestBuilder::standard( + *BID_ACCOUNT_2_ADDR, + CONTRACT_DELEGATE, + runtime_args! 
{ + ARG_AMOUNT => U512::from(DELEGATE_AMOUNT_2), + ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(), + ARG_DELEGATOR => BID_ACCOUNT_2_PK.clone(), + }, + ) + .build(); - let bid_entry = bids.get(&ACCOUNT_1_PK).unwrap(); - let entry = bid_entry.delegators().get(&*DELEGATOR_1).unwrap(); + let post_genesis_requests = vec![ + system_fund_request, + delegator_1_fund_request, + delegator_2_fund_request, + validator_1_fund_request, + validator_2_fund_request, + validator_3_fund_request, + validator_1_add_bid_request, + validator_3_add_bid_request, + delegator_1_validator_1_delegate_request, + delegator_2_validator_1_delegate_request, + ]; - let vesting_schedule = entry.vesting_schedule().unwrap(); + let mut builder = LmdbWasmTestBuilder::default(); - let initial_release = vesting_schedule.initial_release_timestamp_millis(); - assert_eq!(initial_release, EXPECTED_INITIAL_RELEASE_TIMESTAMP_MILLIS); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); - let locked_amounts = vesting_schedule.locked_amounts().map(|arr| arr.to_vec()); - assert_eq!(locked_amounts, Some(expected_locked_amounts)); + for request in post_genesis_requests { + builder.exec(request).commit().expect_success(); } - let mut total_unbonded = 0; - - { - // Attempt full unbond - expect_undelegate_failure(&mut builder, DELEGATOR_1_STAKE); - - // Attempt unbond of released amount - expect_undelegate_success(&mut builder, EXPECTED_WEEKLY_RELEASE); + builder.advance_eras_by_default_auction_delay(); + + // redelegate funds to validator 3 + // NOTE: previously, this would leave a remaining delegation to the original delegator behind + // with less than that min delegation bid. 
Under the new logic if the remaining del bid amount + // would be less than the min, it gets converted to a full unbond instead of a partial unbond + let attempted_partial_unbond_redlegate_amount = + U512::from(UNDELEGATE_AMOUNT_1 + DEFAULT_MINIMUM_DELEGATION_AMOUNT); + let actual_delegated_amount = U512::from(DELEGATE_AMOUNT_1); + assert!( + attempted_partial_unbond_redlegate_amount < actual_delegated_amount, + "attempted partial amount should be less than actual delegated amount" + ); + let attempted_remaining_delegation_amount = + actual_delegated_amount - attempted_partial_unbond_redlegate_amount; + assert!( + attempted_remaining_delegation_amount < DEFAULT_MINIMUM_DELEGATION_AMOUNT.into(), + "attempted remainder should be less than minimum in this case" + ); + let delegator_1_redelegate_request = ExecuteRequestBuilder::standard( + *BID_ACCOUNT_1_ADDR, + CONTRACT_REDELEGATE, + runtime_args! { + ARG_AMOUNT => attempted_partial_unbond_redlegate_amount, + ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(), + ARG_DELEGATOR => BID_ACCOUNT_1_PK.clone(), + ARG_NEW_VALIDATOR => NON_FOUNDER_VALIDATOR_3_PK.clone() + }, + ) + .build(); - total_unbonded += EXPECTED_WEEKLY_RELEASE; + builder + .exec(delegator_1_redelegate_request) + .commit() + .expect_success(); - assert_eq!( - DELEGATOR_1_STAKE - total_unbonded, - EXPECTED_LOCKED_AMOUNTS[0] - ) - } + let bids: Vec = builder + .get_bids() + .into_iter() + .filter(|bid| !bid.is_unbond()) + .collect(); - for i in 1..13 { - // Run auction forward by almost a week - builder.run_auction(WEEK_TIMESTAMPS[i] - 1, Vec::new()); + assert_eq!( + bids.len(), + 3, + "with unbonds filtered out there should be 3 bids" + ); + assert!(bids + .validator_bid(&NON_FOUNDER_VALIDATOR_2_PK.clone()) + .is_none()); - // Attempt unbond of 1 mote - expect_undelegate_failure(&mut builder, u64::one()); + // change validator 1 bid public key to validator 2 public key + let change_bid_public_key_request = ExecuteRequestBuilder::standard( + 
*NON_FOUNDER_VALIDATOR_1_ADDR, + CONTRACT_CHANGE_BID_PUBLIC_KEY, + runtime_args! { + ARG_PUBLIC_KEY => NON_FOUNDER_VALIDATOR_1_PK.clone(), + ARG_NEW_PUBLIC_KEY => NON_FOUNDER_VALIDATOR_2_PK.clone() + }, + ) + .build(); - // Run auction forward by one millisecond - builder.run_auction(WEEK_TIMESTAMPS[i], Vec::new()); + let era_id = builder.get_era(); - // Attempt unbond of more than weekly release - expect_undelegate_failure(&mut builder, EXPECTED_WEEKLY_RELEASE + 1); + builder + .exec(change_bid_public_key_request) + .commit() + .expect_success(); - // Attempt unbond of released amount - expect_undelegate_success(&mut builder, EXPECTED_WEEKLY_RELEASE); + let bids: Vec = builder + .get_bids() + .into_iter() + .filter(|bid| !bid.is_unbond()) + .collect(); + assert_eq!( + bids.len(), + 4, + "with unbonds filtered out, there should be 4 bids" + ); + let new_validator_bid = bids + .validator_bid(&NON_FOUNDER_VALIDATOR_2_PK.clone()) + .unwrap_or_else(|| { + panic!( + "should have validator bid {:?}", + NON_FOUNDER_VALIDATOR_2_PK.clone() + ) + }); - total_unbonded += EXPECTED_WEEKLY_RELEASE; + assert_eq!( + builder.get_purse_balance(*new_validator_bid.bonding_purse()), + U512::from(ADD_BID_AMOUNT_1) + ); - assert_eq!( - DELEGATOR_1_STAKE - total_unbonded, - EXPECTED_LOCKED_AMOUNTS[i] + let bridge = bids + .bridge( + &NON_FOUNDER_VALIDATOR_1_PK.clone(), + &NON_FOUNDER_VALIDATOR_2_PK.clone(), + &era_id, ) - } - - { - // Run auction forward by almost a week - builder.run_auction(WEEK_TIMESTAMPS[13] - 1, Vec::new()); + .unwrap(); + assert_eq!( + bridge.old_validator_public_key(), + &NON_FOUNDER_VALIDATOR_1_PK.clone() + ); + assert_eq!( + bridge.new_validator_public_key(), + &NON_FOUNDER_VALIDATOR_2_PK.clone() + ); + assert_eq!(*bridge.era_id(), era_id); + + assert!(bids + .delegators_by_validator_public_key(&NON_FOUNDER_VALIDATOR_1_PK) + .is_none()); + let delegators = bids + .delegators_by_validator_public_key(&NON_FOUNDER_VALIDATOR_2_PK) + .expect("should have delegators"); 
+ // NOTE: previously in this test the partial redelegate would have been allowed, and thus this + // delegator would have had delegations to the original validator (below min) + // and the redelegated target (the redelegated amount) + // The new logic converted it into a full unbond to avoid the remainder below min being + // left behind. + assert_eq!( + delegators.len(), + 1, + "the remaining delegator should have bridged over" + ); + assert!( + bids.delegator_by_kind( + &NON_FOUNDER_VALIDATOR_2_PK, + &DelegatorKind::PublicKey(BID_ACCOUNT_1_PK.clone()), + ) + .is_none(), + "the redelegated unbond should not have bridged over" + ); - // Attempt unbond of 1 mote - expect_undelegate_failure(&mut builder, u64::one()); + let delegator = bids + .delegator_by_kind( + &NON_FOUNDER_VALIDATOR_2_PK, + &DelegatorKind::PublicKey(BID_ACCOUNT_2_PK.clone()), + ) + .expect("should have account2 delegation"); + assert_eq!(delegator.staked_amount(), U512::from(DELEGATE_AMOUNT_2)); + + // distribute rewards + let protocol_version = DEFAULT_PROTOCOL_VERSION; + let total_payout = builder.base_round_reward(None, protocol_version); + let mut rewards = BTreeMap::new(); + rewards.insert(NON_FOUNDER_VALIDATOR_1_PK.clone(), vec![total_payout]); + let distribute_request = ExecuteRequestBuilder::contract_call_by_hash( + *SYSTEM_ADDR, + builder.get_auction_contract_hash(), + METHOD_DISTRIBUTE, + runtime_args! 
{ + ARG_ENTRY_POINT => METHOD_DISTRIBUTE, + ARG_REWARDS_MAP => rewards + }, + ) + .build(); - // Run auction forward by one millisecond - builder.run_auction(WEEK_TIMESTAMPS[13], Vec::new()); + builder.exec(distribute_request).commit().expect_success(); + let bids: Vec = builder + .get_bids() + .into_iter() + .filter(|bid| !bid.is_unbond()) + .collect(); + assert_eq!( + bids.len(), + 4, + "excluding unbonds there should now be 4 bids" + ); - // Attempt unbond of released amount + remainder - expect_undelegate_success(&mut builder, EXPECTED_WEEKLY_RELEASE + EXPECTED_REMAINDER); + assert!( + bids.delegator_by_kind( + &NON_FOUNDER_VALIDATOR_2_PK, + &DelegatorKind::PublicKey(BID_ACCOUNT_1_PK.clone()), + ) + .is_none(), + "should not have undelegated delegator" + ); + let delegator = bids + .delegator_by_kind( + &NON_FOUNDER_VALIDATOR_2_PK, + &DelegatorKind::PublicKey(BID_ACCOUNT_2_PK.clone()), + ) + .expect("should have account2 delegation"); + assert!(delegator.staked_amount() > U512::from(DELEGATE_AMOUNT_2)); + let expected_reward = 12; + assert_eq!( + delegator.staked_amount(), + U512::from(DELEGATE_AMOUNT_2 + expected_reward) + ); - total_unbonded += EXPECTED_WEEKLY_RELEASE + EXPECTED_REMAINDER; + // advance eras until unbonds are processed + builder.advance_eras_by(DEFAULT_UNBONDING_DELAY + 1); - assert_eq!( - DELEGATOR_1_STAKE - total_unbonded, - EXPECTED_LOCKED_AMOUNTS[13] + let bids = builder.get_bids(); + assert_eq!(bids.len(), 5, "with unbonds filtered there should be 5"); + let delegator = bids + .delegator_by_kind( + &NON_FOUNDER_VALIDATOR_3_PK, + &DelegatorKind::PublicKey(BID_ACCOUNT_1_PK.clone()), ) - } + .expect("should have account1 delegation"); - assert_eq!(DELEGATOR_1_STAKE, total_unbonded); + assert_eq!( + delegator.staked_amount(), + U512::from(DELEGATE_AMOUNT_1 + expected_reward), + "the fully redelegated amount plus the earned rewards" + ); } #[ignore] #[test] -fn should_reset_delegators_stake_after_slashing() { +fn 
should_handle_excessively_long_bridge_record_chains() { + let mut validators = Vec::new(); + for index in 0..21 { + let secret_key = + SecretKey::ed25519_from_bytes([10 + index; SecretKey::ED25519_LENGTH]).unwrap(); + let pubkey = PublicKey::from(&secret_key); + let addr = AccountHash::from(&pubkey); + validators.push((pubkey, addr)); + } + let system_fund_request = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, CONTRACT_TRANSFER_TO_ACCOUNT, @@ -2755,28 +5579,6 @@ fn should_reset_delegators_stake_after_slashing() { ) .build(); - let delegator_1_validator_2_delegate_request = ExecuteRequestBuilder::standard( - *BID_ACCOUNT_1_ADDR, - CONTRACT_DELEGATE, - runtime_args! { - ARG_AMOUNT => U512::from(DELEGATE_AMOUNT_1), - ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_2_PK.clone(), - ARG_DELEGATOR => BID_ACCOUNT_1_PK.clone(), - }, - ) - .build(); - - let delegator_2_validator_1_delegate_request = ExecuteRequestBuilder::standard( - *BID_ACCOUNT_2_ADDR, - CONTRACT_DELEGATE, - runtime_args! { - ARG_AMOUNT => U512::from(DELEGATE_AMOUNT_2), - ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone(), - ARG_DELEGATOR => BID_ACCOUNT_2_PK.clone(), - }, - ) - .build(); - let delegator_2_validator_2_delegate_request = ExecuteRequestBuilder::standard( *BID_ACCOUNT_2_ADDR, CONTRACT_DELEGATE, @@ -2788,7 +5590,7 @@ fn should_reset_delegators_stake_after_slashing() { ) .build(); - let post_genesis_requests = vec![ + let mut post_genesis_requests = vec![ system_fund_request, delegator_1_fund_request, delegator_2_fund_request, @@ -2797,165 +5599,365 @@ fn should_reset_delegators_stake_after_slashing() { validator_1_add_bid_request, validator_2_add_bid_request, delegator_1_validator_1_delegate_request, - delegator_1_validator_2_delegate_request, - delegator_2_validator_1_delegate_request, delegator_2_validator_2_delegate_request, ]; - let mut builder = InMemoryWasmTestBuilder::default(); - - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); - - for request in post_genesis_requests { - 
builder.exec(request).expect_success().commit(); + // add extra validators fund requests + for (_pubkey, addr) in validators.iter() { + let validator_fund_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *addr, + ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) + }, + ) + .build(); + post_genesis_requests.push(validator_fund_request) } - let auction_hash = builder.get_auction_contract_hash(); + let mut builder = LmdbWasmTestBuilder::default(); - // Check bids before slashing + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); - let bids_1: Bids = builder.get_bids(); + for request in post_genesis_requests { + builder.exec(request).commit().expect_success(); + } - let validator_1_delegator_stakes_1: U512 = bids_1 - .get(&NON_FOUNDER_VALIDATOR_1_PK) - .expect("should have bids") - .delegators() - .iter() - .map(|(_, delegator)| *delegator.staked_amount()) - .sum(); - assert!(validator_1_delegator_stakes_1 > U512::zero()); + builder.advance_eras_by_default_auction_delay(); - let validator_2_delegator_stakes_1: U512 = bids_1 - .get(&NON_FOUNDER_VALIDATOR_2_PK) - .expect("should have bids") - .delegators() - .iter() - .map(|(_, delegator)| *delegator.staked_amount()) - .sum(); - assert!(validator_2_delegator_stakes_1 > U512::zero()); + // verify delegator 2 main purse balance after delegation + let delegator_2_main_purse = builder + .get_entity_by_account_hash(*BID_ACCOUNT_2_ADDR) + .expect("should have default account") + .main_purse(); + assert_eq!( + builder.get_purse_balance(delegator_2_main_purse), + U512::from(TRANSFER_AMOUNT - DELEGATE_AMOUNT_2) + ); - let slash_request_1 = ExecuteRequestBuilder::contract_call_by_hash( - *SYSTEM_ADDR, - auction_hash, - auction::METHOD_SLASH, + // redelegate funds to validator 1 + let delegator_2_redelegate_request = ExecuteRequestBuilder::standard( + *BID_ACCOUNT_2_ADDR, + CONTRACT_REDELEGATE, runtime_args! 
{ - auction::ARG_VALIDATOR_PUBLIC_KEYS => vec![ - NON_FOUNDER_VALIDATOR_2_PK.clone(), - ] + ARG_AMOUNT => U512::from(UNDELEGATE_AMOUNT_2), + ARG_VALIDATOR => NON_FOUNDER_VALIDATOR_2_PK.clone(), + ARG_DELEGATOR => BID_ACCOUNT_2_PK.clone(), + ARG_NEW_VALIDATOR => NON_FOUNDER_VALIDATOR_1_PK.clone() }, ) .build(); - builder.exec(slash_request_1).expect_success().commit(); + builder + .exec(delegator_2_redelegate_request) + .commit() + .expect_success(); - // Compare bids after slashing validator 2 - let bids_2: Bids = builder.get_bids(); - assert_ne!(bids_1, bids_2); + // change validator bid public key + let mut current_bid_public_key = NON_FOUNDER_VALIDATOR_1_PK.clone(); + let mut current_bid_addr = *NON_FOUNDER_VALIDATOR_1_ADDR; + for (pubkey, addr) in validators.iter() { + let change_bid_public_key_request = ExecuteRequestBuilder::standard( + current_bid_addr, + CONTRACT_CHANGE_BID_PUBLIC_KEY, + runtime_args! { + ARG_PUBLIC_KEY => current_bid_public_key.clone(), + ARG_NEW_PUBLIC_KEY => pubkey.clone() + }, + ) + .build(); - let validator_1_bid_2 = bids_2 - .get(&NON_FOUNDER_VALIDATOR_1_PK) - .expect("should have bids"); - let validator_1_delegator_stakes_2: U512 = validator_1_bid_2 - .delegators() - .iter() - .map(|(_, delegator)| *delegator.staked_amount()) - .sum(); - assert!(validator_1_delegator_stakes_2 > U512::zero()); + builder + .exec(change_bid_public_key_request) + .commit() + .expect_success(); - let validator_2_bid_2 = bids_2 - .get(&NON_FOUNDER_VALIDATOR_2_PK) - .expect("should have bids"); - assert!(validator_2_bid_2.inactive()); + current_bid_public_key = pubkey.clone(); + current_bid_addr = *addr; + } - let validator_2_delegator_stakes_2: U512 = validator_2_bid_2 - .delegators() - .iter() - .map(|(_, delegator)| *delegator.staked_amount()) - .sum(); - assert!(validator_2_delegator_stakes_2 < validator_2_delegator_stakes_1); - assert_eq!(validator_2_delegator_stakes_2, U512::zero()); + let era_id = builder.get_era(); - // Validator 1 total delegated 
stake did not change + let bids: Vec<_> = builder + .get_bids() + .into_iter() + .filter(|bid| !bid.is_unbond()) + .collect(); + assert_eq!(bids.len(), 25); + let new_validator_bid = bids.validator_bid(¤t_bid_public_key).unwrap(); assert_eq!( - validator_1_delegator_stakes_2, - validator_1_delegator_stakes_1 + builder.get_purse_balance(*new_validator_bid.bonding_purse()), + U512::from(ADD_BID_AMOUNT_1) ); - let slash_request_2 = ExecuteRequestBuilder::contract_call_by_hash( + // check if bridge records exist + let mut old_public_key = NON_FOUNDER_VALIDATOR_1_PK.clone(); + for (pubkey, _addr) in validators.iter() { + let bridge = bids + .bridge(&old_public_key.clone(), &pubkey.clone(), &era_id) + .unwrap(); + assert_eq!(bridge.old_validator_public_key(), &old_public_key.clone()); + assert_eq!(bridge.new_validator_public_key(), &pubkey.clone()); + assert_eq!(*bridge.era_id(), era_id); + + old_public_key = pubkey.clone(); + } + + // verify delegator bids for current validator bid + let current_public_key = old_public_key; + let delegators = bids + .delegators_by_validator_public_key(¤t_public_key) + .expect("should have delegators"); + assert_eq!(delegators.len(), 1); + let delegator = bids + .delegator_by_kind( + ¤t_public_key, + &DelegatorKind::PublicKey(BID_ACCOUNT_1_PK.clone()), + ) + .expect("should have account1 delegation"); + assert_eq!(delegator.staked_amount(), U512::from(DELEGATE_AMOUNT_1)); + let delegators = bids + .delegators_by_validator_public_key(&NON_FOUNDER_VALIDATOR_2_PK) + .expect("should have delegators"); + assert_eq!(delegators.len(), 1); + let delegator = bids + .delegator_by_kind( + &NON_FOUNDER_VALIDATOR_2_PK, + &DelegatorKind::PublicKey(BID_ACCOUNT_2_PK.clone()), + ) + .expect("should have account2 delegation"); + assert_eq!( + delegator.staked_amount(), + U512::from(DELEGATE_AMOUNT_2 - UNDELEGATE_AMOUNT_2) + ); + + // distribute rewards + let protocol_version = DEFAULT_PROTOCOL_VERSION; + let total_payout = builder.base_round_reward(None, 
protocol_version); + let mut rewards = BTreeMap::new(); + rewards.insert(NON_FOUNDER_VALIDATOR_1_PK.clone(), vec![total_payout]); + let distribute_request = ExecuteRequestBuilder::contract_call_by_hash( *SYSTEM_ADDR, - auction_hash, - auction::METHOD_SLASH, + builder.get_auction_contract_hash(), + METHOD_DISTRIBUTE, runtime_args! { - auction::ARG_VALIDATOR_PUBLIC_KEYS => vec![ - NON_FOUNDER_VALIDATOR_1_PK.clone(), - ] + ARG_ENTRY_POINT => METHOD_DISTRIBUTE, + ARG_REWARDS_MAP => rewards }, ) .build(); - builder.exec(slash_request_2).expect_success().commit(); + builder.exec(distribute_request).commit().expect_success(); - // Compare bids after slashing validator 2 - let bids_3: Bids = builder.get_bids(); - assert_ne!(bids_3, bids_2); - assert_ne!(bids_3, bids_1); + let bids: Vec<_> = builder + .get_bids() + .into_iter() + .filter(|bid| !bid.is_unbond()) + .collect(); + assert_eq!(bids.len(), 25); - let validator_1 = bids_3 - .get(&NON_FOUNDER_VALIDATOR_1_PK) - .expect("should have bids"); - let validator_1_delegator_stakes_3: U512 = validator_1 - .delegators() - .iter() - .map(|(_, delegator)| *delegator.staked_amount()) - .sum(); + let delegator = bids + .delegator_by_kind( + ¤t_public_key, + &DelegatorKind::PublicKey(BID_ACCOUNT_1_PK.clone()), + ) + .expect("should have account1 delegation"); + assert_eq!(delegator.staked_amount(), U512::from(DELEGATE_AMOUNT_1)); - assert_ne!( - validator_1_delegator_stakes_3, - validator_1_delegator_stakes_1 - ); - assert_ne!( - validator_1_delegator_stakes_3, - validator_1_delegator_stakes_2 + // advance eras until unbonds are processed + builder.advance_eras_by(DEFAULT_UNBONDING_DELAY + 1); + + let bids: Vec<_> = builder + .get_bids() + .into_iter() + .filter(|bid| !bid.is_unbond()) + .collect(); + assert_eq!(bids.len(), 25); + let delegator = bids.delegator_by_kind( + ¤t_public_key, + &DelegatorKind::PublicKey(BID_ACCOUNT_2_PK.clone()), ); + assert!(delegator.is_none()); - // Validator 1 total delegated stake is set to 0 - 
assert_eq!(validator_1_delegator_stakes_3, U512::zero()); + // verify that unbond was returned to main purse instead of being redelegated + assert_eq!( + builder.get_purse_balance(delegator_2_main_purse), + U512::from(TRANSFER_AMOUNT - DELEGATE_AMOUNT_2 + UNDELEGATE_AMOUNT_2) + ); } -#[should_panic(expected = "InvalidDelegatedAmount")] #[ignore] #[test] -fn should_validate_genesis_delegators_bond_amount() { +fn credits_are_considered_when_determining_validators() { + // In this test we have 2 genesis nodes that are validators: Node 1 and Node 2; 1 has less stake + // than 2. We have only 2 validator slots so later we'll bid in another node with a stake + // slightly higher than the one of node 1. + // Under normal circumstances, since node 3 put in a higher bid, it should win the slot and kick + // out node 1. But since we add some credits for node 1 (because it was a validator and + // proposed blocks) it should maintain its slot. let accounts = { let mut tmp: Vec = DEFAULT_ACCOUNTS.clone(); let account_1 = GenesisAccount::account( ACCOUNT_1_PK.clone(), - Motes::new(ACCOUNT_1_BALANCE.into()), - Some(GenesisValidator::new(Motes::new(ACCOUNT_1_BOND.into()), 80)), + Motes::new(ACCOUNT_1_BALANCE), + Some(GenesisValidator::new( + Motes::new(ACCOUNT_1_BOND), + DelegationRate::zero(), + )), ); let account_2 = GenesisAccount::account( ACCOUNT_2_PK.clone(), - Motes::new(ACCOUNT_2_BALANCE.into()), + Motes::new(ACCOUNT_2_BALANCE), Some(GenesisValidator::new( - Motes::new(ACCOUNT_2_BOND.into()), + Motes::new(ACCOUNT_2_BOND), DelegationRate::zero(), )), ); - let delegator_1 = GenesisAccount::delegator( - ACCOUNT_1_PK.clone(), - DELEGATOR_1.clone(), - Motes::new(DELEGATOR_1_BALANCE.into()), - Motes::new(U512::zero()), + let account_3 = GenesisAccount::account( + BID_ACCOUNT_1_PK.clone(), + Motes::new(BID_ACCOUNT_1_BALANCE), + None, ); tmp.push(account_1); tmp.push(account_2); - tmp.push(delegator_1); + tmp.push(account_3); tmp }; - let run_genesis_request = 
utils::create_run_genesis_request(accounts); + let mut builder = LmdbWasmTestBuilder::default(); + let config = GenesisConfigBuilder::default() + .with_accounts(accounts) + .with_validator_slots(2) // set up only 2 validators + .with_auction_delay(DEFAULT_AUCTION_DELAY) + .with_locked_funds_period_millis(DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS) + .with_round_seigniorage_rate(DEFAULT_ROUND_SEIGNIORAGE_RATE) + .with_unbonding_delay(DEFAULT_UNBONDING_DELAY) + .with_genesis_timestamp_millis(DEFAULT_GENESIS_TIMESTAMP_MILLIS) + .build(); + let run_genesis_request = GenesisRequest::new( + DEFAULT_GENESIS_CONFIG_HASH, + DEFAULT_PROTOCOL_VERSION, + config, + DEFAULT_CHAINSPEC_REGISTRY.clone(), + ); + builder.run_genesis(run_genesis_request); + + let genesis_validator_weights = builder + .get_validator_weights(INITIAL_ERA_ID) + .expect("should have genesis validators for initial era"); + let auction_delay = builder.get_auction_delay(); + + // new_era is the first era in the future where new era validator weights will be calculated + let new_era = INITIAL_ERA_ID + auction_delay + 1; + assert!(builder.get_validator_weights(new_era).is_none()); + assert_eq!( + builder.get_validator_weights(new_era - 1).unwrap(), + builder.get_validator_weights(INITIAL_ERA_ID).unwrap() + ); + // in the genesis era both node 1 and 2 are validators. + assert_eq!( + genesis_validator_weights + .keys() + .cloned() + .collect::>(), + BTreeSet::from_iter(vec![ACCOUNT_1_PK.clone(), ACCOUNT_2_PK.clone()]) + ); + + // bid in the 3rd node with an amount just a bit more than node 1. + let exec_request_1 = ExecuteRequestBuilder::standard( + *BID_ACCOUNT_1_ADDR, + CONTRACT_ADD_BID, + runtime_args! 
{ + ARG_PUBLIC_KEY => BID_ACCOUNT_1_PK.clone(), + ARG_AMOUNT => U512::from(ACCOUNT_1_BOND + 1), + ARG_DELEGATION_RATE => ADD_BID_DELEGATION_RATE_1, + }, + ) + .build(); + + builder.exec(exec_request_1).expect_success().commit(); + + // Add a credit for node 1 artificially (assume it has proposed a block with a transaction and + // received credit). + let credit_amount = U512::from(2001); + let add_credit = HandleFeeMode::credit( + Box::new(ACCOUNT_1_PK.clone()), + credit_amount, + INITIAL_ERA_ID, + ); + builder.handle_fee( + None, + DEFAULT_PROTOCOL_VERSION, + TransactionHash::from_raw([1; 32]), + add_credit, + ); + + // run auction and compute validators for new era + builder.run_auction( + DEFAULT_GENESIS_TIMESTAMP_MILLIS + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS, + Vec::new(), + ); + + let new_validator_weights: ValidatorWeights = builder + .get_validator_weights(new_era) + .expect("should have first era validator weights"); + + // We have only 2 slots. Node 2 should be in the set because it's the highest bidder. Node 1 + // should keep its validator slot even though it's bid is now lower than node 3. This should + // have happened because there was a credit for node 1 added. 
+ assert_eq!( + new_validator_weights.get(&ACCOUNT_2_PK), + Some(&U512::from(ACCOUNT_2_BOND)) + ); + assert!(!new_validator_weights.contains_key(&BID_ACCOUNT_1_PK)); + let expected_amount = credit_amount.saturating_add(U512::from(ACCOUNT_1_BOND)); + assert_eq!( + new_validator_weights.get(&ACCOUNT_1_PK), + Some(&expected_amount) + ); +} + +#[ignore] +#[test] +fn should_mark_bids_with_less_than_minimum_bid_amount_as_inactive_via_upgrade() { + const VALIDATOR_MIN_BID_FIXTURE: &str = "validator_minimum_bid"; + + const FIXTURE_MIN_BID_AMOUNT: u64 = 10_000 * 1_000_000_000; + + let (mut builder, lmdb_fixture_state, _temp_dir) = + lmdb_fixture::builder_from_global_state_fixture(VALIDATOR_MIN_BID_FIXTURE); - let mut builder = InMemoryWasmTestBuilder::default(); + let current_protocol_version = lmdb_fixture_state.genesis_protocol_version(); + + let new_protocol_version = ProtocolVersion::from_parts( + current_protocol_version.value().major, + current_protocol_version.value().minor + 1, + 0, + ); + + let mut upgrade_request = { + UpgradeRequestBuilder::new() + .with_current_protocol_version(current_protocol_version) + .with_new_protocol_version(new_protocol_version) + .with_activation_point(EraId::new(1)) + .with_new_gas_hold_handling(HoldBalanceHandling::Accrued) + .with_new_gas_hold_interval(1200u64) + .with_validator_minimum_bid_amount(FIXTURE_MIN_BID_AMOUNT + 1) + .build() + }; + + builder + .upgrade(&mut upgrade_request) + .expect_upgrade_success(); + + let public_key = { + let secret_key = SecretKey::ed25519_from_bytes([204; SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) + }; + let bid = builder + .get_validator_bid(public_key) + .expect("must have the validator bid record"); - builder.run_genesis(&run_genesis_request); + assert!(bid.inactive()) } diff --git a/execution_engine_testing/tests/src/test/system_contracts/auction/distribute.rs b/execution_engine_testing/tests/src/test/system_contracts/auction/distribute.rs index e90d170863..223835b3ec 
100644 --- a/execution_engine_testing/tests/src/test/system_contracts/auction/distribute.rs +++ b/execution_engine_testing/tests/src/test/system_contracts/auction/distribute.rs @@ -1,67 +1,65 @@ use std::collections::BTreeMap; use num_rational::Ratio; +use num_traits::{CheckedMul, CheckedSub}; use once_cell::sync::Lazy; +use crate::test::system_contracts::auction::{ + get_delegator_staked_amount, get_era_info, get_validator_bid, +}; use casper_engine_test_support::{ - internal::{ - ExecuteRequestBuilder, InMemoryWasmTestBuilder, UpgradeRequestBuilder, - DEFAULT_GENESIS_TIMESTAMP_MILLIS, DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS, - DEFAULT_PROTOCOL_VERSION, DEFAULT_ROUND_SEIGNIORAGE_RATE, DEFAULT_RUN_GENESIS_REQUEST, - SYSTEM_ADDR, TIMESTAMP_MILLIS_INCREMENT, - }, - DEFAULT_ACCOUNT_ADDR, MINIMUM_ACCOUNT_CREATION_BALANCE, + ExecuteRequestBuilder, LmdbWasmTestBuilder, StepRequestBuilder, UpgradeRequestBuilder, + DEFAULT_ACCOUNT_ADDR, DEFAULT_GENESIS_TIMESTAMP_MILLIS, DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS, + DEFAULT_MINIMUM_DELEGATION_AMOUNT, DEFAULT_PROTOCOL_VERSION, DEFAULT_ROUND_SEIGNIORAGE_RATE, + LOCAL_GENESIS_REQUEST, MINIMUM_ACCOUNT_CREATION_BALANCE, PRODUCTION_ROUND_SEIGNIORAGE_RATE, + SYSTEM_ADDR, TIMESTAMP_MILLIS_INCREMENT, }; +use casper_storage::data_access_layer::AuctionMethod; use casper_types::{ self, account::AccountHash, runtime_args, system::auction::{ - self, Bid, Bids, DelegationRate, Delegator, SeigniorageAllocation, ARG_AMOUNT, - ARG_DELEGATION_RATE, ARG_DELEGATOR, ARG_PUBLIC_KEY, ARG_REWARD_FACTORS, ARG_VALIDATOR, - BLOCK_REWARD, DELEGATION_RATE_DENOMINATOR, METHOD_DISTRIBUTE, + self, BidsExt, DelegationRate, DelegatorBid, DelegatorKind, SeigniorageAllocation, + SeigniorageRecipientsSnapshotV2, UnbondKind, ARG_AMOUNT, ARG_DELEGATION_RATE, + ARG_DELEGATOR, ARG_PUBLIC_KEY, ARG_REWARDS_MAP, ARG_VALIDATOR, DELEGATION_RATE_DENOMINATOR, + METHOD_DISTRIBUTE, SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY, }, - EraId, Key, ProtocolVersion, PublicKey, RuntimeArgs, SecretKey, 
U512, + EntityAddr, EraId, ProtocolVersion, PublicKey, SecretKey, Timestamp, + DEFAULT_MINIMUM_BID_AMOUNT, U512, }; const ARG_ENTRY_POINT: &str = "entry_point"; const ARG_TARGET: &str = "target"; const CONTRACT_TRANSFER_TO_ACCOUNT: &str = "transfer_to_account_u512.wasm"; -const CONTRACT_AUCTION_BIDS: &str = "auction_bids.wasm"; const CONTRACT_ADD_BID: &str = "add_bid.wasm"; const CONTRACT_DELEGATE: &str = "delegate.wasm"; const TRANSFER_AMOUNT: u64 = MINIMUM_ACCOUNT_CREATION_BALANCE; static VALIDATOR_1: Lazy = Lazy::new(|| { - SecretKey::ed25519_from_bytes([3; SecretKey::ED25519_LENGTH]) - .unwrap() - .into() + let secret_key = SecretKey::ed25519_from_bytes([3; SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) }); static VALIDATOR_2: Lazy = Lazy::new(|| { - SecretKey::ed25519_from_bytes([5; SecretKey::ED25519_LENGTH]) - .unwrap() - .into() + let secret_key = SecretKey::ed25519_from_bytes([5; SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) }); static VALIDATOR_3: Lazy = Lazy::new(|| { - SecretKey::ed25519_from_bytes([7; SecretKey::ED25519_LENGTH]) - .unwrap() - .into() + let secret_key = SecretKey::ed25519_from_bytes([7; SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) }); static DELEGATOR_1: Lazy = Lazy::new(|| { - SecretKey::ed25519_from_bytes([204; SecretKey::ED25519_LENGTH]) - .unwrap() - .into() + let secret_key = SecretKey::ed25519_from_bytes([204; SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) }); static DELEGATOR_2: Lazy = Lazy::new(|| { - SecretKey::ed25519_from_bytes([206; SecretKey::ED25519_LENGTH]) - .unwrap() - .into() + let secret_key = SecretKey::ed25519_from_bytes([206; SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) }); static DELEGATOR_3: Lazy = Lazy::new(|| { - SecretKey::ed25519_from_bytes([208; SecretKey::ED25519_LENGTH]) - .unwrap() - .into() + let secret_key = SecretKey::ed25519_from_bytes([208; SecretKey::ED25519_LENGTH]).unwrap(); + 
PublicKey::from(&secret_key) }); static VALIDATOR_1_ADDR: Lazy = Lazy::new(|| AccountHash::from(&*VALIDATOR_1)); @@ -72,35 +70,30 @@ static DELEGATOR_2_ADDR: Lazy = Lazy::new(|| AccountHash::from(&*DE static DELEGATOR_3_ADDR: Lazy = Lazy::new(|| AccountHash::from(&*DELEGATOR_3)); static GENESIS_ROUND_SEIGNIORAGE_RATE: Lazy> = Lazy::new(|| { Ratio::new( - U512::from(*DEFAULT_ROUND_SEIGNIORAGE_RATE.numer()), - U512::from(*DEFAULT_ROUND_SEIGNIORAGE_RATE.denom()), + U512::from(*PRODUCTION_ROUND_SEIGNIORAGE_RATE.numer()), + U512::from(*PRODUCTION_ROUND_SEIGNIORAGE_RATE.denom()), ) }); -fn get_validator_bid(builder: &mut InMemoryWasmTestBuilder, validator: PublicKey) -> Option { - let mut bids: Bids = builder.get_bids(); - bids.remove(&validator) -} - fn get_delegator_bid( - builder: &mut InMemoryWasmTestBuilder, + builder: &mut LmdbWasmTestBuilder, validator: PublicKey, delegator: PublicKey, -) -> Option { - let validator_bid = get_validator_bid(builder, validator)?; - validator_bid.delegators().get(&delegator).cloned() +) -> Option { + let bids = builder.get_bids(); + bids.delegator_by_kind(&validator, &DelegatorKind::PublicKey(delegator.clone())) } fn withdraw_bid( - builder: &mut InMemoryWasmTestBuilder, + builder: &mut LmdbWasmTestBuilder, sender: AccountHash, validator: PublicKey, amount: U512, ) { let auction = builder.get_auction_contract_hash(); let withdraw_bid_args = runtime_args! { - auction::ARG_PUBLIC_KEY => validator, - auction::ARG_AMOUNT => amount, + ARG_PUBLIC_KEY => validator, + ARG_AMOUNT => amount, }; let withdraw_bid_request = ExecuteRequestBuilder::contract_call_by_hash( sender, @@ -113,7 +106,7 @@ fn withdraw_bid( } fn undelegate( - builder: &mut InMemoryWasmTestBuilder, + builder: &mut LmdbWasmTestBuilder, sender: AccountHash, delegator: PublicKey, validator: PublicKey, @@ -121,9 +114,9 @@ fn undelegate( ) { let auction = builder.get_auction_contract_hash(); let undelegate_args = runtime_args! 
{ - auction::ARG_DELEGATOR => delegator, - auction::ARG_VALIDATOR => validator, - auction::ARG_AMOUNT => amount, + ARG_DELEGATOR => delegator, + ARG_VALIDATOR => validator, + ARG_AMOUNT => amount, }; let undelegate_request = ExecuteRequestBuilder::contract_call_by_hash( sender, @@ -135,38 +128,17 @@ fn undelegate( builder.exec(undelegate_request).expect_success().commit(); } -fn get_delegator_staked_amount( - builder: &mut InMemoryWasmTestBuilder, - validator: PublicKey, - delegator: PublicKey, -) -> U512 { - let bids: Bids = builder.get_bids(); - let validator_bid = bids.get(&validator).expect("should have validator entry"); - - let delegator_entry = validator_bid - .delegators() - .get(&delegator) - .unwrap_or_else(|| { - panic!( - "should have delegator entry delegator={:?} bid={:?}", - delegator, validator_bid - ) - }); - *delegator_entry.staked_amount() -} - #[ignore] #[test] fn should_distribute_delegation_rate_zero() { - const VALIDATOR_1_STAKE: u64 = 1_000_000; - const DELEGATOR_1_STAKE: u64 = 1_000_000; - const DELEGATOR_2_STAKE: u64 = 1_000_000; + const VALIDATOR_1_STAKE: u64 = DEFAULT_MINIMUM_DELEGATION_AMOUNT; + const DELEGATOR_1_STAKE: u64 = DEFAULT_MINIMUM_DELEGATION_AMOUNT; + const DELEGATOR_2_STAKE: u64 = DEFAULT_MINIMUM_DELEGATION_AMOUNT; + const TOTAL_DELEGATOR_STAKE: u64 = DELEGATOR_1_STAKE + DELEGATOR_2_STAKE; + const TOTAL_STAKE: u64 = VALIDATOR_1_STAKE + TOTAL_DELEGATOR_STAKE; const VALIDATOR_1_DELEGATION_RATE: DelegationRate = 0; - let participant_portion = Ratio::new(U512::one(), U512::from(3)); - let remainders = Ratio::from(U512::zero()); - let system_fund_request = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, CONTRACT_TRANSFER_TO_ACCOUNT, @@ -250,83 +222,125 @@ fn should_distribute_delegation_rate_zero() { delegator_2_delegate_request, ]; - let mut timestamp_millis = - DEFAULT_GENESIS_TIMESTAMP_MILLIS + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS; - - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = 
LmdbWasmTestBuilder::default(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + let protocol_version = DEFAULT_PROTOCOL_VERSION; // initial token supply - let initial_supply = builder.total_supply(None); + let initial_supply = builder.total_supply(protocol_version, None); + let total_payout = builder.base_round_reward(None, protocol_version); let expected_total_reward = *GENESIS_ROUND_SEIGNIORAGE_RATE * initial_supply; let expected_total_reward_integer = expected_total_reward.to_integer(); + assert_eq!(total_payout, expected_total_reward_integer); for request in post_genesis_requests { builder.exec(request).commit().expect_success(); } - for _ in 0..5 { - builder.run_auction(timestamp_millis, Vec::new()); - timestamp_millis += TIMESTAMP_MILLIS_INCREMENT; + for _ in 0..=builder.get_auction_delay() { + let step_request = StepRequestBuilder::new() + .with_parent_state_hash(builder.get_post_state_hash()) + .with_protocol_version(ProtocolVersion::V1_0_0) + .with_next_era_id(builder.get_era().successor()) + .with_run_auction(true) + .build(); + assert!( + builder.step(step_request).is_success(), + "must execute step successfully" + ); } - let reward_factors: BTreeMap = { - let mut tmp = BTreeMap::new(); - tmp.insert(VALIDATOR_1.clone(), BLOCK_REWARD); - tmp - }; + let mut rewards = BTreeMap::new(); + rewards.insert(VALIDATOR_1.clone(), vec![total_payout]); - let distribute_request = ExecuteRequestBuilder::standard( + let distribute_request = ExecuteRequestBuilder::contract_call_by_hash( *SYSTEM_ADDR, - CONTRACT_AUCTION_BIDS, + builder.get_auction_contract_hash(), + METHOD_DISTRIBUTE, runtime_args! 
{ ARG_ENTRY_POINT => METHOD_DISTRIBUTE, - ARG_REWARD_FACTORS => reward_factors + ARG_REWARDS_MAP => rewards }, ) .build(); builder.exec(distribute_request).commit().expect_success(); - let validator_1_balance = { + let delegators_share = { + let commission_rate = Ratio::new( + U512::from(VALIDATOR_1_DELEGATION_RATE), + U512::from(DELEGATION_RATE_DENOMINATOR), + ); + let reward_multiplier = + Ratio::new(U512::from(TOTAL_DELEGATOR_STAKE), U512::from(TOTAL_STAKE)); + let delegator_reward = expected_total_reward + .checked_mul(&reward_multiplier) + .expect("must get delegator reward"); + let commission = delegator_reward + .checked_mul(&commission_rate) + .expect("must get commission"); + delegator_reward.checked_sub(&commission).unwrap() + }; + + let delegator_1_expected_payout = { + let reward_multiplier = Ratio::new( + U512::from(DELEGATOR_1_STAKE), + U512::from(TOTAL_DELEGATOR_STAKE), + ); + delegators_share + .checked_mul(&reward_multiplier) + .map(|ratio| ratio.to_integer()) + .expect("must get delegator 1 payout") + }; + + let delegator_2_expected_payout = { + let reward_multiplier = Ratio::new( + U512::from(DELEGATOR_2_STAKE), + U512::from(TOTAL_DELEGATOR_STAKE), + ); + delegators_share + .checked_mul(&reward_multiplier) + .map(|ratio| ratio.to_integer()) + .expect("must get delegator 2 payout") + }; + + let validator_1_expected_payout = { + let total_delegator_payout = delegator_1_expected_payout + delegator_2_expected_payout; + let validator_share = expected_total_reward - Ratio::from(total_delegator_payout); + validator_share.to_integer() + }; + + let validator_1_actual_payout = { let vaildator_stake_before = U512::from(VALIDATOR_1_STAKE); - let validator_stake_after = *get_validator_bid(&mut builder, VALIDATOR_1.clone()) + let validator_stake_after = get_validator_bid(&mut builder, VALIDATOR_1.clone()) .expect("should have validator bid") .staked_amount(); validator_stake_after - vaildator_stake_before }; - let expected_validator_1_balance_ratio = - 
expected_total_reward * participant_portion + remainders; - let expected_validator_1_balance = expected_validator_1_balance_ratio.to_integer(); assert_eq!( - validator_1_balance, expected_validator_1_balance, + validator_1_actual_payout, validator_1_expected_payout, "rhs {}", - expected_validator_1_balance_ratio + validator_1_expected_payout ); - let delegator_1_balance = { + let delegator_1_actual_payout = { let delegator_stake_before = U512::from(DELEGATOR_1_STAKE); let delegator_stake_after = get_delegator_staked_amount(&mut builder, VALIDATOR_1.clone(), DELEGATOR_1.clone()); delegator_stake_after - delegator_stake_before }; - let expected_delegator_1_balance = (expected_total_reward * participant_portion).to_integer(); - assert_eq!(delegator_1_balance, expected_delegator_1_balance); + assert_eq!(delegator_1_actual_payout, delegator_1_expected_payout); - let delegator_2_balance = { + let delegator_2_actual_payout = { let delegator_stake_before = U512::from(DELEGATOR_2_STAKE); let delegator_stake_after = get_delegator_staked_amount(&mut builder, VALIDATOR_1.clone(), DELEGATOR_2.clone()); delegator_stake_after - delegator_stake_before }; - let expected_delegator_2_balance = (expected_total_reward * participant_portion).to_integer(); - assert_eq!(delegator_2_balance, expected_delegator_2_balance); - - let total_payout = validator_1_balance + delegator_1_balance + delegator_2_balance; - assert_eq!(total_payout, expected_total_reward_integer); + assert_eq!(delegator_2_actual_payout, delegator_2_expected_payout); // Subsequently, there should be no more rewards let validator_1_balance = { @@ -334,11 +348,9 @@ fn should_distribute_delegation_rate_zero() { &mut builder, *VALIDATOR_1_ADDR, VALIDATOR_1.clone(), - validator_1_balance + U512::from(VALIDATOR_1_STAKE), + validator_1_actual_payout + U512::from(VALIDATOR_1_STAKE), ); - let validator_1_bid = get_validator_bid(&mut builder, VALIDATOR_1.clone()).unwrap(); - assert!(validator_1_bid.inactive()); - 
assert!(validator_1_bid.staked_amount().is_zero()); + assert!(get_validator_bid(&mut builder, VALIDATOR_1.clone()).is_none()); U512::zero() }; assert_eq!(validator_1_balance, U512::zero()); @@ -355,56 +367,44 @@ fn should_distribute_delegation_rate_zero() { let delegator_2_balance = { assert!( get_delegator_bid(&mut builder, VALIDATOR_1.clone(), DELEGATOR_2.clone()).is_none(), - "validator withdrawing full stake also removes delegator 2 reinvested funds" + "validator withdrawing full stake also removes delegator 1 reinvested funds" ); U512::zero() }; assert!(delegator_2_balance.is_zero()); - let era_info = { - let era = builder.get_era(); - - let era_info_value = builder - .query(None, Key::EraInfo(era), &[]) - .expect("should have value"); - - era_info_value - .as_era_info() - .cloned() - .expect("should be era info") - }; + let era_info = get_era_info(&mut builder); assert!(matches!( era_info.select(VALIDATOR_1.clone()).next(), Some(SeigniorageAllocation::Validator { validator_public_key, amount }) - if *validator_public_key == *VALIDATOR_1 && *amount == expected_validator_1_balance + if *validator_public_key == *VALIDATOR_1 && *amount == validator_1_expected_payout )); assert!(matches!( era_info.select(DELEGATOR_1.clone()).next(), - Some(SeigniorageAllocation::Delegator { delegator_public_key, amount, .. }) - if *delegator_public_key == *DELEGATOR_1 && *amount == expected_delegator_1_balance + Some(SeigniorageAllocation::DelegatorKind { delegator_kind: DelegatorKind::PublicKey(delegator_public_key) , amount, .. }) + if *delegator_public_key == *DELEGATOR_1 && *amount == delegator_1_expected_payout )); assert!(matches!( era_info.select(DELEGATOR_2.clone()).next(), - Some(SeigniorageAllocation::Delegator { delegator_public_key, amount, .. }) - if *delegator_public_key == *DELEGATOR_2 && *amount == expected_delegator_1_balance + Some(SeigniorageAllocation::DelegatorKind { delegator_kind: DelegatorKind::PublicKey(delegator_public_key), amount, .. 
}) + if *delegator_public_key == *DELEGATOR_2 && *amount == delegator_2_expected_payout )); } #[ignore] #[test] fn should_withdraw_bids_after_distribute() { - const VALIDATOR_1_STAKE: u64 = 1_000_000; - const DELEGATOR_1_STAKE: u64 = 1_000_000; - const DELEGATOR_2_STAKE: u64 = 1_000_000; + const VALIDATOR_1_STAKE: u64 = DEFAULT_MINIMUM_DELEGATION_AMOUNT; + const DELEGATOR_1_STAKE: u64 = DEFAULT_MINIMUM_DELEGATION_AMOUNT; + const DELEGATOR_2_STAKE: u64 = DEFAULT_MINIMUM_DELEGATION_AMOUNT; + const TOTAL_DELEGATOR_STAKE: u64 = DELEGATOR_1_STAKE + DELEGATOR_2_STAKE; + const TOTAL_STAKE: u64 = VALIDATOR_1_STAKE + TOTAL_DELEGATOR_STAKE; const VALIDATOR_1_DELEGATION_RATE: DelegationRate = 0; - let participant_portion = Ratio::new(U512::one(), U512::from(3)); - let remainders = Ratio::from(U512::zero()); - let system_fund_request = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, CONTRACT_TRANSFER_TO_ACCOUNT, @@ -488,90 +488,138 @@ fn should_withdraw_bids_after_distribute() { delegator_2_delegate_request, ]; - let mut timestamp_millis = - DEFAULT_GENESIS_TIMESTAMP_MILLIS + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS; + let mut builder = LmdbWasmTestBuilder::default(); - let mut builder = InMemoryWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + let protocol_version = DEFAULT_PROTOCOL_VERSION; + let total_payout = builder.base_round_reward(None, protocol_version); // initial token supply - let initial_supply = builder.total_supply(None); - let expected_total_reward = *GENESIS_ROUND_SEIGNIORAGE_RATE * initial_supply; + let initial_supply = builder.total_supply(protocol_version, None); + let rate = builder.round_seigniorage_rate(None, protocol_version); + + let expected_total_reward = rate * initial_supply; let expected_total_reward_integer = expected_total_reward.to_integer(); + assert_eq!(total_payout, expected_total_reward_integer); + for request in post_genesis_requests { 
builder.exec(request).commit().expect_success(); } - for _ in 0..5 { - builder.run_auction(timestamp_millis, Vec::new()); - timestamp_millis += TIMESTAMP_MILLIS_INCREMENT; + for _ in 0..=builder.get_auction_delay() { + let step_request = StepRequestBuilder::new() + .with_parent_state_hash(builder.get_post_state_hash()) + .with_protocol_version(protocol_version) + .with_next_era_id(builder.get_era().successor()) + .with_run_auction(true) + .build(); + assert!( + builder.step(step_request).is_success(), + "must execute step successfully" + ); } - let reward_factors: BTreeMap = { - let mut tmp = BTreeMap::new(); - tmp.insert(VALIDATOR_1.clone(), BLOCK_REWARD); - tmp - }; + let mut rewards = BTreeMap::new(); + rewards.insert(VALIDATOR_1.clone(), vec![total_payout]); - let distribute_request = ExecuteRequestBuilder::standard( + let distribute_request = ExecuteRequestBuilder::contract_call_by_hash( *SYSTEM_ADDR, - CONTRACT_AUCTION_BIDS, + builder.get_auction_contract_hash(), + METHOD_DISTRIBUTE, runtime_args! 
{ ARG_ENTRY_POINT => METHOD_DISTRIBUTE, - ARG_REWARD_FACTORS => reward_factors + ARG_REWARDS_MAP => rewards }, ) .build(); builder.exec(distribute_request).commit().expect_success(); - let validator_1_balance = { - let vaildator_stake_before = U512::from(VALIDATOR_1_STAKE); - let validator_stake_after = *get_validator_bid(&mut builder, VALIDATOR_1.clone()) + let validator_1_actual_payout = { + let validator_stake_before = U512::from(VALIDATOR_1_STAKE); + let validator_stake_after = get_validator_bid(&mut builder, VALIDATOR_1.clone()) .expect("should have validator bid") .staked_amount(); - validator_stake_after - vaildator_stake_before + validator_stake_after - validator_stake_before + }; + + let delegators_share = { + let commission_rate = Ratio::new( + U512::from(VALIDATOR_1_DELEGATION_RATE), + U512::from(DELEGATION_RATE_DENOMINATOR), + ); + let reward_multiplier = + Ratio::new(U512::from(TOTAL_DELEGATOR_STAKE), U512::from(TOTAL_STAKE)); + let delegator_reward = expected_total_reward + .checked_mul(&reward_multiplier) + .expect("must get delegator reward"); + let commission = delegator_reward + .checked_mul(&commission_rate) + .expect("must get commission"); + delegator_reward.checked_sub(&commission).unwrap() + }; + + let delegator_1_expected_payout = { + let reward_multiplier = Ratio::new( + U512::from(DELEGATOR_1_STAKE), + U512::from(TOTAL_DELEGATOR_STAKE), + ); + reward_multiplier + .checked_mul(&delegators_share) + .map(|ratio| ratio.to_integer()) + .unwrap() + }; + + let delegator_2_expected_payout = { + let reward_multiplier = Ratio::new( + U512::from(DELEGATOR_2_STAKE), + U512::from(TOTAL_DELEGATOR_STAKE), + ); + reward_multiplier + .checked_mul(&delegators_share) + .map(|ratio| ratio.to_integer()) + .unwrap() }; - let expected_validator_1_balance_ratio = - expected_total_reward * participant_portion + remainders; - let expected_validator_1_balance = expected_validator_1_balance_ratio.to_integer(); + let validator_1_expected_payout = { + let 
total_delegator_payout = delegator_1_expected_payout + delegator_2_expected_payout; + let validator_share = expected_total_reward - Ratio::from(total_delegator_payout); + validator_share.to_integer() + }; assert_eq!( - validator_1_balance, expected_validator_1_balance, + validator_1_actual_payout, validator_1_expected_payout, "rhs {}", - expected_validator_1_balance_ratio + validator_1_expected_payout ); - let delegator_1_balance = { + let delegator_1_actual_payout = { let delegator_stake_before = U512::from(DELEGATOR_1_STAKE); let delegator_stake_after = get_delegator_staked_amount(&mut builder, VALIDATOR_1.clone(), DELEGATOR_1.clone()); delegator_stake_after - delegator_stake_before }; - let expected_delegator_1_balance = (expected_total_reward * participant_portion).to_integer(); - assert_eq!(delegator_1_balance, expected_delegator_1_balance); - let delegator_2_balance = { + assert_eq!(delegator_1_actual_payout, delegator_1_expected_payout); + + let delegator_2_actual_payout = { let delegator_stake_before = U512::from(DELEGATOR_2_STAKE); let delegator_stake_after = get_delegator_staked_amount(&mut builder, VALIDATOR_1.clone(), DELEGATOR_2.clone()); delegator_stake_after - delegator_stake_before }; - let expected_delegator_2_balance = (expected_total_reward * participant_portion).to_integer(); - assert_eq!(delegator_2_balance, expected_delegator_2_balance); - let total_payout = validator_1_balance + delegator_1_balance + delegator_2_balance; - assert_eq!(total_payout, expected_total_reward_integer); + assert_eq!(delegator_2_actual_payout, delegator_2_expected_payout); let delegator_1_unstaked_amount = { assert!( get_delegator_bid(&mut builder, VALIDATOR_1.clone(), DELEGATOR_1.clone()).is_some(), "delegator 1 should have a stake" ); - let undelegate_amount = U512::from(DELEGATOR_1_STAKE) + delegator_1_balance; + let undelegate_amount = U512::from(DELEGATOR_1_STAKE) + delegator_1_actual_payout; + undelegate( &mut builder, *DELEGATOR_1_ADDR, @@ -583,7 +631,7 @@ fn 
should_withdraw_bids_after_distribute() { get_delegator_bid(&mut builder, VALIDATOR_1.clone(), DELEGATOR_1.clone()).is_none(), "delegator 1 did not unstake full expected amount" ); - delegator_1_balance + delegator_1_actual_payout }; assert!( !delegator_1_unstaked_amount.is_zero(), @@ -595,7 +643,7 @@ fn should_withdraw_bids_after_distribute() { get_delegator_bid(&mut builder, VALIDATOR_1.clone(), DELEGATOR_2.clone()).is_some(), "delegator 2 should have a stake" ); - let undelegate_amount = U512::from(DELEGATOR_2_STAKE) + delegator_2_balance; + let undelegate_amount = U512::from(DELEGATOR_2_STAKE) + delegator_2_actual_payout; undelegate( &mut builder, *DELEGATOR_2_ADDR, @@ -607,7 +655,7 @@ fn should_withdraw_bids_after_distribute() { get_delegator_bid(&mut builder, VALIDATOR_1.clone(), DELEGATOR_2.clone()).is_none(), "delegator 2 did not unstake full expected amount" ); - delegator_2_balance + delegator_2_actual_payout }; assert!( !delegator_2_unstaked_amount.is_zero(), @@ -619,7 +667,7 @@ fn should_withdraw_bids_after_distribute() { get_validator_bid(&mut builder, VALIDATOR_1.clone()).is_some(), "validator 1 should have a stake" ); - let withdraw_bid_amount = validator_1_balance + U512::from(VALIDATOR_1_STAKE); + let withdraw_bid_amount = validator_1_actual_payout + U512::from(VALIDATOR_1_STAKE); withdraw_bid( &mut builder, *VALIDATOR_1_ADDR, @@ -627,58 +675,44 @@ fn should_withdraw_bids_after_distribute() { withdraw_bid_amount, ); - let bid = get_validator_bid(&mut builder, VALIDATOR_1.clone()).unwrap(); - assert!(bid.inactive()); - assert!(bid.staked_amount().is_zero()); + assert!(get_validator_bid(&mut builder, VALIDATOR_1.clone()).is_none()); withdraw_bid_amount }; assert!(!validator_1_balance.is_zero()); - let era_info = { - let era = builder.get_era(); - - let era_info_value = builder - .query(None, Key::EraInfo(era), &[]) - .expect("should have value"); - - era_info_value - .as_era_info() - .cloned() - .expect("should be era info") - }; + let era_info = 
get_era_info(&mut builder); assert!(matches!( era_info.select(VALIDATOR_1.clone()).next(), Some(SeigniorageAllocation::Validator { validator_public_key, amount }) - if *validator_public_key == *VALIDATOR_1 && *amount == expected_validator_1_balance + if *validator_public_key == *VALIDATOR_1 && *amount == validator_1_expected_payout )); assert!(matches!( era_info.select(DELEGATOR_1.clone()).next(), - Some(SeigniorageAllocation::Delegator { delegator_public_key, amount, .. }) - if *delegator_public_key == *DELEGATOR_1 && *amount == expected_delegator_1_balance + Some(SeigniorageAllocation::DelegatorKind { delegator_kind: DelegatorKind::PublicKey(delegator_public_key), amount, .. }) + if *delegator_public_key == *DELEGATOR_1 && *amount == delegator_1_expected_payout )); assert!(matches!( era_info.select(DELEGATOR_2.clone()).next(), - Some(SeigniorageAllocation::Delegator { delegator_public_key, amount, .. }) - if *delegator_public_key == *DELEGATOR_2 && *amount == expected_delegator_1_balance + Some(SeigniorageAllocation::DelegatorKind { delegator_kind: DelegatorKind::PublicKey(delegator_public_key), amount, .. 
}) + if *delegator_public_key == *DELEGATOR_2 && *amount == delegator_1_expected_payout )); } #[ignore] -#[test] +#[allow(unused)] +// #[test] fn should_distribute_rewards_after_restaking_delegated_funds() { - const VALIDATOR_1_STAKE: u64 = 1_000_000; - const DELEGATOR_1_STAKE: u64 = 1_000_000; - const DELEGATOR_2_STAKE: u64 = 1_000_000; + const VALIDATOR_1_STAKE: u64 = 7_000_000_000_000_000; + const DELEGATOR_1_STAKE: u64 = 5_000_000_000_000_000; + const DELEGATOR_2_STAKE: u64 = 5_000_000_000_000_000; + const TOTAL_DELEGATOR_STAKE: u64 = DELEGATOR_1_STAKE + DELEGATOR_2_STAKE; const VALIDATOR_1_DELEGATION_RATE: DelegationRate = 0; - let participant_portion = Ratio::new(U512::one(), U512::from(3)); - let remainders = Ratio::from(U512::zero()); - let system_fund_request = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, CONTRACT_TRANSFER_TO_ACCOUNT, @@ -762,300 +796,247 @@ fn should_distribute_rewards_after_restaking_delegated_funds() { delegator_2_delegate_request, ]; - let mut timestamp_millis = - DEFAULT_GENESIS_TIMESTAMP_MILLIS + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS; + let mut builder = LmdbWasmTestBuilder::default(); - let mut builder = InMemoryWasmTestBuilder::default(); - - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + let protocol_version = DEFAULT_PROTOCOL_VERSION; // initial token supply - let initial_supply = builder.total_supply(None); - let expected_total_reward_1 = *GENESIS_ROUND_SEIGNIORAGE_RATE * initial_supply; - let expected_total_reward_1_integer = expected_total_reward_1.to_integer(); - - for request in post_genesis_requests { - builder.exec(request).commit().expect_success(); - } - - for _ in 0..5 { - builder.run_auction(timestamp_millis, Vec::new()); - timestamp_millis += TIMESTAMP_MILLIS_INCREMENT; - } + let initial_supply = builder.total_supply(protocol_version, None); + let initial_rate = builder.round_seigniorage_rate(None, protocol_version); + let initial_round_reward = 
builder.base_round_reward(None, protocol_version); - let reward_factors: BTreeMap = { - let mut tmp = BTreeMap::new(); - tmp.insert(VALIDATOR_1.clone(), BLOCK_REWARD); - tmp - }; - - let distribute_request_1 = ExecuteRequestBuilder::standard( - *SYSTEM_ADDR, - CONTRACT_AUCTION_BIDS, - runtime_args! { - ARG_ENTRY_POINT => METHOD_DISTRIBUTE, - ARG_REWARD_FACTORS => reward_factors.clone(), - }, - ) - .build(); - - builder.exec(distribute_request_1).commit().expect_success(); - - let validator_1_staked_amount_1 = *get_validator_bid(&mut builder, VALIDATOR_1.clone()) - .expect("should have validator bid") - .staked_amount(); - let delegator_1_staked_amount_1 = - get_delegator_staked_amount(&mut builder, VALIDATOR_1.clone(), DELEGATOR_1.clone()); - let delegator_2_staked_amount_1 = - get_delegator_staked_amount(&mut builder, VALIDATOR_1.clone(), DELEGATOR_2.clone()); - - let validator_1_updated_stake_1 = { - let validator_stake_before = U512::from(VALIDATOR_1_STAKE); - let validator_stake_after = validator_1_staked_amount_1; - - validator_stake_after - validator_stake_before - }; - - let expected_validator_1_balance_ratio_1 = - expected_total_reward_1 * participant_portion + remainders; - let expected_validator_1_payout_1 = expected_validator_1_balance_ratio_1.to_integer(); + let initial_expected_reward_rate = initial_rate * initial_supply; assert_eq!( - validator_1_updated_stake_1, expected_validator_1_payout_1, - "rhs {}", - expected_validator_1_balance_ratio_1 + initial_round_reward, + initial_expected_reward_rate.to_integer() ); - let delegator_1_updated_stake_1 = { - let delegator_stake_before = U512::from(DELEGATOR_1_STAKE); - let delegator_stake_after = delegator_1_staked_amount_1; - - delegator_stake_after - delegator_stake_before - }; - let expected_delegator_1_payout_1 = - (expected_total_reward_1 * participant_portion).to_integer(); - assert_eq!(delegator_1_updated_stake_1, expected_delegator_1_payout_1); - - let delegator_2_updated_stake_1 = { - let 
delegator_stake_before = U512::from(DELEGATOR_2_STAKE); - let delegator_stake_after = delegator_2_staked_amount_1; - delegator_stake_after - delegator_stake_before - }; - let expected_delegator_2_payout_1 = - (expected_total_reward_1 * participant_portion).to_integer(); - assert_eq!(delegator_2_updated_stake_1, expected_delegator_2_payout_1); - - let total_payout_1 = - validator_1_updated_stake_1 + delegator_1_updated_stake_1 + delegator_2_updated_stake_1; - assert_eq!(total_payout_1, expected_total_reward_1_integer); - - let era_info_1 = { - let era = builder.get_era(); - - let era_info_value = builder - .query(None, Key::EraInfo(era), &[]) - .expect("should have value"); - - era_info_value - .as_era_info() - .cloned() - .expect("should be era info") - }; - - assert!(matches!( - era_info_1.select(VALIDATOR_1.clone()).next(), - Some(SeigniorageAllocation::Validator { validator_public_key, amount }) - if *validator_public_key == *VALIDATOR_1 && *amount == expected_validator_1_payout_1 - )); - - assert!(matches!( - era_info_1.select(DELEGATOR_1.clone()).next(), - Some(SeigniorageAllocation::Delegator { delegator_public_key, amount, .. }) - if *delegator_public_key == *DELEGATOR_1 && *amount == expected_delegator_1_payout_1 - )); - - assert!(matches!( - era_info_1.select(DELEGATOR_2.clone()).next(), - Some(SeigniorageAllocation::Delegator { delegator_public_key, amount, .. }) - if *delegator_public_key == *DELEGATOR_2 && *amount == expected_delegator_2_payout_1 - )); - - // Next round of rewards - for _ in 0..5 { - builder.run_auction(timestamp_millis, Vec::new()); - timestamp_millis += TIMESTAMP_MILLIS_INCREMENT; + for request in post_genesis_requests { + builder.exec(request).commit().expect_success(); } - let total_supply_2 = builder.total_supply(None); - assert!(total_supply_2 > initial_supply); - - let distribute_request_2 = ExecuteRequestBuilder::standard( - *SYSTEM_ADDR, - CONTRACT_AUCTION_BIDS, - runtime_args! 
{ - ARG_ENTRY_POINT => METHOD_DISTRIBUTE, - ARG_REWARD_FACTORS => reward_factors, - }, - ) - .build(); - - builder.exec(distribute_request_2).commit().expect_success(); - - let expected_total_reward_2 = *GENESIS_ROUND_SEIGNIORAGE_RATE * total_supply_2; - - let expected_total_reward_2_integer = expected_total_reward_2.to_integer(); - - let validator_1_staked_amount_2 = *get_validator_bid(&mut builder, VALIDATOR_1.clone()) - .expect("should have validator bid") - .staked_amount(); - let delegator_1_staked_amount_2 = - get_delegator_staked_amount(&mut builder, VALIDATOR_1.clone(), DELEGATOR_1.clone()); - let delegator_2_staked_amount_2 = - get_delegator_staked_amount(&mut builder, VALIDATOR_1.clone(), DELEGATOR_2.clone()); - - let validator_1_updated_stake_2 = { - let validator_stake_before = validator_1_staked_amount_1; - let validator_stake_after = validator_1_staked_amount_2; - validator_stake_after - validator_stake_before - }; - - let delegator_1_updated_stake_2 = { - let delegator_stake_before = delegator_1_staked_amount_1; - let delegator_stake_after = delegator_1_staked_amount_2; - - delegator_stake_after - delegator_stake_before - }; - let expected_delegator_1_payout_2 = - (expected_total_reward_2 * participant_portion).to_integer(); - assert_eq!(delegator_1_updated_stake_2, expected_delegator_1_payout_2); - - let delegator_2_updated_stake_2 = { - let delegator_stake_before = delegator_2_staked_amount_1; - let delegator_stake_after = delegator_2_staked_amount_2; - delegator_stake_after - delegator_stake_before - }; - let expected_delegator_2_payout_2 = - (expected_total_reward_2 * participant_portion).to_integer(); - assert_eq!(delegator_2_updated_stake_2, expected_delegator_2_payout_2); - - // Ensure that paying out next set of rewards gives higher payouts than previous time. 
- assert!(validator_1_updated_stake_2 > validator_1_updated_stake_1); - assert!(delegator_1_updated_stake_2 > delegator_1_updated_stake_1); - assert!(delegator_2_updated_stake_2 > delegator_2_updated_stake_1); - - let total_payout_2 = - validator_1_updated_stake_2 + delegator_1_updated_stake_2 + delegator_2_updated_stake_2; - assert_eq!(total_payout_2, expected_total_reward_2_integer); - assert!(total_payout_2 > total_payout_1); - - let era_info_2 = { - let era = builder.get_era(); - - let era_info_value = builder - .query(None, Key::EraInfo(era), &[]) - .expect("should have value"); - - era_info_value - .as_era_info() - .cloned() - .expect("should be era info") - }; - assert_ne!(era_info_2, era_info_1); - - let expected_validator_1_balance_ratio_2 = - expected_total_reward_2 * participant_portion + remainders; - - assert!(expected_validator_1_balance_ratio_2 > expected_validator_1_balance_ratio_1); - - let expected_validator_1_payout_2 = expected_validator_1_balance_ratio_2.to_integer(); + // we need to crank forward because our validator is not a genesis validator + builder.advance_eras_by_default_auction_delay(); + + let mut era = builder.get_era(); + let mut round_reward = initial_round_reward; + let mut total_supply = initial_supply; + let mut expected_reward_rate = initial_expected_reward_rate; + let mut validator_stake = U512::from(VALIDATOR_1_STAKE); + let mut delegator_1_stake = U512::from(DELEGATOR_1_STAKE); + let mut delegator_2_stake = U512::from(DELEGATOR_2_STAKE); + let mut total_delegator_stake = U512::from(TOTAL_DELEGATOR_STAKE); + let mut total_stake = U512::from(VALIDATOR_1_STAKE + TOTAL_DELEGATOR_STAKE); + for idx in 0..10 { + let rewards = { + let mut rewards = BTreeMap::new(); + rewards.insert(VALIDATOR_1.clone(), vec![round_reward]); + rewards + }; + + let result = builder.distribute(None, protocol_version, rewards, Timestamp::now().millis()); + assert!(result.is_success(), "failed to distribute {:?}", result); + builder.advance_era(); + let 
current_era = builder.get_era(); + assert_eq!( + era.successor(), + current_era, + "unexpected era {:?}", + current_era + ); + era = current_era; - assert!(matches!( - era_info_2.select(VALIDATOR_1.clone()).next(), - Some(SeigniorageAllocation::Validator { validator_public_key, amount, .. }) - if *validator_public_key == *VALIDATOR_1 && *amount == expected_validator_1_payout_2 - )); + let updated_round_reward = builder.base_round_reward(None, protocol_version); + round_reward = updated_round_reward; - assert!(matches!( - era_info_2.select(DELEGATOR_1.clone()).next(), - Some(SeigniorageAllocation::Delegator { delegator_public_key, amount, .. }) - if *delegator_public_key == *DELEGATOR_1 && *amount == expected_delegator_1_payout_2 - )); + let updated_validator_stake = get_validator_bid(&mut builder, VALIDATOR_1.clone()) + .expect("should have validator bid") + .staked_amount(); + assert!( + updated_validator_stake > validator_stake, + "validator stake should go up" + ); + let updated_delegator_1_stake = + get_delegator_staked_amount(&mut builder, VALIDATOR_1.clone(), DELEGATOR_1.clone()); + assert!( + updated_delegator_1_stake > delegator_1_stake, + "delegator 1 stake should go up" + ); + let updated_delegator_2_stake = + get_delegator_staked_amount(&mut builder, VALIDATOR_1.clone(), DELEGATOR_2.clone()); + assert!( + updated_delegator_2_stake > delegator_2_stake, + "delegator 2 stake should go up was: {:?} is: {:?}", + delegator_2_stake, + updated_delegator_2_stake, + ); + let updated_total_delegator_stake = updated_delegator_1_stake + updated_delegator_2_stake; + assert!( + updated_total_delegator_stake > total_delegator_stake, + "total delegator stake should go up" + ); + total_delegator_stake = updated_total_delegator_stake; + let updated_total_stake = updated_validator_stake + updated_total_delegator_stake; + assert!( + updated_total_stake > total_stake, + "total stake should go up" + ); - assert!(matches!( - era_info_2.select(DELEGATOR_2.clone()).next(), - 
Some(SeigniorageAllocation::Delegator { delegator_public_key, amount, .. }) - if *delegator_public_key == *DELEGATOR_2 && *amount == expected_delegator_2_payout_2 - )); + let delegators_share = { + let commission_rate = Ratio::new( + U512::from(VALIDATOR_1_DELEGATION_RATE), + U512::from(DELEGATION_RATE_DENOMINATOR), + ); + let reward_multiplier = Ratio::new(updated_total_delegator_stake, updated_total_stake); + let delegator_reward = expected_reward_rate + .checked_mul(&reward_multiplier) + .expect("should get delegator reward ratio"); + let commission = delegator_reward + .checked_mul(&commission_rate) + .expect("must get delegator reward"); + delegator_reward.checked_sub(&commission).unwrap() + }; + + let delegator_1_expected_payout = { + let reward_multiplier = + Ratio::new(updated_delegator_1_stake, updated_total_delegator_stake); + delegators_share + .checked_mul(&reward_multiplier) + .map(|ratio| ratio.to_integer()) + .expect("must get delegator 1 reward") + }; + + let delegator_2_expected_payout = { + let reward_multiplier = + Ratio::new(updated_delegator_2_stake, updated_total_delegator_stake); + delegators_share + .checked_mul(&reward_multiplier) + .map(|ratio| ratio.to_integer()) + .expect("must get delegator 2 reward") + }; + + let validator_1_actual_payout = updated_validator_stake - validator_stake; + + let validator_1_expected_payout = (expected_reward_rate + - Ratio::from(delegator_1_expected_payout + delegator_2_expected_payout)) + .to_integer(); + assert_eq!(validator_1_actual_payout, validator_1_expected_payout); + + let delegator_1_actual_payout = updated_delegator_1_stake - delegator_1_stake; + assert_eq!(delegator_1_actual_payout, delegator_1_expected_payout); + + let delegator_2_actual_payout = updated_delegator_2_stake - delegator_2_stake; + assert_eq!(delegator_2_actual_payout, delegator_2_expected_payout); + + let updated_era_info = get_era_info(&mut builder); + + assert!(matches!( + updated_era_info.select(VALIDATOR_1.clone()).next(), + 
Some(SeigniorageAllocation::Validator { validator_public_key, amount }) + if *validator_public_key == *VALIDATOR_1 && *amount == validator_1_expected_payout + )); + + assert!(matches!( + updated_era_info.select(DELEGATOR_1.clone()).next(), + Some(SeigniorageAllocation::DelegatorKind { delegator_kind: DelegatorKind::PublicKey(delegator_public_key), amount, .. }) + if *delegator_public_key == *DELEGATOR_1 && *amount == delegator_1_expected_payout + )); + + assert!(matches!( + updated_era_info.select(DELEGATOR_2.clone()).next(), + Some(SeigniorageAllocation::DelegatorKind { delegator_kind: DelegatorKind::PublicKey(delegator_public_key), amount, .. }) + if *delegator_public_key == *DELEGATOR_2 && *amount == delegator_2_expected_payout + )); + + // Next round of rewards + let updated_supply = builder.total_supply(protocol_version, None); + assert!(updated_supply > total_supply); + total_supply = updated_supply; + + let updated_rate = builder.round_seigniorage_rate(None, protocol_version); + expected_reward_rate = updated_rate * total_supply; + + // lets churn the bids just to have some fun + let undelegate_amount = delegator_1_expected_payout - 1; + let undelegate_result = builder.bidding( + None, + protocol_version, + (*DELEGATOR_1_ADDR).into(), + AuctionMethod::Undelegate { + validator: VALIDATOR_1.clone(), + delegator: DelegatorKind::PublicKey(DELEGATOR_1.clone()), + amount: undelegate_amount, + }, + ); + assert!(undelegate_result.is_success(), "{:?}", undelegate_result); + builder.commit_transforms(builder.get_post_state_hash(), undelegate_result.effects()); + delegator_1_stake = + get_delegator_staked_amount(&mut builder, VALIDATOR_1.clone(), DELEGATOR_1.clone()); - // Withdraw delegator rewards - let delegator_1_rewards = delegator_1_updated_stake_1 + delegator_1_updated_stake_2; - assert!(delegator_1_rewards > U512::from(DELEGATOR_1_STAKE)); - undelegate( - &mut builder, - *DELEGATOR_1_ADDR, - DELEGATOR_1.clone(), - VALIDATOR_1.clone(), - delegator_1_rewards, - 
); - let remaining_delegator_1_bid = - get_delegator_bid(&mut builder, VALIDATOR_1.clone(), DELEGATOR_1.clone()) - .expect("should have delegator bid"); - assert_eq!( - *remaining_delegator_1_bid.staked_amount(), - U512::from(DELEGATOR_1_STAKE) - ); + let undelegate_amount = U512::from(1_000_000); + let undelegate_result = builder.bidding( + None, + protocol_version, + (*DELEGATOR_2_ADDR).into(), + AuctionMethod::Delegate { + max_delegators_per_validator: u32::MAX, + validator: VALIDATOR_1.clone(), + delegator: DelegatorKind::PublicKey(DELEGATOR_2.clone()), + amount: undelegate_amount, + }, + ); + assert!(undelegate_result.is_success(), "{:?}", undelegate_result); + builder.commit_transforms(builder.get_post_state_hash(), undelegate_result.effects()); + delegator_2_stake = + get_delegator_staked_amount(&mut builder, VALIDATOR_1.clone(), DELEGATOR_2.clone()); - let delegator_2_rewards = delegator_2_updated_stake_1 + delegator_2_updated_stake_2; - assert!(delegator_2_rewards > U512::from(DELEGATOR_2_STAKE)); - undelegate( - &mut builder, - *DELEGATOR_2_ADDR, - DELEGATOR_2.clone(), - VALIDATOR_1.clone(), - delegator_2_rewards, - ); - let remaining_delegator_2_bid = - get_delegator_bid(&mut builder, VALIDATOR_1.clone(), DELEGATOR_2.clone()) - .expect("should have delegator bid"); - assert_eq!( - *remaining_delegator_2_bid.staked_amount(), - U512::from(DELEGATOR_2_STAKE) - ); + let auction_method = { + let amount = U512::from(10_000_000); + if idx % 2 == 0 { + AuctionMethod::AddBid { + public_key: VALIDATOR_1.clone(), + amount, + delegation_rate: 0, + minimum_delegation_amount: undelegate_amount.as_u64(), + maximum_delegation_amount: undelegate_amount.as_u64(), + minimum_bid_amount: DEFAULT_MINIMUM_BID_AMOUNT, + reserved_slots: 0, + } + } else { + AuctionMethod::WithdrawBid { + public_key: VALIDATOR_1.clone(), + amount, + minimum_bid_amount: DEFAULT_MINIMUM_BID_AMOUNT, + } + } + }; + let bid_flip_result = builder.bidding( + None, + protocol_version, + 
(*VALIDATOR_1_ADDR).into(), + auction_method, + ); + assert!(bid_flip_result.is_success(), "{:?}", bid_flip_result); + builder.commit_transforms(builder.get_post_state_hash(), undelegate_result.effects()); + validator_stake = get_validator_bid(&mut builder, VALIDATOR_1.clone()) + .expect("should have validator bid") + .staked_amount(); - // Withdraw validator rewards - let validator_1_rewards = validator_1_updated_stake_1 + validator_1_updated_stake_2; - assert!(validator_1_rewards > U512::from(VALIDATOR_1_STAKE)); - withdraw_bid( - &mut builder, - *VALIDATOR_1_ADDR, - VALIDATOR_1.clone(), - validator_1_rewards, - ); - let remaining_validator_1_bid = - get_validator_bid(&mut builder, VALIDATOR_1.clone()).expect("should have validator bid"); - assert_eq!( - *remaining_validator_1_bid.staked_amount(), - U512::from(VALIDATOR_1_STAKE) - ); + total_stake = validator_stake + delegator_1_stake + delegator_2_stake; + } } #[ignore] #[test] -fn should_distribute_reinvested_rewards_by_different_factor() { - const VALIDATOR_1_STAKE: u64 = 4_000_000; - const VALIDATOR_2_STAKE: u64 = 2_000_000; - const VALIDATOR_3_STAKE: u64 = 1_000_000; - - const DELEGATION_RATE: DelegationRate = DELEGATION_RATE_DENOMINATOR; - - const VALIDATOR_1_REWARD_FACTOR_1: u64 = 333333333334; - const VALIDATOR_2_REWARD_FACTOR_1: u64 = 333333333333; - const VALIDATOR_3_REWARD_FACTOR_1: u64 = 333333333333; - - const VALIDATOR_1_REWARD_FACTOR_2: u64 = 333333333333; - const VALIDATOR_2_REWARD_FACTOR_2: u64 = 333333333333; - const VALIDATOR_3_REWARD_FACTOR_2: u64 = 333333333334; +fn should_distribute_delegation_rate_half() { + const VALIDATOR_1_STAKE: u64 = 1_000_000_000_000; + const DELEGATOR_1_STAKE: u64 = 1_000_000_000_000; + const DELEGATOR_2_STAKE: u64 = 1_000_000_000_000; + const TOTAL_DELEGATOR_STAKE: u64 = DELEGATOR_1_STAKE + DELEGATOR_2_STAKE; + const TOTAL_STAKE: u64 = VALIDATOR_1_STAKE + TOTAL_DELEGATOR_STAKE; - let one_third = Ratio::new(U512::one(), U512::from(3)); + const 
VALIDATOR_1_DELEGATION_RATE: DelegationRate = DELEGATION_RATE_DENOMINATOR / 2; let system_fund_request = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, @@ -1077,21 +1058,21 @@ fn should_distribute_reinvested_rewards_by_different_factor() { ) .build(); - let validator_2_fund_request = ExecuteRequestBuilder::standard( + let delegator_1_fund_request = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, CONTRACT_TRANSFER_TO_ACCOUNT, runtime_args! { - ARG_TARGET => *VALIDATOR_2_ADDR, + ARG_TARGET => *DELEGATOR_1_ADDR, ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) }, ) .build(); - let validator_3_fund_request = ExecuteRequestBuilder::standard( + let delegator_2_fund_request = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, CONTRACT_TRANSFER_TO_ACCOUNT, runtime_args! { - ARG_TARGET => *VALIDATOR_3_ADDR, + ARG_TARGET => *DELEGATOR_2_ADDR, ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) }, ) @@ -1102,351 +1083,189 @@ fn should_distribute_reinvested_rewards_by_different_factor() { CONTRACT_ADD_BID, runtime_args! { ARG_AMOUNT => U512::from(VALIDATOR_1_STAKE), - ARG_DELEGATION_RATE => DELEGATION_RATE, + ARG_DELEGATION_RATE => VALIDATOR_1_DELEGATION_RATE, ARG_PUBLIC_KEY => VALIDATOR_1.clone(), }, ) .build(); - let validator_2_add_bid_request = ExecuteRequestBuilder::standard( - *VALIDATOR_2_ADDR, - CONTRACT_ADD_BID, + let delegator_1_delegate_request = ExecuteRequestBuilder::standard( + *DELEGATOR_1_ADDR, + CONTRACT_DELEGATE, runtime_args! { - ARG_AMOUNT => U512::from(VALIDATOR_2_STAKE), - ARG_DELEGATION_RATE => DELEGATION_RATE, - ARG_PUBLIC_KEY => VALIDATOR_2.clone(), + ARG_AMOUNT => U512::from(DELEGATOR_1_STAKE), + ARG_VALIDATOR => VALIDATOR_1.clone(), + ARG_DELEGATOR => DELEGATOR_1.clone(), }, ) .build(); - let validator_3_add_bid_request = ExecuteRequestBuilder::standard( - *VALIDATOR_3_ADDR, - CONTRACT_ADD_BID, + let delegator_2_delegate_request = ExecuteRequestBuilder::standard( + *DELEGATOR_2_ADDR, + CONTRACT_DELEGATE, runtime_args! 
{ - ARG_AMOUNT => U512::from(VALIDATOR_3_STAKE), - ARG_DELEGATION_RATE => DELEGATION_RATE, - ARG_PUBLIC_KEY => VALIDATOR_3.clone(), - }, + ARG_AMOUNT => U512::from(DELEGATOR_2_STAKE), + ARG_VALIDATOR => VALIDATOR_1.clone(), + ARG_DELEGATOR => DELEGATOR_2.clone(), + }, ) .build(); let post_genesis_requests = vec![ system_fund_request, validator_1_fund_request, - validator_2_fund_request, - validator_3_fund_request, + delegator_1_fund_request, + delegator_2_fund_request, validator_1_add_bid_request, - validator_2_add_bid_request, - validator_3_add_bid_request, + delegator_1_delegate_request, + delegator_2_delegate_request, ]; - let mut timestamp_millis = - DEFAULT_GENESIS_TIMESTAMP_MILLIS + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS; + let mut builder = LmdbWasmTestBuilder::default(); - let mut builder = InMemoryWasmTestBuilder::default(); - - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + let protocol_version = DEFAULT_PROTOCOL_VERSION; // initial token supply - let initial_supply = builder.total_supply(None); - let expected_total_reward_1 = *GENESIS_ROUND_SEIGNIORAGE_RATE * initial_supply; - let expected_total_reward_1_integer = expected_total_reward_1.to_integer(); + let initial_supply = builder.total_supply(protocol_version, None); + let total_payout = builder.base_round_reward(None, protocol_version); + let expected_total_reward = *GENESIS_ROUND_SEIGNIORAGE_RATE * initial_supply; + let expected_total_reward_integer = expected_total_reward.to_integer(); + assert_eq!(total_payout, expected_total_reward_integer); for request in post_genesis_requests { builder.exec(request).commit().expect_success(); } - for _ in 0..5 { - builder.run_auction(timestamp_millis, Vec::new()); - timestamp_millis += TIMESTAMP_MILLIS_INCREMENT; + for _ in 0..=builder.get_auction_delay() { + let step_request = StepRequestBuilder::new() + .with_parent_state_hash(builder.get_post_state_hash()) + 
.with_protocol_version(ProtocolVersion::V1_0_0) + .with_next_era_id(builder.get_era().successor()) + .with_run_auction(true) + .build(); + + assert!( + builder.step(step_request).is_success(), + "must execute step successfully" + ); } - let reward_factors_1: BTreeMap = { - let mut tmp = BTreeMap::new(); - tmp.insert(VALIDATOR_1.clone(), VALIDATOR_1_REWARD_FACTOR_1); - tmp.insert(VALIDATOR_2.clone(), VALIDATOR_2_REWARD_FACTOR_1); - tmp.insert(VALIDATOR_3.clone(), VALIDATOR_3_REWARD_FACTOR_1); - tmp - }; + let mut rewards = BTreeMap::new(); + rewards.insert(VALIDATOR_1.clone(), vec![total_payout]); - let distribute_request_1 = ExecuteRequestBuilder::standard( + let distribute_request = ExecuteRequestBuilder::contract_call_by_hash( *SYSTEM_ADDR, - CONTRACT_AUCTION_BIDS, + builder.get_auction_contract_hash(), + METHOD_DISTRIBUTE, runtime_args! { ARG_ENTRY_POINT => METHOD_DISTRIBUTE, - ARG_REWARD_FACTORS => reward_factors_1, + ARG_REWARDS_MAP => rewards }, ) .build(); - builder.exec(distribute_request_1).commit().expect_success(); - - let validator_1_staked_amount_1 = *get_validator_bid(&mut builder, VALIDATOR_1.clone()) - .expect("should have validator bid") - .staked_amount(); - - let validator_2_staked_amount_1 = *get_validator_bid(&mut builder, VALIDATOR_2.clone()) - .expect("should have validator bid") - .staked_amount(); - - let validator_3_staked_amount_1 = *get_validator_bid(&mut builder, VALIDATOR_3.clone()) - .expect("should have validator bid") - .staked_amount(); - - let validator_1_updated_stake_1 = { - let validator_stake_before = U512::from(VALIDATOR_1_STAKE); - let validator_stake_after = validator_1_staked_amount_1; - validator_stake_after - validator_stake_before - }; - let expected_validator_1_payout = (expected_total_reward_1 * one_third).to_integer(); - assert_eq!(validator_1_updated_stake_1, expected_validator_1_payout); - - let rounded_amount = U512::one(); - - let validator_2_updated_stake_1 = { - let validator_stake_before = 
U512::from(VALIDATOR_2_STAKE); - let validator_stake_after = validator_2_staked_amount_1; - validator_stake_after - validator_stake_before - }; - let expected_validator_2_payout_1 = - (expected_total_reward_1 * one_third - rounded_amount).to_integer(); - assert_eq!(validator_2_updated_stake_1, expected_validator_2_payout_1); - - let validator_3_updated_stake_1 = { - let validator_stake_before = U512::from(VALIDATOR_3_STAKE); - let validator_stake_after = validator_3_staked_amount_1; - validator_stake_after - validator_stake_before - }; - let expected_validator_3_payout_1 = - (expected_total_reward_1 * one_third - rounded_amount).to_integer(); - assert_eq!(validator_3_updated_stake_1, expected_validator_3_payout_1); - - let total_payout = - validator_1_updated_stake_1 + validator_2_updated_stake_1 + expected_validator_3_payout_1; - let rounded_amount = U512::from(2); - assert_eq!( - total_payout, - expected_total_reward_1_integer - rounded_amount - ); - - let era_info_1 = { - let era = builder.get_era(); - - let era_info_value = builder - .query(None, Key::EraInfo(era), &[]) - .expect("should have value"); + builder.exec(distribute_request).commit().expect_success(); - era_info_value - .as_era_info() - .cloned() - .expect("should be era info") + let delegators_share = { + let commission_rate = Ratio::new( + U512::from(VALIDATOR_1_DELEGATION_RATE), + U512::from(DELEGATION_RATE_DENOMINATOR), + ); + let reward_multiplier = + Ratio::new(U512::from(TOTAL_DELEGATOR_STAKE), U512::from(TOTAL_STAKE)); + let delegator_reward = expected_total_reward + .checked_mul(&reward_multiplier) + .expect("must get delegator reward"); + let commission = delegator_reward + .checked_mul(&commission_rate) + .expect("must get commission"); + delegator_reward.checked_sub(&commission).unwrap() + }; + + let delegator_1_expected_payout = { + let reward_multiplier = Ratio::new( + U512::from(DELEGATOR_1_STAKE), + U512::from(TOTAL_DELEGATOR_STAKE), + ); + delegators_share + 
.checked_mul(&reward_multiplier) + .map(|ratio| ratio.to_integer()) + .unwrap() }; - assert!(matches!( - era_info_1.select(VALIDATOR_1.clone()).next(), - Some(SeigniorageAllocation::Validator { validator_public_key, amount }) - if *validator_public_key == *VALIDATOR_1 && *amount == expected_validator_1_payout - )); - - assert!(matches!( - era_info_1.select(VALIDATOR_2.clone()).next(), - Some(SeigniorageAllocation::Validator { validator_public_key, amount }) - if *validator_public_key == *VALIDATOR_2 && *amount == expected_validator_2_payout_1 - )); - - assert!(matches!( - era_info_1.select(VALIDATOR_3.clone()).next(), - Some(SeigniorageAllocation::Validator { validator_public_key, amount }) - if *validator_public_key == *VALIDATOR_3 && *amount == expected_validator_3_payout_1 - )); - - // New rewards - - for _ in 0..5 { - builder.run_auction(timestamp_millis, Vec::new()); - timestamp_millis += TIMESTAMP_MILLIS_INCREMENT; - } - - let total_supply_2 = builder.total_supply(None); - assert!(total_supply_2 > initial_supply); - - let reward_factors_2 = { - let mut tmp = BTreeMap::new(); - tmp.insert(VALIDATOR_1.clone(), VALIDATOR_1_REWARD_FACTOR_2); - tmp.insert(VALIDATOR_2.clone(), VALIDATOR_2_REWARD_FACTOR_2); - tmp.insert(VALIDATOR_3.clone(), VALIDATOR_3_REWARD_FACTOR_2); - tmp + let delegator_2_expected_payout = { + let reward_multiplier = Ratio::new( + U512::from(DELEGATOR_2_STAKE), + U512::from(TOTAL_DELEGATOR_STAKE), + ); + delegators_share + .checked_mul(&reward_multiplier) + .map(|ratio| ratio.to_integer()) + .unwrap() }; - let distribute_request_2 = ExecuteRequestBuilder::standard( - *SYSTEM_ADDR, - CONTRACT_AUCTION_BIDS, - runtime_args! 
{ - ARG_ENTRY_POINT => METHOD_DISTRIBUTE, - ARG_REWARD_FACTORS => reward_factors_2, - }, - ) - .build(); - - builder.exec(distribute_request_2).commit().expect_success(); - - let expected_total_reward_2 = *GENESIS_ROUND_SEIGNIORAGE_RATE * total_supply_2; - assert!(expected_total_reward_2 > expected_total_reward_1); - let expected_total_reward_2_integer = expected_total_reward_2.to_integer(); - - let validator_1_staked_amount_2 = *get_validator_bid(&mut builder, VALIDATOR_1.clone()) - .expect("should have validator bid") - .staked_amount(); - - let validator_2_staked_amount_2 = *get_validator_bid(&mut builder, VALIDATOR_2.clone()) - .expect("should have validator bid") - .staked_amount(); - - let validator_3_staked_amount_2 = *get_validator_bid(&mut builder, VALIDATOR_3.clone()) - .expect("should have validator bid") - .staked_amount(); - - let rounded_amount = U512::one(); - - let validator_1_updated_stake_2 = { - let validator_stake_before = validator_1_staked_amount_1; - let validator_stake_after = validator_1_staked_amount_2; - validator_stake_after - validator_stake_before + let validator_1_expected_payout = { + let total_delegator_payout = delegator_1_expected_payout + delegator_2_expected_payout; + let validators_part = expected_total_reward - Ratio::from(total_delegator_payout); + validators_part.to_integer() }; - assert!(validator_1_updated_stake_2 > validator_1_updated_stake_1); - - let expected_validator_1_payout_2 = - (expected_total_reward_2 * one_third - rounded_amount).to_integer(); - assert_eq!(validator_1_updated_stake_2, expected_validator_1_payout_2); - let validator_2_updated_stake_2 = { - let validator_stake_before = validator_2_staked_amount_1; - let validator_stake_after = validator_2_staked_amount_2; + let validator_1_actual_payout = { + let validator_stake_before = U512::from(VALIDATOR_1_STAKE); + let validator_stake_after = get_validator_bid(&mut builder, VALIDATOR_1.clone()) + .expect("should have validator bid") + .staked_amount(); 
validator_stake_after - validator_stake_before }; - assert!(validator_2_updated_stake_2 > validator_2_updated_stake_1); - let expected_validator_2_payout_2 = - (expected_total_reward_2 * one_third - rounded_amount).to_integer(); - assert_eq!(validator_2_updated_stake_2, expected_validator_2_payout_2); + assert_eq!(validator_1_actual_payout, validator_1_expected_payout); - let validator_3_updated_stake_2 = { - let validator_stake_before = validator_3_staked_amount_1; - let validator_stake_after = validator_3_staked_amount_2; - validator_stake_after - validator_stake_before + let delegator_1_actual_payout = { + let delegator_stake_before = U512::from(DELEGATOR_1_STAKE); + let delegator_stake_after = + get_delegator_staked_amount(&mut builder, VALIDATOR_1.clone(), DELEGATOR_1.clone()); + delegator_stake_after - delegator_stake_before }; - let expected_validator_3_payout_2 = (expected_total_reward_2 * one_third).to_integer(); - assert_eq!(validator_3_updated_stake_2, expected_validator_3_payout_2); - assert!(validator_3_updated_stake_2 > validator_3_updated_stake_1); - - assert!(validator_1_updated_stake_2 > validator_1_updated_stake_1); - assert!(validator_2_updated_stake_2 > validator_2_updated_stake_1); - assert!(validator_3_updated_stake_2 > validator_3_updated_stake_1); - - let total_payout_2 = - validator_1_updated_stake_2 + validator_2_updated_stake_2 + expected_validator_3_payout_2; - let rounded_amount = U512::from(2); - assert_eq!( - total_payout_2, - expected_total_reward_2_integer - rounded_amount - ); + assert_eq!(delegator_1_actual_payout, delegator_1_expected_payout); - let era_info_2 = { - let era = builder.get_era(); - - let era_info_value = builder - .query(None, Key::EraInfo(era), &[]) - .expect("should have value"); - - era_info_value - .as_era_info() - .cloned() - .expect("should be era info") + let delegator_2_actual_payout = { + let delegator_stake_before = U512::from(DELEGATOR_2_STAKE); + let delegator_stake_after = + 
get_delegator_staked_amount(&mut builder, VALIDATOR_1.clone(), DELEGATOR_2.clone()); + delegator_stake_after - delegator_stake_before }; + assert_eq!(delegator_2_actual_payout, delegator_2_expected_payout); - assert_ne!(era_info_1, era_info_2); + let era_info = get_era_info(&mut builder); assert!(matches!( - era_info_2.select(VALIDATOR_1.clone()).next(), + era_info.select(VALIDATOR_1.clone()).next(), Some(SeigniorageAllocation::Validator { validator_public_key, amount }) - if *validator_public_key == *VALIDATOR_1 && *amount == expected_validator_1_payout_2 + if *validator_public_key == *VALIDATOR_1 && *amount == validator_1_expected_payout )); assert!(matches!( - era_info_2.select(VALIDATOR_2.clone()).next(), - Some(SeigniorageAllocation::Validator { validator_public_key, amount }) - if *validator_public_key == *VALIDATOR_2 && *amount == expected_validator_2_payout_2 + era_info.select(DELEGATOR_1.clone()).next(), + Some(SeigniorageAllocation::DelegatorKind { delegator_kind: DelegatorKind::PublicKey(delegator_public_key), amount, .. }) + if *delegator_public_key == *DELEGATOR_1 && *amount == delegator_1_expected_payout )); assert!(matches!( - era_info_2.select(VALIDATOR_3.clone()).next(), - Some(SeigniorageAllocation::Validator { validator_public_key, amount }) - if *validator_public_key == *VALIDATOR_3 && *amount == expected_validator_3_payout_2 + era_info.select(DELEGATOR_2.clone()).next(), + Some(SeigniorageAllocation::DelegatorKind { delegator_kind: DelegatorKind::PublicKey(delegator_public_key), amount, .. 
}) + if *delegator_public_key == *DELEGATOR_2 && *amount == delegator_2_expected_payout )); - - // Ensure validators can withdraw their reinvested rewards - let validator_1_reward = validator_1_updated_stake_1 + validator_1_updated_stake_2; - assert!(validator_1_reward > U512::from(VALIDATOR_1_STAKE)); - withdraw_bid( - &mut builder, - *VALIDATOR_1_ADDR, - VALIDATOR_1.clone(), - validator_1_reward, - ); - let remaining_validator_1_bid = - get_validator_bid(&mut builder, VALIDATOR_1.clone()).expect("should have validator bid"); - assert_eq!( - *remaining_validator_1_bid.staked_amount(), - U512::from(VALIDATOR_1_STAKE) - ); - - let validator_2_reward = validator_2_updated_stake_1 + validator_2_updated_stake_2; - assert!(validator_2_reward > U512::from(VALIDATOR_2_STAKE)); - withdraw_bid( - &mut builder, - *VALIDATOR_2_ADDR, - VALIDATOR_2.clone(), - validator_2_reward, - ); - let remaining_validator_2_bid = - get_validator_bid(&mut builder, VALIDATOR_2.clone()).expect("should have validator bid"); - assert_eq!( - *remaining_validator_2_bid.staked_amount(), - U512::from(VALIDATOR_2_STAKE) - ); - - let validator_3_reward = validator_3_updated_stake_1 + validator_3_updated_stake_2; - assert!(validator_3_reward > U512::from(VALIDATOR_3_STAKE)); - withdraw_bid( - &mut builder, - *VALIDATOR_3_ADDR, - VALIDATOR_3.clone(), - validator_3_reward, - ); - let remaining_validator_3_bid = - get_validator_bid(&mut builder, VALIDATOR_3.clone()).expect("should have validator bid"); - assert_eq!( - *remaining_validator_3_bid.staked_amount(), - U512::from(VALIDATOR_3_STAKE) - ); } #[ignore] #[test] -fn should_distribute_delegation_rate_half() { - const VALIDATOR_1_STAKE: u64 = 1_000_000; - const DELEGATOR_1_STAKE: u64 = 1_000_000; - const DELEGATOR_2_STAKE: u64 = 1_000_000; - - const VALIDATOR_1_DELGATION_RATE: DelegationRate = DELEGATION_RATE_DENOMINATOR / 2; - - // Validator share - let validator_share = Ratio::new(U512::from(2), U512::from(3)); - let remainders = 
Ratio::from(U512::from(2)); - let rounded_amount = U512::from(2); +fn should_distribute_delegation_rate_full() { + const VALIDATOR_1_STAKE: u64 = 1_000_000_000_000; + const DELEGATOR_1_STAKE: u64 = 1_000_000_000_000; + const DELEGATOR_2_STAKE: u64 = 1_000_000_000_000; - // Delegator shares - let delegator_shares = Ratio::new(U512::one(), U512::from(6)); + const VALIDATOR_1_DELEGATION_RATE: DelegationRate = DELEGATION_RATE_DENOMINATOR; let system_fund_request = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, @@ -1493,8 +1312,10 @@ fn should_distribute_delegation_rate_half() { CONTRACT_ADD_BID, runtime_args! { ARG_AMOUNT => U512::from(VALIDATOR_1_STAKE), - ARG_DELEGATION_RATE => VALIDATOR_1_DELGATION_RATE, + ARG_DELEGATION_RATE => VALIDATOR_1_DELEGATION_RATE, ARG_PUBLIC_KEY => VALIDATOR_1.clone(), + auction::ARG_MINIMUM_DELEGATION_AMOUNT => 10, + auction::ARG_MAXIMUM_DELEGATION_AMOUNT => DELEGATOR_2_STAKE + 1, }, ) .build(); @@ -1534,12 +1355,13 @@ fn should_distribute_delegation_rate_half() { let mut timestamp_millis = DEFAULT_GENESIS_TIMESTAMP_MILLIS + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS; - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + let protocol_version = DEFAULT_PROTOCOL_VERSION; // initial token supply - let initial_supply = builder.total_supply(None); + let initial_supply = builder.total_supply(protocol_version, None); let expected_total_reward = *GENESIS_ROUND_SEIGNIORAGE_RATE * initial_supply; let expected_total_reward_integer = expected_total_reward.to_integer(); @@ -1552,69 +1374,55 @@ fn should_distribute_delegation_rate_half() { timestamp_millis += TIMESTAMP_MILLIS_INCREMENT; } - let reward_factors: BTreeMap = { - let mut tmp = BTreeMap::new(); - tmp.insert(VALIDATOR_1.clone(), BLOCK_REWARD); - tmp - }; + let mut rewards = BTreeMap::new(); + rewards.insert(VALIDATOR_1.clone(), 
vec![expected_total_reward_integer]); - let distribute_request = ExecuteRequestBuilder::standard( + let distribute_request = ExecuteRequestBuilder::contract_call_by_hash( *SYSTEM_ADDR, - CONTRACT_AUCTION_BIDS, + builder.get_auction_contract_hash(), + METHOD_DISTRIBUTE, runtime_args! { ARG_ENTRY_POINT => METHOD_DISTRIBUTE, - ARG_REWARD_FACTORS => reward_factors + ARG_REWARDS_MAP => rewards }, ) .build(); builder.exec(distribute_request).commit().expect_success(); - let validator_1_balance = { + let validator_1_updated_stake = { let validator_stake_before = U512::from(VALIDATOR_1_STAKE); - let validator_stake_after = *get_validator_bid(&mut builder, VALIDATOR_1.clone()) + let validator_stake_after = get_validator_bid(&mut builder, VALIDATOR_1.clone()) .expect("should have validator bid") .staked_amount(); validator_stake_after - validator_stake_before }; let expected_validator_1_balance = - (expected_total_reward * validator_share + remainders - rounded_amount).to_integer(); - - assert_eq!(validator_1_balance, expected_validator_1_balance); + (expected_total_reward * Ratio::from(U512::one())).to_integer(); + assert_eq!(validator_1_updated_stake, expected_validator_1_balance); - let delegator_1_balance = { - let delegator_stake_before = U512::from(DELEGATOR_1_STAKE); - let delegator_stake_after = + let delegator_1_updated_stake = { + let validator_stake_before = U512::from(DELEGATOR_1_STAKE); + let validator_stake_after = get_delegator_staked_amount(&mut builder, VALIDATOR_1.clone(), DELEGATOR_1.clone()); - delegator_stake_after - delegator_stake_before + validator_stake_after - validator_stake_before }; - let expected_delegator_1_balance = (expected_total_reward * delegator_shares).to_integer(); - assert_eq!(delegator_1_balance, expected_delegator_1_balance); + let expected_delegator_1_balance = U512::zero(); + assert_eq!(delegator_1_updated_stake, expected_delegator_1_balance); let delegator_2_balance = { - let delegator_stake_before = U512::from(DELEGATOR_2_STAKE); 
- let delegator_stake_after = + let validator_stake_before = U512::from(DELEGATOR_2_STAKE); + let validator_stake_after = get_delegator_staked_amount(&mut builder, VALIDATOR_1.clone(), DELEGATOR_2.clone()); - delegator_stake_after - delegator_stake_before + validator_stake_after - validator_stake_before }; - let expected_delegator_2_balance = (expected_total_reward * delegator_shares).to_integer(); + let expected_delegator_2_balance = U512::zero(); assert_eq!(delegator_2_balance, expected_delegator_2_balance); - let total_payout = validator_1_balance + delegator_1_balance + delegator_2_balance; + let total_payout = validator_1_updated_stake + delegator_1_updated_stake + delegator_2_balance; assert_eq!(total_payout, expected_total_reward_integer); - let era_info = { - let era = builder.get_era(); - - let era_info_value = builder - .query(None, Key::EraInfo(era), &[]) - .expect("should have value"); - - era_info_value - .as_era_info() - .cloned() - .expect("should be era info") - }; + let era_info = get_era_info(&mut builder); assert!(matches!( era_info.select(VALIDATOR_1.clone()).next(), @@ -1624,25 +1432,27 @@ fn should_distribute_delegation_rate_half() { assert!(matches!( era_info.select(DELEGATOR_1.clone()).next(), - Some(SeigniorageAllocation::Delegator { delegator_public_key, amount, .. }) + Some(SeigniorageAllocation::DelegatorKind { delegator_kind: DelegatorKind::PublicKey(delegator_public_key), amount, .. }) if *delegator_public_key == *DELEGATOR_1 && *amount == expected_delegator_1_balance )); assert!(matches!( era_info.select(DELEGATOR_2.clone()).next(), - Some(SeigniorageAllocation::Delegator { delegator_public_key, amount, .. }) + Some(SeigniorageAllocation::DelegatorKind { delegator_kind: DelegatorKind::PublicKey(delegator_public_key), amount, .. 
}) if *delegator_public_key == *DELEGATOR_2 && *amount == expected_delegator_1_balance )); } #[ignore] #[test] -fn should_distribute_delegation_rate_full() { - const VALIDATOR_1_STAKE: u64 = 1_000_000; - const DELEGATOR_1_STAKE: u64 = 1_000_000; - const DELEGATOR_2_STAKE: u64 = 1_000_000; +fn should_distribute_uneven_delegation_rate_zero() { + const VALIDATOR_1_STAKE: u64 = 200_000_000_000; + const DELEGATOR_1_STAKE: u64 = 600_000_000_000; + const DELEGATOR_2_STAKE: u64 = 800_000_000_000; + const TOTAL_DELEGATOR_STAKE: u64 = DELEGATOR_1_STAKE + DELEGATOR_2_STAKE; + const TOTAL_STAKE: u64 = VALIDATOR_1_STAKE + TOTAL_DELEGATOR_STAKE; - const VALIDATOR_1_DELEGATION_RATE: DelegationRate = DELEGATION_RATE_DENOMINATOR; + const VALIDATOR_1_DELEGATION_RATE: DelegationRate = 0; let system_fund_request = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, @@ -1727,123 +1537,156 @@ fn should_distribute_delegation_rate_full() { delegator_2_delegate_request, ]; - let mut timestamp_millis = - DEFAULT_GENESIS_TIMESTAMP_MILLIS + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS; - - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + let protocol_version = DEFAULT_PROTOCOL_VERSION; // initial token supply - let initial_supply = builder.total_supply(None); + let initial_supply = builder.total_supply(protocol_version, None); + let total_payout = builder.base_round_reward(None, protocol_version); let expected_total_reward = *GENESIS_ROUND_SEIGNIORAGE_RATE * initial_supply; let expected_total_reward_integer = expected_total_reward.to_integer(); + assert_eq!(total_payout, expected_total_reward_integer); for request in post_genesis_requests { builder.exec(request).commit().expect_success(); } - for _ in 0..5 { - builder.run_auction(timestamp_millis, Vec::new()); - timestamp_millis += TIMESTAMP_MILLIS_INCREMENT; + for _ in 
0..=builder.get_auction_delay() { + let step_request = StepRequestBuilder::new() + .with_parent_state_hash(builder.get_post_state_hash()) + .with_protocol_version(ProtocolVersion::V1_0_0) + .with_next_era_id(builder.get_era().successor()) + .with_run_auction(true) + .build(); + + assert!( + builder.step(step_request).is_success(), + "must execute step successfully" + ); } - let reward_factors: BTreeMap = { - let mut tmp = BTreeMap::new(); - tmp.insert(VALIDATOR_1.clone(), BLOCK_REWARD); - tmp - }; + let mut rewards = BTreeMap::new(); + rewards.insert(VALIDATOR_1.clone(), vec![total_payout]); - let distribute_request = ExecuteRequestBuilder::standard( + let distribute_request = ExecuteRequestBuilder::contract_call_by_hash( *SYSTEM_ADDR, - CONTRACT_AUCTION_BIDS, + builder.get_auction_contract_hash(), + METHOD_DISTRIBUTE, runtime_args! { ARG_ENTRY_POINT => METHOD_DISTRIBUTE, - ARG_REWARD_FACTORS => reward_factors + ARG_REWARDS_MAP => rewards }, ) .build(); builder.exec(distribute_request).commit().expect_success(); + let delegators_share = { + let commission_rate = Ratio::new( + U512::from(VALIDATOR_1_DELEGATION_RATE), + U512::from(DELEGATION_RATE_DENOMINATOR), + ); + let reward_multiplier = + Ratio::new(U512::from(TOTAL_DELEGATOR_STAKE), U512::from(TOTAL_STAKE)); + let delegator_reward = expected_total_reward + .checked_mul(&reward_multiplier) + .expect("must get delegator reward"); + let commission = delegator_reward + .checked_mul(&commission_rate) + .expect("must get commission"); + delegator_reward.checked_sub(&commission).unwrap() + }; + + let delegator_1_expected_payout = { + let reward_multiplier = Ratio::new( + U512::from(DELEGATOR_1_STAKE), + U512::from(TOTAL_DELEGATOR_STAKE), + ); + delegators_share + .checked_mul(&reward_multiplier) + .map(|ratio| ratio.to_integer()) + .unwrap() + }; + + let delegator_2_expected_payout = { + let reward_multiplier = Ratio::new( + U512::from(DELEGATOR_2_STAKE), + U512::from(TOTAL_DELEGATOR_STAKE), + ); + delegators_share + 
.checked_mul(&reward_multiplier) + .map(|ratio| ratio.to_integer()) + .unwrap() + }; + + let validator_1_expected_payout = { + let total_delegator_payout = delegator_1_expected_payout + delegator_2_expected_payout; + let validators_part = expected_total_reward - Ratio::from(total_delegator_payout); + validators_part.to_integer() + }; + let validator_1_updated_stake = { let validator_stake_before = U512::from(VALIDATOR_1_STAKE); - let validator_stake_after = *get_validator_bid(&mut builder, VALIDATOR_1.clone()) + let validator_stake_after = get_validator_bid(&mut builder, VALIDATOR_1.clone()) .expect("should have validator bid") .staked_amount(); validator_stake_after - validator_stake_before }; - let expected_validator_1_balance = - (expected_total_reward * Ratio::from(U512::one())).to_integer(); - assert_eq!(validator_1_updated_stake, expected_validator_1_balance); + assert_eq!(validator_1_updated_stake, validator_1_expected_payout); let delegator_1_updated_stake = { - let validator_stake_before = U512::from(DELEGATOR_1_STAKE); - let validator_stake_after = + let delegator_stake_before = U512::from(DELEGATOR_1_STAKE); + let delegator_stake_after = get_delegator_staked_amount(&mut builder, VALIDATOR_1.clone(), DELEGATOR_1.clone()); - validator_stake_after - validator_stake_before + delegator_stake_after - delegator_stake_before }; - let expected_delegator_1_balance = U512::zero(); - assert_eq!(delegator_1_updated_stake, expected_delegator_1_balance); + assert_eq!(delegator_1_updated_stake, delegator_1_expected_payout); - let delegator_2_balance = { - let validator_stake_before = U512::from(DELEGATOR_2_STAKE); - let validator_stake_after = + let delegator_2_updated_stake = { + let delegator_stake_before = U512::from(DELEGATOR_2_STAKE); + let delegator_stake_after = get_delegator_staked_amount(&mut builder, VALIDATOR_1.clone(), DELEGATOR_2.clone()); - validator_stake_after - validator_stake_before + delegator_stake_after - delegator_stake_before }; - let 
expected_delegator_2_balance = U512::zero(); - assert_eq!(delegator_2_balance, expected_delegator_2_balance); - - let total_payout = validator_1_updated_stake + delegator_1_updated_stake + delegator_2_balance; - assert_eq!(total_payout, expected_total_reward_integer); - - let era_info = { - let era = builder.get_era(); + assert_eq!(delegator_2_updated_stake, delegator_2_expected_payout); - let era_info_value = builder - .query(None, Key::EraInfo(era), &[]) - .expect("should have value"); - - era_info_value - .as_era_info() - .cloned() - .expect("should be era info") - }; + let era_info = get_era_info(&mut builder); assert!(matches!( era_info.select(VALIDATOR_1.clone()).next(), Some(SeigniorageAllocation::Validator { validator_public_key, amount }) - if *validator_public_key == *VALIDATOR_1 && *amount == expected_validator_1_balance + if *validator_public_key == *VALIDATOR_1 && *amount == validator_1_expected_payout )); assert!(matches!( era_info.select(DELEGATOR_1.clone()).next(), - Some(SeigniorageAllocation::Delegator { delegator_public_key, amount, .. }) - if *delegator_public_key == *DELEGATOR_1 && *amount == expected_delegator_1_balance + Some(SeigniorageAllocation::DelegatorKind { delegator_kind: DelegatorKind::PublicKey(delegator_public_key), amount, .. }) + if *delegator_public_key == *DELEGATOR_1 && *amount == delegator_1_expected_payout )); assert!(matches!( era_info.select(DELEGATOR_2.clone()).next(), - Some(SeigniorageAllocation::Delegator { delegator_public_key, amount, .. }) - if *delegator_public_key == *DELEGATOR_2 && *amount == expected_delegator_1_balance + Some(SeigniorageAllocation::DelegatorKind { delegator_kind: DelegatorKind::PublicKey(delegator_public_key), amount, .. 
}) + if *delegator_public_key == *DELEGATOR_2 && *amount == delegator_2_expected_payout )); } #[ignore] #[test] -fn should_distribute_uneven_delegation_rate_zero() { +fn should_distribute_with_multiple_validators_and_delegators() { const VALIDATOR_1_STAKE: u64 = 1_000_000; - const DELEGATOR_1_STAKE: u64 = 3_000_000; - const DELEGATOR_2_STAKE: u64 = 4_000_000; - - const VALIDATOR_1_DELEGATION_RATE: DelegationRate = 0; + const VALIDATOR_2_STAKE: u64 = 1_000_000; + const VALIDATOR_3_STAKE: u64 = 1_000_000; - let validator_1_portion = Ratio::new(U512::one(), U512::from(8)); - let delegator_1_portion = Ratio::new(U512::from(3), U512::from(8)); - let delegator_2_portion = Ratio::new(U512::from(4), U512::from(8)); + const VALIDATOR_1_DELEGATION_RATE: DelegationRate = DELEGATION_RATE_DENOMINATOR / 2; + const VALIDATOR_2_DELEGATION_RATE: DelegationRate = DELEGATION_RATE_DENOMINATOR / 4; + const VALIDATOR_3_DELEGATION_RATE: DelegationRate = DELEGATION_RATE_DENOMINATOR; - let remainder = Ratio::from(U512::from(1)); + const DELEGATOR_1_STAKE: u64 = 6_000_000_000_000; + const DELEGATOR_2_STAKE: u64 = 8_000_000_000_000; + const DELEGATOR_3_STAKE: u64 = 2_000_000_000_000; let system_fund_request = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, @@ -1865,6 +1708,26 @@ fn should_distribute_uneven_delegation_rate_zero() { ) .build(); + let validator_2_fund_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *VALIDATOR_2_ADDR, + ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) + }, + ) + .build(); + + let validator_3_fund_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! 
{ + ARG_TARGET => *VALIDATOR_3_ADDR, + ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) + }, + ) + .build(); + let delegator_1_fund_request = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, CONTRACT_TRANSFER_TO_ACCOUNT, @@ -1885,6 +1748,16 @@ fn should_distribute_uneven_delegation_rate_zero() { ) .build(); + let delegator_3_fund_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *DELEGATOR_3_ADDR, + ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) + }, + ) + .build(); + let validator_1_add_bid_request = ExecuteRequestBuilder::standard( *VALIDATOR_1_ADDR, CONTRACT_ADD_BID, @@ -1896,8 +1769,30 @@ fn should_distribute_uneven_delegation_rate_zero() { ) .build(); - let delegator_1_delegate_request = ExecuteRequestBuilder::standard( - *DELEGATOR_1_ADDR, + let validator_2_add_bid_request = ExecuteRequestBuilder::standard( + *VALIDATOR_2_ADDR, + CONTRACT_ADD_BID, + runtime_args! { + ARG_AMOUNT => U512::from(VALIDATOR_2_STAKE), + ARG_DELEGATION_RATE => VALIDATOR_2_DELEGATION_RATE, + ARG_PUBLIC_KEY => VALIDATOR_2.clone(), + }, + ) + .build(); + + let validator_3_add_bid_request = ExecuteRequestBuilder::standard( + *VALIDATOR_3_ADDR, + CONTRACT_ADD_BID, + runtime_args! { + ARG_AMOUNT => U512::from(VALIDATOR_3_STAKE), + ARG_DELEGATION_RATE => VALIDATOR_3_DELEGATION_RATE, + ARG_PUBLIC_KEY => VALIDATOR_3.clone(), + }, + ) + .build(); + + let delegator_1_delegate_request = ExecuteRequestBuilder::standard( + *DELEGATOR_1_ADDR, CONTRACT_DELEGATE, runtime_args! { ARG_AMOUNT => U512::from(DELEGATOR_1_STAKE), @@ -1918,769 +1813,212 @@ fn should_distribute_uneven_delegation_rate_zero() { ) .build(); + let delegator_3_delegate_request = ExecuteRequestBuilder::standard( + *DELEGATOR_3_ADDR, + CONTRACT_DELEGATE, + runtime_args! 
{ + ARG_AMOUNT => U512::from(DELEGATOR_3_STAKE), + ARG_VALIDATOR => VALIDATOR_2.clone(), + ARG_DELEGATOR => DELEGATOR_3.clone(), + }, + ) + .build(); + let post_genesis_requests = vec![ system_fund_request, validator_1_fund_request, + validator_2_fund_request, + validator_3_fund_request, delegator_1_fund_request, delegator_2_fund_request, + delegator_3_fund_request, validator_1_add_bid_request, + validator_2_add_bid_request, + validator_3_add_bid_request, delegator_1_delegate_request, delegator_2_delegate_request, + delegator_3_delegate_request, ]; - let mut timestamp_millis = - DEFAULT_GENESIS_TIMESTAMP_MILLIS + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS; - - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + let protocol_version = DEFAULT_PROTOCOL_VERSION; // initial token supply - let initial_supply = builder.total_supply(None); + let initial_supply = builder.total_supply(protocol_version, None); + let total_payout = builder.base_round_reward(None, protocol_version); let expected_total_reward = *GENESIS_ROUND_SEIGNIORAGE_RATE * initial_supply; let expected_total_reward_integer = expected_total_reward.to_integer(); + assert_eq!(total_payout, expected_total_reward_integer); for request in post_genesis_requests { builder.exec(request).commit().expect_success(); } - for _ in 0..5 { - builder.run_auction(timestamp_millis, Vec::new()); - timestamp_millis += TIMESTAMP_MILLIS_INCREMENT; + for _ in 0..=builder.get_auction_delay() { + let step_request = StepRequestBuilder::new() + .with_parent_state_hash(builder.get_post_state_hash()) + .with_protocol_version(ProtocolVersion::V1_0_0) + .with_next_era_id(builder.get_era().successor()) + .with_run_auction(true) + .build(); + + assert!( + builder.step(step_request).is_success(), + "must execute step successfully" + ); } - let reward_factors: BTreeMap = { - let mut tmp = 
BTreeMap::new(); - tmp.insert(VALIDATOR_1.clone(), BLOCK_REWARD); - tmp - }; + let mut rewards = BTreeMap::new(); + rewards.insert(VALIDATOR_1.clone(), vec![total_payout]); - let distribute_request = ExecuteRequestBuilder::standard( + // Validator 1 distribution + let distribute_request = ExecuteRequestBuilder::contract_call_by_hash( *SYSTEM_ADDR, - CONTRACT_AUCTION_BIDS, + builder.get_auction_contract_hash(), + METHOD_DISTRIBUTE, runtime_args! { ARG_ENTRY_POINT => METHOD_DISTRIBUTE, - ARG_REWARD_FACTORS => reward_factors + ARG_REWARDS_MAP => rewards }, ) .build(); builder.exec(distribute_request).commit().expect_success(); - let validator_1_updated_stake = { + let validator_1_actual_payout = { let validator_stake_before = U512::from(VALIDATOR_1_STAKE); - let validator_stake_after = *get_validator_bid(&mut builder, VALIDATOR_1.clone()) + let validator_stake_after = get_validator_bid(&mut builder, VALIDATOR_1.clone()) .expect("should have validator bid") .staked_amount(); validator_stake_after - validator_stake_before }; - let expected_validator_1_payout = - (expected_total_reward * validator_1_portion + remainder).to_integer(); - assert_eq!(validator_1_updated_stake, expected_validator_1_payout); - let delegator_1_updated_stake = { + let delegator_1_actual_payout = { let delegator_stake_before = U512::from(DELEGATOR_1_STAKE); let delegator_stake_after = get_delegator_staked_amount(&mut builder, VALIDATOR_1.clone(), DELEGATOR_1.clone()); delegator_stake_after - delegator_stake_before }; - let expected_delegator_1_payout = (expected_total_reward * delegator_1_portion).to_integer(); - assert_eq!(delegator_1_updated_stake, expected_delegator_1_payout); - let delegator_2_updated_stake = { + let delegator_2_actual_payout = { let delegator_stake_before = U512::from(DELEGATOR_2_STAKE); let delegator_stake_after = get_delegator_staked_amount(&mut builder, VALIDATOR_1.clone(), DELEGATOR_2.clone()); delegator_stake_after - delegator_stake_before }; - let 
expected_delegator_2_payout = (expected_total_reward * delegator_2_portion).to_integer(); - assert_eq!(delegator_2_updated_stake, expected_delegator_2_payout); - - let total_payout = - validator_1_updated_stake + delegator_1_updated_stake + delegator_2_updated_stake; - assert_eq!(total_payout, expected_total_reward_integer); - - let era_info = { - let era = builder.get_era(); - let era_info_value = builder - .query(None, Key::EraInfo(era), &[]) - .expect("should have value"); - - era_info_value - .as_era_info() - .cloned() - .expect("should be era info") - }; + let era_info = get_era_info(&mut builder); assert!(matches!( era_info.select(VALIDATOR_1.clone()).next(), Some(SeigniorageAllocation::Validator { validator_public_key, amount }) - if *validator_public_key == *VALIDATOR_1 && *amount == expected_validator_1_payout + if *validator_public_key == *VALIDATOR_1 && *amount == validator_1_actual_payout )); assert!(matches!( era_info.select(DELEGATOR_1.clone()).next(), - Some(SeigniorageAllocation::Delegator { delegator_public_key, amount, .. }) - if *delegator_public_key == *DELEGATOR_1 && *amount == expected_delegator_1_payout + Some(SeigniorageAllocation::DelegatorKind { delegator_kind: DelegatorKind::PublicKey(delegator_public_key), amount, .. }) + if *delegator_public_key == *DELEGATOR_1 && *amount == delegator_1_actual_payout )); assert!(matches!( era_info.select(DELEGATOR_2.clone()).next(), - Some(SeigniorageAllocation::Delegator { delegator_public_key, amount, .. }) - if *delegator_public_key == *DELEGATOR_2 && *amount == expected_delegator_2_payout + Some(SeigniorageAllocation::DelegatorKind { delegator_kind: DelegatorKind::PublicKey(delegator_public_key), amount, .. 
}) + if *delegator_public_key == *DELEGATOR_2 && *amount == delegator_2_actual_payout )); -} - -#[ignore] -#[test] -fn should_distribute_by_factor() { - const VALIDATOR_1_STAKE: u64 = 1_000_000; - const VALIDATOR_2_STAKE: u64 = 1_000_000; - const VALIDATOR_3_STAKE: u64 = 1_000_000; - - const DELEGATION_RATE: DelegationRate = DELEGATION_RATE_DENOMINATOR; - - const VALIDATOR_1_REWARD_FACTOR: u64 = 333333333334; - const VALIDATOR_2_REWARD_FACTOR: u64 = 333333333333; - const VALIDATOR_3_REWARD_FACTOR: u64 = 333333333333; - - let one_third = Ratio::new(U512::one(), U512::from(3)); - - let system_fund_request = ExecuteRequestBuilder::standard( - *DEFAULT_ACCOUNT_ADDR, - CONTRACT_TRANSFER_TO_ACCOUNT, - runtime_args! { - ARG_TARGET => *SYSTEM_ADDR, - ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) - }, - ) - .build(); - - let validator_1_fund_request = ExecuteRequestBuilder::standard( - *DEFAULT_ACCOUNT_ADDR, - CONTRACT_TRANSFER_TO_ACCOUNT, - runtime_args! { - ARG_TARGET => *VALIDATOR_1_ADDR, - ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) - }, - ) - .build(); - - let validator_2_fund_request = ExecuteRequestBuilder::standard( - *DEFAULT_ACCOUNT_ADDR, - CONTRACT_TRANSFER_TO_ACCOUNT, - runtime_args! { - ARG_TARGET => *VALIDATOR_2_ADDR, - ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) - }, - ) - .build(); - - let validator_3_fund_request = ExecuteRequestBuilder::standard( - *DEFAULT_ACCOUNT_ADDR, - CONTRACT_TRANSFER_TO_ACCOUNT, - runtime_args! { - ARG_TARGET => *VALIDATOR_3_ADDR, - ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) - }, - ) - .build(); - - let validator_1_add_bid_request = ExecuteRequestBuilder::standard( - *VALIDATOR_1_ADDR, - CONTRACT_ADD_BID, - runtime_args! { - ARG_AMOUNT => U512::from(VALIDATOR_1_STAKE), - ARG_DELEGATION_RATE => DELEGATION_RATE, - ARG_PUBLIC_KEY => VALIDATOR_1.clone(), - }, - ) - .build(); - - let validator_2_add_bid_request = ExecuteRequestBuilder::standard( - *VALIDATOR_2_ADDR, - CONTRACT_ADD_BID, - runtime_args! 
{ - ARG_AMOUNT => U512::from(VALIDATOR_2_STAKE), - ARG_DELEGATION_RATE => DELEGATION_RATE, - ARG_PUBLIC_KEY => VALIDATOR_2.clone(), - }, - ) - .build(); - - let validator_3_add_bid_request = ExecuteRequestBuilder::standard( - *VALIDATOR_3_ADDR, - CONTRACT_ADD_BID, - runtime_args! { - ARG_AMOUNT => U512::from(VALIDATOR_3_STAKE), - ARG_DELEGATION_RATE => DELEGATION_RATE, - ARG_PUBLIC_KEY => VALIDATOR_3.clone(), - }, - ) - .build(); - - let post_genesis_requests = vec![ - system_fund_request, - validator_1_fund_request, - validator_2_fund_request, - validator_3_fund_request, - validator_1_add_bid_request, - validator_2_add_bid_request, - validator_3_add_bid_request, - ]; - - let mut timestamp_millis = - DEFAULT_GENESIS_TIMESTAMP_MILLIS + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS; - - let mut builder = InMemoryWasmTestBuilder::default(); - - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); - - // initial token supply - let initial_supply = builder.total_supply(None); - let expected_total_reward = *GENESIS_ROUND_SEIGNIORAGE_RATE * initial_supply; - let expected_total_reward_integer = expected_total_reward.to_integer(); - - for request in post_genesis_requests { - builder.exec(request).commit().expect_success(); - } - - for _ in 0..5 { - builder.run_auction(timestamp_millis, Vec::new()); - timestamp_millis += TIMESTAMP_MILLIS_INCREMENT; - } - let reward_factors: BTreeMap = { - let mut tmp = BTreeMap::new(); - tmp.insert(VALIDATOR_1.clone(), VALIDATOR_1_REWARD_FACTOR); - tmp.insert(VALIDATOR_2.clone(), VALIDATOR_2_REWARD_FACTOR); - tmp.insert(VALIDATOR_3.clone(), VALIDATOR_3_REWARD_FACTOR); - tmp - }; + let mut rewards = BTreeMap::new(); + rewards.insert(VALIDATOR_2.clone(), vec![total_payout]); - let distribute_request = ExecuteRequestBuilder::standard( + // Validator 2 distribution + let distribute_request = ExecuteRequestBuilder::contract_call_by_hash( *SYSTEM_ADDR, - CONTRACT_AUCTION_BIDS, + builder.get_auction_contract_hash(), + METHOD_DISTRIBUTE, runtime_args! 
{ ARG_ENTRY_POINT => METHOD_DISTRIBUTE, - ARG_REWARD_FACTORS => reward_factors + ARG_REWARDS_MAP => rewards }, ) .build(); builder.exec(distribute_request).commit().expect_success(); - let validator_1_updated_stake = { - let validator_stake_before = U512::from(VALIDATOR_1_STAKE); - let validator_stake_after = *get_validator_bid(&mut builder, VALIDATOR_1.clone()) - .expect("should have validator bid") - .staked_amount(); - validator_stake_after - validator_stake_before - }; - let expected_validator_1_payout = (expected_total_reward * one_third).to_integer(); - assert_eq!(validator_1_updated_stake, expected_validator_1_payout); - - let rounded_amount = U512::one(); - - let validator_2_updated_stake = { + let validator_2_actual_payout = { let validator_stake_before = U512::from(VALIDATOR_2_STAKE); - let validator_stake_after = *get_validator_bid(&mut builder, VALIDATOR_2.clone()) - .expect("should have validator bid") - .staked_amount(); - validator_stake_after - validator_stake_before - }; - let expected_validator_2_payout = - (expected_total_reward * one_third - rounded_amount).to_integer(); - assert_eq!(validator_2_updated_stake, expected_validator_2_payout); - - let validator_3_updated_stake = { - let validator_stake_before = U512::from(VALIDATOR_3_STAKE); - let validator_stake_after = *get_validator_bid(&mut builder, VALIDATOR_3.clone()) + let validator_stake_after = get_validator_bid(&mut builder, VALIDATOR_2.clone()) .expect("should have validator bid") .staked_amount(); validator_stake_after - validator_stake_before }; - let expected_validator_3_payout = - (expected_total_reward * one_third - rounded_amount).to_integer(); - assert_eq!(validator_3_updated_stake, expected_validator_3_payout); - - let total_payout = - validator_1_updated_stake + validator_2_updated_stake + validator_3_updated_stake; - let rounded_amount = U512::from(2); - assert_eq!(total_payout, expected_total_reward_integer - rounded_amount); - - let era_info = { - let era = builder.get_era(); 
- - let era_info_value = builder - .query(None, Key::EraInfo(era), &[]) - .expect("should have value"); - - era_info_value - .as_era_info() - .cloned() - .expect("should be era info") - }; - - assert!(matches!( - era_info.select(VALIDATOR_1.clone()).next(), - Some(SeigniorageAllocation::Validator { validator_public_key, amount }) - if *validator_public_key == *VALIDATOR_1 && *amount == expected_validator_1_payout - )); - - assert!(matches!( - era_info.select(VALIDATOR_2.clone()).next(), - Some(SeigniorageAllocation::Validator { validator_public_key, amount }) - if *validator_public_key == *VALIDATOR_2 && *amount == expected_validator_2_payout - )); - - assert!(matches!( - era_info.select(VALIDATOR_3.clone()).next(), - Some(SeigniorageAllocation::Validator { validator_public_key, amount }) - if *validator_public_key == *VALIDATOR_3 && *amount == expected_validator_3_payout - )); -} - -#[ignore] -#[test] -fn should_distribute_by_factor_regardless_of_stake() { - const VALIDATOR_1_STAKE: u64 = 4_000_000; - const VALIDATOR_2_STAKE: u64 = 2_000_000; - const VALIDATOR_3_STAKE: u64 = 1_000_000; - - const DELEGATION_RATE: DelegationRate = DELEGATION_RATE_DENOMINATOR; - - const VALIDATOR_1_REWARD_FACTOR: u64 = 333333333334; - const VALIDATOR_2_REWARD_FACTOR: u64 = 333333333333; - const VALIDATOR_3_REWARD_FACTOR: u64 = 333333333333; - - let one_third = Ratio::new(U512::one(), U512::from(3)); - - let system_fund_request = ExecuteRequestBuilder::standard( - *DEFAULT_ACCOUNT_ADDR, - CONTRACT_TRANSFER_TO_ACCOUNT, - runtime_args! { - ARG_TARGET => *SYSTEM_ADDR, - ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) - }, - ) - .build(); - - let validator_1_fund_request = ExecuteRequestBuilder::standard( - *DEFAULT_ACCOUNT_ADDR, - CONTRACT_TRANSFER_TO_ACCOUNT, - runtime_args! 
{ - ARG_TARGET => *VALIDATOR_1_ADDR, - ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) - }, - ) - .build(); - - let validator_2_fund_request = ExecuteRequestBuilder::standard( - *DEFAULT_ACCOUNT_ADDR, - CONTRACT_TRANSFER_TO_ACCOUNT, - runtime_args! { - ARG_TARGET => *VALIDATOR_2_ADDR, - ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) - }, - ) - .build(); - - let validator_3_fund_request = ExecuteRequestBuilder::standard( - *DEFAULT_ACCOUNT_ADDR, - CONTRACT_TRANSFER_TO_ACCOUNT, - runtime_args! { - ARG_TARGET => *VALIDATOR_3_ADDR, - ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) - }, - ) - .build(); - - let validator_1_add_bid_request = ExecuteRequestBuilder::standard( - *VALIDATOR_1_ADDR, - CONTRACT_ADD_BID, - runtime_args! { - ARG_AMOUNT => U512::from(VALIDATOR_1_STAKE), - ARG_DELEGATION_RATE => DELEGATION_RATE, - ARG_PUBLIC_KEY => VALIDATOR_1.clone(), - }, - ) - .build(); - - let validator_2_add_bid_request = ExecuteRequestBuilder::standard( - *VALIDATOR_2_ADDR, - CONTRACT_ADD_BID, - runtime_args! { - ARG_AMOUNT => U512::from(VALIDATOR_2_STAKE), - ARG_DELEGATION_RATE => DELEGATION_RATE, - ARG_PUBLIC_KEY => VALIDATOR_2.clone(), - }, - ) - .build(); - - let validator_3_add_bid_request = ExecuteRequestBuilder::standard( - *VALIDATOR_3_ADDR, - CONTRACT_ADD_BID, - runtime_args! 
{ - ARG_AMOUNT => U512::from(VALIDATOR_3_STAKE), - ARG_DELEGATION_RATE => DELEGATION_RATE, - ARG_PUBLIC_KEY => VALIDATOR_3.clone(), - }, - ) - .build(); - - let post_genesis_requests = vec![ - system_fund_request, - validator_1_fund_request, - validator_2_fund_request, - validator_3_fund_request, - validator_1_add_bid_request, - validator_2_add_bid_request, - validator_3_add_bid_request, - ]; - - let mut timestamp_millis = - DEFAULT_GENESIS_TIMESTAMP_MILLIS + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS; - - let mut builder = InMemoryWasmTestBuilder::default(); - - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); - - // initial token supply - let initial_supply = builder.total_supply(None); - let expected_total_reward = *GENESIS_ROUND_SEIGNIORAGE_RATE * initial_supply; - let expected_total_reward_integer = expected_total_reward.to_integer(); - - for request in post_genesis_requests { - builder.exec(request).commit().expect_success(); - } - - for _ in 0..5 { - builder.run_auction(timestamp_millis, Vec::new()); - timestamp_millis += TIMESTAMP_MILLIS_INCREMENT; - } - - let reward_factors: BTreeMap = { - let mut tmp = BTreeMap::new(); - tmp.insert(VALIDATOR_1.clone(), VALIDATOR_1_REWARD_FACTOR); - tmp.insert(VALIDATOR_2.clone(), VALIDATOR_2_REWARD_FACTOR); - tmp.insert(VALIDATOR_3.clone(), VALIDATOR_3_REWARD_FACTOR); - tmp - }; - - let distribute_request = ExecuteRequestBuilder::standard( - *SYSTEM_ADDR, - CONTRACT_AUCTION_BIDS, - runtime_args! 
{ - ARG_ENTRY_POINT => METHOD_DISTRIBUTE, - ARG_REWARD_FACTORS => reward_factors - }, - ) - .build(); - - builder.exec(distribute_request).commit().expect_success(); - - let validator_1_updated_stake = { - let validator_stake_before = U512::from(VALIDATOR_1_STAKE); - let validator_stake_after = *get_validator_bid(&mut builder, VALIDATOR_1.clone()) - .expect("should have validator bid") - .staked_amount(); - validator_stake_after - validator_stake_before - }; - let expected_validator_1_payout = (expected_total_reward * one_third).to_integer(); - assert_eq!(validator_1_updated_stake, expected_validator_1_payout); - - let rounded_amount = U512::one(); - - let validator_2_updated_stake = { - let validator_stake_before = U512::from(VALIDATOR_2_STAKE); - let validator_stake_after = *get_validator_bid(&mut builder, VALIDATOR_2.clone()) - .expect("should have validator bid") - .staked_amount(); - validator_stake_after - validator_stake_before - }; - let expected_validator_2_payout = - (expected_total_reward * one_third - rounded_amount).to_integer(); - assert_eq!(validator_2_updated_stake, expected_validator_2_payout); - - let validator_3_updated_stake = { - let validator_stake_before = U512::from(VALIDATOR_3_STAKE); - let validator_stake_after = *get_validator_bid(&mut builder, VALIDATOR_3.clone()) - .expect("should have validator bid") - .staked_amount(); - validator_stake_after - validator_stake_before - }; - let expected_validator_3_payout = - (expected_total_reward * one_third - rounded_amount).to_integer(); - assert_eq!(validator_3_updated_stake, expected_validator_3_payout); - - let total_payout = - validator_1_updated_stake + validator_2_updated_stake + expected_validator_3_payout; - let rounded_amount = U512::from(2); - assert_eq!(total_payout, expected_total_reward_integer - rounded_amount); - - let era_info = { - let era = builder.get_era(); - - let era_info_value = builder - .query(None, Key::EraInfo(era), &[]) - .expect("should have value"); - - era_info_value 
- .as_era_info() - .cloned() - .expect("should be era info") + let delegator_3_actual_payout = { + let delegator_stake_before = U512::from(DELEGATOR_3_STAKE); + let delegator_stake_after = + get_delegator_staked_amount(&mut builder, VALIDATOR_2.clone(), DELEGATOR_3.clone()); + delegator_stake_after - delegator_stake_before }; - assert!(matches!( - era_info.select(VALIDATOR_1.clone()).next(), - Some(SeigniorageAllocation::Validator { validator_public_key, amount }) - if *validator_public_key == *VALIDATOR_1 && *amount == expected_validator_1_payout - )); + let era_info = get_era_info(&mut builder); assert!(matches!( era_info.select(VALIDATOR_2.clone()).next(), Some(SeigniorageAllocation::Validator { validator_public_key, amount }) - if *validator_public_key == *VALIDATOR_2 && *amount == expected_validator_2_payout + if *validator_public_key == *VALIDATOR_2 && *amount == validator_2_actual_payout )); assert!(matches!( - era_info.select(VALIDATOR_3.clone()).next(), - Some(SeigniorageAllocation::Validator { validator_public_key, amount }) - if *validator_public_key == *VALIDATOR_3 && *amount == expected_validator_3_payout + era_info.select(DELEGATOR_3.clone()).next(), + Some(SeigniorageAllocation::DelegatorKind { delegator_kind: DelegatorKind::PublicKey(delegator_public_key), amount, .. 
}) + if *delegator_public_key == *DELEGATOR_3 && *amount == delegator_3_actual_payout )); -} - -#[ignore] -#[test] -fn should_distribute_by_factor_uneven() { - const VALIDATOR_1_STAKE: u64 = 1_000_000; - const VALIDATOR_2_STAKE: u64 = 1_000_000; - const VALIDATOR_3_STAKE: u64 = 1_000_000; - const DELEGATION_RATE: DelegationRate = DELEGATION_RATE_DENOMINATOR; - - const VALIDATOR_1_REWARD_FACTOR: u64 = 500000000000; - const VALIDATOR_2_REWARD_FACTOR: u64 = 300000000000; - const VALIDATOR_3_REWARD_FACTOR: u64 = 200000000000; - - let one_half = Ratio::new(U512::one(), U512::from(2)); - let three_tenths = Ratio::new(U512::from(3), U512::from(10)); - let one_fifth = Ratio::new(U512::from(1), U512::from(5)); - - let system_fund_request = ExecuteRequestBuilder::standard( - *DEFAULT_ACCOUNT_ADDR, - CONTRACT_TRANSFER_TO_ACCOUNT, - runtime_args! { - ARG_TARGET => *SYSTEM_ADDR, - ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) - }, - ) - .build(); + let mut rewards = BTreeMap::new(); + rewards.insert(VALIDATOR_3.clone(), vec![total_payout]); - let validator_1_fund_request = ExecuteRequestBuilder::standard( - *DEFAULT_ACCOUNT_ADDR, - CONTRACT_TRANSFER_TO_ACCOUNT, - runtime_args! { - ARG_TARGET => *VALIDATOR_1_ADDR, - ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) - }, - ) - .build(); - - let validator_2_fund_request = ExecuteRequestBuilder::standard( - *DEFAULT_ACCOUNT_ADDR, - CONTRACT_TRANSFER_TO_ACCOUNT, - runtime_args! { - ARG_TARGET => *VALIDATOR_2_ADDR, - ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) - }, - ) - .build(); - - let validator_3_fund_request = ExecuteRequestBuilder::standard( - *DEFAULT_ACCOUNT_ADDR, - CONTRACT_TRANSFER_TO_ACCOUNT, - runtime_args! { - ARG_TARGET => *VALIDATOR_3_ADDR, - ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) - }, - ) - .build(); - - let validator_1_add_bid_request = ExecuteRequestBuilder::standard( - *VALIDATOR_1_ADDR, - CONTRACT_ADD_BID, - runtime_args! 
{ - ARG_AMOUNT => U512::from(VALIDATOR_1_STAKE), - ARG_DELEGATION_RATE => DELEGATION_RATE, - ARG_PUBLIC_KEY => VALIDATOR_1.clone(), - }, - ) - .build(); - - let validator_2_add_bid_request = ExecuteRequestBuilder::standard( - *VALIDATOR_2_ADDR, - CONTRACT_ADD_BID, - runtime_args! { - ARG_AMOUNT => U512::from(VALIDATOR_2_STAKE), - ARG_DELEGATION_RATE => DELEGATION_RATE, - ARG_PUBLIC_KEY => VALIDATOR_2.clone(), - }, - ) - .build(); - - let validator_3_add_bid_request = ExecuteRequestBuilder::standard( - *VALIDATOR_3_ADDR, - CONTRACT_ADD_BID, - runtime_args! { - ARG_AMOUNT => U512::from(VALIDATOR_3_STAKE), - ARG_DELEGATION_RATE => DELEGATION_RATE, - ARG_PUBLIC_KEY => VALIDATOR_3.clone(), - }, - ) - .build(); - - let post_genesis_requests = vec![ - system_fund_request, - validator_1_fund_request, - validator_2_fund_request, - validator_3_fund_request, - validator_1_add_bid_request, - validator_2_add_bid_request, - validator_3_add_bid_request, - ]; - - let mut timestamp_millis = - DEFAULT_GENESIS_TIMESTAMP_MILLIS + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS; - - let mut builder = InMemoryWasmTestBuilder::default(); - - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); - - // initial token supply - let initial_supply = builder.total_supply(None); - let expected_total_reward = *GENESIS_ROUND_SEIGNIORAGE_RATE * initial_supply; - let expected_total_reward_integer = expected_total_reward.to_integer(); - - for request in post_genesis_requests { - builder.exec(request).commit().expect_success(); - } - - for _ in 0..5 { - builder.run_auction(timestamp_millis, Vec::new()); - timestamp_millis += TIMESTAMP_MILLIS_INCREMENT; - } - - let reward_factors: BTreeMap = { - let mut tmp = BTreeMap::new(); - tmp.insert(VALIDATOR_1.clone(), VALIDATOR_1_REWARD_FACTOR); - tmp.insert(VALIDATOR_2.clone(), VALIDATOR_2_REWARD_FACTOR); - tmp.insert(VALIDATOR_3.clone(), VALIDATOR_3_REWARD_FACTOR); - tmp - }; - - let distribute_request = ExecuteRequestBuilder::standard( + // Validator 3 distribution + let 
distribute_request = ExecuteRequestBuilder::contract_call_by_hash( *SYSTEM_ADDR, - CONTRACT_AUCTION_BIDS, + builder.get_auction_contract_hash(), + METHOD_DISTRIBUTE, runtime_args! { ARG_ENTRY_POINT => METHOD_DISTRIBUTE, - ARG_REWARD_FACTORS => reward_factors + ARG_REWARDS_MAP => rewards }, ) .build(); builder.exec(distribute_request).commit().expect_success(); - let validator_1_updated_stake = { - let validator_stake_before = U512::from(VALIDATOR_1_STAKE); - let validator_stake_after = *get_validator_bid(&mut builder, VALIDATOR_1.clone()) - .expect("should have validator bid") - .staked_amount(); - validator_stake_after - validator_stake_before - }; - let expected_validator_1_payout = (expected_total_reward * one_half).to_integer(); - assert_eq!(validator_1_updated_stake, expected_validator_1_payout); - - let validator_2_updated_stake = { - let validator_stake_before = U512::from(VALIDATOR_2_STAKE); - let validator_stake_after = *get_validator_bid(&mut builder, VALIDATOR_2.clone()) - .expect("should have validator bid") - .staked_amount(); - validator_stake_after - validator_stake_before - }; - let expected_validator_2_balance = (expected_total_reward * three_tenths).to_integer(); - assert_eq!(validator_2_updated_stake, expected_validator_2_balance); - - let validator_3_updated_stake = { + let validator_3_actual_payout = { let validator_stake_before = U512::from(VALIDATOR_3_STAKE); - let validator_stake_after = *get_validator_bid(&mut builder, VALIDATOR_3.clone()) + let validator_stake_after = get_validator_bid(&mut builder, VALIDATOR_3.clone()) .expect("should have validator bid") .staked_amount(); validator_stake_after - validator_stake_before }; - let expected_validator_3_payout = (expected_total_reward * one_fifth).to_integer(); - assert_eq!(validator_3_updated_stake, expected_validator_3_payout); - - let total_payout = - validator_1_updated_stake + validator_2_updated_stake + validator_3_updated_stake; - let rounded_amount = U512::one(); - 
assert_eq!(total_payout, expected_total_reward_integer - rounded_amount); - - let era_info = { - let era = builder.get_era(); - - let era_info_value = builder - .query(None, Key::EraInfo(era), &[]) - .expect("should have value"); - - era_info_value - .as_era_info() - .cloned() - .expect("should be era info") - }; - - assert!(matches!( - era_info.select(VALIDATOR_1.clone()).next(), - Some(SeigniorageAllocation::Validator { validator_public_key, amount }) - if *validator_public_key == *VALIDATOR_1 && *amount == expected_validator_1_payout - )); - assert!(matches!( - era_info.select(VALIDATOR_2.clone()).next(), - Some(SeigniorageAllocation::Validator { validator_public_key, amount }) - if *validator_public_key == *VALIDATOR_2 && *amount == expected_validator_2_balance - )); + let era_info = get_era_info(&mut builder); assert!(matches!( - era_info.select(VALIDATOR_3.clone()).next(), - Some(SeigniorageAllocation::Validator { validator_public_key, amount }) - if *validator_public_key == *VALIDATOR_3 && *amount == expected_validator_3_payout - )); -} - -#[ignore] -#[test] -fn should_distribute_with_multiple_validators_and_delegators() { - const VALIDATOR_1_STAKE: u64 = 1_000_000; - const VALIDATOR_2_STAKE: u64 = 1_000_000; - const VALIDATOR_3_STAKE: u64 = 1_000_000; - - const VALIDATOR_1_DELEGATION_RATE: DelegationRate = DELEGATION_RATE_DENOMINATOR / 2; - const VALIDATOR_2_DELEGATION_RATE: DelegationRate = DELEGATION_RATE_DENOMINATOR / 4; - const VALIDATOR_3_DELEGATION_RATE: DelegationRate = DELEGATION_RATE_DENOMINATOR; + era_info.select(VALIDATOR_3.clone()).next(), + Some(SeigniorageAllocation::Validator { validator_public_key, amount }) + if *validator_public_key == *VALIDATOR_3 && *amount == validator_3_actual_payout + )); +} - const VALIDATOR_1_REWARD_FACTOR: u64 = 200000000000; - const VALIDATOR_2_REWARD_FACTOR: u64 = 300000000000; - const VALIDATOR_3_REWARD_FACTOR: u64 = 500000000000; +#[ignore] +#[test] +fn 
should_distribute_with_multiple_validators_and_shared_delegator() { + const VALIDATOR_1_STAKE: u64 = 1_000_000_000_000; + const VALIDATOR_2_STAKE: u64 = 1_000_000_000_000; + const VALIDATOR_3_STAKE: u64 = 1_000_000_000_000; - const DELEGATOR_1_STAKE: u64 = 3_000_000; - const DELEGATOR_2_STAKE: u64 = 4_000_000; - const DELEGATOR_3_STAKE: u64 = 1_000_000; + const DELEGATION_RATE: DelegationRate = DELEGATION_RATE_DENOMINATOR / 2; - let remainder = U512::one(); + const DELEGATOR_1_STAKE: u64 = 1_000_000_000_000; let system_fund_request = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, @@ -2757,7 +2095,7 @@ fn should_distribute_with_multiple_validators_and_delegators() { CONTRACT_ADD_BID, runtime_args! { ARG_AMOUNT => U512::from(VALIDATOR_1_STAKE), - ARG_DELEGATION_RATE => VALIDATOR_1_DELEGATION_RATE, + ARG_DELEGATION_RATE => DELEGATION_RATE, ARG_PUBLIC_KEY => VALIDATOR_1.clone(), }, ) @@ -2768,7 +2106,7 @@ fn should_distribute_with_multiple_validators_and_delegators() { CONTRACT_ADD_BID, runtime_args! { ARG_AMOUNT => U512::from(VALIDATOR_2_STAKE), - ARG_DELEGATION_RATE => VALIDATOR_2_DELEGATION_RATE, + ARG_DELEGATION_RATE => DELEGATION_RATE, ARG_PUBLIC_KEY => VALIDATOR_2.clone(), }, ) @@ -2779,13 +2117,13 @@ fn should_distribute_with_multiple_validators_and_delegators() { CONTRACT_ADD_BID, runtime_args! { ARG_AMOUNT => U512::from(VALIDATOR_3_STAKE), - ARG_DELEGATION_RATE => VALIDATOR_3_DELEGATION_RATE, + ARG_DELEGATION_RATE => DELEGATION_RATE, ARG_PUBLIC_KEY => VALIDATOR_3.clone(), }, ) .build(); - let delegator_1_delegate_request = ExecuteRequestBuilder::standard( + let delegator_1_validator_1_delegate_request = ExecuteRequestBuilder::standard( *DELEGATOR_1_ADDR, CONTRACT_DELEGATE, runtime_args! 
{ @@ -2796,24 +2134,24 @@ fn should_distribute_with_multiple_validators_and_delegators() { ) .build(); - let delegator_2_delegate_request = ExecuteRequestBuilder::standard( - *DELEGATOR_2_ADDR, + let delegator_1_validator_2_delegate_request = ExecuteRequestBuilder::standard( + *DELEGATOR_1_ADDR, CONTRACT_DELEGATE, runtime_args! { - ARG_AMOUNT => U512::from(DELEGATOR_2_STAKE), - ARG_VALIDATOR => VALIDATOR_1.clone(), - ARG_DELEGATOR => DELEGATOR_2.clone(), + ARG_AMOUNT => U512::from(DELEGATOR_1_STAKE), + ARG_VALIDATOR => VALIDATOR_2.clone(), + ARG_DELEGATOR => DELEGATOR_1.clone(), }, ) .build(); - let delegator_3_delegate_request = ExecuteRequestBuilder::standard( - *DELEGATOR_3_ADDR, + let delegator_1_validator_3_delegate_request = ExecuteRequestBuilder::standard( + *DELEGATOR_1_ADDR, CONTRACT_DELEGATE, runtime_args! { - ARG_AMOUNT => U512::from(DELEGATOR_3_STAKE), - ARG_VALIDATOR => VALIDATOR_2.clone(), - ARG_DELEGATOR => DELEGATOR_3.clone(), + ARG_AMOUNT => U512::from(DELEGATOR_1_STAKE), + ARG_VALIDATOR => VALIDATOR_3.clone(), + ARG_DELEGATOR => DELEGATOR_1.clone(), }, ) .build(); @@ -2829,184 +2167,212 @@ fn should_distribute_with_multiple_validators_and_delegators() { validator_1_add_bid_request, validator_2_add_bid_request, validator_3_add_bid_request, - delegator_1_delegate_request, - delegator_2_delegate_request, - delegator_3_delegate_request, + delegator_1_validator_1_delegate_request, + delegator_1_validator_2_delegate_request, + delegator_1_validator_3_delegate_request, ]; - let mut timestamp_millis = - DEFAULT_GENESIS_TIMESTAMP_MILLIS + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS; - - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + let protocol_version = DEFAULT_PROTOCOL_VERSION; // initial token supply - let initial_supply = builder.total_supply(None); + let initial_supply = 
builder.total_supply(protocol_version, None); + let total_payout = builder.base_round_reward(None, protocol_version); let expected_total_reward = *GENESIS_ROUND_SEIGNIORAGE_RATE * initial_supply; let expected_total_reward_integer = expected_total_reward.to_integer(); + assert_eq!(total_payout, expected_total_reward_integer); for request in post_genesis_requests { builder.exec(request).commit().expect_success(); } - for _ in 0..5 { - builder.run_auction(timestamp_millis, Vec::new()); - timestamp_millis += TIMESTAMP_MILLIS_INCREMENT; + for _ in 0..=builder.get_auction_delay() { + let step_request = StepRequestBuilder::new() + .with_parent_state_hash(builder.get_post_state_hash()) + .with_protocol_version(ProtocolVersion::V1_0_0) + .with_next_era_id(builder.get_era().successor()) + .with_run_auction(true) + .build(); + assert!( + builder.step(step_request).is_success(), + "must execute step successfully" + ); } - let reward_factors: BTreeMap = { - let mut tmp = BTreeMap::new(); - tmp.insert(VALIDATOR_1.clone(), VALIDATOR_1_REWARD_FACTOR); - tmp.insert(VALIDATOR_2.clone(), VALIDATOR_2_REWARD_FACTOR); - tmp.insert(VALIDATOR_3.clone(), VALIDATOR_3_REWARD_FACTOR); - tmp - }; + let mut rewards = BTreeMap::new(); + rewards.insert(VALIDATOR_1.clone(), vec![total_payout]); + rewards.insert(VALIDATOR_2.clone(), vec![total_payout]); + rewards.insert(VALIDATOR_3.clone(), vec![total_payout]); - let distribute_request = ExecuteRequestBuilder::standard( + let distribute_request = ExecuteRequestBuilder::contract_call_by_hash( *SYSTEM_ADDR, - CONTRACT_AUCTION_BIDS, + builder.get_auction_contract_hash(), + METHOD_DISTRIBUTE, runtime_args! 
{ ARG_ENTRY_POINT => METHOD_DISTRIBUTE, - ARG_REWARD_FACTORS => reward_factors + ARG_REWARDS_MAP => rewards }, ) .build(); builder.exec(distribute_request).commit().expect_success(); - let validator_1_updated_stake = { - let validator_stake_before = U512::from(VALIDATOR_1_STAKE); - let validator_stake_after = *get_validator_bid(&mut builder, VALIDATOR_1.clone()) + let validator_1_delegator_1_share = { + let total_reward = &Ratio::from(expected_total_reward_integer); + + let validator_1_total_stake = VALIDATOR_1_STAKE + DELEGATOR_1_STAKE; + + let delegator_total_stake = U512::from(DELEGATOR_1_STAKE); + let commission_rate = Ratio::new( + U512::from(DELEGATION_RATE), + U512::from(DELEGATION_RATE_DENOMINATOR), + ); + let reward_multiplier = + Ratio::new(delegator_total_stake, U512::from(validator_1_total_stake)); + let delegator_reward = total_reward + .checked_mul(&reward_multiplier) + .expect("must get delegator reward"); + let commission = delegator_reward + .checked_mul(&commission_rate) + .expect("must get commission"); + delegator_reward.checked_sub(&commission).unwrap() + } + .to_integer(); + + let validator_1_actual_payout = { + let validator_balance_before = U512::from(VALIDATOR_1_STAKE); + let validator_balance_after = get_validator_bid(&mut builder, VALIDATOR_1.clone()) .expect("should have validator bid") .staked_amount(); - validator_stake_after - validator_stake_before + validator_balance_after - validator_balance_before }; - let validator_2_updated_stake = { - let validator_stake_before = U512::from(VALIDATOR_2_STAKE); - let validator_stake_after = *get_validator_bid(&mut builder, VALIDATOR_2.clone()) + let validator_1_expected_payout = { + let validator_share = expected_total_reward; + let validator_portion = validator_share - Ratio::from(validator_1_delegator_1_share); + validator_portion.to_integer() + }; + assert_eq!(validator_1_actual_payout, validator_1_expected_payout); + + let validator_2_delegator_1_share = { + let validator_2_total_stake = 
VALIDATOR_2_STAKE + DELEGATOR_1_STAKE; + + let total_reward = &Ratio::from(expected_total_reward.to_integer()); + + let delegator_total_stake = U512::from(DELEGATOR_1_STAKE); + let commission_rate = Ratio::new( + U512::from(DELEGATION_RATE), + U512::from(DELEGATION_RATE_DENOMINATOR), + ); + let reward_multiplier = + Ratio::new(delegator_total_stake, U512::from(validator_2_total_stake)); + let delegator_reward = total_reward + .checked_mul(&reward_multiplier) + .expect("must get delegator reward"); + let commission = delegator_reward + .checked_mul(&commission_rate) + .expect("must get commission"); + delegator_reward.checked_sub(&commission).unwrap() + } + .to_integer(); + + let validator_2_actual_payout = { + let validator_balance_before = U512::from(VALIDATOR_2_STAKE); + let validator_balance_after = get_validator_bid(&mut builder, VALIDATOR_2.clone()) .expect("should have validator bid") .staked_amount(); - validator_stake_after - validator_stake_before + validator_balance_after - validator_balance_before + }; + let validator_2_expected_payout = { + let validator_share = expected_total_reward; + let validator_portion = validator_share - Ratio::from(validator_2_delegator_1_share); + validator_portion.to_integer() }; + assert_eq!(validator_2_actual_payout, validator_2_expected_payout); - let validator_3_updated_stake = { - let validator_stake_before = U512::from(VALIDATOR_3_STAKE); - let validator_stake_after = *get_validator_bid(&mut builder, VALIDATOR_3.clone()) + let validator_3_delegator_1_share = { + let validator_3_total_stake = VALIDATOR_3_STAKE + DELEGATOR_1_STAKE; + + let total_reward = &Ratio::from(expected_total_reward.to_integer()); + + let delegator_total_stake = U512::from(DELEGATOR_1_STAKE); + let commission_rate = Ratio::new( + U512::from(DELEGATION_RATE), + U512::from(DELEGATION_RATE_DENOMINATOR), + ); + let reward_multiplier = + Ratio::new(delegator_total_stake, U512::from(validator_3_total_stake)); + let delegator_reward = total_reward + 
.checked_mul(&reward_multiplier) + .expect("must get delegator reward"); + let commission = delegator_reward + .checked_mul(&commission_rate) + .expect("must get commission"); + delegator_reward.checked_sub(&commission).unwrap() + } + .to_integer(); + + let validator_3_actual_payout = { + let validator_balance_before = U512::from(VALIDATOR_3_STAKE); + let validator_balance_after = get_validator_bid(&mut builder, VALIDATOR_3.clone()) .expect("should have validator bid") .staked_amount(); - validator_stake_after - validator_stake_before + validator_balance_after - validator_balance_before + }; + let validator_3_expected_payout = { + let validator_share = expected_total_reward; + let validator_portion = validator_share - Ratio::from(validator_3_delegator_1_share); + validator_portion.to_integer() }; + assert_eq!(validator_3_actual_payout, validator_3_expected_payout); - let delegator_1_updated_stake = { - let delegator_stake_before = U512::from(DELEGATOR_1_STAKE); - let delegator_stake_after = + let delegator_1_validator_1_updated_stake = { + let delegator_balance_before = U512::from(DELEGATOR_1_STAKE); + let delegator_balance_after = get_delegator_staked_amount(&mut builder, VALIDATOR_1.clone(), DELEGATOR_1.clone()); - delegator_stake_after - delegator_stake_before + delegator_balance_after - delegator_balance_before }; - let delegator_2_updated_stake = { - let delegator_stake_before = U512::from(DELEGATOR_2_STAKE); + assert_eq!( + delegator_1_validator_1_updated_stake, + validator_1_delegator_1_share + ); + + let delegator_1_validator_2_updated_stake = { + let delegator_stake_before = U512::from(DELEGATOR_1_STAKE); let delegator_stake_after = - get_delegator_staked_amount(&mut builder, VALIDATOR_1.clone(), DELEGATOR_2.clone()); + get_delegator_staked_amount(&mut builder, VALIDATOR_2.clone(), DELEGATOR_1.clone()); delegator_stake_after - delegator_stake_before }; + assert_eq!( + delegator_1_validator_2_updated_stake, + validator_2_delegator_1_share + ); - let 
delegator_3_updated_stake = { - let delegator_stake_before = U512::from(DELEGATOR_3_STAKE); + let delegator_1_validator_3_updated_stake = { + let delegator_stake_before = U512::from(DELEGATOR_1_STAKE); let delegator_stake_after = - get_delegator_staked_amount(&mut builder, VALIDATOR_2.clone(), DELEGATOR_3.clone()); + get_delegator_staked_amount(&mut builder, VALIDATOR_3.clone(), DELEGATOR_1.clone()); delegator_stake_after - delegator_stake_before }; - - let total_payout: U512 = [ - validator_1_updated_stake, - validator_2_updated_stake, - validator_3_updated_stake, - delegator_1_updated_stake, - delegator_2_updated_stake, - delegator_3_updated_stake, - ] - .iter() - .cloned() - .sum(); - - assert_eq!(total_payout, expected_total_reward_integer - remainder); - - let era_info = { - let era = builder.get_era(); - - let era_info_value = builder - .query(None, Key::EraInfo(era), &[]) - .expect("should have value"); - - era_info_value - .as_era_info() - .cloned() - .expect("should be era info") - }; - - assert!(matches!( - era_info.select(VALIDATOR_1.clone()).next(), - Some(SeigniorageAllocation::Validator { validator_public_key, amount }) - if *validator_public_key == *VALIDATOR_1 && *amount == validator_1_updated_stake - )); - - assert!(matches!( - era_info.select(VALIDATOR_2.clone()).next(), - Some(SeigniorageAllocation::Validator { validator_public_key, amount }) - if *validator_public_key == *VALIDATOR_2 && *amount == validator_2_updated_stake - )); - - assert!(matches!( - era_info.select(VALIDATOR_3.clone()).next(), - Some(SeigniorageAllocation::Validator { validator_public_key, amount }) - if *validator_public_key == *VALIDATOR_3 && *amount == validator_3_updated_stake - )); - - assert!(matches!( - era_info.select(DELEGATOR_1.clone()).next(), - Some(SeigniorageAllocation::Delegator { delegator_public_key, amount, .. 
}) - if *delegator_public_key == *DELEGATOR_1 && *amount == delegator_1_updated_stake - )); - - assert!(matches!( - era_info.select(DELEGATOR_2.clone()).next(), - Some(SeigniorageAllocation::Delegator { delegator_public_key, amount, .. }) - if *delegator_public_key == *DELEGATOR_2 && *amount == delegator_2_updated_stake - )); - - assert!(matches!( - era_info.select(DELEGATOR_3.clone()).next(), - Some(SeigniorageAllocation::Delegator { delegator_public_key, amount, .. }) - if *delegator_public_key == *DELEGATOR_3 && *amount == delegator_3_updated_stake - )); + assert_eq!( + delegator_1_validator_3_updated_stake, + validator_3_delegator_1_share + ); } #[ignore] #[test] -fn should_distribute_with_multiple_validators_and_shared_delegator() { - const VALIDATOR_1_STAKE: u64 = 1_000_000; - const VALIDATOR_2_STAKE: u64 = 1_000_000; - const VALIDATOR_3_STAKE: u64 = 1_000_000; +fn should_increase_total_supply_after_distribute() { + const VALIDATOR_1_STAKE: u64 = DEFAULT_MINIMUM_DELEGATION_AMOUNT; + const VALIDATOR_2_STAKE: u64 = DEFAULT_MINIMUM_DELEGATION_AMOUNT; + const VALIDATOR_3_STAKE: u64 = DEFAULT_MINIMUM_DELEGATION_AMOUNT; const DELEGATION_RATE: DelegationRate = DELEGATION_RATE_DENOMINATOR / 2; - const VALIDATOR_1_REWARD_FACTOR: u64 = 333333333334; - const VALIDATOR_2_REWARD_FACTOR: u64 = 333333333333; - const VALIDATOR_3_REWARD_FACTOR: u64 = 333333333333; - - const DELEGATOR_1_STAKE: u64 = 1_000_000; - - let validator_1_portion = Ratio::new(U512::from(1), U512::from(4)); - let validator_2_portion = Ratio::new(U512::from(1), U512::from(4)); - let validator_3_portion = Ratio::new(U512::from(1), U512::from(4)); - let delegator_1_validator_1_portion = Ratio::new(U512::from(1), U512::from(12)); - let delegator_1_validator_2_portion = Ratio::new(U512::from(1), U512::from(12)); - let delegator_1_validator_3_portion = Ratio::new(U512::from(1), U512::from(12)); - - let remainder = U512::from(2); + const DELEGATOR_1_STAKE: u64 = DEFAULT_MINIMUM_DELEGATION_AMOUNT; let 
system_fund_request = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, @@ -3163,208 +2529,76 @@ fn should_distribute_with_multiple_validators_and_shared_delegator() { let mut timestamp_millis = DEFAULT_GENESIS_TIMESTAMP_MILLIS + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS; - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + let protocol_version = DEFAULT_PROTOCOL_VERSION; // initial token supply - let initial_supply = builder.total_supply(None); - let expected_total_reward = *GENESIS_ROUND_SEIGNIORAGE_RATE * initial_supply; - let expected_total_reward_integer = expected_total_reward.to_integer(); + let initial_supply = builder.total_supply(protocol_version, None); for request in post_genesis_requests { builder.exec(request).commit().expect_success(); } - for _ in 0..5 { - builder.run_auction(timestamp_millis, Vec::new()); - timestamp_millis += TIMESTAMP_MILLIS_INCREMENT; - } - - let reward_factors: BTreeMap = { - let mut tmp = BTreeMap::new(); - tmp.insert(VALIDATOR_1.clone(), VALIDATOR_1_REWARD_FACTOR); - tmp.insert(VALIDATOR_2.clone(), VALIDATOR_2_REWARD_FACTOR); - tmp.insert(VALIDATOR_3.clone(), VALIDATOR_3_REWARD_FACTOR); - tmp - }; - - let distribute_request = ExecuteRequestBuilder::standard( - *SYSTEM_ADDR, - CONTRACT_AUCTION_BIDS, - runtime_args! 
{ - ARG_ENTRY_POINT => METHOD_DISTRIBUTE, - ARG_REWARD_FACTORS => reward_factors - }, - ) - .build(); - - builder.exec(distribute_request).commit().expect_success(); - - let validator_1_updated_stake = { - let validator_balance_before = U512::from(VALIDATOR_1_STAKE); - let validator_balance_after = *get_validator_bid(&mut builder, VALIDATOR_1.clone()) - .expect("should have validator bid") - .staked_amount(); - validator_balance_after - validator_balance_before - }; - - let expected_validator_1_payout = (expected_total_reward * validator_1_portion).to_integer(); - assert_eq!(validator_1_updated_stake, expected_validator_1_payout); - - let validator_2_updated_stake = { - let validator_balance_before = U512::from(VALIDATOR_2_STAKE); - let validator_balance_after = *get_validator_bid(&mut builder, VALIDATOR_2.clone()) - .expect("should have validator bid") - .staked_amount(); - validator_balance_after - validator_balance_before - }; - let expected_validator_2_balance = (expected_total_reward * validator_2_portion).to_integer(); - assert_eq!(validator_2_updated_stake, expected_validator_2_balance); - - let validator_3_updated_stake = { - let validator_balance_before = U512::from(VALIDATOR_3_STAKE); - let validator_balance_after = *get_validator_bid(&mut builder, VALIDATOR_3.clone()) - .expect("should have validator bid") - .staked_amount(); - validator_balance_after - validator_balance_before - }; - let expected_validator_3_updated_stake = - (expected_total_reward * validator_3_portion).to_integer(); - assert_eq!( - validator_3_updated_stake, - expected_validator_3_updated_stake - ); + let post_genesis_supply = builder.total_supply(protocol_version, None); - let delegator_1_validator_1_updated_stake = { - let delegator_balance_before = U512::from(DELEGATOR_1_STAKE); - let delegator_balance_after = - get_delegator_staked_amount(&mut builder, VALIDATOR_1.clone(), DELEGATOR_1.clone()); - delegator_balance_after - delegator_balance_before - }; - let 
expected_delegator_1_validator_1_payout = - (expected_total_reward * delegator_1_validator_1_portion).to_integer(); assert_eq!( - delegator_1_validator_1_updated_stake, - expected_delegator_1_validator_1_payout + initial_supply, post_genesis_supply, + "total supply should remain unchanged prior to first distribution" ); - let rounded_amount = U512::one(); - let delegator_1_validator_2_updated_stake = { - let delegator_stake_before = U512::from(DELEGATOR_1_STAKE); - let delegator_stake_after = - get_delegator_staked_amount(&mut builder, VALIDATOR_2.clone(), DELEGATOR_1.clone()); - delegator_stake_after - delegator_stake_before - }; - let expected_delegator_1_validator_2_payout = - (expected_total_reward * delegator_1_validator_2_portion - rounded_amount).to_integer(); - assert_eq!( - delegator_1_validator_2_updated_stake, - expected_delegator_1_validator_2_payout - ); + // run auction + for _ in 0..5 { + builder.run_auction(timestamp_millis, Vec::new()); + timestamp_millis += TIMESTAMP_MILLIS_INCREMENT; + } - let delegator_1_validator_3_updated_stake = { - let delegator_stake_before = U512::from(DELEGATOR_1_STAKE); - let delegator_stake_after = - get_delegator_staked_amount(&mut builder, VALIDATOR_3.clone(), DELEGATOR_1.clone()); - delegator_stake_after - delegator_stake_before - }; - let expected_delegator_1_validator_3_payout = - (expected_total_reward * delegator_1_validator_3_portion - rounded_amount).to_integer(); + let post_auction_supply = builder.total_supply(protocol_version, None); assert_eq!( - delegator_1_validator_3_updated_stake, - expected_delegator_1_validator_3_payout - ); - - let total_payout: U512 = [ - validator_1_updated_stake, - validator_2_updated_stake, - validator_3_updated_stake, - delegator_1_validator_1_updated_stake, - delegator_1_validator_2_updated_stake, - delegator_1_validator_3_updated_stake, - ] - .iter() - .cloned() - .sum(); - - assert_eq!(total_payout, expected_total_reward_integer - remainder); - - let era_info = { - let era = 
builder.get_era(); - - let era_info_value = builder - .query(None, Key::EraInfo(era), &[]) - .expect("should have value"); - - era_info_value - .as_era_info() - .cloned() - .expect("should be era info") - }; - - assert!(matches!( - era_info.select(VALIDATOR_1.clone()).next(), - Some(SeigniorageAllocation::Validator { validator_public_key, amount }) - if *validator_public_key == *VALIDATOR_1 && *amount == expected_validator_1_payout - )); - - assert!(matches!( - era_info.select(VALIDATOR_2.clone()).next(), - Some(SeigniorageAllocation::Validator { validator_public_key, amount }) - if *validator_public_key == *VALIDATOR_2 && *amount == expected_validator_2_balance - )); - - assert!(matches!( - era_info.select(VALIDATOR_3.clone()).next(), - Some(SeigniorageAllocation::Validator { validator_public_key, amount }) - if *validator_public_key == *VALIDATOR_3 && *amount == expected_validator_3_updated_stake - )); - - let delegator_1_allocations: Vec = - era_info.select(DELEGATOR_1.clone()).cloned().collect(); - - assert_eq!(delegator_1_allocations.len(), 3); - - assert!( - delegator_1_allocations.contains(&SeigniorageAllocation::delegator( - DELEGATOR_1.clone(), - VALIDATOR_1.clone(), - expected_delegator_1_validator_1_payout, - )) - ); - - assert!( - delegator_1_allocations.contains(&SeigniorageAllocation::delegator( - DELEGATOR_1.clone(), - VALIDATOR_2.clone(), - expected_delegator_1_validator_2_payout, - )) + initial_supply, post_auction_supply, + "total supply should remain unchanged regardless of auction" ); - assert!( - delegator_1_allocations.contains(&SeigniorageAllocation::delegator( - DELEGATOR_1.clone(), - VALIDATOR_3.clone(), - expected_delegator_1_validator_3_payout, - )) - ); + let total_payout = U512::from(1_000_000_000_000_u64); + + let mut rewards = BTreeMap::new(); + rewards.insert(VALIDATOR_1.clone(), vec![total_payout]); + rewards.insert(VALIDATOR_2.clone(), vec![total_payout]); + rewards.insert(VALIDATOR_3.clone(), vec![total_payout]); + + for _ in 0..5 
{ + let distribute_request = ExecuteRequestBuilder::contract_call_by_hash( + *SYSTEM_ADDR, + builder.get_auction_contract_hash(), + METHOD_DISTRIBUTE, + runtime_args! { + ARG_ENTRY_POINT => METHOD_DISTRIBUTE, + ARG_REWARDS_MAP => rewards.clone() + }, + ) + .build(); + + builder.exec(distribute_request).expect_success().commit(); + + let post_distribute_supply = builder.total_supply(protocol_version, None); + assert!( + initial_supply < post_distribute_supply, + "total supply should increase after distribute ({} >= {})", + initial_supply, + post_distribute_supply + ); + } } #[ignore] #[test] -fn should_increase_total_supply_after_distribute() { - const VALIDATOR_1_STAKE: u64 = 1_000_000; - const VALIDATOR_2_STAKE: u64 = 1_000_000; - const VALIDATOR_3_STAKE: u64 = 1_000_000; +fn should_not_create_purses_during_distribute() { + const VALIDATOR_1_STAKE: u64 = DEFAULT_MINIMUM_DELEGATION_AMOUNT; const DELEGATION_RATE: DelegationRate = DELEGATION_RATE_DENOMINATOR / 2; - const VALIDATOR_1_REWARD_FACTOR: u64 = 333333333334; - const VALIDATOR_2_REWARD_FACTOR: u64 = 333333333333; - const VALIDATOR_3_REWARD_FACTOR: u64 = 333333333333; - - const DELEGATOR_1_STAKE: u64 = 1_000_000; + const DELEGATOR_1_STAKE: u64 = DEFAULT_MINIMUM_DELEGATION_AMOUNT; let system_fund_request = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, @@ -3386,26 +2620,6 @@ fn should_increase_total_supply_after_distribute() { ) .build(); - let validator_2_fund_request = ExecuteRequestBuilder::standard( - *DEFAULT_ACCOUNT_ADDR, - CONTRACT_TRANSFER_TO_ACCOUNT, - runtime_args! { - ARG_TARGET => *VALIDATOR_2_ADDR, - ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) - }, - ) - .build(); - - let validator_3_fund_request = ExecuteRequestBuilder::standard( - *DEFAULT_ACCOUNT_ADDR, - CONTRACT_TRANSFER_TO_ACCOUNT, - runtime_args! 
{ - ARG_TARGET => *VALIDATOR_3_ADDR, - ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) - }, - ) - .build(); - let delegator_1_fund_request = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, CONTRACT_TRANSFER_TO_ACCOUNT, @@ -3447,28 +2661,6 @@ fn should_increase_total_supply_after_distribute() { ) .build(); - let validator_2_add_bid_request = ExecuteRequestBuilder::standard( - *VALIDATOR_2_ADDR, - CONTRACT_ADD_BID, - runtime_args! { - ARG_AMOUNT => U512::from(VALIDATOR_2_STAKE), - ARG_DELEGATION_RATE => DELEGATION_RATE, - ARG_PUBLIC_KEY => VALIDATOR_2.clone(), - }, - ) - .build(); - - let validator_3_add_bid_request = ExecuteRequestBuilder::standard( - *VALIDATOR_3_ADDR, - CONTRACT_ADD_BID, - runtime_args! { - ARG_AMOUNT => U512::from(VALIDATOR_3_STAKE), - ARG_DELEGATION_RATE => DELEGATION_RATE, - ARG_PUBLIC_KEY => VALIDATOR_3.clone(), - }, - ) - .build(); - let delegator_1_validator_1_delegate_request = ExecuteRequestBuilder::standard( *DELEGATOR_1_ADDR, CONTRACT_DELEGATE, @@ -3480,24 +2672,24 @@ fn should_increase_total_supply_after_distribute() { ) .build(); - let delegator_1_validator_2_delegate_request = ExecuteRequestBuilder::standard( - *DELEGATOR_1_ADDR, + let delegator_2_validator_1_delegate_request = ExecuteRequestBuilder::standard( + *DELEGATOR_2_ADDR, CONTRACT_DELEGATE, runtime_args! { ARG_AMOUNT => U512::from(DELEGATOR_1_STAKE), - ARG_VALIDATOR => VALIDATOR_2.clone(), - ARG_DELEGATOR => DELEGATOR_1.clone(), + ARG_VALIDATOR => VALIDATOR_1.clone(), + ARG_DELEGATOR => DELEGATOR_2.clone(), }, ) .build(); - let delegator_1_validator_3_delegate_request = ExecuteRequestBuilder::standard( - *DELEGATOR_1_ADDR, + let delegator_3_validator_1_delegate_request = ExecuteRequestBuilder::standard( + *DELEGATOR_3_ADDR, CONTRACT_DELEGATE, runtime_args! 
{ ARG_AMOUNT => U512::from(DELEGATOR_1_STAKE), - ARG_VALIDATOR => VALIDATOR_3.clone(), - ARG_DELEGATOR => DELEGATOR_1.clone(), + ARG_VALIDATOR => VALIDATOR_1.clone(), + ARG_DELEGATOR => DELEGATOR_3.clone(), }, ) .build(); @@ -3505,34 +2697,31 @@ fn should_increase_total_supply_after_distribute() { let post_genesis_requests = vec![ system_fund_request, validator_1_fund_request, - validator_2_fund_request, - validator_3_fund_request, delegator_1_fund_request, delegator_2_fund_request, delegator_3_fund_request, validator_1_add_bid_request, - validator_2_add_bid_request, - validator_3_add_bid_request, delegator_1_validator_1_delegate_request, - delegator_1_validator_2_delegate_request, - delegator_1_validator_3_delegate_request, + delegator_2_validator_1_delegate_request, + delegator_3_validator_1_delegate_request, ]; let mut timestamp_millis = DEFAULT_GENESIS_TIMESTAMP_MILLIS + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS; - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + let protocol_version = DEFAULT_PROTOCOL_VERSION; // initial token supply - let initial_supply = builder.total_supply(None); + let initial_supply = builder.total_supply(protocol_version, None); for request in post_genesis_requests { builder.exec(request).commit().expect_success(); } - let post_genesis_supply = builder.total_supply(None); + let post_genesis_supply = builder.total_supply(protocol_version, None); assert_eq!( initial_supply, post_genesis_supply, @@ -3545,33 +2734,40 @@ fn should_increase_total_supply_after_distribute() { timestamp_millis += TIMESTAMP_MILLIS_INCREMENT; } - let post_auction_supply = builder.total_supply(None); + let post_auction_supply = builder.total_supply(protocol_version, None); assert_eq!( initial_supply, post_auction_supply, "total supply should remain unchanged regardless of auction" ); - let reward_factors: 
BTreeMap = { - let mut tmp = BTreeMap::new(); - tmp.insert(VALIDATOR_1.clone(), VALIDATOR_1_REWARD_FACTOR); - tmp.insert(VALIDATOR_2.clone(), VALIDATOR_2_REWARD_FACTOR); - tmp.insert(VALIDATOR_3.clone(), VALIDATOR_3_REWARD_FACTOR); - tmp - }; + let total_payout = U512::from(1_000_000_000_000_u64); + + let mut rewards = BTreeMap::new(); + rewards.insert(VALIDATOR_1.clone(), vec![total_payout]); - let distribute_request = ExecuteRequestBuilder::standard( + let distribute_request = ExecuteRequestBuilder::contract_call_by_hash( *SYSTEM_ADDR, - CONTRACT_AUCTION_BIDS, + builder.get_auction_contract_hash(), + METHOD_DISTRIBUTE, runtime_args! { ARG_ENTRY_POINT => METHOD_DISTRIBUTE, - ARG_REWARD_FACTORS => reward_factors + ARG_REWARDS_MAP => rewards }, ) .build(); + let number_of_purses_before_distribute = builder.get_balance_keys().len(); + builder.exec(distribute_request).commit().expect_success(); - let post_distribute_supply = builder.total_supply(None); + let number_of_purses_after_distribute = builder.get_balance_keys().len(); + + assert_eq!( + number_of_purses_after_distribute, + number_of_purses_before_distribute + ); + + let post_distribute_supply = builder.total_supply(protocol_version, None); assert!( initial_supply < post_distribute_supply, "total supply should increase after distribute ({} >= {})", @@ -3583,9 +2779,9 @@ fn should_increase_total_supply_after_distribute() { #[ignore] #[test] fn should_distribute_delegation_rate_full_after_upgrading() { - const VALIDATOR_1_STAKE: u64 = 1_000_000; - const DELEGATOR_1_STAKE: u64 = 1_000_000; - const DELEGATOR_2_STAKE: u64 = 1_000_000; + const VALIDATOR_1_STAKE: u64 = 1_000_000_000_000; + const DELEGATOR_1_STAKE: u64 = 1_000_000_000_000; + const DELEGATOR_2_STAKE: u64 = 1_000_000_000_000; const VALIDATOR_1_DELEGATION_RATE: DelegationRate = DELEGATION_RATE_DENOMINATOR; @@ -3675,12 +2871,13 @@ fn should_distribute_delegation_rate_full_after_upgrading() { let mut timestamp_millis = DEFAULT_GENESIS_TIMESTAMP_MILLIS + 
DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS; - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + let protocol_version = DEFAULT_PROTOCOL_VERSION; // initial token supply - let initial_supply = builder.total_supply(None); + let initial_supply = builder.total_supply(protocol_version, None); let expected_total_reward_before = *GENESIS_ROUND_SEIGNIORAGE_RATE * initial_supply; let expected_total_reward_integer = expected_total_reward_before.to_integer(); @@ -3689,22 +2886,20 @@ fn should_distribute_delegation_rate_full_after_upgrading() { } for _ in 0..5 { - builder.run_auction(timestamp_millis, Vec::new()); + builder.advance_era(); timestamp_millis += TIMESTAMP_MILLIS_INCREMENT; } - let reward_factors: BTreeMap = { - let mut tmp = BTreeMap::new(); - tmp.insert(VALIDATOR_1.clone(), BLOCK_REWARD); - tmp - }; + let mut rewards = BTreeMap::new(); + rewards.insert(VALIDATOR_1.clone(), vec![expected_total_reward_integer]); - let distribute_request = ExecuteRequestBuilder::standard( + let distribute_request = ExecuteRequestBuilder::contract_call_by_hash( *SYSTEM_ADDR, - CONTRACT_AUCTION_BIDS, + builder.get_auction_contract_hash(), + METHOD_DISTRIBUTE, runtime_args! 
{ ARG_ENTRY_POINT => METHOD_DISTRIBUTE, - ARG_REWARD_FACTORS => reward_factors + ARG_REWARDS_MAP => rewards }, ) .build(); @@ -3713,7 +2908,7 @@ fn should_distribute_delegation_rate_full_after_upgrading() { let validator_1_stake_before = { let validator_stake_before = U512::from(VALIDATOR_1_STAKE); - let validator_stake_after = *get_validator_bid(&mut builder, VALIDATOR_1.clone()) + let validator_stake_after = get_validator_bid(&mut builder, VALIDATOR_1.clone()) .expect("should have validator bid") .staked_amount(); validator_stake_after - validator_stake_before @@ -3748,10 +2943,10 @@ fn should_distribute_delegation_rate_full_after_upgrading() { // // Update round seigniorage rate into 50% of default value // - let new_seigniorage_multiplier = Ratio::new_raw(1, 10); + let new_seigniorage_multiplier = Ratio::new_raw(1, 2); let new_round_seigniorage_rate = DEFAULT_ROUND_SEIGNIORAGE_RATE * new_seigniorage_multiplier; - let old_protocol_version = *DEFAULT_PROTOCOL_VERSION; + let old_protocol_version = DEFAULT_PROTOCOL_VERSION; let sem_ver = old_protocol_version.value(); let new_protocol_version = ProtocolVersion::from_parts(sem_ver.major, sem_ver.minor, sem_ver.patch + 1); @@ -3762,47 +2957,44 @@ fn should_distribute_delegation_rate_full_after_upgrading() { .with_current_protocol_version(old_protocol_version) .with_new_protocol_version(new_protocol_version) .with_activation_point(DEFAULT_ACTIVATION_POINT) + .with_validator_minimum_bid_amount(1u64) .with_new_round_seigniorage_rate(new_round_seigniorage_rate) .build() }; - builder.upgrade_with_upgrade_request(&mut upgrade_request); + builder.upgrade(&mut upgrade_request); - let initial_supply = builder.total_supply(None); + let initial_supply = builder.total_supply(protocol_version, None); for _ in 0..5 { - builder.run_auction(timestamp_millis, Vec::new()); + builder.advance_era(); timestamp_millis += TIMESTAMP_MILLIS_INCREMENT; } - let reward_factors: BTreeMap = { - let mut tmp = BTreeMap::new(); - 
tmp.insert(VALIDATOR_1.clone(), BLOCK_REWARD); - tmp - }; - - let distribute_request = ExecuteRequestBuilder::standard( - *SYSTEM_ADDR, - CONTRACT_AUCTION_BIDS, - runtime_args! { - ARG_ENTRY_POINT => METHOD_DISTRIBUTE, - ARG_REWARD_FACTORS => reward_factors - }, - ) - .with_protocol_version(new_protocol_version) - .build(); - let new_round_seigniorage_rate = { let (numer, denom) = new_round_seigniorage_rate.into(); Ratio::new(numer.into(), denom.into()) }; - builder.exec(distribute_request).commit().expect_success(); - let expected_total_reward_after = new_round_seigniorage_rate * initial_supply; + let mut rewards = BTreeMap::new(); + rewards.insert( + VALIDATOR_1.clone(), + vec![expected_total_reward_after.to_integer()], + ); + assert!( + builder + .distribute(None, new_protocol_version, rewards, timestamp_millis) + .is_success(), + "must distribute" + ); + + let mut rewards = BTreeMap::new(); + rewards.insert(VALIDATOR_1.clone(), expected_total_reward_integer); + let validator_1_balance_after = { - let validator_staked_amount = *get_validator_bid(&mut builder, VALIDATOR_1.clone()) + let validator_staked_amount = get_validator_bid(&mut builder, VALIDATOR_1.clone()) .expect("should have validator bid") .staked_amount(); validator_staked_amount - validator_1_stake_before - U512::from(VALIDATOR_1_STAKE) @@ -3842,7 +3034,279 @@ fn should_distribute_delegation_rate_full_after_upgrading() { validator_1_balance_after + delegator_1_balance_after + delegator_2_balance_after; assert_eq!(total_payout_after, expected_total_reward_after); - assert!(expected_validator_1_payout_before > expected_validator_1_balance_after); // expected amount after decreasing seigniorage rate is lower than the first amount - assert!(total_payout_before > total_payout_after); // expected total payout after decreasing - // rate is lower than the first payout + // expected amount after reducing the seigniorage rate is lower than the first amount + assert!(expected_validator_1_payout_before > 
expected_validator_1_balance_after); + assert!(total_payout_before > total_payout_after); +} + +// In this test, we set up a validator and a delegator, then the delegator delegates to the +// validator. We step forward one era (auction delay is 3 eras) and then fully undelegate. We expect +// that there is no bonding purse for this delegator / validator pair. This test should prove that +// if you undelegate before your delegation would receive rewards from a validator, you will no +// longer be delegated, as expected. +#[ignore] +#[test] +fn should_not_restake_after_full_unbond() { + const DELEGATOR_1_STAKE: u64 = DEFAULT_MINIMUM_DELEGATION_AMOUNT; + const VALIDATOR_1_STAKE: u64 = 1_000_000; + const VALIDATOR_1_DELEGATION_RATE: DelegationRate = 0; + + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + // advance past the initial auction delay due to special condition of post-genesis behavior. + + builder.advance_eras_by_default_auction_delay(); + + let validator_1_fund_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *VALIDATOR_1_ADDR, + ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) + }, + ) + .build(); + + builder + .exec(validator_1_fund_request) + .expect_success() + .commit(); + + let delegator_1_fund_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *DELEGATOR_1_ADDR, + ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) + }, + ) + .build(); + + builder + .exec(delegator_1_fund_request) + .expect_success() + .commit(); + + let validator_1_add_bid_request = ExecuteRequestBuilder::standard( + *VALIDATOR_1_ADDR, + CONTRACT_ADD_BID, + runtime_args! 
{ + ARG_AMOUNT => U512::from(VALIDATOR_1_STAKE), + ARG_DELEGATION_RATE => VALIDATOR_1_DELEGATION_RATE, + ARG_PUBLIC_KEY => VALIDATOR_1.clone(), + }, + ) + .build(); + + builder + .exec(validator_1_add_bid_request) + .expect_success() + .commit(); + + let delegator_1_validator_1_delegate_request = ExecuteRequestBuilder::standard( + *DELEGATOR_1_ADDR, + CONTRACT_DELEGATE, + runtime_args! { + ARG_AMOUNT => U512::from(DELEGATOR_1_STAKE), + ARG_VALIDATOR => VALIDATOR_1.clone(), + ARG_DELEGATOR => DELEGATOR_1.clone(), + }, + ) + .build(); + + builder + .exec(delegator_1_validator_1_delegate_request) + .expect_success() + .commit(); + + builder.advance_era(); + + let delegator = get_delegator_bid(&mut builder, VALIDATOR_1.clone(), DELEGATOR_1.clone()); + + assert!(delegator.is_some()); + assert_eq!( + delegator.unwrap().staked_amount(), + U512::from(DELEGATOR_1_STAKE) + ); + + builder.advance_era(); + + // undelegate in the era right after we delegated. + undelegate( + &mut builder, + *DELEGATOR_1_ADDR, + DELEGATOR_1.clone(), + VALIDATOR_1.clone(), + U512::from(DELEGATOR_1_STAKE), + ); + let delegator = get_delegator_bid(&mut builder, VALIDATOR_1.clone(), DELEGATOR_1.clone()); + assert!(delegator.is_none()); + + let withdraws = builder.get_unbonds(); + let unbond_kind = UnbondKind::DelegatedPublicKey(DELEGATOR_1.clone()); + let unbond = withdraws.get(&unbond_kind).expect("should have entry"); + let delegator_unbond_amount = unbond[0].eras().first().expect("should be era").amount(); + + assert_eq!( + *delegator_unbond_amount, + U512::from(DELEGATOR_1_STAKE), + "unbond purse amount should match staked amount" + ); + + // step until validator receives rewards. + builder.advance_eras_by(2); + + // validator receives rewards after this step. + builder.advance_era(); + + // Delegator should not remain delegated even though they were eligible for rewards in the + // second era. 
+ let delegator = get_delegator_bid(&mut builder, VALIDATOR_1.clone(), DELEGATOR_1.clone()); + assert!(delegator.is_none()); +} + +// In this test, we set up a delegator and a validator, the delegator delegates to the validator. +// We then undelegate during the first era where the delegator would be eligible to receive rewards +// for their delegation and expect that there is no bonding purse for the delegator / validator pair +// and that the delegator does not remain delegated to the validator as expected. +#[ignore] +#[test] +fn delegator_full_unbond_during_first_reward_era() { + const DELEGATOR_1_STAKE: u64 = DEFAULT_MINIMUM_DELEGATION_AMOUNT; + const VALIDATOR_1_STAKE: u64 = 1_000_000; + const VALIDATOR_1_DELEGATION_RATE: DelegationRate = 0; + + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + // advance past the initial auction delay due to special condition of post-genesis behavior. + builder.advance_eras_by_default_auction_delay(); + + let validator_1_fund_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *VALIDATOR_1_ADDR, + ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) + }, + ) + .build(); + + builder + .exec(validator_1_fund_request) + .expect_success() + .commit(); + + let delegator_1_fund_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *DELEGATOR_1_ADDR, + ARG_AMOUNT => U512::from(TRANSFER_AMOUNT) + }, + ) + .build(); + + builder + .exec(delegator_1_fund_request) + .expect_success() + .commit(); + + let validator_1_add_bid_request = ExecuteRequestBuilder::standard( + *VALIDATOR_1_ADDR, + CONTRACT_ADD_BID, + runtime_args! 
{ + ARG_AMOUNT => U512::from(VALIDATOR_1_STAKE), + ARG_DELEGATION_RATE => VALIDATOR_1_DELEGATION_RATE, + ARG_PUBLIC_KEY => VALIDATOR_1.clone(), + }, + ) + .build(); + + builder + .exec(validator_1_add_bid_request) + .expect_success() + .commit(); + + let delegator_1_validator_1_delegate_request = ExecuteRequestBuilder::standard( + *DELEGATOR_1_ADDR, + CONTRACT_DELEGATE, + runtime_args! { + ARG_AMOUNT => U512::from(DELEGATOR_1_STAKE), + ARG_VALIDATOR => VALIDATOR_1.clone(), + ARG_DELEGATOR => DELEGATOR_1.clone(), + }, + ) + .build(); + + builder + .exec(delegator_1_validator_1_delegate_request) + .expect_success() + .commit(); + + // first step after funding, adding bid and delegating. + builder.advance_era(); + + let delegator = get_delegator_bid(&mut builder, VALIDATOR_1.clone(), DELEGATOR_1.clone()) + .expect("should be delegator"); + + assert_eq!(delegator.staked_amount(), U512::from(DELEGATOR_1_STAKE)); + + // step until validator receives rewards. + builder.advance_eras_by(3); + + // assert that the validator should indeed receive rewards and that + // the delegator is scheduled to receive rewards this era. + + let auction_hash = builder.get_auction_contract_hash(); + let seigniorage_snapshot: SeigniorageRecipientsSnapshotV2 = builder.get_value( + EntityAddr::System(auction_hash.value()), + SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY, + ); + + let validator_seigniorage = seigniorage_snapshot + .get(&builder.get_era()) + .expect("should be seigniorage for era") + .get(&VALIDATOR_1) + .expect("should be validator seigniorage for era"); + + let delegator_kind = DelegatorKind::PublicKey(DELEGATOR_1.clone()); + let delegator_seigniorage = validator_seigniorage + .delegator_stake() + .get(&delegator_kind) + .expect("should be delegator seigniorage"); + assert_eq!(*delegator_seigniorage, U512::from(DELEGATOR_1_STAKE)); + + // undelegate in the first era that the delegator will receive rewards. 
+ undelegate( + &mut builder, + *DELEGATOR_1_ADDR, + DELEGATOR_1.clone(), + VALIDATOR_1.clone(), + U512::from(DELEGATOR_1_STAKE), + ); + let delegator = get_delegator_bid(&mut builder, VALIDATOR_1.clone(), DELEGATOR_1.clone()); + assert!(delegator.is_none()); + + let unbond_kind = UnbondKind::DelegatedPublicKey(DELEGATOR_1.clone()); + let withdraws = builder.get_unbonds(); + let unbond = withdraws + .get(&unbond_kind) + .expect("should have validator entry"); + let delegator_unbond_amount = unbond[0].eras().first().expect("should have era").amount(); + + assert_eq!( + *delegator_unbond_amount, + U512::from(DELEGATOR_1_STAKE), + "unbond purse amount should match staked amount" + ); + + // validator receives rewards after this step. + builder.advance_era(); + + // Delegator's stake should remain at zero delegated even though they were eligible for rewards + // in the second era. + let delegator = get_delegator_bid(&mut builder, VALIDATOR_1.clone(), DELEGATOR_1.clone()); + assert!(delegator.is_none()); } diff --git a/execution_engine_testing/tests/src/test/system_contracts/auction/mod.rs b/execution_engine_testing/tests/src/test/system_contracts/auction/mod.rs index 5a142aa7d6..0554dce7f1 100644 --- a/execution_engine_testing/tests/src/test/system_contracts/auction/mod.rs +++ b/execution_engine_testing/tests/src/test/system_contracts/auction/mod.rs @@ -1,2 +1,655 @@ +use casper_engine_test_support::{ + ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, + DEFAULT_GENESIS_TIMESTAMP_MILLIS, DEFAULT_PROPOSER_PUBLIC_KEY, LOCAL_GENESIS_REQUEST, +}; +use casper_types::{ + runtime_args, + system::auction::{ + BidAddr, BidKind, BidsExt, DelegationRate, DelegatorBid, DelegatorKind, EraInfo, + ValidatorBid, ARG_AMOUNT, ARG_NEW_VALIDATOR, ARG_VALIDATOR, + }, + GenesisAccount, GenesisValidator, Key, Motes, PublicKey, SecretKey, StoredValue, U512, +}; +use num_traits::Zero; + +const STORED_STAKING_CONTRACT_NAME: &str = "staking_stored.wasm"; + mod bids; mod 
distribute; +mod reservations; + +fn get_validator_bid( + builder: &mut LmdbWasmTestBuilder, + validator_public_key: PublicKey, +) -> Option { + let bids = builder.get_bids(); + bids.validator_bid(&validator_public_key) +} + +pub fn get_delegator_staked_amount( + builder: &mut LmdbWasmTestBuilder, + validator_public_key: PublicKey, + delegator_public_key: PublicKey, +) -> U512 { + let bids = builder.get_bids(); + + let delegator = bids + .delegator_by_kind(&validator_public_key, &DelegatorKind::PublicKey(delegator_public_key.clone())) + .expect("bid should exist for validator-{validator_public_key}, delegator-{delegator_public_key}"); + + delegator.staked_amount() +} + +pub fn get_era_info(builder: &mut LmdbWasmTestBuilder) -> EraInfo { + let era_info_value = builder + .query(None, Key::EraSummary, &[]) + .expect("should have value"); + + era_info_value + .as_era_info() + .cloned() + .expect("should be era info") +} + +#[ignore] +#[test] +fn should_support_contract_staking() { + const ARG_ACTION: &str = "action"; + let timestamp_millis = DEFAULT_GENESIS_TIMESTAMP_MILLIS; + let purse_name = "staking_purse".to_string(); + let contract_name = "staking".to_string(); + let entry_point_name = "run".to_string(); + let stake = "STAKE".to_string(); + let unstake = "UNSTAKE".to_string(); + let restake = "RESTAKE".to_string(); + let get_staked_amount = "STAKED_AMOUNT".to_string(); + let account = *DEFAULT_ACCOUNT_ADDR; + let seed_amount = U512::from(10_000_000_000_000_000_u64); + let delegate_amount = U512::from(5_000_000_000_000_000_u64); + let validator_pk = &*DEFAULT_PROPOSER_PUBLIC_KEY; + let other_validator_pk = { + let secret_key = SecretKey::ed25519_from_bytes([199; SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) + }; + + let mut builder = LmdbWasmTestBuilder::default(); + let mut genesis_request = LOCAL_GENESIS_REQUEST.clone(); + genesis_request.set_enable_entity(false); + + genesis_request.push_genesis_validator( + validator_pk, + 
GenesisValidator::new( + Motes::new(10_000_000_000_000_000_u64), + DelegationRate::zero(), + ), + ); + genesis_request.push_genesis_account(GenesisAccount::Account { + public_key: other_validator_pk.clone(), + validator: Some(GenesisValidator::new( + Motes::new(1_000_000_000_000_000_u64), + DelegationRate::zero(), + )), + balance: Motes::new(10_000_000_000_000_000_u64), + }); + builder.run_genesis(genesis_request); + + let auction_delay = builder.get_unbonding_delay(); + let unbond_delay = builder.get_unbonding_delay(); + + for _ in 0..=auction_delay { + // crank era + builder.run_auction(timestamp_millis, vec![]); + } + + let account_main_purse = builder + .get_entity_with_named_keys_by_account_hash(account) + .expect("should have account") + .main_purse(); + let starting_account_balance = builder.get_purse_balance(account_main_purse); + + builder + .exec( + ExecuteRequestBuilder::standard( + account, + STORED_STAKING_CONTRACT_NAME, + runtime_args! { + ARG_AMOUNT => seed_amount + }, + ) + .build(), + ) + .commit() + .expect_success(); + + let default_account = builder.get_account(account).expect("should have account"); + let named_keys = default_account.named_keys(); + + let contract_key = named_keys + .get(&contract_name) + .expect("contract_name key should exist"); + + let stored_contract = builder + .query(None, *contract_key, &[]) + .expect("should have stored value at contract key"); + + let contract = stored_contract + .as_contract() + .expect("stored value should be contract"); + + let contract_named_keys = contract.named_keys(); + + let contract_purse = contract_named_keys + .get(&purse_name) + .expect("purse_name key should exist") + .into_uref() + .expect("should be a uref"); + + let post_install_account_balance = builder.get_purse_balance(account_main_purse); + assert_eq!( + post_install_account_balance, + starting_account_balance.saturating_sub(seed_amount), + "post install should be reduced due to seeding contract purse" + ); + + let 
pre_delegation_balance = builder.get_purse_balance(contract_purse); + assert_eq!(pre_delegation_balance, seed_amount); + + // check delegated amount from contract + builder + .exec( + ExecuteRequestBuilder::contract_call_by_name( + account, + &contract_name, + &entry_point_name, + runtime_args! { + ARG_ACTION => get_staked_amount.clone(), + ARG_VALIDATOR => validator_pk.clone(), + }, + ) + .build(), + ) + .commit() + .expect_success(); + + let result = builder.get_last_exec_result().unwrap(); + let staked_amount: U512 = result.ret().unwrap().to_owned().into_t().unwrap(); + assert_eq!( + staked_amount, + U512::zero(), + "staked amount should be zero prior to staking" + ); + + // stake from contract + builder + .exec( + ExecuteRequestBuilder::contract_call_by_name( + account, + &contract_name, + &entry_point_name, + runtime_args! { + ARG_ACTION => stake, + ARG_AMOUNT => delegate_amount, + ARG_VALIDATOR => validator_pk.clone(), + }, + ) + .build(), + ) + .commit() + .expect_success(); + + let post_delegation_balance = builder.get_purse_balance(contract_purse); + assert_eq!( + post_delegation_balance, + pre_delegation_balance.saturating_sub(delegate_amount), + "contract purse balance should be reduced by staked amount" + ); + + let delegation_key = Key::BidAddr(BidAddr::DelegatedPurse { + validator: validator_pk.to_account_hash(), + delegator: contract_purse.addr(), + }); + + let stored_value = builder + .query(None, delegation_key, &[]) + .expect("should have delegation bid"); + + assert!( + matches!(stored_value, StoredValue::BidKind(BidKind::Delegator(_))), + "expected delegator bid" + ); + + if let StoredValue::BidKind(BidKind::Delegator(delegator)) = stored_value { + assert_eq!( + delegator.staked_amount(), + delegate_amount, + "staked amount should match delegation amount" + ); + } + + // check delegated amount from contract + builder + .exec( + ExecuteRequestBuilder::contract_call_by_name( + account, + &contract_name, + &entry_point_name, + runtime_args! 
{ + ARG_ACTION => get_staked_amount.clone(), + ARG_VALIDATOR => validator_pk.clone(), + }, + ) + .build(), + ) + .commit() + .expect_success(); + + let result = builder.get_last_exec_result().unwrap(); + let staked_amount: U512 = result.ret().unwrap().to_owned().into_t().unwrap(); + assert_eq!( + staked_amount, delegate_amount, + "staked amount should match delegation amount" + ); + + for _ in 0..=auction_delay { + // crank era + builder.run_auction(timestamp_millis, vec![]); + } + + let increased_delegate_amount = if let StoredValue::BidKind(BidKind::Delegator(delegator)) = + builder + .query(None, delegation_key, &[]) + .expect("should have delegation bid") + { + delegator.staked_amount() + } else { + U512::zero() + }; + + // restake from contract + builder + .exec( + ExecuteRequestBuilder::contract_call_by_name( + account, + &contract_name, + &entry_point_name, + runtime_args! { + ARG_ACTION => restake, + ARG_AMOUNT => increased_delegate_amount, + ARG_VALIDATOR => validator_pk.clone(), + ARG_NEW_VALIDATOR => other_validator_pk.clone() + }, + ) + .build(), + ) + .commit() + .expect_success(); + + assert!( + builder.query(None, delegation_key, &[]).is_err(), + "delegation record should be removed" + ); + + assert_eq!( + post_delegation_balance, + builder.get_purse_balance(contract_purse), + "at this point, unstaked token has not been returned" + ); + + for _ in 0..=unbond_delay { + // crank era + builder.run_auction(timestamp_millis, vec![]); + } + + let delegation_key = Key::BidAddr(BidAddr::DelegatedPurse { + validator: other_validator_pk.to_account_hash(), + delegator: contract_purse.addr(), + }); + + let stored_value = builder + .query(None, delegation_key, &[]) + .expect("should have delegation bid"); + + assert!( + matches!(stored_value, StoredValue::BidKind(BidKind::Delegator(_))), + "expected delegator bid" + ); + + if let StoredValue::BidKind(BidKind::Delegator(delegator)) = stored_value { + assert_eq!( + delegator.staked_amount(), + delegate_amount, + 
"staked amount should match delegation amount" + ); + } + + // unstake from contract + builder + .exec( + ExecuteRequestBuilder::contract_call_by_name( + account, + &contract_name, + &entry_point_name, + runtime_args! { + ARG_ACTION => unstake, + ARG_AMOUNT => increased_delegate_amount, + ARG_VALIDATOR => other_validator_pk.clone(), + }, + ) + .build(), + ) + .commit() + .expect_success(); + + assert!( + builder.query(None, delegation_key, &[]).is_err(), + "delegation record should be removed" + ); + + assert_eq!( + post_delegation_balance, + builder.get_purse_balance(contract_purse), + "at this point, unstaked token has not been returned" + ); + + let unbond_key = Key::BidAddr(BidAddr::UnbondPurse { + validator: other_validator_pk.to_account_hash(), + unbonder: contract_purse.addr(), + }); + let unbonded_amount = if let StoredValue::BidKind(BidKind::Unbond(unbond)) = builder + .query(None, unbond_key, &[]) + .expect("should have unbond") + { + let unbond_era = unbond.eras().first().expect("should have an era entry"); + assert_eq!( + *unbond_era.amount(), + increased_delegate_amount, + "unbonded amount should match expectations" + ); + *unbond_era.amount() + } else { + U512::zero() + }; + + for _ in 0..=unbond_delay { + // crank era + builder.run_auction(timestamp_millis, vec![]); + } + + assert_eq!( + delegate_amount.saturating_add(unbonded_amount), + builder.get_purse_balance(contract_purse), + "unbonded amount should be available to contract staking purse" + ); +} + +#[ignore] +#[test] +fn should_not_enforce_max_spending_when_main_purse_not_in_use() { + const ARG_ACTION: &str = "action"; + let timestamp_millis = DEFAULT_GENESIS_TIMESTAMP_MILLIS; + let purse_name = "staking_purse".to_string(); + let contract_name = "staking".to_string(); + let entry_point_name = "run".to_string(); + let stake_all = "STAKE_ALL".to_string(); + let account = *DEFAULT_ACCOUNT_ADDR; + let seed_amount = U512::from(10_000_000_000_000_000_u64); + let validator_pk = 
&*DEFAULT_PROPOSER_PUBLIC_KEY; + let other_validator_pk = { + let secret_key = SecretKey::ed25519_from_bytes([199; SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) + }; + + let mut builder = LmdbWasmTestBuilder::default(); + let mut genesis_request = LOCAL_GENESIS_REQUEST.clone(); + genesis_request.set_enable_entity(false); + + genesis_request.push_genesis_validator( + validator_pk, + GenesisValidator::new( + Motes::new(10_000_000_000_000_000_u64), + DelegationRate::zero(), + ), + ); + genesis_request.push_genesis_account(GenesisAccount::Account { + public_key: other_validator_pk.clone(), + validator: Some(GenesisValidator::new( + Motes::new(1_000_000_000_000_000_u64), + DelegationRate::zero(), + )), + balance: Motes::new(10_000_000_000_000_000_u64), + }); + builder.run_genesis(genesis_request); + + let auction_delay = builder.get_unbonding_delay(); + + for _ in 0..=auction_delay { + // crank era + builder.run_auction(timestamp_millis, vec![]); + } + + let account_main_purse = builder + .get_entity_with_named_keys_by_account_hash(account) + .expect("should have account") + .main_purse(); + let starting_account_balance = builder.get_purse_balance(account_main_purse); + + builder + .exec( + ExecuteRequestBuilder::standard( + account, + STORED_STAKING_CONTRACT_NAME, + runtime_args! 
{ + ARG_AMOUNT => seed_amount + }, + ) + .build(), + ) + .commit() + .expect_success(); + + let default_account = builder.get_account(account).expect("should have account"); + let named_keys = default_account.named_keys(); + + let contract_key = named_keys + .get(&contract_name) + .expect("contract_name key should exist"); + + let stored_contract = builder + .query(None, *contract_key, &[]) + .expect("should have stored value at contract key"); + + let contract = stored_contract + .as_contract() + .expect("stored value should be contract"); + + let contract_named_keys = contract.named_keys(); + + let contract_purse = contract_named_keys + .get(&purse_name) + .expect("purse_name key should exist") + .into_uref() + .expect("should be a uref"); + + let post_install_account_balance = builder.get_purse_balance(account_main_purse); + assert_eq!( + post_install_account_balance, + starting_account_balance.saturating_sub(seed_amount), + "post install should be reduced due to seeding contract purse" + ); + + let pre_delegation_balance = builder.get_purse_balance(contract_purse); + assert_eq!(pre_delegation_balance, seed_amount); + + // stake from contract + builder + .exec( + ExecuteRequestBuilder::contract_call_by_name( + account, + &contract_name, + &entry_point_name, + runtime_args! 
{ + ARG_ACTION => stake_all, + ARG_VALIDATOR => validator_pk.clone(), + }, + ) + .build(), + ) + .commit() + .expect_success(); + + let post_delegation_balance = builder.get_purse_balance(contract_purse); + assert_eq!( + post_delegation_balance, + U512::zero(), + "contract purse balance should be reduced by staked amount" + ); + + let delegation_key = Key::BidAddr(BidAddr::DelegatedPurse { + validator: validator_pk.to_account_hash(), + delegator: contract_purse.addr(), + }); + + let stored_value = builder + .query(None, delegation_key, &[]) + .expect("should have delegation bid"); + + assert!( + matches!(stored_value, StoredValue::BidKind(BidKind::Delegator(_))), + "expected delegator bid" + ); + + if let StoredValue::BidKind(BidKind::Delegator(delegator)) = stored_value { + assert_eq!( + delegator.staked_amount(), + pre_delegation_balance, + "staked amount should match delegation amount" + ); + } + + for _ in 0..=auction_delay { + // crank era + builder.run_auction(timestamp_millis, vec![]); + } + + builder + .query(None, delegation_key, &[]) + .expect("should have delegation bid"); +} + +#[ignore] +#[test] +fn should_read_bid_with_vesting_schedule_populated() { + const ARG_ACTION: &str = "action"; + let purse_name = "staking_purse".to_string(); + let contract_name = "staking".to_string(); + let entry_point_name = "run".to_string(); + let get_staked_amount = "STAKED_AMOUNT".to_string(); + let account = *DEFAULT_ACCOUNT_ADDR; + let seed_amount = U512::from(10_000_000_000_000_000_u64); + let validator_pk = &*DEFAULT_PROPOSER_PUBLIC_KEY; + + let mut builder = LmdbWasmTestBuilder::default(); + let mut genesis_request = LOCAL_GENESIS_REQUEST.clone(); + genesis_request.set_enable_entity(false); + genesis_request.push_genesis_validator( + validator_pk, + GenesisValidator::new( + Motes::new(10_000_000_000_000_000_u64), + DelegationRate::zero(), + ), + ); + builder.run_genesis(genesis_request); + + builder + .exec( + ExecuteRequestBuilder::standard( + account, + 
STORED_STAKING_CONTRACT_NAME, + runtime_args! { + ARG_AMOUNT => seed_amount + }, + ) + .build(), + ) + .commit() + .expect_success(); + + let default_account = builder.get_account(account).expect("should have account"); + let named_keys = default_account.named_keys(); + + let contract_key = named_keys + .get(&contract_name) + .expect("contract_name key should exist"); + + let stored_contract = builder + .query(None, *contract_key, &[]) + .expect("should have stored value at contract key"); + + let contract = stored_contract + .as_contract() + .expect("stored value should be contract"); + + let contract_named_keys = contract.named_keys(); + + let contract_purse = contract_named_keys + .get(&purse_name) + .expect("purse_name key should exist") + .into_uref() + .expect("should be a uref"); + + // Create a mock bid with a vesting schedule initialized. + // This is only there to make sure size constraints are not a problem + // when trying to read this relatively large structure as a guest. + let mut mock_bid = DelegatorBid::locked( + DelegatorKind::Purse(contract_purse.addr()), + U512::from(100_000_000), + contract_purse, + validator_pk.clone(), + 0, + ); + + mock_bid + .vesting_schedule_mut() + .unwrap() + .initialize_with_schedule(U512::from(100_000_000), 0); + + let delegation_key = Key::BidAddr(BidAddr::DelegatedPurse { + validator: validator_pk.to_account_hash(), + delegator: contract_purse.addr(), + }); + + builder.write_data_and_commit( + [( + delegation_key, + StoredValue::BidKind(BidKind::Delegator(Box::new(mock_bid))), + )] + .iter() + .cloned(), + ); + + builder + .query(None, delegation_key, &[]) + .expect("should have delegation bid") + .as_bid_kind() + .expect("should be bidkind") + .vesting_schedule() + .expect("should have vesting schedule") + .locked_amounts() + .expect("should have locked amounts"); + + builder + .exec( + ExecuteRequestBuilder::contract_call_by_name( + account, + &contract_name, + &entry_point_name, + runtime_args! 
{ + ARG_ACTION => get_staked_amount.clone(), + ARG_VALIDATOR => validator_pk.clone(), + }, + ) + .build(), + ) + .commit() + .expect_success(); +} diff --git a/execution_engine_testing/tests/src/test/system_contracts/auction/reservations.rs b/execution_engine_testing/tests/src/test/system_contracts/auction/reservations.rs new file mode 100644 index 0000000000..34e4407d2a --- /dev/null +++ b/execution_engine_testing/tests/src/test/system_contracts/auction/reservations.rs @@ -0,0 +1,981 @@ +use num_rational::Ratio; +use num_traits::{CheckedMul, CheckedSub}; +use once_cell::sync::Lazy; +use std::collections::BTreeMap; +use tempfile::TempDir; + +use casper_engine_test_support::{ + ChainspecConfig, ExecuteRequestBuilder, LmdbWasmTestBuilder, StepRequestBuilder, + DEFAULT_ACCOUNT_ADDR, DEFAULT_PROTOCOL_VERSION, LOCAL_GENESIS_REQUEST, + MINIMUM_ACCOUNT_CREATION_BALANCE, SYSTEM_ADDR, +}; +use casper_execution_engine::{ + engine_state::{engine_config::DEFAULT_MINIMUM_DELEGATION_AMOUNT, Error}, + execution::ExecError, +}; + +use crate::test::system_contracts::auction::{ + get_delegator_staked_amount, get_era_info, get_validator_bid, +}; +use casper_types::{ + self, + account::AccountHash, + api_error::ApiError, + runtime_args, + system::auction::{ + BidsExt, DelegationRate, DelegatorKind, Error as AuctionError, Reservation, + SeigniorageAllocation, ARG_AMOUNT, ARG_DELEGATION_RATE, ARG_DELEGATOR, ARG_DELEGATORS, + ARG_ENTRY_POINT, ARG_PUBLIC_KEY, ARG_RESERVATIONS, ARG_RESERVED_SLOTS, ARG_REWARDS_MAP, + ARG_VALIDATOR, DELEGATION_RATE_DENOMINATOR, METHOD_DISTRIBUTE, + }, + ProtocolVersion, PublicKey, SecretKey, U512, +}; + +const ARG_TARGET: &str = "target"; + +const CONTRACT_TRANSFER_TO_ACCOUNT: &str = "transfer_to_account_u512.wasm"; +const CONTRACT_ADD_BID: &str = "add_bid.wasm"; +const CONTRACT_DELEGATE: &str = "delegate.wasm"; +const CONTRACT_UNDELEGATE: &str = "undelegate.wasm"; +const CONTRACT_ADD_RESERVATIONS: &str = "add_reservations.wasm"; +const 
CONTRACT_CANCEL_RESERVATIONS: &str = "cancel_reservations.wasm"; + +const ADD_BID_AMOUNT_1: u64 = 1_000_000_000_000; +const ADD_BID_RESERVED_SLOTS: u32 = 1; + +static VALIDATOR_1: Lazy = Lazy::new(|| { + let secret_key = SecretKey::ed25519_from_bytes([3; SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) +}); +static DELEGATOR_1: Lazy = Lazy::new(|| { + let secret_key = SecretKey::ed25519_from_bytes([205; SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) +}); +static DELEGATOR_2: Lazy = Lazy::new(|| { + let secret_key = SecretKey::ed25519_from_bytes([207; SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) +}); +static DELEGATOR_3: Lazy = Lazy::new(|| { + let secret_key = SecretKey::ed25519_from_bytes([209; SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) +}); +static DELEGATOR_4: Lazy = Lazy::new(|| { + let secret_key = SecretKey::ed25519_from_bytes([211; SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) +}); + +static VALIDATOR_1_ADDR: Lazy = Lazy::new(|| AccountHash::from(&*VALIDATOR_1)); +static DELEGATOR_1_ADDR: Lazy = Lazy::new(|| AccountHash::from(&*DELEGATOR_1)); +static DELEGATOR_2_ADDR: Lazy = Lazy::new(|| AccountHash::from(&*DELEGATOR_2)); +static DELEGATOR_3_ADDR: Lazy = Lazy::new(|| AccountHash::from(&*DELEGATOR_3)); +static DELEGATOR_4_ADDR: Lazy = Lazy::new(|| AccountHash::from(&*DELEGATOR_4)); + +const VALIDATOR_1_DELEGATION_RATE: DelegationRate = 10; +const VALIDATOR_1_RESERVATION_DELEGATION_RATE: DelegationRate = 20; + +/// Fund validator and delegators accounts. 
+fn setup_accounts(max_delegators_per_validator: u32) -> LmdbWasmTestBuilder { + let chainspec = + ChainspecConfig::default().with_max_delegators_per_validator(max_delegators_per_validator); + + let data_dir = TempDir::new().expect("should create temp dir"); + let mut builder = LmdbWasmTestBuilder::new_with_config(data_dir.path(), chainspec); + + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let transfer_to_validator_1 = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *VALIDATOR_1_ADDR, + ARG_AMOUNT => U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE) + }, + ) + .build(); + + let transfer_to_delegator_1 = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *DELEGATOR_1_ADDR, + ARG_AMOUNT => U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE) + }, + ) + .build(); + + let transfer_to_delegator_2 = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *DELEGATOR_2_ADDR, + ARG_AMOUNT => U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE) + }, + ) + .build(); + + let transfer_to_delegator_3 = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! { + ARG_TARGET => *DELEGATOR_3_ADDR, + ARG_AMOUNT => U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE) + }, + ) + .build(); + + let transfer_to_delegator_4 = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_ACCOUNT, + runtime_args! 
{ + ARG_TARGET => *DELEGATOR_4_ADDR, + ARG_AMOUNT => U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE) + }, + ) + .build(); + + let post_genesis_request = vec![ + transfer_to_validator_1, + transfer_to_delegator_1, + transfer_to_delegator_2, + transfer_to_delegator_3, + transfer_to_delegator_4, + ]; + + for request in post_genesis_request { + builder.exec(request).expect_success().commit(); + } + + builder +} + +/// Submit validator bid for `VALIDATOR_1_ADDR` and advance eras +/// until they are elected as active validator. +fn setup_validator_bid(builder: &mut LmdbWasmTestBuilder, reserved_slots: u32) { + let add_validator_request = ExecuteRequestBuilder::standard( + *VALIDATOR_1_ADDR, + CONTRACT_ADD_BID, + runtime_args! { + ARG_PUBLIC_KEY => VALIDATOR_1.clone(), + ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1), + ARG_DELEGATION_RATE => VALIDATOR_1_DELEGATION_RATE, + ARG_RESERVED_SLOTS => reserved_slots, + }, + ) + .build(); + + builder + .exec(add_validator_request) + .expect_success() + .commit(); + + for _ in 0..=builder.get_auction_delay() { + let step_request = StepRequestBuilder::new() + .with_parent_state_hash(builder.get_post_state_hash()) + .with_protocol_version(ProtocolVersion::V1_0_0) + .with_next_era_id(builder.get_era().successor()) + .with_run_auction(true) + .build(); + + assert!( + builder.step(step_request).is_success(), + "must execute step request" + ); + } +} + +#[ignore] +#[test] +fn should_enforce_max_delegators_per_validator_with_reserved_slots() { + let mut builder = setup_accounts(3); + + setup_validator_bid(&mut builder, ADD_BID_RESERVED_SLOTS); + + let delegation_request_1 = ExecuteRequestBuilder::standard( + *DELEGATOR_1_ADDR, + CONTRACT_DELEGATE, + runtime_args! 
{ + ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_DELEGATION_AMOUNT), + ARG_VALIDATOR => VALIDATOR_1.clone(), + ARG_DELEGATOR => DELEGATOR_1.clone(), + }, + ) + .build(); + + let delegation_request_2 = ExecuteRequestBuilder::standard( + *DELEGATOR_2_ADDR, + CONTRACT_DELEGATE, + runtime_args! { + ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_DELEGATION_AMOUNT), + ARG_VALIDATOR => VALIDATOR_1.clone(), + ARG_DELEGATOR => DELEGATOR_2.clone(), + }, + ) + .build(); + + let delegation_requests = [delegation_request_1, delegation_request_2]; + + for request in delegation_requests { + builder.exec(request).expect_success().commit(); + } + + // Delegator 3 is not on reservation list and validator is at delegator limit + // therefore delegation request should fail + let delegation_request_3 = ExecuteRequestBuilder::standard( + *DELEGATOR_3_ADDR, + CONTRACT_DELEGATE, + runtime_args! { + ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_DELEGATION_AMOUNT), + ARG_VALIDATOR => VALIDATOR_1.clone(), + ARG_DELEGATOR => DELEGATOR_3.clone(), + }, + ) + .build(); + + builder.exec(delegation_request_3).expect_failure(); + let error = builder.get_error().expect("should get error"); + + assert!(matches!( + error, + Error::Exec(ExecError::Revert(ApiError::AuctionError(auction_error))) + if auction_error == AuctionError::ExceededDelegatorSizeLimit as u8)); + + // Once we put Delegator 3 on reserved list the delegation request should succeed + let reservation = Reservation::new( + VALIDATOR_1.clone(), + DelegatorKind::PublicKey(DELEGATOR_3.clone()), + 0, + ); + let reservation_request = ExecuteRequestBuilder::standard( + *VALIDATOR_1_ADDR, + CONTRACT_ADD_RESERVATIONS, + runtime_args! { + ARG_RESERVATIONS => vec![reservation], + }, + ) + .build(); + builder.exec(reservation_request).expect_success().commit(); + + let delegation_request_4 = ExecuteRequestBuilder::standard( + *DELEGATOR_3_ADDR, + CONTRACT_DELEGATE, + runtime_args! 
{ + ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_DELEGATION_AMOUNT), + ARG_VALIDATOR => VALIDATOR_1.clone(), + ARG_DELEGATOR => DELEGATOR_3.clone(), + }, + ) + .build(); + builder.exec(delegation_request_4).expect_success().commit(); + + // Delegator 4 not on reserved list and validator at capacity + // therefore delegation request should fail + let delegation_request_5 = ExecuteRequestBuilder::standard( + *DELEGATOR_4_ADDR, + CONTRACT_DELEGATE, + runtime_args! { + ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_DELEGATION_AMOUNT), + ARG_VALIDATOR => VALIDATOR_1.clone(), + ARG_DELEGATOR => DELEGATOR_4.clone(), + }, + ) + .build(); + builder.exec(delegation_request_5).expect_failure(); + + // Now we undelegate Delegator 3 and cancel his reservation, + // then add reservation for Delegator 4. Then delegation request for + // Delegator 4 should succeed + let undelegation_request = ExecuteRequestBuilder::standard( + *DELEGATOR_3_ADDR, + CONTRACT_UNDELEGATE, + runtime_args! { + ARG_AMOUNT => U512::MAX, + ARG_VALIDATOR => VALIDATOR_1.clone(), + ARG_DELEGATOR => DELEGATOR_3.clone(), + }, + ) + .build(); + builder.exec(undelegation_request).expect_success().commit(); + + let cancellation_request = ExecuteRequestBuilder::standard( + *VALIDATOR_1_ADDR, + CONTRACT_CANCEL_RESERVATIONS, + runtime_args! { + ARG_VALIDATOR => VALIDATOR_1.clone(), + ARG_DELEGATORS => vec![DelegatorKind::PublicKey(DELEGATOR_3.clone())], + }, + ) + .build(); + builder.exec(cancellation_request).expect_success().commit(); + + let reservation = Reservation::new( + VALIDATOR_1.clone(), + DelegatorKind::PublicKey(DELEGATOR_4.clone()), + 0, + ); + let reservation_request = ExecuteRequestBuilder::standard( + *VALIDATOR_1_ADDR, + CONTRACT_ADD_RESERVATIONS, + runtime_args! { + ARG_RESERVATIONS => vec![reservation], + }, + ) + .build(); + builder.exec(reservation_request).expect_success().commit(); + + let delegation_request_6 = ExecuteRequestBuilder::standard( + *DELEGATOR_4_ADDR, + CONTRACT_DELEGATE, + runtime_args! 
{ + ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_DELEGATION_AMOUNT), + ARG_VALIDATOR => VALIDATOR_1.clone(), + ARG_DELEGATOR => DELEGATOR_4.clone(), + }, + ) + .build(); + builder.exec(delegation_request_6).expect_success().commit(); +} + +#[ignore] +#[test] +fn should_allow_validator_to_reserve_all_delegator_slots() { + let max_delegators_per_validator = 2; + + let mut builder = setup_accounts(max_delegators_per_validator); + + setup_validator_bid(&mut builder, 0); + + // cannot reserve more slots than maximum delegator number + let add_bid_request = ExecuteRequestBuilder::standard( + *VALIDATOR_1_ADDR, + CONTRACT_ADD_BID, + runtime_args! { + ARG_PUBLIC_KEY => VALIDATOR_1.clone(), + ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1), + ARG_DELEGATION_RATE => VALIDATOR_1_DELEGATION_RATE, + ARG_RESERVED_SLOTS => max_delegators_per_validator + 1, + }, + ) + .build(); + + builder.exec(add_bid_request).expect_failure(); + let error = builder.get_error().expect("should get error"); + + assert!(matches!( + error, + Error::Exec(ExecError::Revert(ApiError::AuctionError(auction_error))) + if auction_error == AuctionError::ExceededReservationSlotsLimit as u8)); + + // can reserve all slots + let add_bid_request = ExecuteRequestBuilder::standard( + *VALIDATOR_1_ADDR, + CONTRACT_ADD_BID, + runtime_args! { + ARG_PUBLIC_KEY => VALIDATOR_1.clone(), + ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1), + ARG_DELEGATION_RATE => VALIDATOR_1_DELEGATION_RATE, + ARG_RESERVED_SLOTS => max_delegators_per_validator, + }, + ) + .build(); + + builder.exec(add_bid_request).expect_success().commit(); +} + +#[ignore] +#[test] +fn should_not_allow_validator_to_reserve_more_slots_than_free_delegator_slots() { + let max_delegators_per_validator = 2; + + let mut builder = setup_accounts(max_delegators_per_validator); + + setup_validator_bid(&mut builder, 0); + + let delegation_request_1 = ExecuteRequestBuilder::standard( + *DELEGATOR_1_ADDR, + CONTRACT_DELEGATE, + runtime_args! 
{ + ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_DELEGATION_AMOUNT), + ARG_VALIDATOR => VALIDATOR_1.clone(), + ARG_DELEGATOR => DELEGATOR_1.clone(), + }, + ) + .build(); + + builder.exec(delegation_request_1).expect_success().commit(); + + // cannot reserve more slots than number of free delegator slots + let add_bid_request = ExecuteRequestBuilder::standard( + *VALIDATOR_1_ADDR, + CONTRACT_ADD_BID, + runtime_args! { + ARG_PUBLIC_KEY => VALIDATOR_1.clone(), + ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1), + ARG_DELEGATION_RATE => VALIDATOR_1_DELEGATION_RATE, + ARG_RESERVED_SLOTS => max_delegators_per_validator, + }, + ) + .build(); + + builder.exec(add_bid_request).expect_failure(); + let error = builder.get_error().expect("should get error"); + + assert!(matches!( + error, + Error::Exec(ExecError::Revert(ApiError::AuctionError(auction_error))) + if auction_error == AuctionError::ExceededReservationSlotsLimit as u8)); +} + +#[ignore] +#[test] +fn should_not_allow_validator_to_reduce_number_of_reserved_spots_if_they_are_occupied() { + let mut builder = setup_accounts(3); + + let reserved_slots = 2; + setup_validator_bid(&mut builder, reserved_slots); + + // add reservations for Delegators 1 and 2 + let reservation_request = ExecuteRequestBuilder::standard( + *VALIDATOR_1_ADDR, + CONTRACT_ADD_RESERVATIONS, + runtime_args! 
{ + ARG_RESERVATIONS => vec![ + Reservation::new(VALIDATOR_1.clone(), DelegatorKind::PublicKey(DELEGATOR_1.clone()) , 0), + Reservation::new(VALIDATOR_1.clone(), DelegatorKind::PublicKey(DELEGATOR_2.clone()) , 0), + ], + }, + ) + .build(); + builder.exec(reservation_request).expect_success().commit(); + let reservations = builder + .get_bids() + .reservations_by_validator_public_key(&VALIDATOR_1) + .expect("should have reservations"); + assert_eq!(reservations.len(), 2); + + // cannot reduce number of reserved slots because + // there are reservations for all of them + let add_validator_bid_request = ExecuteRequestBuilder::standard( + *VALIDATOR_1_ADDR, + CONTRACT_ADD_BID, + runtime_args! { + ARG_PUBLIC_KEY => VALIDATOR_1.clone(), + ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1), + ARG_DELEGATION_RATE => VALIDATOR_1_DELEGATION_RATE, + ARG_RESERVED_SLOTS => reserved_slots - 1, + }, + ) + .build(); + + builder.exec(add_validator_bid_request).expect_failure(); + let error = builder.get_error().expect("should get error"); + assert!(matches!( + error, + Error::Exec(ExecError::Revert(ApiError::AuctionError(auction_error))) + if auction_error == AuctionError::ReservationSlotsCountTooSmall as u8)); + + // remove a reservation for Delegator 2 and + // reduce number of reserved spots + let cancellation_request = ExecuteRequestBuilder::standard( + *VALIDATOR_1_ADDR, + CONTRACT_CANCEL_RESERVATIONS, + runtime_args! { + ARG_VALIDATOR => VALIDATOR_1.clone(), + ARG_DELEGATORS => vec![DelegatorKind::PublicKey(DELEGATOR_2.clone())], + }, + ) + .build(); + builder.exec(cancellation_request).expect_success().commit(); + + let reservations = builder + .get_bids() + .reservations_by_validator_public_key(&VALIDATOR_1) + .expect("should have reservations"); + assert_eq!(reservations.len(), 1); + + let add_validator_bid_request = ExecuteRequestBuilder::standard( + *VALIDATOR_1_ADDR, + CONTRACT_ADD_BID, + runtime_args! 
{ + ARG_PUBLIC_KEY => VALIDATOR_1.clone(), + ARG_AMOUNT => U512::from(ADD_BID_AMOUNT_1), + ARG_DELEGATION_RATE => VALIDATOR_1_DELEGATION_RATE, + ARG_RESERVED_SLOTS => reserved_slots - 1, + }, + ) + .build(); + + builder + .exec(add_validator_bid_request) + .expect_success() + .commit(); + + // cannot add a reservation for Delegator 2 back + // because number of slots is reduced + let reservation_request = ExecuteRequestBuilder::standard( + *VALIDATOR_1_ADDR, + CONTRACT_ADD_RESERVATIONS, + runtime_args! { + ARG_RESERVATIONS => vec![ + Reservation::new(VALIDATOR_1.clone(), DelegatorKind::PublicKey(DELEGATOR_2.clone()), 0), + ], + }, + ) + .build(); + builder.exec(reservation_request).expect_failure(); + let error = builder.get_error().expect("should get error"); + + assert!(matches!( + error, + Error::Exec(ExecError::Revert(ApiError::AuctionError(auction_error))) + if auction_error == AuctionError::ExceededReservationsLimit as u8)); +} + +#[ignore] +#[test] +fn should_not_allow_validator_to_remove_active_reservation_if_there_are_no_free_delegator_slots() { + let mut builder = setup_accounts(2); + + let reserved_slots = 1; + setup_validator_bid(&mut builder, reserved_slots); + + // add delegation for Delegator 1 + let delegation_request_1 = ExecuteRequestBuilder::standard( + *DELEGATOR_1_ADDR, + CONTRACT_DELEGATE, + runtime_args! { + ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_DELEGATION_AMOUNT), + ARG_VALIDATOR => VALIDATOR_1.clone(), + ARG_DELEGATOR => DELEGATOR_1.clone(), + }, + ) + .build(); + + builder.exec(delegation_request_1).expect_success().commit(); + + // cannot add delegation for Delegator 2 + let delegation_request_2 = ExecuteRequestBuilder::standard( + *DELEGATOR_2_ADDR, + CONTRACT_DELEGATE, + runtime_args! 
{ + ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_DELEGATION_AMOUNT), + ARG_VALIDATOR => VALIDATOR_1.clone(), + ARG_DELEGATOR => DELEGATOR_2.clone(), + }, + ) + .build(); + + builder.exec(delegation_request_2).expect_failure(); + let error = builder.get_error().expect("should get error"); + assert!(matches!( + error, + Error::Exec(ExecError::Revert(ApiError::AuctionError(auction_error))) + if auction_error == AuctionError::ExceededDelegatorSizeLimit as u8)); + + // add reservation for Delegator 2 + let reservation_request = ExecuteRequestBuilder::standard( + *VALIDATOR_1_ADDR, + CONTRACT_ADD_RESERVATIONS, + runtime_args! { + ARG_RESERVATIONS => vec![ + Reservation::new(VALIDATOR_1.clone(), DelegatorKind::PublicKey(DELEGATOR_2.clone()), 0), + ], + }, + ) + .build(); + builder.exec(reservation_request).expect_success().commit(); + + // add delegation for Delegator 2 + let delegation_request_2 = ExecuteRequestBuilder::standard( + *DELEGATOR_2_ADDR, + CONTRACT_DELEGATE, + runtime_args! { + ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_DELEGATION_AMOUNT), + ARG_VALIDATOR => VALIDATOR_1.clone(), + ARG_DELEGATOR => DELEGATOR_2.clone(), + }, + ) + .build(); + + builder.exec(delegation_request_2).expect_success().commit(); + + // cannot cancel reservation for Delegator 2 + // because there are no free public slots for delegators + let cancellation_request = ExecuteRequestBuilder::standard( + *VALIDATOR_1_ADDR, + CONTRACT_CANCEL_RESERVATIONS, + runtime_args! 
{ + ARG_VALIDATOR => VALIDATOR_1.clone(), + ARG_DELEGATORS => vec![DelegatorKind::PublicKey(DELEGATOR_2.clone())], + }, + ) + .build(); + builder.exec(cancellation_request).expect_failure(); + let error = builder.get_error().expect("should get error"); + assert!(matches!( + error, + Error::Exec(ExecError::Revert(ApiError::AuctionError(auction_error))) + if auction_error == AuctionError::ExceededDelegatorSizeLimit as u8)); +} + +#[ignore] +#[test] +fn should_handle_reserved_slots() { + let mut builder = setup_accounts(4); + + let reserved_slots = 3; + setup_validator_bid(&mut builder, reserved_slots); + + let reservations = builder + .get_bids() + .reservations_by_validator_public_key(&VALIDATOR_1); + assert!(reservations.is_none()); + + // add reservations for Delegators 1 and 2 + let reservation_request = ExecuteRequestBuilder::standard( + *VALIDATOR_1_ADDR, + CONTRACT_ADD_RESERVATIONS, + runtime_args! { + ARG_RESERVATIONS => vec![ + Reservation::new(VALIDATOR_1.clone(), DelegatorKind::PublicKey(DELEGATOR_1.clone()), 0), + Reservation::new(VALIDATOR_1.clone(), DelegatorKind::PublicKey(DELEGATOR_2.clone()), 0), + ], + }, + ) + .build(); + builder.exec(reservation_request).expect_success().commit(); + let reservations = builder + .get_bids() + .reservations_by_validator_public_key(&VALIDATOR_1) + .expect("should have reservations"); + assert_eq!(reservations.len(), 2); + + // try to cancel reservation for Delegators 1,2 and 3 + // this fails because reservation for Delegator 3 doesn't exist yet + let cancellation_request = ExecuteRequestBuilder::standard( + *VALIDATOR_1_ADDR, + CONTRACT_CANCEL_RESERVATIONS, + runtime_args! 
{ + ARG_VALIDATOR => VALIDATOR_1.clone(), + ARG_DELEGATORS => vec![DELEGATOR_1.clone(), DELEGATOR_2.clone(), DELEGATOR_3.clone()], + }, + ) + .build(); + builder.exec(cancellation_request).expect_failure(); + let error = builder.get_error().expect("should get error"); + assert!(matches!( + error, + Error::Exec(ExecError::Revert(ApiError::AuctionError(auction_error))) + if auction_error == AuctionError::ReservationNotFound as u8)); + + // add reservation for Delegator 2 and 3 + // reservation for Delegator 2 already exists, but it shouldn't cause an error + let reservation_request = ExecuteRequestBuilder::standard( + *VALIDATOR_1_ADDR, + CONTRACT_ADD_RESERVATIONS, + runtime_args! { + ARG_RESERVATIONS => vec![ + Reservation::new(VALIDATOR_1.clone(), DelegatorKind::PublicKey(DELEGATOR_2.clone()), 0), + Reservation::new(VALIDATOR_1.clone(), DelegatorKind::PublicKey(DELEGATOR_3.clone()), 0), + ], + }, + ) + .build(); + builder.exec(reservation_request).expect_success().commit(); + let reservations = builder + .get_bids() + .reservations_by_validator_public_key(&VALIDATOR_1) + .expect("should have reservations"); + assert_eq!(reservations.len(), 3); + + // try to add reservation for Delegator 4 + // this fails because the reservation list is already full + let reservation_request = ExecuteRequestBuilder::standard( + *VALIDATOR_1_ADDR, + CONTRACT_ADD_RESERVATIONS, + runtime_args! { + ARG_RESERVATIONS => vec![ + Reservation::new(VALIDATOR_1.clone(), DelegatorKind::PublicKey(DELEGATOR_4.clone()), 0), + ], + }, + ) + .build(); + builder.exec(reservation_request).expect_failure(); + let error = builder.get_error().expect("should get error"); + assert!(matches!( + error, + Error::Exec(ExecError::Revert(ApiError::AuctionError(auction_error))) + if auction_error == AuctionError::ExceededReservationsLimit as u8)); + + // cancel all reservations + let cancellation_request = ExecuteRequestBuilder::standard( + *VALIDATOR_1_ADDR, + CONTRACT_CANCEL_RESERVATIONS, + runtime_args! 
{ + ARG_VALIDATOR => VALIDATOR_1.clone(), + ARG_DELEGATORS => vec![DelegatorKind::PublicKey(DELEGATOR_1.clone()), DelegatorKind::PublicKey(DELEGATOR_2.clone()), DelegatorKind::PublicKey(DELEGATOR_3.clone())], + }, + ) + .build(); + builder.exec(cancellation_request).expect_success().commit(); + let reservations = builder + .get_bids() + .reservations_by_validator_public_key(&VALIDATOR_1); + assert!(reservations.is_none()); +} + +#[ignore] +#[test] +fn should_update_reservation_delegation_rate() { + let mut builder = setup_accounts(4); + + let reserved_slots = 3; + setup_validator_bid(&mut builder, reserved_slots); + + let reservations = builder + .get_bids() + .reservations_by_validator_public_key(&VALIDATOR_1); + assert!(reservations.is_none()); + + // add reservations for Delegators 1 and 2 + let reservation_request = ExecuteRequestBuilder::standard( + *VALIDATOR_1_ADDR, + CONTRACT_ADD_RESERVATIONS, + runtime_args! { + ARG_RESERVATIONS => vec![ + Reservation::new(VALIDATOR_1.clone(), DelegatorKind::PublicKey(DELEGATOR_1.clone()), 0), + Reservation::new(VALIDATOR_1.clone(), DelegatorKind::PublicKey(DELEGATOR_2.clone()), 0), + ], + }, + ) + .build(); + builder.exec(reservation_request).expect_success().commit(); + let reservations = builder + .get_bids() + .reservations_by_validator_public_key(&VALIDATOR_1) + .expect("should have reservations"); + assert_eq!(reservations.len(), 2); + + // try to change delegation rate for Delegator 1 + // this fails because delegation rate value is invalid + let reservation_request = ExecuteRequestBuilder::standard( + *VALIDATOR_1_ADDR, + CONTRACT_ADD_RESERVATIONS, + runtime_args! 
{ + ARG_RESERVATIONS => vec![ + Reservation::new(VALIDATOR_1.clone(), DelegatorKind::PublicKey(DELEGATOR_1.clone()), DELEGATION_RATE_DENOMINATOR + 1), + ], + }, + ) + .build(); + builder.exec(reservation_request).expect_failure(); + let error = builder.get_error().expect("should get error"); + assert!(matches!( + error, + Error::Exec(ExecError::Revert(ApiError::AuctionError(auction_error))) + if auction_error == AuctionError::DelegationRateTooLarge as u8)); + + // change delegation rate for Delegator 1 + let reservation_request = ExecuteRequestBuilder::standard( + *VALIDATOR_1_ADDR, + CONTRACT_ADD_RESERVATIONS, + runtime_args! { + ARG_RESERVATIONS => vec![ + Reservation::new(VALIDATOR_1.clone(), DelegatorKind::PublicKey(DELEGATOR_1.clone()), 10), + ], + }, + ) + .build(); + builder.exec(reservation_request).expect_success().commit(); + let reservations = builder + .get_bids() + .reservations_by_validator_public_key(&VALIDATOR_1) + .expect("should have reservations"); + assert_eq!(reservations.len(), 2); + + let delegator_1_reservation = reservations + .iter() + .find(|r| *r.delegator_kind() == DelegatorKind::PublicKey(DELEGATOR_1.clone())) + .unwrap(); + assert_eq!(*delegator_1_reservation.delegation_rate(), 10); +} + +#[ignore] +#[test] +fn should_distribute_rewards_with_reserved_slots() { + let validator_stake = U512::from(ADD_BID_AMOUNT_1); + let delegator_1_stake = U512::from(1_000_000_000_000u64); + let delegator_2_stake = U512::from(1_000_000_000_000u64); + let total_delegator_stake = delegator_1_stake + delegator_2_stake; + let total_stake = validator_stake + total_delegator_stake; + + let mut builder = setup_accounts(3); + + setup_validator_bid(&mut builder, ADD_BID_RESERVED_SLOTS); + + // add reservation for Delegator 1 + let reservation_request = ExecuteRequestBuilder::standard( + *VALIDATOR_1_ADDR, + CONTRACT_ADD_RESERVATIONS, + runtime_args! 
{ + ARG_RESERVATIONS => vec![ + Reservation::new(VALIDATOR_1.clone(), DelegatorKind::PublicKey(DELEGATOR_1.clone()), VALIDATOR_1_RESERVATION_DELEGATION_RATE), + ], + }, + ) + .build(); + builder.exec(reservation_request).expect_success().commit(); + + // add delegator bids for Delegator 1 and 2 + let delegation_request_1 = ExecuteRequestBuilder::standard( + *DELEGATOR_1_ADDR, + CONTRACT_DELEGATE, + runtime_args! { + ARG_AMOUNT => delegator_1_stake, + ARG_VALIDATOR => VALIDATOR_1.clone(), + ARG_DELEGATOR => DELEGATOR_1.clone(), + }, + ) + .build(); + let delegation_request_2 = ExecuteRequestBuilder::standard( + *DELEGATOR_2_ADDR, + CONTRACT_DELEGATE, + runtime_args! { + ARG_AMOUNT => delegator_2_stake, + ARG_VALIDATOR => VALIDATOR_1.clone(), + ARG_DELEGATOR => DELEGATOR_2.clone(), + }, + ) + .build(); + + let delegation_requests = [delegation_request_1, delegation_request_2]; + + for request in delegation_requests { + builder.exec(request).expect_success().commit(); + } + + // calculate expected rewards + let protocol_version = DEFAULT_PROTOCOL_VERSION; + let initial_supply = builder.total_supply(protocol_version, None); + let total_payout = builder.base_round_reward(None, protocol_version); + let rate = builder.round_seigniorage_rate(None, protocol_version); + let expected_total_reward = rate * initial_supply; + let expected_total_reward_integer = expected_total_reward.to_integer(); + assert_eq!(total_payout, expected_total_reward_integer); + + // advance eras + for _ in 0..=builder.get_auction_delay() { + let step_request = StepRequestBuilder::new() + .with_parent_state_hash(builder.get_post_state_hash()) + .with_protocol_version(ProtocolVersion::V1_0_0) + .with_next_era_id(builder.get_era().successor()) + .with_run_auction(true) + .build(); + + assert!( + builder.step(step_request).is_success(), + "must execute step successfully" + ); + } + + let mut rewards = BTreeMap::new(); + rewards.insert(VALIDATOR_1.clone(), vec![total_payout]); + + let distribute_request = 
ExecuteRequestBuilder::contract_call_by_hash( + *SYSTEM_ADDR, + builder.get_auction_contract_hash(), + METHOD_DISTRIBUTE, + runtime_args! { + ARG_ENTRY_POINT => METHOD_DISTRIBUTE, + ARG_REWARDS_MAP => rewards + }, + ) + .build(); + + builder.exec(distribute_request).commit().expect_success(); + + let default_commission_rate = Ratio::new( + U512::from(VALIDATOR_1_DELEGATION_RATE), + U512::from(DELEGATION_RATE_DENOMINATOR), + ); + let reservation_commission_rate = Ratio::new( + U512::from(VALIDATOR_1_RESERVATION_DELEGATION_RATE), + U512::from(DELEGATION_RATE_DENOMINATOR), + ); + let reward_multiplier = Ratio::new(total_delegator_stake, total_stake); + let base_delegator_reward = expected_total_reward + .checked_mul(&reward_multiplier) + .expect("must get delegator reward"); + + let delegator_1_expected_payout = { + let reward_multiplier = Ratio::new(delegator_1_stake, total_delegator_stake); + let delegator_1_reward = base_delegator_reward + .checked_mul(&reward_multiplier) + .unwrap(); + let commission = delegator_1_reward + .checked_mul(&reservation_commission_rate) + .unwrap(); + delegator_1_reward + .checked_sub(&commission) + .unwrap() + .to_integer() + }; + let delegator_2_expected_payout = { + let reward_multiplier = Ratio::new(delegator_2_stake, total_delegator_stake); + let delegator_2_reward = base_delegator_reward + .checked_mul(&reward_multiplier) + .unwrap(); + let commission = delegator_2_reward + .checked_mul(&default_commission_rate) + .unwrap(); + delegator_2_reward + .checked_sub(&commission) + .unwrap() + .to_integer() + }; + + let delegator_1_actual_payout = { + let delegator_stake_before = delegator_1_stake; + let delegator_stake_after = + get_delegator_staked_amount(&mut builder, VALIDATOR_1.clone(), DELEGATOR_1.clone()); + delegator_stake_after - delegator_stake_before + }; + assert_eq!(delegator_1_actual_payout, delegator_1_expected_payout); + + let delegator_2_actual_payout = { + let delegator_stake_before = delegator_2_stake; + let 
delegator_stake_after = + get_delegator_staked_amount(&mut builder, VALIDATOR_1.clone(), DELEGATOR_2.clone()); + delegator_stake_after - delegator_stake_before + }; + assert_eq!(delegator_2_actual_payout, delegator_2_expected_payout); + + let validator_1_expected_payout = { + let total_delegator_payout = delegator_1_expected_payout + delegator_2_expected_payout; + let validators_part = expected_total_reward - Ratio::from(total_delegator_payout); + validators_part.to_integer() + }; + + let validator_1_actual_payout = { + let validator_stake_before = validator_stake; + let validator_stake_after = get_validator_bid(&mut builder, VALIDATOR_1.clone()) + .expect("should have validator bid") + .staked_amount(); + validator_stake_after - validator_stake_before + }; + + assert_eq!(validator_1_actual_payout, validator_1_expected_payout); + + let era_info = get_era_info(&mut builder); + + assert!(matches!( + era_info.select(DELEGATOR_1.clone()).next(), + Some(SeigniorageAllocation::DelegatorKind { delegator_kind: DelegatorKind::PublicKey(delegator_public_key), amount, .. }) + if *delegator_public_key == *DELEGATOR_1 && *amount == delegator_1_expected_payout + )); + + assert!(matches!( + era_info.select(DELEGATOR_2.clone()).next(), + Some(SeigniorageAllocation::DelegatorKind { delegator_kind: DelegatorKind::PublicKey(delegator_public_key), amount, .. 
}) + if *delegator_public_key == *DELEGATOR_2 && *amount == delegator_2_expected_payout + )); +} diff --git a/execution_engine_testing/tests/src/test/system_contracts/auction_bidding.rs b/execution_engine_testing/tests/src/test/system_contracts/auction_bidding.rs index ab424547ab..cd55f15cc5 100644 --- a/execution_engine_testing/tests/src/test/system_contracts/auction_bidding.rs +++ b/execution_engine_testing/tests/src/test/system_contracts/auction_bidding.rs @@ -1,52 +1,39 @@ -use assert_matches::assert_matches; use num_traits::Zero; use casper_engine_test_support::{ - internal::{ - utils, ExecuteRequestBuilder, InMemoryWasmTestBuilder, UpgradeRequestBuilder, - DEFAULT_ACCOUNTS, DEFAULT_ACCOUNT_PUBLIC_KEY, DEFAULT_GENESIS_TIMESTAMP_MILLIS, - DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS, DEFAULT_PAYMENT, DEFAULT_PROTOCOL_VERSION, - DEFAULT_RUN_GENESIS_REQUEST, DEFAULT_UNBONDING_DELAY, SYSTEM_ADDR, - TIMESTAMP_MILLIS_INCREMENT, - }, - DEFAULT_ACCOUNT_ADDR, MINIMUM_ACCOUNT_CREATION_BALANCE, -}; -use casper_execution_engine::{ - core::{ - engine_state::{ - genesis::{GenesisAccount, GenesisValidator}, - Error as EngineError, - }, - execution::Error, - }, - shared::motes::Motes, + utils, ExecuteRequestBuilder, LmdbWasmTestBuilder, TransferRequestBuilder, + UpgradeRequestBuilder, DEFAULT_ACCOUNTS, DEFAULT_ACCOUNT_ADDR, DEFAULT_ACCOUNT_PUBLIC_KEY, + DEFAULT_GENESIS_TIMESTAMP_MILLIS, DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS, + DEFAULT_PROPOSER_PUBLIC_KEY, DEFAULT_PROTOCOL_VERSION, DEFAULT_UNBONDING_DELAY, + LOCAL_GENESIS_REQUEST, MINIMUM_ACCOUNT_CREATION_BALANCE, SYSTEM_ADDR, + TIMESTAMP_MILLIS_INCREMENT, }; +use casper_execution_engine::{engine_state::Error as EngineError, execution::ExecError}; + use casper_types::{ account::AccountHash, runtime_args, - system::auction::{ - self, Bids, DelegationRate, UnbondingPurses, ARG_VALIDATOR_PUBLIC_KEYS, INITIAL_ERA_ID, - METHOD_SLASH, + system::{ + auction::{ + self, BidsExt, DelegationRate, UnbondKind, ARG_VALIDATOR_PUBLIC_KEYS, INITIAL_ERA_ID, 
+ METHOD_SLASH, + }, + mint, }, - ApiError, EraId, ProtocolVersion, PublicKey, RuntimeArgs, SecretKey, U512, + ApiError, EraId, GenesisAccount, GenesisValidator, Motes, ProtocolVersion, PublicKey, + SecretKey, DEFAULT_MINIMUM_BID_AMOUNT, U512, }; const CONTRACT_TRANSFER_TO_ACCOUNT: &str = "transfer_to_account_u512.wasm"; const CONTRACT_ADD_BID: &str = "add_bid.wasm"; const CONTRACT_WITHDRAW_BID: &str = "withdraw_bid.wasm"; -const CONTRACT_AUCTION_BIDDING: &str = "auction_bidding.wasm"; const GENESIS_VALIDATOR_STAKE: u64 = 50_000; const GENESIS_ACCOUNT_STAKE: u64 = 100_000; const TRANSFER_AMOUNT: u64 = MINIMUM_ACCOUNT_CREATION_BALANCE; -const TEST_BOND: &str = "bond"; -const TEST_SEED_NEW_ACCOUNT: &str = "seed_new_account"; - const ARG_AMOUNT: &str = "amount"; const ARG_PUBLIC_KEY: &str = "public_key"; -const ARG_ENTRY_POINT: &str = "entry_point"; -const ARG_ACCOUNT_HASH: &str = "account_hash"; const ARG_DELEGATION_RATE: &str = "delegation_rate"; const DELEGATION_RATE: DelegationRate = 42; @@ -55,8 +42,8 @@ const DELEGATION_RATE: DelegationRate = 42; #[test] fn should_run_successful_bond_and_unbond_and_slashing() { let default_public_key_arg = DEFAULT_ACCOUNT_PUBLIC_KEY.clone(); - let mut builder = InMemoryWasmTestBuilder::default(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); let exec_request = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, @@ -71,7 +58,7 @@ fn should_run_successful_bond_and_unbond_and_slashing() { builder.exec(exec_request).expect_success().commit(); let _default_account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) + .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR) .expect("should get account 1"); let auction = builder.get_auction_contract_hash(); @@ -89,9 +76,9 @@ fn should_run_successful_bond_and_unbond_and_slashing() { builder.exec(exec_request_1).expect_success().commit(); - let bids: Bids = builder.get_bids(); + let 
bids = builder.get_bids(); let default_account_bid = bids - .get(&*DEFAULT_ACCOUNT_PUBLIC_KEY) + .validator_bid(&DEFAULT_ACCOUNT_PUBLIC_KEY) .expect("should have bid"); let bid_purse = *default_account_bid.bonding_purse(); assert_eq!( @@ -99,17 +86,17 @@ fn should_run_successful_bond_and_unbond_and_slashing() { GENESIS_ACCOUNT_STAKE.into() ); - let unbond_purses: UnbondingPurses = builder.get_withdraws(); + let unbond_purses = builder.get_unbonds(); assert_eq!(unbond_purses.len(), 0); // // Partial unbond // - let unbond_amount = U512::from(GENESIS_ACCOUNT_STAKE) - 1; + let unbond_amount = U512::from(GENESIS_ACCOUNT_STAKE - DEFAULT_MINIMUM_BID_AMOUNT); let unbonding_purse = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) + .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR) .expect("should have default account") .main_purse(); let exec_request_3 = ExecuteRequestBuilder::standard( @@ -126,46 +113,51 @@ fn should_run_successful_bond_and_unbond_and_slashing() { let account_balance_before = builder.get_purse_balance(unbonding_purse); - let unbond_purses: UnbondingPurses = builder.get_withdraws(); - assert_eq!(unbond_purses.len(), 1); - - let unbond_list = unbond_purses - .get(&*DEFAULT_ACCOUNT_ADDR) - .expect("should have unbond"); - assert_eq!(unbond_list.len(), 1); - assert_eq!( - unbond_list[0].validator_public_key(), - &default_public_key_arg, - ); - assert!(unbond_list[0].is_validator()); - - assert_eq!(unbond_list[0].era_of_creation(), INITIAL_ERA_ID,); + let unbonds = builder.get_unbonds(); + let unbond = { + assert_eq!(unbonds.len(), 1); + let unbond_kind = UnbondKind::Validator((*DEFAULT_ACCOUNT_PUBLIC_KEY).clone()); + let unbond = unbonds + .get(&unbond_kind) + .expect("should have unbond") + .first() + .expect("must have one unbond entry"); + assert_eq!(unbond.eras().len(), 1, "unexpected era count"); + assert_eq!(unbond.validator_public_key(), &default_public_key_arg,); + assert!(unbond.is_validator()); + unbond + }; - let unbond_era_1 = 
unbond_list[0].era_of_creation(); + let unbond_era_1 = unbond.eras().first().expect("should have era"); + assert_eq!(unbond_era_1.era_of_creation(), INITIAL_ERA_ID,); builder.run_auction( DEFAULT_GENESIS_TIMESTAMP_MILLIS + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS, Vec::new(), ); - let unbond_purses: UnbondingPurses = builder.get_withdraws(); - assert_eq!(unbond_purses.len(), 1); - let unbond_list = unbond_purses - .get(&*DEFAULT_ACCOUNT_ADDR) - .expect("should have unbond"); - assert_eq!(unbond_list.len(), 1); - assert_eq!( - unbond_list[0].validator_public_key(), - &default_public_key_arg, - ); - assert!(unbond_list[0].is_validator()); + let unbonds = builder.get_unbonds(); + let unbond = { + assert_eq!(unbonds.len(), 1); + + let unbond = unbonds + .get(&UnbondKind::Validator( + (*DEFAULT_ACCOUNT_PUBLIC_KEY).clone(), + )) + .expect("should have unbond") + .first() + .expect("must have one unbond entry"); + assert_eq!(unbond.eras().len(), 1); + assert_eq!(unbond.validator_public_key(), &default_public_key_arg,); + assert!(unbond.is_validator()); + unbond + }; let account_balance = builder.get_purse_balance(unbonding_purse); assert_eq!(account_balance_before, account_balance); - assert_eq!(unbond_list[0].amount(), &unbond_amount,); - - let unbond_era_2 = unbond_list[0].era_of_creation(); + let unbond_era_2 = unbond.eras().first().expect("should have eras"); + assert_eq!(unbond_era_2.amount(), &unbond_amount,); assert_eq!(unbond_era_2, unbond_era_1); @@ -183,16 +175,12 @@ fn should_run_successful_bond_and_unbond_and_slashing() { builder.exec(exec_request_5).expect_success().commit(); - let unbond_purses: UnbondingPurses = builder.get_withdraws(); - assert!(unbond_purses - .get(&*DEFAULT_ACCOUNT_ADDR) - .unwrap() - .is_empty()); + let unbonds = builder.get_unbonds(); + let unbond_kind = UnbondKind::Validator((*DEFAULT_ACCOUNT_PUBLIC_KEY).clone()); + assert!(!unbonds.contains_key(&unbond_kind)); - let bids: Bids = builder.get_bids(); - let default_account_bid = 
bids.get(&DEFAULT_ACCOUNT_PUBLIC_KEY).unwrap(); - assert!(default_account_bid.inactive()); - assert!(default_account_bid.staked_amount().is_zero()); + let bids = builder.get_bids(); + assert!(bids.validator_bid(&DEFAULT_ACCOUNT_PUBLIC_KEY).is_none()); let account_balance_after_slashing = builder.get_purse_balance(unbonding_purse); assert_eq!(account_balance_after_slashing, account_balance_before); @@ -200,61 +188,64 @@ fn should_run_successful_bond_and_unbond_and_slashing() { #[ignore] #[test] -fn should_fail_bonding_with_insufficient_funds() { - let account_1_public_key: PublicKey = - SecretKey::ed25519_from_bytes([123; SecretKey::ED25519_LENGTH]) - .unwrap() - .into(); - let account_1_hash = AccountHash::from(&account_1_public_key); +fn should_fail_bonding_with_insufficient_funds_directly() { + let new_validator_sk = SecretKey::ed25519_from_bytes([123; SecretKey::ED25519_LENGTH]).unwrap(); + let new_validator_pk: PublicKey = (&new_validator_sk).into(); + let new_validator_hash = AccountHash::from(&new_validator_pk); + assert_ne!(&DEFAULT_PROPOSER_PUBLIC_KEY.clone(), &new_validator_pk); - let exec_request_1 = ExecuteRequestBuilder::standard( - *DEFAULT_ACCOUNT_ADDR, - CONTRACT_AUCTION_BIDDING, - runtime_args! { - ARG_ENTRY_POINT => TEST_SEED_NEW_ACCOUNT, - ARG_ACCOUNT_HASH => account_1_hash, - ARG_AMOUNT => *DEFAULT_PAYMENT + GENESIS_ACCOUNT_STAKE, - }, - ) - .build(); - let exec_request_2 = ExecuteRequestBuilder::standard( - account_1_hash, - CONTRACT_AUCTION_BIDDING, - runtime_args! 
{ - ARG_ENTRY_POINT => TEST_BOND, - ARG_AMOUNT => *DEFAULT_PAYMENT + GENESIS_ACCOUNT_STAKE, - ARG_PUBLIC_KEY => account_1_public_key, - }, - ) - .build(); + let mut builder = LmdbWasmTestBuilder::default(); - let mut builder = InMemoryWasmTestBuilder::default(); + let transfer_amount = U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE); + let delegation_rate: DelegationRate = 10; - builder - .run_genesis(&DEFAULT_RUN_GENESIS_REQUEST) - .exec(exec_request_1) - .commit(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); - builder.exec(exec_request_2).commit(); + let exec_request = TransferRequestBuilder::new(transfer_amount, new_validator_hash) + .with_transfer_id(1) + .build(); + + builder.transfer_and_commit(exec_request).expect_success(); + + let new_validator_account = builder + .get_entity_by_account_hash(new_validator_hash) + .expect("should work"); - let response = builder - .get_exec_result(1) - .expect("should have a response") - .to_owned(); + let new_validator_balance = builder.get_purse_balance(new_validator_account.main_purse()); + + assert_eq!(new_validator_balance, transfer_amount,); + + let add_bid_request = ExecuteRequestBuilder::contract_call_by_hash( + new_validator_hash, + builder.get_auction_contract_hash(), + auction::METHOD_ADD_BID, + runtime_args! 
{ + auction::ARG_PUBLIC_KEY => new_validator_pk, + auction::ARG_AMOUNT => new_validator_balance + U512::one(), + auction::ARG_DELEGATION_RATE => delegation_rate, + }, + ) + .build(); + builder.exec(add_bid_request); - assert_eq!(response.len(), 1); - let exec_result = response[0].as_error().expect("should have error"); - let error = assert_matches!(exec_result, EngineError::Exec(Error::Revert(e)) => *e, "{:?}", exec_result); - assert_eq!(error, ApiError::from(auction::Error::TransferToBidPurse)); + let error = builder.get_error().expect("should be error"); + assert!( + matches!( + error, + EngineError::Exec(ExecError::Revert(ApiError::Mint(mint_error)) + ) + if mint_error == mint::Error::InsufficientFunds as u8), + "{:?}", + error + ); } #[ignore] #[test] fn should_fail_unbonding_validator_with_locked_funds() { - let account_1_public_key: PublicKey = - SecretKey::ed25519_from_bytes([42; SecretKey::ED25519_LENGTH]) - .unwrap() - .into(); + let account_1_secret_key = + SecretKey::ed25519_from_bytes([42; SecretKey::ED25519_LENGTH]).unwrap(); + let account_1_public_key = PublicKey::from(&account_1_secret_key); let account_1_hash = AccountHash::from(&account_1_public_key); let account_1_balance = U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE); @@ -264,7 +255,7 @@ fn should_fail_unbonding_validator_with_locked_funds() { account_1_public_key.clone(), Motes::new(account_1_balance), Some(GenesisValidator::new( - Motes::new(GENESIS_VALIDATOR_STAKE.into()), + Motes::new(GENESIS_VALIDATOR_STAKE), DelegationRate::zero(), )), ); @@ -274,9 +265,9 @@ fn should_fail_unbonding_validator_with_locked_funds() { let run_genesis_request = utils::create_run_genesis_request(accounts); - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - builder.run_genesis(&run_genesis_request); + builder.run_genesis(run_genesis_request); let exec_request_2 = ExecuteRequestBuilder::standard( account_1_hash, @@ -290,12 +281,7 @@ fn 
should_fail_unbonding_validator_with_locked_funds() { builder.exec(exec_request_2).commit(); - let response = builder - .get_exec_result(0) - .expect("should have a response") - .to_owned(); - - let error_message = utils::get_error_message(response); + let error_message = builder.get_error_message().expect("should have a result"); // handle_payment::Error::NotBonded => 0 assert!( @@ -321,18 +307,13 @@ fn should_fail_unbonding_validator_without_bonding_first() { ) .build(); - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); builder.exec(exec_request).commit(); - let response = builder - .get_exec_result(0) - .expect("should have a response") - .to_owned(); - - let error_message = utils::get_error_message(response); + let error_message = builder.get_error_message().expect("should have a result"); assert!( error_message.contains(&format!( @@ -352,11 +333,11 @@ fn should_run_successful_bond_and_unbond_with_release() { let mut timestamp_millis = DEFAULT_GENESIS_TIMESTAMP_MILLIS + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS; - let mut builder = InMemoryWasmTestBuilder::default(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); let default_account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) + .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR) .expect("should have default account"); let unbonding_purse = default_account.main_purse(); @@ -374,7 +355,7 @@ fn should_run_successful_bond_and_unbond_with_release() { builder.exec(exec_request).expect_success().commit(); let _default_account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) + .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR) .expect("should get account 1"); let exec_request_1 = ExecuteRequestBuilder::standard( @@ -390,15 +371,17 @@ fn 
should_run_successful_bond_and_unbond_with_release() { builder.exec(exec_request_1).expect_success().commit(); - let bids: Bids = builder.get_bids(); - let bid = bids.get(&default_public_key_arg).expect("should have bid"); + let bids = builder.get_bids(); + let bid = bids + .validator_bid(&default_public_key_arg) + .expect("should have bid"); let bid_purse = *bid.bonding_purse(); assert_eq!( builder.get_purse_balance(bid_purse), GENESIS_ACCOUNT_STAKE.into() ); - let unbond_purses: UnbondingPurses = builder.get_withdraws(); + let unbond_purses = builder.get_unbonds(); assert_eq!(unbond_purses.len(), 0); // @@ -410,7 +393,7 @@ fn should_run_successful_bond_and_unbond_with_release() { // Partial unbond // - let unbond_amount = U512::from(GENESIS_ACCOUNT_STAKE) - 1; + let unbond_amount = U512::from(GENESIS_ACCOUNT_STAKE) - DEFAULT_MINIMUM_BID_AMOUNT; let exec_request_2 = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, @@ -424,46 +407,39 @@ fn should_run_successful_bond_and_unbond_with_release() { builder.exec(exec_request_2).expect_success().commit(); - let unbond_purses: UnbondingPurses = builder.get_withdraws(); - assert_eq!(unbond_purses.len(), 1); + let unbonds = builder.get_unbonds(); + assert_eq!(unbonds.len(), 1); - let unbond_list = unbond_purses - .get(&*DEFAULT_ACCOUNT_ADDR) - .expect("should have unbond"); - assert_eq!(unbond_list.len(), 1); - assert_eq!( - unbond_list[0].validator_public_key(), - &default_public_key_arg, - ); - assert!(unbond_list[0].is_validator()); + let unbond_kind = UnbondKind::Validator((*DEFAULT_ACCOUNT_PUBLIC_KEY).clone()); + let unbond = unbonds + .get(&unbond_kind) + .expect("should have unbond") + .first() + .expect("must have one unbond entry"); + assert_eq!(unbond.eras().len(), 1); + assert_eq!(unbond.validator_public_key(), &default_public_key_arg,); + assert!(unbond.is_validator()); - assert_eq!(unbond_list[0].era_of_creation(), INITIAL_ERA_ID + 1); - - let unbond_era_1 = unbond_list[0].era_of_creation(); + let era = 
unbond.eras().first().expect("should have era"); + assert_eq!(*era.amount(), unbond_amount); + let unbond_era_1 = era.era_of_creation(); + assert_eq!(unbond_era_1, INITIAL_ERA_ID + 1); let account_balance_before_auction = builder.get_purse_balance(unbonding_purse); builder.run_auction(timestamp_millis, Vec::new()); timestamp_millis += TIMESTAMP_MILLIS_INCREMENT; - let unbond_purses: UnbondingPurses = builder.get_withdraws(); - assert_eq!(unbond_purses.len(), 1); - - let unbond_list = unbond_purses - .get(&DEFAULT_ACCOUNT_ADDR) - .expect("should have unbond"); - assert_eq!(unbond_list.len(), 1); - assert_eq!( - unbond_list[0].validator_public_key(), - &default_public_key_arg, - ); - assert!(unbond_list[0].is_validator()); assert_eq!( builder.get_purse_balance(unbonding_purse), account_balance_before_auction, // Not paid yet ); - let unbond_era_2 = unbond_list[0].era_of_creation(); + let unbond_era_2 = unbond + .eras() + .first() + .expect("should have eras") + .era_of_creation(); assert_eq!(unbond_era_2, unbond_era_1); // era of withdrawal didn't change since first run @@ -482,16 +458,16 @@ fn should_run_successful_bond_and_unbond_with_release() { account_balance_before_auction + unbond_amount ); - let unbond_purses: UnbondingPurses = builder.get_withdraws(); - assert!(unbond_purses - .get(&*DEFAULT_ACCOUNT_ADDR) - .unwrap() - .is_empty()); + let unbonds = builder.get_unbonds(); + let unbond_kind = UnbondKind::Validator((*DEFAULT_ACCOUNT_PUBLIC_KEY).clone()); + assert!(!unbonds.contains_key(&unbond_kind)); - let bids: Bids = builder.get_bids(); + let bids = builder.get_bids(); assert!(!bids.is_empty()); - let bid = bids.get(&default_public_key_arg).expect("should have bid"); + let bid = bids + .validator_bid(&default_public_key_arg) + .expect("should have bid"); let bid_purse = *bid.bonding_purse(); assert_eq!( builder.get_purse_balance(bid_purse), @@ -507,12 +483,12 @@ fn should_run_successful_unbond_funds_after_changing_unbonding_delay() { let mut timestamp_millis 
= DEFAULT_GENESIS_TIMESTAMP_MILLIS + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS; - let mut builder = InMemoryWasmTestBuilder::default(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); let new_unbonding_delay = DEFAULT_UNBONDING_DELAY + 5; - let old_protocol_version = *DEFAULT_PROTOCOL_VERSION; + let old_protocol_version = DEFAULT_PROTOCOL_VERSION; let sem_ver = old_protocol_version.value(); let new_protocol_version = ProtocolVersion::from_parts(sem_ver.major, sem_ver.minor, sem_ver.patch + 1); @@ -527,10 +503,10 @@ fn should_run_successful_unbond_funds_after_changing_unbonding_delay() { .build() }; - builder.upgrade_with_upgrade_request(&mut upgrade_request); + builder.upgrade(&mut upgrade_request); let default_account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) + .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR) .expect("should have default account"); let unbonding_purse = default_account.main_purse(); @@ -543,13 +519,12 @@ fn should_run_successful_unbond_funds_after_changing_unbonding_delay() { "amount" => U512::from(TRANSFER_AMOUNT) }, ) - .with_protocol_version(new_protocol_version) .build(); builder.exec(exec_request).expect_success().commit(); let _default_account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) + .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR) .expect("should get account 1"); let exec_request_1 = ExecuteRequestBuilder::standard( @@ -561,20 +536,21 @@ fn should_run_successful_unbond_funds_after_changing_unbonding_delay() { ARG_DELEGATION_RATE => DELEGATION_RATE, }, ) - .with_protocol_version(new_protocol_version) .build(); builder.exec(exec_request_1).expect_success().commit(); - let bids: Bids = builder.get_bids(); - let bid = bids.get(&default_public_key_arg).expect("should have bid"); + let bids = builder.get_bids(); + let bid = bids + .validator_bid(&default_public_key_arg) + .expect("should have bid"); let bid_purse = 
*bid.bonding_purse(); assert_eq!( builder.get_purse_balance(bid_purse), GENESIS_ACCOUNT_STAKE.into() ); - let unbond_purses: UnbondingPurses = builder.get_withdraws(); + let unbond_purses = builder.get_unbonds(); assert_eq!(unbond_purses.len(), 0); // @@ -586,7 +562,7 @@ fn should_run_successful_unbond_funds_after_changing_unbonding_delay() { // Partial unbond // - let unbond_amount = U512::from(GENESIS_ACCOUNT_STAKE) - 1; + let unbond_amount = U512::from(GENESIS_ACCOUNT_STAKE) - DEFAULT_MINIMUM_BID_AMOUNT; let exec_request_2 = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, @@ -596,51 +572,54 @@ fn should_run_successful_unbond_funds_after_changing_unbonding_delay() { ARG_PUBLIC_KEY => default_public_key_arg.clone(), }, ) - .with_protocol_version(new_protocol_version) .build(); builder.exec(exec_request_2).expect_success().commit(); let account_balance_before_auction = builder.get_purse_balance(unbonding_purse); - let unbond_purses: UnbondingPurses = builder.get_withdraws(); - assert_eq!(unbond_purses.len(), 1); + let unbonds = builder.get_unbonds(); + assert_eq!(unbonds.len(), 1); - let unbond_list = unbond_purses - .get(&*DEFAULT_ACCOUNT_ADDR) - .expect("should have unbond"); - assert_eq!(unbond_list.len(), 1); - assert_eq!( - unbond_list[0].validator_public_key(), - &default_public_key_arg, - ); - assert!(unbond_list[0].is_validator()); + let unbond_kind = UnbondKind::Validator((*DEFAULT_ACCOUNT_PUBLIC_KEY).clone()); + let unbond = unbonds + .get(&unbond_kind) + .expect("should have unbond") + .first() + .expect("must have one unbond entry"); + assert_eq!(unbond.eras().len(), 1); + assert_eq!(unbond.validator_public_key(), &default_public_key_arg,); + assert!(unbond.is_validator()); - assert_eq!(unbond_list[0].era_of_creation(), INITIAL_ERA_ID + 1); + let era = unbond.eras().first().expect("should have eras"); + assert_eq!(era.era_of_creation(), INITIAL_ERA_ID + 1); - let unbond_era_1 = unbond_list[0].era_of_creation(); + let unbond_era_1 = 
era.era_of_creation(); builder.run_auction(timestamp_millis, Vec::new()); - let unbond_purses: UnbondingPurses = builder.get_withdraws(); + let unbond_purses = builder.get_unbonds(); assert_eq!(unbond_purses.len(), 1); - let unbond_list = unbond_purses - .get(&DEFAULT_ACCOUNT_ADDR) - .expect("should have unbond"); - assert_eq!(unbond_list.len(), 1); - assert_eq!( - unbond_list[0].validator_public_key(), - &default_public_key_arg, - ); - assert!(unbond_list[0].is_validator()); + let unbond_kind = UnbondKind::Validator((*DEFAULT_ACCOUNT_PUBLIC_KEY).clone()); + let unbond = unbond_purses + .get(&unbond_kind) + .expect("should have unbond") + .first() + .expect("must have one unbond entry"); + assert_eq!(unbond.validator_public_key(), &default_public_key_arg,); + assert!(unbond.is_validator()); assert_eq!( builder.get_purse_balance(unbonding_purse), account_balance_before_auction, // Not paid yet ); - let unbond_era_2 = unbond_list[0].era_of_creation(); + let unbond_era_2 = unbond + .eras() + .first() + .expect("should have era") + .era_of_creation(); assert_eq!(unbond_era_2, unbond_era_1); // era of withdrawal didn't change since first run @@ -674,16 +653,16 @@ fn should_run_successful_unbond_funds_after_changing_unbonding_delay() { account_balance_before_auction + unbond_amount ); - let unbond_purses: UnbondingPurses = builder.get_withdraws(); - assert!(unbond_purses - .get(&*DEFAULT_ACCOUNT_ADDR) - .unwrap() - .is_empty()); + let unbonds = builder.get_unbonds(); + let unbond_kind = UnbondKind::Validator((*DEFAULT_ACCOUNT_PUBLIC_KEY).clone()); + assert!(!unbonds.contains_key(&unbond_kind)); - let bids: Bids = builder.get_bids(); + let bids = builder.get_bids(); assert!(!bids.is_empty()); - let bid = bids.get(&default_public_key_arg).expect("should have bid"); + let bid = bids + .validator_bid(&default_public_key_arg) + .expect("should have bid"); let bid_purse = *bid.bonding_purse(); assert_eq!( builder.get_purse_balance(bid_purse), diff --git 
a/execution_engine_testing/tests/src/test/system_contracts/genesis.rs b/execution_engine_testing/tests/src/test/system_contracts/genesis.rs index f02a727e8f..18ef700aca 100644 --- a/execution_engine_testing/tests/src/test/system_contracts/genesis.rs +++ b/execution_engine_testing/tests/src/test/system_contracts/genesis.rs @@ -2,21 +2,17 @@ use num_traits::Zero; use once_cell::sync::Lazy; use casper_engine_test_support::{ - internal::{ - InMemoryWasmTestBuilder, DEFAULT_AUCTION_DELAY, DEFAULT_GENESIS_TIMESTAMP_MILLIS, - DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS, DEFAULT_ROUND_SEIGNIORAGE_RATE, DEFAULT_SYSTEM_CONFIG, - DEFAULT_UNBONDING_DELAY, DEFAULT_VALIDATOR_SLOTS, DEFAULT_WASM_CONFIG, - }, - AccountHash, + genesis_config_builder::GenesisConfigBuilder, ChainspecConfig, LmdbWasmTestBuilder, + DEFAULT_AUCTION_DELAY, DEFAULT_CHAINSPEC_REGISTRY, DEFAULT_GENESIS_TIMESTAMP_MILLIS, + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS, DEFAULT_PROTOCOL_VERSION, DEFAULT_ROUND_SEIGNIORAGE_RATE, + DEFAULT_STORAGE_COSTS, DEFAULT_SYSTEM_CONFIG, DEFAULT_UNBONDING_DELAY, DEFAULT_VALIDATOR_SLOTS, + DEFAULT_WASM_CONFIG, }; -use casper_execution_engine::{ - core::engine_state::{ - genesis::{ExecConfig, GenesisAccount, GenesisValidator}, - run_genesis_request::RunGenesisRequest, - }, - shared::{motes::Motes, stored_value::StoredValue}, +use casper_storage::data_access_layer::GenesisRequest; +use casper_types::{ + account::AccountHash, system::auction::DelegationRate, GenesisAccount, GenesisValidator, Key, + Motes, ProtocolVersion, PublicKey, SecretKey, StoredValue, U512, }; -use casper_types::{system::auction::DelegationRate, ProtocolVersion, PublicKey, SecretKey, U512}; const GENESIS_CONFIG_HASH: [u8; 32] = [127; 32]; const ACCOUNT_1_BONDED_AMOUNT: u64 = 1_000_000; @@ -25,22 +21,20 @@ const ACCOUNT_1_BALANCE: u64 = 1_000_000_000; const ACCOUNT_2_BALANCE: u64 = 2_000_000_000; static ACCOUNT_1_PUBLIC_KEY: Lazy = Lazy::new(|| { - SecretKey::ed25519_from_bytes([42; SecretKey::ED25519_LENGTH]) - .unwrap() - 
.into() + let secret_key = SecretKey::ed25519_from_bytes([42; SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) }); static ACCOUNT_1_ADDR: Lazy = Lazy::new(|| AccountHash::from(&*ACCOUNT_1_PUBLIC_KEY)); static ACCOUNT_2_PUBLIC_KEY: Lazy = Lazy::new(|| { - SecretKey::ed25519_from_bytes([44; SecretKey::ED25519_LENGTH]) - .unwrap() - .into() + let secret_key = SecretKey::ed25519_from_bytes([44; SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) }); static ACCOUNT_2_ADDR: Lazy = Lazy::new(|| AccountHash::from(&*ACCOUNT_2_PUBLIC_KEY)); static GENESIS_CUSTOM_ACCOUNTS: Lazy> = Lazy::new(|| { let account_1 = { - let account_1_balance = Motes::new(ACCOUNT_1_BALANCE.into()); - let account_1_bonded_amount = Motes::new(ACCOUNT_1_BONDED_AMOUNT.into()); + let account_1_balance = Motes::new(ACCOUNT_1_BALANCE); + let account_1_bonded_amount = Motes::new(ACCOUNT_1_BONDED_AMOUNT); GenesisAccount::account( ACCOUNT_1_PUBLIC_KEY.clone(), account_1_balance, @@ -51,8 +45,8 @@ static GENESIS_CUSTOM_ACCOUNTS: Lazy> = Lazy::new(|| { ) }; let account_2 = { - let account_2_balance = Motes::new(ACCOUNT_2_BALANCE.into()); - let account_2_bonded_amount = Motes::new(ACCOUNT_2_BONDED_AMOUNT.into()); + let account_2_balance = Motes::new(ACCOUNT_2_BALANCE); + let account_2_bonded_amount = Motes::new(ACCOUNT_2_BONDED_AMOUNT); GenesisAccount::account( ACCOUNT_2_PUBLIC_KEY.clone(), account_2_balance, @@ -69,66 +63,52 @@ static GENESIS_CUSTOM_ACCOUNTS: Lazy> = Lazy::new(|| { #[test] fn should_run_genesis() { let protocol_version = ProtocolVersion::V1_0_0; - let wasm_config = *DEFAULT_WASM_CONFIG; - let system_config = *DEFAULT_SYSTEM_CONFIG; - let validator_slots = DEFAULT_VALIDATOR_SLOTS; - let auction_delay = DEFAULT_AUCTION_DELAY; - let locked_funds_period = DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS; - let round_seigniorage_rate = DEFAULT_ROUND_SEIGNIORAGE_RATE; - let unbonding_delay = DEFAULT_UNBONDING_DELAY; - let genesis_timestamp = DEFAULT_GENESIS_TIMESTAMP_MILLIS; - 
let exec_config = ExecConfig::new( + let run_genesis_request = ChainspecConfig::create_genesis_request_from_local_chainspec( GENESIS_CUSTOM_ACCOUNTS.clone(), - wasm_config, - system_config, - validator_slots, - auction_delay, - locked_funds_period, - round_seigniorage_rate, - unbonding_delay, - genesis_timestamp, - ); - let run_genesis_request = - RunGenesisRequest::new(GENESIS_CONFIG_HASH.into(), protocol_version, exec_config); + protocol_version, + ) + .expect("must create genesis request"); - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - builder.run_genesis(&run_genesis_request); + builder.run_genesis(run_genesis_request); - let system_account = builder - .get_account(PublicKey::System.to_account_hash()) + let _system_account = builder + .get_entity_by_account_hash(PublicKey::System.to_account_hash()) .expect("system account should exist"); + let account_1_addr = builder + .get_entity_hash_by_account_hash(*ACCOUNT_1_ADDR) + .expect("must get addr for entity account 1"); + + assert_eq!(account_1_addr.value(), ACCOUNT_1_ADDR.value()); + let account_1 = builder - .get_account(*ACCOUNT_1_ADDR) + .get_entity_by_account_hash(*ACCOUNT_1_ADDR) .expect("account 1 should exist"); let account_2 = builder - .get_account(*ACCOUNT_2_ADDR) + .get_entity_by_account_hash(*ACCOUNT_2_ADDR) .expect("account 2 should exist"); - let system_account_balance_actual = builder.get_purse_balance(system_account.main_purse()); let account_1_balance_actual = builder.get_purse_balance(account_1.main_purse()); let account_2_balance_actual = builder.get_purse_balance(account_2.main_purse()); - assert_eq!(system_account_balance_actual, U512::zero()); assert_eq!(account_1_balance_actual, U512::from(ACCOUNT_1_BALANCE)); assert_eq!(account_2_balance_actual, U512::from(ACCOUNT_2_BALANCE)); - let mint_contract_hash = builder.get_mint_contract_hash(); - let handle_payment_contract_hash = builder.get_handle_payment_contract_hash(); + let 
mint_contract_key = Key::Hash(builder.get_mint_contract_hash().value()); + let handle_payment_contract_key = Key::Hash(builder.get_handle_payment_contract_hash().value()); - let result = builder.query(None, mint_contract_hash.into(), &[]); + let result = builder.query(None, mint_contract_key, &[]); if let Ok(StoredValue::Contract(_)) = result { // Contract exists at mint contract hash } else { panic!("contract not found at mint hash"); } - if let Ok(StoredValue::Contract(_)) = - builder.query(None, handle_payment_contract_hash.into(), &[]) - { + if let Ok(StoredValue::Contract(_)) = builder.query(None, handle_payment_contract_key, &[]) { // Contract exists at handle payment contract hash } else { panic!("contract not found at handle payment hash"); @@ -141,32 +121,38 @@ fn should_track_total_token_supply_in_mint() { let accounts = GENESIS_CUSTOM_ACCOUNTS.clone(); let wasm_config = *DEFAULT_WASM_CONFIG; let system_config = *DEFAULT_SYSTEM_CONFIG; - let protocol_version = ProtocolVersion::V1_0_0; + let protocol_version = DEFAULT_PROTOCOL_VERSION; let validator_slots = DEFAULT_VALIDATOR_SLOTS; let auction_delay = DEFAULT_AUCTION_DELAY; let locked_funds_period = DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS; let round_seigniorage_rate = DEFAULT_ROUND_SEIGNIORAGE_RATE; let unbonding_delay = DEFAULT_UNBONDING_DELAY; - let genesis_tiemstamp = DEFAULT_GENESIS_TIMESTAMP_MILLIS; - let ee_config = ExecConfig::new( - accounts.clone(), - wasm_config, - system_config, - validator_slots, - auction_delay, - locked_funds_period, - round_seigniorage_rate, - unbonding_delay, - genesis_tiemstamp, + let genesis_timestamp = DEFAULT_GENESIS_TIMESTAMP_MILLIS; + let config = GenesisConfigBuilder::default() + .with_accounts(accounts.clone()) + .with_wasm_config(wasm_config) + .with_system_config(system_config) + .with_validator_slots(validator_slots) + .with_auction_delay(auction_delay) + .with_locked_funds_period_millis(locked_funds_period) + .with_round_seigniorage_rate(round_seigniorage_rate) + 
.with_unbonding_delay(unbonding_delay) + .with_genesis_timestamp_millis(genesis_timestamp) + .with_storage_costs(*DEFAULT_STORAGE_COSTS) + .build(); + + let genesis_request = GenesisRequest::new( + GENESIS_CONFIG_HASH.into(), + protocol_version, + config, + DEFAULT_CHAINSPEC_REGISTRY.clone(), ); - let run_genesis_request = - RunGenesisRequest::new(GENESIS_CONFIG_HASH.into(), protocol_version, ee_config); - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - builder.run_genesis(&run_genesis_request); + builder.run_genesis(genesis_request); - let total_supply = builder.total_supply(None); + let total_supply = builder.total_supply(protocol_version, None); let expected_balance: U512 = accounts.iter().map(|item| item.balance().value()).sum(); let expected_staked_amount: U512 = accounts diff --git a/execution_engine_testing/tests/src/test/system_contracts/handle_payment/finalize_payment.rs b/execution_engine_testing/tests/src/test/system_contracts/handle_payment/finalize_payment.rs index b7d45b17a9..bee2c2a74d 100644 --- a/execution_engine_testing/tests/src/test/system_contracts/handle_payment/finalize_payment.rs +++ b/execution_engine_testing/tests/src/test/system_contracts/handle_payment/finalize_payment.rs @@ -1,13 +1,7 @@ -use std::convert::TryInto; - use casper_engine_test_support::{ - internal::{ - DeployItemBuilder, ExecuteRequestBuilder, InMemoryWasmTestBuilder, DEFAULT_PAYMENT, - DEFAULT_RUN_GENESIS_REQUEST, SYSTEM_ADDR, - }, - DEFAULT_ACCOUNT_ADDR, MINIMUM_ACCOUNT_CREATION_BALANCE, + DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, + DEFAULT_PAYMENT, LOCAL_GENESIS_REQUEST, MINIMUM_ACCOUNT_CREATION_BALANCE, SYSTEM_ADDR, }; -use casper_execution_engine::shared::account::Account; use casper_types::{ account::AccountHash, runtime_args, system::handle_payment, Key, RuntimeArgs, URef, U512, }; @@ -17,6 +11,9 @@ const CONTRACT_TRANSFER_PURSE_TO_ACCOUNT: &str = 
"transfer_purse_to_account.wasm const FINALIZE_PAYMENT: &str = "finalize_payment.wasm"; const LOCAL_REFUND_PURSE: &str = "local_refund_purse"; +const CREATE_PURSE_01: &str = "create_purse_01.wasm"; +const ARG_PURSE_NAME: &str = "purse_name"; + const ACCOUNT_ADDR: AccountHash = AccountHash::new([1u8; 32]); pub const ARG_AMOUNT: &str = "amount"; pub const ARG_AMOUNT_SPENT: &str = "amount_spent"; @@ -24,8 +21,8 @@ pub const ARG_REFUND_FLAG: &str = "refund"; pub const ARG_ACCOUNT_KEY: &str = "account"; pub const ARG_TARGET: &str = "target"; -fn initialize() -> InMemoryWasmTestBuilder { - let mut builder = InMemoryWasmTestBuilder::default(); +fn initialize() -> LmdbWasmTestBuilder { + let mut builder = LmdbWasmTestBuilder::default(); let exec_request_1 = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, @@ -44,7 +41,7 @@ fn initialize() -> InMemoryWasmTestBuilder { ) .build(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); builder.exec(exec_request_1).expect_success().commit(); @@ -82,9 +79,10 @@ fn finalize_payment_should_not_be_run_by_non_system_accounts() { } #[ignore] -#[test] +#[allow(unused)] +// #[test] fn finalize_payment_should_refund_to_specified_purse() { - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); let payment_amount = *DEFAULT_PAYMENT; let refund_purse_flag: u8 = 1; // Don't need to run finalize_payment manually, it happens during @@ -94,11 +92,26 @@ fn finalize_payment_should_refund_to_specified_purse() { ARG_REFUND_FLAG => refund_purse_flag, ARG_AMOUNT_SPENT => Option::::None, ARG_ACCOUNT_KEY => Option::::None, + ARG_PURSE_NAME => LOCAL_REFUND_PURSE, + }; + + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let create_purse_request = { + ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CREATE_PURSE_01, + runtime_args! 
{ + ARG_PURSE_NAME => LOCAL_REFUND_PURSE, + }, + ) + .build() }; - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + builder.exec(create_purse_request).expect_success().commit(); let rewards_pre_balance = builder.get_proposer_purse_balance(); + let payment_pre_balance = get_handle_payment_payment_purse_balance(&builder); let refund_pre_balance = get_named_account_balance(&builder, *DEFAULT_ACCOUNT_ADDR, LOCAL_REFUND_PURSE) @@ -113,19 +126,17 @@ fn finalize_payment_should_refund_to_specified_purse() { "payment purse should start with zero balance" ); - let exec_request = { - let genesis_account_hash = *DEFAULT_ACCOUNT_ADDR; + let genesis_account_hash = *DEFAULT_ACCOUNT_ADDR; - let deploy = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_deploy_hash([1; 32]) - .with_session_code("do_nothing.wasm", RuntimeArgs::default()) - .with_payment_code(FINALIZE_PAYMENT, args) - .with_authorization_keys(&[genesis_account_hash]) - .build(); + let deploy_item = DeployItemBuilder::new() + .with_address(*DEFAULT_ACCOUNT_ADDR) + .with_deploy_hash([1; 32]) + .with_session_code("do_nothing.wasm", RuntimeArgs::default()) + .with_payment_code(FINALIZE_PAYMENT, args) + .with_authorization_keys(&[genesis_account_hash]) + .build(); - ExecuteRequestBuilder::new().push_deploy(deploy).build() - }; + let exec_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); let proposer_reward_starting_balance = builder.get_proposer_purse_balance(); @@ -164,13 +175,13 @@ fn finalize_payment_should_refund_to_specified_purse() { // ------------- utility functions -------------------- // -fn get_handle_payment_payment_purse_balance(builder: &InMemoryWasmTestBuilder) -> U512 { +fn get_handle_payment_payment_purse_balance(builder: &LmdbWasmTestBuilder) -> U512 { let purse = get_payment_purse_by_name(builder, handle_payment::PAYMENT_PURSE_KEY) .expect("should find handle payment payment purse"); builder.get_purse_balance(purse) } -fn 
get_handle_payment_refund_purse(builder: &InMemoryWasmTestBuilder) -> Option { +fn get_handle_payment_refund_purse(builder: &LmdbWasmTestBuilder) -> Option { let handle_payment_contract = builder.get_handle_payment_contract(); handle_payment_contract .named_keys() @@ -178,7 +189,7 @@ fn get_handle_payment_refund_purse(builder: &InMemoryWasmTestBuilder) -> Option< .cloned() } -fn get_payment_purse_by_name(builder: &InMemoryWasmTestBuilder, purse_name: &str) -> Option { +fn get_payment_purse_by_name(builder: &LmdbWasmTestBuilder, purse_name: &str) -> Option { let handle_payment_contract = builder.get_handle_payment_contract(); handle_payment_contract .named_keys() @@ -188,16 +199,13 @@ fn get_payment_purse_by_name(builder: &InMemoryWasmTestBuilder, purse_name: &str } fn get_named_account_balance( - builder: &InMemoryWasmTestBuilder, + builder: &LmdbWasmTestBuilder, account_address: AccountHash, name: &str, ) -> Option { - let account_key = Key::Account(account_address); - - let account: Account = builder - .query(None, account_key, &[]) - .and_then(|v| v.try_into().map_err(|error| format!("{:?}", error))) - .expect("should find balance uref"); + let account = builder + .get_entity_with_named_keys_by_account_hash(account_address) + .expect("should have account"); let purse = account .named_keys() diff --git a/execution_engine_testing/tests/src/test/system_contracts/handle_payment/get_payment_purse.rs b/execution_engine_testing/tests/src/test/system_contracts/handle_payment/get_payment_purse.rs index 3e2344a812..4b5219067b 100644 --- a/execution_engine_testing/tests/src/test/system_contracts/handle_payment/get_payment_purse.rs +++ b/execution_engine_testing/tests/src/test/system_contracts/handle_payment/get_payment_purse.rs @@ -1,11 +1,8 @@ use casper_engine_test_support::{ - internal::{ - ExecuteRequestBuilder, InMemoryWasmTestBuilder, DEFAULT_PAYMENT, - DEFAULT_RUN_GENESIS_REQUEST, - }, - DEFAULT_ACCOUNT_ADDR, MINIMUM_ACCOUNT_CREATION_BALANCE, + 
ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, DEFAULT_PAYMENT, + LOCAL_GENESIS_REQUEST, MINIMUM_ACCOUNT_CREATION_BALANCE, }; -use casper_types::{account::AccountHash, runtime_args, RuntimeArgs, U512}; +use casper_types::{account::AccountHash, runtime_args, U512}; const CONTRACT_GET_PAYMENT_PURSE: &str = "get_payment_purse.wasm"; const CONTRACT_TRANSFER_PURSE_TO_ACCOUNT: &str = "transfer_purse_to_account.wasm"; @@ -15,7 +12,8 @@ const ARG_AMOUNT: &str = "amount"; const ARG_TARGET: &str = "target"; #[ignore] -#[test] +#[allow(unused)] +//#[test] fn should_run_get_payment_purse_contract_default_account() { let exec_request = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, @@ -25,15 +23,16 @@ fn should_run_get_payment_purse_contract_default_account() { }, ) .build(); - InMemoryWasmTestBuilder::default() - .run_genesis(&DEFAULT_RUN_GENESIS_REQUEST) + LmdbWasmTestBuilder::default() + .run_genesis(LOCAL_GENESIS_REQUEST.clone()) .exec(exec_request) .expect_success() .commit(); } #[ignore] -#[test] +#[allow(unused)] +//#[test] fn should_run_get_payment_purse_contract_account_1() { let exec_request_1 = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, @@ -49,8 +48,8 @@ fn should_run_get_payment_purse_contract_account_1() { }, ) .build(); - InMemoryWasmTestBuilder::default() - .run_genesis(&DEFAULT_RUN_GENESIS_REQUEST) + LmdbWasmTestBuilder::default() + .run_genesis(LOCAL_GENESIS_REQUEST.clone()) .exec(exec_request_1) .expect_success() .commit() diff --git a/execution_engine_testing/tests/src/test/system_contracts/handle_payment/refund_purse.rs b/execution_engine_testing/tests/src/test/system_contracts/handle_payment/refund_purse.rs index a3f82267dc..494be66035 100644 --- a/execution_engine_testing/tests/src/test/system_contracts/handle_payment/refund_purse.rs +++ b/execution_engine_testing/tests/src/test/system_contracts/handle_payment/refund_purse.rs @@ -1,15 +1,18 @@ use casper_engine_test_support::{ - internal::{ - DeployItemBuilder, 
ExecuteRequestBuilder, InMemoryWasmTestBuilder, DEFAULT_PAYMENT, - DEFAULT_RUN_GENESIS_REQUEST, - }, - DEFAULT_ACCOUNT_ADDR, MINIMUM_ACCOUNT_CREATION_BALANCE, + DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, + DEFAULT_PAYMENT, LOCAL_GENESIS_REQUEST, MINIMUM_ACCOUNT_CREATION_BALANCE, }; -use casper_types::{account::AccountHash, runtime_args, RuntimeArgs, U512}; +use casper_types::{account::AccountHash, runtime_args, system::mint, RuntimeArgs, U512}; const CONTRACT_TRANSFER_PURSE_TO_ACCOUNT: &str = "transfer_purse_to_account.wasm"; const ACCOUNT_1_ADDR: AccountHash = AccountHash::new([1u8; 32]); const ARG_PAYMENT_AMOUNT: &str = "payment_amount"; +const CREATE_PURSE_01: &str = "create_purse_01.wasm"; +const ARG_PURSE_NAME: &str = "purse_name"; +const ARG_PURSE_NAME_1: &str = "purse_name_1"; +const ARG_PURSE_NAME_2: &str = "purse_name_2"; +const LOCAL_REFUND_PURSE_1: &str = "local_refund_purse_1"; +const LOCAL_REFUND_PURSE_2: &str = "local_refund_purse_2"; #[ignore] #[test] @@ -30,15 +33,15 @@ fn should_run_refund_purse_contract_account_1() { refund_tests(&mut builder, ACCOUNT_1_ADDR); } -fn initialize() -> InMemoryWasmTestBuilder { - let mut builder = InMemoryWasmTestBuilder::default(); +fn initialize() -> LmdbWasmTestBuilder { + let mut builder = LmdbWasmTestBuilder::default(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); builder } -fn transfer(builder: &mut InMemoryWasmTestBuilder, account_hash: AccountHash, amount: U512) { +fn transfer(builder: &mut LmdbWasmTestBuilder, account_hash: AccountHash, amount: U512) { let exec_request = { ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, @@ -54,21 +57,55 @@ fn transfer(builder: &mut InMemoryWasmTestBuilder, account_hash: AccountHash, am builder.exec(exec_request).expect_success().commit(); } -fn refund_tests(builder: &mut InMemoryWasmTestBuilder, account_hash: AccountHash) { - let exec_request = { - let deploy = 
DeployItemBuilder::new() - .with_address(account_hash) - .with_deploy_hash([2; 32]) - .with_session_code("do_nothing.wasm", RuntimeArgs::default()) - .with_payment_code( - "refund_purse.wasm", - runtime_args! { ARG_PAYMENT_AMOUNT => *DEFAULT_PAYMENT }, - ) - .with_authorization_keys(&[account_hash]) - .build(); +fn refund_tests(builder: &mut LmdbWasmTestBuilder, account_hash: AccountHash) { + let create_purse_request_1 = { + ExecuteRequestBuilder::standard( + account_hash, + CREATE_PURSE_01, + runtime_args! { + ARG_PURSE_NAME => LOCAL_REFUND_PURSE_1, + }, + ) + .build() + }; - ExecuteRequestBuilder::new().push_deploy(deploy).build() + let create_purse_request_2 = { + ExecuteRequestBuilder::standard( + account_hash, + CREATE_PURSE_01, + runtime_args! { + ARG_PURSE_NAME => LOCAL_REFUND_PURSE_2, + }, + ) + .build() }; - builder.exec(exec_request).expect_success().commit(); + builder + .exec(create_purse_request_1) + .expect_success() + .commit(); + builder + .exec(create_purse_request_2) + .expect_success() + .commit(); + + let deploy_item = DeployItemBuilder::new() + .with_address(account_hash) + .with_deploy_hash([2; 32]) + .with_session_code("do_nothing.wasm", RuntimeArgs::default()) + .with_payment_code( + "refund_purse.wasm", + runtime_args! 
{ + ARG_PAYMENT_AMOUNT => *DEFAULT_PAYMENT, + mint::ARG_AMOUNT => *DEFAULT_PAYMENT, + ARG_PURSE_NAME_1 => LOCAL_REFUND_PURSE_1, + ARG_PURSE_NAME_2 => LOCAL_REFUND_PURSE_2, + }, + ) + .with_authorization_keys(&[account_hash]) + .build(); + + let refund_purse_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); + + builder.exec(refund_purse_request).expect_success().commit(); } diff --git a/execution_engine_testing/tests/src/test/system_contracts/mint.rs b/execution_engine_testing/tests/src/test/system_contracts/mint.rs new file mode 100644 index 0000000000..98489d8b74 --- /dev/null +++ b/execution_engine_testing/tests/src/test/system_contracts/mint.rs @@ -0,0 +1,235 @@ +use casper_engine_test_support::{ + ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST, +}; +use casper_types::{runtime_args, ProtocolVersion, URef, U512}; + +use casper_storage::data_access_layer::BalanceIdentifier; +use tempfile::TempDir; + +// const TEST_DELEGATOR_INITIAL_ACCOUNT_BALANCE: u64 = 1_000_000 * 1_000_000_000; + +const CONTRACT_BURN: &str = "burn.wasm"; +const CONTRACT_TRANSFER_TO_NAMED_PURSE: &str = "transfer_to_named_purse.wasm"; + +const ARG_AMOUNT: &str = "amount"; + +const ARG_PURSE_NAME: &str = "purse_name"; + +#[ignore] +#[test] +fn should_empty_purse_when_burning_above_balance() { + let data_dir = TempDir::new().expect("should create temp dir"); + let mut builder = LmdbWasmTestBuilder::new(data_dir.as_ref()); + let source = *DEFAULT_ACCOUNT_ADDR; + + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + // let delegator_keys = auction::generate_public_keys(1); + // let validator_keys = auction::generate_public_keys(1); + + // run_genesis_and_create_initial_accounts( + // &mut builder, + // &validator_keys, + // delegator_keys + // .iter() + // .map(|public_key| public_key.to_account_hash()) + // .collect::>(), + // U512::from(TEST_DELEGATOR_INITIAL_ACCOUNT_BALANCE), + // ); + + let initial_supply = 
builder.total_supply(ProtocolVersion::V2_0_0, None); + let purse_name = "purse"; + let purse_amount = U512::from(10_000_000_000u64); + + // Create purse and transfer tokens to it + let exec_request = ExecuteRequestBuilder::standard( + source, + CONTRACT_TRANSFER_TO_NAMED_PURSE, + runtime_args! { + ARG_PURSE_NAME => purse_name, + ARG_AMOUNT => purse_amount, + }, + ) + .build(); + + builder.exec(exec_request).expect_success().commit(); + + let account = builder + .get_entity_with_named_keys_by_account_hash(source) + .expect("should have account"); + + let purse_uref: URef = account + .named_keys() + .get(purse_name) + .unwrap() + .into_uref() + .expect("should be uref"); + + assert_eq!( + builder + .get_purse_balance_result_with_proofs( + ProtocolVersion::V2_0_0, + BalanceIdentifier::Purse(purse_uref) + ) + .total_balance() + .cloned() + .unwrap(), + purse_amount + ); + + // Burn part of tokens in a purse + let num_of_tokens_to_burn = U512::from(2_000_000_000u64); + let num_of_tokens_after_burn = U512::from(8_000_000_000u64); + + let exec_request = ExecuteRequestBuilder::standard( + source, + CONTRACT_BURN, + runtime_args! { + ARG_PURSE_NAME => purse_name, + ARG_AMOUNT => num_of_tokens_to_burn, + }, + ) + .build(); + + builder.exec(exec_request).expect_success().commit(); + + assert_eq!( + builder + .get_purse_balance_result_with_proofs( + ProtocolVersion::V2_0_0, + BalanceIdentifier::Purse(purse_uref) + ) + .total_balance() + .cloned() + .unwrap(), + num_of_tokens_after_burn + ); + + // Burn rest of tokens in a purse + let num_of_tokens_to_burn = U512::from(8_000_000_000u64); + let num_of_tokens_after_burn = U512::zero(); + + let exec_request = ExecuteRequestBuilder::standard( + source, + CONTRACT_BURN, + runtime_args! 
{ + ARG_PURSE_NAME => purse_name, + ARG_AMOUNT => num_of_tokens_to_burn, + }, + ) + .build(); + + builder.exec(exec_request).expect_success().commit(); + + assert_eq!( + builder + .get_purse_balance_result_with_proofs( + ProtocolVersion::V2_0_0, + BalanceIdentifier::Purse(purse_uref) + ) + .total_balance() + .cloned() + .unwrap(), + num_of_tokens_after_burn + ); + + let supply_after_burns = builder.total_supply(ProtocolVersion::V2_0_0, None); + let expected_supply_after_burns = initial_supply - U512::from(10_000_000_000u64); + + assert_eq!(supply_after_burns, expected_supply_after_burns); +} + +#[ignore] +#[test] +fn should_not_burn_excess_tokens() { + let data_dir = TempDir::new().expect("should create temp dir"); + let mut builder = LmdbWasmTestBuilder::new(data_dir.as_ref()); + let source = *DEFAULT_ACCOUNT_ADDR; + + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + // let delegator_keys = auction::generate_public_keys(1); + // let validator_keys = auction::generate_public_keys(1); + // + // run_genesis_and_create_initial_accounts( + // &mut builder, + // &validator_keys, + // delegator_keys + // .iter() + // .map(|public_key| public_key.to_account_hash()) + // .collect::>(), + // U512::from(TEST_DELEGATOR_INITIAL_ACCOUNT_BALANCE), + // ); + + let initial_supply = builder.total_supply(ProtocolVersion::V2_0_0, None); + let purse_name = "purse"; + let purse_amount = U512::from(10_000_000_000u64); + + // Create purse and transfer tokens to it + let exec_request = ExecuteRequestBuilder::standard( + source, + CONTRACT_TRANSFER_TO_NAMED_PURSE, + runtime_args! 
{ + ARG_PURSE_NAME => purse_name, + ARG_AMOUNT => purse_amount, + }, + ) + .build(); + + builder.exec(exec_request).expect_success().commit(); + + let account = builder + .get_entity_with_named_keys_by_account_hash(source) + .expect("should have account"); + + let purse_uref: URef = account + .named_keys() + .get(purse_name) + .unwrap() + .into_uref() + .expect("should be uref"); + + assert_eq!( + builder + .get_purse_balance_result_with_proofs( + ProtocolVersion::V2_0_0, + BalanceIdentifier::Purse(purse_uref) + ) + .total_balance() + .cloned() + .unwrap(), + purse_amount + ); + + // Try to burn more then in a purse + let num_of_tokens_to_burn = U512::MAX; + let num_of_tokens_after_burn = U512::zero(); + + let exec_request = ExecuteRequestBuilder::standard( + source, + CONTRACT_BURN, + runtime_args! { + ARG_PURSE_NAME => purse_name, + ARG_AMOUNT => num_of_tokens_to_burn, + }, + ) + .build(); + + builder.exec(exec_request).expect_success().commit(); + + assert_eq!( + builder + .get_purse_balance_result_with_proofs( + ProtocolVersion::V2_0_0, + BalanceIdentifier::Purse(purse_uref) + ) + .total_balance() + .cloned() + .unwrap(), + num_of_tokens_after_burn, + ); + + let supply_after_burns = builder.total_supply(ProtocolVersion::V2_0_0, None); + let expected_supply_after_burns = initial_supply - U512::from(10_000_000_000u64); + + assert_eq!(supply_after_burns, expected_supply_after_burns); +} diff --git a/execution_engine_testing/tests/src/test/system_contracts/mod.rs b/execution_engine_testing/tests/src/test/system_contracts/mod.rs index a504b7347e..a2fe0ef6ef 100644 --- a/execution_engine_testing/tests/src/test/system_contracts/mod.rs +++ b/execution_engine_testing/tests/src/test/system_contracts/mod.rs @@ -2,6 +2,6 @@ mod auction; mod auction_bidding; mod genesis; mod handle_payment; +mod mint; mod standard_payment; -mod system_hashes; mod upgrade; diff --git a/execution_engine_testing/tests/src/test/system_contracts/standard_payment.rs 
b/execution_engine_testing/tests/src/test/system_contracts/standard_payment.rs index 1e03baeb7d..19c0455c65 100644 --- a/execution_engine_testing/tests/src/test/system_contracts/standard_payment.rs +++ b/execution_engine_testing/tests/src/test/system_contracts/standard_payment.rs @@ -1,480 +1,55 @@ +use std::collections::HashMap; + use assert_matches::assert_matches; use casper_engine_test_support::{ - internal::{ - utils, DeployItemBuilder, ExecuteRequestBuilder, InMemoryWasmTestBuilder, - DEFAULT_ACCOUNT_KEY, DEFAULT_GAS_PRICE, DEFAULT_PAYMENT, DEFAULT_RUN_GENESIS_REQUEST, - }, - DEFAULT_ACCOUNT_ADDR, DEFAULT_ACCOUNT_INITIAL_BALANCE, MINIMUM_ACCOUNT_CREATION_BALANCE, + DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, + DEFAULT_ACCOUNT_KEY, DEFAULT_PAYMENT, LOCAL_GENESIS_REQUEST, MINIMUM_ACCOUNT_CREATION_BALANCE, }; -use casper_execution_engine::{ - core::{ - engine_state::{Error, MAX_PAYMENT}, - execution, - }, - shared::{gas::Gas, motes::Motes, transform::Transform}, +use casper_execution_engine::{engine_state::Error, execution::ExecError}; +use casper_types::{ + account::AccountHash, execution::TransformKindV2, runtime_args, system::handle_payment, + ApiError, Key, RuntimeArgs, U512, }; -use casper_types::{account::AccountHash, runtime_args, ApiError, RuntimeArgs, U512}; const ACCOUNT_1_ADDR: AccountHash = AccountHash::new([42u8; 32]); const DO_NOTHING_WASM: &str = "do_nothing.wasm"; const TRANSFER_PURSE_TO_ACCOUNT_WASM: &str = "transfer_purse_to_account.wasm"; const REVERT_WASM: &str = "revert.wasm"; -const ENDLESS_LOOP_WASM: &str = "endless_loop.wasm"; const ARG_AMOUNT: &str = "amount"; const ARG_TARGET: &str = "target"; -#[ignore] -#[test] -fn should_raise_insufficient_payment_when_caller_lacks_minimum_balance() { - let account_1_account_hash = ACCOUNT_1_ADDR; - - let exec_request = ExecuteRequestBuilder::standard( - *DEFAULT_ACCOUNT_ADDR, - TRANSFER_PURSE_TO_ACCOUNT_WASM, - runtime_args! 
{ ARG_TARGET => account_1_account_hash, ARG_AMOUNT => *MAX_PAYMENT - U512::one() }, - ) - .build(); - - let mut builder = InMemoryWasmTestBuilder::default(); - - let _response = builder - .run_genesis(&DEFAULT_RUN_GENESIS_REQUEST) - .exec(exec_request) - .expect_success() - .commit() - .get_exec_result(0) - .expect("there should be a response") - .to_owned(); - - let account_1_request = - ExecuteRequestBuilder::standard(ACCOUNT_1_ADDR, REVERT_WASM, RuntimeArgs::default()) - .build(); - - let account_1_response = builder - .exec(account_1_request) - .commit() - .get_exec_result(1) - .expect("there should be a response"); - - let error_message = utils::get_error_message(account_1_response); - - assert!( - error_message.contains("InsufficientPayment"), - "expected insufficient payment, got: {}", - error_message - ); - - let expected_transfers_count = 0; - let transforms = builder.get_transforms(); - let transform = &transforms[1]; - - assert_eq!( - transform.len(), - expected_transfers_count, - "there should be no transforms if the account main purse has less than max payment" - ); -} - #[ignore] #[test] fn should_forward_payment_execution_runtime_error() { let account_1_account_hash = ACCOUNT_1_ADDR; let transferred_amount = U512::from(1); - let exec_request = { - let deploy = DeployItemBuilder::new() + let deploy_item = DeployItemBuilder::new() .with_address(*DEFAULT_ACCOUNT_ADDR) .with_deploy_hash([1; 32]) .with_payment_code(REVERT_WASM, RuntimeArgs::default()) .with_session_code( TRANSFER_PURSE_TO_ACCOUNT_WASM, - runtime_args! 
{ ARG_TARGET => account_1_account_hash, ARG_AMOUNT => transferred_amount } - ) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_KEY]) - .build(); - - ExecuteRequestBuilder::new().push_deploy(deploy).build() - }; - - let mut builder = InMemoryWasmTestBuilder::default(); - - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); - - let proposer_reward_starting_balance = builder.get_proposer_purse_balance(); - - builder.exec(exec_request).commit().finish(); - - let transaction_fee = builder.get_proposer_purse_balance() - proposer_reward_starting_balance; - let initial_balance: U512 = U512::from(DEFAULT_ACCOUNT_INITIAL_BALANCE); - let expected_reward_balance = *MAX_PAYMENT; - - let modified_balance = builder.get_purse_balance( - builder - .get_account(*DEFAULT_ACCOUNT_ADDR) - .expect("should have account") - .main_purse(), - ); - - assert_eq!( - modified_balance, - initial_balance - expected_reward_balance, - "modified balance is incorrect" - ); - - assert_eq!( - transaction_fee, expected_reward_balance, - "transaction fee is incorrect" - ); - - assert_eq!( - initial_balance, - (modified_balance + transaction_fee), - "no net resources should be gained or lost post-distribution" - ); - - let response = builder - .get_exec_result(0) - .expect("there should be a response"); - - let execution_result = utils::get_success_result(response); - let error = execution_result.as_error().expect("should have error"); - assert_matches!( - error, - Error::Exec(execution::Error::Revert(ApiError::User(100))) - ); -} - -#[ignore] -#[test] -fn should_forward_payment_execution_gas_limit_error() { - let account_1_account_hash = ACCOUNT_1_ADDR; - let transferred_amount = U512::from(1); - - let mut builder = InMemoryWasmTestBuilder::default(); - - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); - - let exec_request = { - let deploy = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_deploy_hash([1; 32]) - .with_payment_code(ENDLESS_LOOP_WASM, RuntimeArgs::default()) - 
.with_session_code( - TRANSFER_PURSE_TO_ACCOUNT_WASM, - runtime_args! { ARG_TARGET => account_1_account_hash, ARG_AMOUNT => transferred_amount } + runtime_args! { ARG_TARGET => account_1_account_hash, ARG_AMOUNT => transferred_amount }, ) .with_authorization_keys(&[*DEFAULT_ACCOUNT_KEY]) .build(); - ExecuteRequestBuilder::new().push_deploy(deploy).build() - }; - - let proposer_reward_starting_balance = builder.get_proposer_purse_balance(); - - builder.exec(exec_request).commit().finish(); - - let transaction_fee = builder.get_proposer_purse_balance() - proposer_reward_starting_balance; - let initial_balance: U512 = U512::from(DEFAULT_ACCOUNT_INITIAL_BALANCE); - let expected_reward_balance = *MAX_PAYMENT; - - let modified_balance = builder.get_purse_balance( - builder - .get_account(*DEFAULT_ACCOUNT_ADDR) - .expect("should have account") - .main_purse(), - ); - - assert_eq!( - modified_balance, - initial_balance - expected_reward_balance, - "modified balance is incorrect" - ); - - assert_eq!( - transaction_fee, expected_reward_balance, - "transaction fee is incorrect" - ); - - assert_eq!( - initial_balance, - (modified_balance + transaction_fee), - "no net resources should be gained or lost post-distribution" - ); - - let response = builder - .get_exec_result(0) - .expect("there should be a response"); - - let execution_result = utils::get_success_result(response); - let error = execution_result.as_error().expect("should have error"); - assert_matches!(error, Error::Exec(execution::Error::GasLimit)); - let payment_gas_limit = Gas::from_motes(Motes::new(*MAX_PAYMENT), DEFAULT_GAS_PRICE) - .expect("should convert to gas"); - assert_eq!( - execution_result.cost(), - payment_gas_limit, - "cost should equal gas limit" - ); -} - -#[ignore] -#[test] -fn should_run_out_of_gas_when_session_code_exceeds_gas_limit() { - let account_1_account_hash = ACCOUNT_1_ADDR; - let payment_purse_amount = *DEFAULT_PAYMENT; - let transferred_amount = 1; - - let exec_request = { - let deploy 
= DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_deploy_hash([1; 32]) - .with_empty_payment_bytes(runtime_args! { ARG_AMOUNT => payment_purse_amount }) - .with_session_code( - ENDLESS_LOOP_WASM, - runtime_args! { ARG_TARGET => account_1_account_hash, ARG_AMOUNT => U512::from(transferred_amount) }, - ) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_KEY]) - .build(); - - ExecuteRequestBuilder::new().push_deploy(deploy).build() - }; - - let mut builder = InMemoryWasmTestBuilder::default(); - - let transfer_result = builder - .run_genesis(&DEFAULT_RUN_GENESIS_REQUEST) - .exec(exec_request) - .commit() - .finish(); - - let response = transfer_result - .builder() - .get_exec_result(0) - .expect("there should be a response"); - - let execution_result = utils::get_success_result(response); - let error = execution_result.as_error().expect("should have error"); - assert_matches!(error, Error::Exec(execution::Error::GasLimit)); - let session_gas_limit = Gas::from_motes(Motes::new(payment_purse_amount), DEFAULT_GAS_PRICE) - .expect("should convert to gas"); - assert_eq!( - execution_result.cost(), - session_gas_limit, - "cost should equal gas limit" - ); -} - -#[ignore] -#[test] -fn should_correctly_charge_when_session_code_runs_out_of_gas() { - let payment_purse_amount = *DEFAULT_PAYMENT; - - let exec_request = { - let deploy = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_deploy_hash([1; 32]) - .with_empty_payment_bytes(runtime_args! 
{ ARG_AMOUNT => payment_purse_amount }) - .with_session_code(ENDLESS_LOOP_WASM, RuntimeArgs::default()) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_KEY]) - .build(); - - ExecuteRequestBuilder::new().push_deploy(deploy).build() - }; + let exec_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - builder - .run_genesis(&DEFAULT_RUN_GENESIS_REQUEST) - .exec(exec_request) - .commit() - .finish(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); - let default_account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) - .expect("should get genesis account"); - let modified_balance: U512 = builder.get_purse_balance(default_account.main_purse()); - let initial_balance: U512 = U512::from(DEFAULT_ACCOUNT_INITIAL_BALANCE); + builder.exec(exec_request).commit(); - assert_ne!( - modified_balance, initial_balance, - "balance should be less than initial balance" - ); - - let response = builder - .get_exec_result(0) + let exec_result = builder + .get_exec_result_owned(0) .expect("there should be a response"); - let success_result = utils::get_success_result(&response); - let gas = success_result.cost(); - let motes = Motes::from_gas(gas, DEFAULT_GAS_PRICE).expect("should have motes"); - - let tally = motes.value() + modified_balance; - - assert_eq!( - initial_balance, tally, - "no net resources should be gained or lost post-distribution" - ); - - let execution_result = utils::get_success_result(response); - let error = execution_result.as_error().expect("should have error"); - assert_matches!(error, Error::Exec(execution::Error::GasLimit)); - let session_gas_limit = Gas::from_motes(Motes::new(payment_purse_amount), DEFAULT_GAS_PRICE) - .expect("should convert to gas"); - assert_eq!( - execution_result.cost(), - session_gas_limit, - "cost should equal gas limit" - ); -} - -#[ignore] -#[test] -fn should_correctly_charge_when_session_code_fails() { - 
let account_1_account_hash = ACCOUNT_1_ADDR; - let payment_purse_amount = *DEFAULT_PAYMENT; - let transferred_amount = 1; - - let exec_request = { - let deploy = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_deploy_hash([1; 32]) - .with_empty_payment_bytes(runtime_args! { ARG_AMOUNT => payment_purse_amount }) - .with_session_code( - REVERT_WASM, - runtime_args! { ARG_TARGET => account_1_account_hash, ARG_AMOUNT => U512::from(transferred_amount) }, - ) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_KEY]) - .build(); - - ExecuteRequestBuilder::new().push_deploy(deploy).build() - }; - - let mut builder = InMemoryWasmTestBuilder::default(); - - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); - - let proposer_reward_starting_balance = builder.get_proposer_purse_balance(); - - builder.exec(exec_request).commit().finish(); - - let default_account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) - .expect("should get genesis account"); - let modified_balance: U512 = builder.get_purse_balance(default_account.main_purse()); - let initial_balance: U512 = U512::from(DEFAULT_ACCOUNT_INITIAL_BALANCE); - - assert_ne!( - modified_balance, initial_balance, - "balance should be less than initial balance" - ); - - let transaction_fee = builder.get_proposer_purse_balance() - proposer_reward_starting_balance; - let tally = transaction_fee + modified_balance; - - assert_eq!( - initial_balance, tally, - "no net resources should be gained or lost post-distribution" - ); -} - -#[ignore] -#[test] -fn should_correctly_charge_when_session_code_succeeds() { - let account_1_account_hash = ACCOUNT_1_ADDR; - let payment_purse_amount = *DEFAULT_PAYMENT; - let transferred_amount = 1; - - let exec_request = { - let deploy = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_deploy_hash([1; 32]) - .with_session_code( - TRANSFER_PURSE_TO_ACCOUNT_WASM, - runtime_args! 
{ ARG_TARGET => account_1_account_hash, ARG_AMOUNT => U512::from(transferred_amount) }, - ) - .with_empty_payment_bytes(runtime_args! { ARG_AMOUNT => payment_purse_amount }) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_KEY]) - .build(); - - ExecuteRequestBuilder::new().push_deploy(deploy).build() - }; - - let mut builder = InMemoryWasmTestBuilder::default(); - - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); - - let proposer_reward_starting_balance_1 = builder.get_proposer_purse_balance(); - - builder - .exec(exec_request) - .expect_success() - .commit() - .finish(); - - let default_account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) - .expect("should get genesis account"); - let modified_balance: U512 = builder.get_purse_balance(default_account.main_purse()); - let initial_balance: U512 = U512::from(DEFAULT_ACCOUNT_INITIAL_BALANCE); - - assert_ne!( - modified_balance, initial_balance, - "balance should be less than initial balance" - ); - - let transaction_fee_1 = - builder.get_proposer_purse_balance() - proposer_reward_starting_balance_1; - - let total = transaction_fee_1 + U512::from(transferred_amount); - let tally = total + modified_balance; - - assert_eq!( - initial_balance, tally, - "no net resources should be gained or lost post-distribution" - ); - assert_eq!( - initial_balance, tally, - "no net resources should be gained or lost post-distribution" - ) -} - -#[ignore] -#[test] -fn should_finalize_to_rewards_purse() { - let account_1_account_hash = ACCOUNT_1_ADDR; - let payment_purse_amount = *DEFAULT_PAYMENT; - let transferred_amount = 1; - - let exec_request = { - let deploy = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_session_code( - TRANSFER_PURSE_TO_ACCOUNT_WASM, - runtime_args! { ARG_TARGET => account_1_account_hash, ARG_AMOUNT => U512::from(transferred_amount) }, - ) - .with_empty_payment_bytes(runtime_args! 
{ ARG_AMOUNT => payment_purse_amount }) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_KEY]) - .with_deploy_hash([1; 32]) - .build(); - - ExecuteRequestBuilder::new().push_deploy(deploy).build() - }; - - let mut builder = InMemoryWasmTestBuilder::default(); - - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); - - let proposer_reward_starting_balance = builder.get_proposer_purse_balance(); - - builder.exec(exec_request).expect_success().commit(); - - let modified_reward_starting_balance = builder.get_proposer_purse_balance(); - - assert!( - modified_reward_starting_balance > proposer_reward_starting_balance, - "proposer's balance should be higher after exec" - ); + let error = exec_result.error().expect("should have error"); + assert_matches!(error, Error::Exec(ExecError::Revert(ApiError::User(100)))); } #[ignore] @@ -484,53 +59,47 @@ fn independent_standard_payments_should_not_write_the_same_keys() { let payment_purse_amount = *DEFAULT_PAYMENT; let transfer_amount = MINIMUM_ACCOUNT_CREATION_BALANCE; - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - let setup_exec_request = { - let deploy = DeployItemBuilder::new() + let deploy_item = DeployItemBuilder::new() .with_address(*DEFAULT_ACCOUNT_ADDR) .with_session_code( TRANSFER_PURSE_TO_ACCOUNT_WASM, runtime_args! { ARG_TARGET => account_1_account_hash, ARG_AMOUNT => U512::from(transfer_amount) }, ) - .with_empty_payment_bytes(runtime_args! { ARG_AMOUNT => payment_purse_amount }) + .with_standard_payment(runtime_args! 
{ ARG_AMOUNT => payment_purse_amount }) .with_authorization_keys(&[*DEFAULT_ACCOUNT_KEY]) .with_deploy_hash([1; 32]) .build(); - ExecuteRequestBuilder::new().push_deploy(deploy).build() - }; + let setup_exec_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); // create another account via transfer builder - .run_genesis(&DEFAULT_RUN_GENESIS_REQUEST) + .run_genesis(LOCAL_GENESIS_REQUEST.clone()) .exec(setup_exec_request) .expect_success() .commit(); - let exec_request_from_genesis = { - let deploy = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_session_code(DO_NOTHING_WASM, RuntimeArgs::default()) - .with_empty_payment_bytes(runtime_args! { ARG_AMOUNT => payment_purse_amount }) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_KEY]) - .with_deploy_hash([2; 32]) - .build(); + let deploy_item = DeployItemBuilder::new() + .with_address(*DEFAULT_ACCOUNT_ADDR) + .with_session_code(DO_NOTHING_WASM, RuntimeArgs::default()) + .with_standard_payment(runtime_args! { ARG_AMOUNT => payment_purse_amount }) + .with_authorization_keys(&[*DEFAULT_ACCOUNT_KEY]) + .with_deploy_hash([2; 32]) + .build(); - ExecuteRequestBuilder::new().push_deploy(deploy).build() - }; + let exec_request_from_genesis = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); - let exec_request_from_account_1 = { - let deploy = DeployItemBuilder::new() - .with_address(ACCOUNT_1_ADDR) - .with_session_code(DO_NOTHING_WASM, RuntimeArgs::default()) - .with_empty_payment_bytes(runtime_args! { ARG_AMOUNT => payment_purse_amount }) - .with_authorization_keys(&[account_1_account_hash]) - .with_deploy_hash([1; 32]) - .build(); + let deploy_item = DeployItemBuilder::new() + .with_address(ACCOUNT_1_ADDR) + .with_session_code(DO_NOTHING_WASM, RuntimeArgs::default()) + .with_standard_payment(runtime_args! 
{ ARG_AMOUNT => payment_purse_amount }) + .with_authorization_keys(&[account_1_account_hash]) + .with_deploy_hash([1; 32]) + .build(); - ExecuteRequestBuilder::new().push_deploy(deploy).build() - }; + let exec_request_from_account_1 = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); // run two independent deploys builder @@ -541,20 +110,52 @@ fn independent_standard_payments_should_not_write_the_same_keys() { .expect_success() .commit(); - let transforms = builder.get_transforms(); - let transforms_from_genesis = &transforms[1]; - let transforms_from_account_1 = &transforms[2]; - - // confirm the two deploys have no overlapping writes - let common_write_keys = transforms_from_genesis.keys().filter(|k| { - matches!( - ( - transforms_from_genesis.get(k), - transforms_from_account_1.get(k), - ), - (Some(Transform::Write(_)), Some(Transform::Write(_))) - ) - }); + let effects = builder.get_effects(); + let effects_from_genesis = &effects[1]; + let effects_from_account_1 = &effects[2]; + + // Retrieve the payment purse. + let payment_purse = builder + .get_handle_payment_contract() + .named_keys() + .get(handle_payment::PAYMENT_PURSE_KEY) + .unwrap() + .into_uref() + .unwrap(); + + let transforms_from_genesis_map: HashMap = effects_from_genesis + .transforms() + .iter() + .map(|transform| (*transform.key(), transform.kind().clone())) + .collect(); + let transforms_from_account_1_map: HashMap = effects_from_account_1 + .transforms() + .iter() + .map(|transform| (*transform.key(), transform.kind().clone())) + .collect(); + + // Confirm the two deploys have no overlapping writes except for the payment purse balance. 
+ let common_write_keys = effects_from_genesis + .transforms() + .iter() + .filter_map(|transform| { + if transform.key() != &Key::Balance(payment_purse.addr()) + && matches!( + ( + transforms_from_genesis_map.get(transform.key()), + transforms_from_account_1_map.get(transform.key()), + ), + ( + Some(TransformKindV2::Write(_)), + Some(TransformKindV2::Write(_)) + ) + ) + { + Some(*transform.key()) + } else { + None + } + }); assert_eq!(common_write_keys.count(), 0); } diff --git a/execution_engine_testing/tests/src/test/system_contracts/system_hashes.rs b/execution_engine_testing/tests/src/test/system_contracts/system_hashes.rs deleted file mode 100644 index 6da46c3056..0000000000 --- a/execution_engine_testing/tests/src/test/system_contracts/system_hashes.rs +++ /dev/null @@ -1,24 +0,0 @@ -use casper_engine_test_support::{ - internal::{ExecuteRequestBuilder, InMemoryWasmTestBuilder, DEFAULT_RUN_GENESIS_REQUEST}, - DEFAULT_ACCOUNT_ADDR, -}; -use casper_types::RuntimeArgs; - -const CONTRACT_SYSTEM_HASHES: &str = "system_hashes.wasm"; - -#[ignore] -#[test] -fn should_verify_fixed_system_contract_hashes() { - let mut builder = InMemoryWasmTestBuilder::default(); - - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); - - let exec_request = ExecuteRequestBuilder::standard( - *DEFAULT_ACCOUNT_ADDR, - CONTRACT_SYSTEM_HASHES, - RuntimeArgs::default(), - ) - .build(); - - builder.exec(exec_request).expect_success().commit(); -} diff --git a/execution_engine_testing/tests/src/test/system_contracts/upgrade.rs b/execution_engine_testing/tests/src/test/system_contracts/upgrade.rs index c663f39d7e..2fc54255ab 100644 --- a/execution_engine_testing/tests/src/test/system_contracts/upgrade.rs +++ b/execution_engine_testing/tests/src/test/system_contracts/upgrade.rs @@ -1,74 +1,45 @@ use std::collections::BTreeMap; -use casper_engine_test_support::internal::{ - InMemoryWasmTestBuilder, UpgradeRequestBuilder, DEFAULT_RUN_GENESIS_REQUEST, - DEFAULT_UNBONDING_DELAY, DEFAULT_WASM_CONFIG, 
-}; +use num_rational::Ratio; -use casper_execution_engine::shared::{ - host_function_costs::HostFunctionCosts, - opcode_costs::{ - OpcodeCosts, DEFAULT_ADD_COST, DEFAULT_BIT_COST, DEFAULT_CONST_COST, - DEFAULT_CONTROL_FLOW_COST, DEFAULT_CONVERSION_COST, DEFAULT_CURRENT_MEMORY_COST, - DEFAULT_DIV_COST, DEFAULT_GLOBAL_COST, DEFAULT_GROW_MEMORY_COST, - DEFAULT_INTEGER_COMPARISON_COST, DEFAULT_LOAD_COST, DEFAULT_LOCAL_COST, DEFAULT_MUL_COST, - DEFAULT_NOP_COST, DEFAULT_REGULAR_COST, DEFAULT_STORE_COST, DEFAULT_UNREACHABLE_COST, - }, - storage_costs::StorageCosts, - stored_value::StoredValue, - wasm_config::{WasmConfig, DEFAULT_MAX_STACK_HEIGHT, DEFAULT_WASM_MAX_MEMORY}, +use casper_engine_test_support::{ + ChainspecConfig, ExecuteRequestBuilder, LmdbWasmTestBuilder, UpgradeRequestBuilder, + DEFAULT_ACCOUNT_ADDR, DEFAULT_MAX_ASSOCIATED_KEYS, DEFAULT_UNBONDING_DELAY, + LOCAL_GENESIS_REQUEST, }; + +use crate::{lmdb_fixture, lmdb_fixture::ENTRY_REGISTRY_SPECIAL_ADDRESS}; use casper_types::{ + account::{AccountHash, ACCOUNT_HASH_LENGTH}, + contracts::NamedKeys, + runtime_args, system::{ + self, auction::{ - AUCTION_DELAY_KEY, LOCKED_FUNDS_PERIOD_KEY, UNBONDING_DELAY_KEY, VALIDATOR_SLOTS_KEY, + DelegatorKind, SeigniorageRecipientsSnapshotV1, SeigniorageRecipientsSnapshotV2, + AUCTION_DELAY_KEY, DEFAULT_SEIGNIORAGE_RECIPIENTS_SNAPSHOT_VERSION, + LOCKED_FUNDS_PERIOD_KEY, SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY, + SEIGNIORAGE_RECIPIENTS_SNAPSHOT_VERSION_KEY, UNBONDING_DELAY_KEY, VALIDATOR_SLOTS_KEY, }, mint::ROUND_SEIGNIORAGE_RATE_KEY, }, - CLValue, EraId, ProtocolVersion, U512, + Account, AddressableEntityHash, CLValue, CoreConfig, EntityAddr, EraId, Key, ProtocolVersion, + StorageCosts, StoredValue, SystemHashRegistry, U256, U512, }; -use num_rational::Ratio; +use rand::Rng; const PROTOCOL_VERSION: ProtocolVersion = ProtocolVersion::V1_0_0; const DEFAULT_ACTIVATION_POINT: EraId = EraId::new(1); - -fn get_upgraded_wasm_config() -> WasmConfig { - let opcode_cost = OpcodeCosts { - 
bit: DEFAULT_BIT_COST + 1, - add: DEFAULT_ADD_COST + 1, - mul: DEFAULT_MUL_COST + 1, - div: DEFAULT_DIV_COST + 1, - load: DEFAULT_LOAD_COST + 1, - store: DEFAULT_STORE_COST + 1, - op_const: DEFAULT_CONST_COST + 1, - local: DEFAULT_LOCAL_COST + 1, - global: DEFAULT_GLOBAL_COST + 1, - control_flow: DEFAULT_CONTROL_FLOW_COST + 1, - integer_comparison: DEFAULT_INTEGER_COMPARISON_COST + 1, - conversion: DEFAULT_CONVERSION_COST + 1, - unreachable: DEFAULT_UNREACHABLE_COST + 1, - nop: DEFAULT_NOP_COST + 1, - current_memory: DEFAULT_CURRENT_MEMORY_COST + 1, - grow_memory: DEFAULT_GROW_MEMORY_COST + 1, - regular: DEFAULT_REGULAR_COST + 1, - }; - let storage_costs = StorageCosts::default(); - let host_function_costs = HostFunctionCosts::default(); - WasmConfig::new( - DEFAULT_WASM_MAX_MEMORY, - DEFAULT_MAX_STACK_HEIGHT * 2, - opcode_cost, - storage_costs, - host_function_costs, - ) -} +const ARG_ACCOUNT: &str = "account"; #[ignore] #[test] fn should_upgrade_only_protocol_version() { - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + // let old_wasm_config = *builder.get_engine_state().config().wasm_config(); let sem_ver = PROTOCOL_VERSION.value(); let new_protocol_version = @@ -83,106 +54,98 @@ fn should_upgrade_only_protocol_version() { }; builder - .upgrade_with_upgrade_request(&mut upgrade_request) + .upgrade(&mut upgrade_request) .expect_upgrade_success(); - let upgraded_protocol_data = builder - .get_engine_state() - .get_protocol_data(new_protocol_version) - .expect("should have result") - .expect("should have protocol data"); - - assert_eq!( - *DEFAULT_WASM_CONFIG, - *upgraded_protocol_data.wasm_config(), - "upgraded costs should equal original costs" - ); + // let upgraded_engine_config = builder.get_engine_state().config(); + // + // assert_eq!( + // old_wasm_config, + // 
*upgraded_engine_config.wasm_config(), + // "upgraded costs should equal original costs" + // ); } #[ignore] #[test] fn should_allow_only_wasm_costs_patch_version() { - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); let sem_ver = PROTOCOL_VERSION.value(); let new_protocol_version = ProtocolVersion::from_parts(sem_ver.major, sem_ver.minor, sem_ver.patch + 2); - let new_wasm_config = get_upgraded_wasm_config(); + // let new_wasm_config = get_upgraded_wasm_config(); let mut upgrade_request = { UpgradeRequestBuilder::new() .with_current_protocol_version(PROTOCOL_VERSION) .with_new_protocol_version(new_protocol_version) .with_activation_point(DEFAULT_ACTIVATION_POINT) - .with_new_wasm_config(new_wasm_config) .build() }; builder - .upgrade_with_upgrade_request(&mut upgrade_request) + .upgrade(&mut upgrade_request) .expect_upgrade_success(); - let upgraded_protocol_data = builder - .get_engine_state() - .get_protocol_data(new_protocol_version) - .expect("should have result") - .expect("should have upgraded protocol data"); + // let upgraded_engine_config = builder.get_engine_state().config(); - assert_eq!( - new_wasm_config, - *upgraded_protocol_data.wasm_config(), - "upgraded costs should equal new costs" - ); + // assert_eq!( + // new_wasm_config, + // *upgraded_engine_config.wasm_config(), + // "upgraded costs should equal new costs" + // ); } #[ignore] #[test] fn should_allow_only_wasm_costs_minor_version() { - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); let sem_ver = PROTOCOL_VERSION.value(); let new_protocol_version = ProtocolVersion::from_parts(sem_ver.major, sem_ver.minor + 1, sem_ver.patch); - let new_wasm_config = 
get_upgraded_wasm_config(); + // let new_wasm_config = get_upgraded_wasm_config(); let mut upgrade_request = { UpgradeRequestBuilder::new() .with_current_protocol_version(PROTOCOL_VERSION) .with_new_protocol_version(new_protocol_version) .with_activation_point(DEFAULT_ACTIVATION_POINT) - .with_new_wasm_config(new_wasm_config) .build() }; + // let engine_config = EngineConfigBuilder::default() + // .with_wasm_config(new_wasm_config) + // .build(); + builder - .upgrade_with_upgrade_request(&mut upgrade_request) + .upgrade(&mut upgrade_request) .expect_upgrade_success(); - - let upgraded_protocol_data = builder - .get_engine_state() - .get_protocol_data(new_protocol_version) - .expect("should have result") - .expect("should have upgraded protocol data"); - - assert_eq!( - new_wasm_config, - *upgraded_protocol_data.wasm_config(), - "upgraded costs should equal new costs" - ); + // + // let upgraded_engine_config = builder.get_engine_state().config(); + // + // assert_eq!( + // new_wasm_config, + // *upgraded_engine_config.wasm_config(), + // "upgraded costs should equal new costs" + // ); } #[ignore] #[test] fn should_not_downgrade() { - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); + + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + // let old_wasm_config = *builder.get_engine_state().config().wasm_config(); let new_protocol_version = ProtocolVersion::from_parts(2, 0, 0); @@ -195,21 +158,9 @@ fn should_not_downgrade() { }; builder - .upgrade_with_upgrade_request(&mut upgrade_request) + .upgrade(&mut upgrade_request) .expect_upgrade_success(); - let upgraded_protocol_data = builder - .get_engine_state() - .get_protocol_data(new_protocol_version) - .expect("should have result") - .expect("should have protocol data"); - - assert_eq!( - *DEFAULT_WASM_CONFIG, - *upgraded_protocol_data.wasm_config(), - "upgraded costs should equal original costs" - ); - let 
mut downgrade_request = { UpgradeRequestBuilder::new() .with_current_protocol_version(new_protocol_version) @@ -218,23 +169,23 @@ fn should_not_downgrade() { .build() }; - builder.upgrade_with_upgrade_request(&mut downgrade_request); + builder.upgrade(&mut downgrade_request); - let maybe_upgrade_result = builder.get_upgrade_result(1).expect("should have response"); + let upgrade_result = builder.get_upgrade_result(1).expect("should have response"); assert!( - maybe_upgrade_result.is_err(), + !upgrade_result.is_success(), "expected failure got {:?}", - maybe_upgrade_result + upgrade_result ); } #[ignore] #[test] fn should_not_skip_major_versions() { - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); let sem_ver = PROTOCOL_VERSION.value(); @@ -249,55 +200,58 @@ fn should_not_skip_major_versions() { .build() }; - builder.upgrade_with_upgrade_request(&mut upgrade_request); + builder.upgrade(&mut upgrade_request); - let maybe_upgrade_result = builder.get_upgrade_result(0).expect("should have response"); + let upgrade_result = builder.get_upgrade_result(0).expect("should have response"); - assert!(maybe_upgrade_result.is_err(), "expected failure"); + assert!(upgrade_result.is_err(), "expected failure"); } #[ignore] #[test] -fn should_not_skip_minor_versions() { - let mut builder = InMemoryWasmTestBuilder::default(); +fn should_allow_skip_minor_versions() { + let mut builder = LmdbWasmTestBuilder::default(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); let sem_ver = PROTOCOL_VERSION.value(); - let invalid_version = + // can skip minor versions as long as they are higher than current version + let valid_new_version = ProtocolVersion::from_parts(sem_ver.major, sem_ver.minor + 2, sem_ver.patch); let mut upgrade_request = { UpgradeRequestBuilder::new() 
.with_current_protocol_version(PROTOCOL_VERSION) - .with_new_protocol_version(invalid_version) + .with_new_protocol_version(valid_new_version) .with_activation_point(DEFAULT_ACTIVATION_POINT) .build() }; - builder.upgrade_with_upgrade_request(&mut upgrade_request); + builder.upgrade(&mut upgrade_request); - let maybe_upgrade_result = builder.get_upgrade_result(0).expect("should have response"); + let upgrade_result = builder.get_upgrade_result(0).expect("should have response"); - assert!(maybe_upgrade_result.is_err(), "expected failure"); + assert!(upgrade_result.is_success(), "expected success"); } #[ignore] #[test] fn should_upgrade_only_validator_slots() { - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); let sem_ver = PROTOCOL_VERSION.value(); let new_protocol_version = ProtocolVersion::from_parts(sem_ver.major, sem_ver.minor, sem_ver.patch + 1); - let validator_slot_key = builder - .get_contract(builder.get_auction_contract_hash()) - .expect("auction should exist") - .named_keys()[VALIDATOR_SLOTS_KEY]; + let validator_slot_key = *builder + .get_named_keys(EntityAddr::System( + builder.get_auction_contract_hash().value(), + )) + .get(VALIDATOR_SLOTS_KEY) + .unwrap(); let before_validator_slots: u32 = builder .query(None, validator_slot_key, &[]) @@ -320,7 +274,7 @@ fn should_upgrade_only_validator_slots() { }; builder - .upgrade_with_upgrade_request(&mut upgrade_request) + .upgrade(&mut upgrade_request) .expect_upgrade_success(); let after_validator_slots: u32 = builder @@ -341,18 +295,20 @@ fn should_upgrade_only_validator_slots() { #[ignore] #[test] fn should_upgrade_only_auction_delay() { - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + 
builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); let sem_ver = PROTOCOL_VERSION.value(); let new_protocol_version = ProtocolVersion::from_parts(sem_ver.major, sem_ver.minor, sem_ver.patch + 1); - let auction_delay_key = builder - .get_contract(builder.get_auction_contract_hash()) - .expect("auction should exist") - .named_keys()[AUCTION_DELAY_KEY]; + let auction_delay_key = *builder + .get_named_keys(EntityAddr::System( + builder.get_auction_contract_hash().value(), + )) + .get(AUCTION_DELAY_KEY) + .unwrap(); let before_auction_delay: u64 = builder .query(None, auction_delay_key, &[]) @@ -375,7 +331,7 @@ fn should_upgrade_only_auction_delay() { }; builder - .upgrade_with_upgrade_request(&mut upgrade_request) + .upgrade(&mut upgrade_request) .expect_upgrade_success(); let after_auction_delay: u64 = builder @@ -396,18 +352,20 @@ fn should_upgrade_only_auction_delay() { #[ignore] #[test] fn should_upgrade_only_locked_funds_period() { - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); let sem_ver = PROTOCOL_VERSION.value(); let new_protocol_version = ProtocolVersion::from_parts(sem_ver.major, sem_ver.minor, sem_ver.patch + 1); - let locked_funds_period_key = builder - .get_contract(builder.get_auction_contract_hash()) - .expect("auction should exist") - .named_keys()[LOCKED_FUNDS_PERIOD_KEY]; + let locked_funds_period_key = *builder + .get_named_keys(EntityAddr::System( + builder.get_auction_contract_hash().value(), + )) + .get(LOCKED_FUNDS_PERIOD_KEY) + .unwrap(); let before_locked_funds_period_millis: u64 = builder .query(None, locked_funds_period_key, &[]) @@ -430,7 +388,7 @@ fn should_upgrade_only_locked_funds_period() { }; builder - .upgrade_with_upgrade_request(&mut upgrade_request) + .upgrade(&mut upgrade_request) .expect_upgrade_success(); let after_locked_funds_period_millis: u64 = builder @@ 
-451,18 +409,17 @@ fn should_upgrade_only_locked_funds_period() { #[ignore] #[test] fn should_upgrade_only_round_seigniorage_rate() { - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); let sem_ver = PROTOCOL_VERSION.value(); let new_protocol_version = ProtocolVersion::from_parts(sem_ver.major, sem_ver.minor, sem_ver.patch + 1); - let round_seigniorage_rate_key = builder - .get_contract(builder.get_mint_contract_hash()) - .expect("auction should exist") - .named_keys()[ROUND_SEIGNIORAGE_RATE_KEY]; + let keys = builder.get_named_keys(EntityAddr::System(builder.get_mint_contract_hash().value())); + + let round_seigniorage_rate_key = *keys.get(ROUND_SEIGNIORAGE_RATE_KEY).unwrap(); let before_round_seigniorage_rate: Ratio = builder .query(None, round_seigniorage_rate_key, &[]) @@ -485,7 +442,7 @@ fn should_upgrade_only_round_seigniorage_rate() { }; builder - .upgrade_with_upgrade_request(&mut upgrade_request) + .upgrade(&mut upgrade_request) .expect_upgrade_success(); let after_round_seigniorage_rate: Ratio = builder @@ -513,18 +470,20 @@ fn should_upgrade_only_round_seigniorage_rate() { #[ignore] #[test] fn should_upgrade_only_unbonding_delay() { - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); let sem_ver = PROTOCOL_VERSION.value(); let new_protocol_version = ProtocolVersion::from_parts(sem_ver.major, sem_ver.minor, sem_ver.patch + 1); - let unbonding_delay_key = builder - .get_contract(builder.get_auction_contract_hash()) - .expect("auction should exist") - .named_keys()[UNBONDING_DELAY_KEY]; + let entity_addr = EntityAddr::System(builder.get_auction_contract_hash().value()); + + let unbonding_delay_key = *builder + 
.get_named_keys(entity_addr) + .get(UNBONDING_DELAY_KEY) + .unwrap(); let before_unbonding_delay: u64 = builder .query(None, unbonding_delay_key, &[]) @@ -547,7 +506,7 @@ fn should_upgrade_only_unbonding_delay() { }; builder - .upgrade_with_upgrade_request(&mut upgrade_request) + .upgrade(&mut upgrade_request) .expect_upgrade_success(); let after_unbonding_delay: u64 = builder @@ -570,19 +529,21 @@ fn should_upgrade_only_unbonding_delay() { #[ignore] #[test] fn should_apply_global_state_upgrade() { - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); let sem_ver = PROTOCOL_VERSION.value(); let new_protocol_version = ProtocolVersion::from_parts(sem_ver.major, sem_ver.minor, sem_ver.patch + 1); // We'll try writing directly to this key. - let unbonding_delay_key = builder - .get_contract(builder.get_auction_contract_hash()) - .expect("auction should exist") - .named_keys()[UNBONDING_DELAY_KEY]; + let unbonding_delay_key = *builder + .get_named_keys(EntityAddr::System( + builder.get_auction_contract_hash().value(), + )) + .get(UNBONDING_DELAY_KEY) + .unwrap(); let before_unbonding_delay: u64 = builder .query(None, unbonding_delay_key, &[]) @@ -611,7 +572,7 @@ fn should_apply_global_state_upgrade() { }; builder - .upgrade_with_upgrade_request(&mut upgrade_request) + .upgrade(&mut upgrade_request) .expect_upgrade_success(); let after_unbonding_delay: u64 = builder @@ -630,3 +591,301 @@ fn should_apply_global_state_upgrade() { "Should have modified locked funds period" ); } + +#[ignore] +#[test] +fn should_increase_max_associated_keys_after_upgrade() { + let mut builder = LmdbWasmTestBuilder::default(); + + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let sem_ver = PROTOCOL_VERSION.value(); + let new_protocol_version = + ProtocolVersion::from_parts(sem_ver.major, sem_ver.minor, sem_ver.patch + 1); + + 
let mut upgrade_request = { + UpgradeRequestBuilder::new() + .with_current_protocol_version(PROTOCOL_VERSION) + .with_new_protocol_version(new_protocol_version) + .with_activation_point(DEFAULT_ACTIVATION_POINT) + .build() + }; + + let enable_entity = false; + let max_associated_keys = DEFAULT_MAX_ASSOCIATED_KEYS + 1; + let core_config = CoreConfig { + max_associated_keys, + enable_addressable_entity: enable_entity, + ..Default::default() + }; + + let chainspec = ChainspecConfig { + core_config, + wasm_config: Default::default(), + system_costs_config: Default::default(), + storage_costs: StorageCosts::default(), + }; + builder.with_chainspec(chainspec); + + builder + .upgrade(&mut upgrade_request) + .expect_upgrade_success(); + + for n in (0..DEFAULT_MAX_ASSOCIATED_KEYS).map(U256::from) { + let account_hash = { + let mut addr = [0; ACCOUNT_HASH_LENGTH]; + n.to_big_endian(&mut addr); + AccountHash::new(addr) + }; + + let add_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + "add_update_associated_key.wasm", + runtime_args! 
{ + ARG_ACCOUNT => account_hash, + }, + ) + .build(); + + builder.exec(add_request).expect_success().commit(); + } + + let account = builder + .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .expect("should get account"); + + assert!(account.associated_keys().len() > DEFAULT_MAX_ASSOCIATED_KEYS as usize); + assert_eq!( + account.associated_keys().len(), + max_associated_keys as usize + ); +} + +#[ignore] +#[test] +fn should_correctly_migrate_and_prune_system_contract_records() { + let (mut builder, lmdb_fixture_state, _temp_dir) = + lmdb_fixture::builder_from_global_state_fixture(lmdb_fixture::RELEASE_1_3_1); + + let legacy_system_entity_registry = { + let stored_value: StoredValue = builder + .query(None, ENTRY_REGISTRY_SPECIAL_ADDRESS, &[]) + .expect("should query system entity registry"); + let cl_value = stored_value + .as_cl_value() + .cloned() + .expect("should have cl value"); + let registry: SystemHashRegistry = cl_value.into_t().expect("should have system registry"); + registry + }; + + let old_protocol_version = lmdb_fixture_state.genesis_protocol_version(); + + let mut global_state_update = BTreeMap::::new(); + + let registry = CLValue::from_t(legacy_system_entity_registry.clone()) + .expect("must convert to StoredValue") + .into(); + + global_state_update.insert(Key::SystemEntityRegistry, registry); + + let mut upgrade_request = UpgradeRequestBuilder::new() + .with_current_protocol_version(old_protocol_version) + .with_new_protocol_version(ProtocolVersion::from_parts(2, 0, 0)) + .with_activation_point(DEFAULT_ACTIVATION_POINT) + .with_global_state_update(global_state_update) + .build(); + + builder + .upgrade(&mut upgrade_request) + .expect_upgrade_success(); + + let system_names = vec![system::MINT, system::AUCTION, system::HANDLE_PAYMENT]; + + for name in system_names { + let legacy_hash = *legacy_system_entity_registry + .get(name) + .expect("must have hash"); + let legacy_contract_key = Key::Hash(legacy_hash); + let _legacy_query = 
builder.query(None, legacy_contract_key, &[]); + + builder + .get_addressable_entity(AddressableEntityHash::new(legacy_hash)) + .expect("must have system entity"); + } +} + +#[test] +fn should_not_migrate_bids_with_invalid_min_max_delegation_amounts() { + let mut builder = LmdbWasmTestBuilder::default(); + + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let sem_ver = PROTOCOL_VERSION.value(); + let new_protocol_version = + ProtocolVersion::from_parts(sem_ver.major, sem_ver.minor, sem_ver.patch + 1); + + let mut upgrade_request = { + UpgradeRequestBuilder::new() + .with_current_protocol_version(PROTOCOL_VERSION) + .with_new_protocol_version(new_protocol_version) + .with_activation_point(DEFAULT_ACTIVATION_POINT) + .with_maximum_delegation_amount(250_000_000_000) + .with_minimum_delegation_amount(500_000_000_000) + .build() + }; + + builder + .upgrade(&mut upgrade_request) + .expect_upgrade_failure(); +} + +#[test] +fn should_upgrade_legacy_accounts() { + let mut builder = LmdbWasmTestBuilder::default(); + + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let mut rng = rand::thread_rng(); + let account_data = (0..10000).map(|_| { + let account_hash = rng.gen(); + let main_purse_uref = rng.gen(); + + let account_key = Key::Account(account_hash); + let account_value = StoredValue::Account(Account::create( + account_hash, + NamedKeys::new(), + main_purse_uref, + )); + + (account_key, account_value) + }); + + builder.write_data_and_commit(account_data); + + let sem_ver = PROTOCOL_VERSION.value(); + let new_protocol_version = + ProtocolVersion::from_parts(sem_ver.major, sem_ver.minor, sem_ver.patch + 1); + + let mut upgrade_request = { + UpgradeRequestBuilder::new() + .with_current_protocol_version(PROTOCOL_VERSION) + .with_new_protocol_version(new_protocol_version) + .with_activation_point(DEFAULT_ACTIVATION_POINT) + .with_minimum_delegation_amount(250_000_000_000) + .with_maximum_delegation_amount(500_000_000_000) + .build() + }; + + builder + 
.upgrade(&mut upgrade_request) + .expect_upgrade_success(); +} + +#[ignore] +#[test] +fn should_migrate_seigniorage_snapshot_to_new_version() { + let (mut builder, lmdb_fixture_state, _temp_dir) = + lmdb_fixture::builder_from_global_state_fixture(lmdb_fixture::RELEASE_1_4_3); + + let auction_contract_hash = builder.get_auction_contract_hash(); + + // get legacy auction contract + let auction_contract = builder + .query(None, Key::Hash(auction_contract_hash.value()), &[]) + .expect("should have auction contract") + .into_contract() + .expect("should have legacy Contract under the Key::Contract variant"); + + // check that snapshot version key does not exist yet + let auction_named_keys = auction_contract.named_keys(); + let maybe_snapshot_version_named_key = + auction_named_keys.get(SEIGNIORAGE_RECIPIENTS_SNAPSHOT_VERSION_KEY); + assert!(maybe_snapshot_version_named_key.is_none()); + + // fetch legacy snapshot + let legacy_seigniorage_snapshot: SeigniorageRecipientsSnapshotV1 = { + let snapshot_key = auction_named_keys + .get(SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY) + .expect("snapshot named key should exist"); + builder + .query(None, *snapshot_key, &[]) + .expect("should have seigniorage snapshot") + .as_cl_value() + .expect("should be a CLValue") + .clone() + .into_t() + .expect("should be SeigniorageRecipientsSnapshotV1") + }; + + // prepare upgrade request + let old_protocol_version = lmdb_fixture_state.genesis_protocol_version(); + let mut upgrade_request = UpgradeRequestBuilder::new() + .with_current_protocol_version(old_protocol_version) + .with_new_protocol_version(ProtocolVersion::from_parts(2, 0, 0)) + .with_activation_point(DEFAULT_ACTIVATION_POINT) + .build(); + + // execute upgrade + builder + .upgrade(&mut upgrade_request) + .expect_upgrade_success(); + + // fetch updated named keys + let auction_named_keys = + builder.get_named_keys(EntityAddr::System(auction_contract_hash.value())); + + // check that snapshot version named key was populated + let 
snapshot_version_key = auction_named_keys + .get(SEIGNIORAGE_RECIPIENTS_SNAPSHOT_VERSION_KEY) + .expect("auction should have snapshot version named key"); + let snapshot_version: u8 = builder + .query(None, *snapshot_version_key, &[]) + .expect("should have seigniorage snapshot version") + .as_cl_value() + .expect("should be a CLValue") + .clone() + .into_t() + .expect("should be u8"); + assert_eq!( + snapshot_version, + DEFAULT_SEIGNIORAGE_RECIPIENTS_SNAPSHOT_VERSION + ); + + // fetch new snapshot + let seigniorage_snapshot: SeigniorageRecipientsSnapshotV2 = { + let snapshot_key = auction_named_keys + .get(SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY) + .expect("snapshot named key should exist"); + builder + .query(None, *snapshot_key, &[]) + .expect("should have seigniorage snapshot") + .as_cl_value() + .expect("should be a CLValue") + .clone() + .into_t() + .expect("should be SeigniorageRecipientsSnapshotV2") + }; + + // compare snapshots + for era_id in legacy_seigniorage_snapshot.keys() { + let legacy_seigniorage_recipients = legacy_seigniorage_snapshot.get(era_id).unwrap(); + let new_seigniorage_recipient = seigniorage_snapshot.get(era_id).unwrap(); + + for pubkey in legacy_seigniorage_recipients.keys() { + let legacy_recipient = legacy_seigniorage_recipients.get(pubkey).unwrap(); + let new_recipient = new_seigniorage_recipient.get(pubkey).unwrap(); + + assert_eq!(legacy_recipient.stake(), new_recipient.stake()); + assert_eq!( + legacy_recipient.delegation_rate(), + new_recipient.delegation_rate() + ); + for pk in legacy_recipient.delegator_stake().keys() { + assert!(new_recipient + .delegator_stake() + .contains_key(&DelegatorKind::PublicKey(pk.clone()))) + } + } + } +} diff --git a/execution_engine_testing/tests/src/test/system_costs.rs b/execution_engine_testing/tests/src/test/system_costs.rs index 58abf88671..3c466f431e 100644 --- a/execution_engine_testing/tests/src/test/system_costs.rs +++ b/execution_engine_testing/tests/src/test/system_costs.rs @@ -2,41 +2,11 
@@ use num_traits::Zero; use once_cell::sync::Lazy; use casper_engine_test_support::{ - internal::{ - utils, DeployItemBuilder, ExecuteRequestBuilder, InMemoryWasmTestBuilder, - UpgradeRequestBuilder, DEFAULT_ACCOUNTS, DEFAULT_ACCOUNT_PUBLIC_KEY, DEFAULT_PAYMENT, - DEFAULT_PROTOCOL_VERSION, DEFAULT_RUN_GENESIS_REQUEST, - }, - AccountHash, DEFAULT_ACCOUNT_ADDR, DEFAULT_ACCOUNT_INITIAL_BALANCE, -}; -use casper_execution_engine::{ - core::engine_state::{genesis::GenesisValidator, GenesisAccount}, - shared::{ - gas::Gas, - host_function_costs::{Cost, HostFunction, HostFunctionCosts}, - motes::Motes, - opcode_costs::OpcodeCosts, - storage_costs::StorageCosts, - system_config::{ - auction_costs::{ - AuctionCosts, DEFAULT_ADD_BID_COST, DEFAULT_DELEGATE_COST, DEFAULT_DISTRIBUTE_COST, - DEFAULT_RUN_AUCTION_COST, DEFAULT_SLASH_COST, DEFAULT_UNDELEGATE_COST, - DEFAULT_WITHDRAW_BID_COST, - }, - handle_payment_costs::{ - HandlePaymentCosts, DEFAULT_FINALIZE_PAYMENT_COST, DEFAULT_SET_REFUND_PURSE_COST, - }, - mint_costs::{ - MintCosts, DEFAULT_BALANCE_COST, DEFAULT_MINT_COST, - DEFAULT_REDUCE_TOTAL_SUPPLY_COST, DEFAULT_TRANSFER_COST, - }, - standard_payment_costs::StandardPaymentCosts, - SystemConfig, - }, - wasm, - wasm_config::{WasmConfig, DEFAULT_MAX_STACK_HEIGHT, DEFAULT_WASM_MAX_MEMORY}, - }, - storage::protocol_data::DEFAULT_WASMLESS_TRANSFER_COST, + utils, ChainspecConfig, DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder, + UpgradeRequestBuilder, DEFAULT_ACCOUNTS, DEFAULT_ACCOUNT_ADDR, DEFAULT_ACCOUNT_INITIAL_BALANCE, + DEFAULT_ACCOUNT_PUBLIC_KEY, DEFAULT_MAX_ASSOCIATED_KEYS, DEFAULT_MINIMUM_DELEGATION_AMOUNT, + DEFAULT_PAYMENT, DEFAULT_PROTOCOL_VERSION, LOCAL_GENESIS_REQUEST, + MINIMUM_ACCOUNT_CREATION_BALANCE, }; use casper_types::{ runtime_args, @@ -44,45 +14,56 @@ use casper_types::{ auction::{self, DelegationRate}, handle_payment, mint, AUCTION, }, - EraId, ProtocolVersion, PublicKey, RuntimeArgs, SecretKey, U512, + AuctionCosts, BrTableCost, 
ControlFlowCosts, CoreConfig, EraId, Gas, GenesisAccount, + GenesisValidator, HandlePaymentCosts, HostFunction, HostFunctionCost, HostFunctionCostsV1, + HostFunctionCostsV2, MessageLimits, MintCosts, Motes, OpcodeCosts, ProtocolVersion, PublicKey, + RuntimeArgs, SecretKey, StandardPaymentCosts, StorageCosts, SystemConfig, WasmConfig, + WasmV1Config, WasmV2Config, DEFAULT_ADD_BID_COST, DEFAULT_MAX_STACK_HEIGHT, + DEFAULT_MINIMUM_BID_AMOUNT, DEFAULT_WASM_MAX_MEMORY, U512, }; +use crate::wasm_utils; + const SYSTEM_CONTRACT_HASHES_NAME: &str = "system_contract_hashes.wasm"; const CONTRACT_ADD_BID: &str = "add_bid.wasm"; +const CONTRACT_TRANSFER_TO_NAMED_PURSE: &str = "transfer_to_named_purse.wasm"; static VALIDATOR_1_SECRET_KEY: Lazy = Lazy::new(|| SecretKey::ed25519_from_bytes([123; SecretKey::ED25519_LENGTH]).unwrap()); static VALIDATOR_1: Lazy = Lazy::new(|| PublicKey::from(&*VALIDATOR_1_SECRET_KEY)); -static VALIDATOR_1_ADDR: Lazy = Lazy::new(|| AccountHash::from(&*VALIDATOR_1)); const VALIDATOR_1_STAKE: u64 = 250_000; -const BOND_AMOUNT: u64 = 42; -const BID_AMOUNT: u64 = 99; +static VALIDATOR_2_SECRET_KEY: Lazy = + Lazy::new(|| SecretKey::ed25519_from_bytes([124; SecretKey::ED25519_LENGTH]).unwrap()); +static VALIDATOR_2: Lazy = Lazy::new(|| PublicKey::from(&*VALIDATOR_2_SECRET_KEY)); +const BOND_AMOUNT: u64 = DEFAULT_MINIMUM_BID_AMOUNT + 42; +const BID_AMOUNT: u64 = 99 + DEFAULT_MINIMUM_DELEGATION_AMOUNT; const TRANSFER_AMOUNT: u64 = 123; const BID_DELEGATION_RATE: DelegationRate = auction::DELEGATION_RATE_DENOMINATOR; -const UPDATED_CALL_CONTRACT_COST: Cost = 12_345; -const NEW_ADD_BID_COST: u32 = DEFAULT_ADD_BID_COST * 2; -const NEW_WITHDRAW_BID_COST: u32 = DEFAULT_WITHDRAW_BID_COST * 3; -const NEW_DELEGATE_COST: u32 = DEFAULT_DELEGATE_COST * 4; -const NEW_UNDELEGATE_COST: u32 = DEFAULT_UNDELEGATE_COST * 5; +const UPDATED_CALL_CONTRACT_COST: HostFunctionCost = 12_345; +const NEW_ADD_BID_COST: u64 = 2_500_000_000; +const NEW_WITHDRAW_BID_COST: u32 = 
2_500_000_000; +const NEW_DELEGATE_COST: u32 = 2_500_000_000; +const NEW_UNDELEGATE_COST: u32 = NEW_DELEGATE_COST; +const NEW_REDELEGATE_COST: u32 = NEW_DELEGATE_COST; const DEFAULT_ACTIVATION_POINT: EraId = EraId::new(1); -static OLD_PROTOCOL_VERSION: Lazy = Lazy::new(|| *DEFAULT_PROTOCOL_VERSION); -static NEW_PROTOCOL_VERSION: Lazy = Lazy::new(|| { - ProtocolVersion::from_parts( - OLD_PROTOCOL_VERSION.value().major, - OLD_PROTOCOL_VERSION.value().minor, - OLD_PROTOCOL_VERSION.value().patch + 1, - ) -}); +const OLD_PROTOCOL_VERSION: ProtocolVersion = DEFAULT_PROTOCOL_VERSION; +const NEW_PROTOCOL_VERSION: ProtocolVersion = ProtocolVersion::from_parts( + OLD_PROTOCOL_VERSION.value().major, + OLD_PROTOCOL_VERSION.value().minor, + OLD_PROTOCOL_VERSION.value().patch + 1, +); +const ARG_PURSE_NAME: &str = "purse_name"; +const NAMED_PURSE_NAME: &str = "purse_1"; const ARG_AMOUNT: &str = "amount"; #[ignore] #[test] fn add_bid_and_withdraw_bid_have_expected_costs() { - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - builder.run_genesis(&*DEFAULT_RUN_GENESIS_REQUEST); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); let system_contract_hashes_request = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, @@ -95,17 +76,17 @@ fn add_bid_and_withdraw_bid_have_expected_costs() { .expect_success() .commit(); - let account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) + let entity = builder + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) .expect("should have account"); let add_bid_request = ExecuteRequestBuilder::contract_call_by_hash( *DEFAULT_ACCOUNT_ADDR, - account + entity .named_keys() .get(AUCTION) .unwrap() - .into_hash() + .into_entity_hash_addr() .unwrap() .into(), auction::METHOD_ADD_BID, @@ -117,31 +98,32 @@ fn add_bid_and_withdraw_bid_have_expected_costs() { ) .build(); - let balance_before = builder.get_purse_balance(account.main_purse()); + let balance_before = 
builder.get_purse_balance(entity.main_purse()); let proposer_reward_starting_balance_1 = builder.get_proposer_purse_balance(); builder.exec(add_bid_request).expect_success().commit(); - let balance_after = builder.get_purse_balance(account.main_purse()); + let balance_after = builder.get_purse_balance(entity.main_purse()); let transaction_fee_1 = builder.get_proposer_purse_balance() - proposer_reward_starting_balance_1; - let expected_call_cost = U512::from(DEFAULT_ADD_BID_COST); + let system_config = builder.chainspec().system_costs_config; + let expected_call_cost = U512::from(system_config.auction_costs().add_bid); assert_eq!( balance_after, balance_before - U512::from(BOND_AMOUNT) - transaction_fee_1 ); - assert_eq!(builder.last_exec_gas_cost().value(), expected_call_cost); + assert_eq!(builder.last_exec_gas_consumed().value(), expected_call_cost); // Withdraw bid let withdraw_bid_request = ExecuteRequestBuilder::contract_call_by_hash( *DEFAULT_ACCOUNT_ADDR, - account + entity .named_keys() .get(AUCTION) .unwrap() - .into_hash() + .into_entity_hash_addr() .unwrap() .into(), auction::METHOD_WITHDRAW_BID, @@ -152,64 +134,44 @@ fn add_bid_and_withdraw_bid_have_expected_costs() { ) .build(); - let balance_before = builder.get_purse_balance(account.main_purse()); + let balance_before = builder.get_purse_balance(entity.main_purse()); let proposer_reward_starting_balance_2 = builder.get_proposer_purse_balance(); builder.exec(withdraw_bid_request).expect_success().commit(); - let balance_after = builder.get_purse_balance(account.main_purse()); + let balance_after = builder.get_purse_balance(entity.main_purse()); let transaction_fee_2 = builder.get_proposer_purse_balance() - proposer_reward_starting_balance_2; - let expected_call_cost = U512::from(DEFAULT_WITHDRAW_BID_COST); + let system_config = builder.chainspec().system_costs_config; + let expected_call_cost = U512::from(system_config.auction_costs().withdraw_bid); assert_eq!(balance_after, balance_before - 
transaction_fee_2); - assert_eq!(builder.last_exec_gas_cost().value(), expected_call_cost); + assert_eq!(builder.last_exec_gas_consumed().value(), expected_call_cost); } #[ignore] #[test] fn upgraded_add_bid_and_withdraw_bid_have_expected_costs() { - let new_wasmless_transfer_cost = DEFAULT_WASMLESS_TRANSFER_COST; - - let new_auction_costs = AuctionCosts { - add_bid: NEW_ADD_BID_COST, - withdraw_bid: NEW_WITHDRAW_BID_COST, - ..Default::default() - }; - let new_mint_costs = MintCosts::default(); - let new_standard_payment_costs = StandardPaymentCosts::default(); - let new_handle_payment_costs = HandlePaymentCosts::default(); - - let new_system_config = SystemConfig::new( - new_wasmless_transfer_cost, - new_auction_costs, - new_mint_costs, - new_handle_payment_costs, - new_standard_payment_costs, - ); - - let mut builder = InMemoryWasmTestBuilder::default(); - builder.run_genesis(&*DEFAULT_RUN_GENESIS_REQUEST); + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); let mut upgrade_request = { UpgradeRequestBuilder::new() - .with_current_protocol_version(*OLD_PROTOCOL_VERSION) - .with_new_protocol_version(*NEW_PROTOCOL_VERSION) + .with_current_protocol_version(OLD_PROTOCOL_VERSION) + .with_new_protocol_version(NEW_PROTOCOL_VERSION) .with_activation_point(DEFAULT_ACTIVATION_POINT) - .with_new_system_config(new_system_config) .build() }; - builder.upgrade_with_upgrade_request(&mut upgrade_request); + builder.upgrade(&mut upgrade_request); let system_contract_hashes_request = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, SYSTEM_CONTRACT_HASHES_NAME, RuntimeArgs::default(), ) - .with_protocol_version(*NEW_PROTOCOL_VERSION) .build(); builder .exec(system_contract_hashes_request) @@ -217,7 +179,7 @@ fn upgraded_add_bid_and_withdraw_bid_have_expected_costs() { .commit(); let account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) .expect("should 
have account"); let add_bid_request = ExecuteRequestBuilder::contract_call_by_hash( @@ -226,7 +188,7 @@ fn upgraded_add_bid_and_withdraw_bid_have_expected_costs() { .named_keys() .get(AUCTION) .unwrap() - .into_hash() + .into_entity_hash_addr() .unwrap() .into(), auction::METHOD_ADD_BID, @@ -236,7 +198,6 @@ fn upgraded_add_bid_and_withdraw_bid_have_expected_costs() { auction::ARG_DELEGATION_RATE => BID_DELEGATION_RATE, }, ) - .with_protocol_version(*NEW_PROTOCOL_VERSION) .build(); let balance_before = builder.get_purse_balance(account.main_purse()); @@ -255,7 +216,7 @@ fn upgraded_add_bid_and_withdraw_bid_have_expected_costs() { balance_after, balance_before - U512::from(BOND_AMOUNT) - transaction_fee_1 ); - assert_eq!(builder.last_exec_gas_cost().value(), expected_call_cost); + assert_eq!(builder.last_exec_gas_consumed().value(), expected_call_cost); // Withdraw bid let withdraw_bid_request = ExecuteRequestBuilder::contract_call_by_hash( @@ -264,7 +225,7 @@ fn upgraded_add_bid_and_withdraw_bid_have_expected_costs() { .named_keys() .get(AUCTION) .unwrap() - .into_hash() + .into_entity_hash_addr() .unwrap() .into(), auction::METHOD_WITHDRAW_BID, @@ -273,7 +234,6 @@ fn upgraded_add_bid_and_withdraw_bid_have_expected_costs() { auction::ARG_AMOUNT => U512::from(BOND_AMOUNT), }, ) - .with_protocol_version(*NEW_PROTOCOL_VERSION) .build(); let balance_before = builder.get_purse_balance(account.main_purse()); @@ -288,31 +248,40 @@ fn upgraded_add_bid_and_withdraw_bid_have_expected_costs() { let call_cost = U512::from(NEW_WITHDRAW_BID_COST); assert_eq!(balance_after, balance_before - transaction_fee_2); - assert_eq!(builder.last_exec_gas_cost().value(), call_cost); + assert_eq!(builder.last_exec_gas_consumed().value(), call_cost); } #[ignore] #[test] fn delegate_and_undelegate_have_expected_costs() { - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); let accounts = { let validator_1 = GenesisAccount::account( 
VALIDATOR_1.clone(), - Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE.into()), + Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE), + Some(GenesisValidator::new( + Motes::new(VALIDATOR_1_STAKE), + DelegationRate::zero(), + )), + ); + let validator_2 = GenesisAccount::account( + VALIDATOR_2.clone(), + Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE), Some(GenesisValidator::new( - Motes::new(VALIDATOR_1_STAKE.into()), + Motes::new(VALIDATOR_1_STAKE), DelegationRate::zero(), )), ); let mut tmp: Vec = DEFAULT_ACCOUNTS.clone(); tmp.push(validator_1); + tmp.push(validator_2); tmp }; let run_genesis_request = utils::create_run_genesis_request(accounts); - builder.run_genesis(&run_genesis_request); + builder.run_genesis(run_genesis_request); let system_contract_hashes_request = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, @@ -326,7 +295,7 @@ fn delegate_and_undelegate_have_expected_costs() { .commit(); let account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) .expect("should have account"); let delegate_request = ExecuteRequestBuilder::contract_call_by_hash( @@ -335,7 +304,7 @@ fn delegate_and_undelegate_have_expected_costs() { .named_keys() .get(AUCTION) .unwrap() - .into_hash() + .into_entity_hash_addr() .unwrap() .into(), auction::METHOD_DELEGATE, @@ -358,12 +327,38 @@ fn delegate_and_undelegate_have_expected_costs() { let transaction_fee_1 = builder.get_proposer_purse_balance() - proposer_reward_starting_balance_1; - let expected_call_cost = U512::from(DEFAULT_DELEGATE_COST); + let system_config = builder.chainspec().system_costs_config; + let expected_call_cost = U512::from(system_config.auction_costs().delegate); assert_eq!( balance_after, balance_before - U512::from(BID_AMOUNT) - transaction_fee_1, ); - assert_eq!(builder.last_exec_gas_cost().value(), expected_call_cost); + assert_eq!(builder.last_exec_gas_consumed().value(), expected_call_cost); + + let redelegate_request = 
ExecuteRequestBuilder::contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + account + .named_keys() + .get(AUCTION) + .unwrap() + .into_entity_hash_addr() + .unwrap() + .into(), + auction::METHOD_REDELEGATE, + runtime_args! { + auction::ARG_DELEGATOR => DEFAULT_ACCOUNT_PUBLIC_KEY.clone(), + auction::ARG_VALIDATOR => VALIDATOR_1.clone(), + auction::ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_DELEGATION_AMOUNT), + auction::ARG_NEW_VALIDATOR => VALIDATOR_2.clone() + }, + ) + .build(); + + builder.exec(redelegate_request).expect_success().commit(); + + let system_config = builder.chainspec().system_costs_config; + let expected_call_cost = U512::from(system_config.auction_costs().redelegate); + assert_eq!(builder.last_exec_gas_consumed().value(), expected_call_cost); // Withdraw bid let undelegate_request = ExecuteRequestBuilder::contract_call_by_hash( @@ -372,14 +367,14 @@ fn delegate_and_undelegate_have_expected_costs() { .named_keys() .get(AUCTION) .unwrap() - .into_hash() + .into_entity_hash_addr() .unwrap() .into(), auction::METHOD_UNDELEGATE, runtime_args! 
{ auction::ARG_DELEGATOR => DEFAULT_ACCOUNT_PUBLIC_KEY.clone(), auction::ARG_VALIDATOR => VALIDATOR_1.clone(), - auction::ARG_AMOUNT => U512::from(BID_AMOUNT), + auction::ARG_AMOUNT => U512::from(BID_AMOUNT - DEFAULT_MINIMUM_DELEGATION_AMOUNT), }, ) .build(); @@ -394,70 +389,59 @@ fn delegate_and_undelegate_have_expected_costs() { let transaction_fee_2 = builder.get_proposer_purse_balance() - proposer_reward_starting_balance_2; - let expected_call_cost = U512::from(DEFAULT_UNDELEGATE_COST); + let system_config = builder.chainspec().system_costs_config; + let expected_call_cost = U512::from(system_config.auction_costs().undelegate); assert_eq!(balance_after, balance_before - transaction_fee_2); - assert_eq!(builder.last_exec_gas_cost().value(), expected_call_cost); + assert_eq!(builder.last_exec_gas_consumed().value(), expected_call_cost); } #[ignore] #[test] fn upgraded_delegate_and_undelegate_have_expected_costs() { - let new_wasmless_transfer_cost = DEFAULT_WASMLESS_TRANSFER_COST; - - let new_auction_costs = AuctionCosts { - delegate: NEW_DELEGATE_COST, - undelegate: NEW_UNDELEGATE_COST, - ..Default::default() - }; - let new_mint_costs = MintCosts::default(); - let new_standard_payment_costs = StandardPaymentCosts::default(); - let new_handle_payment_costs = HandlePaymentCosts::default(); - - let new_system_config = SystemConfig::new( - new_wasmless_transfer_cost, - new_auction_costs, - new_mint_costs, - new_handle_payment_costs, - new_standard_payment_costs, - ); - - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); let accounts = { let validator_1 = GenesisAccount::account( VALIDATOR_1.clone(), - Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE.into()), + Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE), + Some(GenesisValidator::new( + Motes::new(VALIDATOR_1_STAKE), + DelegationRate::zero(), + )), + ); + let validator_2 = GenesisAccount::account( + VALIDATOR_2.clone(), + Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE), 
Some(GenesisValidator::new( - Motes::new(VALIDATOR_1_STAKE.into()), + Motes::new(VALIDATOR_1_STAKE), DelegationRate::zero(), )), ); let mut tmp: Vec = DEFAULT_ACCOUNTS.clone(); tmp.push(validator_1); + tmp.push(validator_2); tmp }; let run_genesis_request = utils::create_run_genesis_request(accounts); - builder.run_genesis(&run_genesis_request); + builder.run_genesis(run_genesis_request); let mut upgrade_request = { UpgradeRequestBuilder::new() - .with_current_protocol_version(*OLD_PROTOCOL_VERSION) - .with_new_protocol_version(*NEW_PROTOCOL_VERSION) + .with_current_protocol_version(OLD_PROTOCOL_VERSION) + .with_new_protocol_version(NEW_PROTOCOL_VERSION) .with_activation_point(DEFAULT_ACTIVATION_POINT) - .with_new_system_config(new_system_config) .build() }; - builder.upgrade_with_upgrade_request(&mut upgrade_request); + builder.upgrade(&mut upgrade_request); let system_contract_hashes_request = ExecuteRequestBuilder::standard( *DEFAULT_ACCOUNT_ADDR, SYSTEM_CONTRACT_HASHES_NAME, RuntimeArgs::default(), ) - .with_protocol_version(*NEW_PROTOCOL_VERSION) .build(); builder .exec(system_contract_hashes_request) @@ -465,7 +449,7 @@ fn upgraded_delegate_and_undelegate_have_expected_costs() { .commit(); let account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) .expect("should have account"); let delegate_request = ExecuteRequestBuilder::contract_call_by_hash( @@ -474,7 +458,7 @@ fn upgraded_delegate_and_undelegate_have_expected_costs() { .named_keys() .get(AUCTION) .unwrap() - .into_hash() + .into_entity_hash_addr() .unwrap() .into(), auction::METHOD_DELEGATE, @@ -484,7 +468,6 @@ fn upgraded_delegate_and_undelegate_have_expected_costs() { auction::ARG_AMOUNT => U512::from(BID_AMOUNT), }, ) - .with_protocol_version(*NEW_PROTOCOL_VERSION) .build(); let proposer_reward_starting_balance_1 = builder.get_proposer_purse_balance(); @@ -501,26 +484,51 @@ fn upgraded_delegate_and_undelegate_have_expected_costs() 
{ balance_after, balance_before - U512::from(BID_AMOUNT) - transaction_fee_1, ); - assert_eq!(builder.last_exec_gas_cost().value(), call_cost); - // Withdraw bid + assert_eq!(builder.last_exec_gas_consumed().value(), call_cost); + + // Redelegate bid + let redelegate_request = ExecuteRequestBuilder::contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + account + .named_keys() + .get(AUCTION) + .unwrap() + .into_entity_hash_addr() + .unwrap() + .into(), + auction::METHOD_REDELEGATE, + runtime_args! { + auction::ARG_DELEGATOR => DEFAULT_ACCOUNT_PUBLIC_KEY.clone(), + auction::ARG_VALIDATOR => VALIDATOR_1.clone(), + auction::ARG_AMOUNT => U512::from(DEFAULT_MINIMUM_DELEGATION_AMOUNT), + auction::ARG_NEW_VALIDATOR => VALIDATOR_2.clone() + }, + ) + .build(); + + builder.exec(redelegate_request).expect_success().commit(); + + let expected_call_cost = U512::from(NEW_REDELEGATE_COST); + assert_eq!(builder.last_exec_gas_consumed().value(), expected_call_cost); + + // Withdraw bid (undelegate) let undelegate_request = ExecuteRequestBuilder::contract_call_by_hash( *DEFAULT_ACCOUNT_ADDR, account .named_keys() .get(AUCTION) .unwrap() - .into_hash() + .into_entity_hash_addr() .unwrap() .into(), auction::METHOD_UNDELEGATE, runtime_args! 
{ auction::ARG_DELEGATOR => DEFAULT_ACCOUNT_PUBLIC_KEY.clone(), auction::ARG_VALIDATOR => VALIDATOR_1.clone(), - auction::ARG_AMOUNT => U512::from(BID_AMOUNT), + auction::ARG_AMOUNT => U512::from(BID_AMOUNT - DEFAULT_MINIMUM_DELEGATION_AMOUNT), }, ) - .with_protocol_version(*NEW_PROTOCOL_VERSION) .build(); let balance_before = builder.get_purse_balance(account.main_purse()); @@ -535,45 +543,43 @@ fn upgraded_delegate_and_undelegate_have_expected_costs() { let call_cost = U512::from(NEW_UNDELEGATE_COST); assert_eq!(balance_after, balance_before - transaction_fee_2); - assert_eq!(builder.last_exec_gas_cost().value(), call_cost); + assert_eq!(builder.last_exec_gas_consumed().value(), call_cost); } #[ignore] #[test] fn mint_transfer_has_expected_costs() { - let mut builder = InMemoryWasmTestBuilder::default(); - - let accounts = { - let validator_1 = GenesisAccount::account( - VALIDATOR_1.clone(), - Motes::new(DEFAULT_ACCOUNT_INITIAL_BALANCE.into()), - Some(GenesisValidator::new( - Motes::new(VALIDATOR_1_STAKE.into()), - DelegationRate::zero(), - )), - ); + let mut builder = LmdbWasmTestBuilder::default(); - let mut tmp: Vec = DEFAULT_ACCOUNTS.clone(); - tmp.push(validator_1); - tmp - }; + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); - let run_genesis_request = utils::create_run_genesis_request(accounts); + let transfer_request_1 = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_TRANSFER_TO_NAMED_PURSE, + runtime_args! 
{ + ARG_PURSE_NAME => NAMED_PURSE_NAME, + ARG_AMOUNT => U512::from(MINIMUM_ACCOUNT_CREATION_BALANCE), + }, + ) + .build(); - builder.run_genesis(&run_genesis_request); + builder.exec(transfer_request_1).expect_success().commit(); let default_account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) .expect("should have account"); - let validator_1_account = builder - .get_account(*VALIDATOR_1_ADDR) - .expect("should have account"); + let purse_1 = default_account + .named_keys() + .get(NAMED_PURSE_NAME) + .unwrap() + .into_uref() + .expect("should have purse"); let mint_hash = builder.get_mint_contract_hash(); let source = default_account.main_purse(); - let target = validator_1_account.main_purse(); + let target = purse_1; let id = Some(0u64); @@ -584,7 +590,7 @@ fn mint_transfer_has_expected_costs() { mint_hash, mint::METHOD_TRANSFER, runtime_args! { - mint::ARG_TO => Some(*VALIDATOR_1_ADDR), + mint::ARG_TO => Some(*DEFAULT_ACCOUNT_ADDR), mint::ARG_SOURCE => source, mint::ARG_TARGET => target, mint::ARG_AMOUNT => U512::from(TRANSFER_AMOUNT), @@ -602,75 +608,111 @@ fn mint_transfer_has_expected_costs() { let transaction_fee = builder.get_proposer_purse_balance() - proposer_reward_starting_balance; - let expected_call_cost = U512::from(DEFAULT_TRANSFER_COST); assert_eq!( balance_after, balance_before - transfer_amount - transaction_fee, ); - assert_eq!(builder.last_exec_gas_cost().value(), expected_call_cost); } #[ignore] #[test] fn should_charge_for_erroneous_system_contract_calls() { - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - builder.run_genesis(&*DEFAULT_RUN_GENESIS_REQUEST); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); let auction_hash = builder.get_auction_contract_hash(); let mint_hash = builder.get_mint_contract_hash(); let handle_payment_hash = builder.get_handle_payment_contract_hash(); - let account = builder - 
.get_account(*DEFAULT_ACCOUNT_ADDR) - .expect("should have account"); + let system_config = builder.chainspec().system_costs_config; // Entrypoints that could fail early due to missing arguments let entrypoint_calls = vec![ - (auction_hash, auction::METHOD_ADD_BID, DEFAULT_ADD_BID_COST), + ( + auction_hash, + auction::METHOD_ADD_BID, + system_config.auction_costs().add_bid, + ), ( auction_hash, auction::METHOD_WITHDRAW_BID, - DEFAULT_WITHDRAW_BID_COST, + system_config.auction_costs().withdraw_bid, ), ( auction_hash, auction::METHOD_DELEGATE, - DEFAULT_DELEGATE_COST, + system_config.auction_costs().delegate, ), ( auction_hash, auction::METHOD_UNDELEGATE, - DEFAULT_UNDELEGATE_COST, + system_config.auction_costs().undelegate, + ), + ( + auction_hash, + auction::METHOD_REDELEGATE, + system_config.auction_costs().redelegate, ), ( auction_hash, auction::METHOD_RUN_AUCTION, - DEFAULT_RUN_AUCTION_COST, + system_config.auction_costs().run_auction, + ), + ( + auction_hash, + auction::METHOD_SLASH, + system_config.auction_costs().slash, ), - (auction_hash, auction::METHOD_SLASH, DEFAULT_SLASH_COST), ( auction_hash, auction::METHOD_DISTRIBUTE, - DEFAULT_DISTRIBUTE_COST, + system_config.auction_costs().distribute, + ), + ( + mint_hash, + mint::METHOD_MINT, + system_config.mint_costs().mint.into(), ), - (mint_hash, mint::METHOD_MINT, DEFAULT_MINT_COST), ( mint_hash, mint::METHOD_REDUCE_TOTAL_SUPPLY, - DEFAULT_REDUCE_TOTAL_SUPPLY_COST, + system_config.mint_costs().reduce_total_supply.into(), + ), + ( + mint_hash, + mint::METHOD_BALANCE, + system_config.mint_costs().balance.into(), + ), + ( + mint_hash, + mint::METHOD_TRANSFER, + system_config.mint_costs().transfer.into(), ), - (mint_hash, mint::METHOD_BALANCE, DEFAULT_BALANCE_COST), - (mint_hash, mint::METHOD_TRANSFER, DEFAULT_TRANSFER_COST), ( handle_payment_hash, handle_payment::METHOD_SET_REFUND_PURSE, - DEFAULT_SET_REFUND_PURSE_COST, + system_config.handle_payment_costs().set_refund_purse.into(), + ), + // ( + // 
handle_payment_hash, + // handle_payment::METHOD_FINALIZE_PAYMENT, + // system_config.handle_payment_costs().finalize_payment, + // ), + ( + auction_hash, + "this_entrypoint_does_not_exists_1", + system_config.no_such_entrypoint(), + ), + ( + mint_hash, + "this_entrypoint_does_not_exists_2", + system_config.no_such_entrypoint(), ), ( handle_payment_hash, - handle_payment::METHOD_FINALIZE_PAYMENT, - DEFAULT_FINALIZE_PAYMENT_COST, + "this_entrypoint_does_not_exists_3", + system_config.no_such_entrypoint(), ), ]; @@ -683,62 +725,53 @@ fn should_charge_for_erroneous_system_contract_calls() { ) .build(); - let balance_before = builder.get_purse_balance(account.main_purse()); - - let proposer_reward_starting_balance = builder.get_proposer_purse_balance(); - builder.exec(exec_request).commit(); let _error = builder - .get_exec_results() - .last() + .get_last_exec_result() .expect("should have results") - .get(0) - .expect("should have first result") - .as_error() + .error() + .cloned() .unwrap_or_else(|| panic!("should have error while executing {}", entrypoint)); - let transaction_fee = - builder.get_proposer_purse_balance() - proposer_reward_starting_balance; - - let balance_after = builder.get_purse_balance(account.main_purse()); + // assert!(matches!( + // error, + // Error::Exec(ExecError::NoSuchMethod(ref no_such_method)) if no_such_method == + // entrypoint), "{:?}", error); let call_cost = U512::from(expected_cost); + assert_eq!( - balance_after, - balance_before - transaction_fee, - "Calling a failed entrypoint {} does not incur expected cost of {}", - entrypoint, - expected_cost, + builder.last_exec_gas_consumed().value(), + call_cost, + "{:?}", + entrypoint ); - assert_eq!(builder.last_exec_gas_cost().value(), call_cost); } } #[ignore] #[test] fn should_verify_do_nothing_charges_only_for_standard_payment() { - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - 
builder.run_genesis(&*DEFAULT_RUN_GENESIS_REQUEST); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); let default_account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) + .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR) .expect("should have default account"); - let do_nothing_request = { - let deploy_item = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_session_bytes(wasm::do_nothing_bytes(), RuntimeArgs::default()) - .with_empty_payment_bytes(runtime_args! { - ARG_AMOUNT => *DEFAULT_PAYMENT - }) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) - .with_deploy_hash([42; 32]) - .build(); - - ExecuteRequestBuilder::from_deploy_item(deploy_item).build() - }; + let deploy_item = DeployItemBuilder::new() + .with_address(*DEFAULT_ACCOUNT_ADDR) + .with_session_bytes(wasm_utils::do_nothing_bytes(), RuntimeArgs::default()) + .with_standard_payment(runtime_args! { + ARG_AMOUNT => *DEFAULT_PAYMENT + }) + .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) + .with_deploy_hash([42; 32]) + .build(); + + let do_nothing_request = ExecuteRequestBuilder::from_deploy_item(&deploy_item).build(); let user_funds_before = builder.get_purse_balance(default_account.main_purse()); @@ -752,15 +785,15 @@ fn should_verify_do_nothing_charges_only_for_standard_payment() { assert_eq!(user_funds_after, user_funds_before - transaction_fee,); - assert_eq!(builder.last_exec_gas_cost(), Gas::new(U512::zero())); + assert_eq!(builder.last_exec_gas_consumed(), Gas::new(U512::zero())); } #[ignore] #[test] fn should_verify_wasm_add_bid_wasm_cost_is_not_recursive() { - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - builder.run_genesis(&*DEFAULT_RUN_GENESIS_REQUEST); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); let new_opcode_costs = OpcodeCosts { bit: 0, @@ -772,101 +805,96 @@ fn should_verify_wasm_add_bid_wasm_cost_is_not_recursive() { op_const: 0, local: 0, global: 0, - control_flow: 0, + control_flow: 
ControlFlowCosts { + block: 0, + op_loop: 0, + op_if: 0, + op_else: 0, + end: 0, + br: 0, + br_if: 0, + br_table: BrTableCost { + cost: 0, + size_multiplier: 0, + }, + op_return: 0, + call: 0, + call_indirect: 0, + drop: 0, + select: 0, + }, integer_comparison: 0, conversion: 0, unreachable: 0, nop: 0, current_memory: 0, grow_memory: 0, - regular: 0, + sign: 0, }; let new_storage_costs = StorageCosts::new(0); // We're elevating cost of `transfer_from_purse_to_purse` while zeroing others. // This will verify that user pays for the transfer host function _only_ while host does not // additionally charge for calling mint's "transfer" entrypoint under the hood. - let new_host_function_costs = HostFunctionCosts { - read_value: HostFunction::fixed(0), - read_value_local: HostFunction::fixed(0), - write: HostFunction::fixed(0), - write_local: HostFunction::fixed(0), - add: HostFunction::fixed(0), - new_uref: HostFunction::fixed(0), - load_named_keys: HostFunction::fixed(0), - ret: HostFunction::fixed(0), - get_key: HostFunction::fixed(0), - has_key: HostFunction::fixed(0), - put_key: HostFunction::fixed(0), - remove_key: HostFunction::fixed(0), - revert: HostFunction::fixed(0), - is_valid_uref: HostFunction::fixed(0), - add_associated_key: HostFunction::fixed(0), - remove_associated_key: HostFunction::fixed(0), - update_associated_key: HostFunction::fixed(0), - set_action_threshold: HostFunction::fixed(0), - get_caller: HostFunction::fixed(0), - get_blocktime: HostFunction::fixed(0), - create_purse: HostFunction::fixed(0), - transfer_to_account: HostFunction::fixed(0), - transfer_from_purse_to_account: HostFunction::fixed(0), - transfer_from_purse_to_purse: HostFunction::fixed(0), - get_balance: HostFunction::fixed(0), - get_phase: HostFunction::fixed(0), - get_system_contract: HostFunction::fixed(0), - get_main_purse: HostFunction::fixed(0), - read_host_buffer: HostFunction::fixed(0), - create_contract_package_at_hash: HostFunction::fixed(0), - 
create_contract_user_group: HostFunction::fixed(0), - add_contract_version: HostFunction::fixed(0), - disable_contract_version: HostFunction::fixed(0), + let new_host_function_costs = HostFunctionCostsV1 { call_contract: HostFunction::fixed(UPDATED_CALL_CONTRACT_COST), - call_versioned_contract: HostFunction::fixed(0), - get_named_arg_size: HostFunction::fixed(0), - get_named_arg: HostFunction::fixed(0), - remove_contract_user_group: HostFunction::fixed(0), - provision_contract_user_group_uref: HostFunction::fixed(0), - remove_contract_user_group_urefs: HostFunction::fixed(0), - print: HostFunction::fixed(0), - blake2b: HostFunction::fixed(0), + ..Zero::zero() }; - - let new_wasm_config = WasmConfig::new( + let wasm_v1_config = WasmV1Config::new( DEFAULT_WASM_MAX_MEMORY, DEFAULT_MAX_STACK_HEIGHT, new_opcode_costs, - new_storage_costs, new_host_function_costs, ); + let wasm_v2_config = WasmV2Config::new( + DEFAULT_WASM_MAX_MEMORY, + OpcodeCosts::default(), + HostFunctionCostsV2::default(), + ); + let wasm_config = WasmConfig::new(MessageLimits::default(), wasm_v1_config, wasm_v2_config); - let new_wasmless_transfer_cost = 0; + let new_max_associated_keys = DEFAULT_MAX_ASSOCIATED_KEYS; let new_auction_costs = AuctionCosts::default(); - let new_mint_costs = MintCosts::default(); + let new_mint_costs = MintCosts { + transfer: 0, + ..Default::default() + }; let new_standard_payment_costs = StandardPaymentCosts::default(); let new_handle_payment_costs = HandlePaymentCosts::default(); - let new_system_config = SystemConfig::new( - new_wasmless_transfer_cost, + let system_costs_config = SystemConfig::new( + 1, new_auction_costs, new_mint_costs, new_handle_payment_costs, new_standard_payment_costs, ); + let core_config = CoreConfig { + max_associated_keys: new_max_associated_keys, + ..Default::default() + }; + + let chainspec = ChainspecConfig { + system_costs_config, + wasm_config, + core_config, + storage_costs: new_storage_costs, + }; + builder.with_chainspec(chainspec); 
+ let mut upgrade_request = { UpgradeRequestBuilder::new() - .with_current_protocol_version(*OLD_PROTOCOL_VERSION) - .with_new_protocol_version(*NEW_PROTOCOL_VERSION) + .with_current_protocol_version(OLD_PROTOCOL_VERSION) + .with_new_protocol_version(NEW_PROTOCOL_VERSION) .with_activation_point(DEFAULT_ACTIVATION_POINT) - .with_new_wasm_config(new_wasm_config) - .with_new_system_config(new_system_config) .build() }; - builder.upgrade_with_upgrade_request(&mut upgrade_request); + builder.upgrade(&mut upgrade_request); let default_account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) + .get_entity_by_account_hash(*DEFAULT_ACCOUNT_ADDR) .expect("should have default account"); let add_bid_request = ExecuteRequestBuilder::standard( @@ -878,7 +906,6 @@ fn should_verify_wasm_add_bid_wasm_cost_is_not_recursive() { auction::ARG_DELEGATION_RATE => BID_DELEGATION_RATE, }, ) - .with_protocol_version(*NEW_PROTOCOL_VERSION) .build(); // Verify that user is called and deploy raises runtime error @@ -893,13 +920,16 @@ fn should_verify_wasm_add_bid_wasm_cost_is_not_recursive() { let transaction_fee_1 = builder.get_proposer_purse_balance() - proposer_reward_starting_balance_1; - let expected_call_cost = - U512::from(DEFAULT_ADD_BID_COST) + U512::from(UPDATED_CALL_CONTRACT_COST); - assert_eq!( user_funds_after, user_funds_before - transaction_fee_1 - U512::from(BOND_AMOUNT) ); - assert_eq!(builder.last_exec_gas_cost(), Gas::new(expected_call_cost)); + let expected_call_cost = + U512::from(DEFAULT_ADD_BID_COST) + U512::from(UPDATED_CALL_CONTRACT_COST); + + assert_eq!( + builder.last_exec_gas_consumed(), + Gas::new(expected_call_cost) + ); } diff --git a/execution_engine_testing/tests/src/test/tutorial.rs b/execution_engine_testing/tests/src/test/tutorial.rs new file mode 100644 index 0000000000..a78b2ed62f --- /dev/null +++ b/execution_engine_testing/tests/src/test/tutorial.rs @@ -0,0 +1,2 @@ +mod counter; +mod hello_world; diff --git 
a/execution_engine_testing/tests/src/test/tutorial/counter.rs b/execution_engine_testing/tests/src/test/tutorial/counter.rs new file mode 100644 index 0000000000..d32762f7ea --- /dev/null +++ b/execution_engine_testing/tests/src/test/tutorial/counter.rs @@ -0,0 +1,100 @@ +use casper_engine_test_support::{ + ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST, +}; +use casper_types::{Key, RuntimeArgs, StoredValue}; + +const COUNT_KEY: &str = "count"; +const COUNTER_INSTALLER_WASM: &str = "counter_installer.wasm"; +const INCREMENT_COUNTER_WASM: &str = "increment_counter.wasm"; +const COUNTER_KEY: &str = "counter"; + +#[ignore] +#[test] +fn should_run_counter_example() { + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let install_request_1 = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + COUNTER_INSTALLER_WASM, + RuntimeArgs::default(), + ) + .build(); + + let inc_request_1 = ExecuteRequestBuilder::contract_call_by_name( + *DEFAULT_ACCOUNT_ADDR, + COUNTER_KEY, + "counter_inc", + RuntimeArgs::default(), + ) + .build(); + + let call_request_1 = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + INCREMENT_COUNTER_WASM, + RuntimeArgs::default(), + ) + .build(); + + builder.exec(install_request_1).expect_success().commit(); + + let binding = builder + .query(None, Key::Account(*DEFAULT_ACCOUNT_ADDR), &[]) + .expect("must have value"); + let result = binding.as_account().unwrap().named_keys(); + + println!("Named keys, {:?}", result); + + let query_result = builder + .query( + None, + Key::Account(*DEFAULT_ACCOUNT_ADDR), + &[COUNTER_KEY.into(), COUNT_KEY.into()], + ) + .expect("should query"); + + let counter_before: i32 = if let StoredValue::CLValue(cl_value) = query_result { + cl_value.into_t().unwrap() + } else { + panic!("Stored value is not an i32: {:?}", query_result); + }; + + builder.exec(inc_request_1).expect_success().commit(); + + let 
query_result = builder + .query( + None, + Key::from(*DEFAULT_ACCOUNT_ADDR), + &[COUNTER_KEY.into(), COUNT_KEY.into()], + ) + .expect("should query"); + + let counter_after: i32 = if let StoredValue::CLValue(cl_value) = query_result { + cl_value.into_t().unwrap() + } else { + panic!("Stored value is not an i32: {:?}", query_result); + }; + + let counter_diff = counter_after - counter_before; + assert_eq!(counter_diff, 1); + + builder.exec(call_request_1).expect_success().commit(); +} + +// #[test] +// fn gen_fixture() { +// lmdb_fixture::generate_fixture( +// "counter_contract", +// LOCAL_GENESIS_REQUEST.clone(), +// |builder| { +// let install_request_1 = ExecuteRequestBuilder::standard( +// *DEFAULT_ACCOUNT_ADDR, +// COUNTER_INSTALLER_WASM, +// RuntimeArgs::default(), +// ) +// .build(); +// builder.exec(install_request_1).expect_success().commit(); +// }, +// ) +// .expect("should gen fixture"); +// } diff --git a/execution_engine_testing/tests/src/test/tutorial/hello_world.rs b/execution_engine_testing/tests/src/test/tutorial/hello_world.rs new file mode 100644 index 0000000000..a25c7ed62a --- /dev/null +++ b/execution_engine_testing/tests/src/test/tutorial/hello_world.rs @@ -0,0 +1,36 @@ +use casper_engine_test_support::{ + ExecuteRequestBuilder, LmdbWasmTestBuilder, DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST, +}; +use casper_types::{runtime_args, Key, StoredValue}; + +const HELLO_WORLD_CONTRACT: &str = "hello_world.wasm"; +const KEY: &str = "special_value"; +const ARG_MESSAGE: &str = "message"; +const MESSAGE_VALUE: &str = "Hello, world!"; + +#[ignore] +#[test] +fn should_run_hello_world() { + let mut builder = LmdbWasmTestBuilder::default(); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + let exec_request = { + let session_args = runtime_args! 
{ + ARG_MESSAGE => MESSAGE_VALUE, + }; + ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, HELLO_WORLD_CONTRACT, session_args) + .build() + }; + builder.exec(exec_request).expect_success().commit(); + + let stored_message = builder + .query(None, Key::from(*DEFAULT_ACCOUNT_ADDR), &[KEY.into()]) + .expect("should query"); + + let message: String = if let StoredValue::CLValue(cl_value) = stored_message { + cl_value.into_t().unwrap() + } else { + panic!("Stored message is not a clvalue: {:?}", stored_message); + }; + assert_eq!(message, MESSAGE_VALUE); +} diff --git a/execution_engine_testing/tests/src/test/upgrade.rs b/execution_engine_testing/tests/src/test/upgrade.rs index 9d7176eed3..c67cf8301a 100644 --- a/execution_engine_testing/tests/src/test/upgrade.rs +++ b/execution_engine_testing/tests/src/test/upgrade.rs @@ -1,34 +1,50 @@ use casper_engine_test_support::{ - internal::{ExecuteRequestBuilder, InMemoryWasmTestBuilder, DEFAULT_RUN_GENESIS_REQUEST}, - DEFAULT_ACCOUNT_ADDR, + ExecuteRequestBuilder, LmdbWasmTestBuilder, TransferRequestBuilder, UpgradeRequestBuilder, + DEFAULT_ACCOUNT_ADDR, LOCAL_GENESIS_REQUEST, MINIMUM_ACCOUNT_CREATION_BALANCE, +}; + +use crate::lmdb_fixture; +use casper_execution_engine::{ + engine_state, + engine_state::{EngineConfigBuilder, Error}, + execution::ExecError, }; -use casper_execution_engine::shared::stored_value::StoredValue; use casper_types::{ - contracts::{ContractVersion, CONTRACT_INITIAL_VERSION}, - runtime_args, CLValue, ContractPackageHash, RuntimeArgs, + account::AccountHash, + addressable_entity::{AssociatedKeys, Weight}, + contracts::ContractPackageHash, + runtime_args, AddressableEntityHash, CLValue, EntityVersion, EraId, HoldBalanceHandling, Key, + PackageHash, ProtocolVersion, RuntimeArgs, StoredValue, Timestamp, ENTITY_INITIAL_VERSION, }; const DO_NOTHING_STORED_CONTRACT_NAME: &str = "do_nothing_stored"; const DO_NOTHING_STORED_UPGRADER_CONTRACT_NAME: &str = "do_nothing_stored_upgrader"; const 
DO_NOTHING_STORED_CALLER_CONTRACT_NAME: &str = "do_nothing_stored_caller"; +const PURSE_HOLDER_STORED_CALLER_CONTRACT_NAME: &str = "purse_holder_stored_caller"; +const PURSE_HOLDER_STORED_CONTRACT_NAME: &str = "purse_holder_stored"; +const PURSE_HOLDER_STORED_UPGRADER_CONTRACT_NAME: &str = "purse_holder_stored_upgrader"; +const UPGRADE_THRESHOLD_CONTRACT_NAME: &str = "upgrade_threshold.wasm"; +const UPGRADE_THRESHOLD_UPGRADER: &str = "upgrade_threshold_upgrader.wasm"; + const ENTRY_FUNCTION_NAME: &str = "delegate"; -const DO_NOTHING_PACKAGE_HASH_KEY_NAME: &str = "do_nothing_package_hash"; +const DO_NOTHING_CONTRACT_NAME: &str = "do_nothing_package_hash"; const DO_NOTHING_HASH_KEY_NAME: &str = "do_nothing_hash"; -const INITIAL_VERSION: ContractVersion = CONTRACT_INITIAL_VERSION; -const UPGRADED_VERSION: ContractVersion = INITIAL_VERSION + 1; + +const INITIAL_VERSION: EntityVersion = ENTITY_INITIAL_VERSION; +const UPGRADED_VERSION: EntityVersion = INITIAL_VERSION + 1; const PURSE_NAME_ARG_NAME: &str = "purse_name"; const PURSE_1: &str = "purse_1"; const METHOD_REMOVE: &str = "remove"; const VERSION: &str = "version"; -const PURSE_HOLDER_STORED_CALLER_CONTRACT_NAME: &str = "purse_holder_stored_caller"; -const PURSE_HOLDER_STORED_CONTRACT_NAME: &str = "purse_holder_stored"; -const PURSE_HOLDER_STORED_UPGRADER_CONTRACT_NAME: &str = "purse_holder_stored_upgrader"; + const HASH_KEY_NAME: &str = "purse_holder"; + const TOTAL_PURSES: usize = 3; const PURSE_NAME: &str = "purse_name"; const ENTRY_POINT_NAME: &str = "entry_point"; const ENTRY_POINT_ADD: &str = "add_named_purse"; const ARG_CONTRACT_PACKAGE: &str = "contract_package"; +const ARG_MAJOR_VERSION: &str = "major_version"; const ARG_VERSION: &str = "version"; const ARG_NEW_PURSE_NAME: &str = "new_purse_name"; const ARG_IS_LOCKED: &str = "is_locked"; @@ -37,9 +53,9 @@ const ARG_IS_LOCKED: &str = "is_locked"; #[ignore] #[test] fn should_upgrade_do_nothing_to_do_something_version_hash_call() { - let mut builder = 
InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); // Create contract package and store contract ver: 1.0.0 with "delegate" entry function { @@ -59,9 +75,9 @@ fn should_upgrade_do_nothing_to_do_something_version_hash_call() { // Calling initial version from contract package hash, should have no effects { let exec_request = { - ExecuteRequestBuilder::versioned_contract_call_by_hash_key_name( + ExecuteRequestBuilder::versioned_contract_call_by_name( *DEFAULT_ACCOUNT_ADDR, - DO_NOTHING_PACKAGE_HASH_KEY_NAME, + DO_NOTHING_CONTRACT_NAME, Some(INITIAL_VERSION), ENTRY_FUNCTION_NAME, RuntimeArgs::new(), @@ -73,11 +89,22 @@ fn should_upgrade_do_nothing_to_do_something_version_hash_call() { } let account_1 = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) .expect("should get account 1"); + let entity_hash = account_1 + .named_keys() + .get(DO_NOTHING_HASH_KEY_NAME) + .expect("must have do-nothing-hash") + .into_entity_hash() + .unwrap(); + + let entity = builder + .get_entity_with_named_keys_by_entity_hash(entity_hash) + .expect("must have entity"); + assert!( - account_1.named_keys().get(PURSE_1).is_none(), + entity.named_keys().get(PURSE_1).is_none(), "purse should not exist", ); @@ -102,9 +129,9 @@ fn should_upgrade_do_nothing_to_do_something_version_hash_call() { PURSE_NAME_ARG_NAME => PURSE_1, }; let exec_request = { - ExecuteRequestBuilder::versioned_contract_call_by_hash_key_name( + ExecuteRequestBuilder::versioned_contract_call_by_name( *DEFAULT_ACCOUNT_ADDR, - DO_NOTHING_PACKAGE_HASH_KEY_NAME, + DO_NOTHING_CONTRACT_NAME, Some(UPGRADED_VERSION), ENTRY_FUNCTION_NAME, args, @@ -116,11 +143,22 @@ fn should_upgrade_do_nothing_to_do_something_version_hash_call() { } let account_1 = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) + 
.get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) .expect("should get account 1"); + let entity_hash = account_1 + .named_keys() + .get("end of upgrade") + .expect("must have do-nothing-hash") + .into_entity_hash() + .unwrap(); + + let entity = builder + .get_entity_with_named_keys_by_entity_hash(entity_hash) + .expect("must have entity"); + assert!( - account_1.named_keys().get(PURSE_1).is_some(), + entity.named_keys().get(PURSE_1).is_some(), "purse should exist", ); } @@ -129,9 +167,9 @@ fn should_upgrade_do_nothing_to_do_something_version_hash_call() { #[ignore] #[test] fn should_upgrade_do_nothing_to_do_something_contract_call() { - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - builder.run_genesis(&*DEFAULT_RUN_GENESIS_REQUEST); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); // Create contract package and store contract ver: 1.0.0 { @@ -149,21 +187,22 @@ fn should_upgrade_do_nothing_to_do_something_contract_call() { } let account_1 = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) .expect("should get account 1"); account_1 .named_keys() .get(DO_NOTHING_HASH_KEY_NAME) .expect("should have key of do_nothing_hash") - .into_hash() + .into_entity_hash_addr() .expect("should have into hash"); let stored_contract_package_hash = account_1 .named_keys() - .get(DO_NOTHING_PACKAGE_HASH_KEY_NAME) + .get(DO_NOTHING_CONTRACT_NAME) .expect("should have key of do_nothing_hash") - .into_hash() + .into_hash_addr() + .map(PackageHash::new) .expect("should have hash"); // Calling initial stored version from contract package hash, should have no effects @@ -171,6 +210,7 @@ fn should_upgrade_do_nothing_to_do_something_contract_call() { let contract_name = format!("{}.wasm", DO_NOTHING_STORED_CALLER_CONTRACT_NAME); let args = runtime_args! 
{ ARG_CONTRACT_PACKAGE => stored_contract_package_hash, + ARG_MAJOR_VERSION => 2u32, ARG_VERSION => INITIAL_VERSION, ARG_NEW_PURSE_NAME => PURSE_1, }; @@ -182,11 +222,22 @@ fn should_upgrade_do_nothing_to_do_something_contract_call() { } let account_1 = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) .expect("should get account 1"); + let entity_hash = account_1 + .named_keys() + .get(DO_NOTHING_HASH_KEY_NAME) + .expect("must have do-nothing-hash") + .into_entity_hash() + .unwrap(); + + let entity = builder + .get_entity_with_named_keys_by_entity_hash(entity_hash) + .expect("must have entity"); + assert!( - account_1.named_keys().get(PURSE_1).is_none(), + entity.named_keys().get(PURSE_1).is_none(), "purse should not exist", ); @@ -207,9 +258,10 @@ fn should_upgrade_do_nothing_to_do_something_contract_call() { let stored_contract_package_hash = account_1 .named_keys() - .get(DO_NOTHING_PACKAGE_HASH_KEY_NAME) + .get(DO_NOTHING_CONTRACT_NAME) .expect("should have key of do_nothing_hash") - .into_hash() + .into_hash_addr() + .map(PackageHash::new) .expect("should have hash"); // Calling upgraded stored version, expecting purse creation @@ -217,6 +269,7 @@ fn should_upgrade_do_nothing_to_do_something_contract_call() { let contract_name = format!("{}.wasm", DO_NOTHING_STORED_CALLER_CONTRACT_NAME); let args = runtime_args! 
{ ARG_CONTRACT_PACKAGE => stored_contract_package_hash, + ARG_MAJOR_VERSION => 2, ARG_VERSION => UPGRADED_VERSION, ARG_NEW_PURSE_NAME => PURSE_1, }; @@ -229,11 +282,22 @@ fn should_upgrade_do_nothing_to_do_something_contract_call() { } let account_1 = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) .expect("should get account 1"); + let entity_hash = account_1 + .named_keys() + .get("end of upgrade") + .expect("must have do-nothing-hash") + .into_entity_hash() + .unwrap(); + + let entity = builder + .get_entity_with_named_keys_by_entity_hash(entity_hash) + .expect("must have entity"); + assert!( - account_1.named_keys().get(PURSE_1).is_some(), + entity.named_keys().get(PURSE_1).is_some(), "purse should exist", ); } @@ -241,9 +305,9 @@ fn should_upgrade_do_nothing_to_do_something_contract_call() { #[ignore] #[test] fn should_be_able_to_observe_state_transition_across_upgrade() { - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); // store do-nothing-stored { @@ -263,25 +327,25 @@ fn should_be_able_to_observe_state_transition_across_upgrade() { } let account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) .expect("should have account"); assert!( - account.named_keys().contains_key(VERSION), + account.named_keys().contains(VERSION), "version uref should exist on install" ); - let stored_package_hash: ContractPackageHash = account + let stored_package_hash = account .named_keys() .get(HASH_KEY_NAME) .expect("should have stored uref") - .into_hash() - .expect("should have hash") - .into(); + .into_hash_addr() + .map(PackageHash::new) + .expect("should have hash"); // verify version before upgrade let account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) + 
.get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) .expect("should have account"); let version = *account @@ -318,7 +382,7 @@ fn should_be_able_to_observe_state_transition_across_upgrade() { // version should change after upgrade let account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) .expect("should have account"); let version = *account @@ -340,9 +404,9 @@ fn should_be_able_to_observe_state_transition_across_upgrade() { #[ignore] #[test] fn should_support_extending_functionality() { - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); // store do-nothing-stored { @@ -362,21 +426,21 @@ fn should_support_extending_functionality() { } let account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) .expect("should have account"); let stored_package_hash = account .named_keys() .get(HASH_KEY_NAME) .expect("should have stored uref") - .into_hash() + .into_hash_addr() .expect("should have hash"); let stored_hash = account .named_keys() .get(PURSE_HOLDER_STORED_CONTRACT_NAME) .expect("should have stored uref") - .into_hash() + .into_entity_hash_addr() .expect("should have hash") .into(); @@ -401,10 +465,10 @@ fn should_support_extending_functionality() { // verify known uref actually exists prior to upgrade let contract = builder - .get_contract(stored_hash) + .get_entity_with_named_keys_by_entity_hash(stored_hash) .expect("should have contract"); assert!( - contract.named_keys().contains_key(PURSE_1), + contract.named_keys().contains(PURSE_1), "purse uref should exist in contract's named_keys before upgrade" ); @@ -416,7 +480,7 @@ fn should_support_extending_functionality() { *DEFAULT_ACCOUNT_ADDR, &contract_name, runtime_args! 
{ - ARG_CONTRACT_PACKAGE => stored_package_hash, + ARG_CONTRACT_PACKAGE => PackageHash::new(stored_package_hash), }, ) .build() @@ -427,17 +491,17 @@ fn should_support_extending_functionality() { // verify uref still exists in named_keys after upgrade: let contract = builder - .get_contract(stored_hash) + .get_entity_with_named_keys_by_entity_hash(stored_hash) .expect("should have contract"); assert!( - contract.named_keys().contains_key(PURSE_1), + contract.named_keys().contains(PURSE_1), "PURSE_1 uref should still exist in contract's named_keys after upgrade" ); // Get account again after upgrade to refresh named keys let account_2 = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) .expect("should have account"); // Get contract again after upgrade @@ -445,7 +509,7 @@ fn should_support_extending_functionality() { .named_keys() .get(PURSE_HOLDER_STORED_CONTRACT_NAME) .expect("should have stored uref") - .into_hash() + .into_entity_hash_addr() .expect("should have hash") .into(); assert_ne!(stored_hash, stored_hash_2); @@ -471,11 +535,11 @@ fn should_support_extending_functionality() { // verify known urefs no longer include removed purse let contract = builder - .get_contract(stored_hash_2) + .get_entity_with_named_keys_by_entity_hash(stored_hash_2) .expect("should have contract"); assert!( - !contract.named_keys().contains_key(PURSE_1), + !contract.named_keys().contains(PURSE_1), "PURSE_1 uref should no longer exist in contract's named_keys after remove" ); } @@ -483,9 +547,9 @@ fn should_support_extending_functionality() { #[ignore] #[test] fn should_maintain_named_keys_across_upgrade() { - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); // store contract { @@ -505,21 +569,22 @@ fn should_maintain_named_keys_across_upgrade() { } let 
account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) .expect("should have account"); let stored_hash = account .named_keys() .get(PURSE_HOLDER_STORED_CONTRACT_NAME) .expect("should have stored hash") - .into_hash() + .into_entity_hash_addr() .expect("should have hash"); let stored_package_hash = account .named_keys() .get(HASH_KEY_NAME) .expect("should have stored package hash") - .into_hash() + .into_hash_addr() + .map(PackageHash::new) .expect("should have hash"); // add several purse urefs to named_keys @@ -544,10 +609,10 @@ fn should_maintain_named_keys_across_upgrade() { // verify known uref actually exists prior to upgrade let contract = builder - .get_contract(stored_hash.into()) + .get_entity_with_named_keys_by_entity_hash(stored_hash.into()) .expect("should have contract"); assert!( - contract.named_keys().contains_key(purse_name), + contract.named_keys().contains(purse_name), "purse uref should exist in contract's named_keys before upgrade" ); } @@ -571,17 +636,15 @@ fn should_maintain_named_keys_across_upgrade() { // verify all urefs still exist in named_keys after upgrade let contract = builder - .get_contract(stored_hash.into()) + .get_entity_with_named_keys_by_entity_hash(stored_hash.into()) .expect("should have contract"); for index in 0..TOTAL_PURSES { let purse_name: &str = &format!("purse_{}", index); assert!( - contract.named_keys().contains_key(purse_name), - format!( - "{} uref should still exist in contract's named_keys after upgrade", - index - ) + contract.named_keys().contains(purse_name), + "{} uref should still exist in contract's named_keys after upgrade", + index ); } } @@ -589,9 +652,9 @@ fn should_maintain_named_keys_across_upgrade() { #[ignore] #[test] fn should_fail_upgrade_for_locked_contract() { - let mut builder = InMemoryWasmTestBuilder::default(); + let mut builder = LmdbWasmTestBuilder::default(); - builder.run_genesis(&DEFAULT_RUN_GENESIS_REQUEST); + 
builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); // store contract { @@ -611,19 +674,19 @@ fn should_fail_upgrade_for_locked_contract() { } let account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) .expect("should have account"); - let stored_package_hash: ContractPackageHash = account + let stored_package_hash: PackageHash = account .named_keys() .get(HASH_KEY_NAME) .expect("should have stored package hash") - .into_hash() - .expect("should have hash") - .into(); + .into_hash_addr() + .map(PackageHash::new) + .expect("should have hash"); let contract_package = builder - .get_contract_package(stored_package_hash) + .get_package(stored_package_hash) .expect("should get package hash"); // Ensure that our current package is indeed locked. @@ -645,3 +708,957 @@ fn should_fail_upgrade_for_locked_contract() { assert!(builder.exec(exec_request).is_error()); } } + +#[ignore] +#[test] +fn should_only_upgrade_if_threshold_is_met() { + const CONTRACT_HASH_NAME: &str = "contract_hash_name"; + const PACKAGE_HASH_KEY_NAME: &str = "contract_package_hash"; + + const ENTRYPOINT_ADD_ASSOCIATED_KEY: &str = "add_associated_key"; + const ENTRYPOINT_MANAGE_ACTION_THRESHOLD: &str = "manage_action_threshold"; + + const ARG_ENTITY_ACCOUNT_HASH: &str = "entity_account_hash"; + const ARG_KEY_WEIGHT: &str = "key_weight"; + const ARG_NEW_UPGRADE_THRESHOLD: &str = "new_threshold"; + const ARG_CONTRACT_PACKAGE: &str = "contract_package_hash"; + + let mut builder = LmdbWasmTestBuilder::default(); + + builder.run_genesis(LOCAL_GENESIS_REQUEST.clone()); + + if !builder.chainspec().core_config.enable_addressable_entity { + return; + } + + let install_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + UPGRADE_THRESHOLD_CONTRACT_NAME, + runtime_args! 
{}, + ) + .build(); + + builder.exec(install_request).expect_success().commit(); + + let entity = builder + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .expect("must have default addressable entity"); + + let upgrade_threshold_contract_hash = entity + .named_keys() + .get(CONTRACT_HASH_NAME) + .expect("must have named key entry for contract hash") + .into_entity_hash_addr() + .map(AddressableEntityHash::new) + .expect("must get contract hash"); + + let upgrade_threshold_package_hash = entity + .named_keys() + .get(PACKAGE_HASH_KEY_NAME) + .expect("must have named key entry for package hash") + .into_package_addr() + .map(PackageHash::new) + .expect("must get package hash"); + + let upgrade_threshold_contract_entity = builder + .get_entity_with_named_keys_by_entity_hash(upgrade_threshold_contract_hash) + .expect("must have upgrade threshold entity"); + + let entity = upgrade_threshold_contract_entity.entity(); + let actual_associated_keys = entity.associated_keys(); + let mut expected_associated_keys = AssociatedKeys::new(*DEFAULT_ACCOUNT_ADDR, Weight::new(1)); + assert_eq!(&expected_associated_keys, actual_associated_keys); + + let mut entity_account_hashes = + vec![AccountHash::new([10u8; 32]), AccountHash::new([11u8; 32])]; + + for entity_account_hash in &entity_account_hashes { + expected_associated_keys + .add_key(*entity_account_hash, Weight::new(1)) + .expect("must add associated key"); + + let execute_request = ExecuteRequestBuilder::contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + upgrade_threshold_contract_hash, + ENTRYPOINT_ADD_ASSOCIATED_KEY, + runtime_args! { + ARG_ENTITY_ACCOUNT_HASH => *entity_account_hash, + ARG_KEY_WEIGHT => 1u8 + }, + ) + .build(); + + builder.exec(execute_request).expect_success().commit(); + } + + let update_upgrade_threshold_request = ExecuteRequestBuilder::contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + upgrade_threshold_contract_hash, + ENTRYPOINT_MANAGE_ACTION_THRESHOLD, + runtime_args! 
{ + ARG_NEW_UPGRADE_THRESHOLD => 3u8 + }, + ) + .build(); + + builder + .exec(update_upgrade_threshold_request) + .expect_success() + .commit(); + + let upgrade_threshold_contract_entity = builder + .get_addressable_entity(upgrade_threshold_contract_hash) + .expect("must have upgrade threshold entity"); + + let updated_associated_keys = upgrade_threshold_contract_entity.associated_keys(); + assert_eq!(&expected_associated_keys, updated_associated_keys); + + let updated_action_threshold = upgrade_threshold_contract_entity.action_thresholds(); + assert_eq!( + updated_action_threshold.upgrade_management(), + &Weight::new(3u8) + ); + + let invalid_upgrade_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + UPGRADE_THRESHOLD_UPGRADER, + runtime_args! { + ARG_CONTRACT_PACKAGE => upgrade_threshold_package_hash + }, + ) + .build(); + + builder.exec(invalid_upgrade_request).expect_failure(); + + builder.assert_error(engine_state::Error::Exec( + ExecError::UpgradeAuthorizationFailure, + )); + + let authorization_keys = { + entity_account_hashes.push(*DEFAULT_ACCOUNT_ADDR); + entity_account_hashes + }; + + let valid_upgrade_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + UPGRADE_THRESHOLD_UPGRADER, + runtime_args! 
{ + ARG_CONTRACT_PACKAGE => upgrade_threshold_package_hash + }, + ) + .with_authorization_keys(authorization_keys.into_iter().collect()) + .build(); + + builder + .exec(valid_upgrade_request) + .expect_success() + .commit(); +} + +fn setup_upgrade_threshold_state() -> (LmdbWasmTestBuilder, AccountHash) { + const ACCOUNT_1_ADDR: AccountHash = AccountHash::new([1u8; 32]); + const UPGRADE_THRESHOLDS_FIXTURE: &str = "upgrade_thresholds"; + + let (mut builder, lmdb_fixture_state, _temp_dir) = + crate::lmdb_fixture::builder_from_global_state_fixture_with_enable_ae( + UPGRADE_THRESHOLDS_FIXTURE, + true, + ); + + let current_protocol_version = lmdb_fixture_state.genesis_protocol_version(); + + let new_protocol_version = + ProtocolVersion::from_parts(current_protocol_version.value().major + 1, 0, 0); + + let activation_point = EraId::new(0u64); + + let mut upgrade_request = UpgradeRequestBuilder::new() + .with_current_protocol_version(current_protocol_version) + .with_new_protocol_version(new_protocol_version) + .with_activation_point(activation_point) + .with_new_gas_hold_handling(HoldBalanceHandling::Accrued) + .with_new_gas_hold_interval(24 * 60 * 60 * 60) + .with_enable_addressable_entity(true) + .build(); + + builder + .with_block_time(Timestamp::now().into()) + .upgrade_using_scratch(&mut upgrade_request) + .expect_upgrade_success(); + + let transfer = TransferRequestBuilder::new(MINIMUM_ACCOUNT_CREATION_BALANCE, ACCOUNT_1_ADDR) + .with_transfer_id(42) + .build(); + builder.transfer_and_commit(transfer).expect_success(); + + (builder, ACCOUNT_1_ADDR) +} + +#[ignore] +#[test] +fn should_correctly_set_upgrade_threshold_on_entity_upgrade() { + let (mut builder, entity_1) = setup_upgrade_threshold_state(); + + if !builder.chainspec().core_config.enable_addressable_entity { + return; + } + + let default_addressable_entity = builder + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .expect("must have default entity"); + + let entity_hash = 
default_addressable_entity + .named_keys() + .get(PURSE_HOLDER_STORED_CONTRACT_NAME) + // We use hash addr as the migration hasn't occurred. + .map(|holder_key| holder_key.into_hash_addr().map(AddressableEntityHash::new)) + .unwrap() + .expect("must convert to hash"); + + let stored_package_hash = default_addressable_entity + .named_keys() + .get(HASH_KEY_NAME) + .expect("should have stored package hash") + .into_hash_addr() + .map(PackageHash::new) + .expect("should have hash"); + + let exec_request = ExecuteRequestBuilder::standard( + entity_1, + &format!("{}.wasm", PURSE_HOLDER_STORED_CALLER_CONTRACT_NAME), + runtime_args! { + ENTRY_POINT_NAME => VERSION, + HASH_KEY_NAME => entity_hash + }, + ) + .build(); + + builder.exec(exec_request).expect_success().commit(); + + let purse_holder_as_entity = builder + .get_addressable_entity(entity_hash) + .expect("must have purse holder entity hash"); + + let purse_holder_main_purse_before = purse_holder_as_entity.main_purse(); + + let actual_associated_keys = purse_holder_as_entity.associated_keys(); + + assert!(actual_associated_keys.is_empty()); + + let upgrade_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + &format!("{}.wasm", PURSE_HOLDER_STORED_UPGRADER_CONTRACT_NAME), + runtime_args! 
{ + ARG_CONTRACT_PACKAGE => stored_package_hash + }, + ) + .build(); + + builder.exec(upgrade_request).expect_success().commit(); + + let new_entity_hash = builder + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .expect("must have entity") + .named_keys() + .get(PURSE_HOLDER_STORED_CONTRACT_NAME) + .map(|key| key.into_entity_hash_addr().map(AddressableEntityHash::new)) + .unwrap() + .expect("must get contract hash"); + + let updated_purse_entity = builder + .get_addressable_entity(new_entity_hash) + .expect("must have purse holder entity hash"); + + let updated_entity_main_purse = updated_purse_entity.main_purse(); + let actual_associated_keys = updated_purse_entity.associated_keys(); + + let expect_associated_keys = AssociatedKeys::new(*DEFAULT_ACCOUNT_ADDR, Weight::new(1)); + + assert_eq!(purse_holder_main_purse_before, updated_entity_main_purse); + assert_eq!(actual_associated_keys, &expect_associated_keys); +} + +#[allow(clippy::enum_variant_names)] +enum MigrationScenario { + ByContractHash, + ByContractName, + ByPackageHash(Option), + ByPackageName(Option), + ByUpgrader, +} + +fn call_and_migrate_purse_holder_contract(migration_scenario: MigrationScenario) { + let (mut builder, _) = setup_upgrade_threshold_state(); + + if !builder.chainspec().core_config.enable_addressable_entity { + return; + } + + let runtime_args = runtime_args! 
{ + PURSE_NAME_ARG_NAME => PURSE_1 + }; + + let default_addressable_entity = builder + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .expect("must have default entity"); + + let entity_hash = default_addressable_entity + .named_keys() + .get(PURSE_HOLDER_STORED_CONTRACT_NAME) + .map(|holder_key| holder_key.into_hash_addr().map(AddressableEntityHash::new)) + .unwrap() + .expect("must convert to hash"); + + let package_hash = default_addressable_entity + .named_keys() + .get(HASH_KEY_NAME) + .expect("must have package named key entry") + .into_hash_addr() + .map(PackageHash::new) + .unwrap(); + + let execute_request = match migration_scenario { + MigrationScenario::ByPackageName(maybe_contract_version) => { + ExecuteRequestBuilder::versioned_contract_call_by_name( + *DEFAULT_ACCOUNT_ADDR, + HASH_KEY_NAME, + maybe_contract_version, + ENTRY_POINT_ADD, + runtime_args, + ) + .build() + } + MigrationScenario::ByPackageHash(maybe_contract_version) => { + ExecuteRequestBuilder::versioned_contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + package_hash, + maybe_contract_version, + ENTRY_POINT_ADD, + runtime_args, + ) + .build() + } + MigrationScenario::ByContractHash => ExecuteRequestBuilder::contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + entity_hash, + ENTRY_POINT_ADD, + runtime_args, + ) + .build(), + MigrationScenario::ByContractName => ExecuteRequestBuilder::contract_call_by_name( + *DEFAULT_ACCOUNT_ADDR, + PURSE_HOLDER_STORED_CONTRACT_NAME, + ENTRY_POINT_ADD, + runtime_args, + ) + .build(), + MigrationScenario::ByUpgrader => ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + &format!("{}.wasm", PURSE_HOLDER_STORED_UPGRADER_CONTRACT_NAME), + runtime_args! 
{ + ARG_CONTRACT_PACKAGE => package_hash + }, + ) + .build(), + }; + + builder.exec(execute_request).expect_success().commit(); + + let updated_entity = builder + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) + .expect("must have default entity"); + + let updated_key = updated_entity + .named_keys() + .get(PURSE_HOLDER_STORED_CONTRACT_NAME) + .expect("must have updated entity"); + + let updated_hash = if let MigrationScenario::ByUpgrader = migration_scenario { + updated_key.into_entity_hash() + } else { + updated_key.into_hash_addr().map(AddressableEntityHash::new) + } + .expect("must get entity hash"); + + let updated_purse_entity = builder + .get_addressable_entity(updated_hash) + .expect("must have purse holder entity hash"); + + let actual_associated_keys = updated_purse_entity.associated_keys(); + if let MigrationScenario::ByUpgrader = migration_scenario { + let expect_associated_keys = AssociatedKeys::new(*DEFAULT_ACCOUNT_ADDR, Weight::new(1)); + assert_eq!(actual_associated_keys, &expect_associated_keys); + // Post migration by upgrade there should be previous + 1 versions + // present in the package. 
(previous = 1) + let version_count = builder + .get_package(package_hash) + .expect("must have package") + .versions() + .version_count(); + + assert_eq!(version_count, 2usize); + } else { + assert_eq!(actual_associated_keys, &AssociatedKeys::default()); + } +} + +#[ignore] +#[test] +fn should_correct_migrate_contract_when_invoked_by_package_name() { + call_and_migrate_purse_holder_contract(MigrationScenario::ByPackageName(None)) +} + +#[ignore] +#[test] +fn should_correctly_migrate_contract_when_invoked_by_name_and_version() { + call_and_migrate_purse_holder_contract(MigrationScenario::ByPackageName(Some(INITIAL_VERSION))) +} + +#[ignore] +#[test] +fn should_correct_migrate_contract_when_invoked_by_package_hash() { + call_and_migrate_purse_holder_contract(MigrationScenario::ByPackageHash(None)) +} + +#[ignore] +#[test] +fn should_correct_migrate_contract_when_invoked_by_package_hash_and_specific_version() { + call_and_migrate_purse_holder_contract(MigrationScenario::ByPackageHash(Some(INITIAL_VERSION))) +} + +#[ignore] +#[test] +fn should_correctly_migrate_contract_when_invoked_by_contract_hash() { + call_and_migrate_purse_holder_contract(MigrationScenario::ByContractHash) +} + +#[ignore] +#[test] +fn should_correctly_migrate_contract_when_invoked_by_contract_name() { + call_and_migrate_purse_holder_contract(MigrationScenario::ByContractName) +} + +#[ignore] +#[test] +fn should_correctly_migrate_and_upgrade_with_upgrader() { + call_and_migrate_purse_holder_contract(MigrationScenario::ByUpgrader) +} + +#[ignore] +#[test] +fn should_correctly_retain_disabled_contract_version() { + const DISABLED_VERSIONS_FIX: &str = "disabled_versions"; + + let (mut builder, lmdb_fixture_state, _temp_dir) = + lmdb_fixture::builder_from_global_state_fixture(DISABLED_VERSIONS_FIX); + + let previous_protocol_version = lmdb_fixture_state.genesis_protocol_version(); + + let new_protocol_version = + ProtocolVersion::from_parts(previous_protocol_version.value().major + 1, 0, 0); + + let 
activation_point = EraId::new(0u64); + + let mut upgrade_request = UpgradeRequestBuilder::new() + .with_current_protocol_version(previous_protocol_version) + .with_new_protocol_version(new_protocol_version) + .with_activation_point(activation_point) + .with_new_gas_hold_handling(HoldBalanceHandling::Accrued) + .with_new_gas_hold_interval(24 * 60 * 60 * 60) + .with_enable_addressable_entity(true) + .build(); + + builder + .with_block_time(Timestamp::now().into()) + .upgrade_using_scratch(&mut upgrade_request) + .expect_upgrade_success(); + + let exec_request = { + let contract_name = format!("{}.wasm", "do_nothing_stored_upgrader"); + ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + &contract_name, + RuntimeArgs::default(), + ) + .build() + }; + + builder.exec(exec_request).expect_success().commit(); + + let contract_package = builder + .query( + None, + Key::Account(*DEFAULT_ACCOUNT_ADDR), + &["do_nothing_package_hash".to_string()], + ) + .expect("must have stored value") + .as_contract_package() + .expect("must have contract_package") + .clone(); + + assert_eq!(contract_package.versions().len(), 3); + + let disabled_version_key = contract_package + .disabled_versions() + .first() + .expect("must have disabled version key"); + + let disabled_contract_hash = contract_package + .versions() + .get(disabled_version_key) + .expect("package must contain one disabled hash"); + + let exec_request = ExecuteRequestBuilder::contract_call_by_hash( + *DEFAULT_ACCOUNT_ADDR, + AddressableEntityHash::new(disabled_contract_hash.value()), + "delegate", + runtime_args! 
{ + "purse_name" => "purse_2" + }, + ) + .build(); + + builder.exec(exec_request).expect_failure(); +} + +fn setup_state_for_version_tests( + should_trap_on_ambiguous_entity_version: bool, +) -> (LmdbWasmTestBuilder, ContractPackageHash) { + const THREE_VERSION_FIXTURE: &str = "three_version_fixture"; + + let (mut builder, lmdb_fixture_state, _temp_dir) = + lmdb_fixture::builder_from_global_state_fixture(THREE_VERSION_FIXTURE); + + let previous_protocol_version = lmdb_fixture_state.genesis_protocol_version(); + + let new_protocol_version = + ProtocolVersion::from_parts(previous_protocol_version.value().major + 1, 0, 0); + + let activation_point = EraId::new(0u64); + + let mut upgrade_request = UpgradeRequestBuilder::new() + .with_current_protocol_version(previous_protocol_version) + .with_new_protocol_version(new_protocol_version) + .with_activation_point(activation_point) + .with_new_gas_hold_handling(HoldBalanceHandling::Accrued) + .with_new_gas_hold_interval(24 * 60 * 60 * 60) + .with_enable_addressable_entity(false) + .build(); + + let config = EngineConfigBuilder::new() + .with_trap_on_ambiguous_entity_version(should_trap_on_ambiguous_entity_version) + .build(); + + builder + .with_block_time(Timestamp::now().into()) + .upgrade_using_scratch(&mut upgrade_request) + .expect_upgrade_success(); + + builder.with_engine_config(config); + + let account = builder + .query(None, Key::Account(*DEFAULT_ACCOUNT_ADDR), &[]) + .expect("must have account as stored value") + .as_account() + .expect("have account") + .to_owned(); + + let contract_package_hash = account + .named_keys() + .get("purse_holder") + .expect("must have key") + .into_hash_addr() + .map(ContractPackageHash::new) + .expect("must have package hash"); + + (builder, contract_package_hash) +} + +fn execute_no_major_some_entity_version_calls(trap_on_ambiguous_entity_version: bool) { + let (mut builder, contract_package_hash) = + setup_state_for_version_tests(trap_on_ambiguous_entity_version); + + let config 
= builder.engine_config(); + + let actual_trap_on_ambiguous_entity_version = config.trap_on_ambiguous_entity_version(); + assert_eq!( + trap_on_ambiguous_entity_version, + actual_trap_on_ambiguous_entity_version + ); + + let runtime_args = runtime_args! { + "contract_package_hash" => contract_package_hash, + "version" => Some(1), + "major_version" => None::, + "entry_point" => "add_named_purse".to_string(), + "purse_name" => "v_1_1_purse", + }; + + let contract_name = format!("{}.wasm", "call_package_version_by_hash"); + let exec_request = + ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, &contract_name, runtime_args) + .build(); + + builder.exec(exec_request).expect_success().commit(); + + let runtime_args = runtime_args! { + "contract_package_hash" => contract_package_hash, + "version" => Some(2), + "major_version" => None::, + "entry_point" => "add".to_string(), + "purse_name" => "v_1_2_purse", + }; + + let contract_name = format!("{}.wasm", "call_package_version_by_hash"); + let exec_request = + ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, &contract_name, runtime_args) + .build(); + + builder.exec(exec_request).expect_success().commit(); + + let runtime_args = runtime_args! { + "contract_package_hash" => contract_package_hash, + "version" => Some(3), + "major_version" => None::, + "entry_point" => "add".to_string(), + "purse_name" => "v_1_3_purse", + }; + + let contract_name = format!("{}.wasm", "call_package_version_by_hash"); + let exec_request = + ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, &contract_name, runtime_args) + .build(); + + builder.exec(exec_request).expect_success().commit(); + + let runtime_args = runtime_args! 
{ + "contract_package" => contract_package_hash + }; + let exec_request = { + let contract_name = format!("{}.wasm", "purse_holder_stored_upgrader"); + ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, &contract_name, runtime_args).build() + }; + + builder.exec(exec_request).expect_success().commit(); + + let runtime_args = runtime_args! { + "contract_package_hash" => contract_package_hash, + "version" => Some(1), + "major_version" => None::, + "entry_point" => "add".to_string(), + "purse_name" => "v_1_1_purse", + }; + + let contract_name = format!("{}.wasm", "call_package_version_by_hash"); + let exec_request = + ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, &contract_name, runtime_args) + .build(); + + if actual_trap_on_ambiguous_entity_version { + builder.exec(exec_request).expect_failure(); + let expected_error = Error::Exec(ExecError::AmbiguousEntityVersion); + builder.assert_error(expected_error); + return; + } + + builder.exec(exec_request).expect_success().commit(); + + let runtime_args = runtime_args! { + "contract_package_hash" => contract_package_hash, + "version" => Some(1), + "major_version" => Some(1), + "entry_point" => "add_named_purse".to_string(), + "purse_name" => "v_1_1_purse", + }; + + let contract_name = format!("{}.wasm", "call_package_version_by_hash"); + let exec_request = + ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, &contract_name, runtime_args) + .build(); + + builder.exec(exec_request).expect_success().commit(); + + let runtime_args = runtime_args! 
{ + "contract_package_hash" => contract_package_hash, + "version" => None::, + "major_version" => None::, + "entry_point" => "add".to_string(), + "purse_name" => "v_1_1_purse", + }; + + let contract_name = format!("{}.wasm", "call_package_version_by_hash"); + let exec_request = + ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, &contract_name, runtime_args) + .build(); + + builder.exec(exec_request).expect_success().commit(); + + let contract_package = builder + .query(None, Key::Hash(contract_package_hash.value()), &[]) + .expect("must have contract package as stored value") + .into_contract_package() + .expect("must get contract package"); + + let disable_hash = contract_package + .current_contract_hash() + .expect("must get hash"); + + let runtime_args = runtime_args! { + "contract_package_hash" => contract_package_hash, + "contract_hash" => disable_hash, + }; + + let contract_name = format!("{}.wasm", "disable_contract_by_contract_hash"); + let exec_request = + ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, &contract_name, runtime_args) + .build(); + + builder.exec(exec_request).expect_success().commit(); + + let runtime_args = runtime_args! 
{ + "contract_package_hash" => contract_package_hash, + "version" => Some(1), + "major_version" => None::, + "entry_point" => "add_named_purse".to_string(), + "purse_name" => "v_1_1_purse", + }; + + let contract_name = format!("{}.wasm", "call_package_version_by_hash"); + let exec_request = + ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, &contract_name, runtime_args) + .build(); + + builder.exec(exec_request).expect_success().commit(); +} + +#[ignore] +#[test] +fn should_correctly_manage_entity_version_calls_with_error_flag_off() { + execute_no_major_some_entity_version_calls(false) +} + +#[ignore] +#[test] +fn should_correctly_return_error_for_multiple_entity_versions() { + execute_no_major_some_entity_version_calls(true) +} + +#[ignore] +#[test] +fn should_call_correct_version_when_specifying_only_major_version() { + let (mut builder, contract_package_hash) = setup_state_for_version_tests(false); + + // There are three 1.x versions in the package. + // The 1.1 version has an entry point `add_named_purse` while the 1.2 and 1.3 + // rename the entry point to `add` + // Thus a call specifying 1.1 should work, however as per the rules, if 1.* + // is specified, then 1.3 should be invoked and the call should fail with + // the 1.1 entry point name. + let runtime_args = runtime_args! { + "contract_package_hash" => contract_package_hash, + "version" => Some(1), + "major_version" => Some(1), + "entry_point" => "add_named_purse".to_string(), + "purse_name" => "v_1_1_purse", + }; + + let contract_name = format!("{}.wasm", "call_package_version_by_hash"); + let exec_request = + ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, &contract_name, runtime_args) + .build(); + + builder.exec(exec_request).expect_success().commit(); + + let runtime_args = runtime_args! 
{ + "contract_package_hash" => contract_package_hash, + "version" => None::, + "major_version" => Some(1), + "entry_point" => "add_named_purse".to_string(), + "purse_name" => "v_1_1_purse", + }; + + let contract_name = format!("{}.wasm", "call_package_version_by_hash"); + let exec_request = + ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, &contract_name, runtime_args) + .build(); + + builder.exec(exec_request).expect_failure(); + + let expected_error = Error::Exec(ExecError::NoSuchMethod("add_named_purse".to_string())); + + builder.assert_error(expected_error); + + let runtime_args = runtime_args! { + "contract_package_hash" => contract_package_hash, + "version" => None::, + "major_version" => Some(1), + "entry_point" => "add".to_string(), + "purse_name" => "v_1_1_purse", + }; + + let contract_name = format!("{}.wasm", "call_package_version_by_hash"); + let exec_request = + ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, &contract_name, runtime_args) + .build(); + + builder.exec(exec_request).expect_success().commit(); +} + +#[ignore] +#[test] +fn should_correctly_invoke_version_in_package_when_no_versions_are_specified() { + let (mut builder, contract_package_hash) = setup_state_for_version_tests(false); + + let runtime_args = runtime_args! { + "contract_package_hash" => contract_package_hash, + "version" => None::, + "major_version" => None::, + "entry_point" => "add".to_string(), + "purse_name" => "v_1_1_purse", + }; + + let contract_name = format!("{}.wasm", "call_package_version_by_hash"); + let exec_request = + ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, &contract_name, runtime_args) + .build(); + + builder.exec(exec_request).expect_success().commit(); + + let runtime_args = runtime_args! 
{ + "contract_package" => contract_package_hash + }; + let exec_request = { + let contract_name = format!("{}.wasm", "purse_holder_stored_upgrader_v2_2"); + ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, &contract_name, runtime_args).build() + }; + + builder.exec(exec_request).expect_success().commit(); + + let runtime_args = runtime_args! { + "contract_package_hash" => contract_package_hash, + "version" => None::, + "major_version" => None::, + "entry_point" => "add".to_string(), + "purse_name" => "v_1_1_purse", + }; + + let contract_name = format!("{}.wasm", "call_package_version_by_hash"); + let exec_request = + ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, &contract_name, runtime_args) + .build(); + + builder.exec(exec_request).expect_failure(); + + let expected_error = Error::Exec(ExecError::NoSuchMethod("add".to_string())); + + builder.assert_error(expected_error); + + let runtime_args = runtime_args! { + "contract_package_hash" => contract_package_hash, + "version" => None::, + "major_version" => None::, + "entry_point" => "delegate".to_string(), + "purse_name" => "v_1_1_purse", + }; + + let contract_name = format!("{}.wasm", "call_package_version_by_hash"); + let exec_request = + ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, &contract_name, runtime_args) + .build(); + + builder.exec(exec_request).expect_success().commit(); +} + +fn should_not_require_subsequent_cases(trap: bool) { + let (mut builder, contract_package_hash) = setup_state_for_version_tests(trap); + + let previous_protocol_version = builder.engine_config().protocol_version(); + + let new_protocol_version = + ProtocolVersion::from_parts(previous_protocol_version.value().major + 1, 0, 0); + + let activation_point = EraId::new(0u64); + + let mut upgrade_request = UpgradeRequestBuilder::new() + .with_current_protocol_version(previous_protocol_version) + .with_new_protocol_version(new_protocol_version) + .with_activation_point(activation_point) + 
.with_new_gas_hold_handling(HoldBalanceHandling::Accrued) + .with_new_gas_hold_interval(24 * 60 * 60 * 60) + .with_enable_addressable_entity(false) + .build(); + + builder + .with_block_time(Timestamp::now().into()) + .upgrade_using_scratch(&mut upgrade_request) + .expect_upgrade_success(); + + let config = EngineConfigBuilder::new() + .with_protocol_version(new_protocol_version) + .with_trap_on_ambiguous_entity_version(trap) + .build(); + + builder.with_engine_config(config); + + let config = builder.engine_config(); + let protocol_version = config.protocol_version(); + + let runtime_args = runtime_args! { + "contract_package" => contract_package_hash + }; + let exec_request = { + let contract_name = format!("{}.wasm", "purse_holder_stored_upgrader_v2_2"); + ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, &contract_name, runtime_args) + .with_protocol_version(protocol_version) + .build() + }; + + builder.exec(exec_request).expect_success().commit(); + + let contract_package = builder + .query(None, Key::Hash(contract_package_hash.value()), &[]) + .expect("must get package as stored value") + .into_contract_package() + .expect("must get package"); + let current_version = contract_package + .current_contract_version() + .expect("must have the latest current version"); + + assert_eq!(current_version.protocol_version_major(), 3); + + let runtime_args = runtime_args! 
{ + "contract_package_hash" => contract_package_hash, + "version" => Some(1), + "major_version" => None::, + "entry_point" => "delegate".to_string(), + "purse_name" => "v_1_1_purse", + }; + + let contract_name = format!("{}.wasm", "call_package_version_by_hash"); + let exec_request = + ExecuteRequestBuilder::standard(*DEFAULT_ACCOUNT_ADDR, &contract_name, runtime_args) + .with_protocol_version(protocol_version) + .build(); + + if trap { + builder.exec(exec_request).expect_failure(); + let expected_error = Error::Exec(ExecError::AmbiguousEntityVersion); + builder.assert_error(expected_error); + } else { + builder.exec(exec_request).expect_success().commit(); + } +} + +#[ignore] +#[test] +fn should_not_require_subsequent_increasing_versions_to_correctly_identify_version_key_with_trap_set( +) { + should_not_require_subsequent_cases(true) +} + +#[ignore] +#[test] +fn should_not_require_subsequent_increasing_versions_to_correctly_identify_version_key_with_trap_unset( +) { + should_not_require_subsequent_cases(false) +} diff --git a/node/src/components/consensus/era_supervisor/era_id.rs b/execution_engine_testing/tests/src/test/vm2_tests.rs similarity index 100% rename from node/src/components/consensus/era_supervisor/era_id.rs rename to execution_engine_testing/tests/src/test/vm2_tests.rs diff --git a/execution_engine_testing/tests/src/test/wasmless_transfer.rs b/execution_engine_testing/tests/src/test/wasmless_transfer.rs index 9520c9d92c..502b03abf0 100644 --- a/execution_engine_testing/tests/src/test/wasmless_transfer.rs +++ b/execution_engine_testing/tests/src/test/wasmless_transfer.rs @@ -1,40 +1,39 @@ +use once_cell::sync::Lazy; + use casper_engine_test_support::{ - internal::{ - DeployItemBuilder, ExecuteRequestBuilder, InMemoryWasmTestBuilder, UpgradeRequestBuilder, - DEFAULT_PAYMENT, DEFAULT_PROTOCOL_VERSION, DEFAULT_RUN_GENESIS_REQUEST, - }, - DEFAULT_ACCOUNT_ADDR, + ExecuteRequestBuilder, LmdbWasmTestBuilder, TransferRequestBuilder, DEFAULT_ACCOUNT_ADDR, + 
DEFAULT_PAYMENT, LOCAL_GENESIS_REQUEST, }; -use casper_execution_engine::{ - core::{ - engine_state::{Error as CoreError, WASMLESS_TRANSFER_FIXED_GAS_PRICE}, - execution::Error as ExecError, - }, - shared::{ - gas::Gas, - motes::Motes, - system_config::{ - auction_costs::AuctionCosts, handle_payment_costs::HandlePaymentCosts, - mint_costs::MintCosts, standard_payment_costs::StandardPaymentCosts, SystemConfig, - }, - }, - storage::protocol_data::DEFAULT_WASMLESS_TRANSFER_COST, +use casper_execution_engine::engine_state::{ + Error as CoreError, WASMLESS_TRANSFER_FIXED_GAS_PRICE, }; +use casper_storage::system::transfer::TransferError; use casper_types::{ account::AccountHash, runtime_args, system::{handle_payment, mint}, - AccessRights, ApiError, EraId, Key, ProtocolVersion, RuntimeArgs, URef, U512, + AccessRights, Gas, Key, MintCosts, Motes, PublicKey, SecretKey, URef, U512, }; const CONTRACT_TRANSFER_PURSE_TO_ACCOUNT: &str = "transfer_purse_to_account.wasm"; +const CONTRACT_NEW_NAMED_UREF: &str = "new_named_uref.wasm"; const CONTRACT_CREATE_PURSE_01: &str = "create_purse_01.wasm"; -const TRANSFER_RESULT_NAMED_KEY: &str = "transfer_result"; +const NON_UREF_NAMED_KEY: &str = "transfer_result"; const TEST_PURSE_NAME: &str = "test_purse"; const ARG_PURSE_NAME: &str = "purse_name"; +const ARG_UREF_NAME: &str = "uref_name"; + +static ACCOUNT_1_SECRET_KEY: Lazy = + Lazy::new(|| SecretKey::secp256k1_from_bytes([234u8; 32]).unwrap()); +static ACCOUNT_1_PUBLIC_KEY: Lazy = + Lazy::new(|| PublicKey::from(&*ACCOUNT_1_SECRET_KEY)); +static ACCOUNT_1_ADDR: Lazy = Lazy::new(|| ACCOUNT_1_PUBLIC_KEY.to_account_hash()); -const ACCOUNT_1_ADDR: AccountHash = AccountHash::new([1u8; 32]); -const ACCOUNT_2_ADDR: AccountHash = AccountHash::new([2u8; 32]); +static ACCOUNT_2_SECRET_KEY: Lazy = + Lazy::new(|| SecretKey::secp256k1_from_bytes([210u8; 32]).unwrap()); +static ACCOUNT_2_PUBLIC_KEY: Lazy = + Lazy::new(|| PublicKey::from(&*ACCOUNT_2_SECRET_KEY)); +static ACCOUNT_2_ADDR: Lazy = 
Lazy::new(|| ACCOUNT_2_PUBLIC_KEY.to_account_hash()); #[ignore] #[test] @@ -42,6 +41,12 @@ fn should_transfer_wasmless_account_to_purse() { transfer_wasmless(WasmlessTransfer::AccountMainPurseToPurse); } +#[ignore] +#[test] +fn should_transfer_wasmless_account_to_public_key() { + transfer_wasmless(WasmlessTransfer::AccountMainPurseToPublicKeyMainPurse); +} + #[ignore] #[test] fn should_transfer_wasmless_account_to_account() { @@ -60,6 +65,12 @@ fn should_transfer_wasmless_purse_to_purse() { transfer_wasmless(WasmlessTransfer::PurseToPurse); } +#[ignore] +#[test] +fn should_transfer_wasmless_purse_to_public_key() { + transfer_wasmless(WasmlessTransfer::PurseToPublicKey); +} + #[ignore] #[test] fn should_transfer_wasmless_amount_as_u64() { @@ -69,7 +80,9 @@ fn should_transfer_wasmless_amount_as_u64() { enum WasmlessTransfer { AccountMainPurseToPurse, AccountMainPurseToAccountMainPurse, + AccountMainPurseToPublicKeyMainPurse, PurseToPurse, + PurseToPublicKey, AccountToAccountByKey, AmountAsU64, } @@ -81,12 +94,12 @@ fn transfer_wasmless(wasmless_transfer: WasmlessTransfer) { let id: Option = None; let account_1_purse = builder - .get_account(ACCOUNT_1_ADDR) + .get_entity_by_account_hash(*ACCOUNT_1_ADDR) .expect("should get account 1") .main_purse(); let account_2_purse = builder - .get_account(ACCOUNT_2_ADDR) + .get_entity_by_account_hash(*ACCOUNT_2_ADDR) .expect("should get account 2") .main_purse(); @@ -103,14 +116,21 @@ fn transfer_wasmless(wasmless_transfer: WasmlessTransfer) { } WasmlessTransfer::AccountMainPurseToAccountMainPurse => { runtime_args! { - mint::ARG_TARGET => ACCOUNT_2_ADDR, + mint::ARG_TARGET => *ACCOUNT_2_ADDR, + mint::ARG_AMOUNT => transfer_amount, + mint::ARG_ID => id + } + } + WasmlessTransfer::AccountMainPurseToPublicKeyMainPurse => { + runtime_args! { + mint::ARG_TARGET => ACCOUNT_2_PUBLIC_KEY.clone(), mint::ARG_AMOUNT => transfer_amount, mint::ARG_ID => id } } WasmlessTransfer::AccountToAccountByKey => { runtime_args! 
{ - mint::ARG_TARGET => Key::Account(ACCOUNT_2_ADDR), + mint::ARG_TARGET => Key::Account(*ACCOUNT_2_ADDR), mint::ARG_AMOUNT => transfer_amount, mint::ARG_ID => id } @@ -123,6 +143,14 @@ fn transfer_wasmless(wasmless_transfer: WasmlessTransfer) { mint::ARG_ID => id } } + WasmlessTransfer::PurseToPublicKey => { + runtime_args! { + mint::ARG_SOURCE => account_1_purse, + mint::ARG_TARGET => ACCOUNT_2_PUBLIC_KEY.clone(), + mint::ARG_AMOUNT => transfer_amount, + mint::ARG_ID => id + } + } WasmlessTransfer::AmountAsU64 => { runtime_args! { mint::ARG_SOURCE => account_1_purse, @@ -133,30 +161,17 @@ fn transfer_wasmless(wasmless_transfer: WasmlessTransfer) { } }; - let no_wasm_transfer_request = { - let deploy_item = DeployItemBuilder::new() - .with_address(ACCOUNT_1_ADDR) - .with_empty_payment_bytes(runtime_args! {}) - .with_transfer_args(runtime_args) - .with_authorization_keys(&[ACCOUNT_1_ADDR]) - .build(); - ExecuteRequestBuilder::from_deploy_item(deploy_item).build() - }; + let no_wasm_transfer_request = TransferRequestBuilder::new(0, AccountHash::default()) + .with_args(runtime_args) + .with_initiator(*ACCOUNT_1_ADDR) + .build(); builder - .exec(no_wasm_transfer_request) - .expect_success() - .commit(); - - let wasmless_transfer_gas_cost = Gas::from(DEFAULT_WASMLESS_TRANSFER_COST); - let wasmless_transfer_cost = Motes::from_gas( - wasmless_transfer_gas_cost, - WASMLESS_TRANSFER_FIXED_GAS_PRICE, - ) - .expect("gas overflow"); + .transfer_and_commit(no_wasm_transfer_request) + .expect_success(); assert_eq!( - account_1_starting_balance - transfer_amount - wasmless_transfer_cost.value(), + account_1_starting_balance - transfer_amount, builder.get_purse_balance(account_1_purse), "account 1 ending balance incorrect" ); @@ -167,11 +182,10 @@ fn transfer_wasmless(wasmless_transfer: WasmlessTransfer) { ); // Make sure postconditions are met: payment purse has to be empty after finalization - let handle_payment = builder.get_handle_payment_contract_hash(); - let contract = 
builder - .get_contract(handle_payment) - .expect("should have contract"); - let key = contract + + let handle_payment_entity = builder.get_handle_payment_contract(); + + let key = handle_payment_entity .named_keys() .get(handle_payment::PAYMENT_PURSE_KEY) .cloned() @@ -287,126 +301,124 @@ fn invalid_transfer_wasmless(invalid_wasmless_transfer: InvalidWasmlessTransfer) InvalidWasmlessTransfer::TransferToSelfByAddr => { // same source and target purse is invalid ( - ACCOUNT_1_ADDR, + *ACCOUNT_1_ADDR, runtime_args! { - mint::ARG_TARGET => ACCOUNT_1_ADDR, + mint::ARG_TARGET => *ACCOUNT_1_ADDR, mint::ARG_AMOUNT => transfer_amount, mint::ARG_ID => id, }, - CoreError::Exec(ExecError::Revert(ApiError::InvalidPurse)), + CoreError::Transfer(TransferError::InvalidPurse), ) } InvalidWasmlessTransfer::TransferToSelfByKey => { // same source and target purse is invalid ( - ACCOUNT_1_ADDR, + *ACCOUNT_1_ADDR, runtime_args! { - mint::ARG_TARGET => Key::Account(ACCOUNT_1_ADDR), + mint::ARG_TARGET => Key::Account(*ACCOUNT_1_ADDR), mint::ARG_AMOUNT => transfer_amount, mint::ARG_ID => id }, - CoreError::Exec(ExecError::Revert(ApiError::InvalidPurse)), + CoreError::Transfer(TransferError::InvalidPurse), ) } InvalidWasmlessTransfer::TransferToSelfByURef => { let account_1_purse = builder - .get_account(ACCOUNT_1_ADDR) + .get_entity_by_account_hash(*ACCOUNT_1_ADDR) .expect("should get account 1") .main_purse(); // same source and target purse is invalid ( - ACCOUNT_1_ADDR, + *ACCOUNT_1_ADDR, runtime_args! { mint::ARG_TARGET => account_1_purse, mint::ARG_AMOUNT => transfer_amount, mint::ARG_ID => id }, - CoreError::Exec(ExecError::Revert(ApiError::InvalidPurse)), + CoreError::Transfer(TransferError::InvalidPurse), ) } InvalidWasmlessTransfer::OtherSourceAccountByAddr => { // passes another account's addr as source ( - ACCOUNT_1_ADDR, + *ACCOUNT_1_ADDR, runtime_args! 
{ - mint::ARG_SOURCE => ACCOUNT_2_ADDR, - mint::ARG_TARGET => ACCOUNT_1_ADDR, + mint::ARG_SOURCE => *ACCOUNT_2_ADDR, + mint::ARG_TARGET => *ACCOUNT_1_ADDR, mint::ARG_AMOUNT => transfer_amount, mint::ARG_ID => id }, - CoreError::Exec(ExecError::Revert(ApiError::InvalidArgument)), + CoreError::Transfer(TransferError::InvalidArgument), ) } InvalidWasmlessTransfer::OtherSourceAccountByKey => { // passes another account's Key::Account as source ( - ACCOUNT_1_ADDR, + *ACCOUNT_1_ADDR, runtime_args! { - mint::ARG_SOURCE => Key::Account(ACCOUNT_2_ADDR), - mint::ARG_TARGET => ACCOUNT_1_ADDR, + mint::ARG_SOURCE => Key::Account(*ACCOUNT_2_ADDR), + mint::ARG_TARGET => *ACCOUNT_1_ADDR, mint::ARG_AMOUNT => transfer_amount, mint::ARG_ID => id }, - CoreError::Exec(ExecError::Revert(ApiError::InvalidArgument)), + CoreError::Transfer(TransferError::InvalidArgument), ) } InvalidWasmlessTransfer::OtherSourceAccountByURef => { let account_2_purse = builder - .get_account(ACCOUNT_2_ADDR) + .get_entity_by_account_hash(*ACCOUNT_2_ADDR) .expect("should get account 1") .main_purse(); // passes another account's purse as source ( - ACCOUNT_1_ADDR, + *ACCOUNT_1_ADDR, runtime_args! { mint::ARG_SOURCE => account_2_purse, - mint::ARG_TARGET => ACCOUNT_1_ADDR, + mint::ARG_TARGET => *ACCOUNT_1_ADDR, mint::ARG_AMOUNT => transfer_amount, mint::ARG_ID => id }, - CoreError::Exec(ExecError::ForgedReference(account_2_purse)), + CoreError::Transfer(TransferError::ForgedReference(account_2_purse)), ) } InvalidWasmlessTransfer::MissingTarget => { // does not pass target ( - ACCOUNT_1_ADDR, + *ACCOUNT_1_ADDR, runtime_args! { mint::ARG_AMOUNT => transfer_amount, mint::ARG_ID => id }, - CoreError::Exec(ExecError::Revert(ApiError::MissingArgument)), + CoreError::Transfer(TransferError::MissingArgument), ) } InvalidWasmlessTransfer::MissingAmount => { // does not pass amount ( - ACCOUNT_1_ADDR, + *ACCOUNT_1_ADDR, runtime_args! 
{ - mint::ARG_TARGET => ACCOUNT_2_ADDR, + mint::ARG_TARGET => *ACCOUNT_2_ADDR, mint::ARG_ID => id }, - CoreError::Exec(ExecError::Revert(ApiError::MissingArgument)), + CoreError::Transfer(TransferError::MissingArgument), ) } InvalidWasmlessTransfer::SourceURefNotPurse => { - let not_purse_uref = - get_default_account_named_uref(&mut builder, TRANSFER_RESULT_NAMED_KEY); + let not_purse_uref = get_default_account_named_uref(&mut builder, NON_UREF_NAMED_KEY); // passes an invalid uref as source (an existing uref that is not a purse uref) ( *DEFAULT_ACCOUNT_ADDR, runtime_args! { mint::ARG_SOURCE => not_purse_uref, - mint::ARG_TARGET => ACCOUNT_1_ADDR, + mint::ARG_TARGET => *ACCOUNT_1_ADDR, mint::ARG_AMOUNT => transfer_amount, mint::ARG_ID => id }, - CoreError::Exec(ExecError::Revert(ApiError::InvalidPurse)), + CoreError::Transfer(TransferError::InvalidPurse), ) } InvalidWasmlessTransfer::TargetURefNotPurse => { - let not_purse_uref = - get_default_account_named_uref(&mut builder, TRANSFER_RESULT_NAMED_KEY); + let not_purse_uref = get_default_account_named_uref(&mut builder, NON_UREF_NAMED_KEY); // passes an invalid uref as target (an existing uref that is not a purse uref) ( *DEFAULT_ACCOUNT_ADDR, @@ -415,7 +427,7 @@ fn invalid_transfer_wasmless(invalid_wasmless_transfer: InvalidWasmlessTransfer) mint::ARG_AMOUNT => transfer_amount, mint::ARG_ID => id }, - CoreError::Exec(ExecError::Revert(ApiError::InvalidPurse)), + CoreError::Transfer(TransferError::InvalidPurse), ) } InvalidWasmlessTransfer::SourceURefNonexistent => { @@ -424,82 +436,72 @@ fn invalid_transfer_wasmless(invalid_wasmless_transfer: InvalidWasmlessTransfer) // a caller passes a uref as source they are claiming it is a purse and that they have // write access to it / are allowed to take funds from it. ( - ACCOUNT_1_ADDR, + *ACCOUNT_1_ADDR, runtime_args! 
{ mint::ARG_SOURCE => nonexistent_purse, - mint::ARG_TARGET => ACCOUNT_1_ADDR, + mint::ARG_TARGET => *ACCOUNT_1_ADDR, mint::ARG_AMOUNT => transfer_amount, mint::ARG_ID => id }, - CoreError::Exec(ExecError::ForgedReference(nonexistent_purse)), + CoreError::Transfer(TransferError::ForgedReference(nonexistent_purse)), ) } InvalidWasmlessTransfer::TargetURefNonexistent => { let nonexistent_purse = URef::new([255; 32], AccessRights::READ_ADD_WRITE); // passes a nonexistent uref as target ( - ACCOUNT_1_ADDR, + *ACCOUNT_1_ADDR, runtime_args! { mint::ARG_TARGET => nonexistent_purse, mint::ARG_AMOUNT => transfer_amount, mint::ARG_ID => id }, - CoreError::Exec(ExecError::Revert(ApiError::InvalidPurse)), + CoreError::Transfer(TransferError::InvalidPurse), ) } InvalidWasmlessTransfer::OtherPurseToSelfPurse => { let account_1_purse = builder - .get_account(ACCOUNT_1_ADDR) + .get_entity_by_account_hash(*ACCOUNT_1_ADDR) .expect("should get account 1") .main_purse(); let account_2_purse = builder - .get_account(ACCOUNT_2_ADDR) + .get_entity_by_account_hash(*ACCOUNT_2_ADDR) .expect("should get account 1") .main_purse(); // attempts to take from an unowned purse ( - ACCOUNT_1_ADDR, + *ACCOUNT_1_ADDR, runtime_args! { mint::ARG_SOURCE => account_2_purse, mint::ARG_TARGET => account_1_purse, mint::ARG_AMOUNT => transfer_amount, mint::ARG_ID => id }, - CoreError::Exec(ExecError::ForgedReference(account_2_purse)), + CoreError::Transfer(TransferError::ForgedReference(account_2_purse)), ) } }; - let no_wasm_transfer_request = { - let deploy_item = DeployItemBuilder::new() - .with_address(addr) - .with_empty_payment_bytes(runtime_args! 
{}) - .with_transfer_args(runtime_args) - .with_authorization_keys(&[addr]) - .build(); - ExecuteRequestBuilder::from_deploy_item(deploy_item).build() - }; + let no_wasm_transfer_request = TransferRequestBuilder::new(0, AccountHash::default()) + .with_args(runtime_args) + .with_initiator(addr) + .build(); let account_1_purse = builder - .get_account(ACCOUNT_1_ADDR) + .get_entity_by_account_hash(*ACCOUNT_1_ADDR) .expect("should get account 1") .main_purse(); let account_1_starting_balance = builder.get_purse_balance(account_1_purse); - builder.exec(no_wasm_transfer_request); + builder.transfer_and_commit(no_wasm_transfer_request); let result = builder - .get_exec_results() - .last() - .expect("Expected to be called after run()") - .get(0) - .expect("Unable to get first deploy result"); + .get_last_exec_result() + .expect("Expected to be called after run()"); - assert!(result.is_failure(), "was expected to fail"); - - let error = result.as_error().expect("should have error"); + let error = result.error().expect("should have error"); let account_1_closing_balance = builder.get_purse_balance(account_1_purse); @@ -515,11 +517,8 @@ fn invalid_transfer_wasmless(invalid_wasmless_transfer: InvalidWasmlessTransfer) assert_eq!(account_1_starting_balance, account_1_closing_balance); // Make sure postconditions are met: payment purse has to be empty after finalization - let handle_payment = builder.get_handle_payment_contract_hash(); - let contract = builder - .get_contract(handle_payment) - .expect("should have contract"); - let key = contract + let handle_payment_entity = builder.get_handle_payment_contract(); + let key = handle_payment_entity .named_keys() .get(handle_payment::PAYMENT_PURSE_KEY) .cloned() @@ -534,59 +533,39 @@ fn invalid_transfer_wasmless(invalid_wasmless_transfer: InvalidWasmlessTransfer) #[ignore] #[test] fn transfer_wasmless_should_create_target_if_it_doesnt_exist() { - let wasmless_transfer_gas_cost = Gas::from(DEFAULT_WASMLESS_TRANSFER_COST); - let 
wasmless_transfer_cost = Motes::from_gas( - wasmless_transfer_gas_cost, - WASMLESS_TRANSFER_FIXED_GAS_PRICE, - ) - .expect("gas overflow"); - let create_account_2: bool = false; let mut builder = init_wasmless_transform_builder(create_account_2); let transfer_amount: U512 = U512::from(1000); let account_1_purse = builder - .get_account(ACCOUNT_1_ADDR) + .get_entity_by_account_hash(*ACCOUNT_1_ADDR) .expect("should get account 1") .main_purse(); assert_eq!( - builder.get_account(ACCOUNT_2_ADDR), + builder.get_entity_by_account_hash(*ACCOUNT_2_ADDR), None, "account 2 should not exist" ); let account_1_starting_balance = builder.get_purse_balance(account_1_purse); - let runtime_args = runtime_args! { - mint::ARG_TARGET => ACCOUNT_2_ADDR, - mint::ARG_AMOUNT => transfer_amount, - mint::ARG_ID => >::None - }; - - let no_wasm_transfer_request = { - let deploy_item = DeployItemBuilder::new() - .with_address(ACCOUNT_1_ADDR) - .with_empty_payment_bytes(runtime_args! {}) - .with_transfer_args(runtime_args) - .with_authorization_keys(&[ACCOUNT_1_ADDR]) - .build(); - ExecuteRequestBuilder::from_deploy_item(deploy_item).build() - }; + let no_wasm_transfer_request = TransferRequestBuilder::new(transfer_amount, *ACCOUNT_2_ADDR) + .with_initiator(*ACCOUNT_1_ADDR) + .build(); builder - .exec(no_wasm_transfer_request) - .expect_success() - .commit(); + .transfer_and_commit(no_wasm_transfer_request) + .expect_success(); let account_2 = builder - .get_account(ACCOUNT_2_ADDR) + .get_entity_by_account_hash(*ACCOUNT_2_ADDR) .expect("account 2 should exist"); let account_2_starting_balance = builder.get_purse_balance(account_2.main_purse()); assert_eq!( - account_1_starting_balance - transfer_amount - wasmless_transfer_cost.value(), + account_1_starting_balance - transfer_amount, builder.get_purse_balance(account_1_purse), "account 1 ending balance incorrect" ); @@ -596,9 +575,9 @@ fn transfer_wasmless_should_create_target_if_it_doesnt_exist() { ); } -fn 
get_default_account_named_uref(builder: &mut InMemoryWasmTestBuilder, name: &str) -> URef { +fn get_default_account_named_uref(builder: &mut LmdbWasmTestBuilder, name: &str) -> URef { let default_account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) + .get_entity_with_named_keys_by_account_hash(*DEFAULT_ACCOUNT_ADDR) .expect("default account should exist"); default_account .named_keys() @@ -609,8 +588,8 @@ fn get_default_account_named_uref(builder: &mut InMemoryWasmTestBuilder, name: & .to_owned() } -fn init_wasmless_transform_builder(create_account_2: bool) -> InMemoryWasmTestBuilder { - let mut builder = InMemoryWasmTestBuilder::default(); +fn init_wasmless_transform_builder(create_account_2: bool) -> LmdbWasmTestBuilder { + let mut builder = LmdbWasmTestBuilder::default(); let id: Option = None; @@ -618,7 +597,7 @@ fn init_wasmless_transform_builder(create_account_2: bool) -> InMemoryWasmTestBu *DEFAULT_ACCOUNT_ADDR, CONTRACT_TRANSFER_PURSE_TO_ACCOUNT, runtime_args! { - mint::ARG_TARGET => ACCOUNT_1_ADDR, + mint::ARG_TARGET => *ACCOUNT_1_ADDR, mint::ARG_AMOUNT => *DEFAULT_PAYMENT, mint::ARG_ID => id }, @@ -626,7 +605,7 @@ fn init_wasmless_transform_builder(create_account_2: bool) -> InMemoryWasmTestBu .build(); builder - .run_genesis(&DEFAULT_RUN_GENESIS_REQUEST) + .run_genesis(LOCAL_GENESIS_REQUEST.clone()) .exec(create_account_1_request) .expect_success() .commit(); @@ -639,7 +618,7 @@ fn init_wasmless_transform_builder(create_account_2: bool) -> InMemoryWasmTestBu *DEFAULT_ACCOUNT_ADDR, CONTRACT_TRANSFER_PURSE_TO_ACCOUNT, runtime_args! 
{ - mint::ARG_TARGET => ACCOUNT_2_ADDR, + mint::ARG_TARGET => *ACCOUNT_2_ADDR, mint::ARG_AMOUNT => *DEFAULT_PAYMENT, mint::ARG_ID => id }, @@ -649,68 +628,63 @@ fn init_wasmless_transform_builder(create_account_2: bool) -> InMemoryWasmTestBu builder .exec(create_account_2_request) .commit() - .expect_success() - .to_owned() + .expect_success(); + + let new_named_uref_request = ExecuteRequestBuilder::standard( + *DEFAULT_ACCOUNT_ADDR, + CONTRACT_NEW_NAMED_UREF, + runtime_args! { + ARG_UREF_NAME => NON_UREF_NAMED_KEY, + }, + ) + .build(); + + builder + .exec(new_named_uref_request) + .commit() + .expect_success(); + + builder } #[ignore] #[test] -fn transfer_wasmless_should_fail_without_main_purse_minimum_balance() { - let wasmless_transfer_gas_cost = Gas::from(DEFAULT_WASMLESS_TRANSFER_COST); - let wasmless_transfer_cost = Motes::from_gas( - wasmless_transfer_gas_cost, - WASMLESS_TRANSFER_FIXED_GAS_PRICE, - ) - .expect("gas overflow"); - +fn transfer_wasmless_onward() { let create_account_2: bool = false; let mut builder = init_wasmless_transform_builder(create_account_2); - let account_1_to_account_2_amount: U512 = - U512::from(DEFAULT_WASMLESS_TRANSFER_COST) - U512::one(); + let account_1_to_account_2_amount: U512 = U512::one(); let account_2_to_account_1_amount: U512 = U512::one(); let account_1_purse = builder - .get_account(ACCOUNT_1_ADDR) + .get_entity_by_account_hash(*ACCOUNT_1_ADDR) .expect("should get account 1") .main_purse(); assert_eq!( - builder.get_account(ACCOUNT_2_ADDR), + builder.get_entity_by_account_hash(*ACCOUNT_2_ADDR), None, "account 2 should not exist" ); let account_1_starting_balance = builder.get_purse_balance(account_1_purse); - let runtime_args = runtime_args! { - mint::ARG_TARGET => ACCOUNT_2_ADDR, - mint::ARG_AMOUNT => account_1_to_account_2_amount, - mint::ARG_ID => >::None - }; - - let no_wasm_transfer_request_1 = { - let deploy_item = DeployItemBuilder::new() - .with_address(ACCOUNT_1_ADDR) - .with_empty_payment_bytes(runtime_args! 
{}) - .with_transfer_args(runtime_args) - .with_authorization_keys(&[ACCOUNT_1_ADDR]) + let no_wasm_transfer_request_1 = + TransferRequestBuilder::new(account_1_to_account_2_amount, *ACCOUNT_2_ADDR) + .with_initiator(*ACCOUNT_1_ADDR) .build(); - ExecuteRequestBuilder::from_deploy_item(deploy_item).build() - }; builder - .exec(no_wasm_transfer_request_1) - .expect_success() - .commit(); + .transfer_and_commit(no_wasm_transfer_request_1) + .expect_success(); let account_2 = builder - .get_account(ACCOUNT_2_ADDR) + .get_entity_by_account_hash(*ACCOUNT_2_ADDR) .expect("account 2 should exist"); let account_2_starting_balance = builder.get_purse_balance(account_2.main_purse()); assert_eq!( - account_1_starting_balance - account_1_to_account_2_amount - wasmless_transfer_cost.value(), + account_1_starting_balance - account_1_to_account_2_amount, builder.get_purse_balance(account_1_purse), "account 1 ending balance incorrect" ); @@ -720,40 +694,20 @@ fn transfer_wasmless_should_fail_without_main_purse_minimum_balance() { ); // Another transfer but this time created account tries to do a transfer - assert!(account_2_to_account_1_amount < wasmless_transfer_cost.value()); - let runtime_args = runtime_args! { - mint::ARG_TARGET => ACCOUNT_1_ADDR, - mint::ARG_AMOUNT => account_2_to_account_1_amount, - mint::ARG_ID => >::None - }; - - let no_wasm_transfer_request_2 = { - let deploy_item = DeployItemBuilder::new() - .with_address(ACCOUNT_2_ADDR) - .with_empty_payment_bytes(runtime_args! 
{}) - .with_transfer_args(runtime_args) - .with_authorization_keys(&[ACCOUNT_2_ADDR]) + let no_wasm_transfer_request_2 = + TransferRequestBuilder::new(account_2_to_account_1_amount, *ACCOUNT_1_ADDR) + .with_initiator(*ACCOUNT_2_ADDR) .build(); - ExecuteRequestBuilder::from_deploy_item(deploy_item).build() - }; - - builder.exec(no_wasm_transfer_request_2).commit(); - let exec_result = &builder.get_exec_results().last().unwrap()[0]; - let error = exec_result - .as_error() - .unwrap_or_else(|| panic!("should have error {:?}", exec_result)); - assert!( - matches!(error, CoreError::InsufficientPayment), - "{:?}", - error - ); + builder + .transfer_and_commit(no_wasm_transfer_request_2) + .expect_success(); } #[ignore] #[test] fn transfer_wasmless_should_transfer_funds_after_paying_for_transfer() { - let wasmless_transfer_gas_cost = Gas::from(DEFAULT_WASMLESS_TRANSFER_COST); + let wasmless_transfer_gas_cost = Gas::from(MintCosts::default().transfer); let wasmless_transfer_cost = Motes::from_gas( wasmless_transfer_gas_cost, WASMLESS_TRANSFER_FIXED_GAS_PRICE, @@ -768,47 +722,35 @@ fn transfer_wasmless_should_transfer_funds_after_paying_for_transfer() { let account_2_to_account_1_amount: U512 = U512::one(); let account_1_purse = builder - .get_account(ACCOUNT_1_ADDR) + .get_entity_by_account_hash(*ACCOUNT_1_ADDR) .expect("should get account 1") .main_purse(); assert_eq!( - builder.get_account(ACCOUNT_2_ADDR), + builder.get_entity_by_account_hash(*ACCOUNT_2_ADDR), None, "account 2 should not exist" ); let account_1_starting_balance = builder.get_purse_balance(account_1_purse); - let runtime_args = runtime_args! { - mint::ARG_TARGET => ACCOUNT_2_ADDR, - mint::ARG_AMOUNT => account_1_to_account_2_amount, - mint::ARG_ID => >::None - }; - - let no_wasm_transfer_request_1 = { - let deploy_item = DeployItemBuilder::new() - .with_address(ACCOUNT_1_ADDR) - .with_empty_payment_bytes(runtime_args! 
{}) - .with_transfer_args(runtime_args) - .with_authorization_keys(&[ACCOUNT_1_ADDR]) + let no_wasm_transfer_request_1 = + TransferRequestBuilder::new(account_1_to_account_2_amount, *ACCOUNT_2_ADDR) + .with_initiator(*ACCOUNT_1_ADDR) .build(); - ExecuteRequestBuilder::from_deploy_item(deploy_item).build() - }; builder - .exec(no_wasm_transfer_request_1) - .expect_success() - .commit(); + .transfer_and_commit(no_wasm_transfer_request_1) + .expect_success(); let account_2 = builder - .get_account(ACCOUNT_2_ADDR) + .get_entity_by_account_hash(*ACCOUNT_2_ADDR) .expect("account 2 should exist"); let account_2_starting_balance = builder.get_purse_balance(account_2.main_purse()); assert_eq!( - account_1_starting_balance - account_1_to_account_2_amount - wasmless_transfer_cost.value(), + account_1_starting_balance - account_1_to_account_2_amount, builder.get_purse_balance(account_1_purse), "account 1 ending balance incorrect" ); @@ -818,26 +760,13 @@ fn transfer_wasmless_should_transfer_funds_after_paying_for_transfer() { ); // Another transfer but this time created account tries to do a transfer - assert!(account_2_to_account_1_amount <= wasmless_transfer_cost.value()); - let runtime_args = runtime_args! { - mint::ARG_TARGET => ACCOUNT_1_ADDR, - mint::ARG_AMOUNT => account_2_to_account_1_amount, - mint::ARG_ID => >::None - }; - - let no_wasm_transfer_request_2 = { - let deploy_item = DeployItemBuilder::new() - .with_address(ACCOUNT_2_ADDR) - .with_empty_payment_bytes(runtime_args! 
{}) - .with_transfer_args(runtime_args) - .with_authorization_keys(&[ACCOUNT_2_ADDR]) + let no_wasm_transfer_request_2 = + TransferRequestBuilder::new(account_2_to_account_1_amount, *ACCOUNT_1_ADDR) + .with_initiator(*ACCOUNT_2_ADDR) .build(); - ExecuteRequestBuilder::from_deploy_item(deploy_item).build() - }; builder - .exec(no_wasm_transfer_request_2) - .commit() + .transfer_and_commit(no_wasm_transfer_request_2) .expect_success(); } @@ -849,7 +778,7 @@ fn transfer_wasmless_should_fail_with_secondary_purse_insufficient_funds() { let account_1_to_account_2_amount: U512 = U512::from(1000); let create_purse_request = ExecuteRequestBuilder::standard( - ACCOUNT_1_ADDR, + *ACCOUNT_1_ADDR, CONTRACT_CREATE_PURSE_01, runtime_args! { ARG_PURSE_NAME => TEST_PURSE_NAME }, ) @@ -857,7 +786,7 @@ fn transfer_wasmless_should_fail_with_secondary_purse_insufficient_funds() { builder.exec(create_purse_request).commit().expect_success(); let account_1 = builder - .get_account(ACCOUNT_1_ADDR) + .get_entity_with_named_keys_by_account_hash(*ACCOUNT_1_ADDR) .expect("should get account 1"); let account_1_purse = account_1 @@ -872,119 +801,13 @@ fn transfer_wasmless_should_fail_with_secondary_purse_insufficient_funds() { let account_1_starting_balance = builder.get_purse_balance(account_1_purse); assert_eq!(account_1_starting_balance, U512::zero()); - let runtime_args = runtime_args! { - mint::ARG_SOURCE => account_1_purse, - mint::ARG_TARGET => ACCOUNT_2_ADDR, - mint::ARG_AMOUNT => account_1_to_account_2_amount, - mint::ARG_ID => >::None - }; - - let no_wasm_transfer_request_1 = { - let deploy_item = DeployItemBuilder::new() - .with_address(ACCOUNT_1_ADDR) - .with_empty_payment_bytes(runtime_args! 
{}) - .with_transfer_args(runtime_args) - .with_authorization_keys(&[ACCOUNT_1_ADDR]) - .build(); - ExecuteRequestBuilder::from_deploy_item(deploy_item).build() - }; - - builder.exec(no_wasm_transfer_request_1).commit(); - - let exec_result = &builder.get_exec_results().last().unwrap()[0]; - let error = exec_result.as_error().expect("should have error"); - assert!( - matches!(error, CoreError::InsufficientPayment), - "{:?}", - error - ); -} - -#[ignore] -#[test] -fn transfer_wasmless_should_observe_upgraded_cost() { - let new_wasmless_transfer_cost_value = DEFAULT_WASMLESS_TRANSFER_COST * 2; - - let new_wasmless_transfer_gas_cost = Gas::from(new_wasmless_transfer_cost_value); - let new_wasmless_transfer_cost = Motes::from_gas( - new_wasmless_transfer_gas_cost, - WASMLESS_TRANSFER_FIXED_GAS_PRICE, - ) - .expect("gas overflow"); - - let transfer_amount = U512::one(); - const DEFAULT_ACTIVATION_POINT: EraId = EraId::new(1); - - let new_auction_costs = AuctionCosts::default(); - let new_mint_costs = MintCosts::default(); - let new_handle_payment_costs = HandlePaymentCosts::default(); - let new_standard_payment_costs = StandardPaymentCosts::default(); - - let new_system_config = SystemConfig::new( - new_wasmless_transfer_cost_value, - new_auction_costs, - new_mint_costs, - new_handle_payment_costs, - new_standard_payment_costs, - ); - - let old_protocol_version = *DEFAULT_PROTOCOL_VERSION; - let new_protocol_version = ProtocolVersion::from_parts( - old_protocol_version.value().major, - old_protocol_version.value().minor, - old_protocol_version.value().patch + 1, - ); - - let mut builder = InMemoryWasmTestBuilder::default(); - builder.run_genesis(&*DEFAULT_RUN_GENESIS_REQUEST); - - let default_account = builder - .get_account(*DEFAULT_ACCOUNT_ADDR) - .expect("should get default_account"); - - let mut upgrade_request = { - UpgradeRequestBuilder::new() - .with_current_protocol_version(*DEFAULT_PROTOCOL_VERSION) - .with_new_protocol_version(new_protocol_version) - 
.with_activation_point(DEFAULT_ACTIVATION_POINT) - .with_new_system_config(new_system_config) - .build() - }; - - builder.upgrade_with_upgrade_request(&mut upgrade_request); - - let default_account_balance_before = builder.get_purse_balance(default_account.main_purse()); - - let no_wasm_transfer_request_1 = { - let wasmless_transfer_args = runtime_args! { - mint::ARG_TARGET => ACCOUNT_2_ADDR, - mint::ARG_AMOUNT => transfer_amount, - mint::ARG_ID => >::None - }; - - let deploy_item = DeployItemBuilder::new() - .with_address(*DEFAULT_ACCOUNT_ADDR) - .with_empty_payment_bytes(runtime_args! {}) - .with_transfer_args(wasmless_transfer_args) - .with_authorization_keys(&[*DEFAULT_ACCOUNT_ADDR]) + let no_wasm_transfer_request_1 = + TransferRequestBuilder::new(account_1_to_account_2_amount, *ACCOUNT_2_ADDR) + .with_source(account_1_purse) + .with_initiator(*ACCOUNT_1_ADDR) .build(); - ExecuteRequestBuilder::from_deploy_item(deploy_item) - .with_protocol_version(new_protocol_version) - .build() - }; builder - .exec(no_wasm_transfer_request_1) - .expect_success() - .commit(); - - let default_account_balance_after = builder.get_purse_balance(default_account.main_purse()); - - assert_eq!( - default_account_balance_before - transfer_amount - new_wasmless_transfer_cost.value(), - default_account_balance_after, - "expected wasmless transfer cost to be {} but it was {}", - new_wasmless_transfer_cost, - default_account_balance_before - default_account_balance_after - transfer_amount - ); + .transfer_and_commit(no_wasm_transfer_request_1) + .expect_failure(); } diff --git a/execution_engine_testing/tests/src/wasm_utils.rs b/execution_engine_testing/tests/src/wasm_utils.rs new file mode 100644 index 0000000000..27b035feca --- /dev/null +++ b/execution_engine_testing/tests/src/wasm_utils.rs @@ -0,0 +1,92 @@ +//! Wasm helpers. 
+use std::{collections::BTreeSet, fmt::Write}; + +use casper_wasm::{ + builder, + elements::{Instruction, Instructions}, +}; +use walrus::Module; + +use casper_types::addressable_entity::DEFAULT_ENTRY_POINT_NAME; + +/// Creates minimal session code that does nothing +pub fn do_nothing_bytes() -> Vec { + let module = builder::module() + .function() + // A signature with 0 params and no return type + .signature() + .build() + .body() + .build() + .build() + // Export above function + .export() + .field(DEFAULT_ENTRY_POINT_NAME) + .build() + // Memory section is mandatory + .memory() + .build() + .build(); + casper_wasm::serialize(module).expect("should serialize") +} + +/// Creates minimal session code that does only one "nop" opcode +pub fn do_minimum_bytes() -> Vec { + let module = builder::module() + .function() + // A signature with 0 params and no return type + .signature() + .build() + .body() + .with_instructions(Instructions::new(vec![Instruction::Nop, Instruction::End])) + .build() + .build() + // Export above function + .export() + .field(DEFAULT_ENTRY_POINT_NAME) + .build() + // Memory section is mandatory + .memory() + .build() + .build(); + casper_wasm::serialize(module).expect("should serialize") +} + +/// Creates minimal session code that contains a function with arbitrary number of parameters. +pub fn make_n_arg_call_bytes( + arity: usize, + arg_type: &str, +) -> Result, Box> { + let mut call_args = String::new(); + for i in 0..arity { + write!(call_args, "({}.const {}) ", arg_type, i)?; + } + + let mut func_params = String::new(); + for i in 0..arity { + write!(func_params, "(param $arg{} {}) ", i, arg_type)?; + } + + // This wasm module contains a function with a specified amount of arguments in it. 
+ let wat = format!( + r#"(module + (func $call (call $func {call_args}) (return)) + (func $func {func_params} (return)) + (export "func" (func $func)) + (export "call" (func $call)) + (memory $memory 1) + )"# + ); + let module_bytes = wat::parse_str(wat)?; + Ok(module_bytes) +} + +/// Returns a set of exports for a given wasm module bytes +pub fn get_wasm_exports(module_bytes: &[u8]) -> BTreeSet { + let module = Module::from_buffer(module_bytes).expect("should have walid wasm bytes"); + module + .exports + .iter() + .map(|export| export.name.clone()) + .collect() +} diff --git a/executor/wasm/Cargo.toml b/executor/wasm/Cargo.toml new file mode 100644 index 0000000000..c99174ac8d --- /dev/null +++ b/executor/wasm/Cargo.toml @@ -0,0 +1,35 @@ +[package] +name = "casper-executor-wasm" +version = "0.1.3" +edition = "2021" +authors = ["Michał Papierski "] +description = "Casper executor wasm package" +homepage = "https://casper.network" +repository = "https://github.com/casper-network/casper-node/tree/dev/executor/wasm" +license = "Apache-2.0" + +[dependencies] +blake2 = "0.10" +borsh = { version = "1.5", features = ["derive"] } +bytes = "1.10" +casper-executor-wasm-common = { version = "0.1.3", path = "../wasm_common" } +casper-executor-wasm-host = { version = "0.1.3", path = "../wasm_host" } +casper-executor-wasm-interface = { version = "0.1.3", path = "../wasm_interface" } +casper-executor-wasmer-backend = { version = "0.1.3", path = "../wasmer_backend" } +casper-storage = { version = "2.1.1", path = "../../storage" } +casper-types = { version = "6.0.1", path = "../../types", features = ["std"] } +casper-execution-engine = { version = "8.1.1", path = "../../execution_engine", features = [ + "test-support", +] } +digest = "0.10.7" +parking_lot = "0.12.1" +thiserror = "2.0" +tracing = "0.1.40" +base16 = "0.2.1" + +[dev-dependencies] +tempfile = "3.10.1" +once_cell = "1.19.0" +fs_extra = "1.3.0" +serde_json = "1.0.127" +itertools = "0.14.0" diff --git 
a/executor/wasm/src/install.rs b/executor/wasm/src/install.rs new file mode 100644 index 0000000000..8697c671e9 --- /dev/null +++ b/executor/wasm/src/install.rs @@ -0,0 +1,221 @@ +use std::sync::Arc; + +use bytes::Bytes; +use casper_executor_wasm_common::error::CallError; +use casper_executor_wasm_interface::{executor::ExecuteError, GasUsage}; +use casper_storage::{global_state::error::Error as GlobalStateError, AddressGenerator}; +use casper_types::{ + account::AccountHash, execution::Effects, BlockHash, BlockTime, Digest, TransactionHash, +}; +use parking_lot::RwLock; +use thiserror::Error; + +// NOTE: One struct that represents both InstallContractRequest and ExecuteRequest. + +/// Store contract request. +pub struct InstallContractRequest { + /// Initiator's address. + pub(crate) initiator: AccountHash, + /// Gas limit. + pub(crate) gas_limit: u64, + /// Wasm bytes of the contract to be stored. + pub(crate) wasm_bytes: Bytes, + /// Constructor entry point name. + pub(crate) entry_point: Option, + /// Input data for the constructor. + pub(crate) input: Option, + /// Attached tokens value that to be transferred into the constructor. + pub(crate) transferred_value: u64, + /// Transaction hash. + pub(crate) transaction_hash: TransactionHash, + /// Address generator. + pub(crate) address_generator: Arc>, + /// Chain name. + pub(crate) chain_name: Arc, + /// Block time. + pub(crate) block_time: BlockTime, + /// State hash. + pub(crate) state_hash: Digest, + /// Parent block hash. + pub(crate) parent_block_hash: BlockHash, + /// Block height. + pub(crate) block_height: u64, + /// Seed used for smart contract hash computation. 
+ pub(crate) seed: Option<[u8; 32]>, +} + +#[derive(Default)] +pub struct InstallContractRequestBuilder { + initiator: Option, + gas_limit: Option, + wasm_bytes: Option, + entry_point: Option, + input: Option, + transferred_value: Option, + transaction_hash: Option, + address_generator: Option>>, + chain_name: Option>, + block_time: Option, + state_hash: Option, + parent_block_hash: Option, + block_height: Option, + seed: Option<[u8; 32]>, +} + +impl InstallContractRequestBuilder { + pub fn with_initiator(mut self, initiator: AccountHash) -> Self { + self.initiator = Some(initiator); + self + } + + pub fn with_gas_limit(mut self, gas_limit: u64) -> Self { + self.gas_limit = Some(gas_limit); + self + } + + pub fn with_wasm_bytes(mut self, wasm_bytes: Bytes) -> Self { + self.wasm_bytes = Some(wasm_bytes); + self + } + + pub fn with_entry_point(mut self, entry_point: String) -> Self { + self.entry_point = Some(entry_point); + self + } + + pub fn with_input(mut self, input: Bytes) -> Self { + self.input = Some(input); + self + } + + pub fn with_transferred_value(mut self, transferred_value: u64) -> Self { + self.transferred_value = Some(transferred_value); + self + } + + pub fn with_address_generator(mut self, address_generator: AddressGenerator) -> Self { + self.address_generator = Some(Arc::new(RwLock::new(address_generator))); + self + } + + pub fn with_shared_address_generator( + mut self, + address_generator: Arc>, + ) -> Self { + self.address_generator = Some(address_generator); + self + } + + pub fn with_transaction_hash(mut self, transaction_hash: TransactionHash) -> Self { + self.transaction_hash = Some(transaction_hash); + self + } + + pub fn with_chain_name>>(mut self, chain_name: T) -> Self { + self.chain_name = Some(chain_name.into()); + self + } + + pub fn with_block_time(mut self, block_time: BlockTime) -> Self { + self.block_time = Some(block_time); + self + } + + pub fn with_seed(mut self, seed: [u8; 32]) -> Self { + self.seed = Some(seed); + self + } 
+ + pub fn with_state_hash(mut self, state_hash: Digest) -> Self { + self.state_hash = Some(state_hash); + self + } + + pub fn with_parent_block_hash(mut self, parent_block_hash: BlockHash) -> Self { + self.parent_block_hash = Some(parent_block_hash); + self + } + + pub fn with_block_height(mut self, block_height: u64) -> Self { + self.block_height = Some(block_height); + self + } + + pub fn build(self) -> Result { + let initiator = self.initiator.ok_or("Initiator not set")?; + let gas_limit = self.gas_limit.ok_or("Gas limit not set")?; + let wasm_bytes = self.wasm_bytes.ok_or("Wasm bytes not set")?; + let entry_point = self.entry_point; + let input = self.input; + let transferred_value = self.transferred_value.ok_or("Value not set")?; + let address_generator = self.address_generator.ok_or("Address generator not set")?; + let transaction_hash = self.transaction_hash.ok_or("Transaction hash not set")?; + let chain_name = self.chain_name.ok_or("Chain name not set")?; + let block_time = self.block_time.ok_or("Block time not set")?; + let seed = self.seed; + let state_hash = self.state_hash.ok_or("State hash not set")?; + let parent_block_hash = self.parent_block_hash.ok_or("Parent block hash not set")?; + let block_height = self.block_height.ok_or("Block height not set")?; + Ok(InstallContractRequest { + initiator, + gas_limit, + wasm_bytes, + entry_point, + input, + transferred_value, + address_generator, + transaction_hash, + chain_name, + block_time, + seed, + state_hash, + parent_block_hash, + block_height, + }) + } +} + +/// Result of executing a Wasm contract. +#[derive(Debug)] +pub struct InstallContractResult { + /// Smart contract address. + pub(crate) smart_contract_addr: [u8; 32], + /// Gas usage. + pub(crate) gas_usage: GasUsage, + /// Effects produced by the execution. + pub(crate) effects: Effects, + /// Post state hash after installation. 
+ pub(crate) post_state_hash: Digest, +} +impl InstallContractResult { + pub fn effects(&self) -> &Effects { + &self.effects + } + + pub fn gas_usage(&self) -> &GasUsage { + &self.gas_usage + } + + pub fn post_state_hash(&self) -> Digest { + self.post_state_hash + } + + pub fn smart_contract_addr(&self) -> &[u8; 32] { + &self.smart_contract_addr + } +} + +#[derive(Debug, Error)] +pub enum InstallContractError { + #[error("system contract error: {0}")] + SystemContract(CallError), + + #[error("execute: {0}")] + Execute(ExecuteError), + + #[error("Global state error: {0}")] + GlobalState(#[from] GlobalStateError), + + #[error("constructor error: {host_error}")] + Constructor { host_error: CallError }, +} diff --git a/executor/wasm/src/lib.rs b/executor/wasm/src/lib.rs new file mode 100644 index 0000000000..cc8e7cd3eb --- /dev/null +++ b/executor/wasm/src/lib.rs @@ -0,0 +1,894 @@ +pub mod install; +pub(crate) mod system; + +use std::{ + collections::{BTreeSet, VecDeque}, + sync::Arc, +}; + +use bytes::Bytes; +use casper_execution_engine::{ + engine_state::{BlockInfo, Error as EngineError, ExecutableItem, ExecutionEngineV1}, + execution::ExecError, +}; +use casper_executor_wasm_common::{ + chain_utils, + error::{CallError, TrapCode}, + flags::ReturnFlags, +}; +use casper_executor_wasm_host::context::Context; +use casper_executor_wasm_interface::{ + executor::{ + ExecuteError, ExecuteRequest, ExecuteRequestBuilder, ExecuteResult, + ExecuteWithProviderError, ExecuteWithProviderResult, ExecutionKind, Executor, + }, + ConfigBuilder, GasUsage, VMError, WasmInstance, +}; +use casper_executor_wasmer_backend::WasmerEngine; +use casper_storage::{ + global_state::{ + error::Error as GlobalStateError, + state::{CommitProvider, StateProvider}, + GlobalStateReader, + }, + TrackingCopy, +}; +use casper_types::{ + account::AccountHash, + addressable_entity::{ActionThresholds, AssociatedKeys}, + bytesrepr, AddressableEntity, ByteCode, ByteCodeAddr, ByteCodeHash, ByteCodeKind, + 
ContractRuntimeTag, Digest, EntityAddr, EntityKind, Gas, Groups, InitiatorAddr, Key, + MessageLimits, Package, PackageHash, PackageStatus, Phase, ProtocolVersion, StorageCosts, + StoredValue, TransactionInvocationTarget, URef, WasmV2Config, U512, +}; +use install::{InstallContractError, InstallContractRequest, InstallContractResult}; +use parking_lot::RwLock; +use system::{MintArgs, MintTransferArgs}; +use tracing::{error, warn}; + +const DEFAULT_WASM_ENTRY_POINT: &str = "call"; + +const DEFAULT_MINT_TRANSFER_GAS_COST: u64 = 1; // NOTE: Require gas while executing and set this to at least 100_000_000 (or use chainspec) + +#[derive(Copy, Clone, Debug)] +pub enum ExecutorKind { + /// Ahead of time compiled Wasm. + /// + /// This is the default executor kind. + Compiled, +} + +#[derive(Copy, Clone, Debug)] +pub struct ExecutorConfig { + memory_limit: u32, + executor_kind: ExecutorKind, + wasm_config: WasmV2Config, + storage_costs: StorageCosts, + message_limits: MessageLimits, +} + +impl ExecutorConfigBuilder { + pub fn new() -> ExecutorConfigBuilder { + ExecutorConfigBuilder::default() + } +} + +#[derive(Default)] +pub struct ExecutorConfigBuilder { + memory_limit: Option, + executor_kind: Option, + wasm_config: Option, + storage_costs: Option, + message_limits: Option, +} + +impl ExecutorConfigBuilder { + /// Set the memory limit. + pub fn with_memory_limit(mut self, memory_limit: u32) -> Self { + self.memory_limit = Some(memory_limit); + self + } + + /// Set the executor kind. + pub fn with_executor_kind(mut self, executor_kind: ExecutorKind) -> Self { + self.executor_kind = Some(executor_kind); + self + } + + /// Set the wasm config. + pub fn with_wasm_config(mut self, wasm_config: WasmV2Config) -> Self { + self.wasm_config = Some(wasm_config); + self + } + + /// Set the wasm config. + pub fn with_storage_costs(mut self, storage_costs: StorageCosts) -> Self { + self.storage_costs = Some(storage_costs); + self + } + + /// Set the message limits. 
+ pub fn with_message_limits(mut self, message_limits: MessageLimits) -> Self { + self.message_limits = Some(message_limits); + self + } + + /// Build the `ExecutorConfig`. + pub fn build(self) -> Result { + let memory_limit = self.memory_limit.ok_or("Memory limit is not set")?; + let executor_kind = self.executor_kind.ok_or("Executor kind is not set")?; + let wasm_config = self.wasm_config.ok_or("Wasm config is not set")?; + let storage_costs = self.storage_costs.ok_or("Storage costs are not set")?; + let message_limits = self.message_limits.ok_or("Message limits are not set")?; + + Ok(ExecutorConfig { + memory_limit, + executor_kind, + wasm_config, + storage_costs, + message_limits, + }) + } +} + +#[derive(Clone)] +pub struct ExecutorV2 { + config: ExecutorConfig, + compiled_wasm_engine: Arc, + execution_stack: Arc>>, + execution_engine_v1: Arc, +} + +impl ExecutorV2 { + pub fn install_contract( + &self, + state_root_hash: Digest, + state_provider: &R, + install_request: InstallContractRequest, + ) -> Result + where + R: StateProvider + CommitProvider, + ::Reader: 'static, + { + let mut tracking_copy = match state_provider.checkout(state_root_hash) { + Ok(Some(tracking_copy)) => { + TrackingCopy::new(tracking_copy, 1, state_provider.enable_entity()) + } + Ok(None) => { + return Err(InstallContractError::GlobalState( + GlobalStateError::RootNotFound, + )) + } + Err(error) => return Err(error.into()), + }; + + let InstallContractRequest { + initiator, + gas_limit, + wasm_bytes, + entry_point, + input, + transferred_value, + address_generator, + transaction_hash, + chain_name, + block_time, + seed, + state_hash, + parent_block_hash, + block_height, + } = install_request; + + let bytecode_hash = chain_utils::compute_wasm_bytecode_hash(&wasm_bytes); + + let caller_key = Key::Account(initiator); + let _source_purse = get_purse_for_entity(&mut tracking_copy, caller_key); + + // 1. 
Store package hash + let smart_contract_addr: [u8; 32] = chain_utils::compute_predictable_address( + chain_name.as_bytes(), + initiator.value(), + bytecode_hash, + seed, + ); + + let mut smart_contract = Package::new( + Default::default(), + Default::default(), + Groups::default(), + PackageStatus::Unlocked, + ); + + let protocol_version = ProtocolVersion::V2_0_0; + let protocol_version_major = protocol_version.value().major; + + let next_version = smart_contract.next_entity_version_for(protocol_version_major); + + let entity_version_key = smart_contract.insert_entity_version( + protocol_version_major, + EntityAddr::SmartContract(smart_contract_addr), + ); + debug_assert_eq!(entity_version_key.entity_version(), next_version); + + let smart_contract_addr = chain_utils::compute_predictable_address( + chain_name.as_bytes(), + initiator.value(), + bytecode_hash, + seed, + ); + + tracking_copy.write( + Key::SmartContract(smart_contract_addr), + StoredValue::SmartContract(smart_contract), + ); + + // 2. Store wasm + + let bytecode = ByteCode::new(ByteCodeKind::V2CasperWasm, wasm_bytes.clone().into()); + let bytecode_addr = ByteCodeAddr::V2CasperWasm(bytecode_hash); + + tracking_copy.write( + Key::ByteCode(bytecode_addr), + StoredValue::ByteCode(bytecode), + ); + + // 3. 
Store addressable entity + let addressable_entity_key = + Key::AddressableEntity(EntityAddr::SmartContract(smart_contract_addr)); + + // TODO: abort(str) as an alternative to trap + let main_purse: URef = match system::mint_mint( + &mut tracking_copy, + transaction_hash, + Arc::clone(&address_generator), + MintArgs { + initial_balance: U512::zero(), + }, + ) { + Ok(uref) => uref, + Err(mint_error) => { + error!(?mint_error, "Failed to create a purse"); + return Err(InstallContractError::SystemContract( + CallError::CalleeTrapped(TrapCode::UnreachableCodeReached), + )); + } + }; + + let addressable_entity = AddressableEntity::new( + PackageHash::new(smart_contract_addr), + ByteCodeHash::new(bytecode_hash), + ProtocolVersion::V2_0_0, + main_purse, + AssociatedKeys::default(), + ActionThresholds::default(), + EntityKind::SmartContract(ContractRuntimeTag::VmCasperV2), + ); + + tracking_copy.write( + addressable_entity_key, + StoredValue::AddressableEntity(addressable_entity), + ); + + let ctor_gas_usage = match entry_point { + Some(entry_point_name) => { + let input = input.unwrap_or_default(); + let execute_request = ExecuteRequestBuilder::default() + .with_initiator(initiator) + .with_caller_key(caller_key) + .with_target(ExecutionKind::Stored { + address: smart_contract_addr, + entry_point: entry_point_name, + }) + .with_gas_limit(gas_limit) + .with_input(input) + .with_transferred_value(transferred_value) + .with_transaction_hash(transaction_hash) + .with_shared_address_generator(address_generator) + .with_chain_name(chain_name) + .with_block_time(block_time) + .with_state_hash(state_hash) + .with_parent_block_hash(parent_block_hash) + .with_block_height(block_height) + .build() + .expect("should build"); + + let forked_tc = tracking_copy.fork2(); + + match Self::execute_with_tracking_copy(self, forked_tc, execute_request) { + Ok(ExecuteResult { + host_error, + output, + gas_usage, + effects, + cache, + messages, + }) => { + if let Some(host_error) = host_error { + 
return Err(InstallContractError::Constructor { host_error }); + } + + tracking_copy.apply_changes(effects, cache, messages); + + if let Some(output) = output { + warn!(?output, "unexpected output from constructor"); + } + + gas_usage + } + Err(execute_error) => { + error!(%execute_error, "unable to execute constructor"); + return Err(InstallContractError::Execute(execute_error)); + } + } + } + None => { + // TODO: Calculate storage gas cost etc. and make it the base cost, then add + // constructor gas cost + GasUsage::new(gas_limit, gas_limit) + } + }; + + let effects = tracking_copy.effects(); + + match state_provider.commit_effects(state_root_hash, effects.clone()) { + Ok(post_state_hash) => Ok(InstallContractResult { + smart_contract_addr, + gas_usage: ctor_gas_usage, + effects, + post_state_hash, + }), + Err(error) => Err(InstallContractError::GlobalState(error)), + } + } + + fn execute_with_tracking_copy( + &self, + mut tracking_copy: TrackingCopy, + execute_request: ExecuteRequest, + ) -> Result { + let ExecuteRequest { + initiator, + caller_key, + gas_limit, + execution_kind, + input, + transferred_value, + transaction_hash, + address_generator, + chain_name, + block_time, + state_hash, + parent_block_hash, + block_height, + } = execute_request; + + // TODO: Purse uref does not need to be optional once value transfers to WasmBytes are + // supported. 
let caller_entity_addr = EntityAddr::new_account(caller); + let source_purse = get_purse_for_entity(&mut tracking_copy, caller_key); + + let (wasm_bytes, export_name) = match &execution_kind { + ExecutionKind::SessionBytes(wasm_bytes) => { + // self.execute_wasm(tracking_copy, address, gas_limit, wasm_bytes, input) + (wasm_bytes.clone(), DEFAULT_WASM_ENTRY_POINT) + } + ExecutionKind::Stored { + address: smart_contract_addr, + entry_point, + } => { + let smart_contract_key = Key::SmartContract(*smart_contract_addr); + let legacy_key = Key::Hash(*smart_contract_addr); + + let mut contract = tracking_copy + .read_first(&[&legacy_key, &smart_contract_key]) + .expect("should read contract"); + + if let Some(StoredValue::SmartContract(smart_contract_package)) = &contract { + let contract_hash = smart_contract_package + .versions() + .latest() + .expect("should have last entry"); + let entity_addr = EntityAddr::SmartContract(contract_hash.value()); + let latest_version_key = Key::AddressableEntity(entity_addr); + assert_eq!(&entity_addr.value(), smart_contract_addr); + let new_contract = tracking_copy + .read(&latest_version_key) + .expect("should read latest version"); + contract = new_contract; + }; + + match contract { + Some(StoredValue::AddressableEntity(addressable_entity)) => { + let wasm_key = match addressable_entity.kind() { + EntityKind::System(_) => todo!(), + EntityKind::Account(_) => todo!(), + EntityKind::SmartContract(ContractRuntimeTag::VmCasperV1) => { + // We need to short circuit here to execute v1 contracts with legacy + // execut + + let block_info = BlockInfo::new( + state_hash, + block_time, + parent_block_hash, + block_height, + self.execution_engine_v1.config().protocol_version(), + ); + + let entity_addr = EntityAddr::SmartContract(*smart_contract_addr); + + return self.execute_legacy_wasm_byte_code( + initiator, + &entity_addr, + entry_point.clone(), + &input, + &mut tracking_copy, + block_info, + transaction_hash, + gas_limit, + ); + } + 
EntityKind::SmartContract(ContractRuntimeTag::VmCasperV2) => { + Key::ByteCode(ByteCodeAddr::V2CasperWasm( + addressable_entity.byte_code_addr(), + )) + } + }; + + // Note: Bytecode stored in the GlobalStateReader has a "kind" option - + // currently we know we have a v2 bytecode as the stored contract is of "V2" + // variant. + let wasm_bytes = tracking_copy + .read(&wasm_key) + .expect("should read wasm") + .expect("should have wasm bytes") + .into_byte_code() + .expect("should be byte code") + .take_bytes(); + + if transferred_value != 0 { + let args = { + let maybe_to = None; + let source = source_purse; + let target = addressable_entity.main_purse(); + let amount = transferred_value; + let id = None; + MintTransferArgs { + maybe_to, + source, + target, + amount: amount.into(), + id, + } + }; + + match system::mint_transfer( + &mut tracking_copy, + transaction_hash, + Arc::clone(&address_generator), + args, + ) { + Ok(()) => { + // Transfer succeed, go on + } + Err(error) => { + return Ok(ExecuteResult { + host_error: Some(error), + output: None, + gas_usage: GasUsage::new( + gas_limit, + gas_limit - DEFAULT_MINT_TRANSFER_GAS_COST, + ), + effects: tracking_copy.effects(), + cache: tracking_copy.cache(), + messages: tracking_copy.messages(), + }); + } + } + } + + (Bytes::from(wasm_bytes), entry_point.as_str()) + } + Some(StoredValue::Contract(_legacy_contract)) => { + let block_info = BlockInfo::new( + state_hash, + block_time, + parent_block_hash, + block_height, + self.execution_engine_v1.config().protocol_version(), + ); + + let entity_addr = EntityAddr::SmartContract(*smart_contract_addr); + + return self.execute_legacy_wasm_byte_code( + initiator, + &entity_addr, + entry_point.clone(), + &input, + &mut tracking_copy, + block_info, + transaction_hash, + gas_limit, + ); + } + Some(stored_value) => { + todo!( + "Unexpected {stored_value:?} under key {:?}", + &execution_kind + ); + } + None => { + error!( + smart_contract_addr = 
base16::encode_lower(&smart_contract_addr), + ?execution_kind, + "No contract code found", + ); + return Err(ExecuteError::CodeNotFound(*smart_contract_addr)); + } + } + } + }; + + let vm = Arc::clone(&self.compiled_wasm_engine); + + let mut initial_tracking_copy = tracking_copy.fork2(); + + // Derive callee key from the execution target. + let callee_key = match &execution_kind { + ExecutionKind::Stored { + address: smart_contract_addr, + .. + } => Key::SmartContract(*smart_contract_addr), + ExecutionKind::SessionBytes(_wasm_bytes) => Key::Account(initiator), + }; + + let context = Context { + initiator, + config: self.config.wasm_config, + storage_costs: self.config.storage_costs, + caller: caller_key, + callee: callee_key, + transferred_value, + tracking_copy, + executor: self.clone(), + address_generator: Arc::clone(&address_generator), + transaction_hash, + chain_name, + input, + block_time, + message_limits: self.config.message_limits, + }; + + let wasm_instance_config = ConfigBuilder::new() + .with_gas_limit(gas_limit) + .with_memory_limit(self.config.memory_limit) + .build(); + + let mut instance = vm.instantiate(wasm_bytes, context, wasm_instance_config)?; + + self.push_execution_stack(execution_kind.clone()); + let (vm_result, gas_usage) = instance.call_export(export_name); + + let top_execution_kind = self + .pop_execution_stack() + .expect("should have execution kind"); // SAFETY: We just pushed + debug_assert_eq!(&top_execution_kind, &execution_kind); + + let context = instance.teardown(); + + let Context { + tracking_copy: final_tracking_copy, + .. + } = context; + + match vm_result { + Ok(()) => Ok(ExecuteResult { + host_error: None, + output: None, + gas_usage, + effects: final_tracking_copy.effects(), + cache: final_tracking_copy.cache(), + messages: final_tracking_copy.messages(), + }), + Err(VMError::Return { flags, data }) => { + let host_error = if flags.contains(ReturnFlags::REVERT) { + // The contract has reverted. 
+ Some(CallError::CalleeReverted) + } else { + // Merge the tracking copy parts since the execution has succeeded. + initial_tracking_copy.apply_changes( + final_tracking_copy.effects(), + final_tracking_copy.cache(), + final_tracking_copy.messages(), + ); + + None + }; + + Ok(ExecuteResult { + host_error, + output: data, + gas_usage, + effects: initial_tracking_copy.effects(), + cache: initial_tracking_copy.cache(), + messages: initial_tracking_copy.messages(), + }) + } + Err(VMError::OutOfGas) => Ok(ExecuteResult { + host_error: Some(CallError::CalleeGasDepleted), + output: None, + gas_usage, + effects: final_tracking_copy.effects(), + cache: final_tracking_copy.cache(), + messages: final_tracking_copy.messages(), + }), + Err(VMError::Trap(trap_code)) => Ok(ExecuteResult { + host_error: Some(CallError::CalleeTrapped(trap_code)), + output: None, + gas_usage, + effects: initial_tracking_copy.effects(), + cache: initial_tracking_copy.cache(), + messages: initial_tracking_copy.messages(), + }), + Err(VMError::Export(export_error)) => { + error!(?export_error, "export error"); + Ok(ExecuteResult { + host_error: Some(CallError::NotCallable), + output: None, + gas_usage, + effects: initial_tracking_copy.effects(), + cache: initial_tracking_copy.cache(), + messages: initial_tracking_copy.messages(), + }) + } + Err(VMError::Execute(execute_error)) => { + let effects = initial_tracking_copy.effects(); + let cache = initial_tracking_copy.cache(); + let messages = initial_tracking_copy.messages(); + error!( + ?execute_error, + ?gas_usage, + ?effects, + ?cache, + ?messages, + "host error" + ); + Err(execute_error) + } + Err(VMError::Internal(internal_error)) => { + error!(?internal_error, "internal host error"); + Err(ExecuteError::InternalHost(internal_error)) + } + } + } + + #[allow(clippy::too_many_arguments)] + fn execute_legacy_wasm_byte_code( + &self, + initiator: AccountHash, + entity_addr: &EntityAddr, + entry_point: String, + input: &Bytes, + tracking_copy: &mut 
TrackingCopy, + block_info: BlockInfo, + transaction_hash: casper_types::TransactionHash, + gas_limit: u64, + ) -> Result + where + R: GlobalStateReader + 'static, + { + let authorization_keys = BTreeSet::from_iter([initiator]); + let initiator_addr = InitiatorAddr::AccountHash(initiator); + let executable_item = + ExecutableItem::Invocation(TransactionInvocationTarget::ByHash(entity_addr.value())); + let entry_point = entry_point.clone(); + let args = bytesrepr::deserialize_from_slice(input).expect("should deserialize"); + let phase = Phase::Session; + + let wasm_v1_result = { + let forked_tc = tracking_copy.fork2(); + self.execution_engine_v1.execute_with_tracking_copy( + forked_tc, + block_info, + transaction_hash, + Gas::from(gas_limit), + initiator_addr, + executable_item, + entry_point, + args, + authorization_keys, + phase, + ) + }; + + let effects = wasm_v1_result.effects(); + let messages = wasm_v1_result.messages(); + + match wasm_v1_result.cache() { + Some(cache) => { + tracking_copy.apply_changes(effects.clone(), cache.clone(), messages.clone()); + } + None => { + debug_assert!( + effects.is_empty(), + "effects should be empty if there is no cache" + ); + } + } + + let gas_consumed = wasm_v1_result + .consumed() + .value() + .try_into() + .expect("Should convert consumed gas to u64"); + + let mut output = wasm_v1_result + .ret() + .map(|ret| bytesrepr::serialize(ret).unwrap()) + .map(Bytes::from); + + let host_error = match wasm_v1_result.error() { + Some(EngineError::Exec(ExecError::GasLimit)) => Some(CallError::CalleeGasDepleted), + Some(EngineError::Exec(ExecError::Revert(revert_code))) => { + assert!(output.is_none(), "output should be None"); // ExecutionEngineV1 sets output to None when error occurred. + let revert_code: u32 = (*revert_code).into(); + output = Some(revert_code.to_le_bytes().to_vec().into()); // Pass serialized revert code as output. 
+ Some(CallError::CalleeReverted) + } + Some(_) => Some(CallError::CalleeTrapped(TrapCode::UnreachableCodeReached)), + None => None, + }; + + // TODO: Support multisig + + // TODO: Convert this to a host error as if it was executed. + + // SAFETY: Gas limit is first promoted from u64 to u512, and we know + // consumed gas under v1 would not exceed the imposed limit therefore an + // unwrap here is safe. + + let remaining_points = gas_limit.checked_sub(gas_consumed).unwrap(); + + let fork2 = tracking_copy.fork2(); + Ok(ExecuteResult { + host_error, + output, + gas_usage: GasUsage::new(gas_limit, remaining_points), + effects: fork2.effects(), + cache: fork2.cache(), + messages: fork2.messages(), + }) + } + + pub fn execute_with_provider( + &self, + state_root_hash: Digest, + state_provider: &R, + execute_request: ExecuteRequest, + ) -> Result + where + R: StateProvider + CommitProvider, + ::Reader: 'static, + { + let tracking_copy = match state_provider.checkout(state_root_hash) { + Ok(Some(tracking_copy)) => tracking_copy, + Ok(None) => { + return Err(ExecuteWithProviderError::GlobalState( + GlobalStateError::RootNotFound, + )) + } + Err(global_state_error) => return Err(global_state_error.into()), + }; + + let tracking_copy = TrackingCopy::new(tracking_copy, 1, state_provider.enable_entity()); + + match self.execute_with_tracking_copy(tracking_copy, execute_request) { + Ok(ExecuteResult { + host_error, + output, + gas_usage, + effects, + cache: _, + messages, + }) => match state_provider.commit_effects(state_root_hash, effects.clone()) { + Ok(post_state_hash) => Ok(ExecuteWithProviderResult::new( + host_error, + output, + gas_usage, + effects, + post_state_hash, + messages, + )), + Err(error) => Err(error.into()), + }, + Err(error) => Err(ExecuteWithProviderError::Execute(error)), + } + } +} + +impl ExecutorV2 { + /// Create a new `ExecutorV2` instance. 
+ pub fn new(config: ExecutorConfig, execution_engine_v1: Arc) -> Self { + let wasm_engine = match config.executor_kind { + ExecutorKind::Compiled => WasmerEngine::new(), + }; + ExecutorV2 { + config, + compiled_wasm_engine: Arc::new(wasm_engine), + execution_stack: Default::default(), + execution_engine_v1, + } + } + + /// Push the execution stack. + pub(crate) fn push_execution_stack(&self, execution_kind: ExecutionKind) { + let mut execution_stack = self.execution_stack.write(); + execution_stack.push_back(execution_kind); + } + + /// Pop the execution stack. + pub(crate) fn pop_execution_stack(&self) -> Option { + let mut execution_stack = self.execution_stack.write(); + execution_stack.pop_back() + } +} + +impl Executor for ExecutorV2 { + /// Execute a Wasm contract. + /// + /// # Errors + /// Returns an error if the execution fails. This can happen if the Wasm instance cannot be + /// prepared. Otherwise, returns the result of the execution with a gas usage attached which + /// means a successful execution (that may or may not have produced an error such as a trap, + /// return, or out of gas). 
+ fn execute( + &self, + tracking_copy: TrackingCopy, + execute_request: ExecuteRequest, + ) -> Result { + self.execute_with_tracking_copy(tracking_copy, execute_request) + } +} + +fn get_purse_for_entity( + tracking_copy: &mut TrackingCopy, + entity_key: Key, +) -> casper_types::URef { + let stored_value = tracking_copy + .read(&entity_key) + .expect("should read account") + .expect("should have account"); + match stored_value { + StoredValue::CLValue(addressable_entity_key) => { + let key = addressable_entity_key + .into_t::() + .expect("should be key"); + let stored_value = tracking_copy + .read(&key) + .expect("should read account") + .expect("should have account"); + + let addressable_entity = stored_value + .into_addressable_entity() + .expect("should be addressable entity"); + + addressable_entity.main_purse() + } + StoredValue::Account(account) => account.main_purse(), + StoredValue::SmartContract(smart_contract_package) => { + let contract_hash = smart_contract_package + .versions() + .latest() + .expect("should have last entry"); + let entity_addr = EntityAddr::SmartContract(contract_hash.value()); + let latest_version_key = Key::AddressableEntity(entity_addr); + let new_contract = tracking_copy + .read(&latest_version_key) + .expect("should read latest version"); + let addressable_entity = new_contract + .expect("should have addressable entity") + .into_addressable_entity() + .expect("should be addressable entity"); + addressable_entity.main_purse() + } + other => panic!("should be account or contract received {other:?}"), + } +} diff --git a/executor/wasm/src/system.rs b/executor/wasm/src/system.rs new file mode 100644 index 0000000000..82671b618b --- /dev/null +++ b/executor/wasm/src/system.rs @@ -0,0 +1,293 @@ +//! System contract wire up for the new engine. +//! +//! This module wraps system contract logic into a dispatcher that can be used by the new engine +//! hiding the complexity of the underlying implementation. 
+use std::{cell::RefCell, rc::Rc, sync::Arc}; + +use casper_executor_wasm_common::error::{CallError, TrapCode}; +use casper_executor_wasm_interface::HostResult; +use casper_storage::{ + global_state::GlobalStateReader, + system::{ + mint::Mint, + runtime_native::{Config, Id, RuntimeNative}, + }, + tracking_copy::{TrackingCopyEntityExt, TrackingCopyError}, + AddressGenerator, TrackingCopy, +}; +use casper_types::{ + account::AccountHash, CLValueError, ContextAccessRights, EntityAddr, Key, Phase, + ProtocolVersion, PublicKey, SystemHashRegistry, TransactionHash, URef, U512, +}; +use parking_lot::RwLock; +use thiserror::Error; +use tracing::{debug, error}; + +#[derive(Debug, Error)] +enum DispatchError { + #[error("Tracking copy error: {0}")] + Storage(TrackingCopyError), + #[error("CLValue error: {0}")] + CLValue(CLValueError), + #[error("Registry not found")] + RegistryNotFound, + #[error("Missing system contract: {0}")] + MissingSystemContract(&'static str), + #[error("Runtime footprint")] + RuntimeFootprint(TrackingCopyError), +} + +fn dispatch_system_contract( + tracking_copy: &mut TrackingCopy, + transaction_hash: TransactionHash, + address_generator: Arc>, + system_contract: &'static str, + func: impl FnOnce(RuntimeNative) -> Ret, +) -> Result { + let system_entity_registry = { + let stored_value = tracking_copy + .read(&Key::SystemEntityRegistry) + .map_err(DispatchError::Storage)? + .ok_or(DispatchError::RegistryNotFound)?; + stored_value + .into_cl_value() + .expect("should convert stored value into CLValue") + .into_t::() + .map_err(DispatchError::CLValue)? 
+ }; + let system_entity_addr = system_entity_registry + .get(system_contract) + .ok_or(DispatchError::MissingSystemContract(system_contract))?; + let entity_addr = EntityAddr::new_system(*system_entity_addr); + + // let addressable_entity_stored_value = + + let runtime_footprint = tracking_copy + .runtime_footprint_by_entity_addr(entity_addr) + .map_err(DispatchError::RuntimeFootprint)?; + + let config = Config::default(); + let protocol_version = ProtocolVersion::V1_0_0; + + let access_rights = ContextAccessRights::new(*system_entity_addr, []); + let address = PublicKey::System.to_account_hash(); + + let forked_tracking_copy = Rc::new(RefCell::new(tracking_copy.fork2())); + + let remaining_spending_limit = U512::MAX; // NOTE: Since there's no custom payment, there's no need to track the remaining spending limit. + let phase = Phase::System; // NOTE: Since this is a system contract, the phase is always `System`. + + let ret = { + let runtime = RuntimeNative::new( + config, + protocol_version, + Id::Transaction(transaction_hash), + address_generator, + Rc::clone(&forked_tracking_copy), + address, + Key::AddressableEntity(entity_addr), + runtime_footprint, + access_rights, + remaining_spending_limit, + phase, + ); + + func(runtime) + }; + + // SAFETY: `RuntimeNative` is dropped in the block above, we can extract the tracking copy the + // effects. 
+ let modified_tracking_copy = Rc::try_unwrap(forked_tracking_copy) + .ok() + .expect("No other references"); + + let modified_tracking_copy = modified_tracking_copy.into_inner(); + + tracking_copy.apply_changes( + modified_tracking_copy.effects(), + modified_tracking_copy.cache(), + modified_tracking_copy.messages(), + ); + + Ok(ret) +} + +#[derive(Debug, Clone, Copy)] +pub(crate) struct MintArgs { + pub(crate) initial_balance: U512, +} + +pub(crate) fn mint_mint( + tracking_copy: &mut TrackingCopy, + transaction_hash: TransactionHash, + address_generator: Arc>, + args: MintArgs, +) -> Result { + let mint_result = match dispatch_system_contract( + tracking_copy, + transaction_hash, + address_generator, + "mint", + |mut runtime| runtime.mint(args.initial_balance), + ) { + Ok(mint_result) => mint_result, + Err(error) => { + error!(%error, ?args, "mint failed"); + panic!("Mint failed with error {error:?}; aborting"); + } + }; + + match mint_result { + Ok(uref) => Ok(uref), + Err(casper_types::system::mint::Error::InsufficientFunds) => Err(CallError::CalleeReverted), + Err(casper_types::system::mint::Error::GasLimit) => Err(CallError::CalleeGasDepleted), + Err(mint_error) => { + error!(%mint_error, ?args, "mint transfer failed"); + Err(CallError::CalleeTrapped(TrapCode::UnreachableCodeReached)) + } + } +} + +#[derive(Debug, Copy, Clone)] +pub(crate) struct MintTransferArgs { + pub(crate) maybe_to: Option, + pub(crate) source: URef, + pub(crate) target: URef, + pub(crate) amount: U512, + pub(crate) id: Option, +} + +pub(crate) fn mint_transfer( + tracking_copy: &mut TrackingCopy, + id: TransactionHash, + address_generator: Arc>, + args: MintTransferArgs, +) -> HostResult { + let transfer_result: Result<(), casper_types::system::mint::Error> = + match dispatch_system_contract( + tracking_copy, + id, + address_generator, + "mint", + |mut runtime| { + runtime.transfer( + args.maybe_to, + args.source, + args.target, + args.amount, + args.id, + ) + }, + ) { + Ok(result) => 
result, + Err(error) => { + error!(%error, "mint transfer failed"); + return Err(CallError::CalleeTrapped(TrapCode::UnreachableCodeReached)); + } + }; + + debug!(?args, ?transfer_result, "transfer"); + + match transfer_result { + Ok(()) => Ok(()), + Err(casper_types::system::mint::Error::InsufficientFunds) => Err(CallError::CalleeReverted), + Err(casper_types::system::mint::Error::GasLimit) => Err(CallError::CalleeGasDepleted), + Err(mint_error) => { + error!(%mint_error, ?args, "mint transfer failed"); + Err(CallError::CalleeTrapped(TrapCode::UnreachableCodeReached)) + } + } +} + +#[cfg(test)] +mod tests { + use std::sync::Arc; + + use casper_storage::{ + data_access_layer::{GenesisRequest, GenesisResult}, + global_state::{ + self, + state::{CommitProvider, StateProvider}, + }, + system::{ + mint::{storage_provider::StorageProvider, Mint}, + runtime_native::Id, + }, + AddressGenerator, + }; + use casper_types::{ + ChainspecRegistry, Digest, GenesisConfig, Phase, ProtocolVersion, TransactionHash, + TransactionV1Hash, U512, + }; + use parking_lot::RwLock; + + use crate::system::dispatch_system_contract; + + #[test] + fn test_system_dispatcher() { + let (global_state, mut root_hash, _tempdir) = + global_state::state::lmdb::make_temporary_global_state([]); + + let genesis_config = GenesisConfig::default(); + + let genesis_request: GenesisRequest = GenesisRequest::new( + Digest::hash("foo"), + ProtocolVersion::V2_0_0, + genesis_config, + ChainspecRegistry::new_with_genesis(b"", b""), + ); + + match global_state.genesis(genesis_request) { + GenesisResult::Failure(failure) => panic!("Failed to run genesis: {:?}", failure), + GenesisResult::Fatal(fatal) => panic!("Fatal error while running genesis: {}", fatal), + GenesisResult::Success { + post_state_hash, + effects: _, + } => { + root_hash = post_state_hash; + } + } + + let mut tracking_copy = global_state + .tracking_copy(root_hash) + .expect("Obtaining root hash succeed") + .expect("Root hash exists"); + + let 
transaction_hash_bytes: [u8; 32] = [1; 32]; + let transaction_hash: TransactionHash = + TransactionHash::V1(TransactionV1Hash::from_raw(transaction_hash_bytes)); + let id = Id::Transaction(transaction_hash); + let address_generator = Arc::new(RwLock::new(AddressGenerator::new( + &id.seed(), + Phase::Session, + ))); + + let ret = dispatch_system_contract( + &mut tracking_copy, + transaction_hash, + Arc::clone(&address_generator), + "mint", + |mut runtime| runtime.mint(U512::from(1000u64)), + ); + + let uref = ret.expect("dispatch mint").expect("uref"); + + let ret: Result, _> = dispatch_system_contract( + &mut tracking_copy, + transaction_hash, + Arc::clone(&address_generator), + "mint", + |mut runtime| runtime.total_balance(uref), + ); + + // let ret = ret.expect("dispatch total balance"); + + assert_eq!(ret.unwrap(), Ok(U512::from(1000u64))); + + let post_root_hash = global_state + .commit(root_hash, tracking_copy.effects()) + .expect("Should apply effect"); + + assert_ne!(post_root_hash, root_hash); + } +} diff --git a/executor/wasm/tests/integration.rs b/executor/wasm/tests/integration.rs new file mode 100644 index 0000000000..991d22c5f7 --- /dev/null +++ b/executor/wasm/tests/integration.rs @@ -0,0 +1,1018 @@ +use std::{ + env, + fs::{self, File}, + path::{Path, PathBuf}, + sync::Arc, +}; + +use bytes::Bytes; +use casper_execution_engine::engine_state::ExecutionEngineV1; +use casper_executor_wasm::{ + install::{ + InstallContractError, InstallContractRequest, InstallContractRequestBuilder, + InstallContractResult, + }, + ExecutorConfigBuilder, ExecutorKind, ExecutorV2, +}; +use casper_executor_wasm_common::error::CallError; +use casper_executor_wasm_interface::executor::{ + ExecuteError, ExecuteRequest, ExecuteRequestBuilder, ExecuteWithProviderError, + ExecuteWithProviderResult, ExecutionKind, +}; +use casper_storage::{ + data_access_layer::{ + prefixed_values::{PrefixedValuesRequest, PrefixedValuesResult}, + GenesisRequest, GenesisResult, 
MessageTopicsRequest, MessageTopicsResult, QueryRequest, + QueryResult, + }, + global_state::{ + self, + state::{lmdb::LmdbGlobalState, CommitProvider, StateProvider}, + transaction_source::lmdb::LmdbEnvironment, + trie_store::lmdb::LmdbTrieStore, + }, + system::runtime_native::Id, + AddressGenerator, KeyPrefix, +}; +use casper_types::{ + account::AccountHash, BlockHash, ChainspecRegistry, Digest, EntityAddr, GenesisAccount, + GenesisConfig, HostFunctionCostsV2, HostFunctionV2, Key, MessageLimits, Motes, Phase, + ProtocolVersion, PublicKey, SecretKey, StorageCosts, StoredValue, SystemConfig, Timestamp, + TransactionHash, TransactionV1Hash, WasmConfig, WasmV2Config, U512, +}; +use fs_extra::dir; +use itertools::Itertools; +use once_cell::sync::Lazy; +use parking_lot::RwLock; +use tempfile::TempDir; + +static DEFAULT_ACCOUNT_SECRET_KEY: Lazy = + Lazy::new(|| SecretKey::ed25519_from_bytes([199; SecretKey::ED25519_LENGTH]).unwrap()); +static DEFAULT_ACCOUNT_PUBLIC_KEY: Lazy = + Lazy::new(|| PublicKey::from(&*DEFAULT_ACCOUNT_SECRET_KEY)); +static DEFAULT_ACCOUNT_HASH: Lazy = + Lazy::new(|| DEFAULT_ACCOUNT_PUBLIC_KEY.to_account_hash()); + +const CSPR: u64 = 10u64.pow(9); + +static RUST_WORKSPACE_PATH: Lazy = Lazy::new(|| { + let path = Path::new(env!("CARGO_MANIFEST_DIR")) + .parent() + .and_then(Path::parent) + .expect("CARGO_MANIFEST_DIR should have parent"); + assert!( + path.exists(), + "Workspace path {} does not exists", + path.display() + ); + path.to_path_buf() +}); + +static RUST_WORKSPACE_WASM_PATH: Lazy = Lazy::new(|| { + let path = RUST_WORKSPACE_PATH + .join("target") + .join("wasm32-unknown-unknown") + .join("release"); + assert!( + path.exists() || RUST_TOOL_WASM_PATH.exists(), + "Rust Wasm path {} does not exists", + path.display() + ); + path +}); +// The location of compiled Wasm files if running from within the 'tests' crate generated by the +// cargo_casper tool, i.e. 'wasm/'. 
+static RUST_TOOL_WASM_PATH: Lazy = Lazy::new(|| { + env::current_dir() + .expect("should get current working dir") + .join("wasm") +}); + +#[track_caller] +fn read_wasm>(filename: P) -> Bytes { + let paths = vec![ + RUST_WORKSPACE_WASM_PATH.clone(), + RUST_TOOL_WASM_PATH.clone(), + ]; + + for path in &paths { + let wasm_path = path.join(&filename); + match fs::read(wasm_path) { + Ok(bytes) => return Bytes::from(bytes), + Err(err) => { + if err.kind() == std::io::ErrorKind::NotFound { + continue; + } else { + panic!( + "Failed to read Wasm file at {}: {}", + filename.as_ref().display(), + err + ); + } + } + } + } + + panic!( + "Failed to find Wasm file at {} in any of the paths: {:?}", + filename.as_ref().display(), + paths + ); +} + +const TRANSACTION_HASH_BYTES: [u8; 32] = [55; 32]; +const TRANSACTION_HASH: TransactionHash = + TransactionHash::V1(TransactionV1Hash::from_raw(TRANSACTION_HASH_BYTES)); +const DEFAULT_GAS_LIMIT: u64 = 1_000_000 * CSPR; +const DEFAULT_CHAIN_NAME: &str = "casper-test"; + +// TODO: This is a temporary value, it should be set in the config. Default value from V1 engine +// does not apply to V2 engine due to different cost structure. Rather than hardcoding it here, we +// should probably reflect gas costs in a dynamic costs in host function charge. Proper value is +// pending calculation. 
+const DEFAULT_GAS_PER_BYTE_COST: u32 = 1_117_587; + +fn make_address_generator() -> Arc> { + let id = Id::Transaction(TRANSACTION_HASH); + Arc::new(RwLock::new(AddressGenerator::new( + &id.seed(), + Phase::Session, + ))) +} + +fn base_execute_builder() -> ExecuteRequestBuilder { + ExecuteRequestBuilder::default() + .with_initiator(*DEFAULT_ACCOUNT_HASH) + .with_caller_key(Key::Account(*DEFAULT_ACCOUNT_HASH)) + .with_gas_limit(DEFAULT_GAS_LIMIT) + .with_transferred_value(1000) + .with_transaction_hash(TRANSACTION_HASH) + .with_chain_name(DEFAULT_CHAIN_NAME) + .with_block_time(Timestamp::now().into()) + .with_state_hash(Digest::hash(b"state")) + .with_block_height(1) + .with_parent_block_hash(BlockHash::new(Digest::hash(b"block1"))) +} + +fn base_install_request_builder() -> InstallContractRequestBuilder { + InstallContractRequestBuilder::default() + .with_initiator(*DEFAULT_ACCOUNT_HASH) + .with_gas_limit(DEFAULT_GAS_LIMIT) + .with_transaction_hash(TRANSACTION_HASH) + .with_chain_name(DEFAULT_CHAIN_NAME) + .with_block_time(Timestamp::now().into()) + .with_state_hash(Digest::hash(b"state")) + .with_block_height(1) + .with_parent_block_hash(BlockHash::new(Digest::hash(b"block1"))) +} + +#[test] +fn harness() { + let mut executor = make_executor(); + + let (mut global_state, mut state_root_hash, _tempdir) = make_global_state_with_genesis(); + + let address_generator = make_address_generator(); + + let flipper_address; + + state_root_hash = { + let input_data = borsh::to_vec(&("Foo Token".to_string(),)) + .map(Bytes::from) + .unwrap(); + + let install_request = base_install_request_builder() + .with_wasm_bytes(read_wasm("vm2_cep18.wasm")) + .with_shared_address_generator(Arc::clone(&address_generator)) + .with_transferred_value(0) + .with_entry_point("new".to_string()) + .with_input(input_data) + .build() + .expect("should build"); + + let create_result = run_create_contract( + &mut executor, + &mut global_state, + state_root_hash, + install_request, + ); + + 
flipper_address = *create_result.smart_contract_addr(); + + global_state + .commit_effects(state_root_hash, create_result.effects().clone()) + .expect("Should commit") + }; + + let execute_request = ExecuteRequestBuilder::default() + .with_initiator(*DEFAULT_ACCOUNT_HASH) + .with_caller_key(Key::Account(*DEFAULT_ACCOUNT_HASH)) + .with_gas_limit(DEFAULT_GAS_LIMIT) + .with_transferred_value(1000) + .with_transaction_hash(TRANSACTION_HASH) + .with_target(ExecutionKind::SessionBytes(read_wasm("vm2-harness.wasm"))) + .with_serialized_input((flipper_address,)) + .with_shared_address_generator(address_generator) + .with_chain_name(DEFAULT_CHAIN_NAME) + .with_block_time(Timestamp::now().into()) + .with_state_hash(state_root_hash) + .with_block_height(1) + .with_parent_block_hash(BlockHash::new(Digest::hash(b"bl0ck"))) + .build() + .expect("should build"); + + run_wasm_session( + &mut executor, + &mut global_state, + state_root_hash, + execute_request, + ); +} + +pub(crate) fn make_executor() -> ExecutorV2 { + let storage_costs = StorageCosts::new(DEFAULT_GAS_PER_BYTE_COST); + let execution_engine_v1 = ExecutionEngineV1::default(); + let executor_config = ExecutorConfigBuilder::default() + .with_memory_limit(17) + .with_executor_kind(ExecutorKind::Compiled) + .with_wasm_config(WasmV2Config::default()) + .with_storage_costs(storage_costs) + .with_message_limits(MessageLimits::default()) + .build() + .expect("Should build"); + ExecutorV2::new(executor_config, Arc::new(execution_engine_v1)) +} + +#[test] + +fn cep18() { + let mut executor = make_executor(); + + let (mut global_state, mut state_root_hash, _tempdir) = make_global_state_with_genesis(); + + let address_generator = make_address_generator(); + + let input_data = borsh::to_vec(&("Foo Token".to_string(),)) + .map(Bytes::from) + .unwrap(); + + let block_time_1 = Timestamp::now().into(); + + let create_request = base_install_request_builder() + .with_initiator(*DEFAULT_ACCOUNT_HASH) + 
.with_transaction_hash(TRANSACTION_HASH) + .with_wasm_bytes(read_wasm("vm2_cep18.wasm").clone()) + .with_shared_address_generator(Arc::clone(&address_generator)) + .with_transferred_value(0) + .with_entry_point("new".to_string()) + .with_input(input_data) + .with_block_time(block_time_1) + .with_state_hash(Digest::from_raw([0; 32])) // TODO: Carry on state root hash + .with_block_height(1) // TODO: Carry on block height + .with_parent_block_hash(BlockHash::new(Digest::from_raw([0; 32]))) // TODO: Carry on parent block hash + .build() + .expect("should build"); + + let create_result = run_create_contract( + &mut executor, + &mut global_state, + state_root_hash, + create_request, + ); + + dbg!(create_result.gas_usage().gas_spent()); + + let contract_hash = EntityAddr::SmartContract(*create_result.smart_contract_addr()); + + state_root_hash = global_state + .commit_effects(state_root_hash, create_result.effects().clone()) + .expect("Should commit"); + + let msgs = global_state.prefixed_values(PrefixedValuesRequest::new( + state_root_hash, + KeyPrefix::MessageEntriesByEntity(contract_hash), + )); + let PrefixedValuesResult::Success { + key_prefix: _, + values, + } = msgs + else { + panic!("Expected success") + }; + + { + let mut topics_1 = values + .iter() + .filter_map(|stored_value| stored_value.as_message_topic_summary()) + .collect_vec(); + topics_1 + .sort_by_key(|topic| (topic.topic_name(), topic.blocktime(), topic.message_count())); + + assert_eq!(topics_1[0].topic_name(), "Transfer"); + assert_eq!(topics_1[0].message_count(), 1); + assert_eq!(topics_1[0].blocktime(), block_time_1); + } + + let block_time_2 = (block_time_1.value() + 1).into(); + assert_ne!(block_time_1, block_time_2); + + let execute_request = ExecuteRequestBuilder::default() + .with_initiator(*DEFAULT_ACCOUNT_HASH) + .with_caller_key(Key::Account(*DEFAULT_ACCOUNT_HASH)) + .with_gas_limit(DEFAULT_GAS_LIMIT) + .with_transferred_value(1000) + .with_transaction_hash(TRANSACTION_HASH) + 
.with_target(ExecutionKind::SessionBytes(read_wasm( + "vm2_cep18_caller.wasm", + ))) + .with_serialized_input((create_result.smart_contract_addr(),)) + .with_transferred_value(0) + .with_shared_address_generator(Arc::clone(&address_generator)) + .with_chain_name(DEFAULT_CHAIN_NAME) + .with_block_time(block_time_2) + .with_state_hash(Digest::from_raw([0; 32])) // TODO: Carry on state root hash + .with_block_height(2) // TODO: Carry on block height + .with_parent_block_hash(BlockHash::new(Digest::from_raw([0; 32]))) // TODO: Carry on parent block hash + .build() + .expect("should build"); + + let result_2 = run_wasm_session( + &mut executor, + &mut global_state, + state_root_hash, + execute_request, + ); + dbg!(result_2.gas_usage().gas_spent()); + + state_root_hash = global_state + .commit_effects(state_root_hash, result_2.effects().clone()) + .expect("Should commit"); + + let MessageTopicsResult::Success { message_topics } = + global_state.message_topics(MessageTopicsRequest::new(state_root_hash, contract_hash)) + else { + panic!("Expected success") + }; + + assert!(matches!(message_topics.get("Transfer"), Some(_))); + assert_ne!( + message_topics.get("Mint"), + message_topics.get("Transfer"), + "Mint and Transfer topics should have different hashes" + ); + + { + let msgs = global_state.prefixed_values(PrefixedValuesRequest::new( + state_root_hash, + KeyPrefix::MessageEntriesByEntity(contract_hash), + )); + let PrefixedValuesResult::Success { + key_prefix: _, + values, + } = msgs + else { + panic!("Expected success") + }; + + let mut topics_2 = values + .iter() + .filter_map(|stored_value| stored_value.as_message_topic_summary()) + .collect_vec(); + topics_2 + .sort_by_key(|topic| (topic.topic_name(), topic.blocktime(), topic.message_count())); + + assert_eq!(topics_2.len(), 1); + assert_eq!(topics_2[0].topic_name(), "Transfer"); + assert_eq!(topics_2[0].message_count(), 2); + assert_eq!(topics_2[0].blocktime(), block_time_2); // NOTE: Session called mint; the topic 
+ // summary blocktime is refreshed + } + + let mut messages = result_2.messages().iter().collect_vec(); + messages.sort_by_key(|message| { + ( + message.topic_name(), + message.topic_index(), + message.block_index(), + ) + }); + assert_eq!(messages.len(), 2); + assert_eq!(messages[0].topic_name(), "Transfer"); + assert_eq!(messages[0].topic_index(), 0); + assert_eq!(messages[0].block_index(), 0); + + assert_eq!(messages[1].topic_name(), "Transfer"); + assert_eq!(messages[1].topic_index(), 1); + assert_eq!(messages[1].block_index(), 1); +} + +fn make_global_state_with_genesis() -> (LmdbGlobalState, Digest, TempDir) { + let default_accounts = vec![GenesisAccount::Account { + public_key: DEFAULT_ACCOUNT_PUBLIC_KEY.clone(), + balance: Motes::new(U512::from(100 * CSPR)), + validator: None, + }]; + + let (global_state, _state_root_hash, _tempdir) = + global_state::state::lmdb::make_temporary_global_state([]); + + let genesis_config = GenesisConfig::new( + default_accounts, + WasmConfig::default(), + SystemConfig::default(), + 10, + 10, + 0, + Default::default(), + 14, + Timestamp::now().millis(), + casper_types::HoldBalanceHandling::Accrued, + 0, + true, + StorageCosts::default(), + ); + let genesis_request: GenesisRequest = GenesisRequest::new( + Digest::hash("foo"), + ProtocolVersion::V2_0_0, + genesis_config, + ChainspecRegistry::new_with_genesis(b"", b""), + ); + match global_state.genesis(genesis_request) { + GenesisResult::Failure(failure) => panic!("Failed to run genesis: {:?}", failure), + GenesisResult::Fatal(fatal) => panic!("Fatal error while running genesis: {}", fatal), + GenesisResult::Success { + post_state_hash, + effects: _, + } => (global_state, post_state_hash, _tempdir), + } +} + +#[test] +fn traits() { + let mut executor = make_executor(); + let (mut global_state, state_root_hash, _tempdir) = make_global_state_with_genesis(); + + let execute_request = base_execute_builder() + .with_target(ExecutionKind::SessionBytes(read_wasm("vm2_trait.wasm"))) + 
.with_serialized_input(()) + .with_shared_address_generator(make_address_generator()) + .build() + .expect("should build"); + + run_wasm_session( + &mut executor, + &mut global_state, + state_root_hash, + execute_request, + ); +} + +#[test] +fn upgradable() { + let mut executor = make_executor(); + + let (mut global_state, mut state_root_hash, _tempdir) = make_global_state_with_genesis(); + + let address_generator = make_address_generator(); + + let upgradable_address; + + state_root_hash = { + let input_data = borsh::to_vec(&(0u8,)).map(Bytes::from).unwrap(); + + let create_request = base_install_request_builder() + .with_wasm_bytes(read_wasm("vm2_upgradable.wasm")) + .with_shared_address_generator(Arc::clone(&address_generator)) + .with_gas_limit(DEFAULT_GAS_LIMIT) + .with_transferred_value(0) + .with_entry_point("new".to_string()) + .with_input(input_data) + .build() + .expect("should build"); + + let create_result = run_create_contract( + &mut executor, + &mut global_state, + state_root_hash, + create_request, + ); + + upgradable_address = *create_result.smart_contract_addr(); + + global_state + .commit_effects(state_root_hash, create_result.effects().clone()) + .expect("Should commit") + }; + + let version_before_upgrade = { + let execute_request = base_execute_builder() + .with_target(ExecutionKind::Stored { + address: upgradable_address, + entry_point: "version".to_string(), + }) + .with_input(Bytes::new()) + .with_gas_limit(DEFAULT_GAS_LIMIT) + .with_transferred_value(0) + .with_shared_address_generator(Arc::clone(&address_generator)) + .build() + .expect("should build"); + let res = run_wasm_session( + &mut executor, + &mut global_state, + state_root_hash, + execute_request, + ); + let output = res.output().expect("should have output"); + let version: String = borsh::from_slice(output).expect("should deserialize"); + version + }; + assert_eq!(version_before_upgrade, "v1"); + + { + // Increment the value + let execute_request = base_execute_builder() + 
.with_target(ExecutionKind::Stored { + address: upgradable_address, + entry_point: "increment".to_string(), + }) + .with_input(Bytes::new()) + .with_gas_limit(DEFAULT_GAS_LIMIT) + .with_transferred_value(0) + .with_shared_address_generator(Arc::clone(&address_generator)) + .build() + .expect("should build"); + let res = run_wasm_session( + &mut executor, + &mut global_state, + state_root_hash, + execute_request, + ); + state_root_hash = global_state + .commit_effects(state_root_hash, res.effects().clone()) + .expect("Should commit"); + }; + + let binding = read_wasm("vm2_upgradable_v2.wasm"); + let new_code = binding.as_ref(); + + let execute_request = base_execute_builder() + .with_transferred_value(0) + .with_target(ExecutionKind::Stored { + address: upgradable_address, + entry_point: "perform_upgrade".to_string(), + }) + .with_gas_limit(DEFAULT_GAS_LIMIT * 10) + .with_serialized_input((new_code,)) + .with_shared_address_generator(Arc::clone(&address_generator)) + .build() + .expect("should build"); + let res = run_wasm_session( + &mut executor, + &mut global_state, + state_root_hash, + execute_request, + ); + state_root_hash = global_state + .commit_effects(state_root_hash, res.effects().clone()) + .expect("Should commit"); + + let version_after_upgrade = { + let execute_request = base_execute_builder() + .with_target(ExecutionKind::Stored { + address: upgradable_address, + entry_point: "version".to_string(), + }) + .with_input(Bytes::new()) + .with_gas_limit(DEFAULT_GAS_LIMIT) + .with_transferred_value(0) + .with_shared_address_generator(Arc::clone(&address_generator)) + .build() + .expect("should build"); + let res = run_wasm_session( + &mut executor, + &mut global_state, + state_root_hash, + execute_request, + ); + let output = res.output().expect("should have output"); + let version: String = borsh::from_slice(output).expect("should deserialize"); + version + }; + assert_eq!(version_after_upgrade, "v2"); + + { + // Increment the value + let execute_request = 
base_execute_builder() + .with_target(ExecutionKind::Stored { + address: upgradable_address, + entry_point: "increment_by".to_string(), + }) + .with_serialized_input((10u64,)) + .with_gas_limit(DEFAULT_GAS_LIMIT) + .with_transferred_value(0) + .with_shared_address_generator(Arc::clone(&address_generator)) + .build() + .expect("should build"); + let res = run_wasm_session( + &mut executor, + &mut global_state, + state_root_hash, + execute_request, + ); + state_root_hash = global_state + .commit_effects(state_root_hash, res.effects().clone()) + .expect("Should commit"); + }; + + let _ = state_root_hash; +} + +fn run_create_contract( + executor: &mut ExecutorV2, + global_state: &LmdbGlobalState, + pre_state_hash: Digest, + install_contract_request: InstallContractRequest, +) -> InstallContractResult { + executor + .install_contract(pre_state_hash, global_state, install_contract_request) + .expect("Succeed") +} + +fn run_wasm_session( + executor: &mut ExecutorV2, + global_state: &LmdbGlobalState, + pre_state_hash: Digest, + execute_request: ExecuteRequest, +) -> ExecuteWithProviderResult { + let result = executor + .execute_with_provider(pre_state_hash, global_state, execute_request) + .expect("Succeed"); + + if let Some(host_error) = result.host_error { + panic!("Host error: {host_error:?}") + } + + result +} + +#[test] +fn backwards_compatibility() { + let (mut global_state, post_state_hash, _temp) = { + let fixture_name = "counter_contract"; + // /Users/michal/Dev/casper-node/execution_engine_testing/tests/fixtures/counter_contract/ + // global_state/data.lmdb + let lmdb_fixtures_base_dir = Path::new(env!("CARGO_MANIFEST_DIR")) + .join("../") + .join("../") + .join("execution_engine_testing") + .join("tests") + .join("fixtures"); + assert!(lmdb_fixtures_base_dir.exists()); + + let source = lmdb_fixtures_base_dir.join("counter_contract"); + let to = tempfile::tempdir().expect("should create temp dir"); + fs_extra::copy_items(&[source], &to, 
&dir::CopyOptions::default()) + .expect("should copy global state fixture"); + + let path_to_state = to.path().join(fixture_name).join("state.json"); + + let lmdb_fixture_state: serde_json::Value = + serde_json::from_reader(File::open(path_to_state).unwrap()).unwrap(); + let post_state_hash = + Digest::from_hex(lmdb_fixture_state["post_state_hash"].as_str().unwrap()).unwrap(); + + let path_to_gs = to.path().join(fixture_name).join("global_state"); + + const DEFAULT_LMDB_PAGES: usize = 256_000_000; + const DEFAULT_MAX_READERS: u32 = 512; + + let environment = LmdbEnvironment::new( + &path_to_gs, + 16384 * DEFAULT_LMDB_PAGES, + DEFAULT_MAX_READERS, + true, + ) + .expect("should create LmdbEnvironment"); + + let trie_store = + LmdbTrieStore::open(&environment, None).expect("should open LmdbTrieStore"); + ( + LmdbGlobalState::new( + Arc::new(environment), + Arc::new(trie_store), + post_state_hash, + 100, + false, + ), + post_state_hash, + to, + ) + }; + + let result = global_state.query(QueryRequest::new( + post_state_hash, + Key::Account(*DEFAULT_ACCOUNT_HASH), + Vec::new(), + )); + let value = match result { + QueryResult::RootNotFound => todo!(), + QueryResult::ValueNotFound(value) => panic!("Value not found: {:?}", value), + QueryResult::Success { value, .. } => value, + QueryResult::Failure(failure) => panic!("Failed to query: {:?}", failure), + }; + + // + // Calling legacy contract directly by it's address + // + + let mut state_root_hash = post_state_hash; + + let value = match *value { + StoredValue::Account(account) => account, + _ => panic!("Expected CLValue"), + }; + + let counter_hash = match value.named_keys().get("counter") { + Some(Key::Hash(hash_address)) => hash_address, + _ => panic!("Expected counter URef"), + }; + + let mut executor = make_executor(); + let address_generator = make_address_generator(); + + // Calling v1 vm directly by hash is not currently supported (i.e. 
disabling vm1 runtime, and + // allowing vm1 direct calls may circumvent chainspec setting) let execute_request = + // base_execute_builder() .with_target(ExecutionKind::Stored { + // address: *counter_hash, + // entry_point: "counter_get".to_string(), + // }) + // .with_input(runtime_args.into()) + // .with_gas_limit(DEFAULT_GAS_LIMIT) + // .with_transferred_value(0) + // .with_shared_address_generator(Arc::clone(&address_generator)) + // .with_state_hash(state_root_hash) + // .with_block_height(1) + // .with_parent_block_hash(BlockHash::new(Digest::hash(b"block1"))) + // .build() + // .expect("should build"); + // let res = run_wasm_session( + // &mut executor, + // &mut global_state, + // state_root_hash, + // execute_request, + // ); + // state_root_hash = global_state + // .commit_effects(state_root_hash, res.effects().clone()) + // .expect("Should commit"); + + // + // Instantiate v2 runtime proxy contract + // + let input_data = counter_hash.to_vec(); + let install_request: InstallContractRequest = base_install_request_builder() + .with_wasm_bytes(read_wasm("vm2_legacy_counter_proxy.wasm")) + .with_shared_address_generator(Arc::clone(&address_generator)) + .with_transferred_value(0) + .with_entry_point("new".to_string()) + .with_input(input_data.into()) + .with_state_hash(state_root_hash) + .with_block_height(2) + .with_parent_block_hash(BlockHash::new(Digest::hash(b"block2"))) + .build() + .expect("should build"); + + let create_result = run_create_contract( + &mut executor, + &mut global_state, + state_root_hash, + install_request, + ); + + state_root_hash = create_result.post_state_hash(); + + let proxy_address = *create_result.smart_contract_addr(); + + // Call v2 contract + + let call_request = base_execute_builder() + .with_target(ExecutionKind::Stored { + address: proxy_address, + entry_point: "perform_test".to_string(), + }) + .with_input(Bytes::new()) + .with_gas_limit(DEFAULT_GAS_LIMIT) + .with_transferred_value(0) + 
.with_shared_address_generator(Arc::clone(&address_generator)) + .with_state_hash(state_root_hash) + .with_block_height(3) + .with_parent_block_hash(BlockHash::new(Digest::hash(b"block3"))) + .build() + .expect("should build"); + + run_wasm_session( + &mut executor, + &mut global_state, + state_root_hash, + call_request, + ); +} + +// host function tests + +fn call_dummy_host_fn_by_name( + host_function_name: &str, + gas_limit: u64, +) -> Result { + let executor = { + let execution_engine_v1 = ExecutionEngineV1::default(); + let default_wasm_config = WasmV2Config::default(); + let wasm_config = WasmV2Config::new( + default_wasm_config.max_memory(), + default_wasm_config.opcode_costs(), + HostFunctionCostsV2 { + read: HostFunctionV2::fixed(1), + write: HostFunctionV2::fixed(1), + remove: HostFunctionV2::fixed(1), + copy_input: HostFunctionV2::fixed(1), + ret: HostFunctionV2::fixed(1), + create: HostFunctionV2::fixed(1), + transfer: HostFunctionV2::fixed(1), + env_balance: HostFunctionV2::fixed(1), + upgrade: HostFunctionV2::fixed(1), + call: HostFunctionV2::fixed(1), + print: HostFunctionV2::fixed(1), + emit: HostFunctionV2::fixed(1), + env_info: HostFunctionV2::fixed(1), + }, + ); + let executor_config = ExecutorConfigBuilder::default() + .with_memory_limit(17) + .with_executor_kind(ExecutorKind::Compiled) + .with_wasm_config(wasm_config) + .with_storage_costs(StorageCosts::default()) + .with_message_limits(MessageLimits::default()) + .build() + .expect("Should build"); + ExecutorV2::new(executor_config, Arc::new(execution_engine_v1)) + }; + + let (mut global_state, state_root_hash, _tempdir) = make_global_state_with_genesis(); + + let address_generator = make_address_generator(); + + let input_data = borsh::to_vec(&(host_function_name.to_owned(),)) + .map(Bytes::from) + .unwrap(); + + let create_request = InstallContractRequestBuilder::default() + .with_initiator(*DEFAULT_ACCOUNT_HASH) + .with_gas_limit(gas_limit) + .with_transaction_hash(TRANSACTION_HASH) + 
.with_wasm_bytes(read_wasm("vm2_host.wasm")) + .with_shared_address_generator(Arc::clone(&address_generator)) + .with_transferred_value(0) + .with_entry_point("new".to_string()) + .with_input(input_data) + .with_chain_name(DEFAULT_CHAIN_NAME) + .with_block_time(Timestamp::now().into()) + .with_state_hash(Digest::from_raw([0; 32])) + .with_block_height(1) + .with_parent_block_hash(BlockHash::new(Digest::from_raw([0; 32]))) + .build() + .expect("should build"); + + executor.install_contract(state_root_hash, &mut global_state, create_request) +} + +fn assert_consumes_gas(host_function_name: &str) { + let result = call_dummy_host_fn_by_name(host_function_name, 1); + assert!(result.is_err_and(|e| match e { + InstallContractError::Constructor { + host_error: CallError::CalleeGasDepleted, + } => true, + _ => false, + })); +} + +#[test] +fn host_functions_consume_gas() { + assert_consumes_gas("get_caller"); + assert_consumes_gas("get_block_time"); + assert_consumes_gas("get_transferred_value"); + assert_consumes_gas("get_balance_of"); + assert_consumes_gas("call"); + assert_consumes_gas("input"); + assert_consumes_gas("create"); + assert_consumes_gas("print"); + assert_consumes_gas("read"); + assert_consumes_gas("ret"); + assert_consumes_gas("transfer"); + assert_consumes_gas("upgrade"); + assert_consumes_gas("write"); +} + +#[allow(dead_code)] +fn write_n_bytes_at_limit( + bytes_len: u64, + gas_limit: u64, +) -> Result { + let executor = { + let execution_engine_v1 = ExecutionEngineV1::default(); + let default_wasm_config = WasmV2Config::default(); + let wasm_config = WasmV2Config::new( + default_wasm_config.max_memory(), + default_wasm_config.opcode_costs(), + HostFunctionCostsV2 { + read: HostFunctionV2::fixed(0), + write: HostFunctionV2::fixed(0), + remove: HostFunctionV2::fixed(0), + copy_input: HostFunctionV2::fixed(0), + ret: HostFunctionV2::fixed(0), + create: HostFunctionV2::fixed(0), + transfer: HostFunctionV2::fixed(0), + env_balance: HostFunctionV2::fixed(0), + 
upgrade: HostFunctionV2::fixed(0), + call: HostFunctionV2::fixed(0), + print: HostFunctionV2::fixed(0), + emit: HostFunctionV2::fixed(0), + env_info: HostFunctionV2::fixed(0), + }, + ); + let executor_config = ExecutorConfigBuilder::default() + .with_memory_limit(17) + .with_executor_kind(ExecutorKind::Compiled) + .with_wasm_config(wasm_config) + .with_storage_costs(StorageCosts::new(1)) + .with_message_limits(MessageLimits::default()) + .build() + .expect("Should build"); + ExecutorV2::new(executor_config, Arc::new(execution_engine_v1)) + }; + + let (mut global_state, state_root_hash, _tempdir) = make_global_state_with_genesis(); + + let address_generator = make_address_generator(); + + let input_data = borsh::to_vec(&(bytes_len,)).map(Bytes::from).unwrap(); + + let create_request = InstallContractRequestBuilder::default() + .with_initiator(*DEFAULT_ACCOUNT_HASH) + .with_gas_limit(gas_limit) + .with_transaction_hash(TRANSACTION_HASH) + .with_wasm_bytes(read_wasm("vm2_host.wasm")) + .with_shared_address_generator(Arc::clone(&address_generator)) + .with_transferred_value(0) + .with_entry_point("new_with_write".to_string()) + .with_input(input_data) + .with_chain_name(DEFAULT_CHAIN_NAME) + .with_block_time(Timestamp::now().into()) + .with_state_hash(Digest::from_raw([0; 32])) + .with_block_height(1) + .with_parent_block_hash(BlockHash::new(Digest::from_raw([0; 32]))) + .build() + .expect("should build"); + + executor.install_contract(state_root_hash, &mut global_state, create_request) +} + +// #[test] +// fn consume_gas_on_write() { +// let successful_write = write_n_bytes_at_limit(50, 10_000); +// assert!(successful_write.is_ok()); + +// let out_of_gas_write_exceeded_gas_limit = write_n_bytes_at_limit(50, 10); +// assert!(out_of_gas_write_exceeded_gas_limit.is_err_and(|e| match e { +// InstallContractError::Constructor { +// host_error: HostError::CalleeGasDepleted, +// } => true, +// _ => false, +// })); +// } + +#[test] +fn 
non_existing_smart_contract_does_not_panic() { + let address_generator = make_address_generator(); + let executor = make_executor(); + let (mut global_state, state_root_hash, _tempdir) = make_global_state_with_genesis(); + + let non_existing_address = [255; 32]; + let execute_request = base_execute_builder() + .with_target(ExecutionKind::Stored { + address: non_existing_address, + entry_point: "non_existing".to_string(), + }) + .with_input(Bytes::new()) + .with_gas_limit(DEFAULT_GAS_LIMIT) + .with_transferred_value(0) + .with_shared_address_generator(Arc::clone(&address_generator)) + .build() + .expect("should build"); + + let result = executor + .execute_with_provider(state_root_hash, &mut global_state, execute_request) + .expect_err("Failure"); + + assert!(matches!( + result, + ExecuteWithProviderError::Execute(execute_error) if matches!(execute_error, ExecuteError::CodeNotFound(address) if address == non_existing_address))); +} diff --git a/executor/wasm_common/Cargo.toml b/executor/wasm_common/Cargo.toml new file mode 100644 index 0000000000..65bc54c0b9 --- /dev/null +++ b/executor/wasm_common/Cargo.toml @@ -0,0 +1,25 @@ +[package] +name = "casper-executor-wasm-common" +version = "0.1.3" +edition = "2021" +authors = ["Michał Papierski "] +description = "Casper executor common package" +homepage = "https://casper.network" +repository = "https://github.com/casper-network/casper-node/tree/dev/executor/wasm_common" +license = "Apache-2.0" + +[dependencies] +bitflags = "2.9" +blake2 = "0.10" +borsh = { version = "1.5", features = ["derive"] } +casper-contract-sdk-sys = { version = "0.1.3", path = "../../smart_contracts/sdk_sys" } +num-derive = { workspace = true } +num-traits = { workspace = true } +thiserror = "2" +safe-transmute = "0.11" + +[target.'cfg(not(target_arch = "wasm32"))'.dependencies] +serde = { version = "1", features = ["derive"] } + +[dev-dependencies] +hex = "0.4" diff --git a/executor/wasm_common/src/chain_utils.rs 
b/executor/wasm_common/src/chain_utils.rs new file mode 100644 index 0000000000..ba31e22242 --- /dev/null +++ b/executor/wasm_common/src/chain_utils.rs @@ -0,0 +1,48 @@ +use blake2::{digest::consts::U32, Blake2b, Digest}; + +/// Compute a predictable address for a contract. +/// +/// The address is computed as the hash of the chain name, initiator account, and the hash of the +/// Wasm code. +pub fn compute_predictable_address>( + chain_name: T, + initiator_address: [u8; 32], + bytecode_hash: [u8; 32], + seed: Option<[u8; 32]>, +) -> [u8; 32] { + let mut hasher = Blake2b::::new(); + + hasher.update(chain_name); + hasher.update(initiator_address); + hasher.update(bytecode_hash); + + if let Some(seed) = seed { + hasher.update(seed); + } + + hasher.finalize().into() +} + +pub fn compute_wasm_bytecode_hash>(wasm_bytes: T) -> [u8; 32] { + let mut hasher = Blake2b::::new(); + hasher.update(wasm_bytes); + let hash = hasher.finalize(); + hash.into() +} + +#[cfg(test)] +mod tests { + const SEED: [u8; 32] = [1u8; 32]; + + #[test] + fn test_compute_predictable_address() { + let initiator = [1u8; 32]; + let bytecode_hash = [2u8; 32]; + + let predictable_address_1 = + super::compute_predictable_address("testnet", initiator, bytecode_hash, Some(SEED)); + let predictable_address_2 = + super::compute_predictable_address("mainnet", initiator, bytecode_hash, Some(SEED)); + assert_ne!(predictable_address_1, predictable_address_2); + } +} diff --git a/executor/wasm_common/src/entry_point.rs b/executor/wasm_common/src/entry_point.rs new file mode 100644 index 0000000000..b7a32a1e63 --- /dev/null +++ b/executor/wasm_common/src/entry_point.rs @@ -0,0 +1,8 @@ +/// The caller must cover cost. +/// +/// This is the default mode in VM2 runtime. 
+pub const ENTRY_POINT_PAYMENT_CALLER: u8 = 0; +/// Will cover cost to execute self but not cost of any subsequent invoked contracts +pub const ENTRY_POINT_PAYMENT_DIRECT_INVOCATION_ONLY: u8 = 1; +/// will cover cost to execute self and the cost of any subsequent invoked contracts +pub const ENTRY_POINT_PAYMENT_SELF_ONWARD: u8 = 2; diff --git a/executor/wasm_common/src/env_info.rs b/executor/wasm_common/src/env_info.rs new file mode 100644 index 0000000000..3ca0d4ce18 --- /dev/null +++ b/executor/wasm_common/src/env_info.rs @@ -0,0 +1,14 @@ +use safe_transmute::TriviallyTransmutable; + +#[derive(Clone, Copy)] +#[repr(C)] +pub struct EnvInfo { + pub block_time: u64, + pub transferred_value: u64, + pub caller_addr: [u8; 32], + pub caller_kind: u32, + pub callee_addr: [u8; 32], + pub callee_kind: u32, +} + +unsafe impl TriviallyTransmutable for EnvInfo {} diff --git a/executor/wasm_common/src/error.rs b/executor/wasm_common/src/error.rs new file mode 100644 index 0000000000..5bdf60a0fb --- /dev/null +++ b/executor/wasm_common/src/error.rs @@ -0,0 +1,172 @@ +//! Error code for signaling error while processing a host function. +//! +//! API inspired by `std::io::Error` and `std::io::ErrorKind` but somewhat more memory efficient. + +use thiserror::Error; + +#[derive(Debug, Default, PartialEq)] +#[non_exhaustive] +#[repr(u32)] +pub enum CommonResult { + #[default] + Success = 0, + /// An entity was not found, often a missing key in the global state. + NotFound = 1, + /// Data not valid for the operation were encountered. + /// + /// As an example this could be a malformed parameter that does not contain a valid UTF-8. + InvalidData = 2, + /// The input to the host function was invalid. + InvalidInput = 3, + /// The topic is too long. + TopicTooLong = 4, + /// Too many topics. + TooManyTopics = 5, + /// The payload is too long. + PayloadTooLong = 6, + /// The message topic is full and cannot accept new messages. 
+ MessageTopicFull = 7, + /// The maximum number of messages emitted per block was exceeded when trying to emit a + /// message. + MaxMessagesPerBlockExceeded = 8, + /// Internal error (for example, failed to acquire a lock) + Internal = 9, + /// An error code not covered by the other variants. + Other(u32), +} + +pub const HOST_ERROR_SUCCESS: u32 = 0; +pub const HOST_ERROR_NOT_FOUND: u32 = 1; +pub const HOST_ERROR_INVALID_DATA: u32 = 2; +pub const HOST_ERROR_INVALID_INPUT: u32 = 3; +pub const HOST_ERROR_TOPIC_TOO_LONG: u32 = 4; +pub const HOST_ERROR_TOO_MANY_TOPICS: u32 = 5; +pub const HOST_ERROR_PAYLOAD_TOO_LONG: u32 = 6; +pub const HOST_ERROR_MESSAGE_TOPIC_FULL: u32 = 7; +pub const HOST_ERROR_MAX_MESSAGES_PER_BLOCK_EXCEEDED: u32 = 8; +pub const HOST_ERROR_INTERNAL: u32 = 9; + +impl From for CommonResult { + fn from(value: u32) -> Self { + match value { + HOST_ERROR_SUCCESS => Self::Success, + HOST_ERROR_NOT_FOUND => Self::NotFound, + HOST_ERROR_INVALID_DATA => Self::InvalidData, + HOST_ERROR_INVALID_INPUT => Self::InvalidInput, + HOST_ERROR_TOPIC_TOO_LONG => Self::TopicTooLong, + HOST_ERROR_TOO_MANY_TOPICS => Self::TooManyTopics, + HOST_ERROR_PAYLOAD_TOO_LONG => Self::PayloadTooLong, + HOST_ERROR_MESSAGE_TOPIC_FULL => Self::MessageTopicFull, + HOST_ERROR_MAX_MESSAGES_PER_BLOCK_EXCEEDED => Self::MaxMessagesPerBlockExceeded, + HOST_ERROR_INTERNAL => Self::Internal, + other => Self::Other(other), + } + } +} + +pub fn result_from_code(code: u32) -> Result<(), CommonResult> { + match code { + HOST_ERROR_SUCCESS => Ok(()), + other => Err(CommonResult::from(other)), + } +} + +/// Wasm trap code. +#[derive(Debug, Error)] +pub enum TrapCode { + /// Trap code for out of bounds memory access. + #[error("call stack exhausted")] + StackOverflow, + /// Trap code for out of bounds memory access. + #[error("out of bounds memory access")] + MemoryOutOfBounds, + /// Trap code for out of bounds table access. 
+ #[error("undefined element: out of bounds table access")] + TableAccessOutOfBounds, + /// Trap code for indirect call to null. + #[error("uninitialized element")] + IndirectCallToNull, + /// Trap code for indirect call type mismatch. + #[error("indirect call type mismatch")] + BadSignature, + /// Trap code for integer overflow. + #[error("integer overflow")] + IntegerOverflow, + /// Trap code for division by zero. + #[error("integer divide by zero")] + IntegerDivisionByZero, + /// Trap code for invalid conversion to integer. + #[error("invalid conversion to integer")] + BadConversionToInteger, + /// Trap code for unreachable code reached triggered by unreachable instruction. + #[error("unreachable")] + UnreachableCodeReached, +} + +pub const CALLEE_SUCCEEDED: u32 = 0; +pub const CALLEE_REVERTED: u32 = 1; +pub const CALLEE_TRAPPED: u32 = 2; +pub const CALLEE_GAS_DEPLETED: u32 = 3; +pub const CALLEE_NOT_CALLABLE: u32 = 4; +pub const CALLEE_HOST_ERROR: u32 = 5; + +/// Represents the result of a host function call. +/// +/// 0 is used as a success. +#[derive(Debug, Error)] +pub enum CallError { + /// Callee contract reverted. + #[error("callee reverted")] + CalleeReverted, + /// Called contract trapped. + #[error("callee trapped: {0}")] + CalleeTrapped(TrapCode), + /// Called contract reached gas limit. + #[error("callee gas depleted")] + CalleeGasDepleted, + /// Called contract is not callable. + #[error("not callable")] + NotCallable, +} + +impl CallError { + /// Converts the host error into a u32. 
+ #[must_use] + pub fn into_u32(self) -> u32 { + match self { + Self::CalleeReverted => CALLEE_REVERTED, + Self::CalleeTrapped(_) => CALLEE_TRAPPED, + Self::CalleeGasDepleted => CALLEE_GAS_DEPLETED, + Self::NotCallable => CALLEE_NOT_CALLABLE, + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_from_u32_not_found() { + let error = CommonResult::from(HOST_ERROR_NOT_FOUND); + assert_eq!(error, CommonResult::NotFound); + } + + #[test] + fn test_from_u32_invalid_data() { + let error = CommonResult::from(HOST_ERROR_INVALID_DATA); + assert_eq!(error, CommonResult::InvalidData); + } + + #[test] + fn test_from_u32_invalid_input() { + let error = CommonResult::from(HOST_ERROR_INVALID_INPUT); + assert_eq!(error, CommonResult::InvalidInput); + } + + #[test] + fn test_from_u32_other() { + let error = CommonResult::from(10); + assert_eq!(error, CommonResult::Other(10)); + } +} diff --git a/executor/wasm_common/src/flags.rs b/executor/wasm_common/src/flags.rs new file mode 100644 index 0000000000..5078a06ee1 --- /dev/null +++ b/executor/wasm_common/src/flags.rs @@ -0,0 +1,59 @@ +//! Types that can be safely shared between host and the wasm sdk. +use bitflags::bitflags; + +bitflags! { + /// Flags that can be passed as part of returning values. + #[repr(transparent)] + #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] + pub struct ReturnFlags: u32 { + /// If this bit is set, the host should return the value to the caller and all the execution effects are reverted. + const REVERT = 0x0000_0001; + + // The source may set any bits. + const _ = !0; + } + + #[repr(transparent)] + #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] + pub struct EntryPointFlags: u32 { + const CONSTRUCTOR = 0x0000_0001; + const FALLBACK = 0x0000_0002; + } + + /// Flags that can be passed as part of calling contracts. 
+ #[repr(transparent)] + #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] + pub struct CallFlags: u32 { + // TODO: This is a placeholder + } +} + +impl Default for EntryPointFlags { + fn default() -> Self { + Self::empty() + } +} + +impl Default for CallFlags { + fn default() -> Self { + Self::empty() + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_return_flags() { + assert_eq!(ReturnFlags::empty().bits(), 0x0000_0000); + assert_eq!(ReturnFlags::REVERT.bits(), 0x0000_0001); + } + + #[test] + fn creating_from_invalid_bit_flags_does_not_fail() { + let _return_flags = ReturnFlags::from_bits(u32::MAX).unwrap(); + let _revert = ReturnFlags::from_bits(0x0000_0001).unwrap(); + let _empty = ReturnFlags::from_bits(0x0000_0000).unwrap(); + } +} diff --git a/executor/wasm_common/src/keyspace.rs b/executor/wasm_common/src/keyspace.rs new file mode 100644 index 0000000000..c7282d93a5 --- /dev/null +++ b/executor/wasm_common/src/keyspace.rs @@ -0,0 +1,103 @@ +use num_derive::{FromPrimitive, ToPrimitive}; + +#[repr(u64)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, FromPrimitive, ToPrimitive)] +pub enum KeyspaceTag { + /// Used for a state based storage which usually involves single dimensional data i.e. + /// key-value pairs, etc. + /// + /// See also [`Keyspace::State`]. + State = 0, + /// Used for a context based storage which usually involves multi dimensional data i.e. maps, + /// efficient vectors, etc. + Context = 1, + /// Used for a named key based storage which usually involves named keys. + NamedKey = 2, + /// Used for a payment info based storage which usually involves payment information. + PaymentInfo = 3, +} + +#[repr(u64)] +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum Keyspace<'a> { + /// Stores contract's context. + /// + /// There's no additional payload for this variant as the host implies the contract's address. + State, + /// Stores contract's context data. 
Bytes can be any value as long as it uniquely identifies a + /// value. + Context(&'a [u8]), + /// Stores contract's named keys. + NamedKey(&'a str), + /// Entry point payment info. + PaymentInfo(&'a str), +} + +impl Keyspace<'_> { + #[must_use] + pub fn as_tag(&self) -> KeyspaceTag { + match self { + Keyspace::State => KeyspaceTag::State, + Keyspace::Context(_) => KeyspaceTag::Context, + Keyspace::NamedKey(_) => KeyspaceTag::NamedKey, + Keyspace::PaymentInfo(_) => KeyspaceTag::PaymentInfo, + } + } + + #[must_use] + pub fn as_u64(&self) -> u64 { + self.as_tag() as u64 + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_as_tag_state() { + let keyspace = Keyspace::State; + assert_eq!(keyspace.as_tag(), KeyspaceTag::State); + } + + #[test] + fn test_as_tag_context() { + let data = [1, 2, 3]; + let keyspace = Keyspace::Context(&data); + assert_eq!(keyspace.as_tag(), KeyspaceTag::Context); + } + + #[test] + fn test_as_tag_named_key() { + let name = "my_key"; + let keyspace = Keyspace::NamedKey(name); + assert_eq!(keyspace.as_tag(), KeyspaceTag::NamedKey); + } + + #[test] + fn test_as_u64_state() { + let keyspace = Keyspace::State; + assert_eq!(keyspace.as_u64(), 0); + } + + #[test] + fn test_as_u64_context() { + let data = [1, 2, 3]; + let keyspace = Keyspace::Context(&data); + assert_eq!(keyspace.as_u64(), 1); + } + + #[test] + fn test_as_u64_named_key() { + let name = "my_key"; + let keyspace = Keyspace::NamedKey(name); + assert_eq!(keyspace.as_u64(), 2); + } + + #[test] + fn test_as_u64_payment_info() { + let name = "entry_point"; + let keyspace: Keyspace = Keyspace::PaymentInfo(name); + assert_eq!(keyspace.as_u64(), 3); + } +} diff --git a/executor/wasm_common/src/lib.rs b/executor/wasm_common/src/lib.rs new file mode 100644 index 0000000000..5dd1ecd45a --- /dev/null +++ b/executor/wasm_common/src/lib.rs @@ -0,0 +1,7 @@ +//! A crate that shares common types and utilities between the Wasm executor and the Wasm interface. 
+pub mod chain_utils; +pub mod entry_point; +pub mod env_info; +pub mod error; +pub mod flags; +pub mod keyspace; diff --git a/executor/wasm_host/Cargo.toml b/executor/wasm_host/Cargo.toml new file mode 100644 index 0000000000..5c7eaea088 --- /dev/null +++ b/executor/wasm_host/Cargo.toml @@ -0,0 +1,24 @@ +[package] +name = "casper-executor-wasm-host" +version = "0.1.3" +edition = "2021" +authors = ["Michał Papierski "] +description = "Casper executor host package" +homepage = "https://casper.network" +repository = "https://github.com/casper-network/casper-node/tree/dev/executor/wasm_host" +license = "Apache-2.0" + +[dependencies] +base16 = "0.2" +bytes = "1.10" +casper-executor-wasm-common = { version = "0.1.3", path = "../wasm_common" } +casper-executor-wasm-interface = { version = "0.1.3", path = "../wasm_interface" } +casper-storage = { version = "2.1.1", path = "../../storage" } +casper-types = { version = "6.0.1", path = "../../types" } +either = "1.15" +num-derive = { workspace = true } +num-traits = { workspace = true } +parking_lot = "0.12" +safe-transmute = "0.11" +thiserror = "2" +tracing = "0.1" diff --git a/executor/wasm_host/src/abi.rs b/executor/wasm_host/src/abi.rs new file mode 100644 index 0000000000..278b1d8232 --- /dev/null +++ b/executor/wasm_host/src/abi.rs @@ -0,0 +1,21 @@ +use safe_transmute::TriviallyTransmutable; + +#[repr(C)] +#[derive(Copy, Clone, Debug, PartialEq)] +pub(crate) struct ReadInfo { + /// Allocated pointer. + pub(crate) data: u32, + /// Size in bytes. 
+ pub(crate) data_size: u32, +} + +unsafe impl TriviallyTransmutable for ReadInfo {} + +#[repr(C)] +#[derive(Copy, Clone, Debug, PartialEq)] + +pub(crate) struct CreateResult { + pub(crate) package_address: [u8; 32], +} + +unsafe impl TriviallyTransmutable for CreateResult {} diff --git a/executor/wasm_host/src/context.rs b/executor/wasm_host/src/context.rs new file mode 100644 index 0000000000..0a15f4b0ea --- /dev/null +++ b/executor/wasm_host/src/context.rs @@ -0,0 +1,36 @@ +use std::sync::Arc; + +use bytes::Bytes; +use casper_executor_wasm_interface::executor::Executor; +use casper_storage::{global_state::GlobalStateReader, AddressGenerator, TrackingCopy}; +use casper_types::{ + account::AccountHash, BlockTime, Key, MessageLimits, StorageCosts, TransactionHash, + WasmV2Config, +}; +use parking_lot::RwLock; + +/// Container that holds all relevant modules necessary to process an execution request. +pub struct Context { + /// The address of the account that initiated the contract or session code. + pub initiator: AccountHash, + /// The address of the addressable entity that is currently executing the contract or session + /// code. + pub caller: Key, + /// The address of the addressable entity that is being called. + pub callee: Key, + /// The state of the global state at the time of the call based on the currently executing + /// contract or session address. + // pub state_address: Address, + /// The amount of tokens that were send to the contract's purse at the time of the call. 
+ pub transferred_value: u64, + pub config: WasmV2Config, + pub storage_costs: StorageCosts, + pub message_limits: MessageLimits, + pub tracking_copy: TrackingCopy, + pub executor: E, // TODO: This could be part of the caller + pub transaction_hash: TransactionHash, + pub address_generator: Arc>, + pub chain_name: Arc, + pub input: Bytes, + pub block_time: BlockTime, +} diff --git a/executor/wasm_host/src/host.rs b/executor/wasm_host/src/host.rs new file mode 100644 index 0000000000..8ae0cfe4ad --- /dev/null +++ b/executor/wasm_host/src/host.rs @@ -0,0 +1,1716 @@ +use std::{borrow::Cow, num::NonZeroU32, sync::Arc}; + +use bytes::Bytes; +use casper_executor_wasm_common::{ + chain_utils, + entry_point::{ + ENTRY_POINT_PAYMENT_CALLER, ENTRY_POINT_PAYMENT_DIRECT_INVOCATION_ONLY, + ENTRY_POINT_PAYMENT_SELF_ONWARD, + }, + env_info::EnvInfo, + error::{ + CallError, CALLEE_NOT_CALLABLE, CALLEE_SUCCEEDED, CALLEE_TRAPPED, HOST_ERROR_INVALID_DATA, + HOST_ERROR_INVALID_INPUT, HOST_ERROR_MAX_MESSAGES_PER_BLOCK_EXCEEDED, + HOST_ERROR_MESSAGE_TOPIC_FULL, HOST_ERROR_NOT_FOUND, HOST_ERROR_PAYLOAD_TOO_LONG, + HOST_ERROR_SUCCESS, HOST_ERROR_TOO_MANY_TOPICS, HOST_ERROR_TOPIC_TOO_LONG, + }, + flags::ReturnFlags, + keyspace::{Keyspace, KeyspaceTag}, +}; +use casper_executor_wasm_interface::{ + executor::{ExecuteRequestBuilder, ExecuteResult, ExecutionKind, Executor}, + u32_from_host_result, Caller, InternalHostError, VMError, VMResult, +}; +use casper_storage::{ + global_state::GlobalStateReader, + tracking_copy::{TrackingCopyEntityExt, TrackingCopyError, TrackingCopyExt}, +}; +use casper_types::{ + account::AccountHash, + addressable_entity::{ActionThresholds, AssociatedKeys, MessageTopicError, NamedKeyAddr}, + bytesrepr::ToBytes, + contract_messages::{Message, MessageAddr, MessagePayload, MessageTopicSummary}, + AddressableEntity, BlockGlobalAddr, BlockHash, BlockTime, ByteCode, ByteCodeAddr, ByteCodeHash, + ByteCodeKind, CLType, CLValue, ContractRuntimeTag, Digest, EntityAddr, 
EntityEntryPoint, + EntityKind, EntryPointAccess, EntryPointAddr, EntryPointPayment, EntryPointType, + EntryPointValue, HashAddr, HostFunctionV2, Key, Package, PackageHash, ProtocolVersion, + StoredValue, URef, U512, +}; +use either::Either; +use num_derive::FromPrimitive; +use num_traits::FromPrimitive; +use tracing::{error, info, warn}; + +use crate::{ + abi::{CreateResult, ReadInfo}, + context::Context, + system::{self, MintArgs, MintTransferArgs}, +}; + +#[derive(Debug, Copy, Clone, FromPrimitive, PartialEq)] +enum EntityKindTag { + Account = 0, + Contract = 1, +} + +pub trait FallibleInto { + fn try_into_wrapped(self) -> VMResult; +} + +impl FallibleInto for From +where + To: TryFrom, +{ + fn try_into_wrapped(self) -> VMResult { + To::try_from(self).map_err(|_| VMError::Internal(InternalHostError::TypeConversion)) + } +} + +/// Consumes a set amount of gas for the specified storage value. +fn charge_gas_storage( + caller: &mut impl Caller>, + size_bytes: usize, +) -> VMResult<()> { + let storage_costs = &caller.context().storage_costs; + let gas_cost = storage_costs.calculate_gas_cost(size_bytes); + let value: u64 = gas_cost.value().try_into().map_err(|_| VMError::OutOfGas)?; + caller.consume_gas(value)?; + Ok(()) +} + +/// Consumes a set amount of gas for the specified host function and weights +fn charge_host_function_call( + caller: &mut impl Caller>, + host_function: &HostFunctionV2<[u64; N]>, + weights: [u64; N], +) -> VMResult<()> +where + S: GlobalStateReader, + E: Executor, +{ + let Some(cost) = host_function.calculate_gas_cost(weights) else { + // Overflowing gas calculation means gas limit was exceeded + return Err(VMError::OutOfGas); + }; + + caller.consume_gas(cost.value().as_u64())?; + Ok(()) +} + +/// Writes a message to the global state and charges for storage used. 
+fn metered_write( + caller: &mut impl Caller>, + key: Key, + value: StoredValue, +) -> VMResult<()> { + charge_gas_storage(caller, value.serialized_length())?; + caller.context_mut().tracking_copy.write(key, value); + Ok(()) +} + +/// Write value under a key. +pub fn casper_write( + mut caller: impl Caller>, + key_space: u64, + key_ptr: u32, + key_size: u32, + value_ptr: u32, + value_size: u32, +) -> VMResult { + let write_cost = caller.context().config.host_function_costs().write; + charge_host_function_call( + &mut caller, + &write_cost, + [ + key_space, + u64::from(key_ptr), + u64::from(key_size), + u64::from(value_ptr), + u64::from(value_size), + ], + )?; + + let keyspace_tag = match KeyspaceTag::from_u64(key_space) { + Some(keyspace_tag) => keyspace_tag, + None => { + // Unknown keyspace received, return error + return Ok(HOST_ERROR_NOT_FOUND); + } + }; + + let key_payload_bytes = caller.memory_read(key_ptr, key_size.try_into_wrapped()?)?; + + let keyspace = match keyspace_tag { + KeyspaceTag::State => Keyspace::State, + KeyspaceTag::Context => Keyspace::Context(&key_payload_bytes), + KeyspaceTag::NamedKey => { + let key_name = match std::str::from_utf8(&key_payload_bytes) { + Ok(key_name) => key_name, + Err(_) => { + // TODO: Invalid key name encoding + return Ok(HOST_ERROR_INVALID_DATA); + } + }; + + Keyspace::NamedKey(key_name) + } + KeyspaceTag::PaymentInfo => { + let key_name = match std::str::from_utf8(&key_payload_bytes) { + Ok(key_name) => key_name, + Err(_) => { + return Ok(HOST_ERROR_INVALID_DATA); + } + }; + + if !caller.has_export(key_name) { + // Missing wasm export, unable to perform global state write + return Ok(HOST_ERROR_NOT_FOUND); + } + + Keyspace::PaymentInfo(key_name) + } + }; + + let global_state_key = match keyspace_to_global_state_key(caller.context(), keyspace) { + Some(global_state_key) => global_state_key, + None => { + // Unknown keyspace received, return error + return Ok(HOST_ERROR_NOT_FOUND); + } + }; + + let value = 
caller.memory_read(value_ptr, value_size.try_into_wrapped()?)?; + + let stored_value = match keyspace { + Keyspace::State | Keyspace::Context(_) | Keyspace::NamedKey(_) => { + StoredValue::RawBytes(value) + } + Keyspace::PaymentInfo(_) => { + let entry_point_payment = match value.as_slice() { + [ENTRY_POINT_PAYMENT_CALLER] => EntryPointPayment::Caller, + [ENTRY_POINT_PAYMENT_DIRECT_INVOCATION_ONLY] => { + EntryPointPayment::DirectInvocationOnly + } + [ENTRY_POINT_PAYMENT_SELF_ONWARD] => EntryPointPayment::SelfOnward, + _ => { + // Invalid entry point payment variant + return Ok(HOST_ERROR_INVALID_INPUT); + } + }; + + let entry_point = EntityEntryPoint::new( + "_", + Vec::new(), + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + entry_point_payment, + ); + let entry_point_value = EntryPointValue::V1CasperVm(entry_point); + StoredValue::EntryPoint(entry_point_value) + } + }; + + metered_write(&mut caller, global_state_key, stored_value)?; + + Ok(HOST_ERROR_SUCCESS) +} + +/// Remove value under a key. +/// +/// This produces a transformation of Prune to the global state. Keep in mind that technically the +/// data is not removed from the global state as it still there, it's just not reachable anymore +/// from the newly created tip. +/// +/// The name for this host function is `remove` to keep it simple and consistent with read/write +/// verbs, and also consistent with the rust stdlib vocabulary i.e. 
`V` +pub fn casper_remove( + mut caller: impl Caller>, + key_space: u64, + key_ptr: u32, + key_size: u32, +) -> VMResult { + let write_cost = caller.context().config.host_function_costs().remove; + charge_host_function_call( + &mut caller, + &write_cost, + [key_space, u64::from(key_ptr), u64::from(key_size)], + )?; + + let keyspace_tag = match KeyspaceTag::from_u64(key_space) { + Some(keyspace_tag) => keyspace_tag, + None => { + // Unknown keyspace received, return error + return Ok(HOST_ERROR_NOT_FOUND); + } + }; + + let key_payload_bytes = caller.memory_read(key_ptr, key_size.try_into_wrapped()?)?; + + let keyspace = match keyspace_tag { + KeyspaceTag::State => Keyspace::State, + KeyspaceTag::Context => Keyspace::Context(&key_payload_bytes), + KeyspaceTag::NamedKey => { + let key_name = match std::str::from_utf8(&key_payload_bytes) { + Ok(key_name) => key_name, + Err(_) => { + // TODO: Invalid key name encoding + return Ok(HOST_ERROR_INVALID_DATA); + } + }; + + Keyspace::NamedKey(key_name) + } + KeyspaceTag::PaymentInfo => { + let key_name = match std::str::from_utf8(&key_payload_bytes) { + Ok(key_name) => key_name, + Err(_) => { + return Ok(HOST_ERROR_INVALID_DATA); + } + }; + + if !caller.has_export(key_name) { + // Missing wasm export, unable to perform global state write + return Ok(HOST_ERROR_NOT_FOUND); + } + + Keyspace::PaymentInfo(key_name) + } + }; + + let global_state_key = match keyspace_to_global_state_key(caller.context(), keyspace) { + Some(global_state_key) => global_state_key, + None => { + // Unknown keyspace received, return error + return Ok(HOST_ERROR_NOT_FOUND); + } + }; + + let global_state_read_result = caller.context_mut().tracking_copy.read(&global_state_key); + match global_state_read_result { + Ok(Some(_stored_value)) => { + // Produce a prune transform only if value under a given key exists in the global state + caller.context_mut().tracking_copy.prune(global_state_key); + } + Ok(None) => { + // Entry does not exists, and we can't 
proceed with the prune operation + return Ok(HOST_ERROR_NOT_FOUND); + } + Err(error) => { + // To protect the network against potential non-determinism (i.e. one validator runs out + // of space or just faces I/O issues that other validators may not have) we're simply + // aborting the process, hoping that once the node goes back online issues are resolved + // on the validator side. TODO: We should signal this to the contract + // runtime somehow, and let validator nodes skip execution. + error!( + ?error, + ?global_state_key, + "Error while attempting a read before removing value; aborting" + ); + panic!("Error while attempting a read before removing value; aborting key={global_state_key:?} error={error:?}") + } + } + + Ok(HOST_ERROR_SUCCESS) +} + +pub fn casper_print( + mut caller: impl Caller>, + message_ptr: u32, + message_size: u32, +) -> VMResult<()> { + let print_cost = caller.context().config.host_function_costs().print; + charge_host_function_call( + &mut caller, + &print_cost, + [u64::from(message_ptr), u64::from(message_size)], + )?; + + let vec = caller.memory_read(message_ptr, message_size.try_into_wrapped()?)?; + let msg = String::from_utf8_lossy(&vec); + eprintln!("⛓️ {msg}"); + Ok(()) +} + +/// Write value under a key. 
+pub fn casper_read( + mut caller: impl Caller>, + key_tag: u64, + key_ptr: u32, + key_size: u32, + info_ptr: u32, + cb_alloc: u32, + alloc_ctx: u32, +) -> VMResult { + let read_cost = caller.context().config.host_function_costs().read; + charge_host_function_call( + &mut caller, + &read_cost, + [ + key_tag, + u64::from(key_ptr), + u64::from(key_size), + u64::from(info_ptr), + u64::from(cb_alloc), + u64::from(alloc_ctx), + ], + )?; + + let keyspace_tag = match KeyspaceTag::from_u64(key_tag) { + Some(keyspace_tag) => keyspace_tag, + None => { + // Unknown keyspace received, return error + return Ok(HOST_ERROR_INVALID_INPUT); + } + }; + + // TODO: Opportunity for optimization: don't read data under key_ptr if given key space does not + // require it. + let key_payload_bytes = caller.memory_read(key_ptr, key_size.try_into_wrapped()?)?; + + let keyspace = match keyspace_tag { + KeyspaceTag::State => Keyspace::State, + KeyspaceTag::Context => Keyspace::Context(&key_payload_bytes), + KeyspaceTag::NamedKey => { + let key_name = match std::str::from_utf8(&key_payload_bytes) { + Ok(key_name) => key_name, + Err(_) => { + return Ok(HOST_ERROR_INVALID_DATA); + } + }; + + Keyspace::NamedKey(key_name) + } + KeyspaceTag::PaymentInfo => { + let key_name = match std::str::from_utf8(&key_payload_bytes) { + Ok(key_name) => key_name, + Err(_) => { + return Ok(HOST_ERROR_INVALID_DATA); + } + }; + if !caller.has_export(key_name) { + // Missing wasm export, unable to perform global state read + return Ok(HOST_ERROR_NOT_FOUND); + } + Keyspace::PaymentInfo(key_name) + } + }; + + let global_state_key = match keyspace_to_global_state_key(caller.context(), keyspace) { + Some(global_state_key) => global_state_key, + None => { + // Unknown keyspace received, return error + return Ok(HOST_ERROR_NOT_FOUND); + } + }; + let global_state_read_result = caller.context_mut().tracking_copy.read(&global_state_key); + + let global_state_raw_bytes: Cow<[u8]> = match global_state_read_result { + 
Ok(Some(StoredValue::RawBytes(raw_bytes))) => Cow::Owned(raw_bytes), + Ok(Some(StoredValue::EntryPoint(EntryPointValue::V1CasperVm(entry_point)))) => { + match entry_point.entry_point_payment() { + EntryPointPayment::Caller => Cow::Borrowed(&[ENTRY_POINT_PAYMENT_CALLER]), + EntryPointPayment::DirectInvocationOnly => { + Cow::Borrowed(&[ENTRY_POINT_PAYMENT_DIRECT_INVOCATION_ONLY]) + } + EntryPointPayment::SelfOnward => Cow::Borrowed(&[ENTRY_POINT_PAYMENT_SELF_ONWARD]), + } + } + Ok(Some(stored_value)) => { + // TODO: Backwards compatibility with old EE, although it's not clear if we should do it + // at the storage level. Since new VM has storage isolated from the Wasm + // (i.e. we have Keyspace on the wasm which gets converted to a global state `Key`). + // I think if we were to pursue this we'd add a new `Keyspace` enum variant for each old + // VM supported Key types (i.e. URef, Dictionary perhaps) for some period of time, then + // deprecate this. + todo!("Unsupported {stored_value:?}") + } + Ok(None) => return Ok(HOST_ERROR_NOT_FOUND), // Entry does not exists + Err(error) => { + // To protect the network against potential non-determinism (i.e. one validator runs out + // of space or just faces I/O issues that other validators may not have) we're simply + // aborting the process, hoping that once the node goes back online issues are resolved + // on the validator side. TODO: We should signal this to the contract + // runtime somehow, and let validator nodes skip execution. + error!(?error, "Error while reading from storage; aborting"); + panic!("Error while reading from storage; aborting key={global_state_key:?} error={error:?}") + } + }; + + let out_ptr: u32 = if cb_alloc != 0 { + caller.alloc(cb_alloc, global_state_raw_bytes.len(), alloc_ctx)? 
+ } else { + // treats alloc_ctx as data + alloc_ctx + }; + + let read_info = ReadInfo { + data: out_ptr, + data_size: global_state_raw_bytes.len().try_into_wrapped()?, + }; + + let read_info_bytes = safe_transmute::transmute_one_to_bytes(&read_info); + caller.memory_write(info_ptr, read_info_bytes)?; + if out_ptr != 0 { + caller.memory_write(out_ptr, &global_state_raw_bytes)?; + } + Ok(HOST_ERROR_SUCCESS) +} + +fn keyspace_to_global_state_key( + context: &Context, + keyspace: Keyspace<'_>, +) -> Option { + let entity_addr = context_to_entity_addr(context); + + match keyspace { + Keyspace::State => Some(Key::State(entity_addr)), + Keyspace::Context(bytes) => { + let digest = Digest::hash(bytes); + Some(casper_types::Key::NamedKey( + NamedKeyAddr::new_named_key_entry(entity_addr, digest.value()), + )) + } + Keyspace::NamedKey(payload) => { + let digest = Digest::hash(payload.as_bytes()); + Some(casper_types::Key::NamedKey( + NamedKeyAddr::new_named_key_entry(entity_addr, digest.value()), + )) + } + Keyspace::PaymentInfo(payload) => { + let entry_point_addr = + EntryPointAddr::new_v1_entry_point_addr(entity_addr, payload).ok()?; + Some(Key::EntryPoint(entry_point_addr)) + } + } +} + +fn context_to_entity_addr( + context: &Context, +) -> EntityAddr { + match context.callee { + Key::Account(account_hash) => EntityAddr::new_account(account_hash.value()), + Key::SmartContract(smart_contract_addr) => { + EntityAddr::new_smart_contract(smart_contract_addr) + } + _ => { + // This should never happen, as the caller is always an account or a smart contract. + panic!("Unexpected callee variant: {:?}", context.callee) + } + } +} + +pub fn casper_copy_input( + mut caller: impl Caller>, + cb_alloc: u32, + alloc_ctx: u32, +) -> VMResult { + let input = caller.context().input.clone(); + + let out_ptr: u32 = if cb_alloc != 0 { + caller.alloc(cb_alloc, input.len(), alloc_ctx)? 
+ } else { + // treats alloc_ctx as data + alloc_ctx + }; + + let copy_input_cost = caller.context().config.host_function_costs().copy_input; + charge_host_function_call( + &mut caller, + ©_input_cost, + [ + u64::from(out_ptr), + input + .len() + .try_into() + .expect("usize is at least the same size as u64"), + ], + )?; + + if out_ptr == 0 { + Ok(out_ptr) + } else { + caller.memory_write(out_ptr, &input)?; + Ok(out_ptr + (input.len() as u32)) + } +} + +/// Returns from the execution of a smart contract with an optional flags. +pub fn casper_return( + mut caller: impl Caller>, + flags: u32, + data_ptr: u32, + data_len: u32, +) -> VMResult<()> { + let ret_cost = caller.context().config.host_function_costs().ret; + charge_host_function_call( + &mut caller, + &ret_cost, + [u64::from(data_ptr), u64::from(data_len)], + )?; + + let flags = ReturnFlags::from_bits_retain(flags); + let data = if data_ptr == 0 { + None + } else { + let data = caller + .memory_read(data_ptr, data_len.try_into_wrapped()?) + .map(Bytes::from)?; + Some(data) + }; + Err(VMError::Return { flags, data }) +} + +#[allow(clippy::too_many_arguments)] +pub fn casper_create( + mut caller: impl Caller>, + code_ptr: u32, + code_len: u32, + transferred_value: u64, + entry_point_ptr: u32, + entry_point_len: u32, + input_ptr: u32, + input_len: u32, + seed_ptr: u32, + seed_len: u32, + result_ptr: u32, +) -> VMResult { + let create_cost = caller.context().config.host_function_costs().create; + charge_host_function_call( + &mut caller, + &create_cost, + [ + u64::from(code_ptr), + u64::from(code_len), + transferred_value, + u64::from(entry_point_ptr), + u64::from(entry_point_len), + u64::from(input_ptr), + u64::from(input_len), + u64::from(seed_ptr), + u64::from(seed_len), + u64::from(result_ptr), + ], + )?; + + let code = if code_ptr != 0 { + caller + .memory_read(code_ptr, code_len as usize) + .map(Bytes::from)? 
+ } else { + caller.bytecode() + }; + + let seed = if seed_ptr != 0 { + if seed_len != 32 { + return Ok(CALLEE_NOT_CALLABLE); + } + let seed_bytes = caller.memory_read(seed_ptr, seed_len as usize)?; + let seed_bytes: [u8; 32] = seed_bytes.try_into().unwrap(); // SAFETY: We checked for length. + Some(seed_bytes) + } else { + None + }; + + // For calling a constructor + let constructor_entry_point = { + let entry_point_ptr = NonZeroU32::new(entry_point_ptr); + match entry_point_ptr { + Some(entry_point_ptr) => { + let entry_point_bytes = + caller.memory_read(entry_point_ptr.get(), entry_point_len as _)?; + match String::from_utf8(entry_point_bytes) { + Ok(entry_point) => Some(entry_point), + Err(utf8_error) => { + error!(%utf8_error, "entry point name is not a valid utf-8 string; unable to call"); + return Ok(CALLEE_NOT_CALLABLE); + } + } + } + None => { + // No constructor to be called + None + } + } + }; + + // Pass input data when calling a constructor. It's optional, as constructors aren't required + let input_data: Option = if input_ptr == 0 { + None + } else { + let input_data = caller.memory_read(input_ptr, input_len as _)?.into(); + Some(input_data) + }; + + let bytecode_hash = chain_utils::compute_wasm_bytecode_hash(&code); + + let bytecode = ByteCode::new(ByteCodeKind::V2CasperWasm, code.clone().into()); + let bytecode_addr = ByteCodeAddr::V2CasperWasm(bytecode_hash); + + // 1. 
Store package hash + let mut smart_contract_package = Package::default(); + + let protocol_version = ProtocolVersion::V2_0_0; + let protocol_version_major = protocol_version.value().major; + + let callee_addr = context_to_entity_addr(caller.context()).value(); + + let smart_contract_addr: HashAddr = chain_utils::compute_predictable_address( + caller.context().chain_name.as_bytes(), + callee_addr, + bytecode_hash, + seed, + ); + + smart_contract_package.insert_entity_version( + protocol_version_major, + EntityAddr::SmartContract(smart_contract_addr), + ); + + if caller + .context_mut() + .tracking_copy + .read(&Key::SmartContract(smart_contract_addr)) + .map_err(|_| VMError::Internal(InternalHostError::TrackingCopy))? + .is_some() + { + return VMResult::Err(VMError::Internal(InternalHostError::ContractAlreadyExists)); + } + + metered_write( + &mut caller, + Key::SmartContract(smart_contract_addr), + StoredValue::SmartContract(smart_contract_package), + )?; + + // 2. Store wasm + metered_write( + &mut caller, + Key::ByteCode(bytecode_addr), + StoredValue::ByteCode(bytecode), + )?; + + // 3. 
Store addressable entity + + let entity_addr = EntityAddr::SmartContract(smart_contract_addr); + let addressable_entity_key = Key::AddressableEntity(entity_addr); + + // TODO: abort(str) as an alternative to trap + let address_generator = Arc::clone(&caller.context().address_generator); + let transaction_hash = caller.context().transaction_hash; + let main_purse: URef = match system::mint_mint( + &mut caller.context_mut().tracking_copy, + transaction_hash, + address_generator, + MintArgs { + initial_balance: U512::zero(), + }, + ) { + Ok(uref) => uref, + Err(mint_error) => { + error!(?mint_error, "Failed to create a purse"); + return Ok(CALLEE_TRAPPED); + } + }; + + let addressable_entity = AddressableEntity::new( + PackageHash::new(smart_contract_addr), + ByteCodeHash::new(bytecode_hash), + ProtocolVersion::V2_0_0, + main_purse, + AssociatedKeys::default(), + ActionThresholds::default(), + EntityKind::SmartContract(ContractRuntimeTag::VmCasperV2), + ); + + metered_write( + &mut caller, + addressable_entity_key, + StoredValue::AddressableEntity(addressable_entity), + )?; + + let _initial_state = match constructor_entry_point { + Some(entry_point_name) => { + // Take the gas spent so far and use it as a limit for the new VM. + let gas_limit = caller + .gas_consumed() + .try_into_remaining() + .map_err(|_| InternalHostError::TypeConversion)?; + + let execute_request = ExecuteRequestBuilder::default() + .with_initiator(caller.context().initiator) + .with_caller_key(caller.context().callee) + .with_gas_limit(gas_limit) + .with_target(ExecutionKind::Stored { + address: smart_contract_addr, + entry_point: entry_point_name.clone(), + }) + .with_input(input_data.unwrap_or_default()) + .with_transferred_value(transferred_value) + .with_transaction_hash(caller.context().transaction_hash) + // We're using shared address generator there as we need to preserve and advance the + // state of deterministic address generator across chain of calls. 
+ .with_shared_address_generator(Arc::clone(&caller.context().address_generator)) + .with_chain_name(caller.context().chain_name.clone()) + .with_block_time(caller.context().block_time) + .with_state_hash(Digest::from_raw([0; 32])) // TODO: Carry on state root hash + .with_block_height(1) // TODO: Carry on block height + .with_parent_block_hash(BlockHash::new(Digest::from_raw([0; 32]))) // TODO: Carry on parent block hash + .build() + .map_err(|_| InternalHostError::ExecuteRequestBuildFailure)?; + + let tracking_copy_for_ctor = caller.context().tracking_copy.fork2(); + + match caller + .context() + .executor + .execute(tracking_copy_for_ctor, execute_request) + { + Ok(ExecuteResult { + host_error, + output, + gas_usage, + effects, + cache, + messages, + }) => { + // output + caller.consume_gas(gas_usage.gas_spent())?; + + if let Some(host_error) = host_error { + return Ok(host_error.into_u32()); + } + + caller + .context_mut() + .tracking_copy + .apply_changes(effects, cache, messages); + + output + } + Err(execute_error) => { + // This is a bug in the EE, as it should have been caught during the preparation + // phase when the contract was stored in the global state. 
+ error!(?execute_error, "Failed to execute constructor entry point"); + return Err(VMError::Execute(execute_error)); + } + } + } + None => None, + }; + + let create_result = CreateResult { + package_address: smart_contract_addr, + }; + + let create_result_bytes = safe_transmute::transmute_one_to_bytes(&create_result); + + debug_assert_eq!( + safe_transmute::transmute_one(create_result_bytes), + Ok(create_result), + "Sanity check", // NOTE: Remove these guards with sufficient test coverage + ); + + caller.memory_write(result_ptr, create_result_bytes)?; + + Ok(CALLEE_SUCCEEDED) +} + +#[allow(clippy::too_many_arguments)] +pub fn casper_call( + mut caller: impl Caller>, + address_ptr: u32, + address_len: u32, + transferred_value: u64, + entry_point_ptr: u32, + entry_point_len: u32, + input_ptr: u32, + input_len: u32, + cb_alloc: u32, + cb_ctx: u32, +) -> VMResult { + let call_cost = caller.context().config.host_function_costs().call; + charge_host_function_call( + &mut caller, + &call_cost, + [ + u64::from(address_ptr), + u64::from(address_len), + transferred_value, + u64::from(entry_point_ptr), + u64::from(entry_point_len), + u64::from(input_ptr), + u64::from(input_len), + u64::from(cb_alloc), + u64::from(cb_ctx), + ], + )?; + + // 1. Look up address in the storage + // 1a. if it's legacy contract, wire up old EE, pretend you're 1.x. Input data would be + // "RuntimeArgs". Serialized output of the call has to be passed as output. Value is ignored as + // you can't pass value (tokens) to called contracts. 1b. if it's new contract, wire up + // another VM as according to the bytecode format. 2. Depends on the VM used (old or new) at + // this point either entry point is validated (i.e. EE returned error) or will be validated as + // for now. 3. If entry point is valid, call it, transfer the value, pass the input data. If + // it's invalid, return error. 4. Output data is captured by calling `cb_alloc`. + // let vm = VM::new(); + // vm. 
+ let address = caller.memory_read(address_ptr, address_len as _)?; + let smart_contract_addr: HashAddr = address.try_into_wrapped()?; + + let input_data: Bytes = caller.memory_read(input_ptr, input_len as _)?.into(); + + let entry_point = { + let entry_point_bytes = caller.memory_read(entry_point_ptr, entry_point_len as _)?; + match String::from_utf8(entry_point_bytes) { + Ok(entry_point) => entry_point, + Err(utf8_error) => { + error!(%utf8_error, "entry point name is not a valid utf-8 string; unable to call"); + return Ok(CALLEE_NOT_CALLABLE); + } + } + }; + + let tracking_copy = caller.context().tracking_copy.fork2(); + + // Take the gas spent so far and use it as a limit for the new VM. + let gas_limit = caller + .gas_consumed() + .try_into_remaining() + .map_err(|_| InternalHostError::TypeConversion)?; + + let execute_request = ExecuteRequestBuilder::default() + .with_initiator(caller.context().initiator) + .with_caller_key(caller.context().callee) + .with_gas_limit(gas_limit) + .with_target(ExecutionKind::Stored { + address: smart_contract_addr, + entry_point: entry_point.clone(), + }) + .with_transferred_value(transferred_value) + .with_input(input_data) + .with_transaction_hash(caller.context().transaction_hash) + // We're using shared address generator there as we need to preserve and advance the state + // of deterministic address generator across chain of calls. 
+ .with_shared_address_generator(Arc::clone(&caller.context().address_generator)) + .with_chain_name(caller.context().chain_name.clone()) + .with_block_time(caller.context().block_time) + .with_state_hash(Digest::from_raw([0; 32])) // TODO: Carry on state root hash + .with_block_height(1) // TODO: Carry on block height + .with_parent_block_hash(BlockHash::new(Digest::from_raw([0; 32]))) // TODO: Carry on parent block hash + .build() + .map_err(|_| InternalHostError::ExecuteRequestBuildFailure)?; + + let (gas_usage, host_result) = match caller + .context() + .executor + .execute(tracking_copy, execute_request) + { + Ok(ExecuteResult { + host_error, + output, + gas_usage, + effects, + cache, + messages, + }) => { + if let Some(output) = output { + let out_ptr: u32 = if cb_alloc != 0 { + caller.alloc(cb_alloc, output.len(), cb_ctx)? + } else { + // treats alloc_ctx as data + cb_ctx + }; + + if out_ptr != 0 { + caller.memory_write(out_ptr, &output)?; + } + } + + let host_result = match host_error { + Some(host_error) => Err(host_error), + None => { + caller + .context_mut() + .tracking_copy + .apply_changes(effects, cache, messages); + Ok(()) + } + }; + + (gas_usage, host_result) + } + Err(execute_error) => { + error!( + ?execute_error, + ?smart_contract_addr, + ?entry_point, + "Failed to execute entry point" + ); + return Err(VMError::Execute(execute_error)); + } + }; + + let gas_spent = gas_usage + .gas_limit() + .checked_sub(gas_usage.remaining_points()) + .ok_or(InternalHostError::RemainingGasExceedsGasLimit)?; + + caller.consume_gas(gas_spent)?; + + Ok(u32_from_host_result(host_result)) +} + +pub fn casper_env_balance( + mut caller: impl Caller>, + entity_kind: u32, + entity_addr_ptr: u32, + entity_addr_len: u32, + output_ptr: u32, +) -> VMResult { + let balance_cost = caller.context().config.host_function_costs().env_balance; + charge_host_function_call( + &mut caller, + &balance_cost, + [ + u64::from(entity_kind), + u64::from(entity_addr_ptr), + 
u64::from(entity_addr_len), + u64::from(output_ptr), + ], + )?; + + let entity_key = match EntityKindTag::from_u32(entity_kind) { + Some(EntityKindTag::Account) => { + if entity_addr_len != 32 { + return Ok(HOST_ERROR_SUCCESS); + } + let entity_addr = caller.memory_read(entity_addr_ptr, entity_addr_len as usize)?; + let account_hash: AccountHash = AccountHash::new(entity_addr.try_into_wrapped()?); + + let account_key = Key::Account(account_hash); + match caller.context_mut().tracking_copy.read(&account_key) { + Ok(Some(StoredValue::CLValue(clvalue))) => { + let addressible_entity_key = clvalue + .into_t::() + .map_err(|_| InternalHostError::TypeConversion)?; + Either::Right(addressible_entity_key) + } + Ok(Some(StoredValue::Account(account))) => Either::Left(account.main_purse()), + Ok(Some(other_entity)) => { + error!("Unexpected entity type: {other_entity:?}"); + return Err(InternalHostError::UnexpectedEntityKind.into()); + } + Ok(None) => return Ok(HOST_ERROR_SUCCESS), + Err(error) => { + error!("Error while reading from storage; aborting key={account_key:?} error={error:?}"); + return Err(InternalHostError::TrackingCopy.into()); + } + } + } + Some(EntityKindTag::Contract) => { + if entity_addr_len != 32 { + return Ok(HOST_ERROR_SUCCESS); + } + let hash_bytes = caller.memory_read(entity_addr_ptr, entity_addr_len as usize)?; + let hash_bytes: [u8; 32] = hash_bytes.try_into().unwrap(); // SAFETY: We checked for length. 
+ + let smart_contract_key = Key::SmartContract(hash_bytes); + match caller.context_mut().tracking_copy.read(&smart_contract_key) { + Ok(Some(StoredValue::SmartContract(smart_contract_package))) => { + match smart_contract_package.versions().latest() { + Some(addressible_entity_hash) => { + let key = Key::AddressableEntity(EntityAddr::SmartContract( + addressible_entity_hash.value(), + )); + Either::Right(key) + } + None => { + warn!( + ?smart_contract_key, + "Unable to find latest addressible entity hash for contract" + ); + return Ok(HOST_ERROR_SUCCESS); + } + } + } + Ok(Some(_)) => { + return Ok(HOST_ERROR_SUCCESS); + } + Ok(None) => { + // Not found, balance is 0 + return Ok(HOST_ERROR_SUCCESS); + } + Err(error) => { + error!( + hash_bytes = base16::encode_lower(&hash_bytes), + ?error, + "Error while reading from storage; aborting" + ); + panic!("Error while reading from storage") + } + } + } + None => return Ok(HOST_ERROR_SUCCESS), + }; + + let purse = match entity_key { + Either::Left(main_purse) => main_purse, + Either::Right(indirect_entity_key) => { + match caller + .context_mut() + .tracking_copy + .read(&indirect_entity_key) + { + Ok(Some(StoredValue::AddressableEntity(addressable_entity))) => { + addressable_entity.main_purse() + } + Ok(Some(other_entity)) => { + panic!("Unexpected entity type: {other_entity:?}") + } + Ok(None) => panic!("Key not found while checking balance"), //return Ok(0), + Err(error) => { + panic!("Error while reading from storage; aborting key={entity_key:?} error={error:?}") + } + } + } + }; + + let total_balance = caller + .context_mut() + .tracking_copy + .get_total_balance(Key::URef(purse)) + .map_err(|_| InternalHostError::TotalBalanceReadFailure)?; + + let total_balance: u64 = total_balance + .value() + .try_into() + .map_err(|_| InternalHostError::TotalBalanceOverflow)?; + + caller.memory_write(output_ptr, &total_balance.to_le_bytes())?; + Ok(HOST_ERROR_NOT_FOUND) +} + +pub fn casper_transfer( + mut caller: impl Caller>, + 
entity_addr_ptr: u32, + entity_addr_len: u32, + amount_ptr: u32, +) -> VMResult { + let transfer_cost = caller.context().config.host_function_costs().transfer; + charge_host_function_call( + &mut caller, + &transfer_cost, + [ + u64::from(entity_addr_ptr), + u64::from(entity_addr_len), + u64::from(amount_ptr), + ], + )?; + + if entity_addr_len != 32 { + // Invalid entity address; failing to proceed with the transfer + return Ok(u32_from_host_result(Err(CallError::NotCallable))); + } + + let amount = { + let mut amount_bytes = [0u8; 8]; + caller.memory_read_into(amount_ptr, &mut amount_bytes)?; + u64::from_le_bytes(amount_bytes) + }; + + let (target_entity_addr, _runtime_footprint) = { + let entity_addr = caller.memory_read(entity_addr_ptr, entity_addr_len as usize)?; + debug_assert_eq!(entity_addr.len(), 32); + + // SAFETY: entity_addr is 32 bytes long + let account_hash: AccountHash = AccountHash::new(entity_addr.try_into().unwrap()); + + let protocol_version = ProtocolVersion::V2_0_0; + let (entity_addr, runtime_footprint) = match caller + .context_mut() + .tracking_copy + .runtime_footprint_by_account_hash(protocol_version, account_hash) + { + Ok((entity_addr, runtime_footprint)) => (entity_addr, runtime_footprint), + Err(TrackingCopyError::KeyNotFound(key)) => { + warn!(?key, "Account not found"); + return Ok(u32_from_host_result(Err(CallError::NotCallable))); + } + Err(error) => { + error!(?error, "Error while reading from storage; aborting"); + panic!("Error while reading from storage") + } + }; + (entity_addr, runtime_footprint) + }; + + let callee_addressable_entity_key = match caller.context().callee { + callee_account_key @ Key::Account(_account_hash) => { + match caller.context_mut().tracking_copy.read(&callee_account_key) { + Ok(Some(StoredValue::CLValue(indirect))) => { + // is it an account? + indirect + .into_t::() + .map_err(|_| InternalHostError::TypeConversion)? 
+ } + Ok(Some(other)) => panic!("should be cl value but got {other:?}"), + Ok(None) => return Ok(u32_from_host_result(Err(CallError::NotCallable))), + Err(error) => { + error!( + ?error, + ?callee_account_key, + "Error while reading from storage; aborting" + ); + panic!("Error while reading from storage") + } + } + } + smart_contract_key @ Key::SmartContract(_) => { + match caller.context_mut().tracking_copy.read(&smart_contract_key) { + Ok(Some(StoredValue::SmartContract(smart_contract_package))) => { + match smart_contract_package.versions().latest() { + Some(addressible_entity_hash) => Key::AddressableEntity( + EntityAddr::SmartContract(addressible_entity_hash.value()), + ), + None => { + warn!( + ?smart_contract_key, + "Unable to find latest addressible entity hash for contract" + ); + return Ok(u32_from_host_result(Err(CallError::NotCallable))); + } + } + } + Ok(Some(other)) => panic!("should be smart contract but got {other:?}"), + Ok(None) => return Ok(u32_from_host_result(Err(CallError::NotCallable))), + Err(error) => { + error!( + ?error, + ?smart_contract_key, + "Error while reading from storage; aborting" + ); + panic!("Error while reading from storage") + } + } + } + other => panic!("should be account or smart contract but got {other:?}"), + }; + + let callee_stored_value = caller + .context_mut() + .tracking_copy + .read(&callee_addressable_entity_key) + .map_err(|_| InternalHostError::TrackingCopy)? 
+ .ok_or(InternalHostError::AccountRecordNotFound)?; + let callee_addressable_entity = callee_stored_value + .into_addressable_entity() + .ok_or(InternalHostError::TypeConversion)?; + let callee_purse = callee_addressable_entity.main_purse(); + + let target_purse = match caller + .context_mut() + .tracking_copy + .runtime_footprint_by_entity_addr(target_entity_addr) + { + Ok(runtime_footprint) => match runtime_footprint.main_purse() { + Some(target_purse) => target_purse, + None => todo!("create a main purse for a contract"), + }, + Err(TrackingCopyError::KeyNotFound(key)) => { + warn!(?key, "Transfer recipient not found"); + return Ok(u32_from_host_result(Err(CallError::NotCallable))); + } + Err(error) => { + error!(?error, "Error while reading from storage; aborting"); + return Err(InternalHostError::TrackingCopy)?; + } + }; + // We don't execute anything as it does not make sense to execute an account as there + // are no entry points. + let transaction_hash = caller.context().transaction_hash; + let address_generator = Arc::clone(&caller.context().address_generator); + let args = MintTransferArgs { + source: callee_purse, + target: target_purse, + amount: U512::from(amount), + maybe_to: None, + id: None, + }; + + let result = system::mint_transfer( + &mut caller.context_mut().tracking_copy, + transaction_hash, + address_generator, + args, + ); + + Ok(u32_from_host_result(result)) +} + +pub fn casper_upgrade( + mut caller: impl Caller>, + code_ptr: u32, + code_size: u32, + entry_point_ptr: u32, + entry_point_size: u32, + input_ptr: u32, + input_size: u32, +) -> VMResult { + let upgrade_cost = caller.context().config.host_function_costs().upgrade; + charge_host_function_call( + &mut caller, + &upgrade_cost, + [ + u64::from(code_ptr), + u64::from(code_size), + u64::from(entry_point_ptr), + u64::from(entry_point_size), + u64::from(input_ptr), + u64::from(input_size), + ], + )?; + + let code = caller + .memory_read(code_ptr, code_size as usize) + .map(Bytes::from)?; 
+ + let entry_point = match NonZeroU32::new(entry_point_ptr) { + Some(entry_point_ptr) => { + // There's upgrade entry point to be called + let entry_point_bytes = + caller.memory_read(entry_point_ptr.get(), entry_point_size as usize)?; + match String::from_utf8(entry_point_bytes) { + Ok(entry_point) => Some(entry_point), + Err(utf8_error) => { + error!(%utf8_error, "entry point name is not a valid utf-8 string; unable to call"); + return Ok(CALLEE_NOT_CALLABLE); + } + } + } + None => { + // No constructor to be called + None + } + }; + + // Pass input data when calling a constructor. It's optional, as constructors aren't required + let input_data: Option = if input_ptr == 0 { + None + } else { + let input_data = caller.memory_read(input_ptr, input_size as _)?.into(); + Some(input_data) + }; + + let (smart_contract_addr, callee_addressable_entity_key) = match caller.context().callee { + Key::Account(_account_hash) => { + error!("Account upgrade is not possible"); + return Ok(CALLEE_NOT_CALLABLE); + } + addressable_entity_key @ Key::SmartContract(smart_contract_addr) => { + let smart_contract_key = addressable_entity_key; + match caller.context_mut().tracking_copy.read(&smart_contract_key) { + Ok(Some(StoredValue::SmartContract(smart_contract_package))) => { + match smart_contract_package.versions().latest() { + Some(addressible_entity_hash) => { + let key = Key::AddressableEntity(EntityAddr::SmartContract( + addressible_entity_hash.value(), + )); + (smart_contract_addr, key) + } + None => { + warn!( + ?smart_contract_key, + "Unable to find latest addressible entity hash for contract" + ); + return Ok(CALLEE_NOT_CALLABLE); + } + } + } + Ok(Some(other)) => panic!("should be smart contract but got {other:?}"), + Ok(None) => return Ok(CALLEE_NOT_CALLABLE), + Err(error) => { + error!( + ?error, + ?smart_contract_key, + "Error while reading from storage; aborting" + ); + panic!("Error while reading from storage") + } + } + } + other => panic!("should be account or 
addressable entity but got {other:?}"), + }; + + let callee_addressable_entity = match caller + .context_mut() + .tracking_copy + .read(&callee_addressable_entity_key) + { + Ok(Some(StoredValue::AddressableEntity(addressable_entity))) => addressable_entity, + Ok(Some(other_entity)) => { + panic!("Unexpected entity type: {other_entity:?}") + } + Ok(None) => return Ok(CALLEE_NOT_CALLABLE), + Err(error) => { + panic!("Error while reading from storage; aborting key={callee_addressable_entity_key:?} error={error:?}") + } + }; + + // 1. Ensure that the new code is valid (maybe?) + // TODO: Is validating new code worth it if the user pays for the storage anyway? Should we + // protect users against invalid code? + + // 2. Update the code therefore making hash(new_code) != addressable_entity.bytecode_addr (aka + // hash(old_code)) + let bytecode_key = Key::ByteCode(ByteCodeAddr::V2CasperWasm( + callee_addressable_entity.byte_code_addr(), + )); + metered_write( + &mut caller, + bytecode_key, + StoredValue::ByteCode(ByteCode::new( + ByteCodeKind::V2CasperWasm, + code.clone().into(), + )), + )?; + + // 3. Execute upgrade routine (if specified) + // this code should handle reading old state, and saving new state + + if let Some(entry_point_name) = entry_point { + // Take the gas spent so far and use it as a limit for the new VM. + let gas_limit = caller + .gas_consumed() + .try_into_remaining() + .map_err(|_| InternalHostError::TypeConversion)?; + + let execute_request = ExecuteRequestBuilder::default() + .with_initiator(caller.context().initiator) + .with_caller_key(caller.context().callee) + .with_gas_limit(gas_limit) + .with_target(ExecutionKind::Stored { + address: smart_contract_addr, + entry_point: entry_point_name.clone(), + }) + .with_input(input_data.unwrap_or_default()) + // Upgrade entry point is executed with zero value as it does not seem to make sense to + // be able to transfer anything. 
+ .with_transferred_value(0) + .with_transaction_hash(caller.context().transaction_hash) + // We're using shared address generator there as we need to preserve and advance the + // state of deterministic address generator across chain of calls. + .with_shared_address_generator(Arc::clone(&caller.context().address_generator)) + .with_chain_name(caller.context().chain_name.clone()) + .with_block_time(caller.context().block_time) + .with_state_hash(Digest::from_raw([0; 32])) // TODO: Carry on state root hash + .with_block_height(1) // TODO: Carry on block height + .with_parent_block_hash(BlockHash::new(Digest::from_raw([0; 32]))) // TODO: Carry on parent block hash + .build() + .map_err(|_| InternalHostError::ExecuteRequestBuildFailure)?; + + let tracking_copy_for_ctor = caller.context().tracking_copy.fork2(); + + match caller + .context() + .executor + .execute(tracking_copy_for_ctor, execute_request) + { + Ok(ExecuteResult { + host_error, + output, + gas_usage, + effects, + cache, + messages, + }) => { + // output + caller.consume_gas(gas_usage.gas_spent())?; + + if let Some(host_error) = host_error { + return Ok(host_error.into_u32()); + } + + caller + .context_mut() + .tracking_copy + .apply_changes(effects, cache, messages); + + if let Some(output) = output { + info!( + ?entry_point_name, + ?output, + "unexpected output from migration entry point" + ); + } + } + Err(execute_error) => { + // Unable to call contract because of execution error or internal host error. + // This usually means an internal error that should not happen and has to be handled + // by the contract runtime. 
+ error!( + ?execute_error, + ?entry_point_name, + smart_contract_addr = base16::encode_lower(&smart_contract_addr), + "Failed to execute upgrade entry point" + ); + return Err(VMError::Execute(execute_error)); + } + } + } + + Ok(CALLEE_SUCCEEDED) +} + +pub fn casper_env_info( + mut caller: impl Caller>, + info_ptr: u32, + info_size: u32, +) -> VMResult { + let block_time_cost = caller.context().config.host_function_costs().env_info; + charge_host_function_call( + &mut caller, + &block_time_cost, + [u64::from(info_ptr), u64::from(info_size)], + )?; + + let (caller_kind, caller_addr) = match &caller.context().caller { + Key::Account(account_hash) => (EntityKindTag::Account as u32, account_hash.value()), + Key::SmartContract(smart_contract_addr) => { + (EntityKindTag::Contract as u32, *smart_contract_addr) + } + other => panic!("Unexpected caller: {other:?}"), + }; + + let (callee_kind, callee_addr) = match &caller.context().callee { + Key::Account(initiator_addr) => (EntityKindTag::Account as u32, initiator_addr.value()), + Key::SmartContract(smart_contract_addr) => { + (EntityKindTag::Contract as u32, *smart_contract_addr) + } + other => panic!("Unexpected callee: {other:?}"), + }; + + let transferred_value = caller.context().transferred_value; + + let block_time = caller.context().block_time.value(); + + // `EnvInfo` in little-endian representation. 
+ let env_info_le = EnvInfo { + caller_addr, + caller_kind: caller_kind.to_le(), + callee_addr, + callee_kind: callee_kind.to_le(), + transferred_value: transferred_value.to_le(), + block_time: block_time.to_le(), + }; + + let env_info_bytes = safe_transmute::transmute_one_to_bytes(&env_info_le); + let write_len = env_info_bytes.len().min(info_size as usize); + caller.memory_write(info_ptr, &env_info_bytes[..write_len])?; + + Ok(HOST_ERROR_SUCCESS) +} + +pub fn casper_emit( + mut caller: impl Caller>, + topic_name_ptr: u32, + topic_name_size: u32, + payload_ptr: u32, + payload_size: u32, +) -> VMResult { + // Charge for parameter weights. + let emit_host_function = caller.context().config.host_function_costs().emit; + + charge_host_function_call( + &mut caller, + &emit_host_function, + [ + u64::from(topic_name_ptr), + u64::from(topic_name_size), + u64::from(payload_ptr), + u64::from(payload_size), + ], + )?; + + if topic_name_size > caller.context().message_limits.max_topic_name_size { + return Ok(HOST_ERROR_TOPIC_TOO_LONG); + } + + if payload_size > caller.context().message_limits.max_message_size { + return Ok(HOST_ERROR_PAYLOAD_TOO_LONG); + } + + let topic_name = { + let topic: Vec = caller.memory_read(topic_name_ptr, topic_name_size as usize)?; + let Ok(topic) = String::from_utf8(topic) else { + // Not a valid UTF-8 string + return Ok(HOST_ERROR_INVALID_DATA); + }; + topic + }; + + let payload = caller.memory_read(payload_ptr, payload_size as usize)?; + + let entity_addr = context_to_entity_addr(caller.context()); + + let mut message_topics = caller + .context_mut() + .tracking_copy + .get_message_topics(entity_addr) + .unwrap_or_else(|error| { + panic!("Error while reading from storage; aborting error={error:?}") + }); + + if message_topics.len() >= caller.context().message_limits.max_topics_per_contract as usize { + return Ok(HOST_ERROR_TOO_MANY_TOPICS); + } + + let topic_name_hash = Digest::hash(&topic_name).value().into(); + + match 
message_topics.add_topic(&topic_name, topic_name_hash) { + Ok(()) => { + // New topic is created + } + Err(MessageTopicError::DuplicateTopic) => { + // We're lazily creating message topics and this operation is idempotent. Therefore + // already existing topic is not an issue. + } + Err(MessageTopicError::MaxTopicsExceeded) => { + // We're validating the size of topics before adding them + return Ok(HOST_ERROR_TOO_MANY_TOPICS); + } + Err(MessageTopicError::TopicNameSizeExceeded) => { + // We're validating the length of topic before adding it + return Ok(HOST_ERROR_TOPIC_TOO_LONG); + } + Err(error) => { + // These error variants are non_exhaustive, and we should handle them explicitly. + unreachable!("Unexpected error while adding a topic: {:?}", error); + } + }; + + let current_block_time = caller.context().block_time; + eprintln!("📩 {topic_name}: {payload:?} (at {current_block_time:?})"); + + let topic_key = Key::Message(MessageAddr::new_topic_addr(entity_addr, topic_name_hash)); + let prev_topic_summary = match caller.context_mut().tracking_copy.read(&topic_key) { + Ok(Some(StoredValue::MessageTopic(message_topic_summary))) => message_topic_summary, + Ok(Some(stored_value)) => { + panic!("Unexpected stored value: {stored_value:?}"); + } + Ok(None) => { + let message_topic_summary = + MessageTopicSummary::new(0, current_block_time, topic_name.clone()); + let summary = StoredValue::MessageTopic(message_topic_summary.clone()); + caller.context_mut().tracking_copy.write(topic_key, summary); + message_topic_summary + } + Err(error) => panic!("Error while reading from storage; aborting error={error:?}"), + }; + + let topic_message_index = if prev_topic_summary.blocktime() != current_block_time { + for index in 1..prev_topic_summary.message_count() { + let message_key = Key::message(entity_addr, topic_name_hash, index); + debug_assert!( + { + // NOTE: This assertion is to ensure that the message index is continuous, and + // the previous messages are pruned properly. 
+ caller + .context_mut() + .tracking_copy + .read(&message_key) + .map_err(|_| VMError::Internal(InternalHostError::TrackingCopy))? + .is_some() + }, + "Message index is not continuous" + ); + + // Prune the previous messages + caller.context_mut().tracking_copy.prune(message_key); + } + 0 + } else { + prev_topic_summary.message_count() + }; + + // Data stored in the global state associated with the message block. + type MessageCountPair = (BlockTime, u64); + + let block_message_index: u64 = match caller + .context_mut() + .tracking_copy + .read(&Key::BlockGlobal(BlockGlobalAddr::MessageCount)) + { + Ok(Some(StoredValue::CLValue(value_pair))) => { + let (prev_block_time, prev_count): MessageCountPair = + CLValue::into_t(value_pair).map_err(|_| InternalHostError::TypeConversion)?; + if prev_block_time == current_block_time { + prev_count + } else { + 0 + } + } + Ok(Some(other)) => panic!("Unexpected stored value: {other:?}"), + Ok(None) => { + // No messages in current block yet + 0 + } + Err(error) => { + panic!("Error while reading from storage; aborting error={error:?}") + } + }; + + let Some(topic_message_count) = topic_message_index.checked_add(1) else { + return Ok(HOST_ERROR_MESSAGE_TOPIC_FULL); + }; + + let Some(block_message_count) = block_message_index.checked_add(1) else { + return Ok(HOST_ERROR_MAX_MESSAGES_PER_BLOCK_EXCEEDED); + }; + + // Under v2 runtime messages are only limited to bytes. 
+ let message_payload = MessagePayload::Bytes(payload.into()); + + let message = Message::new( + entity_addr, + message_payload, + topic_name, + topic_name_hash, + topic_message_index, + block_message_index, + ); + let topic_value = StoredValue::MessageTopic(MessageTopicSummary::new( + topic_message_count, + current_block_time, + message.topic_name().to_owned(), + )); + + let message_key = message.message_key(); + let message_value = StoredValue::Message( + message + .checksum() + .map_err(|_| InternalHostError::MessageChecksumMissing)?, + ); + let message_count_pair: MessageCountPair = (current_block_time, block_message_count); + let block_message_count_value = StoredValue::CLValue( + CLValue::from_t(message_count_pair).map_err(|_| InternalHostError::TypeConversion)?, + ); + + // Charge for amount as measured by serialized length + let bytes_count = topic_value.serialized_length() + + message_value.serialized_length() + + block_message_count_value.serialized_length(); + charge_gas_storage(&mut caller, bytes_count)?; + + caller.context_mut().tracking_copy.emit_message( + topic_key, + topic_value, + message_key, + message_value, + block_message_count_value, + message, + ); + + Ok(HOST_ERROR_SUCCESS) +} diff --git a/executor/wasm_host/src/lib.rs b/executor/wasm_host/src/lib.rs new file mode 100644 index 0000000000..5cefdf488e --- /dev/null +++ b/executor/wasm_host/src/lib.rs @@ -0,0 +1,5 @@ +//! Implementation of all host functions. +pub(crate) mod abi; +pub mod context; +pub mod host; +pub(crate) mod system; diff --git a/executor/wasm_host/src/system.rs b/executor/wasm_host/src/system.rs new file mode 100644 index 0000000000..4d086d7511 --- /dev/null +++ b/executor/wasm_host/src/system.rs @@ -0,0 +1,202 @@ +//! System contract dispatch. +//! +//! System contracts are special contracts that are always available to the system. +//! They are used to implement core system functionality, such as minting and transferring tokens. +//! 
This module provides a way to dispatch calls to system contracts that are implemented under +//! storage crate. +//! +//! The dispatcher provides the necessary information to properly execute system contract's code +//! within the context of the current execution of the new Wasm host logic. +use std::{cell::RefCell, rc::Rc, sync::Arc}; + +use casper_executor_wasm_common::error::{CallError, TrapCode}; +use casper_executor_wasm_interface::HostResult; +use casper_storage::{ + global_state::GlobalStateReader, + system::{ + mint::Mint, + runtime_native::{Config, Id, RuntimeNative}, + }, + tracking_copy::{TrackingCopyEntityExt, TrackingCopyError}, + AddressGenerator, TrackingCopy, +}; +use casper_types::{ + account::AccountHash, CLValueError, ContextAccessRights, EntityAddr, Key, Phase, + ProtocolVersion, PublicKey, SystemHashRegistry, TransactionHash, URef, U512, +}; +use parking_lot::RwLock; +use thiserror::Error; +use tracing::{debug, error}; + +#[derive(Debug, Error)] +enum DispatchError { + #[error("Tracking copy error: {0}")] + Storage(#[from] TrackingCopyError), + #[error("CLValue error: {0}")] + CLValue(CLValueError), + #[error("Registry not found")] + RegistryNotFound, + #[error("Missing addressable entity")] + MissingRuntimeFootprint(TrackingCopyError), + #[error("Missing system contract: {0}")] + MissingSystemContract(&'static str), +} + +fn dispatch_system_contract( + tracking_copy: &mut TrackingCopy, + transaction_hash: TransactionHash, + address_generator: Arc>, + system_contract: &'static str, + func: impl FnOnce(RuntimeNative) -> Ret, +) -> Result { + let system_entity_registry = { + let stored_value = tracking_copy + .read(&Key::SystemEntityRegistry)? + .ok_or(DispatchError::RegistryNotFound)?; + stored_value + .into_cl_value() + .expect("should convert stored value into CLValue") + .into_t::() + .map_err(DispatchError::CLValue)? 
+ }; + let system_entity_addr = system_entity_registry + .get(system_contract) + .ok_or(DispatchError::MissingSystemContract(system_contract))?; + let entity_addr = EntityAddr::new_system(*system_entity_addr); + + let runtime_footprint = tracking_copy + .runtime_footprint_by_entity_addr(entity_addr) + .map_err(DispatchError::MissingRuntimeFootprint)?; + + let config = Config::default(); + let protocol_version = ProtocolVersion::V1_0_0; + + let access_rights = ContextAccessRights::new(*system_entity_addr, []); + let address = PublicKey::System.to_account_hash(); + + let forked_tracking_copy = Rc::new(RefCell::new(tracking_copy.fork2())); + + let remaining_spending_limit = U512::MAX; // NOTE: Since there's no custom payment, there's no need to track the remaining spending limit. + let phase = Phase::System; // NOTE: Since this is a system contract, the phase is always `System`. + + let ret = { + let runtime = RuntimeNative::new( + config, + protocol_version, + Id::Transaction(transaction_hash), + address_generator, + Rc::clone(&forked_tracking_copy), + address, + Key::AddressableEntity(entity_addr), + runtime_footprint, + access_rights, + remaining_spending_limit, + phase, + ); + + func(runtime) + }; + + // SAFETY: `RuntimeNative` is dropped in the block above, we can extract the tracking copy and + // the effects. 
+ let modified_tracking_copy = Rc::try_unwrap(forked_tracking_copy) + .ok() + .expect("No other references"); + + let modified_tracking_copy = modified_tracking_copy.into_inner(); + + tracking_copy.apply_changes( + modified_tracking_copy.effects(), + modified_tracking_copy.cache(), + modified_tracking_copy.messages(), + ); + + Ok(ret) +} + +#[derive(Debug, Clone, Copy)] +pub(crate) struct MintArgs { + pub(crate) initial_balance: U512, +} + +pub(crate) fn mint_mint( + tracking_copy: &mut TrackingCopy, + transaction_hash: TransactionHash, + address_generator: Arc>, + args: MintArgs, +) -> Result { + let mint_result = match dispatch_system_contract( + tracking_copy, + transaction_hash, + address_generator, + "mint", + |mut runtime| runtime.mint(args.initial_balance), + ) { + Ok(mint_result) => mint_result, + Err(error) => { + error!(%error, ?args, "mint failed"); + panic!("Mint failed; aborting"); + } + }; + + match mint_result { + Ok(uref) => Ok(uref), + Err(casper_types::system::mint::Error::InsufficientFunds) => Err(CallError::CalleeReverted), + Err(casper_types::system::mint::Error::GasLimit) => Err(CallError::CalleeGasDepleted), + Err(mint_error) => { + error!(%mint_error, ?args, "mint transfer failed"); + Err(CallError::CalleeTrapped(TrapCode::UnreachableCodeReached)) + } + } +} + +#[derive(Debug, Copy, Clone)] +pub(crate) struct MintTransferArgs { + pub(crate) maybe_to: Option, + pub(crate) source: URef, + pub(crate) target: URef, + pub(crate) amount: U512, + pub(crate) id: Option, +} + +pub(crate) fn mint_transfer( + tracking_copy: &mut TrackingCopy, + id: TransactionHash, + address_generator: Arc>, + args: MintTransferArgs, +) -> HostResult { + let transfer_result: Result<(), casper_types::system::mint::Error> = + match dispatch_system_contract( + tracking_copy, + id, + address_generator, + "mint", + |mut runtime| { + runtime.transfer( + args.maybe_to, + args.source, + args.target, + args.amount, + args.id, + ) + }, + ) { + Ok(result) => result, + Err(error) 
=> { + error!(%error, "mint transfer failed"); + return Err(CallError::CalleeTrapped(TrapCode::UnreachableCodeReached)); + } + }; + + debug!(?args, ?transfer_result, "transfer"); + + match transfer_result { + Ok(()) => Ok(()), + Err(casper_types::system::mint::Error::InsufficientFunds) => Err(CallError::CalleeReverted), + Err(casper_types::system::mint::Error::GasLimit) => Err(CallError::CalleeGasDepleted), + Err(mint_error) => { + error!(%mint_error, ?args, "mint transfer failed"); + Err(CallError::CalleeTrapped(TrapCode::UnreachableCodeReached)) + } + } +} diff --git a/executor/wasm_interface/Cargo.toml b/executor/wasm_interface/Cargo.toml new file mode 100644 index 0000000000..8888c68705 --- /dev/null +++ b/executor/wasm_interface/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "casper-executor-wasm-interface" +version = "0.1.3" +edition = "2021" +authors = ["Michał Papierski "] +description = "Casper executor interface package" +homepage = "https://casper.network" +repository = "https://github.com/casper-network/casper-node/tree/dev/executor/wasm_interface" +license = "Apache-2.0" + +[dependencies] +bytes = "1.10" +borsh = { version = "1.5", features = ["derive"] } +casper-executor-wasm-common = { version = "0.1.3", path = "../wasm_common" } +casper-storage = { version = "2.1.1", path = "../../storage" } +casper-types = { version = "6.0.1", path = "../../types" } +parking_lot = "0.12" +thiserror = "2" diff --git a/executor/wasm_interface/src/executor.rs b/executor/wasm_interface/src/executor.rs new file mode 100644 index 0000000000..1e608b48dd --- /dev/null +++ b/executor/wasm_interface/src/executor.rs @@ -0,0 +1,381 @@ +use std::sync::Arc; + +use borsh::BorshSerialize; +use bytes::Bytes; +use casper_storage::{ + global_state::{error::Error as GlobalStateError, GlobalStateReader}, + tracking_copy::TrackingCopyCache, + AddressGenerator, TrackingCopy, +}; +use casper_types::{ + account::AccountHash, contract_messages::Messages, execution::Effects, BlockHash, 
BlockTime, + Digest, HashAddr, Key, TransactionHash, +}; +use parking_lot::RwLock; +use thiserror::Error; + +use crate::{CallError, GasUsage, InternalHostError, WasmPreparationError}; + +/// Request to execute a Wasm contract. +pub struct ExecuteRequest { + /// Initiator's address. + pub initiator: AccountHash, + /// Caller's address key. + /// + /// Either a `[`Key::Account`]` or a `[`Key::AddressableEntity`]. + pub caller_key: Key, + /// Gas limit. + pub gas_limit: u64, + /// Target for execution. + pub execution_kind: ExecutionKind, + /// Input data. + pub input: Bytes, + /// Value transferred to the contract. + pub transferred_value: u64, + /// Transaction hash. + pub transaction_hash: TransactionHash, + /// Address generator. + /// + /// This can be either seeded and created as part of the builder or shared across chain of + /// execution requests. + pub address_generator: Arc>, + /// Chain name. + /// + /// This is very important ingredient for deriving contract hashes on the network. + pub chain_name: Arc, + /// Block time represented as a unix timestamp. + pub block_time: BlockTime, + /// State root hash of the global state in which the transaction will be executed. + pub state_hash: Digest, + /// Parent block hash. + pub parent_block_hash: BlockHash, + /// Block height. + pub block_height: u64, +} + +/// Builder for `ExecuteRequest`. +#[derive(Default)] +pub struct ExecuteRequestBuilder { + initiator: Option, + caller_key: Option, + gas_limit: Option, + target: Option, + input: Option, + value: Option, + transaction_hash: Option, + address_generator: Option>>, + chain_name: Option>, + block_time: Option, + state_hash: Option, + parent_block_hash: Option, + block_height: Option, +} + +impl ExecuteRequestBuilder { + /// Set the initiator's address. + #[must_use] + pub fn with_initiator(mut self, initiator: AccountHash) -> Self { + self.initiator = Some(initiator); + self + } + + /// Set the caller's key. 
+ #[must_use] + pub fn with_caller_key(mut self, caller_key: Key) -> Self { + self.caller_key = Some(caller_key); + self + } + + /// Set the gas limit. + #[must_use] + pub fn with_gas_limit(mut self, gas_limit: u64) -> Self { + self.gas_limit = Some(gas_limit); + self + } + + /// Set the target for execution. + #[must_use] + pub fn with_target(mut self, target: ExecutionKind) -> Self { + self.target = Some(target); + self + } + + /// Pass input data. + #[must_use] + pub fn with_input(mut self, input: Bytes) -> Self { + self.input = Some(input); + self + } + + /// Pass input data that can be serialized. + #[must_use] + pub fn with_serialized_input(self, input: T) -> Self { + let input = borsh::to_vec(&input) + .map(Bytes::from) + .expect("should serialize input"); + self.with_input(input) + } + + /// Pass value to be sent to the contract. + #[must_use] + pub fn with_transferred_value(mut self, value: u64) -> Self { + self.value = Some(value); + self + } + + /// Set the transaction hash. + #[must_use] + pub fn with_transaction_hash(mut self, transaction_hash: TransactionHash) -> Self { + self.transaction_hash = Some(transaction_hash); + self + } + + /// Set the address generator. + /// + /// This can be either seeded and created as part of the builder or shared across chain of + /// execution requests. + #[must_use] + pub fn with_address_generator(mut self, address_generator: AddressGenerator) -> Self { + self.address_generator = Some(Arc::new(RwLock::new(address_generator))); + self + } + + /// Set the shared address generator. + /// + /// This is useful when the address generator is shared across a chain of multiple execution + /// requests. + #[must_use] + pub fn with_shared_address_generator( + mut self, + address_generator: Arc>, + ) -> Self { + self.address_generator = Some(address_generator); + self + } + + /// Set the chain name. 
+ #[must_use] + pub fn with_chain_name>>(mut self, chain_name: T) -> Self { + self.chain_name = Some(chain_name.into()); + self + } + + /// Set the block time. + #[must_use] + pub fn with_block_time(mut self, block_time: BlockTime) -> Self { + self.block_time = Some(block_time); + self + } + + /// Set the state hash. + #[must_use] + pub fn with_state_hash(mut self, state_hash: Digest) -> Self { + self.state_hash = Some(state_hash); + self + } + + /// Set the parent block hash. + #[must_use] + pub fn with_parent_block_hash(mut self, parent_block_hash: BlockHash) -> Self { + self.parent_block_hash = Some(parent_block_hash); + self + } + + /// Set the block height. + #[must_use] + pub fn with_block_height(mut self, block_height: u64) -> Self { + self.block_height = Some(block_height); + self + } + + /// Build the `ExecuteRequest`. + pub fn build(self) -> Result { + let initiator = self.initiator.ok_or("Initiator is not set")?; + let caller_key = self.caller_key.ok_or("Caller is not set")?; + let gas_limit = self.gas_limit.ok_or("Gas limit is not set")?; + let execution_kind = self.target.ok_or("Target is not set")?; + let input = self.input.ok_or("Input is not set")?; + let transferred_value = self.value.ok_or("Value is not set")?; + let transaction_hash = self.transaction_hash.ok_or("Transaction hash is not set")?; + let address_generator = self + .address_generator + .ok_or("Address generator is not set")?; + let chain_name = self.chain_name.ok_or("Chain name is not set")?; + let block_time = self.block_time.ok_or("Block time is not set")?; + let state_hash = self.state_hash.ok_or("State hash is not set")?; + let parent_block_hash = self + .parent_block_hash + .ok_or("Parent block hash is not set")?; + let block_height = self.block_height.ok_or("Block height is not set")?; + Ok(ExecuteRequest { + initiator, + caller_key, + gas_limit, + execution_kind, + input, + transferred_value, + transaction_hash, + address_generator, + chain_name, + block_time, + state_hash, + 
parent_block_hash, + block_height, + }) + } +} + +/// Result of executing a Wasm contract. +#[derive(Debug)] +pub struct ExecuteResult { + /// Error while executing Wasm: traps, memory access errors, etc. + pub host_error: Option, + /// Output produced by the Wasm contract. + pub output: Option, + /// Gas usage. + pub gas_usage: GasUsage, + /// Effects produced by the execution. + pub effects: Effects, + /// Cache of tracking copy effects produced by the execution. + pub cache: TrackingCopyCache, + /// Messages produced by the execution. + pub messages: Messages, +} + +impl ExecuteResult { + /// Returns the host error. + pub fn effects(&self) -> &Effects { + &self.effects + } + + pub fn into_effects(self) -> Effects { + self.effects + } + + pub fn host_error(&self) -> Option<&CallError> { + self.host_error.as_ref() + } + + pub fn output(&self) -> Option<&Bytes> { + self.output.as_ref() + } + + pub fn gas_usage(&self) -> &GasUsage { + &self.gas_usage + } +} + +/// Result of executing a Wasm contract on a state provider. +#[derive(Debug)] +pub struct ExecuteWithProviderResult { + /// Error while executing Wasm: traps, memory access errors, etc. + pub host_error: Option, + /// Output produced by the Wasm contract. + output: Option, + /// Gas usage. + gas_usage: GasUsage, + /// Effects produced by the execution. + effects: Effects, + /// Post state hash. + post_state_hash: Digest, + /// Messages produced by the execution. 
+ messages: Messages, +} + +impl ExecuteWithProviderResult { + #[must_use] + pub fn new( + host_error: Option, + output: Option, + gas_usage: GasUsage, + effects: Effects, + post_state_hash: Digest, + messages: Messages, + ) -> Self { + Self { + host_error, + output, + gas_usage, + effects, + post_state_hash, + messages, + } + } + + pub fn output(&self) -> Option<&Bytes> { + self.output.as_ref() + } + + pub fn gas_usage(&self) -> &GasUsage { + &self.gas_usage + } + + pub fn effects(&self) -> &Effects { + &self.effects + } + + #[must_use] + pub fn post_state_hash(&self) -> Digest { + self.post_state_hash + } + + pub fn messages(&self) -> &Messages { + &self.messages + } +} + +/// Target for Wasm execution. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum ExecutionKind { + /// Execute Wasm bytes directly. + SessionBytes(Bytes), + /// Execute a stored contract by its address. + Stored { + /// Address of the contract. + address: HashAddr, + /// Entry point to call. + entry_point: String, + }, +} + +/// Error that can occur during execution, before the Wasm virtual machine is involved. +/// +/// This error is returned by the `execute` function. It contains information about the error that +/// occurred. +#[derive(Debug, Error)] +pub enum ExecuteError { + /// Error while preparing Wasm instance: export not found, validation, compilation errors, etc. + /// + /// No wasm was executed at this point. + #[error("Wasm error error: {0}")] + WasmPreparation(#[from] WasmPreparationError), + /// Error while executing Wasm: traps, memory access errors, etc. + #[error("Internal host error: {0}")] + InternalHost(#[from] InternalHostError), + #[error("Code not found")] + CodeNotFound(HashAddr), +} + +#[derive(Debug, Error)] +pub enum ExecuteWithProviderError { + /// Error while accessing global state. + #[error("Global state error: {0}")] + GlobalState(#[from] GlobalStateError), + #[error(transparent)] + Execute(#[from] ExecuteError), +} + +/// Executor trait. 
+/// +/// An executor is responsible for executing Wasm contracts. This implies that the executor is able +/// to prepare Wasm instances, execute them, and handle errors that occur during execution. +/// +/// Trait bounds also implying that the executor has to support interior mutability, as it may need +/// to update its internal state during execution of a single or a chain of multiple contracts. +pub trait Executor: Clone + Send { + fn execute( + &self, + tracking_copy: TrackingCopy, + execute_request: ExecuteRequest, + ) -> Result; +} diff --git a/executor/wasm_interface/src/lib.rs b/executor/wasm_interface/src/lib.rs new file mode 100644 index 0000000000..cfe83f3130 --- /dev/null +++ b/executor/wasm_interface/src/lib.rs @@ -0,0 +1,300 @@ +pub mod executor; + +use bytes::Bytes; +use executor::ExecuteError; +use thiserror::Error; + +use casper_executor_wasm_common::{ + error::{CallError, TrapCode, CALLEE_SUCCEEDED}, + flags::ReturnFlags, +}; + +/// Interface version for the Wasm host functions. +/// +/// This defines behavior of the Wasm execution environment i.e. the host behavior, serialiation, +/// etc. +/// +/// Only the highest `interface_version_X` is taken from the imports table which means Wasm has to +/// support X-1, X-2 versions as well. +#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)] +pub struct InterfaceVersion(u32); + +impl From for InterfaceVersion { + fn from(value: u32) -> Self { + InterfaceVersion(value) + } +} + +pub type HostResult = Result<(), CallError>; + +/// Converts a host result into a u32. +#[must_use] +pub fn u32_from_host_result(result: HostResult) -> u32 { + match result { + Ok(()) => CALLEE_SUCCEEDED, + Err(host_error) => host_error.into_u32(), + } +} + +/// Errors that can occur when resolving imports. +#[derive(Debug, Error)] +pub enum Resolver { + #[error("export {name} not found.")] + Export { name: String }, + /// Trying to call a function pointer by index. 
+ #[error("function pointer {index} not found.")] + Table { index: u32 }, +} + +#[derive(Error, Debug)] +pub enum ExportError { + /// An error than occurs when the exported type and the expected type + /// are incompatible. + #[error("incompatible type")] + IncompatibleType, + /// This error arises when an export is missing + #[error("missing export {0}")] + Missing(String), +} + +#[derive(Error, Debug)] +#[non_exhaustive] +pub enum MemoryError { + /// Memory access is outside heap bounds. + #[error("memory access out of bounds")] + HeapOutOfBounds, + /// Address calculation overflow. + #[error("address calculation overflow")] + Overflow, + /// String is not valid UTF-8. + #[error("string is not valid utf-8")] + NonUtf8String, +} + +#[derive(Error, Debug)] +/// Represents a catastrophic internal host error. +pub enum InternalHostError { + #[error("type conversion failure")] + TypeConversion, + #[error("contract already exists")] + ContractAlreadyExists, + #[error("tracking copy error")] + TrackingCopy, + #[error("failed building execution request")] + ExecuteRequestBuildFailure, + #[error("unexpected entity kind")] + UnexpectedEntityKind, + #[error("failed reading total balance")] + TotalBalanceReadFailure, + #[error("total balance exceeded u64::MAX")] + TotalBalanceOverflow, + #[error("remaining gas exceeded the gas limit")] + RemainingGasExceedsGasLimit, + #[error("account not found under key")] + AccountRecordNotFound, + #[error("message did not have a checksum")] + MessageChecksumMissing, +} + +/// The outcome of a call. +/// We can fold all errors into this type and return it from the host functions and remove Outcome +/// type. +#[derive(Debug, Error)] +pub enum VMError { + #[error("Return 0x{flags:?} {data:?}")] + Return { + flags: ReturnFlags, + data: Option, + }, + #[error("export: {0}")] + Export(ExportError), + #[error("Out of gas")] + OutOfGas, + /// Error while executing Wasm: traps, memory access errors, etc. 
+ /// + /// NOTE: for supporting multiple different backends we may want to abstract this a bit and + /// extract memory access errors, trap codes, and unify error reporting. + #[error("Trap: {0}")] + Trap(TrapCode), + #[error("Internal host error")] + Internal(#[from] InternalHostError), + #[error("Execute error: {0}")] + Execute(#[from] ExecuteError), +} + +impl VMError { + /// Returns the output data if the error is a `Return` error. + pub fn into_output_data(self) -> Option { + match self { + VMError::Return { data, .. } => data, + _ => None, + } + } +} + +/// Result of a VM operation. +pub type VMResult = Result; + +/// Configuration for the Wasm engine. +#[derive(Clone, Debug)] +pub struct Config { + gas_limit: u64, + memory_limit: u32, +} + +impl Config { + #[must_use] + pub fn gas_limit(&self) -> u64 { + self.gas_limit + } + + #[must_use] + pub fn memory_limit(&self) -> u32 { + self.memory_limit + } +} + +/// Configuration for the Wasm engine. +#[derive(Clone, Debug, Default)] +pub struct ConfigBuilder { + gas_limit: Option, + /// Memory limit in pages. + memory_limit: Option, +} + +impl ConfigBuilder { + /// Create a new configuration builder. + #[must_use] + pub fn new() -> Self { + Self::default() + } + + /// Gas limit in units. + #[must_use] + pub fn with_gas_limit(mut self, gas_limit: u64) -> Self { + self.gas_limit = Some(gas_limit); + self + } + + /// Memory limit denominated in pages. + #[must_use] + pub fn with_memory_limit(mut self, memory_limit: u32) -> Self { + self.memory_limit = Some(memory_limit); + self + } + + /// Build the configuration. 
+ #[must_use] + pub fn build(self) -> Config { + let gas_limit = self.gas_limit.expect("Required field missing: gas_limit"); + let memory_limit = self + .memory_limit + .expect("Required field missing: memory_limit"); + Config { + gas_limit, + memory_limit, + } + } +} + +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub enum MeteringPoints { + Remaining(u64), + Exhausted, +} + +impl MeteringPoints { + pub fn try_into_remaining(self) -> Result { + if let Self::Remaining(v) = self { + Ok(v) + } else { + Err(self) + } + } +} + +/// An abstraction over the 'caller' object of a host function that works for any Wasm VM. +/// +/// This allows access for important instances such as the context object that was passed to the +/// instance, wasm linear memory access, etc. +pub trait Caller { + type Context; + + fn context(&self) -> &Self::Context; + fn context_mut(&mut self) -> &mut Self::Context; + /// Returns currently running *unmodified* bytecode. + fn bytecode(&self) -> Bytes; + + /// Check if an export is present in the module. + fn has_export(&self, name: &str) -> bool; + + fn memory_read(&self, offset: u32, size: usize) -> VMResult> { + let mut vec = vec![0; size]; + self.memory_read_into(offset, &mut vec)?; + Ok(vec) + } + fn memory_read_into(&self, offset: u32, output: &mut [u8]) -> VMResult<()>; + fn memory_write(&self, offset: u32, data: &[u8]) -> VMResult<()>; + /// Allocates memory inside the Wasm VM by calling an export. + /// + /// Error is a type-erased error coming from the VM itself. + fn alloc(&mut self, idx: u32, size: usize, ctx: u32) -> VMResult; + /// Returns the amount of gas used. + fn gas_consumed(&mut self) -> MeteringPoints; + /// Set the amount of gas used. 
+ fn consume_gas(&mut self, value: u64) -> VMResult<()>; +} + +#[derive(Debug, Error)] +pub enum WasmPreparationError { + #[error("Missing export {0}")] + MissingExport(String), + #[error("Compile error: {0}")] + Compile(String), + #[error("Memory instantiation error: {0}")] + Memory(String), + #[error("Instantiation error: {0}")] + Instantiation(String), +} + +#[derive(Debug)] +pub struct GasUsage { + /// The amount of gas used by the execution. + gas_limit: u64, + /// The amount of gas remaining after the execution. + remaining_points: u64, +} + +impl GasUsage { + #[must_use] + pub fn new(gas_limit: u64, remaining_points: u64) -> Self { + GasUsage { + gas_limit, + remaining_points, + } + } + + #[must_use] + pub fn gas_spent(&self) -> u64 { + debug_assert!(self.remaining_points <= self.gas_limit); + self.gas_limit - self.remaining_points + } + + #[must_use] + pub fn gas_limit(&self) -> u64 { + self.gas_limit + } + + #[must_use] + pub fn remaining_points(&self) -> u64 { + self.remaining_points + } +} + +/// A trait that represents a Wasm instance. 
+pub trait WasmInstance { + type Context; + + fn call_export(&mut self, name: &str) -> (Result<(), VMError>, GasUsage); + fn teardown(self) -> Self::Context; +} diff --git a/executor/wasmer_backend/Cargo.toml b/executor/wasmer_backend/Cargo.toml new file mode 100644 index 0000000000..1213a8beb7 --- /dev/null +++ b/executor/wasmer_backend/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "casper-executor-wasmer-backend" +version = "0.1.3" +edition = "2021" +authors = ["Michał Papierski "] +description = "Casper executor interface package" +homepage = "https://casper.network" +repository = "https://github.com/casper-network/casper-node/tree/dev/executor/wasm_interface" +license = "Apache-2.0" + +[dependencies] +bytes = "1.10" +casper-executor-wasm-common = { version = "0.1.3", path = "../wasm_common" } +casper-executor-wasm-interface = { version = "0.1.3", path = "../wasm_interface" } +casper-executor-wasm-host = { version = "0.1.0", path = "../wasm_host" } +casper-storage = { version = "2.1.1", path = "../../storage" } +casper-contract-sdk-sys = { version = "0.1.3", path = "../../smart_contracts/sdk_sys" } +casper-types = { version = "6.0.1", path = "../../types" } +regex = "1.11" +wasmer = { version = "5.0.4", default-features = false, features = [ + "singlepass", +] } +wasmer-compiler-singlepass = "5.0.4" +wasmer-middlewares = "5.0.4" +wasmer-types = "5.0.4" +tracing = "0.1.41" + +[dev-dependencies] +wat = "1.227.1" diff --git a/executor/wasmer_backend/src/imports.rs b/executor/wasmer_backend/src/imports.rs new file mode 100644 index 0000000000..27d2b05a72 --- /dev/null +++ b/executor/wasmer_backend/src/imports.rs @@ -0,0 +1,98 @@ +use casper_executor_wasm_interface::{executor::Executor, VMError, VMResult}; +use casper_storage::global_state::GlobalStateReader; +use tracing::warn; +use wasmer::{FunctionEnv, FunctionEnvMut, Imports, Store}; + +use casper_contract_sdk_sys::for_each_host_function; + +use crate::WasmerEnv; + +/// A trait for converting a C ABI type 
declaration to a type that is understandable by wasm32 +/// target (and wasmer, by a consequence). +#[allow(dead_code)] +pub(crate) trait WasmerConvert: Sized { + type Output; +} + +impl WasmerConvert for i32 { + type Output = i32; +} + +impl WasmerConvert for u32 { + type Output = u32; +} +impl WasmerConvert for u64 { + type Output = u64; +} + +impl WasmerConvert for usize { + type Output = u32; +} + +impl WasmerConvert for *const T { + type Output = u32; // Pointers are 32-bit addressable +} + +impl WasmerConvert for *mut T { + type Output = u32; // Pointers are 32-bit addressable +} + +impl WasmerConvert + for extern "C" fn(Arg1, Arg2) -> Ret +{ + type Output = u32; // Function pointers are 32-bit addressable +} + +const DEFAULT_ENV_NAME: &str = "env"; + +/// This function will populate imports object with all host functions that are defined. +pub(crate) fn generate_casper_imports( + store: &mut Store, + env: &FunctionEnv>, +) -> Imports { + let mut imports = Imports::new(); + + macro_rules! visit_host_function { + (@convert_ret $ret:ty) => { + <$ret as $crate::imports::WasmerConvert>::Output + }; + (@convert_ret) => { () }; + ( $( $(#[$cfg:meta])? $vis:vis fn $name:ident $(( $($arg:ident: $argty:ty,)* ))? $(-> $ret:ty)?;)+) => { + $( + imports.define($crate::imports::DEFAULT_ENV_NAME, stringify!($name), wasmer::Function::new_typed_with_env( + store, + env, + | + env: FunctionEnvMut>, + // List all types and statically mapped C types into wasm types + $($($arg: <$argty as $crate::imports::WasmerConvert>::Output,)*)? + | -> VMResult { + let wasmer_caller = $crate::WasmerCaller { env }; + + // Dispatch to the actual host function. This also ensures that the return type of host function impl has expected type. + let result: VMResult< visit_host_function!(@convert_ret $($ret)?) 
> = casper_executor_wasm_host::host::$name(wasmer_caller, $($($arg,)*)?); + + match result { + Ok(ret) => Ok(ret), + Err(error) => { + warn!( + "Host function {} failed with error: {error:?}", + stringify!($name), + ); + + if let VMError::Internal(internal) = error { + panic!("InternalHostError {internal:?}; aborting"); + } + + Err(error) + } + } + } + )); + )* + } + } + for_each_host_function!(visit_host_function); + + imports +} diff --git a/executor/wasmer_backend/src/lib.rs b/executor/wasmer_backend/src/lib.rs new file mode 100644 index 0000000000..384ecc5399 --- /dev/null +++ b/executor/wasmer_backend/src/lib.rs @@ -0,0 +1,450 @@ +pub(crate) mod imports; +pub(crate) mod middleware; + +use std::{ + collections::BinaryHeap, + sync::{Arc, LazyLock, Weak}, +}; + +use bytes::Bytes; +use casper_executor_wasm_common::error::TrapCode; +use casper_executor_wasm_host::context::Context; +use casper_executor_wasm_interface::{ + executor::Executor, Caller, Config, ExportError, GasUsage, InterfaceVersion, MeteringPoints, + VMError, VMResult, WasmInstance, WasmPreparationError, +}; +use casper_storage::global_state::GlobalStateReader; +use middleware::{ + gas_metering, + gatekeeper::{Gatekeeper, GatekeeperConfig}, +}; +use regex::Regex; +use wasmer::{ + AsStoreMut, AsStoreRef, CompilerConfig, Engine, Function, FunctionEnv, FunctionEnvMut, + Instance, Memory, MemoryView, Module, RuntimeError, Store, StoreMut, Table, TypedFunction, +}; +use wasmer_compiler_singlepass::Singlepass; +use wasmer_middlewares::metering; + +fn from_wasmer_memory_access_error(error: wasmer::MemoryAccessError) -> VMError { + let trap_code = match error { + wasmer::MemoryAccessError::HeapOutOfBounds | wasmer::MemoryAccessError::Overflow => { + // As according to Wasm spec section `Memory Instructions` any access to memory that + // is out of bounds of the memory's current size is a trap. 
Reference: https://webassembly.github.io/spec/core/syntax/instructions.html#memory-instructions + TrapCode::MemoryOutOfBounds + } + wasmer::MemoryAccessError::NonUtf8String => { + // This can happen only when using wasmer's utf8 reading routines which we don't + // need. + unreachable!("NonUtf8String") + } + _ => { + // All errors are handled and converted to a trap code, but we have to add this as + // wasmer's errors are #[non_exhaustive] + unreachable!("Unexpected error: {error:?}") + } + }; + VMError::Trap(trap_code) +} + +fn from_wasmer_trap_code(value: wasmer_types::TrapCode) -> TrapCode { + match value { + wasmer_types::TrapCode::StackOverflow => TrapCode::StackOverflow, + wasmer_types::TrapCode::HeapAccessOutOfBounds => TrapCode::MemoryOutOfBounds, + wasmer_types::TrapCode::HeapMisaligned => { + unreachable!("Atomic operations are not supported") + } + wasmer_types::TrapCode::TableAccessOutOfBounds => TrapCode::TableAccessOutOfBounds, + wasmer_types::TrapCode::IndirectCallToNull => TrapCode::IndirectCallToNull, + wasmer_types::TrapCode::BadSignature => TrapCode::BadSignature, + wasmer_types::TrapCode::IntegerOverflow => TrapCode::IntegerOverflow, + wasmer_types::TrapCode::IntegerDivisionByZero => TrapCode::IntegerDivisionByZero, + wasmer_types::TrapCode::BadConversionToInteger => TrapCode::BadConversionToInteger, + wasmer_types::TrapCode::UnreachableCodeReached => TrapCode::UnreachableCodeReached, + wasmer_types::TrapCode::UnalignedAtomic => { + todo!("Atomic memory extension is not supported") + } + } +} + +fn from_wasmer_export_error(error: wasmer::ExportError) -> VMError { + let export_error = match error { + wasmer::ExportError::IncompatibleType => ExportError::IncompatibleType, + wasmer::ExportError::Missing(export_name) => ExportError::Missing(export_name), + }; + VMError::Export(export_error) +} + +#[derive(Default)] +pub struct WasmerEngine(()); + +impl WasmerEngine { + pub fn new() -> Self { + Self::default() + } + + pub fn instantiate, S: 
GlobalStateReader + 'static, E: Executor + 'static>( + &self, + wasm_bytes: T, + context: Context, + config: Config, + ) -> Result>, WasmPreparationError> { + WasmerInstance::from_wasm_bytes(wasm_bytes, context, config) + } +} + +struct WasmerEnv { + context: Context, + instance: Weak, + bytecode: Bytes, + exported_runtime: Option, + interface_version: InterfaceVersion, +} + +pub(crate) struct WasmerCaller<'a, S: GlobalStateReader, E: Executor> { + env: FunctionEnvMut<'a, WasmerEnv>, +} + +impl WasmerCaller<'_, S, E> { + fn with_memory(&self, f: impl FnOnce(MemoryView<'_>) -> T) -> T { + let mem = &self.env.data().exported_runtime().memory; + let binding = self.env.as_store_ref(); + let view = mem.view(&binding); + f(view) + } + + fn with_instance(&self, f: impl FnOnce(&Instance) -> Ret) -> Ret { + let instance = self.env.data().instance.upgrade().expect("Valid instance"); + f(&instance) + } + + fn with_store_and_instance(&mut self, f: impl FnOnce(StoreMut, &Instance) -> Ret) -> Ret { + let (data, store) = self.env.data_and_store_mut(); + let instance = data.instance.upgrade().expect("Valid instance"); + f(store, &instance) + } + + /// Returns the amount of gas used. + fn get_remaining_points(&mut self) -> MeteringPoints { + self.with_store_and_instance(|mut store, instance| { + let metering_points = metering::get_remaining_points(&mut store, instance); + match metering_points { + metering::MeteringPoints::Remaining(points) => MeteringPoints::Remaining(points), + metering::MeteringPoints::Exhausted => MeteringPoints::Exhausted, + } + }) + } + /// Set the amount of gas used. 
+ fn set_remaining_points(&mut self, new_value: u64) { + self.with_store_and_instance(|mut store, instance| { + metering::set_remaining_points(&mut store, instance, new_value); + }) + } +} + +impl Caller for WasmerCaller<'_, S, E> { + type Context = Context; + + fn memory_write(&self, offset: u32, data: &[u8]) -> Result<(), VMError> { + self.with_memory(|mem| mem.write(offset.into(), data)) + .map_err(from_wasmer_memory_access_error) + } + + fn context(&self) -> &Context { + &self.env.data().context + } + + fn context_mut(&mut self) -> &mut Context { + &mut self.env.data_mut().context + } + + fn memory_read_into(&self, offset: u32, output: &mut [u8]) -> Result<(), VMError> { + self.with_memory(|mem| mem.read(offset.into(), output)) + .map_err(from_wasmer_memory_access_error) + } + + fn alloc(&mut self, idx: u32, size: usize, ctx: u32) -> VMResult { + let _interface_version = self.env.data().interface_version; + + let (data, mut store) = self.env.data_and_store_mut(); + let value = data + .exported_runtime() + .exported_table + .as_ref() + .expect("should have table exported") // TODO: if theres no table then no function pointer is stored in the wasm blob - + // probably safe + .get(&mut store.as_store_mut(), idx) + .expect("has entry in the table"); // TODO: better error handling - pass 0 as nullptr? + let funcref = value.funcref().expect("is funcref"); + let valid_funcref = funcref.as_ref().expect("valid funcref"); + let alloc_callback: TypedFunction<(u32, u32), u32> = valid_funcref + .typed(&store) + .unwrap_or_else(|error| panic!("{error:?}")); + let ptr = alloc_callback + .call(&mut store.as_store_mut(), size.try_into().unwrap(), ctx) + .map_err(handle_wasmer_runtime_error)?; + Ok(ptr) + } + + fn bytecode(&self) -> Bytes { + self.env.data().bytecode.clone() + } + + /// Returns the amount of gas used. + #[inline] + fn gas_consumed(&mut self) -> MeteringPoints { + self.get_remaining_points() + } + + /// Set the amount of gas used. 
+ /// + /// This method will cause the VM engine to stop in case remaining gas points are depleted. + fn consume_gas(&mut self, amount: u64) -> VMResult<()> { + let gas_consumed = self.gas_consumed(); + match gas_consumed { + MeteringPoints::Remaining(remaining_points) => { + let remaining_points = remaining_points + .checked_sub(amount) + .ok_or(VMError::OutOfGas)?; + self.set_remaining_points(remaining_points); + Ok(()) + } + MeteringPoints::Exhausted => Err(VMError::OutOfGas), + } + } + + #[inline] + fn has_export(&self, name: &str) -> bool { + self.with_instance(|instance| instance.exports.contains(name)) + } +} + +impl WasmerEnv { + fn new(context: Context, code: Bytes, interface_version: InterfaceVersion) -> Self { + Self { + context, + instance: Weak::new(), + exported_runtime: None, + bytecode: code, + interface_version, + } + } + pub(crate) fn exported_runtime(&self) -> &ExportedRuntime { + self.exported_runtime + .as_ref() + .expect("Valid instance of exported runtime") + } +} + +/// Container for Wasm-provided exports such as alloc, dealloc, etc. +/// +/// Let's call it a "minimal runtime" that is expected to exist inside a Wasm. +#[derive(Clone)] +pub(crate) struct ExportedRuntime { + pub(crate) memory: Memory, + pub(crate) exported_table: Option, +} + +pub(crate) struct WasmerInstance { + instance: Arc, + env: FunctionEnv>, + store: Store, + config: Config, +} + +fn handle_wasmer_runtime_error(error: RuntimeError) -> VMError { + match error.downcast::() { + Ok(vm_error) => vm_error, + Err(wasmer_runtime_error) => { + // NOTE: Can this be other variant than VMError and trap? This may indicate a bug in + // our code. 
+ let wasmer_trap_code = wasmer_runtime_error.to_trap().expect("Trap code"); + VMError::Trap(from_wasmer_trap_code(wasmer_trap_code)) + } + } +} + +impl WasmerInstance +where + S: GlobalStateReader + 'static, + E: Executor + 'static, +{ + pub(crate) fn call_export(&mut self, name: &str) -> Result<(), VMError> { + let exported_call_func: TypedFunction<(), ()> = self + .instance + .exports + .get_typed_function(&self.store, name) + .map_err(from_wasmer_export_error)?; + + exported_call_func + .call(&mut self.store.as_store_mut()) + .map_err(handle_wasmer_runtime_error)?; + Ok(()) + } + + pub(crate) fn from_wasm_bytes>( + wasm_bytes: C, + context: Context, + config: Config, + ) -> Result { + let engine = { + let mut singlepass_compiler = Singlepass::new(); + let gatekeeper_config = GatekeeperConfig::default(); + singlepass_compiler.push_middleware(Arc::new(Gatekeeper::new(gatekeeper_config))); + singlepass_compiler + .push_middleware(gas_metering::gas_metering_middleware(config.gas_limit())); + singlepass_compiler + }; + + let engine = Engine::from(engine); + + let wasm_bytes: Bytes = wasm_bytes.into(); + + let module = Module::new(&engine, &wasm_bytes) + .map_err(|error| WasmPreparationError::Compile(error.to_string()))?; + + let mut store = Store::new(engine); + + let wasmer_env = WasmerEnv::new(context, wasm_bytes, InterfaceVersion::from(1u32)); + let function_env = FunctionEnv::new(&mut store, wasmer_env); + + let memory = Memory::new( + &mut store, + wasmer_types::MemoryType { + minimum: wasmer_types::Pages(17), + maximum: None, + shared: false, + }, + ) + .map_err(|error| WasmPreparationError::Memory(error.to_string()))?; + + let imports = { + let mut imports = imports::generate_casper_imports(&mut store, &function_env); + + imports.define("env", "memory", memory.clone()); + + imports.define( + "env", + "interface_version_1", + Function::new_typed(&mut store, || {}), + ); + + imports + }; + + // TODO: Deal with "start" section that executes actual Wasm - test, 
measure gas, etc. -> + // Instance::new may fail with RuntimError + + let instance = { + let instance = Instance::new(&mut store, &module, &imports) + .map_err(|error| WasmPreparationError::Instantiation(error.to_string()))?; + + // We don't necessarily need atomic counter. Arc's purpose is to be able to retrieve a + // Weak reference to the instance to be able to invoke recursive calls to the wasm + // itself from within a host function implementation. + + // instance.exports.get_table(name) + Arc::new(instance) + }; + + let interface_version = { + static RE: LazyLock = + LazyLock::new(|| Regex::new(r"^interface_version_(?P\d+)$").unwrap()); + + let mut interface_versions = BinaryHeap::new(); + for import in module.imports() { + if import.module() == "env" { + if let Some(caps) = RE.captures(import.name()) { + let version = &caps["version"]; + let version: u32 = version.parse().expect("valid number"); // SAFETY: regex guarantees this is a number, and imports table guarantees + // limited set of values. + interface_versions.push(InterfaceVersion::from(version)); + } + } + } + + // Get the highest one assuming given Wasm can support all previous interface versions. + interface_versions.pop() + }; + + // TODO: get first export of type table as some compilers generate different names (i.e. + // rust __indirect_function_table, assemblyscript `table` etc). There's only one table + // allowed in a valid module. 
+ let table = match instance.exports.get_table("__indirect_function_table") { + Ok(table) => Some(table.clone()), + Err(error @ wasmer::ExportError::IncompatibleType) => { + return Err(WasmPreparationError::MissingExport(error.to_string())) + } + Err(wasmer::ExportError::Missing(_)) => None, + }; + + { + let function_env_mut = function_env.as_mut(&mut store); + function_env_mut.instance = Arc::downgrade(&instance); + function_env_mut.exported_runtime = Some(ExportedRuntime { + memory, + exported_table: table, + }); + if let Some(interface_version) = interface_version { + function_env_mut.interface_version = interface_version; + } + } + + Ok(Self { + instance, + env: function_env, + store, + config, + }) + } +} + +impl WasmInstance for WasmerInstance +where + S: GlobalStateReader + 'static, + E: Executor + 'static, +{ + type Context = Context; + fn call_export(&mut self, name: &str) -> (Result<(), VMError>, GasUsage) { + let vm_result = self.call_export(name); + let remaining_points = metering::get_remaining_points(&mut self.store, &self.instance); + match remaining_points { + metering::MeteringPoints::Remaining(remaining_points) => { + let gas_usage = GasUsage::new(self.config.gas_limit(), remaining_points); + (vm_result, gas_usage) + } + metering::MeteringPoints::Exhausted => { + let gas_usage = GasUsage::new(self.config.gas_limit(), 0); + (Err(VMError::OutOfGas), gas_usage) + } + } + } + + /// Consume instance object and retrieve the [`Context`] object. + fn teardown(self) -> Context { + let WasmerInstance { env, mut store, .. } = self; + + let mut env_mut = env.into_mut(&mut store); + + let data = env_mut.data_mut(); + + // NOTE: There must be a better way than re-creating the object based on consumed fields. 
+ + Context { + initiator: data.context.initiator, + caller: data.context.caller, + callee: data.context.callee, + config: data.context.config, + storage_costs: data.context.storage_costs, + transferred_value: data.context.transferred_value, + tracking_copy: data.context.tracking_copy.fork2(), + executor: data.context.executor.clone(), + transaction_hash: data.context.transaction_hash, + address_generator: Arc::clone(&data.context.address_generator), + chain_name: data.context.chain_name.clone(), + input: data.context.input.clone(), + block_time: data.context.block_time, + message_limits: data.context.message_limits, + } + } +} diff --git a/executor/wasmer_backend/src/middleware.rs b/executor/wasmer_backend/src/middleware.rs new file mode 100644 index 0000000000..95e3fc4852 --- /dev/null +++ b/executor/wasmer_backend/src/middleware.rs @@ -0,0 +1,2 @@ +pub(crate) mod gas_metering; +pub(crate) mod gatekeeper; diff --git a/executor/wasmer_backend/src/middleware/gas_metering.rs b/executor/wasmer_backend/src/middleware/gas_metering.rs new file mode 100644 index 0000000000..414e4b1b76 --- /dev/null +++ b/executor/wasmer_backend/src/middleware/gas_metering.rs @@ -0,0 +1,636 @@ +use std::sync::Arc; + +use wasmer::{wasmparser::Operator, ModuleMiddleware}; +use wasmer_middlewares::Metering; + +/// Calculated based on the benchmark results and fitted for approx ~1000 CSPR of computation and +/// 16s maximum computation time. +const MULTIPLIER: u64 = 16; + +/// The scaling factor for the cost function is used to saturate the computation time for the +/// maximum limits allocated. Multiplier derived from benchmarks itself is not accurate enough due +/// to non-linear overhead of a gas metering on real world code. Fixed scaling factor is used +/// to adjust the multiplier to counter the effects of metering overhead. This is validated with +/// real world compute-intensive Wasm. 
+const SCALING_FACTOR: u64 = 2; + +fn cycles(operator: &Operator) -> u64 { + match operator { + Operator::I32Const { .. } => 1, + Operator::I64Const { .. } => 1, + Operator::F32Const { .. } => 1, + Operator::F64Const { .. } => 1, + Operator::I32Clz => 1, + Operator::I32Ctz => 1, + Operator::I32Popcnt => 1, + Operator::I64Clz => 1, + Operator::I64Ctz => 1, + Operator::I64Popcnt => 1, + Operator::F32Abs => 1, + Operator::F32Neg => 1, + Operator::F64Abs => 2, + Operator::F64Neg => 1, + Operator::F32Ceil => 4, + Operator::F32Floor => 4, + Operator::F32Trunc => 3, + Operator::F32Nearest => 3, + Operator::F64Ceil => 4, + Operator::F64Floor => 4, + Operator::F64Trunc => 4, + Operator::F64Nearest => 4, + Operator::F32Sqrt => 4, + Operator::F64Sqrt => 8, + Operator::I32Add => 1, + Operator::I32Sub => 1, + Operator::I32Mul => 1, + Operator::I32And => 1, + Operator::I32Or => 1, + Operator::I32Xor => 1, + Operator::I32Shl => 1, + Operator::I32ShrS => 1, + Operator::I32ShrU => 1, + Operator::I32Rotl => 1, + Operator::I32Rotr => 1, + Operator::I64Add => 1, + Operator::I64Sub => 1, + Operator::I64Mul => 1, + Operator::I64And => 1, + Operator::I64Or => 1, + Operator::I64Xor => 1, + Operator::I64Shl => 1, + Operator::I64ShrS => 1, + Operator::I64ShrU => 1, + Operator::I64Rotl => 1, + Operator::I64Rotr => 1, + Operator::I32DivS => 18, + Operator::I32DivU => 18, + Operator::I32RemS => 19, + Operator::I32RemU => 19, + Operator::I64DivS => 19, + Operator::I64DivU => 18, + Operator::I64RemS => 18, + Operator::I64RemU => 18, + Operator::F32Add => 3, + Operator::F32Sub => 4, + Operator::F32Mul => 3, + Operator::F64Add => 4, + Operator::F64Sub => 4, + Operator::F64Mul => 4, + Operator::F32Div => 5, + Operator::F64Div => 4, + Operator::F32Min => 24, + Operator::F32Max => 21, + Operator::F64Min => 24, + Operator::F64Max => 23, + Operator::F32Copysign => 2, + Operator::F64Copysign => 4, + Operator::I32Eqz => 1, + Operator::I64Eqz => 2, + Operator::I32Eq => 1, + Operator::I32Ne => 1, + 
Operator::I32LtS => 1, + Operator::I32LtU => 2, + Operator::I32GtS => 1, + Operator::I32GtU => 2, + Operator::I32LeS => 2, + Operator::I32LeU => 1, + Operator::I32GeS => 1, + Operator::I32GeU => 1, + Operator::I64Eq => 1, + Operator::I64Ne => 2, + Operator::I64LtS => 1, + Operator::I64LtU => 1, + Operator::I64GtS => 1, + Operator::I64GtU => 2, + Operator::I64LeS => 1, + Operator::I64LeU => 1, + Operator::I64GeS => 2, + Operator::I64GeU => 1, + Operator::F32Eq => 2, + Operator::F32Ne => 2, + Operator::F64Eq => 2, + Operator::F64Ne => 2, + Operator::F32Lt => 2, + Operator::F32Gt => 2, + Operator::F32Le => 2, + Operator::F32Ge => 2, + Operator::F64Lt => 2, + Operator::F64Gt => 2, + Operator::F64Le => 2, + Operator::F64Ge => 2, + Operator::I32Extend8S => 1, + Operator::I32Extend16S => 1, + Operator::I64Extend8S => 1, + Operator::I64Extend16S => 1, + Operator::F32ConvertI32S => 2, + Operator::F32ConvertI64S => 2, + Operator::F64ConvertI32S => 2, + Operator::F64ConvertI64S => 2, + Operator::I64Extend32S => 1, + Operator::I32WrapI64 => 1, + Operator::I64ExtendI32S => 1, + Operator::I64ExtendI32U => 1, + Operator::F32DemoteF64 => 1, + Operator::F64PromoteF32 => 2, + Operator::F32ReinterpretI32 => 1, + Operator::F64ReinterpretI64 => 1, + Operator::F32ConvertI32U => 2, + Operator::F64ConvertI32U => 2, + Operator::I32ReinterpretF32 => 1, + Operator::I64ReinterpretF64 => 1, + Operator::I32TruncF32S => 19, + Operator::I32TruncF32U => 17, + Operator::I32TruncF64S => 19, + Operator::I32TruncF64U => 18, + Operator::I64TruncF32S => 19, + Operator::I64TruncF32U => 21, + Operator::I64TruncF64S => 19, + Operator::I64TruncF64U => 23, + Operator::I64TruncSatF32S => 19, + Operator::I64TruncSatF64S => 19, + Operator::I32TruncSatF32U => 19, + Operator::I32TruncSatF64U => 18, + Operator::I64TruncSatF32U => 20, + Operator::I64TruncSatF64U => 22, + Operator::I32TruncSatF32S => 18, + Operator::I32TruncSatF64S => 19, + Operator::F32ConvertI64U => 14, + Operator::F64ConvertI64U => 13, + 
Operator::RefFunc { .. } => 29, + Operator::RefTestNullable { .. } => 34, + Operator::LocalGet { .. } => 1, + Operator::GlobalGet { .. } => 5, + Operator::GlobalSet { .. } => 1, + Operator::LocalTee { .. } => 1, + Operator::TableGet { .. } => 29, + Operator::TableSize { .. } => 25, + Operator::I32Load { .. } => 2, + Operator::I64Load { .. } => 2, + Operator::F32Load { .. } => 2, + Operator::F64Load { .. } => 2, + Operator::I32Store { .. } => 1, + Operator::I64Store { .. } => 1, + Operator::F32Store { .. } => 1, + Operator::F64Store { .. } => 1, + Operator::I32Load8S { .. } => 2, + Operator::I32Load8U { .. } => 2, + Operator::I32Load16S { .. } => 2, + Operator::I32Load16U { .. } => 2, + Operator::I64Load8S { .. } => 2, + Operator::I64Load8U { .. } => 2, + Operator::I64Load16S { .. } => 2, + Operator::I64Load16U { .. } => 2, + Operator::I64Load32S { .. } => 2, + Operator::I64Load32U { .. } => 2, + Operator::I32Store8 { .. } => 1, + Operator::I32Store16 { .. } => 1, + Operator::I64Store8 { .. } => 1, + Operator::I64Store16 { .. } => 1, + Operator::I64Store32 { .. } => 1, + Operator::MemorySize { .. } => 31, + Operator::MemoryGrow { .. } => 67, + + + Operator::MemoryInit { .. } + | Operator::DataDrop { .. } + | Operator::MemoryCopy { ..} + | Operator::MemoryFill { .. } + | Operator::TableInit { .. } + | Operator::ElemDrop { .. } + | Operator::TableCopy { .. } => 31, // memory.copy has cycle count of 31, rest needs benchmark validation (bulk memory extension) + + + Operator::Select => 14, + Operator::If { .. } => 1, + Operator::Call { .. } => 17, + Operator::Br { .. } => 12, + Operator::BrIf { .. } => 14, + Operator::BrTable { .. } => 34, + Operator::CallIndirect { .. } => 23, + Operator::Unreachable => 1, + Operator::Nop => 1, + Operator::Block { .. } | Operator::Loop { .. } | Operator::Else => 1, + Operator::TryTable { .. } + | Operator::Throw { .. } + | Operator::ThrowRef + | Operator::Try { .. } + | Operator::Catch { .. } + | Operator::Rethrow { .. 
} + | Operator::Delegate { .. } + | Operator::CatchAll => todo!("try/catch operators are not metered yet; gatekeeper config should not enable this extension"), + Operator::End + | Operator::Return + | Operator::ReturnCall { .. } + | Operator::ReturnCallIndirect { .. } => 1, + Operator::Drop => 1, + Operator::TypedSelect { .. } => unreachable!(), + Operator::LocalSet { .. } => 1, + Operator::RefNull { .. } + | Operator::RefIsNull + | Operator::RefEq + | Operator::StructNew { .. } + | Operator::StructNewDefault { .. } + | Operator::StructGet { .. } + | Operator::StructGetS { .. } + | Operator::StructGetU { .. } + | Operator::StructSet { .. } + | Operator::ArrayNew { .. } + | Operator::ArrayNewDefault { .. } + | Operator::ArrayNewFixed { .. } + | Operator::ArrayNewData { .. } + | Operator::ArrayNewElem { .. } + | Operator::ArrayGet { .. } + | Operator::ArrayGetS { .. } + | Operator::ArrayGetU { .. } + | Operator::ArraySet { .. } + | Operator::ArrayLen + | Operator::ArrayFill { .. } + | Operator::ArrayCopy { .. } + | Operator::ArrayInitData { .. } + | Operator::ArrayInitElem { .. } + | Operator::RefTestNonNull { .. } + | Operator::RefCastNonNull { .. } + | Operator::RefCastNullable { .. } + | Operator::BrOnCast { .. } + | Operator::BrOnCastFail { .. } + | Operator::AnyConvertExtern + | Operator::ExternConvertAny + | Operator::RefI31 + | Operator::I31GetS + | Operator::I31GetU + | Operator::TableFill { .. } + | Operator::TableSet { .. } + | Operator::TableGrow { .. } + | Operator::MemoryDiscard { .. } + | Operator::MemoryAtomicNotify { .. } + | Operator::MemoryAtomicWait32 { .. } + | Operator::MemoryAtomicWait64 { .. } + | Operator::AtomicFence + | Operator::I32AtomicLoad { .. } + | Operator::I64AtomicLoad { .. } + | Operator::I32AtomicLoad8U { .. } + | Operator::I32AtomicLoad16U { .. } + | Operator::I64AtomicLoad8U { .. } + | Operator::I64AtomicLoad16U { .. } + | Operator::I64AtomicLoad32U { .. } + | Operator::I32AtomicStore { .. } + | Operator::I64AtomicStore { .. 
} + | Operator::I32AtomicStore8 { .. } + | Operator::I32AtomicStore16 { .. } + | Operator::I64AtomicStore8 { .. } + | Operator::I64AtomicStore16 { .. } + | Operator::I64AtomicStore32 { .. } + | Operator::I32AtomicRmwAdd { .. } + | Operator::I64AtomicRmwAdd { .. } + | Operator::I32AtomicRmw8AddU { .. } + | Operator::I32AtomicRmw16AddU { .. } + | Operator::I64AtomicRmw8AddU { .. } + | Operator::I64AtomicRmw16AddU { .. } + | Operator::I64AtomicRmw32AddU { .. } + | Operator::I32AtomicRmwSub { .. } + | Operator::I64AtomicRmwSub { .. } + | Operator::I32AtomicRmw8SubU { .. } + | Operator::I32AtomicRmw16SubU { .. } + | Operator::I64AtomicRmw8SubU { .. } + | Operator::I64AtomicRmw16SubU { .. } + | Operator::I64AtomicRmw32SubU { .. } + | Operator::I32AtomicRmwAnd { .. } + | Operator::I64AtomicRmwAnd { .. } + | Operator::I32AtomicRmw8AndU { .. } + | Operator::I32AtomicRmw16AndU { .. } + | Operator::I64AtomicRmw8AndU { .. } + | Operator::I64AtomicRmw16AndU { .. } + | Operator::I64AtomicRmw32AndU { .. } + | Operator::I32AtomicRmwOr { .. } + | Operator::I64AtomicRmwOr { .. } + | Operator::I32AtomicRmw8OrU { .. } + | Operator::I32AtomicRmw16OrU { .. } + | Operator::I64AtomicRmw8OrU { .. } + | Operator::I64AtomicRmw16OrU { .. } + | Operator::I64AtomicRmw32OrU { .. } + | Operator::I32AtomicRmwXor { .. } + | Operator::I64AtomicRmwXor { .. } + | Operator::I32AtomicRmw8XorU { .. } + | Operator::I32AtomicRmw16XorU { .. } + | Operator::I64AtomicRmw8XorU { .. } + | Operator::I64AtomicRmw16XorU { .. } + | Operator::I64AtomicRmw32XorU { .. } + | Operator::I32AtomicRmwXchg { .. } + | Operator::I64AtomicRmwXchg { .. } + | Operator::I32AtomicRmw8XchgU { .. } + | Operator::I32AtomicRmw16XchgU { .. } + | Operator::I64AtomicRmw8XchgU { .. } + | Operator::I64AtomicRmw16XchgU { .. } + | Operator::I64AtomicRmw32XchgU { .. } + | Operator::I32AtomicRmwCmpxchg { .. } + | Operator::I64AtomicRmwCmpxchg { .. } + | Operator::I32AtomicRmw8CmpxchgU { .. } + | Operator::I32AtomicRmw16CmpxchgU { .. 
} + | Operator::I64AtomicRmw8CmpxchgU { .. } + | Operator::I64AtomicRmw16CmpxchgU { .. } + | Operator::I64AtomicRmw32CmpxchgU { .. } + | Operator::V128Load { .. } + | Operator::V128Load8x8S { .. } + | Operator::V128Load8x8U { .. } + | Operator::V128Load16x4S { .. } + | Operator::V128Load16x4U { .. } + | Operator::V128Load32x2S { .. } + | Operator::V128Load32x2U { .. } + | Operator::V128Load8Splat { .. } + | Operator::V128Load16Splat { .. } + | Operator::V128Load32Splat { .. } + | Operator::V128Load64Splat { .. } + | Operator::V128Load32Zero { .. } + | Operator::V128Load64Zero { .. } + | Operator::V128Store { .. } + | Operator::V128Load8Lane { .. } + | Operator::V128Load16Lane { .. } + | Operator::V128Load32Lane { .. } + | Operator::V128Load64Lane { .. } + | Operator::V128Store8Lane { .. } + | Operator::V128Store16Lane { .. } + | Operator::V128Store32Lane { .. } + | Operator::V128Store64Lane { .. } + | Operator::V128Const { .. } + | Operator::I8x16Shuffle { .. } + | Operator::I8x16ExtractLaneS { .. } + | Operator::I8x16ExtractLaneU { .. } + | Operator::I8x16ReplaceLane { .. } + | Operator::I16x8ExtractLaneS { .. } + | Operator::I16x8ExtractLaneU { .. } + | Operator::I16x8ReplaceLane { .. } + | Operator::I32x4ExtractLane { .. } + | Operator::I32x4ReplaceLane { .. } + | Operator::I64x2ExtractLane { .. } + | Operator::I64x2ReplaceLane { .. } + | Operator::F32x4ExtractLane { .. } + | Operator::F32x4ReplaceLane { .. } + | Operator::F64x2ExtractLane { .. } + | Operator::F64x2ReplaceLane { .. 
} + | Operator::I8x16Swizzle + | Operator::I8x16Splat + | Operator::I16x8Splat + | Operator::I32x4Splat + | Operator::I64x2Splat + | Operator::F32x4Splat + | Operator::F64x2Splat + | Operator::I8x16Eq + | Operator::I8x16Ne + | Operator::I8x16LtS + | Operator::I8x16LtU + | Operator::I8x16GtS + | Operator::I8x16GtU + | Operator::I8x16LeS + | Operator::I8x16LeU + | Operator::I8x16GeS + | Operator::I8x16GeU + | Operator::I16x8Eq + | Operator::I16x8Ne + | Operator::I16x8LtS + | Operator::I16x8LtU + | Operator::I16x8GtS + | Operator::I16x8GtU + | Operator::I16x8LeS + | Operator::I16x8LeU + | Operator::I16x8GeS + | Operator::I16x8GeU + | Operator::I32x4Eq + | Operator::I32x4Ne + | Operator::I32x4LtS + | Operator::I32x4LtU + | Operator::I32x4GtS + | Operator::I32x4GtU + | Operator::I32x4LeS + | Operator::I32x4LeU + | Operator::I32x4GeS + | Operator::I32x4GeU + | Operator::I64x2Eq + | Operator::I64x2Ne + | Operator::I64x2LtS + | Operator::I64x2GtS + | Operator::I64x2LeS + | Operator::I64x2GeS + | Operator::F32x4Eq + | Operator::F32x4Ne + | Operator::F32x4Lt + | Operator::F32x4Gt + | Operator::F32x4Le + | Operator::F32x4Ge + | Operator::F64x2Eq + | Operator::F64x2Ne + | Operator::F64x2Lt + | Operator::F64x2Gt + | Operator::F64x2Le + | Operator::F64x2Ge + | Operator::V128Not + | Operator::V128And + | Operator::V128AndNot + | Operator::V128Or + | Operator::V128Xor + | Operator::V128Bitselect + | Operator::V128AnyTrue + | Operator::I8x16Abs + | Operator::I8x16Neg + | Operator::I8x16Popcnt + | Operator::I8x16AllTrue + | Operator::I8x16Bitmask + | Operator::I8x16NarrowI16x8S + | Operator::I8x16NarrowI16x8U + | Operator::I8x16Shl + | Operator::I8x16ShrS + | Operator::I8x16ShrU + | Operator::I8x16Add + | Operator::I8x16AddSatS + | Operator::I8x16AddSatU + | Operator::I8x16Sub + | Operator::I8x16SubSatS + | Operator::I8x16SubSatU + | Operator::I8x16MinS + | Operator::I8x16MinU + | Operator::I8x16MaxS + | Operator::I8x16MaxU + | Operator::I8x16AvgrU + | 
Operator::I16x8ExtAddPairwiseI8x16S + | Operator::I16x8ExtAddPairwiseI8x16U + | Operator::I16x8Abs + | Operator::I16x8Neg + | Operator::I16x8Q15MulrSatS + | Operator::I16x8AllTrue + | Operator::I16x8Bitmask + | Operator::I16x8NarrowI32x4S + | Operator::I16x8NarrowI32x4U + | Operator::I16x8ExtendLowI8x16S + | Operator::I16x8ExtendHighI8x16S + | Operator::I16x8ExtendLowI8x16U + | Operator::I16x8ExtendHighI8x16U + | Operator::I16x8Shl + | Operator::I16x8ShrS + | Operator::I16x8ShrU + | Operator::I16x8Add + | Operator::I16x8AddSatS + | Operator::I16x8AddSatU + | Operator::I16x8Sub + | Operator::I16x8SubSatS + | Operator::I16x8SubSatU + | Operator::I16x8Mul + | Operator::I16x8MinS + | Operator::I16x8MinU + | Operator::I16x8MaxS + | Operator::I16x8MaxU + | Operator::I16x8AvgrU + | Operator::I16x8ExtMulLowI8x16S + | Operator::I16x8ExtMulHighI8x16S + | Operator::I16x8ExtMulLowI8x16U + | Operator::I16x8ExtMulHighI8x16U + | Operator::I32x4ExtAddPairwiseI16x8S + | Operator::I32x4ExtAddPairwiseI16x8U + | Operator::I32x4Abs + | Operator::I32x4Neg + | Operator::I32x4AllTrue + | Operator::I32x4Bitmask + | Operator::I32x4ExtendLowI16x8S + | Operator::I32x4ExtendHighI16x8S + | Operator::I32x4ExtendLowI16x8U + | Operator::I32x4ExtendHighI16x8U + | Operator::I32x4Shl + | Operator::I32x4ShrS + | Operator::I32x4ShrU + | Operator::I32x4Add + | Operator::I32x4Sub + | Operator::I32x4Mul + | Operator::I32x4MinS + | Operator::I32x4MinU + | Operator::I32x4MaxS + | Operator::I32x4MaxU + | Operator::I32x4DotI16x8S + | Operator::I32x4ExtMulLowI16x8S + | Operator::I32x4ExtMulHighI16x8S + | Operator::I32x4ExtMulLowI16x8U + | Operator::I32x4ExtMulHighI16x8U + | Operator::I64x2Abs + | Operator::I64x2Neg + | Operator::I64x2AllTrue + | Operator::I64x2Bitmask + | Operator::I64x2ExtendLowI32x4S + | Operator::I64x2ExtendHighI32x4S + | Operator::I64x2ExtendLowI32x4U + | Operator::I64x2ExtendHighI32x4U + | Operator::I64x2Shl + | Operator::I64x2ShrS + | Operator::I64x2ShrU + | Operator::I64x2Add + | 
Operator::I64x2Sub + | Operator::I64x2Mul + | Operator::I64x2ExtMulLowI32x4S + | Operator::I64x2ExtMulHighI32x4S + | Operator::I64x2ExtMulLowI32x4U + | Operator::I64x2ExtMulHighI32x4U + | Operator::F32x4Ceil + | Operator::F32x4Floor + | Operator::F32x4Trunc + | Operator::F32x4Nearest + | Operator::F32x4Abs + | Operator::F32x4Neg + | Operator::F32x4Sqrt + | Operator::F32x4Add + | Operator::F32x4Sub + | Operator::F32x4Mul + | Operator::F32x4Div + | Operator::F32x4Min + | Operator::F32x4Max + | Operator::F32x4PMin + | Operator::F32x4PMax + | Operator::F64x2Ceil + | Operator::F64x2Floor + | Operator::F64x2Trunc + | Operator::F64x2Nearest + | Operator::F64x2Abs + | Operator::F64x2Neg + | Operator::F64x2Sqrt + | Operator::F64x2Add + | Operator::F64x2Sub + | Operator::F64x2Mul + | Operator::F64x2Div + | Operator::F64x2Min + | Operator::F64x2Max + | Operator::F64x2PMin + | Operator::F64x2PMax + | Operator::I32x4TruncSatF32x4S + | Operator::I32x4TruncSatF32x4U + | Operator::F32x4ConvertI32x4S + | Operator::F32x4ConvertI32x4U + | Operator::I32x4TruncSatF64x2SZero + | Operator::I32x4TruncSatF64x2UZero + | Operator::F64x2ConvertLowI32x4S + | Operator::F64x2ConvertLowI32x4U + | Operator::F32x4DemoteF64x2Zero + | Operator::F64x2PromoteLowF32x4 + | Operator::I8x16RelaxedSwizzle + | Operator::I32x4RelaxedTruncF32x4S + | Operator::I32x4RelaxedTruncF32x4U + | Operator::I32x4RelaxedTruncF64x2SZero + | Operator::I32x4RelaxedTruncF64x2UZero + | Operator::F32x4RelaxedMadd + | Operator::F32x4RelaxedNmadd + | Operator::F64x2RelaxedMadd + | Operator::F64x2RelaxedNmadd + | Operator::I8x16RelaxedLaneselect + | Operator::I16x8RelaxedLaneselect + | Operator::I32x4RelaxedLaneselect + | Operator::I64x2RelaxedLaneselect + | Operator::F32x4RelaxedMin + | Operator::F32x4RelaxedMax + | Operator::F64x2RelaxedMin + | Operator::F64x2RelaxedMax + | Operator::I16x8RelaxedQ15mulrS + | Operator::I16x8RelaxedDotI8x16I7x16S + | Operator::I32x4RelaxedDotI8x16I7x16AddS + | Operator::CallRef { .. 
} + | Operator::ReturnCallRef { .. } + | Operator::RefAsNonNull + | Operator::BrOnNull { .. } + | Operator::BrOnNonNull { .. } + | Operator::GlobalAtomicGet { .. } + | Operator::GlobalAtomicSet { .. } + | Operator::GlobalAtomicRmwAdd { .. } + | Operator::GlobalAtomicRmwSub { .. } + | Operator::GlobalAtomicRmwAnd { .. } + | Operator::GlobalAtomicRmwOr { .. } + | Operator::GlobalAtomicRmwXor { .. } + | Operator::GlobalAtomicRmwXchg { .. } + | Operator::GlobalAtomicRmwCmpxchg { .. } + | Operator::TableAtomicGet { .. } + | Operator::TableAtomicSet { .. } + | Operator::TableAtomicRmwXchg { .. } + | Operator::TableAtomicRmwCmpxchg { .. } + | Operator::StructAtomicGet { .. } + | Operator::StructAtomicGetS { .. } + | Operator::StructAtomicGetU { .. } + | Operator::StructAtomicSet { .. } + | Operator::StructAtomicRmwAdd { .. } + | Operator::StructAtomicRmwSub { .. } + | Operator::StructAtomicRmwAnd { .. } + | Operator::StructAtomicRmwOr { .. } + | Operator::StructAtomicRmwXor { .. } + | Operator::StructAtomicRmwXchg { .. } + | Operator::StructAtomicRmwCmpxchg { .. } + | Operator::ArrayAtomicGet { .. } + | Operator::ArrayAtomicGetS { .. } + | Operator::ArrayAtomicGetU { .. } + | Operator::ArrayAtomicSet { .. } + | Operator::ArrayAtomicRmwAdd { .. } + | Operator::ArrayAtomicRmwSub { .. } + | Operator::ArrayAtomicRmwAnd { .. } + | Operator::ArrayAtomicRmwOr { .. } + | Operator::ArrayAtomicRmwXor { .. } + | Operator::ArrayAtomicRmwXchg { .. } + | Operator::ArrayAtomicRmwCmpxchg { .. 
} + | Operator::RefI31Shared => todo!("{operator:?}"), + } +} + +pub(crate) fn gas_metering_middleware(initial_limit: u64) -> Arc { + Arc::new(Metering::new(initial_limit, |operator| { + cycles(operator) * MULTIPLIER / SCALING_FACTOR + })) +} diff --git a/executor/wasmer_backend/src/middleware/gatekeeper.rs b/executor/wasmer_backend/src/middleware/gatekeeper.rs new file mode 100644 index 0000000000..0021543940 --- /dev/null +++ b/executor/wasmer_backend/src/middleware/gatekeeper.rs @@ -0,0 +1,660 @@ +use wasmer::{wasmparser::Operator, FunctionMiddleware, MiddlewareError, ModuleMiddleware}; + +const MIDDLEWARE_NAME: &str = "Gatekeeper"; +const FLOATING_POINTS_NOT_ALLOWED: &str = "Floating point opcodes are not allowed"; + +#[inline] +fn extension_not_allowed_error(extension: &str) -> MiddlewareError { + MiddlewareError::new( + MIDDLEWARE_NAME, + format!("Wasm `{extension}` extension is not allowed"), + ) +} + +#[derive(Copy, Clone, Debug)] +pub(crate) struct GatekeeperConfig { + /// Allow the `bulk_memory` proposal. + bulk_memory: bool, + /// Allow the `exceptions` proposal. + exceptions: bool, + /// Allow the `function_references` proposal. + function_references: bool, + /// Allow the `gc` proposal. + gc: bool, + /// Allow the `legacy_exceptions` proposal. + #[allow(dead_code)] + legacy_exceptions: bool, + /// Allow the `memory_control` proposal. + memory_control: bool, + /// Allow the `mvp` proposal. + mvp: bool, + /// Allow the `reference_types` proposal. + reference_types: bool, + /// Allow the `relaxed_simd` proposal. + relaxed_simd: bool, + /// Allow the `saturating_float_to_int` proposal. + /// + /// This *requires* canonicalized NaNs enabled in the compiler config. + saturating_float_to_int: bool, + /// Allow the `shared_everything_threads` proposal. + #[allow(dead_code)] + shared_everything_threads: bool, + /// Allow the `sign_extension` proposal. + sign_extension: bool, + /// Allow the `simd` proposal. 
+ simd: bool, + /// Allow the `stack_switching` proposal. + #[allow(dead_code)] + stack_switching: bool, + /// Allow the `tail_call` proposal. + tail_call: bool, + /// Allow the `threads` proposal. + threads: bool, + /// Allow the `wide_arithmetic` proposal. + #[allow(dead_code)] + wide_arithmetic: bool, + /// Allow floating point opcodes from `mvp` extension. + /// + /// This *requires* canonicalized NaNs enabled in the compiler config. + allow_floating_points: bool, +} + +/// Check if the operator is a floating point operator. +#[inline] +const fn is_floating_point(operator: &wasmer::wasmparser::Operator<'_>) -> bool { + match operator { + // mvp + Operator::F32Load {..} | + Operator::F64Load {..} | + Operator::F32Store {..} | + Operator::F64Store {..} | + Operator::F32Const {..} | + Operator::F64Const {..} | + Operator::F32Abs | + Operator::F32Neg | + Operator::F32Ceil | + Operator::F32Floor | + Operator::F32Trunc | + Operator::F32Nearest | + Operator::F32Sqrt | + Operator::F32Add | + Operator::F32Sub | + Operator::F32Mul | + Operator::F32Div | + Operator::F32Min | + Operator::F32Max | + Operator::F32Copysign | + Operator::F64Abs | + Operator::F64Neg | + Operator::F64Ceil | + Operator::F64Floor | + Operator::F64Trunc | + Operator::F64Nearest | + Operator::F64Sqrt | + Operator::F64Add | + Operator::F64Sub | + Operator::F64Mul | + Operator::F64Div | + Operator::F64Min | + Operator::F64Max | + Operator::F64Copysign | + Operator::F32Eq | + Operator::F32Ne | + Operator::F32Lt | + Operator::F32Gt | + Operator::F32Le | + Operator::F32Ge | + Operator::F64Eq | + Operator::F64Ne | + Operator::F64Lt | + Operator::F64Gt | + Operator::F64Le | + Operator::F64Ge | + Operator::I32TruncF32S | + Operator::I32TruncF32U | + Operator::I32TruncF64S | + Operator::I32TruncF64U | + Operator::I64TruncF32S | + Operator::I64TruncF32U | + Operator::I64TruncF64S | + Operator::I64TruncF64U | + Operator::F32ConvertI32S | + Operator::F32ConvertI32U | + Operator::F32ConvertI64S | + 
Operator::F32ConvertI64U | + Operator::F32DemoteF64 | + Operator::F64ConvertI32S | + Operator::F64ConvertI32U | + Operator::F64ConvertI64S | + Operator::F64ConvertI64U | + Operator::F64PromoteF32 | + Operator::I32ReinterpretF32 | + Operator::I64ReinterpretF64 | + Operator::F32ReinterpretI32 | + Operator::F64ReinterpretI64 | + // saturating_float_to_int + Operator::I32TruncSatF32S | + Operator::I32TruncSatF32U | + Operator::I32TruncSatF64S | + Operator::I32TruncSatF64U | + Operator::I64TruncSatF32S | + Operator::I64TruncSatF32U | + Operator::I64TruncSatF64S | + Operator::I64TruncSatF64U | + // simd + Operator::F32x4ExtractLane{..} | + Operator::F32x4ReplaceLane{..} | + Operator::F64x2ExtractLane{..} | + Operator::F64x2ReplaceLane{..} | + Operator::F32x4Splat | + Operator::F64x2Splat | + Operator::F32x4Eq | + Operator::F32x4Ne | + Operator::F32x4Lt | + Operator::F32x4Gt | + Operator::F32x4Le | + Operator::F32x4Ge | + Operator::F64x2Eq | + Operator::F64x2Ne | + Operator::F64x2Lt | + Operator::F64x2Gt | + Operator::F64x2Le | + Operator::F64x2Ge | + Operator::F32x4Ceil | + Operator::F32x4Floor | + Operator::F32x4Trunc | + Operator::F32x4Nearest | + Operator::F32x4Abs | + Operator::F32x4Neg | + Operator::F32x4Sqrt | + Operator::F32x4Add | + Operator::F32x4Sub | + Operator::F32x4Mul | + Operator::F32x4Div | + Operator::F32x4Min | + Operator::F32x4Max | + Operator::F32x4PMin | + Operator::F32x4PMax | + Operator::F64x2Ceil | + Operator::F64x2Floor | + Operator::F64x2Trunc | + Operator::F64x2Nearest | + Operator::F64x2Abs | + Operator::F64x2Neg | + Operator::F64x2Sqrt | + Operator::F64x2Add | + Operator::F64x2Sub | + Operator::F64x2Mul | + Operator::F64x2Div | + Operator::F64x2Min | + Operator::F64x2Max | + Operator::F64x2PMin | + Operator::F64x2PMax | + Operator::I32x4TruncSatF32x4S | + Operator::I32x4TruncSatF32x4U | + Operator::F32x4ConvertI32x4S | + Operator::F32x4ConvertI32x4U | + Operator::I32x4TruncSatF64x2SZero | + Operator::I32x4TruncSatF64x2UZero | + 
Operator::F64x2ConvertLowI32x4S | + Operator::F64x2ConvertLowI32x4U | + Operator::F32x4DemoteF64x2Zero | + Operator::F64x2PromoteLowF32x4 | + // relaxed_simd extension + Operator::I32x4RelaxedTruncF32x4S | + Operator::I32x4RelaxedTruncF32x4U | + Operator::I32x4RelaxedTruncF64x2SZero | + Operator::I32x4RelaxedTruncF64x2UZero | + Operator::F32x4RelaxedMadd | + Operator::F32x4RelaxedNmadd | + Operator::F64x2RelaxedMadd | + Operator::F64x2RelaxedNmadd | + Operator::F32x4RelaxedMin | + Operator::F32x4RelaxedMax | + Operator::F64x2RelaxedMin | + Operator::F64x2RelaxedMax => true, + _ => false, + } +} + +impl Default for GatekeeperConfig { + fn default() -> Self { + Self { + bulk_memory: true, + exceptions: false, + function_references: false, + gc: false, + legacy_exceptions: false, + memory_control: false, + mvp: true, + reference_types: false, + relaxed_simd: false, + saturating_float_to_int: false, + shared_everything_threads: false, + sign_extension: true, + simd: false, + stack_switching: false, + tail_call: false, + threads: false, + wide_arithmetic: false, + // Not yet ready to enable this; needs updated benchmark to accomodate overhead of + // canonicalized NaNs and manual validation. + allow_floating_points: false, + } + } +} + +#[derive(Debug, Default)] +pub(crate) struct Gatekeeper { + config: GatekeeperConfig, +} + +impl Gatekeeper { + pub(crate) fn new(config: GatekeeperConfig) -> Self { + Self { config } + } +} + +impl ModuleMiddleware for Gatekeeper { + fn generate_function_middleware( + &self, + _local_function_index: wasmer::LocalFunctionIndex, + ) -> Box { + Box::new(FunctionGatekeeper::new(self.config)) + } +} + +#[derive(Debug)] +struct FunctionGatekeeper { + config: GatekeeperConfig, +} + +impl FunctionGatekeeper { + fn new(config: GatekeeperConfig) -> Self { + Self { config } + } + + /// Ensure that floating point opcodes are allowed. 
+ fn ensure_floating_point_allowed( + &self, + operator: &wasmer::wasmparser::Operator<'_>, + ) -> Result<(), wasmer::MiddlewareError> { + if !self.config.allow_floating_points && is_floating_point(operator) { + return Err(MiddlewareError::new( + MIDDLEWARE_NAME, + FLOATING_POINTS_NOT_ALLOWED, + )); + } + Ok(()) + } + + fn validated_push_operator<'b, 'a: 'b>( + &self, + operator: wasmer::wasmparser::Operator<'a>, + state: &mut wasmer::MiddlewareReaderState<'b>, + ) -> Result<(), wasmer::MiddlewareError> { + // This is a late check as we first check if given extension is allowed and then check if + // floating point opcodes are allowed. This is because different Wasm extensions do + // contain floating point opcodes and this approach makes all the gatekeeping more robust. + self.ensure_floating_point_allowed(&operator)?; + // Push the operator to the state. + state.push_operator(operator); + Ok(()) + } + + fn bulk_memory<'b, 'a: 'b>( + &mut self, + operator: wasmer::wasmparser::Operator<'a>, + state: &mut wasmer::MiddlewareReaderState<'b>, + ) -> Result<(), wasmer::MiddlewareError> { + if self.config.bulk_memory { + self.validated_push_operator(operator, state)?; + Ok(()) + } else { + Err(extension_not_allowed_error("bulk_memory")) + } + } + + fn exceptions<'b, 'a: 'b>( + &mut self, + operator: wasmer::wasmparser::Operator<'a>, + state: &mut wasmer::MiddlewareReaderState<'b>, + ) -> Result<(), wasmer::MiddlewareError> { + if self.config.exceptions { + self.validated_push_operator(operator, state)?; + Ok(()) + } else { + Err(extension_not_allowed_error("exceptions")) + } + } + + fn function_references<'b, 'a: 'b>( + &mut self, + operator: wasmer::wasmparser::Operator<'a>, + state: &mut wasmer::MiddlewareReaderState<'b>, + ) -> Result<(), wasmer::MiddlewareError> { + if self.config.function_references { + self.validated_push_operator(operator, state)?; + Ok(()) + } else { + Err(extension_not_allowed_error("function_references")) + } + } + + fn gc<'b, 'a: 'b>( + &mut 
self, + operator: wasmer::wasmparser::Operator<'a>, + state: &mut wasmer::MiddlewareReaderState<'b>, + ) -> Result<(), wasmer::MiddlewareError> { + if self.config.gc { + self.validated_push_operator(operator, state)?; + Ok(()) + } else { + Err(extension_not_allowed_error("gc")) + } + } + + #[allow(dead_code)] + fn legacy_exceptions<'b, 'a: 'b>( + &mut self, + operator: wasmer::wasmparser::Operator<'a>, + state: &mut wasmer::MiddlewareReaderState<'b>, + ) -> Result<(), wasmer::MiddlewareError> { + if self.config.legacy_exceptions { + self.validated_push_operator(operator, state)?; + Ok(()) + } else { + Err(extension_not_allowed_error("legacy_exceptions")) + } + } + + fn memory_control<'b, 'a: 'b>( + &mut self, + operator: wasmer::wasmparser::Operator<'a>, + state: &mut wasmer::MiddlewareReaderState<'b>, + ) -> Result<(), wasmer::MiddlewareError> { + if self.config.memory_control { + self.validated_push_operator(operator, state)?; + Ok(()) + } else { + Err(extension_not_allowed_error("memory_control")) + } + } + + fn mvp<'b, 'a: 'b>( + &mut self, + operator: wasmer::wasmparser::Operator<'a>, + state: &mut wasmer::MiddlewareReaderState<'b>, + ) -> Result<(), wasmer::MiddlewareError> { + if self.config.mvp { + self.validated_push_operator(operator, state)?; + Ok(()) + } else { + Err(extension_not_allowed_error("mvp")) + } + } + + fn reference_types<'b, 'a: 'b>( + &mut self, + operator: wasmer::wasmparser::Operator<'a>, + state: &mut wasmer::MiddlewareReaderState<'b>, + ) -> Result<(), wasmer::MiddlewareError> { + if self.config.reference_types { + self.validated_push_operator(operator, state)?; + Ok(()) + } else { + Err(extension_not_allowed_error("reference_types")) + } + } + + fn relaxed_simd<'b, 'a: 'b>( + &mut self, + operator: wasmer::wasmparser::Operator<'a>, + state: &mut wasmer::MiddlewareReaderState<'b>, + ) -> Result<(), wasmer::MiddlewareError> { + if self.config.relaxed_simd { + self.validated_push_operator(operator, state)?; + Ok(()) + } else { + 
Err(extension_not_allowed_error("relaxed_simd")) + } + } + + fn saturating_float_to_int<'b, 'a: 'b>( + &mut self, + operator: wasmer::wasmparser::Operator<'a>, + state: &mut wasmer::MiddlewareReaderState<'b>, + ) -> Result<(), wasmer::MiddlewareError> { + if self.config.saturating_float_to_int { + self.validated_push_operator(operator, state)?; + Ok(()) + } else { + Err(extension_not_allowed_error("saturating_float_to_int")) + } + } + + #[allow(dead_code)] + fn shared_everything_threads<'b, 'a: 'b>( + &mut self, + operator: wasmer::wasmparser::Operator<'a>, + state: &mut wasmer::MiddlewareReaderState<'b>, + ) -> Result<(), wasmer::MiddlewareError> { + if self.config.shared_everything_threads { + self.validated_push_operator(operator, state)?; + Ok(()) + } else { + Err(extension_not_allowed_error("shared_everything_threads")) + } + } + + fn sign_extension<'b, 'a: 'b>( + &mut self, + operator: wasmer::wasmparser::Operator<'a>, + state: &mut wasmer::MiddlewareReaderState<'b>, + ) -> Result<(), wasmer::MiddlewareError> { + if self.config.sign_extension { + self.validated_push_operator(operator, state)?; + Ok(()) + } else { + Err(extension_not_allowed_error("sign_extension")) + } + } + + fn simd<'b, 'a: 'b>( + &mut self, + operator: wasmer::wasmparser::Operator<'a>, + state: &mut wasmer::MiddlewareReaderState<'b>, + ) -> Result<(), wasmer::MiddlewareError> { + if self.config.simd { + self.validated_push_operator(operator, state)?; + Ok(()) + } else { + Err(extension_not_allowed_error("simd")) + } + } + + #[allow(dead_code)] + fn stack_switching<'b, 'a: 'b>( + &mut self, + operator: wasmer::wasmparser::Operator<'a>, + state: &mut wasmer::MiddlewareReaderState<'b>, + ) -> Result<(), wasmer::MiddlewareError> { + if self.config.stack_switching { + self.validated_push_operator(operator, state)?; + Ok(()) + } else { + Err(extension_not_allowed_error("stack_switching")) + } + } + + fn tail_call<'b, 'a: 'b>( + &mut self, + operator: wasmer::wasmparser::Operator<'a>, + state: 
&mut wasmer::MiddlewareReaderState<'b>, + ) -> Result<(), wasmer::MiddlewareError> { + if self.config.tail_call { + self.validated_push_operator(operator, state)?; + Ok(()) + } else { + Err(extension_not_allowed_error("tail_call")) + } + } + fn threads<'b, 'a: 'b>( + &mut self, + operator: wasmer::wasmparser::Operator<'a>, + state: &mut wasmer::MiddlewareReaderState<'b>, + ) -> Result<(), wasmer::MiddlewareError> { + if self.config.threads { + self.validated_push_operator(operator, state)?; + Ok(()) + } else { + Err(extension_not_allowed_error("threads")) + } + } + + #[allow(dead_code)] + fn wide_arithmetic<'b, 'a: 'b>( + &mut self, + operator: wasmer::wasmparser::Operator<'a>, + state: &mut wasmer::MiddlewareReaderState<'b>, + ) -> Result<(), wasmer::MiddlewareError> { + if self.config.wide_arithmetic { + self.validated_push_operator(operator, state)?; + Ok(()) + } else { + Err(extension_not_allowed_error("wide_arithmetic")) + } + } +} + +impl FunctionMiddleware for FunctionGatekeeper { + fn feed<'a>( + &mut self, + operator: wasmer::wasmparser::Operator<'a>, + state: &mut wasmer::MiddlewareReaderState<'a>, + ) -> Result<(), wasmer::MiddlewareError> { + macro_rules! match_op { + ($op:ident { $($payload:tt)* }) => { + $op { .. } + }; + ($op:ident) => { + $op + }; + } + + macro_rules! gatekeep { + ($( @$proposal:ident $op:ident $({ $($payload:tt)* })? => $visit:ident)*) => {{ + use wasmer::wasmparser::Operator::*; + match operator { + $( + match_op!($op $({ $($payload)* })?) 
=> self.$proposal(operator, state), + )* + } + }} + } + + wasmer::wasmparser::for_each_operator!(gatekeep) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::sync::Arc; + use wasmer::{sys::EngineBuilder, CompilerConfig, Module, Singlepass, Store, WasmError}; + + #[test] + fn mvp_opcodes_allowed() { + let bytecode = wat::parse_str( + r#" + (module + (func (export "add") (param i32 i32) (result i32) + local.get 0 + local.get 1 + i32.add) + + ) + "#, + ) + .unwrap(); + let mut gatekeeper = Gatekeeper::default(); + gatekeeper.config.mvp = true; + let gatekeeper = Arc::new(gatekeeper); + let mut compiler_config = Singlepass::default(); + compiler_config.push_middleware(gatekeeper); + let store = Store::new(EngineBuilder::new(compiler_config)); + let _module = Module::new(&store, &bytecode).unwrap(); + } + #[test] + fn mvp_opcodes_allowed_without_floating_points() { + let bytecode = wat::parse_str( + r#" + (module + (func (export "add") (param f32 f32) (result f32) + local.get 0 + local.get 1 + f32.add) + ) + "#, + ) + .unwrap(); + let mut gatekeeper = Gatekeeper::default(); + gatekeeper.config.mvp = true; + gatekeeper.config.allow_floating_points = false; + let gatekeeper = Arc::new(gatekeeper); + let mut compiler_config = Singlepass::default(); + compiler_config.push_middleware(gatekeeper); + let store = Store::new(EngineBuilder::new(compiler_config)); + let error = Module::new(&store, &bytecode).unwrap_err(); + let middleware = match error { + wasmer::CompileError::Wasm(WasmError::Middleware(middleware)) => middleware, + _ => panic!("Expected a middleware error"), + }; + assert_eq!(middleware.message, FLOATING_POINTS_NOT_ALLOWED); + } + + #[test] + fn mvp_opcodes_allowed_with_floating_points() { + let bytecode = wat::parse_str( + r#" + (module + (func (export "add") (param f32 f32) (result f32) + local.get 0 + local.get 1 + f32.add) + ) + "#, + ) + .unwrap(); + let mut gatekeeper = Gatekeeper::default(); + gatekeeper.config.mvp = true; + 
gatekeeper.config.allow_floating_points = true; + let gatekeeper = Arc::new(gatekeeper); + let mut compiler_config = Singlepass::default(); + compiler_config.push_middleware(gatekeeper); + let store = Store::new(EngineBuilder::new(compiler_config)); + let _module = Module::new(&store, &bytecode).unwrap(); + } + #[test] + fn mvp_opcodes_not_allowed() { + let bytecode = wat::parse_str( + r#" + (module + (func (export "add") (param i32 i32) (result i32) + local.get 0 + local.get 1 + i32.add) + ) + "#, + ) + .unwrap(); + let mut gatekeeper = Gatekeeper::default(); + gatekeeper.config.mvp = false; + let gatekeeper = Arc::new(gatekeeper); + let mut compiler_config = Singlepass::default(); + compiler_config.push_middleware(gatekeeper); + let store = Store::new(EngineBuilder::new(compiler_config)); + let error = Module::new(&store, &bytecode).unwrap_err(); + assert_eq!(error.to_string(), "WebAssembly translation error: Error in middleware Gatekeeper: Wasm `mvp` extension is not allowed"); + } +} diff --git a/generate-chainspec.sh b/generate-chainspec.sh index b5d45bf5ad..115bee4cc1 100755 --- a/generate-chainspec.sh +++ b/generate-chainspec.sh @@ -8,8 +8,8 @@ generate_timestamp() { local DELAY=${1} local SCRIPT=( - "from datetime import datetime, timedelta;" - "print((datetime.utcnow() + timedelta(seconds=${DELAY})).isoformat('T') + 'Z')" + "from datetime import datetime, timedelta, timezone;" + "print((datetime.now(timezone.utc).replace(tzinfo=None) + timedelta(seconds=${DELAY})).isoformat('T') + 'Z')" ) python3 -c "${SCRIPT[*]}" @@ -36,4 +36,4 @@ main() { generate_chainspec ${BASEDIR} ${TIMESTAMP} } -main ${@} +main $@ diff --git a/images/Casper-association-logo-new.svg b/images/Casper-association-logo-new.svg new file mode 100644 index 0000000000..1ad71fbc08 --- /dev/null +++ b/images/Casper-association-logo-new.svg @@ -0,0 +1,94 @@ + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/images/CasperLabs_Logo_Horizontal_RGB.png 
b/images/CasperLabs_Logo_Horizontal_RGB.png deleted file mode 100644 index ebe5087c31..0000000000 Binary files a/images/CasperLabs_Logo_Horizontal_RGB.png and /dev/null differ diff --git a/images/Casper_Logo_Favicon.png b/images/Casper_Logo_Favicon.png new file mode 100644 index 0000000000..58434885e6 Binary files /dev/null and b/images/Casper_Logo_Favicon.png differ diff --git a/images/Casper_Logo_Favicon_48.png b/images/Casper_Logo_Favicon_48.png new file mode 100644 index 0000000000..f1333a2efe Binary files /dev/null and b/images/Casper_Logo_Favicon_48.png differ diff --git a/images/casper-association-logo-primary.svg b/images/casper-association-logo-primary.svg new file mode 100644 index 0000000000..783ec7dc3a --- /dev/null +++ b/images/casper-association-logo-primary.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/joiner-dev-tmux.sh b/joiner-dev-tmux.sh deleted file mode 100755 index 810c58f061..0000000000 --- a/joiner-dev-tmux.sh +++ /dev/null @@ -1,226 +0,0 @@ -#!/usr/bin/env bash -# -*- mode: sh; fill-column: 80; sh-basic-offset: 4; -*- - -# This file is formatted with `shfmt -i 4 -ci` -# and linted with shellcheck - -set -o errexit -set -o nounset -set -o pipefail - -tmux_new_window() { - local SESSION=${1} - local ID=${2} - local CMD=${3} - local NAME="${SESSION}-${ID}" - - tmux new-window -t "${SESSION}:${ID}" -n "${NAME}" - tmux send-keys -t "${NAME}" "${CMD}" C-m -} - -build_system_contracts() { - local CMD=( - "make -s" - "build-contracts-rs" - "CARGO_FLAGS=--quiet" - ) - - echo "Building system contracts..." - ${CMD[*]} -} - -build_node() { - local CMD=( - "cargo build" - "--quiet" - "--manifest-path=node/Cargo.toml" - ) - - echo "Building node..." - ${CMD[*]} -} - -build_casper_client() { - local CMD=( - "cargo build" - "--quiet" - "-p casper-client" - ) - - echo "Building casper-client..." 
- ${CMD[*]} -} - -generate_timestamp() { - local DELAY=${1} - - local SCRIPT=( - "from datetime import datetime, timedelta;" - "print((datetime.utcnow() + timedelta(seconds=${DELAY})).isoformat('T') + 'Z')" - ) - - python3 -c "${SCRIPT[*]}" -} - -generate_chainspec() { - local BASEDIR=${1} - local TIMESTAMP=${2} - local SOURCE="${BASEDIR}/resources/joiner/chainspec.toml.in" - local TARGET="${BASEDIR}/resources/joiner/chainspec.toml" - - export BASEDIR - export TIMESTAMP - - echo "Generating chainspec..." - envsubst <"${SOURCE}" >"${TARGET}" -} - -run_node() { - local EXECUTABLE=${1} - local SESSION=${2} - local ID=${3} - local CONFIG_DIR=${4} - local DATA_DIR=${5} - local BOOTSTRAP_RPC_PORT=${6} - - local CONFIG_TOML_PATH="${CONFIG_DIR}/config.toml" - local SECRET_KEY_PATH="${CONFIG_DIR}/secret_keys/node-${ID}.pem" - local STORAGE_DIR="${DATA_DIR}/node-${ID}-storage" - - local CMD=( - "${EXECUTABLE}" - "validator" - "${CONFIG_TOML_PATH}" - "-C consensus.secret_key_path=${SECRET_KEY_PATH}" - "-C storage.path=${STORAGE_DIR}" - "-C rpc_server.address='0.0.0.0:${BOOTSTRAP_RPC_PORT}'" - "1> >(tee ${DATA_DIR}/node-${ID}.log) 2> >(tee ${DATA_DIR}/node-${ID}.log.stderr)" - ) - - mkdir -p "${STORAGE_DIR}" - tmux_new_window "${SESSION}" "${ID}" "${CMD[*]}" - echo "Booting node ${ID}..." 
-} - -run_joiner_node() { - local EXECUTABLE=${1} - local SESSION=${2} - local ID=${3} - local CONFIG_DIR=${4} - local DATA_DIR=${5} - local BOOTSTRAP_RPC_PORT=${6} - - local RPC_URL="http://127.0.0.1:${BOOTSTRAP_RPC_PORT}" - local GET_LATEST_BLOCK_HASH_CMD=( - "cargo" - "run" - "--quiet" - "-p" "casper-client" - "--" - "get-block" - "--node-address=${RPC_URL}" - - "|" - - "jq" - ".result.block.hash" - ) - - local GET_TRUSTED_HASH=( - "TRUSTED_HASH='null';" - "while [[ \${TRUSTED_HASH} == 'null' ]] ; do" - "TRUSTED_HASH=\$(${GET_LATEST_BLOCK_HASH_CMD[*]});" - "echo \${TRUSTED_HASH};" - "sleep 5;" - "done" - "1> >(tee ${DATA_DIR}/node-${ID}.log) 2> >(tee ${DATA_DIR}/node-${ID}.log.stderr)" - ) - - local CONFIG_TOML_PATH="${CONFIG_DIR}/config.toml" - local SECRET_KEY_PATH="${CONFIG_DIR}/secret_keys/node-${ID}.pem" - local STORAGE_DIR="${DATA_DIR}/node-${ID}-storage" - local RUN_JOINER=( - "${EXECUTABLE}" - "validator" - "${CONFIG_TOML_PATH}" - "-C consensus.secret_key_path=${SECRET_KEY_PATH}" - "-C storage.path=${STORAGE_DIR}" - "-C network.bind_address='0.0.0.0:0'" - "-C rpc_server.address='0.0.0.0:0'" - "-C rest_server.address='0.0.0.0:0'" - "-C event_stream_server.address='0.0.0.0:0'" - "-C node.trusted_hash=\"\${TRUSTED_HASH}\"" - "1> >(tee -a ${DATA_DIR}/node-${ID}.log) 2> >(tee -a ${DATA_DIR}/node-${ID}.log.stderr)" - ) - - local CMD=( - "${GET_TRUSTED_HASH[*]}" - "&&" - "${RUN_JOINER[*]}" - ) - - tmux_new_window "${SESSION}" "${ID}" "${CMD[*]}" - echo "Launching joiner node ${ID}..." -} - -check_for_bootstrap() { - local BOOTSTRAP_PORT=34553 - - while ! 
(: /dev/null; do - sleep 1 - done -} - -main() { - local BASEDIR - BASEDIR="$(readlink -f "$(dirname "${0}")")" - local CONFIG_DIR="${BASEDIR}/resources/joiner" - local EXECUTABLE="${BASEDIR}/target/debug/casper-node" - - local DELAY=${1:-40} - local TIMESTAMP - TIMESTAMP="$(generate_timestamp "${DELAY}")" - - local BOOTSTRAP_RPC_PORT="${BOOTSTRAP_RPC_PORT:-50101}" - - export RUST_LOG="${RUST_LOG:-debug}" - export TMPDIR="${TMPDIR:-$(mktemp -d)}" - - # create a new tmux session - # if one already exists this will fail and the program will exit - local SESSION="${SESSION:-local}" - tmux new-session -d -s "${SESSION}" - - if [[ ! -x "${EXECUTABLE}" || ! -v QUICK_START ]]; then - build_system_contracts - - build_node - - build_casper_client - fi - - generate_chainspec "${BASEDIR}" "${TIMESTAMP}" - - local ID=1 - run_node "${EXECUTABLE}" "${SESSION}" "${ID}" "${CONFIG_DIR}" "${TMPDIR}" "${BOOTSTRAP_RPC_PORT}" - check_for_bootstrap - - local ID=2 - run_joiner_node "${EXECUTABLE}" "${SESSION}" "${ID}" "${CONFIG_DIR}" "${TMPDIR}" "${BOOTSTRAP_RPC_PORT}" - - echo - echo "DELAY : ${DELAY}" - echo "RPC : http://127.0.0.1:${BOOTSTRAP_RPC_PORT}" - echo "RUST_LOG : ${RUST_LOG}" - echo "TIMESTAMP : ${TIMESTAMP}" - echo "TMPDIR : ${TMPDIR}" - echo - echo "To view: " - echo " tmux attach-session -t ${SESSION}" - echo - echo "To kill: " - echo " tmux kill-session -t ${SESSION}" - echo -} - -main "${@}" diff --git a/nix/.gitignore b/nix/.gitignore deleted file mode 100644 index b2be92b7db..0000000000 --- a/nix/.gitignore +++ /dev/null @@ -1 +0,0 @@ -result diff --git a/nix/README.md b/nix/README.md deleted file mode 100644 index ddb6520527..0000000000 --- a/nix/README.md +++ /dev/null @@ -1,63 +0,0 @@ -# Nix-based kubernetes test environment - -All operations are based on having the `nix` package manager available. 
It can be easily be installed using the quickstart instructions found at https://nixos.org/download.html#nix-quick-install, which are just - -```console -$ curl -L https://nixos.org/nix/install | sh -``` - -## Building a docker image of a node - -To build your current source into a container image, enter a nix-shell in *root folder* of the repository, then run `nix/build-node.sh`. - -```console -$ nix-shell -$ nix/build-node.sh -[...] -Created new docker image casper-node:f2b9cd7a-dirty. - -Load into local docker: -docker load -i ./result - -Publish image -skopeo --insecure-policy copy docker-archive:./result docker://clmarc/casper-node:f2b9cd7a-dirty -``` - -The image will be inside the nix store as a docker archive file, with a local symlink `result` pointing to it. As shown above, there are now options to either upload to a repository (provided you have credentials), or just import it locally. The image tag will be based on the current state of the source tree, if uncommitted changes to any files are present, a `-dirty` will be appended. - -## Setting up a new kubernetes cluster - -One option is to use a hosted kubernetes solution, e.g. offerings from Digital Ocean, Amazon or Google. However, hosting a cluster for testing purpose is a cheaper and potentially simpler alternative. We recommend using [k3s](https://k3s.io) to setup a cluster, which is lighter on resources at the cost of not offering high availability for the control plane - a feature not needed for our testing environments. - -Here is a brief overview on how to create a cluster on Hetzner's cheap cloud storage (see also the [quick start instructions of k3s](https://rancher.com/docs/k3s/latest/en/quick-start/)): - -### Setting up the master node - -1. Create any number of nodes, one of which will be the master node. -1. Install k3s on the master using `curl -sfL https://get.k3s.io | sh`. -1. 
Download the kubeconfig at `/etc/rancher/k3s/k3s.yaml` and make sure to replace the localhost IP in `clusters.cluster.server` with the master node's IP. -1. Make note of the server token in `/var/lib/rancher/k3s/server/node-token`. -1. For any non-master node, run `curl -sfL https://get.k3s.io | K3S_URL=https://${SERVERIP}:6443 K3S_TOKEN=${NODETOKEN} sh -`, replacing `${SERVERIP}` with the master node's IP and `${NODETOKEN}` with the previously mentioned token. - -Setting `KUBECONFIG` to the path of the downloaded kubeconfig and running `kubectl get nodes` should show all nodes as online shortly after. - -### Enabling storage - -The final step is to create a storage provider, for which we will be using Longhorn. This is required to be able to offer persistent storage on nodes, see the [Longhorn example in the k3s docs](https://rancher.com/docs/k3s/latest/en/storage/) for details. - -The prerequisite is that the `open-iscsi` is installed **on the node itself**. Thus we enter each node and install it: - -```console -# Shortcut to get node IPs -NODE_IPS=$(kubectl get nodes -o 'jsonpath={.items[*].status.addresses[?(@.type=="InternalIP")].address}') -for IP in ${NODE_IPS}; do - ssh root@$IP "hostname; apt-get -qq update && apt-get -qq install open-iscsi"; -done; -# ... -``` - -With `iscsi` installed, we can now deploy longhorn: - -```console -$ kubectl apply -f https://raw.githubusercontent.com/longhorn/longhorn/master/deploy/longhorn.yaml -``` diff --git a/nix/build-node.sh b/nix/build-node.sh deleted file mode 100755 index 3490ae050f..0000000000 --- a/nix/build-node.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/sh - -# Creates a new docker image, without importing it into the local docker repo or uploading it. - -set -eu - -cd $(dirname $0) - -TAG=$(git describe --always --dirty) -TARGET_REPO=clmarc - -echo "Building using tag ${TAG}" -nix-build --argstr tag "${TAG}" node-container.nix - -echo "Created new docker image casper-node:${TAG}." 
-echo -echo "Load into local docker:" -echo "docker load -i ./result" -echo -echo "Publish image" -echo "skopeo --insecure-policy copy docker-archive:./result docker://${TARGET_REPO}/casper-node:${TAG}" diff --git a/nix/casper-network.yaml b/nix/casper-network.yaml deleted file mode 100644 index 2646e9836b..0000000000 --- a/nix/casper-network.yaml +++ /dev/null @@ -1,94 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: casper-node - labels: - app: casper-node -spec: - clusterIP: None - selector: - app: casper-node ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: casper-node -spec: - serviceName: "casper-node" - # podManagementPolicy: "Parallel" - replicas: 5 - selector: - matchLabels: - app: casper-node - template: - metadata: - labels: - app: casper-node - spec: - volumes: - - name: chain-map - configMap: - name: chain-map - initContainers: - - name: init-node - image: busybox:1.28 - command: [ - "sh", - "-c", - "mkdir /chain-temp; - cd /chain-temp; - tar xvf /chain-map/chain_map.tar.xz; - cp -vr chain/ ../config/; - export NODE_INDEX=${HOSTNAME##*-}; - cp -vr node-${NODE_INDEX}/ ../config/node; - echo 'all done initializing'; - ", - ] - volumeMounts: - # The stored network definition - - name: chain-map - mountPath: "/chain-map" - readOnly: true - # Volume that holds the configuration - - name: config - mountPath: /config - terminationGracePeriodSeconds: 5 - containers: - - name: node - image: clmarc/casper-node:5bb6e0c4 # TODO: Dynamically set. - # The args override some settings from environment variables. 
- args: ["validator", "/config/node/config.toml"] - env: - - name: RUST_LOG - value: info - - name: RUST_BACKTRACE - value: "1" - resources: - limits: - cpu: "500m" - memory: "1000Mi" - requests: - cpu: "250m" - memory: "500Mi" - volumeMounts: - - name: config - mountPath: /config - - name: storage - mountPath: /storage - volumeClaimTemplates: - - metadata: - name: config - spec: - accessModes: ["ReadWriteOnce"] - storageClassName: "longhorn" - resources: - requests: - storage: 5Mi - - metadata: - name: storage - spec: - accessModes: ["ReadWriteOnce"] - storageClassName: "longhorn" - resources: - requests: - storage: 1Gi diff --git a/nix/demo-cluster.py b/nix/demo-cluster.py deleted file mode 100755 index 57836511df..0000000000 --- a/nix/demo-cluster.py +++ /dev/null @@ -1,165 +0,0 @@ -#!/usr/bin/env python3 - -from base64 import b64encode -import os -import time -import tarfile - -import click -import kubernetes -import toml -from kubernetes.client.rest import ApiException -import volatile - -TAG = "9bddd925" - -#: Prefix for namespaces of deployed networks. Prevents accidental deletion of namespaces like -# `default`. -NETWORK_NAME_PREFIX = "casper-" - -#: Maximum size for a compressed network definition, including WASM. -MAX_CHAIN_MAP_SIZE = 1024 * 1023 - - -def k8s(): - """Loads `KUBECONFIG` and sets up an API client""" - kubernetes.config.load_kube_config() - return kubernetes.client.CoreV1Api() - - -@click.group() -def cli(): - """casper-node Kubernetes cluster administration""" - pass - - -@cli.command("deploy") -@click.argument( - "network-path", - type=click.Path(exists=True, file_okay=False, dir_okay=True, readable=True), -) -@click.option( - "-i", "--id", help="ID for the casper network deployment. 
Defaults to the chainname" -) -def deploy(network_path, id): - """Deploy a generated network onto cluster namespace""" - - api = k8s() - - if id is None: - chainspec = toml.load( - open(os.path.join(network_path, "chain", "chainspec.toml")) - ) - name = name = NETWORK_NAME_PREFIX + chainspec["genesis"]["name"] - else: - name = NETWORK_NAME_PREFIX + id - - # Ensure we're not overwriting an existing deployment. - if namespace_exists(api, name): - click.echo( - "Kubernetes deployment `{}` already exists. Run `destroy` first.".format( - name - ) - ) - return - - # Create the namespace. - api.create_namespace({"metadata": {"name": name}}) - - # Since shared volumes are quite complicated, we store our whole network configuration as a - # config map. This limits the size effectively to < 1 MB, but LZMA-compression should be good - # enough to bring the size down to < 300 KB, the majority of which are WASM contracts. - click.echo("Compressing network definition") - chain_map = b64encode(xz_dir(network_path)).decode("ASCII") - - assert len(chain_map) < MAX_CHAIN_MAP_SIZE - - click.echo("Uploading config map with network definition") - api.create_namespaced_config_map( - namespace=name, - body={ - "metadata": {"name": "chain-map",}, - "binaryData": {"chain_map.tar.xz": chain_map}, - }, - ) - - click.echo("Config has been uploaded. 
You can now deploy the actual network.") - - -@cli.command("destroy") -@click.argument("name") -def destroy(name): - """Kill a potentially running network""" - - api = k8s() - - network_name = NETWORK_NAME_PREFIX + name - - if not namespace_exists(api, network_name): - click.echo("Does not exist: {}".format(network_name)) - return - - click.echo("Deleting namespace {}".format(network_name), nl=False) - delete_namespace(api, network_name) - click.echo() - - -def namespace_exists(api, name): - """Checks whether a given namespace exists""" - try: - ns = api.read_namespace(name) - except ApiException as e: - if e.status != 404: - raise - return False - return True - - -def pod_status(api, namespace, pod_name): - """Checks whether a given pod exists in a namespace""" - try: - pod = api.read_namespaced_pod(name=name, namespace=namespace) - return resp.status.phase - except ApiException as e: - if e.status != 404: - raise - return None - - -def delete_namespace(api, name): - """Delete a namespace and watch for it to terminate""" - # It seems the watch API is not supported for namespace reading? We poll manually. - while True: - if namespace_exists(api, name): - try: - api.delete_namespace(name) - except ApiException as e: - if e.status != 404: - raise - break - else: - break - - click.echo(".", nl=False) - time.sleep(0.250) - - -def xz_dir(target): - """Creates a `.tar.xz`-formatted archive from a path, with all archive paths being relative to - the `target` dir. 
- - Returns the archive in-memory.""" - with volatile.file() as archive: - with tarfile.open(archive.name, "w:xz") as tar: - for dirpath, _dirnames, filenames in os.walk(target): - for filename in filenames: - full_path = os.path.join(dirpath, filename) - relpath = os.path.relpath(full_path, target) - tar.add(full_path, arcname=relpath) - - archive.close() - return open(archive.name, "rb").read() - - -if __name__ == "__main__": - cli() diff --git a/nix/deps.nix b/nix/deps.nix deleted file mode 100644 index bd35ae15a8..0000000000 --- a/nix/deps.nix +++ /dev/null @@ -1,11 +0,0 @@ -# Pinned dependencies for nix packages. - -let - # Fixed mozilla overlay, which does not publish release, so we pin an arbitrary commit. - moz_overlay = import (builtins.fetchTarball - "https://github.com/mozilla/nixpkgs-mozilla/archive/8c007b60731c07dd7a052cce508de3bb1ae849b4.tar.gz"); - - # Pin to a stable nix release. - pkgsPath = import (builtins.fetchTarball - "https://github.com/NixOS/nixpkgs/archive/20.09.tar.gz"); -in pkgsPath { overlays = [ moz_overlay ]; } diff --git a/nix/forward-ports.sh b/nix/forward-ports.sh deleted file mode 100755 index 8e0931b6ce..0000000000 --- a/nix/forward-ports.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/sh - -kubectl -n casper-mynet port-forward casper-node-2 7777 & -kubectl -n casper-mynet port-forward casper-node-3 7778:7777 & -wait diff --git a/nix/node-container.nix b/nix/node-container.nix deleted file mode 100644 index cc8763ff14..0000000000 --- a/nix/node-container.nix +++ /dev/null @@ -1,27 +0,0 @@ -# Container definition for the casper node. -# -# The container uses the node binary as an entrypoint, and expects it `config.toml` at `/`. By -# default, it will launch the node in validator mode. - -{ pkgs ? import ./deps.nix, tag ? 
"latest" }: -let casper-node = (import ./node.nix) { inherit pkgs; }; -in pkgs.dockerTools.buildImage { - name = "casper-node"; - tag = tag; - contents = with pkgs; [ busybox dnsutils strace ]; - - extraCommands = '' - mkdir -p etc - echo 'hosts: files dns' > etc/nsswitch.conf - ''; - - config = { - Cmd = [ "validator" "/config/node/config.toml" ]; - Entrypoint = [ "${casper-node}/bin/casper-node" ]; - WorkingDir = "/storage"; - Volumes = { - "/storage" = { }; - "/config" = { }; - }; - }; -} diff --git a/nix/node.nix b/nix/node.nix deleted file mode 100644 index 0a655c3619..0000000000 --- a/nix/node.nix +++ /dev/null @@ -1,22 +0,0 @@ -{ pkgs ? import ./deps.nix }: -let - rustChannel = (pkgs.rustChannelOf { rustToolchain = ../rust-toolchain; }); - rustPlatform = pkgs.makeRustPlatform { - rustc = rustChannel.rust; - cargo = rustChannel.rust; - }; - source = pkgs.nix-gitignore.gitignoreSource [ "nix/" ] ../.; -in rustPlatform.buildRustPackage rec { - name = "casper-node"; - pname = "casper-node"; - cargoSha256 = "1v7did47ckmmlkrkp6rd5r4gqjrl22gm5zwi710w66lr00zvc092"; - src = source; - buildInputs = with pkgs; [ openssl ]; - nativeBuildInputs = with pkgs; [ pkg-config ]; - cargoBuildFlags = [ "-p" "casper-node" ]; - - # Do not run tests, they require too many dependencies not capture here. - doCheck = false; - - PROTOC = "${pkgs.protobuf}/bin/protoc"; -} diff --git a/nix/run-cluster.sh b/nix/run-cluster.sh deleted file mode 100755 index c255c0dd32..0000000000 --- a/nix/run-cluster.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/sh - -set -e - -NETNAME=mynet -NAMESPACE=casper-${NETNAME} - -# if [ $# -ne 1 ]; then -# echo "usage: $0 TAG" -# exit 1; -# fi; - -# Create a new network. -rm -rf ${NETNAME} -../utils/casper-tool/casper-tool.py create-network --number-of-nodes 5 ${NETNAME} - -# Drop the old namespace. 
-./demo-cluster.py destroy ${NETNAME} -./demo-cluster.py deploy ${NETNAME} - -kubectl --namespace ${NAMESPACE} apply -f casper-network.yaml diff --git a/nix/run-test-deploy.sh b/nix/run-test-deploy.sh deleted file mode 100755 index a3e76030ba..0000000000 --- a/nix/run-test-deploy.sh +++ /dev/null @@ -1,41 +0,0 @@ -#!/bin/sh - -set -u - -# Settings -NODE_ADDRESS=http://localhost:7777 -SECOND_NODE_ADDRESS=http://localhost:7778 - -# CLIENT="cargo run --manifest-path=../client/Cargo.toml --" -CLIENT=../target/debug/casper-client -CHAIN_NAME=mynet -NODE_IDX=2 - -# Caluclated values -SECRET_KEY_PATH=${CHAIN_NAME}/node-${NODE_IDX}/keys/secret_key.pem -WASM_DIR=$(readlink -f ../target/wasm32-unknown-unknown/release/) - -# Helper function to check a deploy exists. -deploy_exists() { - ADDRESS=$1 - HASH=$2 - SHORT_HASH=$(echo $HASH | cut -c 1-8) - - if ${CLIENT} get-deploy --node-address ${ADDRESS} ${HASH} > /dev/null; then - echo "Deploy ${SHORT_HASH} found on ${ADDRESS}" - else - echo "Deploy ${SHORT_HASH} NOT FOUND on ${ADDRESS}" - fi; -} - -DEPLOY_HASH=$(${CLIENT} put-deploy \ - --chain-name ${CHAIN_NAME}\ - --node-address ${NODE_ADDRESS}\ - --secret-key "${SECRET_KEY_PATH}" \ - --session-path "${WASM_DIR}/do_nothing.wasm" \ - --payment-amount 10000000000 | tee /dev/tty | jq -r '.result.deploy_hash' ) - -sleep 0.5; - -deploy_exists ${NODE_ADDRESS} ${DEPLOY_HASH} -deploy_exists ${SECOND_NODE_ADDRESS} ${DEPLOY_HASH} diff --git a/node/BINARY_PORT_PROTOCOL.md b/node/BINARY_PORT_PROTOCOL.md new file mode 100644 index 0000000000..52ce277754 --- /dev/null +++ b/node/BINARY_PORT_PROTOCOL.md @@ -0,0 +1,47 @@ +# The Binary Port Protocol + +This page specifies the protocol of casper nodes Binary Port. + +## Synopsis + +The protocol consists of one party (the client) sending requests to another party (the server) and the server sending responses back to the client. +The Binary Port communication protocol is binary and supports a long lived tcp connection. 
Once the tcp connection is open the binary port assumes a series of request-response messages. It is not supported to send a second request before receiving the entirety of the response to the first one via one tcp connection. Both requests and responses have envelopes containing some metadata. + +### Request format + +| Size in bytes | Field    | Description | +| ------------- | -------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| 4             | length   | A LE encoded number of bytes of all the subsequent fields (excluding the length itself). Based on this number the server "knows" where the binary request ends. | +| 2             | version  | Version of the binary port header serialized as a single u16 number. The server handles only strictly specified versions and can deny service if the version doesn't meet its expectation. The current supported version is `1` | +| 1             | type_tag | Tag identifying the request. | +| 2             | id       | An identifier that should be understandable to the client and should facilitate correlating requests with responses | +| variable      | payload  | Payload to be interpreted according to the `type_tag`. | + +### Response format + +| Size in bytes | Field          | Description | +| ------------- | -------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| 4             | length         | A LE encoded number of bytes of all the subsequent fields (excluding the length itself). Based on this number the client should "know" where the binary response ends. | +| 4             | Request length | number of bytes of the `request` field. | +| variable      | request        | The raw binary request that was provided by the client (including the request's `length` field). 
| +| 2 | version | Version of the binary port response structure. Currently supported version is `1` | +| 2 | error_code | Error code, where 0 indicates success. | +| 1-2 | response_type | Optional payload type tag (first byte being 1 indicates that it exists). | +| 4 | payload_length | Number of bytes of the var-length `payload` field. | +| Variable | payload | Payload to be interpreted according to the `response_type`. If there is no response, or the response was erroneous this field will have 0 bytes and `payload_length` will be the number `0` | + +**Notes:** `variable` means that the payload size is variable and depends on the tag. + +## Request model details + +Currently, there are 3 supported types of requests, but the request model can be extended. The request types are: + +- A `Get` request, which is one of: + - A `Record` request asking for a record with an extensible `RecordId` tag and a key + - An `Information` request asking for a piece of information with an extensible `InformationRequestTag` tag and a key + - A `State` request asking for some data from global state. This can be: + - An `Item` request asking for a single item given a `Key` + - An `AllItems` request asking for all items given a `KeyTag` + - A `Trie` request asking for a trie given a `Digest` +- A `TryAcceptTransaction` request for a transaction to be accepted and executed +- A `TrySpeculativeExec` request for a transaction to be executed speculatively, without saving the transaction effects in global state diff --git a/node/CHANGELOG.md b/node/CHANGELOG.md new file mode 100644 index 0000000000..67ea364282 --- /dev/null +++ b/node/CHANGELOG.md @@ -0,0 +1,557 @@ +# Changelog + +All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog]. 
+ +[comment]: <> (Added: new features) +[comment]: <> (Changed: changes in existing functionality) +[comment]: <> (Deprecated: soon-to-be removed features) +[comment]: <> (Removed: now removed features) +[comment]: <> (Fixed: any bug fixes) +[comment]: <> (Security: in case of vulnerabilities) + +## [Unreleased] +### Added +* `TransactionInvocationTarget::ByPackageHash` has a new field `version_key` +* `TransactionInvocationTarget::ByPackageName` has a new field `version_key` + +### Changed +* Transaction::Deploy no longer supports using (in `payment` or `session`) the `ExecutableDeployItem::StoredVersionedContractByHash` with field `version` set (the field is retained for retro compatibility but new transactions will be rejected by TransactionAcceptor). To execute a stored contract in a specific version please use Transaction::V1. +* Transaction::Deploy no longer supports using (in `payment` or `session`) the `ExecutableDeployItem::StoredVersionedContractByName` with field `version` set (the field is retained for retro compatibility but new transactions will be rejected by TransactionAcceptor). To execute a stored contract in a specific version please use Transaction::V1. +* Transaction::V1 no longer supports using the `TransactionInvocationTarget::ByPackageHash` variant with `version` defined (the field is retained for retro compatibility but new transactions will be rejected by TransactionAcceptor). Please use `version_key` instead. +* Transaction::V1 no longer supports using the `TransactionInvocationTarget::ByPackageName` variant with `version` defined (the field is retained for retro compatibility but new transactions will be rejected by TransactionAcceptor). Please use `version_key` instead. + +## 2.0.0 + +### Added +* Add `BinaryPort` interface along with the relevant config entries. +* Added chainspec settings `finders_fee`, `finality_signature_proportion` and `signature_rewards_max_delay` to control behavior of the new seigniorage model. 
+* Isolated sync handling, which comes online with only local data and rejects peers. Useful for testing, auditing, and similar scenarios. + +### Changed +* All SSE events are emitted via the `/events` endpoint. None of the previous ones (`/events/main`, `/events/deploys`, and `/events/sigs`) is available any longer. +* `DeployBuffer` was renamed to `TransactionBuffer` along with the related metrics. +* Switch blocks and the creation and propagation of signatures on them are now rewarded. +* Era end reports now record rewards as motes rather than scores. +* Seigniorage model is now independent of the details of consensus (and compatible with both Highway and Zug) and based solely upon block proposals, signature generation and signature distribution by validators. + +### Removed +* Remove the JSON-RPC and speculative execution interfaces. +* Remove chainspec setting `highway.performance_meter.blocks_to_consider` and the entire `highway.performance_meter` section. +* Remove chainspec setting `highway.reduced_reward_multiplier` + +## 1.5.6 + +### Changed +* The node will recognise if a pending upgrade is unstaged and will avoid shutting down for upgrade in this case. +* If an upgrade with the same activation point as the current one is detected on startup, the node will immediately shut down for upgrade. +* Reduce chainspec setting `deploys.max_ttl` from 18 hours to 2 hours. + +## 1.5.5 + +### Added +* New chainspec setting `highway.performance_meter.blocks_to_consider` with a value of 10, meaning that nodes will take 10 most recent blocks into account when determining their performance in Highway for the purpose of choosing their round lengths. + +## 1.5.4 + +### Added +* New environment variable `CL_EVENT_QUEUE_DUMP_THRESHOLD` to enable dumping of queue event counts to log when a certain threshold is exceeded. +* Add initial support for private chains. +* Add support for CA signed client certificates for private chains. 
+* Add a Highway Analysis tool for checking the state of the consensus. + +### Changed +* Minimum block time reduced from 32.768s to 16.384s, with corresponding changes to related chainspec settings: + * `core.minimum_block_time` reduced to `16384 ms`. + * `core.round_seigniorage_rate` reduced to `[7, 175070816]`. + * `highway.block_gas_limit` reduced to `4_000_000_000_000`. +* The `state_identifier` parameter of the `query_global_state` JSON-RPC method is now optional. If no `state_identifier` is specified, the highest complete block known to the node will be used to fulfill the request. +* `state_get_account_info` RPC handler can now handle an `AccountIdentifier` as a parameter. +* Replace the `sync_to_genesis` node config field with `sync_handling`. + * The new `sync_handling` field accepts three values: + - `genesis` - node will attempt to acquire all block data back to genesis + - `ttl` - node will attempt to acquire all block data to comply with time to live enforcement + - `nosync` - node will only acquire blocks moving forward +* Make the `network.estimator_weights` section of the node config more fine-grained to provide more precise throttling of non-validator traffic. + +### Removed +* The section `consensus.highway.round_success_meter` has been removed from the config file as no longer relevant with the introduction of a new method of determining the round exponent in Highway. + +### Fixed +* Now possible to build outside a git repository context (e.g. from a source tarball). In such cases, the node's build version (as reported via status endpoints) will not contain a trailing git short hash. +* Remove an error that would unnecessarily be raised when a node includes its highest orphaned block within the current era. +* Short-circuit initialization of block and deploy metadata DB to resolve delays after an upgrade. 
+ +### Security +* Update `openssl` to version 0.10.55 as mitigation for [RUSTSEC-2023-0044](https://rustsec.org/advisories/RUSTSEC-2023-0044). + + + +## 1.5.3 + +### Added +* Add `deploy_acceptor` section to config with a single option `timestamp_leeway` to allow a small leeway when deciding if a deploy is future-dated. +* Add `deploys.max_timestamp_leeway` chainspec option to define the upper limit for the new config option `deploy_acceptor.timestamp_leeway`. +* Add `block_validator.max_completed_entries` config option to control the number of recently validated proposed blocks to retain. + +### Changed +* Change the limit of the `core_config.simultaneous_peer_requests` chainspec parameter to 255. +* Optimize the `BlockValidator` component to reduce the number of simultaneous fetch events created for a given proposed block. + +### Fixed +* Fix issue in `chain_get_block_transfers` JSON-RPC where blocks with no deploys could be reported as having `null` transfers rather than `[]`. +* Fix issue in `chain_get_block_transfers` JSON-RPC where blocks containing successful transfers could erroneously be reported as having none. + +### Removed +* Remove the `block_synchronizer.stall_limit` node config parameter since it is no longer needed. + + + +## 1.5.2 + +### Added +* Added the `cors_origin` config option under the `[rest_server]`, `[rpc_server]`, `[event_stream_server]` and `[speculative_exec_server]` sections to allow configuration of the CORS Origin. + + + +## 1.5.1 + +### Added +* Added the `upgrade_timeout` config option under the `[node]` section. + +### Changed +* `speculative_exec` server now routes deploys to `DeployAcceptor` for more comprehensive validation, including cryptographic verification of signatures. + + + +## 1.5.0-rc.1 + +### Added +* Introduce fast-syncing to join the network, avoiding the need to execute every block to catch up. 
+* Add config sections for new components to support fast-sync: `[block_accumulator]`, `[block_synchronizer]`, `[deploy_buffer]` and `[upgrade_watcher]`. +* Add new Zug consensus protocol, disabled by default, along with a new `[consensus.zug]` config section. +* Add a `consensus_protocol` option to the chainspec to choose a consensus protocol, and a `minimum_block_time` setting for the minimum difference between a block's timestamp and its child's. +* Add a `vesting_schedule_period` option to the chainspec to define the period in which genesis validators' bids are released over time after they are unlocked. +* Add a `simultaneous_peer_requests` option to the chainspec to define the maximum number of simultaneous block-sync and sync-leap requests. +* Add following config options under `[node]` section to support fast-sync: + * `sync_to_genesis` which if set to `true` will cause the node to retrieve all blocks, deploys and global state back to genesis. + * `idle_tolerance` which defines the time after which the syncing process is considered stalled. + * `max_attempts` which defines the maximum number of attempts to sync before exiting the node process after the syncing process is considered stalled. + * `control_logic_default_delay` which defines the default delay for the control events that have no dedicated delay requirements. + * `force_resync` which if set to `true` will cause the node to resync all of the blocks. +* Add following config options under `[network]` section: + * `min_peers_for_initialization` which defines the minimum number of fully-connected peers to consider network component initialized. + * `handshake_timeout` which defines connection handshake timeouts (they were hardcoded at 20 seconds previously). + * `max_incoming_peer_connections` which defines the maximum number of incoming connections per unique peer allowed. + * `max_in_flight_demands` which defines the maximum number of in-flight requests for data from a single peer. 
+ * `tarpit_version_threshold`, `tarpit_duration` and `tarpit_chance` to configure the tarpitting feature, designed to reduce the impact of old node versions making repeated, rapid reconnection attempts. + * `blocklist_retain_duration` which defines how long peers remain blocked after they get blocklisted. + * optional `[network.identity]` section to support loading existing network identity certificates signed by a certificate authority. + * In addition to `consensus` and `deploy_requests`, the following values can now be controlled via the `[network.estimator_weights]` section in config: `gossip`, `finality_signatures`, `deploy_responses`, `block_requests`, `block_responses`, `trie_requests` and `trie_responses`. +* The network handshake now contains the hash of the chainspec used and will be successful only if they match. +* Checksums for execution results and deploy approvals are written to global state after each block execution. +* Add a new config option `[rpc_server.max_body_bytes]` to allow a configurable value for the maximum size of the body of a JSON-RPC request. +* Add `enable_server` option to all HTTP server configuration sections (`rpc_server`, `rest_server`, `event_stream_server`) which allow users to enable/disable each server independently (enabled by default). +* Add `enable_server`, `address`, `qps_limit` and `max_body_bytes` to new `speculative_exec_server` section to `config.toml` to configure speculative execution JSON-RPC server (disabled by default). +* Add new event to the main SSE server stream across all endpoints `/events/*` which emits a shutdown event when the node shuts down. +* Add following fields to the `/status` endpoint and the `info_get_status` JSON-RPC: + * `reactor_state` indicating the node's current operating mode. + * `last_progress` indicating the time the node last made progress. + * `available_block_range` indicating the highest contiguous sequence of the block chain for which the node has complete data. 
+ * `block_sync` indicating the state of the block synchronizer component. +* Add new REST `/chainspec` and JSON-RPC `info_get_chainspec` endpoints that return the raw bytes of the `chainspec.toml`, `accounts.toml` and `global_state.toml` files as read at node startup. +* Add a new JSON-RPC endpoint `query_balance` which queries for balances under a given `PurseIdentifier`. +* Add new JSON-RPC endpoint `/speculative_exec` that accepts a deploy and a block hash and executes that deploy, returning the execution effects. +* Add `strict_argument_checking` to the chainspec to enable strict args checking when executing a contract; i.e. that all non-optional args are provided and of the correct `CLType`. +* A diagnostics port can now be enabled via the `[diagnostics_port]` config section. See the `README.md` for details. +* Add `SIGUSR2` signal handling to dump the queue in JSON format (see "Changed" section for `SIGUSR1`). +* Add `validate_and_store_timeout` config option under `[gossip]` section to control the time the gossiper waits for another component to validate and store an item received via gossip. 
+* Add metrics: + * `block_accumulator_block_acceptors`, `block_accumulator_known_child_blocks` to report status of the block accumulator component + * `(forward|historical)_block_sync_duration_seconds` to report the progress of block synchronization + * `deploy_buffer_total_deploys`, `deploy_buffer_held_deploys`, `deploy_buffer_dead_deploys` to report status of the deploy buffer component + * `(lowest|highest)_available_block_height` to report the low/high values of the complete block range (the highest contiguous chain of blocks for which the node has complete data) + * `sync_leap_duration_seconds`, `sync_leap_fetched_from_peer_total`, `sync_leap_rejected_by_peer_total`, `sync_leap_cant_fetch_total` to report progress of the sync leaper component + * `execution_queue_size` to report the number of blocks enqueued pending execution + * `accumulated_(outgoing|incoming)_limiter_delay` to report how much time was spent throttling other peers. +* Add `testing` feature to casper-node crate to support test-only functionality (random constructors) on blocks and deploys. +* Connections to unresponsive nodes will be terminated, based on a watchdog feature. + +### Changed +* The `starting_state_root_hash` field from the REST and JSON-RPC status endpoints now represents the state root hash of the lowest block in the available block range. +* Detection of a crash no longer triggers DB integrity checks to run on node start; the checks can be triggered manually instead. +* Nodes no longer connect to nodes that do not speak the same protocol version by default. +* Incoming connections from peers are rejected if they are exceeding the default incoming connections per peer limit of 3. +* Chain automatically creates a switch block immediately after genesis or an upgrade, known as "immediate switch blocks". +* Requests for data from a peer are now de-prioritized over networking messages necessary for consensus and chain advancement. 
+* Replace network message format with a more efficient encoding while keeping the initial handshake intact. +* Flush outgoing messages immediately, trading bandwidth for latency and hence optimizing feedback loops of various components in the system. +* Move `finality_threshold_fraction` from the `[highway]` to the `[core]` section in the chainspec. +* Move `max_execution_delay` config option from `[consensus.highway]` to `[consensus]` section. +* Add CORS behavior to allow any route on the JSON-RPC, REST and SSE servers. +* The JSON-RPC server now returns more useful responses in many error cases. +* Add a new parameter to `info_get_deploys` JSON-RPC, `finalized_approvals` - controlling whether the approvals returned with the deploy should be the ones originally received by the node, or overridden by the approvals that were finalized along with the deploy. +* Support using block height as the `state_identifier` parameter of JSON-RPC `query_global_state` requests. +* Add new `block_hash` and `block_height` optional fields to JSON-RPC `info_get_deploy` response which will be present when execution results aren't available. +* JSON-RPC responses which fail to provide requested data will now also include an indication of that node's available block range, i.e. the block heights for which it holds all global state. See [#2789](https://github.com/casper-network/casper-node/pull/2789) for an example of the new error response. +* Add a `lock_status` field to the JSON representation of the `ContractPackage` values. +* `Key::SystemContractRegistry` is now readable and can be queried via the `query_global_state` JSON-RPC. +* Unify log messages for blocked nodes and provide more detailed reasons for blocking peers. +* Rename `current_era` metric to `consensus_current_era`. + +### Deprecated +* `null` should no longer be used as a value for `params` in JSON-RPC requests. Prefer an empty Array or Object. 
+* Deprecate the `chain_height` metric in favor of `highest_available_block_height`. + +### Removed +* Remove legacy synchronization from genesis in favor of fast-sync. +* Remove config options no longer required due to fast-sync: `[linear_chain_sync]`, `[block_proposer]` and `[consensus.highway.standstill_timeout]`. +* Remove chainspec setting `[protocol.last_emergency_restart]` as fast sync will use the global state directly for recognizing such restarts instead. +* Remove a temporary chainspec setting `[core.max_stored_value_size]` which was used to limit the size of individual values stored in global state. +* Remove config section `[deploy_acceptor]` which only has one option `verify_accounts`, meaning deploys received from clients always undergo account balance checks to assess suitability for execution or not. +* Remove storage integrity check. +* Remove `SIGUSR1`/`SIGUSR2` queue dumps in favor of the diagnostics port. +* Remove `casper-mainnet` feature flag. + +### Fixed +* Limiters for incoming requests and outgoing bandwidth will no longer inadvertently delay some validator traffic when maxed out due to joining nodes. +* Dropped connections no longer cause the outstanding messages metric to become incorrect. +* JSON-RPC server is now mostly compliant with the standard. Specifically, correct error values are now returned in responses in many failure cases. + +### Security +* Bump `openssl` crate to version 0.10.48, if compiling with vendored OpenSSL to address latest RUSTSEC advisories. + + + +## 1.4.15-alt + +### Changed +* Update dependencies (in particular `casper-types` to v2.0.0 due to additional `Key` variant). Note that publishing `1.4.15-alt` is only to rectify the issue where `casper-types` was published as v1.6.0 despite having a breaking change. It is expected to only be consumed as a crate; there will be no upgrade of Casper Mainnet, Testnet, etc to protocol version `1.4.15-alt`. 
+ + + +## 1.4.15 + +### Changed +* Modified JSON-RPCs `chain_get_era_info_by_switch_block` and `chain_get_era_summary` to use either `Key::EraInfo` or `Key::EraSummary` as appropriate in order to provide useful responses. + + + +## 1.4.14 + +### Added +* Node executes new prune process after executing each block, whereby entries under `Key::EraInfo` are removed in batches of size defined by the new chainspec option `[core.prune_batch_size]`. +* After executing a switch block, information about that era is stored to global state under a new static key `Key::EraSummary`. +* Add a new JSON-RPC endpoint `chain_get_era_summary` to retrieve the information stored under `Key::EraSummary`. + +### Changed +* Rather than storing an ever-increasing collection of era information after executing a switch block under `Key::EraInfo`, the node now stores only the information relevant to that era under `Key::EraSummary`. +* Update `openssl` and `openssl-sys` to latest versions. + +### Removed +* Remove asymmetric key functionality (move to `casper-types` crate behind feature `std`). +* Remove time types (move to `casper-types` with some functionality behind feature `std`). + +### Fixed +* Fix issue in BlockValidator inhibiting the use of fallback peers to fetch missing deploys. + + + +## 1.4.13 + +### Changed +* Update `casper-execution-engine`. + + + +## 1.4.8 + +### Added +* Add an `identity` option to load existing network identity certificates signed by a CA. + + + +### Changed +* Update `casper-execution-engine`. + + + +## 1.4.7 + +### Changed +* Update `casper-execution-engine` and three `openssl` crates to latest versions. + + + +## 1.4.6 + +### Changed +* Update dependencies to make use of scratch global state in the contract runtime. + + + +## 1.4.5 + +### Added +* Add a temporary chainspec setting `max_stored_value_size` to limit the size of individual values stored in global state. 
+* Add a chainspec setting `minimum_delegation_amount` to limit the minimal amount of motes that can be delegated by a first time delegator. +* Add a chainspec setting `block_max_approval_count` to limit the maximum number of approvals across all deploys in a single block. +* Add a `finalized_approvals` field to the GetDeploy RPC, which if `true` causes the response to include finalized approvals substituted for the originally-received ones. + +### Fixed +* Include deploy approvals in block payloads upon which consensus operates. +* Fixes a bug where historical auction data was unavailable via `get-auction-info` RPC. + + + +## 1.4.4 - 2021-12-29 + +### Added +* Add `contract_runtime_latest_commit_step` gauge metric indicating the execution duration of the latest `commit_step` call. + +### Changed +* No longer checksum-hex encode various types. + + + +## 1.4.3 - 2021-12-06 + +### Added +* Add new event to the main SSE server stream accessed via `/events/main` which emits hashes of expired deploys. + +### Changed +* `enable_manual_sync` configuration parameter defaults to `true`. +* Default behavior of LMDB changed to use [`NO_READAHEAD`](https://docs.rs/lmdb/0.8.0/lmdb/struct.EnvironmentFlags.html#associatedconstant.NO_READAHEAD). + + + +## [1.4.2] - 2021-11-11 + +### Changed +* There are now less false warnings/errors regarding dropped responders or closed channels during a shutdown, where they are expected and harmless. +* Execution transforms are ordered by insertion order. + +### Removed +* The config option `consensus.highway.unit_hashes_folder` has been removed. + +### Fixed +* The block proposer component now retains pending deploys and transfers across a restart. + + + +## [1.4.0] - 2021-10-04 + +### Added +* Add `enable_manual_sync` boolean option to `[contract_runtime]` in the config.toml which enables manual LMDB sync. +* Add `contract_runtime_execute_block` histogram tracking execution time of a whole block. 
+* Long-running events now log their event type. +* Individual weights for traffic throttling can now be set through the configuration value `network.estimator_weights`. +* Add `consensus.highway.max_request_batch_size` configuration parameter. Defaults to 20. +* New histogram metrics `deploy_acceptor_accepted_deploy` and `deploy_acceptor_rejected_deploy` that track how long the initial verification took. +* Add gzip content negotiation (using accept-encoding header) to rpc endpoints. +* Add `state_get_trie` JSON-RPC endpoint. +* Add `info_get_validator_changes` JSON-RPC endpoint and REST endpoint `validator-changes` that return the status changes of active validators. + +### Changed +* The following Highway timers are now separate, configurable, and optional (if the entry is not in the config, the timer is never called): + * `standstill_timeout` causes the node to restart if no progress is made. + * `request_state_interval` makes the node periodically request the latest state from a peer. + * `log_synchronizer_interval` periodically logs the number of entries in the synchronizer queues. +* Add support for providing node uptime via the addition of an `uptime` parameter in the response to the `/status` endpoint and the `info_get_status` JSON-RPC. +* Support building and testing using stable Rust. +* Log chattiness in `debug` or lower levels has been reduced and performance at `info` or higher slightly improved. 
+* The following parameters in the `[gossip]` section of the config has been renamed: + * `[finished_entry_duration_secs]` => `[finished_entry_duration]` + * `[gossip_request_timeout_secs]` => `[gossip_request_timeout]` + * `[get_remainder_timeout_secs]` => `[get_remainder_timeout]` +* The following parameters in config now follow the humantime convention ('30sec', '120min', etc.): + * `[network][gossip_interval]` + * `[gossip][finished_entry_duration]` + * `[gossip][gossip_request_timeout]` + * `[gossip][get_remainder_timeout]` + * `[fetcher][get_from_peer_timeout]` + +### Removed +* The unofficial support for nix-related derivations and support tooling has been removed. +* Experimental, nix-based kubernetes testing support has been removed. +* Experimental support for libp2p has been removed. +* The `isolation_reconnect_delay` configuration, which has been ignored since 1.3, has been removed. +* The libp2p-exclusive metrics of `read_futures_in_flight`, `read_futures_total`, `write_futures_in_flight`, `write_futures_total` have been removed. + +### Fixed +* Resolve an issue where `Deploys` with payment amounts exceeding the block gas limit would not be rejected. +* Resolve issue of duplicated config option `max_associated_keys`. + + + +## [1.3.2] - 2021-08-02 + +### Fixed +* Resolve an issue in the `state_get_dictionary_item` JSON-RPC when a `ContractHash` is used. +* Corrected network state engine to hold in blocked state for full 10 minutes when encountering out of order race condition. + + + +## [1.3.1] - 2021-07-26 + +### Fixed +* Parametrized sync_timeout and increased value to stop possible post upgrade restart loop. + + + +## [1.3.0] - 2021-07-19 + +### Added +* Add support for providing historical auction information via the addition of an optional block ID in the `state_get_auction_info` JSON-RPC. +* Exclude inactive validators from proposing blocks. +* Add validation of the `[protocol]` configuration on startup, to ensure the contained values make sense. 
+* Add optional outgoing bandwidth limiter to the networking component, controllable via new `[network][max_outgoing_byte_rate_non_validators]` config option. +* Add optional incoming message limiter to the networking component, controllable via new `[network][max_incoming_message_rate_non_validators]` config option. +* Add optional in-memory deduplication of deploys, controllable via new `[storage]` config options `[enable_mem_deduplication]` and `[mem_pool_prune_interval]`. +* Add a new event stream to SSE server accessed via `/events/deploys` which emits deploys in full as they are accepted. +* Events now log their ancestors, so detailed tracing of events is possible. + +### Changed +* Major rewrite of the network component, covering connection negotiation and management, periodic housekeeping and logging. +* Exchange and authenticate Validator public keys in network handshake between peers. +* Remove needless copying of outgoing network messages. +* Move finality signatures to separate event stream and change stream endpoints to `/events/main` and `/events/sigs`. +* Avoid truncating the state root hash when reporting node's status via JSON-RPC or REST servers. +* The JSON-RPC server waits until an incoming deploy has been sent to storage before responding to the client. +* Persist event stream event index across node restarts. +* Separate transfers from other deploys in the block proposer. +* Enable getting validators for future eras in `EffectBuilder::get_era_validators()`. +* Improve logging around stalled consensus detection. +* Skip storage integrity checks if the node didn't previously crash. +* Update pinned version of Rust to `nightly-2021-06-17`. +* Changed LMDB flags to reduce flushing and optimize IO performance in the Contract Runtime. +* Don't shut down by default anymore if stalled. To enable set config option `shutdown_on_standstill = true` in `[consensus.highway]`. +* Major rewrite of the contract runtime component. 
+* Ports used for local testing are now determined in a manner that hopefully leads to less accidental conflicts. +* At log level `DEBUG`, single events are no longer logged (use `TRACE` instead). +* More node modules are now `pub(crate)`. + +### Removed +* Remove systemd notify support, including removal of `[network][systemd_support]` config option. +* Removed dead code revealed by making modules `pub(crate)`. +* The networking layer no longer gives preferences to validators from the previous era. + +### Fixed +* Avoid redundant requests caused by the Highway synchronizer. +* Update "current era" metric also for initial era. +* Keep syncing until the node is in the current era, rather than allowing an acceptable drift. +* Update the list of peers with newly-learned ones in linear chain sync. +* Drain the joiner reactor queue on exit, to eliminate stale connections whose handshake has completed, but which live on the queue. +* Shut down SSE event streams gracefully. +* Limit the maximum number of clients connected to the event stream server via the `[event_stream_server][max_concurrent_subscribers]` config option. +* Avoid emitting duplicate events in the event stream. +* Change `BlockIdentifier` params in the Open-RPC schema to be optional. +* Asymmetric connections are now swept regularly again. + + + +## [1.2.0] - 2021-05-27 + +### Added +* Add configuration options for `[consensus][highway][round_success_meter]`. +* Add `[protocol][last_emergency_restart]` field to the chainspec for use by fast sync. +* Add an endpoint at `/rpc-schema` to the REST server which returns the OpenRPC-compatible schema of the JSON-RPC API. +* Have consensus component shut down the node on an absence of messages for the last era for a given period. +* Add a new `Step` event to the event stream which displays the contract runtime `Step` execution results. +* Add a configurable delay before proposing dependencies, to give deploys time to be gossiped before inclusion in a new block. 
+* Add instrumentation to the network component. +* Add fetchers for block headers. +* Add joiner test. + +### Changed +* Change to Apache 2.0 license. +* Provide an efficient way of finding the block to which a given deploy belongs. +* On hard-reset upgrades, only remove stored blocks with old protocol versions, and remove all data associated with a removed block. +* Restrict expensive startup integrity checks to sessions following unclean shutdowns. +* Improve node joining process. +* Improve linear chain component, including cleanups and optimized handling of finality signatures. +* Make the syncing process, linear chain component and joiner reactor not depend on the Era Supervisor. +* Improve logging of banned peers. +* Change trigger for upgrade checks to timed interval rather than announcement of new block. +* Use the same JSON representation for a block in the event stream as for the JSON-RPC server. +* Avoid creating a new era when shutting down for an upgrade. +* Allow consensus to disconnect from faulty peers. +* Use own most recent round exponent instead of the median when initializing a new era. +* Request protocol state from peers only for the latest era. +* Add an instance ID to consensus pings, so that they are only handled in the era and the network they were meant for. +* Avoid requesting a consensus dependency that is already in the synchronizer queue. +* Change replay detection to not use execution results. +* Initialize consensus round success meter with current timestamp. +* Era Supervisor now accounts for the last emergency restart. +* Upgrade dependencies, in particular tokio. +* Use `minimum_block_time` and `maximum_round_length` in Highway, instead of `minimum_round_exponent` and `maximum_round_exponent`. The minimum round length doesn't have to be a power of two in milliseconds anymore. + +### Removed +* Remove `impl Sub for Timestamp` to help avoid panicking in non-obvious edge cases. 
+* Remove `impl Sub for Timestamp` from production code to help avoid panicking in non-obvious edge cases. +* Remove `[event_stream_server][broadcast_channel_size]` from config.toml, and make it a factor of the event stream buffer size. + +### Fixed +* Have casper-node process exit with the exit code returned by the validator reactor. +* Restore cached block proposer state correctly. +* Runtime memory estimator now registered in the joiner reactor. +* Avoid potential arithmetic overflow in consensus component. +* Avoid potential index out of bounds error in consensus component. +* Avoid panic on dropping an event responder. +* Validate each block size in the block validator component. +* Prevent deploy replays. +* Ensure finality signatures received after storing a block are gossiped and stored. +* Ensure isolated bootstrap nodes attempt to reconnect properly. +* Ensure the reactor doesn't skip fatal errors before successfully exiting. +* Collect only verified signatures from bonded validators. +* Fix a race condition where new metrics were replaced before the networking component had shut down completely, resulting in a panic. +* Ensure an era is not activated twice. +* Avoid redundant requests caused by the Highway synchronizer. +* Reduce duplication in block validation requests made by the Highway synchronizer. +* Request latest consensus state only if consensus has stalled locally. + + + +## [1.1.1] - 2021-04-19 + +### Changed +* Ensure consistent validation when adding deploys and transfers while proposing and validating blocks. + + + +## [1.1.0] - 2021-04-13 [YANKED] + +### Changed +* Ensure that global state queries will only be permitted to recurse to a fixed maximum depth. + + + +## [1.0.1] - 2021-04-08 + +### Added +* Add `[deploys][max_deploy_size]` to chainspec to limit the size of valid deploys. +* Add `[network][maximum_net_message_size]` to chainspec to limit the size of peer-to-peer messages. 
+ +### Changed +* Check deploy size does not exceed maximum permitted as part of deploy validation. +* Include protocol version and maximum message size in network handshake of nodes. +* Change accounts.toml to only be included in v1.0.0 configurations. + + + +## [1.0.0] - 2021-03-30 + +### Added +* Initial release of node for Casper mainnet. + + + +[Keep a Changelog]: https://keepachangelog.com/en/1.0.0 +[unreleased]: https://github.com/casper-network/casper-node/compare/37d561634adf73dab40fffa7f1f1ee47e80bf8a1...dev +[1.4.2]: https://github.com/casper-network/casper-node/compare/v1.4.0...37d561634adf73dab40fffa7f1f1ee47e80bf8a1 +[1.4.0]: https://github.com/casper-network/casper-node/compare/v1.3.0...v1.4.0 +[1.3.0]: https://github.com/casper-network/casper-node/compare/v1.2.0...v1.3.0 +[1.2.0]: https://github.com/casper-network/casper-node/compare/v1.1.1...v1.2.0 +[1.1.1]: https://github.com/casper-network/casper-node/compare/v1.0.1...v1.1.1 +[1.1.0]: https://github.com/casper-network/casper-node/compare/v1.0.1...v1.1.1 +[1.0.1]: https://github.com/casper-network/casper-node/compare/v1.0.0...v1.0.1 +[1.0.0]: https://github.com/casper-network/casper-node/releases/tag/v1.0.0 \ No newline at end of file diff --git a/node/Cargo.toml b/node/Cargo.toml index 81baf43205..c696ed6458 100644 --- a/node/Cargo.toml +++ b/node/Cargo.toml @@ -1,118 +1,121 @@ [package] name = "casper-node" -version = "1.0.0" # when updating, also update 'html_root_url' in lib.rs -authors = ["Marc Brinkmann ", "Fraser Hutchison "] -edition = "2018" +version = "2.0.4" # when updating, also update 'html_root_url' in lib.rs +authors = ["Ed Hastings ", "Karan Dhareshwar "] +edition = "2021" description = "The Casper blockchain node" documentation = "https://docs.rs/casper-node" readme = "README.md" -homepage = "https://casperlabs.io" -repository = "https://github.com/CasperLabs/casper-node/tree/master/node" -license-file = "../LICENSE" +homepage = "https://casper.network" +repository = 
"https://github.com/casper-network/casper-node/tree/dev/node" +license = "Apache-2.0" default-run = "casper-node" +exclude = ["proptest-regressions"] [dependencies] ansi_term = "0.12.1" anyhow = "1" +aquamarine = "0.1.12" +async-trait = "0.1.50" backtrace = "0.3.50" base16 = "0.2.1" base64 = "0.13.0" bincode = "1" -blake2 = { version = "0.9.0", default-features = false } -casper-execution-engine = { version = "1.0.0", path = "../execution_engine" } -casper-node-macros = { version = "1.0.0", path = "../node_macros" } -casper-types = { version = "1.0.0", path = "../types", features = ["std", "gens"] } -chrono = "0.4.10" -datasize = { version = "0.2.9", features = ["detailed", "fake_clock-types", "futures-types", "smallvec-types"] } +bytes = "1.0.1" +casper-binary-port = { version = "1.1.1", path = "../binary_port" } +casper-storage = { version = "2.1.1", path = "../storage" } +casper-types = { version = "6.0.1", path = "../types", features = ["datasize", "json-schema", "std-fs-io"] } +casper-execution-engine = { version = "8.1.1", path = "../execution_engine" } +datasize = { version = "0.2.11", features = ["detailed", "fake_clock-types", "futures-types", "smallvec-types"] } derive_more = "0.99.7" -derp = "0.0.14" -ed25519-dalek = { version = "1", default-features = false, features = ["rand", "serde", "u64_backend"] } -either = "1" +either = { version = "1", features = ["serde"] } enum-iterator = "0.6.0" +erased-serde = "0.3.18" fs2 = "0.4.3" -futures = "0.3.5" +futures = "0.3.31" futures-io = "0.3.5" -getrandom = "0.2.0" -hex = "0.4.2" -hex-buffer-serde = "0.2.1" +hex-buffer-serde = "0.3.0" hex_fmt = "0.3.0" hostname = "0.3.0" http = "0.2.1" -humantime = "2" -hyper = "0.14.4" -itertools = "0.10.0" -jemalloc-ctl = "0.3.3" -jemallocator = "0.3.2" -k256 = { version = "0.7.2", features = ["arithmetic", "ecdsa", "sha256", "zeroize"] } +humantime = "2.1.0" +hyper = "0.14.27" +itertools = "0.10.3" libc = "0.2.66" -libp2p = { version = "0.35.1", default-features = false, 
features = ["deflate", "dns", "floodsub", "gossipsub", "identify", "kad", "mdns", "mplex", "noise", "ping", "request-response", "tcp-tokio", "uds", "yamux"] } linked-hash-map = "0.5.3" -lmdb = "0.8.0" +lmdb-rkv = "0.14" log = { version = "0.4.8", features = ["std", "serde", "kv_unstable"] } num = { version = "0.4.0", default-features = false } -num-derive = "0.3.0" +num-derive = { workspace = true } num-rational = { version = "0.4.0", features = ["serde"] } -num-traits = "0.2.10" +num-traits = { workspace = true } num_cpus = "1" once_cell = "1" -openssl = "0.10.32" -parking_lot = "0.11.0" -pem = "0.8.1" -prometheus = "0.12.0" -proptest = { version = "1.0.0", optional = true } -quanta = "0.7.2" +openssl = "0.10.70" +pin-project = "1.0.6" +prometheus = { version = "0.13.4", default-features = false } +quanta = "0.9.2" rand = "0.8.3" rand_chacha = "0.3.0" regex = "1" -schemars = { version = "0.8.0", features = ["preserve_order"] } -sd-notify = "0.3.0" -serde = { version = "1", features = ["derive"] } +rmp-serde = "0.14.4" +schemars = { version = "0.8.16", features = ["preserve_order", "impl_json_schema"] } +serde = { version = "1", features = ["derive", "rc"] } serde-big-array = "0.3.0" +serde-map-to-array = "1.1.0" serde_bytes = "0.11.5" -serde_json = "1" +serde_json = { version = "1", features = ["preserve_order"] } serde_repr = "0.1.6" +shlex = "1.3.0" signal-hook = "0.3.4" signature = "1" smallvec = { version = "1", features = ["serde"] } static_assertions = "1" +stats_alloc = "0.1.8" structopt = "0.3.14" +strum = { version = "0.24.1", features = ["strum_macros", "derive"] } sys-info = "0.8.0" -tempfile = "3" +tempfile = "3.4.0" thiserror = "1" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "sync", "time"] } -tokio-openssl = "0.6.1" +tokio-openssl = "0.6.3" tokio-serde = { version = "0.8.0", features = ["bincode"] } tokio-stream = { version = "0.1.4", features = ["sync"] } tokio-util = { version = "0.6.4", features = ["codec"] } -toml = 
"0.5.6" +mio = "0.8.11" +toml = { version = "0.8.19", features = ["preserve_order"] } tower = { version = "0.4.6", features = ["limit"] } tracing = "0.1.18" tracing-futures = "0.2.5" -tracing-subscriber = { version = "0.2.10", features = ["fmt", "json"] } +tracing-subscriber = { version = "0.3.15", features = ["env-filter", "fmt", "json"] } uint = "0.9.0" -untrusted = "0.7.1" uuid = { version = "0.8.1", features = ["serde", "v4"] } -warp = "0.3.0" -warp-json-rpc = "0.3.0" +warp = { version = "0.3.6", features = ["compression"] } wheelbuf = "0.2.0" -[build-dependencies] -vergen = "3" +casper-executor-wasm = { version = "0.1.3", path = "../executor/wasm" } +casper-executor-wasm-interface = { version = "0.1.3", path = "../executor/wasm_interface" } +fs_extra = "1.3.0" [dev-dependencies] -assert_matches = "1" +casper-binary-port = { version = "1.1.1", path = "../binary_port", features = ["testing"] } +assert-json-diff = "2.0.1" +assert_matches = "1.5.0" +casper-types = { path = "../types", features = ["datasize", "json-schema", "std-fs-io", "testing"] } fake_instant = "0.4.0" -multihash = "0.13.2" -pnet = "0.27.2" +pnet = "0.28.0" +pretty_assertions = "0.7.2" +proptest = "1.0.0" +proptest-derive = "0.5.1" rand_core = "0.6.2" -rand_pcg = "0.3.0" -reqwest = "0.10.8" -rmp-serde = "0.14.4" +reqwest = { version = "0.11.27", features = ["stream"] } tokio = { version = "1", features = ["test-util"] } [features] -vendored-openssl = ['openssl/vendored'] -fast-sync = [] +failpoints = [] +testing = ["casper-types/testing"] +vendored-openssl = ["openssl/vendored"] +datasize = ["casper-types/datasize"] [[bin]] name = "casper-node" @@ -139,9 +142,9 @@ assets = [ ] maintainer-scripts = "../resources/maintainer_scripts/debian" extended-description = """ -Package for Casper Node and Client. +Package for Casper Node. 
-For information on using package, see https://github.com/CasperLabs/casper-node +For information on using package, see https://github.com/casper-network/casper-node """ [package.metadata.deb.systemd-units] diff --git a/node/README.md b/node/README.md index a0e709b812..d89b031b03 100644 --- a/node/README.md +++ b/node/README.md @@ -1,16 +1,15 @@ # `casper-node` -[![LOGO](https://raw.githubusercontent.com/CasperLabs/casper-node/master/images/CasperLabs_Logo_Horizontal_RGB.png)](https://casperlabs.io/) +[![LOGO](https://raw.githubusercontent.com/casper-network/casper-node/master/images/casper-association-logo-primary.svg)](https://casper.network/) -[![Build Status](https://drone-auto.casperlabs.io/api/badges/CasperLabs/casper-node/status.svg?branch=master)](http://drone-auto.casperlabs.io/CasperLabs/casper-node) [![Crates.io](https://img.shields.io/crates/v/casper-node)](https://crates.io/crates/casper-node) [![Documentation](https://docs.rs/casper-node/badge.svg)](https://docs.rs/casper-node) -[![License](https://img.shields.io/badge/license-COSL-blue.svg)](https://github.com/CasperLabs/casper-node/blob/master/LICENSE) +[![License](https://img.shields.io/badge/license-Apache-blue)](https://github.com/casper-network/casper-node/blob/master/LICENSE) The component for running a node on the casper network. -[Node Operator Guide](https://docs.casperlabs.io/en/latest/node-operator/index.html) +[Node Operator Guide](https://docs.casper.network/operators/) ## License -Licensed under the [CasperLabs Open Source License (COSL)](https://github.com/CasperLabs/casper-node/blob/master/LICENSE). \ No newline at end of file +Licensed under the [Apache License Version 2.0](https://github.com/casper-network/casper-node/blob/master/LICENSE). 
\ No newline at end of file diff --git a/node/build.rs b/node/build.rs index 07b69df9d6..42f0f6a06a 100644 --- a/node/build.rs +++ b/node/build.rs @@ -1,16 +1,42 @@ -use std::env; +use std::{env, process::Command}; -use vergen::ConstantsFlags; +const NODE_BUILD_PROFILE_ENV_VAR: &str = "NODE_BUILD_PROFILE"; +const NODE_GIT_HASH_ENV_VAR: &str = "NODE_GIT_SHA"; +const CARGO_BUILD_PROFILE_ENV_VAR: &str = "PROFILE"; + +/// +/// `casper-node` build script to capture the git revision hash and cargo build profile and export +/// them to cargo to include them in the version information. +/// +/// Notes: This script exports information to cargo via println! with the old invocation prefix of +/// `cargo:`, if/when the node uses a Rust version `1.77` or above, this should be changed to +/// `cargo::` as the prefix changed in that version of rust fn main() { - let mut flags = ConstantsFlags::empty(); - flags.toggle(ConstantsFlags::SHA_SHORT); - flags.toggle(ConstantsFlags::REBUILD_ON_HEAD_CHANGE); - vergen::generate_cargo_keys(flags).expect("should generate the cargo keys"); + match Command::new("git") + .arg("rev-parse") + .arg("--short") + .arg("HEAD") + .output() + { + Ok(output) => { + // In the event the git command is successful, export the properly formatted git hash to + // cargo at compile time. + let git_hash_raw = + String::from_utf8(output.stdout).expect("Failed to obtain commit hash to string"); + let git_hash = git_hash_raw.trim_end_matches('\n'); + + println!("cargo:rustc-env={NODE_GIT_HASH_ENV_VAR}={git_hash}"); + } + + Err(error) => { + println!("cargo:warning={error}"); + println!("cargo:warning=casper-node build version will not include git short hash"); + } + } - // Make the build profile available to rustc at compile time. 
println!( - "cargo:rustc-env=NODE_BUILD_PROFILE={}", - env::var("PROFILE").unwrap() + "cargo:rustc-env={NODE_BUILD_PROFILE_ENV_VAR}={}", + env::var(CARGO_BUILD_PROFILE_ENV_VAR).unwrap() ); } diff --git a/node/proptest-regressions/components/diagnostics_port/stop_at.txt b/node/proptest-regressions/components/diagnostics_port/stop_at.txt new file mode 100644 index 0000000000..b167d803f1 --- /dev/null +++ b/node/proptest-regressions/components/diagnostics_port/stop_at.txt @@ -0,0 +1,8 @@ +# Seeds for failure cases proptest has generated in the past. It is +# automatically read and these particular cases re-run before any +# novel cases are generated. +# +# It is recommended to check this file in to source control so that +# everyone who runs the test benefits from these saved cases. +cc a95b33d3196ca47b38fb6d16346318cbfbcd6494087384852a2d4bdb585f1edf # shrinks to stop_at = NextBlock +cc 5d4cf22796e91f3ca192f4b42ff7738143ba06e6ad7ea088abc9e63784be78a6 # shrinks to stop_at = EraId(EraId(0)) diff --git a/node/src/app/cli.rs b/node/src/app/cli.rs deleted file mode 100644 index 8e1dda10fd..0000000000 --- a/node/src/app/cli.rs +++ /dev/null @@ -1,318 +0,0 @@ -//! Command-line option parsing. -//! -//! Most configuration is done via config files (see [`config`](../config/index.html) for details). - -pub mod arglang; - -use std::{ - env, fs, - path::{Path, PathBuf}, - str::FromStr, -}; - -use anyhow::{self, Context}; -use regex::Regex; -use structopt::StructOpt; -use toml::{value::Table, Value}; -use tracing::{error, info, trace, warn}; - -use crate::config; -use casper_node::{ - logging, - reactor::{initializer, joiner, validator, ReactorExit, Runner}, - setup_signal_hooks, - types::ExitCode, - utils::{ - pid_file::{PidFile, PidFileOutcome}, - WithDir, - }, -}; -use prometheus::Registry; - -// We override the standard allocator to gather metrics and tune the allocator via th MALLOC_CONF -// env var. 
-#[global_allocator] -static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc; - -// Note: The docstring on `Cli` is the help shown when calling the binary with `--help`. -#[derive(Debug, StructOpt)] -#[structopt(version = casper_node::VERSION_STRING_COLOR.as_str())] -/// Casper blockchain node. -pub enum Cli { - /// Run the validator node. - /// - /// Loads the configuration values from the given configuration file or uses defaults if not - /// given, then runs the reactor. - Validator { - /// Path to configuration file. - config: PathBuf, - - #[structopt( - short = "C", - long, - env = "NODE_CONFIG", - use_delimiter(true), - value_delimiter(";") - )] - /// Overrides and extensions for configuration file entries in the form - ///
.=. For example, '-C=node.chainspec_config_path=chainspec.toml' - config_ext: Vec, - }, - /// Migrate modified values from the old config as required after an upgrade. - MigrateConfig { - /// Path to configuration file of previous version of node. - #[structopt(long)] - old_config: PathBuf, - /// Path to configuration file of this version of node. - #[structopt(long)] - new_config: PathBuf, - }, - /// Migrate any stored data as required after an upgrade. - MigrateData { - /// Path to configuration file of previous version of node. - #[structopt(long)] - old_config: PathBuf, - /// Path to configuration file of this version of node. - #[structopt(long)] - new_config: PathBuf, - }, -} - -#[derive(Debug)] -/// Command line extension to be applied to TOML-based config file values. -pub struct ConfigExt { - section: String, - key: String, - value: String, -} - -impl ConfigExt { - /// Updates TOML table with updated or extended key value pairs. - /// - /// Returns errors if the respective sections to be updated are not TOML tables or if parsing - /// the command line options failed. - fn update_toml_table(&self, toml_value: &mut Value) -> anyhow::Result<()> { - let table = toml_value - .as_table_mut() - .ok_or_else(|| anyhow::anyhow!("configuration table is not a table"))?; - - if !table.contains_key(&self.section) { - table.insert(self.section.clone(), Value::Table(Table::new())); - } - let val = arglang::parse(&self.value)?; - table[&self.section] - .as_table_mut() - .ok_or_else(|| { - anyhow::anyhow!("configuration section {} is not a table", self.section) - })? 
- .insert(self.key.clone(), val); - Ok(()) - } -} - -impl FromStr for ConfigExt { - type Err = anyhow::Error; - - /// Attempts to create a ConfigExt from a str patterned as `section.key=value` - fn from_str(input: &str) -> Result { - let re = Regex::new(r"^([^.]+)\.([^=]+)=(.+)$").unwrap(); - let captures = re - .captures(input) - .context("could not parse config_ext (see README.md)")?; - Ok(ConfigExt { - section: captures - .get(1) - .context("failed to find section")? - .as_str() - .to_owned(), - key: captures - .get(2) - .context("failed to find key")? - .as_str() - .to_owned(), - value: captures - .get(3) - .context("failed to find value")? - .as_str() - .to_owned(), - }) - } -} - -impl Cli { - /// Executes selected CLI command. - pub async fn run(self) -> anyhow::Result { - match self { - Cli::Validator { config, config_ext } => { - // Setup UNIX signal hooks. - setup_signal_hooks(); - - let validator_config = Self::init(&config, config_ext)?; - info!(version = %casper_node::VERSION_STRING.as_str(), "node starting up"); - - let pidfile_outcome = { - // Determine storage directory to store pidfile in. - let storage_config = validator_config.map_ref(|cfg| cfg.storage.clone()); - let root = storage_config.with_dir(storage_config.value().path.clone()); - - // Create directory if it does not exist, similar to how the storage component - // would do it. - if !root.exists() { - fs::create_dir_all(&root).context("create storage directory")?; - } - - PidFile::acquire(root.join("initializer.pid")) - }; - - // Note: Do not change `_pidfile` to `_`, or it will be dropped prematurely. - // Instantiating `pidfile` guarantees that it will be dropped _after_ any reactor, - // which is what we want. 
- let (_pidfile, crashed) = match pidfile_outcome { - PidFileOutcome::AnotherNodeRunning(_) => { - anyhow::bail!("another node instance is running (pidfile is locked)"); - } - PidFileOutcome::Crashed(pidfile) => { - warn!("previous node instance seems to have crashed, integrity checks may be run"); - (pidfile, true) - } - PidFileOutcome::Clean(pidfile) => { - info!("no previous crash detected"); - (pidfile, false) - } - PidFileOutcome::PidFileError(err) => { - return Err(anyhow::anyhow!(err)); - } - }; - - // We use a `ChaCha20Rng` for the production node. For one, we want to completely - // eliminate any chance of runtime failures, regardless of how small (these - // exist with `OsRng`). Additionally, we want to limit the number of syscalls for - // performance reasons. - let mut rng = casper_node::new_rng(); - - // The metrics are shared across all reactors. - let registry = Registry::new(); - - let mut initializer_runner = Runner::::with_metrics( - (crashed, validator_config), - &mut rng, - ®istry, - ) - .await?; - - // let mut initializer2_runner = Runner::::with_metrics( - // WithDir::new(root.clone(), validator_config), - // &mut rng, - // ®istry, - // ) - // .await?; - // initializer2_runner.run(&mut rng).await; - - match initializer_runner.run(&mut rng).await { - ReactorExit::ProcessShouldExit(exit_code) => return Ok(exit_code as i32), - ReactorExit::ProcessShouldContinue => info!("finished initialization"), - } - - let initializer = initializer_runner.into_inner(); - let root = config - .parent() - .map(|path| path.to_owned()) - .unwrap_or_else(|| "/".into()); - let mut joiner_runner = Runner::::with_metrics( - WithDir::new(root, initializer), - &mut rng, - ®istry, - ) - .await?; - match joiner_runner.run(&mut rng).await { - ReactorExit::ProcessShouldExit(exit_code) => return Ok(exit_code as i32), - ReactorExit::ProcessShouldContinue => info!("finished joining"), - } - - let config = joiner_runner.into_inner().into_validator_config().await?; - let mut 
validator_runner = - Runner::::with_metrics(config, &mut rng, ®istry).await?; - - match validator_runner.run(&mut rng).await { - ReactorExit::ProcessShouldExit(exit_code) => Ok(exit_code as i32), - reactor_exit => { - error!("validator should not exit with {:?}", reactor_exit); - Ok(ExitCode::Abort as i32) - } - } - } - Cli::MigrateConfig { - old_config, - new_config, - } => { - let new_config = Self::init(&new_config, vec![])?; - - let old_root = old_config - .parent() - .map(|path| path.to_owned()) - .unwrap_or_else(|| "/".into()); - let encoded_old_config = fs::read_to_string(&old_config) - .context("could not read old configuration file") - .with_context(|| old_config.display().to_string())?; - let old_config = toml::from_str(&encoded_old_config)?; - - info!(version = %env!("CARGO_PKG_VERSION"), "migrating config"); - casper_node::migrate_config(WithDir::new(old_root, old_config), new_config)?; - Ok(ExitCode::Success as i32) - } - Cli::MigrateData { - old_config, - new_config, - } => { - let new_config = Self::init(&new_config, vec![])?; - - let old_root = old_config - .parent() - .map(|path| path.to_owned()) - .unwrap_or_else(|| "/".into()); - let encoded_old_config = fs::read_to_string(&old_config) - .context("could not read old configuration file") - .with_context(|| old_config.display().to_string())?; - let old_config = toml::from_str(&encoded_old_config)?; - - info!(version = %env!("CARGO_PKG_VERSION"), "migrating data"); - casper_node::migrate_data(WithDir::new(old_root, old_config), new_config)?; - Ok(ExitCode::Success as i32) - } - } - } - - /// Parses the config file for the current version of casper-node, and initializes logging. - fn init( - config: &Path, - config_ext: Vec, - ) -> anyhow::Result> { - // Determine the parent directory of the configuration file, if any. - // Otherwise, we default to `/`. 
- let root = config - .parent() - .map(|path| path.to_owned()) - .unwrap_or_else(|| "/".into()); - - // The app supports running without a config file, using default values. - let encoded_config = fs::read_to_string(&config) - .context("could not read configuration file") - .with_context(|| config.display().to_string())?; - - // Get the TOML table version of the config indicated from CLI args, or from a new - // defaulted config instance if one is not provided. - let mut config_table: Value = toml::from_str(&encoded_config)?; - - // If any command line overrides to the config values are passed, apply them. - for item in config_ext { - item.update_toml_table(&mut config_table)?; - } - - // Create validator config, including any overridden values. - let validator_config: validator::Config = config_table.try_into()?; - logging::init_with_config(&validator_config.logging)?; - trace!("{}", config::to_string(&validator_config)?); - - Ok(WithDir::new(root, validator_config)) - } -} diff --git a/node/src/app/config.rs b/node/src/app/config.rs deleted file mode 100644 index 4ab78dd7f8..0000000000 --- a/node/src/app/config.rs +++ /dev/null @@ -1,56 +0,0 @@ -//! Configuration file management. -//! -//! Configuration for the node is loaded from TOML files, but all configuration values have sensible -//! defaults. -//! -//! The binary offers an option to generate a configuration from defaults for editing. I.e. running -//! the following will dump a default configuration file to stdout: -//! ``` -//! cargo run --release -- generate-config -//! ``` -//! -//! # Adding a configuration section -//! -//! When adding a section to the configuration, ensure that -//! -//! * it has an entry in the root configuration [`Config`](struct.Config.html), -//! * `Default` is implemented (derived or manually) with sensible defaults, -//! * it is completely documented. -//! * it is annotated with `#[serde(deny_unknown_fields)]` to ensure config files and command-line -//! 
overrides contain valid keys. - -use std::path::Path; - -use anyhow::Context; -use serde::{de::DeserializeOwned, Serialize}; - -use casper_node::utils::read_file; - -/// Loads a TOML-formatted configuration from a given file. -pub fn load_from_file, C: DeserializeOwned>(config_path: P) -> anyhow::Result { - let path_ref = config_path.as_ref(); - let config: C = toml::from_slice( - &read_file(path_ref).with_context(|| "failed to read configuration file")?, - ) - .with_context(|| format!("Failed to parse configuration file {}", path_ref.display()))?; - Ok(config) -} - -/// Creates a TOML-formatted string from a given configuration. -pub fn to_string(cfg: &C) -> anyhow::Result { - toml::to_string_pretty(cfg).with_context(|| "Failed to serialize default configuration") -} - -#[cfg(test)] -mod tests { - use casper_node::reactor::validator::Config; - - #[test] - fn example_config_should_parse() { - let config_path = format!( - "{}/../resources/local/config.toml", - env!("CARGO_MANIFEST_DIR") - ); - let _config: Config = super::load_from_file(config_path).unwrap(); - } -} diff --git a/node/src/app/main.rs b/node/src/app/main.rs index c1610b1d00..5965371479 100644 --- a/node/src/app/main.rs +++ b/node/src/app/main.rs @@ -3,11 +3,8 @@ //! This is the core application for the Casper blockchain. Run with `--help` to see available //! command-line arguments. -mod cli; -pub mod config; - use std::{ - panic::{self, PanicInfo}, + panic::{self, PanicHookInfo}, process, }; @@ -16,26 +13,21 @@ use structopt::StructOpt; use tokio::runtime::Builder; use tracing::info; -use casper_node::MAX_THREAD_COUNT; - -use cli::Cli; +use casper_node::{cli::Cli, MAX_THREAD_COUNT}; /// Aborting panic hook. /// /// Will exit the application using `abort` when an error occurs. Always shows a backtrace. 
-fn panic_hook(info: &PanicInfo) { +fn panic_hook(info: &PanicHookInfo<'_>) { let backtrace = Backtrace::new(); eprintln!("{:?}", backtrace); // Print panic info if let Some(s) = info.payload().downcast_ref::<&str>() { - eprintln!("node panicked: {}", s); - // TODO - use `info.message()` once https://github.com/rust-lang/rust/issues/66745 is fixed - // } else if let Some(message) = info.message() { - // eprintln!("{}", message); + eprintln!("node panicked: {s}"); } else { - eprintln!("{}", info); + eprintln!("{info}"); } // Abort after a panic, even if only a worker thread panicked. @@ -60,7 +52,7 @@ fn main() -> anyhow::Result<()> { // Parse CLI args and run selected subcommand. let opts = Cli::from_args(); - runtime.block_on(async { opts.run().await })? + runtime.block_on(opts.run())? }; info!(%exit_code, "exiting casper-node"); diff --git a/node/src/cli.rs b/node/src/cli.rs new file mode 100644 index 0000000000..b00585ca5b --- /dev/null +++ b/node/src/cli.rs @@ -0,0 +1,301 @@ +//! Command-line option parsing. +//! +//! Most configuration is done via config files (see [`config`](../config/index.html) for details). + +pub mod arglang; + +use std::{ + alloc::System, + fs, + path::{Path, PathBuf}, + str::FromStr, + sync::Arc, +}; + +use anyhow::{self, bail, Context}; +use prometheus::Registry; +use regex::Regex; +use stats_alloc::{StatsAlloc, INSTRUMENTED_SYSTEM}; +use structopt::StructOpt; +use toml::{value::Table, Value}; +use tracing::{error, info}; + +use casper_types::{Chainspec, ChainspecRawBytes}; + +use crate::{ + components::network::Identity as NetworkIdentity, + logging, + reactor::{main_reactor, Runner}, + setup_signal_hooks, + types::ExitCode, + utils::{ + chain_specification::validate_chainspec, config_specification::validate_config, Loadable, + WithDir, + }, +}; + +// We override the standard allocator to gather metrics and tune the allocator via the MALLOC_CONF +// env var. 
+#[global_allocator] +static ALLOC: &StatsAlloc = &INSTRUMENTED_SYSTEM; + +// Note: The docstring on `Cli` is the help shown when calling the binary with `--help`. +#[derive(Debug, StructOpt)] +#[structopt(version = crate::VERSION_STRING_COLOR.as_str())] +#[allow(rustdoc::invalid_html_tags)] +/// Casper blockchain node. +pub enum Cli { + /// Run the node in standard mode. + /// + /// Loads the configuration values from the given configuration file or uses defaults if not + /// given, then runs the reactor. + #[structopt(alias = "validator")] + Standard { + /// Path to configuration file. + config: PathBuf, + + #[structopt( + short = "C", + long, + env = "NODE_CONFIG", + use_delimiter(true), + value_delimiter(";") + )] + /// Overrides and extensions for configuration file entries in the form + ///
.=. For example, '-C=node.chainspec_config_path=chainspec.toml' + config_ext: Vec, + }, + /// Migrate modified values from the old config as required after an upgrade. + MigrateConfig { + /// Path to configuration file of previous version of node. + #[structopt(long)] + old_config: PathBuf, + /// Path to configuration file of this version of node. + #[structopt(long)] + new_config: PathBuf, + }, + /// Migrate any stored data as required after an upgrade. + MigrateData { + /// Path to configuration file of previous version of node. + #[structopt(long)] + old_config: PathBuf, + /// Path to configuration file of this version of node. + #[structopt(long)] + new_config: PathBuf, + }, + /// Verify that a given config file can be parsed. + ValidateConfig { + /// Path to configuration file. + config: PathBuf, + }, +} + +#[derive(Debug)] +/// Command line extension to be applied to TOML-based config file values. +pub struct ConfigExt { + section: String, + key: String, + value: String, +} + +impl ConfigExt { + /// Updates TOML table with updated or extended key value pairs. + /// + /// Returns errors if the respective sections to be updated are not TOML tables or if parsing + /// the command line options failed. + fn update_toml_table(&self, toml_value: &mut Value) -> anyhow::Result<()> { + let table = toml_value + .as_table_mut() + .ok_or_else(|| anyhow::anyhow!("configuration table is not a table"))?; + + if !table.contains_key(&self.section) { + table.insert(self.section.clone(), Value::Table(Table::new())); + } + let val = arglang::parse(&self.value)?; + table[&self.section] + .as_table_mut() + .ok_or_else(|| { + anyhow::anyhow!("configuration section {} is not a table", self.section) + })? 
+ .insert(self.key.clone(), val); + Ok(()) + } +} + +impl FromStr for ConfigExt { + type Err = anyhow::Error; + + /// Attempts to create a ConfigExt from a str patterned as `section.key=value` + fn from_str(input: &str) -> Result { + let re = Regex::new(r"^([^.]+)\.([^=]+)=(.+)$").unwrap(); + let captures = re + .captures(input) + .context("could not parse config_ext (see README.md)")?; + Ok(ConfigExt { + section: captures + .get(1) + .context("failed to find section")? + .as_str() + .to_owned(), + key: captures + .get(2) + .context("failed to find key")? + .as_str() + .to_owned(), + value: captures + .get(3) + .context("failed to find value")? + .as_str() + .to_owned(), + }) + } +} + +impl Cli { + /// Executes selected CLI command. + pub async fn run(self) -> anyhow::Result { + match self { + Cli::Standard { config, config_ext } => { + // Setup UNIX signal hooks. + setup_signal_hooks(); + + let mut reactor_config = Self::init(&config, config_ext)?; + + // We use a `ChaCha20Rng` for the production node. For one, we want to completely + // eliminate any chance of runtime failures, regardless of how small (these + // exist with `OsRng`). Additionally, we want to limit the number of syscalls for + // performance reasons. 
+ let mut rng = crate::new_rng(); + + let registry = Registry::new(); + + let (chainspec, chainspec_raw_bytes) = + <(Chainspec, ChainspecRawBytes)>::from_path(reactor_config.dir())?; + + info!( + protocol_version = %chainspec.protocol_version(), + build_version = %crate::VERSION_STRING.as_str(), + "node starting up" + ); + + if !validate_chainspec(&chainspec) { + bail!("invalid chainspec"); + } + + if !validate_config(reactor_config.value()) { + bail!("invalid config"); + } + + reactor_config.value_mut().ensure_valid(&chainspec); + + let network_identity = NetworkIdentity::from_config(WithDir::new( + reactor_config.dir(), + reactor_config.value().network.clone(), + )) + .context("failed to create a network identity")?; + + let mut main_runner = Runner::::with_metrics( + reactor_config, + Arc::new(chainspec), + Arc::new(chainspec_raw_bytes), + network_identity, + &mut rng, + ®istry, + ) + .await?; + + let exit_code = main_runner.run(&mut rng).await; + Ok(exit_code as i32) + } + Cli::MigrateConfig { + old_config, + new_config, + } => { + let new_config = Self::init(&new_config, vec![])?; + + let old_root = old_config + .parent() + .map_or_else(|| "/".into(), Path::to_path_buf); + let encoded_old_config = fs::read_to_string(&old_config) + .context("could not read old configuration file") + .with_context(|| old_config.display().to_string())?; + let old_config = toml::from_str(&encoded_old_config)?; + + info!(build_version = %crate::VERSION_STRING.as_str(), "migrating config"); + crate::config_migration::migrate_config( + WithDir::new(old_root, old_config), + new_config, + )?; + Ok(ExitCode::Success as i32) + } + Cli::MigrateData { + old_config, + new_config, + } => { + let new_config = Self::init(&new_config, vec![])?; + + let old_root = old_config + .parent() + .map_or_else(|| "/".into(), Path::to_path_buf); + let encoded_old_config = fs::read_to_string(&old_config) + .context("could not read old configuration file") + .with_context(|| 
old_config.display().to_string())?; + let old_config = toml::from_str(&encoded_old_config)?; + + info!(build_version = %crate::VERSION_STRING.as_str(), "migrating data"); + crate::data_migration::migrate_data( + WithDir::new(old_root, old_config), + new_config, + )?; + Ok(ExitCode::Success as i32) + } + Cli::ValidateConfig { config } => { + info!(build_version = %crate::VERSION_STRING.as_str(), config_file = ?config, "validating config file"); + match Self::init(&config, vec![]) { + Ok(_config) => { + info!(build_version = %crate::VERSION_STRING.as_str(), config_file = ?config, "config file is valid"); + Ok(ExitCode::Success as i32) + } + Err(err) => { + // initialize manually in case of error to avoid double initialization + logging::init_with_config(&Default::default())?; + error!(build_version = %crate::VERSION_STRING.as_str(), config_file = ?config, "config file is not valid"); + Err(err) + } + } + } + } + } + + /// Parses the config file for the current version of casper-node, and initializes logging. + fn init( + config: &Path, + config_ext: Vec, + ) -> anyhow::Result> { + // Determine the parent directory of the configuration file, if any. + // Otherwise, we default to `/`. + let root = config + .parent() + .map_or_else(|| "/".into(), Path::to_path_buf); + + // The app supports running without a config file, using default values. + let encoded_config = fs::read_to_string(config) + .context("could not read configuration file") + .with_context(|| config.display().to_string())?; + + // Get the TOML table version of the config indicated from CLI args, or from a new + // defaulted config instance if one is not provided. + let mut config_table: Value = toml::from_str(&encoded_config)?; + + // If any command line overrides to the config values are passed, apply them. + for item in config_ext { + item.update_toml_table(&mut config_table)?; + } + + // Create main config, including any overridden values. 
+ let main_config: main_reactor::Config = config_table.try_into()?; + logging::init_with_config(&main_config.logging)?; + + Ok(WithDir::new(root, main_config)) + } +} diff --git a/node/src/app/cli/arglang.rs b/node/src/cli/arglang.rs similarity index 85% rename from node/src/app/cli/arglang.rs rename to node/src/cli/arglang.rs index cd15fc0e2a..1964708a5e 100644 --- a/node/src/app/cli/arglang.rs +++ b/node/src/cli/arglang.rs @@ -20,8 +20,9 @@ use std::{iter::Peekable, str::FromStr}; use thiserror::Error; use toml::Value; +/// A Token to be parsed. #[derive(Clone, Debug, Eq, PartialEq)] -pub enum Token { +pub(crate) enum Token { String(String), I64(i64), Boolean(bool), @@ -31,7 +32,7 @@ pub enum Token { } #[derive(Debug, Error, Eq, PartialEq)] -pub enum Error { +pub(crate) enum Error { #[error("unterminated string in input")] UnterminatedString, #[error("unexpected token {0:?}")] @@ -63,14 +64,14 @@ fn tokenize(input: &str) -> Result, Error> { // Check if we need to complete a token. if !buffer.is_empty() { match ch { - Some(' ') | Some('"') | Some('[') | Some(']') | Some(',') | None => { + Some(' ' | '"' | '[' | ']' | ',') | None => { // Try to parse as number or bool first. if let Ok(value) = i64::from_str(&buffer) { tokens.push(Token::I64(value)); } else if let Ok(value) = bool::from_str(&buffer) { tokens.push(Token::Boolean(value)); } else { - tokens.push(Token::String(buffer.clone())) + tokens.push(Token::String(buffer.clone())); } buffer.clear(); @@ -129,50 +130,44 @@ fn parse_stream(tokens: &mut Peekable) -> Result where I: Iterator, { - loop { - match tokens.next() { - Some(Token::String(value)) => return Ok(Value::String(value)), - Some(Token::I64(value)) => return Ok(Value::Integer(value)), - Some(Token::Boolean(value)) => return Ok(Value::Boolean(value)), - Some(Token::OpenBracket) => { - // Special case for empty list. 
- if tokens.peek() == Some(&Token::CloseBracket) { - tokens.next(); - return Ok(Value::Array(Vec::new())); - } + match tokens.next() { + Some(Token::String(value)) => Ok(Value::String(value)), + Some(Token::I64(value)) => Ok(Value::Integer(value)), + Some(Token::Boolean(value)) => Ok(Value::Boolean(value)), + Some(Token::OpenBracket) => { + // Special case for empty list. + if tokens.peek() == Some(&Token::CloseBracket) { + tokens.next(); + return Ok(Value::Array(Vec::new())); + } - let mut items = Vec::new(); - loop { - items.push(parse_stream(tokens)?); + let mut items = Vec::new(); + loop { + items.push(parse_stream(tokens)?); - match tokens.next() { - Some(Token::CloseBracket) => { - return Ok(Value::Array(items)); - } - Some(Token::Comma) => { - // Continue parsing next time. - } - Some(t) => { - return Err(Error::UnexpectedToken(t)); - } - None => { - return Err(Error::UnexpectedEndOfInput); - } + match tokens.next() { + Some(Token::CloseBracket) => { + return Ok(Value::Array(items)); + } + Some(Token::Comma) => { + // Continue parsing next time. + } + Some(t) => { + return Err(Error::UnexpectedToken(t)); + } + None => { + return Err(Error::UnexpectedEndOfInput); } } } - Some(t @ Token::CloseBracket) | Some(t @ Token::Comma) => { - return Err(Error::UnexpectedToken(t)); - } - None => { - return Err(Error::UnexpectedEndOfInput); - } } + Some(t @ (Token::CloseBracket | Token::Comma)) => Err(Error::UnexpectedToken(t)), + None => Err(Error::UnexpectedEndOfInput), } } /// Parse string using arglang. -pub fn parse(input: &str) -> Result { +pub(crate) fn parse(input: &str) -> Result { let mut tokens = tokenize(input)?.into_iter().peekable(); let value = parse_stream(&mut tokens)?; diff --git a/node/src/components.rs b/node/src/components.rs index c0b2bdf0af..a6db462083 100644 --- a/node/src/components.rs +++ b/node/src/components.rs @@ -1,43 +1,110 @@ -//! Components +//! Components subsystem. //! -//! 
Components are the building blocks of the whole application, wired together inside a reactor. -//! Each component has a unified interface, expressed by the `Component` trait. -pub(crate) mod block_proposer; +//! Components are the building blocks for the application and wired together inside a +//! [reactor](crate::reactor). Each component has a unified interface, expressed by the +//! [`Component`] trait. +//! +//! # Events +//! +//! Every component defines a set of events it can process, expressed through the +//! [`Component::Event`] associated type. If an event that originated outside the component is to be +//! handled (e.g. a request or announcement being handled), a `From for +//! ComponentEvent` implementation must be added (see component vs reactor event section below). +//! +//! A typical cycle for components is to receive an event, either originating from the outside, or +//! as the result of an effect created by the component. This event is processed in the +//! [`handle_event`](Component::handle_event) function, potentially returning effects that may +//! produce new events. +//! +//! # Error and halting states +//! +//! Components in general are expected to be able to handle every input (that is every +//! [`Component::Event`]) in every state. Unexpected inputs should usually be logged and discarded, +//! if possible, and the component is expected to recover from error states by itself. +//! +//! When a recovery is not possible, the [`fatal!`](crate::fatal!) macro should be used to produce +//! an effect that will shut down the system. +//! +//! # Component events and reactor events +//! +//! It is easy to confuse the components own associated event ([`Component::Event`]) and the +//! so-called "reactor event", often written `REv` (see [`effects`](crate::effect) for details on +//! the distinctions). +//! +//! A component's own event defines what sort of events it produces purely for internal use, and +//! also which unbound events it can accept. 
**Acceptance of external events** is expressed by +//! implementing a `From` implementation for the unbound, i.e. a component that can process +//! `FooAnnouncement` and a `BarRequest` will have to `impl From for Event` and +//! `impl From`, with `Event` being the event named as [`Component::Event`]. +//! +//! Since components are usually not specific to only a single reactor, they have to implement +//! `Component` for a variety of reactor events (`REv`). A component can **demand that the +//! reactor provides a set of capabilities** by requiring `From`-implementations on the `REv`, e.g. +//! by restricting the `impl Component` by `where REv: From`. The concrete requirement +//! will usually be dictated by a restriction on a method on an +//! [`EffectBuilder`](crate::effect::EffectBuilder). + +pub(crate) mod binary_port; +pub(crate) mod block_accumulator; +pub(crate) mod block_synchronizer; pub(crate) mod block_validator; -pub(crate) mod chainspec_loader; -#[cfg(test)] -pub(crate) mod collector; -pub(crate) mod consensus; +pub mod consensus; pub mod contract_runtime; -pub(crate) mod deploy_acceptor; +pub(crate) mod diagnostics_port; pub(crate) mod event_stream_server; pub(crate) mod fetcher; pub(crate) mod gossiper; -pub(crate) mod linear_chain; -#[cfg(feature = "fast-sync")] -pub(crate) mod linear_chain_fast_sync; -#[cfg(not(feature = "fast-sync"))] -pub(crate) mod linear_chain_sync; -pub(crate) mod rest_server; -pub(crate) mod rpc_server; +pub(crate) mod transaction_buffer; // The `in_memory_network` is public for use in doctests. 
#[cfg(test)] pub mod in_memory_network; pub(crate) mod metrics; pub(crate) mod network; -pub(crate) mod networking_metrics; -pub(crate) mod small_network; -pub(crate) mod storage; +pub(crate) mod rest_server; +pub(crate) mod shutdown_trigger; +pub mod storage; +pub(crate) mod sync_leaper; +pub(crate) mod transaction_acceptor; +pub(crate) mod upgrade_watcher; + +use datasize::DataSize; +use serde::Deserialize; +use std::fmt::{Debug, Display}; +use tracing::info; use crate::{ effect::{EffectBuilder, Effects}, + failpoints::FailpointActivation, NodeRng, }; +#[cfg_attr(doc, aquamarine::aquamarine)] +/// ```mermaid +/// flowchart TD +/// style Start fill:#66ccff,stroke:#333,stroke-width:4px +/// style End fill:#66ccff,stroke:#333,stroke-width:4px +/// +/// Start --> Uninitialized +/// Uninitialized --> Initializing +/// Initializing --> Initialized +/// Initializing --> Fatal +/// Initialized --> End +/// Fatal --> End +/// ``` +#[derive(Clone, PartialEq, Eq, DataSize, Debug, Deserialize, Default)] +pub(crate) enum ComponentState { + #[default] + Uninitialized, + Initializing, + Initialized, + Fatal(String), +} + /// Core Component. /// -/// Its inputs are `Event`s, allowing it to -/// perform work whenever an event is received, outputting `Effect`s each time it is called. +/// Every component process a set of events it defines itself +/// Its inputs are `Event`s, allowing it to perform work whenever an event is received, outputting +/// `Effect`s each time it is called. /// /// # Error and halting states /// @@ -55,19 +122,24 @@ use crate::{ /// /// Components place restrictions on reactor events (`REv`s), indicating what kind of effects they /// need to be able to produce to operate. -pub trait Component { +pub(crate) trait Component { /// Event associated with `Component`. /// /// The event type that is handled by the component. type Event; - /// Error emitted when constructing the component. - type ConstructionError; + /// Name of the component. 
+ fn name(&self) -> &str; + + /// Activate/deactivate a failpoint. + fn activate_failpoint(&mut self, _activation: &FailpointActivation) { + // Default is to ignore failpoints. + } /// Processes an event, outputting zero or more effects. /// /// This function must not ever perform any blocking or CPU intensive work, as it is expected - /// to return very quickly. + /// to return very quickly -- it will usually be called from an `async` function context. fn handle_event( &mut self, effect_builder: EffectBuilder, @@ -75,3 +147,58 @@ pub trait Component { event: Self::Event, ) -> Effects; } + +pub(crate) trait InitializedComponent: Component { + fn state(&self) -> &ComponentState; + + fn is_uninitialized(&self) -> bool { + self.state() == &ComponentState::Uninitialized + } + + fn is_fatal(&self) -> bool { + matches!(self.state(), ComponentState::Fatal(_)) + } + + fn start_initialization(&mut self) { + if self.is_uninitialized() { + self.set_state(ComponentState::Initializing); + } else { + info!(name = self.name(), "component must be uninitialized"); + } + } + + fn set_state(&mut self, new_state: ComponentState); +} + +pub(crate) trait PortBoundComponent: InitializedComponent { + type Error: Display + Debug; + type ComponentEvent; + + fn bind( + &mut self, + enabled: bool, + effect_builder: EffectBuilder, + ) -> (Effects, ComponentState) { + if !enabled { + return (Effects::new(), ComponentState::Initialized); + } + + match self.listen(effect_builder) { + Ok(effects) => (effects, ComponentState::Initialized), + Err(error) => (Effects::new(), ComponentState::Fatal(format!("{}", error))), + } + } + + fn listen( + &mut self, + effect_builder: EffectBuilder, + ) -> Result, Self::Error>; +} + +pub(crate) trait ValidatorBoundComponent: Component { + fn handle_validators( + &mut self, + effect_builder: EffectBuilder, + rng: &mut NodeRng, + ) -> Effects; +} diff --git a/node/src/components/binary_port.rs b/node/src/components/binary_port.rs new file mode 100644 index 
0000000000..4e8a441bcd --- /dev/null +++ b/node/src/components/binary_port.rs @@ -0,0 +1,1939 @@ +//! The Binary Port +mod config; +mod connection_terminator; +mod error; +mod event; +mod metrics; +mod rate_limiter; +#[cfg(test)] +mod tests; + +use std::{convert::TryFrom, net::SocketAddr, sync::Arc}; + +use casper_binary_port::{ + AccountInformation, AddressableEntityInformation, BalanceResponse, BinaryMessage, + BinaryMessageCodec, BinaryResponse, BinaryResponseAndRequest, Command, CommandHeader, + CommandTag, ContractInformation, DictionaryItemIdentifier, DictionaryQueryResult, + EntityIdentifier, EraIdentifier, ErrorCode, GetRequest, GetTrieFullResult, + GlobalStateEntityQualifier, GlobalStateQueryResult, GlobalStateRequest, InformationRequest, + InformationRequestTag, KeyPrefix, NodeStatus, PackageIdentifier, PurseIdentifier, + ReactorStateName, RecordId, ResponseType, RewardResponse, TransactionWithExecutionInfo, + ValueWithProof, +}; +use casper_storage::{ + data_access_layer::{ + balance::BalanceHandling, + prefixed_values::{PrefixedValuesRequest, PrefixedValuesResult}, + tagged_values::{TaggedValuesRequest, TaggedValuesResult, TaggedValuesSelection}, + BalanceIdentifier, BalanceRequest, BalanceResult, ProofHandling, ProofsResult, + QueryRequest, QueryResult, SeigniorageRecipientsRequest, SeigniorageRecipientsResult, + TrieRequest, + }, + global_state::trie::TrieRaw, + system::auction, + tracking_copy::TrackingCopyError, + KeyPrefix as StorageKeyPrefix, +}; +use casper_types::{ + account::AccountHash, + addressable_entity::NamedKeyAddr, + bytesrepr::{self, Bytes, FromBytes, ToBytes}, + contracts::{ContractHash, ContractPackage, ContractPackageHash}, + BlockHeader, BlockIdentifier, BlockWithSignatures, ByteCode, ByteCodeAddr, ByteCodeHash, + Chainspec, ContractWasm, ContractWasmHash, Digest, EntityAddr, GlobalStateIdentifier, Key, + Package, PackageAddr, Peers, ProtocolVersion, Rewards, StoredValue, TimeDiff, Timestamp, + Transaction, URef, +}; +use 
connection_terminator::ConnectionTerminator; +use thiserror::Error as ThisError; + +use datasize::DataSize; +use either::Either; +use futures::{SinkExt, StreamExt}; +use once_cell::sync::OnceCell; +use rate_limiter::{LimiterResponse, RateLimiter, RateLimiterError}; +use tokio::{ + join, + net::{TcpListener, TcpStream}, + select, + sync::{Mutex, Notify, OwnedSemaphorePermit, Semaphore}, +}; +use tokio_util::codec::{Encoder, Framed}; +use tracing::{debug, error, info, trace, warn}; + +#[cfg(test)] +use futures::{future::BoxFuture, FutureExt}; + +use self::error::Error; +use crate::{ + contract_runtime::SpeculativeExecutionResult, + effect::{ + requests::{ + AcceptTransactionRequest, BlockSynchronizerRequest, ChainspecRawBytesRequest, + ConsensusRequest, ContractRuntimeRequest, NetworkInfoRequest, ReactorInfoRequest, + StorageRequest, UpgradeWatcherRequest, + }, + EffectBuilder, EffectExt, Effects, + }, + reactor::{main_reactor::MainEvent, QueueKind}, + types::NodeRng, + utils::{display_error, ListeningError}, +}; +pub(crate) use metrics::Metrics; + +use super::{Component, ComponentState, InitializedComponent, PortBoundComponent}; + +pub(crate) use config::Config; +pub(crate) use event::Event; + +const COMPONENT_NAME: &str = "binary_port"; + +#[derive(Debug, ThisError)] +pub(crate) enum BinaryPortInitializationError { + #[error("could not initialize rate limiter: {0}")] + CannotInitializeRateLimiter(String), + #[error("could not initialize metrics: {0}")] + CannotInitializeMetrics(prometheus::Error), +} + +impl From for BinaryPortInitializationError { + fn from(value: RateLimiterError) -> Self { + BinaryPortInitializationError::CannotInitializeRateLimiter(value.to_string()) + } +} + +impl From for BinaryPortInitializationError { + fn from(value: prometheus::Error) -> Self { + BinaryPortInitializationError::CannotInitializeMetrics(value) + } +} + +#[derive(Debug, DataSize)] +pub(crate) struct BinaryPort { + #[data_size(skip)] + state: ComponentState, + 
#[data_size(skip)] + config: Arc, + #[data_size(skip)] + chainspec: Arc, + #[data_size(skip)] + connection_limit: Arc, + #[data_size(skip)] + metrics: Arc, + #[data_size(skip)] + local_addr: Arc>, + #[data_size(skip)] + shutdown_trigger: Arc, + #[data_size(skip)] + server_join_handle: OnceCell>, + #[data_size(skip)] + rate_limiter: OnceCell>>, +} + +impl BinaryPort { + pub(crate) fn new(config: Config, chainspec: Arc, metrics: Metrics) -> Self { + Self { + state: ComponentState::Uninitialized, + connection_limit: Arc::new(Semaphore::new(config.max_connections)), + config: Arc::new(config), + chainspec, + metrics: Arc::new(metrics), + local_addr: Arc::new(OnceCell::new()), + shutdown_trigger: Arc::new(Notify::new()), + server_join_handle: OnceCell::new(), + rate_limiter: OnceCell::new(), + } + } + + /// Returns the binding address. + /// + /// Only used in testing. + #[cfg(test)] + pub(crate) fn bind_address(&self) -> Option { + self.local_addr.get().cloned() + } +} + +struct BinaryRequestTerminationDelayValues { + get_record: TimeDiff, + get_information: TimeDiff, + get_state: TimeDiff, + get_trie: TimeDiff, + accept_transaction: TimeDiff, + speculative_exec: TimeDiff, +} + +impl BinaryRequestTerminationDelayValues { + fn from_config(config: &Config) -> Self { + BinaryRequestTerminationDelayValues { + get_record: config.get_record_request_termination_delay, + get_information: config.get_information_request_termination_delay, + get_state: config.get_state_request_termination_delay, + get_trie: config.get_trie_request_termination_delay, + accept_transaction: config.accept_transaction_request_termination_delay, + speculative_exec: config.speculative_exec_request_termination_delay, + } + } + fn get_life_termination_delay(&self, request: &Command) -> TimeDiff { + match request { + Command::Get(GetRequest::Record { .. }) => self.get_record, + Command::Get(GetRequest::Information { .. 
}) => self.get_information, + Command::Get(GetRequest::State(_)) => self.get_state, + Command::Get(GetRequest::Trie { .. }) => self.get_trie, + Command::TryAcceptTransaction { .. } => self.accept_transaction, + Command::TrySpeculativeExec { .. } => self.speculative_exec, + } + } +} + +async fn handle_request( + req: Command, + effect_builder: EffectBuilder, + config: &Config, + metrics: &Metrics, + protocol_version: ProtocolVersion, +) -> BinaryResponse +where + REv: From + + From + + From + + From + + From + + From + + From + + From + + From + + From + + Send, +{ + match req { + Command::TryAcceptTransaction { transaction } => { + metrics.binary_port_try_accept_transaction_count.inc(); + try_accept_transaction(effect_builder, transaction, false).await + } + Command::TrySpeculativeExec { transaction } => { + metrics.binary_port_try_speculative_exec_count.inc(); + if !config.allow_request_speculative_exec { + debug!( + hash = %transaction.hash(), + "received a request for speculative execution while the feature is disabled" + ); + return BinaryResponse::new_error(ErrorCode::FunctionDisabled); + } + let response = try_accept_transaction(effect_builder, transaction.clone(), true).await; + if !response.is_success() { + return response; + } + try_speculative_execution(effect_builder, transaction).await + } + Command::Get(get_req) => { + handle_get_request(get_req, effect_builder, config, metrics, protocol_version).await + } + } +} + +async fn handle_get_request( + get_req: GetRequest, + effect_builder: EffectBuilder, + config: &Config, + metrics: &Metrics, + protocol_version: ProtocolVersion, +) -> BinaryResponse +where + REv: From + + From + + From + + From + + From + + From + + From + + From + + From + + Send, +{ + match get_req { + // this workaround is in place because get_block_transfers performs a lazy migration + GetRequest::Record { + record_type_tag, + key, + } if RecordId::try_from(record_type_tag) == Ok(RecordId::Transfer) => { + 
metrics.binary_port_get_record_count.inc(); + if key.is_empty() { + return BinaryResponse::new_empty(); + } + let Ok(block_hash) = bytesrepr::deserialize_from_slice(&key) else { + debug!("received an incorrectly serialized key for a transfer record"); + return BinaryResponse::new_error(ErrorCode::TransferRecordMalformedKey); + }; + let Some(transfers) = effect_builder + .get_block_transfers_from_storage(block_hash) + .await + else { + return BinaryResponse::new_empty(); + }; + let Ok(serialized) = bincode::serialize(&transfers) else { + return BinaryResponse::new_error(ErrorCode::InternalError); + }; + BinaryResponse::from_raw_bytes(ResponseType::Transfers, serialized) + } + GetRequest::Record { + record_type_tag, + key, + } => { + metrics.binary_port_get_record_count.inc(); + if key.is_empty() { + return BinaryResponse::new_empty(); + } + match RecordId::try_from(record_type_tag) { + Ok(record_id) => { + let Some(db_bytes) = effect_builder.get_raw_data(record_id, key).await else { + return BinaryResponse::new_empty(); + }; + let payload_type = + ResponseType::from_record_id(record_id, db_bytes.is_legacy()); + BinaryResponse::from_raw_bytes(payload_type, db_bytes.into_raw_bytes()) + } + Err(_) => BinaryResponse::new_error(ErrorCode::UnsupportedRequest), + } + } + GetRequest::Information { info_type_tag, key } => { + metrics.binary_port_get_info_count.inc(); + let Ok(tag) = InformationRequestTag::try_from(info_type_tag) else { + debug!( + tag = info_type_tag, + "received an unknown information request tag" + ); + return BinaryResponse::new_error(ErrorCode::UnsupportedRequest); + }; + match InformationRequest::try_from((tag, &key[..])) { + Ok(req) => handle_info_request(req, effect_builder, protocol_version).await, + Err(error) => { + debug!(?tag, %error, "failed to parse an information request"); + BinaryResponse::new_error(ErrorCode::MalformedInformationRequest) + } + } + } + GetRequest::State(req) => { + metrics.binary_port_get_state_count.inc(); + 
handle_state_request(effect_builder, *req, protocol_version, config).await + } + GetRequest::Trie { trie_key } => { + metrics.binary_port_get_trie_count.inc(); + handle_trie_request(effect_builder, trie_key, config).await + } + } +} + +async fn handle_get_items_by_prefix( + state_identifier: Option, + key_prefix: KeyPrefix, + effect_builder: EffectBuilder, +) -> BinaryResponse +where + REv: From + From + From, +{ + let Some(state_root_hash) = resolve_state_root_hash(effect_builder, state_identifier).await + else { + return BinaryResponse::new_error(ErrorCode::RootNotFound); + }; + let storage_key_prefix = match key_prefix { + KeyPrefix::DelegatorBidAddrsByValidator(hash) => { + StorageKeyPrefix::DelegatorBidAddrsByValidator(hash) + } + KeyPrefix::MessagesByEntity(addr) => StorageKeyPrefix::MessageEntriesByEntity(addr), + KeyPrefix::MessagesByEntityAndTopic(addr, topic) => { + StorageKeyPrefix::MessagesByEntityAndTopic(addr, topic) + } + KeyPrefix::NamedKeysByEntity(addr) => StorageKeyPrefix::NamedKeysByEntity(addr), + KeyPrefix::GasBalanceHoldsByPurse(purse) => StorageKeyPrefix::GasBalanceHoldsByPurse(purse), + KeyPrefix::ProcessingBalanceHoldsByPurse(purse) => { + StorageKeyPrefix::ProcessingBalanceHoldsByPurse(purse) + } + KeyPrefix::EntryPointsV1ByEntity(addr) => StorageKeyPrefix::EntryPointsV1ByEntity(addr), + KeyPrefix::EntryPointsV2ByEntity(addr) => StorageKeyPrefix::EntryPointsV2ByEntity(addr), + }; + let request = PrefixedValuesRequest::new(state_root_hash, storage_key_prefix); + match effect_builder.get_prefixed_values(request).await { + PrefixedValuesResult::Success { values, .. 
} => BinaryResponse::from_value(values), + PrefixedValuesResult::RootNotFound => BinaryResponse::new_error(ErrorCode::RootNotFound), + PrefixedValuesResult::Failure(error) => { + debug!(%error, "failed when querying for values by prefix"); + BinaryResponse::new_error(ErrorCode::InternalError) + } + } +} + +async fn handle_get_all_items( + state_identifier: Option, + key_tag: casper_types::KeyTag, + effect_builder: EffectBuilder, +) -> BinaryResponse +where + REv: From + From + From, +{ + let Some(state_root_hash) = resolve_state_root_hash(effect_builder, state_identifier).await + else { + return BinaryResponse::new_error(ErrorCode::RootNotFound); + }; + let request = TaggedValuesRequest::new(state_root_hash, TaggedValuesSelection::All(key_tag)); + match effect_builder.get_tagged_values(request).await { + TaggedValuesResult::Success { values, .. } => BinaryResponse::from_value(values), + TaggedValuesResult::RootNotFound => BinaryResponse::new_error(ErrorCode::RootNotFound), + TaggedValuesResult::Failure(error) => { + debug!(%error, "failed when querying for all values by tag"); + BinaryResponse::new_error(ErrorCode::InternalError) + } + } +} + +async fn handle_state_request( + effect_builder: EffectBuilder, + request: GlobalStateRequest, + protocol_version: ProtocolVersion, + config: &Config, +) -> BinaryResponse +where + REv: From + + From + + From + + From, +{ + let (state_identifier, qualifier) = request.destructure(); + match qualifier { + GlobalStateEntityQualifier::Item { base_key, path } => { + let Some(state_root_hash) = + resolve_state_root_hash(effect_builder, state_identifier).await + else { + return BinaryResponse::new_error(ErrorCode::RootNotFound); + }; + match get_global_state_item(effect_builder, state_root_hash, base_key, path).await { + Ok(Some(result)) => BinaryResponse::from_value(result), + Ok(None) => BinaryResponse::new_empty(), + Err(err) => BinaryResponse::new_error(err), + } + } + GlobalStateEntityQualifier::AllItems { key_tag } => { + if 
!config.allow_request_get_all_values { + debug!(%key_tag, "received a request for items by key tag while the feature is disabled"); + BinaryResponse::new_error(ErrorCode::FunctionDisabled) + } else { + handle_get_all_items(state_identifier, key_tag, effect_builder).await + } + } + GlobalStateEntityQualifier::DictionaryItem { identifier } => { + let Some(state_root_hash) = + resolve_state_root_hash(effect_builder, state_identifier).await + else { + return BinaryResponse::new_error(ErrorCode::RootNotFound); + }; + let result = match identifier { + DictionaryItemIdentifier::AccountNamedKey { + hash, + dictionary_name, + dictionary_item_key, + } => { + get_dictionary_item_by_legacy_named_key( + effect_builder, + state_root_hash, + Key::Account(hash), + dictionary_name, + dictionary_item_key, + ) + .await + } + DictionaryItemIdentifier::ContractNamedKey { + hash, + dictionary_name, + dictionary_item_key, + } => { + get_dictionary_item_by_legacy_named_key( + effect_builder, + state_root_hash, + Key::Hash(hash), + dictionary_name, + dictionary_item_key, + ) + .await + } + DictionaryItemIdentifier::EntityNamedKey { + addr, + dictionary_name, + dictionary_item_key, + } => { + get_dictionary_item_by_named_key( + effect_builder, + state_root_hash, + addr, + dictionary_name, + dictionary_item_key, + ) + .await + } + DictionaryItemIdentifier::URef { + seed_uref, + dictionary_item_key, + } => { + let key = Key::dictionary(seed_uref, dictionary_item_key.as_bytes()); + get_global_state_item(effect_builder, state_root_hash, key, vec![]) + .await + .map(|maybe_res| maybe_res.map(|res| DictionaryQueryResult::new(key, res))) + } + DictionaryItemIdentifier::DictionaryItem(addr) => { + let key = Key::Dictionary(addr); + get_global_state_item(effect_builder, state_root_hash, key, vec![]) + .await + .map(|maybe_res| maybe_res.map(|res| DictionaryQueryResult::new(key, res))) + } + }; + match result { + Ok(Some(result)) => BinaryResponse::from_value(result), + Ok(None) => 
BinaryResponse::new_empty(), + Err(err) => BinaryResponse::new_error(err), + } + } + GlobalStateEntityQualifier::Balance { purse_identifier } => { + let Some(state_root_hash) = + resolve_state_root_hash(effect_builder, state_identifier).await + else { + return BinaryResponse::new_empty(); + }; + get_balance( + effect_builder, + state_root_hash, + purse_identifier, + protocol_version, + ) + .await + } + GlobalStateEntityQualifier::ItemsByPrefix { key_prefix } => { + handle_get_items_by_prefix(state_identifier, key_prefix, effect_builder).await + } + } +} + +async fn handle_trie_request( + effect_builder: EffectBuilder, + trie_key: Digest, + config: &Config, +) -> BinaryResponse +where + REv: From + + From + + From + + From, +{ + if !config.allow_request_get_trie { + debug!(%trie_key, "received a trie request while the feature is disabled"); + BinaryResponse::new_error(ErrorCode::FunctionDisabled) + } else { + let req = TrieRequest::new(trie_key, None); + match effect_builder.get_trie(req).await.into_raw() { + Ok(result) => { + BinaryResponse::from_value(GetTrieFullResult::new(result.map(TrieRaw::into_inner))) + } + Err(error) => { + debug!(%error, "failed when querying for a trie"); + BinaryResponse::new_error(ErrorCode::InternalError) + } + } + } +} + +async fn get_dictionary_item_by_legacy_named_key( + effect_builder: EffectBuilder, + state_root_hash: Digest, + entity_key: Key, + dictionary_name: String, + dictionary_item_key: String, +) -> Result, ErrorCode> +where + REv: From + From + From, +{ + match effect_builder + .query_global_state(QueryRequest::new(state_root_hash, entity_key, vec![])) + .await + { + QueryResult::Success { value, .. 
} => { + let named_keys = match &*value { + StoredValue::Account(account) => account.named_keys(), + StoredValue::Contract(contract) => contract.named_keys(), + value => { + debug!( + value_type = value.type_name(), + "unexpected stored value found when querying for a dictionary" + ); + return Err(ErrorCode::DictionaryURefNotFound); + } + }; + let Some(uref) = named_keys.get(&dictionary_name).and_then(Key::as_uref) else { + debug!( + dictionary_name, + "dictionary seed URef not found in named keys" + ); + return Err(ErrorCode::DictionaryURefNotFound); + }; + let key = Key::dictionary(*uref, dictionary_item_key.as_bytes()); + let Some(query_result) = + get_global_state_item(effect_builder, state_root_hash, key, vec![]).await? + else { + return Ok(None); + }; + + Ok(Some(DictionaryQueryResult::new(key, query_result))) + } + QueryResult::RootNotFound => { + debug!("root not found when querying for a dictionary seed URef"); + Err(ErrorCode::DictionaryURefNotFound) + } + QueryResult::ValueNotFound(error) => { + debug!(%error, "value not found when querying for a dictionary seed URef"); + Err(ErrorCode::DictionaryURefNotFound) + } + QueryResult::Failure(error) => { + debug!(%error, "failed when querying for a dictionary seed URef"); + Err(ErrorCode::FailedQuery) + } + } +} + +async fn get_dictionary_item_by_named_key( + effect_builder: EffectBuilder, + state_root_hash: Digest, + entity_addr: EntityAddr, + dictionary_name: String, + dictionary_item_key: String, +) -> Result, ErrorCode> +where + REv: From + From + From, +{ + let Ok(key_addr) = NamedKeyAddr::new_from_string(entity_addr, dictionary_name) else { + return Err(ErrorCode::InternalError); + }; + let req = QueryRequest::new(state_root_hash, Key::NamedKey(key_addr), vec![]); + match effect_builder.query_global_state(req).await { + QueryResult::Success { value, .. 
} => { + let key_val = match &*value { + StoredValue::NamedKey(key_val) => key_val, + value => { + debug!( + value_type = value.type_name(), + "unexpected stored value found when querying for a dictionary" + ); + return Err(ErrorCode::DictionaryURefNotFound); + } + }; + let uref = match key_val.get_key() { + Ok(Key::URef(uref)) => uref, + result => { + debug!( + ?result, + "unexpected named key result when querying for a dictionary" + ); + return Err(ErrorCode::DictionaryURefNotFound); + } + }; + let key = Key::dictionary(uref, dictionary_item_key.as_bytes()); + let Some(query_result) = + get_global_state_item(effect_builder, state_root_hash, key, vec![]).await? + else { + return Ok(None); + }; + Ok(Some(DictionaryQueryResult::new(key, query_result))) + } + QueryResult::RootNotFound => { + debug!("root not found when querying for a dictionary seed URef"); + Err(ErrorCode::DictionaryURefNotFound) + } + QueryResult::ValueNotFound(error) => { + debug!(%error, "value not found when querying for a dictionary seed URef"); + Err(ErrorCode::DictionaryURefNotFound) + } + QueryResult::Failure(error) => { + debug!(%error, "failed when querying for a dictionary seed URef"); + Err(ErrorCode::FailedQuery) + } + } +} + +async fn get_balance( + effect_builder: EffectBuilder, + state_root_hash: Digest, + purse_identifier: PurseIdentifier, + protocol_version: ProtocolVersion, +) -> BinaryResponse +where + REv: From + + From + + From + + From, +{ + let balance_id = match purse_identifier { + PurseIdentifier::Payment => BalanceIdentifier::Payment, + PurseIdentifier::Accumulate => BalanceIdentifier::Accumulate, + PurseIdentifier::Purse(uref) => BalanceIdentifier::Purse(uref), + PurseIdentifier::PublicKey(pub_key) => BalanceIdentifier::Public(pub_key), + PurseIdentifier::Account(account) => BalanceIdentifier::Account(account), + PurseIdentifier::Entity(entity) => BalanceIdentifier::Entity(entity), + }; + let balance_handling = BalanceHandling::Available; + + let balance_req = 
BalanceRequest::new( + state_root_hash, + protocol_version, + balance_id, + balance_handling, + ProofHandling::Proofs, + ); + match effect_builder.get_balance(balance_req).await { + BalanceResult::RootNotFound => BinaryResponse::new_error(ErrorCode::RootNotFound), + BalanceResult::Success { + total_balance, + available_balance, + proofs_result, + .. + } => { + let ProofsResult::Proofs { + total_balance_proof, + balance_holds, + } = proofs_result + else { + warn!("binary port received no proofs for a balance request with proofs"); + return BinaryResponse::new_error(ErrorCode::InternalError); + }; + let response = BalanceResponse { + total_balance, + available_balance, + total_balance_proof, + balance_holds, + }; + BinaryResponse::from_value(response) + } + BalanceResult::Failure(TrackingCopyError::KeyNotFound(_)) => { + BinaryResponse::new_error(ErrorCode::PurseNotFound) + } + BalanceResult::Failure(error) => { + debug!(%error, "failed when querying for a balance"); + BinaryResponse::new_error(ErrorCode::FailedQuery) + } + } +} + +async fn get_global_state_item( + effect_builder: EffectBuilder, + state_root_hash: Digest, + base_key: Key, + path: Vec, +) -> Result, ErrorCode> +where + REv: From + From + From, +{ + match effect_builder + .query_global_state(QueryRequest::new(state_root_hash, base_key, path)) + .await + { + QueryResult::Success { value, proofs } => { + Ok(Some(GlobalStateQueryResult::new(*value, proofs))) + } + QueryResult::RootNotFound => Err(ErrorCode::RootNotFound), + QueryResult::ValueNotFound(error) => { + debug!(%error, "value not found when querying for a global state item"); + Err(ErrorCode::NotFound) + } + QueryResult::Failure(error) => { + debug!(%error, "failed when querying for a global state item"); + Err(ErrorCode::FailedQuery) + } + } +} + +async fn get_contract_package( + effect_builder: EffectBuilder, + state_root_hash: Digest, + hash: ContractPackageHash, +) -> Result, ValueWithProof>>, ErrorCode> +where + REv: From + + From + + From 
+ + From, +{ + let key = Key::Hash(hash.value()); + let Some(result) = get_global_state_item(effect_builder, state_root_hash, key, vec![]).await? + else { + return Ok(None); + }; + match result.into_inner() { + (StoredValue::ContractPackage(contract), proof) => { + Ok(Some(Either::Left(ValueWithProof::new(contract, proof)))) + } + (other, _) => { + let Some((Key::SmartContract(addr), _)) = other + .as_cl_value() + .and_then(|cl_val| cl_val.to_t::<(Key, URef)>().ok()) + else { + debug!( + ?other, + "unexpected stored value found when querying for a contract package" + ); + return Err(ErrorCode::InternalError); + }; + let package = get_package(effect_builder, state_root_hash, addr).await?; + Ok(package.map(Either::Right)) + } + } +} + +async fn get_package( + effect_builder: EffectBuilder, + state_root_hash: Digest, + package_addr: PackageAddr, +) -> Result>, ErrorCode> +where + REv: From + + From + + From + + From, +{ + let key = Key::SmartContract(package_addr); + let Some(result) = get_global_state_item(effect_builder, state_root_hash, key, vec![]).await? + else { + return Ok(None); + }; + match result.into_inner() { + (StoredValue::SmartContract(contract), proof) => { + Ok(Some(ValueWithProof::new(contract, proof))) + } + other => { + debug!( + ?other, + "unexpected stored value found when querying for a package" + ); + Err(ErrorCode::InternalError) + } + } +} + +async fn get_contract( + effect_builder: EffectBuilder, + state_root_hash: Digest, + hash: ContractHash, + include_wasm: bool, +) -> Result>, ErrorCode> +where + REv: From + + From + + From + + From, +{ + let key = Key::Hash(hash.value()); + let Some(result) = get_global_state_item(effect_builder, state_root_hash, key, vec![]).await? 
+ else { + return Ok(None); + }; + match result.into_inner() { + (StoredValue::Contract(contract), proof) + if include_wasm && contract.contract_wasm_hash() != ContractWasmHash::default() => + { + let wasm_hash = contract.contract_wasm_hash(); + let Some(wasm) = get_contract_wasm(effect_builder, state_root_hash, wasm_hash).await? + else { + return Ok(None); + }; + Ok(Some(Either::Left(ContractInformation::new( + hash, + ValueWithProof::new(contract, proof), + Some(wasm), + )))) + } + (StoredValue::Contract(contract), proof) => Ok(Some(Either::Left( + ContractInformation::new(hash, ValueWithProof::new(contract, proof), None), + ))), + (other, _) => { + let Some(Key::AddressableEntity(addr)) = other + .as_cl_value() + .and_then(|cl_val| cl_val.to_t::().ok()) + else { + debug!( + ?other, + "unexpected stored value found when querying for a contract" + ); + return Err(ErrorCode::InternalError); + }; + let entity = get_entity(effect_builder, state_root_hash, addr, include_wasm).await?; + Ok(entity.map(Either::Right)) + } + } +} + +async fn get_account( + effect_builder: EffectBuilder, + state_root_hash: Digest, + hash: AccountHash, + include_bytecode: bool, +) -> Result>, ErrorCode> +where + REv: From + + From + + From + + From, +{ + let key = Key::Account(hash); + let Some(result) = get_global_state_item(effect_builder, state_root_hash, key, vec![]).await? 
+ else { + return Ok(None); + }; + match result.into_inner() { + (StoredValue::Account(account), proof) => { + Ok(Some(Either::Left(AccountInformation::new(account, proof)))) + } + (other, _) => { + let Some(Key::AddressableEntity(addr)) = other + .as_cl_value() + .and_then(|cl_val| cl_val.to_t::().ok()) + else { + debug!( + ?other, + "unexpected stored value found when querying for an account" + ); + return Err(ErrorCode::InternalError); + }; + let entity = + get_entity(effect_builder, state_root_hash, addr, include_bytecode).await?; + Ok(entity.map(Either::Right)) + } + } +} + +async fn get_entity( + effect_builder: EffectBuilder, + state_root_hash: Digest, + addr: EntityAddr, + include_bytecode: bool, +) -> Result, ErrorCode> +where + REv: From + + From + + From + + From, +{ + let key = Key::from(addr); + let Some(result) = get_global_state_item(effect_builder, state_root_hash, key, vec![]).await? + else { + return Ok(None); + }; + match result.into_inner() { + (StoredValue::AddressableEntity(entity), proof) + if include_bytecode && entity.byte_code_hash() != ByteCodeHash::default() => + { + let Some(bytecode) = + get_contract_bytecode(effect_builder, state_root_hash, entity.byte_code_hash()) + .await? + else { + return Ok(None); + }; + Ok(Some(AddressableEntityInformation::new( + addr, + ValueWithProof::new(entity, proof), + Some(bytecode), + ))) + } + (StoredValue::AddressableEntity(entity), proof) => Ok(Some( + AddressableEntityInformation::new(addr, ValueWithProof::new(entity, proof), None), + )), + (other, _) => { + debug!( + ?other, + "unexpected stored value found when querying for an entity" + ); + Err(ErrorCode::InternalError) + } + } +} + +async fn get_contract_wasm( + effect_builder: EffectBuilder, + state_root_hash: Digest, + hash: ContractWasmHash, +) -> Result>, ErrorCode> +where + REv: From + + From + + From + + From, +{ + let key = Key::from(hash); + let Some(value) = get_global_state_item(effect_builder, state_root_hash, key, vec![]).await? 
+ else { + return Ok(None); + }; + match value.into_inner() { + (StoredValue::ContractWasm(wasm), proof) => Ok(Some(ValueWithProof::new(wasm, proof))), + other => { + debug!( + ?other, + "unexpected stored value found when querying for Wasm" + ); + Err(ErrorCode::InternalError) + } + } +} + +async fn get_contract_bytecode( + effect_builder: EffectBuilder, + state_root_hash: Digest, + addr: ByteCodeHash, +) -> Result>, ErrorCode> +where + REv: From + + From + + From + + From, +{ + let key = Key::ByteCode(ByteCodeAddr::new_wasm_addr(addr.value())); + let Some(value) = get_global_state_item(effect_builder, state_root_hash, key, vec![]).await? + else { + return Ok(None); + }; + match value.into_inner() { + (StoredValue::ByteCode(bytecode), proof) => Ok(Some(ValueWithProof::new(bytecode, proof))), + other => { + debug!( + ?other, + "unexpected stored value found when querying for bytecode" + ); + Err(ErrorCode::InternalError) + } + } +} + +async fn handle_info_request( + req: InformationRequest, + effect_builder: EffectBuilder, + protocol_version: ProtocolVersion, +) -> BinaryResponse +where + REv: From + + From + + From + + From + + From + + From + + From + + From + + From + + Send, +{ + match req { + InformationRequest::BlockHeader(identifier) => { + let maybe_header = resolve_block_header(effect_builder, identifier).await; + BinaryResponse::from_option(maybe_header) + } + InformationRequest::BlockWithSignatures(identifier) => { + let Some(height) = resolve_block_height(effect_builder, identifier).await else { + return BinaryResponse::new_empty(); + }; + let Some(block) = effect_builder + .get_block_at_height_with_metadata_from_storage(height, true) + .await + else { + return BinaryResponse::new_empty(); + }; + BinaryResponse::from_value(BlockWithSignatures::new( + block.block, + block.block_signatures, + )) + } + InformationRequest::Transaction { + hash, + with_finalized_approvals, + } => { + let Some((transaction, execution_info)) = effect_builder + 
.get_transaction_and_exec_info_from_storage(hash, with_finalized_approvals) + .await + else { + return BinaryResponse::new_empty(); + }; + BinaryResponse::from_value(TransactionWithExecutionInfo::new( + transaction, + execution_info, + )) + } + InformationRequest::Peers => { + BinaryResponse::from_value(Peers::from(effect_builder.network_peers().await)) + } + InformationRequest::Uptime => BinaryResponse::from_value(effect_builder.get_uptime().await), + InformationRequest::LastProgress => { + BinaryResponse::from_value(effect_builder.get_last_progress().await) + } + InformationRequest::ReactorState => { + let state = effect_builder.get_reactor_state().await; + BinaryResponse::from_value(ReactorStateName::new(state)) + } + InformationRequest::NetworkName => { + BinaryResponse::from_value(effect_builder.get_network_name().await) + } + InformationRequest::ConsensusValidatorChanges => { + BinaryResponse::from_value(effect_builder.get_consensus_validator_changes().await) + } + InformationRequest::BlockSynchronizerStatus => { + BinaryResponse::from_value(effect_builder.get_block_synchronizer_status().await) + } + InformationRequest::AvailableBlockRange => BinaryResponse::from_value( + effect_builder + .get_available_block_range_from_storage() + .await, + ), + InformationRequest::NextUpgrade => { + BinaryResponse::from_option(effect_builder.get_next_upgrade().await) + } + InformationRequest::ConsensusStatus => { + BinaryResponse::from_option(effect_builder.consensus_status().await) + } + InformationRequest::ChainspecRawBytes => { + BinaryResponse::from_value((*effect_builder.get_chainspec_raw_bytes().await).clone()) + } + InformationRequest::LatestSwitchBlockHeader => BinaryResponse::from_option( + effect_builder + .get_latest_switch_block_header_from_storage() + .await, + ), + InformationRequest::NodeStatus => { + let ( + node_uptime, + network_name, + last_added_block, + peers, + next_upgrade, + consensus_status, + reactor_state, + last_progress, + available_block_range, 
+ block_sync, + latest_switch_block_header, + ) = join!( + effect_builder.get_uptime(), + effect_builder.get_network_name(), + effect_builder.get_highest_complete_block_from_storage(), + effect_builder.network_peers(), + effect_builder.get_next_upgrade(), + effect_builder.consensus_status(), + effect_builder.get_reactor_state(), + effect_builder.get_last_progress(), + effect_builder.get_available_block_range_from_storage(), + effect_builder.get_block_synchronizer_status(), + effect_builder.get_latest_switch_block_header_from_storage(), + ); + let starting_state_root_hash = effect_builder + .get_block_header_at_height_from_storage(available_block_range.low(), true) + .await + .map(|header| *header.state_root_hash()) + .unwrap_or_default(); + let (our_public_signing_key, round_length) = + consensus_status.map_or((None, None), |consensus_status| { + ( + Some(consensus_status.validator_public_key().clone()), + consensus_status.round_length(), + ) + }); + let reactor_state = ReactorStateName::new(reactor_state); + + let Ok(uptime) = TimeDiff::try_from(node_uptime) else { + return BinaryResponse::new_error(ErrorCode::InternalError); + }; + + let status = NodeStatus { + protocol_version, + peers: Peers::from(peers), + build_version: crate::VERSION_STRING.clone(), + chainspec_name: network_name.into(), + starting_state_root_hash, + last_added_block_info: last_added_block.map(Into::into), + our_public_signing_key, + round_length, + next_upgrade, + uptime, + reactor_state, + last_progress: last_progress.into(), + available_block_range, + block_sync, + latest_switch_block_hash: latest_switch_block_header + .map(|header| header.block_hash()), + }; + BinaryResponse::from_value(status) + } + InformationRequest::Reward { + era_identifier, + validator, + delegator, + } => { + let Some(header) = + resolve_era_switch_block_header(effect_builder, era_identifier).await + else { + return BinaryResponse::new_error(ErrorCode::SwitchBlockNotFound); + }; + let Some(previous_height) = 
header.height().checked_sub(1) else { + // there's not going to be any rewards for the genesis block + debug!("received a request for rewards in the genesis block"); + return BinaryResponse::new_empty(); + }; + let Some(parent_header) = effect_builder + .get_block_header_at_height_from_storage(previous_height, true) + .await + else { + return BinaryResponse::new_error(ErrorCode::SwitchBlockParentNotFound); + }; + let snapshot_request = + SeigniorageRecipientsRequest::new(*parent_header.state_root_hash()); + + let snapshot = match effect_builder + .get_seigniorage_recipients_snapshot_from_contract_runtime(snapshot_request) + .await + { + SeigniorageRecipientsResult::Success { + seigniorage_recipients, + } => seigniorage_recipients, + SeigniorageRecipientsResult::RootNotFound => { + return BinaryResponse::new_error(ErrorCode::RootNotFound) + } + SeigniorageRecipientsResult::Failure(error) => { + warn!(%error, "failed when querying for seigniorage recipients"); + return BinaryResponse::new_error(ErrorCode::FailedQuery); + } + SeigniorageRecipientsResult::AuctionNotFound => { + warn!("auction not found when querying for seigniorage recipients"); + return BinaryResponse::new_error(ErrorCode::InternalError); + } + SeigniorageRecipientsResult::ValueNotFound(error) => { + warn!(%error, "value not found when querying for seigniorage recipients"); + return BinaryResponse::new_error(ErrorCode::InternalError); + } + }; + let Some(era_end) = header.clone_era_end() else { + // switch block should have an era end + error!( + hash = %header.block_hash(), + "switch block missing era end (undefined behavior)" + ); + return BinaryResponse::new_error(ErrorCode::InternalError); + }; + let block_rewards = match era_end.rewards() { + Rewards::V2(rewards) => rewards, + Rewards::V1(_) => { + //It is possible to calculate V1 rewards, but previously we didn't support an + // endpoint to report it in that way. 
We could implement it + // in a future release if there is interest in it - it's not trivial though. + return BinaryResponse::new_error(ErrorCode::UnsupportedRewardsV1Request); + } + }; + let Some(validator_rewards) = block_rewards.get(&validator) else { + return BinaryResponse::new_empty(); + }; + + let seigniorage_recipient = + snapshot.get_seignorage_recipient(&header.era_id(), &validator); + + let reward = auction::detail::reward( + &validator, + delegator.as_deref(), + header.era_id(), + validator_rewards, + &snapshot, + ); + match (reward, seigniorage_recipient) { + (Ok(Some(reward)), Some(seigniorage_recipient)) => { + let response = RewardResponse::new( + reward, + header.era_id(), + *seigniorage_recipient.delegation_rate(), + header.block_hash(), + ); + BinaryResponse::from_value(response) + } + (Err(error), _) => { + warn!(%error, "failed when calculating rewards"); + BinaryResponse::new_error(ErrorCode::InternalError) + } + _ => BinaryResponse::new_empty(), + } + } + InformationRequest::ProtocolVersion => BinaryResponse::from_value(protocol_version), + InformationRequest::Package { + state_identifier, + identifier, + } => { + let Some(state_root_hash) = + resolve_state_root_hash(effect_builder, state_identifier).await + else { + return BinaryResponse::new_error(ErrorCode::RootNotFound); + }; + let either = match identifier { + PackageIdentifier::ContractPackageHash(hash) => { + get_contract_package(effect_builder, state_root_hash, hash).await + } + PackageIdentifier::PackageAddr(addr) => { + get_package(effect_builder, state_root_hash, addr) + .await + .map(|opt| opt.map(Either::Right)) + } + }; + match either { + Ok(Some(Either::Left(contract_package))) => { + BinaryResponse::from_value(contract_package) + } + Ok(Some(Either::Right(package))) => BinaryResponse::from_value(package), + Ok(None) => BinaryResponse::new_empty(), + Err(err) => BinaryResponse::new_error(err), + } + } + InformationRequest::Entity { + state_identifier, + identifier, + 
include_bytecode, + } => { + let Some(state_root_hash) = + resolve_state_root_hash(effect_builder, state_identifier).await + else { + return BinaryResponse::new_error(ErrorCode::RootNotFound); + }; + match identifier { + EntityIdentifier::ContractHash(hash) => { + match get_contract(effect_builder, state_root_hash, hash, include_bytecode) + .await + { + Ok(Some(Either::Left(contract))) => BinaryResponse::from_value(contract), + Ok(Some(Either::Right(entity))) => BinaryResponse::from_value(entity), + Ok(None) => BinaryResponse::new_empty(), + Err(err) => BinaryResponse::new_error(err), + } + } + EntityIdentifier::AccountHash(hash) => { + match get_account(effect_builder, state_root_hash, hash, include_bytecode).await + { + Ok(Some(Either::Left(account))) => BinaryResponse::from_value(account), + Ok(Some(Either::Right(entity))) => BinaryResponse::from_value(entity), + Ok(None) => BinaryResponse::new_empty(), + Err(err) => BinaryResponse::new_error(err), + } + } + EntityIdentifier::PublicKey(pub_key) => { + let hash = pub_key.to_account_hash(); + match get_account(effect_builder, state_root_hash, hash, include_bytecode).await + { + Ok(Some(Either::Left(account))) => BinaryResponse::from_value(account), + Ok(Some(Either::Right(entity))) => BinaryResponse::from_value(entity), + Ok(None) => BinaryResponse::new_empty(), + Err(err) => BinaryResponse::new_error(err), + } + } + EntityIdentifier::EntityAddr(addr) => { + match get_entity(effect_builder, state_root_hash, addr, include_bytecode).await + { + Ok(Some(entity)) => BinaryResponse::from_value(entity), + Ok(None) => BinaryResponse::new_empty(), + Err(err) => BinaryResponse::new_error(err), + } + } + } + } + } +} + +async fn try_accept_transaction( + effect_builder: EffectBuilder, + transaction: Transaction, + is_speculative: bool, +) -> BinaryResponse +where + REv: From, +{ + effect_builder + .try_accept_transaction(transaction, is_speculative) + .await + .map_or_else( + |err| BinaryResponse::new_error(err.into()), + 
|()| BinaryResponse::new_empty(), + ) +} + +async fn try_speculative_execution( + effect_builder: EffectBuilder, + transaction: Transaction, +) -> BinaryResponse +where + REv: From + From + From, +{ + let tip = match effect_builder + .get_highest_complete_block_header_from_storage() + .await + { + Some(tip) => tip, + None => return BinaryResponse::new_error(ErrorCode::NoCompleteBlocks), + }; + + let result = effect_builder + .speculatively_execute(Box::new(tip), Box::new(transaction)) + .await; + + match result { + SpeculativeExecutionResult::InvalidTransaction(error) => { + debug!(%error, "invalid transaction submitted for speculative execution"); + BinaryResponse::new_error(error.into()) + } + SpeculativeExecutionResult::WasmV1(spec_exec_result) => { + BinaryResponse::from_value(spec_exec_result) + } + SpeculativeExecutionResult::ReceivedV1Transaction => { + BinaryResponse::new_error(ErrorCode::ReceivedV1Transaction) + } + } +} + +async fn handle_client_loop( + stream: TcpStream, + effect_builder: EffectBuilder, + config: Arc, + rate_limiter: Arc>, + monitor: ConnectionTerminator, + life_extensions_config: BinaryRequestTerminationDelayValues, +) -> Result<(), Error> +where + REv: From + + From + + From + + From + + From + + From + + From + + From + + From + + From + + Send, +{ + let codec = BinaryMessageCodec::new(config.max_message_size_bytes); + let mut framed = Framed::new(stream, codec); + monitor + .terminate_at(Timestamp::now() + config.initial_connection_lifetime) + .await; + let cancellation_token = monitor.get_cancellation_token(); + loop { + select! 
{ + maybe_bytes = framed.next() => { + let Some(result) = maybe_bytes else { + debug!("remote party closed the connection"); + return Ok(()); + }; + let limiter_response = rate_limiter.lock().await.throttle(); + let binary_message = result?; + let payload = binary_message.payload(); + if payload.is_empty() { + // This should be unreachable, we reject 0-length messages earlier + warn!("Empty payload detected late."); + return Err(Error::NoPayload); + } + let mut bytes_buf = bytes::BytesMut::with_capacity(payload.len() + 4); + let response = + handle_payload(effect_builder, payload, limiter_response, &monitor, &life_extensions_config).await; + codec.clone().encode(binary_message, &mut bytes_buf)?; + framed + .send(BinaryMessage::new( + BinaryResponseAndRequest::new(response, Bytes::from(bytes_buf.freeze().to_vec())).to_bytes()?, + )) + .await? + } + _ = cancellation_token.cancelled() => { + debug!("Binary port connection stale - closing."); + return Ok(()); + } + } + } +} + +fn extract_header(payload: &[u8]) -> Result<(CommandHeader, &[u8]), ErrorCode> { + const BINARY_VERSION_LENGTH_BYTES: usize = size_of::(); + + if payload.len() < BINARY_VERSION_LENGTH_BYTES { + return Err(ErrorCode::TooLittleBytesForRequestHeaderVersion); + } + + let binary_protocol_version = match u16::from_bytes(payload) { + Ok((binary_protocol_version, _)) => binary_protocol_version, + Err(_) => return Err(ErrorCode::MalformedCommandHeaderVersion), + }; + + if binary_protocol_version != CommandHeader::HEADER_VERSION { + return Err(ErrorCode::CommandHeaderVersionMismatch); + } + + match CommandHeader::from_bytes(payload) { + Ok((header, remainder)) => Ok((header, remainder)), + Err(error) => { + debug!(%error, "failed to parse binary request header"); + Err(ErrorCode::MalformedCommandHeader) + } + } +} + +async fn handle_payload( + effect_builder: EffectBuilder, + payload: &[u8], + limiter_response: LimiterResponse, + connection_terminator: &ConnectionTerminator, + life_extensions_config: 
&BinaryRequestTerminationDelayValues, +) -> BinaryResponse +where + REv: From, +{ + let (header, remainder) = match extract_header(payload) { + Ok(header) => header, + Err(error_code) => return BinaryResponse::new_error(error_code), + }; + + if let LimiterResponse::Throttled = limiter_response { + return BinaryResponse::new_error(ErrorCode::RequestThrottled); + } + + // we might receive a request added in a minor version if we're behind + let Ok(tag) = CommandTag::try_from(header.type_tag()) else { + return BinaryResponse::new_error(ErrorCode::UnsupportedRequest); + }; + + let request = match Command::try_from((tag, remainder)) { + Ok(request) => request, + Err(error) => { + debug!(%error, "failed to parse binary request body"); + return BinaryResponse::new_error(ErrorCode::MalformedCommand); + } + }; + connection_terminator + .delay_termination(life_extensions_config.get_life_termination_delay(&request)) + .await; + + effect_builder + .make_request( + |responder| Event::HandleRequest { request, responder }, + QueueKind::Regular, + ) + .await +} + +async fn handle_client( + addr: SocketAddr, + stream: TcpStream, + effect_builder: EffectBuilder, + config: Arc, + _permit: OwnedSemaphorePermit, + rate_limiter: Arc>, +) where + REv: From + + From + + From + + From + + From + + From + + From + + From + + From + + From + + Send, +{ + let keep_alive_monitor = ConnectionTerminator::new(); + let life_extensions_config = BinaryRequestTerminationDelayValues::from_config(&config); + if let Err(err) = handle_client_loop( + stream, + effect_builder, + config, + rate_limiter, + keep_alive_monitor, + life_extensions_config, + ) + .await + { + // Low severity is used to prevent malicious clients from causing log floods. 
+ trace!(%addr, err=display_error(&err), "binary port client handler error"); + } +} + +async fn run_server( + local_addr: Arc>, + effect_builder: EffectBuilder, + config: Arc, + shutdown_trigger: Arc, +) where + REv: From + + From + + From + + From + + From + + From + + From + + From + + From + + From + + Send, +{ + let listener = match TcpListener::bind(&config.address).await { + Ok(listener) => listener, + Err(err) => { + error!(%err, "unable to bind binary port listener"); + return; + } + }; + + let bind_address = match listener.local_addr() { + Ok(bind_address) => bind_address, + Err(err) => { + error!(%err, "unable to get local addr of binary port"); + return; + } + }; + + local_addr.set(bind_address).unwrap(); + + loop { + select! { + _ = shutdown_trigger.notified() => { + break; + } + result = listener.accept() => match result { + Ok((stream, peer)) => { + effect_builder + .make_request( + |responder| Event::AcceptConnection { + stream, + peer, + responder, + }, + QueueKind::Regular, + ) + .await; + } + Err(io_err) => { + info!(%io_err, "problem accepting binary port connection"); + } + } + } + } +} + +#[cfg(test)] +impl crate::reactor::Finalize for BinaryPort { + fn finalize(mut self) -> BoxFuture<'static, ()> { + self.shutdown_trigger.notify_one(); + async move { + if let Some(handle) = self.server_join_handle.take() { + handle.await.ok(); + } + } + .boxed() + } +} + +async fn resolve_block_header( + effect_builder: EffectBuilder, + block_identifier: Option, +) -> Option +where + REv: From + From + From, +{ + match block_identifier { + Some(BlockIdentifier::Hash(block_hash)) => { + effect_builder + .get_block_header_from_storage(block_hash, true) + .await + } + Some(BlockIdentifier::Height(block_height)) => { + effect_builder + .get_block_header_at_height_from_storage(block_height, true) + .await + } + None => { + effect_builder + .get_highest_complete_block_header_from_storage() + .await + } + } +} + +async fn resolve_block_height( + effect_builder: 
EffectBuilder, + block_identifier: Option, +) -> Option +where + REv: From + From + From, +{ + match block_identifier { + Some(BlockIdentifier::Hash(block_hash)) => effect_builder + .get_block_header_from_storage(block_hash, true) + .await + .map(|header| header.height()), + Some(BlockIdentifier::Height(block_height)) => Some(block_height), + None => effect_builder + .get_highest_complete_block_from_storage() + .await + .map(|header| header.height()), + } +} + +async fn resolve_state_root_hash( + effect_builder: EffectBuilder, + state_identifier: Option, +) -> Option +where + REv: From + From + From, +{ + match state_identifier { + Some(GlobalStateIdentifier::BlockHash(block_hash)) => effect_builder + .get_block_header_from_storage(block_hash, true) + .await + .map(|header| *header.state_root_hash()), + Some(GlobalStateIdentifier::BlockHeight(block_height)) => effect_builder + .get_block_header_at_height_from_storage(block_height, true) + .await + .map(|header| *header.state_root_hash()), + Some(GlobalStateIdentifier::StateRootHash(state_root_hash)) => Some(state_root_hash), + None => effect_builder + .get_highest_complete_block_header_from_storage() + .await + .map(|header| *header.state_root_hash()), + } +} + +async fn resolve_era_switch_block_header( + effect_builder: EffectBuilder, + era_identifier: Option, +) -> Option +where + REv: From + From + From, +{ + match era_identifier { + Some(EraIdentifier::Era(era_id)) => { + effect_builder + .get_switch_block_header_by_era_id_from_storage(era_id) + .await + } + Some(EraIdentifier::Block(block_identifier)) => { + let header = resolve_block_header(effect_builder, Some(block_identifier)).await?; + if header.is_switch_block() { + Some(header) + } else { + effect_builder + .get_switch_block_header_by_era_id_from_storage(header.era_id()) + .await + } + } + None => { + effect_builder + .get_latest_switch_block_header_from_storage() + .await + } + } +} + +impl Component for BinaryPort +where + REv: From + + From + + From 
+ + From + + From + + From + + From + + From + + From + + From + + Send, +{ + type Event = Event; + + fn name(&self) -> &str { + COMPONENT_NAME + } + + fn handle_event( + &mut self, + effect_builder: EffectBuilder, + _rng: &mut NodeRng, + event: Self::Event, + ) -> Effects { + match &self.state { + ComponentState::Uninitialized => { + warn!( + ?event, + name = >::name(self), + "should not handle this event when component is uninitialized" + ); + Effects::new() + } + ComponentState::Initializing => match event { + Event::Initialize => { + let rate_limiter_res = + RateLimiter::new(self.config.qps_limit, TimeDiff::from_seconds(1)); + match rate_limiter_res { + Ok(rate_limiter) => { + match self.rate_limiter.set(Arc::new(Mutex::new(rate_limiter))) { + Ok(_) => {} + Err(_) => { + error!("failed to initialize binary port, rate limiter already initialized"); + >::set_state( + self, + ComponentState::Fatal("failed to initialize binary port, rate limiter already initialized".to_string()), + ); + return Effects::new(); + } + }; + } + Err(error) => { + error!(%error, "failed to initialize binary port"); + >::set_state( + self, + ComponentState::Fatal(error.to_string()), + ); + return Effects::new(); + } + }; + let (effects, state) = self.bind(self.config.enable_server, effect_builder); + >::set_state(self, state); + effects + } + _ => { + warn!( + ?event, + name = >::name(self), + "binary port is initializing, ignoring event" + ); + Effects::new() + } + }, + ComponentState::Initialized => match event { + Event::Initialize => { + error!( + ?event, + name = >::name(self), + "component already initialized" + ); + Effects::new() + } + Event::AcceptConnection { + stream, + peer, + responder, + } => { + if let Ok(permit) = Arc::clone(&self.connection_limit).try_acquire_owned() { + self.metrics.binary_port_connections_count.inc(); + let config = Arc::clone(&self.config); + let rate_limiter = Arc::clone( + self.rate_limiter + .get() + .expect("This should have been set during 
initialization"), + ); + tokio::spawn(handle_client( + peer, + stream, + effect_builder, + config, + permit, + rate_limiter, + )); + } else { + warn!( + "connection limit reached, dropping connection from {}", + peer + ); + } + responder.respond(()).ignore() + } + Event::HandleRequest { request, responder } => { + let config = Arc::clone(&self.config); + let metrics = Arc::clone(&self.metrics); + let protocol_version = self.chainspec.protocol_version(); + async move { + let response = handle_request( + request, + effect_builder, + &config, + &metrics, + protocol_version, + ) + .await; + responder.respond(response).await; + } + .ignore() + } + }, + ComponentState::Fatal(msg) => { + error!( + msg, + ?event, + name = >::name(self), + "should not handle this event when this component has fatal error" + ); + Effects::new() + } + } + } +} + +impl InitializedComponent for BinaryPort +where + REv: From + + From + + From + + From + + From + + From + + From + + From + + From + + From + + Send, +{ + fn state(&self) -> &ComponentState { + &self.state + } + + fn set_state(&mut self, new_state: ComponentState) { + info!( + ?new_state, + name = >::name(self), + "component state changed" + ); + + self.state = new_state; + } +} + +impl PortBoundComponent for BinaryPort +where + REv: From + + From + + From + + From + + From + + From + + From + + From + + From + + From + + Send, +{ + type Error = ListeningError; + type ComponentEvent = Event; + + fn listen( + &mut self, + effect_builder: EffectBuilder, + ) -> Result, Self::Error> { + let local_addr = Arc::clone(&self.local_addr); + let server_join_handle = tokio::spawn(run_server( + local_addr, + effect_builder, + Arc::clone(&self.config), + Arc::clone(&self.shutdown_trigger), + )); + self.server_join_handle + .set(server_join_handle) + .expect("server join handle should not be set elsewhere"); + + Ok(Effects::new()) + } +} diff --git a/node/src/components/binary_port/config.rs b/node/src/components/binary_port/config.rs new file 
mode 100644 index 0000000000..fe614b4a21 --- /dev/null +++ b/node/src/components/binary_port/config.rs @@ -0,0 +1,127 @@ +use std::str::FromStr; + +use casper_types::TimeDiff; +use datasize::DataSize; +use serde::{Deserialize, Serialize}; + +/// Uses a fixed port per node, but binds on any interface. +const DEFAULT_ADDRESS: &str = "0.0.0.0:0"; +/// Default maximum message size. +const DEFAULT_MAX_MESSAGE_SIZE: u32 = 4 * 1024 * 1024; +/// Default maximum number of connections. +const DEFAULT_MAX_CONNECTIONS: usize = 5; +/// Default maximum number of requests per second. +const DEFAULT_QPS_LIMIT: usize = 110; +// Initial time given to a connection before it expires +const DEFAULT_INITIAL_CONNECTION_LIFETIME: &str = "10 seconds"; +// Default amount of time which is given to a connection to extend it's lifetime when a valid +// [`Command::Get(GetRequest::Record)`] is sent to the node +const DEFAULT_GET_RECORD_REQUEST_TERMINATION_DELAY: &str = "0 seconds"; +// Default amount of time which is given to a connection to extend it's lifetime when a valid +// [`Command::Get(GetRequest::Information)`] is sent to the node +const DEFAULT_GET_INFORMATION_REQUEST_TERMINATION_DELAY: &str = "5 seconds"; +// Default amount of time which is given to a connection to extend it's lifetime when a valid +// [`Command::Get(GetRequest::State)`] is sent to the node +const DEFAULT_GET_STATE_REQUEST_TERMINATION_DELAY: &str = "0 seconds"; +// Default amount of time which is given to a connection to extend it's lifetime when a valid +// [`Command::Get(GetRequest::Trie)`] is sent to the node +const DEFAULT_GET_TRIE_REQUEST_TERMINATION_DELAY: &str = "0 seconds"; +// Default amount of time which is given to a connection to extend it's lifetime when a valid +// [`Command::TryAcceptTransaction`] is sent to the node +const DEFAULT_ACCEPT_TRANSACTION_REQUEST_TERMINATION_DELAY: &str = "24 seconds"; +// Default amount of time which is given to a connection to extend it's lifetime when a valid +// 
[`Command::TrySpeculativeExec`] is sent to the node +const DEFAULT_SPECULATIVE_EXEC_REQUEST_TERMINATION_DELAY: &str = "0 seconds"; + +/// Binary port server configuration. +#[derive(Clone, DataSize, Debug, Deserialize, Serialize)] +// Disallow unknown fields to ensure config files and command-line overrides contain valid keys. +#[serde(deny_unknown_fields)] +pub struct Config { + /// Setting to enable the BinaryPort server. + pub enable_server: bool, + /// Address to bind BinaryPort server to. + pub address: String, + /// Flag used to enable/disable the [`AllValues`] request + // In case we need "enabled" flag for more than 2 requests we should introduce generic + // "function disabled/enabled" mechanism. For now, we can stick to these two booleans. + pub allow_request_get_all_values: bool, + /// Flag used to enable/disable the [`Trie`] request + pub allow_request_get_trie: bool, + /// Flag used to enable/disable the [`TrySpeculativeExec`] request. + pub allow_request_speculative_exec: bool, + /// Maximum size of the binary port message. + pub max_message_size_bytes: u32, + /// Maximum number of connections to the server. + pub max_connections: usize, + /// Maximum number of requests per second. 
+ pub qps_limit: usize, + // Initial time given to a connection before it expires + pub initial_connection_lifetime: TimeDiff, + // The amount of time which is given to a connection to extend it's lifetime when a valid + // [`Command::Get(GetRequest::Record)`] is sent to the node + pub get_record_request_termination_delay: TimeDiff, + // The amount of time which is given to a connection to extend it's lifetime when a valid + // [`Command::Get(GetRequest::Information)`] is sent to the node + pub get_information_request_termination_delay: TimeDiff, + // The amount of time which is given to a connection to extend it's lifetime when a valid + // [`Command::Get(GetRequest::State)`] is sent to the node + pub get_state_request_termination_delay: TimeDiff, + // The amount of time which is given to a connection to extend it's lifetime when a valid + // [`Command::Get(GetRequest::Trie)`] is sent to the node + pub get_trie_request_termination_delay: TimeDiff, + // The amount of time which is given to a connection to extend it's lifetime when a valid + // [`Command::TryAcceptTransaction`] is sent to the node + pub accept_transaction_request_termination_delay: TimeDiff, + // The amount of time which is given to a connection to extend it's lifetime when a valid + // [`Command::TrySpeculativeExec`] is sent to the node + pub speculative_exec_request_termination_delay: TimeDiff, +} + +impl Config { + /// Creates a default instance for `BinaryPort`. 
+ pub fn new() -> Self { + Config { + enable_server: true, + address: DEFAULT_ADDRESS.to_string(), + allow_request_get_all_values: false, + allow_request_get_trie: false, + allow_request_speculative_exec: false, + max_message_size_bytes: DEFAULT_MAX_MESSAGE_SIZE, + max_connections: DEFAULT_MAX_CONNECTIONS, + qps_limit: DEFAULT_QPS_LIMIT, + initial_connection_lifetime: TimeDiff::from_str(DEFAULT_INITIAL_CONNECTION_LIFETIME) + .unwrap(), + get_record_request_termination_delay: TimeDiff::from_str( + DEFAULT_GET_RECORD_REQUEST_TERMINATION_DELAY, + ) + .unwrap(), + get_information_request_termination_delay: TimeDiff::from_str( + DEFAULT_GET_INFORMATION_REQUEST_TERMINATION_DELAY, + ) + .unwrap(), + get_state_request_termination_delay: TimeDiff::from_str( + DEFAULT_GET_STATE_REQUEST_TERMINATION_DELAY, + ) + .unwrap(), + get_trie_request_termination_delay: TimeDiff::from_str( + DEFAULT_GET_TRIE_REQUEST_TERMINATION_DELAY, + ) + .unwrap(), + accept_transaction_request_termination_delay: TimeDiff::from_str( + DEFAULT_ACCEPT_TRANSACTION_REQUEST_TERMINATION_DELAY, + ) + .unwrap(), + speculative_exec_request_termination_delay: TimeDiff::from_str( + DEFAULT_SPECULATIVE_EXEC_REQUEST_TERMINATION_DELAY, + ) + .unwrap(), + } + } +} + +impl Default for Config { + fn default() -> Self { + Config::new() + } +} diff --git a/node/src/components/binary_port/connection_terminator.rs b/node/src/components/binary_port/connection_terminator.rs new file mode 100644 index 0000000000..f7f642774e --- /dev/null +++ b/node/src/components/binary_port/connection_terminator.rs @@ -0,0 +1,257 @@ +use casper_types::{TimeDiff, Timestamp}; +use std::time::Duration; +use tokio::{select, sync::Mutex, time}; +use tokio_util::sync::CancellationToken; + +struct TerminationData { + /// Moment in time at which the termination will happen. 
The + /// actual termination can happen some time after this + /// timestamp within reasonable timeframe of waking up + /// threads and rusts internal polling mechanisms + terminate_at: Timestamp, + /// A cancellation token which can stop (by calling + /// `.cancel()` on it) the countdown in case an extended + /// lifetime needs to be placed + stop_countdown: CancellationToken, +} + +/// Terminator which causes a cancellation_token to get canceled if a given timeout occurs. +/// Allows to extend the timeout period by resetting the termination dealine (using `terminate_at`) +/// or with a helper function `delay_by`. Both functions won't reset the termination deadline if +/// the new termination would happen before the existing one (we only allow to extend the +/// termination period) +pub(super) struct ConnectionTerminator { + /// This token will get canceled if the timeout passes + cancellation_token: CancellationToken, + //Data steering the internal countdown + countdown_data: Mutex>, +} + +impl ConnectionTerminator { + /// Updates or sets the termination deadline. + /// There will be no update if the termination already happened. + /// Both set and update won't happen if the `in_terminate_at` is in the past. + /// Updating an already running termination countdown happens only if the incoming + /// `in_terminate_at` is > then the existing one. Returns true if the update was in effect. 
+ /// False otherwise + pub(super) async fn terminate_at(&self, in_terminate_at: Timestamp) -> bool { + let now = Timestamp::now(); + if in_terminate_at <= now { + //Do nothing if termiantion is in the past + return false; + } + let terminate_in = Duration::from_millis(in_terminate_at.millis() - now.millis()); + let mut countdown_data_guard = self.countdown_data.lock().await; + if let Some(TerminationData { + terminate_at, + stop_countdown, + }) = countdown_data_guard.as_ref() + { + if in_terminate_at < *terminate_at { + //Don't update termination time if the proposed one is more restrictive than + // the existing one. + return false; + } else { + stop_countdown.cancel(); + } + } + if self.cancellation_token.is_cancelled() { + //Don't proceed if the outbound token was already cancelled + return false; + } + let stop_countdown = self + .spawn_termination_countdown(terminate_in, self.cancellation_token.clone()) + .await; + let data = TerminationData { + terminate_at: in_terminate_at, + stop_countdown, + }; + *countdown_data_guard = Some(data); + true + } + + /// Delays the termination by `delay_by` amount. If the terminations `terminate_at` is + /// further in the future than `now() + delay_by`, this function will have no effect + /// and will return false. Returns true otherwise. + pub(crate) async fn delay_termination(&self, delay_by: TimeDiff) -> bool { + let temrinate_at = Timestamp::now() + delay_by; + self.terminate_at(temrinate_at).await + } + + //Ctor. To start the countdown mechanism you need to call `terminate_at` + pub(super) fn new() -> Self { + let cancellation_token = CancellationToken::new(); + ConnectionTerminator { + cancellation_token, + countdown_data: Mutex::new(None), + } + } + + pub(super) fn get_cancellation_token(&self) -> CancellationToken { + self.cancellation_token.clone() + } + + // Spawns a thread that will cancel `cancellation_token` in a given `terminate_in` duration. 
+ // This function doesn't check if the cancellation_token wasn't already cancelled - it needs to + // be checked beforehand Return a different CancellationToken which can be used to kill the + // running thread + async fn spawn_termination_countdown( + &self, + terminate_in: Duration, + cancellation_token: CancellationToken, + ) -> CancellationToken { + let cancel_countdown = CancellationToken::new(); + let cancel_countdown_to_move = cancel_countdown.clone(); + tokio::task::spawn(async move { + select! { + _ = time::sleep(terminate_in) => { + cancellation_token.cancel() + }, + _ = cancel_countdown_to_move.cancelled() => { + }, + + } + }); + cancel_countdown + } +} + +#[cfg(test)] +mod tests { + use super::ConnectionTerminator; + use casper_types::{TimeDiff, Timestamp}; + use std::time::Duration; + use tokio::{select, time::sleep}; + + #[tokio::test(flavor = "multi_thread", worker_threads = 4)] + async fn should_fail_setting_expiration_in_past() { + let terminator = ConnectionTerminator::new(); + let in_past = Timestamp::from(1); + assert!(!terminator.terminate_at(in_past).await); + + let initial_inactivity = TimeDiff::from_seconds(1); + let terminator = ConnectionTerminator::new(); + let now = Timestamp::now(); + assert!(terminator.terminate_at(now + initial_inactivity).await); + assert!(!terminator.terminate_at(now).await); + } + + #[tokio::test(flavor = "multi_thread", worker_threads = 4)] + async fn should_fail_setting_expiration_when_already_cancelled() { + let initial_inactivity = TimeDiff::from_seconds(1); + let terminator = ConnectionTerminator::new(); + let now = Timestamp::now(); + assert!(terminator.terminate_at(now + initial_inactivity).await); + let cancellation_token = terminator.get_cancellation_token(); + select! 
{ + _ = cancellation_token.cancelled() => { + let elapsed = now.elapsed(); + assert!(elapsed >= TimeDiff::from_seconds(1)); + assert!(elapsed <= TimeDiff::from_millis(1500)); + }, + _ = sleep(Duration::from_secs(10)) => { + unreachable!() + }, + } + + let initial_inactivity = TimeDiff::from_seconds(10); + let now = Timestamp::now(); + assert!(!terminator.terminate_at(now + initial_inactivity).await); + } + + #[tokio::test(flavor = "multi_thread", worker_threads = 4)] + async fn should_cancel_after_enough_inactivity() { + let initial_inactivity = TimeDiff::from_seconds(1); + let terminator = ConnectionTerminator::new(); + let now = Timestamp::now(); + assert!(terminator.terminate_at(now + initial_inactivity).await); + let cancellation_token = terminator.get_cancellation_token(); + select! { + _ = cancellation_token.cancelled() => { + let elapsed = now.elapsed(); + assert!(elapsed >= TimeDiff::from_seconds(1)); + assert!(elapsed <= TimeDiff::from_millis(1500)); + }, + _ = sleep(Duration::from_secs(10)) => { + unreachable!() + }, + } + } + + #[tokio::test(flavor = "multi_thread", worker_threads = 4)] + async fn should_cancel_after_extended_time() { + let initial_inactivity = TimeDiff::from_seconds(1); + let terminator = ConnectionTerminator::new(); + let now = Timestamp::now(); + assert!(terminator.terminate_at(now + initial_inactivity).await); + sleep(Duration::from_millis(100)).await; + terminator + .delay_termination(TimeDiff::from_seconds(2)) + .await; + let cancellation_token = terminator.get_cancellation_token(); + select! 
{ + _ = cancellation_token.cancelled() => { + let elapsed = now.elapsed(); + assert!(elapsed >= TimeDiff::from_seconds(2)); + assert!(elapsed <= TimeDiff::from_millis(2500)); + }, + _ = sleep(Duration::from_secs(10)) => { + unreachable!() + }, + } + } + + #[tokio::test(flavor = "multi_thread", worker_threads = 4)] + async fn should_cancel_after_multiple_time_extensions() { + let initial_inactivity = TimeDiff::from_seconds(1); + let terminator = ConnectionTerminator::new(); + let now = Timestamp::now(); + assert!(terminator.terminate_at(now + initial_inactivity).await); + sleep(Duration::from_millis(100)).await; + terminator + .delay_termination(TimeDiff::from_seconds(2)) + .await; + sleep(Duration::from_millis(100)).await; + terminator + .delay_termination(TimeDiff::from_seconds(3)) + .await; + let cancellation_token = terminator.get_cancellation_token(); + select! { + _ = cancellation_token.cancelled() => { + let elapsed = now.elapsed(); + assert!(elapsed >= TimeDiff::from_seconds(3)); + assert!(elapsed <= TimeDiff::from_millis(4000)); + }, + _ = sleep(Duration::from_secs(10)) => { + unreachable!() + }, + } + } + + #[tokio::test(flavor = "multi_thread", worker_threads = 4)] + async fn should_not_shorten_termination_time() { + let initial_inactivity = TimeDiff::from_seconds(1); + let terminator = ConnectionTerminator::new(); + let now = Timestamp::now(); + assert!(terminator.terminate_at(now + initial_inactivity).await); + sleep(Duration::from_millis(100)).await; + terminator + .delay_termination(TimeDiff::from_seconds(2)) + .await; + sleep(Duration::from_millis(100)).await; + terminator + .delay_termination(TimeDiff::from_seconds(1)) + .await; + let cancellation_token = terminator.get_cancellation_token(); + select! 
{ + _ = cancellation_token.cancelled() => { + let elapsed = now.elapsed(); + assert!(elapsed >= TimeDiff::from_seconds(2)); + assert!(elapsed <= TimeDiff::from_millis(2500)); + }, + _ = sleep(Duration::from_secs(10)) => { + unreachable!() + }, + } + } +} diff --git a/node/src/components/binary_port/error.rs b/node/src/components/binary_port/error.rs new file mode 100644 index 0000000000..46050a4eed --- /dev/null +++ b/node/src/components/binary_port/error.rs @@ -0,0 +1,12 @@ +use casper_types::bytesrepr; +use thiserror::Error; + +#[derive(Debug, Error)] +pub(crate) enum Error { + #[error(transparent)] + BytesRepr(#[from] bytesrepr::Error), + #[error("received request without payload")] + NoPayload, + #[error(transparent)] + BinaryPort(#[from] casper_binary_port::Error), +} diff --git a/node/src/components/binary_port/event.rs b/node/src/components/binary_port/event.rs new file mode 100644 index 0000000000..429944dd4d --- /dev/null +++ b/node/src/components/binary_port/event.rs @@ -0,0 +1,53 @@ +use std::{ + fmt::{Display, Formatter}, + net::SocketAddr, +}; + +use casper_binary_port::{BinaryResponse, Command, GetRequest}; +use tokio::net::TcpStream; + +use crate::effect::Responder; + +#[derive(Debug)] +pub(crate) enum Event { + Initialize, + AcceptConnection { + stream: TcpStream, + peer: SocketAddr, + responder: Responder<()>, + }, + HandleRequest { + request: Command, + responder: Responder, + }, +} + +impl Display for Event { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self { + Event::Initialize => write!(f, "initialize"), + Event::AcceptConnection { peer, .. } => write!(f, "accept connection from {}", peer), + Event::HandleRequest { request, .. 
} => match request { + Command::Get(request) => match request { + GetRequest::Record { + record_type_tag, + key, + } => { + write!(f, "get record with tag {} ({})", record_type_tag, key.len()) + } + GetRequest::Information { info_type_tag, key } => { + write!(f, "get info with tag {} ({})", info_type_tag, key.len()) + } + GetRequest::State(state_request) => state_request.as_ref().fmt(f), + GetRequest::Trie { trie_key } => write!(f, "get trie ({})", trie_key), + }, + Command::TryAcceptTransaction { transaction, .. } => { + write!(f, "try accept transaction ({})", transaction.hash()) + } + Command::TrySpeculativeExec { transaction, .. } => { + write!(f, "try speculative exec ({})", transaction.hash()) + } + }, + } + } +} diff --git a/node/src/components/binary_port/metrics.rs b/node/src/components/binary_port/metrics.rs new file mode 100644 index 0000000000..0d4ff254cb --- /dev/null +++ b/node/src/components/binary_port/metrics.rs @@ -0,0 +1,121 @@ +use prometheus::{IntCounter, Registry}; + +use crate::unregister_metric; + +const BINARY_PORT_TRY_ACCEPT_TRANSACTION_COUNT_NAME: &str = + "binary_port_try_accept_transaction_count"; +const BINARY_PORT_TRY_ACCEPT_TRANSACTION_COUNT_HELP: &str = + "number of TryAcceptTransaction queries received"; + +const BINARY_PORT_TRY_SPECULATIVE_EXEC_COUNT_NAME: &str = "binary_port_try_speculative_exec_count"; +const BINARY_PORT_TRY_SPECULATIVE_EXEC_COUNT_HELP: &str = + "number of TrySpeculativeExec queries received"; + +const BINARY_PORT_GET_RECORD_COUNT_NAME: &str = "binary_port_get_record_count"; +const BINARY_PORT_GET_RECORD_COUNT_HELP: &str = "number of received Get queries for records"; + +const BINARY_PORT_GET_INFORMATION_NAME: &str = "binary_port_get_info_count"; +const BINARY_PORT_GET_INFORMATION_HELP: &str = + "number of received Get queries for information from the node"; + +const BINARY_PORT_GET_STATE_COUNT_NAME: &str = "binary_port_get_state_count"; +const BINARY_PORT_GET_STATE_COUNT_HELP: &str = + "number of Get queries 
received for the global state"; + +const BINARY_PORT_CONNECTIONS_COUNT_NAME: &str = "binary_port_connections_count"; +const BINARY_PORT_CONNECTIONS_COUNT_HELP: &str = + "total number of external connections established to binary port"; + +const BINARY_PORT_TRIE_COUNT_NAME: &str = "binary_port_get_trie_count"; +const BINARY_PORT_TRIE_COUNT_HELP: &str = "number of Get queries received for the trie state"; + +/// Metrics. +#[derive(Debug)] +pub(crate) struct Metrics { + /// Number of `TryAcceptTransaction` queries received. + pub(super) binary_port_try_accept_transaction_count: IntCounter, + /// Number of `TrySpeculativeExec` queries received. + pub(super) binary_port_try_speculative_exec_count: IntCounter, + /// Number of `Get::Record` queries received. + pub(super) binary_port_get_record_count: IntCounter, + /// Number of `Get::Information` queries received. + pub(super) binary_port_get_info_count: IntCounter, + /// Number of `Get::State` queries received. + pub(super) binary_port_get_state_count: IntCounter, + /// Number of distinct connections to binary port. + pub(super) binary_port_connections_count: IntCounter, + /// Number of `Get::Trie` queries received. + pub(super) binary_port_get_trie_count: IntCounter, + + registry: Registry, +} + +impl Metrics { + /// Creates a new instance of the metrics. 
+ pub fn new(registry: &Registry) -> Result { + let binary_port_try_accept_transaction_count = IntCounter::new( + BINARY_PORT_TRY_ACCEPT_TRANSACTION_COUNT_NAME.to_string(), + BINARY_PORT_TRY_ACCEPT_TRANSACTION_COUNT_HELP.to_string(), + )?; + + let binary_port_try_speculative_exec_count = IntCounter::new( + BINARY_PORT_TRY_SPECULATIVE_EXEC_COUNT_NAME.to_string(), + BINARY_PORT_TRY_SPECULATIVE_EXEC_COUNT_HELP.to_string(), + )?; + + let binary_port_get_record_count = IntCounter::new( + BINARY_PORT_GET_RECORD_COUNT_NAME.to_string(), + BINARY_PORT_GET_RECORD_COUNT_HELP.to_string(), + )?; + + let binary_port_get_info_count = IntCounter::new( + BINARY_PORT_GET_INFORMATION_NAME.to_string(), + BINARY_PORT_GET_INFORMATION_HELP.to_string(), + )?; + + let binary_port_get_state_count = IntCounter::new( + BINARY_PORT_GET_STATE_COUNT_NAME.to_string(), + BINARY_PORT_GET_STATE_COUNT_HELP.to_string(), + )?; + + let binary_port_connections_count = IntCounter::new( + BINARY_PORT_CONNECTIONS_COUNT_NAME.to_string(), + BINARY_PORT_CONNECTIONS_COUNT_HELP.to_string(), + )?; + + let binary_port_get_trie_count = IntCounter::new( + BINARY_PORT_TRIE_COUNT_NAME.to_string(), + BINARY_PORT_TRIE_COUNT_HELP.to_string(), + )?; + + registry.register(Box::new(binary_port_try_accept_transaction_count.clone()))?; + registry.register(Box::new(binary_port_try_speculative_exec_count.clone()))?; + registry.register(Box::new(binary_port_get_record_count.clone()))?; + registry.register(Box::new(binary_port_get_info_count.clone()))?; + registry.register(Box::new(binary_port_get_state_count.clone()))?; + registry.register(Box::new(binary_port_connections_count.clone()))?; + registry.register(Box::new(binary_port_get_trie_count.clone()))?; + + Ok(Metrics { + binary_port_try_accept_transaction_count, + binary_port_try_speculative_exec_count, + binary_port_get_record_count, + binary_port_get_info_count, + binary_port_get_state_count, + binary_port_connections_count, + binary_port_get_trie_count, + registry: 
registry.clone(), + }) + } +} + +impl Drop for Metrics { + fn drop(&mut self) { + unregister_metric!(self.registry, self.binary_port_try_accept_transaction_count); + unregister_metric!(self.registry, self.binary_port_try_speculative_exec_count); + unregister_metric!(self.registry, self.binary_port_get_record_count); + unregister_metric!(self.registry, self.binary_port_get_info_count); + unregister_metric!(self.registry, self.binary_port_get_state_count); + unregister_metric!(self.registry, self.binary_port_connections_count); + } +} diff --git a/node/src/components/binary_port/rate_limiter.rs b/node/src/components/binary_port/rate_limiter.rs new file mode 100644 index 0000000000..c574566266 --- /dev/null +++ b/node/src/components/binary_port/rate_limiter.rs @@ -0,0 +1,405 @@ +use casper_types::{TimeDiff, Timestamp}; +use thiserror::Error as ThisError; + +#[derive(Debug, ThisError)] +pub(crate) enum RateLimiterError { + #[error("Cannot create Rate limiter with 0 max_requests")] + EmptyWindowNotAllowed, + #[error("Maximum window duration is too large")] + WindowDurationTooLarge, + #[error("Maximum window duration is too small")] + WindowDurationTooSmall, +} + +const MAX_WINDOW_DURATION_MS: u64 = 1000 * 60 * 60; // 1 hour + +#[derive(PartialEq, Eq, Debug)] +/// Response from the rate limiter. +pub(crate) enum LimiterResponse { + /// when limiter allowed the request + Allowed, + /// when limiter throttled the request + Throttled, +} + +/// A buffer to store timestamps of requests. The assumption is that the buffer will keep the +/// monotonical order of timestamps as they are pushed. 
+#[derive(Debug)] +struct Buffer { + buffer: Vec, + in_index: usize, + out_index: usize, + capacity: usize, +} + +impl Buffer { + fn new(size: usize) -> Self { + Buffer { + buffer: vec![0; size + 1], + in_index: 0, + out_index: 0, + capacity: size + 1, + } + } + + fn is_full(&self) -> bool { + self.in_index == (self.out_index + self.capacity - 1) % self.capacity + } + + fn is_empty(&self) -> bool { + self.in_index == self.out_index + } + + //This should only be used from `push` + fn push_and_slide(&mut self, value: u64) -> bool { + let out_index = self.out_index as i32; + let capacity = self.capacity as i32; + let mut to_index = self.in_index as i32; + let mut from_index = (self.in_index as i32 + capacity - 1) % capacity; + + while to_index != out_index && self.buffer[from_index as usize] > value { + self.buffer[to_index as usize] = self.buffer[from_index as usize]; + to_index = (to_index + capacity - 1) % capacity; + from_index = (from_index + capacity - 1) % capacity; + } + self.buffer[to_index as usize] = value; + self.in_index = (self.in_index + 1) % self.capacity; + true + } + + fn push(&mut self, value: u64) -> bool { + if self.is_full() { + return false; + } + if !self.is_empty() { + let last_stored_index = (self.in_index + self.capacity - 1) % self.capacity; + let last_stored = self.buffer[last_stored_index]; + // We are expecting values to be monotonically increasing. But there is a scenario in + // which the system time might be changed to a previous time. 
+ // We handle that by wiggling it inside the buffer + if last_stored > value { + return self.push_and_slide(value); + } + } + self.buffer[self.in_index] = value; + self.in_index = (self.in_index + 1) % self.capacity; + true + } + + fn prune_lt(&mut self, value: u64) -> usize { + if self.is_empty() { + return 0; + } + let mut number_of_pruned = 0; + while self.in_index != self.out_index { + if self.buffer[self.out_index] >= value { + break; + } + self.out_index = (self.out_index + 1) % self.capacity; + number_of_pruned += 1; + } + number_of_pruned + } + + #[cfg(test)] + fn to_vec(&self) -> Vec { + let mut vec = Vec::new(); + let mut local_out = self.out_index; + while self.in_index != local_out { + vec.push(self.buffer[local_out]); + local_out = (local_out + 1) % self.capacity; + } + vec + } +} + +#[derive(Debug)] +pub(crate) struct RateLimiter { + /// window duration. + window_ms: u64, + /// Log of unix epoch time in ms when requests were made. + buffer: Buffer, +} + +impl RateLimiter { + //ctor + pub(crate) fn new( + max_requests: usize, + window_duration: TimeDiff, + ) -> Result { + if max_requests == 0 { + // We consider 0-max_requests as a misconfiguration + return Err(RateLimiterError::EmptyWindowNotAllowed); + } + let window_duration_in_ms = window_duration.millis(); + if window_duration_in_ms >= MAX_WINDOW_DURATION_MS { + return Err(RateLimiterError::WindowDurationTooLarge); + } + let window_duration_in_ms = window_duration.millis(); + if window_duration_in_ms == 0 { + return Err(RateLimiterError::WindowDurationTooSmall); + } + Ok(RateLimiter { + window_ms: window_duration_in_ms, + buffer: Buffer::new(max_requests), + }) + } + + pub(crate) fn throttle(&mut self) -> LimiterResponse { + self.internal_throttle(Timestamp::now().millis()) + } + + fn internal_throttle(&mut self, now: u64) -> LimiterResponse { + let is_full = self.buffer.is_full(); + if !is_full { + self.buffer.push(now); + return LimiterResponse::Allowed; + } else { + //The following subtraction 
could theoretically not fit in unsigned, but in real-life + // cases we limit the window duration to 1 hour (it's checked in ctor). So unless + // someone calls it from the perspective of 1970, it should be fine. + let no_of_pruned = self.buffer.prune_lt(now - self.window_ms); + if no_of_pruned == 0 { + //No pruning was done, so we are still at max_requests + return LimiterResponse::Throttled; + } + } + self.buffer.push(now); + LimiterResponse::Allowed + } +} + +#[cfg(test)] +mod tests { + use casper_types::TimeDiff; + + use super::*; + + #[test] + fn sliding_window_should_validate_ctor_inputs() { + assert!(RateLimiter::new(0, TimeDiff::from_millis(1000)).is_err()); + assert!(RateLimiter::new(10, TimeDiff::from_millis(MAX_WINDOW_DURATION_MS + 1)).is_err()); + assert!(RateLimiter::new(10, TimeDiff::from_millis(0)).is_err()); + } + + #[test] + fn sliding_window_throttle_should_limit_requests() { + let mut rate_limiter = rate_limiter(); + let t_1 = 10000_u64; + let t_2 = 10002_u64; + let t_3 = 10003_u64; + + assert_eq!( + rate_limiter.internal_throttle(t_1), + LimiterResponse::Allowed + ); + assert_eq!( + rate_limiter.internal_throttle(t_2), + LimiterResponse::Allowed + ); + assert_eq!( + rate_limiter.internal_throttle(t_3), + LimiterResponse::Throttled + ); + } + + #[test] + fn sliding_window_throttle_should_not_count_throttled_requests() { + let mut rate_limiter = rate_limiter(); + let t_1 = 1_u64; + let t_2 = 500_u64; + let t_3 = 1000_u64; + let t_4 = 1400_u64; + + assert_eq!( + rate_limiter.internal_throttle(t_1), + LimiterResponse::Allowed + ); + assert_eq!( + rate_limiter.internal_throttle(t_2), + LimiterResponse::Allowed + ); + assert_eq!( + rate_limiter.internal_throttle(t_3), + LimiterResponse::Throttled + ); + assert_eq!( + rate_limiter.internal_throttle(t_4), + LimiterResponse::Allowed + ); + } + + #[test] + fn sliding_window_throttle_should_limit_requests_on_burst() { + let mut rate_limiter = rate_limiter(); + let t_1 = 10000; + assert_eq!( + 
rate_limiter.internal_throttle(t_1), + LimiterResponse::Allowed + ); + assert_eq!( + rate_limiter.internal_throttle(t_1), + LimiterResponse::Allowed + ); + assert_eq!( + rate_limiter.internal_throttle(t_1), + LimiterResponse::Throttled + ); + } + + #[test] + fn sliding_window_should_slide_away_from_old_checks() { + let mut rate_limiter = rate_limiter(); + let t_1 = 10000_u64; + let t_2 = 10002_u64; + let t_3 = 11002_u64; + assert_eq!( + rate_limiter.internal_throttle(t_1), + LimiterResponse::Allowed + ); + assert_eq!( + rate_limiter.internal_throttle(t_2), + LimiterResponse::Allowed + ); + assert_eq!( + rate_limiter.internal_throttle(t_3), + LimiterResponse::Allowed + ); + assert_eq!( + rate_limiter.internal_throttle(t_3), + LimiterResponse::Throttled + ); + } + + #[test] + fn sliding_window_should_take_past_timestamp() { + let mut rate_limiter = rate_limiter(); + let t_1 = 10000_u64; + let t_2 = 9999_u64; + let t_3 = 10001_u64; + assert_eq!( + rate_limiter.internal_throttle(t_1), + LimiterResponse::Allowed + ); + assert_eq!( + rate_limiter.internal_throttle(t_2), + LimiterResponse::Allowed + ); + assert_eq!( + rate_limiter.internal_throttle(t_3), + LimiterResponse::Throttled + ); + } + + #[test] + fn sliding_window_should_anneal_timestamp_from_past_() { + let mut rate_limiter = rate_limiter(); + let t_1 = 10000_u64; + let t_2 = 9999_u64; + let t_3 = 12001_u64; + let t_4 = 12002_u64; + assert_eq!( + rate_limiter.internal_throttle(t_1), + LimiterResponse::Allowed + ); + assert_eq!( + rate_limiter.internal_throttle(t_2), + LimiterResponse::Allowed + ); + assert_eq!( + rate_limiter.internal_throttle(t_3), + LimiterResponse::Allowed + ); + assert_eq!( + rate_limiter.internal_throttle(t_4), + LimiterResponse::Allowed + ); + } + + #[test] + fn buffer_should_saturate_with_values() { + let mut buffer = Buffer::new(3); + assert!(buffer.push(1)); + assert!(buffer.push(2)); + assert!(buffer.push(3)); + assert!(!buffer.push(4)); + assert_eq!(buffer.to_vec(), vec![1_u64, 2_u64, 
3_u64]); + } + + #[test] + fn buffer_should_prune() { + let mut buffer = Buffer::new(3); + assert!(buffer.push(1)); + assert!(buffer.push(2)); + assert!(buffer.push(3)); + assert_eq!(buffer.prune_lt(3), 2); + assert!(buffer.push(4)); + assert_eq!(buffer.to_vec(), vec![3_u64, 4_u64]); + assert_eq!(buffer.prune_lt(5), 2); + + assert!(buffer.push(1)); + assert!(buffer.push(2)); + assert!(buffer.push(3)); + assert_eq!(buffer.prune_lt(5), 3); + assert!(buffer.to_vec().is_empty()); + + assert!(buffer.push(5)); + assert!(buffer.push(6)); + assert!(buffer.push(7)); + assert_eq!(buffer.to_vec(), vec![5, 6, 7]); + } + + #[test] + fn push_and_slide_should_keep_order() { + let mut buffer = Buffer::new(5); + assert!(buffer.push(1)); + assert!(buffer.push(2)); + assert!(buffer.push(7)); + assert!(buffer.push(6)); + assert_eq!(buffer.to_vec(), vec![1, 2, 6, 7]); + assert_eq!(buffer.prune_lt(7), 3); + assert_eq!(buffer.to_vec(), vec![7]); + + let mut buffer = Buffer::new(4); + assert!(buffer.push(2)); + assert!(buffer.push(8)); + assert!(buffer.push(5)); + assert!(buffer.push(1)); + assert_eq!(buffer.to_vec(), vec![1, 2, 5, 8]); + assert_eq!(buffer.prune_lt(5), 2); + assert_eq!(buffer.to_vec(), vec![5, 8]); + + let mut buffer = Buffer::new(4); + assert!(buffer.push(2)); + assert!(buffer.push(8)); + assert!(buffer.push(2)); + assert!(buffer.push(1)); + assert_eq!(buffer.to_vec(), vec![1, 2, 2, 8]); + + let mut buffer = Buffer::new(4); + assert!(buffer.push(2)); + assert!(buffer.push(8)); + assert!(buffer.push(3)); + assert!(buffer.push(1)); + assert_eq!(buffer.prune_lt(2), 1); + assert!(buffer.push(0)); + assert_eq!(buffer.to_vec(), vec![0, 2, 3, 8]); + + let mut buffer = Buffer::new(4); + assert!(buffer.push(8)); + assert!(buffer.push(7)); + assert!(buffer.push(6)); + assert!(buffer.push(5)); + assert_eq!(buffer.prune_lt(7), 2); + assert!(buffer.push(9)); + assert!(buffer.push(10)); + assert_eq!(buffer.prune_lt(9), 2); + assert!(buffer.push(11)); + assert!(buffer.push(1)); + 
assert_eq!(buffer.to_vec(), vec![1, 9, 10, 11]); + } + + fn rate_limiter() -> RateLimiter { + RateLimiter::new(2, TimeDiff::from_millis(1000)).unwrap() + } +} diff --git a/node/src/components/binary_port/tests.rs b/node/src/components/binary_port/tests.rs new file mode 100644 index 0000000000..cda56ff013 --- /dev/null +++ b/node/src/components/binary_port/tests.rs @@ -0,0 +1,456 @@ +use std::fmt::{self, Display, Formatter}; + +use derive_more::From; +use either::Either; +use rand::Rng; +use serde::Serialize; + +use casper_binary_port::{ + BinaryResponse, Command, GetRequest, GlobalStateEntityQualifier, GlobalStateRequest, RecordId, +}; + +use casper_types::{ + BlockHeader, Digest, GlobalStateIdentifier, KeyTag, PublicKey, Timestamp, Transaction, + TransactionV1, +}; + +use crate::{ + components::binary_port::event::Event as BinaryPortEvent, + effect::{ + announcements::ControlAnnouncement, + requests::{ + AcceptTransactionRequest, BlockSynchronizerRequest, ChainspecRawBytesRequest, + ConsensusRequest, ContractRuntimeRequest, NetworkInfoRequest, ReactorInfoRequest, + StorageRequest, UpgradeWatcherRequest, + }, + }, + reactor::ReactorEvent, +}; +use std::{sync::Arc, time::Duration}; + +use futures::channel::oneshot::{self, Receiver}; +use prometheus::Registry; +use thiserror::Error as ThisError; + +use casper_binary_port::ErrorCode; +use casper_types::{testing::TestRng, Chainspec, ChainspecRawBytes}; + +use crate::{ + components::{ + binary_port::config::Config as BinaryPortConfig, network::Identity as NetworkIdentity, + Component, InitializedComponent, + }, + effect::{EffectBuilder, EffectExt, Effects, Responder}, + reactor::{self, EventQueueHandle, QueueKind, Reactor, Runner}, + testing::{network::NetworkedReactor, ConditionCheckReactor}, + types::NodeRng, + utils::Loadable, +}; + +use super::{BinaryPort, Metrics as BinaryPortMetrics}; + +const ENABLED: bool = true; +const DISABLED: bool = false; + +struct TestCase { + allow_request_get_all_values: bool, + 
allow_request_get_trie: bool, + allow_request_speculative_exec: bool, + request_generator: Either Command, Command>, +} + +#[tokio::test] +async fn should_enqueue_requests_for_enabled_functions() { + let mut rng = TestRng::new(); + + let get_all_values_enabled = TestCase { + allow_request_get_all_values: ENABLED, + allow_request_get_trie: rng.gen(), + allow_request_speculative_exec: rng.gen(), + request_generator: Either::Left(|_| all_values_request()), + }; + + let get_trie_enabled = TestCase { + allow_request_get_all_values: rng.gen(), + allow_request_get_trie: ENABLED, + allow_request_speculative_exec: rng.gen(), + request_generator: Either::Left(|_| trie_request()), + }; + + let try_speculative_exec_enabled = TestCase { + allow_request_get_all_values: rng.gen(), + allow_request_get_trie: rng.gen(), + allow_request_speculative_exec: ENABLED, + request_generator: Either::Left(try_speculative_exec_request), + }; + + for test_case in [ + get_all_values_enabled, + get_trie_enabled, + try_speculative_exec_enabled, + ] { + let (_, mut runner) = run_test_case(test_case, &mut rng).await; + + runner + .crank_until( + &mut rng, + got_contract_runtime_request, + Duration::from_secs(10), + ) + .await; + } +} + +#[tokio::test] +async fn should_return_error_for_disabled_functions() { + let mut rng = TestRng::new(); + + const EXPECTED_ERROR_CODE: ErrorCode = ErrorCode::FunctionDisabled; + + let get_all_values_disabled = TestCase { + allow_request_get_all_values: DISABLED, + allow_request_get_trie: rng.gen(), + allow_request_speculative_exec: rng.gen(), + request_generator: Either::Left(|_| all_values_request()), + }; + + let get_trie_disabled = TestCase { + allow_request_get_all_values: rng.gen(), + allow_request_get_trie: DISABLED, + allow_request_speculative_exec: rng.gen(), + request_generator: Either::Left(|_| trie_request()), + }; + + let try_speculative_exec_disabled = TestCase { + allow_request_get_all_values: rng.gen(), + allow_request_get_trie: rng.gen(), + 
allow_request_speculative_exec: DISABLED, + request_generator: Either::Left(try_speculative_exec_request), + }; + + for test_case in [ + get_all_values_disabled, + get_trie_disabled, + try_speculative_exec_disabled, + ] { + let (receiver, mut runner) = run_test_case(test_case, &mut rng).await; + + let result = tokio::select! { + result = receiver => result.expect("expected successful response"), + _ = runner.crank_until( + &mut rng, + got_contract_runtime_request, + Duration::from_secs(10), + ) => { + panic!("expected receiver to complete first") + } + }; + assert_eq!(result.error_code(), EXPECTED_ERROR_CODE as u16) + } +} + +#[tokio::test] +async fn should_return_empty_response_when_fetching_empty_key() { + let mut rng = TestRng::new(); + + let test_cases: Vec = record_requests_with_empty_keys() + .into_iter() + .map(|request| TestCase { + allow_request_get_all_values: DISABLED, + allow_request_get_trie: DISABLED, + allow_request_speculative_exec: DISABLED, + request_generator: Either::Right(request), + }) + .collect(); + + for test_case in test_cases { + let (receiver, mut runner) = run_test_case(test_case, &mut rng).await; + + let result = tokio::select! 
{ + result = receiver => result.expect("expected successful response"), + _ = runner.crank_until( + &mut rng, + got_contract_runtime_request, + Duration::from_secs(10), + ) => { + panic!("expected receiver to complete first") + } + }; + assert_eq!(result.error_code(), 0); + assert!(result.payload().is_empty()); + } +} + +async fn run_test_case( + TestCase { + allow_request_get_all_values, + allow_request_get_trie, + allow_request_speculative_exec, + request_generator, + }: TestCase, + rng: &mut TestRng, +) -> ( + Receiver, + Runner>, +) { + let config = BinaryPortConfig { + enable_server: true, + allow_request_get_all_values, + allow_request_get_trie, + allow_request_speculative_exec, + max_message_size_bytes: 1024, + max_connections: 2, + ..Default::default() + }; + + let (chainspec, chainspec_raw_bytes) = + <(Chainspec, ChainspecRawBytes)>::from_resources("local"); + let mut runner: Runner> = Runner::new( + config.clone(), + Arc::new(chainspec), + Arc::new(chainspec_raw_bytes), + rng, + ) + .await + .unwrap(); + + // Initialize component. 
+ runner + .process_injected_effects(|effect_builder| { + effect_builder + .into_inner() + .schedule(BinaryPortEvent::Initialize, QueueKind::Api) + .ignore() + }) + .await; + + let (sender, receiver) = oneshot::channel(); + let request = match request_generator { + Either::Left(f) => f(rng), + Either::Right(v) => v, + }; + let event = BinaryPortEvent::HandleRequest { + request, + responder: Responder::without_shutdown(sender), + }; + + runner + .process_injected_effects(|effect_builder| { + effect_builder + .into_inner() + .schedule(event, QueueKind::Api) + .ignore() + }) + .await; + + (receiver, runner) +} + +struct MockReactor { + binary_port: BinaryPort, +} + +impl NetworkedReactor for MockReactor {} + +impl Reactor for MockReactor { + type Event = Event; + type Config = BinaryPortConfig; + type Error = ReactorError; + + fn new( + config: Self::Config, + chainspec: Arc, + _chainspec_raw_bytes: Arc, + _network_identity: NetworkIdentity, + registry: &Registry, + _event_queue: EventQueueHandle, + _rng: &mut NodeRng, + ) -> Result<(Self, Effects), Self::Error> { + let binary_port_metrics = BinaryPortMetrics::new(registry).unwrap(); + let mut binary_port = BinaryPort::new(config, chainspec, binary_port_metrics); + >::start_initialization(&mut binary_port); + + let reactor = MockReactor { binary_port }; + + let effects = Effects::new(); + + Ok((reactor, effects)) + } + + fn dispatch_event( + &mut self, + effect_builder: EffectBuilder, + rng: &mut NodeRng, + event: Event, + ) -> Effects { + match event { + Event::BinaryPort(event) => reactor::wrap_effects( + Event::BinaryPort, + self.binary_port.handle_event(effect_builder, rng, event), + ), + Event::ControlAnnouncement(_) => panic!("unexpected control announcement"), + Event::ContractRuntimeRequest(_) | Event::ReactorInfoRequest(_) => { + // We're only interested if the binary port actually created a request to Contract + // Runtime component, but we're not interested in the result. 
+ Effects::new() + } + Event::AcceptTransactionRequest(req) => req.responder.respond(Ok(())).ignore(), + Event::StorageRequest(StorageRequest::GetHighestCompleteBlockHeader { responder }) => { + let proposer = PublicKey::random(rng); + let block_header_v2 = casper_types::BlockHeaderV2::new( + Default::default(), + Default::default(), + Default::default(), + Default::default(), + Default::default(), + Default::default(), + Timestamp::now(), + Default::default(), + Default::default(), + Default::default(), + proposer, + Default::default(), + Default::default(), + Default::default(), + ); + responder + .respond(Some(BlockHeader::V2(block_header_v2))) + .ignore() + } + Event::StorageRequest(req) => panic!("unexpected storage req {}", req), + } + } +} + +/// Error type returned by the test reactor. +#[derive(Debug, ThisError)] +enum ReactorError { + #[error("prometheus (metrics) error: {0}")] + Metrics(#[from] prometheus::Error), +} + +/// Top-level event for the test reactors. +#[derive(Debug, From, Serialize)] +#[must_use] +enum Event { + #[from] + BinaryPort(#[serde(skip_serializing)] BinaryPortEvent), + #[from] + ControlAnnouncement(ControlAnnouncement), + #[from] + ContractRuntimeRequest(ContractRuntimeRequest), + #[from] + ReactorInfoRequest(ReactorInfoRequest), + #[from] + AcceptTransactionRequest(AcceptTransactionRequest), + StorageRequest(StorageRequest), +} + +impl From for Event { + fn from(_request: ChainspecRawBytesRequest) -> Self { + unreachable!() + } +} + +impl From for Event { + fn from(_request: UpgradeWatcherRequest) -> Self { + unreachable!() + } +} + +impl From for Event { + fn from(_request: BlockSynchronizerRequest) -> Self { + unreachable!() + } +} + +impl From for Event { + fn from(_request: ConsensusRequest) -> Self { + unreachable!() + } +} + +impl From for Event { + fn from(_request: NetworkInfoRequest) -> Self { + unreachable!() + } +} + +impl From for Event { + fn from(request: StorageRequest) -> Self { + Event::StorageRequest(request) + } 
+} + +impl Display for Event { + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + match self { + Event::ControlAnnouncement(ctrl_ann) => write!(formatter, "control: {}", ctrl_ann), + Event::BinaryPort(request) => write!(formatter, "binary port request: {:?}", request), + Event::ContractRuntimeRequest(request) => { + write!(formatter, "contract runtime request: {:?}", request) + } + Event::ReactorInfoRequest(request) => { + write!(formatter, "reactor info request: {:?}", request) + } + Event::AcceptTransactionRequest(request) => { + write!(formatter, "accept transaction request: {:?}", request) + } + Event::StorageRequest(request) => { + write!(formatter, "storage request: {:?}", request) + } + } + } +} + +impl ReactorEvent for Event { + fn is_control(&self) -> bool { + matches!(self, Event::ControlAnnouncement(_)) + } + + fn try_into_control(self) -> Option { + if let Self::ControlAnnouncement(ctrl_ann) = self { + Some(ctrl_ann) + } else { + None + } + } +} + +fn all_values_request() -> Command { + let state_identifier = GlobalStateIdentifier::StateRootHash(Digest::hash([1u8; 32])); + Command::Get(GetRequest::State(Box::new(GlobalStateRequest::new( + Some(state_identifier), + GlobalStateEntityQualifier::AllItems { + key_tag: KeyTag::Account, + }, + )))) +} + +#[cfg(test)] +fn record_requests_with_empty_keys() -> Vec { + let mut data = Vec::new(); + for record_id in RecordId::all() { + data.push(Command::Get(GetRequest::Record { + record_type_tag: record_id.into(), + key: vec![], + })) + } + data +} + +fn trie_request() -> Command { + Command::Get(GetRequest::Trie { + trie_key: Digest::hash([1u8; 32]), + }) +} + +fn try_speculative_exec_request(rng: &mut TestRng) -> Command { + Command::TrySpeculativeExec { + transaction: Transaction::V1(TransactionV1::random(rng)), + } +} + +fn got_contract_runtime_request(event: &Event) -> bool { + matches!(event, Event::ContractRuntimeRequest(_)) +} diff --git a/node/src/components/block_accumulator.rs 
b/node/src/components/block_accumulator.rs new file mode 100644 index 0000000000..e655f6ec97 --- /dev/null +++ b/node/src/components/block_accumulator.rs @@ -0,0 +1,889 @@ +mod block_acceptor; +mod config; +mod error; +mod event; +mod leap_instruction; +mod local_tip_identifier; +mod metrics; +mod sync_identifier; +mod sync_instruction; +#[cfg(test)] +mod tests; + +use std::{ + collections::{btree_map, BTreeMap, VecDeque}, + convert::TryInto, + sync::Arc, +}; + +use datasize::DataSize; +use futures::FutureExt; +use itertools::Itertools; +use prometheus::Registry; +use tracing::{debug, error, info, warn}; + +use casper_types::{ + ActivationPoint, Block, BlockHash, BlockSignaturesV2, EraId, FinalitySignatureV2, TimeDiff, + Timestamp, +}; + +use crate::{ + components::{ + block_accumulator::{ + block_acceptor::{BlockAcceptor, ShouldStore}, + leap_instruction::LeapInstruction, + local_tip_identifier::LocalTipIdentifier, + metrics::Metrics, + }, + network::blocklist::BlocklistJustification, + Component, ValidatorBoundComponent, + }, + effect::{ + announcements::{ + BlockAccumulatorAnnouncement, FatalAnnouncement, MetaBlockAnnouncement, + PeerBehaviorAnnouncement, + }, + requests::{BlockAccumulatorRequest, MarkBlockCompletedRequest, StorageRequest}, + EffectBuilder, EffectExt, Effects, + }, + fatal, + types::{ForwardMetaBlock, MetaBlock, MetaBlockState, NodeId, ValidatorMatrix}, + NodeRng, +}; + +pub(crate) use config::Config; +pub(crate) use error::Error; +pub(crate) use event::Event; +pub(crate) use sync_identifier::SyncIdentifier; +pub(crate) use sync_instruction::SyncInstruction; + +const COMPONENT_NAME: &str = "block_accumulator"; + +/// If a peer "informs" us about more than the expected number of new blocks times this factor, +/// they are probably spamming, and we refuse to create new block acceptors for them. +const PEER_RATE_LIMIT_MULTIPLIER: usize = 2; + +/// A cache of pending blocks and finality signatures that are gossiped to this node. 
+/// +/// Announces new blocks and finality signatures once they become valid. +#[derive(DataSize, Debug)] +pub(crate) struct BlockAccumulator { + /// This component requires the era validator weights for every era + /// it receives blocks and / or finality signatures for to verify that + /// the received signatures are legitimate to the era and to calculate + /// sufficient finality from collected finality signatures. + validator_matrix: ValidatorMatrix, + /// Each block_acceptor instance is responsible for combining + /// potential blocks and their finality signatures. When we have + /// collected sufficient finality weight's worth of signatures + /// for a potential block, we accept the block and store it. + block_acceptors: BTreeMap, + /// Key is the parent block hash, value is the child block hash. + /// Used to determine if we have awareness of the next block to be + /// sync'd or executed. + block_children: BTreeMap, + /// The height of the subjective local tip of the chain. This is used to + /// keep track of whether blocks received from the network are relevant or not, + /// and to determine if this node is close enough to the perceived tip of the + /// network to transition to executing block for itself. + local_tip: Option, + /// Chainspec activation point. + activation_point: Option, + /// Configured setting for how close to perceived tip local tip must be for + /// this node to attempt block execution for itself. + attempt_execution_threshold: u64, + /// Configured setting for tolerating a lack of newly received block + /// and / or finality signature data. If we last saw progress longer + /// ago than this interval, we will poll the network to determine + /// if we are caught up or have become isolated. + dead_air_interval: TimeDiff, + /// Configured setting for how often to purge dead state. + purge_interval: TimeDiff, + /// Configured setting for how many eras are considered to be recent. 
+ recent_era_interval: u64, + /// Tracks activity and assists with perceived tip determination. + last_progress: Timestamp, + /// For each peer, a list of block hashes we first heard from them, and the timestamp when we + /// created the block acceptor, from oldest to newest. + peer_block_timestamps: BTreeMap>, + /// The minimum time between a block and its child. + min_block_time: TimeDiff, + /// The number of validator slots. + validator_slots: u32, + /// Metrics. + #[data_size(skip)] + metrics: Metrics, +} + +impl BlockAccumulator { + pub(crate) fn new( + config: Config, + validator_matrix: ValidatorMatrix, + recent_era_interval: u64, + min_block_time: TimeDiff, + validator_slots: u32, + registry: &Registry, + ) -> Result { + Ok(Self { + validator_matrix, + attempt_execution_threshold: config.attempt_execution_threshold, + dead_air_interval: config.dead_air_interval, + block_acceptors: Default::default(), + block_children: Default::default(), + last_progress: Timestamp::now(), + purge_interval: config.purge_interval, + local_tip: None, + activation_point: None, + recent_era_interval, + peer_block_timestamps: Default::default(), + min_block_time, + validator_slots, + metrics: Metrics::new(registry)?, + }) + } + + pub(crate) fn sync_instruction(&mut self, sync_identifier: SyncIdentifier) -> SyncInstruction { + let block_hash = sync_identifier.block_hash(); + let leap_instruction = self.leap_instruction(&sync_identifier); + debug!(?leap_instruction, "BlockAccumulator"); + if let Some((block_height, era_id)) = sync_identifier.block_height_and_era() { + self.register_local_tip(block_height, era_id); + } + if leap_instruction.should_leap() { + return SyncInstruction::Leap { block_hash }; + } + match sync_identifier.block_hash_to_sync(self.next_syncable_block_hash(block_hash)) { + Some(block_hash_to_sync) => { + self.reset_last_progress(); + SyncInstruction::BlockSync { + block_hash: block_hash_to_sync, + } + } + None => { + if self.is_stale() { + debug!(%block_hash, 
"BlockAccumulator: when not in Validate leap because stale gossip"); + SyncInstruction::LeapIntervalElapsed { block_hash } + } else { + SyncInstruction::CaughtUp { block_hash } + } + } + } + } + + /// Register activation point from next protocol version chainspec, if any. + pub(crate) fn register_activation_point( + &mut self, + maybe_activation_point: Option, + ) { + self.activation_point = maybe_activation_point; + } + + /// Drops all old block acceptors and tracks new local block height; + /// subsequent attempts to register a block lower than tip will be rejected. + fn register_local_tip(&mut self, height: u64, era_id: EraId) { + let new_local_tip = match self.local_tip { + Some(current) => current.height < height && current.era_id <= era_id, + None => true, + }; + if new_local_tip { + self.purge(); + self.local_tip = Some(LocalTipIdentifier::new(height, era_id)); + self.reset_last_progress(); + info!(local_tip=?self.local_tip, "new local tip detected"); + } + } + + /// Registers a peer with an existing acceptor, or creates a new one. + /// + /// If the era is outdated or the peer has already caused us to create more acceptors than + /// expected, no new acceptor will be created. + fn upsert_acceptor( + &mut self, + block_hash: BlockHash, + maybe_era_id: Option, + maybe_sender: Option, + ) { + // If the acceptor already exists, just register the peer, if applicable. + let entry = match self.block_acceptors.entry(block_hash) { + btree_map::Entry::Occupied(entry) => { + if let Some(sender) = maybe_sender { + entry.into_mut().register_peer(sender); + } + return; + } + btree_map::Entry::Vacant(entry) => entry, + }; + + // The acceptor doesn't exist. Don't create it if the item's era is not + // provided or the item's era is older than the local tip era by more + // than `recent_era_interval`. 
+ match (maybe_era_id, self.local_tip) { + (Some(era_id), Some(local_tip)) + if era_id >= local_tip.era_id.saturating_sub(self.recent_era_interval) => {} + (Some(_), None) => {} + _ => { + // If we created the event, it's safe to create the acceptor. + if maybe_sender.is_some() { + debug!(?maybe_era_id, local_tip=?self.local_tip, "not creating acceptor"); + return; + } + } + } + + // Check that the sender isn't telling us about more blocks than expected. + if let Some(sender) = maybe_sender { + let block_timestamps = self.peer_block_timestamps.entry(sender).or_default(); + + // Prune the timestamps, so the count reflects only the most recently added acceptors. + let purge_interval = self.purge_interval; + while block_timestamps + .front() + .is_some_and(|(_, timestamp)| timestamp.elapsed() > purge_interval) + { + block_timestamps.pop_front(); + } + + // Assume a block time of at least 1 millisecond, so we don't divide by zero. + let min_block_time = self.min_block_time.max(TimeDiff::from_millis(1)); + let expected_blocks = (purge_interval / min_block_time) as usize; + let max_block_count = PEER_RATE_LIMIT_MULTIPLIER.saturating_mul(expected_blocks); + if block_timestamps.len() >= max_block_count { + warn!( + ?sender, %block_hash, + "rejecting block hash from peer who sent us more than {} within {}", + max_block_count, self.purge_interval, + ); + return; + } + block_timestamps.push_back((block_hash, Timestamp::now())); + } + + entry.insert(BlockAcceptor::new(block_hash, maybe_sender)); + self.metrics.block_acceptors.inc(); + } + + fn register_block( + &mut self, + effect_builder: EffectBuilder, + meta_block: ForwardMetaBlock, + sender: Option, + ) -> Effects + where + REv: From + + From + + From + + From + + Send, + { + let block_hash = meta_block.block.hash(); + debug!(%block_hash, "registering block"); + let era_id = meta_block.block.era_id(); + let block_height = meta_block.block.height(); + if self + .local_tip + .as_ref() + .is_some_and(|local_tip| block_height 
< local_tip.height) + { + debug!(%block_hash, "ignoring outdated block"); + return Effects::new(); + } + self.upsert_acceptor(*block_hash, Some(era_id), sender); + + let acceptor = match self.block_acceptors.get_mut(block_hash) { + None => return Effects::new(), + Some(acceptor) => acceptor, + }; + + match acceptor.register_block(meta_block, sender) { + Ok(_) => match self.validator_matrix.validator_weights(era_id) { + Some(evw) => { + let (should_store, faulty_senders) = + acceptor.should_store_block(&evw, self.validator_matrix.chain_name_hash()); + self.store_block_and_finality_signatures( + effect_builder, + should_store, + faulty_senders, + ) + } + None => Effects::new(), + }, + Err(error) => match error { + Error::InvalidGossip(ref gossip_error) => { + warn!(%gossip_error, "received invalid block"); + effect_builder + .announce_block_peer_with_justification( + gossip_error.peer(), + BlocklistJustification::SentBadBlock { error }, + ) + .ignore() + } + Error::EraMismatch { + peer, + block_hash, + expected, + actual, + } => { + warn!( + "era mismatch from {} for {}; expected: {} and actual: {}", + peer, block_hash, expected, actual + ); + effect_builder + .announce_block_peer_with_justification( + peer, + BlocklistJustification::SentBadBlock { error }, + ) + .ignore() + } + ref error @ Error::BlockHashMismatch { .. } => { + error!(%error, "finality signature has mismatched block_hash; this is a bug"); + Effects::new() + } + ref error @ Error::SufficientFinalityWithoutBlock { .. 
} => { + error!(%error, "should not have sufficient finality without block"); + Effects::new() + } + Error::InvalidConfiguration => fatal!( + effect_builder, + "node has an invalid configuration, shutting down" + ) + .ignore(), + Error::BogusValidator(_) => { + error!(%error, "unexpected detection of bogus validator, this is a bug"); + Effects::new() + } + Error::MetaBlockMerge(error) => { + error!(%error, "failed to merge meta blocks, this is a bug"); + Effects::new() + } + Error::TooManySignatures { peer, limit } => effect_builder + .announce_block_peer_with_justification( + peer, + BlocklistJustification::SentTooManyFinalitySignatures { + max_allowed: limit, + }, + ) + .ignore(), + }, + } + } + + fn register_finality_signature( + &mut self, + effect_builder: EffectBuilder, + finality_signature: FinalitySignatureV2, + sender: Option, + ) -> Effects + where + REv: From + + From + + From + + From + + Send, + { + let block_hash = finality_signature.block_hash(); + let era_id = finality_signature.era_id(); + self.upsert_acceptor(*block_hash, Some(era_id), sender); + + let acceptor = match self.block_acceptors.get_mut(block_hash) { + Some(acceptor) => acceptor, + // When there is no acceptor for it, this function returns + // early, ignoring the signature. 
+ None => { + debug!(%finality_signature, "no acceptor to receive finality_signature"); + return Effects::new(); + } + }; + + if sender.is_none() { + acceptor.set_our_signature(finality_signature.clone()); + } + + debug!(%finality_signature, "registering finality signature"); + match acceptor.register_finality_signature(finality_signature, sender, self.validator_slots) + { + Ok(Some(finality_signature)) => self.store_block_and_finality_signatures( + effect_builder, + ShouldStore::SingleSignature(finality_signature), + None, + ), + Ok(None) => match self.validator_matrix.validator_weights(era_id) { + Some(evw) => { + let (should_store, faulty_senders) = + acceptor.should_store_block(&evw, self.validator_matrix.chain_name_hash()); + self.store_block_and_finality_signatures( + effect_builder, + should_store, + faulty_senders, + ) + } + None => Effects::new(), + }, + Err(error) => match error { + Error::InvalidGossip(ref gossip_error) => { + warn!(%gossip_error, "received invalid finality_signature"); + effect_builder + .announce_block_peer_with_justification( + gossip_error.peer(), + BlocklistJustification::SentBadFinalitySignature { error }, + ) + .ignore() + } + Error::EraMismatch { + peer, + block_hash, + expected, + actual, + } => { + // the acceptor logic purges finality signatures that don't match + // the era validators, so in this case we can continue to + // use the acceptor + warn!( + "era mismatch from {} for {}; expected: {} and actual: {}", + peer, block_hash, expected, actual + ); + effect_builder + .announce_block_peer_with_justification( + peer, + BlocklistJustification::SentBadFinalitySignature { error }, + ) + .ignore() + } + ref error @ Error::BlockHashMismatch { .. } => { + error!(%error, "finality signature has mismatched block_hash; this is a bug"); + Effects::new() + } + ref error @ Error::SufficientFinalityWithoutBlock { .. 
} => { + error!(%error, "should not have sufficient finality without block"); + Effects::new() + } + Error::InvalidConfiguration => fatal!( + effect_builder, + "node has an invalid configuration, shutting down" + ) + .ignore(), + Error::BogusValidator(_) => { + error!(%error, "unexpected detection of bogus validator, this is a bug"); + Effects::new() + } + Error::MetaBlockMerge(error) => { + error!(%error, "failed to merge meta blocks, this is a bug"); + Effects::new() + } + Error::TooManySignatures { peer, limit } => effect_builder + .announce_block_peer_with_justification( + peer, + BlocklistJustification::SentTooManyFinalitySignatures { + max_allowed: limit, + }, + ) + .ignore(), + }, + } + } + + fn register_stored( + &self, + effect_builder: EffectBuilder, + maybe_meta_block: Option, + maybe_block_signatures: Option, + ) -> Effects + where + REv: From + + From + + From + + Send, + { + let mut effects = Effects::new(); + if let Some(meta_block) = maybe_meta_block { + effects.extend( + effect_builder + .announce_meta_block(meta_block.into()) + .ignore(), + ); + }; + if let Some(block_signatures) = maybe_block_signatures { + for finality_signature in block_signatures.finality_signatures() { + effects.extend( + effect_builder + .announce_finality_signature_accepted(Box::new(finality_signature)) + .ignore(), + ); + } + } + effects + } + + fn get_peers(&self, block_hash: BlockHash) -> Option> { + self.block_acceptors + .get(&block_hash) + .map(|acceptor| acceptor.peers().iter().cloned().collect()) + } + + fn is_stale(&mut self) -> bool { + // we expect to be receiving gossiped blocks from other nodes + // if we haven't received any messages describing higher blocks + // for more than the self.dead_air_interval config allows + // we leap again to poll the network + self.last_progress.elapsed() >= self.dead_air_interval + } + + pub(crate) fn reset_last_progress(&mut self) { + self.last_progress = Timestamp::now(); + } + + fn leap_instruction(&self, sync_identifier: 
&SyncIdentifier) -> LeapInstruction { + let local_tip_height = match self.local_tip { + Some(local_tip) => local_tip.height, + None => { + // if the accumulator is unaware of local tip, + // leap to learn more about the network state + return LeapInstruction::UnsetLocalTip; + } + }; + + let sync_identifier_height = match sync_identifier.block_height() { + Some(block_height) => block_height, + None => { + if let Some(height) = self + .block_acceptors + .get(&sync_identifier.block_hash()) + .filter(|x| x.block_height().is_some()) + .map(|x| x.block_height().unwrap_or_default()) + { + height + } else { + return LeapInstruction::UnknownBlockHeight; + } + } + }; + + match self + .block_acceptors + .iter() + .filter(|(_, acceptor)| { + acceptor.has_sufficient_finality() && acceptor.block_height().is_some() + }) + .max_by(|x, y| x.1.block_height().cmp(&y.1.block_height())) + .map(|(_, acceptor)| { + ( + acceptor.block_height().unwrap_or_default(), + acceptor.is_upgrade_boundary(self.activation_point), + ) + }) { + None => LeapInstruction::NoUsableBlockAcceptors, + Some((acceptor_height, is_upgrade_boundary)) => { + // the accumulator has heard about at least one usable block via gossiping + // if we've see chatter about a usable higher block, we can determine + // if we have local state at or near that highest usable block. + // if we have reason to believe we have fallen too far behind the network, + // we should switch to catchup mode and start the leap process + // otherwise, we should attempt to keep up with the network by + // executing our own blocks. + + // This is a special case; if we have heard chatter about the last block + // before a protocol upgrade and have enough finality signatures to believe + // it, we want to be cautious about leaping, because other nodes on the + // network are starting to go down and come back up on the new protocol + // version and may or may not respond. 
Thus, it is best for the node to + // continue executing its own blocks to get to the upgrade point on its + // own (if able). + let is_upgrade_boundary = is_upgrade_boundary == Some(true); + + let height = local_tip_height.max(sync_identifier_height); + let distance_from_highest_known_block = acceptor_height.saturating_sub(height); + + LeapInstruction::from_execution_threshold( + self.attempt_execution_threshold, + distance_from_highest_known_block, + is_upgrade_boundary, + ) + } + } + } + + fn next_syncable_block_hash(&self, parent_block_hash: BlockHash) -> Option { + let child_hash = self.block_children.get(&parent_block_hash)?; + let block_acceptor = self.block_acceptors.get(child_hash)?; + if block_acceptor.has_sufficient_finality() { + Some(block_acceptor.block_hash()) + } else { + None + } + } + + fn purge(&mut self) { + let now = Timestamp::now(); + let mut purged = vec![]; + let purge_interval = self.purge_interval; + let maybe_local_tip_height = self.local_tip.map(|local_tip| local_tip.height); + let attempt_execution_threshold = self.attempt_execution_threshold; + self.block_acceptors.retain(|k, v| { + if let (Some(acceptor_height), Some(local_tip_height)) = + (v.block_height(), maybe_local_tip_height) + { + // With `attempt_execution_threshold` being 3 as of this + // comment, we keep blocks in the range + // [(local_tip_height - 3), local_tip_height]. + if acceptor_height >= local_tip_height.saturating_sub(attempt_execution_threshold) + && acceptor_height <= local_tip_height + { + return true; + } + // Keep future blocks that we signed or are sufficiently signed. 
+ if acceptor_height > local_tip_height + && (v.our_signature().is_some() || v.has_sufficient_finality()) + { + return true; + } + } + let expired = now.saturating_diff(v.last_progress()) > purge_interval; + if expired { + purged.push(*k) + } + !expired + }); + self.block_children + .retain(|_parent, child| false == purged.contains(child)); + self.peer_block_timestamps.retain(|_, block_timestamps| { + while block_timestamps + .front() + .is_some_and(|(_, timestamp)| timestamp.elapsed() > purge_interval) + { + block_timestamps.pop_front(); + } + !block_timestamps.is_empty() + }); + + self.metrics + .block_acceptors + .set(self.block_acceptors.len().try_into().unwrap_or(i64::MIN)); + self.metrics + .known_child_blocks + .set(self.block_children.len().try_into().unwrap_or(i64::MIN)); + } + + fn update_block_children(&mut self, meta_block: &ForwardMetaBlock) { + if meta_block.block.is_genesis() { + return; + } + let parent_hash = meta_block.block.parent_hash(); + if self + .block_children + .insert(*parent_hash, *meta_block.block.hash()) + .is_none() + { + self.metrics.known_child_blocks.inc(); + } + } + + fn store_block_and_finality_signatures( + &mut self, + effect_builder: EffectBuilder, + should_store: ShouldStore, + faulty_senders: I, + ) -> Effects + where + REv: From + + From + + From + + Send, + I: IntoIterator, + { + let mut effects = match should_store { + ShouldStore::SufficientlySignedBlock { + meta_block, + block_signatures, + } => { + let block_hash = meta_block.block.hash(); + debug!(%block_hash, "storing block and finality signatures"); + self.update_block_children(&meta_block); + // The block wasn't executed yet, so we just put it to storage. An `ExecutedBlock` + // event will then re-trigger this flow and eventually mark it complete. 
+ let cloned_signatures = block_signatures.clone(); + let block: Block = (*meta_block.block).clone().into(); + effect_builder + .put_block_to_storage(Arc::new(block)) + .then(move |_| { + effect_builder.put_signatures_to_storage(cloned_signatures.into()) + }) + .event(move |_| Event::Stored { + maybe_meta_block: Some(meta_block), + maybe_block_signatures: Some(block_signatures), + }) + } + ShouldStore::CompletedBlock { + meta_block, + block_signatures, + } => { + let block_hash = meta_block.block.hash(); + debug!(%block_hash, "storing finality signatures and marking block complete"); + self.update_block_children(&meta_block); + // The block was already executed, which means it is stored and we have the global + // state for it. As on this code path we also know it is sufficiently signed, + // we mark it as complete. + let block_height = meta_block.block.height(); + effect_builder + .put_signatures_to_storage(block_signatures.clone().into()) + .then(move |_| effect_builder.mark_block_completed(block_height)) + .event(move |_| Event::Stored { + maybe_meta_block: Some(meta_block), + maybe_block_signatures: Some(block_signatures), + }) + } + ShouldStore::MarkComplete(meta_block) => { + let block_hash = meta_block.block.hash(); + debug!(%block_hash, "marking block complete"); + let block_height = meta_block.block.height(); + effect_builder + .mark_block_completed(block_height) + .event(move |_| Event::Stored { + maybe_meta_block: Some(meta_block), + maybe_block_signatures: None, + }) + } + ShouldStore::SingleSignature(signature) => { + debug!(%signature, "storing finality signature"); + let mut block_signatures = BlockSignaturesV2::new( + *signature.block_hash(), + signature.block_height(), + signature.era_id(), + signature.chain_name_hash(), + ); + block_signatures + .insert_signature(signature.public_key().clone(), *signature.signature()); + effect_builder + .put_finality_signature_to_storage(signature.into()) + .event(move |_| Event::Stored { + maybe_meta_block: 
None, + maybe_block_signatures: Some(block_signatures), + }) + } + ShouldStore::Nothing => { + debug!("not storing block or finality signatures"); + Effects::new() + } + }; + effects.extend(faulty_senders.into_iter().flat_map(|(node_id, error)| { + effect_builder + .announce_block_peer_with_justification( + node_id, + BlocklistJustification::SentBadFinalitySignature { error }, + ) + .ignore() + })); + effects + } +} + +pub(crate) trait ReactorEvent: + From + + From + + From + + From + + From + + From + + Send + + 'static +{ +} + +impl ReactorEvent for REv where + REv: From + + From + + From + + From + + From + + From + + Send + + 'static +{ +} + +impl Component for BlockAccumulator { + type Event = Event; + + fn handle_event( + &mut self, + effect_builder: EffectBuilder, + _rng: &mut NodeRng, + event: Self::Event, + ) -> Effects { + match event { + Event::Request(BlockAccumulatorRequest::GetPeersForBlock { + block_hash, + responder, + }) => responder.respond(self.get_peers(block_hash)).ignore(), + Event::RegisterPeer { + block_hash, + era_id, + sender, + } => { + self.upsert_acceptor(block_hash, era_id, Some(sender)); + Effects::new() + } + Event::ReceivedBlock { block, sender } => { + let meta_block: ForwardMetaBlock = + MetaBlock::new_forward(block, vec![], MetaBlockState::new()) + .try_into() + .unwrap(); + self.register_block(effect_builder, meta_block, Some(sender)) + } + Event::CreatedFinalitySignature { finality_signature } => { + debug!(%finality_signature, "BlockAccumulator: CreatedFinalitySignature"); + self.register_finality_signature(effect_builder, *finality_signature, None) + } + Event::ReceivedFinalitySignature { + finality_signature, + sender, + } => { + self.register_finality_signature(effect_builder, *finality_signature, Some(sender)) + } + Event::ExecutedBlock { meta_block } => { + let height = meta_block.block.height(); + let era_id = meta_block.block.era_id(); + let effects = self.register_block(effect_builder, meta_block, None); + 
self.register_local_tip(height, era_id); + effects + } + Event::Stored { + maybe_meta_block, + maybe_block_signatures, + } => self.register_stored(effect_builder, maybe_meta_block, maybe_block_signatures), + } + } + + fn name(&self) -> &str { + COMPONENT_NAME + } +} + +impl ValidatorBoundComponent for BlockAccumulator { + fn handle_validators( + &mut self, + effect_builder: EffectBuilder, + _: &mut NodeRng, + ) -> Effects { + info!("BlockAccumulator: handling updated validator matrix"); + let validator_matrix = &self.validator_matrix; // Closure can't borrow all of self. + let should_stores = self + .block_acceptors + .values_mut() + .filter(|acceptor| false == acceptor.has_sufficient_finality()) + .filter_map(|acceptor| { + let era_id = acceptor.era_id()?; + let evw = validator_matrix.validator_weights(era_id)?; + Some(acceptor.should_store_block(&evw, validator_matrix.chain_name_hash())) + }) + .collect_vec(); + should_stores + .into_iter() + .flat_map(|(should_store, faulty_senders)| { + self.store_block_and_finality_signatures( + effect_builder, + should_store, + faulty_senders, + ) + }) + .collect() + } +} diff --git a/node/src/components/block_accumulator/block_acceptor.rs b/node/src/components/block_accumulator/block_acceptor.rs new file mode 100644 index 0000000000..41f562c8ce --- /dev/null +++ b/node/src/components/block_accumulator/block_acceptor.rs @@ -0,0 +1,557 @@ +use std::collections::{BTreeMap, BTreeSet}; + +use datasize::DataSize; +use itertools::Itertools; +use tracing::{debug, error, warn}; + +use casper_types::{ + ActivationPoint, BlockHash, BlockSignaturesV2, ChainNameDigest, EraId, FinalitySignatureV2, + PublicKey, Timestamp, +}; + +use crate::{ + components::block_accumulator::error::{Bogusness, Error as AcceptorError, InvalidGossipError}, + types::{EraValidatorWeights, ForwardMetaBlock, NodeId, SignatureWeight}, +}; + +#[derive(DataSize, Debug)] +pub(super) struct BlockAcceptor { + block_hash: BlockHash, + meta_block: Option, + signatures: 
BTreeMap)>, + peers: BTreeSet, + last_progress: Timestamp, + our_signature: Option, +} + +#[derive(Debug, PartialEq)] +#[allow(clippy::large_enum_variant)] +pub(super) enum ShouldStore { + SufficientlySignedBlock { + meta_block: ForwardMetaBlock, + block_signatures: BlockSignaturesV2, + }, + CompletedBlock { + meta_block: ForwardMetaBlock, + block_signatures: BlockSignaturesV2, + }, + MarkComplete(ForwardMetaBlock), + SingleSignature(FinalitySignatureV2), + Nothing, +} + +impl BlockAcceptor { + pub(super) fn new(block_hash: BlockHash, peers: I) -> Self + where + I: IntoIterator, + { + Self { + block_hash, + meta_block: None, + signatures: BTreeMap::new(), + peers: peers.into_iter().collect(), + last_progress: Timestamp::now(), + our_signature: None, + } + } + + pub(super) fn peers(&self) -> &BTreeSet { + &self.peers + } + + pub(super) fn register_peer(&mut self, peer: NodeId) { + self.peers.insert(peer); + } + + pub(super) fn register_block( + &mut self, + meta_block: ForwardMetaBlock, + peer: Option, + ) -> Result<(), AcceptorError> { + if self.block_hash() != *meta_block.block.hash() { + return Err(AcceptorError::BlockHashMismatch { + expected: self.block_hash(), + actual: *meta_block.block.hash(), + }); + } + + // Verify is needed for the cases when the block comes from the gossiper. It it came here + // from the fetcher it'll already be verified. 
+ if let Err(error) = meta_block.block.verify() { + warn!(%error, "received invalid block"); + return match peer { + Some(node_id) => Err(AcceptorError::InvalidGossip(Box::new( + InvalidGossipError::Block { + block_hash: *meta_block.block.hash(), + peer: node_id, + validation_error: error, + }, + ))), + None => Err(AcceptorError::InvalidConfiguration), + }; + } + + if let Some(node_id) = peer { + self.register_peer(node_id); + } + + match self.meta_block.take() { + Some(existing_meta_block) => { + let merged_meta_block = existing_meta_block.merge(meta_block)?; + self.meta_block = Some(merged_meta_block); + } + None => { + self.meta_block = Some(meta_block); + } + } + + Ok(()) + } + + pub(super) fn register_finality_signature( + &mut self, + finality_signature: FinalitySignatureV2, + peer: Option, + validator_slots: u32, + ) -> Result, AcceptorError> { + if self.block_hash != *finality_signature.block_hash() { + return Err(AcceptorError::BlockHashMismatch { + expected: self.block_hash, + actual: *finality_signature.block_hash(), + }); + } + if let Some(node_id) = peer { + // We multiply the number of validators by 2 to get the maximum of signatures, because + // of the theoretically possible scenario when we're collecting sigs but are + // not yet able to validate them (no validator weights). 
We should allow to + // absorb more than theoretical limit (but not much more) so we don't fill + // all slots with invalid sigs: + check_signatures_from_peer_bound(validator_slots * 2, node_id, &self.signatures)?; + } + if let Err(error) = finality_signature.is_verified() { + warn!(%error, "received invalid finality signature"); + return match peer { + Some(node_id) => Err(AcceptorError::InvalidGossip(Box::new( + InvalidGossipError::FinalitySignature { + block_hash: *finality_signature.block_hash(), + peer: node_id, + validation_error: error, + }, + ))), + None => Err(AcceptorError::InvalidConfiguration), + }; + } + + let had_sufficient_finality = self.has_sufficient_finality(); + // if we don't have finality yet, collect the signature and return + // while we could store the finality signature, we currently prefer + // to store block and signatures when sufficient weight is attained + if false == had_sufficient_finality { + if let Some(node_id) = peer { + self.register_peer(node_id); + } + self.signatures + .entry(finality_signature.public_key().clone()) + .and_modify(|(_, senders)| senders.extend(peer)) + .or_insert_with(|| (finality_signature, peer.into_iter().collect())); + return Ok(None); + } + + if let Some(meta_block) = &self.meta_block { + // if the signature's era does not match the block's era + // it's malicious / bogus / invalid. 
+ if meta_block.block.era_id() != finality_signature.era_id() { + return match peer { + Some(node_id) => Err(AcceptorError::EraMismatch { + block_hash: *finality_signature.block_hash(), + expected: meta_block.block.era_id(), + actual: finality_signature.era_id(), + peer: node_id, + }), + None => Err(AcceptorError::InvalidConfiguration), + }; + } + } else { + // should have block if self.has_sufficient_finality() + return Err(AcceptorError::SufficientFinalityWithoutBlock { + block_hash: *finality_signature.block_hash(), + }); + } + + if let Some(node_id) = peer { + self.register_peer(node_id); + } + let is_new = !self + .signatures + .contains_key(finality_signature.public_key()); + + self.signatures + .entry(finality_signature.public_key().clone()) + .and_modify(|(_, senders)| senders.extend(peer)) + .or_insert_with(|| (finality_signature.clone(), peer.into_iter().collect())); + + if had_sufficient_finality && is_new { + // we received this finality signature after putting the block & earlier signatures + // to storage + self.touch(); + return Ok(Some(finality_signature)); + }; + + // either we've seen this signature already or we're still waiting for sufficient finality + Ok(None) + } + + /// Returns instructions to write the block and/or finality signatures to storage. + /// Also returns a set of peers that sent us invalid data and should be banned. 
+ pub(super) fn should_store_block( + &mut self, + era_validator_weights: &EraValidatorWeights, + chain_name_hash: ChainNameDigest, + ) -> (ShouldStore, Vec<(NodeId, AcceptorError)>) { + let block_hash = self.block_hash; + let no_block = self.meta_block.is_none(); + let no_sigs = self.signatures.is_empty(); + if self.has_sufficient_finality() { + if let Some(meta_block) = self.meta_block.as_mut() { + if meta_block.state.is_executed() + && meta_block.state.register_as_marked_complete().was_updated() + { + debug!( + %block_hash, no_block, no_sigs, + "already have sufficient finality signatures, but marking block complete" + ); + return (ShouldStore::MarkComplete(meta_block.clone()), Vec::new()); + } + } + + debug!( + %block_hash, no_block, no_sigs, + "not storing anything - already have sufficient finality signatures" + ); + return (ShouldStore::Nothing, Vec::new()); + } + + if no_block || no_sigs { + debug!(%block_hash, no_block, no_sigs, "not storing block"); + return (ShouldStore::Nothing, Vec::new()); + } + + let faulty_senders = self.remove_bogus_validators(era_validator_weights); + let signature_weight = era_validator_weights.signature_weight(self.signatures.keys()); + if SignatureWeight::Strict == signature_weight { + self.touch(); + if let Some(meta_block) = self.meta_block.as_mut() { + let mut block_signatures = BlockSignaturesV2::new( + *meta_block.block.hash(), + meta_block.block.height(), + meta_block.block.era_id(), + chain_name_hash, + ); + self.signatures.values().for_each(|(signature, _)| { + block_signatures + .insert_signature(signature.public_key().clone(), *signature.signature()); + }); + if meta_block + .state + .register_has_sufficient_finality() + .was_already_registered() + { + error!( + %block_hash, + block_height = meta_block.block.height(), + meta_block_state = ?meta_block.state, + "should not register having sufficient finality for the same block more \ + than once" + ); + } + if meta_block.state.is_executed() { + if meta_block + .state + 
.register_as_marked_complete() + .was_already_registered() + { + error!( + %block_hash, + block_height = meta_block.block.height(), + meta_block_state = ?meta_block.state, + "should not mark the same block complete more than once" + ); + } + + return ( + ShouldStore::CompletedBlock { + meta_block: meta_block.clone(), + block_signatures, + }, + faulty_senders, + ); + } + if meta_block + .state + .register_as_stored() + .was_already_registered() + { + error!( + %block_hash, + block_height = meta_block.block.height(), + meta_block_state = ?meta_block.state, + "should not store the same block more than once" + ); + } + return ( + ShouldStore::SufficientlySignedBlock { + meta_block: meta_block.clone(), + block_signatures, + }, + faulty_senders, + ); + } + } + + let signed_weight = era_validator_weights.signed_weight(self.signatures.keys()); + let total_era_weight = era_validator_weights.get_total_weight(); + let satisfaction_percent = signed_weight * 100 / total_era_weight; + debug!( + %block_hash, + %signed_weight, + %total_era_weight, + %satisfaction_percent, + no_block, no_sigs, + "not storing anything - insufficient finality signatures" + ); + (ShouldStore::Nothing, faulty_senders) + } + + pub(super) fn has_sufficient_finality(&self) -> bool { + self.meta_block + .as_ref() + .map(|meta_block| meta_block.state.has_sufficient_finality()) + .unwrap_or(false) + } + + pub(super) fn era_id(&self) -> Option { + if let Some(meta_block) = &self.meta_block { + return Some(meta_block.block.era_id()); + } + if let Some((finality_signature, _)) = self.signatures.values().next() { + return Some(finality_signature.era_id()); + } + None + } + + pub(super) fn block_height(&self) -> Option { + self.meta_block + .as_ref() + .map(|meta_block| meta_block.block.height()) + } + + pub(super) fn block_hash(&self) -> BlockHash { + self.block_hash + } + + pub(super) fn is_upgrade_boundary( + &self, + activation_point: Option, + ) -> Option { + match (&self.meta_block, activation_point) { + 
(None, _) => None, + (Some(_), None) => Some(false), + (Some(meta_block), Some(activation_point)) => { + Some(meta_block.is_upgrade_boundary(activation_point)) + } + } + } + + pub(super) fn last_progress(&self) -> Timestamp { + self.last_progress + } + + pub(super) fn our_signature(&self) -> Option<&FinalitySignatureV2> { + self.our_signature.as_ref() + } + + pub(super) fn set_our_signature(&mut self, signature: FinalitySignatureV2) { + self.our_signature = Some(signature); + } + + /// Removes finality signatures that have the wrong era ID or are signed by non-validators. + /// Returns the set of peers that sent us these signatures. + fn remove_bogus_validators( + &mut self, + era_validator_weights: &EraValidatorWeights, + ) -> Vec<(NodeId, AcceptorError)> { + let bogus_validators = era_validator_weights.bogus_validators(self.signatures.keys()); + + let mut faulty_senders = Vec::new(); + bogus_validators.iter().for_each(|bogus_validator| { + debug!(%bogus_validator, "bogus validator"); + if let Some((_, senders)) = self.signatures.remove(bogus_validator) { + faulty_senders.extend(senders.iter().map(|sender| { + ( + *sender, + AcceptorError::BogusValidator(Bogusness::NotAValidator), + ) + })); + } + }); + + if let Some(meta_block) = &self.meta_block { + let bogus_validators = self + .signatures + .iter() + .filter(|(_, (v, _))| v.era_id() != meta_block.block.era_id()) + .map(|(k, _)| k.clone()) + .collect_vec(); + + bogus_validators.iter().for_each(|bogus_validator| { + debug!(%bogus_validator, "bogus validator"); + if let Some((_, senders)) = self.signatures.remove(bogus_validator) { + faulty_senders.extend(senders.iter().map(|sender| { + ( + *sender, + AcceptorError::BogusValidator(Bogusness::SignatureEraIdMismatch), + ) + })); + } + }); + } + + for (node_id, _) in &faulty_senders { + self.peers.remove(node_id); + } + + faulty_senders + } + + fn touch(&mut self) { + self.last_progress = Timestamp::now(); + } +} + +/// Returns an error if the peer has sent too many 
finality signatures. +fn check_signatures_from_peer_bound( + limit: u32, + peer: NodeId, + signatures: &BTreeMap)>, +) -> Result<(), AcceptorError> { + let signatures_for_peer = signatures + .values() + .filter(|(_fin_sig, nodes)| nodes.contains(&peer)) + .count(); + + if signatures_for_peer < limit as usize { + Ok(()) + } else { + Err(AcceptorError::TooManySignatures { peer, limit }) + } +} + +#[cfg(test)] +impl BlockAcceptor { + pub(super) fn executed(&self) -> bool { + self.meta_block + .as_ref() + .is_some_and(|meta_block| meta_block.state.is_executed()) + } + + pub(super) fn meta_block(&self) -> Option { + self.meta_block.clone() + } + + pub(super) fn set_last_progress(&mut self, last_progress: Timestamp) { + self.last_progress = last_progress; + } + + pub(super) fn set_meta_block(&mut self, meta_block: Option) { + self.meta_block = meta_block; + } + + pub(super) fn set_sufficient_finality(&mut self, has_sufficient_finality: bool) { + if let Some(meta_block) = self.meta_block.as_mut() { + meta_block + .state + .set_sufficient_finality(has_sufficient_finality); + } + } + + pub(super) fn signatures( + &self, + ) -> &BTreeMap)> { + &self.signatures + } + + pub(super) fn signatures_mut( + &mut self, + ) -> &mut BTreeMap)> { + &mut self.signatures + } +} + +#[cfg(test)] +mod tests { + use super::*; + use casper_types::testing::TestRng; + //use crate::types::NodeId; + //use std::collections::{BTreeMap, BTreeSet}; + + #[test] + fn check_signatures_from_peer_bound_works() { + let rng = &mut TestRng::new(); + let max_signatures = 3; + let peer_to_check = NodeId::random(rng); + + let mut signatures = BTreeMap::new(); + // Insert only the peer to check: + signatures.insert( + PublicKey::random(rng), + (FinalitySignatureV2::random(rng), { + let mut nodes = BTreeSet::new(); + nodes.insert(peer_to_check); + nodes + }), + ); + // Insert an unrelated peer: + signatures.insert( + PublicKey::random(rng), + (FinalitySignatureV2::random(rng), { + let mut nodes = BTreeSet::new(); 
+ nodes.insert(NodeId::random(rng)); + nodes + }), + ); + // Insert both the peer to check and an unrelated one: + signatures.insert( + PublicKey::random(rng), + (FinalitySignatureV2::random(rng), { + let mut nodes = BTreeSet::new(); + nodes.insert(NodeId::random(rng)); + nodes.insert(peer_to_check); + nodes + }), + ); + + // The peer has send only 2 signatures, so adding a new signature should pass: + assert!(matches!( + check_signatures_from_peer_bound(max_signatures, peer_to_check, &signatures), + Ok(()) + )); + + // Let's insert once again both the peer to check and an unrelated one: + signatures.insert( + PublicKey::random(rng), + (FinalitySignatureV2::random(rng), { + let mut nodes = BTreeSet::new(); + nodes.insert(NodeId::random(rng)); + nodes.insert(peer_to_check); + nodes + }), + ); + + // Now this should fail: + assert!(matches!( + check_signatures_from_peer_bound(max_signatures, peer_to_check, &signatures), + Err(AcceptorError::TooManySignatures { peer, limit }) + if peer == peer_to_check && limit == max_signatures + )); + } +} diff --git a/node/src/components/block_accumulator/config.rs b/node/src/components/block_accumulator/config.rs new file mode 100644 index 0000000000..e1db0bc972 --- /dev/null +++ b/node/src/components/block_accumulator/config.rs @@ -0,0 +1,33 @@ +use datasize::DataSize; +use serde::{Deserialize, Serialize}; + +use casper_types::TimeDiff; + +const DEFAULT_ATTEMPT_EXECUTION_THRESHOLD: u64 = 3; +const DEFAULT_DEAD_AIR_INTERVAL_SECS: u32 = 180; +#[cfg(test)] +const DEFAULT_PURGE_INTERVAL_SECS: u32 = 5; // 5 seconds. + +#[cfg(not(test))] +const DEFAULT_PURGE_INTERVAL_SECS: u32 = 5 * 60; // 5 minutes. + +/// Configuration options for the block accumulator. +#[derive(Copy, Clone, DataSize, Debug, Deserialize, Serialize)] +pub struct Config { + /// Attempt execution threshold. + pub attempt_execution_threshold: u64, + /// Dead air interval. + pub dead_air_interval: TimeDiff, + /// Purge interval. 
+ pub purge_interval: TimeDiff, +} + +impl Default for Config { + fn default() -> Self { + Config { + attempt_execution_threshold: DEFAULT_ATTEMPT_EXECUTION_THRESHOLD, + dead_air_interval: TimeDiff::from_seconds(DEFAULT_DEAD_AIR_INTERVAL_SECS), + purge_interval: TimeDiff::from_seconds(DEFAULT_PURGE_INTERVAL_SECS), + } + } +} diff --git a/node/src/components/block_accumulator/error.rs b/node/src/components/block_accumulator/error.rs new file mode 100644 index 0000000000..de09081e6a --- /dev/null +++ b/node/src/components/block_accumulator/error.rs @@ -0,0 +1,67 @@ +use thiserror::Error; +use tracing::error; + +use casper_types::{crypto, BlockHash, BlockValidationError, EraId}; + +use crate::types::{MetaBlockMergeError, NodeId}; + +#[derive(Error, Debug)] +pub(crate) enum InvalidGossipError { + #[error("received cryptographically invalid block for: {block_hash} from: {peer} with error: {validation_error}")] + Block { + block_hash: BlockHash, + peer: NodeId, + validation_error: BlockValidationError, + }, + #[error("received cryptographically invalid finality_signature for: {block_hash} from: {peer} with error: {validation_error}")] + FinalitySignature { + block_hash: BlockHash, + peer: NodeId, + validation_error: crypto::Error, + }, +} + +impl InvalidGossipError { + pub(super) fn peer(&self) -> NodeId { + match self { + InvalidGossipError::FinalitySignature { peer, .. } + | InvalidGossipError::Block { peer, .. 
} => *peer,
+        }
+    }
+}
+
+#[derive(Error, Debug)]
+pub(crate) enum Bogusness {
+    #[error("peer is not a validator in current era")]
+    NotAValidator,
+    #[error("peer provided finality signatures from incorrect era")]
+    SignatureEraIdMismatch,
+}
+
+#[derive(Error, Debug)]
+pub(crate) enum Error {
+    #[error(transparent)]
+    InvalidGossip(Box<InvalidGossipError>),
+    #[error("invalid configuration")]
+    InvalidConfiguration,
+    #[error("mismatched eras detected")]
+    EraMismatch {
+        block_hash: BlockHash,
+        expected: EraId,
+        actual: EraId,
+        peer: NodeId,
+    },
+    #[error("mismatched block hash: expected={expected}, actual={actual}")]
+    BlockHashMismatch {
+        expected: BlockHash,
+        actual: BlockHash,
+    },
+    #[error("should not be possible to have sufficient finality without block: {block_hash}")]
+    SufficientFinalityWithoutBlock { block_hash: BlockHash },
+    #[error("bogus validator detected")]
+    BogusValidator(Bogusness),
+    #[error(transparent)]
+    MetaBlockMerge(#[from] MetaBlockMergeError),
+    #[error("tried to insert a signature past the bounds")]
+    TooManySignatures { peer: NodeId, limit: u32 },
+}
diff --git a/node/src/components/block_accumulator/event.rs b/node/src/components/block_accumulator/event.rs
new file mode 100644
index 0000000000..3774a7cb8a
--- /dev/null
+++ b/node/src/components/block_accumulator/event.rs
@@ -0,0 +1,107 @@
+use std::{
+    fmt::{self, Display, Formatter},
+    sync::Arc,
+};
+
+use derive_more::From;
+
+use casper_types::{BlockHash, BlockSignaturesV2, BlockV2, EraId, FinalitySignatureV2};
+
+use crate::{
+    effect::requests::BlockAccumulatorRequest,
+    types::{ForwardMetaBlock, NodeId},
+};
+
+#[derive(Debug, From)]
+pub(crate) enum Event {
+    #[from]
+    Request(BlockAccumulatorRequest),
+    RegisterPeer {
+        block_hash: BlockHash,
+        era_id: Option<EraId>,
+        sender: NodeId,
+    },
+    ReceivedBlock {
+        block: Arc<BlockV2>,
+        sender: NodeId,
+    },
+    CreatedFinalitySignature {
+        finality_signature: Box<FinalitySignatureV2>,
+    },
+    ReceivedFinalitySignature {
+        finality_signature: Box<FinalitySignatureV2>,
+        sender: NodeId,
+    },
+    ExecutedBlock {
+        meta_block: ForwardMetaBlock,
+    },
+    Stored {
+        maybe_meta_block: Option<ForwardMetaBlock>,
+        maybe_block_signatures: Option<BlockSignaturesV2>,
+    },
+}
+
+impl Display for Event {
+    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+        match self {
+            Event::Request(BlockAccumulatorRequest::GetPeersForBlock { block_hash, .. }) => {
+                write!(
+                    f,
+                    "block accumulator peers request for block: {}",
+                    block_hash
+                )
+            }
+            Event::RegisterPeer {
+                block_hash, sender, ..
+            } => {
+                write!(
+                    f,
+                    "registering peer {} after gossip: {}",
+                    sender, block_hash
+                )
+            }
+            Event::ReceivedBlock { block, sender } => {
+                write!(f, "received {} from {}", block, sender)
+            }
+            Event::CreatedFinalitySignature { finality_signature } => {
+                write!(f, "created {}", finality_signature)
+            }
+            Event::ReceivedFinalitySignature {
+                finality_signature,
+                sender,
+            } => {
+                write!(f, "received {} from {}", finality_signature, sender)
+            }
+            Event::ExecutedBlock { meta_block } => {
+                write!(f, "executed block {}", meta_block.block.hash())
+            }
+            Event::Stored {
+                maybe_meta_block: Some(meta_block),
+                maybe_block_signatures,
+            } => {
+                write!(
+                    f,
+                    "stored {} and {} finality signatures",
+                    meta_block.block.hash(),
+                    maybe_block_signatures
+                        .as_ref()
+                        .map(|sigs| sigs.len())
+                        .unwrap_or_default()
+                )
+            }
+            Event::Stored {
+                maybe_meta_block: None,
+                maybe_block_signatures,
+            } => {
+                write!(
+                    f,
+                    "stored {} finality signatures",
+                    maybe_block_signatures
+                        .as_ref()
+                        .map(|sigs| sigs.len())
+                        .unwrap_or_default()
+                )
+            }
+        }
+    }
+}
diff --git a/node/src/components/block_accumulator/leap_instruction.rs b/node/src/components/block_accumulator/leap_instruction.rs
new file mode 100644
index 0000000000..7dcf61a8f1
--- /dev/null
+++ b/node/src/components/block_accumulator/leap_instruction.rs
@@ -0,0 +1,90 @@
+use std::fmt::{Display, Formatter};
+
+#[derive(Debug, PartialEq)]
+pub(super) enum LeapInstruction {
+    // should not leap
+    AtHighestKnownBlock,
+    WithinAttemptExecutionThreshold(u64),
+    
TooCloseToUpgradeBoundary(u64), + NoUsableBlockAcceptors, + + // should leap + UnsetLocalTip, + UnknownBlockHeight, + OutsideAttemptExecutionThreshold(u64), +} + +impl LeapInstruction { + pub(super) fn from_execution_threshold( + attempt_execution_threshold: u64, + distance_from_highest_known_block: u64, + is_upgrade_boundary: bool, + ) -> Self { + if distance_from_highest_known_block == 0 { + return LeapInstruction::AtHighestKnownBlock; + } + // allow double the execution threshold back off as a safety margin + if is_upgrade_boundary + && distance_from_highest_known_block <= attempt_execution_threshold * 2 + { + return LeapInstruction::TooCloseToUpgradeBoundary(distance_from_highest_known_block); + } + if distance_from_highest_known_block > attempt_execution_threshold { + return LeapInstruction::OutsideAttemptExecutionThreshold( + distance_from_highest_known_block, + ); + } + LeapInstruction::WithinAttemptExecutionThreshold(distance_from_highest_known_block) + } + + pub(super) fn should_leap(&self) -> bool { + match self { + LeapInstruction::AtHighestKnownBlock + | LeapInstruction::WithinAttemptExecutionThreshold(_) + | LeapInstruction::TooCloseToUpgradeBoundary(_) + | LeapInstruction::NoUsableBlockAcceptors => false, + LeapInstruction::UnsetLocalTip + | LeapInstruction::UnknownBlockHeight + | LeapInstruction::OutsideAttemptExecutionThreshold(_) => true, + } + } +} + +impl Display for LeapInstruction { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self { + LeapInstruction::AtHighestKnownBlock => { + write!(f, "at highest known block") + } + LeapInstruction::TooCloseToUpgradeBoundary(diff) => { + write!(f, "{} blocks away from protocol upgrade", diff) + } + LeapInstruction::WithinAttemptExecutionThreshold(diff) => { + write!( + f, + "within attempt_execution_threshold, {} blocks behind highest known block", + diff + ) + } + LeapInstruction::OutsideAttemptExecutionThreshold(diff) => { + write!( + f, + "outside attempt_execution_threshold, {} 
blocks behind highest known block",
+                    diff
+                )
+            }
+            LeapInstruction::UnsetLocalTip => {
+                write!(f, "block accumulator local tip is unset")
+            }
+            LeapInstruction::UnknownBlockHeight => {
+                write!(f, "unknown block height")
+            }
+            LeapInstruction::NoUsableBlockAcceptors => {
+                write!(
+                    f,
+                    "currently have no block acceptor instances with sufficient finality"
+                )
+            }
+        }
+    }
+}
diff --git a/node/src/components/block_accumulator/local_tip_identifier.rs b/node/src/components/block_accumulator/local_tip_identifier.rs
new file mode 100644
index 0000000000..4535d4fdbe
--- /dev/null
+++ b/node/src/components/block_accumulator/local_tip_identifier.rs
@@ -0,0 +1,28 @@
+use std::cmp::Ordering;
+
+use casper_types::EraId;
+use datasize::DataSize;
+
+#[derive(Clone, Copy, DataSize, Debug, Eq, PartialEq)]
+pub(super) struct LocalTipIdentifier {
+    pub(super) height: u64,
+    pub(super) era_id: EraId,
+}
+
+impl LocalTipIdentifier {
+    pub(super) fn new(height: u64, era_id: EraId) -> Self {
+        Self { height, era_id }
+    }
+}
+
+impl PartialOrd for LocalTipIdentifier {
+    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+impl Ord for LocalTipIdentifier {
+    fn cmp(&self, other: &Self) -> Ordering {
+        self.height.cmp(&other.height)
+    }
+}
diff --git a/node/src/components/block_accumulator/metrics.rs b/node/src/components/block_accumulator/metrics.rs
new file mode 100644
index 0000000000..5e44639b02
--- /dev/null
+++ b/node/src/components/block_accumulator/metrics.rs
@@ -0,0 +1,44 @@
+use prometheus::{IntGauge, Registry};
+
+use crate::unregister_metric;
+
+/// Metrics for the block accumulator component.
+#[derive(Debug)]
+pub(super) struct Metrics {
+    /// Total number of BlockAcceptors contained in the BlockAccumulator.
+    pub(super) block_acceptors: IntGauge,
+    /// Number of child block hashes that we know of and that will be used in order to request next
+    /// blocks.
+    pub(super) known_child_blocks: IntGauge,
+    registry: Registry,
+}
+
+impl Metrics {
+    /// Creates a new instance of the block accumulator metrics, using the given prefix.
+    pub fn new(registry: &Registry) -> Result<Self, prometheus::Error> {
+        let block_acceptors = IntGauge::new(
+            "block_accumulator_block_acceptors".to_string(),
+            "number of block acceptors in the Block Accumulator".to_string(),
+        )?;
+        let known_child_blocks = IntGauge::new(
+            "block_accumulator_known_child_blocks".to_string(),
+            "number of blocks received by the Block Accumulator for which we know the hash of the child block".to_string(),
+        )?;
+
+        registry.register(Box::new(block_acceptors.clone()))?;
+        registry.register(Box::new(known_child_blocks.clone()))?;
+
+        Ok(Metrics {
+            block_acceptors,
+            known_child_blocks,
+            registry: registry.clone(),
+        })
+    }
+}
+
+impl Drop for Metrics {
+    fn drop(&mut self) {
+        unregister_metric!(self.registry, self.block_acceptors);
+        unregister_metric!(self.registry, self.known_child_blocks);
+    }
+}
diff --git a/node/src/components/block_accumulator/sync_identifier.rs b/node/src/components/block_accumulator/sync_identifier.rs
new file mode 100644
index 0000000000..c2b1868a3f
--- /dev/null
+++ b/node/src/components/block_accumulator/sync_identifier.rs
@@ -0,0 +1,103 @@
+use std::fmt::{Display, Formatter};
+
+use casper_types::{BlockHash, EraId};
+
+#[derive(Clone, Debug)]
+pub(crate) enum SyncIdentifier {
+    // all we know about the block is its hash;
+    // this is usually a trusted hash from config
+    BlockHash(BlockHash),
+    // we know both the hash and the height of the block
+    BlockIdentifier(BlockHash, u64),
+    // we have just acquired the necessary data for the block
+    // including sufficient finality; this may be a historical
+    // block and / or potentially the new highest block
+    SyncedBlockIdentifier(BlockHash, u64, EraId),
+    // we acquired the necessary data for the block, including
+    // sufficient finality and it has been enqueued for
+    // execution; this state is valid for forward blocks only
+    ExecutingBlockIdentifier(BlockHash, u64, EraId),
+    // we read this block from disk, and have all the parts
+    // we need to discover its descendant (if any) to continue.
+    LocalTip(BlockHash, u64, EraId),
+}
+
+impl SyncIdentifier {
+    pub(crate) fn block_hash(&self) -> BlockHash {
+        match self {
+            SyncIdentifier::BlockIdentifier(hash, _)
+            | SyncIdentifier::SyncedBlockIdentifier(hash, _, _)
+            | SyncIdentifier::ExecutingBlockIdentifier(hash, _, _)
+            | SyncIdentifier::LocalTip(hash, _, _)
+            | SyncIdentifier::BlockHash(hash) => *hash,
+        }
+    }
+
+    pub(crate) fn block_height(&self) -> Option<u64> {
+        match self {
+            SyncIdentifier::BlockIdentifier(_, height)
+            | SyncIdentifier::SyncedBlockIdentifier(_, height, _)
+            | SyncIdentifier::ExecutingBlockIdentifier(_, height, _)
+            | SyncIdentifier::LocalTip(_, height, _) => Some(*height),
+            SyncIdentifier::BlockHash(_) => None,
+        }
+    }
+
+    pub(crate) fn era_id(&self) -> Option<EraId> {
+        match self {
+            SyncIdentifier::BlockHash(_) | SyncIdentifier::BlockIdentifier(_, _) => None,
+            SyncIdentifier::SyncedBlockIdentifier(_, _, era_id)
+            | SyncIdentifier::ExecutingBlockIdentifier(_, _, era_id)
+            | SyncIdentifier::LocalTip(_, _, era_id) => Some(*era_id),
+        }
+    }
+
+    pub(crate) fn block_height_and_era(&self) -> Option<(u64, EraId)> {
+        if let (Some(block_height), Some(era_id)) = (self.block_height(), self.era_id()) {
+            return Some((block_height, era_id));
+        }
+        None
+    }
+
+    pub(crate) fn is_held_locally(&self) -> bool {
+        match self {
+            SyncIdentifier::BlockHash(_) | SyncIdentifier::BlockIdentifier(_, _) => false,
+
+            SyncIdentifier::SyncedBlockIdentifier(_, _, _)
+            | SyncIdentifier::ExecutingBlockIdentifier(_, _, _)
+            | SyncIdentifier::LocalTip(_, _, _) => true,
+        }
+    }
+
+    pub(crate) fn block_hash_to_sync(&self, child_hash: Option<BlockHash>) -> Option<BlockHash> {
+        if self.is_held_locally() {
+            child_hash
+        } else {
+            Some(self.block_hash())
+        }
+    }
+}
+
+impl Display for SyncIdentifier {
+    fn fmt(&self, f: &mut Formatter<'_>) ->
std::fmt::Result { + match self { + SyncIdentifier::BlockHash(block_hash) => block_hash.fmt(f), + SyncIdentifier::BlockIdentifier(block_hash, block_height) => { + write!( + f, + "block_hash: {} block_height: {}", + block_hash, block_height + ) + } + SyncIdentifier::SyncedBlockIdentifier(block_hash, block_height, era_id) + | SyncIdentifier::ExecutingBlockIdentifier(block_hash, block_height, era_id) + | SyncIdentifier::LocalTip(block_hash, block_height, era_id) => { + write!( + f, + "block_hash: {} block_height: {} era_id: {}", + block_hash, block_height, era_id + ) + } + } + } +} diff --git a/node/src/components/block_accumulator/sync_instruction.rs b/node/src/components/block_accumulator/sync_instruction.rs new file mode 100644 index 0000000000..93556ef6ce --- /dev/null +++ b/node/src/components/block_accumulator/sync_instruction.rs @@ -0,0 +1,20 @@ +use casper_types::BlockHash; + +#[derive(Debug)] +pub(crate) enum SyncInstruction { + Leap { block_hash: BlockHash }, + BlockSync { block_hash: BlockHash }, + CaughtUp { block_hash: BlockHash }, + LeapIntervalElapsed { block_hash: BlockHash }, +} + +impl SyncInstruction { + pub(crate) fn block_hash(&self) -> BlockHash { + match self { + SyncInstruction::Leap { block_hash } + | SyncInstruction::BlockSync { block_hash } + | SyncInstruction::CaughtUp { block_hash } + | SyncInstruction::LeapIntervalElapsed { block_hash } => *block_hash, + } + } +} diff --git a/node/src/components/block_accumulator/tests.rs b/node/src/components/block_accumulator/tests.rs new file mode 100644 index 0000000000..21fd8d98be --- /dev/null +++ b/node/src/components/block_accumulator/tests.rs @@ -0,0 +1,2177 @@ +use std::{ + collections::BTreeSet, + fmt::{self, Debug, Display, Formatter}, + sync::Arc, + time::Duration, +}; + +use derive_more::From; +use num_rational::Ratio; +use prometheus::Registry; +use rand::Rng; +use serde::Serialize; +use tempfile::TempDir; +use thiserror::Error as ThisError; +use tokio::time; + +use casper_types::{ + 
generate_ed25519_keypair, testing::TestRng, ActivationPoint, BlockV2, ChainNameDigest,
+    Chainspec, ChainspecRawBytes, FinalitySignature, FinalitySignatureV2, ProtocolVersion,
+    PublicKey, SecretKey, Signature, TestBlockBuilder, TransactionConfig, U512,
+};
+use reactor::ReactorEvent;
+
+use crate::{
+    components::{
+        consensus::tests::utils::{
+            ALICE_NODE_ID, ALICE_PUBLIC_KEY, ALICE_SECRET_KEY, BOB_NODE_ID, BOB_PUBLIC_KEY,
+            BOB_SECRET_KEY, CAROL_PUBLIC_KEY, CAROL_SECRET_KEY,
+        },
+        network::Identity as NetworkIdentity,
+        storage::{self, Storage},
+    },
+    effect::{
+        announcements::ControlAnnouncement,
+        requests::{ContractRuntimeRequest, MarkBlockCompletedRequest, NetworkRequest},
+    },
+    protocol::Message,
+    reactor::{self, EventQueueHandle, QueueKind, Reactor, Runner, TryCrankOutcome},
+    types::EraValidatorWeights,
+    utils::{Loadable, WithDir},
+    NodeRng,
+};
+
+use super::*;
+
+const POLL_INTERVAL: Duration = Duration::from_millis(10);
+const RECENT_ERA_INTERVAL: u64 = 1;
+const VALIDATOR_SLOTS: u32 = 100;
+
+fn meta_block_with_default_state(block: Arc<BlockV2>) -> ForwardMetaBlock {
+    MetaBlock::new_forward(block, vec![], MetaBlockState::new())
+        .try_into()
+        .unwrap()
+}
+
+fn signatures_for_block(
+    block: &BlockV2,
+    signatures: &[FinalitySignatureV2],
+    chain_name_hash: ChainNameDigest,
+) -> BlockSignaturesV2 {
+    let mut block_signatures = BlockSignaturesV2::new(
+        *block.hash(),
+        block.height(),
+        block.era_id(),
+        chain_name_hash,
+    );
+    for signature in signatures {
+        block_signatures.insert_signature(signature.public_key().clone(), *signature.signature());
+    }
+    block_signatures
+}
+
+/// Top-level event for the reactor.
+#[derive(Debug, From, Serialize)]
+#[allow(clippy::large_enum_variant)]
+#[must_use]
+enum Event {
+    #[from]
+    Storage(#[serde(skip_serializing)] storage::Event),
+    #[from]
+    BlockAccumulator(#[serde(skip_serializing)] super::Event),
+    #[from]
+    ControlAnnouncement(ControlAnnouncement),
+    #[from]
+    FatalAnnouncement(FatalAnnouncement),
+    #[from]
+    BlockAccumulatorAnnouncement(#[serde(skip_serializing)] BlockAccumulatorAnnouncement),
+    #[from]
+    MetaBlockAnnouncement(#[serde(skip_serializing)] MetaBlockAnnouncement),
+    #[from]
+    ContractRuntime(#[serde(skip_serializing)] ContractRuntimeRequest),
+    #[from]
+    StorageRequest(StorageRequest),
+    #[from]
+    NetworkRequest(NetworkRequest<Message>),
+    #[from]
+    NetworkPeerBehaviorAnnouncement(PeerBehaviorAnnouncement),
+}
+
+impl From<MarkBlockCompletedRequest> for Event {
+    fn from(request: MarkBlockCompletedRequest) -> Self {
+        Event::Storage(storage::Event::MarkBlockCompletedRequest(request))
+    }
+}
+
+impl ReactorEvent for Event {
+    fn is_control(&self) -> bool {
+        matches!(self, Event::ControlAnnouncement(_))
+    }
+
+    fn try_into_control(self) -> Option<ControlAnnouncement> {
+        if let Self::ControlAnnouncement(ctrl_ann) = self {
+            Some(ctrl_ann)
+        } else {
+            None
+        }
+    }
+}
+
+impl Display for Event {
+    fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {
+        match self {
+            Event::Storage(event) => write!(formatter, "storage: {}", event),
+            Event::BlockAccumulator(event) => write!(formatter, "block accumulator: {}", event),
+            Event::ControlAnnouncement(ctrl_ann) => write!(formatter, "control: {}", ctrl_ann),
+            Event::FatalAnnouncement(fatal_ann) => write!(formatter, "fatal: {}", fatal_ann),
+            Event::BlockAccumulatorAnnouncement(ann) => {
+                write!(formatter, "block-accumulator announcement: {}", ann)
+            }
+            Event::MetaBlockAnnouncement(meta_block_ann) => {
+                write!(formatter, "meta block announcement: {}", meta_block_ann)
+            }
+            Event::ContractRuntime(event) => {
+                write!(formatter, "contract-runtime event: {:?}", event)
+            }
+            Event::StorageRequest(request) => write!(formatter, "storage request: {:?}", request),
+            Event::NetworkRequest(request) => write!(formatter, "network request: {:?}", request),
+            Event::NetworkPeerBehaviorAnnouncement(peer_behavior) => {
+                write!(formatter, "peer behavior announcement: {:?}", peer_behavior)
+            }
+        }
+    }
+}
+
+/// Error type returned by the test reactor.
+#[derive(Debug, ThisError)]
+enum ReactorError {
+    #[error("prometheus (metrics) error: {0}")]
+    Metrics(#[from] prometheus::Error),
+}
+
+struct MockReactor {
+    storage: Storage,
+    block_accumulator: BlockAccumulator,
+    blocked_peers: Vec<PeerBehaviorAnnouncement>,
+    validator_matrix: ValidatorMatrix,
+    _storage_tempdir: TempDir,
+}
+
+impl Reactor for MockReactor {
+    type Event = Event;
+    type Config = ();
+    type Error = ReactorError;
+
+    fn new(
+        _config: Self::Config,
+        chainspec: Arc<Chainspec>,
+        _chainspec_raw_bytes: Arc<ChainspecRawBytes>,
+        _network_identity: NetworkIdentity,
+        registry: &Registry,
+        _event_queue: EventQueueHandle<Self::Event>,
+        _rng: &mut NodeRng,
+    ) -> Result<(Self, Effects<Self::Event>), Self::Error> {
+        let (storage_config, storage_tempdir) = storage::Config::new_for_tests(1);
+        let storage_withdir = WithDir::new(storage_tempdir.path(), storage_config);
+        let validator_matrix = ValidatorMatrix::new_with_validator(ALICE_SECRET_KEY.clone());
+        let block_accumulator_config = Config::default();
+        let block_time = block_accumulator_config.purge_interval / 2;
+
+        let block_accumulator = BlockAccumulator::new(
+            block_accumulator_config,
+            validator_matrix.clone(),
+            RECENT_ERA_INTERVAL,
+            block_time,
+            VALIDATOR_SLOTS,
+            registry,
+        )
+        .unwrap();
+
+        let storage = Storage::new(
+            &storage_withdir,
+            None,
+            ProtocolVersion::from_parts(1, 0, 0),
+            EraId::default(),
+            "test",
+            chainspec.transaction_config.max_ttl.into(),
+            chainspec.core_config.recent_era_count(),
+            Some(registry),
+            false,
+            TransactionConfig::default(),
+        )
+        .unwrap();
+
+        let reactor = MockReactor {
+            storage,
+            block_accumulator,
+            blocked_peers: vec![],
+            validator_matrix,
+            _storage_tempdir: storage_tempdir,
+        };
+
+        let effects = Effects::new();
+
+        Ok((reactor, effects))
+    }
+
+    fn dispatch_event(
+        &mut self,
+        effect_builder: EffectBuilder<Self::Event>,
+        rng: &mut NodeRng,
+        event: Event,
+    ) -> Effects<Self::Event> {
+        match event {
+            Event::Storage(event) => reactor::wrap_effects(
+                Event::Storage,
+                self.storage.handle_event(effect_builder, rng, event),
+            ),
+            Event::StorageRequest(req) => reactor::wrap_effects(
+                Event::Storage,
+                self.storage.handle_event(effect_builder, rng, req.into()),
+            ),
+            Event::BlockAccumulator(event) => reactor::wrap_effects(
+                Event::BlockAccumulator,
+                self.block_accumulator
+                    .handle_event(effect_builder, rng, event),
+            ),
+            Event::MetaBlockAnnouncement(MetaBlockAnnouncement(mut meta_block)) => {
+                let effects = Effects::new();
+                let state = meta_block.mut_state();
+                assert!(state.is_stored());
+                state.register_as_sent_to_transaction_buffer();
+                if !state.is_executed() {
+                    return effects;
+                }
+
+                state.register_we_have_tried_to_sign();
+                state.register_as_consensus_notified();
+
+                if state.register_as_accumulator_notified().was_updated() {
+                    return reactor::wrap_effects(
+                        Event::BlockAccumulator,
+                        self.block_accumulator.handle_event(
+                            effect_builder,
+                            rng,
+                            super::Event::ExecutedBlock {
+                                meta_block: meta_block.try_into().unwrap(),
+                            },
+                        ),
+                    );
+                }
+
+                assert!(state.is_marked_complete());
+                state.register_as_gossiped();
+                assert!(state.verify_complete());
+                effects
+            }
+            Event::ControlAnnouncement(ctrl_ann) => {
+                panic!("unhandled control announcement: {}", ctrl_ann)
+            }
+            Event::FatalAnnouncement(fatal_ann) => {
+                panic!("unhandled fatal announcement: {}", fatal_ann)
+            }
+            Event::BlockAccumulatorAnnouncement(_) => {
+                // We do not care about block accumulator announcements in these tests.
+ Effects::new() + } + Event::ContractRuntime(_event) => { + panic!("test does not handle contract runtime events") + } + Event::NetworkRequest(_) => panic!("test does not handle network requests"), + Event::NetworkPeerBehaviorAnnouncement(peer_behavior) => { + self.blocked_peers.push(peer_behavior); + Effects::new() + } + } + } +} + +#[test] +fn upsert_acceptor() { + let mut rng = TestRng::new(); + let config = Config::default(); + let era0 = EraId::from(0); + let validator_matrix = ValidatorMatrix::new_with_validator(ALICE_SECRET_KEY.clone()); + let recent_era_interval = 1; + let block_time = config.purge_interval / 2; + let metrics_registry = Registry::new(); + let mut accumulator = BlockAccumulator::new( + config, + validator_matrix, + recent_era_interval, + block_time, + VALIDATOR_SLOTS, + &metrics_registry, + ) + .unwrap(); + + let random_block_hash = BlockHash::random(&mut rng); + accumulator.upsert_acceptor(random_block_hash, Some(era0), Some(*ALICE_NODE_ID)); + assert!(accumulator + .block_acceptors + .remove(&random_block_hash) + .is_some()); + assert!(accumulator + .peer_block_timestamps + .remove(&ALICE_NODE_ID) + .is_some()); + + accumulator.register_local_tip(0, EraId::new(0)); + + let max_block_count = + PEER_RATE_LIMIT_MULTIPLIER * ((config.purge_interval / block_time) as usize); + + for _ in 0..max_block_count { + accumulator.upsert_acceptor( + BlockHash::random(&mut rng), + Some(era0), + Some(*ALICE_NODE_ID), + ); + } + + assert_eq!(accumulator.block_acceptors.len(), max_block_count); + + let block_hash = BlockHash::random(&mut rng); + + // Alice has sent us too many blocks; we don't register this one. + accumulator.upsert_acceptor(block_hash, Some(era0), Some(*ALICE_NODE_ID)); + assert_eq!(accumulator.block_acceptors.len(), max_block_count); + assert!(!accumulator.block_acceptors.contains_key(&block_hash)); + + // Bob hasn't sent us anything yet. But we don't insert without an era ID. 
+ accumulator.upsert_acceptor(block_hash, None, Some(*BOB_NODE_ID)); + assert_eq!(accumulator.block_acceptors.len(), max_block_count); + assert!(!accumulator.block_acceptors.contains_key(&block_hash)); + + // With an era ID he's allowed to tell us about this one. + accumulator.upsert_acceptor(block_hash, Some(era0), Some(*BOB_NODE_ID)); + assert_eq!(accumulator.block_acceptors.len(), max_block_count + 1); + assert!(accumulator.block_acceptors.contains_key(&block_hash)); + + // And if Alice tells us about it _now_, we'll register her as a peer. + accumulator.upsert_acceptor(block_hash, None, Some(*ALICE_NODE_ID)); + assert!(accumulator.block_acceptors[&block_hash] + .peers() + .contains(&ALICE_NODE_ID)); + + // Modify the timestamp of the acceptor we just added to be too old. + let purge_interval = config.purge_interval * 2; + let purged_hash = { + let (hash, timestamp) = accumulator + .peer_block_timestamps + .get_mut(&ALICE_NODE_ID) + .unwrap() + .front_mut() + .unwrap(); + *timestamp = Timestamp::now().saturating_sub(purge_interval); + *hash + }; + // This should lead to a purge of said acceptor, therefore enabling us to + // add another one for Alice. + assert_eq!(accumulator.block_acceptors.len(), max_block_count + 1); + accumulator.upsert_acceptor( + BlockHash::random(&mut rng), + Some(era0), + Some(*ALICE_NODE_ID), + ); + // Acceptor was added. + assert_eq!(accumulator.block_acceptors.len(), max_block_count + 2); + // The timestamp was purged. 
+ assert_ne!( + accumulator + .peer_block_timestamps + .get(&ALICE_NODE_ID) + .unwrap() + .front() + .unwrap() + .0, + purged_hash + ); +} + +#[test] +fn acceptor_get_peers() { + let mut rng = TestRng::new(); + let block = TestBlockBuilder::new().build(&mut rng); + let mut acceptor = BlockAcceptor::new(*block.hash(), vec![]); + assert!(acceptor.peers().is_empty()); + let first_peer = NodeId::random(&mut rng); + let second_peer = NodeId::random(&mut rng); + acceptor.register_peer(first_peer); + assert_eq!(acceptor.peers(), &BTreeSet::from([first_peer])); + acceptor.register_peer(second_peer); + assert_eq!(acceptor.peers(), &BTreeSet::from([first_peer, second_peer])); +} + +#[test] +fn acceptor_register_finality_signature() { + let rng = &mut TestRng::new(); + // Create a block and an acceptor for it. + let block = Arc::new(TestBlockBuilder::new().build(rng)); + let chain_name_hash = ChainNameDigest::random(rng); + let mut meta_block: ForwardMetaBlock = + MetaBlock::new_forward(block.clone(), vec![], MetaBlockState::new()) + .try_into() + .unwrap(); + let mut acceptor = BlockAcceptor::new(*block.hash(), vec![]); + + // Create a finality signature with the wrong block hash. + let wrong_fin_sig = FinalitySignatureV2::random_for_block( + BlockHash::random(rng), + rng.gen(), + EraId::new(0), + ChainNameDigest::random(rng), + rng, + ); + assert!(matches!( + acceptor + .register_finality_signature(wrong_fin_sig, None, VALIDATOR_SLOTS) + .unwrap_err(), + Error::BlockHashMismatch { + expected: _, + actual: _ + } + )); + + // Create an invalid finality signature. + let invalid_fin_sig = FinalitySignatureV2::new( + *block.hash(), + block.height(), + EraId::random(rng), + chain_name_hash, + Signature::System, + PublicKey::random(rng), + ); + // We shouldn't be able to create invalid signatures ourselves, so we've + // reached an invalid state. 
+ assert!(matches!( + acceptor + .register_finality_signature(invalid_fin_sig.clone(), None, VALIDATOR_SLOTS) + .unwrap_err(), + Error::InvalidConfiguration + )); + // Peers shouldn't send us invalid signatures. + let first_peer = NodeId::random(rng); + assert!(matches!( + acceptor + .register_finality_signature(invalid_fin_sig, Some(first_peer), VALIDATOR_SLOTS) + .unwrap_err(), + Error::InvalidGossip(_) + )); + // Create a valid finality signature and register it. + let fin_sig = FinalitySignatureV2::random_for_block( + *block.hash(), + block.height(), + block.era_id(), + chain_name_hash, + rng, + ); + assert!(acceptor + .register_finality_signature(fin_sig.clone(), Some(first_peer), VALIDATOR_SLOTS) + .unwrap() + .is_none()); + // Register it from the second peer as well. + let second_peer = NodeId::random(rng); + assert!(acceptor + .register_finality_signature(fin_sig.clone(), Some(second_peer), VALIDATOR_SLOTS) + .unwrap() + .is_none()); + // Make sure the peer list is updated accordingly. + let (sig, senders) = acceptor.signatures().get(fin_sig.public_key()).unwrap(); + assert_eq!(*sig, fin_sig); + assert_eq!(*senders, BTreeSet::from([first_peer, second_peer])); + // Create a second finality signature and register it. + let second_fin_sig = FinalitySignatureV2::random_for_block( + *block.hash(), + block.height(), + block.era_id(), + chain_name_hash, + rng, + ); + assert!(acceptor + .register_finality_signature(second_fin_sig.clone(), Some(first_peer), VALIDATOR_SLOTS) + .unwrap() + .is_none()); + // Make sure the peer list for the first signature is unchanged. + let (first_sig, first_sig_senders) = acceptor.signatures().get(fin_sig.public_key()).unwrap(); + assert_eq!(*first_sig, fin_sig); + assert_eq!( + *first_sig_senders, + BTreeSet::from([first_peer, second_peer]) + ); + // Make sure the peer list for the second signature is correct. 
+ let (sig, senders) = acceptor + .signatures() + .get(second_fin_sig.public_key()) + .unwrap(); + assert_eq!(*sig, second_fin_sig); + assert_eq!(*senders, BTreeSet::from([first_peer])); + assert!(!acceptor.has_sufficient_finality()); + // Register the block with the sufficient finality flag set. + meta_block.state.register_has_sufficient_finality(); + acceptor + .register_block(meta_block.clone(), Some(first_peer)) + .unwrap(); + // Registering invalid signatures should still yield an error. + let wrong_era = EraId::from(u64::MAX ^ u64::from(block.era_id())); + let invalid_fin_sig = FinalitySignatureV2::random_for_block( + *block.hash(), + block.height(), + wrong_era, + chain_name_hash, + rng, + ); + assert!(matches!( + acceptor + .register_finality_signature(invalid_fin_sig.clone(), Some(first_peer), VALIDATOR_SLOTS) + .unwrap_err(), + Error::EraMismatch { + block_hash: _, + expected: _, + actual: _, + peer: _ + } + )); + // Registering an invalid signature that we created means we're in an + // invalid state. + assert!(matches!( + acceptor + .register_finality_signature(invalid_fin_sig, None, VALIDATOR_SLOTS) + .unwrap_err(), + Error::InvalidConfiguration + )); + // Registering valid signatures still works, but we already had the second + // signature. + assert!(acceptor + .register_finality_signature(second_fin_sig.clone(), Some(second_peer), VALIDATOR_SLOTS) + .unwrap() + .is_none()); + assert!(acceptor + .signatures() + .get(second_fin_sig.public_key()) + .unwrap() + .1 + .contains(&second_peer)); + // Register a new valid signature which should be yielded by the function. 
+    let third_fin_sig = FinalitySignatureV2::random_for_block(
+        *block.hash(),
+        block.height(),
+        block.era_id(),
+        chain_name_hash,
+        rng,
+    );
+    assert_eq!(
+        acceptor
+            .register_finality_signature(third_fin_sig.clone(), Some(first_peer), VALIDATOR_SLOTS)
+            .unwrap()
+            .unwrap(),
+        third_fin_sig
+    );
+    // Additional registrations of the third signature with and without a peer
+    // should still work.
+    assert!(acceptor
+        .register_finality_signature(third_fin_sig.clone(), Some(second_peer), VALIDATOR_SLOTS)
+        .unwrap()
+        .is_none());
+    assert!(acceptor
+        .register_finality_signature(third_fin_sig, None, VALIDATOR_SLOTS)
+        .unwrap()
+        .is_none());
+}
+
+#[test]
+fn acceptor_register_block() {
+    let mut rng = TestRng::new();
+    // Create a block and an acceptor for it.
+    let block = Arc::new(TestBlockBuilder::new().build(&mut rng));
+    let mut meta_block = meta_block_with_default_state(block.clone());
+    let mut acceptor = BlockAcceptor::new(*block.hash(), vec![]);
+
+    // Create a finality signature with the wrong block hash.
+    let wrong_block =
+        meta_block_with_default_state(Arc::new(TestBlockBuilder::new().build(&mut rng)));
+    assert!(matches!(
+        acceptor.register_block(wrong_block, None).unwrap_err(),
+        Error::BlockHashMismatch {
+            expected: _,
+            actual: _
+        }
+    ));
+
+    {
+        // Invalid block case.
+        let invalid_block: Arc<BlockV2> = Arc::new(TestBlockBuilder::new().build_invalid(&mut rng));
+
+        let mut invalid_block_acceptor = BlockAcceptor::new(*invalid_block.hash(), vec![]);
+        let invalid_meta_block = meta_block_with_default_state(invalid_block);
+        let malicious_peer = NodeId::random(&mut rng);
+        // Peers shouldn't send us invalid blocks.
+        assert!(matches!(
+            invalid_block_acceptor
+                .register_block(invalid_meta_block.clone(), Some(malicious_peer))
+                .unwrap_err(),
+            Error::InvalidGossip(_)
+        ));
+        // We shouldn't be able to create invalid blocks ourselves, so we've
+        // reached an invalid state.
+ assert!(matches!( + invalid_block_acceptor + .register_block(invalid_meta_block, None) + .unwrap_err(), + Error::InvalidConfiguration + )); + } + + // At this point, we know only the hash of the block. + assert!(acceptor.block_height().is_none()); + assert!(acceptor.peers().is_empty()); + + // Register the block with ourselves as source. + acceptor.register_block(meta_block.clone(), None).unwrap(); + assert_eq!(acceptor.block_height().unwrap(), block.height()); + assert!(acceptor.peers().is_empty()); + + // Register the block from a peer. + let first_peer = NodeId::random(&mut rng); + acceptor + .register_block(meta_block.clone(), Some(first_peer)) + .unwrap(); + // Peer list should be updated. + assert_eq!(*acceptor.peers(), BTreeSet::from([first_peer])); + + // The `executed` flag should not be set yet. + assert!(!acceptor.executed()); + // Register the block from a second peer with the executed flag set. + let second_peer = NodeId::random(&mut rng); + assert!(meta_block.state.register_as_executed().was_updated()); + acceptor + .register_block(meta_block.clone(), Some(second_peer)) + .unwrap(); + // Peer list should contain both peers. + assert_eq!(*acceptor.peers(), BTreeSet::from([first_peer, second_peer])); + // `executed` flag should now be set. + assert!(acceptor.executed()); + + // Re-registering with the `executed` flag set should not change anything. + acceptor.register_block(meta_block, None).unwrap(); + assert_eq!(*acceptor.peers(), BTreeSet::from([first_peer, second_peer])); + assert!(acceptor.executed()); +} + +#[test] +fn acceptor_should_store_block() { + let mut rng = TestRng::new(); + // Create a block and an acceptor for it. + let chain_name_hash = ChainNameDigest::random(&mut rng); + let block = Arc::new(TestBlockBuilder::new().build(&mut rng)); + let mut meta_block = meta_block_with_default_state(block.clone()); + let mut acceptor = BlockAcceptor::new(*block.hash(), vec![]); + + // Create 4 pairs of keys so we can later create 4 signatures. 
+ let keys: Vec<(SecretKey, PublicKey)> = (0..4).map(|_| generate_ed25519_keypair()).collect(); + // Register the keys into the era validator weights, front loaded on the + // first 2 with 80% weight. + let era_validator_weights = EraValidatorWeights::new( + block.era_id(), + BTreeMap::from([ + (keys[0].1.clone(), U512::from(40)), + (keys[1].1.clone(), U512::from(40)), + (keys[2].1.clone(), U512::from(10)), + (keys[3].1.clone(), U512::from(10)), + ]), + Ratio::new(1, 3), + ); + + // We should have nothing at this point. + assert!( + !acceptor.has_sufficient_finality() + && acceptor.block_height().is_none() + && acceptor.signatures().is_empty() + ); + + // With the sufficient finality flag set, nothing else should matter and we + // should not store anything. + acceptor.set_sufficient_finality(true); + let (should_store, _offenders) = + acceptor.should_store_block(&era_validator_weights, chain_name_hash); + assert_eq!(should_store, ShouldStore::Nothing); + // Reset the flag. + acceptor.set_sufficient_finality(false); + + let (should_store, offenders) = + acceptor.should_store_block(&era_validator_weights, chain_name_hash); + assert_eq!(should_store, ShouldStore::Nothing); + assert!(offenders.is_empty()); + + let mut signatures = vec![]; + + // Create the first validator's signature. + let fin_sig = FinalitySignatureV2::create( + *block.hash(), + block.height(), + block.era_id(), + chain_name_hash, + &keys[0].0, + ); + signatures.push(fin_sig.clone()); + // First signature with 40% weight brings the block to weak finality. + acceptor + .register_finality_signature(fin_sig, None, VALIDATOR_SLOTS) + .unwrap(); + let (should_store, _offenders) = + acceptor.should_store_block(&era_validator_weights, chain_name_hash); + assert_eq!(should_store, ShouldStore::Nothing); + + // Registering the block now. 
+ acceptor.register_block(meta_block.clone(), None).unwrap(); + let (should_store, _offenders) = + acceptor.should_store_block(&era_validator_weights, chain_name_hash); + assert_eq!(should_store, ShouldStore::Nothing); + + // Create the third validator's signature. + let fin_sig = FinalitySignatureV2::create( + *block.hash(), + block.height(), + block.era_id(), + chain_name_hash, + &keys[2].0, + ); + // The third signature with weight 10% doesn't make the block go to + // strict finality. + signatures.push(fin_sig.clone()); + acceptor + .register_finality_signature(fin_sig, None, VALIDATOR_SLOTS) + .unwrap(); + let (should_store, _offenders) = + acceptor.should_store_block(&era_validator_weights, chain_name_hash); + assert_eq!(should_store, ShouldStore::Nothing); + + // Create a bogus signature from a non-validator for this era. + let non_validator_keys = generate_ed25519_keypair(); + let faulty_peer = NodeId::random(&mut rng); + let bogus_sig = FinalitySignatureV2::create( + *block.hash(), + block.height(), + block.era_id(), + chain_name_hash, + &non_validator_keys.0, + ); + acceptor + .register_finality_signature(bogus_sig, Some(faulty_peer), VALIDATOR_SLOTS) + .unwrap(); + let (should_store, offenders) = + acceptor.should_store_block(&era_validator_weights, chain_name_hash); + assert_eq!(should_store, ShouldStore::Nothing); + // Make sure the peer who sent us this bogus signature is marked as an + // offender. + assert_eq!(offenders[0].0, faulty_peer); + + // Create the second validator's signature. + let fin_sig = FinalitySignatureV2::create( + *block.hash(), + block.height(), + block.era_id(), + chain_name_hash, + &keys[1].0, + ); + signatures.push(fin_sig.clone()); + // Second signature with 40% weight brings the block to strict finality. 
+ acceptor + .register_finality_signature(fin_sig, None, VALIDATOR_SLOTS) + .unwrap(); + let (should_store, _offenders) = + acceptor.should_store_block(&era_validator_weights, chain_name_hash); + let block_signatures = signatures_for_block(&block, &signatures, chain_name_hash); + let mut meta_block_with_expected_state = meta_block.clone(); + meta_block_with_expected_state.state.register_as_stored(); + meta_block_with_expected_state + .state + .register_has_sufficient_finality(); + assert_eq!( + should_store, + ShouldStore::SufficientlySignedBlock { + meta_block: meta_block_with_expected_state, + block_signatures, + } + ); + + // Create the fourth validator's signature. + let fin_sig = FinalitySignatureV2::create( + *block.hash(), + block.height(), + block.era_id(), + chain_name_hash, + &keys[3].0, + ); + // Already have sufficient finality signatures, so we're not supposed to + // store anything else. + acceptor + .register_finality_signature(fin_sig, None, VALIDATOR_SLOTS) + .unwrap(); + let (should_store, _offenders) = + acceptor.should_store_block(&era_validator_weights, chain_name_hash); + assert_eq!(should_store, ShouldStore::Nothing); + + // Without the block, even with sufficient signatures we should not store anything. + acceptor.set_meta_block(None); + acceptor.set_sufficient_finality(false); + let (should_store, _offenders) = + acceptor.should_store_block(&era_validator_weights, chain_name_hash); + assert_eq!(should_store, ShouldStore::Nothing); + + // Without any signatures, we should not store anything. 
+ meta_block.state.register_has_sufficient_finality(); + acceptor.set_meta_block(Some(meta_block)); + acceptor.signatures_mut().retain(|_, _| false); + let (should_store, _offenders) = + acceptor.should_store_block(&era_validator_weights, chain_name_hash); + assert_eq!(should_store, ShouldStore::Nothing); +} + +#[test] +fn acceptor_should_correctly_bound_the_signatures() { + let mut rng = TestRng::new(); + let validator_slots = 2; + + // Create a block and an acceptor for it. + let block = Arc::new(TestBlockBuilder::new().build(&mut rng)); + let chain_name_hash = ChainNameDigest::random(&mut rng); + let mut acceptor = BlockAcceptor::new(*block.hash(), vec![]); + let first_peer = NodeId::random(&mut rng); + + // Fill the signatures map: + for fin_sig in (0..validator_slots * 2).map(|_| { + FinalitySignatureV2::random_for_block( + *block.hash(), + block.height(), + block.era_id(), + chain_name_hash, + &mut rng, + ) + }) { + assert!(acceptor + .register_finality_signature(fin_sig, Some(first_peer), validator_slots) + .unwrap() + .is_none()); + } + + let fin_sig = FinalitySignatureV2::random_for_block( + *block.hash(), + block.height(), + block.era_id(), + chain_name_hash, + &mut rng, + ); + assert!(matches!( + acceptor.register_finality_signature(fin_sig, Some(first_peer), validator_slots), + Err(Error::TooManySignatures { .. }), + )); +} + +#[test] +fn acceptor_signatures_bound_should_not_be_triggered_if_peers_are_different() { + let mut rng = TestRng::new(); + let validator_slots = 3; + + // Create a block and an acceptor for it. 
+ let block = Arc::new(TestBlockBuilder::new().build(&mut rng)); + let chain_name_hash = ChainNameDigest::random(&mut rng); + let mut acceptor = BlockAcceptor::new(*block.hash(), vec![]); + let first_peer = NodeId::random(&mut rng); + let second_peer = NodeId::random(&mut rng); + + // Fill the signatures map: + for fin_sig in (0..validator_slots).map(|_| { + FinalitySignatureV2::random_for_block( + *block.hash(), + block.height(), + block.era_id(), + chain_name_hash, + &mut rng, + ) + }) { + assert!(acceptor + .register_finality_signature(fin_sig, Some(first_peer), validator_slots) + .unwrap() + .is_none()); + } + + // This should pass, because it is another peer: + let fin_sig = FinalitySignatureV2::random_for_block( + *block.hash(), + block.height(), + block.era_id(), + chain_name_hash, + &mut rng, + ); + assert!(acceptor + .register_finality_signature(fin_sig, Some(second_peer), validator_slots) + .unwrap() + .is_none()); +} + +#[test] +fn accumulator_should_leap() { + let mut rng = TestRng::new(); + let mut validator_matrix = ValidatorMatrix::new_with_validator(ALICE_SECRET_KEY.clone()); + let block_accumulator_config = Config::default(); + let recent_era_interval = 1; + let block_time = block_accumulator_config.purge_interval / 2; + let attempt_execution_threshold = block_accumulator_config.attempt_execution_threshold; + let mut block_accumulator = BlockAccumulator::new( + block_accumulator_config, + validator_matrix.clone(), + recent_era_interval, + block_time, + VALIDATOR_SLOTS, + &Registry::default(), + ) + .unwrap(); + + let era_id = EraId::from(0); + let chain_name_hash = ChainNameDigest::random(&mut rng); + + // Register the era in the validator matrix so the block is valid. 
+ register_evw_for_era(&mut validator_matrix, era_id); + + assert!( + block_accumulator.local_tip.is_none(), + "block_accumulator local tip should init null" + ); + + expected_leap_instruction( + LeapInstruction::UnsetLocalTip, + block_accumulator.leap_instruction(&SyncIdentifier::BlockIdentifier( + BlockHash::random(&mut rng), + 0, + )), + ); + + block_accumulator.local_tip = Some(LocalTipIdentifier::new(1, era_id)); + + let synced = SyncIdentifier::BlockHash(BlockHash::random(&mut rng)); + expected_leap_instruction( + LeapInstruction::UnknownBlockHeight, + block_accumulator.leap_instruction(&synced), + ); + + let synced = SyncIdentifier::SyncedBlockIdentifier(BlockHash::random(&mut rng), 1, era_id); + expected_leap_instruction( + LeapInstruction::NoUsableBlockAcceptors, + block_accumulator.leap_instruction(&synced), + ); + + // Create an acceptor to change the highest usable block height. + { + let block = TestBlockBuilder::new() + .era(era_id) + .height(1) + .switch_block(false) + .build(&mut rng); + + block_accumulator + .block_acceptors + .insert(*block.hash(), block_acceptor(block, chain_name_hash)); + } + + expected_leap_instruction( + LeapInstruction::AtHighestKnownBlock, + block_accumulator.leap_instruction(&synced), + ); + + let block_height = attempt_execution_threshold; + // Insert an acceptor within execution range + { + let block = TestBlockBuilder::new() + .era(era_id) + .height(block_height) + .switch_block(false) + .build(&mut rng); + + block_accumulator + .block_acceptors + .insert(*block.hash(), block_acceptor(block, chain_name_hash)); + } + + expected_leap_instruction( + LeapInstruction::WithinAttemptExecutionThreshold( + attempt_execution_threshold.saturating_sub(1), + ), + block_accumulator.leap_instruction(&synced), + ); + + let centurion = 100; + // Insert an upgrade boundary + { + let block = TestBlockBuilder::new() + .era(era_id) + .height(centurion) + .switch_block(true) + .build(&mut rng); + + block_accumulator + .block_acceptors + 
.insert(*block.hash(), block_acceptor(block, chain_name_hash)); + } + + expected_leap_instruction( + LeapInstruction::AtHighestKnownBlock, + block_accumulator.leap_instruction(&SyncIdentifier::SyncedBlockIdentifier( + BlockHash::random(&mut rng), + centurion, + era_id, + )), + ); + expected_leap_instruction( + LeapInstruction::OutsideAttemptExecutionThreshold(attempt_execution_threshold + 1), + block_accumulator.leap_instruction(&SyncIdentifier::SyncedBlockIdentifier( + BlockHash::random(&mut rng), + centurion - attempt_execution_threshold - 1, + era_id, + )), + ); + + let offset = centurion.saturating_sub(attempt_execution_threshold); + for height in offset..centurion { + expected_leap_instruction( + LeapInstruction::WithinAttemptExecutionThreshold(centurion.saturating_sub(height)), + block_accumulator.leap_instruction(&SyncIdentifier::SyncedBlockIdentifier( + BlockHash::random(&mut rng), + height, + era_id, + )), + ); + } + + let upgrade_attempt_execution_threshold = attempt_execution_threshold * 2; + block_accumulator.register_activation_point(Some(ActivationPoint::EraId(era_id.successor()))); + let offset = centurion.saturating_sub(upgrade_attempt_execution_threshold); + for height in offset..centurion { + expected_leap_instruction( + LeapInstruction::TooCloseToUpgradeBoundary(centurion.saturating_sub(height)), + block_accumulator.leap_instruction(&SyncIdentifier::SyncedBlockIdentifier( + BlockHash::random(&mut rng), + height, + era_id, + )), + ); + } + + expected_leap_instruction( + LeapInstruction::AtHighestKnownBlock, + block_accumulator.leap_instruction(&SyncIdentifier::SyncedBlockIdentifier( + BlockHash::random(&mut rng), + centurion, + era_id, + )), + ); + expected_leap_instruction( + LeapInstruction::OutsideAttemptExecutionThreshold(upgrade_attempt_execution_threshold + 1), + block_accumulator.leap_instruction(&SyncIdentifier::SyncedBlockIdentifier( + BlockHash::random(&mut rng), + centurion - upgrade_attempt_execution_threshold - 1, + era_id, + )), + ); 
+} + +fn expected_leap_instruction(expected: LeapInstruction, actual: LeapInstruction) { + assert!( + expected.eq(&actual), + "{}", + format!("expected: {} actual: {}", expected, actual) + ); +} + +fn block_acceptor(block: BlockV2, chain_name_hash: ChainNameDigest) -> BlockAcceptor { + let mut acceptor = BlockAcceptor::new(*block.hash(), vec![]); + // One finality signature from our only validator for block 1. + acceptor + .register_finality_signature( + FinalitySignatureV2::create( + *block.hash(), + block.height(), + block.era_id(), + chain_name_hash, + &ALICE_SECRET_KEY, + ), + None, + VALIDATOR_SLOTS, + ) + .unwrap(); + + let meta_block = { + let mut state = MetaBlockState::new(); + state.register_has_sufficient_finality(); + MetaBlock::new_forward(Arc::new(block), vec![], state) + .try_into() + .unwrap() + }; + acceptor.register_block(meta_block, None).unwrap(); + + acceptor +} + +#[test] +fn accumulator_purge() { + let mut rng = TestRng::new(); + let mut validator_matrix = ValidatorMatrix::new_with_validator(ALICE_SECRET_KEY.clone()); + let block_accumulator_config = Config::default(); + let recent_era_interval = 1; + let block_time = block_accumulator_config.purge_interval / 2; + let purge_interval = block_accumulator_config.purge_interval; + let time_before_insertion = Timestamp::now(); + let mut block_accumulator = BlockAccumulator::new( + block_accumulator_config, + validator_matrix.clone(), + recent_era_interval, + block_time, + VALIDATOR_SLOTS, + &Registry::default(), + ) + .unwrap(); + block_accumulator.register_local_tip(0, 0.into()); + + // Create 3 parent-child blocks. + let block_1 = Arc::new(generate_non_genesis_block(&mut rng)); + let block_2 = Arc::new(generate_next_block(&mut rng, &block_1)); + let block_3 = Arc::new(generate_next_block(&mut rng, &block_2)); + + // Also create 2 peers. 
+ let peer_1 = NodeId::random(&mut rng); + let peer_2 = NodeId::random(&mut rng); + + let chain_name_hash = ChainNameDigest::random(&mut rng); + + // One finality signature from our only validator for block 1. + let fin_sig_1 = FinalitySignatureV2::create( + *block_1.hash(), + block_1.height(), + block_1.era_id(), + chain_name_hash, + &ALICE_SECRET_KEY, + ); + // One finality signature from our only validator for block 2. + let fin_sig_2 = FinalitySignatureV2::create( + *block_2.hash(), + block_2.height(), + block_2.era_id(), + chain_name_hash, + &ALICE_SECRET_KEY, + ); + // One finality signature from our only validator for block 3. + let fin_sig_3 = FinalitySignatureV2::create( + *block_3.hash(), + block_3.height(), + block_3.era_id(), + chain_name_hash, + &ALICE_SECRET_KEY, + ); + + // Register the eras in the validator matrix so the blocks are valid. + { + register_evw_for_era(&mut validator_matrix, block_1.era_id()); + register_evw_for_era(&mut validator_matrix, block_2.era_id()); + register_evw_for_era(&mut validator_matrix, block_3.era_id()); + } + + // We will manually call `upsert_acceptor` in order to have + // `peer_block_timestamps` populated. + { + // Insert the first block with sufficient finality from the first peer. + block_accumulator.upsert_acceptor(*block_1.hash(), Some(block_1.era_id()), Some(peer_1)); + let acceptor = block_accumulator + .block_acceptors + .get_mut(block_1.hash()) + .unwrap(); + let mut state = MetaBlockState::new(); + state.register_has_sufficient_finality(); + let meta_block = MetaBlock::new_forward(block_1.clone(), vec![], state) + .try_into() + .unwrap(); + acceptor + .register_finality_signature(fin_sig_1, Some(peer_1), VALIDATOR_SLOTS) + .unwrap(); + acceptor.register_block(meta_block, None).unwrap(); + } + + { + // Insert the second block with sufficient finality from the second + // peer. 
+        block_accumulator.upsert_acceptor(*block_2.hash(), Some(block_2.era_id()), Some(peer_2));
+        let acceptor = block_accumulator
+            .block_acceptors
+            .get_mut(block_2.hash())
+            .unwrap();
+        let mut state = MetaBlockState::new();
+        state.register_has_sufficient_finality();
+        let meta_block = MetaBlock::new_forward(block_2.clone(), vec![], state)
+            .try_into()
+            .unwrap();
+        acceptor
+            .register_finality_signature(fin_sig_2, Some(peer_2), VALIDATOR_SLOTS)
+            .unwrap();
+        acceptor.register_block(meta_block, None).unwrap();
+    }
+
+    {
+        // Insert the third block with sufficient finality from both peers.
+        block_accumulator.upsert_acceptor(*block_3.hash(), Some(block_3.era_id()), Some(peer_1));
+        block_accumulator.upsert_acceptor(*block_3.hash(), Some(block_3.era_id()), Some(peer_2));
+        let acceptor = block_accumulator
+            .block_acceptors
+            .get_mut(block_3.hash())
+            .unwrap();
+        let mut state = MetaBlockState::new();
+        state.register_has_sufficient_finality();
+        let meta_block = MetaBlock::new_forward(block_3.clone(), vec![], state)
+            .try_into()
+            .unwrap();
+        acceptor
+            .register_finality_signature(fin_sig_3, Some(peer_1), VALIDATOR_SLOTS)
+            .unwrap();
+        acceptor.register_block(meta_block, Some(peer_2)).unwrap();
+    }
+
+    {
+        // Modify the times in the acceptors for blocks 1 and 2 as well as in
+        // `peer_block_timestamps` for the second peer to become outdated.
+        let last_progress = time_before_insertion.saturating_sub(purge_interval * 10);
+        block_accumulator
+            .block_acceptors
+            .get_mut(block_1.hash())
+            .unwrap()
+            .set_last_progress(last_progress);
+        block_accumulator
+            .block_acceptors
+            .get_mut(block_2.hash())
+            .unwrap()
+            .set_last_progress(last_progress);
+        for (_block_hash, timestamp) in block_accumulator
+            .peer_block_timestamps
+            .get_mut(&peer_2)
+            .unwrap()
+        {
+            *timestamp = last_progress;
+        }
+    }
+
+    // Entries we modified earlier should be purged.
+ block_accumulator.purge(); + // Acceptors for blocks 1 and 2 should not have been purged because they + // have strict finality. + assert!(block_accumulator + .block_acceptors + .contains_key(block_1.hash())); + assert!(block_accumulator + .block_acceptors + .contains_key(block_2.hash())); + assert!(block_accumulator + .block_acceptors + .contains_key(block_3.hash())); + // We should have kept only the timestamps for the first peer. + assert!(block_accumulator + .peer_block_timestamps + .contains_key(&peer_1)); + assert!(!block_accumulator + .peer_block_timestamps + .contains_key(&peer_2)); + + { + // Modify the `strict_finality` flag in the acceptors for blocks 1 and + // 2. + block_accumulator + .block_acceptors + .get_mut(block_1.hash()) + .unwrap() + .set_sufficient_finality(false); + block_accumulator + .block_acceptors + .get_mut(block_2.hash()) + .unwrap() + .set_sufficient_finality(false); + } + + // Entries we modified earlier should be purged. + block_accumulator.purge(); + // Acceptors for blocks 1 and 2 should have been purged. + assert!(!block_accumulator + .block_acceptors + .contains_key(block_1.hash())); + assert!(!block_accumulator + .block_acceptors + .contains_key(block_2.hash())); + assert!(block_accumulator + .block_acceptors + .contains_key(block_3.hash())); + // The third block acceptor is all that is left and it has no known + // children, so `block_children` should be empty. + assert!(block_accumulator.block_children.is_empty()); + // We should have kept only the timestamps for the first peer. + assert!(block_accumulator + .peer_block_timestamps + .contains_key(&peer_1)); + assert!(!block_accumulator + .peer_block_timestamps + .contains_key(&peer_2)); + + // Create a block just in range of block 3 to not qualify for a purge. 
+ let in_range_block = Arc::new( + TestBlockBuilder::new() + .era(block_3.era_id()) + .height(block_3.height() - block_accumulator.attempt_execution_threshold) + .protocol_version(block_3.protocol_version()) + .switch_block(false) + .build(&mut rng), + ); + + let in_range_block_sig = FinalitySignatureV2::create( + *in_range_block.hash(), + in_range_block.height(), + in_range_block.era_id(), + chain_name_hash, + &ALICE_SECRET_KEY, + ); + + { + // Insert the in range block with sufficient finality. + block_accumulator.upsert_acceptor( + *in_range_block.hash(), + Some(in_range_block.era_id()), + Some(peer_1), + ); + let acceptor = block_accumulator + .block_acceptors + .get_mut(in_range_block.hash()) + .unwrap(); + let mut state = MetaBlockState::new(); + state.register_has_sufficient_finality(); + let meta_block = MetaBlock::new_forward(in_range_block.clone(), vec![], state) + .try_into() + .unwrap(); + acceptor + .register_finality_signature(in_range_block_sig, Some(peer_1), VALIDATOR_SLOTS) + .unwrap(); + acceptor.register_block(meta_block, Some(peer_2)).unwrap(); + } + + // Create a block just out of range of block 3 to qualify for a purge. + let out_of_range_block = Arc::new( + TestBlockBuilder::new() + .era(block_3.era_id()) + .height(block_3.height() - block_accumulator.attempt_execution_threshold - 1) + .protocol_version(block_3.protocol_version()) + .switch_block(false) + .build(&mut rng), + ); + let out_of_range_block_sig = FinalitySignatureV2::create( + *out_of_range_block.hash(), + out_of_range_block.height(), + out_of_range_block.era_id(), + chain_name_hash, + &ALICE_SECRET_KEY, + ); + + { + // Insert the out of range block with sufficient finality. 
+ block_accumulator.upsert_acceptor( + *out_of_range_block.hash(), + Some(out_of_range_block.era_id()), + Some(peer_1), + ); + let acceptor = block_accumulator + .block_acceptors + .get_mut(out_of_range_block.hash()) + .unwrap(); + let mut state = MetaBlockState::new(); + state.register_has_sufficient_finality(); + let meta_block = MetaBlock::new_forward(out_of_range_block.clone(), vec![], state) + .try_into() + .unwrap(); + acceptor + .register_finality_signature(out_of_range_block_sig, Some(peer_1), VALIDATOR_SLOTS) + .unwrap(); + acceptor.register_block(meta_block, Some(peer_2)).unwrap(); + } + + // Make sure the local tip along with its recent parents never get purged. + { + assert!(block_accumulator + .block_acceptors + .contains_key(block_3.hash())); + // Make block 3 the local tip. + block_accumulator.local_tip = + Some(LocalTipIdentifier::new(block_3.height(), block_3.era_id())); + // Change the timestamps to old ones so that all blocks would normally + // get purged. + let last_progress = time_before_insertion.saturating_sub(purge_interval * 10); + for (_, acceptor) in block_accumulator.block_acceptors.iter_mut() { + acceptor.set_last_progress(last_progress); + } + for (_, timestamps) in block_accumulator.peer_block_timestamps.iter_mut() { + for (_, timestamp) in timestamps.iter_mut() { + *timestamp = last_progress; + } + } + // Do the purge. + block_accumulator.purge(); + // As block 3 is the local tip, it should not have been purged. + assert!(block_accumulator + .block_acceptors + .contains_key(block_3.hash())); + // Neither should the block in `attempt_execution_threshold` range. + assert!(block_accumulator + .block_acceptors + .contains_key(in_range_block.hash())); + // But the block out of `attempt_execution_threshold` range should + // have been purged. 
+ assert!(!block_accumulator + .block_acceptors + .contains_key(out_of_range_block.hash())); + + // Now replace the local tip with something else (in this case we'll + // have no local tip) so that previously created blocks no longer have + // purge immunity. + block_accumulator.local_tip.take(); + // Do the purge. + block_accumulator.purge(); + // Block 3 is no longer the local tip, and given that it's old, the + // blocks should have been purged. + assert!(block_accumulator.block_acceptors.is_empty()); + } + + // Create a future block after block 3. + let future_block = Arc::new( + TestBlockBuilder::new() + .era(block_3.era_id()) + .height(block_3.height() + block_accumulator.attempt_execution_threshold) + .protocol_version(block_3.protocol_version()) + .switch_block(false) + .build(&mut rng), + ); + let future_block_sig = FinalitySignatureV2::create( + *future_block.hash(), + future_block.height(), + future_block.era_id(), + chain_name_hash, + &ALICE_SECRET_KEY, + ); + + { + // Insert the future block with sufficient finality. + block_accumulator.upsert_acceptor( + *future_block.hash(), + Some(future_block.era_id()), + Some(peer_1), + ); + let acceptor = block_accumulator + .block_acceptors + .get_mut(future_block.hash()) + .unwrap(); + let mut state = MetaBlockState::new(); + state.register_has_sufficient_finality(); + let meta_block = MetaBlock::new_forward(future_block.clone(), vec![], state) + .try_into() + .unwrap(); + acceptor + .register_finality_signature(future_block_sig, Some(peer_1), VALIDATOR_SLOTS) + .unwrap(); + acceptor.register_block(meta_block, Some(peer_2)).unwrap(); + } + + // Create a future block after block 3, but which will not have strict + // finality. 
+ let future_unsigned_block = Arc::new( + TestBlockBuilder::new() + .era(block_3.era_id()) + .height(block_3.height() + block_accumulator.attempt_execution_threshold * 2) + .protocol_version(block_3.protocol_version()) + .switch_block(false) + .build(&mut rng), + ); + let future_unsigned_block_sig = FinalitySignatureV2::create( + *future_unsigned_block.hash(), + future_unsigned_block.height(), + future_unsigned_block.era_id(), + chain_name_hash, + &ALICE_SECRET_KEY, + ); + + { + // Insert the future unsigned block without sufficient finality. + block_accumulator.upsert_acceptor( + *future_unsigned_block.hash(), + Some(future_unsigned_block.era_id()), + Some(peer_1), + ); + let acceptor = block_accumulator + .block_acceptors + .get_mut(future_unsigned_block.hash()) + .unwrap(); + let state = MetaBlockState::new(); + let meta_block = MetaBlock::new_forward(future_unsigned_block.clone(), vec![], state) + .try_into() + .unwrap(); + acceptor + .register_finality_signature(future_unsigned_block_sig, Some(peer_1), VALIDATOR_SLOTS) + .unwrap(); + acceptor.register_block(meta_block, Some(peer_2)).unwrap(); + } + + // Make sure block with sufficient finality doesn't get purged. + { + // Make block 3 the local tip again. + block_accumulator.local_tip = + Some(LocalTipIdentifier::new(block_3.height(), block_3.era_id())); + assert!(block_accumulator + .block_acceptors + .contains_key(future_block.hash())); + assert!(block_accumulator + .block_acceptors + .contains_key(future_unsigned_block.hash())); + + // Change the timestamps to old ones so that all blocks would normally + // get purged. + let last_progress = time_before_insertion.saturating_sub(purge_interval * 10); + for (_, acceptor) in block_accumulator.block_acceptors.iter_mut() { + acceptor.set_last_progress(last_progress); + } + for (_, timestamps) in block_accumulator.peer_block_timestamps.iter_mut() { + for (_, timestamp) in timestamps.iter_mut() { + *timestamp = last_progress; + } + } + // Do the purge. 
+ block_accumulator.purge(); + // Neither should the future block with sufficient finality. + assert!(block_accumulator + .block_acceptors + .contains_key(future_block.hash())); + // But the future block without sufficient finality should have been + // purged. + assert!(!block_accumulator + .block_acceptors + .contains_key(future_unsigned_block.hash())); + + // Now replace the local tip with something else (in this case we'll + // have no local tip) so that previously created blocks no longer have + // purge immunity. + block_accumulator.local_tip.take(); + // Do the purge. + block_accumulator.purge(); + // Block 3 is no longer the local tip, and given that it's old, the + // blocks should have been purged. + assert!(block_accumulator.block_acceptors.is_empty()); + } +} + +fn register_evw_for_era(validator_matrix: &mut ValidatorMatrix, era_id: EraId) { + let weights = EraValidatorWeights::new( + era_id, + BTreeMap::from([(ALICE_PUBLIC_KEY.clone(), 100.into())]), + Ratio::new(1, 3), + ); + validator_matrix.register_era_validator_weights(weights); +} + +fn generate_next_block(rng: &mut TestRng, block: &BlockV2) -> BlockV2 { + let era_id = if block.is_switch_block() { + block.era_id().successor() + } else { + block.era_id() + }; + + TestBlockBuilder::new() + .era(era_id) + .height(block.height() + 1) + .protocol_version(block.protocol_version()) + .switch_block(false) + .build(rng) +} + +fn generate_non_genesis_block(rng: &mut TestRng) -> BlockV2 { + let era = rng.gen_range(10..20); + let height = era * 10 + rng.gen_range(0..10); + let is_switch = rng.gen_bool(0.1); + + TestBlockBuilder::new() + .era(era) + .height(height) + .switch_block(is_switch) + .build(rng) +} + +fn generate_older_block(rng: &mut TestRng, block: &BlockV2, height_difference: u64) -> BlockV2 { + TestBlockBuilder::new() + .era(block.era_id().predecessor().unwrap_or_default()) + .height(block.height() - height_difference) + .protocol_version(block.protocol_version()) + .switch_block(false) + 
.build(rng) +} + +#[tokio::test] +async fn block_accumulator_reactor_flow() { + let mut rng = TestRng::new(); + let (chainspec, chainspec_raw_bytes) = + <(Chainspec, ChainspecRawBytes)>::from_resources("local"); + let chain_name_hash = chainspec.name_hash(); + let mut runner: Runner = Runner::new( + (), + Arc::new(chainspec), + Arc::new(chainspec_raw_bytes), + &mut rng, + ) + .await + .unwrap(); + + // Create 2 blocks, one parent one child. + let block_1 = generate_non_genesis_block(&mut rng); + let block_2 = generate_next_block(&mut rng, &block_1); + + // Also create 2 peers. + let peer_1 = NodeId::random(&mut rng); + let peer_2 = NodeId::random(&mut rng); + + // One finality signature from our only validator for block 1. + let fin_sig_1 = FinalitySignatureV2::create( + *block_1.hash(), + block_1.height(), + block_1.era_id(), + chain_name_hash, + &ALICE_SECRET_KEY, + ); + // One finality signature from our only validator for block 2. + let fin_sig_2 = FinalitySignatureV2::create( + *block_2.hash(), + block_2.height(), + block_2.era_id(), + chain_name_hash, + &ALICE_SECRET_KEY, + ); + + // Register the eras in the validator matrix so the blocks are valid. + { + let mut validator_matrix = runner.reactor_mut().validator_matrix.clone(); + register_evw_for_era(&mut validator_matrix, block_1.era_id()); + register_evw_for_era(&mut validator_matrix, block_2.era_id()); + } + + // Register a signature for block 1. + { + let effect_builder = runner.effect_builder(); + let reactor = runner.reactor_mut(); + + let block_accumulator = &mut reactor.block_accumulator; + block_accumulator.register_local_tip(0, 0.into()); + + let event = super::Event::ReceivedFinalitySignature { + finality_signature: Box::new(fin_sig_1.clone()), + sender: peer_1, + }; + let effects = block_accumulator.handle_event(effect_builder, &mut rng, event); + assert!(effects.is_empty()); + } + + // Register block 1. 
+ { + runner + .process_injected_effects(|effect_builder| { + let event = super::Event::ReceivedBlock { + block: Arc::new(block_1.clone()), + sender: peer_2, + }; + effect_builder + .into_inner() + .schedule(event, QueueKind::Validation) + .ignore() + }) + .await; + for _ in 0..6 { + while runner.try_crank(&mut rng).await == TryCrankOutcome::NoEventsToProcess { + time::sleep(POLL_INTERVAL).await; + } + } + let expected_block = runner + .reactor() + .storage + .read_block_by_hash(*block_1.hash()) + .unwrap(); + assert_eq!(expected_block, block_1.clone().into()); + let expected_block_signatures = runner + .reactor() + .storage + .get_finality_signatures_for_block(*block_1.hash()); + assert_eq!( + expected_block_signatures + .and_then(|sigs| sigs.finality_signature(fin_sig_1.public_key())) + .unwrap(), + FinalitySignature::from(fin_sig_1) + ); + } + + // Register block 2 before the signature. + { + let effect_builder = runner.effect_builder(); + let reactor = runner.reactor_mut(); + + let block_accumulator = &mut reactor.block_accumulator; + let event = super::Event::ReceivedBlock { + block: Arc::new(block_2.clone()), + sender: peer_2, + }; + let effects = block_accumulator.handle_event(effect_builder, &mut rng, event); + assert!(effects.is_empty()); + } + + // Register the signature for block 2. 
+ { + runner + .process_injected_effects(|effect_builder| { + let event = super::Event::CreatedFinalitySignature { + finality_signature: Box::new(fin_sig_2.clone()), + }; + effect_builder + .into_inner() + .schedule(event, QueueKind::Validation) + .ignore() + }) + .await; + for _ in 0..6 { + while runner.try_crank(&mut rng).await == TryCrankOutcome::NoEventsToProcess { + time::sleep(POLL_INTERVAL).await; + } + } + + let expected_block = runner + .reactor() + .storage + .read_block_by_hash(*block_2.hash()) + .unwrap(); + assert_eq!(expected_block, block_2.clone().into()); + let expected_block_signatures = runner + .reactor() + .storage + .get_finality_signatures_for_block(*block_2.hash()); + assert_eq!( + expected_block_signatures + .and_then(|sigs| sigs.finality_signature(fin_sig_2.public_key())) + .unwrap(), + FinalitySignature::from(fin_sig_2) + ); + } + + // Verify the state of the accumulator is correct. + { + let reactor = runner.reactor_mut(); + let block_accumulator = &mut reactor.block_accumulator; + // Local tip should not have changed since no blocks were executed. + assert_eq!( + block_accumulator.local_tip, + Some(LocalTipIdentifier::new(0, 0.into())) + ); + + assert!(!block_accumulator + .block_acceptors + .get(block_1.hash()) + .unwrap() + .executed()); + assert!(block_accumulator + .block_acceptors + .get(block_1.hash()) + .unwrap() + .has_sufficient_finality()); + assert_eq!( + *block_accumulator + .block_acceptors + .get(block_1.hash()) + .unwrap() + .peers(), + BTreeSet::from([peer_1, peer_2]) + ); + + assert!(!block_accumulator + .block_acceptors + .get(block_2.hash()) + .unwrap() + .executed()); + assert!(block_accumulator + .block_acceptors + .get(block_2.hash()) + .unwrap() + .has_sufficient_finality()); + assert_eq!( + *block_accumulator + .block_acceptors + .get(block_2.hash()) + .unwrap() + .peers(), + BTreeSet::from([peer_2]) + ); + + // Shouldn't have any complete blocks. 
+ assert!(runner + .reactor() + .storage + .get_highest_complete_block() + .unwrap() + .is_none()); + } + + // Get the meta block along with the state, then register it as executed to + // later notify the accumulator of its execution. + let meta_block_1 = { + let block_accumulator = &runner.reactor().block_accumulator; + let mut meta_block = block_accumulator + .block_acceptors + .get(block_1.hash()) + .unwrap() + .meta_block() + .unwrap(); + assert!(meta_block.state.register_as_executed().was_updated()); + meta_block + }; + + // Let the accumulator know block 1 has been executed. + { + runner + .process_injected_effects(|effect_builder| { + let event = super::Event::ExecutedBlock { + meta_block: meta_block_1.clone(), + }; + effect_builder + .into_inner() + .schedule(event, QueueKind::Validation) + .ignore() + }) + .await; + for _ in 0..4 { + while runner.try_crank(&mut rng).await == TryCrankOutcome::NoEventsToProcess { + time::sleep(POLL_INTERVAL).await; + } + } + } + + // Verify the state of the accumulator is correct. + { + let reactor = runner.reactor_mut(); + let block_accumulator = &mut reactor.block_accumulator; + // Local tip should now be block 1. + let expected_local_tip = LocalTipIdentifier::new(block_1.height(), block_1.era_id()); + assert_eq!(block_accumulator.local_tip, Some(expected_local_tip)); + + assert!(block_accumulator + .block_acceptors + .get(block_1.hash()) + .unwrap() + .executed()); + assert!(block_accumulator + .block_acceptors + .get(block_1.hash()) + .unwrap() + .has_sufficient_finality()); + assert_eq!( + *block_accumulator + .block_acceptors + .get(block_1.hash()) + .unwrap() + .peers(), + BTreeSet::from([peer_1, peer_2]) + ); + // The block should be marked complete in storage by now. + assert_eq!( + runner + .reactor() + .storage + .get_highest_complete_block() + .unwrap() + .unwrap() + .height(), + meta_block_1.block.height() + ); + } + + // Retrigger the event so the accumulator can update its meta block state. 
+ { + runner + .process_injected_effects(|effect_builder| { + let event = super::Event::ExecutedBlock { + meta_block: meta_block_1.clone(), + }; + effect_builder + .into_inner() + .schedule(event, QueueKind::Validation) + .ignore() + }) + .await; + while runner.try_crank(&mut rng).await == TryCrankOutcome::NoEventsToProcess { + time::sleep(POLL_INTERVAL).await; + } + } + + let older_block = generate_older_block(&mut rng, &block_1, 1); + // Register an older block. + { + let effect_builder = runner.effect_builder(); + let reactor = runner.reactor_mut(); + + let block_accumulator = &mut reactor.block_accumulator; + let event = super::Event::ReceivedBlock { + block: Arc::new(older_block.clone()), + sender: peer_1, + }; + let effects = block_accumulator.handle_event(effect_builder, &mut rng, event); + assert!(effects.is_empty()); + // This should have no effect on the accumulator since the block is + // older than the local tip. + assert!(!block_accumulator + .block_acceptors + .contains_key(older_block.hash())); + } + + let older_block_signature = FinalitySignatureV2::create( + *older_block.hash(), + older_block.height(), + older_block.era_id(), + chain_name_hash, + &ALICE_SECRET_KEY, + ); + // Register a signature for an older block. + { + let effect_builder = runner.effect_builder(); + let reactor = runner.reactor_mut(); + + let block_accumulator = &mut reactor.block_accumulator; + let event = super::Event::ReceivedFinalitySignature { + finality_signature: Box::new(older_block_signature), + sender: peer_2, + }; + let effects = block_accumulator.handle_event(effect_builder, &mut rng, event); + assert!(effects.is_empty()); + // The block is older than the local tip, but the accumulator doesn't + // know that because it was only provided with the signature, so it + // creates the acceptor if it's in the same era or newer than the + // local tip era, which, in this case, it is. 
+ assert!(block_accumulator + .block_acceptors + .contains_key(older_block.hash())); + } + + let old_era_block = TestBlockBuilder::new() + .era(block_1.era_id() - RECENT_ERA_INTERVAL - 1) + .height(1) + .switch_block(false) + .build(&mut rng); + + let old_era_signature = FinalitySignatureV2::create( + *old_era_block.hash(), + old_era_block.height(), + old_era_block.era_id(), + chain_name_hash, + &ALICE_SECRET_KEY, + ); + // Register a signature for a block in an old era. + { + let effect_builder = runner.effect_builder(); + let reactor = runner.reactor_mut(); + + let block_accumulator = &mut reactor.block_accumulator; + let event = super::Event::ReceivedFinalitySignature { + finality_signature: Box::new(old_era_signature), + sender: peer_2, + }; + let effects = block_accumulator.handle_event(effect_builder, &mut rng, event); + assert!(effects.is_empty()); + // This signature is from an older era and shouldn't lead to the + // creation of an acceptor. + assert!(!block_accumulator + .block_acceptors + .contains_key(old_era_block.hash())); + } +} + +#[tokio::test] +async fn block_accumulator_doesnt_purge_with_delayed_block_execution() { + let mut rng = TestRng::new(); + let (chainspec, chainspec_raw_bytes) = + <(Chainspec, ChainspecRawBytes)>::from_resources("local"); + let chain_name_hash = chainspec.name_hash(); + let mut runner: Runner = Runner::new( + (), + Arc::new(chainspec), + Arc::new(chainspec_raw_bytes), + &mut rng, + ) + .await + .unwrap(); + + // Create 1 block. + let block_1 = generate_non_genesis_block(&mut rng); + + // Also create 2 peers. 
+ let peer_1 = NodeId::random(&mut rng); + let peer_2 = NodeId::random(&mut rng); + + let fin_sig_bob = FinalitySignatureV2::create( + *block_1.hash(), + block_1.height(), + block_1.era_id(), + chain_name_hash, + &BOB_SECRET_KEY, + ); + + let fin_sig_carol = FinalitySignatureV2::create( + *block_1.hash(), + block_1.height(), + block_1.era_id(), + chain_name_hash, + &CAROL_SECRET_KEY, + ); + + let fin_sig_alice = FinalitySignatureV2::create( + *block_1.hash(), + block_1.height(), + block_1.era_id(), + chain_name_hash, + &ALICE_SECRET_KEY, + ); + + // Register the era in the validator matrix so the block is valid. + { + let mut validator_matrix = runner.reactor_mut().validator_matrix.clone(); + let weights = EraValidatorWeights::new( + block_1.era_id(), + BTreeMap::from([ + (ALICE_PUBLIC_KEY.clone(), 10.into()), /* Less weight so that the sig from Alice + * would not have sufficient finality */ + (BOB_PUBLIC_KEY.clone(), 100.into()), + (CAROL_PUBLIC_KEY.clone(), 100.into()), + ]), + Ratio::new(1, 3), + ); + validator_matrix.register_era_validator_weights(weights); + } + + // Register signatures for block 1. + { + let effect_builder = runner.effect_builder(); + let reactor = runner.reactor_mut(); + + let block_accumulator = &mut reactor.block_accumulator; + block_accumulator.register_local_tip(0, 0.into()); + + let event = super::Event::ReceivedFinalitySignature { + finality_signature: Box::new(fin_sig_bob.clone()), + sender: peer_1, + }; + let effects = block_accumulator.handle_event(effect_builder, &mut rng, event); + assert!(effects.is_empty()); + + let event = super::Event::ReceivedFinalitySignature { + finality_signature: Box::new(fin_sig_carol.clone()), + sender: peer_1, + }; + let effects = block_accumulator.handle_event(effect_builder, &mut rng, event); + assert!(effects.is_empty()); + + // Register the finality signature created by Alice (this validator) after executing the + // block. 
+ let event = super::Event::CreatedFinalitySignature { + finality_signature: Box::new(fin_sig_alice.clone()), + }; + let effects = block_accumulator.handle_event(effect_builder, &mut rng, event); + assert!(effects.is_empty()); + } + + // Register block 1 as received from peer. + { + runner + .process_injected_effects(|effect_builder| { + let event = super::Event::ReceivedBlock { + block: Arc::new(block_1.clone()), + sender: peer_2, + }; + effect_builder + .into_inner() + .schedule(event, QueueKind::Validation) + .ignore() + }) + .await; + for _ in 0..6 { + while runner.try_crank(&mut rng).await == TryCrankOutcome::NoEventsToProcess { + time::sleep(POLL_INTERVAL).await; + } + } + let expected_block = runner + .reactor() + .storage + .read_block_by_hash(*block_1.hash()) + .unwrap(); + assert_eq!(expected_block, block_1.clone().into()); + let expected_block_signatures = runner + .reactor() + .storage + .get_finality_signatures_for_block(*block_1.hash()); + assert_eq!( + expected_block_signatures + .and_then(|sigs| sigs.finality_signature(fin_sig_alice.public_key())) + .unwrap(), + FinalitySignature::from(fin_sig_alice) + ); + } + + // Now add a delay between when the finality signature is created and registered in the + // accumulator. Usually registering the created finality signature and the executed block + // happen immediately but if the event queue is backed up the event to register the executed + // block can be delayed. Since we would purge an acceptor if the purge interval has passed, + // we want to simulate a situation in which the purge interval was exceeded in order to test + // the special case that if an acceptor that had sufficient finality, it is not purged. + time::sleep( + Duration::from(runner.reactor().block_accumulator.purge_interval) + Duration::from_secs(1), + ) + .await; + + // Register block 1 as having been executed by Alice (this node). 
+ { + runner + .process_injected_effects(|effect_builder| { + let mut meta_block_state = MetaBlockState::new_already_stored(); + meta_block_state.register_as_executed(); + let event = super::Event::ExecutedBlock { + meta_block: MetaBlock::new_forward( + Arc::new(block_1.clone()), + Vec::new(), + meta_block_state, + ) + .try_into() + .unwrap(), + }; + effect_builder + .into_inner() + .schedule(event, QueueKind::Regular) + .ignore() + }) + .await; + let mut finished = false; + while !finished { + let mut retry_count = 5; + while runner.try_crank(&mut rng).await == TryCrankOutcome::NoEventsToProcess { + retry_count -= 1; + if retry_count == 0 { + finished = true; + break; + } + time::sleep(POLL_INTERVAL).await; + } + } + + // Expect that the block was marked complete by the event generated by the accumulator. + let expected_block = runner + .reactor() + .storage + .get_highest_complete_block() + .unwrap() + .unwrap(); + assert_eq!(expected_block.height(), block_1.height()); + } +} diff --git a/node/src/components/block_proposer.rs b/node/src/components/block_proposer.rs deleted file mode 100644 index a3e6c62f0f..0000000000 --- a/node/src/components/block_proposer.rs +++ /dev/null @@ -1,491 +0,0 @@ -//! Block proposer. -//! -//! The block proposer stores deploy hashes in memory, tracking their suitability for inclusion into -//! a new block. Upon request, it returns a list of candidates that can be included. 
- -mod config; -mod deploy_sets; -mod event; -mod metrics; - -#[cfg(test)] -mod tests; - -use std::{ - collections::{HashMap, HashSet}, - convert::Infallible, - time::Duration, -}; - -pub use config::Config; -use datasize::DataSize; -use itertools::Itertools; -use prometheus::{self, Registry}; -use tracing::{debug, error, info, trace}; - -use crate::{ - components::Component, - effect::{ - requests::{BlockProposerRequest, ProtoBlockRequest, StateStoreRequest, StorageRequest}, - EffectBuilder, EffectExt, Effects, - }, - types::{ - appendable_block::{AddError, AppendableBlock}, - chainspec::DeployConfig, - Chainspec, DeployHash, DeployHeader, ProtoBlock, Timestamp, - }, - NodeRng, -}; -pub(crate) use deploy_sets::BlockProposerDeploySets; -pub(crate) use event::{DeployType, Event}; -use metrics::BlockProposerMetrics; - -/// Block proposer component. -#[derive(DataSize, Debug)] -pub(crate) struct BlockProposer { - /// The current state of the proposer component. - state: BlockProposerState, - - /// Metrics, present in all states. - metrics: BlockProposerMetrics, -} - -/// Interval after which a pruning of the internal sets is triggered. -// TODO: Make configurable. -const PRUNE_INTERVAL: Duration = Duration::from_secs(10); - -/// Experimentally, deploys are in the range of 270-280 bytes, we use this to determine if we are -/// within a threshold to break iteration of `pending` early. -const DEPLOY_APPROX_MIN_SIZE: usize = 300; - -/// The type of values expressing the block height in the chain. -type BlockHeight = u64; - -/// A queue of contents of blocks that we know have been finalized, but we are still missing -/// notifications about finalization of some of their ancestors. It maps block height to the -/// deploys contained in the corresponding block. -type FinalizationQueue = HashMap>; - -/// A queue of requests we can't respond to yet, because we aren't up to date on finalized blocks. 
-/// The key is the height of the next block we will expect to be finalized at the point when we can -/// fulfill the corresponding requests. -type RequestQueue = HashMap>; - -/// Current operational state of a block proposer. -#[derive(DataSize, Debug)] -#[allow(clippy::large_enum_variant)] -enum BlockProposerState { - /// Block proposer is initializing, waiting for a state snapshot. - Initializing { - /// Events cached pending transition to `Ready` state when they can be handled. - pending: Vec, - /// The key under which this component's state is cached in storage. - state_key: Vec, - /// The deploy config from the current chainspec. - deploy_config: DeployConfig, - /// The configuration, containing local settings for deploy selection - local_config: Config, - }, - /// Normal operation. - Ready(BlockProposerReady), -} - -impl BlockProposer { - /// Creates a new block proposer instance. - pub(crate) fn new( - registry: Registry, - effect_builder: EffectBuilder, - next_finalized_block: BlockHeight, - chainspec: &Chainspec, - local_config: Config, - ) -> Result<(Self, Effects), prometheus::Error> - where - REv: From + From + From + Send + 'static, - { - debug!(%next_finalized_block, "creating block proposer"); - // load the state from storage or use a fresh instance if loading fails. 
- let state_key = deploy_sets::create_storage_key(chainspec); - let effects = effect_builder - .get_finalized_deploys(chainspec.deploy_config.max_ttl) - .event(move |finalized_deploys| Event::Loaded { - finalized_deploys, - next_finalized_block, - }); - - let block_proposer = BlockProposer { - state: BlockProposerState::Initializing { - pending: Vec::new(), - state_key, - deploy_config: chainspec.deploy_config, - local_config, - }, - metrics: BlockProposerMetrics::new(registry)?, - }; - - Ok((block_proposer, effects)) - } -} - -impl Component for BlockProposer -where - REv: From + From + From + Send + 'static, -{ - type Event = Event; - type ConstructionError = Infallible; - - fn handle_event( - &mut self, - effect_builder: EffectBuilder, - _rng: &mut NodeRng, - event: Self::Event, - ) -> Effects { - let mut effects = Effects::new(); - - // We handle two different states in the block proposer, but our "ready" state is - // encapsulated in a separate type to simplify the code. The `Initializing` state is simple - // enough to handle it here directly. - match (&mut self.state, event) { - ( - BlockProposerState::Initializing { - ref mut pending, - state_key, - deploy_config, - local_config, - }, - Event::Loaded { - finalized_deploys, - next_finalized_block, - }, - ) => { - let mut new_ready_state = BlockProposerReady { - sets: BlockProposerDeploySets::from_finalized( - finalized_deploys, - next_finalized_block, - ), - unhandled_finalized: Default::default(), - deploy_config: *deploy_config, - state_key: state_key.clone(), - request_queue: Default::default(), - local_config: local_config.clone(), - }; - - // Replay postponed events onto new state. - for ev in pending.drain(..) { - effects.extend(new_ready_state.handle_event(effect_builder, ev)); - } - - self.state = BlockProposerState::Ready(new_ready_state); - - // Start pruning deploys after delay. 
- effects.extend( - effect_builder - .set_timeout(PRUNE_INTERVAL) - .event(|_| Event::Prune), - ); - } - ( - BlockProposerState::Initializing { - ref mut pending, .. - }, - event, - ) => { - // Any incoming events are just buffered until initialization is complete. - pending.push(event); - } - - (BlockProposerState::Ready(ref mut ready_state), event) => { - effects.extend(ready_state.handle_event(effect_builder, event)); - - // Update metrics after the effects have been applied. - self.metrics - .pending_deploys - .set(ready_state.sets.pending.len() as i64); - } - }; - - effects - } -} - -/// State of operational block proposer. -#[derive(DataSize, Debug)] -struct BlockProposerReady { - /// Set of deploys currently stored in the block proposer. - sets: BlockProposerDeploySets, - /// `unhandled_finalized` is a set of hashes for deploys that the `BlockProposer` has not yet - /// seen but were reported as reported to `finalized_deploys()`. They are used to - /// filter deploys for proposal, similar to `self.sets.finalized_deploys`. - unhandled_finalized: HashSet, - /// We don't need the whole Chainspec here, just the deploy config. - deploy_config: DeployConfig, - /// Key for storing the block proposer state. - state_key: Vec, - /// The queue of requests awaiting being handled. - request_queue: RequestQueue, - /// The block proposer configuration, containing local settings for selecting deploys. 
- local_config: Config, -} - -impl BlockProposerReady { - fn handle_event( - &mut self, - effect_builder: EffectBuilder, - event: Event, - ) -> Effects - where - REv: Send + From, - { - match event { - Event::Request(BlockProposerRequest::RequestProtoBlock(request)) => { - if request.next_finalized > self.sets.next_finalized { - debug!( - request_next_finalized = %request.next_finalized, - self_next_finalized = %self.sets.next_finalized, - "received request before finalization announcement" - ); - self.request_queue - .entry(request.next_finalized) - .or_default() - .push(request); - Effects::new() - } else { - info!(%request.next_finalized, "proposing a proto block"); - request - .responder - .respond(self.propose_proto_block( - self.deploy_config, - request.current_instant, - request.past_deploys, - request.random_bit, - )) - .ignore() - } - } - Event::BufferDeploy { hash, deploy_type } => { - self.add_deploy_or_transfer(Timestamp::now(), hash, *deploy_type); - Effects::new() - } - Event::Prune => { - let pruned = self.prune(Timestamp::now()); - debug!(%pruned, "pruned deploys from buffer"); - - // Re-trigger timer after `PRUNE_INTERVAL`. - effect_builder - .set_timeout(PRUNE_INTERVAL) - .event(|_| Event::Prune) - } - Event::Loaded { .. } => { - // This should never happen, but we can just ignore the event and carry on. 
- error!("got loaded event for block proposer state during ready state"); - Effects::new() - } - Event::FinalizedBlock(block) => { - let deploys = block.deploys_and_transfers_iter().copied().collect_vec(); - let mut height = block.height(); - - if height > self.sets.next_finalized { - debug!( - %height, - next_finalized = %self.sets.next_finalized, - "received finalized blocks out of order; queueing" - ); - // safe to subtract 1 - height will never be 0 in this branch, because - // next_finalized is at least 0, and height has to be greater - self.sets.finalization_queue.insert(height - 1, deploys); - Effects::new() - } else { - debug!(%height, "handling finalized block"); - let mut effects = self.handle_finalized_block(effect_builder, height, deploys); - while let Some(deploys) = self.sets.finalization_queue.remove(&height) { - info!(%height, "removed finalization queue entry"); - height += 1; - effects.extend(self.handle_finalized_block( - effect_builder, - height, - deploys, - )); - } - effects - } - } - } - } - - /// Adds a deploy to the block proposer. - /// - /// Returns `false` if the deploy has been rejected. 
- fn add_deploy_or_transfer( - &mut self, - current_instant: Timestamp, - hash: DeployHash, - deploy_or_transfer: DeployType, - ) { - if deploy_or_transfer.header().expired(current_instant) { - trace!(%hash, "expired deploy rejected from the buffer"); - return; - } - if self.unhandled_finalized.remove(&hash) { - info!(%hash, - "deploy was previously marked as finalized, storing header" - ); - self.sets - .finalized_deploys - .insert(hash, deploy_or_transfer.take_header()); - return; - } - // only add the deploy if it isn't contained in a finalized block - if self.sets.finalized_deploys.contains_key(&hash) { - info!(%hash, "deploy rejected from the buffer"); - } else { - self.sets - .pending - .insert(hash, (deploy_or_transfer, current_instant)); - info!(%hash, "added deploy to the buffer"); - } - } - - /// Notifies the block proposer that a block has been finalized. - fn finalized_deploys(&mut self, deploys: I) - where - I: IntoIterator, - { - for deploy_hash in deploys.into_iter() { - match self.sets.pending.remove(&deploy_hash) { - Some((deploy_type, _)) => { - self.sets - .finalized_deploys - .insert(deploy_hash, deploy_type.take_header()); - } - // If we haven't seen this deploy before, we still need to take note of it. - _ => { - self.unhandled_finalized.insert(deploy_hash); - } - } - } - } - - /// Handles finalization of a block. 
- fn handle_finalized_block( - &mut self, - _effect_builder: EffectBuilder, - height: BlockHeight, - deploys: I, - ) -> Effects - where - I: IntoIterator, - { - self.finalized_deploys(deploys); - self.sets.next_finalized = self.sets.next_finalized.max(height + 1); - - if let Some(requests) = self.request_queue.remove(&self.sets.next_finalized) { - info!(height = %(height + 1), "handling queued requests"); - requests - .into_iter() - .flat_map(|request| { - request - .responder - .respond(self.propose_proto_block( - self.deploy_config, - request.current_instant, - request.past_deploys, - request.random_bit, - )) - .ignore() - }) - .collect() - } else { - Effects::new() - } - } - - /// Checks if a deploy's dependencies are satisfied, so the deploy is eligible for inclusion. - fn deps_resolved(&self, header: &DeployHeader, past_deploys: &HashSet) -> bool { - header - .dependencies() - .iter() - .all(|dep| past_deploys.contains(dep) || self.contains_finalized(dep)) - } - - /// Returns a list of candidates for inclusion into a block. - fn propose_proto_block( - &mut self, - deploy_config: DeployConfig, - block_timestamp: Timestamp, - past_deploys: HashSet, - random_bit: bool, - ) -> ProtoBlock { - let mut appendable_block = AppendableBlock::new(deploy_config, block_timestamp); - - // We prioritize transfers over deploys, so we try to include them first. - for (hash, (deploy_type, received_time)) in &self.sets.pending { - if !deploy_type.is_transfer() - || !self.deps_resolved(&deploy_type.header(), &past_deploys) - || past_deploys.contains(hash) - || self.contains_finalized(hash) - || block_timestamp.saturating_diff(*received_time) < self.local_config.deploy_delay - { - continue; - } - - if let Err(err) = appendable_block.add(*hash, deploy_type) { - match err { - // We added the maximum number of transfers. - AddError::TransferCount | AddError::GasLimit | AddError::BlockSize => break, - // The deploy is not valid in this block, but might be valid in another. 
- AddError::InvalidDeploy => (), - // These errors should never happen when adding a transfer. - AddError::InvalidGasAmount | AddError::DeployCount | AddError::Duplicate => { - error!(?err, "unexpected error when adding transfer") - } - } - } - } - - // Now we try to add other deploys to the block. - for (hash, (deploy_type, received_time)) in &self.sets.pending { - if deploy_type.is_transfer() - || !self.deps_resolved(&deploy_type.header(), &past_deploys) - || past_deploys.contains(hash) - || self.contains_finalized(hash) - || block_timestamp.saturating_diff(*received_time) < self.local_config.deploy_delay - { - continue; - } - - if let Err(err) = appendable_block.add(*hash, deploy_type) { - match err { - // We added the maximum number of deploys. - AddError::DeployCount => break, - AddError::BlockSize => { - if appendable_block.total_size() + DEPLOY_APPROX_MIN_SIZE - > deploy_config.block_gas_limit as usize - { - break; // Probably no deploy will fit in this block anymore. - } - } - // The deploy is not valid in this block, but might be valid in another. - // TODO: Do something similar to DEPLOY_APPROX_MIN_SIZE for gas. - AddError::InvalidDeploy | AddError::GasLimit => (), - // These errors should never happen when adding a deploy. - AddError::TransferCount | AddError::Duplicate => { - error!(?err, "unexpected error when adding deploy") - } - AddError::InvalidGasAmount => { - error!("payment_amount couldn't be converted from motes to gas") - } - } - } - } - - appendable_block.into_proto_block(random_bit) - } - - /// Prunes expired deploy information from the BlockProposer, returns the total deploys pruned. 
- fn prune(&mut self, current_instant: Timestamp) -> usize { - self.sets.prune(current_instant) - } - - fn contains_finalized(&self, dep: &DeployHash) -> bool { - self.sets.finalized_deploys.contains_key(dep) || self.unhandled_finalized.contains(dep) - } -} diff --git a/node/src/components/block_proposer/config.rs b/node/src/components/block_proposer/config.rs deleted file mode 100644 index 193f8992c0..0000000000 --- a/node/src/components/block_proposer/config.rs +++ /dev/null @@ -1,28 +0,0 @@ -use datasize::DataSize; -use serde::{Deserialize, Serialize}; - -use crate::types::TimeDiff; - -/// Block proposer configuration. -#[derive(DataSize, Debug, Deserialize, Serialize, Clone)] -// Disallow unknown fields to ensure config files and command-line overrides contain valid keys. -#[serde(deny_unknown_fields)] -pub struct Config { - /// Deploys are only proposed in a new block if the have been received at least this long ago. - /// A longer delay makes it more likely that many proposed deploys are already known by the - /// other nodes, and don't have to be requested from the proposer afterwards. - #[serde(default = "default_deploy_delay")] - pub deploy_delay: TimeDiff, -} - -impl Default for Config { - fn default() -> Config { - Config { - deploy_delay: default_deploy_delay(), - } - } -} - -fn default_deploy_delay() -> TimeDiff { - "1min".parse().unwrap() -} diff --git a/node/src/components/block_proposer/deploy_sets.rs b/node/src/components/block_proposer/deploy_sets.rs deleted file mode 100644 index f75d4a1560..0000000000 --- a/node/src/components/block_proposer/deploy_sets.rs +++ /dev/null @@ -1,113 +0,0 @@ -use std::{ - collections::HashMap, - fmt::{self, Display, Formatter}, -}; - -use datasize::DataSize; -use serde::{Deserialize, Serialize}; - -use super::{event::DeployType, BlockHeight, FinalizationQueue}; -use crate::types::{Chainspec, DeployHash, DeployHeader, Timestamp}; - -/// Stores the internal state of the BlockProposer. 
-#[derive(Clone, DataSize, Debug, Deserialize, Serialize)] -pub struct BlockProposerDeploySets { - /// The collection of deploys pending for inclusion in a block, with a timestamp of when we - /// received them. - pub(super) pending: HashMap, - /// The deploys that have already been included in a finalized block. - pub(super) finalized_deploys: HashMap, - /// The next block height we expect to be finalized. - /// If we receive a notification of finalization of a later block, we will store it in - /// finalization_queue. - /// If we receive a request that contains a later next_finalized, we will store it in - /// request_queue. - pub(super) next_finalized: BlockHeight, - /// The queue of finalized block contents awaiting inclusion in `self.finalized_deploys`. - pub(super) finalization_queue: FinalizationQueue, -} - -impl Default for BlockProposerDeploySets { - fn default() -> Self { - let pending = HashMap::new(); - let finalized_deploys = Default::default(); - let next_finalized = Default::default(); - let finalization_queue = Default::default(); - BlockProposerDeploySets { - pending, - finalized_deploys, - next_finalized, - finalization_queue, - } - } -} - -impl BlockProposerDeploySets { - /// Constructs the instance of `BlockProposerDeploySets` from the list of finalized deploys. - pub(super) fn from_finalized( - finalized_deploys: Vec<(DeployHash, DeployHeader)>, - next_finalized_height: u64, - ) -> BlockProposerDeploySets { - BlockProposerDeploySets { - pending: HashMap::new(), - finalized_deploys: finalized_deploys.into_iter().collect(), - next_finalized: next_finalized_height, - finalization_queue: Default::default(), - } - } -} - -impl Display for BlockProposerDeploySets { - fn fmt(&self, f: &mut Formatter) -> fmt::Result { - write!( - f, - "(pending:{}, finalized:{})", - self.pending.len(), - self.finalized_deploys.len() - ) - } -} - -/// Create a state storage key for block proposer deploy sets based on a chainspec. 
-/// -/// We namespace based on a chainspec to prevent validators from loading data for a different chain -/// if they forget to clear their state. -pub fn create_storage_key(chainspec: &Chainspec) -> Vec { - format!( - "block_proposer_deploy_sets:version={},chain_name={}", - chainspec.protocol_config.version, chainspec.network_config.name - ) - .into() -} - -impl BlockProposerDeploySets { - /// Prunes expired deploy information from the BlockProposerState, returns the total deploys - /// pruned - pub(crate) fn prune(&mut self, current_instant: Timestamp) -> usize { - let pending = prune_pending_deploys(&mut self.pending, current_instant); - let finalized = prune_deploys(&mut self.finalized_deploys, current_instant); - pending + finalized - } -} - -/// Prunes expired deploy information from an individual deploy collection, returns the total -/// deploys pruned -pub(super) fn prune_deploys( - deploys: &mut HashMap, - current_instant: Timestamp, -) -> usize { - let initial_len = deploys.len(); - deploys.retain(|_hash, header| !header.expired(current_instant)); - initial_len - deploys.len() -} - -/// Prunes expired deploy information from an individual pending deploy collection, returns the -/// total deploys pruned -pub(super) fn prune_pending_deploys( - deploys: &mut HashMap, - current_instant: Timestamp, -) -> usize { - let initial_len = deploys.len(); - deploys.retain(|_hash, (deploy_type, _)| !deploy_type.header().expired(current_instant)); - initial_len - deploys.len() -} diff --git a/node/src/components/block_proposer/event.rs b/node/src/components/block_proposer/event.rs deleted file mode 100644 index 0b3c495033..0000000000 --- a/node/src/components/block_proposer/event.rs +++ /dev/null @@ -1,119 +0,0 @@ -use std::fmt::{self, Formatter}; - -use datasize::DataSize; -use derive_more::From; -use fmt::Display; -use serde::{Deserialize, Serialize}; - -use super::BlockHeight; -use crate::{ - effect::requests::BlockProposerRequest, - types::{DeployHash, DeployHeader, 
FinalizedBlock}, -}; -use casper_execution_engine::shared::motes::Motes; - -/// A wrapper over `DeployHeader` to differentiate between wasm-less transfers and wasm headers. -#[derive(Clone, DataSize, Debug, Deserialize, Serialize)] -pub enum DeployType { - /// Represents a wasm-less transfer. - Transfer { - header: DeployHeader, - payment_amount: Motes, - size: usize, - }, - /// Represents a wasm deploy. - Other { - header: DeployHeader, - payment_amount: Motes, - size: usize, - }, -} - -impl DeployType { - /// Access header in all variants of `DeployType`. - pub fn header(&self) -> &DeployHeader { - match self { - Self::Transfer { header, .. } => header, - Self::Other { header, .. } => header, - } - } - - /// Extract into header and drop `DeployType`. - pub fn take_header(self) -> DeployHeader { - match self { - Self::Transfer { header, .. } => header, - Self::Other { header, .. } => header, - } - } - - /// Access payment_amount from all variants. - pub fn payment_amount(&self) -> Motes { - match self { - Self::Transfer { payment_amount, .. } => *payment_amount, - Self::Other { payment_amount, .. } => *payment_amount, - } - } - - /// Access size from all variants. - pub fn size(&self) -> usize { - match self { - Self::Transfer { size, .. } => *size, - Self::Other { size, .. } => *size, - } - } - - /// Asks if the variant is a Transfer. - pub fn is_transfer(&self) -> bool { - matches!(self, DeployType::Transfer { .. }) - } - - /// Asks if the variant is Wasm. - pub fn is_wasm(&self) -> bool { - matches!(self, DeployType::Other { .. }) - } -} - -/// An event for when using the block proposer as a component. -#[derive(DataSize, Debug, From)] -pub enum Event { - /// Incoming `BlockProposerRequest`. - #[from] - Request(BlockProposerRequest), - /// The chainspec and previous sets have been successfully loaded from storage. - Loaded { - /// Previously finalized deploys. 
- finalized_deploys: Vec<(DeployHash, DeployHeader)>, - /// The height of the next expected finalized block. - next_finalized_block: BlockHeight, - }, - /// A new deploy should be buffered. - BufferDeploy { - hash: DeployHash, - deploy_type: Box, - }, - /// The block proposer has been asked to prune stale deploys - Prune, - /// A block has been finalized. We should never propose its deploys again. - FinalizedBlock(Box), -} - -impl Display for Event { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - match self { - Event::Request(req) => write!(f, "block-proposer request: {}", req), - Event::Loaded { - next_finalized_block, - .. - } => write!( - f, - "loaded block-proposer finalized deploys; expected next finalized block: {}", - next_finalized_block - ), - Event::BufferDeploy { hash, .. } => write!(f, "block-proposer add {}", hash), - Event::Prune => write!(f, "block-proposer prune"), - Event::FinalizedBlock(block) => { - write!(f, "block-proposer finalized block {}", block) - } - } - } -} diff --git a/node/src/components/block_proposer/metrics.rs b/node/src/components/block_proposer/metrics.rs deleted file mode 100644 index 14ef77b9a3..0000000000 --- a/node/src/components/block_proposer/metrics.rs +++ /dev/null @@ -1,33 +0,0 @@ -use datasize::DataSize; -use prometheus::{self, IntGauge, Registry}; - -use crate::unregister_metric; - -/// Metrics for the block proposer. -#[derive(DataSize, Debug, Clone)] -pub(super) struct BlockProposerMetrics { - /// Amount of pending deploys - #[data_size(skip)] - pub(super) pending_deploys: IntGauge, - /// Registry stored to allow deregistration later. - #[data_size(skip)] - registry: Registry, -} - -impl BlockProposerMetrics { - /// Creates a new instance of the block proposer metrics. 
- pub fn new(registry: Registry) -> Result { - let pending_deploys = IntGauge::new("pending_deploy", "amount of pending deploys")?; - registry.register(Box::new(pending_deploys.clone()))?; - Ok(BlockProposerMetrics { - pending_deploys, - registry, - }) - } -} - -impl Drop for BlockProposerMetrics { - fn drop(&mut self) { - unregister_metric!(self.registry, self.pending_deploys); - } -} diff --git a/node/src/components/block_proposer/tests.rs b/node/src/components/block_proposer/tests.rs deleted file mode 100644 index a0d285a598..0000000000 --- a/node/src/components/block_proposer/tests.rs +++ /dev/null @@ -1,691 +0,0 @@ -use casper_execution_engine::{ - core::engine_state::executable_deploy_item::ExecutableDeployItem, shared::gas::Gas, -}; -use casper_types::{ - bytesrepr::{Bytes, ToBytes}, - runtime_args, - system::standard_payment::ARG_AMOUNT, - RuntimeArgs, SecretKey, -}; -use itertools::Itertools; - -use super::*; -use crate::{ - crypto::AsymmetricKeyExt, - testing::TestRng, - types::{Deploy, DeployHash, TimeDiff}, -}; - -const DEFAULT_TEST_GAS_PRICE: u64 = 1; - -fn default_gas_payment() -> Gas { - Gas::from(1u32) -} - -fn generate_transfer( - rng: &mut TestRng, - timestamp: Timestamp, - ttl: TimeDiff, - dependencies: Vec, - payment_amount: Gas, -) -> Deploy { - let gas_price = DEFAULT_TEST_GAS_PRICE; - let secret_key = SecretKey::random(rng); - let chain_name = "chain".to_string(); - - let args = runtime_args! 
{ - ARG_AMOUNT => payment_amount.value() - }; - let payment = ExecutableDeployItem::ModuleBytes { - module_bytes: Bytes::new(), - args, - }; - - let session = ExecutableDeployItem::Transfer { - args: RuntimeArgs::new(), - }; - - Deploy::new( - timestamp, - ttl, - gas_price, - dependencies, - chain_name, - payment, - session, - &secret_key, - ) -} - -fn generate_deploy( - rng: &mut TestRng, - timestamp: Timestamp, - ttl: TimeDiff, - dependencies: Vec, - payment_amount: Gas, - gas_price: u64, -) -> Deploy { - let secret_key = SecretKey::random(rng); - let chain_name = "chain".to_string(); - let args = runtime_args! { - ARG_AMOUNT => payment_amount.value() - }; - let payment = ExecutableDeployItem::ModuleBytes { - module_bytes: Bytes::new(), - args, - }; - let session = ExecutableDeployItem::ModuleBytes { - module_bytes: Bytes::new(), - args: RuntimeArgs::new(), - }; - - Deploy::new( - timestamp, - ttl, - gas_price, - dependencies, - chain_name, - payment, - session, - &secret_key, - ) -} - -fn create_test_proposer(deploy_delay: TimeDiff) -> BlockProposerReady { - BlockProposerReady { - sets: Default::default(), - deploy_config: Default::default(), - state_key: b"block-proposer-test".to_vec(), - request_queue: Default::default(), - unhandled_finalized: Default::default(), - local_config: Config { deploy_delay }, - } -} - -impl From for Event { - fn from(_: StorageRequest) -> Self { - // we never send a storage request in our unit tests, but if this does become - // meaningful.... 
- unreachable!("no storage requests in block proposer unit tests") - } -} - -impl From for Event { - fn from(_: StateStoreRequest) -> Self { - unreachable!("no state store requests in block proposer unit tests") - } -} - -#[test] -fn should_add_and_take_deploys() { - let creation_time = Timestamp::from(100); - let ttl = TimeDiff::from(Duration::from_millis(100)); - let block_time1 = Timestamp::from(80); - let block_time2 = Timestamp::from(120); - let block_time3 = Timestamp::from(220); - - let no_deploys = HashSet::new(); - let mut proposer = create_test_proposer(0.into()); - let mut rng = crate::new_rng(); - let deploy1 = generate_deploy( - &mut rng, - creation_time, - ttl, - vec![], - default_gas_payment(), - DEFAULT_TEST_GAS_PRICE, - ); - let deploy2 = generate_deploy( - &mut rng, - creation_time, - ttl, - vec![], - default_gas_payment(), - DEFAULT_TEST_GAS_PRICE, - ); - let deploy3 = generate_deploy( - &mut rng, - creation_time, - ttl, - vec![], - default_gas_payment(), - DEFAULT_TEST_GAS_PRICE, - ); - let deploy4 = generate_deploy( - &mut rng, - creation_time, - ttl, - vec![], - default_gas_payment(), - DEFAULT_TEST_GAS_PRICE, - ); - - let block = proposer.propose_proto_block( - DeployConfig::default(), - block_time2, - no_deploys.clone(), - true, - ); - assert!(block.deploy_hashes().is_empty()); - assert!(block.transfer_hashes().is_empty()); - - // add two deploys - proposer.add_deploy_or_transfer(block_time2, *deploy1.id(), deploy1.deploy_type().unwrap()); - proposer.add_deploy_or_transfer(block_time2, *deploy2.id(), deploy2.deploy_type().unwrap()); - - // if we try to create a block with a timestamp that is too early, we shouldn't get any - // deploys - let block = proposer.propose_proto_block( - DeployConfig::default(), - block_time1, - no_deploys.clone(), - true, - ); - assert!(block.deploy_hashes().is_empty()); - assert!(block.transfer_hashes().is_empty()); - - // if we try to create a block with a timestamp that is too late, we shouldn't get any - // 
deploys, either - let block = proposer.propose_proto_block( - DeployConfig::default(), - block_time3, - no_deploys.clone(), - true, - ); - assert!(block.deploy_hashes().is_empty()); - assert!(block.transfer_hashes().is_empty()); - - // take the deploys out - let block = proposer.propose_proto_block( - DeployConfig::default(), - block_time2, - no_deploys.clone(), - true, - ); - assert!(block.transfer_hashes().is_empty()); - assert_eq!(block.deploy_hashes().len(), 2); - assert!(block.deploy_hashes().contains(&deploy1.id())); - assert!(block.deploy_hashes().contains(&deploy2.id())); - - // take the deploys out - let block = proposer.propose_proto_block( - DeployConfig::default(), - block_time2, - no_deploys.clone(), - true, - ); - assert!(block.transfer_hashes().is_empty()); - assert_eq!(block.deploy_hashes().len(), 2); - - // but they shouldn't be returned if we include it in the past deploys - let deploy_hashes = block.deploys_and_transfers_iter().copied().collect_vec(); - let block = proposer.propose_proto_block( - DeployConfig::default(), - block_time2, - block.deploys_and_transfers_iter().copied().collect(), - true, - ); - assert!(block.deploy_hashes().is_empty()); - assert!(block.transfer_hashes().is_empty()); - - // finalize the block - proposer.finalized_deploys(deploy_hashes.iter().copied()); - - // add more deploys - proposer.add_deploy_or_transfer(block_time2, *deploy3.id(), deploy3.deploy_type().unwrap()); - proposer.add_deploy_or_transfer(block_time2, *deploy4.id(), deploy4.deploy_type().unwrap()); - - let block = - proposer.propose_proto_block(DeployConfig::default(), block_time2, no_deploys, true); - - // since block 1 is now finalized, neither deploy1 nor deploy2 should be among the returned - assert!(block.transfer_hashes().is_empty()); - assert_eq!(block.deploy_hashes().len(), 2); - assert!(block.deploy_hashes().contains(&deploy3.id())); - assert!(block.deploy_hashes().contains(&deploy4.id())); -} - -#[test] -fn should_successfully_prune() { - let 
expired_time = Timestamp::from(201); - let creation_time = Timestamp::from(100); - let test_time = Timestamp::from(120); - let ttl = TimeDiff::from(Duration::from_millis(100)); - - let mut rng = crate::new_rng(); - let deploy1 = generate_deploy( - &mut rng, - creation_time, - ttl, - vec![], - default_gas_payment(), - DEFAULT_TEST_GAS_PRICE, - ); - let deploy2 = generate_deploy( - &mut rng, - creation_time, - ttl, - vec![], - default_gas_payment(), - DEFAULT_TEST_GAS_PRICE, - ); - let deploy3 = generate_deploy( - &mut rng, - creation_time, - ttl, - vec![], - default_gas_payment(), - DEFAULT_TEST_GAS_PRICE, - ); - let deploy4 = generate_deploy( - &mut rng, - creation_time + Duration::from_secs(20).into(), - ttl, - vec![], - default_gas_payment(), - DEFAULT_TEST_GAS_PRICE, - ); - let mut proposer = create_test_proposer(0.into()); - - // pending - proposer.add_deploy_or_transfer(creation_time, *deploy1.id(), deploy1.deploy_type().unwrap()); - proposer.add_deploy_or_transfer(creation_time, *deploy2.id(), deploy2.deploy_type().unwrap()); - proposer.add_deploy_or_transfer(creation_time, *deploy3.id(), deploy3.deploy_type().unwrap()); - proposer.add_deploy_or_transfer(creation_time, *deploy4.id(), deploy4.deploy_type().unwrap()); - - // pending => finalized - proposer.finalized_deploys(vec![*deploy1.id()]); - - assert_eq!(proposer.sets.pending.len(), 3); - assert!(proposer.sets.finalized_deploys.contains_key(deploy1.id())); - - // test for retained values - let pruned = proposer.prune(test_time); - assert_eq!(pruned, 0); - - assert_eq!(proposer.sets.pending.len(), 3); - assert_eq!(proposer.sets.finalized_deploys.len(), 1); - assert!(proposer.sets.finalized_deploys.contains_key(&deploy1.id())); - - // now move the clock to make some things expire - let pruned = proposer.prune(expired_time); - assert_eq!(pruned, 3); - - assert_eq!(proposer.sets.pending.len(), 1); // deploy4 is still valid - assert_eq!(proposer.sets.finalized_deploys.len(), 0); -} - -#[test] -fn 
should_keep_track_of_unhandled_deploys() { - let creation_time = Timestamp::from(100); - let test_time = Timestamp::from(120); - let ttl = TimeDiff::from(Duration::from_millis(100)); - - let mut rng = crate::new_rng(); - let deploy1 = generate_deploy( - &mut rng, - creation_time, - ttl, - vec![], - default_gas_payment(), - DEFAULT_TEST_GAS_PRICE, - ); - let deploy2 = generate_deploy( - &mut rng, - creation_time, - ttl, - vec![], - default_gas_payment(), - DEFAULT_TEST_GAS_PRICE, - ); - let mut proposer = create_test_proposer(0.into()); - - // We do NOT add deploy2... - proposer.add_deploy_or_transfer(creation_time, *deploy1.id(), deploy1.deploy_type().unwrap()); - // But we DO mark it as finalized, by it's hash - proposer.finalized_deploys(vec![*deploy1.id(), *deploy2.id()]); - - assert!( - proposer.contains_finalized(deploy1.id()), - "should contain deploy1" - ); - assert!( - proposer.contains_finalized(deploy2.id()), - "deploy2's hash should be considered seen" - ); - assert!( - !proposer.sets.finalized_deploys.contains_key(deploy2.id()), - "should not yet contain deploy2" - ); - assert!( - proposer.contains_finalized(deploy2.id()), - "should recognize deploy2 as finalized" - ); - - assert!( - deploy2 - .header() - .is_valid(&proposer.deploy_config, test_time), - "deploy2 should be valid" - ); - - // Now we add Deploy2 - proposer.add_deploy_or_transfer(creation_time, *deploy2.id(), deploy2.deploy_type().unwrap()); - assert!( - proposer.sets.finalized_deploys.contains_key(deploy2.id()), - "deploy2 should now be in finalized_deploys" - ); - assert!( - !proposer.unhandled_finalized.contains(deploy2.id()), - "deploy2 should not be in unhandled_finalized" - ); -} - -#[test] -fn should_respect_limits_for_wasmless_transfer_hashes() { - test_proposer_with(TestArgs { - transfer_count: 30, - max_transfer_count: 20, - proposed_count: 20, - remaining_pending_count: 10, - ..Default::default() - }); -} - -#[test] -fn should_respect_limits_for_deploy_hashes() { - 
test_proposer_with(TestArgs { - deploy_count: 30, - max_deploy_count: 20, - proposed_count: 20, - remaining_pending_count: 10, - ..Default::default() - }); -} - -#[test] -fn should_respect_limits_for_deploys_and_transfers_together() { - test_proposer_with(TestArgs { - transfer_count: 30, - max_transfer_count: 20, - deploy_count: 30, - max_deploy_count: 20, - proposed_count: 40, - remaining_pending_count: 20, - ..Default::default() - }); -} - -#[test] -fn should_respect_limits_for_gas_cost() { - test_proposer_with(TestArgs { - transfer_count: 15, - max_transfer_count: 20, - deploy_count: 30, - max_deploy_count: 20, - payment_amount: default_gas_payment(), - block_gas_limit: 10, - proposed_count: 25, - remaining_pending_count: 20, - ..Default::default() - }); -} - -#[test] -fn should_respect_block_gas_limit_for_deploys() { - test_proposer_with(TestArgs { - deploy_count: 15, - payment_amount: default_gas_payment(), - block_gas_limit: 5, - max_deploy_count: 15, - proposed_count: 5, - remaining_pending_count: 10, - ..Default::default() - }); -} - -#[test] -fn should_propose_deploy_if_block_size_limit_met() { - test_proposer_with(TestArgs { - transfer_count: 1, - deploy_count: 1, - payment_amount: default_gas_payment(), - block_gas_limit: 10, - max_transfer_count: 2, - max_deploy_count: 2, - proposed_count: 2, - remaining_pending_count: 0, - max_block_size: Some(2 * DEPLOY_APPROX_MIN_SIZE), - }); -} - -#[test] -fn should_not_propose_deploy_if_block_size_limit_within_threshold() { - test_proposer_with(TestArgs { - transfer_count: 2, - deploy_count: 2, - payment_amount: default_gas_payment(), - block_gas_limit: 10, - max_transfer_count: 3, - max_deploy_count: 3, - proposed_count: 4, - remaining_pending_count: 0, - max_block_size: Some(2 * DEPLOY_APPROX_MIN_SIZE), - }); -} - -#[test] -fn should_not_propose_deploy_if_block_size_limit_passed() { - test_proposer_with(TestArgs { - deploy_count: 3, - transfer_count: 2, // transfers should -not- count towards the block size limit 
- payment_amount: default_gas_payment(), - block_gas_limit: 100, - max_transfer_count: 5, - max_deploy_count: 5, - proposed_count: 4, - remaining_pending_count: 1, - max_block_size: Some(2 * DEPLOY_APPROX_MIN_SIZE), - }); -} - -#[test] -fn should_allow_transfers_to_exceed_block_size_limit() { - test_proposer_with(TestArgs { - deploy_count: 3, - transfer_count: 60, - payment_amount: default_gas_payment(), - block_gas_limit: 100, - max_transfer_count: 40, - max_deploy_count: 5, - proposed_count: 42, - remaining_pending_count: 21, - max_block_size: Some(2 * DEPLOY_APPROX_MIN_SIZE), - }); -} - -#[derive(Default)] -struct TestArgs { - /// Number of deploys to create. - deploy_count: u32, - /// Max deploys to propose. - max_deploy_count: u32, - /// Number of transfer deploys to create. - transfer_count: u32, - /// Number of transfer deploys to create. - max_transfer_count: u32, - /// Payment amount for transfers. - payment_amount: Gas, - /// Max gas cost for block. - block_gas_limit: u64, - /// Post-finalization of proposed block, how many transfers and deploys remain. - remaining_pending_count: usize, - /// Block deploy count proposed. - proposed_count: usize, - /// Block size limit in bytes. - max_block_size: Option, -} - -/// Test the block_proposer by generating deploys and transfers with variable limits, asserting -/// on internal counts post-finalization. 
-fn test_proposer_with( - TestArgs { - deploy_count, - max_deploy_count, - transfer_count, - max_transfer_count, - payment_amount, - block_gas_limit, - remaining_pending_count, - proposed_count, - max_block_size, - }: TestArgs, -) -> BlockProposerReady { - let creation_time = Timestamp::from(100); - let test_time = Timestamp::from(120); - let ttl = TimeDiff::from(Duration::from_millis(100)); - let past_deploys = HashSet::new(); - - let mut rng = crate::new_rng(); - let mut proposer = create_test_proposer(0.into()); - let mut config = proposer.deploy_config; - // defaults are 10, 1000 respectively - config.block_max_deploy_count = max_deploy_count; - config.block_max_transfer_count = max_transfer_count; - config.block_gas_limit = block_gas_limit; - if let Some(max_block_size) = max_block_size { - config.max_block_size = max_block_size as u32; - } - - for _ in 0..deploy_count { - let deploy = generate_deploy( - &mut rng, - creation_time, - ttl, - vec![], - payment_amount, - DEFAULT_TEST_GAS_PRICE, - ); - println!("generated deploy with size {}", deploy.serialized_length()); - proposer.add_deploy_or_transfer(creation_time, *deploy.id(), deploy.deploy_type().unwrap()); - } - for _ in 0..transfer_count { - let transfer = generate_transfer(&mut rng, creation_time, ttl, vec![], payment_amount); - proposer.add_deploy_or_transfer( - creation_time, - *transfer.id(), - transfer.deploy_type().unwrap(), - ); - } - - let block = proposer.propose_proto_block(config, test_time, past_deploys, true); - let all_deploys = block.deploys_and_transfers_iter().collect_vec(); - proposer.finalized_deploys(all_deploys.iter().map(|hash| **hash)); - println!("proposed deploys {}", block.deploy_hashes().len()); - println!("proposed transfers {}", block.transfer_hashes().len()); - assert_eq!( - all_deploys.len(), - proposed_count, - "should have a proposed_count of {}, but got {}", - proposed_count, - all_deploys.len() - ); - assert_eq!( - proposer.sets.pending.len(), - remaining_pending_count, 
- "should have a remaining_pending_count of {}, but got {}", - remaining_pending_count, - proposer.sets.pending.len() - ); - proposer -} - -#[test] -fn should_return_deploy_dependencies() { - let creation_time = Timestamp::from(100); - let ttl = TimeDiff::from(Duration::from_millis(100)); - let block_time = Timestamp::from(120); - - let mut rng = crate::new_rng(); - let deploy1 = generate_deploy( - &mut rng, - creation_time, - ttl, - vec![], - default_gas_payment(), - DEFAULT_TEST_GAS_PRICE, - ); - // let deploy2 depend on deploy1 - let deploy2 = generate_deploy( - &mut rng, - creation_time, - ttl, - vec![*deploy1.id()], - default_gas_payment(), - DEFAULT_TEST_GAS_PRICE, - ); - - let no_deploys = HashSet::new(); - let mut proposer = create_test_proposer(0.into()); - - // add deploy2 - proposer.add_deploy_or_transfer(creation_time, *deploy2.id(), deploy2.deploy_type().unwrap()); - - // deploy2 has an unsatisfied dependency - let block = proposer.propose_proto_block( - DeployConfig::default(), - block_time, - no_deploys.clone(), - true, - ); - assert!(block.deploy_hashes().is_empty()); - assert!(block.transfer_hashes().is_empty()); - - // add deploy1 - proposer.add_deploy_or_transfer(creation_time, *deploy1.id(), deploy1.deploy_type().unwrap()); - - let block = proposer.propose_proto_block( - DeployConfig::default(), - block_time, - no_deploys.clone(), - true, - ); - let deploys: Vec = block.deploys_and_transfers_iter().cloned().collect(); - // only deploy1 should be returned, as it has no dependencies - assert_eq!(deploys.len(), 1); - assert!(deploys.contains(deploy1.id())); - - // the deploy will be included in block 1 - proposer.finalized_deploys(deploys.iter().copied()); - - let block = proposer.propose_proto_block(DeployConfig::default(), block_time, no_deploys, true); - // `blocks` contains a block that contains deploy1 now, so we should get deploy2 - let deploys2 = block.deploy_hashes(); - assert_eq!(deploys2.len(), 1); - 
assert!(deploys2.contains(deploy2.id())); -} - -#[test] -fn should_respect_deploy_delay() { - let mut rng = crate::new_rng(); - let creation_time = Timestamp::from(0); - let ttl = TimeDiff::from(10000); - let no_deploys = HashSet::new(); - let deploy_config = DeployConfig::default(); - let deploy = generate_deploy( - &mut rng, - creation_time, - ttl, - vec![], - default_gas_payment(), - DEFAULT_TEST_GAS_PRICE, - ); - let mut proposer = create_test_proposer(10.into()); // Deploy delay: 10 milliseconds - - // Add the deploy at time 100. So at 109 it cannot be proposed yet, but at time 110 it can. - proposer.add_deploy_or_transfer(100.into(), *deploy.id(), deploy.deploy_type().unwrap()); - let block = proposer.propose_proto_block(deploy_config, 109.into(), no_deploys.clone(), true); - assert!(block.deploy_hashes().is_empty()); - let block = proposer.propose_proto_block(deploy_config, 110.into(), no_deploys, true); - assert_eq!(&vec![*deploy.id()], block.deploy_hashes()); -} diff --git a/node/src/components/block_synchronizer.rs b/node/src/components/block_synchronizer.rs new file mode 100644 index 0000000000..cbf7a1d76b --- /dev/null +++ b/node/src/components/block_synchronizer.rs @@ -0,0 +1,1636 @@ +mod block_acquisition; +mod block_acquisition_action; +mod block_builder; +mod block_synchronizer_progress; +mod config; +mod deploy_acquisition; +mod error; +mod event; +mod execution_results_acquisition; +mod global_state_synchronizer; +mod metrics; +mod need_next; +mod peer_list; +mod signature_acquisition; +mod trie_accumulator; + +#[cfg(test)] +mod tests; + +use std::sync::Arc; + +use datasize::DataSize; +use either::Either; +use futures::FutureExt; +use prometheus::Registry; +use tracing::{debug, error, info, trace, warn}; + +use casper_storage::{ + block_store::types::ApprovalsHashes, data_access_layer::ExecutionResultsChecksumResult, +}; +use casper_types::{ + Block, BlockHash, BlockHeader, BlockSignatures, BlockSyncStatus, BlockSynchronizerStatus, + Chainspec, 
FinalitySignature, FinalitySignatureId, Timestamp, Transaction, +}; + +use super::network::blocklist::BlocklistJustification; +use crate::{ + components::{ + fetcher::{ + EmptyValidationMetadata, Error as FetcherError, FetchItem, FetchResult, FetchedData, + }, + Component, ComponentState, InitializedComponent, ValidatorBoundComponent, + }, + effect::{ + announcements::{MetaBlockAnnouncement, PeerBehaviorAnnouncement}, + requests::{ + BlockAccumulatorRequest, BlockSynchronizerRequest, ContractRuntimeRequest, + FetcherRequest, MakeBlockExecutableRequest, MarkBlockCompletedRequest, + NetworkInfoRequest, StorageRequest, SyncGlobalStateRequest, TrieAccumulatorRequest, + }, + EffectBuilder, EffectExt, EffectResultExt, Effects, + }, + reactor::{self, main_reactor::MainEvent}, + types::{ + sync_leap_validation_metadata::SyncLeapValidationMetaData, BlockExecutionResultsOrChunk, + ExecutableBlock, LegacyDeploy, MetaBlock, MetaBlockState, NodeId, SyncLeap, + SyncLeapIdentifier, TrieOrChunk, ValidatorMatrix, + }, + NodeRng, +}; + +use block_builder::BlockBuilder; +pub(crate) use block_synchronizer_progress::BlockSynchronizerProgress; +pub(crate) use config::Config; +pub(crate) use error::BlockAcquisitionError; +pub(crate) use event::Event; +use execution_results_acquisition::ExecutionResultsAcquisition; +pub(crate) use execution_results_acquisition::ExecutionResultsChecksum; +use global_state_synchronizer::GlobalStateSynchronizer; +pub(crate) use global_state_synchronizer::{ + Error as GlobalStateSynchronizerError, Event as GlobalStateSynchronizerEvent, + Response as GlobalStateSynchronizerResponse, +}; +use metrics::Metrics; +pub(crate) use need_next::NeedNext; +use trie_accumulator::TrieAccumulator; +pub(crate) use trie_accumulator::{ + Error as TrieAccumulatorError, Event as TrieAccumulatorEvent, + Response as TrieAccumulatorResponse, +}; + +const COMPONENT_NAME: &str = "block_synchronizer"; + +pub(crate) trait ReactorEvent: + From> + + From + + From> + + From> + + From> + 
+ From> + + From> + + From> + + From> + + From> + + From + + From + + From + + From + + From + + From + + From + + From + + From + + Send + + 'static +{ +} + +impl ReactorEvent for REv where + REv: From> + + From + + From> + + From> + + From> + + From> + + From> + + From> + + From> + + From> + + From + + From + + From + + From + + From + + From + + From + + From + + From + + Send + + 'static +{ +} + +#[derive(DataSize, Debug)] +pub(crate) struct BlockSynchronizer { + state: ComponentState, + config: Config, + chainspec: Arc, + max_simultaneous_peers: u8, + validator_matrix: ValidatorMatrix, + + // execute forward block (do not get global state or execution effects) + forward: Option, + // either sync-to-genesis or sync-leaped block (get global state and execution effects) + historical: Option, + // deals with global state acquisition for historical blocks + global_sync: GlobalStateSynchronizer, + #[data_size(skip)] + metrics: Metrics, +} + +impl BlockSynchronizer { + pub(crate) fn new( + config: Config, + chainspec: Arc, + max_simultaneous_peers: u8, + validator_matrix: ValidatorMatrix, + registry: &Registry, + ) -> Result { + Ok(BlockSynchronizer { + state: ComponentState::Uninitialized, + config, + chainspec, + max_simultaneous_peers, + validator_matrix, + forward: None, + historical: None, + global_sync: GlobalStateSynchronizer::new(config.max_parallel_trie_fetches as usize), + metrics: Metrics::new(registry)?, + }) + } + + /// Returns the progress being made on the historical syncing. + pub(crate) fn historical_progress(&mut self) -> BlockSynchronizerProgress { + match &self.historical { + None => BlockSynchronizerProgress::Idle, + Some(builder) => self.progress(builder), + } + } + + /// Returns the progress being made on the forward syncing. 
+ pub(crate) fn forward_progress(&mut self) -> BlockSynchronizerProgress { + match &self.forward { + None => BlockSynchronizerProgress::Idle, + Some(builder) => self.progress(builder), + } + } + + pub(crate) fn purge(&mut self) { + self.purge_historical(); + self.purge_forward(); + } + + pub(crate) fn purge_historical(&mut self) { + if let Some(builder) = &self.historical { + debug!(%builder, "BlockSynchronizer: purging block builder"); + } + self.historical = None; + } + + pub(crate) fn purge_forward(&mut self) { + if let Some(builder) = &self.forward { + debug!(%builder, "BlockSynchronizer: purging block builder"); + } + self.forward = None; + } + + /// Registers a block for synchronization. + /// + /// Returns `true` if a block was registered for synchronization successfully. + /// Will return `false` if there was an attempt to register the same block hash + /// again while the synchronizer was working on the same block. The synchronizer + /// will continue work on the block in that case. + pub(crate) fn register_block_by_hash( + &mut self, + block_hash: BlockHash, + should_fetch_execution_state: bool, + ) -> bool { + if let (true, Some(builder), _) | (false, _, Some(builder)) = ( + should_fetch_execution_state, + &self.historical, + &self.forward, + ) { + if builder.block_hash() == block_hash && !builder.is_failed() { + return false; + } + } + let builder = BlockBuilder::new( + block_hash, + should_fetch_execution_state, + self.max_simultaneous_peers, + self.config.peer_refresh_interval, + self.chainspec.core_config.legacy_required_finality, + self.chainspec + .core_config + .start_protocol_version_with_strict_finality_signatures_required, + ); + if should_fetch_execution_state { + self.historical.replace(builder); + } else { + self.forward.replace(builder); + } + true + } + + /// Registers a sync leap result, if able. 
+ pub(crate) fn register_sync_leap( + &mut self, + sync_leap: &SyncLeap, + peers: Vec, + should_fetch_execution_state: bool, + ) { + fn apply_sigs(builder: &mut BlockBuilder, maybe_sigs: Option<&BlockSignatures>) { + if let Some(signatures) = maybe_sigs { + for finality_signature in signatures.finality_signatures() { + if let Err(error) = + builder.register_finality_signature(finality_signature, None) + { + debug!(%error, "BlockSynchronizer: failed to register finality signature"); + } + } + } + } + + let (block_header, maybe_sigs) = sync_leap.highest_block_header_and_signatures(); + if let Some(builder) = self.get_builder(block_header.block_hash(), true) { + debug!(%builder, "BlockSynchronizer: register_sync_leap update builder"); + apply_sigs(builder, maybe_sigs); + builder.register_peers(peers); + } else { + let era_id = block_header.era_id(); + if let Some(validator_weights) = self.validator_matrix.validator_weights(era_id) { + let mut builder = BlockBuilder::new_from_sync_leap( + block_header.clone(), + maybe_sigs, + validator_weights, + peers, + should_fetch_execution_state, + self.max_simultaneous_peers, + self.config.peer_refresh_interval, + self.chainspec.core_config.legacy_required_finality, + self.chainspec + .core_config + .start_protocol_version_with_strict_finality_signatures_required, + ); + apply_sigs(&mut builder, maybe_sigs); + if should_fetch_execution_state { + self.historical = Some(builder); + } else { + self.forward = Some(builder); + } + } else { + warn!( + block_hash = %block_header.block_hash(), + "BlockSynchronizer: register_sync_leap unable to create block builder", + ); + } + } + } + + /// Registers peers to a block builder by `BlockHash`. 
+ pub(crate) fn register_peers(&mut self, block_hash: BlockHash, peers: Vec) { + if let Some(builder) = self.get_builder(block_hash, false) { + builder.register_peers(peers); + } + } + + /* EVENT LOGIC */ + + fn register_made_finalized_block( + &mut self, + block_hash: &BlockHash, + result: Option, + ) { + if let Some(builder) = &self.historical { + if builder.block_hash() == *block_hash { + error!(%block_hash, "historical block should not have been converted for execution"); + } + } + + match &mut self.forward { + Some(builder) if builder.block_hash() == *block_hash => { + if let Some(executable_block) = result { + builder.register_made_executable_block(executable_block); + } else { + // Could not create finalized block, abort + builder.abort(); + } + } + _ => { + trace!(%block_hash, "BlockSynchronizer: not currently synchronizing forward block"); + } + } + } + + fn register_block_execution_enqueued(&mut self, block_hash: &BlockHash) { + if let Some(builder) = &self.historical { + if builder.block_hash() == *block_hash { + error!(%block_hash, "historical block should not be enqueued for execution"); + } + } + + match &mut self.forward { + Some(builder) if builder.block_hash() == *block_hash => { + builder.register_block_execution_enqueued(); + self.metrics + .forward_block_sync_duration + .observe(builder.sync_start_time().elapsed().as_secs_f64()); + } + _ => { + trace!(%block_hash, "BlockSynchronizer: not currently synchronizing forward block"); + } + } + } + + fn register_block_executed(&mut self, block_hash: &BlockHash) { + if let Some(builder) = &self.historical { + if builder.block_hash() == *block_hash { + error!(%block_hash, "historical block should not be executed"); + } + } + + match &mut self.forward { + Some(builder) if builder.block_hash() == *block_hash => { + builder.register_block_executed(); + self.metrics + .forward_block_sync_duration + .observe(builder.sync_start_time().elapsed().as_secs_f64()); + } + _ => { + trace!(%block_hash, 
"BlockSynchronizer: not currently synchronizing forward block"); + } + } + } + + fn register_marked_complete( + &mut self, + effect_builder: EffectBuilder, + block_hash: &BlockHash, + is_new: bool, + ) -> Effects + where + REv: From + + From + + From + + Send, + { + if let Some(builder) = &self.forward { + if builder.block_hash() == *block_hash { + error!( + %block_hash, + "forward block should not be marked complete in block synchronizer" + ); + } + } + + let mut effects = Effects::new(); + match &mut self.historical { + Some(builder) if builder.block_hash() == *block_hash => { + builder.register_marked_complete(); + if !is_new { + warn!(%block_hash, "marked complete an already-complete block"); + return effects; + } + // other components need to know that we've added an historical block + // that they may be interested in + if let Some(block) = builder.maybe_block() { + effects.extend( + effect_builder + .get_execution_results_from_storage(*block.hash()) + .then(move |maybe_execution_results| async move { + match maybe_execution_results { + Some(execution_results) => { + let meta_block = MetaBlock::new_historical( + Arc::new(*block), + execution_results, + MetaBlockState::new_after_historical_sync(), + ); + effect_builder.announce_meta_block(meta_block).await; + } + None => { + error!( + "should have execution results for {}", + block.hash() + ); + } + } + }) + .ignore(), + ); + } + self.metrics + .historical_block_sync_duration + .observe(builder.sync_start_time().elapsed().as_secs_f64()); + } + _ => { + trace!(%block_hash, "BlockSynchronizer: not currently synchronizing historical block"); + } + } + effects + } + + fn dishonest_peers(&self) -> Vec { + let mut ret = vec![]; + if let Some(builder) = &self.forward { + ret.extend(builder.dishonest_peers()); + } + if let Some(builder) = &self.historical { + ret.extend(builder.dishonest_peers()); + } + ret + } + + fn flush_dishonest_peers(&mut self) { + if let Some(builder) = &mut self.forward { + 
builder.flush_dishonest_peers(); + } + if let Some(builder) = &mut self.historical { + builder.flush_dishonest_peers(); + } + } + + fn need_next( + &mut self, + effect_builder: EffectBuilder, + rng: &mut NodeRng, + ) -> Effects + where + REv: ReactorEvent + From> + From, + { + let latch_reset_interval = self.config.latch_reset_interval; + let need_next_interval = self.config.need_next_interval.into(); + let mut results = Effects::new(); + let max_simultaneous_peers = self.max_simultaneous_peers; + let mut builder_needs_next = |builder: &mut BlockBuilder, chainspec: Arc| { + if builder.check_latch(latch_reset_interval) + || builder.is_finished() + || builder.is_failed() + { + return; + } + let action = builder.block_acquisition_action(rng, max_simultaneous_peers); + let peers = action.peers_to_ask(); + let need_next = action.need_next(); + info!( + "BlockSynchronizer: {} with {} peers", + need_next, + peers.len() + ); + match need_next { + NeedNext::Nothing(_) => { + // currently idle or waiting, check back later + results.extend( + effect_builder + .set_timeout(need_next_interval) + .event(|_| Event::Request(BlockSynchronizerRequest::NeedNext)), + ); + } + NeedNext::BlockHeader(block_hash) => { + builder.latch_by(peers.len()); + results.extend(peers.into_iter().flat_map(|node_id| { + effect_builder + .fetch::( + block_hash, + node_id, + Box::new(EmptyValidationMetadata), + ) + .event(Event::BlockHeaderFetched) + })) + } + NeedNext::BlockBody(block_hash) => { + builder.latch_by(peers.len()); + results.extend(peers.into_iter().flat_map(|node_id| { + effect_builder + .fetch::(block_hash, node_id, Box::new(EmptyValidationMetadata)) + .event(Event::BlockFetched) + })) + } + NeedNext::FinalitySignatures(block_hash, era_id, validators) => { + builder.latch_by(std::cmp::min( + validators.len(), + max_simultaneous_peers as usize, + )); + for (validator, peer) in validators + .into_iter() + .take(max_simultaneous_peers as usize) + .zip(peers.into_iter().cycle()) + { + 
debug!(%validator, %peer, "attempting to fetch FinalitySignature"); + builder.register_finality_signature_pending(validator.clone()); + let id = Box::new(FinalitySignatureId::new(block_hash, era_id, validator)); + results.extend( + effect_builder + .fetch::( + id, + peer, + Box::new(EmptyValidationMetadata), + ) + .event(Event::FinalitySignatureFetched), + ); + } + } + NeedNext::GlobalState(block_hash, global_state_root_hash) => { + builder.latch(); + results.extend( + effect_builder + .sync_global_state(block_hash, global_state_root_hash) + .event(move |result| Event::GlobalStateSynced { block_hash, result }), + ); + } + NeedNext::ExecutionResultsChecksum(block_hash, global_state_root_hash) => { + builder.latch(); + results.extend( + effect_builder + .get_execution_results_checksum(global_state_root_hash) + .event(move |result| Event::GotExecutionResultsChecksum { + block_hash, + result, + }), + ); + } + NeedNext::ExecutionResults(block_hash, id, checksum) => { + builder.latch_by(peers.len()); + results.extend(peers.into_iter().flat_map(|node_id| { + debug!("attempting to fetch BlockExecutionResultsOrChunk"); + effect_builder + .fetch::(id, node_id, Box::new(checksum)) + .event(move |result| Event::ExecutionResultsFetched { + block_hash, + result, + }) + })) + } + NeedNext::ApprovalsHashes(block_hash, block) => { + builder.latch_by(peers.len()); + results.extend(peers.into_iter().flat_map(|node_id| { + effect_builder + .fetch::(block_hash, node_id, block.clone()) + .event(Event::ApprovalsHashesFetched) + })) + } + NeedNext::DeployByHash(block_hash, deploy_hash) => { + builder.latch_by(peers.len()); + results.extend(peers.into_iter().flat_map(|node_id| { + effect_builder + .fetch::( + deploy_hash, + node_id, + Box::new(EmptyValidationMetadata), + ) + .event(move |result| Event::DeployFetched { + block_hash, + result: Either::Left(result), + }) + })) + } + NeedNext::TransactionById(block_hash, txn_id) => { + builder.latch_by(peers.len()); + 
results.extend(peers.into_iter().flat_map(|node_id| { + effect_builder + .fetch::( + txn_id, + node_id, + Box::new(EmptyValidationMetadata), + ) + .event(move |result| Event::DeployFetched { + block_hash, + result: Either::Right(result), + }) + })) + } + NeedNext::MakeExecutableBlock(block_hash, _) => { + let need_to_execute = false == builder.should_fetch_execution_state() + && builder.execution_unattempted(); + if need_to_execute { + builder.latch(); + results.extend( + effect_builder + .make_block_executable(block_hash) + .event(move |result| Event::MadeFinalizedBlock { + block_hash, + result, + }), + ) + } + } + NeedNext::EnqueueForExecution(block_hash, _, executable_block) => { + builder.latch(); + results.extend( + effect_builder + .enqueue_block_for_execution( + *executable_block, + MetaBlockState::new_already_stored(), + ) + .event(move |_| Event::MarkBlockExecutionEnqueued(block_hash)), + ) + } + NeedNext::BlockMarkedComplete(block_hash, block_height) => { + // Only mark the block complete if we're syncing historical + // because we have global state and execution effects (if + // any). 
+ if builder.should_fetch_execution_state() { + builder.latch(); + results.extend( + effect_builder.mark_block_completed(block_height).event( + move |is_new| Event::MarkBlockCompleted { block_hash, is_new }, + ), + ) + } + } + NeedNext::Peers(block_hash) => { + if builder.should_fetch_execution_state() { + builder.latch(); + // the accumulator may or may not have peers for an older block, + // so we're going to also get a random sampling from networking + results.extend( + effect_builder + .get_fully_connected_peers(max_simultaneous_peers as usize) + .event(move |peers| Event::NetworkPeers(block_hash, peers)), + ) + } + builder.latch(); + results.extend( + effect_builder + .get_block_accumulated_peers(block_hash) + .event(move |maybe_peers| { + Event::AccumulatedPeers(block_hash, maybe_peers) + }), + ) + } + NeedNext::EraValidators(era_id) => { + warn!( + "BlockSynchronizer: does not have era_validators for era_id: {}", + era_id + ); + builder.latch_by(peers.len()); + results.extend(peers.into_iter().flat_map(|node_id| { + effect_builder + .fetch::( + SyncLeapIdentifier::sync_to_historical(builder.block_hash()), + node_id, + Box::new(SyncLeapValidationMetaData::from_chainspec( + chainspec.as_ref(), + )), + ) + .event(Event::SyncLeapFetched) + })) + } + NeedNext::SwitchToHaveStrictFinality(block_hash, _) => { + // Don't set the latch since this is an internal state transition + if builder.block_hash() != block_hash { + debug!(%block_hash, "BlockSynchronizer: not currently synchronizing block"); + } else if let Err(error) = builder.switch_to_have_strict_finality(block_hash) { + error!(%error, "BlockSynchronizer: failed to advance acquisition state"); + } else { + results.extend( + effect_builder + .set_timeout(need_next_interval) + .event(|_| Event::Request(BlockSynchronizerRequest::NeedNext)), + ); + } + } + } + }; + + if let Some(builder) = &mut self.forward { + builder_needs_next(builder, Arc::clone(&self.chainspec)); + } + if let Some(builder) = &mut 
self.historical { + builder_needs_next(builder, Arc::clone(&self.chainspec)); + } + results + } + + fn peers_accumulated(&mut self, block_hash: BlockHash, peers: Vec) { + if let Some(builder) = self.get_builder(block_hash, true) { + builder.register_peers(peers); + } + } + + fn block_header_fetched( + &mut self, + result: Result, FetcherError>, + ) { + let (block_hash, maybe_block_header, maybe_peer_id): ( + BlockHash, + Option>, + Option, + ) = match result { + Ok(FetchedData::FromPeer { item, peer }) => (item.fetch_id(), Some(item), Some(peer)), + Ok(FetchedData::FromStorage { item }) => (item.fetch_id(), Some(item), None), + Err(err) => { + debug!(%err, "BlockSynchronizer: failed to fetch block header"); + if err.is_peer_fault() { + (*err.id(), None, Some(*err.peer())) + } else { + (*err.id(), None, None) + } + } + }; + + let validator_matrix = &self.validator_matrix.clone(); + if let Some(builder) = self.get_builder(block_hash, false) { + match maybe_block_header { + None => { + if let Some(peer_id) = maybe_peer_id { + builder.demote_peer(peer_id); + } + + if builder.waiting_for_block_header() { + builder.latch_decrement(); + } + } + Some(block_header) => { + if let Err(error) = builder.register_block_header(*block_header, maybe_peer_id) + { + error!(%error, "BlockSynchronizer: failed to apply block header"); + } else { + builder.register_era_validator_weights(validator_matrix); + } + } + } + } + } + + fn block_fetched(&mut self, result: Result, FetcherError>) { + let (block_hash, maybe_block, maybe_peer_id): ( + BlockHash, + Option>, + Option, + ) = match result { + Ok(FetchedData::FromPeer { item, peer }) => { + debug!( + "BlockSynchronizer: fetched body {:?} from peer {}", + item.hash(), + peer + ); + (*item.hash(), Some(item), Some(peer)) + } + Ok(FetchedData::FromStorage { item }) => (*item.hash(), Some(item), None), + Err(err) => { + debug!(%err, "BlockSynchronizer: failed to fetch block"); + if err.is_peer_fault() { + (*err.id(), None, Some(*err.peer())) 
+ } else { + (*err.id(), None, None) + } + } + }; + + if let Some(builder) = self.get_builder(block_hash, false) { + match maybe_block { + None => { + if let Some(peer_id) = maybe_peer_id { + builder.demote_peer(peer_id); + } + + if builder.waiting_for_block() { + builder.latch_decrement(); + } + } + Some(block) => { + if let Err(error) = builder.register_block(*block, maybe_peer_id) { + error!(%error, "BlockSynchronizer: failed to apply block"); + } + } + } + } + } + + fn approvals_hashes_fetched( + &mut self, + result: Result, FetcherError>, + ) { + let (block_hash, maybe_approvals_hashes, maybe_peer_id): ( + BlockHash, + Option>, + Option, + ) = match result { + Ok(FetchedData::FromPeer { item, peer }) => { + debug!( + "BlockSynchronizer: fetched approvals hashes {:?} from peer {}", + item.block_hash(), + peer + ); + (*item.block_hash(), Some(item), Some(peer)) + } + Ok(FetchedData::FromStorage { item }) => (*item.block_hash(), Some(item), None), + Err(err) => { + debug!(%err, "BlockSynchronizer: failed to fetch approvals hashes"); + if err.is_peer_fault() { + (*err.id(), None, Some(*err.peer())) + } else { + (*err.id(), None, None) + } + } + }; + + if let Some(builder) = self.get_builder(block_hash, false) { + match maybe_approvals_hashes { + None => { + if let Some(peer_id) = maybe_peer_id { + builder.demote_peer(peer_id); + } + + if builder.waiting_for_approvals_hashes() { + builder.latch_decrement(); + } + } + Some(approvals_hashes) => { + if let Err(error) = + builder.register_approvals_hashes(&approvals_hashes, maybe_peer_id) + { + error!(%error, "BlockSynchronizer: failed to apply approvals hashes"); + } + } + } + } + } + + fn finality_signature_fetched( + &mut self, + result: Result, FetcherError>, + ) { + let (id, maybe_finality_signature, maybe_peer_id) = match result { + Ok(FetchedData::FromPeer { item, peer }) => { + debug!( + "BlockSynchronizer: fetched finality signature {} from peer {}", + item, peer + ); + (item.fetch_id(), Some(item), 
Some(peer)) + } + Ok(FetchedData::FromStorage { item }) => (item.fetch_id(), Some(item), None), + Err(err) => { + debug!(%err, "BlockSynchronizer: failed to fetch finality signature"); + if err.is_peer_fault() { + (err.id().clone(), None, Some(*err.peer())) + } else { + (err.id().clone(), None, None) + } + } + }; + + if let Some(builder) = self.get_builder(*id.block_hash(), false) { + match maybe_finality_signature { + None => { + if let Some(peer_id) = maybe_peer_id { + builder.demote_peer(peer_id); + } + + // Failed to fetch a finality sig. Decrement the latch if we were actually + // waiting for signatures. + if builder.waiting_for_signatures() { + builder.latch_decrement(); + } + } + Some(finality_signature) => { + if let Err(error) = + builder.register_finality_signature(*finality_signature, maybe_peer_id) + { + warn!(%error, "BlockSynchronizer: failed to apply finality signature"); + } + } + } + } + } + + fn sync_leap_fetched(&mut self, result: Result, FetcherError>) { + let (block_hash, maybe_sync_leap, maybe_peer_id): ( + BlockHash, + Option>, + Option, + ) = match result { + Ok(FetchedData::FromPeer { item, peer }) => { + debug!( + "BlockSynchronizer: fetched sync leap {:?} from peer {}", + item.fetch_id().block_hash(), + peer + ); + + (item.fetch_id().block_hash(), Some(item), Some(peer)) + } + Ok(FetchedData::FromStorage { item }) => { + error!(%item, "BlockSynchronizer: sync leap should never come from storage"); + (item.fetch_id().block_hash(), None, None) // maybe_sync_leap None will demote peer + } + Err(err) => { + debug!(%err, "BlockSynchronizer: failed to fetch sync leap"); + if err.is_peer_fault() { + (err.id().block_hash(), None, Some(*err.peer())) + } else { + (err.id().block_hash(), None, None) + } + } + }; + let demote_peer = maybe_sync_leap.is_none(); + if let Some(sync_leap) = maybe_sync_leap { + let era_validator_weights = sync_leap.era_validator_weights( + self.validator_matrix.fault_tolerance_threshold(), + 
&self.chainspec.protocol_config, + ); + for evw in era_validator_weights { + self.validator_matrix.register_era_validator_weights(evw); + } + } + let validator_matrix = &self.validator_matrix.clone(); + if let Some(builder) = self.get_builder(block_hash, true) { + if demote_peer { + if let Some(peer_id) = maybe_peer_id { + builder.demote_peer(peer_id); + } + } else { + if let Some(peer_id) = maybe_peer_id { + builder.promote_peer(peer_id); + } + builder.register_era_validator_weights(validator_matrix); + } + } + } + + fn global_state_synced( + &mut self, + block_hash: BlockHash, + result: Result, + ) { + let (maybe_root_hash, unreliable_peers) = match result { + Ok(response) => (Some(*response.hash()), response.unreliable_peers()), + Err(error) => { + debug!(%error, "BlockSynchronizer: failed to sync global state"); + match error { + GlobalStateSynchronizerError::TrieAccumulator(unreliable_peers) + | GlobalStateSynchronizerError::PutTrie(_, unreliable_peers) => { + (None, unreliable_peers) + } + GlobalStateSynchronizerError::NoPeersAvailable => { + // This should never happen. Before creating a sync request, + // the block synchronizer will request another set of peers + // (both random and from the accumulator). 
+ debug!( + "BlockSynchronizer: global state sync request was issued with no peers" + ); + (None, Vec::new()) + } + GlobalStateSynchronizerError::ProcessingAnotherRequest { + hash_being_synced, + hash_requested, + } => { + warn!(%hash_being_synced, %hash_requested, + "BlockSynchronizer: global state sync is processing another request"); + (None, Vec::new()) + } + } + } + }; + + if let Some(builder) = &mut self.historical { + if builder.block_hash() != block_hash { + debug!(%block_hash, "BlockSynchronizer: not currently synchronizing block"); + } else { + builder.latch_decrement(); + if let Some(root_hash) = maybe_root_hash { + if let Err(error) = builder.register_global_state(root_hash.into_inner()) { + error!(%block_hash, %error, "BlockSynchronizer: failed to apply global state"); + } + } + // Demote all the peers where we didn't find the required global state tries + for peer in unreliable_peers.iter() { + builder.demote_peer(*peer); + } + } + } + } + + fn got_execution_results_checksum( + &mut self, + block_hash: BlockHash, + result: ExecutionResultsChecksumResult, + ) { + let builder = match &mut self.historical { + None => { + // execution results checksums are only relevant to historical blocks + debug!(%block_hash, "BlockSynchronizer: not currently synchronising block"); + return; + } + Some(builder) => { + let current_block_hash = builder.block_hash(); + if current_block_hash != block_hash { + debug!(%block_hash, %current_block_hash, "BlockSynchronizer: currently synchronising different block"); + return; + } + builder + } + }; + + let execution_results_checksum = match result { + ExecutionResultsChecksumResult::Failure(error) => { + error!(%block_hash, %error, "BlockSynchronizer: unexpected error getting checksum registry"); + ExecutionResultsChecksum::Uncheckable + } + ExecutionResultsChecksumResult::RootNotFound => { + error!(%block_hash, "BlockSynchronizer: unexpected error getting checksum registry (root not found)"); + 
ExecutionResultsChecksum::Uncheckable + } + ExecutionResultsChecksumResult::ChecksumNotFound => { + error!(%block_hash, "BlockSynchronizer: checksum not found (should exist)"); + ExecutionResultsChecksum::Uncheckable + } + ExecutionResultsChecksumResult::RegistryNotFound => { + // we didn't track this checksum pre-1.5 + debug!(%block_hash, "BlockSynchronizer: checksum registry not found (legacy record)"); + ExecutionResultsChecksum::Uncheckable + } + ExecutionResultsChecksumResult::Success { checksum } => { + debug!( + %block_hash, "BlockSynchronizer: got execution_results_checksum {}", + checksum + ); + ExecutionResultsChecksum::Checkable(checksum) + } + }; + + builder.latch_decrement(); + if let Err(error) = builder.register_execution_results_checksum(execution_results_checksum) + { + error!(%block_hash, %error, "BlockSynchronizer: failed to apply execution results checksum"); + } + } + + fn execution_results_fetched( + &mut self, + effect_builder: EffectBuilder, + block_hash: BlockHash, + result: FetchResult, + ) -> Effects + where + REv: From + Send, + { + debug!(%block_hash, "execution_results_fetched"); + let (maybe_value_or_chunk, maybe_peer_id) = match result { + Ok(FetchedData::FromPeer { item, peer }) => { + debug!( + "BlockSynchronizer: fetched execution results {} from peer {}", + item.block_hash(), + peer + ); + (Some(item), Some(peer)) + } + Ok(FetchedData::FromStorage { item }) => (Some(item), None), + Err(err) => { + debug!(%err, "BlockSynchronizer: failed to fetch execution results or chunk"); + if err.is_peer_fault() { + (None, Some(*err.peer())) + } else { + (None, None) + } + } + }; + debug!( + has_value_or_chunk = maybe_value_or_chunk.is_some(), + ?maybe_peer_id, + "execution_results_fetched" + ); + + if let Some(builder) = &mut self.historical { + if builder.block_hash() != block_hash { + debug!(%block_hash, "BlockSynchronizer: not currently synchronizing block"); + return Effects::new(); + } + match maybe_value_or_chunk { + None => { + 
debug!(%block_hash, "execution_results_fetched: No maybe_value_or_chunk"); + if let Some(peer_id) = maybe_peer_id { + builder.demote_peer(peer_id); + } + if builder.waiting_for_execution_results() { + builder.latch_decrement(); + } + } + Some(value_or_chunk) => { + // due to reasons, the stitched back together execution effects need to be saved + // to disk here, when the last chunk is collected. + // we expect a response back, which will crank the block builder for this block + // to the next state. + debug!( + %value_or_chunk, + "execution_results_fetched" + ); + match builder.register_fetched_execution_results(maybe_peer_id, *value_or_chunk) + { + Ok(Some(execution_results)) => { + debug!(%block_hash, "execution_results_fetched: putting execution results to storage"); + let (block_height, era_id) = match builder.block_height_and_era() { + Some(value) => value, + None => { + error!( + %block_hash, + "BlockSynchronizer: failed to apply execution results or \ + chunk due to missing block height and era id" + ); + return Effects::new(); + } + }; + return effect_builder + .put_execution_artifacts_to_storage( + block_hash, + block_height, + era_id, + execution_results, + ) + .event(move |()| Event::ExecutionResultsStored(block_hash)); + } + Ok(None) => { + debug!(%block_hash, "execution_results_fetched: Ok(None)"); + } + Err(error) => { + error!(%block_hash, %error, "BlockSynchronizer: failed to apply execution results or chunk"); + } + } + } + } + } + Effects::new() + } + + fn execution_results_stored(&mut self, block_hash: BlockHash) { + if let Some(builder) = &mut self.historical { + if builder.block_hash() != block_hash { + debug!(%block_hash, "BlockSynchronizer: register_execution_results_stored: not currently synchronizing block"); + } else { + builder.latch_decrement(); + if let Err(error) = builder.register_execution_results_stored_notification() { + error!(%block_hash, %error, "BlockSynchronizer: register_execution_results_stored: failed to apply stored 
execution results"); + } + } + } + } + + fn transaction_fetched( + &mut self, + block_hash: BlockHash, + fetched_txn: FetchedData, + ) { + let (txn, maybe_peer) = match fetched_txn { + FetchedData::FromPeer { item, peer } => (item, Some(peer)), + FetchedData::FromStorage { item } => (item, None), + }; + + if let Some(builder) = self.get_builder(block_hash, false) { + if let Err(error) = builder.register_deploy(txn.fetch_id(), maybe_peer) { + error!(%block_hash, %error, "BlockSynchronizer: failed to apply deploy"); + } + } + } + + fn disqualify_peer(&mut self, node_id: NodeId) { + if let Some(builder) = &mut self.forward { + builder.disqualify_peer(node_id); + } + if let Some(builder) = &mut self.historical { + builder.disqualify_peer(node_id); + } + } + + fn progress(&self, builder: &BlockBuilder) -> BlockSynchronizerProgress { + if builder.is_finished() { + match builder.block_height_and_era() { + None => { + error!("BlockSynchronizer: finished builder should have block height and era") + } + Some((block_height, era_id)) => { + return BlockSynchronizerProgress::Synced( + builder.block_hash(), + block_height, + era_id, + ); + } + } + } + + if builder.is_executing() { + match builder.block_height_and_era() { + None => { + error!("BlockSynchronizer: finished builder should have block height and era") + } + Some((block_height, era_id)) => { + // If the block is currently being executed, we will not + // purge the builder and instead wait for it to be + // executed and marked complete. 
+ if builder.is_executing() { + return BlockSynchronizerProgress::Executing( + builder.block_hash(), + block_height, + era_id, + ); + } + } + } + } + + let last_progress_time = builder.last_progress_time().max( + self.global_sync + .last_progress() + .unwrap_or_else(Timestamp::zero), + ); + + BlockSynchronizerProgress::Syncing( + builder.block_hash(), + builder.block_height(), + last_progress_time, + ) + } + + fn status(&self) -> BlockSynchronizerStatus { + BlockSynchronizerStatus::new( + self.historical.as_ref().map(|builder| { + BlockSyncStatus::new( + builder.block_hash(), + builder.block_height(), + builder.block_acquisition_state().to_string(), + ) + }), + self.forward.as_ref().map(|builder| { + BlockSyncStatus::new( + builder.block_hash(), + builder.block_height(), + builder.block_acquisition_state().to_string(), + ) + }), + ) + } + + fn get_builder( + &mut self, + block_hash: BlockHash, + decrement_latch: bool, + ) -> Option<&mut BlockBuilder> { + match (&mut self.forward, &mut self.historical) { + (Some(builder), _) | (_, Some(builder)) if builder.block_hash() == block_hash => { + if decrement_latch { + builder.latch_decrement(); + } + Some(builder) + } + _ => { + trace!(%block_hash, "BlockSynchronizer: not currently synchronizing block"); + None + } + } + } +} + +impl InitializedComponent for BlockSynchronizer +where + REv: ReactorEvent + From>, +{ + fn state(&self) -> &ComponentState { + &self.state + } + + fn set_state(&mut self, new_state: ComponentState) { + info!( + ?new_state, + name = >::name(self), + "component state changed" + ); + + self.state = new_state; + } +} + +impl Component for BlockSynchronizer { + type Event = Event; + + fn handle_event( + &mut self, + effect_builder: EffectBuilder, + rng: &mut NodeRng, + event: Self::Event, + ) -> Effects { + match &self.state { + ComponentState::Fatal(msg) => { + error!( + msg, + ?event, + name = >::name(self), + "should not handle this event when this component has fatal error" + ); + Effects::new() + 
} + ComponentState::Uninitialized => { + warn!( + ?event, + name = >::name(self), + "should not handle this event when component is uninitialized" + ); + Effects::new() + } + ComponentState::Initializing => { + match event { + Event::Initialize => { + >::set_state( + self, + ComponentState::Initialized, + ); + // start dishonest peer management on initialization + effect_builder + .set_timeout(self.config.disconnect_dishonest_peers_interval.into()) + .event(move |_| { + Event::Request(BlockSynchronizerRequest::DishonestPeers) + }) + } + Event::Request(_) + | Event::DisconnectFromPeer(_) + | Event::MadeFinalizedBlock { .. } + | Event::MarkBlockExecutionEnqueued(_) + | Event::MarkBlockExecuted(_) + | Event::MarkBlockCompleted { .. } + | Event::BlockHeaderFetched(_) + | Event::BlockFetched(_) + | Event::ApprovalsHashesFetched(_) + | Event::FinalitySignatureFetched(_) + | Event::SyncLeapFetched(_) + | Event::GlobalStateSynced { .. } + | Event::GotExecutionResultsChecksum { .. } + | Event::DeployFetched { .. } + | Event::ExecutionResultsFetched { .. 
} + | Event::ExecutionResultsStored(_) + | Event::AccumulatedPeers(_, _) + | Event::NetworkPeers(_, _) + | Event::GlobalStateSynchronizer(_) => { + warn!( + ?event, + name = >::name(self), + "should not handle this event when component is pending initialization" + ); + Effects::new() + } + } + } + ComponentState::Initialized => match event { + Event::Initialize => { + error!( + ?event, + name = >::name(self), + "component already initialized" + ); + Effects::new() + } + Event::Request(request) => match request { + // the rpc and rest servers include block sync data on their status responses + BlockSynchronizerRequest::Status { responder } => { + responder.respond(self.status()).ignore() + } + // prompts for what data (if any) is needed next to acquire block(s) being + // sync'd + BlockSynchronizerRequest::NeedNext => self.need_next(effect_builder, rng), + // this component is periodically asked for any peers that have provided false + // data (if any) which are then disconnected from + BlockSynchronizerRequest::DishonestPeers => { + let mut effects: Effects = self + .dishonest_peers() + .into_iter() + .flat_map(|node_id| { + effect_builder + .announce_block_peer_with_justification( + node_id, + BlocklistJustification::DishonestPeer, + ) + .ignore() + }) + .collect(); + self.flush_dishonest_peers(); + effects.extend( + effect_builder + .set_timeout(self.config.disconnect_dishonest_peers_interval.into()) + .event(move |_| { + Event::Request(BlockSynchronizerRequest::DishonestPeers) + }), + ); + effects + } + + // this is a request that's separate from a typical block synchronizer flow; + // it's sent when we need to sync global states of block after an upgrade + // and its parent in order to check whether the validators have been + // changed by the upgrade + BlockSynchronizerRequest::SyncGlobalStates(mut global_states) => { + if let Some((block_hash, global_state_hash)) = global_states.pop() { + let global_states_clone = global_states.clone(); + effect_builder + 
.sync_global_state(block_hash, global_state_hash) + .result( + move |_| { + Event::Request(BlockSynchronizerRequest::SyncGlobalStates( + global_states_clone, + )) + }, + move |_| { + global_states.push((block_hash, global_state_hash)); + Event::Request(BlockSynchronizerRequest::SyncGlobalStates( + global_states, + )) + }, + ) + } else { + Effects::new() + } + } + }, + // tunnel event to global state synchronizer + // global_state_sync is a black box; we do not hook need next here + // global_state_sync signals the historical sync builder at the end of its process, + // and need next is then re-hooked to get the rest of the block + Event::GlobalStateSynchronizer(event) => { + let processed_event = match event { + GlobalStateSynchronizerEvent::GetPeers(_) => { + let peers = self.historical.as_ref().map_or_else(Vec::new, |builder| { + builder.peer_list().qualified_peers_up_to( + rng, + self.config.max_parallel_trie_fetches as usize, + ) + }); + GlobalStateSynchronizerEvent::GetPeers(peers) + } + event => event, + }; + reactor::wrap_effects( + Event::GlobalStateSynchronizer, + self.global_sync + .handle_event(effect_builder, rng, processed_event), + ) + } + // when a peer is disconnected from for any reason, disqualify peer + Event::DisconnectFromPeer(node_id) => { + self.disqualify_peer(node_id); + Effects::new() + } + Event::MarkBlockExecutionEnqueued(block_hash) => { + // when syncing a forward block the synchronizer considers it + // finished after it has been successfully enqueued for execution + self.register_block_execution_enqueued(&block_hash); + Effects::new() + } + Event::MarkBlockExecuted(block_hash) => { + // when syncing a forward block the synchronizer considers it + // synced after it has been successfully executed and marked + // complete in storage. 
+ self.register_block_executed(&block_hash); + Effects::new() + } + Event::MarkBlockCompleted { block_hash, is_new } => { + // when syncing an historical block, the synchronizer considers it + // finished after receiving confirmation that the complete block + // has been stored. + self.register_marked_complete(effect_builder, &block_hash, is_new) + } + + // --- each of the following events MUST return need next --- + + // for both historical and forward sync, the block header has been fetched + Event::BlockHeaderFetched(result) => { + self.block_header_fetched(result); + self.need_next(effect_builder, rng) + } + // for both historical and forward sync, the block body has been fetched + Event::BlockFetched(result) => { + self.block_fetched(result); + self.need_next(effect_builder, rng) + } + // for both historical and forward sync, a finality signature has been fetched + Event::FinalitySignatureFetched(result) => { + self.finality_signature_fetched(result); + self.need_next(effect_builder, rng) + } + // for both historical and forward sync, post-1.4 blocks track approvals hashes + // for the deploys they contain + Event::ApprovalsHashesFetched(result) => { + self.approvals_hashes_fetched(result); + self.need_next(effect_builder, rng) + } + Event::SyncLeapFetched(result) => { + self.sync_leap_fetched(result); + self.need_next(effect_builder, rng) + } + // we use the existence of n execution results checksum as an expedient way to + // determine if a block is post-1.4 + Event::GotExecutionResultsChecksum { block_hash, result } => { + self.got_execution_results_checksum(block_hash, result); + self.need_next(effect_builder, rng) + } + // historical sync needs to know that global state has been sync'd + Event::GlobalStateSynced { block_hash, result } => { + self.global_state_synced(block_hash, result); + self.need_next(effect_builder, rng) + } + // historical sync needs to know that execution results have been fetched + Event::ExecutionResultsFetched { block_hash, result 
} => { + let mut effects = + self.execution_results_fetched(effect_builder, block_hash, result); + effects.extend(self.need_next(effect_builder, rng)); + effects + } + // historical sync needs to know that execution effects have been stored + Event::ExecutionResultsStored(block_hash) => { + self.execution_results_stored(block_hash); + self.need_next(effect_builder, rng) + } + // for pre-1.5 blocks we use the legacy deploy fetcher, otherwise we use the deploy + // fetcher but the results of both are forwarded to this handler + Event::DeployFetched { block_hash, result } => { + match result { + Either::Left(Ok(fetched_legacy_deploy)) => { + let deploy_id = fetched_legacy_deploy.id(); + debug!(%block_hash, ?deploy_id, "BlockSynchronizer: fetched legacy deploy"); + self.transaction_fetched(block_hash, fetched_legacy_deploy.convert()) + } + Either::Right(Ok(fetched_txn)) => { + let txn_id = fetched_txn.id(); + debug!(%block_hash, %txn_id, "BlockSynchronizer: fetched transaction"); + self.transaction_fetched(block_hash, fetched_txn) + } + Either::Left(Err(error)) => { + if let Some(builder) = self.get_builder(block_hash, false) { + if builder.waiting_for_deploys() { + builder.latch_decrement(); + } + } + + debug!(%error, "BlockSynchronizer: failed to fetch legacy deploy"); + } + Either::Right(Err(error)) => { + if let Some(builder) = self.get_builder(block_hash, false) { + if builder.waiting_for_deploys() { + builder.latch_decrement(); + } + } + + debug!(%error, "BlockSynchronizer: failed to fetch deploy"); + } + }; + self.need_next(effect_builder, rng) + } + // fresh peers to apply (random sample from network) + Event::NetworkPeers(block_hash, peers) => { + debug!(%block_hash, "BlockSynchronizer: got {} peers from network", peers.len()); + self.peers_accumulated(block_hash, peers); + self.need_next(effect_builder, rng) + } + // fresh peers to apply (qualified peers from accumulator) + Event::AccumulatedPeers(block_hash, Some(peers)) => { + debug!(%block_hash, 
"BlockSynchronizer: got {} peers from accumulator", peers.len()); + self.peers_accumulated(block_hash, peers); + self.need_next(effect_builder, rng) + } + // no more peers available; periodically retry via need next... + // the node will likely get more peers over time and resume + Event::AccumulatedPeers(block_hash, None) => { + debug!(%block_hash, "BlockSynchronizer: got 0 peers from accumulator"); + self.peers_accumulated(block_hash, vec![]); + self.need_next(effect_builder, rng) + } + Event::MadeFinalizedBlock { block_hash, result } => { + // when syncing a forward block the node does not acquire + // global state and execution results from peers; instead + // the node attempts to execute the block to produce the + // global state and execution results and check the results + // first, the block it must be turned into a finalized block + // and then enqueued for execution. + self.register_made_finalized_block(&block_hash, result); + self.need_next(effect_builder, rng) + } + }, + } + } + + fn name(&self) -> &str { + COMPONENT_NAME + } +} + +impl ValidatorBoundComponent for BlockSynchronizer { + fn handle_validators( + &mut self, + effect_builder: EffectBuilder, + rng: &mut NodeRng, + ) -> Effects { + info!("BlockSynchronizer: handling updated validator matrix"); + if let Some(block_builder) = &mut self.forward { + block_builder.register_era_validator_weights(&self.validator_matrix); + } + if let Some(block_builder) = &mut self.historical { + block_builder.register_era_validator_weights(&self.validator_matrix); + } + self.need_next(effect_builder, rng) + } +} diff --git a/node/src/components/block_synchronizer/block_acquisition.rs b/node/src/components/block_synchronizer/block_acquisition.rs new file mode 100644 index 0000000000..b132b2fb95 --- /dev/null +++ b/node/src/components/block_synchronizer/block_acquisition.rs @@ -0,0 +1,1461 @@ +use std::{ + collections::{HashMap, HashSet}, + fmt::{self, Display, Formatter}, +}; + +use datasize::DataSize; +use 
derive_more::Display; +use tracing::{debug, error, info, trace, warn}; + +use casper_storage::block_store::types::ApprovalsHashes; +use casper_types::{ + execution::ExecutionResult, Block, BlockHash, BlockHeader, Digest, EraId, FinalitySignature, + ProtocolVersion, PublicKey, TransactionHash, TransactionId, +}; + +use crate::{ + components::block_synchronizer::{ + block_acquisition_action::BlockAcquisitionAction, + deploy_acquisition::TransactionAcquisition, peer_list::PeerList, + signature_acquisition::SignatureAcquisition, BlockAcquisitionError, + ExecutionResultsAcquisition, ExecutionResultsChecksum, + }, + types::{BlockExecutionResultsOrChunk, EraValidatorWeights, ExecutableBlock, SignatureWeight}, + NodeRng, +}; + +use super::deploy_acquisition::TransactionIdentifier; + +// BlockAcquisitionState is a milestone oriented state machine; it is always in a resting state +// indicating the last completed step, while attempting to acquire the necessary data to transition +// to the next resting state milestone. the start and end of the workflow is linear, but the +// middle steps conditionally branch depending upon if this is a historical block (needs execution +// state) or a block we intend to execute, and if the block body has one or more deploys. +// +// blocks always require a header & body and sufficient finality signatures; blocks may contain +// one or more deploys. if a block has any deploys, we must also acquire execution effects +// for the deploys in the block (we do this as a chunked aggregate), and for post 1.5 blocks +// we must also acquire approvals hashes (which correlate to which authorized account holders +// signed the deploys). +// +// there are two levels of finality, weak and strict. 
we first get the block header (which is +// the minimum amount of necessary information we need to function), and then attempt to acquire +// at least weak finality before doing further work acquiring data for a block, to avoid being +// tricked into wasting resources downloading bogus blocks. with at least weak finality, we can +// go about acquiring the rest of the block's required records relatively safely. if we have not +// acquired strong finality by the time we've downloaded everything else, we do another round +// of asking for remaining signatures before accepting the sync'd block. +// +// when acquiring data for a historical block, we want global state (always) and execution +// effects (if any). when acquiring sufficient data to execute a block, we do not acquire +// global state or execution effects. however, we still check for existence of an execution +// effect _checksum_ leaf in global state at the block's root hash as an expedient way to +// determine if a block was created post-1.5 +// +// note that fetchers are used to acquire the required records, which by default check local +// storage for existence and only ask peers if we don't already have the record being fetched +// similarly, we collect finality signatures during each state between HaveBlockHeader and +// HaveStrictFinalitySignatures inclusive, and therefore may have already acquired strict +// finality before we check for it at the very end. finally due to the trie store structure +// of global state, other than the first downloaded historical block we likely already have +// the vast majority of global state data locally. for these reasons, it is common for most +// blocks to transition thru the various states very quickly...particularly blocks without +// deploys. however, the first block downloaded or blocks with a lot of deploys and / or +// execution state delta can take arbitrarily longer on their relevant steps. 
+// +// similarly, it is possible that the peer set available to us to acquire this data can become +// partitioned. the block synchronizer will periodically attempt to refresh its peer list to +// mitigate this, but this strategy is less effective on small networks. we periodically +// reattempt until we succeed or the node shuts down, in which case: ¯\_(ツ)_/¯ +#[cfg_attr(doc, aquamarine::aquamarine)] +/// ```mermaid +/// flowchart TD +/// Initialized --> HaveBlockHeader +/// HaveBlockHeader --> HaveWeakFinalitySignatures +/// HaveWeakFinalitySignatures --> HaveBlock +/// HaveBlock --> B{is historical?} +/// B -->|Yes| HaveGlobalState +/// B -->|No| C +/// HaveGlobalState --> HaveAllExecutionResults +/// HaveAllExecutionResults --> A{is legacy block?} +/// A -->|Yes| C +/// A -->|No| HaveApprovalsHashes +/// HaveApprovalsHashes --> C{is block empty?} +/// C -->|Yes| HaveStrictFinalitySignatures +/// C -->|No| HaveAllDeploys +/// HaveAllDeploys --> HaveStrictFinalitySignatures +/// HaveStrictFinalitySignatures --> D{is historical?} +/// D -->|Yes| Complete +/// D -->|No| HaveFinalizedBlock +/// HaveFinalizedBlock --> Complete +/// ``` +#[derive(Clone, DataSize, Debug)] +pub(super) enum BlockAcquisitionState { + Initialized(BlockHash, SignatureAcquisition), + HaveBlockHeader(Box, SignatureAcquisition), + HaveWeakFinalitySignatures(Box, SignatureAcquisition), + HaveBlock(Box, SignatureAcquisition, TransactionAcquisition), + HaveGlobalState( + Box, + SignatureAcquisition, + TransactionAcquisition, + ExecutionResultsAcquisition, + ), + HaveAllExecutionResults( + Box, + SignatureAcquisition, + TransactionAcquisition, + ExecutionResultsChecksum, + ), + HaveApprovalsHashes(Box, SignatureAcquisition, TransactionAcquisition), + HaveAllDeploys(Box, SignatureAcquisition), + HaveStrictFinalitySignatures(Box, SignatureAcquisition), + // We keep the `Block` as well as the `FinalizedBlock` because the + // block is necessary to reach the `Complete` state and the finalized + // 
block is used to enqueue for execution. While the block would surely + // be stored by the time we get to this state, it would be inefficient + // to fetch it from storage again to transition to the `Complete` state, + // so it is retained. The downside is that the block is useful in its + // entirety only in the historical sync, and `HaveFinalizedBlock` along + // with execution are strictly forward sync states. Until a refactor splits + // the `Complete` states for the historical and forward cases, we need to + // keep the block around. + HaveExecutableBlock(Box, Box, bool), + // The `Complete` state needs the block itself in order to produce a meta + // block announcement in the historical sync flow. In the forward sync, + // only the block hash and height are necessary. Therefore, we retain the + // block fully in this state. + Complete(Box), + Failed(BlockHash, Option), +} + +impl Display for BlockAcquisitionState { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self { + BlockAcquisitionState::Initialized(block_hash, _) => { + write!(f, "initialized for: {}", block_hash) + } + BlockAcquisitionState::HaveBlockHeader(block_header, _) => write!( + f, + "have block header({}) for: {}", + block_header.height(), + block_header.block_hash() + ), + BlockAcquisitionState::HaveWeakFinalitySignatures(block_header, _) => write!( + f, + "have weak finality({}) for: {}", + block_header.height(), + block_header.block_hash() + ), + BlockAcquisitionState::HaveBlock(block, _, _) => write!( + f, + "have block body({}) for: {}", + block.height(), + block.hash() + ), + BlockAcquisitionState::HaveGlobalState(block, _, _, _) => write!( + f, + "have global state({}) for: {}", + block.height(), + block.hash() + ), + BlockAcquisitionState::HaveAllExecutionResults(block, _, _, _) => write!( + f, + "have execution results({}) for: {}", + block.height(), + block.hash() + ), + BlockAcquisitionState::HaveApprovalsHashes(block, _, _) => write!( + f, + "have approvals 
hashes({}) for: {}", + block.height(), + block.hash() + ), + BlockAcquisitionState::HaveAllDeploys(block, _) => { + write!(f, "have deploys({}) for: {}", block.height(), block.hash()) + } + BlockAcquisitionState::HaveStrictFinalitySignatures(block, _) => write!( + f, + "have strict finality({}) for: {}", + block.height(), + block.hash() + ), + BlockAcquisitionState::HaveExecutableBlock(block, _, _) => write!( + f, + "have finalized block({}) for: {}", + block.height(), + *block.hash() + ), + BlockAcquisitionState::Complete(block) => { + write!( + f, + "have complete block({}) for: {}", + block.height(), + *block.hash() + ) + } + BlockAcquisitionState::Failed(block_hash, maybe_block_height) => { + write!(f, "fatal({:?}) for: {}", maybe_block_height, block_hash) + } + } + } +} + +impl BlockAcquisitionState { + pub(crate) fn block_hash(&self) -> BlockHash { + match self { + BlockAcquisitionState::Initialized(block_hash, _) + | BlockAcquisitionState::Failed(block_hash, _) => *block_hash, + BlockAcquisitionState::HaveBlockHeader(block_header, _) + | BlockAcquisitionState::HaveWeakFinalitySignatures(block_header, _) => { + block_header.block_hash() + } + BlockAcquisitionState::HaveBlock(block, _, _) + | BlockAcquisitionState::HaveGlobalState(block, _, _, _) + | BlockAcquisitionState::HaveAllExecutionResults(block, _, _, _) + | BlockAcquisitionState::HaveApprovalsHashes(block, _, _) + | BlockAcquisitionState::HaveAllDeploys(block, _) + | BlockAcquisitionState::HaveStrictFinalitySignatures(block, _) + | BlockAcquisitionState::HaveExecutableBlock(block, ..) + | BlockAcquisitionState::Complete(block) => *block.hash(), + } + } + + pub(crate) fn maybe_block(&self) -> Option> { + match self { + BlockAcquisitionState::Initialized(..) + | BlockAcquisitionState::Failed(..) + | BlockAcquisitionState::HaveBlockHeader(..) + | BlockAcquisitionState::HaveWeakFinalitySignatures(..) 
=> None, + BlockAcquisitionState::HaveAllDeploys(block, _) + | BlockAcquisitionState::HaveStrictFinalitySignatures(block, _) + | BlockAcquisitionState::HaveBlock(block, _, _) + | BlockAcquisitionState::HaveGlobalState(block, _, _, _) + | BlockAcquisitionState::HaveAllExecutionResults(block, _, _, _) + | BlockAcquisitionState::HaveApprovalsHashes(block, _, _) + | BlockAcquisitionState::HaveExecutableBlock(block, _, _) + | BlockAcquisitionState::Complete(block) => Some(block.clone()), + } + } +} + +#[derive(Clone, Copy, Debug, Display, PartialEq)] +#[must_use] +pub(super) enum Acceptance { + #[display(fmt = "had it")] + HadIt, + #[display(fmt = "needed it")] + NeededIt, +} + +pub(super) struct RegisterExecResultsOutcome { + pub(super) exec_results: Option>, + pub(super) acceptance: Option, +} + +#[cfg_attr(doc, aquamarine::aquamarine)] +/// ```mermaid +/// sequenceDiagram +/// Note right of Initialized: need next +/// Initialized ->> BlockHeader: get header +/// BlockHeader ->> WeakFinalitySignatures: get at least weak finality +/// WeakFinalitySignatures ->> Block: get block +/// Block -->> GlobalState: is historical? +/// GlobalState ->> AllExecutionResults: get execution results +/// AllExecutionResults -->> ApprovalsHashes: is not legacy? +/// AllExecutionResults -->> AllDeploys: is legacy? +/// ApprovalsHashes ->> AllDeploys: get deploys +/// GlobalState -->> StrictFinalitySignatures: is block empty? +/// Block -->> AllDeploys: is not historical and is not empty? +/// Block -->> StrictFinalitySignatures: is not historical and is empty? 
+/// AllDeploys ->> StrictFinalitySignatures: get strict finality +/// StrictFinalitySignatures ->> FinalizedBlock: is forward and finalized block created +/// StrictFinalitySignatures -->> Complete: is historical and block marked complete +/// FinalizedBlock ->> Complete: is forward and block executed +/// ``` +impl BlockAcquisitionState { + // the BlockAcquisitionState states and their valid transitions follow: + // + // Initialized -> need block header + // + // HaveBlockHeader -> if no era validators -> need era validator weights + // else need weak finality + // + // HaveWeakFinalitySignatures -> need block + // + // HaveBlock -> if should_fetch_execution_state -> need global state + // else if block has deploys need approvals hashes + // else if no deploys need strict finality + // + // HaveGlobalState -> if should_fetch_execution_state + // if block has deploys -> + // if have execution effects -> need approvals hashes + // else -> need execution effects + // else -> need strict finality + // else -> error + // + // HaveAllExecutionResults -> if should_fetch_execution_state + // if approvals checkable -> need approvals hashes + // else -> need deploys + // else error + // + // HaveApprovalsHashes -> need deploys + // + // HaveDeploys -> need strict finality + // + // HaveStrictFinalitySignatures -> if should_fetch_execution_state -> need to mark block + // complete else need to convert block to FinalizedBlock + // + // HaveFinalizedBlock -> need enqueue block for execution + // + // Complete -> Complete (success / terminal) + // + // Failed -> Failed (terminal) + // + /// Determines what action should be taken to acquire the next needed block related data. 
+ pub(super) fn next_action( + &mut self, + peer_list: &PeerList, + validator_weights: &EraValidatorWeights, + rng: &mut NodeRng, + is_historical: bool, + max_simultaneous_peers: u8, + ) -> Result { + // self is the resting state we are in, ret is the next action that should be taken + // to acquire the necessary data to get us to the next step (if any), or an error + let ret = match self { + BlockAcquisitionState::Initialized(block_hash, ..) => Ok( + BlockAcquisitionAction::block_header(peer_list, rng, *block_hash), + ), + BlockAcquisitionState::HaveBlockHeader(block_header, signatures) => { + Ok(signatures_from_missing_validators( + validator_weights, + signatures, + max_simultaneous_peers, + peer_list, + rng, + block_header.era_id(), + block_header.block_hash(), + )) + } + BlockAcquisitionState::HaveWeakFinalitySignatures(header, _) => Ok( + BlockAcquisitionAction::block_body(peer_list, rng, header.block_hash()), + ), + BlockAcquisitionState::HaveBlock(block, signatures, transactions) => { + if is_historical { + Ok(BlockAcquisitionAction::global_state( + peer_list, + rng, + *block.hash(), + *block.state_root_hash(), + )) + } else if transactions.needs_transaction() { + Ok(BlockAcquisitionAction::approvals_hashes( + block, peer_list, rng, + )) + } else if signatures.has_sufficient_finality(is_historical, true) { + Ok(BlockAcquisitionAction::switch_to_have_sufficient_finality( + *block.hash(), + block.height(), + )) + } else { + Ok(signatures_from_missing_validators( + validator_weights, + signatures, + max_simultaneous_peers, + peer_list, + rng, + block.era_id(), + *block.hash(), + )) + } + } + BlockAcquisitionState::HaveGlobalState( + block, + signatures, + deploy_state, + exec_results, + ) => { + if false == is_historical { + Err(BlockAcquisitionError::InvalidStateTransition) + } else if deploy_state.needs_transaction() { + BlockAcquisitionAction::maybe_execution_results( + block, + peer_list, + rng, + exec_results, + ) + } else if 
signatures.has_sufficient_finality(is_historical, true) { + Ok(BlockAcquisitionAction::switch_to_have_sufficient_finality( + *block.hash(), + block.height(), + )) + } else { + Ok(signatures_from_missing_validators( + validator_weights, + signatures, + max_simultaneous_peers, + peer_list, + rng, + block.era_id(), + *block.hash(), + )) + } + } + BlockAcquisitionState::HaveAllExecutionResults( + block, + signatures, + deploys, + checksum, + ) if is_historical => { + let is_checkable = checksum.is_checkable(); + signatures.set_is_legacy(!is_checkable); + if is_checkable { + Ok(BlockAcquisitionAction::approvals_hashes( + block, peer_list, rng, + )) + } else if let Some(needed_deploy) = deploys.next_needed_transaction() { + // If the checksum is not checkable, it means that we are dealing with a legacy + // deploys. If the required transactions are not deploys for + // this block it means that something went wrong. + let deploy_hash = match needed_deploy { + TransactionIdentifier::ByHash(TransactionHash::Deploy(deploy_hash)) => { + deploy_hash + } + _ => return Err(BlockAcquisitionError::InvalidTransactionType), + }; + debug!("BlockAcquisition: requesting missing deploy by hash"); + Ok(BlockAcquisitionAction::legacy_deploy_by_hash( + *block.hash(), + deploy_hash, + peer_list, + rng, + )) + } else { + Ok( + BlockAcquisitionAction::next_action_after_deploy_acquisition( + *block.hash(), + block.height(), + block.era_id(), + peer_list, + rng, + validator_weights, + signatures, + is_historical, + max_simultaneous_peers, + ), + ) + } + } + BlockAcquisitionState::HaveAllExecutionResults(_, _, _, _) => { + Err(BlockAcquisitionError::InvalidStateTransition) + } + BlockAcquisitionState::HaveApprovalsHashes(block, signatures, transactions) => { + if let Some(needed_txn_id) = transactions.next_needed_transaction() { + let txn_id = match needed_txn_id { + TransactionIdentifier::ByHash(txn_hash) => { + Err(BlockAcquisitionError::MissingApprovalsHashes(txn_hash)) + } + 
TransactionIdentifier::ById(txn_id) => Ok(txn_id), + }?; + debug!("BlockAcquisition: requesting missing transaction by ID"); + Ok(BlockAcquisitionAction::transaction_by_id( + *block.hash(), + txn_id, + peer_list, + rng, + )) + } else { + Ok( + BlockAcquisitionAction::next_action_after_deploy_acquisition( + *block.hash(), + block.height(), + block.era_id(), + peer_list, + rng, + validator_weights, + signatures, + is_historical, + max_simultaneous_peers, + ), + ) + } + } + BlockAcquisitionState::HaveAllDeploys(block, signatures) => { + if signatures.has_sufficient_finality(is_historical, true) { + Ok(BlockAcquisitionAction::switch_to_have_sufficient_finality( + *block.hash(), + block.height(), + )) + } else { + Ok(signatures_from_missing_validators( + validator_weights, + signatures, + max_simultaneous_peers, + peer_list, + rng, + block.era_id(), + *block.hash(), + )) + } + } + BlockAcquisitionState::HaveStrictFinalitySignatures(block, ..) => { + if is_historical { + // we have enough signatures; need to make sure we've stored the necessary bits + Ok(BlockAcquisitionAction::block_marked_complete( + *block.hash(), + block.height(), + )) + } else { + Ok(BlockAcquisitionAction::make_executable_block( + *block.hash(), + block.height(), + )) + } + } + BlockAcquisitionState::HaveExecutableBlock(block, executable_block, enqueued) => { + if is_historical { + Err(BlockAcquisitionError::InvalidStateTransition) + } else if *enqueued == false { + Ok(BlockAcquisitionAction::enqueue_block_for_execution( + block.hash(), + executable_block.clone(), + )) + } else { + // if the block was already enqueued for execution just wait, there's + // nothing else to do + Ok(BlockAcquisitionAction::need_nothing(*block.hash())) + } + } + BlockAcquisitionState::Complete(block) => { + Ok(BlockAcquisitionAction::need_nothing(*block.hash())) + } + BlockAcquisitionState::Failed(block_hash, ..) 
=> { + Ok(BlockAcquisitionAction::need_nothing(*block_hash)) + } + }; + ret + } + + /// The block height of the current block, if available. + pub(super) fn block_height(&self) -> Option { + match self { + BlockAcquisitionState::Initialized(..) | BlockAcquisitionState::Failed(..) => None, + BlockAcquisitionState::HaveBlockHeader(header, _) + | BlockAcquisitionState::HaveWeakFinalitySignatures(header, _) => Some(header.height()), + BlockAcquisitionState::HaveExecutableBlock(_, executable_block, _) => { + Some(executable_block.height) + } + BlockAcquisitionState::HaveBlock(block, _, _) + | BlockAcquisitionState::HaveGlobalState(block, ..) + | BlockAcquisitionState::HaveAllExecutionResults(block, _, _, _) + | BlockAcquisitionState::HaveApprovalsHashes(block, _, _) + | BlockAcquisitionState::HaveAllDeploys(block, ..) + | BlockAcquisitionState::HaveStrictFinalitySignatures(block, _) + | BlockAcquisitionState::Complete(block) => Some(block.height()), + } + } + + /// Register the block header for this block. + pub(super) fn register_block_header( + &mut self, + header: BlockHeader, + strict_finality_protocol_version: ProtocolVersion, + is_historical: bool, + ) -> Result, BlockAcquisitionError> { + let new_state = match self { + BlockAcquisitionState::Initialized(block_hash, signatures) => { + if header.block_hash() == *block_hash { + info!( + "BlockAcquisition: registering header for: {:?}, height: {}", + block_hash, + header.height() + ); + let is_legacy_block = is_historical + && header.protocol_version() < strict_finality_protocol_version; + signatures.set_is_legacy(is_legacy_block); + BlockAcquisitionState::HaveBlockHeader(Box::new(header), signatures.clone()) + } else { + return Err(BlockAcquisitionError::BlockHashMismatch { + expected: *block_hash, + actual: header.block_hash(), + }); + } + } + // we never ask for a block_header while in the following states, + // and thus it is erroneous to attempt to apply one + BlockAcquisitionState::HaveBlockHeader(..) 
+ | BlockAcquisitionState::HaveWeakFinalitySignatures(..) + | BlockAcquisitionState::HaveBlock(..) + | BlockAcquisitionState::HaveGlobalState(..) + | BlockAcquisitionState::HaveAllExecutionResults(..) + | BlockAcquisitionState::HaveAllDeploys(..) + | BlockAcquisitionState::HaveStrictFinalitySignatures(..) + | BlockAcquisitionState::HaveApprovalsHashes(..) + | BlockAcquisitionState::HaveExecutableBlock(..) + | BlockAcquisitionState::Failed(..) + | BlockAcquisitionState::Complete(..) => return Ok(None), + }; + self.set_state(new_state); + Ok(Some(Acceptance::NeededIt)) + } + + /// Register the block body for this block. + pub(super) fn register_block( + &mut self, + block: Block, + need_execution_state: bool, + ) -> Result, BlockAcquisitionError> { + let new_state = match self { + BlockAcquisitionState::HaveWeakFinalitySignatures(header, signatures) => { + let expected_block_hash = header.block_hash(); + let actual_block_hash = block.hash(); + if *actual_block_hash != expected_block_hash { + return Err(BlockAcquisitionError::BlockHashMismatch { + expected: expected_block_hash, + actual: *actual_block_hash, + }); + } + info!( + "BlockAcquisition: registering block for: {}", + header.block_hash() + ); + let transaction_hashes = match &block { + Block::V1(v1) => v1 + .deploy_and_transfer_hashes() + .copied() + .map(TransactionHash::from) + .collect(), + Block::V2(v2) => v2.all_transactions().copied().collect(), + }; + let deploy_acquisition = + TransactionAcquisition::new_by_hash(transaction_hashes, need_execution_state); + + BlockAcquisitionState::HaveBlock( + Box::new(block), + signatures.clone(), + deploy_acquisition, + ) + } + BlockAcquisitionState::Initialized(..) + | BlockAcquisitionState::HaveBlockHeader(..) + | BlockAcquisitionState::HaveBlock(..) + | BlockAcquisitionState::HaveGlobalState(..) + | BlockAcquisitionState::HaveAllExecutionResults(..) + | BlockAcquisitionState::HaveAllDeploys(..) + | BlockAcquisitionState::HaveStrictFinalitySignatures(..) 
+ | BlockAcquisitionState::HaveApprovalsHashes(..) + | BlockAcquisitionState::HaveExecutableBlock(..) + | BlockAcquisitionState::Failed(..) + | BlockAcquisitionState::Complete(..) => { + return Ok(None); + } + }; + self.set_state(new_state); + Ok(Some(Acceptance::NeededIt)) + } + + /// Advance acquisition state to HaveStrictFinality. + pub(super) fn switch_to_have_strict_finality( + &mut self, + block_hash: BlockHash, + is_historical: bool, + ) -> Result<(), BlockAcquisitionError> { + if block_hash != self.block_hash() { + return Err(BlockAcquisitionError::BlockHashMismatch { + expected: self.block_hash(), + actual: block_hash, + }); + } + let maybe_new_state = match self { + BlockAcquisitionState::HaveBlock(block, acquired_signatures, ..) + | BlockAcquisitionState::HaveGlobalState(block, acquired_signatures, ..) + | BlockAcquisitionState::HaveAllDeploys(block, acquired_signatures) + | BlockAcquisitionState::HaveApprovalsHashes(block, acquired_signatures, ..) => { + if acquired_signatures.has_sufficient_finality(is_historical, true) { + Some(BlockAcquisitionState::HaveStrictFinalitySignatures( + block.clone(), + acquired_signatures.clone(), + )) + } else { + return Err(BlockAcquisitionError::InvalidStateTransition); + } + } + BlockAcquisitionState::HaveBlockHeader(..) + | BlockAcquisitionState::HaveAllExecutionResults(..) + | BlockAcquisitionState::HaveStrictFinalitySignatures(..) + | BlockAcquisitionState::HaveWeakFinalitySignatures(..) + | BlockAcquisitionState::Initialized(..) + | BlockAcquisitionState::Failed(..) + | BlockAcquisitionState::HaveExecutableBlock(..) + | BlockAcquisitionState::Complete(..) => None, + }; + if let Some(new_state) = maybe_new_state { + self.set_state(new_state); + }; + Ok(()) + } + + /// Register a finality signature as pending for this block. 
+ pub(super) fn register_finality_signature_pending(&mut self, validator: PublicKey) { + match self { + BlockAcquisitionState::HaveBlockHeader(_, acquired_signatures) + | BlockAcquisitionState::HaveBlock(_, acquired_signatures, ..) + | BlockAcquisitionState::HaveGlobalState(_, acquired_signatures, ..) + | BlockAcquisitionState::HaveApprovalsHashes(_, acquired_signatures, ..) + | BlockAcquisitionState::HaveAllExecutionResults(_, acquired_signatures, ..) + | BlockAcquisitionState::HaveAllDeploys(_, acquired_signatures) + | BlockAcquisitionState::HaveStrictFinalitySignatures(_, acquired_signatures) + | BlockAcquisitionState::HaveWeakFinalitySignatures(_, acquired_signatures) => { + acquired_signatures.register_pending(validator); + } + BlockAcquisitionState::Initialized(..) + | BlockAcquisitionState::HaveExecutableBlock(..) + | BlockAcquisitionState::Failed(..) + | BlockAcquisitionState::Complete(..) => {} + }; + } + + pub(super) fn actively_acquiring_signatures(&self, is_historical: bool) -> bool { + match self { + BlockAcquisitionState::HaveBlockHeader(..) => true, + BlockAcquisitionState::Initialized(..) + | BlockAcquisitionState::HaveWeakFinalitySignatures(..) + | BlockAcquisitionState::HaveStrictFinalitySignatures(..) + | BlockAcquisitionState::HaveExecutableBlock(..) + | BlockAcquisitionState::Failed(..) + | BlockAcquisitionState::Complete(..) 
=> false, + BlockAcquisitionState::HaveBlock(_, acquired_signatures, acquired_transactions) => { + !is_historical + && acquired_transactions.needs_transaction() == false + && acquired_signatures.signature_weight() != SignatureWeight::Strict + } + BlockAcquisitionState::HaveGlobalState( + _, + acquired_signatures, + acquired_deploys, + .., + ) => { + acquired_deploys.needs_transaction() == false + && acquired_signatures.signature_weight() != SignatureWeight::Strict + } + BlockAcquisitionState::HaveApprovalsHashes( + _, + acquired_signatures, + acquired_deploys, + ) => { + acquired_deploys.needs_transaction() == false + && acquired_signatures.signature_weight() != SignatureWeight::Strict + } + BlockAcquisitionState::HaveAllExecutionResults( + _, + acquired_signatures, + acquired_deploys, + .., + ) => { + acquired_signatures.is_legacy() + && acquired_deploys.needs_transaction() == false + && acquired_signatures.signature_weight() != SignatureWeight::Strict + } + BlockAcquisitionState::HaveAllDeploys(_, acquired_signatures) => { + acquired_signatures.signature_weight() != SignatureWeight::Strict + } + } + } + + /// Register a finality signature for this block. + pub(super) fn register_finality_signature( + &mut self, + signature: FinalitySignature, + validator_weights: &EraValidatorWeights, + is_historical: bool, + ) -> Result, BlockAcquisitionError> { + // we will accept finality signatures we don't yet have while in every state other than + // Initialized and Failed. However, it can only cause a state transition when we + // are in a resting state that needs weak finality or strict finality. 
+ let cloned_sig = signature.clone(); + let signer = signature.public_key().clone(); + let acceptance: Acceptance; + let maybe_block_hash: Option; + let currently_acquiring_sigs = self.actively_acquiring_signatures(is_historical); + let maybe_new_state: Option = match self { + BlockAcquisitionState::HaveBlockHeader(header, acquired_signatures) => { + // we are attempting to acquire at least ~1/3 signature weight before + // committing to doing non-trivial work to acquire this block + // thus the primary thing we are doing in this state is accumulating sigs. + // We also want to ensure we've tried at least once to fetch every potential + // signature. + maybe_block_hash = Some(header.block_hash()); + acceptance = acquired_signatures.apply_signature(signature, validator_weights); + if acquired_signatures.has_sufficient_finality(is_historical, false) { + Some(BlockAcquisitionState::HaveWeakFinalitySignatures( + header.clone(), + acquired_signatures.clone(), + )) + } else { + None + } + } + BlockAcquisitionState::HaveBlock(block, acquired_signatures, acquired_transactions) => { + maybe_block_hash = Some(*block.hash()); + acceptance = acquired_signatures.apply_signature(signature, validator_weights); + if !is_historical + && acquired_transactions.needs_transaction() == false + && acquired_signatures.has_sufficient_finality(is_historical, true) + { + // When syncing a forward block, if we don't need deploys and have all required + // signatures, advance the state + Some(BlockAcquisitionState::HaveStrictFinalitySignatures( + block.clone(), + acquired_signatures.clone(), + )) + } else { + // Otherwise stay in HaveBlock to allow fetching for the next bit of data + None + } + } + BlockAcquisitionState::HaveGlobalState( + block, + acquired_signatures, + acquired_deploys, + .., + ) => { + maybe_block_hash = Some(*block.hash()); + acceptance = acquired_signatures.apply_signature(signature, validator_weights); + if !acquired_deploys.needs_transaction() + && 
acquired_signatures.has_sufficient_finality(is_historical, true) + { + Some(BlockAcquisitionState::HaveStrictFinalitySignatures( + block.clone(), + acquired_signatures.clone(), + )) + } else { + None + } + } + BlockAcquisitionState::HaveApprovalsHashes(block, acquired_signatures, ..) => { + maybe_block_hash = Some(*block.hash()); + acceptance = acquired_signatures.apply_signature(signature, validator_weights); + None + } + BlockAcquisitionState::HaveAllExecutionResults( + block, + acquired_signatures, + acquired_deploys, + .., + ) => { + maybe_block_hash = Some(*block.hash()); + acceptance = acquired_signatures.apply_signature(signature, validator_weights); + if acquired_signatures.is_legacy() + && acquired_deploys.needs_transaction() == false + && acquired_signatures.has_sufficient_finality(is_historical, true) + { + Some(BlockAcquisitionState::HaveStrictFinalitySignatures( + block.clone(), + acquired_signatures.clone(), + )) + } else { + None + } + } + BlockAcquisitionState::HaveAllDeploys(block, acquired_signatures) => { + maybe_block_hash = Some(*block.hash()); + acceptance = acquired_signatures.apply_signature(signature, validator_weights); + if acquired_signatures.has_sufficient_finality(is_historical, true) { + Some(BlockAcquisitionState::HaveStrictFinalitySignatures( + block.clone(), + acquired_signatures.clone(), + )) + } else { + None + } + } + BlockAcquisitionState::HaveStrictFinalitySignatures(block, acquired_signatures) => { + maybe_block_hash = Some(*block.hash()); + acceptance = acquired_signatures.apply_signature(signature, validator_weights); + None + } + BlockAcquisitionState::HaveWeakFinalitySignatures(header, acquired_signatures) => { + // 1: In HaveWeakFinalitySignatures we are waiting to acquire the block body + // 2: In HaveStrictFinalitySignatures we are in the happy path resting state + // and have enough signatures, but not necessarily all signatures and + // will accept late comers while resting in this state + maybe_block_hash = 
Some(header.block_hash()); + acceptance = acquired_signatures.apply_signature(signature, validator_weights); + None + } + BlockAcquisitionState::Initialized(..) + | BlockAcquisitionState::HaveExecutableBlock(..) + | BlockAcquisitionState::Failed(..) + | BlockAcquisitionState::Complete(..) => return Ok(None), + }; + let ret = currently_acquiring_sigs.then_some(acceptance); + info!( + signature=%cloned_sig, + ?ret, + "BlockAcquisition: registering finality signature for: {}", + if let Some(block_hash) = maybe_block_hash { + block_hash.to_string() + } else { + "unknown block".to_string() + } + ); + self.log_finality_signature_acceptance(&maybe_block_hash, &signer, ret); + if let Some(new_state) = maybe_new_state { + self.set_state(new_state); + } + Ok(ret) + } + + /// Register the approvals hashes for this block. + pub(super) fn register_approvals_hashes( + &mut self, + approvals_hashes: &ApprovalsHashes, + need_execution_state: bool, + ) -> Result, BlockAcquisitionError> { + let new_state = match self { + BlockAcquisitionState::HaveBlock(block, signatures, acquired) + if !need_execution_state => + { + info!( + "BlockAcquisition: registering approvals hashes for: {}", + block.hash() + ); + acquired.apply_approvals_hashes(approvals_hashes)?; + BlockAcquisitionState::HaveApprovalsHashes( + block.clone(), + signatures.clone(), + acquired.clone(), + ) + } + + BlockAcquisitionState::HaveAllExecutionResults(block, signatures, transactions, _) + if need_execution_state => + { + transactions.apply_approvals_hashes(approvals_hashes)?; + info!( + "BlockAcquisition: registering approvals hashes for: {}", + block.hash() + ); + BlockAcquisitionState::HaveApprovalsHashes( + block.clone(), + signatures.clone(), + transactions.clone(), + ) + } + // we never ask for deploys in the following states, and thus it is erroneous to attempt + // to apply any + BlockAcquisitionState::HaveBlock(..) + | BlockAcquisitionState::HaveGlobalState(..) + | BlockAcquisitionState::Initialized(..) 
+ | BlockAcquisitionState::HaveBlockHeader(..) + | BlockAcquisitionState::HaveWeakFinalitySignatures(..) + | BlockAcquisitionState::HaveAllExecutionResults(..) + | BlockAcquisitionState::HaveAllDeploys(..) + | BlockAcquisitionState::HaveStrictFinalitySignatures(..) + | BlockAcquisitionState::HaveApprovalsHashes(_, _, _) + | BlockAcquisitionState::HaveExecutableBlock(..) + | BlockAcquisitionState::Failed(..) + | BlockAcquisitionState::Complete(..) => { + return Ok(None); + } + }; + self.set_state(new_state); + Ok(Some(Acceptance::NeededIt)) + } + + /// Register global state for this block. + pub(super) fn register_global_state( + &mut self, + root_hash: Digest, + need_execution_state: bool, + ) -> Result<(), BlockAcquisitionError> { + let new_state = match self { + BlockAcquisitionState::HaveBlock(block, signatures, transactions) + if need_execution_state => + { + info!( + "BlockAcquisition: registering global state for: {}", + block.hash() + ); + if block.state_root_hash() == &root_hash { + let block_hash = *block.hash(); + BlockAcquisitionState::HaveGlobalState( + block.clone(), + signatures.clone(), + transactions.clone(), + ExecutionResultsAcquisition::Needed { block_hash }, + ) + } else { + return Err(BlockAcquisitionError::RootHashMismatch { + expected: *block.state_root_hash(), + actual: root_hash, + }); + } + } + // we never ask for global state in the following states, and thus it is erroneous to + // attempt to apply any + BlockAcquisitionState::HaveBlock(..) + | BlockAcquisitionState::Initialized(..) + | BlockAcquisitionState::HaveBlockHeader(..) + | BlockAcquisitionState::HaveWeakFinalitySignatures(..) + | BlockAcquisitionState::HaveGlobalState(..) + | BlockAcquisitionState::HaveAllExecutionResults(..) + | BlockAcquisitionState::HaveAllDeploys(..) + | BlockAcquisitionState::HaveStrictFinalitySignatures(..) + | BlockAcquisitionState::HaveApprovalsHashes(..) + | BlockAcquisitionState::HaveExecutableBlock(..) + | BlockAcquisitionState::Failed(..) 
+ | BlockAcquisitionState::Complete(..) => { + return Ok(()); + } + }; + self.set_state(new_state); + Ok(()) + } + + /// Register execution results checksum for this block. + pub(super) fn register_execution_results_checksum( + &mut self, + execution_results_checksum: ExecutionResultsChecksum, + need_execution_state: bool, + ) -> Result<(), BlockAcquisitionError> { + debug!(state=%self, need_execution_state, "BlockAcquisitionState: register_execution_results_checksum"); + match self { + BlockAcquisitionState::HaveGlobalState( + block, + _, + _, + acq @ ExecutionResultsAcquisition::Needed { .. }, + ) if need_execution_state => { + info!( + "BlockAcquisition: registering execution results hash for: {}", + block.hash() + ); + *acq = acq + .clone() + .apply_checksum(execution_results_checksum) + .map_err(BlockAcquisitionError::ExecutionResults)?; + } + BlockAcquisitionState::HaveAllDeploys(..) + | BlockAcquisitionState::HaveGlobalState(..) + | BlockAcquisitionState::HaveBlock(..) + | BlockAcquisitionState::Initialized(..) + | BlockAcquisitionState::HaveBlockHeader(..) + | BlockAcquisitionState::HaveWeakFinalitySignatures(..) + | BlockAcquisitionState::HaveAllExecutionResults(..) + | BlockAcquisitionState::HaveStrictFinalitySignatures(..) + | BlockAcquisitionState::HaveApprovalsHashes(..) + | BlockAcquisitionState::HaveExecutableBlock(..) + | BlockAcquisitionState::Failed(..) + | BlockAcquisitionState::Complete(..) => { + return Ok(()); + } + }; + Ok(()) + } + + /// Register execution results or chunk for this block. 
+ pub(super) fn register_execution_results_or_chunk( + &mut self, + block_execution_results_or_chunk: BlockExecutionResultsOrChunk, + need_execution_state: bool, + ) -> Result { + debug!(state=%self, need_execution_state, + block_execution_results_or_chunk=%block_execution_results_or_chunk, + "register_execution_results_or_chunk"); + let (new_state, maybe_exec_results, acceptance) = match self { + BlockAcquisitionState::HaveGlobalState( + block, + signatures, + deploys, + exec_results_acq, + ) if need_execution_state => { + info!( + "BlockAcquisition: registering execution result or chunk for: {}", + block.hash() + ); + let transaction_hashes = match block.as_ref() { + Block::V1(v1) => v1 + .deploy_and_transfer_hashes() + .copied() + .map(TransactionHash::from) + .collect(), + Block::V2(v2) => v2.all_transactions().copied().collect(), + }; + match exec_results_acq + .clone() + .apply_block_execution_results_or_chunk( + block_execution_results_or_chunk, + transaction_hashes, + ) { + Ok((new_acquisition, acceptance)) => match new_acquisition { + ExecutionResultsAcquisition::Needed { .. } + | ExecutionResultsAcquisition::Pending { .. } => { + debug!("apply_block_execution_results_or_chunk: Needed | Pending"); + return Ok(RegisterExecResultsOutcome { + exec_results: None, + acceptance: Some(acceptance), + }); + } + ExecutionResultsAcquisition::Complete { ref results, .. } => { + debug!("apply_block_execution_results_or_chunk: Complete"); + let new_state = BlockAcquisitionState::HaveGlobalState( + block.clone(), + signatures.clone(), + deploys.clone(), + new_acquisition.clone(), + ); + let maybe_exec_results = Some(results.clone()); + (new_state, maybe_exec_results, acceptance) + } + ExecutionResultsAcquisition::Acquiring { .. 
} => { + debug!("apply_block_execution_results_or_chunk: Acquiring"); + let new_state = BlockAcquisitionState::HaveGlobalState( + block.clone(), + signatures.clone(), + deploys.clone(), + new_acquisition, + ); + let maybe_exec_results = None; + (new_state, maybe_exec_results, acceptance) + } + }, + Err(error) => { + warn!(%error, "failed to apply execution results"); + return Err(BlockAcquisitionError::ExecutionResults(error)); + } + } + } + BlockAcquisitionState::HaveAllDeploys(..) + | BlockAcquisitionState::HaveGlobalState(..) + | BlockAcquisitionState::HaveBlock(..) + | BlockAcquisitionState::Initialized(..) + | BlockAcquisitionState::HaveBlockHeader(..) + | BlockAcquisitionState::HaveWeakFinalitySignatures(..) + | BlockAcquisitionState::HaveAllExecutionResults(..) + | BlockAcquisitionState::HaveStrictFinalitySignatures(..) + | BlockAcquisitionState::HaveApprovalsHashes(..) + | BlockAcquisitionState::HaveExecutableBlock(..) + | BlockAcquisitionState::Failed(..) + | BlockAcquisitionState::Complete(..) => { + return Ok(RegisterExecResultsOutcome { + exec_results: None, + acceptance: None, + }); + } + }; + self.set_state(new_state); + Ok(RegisterExecResultsOutcome { + exec_results: maybe_exec_results, + acceptance: Some(acceptance), + }) + } + + /// Register execution results stored notification for this block. + pub(super) fn register_execution_results_stored_notification( + &mut self, + need_execution_state: bool, + ) -> Result<(), BlockAcquisitionError> { + let new_state = match self { + BlockAcquisitionState::HaveGlobalState( + block, + signatures, + deploys, + ExecutionResultsAcquisition::Complete { checksum, .. }, + ) if need_execution_state => { + info!( + "BlockAcquisition: registering execution results stored notification for: {}", + block.hash() + ); + BlockAcquisitionState::HaveAllExecutionResults( + block.clone(), + signatures.clone(), + deploys.clone(), + *checksum, + ) + } + BlockAcquisitionState::HaveGlobalState(..) 
+ | BlockAcquisitionState::HaveAllDeploys(..) + | BlockAcquisitionState::HaveBlock(..) + | BlockAcquisitionState::Initialized(..) + | BlockAcquisitionState::HaveBlockHeader(..) + | BlockAcquisitionState::HaveWeakFinalitySignatures(..) + | BlockAcquisitionState::HaveAllExecutionResults(..) + | BlockAcquisitionState::HaveStrictFinalitySignatures(..) + | BlockAcquisitionState::HaveApprovalsHashes(..) + | BlockAcquisitionState::HaveExecutableBlock(..) + | BlockAcquisitionState::Failed(..) + | BlockAcquisitionState::Complete(..) => { + return Ok(()); + } + }; + self.set_state(new_state); + Ok(()) + } + + /// Register a deploy for this block. + pub(super) fn register_deploy( + &mut self, + txn_id: TransactionId, + is_historical: bool, + ) -> Result, BlockAcquisitionError> { + let (block, signatures, deploys) = match self { + BlockAcquisitionState::HaveBlock(block, signatures, transactions) + if false == is_historical => + { + (block, signatures, transactions) + } + BlockAcquisitionState::HaveApprovalsHashes(block, signatures, transactions) => { + (block, signatures, transactions) + } + BlockAcquisitionState::HaveAllExecutionResults( + block, + signatures, + deploys, + checksum, + ) if is_historical => match checksum { + ExecutionResultsChecksum::Uncheckable => (block, signatures, deploys), + ExecutionResultsChecksum::Checkable(_) => { + return Err(BlockAcquisitionError::InvalidAttemptToApplyTransaction { txn_id }); + } + }, + BlockAcquisitionState::Initialized(_, _) + | BlockAcquisitionState::HaveBlockHeader(_, _) + | BlockAcquisitionState::HaveWeakFinalitySignatures(_, _) + | BlockAcquisitionState::HaveBlock(_, _, _) + | BlockAcquisitionState::HaveGlobalState(_, _, _, _) + | BlockAcquisitionState::HaveAllExecutionResults(_, _, _, _) + | BlockAcquisitionState::HaveAllDeploys(_, _) + | BlockAcquisitionState::HaveExecutableBlock(..) + | BlockAcquisitionState::HaveStrictFinalitySignatures(_, _) + | BlockAcquisitionState::Failed(_, _) + | BlockAcquisitionState::Complete(..) 
=> { + debug!( + ?txn_id, + "BlockAcquisition: invalid attempt to register deploy for: {}", + self.block_hash() + ); + return Ok(None); + } + }; + info!("BlockAcquisition: registering deploy for: {}", block.hash()); + let maybe_acceptance = deploys.apply_transaction(txn_id); + if !deploys.needs_transaction() { + let new_state = + BlockAcquisitionState::HaveAllDeploys(block.clone(), signatures.clone()); + self.set_state(new_state); + } + Ok(maybe_acceptance) + } + + pub(super) fn register_made_finalized_block( + &mut self, + need_execution_state: bool, + executable_block: ExecutableBlock, + ) -> Result<(), BlockAcquisitionError> { + if need_execution_state { + return Err(BlockAcquisitionError::InvalidAttemptToEnqueueBlockForExecution); + } + + let new_state = match self { + BlockAcquisitionState::HaveStrictFinalitySignatures(block, _) => { + BlockAcquisitionState::HaveExecutableBlock( + block.clone(), + Box::new(executable_block), + false, + ) + } + BlockAcquisitionState::Initialized(..) + | BlockAcquisitionState::HaveWeakFinalitySignatures(..) + | BlockAcquisitionState::HaveBlockHeader(..) + | BlockAcquisitionState::HaveBlock(..) + | BlockAcquisitionState::HaveGlobalState(..) + | BlockAcquisitionState::HaveAllExecutionResults(..) + | BlockAcquisitionState::HaveApprovalsHashes(..) + | BlockAcquisitionState::HaveAllDeploys(..) + | BlockAcquisitionState::HaveExecutableBlock(..) + | BlockAcquisitionState::Failed(..) + | BlockAcquisitionState::Complete(..) => { + return Ok(()); + } + }; + self.set_state(new_state); + Ok(()) + } + + /// Register block is enqueued for execution with the contract runtime. + pub(super) fn register_block_execution_enqueued( + &mut self, + ) -> Result<(), BlockAcquisitionError> { + match self { + BlockAcquisitionState::HaveExecutableBlock(block, _, enqueued) => { + info!( + "BlockAcquisition: registering block enqueued for execution for: {}", + block + ); + *enqueued = true; + } + BlockAcquisitionState::Initialized(..) 
+ | BlockAcquisitionState::HaveWeakFinalitySignatures(..) + | BlockAcquisitionState::HaveBlockHeader(..) + | BlockAcquisitionState::HaveBlock(..) + | BlockAcquisitionState::HaveGlobalState(..) + | BlockAcquisitionState::HaveAllExecutionResults(..) + | BlockAcquisitionState::HaveApprovalsHashes(..) + | BlockAcquisitionState::HaveAllDeploys(..) + | BlockAcquisitionState::HaveStrictFinalitySignatures(..) + | BlockAcquisitionState::Failed(..) + | BlockAcquisitionState::Complete(..) => {} + }; + Ok(()) + } + + /// Register block executed for this block. + pub(super) fn register_block_executed( + &mut self, + need_execution_state: bool, + ) -> Result<(), BlockAcquisitionError> { + if need_execution_state { + return Err(BlockAcquisitionError::InvalidAttemptToEnqueueBlockForExecution); + } + + let new_state = match self { + BlockAcquisitionState::HaveExecutableBlock(block, _, _) => { + info!( + "BlockAcquisition: registering block executed for: {}", + *block.hash() + ); + BlockAcquisitionState::Complete(block.clone()) + } + BlockAcquisitionState::Initialized(..) + | BlockAcquisitionState::HaveWeakFinalitySignatures(..) + | BlockAcquisitionState::HaveBlockHeader(..) + | BlockAcquisitionState::HaveBlock(..) + | BlockAcquisitionState::HaveGlobalState(..) + | BlockAcquisitionState::HaveAllExecutionResults(..) + | BlockAcquisitionState::HaveApprovalsHashes(..) + | BlockAcquisitionState::HaveAllDeploys(..) + | BlockAcquisitionState::HaveStrictFinalitySignatures(..) + | BlockAcquisitionState::Failed(..) + | BlockAcquisitionState::Complete(..) => { + return Ok(()); + } + }; + self.set_state(new_state); + Ok(()) + } + + /// Register marked complete (all required data stored locally) for this block. 
+ pub(super) fn register_marked_complete( + &mut self, + need_execution_state: bool, + ) -> Result<(), BlockAcquisitionError> { + if !need_execution_state { + return Err(BlockAcquisitionError::InvalidAttemptToMarkComplete); + } + + let new_state = match self { + BlockAcquisitionState::HaveStrictFinalitySignatures(block, _) => { + info!( + "BlockAcquisition: registering marked complete for: {}", + *block.hash() + ); + BlockAcquisitionState::Complete(block.clone()) + } + BlockAcquisitionState::Initialized(..) + | BlockAcquisitionState::HaveWeakFinalitySignatures(..) + | BlockAcquisitionState::HaveBlockHeader(..) + | BlockAcquisitionState::HaveBlock(..) + | BlockAcquisitionState::HaveGlobalState(..) + | BlockAcquisitionState::HaveAllExecutionResults(..) + | BlockAcquisitionState::HaveApprovalsHashes(..) + | BlockAcquisitionState::HaveAllDeploys(..) + | BlockAcquisitionState::HaveExecutableBlock(..) + | BlockAcquisitionState::Failed(..) + | BlockAcquisitionState::Complete(..) => { + return Ok(()); + } + }; + self.set_state(new_state); + Ok(()) + } + + fn log_finality_signature_acceptance( + &self, + maybe_block_hash: &Option, + signer: &PublicKey, + acceptance: Option, + ) { + match maybe_block_hash { + None => { + error!( + "BlockAcquisition: unknown block_hash for finality signature from {}", + signer + ); + } + Some(block_hash) => match acceptance { + Some(Acceptance::HadIt) => { + trace!( + "BlockAcquisition: existing finality signature for {:?} from {}", + block_hash, + signer + ); + } + Some(Acceptance::NeededIt) => { + debug!( + "BlockAcquisition: new finality signature for {:?} from {}", + block_hash, signer + ); + } + None => { + debug!( + "BlockAcquisition: finality signature for {:?} from {} while not actively \ + trying to acquire finality signatures", + block_hash, signer + ); + } + }, + } + } + + fn set_state(&mut self, new_state: BlockAcquisitionState) { + debug!( + "BlockAcquisition: {} (transitioned from: {})", + new_state, self + ); + *self = 
new_state; + } +} + +// Collect signatures with Vacant state or which are currently missing from +// the SignatureAcquisition. +pub(super) fn signatures_from_missing_validators( + validator_weights: &EraValidatorWeights, + signatures: &mut SignatureAcquisition, + max_simultaneous_peers: u8, + peer_list: &PeerList, + rng: &mut NodeRng, + era_id: EraId, + block_hash: BlockHash, +) -> BlockAcquisitionAction { + let mut missing_signatures_in_random_order: HashSet = validator_weights + .missing_validators(signatures.not_vacant()) + .cloned() + .collect(); + // If there are too few, retry any in Pending state. + if (missing_signatures_in_random_order.len() as u8) < max_simultaneous_peers { + missing_signatures_in_random_order.extend( + validator_weights + .missing_validators(signatures.not_pending()) + .cloned(), + ); + } + BlockAcquisitionAction::finality_signatures( + peer_list, + rng, + era_id, + block_hash, + missing_signatures_in_random_order.into_iter().collect(), + ) +} diff --git a/node/src/components/block_synchronizer/block_acquisition_action.rs b/node/src/components/block_synchronizer/block_acquisition_action.rs new file mode 100644 index 0000000000..4be30bbfcb --- /dev/null +++ b/node/src/components/block_synchronizer/block_acquisition_action.rs @@ -0,0 +1,284 @@ +use std::fmt::{self, Display, Formatter}; +use tracing::{debug, warn}; + +use casper_types::{Block, BlockHash, DeployHash, Digest, EraId, PublicKey, TransactionId}; + +use crate::{ + components::block_synchronizer::{ + need_next::NeedNext, peer_list::PeerList, signature_acquisition::SignatureAcquisition, + BlockAcquisitionError, ExecutionResultsAcquisition, ExecutionResultsChecksum, + }, + types::{BlockExecutionResultsOrChunkId, EraValidatorWeights, ExecutableBlock, NodeId}, + NodeRng, +}; + +use super::block_acquisition::signatures_from_missing_validators; + +#[derive(Debug, PartialEq)] +pub(crate) struct BlockAcquisitionAction { + peers_to_ask: Vec, + need_next: NeedNext, +} + +impl Display for 
BlockAcquisitionAction { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!( + f, + "{} from {} peers", + self.need_next, + self.peers_to_ask.len() + ) + } +} + +impl BlockAcquisitionAction { + pub(super) fn need_next(&self) -> NeedNext { + self.need_next.clone() + } + + pub(super) fn peers_to_ask(&self) -> Vec { + self.peers_to_ask.to_vec() + } + + pub(super) fn need_nothing(block_hash: BlockHash) -> Self { + BlockAcquisitionAction { + peers_to_ask: vec![], + need_next: NeedNext::Nothing(block_hash), + } + } + + pub(super) fn peers(block_hash: BlockHash) -> Self { + BlockAcquisitionAction { + peers_to_ask: vec![], + need_next: NeedNext::Peers(block_hash), + } + } + + pub(super) fn execution_results_checksum( + block_hash: BlockHash, + global_state_root_hash: Digest, + ) -> Self { + BlockAcquisitionAction { + peers_to_ask: vec![], + need_next: NeedNext::ExecutionResultsChecksum(block_hash, global_state_root_hash), + } + } + + pub(super) fn execution_results( + block_hash: BlockHash, + peer_list: &PeerList, + rng: &mut NodeRng, + next: BlockExecutionResultsOrChunkId, + checksum: ExecutionResultsChecksum, + ) -> Self { + let peers_to_ask = peer_list.qualified_peers(rng); + BlockAcquisitionAction { + peers_to_ask, + need_next: NeedNext::ExecutionResults(block_hash, next, checksum), + } + } + + pub(super) fn approvals_hashes(block: &Block, peer_list: &PeerList, rng: &mut NodeRng) -> Self { + let peers_to_ask = peer_list.qualified_peers(rng); + BlockAcquisitionAction { + peers_to_ask, + need_next: NeedNext::ApprovalsHashes(*block.hash(), Box::new(block.clone())), + } + } + + pub(super) fn legacy_deploy_by_hash( + block_hash: BlockHash, + deploy_hash: DeployHash, + peer_list: &PeerList, + rng: &mut NodeRng, + ) -> Self { + let peers_to_ask = peer_list.qualified_peers(rng); + BlockAcquisitionAction { + peers_to_ask, + need_next: NeedNext::DeployByHash(block_hash, deploy_hash), + } + } + + pub(super) fn transaction_by_id( + block_hash: BlockHash, + txn_id: 
TransactionId, + peer_list: &PeerList, + rng: &mut NodeRng, + ) -> Self { + let peers_to_ask = peer_list.qualified_peers(rng); + BlockAcquisitionAction { + peers_to_ask, + need_next: NeedNext::TransactionById(block_hash, txn_id), + } + } + + pub(super) fn global_state( + peer_list: &PeerList, + rng: &mut NodeRng, + block_hash: BlockHash, + root_hash: Digest, + ) -> Self { + let peers_to_ask = peer_list.qualified_peers(rng); + BlockAcquisitionAction { + peers_to_ask, + need_next: NeedNext::GlobalState(block_hash, root_hash), + } + } + + pub(super) fn finality_signatures( + peer_list: &PeerList, + rng: &mut NodeRng, + era_id: EraId, + block_hash: BlockHash, + missing_signatures: Vec, + ) -> Self { + let peers_to_ask = peer_list.qualified_peers(rng); + + debug!( + %era_id, + missing_signatures = missing_signatures.len(), + peers_to_ask = peers_to_ask.len(), + "BlockSynchronizer: requesting finality signatures"); + + BlockAcquisitionAction { + peers_to_ask, + need_next: NeedNext::FinalitySignatures(block_hash, era_id, missing_signatures), + } + } + + pub(super) fn block_body( + peer_list: &PeerList, + rng: &mut NodeRng, + block_hash: BlockHash, + ) -> Self { + let peers_to_ask = peer_list.qualified_peers(rng); + BlockAcquisitionAction { + peers_to_ask, + need_next: NeedNext::BlockBody(block_hash), + } + } + + pub(super) fn switch_to_have_sufficient_finality( + block_hash: BlockHash, + block_height: u64, + ) -> Self { + BlockAcquisitionAction { + peers_to_ask: vec![], + need_next: NeedNext::SwitchToHaveStrictFinality(block_hash, block_height), + } + } + + pub(super) fn block_marked_complete(block_hash: BlockHash, block_height: u64) -> Self { + BlockAcquisitionAction { + peers_to_ask: vec![], + need_next: NeedNext::BlockMarkedComplete(block_hash, block_height), + } + } + + pub(super) fn make_executable_block(block_hash: BlockHash, block_height: u64) -> Self { + BlockAcquisitionAction { + peers_to_ask: vec![], + need_next: NeedNext::MakeExecutableBlock(block_hash, 
block_height), + } + } + + pub(super) fn enqueue_block_for_execution( + block_hash: &BlockHash, + executable_block: Box, + ) -> Self { + BlockAcquisitionAction { + peers_to_ask: vec![], + need_next: NeedNext::EnqueueForExecution( + *block_hash, + executable_block.height, + executable_block, + ), + } + } + + pub(super) fn block_header( + peer_list: &PeerList, + rng: &mut NodeRng, + block_hash: BlockHash, + ) -> Self { + let peers_to_ask = peer_list.qualified_peers(rng); + BlockAcquisitionAction { + peers_to_ask, + need_next: NeedNext::BlockHeader(block_hash), + } + } + + pub(super) fn era_validators(peer_list: &PeerList, rng: &mut NodeRng, era_id: EraId) -> Self { + let peers_to_ask = peer_list.qualified_peers(rng); + BlockAcquisitionAction { + peers_to_ask, + need_next: NeedNext::EraValidators(era_id), + } + } + + pub(super) fn maybe_execution_results( + block: &Block, + peer_list: &PeerList, + rng: &mut NodeRng, + exec_results: &mut ExecutionResultsAcquisition, + ) -> Result { + match exec_results { + ExecutionResultsAcquisition::Needed { .. } => { + Ok(BlockAcquisitionAction::execution_results_checksum( + *block.hash(), + *block.state_root_hash(), + )) + } + acq @ ExecutionResultsAcquisition::Pending { .. } + | acq @ ExecutionResultsAcquisition::Acquiring { .. } => { + match acq.needs_value_or_chunk() { + None => { + warn!( + block_hash = %block.hash(), + "execution_results_acquisition.needs_value_or_chunk() should never be \ + None for these variants" + ); + Err(BlockAcquisitionError::InvalidAttemptToAcquireExecutionResults) + } + Some((next, checksum)) => Ok(BlockAcquisitionAction::execution_results( + *block.hash(), + peer_list, + rng, + next, + checksum, + )), + } + } + ExecutionResultsAcquisition::Complete { .. 
} => Ok( + BlockAcquisitionAction::approvals_hashes(block, peer_list, rng), + ), + } + } + + #[allow(clippy::too_many_arguments)] + pub(super) fn next_action_after_deploy_acquisition( + block_hash: BlockHash, + block_height: u64, + era_id: EraId, + peer_list: &PeerList, + rng: &mut NodeRng, + validator_weights: &EraValidatorWeights, + signatures: &mut SignatureAcquisition, + is_historical: bool, + max_simultaneous_peers: u8, + ) -> Self { + if signatures.has_sufficient_finality(is_historical, true) { + BlockAcquisitionAction::switch_to_have_sufficient_finality(block_hash, block_height) + } else { + signatures_from_missing_validators( + validator_weights, + signatures, + max_simultaneous_peers, + peer_list, + rng, + era_id, + block_hash, + ) + } + } +} diff --git a/node/src/components/block_synchronizer/block_builder.rs b/node/src/components/block_synchronizer/block_builder.rs new file mode 100644 index 0000000000..e5bb94a58e --- /dev/null +++ b/node/src/components/block_synchronizer/block_builder.rs @@ -0,0 +1,871 @@ +mod latch; +#[cfg(test)] +mod tests; + +use std::{ + collections::HashMap, + fmt::{Display, Formatter}, + time::Instant, +}; + +use datasize::DataSize; +use tracing::{debug, error, trace, warn}; + +use casper_storage::block_store::types::ApprovalsHashes; +use casper_types::{ + execution::ExecutionResult, Block, BlockHash, BlockHeader, BlockSignatures, Digest, EraId, + FinalitySignature, LegacyRequiredFinality, ProtocolVersion, PublicKey, TimeDiff, Timestamp, + TransactionHash, TransactionId, +}; + +use super::{ + block_acquisition::{Acceptance, BlockAcquisitionState, RegisterExecResultsOutcome}, + block_acquisition_action::BlockAcquisitionAction, + execution_results_acquisition::{self, ExecutionResultsChecksum}, + peer_list::{PeerList, PeersStatus}, + signature_acquisition::SignatureAcquisition, + BlockAcquisitionError, +}; +use crate::{ + components::block_synchronizer::block_builder::latch::Latch, + types::{ + BlockExecutionResultsOrChunk, 
EraValidatorWeights, ExecutableBlock, NodeId, ValidatorMatrix, + }, + NodeRng, +}; + +#[derive(Clone, Copy, PartialEq, Eq, DataSize, Debug)] +pub(super) enum Error { + BlockAcquisition(BlockAcquisitionError), + MissingValidatorWeights(BlockHash), +} + +impl Display for Error { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self { + Error::BlockAcquisition(err) => write!(f, "block acquisition error: {}", err), + Error::MissingValidatorWeights(block_hash) => { + write!(f, "missing validator weights for: {}", block_hash) + } + } + } +} + +#[derive(Clone, Copy, PartialEq, Eq, Debug, DataSize)] +enum ExecutionProgress { + Idle, + Started, + Done, +} + +impl ExecutionProgress { + fn start(self) -> Option { + match self { + Self::Idle => Some(Self::Started), + _ => None, + } + } + + fn finish(self) -> Option { + match self { + Self::Started => Some(Self::Done), + _ => None, + } + } +} + +#[derive(DataSize, Debug)] +pub(super) struct BlockBuilder { + // imputed + block_hash: BlockHash, + should_fetch_execution_state: bool, + strict_finality_protocol_version: ProtocolVersion, + peer_list: PeerList, + + // progress tracking + sync_start: Instant, + execution_progress: ExecutionProgress, + last_progress: Timestamp, + latch: Latch, + + // acquired state + acquisition_state: BlockAcquisitionState, + era_id: Option, + validator_weights: Option, +} + +impl Display for BlockBuilder { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!( + f, + "is_historical: {:?}, has_validators: {:?}, block builder: {}", + self.should_fetch_execution_state, + self.validator_weights.is_some(), + self.acquisition_state + ) + } +} + +impl BlockBuilder { + pub(super) fn new( + block_hash: BlockHash, + should_fetch_execution_state: bool, + max_simultaneous_peers: u8, + peer_refresh_interval: TimeDiff, + legacy_required_finality: LegacyRequiredFinality, + strict_finality_protocol_version: ProtocolVersion, + ) -> Self { + BlockBuilder { + block_hash, + era_id: 
None, + validator_weights: None, + acquisition_state: BlockAcquisitionState::Initialized( + block_hash, + SignatureAcquisition::new(vec![], legacy_required_finality), + ), + peer_list: PeerList::new(max_simultaneous_peers, peer_refresh_interval), + should_fetch_execution_state, + strict_finality_protocol_version, + sync_start: Instant::now(), + execution_progress: ExecutionProgress::Idle, + last_progress: Timestamp::now(), + latch: Latch::default(), + } + } + + #[allow(clippy::too_many_arguments)] + pub(super) fn new_from_sync_leap( + block_header: BlockHeader, + maybe_sigs: Option<&BlockSignatures>, + validator_weights: EraValidatorWeights, + peers: Vec, + should_fetch_execution_state: bool, + max_simultaneous_peers: u8, + peer_refresh_interval: TimeDiff, + legacy_required_finality: LegacyRequiredFinality, + strict_finality_protocol_version: ProtocolVersion, + ) -> Self { + let block_hash = block_header.block_hash(); + let era_id = Some(block_header.era_id()); + let mut signature_acquisition = SignatureAcquisition::new( + validator_weights.validator_public_keys().cloned().collect(), + legacy_required_finality, + ); + if let Some(signatures) = maybe_sigs { + for finality_signature in signatures.finality_signatures() { + let _ = + signature_acquisition.apply_signature(finality_signature, &validator_weights); + } + } + let acquisition_state = BlockAcquisitionState::HaveWeakFinalitySignatures( + Box::new(block_header), + signature_acquisition, + ); + let mut peer_list = PeerList::new(max_simultaneous_peers, peer_refresh_interval); + peers.iter().for_each(|p| peer_list.register_peer(*p)); + + BlockBuilder { + block_hash, + era_id, + validator_weights: Some(validator_weights), + acquisition_state, + peer_list, + should_fetch_execution_state, + strict_finality_protocol_version, + sync_start: Instant::now(), + execution_progress: ExecutionProgress::Idle, + last_progress: Timestamp::now(), + latch: Latch::default(), + } + } + + pub(super) fn abort(&mut self) { + 
self.acquisition_state = + BlockAcquisitionState::Failed(self.block_hash, self.block_height()); + self.flush_peers(); + self.touch(); + } + + pub(crate) fn block_acquisition_state(&self) -> &BlockAcquisitionState { + &self.acquisition_state + } + + #[cfg(test)] + pub(crate) fn set_block_acquisition_state(&mut self, state: BlockAcquisitionState) { + self.acquisition_state = state + } + + pub(super) fn block_hash(&self) -> BlockHash { + self.block_hash + } + + pub(super) fn maybe_block(&self) -> Option> { + self.acquisition_state.maybe_block() + } + + pub(super) fn block_height(&self) -> Option { + self.acquisition_state.block_height() + } + + pub(super) fn block_height_and_era(&self) -> Option<(u64, EraId)> { + if let Some(block_height) = self.acquisition_state.block_height() { + if let Some(evw) = &self.validator_weights { + return Some((block_height, evw.era_id())); + } + } + None + } + + pub(super) fn should_fetch_execution_state(&self) -> bool { + self.should_fetch_execution_state + } + + pub(super) fn sync_start_time(&self) -> Instant { + self.sync_start + } + + pub(super) fn last_progress_time(&self) -> Timestamp { + self.last_progress + } + + #[cfg(test)] + pub fn latched(&self) -> bool { + self.latch.count() > 0 + } + + #[cfg(test)] + pub fn latch_count(&self) -> u8 { + self.latch.count() + } + + pub(super) fn check_latch(&mut self, interval: TimeDiff) -> bool { + self.latch.check_latch(interval, Timestamp::now()) + } + + /// Increments the latch counter by 1. + pub(super) fn latch(&mut self) { + self.latch.increment(1); + } + + pub(super) fn latch_by(&mut self, count: usize) { + self.latch.increment(count as u8); + } + + /// Decrements the latch counter. 
+ pub(super) fn latch_decrement(&mut self) { + self.latch.decrement(1); + } + + pub(super) fn is_failed(&self) -> bool { + matches!(self.acquisition_state, BlockAcquisitionState::Failed(_, _)) + } + + pub(super) fn is_finished(&self) -> bool { + match self.acquisition_state { + BlockAcquisitionState::Initialized(_, _) + | BlockAcquisitionState::HaveBlockHeader(_, _) + | BlockAcquisitionState::HaveWeakFinalitySignatures(_, _) + | BlockAcquisitionState::HaveBlock(_, _, _) + | BlockAcquisitionState::HaveGlobalState(_, _, _, _) + | BlockAcquisitionState::HaveAllExecutionResults(_, _, _, _) + | BlockAcquisitionState::HaveApprovalsHashes(_, _, _) + | BlockAcquisitionState::HaveAllDeploys(_, _) + | BlockAcquisitionState::HaveStrictFinalitySignatures(_, _) + | BlockAcquisitionState::HaveExecutableBlock(_, _, _) + | BlockAcquisitionState::Failed(_, _) => false, + BlockAcquisitionState::Complete(_) => true, + } + } + + pub(super) fn is_executing(&self) -> bool { + matches!(self.execution_progress, ExecutionProgress::Started) + } + + pub(super) fn execution_unattempted(&self) -> bool { + matches!(self.execution_progress, ExecutionProgress::Idle) + } + + pub(super) fn register_block_execution_enqueued(&mut self) { + if self.should_fetch_execution_state { + let block_hash = self.block_hash(); + error!(%block_hash, "invalid attempt to enqueue historical block for execution"); + self.abort(); + return; + } + + if let Err(error) = self.acquisition_state.register_block_execution_enqueued() { + error!(%error, "register block execution enqueued failed"); + self.abort() + } else { + self.touch(); + } + + match self.execution_progress.start() { + None => { + let block_hash = self.block_hash(); + warn!(%block_hash, "invalid attempt to start block execution"); + } + Some(executing_progress) => { + self.touch(); + self.execution_progress = executing_progress; + } + } + } + + pub(super) fn register_made_executable_block(&mut self, executable_block: ExecutableBlock) { + if let Err(error) = 
self + .acquisition_state + .register_made_finalized_block(self.should_fetch_execution_state, executable_block) + { + error!(%error, "register finalized block failed"); + self.abort() + } else { + self.touch(); + } + } + + pub(super) fn register_block_executed(&mut self) { + if let Err(error) = self + .acquisition_state + .register_block_executed(self.should_fetch_execution_state) + { + error!(%error, "register block executed failed"); + self.abort() + } else { + if self.should_fetch_execution_state { + let block_hash = self.block_hash(); + error!(%block_hash, "invalid attempt to finish block execution on historical block"); + self.abort(); + } + + match self.execution_progress.finish() { + None => { + let block_hash = self.block_hash(); + warn!(%block_hash, "invalid attempt to finish block execution"); + } + Some(executing_progress) => { + self.touch(); + self.execution_progress = executing_progress; + } + } + } + } + + pub(super) fn register_marked_complete(&mut self) { + if let Err(error) = self + .acquisition_state + .register_marked_complete(self.should_fetch_execution_state) + { + error!(%error, "register marked complete failed"); + self.abort() + } else { + self.touch(); + } + } + + pub(super) fn dishonest_peers(&self) -> Vec { + self.peer_list.dishonest_peers() + } + + pub(super) fn disqualify_peer(&mut self, peer: NodeId) { + debug!(?peer, "disqualify_peer"); + self.peer_list.disqualify_peer(peer); + } + + pub(super) fn promote_peer(&mut self, peer: NodeId) { + self.peer_list.promote_peer(peer); + } + + pub(super) fn demote_peer(&mut self, peer: NodeId) { + self.peer_list.demote_peer(peer); + } + + pub(super) fn flush_dishonest_peers(&mut self) { + self.peer_list.flush_dishonest_peers(); + } + + pub(super) fn block_acquisition_action( + &mut self, + rng: &mut NodeRng, + max_simultaneous_peers: u8, + ) -> BlockAcquisitionAction { + match self.peer_list.need_peers() { + PeersStatus::Sufficient => { + trace!( + "BlockBuilder: sufficient peers for block_hash 
{}", + self.block_hash + ); + } + PeersStatus::Insufficient => { + debug!( + "BlockBuilder: insufficient peers for block_hash {}", + self.block_hash + ); + return BlockAcquisitionAction::peers(self.block_hash); + } + PeersStatus::Stale => { + debug!("BlockBuilder: refreshing peers for {}", self.block_hash); + return BlockAcquisitionAction::peers(self.block_hash); + } + } + let era_id = match self.era_id { + None => { + // if we don't have the era_id, we only have block_hash, thus get block_header + return BlockAcquisitionAction::block_header(&self.peer_list, rng, self.block_hash); + } + Some(era_id) => era_id, + }; + let validator_weights = match &self.validator_weights { + None => { + return BlockAcquisitionAction::era_validators(&self.peer_list, rng, era_id); + } + Some(validator_weights) => { + if validator_weights.is_empty() { + return BlockAcquisitionAction::era_validators(&self.peer_list, rng, era_id); + } + validator_weights + } + }; + match self.acquisition_state.next_action( + &self.peer_list, + validator_weights, + rng, + self.should_fetch_execution_state, + max_simultaneous_peers, + ) { + Ok(ret) => ret, + Err(err) => { + error!(%err, "BlockBuilder: attempt to determine next action resulted in error."); + self.abort(); + BlockAcquisitionAction::need_nothing(self.block_hash) + } + } + } + + pub(super) fn register_era_validator_weights(&mut self, validator_matrix: &ValidatorMatrix) { + if self.validator_weights.is_some() || self.era_id.is_none() { + return; + } + + if let Some(era_id) = self.era_id { + if let Some(evw) = validator_matrix.validator_weights(era_id) { + self.validator_weights = Some(evw); + self.touch(); + } + } + } + + pub(super) fn waiting_for_block_header(&self) -> bool { + match &self.acquisition_state { + BlockAcquisitionState::Initialized(..) => true, + BlockAcquisitionState::HaveBlockHeader(..) + | BlockAcquisitionState::HaveWeakFinalitySignatures(..) + | BlockAcquisitionState::HaveBlock(..) 
+ | BlockAcquisitionState::HaveGlobalState(..) + | BlockAcquisitionState::HaveAllExecutionResults(..) + | BlockAcquisitionState::HaveAllDeploys(..) + | BlockAcquisitionState::HaveStrictFinalitySignatures(..) + | BlockAcquisitionState::HaveApprovalsHashes(..) + | BlockAcquisitionState::HaveExecutableBlock(..) + | BlockAcquisitionState::Failed(..) + | BlockAcquisitionState::Complete(..) => false, + } + } + + pub(super) fn register_block_header( + &mut self, + block_header: BlockHeader, + maybe_peer: Option, + ) -> Result<(), Error> { + let was_waiting_for_block_header = self.waiting_for_block_header(); + + let era_id = block_header.era_id(); + let acceptance = self.acquisition_state.register_block_header( + block_header, + self.strict_finality_protocol_version, + self.should_fetch_execution_state, + ); + self.handle_acceptance(maybe_peer, acceptance, was_waiting_for_block_header)?; + self.era_id = Some(era_id); + Ok(()) + } + + pub(super) fn waiting_for_block(&self) -> bool { + match &self.acquisition_state { + BlockAcquisitionState::HaveWeakFinalitySignatures(..) => true, + BlockAcquisitionState::Initialized(..) + | BlockAcquisitionState::HaveBlockHeader(..) + | BlockAcquisitionState::HaveBlock(..) + | BlockAcquisitionState::HaveGlobalState(..) + | BlockAcquisitionState::HaveAllExecutionResults(..) + | BlockAcquisitionState::HaveAllDeploys(..) + | BlockAcquisitionState::HaveStrictFinalitySignatures(..) + | BlockAcquisitionState::HaveApprovalsHashes(..) + | BlockAcquisitionState::HaveExecutableBlock(..) + | BlockAcquisitionState::Failed(..) + | BlockAcquisitionState::Complete(..) 
=> false, + } + } + + pub(super) fn register_block( + &mut self, + block: Block, + maybe_peer: Option, + ) -> Result<(), Error> { + let was_waiting_for_block = self.waiting_for_block(); + let acceptance = self + .acquisition_state + .register_block(block, self.should_fetch_execution_state); + self.handle_acceptance(maybe_peer, acceptance, was_waiting_for_block) + } + + pub(super) fn waiting_for_approvals_hashes(&self) -> bool { + match &self.acquisition_state { + BlockAcquisitionState::HaveBlock(..) if !self.should_fetch_execution_state => true, + BlockAcquisitionState::HaveAllExecutionResults(..) + if self.should_fetch_execution_state => + { + true + } + BlockAcquisitionState::Initialized(..) + | BlockAcquisitionState::HaveBlockHeader(..) + | BlockAcquisitionState::HaveWeakFinalitySignatures(..) + | BlockAcquisitionState::HaveBlock(..) + | BlockAcquisitionState::HaveGlobalState(..) + | BlockAcquisitionState::HaveAllExecutionResults(..) + | BlockAcquisitionState::HaveAllDeploys(..) + | BlockAcquisitionState::HaveStrictFinalitySignatures(..) + | BlockAcquisitionState::HaveApprovalsHashes(..) + | BlockAcquisitionState::HaveExecutableBlock(..) + | BlockAcquisitionState::Failed(..) + | BlockAcquisitionState::Complete(..) 
=> false, + } + } + + pub(super) fn register_approvals_hashes( + &mut self, + approvals_hashes: &ApprovalsHashes, + maybe_peer: Option, + ) -> Result<(), Error> { + let was_waiting_for_approvals_hashes = self.waiting_for_approvals_hashes(); + let acceptance = self + .acquisition_state + .register_approvals_hashes(approvals_hashes, self.should_fetch_execution_state); + self.handle_acceptance(maybe_peer, acceptance, was_waiting_for_approvals_hashes) + } + + pub(super) fn register_finality_signature_pending(&mut self, validator: PublicKey) { + self.acquisition_state + .register_finality_signature_pending(validator); + } + + pub(super) fn switch_to_have_strict_finality( + &mut self, + block_hash: BlockHash, + ) -> Result<(), Error> { + match self + .acquisition_state + .switch_to_have_strict_finality(block_hash, self.should_fetch_execution_state) + { + Ok(()) => { + self.touch(); + Ok(()) + } + Err(error) => { + self.abort(); + Err(Error::BlockAcquisition(error)) + } + } + } + + pub(super) fn waiting_for_signatures(&self) -> bool { + self.acquisition_state + .actively_acquiring_signatures(self.should_fetch_execution_state) + } + + pub(super) fn register_finality_signature( + &mut self, + finality_signature: FinalitySignature, + maybe_peer: Option, + ) -> Result<(), Error> { + let was_waiting_for_sigs = self.waiting_for_signatures(); + let validator_weights = self + .validator_weights + .as_ref() + .ok_or(Error::MissingValidatorWeights(self.block_hash))?; + let acceptance = self.acquisition_state.register_finality_signature( + finality_signature, + validator_weights, + self.should_fetch_execution_state, + ); + self.handle_acceptance(maybe_peer, acceptance, was_waiting_for_sigs) + } + + pub(super) fn register_global_state(&mut self, global_state: Digest) -> Result<(), Error> { + if let Err(error) = self + .acquisition_state + .register_global_state(global_state, self.should_fetch_execution_state) + { + return Err(Error::BlockAcquisition(error)); + } + self.touch(); + 
Ok(()) + } + + pub(super) fn register_execution_results_checksum( + &mut self, + execution_results_checksum: ExecutionResultsChecksum, + ) -> Result<(), Error> { + debug!(block_hash=%self.block_hash, "register_execution_results_checksum"); + if let Err(err) = self.acquisition_state.register_execution_results_checksum( + execution_results_checksum, + self.should_fetch_execution_state, + ) { + debug!(block_hash=%self.block_hash, %err, "register_execution_results_checksum: Error::BlockAcquisition"); + return Err(Error::BlockAcquisition(err)); + } + self.touch(); + Ok(()) + } + + pub(super) fn waiting_for_execution_results(&self) -> bool { + match &self.acquisition_state { + BlockAcquisitionState::HaveGlobalState(..) if self.should_fetch_execution_state => true, + BlockAcquisitionState::HaveAllDeploys(..) + | BlockAcquisitionState::HaveGlobalState(..) + | BlockAcquisitionState::HaveBlock(..) + | BlockAcquisitionState::Initialized(..) + | BlockAcquisitionState::HaveBlockHeader(..) + | BlockAcquisitionState::HaveWeakFinalitySignatures(..) + | BlockAcquisitionState::HaveAllExecutionResults(..) + | BlockAcquisitionState::HaveStrictFinalitySignatures(..) + | BlockAcquisitionState::HaveApprovalsHashes(..) + | BlockAcquisitionState::HaveExecutableBlock(..) + | BlockAcquisitionState::Failed(..) + | BlockAcquisitionState::Complete(..) 
=> false, + } + } + + pub(super) fn register_fetched_execution_results( + &mut self, + maybe_peer: Option, + block_execution_results_or_chunk: BlockExecutionResultsOrChunk, + ) -> Result>, Error> { + debug!(block_hash=%self.block_hash, "register_fetched_execution_results"); + let was_waiting_for_execution_results = self.waiting_for_execution_results(); + match self.acquisition_state.register_execution_results_or_chunk( + block_execution_results_or_chunk, + self.should_fetch_execution_state, + ) { + Ok(RegisterExecResultsOutcome { + exec_results, + acceptance, + }) => { + debug!( + ?acceptance, + "register_fetched_execution_results: Ok(RegisterExecResultsOutcome)" + ); + self.handle_acceptance( + maybe_peer, + Ok(acceptance), + was_waiting_for_execution_results, + )?; + Ok(exec_results) + } + Err(BlockAcquisitionError::ExecutionResults(error)) => { + match error { + // late response - not considered an error + execution_results_acquisition::Error::AttemptToApplyDataAfterCompleted { .. } => { + debug!(%error, "late block_execution_results_or_chunk response"); + return Ok(None); + } + // programmer error + execution_results_acquisition::Error::BlockHashMismatch { .. } + | execution_results_acquisition::Error::InvalidAttemptToApplyChecksum { .. } + | execution_results_acquisition::Error::AttemptToApplyDataWhenMissingChecksum { .. } + | execution_results_acquisition::Error::InvalidOutcomeFromApplyingChunk { .. } + => { + if was_waiting_for_execution_results { + self.latch_decrement(); + } + debug!( + "register_fetched_execution_results: BlockHashMismatch | \ + InvalidAttemptToApplyChecksum | AttemptToApplyDataWhenMissingChecksum \ + | InvalidOutcomeFromApplyingChunk" + ); + }, + // malicious peer if checksum is available. + execution_results_acquisition::Error::ChunkCountMismatch { .. 
} => { + let is_checkable = match &self.acquisition_state { + BlockAcquisitionState::HaveGlobalState( + _, + _, + _, + execution_results_acquisition, + ) => execution_results_acquisition.is_checkable(), + _ => false, + }; + debug!(is_checkable, "register_fetched_execution_results: ChunkCountMismatch"); + if is_checkable { + if let Some(peer) = maybe_peer { + self.disqualify_peer(peer); + } + } + if was_waiting_for_execution_results { + self.latch_decrement(); + } + } + // malicious peer + execution_results_acquisition::Error::InvalidChunkCount { .. } + | execution_results_acquisition::Error::ChecksumMismatch { .. } + | execution_results_acquisition::Error::FailedToDeserialize { .. } + | execution_results_acquisition::Error::ExecutionResultToDeployHashLengthDiscrepancy { .. } => { + debug!("register_fetched_execution_results: InvalidChunkCount | ChecksumMismatch | FailedToDeserialize | ExecutionResultToDeployHashLengthDiscrepancy"); + if let Some(peer) = maybe_peer { + self.disqualify_peer(peer); + } + if was_waiting_for_execution_results { + self.latch_decrement(); + } + } + // checksum unavailable, so unknown if this peer is malicious + execution_results_acquisition::Error::ChunksWithDifferentChecksum { .. 
} => { + debug!("register_fetched_execution_results: ChunksWithDifferentChecksum"); + if was_waiting_for_execution_results { + self.latch_decrement(); + } + } + } + Err(Error::BlockAcquisition( + BlockAcquisitionError::ExecutionResults(error), + )) + } + Err(error) => { + error!(%error, "unexpected error"); + Ok(None) + } + } + } + + pub(super) fn register_execution_results_stored_notification(&mut self) -> Result<(), Error> { + debug!(block_hash=%self.block_hash, "register_execution_results_stored_notification"); + if let Err(err) = self + .acquisition_state + .register_execution_results_stored_notification(self.should_fetch_execution_state) + { + debug!(block_hash=%self.block_hash, "register_execution_results_stored_notification: abort"); + self.abort(); + return Err(Error::BlockAcquisition(err)); + } + self.touch(); + Ok(()) + } + + pub(super) fn waiting_for_deploys(&self) -> bool { + match &self.acquisition_state { + BlockAcquisitionState::HaveApprovalsHashes(_, _, transactions) => { + transactions.needs_transaction() + } + BlockAcquisitionState::HaveAllExecutionResults(_, _, transactions, checksum) + if self.should_fetch_execution_state => + { + if !checksum.is_checkable() { + transactions.needs_transaction() + } else { + false + } + } + BlockAcquisitionState::Initialized(..) + | BlockAcquisitionState::HaveBlockHeader(..) + | BlockAcquisitionState::HaveWeakFinalitySignatures(..) + | BlockAcquisitionState::HaveBlock(..) + | BlockAcquisitionState::HaveGlobalState(..) + | BlockAcquisitionState::HaveAllExecutionResults(..) + | BlockAcquisitionState::HaveAllDeploys(..) + | BlockAcquisitionState::HaveStrictFinalitySignatures(..) + | BlockAcquisitionState::HaveExecutableBlock(..) + | BlockAcquisitionState::Failed(..) + | BlockAcquisitionState::Complete(..) 
=> false, + } + } + + pub(super) fn register_deploy( + &mut self, + txn_id: TransactionId, + maybe_peer: Option, + ) -> Result<(), Error> { + let was_waiting_for_deploys = self.waiting_for_deploys(); + let acceptance = self + .acquisition_state + .register_deploy(txn_id, self.should_fetch_execution_state); + self.handle_acceptance(maybe_peer, acceptance, was_waiting_for_deploys) + } + + pub(super) fn register_peers(&mut self, peers: Vec) { + if peers.is_empty() { + // We asked for peers but none were provided. Exit early without + // clearing the latch so that we don't ask again needlessly. + trace!("BlockSynchronizer: no peers available"); + return; + } + if !(self.is_finished() || self.is_failed()) { + peers + .into_iter() + .for_each(|peer| self.peer_list.register_peer(peer)); + } + self.touch(); + } + + fn handle_acceptance( + &mut self, + maybe_peer: Option, + acceptance: Result, BlockAcquisitionError>, + should_unlatch: bool, + ) -> Result<(), Error> { + match acceptance { + Ok(Some(Acceptance::NeededIt)) => { + // Got a useful response. Unlatch in all cases since we want to get the next item. + self.touch(); + if let Some(peer) = maybe_peer { + self.promote_peer(peer); + } + } + Ok(Some(Acceptance::HadIt)) => { + // Already had this item, which means that this was a late response for a previous + // fetch. We don't unlatch in this case and wait for a valid response. + } + Ok(None) => { + if should_unlatch { + self.latch_decrement(); + } + } + Err(error) => { + if let Some(peer) = maybe_peer { + self.disqualify_peer(peer); + } + + // If we were waiting for a response and the item was not good, + // decrement latch. Fetch will be retried when unlatched. 
+ if should_unlatch { + self.latch_decrement(); + } + + return Err(Error::BlockAcquisition(error)); + } + } + Ok(()) + } + + fn flush_peers(&mut self) { + self.peer_list.flush(); + } + + fn touch(&mut self) { + self.last_progress = Timestamp::now(); + self.latch.unlatch(); + } + + pub(crate) fn peer_list(&self) -> &PeerList { + &self.peer_list + } +} diff --git a/node/src/components/block_synchronizer/block_builder/latch.rs b/node/src/components/block_synchronizer/block_builder/latch.rs new file mode 100644 index 0000000000..37153bbec4 --- /dev/null +++ b/node/src/components/block_synchronizer/block_builder/latch.rs @@ -0,0 +1,63 @@ +use datasize::DataSize; + +use tracing::warn; + +use casper_types::{TimeDiff, Timestamp}; + +#[derive(Debug, Default, DataSize)] +pub(super) struct Latch { + #[data_size(skip)] + latch: u8, + timestamp: Option, +} + +impl Latch { + pub(super) fn increment(&mut self, increment_by: u8) { + match self.latch.checked_add(increment_by) { + Some(val) => { + self.latch = val; + self.touch(); + } + None => { + warn!("latch increment overflowed."); + } + } + } + + pub(super) fn decrement(&mut self, decrement_by: u8) { + match self.latch.checked_sub(decrement_by) { + Some(val) => { + self.latch = val; + } + None => { + self.latch = 0; + } + } + self.touch(); + } + + pub(super) fn unlatch(&mut self) { + self.latch = 0; + self.timestamp = None; + } + + pub(super) fn check_latch(&mut self, interval: TimeDiff, checked: Timestamp) -> bool { + match self.timestamp { + None => false, + Some(timestamp) => { + if checked > timestamp + interval { + self.unlatch() + } + self.count() > 0 + } + } + } + + pub(super) fn count(&self) -> u8 { + self.latch + } + + pub(super) fn touch(&mut self) { + self.timestamp = Some(Timestamp::now()); + } +} diff --git a/node/src/components/block_synchronizer/block_builder/tests.rs b/node/src/components/block_synchronizer/block_builder/tests.rs new file mode 100644 index 0000000000..be87b417b6 --- /dev/null +++ 
b/node/src/components/block_synchronizer/block_builder/tests.rs @@ -0,0 +1,516 @@ +use std::{collections::BTreeMap, thread, time::Duration}; + +use num_rational::Ratio; + +use casper_types::{ + testing::TestRng, ChainNameDigest, FinalitySignatureV2, TestBlockBuilder, Transaction, +}; + +use crate::components::consensus::tests::utils::{ALICE_PUBLIC_KEY, ALICE_SECRET_KEY}; + +use super::*; + +#[test] +fn handle_acceptance_promotes_and_disqualifies_peers() { + let mut rng = TestRng::new(); + let block = TestBlockBuilder::new().build(&mut rng); + let mut builder = BlockBuilder::new( + *block.hash(), + false, + 1, + TimeDiff::from_seconds(1), + LegacyRequiredFinality::Strict, + ProtocolVersion::V1_0_0, + ); + + let honest_peer = NodeId::random(&mut rng); + let dishonest_peer = NodeId::random(&mut rng); + + // Builder acceptance for needed signature from ourselves. + assert!(builder + .handle_acceptance(None, Ok(Some(Acceptance::NeededIt)), true) + .is_ok()); + assert!(builder.peer_list().qualified_peers(&mut rng).is_empty()); + assert!(builder.peer_list().dishonest_peers().is_empty()); + // Builder acceptance for existent signature from ourselves. + assert!(builder + .handle_acceptance(None, Ok(Some(Acceptance::HadIt)), true) + .is_ok()); + assert!(builder.peer_list().qualified_peers(&mut rng).is_empty()); + assert!(builder.peer_list().dishonest_peers().is_empty()); + // Builder acceptance for no signature from ourselves. + assert!(builder.handle_acceptance(None, Ok(None), true).is_ok()); + assert!(builder.peer_list().qualified_peers(&mut rng).is_empty()); + assert!(builder.peer_list().dishonest_peers().is_empty()); + // Builder acceptance for no signature from a peer. + // Peer shouldn't be registered. 
+ assert!(builder + .handle_acceptance(Some(honest_peer), Ok(None), true) + .is_ok()); + assert!(builder.peer_list().qualified_peers(&mut rng).is_empty()); + assert!(builder.peer_list().dishonest_peers().is_empty()); + // Builder acceptance for existent signature from a peer. + // Peer shouldn't be registered. + assert!(builder + .handle_acceptance(Some(honest_peer), Ok(Some(Acceptance::HadIt)), true) + .is_ok()); + assert!(builder.peer_list().qualified_peers(&mut rng).is_empty()); + assert!(builder.peer_list().dishonest_peers().is_empty()); + // Builder acceptance for needed signature from a peer. + // Peer should be registered as honest. + assert!(builder + .handle_acceptance(Some(honest_peer), Ok(Some(Acceptance::NeededIt)), true) + .is_ok()); + assert!(builder + .peer_list() + .qualified_peers(&mut rng) + .contains(&honest_peer)); + assert!(builder.peer_list().dishonest_peers().is_empty()); + // Builder acceptance for error on signature handling from ourselves. + assert!(builder + .handle_acceptance( + None, + Err(BlockAcquisitionError::InvalidStateTransition), + true + ) + .is_err()); + assert!(builder + .peer_list() + .qualified_peers(&mut rng) + .contains(&honest_peer)); + assert!(builder.peer_list().dishonest_peers().is_empty()); + // Builder acceptance for error on signature handling from a peer. + // Peer should be registered as dishonest. 
+ assert!(builder + .handle_acceptance( + Some(dishonest_peer), + Err(BlockAcquisitionError::InvalidStateTransition), + true + ) + .is_err()); + assert!(builder + .peer_list() + .qualified_peers(&mut rng) + .contains(&honest_peer)); + assert!(builder + .peer_list() + .dishonest_peers() + .contains(&dishonest_peer)); +} + +#[test] +fn handle_acceptance_unlatches_builder() { + let mut rng = TestRng::new(); + let block = TestBlockBuilder::new().build(&mut rng); + let mut builder = BlockBuilder::new( + block.header().block_hash(), + false, + 1, + TimeDiff::from_seconds(1), + LegacyRequiredFinality::Strict, + ProtocolVersion::V1_0_0, + ); + + // Check that if a valid element was received, the latch is reset + builder.latch_by(2); + assert!(builder + .handle_acceptance(None, Ok(Some(Acceptance::NeededIt)), true) + .is_ok()); + assert_eq!(builder.latch.count(), 0); + builder.latch_by(2); + assert!(builder + .handle_acceptance(None, Ok(Some(Acceptance::NeededIt)), false) + .is_ok()); + assert_eq!(builder.latch.count(), 0); + + // Check that if a element that was previously received, + // the latch is not decremented since this is a late response + builder.latch_by(2); + assert!(builder + .handle_acceptance(None, Ok(Some(Acceptance::HadIt)), true) + .is_ok()); + assert_eq!(builder.latch.count(), 2); + assert!(builder + .handle_acceptance(None, Ok(Some(Acceptance::HadIt)), false) + .is_ok()); + assert_eq!(builder.latch.count(), 2); + + // Check that the latch is decremented if a response lead to an error, + // but only if the builder was waiting for that element in its current state + assert!(builder + .handle_acceptance( + None, + Err(BlockAcquisitionError::InvalidStateTransition), + true + ) + .is_err()); + assert_eq!(builder.latch.count(), 1); + assert!(builder + .handle_acceptance( + None, + Err(BlockAcquisitionError::InvalidStateTransition), + false + ) + .is_err()); + assert_eq!(builder.latch.count(), 1); + + // Check that the latch is decremented if a valid response 
was received that did not produce any + // side effect, but only if the builder was waiting for that element in its current state + builder.latch_by(1); + assert!(builder.handle_acceptance(None, Ok(None), false).is_ok()); + assert_eq!(builder.latch.count(), 2); + assert!(builder.handle_acceptance(None, Ok(None), true).is_ok()); + assert_eq!(builder.latch.count(), 1); +} + +#[test] +fn register_era_validator_weights() { + let mut rng = TestRng::new(); + let block = TestBlockBuilder::new().build(&mut rng); + let mut builder = BlockBuilder::new( + *block.hash(), + false, + 1, + TimeDiff::from_seconds(1), + LegacyRequiredFinality::Strict, + ProtocolVersion::V1_0_0, + ); + let latest_timestamp = builder.last_progress; + + let mut validator_matrix = ValidatorMatrix::new_with_validator(ALICE_SECRET_KEY.clone()); + + thread::sleep(Duration::from_millis(5)); + // Register default era (0). We have no information in the builder to + // determine if these weights are relevant, so they shouldn't be stored. + builder.register_era_validator_weights(&validator_matrix); + assert!(builder.validator_weights.is_none()); + assert_eq!(latest_timestamp, builder.last_progress); + // Set the era of the builder to 1000. + builder.era_id = Some(EraId::from(1000)); + thread::sleep(Duration::from_millis(5)); + // Register the default era again. The builder is interested in weights + // for era 1000, but the matrix has weights only for era 0, so they + // shouldn't be registered. + builder.register_era_validator_weights(&validator_matrix); + assert!(builder.validator_weights.is_none()); + assert_eq!(latest_timestamp, builder.last_progress); + // Set the era of the builder to the random block's era. + builder.era_id = Some(block.era_id()); + // Add weights for that era to the validator matrix. 
+ let weights = EraValidatorWeights::new( + block.era_id(), + BTreeMap::from([(ALICE_PUBLIC_KEY.clone(), 100.into())]), + Ratio::new(1, 3), + ); + validator_matrix.register_era_validator_weights(weights.clone()); + thread::sleep(Duration::from_millis(5)); + // Register the random block's era weights. This should store the weights. + builder.register_era_validator_weights(&validator_matrix); + assert_eq!(builder.validator_weights.unwrap(), weights); + assert_ne!(latest_timestamp, builder.last_progress); +} + +#[test] +fn register_executable_block() { + let mut rng = TestRng::new(); + let chain_name_hash = ChainNameDigest::random(&mut rng); + // Create a random block. + let block = TestBlockBuilder::new().build(&mut rng); + // Create a builder for the block. + let mut builder = BlockBuilder::new( + *block.hash(), + false, + 1, + TimeDiff::from_seconds(1), + LegacyRequiredFinality::Strict, + ProtocolVersion::V1_0_0, + ); + let mut latest_timestamp = builder.last_progress; + // Create mock era weights for the block's era. + let weights = EraValidatorWeights::new( + block.era_id(), + BTreeMap::from([(ALICE_PUBLIC_KEY.clone(), 100.into())]), + Ratio::new(1, 3), + ); + // Create a signature acquisition to fill. + let mut signature_acquisition = SignatureAcquisition::new( + vec![ALICE_PUBLIC_KEY.clone()], + LegacyRequiredFinality::Strict, + ); + let sig = FinalitySignatureV2::create( + *block.hash(), + block.height(), + block.era_id(), + chain_name_hash, + &ALICE_SECRET_KEY, + ); + assert_eq!( + signature_acquisition.apply_signature(sig.into(), &weights), + Acceptance::NeededIt + ); + // Set the builder's state to `HaveStrictFinalitySignatures`. 
+ let expected_txns = vec![Transaction::random(&mut rng)]; + let executable_block = + ExecutableBlock::from_block_and_transactions(block.clone(), expected_txns.clone()); + builder.acquisition_state = BlockAcquisitionState::HaveStrictFinalitySignatures( + Box::new(block.clone().into()), + signature_acquisition.clone(), + ); + + // Register the finalized block. + thread::sleep(Duration::from_millis(5)); + builder.register_made_executable_block(executable_block.clone()); + match &builder.acquisition_state { + BlockAcquisitionState::HaveExecutableBlock(actual_block, executable_block, enqueued) => { + assert_eq!(actual_block.hash(), block.hash()); + assert_eq!(expected_txns, *executable_block.transactions); + assert!(!enqueued); + } + _ => panic!("Unexpected outcome in registering finalized block"), + } + assert!(!builder.is_failed()); + assert_ne!(latest_timestamp, builder.last_progress); + latest_timestamp = builder.last_progress; + + // Make the builder historical. + builder.should_fetch_execution_state = true; + // Reset the state to `HaveStrictFinalitySignatures`. + builder.acquisition_state = BlockAcquisitionState::HaveStrictFinalitySignatures( + Box::new(block.into()), + signature_acquisition.clone(), + ); + // Register the finalized block. This should fail on historical builders. + thread::sleep(Duration::from_millis(5)); + builder.register_made_executable_block(executable_block); + assert!(builder.is_failed()); + assert_ne!(latest_timestamp, builder.last_progress); +} + +#[test] +fn register_block_execution() { + let mut rng = TestRng::new(); + let chain_name_hash = ChainNameDigest::random(&mut rng); + // Create a random block. + let block = TestBlockBuilder::new().build(&mut rng); + // Create a builder for the block. 
+ let mut builder = BlockBuilder::new( + *block.hash(), + false, + 1, + TimeDiff::from_seconds(1), + LegacyRequiredFinality::Strict, + ProtocolVersion::V1_0_0, + ); + let mut latest_timestamp = builder.last_progress; + // Create mock era weights for the block's era. + let weights = EraValidatorWeights::new( + block.era_id(), + BTreeMap::from([(ALICE_PUBLIC_KEY.clone(), 100.into())]), + Ratio::new(1, 3), + ); + // Create a signature acquisition to fill. + let mut signature_acquisition = SignatureAcquisition::new( + vec![ALICE_PUBLIC_KEY.clone()], + LegacyRequiredFinality::Strict, + ); + let sig = FinalitySignatureV2::create( + *block.hash(), + block.height(), + block.era_id(), + chain_name_hash, + &ALICE_SECRET_KEY, + ); + assert_eq!( + signature_acquisition.apply_signature(sig.into(), &weights), + Acceptance::NeededIt + ); + + let executable_block = Box::new(ExecutableBlock::from_block_and_transactions( + block.clone(), + vec![Transaction::random(&mut rng)], + )); + builder.acquisition_state = + BlockAcquisitionState::HaveExecutableBlock(Box::new(block.into()), executable_block, false); + + assert_eq!(builder.execution_progress, ExecutionProgress::Idle); + // Register the block execution enquement as successful. This should + // advance the execution progress. + thread::sleep(Duration::from_millis(5)); + builder.register_block_execution_enqueued(); + assert_eq!(builder.execution_progress, ExecutionProgress::Started); + assert!(matches!( + builder.acquisition_state, + BlockAcquisitionState::HaveExecutableBlock(_, _, true) + )); + assert!(!builder.is_failed()); + assert_ne!(latest_timestamp, builder.last_progress); + latest_timestamp = builder.last_progress; + + // Attempt to register the block for execution again. The state shouldn't + // change and the builder shouldn't fail. 
+ thread::sleep(Duration::from_millis(5)); + builder.register_block_execution_enqueued(); + assert_eq!(builder.execution_progress, ExecutionProgress::Started); + assert!(matches!( + builder.acquisition_state, + BlockAcquisitionState::HaveExecutableBlock(_, _, true) + )); + assert!(!builder.is_failed()); + assert_ne!(latest_timestamp, builder.last_progress); + latest_timestamp = builder.last_progress; + + // Make the builder historical. + builder.should_fetch_execution_state = true; + // Register the block execution enquement as successful. This should put + // the builder in a failed state as we shouldn't execute historical blocks. + thread::sleep(Duration::from_millis(5)); + builder.register_block_execution_enqueued(); + assert!(builder.is_failed()); + assert_ne!(latest_timestamp, builder.last_progress); +} + +#[test] +fn register_block_executed() { + let mut rng = TestRng::new(); + let chain_name_hash = ChainNameDigest::random(&mut rng); + // Create a random block. + let block = TestBlockBuilder::new().build(&mut rng); + // Create a builder for the block. + let mut builder = BlockBuilder::new( + *block.hash(), + false, + 1, + TimeDiff::from_seconds(1), + LegacyRequiredFinality::Strict, + ProtocolVersion::V1_0_0, + ); + let mut latest_timestamp = builder.last_progress; + // Create mock era weights for the block's era. + let weights = EraValidatorWeights::new( + block.era_id(), + BTreeMap::from([(ALICE_PUBLIC_KEY.clone(), 100.into())]), + Ratio::new(1, 3), + ); + // Create a signature acquisition to fill. + let mut signature_acquisition = SignatureAcquisition::new( + vec![ALICE_PUBLIC_KEY.clone()], + LegacyRequiredFinality::Strict, + ); + let sig = FinalitySignatureV2::create( + *block.hash(), + block.height(), + block.era_id(), + chain_name_hash, + &ALICE_SECRET_KEY, + ); + assert_eq!( + signature_acquisition.apply_signature(sig.into(), &weights), + Acceptance::NeededIt + ); + // Set the builder state to `HaveStrictFinalitySignatures`. 
+ builder.acquisition_state = BlockAcquisitionState::HaveStrictFinalitySignatures( + Box::new(block.into()), + signature_acquisition, + ); + // Mark execution as started. + builder.execution_progress = ExecutionProgress::Started; + + thread::sleep(Duration::from_millis(5)); + // Register the block as executed. This should advance the execution + // progress to `Done`. + builder.register_block_executed(); + assert_eq!(builder.execution_progress, ExecutionProgress::Done); + assert!(!builder.is_failed()); + assert_ne!(latest_timestamp, builder.last_progress); + latest_timestamp = builder.last_progress; + + thread::sleep(Duration::from_millis(5)); + // Register the block as executed again. This should not change the + // builder's state. + builder.register_block_executed(); + assert_eq!(builder.execution_progress, ExecutionProgress::Done); + assert!(!builder.is_failed()); + assert_eq!(latest_timestamp, builder.last_progress); + + // Set the builder to be historical and register the block as executed + // again. This should put the builder in the failed state. + builder.should_fetch_execution_state = true; + thread::sleep(Duration::from_millis(5)); + builder.register_block_executed(); + assert!(builder.is_failed()); + assert_ne!(latest_timestamp, builder.last_progress); +} + +#[test] +fn register_block_marked_complete() { + let mut rng = TestRng::new(); + let chain_name_hash = ChainNameDigest::random(&mut rng); + // Create a random block. + let block = TestBlockBuilder::new().build(&mut rng); + // Create a builder for the block. + let mut builder = BlockBuilder::new( + *block.hash(), + false, + 1, + TimeDiff::from_seconds(1), + LegacyRequiredFinality::Strict, + ProtocolVersion::V1_0_0, + ); + // Make the builder historical. + builder.should_fetch_execution_state = true; + let mut latest_timestamp = builder.last_progress; + // Create mock era weights for the block's era. 
+ let weights = EraValidatorWeights::new( + block.era_id(), + BTreeMap::from([(ALICE_PUBLIC_KEY.clone(), 100.into())]), + Ratio::new(1, 3), + ); + // Create a signature acquisition to fill. + let mut signature_acquisition = SignatureAcquisition::new( + vec![ALICE_PUBLIC_KEY.clone()], + LegacyRequiredFinality::Strict, + ); + let sig = FinalitySignatureV2::create( + *block.hash(), + block.height(), + block.era_id(), + chain_name_hash, + &ALICE_SECRET_KEY, + ); + assert_eq!( + signature_acquisition.apply_signature(sig.into(), &weights), + Acceptance::NeededIt + ); + + // Set the builder state to `HaveStrictFinalitySignatures`. + builder.acquisition_state = BlockAcquisitionState::HaveStrictFinalitySignatures( + Box::new(block.clone().into()), + signature_acquisition.clone(), + ); + // Register the block as marked complete. Since there are no missing + // deploys, this should transition the builder state to + // `HaveStrictFinalitySignatures`. + thread::sleep(Duration::from_millis(5)); + builder.register_marked_complete(); + assert!(matches!( + builder.acquisition_state, + BlockAcquisitionState::Complete(..) + )); + assert!(!builder.is_failed()); + assert_ne!(latest_timestamp, builder.last_progress); + latest_timestamp = builder.last_progress; + + // Make this a forward builder. + builder.should_fetch_execution_state = false; + // Set the builder state to `HaveStrictFinalitySignatures`. + builder.acquisition_state = BlockAcquisitionState::HaveStrictFinalitySignatures( + Box::new(block.into()), + signature_acquisition.clone(), + ); + // Register the block as marked complete. In the forward flow we should + // abort the builder as an attempt to mark the block complete is invalid. 
+ thread::sleep(Duration::from_millis(5)); + builder.register_marked_complete(); + assert!(builder.is_failed()); + assert_ne!(latest_timestamp, builder.last_progress); +} diff --git a/node/src/components/block_synchronizer/block_synchronizer_progress.rs b/node/src/components/block_synchronizer/block_synchronizer_progress.rs new file mode 100644 index 0000000000..ecc9e9ebca --- /dev/null +++ b/node/src/components/block_synchronizer/block_synchronizer_progress.rs @@ -0,0 +1,53 @@ +use std::fmt::{Display, Formatter}; + +use casper_types::{BlockHash, EraId, Timestamp}; + +#[derive(Debug)] +pub(crate) enum BlockSynchronizerProgress { + Idle, + Syncing(BlockHash, Option, Timestamp), + Executing(BlockHash, u64, EraId), + Synced(BlockHash, u64, EraId), +} + +impl BlockSynchronizerProgress { + pub(crate) fn is_active(&self) -> bool { + match self { + BlockSynchronizerProgress::Idle | BlockSynchronizerProgress::Synced(_, _, _) => false, + BlockSynchronizerProgress::Syncing(_, _, _) + | BlockSynchronizerProgress::Executing(_, _, _) => true, + } + } +} + +impl Display for BlockSynchronizerProgress { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + let display_height = |f: &mut Formatter<'_>, maybe_height: &Option| match maybe_height + { + Some(height) => write!(f, "block {height}"), + None => write!(f, "unknown block height"), + }; + match self { + BlockSynchronizerProgress::Idle => write!(f, "block synchronizer idle"), + BlockSynchronizerProgress::Syncing(block_hash, block_height, timestamp) => { + write!(f, "block synchronizer syncing ")?; + display_height(f, block_height)?; + write!(f, "{}, {}", timestamp, block_hash) + } + BlockSynchronizerProgress::Executing(block_hash, block_height, era_id) => { + write!( + f, + "block synchronizer executing block {}, {}, {}", + block_height, block_hash, era_id + ) + } + BlockSynchronizerProgress::Synced(block_hash, block_height, era_id) => { + write!( + f, + "block synchronizer synced block {}, {}, {}", + block_height, 
block_hash, era_id + ) + } + } + } +} diff --git a/node/src/components/block_synchronizer/config.rs b/node/src/components/block_synchronizer/config.rs new file mode 100644 index 0000000000..29ba11dd99 --- /dev/null +++ b/node/src/components/block_synchronizer/config.rs @@ -0,0 +1,42 @@ +use std::str::FromStr; + +use datasize::DataSize; +use serde::{Deserialize, Serialize}; + +use casper_types::TimeDiff; + +const DEFAULT_MAX_PARALLEL_TRIE_FETCHES: u32 = 5000; +const DEFAULT_PEER_REFRESH_INTERVAL: &str = "90sec"; +const DEFAULT_NEED_NEXT_INTERVAL: &str = "1sec"; +const DEFAULT_DISCONNECT_DISHONEST_PEERS_INTERVAL: &str = "10sec"; +const DEFAULT_LATCH_RESET_INTERVAL: &str = "5sec"; + +/// Configuration options for fetching. +#[derive(Copy, Clone, DataSize, Debug, Deserialize, Serialize)] +pub struct Config { + /// Maximum number of trie nodes to fetch in parallel. + pub max_parallel_trie_fetches: u32, + /// Time interval for the node to ask for refreshed peers. + pub peer_refresh_interval: TimeDiff, + /// Time interval for the node to check what the block synchronizer needs to acquire next. + pub need_next_interval: TimeDiff, + /// Time interval for recurring disconnection of dishonest peers. + pub disconnect_dishonest_peers_interval: TimeDiff, + /// Time interval for resetting the latch in block builders. 
+ pub latch_reset_interval: TimeDiff, +} + +impl Default for Config { + fn default() -> Self { + Config { + max_parallel_trie_fetches: DEFAULT_MAX_PARALLEL_TRIE_FETCHES, + peer_refresh_interval: TimeDiff::from_str(DEFAULT_PEER_REFRESH_INTERVAL).unwrap(), + need_next_interval: TimeDiff::from_str(DEFAULT_NEED_NEXT_INTERVAL).unwrap(), + disconnect_dishonest_peers_interval: TimeDiff::from_str( + DEFAULT_DISCONNECT_DISHONEST_PEERS_INTERVAL, + ) + .unwrap(), + latch_reset_interval: TimeDiff::from_str(DEFAULT_LATCH_RESET_INTERVAL).unwrap(), + } + } +} diff --git a/node/src/components/block_synchronizer/deploy_acquisition.rs b/node/src/components/block_synchronizer/deploy_acquisition.rs new file mode 100644 index 0000000000..6dacebd01a --- /dev/null +++ b/node/src/components/block_synchronizer/deploy_acquisition.rs @@ -0,0 +1,176 @@ +#[cfg(test)] +mod tests; + +use std::{ + cmp::Ord, + fmt::{Display, Formatter}, +}; + +use datasize::DataSize; +use tracing::debug; + +use casper_storage::block_store::types::ApprovalsHashes; +use casper_types::{TransactionHash, TransactionId}; + +use super::block_acquisition::Acceptance; + +#[derive(Clone, Copy, PartialEq, Eq, DataSize, Debug)] +pub(crate) enum Error { + AcquisitionByIdNotPossible, + EncounteredNonVacantTransactionState, +} + +impl Display for Error { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self { + Error::AcquisitionByIdNotPossible => write!(f, "acquisition by id is not possible"), + Error::EncounteredNonVacantTransactionState => { + write!(f, "encountered non vacant transaction state") + } + } + } +} + +#[derive(Clone, Copy, PartialEq, Eq, DataSize, Debug)] +pub(super) enum TransactionIdentifier { + ByHash(TransactionHash), + ById(TransactionId), +} + +#[derive(Clone, PartialEq, Eq, DataSize, Debug)] +pub(super) enum TransactionAcquisition { + ByHash(Acquisition), + ById(Acquisition), +} + +impl TransactionAcquisition { + pub(super) fn new_by_hash( + transaction_hashes: Vec, + 
need_execution_result: bool, + ) -> Self { + TransactionAcquisition::ByHash(Acquisition::new(transaction_hashes, need_execution_result)) + } + + pub(super) fn apply_transaction( + &mut self, + transaction_id: TransactionId, + ) -> Option { + match self { + TransactionAcquisition::ByHash(acquisition) => { + acquisition.apply_transaction(transaction_id.transaction_hash()) + } + TransactionAcquisition::ById(acquisition) => { + acquisition.apply_transaction(transaction_id) + } + } + } + + pub(super) fn apply_approvals_hashes( + &mut self, + approvals_hashes: &ApprovalsHashes, + ) -> Result<(), Error> { + let new_acquisition = match self { + TransactionAcquisition::ByHash(acquisition) => { + let mut new_txn_ids = vec![]; + for ((transaction_hash, txn_state), approvals_hash) in acquisition + .inner + .drain(..) + .zip(approvals_hashes.approvals_hashes()) + { + if !matches!(txn_state, TransactionState::Vacant) { + return Err(Error::EncounteredNonVacantTransactionState); + }; + let txn_id = match (transaction_hash, approvals_hash) { + (TransactionHash::Deploy(deploy_hash), deploy_approvals_hash) => { + TransactionId::new(deploy_hash.into(), deploy_approvals_hash) + } + (TransactionHash::V1(transaction_v1_hash), txn_v1_approvals_hash) => { + TransactionId::new(transaction_v1_hash.into(), txn_v1_approvals_hash) + } + }; + new_txn_ids.push((txn_id, TransactionState::Vacant)); + } + + TransactionAcquisition::ById(Acquisition { + inner: new_txn_ids, + need_execution_result: acquisition.need_execution_result, + }) + } + TransactionAcquisition::ById(_) => { + debug!("TransactionAcquisition: attempt to apply approvals hashes on a transaction acquired by ID"); + return Err(Error::AcquisitionByIdNotPossible); + } + }; + + *self = new_acquisition; + Ok(()) + } + + pub(super) fn needs_transaction(&self) -> bool { + match self { + TransactionAcquisition::ByHash(acq) => acq.needs_transaction().is_some(), + TransactionAcquisition::ById(acq) => acq.needs_transaction().is_some(), + } + } + 
+ pub(super) fn next_needed_transaction(&self) -> Option { + match self { + TransactionAcquisition::ByHash(acq) => { + acq.needs_transaction().map(TransactionIdentifier::ByHash) + } + TransactionAcquisition::ById(acq) => { + acq.needs_transaction().map(TransactionIdentifier::ById) + } + } + } +} + +#[derive(Clone, Copy, PartialEq, Eq, DataSize, Debug, Default)] +pub(super) enum TransactionState { + #[default] + Vacant, + HaveTransactionBody, +} + +#[derive(Clone, PartialEq, Eq, DataSize, Debug)] +pub(super) struct Acquisition { + inner: Vec<(T, TransactionState)>, + need_execution_result: bool, +} + +impl Acquisition { + fn new(txn_identifiers: Vec, need_execution_result: bool) -> Self { + let inner = txn_identifiers + .into_iter() + .map(|txn_identifier| (txn_identifier, TransactionState::Vacant)) + .collect(); + Acquisition { + inner, + need_execution_result, + } + } + + fn apply_transaction(&mut self, transaction_identifier: T) -> Option { + for item in self.inner.iter_mut() { + if item.0 == transaction_identifier { + match item.1 { + TransactionState::Vacant => { + item.1 = TransactionState::HaveTransactionBody; + return Some(Acceptance::NeededIt); + } + TransactionState::HaveTransactionBody => return Some(Acceptance::HadIt), + } + } + } + None + } + + fn needs_transaction(&self) -> Option { + self.inner + .iter() + .find_map(|(txn_identifier, state)| match state { + TransactionState::Vacant => Some(*txn_identifier), + TransactionState::HaveTransactionBody => None, + }) + } +} diff --git a/node/src/components/block_synchronizer/deploy_acquisition/tests.rs b/node/src/components/block_synchronizer/deploy_acquisition/tests.rs new file mode 100644 index 0000000000..7e541f0a98 --- /dev/null +++ b/node/src/components/block_synchronizer/deploy_acquisition/tests.rs @@ -0,0 +1,200 @@ +use std::collections::{BTreeMap, VecDeque}; + +use assert_matches::assert_matches; +use rand::Rng; + +use casper_storage::block_store::types::ApprovalsHashes; +use casper_types::{ + 
global_state::TrieMerkleProof, testing::TestRng, AccessRights, CLValue, StoredValue, + TestBlockBuilder, Transaction, URef, +}; + +use super::*; + +fn gen_test_transactions(rng: &mut TestRng) -> BTreeMap { + let num_txns = rng.gen_range(2..15); + (0..num_txns) + .map(|_| { + let transaction = Transaction::random(rng); + (transaction.hash(), transaction) + }) + .collect() +} + +fn gen_approvals_hashes<'a, I: Iterator + Clone>( + rng: &mut TestRng, + transactions_iter: I, +) -> ApprovalsHashes { + let era = rng.gen_range(0..6); + let block = TestBlockBuilder::new() + .era(era) + .height(era * 10 + rng.gen_range(0..10)) + .transactions(transactions_iter.clone()) + .build(rng); + + ApprovalsHashes::new( + *block.hash(), + transactions_iter + .map(|txn| txn.compute_approvals_hash().unwrap()) + .collect(), + TrieMerkleProof::new( + URef::new([255; 32], AccessRights::NONE).into(), + StoredValue::CLValue(CLValue::from_t(()).unwrap()), + VecDeque::new(), + ), + ) +} + +fn get_transaction_id(transaction: &Transaction) -> TransactionId { + match transaction { + Transaction::Deploy(deploy) => TransactionId::new( + TransactionHash::Deploy(*deploy.hash()), + deploy.compute_approvals_hash().unwrap(), + ), + Transaction::V1(transaction_v1) => TransactionId::new( + TransactionHash::V1(*transaction_v1.hash()), + transaction_v1.compute_approvals_hash().unwrap(), + ), + } +} + +#[test] +fn dont_apply_approvals_hashes_when_acquiring_by_id() { + let mut rng = TestRng::new(); + let test_transactions = gen_test_transactions(&mut rng); + let approvals_hashes = gen_approvals_hashes(&mut rng, test_transactions.values()); + + let mut txn_acquisition = TransactionAcquisition::ById(Acquisition::new( + test_transactions.values().map(get_transaction_id).collect(), + false, + )); + + assert_matches!( + txn_acquisition.apply_approvals_hashes(&approvals_hashes), + Err(Error::AcquisitionByIdNotPossible) + ); + assert_matches!( + txn_acquisition.next_needed_transaction().unwrap(), + 
TransactionIdentifier::ById(id) if test_transactions.contains_key(&id.transaction_hash()) + ); +} + +#[test] +fn apply_approvals_on_acquisition_by_hash_creates_correct_ids() { + let mut rng = TestRng::new(); + let test_transactions = gen_test_transactions(&mut rng); + let mut txn_acquisition = + TransactionAcquisition::new_by_hash(test_transactions.keys().copied().collect(), false); + + // Generate the ApprovalsHashes for all test transactions except the last one + let approvals_hashes = gen_approvals_hashes( + &mut rng, + test_transactions.values().take(test_transactions.len() - 1), + ); + + assert_matches!( + txn_acquisition.next_needed_transaction().unwrap(), + TransactionIdentifier::ByHash(hash) if test_transactions.contains_key(&hash) + ); + assert!(txn_acquisition + .apply_approvals_hashes(&approvals_hashes) + .is_ok()); + + // Now acquisition is done by id + assert_matches!( + txn_acquisition.next_needed_transaction().unwrap(), + TransactionIdentifier::ById(id) if test_transactions.contains_key(&id.transaction_hash()) + ); + + // Apply the transactions + for transaction in test_transactions.values().take(test_transactions.len() - 1) { + let acceptance = txn_acquisition.apply_transaction(get_transaction_id(transaction)); + assert_matches!(acceptance, Some(Acceptance::NeededIt)); + } + + // The last transaction was excluded from acquisition when we applied the approvals hashes so it + // should not be needed + assert!(!txn_acquisition.needs_transaction()); + + // Try to apply the last transaction; it should not be accepted + let last_transaction = test_transactions.values().last().unwrap(); + let last_txn_acceptance = + txn_acquisition.apply_transaction(get_transaction_id(last_transaction)); + assert_matches!(last_txn_acceptance, None); +} + +#[test] +fn apply_approvals_hashes_after_having_already_applied_transactions() { + let mut rng = TestRng::new(); + let test_transactions = gen_test_transactions(&mut rng); + let mut txn_acquisition = + 
TransactionAcquisition::new_by_hash(test_transactions.keys().copied().collect(), false); + let (_, first_txn) = test_transactions.first_key_value().unwrap(); + + let approvals_hashes = gen_approvals_hashes(&mut rng, test_transactions.values()); + + // Apply a valid transaction that was not applied before. This should succeed. + let acceptance = txn_acquisition.apply_transaction(get_transaction_id(first_txn)); + assert_matches!(acceptance, Some(Acceptance::NeededIt)); + + // Apply approvals hashes. This should fail since we have already acquired transactions by hash. + assert_matches!( + txn_acquisition.apply_approvals_hashes(&approvals_hashes), + Err(Error::EncounteredNonVacantTransactionState) + ); +} + +#[test] +fn partially_applied_txns_on_acquisition_by_hash_should_need_missing_txns() { + let mut rng = TestRng::new(); + let test_transactions = gen_test_transactions(&mut rng); + let mut txn_acquisition = + TransactionAcquisition::new_by_hash(test_transactions.keys().copied().collect(), false); + + assert_matches!( + txn_acquisition.next_needed_transaction().unwrap(), + TransactionIdentifier::ByHash(hash) if test_transactions.contains_key(&hash) + ); + + // Apply all the transactions except for the last one + for transaction in test_transactions.values().take(test_transactions.len() - 1) { + let acceptance = txn_acquisition.apply_transaction(get_transaction_id(transaction)); + assert_matches!(acceptance, Some(Acceptance::NeededIt)); + } + + // Last transaction should be needed now + let last_txn = test_transactions.iter().last().unwrap().1; + assert_matches!( + txn_acquisition.next_needed_transaction().unwrap(), + TransactionIdentifier::ByHash(hash) if last_txn.hash() == hash + ); + + // Apply the last transaction and check the acceptance + let last_txn_acceptance = txn_acquisition.apply_transaction(get_transaction_id(last_txn)); + assert_matches!(last_txn_acceptance, Some(Acceptance::NeededIt)); + + // Try to add the last transaction again to check the 
acceptance + let already_registered_acceptance = + txn_acquisition.apply_transaction(get_transaction_id(last_txn)); + assert_matches!(already_registered_acceptance, Some(Acceptance::HadIt)); +} + +#[test] +fn apply_unregistered_transaction_returns_no_acceptance() { + let mut rng = TestRng::new(); + let test_transactions = gen_test_transactions(&mut rng); + let mut txn_acquisition = + TransactionAcquisition::new_by_hash(test_transactions.keys().copied().collect(), false); + + let unregistered_transaction = Transaction::random(&mut rng); + let unregistered_txn_acceptance = + txn_acquisition.apply_transaction(get_transaction_id(&unregistered_transaction)); + + // An unregistered transaction should not be accepted + assert!(unregistered_txn_acceptance.is_none()); + let first_transaction = test_transactions.iter().next().unwrap().1; + assert_matches!( + txn_acquisition.next_needed_transaction().unwrap(), + TransactionIdentifier::ByHash(hash) if first_transaction.hash() == hash + ); +} diff --git a/node/src/components/block_synchronizer/error.rs b/node/src/components/block_synchronizer/error.rs new file mode 100644 index 0000000000..52561ae7c7 --- /dev/null +++ b/node/src/components/block_synchronizer/error.rs @@ -0,0 +1,87 @@ +use std::fmt::{Display, Formatter}; + +use datasize::DataSize; +use derive_more::From; + +use casper_types::{Digest, TransactionHash, TransactionId}; + +use super::deploy_acquisition; + +use casper_types::BlockHash; + +#[derive(Clone, Copy, From, PartialEq, Eq, DataSize, Debug)] +pub(crate) enum BlockAcquisitionError { + InvalidStateTransition, + BlockHashMismatch { + expected: BlockHash, + actual: BlockHash, + }, + RootHashMismatch { + expected: Digest, + actual: Digest, + }, + InvalidAttemptToAcquireExecutionResults, + #[from] + InvalidAttemptToApplyApprovalsHashes(deploy_acquisition::Error), + InvalidAttemptToApplyTransaction { + txn_id: TransactionId, + }, + MissingApprovalsHashes(TransactionHash), + InvalidAttemptToMarkComplete, + 
InvalidAttemptToEnqueueBlockForExecution, + ExecutionResults(super::execution_results_acquisition::Error), + InvalidTransactionType, +} + +impl Display for BlockAcquisitionError { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self { + BlockAcquisitionError::InvalidStateTransition => write!(f, "invalid state transition"), + BlockAcquisitionError::InvalidAttemptToMarkComplete => { + write!(f, "invalid attempt to mark complete") + } + BlockAcquisitionError::InvalidAttemptToAcquireExecutionResults => { + write!( + f, + "invalid attempt to acquire execution results while in a terminal state" + ) + } + BlockAcquisitionError::BlockHashMismatch { expected, actual } => { + write!( + f, + "block hash mismatch: expected {} actual: {}", + expected, actual + ) + } + BlockAcquisitionError::RootHashMismatch { expected, actual } => write!( + f, + "root hash mismatch: expected {} actual: {}", + expected, actual + ), + BlockAcquisitionError::ExecutionResults(error) => { + write!(f, "execution results error: {}", error) + } + BlockAcquisitionError::InvalidAttemptToApplyApprovalsHashes(error) => write!( + f, + "invalid attempt to apply approvals hashes results: {}", + error + ), + BlockAcquisitionError::InvalidAttemptToEnqueueBlockForExecution => { + write!(f, "invalid attempt to enqueue block for execution") + } + BlockAcquisitionError::InvalidAttemptToApplyTransaction { txn_id } => { + write!(f, "invalid attempt to apply transaction: {}", txn_id) + } + BlockAcquisitionError::MissingApprovalsHashes(missing_txn_hash) => { + write!( + f, + "missing approvals hashes for transaction {}", + missing_txn_hash + ) + } + BlockAcquisitionError::InvalidTransactionType => { + write!(f, "invalid transaction identifier",) + } + } + } +} diff --git a/node/src/components/block_synchronizer/event.rs b/node/src/components/block_synchronizer/event.rs new file mode 100644 index 0000000000..cdf1b08f2f --- /dev/null +++ b/node/src/components/block_synchronizer/event.rs @@ -0,0 
+1,185 @@ +use std::fmt::{self, Display, Formatter}; + +use derive_more::From; +use either::Either; +use serde::Serialize; + +use casper_storage::{ + block_store::types::ApprovalsHashes, data_access_layer::ExecutionResultsChecksumResult, +}; +use casper_types::{Block, BlockHash, BlockHeader, FinalitySignature, Transaction}; + +use super::GlobalStateSynchronizerEvent; +use crate::{ + components::{ + block_synchronizer::{GlobalStateSynchronizerError, GlobalStateSynchronizerResponse}, + fetcher::FetchResult, + }, + effect::requests::BlockSynchronizerRequest, + types::{BlockExecutionResultsOrChunk, ExecutableBlock, LegacyDeploy, NodeId, SyncLeap}, +}; + +#[derive(From, Debug, Serialize)] +pub(crate) enum Event { + Initialize, + #[from] + Request(BlockSynchronizerRequest), + DisconnectFromPeer(NodeId), + #[from] + MadeFinalizedBlock { + block_hash: BlockHash, + result: Option, + }, + MarkBlockExecutionEnqueued(BlockHash), + MarkBlockExecuted(BlockHash), + MarkBlockCompleted { + block_hash: BlockHash, + is_new: bool, + }, + #[from] + BlockHeaderFetched(FetchResult), + #[from] + BlockFetched(FetchResult), + #[from] + ApprovalsHashesFetched(FetchResult), + #[from] + FinalitySignatureFetched(FetchResult), + #[from] + SyncLeapFetched(FetchResult), + GlobalStateSynced { + block_hash: BlockHash, + #[serde(skip_serializing)] + result: Result, + }, + GotExecutionResultsChecksum { + block_hash: BlockHash, + #[serde(skip_serializing)] + result: ExecutionResultsChecksumResult, + }, + DeployFetched { + block_hash: BlockHash, + result: Either, FetchResult>, + }, + ExecutionResultsFetched { + block_hash: BlockHash, + result: FetchResult, + }, + ExecutionResultsStored(BlockHash), + AccumulatedPeers(BlockHash, Option>), + NetworkPeers(BlockHash, Vec), + #[from] + GlobalStateSynchronizer(GlobalStateSynchronizerEvent), +} + +impl Display for Event { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self { + Event::Request(BlockSynchronizerRequest::NeedNext { .. 
}) => { + write!(f, "block synchronizer need next request") + } + Event::Request(BlockSynchronizerRequest::SyncGlobalStates(global_states)) => { + write!(f, "global states to be synced: [")?; + for (block_hash, global_state_hash) in global_states { + write!( + f, + "(block {}, global state {}), ", + block_hash, global_state_hash + )?; + } + write!(f, "]") + } + Event::Request(_) => { + write!(f, "block synchronizer request from effect builder") + } + Event::Initialize => { + write!(f, "initialize this component") + } + Event::DisconnectFromPeer(peer) => { + write!(f, "disconnected from peer {}", peer) + } + Event::BlockHeaderFetched(Ok(fetched_item)) => { + write!(f, "{}", fetched_item) + } + Event::BlockHeaderFetched(Err(fetcher_error)) => { + write!(f, "{}", fetcher_error) + } + Event::BlockFetched(Ok(fetched_item)) => { + write!(f, "{}", fetched_item) + } + Event::BlockFetched(Err(fetcher_error)) => { + write!(f, "{}", fetcher_error) + } + Event::ApprovalsHashesFetched(Ok(fetched_item)) => { + write!(f, "{}", fetched_item) + } + Event::ApprovalsHashesFetched(Err(fetcher_error)) => { + write!(f, "{}", fetcher_error) + } + Event::FinalitySignatureFetched(Ok(fetched_item)) => { + write!(f, "{}", fetched_item) + } + Event::FinalitySignatureFetched(Err(fetcher_error)) => { + write!(f, "{}", fetcher_error) + } + Event::SyncLeapFetched(Ok(fetched_item)) => { + write!(f, "{}", fetched_item) + } + Event::SyncLeapFetched(Err(fetcher_error)) => { + write!(f, "{}", fetcher_error) + } + Event::GlobalStateSynced { + block_hash: _, + result, + } => match result { + Ok(response) => write!(f, "synced global state under root {}", response.hash()), + Err(error) => write!(f, "failed to sync global state: {}", error), + }, + Event::GotExecutionResultsChecksum { + block_hash: _, + result, + } => match result.as_legacy() { + Ok(Some(digest)) => write!(f, "got exec results checksum {}", digest), + Ok(None) => write!(f, "got no exec results checksum"), + Err(error) => write!(f, "failed 
to get exec results checksum: {}", error), + }, + Event::DeployFetched { + block_hash: _, + result, + } => match result { + Either::Left(Ok(fetched_item)) => write!(f, "{}", fetched_item), + Either::Left(Err(fetcher_error)) => write!(f, "{}", fetcher_error), + Either::Right(Ok(fetched_item)) => write!(f, "{}", fetched_item), + Either::Right(Err(fetcher_error)) => write!(f, "{}", fetcher_error), + }, + Event::ExecutionResultsFetched { + block_hash: _, + result, + } => match result { + Ok(fetched_item) => write!(f, "{}", fetched_item), + Err(fetcher_error) => write!(f, "{}", fetcher_error), + }, + Event::ExecutionResultsStored { .. } => write!(f, "stored execution results"), + Event::GlobalStateSynchronizer(event) => { + write!(f, "{:?}", event) + } + Event::NetworkPeers(..) => { + write!(f, "network peers") + } + Event::AccumulatedPeers(..) => { + write!(f, "accumulated peers") + } + Event::MadeFinalizedBlock { .. } => { + write!(f, "made finalized block") + } + Event::MarkBlockExecutionEnqueued(..) => { + write!(f, "mark block enqueued for execution") + } + Event::MarkBlockExecuted(..) => { + write!(f, "block execution complete") + } + Event::MarkBlockCompleted { .. 
} => { + write!(f, "mark block completed") + } + } + } +} diff --git a/node/src/components/block_synchronizer/execution_results_acquisition.rs b/node/src/components/block_synchronizer/execution_results_acquisition.rs new file mode 100644 index 0000000000..f99afb5483 --- /dev/null +++ b/node/src/components/block_synchronizer/execution_results_acquisition.rs @@ -0,0 +1,553 @@ +#[cfg(test)] +mod tests; + +use std::{ + collections::HashMap, + fmt::{self, Display, Formatter}, +}; + +use datasize::DataSize; +use serde::{Deserialize, Serialize}; +use tracing::{debug, error}; + +use casper_types::{ + bytesrepr, execution::ExecutionResult, BlockHash, ChunkWithProof, Digest, TransactionHash, +}; + +use super::block_acquisition::Acceptance; +use crate::types::{BlockExecutionResultsOrChunk, BlockExecutionResultsOrChunkId, ValueOrChunk}; + +#[derive(Clone, Copy, PartialEq, Eq, DataSize, Debug, Serialize, Deserialize)] +pub(crate) enum ExecutionResultsChecksum { + // due to historical reasons, pre-1.5 chunks do not support Merkle proof checking + Uncheckable, + // can be Merkle proof checked + Checkable(Digest), +} + +impl Display for ExecutionResultsChecksum { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self { + Self::Uncheckable => write!(f, "uncheckable execution results"), + Self::Checkable(digest) => write!(f, "execution results checksum {}", digest), + } + } +} + +impl ExecutionResultsChecksum { + pub(super) fn is_checkable(&self) -> bool { + matches!(self, ExecutionResultsChecksum::Checkable(_)) + } +} + +#[derive(Clone, Copy, PartialEq, Eq, DataSize, Debug)] +pub(crate) enum Error { + BlockHashMismatch { + expected: BlockHash, + actual: BlockHash, + }, + ChunkCountMismatch { + block_hash: BlockHash, + expected: u64, + actual: u64, + }, + InvalidChunkCount { + block_hash: BlockHash, + }, + InvalidAttemptToApplyChecksum { + block_hash: BlockHash, + }, + AttemptToApplyDataAfterCompleted { + block_hash: BlockHash, + }, + 
AttemptToApplyDataWhenMissingChecksum { + block_hash: BlockHash, + }, + ChecksumMismatch { + block_hash: BlockHash, + expected: Digest, + actual: Digest, + }, + ChunksWithDifferentChecksum { + block_hash: BlockHash, + expected: Digest, + actual: Digest, + }, + FailedToDeserialize { + block_hash: BlockHash, + }, + ExecutionResultToDeployHashLengthDiscrepancy { + block_hash: BlockHash, + expected: usize, + actual: usize, + }, + InvalidOutcomeFromApplyingChunk { + block_hash: BlockHash, + }, +} + +impl Display for Error { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self { + Error::BlockHashMismatch { expected, actual } => { + write!( + f, + "block hash mismatch: expected {} actual: {}", + expected, actual + ) + } + Error::ExecutionResultToDeployHashLengthDiscrepancy { + block_hash, + expected, + actual, + } => { + write!( + f, + "discrepancy between the number of deploys and corresponding execution results for block_hash: {}; expected {} actual: {}", + block_hash, expected, actual + ) + } + Error::ChunkCountMismatch { + block_hash, + expected, + actual, + } => { + write!( + f, + "chunk count mismatch for block_hash: {}; expected {} actual: {}", + block_hash, expected, actual + ) + } + Error::InvalidChunkCount { block_hash } => { + write!( + f, + "invalid chunk count for block_hash: {}; execution results should either be a complete single value or come in 2 or more chunks", + block_hash + ) + } + Error::InvalidAttemptToApplyChecksum { block_hash } => { + write!( + f, + "attempt to apply checksum to a non-pending item, block_hash: {}", + block_hash + ) + } + Error::AttemptToApplyDataAfterCompleted { block_hash } => { + write!( + f, + "attempt to apply execution results for already completed block_hash: {}", + block_hash + ) + } + Error::AttemptToApplyDataWhenMissingChecksum { block_hash } => { + write!( + f, + "attempt to apply execution results before check sum for block_hash: {}", + block_hash + ) + } + Error::ChecksumMismatch { + block_hash, + 
expected, + actual, + } => { + write!( + f, + "root hash mismatch for block_hash: {}; expected {} actual: {}", + block_hash, expected, actual + ) + } + Error::FailedToDeserialize { block_hash } => { + write!( + f, + "failed to deserialize execution effects for block_hash: {}", + block_hash, + ) + } + Error::ChunksWithDifferentChecksum { + block_hash, + expected, + actual, + } => write!( + f, + "chunks with different checksum for block_hash: {}; expected {} actual: {}", + block_hash, expected, actual + ), + Error::InvalidOutcomeFromApplyingChunk { block_hash } => write!( + f, + "cannot have already had chunk if in pending mode for block hash: {}", + block_hash + ), + } + } +} + +#[derive(Clone, PartialEq, Eq, DataSize, Debug)] +pub(super) enum ExecutionResultsAcquisition { + Needed { + block_hash: BlockHash, + }, + Pending { + block_hash: BlockHash, + checksum: ExecutionResultsChecksum, + }, + Acquiring { + block_hash: BlockHash, + checksum: ExecutionResultsChecksum, + chunks: HashMap, + chunk_count: u64, + next: u64, + }, + Complete { + block_hash: BlockHash, + checksum: ExecutionResultsChecksum, + results: HashMap, + }, +} + +impl Display for ExecutionResultsAcquisition { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self { + ExecutionResultsAcquisition::Needed { block_hash } => { + write!(f, "Needed: {}", block_hash) + } + ExecutionResultsAcquisition::Pending { + block_hash, + checksum: _, + } => write!(f, "Pending: {}", block_hash), + ExecutionResultsAcquisition::Acquiring { + block_hash, + checksum: _, + chunks: _, + chunk_count, + next, + } => write!( + f, + "Acquiring: {}, chunk_count={}, next={}", + block_hash, chunk_count, next + ), + ExecutionResultsAcquisition::Complete { + block_hash, + checksum: _, + results: _, + } => write!(f, "Complete: {}", block_hash), + } + } +} + +impl ExecutionResultsAcquisition { + pub(super) fn needs_value_or_chunk( + &self, + ) -> Option<(BlockExecutionResultsOrChunkId, ExecutionResultsChecksum)> { + match 
self { + ExecutionResultsAcquisition::Needed { .. } + | ExecutionResultsAcquisition::Complete { .. } => None, + ExecutionResultsAcquisition::Pending { + block_hash, + checksum, + } => Some((BlockExecutionResultsOrChunkId::new(*block_hash), *checksum)), + ExecutionResultsAcquisition::Acquiring { + block_hash, + checksum, + next, + .. + } => Some(( + BlockExecutionResultsOrChunkId::new(*block_hash).next_chunk(*next), + *checksum, + )), + } + } + + pub(super) fn apply_checksum(self, checksum: ExecutionResultsChecksum) -> Result { + match self { + ExecutionResultsAcquisition::Needed { block_hash } => { + debug!("apply_checksum - Needed"); + Ok(ExecutionResultsAcquisition::Pending { + block_hash, + checksum, + }) + } + ExecutionResultsAcquisition::Pending { block_hash, .. } + | ExecutionResultsAcquisition::Acquiring { block_hash, .. } + | ExecutionResultsAcquisition::Complete { block_hash, .. } => { + debug!("apply_checksum - Pending | Acquiring | Complete"); + Err(Error::InvalidAttemptToApplyChecksum { block_hash }) + } + } + } + + pub(super) fn apply_block_execution_results_or_chunk( + self, + block_execution_results_or_chunk: BlockExecutionResultsOrChunk, + transaction_hashes: Vec, + ) -> Result<(Self, Acceptance), Error> { + let block_hash = *block_execution_results_or_chunk.block_hash(); + let value = block_execution_results_or_chunk.into_value(); + + debug!(%block_hash, state=%self, "apply_block_execution_results_or_chunk"); + + let expected_block_hash = self.block_hash(); + if expected_block_hash != block_hash { + debug!( + %block_hash, + "apply_block_execution_results_or_chunk: Error::BlockHashMismatch" + ); + return Err(Error::BlockHashMismatch { + expected: expected_block_hash, + actual: block_hash, + }); + } + + let (checksum, execution_results) = match (self, value) { + ( + ExecutionResultsAcquisition::Pending { checksum, .. }, + ValueOrChunk::Value(execution_results), + ) + | ( + ExecutionResultsAcquisition::Acquiring { checksum, .. 
}, + ValueOrChunk::Value(execution_results), + ) => { + debug!( + "apply_block_execution_results_or_chunk: (Pending, Value) | (Acquiring, Value)" + ); + (checksum, execution_results) + } + ( + ExecutionResultsAcquisition::Pending { checksum, .. }, + ValueOrChunk::ChunkWithProof(chunk), + ) => { + debug!("apply_block_execution_results_or_chunk: (Pending, ChunkWithProof)"); + match apply_chunk(block_hash, checksum, HashMap::new(), chunk, None) { + Ok(ApplyChunkOutcome::HadIt { .. }) => { + error!("cannot have already had chunk if in pending mode"); + return Err(Error::InvalidOutcomeFromApplyingChunk { block_hash }); + } + Ok(ApplyChunkOutcome::NeedNext { + chunks, + chunk_count, + next, + }) => { + let acquisition = ExecutionResultsAcquisition::Acquiring { + block_hash, + checksum, + chunks, + chunk_count, + next, + }; + let acceptance = Acceptance::NeededIt; + return Ok((acquisition, acceptance)); + } + Ok(ApplyChunkOutcome::Complete { execution_results }) => { + (checksum, execution_results) + } + Err(err) => { + return Err(err); + } + } + } + ( + ExecutionResultsAcquisition::Acquiring { + checksum, + chunks, + chunk_count, + next, + .. 
+ }, + ValueOrChunk::ChunkWithProof(chunk), + ) => { + debug!("apply_block_execution_results_or_chunk: (Acquiring, ChunkWithProof)"); + match apply_chunk(block_hash, checksum, chunks, chunk, Some(chunk_count)) { + Ok(ApplyChunkOutcome::HadIt { chunks }) => { + let acquisition = ExecutionResultsAcquisition::Acquiring { + block_hash, + checksum, + chunks, + chunk_count, + next, + }; + let acceptance = Acceptance::HadIt; + return Ok((acquisition, acceptance)); + } + Ok(ApplyChunkOutcome::NeedNext { + chunks, + chunk_count, + next, + }) => { + let acquisition = ExecutionResultsAcquisition::Acquiring { + block_hash, + checksum, + chunks, + chunk_count, + next, + }; + let acceptance = Acceptance::NeededIt; + return Ok((acquisition, acceptance)); + } + Ok(ApplyChunkOutcome::Complete { execution_results }) => { + (checksum, execution_results) + } + Err(err) => { + return Err(err); + } + } + } + (ExecutionResultsAcquisition::Needed { block_hash }, _) => { + debug!("apply_block_execution_results_or_chunk: (Needed, _)"); + return Err(Error::AttemptToApplyDataWhenMissingChecksum { block_hash }); + } + (ExecutionResultsAcquisition::Complete { .. 
}, _) => { + debug!("apply_block_execution_results_or_chunk: (Complete, _)"); + return Err(Error::AttemptToApplyDataAfterCompleted { block_hash }); + } + }; + + if transaction_hashes.len() != execution_results.len() { + debug!( + %block_hash, + "apply_block_execution_results_or_chunk: Error::ExecutionResultToDeployHashLengthDiscrepancy" + ); + return Err(Error::ExecutionResultToDeployHashLengthDiscrepancy { + block_hash, + expected: transaction_hashes.len(), + actual: execution_results.len(), + }); + } + let results = transaction_hashes + .into_iter() + .zip(execution_results) + .collect(); + debug!( + %block_hash, + "apply_block_execution_results_or_chunk: returning ExecutionResultsAcquisition::Complete" + ); + let acceptance = Acceptance::NeededIt; + let acquisition = ExecutionResultsAcquisition::Complete { + block_hash, + results, + checksum, + }; + Ok((acquisition, acceptance)) + } + + pub(super) fn is_checkable(&self) -> bool { + match self { + ExecutionResultsAcquisition::Needed { .. } => false, + ExecutionResultsAcquisition::Pending { checksum, .. } + | ExecutionResultsAcquisition::Acquiring { checksum, .. } + | ExecutionResultsAcquisition::Complete { checksum, .. } => checksum.is_checkable(), + } + } + + fn block_hash(&self) -> BlockHash { + match self { + ExecutionResultsAcquisition::Needed { block_hash } + | ExecutionResultsAcquisition::Pending { block_hash, .. } + | ExecutionResultsAcquisition::Acquiring { block_hash, .. } + | ExecutionResultsAcquisition::Complete { block_hash, .. 
} => *block_hash, + } + } +} + +#[derive(Debug)] +enum ApplyChunkOutcome { + HadIt { + chunks: HashMap, + }, + NeedNext { + chunks: HashMap, + chunk_count: u64, + next: u64, + }, + Complete { + execution_results: Vec, + }, +} + +impl ApplyChunkOutcome { + fn need_next(chunks: HashMap, chunk_count: u64, next: u64) -> Self { + ApplyChunkOutcome::NeedNext { + chunks, + chunk_count, + next, + } + } + + fn execution_results(execution_results: Vec) -> Self { + ApplyChunkOutcome::Complete { execution_results } + } +} + +fn apply_chunk( + block_hash: BlockHash, + checksum: ExecutionResultsChecksum, + mut chunks: HashMap, + chunk: ChunkWithProof, + expected_count: Option, +) -> Result { + let digest = chunk.proof().root_hash(); + let index = chunk.proof().index(); + let chunk_count = chunk.proof().count(); + if chunk_count == 1 { + debug!(%block_hash, "apply_chunk: Error::InvalidChunkCount"); + return Err(Error::InvalidChunkCount { block_hash }); + } + + if let Some(expected) = expected_count { + if expected != chunk_count { + debug!(%block_hash, "apply_chunk: Error::ChunkCountMismatch"); + return Err(Error::ChunkCountMismatch { + block_hash, + expected, + actual: chunk_count, + }); + } + } + + // ExecutionResultsChecksum::Uncheckable has no checksum, otherwise check it + if let ExecutionResultsChecksum::Checkable(expected) = checksum { + if expected != digest { + debug!(%block_hash, "apply_chunk: Error::ChecksumMismatch"); + return Err(Error::ChecksumMismatch { + block_hash, + expected, + actual: digest, + }); + } + } else if let Some(other_chunk) = chunks.values().next() { + let existing_chunk_digest = other_chunk.proof().root_hash(); + if existing_chunk_digest != digest { + debug!(%block_hash, "apply_chunk: Error::ChunksWithDifferentChecksum"); + return Err(Error::ChunksWithDifferentChecksum { + block_hash, + expected: existing_chunk_digest, + actual: digest, + }); + } + } + + if chunks.insert(index, chunk).is_some() { + debug!(%block_hash, index, "apply_chunk: already 
had it"); + return Ok(ApplyChunkOutcome::HadIt { chunks }); + }; + + match (0..chunk_count).find(|idx| !chunks.contains_key(idx)) { + Some(next) => Ok(ApplyChunkOutcome::need_next(chunks, chunk_count, next)), + None => { + let serialized: Vec = (0..chunk_count) + .filter_map(|index| chunks.get(&index)) + .flat_map(|c| c.chunk()) + .copied() + .collect(); + match bytesrepr::deserialize(serialized) { + Ok(results) => { + debug!(%block_hash, "apply_chunk: ApplyChunkOutcome::execution_results"); + Ok(ApplyChunkOutcome::execution_results(results)) + } + Err(error) => { + error!(%error, "failed to deserialize execution results"); + Err(Error::FailedToDeserialize { block_hash }) + } + } + } + } +} diff --git a/node/src/components/block_synchronizer/execution_results_acquisition/tests.rs b/node/src/components/block_synchronizer/execution_results_acquisition/tests.rs new file mode 100644 index 0000000000..17b2a0659c --- /dev/null +++ b/node/src/components/block_synchronizer/execution_results_acquisition/tests.rs @@ -0,0 +1,525 @@ +use assert_matches::assert_matches; + +use casper_types::{ + bytesrepr::ToBytes, execution::ExecutionResultV2, testing::TestRng, DeployHash, + TestBlockBuilder, +}; + +use super::*; +use crate::{ + components::block_synchronizer::tests::test_utils::chunks_with_proof_from_data, + types::BlockExecutionResultsOrChunkId, +}; + +const NUM_TEST_EXECUTION_RESULTS: u64 = 100000; + +#[test] +fn execution_results_chunks_apply_correctly() { + let rng = &mut TestRng::new(); + let block = TestBlockBuilder::new().build(rng); + + // Create chunkable execution results + let exec_results: Vec = (0..NUM_TEST_EXECUTION_RESULTS) + .map(|_| ExecutionResult::from(ExecutionResultV2::random(rng))) + .collect(); + let test_chunks = chunks_with_proof_from_data(&exec_results.to_bytes().unwrap()); + assert!(test_chunks.len() >= 3); + + // Start off with only one chunk applied + let mut chunks: HashMap = HashMap::new(); + let first_chunk = 
test_chunks.first_key_value().unwrap(); + let last_chunk = test_chunks.last_key_value().unwrap(); + chunks.insert(*first_chunk.0, first_chunk.1.clone()); + + // Insert all the other chunks except the last; skip the first one since it should have been + // added already + for (index, chunk) in test_chunks.iter().take(test_chunks.len() - 1).skip(1) { + let apply_result = apply_chunk( + *block.hash(), + ExecutionResultsChecksum::Uncheckable, + chunks, + chunk.clone(), + None, + ); + + // Check the index of the next chunk that should be applied + chunks = assert_matches!(apply_result, Ok(ApplyChunkOutcome::NeedNext{chunks, chunk_count, next}) => { + assert_eq!(next, index + 1); + assert_eq!(chunk_count as usize, test_chunks.len()); + chunks + }); + } + + // Apply the last chunk, and expect to get back the execution results + let apply_result = apply_chunk( + *block.hash(), + ExecutionResultsChecksum::Uncheckable, + chunks, + last_chunk.1.clone(), + None, + ); + assert_matches!(apply_result, Ok(ApplyChunkOutcome::Complete{execution_results}) => { + assert_eq!(execution_results, exec_results); + }); +} + +#[test] +fn single_chunk_execution_results_dont_apply_other_chunks() { + let rng = &mut TestRng::new(); + let test_chunks = chunks_with_proof_from_data(&[0; ChunkWithProof::CHUNK_SIZE_BYTES - 1]); + assert_eq!(test_chunks.len(), 1); + + // We can't apply a chunk if the execution results are not chunked (only 1 chunk exists) + // Expect an error in this case. + let first_chunk = test_chunks.first_key_value().unwrap(); + + let apply_result = apply_chunk( + *TestBlockBuilder::new().build(rng).hash(), + ExecutionResultsChecksum::Uncheckable, + test_chunks.clone().into_iter().collect(), + first_chunk.1.clone(), + None, + ); + + assert_matches!(apply_result, Err(Error::InvalidChunkCount { .. 
})); +} + +#[test] +fn execution_results_chunks_from_block_with_different_hash_are_not_applied() { + let rng = &mut TestRng::new(); + let block = TestBlockBuilder::new().build(rng); + let test_chunks = chunks_with_proof_from_data(&[0; ChunkWithProof::CHUNK_SIZE_BYTES * 3]); + + // Start acquiring chunks + let mut acquisition = ExecutionResultsAcquisition::new_acquiring( + *block.hash(), + ExecutionResultsChecksum::Uncheckable, + test_chunks.clone().into_iter().take(1).collect(), + 3, + 1, + ); + let exec_result = BlockExecutionResultsOrChunk::new_from_value( + *block.hash(), + ValueOrChunk::ChunkWithProof(test_chunks.last_key_value().unwrap().1.clone()), + ); + acquisition = assert_matches!( + acquisition.apply_block_execution_results_or_chunk(exec_result, vec![]), + Ok((acq, Acceptance::NeededIt)) => acq + ); + assert_matches!(acquisition, ExecutionResultsAcquisition::Acquiring { .. }); + + // Applying execution results from other block should return an error + let exec_result = BlockExecutionResultsOrChunk::new_from_value( + *TestBlockBuilder::new().build(rng).hash(), + ValueOrChunk::ChunkWithProof(test_chunks.first_key_value().unwrap().1.clone()), + ); + assert_matches!( + acquisition.apply_block_execution_results_or_chunk(exec_result, vec![]), + Err(Error::BlockHashMismatch {expected, .. 
}) => assert_eq!(expected, *block.hash()) + ); +} + +#[test] +fn execution_results_chunks_from_trie_with_different_chunk_count_are_not_applied() { + let rng = &mut TestRng::new(); + let test_chunks_1 = chunks_with_proof_from_data(&[0; ChunkWithProof::CHUNK_SIZE_BYTES * 3]); + assert_eq!(test_chunks_1.len(), 3); + + let test_chunks_2 = chunks_with_proof_from_data(&[1; ChunkWithProof::CHUNK_SIZE_BYTES * 2]); + assert_eq!(test_chunks_2.len(), 2); + + // If chunk tries have different number of chunks we shouldn't attempt to apply the incoming + // chunk and exit early + let bad_chunk = test_chunks_2.first_key_value().unwrap(); + + let apply_result = apply_chunk( + *TestBlockBuilder::new().build(rng).hash(), + ExecutionResultsChecksum::Uncheckable, + test_chunks_1.into_iter().take(2).collect(), + bad_chunk.1.clone(), + Some(3), + ); + + assert_matches!(apply_result, Err(Error::ChunkCountMismatch {expected, actual, ..}) if expected == 3 && actual == 2); +} + +#[test] +fn invalid_execution_results_from_applied_chunks_dont_deserialize() { + let rng = &mut TestRng::new(); + let block = TestBlockBuilder::new().build(rng); + + // Create some chunk data that cannot pe serialized into execution results + let test_chunks = chunks_with_proof_from_data(&[0; ChunkWithProof::CHUNK_SIZE_BYTES * 2]); + assert_eq!(test_chunks.len(), 2); + let last_chunk = test_chunks.last_key_value().unwrap(); + + // Expect that this data cannot be deserialized + let apply_result = apply_chunk( + *block.hash(), + ExecutionResultsChecksum::Uncheckable, + test_chunks.clone().into_iter().take(1).collect(), + last_chunk.1.clone(), + None, + ); + assert_matches!(apply_result, Err(Error::FailedToDeserialize { .. 
})); +} + +#[test] +fn cant_apply_chunk_from_different_exec_results_or_invalid_checksum() { + let rng = &mut TestRng::new(); + let block = TestBlockBuilder::new().build(rng); + + // Create valid execution results + let valid_exec_results: Vec = (0..NUM_TEST_EXECUTION_RESULTS) + .map(|_| ExecutionResult::from(ExecutionResultV2::random(rng))) + .collect(); + let valid_test_chunks = chunks_with_proof_from_data(&valid_exec_results.to_bytes().unwrap()); + assert!(valid_test_chunks.len() >= 3); + + // Create some invalid chunks that are not part of the execution results we are building + let invalid_test_chunks = + chunks_with_proof_from_data(&[0; ChunkWithProof::CHUNK_SIZE_BYTES * 2]); + assert_eq!(invalid_test_chunks.len(), 2); + + // Try to apply the invalid test chunks to the valid chunks and expect to fail since the + // checksums for the proofs are different between the chunks. + let apply_result = apply_chunk( + *block.hash(), + ExecutionResultsChecksum::Uncheckable, + valid_test_chunks.clone().into_iter().take(2).collect(), + invalid_test_chunks.first_key_value().unwrap().1.clone(), + None, + ); + assert_matches!(apply_result, Err(Error::ChunksWithDifferentChecksum{block_hash: _, expected, actual}) => { + assert_eq!(expected, valid_test_chunks.first_key_value().unwrap().1.proof().root_hash()); + assert_eq!(actual, invalid_test_chunks.first_key_value().unwrap().1.proof().root_hash()); + }); + + // Same test but here we are explicitly specifying the execution results checksum that + // should be checked. 
+ let apply_result = apply_chunk( + *block.hash(), + ExecutionResultsChecksum::Checkable( + valid_test_chunks + .first_key_value() + .unwrap() + .1 + .proof() + .root_hash(), + ), + valid_test_chunks.clone().into_iter().take(2).collect(), + invalid_test_chunks.first_key_value().unwrap().1.clone(), + None, + ); + assert_matches!(apply_result, Err(Error::ChecksumMismatch{block_hash: _, expected, actual}) => { + assert_eq!(expected, valid_test_chunks.first_key_value().unwrap().1.proof().root_hash()); + assert_eq!(actual, invalid_test_chunks.first_key_value().unwrap().1.proof().root_hash()); + }); +} + +// Constructors for acquisition states used for testing and verifying generic properties of +// these states +impl ExecutionResultsAcquisition { + fn new_needed(block_hash: BlockHash) -> Self { + let acq = Self::Needed { block_hash }; + assert_eq!(acq.block_hash(), block_hash); + assert!(!acq.is_checkable()); + assert_eq!(acq.needs_value_or_chunk(), None); + acq + } + + fn new_pending(block_hash: BlockHash, checksum: ExecutionResultsChecksum) -> Self { + let acq = Self::Pending { + block_hash, + checksum, + }; + assert_eq!(acq.block_hash(), block_hash); + assert_eq!(acq.is_checkable(), checksum.is_checkable()); + assert_eq!( + acq.needs_value_or_chunk(), + Some((BlockExecutionResultsOrChunkId::new(block_hash), checksum)) + ); + acq + } + + fn new_acquiring( + block_hash: BlockHash, + checksum: ExecutionResultsChecksum, + chunks: HashMap, + chunk_count: u64, + next: u64, + ) -> Self { + let acq = Self::Acquiring { + block_hash, + checksum, + chunks, + chunk_count, + next, + }; + assert_eq!(acq.block_hash(), block_hash); + assert_eq!(acq.is_checkable(), checksum.is_checkable()); + assert_eq!( + acq.needs_value_or_chunk(), + Some(( + BlockExecutionResultsOrChunkId::new(block_hash).next_chunk(next), + checksum + )) + ); + acq + } + + fn new_complete( + block_hash: BlockHash, + checksum: ExecutionResultsChecksum, + results: HashMap, + ) -> Self { + let acq = Self::Complete { 
+ block_hash, + checksum, + results, + }; + assert_eq!(acq.block_hash(), block_hash); + assert_eq!(acq.is_checkable(), checksum.is_checkable()); + assert_eq!(acq.needs_value_or_chunk(), None); + acq + } +} + +#[test] +fn acquisition_needed_state_has_correct_transitions() { + let rng = &mut TestRng::new(); + let block = TestBlockBuilder::new().build(rng); + + let acquisition = ExecutionResultsAcquisition::new_needed(*block.hash()); + + let exec_results_checksum = ExecutionResultsChecksum::Checkable(Digest::hash([0; 32])); + assert_matches!( + acquisition.clone().apply_checksum(exec_results_checksum), + Ok(ExecutionResultsAcquisition::Pending{block_hash, checksum}) if block_hash == *block.hash() && checksum == exec_results_checksum + ); + + assert_matches!( + acquisition.clone().apply_checksum(ExecutionResultsChecksum::Uncheckable), + Ok(ExecutionResultsAcquisition::Pending{block_hash, checksum}) if block_hash == *block.hash() && checksum == ExecutionResultsChecksum::Uncheckable + ); + + let mut test_chunks = chunks_with_proof_from_data(&[0; ChunkWithProof::CHUNK_SIZE_BYTES]); + assert_eq!(test_chunks.len(), 1); + + let exec_result = BlockExecutionResultsOrChunk::new_from_value( + *block.hash(), + ValueOrChunk::ChunkWithProof(test_chunks.remove(&0).unwrap()), + ); + assert_matches!( + acquisition.apply_block_execution_results_or_chunk(exec_result, vec![]), + Err(Error::AttemptToApplyDataWhenMissingChecksum { .. }) + ); +} + +#[test] +fn acquisition_pending_state_has_correct_transitions() { + let rng = &mut TestRng::new(); + let block = TestBlockBuilder::new().build(rng); + + let acquisition = ExecutionResultsAcquisition::new_pending( + *block.hash(), + ExecutionResultsChecksum::Uncheckable, + ); + assert_matches!( + acquisition + .clone() + .apply_checksum(ExecutionResultsChecksum::Uncheckable), + Err(Error::InvalidAttemptToApplyChecksum { .. 
}) + ); + + // Acquisition can transition from `Pending` to `Complete` if a value and deploy hashes are + // applied + let execution_results = vec![ExecutionResult::from(ExecutionResultV2::random(rng))]; + let exec_result = BlockExecutionResultsOrChunk::new_from_value( + *block.hash(), + ValueOrChunk::new(execution_results, 0).unwrap(), + ); + assert_matches!( + acquisition + .clone() + .apply_block_execution_results_or_chunk(exec_result.clone(), vec![]), + Err(Error::ExecutionResultToDeployHashLengthDiscrepancy { .. }) + ); + assert_matches!( + acquisition.clone().apply_block_execution_results_or_chunk( + exec_result, + vec![DeployHash::new(Digest::hash([0; 32])).into()] + ), + Ok(( + ExecutionResultsAcquisition::Complete { .. }, + Acceptance::NeededIt + )) + ); + + // Acquisition can transition from `Pending` to `Acquiring` if a single chunk is applied + let exec_results: Vec = (0..NUM_TEST_EXECUTION_RESULTS) + .map(|_| ExecutionResult::from(ExecutionResultV2::random(rng))) + .collect(); + let test_chunks = chunks_with_proof_from_data(&exec_results.to_bytes().unwrap()); + assert!(test_chunks.len() >= 3); + + let first_chunk = test_chunks.first_key_value().unwrap().1; + let exec_result = BlockExecutionResultsOrChunk::new_from_value( + *block.hash(), + ValueOrChunk::ChunkWithProof(first_chunk.clone()), + ); + let transaction_hashes: Vec = (0..NUM_TEST_EXECUTION_RESULTS) + .map(|index| DeployHash::new(Digest::hash(index.to_bytes().unwrap())).into()) + .collect(); + assert_matches!( + acquisition.apply_block_execution_results_or_chunk(exec_result, transaction_hashes), + Ok(( + ExecutionResultsAcquisition::Acquiring { .. 
}, + Acceptance::NeededIt + )) + ); +} + +#[test] +fn acquisition_acquiring_state_has_correct_transitions() { + let rng = &mut TestRng::new(); + let block = TestBlockBuilder::new().build(rng); + + // Generate valid execution results that are chunkable + let exec_results: Vec = (0..NUM_TEST_EXECUTION_RESULTS) + .map(|_| ExecutionResult::from(ExecutionResultV2::random(rng))) + .collect(); + let test_chunks = chunks_with_proof_from_data(&exec_results.to_bytes().unwrap()); + assert!(test_chunks.len() >= 3); + + let mut acquisition = ExecutionResultsAcquisition::new_acquiring( + *block.hash(), + ExecutionResultsChecksum::Uncheckable, + test_chunks.clone().into_iter().take(1).collect(), + test_chunks.len() as u64, + 1, + ); + assert_matches!( + acquisition + .clone() + .apply_checksum(ExecutionResultsChecksum::Uncheckable), + Err(Error::InvalidAttemptToApplyChecksum { .. }) + ); + + // Apply all chunks except the last and check if the acquisition state remains `Acquiring` + for (_, chunk) in test_chunks.iter().take(test_chunks.len() - 1).skip(1) { + let exec_result = BlockExecutionResultsOrChunk::new_from_value( + *block.hash(), + ValueOrChunk::ChunkWithProof(chunk.clone()), + ); + acquisition = assert_matches!( + acquisition.apply_block_execution_results_or_chunk(exec_result, vec![]), + Ok((acq, Acceptance::NeededIt)) => acq + ); + assert_matches!(acquisition, ExecutionResultsAcquisition::Acquiring { .. 
}); + } + + // Now apply the last chunk and check if the acquisition completes + let last_chunk = test_chunks.last_key_value().unwrap().1; + let exec_result = BlockExecutionResultsOrChunk::new_from_value( + *block.hash(), + ValueOrChunk::ChunkWithProof(last_chunk.clone()), + ); + let transaction_hashes: Vec = (0..NUM_TEST_EXECUTION_RESULTS) + .map(|index| DeployHash::new(Digest::hash(index.to_bytes().unwrap())).into()) + .collect(); + acquisition = assert_matches!( + acquisition.apply_block_execution_results_or_chunk(exec_result, transaction_hashes), + Ok((acq, Acceptance::NeededIt)) => acq + ); + assert_matches!(acquisition, ExecutionResultsAcquisition::Complete { .. }); +} + +#[test] +fn acquisition_acquiring_state_gets_overridden_by_value() { + let rng = &mut TestRng::new(); + let block = TestBlockBuilder::new().build(rng); + let test_chunks = chunks_with_proof_from_data(&[0; ChunkWithProof::CHUNK_SIZE_BYTES * 3]); + + // Start acquiring chunks + let mut acquisition = ExecutionResultsAcquisition::new_acquiring( + *block.hash(), + ExecutionResultsChecksum::Uncheckable, + test_chunks.clone().into_iter().take(1).collect(), + 3, + 1, + ); + let exec_result = BlockExecutionResultsOrChunk::new_from_value( + *block.hash(), + ValueOrChunk::ChunkWithProof(test_chunks.last_key_value().unwrap().1.clone()), + ); + acquisition = assert_matches!( + acquisition.apply_block_execution_results_or_chunk(exec_result, vec![]), + Ok((acq, Acceptance::NeededIt)) => acq + ); + assert_matches!(acquisition, ExecutionResultsAcquisition::Acquiring { .. }); + + // Assume we got a full execution result for this block. + // Since we don't have a checksum for the execution results, we can't really determine which + // data is the better one. We expect to overwrite the execution results chunks that + // we previously acquired with this complete result. 
+ let execution_results = vec![ExecutionResult::from(ExecutionResultV2::random(rng))]; + let exec_result = BlockExecutionResultsOrChunk::new_from_value( + *block.hash(), + ValueOrChunk::new(execution_results, 0).unwrap(), + ); + assert_matches!( + acquisition + .clone() + .apply_block_execution_results_or_chunk(exec_result.clone(), vec![]), + Err(Error::ExecutionResultToDeployHashLengthDiscrepancy { .. }) + ); + + assert_matches!( + acquisition.apply_block_execution_results_or_chunk( + exec_result, + vec![DeployHash::new(Digest::hash([0; 32])).into()] + ), + Ok(( + ExecutionResultsAcquisition::Complete { .. }, + Acceptance::NeededIt + )) + ); +} + +#[test] +fn acquisition_complete_state_has_correct_transitions() { + let rng = &mut TestRng::new(); + let block = TestBlockBuilder::new().build(rng); + + let acquisition = ExecutionResultsAcquisition::new_complete( + *block.hash(), + ExecutionResultsChecksum::Uncheckable, + HashMap::new(), + ); + + let exec_results_checksum = ExecutionResultsChecksum::Checkable(Digest::hash([0; 32])); + assert_matches!( + acquisition.clone().apply_checksum(exec_results_checksum), + Err(Error::InvalidAttemptToApplyChecksum { .. }) + ); + + assert_matches!( + acquisition + .clone() + .apply_checksum(ExecutionResultsChecksum::Uncheckable), + Err(Error::InvalidAttemptToApplyChecksum { .. }) + ); + + let mut test_chunks = chunks_with_proof_from_data(&[0; ChunkWithProof::CHUNK_SIZE_BYTES]); + assert_eq!(test_chunks.len(), 1); + + let exec_result = BlockExecutionResultsOrChunk::new_from_value( + *block.hash(), + ValueOrChunk::ChunkWithProof(test_chunks.remove(&0).unwrap()), + ); + assert_matches!( + acquisition.apply_block_execution_results_or_chunk(exec_result, vec![]), + Err(Error::AttemptToApplyDataAfterCompleted { .. 
}) + ); +} diff --git a/node/src/components/block_synchronizer/global_state_synchronizer.rs b/node/src/components/block_synchronizer/global_state_synchronizer.rs new file mode 100644 index 0000000000..d46efa762d --- /dev/null +++ b/node/src/components/block_synchronizer/global_state_synchronizer.rs @@ -0,0 +1,664 @@ +#[cfg(test)] +mod tests; + +use std::{ + collections::{BTreeMap, HashSet}, + fmt, mem, +}; + +use datasize::DataSize; +use derive_more::From; +use serde::Serialize; +use thiserror::Error; +use tracing::{debug, error, warn}; + +use casper_storage::{ + data_access_layer::{PutTrieRequest, PutTrieResult}, + global_state::{error::Error as GlobalStateError, trie::TrieRaw}, +}; +use casper_types::{BlockHash, Digest, DisplayIter, Timestamp}; + +use super::{TrieAccumulator, TrieAccumulatorError, TrieAccumulatorEvent, TrieAccumulatorResponse}; +use crate::{ + components::Component, + effect::{ + announcements::PeerBehaviorAnnouncement, + requests::{ + ContractRuntimeRequest, FetcherRequest, SyncGlobalStateRequest, TrieAccumulatorRequest, + }, + EffectBuilder, EffectExt, Effects, Responder, + }, + reactor, + types::{NodeId, TrieOrChunk}, + NodeRng, +}; + +const COMPONENT_NAME: &str = "global_state_synchronizer"; + +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, DataSize, From)] +pub(crate) struct RootHash(Digest); + +impl RootHash { + #[cfg(test)] + pub(crate) fn new(digest: Digest) -> Self { + Self(digest) + } + + pub(crate) fn into_inner(self) -> Digest { + self.0 + } +} + +impl fmt::Display for RootHash { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.0.fmt(f) + } +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, DataSize, From)] +pub(crate) struct TrieHash(Digest); + +impl fmt::Display for TrieHash { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.0.fmt(f) + } +} + +#[derive(Debug, Clone, Error)] +pub(crate) enum Error { + #[error("trie accumulator encountered an 
error while fetching a trie; unreliable peers {}", DisplayIter::new(.0))] + TrieAccumulator(Vec), + #[error("Failed to persist trie element in global state: {0}; unreliable peers {}", DisplayIter::new(.1))] + PutTrie(GlobalStateError, Vec), + #[error("no peers available to ask for a trie")] + NoPeersAvailable, + #[error("received request for {hash_requested} while syncing another root hash: {hash_being_synced}")] + ProcessingAnotherRequest { + hash_being_synced: Digest, + hash_requested: Digest, + }, +} + +#[derive(Debug, Clone)] +pub(crate) struct Response { + hash: RootHash, + unreliable_peers: Vec, +} + +impl Response { + pub(crate) fn new(hash: RootHash, unreliable_peers: Vec) -> Self { + Self { + hash, + unreliable_peers, + } + } + + pub(crate) fn hash(&self) -> &RootHash { + &self.hash + } + + pub(crate) fn unreliable_peers(self) -> Vec { + self.unreliable_peers + } +} + +#[derive(Debug, From, Serialize)] +pub(crate) enum Event { + #[from] + Request(SyncGlobalStateRequest), + GetPeers(Vec), + FetchedTrie { + trie_hash: TrieHash, + trie_accumulator_result: Result, + }, + PutTrieResult { + #[serde(skip)] + raw: TrieRaw, + #[serde(skip)] + result: PutTrieResult, + }, + #[from] + TrieAccumulator(TrieAccumulatorEvent), +} + +#[derive(Debug, DataSize)] +struct RequestState { + root_hash: RootHash, + block_hashes: HashSet, + responders: Vec>>, + unreliable_peers: HashSet, +} + +impl RequestState { + fn new(request: SyncGlobalStateRequest) -> Self { + let mut block_hashes = HashSet::new(); + block_hashes.insert(request.block_hash); + Self { + root_hash: RootHash(request.state_root_hash), + block_hashes, + responders: vec![request.responder], + unreliable_peers: HashSet::new(), + } + } + + /// Extends the responders based on an additional request. 
+ fn add_request(&mut self, request: SyncGlobalStateRequest) { + self.block_hashes.insert(request.block_hash); + self.responders.push(request.responder); + } + + /// Consumes this request state and sends the response on all responders. + fn respond(self, response: Result) -> Effects { + self.responders + .into_iter() + .flat_map(|responder| responder.respond(response.clone()).ignore()) + .collect() + } +} + +#[derive(Debug, DataSize)] +struct TrieAwaitingChildren { + trie_raw: TrieRaw, + missing_children: HashSet, +} + +impl TrieAwaitingChildren { + fn new(trie_raw: TrieRaw, missing_children: Vec) -> Self { + Self { + trie_raw, + missing_children: missing_children.into_iter().collect(), + } + } + + /// Handles `written_trie` being written to the database - removes the trie as a dependency and + /// returns the next trie to be downloaded. + fn trie_written(&mut self, written_trie: TrieHash) { + self.missing_children.remove(&written_trie); + } + + fn ready_to_be_written(&self) -> bool { + self.missing_children.is_empty() + } + + fn into_trie_raw(self) -> TrieRaw { + self.trie_raw + } +} + +#[derive(Debug, Default, DataSize)] +struct FetchQueue { + queue: Vec, + /// set of the same values that are in the queue - so that we can quickly check that we do not + /// duplicate the same entry in the queue + hashes_set: HashSet, +} + +impl FetchQueue { + fn insert(&mut self, trie_hash: TrieHash) { + if self.hashes_set.insert(trie_hash) { + self.queue.push(trie_hash); + } + } + + fn take(&mut self, num_to_take: usize) -> Vec { + // `to_return` will contain `num_to_take` elements from the end of the queue (or all of + // them if `num_to_take` is greater than queue length). + // Taking elements from the end will essentially make our traversal depth-first instead of + // breadth-first. 
+ let to_return = self + .queue + .split_off(self.queue.len().saturating_sub(num_to_take)); + // remove the returned hashes from the "duplication prevention" set + for returned_hash in &to_return { + self.hashes_set.remove(returned_hash); + } + to_return + } + + fn handle_request_cancelled(&mut self) { + self.queue = vec![]; + self.hashes_set = HashSet::new(); + } +} + +#[derive(Debug, DataSize)] +pub(super) struct GlobalStateSynchronizer { + max_parallel_trie_fetches: usize, + trie_accumulator: TrieAccumulator, + request_state: Option, + tries_awaiting_children: BTreeMap, + fetch_queue: FetchQueue, + in_flight: HashSet, + last_progress: Option, +} + +impl GlobalStateSynchronizer { + pub(super) fn new(max_parallel_trie_fetches: usize) -> Self { + Self { + max_parallel_trie_fetches, + trie_accumulator: TrieAccumulator::new(), + request_state: None, + tries_awaiting_children: Default::default(), + fetch_queue: Default::default(), + in_flight: Default::default(), + last_progress: None, + } + } + + fn touch(&mut self) { + self.last_progress = Some(Timestamp::now()); + } + + pub(super) fn last_progress(&self) -> Option { + self.last_progress + } + + fn handle_request( + &mut self, + request: SyncGlobalStateRequest, + effect_builder: EffectBuilder, + ) -> Effects + where + REv: From + From + Send, + { + let state_root_hash = request.state_root_hash; + + let mut effects = match &mut self.request_state { + None => { + self.request_state = Some(RequestState::new(request)); + self.touch(); + self.enqueue_trie_for_fetching(effect_builder, TrieHash(state_root_hash)) + } + Some(state) => { + if state.root_hash.0 != state_root_hash { + return request + .responder + .respond(Err(Error::ProcessingAnotherRequest { + hash_being_synced: state.root_hash.0, + hash_requested: state_root_hash, + })) + .ignore(); + } else { + state.add_request(request); + self.touch(); + } + Effects::new() + } + }; + + debug!( + %state_root_hash, + fetch_queue_length = self.fetch_queue.queue.len(), + 
tries_awaiting_children_length = self.tries_awaiting_children.len(), + "handle_request" + ); + + effects.extend(self.parallel_fetch(effect_builder)); + + effects + } + + fn parallel_fetch(&mut self, effect_builder: EffectBuilder) -> Effects { + effect_builder + .immediately() + .event(|()| Event::GetPeers(vec![])) + } + + fn parallel_fetch_with_peers( + &mut self, + peers: Vec, + effect_builder: EffectBuilder, + ) -> Effects + where + REv: From + Send, + { + let mut effects = Effects::new(); + + if self.request_state.is_none() { + debug!("called parallel_fetch while not processing any requests"); + return effects; + } + + // Just to not overdo parallel trie fetches in small networks. 5000 parallel trie fetches + // seemed to be fine in networks of 100 peers, so we set the limit at 50 * number of peers. + let max_parallel_trie_fetches = self.max_parallel_trie_fetches.min(peers.len() * 50); + + // if we're not finished, figure out how many new fetching tasks we can start + let num_fetches_to_start = max_parallel_trie_fetches.saturating_sub(self.in_flight.len()); + + debug!( + max_parallel_trie_fetches, + in_flight_length = self.in_flight.len(), + fetch_queue_length = self.fetch_queue.queue.len(), + num_fetches_to_start, + "parallel_fetch" + ); + + let to_fetch = self.fetch_queue.take(num_fetches_to_start); + + if peers.is_empty() { + // if we have no peers, fail - trie accumulator would return an error, anyway + debug!("no peers available, cancelling request"); + return self.cancel_request(Error::NoPeersAvailable); + } + + for trie_hash in to_fetch { + if self.in_flight.insert(trie_hash) { + effects.extend(effect_builder.fetch_trie(trie_hash.0, peers.clone()).event( + move |trie_accumulator_result| Event::FetchedTrie { + trie_hash, + trie_accumulator_result, + }, + )); + } + } + + effects + } + + fn handle_fetched_trie( + &mut self, + trie_hash: TrieHash, + trie_accumulator_result: Result, + effect_builder: EffectBuilder, + ) -> Effects + where + REv: From + From + 
Send, + { + // A result of `false` probably indicates that this is a stale fetch from a previously + // cancelled request - we shouldn't cancel the current request if the result is an error in + // such a case. + let in_flight_was_present = self.in_flight.remove(&trie_hash); + + debug!( + %trie_hash, + in_flight_length = self.in_flight.len(), + fetch_queue_length = self.fetch_queue.queue.len(), + processing_request = self.request_state.is_some(), + "handle_fetched_trie" + ); + + let trie_raw = match trie_accumulator_result { + Ok(response) => { + if let Some(request_state) = &mut self.request_state { + request_state + .unreliable_peers + .extend(response.unreliable_peers()); + } + response.trie() + } + Err(error) => { + debug!(%error, "error fetching a trie"); + let new_unreliable_peers = match error { + TrieAccumulatorError::Absent(_, _, unreliable_peers) + | TrieAccumulatorError::PeersExhausted(_, unreliable_peers) => unreliable_peers, + TrieAccumulatorError::NoPeers(_) => { + // Trie accumulator did not have any peers to download from + // so the request will be canceled with no peers to report + vec![] + } + }; + let unreliable_peers = self.request_state.as_mut().map_or_else(Vec::new, |state| { + state.unreliable_peers.extend(new_unreliable_peers); + state.unreliable_peers.iter().copied().collect() + }); + debug!(%trie_hash, "unreliable peers for requesting trie, cancelling request"); + let mut effects = if in_flight_was_present { + self.cancel_request(Error::TrieAccumulator(unreliable_peers)) + } else { + Effects::new() + }; + + // continue fetching other requests if any + // request_state might be `None` if we are processing fetch responses that were in + // flight when we cancelled a request + if self.request_state.is_some() { + effects.extend(self.parallel_fetch(effect_builder)); + } + return effects; + } + }; + + self.touch(); + + let request = PutTrieRequest::new((*trie_raw).clone()); + effect_builder + .put_trie_if_all_children_present(request) + 
.event(move |put_trie_result| Event::PutTrieResult { + raw: *trie_raw, + result: put_trie_result, + }) + } + + pub(super) fn cancel_request(&mut self, error: Error) -> Effects { + match self.request_state.take() { + Some(request_state) => { + debug!(root_hash=%request_state.root_hash, "cancelling request"); + self.fetch_queue.handle_request_cancelled(); + self.in_flight = HashSet::new(); + request_state.respond(Err(error)) + } + None => { + debug!("not cancelling request - none being processed"); + Effects::new() + } + } + } + + fn finish_request(&mut self) -> Effects { + match self.request_state.take() { + Some(request_state) => { + let root_hash = request_state.root_hash; + debug!(%root_hash, "finishing request"); + let unreliable_peers = request_state.unreliable_peers.iter().copied().collect(); + request_state.respond(Ok(Response::new(root_hash, unreliable_peers))) + } + None => { + // We only call this function after checking that we are processing a request - if + // the request is None, this is a bug + error!("not finishing request - none being processed"); + Effects::new() + } + } + } + + fn handle_put_trie_result( + &mut self, + requested_hash: Digest, + put_trie_result: PutTrieResult, + effect_builder: EffectBuilder, + ) -> Effects + where + REv: From + From + Send, + { + let mut effects = Effects::new(); + + match put_trie_result { + PutTrieResult::Success { hash } if hash == requested_hash => { + effects.extend(self.handle_trie_written(effect_builder, TrieHash(hash))) + } + PutTrieResult::Success { hash } => { + error!( + %hash, + %requested_hash, + "trie was stored under a different hash than was used to request it - \ + it's a bug" + ); + } + PutTrieResult::Failure(GlobalStateError::MissingTrieNodeChildren( + trie_hash, + trie_raw, + missing_children, + )) => effects.extend(self.handle_trie_missing_children( + effect_builder, + TrieHash(trie_hash), + trie_raw, + missing_children.into_iter().map(TrieHash).collect(), + )), + PutTrieResult::Failure(gse) 
=> { + warn!(%requested_hash, %gse, "couldn't put trie into global state"); + if let Some(request_state) = &mut self.request_state { + let unreliable_peers = request_state.unreliable_peers.iter().copied().collect(); + effects.extend(self.cancel_request(Error::PutTrie(gse, unreliable_peers))); + } + } + } + + // request_state can be none if we're processing a result of a fetch that was in flight + // when a request got cancelled + if self.request_state.is_some() { + effects.extend(self.parallel_fetch(effect_builder)); + } + + effects + } + + fn handle_trie_written( + &mut self, + effect_builder: EffectBuilder, + written_trie: TrieHash, + ) -> Effects + where + REv: From + From + Send, + { + self.touch(); + + // Remove the written trie from dependencies of the tries that are waiting. + for trie_awaiting in self.tries_awaiting_children.values_mut() { + trie_awaiting.trie_written(written_trie); + } + + let (ready_tries, still_incomplete): (BTreeMap<_, _>, BTreeMap<_, _>) = + mem::take(&mut self.tries_awaiting_children) + .into_iter() + .partition(|(_, trie_awaiting)| trie_awaiting.ready_to_be_written()); + debug!( + ready_tries = ready_tries.len(), + still_incomplete = still_incomplete.len(), + "handle_trie_written" + ); + self.tries_awaiting_children = still_incomplete; + + let mut effects: Effects = ready_tries + .into_iter() + .flat_map(|(_, trie_awaiting)| { + let trie_raw = trie_awaiting.into_trie_raw(); + let request = PutTrieRequest::new(trie_raw.clone()); + effect_builder + .put_trie_if_all_children_present(request) + .event(move |result| Event::PutTrieResult { + raw: trie_raw, + result, + }) + }) + .collect(); + + // If there is a request state associated with the trie we just wrote, it means that it was + // a root trie and we can report fetching to be finished. 
+ if let Some(request_state) = &mut self.request_state { + if TrieHash(request_state.root_hash.0) == written_trie { + effects.extend(self.finish_request()); + } + } + + effects + } + + fn enqueue_trie_for_fetching( + &mut self, + effect_builder: EffectBuilder, + trie_hash: TrieHash, + ) -> Effects + where + REv: From + Send, + { + // we might have fetched it already! + if let Some(trie_awaiting) = self.tries_awaiting_children.get_mut(&trie_hash) { + // simulate fetching having been completed in order to start fetching any children that + // might be still missing + let trie_raw = trie_awaiting.trie_raw.clone(); + let request = PutTrieRequest::new(trie_raw.clone()); + effect_builder + .put_trie_if_all_children_present(request) + .event(move |result| Event::PutTrieResult { + raw: trie_raw, + result, + }) + } else { + // otherwise, add to the queue + self.fetch_queue.insert(trie_hash); + Effects::new() + } + } + + fn handle_trie_missing_children( + &mut self, + effect_builder: EffectBuilder, + trie_hash: TrieHash, + trie_raw: TrieRaw, + missing_children: Vec, + ) -> Effects + where + REv: From + From + Send, + { + if self.request_state.is_none() { + // this can be valid if we're processing a fetch result that was in flight while we + // were cancelling a request - but we don't want to continue queueing further tries for + // fetching + return Effects::new(); + } + + self.touch(); + + let mut effects: Effects = missing_children + .iter() + .flat_map(|child| self.enqueue_trie_for_fetching(effect_builder, *child)) + .collect(); + self.tries_awaiting_children.insert( + trie_hash, + TrieAwaitingChildren::new(trie_raw, missing_children), + ); + effects.extend(self.parallel_fetch(effect_builder)); + effects + } +} + +impl Component for GlobalStateSynchronizer +where + REv: From + + From + + From> + + From + + Send, +{ + type Event = Event; + + fn handle_event( + &mut self, + effect_builder: EffectBuilder, + rng: &mut NodeRng, + event: Self::Event, + ) -> Effects { + match 
event { + Event::Request(request) => self.handle_request(request, effect_builder), + Event::GetPeers(peers) => self.parallel_fetch_with_peers(peers, effect_builder), + Event::FetchedTrie { + trie_hash, + trie_accumulator_result, + } => self.handle_fetched_trie(trie_hash, trie_accumulator_result, effect_builder), + Event::PutTrieResult { + raw: trie_raw, + result: put_trie_result, + } => self.handle_put_trie_result(trie_raw.hash(), put_trie_result, effect_builder), + Event::TrieAccumulator(event) => reactor::wrap_effects( + Event::TrieAccumulator, + self.trie_accumulator + .handle_event(effect_builder, rng, event), + ), + } + } + + fn name(&self) -> &str { + COMPONENT_NAME + } +} diff --git a/node/src/components/block_synchronizer/global_state_synchronizer/tests.rs b/node/src/components/block_synchronizer/global_state_synchronizer/tests.rs new file mode 100644 index 0000000000..a5883271b3 --- /dev/null +++ b/node/src/components/block_synchronizer/global_state_synchronizer/tests.rs @@ -0,0 +1,713 @@ +use std::time::Duration; + +use futures::channel::oneshot; +use rand::Rng; + +use casper_storage::global_state::error::Error as GlobalStateError; +use casper_types::{bytesrepr::Bytes, testing::TestRng, TestBlockBuilder}; + +use super::*; +use crate::{ + reactor::{EventQueueHandle, QueueKind, Scheduler}, + utils, +}; + +/// Event for the mock reactor. 
+#[derive(Debug)] +enum ReactorEvent { + TrieAccumulatorRequest(TrieAccumulatorRequest), + ContractRuntimeRequest(ContractRuntimeRequest), +} + +impl From for ReactorEvent { + fn from(req: ContractRuntimeRequest) -> ReactorEvent { + ReactorEvent::ContractRuntimeRequest(req) + } +} + +impl From for ReactorEvent { + fn from(req: TrieAccumulatorRequest) -> ReactorEvent { + ReactorEvent::TrieAccumulatorRequest(req) + } +} + +struct MockReactor { + scheduler: &'static Scheduler, + effect_builder: EffectBuilder, +} + +impl MockReactor { + fn new() -> Self { + let scheduler = utils::leak(Scheduler::new(QueueKind::weights(), None)); + let event_queue_handle = EventQueueHandle::without_shutdown(scheduler); + let effect_builder = EffectBuilder::new(event_queue_handle); + MockReactor { + scheduler, + effect_builder, + } + } + + fn effect_builder(&self) -> EffectBuilder { + self.effect_builder + } + + async fn expect_trie_accumulator_request(&self, hash: &Digest) { + let ((_ancestor, reactor_event), _) = self.scheduler.pop().await; + match reactor_event { + ReactorEvent::TrieAccumulatorRequest(request) => { + assert_eq!(request.hash, *hash); + } + _ => { + unreachable!(); + } + }; + } + + async fn expect_put_trie_request(&self, trie: &TrieRaw) { + let ((_ancestor, reactor_event), _) = self.scheduler.pop().await; + match reactor_event { + ReactorEvent::ContractRuntimeRequest(ContractRuntimeRequest::PutTrie { + request, + responder: _, + }) => { + assert_eq!(request.raw(), trie); + } + _ => { + unreachable!(); + } + }; + } +} + +fn random_test_trie(rng: &mut TestRng) -> TrieRaw { + let data: Vec = (0..64).map(|_| rng.gen()).collect(); + TrieRaw::new(Bytes::from(data)) +} + +fn random_sync_global_state_request( + rng: &mut TestRng, + responder: Responder>, +) -> (SyncGlobalStateRequest, TrieRaw) { + let block = TestBlockBuilder::new().build(rng); + let trie = random_test_trie(rng); + + // Create a request + ( + SyncGlobalStateRequest { + block_hash: *block.hash(), + 
state_root_hash: Digest::hash(trie.inner()), + responder, + }, + trie, + ) +} + +#[tokio::test] +async fn fetch_request_without_peers_is_canceled() { + let mut rng = TestRng::new(); + let reactor = MockReactor::new(); + let mut global_state_synchronizer = GlobalStateSynchronizer::new(rng.gen_range(2..10)); + + // Create a responder to allow assertion of the error + let (sender, receiver) = oneshot::channel(); + // Create a request without peers + let (request, _) = + random_sync_global_state_request(&mut rng, Responder::without_shutdown(sender)); + + // Check how the request is handled by the block synchronizer. + let mut effects = global_state_synchronizer.handle_request(request, reactor.effect_builder()); + assert_eq!(effects.len(), 1); + assert!(global_state_synchronizer.request_state.is_some()); + assert_eq!(global_state_synchronizer.fetch_queue.queue.len(), 1); + assert_eq!(global_state_synchronizer.in_flight.len(), 0); + assert!(global_state_synchronizer.last_progress.is_some()); + + let events = tokio::spawn(effects.remove(0)).await.unwrap(); + assert_eq!(events.len(), 1); + assert!(matches!(events[0], Event::GetPeers(_))); + + let mut effects = + global_state_synchronizer.parallel_fetch_with_peers(vec![], reactor.effect_builder()); + + // Since the request does not have any peers, it should be canceled. 
+ assert_eq!(effects.len(), 1); + assert!(global_state_synchronizer.request_state.is_none()); + // Fetch should be always 0 as long as we're below parallel_fetch_limit + assert_eq!(global_state_synchronizer.fetch_queue.queue.len(), 0); + assert_eq!(global_state_synchronizer.in_flight.len(), 0); + + // Check if the error is propagated on the channel + tokio::spawn(effects.remove(0)); + let result = receiver.await.unwrap(); + assert!(result.is_err()); +} + +#[tokio::test] +async fn sync_global_state_request_starts_maximum_trie_fetches() { + let mut rng = TestRng::new(); + let reactor = MockReactor::new(); + let parallel_fetch_limit = rng.gen_range(2..10); + let mut global_state_synchronizer = GlobalStateSynchronizer::new(parallel_fetch_limit); + + let mut progress = Timestamp::now(); + + let (request, trie_raw) = random_sync_global_state_request( + &mut rng, + Responder::without_shutdown(oneshot::channel().0), + ); + let trie_hash = request.state_root_hash; + tokio::time::sleep(Duration::from_millis(5)).await; + let mut effects = global_state_synchronizer.handle_request(request, reactor.effect_builder()); + assert_eq!(effects.len(), 1); + assert!(global_state_synchronizer.request_state.is_some()); + assert_eq!(global_state_synchronizer.fetch_queue.queue.len(), 1); + // At first the synchronizer only fetches the root node. 
+ assert_eq!(global_state_synchronizer.in_flight.len(), 0); + assert!(global_state_synchronizer.last_progress().unwrap() > progress); + progress = global_state_synchronizer.last_progress().unwrap(); + + let events = tokio::spawn(effects.remove(0)).await.unwrap(); + assert_eq!(events.len(), 1); + assert!(matches!(events[0], Event::GetPeers(_))); + + let mut effects = global_state_synchronizer.parallel_fetch_with_peers( + std::iter::repeat_with(|| NodeId::random(&mut rng)) + .take(2) + .collect(), + reactor.effect_builder(), + ); + assert_eq!(effects.len(), 1); + assert!(global_state_synchronizer.request_state.is_some()); + // Fetch should be always 0 as long as we're below parallel_fetch_limit + assert_eq!(global_state_synchronizer.fetch_queue.queue.len(), 0); + assert_eq!(global_state_synchronizer.in_flight.len(), 1); + + // Check if trie_accumulator requests were generated for all tries. + tokio::spawn(effects.remove(0)); + reactor.expect_trie_accumulator_request(&trie_hash).await; + + // sleep a bit so that the next progress timestamp is different + tokio::time::sleep(Duration::from_millis(2)).await; + // simulate the fetch returning a trie + let effects = global_state_synchronizer.handle_fetched_trie( + trie_hash.into(), + Ok(TrieAccumulatorResponse::new(trie_raw.clone(), vec![])), + reactor.effect_builder(), + ); + + assert_eq!(effects.len(), 1); + assert!(global_state_synchronizer.request_state.is_some()); + assert_eq!(global_state_synchronizer.fetch_queue.queue.len(), 0); + // the fetch request is no longer in flight + assert_eq!(global_state_synchronizer.in_flight.len(), 0); + assert!(global_state_synchronizer.last_progress().unwrap() > progress); + progress = global_state_synchronizer.last_progress().unwrap(); + + // sleep a bit so that the next progress timestamp is different + tokio::time::sleep(Duration::from_millis(2)).await; + + // root node would have some children that we haven't yet downloaded + let missing_children = (0u8..255) + // TODO: generate 
random hashes when `rng.gen` works + .map(|i| Digest::hash([i; 32])) + .collect(); + + let trie_hash = trie_raw.hash(); + + // simulate synchronizer processing the fetched trie + let effects = global_state_synchronizer.handle_put_trie_result( + trie_hash, + PutTrieResult::Failure(GlobalStateError::MissingTrieNodeChildren( + trie_hash, + trie_raw, + missing_children, + )), + reactor.effect_builder(), + ); + + assert_eq!(effects.len(), 2); + for effect in effects { + let events = tokio::spawn(effect).await.unwrap(); + assert_eq!(events.len(), 1); + assert!(matches!(events[0], Event::GetPeers(_))); + } + + let effects = global_state_synchronizer.parallel_fetch_with_peers( + std::iter::repeat_with(|| NodeId::random(&mut rng)) + .take(2) + .collect(), + reactor.effect_builder(), + ); + // The global state synchronizer should now start to get the missing tries and create a + // trie_accumulator fetch request for each of the missing children. + assert_eq!(effects.len(), parallel_fetch_limit); + assert!(global_state_synchronizer.request_state.is_some()); + assert_eq!( + global_state_synchronizer.fetch_queue.queue.len(), + 255 - parallel_fetch_limit + ); + assert_eq!( + global_state_synchronizer.in_flight.len(), + parallel_fetch_limit + ); + assert!(global_state_synchronizer.last_progress().unwrap() > progress); +} + +#[tokio::test] +async fn trie_accumulator_error_cancels_request() { + let mut rng = TestRng::new(); + let reactor = MockReactor::new(); + // Set the parallel fetch limit to allow only 1 fetch + let mut global_state_synchronizer = GlobalStateSynchronizer::new(1); + + // Create and register one request + let (sender, receiver1) = oneshot::channel(); + let (request1, _) = + random_sync_global_state_request(&mut rng, Responder::without_shutdown(sender)); + let trie_hash1 = request1.state_root_hash; + let mut effects = global_state_synchronizer.handle_request(request1, reactor.effect_builder()); + assert_eq!(effects.len(), 1); + 
assert!(global_state_synchronizer.request_state.is_some()); + assert_eq!(global_state_synchronizer.fetch_queue.queue.len(), 1); + assert_eq!(global_state_synchronizer.in_flight.len(), 0); + + let events = tokio::spawn(effects.remove(0)).await.unwrap(); + assert_eq!(events.len(), 1); + assert!(matches!(events[0], Event::GetPeers(_))); + + let mut effects = global_state_synchronizer.parallel_fetch_with_peers( + std::iter::repeat_with(|| NodeId::random(&mut rng)) + .take(2) + .collect(), + reactor.effect_builder(), + ); + assert_eq!(effects.len(), 1); + assert!(global_state_synchronizer.request_state.is_some()); + assert_eq!(global_state_synchronizer.fetch_queue.queue.len(), 0); + assert_eq!(global_state_synchronizer.in_flight.len(), 1); + + // Validate that a trie accumulator request was created + tokio::spawn(effects.remove(0)); + reactor.expect_trie_accumulator_request(&trie_hash1).await; + + // Create and register a second request + let (sender, receiver2) = oneshot::channel(); + let (request2, _) = + random_sync_global_state_request(&mut rng, Responder::without_shutdown(sender)); + let trie_hash2 = request2.state_root_hash; + let mut effects = global_state_synchronizer.handle_request(request2, reactor.effect_builder()); + // This request should generate an error response + assert_eq!(effects.len(), 1); + assert!(global_state_synchronizer.request_state.is_some()); + assert_eq!(global_state_synchronizer.fetch_queue.queue.len(), 0); + // First request is in flight + assert_eq!(global_state_synchronizer.in_flight.len(), 1); + + tokio::spawn(effects.remove(0)); + match receiver2.await.unwrap() { + // the synchronizer should say that it's already processing a different request + Err(Error::ProcessingAnotherRequest { + hash_being_synced, + hash_requested, + }) => { + assert_eq!(hash_being_synced, trie_hash1); + assert_eq!(hash_requested, trie_hash2); + } + res => panic!("unexpected result: {:?}", res), + } + + // Simulate a trie_accumulator error for the first trie + 
let trie_accumulator_result = Err(TrieAccumulatorError::Absent(trie_hash1, 0, vec![])); + let mut effects = global_state_synchronizer.handle_fetched_trie( + trie_hash1.into(), + trie_accumulator_result, + reactor.effect_builder(), + ); + assert_eq!(effects.len(), 1); + assert!(global_state_synchronizer.request_state.is_none()); + assert_eq!(global_state_synchronizer.in_flight.len(), 0); + + let cancel_effect = effects.pop().unwrap(); + + // Check if we got the error for the first trie on the channel + tokio::spawn(cancel_effect); + let result = receiver1.await.unwrap(); + assert!(result.is_err()); +} + +#[tokio::test] +async fn successful_trie_fetch_puts_trie_to_store() { + let mut rng = TestRng::new(); + let reactor = MockReactor::new(); + let mut global_state_synchronizer = GlobalStateSynchronizer::new(rng.gen_range(2..10)); + + // Create a request + let (request, trie) = random_sync_global_state_request( + &mut rng, + Responder::without_shutdown(oneshot::channel().0), + ); + let state_root_hash = request.state_root_hash; + + let mut effects = global_state_synchronizer.handle_request(request, reactor.effect_builder()); + assert_eq!(effects.len(), 1); + assert!(global_state_synchronizer.request_state.is_some()); + assert_eq!(global_state_synchronizer.in_flight.len(), 0); + + let events = tokio::spawn(effects.remove(0)).await.unwrap(); + assert_eq!(events.len(), 1); + assert!(matches!(events[0], Event::GetPeers(_))); + + let mut effects = global_state_synchronizer.parallel_fetch_with_peers( + std::iter::repeat_with(|| NodeId::random(&mut rng)) + .take(2) + .collect(), + reactor.effect_builder(), + ); + assert_eq!(effects.len(), 1); + assert!(global_state_synchronizer.request_state.is_some()); + assert_eq!(global_state_synchronizer.fetch_queue.queue.len(), 0); + assert_eq!(global_state_synchronizer.in_flight.len(), 1); + // Validate that we got a trie_accumulator request + tokio::spawn(effects.remove(0)); + reactor + 
.expect_trie_accumulator_request(&state_root_hash) + .await; + + // Simulate a successful trie fetch + let trie_accumulator_result = Ok(TrieAccumulatorResponse::new(trie.clone(), Vec::new())); + let mut effects = global_state_synchronizer.handle_fetched_trie( + state_root_hash.into(), + trie_accumulator_result, + reactor.effect_builder(), + ); + assert_eq!(effects.len(), 1); + assert!(global_state_synchronizer.request_state.is_some()); + assert_eq!(global_state_synchronizer.in_flight.len(), 0); + + // Should attempt to put the trie to the trie store + tokio::spawn(effects.remove(0)); + reactor.expect_put_trie_request(&trie).await; +} + +#[tokio::test] +async fn trie_store_error_cancels_request() { + let mut rng = TestRng::new(); + let reactor = MockReactor::new(); + let mut global_state_synchronizer = GlobalStateSynchronizer::new(rng.gen_range(2..10)); + + // Create a request + let (sender, receiver) = oneshot::channel(); + let (request, trie) = + random_sync_global_state_request(&mut rng, Responder::without_shutdown(sender)); + let state_root_hash = request.state_root_hash; + + let mut effects = global_state_synchronizer.handle_request(request, reactor.effect_builder()); + assert_eq!(effects.len(), 1); + assert!(global_state_synchronizer.request_state.is_some()); + assert_eq!(global_state_synchronizer.in_flight.len(), 0); + + let events = tokio::spawn(effects.remove(0)).await.unwrap(); + assert_eq!(events.len(), 1); + assert!(matches!(events[0], Event::GetPeers(_))); + + let mut effects = global_state_synchronizer.parallel_fetch_with_peers( + std::iter::repeat_with(|| NodeId::random(&mut rng)) + .take(2) + .collect(), + reactor.effect_builder(), + ); + assert_eq!(effects.len(), 1); + assert!(global_state_synchronizer.request_state.is_some()); + assert_eq!(global_state_synchronizer.fetch_queue.queue.len(), 0); + assert_eq!(global_state_synchronizer.in_flight.len(), 1); + + // Validate that we got a trie_accumulator request + tokio::spawn(effects.remove(0)); + 
reactor
+        .expect_trie_accumulator_request(&state_root_hash)
+        .await;
+
+    // Assuming we received the trie from the accumulator, check the behavior when an error
+    // is returned while trying to put the trie to the store.
+    let mut effects = global_state_synchronizer.handle_put_trie_result(
+        trie.hash(),
+        PutTrieResult::Failure(GlobalStateError::RootNotFound),
+        reactor.effect_builder(),
+    );
+    assert_eq!(effects.len(), 1);
+    // Request should be canceled.
+    assert!(global_state_synchronizer.request_state.is_none());
+    tokio::spawn(effects.remove(0));
+    let result = receiver.await.unwrap();
+    assert!(result.is_err());
+}
+
+#[tokio::test]
+async fn missing_trie_node_children_triggers_fetch() {
+    let mut rng = TestRng::new();
+    let reactor = MockReactor::new();
+    let parallel_fetch_limit = rng.gen_range(2..10);
+    let mut global_state_synchronizer = GlobalStateSynchronizer::new(parallel_fetch_limit);
+
+    // Create a request
+    let (request, request_trie) = random_sync_global_state_request(
+        &mut rng,
+        Responder::without_shutdown(oneshot::channel().0),
+    );
+    let trie_hash = Digest::hash(request_trie.clone().inner());
+    let state_root_hash = request.state_root_hash;
+
+    let mut effects = global_state_synchronizer.handle_request(request, reactor.effect_builder());
+    assert_eq!(effects.len(), 1);
+    assert!(global_state_synchronizer.request_state.is_some());
+    assert_eq!(global_state_synchronizer.in_flight.len(), 0);
+
+    let events = tokio::spawn(effects.remove(0)).await.unwrap();
+    assert_eq!(events.len(), 1);
+    assert!(matches!(events[0], Event::GetPeers(_)));
+
+    let mut effects = global_state_synchronizer.parallel_fetch_with_peers(
+        std::iter::repeat_with(|| NodeId::random(&mut rng))
+            .take(2)
+            .collect(),
+        reactor.effect_builder(),
+    );
+    assert_eq!(effects.len(), 1);
+    assert!(global_state_synchronizer.request_state.is_some());
+    assert_eq!(global_state_synchronizer.in_flight.len(), 1);
+
+    // Validate that we got a trie_accumulator request
+    
tokio::spawn(effects.remove(0)); + reactor + .expect_trie_accumulator_request(&state_root_hash) + .await; + + // Simulate a successful trie fetch from the accumulator + let trie_accumulator_result = Ok(TrieAccumulatorResponse::new( + request_trie.clone(), + Vec::new(), + )); + let mut effects = global_state_synchronizer.handle_fetched_trie( + state_root_hash.into(), + trie_accumulator_result, + reactor.effect_builder(), + ); + assert_eq!(effects.len(), 1); + assert!(global_state_synchronizer.request_state.is_some()); + assert_eq!(global_state_synchronizer.in_flight.len(), 0); + + // Should try to put the trie in the store. + tokio::spawn(effects.remove(0)); + reactor.expect_put_trie_request(&request_trie).await; + + // Simulate an error from the trie store where the trie is missing children. + // We generate more than the parallel_fetch_limit. + let num_missing_trie_nodes = rng.gen_range(12..20); + let missing_tries: Vec = (0..num_missing_trie_nodes) + .map(|_| random_test_trie(&mut rng)) + .collect(); + let missing_trie_nodes_hashes: Vec = missing_tries + .iter() + .map(|missing_trie| Digest::hash(missing_trie.inner())) + .collect(); + + let effects = global_state_synchronizer.handle_put_trie_result( + trie_hash, + PutTrieResult::Failure(GlobalStateError::MissingTrieNodeChildren( + trie_hash, + request_trie.clone(), + missing_trie_nodes_hashes.clone(), + )), + reactor.effect_builder(), + ); + + assert_eq!(effects.len(), 2); + for effect in effects { + let events = tokio::spawn(effect).await.unwrap(); + assert_eq!(events.len(), 1); + assert!(matches!(events[0], Event::GetPeers(_))); + } + + let mut effects = global_state_synchronizer.parallel_fetch_with_peers( + std::iter::repeat_with(|| NodeId::random(&mut rng)) + .take(2) + .collect(), + reactor.effect_builder(), + ); + // The global state synchronizer should now start to get the missing tries and create a + // trie_accumulator fetch request for each of the missing children. 
+ assert_eq!(effects.len(), parallel_fetch_limit); + assert_eq!(global_state_synchronizer.tries_awaiting_children.len(), 1); + assert!(global_state_synchronizer.request_state.is_some()); + assert_eq!( + global_state_synchronizer.in_flight.len(), + parallel_fetch_limit + ); + // There are still tries that were not issued a fetch since it would exceed the limit. + assert_eq!( + global_state_synchronizer.fetch_queue.queue.len(), + num_missing_trie_nodes - parallel_fetch_limit + ); + + // Check the requests that were issued. + for (idx, effect) in effects.drain(0..).rev().enumerate() { + tokio::spawn(effect); + reactor + .expect_trie_accumulator_request( + &missing_trie_nodes_hashes[num_missing_trie_nodes - idx - 1], + ) + .await; + } + + // Now handle a successful fetch from the trie_accumulator for one of the missing children. + let trie_hash = missing_trie_nodes_hashes[num_missing_trie_nodes - 1]; + let trie_accumulator_result = Ok(TrieAccumulatorResponse::new( + missing_tries[num_missing_trie_nodes - 1].clone(), + Vec::new(), + )); + let mut effects = global_state_synchronizer.handle_fetched_trie( + trie_hash.into(), + trie_accumulator_result, + reactor.effect_builder(), + ); + assert_eq!(effects.len(), 1); + assert!(global_state_synchronizer.request_state.is_some()); + assert_eq!( + global_state_synchronizer.in_flight.len(), + parallel_fetch_limit - 1 + ); + assert_eq!( + global_state_synchronizer.fetch_queue.queue.len(), + num_missing_trie_nodes - parallel_fetch_limit + ); + tokio::spawn(effects.remove(0)); + reactor + .expect_put_trie_request(&missing_tries[num_missing_trie_nodes - 1]) + .await; + + let trie_hash = + Digest::hash_into_chunks_if_necessary(missing_tries[num_missing_trie_nodes - 1].inner()); + + // Handle put trie to store for the missing child + let mut effects = global_state_synchronizer.handle_put_trie_result( + trie_hash, + PutTrieResult::Success { hash: trie_hash }, + reactor.effect_builder(), + ); + + assert_eq!(effects.len(), 1); + 
assert_eq!(global_state_synchronizer.tries_awaiting_children.len(), 1); + assert!(global_state_synchronizer.request_state.is_some()); + // The in flight value should still be 1 below the limit - the effects should contain a request + // for peers. + assert_eq!( + global_state_synchronizer.in_flight.len(), + parallel_fetch_limit - 1 + ); + + let events = tokio::spawn(effects.remove(0)).await.unwrap(); + assert_eq!(events.len(), 1); + assert!(matches!(events[0], Event::GetPeers(_))); + + let mut effects = global_state_synchronizer.parallel_fetch_with_peers( + std::iter::repeat_with(|| NodeId::random(&mut rng)) + .take(2) + .collect(), + reactor.effect_builder(), + ); + assert_eq!(effects.len(), 1); + assert!(global_state_synchronizer.request_state.is_some()); + // Check if one of the pending fetches for the missing children was picked up. + assert_eq!( + global_state_synchronizer.in_flight.len(), + parallel_fetch_limit + ); + + // Should have one less missing child than before. + assert_eq!( + global_state_synchronizer + .tries_awaiting_children + .get(&Digest::hash(request_trie.inner()).into()) + .unwrap() + .missing_children + .len(), + num_missing_trie_nodes - 1 + ); + + // Check that a fetch was created for the next missing child. 
+ tokio::spawn(effects.remove(0)); + reactor + .expect_trie_accumulator_request( + &missing_trie_nodes_hashes[num_missing_trie_nodes - parallel_fetch_limit - 1], + ) + .await; +} + +#[tokio::test] +async fn stored_trie_finalizes_request() { + let mut rng = TestRng::new(); + let reactor = MockReactor::new(); + let parallel_fetch_limit = rng.gen_range(2..10); + let mut global_state_synchronizer = GlobalStateSynchronizer::new(parallel_fetch_limit); + + // Create a request + let (sender, receiver) = oneshot::channel(); + let (request, trie) = + random_sync_global_state_request(&mut rng, Responder::without_shutdown(sender)); + let state_root_hash = request.state_root_hash; + + let mut effects = global_state_synchronizer.handle_request(request, reactor.effect_builder()); + assert_eq!(effects.len(), 1); + assert!(global_state_synchronizer.request_state.is_some()); + assert_eq!(global_state_synchronizer.in_flight.len(), 0); + + let events = tokio::spawn(effects.remove(0)).await.unwrap(); + assert_eq!(events.len(), 1); + assert!(matches!(events[0], Event::GetPeers(_))); + + let mut effects = global_state_synchronizer.parallel_fetch_with_peers( + std::iter::repeat_with(|| NodeId::random(&mut rng)) + .take(2) + .collect(), + reactor.effect_builder(), + ); + assert_eq!(effects.len(), 1); + assert!(global_state_synchronizer.request_state.is_some()); + assert_eq!(global_state_synchronizer.in_flight.len(), 1); + + // Validate that we got a trie_accumulator request + tokio::spawn(effects.remove(0)); + reactor + .expect_trie_accumulator_request(&state_root_hash) + .await; + + // Handle a successful fetch from the trie_accumulator for one of the missing children. 
+ let trie_hash = Digest::hash(trie.inner()); + let trie_accumulator_result = Ok(TrieAccumulatorResponse::new(trie.clone(), Vec::new())); + let mut effects = global_state_synchronizer.handle_fetched_trie( + trie_hash.into(), + trie_accumulator_result, + reactor.effect_builder(), + ); + assert_eq!(effects.len(), 1); + assert!(global_state_synchronizer.request_state.is_some()); + assert_eq!(global_state_synchronizer.in_flight.len(), 0); + tokio::spawn(effects.remove(0)); + reactor.expect_put_trie_request(&trie).await; + + // Generate a successful trie store + let mut effects = global_state_synchronizer.handle_put_trie_result( + trie_hash, + PutTrieResult::Success { hash: trie_hash }, + reactor.effect_builder(), + ); + // Assert request was successful and global synchronizer is finished. + assert_eq!(effects.len(), 1); + assert_eq!(global_state_synchronizer.tries_awaiting_children.len(), 0); + assert!(global_state_synchronizer.request_state.is_none()); + assert_eq!(global_state_synchronizer.in_flight.len(), 0); + assert_eq!(global_state_synchronizer.fetch_queue.queue.len(), 0); + tokio::spawn(effects.remove(0)); + let result = receiver.await.unwrap(); + assert!(result.is_ok()); +} diff --git a/node/src/components/block_synchronizer/metrics.rs b/node/src/components/block_synchronizer/metrics.rs new file mode 100644 index 0000000000..3063b789ac --- /dev/null +++ b/node/src/components/block_synchronizer/metrics.rs @@ -0,0 +1,58 @@ +use prometheus::{Histogram, Registry}; + +use crate::{unregister_metric, utils}; + +const HIST_SYNC_DURATION_NAME: &str = "historical_block_sync_duration_seconds"; +const HIST_SYNC_DURATION_HELP: &str = "duration (in sec) to synchronize a historical block"; +const FWD_SYNC_DURATION_NAME: &str = "forward_block_sync_duration_seconds"; +const FWD_SYNC_DURATION_HELP: &str = "duration (in sec) to synchronize a forward block"; + +// We use exponential buckets to observe the time it takes to synchronize blocks. 
+// Coverage is ~7.7s with higher resolution in the first buckets. +const EXPONENTIAL_BUCKET_START: f64 = 0.2; +const EXPONENTIAL_BUCKET_FACTOR: f64 = 2.0; +const EXPONENTIAL_BUCKET_COUNT: usize = 10; + +/// Metrics for the block synchronizer component. +#[derive(Debug)] +pub(super) struct Metrics { + /// Time duration for the historical synchronizer to get a block. + pub(super) historical_block_sync_duration: Histogram, + /// Time duration for the forward synchronizer to get a block. + pub(super) forward_block_sync_duration: Histogram, + registry: Registry, +} + +impl Metrics { + /// Creates a new instance of the block synchronizer metrics. + pub fn new(registry: &Registry) -> Result { + let buckets = prometheus::exponential_buckets( + EXPONENTIAL_BUCKET_START, + EXPONENTIAL_BUCKET_FACTOR, + EXPONENTIAL_BUCKET_COUNT, + )?; + + Ok(Metrics { + historical_block_sync_duration: utils::register_histogram_metric( + registry, + HIST_SYNC_DURATION_NAME, + HIST_SYNC_DURATION_HELP, + buckets.clone(), + )?, + forward_block_sync_duration: utils::register_histogram_metric( + registry, + FWD_SYNC_DURATION_NAME, + FWD_SYNC_DURATION_HELP, + buckets, + )?, + registry: registry.clone(), + }) + } +} + +impl Drop for Metrics { + fn drop(&mut self) { + unregister_metric!(self.registry, self.historical_block_sync_duration); + unregister_metric!(self.registry, self.forward_block_sync_duration); + } +} diff --git a/node/src/components/block_synchronizer/need_next.rs b/node/src/components/block_synchronizer/need_next.rs new file mode 100644 index 0000000000..b4f7abddd1 --- /dev/null +++ b/node/src/components/block_synchronizer/need_next.rs @@ -0,0 +1,67 @@ +use datasize::DataSize; +use derive_more::Display; + +use casper_types::{Block, BlockHash, DeployHash, Digest, EraId, PublicKey, TransactionId}; + +use crate::types::{BlockExecutionResultsOrChunkId, ExecutableBlock}; + +use super::execution_results_acquisition::ExecutionResultsChecksum; + +#[derive(DataSize, Debug, Clone, Display, 
PartialEq)] +pub(crate) enum NeedNext { + #[display(fmt = "need next for {}: nothing", _0)] + Nothing(BlockHash), + #[display(fmt = "need next for {}: peers", _0)] + Peers(BlockHash), + #[display(fmt = "need next for {}: era validators", _0)] + EraValidators(EraId), + #[display(fmt = "need next for {}: block header", _0)] + BlockHeader(BlockHash), + #[display(fmt = "need next for {}: block body", _0)] + BlockBody(BlockHash), + #[display(fmt = "need next for {}: approvals hashes ({})", _0, _1)] + ApprovalsHashes(BlockHash, Box), + #[display( + fmt = "need next for {}: finality signatures at {} ({} validators)", + _0, + _1, + "_2.len()" + )] + FinalitySignatures(BlockHash, EraId, Vec), + #[display(fmt = "need next for {}: global state (state root hash {})", _0, _1)] + GlobalState(BlockHash, Digest), + #[display(fmt = "need next for {}: deploy {}", _0, _1)] + DeployByHash(BlockHash, DeployHash), + #[display(fmt = "need next for {}: transaction {}", _0, _1)] + TransactionById(BlockHash, TransactionId), + #[display(fmt = "need next for {}: make block executable (height {})", _0, _1)] + MakeExecutableBlock(BlockHash, u64), + #[display( + fmt = "need next for {}: enqueue this block (height {}) for execution", + _0, + _1 + )] + EnqueueForExecution(BlockHash, u64, Box), + /// We want the Merkle root hash stored in global state under the ChecksumRegistry key for the + /// execution results. 
+ #[display( + fmt = "need next for {}: execution results checksum (state root hash {})", + _0, + _1 + )] + ExecutionResultsChecksum(BlockHash, Digest), + #[display(fmt = "need next for {}: {} (checksum {})", _0, _1, _2)] + ExecutionResults( + BlockHash, + BlockExecutionResultsOrChunkId, + ExecutionResultsChecksum, + ), + #[display(fmt = "need next for {}: mark complete (height {})", _0, _1)] + BlockMarkedComplete(BlockHash, u64), + #[display( + fmt = "need next for {}: transition acquisition state to HaveStrictFinality (height {})", + _0, + _1 + )] + SwitchToHaveStrictFinality(BlockHash, u64), +} diff --git a/node/src/components/block_synchronizer/peer_list.rs b/node/src/components/block_synchronizer/peer_list.rs new file mode 100644 index 0000000000..547af010b7 --- /dev/null +++ b/node/src/components/block_synchronizer/peer_list.rs @@ -0,0 +1,184 @@ +#[cfg(test)] +mod tests; + +use std::collections::{btree_map::Entry, BTreeMap}; + +use datasize::DataSize; +use itertools::Itertools; +use rand::seq::IteratorRandom; +use tracing::debug; + +use crate::{types::NodeId, NodeRng}; +use casper_types::{TimeDiff, Timestamp}; + +#[derive(Copy, Clone, PartialEq, Eq, DataSize, Debug, Default)] +enum PeerQuality { + #[default] + Unknown, + Unreliable, + Reliable, + Dishonest, +} + +pub(super) enum PeersStatus { + Sufficient, + Insufficient, + Stale, +} + +#[derive(Clone, PartialEq, Eq, DataSize, Debug)] +pub(super) struct PeerList { + peer_list: BTreeMap, + keep_fresh: Timestamp, + max_simultaneous_peers: u8, + peer_refresh_interval: TimeDiff, +} + +impl PeerList { + pub(super) fn new(max_simultaneous_peers: u8, peer_refresh_interval: TimeDiff) -> Self { + PeerList { + peer_list: BTreeMap::new(), + keep_fresh: Timestamp::now(), + max_simultaneous_peers, + peer_refresh_interval, + } + } + pub(super) fn register_peer(&mut self, peer: NodeId) { + if self.peer_list.contains_key(&peer) { + return; + } + self.peer_list.insert(peer, PeerQuality::Unknown); + self.keep_fresh = 
Timestamp::now(); + } + + pub(super) fn dishonest_peers(&self) -> Vec { + self.peer_list + .iter() + .filter_map(|(node_id, pq)| { + if *pq == PeerQuality::Dishonest { + Some(*node_id) + } else { + None + } + }) + .collect_vec() + } + + pub(super) fn flush(&mut self) { + self.peer_list.clear(); + } + + pub(super) fn flush_dishonest_peers(&mut self) { + self.peer_list.retain(|_, v| *v != PeerQuality::Dishonest); + } + + pub(super) fn disqualify_peer(&mut self, peer: NodeId) { + self.peer_list.insert(peer, PeerQuality::Dishonest); + } + + pub(super) fn promote_peer(&mut self, peer: NodeId) { + debug!("BlockSynchronizer: promoting peer {:?}", peer); + // vacant should be unreachable + match self.peer_list.entry(peer) { + Entry::Vacant(_) => { + self.peer_list.insert(peer, PeerQuality::Unknown); + } + Entry::Occupied(entry) => match entry.get() { + PeerQuality::Dishonest => { + // no change -- this is terminal + } + PeerQuality::Unreliable | PeerQuality::Unknown => { + self.peer_list.insert(peer, PeerQuality::Reliable); + } + PeerQuality::Reliable => { + // no change -- this is the best + } + }, + } + } + + pub(super) fn demote_peer(&mut self, peer: NodeId) { + debug!("BlockSynchronizer: demoting peer {:?}", peer); + // vacant should be unreachable + match self.peer_list.entry(peer) { + Entry::Vacant(_) => { + // no change + } + Entry::Occupied(entry) => match entry.get() { + PeerQuality::Dishonest | PeerQuality::Unreliable => { + // no change + } + PeerQuality::Reliable | PeerQuality::Unknown => { + self.peer_list.insert(peer, PeerQuality::Unreliable); + } + }, + } + } + + pub(super) fn need_peers(&mut self) -> PeersStatus { + if !self + .peer_list + .iter() + .any(|(_, pq)| *pq != PeerQuality::Dishonest) + { + debug!("PeerList: no honest peers"); + return PeersStatus::Insufficient; + } + + // periodically ask for refreshed peers + if Timestamp::now().saturating_diff(self.keep_fresh) > self.peer_refresh_interval { + self.keep_fresh = Timestamp::now(); + let count = 
self + .peer_list + .iter() + .filter(|(_, pq)| **pq == PeerQuality::Reliable || **pq == PeerQuality::Unknown) + .count(); + let reliability_goal = self.max_simultaneous_peers as usize; + if count < reliability_goal { + debug!("PeerList: is stale"); + return PeersStatus::Stale; + } + } + + PeersStatus::Sufficient + } + + fn get_random_peers_by_quality( + &self, + rng: &mut NodeRng, + up_to: usize, + peer_quality: PeerQuality, + ) -> Vec { + self.peer_list + .iter() + .filter(|(_peer, quality)| **quality == peer_quality) + .choose_multiple(rng, up_to) + .into_iter() + .map(|(peer, _)| *peer) + .collect() + } + + pub(super) fn qualified_peers(&self, rng: &mut NodeRng) -> Vec { + self.qualified_peers_up_to(rng, self.max_simultaneous_peers as usize) + } + + pub(super) fn qualified_peers_up_to(&self, rng: &mut NodeRng, up_to: usize) -> Vec { + // get most useful up to limit + let mut peers = self.get_random_peers_by_quality(rng, up_to, PeerQuality::Reliable); + + // if below limit get unknown peers which may or may not be useful + let missing = up_to.saturating_sub(peers.len()); + if missing > 0 { + peers.extend(self.get_random_peers_by_quality(rng, missing, PeerQuality::Unknown)); + } + + // if still below limit try unreliable peers again until we have the chance to refresh the + // peer list + let missing = up_to.saturating_sub(peers.len()); + if missing > 0 { + peers.extend(self.get_random_peers_by_quality(rng, missing, PeerQuality::Unreliable)); + } + + peers + } +} diff --git a/node/src/components/block_synchronizer/peer_list/tests.rs b/node/src/components/block_synchronizer/peer_list/tests.rs new file mode 100644 index 0000000000..24035aa7a5 --- /dev/null +++ b/node/src/components/block_synchronizer/peer_list/tests.rs @@ -0,0 +1,129 @@ +use std::collections::HashSet; + +use super::*; +use casper_types::testing::TestRng; + +impl PeerList { + pub(crate) fn is_peer_unreliable(&self, peer_id: &NodeId) -> bool { + *self.peer_list.get(peer_id).unwrap() == 
PeerQuality::Unreliable + } + + pub(crate) fn is_peer_reliable(&self, peer_id: &NodeId) -> bool { + *self.peer_list.get(peer_id).unwrap() == PeerQuality::Reliable + } + + pub(crate) fn is_peer_unknown(&self, peer_id: &NodeId) -> bool { + *self.peer_list.get(peer_id).unwrap() == PeerQuality::Unknown + } +} + +// Create multiple random peers +fn random_peers(rng: &mut TestRng, num_random_peers: usize) -> HashSet { + (0..num_random_peers).map(|_| NodeId::random(rng)).collect() +} + +#[test] +fn number_of_qualified_peers_is_correct() { + let mut rng = TestRng::new(); + let mut peer_list = PeerList::new(5, TimeDiff::from_seconds(1)); + + let test_peers: Vec = random_peers(&mut rng, 10).into_iter().collect(); + + // Add test peers to the peer list and check the internal size + for peer in test_peers.iter() { + peer_list.register_peer(*peer); + } + assert_eq!(peer_list.peer_list.len(), 10); + + // All peers should be `Unknown`; check that the number of qualified peers is within the + // `max_simultaneous_peers` + let qualified_peers = peer_list.qualified_peers(&mut rng); + assert_eq!(qualified_peers.len(), 5); + + // Promote some peers to make them `Reliable`; check the count again + for peer in &test_peers[..3] { + peer_list.promote_peer(*peer); + } + let qualified_peers = peer_list.qualified_peers(&mut rng); + assert_eq!(qualified_peers.len(), 5); + + // Demote some peers to make them `Unreliable`; check the count again + for peer in &test_peers[5..] 
{ + peer_list.demote_peer(*peer); + } + let qualified_peers = peer_list.qualified_peers(&mut rng); + assert_eq!(qualified_peers.len(), 5); + + // Disqualify 7 peers; only 3 peers should remain valid for proposal + for peer in &test_peers[..7] { + peer_list.disqualify_peer(*peer); + } + let qualified_peers = peer_list.qualified_peers(&mut rng); + assert_eq!(qualified_peers.len(), 3); +} + +#[test] +fn unknown_peer_becomes_reliable_when_promoted() { + let mut rng = TestRng::new(); + let mut peer_list = PeerList::new(5, TimeDiff::from_seconds(1)); + let test_peer = NodeId::random(&mut rng); + + peer_list.register_peer(test_peer); + assert!(peer_list.is_peer_unknown(&test_peer)); + peer_list.promote_peer(test_peer); + assert!(peer_list.is_peer_reliable(&test_peer)); +} + +#[test] +fn unknown_peer_becomes_unreliable_when_demoted() { + let mut rng = TestRng::new(); + let mut peer_list = PeerList::new(5, TimeDiff::from_seconds(1)); + let test_peer = NodeId::random(&mut rng); + + peer_list.register_peer(test_peer); + assert!(peer_list.is_peer_unknown(&test_peer)); + peer_list.demote_peer(test_peer); + assert!(peer_list.is_peer_unreliable(&test_peer)); +} + +#[test] +fn reliable_peer_becomes_unreliable_when_demoted() { + let mut rng = TestRng::new(); + let mut peer_list = PeerList::new(5, TimeDiff::from_seconds(1)); + let test_peer = NodeId::random(&mut rng); + + peer_list.register_peer(test_peer); + assert!(peer_list.is_peer_unknown(&test_peer)); + peer_list.promote_peer(test_peer); + assert!(peer_list.is_peer_reliable(&test_peer)); + peer_list.demote_peer(test_peer); + assert!(peer_list.is_peer_unreliable(&test_peer)); +} + +#[test] +fn unreliable_peer_becomes_reliable_when_promoted() { + let mut rng = TestRng::new(); + let mut peer_list = PeerList::new(5, TimeDiff::from_seconds(1)); + let test_peer = NodeId::random(&mut rng); + + peer_list.register_peer(test_peer); + assert!(peer_list.is_peer_unknown(&test_peer)); + peer_list.demote_peer(test_peer); + 
assert!(peer_list.is_peer_unreliable(&test_peer)); + peer_list.promote_peer(test_peer); + assert!(peer_list.is_peer_reliable(&test_peer)); +} + +#[test] +fn unreliable_peer_remains_unreliable_if_demoted() { + let mut rng = TestRng::new(); + let mut peer_list = PeerList::new(5, TimeDiff::from_seconds(1)); + let test_peer = NodeId::random(&mut rng); + + peer_list.register_peer(test_peer); + assert!(peer_list.is_peer_unknown(&test_peer)); + peer_list.demote_peer(test_peer); + assert!(peer_list.is_peer_unreliable(&test_peer)); + peer_list.demote_peer(test_peer); + assert!(peer_list.is_peer_unreliable(&test_peer)); +} diff --git a/node/src/components/block_synchronizer/signature_acquisition.rs b/node/src/components/block_synchronizer/signature_acquisition.rs new file mode 100644 index 0000000000..1be8ad9fab --- /dev/null +++ b/node/src/components/block_synchronizer/signature_acquisition.rs @@ -0,0 +1,810 @@ +use std::collections::{btree_map::Entry, BTreeMap}; + +use datasize::DataSize; + +use casper_types::{FinalitySignature, LegacyRequiredFinality, PublicKey}; + +use super::block_acquisition::Acceptance; +use crate::types::{EraValidatorWeights, SignatureWeight}; + +#[derive(Clone, PartialEq, Eq, DataSize, Debug)] +enum SignatureState { + Vacant, + Pending, + Signature(Box), +} + +#[derive(Clone, PartialEq, Eq, DataSize, Debug)] +pub(super) struct SignatureAcquisition { + inner: BTreeMap, + maybe_is_legacy: Option, + signature_weight: SignatureWeight, + legacy_required_finality: LegacyRequiredFinality, +} + +impl SignatureAcquisition { + pub(super) fn new( + validators: Vec, + legacy_required_finality: LegacyRequiredFinality, + ) -> Self { + let inner = validators + .into_iter() + .map(|validator| (validator, SignatureState::Vacant)) + .collect(); + let maybe_is_legacy = None; + SignatureAcquisition { + inner, + maybe_is_legacy, + signature_weight: SignatureWeight::Insufficient, + legacy_required_finality, + } + } + + pub(super) fn register_pending(&mut self, 
public_key: PublicKey) { + match self.inner.entry(public_key) { + Entry::Vacant(vacant_entry) => { + vacant_entry.insert(SignatureState::Pending); + } + Entry::Occupied(mut occupied_entry) => { + if *occupied_entry.get() == SignatureState::Vacant { + occupied_entry.insert(SignatureState::Pending); + } + } + } + } + + pub(super) fn apply_signature( + &mut self, + finality_signature: FinalitySignature, + validator_weights: &EraValidatorWeights, + ) -> Acceptance { + let acceptance = match self.inner.entry(finality_signature.public_key().clone()) { + Entry::Vacant(vacant_entry) => { + vacant_entry.insert(SignatureState::Signature(Box::new(finality_signature))); + Acceptance::NeededIt + } + Entry::Occupied(mut occupied_entry) => match *occupied_entry.get() { + SignatureState::Vacant | SignatureState::Pending => { + occupied_entry.insert(SignatureState::Signature(Box::new(finality_signature))); + Acceptance::NeededIt + } + SignatureState::Signature(_) => Acceptance::HadIt, + }, + }; + if self.signature_weight != SignatureWeight::Strict { + self.signature_weight = validator_weights.signature_weight(self.have_signatures()); + } + acceptance + } + + pub(super) fn have_signatures(&self) -> impl Iterator { + self.inner.iter().filter_map(|(k, v)| match v { + SignatureState::Vacant | SignatureState::Pending => None, + SignatureState::Signature(_finality_signature) => Some(k), + }) + } + + pub(super) fn not_vacant(&self) -> impl Iterator { + self.inner.iter().filter_map(|(k, v)| match v { + SignatureState::Vacant => None, + SignatureState::Pending | SignatureState::Signature(_) => Some(k), + }) + } + + pub(super) fn not_pending(&self) -> impl Iterator { + self.inner.iter().filter_map(|(k, v)| match v { + SignatureState::Pending => None, + SignatureState::Vacant | SignatureState::Signature(_) => Some(k), + }) + } + + pub(super) fn set_is_legacy(&mut self, is_legacy: bool) { + self.maybe_is_legacy = Some(is_legacy); + } + + pub(super) fn is_legacy(&self) -> bool { + 
self.maybe_is_legacy.unwrap_or(false) + } + + pub(super) fn signature_weight(&self) -> SignatureWeight { + self.signature_weight + } + + // Determines signature weight sufficiency based on the type of sync (forward or historical) and + // the protocol version that the block was created with (pre-1.5 or post-1.5) + // `requires_strict_finality` determines what the caller requires with regards to signature + // sufficiency: + // * false means that the caller considers `Weak` finality as sufficient + // * true means that the caller considers `Strict` finality as sufficient + pub(super) fn has_sufficient_finality( + &self, + is_historical: bool, + requires_strict_finality: bool, + ) -> bool { + if is_historical && self.is_legacy() { + match self.legacy_required_finality { + LegacyRequiredFinality::Strict => self + .signature_weight + .is_sufficient(requires_strict_finality), + LegacyRequiredFinality::Weak => { + self.signature_weight == SignatureWeight::Strict + || self.signature_weight == SignatureWeight::Weak + } + LegacyRequiredFinality::Any => true, + } + } else { + self.signature_weight + .is_sufficient(requires_strict_finality) + } + } +} + +#[cfg(test)] +mod tests { + use std::{collections::BTreeSet, fmt::Debug, iter}; + + use assert_matches::assert_matches; + use itertools::Itertools; + use num_rational::Ratio; + use rand::Rng; + + use casper_types::{ + testing::TestRng, BlockHash, ChainNameDigest, EraId, FinalitySignatureV2, SecretKey, U512, + }; + + use super::*; + + impl SignatureAcquisition { + pub(super) fn have_no_vacant(&self) -> bool { + self.inner.iter().all(|(_, v)| *v != SignatureState::Vacant) + } + } + + fn keypair(rng: &mut TestRng) -> (PublicKey, SecretKey) { + let secret = SecretKey::random(rng); + let public = PublicKey::from(&secret); + + (public, secret) + } + + /// Asserts that 2 iterators iterate over the same set of items. + macro_rules! assert_iter_equal { + ( $left:expr, $right:expr $(,)? 
) => {{ + fn to_btreeset( + left: impl IntoIterator, + right: impl IntoIterator, + ) -> (BTreeSet, BTreeSet) { + (left.into_iter().collect(), right.into_iter().collect()) + } + + let (left, right) = to_btreeset($left, $right); + assert_eq!(left, right); + }}; + } + + fn test_finality_with_ratio(finality_threshold: Ratio, first_weight: SignatureWeight) { + let rng = &mut TestRng::new(); + let validators = iter::repeat_with(|| keypair(rng)).take(4).collect_vec(); + let block_hash = BlockHash::random(rng); + let block_height = rng.gen(); + let era_id = EraId::new(rng.gen()); + let chain_name_hash = ChainNameDigest::random(rng); + let weights = EraValidatorWeights::new( + era_id, + validators + .iter() + .enumerate() + .map(|(i, (public, _))| (public.clone(), (i + 1).into())) + .collect(), + finality_threshold, + ); + assert_eq!(U512::from(10), weights.get_total_weight()); + let mut signature_acquisition = SignatureAcquisition::new( + validators.iter().map(|(p, _)| p.clone()).collect(), + LegacyRequiredFinality::Strict, + ); + + // Signature for the validator #0 weighting 1: + let (public_0, secret_0) = validators.first().unwrap(); + let finality_signature = FinalitySignatureV2::create( + block_hash, + block_height, + era_id, + chain_name_hash, + secret_0, + ); + assert_matches!( + signature_acquisition.apply_signature(finality_signature.into(), &weights), + Acceptance::NeededIt + ); + assert_iter_equal!(signature_acquisition.have_signatures(), [public_0]); + assert_iter_equal!(signature_acquisition.not_vacant(), [public_0]); + assert!(signature_acquisition.have_no_vacant() == false); + assert_iter_equal!( + signature_acquisition.not_pending(), + validators.iter().map(|(p, _)| p), + ); + + assert_eq!(signature_acquisition.signature_weight(), first_weight); + + // Signature for the validator #2 weighting 3: + let (public_2, secret_2) = validators.get(2).unwrap(); + let finality_signature = FinalitySignatureV2::create( + block_hash, + block_height, + era_id, + 
chain_name_hash, + secret_2, + ); + assert_matches!( + signature_acquisition.apply_signature(finality_signature.into(), &weights), + Acceptance::NeededIt + ); + assert_iter_equal!( + signature_acquisition.have_signatures(), + [public_0, public_2], + ); + assert_iter_equal!(signature_acquisition.not_vacant(), [public_0, public_2]); + assert!(signature_acquisition.have_no_vacant() == false); + assert_iter_equal!( + signature_acquisition.not_pending(), + validators.iter().map(|(p, _)| p), + ); + // The total signed weight is 4/10, which is higher than 1/3: + assert_eq!( + signature_acquisition.signature_weight(), + SignatureWeight::Weak + ); + + // Signature for the validator #3 weighting 4: + let (public_3, secret_3) = validators.get(3).unwrap(); + let finality_signature = FinalitySignatureV2::create( + block_hash, + block_height, + era_id, + chain_name_hash, + secret_3, + ); + assert_matches!( + signature_acquisition.apply_signature(finality_signature.into(), &weights), + Acceptance::NeededIt + ); + assert_iter_equal!( + signature_acquisition.have_signatures(), + [public_0, public_2, public_3], + ); + assert_iter_equal!( + signature_acquisition.not_vacant(), + [public_0, public_2, public_3], + ); + assert!(signature_acquisition.have_no_vacant() == false); + assert_iter_equal!( + signature_acquisition.not_pending(), + validators.iter().map(|(p, _)| p), + ); + // The total signed weight is 8/10, which is higher than 2/3: + assert_eq!( + signature_acquisition.signature_weight(), + SignatureWeight::Strict + ); + } + + #[test] + fn should_return_insufficient_when_weight_1_and_1_3_is_required() { + test_finality_with_ratio(Ratio::new(1, 3), SignatureWeight::Insufficient) + } + + #[test] + fn should_return_weak_when_weight_1_and_1_10_is_required() { + test_finality_with_ratio(Ratio::new(1, 10), SignatureWeight::Insufficient) + } + + #[test] + fn should_return_weak_when_weight_1_and_1_11_is_required() { + test_finality_with_ratio(Ratio::new(1, 11), SignatureWeight::Weak) + 
} + + #[test] + fn adding_a_not_already_stored_validator_signature_works() { + let rng = &mut TestRng::new(); + let validators = iter::repeat_with(|| keypair(rng)).take(4).collect_vec(); + let block_hash = BlockHash::random(rng); + let block_height = rng.gen(); + let chain_name_hash = ChainNameDigest::random(rng); + let era_id = EraId::new(rng.gen()); + let weights = EraValidatorWeights::new( + era_id, + validators + .iter() + .enumerate() + .map(|(i, (public, _))| (public.clone(), (i + 1).into())) + .collect(), + Ratio::new(1, 3), // Highway finality + ); + assert_eq!(U512::from(10), weights.get_total_weight()); + let mut signature_acquisition = SignatureAcquisition::new( + validators.iter().map(|(p, _)| p.clone()).collect(), + LegacyRequiredFinality::Strict, + ); + + // Signature for an already stored validator: + let (_public_0, secret_0) = validators.first().unwrap(); + let finality_signature = FinalitySignatureV2::create( + block_hash, + block_height, + era_id, + chain_name_hash, + secret_0, + ); + assert_matches!( + signature_acquisition.apply_signature(finality_signature.into(), &weights), + Acceptance::NeededIt + ); + + // Signature for an unknown validator: + let (_public, secret) = keypair(rng); + let finality_signature = + FinalitySignatureV2::create(block_hash, block_height, era_id, chain_name_hash, &secret); + assert_matches!( + signature_acquisition.apply_signature(finality_signature.into(), &weights), + Acceptance::NeededIt + ); + } + + #[test] + fn signing_twice_does_nothing() { + let rng = &mut TestRng::new(); + let validators = iter::repeat_with(|| keypair(rng)).take(4).collect_vec(); + let block_hash = BlockHash::random(rng); + let block_height = rng.gen(); + let chain_name_hash = ChainNameDigest::random(rng); + let era_id = EraId::new(rng.gen()); + let weights = EraValidatorWeights::new( + era_id, + validators + .iter() + .enumerate() + .map(|(i, (public, _))| (public.clone(), (i + 1).into())) + .collect(), + Ratio::new(1, 3), // Highway 
finality + ); + assert_eq!(U512::from(10), weights.get_total_weight()); + let mut signature_acquisition = SignatureAcquisition::new( + validators.iter().map(|(p, _)| p.clone()).collect(), + LegacyRequiredFinality::Strict, + ); + + let (_public_0, secret_0) = validators.first().unwrap(); + + // Signature for an already stored validator: + let finality_signature = FinalitySignatureV2::create( + block_hash, + block_height, + era_id, + chain_name_hash, + secret_0, + ); + assert_matches!( + signature_acquisition.apply_signature(finality_signature.into(), &weights), + Acceptance::NeededIt + ); + + // Signing again returns `HadIt`: + let finality_signature = FinalitySignatureV2::create( + block_hash, + block_height, + era_id, + chain_name_hash, + secret_0, + ); + assert_matches!( + signature_acquisition.apply_signature(finality_signature.into(), &weights), + Acceptance::HadIt + ); + } + + #[test] + fn register_pending_has_the_expected_behavior() { + let rng = &mut TestRng::new(); + let validators = iter::repeat_with(|| keypair(rng)).take(4).collect_vec(); + let block_hash = BlockHash::random(rng); + let block_height = rng.gen(); + let era_id = EraId::new(rng.gen()); + let chain_name_hash = ChainNameDigest::random(rng); + let weights = EraValidatorWeights::new( + era_id, + validators + .iter() + .enumerate() + .map(|(i, (public, _))| (public.clone(), (i + 1).into())) + .collect(), + Ratio::new(1, 11), // Low finality threshold + ); + assert_eq!(U512::from(10), weights.get_total_weight()); + let mut signature_acquisition = SignatureAcquisition::new( + validators.iter().map(|(p, _)| p.clone()).collect(), + LegacyRequiredFinality::Strict, + ); + + // Set the validator #0 weighting 1 as pending: + let (public_0, secret_0) = validators.first().unwrap(); + signature_acquisition.register_pending(public_0.clone()); + assert_iter_equal!(signature_acquisition.have_signatures(), []); + assert_iter_equal!(signature_acquisition.not_vacant(), [public_0]); + assert_iter_equal!( + 
signature_acquisition.not_pending(), + validators.iter().skip(1).map(|(p, _s)| p).collect_vec(), + ); + assert!(signature_acquisition.have_no_vacant() == false); + assert_eq!( + signature_acquisition.signature_weight(), + SignatureWeight::Insufficient + ); + + // Sign it: + let finality_signature = FinalitySignatureV2::create( + block_hash, + block_height, + era_id, + chain_name_hash, + secret_0, + ); + assert_matches!( + signature_acquisition.apply_signature(finality_signature.into(), &weights), + Acceptance::NeededIt + ); + assert_iter_equal!(signature_acquisition.have_signatures(), [public_0]); + assert_iter_equal!(signature_acquisition.not_vacant(), [public_0]); + assert!(signature_acquisition.have_no_vacant() == false); + assert_iter_equal!( + signature_acquisition.not_pending(), + validators.iter().map(|(p, _)| p), + ); + assert_eq!( + signature_acquisition.signature_weight(), + SignatureWeight::Weak + ); + } + + #[test] + fn register_pending_an_unknown_validator_works() { + let rng = &mut TestRng::new(); + let validators = iter::repeat_with(|| keypair(rng)).take(4).collect_vec(); + let mut signature_acquisition = SignatureAcquisition::new( + validators.iter().map(|(p, _)| p.clone()).collect(), + LegacyRequiredFinality::Strict, + ); + + // Set a new validator as pending: + let (public, _secret) = keypair(rng); + signature_acquisition.register_pending(public.clone()); + assert_iter_equal!(signature_acquisition.have_signatures(), []); + assert_iter_equal!(signature_acquisition.not_vacant(), [&public]); + assert_iter_equal!( + signature_acquisition.not_pending(), + validators.iter().map(|(p, _s)| p), + ); + assert!(signature_acquisition.have_no_vacant() == false); + } + + #[test] + fn missing_legacy_flag_means_not_legacy() { + let signature_weight = SignatureWeight::Insufficient; + let legacy_required_finality = LegacyRequiredFinality::Any; + + let sa = SignatureAcquisition { + inner: Default::default(), + maybe_is_legacy: None, + signature_weight, + 
legacy_required_finality, + }; + + assert!(!sa.is_legacy()) + } + + #[test] + fn not_historical_and_not_legacy_and_is_insufficient() { + let signature_weight = SignatureWeight::Insufficient; + + // This parameter should not affect calculation for not historical and not legacy blocks. + let legacy_required_finality = [ + LegacyRequiredFinality::Any, + LegacyRequiredFinality::Weak, + LegacyRequiredFinality::Strict, + ]; + + legacy_required_finality + .iter() + .for_each(|legacy_required_finality| { + let is_legacy = false; + let sa = SignatureAcquisition { + inner: Default::default(), + maybe_is_legacy: Some(is_legacy), + signature_weight, + legacy_required_finality: *legacy_required_finality, + }; + + let is_historical = false; + let requires_strict_finality = false; + let result = sa.has_sufficient_finality(is_historical, requires_strict_finality); + assert!(!result); + + let requires_strict_finality = true; + let result = sa.has_sufficient_finality(is_historical, requires_strict_finality); + assert!(!result); + }) + } + + #[test] + fn not_historical_and_not_legacy_and_is_weak() { + let signature_weight = SignatureWeight::Weak; + + // This parameter should not affect calculation for not historical and not legacy blocks. 
+ let legacy_required_finality = [ + LegacyRequiredFinality::Any, + LegacyRequiredFinality::Weak, + LegacyRequiredFinality::Strict, + ]; + + legacy_required_finality + .iter() + .for_each(|legacy_required_finality| { + let is_legacy = false; + let sa = SignatureAcquisition { + inner: Default::default(), + maybe_is_legacy: Some(is_legacy), + signature_weight, + legacy_required_finality: *legacy_required_finality, + }; + + let is_historical = false; + let requires_strict_finality = false; + let result = sa.has_sufficient_finality(is_historical, requires_strict_finality); + assert!(result); + + let requires_strict_finality = true; + let result = sa.has_sufficient_finality(is_historical, requires_strict_finality); + assert!(!result); + }) + } + + #[test] + fn not_historical_and_not_legacy_and_is_strict() { + let signature_weight = SignatureWeight::Strict; + + // This parameter should not affect calculation for not historical and not legacy blocks. + let legacy_required_finality = [ + LegacyRequiredFinality::Any, + LegacyRequiredFinality::Weak, + LegacyRequiredFinality::Strict, + ]; + + legacy_required_finality + .iter() + .for_each(|legacy_required_finality| { + let is_legacy = false; + let sa = SignatureAcquisition { + inner: Default::default(), + maybe_is_legacy: Some(is_legacy), + signature_weight, + legacy_required_finality: *legacy_required_finality, + }; + + let is_historical = false; + let requires_strict_finality = false; + let result = sa.has_sufficient_finality(is_historical, requires_strict_finality); + assert!(result); + + let requires_strict_finality = true; + let result = sa.has_sufficient_finality(is_historical, requires_strict_finality); + assert!(result); + }) + } + + #[test] + fn historical_and_legacy_requires_any_and_is_insufficient() { + let signature_weight = SignatureWeight::Insufficient; + let legacy_required_finality = LegacyRequiredFinality::Any; + + let is_legacy = true; + let sa = SignatureAcquisition { + inner: Default::default(), + 
maybe_is_legacy: Some(is_legacy), + signature_weight, + legacy_required_finality, + }; + + let is_historical = true; + let requires_strict_finality = false; + let result = sa.has_sufficient_finality(is_historical, requires_strict_finality); + assert!(result); + + let requires_strict_finality = true; + let result = sa.has_sufficient_finality(is_historical, requires_strict_finality); + assert!(result); + } + + #[test] + fn historical_and_legacy_requires_any_and_is_weak() { + let signature_weight = SignatureWeight::Weak; + let legacy_required_finality = LegacyRequiredFinality::Any; + + let is_legacy = true; + let sa = SignatureAcquisition { + inner: Default::default(), + maybe_is_legacy: Some(is_legacy), + signature_weight, + legacy_required_finality, + }; + + let is_historical = true; + let requires_strict_finality = false; + let result = sa.has_sufficient_finality(is_historical, requires_strict_finality); + assert!(result); + + let requires_strict_finality = true; + let result = sa.has_sufficient_finality(is_historical, requires_strict_finality); + assert!(result); + } + + #[test] + fn historical_and_legacy_requires_any_and_is_strict() { + let signature_weight = SignatureWeight::Strict; + let legacy_required_finality = LegacyRequiredFinality::Any; + + let is_legacy = true; + let sa = SignatureAcquisition { + inner: Default::default(), + maybe_is_legacy: Some(is_legacy), + signature_weight, + legacy_required_finality, + }; + + let is_historical = true; + let requires_strict_finality = false; + let result = sa.has_sufficient_finality(is_historical, requires_strict_finality); + assert!(result); + + let requires_strict_finality = true; + let result = sa.has_sufficient_finality(is_historical, requires_strict_finality); + assert!(result); + } + + #[test] + fn historical_and_legacy_requires_weak_and_is_insufficient() { + let signature_weight = SignatureWeight::Insufficient; + let legacy_required_finality = LegacyRequiredFinality::Weak; + + let is_legacy = true; + let sa = 
SignatureAcquisition { + inner: Default::default(), + maybe_is_legacy: Some(is_legacy), + signature_weight, + legacy_required_finality, + }; + + let is_historical = true; + let requires_strict_finality = false; + let result = sa.has_sufficient_finality(is_historical, requires_strict_finality); + assert!(!result); + + let requires_strict_finality = true; + let result = sa.has_sufficient_finality(is_historical, requires_strict_finality); + assert!(!result); + } + + #[test] + fn historical_and_legacy_requires_weak_and_is_weak() { + let signature_weight = SignatureWeight::Weak; + let legacy_required_finality = LegacyRequiredFinality::Weak; + + let is_legacy = true; + let sa = SignatureAcquisition { + inner: Default::default(), + maybe_is_legacy: Some(is_legacy), + signature_weight, + legacy_required_finality, + }; + + let is_historical = true; + let requires_strict_finality = false; + let result = sa.has_sufficient_finality(is_historical, requires_strict_finality); + assert!(result); + + let requires_strict_finality = true; + let result = sa.has_sufficient_finality(is_historical, requires_strict_finality); + assert!(result); + } + + #[test] + fn historical_and_legacy_requires_weak_and_is_strict() { + let signature_weight = SignatureWeight::Strict; + let legacy_required_finality = LegacyRequiredFinality::Weak; + + let is_legacy = true; + let sa = SignatureAcquisition { + inner: Default::default(), + maybe_is_legacy: Some(is_legacy), + signature_weight, + legacy_required_finality, + }; + + let is_historical = true; + let requires_strict_finality = false; + let result = sa.has_sufficient_finality(is_historical, requires_strict_finality); + assert!(result); + + let requires_strict_finality = true; + let result = sa.has_sufficient_finality(is_historical, requires_strict_finality); + assert!(result); + } + + #[test] + fn historical_and_legacy_requires_strict_and_is_insufficient() { + let signature_weight = SignatureWeight::Insufficient; + let legacy_required_finality = 
LegacyRequiredFinality::Strict; + + let is_legacy = true; + let sa = SignatureAcquisition { + inner: Default::default(), + maybe_is_legacy: Some(is_legacy), + signature_weight, + legacy_required_finality, + }; + + let is_historical = true; + let requires_strict_finality = false; + let result = sa.has_sufficient_finality(is_historical, requires_strict_finality); + assert!(!result); + + let requires_strict_finality = true; + let result = sa.has_sufficient_finality(is_historical, requires_strict_finality); + assert!(!result); + } + + #[test] + fn historical_and_legacy_requires_strict_and_is_weak() { + let signature_weight = SignatureWeight::Weak; + let legacy_required_finality = LegacyRequiredFinality::Strict; + + let is_legacy = true; + let sa = SignatureAcquisition { + inner: Default::default(), + maybe_is_legacy: Some(is_legacy), + signature_weight, + legacy_required_finality, + }; + + let is_historical = true; + let requires_strict_finality = false; + let result = sa.has_sufficient_finality(is_historical, requires_strict_finality); + assert!(result); + + let requires_strict_finality = true; + let result = sa.has_sufficient_finality(is_historical, requires_strict_finality); + assert!(!result); + } + + #[test] + fn historical_and_legacy_requires_strict_and_is_strict() { + let signature_weight = SignatureWeight::Strict; + let legacy_required_finality = LegacyRequiredFinality::Strict; + + let is_legacy = true; + let sa = SignatureAcquisition { + inner: Default::default(), + maybe_is_legacy: Some(is_legacy), + signature_weight, + legacy_required_finality, + }; + + let is_historical = true; + let requires_strict_finality = false; + let result = sa.has_sufficient_finality(is_historical, requires_strict_finality); + assert!(result); + + let requires_strict_finality = true; + let result = sa.has_sufficient_finality(is_historical, requires_strict_finality); + assert!(result); + } +} diff --git a/node/src/components/block_synchronizer/tests.rs 
b/node/src/components/block_synchronizer/tests.rs new file mode 100644 index 0000000000..b9d197c291 --- /dev/null +++ b/node/src/components/block_synchronizer/tests.rs @@ -0,0 +1,4267 @@ +pub(crate) mod test_utils; + +use std::{ + cmp::min, + collections::{BTreeMap, VecDeque}, + convert::TryInto, + iter, + time::Duration, +}; + +use assert_matches::assert_matches; +use derive_more::From; +use num_rational::Ratio; +use rand::{seq::IteratorRandom, Rng}; + +use casper_storage::data_access_layer::ExecutionResultsChecksumResult; +use casper_types::{ + global_state::TrieMerkleProof, testing::TestRng, AccessRights, BlockV2, CLValue, + ChainNameDigest, Chainspec, Deploy, Digest, EraId, FinalitySignatureV2, Key, + LegacyRequiredFinality, ProtocolVersion, PublicKey, SecretKey, StoredValue, TestBlockBuilder, + TestBlockV1Builder, TimeDiff, URef, U512, +}; + +use super::*; +use crate::{ + components::{ + block_synchronizer::block_acquisition::BlockAcquisitionState, + consensus::tests::utils::{ALICE_PUBLIC_KEY, ALICE_SECRET_KEY}, + }, + effect::Effect, + reactor::{EventQueueHandle, QueueKind, Scheduler}, + tls::KeyFingerprint, + types::{BlockExecutionResultsOrChunkId, ValueOrChunk}, + utils, +}; + +const MAX_SIMULTANEOUS_PEERS: u8 = 5; +const TEST_LATCH_RESET_INTERVAL_MILLIS: u64 = 5; +const SHOULD_FETCH_EXECUTION_STATE: bool = true; +const STRICT_FINALITY_REQUIRED_VERSION: ProtocolVersion = ProtocolVersion::from_parts(1, 5, 0); + +/// Event for the mock reactor. 
+#[derive(Debug, From)] +enum MockReactorEvent { + MarkBlockCompletedRequest(#[allow(dead_code)] MarkBlockCompletedRequest), + BlockFetcherRequest(FetcherRequest), + BlockHeaderFetcherRequest(FetcherRequest), + LegacyDeployFetcherRequest(FetcherRequest), + TransactionFetcherRequest(FetcherRequest), + FinalitySignatureFetcherRequest(FetcherRequest), + TrieOrChunkFetcherRequest(#[allow(dead_code)] FetcherRequest), + BlockExecutionResultsOrChunkFetcherRequest(FetcherRequest), + SyncLeapFetcherRequest(#[allow(dead_code)] FetcherRequest), + ApprovalsHashesFetcherRequest(FetcherRequest), + NetworkInfoRequest(NetworkInfoRequest), + BlockAccumulatorRequest(BlockAccumulatorRequest), + PeerBehaviorAnnouncement(#[allow(dead_code)] PeerBehaviorAnnouncement), + StorageRequest(StorageRequest), + TrieAccumulatorRequest(#[allow(dead_code)] TrieAccumulatorRequest), + ContractRuntimeRequest(ContractRuntimeRequest), + SyncGlobalStateRequest(SyncGlobalStateRequest), + MakeBlockExecutableRequest(MakeBlockExecutableRequest), + MetaBlockAnnouncement(MetaBlockAnnouncement), +} + +struct MockReactor { + scheduler: &'static Scheduler, + effect_builder: EffectBuilder, +} + +impl MockReactor { + fn new() -> Self { + let scheduler = utils::leak(Scheduler::new(QueueKind::weights(), None)); + let event_queue_handle = EventQueueHandle::without_shutdown(scheduler); + let effect_builder = EffectBuilder::new(event_queue_handle); + MockReactor { + scheduler, + effect_builder, + } + } + + fn effect_builder(&self) -> EffectBuilder { + self.effect_builder + } + + async fn crank(&self) -> MockReactorEvent { + let ((_ancestor, reactor_event), _) = self.scheduler.pop().await; + reactor_event + } + + async fn process_effects( + &self, + effects: impl IntoIterator>, + ) -> Vec { + let mut events = Vec::new(); + for effect in effects { + tokio::spawn(effect); + let event = self.crank().await; + events.push(event); + } + events + } +} + +struct TestEnv { + block: Block, + validator_keys: Vec>, + peers: Vec, +} 
+ +// Utility struct used to generate common test artifacts +impl TestEnv { + // Replaces the test block with the one provided as parameter + fn with_block(self, block: Block) -> Self { + Self { + block, + validator_keys: self.validator_keys, + peers: self.peers, + } + } + + fn block(&self) -> &Block { + &self.block + } + + fn validator_keys(&self) -> &Vec> { + &self.validator_keys + } + + fn peers(&self) -> &Vec { + &self.peers + } + + // Generates a `ValidatorMatrix` that has the validators for the era of the test block + // All validators have equal weights + fn gen_validator_matrix(&self) -> ValidatorMatrix { + let validator_weights: BTreeMap = self + .validator_keys + .iter() + .map(|key| (PublicKey::from(key.as_ref()), 100.into())) // we give each validator equal weight + .collect(); + + assert_eq!(validator_weights.len(), self.validator_keys.len()); + + // Set up a validator matrix for the era in which our test block was created + let mut validator_matrix = ValidatorMatrix::new( + Ratio::new(1, 3), + ChainNameDigest::from_chain_name("casper-example"), + None, + EraId::from(0), + self.validator_keys[0].clone(), + PublicKey::from(self.validator_keys[0].as_ref()), + 1, + 3, + ); + validator_matrix.register_validator_weights(self.block.era_id(), validator_weights); + + validator_matrix + } + + fn random(rng: &mut TestRng) -> TestEnv { + let num_validators: usize = rng.gen_range(10..100); + let validator_keys: Vec<_> = iter::repeat_with(|| Arc::new(SecretKey::random(rng))) + .take(num_validators) + .collect(); + + let num_peers = rng.gen_range(10..20); + + TestEnv { + block: TestBlockBuilder::new().build(rng).into(), + validator_keys, + peers: iter::repeat(()) + .take(num_peers) + .map(|_| NodeId::from(rng.gen::())) + .collect(), + } + } +} + +fn check_sync_global_state_event(event: MockReactorEvent, block: &Block) { + assert!(matches!( + event, + MockReactorEvent::SyncGlobalStateRequest { .. 
} + )); + let global_sync_request = match event { + MockReactorEvent::SyncGlobalStateRequest(req) => req, + _ => unreachable!(), + }; + assert_eq!(global_sync_request.block_hash, *block.hash()); + assert_eq!( + global_sync_request.state_root_hash, + *block.state_root_hash() + ); +} + +// Calls need_next for the block_synchronizer and processes the effects resulted returning a list of +// the new events that were generated +async fn need_next( + rng: &mut TestRng, + reactor: &MockReactor, + block_synchronizer: &mut BlockSynchronizer, + num_expected_events: u8, +) -> Vec { + let effects = block_synchronizer.need_next(reactor.effect_builder(), rng); + assert_eq!(effects.len() as u8, num_expected_events); + reactor.process_effects(effects).await +} + +fn register_multiple_signatures<'a, I: IntoIterator>>( + builder: &mut BlockBuilder, + block: &Block, + validator_keys_iter: I, + chain_name_hash: ChainNameDigest, +) { + for secret_key in validator_keys_iter { + // Register a finality signature + let signature = FinalitySignatureV2::create( + *block.hash(), + block.height(), + block.era_id(), + chain_name_hash, + secret_key.as_ref(), + ); + assert!(signature.is_verified().is_ok()); + assert!(builder + .register_finality_signature(signature.into(), None) + .is_ok()); + } +} + +fn dummy_merkle_proof() -> TrieMerkleProof { + TrieMerkleProof::new( + URef::new([255; 32], AccessRights::NONE).into(), + StoredValue::CLValue(CLValue::from_t(()).unwrap()), + VecDeque::new(), + ) +} + +trait OneExt: IntoIterator { + fn try_one(self) -> Option; + fn one(self) -> Self::Item; +} + +impl OneExt for I { + fn try_one(self) -> Option { + let mut it = self.into_iter(); + let first = it.next()?; + + it.next().is_none().then_some(first) + } + + #[track_caller] + fn one(self) -> Self::Item { + let mut it = self.into_iter(); + let first = it + .next() + .expect("no element in the iterator, but 1 was expected"); + + if it.next().is_some() { + panic!("more that 1 element in the iterator, but 1 
was expected") + } + + first + } +} + +#[cfg(test)] +impl BlockSynchronizer { + fn new_initialized( + rng: &mut TestRng, + validator_matrix: ValidatorMatrix, + config: Config, + ) -> BlockSynchronizer { + let mut block_synchronizer = BlockSynchronizer::new( + config, + Arc::new(Chainspec::random(rng)), + MAX_SIMULTANEOUS_PEERS, + validator_matrix, + &Registry::new(), + ) + .expect("Failed to create BlockSynchronizer"); + + >::set_state( + &mut block_synchronizer, + ComponentState::Initialized, + ); + + block_synchronizer + } + + fn with_legacy_finality(mut self, legacy_required_finality: LegacyRequiredFinality) -> Self { + let core_config = &mut Arc::get_mut(&mut self.chainspec).unwrap().core_config; + core_config.start_protocol_version_with_strict_finality_signatures_required = + STRICT_FINALITY_REQUIRED_VERSION; + core_config.legacy_required_finality = legacy_required_finality; + + self + } + + fn forward_builder(&self) -> &BlockBuilder { + self.forward.as_ref().expect("Forward builder missing") + } +} + +/// Returns the number of validators that need a signature for a weak finality of 1/3. +fn weak_finality_threshold(n: usize) -> usize { + n / 3 + 1 +} + +/// Returns the number of validators that need a signature for a strict finality of 2/3. 
+fn strict_finality_threshold(n: usize) -> usize { + n * 2 / 3 + 1 +} + +fn latch_inner_check(builder: Option<&BlockBuilder>, expected: bool, msg: &str) { + assert_eq!( + builder.expect("builder should exist").latched(), + expected, + "{}", + msg + ); +} + +fn latch_count_check(builder: Option<&BlockBuilder>, expected: u8, msg: &str) { + assert_eq!( + builder.expect("builder should exist").latch_count(), + expected, + "{}", + msg + ); +} + +fn need_next_inner_check( + builder: Option<&mut BlockBuilder>, + rng: &mut TestRng, + expected: NeedNext, + msg: &str, +) { + let need_next = builder + .expect("should exist") + .block_acquisition_action(rng, MAX_SIMULTANEOUS_PEERS) + .need_next(); + assert_eq!(need_next, expected, "{}", msg); +} + +#[tokio::test] +async fn global_state_sync_wont_stall_with_bad_peers() { + let mut rng = TestRng::new(); + let mock_reactor = MockReactor::new(); + let test_env = TestEnv::random(&mut rng).with_block( + TestBlockBuilder::new() + .era(1) + .random_transactions(1, &mut rng) + .build(&mut rng) + .into(), + ); + let peers = test_env.peers(); + let block = test_env.block(); + let validator_matrix = test_env.gen_validator_matrix(); + let chain_name_hash = validator_matrix.chain_name_hash(); + let validators_secret_keys = test_env.validator_keys(); + let cfg = Config { + latch_reset_interval: TimeDiff::from_millis(TEST_LATCH_RESET_INTERVAL_MILLIS), + ..Default::default() + }; + let mut block_synchronizer = + BlockSynchronizer::new_initialized(&mut rng, validator_matrix, cfg); + + // Set up the synchronizer for the test block such that the next step is getting global state + block_synchronizer.register_block_by_hash(*block.hash(), true); + assert!( + block_synchronizer.historical.is_some(), + "we only get global state on historical sync" + ); + block_synchronizer.register_peers(*block.hash(), peers.clone()); + let historical_builder = block_synchronizer.historical.as_mut().unwrap(); + assert!( + historical_builder + 
.register_block_header(block.clone_header(), None) + .is_ok(), + "historical builder should register header" + ); + historical_builder.register_era_validator_weights(&block_synchronizer.validator_matrix); + + // Register finality signatures to reach weak finality + register_multiple_signatures( + historical_builder, + block, + validators_secret_keys + .iter() + .take(weak_finality_threshold(validators_secret_keys.len())), + chain_name_hash, + ); + assert!( + historical_builder + .register_block(block.clone(), None) + .is_ok(), + "should register block" + ); + // Register the remaining signatures to reach strict finality + register_multiple_signatures( + historical_builder, + block, + validators_secret_keys + .iter() + .skip(weak_finality_threshold(validators_secret_keys.len())), + chain_name_hash, + ); + + // At this point, the next step the synchronizer takes should be to get global state + let mut effects = block_synchronizer.need_next(mock_reactor.effect_builder(), &mut rng); + assert_eq!( + effects.len(), + 1, + "need next should have 1 effect at this step, not {}", + effects.len() + ); + tokio::spawn(async move { effects.remove(0).await }); + let event = mock_reactor.crank().await; + + // Expect a `SyncGlobalStateRequest` for the `GlobalStateSynchronizer` + // The peer list that the GlobalStateSynchronizer will use to fetch the tries + let first_peer_set = peers.iter().copied().choose_multiple(&mut rng, 4); + check_sync_global_state_event(event, block); + + // Wait for the latch to reset + tokio::time::sleep(Duration::from_millis(TEST_LATCH_RESET_INTERVAL_MILLIS * 2)).await; + + // Simulate an error form the global_state_synchronizer; + // make it seem that the `TrieAccumulator` did not find the required tries on any of the peers + block_synchronizer.global_state_synced( + *block.hash(), + Err(GlobalStateSynchronizerError::TrieAccumulator( + first_peer_set.to_vec(), + )), + ); + + // At this point we expect that another request for the global state would be 
made, + // this time with other peers + let mut effects = block_synchronizer.need_next(mock_reactor.effect_builder(), &mut rng); + assert_eq!( + effects.len(), + 1, + "need next should still have 1 effect at this step, not {}", + effects.len() + ); + tokio::spawn(async move { effects.remove(0).await }); + let event = mock_reactor.crank().await; + + let second_peer_set = peers.iter().copied().choose_multiple(&mut rng, 4); + check_sync_global_state_event(event, block); + + // Wait for the latch to reset + tokio::time::sleep(Duration::from_millis(TEST_LATCH_RESET_INTERVAL_MILLIS * 2)).await; + + // Simulate a successful global state sync; + // Although the request was successful, some peers did not have the data. + let unreliable_peers = second_peer_set.into_iter().choose_multiple(&mut rng, 2); + block_synchronizer.global_state_synced( + *block.hash(), + Ok(GlobalStateSynchronizerResponse::new( + (*block.state_root_hash()).into(), + unreliable_peers.clone(), + )), + ); + let mut effects = block_synchronizer.need_next(mock_reactor.effect_builder(), &mut rng); + assert_eq!( + effects.len(), + 1, + "need next should still have 1 effect after global state sync'd, not {}", + effects.len() + ); + tokio::spawn(async move { effects.remove(0).await }); + let event = mock_reactor.crank().await; + + assert!( + false == matches!(event, MockReactorEvent::SyncGlobalStateRequest { .. }), + "synchronizer should have progressed" + ); + + // Check if the peers returned by the `GlobalStateSynchronizer` in the response were marked + // unreliable. 
+ for peer in unreliable_peers.iter() { + assert!( + block_synchronizer + .historical + .as_ref() + .unwrap() + .peer_list() + .is_peer_unreliable(peer), + "{} should be marked unreliable", + peer + ); + } +} + +#[tokio::test] +async fn synchronizer_doesnt_busy_loop_without_peers() { + fn check_need_peer_events(expected_block_hash: BlockHash, events: Vec) { + // Explicitly verify the two effects are indeed asking networking and accumulator for peers. + assert_matches!( + events[0], + MockReactorEvent::NetworkInfoRequest(NetworkInfoRequest::FullyConnectedPeers { + count, + .. + }) if count == MAX_SIMULTANEOUS_PEERS as usize + ); + assert_matches!( + events[1], + MockReactorEvent::BlockAccumulatorRequest(BlockAccumulatorRequest::GetPeersForBlock { + block_hash, + .. + }) if block_hash == expected_block_hash + ); + } + + let mut rng = TestRng::new(); + let mock_reactor = MockReactor::new(); + let test_env = TestEnv::random(&mut rng).with_block( + TestBlockBuilder::new() + .era(1) + .random_transactions(1, &mut rng) + .build(&mut rng) + .into(), + ); + let block = test_env.block(); + let block_hash = *block.hash(); + let validator_matrix = test_env.gen_validator_matrix(); + let cfg = Config { + latch_reset_interval: TimeDiff::from_millis(TEST_LATCH_RESET_INTERVAL_MILLIS), + ..Default::default() + }; + let mut block_synchronizer = + BlockSynchronizer::new_initialized(&mut rng, validator_matrix, cfg); + + block_synchronizer.register_block_by_hash(block_hash, true); + + latch_inner_check( + block_synchronizer.historical.as_ref(), + false, + "initial set up, should not be latched", + ); + + { + // We registered no peers, so we need peers + need_next_inner_check( + block_synchronizer.historical.as_mut(), + &mut rng, + NeedNext::Peers(block_hash), + "should need peers", + ); + + // We registered no peers, so the synchronizer should ask for peers. 
+ let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + &mut rng, + Event::Request(BlockSynchronizerRequest::NeedNext), + ); + assert_eq!(effects.len(), 2, "we should ask for peers from both networking and accumulator, thus two effects are expected"); + + latch_inner_check( + block_synchronizer.historical.as_ref(), + true, + "should be latched waiting for peers", + ); + + check_need_peer_events(block_hash, mock_reactor.process_effects(effects).await); + } + + { + // Inject an empty response from the network, simulating no available peers. + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + &mut rng, + Event::NetworkPeers(*block.hash(), vec![]), + ); + + latch_inner_check( + block_synchronizer.historical.as_ref(), + true, + "should still be latched because only one response was received and it \ + did not have what we needed.", + ); + + assert!(effects.is_empty(), "effects should be empty"); + } + + { + // Inject an empty response from the accumulator, simulating no available peers. + // as this is the second of two responses, the latch clears. the logic then + // calls need next again, we still need peers, so we generate the same two effects again. 
+ let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + &mut rng, + Event::AccumulatedPeers(*block.hash(), None), + ); + assert!(!effects.is_empty(), "we should still need peers..."); + + latch_inner_check( + block_synchronizer.historical.as_ref(), + true, + "we need peers, ask again", + ); + + // We registered no peers, so we still need peers + need_next_inner_check( + block_synchronizer.historical.as_mut(), + &mut rng, + NeedNext::Peers(block_hash), + "should need peers", + ); + + check_need_peer_events(block_hash, mock_reactor.process_effects(effects).await); + } +} + +#[tokio::test] +async fn should_not_stall_after_registering_new_era_validator_weights() { + let mut rng = TestRng::new(); + let mock_reactor = MockReactor::new(); + let test_env = TestEnv::random(&mut rng); + let peers = test_env.peers(); + let block = test_env.block(); + let block_hash = *block.hash(); + let era_id = block.era_id(); + + // Set up a validator matrix. + let mut validator_matrix = ValidatorMatrix::new_with_validator(ALICE_SECRET_KEY.clone()); + let mut block_synchronizer = + BlockSynchronizer::new_initialized(&mut rng, validator_matrix.clone(), Config::default()); + + // Set up the synchronizer for the test block such that the next step is getting era validators. 
+ block_synchronizer.register_block_by_hash(block_hash, true); + block_synchronizer.register_peers(block_hash, peers.clone()); + block_synchronizer + .historical + .as_mut() + .expect("should have historical builder") + .register_block_header(block.clone_header(), None) + .expect("should register block header"); + + latch_inner_check( + block_synchronizer.historical.as_ref(), + false, + "initial set up, should not be latched", + ); + need_next_inner_check( + block_synchronizer.historical.as_mut(), + &mut rng, + NeedNext::EraValidators(era_id), + "should need era validators for era block is in", + ); + + let effects = block_synchronizer.need_next(mock_reactor.effect_builder(), &mut rng); + assert_eq!( + effects.len(), + MAX_SIMULTANEOUS_PEERS as usize, + "need next should have an effect per peer when needing sync leap" + ); + latch_inner_check( + block_synchronizer.historical.as_ref(), + true, + "after determination that we need validators, should be latched", + ); + + // `need_next` should return no effects while latched. + assert!( + block_synchronizer + .need_next(mock_reactor.effect_builder(), &mut rng) + .is_empty(), + "should return no effects while latched" + ); + + // bleed off the event q, checking the expected event kind + for effect in effects { + tokio::spawn(effect); + let event = mock_reactor.crank().await; + match event { + MockReactorEvent::SyncLeapFetcherRequest(_) => (), + _ => panic!("unexpected event: {:?}", event), + }; + } + + // Update the validator matrix to now have an entry for the era of our random block. 
+ validator_matrix.register_validator_weights( + era_id, + iter::once((ALICE_PUBLIC_KEY.clone(), 100.into())).collect(), + ); + + // register validator_matrix + block_synchronizer + .historical + .as_mut() + .expect("should have historical builder") + .register_era_validator_weights(&validator_matrix); + + latch_inner_check( + block_synchronizer.historical.as_ref(), + false, + "after registering validators, should not be latched", + ); + + need_next_inner_check( + block_synchronizer.historical.as_mut(), + &mut rng, + NeedNext::FinalitySignatures(block_hash, era_id, validator_matrix.public_keys(&era_id)), + "should need finality sigs", + ); + + // Ensure the in-flight latch has been released, i.e. that `need_next` returns something. + let mut effects = block_synchronizer.need_next(mock_reactor.effect_builder(), &mut rng); + assert_eq!( + effects.len(), + 1, + "need next should produce 1 effect because we currently need exactly 1 signature \ + NOTE: finality signatures are a special case; we currently we fan out 1 peer per signature \ + but do multiple rounds of this against increasingly strict weight thresholds. \ + All other fetchers fan out by asking each of MAX_SIMULTANEOUS_PEERS for the _same_ item." + ); + + tokio::spawn(async move { effects.remove(0).await }); + let event = mock_reactor.crank().await; + assert_matches!( + event, + MockReactorEvent::FinalitySignatureFetcherRequest(FetcherRequest { + id, + peer, + .. 
+ }) if peers.contains(&peer) && id.block_hash() == block.hash() + ); +} + +#[test] +fn duplicate_register_block_not_allowed_if_builder_is_not_failed() { + let mut rng = TestRng::new(); + let test_env = TestEnv::random(&mut rng); + let block = test_env.block(); + let validator_matrix = test_env.gen_validator_matrix(); + let mut block_synchronizer = + BlockSynchronizer::new_initialized(&mut rng, validator_matrix, Config::default()); + + // Register block for forward sync + assert!(block_synchronizer.register_block_by_hash(*block.hash(), false)); + assert!(block_synchronizer.forward.is_some()); // we only get global state on historical sync + + // Registering the block again should not be allowed until the sync finishes + assert!(!block_synchronizer.register_block_by_hash(*block.hash(), false)); + + // Trying to register a different block should replace the old one + let new_block: Block = TestBlockBuilder::new().build(&mut rng).into(); + assert!(block_synchronizer.register_block_by_hash(*new_block.hash(), false)); + assert_eq!( + block_synchronizer.forward.unwrap().block_hash(), + *new_block.hash() + ); +} + +#[tokio::test] +async fn historical_sync_gets_peers_form_both_connected_peers_and_accumulator() { + let mut rng = TestRng::new(); + let mock_reactor = MockReactor::new(); + let test_env = TestEnv::random(&mut rng); + let block = test_env.block(); + let validator_matrix = test_env.gen_validator_matrix(); + let mut block_synchronizer = + BlockSynchronizer::new_initialized(&mut rng, validator_matrix, Config::default()); + + // Register block for historical sync + assert!(block_synchronizer.register_block_by_hash(*block.hash(), SHOULD_FETCH_EXECUTION_STATE)); + assert!(block_synchronizer.historical.is_some()); + + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + &mut rng, + Event::Request(BlockSynchronizerRequest::NeedNext), + ); + assert_eq!(effects.len(), 2); + let events = mock_reactor.process_effects(effects).await; + + // The 
first thing the synchronizer should do is get peers. + // For the historical flow, the synchronizer will get a random sampling of the connected + // peers and also ask the accumulator to provide peers from which it has received information + // for the block that is being synchronized. + assert_matches!( + events[0], + MockReactorEvent::NetworkInfoRequest(NetworkInfoRequest::FullyConnectedPeers { + count, + .. + }) if count == MAX_SIMULTANEOUS_PEERS as usize + ); + + assert_matches!( + events[1], + MockReactorEvent::BlockAccumulatorRequest(BlockAccumulatorRequest::GetPeersForBlock { + block_hash, + .. + }) if block_hash == *block.hash() + ) +} + +#[tokio::test] +async fn fwd_sync_gets_peers_only_from_accumulator() { + let mut rng = TestRng::new(); + let mock_reactor = MockReactor::new(); + let test_env = TestEnv::random(&mut rng); + let block = test_env.block(); + let validator_matrix = test_env.gen_validator_matrix(); + let mut block_synchronizer = + BlockSynchronizer::new_initialized(&mut rng, validator_matrix, Config::default()); + + // Register block for forward sync + assert!(block_synchronizer.register_block_by_hash(*block.hash(), false)); + assert!(block_synchronizer.forward.is_some()); + + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + &mut rng, + Event::Request(BlockSynchronizerRequest::NeedNext), + ); + assert_eq!(effects.len(), 1); + let events = mock_reactor.process_effects(effects).await; + + // The first thing the synchronizer should do is get peers. + // For the forward flow, the synchronizer will ask the accumulator to provide peers + // from which it has received information for the block that is being synchronized. + assert_matches!( + events[0], + MockReactorEvent::BlockAccumulatorRequest(BlockAccumulatorRequest::GetPeersForBlock { + block_hash, + .. 
+ }) if block_hash == *block.hash() + ) +} + +#[tokio::test] +async fn sync_starts_with_header_fetch() { + let mut rng = TestRng::new(); + let mock_reactor = MockReactor::new(); + let test_env = TestEnv::random(&mut rng); + let block = test_env.block(); + let peers = test_env.peers(); + let validator_matrix = test_env.gen_validator_matrix(); + let mut block_synchronizer = + BlockSynchronizer::new_initialized(&mut rng, validator_matrix, Config::default()); + + // Register block for fwd sync + assert!(block_synchronizer.register_block_by_hash(*block.hash(), false)); + assert!(block_synchronizer.forward.is_some()); + block_synchronizer.register_peers(*block.hash(), peers.clone()); + + let events = need_next( + &mut rng, + &mock_reactor, + &mut block_synchronizer, + MAX_SIMULTANEOUS_PEERS, + ) + .await; + + // The first thing needed after the synchronizer has peers is + // to fetch the block header from peers. + for event in events { + assert_matches!( + event, + MockReactorEvent::BlockHeaderFetcherRequest(FetcherRequest { + id, + peer, + .. 
+ }) if peers.contains(&peer) && id == *block.hash() + ); + } +} + +#[tokio::test] +async fn fwd_sync_is_not_blocked_by_failed_header_fetch_within_latch_interval() { + let mut rng = TestRng::new(); + let mock_reactor = MockReactor::new(); + let test_env = TestEnv::random(&mut rng); + let block = test_env.block(); + let block_hash = *block.hash(); + let peers = test_env.peers(); + let validator_matrix = test_env.gen_validator_matrix(); + let cfg = Config { + ..Default::default() + }; + let mut block_synchronizer = + BlockSynchronizer::new_initialized(&mut rng, validator_matrix, cfg); + + // Register block for fwd sync + assert!( + block_synchronizer.register_block_by_hash(block_hash, false), + "should register block by hash" + ); + assert!( + block_synchronizer.forward.is_some(), + "should have forward sync" + ); + block_synchronizer.register_peers(block_hash, peers.clone()); + + let events = need_next( + &mut rng, + &mock_reactor, + &mut block_synchronizer, + MAX_SIMULTANEOUS_PEERS, + ) + .await; + + let initial_progress = block_synchronizer + .forward + .as_ref() + .expect("should exist") + .last_progress_time(); + + latch_inner_check( + block_synchronizer.forward.as_ref(), + true, + "forward builder should be latched after need next call", + ); + + let mut peers_asked = Vec::new(); + for event in events { + assert_matches!( + event, + MockReactorEvent::BlockHeaderFetcherRequest(FetcherRequest { + id, + peer, + .. 
+ }) if peers.contains(&peer) && id == block_hash => { + peers_asked.push(peer); + }, + "should be block header fetch" + ); + } + + // Simulate fetch errors for the header + let mut generated_effects = Effects::new(); + for peer in peers_asked { + latch_inner_check( + block_synchronizer.forward.as_ref(), + true, + &format!("response from peer: {:?}, but should still be latched until after final response received", peer), + ); + assert!( + generated_effects.is_empty(), + "effects should remain empty until last response" + ); + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + &mut rng, + Event::BlockHeaderFetched(Err(FetcherError::Absent { + id: Box::new(*block.hash()), + peer, + })), + ); + + // the effects array should be empty while the latch is active + // once the latch is reset, we should get some effects + generated_effects.extend(effects); + } + + need_next_inner_check( + block_synchronizer.forward.as_mut(), + &mut rng, + NeedNext::BlockHeader(block_hash), + "should need block header", + ); + assert!( + !generated_effects.is_empty(), + "should have gotten effects after the final response tail called into need next" + ); + + latch_inner_check( + block_synchronizer.forward.as_ref(), + true, + "all requests have been responded to, and the last event response should have \ + resulted in a fresh need next being reported and thus a new latch", + ); + + assert_matches!( + block_synchronizer.forward_progress(), + BlockSynchronizerProgress::Syncing(block_hash, _, _) if block_hash == block_hash, + "should be syncing" + ); + + tokio::time::sleep(Duration::from(cfg.need_next_interval)).await; + + assert_matches!( + block_synchronizer.forward_progress(), + BlockSynchronizerProgress::Syncing(block_hash, _, _) if block_hash == *block.hash() + ); + + let current_progress = block_synchronizer + .forward + .as_ref() + .expect("should exist") + .last_progress_time(); + + assert_eq!( + initial_progress, current_progress, + "we have not gotten the 
record we need, so progress should remain the same" + ) +} + +#[tokio::test] +async fn registering_header_successfully_triggers_signatures_fetch_for_weak_finality() { + let mut rng = TestRng::new(); + let mock_reactor = MockReactor::new(); + let test_env = TestEnv::random(&mut rng); + let peers = test_env.peers(); + let block = test_env.block(); + let validator_matrix = test_env.gen_validator_matrix(); + let mut block_synchronizer = + BlockSynchronizer::new_initialized(&mut rng, validator_matrix, Config::default()); + + // Register block for fwd sync + assert!(block_synchronizer.register_block_by_hash(*block.hash(), false)); + assert!(block_synchronizer.forward.is_some()); + block_synchronizer.register_peers(*block.hash(), peers.clone()); + + let events = need_next( + &mut rng, + &mock_reactor, + &mut block_synchronizer, + MAX_SIMULTANEOUS_PEERS, + ) + .await; + + let mut peers_asked = Vec::new(); + for event in events { + assert_matches!( + event, + MockReactorEvent::BlockHeaderFetcherRequest(FetcherRequest { + id, + peer, + .. + }) if peers.contains(&peer) && id == *block.hash() => { + peers_asked.push(peer); + } + ); + } + + // Simulate successful fetch of the block header + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + &mut rng, + Event::BlockHeaderFetched(Ok(FetchedData::FromPeer { + item: Box::new(block.clone_header()), + peer: peers_asked[0], + })), + ); + + // Check the block acquisition state + let fwd_builder = block_synchronizer.forward_builder(); + assert_matches!( + fwd_builder.block_acquisition_state(), + BlockAcquisitionState::HaveBlockHeader(header, _) if header.block_hash() == *block.hash() + ); + + // Check if the peer that provided the successful response was promoted + assert!(fwd_builder.peer_list().is_peer_reliable(&peers_asked[0])); + + // Next the synchronizer should fetch finality signatures to reach weak finality. 
+ // The number of requests should be limited to the number of peers even if we + // need to get more signatures to reach weak finality. + assert_eq!( + effects.len(), + min( + test_env.validator_keys().len(), + MAX_SIMULTANEOUS_PEERS as usize, + ) + ); + for event in mock_reactor.process_effects(effects).await { + assert_matches!( + event, + MockReactorEvent::FinalitySignatureFetcherRequest(FetcherRequest { + id, + peer, + .. + }) if peers.contains(&peer) && id.block_hash() == block.hash() && id.era_id() == block.era_id() + ); + } +} + +#[tokio::test] +async fn fwd_more_signatures_are_requested_if_weak_finality_is_not_reached() { + let mut rng = TestRng::new(); + let mock_reactor = MockReactor::new(); + let test_env = TestEnv::random(&mut rng); + let peers = test_env.peers(); + let block = test_env.block(); + let validator_matrix = test_env.gen_validator_matrix(); + let chain_name_hash = validator_matrix.chain_name_hash(); + let validators_secret_keys = test_env.validator_keys(); + let mut block_synchronizer = + BlockSynchronizer::new_initialized(&mut rng, validator_matrix, Config::default()); + + // Register block for fwd sync + assert!(block_synchronizer.register_block_by_hash(*block.hash(), false)); + assert!(block_synchronizer.forward.is_some()); + block_synchronizer.register_peers(*block.hash(), peers.clone()); + + let fwd_builder = block_synchronizer + .forward + .as_mut() + .expect("Forward builder should have been initialized"); + assert!(fwd_builder + .register_block_header(block.clone_header(), None) + .is_ok()); + fwd_builder.register_era_validator_weights(&block_synchronizer.validator_matrix); + + // Check the block acquisition state + assert_matches!( + fwd_builder.block_acquisition_state(), + BlockAcquisitionState::HaveBlockHeader(header, _) if header.block_hash() == *block.hash() + ); + + // Simulate a successful fetch of a single signature + let signature = FinalitySignatureV2::create( + *block.hash(), + block.height(), + block.era_id(), + 
chain_name_hash, + validators_secret_keys[0].as_ref(), + ); + assert!(signature.is_verified().is_ok()); + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + &mut rng, + Event::FinalitySignatureFetched(Ok(FetchedData::FromPeer { + item: Box::new(signature.into()), + peer: peers[0], + })), + ); + + // A single signature isn't enough to reach weak finality. + // The synchronizer should ask for the remaining signatures. + // The peer limit should still be in place. + assert_eq!( + effects.len(), + min( + validators_secret_keys.len() - 1, + MAX_SIMULTANEOUS_PEERS as usize, + ) + ); + for event in mock_reactor.process_effects(effects).await { + assert_matches!( + event, + MockReactorEvent::FinalitySignatureFetcherRequest(FetcherRequest { + id, + peer, + .. + }) => { + assert!(peers.contains(&peer)); + assert_eq!(id.block_hash(), block.hash()); + assert_eq!(id.era_id(), block.era_id()); + assert_ne!(*id.public_key(), PublicKey::from(validators_secret_keys[0].as_ref())); + } + ); + } + + // Register finality signatures to reach weak finality + let mut generated_effects = Effects::new(); + for secret_key in validators_secret_keys + .iter() + .skip(1) + .take(weak_finality_threshold(validators_secret_keys.len())) + { + // Register a finality signature + let signature = FinalitySignatureV2::create( + *block.hash(), + block.height(), + block.era_id(), + chain_name_hash, + secret_key.as_ref(), + ); + assert!(signature.is_verified().is_ok()); + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + &mut rng, + Event::FinalitySignatureFetched(Ok(FetchedData::FromPeer { + item: Box::new(signature.into()), + peer: peers[2], + })), + ); + generated_effects.extend(effects); + } + + // Now the block should have weak finality. + // We are only interested in the last effects generated since as soon as the block has weak + // finality it should start to fetch the block body. 
+ let events = mock_reactor + .process_effects( + generated_effects + .into_iter() + .rev() + .take(MAX_SIMULTANEOUS_PEERS as usize), + ) + .await; + for event in events { + assert_matches!( + event, + MockReactorEvent::BlockFetcherRequest(FetcherRequest { + id, + peer, + .. + }) => { + assert!(peers.contains(&peer)); + assert_eq!(id, *block.hash()); + } + ); + } +} + +#[tokio::test] +async fn fwd_sync_is_not_blocked_by_failed_signatures_fetch_within_latch_interval() { + let mut rng = TestRng::new(); + let mock_reactor = MockReactor::new(); + let test_env = TestEnv::random(&mut rng); + let peers = test_env.peers(); + let block = test_env.block(); + let expected_block_hash = *block.hash(); + let era_id = block.era_id(); + let validator_matrix = test_env.gen_validator_matrix(); + let num_validators = test_env.validator_keys().len() as u8; + let cfg = Config { + ..Default::default() + }; + let mut block_synchronizer = + BlockSynchronizer::new_initialized(&mut rng, validator_matrix, cfg); + + // Register block for fwd sync + assert!(block_synchronizer.register_block_by_hash(expected_block_hash, false)); + assert!(block_synchronizer.forward.is_some()); + block_synchronizer.register_peers(expected_block_hash, peers.clone()); + let fwd_builder = block_synchronizer + .forward + .as_mut() + .expect("Forward builder should have been initialized"); + assert!(fwd_builder + .register_block_header(block.clone_header(), None) + .is_ok()); + fwd_builder.register_era_validator_weights(&block_synchronizer.validator_matrix); + + // Check the block acquisition state + assert_matches!( + fwd_builder.block_acquisition_state(), + BlockAcquisitionState::HaveBlockHeader(header, _) if header.block_hash() == expected_block_hash + ); + + // Synchronizer should fetch finality signatures + let events = need_next( + &mut rng, + &mock_reactor, + &mut block_synchronizer, + min(num_validators, MAX_SIMULTANEOUS_PEERS), + /* We have num_validators + * validators so we + * require the num_validators + 
* signatures */ + ) + .await; + + // Check what signatures were requested + let mut sigs_requested = Vec::new(); + for event in events { + assert_matches!( + event, + MockReactorEvent::FinalitySignatureFetcherRequest(FetcherRequest { + id, + peer, + .. + }) => { + assert!(peers.contains(&peer)); + assert_eq!(*id.block_hash(), expected_block_hash); + assert_eq!(id.era_id(), era_id); + sigs_requested.push((peer, id.public_key().clone())); + } + ); + } + + // Simulate failed fetch of finality signatures + let mut generated_effects = Effects::new(); + for (peer, public_key) in sigs_requested { + latch_inner_check( + block_synchronizer.forward.as_ref(), + true, + &format!("response from peer: {:?}, but should still be latched until after final response received", peer), + ); + assert!( + generated_effects.is_empty(), + "effects should remain empty until last response" + ); + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + &mut rng, + Event::FinalitySignatureFetched(Err(FetcherError::Absent { + id: Box::new(Box::new(FinalitySignatureId::new( + expected_block_hash, + era_id, + public_key, + ))), + peer, + })), + ); + // the effects array should be empty while the latch is active + // once the latch is reset, we should get some effects + generated_effects.extend(effects); + } + + assert_matches!( + block_synchronizer.forward_progress(), + BlockSynchronizerProgress::Syncing(block_hash, _, _) if block_hash == expected_block_hash, + "should be syncing" + ); + + // The effects are empty at this point and the synchronizer is stuck + assert!( + !generated_effects.is_empty(), + "should have gotten effects after the final response tail called into need next" + ); + + latch_inner_check( + block_synchronizer.forward.as_ref(), + true, + "all requests have been responded to, and the last event response should have \ + resulted in a fresh need next being reported and thus a new latch", + ); + + for event in 
mock_reactor.process_effects(generated_effects).await { + assert_matches!( + event, + MockReactorEvent::FinalitySignatureFetcherRequest(FetcherRequest { + id, + peer, + .. + }) if peers.contains(&peer) && *id.block_hash() == expected_block_hash && id.era_id() == block.era_id() + ); + } + + // Check if the forward builder is reported as stalled so that the control logic can recover + assert_matches!( + block_synchronizer.forward_progress(), + BlockSynchronizerProgress::Syncing(block_hash, _, _) if block_hash == expected_block_hash + ); +} + +#[tokio::test] +async fn next_action_for_have_weak_finality_is_fetching_block_body() { + let mut rng = TestRng::new(); + let mock_reactor = MockReactor::new(); + let test_env = TestEnv::random(&mut rng); + let peers = test_env.peers(); + let block = test_env.block(); + let validator_matrix = test_env.gen_validator_matrix(); + let chain_name_hash = validator_matrix.chain_name_hash(); + let validators_secret_keys = test_env.validator_keys(); + let mut block_synchronizer = + BlockSynchronizer::new_initialized(&mut rng, validator_matrix, Config::default()); + + // Register block for fwd sync + assert!(block_synchronizer.register_block_by_hash(*block.hash(), false)); + assert!(block_synchronizer.forward.is_some()); + block_synchronizer.register_peers(*block.hash(), peers.clone()); + + let fwd_builder = block_synchronizer + .forward + .as_mut() + .expect("Forward builder should have been initialized"); + assert!(fwd_builder + .register_block_header(block.clone_header(), None) + .is_ok()); + fwd_builder.register_era_validator_weights(&block_synchronizer.validator_matrix); + + // Register finality signatures to reach weak finality + register_multiple_signatures( + fwd_builder, + block, + validators_secret_keys + .iter() + .take(weak_finality_threshold(validators_secret_keys.len())), + chain_name_hash, + ); + + // Check the block acquisition state + assert_matches!( + fwd_builder.block_acquisition_state(), + 
BlockAcquisitionState::HaveWeakFinalitySignatures(header, _) if header.block_hash() == *block.hash() + ); + + // Now the block should have weak finality. + // Next step is to get the block body. + let events = need_next( + &mut rng, + &mock_reactor, + &mut block_synchronizer, + MAX_SIMULTANEOUS_PEERS, + ) + .await; + + for event in events { + assert_matches!( + event, + MockReactorEvent::BlockFetcherRequest(FetcherRequest { + id, + peer, + .. + }) => { + assert!(peers.contains(&peer)); + assert_eq!(id, *block.hash()); + } + ); + } +} + +#[tokio::test] +async fn registering_block_body_transitions_builder_to_have_block_state() { + let mut rng = TestRng::new(); + let mock_reactor = MockReactor::new(); + let test_env = TestEnv::random(&mut rng); + let peers = test_env.peers(); + let block = test_env.block(); + let validator_matrix = test_env.gen_validator_matrix(); + let chain_name_hash = validator_matrix.chain_name_hash(); + let validators_secret_keys = test_env.validator_keys(); + let mut block_synchronizer = + BlockSynchronizer::new_initialized(&mut rng, validator_matrix, Config::default()); + + // Register block for fwd sync + assert!(block_synchronizer.register_block_by_hash(*block.hash(), false)); + assert!(block_synchronizer.forward.is_some()); + block_synchronizer.register_peers(*block.hash(), peers.clone()); + + let fwd_builder = block_synchronizer + .forward + .as_mut() + .expect("Forward builder should have been initialized"); + assert!(fwd_builder + .register_block_header(block.clone_header(), None) + .is_ok()); + fwd_builder.register_era_validator_weights(&block_synchronizer.validator_matrix); + + // Register finality signatures to reach weak finality + register_multiple_signatures( + fwd_builder, + block, + validators_secret_keys + .iter() + .take(weak_finality_threshold(validators_secret_keys.len())), + chain_name_hash, + ); + + // Check the block acquisition state + assert_matches!( + fwd_builder.block_acquisition_state(), + 
BlockAcquisitionState::HaveWeakFinalitySignatures(header, _) if header.block_hash() == *block.hash() + ); + + // Now the block should have weak finality. + // Next step is to get the block body. + let events = need_next( + &mut rng, + &mock_reactor, + &mut block_synchronizer, + MAX_SIMULTANEOUS_PEERS, + ) + .await; + + for event in events { + assert_matches!( + event, + MockReactorEvent::BlockFetcherRequest(FetcherRequest { + id, + peer, + .. + }) => { + assert!(peers.contains(&peer)); + assert_eq!(id, *block.hash()); + } + ); + } + + block_synchronizer.handle_event( + mock_reactor.effect_builder(), + &mut rng, + Event::BlockFetched(Ok(FetchedData::FromPeer { + item: Box::new(block.clone()), + peer: peers[0], + })), + ); + + assert_matches!( + block_synchronizer.forward_builder().block_acquisition_state(), + BlockAcquisitionState::HaveBlock(acquired_block, _, _) if acquired_block.hash() == block.hash() + ); +} + +#[tokio::test] +async fn fwd_having_block_body_for_block_without_deploys_requires_only_signatures() { + let mut rng = TestRng::new(); + let mock_reactor = MockReactor::new(); + let test_env = TestEnv::random(&mut rng); + let peers = test_env.peers(); + let block = test_env.block(); + let validator_matrix = test_env.gen_validator_matrix(); + let chain_name_hash = validator_matrix.chain_name_hash(); + let validators_secret_keys = test_env.validator_keys(); + let mut block_synchronizer = + BlockSynchronizer::new_initialized(&mut rng, validator_matrix, Config::default()); + + // Register block for fwd sync + assert!(block_synchronizer.register_block_by_hash(*block.hash(), false)); + assert!(block_synchronizer.forward.is_some()); + block_synchronizer.register_peers(*block.hash(), peers.clone()); + + let fwd_builder = block_synchronizer + .forward + .as_mut() + .expect("Forward builder should have been initialized"); + assert!(fwd_builder + .register_block_header(block.clone_header(), None) + .is_ok()); + 
fwd_builder.register_era_validator_weights(&block_synchronizer.validator_matrix); + + // Register finality signatures to reach weak finality + register_multiple_signatures( + fwd_builder, + block, + validators_secret_keys + .iter() + .take(weak_finality_threshold(validators_secret_keys.len())), + chain_name_hash, + ); + + assert!(fwd_builder.register_block(block.clone(), None).is_ok()); + + // Check the block acquisition state + assert_matches!( + fwd_builder.block_acquisition_state(), + BlockAcquisitionState::HaveBlock(acquired_block, _, _) if acquired_block.hash() == block.hash() + ); + + // Since the block doesn't have any deploys, + // the next step should be to fetch the finality signatures for strict finality. + let effects = block_synchronizer.need_next(mock_reactor.effect_builder(), &mut rng); + for event in mock_reactor.process_effects(effects).await { + assert_matches!( + event, + MockReactorEvent::FinalitySignatureFetcherRequest(FetcherRequest { + id, + peer, + .. + }) if peers.contains(&peer) && id.block_hash() == block.hash() && id.era_id() == block.era_id() + ); + } +} + +#[tokio::test] +async fn fwd_having_block_body_for_block_with_deploys_requires_approvals_hashes() { + let mut rng = TestRng::new(); + let mock_reactor = MockReactor::new(); + let test_env = TestEnv::random(&mut rng).with_block( + TestBlockBuilder::new() + .era(1) + .random_transactions(1, &mut rng) + .build(&mut rng) + .into(), + ); + let peers = test_env.peers(); + let block = test_env.block(); + let validator_matrix = test_env.gen_validator_matrix(); + let chain_name_hash = validator_matrix.chain_name_hash(); + let validators_secret_keys = test_env.validator_keys(); + let mut block_synchronizer = + BlockSynchronizer::new_initialized(&mut rng, validator_matrix, Config::default()); + + // Register block for fwd sync + assert!(block_synchronizer.register_block_by_hash(*block.hash(), false)); + assert!(block_synchronizer.forward.is_some()); + 
block_synchronizer.register_peers(*block.hash(), peers.clone()); + + let fwd_builder = block_synchronizer + .forward + .as_mut() + .expect("Forward builder should have been initialized"); + assert!(fwd_builder + .register_block_header(block.clone_header(), None) + .is_ok()); + fwd_builder.register_era_validator_weights(&block_synchronizer.validator_matrix); + + // Register finality signatures to reach weak finality + register_multiple_signatures( + fwd_builder, + block, + validators_secret_keys + .iter() + .take(weak_finality_threshold(validators_secret_keys.len())), + chain_name_hash, + ); + + assert!(fwd_builder.register_block(block.clone(), None).is_ok()); + + // Check the block acquisition state + assert_matches!( + fwd_builder.block_acquisition_state(), + BlockAcquisitionState::HaveBlock(acquired_block, _, _) if acquired_block.hash() == block.hash() + ); + + // Since the block has deploys, + // the next step should be to fetch the approvals hashes. + let events = need_next( + &mut rng, + &mock_reactor, + &mut block_synchronizer, + MAX_SIMULTANEOUS_PEERS, + ) + .await; + + for event in events { + if !matches!( + event, + MockReactorEvent::ApprovalsHashesFetcherRequest(FetcherRequest { + id, + peer, + .. + }) if peers.contains(&peer) && id == *block.hash() + ) { + println!("peers: {:?}", peers); + println!("{}", block.hash()); + println!("event: {:?}", event); + } + assert_matches!( + event, + MockReactorEvent::ApprovalsHashesFetcherRequest(FetcherRequest { + id, + peer, + .. 
+ }) if peers.contains(&peer) && id == *block.hash() + ); + } +} + +#[tokio::test] +async fn fwd_registering_approvals_hashes_triggers_fetch_for_deploys() { + let mut rng = TestRng::new(); + let mock_reactor = MockReactor::new(); + let txns = [Transaction::random(&mut rng)]; + let test_env = TestEnv::random(&mut rng).with_block( + TestBlockBuilder::new() + .era(1) + .transactions(txns.iter()) + .build(&mut rng) + .into(), + ); + let peers = test_env.peers(); + let block = test_env.block(); + let validator_matrix = test_env.gen_validator_matrix(); + let chain_name_hash = validator_matrix.chain_name_hash(); + let validators_secret_keys = test_env.validator_keys(); + let mut block_synchronizer = + BlockSynchronizer::new_initialized(&mut rng, validator_matrix, Config::default()); + + // Register block for fwd sync + assert!(block_synchronizer.register_block_by_hash(*block.hash(), false)); + assert!(block_synchronizer.forward.is_some()); + block_synchronizer.register_peers(*block.hash(), peers.clone()); + + let fwd_builder = block_synchronizer + .forward + .as_mut() + .expect("Forward builder should have been initialized"); + assert!(fwd_builder + .register_block_header(block.clone_header(), None) + .is_ok()); + fwd_builder.register_era_validator_weights(&block_synchronizer.validator_matrix); + + // Register finality signatures to reach weak finality + register_multiple_signatures( + fwd_builder, + block, + validators_secret_keys + .iter() + .take(weak_finality_threshold(validators_secret_keys.len())), + chain_name_hash, + ); + + assert!(fwd_builder.register_block(block.clone(), None).is_ok()); + + // Check the block acquisition state + assert_matches!( + fwd_builder.block_acquisition_state(), + BlockAcquisitionState::HaveBlock(acquired_block, _, _) if acquired_block.hash() == block.hash() + ); + + let approvals_hashes = ApprovalsHashes::new( + *block.hash(), + txns.iter() + .map(|txn| txn.compute_approvals_hash().unwrap()) + .collect(), + dummy_merkle_proof(), + ); + + 
// Since the block has approvals hashes, + // the next step should be to fetch the deploys. + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + &mut rng, + Event::ApprovalsHashesFetched(Ok(FetchedData::FromPeer { + item: Box::new(approvals_hashes.clone()), + peer: peers[0], + })), + ); + assert_eq!(effects.len(), MAX_SIMULTANEOUS_PEERS as usize); + for event in mock_reactor.process_effects(effects).await { + assert_matches!( + event, + MockReactorEvent::TransactionFetcherRequest(FetcherRequest { + id, + peer, + .. + }) => { + assert!(peers.contains(&peer)); + assert_eq!(id, txns[0].compute_id()); + } + ); + } +} + +#[tokio::test] +async fn fwd_have_block_body_without_deploys_and_strict_finality_transitions_state_machine() { + let mut rng = TestRng::new(); + let mock_reactor = MockReactor::new(); + let test_env = TestEnv::random(&mut rng); + let peers = test_env.peers(); + let block = test_env.block(); + let validator_matrix = test_env.gen_validator_matrix(); + let chain_name_hash = validator_matrix.chain_name_hash(); + let validators_secret_keys = test_env.validator_keys(); + let mut block_synchronizer = + BlockSynchronizer::new_initialized(&mut rng, validator_matrix, Config::default()); + + // Register block for fwd sync + assert!(block_synchronizer.register_block_by_hash(*block.hash(), false)); + assert!(block_synchronizer.forward.is_some()); + block_synchronizer.register_peers(*block.hash(), peers.clone()); + + let fwd_builder = block_synchronizer + .forward + .as_mut() + .expect("Forward builder should have been initialized"); + assert!(fwd_builder + .register_block_header(block.clone_header(), None) + .is_ok()); + fwd_builder.register_era_validator_weights(&block_synchronizer.validator_matrix); + + // Register finality signatures to reach strict finality + register_multiple_signatures( + fwd_builder, + block, + validators_secret_keys.iter(), + chain_name_hash, + ); + + assert!(fwd_builder.register_block(block.clone(), 
None).is_ok()); + + // Check the block acquisition state + assert_matches!( + fwd_builder.block_acquisition_state(), + BlockAcquisitionState::HaveBlock(acquired_block, _, _) if acquired_block.hash() == block.hash() + ); + + // Since the block doesn't have any deploys and already has achieved strict finality, we expect + // it to transition directly to HaveStrictFinality and ask for the next piece of work + // immediately + let mut effects = block_synchronizer.need_next(mock_reactor.effect_builder(), &mut rng); + assert_eq!(effects.len(), 1); + + let fwd_builder = block_synchronizer + .forward + .as_ref() + .expect("Forward builder should have been initialized"); + assert_matches!( + fwd_builder.block_acquisition_state(), + BlockAcquisitionState::HaveStrictFinalitySignatures(acquired_block, ..) if acquired_block.hash() == block.hash() + ); + + // Expect a single NeedNext event + let events = effects.remove(0).await; + assert_eq!(events.len(), 1); + assert_matches!( + events[0], + Event::Request(BlockSynchronizerRequest::NeedNext) + ); +} + +#[tokio::test] +async fn fwd_have_block_with_strict_finality_requires_creation_of_finalized_block() { + let mut rng = TestRng::new(); + let mock_reactor = MockReactor::new(); + let test_env = TestEnv::random(&mut rng); + let peers = test_env.peers(); + let block = test_env.block(); + let validator_matrix = test_env.gen_validator_matrix(); + let chain_name_hash = validator_matrix.chain_name_hash(); + let validators_secret_keys = test_env.validator_keys(); + let mut block_synchronizer = + BlockSynchronizer::new_initialized(&mut rng, validator_matrix, Config::default()); + + // Register block for fwd sync + assert!(block_synchronizer.register_block_by_hash(*block.hash(), false)); + assert!(block_synchronizer.forward.is_some()); + block_synchronizer.register_peers(*block.hash(), peers.clone()); + + let fwd_builder = block_synchronizer + .forward + .as_mut() + .expect("Forward builder should have been initialized"); + 
assert!(fwd_builder + .register_block_header(block.clone_header(), None) + .is_ok()); + fwd_builder.register_era_validator_weights(&block_synchronizer.validator_matrix); + + // Register signatures for weak finality + register_multiple_signatures( + fwd_builder, + block, + validators_secret_keys + .iter() + .take(weak_finality_threshold(validators_secret_keys.len())), + chain_name_hash, + ); + assert!(fwd_builder.register_block(block.clone(), None).is_ok()); + + // Check the block acquisition state + assert_matches!( + fwd_builder.block_acquisition_state(), + BlockAcquisitionState::HaveBlock(acquired_block, _, _) if acquired_block.hash() == block.hash() + ); + + // Register the remaining signatures to reach strict finality + register_multiple_signatures( + fwd_builder, + block, + validators_secret_keys + .iter() + .skip(weak_finality_threshold(validators_secret_keys.len())), + chain_name_hash, + ); + + // Check the block acquisition state + assert_matches!( + fwd_builder.block_acquisition_state(), + BlockAcquisitionState::HaveStrictFinalitySignatures(acquired_block, ..) if acquired_block.hash() == block.hash() + ); + + // Block should have strict finality and will require to be executed + let events = need_next(&mut rng, &mock_reactor, &mut block_synchronizer, 1).await; + + for event in events { + assert_matches!( + event, + MockReactorEvent::MakeBlockExecutableRequest(MakeBlockExecutableRequest { + block_hash, + .. 
+ }) if block_hash == *block.hash() + ); + } +} + +#[tokio::test] +async fn fwd_have_strict_finality_requests_enqueue_when_finalized_block_is_created() { + let mut rng = TestRng::new(); + let mock_reactor = MockReactor::new(); + let test_env = TestEnv::random(&mut rng); + let peers = test_env.peers(); + let block = test_env.block(); + let validator_matrix = test_env.gen_validator_matrix(); + let chain_name_hash = validator_matrix.chain_name_hash(); + let validators_secret_keys = test_env.validator_keys(); + let mut block_synchronizer = + BlockSynchronizer::new_initialized(&mut rng, validator_matrix, Config::default()); + + // Register block for fwd sync + assert!(block_synchronizer.register_block_by_hash(*block.hash(), false)); + assert!(block_synchronizer.forward.is_some()); + block_synchronizer.register_peers(*block.hash(), peers.clone()); + + let fwd_builder = block_synchronizer + .forward + .as_mut() + .expect("Forward builder should have been initialized"); + assert!(fwd_builder + .register_block_header(block.clone_header(), None) + .is_ok()); + fwd_builder.register_era_validator_weights(&block_synchronizer.validator_matrix); + + // Register finality signatures to reach weak finality + register_multiple_signatures( + fwd_builder, + block, + validators_secret_keys + .iter() + .take(weak_finality_threshold(validators_secret_keys.len())), + chain_name_hash, + ); + assert!(fwd_builder.register_block(block.clone(), None).is_ok()); + // Register the remaining signatures to reach strict finality + register_multiple_signatures( + fwd_builder, + block, + validators_secret_keys + .iter() + .skip(weak_finality_threshold(validators_secret_keys.len())), + chain_name_hash, + ); + + // Check the block acquisition state + assert_matches!( + fwd_builder.block_acquisition_state(), + BlockAcquisitionState::HaveStrictFinalitySignatures(acquired_block, ..) 
if acquired_block.hash() == block.hash() + ); + + assert_matches!( + block_synchronizer.forward_progress(), + BlockSynchronizerProgress::Syncing(block_hash, _, _) if block_hash == *block.hash() + ); + + // After the FinalizedBlock is created, the block synchronizer will request for it to be + // enqueued for execution + let event = Event::MadeFinalizedBlock { + block_hash: *block.hash(), + result: Some(ExecutableBlock::from_block_and_transactions( + block.clone().try_into().expect("Expected a V2 block."), + Vec::new(), + )), + }; + let effects = block_synchronizer.handle_event(mock_reactor.effect_builder(), &mut rng, event); + assert_eq!(effects.len(), 1); + let events = mock_reactor.process_effects(effects).await; + + // Check the block acquisition state + let fwd_builder = block_synchronizer + .forward + .as_ref() + .expect("Forward builder should have been initialized"); + assert_matches!( + fwd_builder.block_acquisition_state(), + BlockAcquisitionState::HaveExecutableBlock(actual_block, _, _) if *actual_block.hash() == *block.hash() + ); + + // This is the first of two events created when `EffectBuilder::enqueue_block_for_execution` is + // called. + assert_matches!( + &events[0], + MockReactorEvent::StorageRequest( + StorageRequest::GetKeyBlockHeightForActivationPoint { .. 
} + ) + ); + + // Progress is syncing until we get a confirmation that the block was enqueued for execution + assert_matches!( + block_synchronizer.forward_progress(), + BlockSynchronizerProgress::Syncing(block_hash, _, _) if block_hash == *block.hash() + ); +} + +#[tokio::test] +async fn fwd_builder_status_is_executing_when_block_is_enqueued_for_execution() { + let mut rng = TestRng::new(); + let mock_reactor = MockReactor::new(); + let test_env = TestEnv::random(&mut rng); + let peers = test_env.peers(); + let block = test_env.block(); + let validator_matrix = test_env.gen_validator_matrix(); + let chain_name_hash = validator_matrix.chain_name_hash(); + let validators_secret_keys = test_env.validator_keys(); + let mut block_synchronizer = + BlockSynchronizer::new_initialized(&mut rng, validator_matrix, Config::default()); + + // Register block for fwd sync + assert!(block_synchronizer.register_block_by_hash(*block.hash(), false)); + assert!(block_synchronizer.forward.is_some()); + block_synchronizer.register_peers(*block.hash(), peers.clone()); + + let fwd_builder = block_synchronizer + .forward + .as_mut() + .expect("Forward builder should have been initialized"); + assert!(fwd_builder + .register_block_header(block.clone_header(), None) + .is_ok()); + fwd_builder.register_era_validator_weights(&block_synchronizer.validator_matrix); + + // Register finality signatures to reach weak finality + register_multiple_signatures( + fwd_builder, + block, + validators_secret_keys + .iter() + .take(weak_finality_threshold(validators_secret_keys.len())), + chain_name_hash, + ); + assert!(fwd_builder.register_block(block.clone(), None).is_ok()); + // Register the remaining signatures to reach strict finality + register_multiple_signatures( + fwd_builder, + block, + validators_secret_keys + .iter() + .skip(weak_finality_threshold(validators_secret_keys.len())), + chain_name_hash, + ); + + // Check the block acquisition state + assert_matches!( + 
fwd_builder.block_acquisition_state(), + BlockAcquisitionState::HaveStrictFinalitySignatures(acquired_block, ..) if acquired_block.hash() == block.hash() + ); + + // Register finalized block + fwd_builder.register_made_executable_block(ExecutableBlock::from_block_and_transactions( + block.clone().try_into().expect("Expected a V2 block."), + Vec::new(), + )); + assert_matches!( + fwd_builder.block_acquisition_state(), + BlockAcquisitionState::HaveExecutableBlock(actual_block, _, _) if *actual_block.hash() == *block.hash() + ); + + // Simulate that enqueuing the block for execution was successful + let event = Event::MarkBlockExecutionEnqueued(*block.hash()); + + // There is nothing for the synchronizer to do at this point. + // It will wait for the block to be executed + let effects = block_synchronizer.handle_event(mock_reactor.effect_builder(), &mut rng, event); + assert_eq!(effects.len(), 0); + + // Progress should now indicate that the block is executing + assert_matches!( + block_synchronizer.forward_progress(), + BlockSynchronizerProgress::Executing(block_hash, _, _) if block_hash == *block.hash() + ); +} + +#[tokio::test] +async fn fwd_sync_is_finished_when_block_is_marked_as_executed() { + let mut rng = TestRng::new(); + let mock_reactor = MockReactor::new(); + let test_env = TestEnv::random(&mut rng); + let peers = test_env.peers(); + let block = test_env.block(); + let validator_matrix = test_env.gen_validator_matrix(); + let chain_name_hash = validator_matrix.chain_name_hash(); + let validators_secret_keys = test_env.validator_keys(); + let mut block_synchronizer = + BlockSynchronizer::new_initialized(&mut rng, validator_matrix, Config::default()); + + // Register block for fwd sync + assert!(block_synchronizer.register_block_by_hash(*block.hash(), false)); + assert!(block_synchronizer.forward.is_some()); + block_synchronizer.register_peers(*block.hash(), peers.clone()); + + let fwd_builder = block_synchronizer + .forward + .as_mut() + .expect("Forward 
builder should have been initialized"); + assert!(fwd_builder + .register_block_header(block.clone_header(), None) + .is_ok()); + fwd_builder.register_era_validator_weights(&block_synchronizer.validator_matrix); + + // Register finality signatures to reach weak finality + register_multiple_signatures( + fwd_builder, + block, + validators_secret_keys + .iter() + .take(weak_finality_threshold(validators_secret_keys.len())), + chain_name_hash, + ); + assert!(fwd_builder.register_block(block.clone(), None).is_ok()); + // Register the remaining signatures to reach strict finality + register_multiple_signatures( + fwd_builder, + block, + validators_secret_keys + .iter() + .skip(weak_finality_threshold(validators_secret_keys.len())), + chain_name_hash, + ); + + // Register finalized block + fwd_builder.register_made_executable_block(ExecutableBlock::from_block_and_transactions( + block.clone().try_into().expect("Expected a V2 block."), + Vec::new(), + )); + fwd_builder.register_block_execution_enqueued(); + + // Progress should now indicate that the block is executing + assert_matches!( + block_synchronizer.forward_progress(), + BlockSynchronizerProgress::Executing(block_hash, _, _) if block_hash == *block.hash() + ); + + // Simulate a MarkBlockExecuted event + let event = Event::MarkBlockExecuted(*block.hash()); + + // There is nothing for the synchronizer to do at this point, the sync is finished. 
+ let effects = block_synchronizer.handle_event(mock_reactor.effect_builder(), &mut rng, event); + assert_eq!(effects.len(), 0); + + // Progress should now indicate that the block is executing + assert_matches!( + block_synchronizer.forward_progress(), + BlockSynchronizerProgress::Synced(block_hash, _, _) if block_hash == *block.hash() + ); +} + +#[tokio::test] +async fn historical_sync_announces_meta_block() { + let mut rng = TestRng::new(); + let mock_reactor = MockReactor::new(); + let test_env = TestEnv::random(&mut rng); + let peers = test_env.peers(); + let block = test_env.block(); + let validator_matrix = test_env.gen_validator_matrix(); + let chain_name_hash = validator_matrix.chain_name_hash(); + let validators_secret_keys = test_env.validator_keys(); + let mut block_synchronizer = + BlockSynchronizer::new_initialized(&mut rng, validator_matrix, Config::default()); + + // Register block for historical sync + assert!(block_synchronizer.register_block_by_hash(*block.hash(), SHOULD_FETCH_EXECUTION_STATE)); + assert!(block_synchronizer.historical.is_some()); + block_synchronizer.register_peers(*block.hash(), peers.clone()); + + let historical_builder = block_synchronizer + .historical + .as_mut() + .expect("Historical builder should have been initialized"); + assert!(historical_builder + .register_block_header(block.clone_header(), None) + .is_ok()); + historical_builder.register_era_validator_weights(&block_synchronizer.validator_matrix); + + // Register finality signatures to reach weak finality + register_multiple_signatures( + historical_builder, + block, + validators_secret_keys + .iter() + .take(weak_finality_threshold(validators_secret_keys.len())), + chain_name_hash, + ); + assert!(historical_builder + .register_block(block.clone(), None) + .is_ok()); + // Register the remaining signatures to reach strict finality + register_multiple_signatures( + historical_builder, + block, + validators_secret_keys + .iter() + 
.skip(weak_finality_threshold(validators_secret_keys.len())), + chain_name_hash, + ); + + // Set the builder state to `HaveStrictFinalitySignatures` + match historical_builder.block_acquisition_state() { + BlockAcquisitionState::HaveBlock(state_block, state_signatures, _) => historical_builder + .set_block_acquisition_state(BlockAcquisitionState::HaveStrictFinalitySignatures( + state_block.clone(), + state_signatures.clone(), + )), + other => panic!("Unexpected state: {:?}", other), + } + // Make sure the historical builder is syncing + assert_matches!( + block_synchronizer.historical_progress(), + BlockSynchronizerProgress::Syncing(block_hash, _, _) if block_hash == *block.hash() + ); + + // Simulate a MarkBlockCompleted event + let event = Event::MarkBlockCompleted { + block_hash: *block.hash(), + is_new: true, + }; + // Put it through to the synchronizer + let effects = block_synchronizer.handle_event(mock_reactor.effect_builder(), &mut rng, event); + assert_eq!(effects.len(), 1); + let mut events = mock_reactor.process_effects(effects).await; + + // We should have a request to get the execution results + match events.pop().unwrap() { + MockReactorEvent::StorageRequest(StorageRequest::GetExecutionResults { + block_hash: actual_block_hash, + responder, + }) => { + assert_eq!(actual_block_hash, *block.hash()); + // We'll just send empty execution results for this case. 
+ responder.respond(Some(vec![])).await; + } + other => panic!("Unexpected event: {:?}", other), + } + // Crank one more time because the meta block event is chained onto the + // execution results fetching + let event = mock_reactor.crank().await; + match event { + MockReactorEvent::MetaBlockAnnouncement(MetaBlockAnnouncement(mut meta_block)) => { + assert_eq!(meta_block.hash(), *block.hash()); + // The transaction buffer is supposed to get notified + assert!(meta_block + .mut_state() + .register_as_sent_to_transaction_buffer() + .was_updated()); + } + other => panic!("Unexpected event: {:?}", other), + } + // The historical sync for this block should now be complete + assert_matches!( + block_synchronizer.historical_progress(), + BlockSynchronizerProgress::Synced(block_hash, _, _) if block_hash == *block.hash() + ); +} + +#[test] +fn builders_are_purged_when_requested() { + let mut rng = TestRng::new(); + let test_env = TestEnv::random(&mut rng); + let block = test_env.block(); + let validator_matrix = test_env.gen_validator_matrix(); + let mut block_synchronizer = + BlockSynchronizer::new_initialized(&mut rng, validator_matrix, Config::default()); + + // Register block for forward sync + assert!(block_synchronizer.register_block_by_hash(*block.hash(), false)); + + // Registering block for historical sync + assert!(block_synchronizer + .register_block_by_hash(*TestBlockBuilder::new().build(&mut rng).hash(), true)); + + assert!(block_synchronizer.forward.is_some()); + assert!(block_synchronizer.historical.is_some()); + + block_synchronizer.purge_historical(); + assert!(block_synchronizer.forward.is_some()); + assert!(block_synchronizer.historical.is_none()); + + assert!(block_synchronizer + .register_block_by_hash(*TestBlockBuilder::new().build(&mut rng).hash(), true)); + assert!(block_synchronizer.forward.is_some()); + assert!(block_synchronizer.historical.is_some()); + + block_synchronizer.purge_forward(); + assert!(block_synchronizer.forward.is_none()); + 
assert!(block_synchronizer.historical.is_some()); + + assert!(block_synchronizer.register_block_by_hash(*block.hash(), false)); + assert!(block_synchronizer.forward.is_some()); + assert!(block_synchronizer.historical.is_some()); + + block_synchronizer.purge(); + assert!(block_synchronizer.forward.is_none()); + assert!(block_synchronizer.historical.is_none()); +} + +#[tokio::test] +async fn synchronizer_halts_if_block_cannot_be_made_executable() { + let mut rng = TestRng::new(); + let mock_reactor = MockReactor::new(); + let test_env = TestEnv::random(&mut rng); + let peers = test_env.peers(); + let block = test_env.block(); + let validator_matrix = test_env.gen_validator_matrix(); + let chain_name_hash = validator_matrix.chain_name_hash(); + let validators_secret_keys = test_env.validator_keys(); + let mut block_synchronizer = + BlockSynchronizer::new_initialized(&mut rng, validator_matrix, Config::default()); + + // Register block for fwd sync + assert!(block_synchronizer.register_block_by_hash(*block.hash(), false)); + assert!(block_synchronizer.forward.is_some()); + block_synchronizer.register_peers(*block.hash(), peers.clone()); + + let fwd_builder = block_synchronizer + .forward + .as_mut() + .expect("Forward builder should have been initialized"); + assert!(fwd_builder + .register_block_header(block.clone_header(), None) + .is_ok()); + fwd_builder.register_era_validator_weights(&block_synchronizer.validator_matrix); + // Register finality signatures to reach weak finality + register_multiple_signatures( + fwd_builder, + block, + validators_secret_keys + .iter() + .take(weak_finality_threshold(validators_secret_keys.len())), + chain_name_hash, + ); + assert!(fwd_builder.register_block(block.clone(), None).is_ok()); + // Register the remaining signatures to reach strict finality + register_multiple_signatures( + fwd_builder, + block, + validators_secret_keys + .iter() + .skip(weak_finality_threshold(validators_secret_keys.len())), + chain_name_hash, + ); + + // 
Block should have strict finality and will require to be executed + let events = need_next(&mut rng, &mock_reactor, &mut block_synchronizer, 1).await; + + for event in events { + assert_matches!( + event, + MockReactorEvent::MakeBlockExecutableRequest(MakeBlockExecutableRequest { + block_hash, + .. + }) if block_hash == *block.hash() + ); + } + + // Simulate an error (the block couldn't be converted for execution). + // This can happen if the synchronizer didn't fetch the right approvals hashes. + // Don't expect to progress any further here. The control logic should + // leap and backfill this block during a historical sync. + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + &mut rng, + Event::MadeFinalizedBlock { + block_hash: *block.hash(), + result: None, + }, + ); + assert_eq!(effects.len(), 0); + + // Check the block acquisition state + let fwd_builder = block_synchronizer + .forward + .as_ref() + .expect("Forward builder should have been initialized"); + assert_matches!( + fwd_builder.block_acquisition_state(), + BlockAcquisitionState::Failed(block_hash, _) if block_hash == block.hash() + ); + + // Progress should now indicate that the block is syncing + assert_matches!( + block_synchronizer.forward_progress(), + BlockSynchronizerProgress::Syncing(block_hash, _, _) if block_hash == *block.hash() + ); +} + +fn historical_state(block_synchronizer: &BlockSynchronizer) -> &BlockAcquisitionState { + block_synchronizer + .historical + .as_ref() + .unwrap() + .block_acquisition_state() +} + +/// When there is no deploy, the state goes from `HaveGlobalState` to `HaveStrictFinalitySignature` +/// directly, skipping `HaveAllExecutionResults`, `HaveApprovalsHashes` and `HaveAllDeploys`. 
+#[tokio::test] +async fn historical_sync_skips_exec_results_and_deploys_if_block_empty() { + let rng = &mut TestRng::new(); + let mock_reactor = MockReactor::new(); + let test_env = TestEnv::random(rng); + let peers = test_env.peers(); + let block = test_env.block(); + let validator_matrix = test_env.gen_validator_matrix(); + let chain_name_hash = validator_matrix.chain_name_hash(); + let validators_secret_keys = test_env.validator_keys(); + let mut block_synchronizer = + BlockSynchronizer::new_initialized(rng, validator_matrix, Default::default()) + .with_legacy_finality(LegacyRequiredFinality::Strict); + + // Register block for historical sync + assert!(block_synchronizer.register_block_by_hash(*block.hash(), SHOULD_FETCH_EXECUTION_STATE)); + assert!(block_synchronizer.forward.is_none()); + block_synchronizer.register_peers(*block.hash(), peers.clone()); + + // Skip steps HaveBlockHeader, HaveWeakFinalitySignature, HaveBlock + + let historical_builder = block_synchronizer + .historical + .as_mut() + .expect("Historical builder should have been initialized"); + historical_builder + .register_block_header(block.clone_header(), None) + .expect("header registration works"); + historical_builder.register_era_validator_weights(&block_synchronizer.validator_matrix); + register_multiple_signatures( + historical_builder, + block, + validators_secret_keys + .iter() + .take(weak_finality_threshold(validators_secret_keys.len())), + chain_name_hash, + ); + assert!(historical_builder + .register_block(block.clone(), None) + .is_ok()); + + let events = need_next(rng, &mock_reactor, &mut block_synchronizer, 1).await; + + let request = match events.try_one() { + Some(MockReactorEvent::SyncGlobalStateRequest( + request @ SyncGlobalStateRequest { + block_hash, + state_root_hash, + .. 
+ }, + )) if block_hash == *block.hash() && &state_root_hash == block.state_root_hash() => request, + _ => panic!("there should be a unique event of type SyncGlobalStateRequest"), + }; + + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + rng, + Event::GlobalStateSynchronizer(global_state_synchronizer::Event::Request(request)), + ); + + // ----- HaveBlock ----- + assert_matches!( + historical_state(&block_synchronizer), + BlockAcquisitionState::HaveBlock { .. } + ); + + // Those effects are handled directly and not through the reactor: + let events = effects + .try_one() + .expect("there should be only one effect") + .await; + assert_matches!( + events.try_one(), + Some(Event::GlobalStateSynchronizer( + GlobalStateSynchronizerEvent::GetPeers(_) + )) + ); + + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + rng, + Event::GlobalStateSynced { + block_hash: *block.hash(), + result: Ok(GlobalStateSynchronizerResponse::new( + global_state_synchronizer::RootHash::new(*block.state_root_hash()), + vec![], + )), + }, + ); + + // ----- HaveGlobalState ----- + assert_matches!( + historical_state(&block_synchronizer), + BlockAcquisitionState::HaveGlobalState { .. 
} + ); + let events = mock_reactor.process_effects(effects).await; + + for event in events { + assert_matches!(event, MockReactorEvent::FinalitySignatureFetcherRequest(..)); + } +} + +#[tokio::test] +async fn historical_sync_no_legacy_block() { + let rng = &mut TestRng::new(); + let mock_reactor = MockReactor::new(); + let txn = Transaction::random(rng); + let test_env = TestEnv::random(rng).with_block( + TestBlockBuilder::new() + .era(1) + .transactions(iter::once(&txn)) + .build(rng) + .into(), + ); + let peers = test_env.peers(); + let block = test_env.block(); + let validator_matrix = test_env.gen_validator_matrix(); + let chain_name_hash = validator_matrix.chain_name_hash(); + let validators_secret_keys = test_env.validator_keys(); + let mut block_synchronizer = + BlockSynchronizer::new_initialized(rng, validator_matrix, Default::default()) + .with_legacy_finality(LegacyRequiredFinality::Strict); + + // Register block for historical sync + assert!(block_synchronizer.register_block_by_hash(*block.hash(), SHOULD_FETCH_EXECUTION_STATE)); + assert!(block_synchronizer.forward.is_none()); + block_synchronizer.register_peers(*block.hash(), peers.clone()); + + let historical_builder = block_synchronizer + .historical + .as_mut() + .expect("Historical builder should have been initialized"); + historical_builder + .register_block_header(block.clone_header(), None) + .expect("header registration works"); + historical_builder.register_era_validator_weights(&block_synchronizer.validator_matrix); + register_multiple_signatures( + historical_builder, + block, + validators_secret_keys + .iter() + .take(weak_finality_threshold(validators_secret_keys.len())), + chain_name_hash, + ); + assert!(historical_builder + .register_block(block.clone(), None) + .is_ok()); + + let events = need_next(rng, &mock_reactor, &mut block_synchronizer, 1).await; + + let request = match events.try_one() { + Some(MockReactorEvent::SyncGlobalStateRequest( + request @ SyncGlobalStateRequest { + 
block_hash, + state_root_hash, + .. + }, + )) if block_hash == *block.hash() && &state_root_hash == block.state_root_hash() => request, + _ => panic!("there should be a unique event of type SyncGlobalStateRequest"), + }; + + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + rng, + Event::GlobalStateSynchronizer(global_state_synchronizer::Event::Request(request)), + ); + + // Those effects are handled directly and not through the reactor: + let events = effects.one().await; + assert_matches!( + events.try_one(), + Some(Event::GlobalStateSynchronizer( + GlobalStateSynchronizerEvent::GetPeers(_) + )) + ); + + // ----- HaveBlock ----- + assert_matches!( + historical_state(&block_synchronizer), + BlockAcquisitionState::HaveBlock { .. } + ); + + // Let's not test the detail of the global synchronization event, + // since it is already tested in its unit tests. + + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + rng, + Event::GlobalStateSynced { + block_hash: *block.hash(), + result: Ok(GlobalStateSynchronizerResponse::new( + global_state_synchronizer::RootHash::new(*block.state_root_hash()), + vec![], + )), + }, + ); + + // ----- HaveGlobalState ----- + assert_matches!( + historical_state(&block_synchronizer), + BlockAcquisitionState::HaveGlobalState { .. 
} + ); + + let events = mock_reactor.process_effects(effects).await; + + match events.try_one() { + Some(MockReactorEvent::ContractRuntimeRequest( + ContractRuntimeRequest::GetExecutionResultsChecksum { + state_root_hash, + responder, + }, + )) => responder.respond(ExecutionResultsChecksumResult::Success { checksum: state_root_hash }).await, + other => panic!("Event should be of type `ContractRuntimeRequest(ContractRuntimeRequest::GetExecutionResultsChecksum) but it is {:?}", other), + } + + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + rng, + Event::GotExecutionResultsChecksum { + block_hash: *block.hash(), + result: ExecutionResultsChecksumResult::Success { + checksum: Digest::SENTINEL_NONE, + }, + }, + ); + let events = mock_reactor.process_effects(effects).await; + + for event in events { + assert_matches!( + event, + MockReactorEvent::BlockExecutionResultsOrChunkFetcherRequest(FetcherRequest { .. }) + ); + } + + let execution_results = BlockExecutionResultsOrChunk::new_mock_value(rng, *block.hash()); + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + rng, + Event::ExecutionResultsFetched { + block_hash: *block.hash(), + result: Ok(FetchedData::from_storage(Box::new(execution_results))), + }, + ); + + let mut events = mock_reactor.process_effects(effects).await; + + assert_matches!( + historical_state(&block_synchronizer), + BlockAcquisitionState::HaveGlobalState { .. } + ); + + assert_matches!( + events.remove(0), + MockReactorEvent::StorageRequest(StorageRequest::PutExecutionResults { .. }) + ); + for event in events { + assert_matches!( + event, + MockReactorEvent::ApprovalsHashesFetcherRequest(FetcherRequest { .. 
}) + ); + } + + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + rng, + Event::ExecutionResultsStored(*block.hash()), + ); + // ----- HaveAllExecutionResults ----- + assert_matches!( + historical_state(&block_synchronizer), + BlockAcquisitionState::HaveAllExecutionResults(_, _, _, checksum) if checksum.is_checkable() + ); + + let events = mock_reactor.process_effects(effects).await; + + for event in events { + assert_matches!( + event, + MockReactorEvent::ApprovalsHashesFetcherRequest(FetcherRequest { .. }) + ); + } + + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + rng, + Event::ApprovalsHashesFetched(Ok(FetchedData::from_storage(Box::new( + ApprovalsHashes::new( + *block.hash(), + vec![txn.compute_approvals_hash().unwrap()], + dummy_merkle_proof(), + ), + )))), + ); + // ----- HaveApprovalsHashes ----- + assert_matches!( + historical_state(&block_synchronizer), + BlockAcquisitionState::HaveApprovalsHashes(_, _, _) + ); + + let events = mock_reactor.process_effects(effects).await; + for event in events { + assert_matches!( + event, + MockReactorEvent::TransactionFetcherRequest(FetcherRequest { .. }) + ); + } + + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + rng, + Event::DeployFetched { + block_hash: *block.hash(), + result: Either::Right(Ok(FetchedData::from_storage(Box::new(txn)))), + }, + ); + // ----- HaveAllDeploys ----- + assert_matches!( + historical_state(&block_synchronizer), + BlockAcquisitionState::HaveAllDeploys(_, _) + ); + + let events = mock_reactor.process_effects(effects).await; + + for event in events { + assert_matches!(event, MockReactorEvent::FinalitySignatureFetcherRequest(_)); + } + + // Then we get back to the strict finality signature part, which is already tested. 
+} + +#[tokio::test] +async fn historical_sync_legacy_block_strict_finality() { + let rng = &mut TestRng::new(); + let mock_reactor = MockReactor::new(); + let deploy = Deploy::random(rng); + let test_env = TestEnv::random(rng).with_block( + TestBlockV1Builder::new() + .era(1) + .deploys(iter::once(&deploy.clone())) + .build(rng) + .into(), + ); + let peers = test_env.peers(); + let block = test_env.block(); + let validator_matrix = test_env.gen_validator_matrix(); + let chain_name_hash = validator_matrix.chain_name_hash(); + let validators_secret_keys = test_env.validator_keys(); + let mut block_synchronizer = + BlockSynchronizer::new_initialized(rng, validator_matrix, Default::default()) + .with_legacy_finality(LegacyRequiredFinality::Strict); + + // Register block for historical sync + assert!(block_synchronizer.register_block_by_hash(*block.hash(), SHOULD_FETCH_EXECUTION_STATE)); + assert!(block_synchronizer.forward.is_none()); + block_synchronizer.register_peers(*block.hash(), peers.clone()); + + let historical_builder = block_synchronizer + .historical + .as_mut() + .expect("Historical builder should have been initialized"); + historical_builder + .register_block_header(block.clone_header(), None) + .expect("header registration works"); + historical_builder.register_era_validator_weights(&block_synchronizer.validator_matrix); + register_multiple_signatures( + historical_builder, + block, + validators_secret_keys + .iter() + .take(weak_finality_threshold(validators_secret_keys.len())), + chain_name_hash, + ); + assert!(historical_builder + .register_block(block.clone(), None) + .is_ok()); + + let events = need_next(rng, &mock_reactor, &mut block_synchronizer, 1).await; + + let request = match events.try_one() { + Some(MockReactorEvent::SyncGlobalStateRequest( + request @ SyncGlobalStateRequest { + block_hash, + state_root_hash, + .. 
+ }, + )) if block_hash == *block.hash() && &state_root_hash == block.state_root_hash() => request, + _ => panic!("there should be a unique event of type SyncGlobalStateRequest"), + }; + + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + rng, + Event::GlobalStateSynchronizer(global_state_synchronizer::Event::Request(request)), + ); + + // Those effects are handled directly and not through the reactor: + let events = effects.one().await; + assert_matches!( + events.try_one(), + Some(Event::GlobalStateSynchronizer( + GlobalStateSynchronizerEvent::GetPeers(_) + )) + ); + + // ----- HaveBlock ----- + assert_matches!( + historical_state(&block_synchronizer), + BlockAcquisitionState::HaveBlock { .. } + ); + + // Let's not test the detail of the global synchronization event, + // since it is already tested in its unit tests. + + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + rng, + Event::GlobalStateSynced { + block_hash: *block.hash(), + result: Ok(GlobalStateSynchronizerResponse::new( + global_state_synchronizer::RootHash::new(*block.state_root_hash()), + vec![], + )), + }, + ); + + // ----- HaveGlobalState ----- + assert_matches!( + historical_state(&block_synchronizer), + BlockAcquisitionState::HaveGlobalState { .. 
} + ); + + let events = mock_reactor.process_effects(effects).await; + + match events.try_one() { + Some(MockReactorEvent::ContractRuntimeRequest( + ContractRuntimeRequest::GetExecutionResultsChecksum { + state_root_hash, + responder, + }, + )) => responder.respond(ExecutionResultsChecksumResult::Success { checksum: state_root_hash }).await, + other => panic!("Event should be of type `ContractRuntimeRequest(ContractRuntimeRequest::GetExecutionResultsChecksum) but it is {:?}", other), + } + + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + rng, + Event::GotExecutionResultsChecksum { + block_hash: *block.hash(), + result: ExecutionResultsChecksumResult::RegistryNotFound, // test a legacy block + }, + ); + let events = mock_reactor.process_effects(effects).await; + + for event in events { + assert_matches!( + event, + MockReactorEvent::BlockExecutionResultsOrChunkFetcherRequest(FetcherRequest { .. }) + ); + } + + let execution_results = BlockExecutionResultsOrChunk::new_mock_value(rng, *block.hash()); + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + rng, + Event::ExecutionResultsFetched { + block_hash: *block.hash(), + result: Ok(FetchedData::from_storage(Box::new(execution_results))), + }, + ); + + let mut events = mock_reactor.process_effects(effects).await; + + assert_matches!( + historical_state(&block_synchronizer), + BlockAcquisitionState::HaveGlobalState { .. } + ); + + assert_matches!( + events.remove(0), + MockReactorEvent::StorageRequest(StorageRequest::PutExecutionResults { .. }) + ); + for event in events { + assert_matches!( + event, + MockReactorEvent::ApprovalsHashesFetcherRequest(FetcherRequest { .. 
}) + ); + } + + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + rng, + Event::ExecutionResultsStored(*block.hash()), + ); + // ----- HaveAllExecutionResults ----- + assert_matches!( + historical_state(&block_synchronizer), + BlockAcquisitionState::HaveAllExecutionResults(_, _, _, checksum) + if checksum.is_checkable() == false + ); + + let events = mock_reactor.process_effects(effects).await; + + for event in events { + assert_matches!( + event, + MockReactorEvent::LegacyDeployFetcherRequest(FetcherRequest { .. }) + ); + } + + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + rng, + Event::DeployFetched { + block_hash: *block.hash(), + result: Either::Left(Ok(FetchedData::from_storage(Box::new(deploy.into())))), + }, + ); + // ----- HaveAllDeploys ----- + assert_matches!( + historical_state(&block_synchronizer), + BlockAcquisitionState::HaveAllDeploys(_, _) + ); + + let events = mock_reactor.process_effects(effects).await; + + for event in events { + assert_matches!(event, MockReactorEvent::FinalitySignatureFetcherRequest(_)); + } + + // Then we get back to the strict finality signature part, which is already tested. 
+} + +#[tokio::test] +async fn historical_sync_legacy_block_weak_finality() { + let rng = &mut TestRng::new(); + let mock_reactor = MockReactor::new(); + let deploy = Deploy::random(rng); + let test_env = TestEnv::random(rng).with_block( + TestBlockV1Builder::new() + .era(1) + .deploys(iter::once(&deploy.clone())) + .build(rng) + .into(), + ); + let peers = test_env.peers(); + let block = test_env.block(); + let validator_matrix = test_env.gen_validator_matrix(); + let chain_name_hash = validator_matrix.chain_name_hash(); + let validators_secret_keys = test_env.validator_keys(); + let mut block_synchronizer = + BlockSynchronizer::new_initialized(rng, validator_matrix, Default::default()) + .with_legacy_finality(LegacyRequiredFinality::Weak); + + // Register block for historical sync + assert!(block_synchronizer.register_block_by_hash(*block.hash(), SHOULD_FETCH_EXECUTION_STATE)); + assert!(block_synchronizer.forward.is_none()); + block_synchronizer.register_peers(*block.hash(), peers.clone()); + + let historical_builder = block_synchronizer + .historical + .as_mut() + .expect("Historical builder should have been initialized"); + historical_builder + .register_block_header(block.clone_header(), None) + .expect("header registration works"); + historical_builder.register_era_validator_weights(&block_synchronizer.validator_matrix); + register_multiple_signatures( + historical_builder, + block, + validators_secret_keys + .iter() + .take(weak_finality_threshold(validators_secret_keys.len())), + chain_name_hash, + ); + assert!(historical_builder + .register_block(block.clone(), None) + .is_ok()); + + let events = need_next(rng, &mock_reactor, &mut block_synchronizer, 1).await; + + let request = match events.try_one() { + Some(MockReactorEvent::SyncGlobalStateRequest( + request @ SyncGlobalStateRequest { + block_hash, + state_root_hash, + .. 
+ }, + )) if block_hash == *block.hash() && &state_root_hash == block.state_root_hash() => request, + _ => panic!("there should be a unique event of type SyncGlobalStateRequest"), + }; + + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + rng, + Event::GlobalStateSynchronizer(global_state_synchronizer::Event::Request(request)), + ); + + // Those effects are handled directly and not through the reactor: + let events = effects.one().await; + assert_matches!( + events.try_one(), + Some(Event::GlobalStateSynchronizer( + GlobalStateSynchronizerEvent::GetPeers(_) + )) + ); + + // ----- HaveBlock ----- + assert_matches!( + historical_state(&block_synchronizer), + BlockAcquisitionState::HaveBlock { .. } + ); + + // Let's not test the detail of the global synchronization event, + // since it is already tested in its unit tests. + + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + rng, + Event::GlobalStateSynced { + block_hash: *block.hash(), + result: Ok(GlobalStateSynchronizerResponse::new( + global_state_synchronizer::RootHash::new(*block.state_root_hash()), + vec![], + )), + }, + ); + + // ----- HaveGlobalState ----- + assert_matches!( + historical_state(&block_synchronizer), + BlockAcquisitionState::HaveGlobalState { .. 
} + ); + + let events = mock_reactor.process_effects(effects).await; + + match events.try_one() { + Some(MockReactorEvent::ContractRuntimeRequest( + ContractRuntimeRequest::GetExecutionResultsChecksum { + state_root_hash, + responder, + }, + )) => responder.respond(ExecutionResultsChecksumResult::Success { checksum: state_root_hash }).await, + other => panic!("Event should be of type `ContractRuntimeRequest(ContractRuntimeRequest::GetExecutionResultsChecksum) but it is {:?}", other), + } + + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + rng, + Event::GotExecutionResultsChecksum { + block_hash: *block.hash(), + result: ExecutionResultsChecksumResult::RegistryNotFound, // test a legacy block + }, + ); + let events = mock_reactor.process_effects(effects).await; + + for event in events { + assert_matches!( + event, + MockReactorEvent::BlockExecutionResultsOrChunkFetcherRequest(FetcherRequest { .. }) + ); + } + + let execution_results = BlockExecutionResultsOrChunk::new_mock_value(rng, *block.hash()); + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + rng, + Event::ExecutionResultsFetched { + block_hash: *block.hash(), + result: Ok(FetchedData::from_storage(Box::new(execution_results))), + }, + ); + + let mut events = mock_reactor.process_effects(effects).await; + + assert_matches!( + historical_state(&block_synchronizer), + BlockAcquisitionState::HaveGlobalState { .. } + ); + + assert_matches!( + events.remove(0), + MockReactorEvent::StorageRequest(StorageRequest::PutExecutionResults { .. }) + ); + for event in events { + assert_matches!( + event, + MockReactorEvent::ApprovalsHashesFetcherRequest(FetcherRequest { .. 
}) + ); + } + + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + rng, + Event::ExecutionResultsStored(*block.hash()), + ); + // ----- HaveAllExecutionResults ----- + assert_matches!( + historical_state(&block_synchronizer), + BlockAcquisitionState::HaveAllExecutionResults(_, _, _, checksum) + if checksum.is_checkable() == false + ); + + let events = mock_reactor.process_effects(effects).await; + + for event in events { + assert_matches!( + event, + MockReactorEvent::LegacyDeployFetcherRequest(FetcherRequest { .. }) + ); + } + + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + rng, + Event::DeployFetched { + block_hash: *block.hash(), + result: Either::Left(Ok(FetchedData::from_storage(Box::new(deploy.into())))), + }, + ); + + // ----- HaveStrictFinalitySignatures ----- + assert_matches!( + historical_state(&block_synchronizer), + BlockAcquisitionState::HaveStrictFinalitySignatures(_, _) + ); + + let events = effects.one().await; + + let event = match events.try_one() { + Some(event @ Event::Request(BlockSynchronizerRequest::NeedNext)) => event, + _ => panic!("Expected a NeedNext request here"), + }; + + let effects = block_synchronizer.handle_event(mock_reactor.effect_builder(), rng, event); + + assert_matches!( + historical_state(&block_synchronizer), + BlockAcquisitionState::HaveStrictFinalitySignatures(_, _) + ); + + let events = mock_reactor.process_effects(effects).await; + + for event in events { + assert_matches!(event, MockReactorEvent::MarkBlockCompletedRequest(_)); + } +} + +#[tokio::test] +async fn historical_sync_legacy_block_any_finality() { + let rng = &mut TestRng::new(); + let mock_reactor = MockReactor::new(); + let deploy = Deploy::random(rng); + let test_env = TestEnv::random(rng).with_block( + TestBlockV1Builder::new() + .era(1) + .deploys(iter::once(&deploy.clone())) + .build(rng) + .into(), + ); + let peers = test_env.peers(); + let block = test_env.block(); + let 
validator_matrix = test_env.gen_validator_matrix(); + let chain_name_hash = validator_matrix.chain_name_hash(); + let validators_secret_keys = test_env.validator_keys(); + let mut block_synchronizer = + BlockSynchronizer::new_initialized(rng, validator_matrix, Default::default()) + .with_legacy_finality(LegacyRequiredFinality::Any); + + // Register block for historical sync + assert!(block_synchronizer.register_block_by_hash(*block.hash(), SHOULD_FETCH_EXECUTION_STATE)); + assert!(block_synchronizer.forward.is_none()); + block_synchronizer.register_peers(*block.hash(), peers.clone()); + + let historical_builder = block_synchronizer + .historical + .as_mut() + .expect("Historical builder should have been initialized"); + historical_builder + .register_block_header(block.clone_header(), None) + .expect("header registration works"); + historical_builder.register_era_validator_weights(&block_synchronizer.validator_matrix); + register_multiple_signatures( + historical_builder, + block, + validators_secret_keys.iter().take(1), + chain_name_hash, + ); + assert!(historical_builder + .register_block(block.clone(), None) + .is_ok()); + + let events = need_next(rng, &mock_reactor, &mut block_synchronizer, 1).await; + + let request = match events.try_one() { + Some(MockReactorEvent::SyncGlobalStateRequest( + request @ SyncGlobalStateRequest { + block_hash, + state_root_hash, + .. 
+ }, + )) if block_hash == *block.hash() && &state_root_hash == block.state_root_hash() => request, + _ => panic!("there should be a unique event of type SyncGlobalStateRequest"), + }; + + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + rng, + Event::GlobalStateSynchronizer(global_state_synchronizer::Event::Request(request)), + ); + + // Those effects are handled directly and not through the reactor: + let events = effects.one().await; + assert_matches!( + events.try_one(), + Some(Event::GlobalStateSynchronizer( + GlobalStateSynchronizerEvent::GetPeers(_) + )) + ); + + // ----- HaveBlock ----- + assert_matches!( + historical_state(&block_synchronizer), + BlockAcquisitionState::HaveBlock { .. } + ); + + // Let's not test the detail of the global synchronization event, + // since it is already tested in its unit tests. + + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + rng, + Event::GlobalStateSynced { + block_hash: *block.hash(), + result: Ok(GlobalStateSynchronizerResponse::new( + global_state_synchronizer::RootHash::new(*block.state_root_hash()), + vec![], + )), + }, + ); + + // ----- HaveGlobalState ----- + assert_matches!( + historical_state(&block_synchronizer), + BlockAcquisitionState::HaveGlobalState { .. 
} + ); + + let events = mock_reactor.process_effects(effects).await; + + match events.try_one() { + Some(MockReactorEvent::ContractRuntimeRequest( + ContractRuntimeRequest::GetExecutionResultsChecksum { + state_root_hash, + responder, + }, + )) => responder.respond(ExecutionResultsChecksumResult::Success { checksum: state_root_hash }).await, + other => panic!("Event should be of type `ContractRuntimeRequest(ContractRuntimeRequest::GetExecutionResultsChecksum) but it is {:?}", other), + } + + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + rng, + Event::GotExecutionResultsChecksum { + block_hash: *block.hash(), + result: ExecutionResultsChecksumResult::RegistryNotFound, // test a legacy block + }, + ); + let events = mock_reactor.process_effects(effects).await; + + for event in events { + assert_matches!( + event, + MockReactorEvent::BlockExecutionResultsOrChunkFetcherRequest(FetcherRequest { .. }) + ); + } + + let execution_results = BlockExecutionResultsOrChunk::new_mock_value(rng, *block.hash()); + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + rng, + Event::ExecutionResultsFetched { + block_hash: *block.hash(), + result: Ok(FetchedData::from_storage(Box::new(execution_results))), + }, + ); + + let mut events = mock_reactor.process_effects(effects).await; + + assert_matches!( + historical_state(&block_synchronizer), + BlockAcquisitionState::HaveGlobalState { .. } + ); + + assert_matches!( + events.remove(0), + MockReactorEvent::StorageRequest(StorageRequest::PutExecutionResults { .. }) + ); + for event in events { + assert_matches!( + event, + MockReactorEvent::ApprovalsHashesFetcherRequest(FetcherRequest { .. 
}) + ); + } + + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + rng, + Event::ExecutionResultsStored(*block.hash()), + ); + // ----- HaveAllExecutionResults ----- + assert_matches!( + historical_state(&block_synchronizer), + BlockAcquisitionState::HaveAllExecutionResults(_, _, _, checksum) + if checksum.is_checkable() == false + ); + + let events = mock_reactor.process_effects(effects).await; + + for event in events { + assert_matches!( + event, + MockReactorEvent::LegacyDeployFetcherRequest(FetcherRequest { .. }) + ); + } + + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + rng, + Event::DeployFetched { + block_hash: *block.hash(), + result: Either::Left(Ok(FetchedData::from_storage(Box::new(deploy.into())))), + }, + ); + + // ----- HaveStrictFinalitySignatures ----- + assert_matches!( + historical_state(&block_synchronizer), + BlockAcquisitionState::HaveStrictFinalitySignatures(_, _) + ); + + let events = effects.one().await; + + let event = match events.try_one() { + Some(event @ Event::Request(BlockSynchronizerRequest::NeedNext)) => event, + _ => panic!("Expected a NeedNext request here"), + }; + + let effects = block_synchronizer.handle_event(mock_reactor.effect_builder(), rng, event); + + assert_matches!( + historical_state(&block_synchronizer), + BlockAcquisitionState::HaveStrictFinalitySignatures(_, _) + ); + + let events = mock_reactor.process_effects(effects).await; + + for event in events { + assert_matches!(event, MockReactorEvent::MarkBlockCompletedRequest(_)); + } +} + +#[tokio::test] +async fn fwd_sync_latch_should_not_decrement_for_old_responses() { + let mut rng = TestRng::new(); + let mock_reactor = MockReactor::new(); + let txn = Transaction::random(&mut rng); + let test_env = TestEnv::random(&mut rng).with_block( + TestBlockBuilder::new() + .era(1) + .transactions(iter::once(&txn)) + .build(&mut rng) + .into(), + ); + let peers = test_env.peers(); + let block = test_env.block(); 
+ let validator_matrix = test_env.gen_validator_matrix(); + let chain_name_hash = validator_matrix.chain_name_hash(); + let validators_secret_keys = test_env.validator_keys(); + let mut block_synchronizer = + BlockSynchronizer::new_initialized(&mut rng, validator_matrix, Config::default()); + + // Register block for fwd sync + assert!(block_synchronizer.register_block_by_hash(*block.hash(), false)); + assert!(block_synchronizer.forward.is_some()); + + // Start syncing. + { + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + &mut rng, + Event::Request(BlockSynchronizerRequest::NeedNext), + ); + assert_eq!(effects.len(), 1); + + // First, the synchronizer should get peers. + let events = mock_reactor.process_effects(effects).await; + assert_matches!( + events[0], + MockReactorEvent::BlockAccumulatorRequest(BlockAccumulatorRequest::GetPeersForBlock { + block_hash, + .. + }) if block_hash == *block.hash() + ); + + latch_inner_check( + block_synchronizer.forward.as_ref(), + true, + "should be latched waiting for peers", + ); + } + + // Register peers. This would make the synchronizer ask for the block header. + { + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + &mut rng, + Event::AccumulatedPeers(*block.hash(), Some(peers.clone())), + ); + let events = mock_reactor.process_effects(effects).await; + + let mut peers_asked = Vec::new(); + for event in events { + assert_matches!( + event, + MockReactorEvent::BlockHeaderFetcherRequest(FetcherRequest { + id, + peer, + .. + }) if peers.contains(&peer) && id == *block.hash() => { + peers_asked.push(peer); + } + ); + } + + latch_count_check( + block_synchronizer.forward.as_ref(), + MAX_SIMULTANEOUS_PEERS, + format!( + "Latch count should be {} since no block header was received.", + MAX_SIMULTANEOUS_PEERS + ) + .as_str(), + ); + + // Simulate successful fetch of the block header. 
+ let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + &mut rng, + Event::BlockHeaderFetched(Ok(FetchedData::FromPeer { + item: Box::new(block.clone_header()), + peer: peers_asked[0], + })), + ); + let events = mock_reactor.process_effects(effects).await; + + let expected_latch_count = events.len() as u8; // number of finality sig fetches. + + // Check what signatures were requested + let mut sigs_requested = Vec::new(); + for event in events { + assert_matches!( + event, + MockReactorEvent::FinalitySignatureFetcherRequest(FetcherRequest { + id, + peer, + .. + }) => { + assert_eq!(id.block_hash(), block.hash()); + assert_eq!(id.era_id(), block.era_id()); + sigs_requested.push((peer, id.public_key().clone())); + } + ); + } + + latch_count_check( + block_synchronizer.forward.as_ref(), + expected_latch_count, + format!( + "Latch count should be {} since no finality sigs were received.", + expected_latch_count + ) + .as_str(), + ); + + // Receive a late response with the block header. + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + &mut rng, + Event::BlockHeaderFetched(Ok(FetchedData::FromPeer { + item: Box::new(block.clone_header()), + peer: peers_asked[1], + })), + ); + + assert_eq!(effects.len(), 0); + latch_count_check( + block_synchronizer.forward.as_ref(), + expected_latch_count, + format!( + "Latch count should be {} since no finality sigs were received.", + expected_latch_count + ) + .as_str(), + ); + } + + // Register finality sigs. This would make the synchronizer switch to have weak finality and + // continue asking for the block body. 
+ { + let mut generated_effects = Effects::new(); + for secret_key in validators_secret_keys + .iter() + .take(weak_finality_threshold(validators_secret_keys.len())) + { + // Register a finality signature + let signature = FinalitySignatureV2::create( + *block.hash(), + block.height(), + block.era_id(), + chain_name_hash, + secret_key.as_ref(), + ); + assert!(signature.is_verified().is_ok()); + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + &mut rng, + Event::FinalitySignatureFetched(Ok(FetchedData::FromPeer { + item: Box::new(signature.into()), + peer: peers[2], + })), + ); + generated_effects.extend(effects); + } + + let events = mock_reactor + .process_effects( + generated_effects + .into_iter() + .rev() + .take(MAX_SIMULTANEOUS_PEERS as usize), + ) + .await; + + for event in events { + assert_matches!( + event, + MockReactorEvent::BlockFetcherRequest(FetcherRequest { + id, + peer, + .. + }) => { + assert!(peers.contains(&peer)); + assert_eq!(id, *block.hash()); + } + ); + } + + latch_count_check( + block_synchronizer.forward.as_ref(), + MAX_SIMULTANEOUS_PEERS, + format!( + "Latch count should be {} since no block was received.", + MAX_SIMULTANEOUS_PEERS + ) + .as_str(), + ); + + // Receive some more finality signatures to check if the latch decrements. 
+ let mut generated_effects = Effects::new(); + for secret_key in validators_secret_keys + .iter() + .skip(weak_finality_threshold(validators_secret_keys.len())) + .take(2) + { + // Register a finality signature + let signature = FinalitySignatureV2::create( + *block.hash(), + block.height(), + block.era_id(), + chain_name_hash, + secret_key.as_ref(), + ); + assert!(signature.is_verified().is_ok()); + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + &mut rng, + Event::FinalitySignatureFetched(Ok(FetchedData::FromPeer { + item: Box::new(signature.into()), + peer: peers[2], + })), + ); + generated_effects.extend(effects); + } + + assert_eq!(generated_effects.len(), 0); + + latch_count_check( + block_synchronizer.forward.as_ref(), + MAX_SIMULTANEOUS_PEERS, + format!( + "Latch count should be {} since no block was received.", + MAX_SIMULTANEOUS_PEERS + ) + .as_str(), + ); + } + + // Register a block response. This would make the synchronizer switch to HaveBlock and continue + // asking for the approvals hashes. + { + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + &mut rng, + Event::BlockFetched(Ok(FetchedData::FromPeer { + item: Box::new(block.clone()), + peer: peers[0], + })), + ); + let events = mock_reactor.process_effects(effects).await; + + for event in events { + assert_matches!( + event, + MockReactorEvent::ApprovalsHashesFetcherRequest(FetcherRequest { + id, + peer, + .. + }) if peers.contains(&peer) && id == *block.hash() + ); + } + + latch_count_check( + block_synchronizer.forward.as_ref(), + MAX_SIMULTANEOUS_PEERS, + format!( + "Latch count should be {} since no approval hashes were received.", + MAX_SIMULTANEOUS_PEERS + ) + .as_str(), + ); + + // Receive another response with the block. This is the second response out of the 5 we sent + // out earlier. 
+ let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + &mut rng, + Event::BlockFetched(Ok(FetchedData::FromPeer { + item: Box::new(block.clone()), + peer: peers[1], + })), + ); + assert_eq!(effects.len(), 0); + + latch_count_check( + block_synchronizer.forward.as_ref(), + MAX_SIMULTANEOUS_PEERS, + format!( + "Latch count should be {} since no approval hashes were received.", + MAX_SIMULTANEOUS_PEERS + ) + .as_str(), + ); + } + + // Register approvals hashes. This would make the synchronizer switch to HaveApprovalsHashes and + // continue asking for the deploys. + { + let approvals_hashes = ApprovalsHashes::new( + *block.hash(), + vec![txn.compute_approvals_hash().unwrap()], + dummy_merkle_proof(), + ); + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + &mut rng, + Event::ApprovalsHashesFetched(Ok(FetchedData::FromPeer { + item: Box::new(approvals_hashes.clone()), + peer: peers[0], + })), + ); + assert_eq!(effects.len(), MAX_SIMULTANEOUS_PEERS as usize); + for event in mock_reactor.process_effects(effects).await { + assert_matches!( + event, + MockReactorEvent::TransactionFetcherRequest(FetcherRequest { + id, + peer, + .. + }) => { + assert!(peers.contains(&peer)); + assert_eq!(id, txn.compute_id()); + } + ); + } + + latch_count_check( + block_synchronizer.forward.as_ref(), + MAX_SIMULTANEOUS_PEERS, + format!( + "Latch count should be {} since no deploys were received.", + MAX_SIMULTANEOUS_PEERS + ) + .as_str(), + ); + + // Receive a late response with the approvals hashes. 
+ let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + &mut rng, + Event::ApprovalsHashesFetched(Ok(FetchedData::FromPeer { + item: Box::new(approvals_hashes.clone()), + peer: peers[1], + })), + ); + + assert_eq!(effects.len(), 0); + latch_count_check( + block_synchronizer.forward.as_ref(), + MAX_SIMULTANEOUS_PEERS, + format!( + "Latch count should be {} since no deploys were received.", + MAX_SIMULTANEOUS_PEERS + ) + .as_str(), + ); + } + + // Receive a deploy. This would make the synchronizer switch to HaveAllDeploys and continue + // asking for more finality signatures in order to reach strict finality. + { + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + &mut rng, + Event::DeployFetched { + block_hash: *block.hash(), + result: Either::Right(Ok(FetchedData::from_storage(Box::new(txn.clone())))), + }, + ); + let events = mock_reactor.process_effects(effects).await; + let expected_latch_count = events.len() as u8; + + latch_count_check( + block_synchronizer.forward.as_ref(), + expected_latch_count, + format!( + "Latch count should be {} since no new signatures were received.", + expected_latch_count + ) + .as_str(), + ); + + // Since it's the single deploy in the block, the next step is to get the rest of the + // finality signatures to get strict finality. + for event in events { + assert_matches!( + event, + MockReactorEvent::FinalitySignatureFetcherRequest(FetcherRequest { + id, + .. + }) => { + assert_eq!(id.block_hash(), block.hash()); + assert_eq!(id.era_id(), block.era_id()); + } + ); + } + + // Receive a late deploy response. 
+ let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + &mut rng, + Event::DeployFetched { + block_hash: *block.hash(), + result: Either::Right(Ok(FetchedData::from_storage(Box::new(txn.clone())))), + }, + ); + + assert_eq!(effects.len(), 0); + latch_count_check( + block_synchronizer.forward.as_ref(), + expected_latch_count, + "Latch should not have changed since we did not receive a new signature yet.", + ); + } + + // Receive the rest of the missing signatures to get strict finality. This would switch the + // state to HaveStrictFinality and continue to request to make the block executable. + { + let mut generated_effects = Effects::new(); + for secret_key in validators_secret_keys.iter().rev().take( + strict_finality_threshold(validators_secret_keys.len()) + - weak_finality_threshold(validators_secret_keys.len()), + ) { + // Register a finality signature + let signature = FinalitySignatureV2::create( + *block.hash(), + block.height(), + block.era_id(), + chain_name_hash, + secret_key.as_ref(), + ); + assert!(signature.is_verified().is_ok()); + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + &mut rng, + Event::FinalitySignatureFetched(Ok(FetchedData::FromPeer { + item: Box::new(signature.into()), + peer: peers[2], + })), + ); + generated_effects.extend(effects); + } + + // Once strict finality is achieved, the synchronizer will try to make the block executable. + let events = mock_reactor + .process_effects(generated_effects.into_iter().rev().take(1)) + .await; + + for event in events { + assert_matches!( + event, + MockReactorEvent::MakeBlockExecutableRequest(MakeBlockExecutableRequest { + block_hash, + .. 
+ }) if block_hash == *block.hash() + ); + } + + latch_count_check( + block_synchronizer.forward.as_ref(), + 1, + "Latch count should still be 1 since no FinalizedBlock was received.", + ); + } +} + +#[tokio::test] +async fn historical_sync_latch_should_not_decrement_for_old_deploy_fetch_responses() { + let rng = &mut TestRng::new(); + let mock_reactor = MockReactor::new(); + let transactions: BTreeMap<_, _> = iter::repeat_with(|| { + let txn = Transaction::random(rng); + let hash = txn.hash(); + (hash, txn) + }) + .take(3) + .collect(); + let test_env = TestEnv::random(rng).with_block( + TestBlockBuilder::new() + .era(1) + .transactions(transactions.values()) + .build(rng) + .into(), + ); + + let block = test_env.block(); + let block_v2: BlockV2 = block.clone().try_into().unwrap(); + let first_txn = transactions + .get(block_v2.all_transactions().next().unwrap()) + .unwrap(); + let second_txn = transactions + .get(block_v2.all_transactions().nth(1).unwrap()) + .unwrap(); + let third_txn = transactions + .get(block_v2.all_transactions().nth(2).unwrap()) + .unwrap(); + + let peers = test_env.peers(); + let block = test_env.block(); + let validator_matrix = test_env.gen_validator_matrix(); + let chain_name_hash = validator_matrix.chain_name_hash(); + let validators_secret_keys = test_env.validator_keys(); + let mut block_synchronizer = + BlockSynchronizer::new_initialized(rng, validator_matrix, Default::default()) + .with_legacy_finality(LegacyRequiredFinality::Strict); + + // Register block for historical sync + assert!(block_synchronizer.register_block_by_hash(*block.hash(), SHOULD_FETCH_EXECUTION_STATE)); + block_synchronizer.register_peers(*block.hash(), peers.clone()); + + let historical_builder = block_synchronizer + .historical + .as_mut() + .expect("Historical builder should have been initialized"); + historical_builder + .register_block_header(block.clone_header(), None) + .expect("header registration works"); + 
historical_builder.register_era_validator_weights(&block_synchronizer.validator_matrix); + register_multiple_signatures( + historical_builder, + block, + validators_secret_keys + .iter() + .take(weak_finality_threshold(validators_secret_keys.len())), + chain_name_hash, + ); + assert!(historical_builder + .register_block(block.clone(), None) + .is_ok()); + + let _effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + rng, + Event::GlobalStateSynced { + block_hash: *block.hash(), + result: Ok(GlobalStateSynchronizerResponse::new( + global_state_synchronizer::RootHash::new(*block.state_root_hash()), + vec![], + )), + }, + ); + + assert_matches!( + historical_state(&block_synchronizer), + BlockAcquisitionState::HaveGlobalState { .. } + ); + + let _effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + rng, + Event::GotExecutionResultsChecksum { + block_hash: *block.hash(), + result: ExecutionResultsChecksumResult::Success { + checksum: Digest::SENTINEL_NONE, + }, + }, + ); + + let execution_results = + BlockExecutionResultsOrChunk::new_mock_value_with_multiple_random_results( + rng, + *block.hash(), + 3, + ); + let _effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + rng, + Event::ExecutionResultsFetched { + block_hash: *block.hash(), + result: Ok(FetchedData::from_storage(Box::new(execution_results))), + }, + ); + + let _effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + rng, + Event::ExecutionResultsStored(*block.hash()), + ); + + assert_matches!( + historical_state(&block_synchronizer), + BlockAcquisitionState::HaveAllExecutionResults(_, _, _, checksum) + if checksum.is_checkable() == true + ); + + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + rng, + Event::ApprovalsHashesFetched(Ok(FetchedData::from_storage(Box::new( + ApprovalsHashes::new( + *block.hash(), + vec![ + first_txn.compute_approvals_hash().unwrap(), + 
second_txn.compute_approvals_hash().unwrap(), + third_txn.compute_approvals_hash().unwrap(), + ], + dummy_merkle_proof(), + ), + )))), + ); + + assert_matches!( + historical_state(&block_synchronizer), + BlockAcquisitionState::HaveApprovalsHashes(_, _, _) + ); + + let events = mock_reactor.process_effects(effects).await; + for event in events { + assert_matches!( + event, + MockReactorEvent::TransactionFetcherRequest(FetcherRequest { .. }) + ); + } + + latch_count_check( + block_synchronizer.historical.as_ref(), + MAX_SIMULTANEOUS_PEERS, + format!( + "Latch count should be {} since no deploys were received.", + MAX_SIMULTANEOUS_PEERS + ) + .as_str(), + ); + + // Receive 1 out of MAX_SIMULTANEOUS_PEERS requests for the first deploy. + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + rng, + Event::DeployFetched { + block_hash: *block.hash(), + result: Either::Right(Ok(FetchedData::from_storage(Box::new(first_txn.clone())))), + }, + ); + + // The first deploy was registered. The synchronizer will create MAX_SIMULTANEOUS_PEERS fetch + // requests for another deploy. + for event in mock_reactor.process_effects(effects).await { + assert_matches!( + event, + MockReactorEvent::TransactionFetcherRequest(FetcherRequest { .. }) + ); + } + latch_count_check( + block_synchronizer.historical.as_ref(), + MAX_SIMULTANEOUS_PEERS, + format!( + "Latch count should be {} since the node should ask for the second deploy.", + MAX_SIMULTANEOUS_PEERS + ) + .as_str(), + ); + + // Receive 1 out of MAX_SIMULTANEOUS_PEERS requests for the second deploy. + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + rng, + Event::DeployFetched { + block_hash: *block.hash(), + result: Either::Right(Ok(FetchedData::from_storage(Box::new(second_txn.clone())))), + }, + ); + + // The second deploy was registered. The synchronizer will create MAX_SIMULTANEOUS_PEERS fetch + // requests for another deploy. 
+ for event in mock_reactor.process_effects(effects).await { + assert_matches!( + event, + MockReactorEvent::TransactionFetcherRequest(FetcherRequest { .. }) + ); + } + latch_count_check( + block_synchronizer.historical.as_ref(), + MAX_SIMULTANEOUS_PEERS, + format!( + "Latch count should be {} since the node should ask for the third deploy.", + MAX_SIMULTANEOUS_PEERS + ) + .as_str(), + ); + + // The current state is: + // * Sent out MAX_SIMULTANEOUS_PEERS requests for the first deploy and received 1 response. + // * Sent out MAX_SIMULTANEOUS_PEERS requests for the second deploy and received 1 response. + // * Sent out MAX_SIMULTANEOUS_PEERS requests for the third deploy and haven't received anything + // yet. + // + // So we can receive at this point MAX_SIMULTANEOUS_PEERS - 2 "late" responses for the first and + // second deploys and MAX_SIMULTANEOUS_PEERS responses for the third deploy. + // + // Simulate that we receive the "late" responses first. The synchronizer shouldn't unlatch and + // try to send out more requests for the third deploy. It should hold off until the right + // response comes through. 
+ + // Receive the late responses for the first deploy + for _ in 1..MAX_SIMULTANEOUS_PEERS { + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + rng, + Event::DeployFetched { + block_hash: *block.hash(), + result: Either::Right(Ok(FetchedData::from_storage(Box::new(first_txn.clone())))), + }, + ); + + assert_eq!(effects.len(), 0); + + latch_count_check( + block_synchronizer.historical.as_ref(), + MAX_SIMULTANEOUS_PEERS, + "Shouldn't decrement the latch since this was a late response", + ); + } + + // Receive the late responses for the second deploy + for _ in 1..MAX_SIMULTANEOUS_PEERS { + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + rng, + Event::DeployFetched { + block_hash: *block.hash(), + result: Either::Right(Ok(FetchedData::from_storage(Box::new(second_txn.clone())))), + }, + ); + + assert_eq!(effects.len(), 0); + + latch_count_check( + block_synchronizer.historical.as_ref(), + MAX_SIMULTANEOUS_PEERS, + "Shouldn't decrement the latch since this was a late response", + ); + } + + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + rng, + Event::DeployFetched { + block_hash: *block.hash(), + result: Either::Right(Ok(FetchedData::from_storage(Box::new(third_txn.clone())))), + }, + ); + + // ----- HaveAllDeploys ----- + assert_matches!( + historical_state(&block_synchronizer), + BlockAcquisitionState::HaveAllDeploys(_, _) + ); + + let events = mock_reactor.process_effects(effects).await; + for event in events { + assert_matches!(event, MockReactorEvent::FinalitySignatureFetcherRequest(_)); + } +} + +#[tokio::test] +async fn historical_sync_latch_should_not_decrement_for_old_execution_results() { + let rng = &mut TestRng::new(); + let mock_reactor = MockReactor::new(); + let first_txn = Transaction::random(rng); + let second_txn = Transaction::random(rng); + let third_txn = Transaction::random(rng); + let test_env = TestEnv::random(rng).with_block( + 
TestBlockBuilder::new() + .era(1) + .transactions([first_txn, second_txn, third_txn].iter()) + .build(rng) + .into(), + ); + let peers = test_env.peers(); + let block = test_env.block(); + let validator_matrix = test_env.gen_validator_matrix(); + let chain_name_hash = validator_matrix.chain_name_hash(); + let validators_secret_keys = test_env.validator_keys(); + let mut block_synchronizer = + BlockSynchronizer::new_initialized(rng, validator_matrix, Default::default()) + .with_legacy_finality(LegacyRequiredFinality::Strict); + + // Register block for historical sync + assert!(block_synchronizer.register_block_by_hash(*block.hash(), SHOULD_FETCH_EXECUTION_STATE)); + block_synchronizer.register_peers(*block.hash(), peers.clone()); + + let historical_builder = block_synchronizer + .historical + .as_mut() + .expect("Historical builder should have been initialized"); + historical_builder + .register_block_header(block.clone_header(), None) + .expect("header registration works"); + historical_builder.register_era_validator_weights(&block_synchronizer.validator_matrix); + register_multiple_signatures( + historical_builder, + block, + validators_secret_keys + .iter() + .take(weak_finality_threshold(validators_secret_keys.len())), + chain_name_hash, + ); + assert!(historical_builder + .register_block(block.clone(), None) + .is_ok()); + + let _effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + rng, + Event::GlobalStateSynced { + block_hash: *block.hash(), + result: Ok(GlobalStateSynchronizerResponse::new( + global_state_synchronizer::RootHash::new(*block.state_root_hash()), + vec![], + )), + }, + ); + + assert_matches!( + historical_state(&block_synchronizer), + BlockAcquisitionState::HaveGlobalState { .. } + ); + + latch_count_check( + block_synchronizer.historical.as_ref(), + 1, + "Latch count should be 1 since we're waiting for execution results checksum.", + ); + + // Create chunked execution results. 
+ let execution_results = + BlockExecutionResultsOrChunk::new_mock_value_with_multiple_random_results( + rng, + *block.hash(), + 100000, // Lots of results to achieve chunking. + ); + let checksum = assert_matches!( + execution_results.value(), + ValueOrChunk::ChunkWithProof(chunk) => chunk.proof().root_hash() + ); + + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + rng, + Event::GotExecutionResultsChecksum { + block_hash: *block.hash(), + result: ExecutionResultsChecksumResult::Success { checksum }, + }, + ); + + for event in mock_reactor.process_effects(effects).await { + assert_matches!( + event, + MockReactorEvent::BlockExecutionResultsOrChunkFetcherRequest(FetcherRequest { .. }) + ); + } + + latch_count_check( + block_synchronizer.historical.as_ref(), + MAX_SIMULTANEOUS_PEERS, + format!( + "Latch count should be {} since no chunks of execution results were received.", + MAX_SIMULTANEOUS_PEERS + ) + .as_str(), + ); + + // Receive the first chunk of execution results. + let effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + rng, + Event::ExecutionResultsFetched { + block_hash: *block.hash(), + result: Ok(FetchedData::from_storage(Box::new( + execution_results.clone(), + ))), + }, + ); + + // It's expected that the synchronizer will ask for the next chunks of execution results. + for event in mock_reactor.process_effects(effects).await { + assert_matches!( + event, + MockReactorEvent::BlockExecutionResultsOrChunkFetcherRequest(FetcherRequest { id, .. }) if id.chunk_index() != 0 + ); + } + + latch_count_check( + block_synchronizer.historical.as_ref(), + MAX_SIMULTANEOUS_PEERS, + format!( + "Latch count should be {} since no responses with chunks != 0 were received.", + MAX_SIMULTANEOUS_PEERS + ) + .as_str(), + ); + + // Receive the first chunk of execution results again (late response). 
+ let _effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + rng, + Event::ExecutionResultsFetched { + block_hash: *block.hash(), + result: Ok(FetchedData::from_storage(Box::new(execution_results))), + }, + ); + + latch_count_check( + block_synchronizer.historical.as_ref(), + MAX_SIMULTANEOUS_PEERS, + format!( + "Latch count should be {} since we already had the first chunk and no responses with chunks != 0 were received.", + MAX_SIMULTANEOUS_PEERS + ) + .as_str(), + ); + + // Receive a fetch error. + let _effects = block_synchronizer.handle_event( + mock_reactor.effect_builder(), + rng, + Event::ExecutionResultsFetched { + block_hash: *block.hash(), + result: Err(FetcherError::Absent { + id: Box::new(BlockExecutionResultsOrChunkId::new(*block.hash())), + peer: peers[0], + }), + }, + ); + + latch_count_check( + block_synchronizer.historical.as_ref(), + MAX_SIMULTANEOUS_PEERS - 1, + format!( + "Latch count should be {} since we received an `Absent` response.", + MAX_SIMULTANEOUS_PEERS - 1 + ) + .as_str(), + ); +} diff --git a/node/src/components/block_synchronizer/tests/test_utils.rs b/node/src/components/block_synchronizer/tests/test_utils.rs new file mode 100644 index 0000000000..f6e54d3b39 --- /dev/null +++ b/node/src/components/block_synchronizer/tests/test_utils.rs @@ -0,0 +1,35 @@ +use std::{collections::BTreeMap, convert::TryInto}; + +use crate::types::TrieOrChunkId; +#[cfg(test)] +use casper_types::ChunkWithProof; +use rand::Rng; + +pub(crate) fn chunks_with_proof_from_data(data: &[u8]) -> BTreeMap { + (0..data.chunks(ChunkWithProof::CHUNK_SIZE_BYTES).count()) + .map(|index| { + ( + index as u64, + ChunkWithProof::new(data, index.try_into().unwrap()).unwrap(), + ) + }) + .collect() +} + +pub(crate) fn test_chunks_with_proof( + num_chunks: u64, +) -> (Vec, Vec, Vec) { + let mut rng = rand::thread_rng(); + let data: Vec = (0..ChunkWithProof::CHUNK_SIZE_BYTES * num_chunks as usize) + .map(|_| rng.gen()) + .collect(); + + let chunks = 
chunks_with_proof_from_data(&data); + + let chunk_ids: Vec = chunks + .iter() + .map(|(index, chunk)| TrieOrChunkId(*index, chunk.proof().root_hash())) + .collect(); + + (chunks.values().cloned().collect(), chunk_ids, data) +} diff --git a/node/src/components/block_synchronizer/trie_accumulator.rs b/node/src/components/block_synchronizer/trie_accumulator.rs new file mode 100644 index 0000000000..b3204da22c --- /dev/null +++ b/node/src/components/block_synchronizer/trie_accumulator.rs @@ -0,0 +1,347 @@ +#[cfg(test)] +mod tests; + +use std::{ + collections::{HashMap, HashSet}, + fmt::{self, Debug}, +}; + +use datasize::DataSize; +use derive_more::From; +use rand::seq::SliceRandom; +use serde::Serialize; +use thiserror::Error; +use tracing::{debug, error, trace, warn}; + +use casper_storage::global_state::trie::TrieRaw; +use casper_types::{bytesrepr::Bytes, ChunkWithProof, Digest, DisplayIter}; + +use crate::{ + components::{ + fetcher::{ + EmptyValidationMetadata, Error as FetcherError, FetchItem, FetchResult, FetchedData, + }, + Component, + }, + effect::{ + announcements::PeerBehaviorAnnouncement, + requests::{FetcherRequest, TrieAccumulatorRequest}, + EffectBuilder, EffectExt, Effects, Responder, + }, + types::{NodeId, TrieOrChunk, TrieOrChunkId}, + NodeRng, +}; + +const COMPONENT_NAME: &str = "trie_accumulator"; + +#[derive(Debug, From, Error, Clone, Serialize)] +pub(crate) enum Error { + #[error("trie accumulator ran out of peers trying to fetch item with error: {0}; unreliable peers: {}", DisplayIter::new(.1))] + // Note: Due to being a thrice nested component, this error type tighter size constraints. For + // this reason, we have little choice but to box the `FetcherError`. 
+ PeersExhausted(Box>, Vec), + #[error("trie accumulator couldn't fetch trie chunk ({0}, {1}); unreliable peers: {}", DisplayIter::new(.2))] + Absent(Digest, u64, Vec), + #[error("request contained no peers; trie = {0}")] + NoPeers(Digest), +} + +#[derive(Debug, Clone, Serialize)] +pub(crate) struct Response { + trie: Box, + unreliable_peers: Vec, +} + +impl Response { + pub(crate) fn new(trie: TrieRaw, unreliable_peers: Vec) -> Self { + Response { + trie: Box::new(trie), + unreliable_peers, + } + } + + pub(crate) fn trie(self) -> Box { + self.trie + } + + pub(crate) fn unreliable_peers(&self) -> &Vec { + &self.unreliable_peers + } +} + +#[derive(DataSize, Debug)] +struct PartialChunks { + peers: Vec, + responders: Vec>>, + chunks: HashMap, + unreliable_peers: Vec, +} + +impl PartialChunks { + fn missing_chunk(&self, count: u64) -> Option { + (0..count).find(|idx| !self.chunks.contains_key(idx)) + } + + fn assemble_chunks(&self, count: u64) -> TrieRaw { + let data: Bytes = (0..count) + .filter_map(|index| self.chunks.get(&index)) + .flat_map(|chunk| chunk.chunk()) + .copied() + .collect(); + TrieRaw::new(data) + } + + fn next_peer(&mut self) -> Option<&NodeId> { + // remove the last used peer from the queue + self.peers.pop(); + self.peers.last() + } + + fn merge(&mut self, other: PartialChunks) { + self.chunks.extend(other.chunks); + self.responders.extend(other.responders); + // set used for filtering out duplicates + let mut filter_peers: HashSet = self.peers.iter().cloned().collect(); + for peer in other.peers { + if filter_peers.insert(peer) { + self.peers.push(peer); + } + } + } + + fn respond(self, value: Result) -> Effects { + self.responders + .into_iter() + .flat_map(|responder| responder.respond(value.clone()).ignore()) + .collect() + } + + fn mark_peer_unreliable(&mut self, peer: &NodeId) { + self.unreliable_peers.push(*peer); + } +} + +#[derive(DataSize, Debug)] +pub(super) struct TrieAccumulator { + partial_chunks: HashMap, +} + +#[derive(DataSize, 
Debug, From, Serialize)] +pub(crate) enum Event { + #[from] + Request(TrieAccumulatorRequest), + TrieOrChunkFetched { + id: TrieOrChunkId, + fetch_result: FetchResult, + }, +} + +impl fmt::Display for Event { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Event::Request(_) => write!(f, "trie fetcher request"), + Event::TrieOrChunkFetched { id, .. } => { + write!(f, "got a result for trie or chunk {}", id) + } + } + } +} + +impl TrieAccumulator { + pub(crate) fn new() -> Self { + TrieAccumulator { + partial_chunks: Default::default(), + } + } + + fn consume_trie_or_chunk( + &mut self, + effect_builder: EffectBuilder, + trie_or_chunk: TrieOrChunk, + ) -> Effects + where + REv: From> + From + Send, + { + let TrieOrChunkId(_index, hash) = trie_or_chunk.fetch_id(); + match trie_or_chunk { + TrieOrChunk::Value(trie) => match self.partial_chunks.remove(&hash) { + None => { + error!(%hash, "fetched a trie we didn't request!"); + Effects::new() + } + Some(partial_chunks) => { + trace!(%hash, "got a full trie"); + let unreliable_peers = partial_chunks.unreliable_peers.clone(); + partial_chunks.respond(Ok(Response::new(trie.into_inner(), unreliable_peers))) + } + }, + TrieOrChunk::ChunkWithProof(chunk) => self.consume_chunk(effect_builder, chunk), + } + } + + fn consume_chunk( + &mut self, + effect_builder: EffectBuilder, + chunk: ChunkWithProof, + ) -> Effects + where + REv: From> + From + Send, + { + let digest = chunk.proof().root_hash(); + let index = chunk.proof().index(); + let count = chunk.proof().count(); + let mut partial_chunks = match self.partial_chunks.remove(&digest) { + None => { + error!(%digest, %index, "got a chunk that wasn't requested"); + return Effects::new(); + } + Some(partial_chunks) => partial_chunks, + }; + + // Add the downloaded chunk to cache. + let _ = partial_chunks.chunks.insert(index, chunk); + + // Check if we can now return a complete trie. 
+ match partial_chunks.missing_chunk(count) { + Some(missing_index) => { + let peer = match partial_chunks.peers.last() { + Some(peer) => *peer, + None => { + debug!( + %digest, %missing_index, + "no peers to download the next chunk from, giving up", + ); + let unreliable_peers = partial_chunks.unreliable_peers.clone(); + return partial_chunks.respond(Err(Error::Absent( + digest, + index, + unreliable_peers, + ))); + } + }; + let next_id = TrieOrChunkId(missing_index, digest); + self.try_download_chunk(effect_builder, next_id, peer, partial_chunks) + } + None => { + let trie = partial_chunks.assemble_chunks(count); + let unreliable_peers = partial_chunks.unreliable_peers.clone(); + partial_chunks.respond(Ok(Response::new(trie, unreliable_peers))) + } + } + } + + fn try_download_chunk( + &mut self, + effect_builder: EffectBuilder, + id: TrieOrChunkId, + peer: NodeId, + partial_chunks: PartialChunks, + ) -> Effects + where + REv: From> + Send, + { + let hash = id.digest(); + let maybe_old_partial_chunks = self.partial_chunks.insert(*hash, partial_chunks); + if let Some(old_partial_chunks) = maybe_old_partial_chunks { + // unwrap is safe as we just inserted a value at this key + self.partial_chunks + .get_mut(hash) + .unwrap() + .merge(old_partial_chunks); + } + effect_builder + .fetch::(id, peer, Box::new(EmptyValidationMetadata)) + .event(move |fetch_result| Event::TrieOrChunkFetched { id, fetch_result }) + } +} + +impl Component for TrieAccumulator +where + REv: From> + From + Send, +{ + type Event = Event; + + fn handle_event( + &mut self, + effect_builder: EffectBuilder, + rng: &mut NodeRng, + event: Self::Event, + ) -> Effects { + trace!(?event, "TrieAccumulator: handling event"); + match event { + Event::Request(TrieAccumulatorRequest { + hash, + responder, + mut peers, + }) => { + peers.shuffle(rng); + let trie_id = TrieOrChunkId(0, hash); + let peer = match peers.last() { + Some(peer) => *peer, + None => { + error!(%hash, "tried to fetch trie with no peers 
available"); + return responder.respond(Err(Error::NoPeers(hash))).ignore(); + } + }; + let partial_chunks = PartialChunks { + responders: vec![responder], + peers, + chunks: Default::default(), + unreliable_peers: Vec::new(), + }; + self.try_download_chunk(effect_builder, trie_id, peer, partial_chunks) + } + Event::TrieOrChunkFetched { id, fetch_result } => { + let hash = id.digest(); + match fetch_result { + Err(error) => match self.partial_chunks.remove(hash) { + None => { + error!(%id, + "got a fetch result for a chunk we weren't trying to fetch", + ); + Effects::new() + } + Some(mut partial_chunks) => { + debug!(%error, %id, "error fetching trie chunk"); + partial_chunks.mark_peer_unreliable(error.peer()); + // try with the next peer, if possible + match partial_chunks.next_peer().cloned() { + Some(next_peer) => self.try_download_chunk( + effect_builder, + id, + next_peer, + partial_chunks, + ), + None => { + warn!(%id, "couldn't fetch chunk"); + let faulty_peers = partial_chunks.unreliable_peers.clone(); + partial_chunks.respond(Err(Error::PeersExhausted( + Box::new(error), + faulty_peers, + ))) + } + } + } + }, + Ok(FetchedData::FromStorage { + item: trie_or_chunk, + }) => { + debug!(%trie_or_chunk, "got trie or chunk from storage"); + self.consume_trie_or_chunk(effect_builder, *trie_or_chunk) + } + Ok(FetchedData::FromPeer { + item: trie_or_chunk, + peer, + }) => { + debug!(%peer, %trie_or_chunk, "got trie or chunk from peer"); + self.consume_trie_or_chunk(effect_builder, *trie_or_chunk) + } + } + } + } + } + + fn name(&self) -> &str { + COMPONENT_NAME + } +} diff --git a/node/src/components/block_synchronizer/trie_accumulator/tests.rs b/node/src/components/block_synchronizer/trie_accumulator/tests.rs new file mode 100644 index 0000000000..e957acd156 --- /dev/null +++ b/node/src/components/block_synchronizer/trie_accumulator/tests.rs @@ -0,0 +1,268 @@ +use super::*; +use crate::{ + components::block_synchronizer::tests::test_utils::test_chunks_with_proof, + 
reactor::{EventQueueHandle, QueueKind, Scheduler}, + types::ValueOrChunk, + utils, +}; +use casper_types::testing::TestRng; +use futures::channel::oneshot; + +/// Event for the mock reactor. +#[derive(Debug)] +enum ReactorEvent { + FetcherRequest(FetcherRequest), + PeerBehaviorAnnouncement(#[allow(dead_code)] PeerBehaviorAnnouncement), +} + +impl From for ReactorEvent { + fn from(req: PeerBehaviorAnnouncement) -> ReactorEvent { + ReactorEvent::PeerBehaviorAnnouncement(req) + } +} + +impl From> for ReactorEvent { + fn from(req: FetcherRequest) -> ReactorEvent { + ReactorEvent::FetcherRequest(req) + } +} + +struct MockReactor { + scheduler: &'static Scheduler, + effect_builder: EffectBuilder, +} + +impl MockReactor { + fn new() -> Self { + let scheduler = utils::leak(Scheduler::new(QueueKind::weights(), None)); + let event_queue_handle = EventQueueHandle::without_shutdown(scheduler); + let effect_builder = EffectBuilder::new(event_queue_handle); + MockReactor { + scheduler, + effect_builder, + } + } + + fn effect_builder(&self) -> EffectBuilder { + self.effect_builder + } + + async fn expect_fetch_event(&self, chunk_id: &TrieOrChunkId, peer: &NodeId) { + let ((_ancestor, reactor_event), _) = self.scheduler.pop().await; + match reactor_event { + ReactorEvent::FetcherRequest(request) => { + assert_eq!(request.id, *chunk_id); + assert_eq!(request.peer, *peer); + } + _ => { + unreachable!(); + } + }; + } +} + +async fn download_chunk_and_check( + reactor: &MockReactor, + trie_accumulator: &mut TrieAccumulator, + chunk_to_download: &TrieOrChunkId, + peer: &NodeId, + partial_chunks: PartialChunks, +) { + // Try to download a chunk from a peer + let mut effects = trie_accumulator.try_download_chunk( + reactor.effect_builder(), + *chunk_to_download, + *peer, + partial_chunks, + ); + // A fetch effect should be generated + assert_eq!(effects.len(), 1); + + // Run the effects and check if the correct fetch was requested + tokio::spawn(async move { effects.remove(0).await }); + 
reactor.expect_fetch_event(chunk_to_download, peer).await; +} + +#[test] +fn unsolicited_chunk_produces_no_effects() { + let reactor = MockReactor::new(); + + // Empty accumulator. Does not expect any chunks. + let mut trie_accumulator = TrieAccumulator::new(); + let (test_chunks, _, _) = test_chunks_with_proof(1); + + let effects = trie_accumulator.consume_chunk(reactor.effect_builder(), test_chunks[0].clone()); + assert!(effects.is_empty()); +} + +#[tokio::test] +async fn try_download_chunk_generates_fetch_effect() { + let mut rng = TestRng::new(); + let reactor = MockReactor::new(); + let mut trie_accumulator = TrieAccumulator::new(); + + // Create a test chunk + let (_, chunk_ids, _) = test_chunks_with_proof(1); + let peer = NodeId::random(&mut rng); + let chunks = PartialChunks { + peers: vec![peer], + responders: Default::default(), + chunks: Default::default(), + unreliable_peers: Default::default(), + }; + + download_chunk_and_check( + &reactor, + &mut trie_accumulator, + &chunk_ids[0], + &peer, + chunks, + ) + .await; +} + +#[tokio::test] +async fn failed_fetch_retriggers_download_with_different_peer() { + let mut rng = TestRng::new(); + let reactor = MockReactor::new(); + let mut trie_accumulator = TrieAccumulator::new(); + + // Create a test chunk + let (_, chunk_ids, _) = test_chunks_with_proof(1); + + // Create multiple peers + let peers: Vec = (0..2).map(|_| NodeId::random(&mut rng)).collect(); + + let chunks = PartialChunks { + peers: peers.clone(), + responders: Default::default(), + chunks: Default::default(), + unreliable_peers: Default::default(), + }; + + download_chunk_and_check( + &reactor, + &mut trie_accumulator, + &chunk_ids[0], + &peers[1], + chunks, + ) + .await; + + // Simulate a fetch error + let fetch_result: FetchResult = Err(FetcherError::TimedOut { + id: Box::new(chunk_ids[0]), + peer: peers[1], + }); + let event = Event::TrieOrChunkFetched { + id: chunk_ids[0], + fetch_result, + }; + + // Handling the fetch error should make the 
trie accumulator generate another fetch for the + // same chunk but with a different peer + let mut effects = trie_accumulator.handle_event(reactor.effect_builder(), &mut rng, event); + assert_eq!(effects.len(), 1); + + // Run the effects and check if the fetch was re-triggered + tokio::spawn(async move { effects.remove(0).await }); + reactor.expect_fetch_event(&chunk_ids[0], &peers[0]).await; +} + +#[tokio::test] +async fn fetched_chunk_triggers_download_of_missing_chunk() { + let mut rng = TestRng::new(); + let reactor = MockReactor::new(); + let mut trie_accumulator = TrieAccumulator::new(); + + // Create test chunks + let (test_chunks, chunk_ids, _) = test_chunks_with_proof(2); + let peer = NodeId::random(&mut rng); + + let chunks = PartialChunks { + peers: vec![peer], + responders: Default::default(), + chunks: Default::default(), + unreliable_peers: Default::default(), + }; + + download_chunk_and_check( + &reactor, + &mut trie_accumulator, + &chunk_ids[1], + &peer, + chunks, + ) + .await; + + // Simulate a successful fetch + let chunk = Box::new(ValueOrChunk::ChunkWithProof(test_chunks[1].clone())); + let fetch_result: FetchResult = Ok(FetchedData::FromPeer { peer, item: chunk }); + let event = Event::TrieOrChunkFetched { + id: chunk_ids[1], + fetch_result, + }; + + // Process the downloaded chunk + let mut effects = trie_accumulator.handle_event(reactor.effect_builder(), &mut rng, event); + assert_eq!(effects.len(), 1); + + // Check if a new fetch was issued for the missing chunk + tokio::spawn(async move { effects.remove(0).await }); + reactor.expect_fetch_event(&chunk_ids[0], &peer).await; +} + +#[tokio::test] +async fn trie_returned_when_all_chunks_fetched() { + let mut rng = TestRng::new(); + let reactor = MockReactor::new(); + let mut trie_accumulator = TrieAccumulator::new(); + + // Create test chunks + let (test_chunks, chunk_ids, data) = test_chunks_with_proof(3); + let peer = NodeId::random(&mut rng); + + // Create a responder to assert the validity 
of the assembled trie + let (sender, receiver) = oneshot::channel(); + let responder = Responder::without_shutdown(sender); + + let chunks = PartialChunks { + peers: vec![peer], + responders: vec![responder], + chunks: Default::default(), + unreliable_peers: Default::default(), + }; + + download_chunk_and_check( + &reactor, + &mut trie_accumulator, + &chunk_ids[0], + &peer, + chunks, + ) + .await; + + let mut effects = Effects::new(); + + for i in 0..3 { + // Simulate a successful fetch + let fetch_result: FetchResult = Ok(FetchedData::FromPeer { + peer, + item: Box::new(ValueOrChunk::ChunkWithProof(test_chunks[i].clone())), + }); + let event = Event::TrieOrChunkFetched { + id: chunk_ids[i], + fetch_result, + }; + + // Expect to get one effect for each call. First 2 will be requests to download missing + // chunks. Last one will be the returned trie since all chunks are available. + effects = trie_accumulator.handle_event(reactor.effect_builder(), &mut rng, event); + assert_eq!(effects.len(), 1); + } + + // Validate the returned trie + tokio::spawn(async move { effects.remove(0).await }); + let result_trie = receiver.await.unwrap().expect("Expected trie").trie; + assert_eq!(*result_trie, TrieRaw::new(Bytes::from(data))); +} diff --git a/node/src/components/block_validator.rs b/node/src/components/block_validator.rs index 42ee3c5a2a..289f0c7609 100644 --- a/node/src/components/block_validator.rs +++ b/node/src/components/block_validator.rs @@ -1,394 +1,945 @@ //! Block validator //! -//! The block validator checks whether all the deploys included in the proto block exist, either -//! locally or on the network. +//! The block validator checks whether all the transactions included in the block payload exist, +//! either locally or on the network. //! -//! When multiple requests are made to validate the same proto block, they will eagerly return true -//! if valid, but only fail if all sources have been exhausted. This is only relevant when calling -//! 
for validation of the same protoblock multiple times at the same time. +//! When multiple requests are made to validate the same block payload, they will eagerly return +//! true if valid, but only fail if all sources have been exhausted. This is only relevant when +//! calling for validation of the same proposed block multiple times at the same time. -mod keyed_counter; +mod config; +mod event; +mod state; +#[cfg(test)] +mod tests; use std::{ - collections::{hash_map::Entry, BTreeMap, HashMap, HashSet, VecDeque}, - convert::Infallible, - fmt::Debug, - hash::Hash, + collections::{BTreeMap, BTreeSet, HashMap, HashSet}, sync::Arc, }; use datasize::DataSize; -use derive_more::{Display, From}; -use itertools::Itertools; -use smallvec::{smallvec, SmallVec}; -use tracing::info; +use tracing::{debug, error, trace, warn}; + +use casper_types::{ + Approval, ApprovalsHash, Chainspec, EraId, FinalitySignature, FinalitySignatureId, PublicKey, + RewardedSignatures, SingleBlockRewardedSignatures, Timestamp, Transaction, TransactionHash, + TransactionId, +}; use crate::{ - components::{block_proposer::DeployType, Component}, + components::{ + consensus::{ClContext, ProposedBlock}, + fetcher::{self, EmptyValidationMetadata, FetchResult, FetchedData}, + Component, + }, effect::{ + announcements::FatalAnnouncement, requests::{BlockValidationRequest, FetcherRequest, StorageRequest}, - EffectBuilder, EffectExt, EffectOptionExt, Effects, Responder, + EffectBuilder, EffectExt, Effects, Responder, + }, + fatal, + types::{ + BlockWithMetadata, InvalidProposalError, NodeId, TransactionFootprint, ValidatorMatrix, }, - types::{appendable_block::AppendableBlock, Block, Chainspec, Deploy, DeployHash, ProtoBlock}, NodeRng, }; -use keyed_counter::KeyedCounter; - -use super::fetcher::FetchResult; +pub use config::Config; +pub(crate) use event::Event; +use state::{AddResponderResult, BlockValidationState, MaybeStartFetching}; -// TODO: Consider removing this trait. 
-pub trait BlockLike: Eq + Hash { - fn deploys(&self) -> Vec<&DeployHash>; -} +const COMPONENT_NAME: &str = "block_validator"; -impl BlockLike for Block { - fn deploys(&self) -> Vec<&DeployHash> { - self.deploy_hashes() - .iter() - .chain(self.transfer_hashes().iter()) - .collect() +impl ProposedBlock { + fn timestamp(&self) -> Timestamp { + self.context().timestamp() } -} -impl BlockLike for ProtoBlock { - fn deploys(&self) -> Vec<&DeployHash> { - self.deploys_and_transfers_iter().collect() + /// How many transactions are being tracked? + pub(crate) fn transaction_count(&self) -> usize { + self.value().count(None) } -} - -/// Block validator component event. -#[derive(Debug, From, Display)] -pub enum Event { - /// A request made of the block validator component. - #[from] - Request(BlockValidationRequest), - - /// A deploy has been successfully found. - #[display(fmt = "deploy {} found", deploy_hash)] - DeployFound { - deploy_hash: DeployHash, - deploy_type: Box, - }, - - /// A request to find a specific deploy, potentially from a peer, failed. - #[display(fmt = "deploy {} missing", _0)] - DeployMissing(DeployHash), - - /// Deploy was invalid. Unable to convert to a deploy type. - #[display(fmt = "deploy {} invalid", _0)] - CannotConvertDeploy(DeployHash), -} -/// State of the current process of block validation. -/// -/// Tracks whether or not there are deploys still missing and who is interested in the final result. -#[derive(DataSize, Debug)] -pub(crate) struct BlockValidationState { - /// Appendable block ensuring that the deploys satisfy the validity conditions. - appendable_block: AppendableBlock, - /// The deploys that have not yet been "crossed off" the list of potential misses. - missing_deploys: HashSet, - /// A list of responders that are awaiting an answer. - responders: SmallVec<[Responder<(bool, T)>; 2]>, - /// Peers that should have the data. 
- sources: VecDeque, -} - -impl BlockValidationState -where - I: PartialEq + Eq + 'static, -{ - /// Adds alternative source of data. - /// Returns true if we already know about the peer. - fn add_source(&mut self, peer: I) -> bool { - if self.sources.contains(&peer) { - true - } else { - self.sources.push_back(peer); - false - } + pub(crate) fn all_transactions( + &self, + ) -> impl Iterator)> { + self.value().all_transactions() } +} - /// Returns a peer, if there is any, that we haven't yet tried. - fn source(&mut self) -> Option { - self.sources.pop_front() - } +/// The return type of trying to handle a validation request as an already-existing request. +enum MaybeHandled { + /// The request is already being handled - return the wrapped effects and finish. + Handled(Effects), + /// The request is new - it still needs to be handled. + NotHandled(BlockValidationRequest), } #[derive(DataSize, Debug)] -pub(crate) struct BlockValidator { - /// Chainspec loaded for deploy validation. +pub(crate) struct BlockValidator { + /// Component configuration. + config: Config, + /// Chainspec loaded for transaction validation. #[data_size(skip)] chainspec: Arc, + /// Validator matrix. + #[data_size(skip)] + validator_matrix: ValidatorMatrix, /// State of validation of a specific block. - validation_states: HashMap>, - /// Number of requests for a specific deploy hash still in flight. - in_flight: KeyedCounter, + validation_states: HashMap, BlockValidationState>, + /// Requests awaiting storing of a block, keyed by the height of the block being awaited. + requests_on_hold: BTreeMap>, + /// The gas price for validation of proposed blocks. + current_gas_price: u8, } -impl BlockValidator -where - T: BlockLike + Debug + Send + Clone + 'static, - I: Clone + Debug + Send + 'static + Send, -{ +impl BlockValidator { /// Creates a new block validator instance. 
- pub(crate) fn new(chainspec: Arc) -> Self { + pub(crate) fn new( + chainspec: Arc, + validator_matrix: ValidatorMatrix, + config: Config, + current_gas_price: u8, + ) -> Self { BlockValidator { chainspec, + validator_matrix, + config, validation_states: HashMap::new(), - in_flight: KeyedCounter::default(), + requests_on_hold: BTreeMap::new(), + current_gas_price, } } - /// Prints a log message about an invalid block with duplicated deploys. - fn log_block_with_replay(&self, sender: I, block: &T) { - let mut deploy_counts = BTreeMap::new(); - for deploy_hash in block.deploys() { - *deploy_counts.entry(*deploy_hash).or_default() += 1; - } - let duplicates = deploy_counts - .into_iter() - .filter_map(|(deploy_hash, count): (DeployHash, usize)| { - (count > 1).then(|| format!("{} * {:?}", count, deploy_hash)) - }) - .join(", "); - info!( - ?sender, %duplicates, - "received invalid block containing duplicated deploys" - ); - } -} - -impl Component for BlockValidator -where - T: BlockLike + Debug + Send + Clone + 'static, - I: Clone + Debug + Send + PartialEq + Eq + 'static, - REv: From> - + From> - + From> - + From - + Send, -{ - type Event = Event; - type ConstructionError = Infallible; - - fn handle_event( + /// If the request is already being handled, we record the new info and return effects. If not, + /// the request is returned for processing as a new request. 
+ fn try_handle_as_existing_request( &mut self, effect_builder: EffectBuilder, - _rng: &mut NodeRng, - event: Self::Event, - ) -> Effects { - let mut effects = Effects::new(); - match event { - Event::Request(BlockValidationRequest { + request: BlockValidationRequest, + ) -> MaybeHandled + where + REv: From + + From> + + From> + + Send, + { + if let Some(state) = self.validation_states.get_mut(&request.block) { + let BlockValidationRequest { block, sender, responder, - block_timestamp, - }) => { - let block_deploys = block.deploys(); - let deploy_count = block_deploys.len(); - // Collect the deploys in a set; this also deduplicates them. - let block_deploys: HashSet<_> = block_deploys - .iter() - .map(|deploy_hash| **deploy_hash) - .collect(); - if block_deploys.len() != deploy_count { - self.log_block_with_replay(sender, &block); - return responder.respond((false, block)).ignore(); + .. + } = request; + debug!(%sender, %block, "already validating proposed block"); + match state.add_responder(responder) { + AddResponderResult::Added => {} + AddResponderResult::ValidationCompleted { + responder, + response_to_send, + } => { + debug!( + ?response_to_send, + "proposed block validation already completed" + ); + return MaybeHandled::Handled(responder.respond(response_to_send).ignore()); + } + } + state.add_holder(sender); + + let effects = match state.start_fetching() { + MaybeStartFetching::Start { + holder, + missing_transactions, + missing_signatures, + } => fetch_transactions_and_signatures( + effect_builder, + holder, + missing_transactions, + missing_signatures, + ), + MaybeStartFetching::Ongoing => { + debug!("ongoing fetches while validating proposed block - noop"); + Effects::new() } - if block_deploys.is_empty() { - // If there are no deploys, return early. 
- return responder.respond((true, block)).ignore(); + MaybeStartFetching::Unable => { + debug!("no new info while validating proposed block - responding `false`"); + respond_invalid( + Box::new(InvalidProposalError::UnableToFetch), + state.take_responders(), + ) } + MaybeStartFetching::ValidationSucceeded | MaybeStartFetching::ValidationFailed => { + // If validation is already completed, we should have exited in the + // `AddResponderResult::ValidationCompleted` branch above. + error!("proposed block validation already completed - noop"); + Effects::new() + } + }; + MaybeHandled::Handled(effects) + } else { + MaybeHandled::NotHandled(request) + } + } + + fn handle_new_request( + &mut self, + effect_builder: EffectBuilder, + request: BlockValidationRequest, + ) -> Effects + where + REv: From + + From> + + From> + + From + + From + + Send, + { + debug!(sender = %request.sender, block = %request.block, "validating new proposed block"); + debug_assert!(!self.validation_states.contains_key(&request.block)); + + if request.block.value().rewarded_signatures().has_some() { + // The block contains cited signatures - we have to read the relevant blocks and find + // out who the validators are in order to decode the signature IDs + let signature_rewards_max_delay = + self.chainspec.core_config.signature_rewards_max_delay; + let minimum_block_height = request + .proposed_block_height + .saturating_sub(signature_rewards_max_delay); + + debug!( + proposed_block=?request.block, + %minimum_block_height, + proposed_block_height=%request.proposed_block_height, + "block cites signatures, validation required - requesting past blocks from storage" + ); + + effect_builder + .collect_past_blocks_with_metadata( + minimum_block_height..request.proposed_block_height, + false, + ) + .event( + move |past_blocks_with_metadata| Event::GotPastBlocksWithMetadata { + past_blocks_with_metadata, + request, + }, + ) + } else { + self.handle_new_request_with_signatures(effect_builder, request, 
HashSet::new()) + } + } - // TODO: Clean this up to use `or_insert_with_key` once - // https://github.com/rust-lang/rust/issues/71024 is stabilized. - match self.validation_states.entry(block) { - Entry::Occupied(mut entry) => { - // The entry already exists. - if entry.get().missing_deploys.is_empty() { - // Block has already been validated successfully, early return to - // caller. - effects.extend(responder.respond((true, entry.key().clone())).ignore()); - } else { - // We register ourselves as someone interested in the ultimate - // validation result. - entry.get_mut().responders.push(responder); - // And add an alternative source of data. - entry.get_mut().add_source(sender); - } + /// This function pairs the `SingleBlockRewardedSignatures` entries from `rewarded_signatures` + /// with the relevant past blocks and their metadata. If a block for which some signatures are + /// cited is missing, or if some signatures are double-cited, it will return `None`. + fn relevant_blocks_and_cited_signatures<'b, 'c>( + past_blocks_with_metadata: &'b [Option], + proposed_block_height: u64, + rewarded_signatures: &'c RewardedSignatures, + ) -> Result< + Vec<(&'b BlockWithMetadata, &'c SingleBlockRewardedSignatures)>, + Box, + > { + let mut result = Vec::new(); + // Check whether we know all the blocks for which the proposed block cites some signatures, + // and if no signatures are doubly cited. 
+ for ((past_block_height, signatures), maybe_block) in rewarded_signatures + .iter_with_height(proposed_block_height) + .zip(past_blocks_with_metadata.iter().rev()) + { + match maybe_block { + None if signatures.has_some() => { + trace!(%past_block_height, "maybe_block = None if signatures.has_some() - returning"); + return Err(Box::new( + InvalidProposalError::RewardSignaturesMissingCitedBlock { + cited_block_height: past_block_height, + }, + )); + } + None => { + // we have no block, but there are also no signatures cited for this block, so + // we can continue + trace!(%past_block_height, "maybe_block = None"); + } + Some(block) => { + let padded_signatures = block.block.rewarded_signatures().clone().left_padded( + proposed_block_height.saturating_sub(past_block_height) as usize, + ); + trace!( + ?padded_signatures, + ?rewarded_signatures, + intersection = ?rewarded_signatures.intersection(&padded_signatures), + "maybe_block is Some" + ); + if rewarded_signatures + .intersection(&padded_signatures) + .has_some() + { + // block cited a signature that has been cited before - it is invalid! + debug!( + %past_block_height, + "maybe_block is Some, nonzero intersection with previous" + ); + return Err(Box::new(InvalidProposalError::RewardSignatureReplay { + cited_block_height: past_block_height, + })); } - Entry::Vacant(entry) => { - // Our entry is vacant - create an entry to track the state. - let missing_deploys: HashSet = - entry.key().deploys().iter().map(|hash| **hash).collect(); - - let in_flight = &mut self.in_flight; - let fetch_effects: Effects> = block_deploys - .iter() - .flat_map(|deploy_hash| { - // For every request, increase the number of in-flight... - in_flight.inc(deploy_hash); - // ...then request it. 
- fetch_deploy(effect_builder, *deploy_hash, sender.clone()) + // everything is OK - save the block in the result + result.push((block, signatures)); + } + } + } + Ok(result) + } + + fn era_ids_vec(past_blocks_with_metadata: &[Option]) -> Vec> { + // This will create a vector of era ids for the past blocks corresponding to cited + // signatures. The index of the entry in the vector will be the number of blocks in the + // past relative to the current block, minus 1 (i.e., 0 is the previous block, 1 is the one + // before that, etc.) - these indices will correspond directly to the indices in + // RewardedSignatures. + past_blocks_with_metadata + .iter() + .rev() + .map(|maybe_metadata| { + maybe_metadata + .as_ref() + .map(|metadata| metadata.block.era_id()) + }) + .collect() + } + + fn get_relevant_validators( + &mut self, + past_blocks_with_metadata: &[Option], + ) -> HashMap> { + let era_ids_vec = Self::era_ids_vec(past_blocks_with_metadata); + // get the set of unique era ids that are present in the cited blocks + let era_ids: HashSet<_> = era_ids_vec.iter().flatten().copied().collect(); + let validator_matrix = &self.validator_matrix; + + era_ids + .into_iter() + .filter_map(move |era_id| { + validator_matrix + .validator_weights(era_id) + .map(|weights| (era_id, weights.into_validator_public_keys().collect())) + }) + .collect() + } + + fn handle_got_past_blocks_with_metadata( + &mut self, + effect_builder: EffectBuilder, + past_blocks_with_metadata: Vec>, + request: BlockValidationRequest, + ) -> Effects + where + REv: From + + From> + + From> + + From + + Send, + { + let rewarded_signatures = request.block.value().rewarded_signatures(); + + match Self::relevant_blocks_and_cited_signatures( + &past_blocks_with_metadata, + request.proposed_block_height, + rewarded_signatures, + ) { + Ok(blocks_and_signatures) => { + let validators = self.get_relevant_validators(&past_blocks_with_metadata); + + // This will be a set of signature IDs of the signatures included in 
the block, but + // not found in metadata in storage. + let mut missing_sigs = HashSet::new(); + + for (block_with_metadata, single_block_rewarded_sigs) in blocks_and_signatures { + let era_id = block_with_metadata.block.era_id(); + let Some(all_validators) = validators.get(&era_id) else { + return fatal!(effect_builder, "couldn't get validators for {}", era_id) + .ignore(); + }; + let public_keys = single_block_rewarded_sigs + .clone() + .to_validator_set(all_validators.iter().cloned()); + let block_hash = *block_with_metadata.block.hash(); + missing_sigs.extend( + public_keys + .into_iter() + .filter(move |public_key| { + !block_with_metadata + .block_signatures + .has_finality_signature(public_key) }) - .collect(); - effects.extend(fetch_effects); - - let deploy_config = self.chainspec.deploy_config; - entry.insert(BlockValidationState { - appendable_block: AppendableBlock::new(deploy_config, block_timestamp), - missing_deploys, - responders: smallvec![responder], - sources: VecDeque::new(), /* This is empty b/c we create the first - * request using `sender`. */ - }); - } + .map(move |public_key| { + FinalitySignatureId::new(block_hash, era_id, public_key) + }), + ); } + + trace!( + ?missing_sigs, + "handle_got_past_blocks_with_metadata missing_sigs" + ); + + self.handle_new_request_with_signatures(effect_builder, request, missing_sigs) } - Event::DeployFound { - deploy_hash, - deploy_type, - } => { - // We successfully found a hash. Decrease the number of outstanding requests. - self.in_flight.dec(&deploy_hash); - - // If a deploy is received for a given block that makes that block invalid somehow, - // mark it for removal. - let mut invalid = Vec::new(); - - // Our first pass updates all validation states, crossing off the found deploy. 
- for (key, state) in self.validation_states.iter_mut() { - if state.missing_deploys.remove(&deploy_hash) { - if let Err(err) = state.appendable_block.add(deploy_hash, &*deploy_type) { - // Notify everyone still waiting on it that all is lost. - info!(block=?key, %deploy_hash, ?deploy_type, ?err, "block invalid"); - invalid.push(key.clone()); - } - } + Err(error) => { + if let InvalidProposalError::RewardSignaturesMissingCitedBlock { + cited_block_height, + } = *error + { + // We are missing some blocks necessary for unpacking signatures from storage - + // put the request on hold for now. + self.requests_on_hold + .entry(cited_block_height) + .or_default() + .push(request); + Effects::new() + } else { + // Rewarded signatures pre-validation failed + respond_invalid(error, Some(request.responder)) } + } + } + } - // Now we remove all states that have finished and notify the requestors. - self.validation_states.retain(|key, state| { - if invalid.contains(key) { - state.responders.drain(..).for_each(|responder| { - effects.extend(responder.respond((false, key.clone())).ignore()); - }); - return false; - } - if state.missing_deploys.is_empty() { - // This one is done and valid. 
- state.responders.drain(..).for_each(|responder| { - effects.extend(responder.respond((true, key.clone())).ignore()); - }); - return false; - } - true - }); + fn handle_block_stored( + &mut self, + effect_builder: EffectBuilder, + stored_block_height: u64, + ) -> Effects + where + REv: From + + From + + From> + + From> + + From + + Send, + { + let mut pending_requests = vec![]; + + while self + .requests_on_hold + .first_key_value() + .is_some_and(|(height, _)| *height <= stored_block_height) + { + // unwrap is safe - we'd break the loop if there were no elements + pending_requests.extend(self.requests_on_hold.pop_first().unwrap().1); + } + + pending_requests + .into_iter() + .flat_map(|request| self.handle_new_request(effect_builder, request)) + .collect() + } + + fn handle_new_request_with_signatures( + &mut self, + effect_builder: EffectBuilder, + BlockValidationRequest { + block, + sender, + responder, + .. + }: BlockValidationRequest, + missing_signatures: HashSet, + ) -> Effects + where + REv: From + + From> + + From> + + From + + Send, + { + if let Some(old_state) = self.validation_states.get_mut(&block) { + // if we got two requests for the same block in quick succession, it is possible that + // a state has been created and inserted for one of them while the other one was + // awaiting the past blocks from storage; in such a case just save the holder and + // responders, and return no effects, as all the fetching will have already been + // started + match old_state.add_responder(responder) { + AddResponderResult::Added => {} + AddResponderResult::ValidationCompleted { + responder, + response_to_send, + } => { + debug!( + ?response_to_send, + "proposed block validation already completed" + ); + return responder.respond(response_to_send).ignore(); + } } - Event::DeployMissing(deploy_hash) => { - info!(%deploy_hash, "request to download deploy timed out"); - // A deploy failed to fetch. If there is still hope (i.e. 
other outstanding - // requests), we just ignore this little accident. - if self.in_flight.dec(&deploy_hash) != 0 { - return Effects::new(); + old_state.add_holder(sender); + return Effects::new(); + } + + let (mut state, maybe_responder) = BlockValidationState::new( + &block, + missing_signatures, + sender, + responder, + self.current_gas_price, + self.chainspec.as_ref(), + ); + let effects = match state.start_fetching() { + MaybeStartFetching::Start { + holder, + missing_transactions, + missing_signatures, + } => fetch_transactions_and_signatures( + effect_builder, + holder, + missing_transactions, + missing_signatures, + ), + MaybeStartFetching::ValidationSucceeded => { + debug!("no transactions - block validation complete"); + debug_assert!(maybe_responder.is_some()); + respond_valid(maybe_responder) + } + MaybeStartFetching::ValidationFailed => { + debug_assert!(maybe_responder.is_some()); + respond_invalid( + Box::new(InvalidProposalError::FailedFetcherValidation), + maybe_responder, + ) + } + MaybeStartFetching::Ongoing | MaybeStartFetching::Unable => { + // This `MaybeStartFetching` variant should never be returned here. + error!(%state, "invalid state while handling new block validation"); + debug_assert!(false, "invalid state {}", state); + respond_invalid( + Box::new(InvalidProposalError::UnexpectedFetchStatus), + state.take_responders(), + ) + } + }; + self.validation_states.insert(block, state); + self.purge_oldest_complete(); + effects + } + + fn purge_oldest_complete(&mut self) { + let mut completed_times: Vec<_> = self + .validation_states + .values() + .filter_map(BlockValidationState::block_timestamp_if_completed) + .collect(); + // Sort from newest (highest timestamp) to oldest. + completed_times.sort_unstable_by(|lhs, rhs| rhs.cmp(lhs)); + + // Normally we'll only need to remove a maximum of a single entry, but loop until we don't + // exceed the completed limit to cover any edge cases. 
+ let max_completed_entries = self.config.max_completed_entries as usize; + while completed_times.len() > max_completed_entries { + self.validation_states.retain(|_block, state| { + if completed_times.len() <= max_completed_entries { + return true; + } + if state.block_timestamp_if_completed().as_ref() == completed_times.last() { + debug!( + %state, + num_completed_remaining = (completed_times.len() - 1), + "purging completed block validation state" + ); + let _ = completed_times.pop(); + return false; } + true + }); + } + } - // Flag indicating whether we've retried fetching the deploy. - let mut retried = false; + fn update_era_price(&mut self, current_price: u8) { + self.current_gas_price = current_price; + } - self.validation_states.retain(|key, state| { - if !state.missing_deploys.contains(&deploy_hash) { - return true + fn handle_transaction_fetched( + &mut self, + effect_builder: EffectBuilder, + transaction_hash: TransactionHash, + result: FetchResult, + ) -> Effects + where + REv: From + + From> + + From> + + Send, + { + match &result { + Ok(FetchedData::FromPeer { peer, .. }) => { + debug!(%transaction_hash, %peer, "fetched transaction from peer") + } + Ok(FetchedData::FromStorage { .. }) => { + debug!(%transaction_hash, "fetched transaction locally") + } + Err(error) => warn!(%transaction_hash, %error, "could not fetch transaction"), + } + match result { + Ok(FetchedData::FromStorage { item } | FetchedData::FromPeer { item, .. }) => { + let item_hash = item.hash(); + if item_hash != transaction_hash { + // Hard failure - change state to Invalid. 
+ // this should not be reachable + let responders = self + .validation_states + .values_mut() + .flat_map(|state| state.try_mark_invalid(&transaction_hash)); + return respond_invalid( + Box::new(InvalidProposalError::FetchedIncorrectTransactionById { + expected_transaction_hash: transaction_hash, + actual_transaction_hash: item_hash, + }), + responders, + ); + } + let transaction_footprint = match TransactionFootprint::new(&self.chainspec, &item) + { + Ok(footprint) => footprint, + Err(invalid_transaction_error) => { + warn!( + %transaction_hash, ?invalid_transaction_error, + "could not convert transaction", + ); + // Hard failure - change state to Invalid. + let responders = self + .validation_states + .values_mut() + .flat_map(|state| state.try_mark_invalid(&transaction_hash)); + return respond_invalid(invalid_transaction_error.into(), responders); } - if retried { - // We don't want to retry downloading the same element more than once. - return true + }; + + let mut effects = Effects::new(); + for state in self.validation_states.values_mut() { + let responders = state + .try_add_transaction_footprint(&transaction_hash, &transaction_footprint); + if !responders.is_empty() { + let ret = match &state { + BlockValidationState::InProgress { .. } => { + // this seems to be unreachable as currently written + respond_invalid( + Box::new(InvalidProposalError::TransactionFetchingAborted), + responders, + ) + } + BlockValidationState::Invalid { error, .. } => { + respond_invalid(error.clone(), responders) + } + BlockValidationState::Valid(_) => respond_valid(responders), + }; + effects.extend(ret); } - match state.source() { - Some(peer) => { - info!(%deploy_hash, ?peer, "trying the next peer"); - // There's still hope to download the deploy. - effects.extend( - fetch_deploy(effect_builder, - deploy_hash, - peer, - )); - retried = true; - true - }, - None => { - // Notify everyone still waiting on it that all is lost. 
- info!(block=?key, %deploy_hash, "could not validate the deploy. block is invalid"); - // This validation state contains a failed deploy hash, it can never - // succeed. - state.responders.drain(..).for_each(|responder| { - effects.extend(responder.respond((false, key.clone())).ignore()); - }); - false - } + } + effects + } + Err(error) => { + match error { + fetcher::Error::Absent { peer, .. } + | fetcher::Error::Rejected { peer, .. } + | fetcher::Error::TimedOut { peer, .. } => { + // Soft failure - just mark the holder as failed and see if we can start + // fetching using a different holder. + let mut effects = Effects::new(); + self.validation_states.values_mut().for_each(|state| { + state.try_mark_holder_failed(&peer); + match state.start_fetching() { + MaybeStartFetching::Start { + holder, + missing_transactions, + missing_signatures, + } => { + debug!( + %holder, + missing_transactions_len = missing_transactions.len(), + "fetching missing transactions from different peer" + ); + effects.extend(fetch_transactions_and_signatures( + effect_builder, + holder, + missing_transactions, + missing_signatures, + )); + } + MaybeStartFetching::Unable => { + debug!( + "exhausted peers while validating proposed block - \ + responding `false`" + ); + effects.extend(respond_invalid( + Box::new(InvalidProposalError::FetcherError(format!( + "{:?}", + error + ))), + state.take_responders(), + )); + } + MaybeStartFetching::Ongoing + | MaybeStartFetching::ValidationSucceeded + | MaybeStartFetching::ValidationFailed => {} + } + }); + effects + } + fetcher::Error::CouldNotConstructGetRequest { .. } + | fetcher::Error::ValidationMetadataMismatch { .. } => { + // Hard failure - change state to Invalid. 
+ let responders = self + .validation_states + .values_mut() + .flat_map(|state| state.try_mark_invalid(&transaction_hash)); + respond_invalid( + Box::new(InvalidProposalError::FetcherError(format!("{:?}", error))), + responders, + ) } - }); + } + } + } + } - if retried { - // If we retried, we need to increase this counter. - self.in_flight.inc(&deploy_hash); + fn handle_finality_signature_fetched( + &mut self, + effect_builder: EffectBuilder, + finality_signature_id: FinalitySignatureId, + result: FetchResult, + ) -> Effects + where + REv: From + + From> + + From> + + Send, + { + match &result { + Ok(FetchedData::FromPeer { peer, .. }) => { + debug!(%finality_signature_id, %peer, "fetched finality signature from peer") + } + Ok(FetchedData::FromStorage { .. }) => { + debug!(%finality_signature_id, "fetched finality signature locally") + } + Err(error) => { + warn!(%finality_signature_id, %error, "could not fetch finality signature") + } + } + match result { + Ok(FetchedData::FromStorage { .. } | FetchedData::FromPeer { .. }) => { + let mut effects = Effects::new(); + for state in self.validation_states.values_mut() { + let responders = state.try_add_signature(&finality_signature_id); + if !responders.is_empty() { + let ret = match &state { + BlockValidationState::InProgress { .. } => { + // this seems to be unreachable as currently written + respond_invalid( + Box::new( + InvalidProposalError::FinalitySignatureFetchingAborted, + ), + responders, + ) + } + BlockValidationState::Invalid { error, .. } => { + respond_invalid(error.clone(), responders) + } + BlockValidationState::Valid(_) => respond_valid(responders), + }; + effects.extend(ret); + } } + effects } - Event::CannotConvertDeploy(deploy_hash) => { - info!(%deploy_hash, "cannot convert deploy to deploy type"); - // Deploy is invalid. There's no point waiting for other in-flight requests to - // finish. 
- self.in_flight.dec(&deploy_hash); - - self.validation_states.retain(|key, state| { - if state.missing_deploys.contains(&deploy_hash) { - // Notify everyone still waiting on it that all is lost. - info!(block=?key, %deploy_hash, "could not validate the deploy. block is invalid"); - // This validation state contains a failed deploy hash, it can never - // succeed. - state.responders.drain(..).for_each(|responder| { - effects.extend(responder.respond((false, key.clone())).ignore()); + Err(error) => { + match error { + fetcher::Error::Absent { peer, .. } + | fetcher::Error::Rejected { peer, .. } + | fetcher::Error::TimedOut { peer, .. } => { + // Soft failure - just mark the holder as failed and see if we can start + // fetching using a different holder. + let mut effects = Effects::new(); + self.validation_states.values_mut().for_each(|state| { + state.try_mark_holder_failed(&peer); + match state.start_fetching() { + MaybeStartFetching::Start { + holder, + missing_transactions, + missing_signatures, + } => { + debug!( + %holder, + missing_transactions_len = missing_transactions.len(), + "fetching missing transactions and signatures from different \ + peer" + ); + effects.extend(fetch_transactions_and_signatures( + effect_builder, + holder, + missing_transactions, + missing_signatures, + )); + } + MaybeStartFetching::Unable => { + debug!( + "exhausted peers while validating proposed block - \ + responding `false`" + ); + effects.extend(respond_invalid( + Box::new(InvalidProposalError::FetcherError(format!("{:?}", error))), + state.take_responders())); + } + MaybeStartFetching::Ongoing + | MaybeStartFetching::ValidationSucceeded + | MaybeStartFetching::ValidationFailed => {} + } }); - false - } else { - true + effects } - }); + fetcher::Error::CouldNotConstructGetRequest { .. } + | fetcher::Error::ValidationMetadataMismatch { .. } => { + // Hard failure - change state to Invalid. 
+ let responders = self.validation_states.values_mut().flat_map(|state| { + state.try_mark_invalid_signature(&finality_signature_id) + }); + respond_invalid( + Box::new(InvalidProposalError::FetcherError(format!("{:?}", error))), + responders, + ) + } + } } } - effects } } -/// Returns effects that fetch the deploy and validate it. -fn fetch_deploy( +fn fetch_transactions_and_signatures( effect_builder: EffectBuilder, - deploy_hash: DeployHash, - sender: I, -) -> Effects> + holder: NodeId, + missing_transactions: HashMap, + missing_signatures: HashSet, +) -> Effects +where + REv: From + + From> + + From> + + Send, +{ + let mut effects: Effects = Effects::new(); + for (transaction_hash, approvals_hash) in missing_transactions { + let transaction_id = match transaction_hash { + TransactionHash::Deploy(deploy_hash) => { + TransactionId::new(deploy_hash.into(), approvals_hash) + } + TransactionHash::V1(v1_hash) => TransactionId::new(v1_hash.into(), approvals_hash), + }; + effects.extend( + effect_builder + .fetch::(transaction_id, holder, Box::new(EmptyValidationMetadata)) + .event(move |result| Event::TransactionFetched { + transaction_hash, + result, + }), + ); + } + + for missing_signature in missing_signatures { + effects.extend( + effect_builder + .fetch::( + Box::new(missing_signature.clone()), + holder, + Box::new(EmptyValidationMetadata), + ) + .event(move |result| Event::FinalitySignatureFetched { + finality_signature_id: Box::new(missing_signature), + result, + }), + ) + } + + effects +} + +fn respond_valid( + responders: impl IntoIterator>>>, +) -> Effects { + responders + .into_iter() + .flat_map(|responder| responder.respond(Ok(())).ignore()) + .collect() +} + +fn respond_invalid( + error: Box, + responders: impl IntoIterator>>>, +) -> Effects { + responders + .into_iter() + .flat_map(|responder| responder.respond(Err(error.clone())).ignore()) + .collect() +} + +impl Component for BlockValidator where - REv: From> - + From> + REv: From + + From + + From> + 
+ From> + From - + From> + + From + Send, - T: BlockLike + Debug + Send + Clone + 'static, - I: Clone + Send + PartialEq + Eq + 'static, { - let validate_deploy = move |result: FetchResult| match result { - FetchResult::FromStorage(deploy) | FetchResult::FromPeer(deploy, _) => deploy - .deploy_type() - .map_or(Event::CannotConvertDeploy(deploy_hash), |deploy_type| { - Event::DeployFound { - deploy_hash, - deploy_type: Box::new(deploy_type), - } - }), - }; + type Event = Event; - effect_builder - .fetch_deploy(deploy_hash, sender) - .map_or_else(validate_deploy, move || Event::DeployMissing(deploy_hash)) + fn name(&self) -> &str { + COMPONENT_NAME + } + + fn handle_event( + &mut self, + effect_builder: EffectBuilder, + _rng: &mut NodeRng, + event: Self::Event, + ) -> Effects { + match event { + Event::Request(request) => { + debug!(block = ?request.block, "validating proposed block"); + match self.try_handle_as_existing_request(effect_builder, request) { + MaybeHandled::Handled(effects) => effects, + MaybeHandled::NotHandled(request) => { + self.handle_new_request(effect_builder, request) + } + } + } + Event::GotPastBlocksWithMetadata { + past_blocks_with_metadata, + request, + } => self.handle_got_past_blocks_with_metadata( + effect_builder, + past_blocks_with_metadata, + request, + ), + Event::BlockStored(stored_block_height) => { + self.handle_block_stored(effect_builder, stored_block_height) + } + Event::TransactionFetched { + transaction_hash, + result, + } => self.handle_transaction_fetched(effect_builder, transaction_hash, result), + Event::FinalitySignatureFetched { + finality_signature_id, + result, + } => self.handle_finality_signature_fetched( + effect_builder, + *finality_signature_id, + result, + ), + Event::UpdateEraGasPrice(_, current_price) => { + self.update_era_price(current_price); + Effects::new() + } + } + } } diff --git a/node/src/components/block_validator/config.rs b/node/src/components/block_validator/config.rs new file mode 100644 index 
0000000000..2263273632 --- /dev/null +++ b/node/src/components/block_validator/config.rs @@ -0,0 +1,16 @@ +use datasize::DataSize; +use serde::{Deserialize, Serialize}; + +/// Configuration options for block validation. +#[derive(Copy, Clone, DataSize, Debug, Deserialize, Serialize)] +pub struct Config { + pub max_completed_entries: u32, +} + +impl Default for Config { + fn default() -> Self { + Config { + max_completed_entries: 3, + } + } +} diff --git a/node/src/components/block_validator/event.rs b/node/src/components/block_validator/event.rs new file mode 100644 index 0000000000..19365ac507 --- /dev/null +++ b/node/src/components/block_validator/event.rs @@ -0,0 +1,38 @@ +use derive_more::{Display, From}; + +use casper_types::{EraId, FinalitySignature, FinalitySignatureId, Transaction, TransactionHash}; + +use crate::{ + components::fetcher::FetchResult, effect::requests::BlockValidationRequest, + types::BlockWithMetadata, +}; + +#[derive(Debug, From, Display)] +pub(crate) enum Event { + #[from] + Request(BlockValidationRequest), + + #[display(fmt = "past blocks read from storage")] + GotPastBlocksWithMetadata { + past_blocks_with_metadata: Vec>, + request: BlockValidationRequest, + }, + + #[display(fmt = "block {} has been stored", _0)] + BlockStored(u64), + + #[display(fmt = "{} fetched", transaction_hash)] + TransactionFetched { + transaction_hash: TransactionHash, + result: FetchResult, + }, + + #[display(fmt = "{} fetched", finality_signature_id)] + FinalitySignatureFetched { + finality_signature_id: Box, + result: FetchResult, + }, + + #[display(fmt = "{} price for era {}", _1, _0)] + UpdateEraGasPrice(EraId, u8), +} diff --git a/node/src/components/block_validator/keyed_counter.rs b/node/src/components/block_validator/keyed_counter.rs deleted file mode 100644 index 9fb1279bd5..0000000000 --- a/node/src/components/block_validator/keyed_counter.rs +++ /dev/null @@ -1,115 +0,0 @@ -//! Tracks positive integers for keys. 
- -use std::{ - collections::HashMap, - hash::Hash, - ops::{AddAssign, SubAssign}, -}; - -use datasize::DataSize; - -/// A key-counter. -/// -/// Allows tracking a counter for any key `K`. -/// -/// Any counter that is set to `0` will not use any memory. -#[derive(DataSize, Debug)] -pub(super) struct KeyedCounter(HashMap); - -impl KeyedCounter { - /// Creates a new keyed counter. - fn new() -> Self { - KeyedCounter(Default::default()) - } -} - -impl Default for KeyedCounter { - fn default() -> Self { - Self::new() - } -} - -impl KeyedCounter -where - K: Clone + Eq + Hash, -{ - /// Increases count for a specific key. - /// - /// Returns the new value. - pub(super) fn inc(&mut self, key: &K) -> usize { - match self.0.get_mut(key) { - None => { - self.0.insert(key.clone(), 1); - 1 - } - Some(value) => { - value.add_assign(1); - *value - } - } - } - - /// Decreases count for a specific key. - /// - /// Returns the new value. - /// - /// # Panics - /// - /// Panics if `dec` would become negative. 
- pub(super) fn dec(&mut self, key: &K) -> usize { - match self.0.get_mut(key) { - Some(value) => { - assert_ne!(*value, 0, "counter should never be zero in tracker"); - - value.sub_assign(1); - - if *value != 0 { - return *value; - } - } - None => panic!("tried to decrease in-flight to negative value"), - }; - - assert_eq!(self.0.remove(key), Some(0)); - - 0 - } -} - -#[cfg(test)] -mod tests { - use super::KeyedCounter; - - #[test] - fn can_count_up() { - let mut kc = KeyedCounter::new(); - assert_eq!(kc.inc(&'a'), 1); - assert_eq!(kc.inc(&'b'), 1); - assert_eq!(kc.inc(&'a'), 2); - } - - #[test] - fn can_count_down() { - let mut kc = KeyedCounter::new(); - assert_eq!(kc.inc(&'a'), 1); - assert_eq!(kc.inc(&'b'), 1); - assert_eq!(kc.dec(&'a'), 0); - assert_eq!(kc.dec(&'b'), 0); - } - - #[test] - #[should_panic(expected = "tried to decrease in-flight to negative value")] - fn panics_on_underflow() { - let mut kc = KeyedCounter::new(); - assert_eq!(kc.inc(&'a'), 1); - assert_eq!(kc.dec(&'a'), 0); - kc.dec(&'a'); - } - - #[test] - #[should_panic(expected = "tried to decrease in-flight to negative value")] - fn panics_on_immediate_underflow() { - let mut kc = KeyedCounter::new(); - kc.dec(&'a'); - } -} diff --git a/node/src/components/block_validator/state.rs b/node/src/components/block_validator/state.rs new file mode 100644 index 0000000000..207c7538b9 --- /dev/null +++ b/node/src/components/block_validator/state.rs @@ -0,0 +1,1349 @@ +use std::{ + collections::{hash_map::Entry, BTreeSet, HashMap, HashSet}, + fmt::{self, Debug, Display, Formatter}, + iter, mem, +}; + +use datasize::DataSize; +use tracing::{debug, error, warn}; + +use casper_types::{ + Approval, ApprovalsHash, Chainspec, FinalitySignatureId, Timestamp, TransactionConfig, + TransactionHash, +}; + +use crate::{ + components::consensus::{ClContext, ProposedBlock}, + effect::Responder, + types::{ + appendable_block::AppendableBlock, InvalidProposalError, NodeId, TransactionFootprint, + }, +}; + +/// The 
state of a peer which claims to be a holder of the transactions. +#[derive(Clone, Copy, Eq, PartialEq, DataSize, Debug)] +pub(super) enum HolderState { + /// No fetch attempt has been made using this peer. + Unasked, + /// At least one fetch attempt has been made and no fetch attempts have failed when using this + /// peer. + Asked, + /// At least one fetch attempt has failed when using this peer. + Failed, +} + +/// The return type of `BlockValidationState::add_responder`. +pub(super) enum AddResponderResult { + /// The responder was added, meaning validation is still ongoing. + Added, + /// Validation is completed, so the responder should be called with the provided value. + ValidationCompleted { + responder: Responder>>, + response_to_send: Result<(), Box>, + }, +} + +/// The return type of `BlockValidationState::start_fetching`. +#[derive(Eq, PartialEq, Debug)] +pub(super) enum MaybeStartFetching { + /// Should start a new round of fetches. + Start { + holder: NodeId, + missing_transactions: HashMap, + missing_signatures: HashSet, + }, + /// No new round of fetches should be started as one is already in progress. + Ongoing, + /// We still have missing transactions, but all holders have failed. + Unable, + /// Validation has succeeded already. + ValidationSucceeded, + /// Validation has failed already. + ValidationFailed, +} + +#[derive(Clone, Eq, PartialEq, DataSize, Debug)] +pub(super) struct ApprovalInfo { + approvals: BTreeSet, + approvals_hash: ApprovalsHash, +} + +impl ApprovalInfo { + fn new(approvals: BTreeSet, approvals_hash: ApprovalsHash) -> Self { + ApprovalInfo { + approvals, + approvals_hash, + } + } +} + +/// State of the current process of block validation. +/// +/// Tracks whether there are transactions still missing and who is interested in the final +/// result. +#[derive(DataSize, Debug)] +pub(super) enum BlockValidationState { + /// The validity is not yet decided. 
+ InProgress { + /// Appendable block ensuring that the transactions satisfy the validity conditions. + appendable_block: AppendableBlock, + /// The set of approvals contains approvals from transactions that would be finalized with + /// the block. + missing_transactions: HashMap, + /// The set of finality signatures for past blocks cited in this block. + missing_signatures: HashSet, + /// The set of peers which each claim to hold all the transactions. + holders: HashMap, + /// A list of responders that are awaiting an answer. + responders: Vec>>>, + }, + /// The proposed block with the given timestamp is valid. + Valid(Timestamp), + /// The proposed block with the given timestamp is invalid, and the validation error. + /// + /// Note that only hard failures in validation will result in this state. For soft failures, + /// like failing to fetch from a peer, the state will remain `Unknown`, even if there are no + /// more peers to ask, since more peers could be provided before this `BlockValidationState` is + /// purged. + Invalid { + timestamp: Timestamp, + error: Box, + }, +} + +pub(super) type MaybeBlockValidationStateResponder = + Option>>>; + +impl BlockValidationState { + /// Returns a new `BlockValidationState`. + /// + /// If the new state is `Valid` or `Invalid`, the provided responder is also returned so it can + /// be actioned. 
+ pub(super) fn new( + proposed_block: &ProposedBlock, + missing_signatures: HashSet, + sender: NodeId, + responder: Responder>>, + current_gas_price: u8, + chainspec: &Chainspec, + ) -> (Self, MaybeBlockValidationStateResponder) { + let transaction_count = proposed_block.transaction_count(); + if transaction_count == 0 && missing_signatures.is_empty() { + let state = BlockValidationState::Valid(proposed_block.timestamp()); + return (state, Some(responder)); + } + + // this is an optimization, rejects proposal that exceeds lane limits OR + // proposes a transaction in an unsupported lane + if let Err(err) = + Self::validate_transaction_lane_counts(proposed_block, &chainspec.transaction_config) + { + let state = BlockValidationState::Invalid { + timestamp: proposed_block.timestamp(), + error: err, + }; + return (state, Some(responder)); + } + + let proposed_gas_price = proposed_block.value().current_gas_price(); + if current_gas_price != proposed_gas_price { + let state = BlockValidationState::Invalid { + timestamp: proposed_block.timestamp(), + error: Box::new(InvalidProposalError::InvalidGasPrice { + proposed_gas_price, + current_gas_price, + }), + }; + return (state, Some(responder)); + } + + let mut missing_transactions = HashMap::new(); + + for (transaction_hash, approvals) in proposed_block.all_transactions() { + let approval_info: ApprovalInfo = match ApprovalsHash::compute(approvals) { + Ok(approvals_hash) => ApprovalInfo::new(approvals.clone(), approvals_hash), + Err(error) => { + warn!(%transaction_hash, %error, "could not compute approvals hash"); + let state = BlockValidationState::Invalid { + timestamp: proposed_block.timestamp(), + error: Box::new(InvalidProposalError::InvalidApprovalsHash(format!( + "{}", + error + ))), + }; + return (state, Some(responder)); + } + }; + + // this checks to see if the same transaction has been included multiple + // times with different approvals, which is invalid + if missing_transactions + .insert(*transaction_hash, 
approval_info) + .is_some() + { + warn!(%transaction_hash, "duplicated transaction in proposed block"); + let state = BlockValidationState::Invalid { + timestamp: proposed_block.timestamp(), + error: Box::new(InvalidProposalError::CompetingApprovals { + transaction_hash: *transaction_hash, + }), + }; + return (state, Some(responder)); + } + } + + let state = BlockValidationState::InProgress { + appendable_block: AppendableBlock::new( + chainspec.transaction_config.clone(), + current_gas_price, + proposed_block.timestamp(), + ), + missing_transactions, + missing_signatures, + holders: iter::once((sender, HolderState::Unasked)).collect(), + responders: vec![responder], + }; + + (state, None) + } + + fn validate_transaction_lane_counts( + block: &ProposedBlock, + config: &TransactionConfig, + ) -> Result<(), Box> { + let lanes = config.transaction_v1_config.get_supported_lanes(); + if block.value().has_transaction_in_unsupported_lane(&lanes) { + return Err(Box::new(InvalidProposalError::UnsupportedLane)); + } + for supported_lane in lanes { + let transactions = block.value().count(Some(supported_lane)); + let lane_count_limit = config + .transaction_v1_config + .get_max_transaction_count(supported_lane); + if lane_count_limit < transactions as u64 { + warn!( + supported_lane, + lane_count_limit, transactions, "too many transactions in lane" + ); + return Err(Box::new(InvalidProposalError::ExceedsLaneLimit { + lane_id: supported_lane, + })); + } + } + + Ok(()) + } + + /// Adds the given responder to the collection if the current state is `InProgress` and returns + /// `Added`. + /// + /// If the state is not `InProgress`, `ValidationCompleted` is returned with the responder and + /// the value which should be provided to the responder. + pub(super) fn add_responder( + &mut self, + responder: Responder>>, + ) -> AddResponderResult { + match self { + BlockValidationState::InProgress { responders, .. 
} => { + responders.push(responder); + AddResponderResult::Added + } + BlockValidationState::Valid(_) => AddResponderResult::ValidationCompleted { + responder, + response_to_send: Ok(()), + }, + BlockValidationState::Invalid { error, .. } => { + AddResponderResult::ValidationCompleted { + responder, + response_to_send: Err(error.clone()), + } + } + } + } + + /// If the current state is `InProgress` and the peer isn't already known, adds the peer. + /// Otherwise, any existing entry is not updated and `false` is returned. + pub(super) fn add_holder(&mut self, holder: NodeId) { + match self { + BlockValidationState::InProgress { + appendable_block, + holders, + .. + } => match holders.entry(holder) { + Entry::Occupied(entry) => { + debug!( + block_timestamp = %appendable_block.timestamp(), + peer = %entry.key(), + "already registered peer as holder for block validation" + ); + } + Entry::Vacant(entry) => { + entry.insert(HolderState::Unasked); + } + }, + BlockValidationState::Valid(_) | BlockValidationState::Invalid { .. } => { + warn!(state = %self, "unexpected state when adding holder"); + } + } + } + + /// If the current state is `InProgress` and the holder is present, sets the holder's state to + /// `Failed`. + pub(super) fn try_mark_holder_failed(&mut self, holder: &NodeId) { + if let BlockValidationState::InProgress { holders, .. } = self { + if let Some(holder_state) = holders.get_mut(holder) { + debug_assert!(*holder_state != HolderState::Unasked); + *holder_state = HolderState::Failed; + } + } + } + + /// Returns fetch info based on the current state: + /// * if `InProgress` and there are no holders `Asked` (i.e. 
no ongoing fetches) and at least + /// one `Unasked` holder, returns `Start` + /// * if `InProgress` and any holder `Asked`, returns `Ongoing` + /// * if `InProgress` and all holders `Failed`, returns `Unable` + /// * if `Valid` or `Invalid`, returns `ValidationSucceeded` or `ValidationFailed` + /// respectively + pub(super) fn start_fetching(&mut self) -> MaybeStartFetching { + match self { + BlockValidationState::InProgress { + missing_transactions, + missing_signatures, + holders, + .. + } => { + if missing_transactions.is_empty() && missing_signatures.is_empty() { + error!( + "should always have missing transactions or signatures while in state \ + `InProgress`" + ); + debug_assert!(false, "invalid state"); + return MaybeStartFetching::ValidationFailed; + } + let mut unasked = None; + for (peer_id, holder_state) in holders.iter() { + match holder_state { + HolderState::Unasked => { + unasked = Some(*peer_id); + } + HolderState::Asked => return MaybeStartFetching::Ongoing, + HolderState::Failed => {} + } + } + + let holder = match unasked { + Some(peer) => peer, + None => return MaybeStartFetching::Unable, + }; + // Mark the holder as `Asked`. Safe to `expect` as we just found the entry above. + *holders.get_mut(&holder).expect("must be in set") = HolderState::Asked; + let missing_transactions = missing_transactions + .iter() + .map(|(dt_hash, infos)| (*dt_hash, infos.approvals_hash)) + .collect(); + let missing_signatures = missing_signatures.clone(); + MaybeStartFetching::Start { + holder, + missing_transactions, + missing_signatures, + } + } + BlockValidationState::Valid(_) => MaybeStartFetching::ValidationSucceeded, + BlockValidationState::Invalid { .. } => MaybeStartFetching::ValidationFailed, + } + } + + pub(super) fn take_responders( + &mut self, + ) -> Vec>>> { + match self { + BlockValidationState::InProgress { responders, .. } => mem::take(responders), + BlockValidationState::Valid(_) | BlockValidationState::Invalid { .. 
} => vec![], + } + } + + /// If the current state is `InProgress` and `dt_hash` is present, tries to add the footprint to + /// the appendable block to continue validation of the proposed block. + pub(super) fn try_add_transaction_footprint( + &mut self, + transaction_hash: &TransactionHash, + footprint: &TransactionFootprint, + ) -> Vec>>> { + let (new_state, responders) = match self { + BlockValidationState::InProgress { + appendable_block, + missing_transactions, + missing_signatures, + responders, + .. + } => { + let approvals_info = match missing_transactions.remove(transaction_hash) { + Some(info) => info, + None => { + // If this transaction is not present, just return. + return vec![]; + } + }; + // Try adding the footprint to the appendable block to see if the block remains + // valid. + let approvals = approvals_info.approvals; + let footprint = footprint.clone().with_approvals(approvals); + match appendable_block.add_transaction(&footprint) { + Ok(_) => { + if !missing_transactions.is_empty() || !missing_signatures.is_empty() { + // The appendable block is still valid, but we still have missing + // transactions - nothing further to do here. + debug!( + block_timestamp = %appendable_block.timestamp(), + missing_transactions_len = missing_transactions.len(), + "still missing transactions - block validation incomplete" + ); + return vec![]; + } + debug!( + block_timestamp = %appendable_block.timestamp(), + "no further missing transactions - block validation complete" + ); + let new_state = BlockValidationState::Valid(appendable_block.timestamp()); + (new_state, mem::take(responders)) + } + Err(error) => { + warn!(%transaction_hash, ?footprint, %error, "block invalid"); + let new_state = BlockValidationState::Invalid { + timestamp: appendable_block.timestamp(), + error: error.into(), + }; + (new_state, mem::take(responders)) + } + } + } + BlockValidationState::Valid(_) | BlockValidationState::Invalid { .. 
} => return vec![], + }; + *self = new_state; + responders + } + + /// If the current state is `InProgress` and `dt_hash` is present, tries to add the footprint to + /// the appendable block to continue validation of the proposed block. + pub(super) fn try_add_signature( + &mut self, + finality_signature_id: &FinalitySignatureId, + ) -> Vec>>> { + let (new_state, responders) = match self { + BlockValidationState::InProgress { + appendable_block, + missing_transactions, + missing_signatures, + responders, + .. + } => { + missing_signatures.remove(finality_signature_id); + if missing_signatures.is_empty() && missing_transactions.is_empty() { + debug!( + block_timestamp = %appendable_block.timestamp(), + "no further missing transactions or signatures - block validation complete" + ); + let new_state = BlockValidationState::Valid(appendable_block.timestamp()); + (new_state, mem::take(responders)) + } else { + debug!( + block_timestamp = %appendable_block.timestamp(), + missing_transactions_len = missing_transactions.len(), + missing_signatures_len = missing_signatures.len(), + "still missing transactions or signatures - block validation incomplete" + ); + return vec![]; + } + } + BlockValidationState::Valid(_) | BlockValidationState::Invalid { .. } => return vec![], + }; + *self = new_state; + responders + } + + /// If the current state is `InProgress` and `dt_hash` is present, sets the state to `Invalid` + /// and returns the responders. + pub(super) fn try_mark_invalid( + &mut self, + transaction_hash: &TransactionHash, + ) -> Vec>>> { + let (timestamp, responders) = match self { + BlockValidationState::InProgress { + appendable_block, + missing_transactions, + responders, + .. + } => { + if !missing_transactions.contains_key(transaction_hash) { + return vec![]; + } + (appendable_block.timestamp(), mem::take(responders)) + } + BlockValidationState::Valid(_) | BlockValidationState::Invalid { .. 
} => return vec![], + }; + *self = BlockValidationState::Invalid { + timestamp, + error: Box::new(InvalidProposalError::UnfetchedTransaction { + transaction_hash: *transaction_hash, + }), + }; + responders + } + + /// If the current state is `InProgress` and `finality_signature_id` is present, sets the state + /// to `Invalid` and returns the responders. + pub(super) fn try_mark_invalid_signature( + &mut self, + finality_signature_id: &FinalitySignatureId, + ) -> Vec>>> { + let (timestamp, responders) = match self { + BlockValidationState::InProgress { + appendable_block, + missing_signatures, + responders, + .. + } => { + if !missing_signatures.contains(finality_signature_id) { + return vec![]; + } + (appendable_block.timestamp(), mem::take(responders)) + } + BlockValidationState::Valid(_) | BlockValidationState::Invalid { .. } => return vec![], + }; + *self = BlockValidationState::Invalid { + timestamp, + error: Box::new(InvalidProposalError::InvalidFinalitySignature( + finality_signature_id.clone(), + )), + }; + responders + } + + pub(super) fn block_timestamp_if_completed(&self) -> Option { + match self { + BlockValidationState::InProgress { .. } => None, + BlockValidationState::Valid(timestamp) + | BlockValidationState::Invalid { timestamp, .. } => Some(*timestamp), + } + } + + #[cfg(test)] + pub(super) fn missing_hashes(&self) -> Vec { + match self { + BlockValidationState::InProgress { + missing_transactions, + .. + } => missing_transactions.keys().copied().collect(), + BlockValidationState::Valid(_) | BlockValidationState::Invalid { .. } => vec![], + } + } + + #[cfg(test)] + pub(super) fn holders_mut(&mut self) -> Option<&mut HashMap> { + match self { + BlockValidationState::InProgress { holders, .. } => Some(holders), + BlockValidationState::Valid(_) | BlockValidationState::Invalid { .. } => None, + } + } + + #[cfg(test)] + pub(super) fn responder_count(&self) -> usize { + match self { + BlockValidationState::InProgress { responders, .. 
} => responders.len(), + BlockValidationState::Valid(_) | BlockValidationState::Invalid { .. } => 0, + } + } + + #[cfg(test)] + pub(super) fn completed(&self) -> bool { + !matches!(self, BlockValidationState::InProgress { .. }) + } +} + +impl Display for BlockValidationState { + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + match self { + BlockValidationState::InProgress { + appendable_block, + missing_transactions, + missing_signatures, + holders, + responders, + } => { + write!( + formatter, + "BlockValidationState::InProgress({}, {} missing transactions, \ + {} missing signatures, {} holders, {} responders)", + appendable_block, + missing_transactions.len(), + missing_signatures.len(), + holders.len(), + responders.len() + ) + } + BlockValidationState::Valid(timestamp) => { + write!(formatter, "BlockValidationState::Valid({timestamp})") + } + BlockValidationState::Invalid { timestamp, error } => { + write!( + formatter, + "BlockValidationState::Invalid({timestamp} {:?})", + error + ) + } + } + } +} + +#[cfg(test)] +mod tests { + use futures::channel::oneshot; + use rand::Rng; + + use casper_types::{ + testing::TestRng, ChainspecRawBytes, TimeDiff, Transaction, TransactionHash, TransactionV1, + }; + + use super::{super::tests::*, *}; + use crate::utils::Loadable; + + struct Fixture<'a> { + rng: &'a mut TestRng, + transactions: Vec, + chainspec: Chainspec, + } + + impl<'a> Fixture<'a> { + fn new(rng: &'a mut TestRng) -> Self { + let (chainspec, _) = <(Chainspec, ChainspecRawBytes)>::from_resources("local"); + Fixture { + rng, + transactions: vec![], + chainspec, + } + } + + fn new_with_block_gas_limit(rng: &'a mut TestRng, block_limit: u64) -> Self { + let (mut chainspec, _) = <(Chainspec, ChainspecRawBytes)>::from_resources("local"); + chainspec.transaction_config.block_gas_limit = block_limit; + Fixture { + rng, + transactions: vec![], + chainspec, + } + } + + fn footprints(&self) -> Vec<(TransactionHash, TransactionFootprint)> { + 
self.transactions + .iter() + .map(|transaction| { + ( + transaction.hash(), + TransactionFootprint::new(&self.chainspec, transaction) + .expect("must create footprint"), + ) + }) + .collect() + } + + fn new_state( + &mut self, + mint_count: u64, + auction_count: u64, + install_upgrade_count: u64, + standard_count: u64, + ) -> (BlockValidationState, MaybeBlockValidationStateResponder) { + let total_non_transfer_count = standard_count + auction_count + install_upgrade_count; + let ttl = TimeDiff::from_seconds(10); + let timestamp = Timestamp::from(1000 + total_non_transfer_count + mint_count); + + let mint_for_block = { + let mut ret = vec![]; + for _ in 0..mint_count { + let txn = new_mint(self.rng, timestamp, ttl); + ret.push((txn.hash(), txn.approvals().clone())); + self.transactions.push(txn); + } + + ret + }; + + let auction_for_block = { + let mut ret = vec![]; + for _ in 0..auction_count { + let txn = new_auction(self.rng, timestamp, ttl); + ret.push((txn.hash(), txn.approvals().clone())); + self.transactions.push(txn); + } + ret + }; + + let install_upgrade_for_block = { + let mut ret = vec![]; + for _ in 0..install_upgrade_count { + let txn: Transaction = + TransactionV1::random_install_upgrade(self.rng, Some(timestamp), Some(ttl)) + .into(); + ret.push((txn.hash(), txn.approvals().clone())); + self.transactions.push(txn); + } + ret + }; + + let standard_for_block = { + let mut ret = vec![]; + for _ in 0..standard_count { + let txn = new_standard(self.rng, timestamp, ttl); + ret.push((txn.hash(), txn.approvals().clone())); + self.transactions.push(txn); + } + ret + }; + + let proposed_block = new_proposed_block( + timestamp, + mint_for_block, + auction_for_block, + install_upgrade_for_block, + standard_for_block, + ); + + BlockValidationState::new( + &proposed_block, + HashSet::new(), + NodeId::random(self.rng), + new_responder(), + 1u8, + &self.chainspec, + ) + } + } + + fn new_responder() -> Responder>> { + let (sender, _receiver) = oneshot::channel(); + 
Responder::without_shutdown(sender) + } + + // Please note: values in the following test cases must match the production chainspec. + const MAX_LARGE_COUNT: u64 = 1; + const MAX_AUCTION_COUNT: u64 = 650; + const MAX_INSTALL_UPGRADE_COUNT: u64 = 1; + const MAX_MINT_COUNT: u64 = 650; + + #[derive(Debug)] + struct TestCase { + mint_count: u64, + auction_count: u64, + install_upgrade_count: u64, + standard_count: u64, + state_validator: fn((BlockValidationState, MaybeBlockValidationStateResponder)) -> bool, + } + + const NO_TRANSACTIONS: TestCase = TestCase { + mint_count: 0, + auction_count: 0, + install_upgrade_count: 0, + standard_count: 0, + state_validator: |(state, responder)| { + responder.is_some() && matches!(state, BlockValidationState::Valid(_)) + }, + }; + + const FULL_AUCTION: TestCase = TestCase { + mint_count: 0, + auction_count: MAX_AUCTION_COUNT, + install_upgrade_count: 0, + standard_count: 0, + state_validator: |(state, responder)| { + responder.is_none() && matches!(state, BlockValidationState::InProgress { .. }) + }, + }; + + const LESS_THAN_MAX_AUCTION: TestCase = TestCase { + auction_count: FULL_AUCTION.auction_count - 1, + state_validator: |(state, responder)| { + responder.is_none() && matches!(state, BlockValidationState::InProgress { .. }) + }, + ..FULL_AUCTION + }; + + const TOO_MANY_AUCTION: TestCase = TestCase { + auction_count: FULL_AUCTION.auction_count + 1, + state_validator: |(state, responder)| { + responder.is_some() && matches!(state, BlockValidationState::Invalid { .. }) + }, + ..FULL_AUCTION + }; + + const FULL_INSTALL_UPGRADE: TestCase = TestCase { + mint_count: 0, + auction_count: 0, + install_upgrade_count: MAX_INSTALL_UPGRADE_COUNT, + standard_count: 0, + state_validator: |(state, responder)| { + responder.is_none() && matches!(state, BlockValidationState::InProgress { .. 
}) + }, + }; + + #[allow(dead_code)] + const LESS_THAN_MAX_INSTALL_UPGRADE: TestCase = TestCase { + install_upgrade_count: FULL_INSTALL_UPGRADE.install_upgrade_count - 1, + state_validator: |(state, responder)| { + responder.is_none() && matches!(state, BlockValidationState::InProgress { .. }) + }, + ..FULL_INSTALL_UPGRADE + }; + + const TOO_MANY_INSTALL_UPGRADE: TestCase = TestCase { + install_upgrade_count: FULL_INSTALL_UPGRADE.install_upgrade_count + 1, + state_validator: |(state, responder)| { + responder.is_some() && matches!(state, BlockValidationState::Invalid { .. }) + }, + ..FULL_INSTALL_UPGRADE + }; + + const FULL_STANDARD: TestCase = TestCase { + mint_count: 0, + auction_count: 0, + install_upgrade_count: 0, + standard_count: MAX_LARGE_COUNT, + state_validator: |(state, responder)| { + responder.is_none() && matches!(state, BlockValidationState::InProgress { .. }) + }, + }; + + // const LESS_THAN_MAX_STANDARD: TestCase = TestCase { + // standard_count: FULL_STANDARD.standard_count - 1, + // state_validator: |(state, responder)| { + // responder.is_none() && matches!(state, BlockValidationState::InProgress { .. }) + // }, + // ..FULL_STANDARD + // }; + + const TOO_MANY_STANDARD: TestCase = TestCase { + standard_count: FULL_STANDARD.standard_count + 1, + state_validator: |(state, responder)| { + responder.is_some() && matches!(state, BlockValidationState::Invalid { .. }) + }, + ..FULL_STANDARD + }; + + const FULL_MINT: TestCase = TestCase { + mint_count: MAX_MINT_COUNT, + auction_count: 0, + install_upgrade_count: 0, + standard_count: 0, + state_validator: |(state, responder)| { + responder.is_none() && matches!(state, BlockValidationState::InProgress { .. }) + }, + }; + + const LESS_THAN_MAX_MINT: TestCase = TestCase { + mint_count: FULL_MINT.mint_count - 1, + state_validator: |(state, responder)| { + responder.is_none() && matches!(state, BlockValidationState::InProgress { .. 
}) + }, + ..FULL_MINT + }; + + const TOO_MANY_MINT: TestCase = TestCase { + mint_count: FULL_MINT.mint_count + 1, + state_validator: |(state, responder)| { + responder.is_some() && matches!(state, BlockValidationState::Invalid { .. }) + }, + ..FULL_MINT + }; + + fn run_test_case( + TestCase { + mint_count, + auction_count, + install_upgrade_count, + standard_count, + state_validator, + }: TestCase, + rng: &mut TestRng, + ) { + let mut fixture = Fixture::new(rng); + let state = fixture.new_state( + mint_count, + auction_count, + install_upgrade_count, + standard_count, + ); + assert!(state_validator(state)); + } + + #[test] + fn new_state_should_be_valid_with_no_transactions() { + let mut rng = TestRng::new(); + run_test_case(NO_TRANSACTIONS, &mut rng); + } + + #[test] + fn new_state_should_respect_auction_limits() { + let mut rng = TestRng::new(); + run_test_case(TOO_MANY_AUCTION, &mut rng); + run_test_case(FULL_AUCTION, &mut rng); + run_test_case(LESS_THAN_MAX_AUCTION, &mut rng); + } + + #[test] + fn new_state_should_respect_install_upgrade_limits() { + let mut rng = TestRng::new(); + run_test_case(TOO_MANY_INSTALL_UPGRADE, &mut rng); + run_test_case(FULL_INSTALL_UPGRADE, &mut rng); + //TODO: Fix test setup so this isn't identical to the no transactions case + //run_test_case(LESS_THAN_MAX_INSTALL_UPGRADE, &mut rng); + } + + #[test] + fn new_state_should_respect_standard_limits() { + let mut rng = TestRng::new(); + run_test_case(TOO_MANY_STANDARD, &mut rng); + run_test_case(FULL_STANDARD, &mut rng); + // NOTE: current prod chainspec has a limit of 1 large transaction, so one less is 0 which + // makes the test invalid run_test_case(LESS_THAN_MAX_STANDARD, &mut rng); + } + + #[test] + fn new_state_should_respect_mint_limits() { + let mut rng = TestRng::new(); + run_test_case(TOO_MANY_MINT, &mut rng); + run_test_case(FULL_MINT, &mut rng); + run_test_case(LESS_THAN_MAX_MINT, &mut rng); + } + + #[test] + fn new_state_should_be_invalid_with_duplicated_transaction() { + 
let mut rng = TestRng::new(); + let fixture = Fixture::new(&mut rng); + + let timestamp = Timestamp::from(1000); + let mint = vec![new_mint(fixture.rng, timestamp, TimeDiff::from_millis(200)); 2]; + + let mint_for_block: Vec<(TransactionHash, BTreeSet)> = mint + .iter() + .map(|transaction| (transaction.hash(), transaction.approvals())) + .collect(); + + let proposed_block = new_proposed_block(timestamp, mint_for_block, vec![], vec![], vec![]); + + let (state, maybe_responder) = BlockValidationState::new( + &proposed_block, + HashSet::new(), + NodeId::random(fixture.rng), + new_responder(), + 1u8, + &fixture.chainspec, + ); + + assert!(matches!(state, BlockValidationState::Invalid { .. })); + assert!(maybe_responder.is_some()); + } + + #[test] + fn new_state_should_be_in_progress_with_some_transactions() { + let mut rng = TestRng::new(); + let mut fixture = Fixture::new(&mut rng); + + // This test must generate number of transactions within the limits as per the chainspec. + let (transfer_count, auction_count, install_upgrade_count, standard_count) = loop { + let transfer_count = fixture.rng.gen_range(0..10); + let auction_count = fixture.rng.gen_range(0..20); + let install_upgrade_count = fixture.rng.gen_range(0..2); + let standard_count = fixture.rng.gen_range(0..2); + // Ensure at least one transaction is generated. Otherwise, the state will be Valid. + if transfer_count + auction_count + install_upgrade_count + standard_count > 0 { + break ( + transfer_count, + auction_count, + install_upgrade_count, + standard_count, + ); + } + }; + let (state, maybe_responder) = fixture.new_state( + transfer_count, + auction_count, + install_upgrade_count, + standard_count, + ); + + match state { + BlockValidationState::InProgress { + missing_transactions, + holders, + responders, + .. 
+ } => { + assert_eq!( + missing_transactions.len() as u64, + standard_count + transfer_count + install_upgrade_count + auction_count + ); + assert_eq!(holders.len(), 1); + assert_eq!(holders.values().next().unwrap(), &HolderState::Unasked); + assert_eq!(responders.len(), 1); + } + BlockValidationState::Valid(_) | BlockValidationState::Invalid { .. } => { + panic!("unexpected state") + } + } + assert!(maybe_responder.is_none()); + } + + #[test] + fn should_add_responder_if_in_progress() { + let mut rng = TestRng::new(); + let mut fixture = Fixture::new(&mut rng); + let (mut state, _maybe_responder) = fixture.new_state(2, 2, 1, 1); + assert!(matches!(state, BlockValidationState::InProgress { .. })); + assert_eq!(state.responder_count(), 1); + + let add_responder_result = state.add_responder(new_responder()); + assert!(matches!(add_responder_result, AddResponderResult::Added)); + assert_eq!(state.responder_count(), 2); + } + + #[test] + fn should_not_add_responder_if_valid() { + let mut state = BlockValidationState::Valid(Timestamp::from(1000)); + let add_responder_result = state.add_responder(new_responder()); + assert!(matches!( + add_responder_result, + AddResponderResult::ValidationCompleted { + response_to_send: Ok(()), + .. + } + )); + assert_eq!(state.responder_count(), 0); + } + + #[test] + fn should_not_add_responder_if_invalid() { + let err = InvalidProposalError::InvalidTransaction( + "should_not_add_responder_if_invalid".to_string(), + ); + let mut state = BlockValidationState::Invalid { + timestamp: Timestamp::from(1000), + error: Box::new(err), + }; + let add_responder_result = state.add_responder(new_responder()); + assert!(matches!( + add_responder_result, + AddResponderResult::ValidationCompleted { + response_to_send: Err(_err), + .. 
+ } + )); + assert_eq!(state.responder_count(), 0); + } + + #[test] + fn should_add_new_holder_if_in_progress() { + let mut rng = TestRng::new(); + let mut fixture = Fixture::new(&mut rng); + let (mut state, _maybe_responder) = fixture.new_state(2, 2, 1, 1); + assert!(matches!(state, BlockValidationState::InProgress { .. })); + assert_eq!(state.holders_mut().unwrap().len(), 1); + + let new_holder = NodeId::random(fixture.rng); + state.add_holder(new_holder); + assert_eq!(state.holders_mut().unwrap().len(), 2); + assert_eq!( + state.holders_mut().unwrap().get(&new_holder), + Some(&HolderState::Unasked) + ); + } + + #[test] + fn should_not_change_holder_state() { + let mut rng = TestRng::new(); + let mut fixture = Fixture::new(&mut rng); + let (mut state, _maybe_responder) = fixture.new_state(2, 2, 1, 1); + assert!(matches!(state, BlockValidationState::InProgress { .. })); + let (holder, holder_state) = state + .holders_mut() + .expect("should have holders") + .iter_mut() + .next() + .expect("should have one entry"); + *holder_state = HolderState::Asked; + let holder = *holder; + + state.add_holder(holder); + assert_eq!(state.holders_mut().unwrap().len(), 1); + assert_eq!( + state.holders_mut().unwrap().get(&holder), + Some(&HolderState::Asked) + ); + } + + #[test] + fn should_start_fetching() { + let mut rng = TestRng::new(); + let mut fixture = Fixture::new(&mut rng); + let (mut state, _maybe_responder) = fixture.new_state(2, 2, 1, 1); + assert!(matches!(state, BlockValidationState::InProgress { .. })); + let (holder, holder_state) = state + .holders_mut() + .expect("should have holders") + .iter_mut() + .next() + .expect("should have one entry"); + assert_eq!(*holder_state, HolderState::Unasked); + let original_holder = *holder; + + // We currently have one unasked holder. Add some failed holders - should still return + // `MaybeStartFetching::Start` containing the original holder. 
+ for _ in 0..3 { + state + .holders_mut() + .unwrap() + .insert(NodeId::random(fixture.rng), HolderState::Failed); + } + + let maybe_start_fetching = state.start_fetching(); + match maybe_start_fetching { + MaybeStartFetching::Start { + holder, + missing_transactions, + .. + } => { + assert_eq!(holder, original_holder); + assert_eq!(missing_transactions.len(), 6); + } + _ => panic!("unexpected return value"), + } + + // The original holder should now be marked as `Asked`. + let holder_state = state.holders_mut().unwrap().get(&original_holder); + assert_eq!(holder_state, Some(&HolderState::Asked)); + } + + #[test] + fn start_fetching_should_return_ongoing_if_any_holder_in_asked_state() { + let mut rng = TestRng::new(); + let mut fixture = Fixture::new(&mut rng); + let (mut state, _maybe_responder) = fixture.new_state(2, 2, 1, 1); + assert!(matches!(state, BlockValidationState::InProgress { .. })); + + // Change the current (only) holder's state to `Asked`. + let maybe_start_fetching = state.start_fetching(); + assert!(matches!( + maybe_start_fetching, + MaybeStartFetching::Start { .. } + )); + let holder_state = state.holders_mut().unwrap().values().next(); + assert_eq!(holder_state, Some(&HolderState::Asked)); + + // Add some unasked holders and some failed - should still return + // `MaybeStartFetching::Ongoing`. + let unasked_count = fixture.rng.gen_range(0..3); + for _ in 0..unasked_count { + state + .holders_mut() + .unwrap() + .insert(NodeId::random(fixture.rng), HolderState::Unasked); + } + let failed_count = fixture.rng.gen_range(0..3); + for _ in 0..failed_count { + state + .holders_mut() + .unwrap() + .insert(NodeId::random(fixture.rng), HolderState::Failed); + } + + // Clone the holders collection before calling `start_fetching` as it should be unmodified + // by the call. + let holders_before = state.holders_mut().unwrap().clone(); + + // `start_fetching` should return `Ongoing` due to the single `Asked` holder. 
+ let maybe_start_fetching = state.start_fetching(); + assert_eq!(maybe_start_fetching, MaybeStartFetching::Ongoing); + + // The holders should be unchanged. + assert_eq!(state.holders_mut().unwrap(), &holders_before); + } + + #[test] + fn start_fetching_should_return_unable_if_all_holders_in_failed_state() { + let mut rng = TestRng::new(); + let mut fixture = Fixture::new(&mut rng); + let (mut state, _maybe_responder) = fixture.new_state(2, 2, 1, 1); + assert!(matches!(state, BlockValidationState::InProgress { .. })); + + // Set the original holder's state to `Failed` and add some more failed. + *state + .holders_mut() + .expect("should have holders") + .values_mut() + .next() + .expect("should have one entry") = HolderState::Failed; + + let failed_count = fixture.rng.gen_range(0..3); + for _ in 0..failed_count { + state + .holders_mut() + .unwrap() + .insert(NodeId::random(fixture.rng), HolderState::Failed); + } + + // Clone the holders collection before calling `start_fetching` as it should be unmodified + // by the call. + let holders_before = state.holders_mut().unwrap().clone(); + + // `start_fetching` should return `Unable` due to no un-failed holders. + let maybe_start_fetching = state.start_fetching(); + assert_eq!(maybe_start_fetching, MaybeStartFetching::Unable); + + // The holders should be unchanged. 
+ assert_eq!(state.holders_mut().unwrap(), &holders_before); + } + + #[test] + fn start_fetching_should_return_validation_succeeded_if_valid() { + let mut state = BlockValidationState::Valid(Timestamp::from(1000)); + let maybe_start_fetching = state.start_fetching(); + assert_eq!( + maybe_start_fetching, + MaybeStartFetching::ValidationSucceeded + ); + } + + #[test] + fn start_fetching_should_return_validation_failed_if_invalid() { + let mut state = BlockValidationState::Invalid { + timestamp: Timestamp::from(1000), + error: Box::new(InvalidProposalError::InvalidTransaction( + "start_fetching_should_return_validation_failed_if_invalid".to_string(), + )), + }; + let maybe_start_fetching = state.start_fetching(); + assert_eq!(maybe_start_fetching, MaybeStartFetching::ValidationFailed); + } + + #[test] + fn state_should_change_to_validation_succeeded() { + let mut rng = TestRng::new(); + let mut fixture = Fixture::new_with_block_gas_limit(&mut rng, 50_000_000_000_000); + let (mut state, _maybe_responder) = fixture.new_state(2, 2, 1, 1); + assert!(matches!(state, BlockValidationState::InProgress { .. })); + + // While there is still at least one missing transaction, `try_add_transaction_footprint` + // should keep the state `InProgress` and never return responders. + let mut footprints = fixture.footprints(); + while footprints.len() > 1 { + let (transaction_hash, footprint) = footprints.pop().unwrap(); + let responders = state.try_add_transaction_footprint(&transaction_hash, &footprint); + assert!(responders.is_empty()); + assert!(matches!( + state, + BlockValidationState::InProgress { ref responders, .. } + if !responders.is_empty() + )); + } + + // The final transaction should cause the state to go to `Valid` and the responders to be + // returned. 
+ let (dt_hash, footprint) = footprints.pop().unwrap(); + let responders = state.try_add_transaction_footprint(&dt_hash, &footprint); + assert_eq!(responders.len(), 1); + assert!(matches!(state, BlockValidationState::Valid(_))); + } + + #[test] + fn unrelated_transaction_added_should_not_change_state() { + let mut rng = TestRng::new(); + let mut fixture = Fixture::new(&mut rng); + let (mut state, _maybe_responder) = fixture.new_state(2, 2, 1, 1); + let (appendable_block_before, missing_transactions_before, holders_before) = match &state { + BlockValidationState::InProgress { + appendable_block, + missing_transactions, + holders, + .. + } => ( + appendable_block.clone(), + missing_transactions.clone(), + holders.clone(), + ), + BlockValidationState::Valid(_) | BlockValidationState::Invalid { .. } => { + panic!("unexpected state") + } + }; + + // Create a new, random transaction. + let transaction = new_standard(fixture.rng, 1500.into(), TimeDiff::from_seconds(1)); + let transaction_hash = match &transaction { + Transaction::Deploy(deploy) => TransactionHash::Deploy(*deploy.hash()), + Transaction::V1(v1) => TransactionHash::V1(*v1.hash()), + }; + let chainspec = Chainspec::default(); + let footprint = TransactionFootprint::new(&chainspec, &transaction).unwrap(); + + // Ensure trying to add it doesn't change the state. + let responders = state.try_add_transaction_footprint(&transaction_hash, &footprint); + assert!(responders.is_empty()); + match &state { + BlockValidationState::InProgress { + appendable_block, + missing_transactions: missing_deploys, + holders, + .. + } => { + assert_eq!(&appendable_block_before, appendable_block); + assert_eq!(&missing_transactions_before, missing_deploys); + assert_eq!(&holders_before, holders); + } + BlockValidationState::Valid(_) | BlockValidationState::Invalid { .. 
} => { + panic!("unexpected state") + } + }; + } + + #[test] + fn state_should_change_to_validation_failed() { + let mut rng = TestRng::new(); + let mut fixture = Fixture::new(&mut rng); + // Add an invalid (future-dated) transaction to the fixture. + let invalid_transaction = + new_standard(fixture.rng, Timestamp::MAX, TimeDiff::from_seconds(1)); + let invalid_transaction_hash = invalid_transaction.hash(); + fixture.transactions.push(invalid_transaction.clone()); + let (mut state, _maybe_responder) = fixture.new_state(1, 1, 1, 1); + assert!(matches!(state, BlockValidationState::InProgress { .. })); + if let BlockValidationState::InProgress { + ref mut missing_transactions, + .. + } = state + { + let approvals = invalid_transaction.approvals(); + let approvals_hash = + ApprovalsHash::compute(&approvals).expect("must get approvals hash"); + let info = ApprovalInfo::new(approvals, approvals_hash); + missing_transactions.insert(invalid_transaction_hash, info); + }; + + // Add some valid deploys, should keep the state `InProgress` and never return responders. + let mut footprints = fixture.footprints(); + while footprints.len() > 3 { + let (dt_hash, footprint) = footprints.pop().unwrap(); + if dt_hash == invalid_transaction_hash { + continue; + } + let responders = state.try_add_transaction_footprint(&dt_hash, &footprint); + assert!(responders.is_empty()); + } + + let transaction_hash = invalid_transaction.hash(); + // The invalid transaction should cause the state to go to `Invalid` and the responders to + // be returned. + let chainspec = Chainspec::default(); + let footprint = TransactionFootprint::new(&chainspec, &invalid_transaction).unwrap(); + let responders = state.try_add_transaction_footprint(&transaction_hash, &footprint); + assert_eq!(responders.len(), 1); + assert!(matches!(state, BlockValidationState::Invalid { .. 
})); + } +} diff --git a/node/src/components/block_validator/tests.rs b/node/src/components/block_validator/tests.rs new file mode 100644 index 0000000000..d97da1f41a --- /dev/null +++ b/node/src/components/block_validator/tests.rs @@ -0,0 +1,1287 @@ +use std::{collections::VecDeque, sync::Arc, time::Duration}; + +use derive_more::From; +use itertools::Itertools; +use rand::Rng; + +use casper_types::{ + bytesrepr::Bytes, runtime_args, system::standard_payment::ARG_AMOUNT, testing::TestRng, Block, + BlockSignatures, BlockSignaturesV2, Chainspec, ChainspecRawBytes, Deploy, ExecutableDeployItem, + FinalitySignatureV2, RuntimeArgs, SecretKey, TestBlockBuilder, TimeDiff, Transaction, + TransactionHash, TransactionId, TransactionV1, TransactionV1Config, AUCTION_LANE_ID, + INSTALL_UPGRADE_LANE_ID, MINT_LANE_ID, U512, +}; + +use crate::{ + components::{ + consensus::BlockContext, + fetcher::{self, FetchItem}, + }, + effect::requests::StorageRequest, + reactor::{EventQueueHandle, QueueKind, Scheduler}, + testing::LARGE_WASM_LANE_ID, + types::{BlockPayload, ValidatorMatrix}, + utils::{self, Loadable}, +}; + +use super::*; + +#[derive(Debug, From)] +enum ReactorEvent { + #[from] + BlockValidator(Event), + #[from] + TransactionFetcher(FetcherRequest), + #[from] + FinalitySigFetcher(FetcherRequest), + #[from] + Storage(StorageRequest), + #[from] + FatalAnnouncement(#[allow(dead_code)] FatalAnnouncement), +} + +impl From for ReactorEvent { + fn from(req: BlockValidationRequest) -> ReactorEvent { + ReactorEvent::BlockValidator(req.into()) + } +} + +struct MockReactor { + scheduler: &'static Scheduler, + validator_matrix: ValidatorMatrix, +} + +impl MockReactor { + fn new>( + our_secret_key: Arc, + public_keys: I, + ) -> Self { + MockReactor { + scheduler: utils::leak(Scheduler::new(QueueKind::weights(), None)), + validator_matrix: ValidatorMatrix::new_with_validators(our_secret_key, public_keys), + } + } + + async fn expect_block_validator_event(&self) -> Event { + let 
((_ancestor, reactor_event), _) = self.scheduler.pop().await; + if let ReactorEvent::BlockValidator(event) = reactor_event { + event + } else { + panic!("unexpected event: {:?}", reactor_event); + } + } + + async fn handle_requests(&self, context: &ValidationContext) { + while let Ok(((_ancestor, event), _)) = + tokio::time::timeout(Duration::from_millis(100), self.scheduler.pop()).await + { + match event { + ReactorEvent::TransactionFetcher(FetcherRequest { + id, + peer, + validation_metadata: _, + responder, + }) => { + if let Some(transaction) = context.get_transaction(id) { + let response = FetchedData::FromPeer { + item: Box::new(transaction), + peer, + }; + responder.respond(Ok(response)).await; + } else { + responder + .respond(Err(fetcher::Error::Absent { + id: Box::new(id), + peer, + })) + .await; + } + } + ReactorEvent::Storage(StorageRequest::GetBlockAndMetadataByHeight { + block_height, + only_from_available_block_range: _, + responder, + }) => { + let maybe_block = context.get_block_with_metadata(block_height); + responder.respond(maybe_block).await; + } + ReactorEvent::FinalitySigFetcher(FetcherRequest { + id, + peer, + validation_metadata: _, + responder, + }) => { + if let Some(signature) = context.get_signature(&id) { + let response = FetchedData::FromPeer { + item: Box::new(signature), + peer, + }; + responder.respond(Ok(response)).await; + } else { + responder + .respond(Err(fetcher::Error::Absent { + id: Box::new(id), + peer, + })) + .await; + } + } + reactor_event => { + panic!("unexpected event: {:?}", reactor_event); + } + }; + } + } +} + +pub(super) fn new_proposed_block_with_cited_signatures( + timestamp: Timestamp, + transfer: Vec<(TransactionHash, BTreeSet)>, + staking: Vec<(TransactionHash, BTreeSet)>, + install_upgrade: Vec<(TransactionHash, BTreeSet)>, + standard: Vec<(TransactionHash, BTreeSet)>, + cited_signatures: RewardedSignatures, +) -> ProposedBlock { + // Accusations and ancestors are empty, and the random bit is always true: + 
// These values are not checked by the block validator. + let block_context = BlockContext::new(timestamp, vec![]); + let transactions = { + let mut ret = BTreeMap::new(); + ret.insert(MINT_LANE_ID, transfer.into_iter().collect()); + ret.insert(AUCTION_LANE_ID, staking.into_iter().collect()); + ret.insert( + INSTALL_UPGRADE_LANE_ID, + install_upgrade.into_iter().collect(), + ); + ret.insert(LARGE_WASM_LANE_ID, standard.into_iter().collect()); + ret + }; + let block_payload = BlockPayload::new(transactions, vec![], cited_signatures, true, 1u8); + ProposedBlock::new(Arc::new(block_payload), block_context) +} + +pub(super) fn new_proposed_block( + timestamp: Timestamp, + transfer: Vec<(TransactionHash, BTreeSet)>, + staking: Vec<(TransactionHash, BTreeSet)>, + install_upgrade: Vec<(TransactionHash, BTreeSet)>, + standard: Vec<(TransactionHash, BTreeSet)>, +) -> ProposedBlock { + new_proposed_block_with_cited_signatures( + timestamp, + transfer, + staking, + install_upgrade, + standard, + Default::default(), + ) +} + +pub(super) fn new_v1_standard( + rng: &mut TestRng, + timestamp: Timestamp, + ttl: TimeDiff, +) -> Transaction { + let transaction_v1 = TransactionV1::random_wasm(rng, Some(timestamp), Some(ttl)); + Transaction::V1(transaction_v1) +} + +pub(super) fn new_auction(rng: &mut TestRng, timestamp: Timestamp, ttl: TimeDiff) -> Transaction { + let transaction_v1 = TransactionV1::random_auction(rng, Some(timestamp), Some(ttl)); + Transaction::V1(transaction_v1) +} + +pub(super) fn new_install_upgrade( + rng: &mut TestRng, + timestamp: Timestamp, + ttl: TimeDiff, +) -> Transaction { + TransactionV1::random_install_upgrade(rng, Some(timestamp), Some(ttl)).into() +} + +pub(super) fn new_deploy(rng: &mut TestRng, timestamp: Timestamp, ttl: TimeDiff) -> Transaction { + let secret_key = SecretKey::random(rng); + let chain_name = "chain".to_string(); + let payment = ExecutableDeployItem::ModuleBytes { + module_bytes: Bytes::new(), + args: runtime_args! 
{ ARG_AMOUNT => U512::from(1) }, + }; + let session = ExecutableDeployItem::ModuleBytes { + module_bytes: Bytes::new(), + args: RuntimeArgs::new(), + }; + let dependencies = vec![]; + let gas_price = 1; + + Deploy::new_signed( + timestamp, + ttl, + gas_price, + dependencies, + chain_name, + payment, + session, + &secret_key, + None, + ) + .into() +} + +pub(super) fn new_v1_transfer( + rng: &mut TestRng, + timestamp: Timestamp, + ttl: TimeDiff, +) -> Transaction { + TransactionV1::random_transfer(rng, Some(timestamp), Some(ttl)).into() +} + +pub(super) fn new_transfer(rng: &mut TestRng, timestamp: Timestamp, ttl: TimeDiff) -> Transaction { + let secret_key = SecretKey::random(rng); + let chain_name = "chain".to_string(); + let payment = ExecutableDeployItem::ModuleBytes { + module_bytes: Bytes::new(), + args: runtime_args! { ARG_AMOUNT => U512::from(1) }, + }; + let session = ExecutableDeployItem::Transfer { + args: RuntimeArgs::new(), + }; + let dependencies = vec![]; + let gas_price = 1; + + Deploy::new_signed( + timestamp, + ttl, + gas_price, + dependencies, + chain_name, + payment, + session, + &secret_key, + None, + ) + .into() +} + +pub(super) fn new_mint(rng: &mut TestRng, timestamp: Timestamp, ttl: TimeDiff) -> Transaction { + if rng.gen() { + new_v1_transfer(rng, timestamp, ttl) + } else { + new_transfer(rng, timestamp, ttl) + } +} + +pub(super) fn new_standard(rng: &mut TestRng, timestamp: Timestamp, ttl: TimeDiff) -> Transaction { + if rng.gen() { + new_v1_standard(rng, timestamp, ttl) + } else { + new_deploy(rng, timestamp, ttl) + } +} + +pub(super) fn new_non_transfer( + rng: &mut TestRng, + timestamp: Timestamp, + ttl: TimeDiff, +) -> Transaction { + match rng.gen_range(0..3) { + 0 => new_standard(rng, timestamp, ttl), + 1 => new_install_upgrade(rng, timestamp, ttl), + 2 => new_auction(rng, timestamp, ttl), + _ => unreachable!(), + } +} + +type SecretKeys = BTreeMap>; + +struct ValidationContext { + chainspec: Chainspec, + // Validators + secret_keys: 
SecretKeys, + // map of height → block + past_blocks: HashMap, + // blocks that will be "stored" during validation + delayed_blocks: HashMap, + transactions: HashMap, + transfers: HashMap, + // map of block height → signatures for the block + signatures: HashMap>, + // map of signatures that aren't stored, but are fetchable + fetchable_signatures: HashMap, + + // fields defining the proposed block that will be validated + transactions_to_include: Vec<(TransactionHash, BTreeSet)>, + transfers_to_include: Vec<(TransactionHash, BTreeSet)>, + signatures_to_include: HashMap>, + proposed_block_height: Option, +} + +impl ValidationContext { + fn new() -> Self { + let (chainspec, _) = <(Chainspec, ChainspecRawBytes)>::from_resources("local"); + Self { + chainspec, + secret_keys: BTreeMap::new(), + past_blocks: HashMap::new(), + delayed_blocks: HashMap::new(), + transactions: HashMap::new(), + transfers: HashMap::new(), + fetchable_signatures: HashMap::new(), + signatures: HashMap::new(), + transactions_to_include: vec![], + transfers_to_include: vec![], + signatures_to_include: HashMap::new(), + proposed_block_height: None, + } + } + + fn with_num_validators(mut self, rng: &mut TestRng, num_validators: usize) -> Self { + for _ in 0..num_validators { + let validator_key = Arc::new(SecretKey::random(rng)); + self.secret_keys + .insert(PublicKey::from(&*validator_key), validator_key.clone()); + } + self + } + + fn with_count_limits( + mut self, + mint_count: Option, + auction: Option, + install: Option, + large_limit: Option, + ) -> Self { + let transaction_v1_config = TransactionV1Config::default().with_count_limits( + mint_count, + auction, + install, + large_limit, + ); + self.chainspec.transaction_config.transaction_v1_config = transaction_v1_config; + self + } + + fn with_block_gas_limit(mut self, block_gas_limit: u64) -> Self { + self.chainspec.transaction_config.block_gas_limit = block_gas_limit; + self + } + + fn get_validators(&self) -> Vec { + 
self.secret_keys.keys().cloned().collect() + } + + fn with_past_blocks( + mut self, + rng: &mut TestRng, + min_height: u64, + max_height: u64, + era: EraId, + ) -> Self { + self.past_blocks + .extend((min_height..=max_height).map(|height| { + let block = TestBlockBuilder::new().height(height).era(era).build(rng); + (height, block.into()) + })); + self.proposed_block_height = self + .proposed_block_height + .map(|height| height.max(max_height + 1)) + .or(Some(max_height + 1)); + self + } + + fn with_delayed_blocks( + mut self, + rng: &mut TestRng, + min_height: u64, + max_height: u64, + era: EraId, + ) -> Self { + self.delayed_blocks + .extend((min_height..=max_height).map(|height| { + let block = TestBlockBuilder::new().height(height).era(era).build(rng); + (height, block.into()) + })); + self.proposed_block_height = self + .proposed_block_height + .map(|height| height.max(max_height + 1)) + .or(Some(max_height + 1)); + self + } + + fn get_delayed_blocks(&mut self) -> Vec { + let heights = self.delayed_blocks.keys().cloned().collect(); + self.past_blocks + .extend(std::mem::take(&mut self.delayed_blocks)); + heights + } + + fn with_signatures_for_block<'a, I: IntoIterator>( + mut self, + min_height: u64, + max_height: u64, + validators: I, + ) -> Self { + for validator in validators { + for height in min_height..=max_height { + let block = self + .past_blocks + .get(&height) + .or_else(|| self.delayed_blocks.get(&height)) + .expect("should have block"); + let secret_key = self + .secret_keys + .get(validator) + .expect("should have validator"); + let signature = FinalitySignatureV2::create( + *block.hash(), + block.height(), + block.era_id(), + self.chainspec.name_hash(), + secret_key, + ); + self.signatures + .entry(height) + .or_default() + .insert(validator.clone(), signature); + } + } + self + } + + fn with_fetchable_signatures<'a, I: IntoIterator>( + mut self, + min_height: u64, + max_height: u64, + validators: I, + ) -> Self { + for validator in validators { 
+ for height in min_height..=max_height { + let block = self.past_blocks.get(&height).expect("should have block"); + let secret_key = self + .secret_keys + .get(validator) + .expect("should have validator"); + let signature = FinalitySignature::V2(FinalitySignatureV2::create( + *block.hash(), + block.height(), + block.era_id(), + self.chainspec.name_hash(), + secret_key, + )); + self.fetchable_signatures + .insert(*signature.fetch_id(), signature); + } + } + self + } + + fn include_signatures<'a, I: IntoIterator>( + mut self, + min_height: u64, + max_height: u64, + validators: I, + ) -> Self { + for validator in validators { + for height in min_height..=max_height { + self.signatures_to_include + .entry(height) + .or_default() + .insert(validator.clone()); + } + } + self + } + + fn with_transactions(mut self, transactions: Vec) -> Self { + self.transactions.extend( + transactions + .into_iter() + .map(|transaction| (transaction.clone().fetch_id(), transaction)), + ); + self + } + + fn with_transfers(mut self, transfers: Vec) -> Self { + self.transfers.extend( + transfers + .into_iter() + .map(|transaction| (transaction.clone().fetch_id(), transaction)), + ); + self + } + + fn include_all_transactions(mut self) -> Self { + self.transactions_to_include.extend( + self.transactions + .values() + .map(|transaction| (transaction.hash(), transaction.approvals())), + ); + self + } + + fn include_all_transfers(mut self) -> Self { + self.transfers_to_include.extend( + self.transfers + .values() + .map(|transaction| (transaction.hash(), transaction.approvals())), + ); + self + } + + fn include_transactions)>>( + mut self, + transactions: I, + ) -> Self { + self.transactions_to_include.extend(transactions); + self + } + + fn include_transfers)>>( + mut self, + transfers: I, + ) -> Self { + self.transfers_to_include.extend(transfers); + self + } + + fn get_transaction(&self, id: TransactionId) -> Option { + self.transactions + .get(&id) + .cloned() + .or_else(|| 
self.transfers.get(&id).cloned()) + } + + fn get_signature(&self, id: &FinalitySignatureId) -> Option { + self.fetchable_signatures.get(id).cloned() + } + + fn get_block_with_metadata(&self, block_height: u64) -> Option { + self.past_blocks.get(&block_height).map(|block| { + let empty_hashmap = HashMap::new(); + let signatures = self.signatures.get(&block_height).unwrap_or(&empty_hashmap); + let mut block_signatures = BlockSignaturesV2::new( + *block.hash(), + block.height(), + block.era_id(), + self.chainspec.name_hash(), + ); + for signature in signatures.values() { + block_signatures + .insert_signature(signature.public_key().clone(), *signature.signature()); + } + BlockWithMetadata { + block: block.clone(), + block_signatures: BlockSignatures::V2(block_signatures), + } + }) + } + + fn proposed_block(&self, timestamp: Timestamp) -> ProposedBlock { + let rewards_window = self.chainspec.core_config.signature_rewards_max_delay; + let rewarded_signatures = self + .proposed_block_height + .map(|proposed_block_height| { + RewardedSignatures::new( + (1..=rewards_window) + .filter_map(|height_diff| proposed_block_height.checked_sub(height_diff)) + .map(|height| { + let signing_validators = self + .signatures_to_include + .get(&height) + .cloned() + .unwrap_or_default(); + SingleBlockRewardedSignatures::from_validator_set( + &signing_validators, + self.secret_keys.keys(), + ) + }), + ) + }) + .unwrap_or_default(); + new_proposed_block_with_cited_signatures( + timestamp, + self.transfers_to_include.to_vec(), + vec![], + vec![], + self.transactions_to_include.to_vec(), + rewarded_signatures, + ) + } + + async fn proposal_is_valid(&mut self, rng: &mut TestRng, timestamp: Timestamp) -> bool { + self.validate_proposed_block(rng, timestamp).await.is_ok() + } + + /// Validates a block using a `BlockValidator` component, and returns the result. 
+ async fn validate_proposed_block( + &mut self, + rng: &mut TestRng, + timestamp: Timestamp, + ) -> Result<(), Box> { + let proposed_block = self.proposed_block(timestamp); + + // Create the reactor and component. + let our_secret_key = self + .secret_keys + .values() + .next() + .expect("should have a secret key") + .clone(); + let reactor = MockReactor::new(our_secret_key, self.secret_keys.keys().cloned()); + let effect_builder = + EffectBuilder::new(EventQueueHandle::without_shutdown(reactor.scheduler)); + let mut block_validator = BlockValidator::new( + Arc::new(self.chainspec.clone()), + reactor.validator_matrix.clone(), + Config::default(), + 1u8, + ); + + // Pass the block to the component. This future will eventually resolve to the result, i.e. + // whether the block is valid or not. + let bob_node_id = NodeId::random(rng); + let block_height = rng.gen_range(0..1000); + let validation_result = tokio::spawn(effect_builder.validate_block( + bob_node_id, + self.proposed_block_height.unwrap_or(block_height), + proposed_block.clone(), + )); + let event = reactor.expect_block_validator_event().await; + let effects = block_validator.handle_event(effect_builder, rng, event); + + // If validity could already be determined, the effect will be the validation response. + if !block_validator.validation_states.is_empty() + && block_validator + .validation_states + .values() + .all(BlockValidationState::completed) + { + assert_eq!(1, effects.len()); + for effect in effects { + tokio::spawn(effect).await.unwrap(); // Response. + } + return validation_result.await.unwrap(); + } + + // Otherwise the effects are either requests to fetch the block's transactions, or to fetch + // past blocks for the purpose of signature validation. 
+ let event_futures: Vec<_> = effects.into_iter().map(tokio::spawn).collect(); + + // We make our mock reactor answer with the expected blocks and/or transactions and + // transfers: + reactor.handle_requests(self).await; + + // At this point we either responded with requested transactions, or the past blocks. This + // should generate other events (`GotPastBlocksWithMetadata` in the case of past blocks, or + // a bunch of `TransactionFetched` in the case of transactions). We have to handle them. + let mut effects = Effects::new(); + for future in event_futures { + let events = future.await.unwrap(); + effects.extend( + events + .into_iter() + .flat_map(|event| block_validator.handle_event(effect_builder, rng, event)), + ); + } + + // If there are no effects - some blocks have been missing from storage. Announce the + // finalization of the blocks we have in the context. + if effects.is_empty() { + for block_height in self.get_delayed_blocks() { + effects.extend(block_validator.handle_event( + effect_builder, + rng, + Event::BlockStored(block_height), + )); + } + } + + // If there are still no effects, something went wrong. + assert!(!effects.is_empty()); + + // If there were no signatures in the block, the validity of the block should be determined + // at this point. In such a case, return the result. + if !block_validator.validation_states.is_empty() + && block_validator + .validation_states + .values() + .all(BlockValidationState::completed) + { + assert_eq!(1, effects.len()); + for effect in effects { + tokio::spawn(effect).await.unwrap(); + } + return validation_result.await.unwrap(); + } + + // Otherwise, we have more effects to handle. After the blocks have been returned, the + // validator should now ask for the transactions and signatures. + // If some blocks have been delayed, this can be another request for past blocks. + // Let's handle those requests. 
+ let event_futures: Vec<_> = effects.into_iter().map(tokio::spawn).collect(); + + // We make our mock reactor answer with the expected items. + reactor.handle_requests(self).await; + + // Again, we'll have a bunch of events to handle, so we handle them. + let mut effects = Effects::new(); + for future in event_futures { + let events = future.await.unwrap(); + effects.extend( + events + .into_iter() + .flat_map(|event| block_validator.handle_event(effect_builder, rng, event)), + ); + } + + // If there are no effects at this point, something went wrong. + assert!(!effects.is_empty()); + + // If no blocks were delayed, we just returned all the fetched items, so now the validity + // should have been resolved. Return the result if it is so. + if !block_validator.validation_states.is_empty() + && block_validator + .validation_states + .values() + .all(BlockValidationState::completed) + { + assert_eq!(1, effects.len()); + for effect in effects { + tokio::spawn(effect).await.unwrap(); + } + return validation_result.await.unwrap(); + } + + // Otherwise, we have more effects to handle. At this point, all the delayed blocks should + // have been stored and returned, so we just have a bunch of fetch requests to handle. + let event_futures: Vec<_> = effects.into_iter().map(tokio::spawn).collect(); + + // We make our mock reactor answer with the expected items. + reactor.handle_requests(self).await; + + // Again, we'll have a bunch of events to handle. At this point we should have a bunch of + // `TransactionFetched` or `FinalitySignatureFetched` events. We handle them. + let mut effects = Effects::new(); + for future in event_futures { + let events = future.await.unwrap(); + effects.extend( + events + .into_iter() + .flat_map(|event| block_validator.handle_event(effect_builder, rng, event)), + ); + } + + // Nothing more should be requested, so we expect at most one effect: the validation + // response. 
Zero effects is possible if block validator responded with false before, but + // hasn't marked the state invalid (it can happen when peers are exhausted). In any case, + // the result should be resolved now. + assert!(effects.len() < 2); + for effect in effects { + tokio::spawn(effect).await.unwrap(); // Response. + } + validation_result.await.unwrap() + } +} + +/// Verifies that a block without any transactions or transfers is valid. +#[tokio::test] +async fn empty_block() { + let mut rng = TestRng::new(); + let mut empty_context = ValidationContext::new().with_num_validators(&mut rng, 1); + assert!(empty_context.proposal_is_valid(&mut rng, 1000.into()).await); +} + +/// Verifies that the block validator checks transaction and transfer timestamps and ttl. +#[tokio::test] +async fn ttl() { + // The ttl is 200 ms, and our transactions and transfers have timestamps 900 and 1000. So the + // block timestamp must be at least 1000 and at most 1100. + let mut rng = TestRng::new(); + let ttl = TimeDiff::from_millis(200); + let transactions = vec![ + new_non_transfer(&mut rng, 1000.into(), ttl), + new_non_transfer(&mut rng, 900.into(), ttl), + ]; + let transfers: Vec = vec![ + new_v1_transfer(&mut rng, 1000.into(), ttl), + new_v1_transfer(&mut rng, 900.into(), ttl), + ]; + + let mut transactions_context = ValidationContext::new() + .with_num_validators(&mut rng, 1) + .with_transactions(transactions.clone()) + .with_count_limits(Some(3000), Some(3000), Some(3000), Some(3000)) + .with_block_gas_limit(15_300_000_000_000) + .include_all_transactions(); + let mut transfers_context = ValidationContext::new() + .with_num_validators(&mut rng, 1) + .with_transfers(transfers.clone()) + .with_count_limits(Some(3000), Some(3000), Some(3000), Some(3000)) + .with_block_gas_limit(15_300_000_000_000) + .include_all_transfers(); + let mut both_context = ValidationContext::new() + .with_num_validators(&mut rng, 1) + .with_transactions(transactions) + .with_transfers(transfers) + 
.with_count_limits(Some(3000), Some(3000), Some(3000), Some(3000)) + .with_block_gas_limit(15_300_000_000_000) + .include_all_transactions() + .include_all_transfers(); + + // Both 1000 and 1100 are timestamps compatible with the transactions and transfers. + assert!(both_context.proposal_is_valid(&mut rng, 1000.into()).await); + assert!(both_context.proposal_is_valid(&mut rng, 1100.into()).await); + + // A block with timestamp 999 can't contain a transfer or transactions with timestamp 1000. + assert!( + !transactions_context + .proposal_is_valid(&mut rng, 999.into()) + .await + ); + assert!( + !transfers_context + .proposal_is_valid(&mut rng, 999.into()) + .await + ); + assert!(!both_context.proposal_is_valid(&mut rng, 999.into()).await); + + // At time 1101, the transactions and transfer from time 900 have expired. + assert!( + !transactions_context + .proposal_is_valid(&mut rng, 1101.into()) + .await + ); + assert!( + !transfers_context + .proposal_is_valid(&mut rng, 1101.into()) + .await + ); + assert!(!both_context.proposal_is_valid(&mut rng, 1101.into()).await); +} + +/// Verifies that a block is invalid if it contains a transfer in the transactions section +/// or vice versa. +#[tokio::test] +async fn transfer_transaction_mixup_and_replay() { + let mut rng = TestRng::new(); + let ttl = TimeDiff::from_millis(200); + let timestamp = Timestamp::from(1000); + let deploy = new_deploy(&mut rng, timestamp, ttl); + let transaction_v1 = new_v1_standard(&mut rng, timestamp, ttl); + let transfer_orig = new_transfer(&mut rng, timestamp, ttl); + let transfer_v1 = new_v1_transfer(&mut rng, timestamp, ttl); + + // First we make sure that our transfers and transactions would normally be valid. 
+ let transactions = vec![transaction_v1.clone()]; + let transfers = vec![transfer_orig.clone(), transfer_v1.clone()]; + let mut context = ValidationContext::new() + .with_num_validators(&mut rng, 1) + .with_transactions(transactions) + .with_transfers(transfers) + .include_all_transactions() + .include_all_transfers(); + assert!(context.proposal_is_valid(&mut rng, timestamp).await); + + // Now we test for different invalid combinations of transactions and transfers: + // 1. Original style transfer in the transactions section. + let transactions = vec![ + transfer_orig.clone(), + transaction_v1.clone(), + deploy.clone(), + ]; + let transfers = vec![transfer_orig.clone(), transfer_v1.clone()]; + let mut context = ValidationContext::new() + .with_num_validators(&mut rng, 1) + .with_transactions(transactions) + .with_transfers(transfers) + .include_all_transactions() + .include_all_transfers(); + assert!(!context.proposal_is_valid(&mut rng, timestamp).await); + // 2. V1 transfer in the transactions section. + let transactions = vec![transfer_v1.clone(), transaction_v1.clone(), deploy.clone()]; + let transfers = vec![transfer_orig.clone(), transfer_v1.clone()]; + let mut context = ValidationContext::new() + .with_num_validators(&mut rng, 1) + .with_transactions(transactions) + .with_transfers(transfers) + .include_all_transactions() + .include_all_transfers(); + assert!(!context.proposal_is_valid(&mut rng, timestamp).await); + // 3. Legacy deploy in the transfers section. + let transactions = vec![transaction_v1.clone(), deploy.clone()]; + let transfers = vec![transfer_orig.clone(), transfer_v1.clone(), deploy.clone()]; + let mut context = ValidationContext::new() + .with_num_validators(&mut rng, 1) + .with_transactions(transactions) + .with_transfers(transfers) + .include_all_transactions() + .include_all_transfers(); + assert!(!context.proposal_is_valid(&mut rng, timestamp).await); + // 4. V1 transaction in the transfers section. 
+ let transactions = vec![transaction_v1.clone(), deploy.clone()]; + let transfers = vec![ + transfer_orig.clone(), + transfer_v1.clone(), + transaction_v1.clone(), + ]; + let mut context = ValidationContext::new() + .with_num_validators(&mut rng, 1) + .with_transactions(transactions) + .with_transfers(transfers) + .include_all_transactions() + .include_all_transfers(); + assert!(!context.proposal_is_valid(&mut rng, timestamp).await); + + // Each transaction must be unique + let transactions = vec![deploy.clone(), transaction_v1.clone()]; + let transfers = vec![transfer_orig.clone(), transfer_v1.clone()]; + let mut context = ValidationContext::new() + .with_num_validators(&mut rng, 1) + .with_transactions(transactions) + .with_transfers(transfers) + .include_all_transactions() + .include_all_transfers() + .include_transactions(vec![(deploy.hash(), deploy.approvals())]); + assert!(!context.proposal_is_valid(&mut rng, timestamp).await); + let transactions = vec![deploy.clone(), transaction_v1.clone()]; + let transfers = vec![transfer_orig.clone(), transfer_v1.clone()]; + let mut context = ValidationContext::new() + .with_num_validators(&mut rng, 1) + .with_transactions(transactions) + .with_transfers(transfers) + .include_all_transactions() + .include_all_transfers() + .include_transactions(vec![(transaction_v1.hash(), transaction_v1.approvals())]); + assert!(!context.proposal_is_valid(&mut rng, timestamp).await); + + // And each transfer must be unique, too. 
+ let transactions = vec![deploy.clone(), transaction_v1.clone()]; + let transfers = vec![transfer_v1.clone(), transfer_orig.clone()]; + let mut context = ValidationContext::new() + .with_num_validators(&mut rng, 1) + .with_transactions(transactions) + .with_transfers(transfers) + .include_all_transactions() + .include_all_transfers() + .include_transfers(vec![(transfer_v1.hash(), transfer_v1.approvals())]); + assert!(!context.proposal_is_valid(&mut rng, timestamp).await); + let transactions = vec![deploy.clone(), transaction_v1.clone()]; + let transfers = vec![transfer_orig.clone(), transfer_v1.clone()]; + let mut context = ValidationContext::new() + .with_num_validators(&mut rng, 1) + .with_transactions(transactions) + .with_transfers(transfers) + .include_all_transactions() + .include_all_transfers() + .include_transactions(vec![(transfer_orig.hash(), transfer_orig.approvals())]); + assert!(!context.proposal_is_valid(&mut rng, timestamp).await); +} + +/// Verifies that the block validator fetches from multiple peers. +#[tokio::test] +async fn should_fetch_from_multiple_peers() { + let _ = crate::logging::init(); + tokio::time::timeout(Duration::from_secs(5), async move { + let peer_count = 3; + let mut rng = TestRng::new(); + let ttl = TimeDiff::from_seconds(200); + let transactions = (0..peer_count) + .map(|i| new_non_transfer(&mut rng, (900 + i).into(), ttl)) + .collect_vec(); + let transfers = (0..peer_count) + .map(|i| new_v1_transfer(&mut rng, (1000 + i).into(), ttl)) + .collect_vec(); + + // Assemble the block to be validated. + let transfers_for_block = transfers + .iter() + .map(|transfer| (transfer.hash(), transfer.approvals())) + .collect_vec(); + let standard_for_block = transactions + .iter() + .map(|transaction| (transaction.hash(), transaction.approvals())) + .collect_vec(); + let proposed_block = new_proposed_block( + 1100.into(), + transfers_for_block, + vec![], + vec![], + standard_for_block, + ); + + // Create the reactor and component. 
+ let secret_key = Arc::new(SecretKey::random(&mut rng)); + let public_key = PublicKey::from(&*secret_key); + let reactor = MockReactor::new(secret_key, vec![public_key]); + let effect_builder = + EffectBuilder::new(EventQueueHandle::without_shutdown(reactor.scheduler)); + let (mut chainspec, _) = <(Chainspec, ChainspecRawBytes)>::from_resources("local"); + + chainspec.transaction_config.block_gas_limit = 100_000_000_000_000; + let transaction_v1_config = TransactionV1Config::default().with_count_limits( + Some(3000), + Some(3000), + Some(3000), + Some(3000), + ); + chainspec.transaction_config.transaction_v1_config = transaction_v1_config; + + let mut block_validator = BlockValidator::new( + Arc::new(chainspec), + reactor.validator_matrix.clone(), + Config::default(), + 1u8, + ); + + // Have a validation request for each one of the peers. These futures will eventually all + // resolve to the same result, i.e. whether the block is valid or not. + let validation_results = (0..peer_count) + .map(|_| { + let node_id = NodeId::random(&mut rng); + let block_height = rng.gen_range(0..1000); + tokio::spawn(effect_builder.validate_block( + node_id, + block_height, + proposed_block.clone(), + )) + }) + .collect_vec(); + + let mut fetch_effects = VecDeque::new(); + for index in 0..peer_count { + let event = reactor.expect_block_validator_event().await; + let effects = block_validator.handle_event(effect_builder, &mut rng, event); + if index == 0 { + assert_eq!(effects.len(), 6); + fetch_effects.extend(effects); + } else { + assert!(effects.is_empty()); + } + } + + // The effects are requests to fetch the block's transactions. There are six fetch + // requests, all using the first peer. + let fetch_results = fetch_effects.drain(..).map(tokio::spawn).collect_vec(); + + // Provide the first deploy and transfer on first asking. 
+ let context = ValidationContext::new() + .with_num_validators(&mut rng, 1) + .with_transactions(vec![transactions[0].clone()]) + .with_transfers(vec![transfers[0].clone()]); + reactor.handle_requests(&context).await; + + let mut missing = vec![]; + for fetch_result in fetch_results { + let mut events = fetch_result.await.unwrap(); + assert_eq!(1, events.len()); + // The event should be `TransactionFetched`. + let event = events.pop().unwrap(); + // New fetch requests will be made using a different peer for all transactions not + // already registered as fetched. + let effects = block_validator.handle_event(effect_builder, &mut rng, event); + if !effects.is_empty() { + assert!(missing.is_empty()); + missing = block_validator + .validation_states + .values() + .next() + .unwrap() + .missing_hashes(); + } + fetch_effects.extend(effects); + } + + // Handle the second set of fetch requests now. + let fetch_results = fetch_effects.drain(..).map(tokio::spawn).collect_vec(); + + // Provide the first and second deploys and transfers which haven't already been fetched on + // second asking. + let context = context + .with_transactions(vec![transactions[1].clone()]) + .with_transfers(vec![transfers[1].clone()]); + reactor.handle_requests(&context).await; + + missing.clear(); + for fetch_result in fetch_results { + let mut events = fetch_result.await.unwrap(); + assert_eq!(1, events.len()); + // The event should be `TransactionFetched`. + let event = events.pop().unwrap(); + // New fetch requests will be made using a different peer for all transactions not + // already registered as fetched. + let effects = block_validator.handle_event(effect_builder, &mut rng, event); + if !effects.is_empty() { + assert!(missing.is_empty()); + missing = block_validator + .validation_states + .values() + .next() + .unwrap() + .missing_hashes(); + } + fetch_effects.extend(effects); + } + + // Handle the final set of fetch requests now. 
+ let fetch_results = fetch_effects.into_iter().map(tokio::spawn).collect_vec(); + + // Provide all deploys and transfers not already fetched on third asking. + let context = context + .with_transactions(vec![transactions[2].clone()]) + .with_transfers(vec![transfers[2].clone()]); + reactor.handle_requests(&context).await; + + let mut effects = Effects::new(); + for fetch_result in fetch_results { + let mut events = fetch_result.await.unwrap(); + assert_eq!(1, events.len()); + // The event should be `TransactionFetched`. + let event = events.pop().unwrap(); + // Once the block is deemed valid (i.e. when the final missing transaction is + // successfully fetched) the effects will be three validation responses. + effects.extend(block_validator.handle_event(effect_builder, &mut rng, event)); + assert!(effects.is_empty() || effects.len() == peer_count as usize); + } + + for effect in effects { + tokio::spawn(effect).await.unwrap(); + } + + for validation_result in validation_results { + assert!(validation_result.await.unwrap().is_ok()); + } + }) + .await + .expect("should not hang"); +} + +#[tokio::test] +async fn should_validate_block_with_signatures() { + let mut rng = TestRng::new(); + let ttl = TimeDiff::from_millis(200); + let timestamp = Timestamp::from(1000); + let transaction_v1 = new_v1_standard(&mut rng, timestamp, ttl); + let transfer = new_transfer(&mut rng, timestamp, ttl); + let transfer_v1 = new_v1_transfer(&mut rng, timestamp, ttl); + + let context = ValidationContext::new() + .with_num_validators(&mut rng, 3) + .with_past_blocks(&mut rng, 0, 5, 0.into()) + .with_transactions(vec![transaction_v1]) + .with_transfers(vec![transfer, transfer_v1]) + .include_all_transactions() + .include_all_transfers(); + + let validators = context.get_validators(); + + let mut context = context + .with_signatures_for_block(3, 5, &validators) + .include_signatures(3, 5, &validators); + + assert!(context.proposal_is_valid(&mut rng, timestamp).await); +} + +#[tokio::test] 
+async fn should_fetch_missing_signature() { + let mut rng = TestRng::new(); + let ttl = TimeDiff::from_millis(200); + let timestamp = Timestamp::from(1000); + let transaction_v1 = new_v1_standard(&mut rng, timestamp, ttl); + let transfer = new_transfer(&mut rng, timestamp, ttl); + let transfer_v1 = new_v1_transfer(&mut rng, timestamp, ttl); + + let context = ValidationContext::new() + .with_num_validators(&mut rng, 3) + .with_past_blocks(&mut rng, 0, 5, 0.into()) + .with_transactions(vec![transaction_v1]) + .with_transfers(vec![transfer, transfer_v1]) + .include_all_transactions() + .include_all_transfers(); + + let validators = context.get_validators(); + let mut signing_validators = context.get_validators(); + let leftover = signing_validators.pop().unwrap(); // one validator will be missing from the set that signed + + let mut context = context + .with_signatures_for_block(3, 5, &signing_validators) + .with_fetchable_signatures(3, 5, &[leftover]) + .include_signatures(3, 5, &validators); + + assert!(context.proposal_is_valid(&mut rng, timestamp).await); +} + +#[tokio::test] +async fn should_fail_if_unable_to_fetch_signature() { + let mut rng = TestRng::new(); + let ttl = TimeDiff::from_millis(200); + let timestamp = Timestamp::from(1000); + let deploy = new_deploy(&mut rng, timestamp, ttl); + let transaction_v1 = new_v1_standard(&mut rng, timestamp, ttl); + let transfer = new_transfer(&mut rng, timestamp, ttl); + let transfer_v1 = new_v1_transfer(&mut rng, timestamp, ttl); + + let context = ValidationContext::new() + .with_num_validators(&mut rng, 3) + .with_past_blocks(&mut rng, 0, 5, 0.into()) + .with_transactions(vec![deploy, transaction_v1]) + .with_transfers(vec![transfer, transfer_v1]) + .include_all_transactions() + .include_all_transfers(); + + let validators = context.get_validators(); + let mut signing_validators = context.get_validators(); + let _ = signing_validators.pop().expect("must pop"); // one validator will be missing from the set that signed 
+ + let mut context = context + .with_signatures_for_block(3, 5, &signing_validators) + .include_signatures(3, 5, &validators); + + assert!(!context.proposal_is_valid(&mut rng, timestamp).await); +} + +#[tokio::test] +async fn should_fail_if_unable_to_fetch_signature_for_block_without_transactions() { + let mut rng = TestRng::new(); + let timestamp = Timestamp::from(1000); + + // No transactions in the block. + let context = ValidationContext::new() + .with_num_validators(&mut rng, 3) + .with_past_blocks(&mut rng, 0, 5, 0.into()); + + let validators = context.get_validators(); + let mut signing_validators = context.get_validators(); + let _ = signing_validators.pop(); // one validator will be missing from the set that signed + + let mut context = context + .with_signatures_for_block(3, 5, &signing_validators) + .include_signatures(3, 5, &validators); + + assert!(!context.proposal_is_valid(&mut rng, timestamp).await); +} + +#[tokio::test] +async fn should_validate_with_delayed_block() { + let mut rng = TestRng::new(); + let ttl = TimeDiff::from_millis(200); + let timestamp = Timestamp::from(1000); + let transaction_v1 = new_v1_standard(&mut rng, timestamp, ttl); + let transfer = new_transfer(&mut rng, timestamp, ttl); + let transfer_v1 = new_v1_transfer(&mut rng, timestamp, ttl); + + let context = ValidationContext::new() + .with_num_validators(&mut rng, 3) + .with_past_blocks(&mut rng, 0, 4, 0.into()) + .with_delayed_blocks(&mut rng, 5, 5, 0.into()) + .with_transactions(vec![transaction_v1]) + .with_transfers(vec![transfer, transfer_v1]) + .include_all_transactions() + .include_all_transfers(); + + let validators = context.get_validators(); + + let mut context = context + .with_signatures_for_block(3, 5, &validators) + .include_signatures(3, 5, &validators); + + assert!(context.proposal_is_valid(&mut rng, timestamp).await); +} diff --git a/node/src/components/chainspec_loader.rs b/node/src/components/chainspec_loader.rs deleted file mode 100644 index 
95cc678c4d..0000000000 --- a/node/src/components/chainspec_loader.rs +++ /dev/null @@ -1,964 +0,0 @@ -//! Chainspec loader component. -//! -//! The chainspec loader initializes a node by reading information from the chainspec or an -//! upgrade_point, and committing it to the permanent storage. -//! -//! See -//! -//! for full details. - -// TODO - remove once schemars stops causing warning. -#![allow(clippy::field_reassign_with_default)] - -use std::{ - fmt::{self, Display, Formatter}, - fs, - path::{Path, PathBuf}, - str::FromStr, - sync::Arc, - time::Duration, -}; - -use datasize::DataSize; -use derive_more::From; -use schemars::JsonSchema; -use serde::{Deserialize, Serialize}; -use tokio::task; -use tracing::{debug, error, info, trace, warn}; - -use casper_execution_engine::{ - core::engine_state::{ - self, - genesis::GenesisResult, - upgrade::{UpgradeConfig, UpgradeResult}, - }, - shared::stored_value::StoredValue, -}; -use casper_types::{bytesrepr::FromBytes, EraId, ProtocolVersion}; - -#[cfg(test)] -use crate::utils::RESOURCES_PATH; -use crate::{ - components::Component, - crypto::hash::Digest, - effect::{ - announcements::ChainspecLoaderAnnouncement, - requests::{ - ChainspecLoaderRequest, ContractRuntimeRequest, StateStoreRequest, StorageRequest, - }, - EffectBuilder, EffectExt, Effects, - }, - reactor::ReactorExit, - types::{ - chainspec::{Error, ProtocolConfig, CHAINSPEC_NAME}, - ActivationPoint, Block, BlockHash, BlockHeader, Chainspec, ChainspecInfo, ExitCode, - }, - utils::{self, Loadable}, - NodeRng, -}; - -const UPGRADE_CHECK_INTERVAL: Duration = Duration::from_secs(60); - -/// `ChainspecHandler` events. -#[derive(Debug, From, Serialize)] -pub enum Event { - /// The result of getting the highest block from storage. - Initialize { - maybe_highest_block: Option>, - }, - /// The result of contract runtime running the genesis process. 
- CommitGenesisResult(#[serde(skip_serializing)] Result), - /// The result of contract runtime running the upgrade process. - UpgradeResult(#[serde(skip_serializing)] Result), - #[from] - Request(ChainspecLoaderRequest), - /// Check config dir to see if an upgrade activation point is available, and if so announce it. - CheckForNextUpgrade, - /// If the result of checking for an upgrade is successful, it is passed here. - GotNextUpgrade(NextUpgrade), - /// The result of the `ChainspecHandler` putting a `Chainspec` to the storage component. - PutToStorage { version: ProtocolVersion }, -} - -impl Display for Event { - fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { - match self { - Event::Initialize { - maybe_highest_block, - } => { - write!( - formatter, - "initialize(maybe_highest_block: {})", - maybe_highest_block - .as_ref() - .map_or_else(|| "None".to_string(), |block| block.to_string()) - ) - } - Event::CommitGenesisResult(_) => write!(formatter, "commit genesis result"), - Event::UpgradeResult(_) => write!(formatter, "contract runtime upgrade result"), - Event::Request(req) => write!(formatter, "chainspec_loader request: {}", req), - Event::CheckForNextUpgrade => { - write!(formatter, "check for next upgrade") - } - Event::GotNextUpgrade(next_upgrade) => { - write!(formatter, "got {}", next_upgrade) - } - Event::PutToStorage { version } => { - write!(formatter, "put chainspec {} to storage", version) - } - } - } -} - -/// Information about the next protocol upgrade. 
-#[derive(PartialEq, Eq, DataSize, Debug, Serialize, Deserialize, Clone, JsonSchema)] -pub struct NextUpgrade { - activation_point: ActivationPoint, - #[data_size(skip)] - #[schemars(with = "String")] - protocol_version: ProtocolVersion, -} - -impl NextUpgrade { - pub(crate) fn new( - activation_point: ActivationPoint, - protocol_version: ProtocolVersion, - ) -> Self { - NextUpgrade { - activation_point, - protocol_version, - } - } - - pub(crate) fn activation_point(&self) -> ActivationPoint { - self.activation_point - } -} - -impl From for NextUpgrade { - fn from(protocol_config: ProtocolConfig) -> Self { - NextUpgrade { - activation_point: protocol_config.activation_point, - protocol_version: protocol_config.version, - } - } -} - -impl Display for NextUpgrade { - fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { - write!( - formatter, - "next upgrade to {} at start of era {}", - self.protocol_version, - self.activation_point.era_id() - ) - } -} - -/// Basic information about the current run of the node software. -#[derive(Clone, Debug)] -pub struct CurrentRunInfo { - pub activation_point: ActivationPoint, - pub protocol_version: ProtocolVersion, - pub initial_state_root_hash: Digest, -} - -#[derive(Clone, DataSize, Debug)] -pub struct ChainspecLoader { - chainspec: Arc, - /// The path to the folder where all chainspec and upgrade_point files will be stored in - /// subdirs corresponding to their versions. - root_dir: PathBuf, - /// If `Some`, we're finished loading and committing the chainspec. - reactor_exit: Option, - /// The initial state root hash for this session. 
- initial_state_root_hash: Digest, - next_upgrade: Option, - initial_block: Option, -} - -impl ChainspecLoader { - pub(crate) fn new( - chainspec_dir: P, - effect_builder: EffectBuilder, - ) -> Result<(Self, Effects), Error> - where - P: AsRef, - REv: From + From + From + Send, - { - Ok(Self::new_with_chainspec_and_path( - Arc::new(Chainspec::from_path(&chainspec_dir.as_ref())?), - chainspec_dir, - effect_builder, - )) - } - - #[cfg(test)] - pub(crate) fn new_with_chainspec( - chainspec: Arc, - effect_builder: EffectBuilder, - ) -> (Self, Effects) - where - REv: From + From + From + Send, - { - Self::new_with_chainspec_and_path(chainspec, &RESOURCES_PATH.join("local"), effect_builder) - } - - fn new_with_chainspec_and_path( - chainspec: Arc, - chainspec_dir: P, - effect_builder: EffectBuilder, - ) -> (Self, Effects) - where - P: AsRef, - REv: From + From + From + Send, - { - chainspec.validate_config(); - let root_dir = chainspec_dir - .as_ref() - .parent() - .unwrap_or_else(|| { - panic!("chainspec dir must have a parent"); - }) - .to_path_buf(); - - let next_upgrade = next_upgrade(root_dir.clone(), chainspec.protocol_config.version); - - // If the next activation point is the same as the current chainspec one, we've installed - // two new versions, where the first which we're currently running should be immediately - // replaced by the second. - let should_stop = if let Some(next_activation_point) = next_upgrade - .as_ref() - .map(|upgrade| upgrade.activation_point) - { - chainspec.protocol_config.activation_point == next_activation_point - } else { - false - }; - - // In case this is a version which should be immediately replaced by the next version, don't - // create any effects so we exit cleanly for an upgrade without touching the storage - // component. Otherwise create effects which will allow us to initialize properly. 
- let mut effects = if should_stop { - Effects::new() - } else { - effect_builder - .get_highest_block_from_storage() - .event(|highest_block| Event::Initialize { - maybe_highest_block: highest_block.map(Box::new), - }) - }; - - // Start regularly checking for the next upgrade. - effects.extend( - effect_builder - .set_timeout(UPGRADE_CHECK_INTERVAL) - .event(|_| Event::CheckForNextUpgrade), - ); - - let reactor_exit = should_stop.then(|| ReactorExit::ProcessShouldExit(ExitCode::Success)); - - let chainspec_loader = ChainspecLoader { - chainspec, - root_dir, - reactor_exit, - initial_state_root_hash: Digest::default(), - next_upgrade, - initial_block: None, - }; - - (chainspec_loader, effects) - } - - /// This is a workaround while we have multiple reactors. It should be used in the joiner and - /// validator reactors' constructors to start the recurring task of checking for upgrades. The - /// recurring tasks of the previous reactors will be cancelled when the relevant reactor is - /// destroyed during transition. - pub(crate) fn start_checking_for_upgrades( - &self, - effect_builder: EffectBuilder, - ) -> Effects - where - REv: From + Send, - { - self.check_for_next_upgrade(effect_builder) - } - - pub(crate) fn reactor_exit(&self) -> Option { - self.reactor_exit - } - - /// The state root hash with which this session is starting. It will be the result of running - /// `ContractRuntime::commit_genesis()` or `ContractRuntime::upgrade()` or else the state root - /// hash specified in the highest block. 
- pub(crate) fn initial_state_root_hash(&self) -> Digest { - self.initial_state_root_hash - } - - pub(crate) fn chainspec(&self) -> &Arc { - &self.chainspec - } - - pub(crate) fn next_upgrade(&self) -> Option { - self.next_upgrade.clone() - } - - pub(crate) fn initial_block_header(&self) -> Option<&BlockHeader> { - self.initial_block.as_ref().map(|block| block.header()) - } - - pub(crate) fn initial_block(&self) -> Option<&Block> { - self.initial_block.as_ref() - } - - pub(crate) fn initial_block_hash(&self) -> Option { - self.initial_block_header().map(|hdr| hdr.hash()) - } - - /// This returns the era at which we will be starting the operation, assuming the highest known - /// block is the last one. It will return the era of the highest known block, unless it is a - /// switch block, in which case it returns the successor to the era of the highest known block. - pub(crate) fn initial_era(&self) -> EraId { - // We want to start the Era Supervisor at the era right after the highest block we - // have. If the block is a switch block, that will be the era that comes next. If - // it's not, we continue the era the highest block belongs to. - self.initial_block_header() - .map(BlockHeader::next_block_era_id) - .unwrap_or_else(|| EraId::from(0)) - } - - /// Returns the era ID of where we should reset back to. This means stored blocks in that and - /// subsequent eras are ignored (conceptually deleted from storage). - pub(crate) fn hard_reset_to_start_of_era(&self) -> Option { - self.chainspec - .protocol_config - .hard_reset - .then(|| self.chainspec.protocol_config.activation_point.era_id()) - } - - fn handle_initialize( - &mut self, - effect_builder: EffectBuilder, - maybe_highest_block: Option>, - ) -> Effects - where - REv: From + From + From + Send, - { - let highest_block = match maybe_highest_block { - Some(block) => { - self.initial_block = Some(*block.clone()); - block - } - None => { - // This is an initial run since we have no blocks. 
- if self.chainspec.is_genesis() { - // This is a valid initial run on a new network at genesis. - trace!("valid initial run at genesis"); - return effect_builder - .commit_genesis(Arc::clone(&self.chainspec)) - .event(Event::CommitGenesisResult); - } else { - // This is an invalid run of a node version issued after genesis. Instruct the - // process to exit and downgrade the version. - warn!( - "invalid run, no blocks stored but not a genesis chainspec: exit to \ - downgrade" - ); - self.reactor_exit = - Some(ReactorExit::ProcessShouldExit(ExitCode::DowngradeVersion)); - return Effects::new(); - } - } - }; - let highest_block_era_id = highest_block.header().era_id(); - - let previous_protocol_version = highest_block.header().protocol_version(); - let current_chainspec_activation_point = - self.chainspec.protocol_config.activation_point.era_id(); - - if highest_block_era_id.successor() == current_chainspec_activation_point { - if highest_block.header().is_switch_block() { - // This is a valid run immediately after upgrading the node version. - trace!("valid run immediately after upgrade"); - let upgrade_config = - self.new_upgrade_config(&highest_block, previous_protocol_version); - return effect_builder - .upgrade_contract_runtime(upgrade_config) - .event(Event::UpgradeResult); - } else { - // This is an invalid run where blocks are missing from storage. Try exiting the - // process and downgrading the version to recover the missing blocks. - // - // TODO - if migrating data yields a new empty block as a means to store the - // post-migration global state hash, we'll come to this code branch, and we - // should not exit the process in that case. 
- warn!("invalid run, expected highest block to be switch block: exit to downgrade"); - self.reactor_exit = - Some(ReactorExit::ProcessShouldExit(ExitCode::DowngradeVersion)); - return Effects::new(); - } - } - - if highest_block_era_id < current_chainspec_activation_point { - // This is an invalid run where blocks are missing from storage. Try exiting the - // process and downgrading the version to recover the missing blocks. - warn!("invalid run, missing blocks from storage: exit to downgrade"); - self.reactor_exit = Some(ReactorExit::ProcessShouldExit(ExitCode::DowngradeVersion)); - return Effects::new(); - } - - let debug_assert_version_match = || { - debug_assert!(previous_protocol_version == self.chainspec.protocol_config.version); - }; - - let next_upgrade_activation_point = match self.next_upgrade { - Some(ref next_upgrade) => next_upgrade.activation_point.era_id(), - None => { - // This is a valid run, restarted after an unplanned shutdown. - debug_assert_version_match(); - self.initial_state_root_hash = *highest_block.state_root_hash(); - info!("valid run after an unplanned shutdown with no scheduled upgrade"); - self.reactor_exit = Some(ReactorExit::ProcessShouldContinue); - return Effects::new(); - } - }; - - if highest_block_era_id < next_upgrade_activation_point { - // This is a valid run, restarted after an unplanned shutdown. - debug_assert_version_match(); - self.initial_state_root_hash = *highest_block.state_root_hash(); - info!("valid run after an unplanned shutdown before upgrade due"); - self.reactor_exit = Some(ReactorExit::ProcessShouldContinue); - return Effects::new(); - } - - // The is an invalid run as the highest block era ID >= next activation point, so we're - // running an outdated version. Exit with success to indicate we should upgrade. - // - // TODO - once the block includes the protocol version, we can deduce here whether we're - // running a version where we missed an upgrade and ran on a fork. 
In that case, we - // should set our exit code to `ExitCode::Abort`. - warn!("running outdated version: exit to upgrade"); - self.reactor_exit = Some(ReactorExit::ProcessShouldExit(ExitCode::Success)); - Effects::new() - } - - fn new_upgrade_config( - &self, - block: &Block, - previous_version: ProtocolVersion, - ) -> Box { - let new_version = self.chainspec.protocol_config.version; - let global_state_update = self - .chainspec - .protocol_config - .global_state_update - .as_ref() - .map(|state_update| { - state_update - .0 - .iter() - .map(|(key, stored_value_bytes)| { - let stored_value = StoredValue::from_bytes(stored_value_bytes) - .unwrap_or_else(|error| { - panic!( - "failed to parse global state value as StoredValue for upgrade: {}", - error - ) - }) - .0; - (*key, stored_value) - }) - .collect() - }) - .unwrap_or_default(); - Box::new(UpgradeConfig::new( - (*block.state_root_hash()).into(), - previous_version, - new_version, - Some(self.chainspec.wasm_config), - Some(self.chainspec.system_costs_config), - Some(self.chainspec.protocol_config.activation_point.era_id()), - Some(self.chainspec.core_config.validator_slots), - Some(self.chainspec.core_config.auction_delay), - Some(self.chainspec.core_config.locked_funds_period.millis()), - Some(self.chainspec.core_config.round_seigniorage_rate), - Some(self.chainspec.core_config.unbonding_delay), - global_state_update, - )) - } - - fn handle_commit_genesis_result( - &mut self, - result: Result, - ) -> Effects { - match result { - Ok(genesis_result) => match genesis_result { - GenesisResult::RootNotFound - | GenesisResult::KeyNotFound(_) - | GenesisResult::TypeMismatch(_) - | GenesisResult::Serialization(_) => { - error!("failed to commit genesis: {}", genesis_result); - self.reactor_exit = Some(ReactorExit::ProcessShouldExit(ExitCode::Abort)); - } - GenesisResult::Success { - post_state_hash, - effect, - } => { - info!("chainspec name {}", self.chainspec.network_config.name); - info!("genesis state root hash {}", 
post_state_hash); - trace!(%post_state_hash, ?effect); - self.reactor_exit = Some(ReactorExit::ProcessShouldContinue); - self.initial_state_root_hash = post_state_hash.into(); - } - }, - Err(error) => { - error!("failed to commit genesis: {}", error); - self.reactor_exit = Some(ReactorExit::ProcessShouldExit(ExitCode::Abort)); - } - } - Effects::new() - } - - fn handle_upgrade_result( - &mut self, - result: Result, - ) -> Effects { - match result { - Ok(upgrade_result) => match upgrade_result { - UpgradeResult::RootNotFound - | UpgradeResult::KeyNotFound(_) - | UpgradeResult::TypeMismatch(_) - | UpgradeResult::Serialization(_) => { - error!("failed to upgrade contract runtime: {}", upgrade_result); - self.reactor_exit = Some(ReactorExit::ProcessShouldExit(ExitCode::Abort)); - } - UpgradeResult::Success { - post_state_hash, - effect, - } => { - info!("chainspec name {}", self.chainspec.network_config.name); - info!("state root hash {}", post_state_hash); - trace!(%post_state_hash, ?effect); - self.reactor_exit = Some(ReactorExit::ProcessShouldContinue); - self.initial_state_root_hash = post_state_hash.into(); - } - }, - Err(error) => { - error!("failed to upgrade contract runtime: {}", error); - self.reactor_exit = Some(ReactorExit::ProcessShouldExit(ExitCode::Abort)); - } - } - Effects::new() - } - - fn new_chainspec_info(&self) -> ChainspecInfo { - ChainspecInfo::new( - self.chainspec.network_config.name.clone(), - self.initial_state_root_hash, - self.next_upgrade.clone(), - ) - } - - fn get_current_run_info(&self) -> CurrentRunInfo { - CurrentRunInfo { - activation_point: self.chainspec.protocol_config.activation_point, - protocol_version: self.chainspec.protocol_config.version, - initial_state_root_hash: self.initial_state_root_hash, - } - } - - fn check_for_next_upgrade(&self, effect_builder: EffectBuilder) -> Effects - where - REv: From + Send, - { - let root_dir = self.root_dir.clone(); - let current_version = self.chainspec.protocol_config.version; - let mut 
effects = async move { - let maybe_next_upgrade = - task::spawn_blocking(move || next_upgrade(root_dir, current_version)) - .await - .unwrap_or_else(|error| { - warn!(%error, "failed to join tokio task"); - None - }); - if let Some(next_upgrade) = maybe_next_upgrade { - effect_builder - .announce_upgrade_activation_point_read(next_upgrade) - .await - } - } - .ignore(); - - effects.extend( - effect_builder - .set_timeout(UPGRADE_CHECK_INTERVAL) - .event(|_| Event::CheckForNextUpgrade), - ); - - effects - } - - fn handle_got_next_upgrade(&mut self, next_upgrade: NextUpgrade) -> Effects { - debug!("got {}", next_upgrade); - if let Some(ref current_point) = self.next_upgrade { - if next_upgrade != *current_point { - info!( - new_point=%next_upgrade.activation_point, - %current_point, - "changing upgrade activation point" - ); - } - } - self.next_upgrade = Some(next_upgrade); - Effects::new() - } -} - -impl Component for ChainspecLoader -where - REv: From - + From - + From - + From - + From - + Send, -{ - type Event = Event; - type ConstructionError = Error; - - fn handle_event( - &mut self, - effect_builder: EffectBuilder, - _rng: &mut NodeRng, - event: Self::Event, - ) -> Effects { - trace!("{}", event); - match event { - Event::Initialize { - maybe_highest_block: highest_block, - } => self.handle_initialize(effect_builder, highest_block), - Event::CommitGenesisResult(result) => self.handle_commit_genesis_result(result), - Event::UpgradeResult(result) => self.handle_upgrade_result(result), - Event::Request(ChainspecLoaderRequest::GetChainspecInfo(responder)) => { - responder.respond(self.new_chainspec_info()).ignore() - } - Event::Request(ChainspecLoaderRequest::GetCurrentRunInfo(responder)) => { - responder.respond(self.get_current_run_info()).ignore() - } - Event::CheckForNextUpgrade => self.check_for_next_upgrade(effect_builder), - Event::GotNextUpgrade(next_upgrade) => self.handle_got_next_upgrade(next_upgrade), - Event::PutToStorage { version } => { - 
debug!("stored chainspec {}", version); - effect_builder - .commit_genesis(Arc::clone(&self.chainspec)) - .event(Event::CommitGenesisResult) - } - } - } -} - -/// This struct can be parsed from a TOML-encoded chainspec file. It means that as the -/// chainspec format changes over versions, as long as we maintain the protocol config in this form -/// in the chainspec file, it can continue to be parsed as an `UpgradePoint`. -#[derive(Deserialize)] -struct UpgradePoint { - #[serde(rename = "protocol")] - pub(crate) protocol_config: ProtocolConfig, -} - -impl UpgradePoint { - /// Parses a chainspec file at the given path as an `UpgradePoint`. - fn from_chainspec_path>(path: P) -> Result { - let bytes = utils::read_file(path.as_ref().join(&CHAINSPEC_NAME)) - .map_err(Error::LoadUpgradePoint)?; - Ok(toml::from_slice(&bytes)?) - } -} - -fn dir_name_from_version(version: &ProtocolVersion) -> PathBuf { - PathBuf::from(version.to_string().replace(".", "_")) -} - -/// Iterates the given path, returning the subdir representing the immediate next SemVer version -/// after `current_version`. -/// -/// Subdir names should be semvers with dots replaced with underscores. -fn next_installed_version( - dir: &Path, - current_version: &ProtocolVersion, -) -> Result { - let max_version = - ProtocolVersion::from_parts(u32::max_value(), u32::max_value(), u32::max_value()); - - let mut next_version = max_version; - let mut read_version = false; - for entry in fs::read_dir(dir).map_err(|error| Error::ReadDir { - dir: dir.to_path_buf(), - error, - })? 
{ - let path = match entry { - Ok(dir_entry) => dir_entry.path(), - Err(error) => { - debug!(dir=%dir.display(), %error, "bad entry while reading dir"); - continue; - } - }; - - let subdir_name = match path.file_name() { - Some(name) => name.to_string_lossy().replace("_", "."), - None => continue, - }; - - let version = match ProtocolVersion::from_str(&subdir_name) { - Ok(version) => version, - Err(error) => { - trace!(%error, path=%path.display(), "failed to get a version"); - continue; - } - }; - - if version > *current_version && version < next_version { - next_version = version; - } - read_version = true; - } - - if !read_version { - return Err(Error::NoVersionSubdirFound { - dir: dir.to_path_buf(), - }); - } - - if next_version == max_version { - next_version = *current_version; - } - - Ok(next_version) -} - -/// Uses `next_installed_version()` to find the next versioned subdir. If it exists, reads the -/// UpgradePoint file from there and returns its version and activation point. Returns `None` if -/// there is no greater version available, or if any step errors. 
-fn next_upgrade(dir: PathBuf, current_version: ProtocolVersion) -> Option { - let next_version = match next_installed_version(&dir, ¤t_version) { - Ok(version) => version, - Err(error) => { - warn!(dir=%dir.display(), %error, "failed to get a valid version from subdirs"); - return None; - } - }; - - if next_version <= current_version { - return None; - } - - let subdir = dir.join(dir_name_from_version(&next_version)); - let upgrade_point = match UpgradePoint::from_chainspec_path(&subdir) { - Ok(upgrade_point) => upgrade_point, - Err(error) => { - debug!(subdir=%subdir.display(), %error, "failed to load upgrade point"); - return None; - } - }; - - if upgrade_point.protocol_config.version != next_version { - warn!( - upgrade_point_version=%upgrade_point.protocol_config.version, - subdir_version=%next_version, - "next chainspec installed to wrong subdir" - ); - return None; - } - - Some(NextUpgrade::from(upgrade_point.protocol_config)) -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{testing::TestRng, types::chainspec::CHAINSPEC_NAME}; - - #[test] - fn should_get_next_installed_version() { - let tempdir = tempfile::tempdir().expect("should create temp dir"); - - let get_next_version = |current_version: &ProtocolVersion| { - next_installed_version(tempdir.path(), current_version).unwrap() - }; - - let mut current = ProtocolVersion::from_parts(0, 0, 0); - let mut next_version = ProtocolVersion::from_parts(1, 0, 0); - fs::create_dir(tempdir.path().join("1_0_0")).unwrap(); - assert_eq!(get_next_version(¤t), next_version); - current = next_version; - - next_version = ProtocolVersion::from_parts(1, 2, 3); - fs::create_dir(tempdir.path().join("1_2_3")).unwrap(); - assert_eq!(get_next_version(¤t), next_version); - current = next_version; - - fs::create_dir(tempdir.path().join("1_0_3")).unwrap(); - assert_eq!(get_next_version(¤t), next_version); - - fs::create_dir(tempdir.path().join("2_2_2")).unwrap(); - fs::create_dir(tempdir.path().join("3_3_3")).unwrap(); - 
assert_eq!( - get_next_version(¤t), - ProtocolVersion::from_parts(2, 2, 2) - ); - } - - #[test] - fn should_ignore_invalid_versions() { - let tempdir = tempfile::tempdir().expect("should create temp dir"); - - // Executes `next_installed_version()` and asserts the resulting error as a string starts - // with the given text. - let min_version = ProtocolVersion::from_parts(0, 0, 0); - let assert_error_starts_with = |path: &Path, expected: String| { - let error_msg = next_installed_version(path, &min_version) - .unwrap_err() - .to_string(); - assert!( - error_msg.starts_with(&expected), - "Error message expected to start with \"{}\"\nActual error message: \"{}\"", - expected, - error_msg - ); - }; - - // Try with a non-existent dir. - let non_existent_dir = Path::new("not_a_dir"); - assert_error_starts_with( - non_existent_dir, - format!("failed to read dir {}", non_existent_dir.display()), - ); - - // Try with a dir which has no subdirs. - assert_error_starts_with( - tempdir.path(), - format!( - "failed to get a valid version from subdirs in {}", - tempdir.path().display() - ), - ); - - // Try with a dir which has one subdir which is not a valid version representation. - fs::create_dir(tempdir.path().join("not_a_version")).unwrap(); - assert_error_starts_with( - tempdir.path(), - format!( - "failed to get a valid version from subdirs in {}", - tempdir.path().display() - ), - ); - - // Try with a dir which has a valid and invalid subdir - the invalid one should be ignored. - fs::create_dir(tempdir.path().join("1_2_3")).unwrap(); - assert_eq!( - next_installed_version(tempdir.path(), &min_version).unwrap(), - ProtocolVersion::from_parts(1, 2, 3) - ); - } - - /// Creates the appropriate subdir in `root_dir`, and adds a random chainspec.toml with the - /// protocol_config.version field set to `version`. 
- fn install_chainspec( - rng: &mut TestRng, - root_dir: &Path, - version: &ProtocolVersion, - ) -> Chainspec { - let mut chainspec = Chainspec::random(rng); - chainspec.protocol_config.version = *version; - - let subdir = root_dir.join(dir_name_from_version(&version)); - fs::create_dir(&subdir).unwrap(); - - let path = subdir.join(CHAINSPEC_NAME); - fs::write( - path, - toml::to_string_pretty(&chainspec).expect("should encode to toml"), - ) - .expect("should install chainspec"); - chainspec - } - - #[test] - fn should_get_next_upgrade() { - let tempdir = tempfile::tempdir().expect("should create temp dir"); - - let next_point = |current_version: &ProtocolVersion| { - next_upgrade(tempdir.path().to_path_buf(), *current_version).unwrap() - }; - - let mut rng = crate::new_rng(); - - let mut current = ProtocolVersion::from_parts(0, 9, 9); - let v1_0_0 = ProtocolVersion::from_parts(1, 0, 0); - let chainspec_v1_0_0 = install_chainspec(&mut rng, tempdir.path(), &v1_0_0); - assert_eq!( - next_point(¤t), - chainspec_v1_0_0.protocol_config.into() - ); - - current = v1_0_0; - let v1_0_3 = ProtocolVersion::from_parts(1, 0, 3); - let chainspec_v1_0_3 = install_chainspec(&mut rng, tempdir.path(), &v1_0_3); - assert_eq!( - next_point(¤t), - chainspec_v1_0_3.protocol_config.into() - ); - } - - #[test] - fn should_not_get_old_or_invalid_upgrade() { - let tempdir = tempfile::tempdir().expect("should create temp dir"); - - let maybe_next_point = |current_version: &ProtocolVersion| { - next_upgrade(tempdir.path().to_path_buf(), *current_version) - }; - - let mut rng = crate::new_rng(); - - // Check we return `None` if there are no version subdirs. - let v1_0_0 = ProtocolVersion::from_parts(1, 0, 0); - let mut current = v1_0_0; - assert!(maybe_next_point(¤t).is_none()); - - // Check we return `None` if current_version == next_version. 
- let chainspec_v1_0_0 = install_chainspec(&mut rng, tempdir.path(), &v1_0_0); - assert!(maybe_next_point(¤t).is_none()); - - // Check we return `None` if current_version > next_version. - current = ProtocolVersion::from_parts(2, 0, 0); - assert!(maybe_next_point(¤t).is_none()); - - // Check we return `None` if we find an upgrade file where the protocol_config.version field - // doesn't match the subdir name. - let v0_9_9 = ProtocolVersion::from_parts(0, 9, 9); - current = v0_9_9; - assert!(maybe_next_point(¤t).is_some()); - - let mut chainspec_v0_9_9 = chainspec_v1_0_0; - chainspec_v0_9_9.protocol_config.version = v0_9_9; - let path_v1_0_0 = tempdir - .path() - .join(dir_name_from_version(&v1_0_0)) - .join(CHAINSPEC_NAME); - fs::write( - &path_v1_0_0, - toml::to_string_pretty(&chainspec_v0_9_9).expect("should encode to toml"), - ) - .expect("should install upgrade point"); - assert!(maybe_next_point(¤t).is_none()); - - // Check we return `None` if the next version upgrade_point file is corrupt. - fs::write(&path_v1_0_0, "bad data".as_bytes()).unwrap(); - assert!(maybe_next_point(¤t).is_none()); - - // Check we return `None` if the next version upgrade_point file is missing. - fs::remove_file(&path_v1_0_0).unwrap(); - assert!(maybe_next_point(¤t).is_none()); - } -} diff --git a/node/src/components/collector.rs b/node/src/components/collector.rs deleted file mode 100644 index fe3fb0de28..0000000000 --- a/node/src/components/collector.rs +++ /dev/null @@ -1,88 +0,0 @@ -use std::{ - collections::HashSet, - convert::Infallible, - fmt::{Debug, Display}, - hash::Hash, -}; - -use derive_more::From; -use serde::Serialize; -use tracing::debug; - -use crate::{ - effect::{announcements::NetworkAnnouncement, EffectBuilder, Effects}, - types::NodeId, - NodeRng, -}; - -use super::Component; - -/// A network payload collector. -/// -/// Stores each received payload. -#[derive(Debug)] -pub struct Collector { - pub payloads: HashSet, -} - -impl Collector

{ - /// Creates a new collector. - pub fn new() -> Self { - Collector { - payloads: HashSet::new(), - } - } -} - -/// Collector event. -#[derive(Debug, From, Serialize)] -pub enum Event

{ - #[from] - NetworkAnnouncement(NetworkAnnouncement), -} - -impl

Display for Event

-where - P: Debug, -{ - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - Debug::fmt(self, f) - } -} - -impl Component for Collector

-where - P: Display + Debug + Collectable, -{ - type Event = Event

; - type ConstructionError = Infallible; - - fn handle_event( - &mut self, - _effect_builder: EffectBuilder, - _rng: &mut NodeRng, - event: Self::Event, - ) -> Effects { - #[allow(clippy::single_match)] - match event { - Event::NetworkAnnouncement(NetworkAnnouncement::MessageReceived { - payload, .. - }) => { - debug!("collected {}", payload); - self.payloads.insert(payload.into_collectable()); - } - _ => {} - } - Effects::new() - } -} - -/// Collectable item trait. -/// -/// Some items may be collected not by themselves, but in a modified form (e.g. hash only). -pub trait Collectable { - type CollectedType: Eq + Hash; - - /// Transforms the item into the ultimately collected item. - fn into_collectable(self) -> Self::CollectedType; -} diff --git a/node/src/components/consensus.rs b/node/src/components/consensus.rs index 2dfe0fa357..02b4a7cea6 100644 --- a/node/src/components/consensus.rs +++ b/node/src/components/consensus.rs @@ -1,87 +1,157 @@ //! The consensus component. Provides distributed consensus among the nodes in the network. 
-#![warn(clippy::integer_arithmetic)] +#![warn(clippy::arithmetic_side_effects)] -mod candidate_block; mod cl_context; mod config; mod consensus_protocol; mod era_supervisor; #[macro_use] -mod highway_core; +pub mod highway_core; +pub(crate) mod error; +mod leader_sequence; mod metrics; -mod protocols; +pub mod protocols; #[cfg(test)] -mod tests; +pub(crate) mod tests; mod traits; +pub mod utils; +mod validator_change; use std::{ - collections::{BTreeMap, HashMap}, - convert::Infallible, + borrow::Cow, fmt::{self, Debug, Display, Formatter}, + sync::Arc, time::Duration, }; use datasize::DataSize; use derive_more::From; -use hex_fmt::HexFmt; use serde::{Deserialize, Serialize}; -use tracing::error; +use tracing::{info, trace}; -use casper_types::{EraId, PublicKey, U512}; +use casper_types::{BlockHash, BlockHeader, EraId, Timestamp}; use crate::{ components::Component, - crypto::hash::Digest, effect::{ - announcements::{BlocklistAnnouncement, ConsensusAnnouncement}, + announcements::{ + ConsensusAnnouncement, FatalAnnouncement, MetaBlockAnnouncement, + PeerBehaviorAnnouncement, + }, + diagnostics_port::DumpConsensusStateRequest, + incoming::{ConsensusDemand, ConsensusMessageIncoming}, requests::{ - BlockProposerRequest, BlockValidationRequest, ChainspecLoaderRequest, ConsensusRequest, - ContractRuntimeRequest, LinearChainRequest, NetworkRequest, StorageRequest, + BlockValidationRequest, ChainspecRawBytesRequest, ConsensusRequest, + ContractRuntimeRequest, NetworkInfoRequest, NetworkRequest, StorageRequest, + TransactionBufferRequest, }, - EffectBuilder, Effects, + EffectBuilder, EffectExt, Effects, }, - fatal, + failpoints::FailpointActivation, protocol::Message, reactor::ReactorEvent, - types::{ActivationPoint, Block, BlockHash, BlockHeader, ProtoBlock, Timestamp}, + types::{BlockPayload, InvalidProposalError, NodeId}, NodeRng, }; +use protocols::{highway::HighwayProtocol, zug::Zug}; +use traits::Context; -use crate::effect::EffectExt; -pub use config::Config; 
-pub(crate) use consensus_protocol::{BlockContext, EraReport}; -pub(crate) use era_supervisor::EraSupervisor; -pub(crate) use protocols::highway::HighwayProtocol; -use traits::NodeIdT; - +pub use cl_context::ClContext; +pub(crate) use config::{ChainspecConsensusExt, Config}; +pub(crate) use consensus_protocol::{BlockContext, ProposedBlock}; +pub(crate) use era_supervisor::{debug::EraDump, EraSupervisor, SerializedMessage}; +#[cfg(test)] +pub(crate) use highway_core::highway::Vertex as HighwayVertex; +pub(crate) use leader_sequence::LeaderSequence; +pub(crate) use protocols::highway::max_rounds_per_era; #[cfg(test)] -pub(crate) use era_supervisor::oldest_bonded_era; +pub(crate) use protocols::highway::HighwayMessage; + +const COMPONENT_NAME: &str = "consensus"; + +#[allow(clippy::arithmetic_side_effects)] +mod relaxed { + // This module exists solely to exempt the `EnumDiscriminants` macro generated code from the + // module-wide `clippy::arithmetic_side_effects` lint. + + use casper_types::{EraId, PublicKey}; + use datasize::DataSize; + use serde::{Deserialize, Serialize}; + use strum::EnumDiscriminants; + use super::era_supervisor::SerializedMessage; + + #[derive(DataSize, Clone, Serialize, Deserialize, EnumDiscriminants)] + #[strum_discriminants(derive(strum::EnumIter))] + pub(crate) enum ConsensusMessage { + /// A protocol message, to be handled by the instance in the specified era. + Protocol { + era_id: EraId, + payload: SerializedMessage, + }, + /// A request for evidence against the specified validator, from any era that is still + /// bonded in `era_id`. + EvidenceRequest { era_id: EraId, pub_key: PublicKey }, + } +} +pub(crate) use relaxed::{ConsensusMessage, ConsensusMessageDiscriminants}; + +/// A request to be handled by the consensus protocol instance in a particular era. 
+#[derive(DataSize, Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash, From)] +pub(crate) enum EraRequest +where + C: Context, +{ + Zug(protocols::zug::SyncRequest), +} + +/// A protocol request message, to be handled by the instance in the specified era. #[derive(DataSize, Clone, Serialize, Deserialize)] -pub enum ConsensusMessage { - /// A protocol message, to be handled by the instance in the specified era. - Protocol { era_id: EraId, payload: Vec }, - /// A request for evidence against the specified validator, from any era that is still bonded - /// in `era_id`. - EvidenceRequest { era_id: EraId, pub_key: PublicKey }, +pub(crate) struct ConsensusRequestMessage { + era_id: EraId, + payload: SerializedMessage, } /// An ID to distinguish different timers. What they are used for is specific to each consensus /// protocol implementation. -#[derive(DataSize, Clone, Copy, Debug, Eq, PartialEq)] +#[derive(DataSize, Clone, Copy, Debug, Eq, PartialEq, Hash)] pub struct TimerId(pub u8); /// An ID to distinguish queued actions. What they are used for is specific to each consensus /// protocol implementation. -#[derive(DataSize, Clone, Copy, Debug, Eq, PartialEq)] +#[derive(DataSize, Clone, Copy, Debug, Eq, PartialEq, Hash)] pub struct ActionId(pub u8); +/// Payload for a block to be proposed. +#[derive(DataSize, Debug, From)] +pub struct NewBlockPayload { + pub(crate) era_id: EraId, + pub(crate) block_payload: Arc, + pub(crate) block_context: BlockContext, +} + +/// The result of validation of a ProposedBlock. +#[derive(DataSize, Debug, From)] +pub struct ResolveValidity { + era_id: EraId, + sender: NodeId, + proposed_block: ProposedBlock, + maybe_error: Option>, +} + /// Consensus component event. #[derive(DataSize, Debug, From)] -pub enum Event { +pub(crate) enum Event { /// An incoming network message. 
- MessageReceived { sender: I, msg: ConsensusMessage }, + #[from] + Incoming(ConsensusMessageIncoming), + /// A variant used with failpoints - when a message arrives, we fire this event with a delay, + /// and it also causes the message to be handled. + DelayedIncoming(ConsensusMessageIncoming), + /// An incoming demand message. + #[from] + DemandIncoming(ConsensusDemand), /// A scheduled event to be handled by a specified era. Timer { era_id: EraId, @@ -91,52 +161,29 @@ pub enum Event { /// A queued action to be handled by a specific era. Action { era_id: EraId, action_id: ActionId }, /// We are receiving the data we require to propose a new block. - NewProtoBlock { - era_id: EraId, - proto_block: ProtoBlock, - block_context: BlockContext, - parent: Option, - }, + NewBlockPayload(NewBlockPayload), #[from] ConsensusRequest(ConsensusRequest), /// A new block has been added to the linear chain. - BlockAdded(Box), - /// The proto-block has been validated. - ResolveValidity { - era_id: EraId, - sender: I, - proto_block: ProtoBlock, - parent: Option, - valid: bool, + BlockAdded { + header: Box, + header_hash: BlockHash, }, + /// The proposed block has been validated. + ResolveValidity(ResolveValidity), /// Deactivate the era with the given ID, unless the number of faulty validators increases. DeactivateEra { era_id: EraId, faulty_num: usize, delay: Duration, }, - /// Event raised when a new era should be created: once we get the set of validators, the - /// booking block hash and the seed from the key block. - CreateNewEra { - /// The header of the switch block - block: Box, - /// `Ok(block_hash)` if the booking block was found, `Err(era_id)` if not - booking_block_hash: Result, - }, - /// Event raised upon initialization, when a number of eras have to be instantiated at once. 
- InitializeEras { - key_blocks: HashMap, - booking_blocks: HashMap, - /// This is empty except if the activation era still needs to be instantiated: Its - /// validator set is read from the global state, not from a key block. - validators: BTreeMap, - }, - /// Got the result of checking for an upgrade activation point. - GotUpgradeActivationPoint(ActivationPoint), + /// Dump state for debugging purposes. + #[from] + DumpState(DumpConsensusStateRequest), } impl Debug for ConsensusMessage { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { match self { ConsensusMessage::Protocol { era_id, payload: _ } => { write!(f, "Protocol {{ era_id: {:?}, .. }}", era_id) @@ -154,7 +201,12 @@ impl Display for ConsensusMessage { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { match self { ConsensusMessage::Protocol { era_id, payload } => { - write!(f, "protocol message {:10} in {}", HexFmt(payload), era_id) + write!( + f, + "protocol message ({} bytes) in {}", + payload.as_raw().len(), + era_id + ) } ConsensusMessage::EvidenceRequest { era_id, pub_key } => write!( f, @@ -165,10 +217,34 @@ impl Display for ConsensusMessage { } } -impl Display for Event { +impl Debug for ConsensusRequestMessage { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!( + f, + "ConsensusRequestMessage {{ era_id: {:?}, .. 
}}", + self.era_id + ) + } +} + +impl Display for ConsensusRequestMessage { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!(f, "protocol request {:?} in {}", self.payload, self.era_id) + } +} + +impl Display for Event { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { match self { - Event::MessageReceived { sender, msg } => write!(f, "msg from {:?}: {}", sender, msg), + Event::Incoming(ConsensusMessageIncoming { sender, message }) => { + write!(f, "message from {:?}: {}", sender, message) + } + Event::DelayedIncoming(ConsensusMessageIncoming { sender, message }) => { + write!(f, "delayed message from {:?}: {}", sender, message) + } + Event::DemandIncoming(demand) => { + write!(f, "demand from {:?}: {}", demand.sender, demand.request_msg) + } Event::Timer { era_id, timestamp, @@ -181,40 +257,44 @@ impl Display for Event { Event::Action { era_id, action_id } => { write!(f, "action (ID {}) for {}", action_id.0, era_id) } - Event::NewProtoBlock { + Event::NewBlockPayload(NewBlockPayload { era_id, - proto_block, + block_payload, block_context, - parent, - } => write!( + }) => write!( f, - "New proto-block for era {:?}: {:?}, {:?}, parent: {:?}", - era_id, proto_block, block_context, parent + "New proposed block for era {:?}: {:?}, {:?}", + era_id, block_payload, block_context ), Event::ConsensusRequest(request) => write!( f, - "A request for consensus component hash been receieved: {:?}", + "A request for consensus component hash been received: {:?}", request ), - Event::BlockAdded(block) => write!( + Event::BlockAdded { + header: _, + header_hash, + } => write!( f, "A block has been added to the linear chain: {}", - block.hash() + header_hash, ), - Event::ResolveValidity { + Event::ResolveValidity(ResolveValidity { era_id, sender, - proto_block, - parent, - valid, - } => write!( + proposed_block, + maybe_error, + }) => write!( f, - "Proto-block received from {:?} for {} with parent {:?} is {}: {:?}", + "Proposed block received from {:?} for {} is 
{}: {:?}", sender, era_id, - parent, - if *valid { "valid" } else { "invalid" }, - proto_block, + if maybe_error.is_none() { + "valid".to_string() + } else { + format!("invalid ({:?})", maybe_error).to_string() + }, + proposed_block, ), Event::DeactivateEra { era_id, faulty_num, .. @@ -223,129 +303,219 @@ impl Display for Event { "Deactivate old {} unless additional faults are observed; faults so far: {}", era_id, faulty_num ), - Event::CreateNewEra { - booking_block_hash, - block, - } => write!( - f, - "New era should be created; booking block hash: {:?}, switch block: {:?}", - booking_block_hash, block - ), - Event::InitializeEras { .. } => write!(f, "Starting eras should be initialized"), - Event::GotUpgradeActivationPoint(activation_point) => { - write!(f, "new upgrade activation point: {:?}", activation_point) - } + Event::DumpState(req) => Display::fmt(req, f), } } } /// A helper trait whose bounds represent the requirements for a reactor event that `EraSupervisor` /// can work with. 
-pub trait ReactorEventT: +pub(crate) trait ReactorEventT: ReactorEvent - + From> + + From + Send - + From> - + From + + From> + + From + + From + + From + From - + From> + + From + From + From - + From - + From> - + From> + + From + + From + + From + + From { } -impl ReactorEventT for REv where +impl ReactorEventT for REv where REv: ReactorEvent - + From> + + From + Send - + From> - + From + + From + + From> + + From + + From + From - + From> + + From + From + From - + From - + From> - + From> + + From + + From + + From + + From { } -impl Component for EraSupervisor +mod specimen_support { + use crate::utils::specimen::{largest_variant, Cache, LargestSpecimen, SizeEstimator}; + + use super::{ + protocols::{highway, zug}, + ClContext, ConsensusMessage, ConsensusMessageDiscriminants, ConsensusRequestMessage, + EraRequest, SerializedMessage, + }; + + impl LargestSpecimen for ConsensusMessage { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + largest_variant::(estimator, |variant| { + match variant { + ConsensusMessageDiscriminants::Protocol => { + let zug_payload = SerializedMessage::from_message( + &zug::Message::::largest_specimen(estimator, cache), + ); + let highway_payload = SerializedMessage::from_message( + &highway::HighwayMessage::::largest_specimen( + estimator, cache, + ), + ); + + let payload = if zug_payload.as_raw().len() > highway_payload.as_raw().len() + { + zug_payload + } else { + highway_payload + }; + + ConsensusMessage::Protocol { + era_id: LargestSpecimen::largest_specimen(estimator, cache), + payload, + } + } + ConsensusMessageDiscriminants::EvidenceRequest => { + ConsensusMessage::EvidenceRequest { + era_id: LargestSpecimen::largest_specimen(estimator, cache), + pub_key: LargestSpecimen::largest_specimen(estimator, cache), + } + } + } + }) + } + } + + impl LargestSpecimen for ConsensusRequestMessage { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + let zug_sync_request = SerializedMessage::from_message( + 
&zug::SyncRequest::::largest_specimen(estimator, cache), + ); + + ConsensusRequestMessage { + era_id: LargestSpecimen::largest_specimen(estimator, cache), + payload: zug_sync_request, + } + } + } + + impl LargestSpecimen for EraRequest { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + EraRequest::Zug(LargestSpecimen::largest_specimen(estimator, cache)) + } + } +} + +impl Component for EraSupervisor where - I: NodeIdT, - REv: ReactorEventT, + REv: ReactorEventT, { - type Event = Event; - type ConstructionError = Infallible; + type Event = Event; + + fn name(&self) -> &str { + COMPONENT_NAME + } + + fn activate_failpoint(&mut self, activation: &FailpointActivation) { + self.message_delay_failpoint.update_from(activation); + self.proposal_delay_failpoint.update_from(activation); + } fn handle_event( &mut self, effect_builder: EffectBuilder, - mut rng: &mut NodeRng, + rng: &mut NodeRng, event: Self::Event, ) -> Effects { - let mut handling_es = self.handling_wrapper(effect_builder, &mut rng); + trace!("{:?}", event); match event { Event::Timer { era_id, timestamp, timer_id, - } => handling_es.handle_timer(era_id, timestamp, timer_id), - Event::Action { era_id, action_id } => handling_es.handle_action(era_id, action_id), - Event::MessageReceived { sender, msg } => handling_es.handle_message(sender, msg), - Event::NewProtoBlock { - era_id, - proto_block, - block_context, - parent, - } => handling_es.handle_new_proto_block(era_id, proto_block, block_context, parent), - Event::BlockAdded(block) => handling_es.handle_block_added(*block), - Event::ResolveValidity { - era_id, + } => self.handle_timer(effect_builder, rng, era_id, timestamp, timer_id), + Event::Action { era_id, action_id } => { + self.handle_action(effect_builder, rng, era_id, action_id) + } + Event::Incoming(ConsensusMessageIncoming { sender, message }) => { + let delay_by = self.message_delay_failpoint.fire(rng).cloned(); + if let Some(delay) = delay_by { + effect_builder + 
.set_timeout(Duration::from_millis(delay)) + .event(move |_| { + Event::DelayedIncoming(ConsensusMessageIncoming { sender, message }) + }) + } else { + self.handle_message(effect_builder, rng, sender, *message) + } + } + Event::DelayedIncoming(ConsensusMessageIncoming { sender, message }) => { + self.handle_message(effect_builder, rng, sender, *message) + } + Event::DemandIncoming(ConsensusDemand { sender, - proto_block, - parent, - valid, - } => handling_es.resolve_validity(era_id, sender, proto_block, parent, valid), + request_msg: demand, + auto_closing_responder, + }) => self.handle_demand(effect_builder, rng, sender, demand, auto_closing_responder), + Event::NewBlockPayload(new_block_payload) => { + self.handle_new_block_payload(effect_builder, rng, new_block_payload) + } + Event::BlockAdded { + header, + header_hash: _, + } => self.handle_block_added(effect_builder, rng, *header), + Event::ResolveValidity(resolve_validity) => { + self.resolve_validity(effect_builder, rng, resolve_validity) + } Event::DeactivateEra { era_id, faulty_num, delay, - } => handling_es.handle_deactivate_era(era_id, faulty_num, delay), - Event::CreateNewEra { - block, - booking_block_hash, - } => { - let booking_block_hash = match booking_block_hash { - Ok(hash) => hash, - Err(era_id) => { - error!( - "could not find the booking block in era {}, for era {}", - era_id, - block.header().era_id().successor() - ); - return fatal!( - handling_es.effect_builder, - "couldn't get the booking block hash" - ) - .ignore(); + } => self.handle_deactivate_era(effect_builder, era_id, faulty_num, delay), + Event::ConsensusRequest(ConsensusRequest::Status(responder)) => self.status(responder), + Event::ConsensusRequest(ConsensusRequest::ValidatorChanges(responder)) => { + let validator_changes = self.get_validator_changes(); + responder.respond(validator_changes).ignore() + } + Event::DumpState(req @ DumpConsensusStateRequest { era_id, .. 
}) => { + let current_era = match self.current_era() { + None => { + return req + .answer(Err(Cow::Owned("consensus not initialized".to_string()))) + .ignore() } + Some(era_id) => era_id, }; - handling_es.handle_create_new_era(*block, booking_block_hash) - } - Event::InitializeEras { - key_blocks, - booking_blocks, - validators, - } => handling_es.handle_initialize_eras(key_blocks, booking_blocks, validators), - Event::GotUpgradeActivationPoint(activation_point) => { - handling_es.got_upgrade_activation_point(activation_point) - } - Event::ConsensusRequest(ConsensusRequest::Status(responder)) => { - handling_es.status(responder) + + let requested_era = era_id.unwrap_or(current_era); + + // We emit some log message to get some performance information and give the + // operator a chance to find out why their node is busy. + info!(era_id=%requested_era.value(), was_latest=era_id.is_none(), "dumping era via diagnostics port"); + + let era_dump_result = self + .open_eras() + .get(&requested_era) + .ok_or_else(|| { + Cow::Owned(format!( + "could not dump consensus, {} not found", + requested_era + )) + }) + .and_then(|era| EraDump::dump_era(era, requested_era)); + + match era_dump_result { + Ok(dump) => req.answer(Ok(&dump)).ignore(), + Err(err) => req.answer(Err(err)).ignore(), + } } } } diff --git a/node/src/components/consensus/candidate_block.rs b/node/src/components/consensus/candidate_block.rs deleted file mode 100644 index 1407d2c13f..0000000000 --- a/node/src/components/consensus/candidate_block.rs +++ /dev/null @@ -1,90 +0,0 @@ -use blake2::{ - digest::{Update, VariableOutput}, - VarBlake2b, -}; -use datasize::DataSize; -use serde::{Deserialize, Serialize}; - -use casper_types::PublicKey; - -use crate::{ - components::consensus::traits::ConsensusValueT, - crypto::hash::Digest, - types::{ProtoBlock, Timestamp}, -}; - -/// A proposed block. Once the consensus protocol reaches agreement on it, it will be converted to -/// a `FinalizedBlock`. 
-#[derive(Clone, DataSize, Debug, PartialOrd, Ord, PartialEq, Eq, Hash, Serialize, Deserialize)] -pub(crate) struct CandidateBlock { - proto_block: ProtoBlock, - accusations: Vec, - /// The parent candidate block in this era, or `None` if this is the first block in this era. - parent: Option, -} - -impl CandidateBlock { - /// Creates a new candidate block, wrapping a proto block and accusing the given validators. - pub(crate) fn new( - proto_block: ProtoBlock, - accusations: Vec, - parent: Option, - ) -> Self { - CandidateBlock { - proto_block, - accusations, - parent, - } - } - - /// Returns the proto block containing the deploys. - pub(crate) fn proto_block(&self) -> &ProtoBlock { - &self.proto_block - } - - /// Returns the validators accused by this block. - pub(crate) fn accusations(&self) -> &Vec { - &self.accusations - } -} - -impl From for ProtoBlock { - fn from(cb: CandidateBlock) -> ProtoBlock { - cb.proto_block - } -} - -impl ConsensusValueT for CandidateBlock { - type Hash = Digest; - - fn hash(&self) -> Self::Hash { - let CandidateBlock { - proto_block, - accusations, - parent, - } = self; - let mut result = [0; Digest::LENGTH]; - - let mut hasher = VarBlake2b::new(Digest::LENGTH).expect("should create hasher"); - hasher.update(proto_block.hash().inner()); - let data = (accusations, parent); - hasher.update(bincode::serialize(&data).expect("should serialize candidate block data")); - hasher.finalize_variable(|slice| { - result.copy_from_slice(slice); - }); - result.into() - } - - fn needs_validation(&self) -> bool { - !self.proto_block.deploy_hashes().is_empty() - || !self.proto_block.transfer_hashes().is_empty() - } - - fn timestamp(&self) -> Timestamp { - self.proto_block.timestamp() - } - - fn parent(&self) -> Option<&Self::Hash> { - self.parent.as_ref() - } -} diff --git a/node/src/components/consensus/cl_context.rs b/node/src/components/consensus/cl_context.rs index 9c52524dbe..de9502f42c 100644 --- a/node/src/components/consensus/cl_context.rs +++ 
b/node/src/components/consensus/cl_context.rs @@ -1,23 +1,18 @@ use std::sync::Arc; use datasize::DataSize; +use serde::{Deserialize, Serialize}; use tracing::info; -use casper_types::{PublicKey, SecretKey, Signature}; +use casper_types::{crypto, Digest, PublicKey, SecretKey, Signature}; use crate::{ - components::consensus::{ - candidate_block::CandidateBlock, - traits::{Context, ValidatorSecret}, - }, - crypto::{ - self, - hash::{self, Digest}, - }, + components::consensus::traits::{ConsensusValueT, Context, ValidatorSecret}, + types::BlockPayload, }; #[derive(DataSize)] -pub(crate) struct Keypair { +pub struct Keypair { secret_key: Arc, public_key: PublicKey, } @@ -29,6 +24,11 @@ impl Keypair { public_key, } } + + #[cfg(test)] + pub(crate) fn public_key(&self) -> &PublicKey { + &self.public_key + } } impl From> for Keypair { @@ -47,12 +47,20 @@ impl ValidatorSecret for Keypair { } } -/// The collection of types used for cryptography, IDs and blocks in the CasperLabs node. -#[derive(Clone, DataSize, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub(crate) struct ClContext; +impl ConsensusValueT for Arc { + fn needs_validation(&self) -> bool { + self.all_transactions().next().is_some() + || !self.accusations().is_empty() + || self.rewarded_signatures().has_some() + } +} + +/// The collection of types used for cryptography, IDs and blocks in the Casper node. 
+#[derive(Clone, DataSize, Debug, Deserialize, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize)] +pub struct ClContext; impl Context for ClContext { - type ConsensusValue = CandidateBlock; + type ConsensusValue = Arc; type ValidatorId = PublicKey; type ValidatorSecret = Keypair; type Signature = Signature; @@ -60,7 +68,7 @@ impl Context for ClContext { type InstanceId = Digest; fn hash(data: &[u8]) -> Digest { - hash::hash(data) + Digest::hash(data) } fn verify_signature(hash: &Digest, public_key: &PublicKey, signature: &Signature) -> bool { @@ -71,3 +79,21 @@ impl Context for ClContext { true } } + +mod specimen_support { + use super::Keypair; + use crate::utils::specimen::{Cache, LargestSpecimen, SizeEstimator}; + use casper_types::{PublicKey, SecretKey}; + use std::sync::Arc; + + impl LargestSpecimen for Keypair { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + let secret_key = SecretKey::largest_specimen(estimator, cache); + let public_key = PublicKey::from(&secret_key); + Keypair { + secret_key: Arc::new(secret_key), + public_key, + } + } + } +} diff --git a/node/src/components/consensus/config.rs b/node/src/components/consensus/config.rs index 4f7dc89280..b828431f64 100644 --- a/node/src/components/consensus/config.rs +++ b/node/src/components/consensus/config.rs @@ -1,75 +1,106 @@ +use std::{path::Path, sync::Arc}; + use datasize::DataSize; use serde::{Deserialize, Serialize}; -use casper_types::{ProtocolVersion, SecretKey}; +use casper_types::{Chainspec, PublicKey, SecretKey}; use crate::{ - components::consensus::{protocols::highway::config::Config as HighwayConfig, EraId}, - crypto::hash::Digest, - types::{chainspec::HighwayConfig as HighwayProtocolConfig, Chainspec, TimeDiff, Timestamp}, - utils::External, + components::consensus::{ + era_supervisor::PAST_EVIDENCE_ERAS, + protocols::{highway::config::Config as HighwayConfig, zug::config::Config as ZugConfig}, + EraId, + }, + utils::{External, LoadError, Loadable}, }; +const 
DEFAULT_MAX_EXECUTION_DELAY: u64 = 3; + /// Consensus configuration. -#[derive(DataSize, Debug, Deserialize, Serialize, Clone)] +#[derive(DataSize, Debug, Serialize, Deserialize, Clone)] // Disallow unknown fields to ensure config files and command-line overrides contain valid keys. #[serde(deny_unknown_fields)] pub struct Config { /// Path to secret key file. - pub secret_key_path: External, + pub secret_key_path: External, + /// The maximum number of blocks by which execution is allowed to lag behind finalization. + /// If it is more than that, consensus will pause, and resume once the executor has caught up. + pub max_execution_delay: u64, /// Highway-specific node configuration. + #[serde(default)] pub highway: HighwayConfig, + /// Zug-specific node configuration. + #[serde(default)] + pub zug: ZugConfig, } impl Default for Config { fn default() -> Self { Config { secret_key_path: External::Missing, + max_execution_delay: DEFAULT_MAX_EXECUTION_DELAY, highway: HighwayConfig::default(), + zug: ZugConfig::default(), } } } -/// Consensus protocol configuration. -#[derive(DataSize, Debug)] -pub(crate) struct ProtocolConfig { - pub(crate) highway_config: HighwayProtocolConfig, - pub(crate) era_duration: TimeDiff, - pub(crate) minimum_era_height: u64, - /// Number of eras before an auction actually defines the set of validators. - /// If you bond with a sufficient bid in era N, you will be a validator in era N + - /// auction_delay + 1 - pub(crate) auction_delay: u64, - pub(crate) unbonding_delay: u64, - /// The network protocol version. - #[data_size(skip)] - pub(crate) protocol_version: ProtocolVersion, - /// The first era ID after the last upgrade - pub(crate) last_activation_point: EraId, - /// Name of the network. - pub(crate) name: String, - /// Genesis timestamp, if available. - pub(crate) genesis_timestamp: Option, - /// The chainspec hash: All nodes in the network agree on it, and it's unique to this network. 
- pub(crate) chainspec_hash: Digest, +type LoadKeyError = LoadError< as Loadable>::Error>; + +impl Config { + /// Loads the secret key from the configuration file and derives the public key. + pub(crate) fn load_keys>( + &self, + root: P, + ) -> Result<(Arc, PublicKey), LoadKeyError> { + let secret_signing_key: Arc = self.secret_key_path.clone().load(root)?; + let public_key: PublicKey = PublicKey::from(secret_signing_key.as_ref()); + Ok((secret_signing_key, public_key)) + } } -impl From<&Chainspec> for ProtocolConfig { - fn from(chainspec: &Chainspec) -> Self { - ProtocolConfig { - highway_config: chainspec.highway_config, - era_duration: chainspec.core_config.era_duration, - minimum_era_height: chainspec.core_config.minimum_era_height, - auction_delay: chainspec.core_config.auction_delay, - unbonding_delay: chainspec.core_config.unbonding_delay, - protocol_version: chainspec.protocol_config.version, - last_activation_point: chainspec.protocol_config.activation_point.era_id(), - name: chainspec.network_config.name.clone(), - genesis_timestamp: chainspec - .protocol_config - .activation_point - .genesis_timestamp(), - chainspec_hash: chainspec.hash(), - } +pub trait ChainspecConsensusExt { + /// Returns the ID of the last activation era, i.e. the era immediately after the most recent + /// upgrade or restart. + fn activation_era(&self) -> EraId; + + /// Returns the earliest era whose evidence is still relevant to the current era. If the current + /// era is N, that is usually N - 1, except that it's never at or before the most recent + /// activation point. + fn earliest_relevant_era(&self, current_era: EraId) -> EraId; + + /// Returns the earliest era whose switch block is needed to initialize the given era. For era + /// N that will usually be N - A - 1, where A is the auction delay, except that switch block + /// from before the most recent activation point are never used. 
+ fn earliest_switch_block_needed(&self, era_id: EraId) -> EraId; + + /// Returns the number of switch blocks needed for initializing an era. + fn number_of_past_switch_blocks_needed(&self) -> u64; +} + +impl ChainspecConsensusExt for Chainspec { + fn activation_era(&self) -> EraId { + self.protocol_config.activation_point.era_id() + } + + fn earliest_relevant_era(&self, current_era: EraId) -> EraId { + self.activation_era() + .successor() + .max(current_era.saturating_sub(PAST_EVIDENCE_ERAS)) + } + + fn earliest_switch_block_needed(&self, era_id: EraId) -> EraId { + self.activation_era().max( + era_id + .saturating_sub(1) + .saturating_sub(self.core_config.auction_delay), + ) + } + + fn number_of_past_switch_blocks_needed(&self) -> u64 { + self.core_config + .auction_delay + .saturating_add(PAST_EVIDENCE_ERAS) + .saturating_add(1) } } diff --git a/node/src/components/consensus/consensus_protocol.rs b/node/src/components/consensus/consensus_protocol.rs index df3d9e62da..4a40befd82 100644 --- a/node/src/components/consensus/consensus_protocol.rs +++ b/node/src/components/consensus/consensus_protocol.rs @@ -1,25 +1,40 @@ -use std::{any::Any, collections::BTreeMap, fmt::Debug, path::PathBuf}; +use std::{ + any::Any, + fmt::{self, Debug, Display, Formatter}, + path::PathBuf, +}; -use anyhow::Error; use datasize::DataSize; -use serde::{Deserialize, Serialize}; + +use casper_types::{TimeDiff, Timestamp}; use crate::{ components::consensus::{traits::Context, ActionId, TimerId}, - types::{TimeDiff, Timestamp}, + types::NodeId, + NodeRng, }; +use super::era_supervisor::SerializedMessage; + /// Information about the context in which a new block is created. 
-#[derive(Clone, Copy, DataSize, Eq, PartialEq, Debug, Ord, PartialOrd, Hash)] -pub struct BlockContext { +#[derive(Clone, DataSize, Eq, PartialEq, Debug, Ord, PartialOrd, Hash)] +pub struct BlockContext +where + C: Context, +{ timestamp: Timestamp, - height: u64, + /// The ancestors of the new block, in reverse chronological order, i.e. the first entry is the + /// new block's parent. + ancestor_values: Vec, } -impl BlockContext { +impl BlockContext { /// Constructs a new `BlockContext`. - pub(crate) fn new(timestamp: Timestamp, height: u64) -> Self { - BlockContext { timestamp, height } + pub(crate) fn new(timestamp: Timestamp, ancestor_values: Vec) -> Self { + BlockContext { + timestamp, + ancestor_values, + } } /// The block's timestamp. @@ -27,34 +42,59 @@ impl BlockContext { self.timestamp } + /// The block's relative height, i.e. the number of ancestors in the current era. #[cfg(test)] pub(crate) fn height(&self) -> u64 { - self.height + self.ancestor_values.len() as u64 + } + + /// The values of the block's ancestors. + pub(crate) fn ancestor_values(&self) -> &[C::ConsensusValue] { + &self.ancestor_values } } -/// Equivocation and reward information to be included in the terminal finalized block. -#[derive(Clone, DataSize, Debug, PartialOrd, Ord, PartialEq, Eq, Hash, Serialize, Deserialize)] -#[serde(bound( - serialize = "VID: Ord + Serialize", - deserialize = "VID: Ord + Deserialize<'de>", -))] -pub struct EraReport { - /// The set of equivocators. - pub(crate) equivocators: Vec, - /// Rewards for finalization of earlier blocks. - /// - /// This is a measure of the value of each validator's contribution to consensus, in - /// fractions of the configured maximum block reward. - pub(crate) rewards: BTreeMap, - /// Validators that haven't produced any unit during the era. - pub(crate) inactive_validators: Vec, +/// A proposed block, with context. 
+#[derive(Clone, DataSize, Eq, PartialEq, Debug, Ord, PartialOrd, Hash)] +pub struct ProposedBlock +where + C: Context, +{ + value: C::ConsensusValue, + context: BlockContext, } -#[derive(Clone, Debug, Eq, PartialEq)] +impl ProposedBlock { + pub(crate) fn new(value: C::ConsensusValue, context: BlockContext) -> Self { + ProposedBlock { value, context } + } + + pub(crate) fn value(&self) -> &C::ConsensusValue { + &self.value + } + + pub(crate) fn context(&self) -> &BlockContext { + &self.context + } + + pub(crate) fn destructure(self) -> (C::ConsensusValue, BlockContext) { + (self.value, self.context) + } +} + +impl Display for ProposedBlock { + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + write!( + formatter, + "proposed block at {}: {}", + self.context.timestamp(), + self.value + ) + } +} + +#[derive(Clone, Debug, Eq, PartialEq, Hash)] pub(crate) struct TerminalBlockData { - /// The rewards for participating in consensus. - pub(crate) rewards: BTreeMap, /// The list of validators that haven't produced any units. pub(crate) inactive_validators: Vec, } @@ -62,14 +102,14 @@ pub(crate) struct TerminalBlockData { /// A finalized block. All nodes are guaranteed to see the same sequence of blocks, and to agree /// about all the information contained in this type, as long as the total weight of faulty /// validators remains below the threshold. -#[derive(Clone, Debug, Eq, PartialEq)] +#[derive(Clone, Debug, Eq, PartialEq, Hash)] pub(crate) struct FinalizedBlock { /// The finalized value. pub(crate) value: C::ConsensusValue, /// The timestamp at which this value was proposed. pub(crate) timestamp: Timestamp, /// The relative height in this instance of the protocol. - pub(crate) height: u64, + pub(crate) relative_height: u64, /// The validators known to be faulty as seen by this block. pub(crate) equivocators: Vec, /// If this is a terminal block, i.e. 
the last one to be finalized, this contains additional @@ -79,39 +119,34 @@ pub(crate) struct FinalizedBlock { pub(crate) proposer: C::ValidatorId, } -pub(crate) type ProtocolOutcomes = Vec>; +pub(crate) type ProtocolOutcomes = Vec>; -// TODO: get rid of anyhow::Error; use variant and derive Clone and PartialEq. This is for testing. -#[derive(Debug)] -pub(crate) enum ProtocolOutcome { - CreatedGossipMessage(Vec), - CreatedTargetedMessage(Vec, I), - InvalidIncomingMessage(Vec, I, Error), +#[derive(Clone, Debug, Eq, PartialEq)] +pub(crate) enum ProtocolOutcome { + CreatedGossipMessage(SerializedMessage), + CreatedTargetedMessage(SerializedMessage, NodeId), + CreatedMessageToRandomPeer(SerializedMessage), + CreatedRequestToRandomPeer(SerializedMessage), ScheduleTimer(Timestamp, TimerId), QueueAction(ActionId), - /// Request deploys for a new block, providing the necessary context. - CreateNewBlock { - block_context: BlockContext, - past_values: Vec, - parent_value: Option, - }, + /// Request transactions for a new block, providing the necessary context. + CreateNewBlock(BlockContext, Timestamp), /// A block was finalized. FinalizedBlock(FinalizedBlock), /// Request validation of the consensus value, contained in a message received from the given /// node. /// /// The domain logic should verify any intrinsic validity conditions of consensus values, e.g. - /// that it has the expected structure, or that deploys that are mentioned by hash actually - /// exist, and then call `ConsensusProtocol::resolve_validity`. + /// that it has the expected structure, or that transactions that are mentioned by hash + /// actually exist, and then call `ConsensusProtocol::resolve_validity`. ValidateConsensusValue { - sender: I, - consensus_value: C::ConsensusValue, - ancestor_values: Vec, + sender: NodeId, + proposed_block: ProposedBlock, }, /// New direct evidence was added against the given validator. 
NewEvidence(C::ValidatorId), /// Send evidence about the validator from an earlier era to the peer. - SendEvidence(I, C::ValidatorId), + SendEvidence(NodeId, C::ValidatorId), /// We've detected an equivocation our own node has made. WeAreFaulty, /// We've received a unit from a doppelganger. @@ -119,48 +154,66 @@ pub(crate) enum ProtocolOutcome { /// Too many faulty validators. The protocol's fault tolerance threshold has been exceeded and /// consensus cannot continue. FttExceeded, - /// No progress has been made recently. - StandstillAlert, /// We want to disconnect from a sender of invalid data. - Disconnect(I), + Disconnect(NodeId), + /// We added a proposed block to the protocol state. + /// + /// This is used to inform the transaction buffer, so we don't propose the same transactions + /// again. Does not need to be raised for proposals this node created itself. + HandledProposedBlock(ProposedBlock), } /// An API for a single instance of the consensus. -pub(crate) trait ConsensusProtocol: Send { +pub(crate) trait ConsensusProtocol: Send { /// Upcasts consensus protocol into `dyn Any`. /// /// Typically called on a boxed trait object for downcasting afterwards. fn as_any(&self) -> &dyn Any; /// Handles an incoming message (like NewUnit, RequestDependency). - fn handle_message(&mut self, sender: I, msg: Vec, now: Timestamp) - -> ProtocolOutcomes; + fn handle_message( + &mut self, + rng: &mut NodeRng, + sender: NodeId, + msg: SerializedMessage, + now: Timestamp, + ) -> ProtocolOutcomes; + + /// Handles an incoming request message and returns an optional response. + fn handle_request_message( + &mut self, + rng: &mut NodeRng, + sender: NodeId, + msg: SerializedMessage, + now: Timestamp, + ) -> (ProtocolOutcomes, Option); /// Current instance of consensus protocol is latest era. - fn handle_is_current(&self) -> ProtocolOutcomes; + fn handle_is_current(&self, now: Timestamp) -> ProtocolOutcomes; /// Triggers consensus' timer. 
- fn handle_timer(&mut self, timestamp: Timestamp, timer_id: TimerId) -> ProtocolOutcomes; + fn handle_timer( + &mut self, + timestamp: Timestamp, + now: Timestamp, + timer_id: TimerId, + rng: &mut NodeRng, + ) -> ProtocolOutcomes; /// Triggers a queued action. - fn handle_action(&mut self, action_id: ActionId, now: Timestamp) -> ProtocolOutcomes; + fn handle_action(&mut self, action_id: ActionId, now: Timestamp) -> ProtocolOutcomes; /// Proposes a new value for consensus. - fn propose( - &mut self, - value: C::ConsensusValue, - block_context: BlockContext, - now: Timestamp, - ) -> ProtocolOutcomes; + fn propose(&mut self, proposed_block: ProposedBlock, now: Timestamp) -> ProtocolOutcomes; /// Marks the `value` as valid or invalid, based on validation requested via /// `ProtocolOutcome::ValidateConsensusvalue`. fn resolve_validity( &mut self, - value: &C::ConsensusValue, + proposed_block: ProposedBlock, valid: bool, now: Timestamp, - ) -> ProtocolOutcomes; + ) -> ProtocolOutcomes; /// Turns this instance into an active validator, that participates in the consensus protocol. fn activate_validator( @@ -169,7 +222,7 @@ pub(crate) trait ConsensusProtocol: Send { secret: C::ValidatorSecret, timestamp: Timestamp, unit_hash_file: Option, - ) -> ProtocolOutcomes; + ) -> ProtocolOutcomes; /// Turns this instance into a passive observer, that does not create any new vertices. fn deactivate_validator(&mut self); @@ -184,23 +237,20 @@ pub(crate) trait ConsensusProtocol: Send { fn mark_faulty(&mut self, vid: &C::ValidatorId); /// Sends evidence for a faulty of validator `vid` to the `sender` of the request. - fn request_evidence(&self, sender: I, vid: &C::ValidatorId) -> ProtocolOutcomes; + fn send_evidence(&self, sender: NodeId, vid: &C::ValidatorId) -> ProtocolOutcomes; /// Sets the pause status: While paused we don't create consensus messages other than pings. 
- fn set_paused(&mut self, paused: bool); + fn set_paused(&mut self, paused: bool, now: Timestamp) -> ProtocolOutcomes; /// Returns the list of all validators that were observed as faulty in this consensus instance. fn validators_with_evidence(&self) -> Vec<&C::ValidatorId>; - /// Returns true if the protocol has received some messages since initialization. - fn has_received_messages(&self) -> bool; - /// Returns whether this instance of a protocol is an active validator. fn is_active(&self) -> bool; /// Returns the instance ID of this instance. fn instance_id(&self) -> &C::InstanceId; - // TODO: Make this lees Highway-specific. + // TODO: Make this less Highway-specific. fn next_round_length(&self) -> Option; } diff --git a/node/src/components/consensus/era_supervisor.rs b/node/src/components/consensus/era_supervisor.rs index dd32f41037..7883c9f5ab 100644 --- a/node/src/components/consensus/era_supervisor.rs +++ b/node/src/components/consensus/era_supervisor.rs @@ -1,3 +1,5 @@ +#![allow(clippy::boxed_local)] // We use boxed locals to pass on event data unchanged. + //! Consensus service is a component that will be communicating with the reactor. //! It will receive events (like incoming message event or create new message event) //! and propagate them to the underlying consensus protocol. @@ -5,94 +7,102 @@ //! it assumes is the concept of era/epoch and that each era runs separate consensus instance. //! Most importantly, it doesn't care about what messages it's forwarding. 
+pub(super) mod debug; mod era; use std::{ - collections::{BTreeMap, BTreeSet, HashMap, HashSet}, + cmp, + collections::{BTreeMap, BTreeSet, HashMap}, convert::TryInto, fmt::{self, Debug, Formatter}, - path::PathBuf, + fs, io, + path::{Path, PathBuf}, sync::Arc, time::Duration, }; use anyhow::Error; -use blake2::{ - digest::{Update, VariableOutput}, - VarBlake2b, -}; use datasize::DataSize; -use futures::FutureExt; +use futures::{Future, FutureExt}; use itertools::Itertools; use prometheus::Registry; use rand::Rng; +use serde::{de::DeserializeOwned, Deserialize, Serialize}; use tracing::{debug, error, info, trace, warn}; -use casper_types::{AsymmetricType, EraId, PublicKey, SecretKey, U512}; +use casper_binary_port::{ConsensusStatus, ConsensusValidatorChanges}; + +use casper_types::{ + Approval, AsymmetricType, BlockHash, BlockHeader, Chainspec, ConsensusProtocolName, Digest, + DisplayIter, EraId, PublicKey, RewardedSignatures, Timestamp, Transaction, TransactionHash, + ValidatorChange, +}; use crate::{ - components::consensus::{ - candidate_block::CandidateBlock, - cl_context::{ClContext, Keypair}, - config::ProtocolConfig, - consensus_protocol::{ - BlockContext, ConsensusProtocol, EraReport, FinalizedBlock as CpFinalizedBlock, - ProtocolOutcome, ProtocolOutcomes, + components::{ + consensus::{ + cl_context::{ClContext, Keypair}, + consensus_protocol::{ + ConsensusProtocol, FinalizedBlock as CpFinalizedBlock, ProposedBlock, + ProtocolOutcome, + }, + metrics::Metrics, + validator_change::ValidatorChanges, + ActionId, ChainspecConsensusExt, Config, ConsensusMessage, ConsensusRequestMessage, + Event, HighwayProtocol, NewBlockPayload, ReactorEventT, ResolveValidity, TimerId, Zug, }, - metrics::ConsensusMetrics, - traits::{ConsensusValueT, NodeIdT}, - ActionId, Config, ConsensusMessage, Event, ReactorEventT, TimerId, + network::blocklist::BlocklistJustification, }, - crypto::hash::Digest, effect::{ - requests::{BlockValidationRequest, StorageRequest}, - EffectBuilder, 
EffectExt, EffectOptionExt, Effects, Responder, + announcements::FatalAnnouncement, + requests::{BlockValidationRequest, ContractRuntimeRequest, StorageRequest}, + AutoClosingResponder, EffectBuilder, EffectExt, Effects, Responder, }, - fatal, + failpoints::Failpoint, + fatal, protocol, types::{ - ActivationPoint, Block, BlockHash, BlockHeader, DeployHash, DeployMetadata, - FinalitySignature, FinalizedBlock, ProtoBlock, TimeDiff, Timestamp, + create_single_block_rewarded_signatures, BlockWithMetadata, ExecutableBlock, + FinalizedBlock, InternalEraReport, MetaBlockState, NodeId, ValidatorMatrix, }, - utils::WithDir, NodeRng, }; pub use self::era::Era; +use super::{traits::ConsensusNetworkMessage, BlockContext}; +use crate::{components::consensus::error::CreateNewEraError, types::InvalidProposalError}; -/// The delay in milliseconds before we shutdown after the number of faulty validators exceeded the +/// The delay in milliseconds before we shut down after the number of faulty validators exceeded the /// fault tolerance threshold. const FTT_EXCEEDED_SHUTDOWN_DELAY_MILLIS: u64 = 60 * 1000; - -type ConsensusConstructor = dyn Fn( - Digest, // the era's unique instance ID - BTreeMap, // validator weights - &HashSet, // slashed validators that are banned in this era - &ProtocolConfig, // the network's chainspec - &Config, // The consensus part of the node config. - Option<&dyn ConsensusProtocol>, // previous era's consensus instance - Timestamp, // start time for this era - u64, // random seed - Timestamp, // now timestamp -) -> ( - Box>, - Vec>, -) + Send; +/// A warning is printed if a timer is delayed by more than this. +const TIMER_DELAY_WARNING_MILLIS: u64 = 1000; + +/// The number of eras across which evidence can be cited. +/// If this is 1, you can cite evidence from the previous era, but not the one before that. +/// To be able to detect that evidence, we also keep that number of active past eras in memory. 
+pub(super) const PAST_EVIDENCE_ERAS: u64 = 1; +/// The total number of past eras that are kept in memory in addition to the current one. +/// The more recent half of these is active: it contains units and can still accept further units. +/// The older half is in evidence-only state, and only used to validate cited evidence. +pub(super) const PAST_OPEN_ERAS: u64 = 2 * PAST_EVIDENCE_ERAS; #[derive(DataSize)] -pub struct EraSupervisor { - /// A map of active consensus protocols. - /// A value is a trait so that we can run different consensus protocol instances per era. +pub struct EraSupervisor { + /// A map of consensus protocol instances. + /// A value is a trait so that we can run different consensus protocols per era. /// - /// This map always contains exactly `2 * bonded_eras + 1` entries, with the last one being the - /// current one. - active_eras: HashMap>, - secret_signing_key: Arc, - pub(super) public_signing_key: PublicKey, - current_era: EraId, - protocol_config: ProtocolConfig, + /// This map contains three consecutive entries, with the last one being the current era N. Era + /// N - 1 is also kept in memory so that we would still detect any equivocations there and use + /// them in era N to get the equivocator banned. And era N - 2 one is in an "evidence-only" + /// state: It doesn't accept any new Highway units anymore, but we keep the instance in memory + /// so we can evaluate evidence that units in era N - 1 might cite. + /// + /// Since eras at or before the most recent activation point are never instantiated, shortly + /// after that there can temporarily be fewer than three entries in the map. + open_eras: BTreeMap, + validator_matrix: ValidatorMatrix, + chainspec: Arc, config: Config, - #[data_size(skip)] // Negligible for most closures, zero for functions. - new_consensus: Box>, /// The height of the next block to be finalized. 
/// We keep that in order to be able to signal to the Block Proposer how many blocks have been /// finalized when we request a new block. This way the Block Proposer can know whether it's up @@ -103,172 +113,185 @@ pub struct EraSupervisor { /// The height of the next block to be executed. If this falls too far behind, we pause. next_executed_height: u64, #[data_size(skip)] - metrics: ConsensusMetrics, - /// The path to the folder where unit hash files will be stored. - unit_hashes_folder: PathBuf, - /// The next upgrade activation point. When the era immediately before the activation point is - /// deactivated, the era supervisor indicates that the node should stop running to allow an - /// upgrade. - next_upgrade_activation_point: Option, - /// If true, the process should stop execution to allow an upgrade to proceed. - stop_for_upgrade: bool, - /// Set to true when InitializeEras is handled. - /// TODO: A temporary field. Shouldn't be needed once the Joiner doesn't have a consensus - /// component. - is_initialized: bool, + metrics: Metrics, + /// The path to the folder where unit files will be stored. + unit_files_folder: PathBuf, + last_progress: Timestamp, + + /// Failpoints + pub(super) message_delay_failpoint: Failpoint, + pub(super) proposal_delay_failpoint: Failpoint, } -impl Debug for EraSupervisor { +impl Debug for EraSupervisor { fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { - let ae: Vec<_> = self.active_eras.keys().collect(); - write!(formatter, "EraSupervisor {{ active_eras: {:?}, .. }}", ae) + let ae: Vec<_> = self.open_eras.keys().collect(); + write!(formatter, "EraSupervisor {{ open_eras: {:?}, .. }}", ae) } } -impl EraSupervisor -where - I: NodeIdT, -{ +impl EraSupervisor { /// Creates a new `EraSupervisor`, starting in the indicated current era. 
#[allow(clippy::too_many_arguments)] - pub(crate) fn new>( - current_era: EraId, - config: WithDir, - effect_builder: EffectBuilder, - protocol_config: ProtocolConfig, - maybe_latest_block_header: Option<&BlockHeader>, - next_upgrade_activation_point: Option, + pub(crate) fn new( + storage_dir: &Path, + validator_matrix: ValidatorMatrix, + config: Config, + chainspec: Arc, registry: &Registry, - new_consensus: Box>, - ) -> Result<(Self, Effects>), Error> { - if current_era < protocol_config.last_activation_point { - panic!( - "Current era ({:?}) is before the last activation point ({:?}) - no eras would \ - be instantiated!", - current_era, protocol_config.last_activation_point - ); - } - let unit_hashes_folder = config.with_dir(config.value().highway.unit_hashes_folder.clone()); - let (root, config) = config.into_parts(); - let secret_signing_key = Arc::new(config.secret_key_path.clone().load(root)?); - let public_signing_key = PublicKey::from(secret_signing_key.as_ref()); - info!(our_id = %public_signing_key, "EraSupervisor pubkey",); - let metrics = ConsensusMetrics::new(registry) - .expect("failure to setup and register ConsensusMetrics"); - let activation_era_id = protocol_config.last_activation_point; - let auction_delay = protocol_config.auction_delay; - #[allow(clippy::integer_arithmetic)] // Block height should never reach u64::MAX. 
- let next_height = maybe_latest_block_header.map_or(0, |hdr| hdr.height() + 1); + ) -> Result { + let unit_files_folder = storage_dir.join("unit_files"); + fs::create_dir_all(&unit_files_folder)?; + info!(our_id = %validator_matrix.public_signing_key(), "EraSupervisor pubkey",); + let metrics = Metrics::new(registry)?; let era_supervisor = Self { - active_eras: Default::default(), - secret_signing_key, - public_signing_key, - current_era, - protocol_config, + open_eras: Default::default(), + validator_matrix, + chainspec, config, - new_consensus, - next_block_height: next_height, + next_block_height: 0, metrics, - unit_hashes_folder, - next_upgrade_activation_point, - stop_for_upgrade: false, - next_executed_height: next_height, - is_initialized: false, + unit_files_folder, + next_executed_height: 0, + last_progress: Timestamp::now(), + message_delay_failpoint: Failpoint::new("consensus.message_delay"), + proposal_delay_failpoint: Failpoint::new("consensus.proposal_delay"), }; - let bonded_eras = era_supervisor.bonded_eras(); - let era_ids: Vec = era_supervisor - .iter_past(current_era, era_supervisor.bonded_eras().saturating_mul(3)) - .collect(); - - // Asynchronously collect the information needed to initialize all recent eras. - let effects = async move { - info!(?era_ids, "collecting key blocks and booking blocks"); + Ok(era_supervisor) + } - let key_blocks = effect_builder - .collect_key_blocks(era_ids.iter().cloned()) - .await - .expect("should have all the key blocks in storage"); + /// Returns whether we are a validator in the current era. + pub(crate) fn is_active_validator(&self) -> bool { + if let Some(era_id) = self.current_era() { + return self.open_eras[&era_id] + .validators() + .contains_key(self.validator_matrix.public_signing_key()); + } + false + } - let booking_blocks = collect_booking_block_hashes( - effect_builder, - era_ids.clone(), - auction_delay, - activation_era_id, - ) - .await; + /// Returns the most recent era. 
+ pub(crate) fn current_era(&self) -> Option { + self.open_eras.keys().last().copied() + } - if current_era > activation_era_id.saturating_add(bonded_eras.saturating_mul(2).into()) - { - // All eras can be initialized using the key blocks only. - (key_blocks, booking_blocks, Default::default()) - } else { - let activation_era_validators = effect_builder - .get_era_validators(activation_era_id) - .await - .unwrap_or_default(); - (key_blocks, booking_blocks, activation_era_validators) - } + pub(crate) fn create_required_eras( + &mut self, + effect_builder: EffectBuilder, + rng: &mut NodeRng, + recent_switch_block_headers: &[BlockHeader], + ) -> Option> { + if !recent_switch_block_headers + .iter() + .tuple_windows() + .all(|(b0, b1)| b0.next_block_era_id() == b1.era_id()) + { + error!("switch block headers are not consecutive; this is a bug"); + return None; } - .event( - move |(key_blocks, booking_blocks, validators)| Event::InitializeEras { - key_blocks, - booking_blocks, - validators, - }, - ); - Ok((era_supervisor, effects)) + let highest_switch_block_header = recent_switch_block_headers.last()?; + + let new_era_id = highest_switch_block_header.next_block_era_id(); + + // We need to initialize current_era and (evidence-only) current_era - 1. + // To initialize an era, all switch blocks between its booking block and its key block are + // required. The booking block for era N is in N - auction_delay - 1, and the key block in + // N - 1. So we need all switch blocks between: + // (including) current_era - 1 - auction_delay - 1 and (excluding) current_era. + // However, we never use any block from before the last activation point. + // + // Example: If auction_delay is 1, to initialize era N we need the switch blocks from era N + // and N - 1. If current_era is 10, we will initialize eras 10 and 9. So we need the switch + // blocks from eras 9, 8, and 7. 
+ let earliest_open_era = self.chainspec.earliest_relevant_era(new_era_id); + let earliest_era = self + .chainspec + .earliest_switch_block_needed(earliest_open_era); + debug_assert!(earliest_era <= new_era_id); + + let earliest_index = recent_switch_block_headers + .iter() + .position(|block_header| block_header.era_id() == earliest_era)?; + let relevant_switch_block_headers = &recent_switch_block_headers[earliest_index..]; + + // We initialize the era that `relevant_switch_block_headers` last block is the key + // block for. We want to initialize the two latest eras, so we have to pass in the whole + // slice for the current era, and omit one element for the other one. We never initialize + // the activation era or an earlier era, however. + // + // In the example above, we would call create_new_era with the switch blocks from eras + // 8 and 9 (to initialize 10) and then 7 and 8 (for era 9). + // (We don't truncate the slice at the start since unneeded blocks are ignored.) + let mut effects = Effects::new(); + let from = relevant_switch_block_headers + .len() + .saturating_sub(PAST_EVIDENCE_ERAS as usize) + .max(1); + let old_current_era = self.current_era(); + let now = Timestamp::now(); + for i in (from..=relevant_switch_block_headers.len()).rev() { + effects.extend(self.create_new_era_effects( + effect_builder, + rng, + &relevant_switch_block_headers[..i], + now, + )); + } + if self.current_era() != old_current_era { + effects.extend(self.make_latest_era_current(effect_builder, rng, now)); + } + effects.extend(self.activate_latest_era_if_needed(effect_builder, rng, now)); + Some(effects) } - /// Returns a temporary container with this `EraSupervisor`, `EffectBuilder` and random number - /// generator, for handling events. 
- pub(super) fn handling_wrapper<'a, REv: ReactorEventT>( - &'a mut self, - effect_builder: EffectBuilder, - rng: &'a mut NodeRng, - ) -> EraSupervisorHandlingWrapper<'a, I, REv> { - EraSupervisorHandlingWrapper { - era_supervisor: self, - effect_builder, - rng, + /// Returns a list of status changes of active validators. + pub(super) fn get_validator_changes(&self) -> ConsensusValidatorChanges { + let mut result: BTreeMap> = BTreeMap::new(); + for ((_, era0), (era_id, era1)) in self.open_eras.iter().tuple_windows() { + for (pub_key, change) in ValidatorChanges::new(era0, era1).0 { + result.entry(pub_key).or_default().push((*era_id, change)); + } } + ConsensusValidatorChanges::new(result) } fn era_seed(booking_block_hash: BlockHash, key_block_seed: Digest) -> u64 { - let mut result = [0; Digest::LENGTH]; - let mut hasher = VarBlake2b::new(Digest::LENGTH).expect("should create hasher"); - - hasher.update(booking_block_hash); - hasher.update(key_block_seed); - - hasher.finalize_variable(|slice| { - result.copy_from_slice(slice); - }); - - u64::from_le_bytes(result[0..std::mem::size_of::()].try_into().unwrap()) + let result = Digest::hash_pair(booking_block_hash, key_block_seed).value(); + u64::from_le_bytes(result[0..size_of::()].try_into().unwrap()) } /// Returns an iterator over era IDs of `num_eras` past eras, plus the provided one. + /// + /// Note: Excludes the activation point era and earlier eras. The activation point era itself + /// contains only the single switch block we created after the upgrade. There is no consensus + /// instance for it. pub(crate) fn iter_past(&self, era_id: EraId, num_eras: u64) -> impl Iterator { (self - .protocol_config - .last_activation_point + .chainspec + .activation_era() + .successor() .max(era_id.saturating_sub(num_eras)) .value()..=era_id.value()) .map(EraId::from) } /// Returns an iterator over era IDs of `num_eras` past eras, excluding the provided one. + /// + /// Note: Excludes the activation point era and earlier eras. 
The activation point era itself + /// contains only the single switch block we created after the upgrade. There is no consensus + /// instance for it. pub(crate) fn iter_past_other( &self, era_id: EraId, num_eras: u64, ) -> impl Iterator { (self - .protocol_config - .last_activation_point + .chainspec + .activation_era() + .successor() .max(era_id.saturating_sub(num_eras)) .value()..era_id.value()) .map(EraId::from) @@ -279,492 +302,571 @@ where (era_id.value()..=era_id.value().saturating_add(num_eras)).map(EraId::from) } - /// Starts a new era; panics if it already exists. - #[allow(clippy::too_many_arguments)] // FIXME - fn new_era( + /// Pauses or unpauses consensus: Whenever the last executed block is too far behind the last + /// finalized block, we suspend consensus. + fn update_consensus_pause( &mut self, + effect_builder: EffectBuilder, + rng: &mut NodeRng, era_id: EraId, + ) -> Effects { + let paused = self + .next_block_height + .saturating_sub(self.next_executed_height) + > self.config.max_execution_delay; + self.delegate_to_era(effect_builder, rng, era_id, |consensus, _| { + consensus.set_paused(paused, Timestamp::now()) + }) + } + + /// Initializes a new era. The switch blocks must contain the most recent `auction_delay + 1` + /// ones, in order, but at most as far back as to the last activation point. 
+ pub(super) fn create_new_era_effects( + &mut self, + effect_builder: EffectBuilder, + rng: &mut NodeRng, + switch_blocks: &[BlockHeader], now: Timestamp, - validators: BTreeMap, - newly_slashed: Vec, - slashed: HashSet, - seed: u64, - start_time: Timestamp, - start_height: u64, - ) -> Vec> { - if self.active_eras.contains_key(&era_id) { - panic!("{} already exists", era_id); + ) -> Effects { + match self.create_new_era(switch_blocks, now) { + Ok((era_id, outcomes)) => { + self.handle_consensus_outcomes(effect_builder, rng, era_id, outcomes) + } + Err(err) => fatal!( + effect_builder, + "failed to create era; this is a bug: {:?}", + err, + ) + .ignore(), } - self.current_era = era_id; - self.metrics.current_era.set(era_id.value() as i64); - let instance_id = instance_id(&self.protocol_config, era_id); + } + + fn make_latest_era_current( + &mut self, + effect_builder: EffectBuilder, + rng: &mut NodeRng, + now: Timestamp, + ) -> Effects { + let era_id = match self.current_era() { + Some(era_id) => era_id, + None => { + return Effects::new(); + } + }; + self.metrics + .consensus_current_era + .set(era_id.value() as i64); + let start_height = self.era(era_id).start_height; + self.next_block_height = self.next_block_height.max(start_height); + let outcomes = self.era_mut(era_id).consensus.handle_is_current(now); + self.handle_consensus_outcomes(effect_builder, rng, era_id, outcomes) + } + + fn activate_latest_era_if_needed( + &mut self, + effect_builder: EffectBuilder, + rng: &mut NodeRng, + now: Timestamp, + ) -> Effects { + let era_id = match self.current_era() { + Some(era_id) => era_id, + None => { + return Effects::new(); + } + }; + if self.era(era_id).consensus.is_active() { + return Effects::new(); + } + let our_id = self.validator_matrix.public_signing_key().clone(); + let outcomes = if !self.era(era_id).validators().contains_key(&our_id) { + info!(era = era_id.value(), %our_id, "not voting; not a validator"); + vec![] + } else { + info!(era = era_id.value(), 
%our_id, "start voting"); + let secret = Keypair::new( + self.validator_matrix.secret_signing_key().clone(), + our_id.clone(), + ); + let instance_id = self.era(era_id).consensus.instance_id(); + let unit_hash_file = self.protocol_state_file(instance_id); + self.era_mut(era_id).consensus.activate_validator( + our_id, + secret, + now, + Some(unit_hash_file), + ) + }; + self.handle_consensus_outcomes(effect_builder, rng, era_id, outcomes) + } + + /// Initializes a new era. The switch blocks must contain the most recent `auction_delay + 1` + /// ones, in order, but at most as far back as to the last activation point. + fn create_new_era( + &mut self, + switch_blocks: &[BlockHeader], + now: Timestamp, + ) -> Result<(EraId, Vec>), CreateNewEraError> { + let key_block = switch_blocks + .last() + .ok_or(CreateNewEraError::AttemptedToCreateEraWithNoSwitchBlocks)?; + let era_id = key_block.era_id().successor(); + + let chainspec_hash = self.chainspec.hash(); + let key_block_hash = key_block.block_hash(); + let instance_id = instance_id(chainspec_hash, era_id, key_block_hash); + + if self.open_eras.contains_key(&era_id) { + debug!(era = era_id.value(), "era already exists"); + return Ok((era_id, vec![])); + } + + let era_end = key_block.clone_era_end().ok_or_else(|| { + CreateNewEraError::LastBlockHeaderNotASwitchBlock { + era_id, + last_block_header: Box::new(key_block.clone()), + } + })?; + + let earliest_era = self.chainspec.earliest_switch_block_needed(era_id); + let switch_blocks_needed = era_id.value().saturating_sub(earliest_era.value()) as usize; + let first_idx = switch_blocks + .len() + .checked_sub(switch_blocks_needed) + .ok_or_else(|| CreateNewEraError::InsufficientSwitchBlocks { + era_id, + switch_blocks: switch_blocks.to_vec(), + })?; + for (i, switch_block) in switch_blocks[first_idx..].iter().enumerate() { + if switch_block.era_id() != earliest_era.saturating_add(i as u64) { + return Err(CreateNewEraError::WrongSwitchBlockEra { + era_id, + switch_blocks: 
switch_blocks.to_vec(), + }); + } + } + + let validators = era_end.next_era_validator_weights(); + + if let Some(current_era) = self.current_era() { + if current_era > era_id.saturating_add(PAST_EVIDENCE_ERAS) { + warn!(era = era_id.value(), "trying to create obsolete era"); + return Ok((era_id, vec![])); + } + } + + // Compute the seed for the PRNG from the booking block hash and the accumulated seed. + let auction_delay = self.chainspec.core_config.auction_delay as usize; + let booking_block_hash = + if let Some(booking_block) = switch_blocks.iter().rev().nth(auction_delay) { + booking_block.block_hash() + } else { + // If there's no booking block for the `era_id` + // (b/c it would have been from before Genesis, upgrade or emergency restart), + // use a "zero" block hash. This should not hurt the security of the leader + // selection algorithm. + BlockHash::default() + }; + let seed = Self::era_seed(booking_block_hash, *key_block.accumulated_seed()); + + // The beginning of the new era is marked by the key block. + #[allow(clippy::arithmetic_side_effects)] // Block height should never reach u64::MAX. + let start_height = key_block.height() + 1; + let start_time = key_block.timestamp(); + + // Validators that were inactive in the previous era will be excluded from leader selection + // in the new era. + let inactive = era_end.inactive_validators().iter().cloned().collect(); + + // Validators that were only exposed as faulty after the booking block are still in the new + // era's validator set but get banned. 
+ let blocks_after_booking_block = switch_blocks.iter().rev().take(auction_delay); + let faulty = blocks_after_booking_block + .filter_map(|switch_block| switch_block.maybe_equivocators()) + .flat_map(|equivocators| equivocators.iter()) + .cloned() + .collect(); info!( ?validators, %start_time, %now, %start_height, + %chainspec_hash, + %key_block_hash, %instance_id, + %seed, era = era_id.value(), "starting era", ); - // Activate the era if this node was already running when the era began, it is still - // ongoing based on its minimum duration, and we are one of the validators. - let our_id = &self.public_signing_key; - let should_activate = if !validators.contains_key(&our_id) { - info!(era = era_id.value(), %our_id, "not voting; not a validator"); - false - } else { - info!(era = era_id.value(), %our_id, "start voting"); - true - }; - - let prev_era = era_id + let maybe_prev_era = era_id .checked_sub(1) - .and_then(|last_era_id| self.active_eras.get(&last_era_id)); - - let (mut consensus, mut outcomes) = (self.new_consensus)( - instance_id, - validators.clone(), - &slashed, - &self.protocol_config, - &self.config, - prev_era.map(|era| &*era.consensus), - start_time, - seed, - now, - ); + .and_then(|last_era_id| self.open_eras.get(&last_era_id)); + let validators_with_evidence: Vec = maybe_prev_era + .into_iter() + .flat_map(|prev_era| prev_era.consensus.validators_with_evidence()) + .cloned() + .collect(); - if should_activate { - let secret = Keypair::new(self.secret_signing_key.clone(), our_id.clone()); - let unit_hash_file = self.unit_hashes_folder.join(format!( - "unit_hash_{:?}_{}.dat", + // Create and insert the new era instance. 
+ let protocol_state_file = self.protocol_state_file(&instance_id); + let (consensus, mut outcomes) = match self.chainspec.core_config.consensus_protocol { + ConsensusProtocolName::Highway => HighwayProtocol::new_boxed( instance_id, - self.public_signing_key.to_hex() - )); - outcomes.extend(consensus.activate_validator( - our_id.clone(), - secret, + validators.clone(), + &faulty, + &inactive, + self.chainspec.as_ref(), + &self.config, + maybe_prev_era.map(|era| &*era.consensus), + start_time, + seed, now, - Some(unit_hash_file), - )) - } + Some(protocol_state_file), + ), + ConsensusProtocolName::Zug => Zug::new_boxed( + instance_id, + validators.clone(), + &faulty, + &inactive, + self.chainspec.as_ref(), + &self.config, + maybe_prev_era.map(|era| &*era.consensus), + start_time, + seed, + now, + protocol_state_file, + ), + }; let era = Era::new( consensus, start_time, start_height, - newly_slashed, - slashed, - validators, + faulty, + inactive, + validators.clone(), ); - let _ = self.active_eras.insert(era_id, era); - let oldest_bonded_era_id = oldest_bonded_era(&self.protocol_config, era_id); - // Clear the obsolete data from the era whose validators are unbonded now. We only retain - // the information necessary to validate evidence that units in still-bonded eras may refer - // to for cross-era slashing. - if let Some(evidence_only_era_id) = oldest_bonded_era_id.checked_sub(1) { - trace!(era = evidence_only_era_id.value(), "clearing unbonded era"); - if let Some(era) = self.active_eras.get_mut(&evidence_only_era_id) { + let _ = self.open_eras.insert(era_id, era); + + // Activate the era if this node was already running when the era began, it is still + // ongoing based on its minimum duration, and we are one of the validators. 
+ let our_id = self.validator_matrix.public_signing_key().clone(); + if self + .current_era() + .is_some_and(|current_era| current_era > era_id) + { + trace!( + era = era_id.value(), + current_era = ?self.current_era(), + "not voting; initializing past era" + ); + // We're creating an era that's not the current era - which means we're currently + // initializing consensus, and we want to set all the older eras to be evidence only. + if let Some(era) = self.open_eras.get_mut(&era_id) { era.consensus.set_evidence_only(); } + } else { + self.metrics + .consensus_current_era + .set(era_id.value() as i64); + self.next_block_height = self.next_block_height.max(start_height); + outcomes.extend(self.era_mut(era_id).consensus.handle_is_current(now)); + if !self.era(era_id).validators().contains_key(&our_id) { + info!(era = era_id.value(), %our_id, "not voting; not a validator"); + } else { + info!(era = era_id.value(), %our_id, "start voting"); + let secret = Keypair::new( + self.validator_matrix.secret_signing_key().clone(), + our_id.clone(), + ); + let unit_hash_file = self.protocol_state_file(&instance_id); + outcomes.extend(self.era_mut(era_id).consensus.activate_validator( + our_id, + secret, + now, + Some(unit_hash_file), + )) + }; } - // Remove the era that has become obsolete now: The oldest bonded era could still receive - // units that refer to evidence from any era that was bonded when it was the current one. - let oldest_evidence_era_id = oldest_bonded_era(&self.protocol_config, oldest_bonded_era_id); - if let Some(obsolete_era_id) = oldest_evidence_era_id.checked_sub(1) { - trace!(era = obsolete_era_id.value(), "removing obsolete era"); - self.active_eras.remove(&obsolete_era_id); - } - - outcomes - } - - /// Returns `true` if the specified era is active and bonded. 
- fn is_bonded(&self, era_id: EraId) -> bool { - era_id.saturating_add(self.bonded_eras().into()) >= self.current_era - && era_id <= self.current_era - } - - /// Returns whether the validator with the given public key is bonded in that era. - fn is_validator_in(&self, pub_key: &PublicKey, era_id: EraId) -> bool { - let has_validator = |era: &Era| era.validators().contains_key(&pub_key); - self.active_eras.get(&era_id).map_or(false, has_validator) - } - - /// Returns the most recent active era. - #[cfg(test)] - pub(crate) fn current_era(&self) -> EraId { - self.current_era - } - pub(crate) fn stop_for_upgrade(&self) -> bool { - self.stop_for_upgrade - } - - /// Updates `next_executed_height` based on the given block header, and unpauses consensus if - /// block execution has caught up with finalization. - #[allow(clippy::integer_arithmetic)] // Block height should never reach u64::MAX. - fn executed_block(&mut self, block_header: &BlockHeader) { - self.next_executed_height = self.next_executed_height.max(block_header.height() + 1); - self.update_consensus_pause(); - } - - /// Pauses or unpauses consensus: Whenever the last executed block is too far behind the last - /// finalized block, we suspend consensus. - fn update_consensus_pause(&mut self) { - let paused = self - .next_block_height - .saturating_sub(self.next_executed_height) - > self.config.highway.max_execution_delay; - match self.active_eras.get_mut(&self.current_era) { - Some(era) => era.set_paused(paused), - None => error!( - era = self.current_era.value(), - "current era not initialized" - ), + // Mark validators as faulty for which we have evidence in the previous era. 
+ for pub_key in validators_with_evidence { + let proposed_blocks = self + .era_mut(era_id) + .resolve_evidence_and_mark_faulty(&pub_key); + if !proposed_blocks.is_empty() { + error!( + ?proposed_blocks, + era = era_id.value(), + "unexpected block in new era" + ); + } } - } - fn handle_initialize_eras( - &mut self, - key_blocks: HashMap, - booking_blocks: HashMap, - activation_era_validators: BTreeMap, - ) -> HashMap> { - let mut result_map = HashMap::new(); - - for era_id in self.iter_past(self.current_era, self.bonded_eras().saturating_mul(2)) { - let newly_slashed; - let validators; - let start_height; - let era_start_time; - let seed; - - let booking_block_hash = booking_blocks - .get(&era_id) - .expect("should have booking block"); - - #[allow(clippy::integer_arithmetic)] // Block height should never reach u64::MAX. - if era_id.is_genesis() { - newly_slashed = vec![]; - // The validator set was read from the global state: there's no key block for era 0. - validators = activation_era_validators.clone(); - start_height = 0; - era_start_time = self - .protocol_config - .genesis_timestamp - .expect("must have genesis start time if era ID is 0"); - seed = 0; - } else { - // If this is not era 0, there must be a key block for it. - let key_block = key_blocks.get(&era_id).expect("missing key block"); - start_height = key_block.height() + 1; - era_start_time = key_block.timestamp(); - seed = Self::era_seed(*booking_block_hash, key_block.accumulated_seed()); - if era_id == self.protocol_config.last_activation_point { - // After an upgrade or emergency restart, we don't do cross-era slashing. - newly_slashed = vec![]; - // And we read the validator sets from the global state, because the key block - // might have been overwritten by the upgrade/restart. - validators = activation_era_validators.clone(); + // Clear the obsolete data from the era before the previous one. 
We only retain the + // information necessary to validate evidence that units in the two most recent eras may + // refer to for cross-era fault tracking. + if let Some(current_era) = self.current_era() { + let mut removed_instance_ids = vec![]; + let earliest_open_era = current_era.saturating_sub(PAST_OPEN_ERAS); + let earliest_active_era = current_era.saturating_sub(PAST_EVIDENCE_ERAS); + self.open_eras.retain(|era_id, era| { + if earliest_open_era > *era_id { + trace!(era = era_id.value(), "removing obsolete era"); + removed_instance_ids.push(*era.consensus.instance_id()); + false + } else if earliest_active_era > *era_id { + trace!(era = era_id.value(), "setting old era to evidence only"); + era.consensus.set_evidence_only(); + true } else { - // If it's neither genesis nor upgrade nor restart, we use the validators from - // the key block and ban validators that were slashed in previous eras. - newly_slashed = key_block - .era_end() - .expect("key block must be a switch block") - .equivocators - .clone(); - validators = key_block - .next_era_validator_weights() - .expect("missing validators from key block") - .clone(); + true + } + }); + for instance_id in removed_instance_ids { + if let Err(err) = fs::remove_file(self.protocol_state_file(&instance_id)) { + match err.kind() { + io::ErrorKind::NotFound => {} + err => warn!(?err, "could not delete unit hash file"), + } } } - - let slashed = self - .iter_past(era_id, self.bonded_eras()) - .filter_map(|old_id| key_blocks.get(&old_id).and_then(|bhdr| bhdr.era_end())) - .flat_map(|era_end| era_end.equivocators.clone()) - .collect(); - - let results = self.new_era( - era_id, - Timestamp::now(), - validators, - newly_slashed, - slashed, - seed, - era_start_time, - start_height, - ); - result_map.insert(era_id, results); } - let active_era_outcomes = self.active_eras[&self.current_era] - .consensus - .handle_is_current(); - result_map - .entry(self.current_era) - .or_default() - .extend(active_era_outcomes); - 
self.is_initialized = true; - self.next_block_height = self.active_eras[&self.current_era].start_height; - result_map - } - - /// The number of past eras whose validators are still bonded. After this many eras, a former - /// validator is allowed to withdraw their stake, so their signature can't be trusted anymore. - /// - /// A node keeps `2 * bonded_eras` past eras around, because the oldest bonded era could still - /// receive blocks that refer to `bonded_eras` before that. - fn bonded_eras(&self) -> u64 { - bonded_eras(&self.protocol_config) - } -} - -/// Returns an era ID in which the booking block for `era_id` lives, if we can use it. -/// Booking block for era N is the switch block (the last block) in era N – AUCTION_DELAY - 1. -/// To find it, we get the start height of era N - AUCTION_DELAY and subtract 1. -/// We make sure not to use an era ID below the last upgrade activation point, because we will -/// not have instances of eras from before that. -/// -/// We can't use it if it is: -/// * before Genesis -/// * before upgrade -/// * before emergency restart -/// In those cases, returns `None`. -fn valid_booking_block_era_id( - era_id: EraId, - auction_delay: u64, - last_activation_point: EraId, -) -> Option { - let after_booking_era_id = era_id.saturating_sub(auction_delay); - - // If we would have gone below the last activation point (the first `AUCTION_DELAY ` eras after - // an upgrade), we return `None` as there are no booking blocks there that we can use – we - // can't use anything from before an upgrade. - // NOTE that it's OK if `booking_era_id` == `last_activation_point`. - (after_booking_era_id > last_activation_point).then(|| after_booking_era_id.saturating_sub(1)) -} -/// Returns a booking block hash for `era_id`. 
-async fn get_booking_block_hash( - effect_builder: EffectBuilder, - era_id: EraId, - auction_delay: u64, - last_activation_point: EraId, -) -> BlockHash -where - REv: From, -{ - if let Some(booking_block_era_id) = - valid_booking_block_era_id(era_id, auction_delay, last_activation_point) - { - match effect_builder - .get_switch_block_at_era_id_from_storage(booking_block_era_id) - .await - { - Some(block) => *block.hash(), - None => { - error!( - ?era_id, - ?booking_block_era_id, - "booking block for era must exist" - ); - panic!("booking block not found in storage"); - } - } - } else { - // If there's no booking block for the `era_id` - // (b/c it would have been from before Genesis, upgrade or emergency restart), - // use a "zero" block hash. This should not hurt the security of the leader selection - // algorithm. - BlockHash::default() + Ok((era_id, outcomes)) } -} -/// Returns booking block hashes for the eras. -async fn collect_booking_block_hashes( - effect_builder: EffectBuilder, - era_ids: Vec, - auction_delay: u64, - last_activation_point: EraId, -) -> HashMap -where - REv: From, -{ - let mut booking_block_hashes: HashMap = HashMap::new(); - - for era_id in era_ids { - let booking_block_hash = - get_booking_block_hash(effect_builder, era_id, auction_delay, last_activation_point) - .await; - booking_block_hashes.insert(era_id, booking_block_hash); + /// Returns the path to the era's unit file. + fn protocol_state_file(&self, instance_id: &Digest) -> PathBuf { + self.unit_files_folder.join(format!( + "unit_{:?}_{}.dat", + instance_id, + self.validator_matrix.public_signing_key().to_hex() + )) } - booking_block_hashes -} - -/// A mutable `EraSupervisor` reference, together with an `EffectBuilder`. -/// -/// This is a short-lived convenience type to avoid passing the effect builder through lots of -/// message calls, and making every method individually generic in `REv`. It is only instantiated -/// for the duration of handling a single event. 
-pub(super) struct EraSupervisorHandlingWrapper<'a, I, REv: 'static> { - pub(super) era_supervisor: &'a mut EraSupervisor, - pub(super) effect_builder: EffectBuilder, - pub(super) rng: &'a mut NodeRng, -} - -impl<'a, I, REv> EraSupervisorHandlingWrapper<'a, I, REv> -where - I: NodeIdT, - REv: ReactorEventT, -{ /// Applies `f` to the consensus protocol of the specified era. - fn delegate_to_era(&mut self, era_id: EraId, f: F) -> Effects> + fn delegate_to_era( + &mut self, + effect_builder: EffectBuilder, + rng: &mut NodeRng, + era_id: EraId, + f: F, + ) -> Effects where - F: FnOnce(&mut dyn ConsensusProtocol) -> Vec>, + F: FnOnce( + &mut dyn ConsensusProtocol, + &mut NodeRng, + ) -> Vec>, { - match self.era_supervisor.active_eras.get_mut(&era_id) { + match self.open_eras.get_mut(&era_id) { None => { - if era_id > self.era_supervisor.current_era { - info!(era = era_id.value(), "received message for future era"); - } else { - info!(era = era_id.value(), "received message for obsolete era"); - } + self.log_missing_era(era_id); Effects::new() } Some(era) => { - let outcomes = f(&mut *era.consensus); - self.handle_consensus_outcomes(era_id, outcomes) + let outcomes = f(&mut *era.consensus, rng); + self.handle_consensus_outcomes(effect_builder, rng, era_id, outcomes) + } + } + } + + fn log_missing_era(&self, era_id: EraId) { + let era = era_id.value(); + if let Some(current_era_id) = self.current_era() { + match era_id.cmp(¤t_era_id) { + cmp::Ordering::Greater => trace!(era, "received message for future era"), + cmp::Ordering::Equal => error!(era, "missing current era"), + cmp::Ordering::Less => info!(era, "received message for obsolete era"), } + } else { + info!(era, "received message, but no era initialized"); } } - pub(super) fn handle_timer( + pub(super) fn handle_timer( &mut self, + effect_builder: EffectBuilder, + rng: &mut NodeRng, era_id: EraId, timestamp: Timestamp, timer_id: TimerId, - ) -> Effects> { - self.delegate_to_era(era_id, move |consensus| { - 
consensus.handle_timer(timestamp, timer_id) + ) -> Effects { + let now = Timestamp::now(); + let delay = now.saturating_diff(timestamp).millis(); + if delay > TIMER_DELAY_WARNING_MILLIS { + warn!( + era = era_id.value(), timer_id = timer_id.0, %delay, + "timer called with long delay" + ); + } + self.delegate_to_era(effect_builder, rng, era_id, move |consensus, rng| { + consensus.handle_timer(timestamp, now, timer_id, rng) }) } - pub(super) fn handle_action( + pub(super) fn handle_action( &mut self, + effect_builder: EffectBuilder, + rng: &mut NodeRng, era_id: EraId, action_id: ActionId, - ) -> Effects> { - self.delegate_to_era(era_id, move |consensus| { + ) -> Effects { + self.delegate_to_era(effect_builder, rng, era_id, move |consensus, _| { consensus.handle_action(action_id, Timestamp::now()) }) } - pub(super) fn handle_message(&mut self, sender: I, msg: ConsensusMessage) -> Effects> { + pub(super) fn handle_message( + &mut self, + effect_builder: EffectBuilder, + rng: &mut NodeRng, + sender: NodeId, + msg: ConsensusMessage, + ) -> Effects { match msg { ConsensusMessage::Protocol { era_id, payload } => { - // If the era is already unbonded, only accept new evidence, because still-bonded - // eras could depend on that. 
trace!(era = era_id.value(), "received a consensus message"); - self.delegate_to_era(era_id, move |consensus| { - consensus.handle_message(sender, payload, Timestamp::now()) + + self.delegate_to_era(effect_builder, rng, era_id, move |consensus, rng| { + consensus.handle_message(rng, sender, payload, Timestamp::now()) }) } - ConsensusMessage::EvidenceRequest { era_id, pub_key } => { - if !self.era_supervisor.is_bonded(era_id) { - trace!(era = era_id.value(), "not handling message; era too old"); - return Effects::new(); - } - self.era_supervisor - .iter_past(era_id, self.era_supervisor.bonded_eras()) - .flat_map(|e_id| { - self.delegate_to_era(e_id, |consensus| { - consensus.request_evidence(sender.clone(), &pub_key) + ConsensusMessage::EvidenceRequest { era_id, pub_key } => match self.current_era() { + None => Effects::new(), + Some(current_era) => { + if era_id.saturating_add(PAST_EVIDENCE_ERAS) < current_era + || !self.open_eras.contains_key(&era_id) + { + trace!(era = era_id.value(), "not handling message; era too old"); + return Effects::new(); + } + self.iter_past(era_id, PAST_EVIDENCE_ERAS) + .flat_map(|e_id| { + self.delegate_to_era(effect_builder, rng, e_id, |consensus, _| { + consensus.send_evidence(sender, &pub_key) + }) }) - }) - .collect() + .collect() + } + }, + } + } + + pub(super) fn handle_demand( + &mut self, + effect_builder: EffectBuilder, + rng: &mut NodeRng, + sender: NodeId, + request: Box, + auto_closing_responder: AutoClosingResponder, + ) -> Effects { + let ConsensusRequestMessage { era_id, payload } = *request; + + trace!(era = era_id.value(), "received a consensus request"); + match self.open_eras.get_mut(&era_id) { + None => { + self.log_missing_era(era_id); + auto_closing_responder.respond_none().ignore() + } + Some(era) => { + let (outcomes, response) = + era.consensus + .handle_request_message(rng, sender, payload, Timestamp::now()); + let mut effects = + self.handle_consensus_outcomes(effect_builder, rng, era_id, outcomes); + if let 
Some(payload) = response { + effects.extend( + auto_closing_responder + .respond(ConsensusMessage::Protocol { era_id, payload }.into()) + .ignore(), + ); + } else { + effects.extend(auto_closing_responder.respond_none().ignore()); + } + effects } } } - pub(super) fn handle_new_proto_block( + pub(super) fn handle_new_block_payload( &mut self, - era_id: EraId, - proto_block: ProtoBlock, - block_context: BlockContext, - parent: Option, - ) -> Effects> { - if !self.era_supervisor.is_bonded(era_id) { - warn!(era = era_id.value(), "new proto block in outdated era"); - return Effects::new(); + effect_builder: EffectBuilder, + rng: &mut NodeRng, + new_block_payload: NewBlockPayload, + ) -> Effects { + let NewBlockPayload { + era_id, + block_payload, + block_context, + } = new_block_payload; + match self.current_era() { + None => { + warn!("new block payload but no initialized era"); + Effects::new() + } + Some(current_era) => { + if era_id.saturating_add(PAST_EVIDENCE_ERAS) < current_era + || !self.open_eras.contains_key(&era_id) + { + warn!(era = era_id.value(), "new block payload in outdated era"); + return Effects::new(); + } + let proposed_block = ProposedBlock::new(block_payload, block_context); + self.delegate_to_era(effect_builder, rng, era_id, move |consensus, _| { + consensus.propose(proposed_block, Timestamp::now()) + }) + } } - let accusations = self - .era_supervisor - .iter_past(era_id, self.era_supervisor.bonded_eras()) - .flat_map(|e_id| self.era(e_id).consensus.validators_with_evidence()) - .unique() - .filter(|pub_key| !self.era(era_id).slashed.contains(pub_key)) - .cloned() - .collect(); - let candidate_block = CandidateBlock::new(proto_block, accusations, parent); - self.delegate_to_era(era_id, move |consensus| { - consensus.propose(candidate_block, block_context, Timestamp::now()) - }) } - pub(super) fn handle_block_added(&mut self, block: Block) -> Effects> { - let our_pk = self.era_supervisor.public_signing_key.clone(); - let our_sk = 
self.era_supervisor.secret_signing_key.clone(); - let era_id = block.header().era_id(); - self.era_supervisor.executed_block(block.header()); - let mut effects = if self.era_supervisor.is_validator_in(&our_pk, era_id) { - let block_hash = block.hash(); - self.effect_builder - .announce_created_finality_signature(FinalitySignature::new( - *block_hash, - era_id, - &our_sk, - our_pk, - )) - .ignore() - } else { - Effects::new() - }; - if era_id < self.era_supervisor.current_era { + pub(super) fn handle_block_added( + &mut self, + effect_builder: EffectBuilder, + rng: &mut NodeRng, + block_header: BlockHeader, + ) -> Effects { + self.last_progress = Timestamp::now(); + self.next_executed_height = self + .next_executed_height + .max(block_header.height().saturating_add(1)); + let era_id = block_header.era_id(); + let mut effects = self.update_consensus_pause(effect_builder, rng, era_id); + + if self + .current_era() + .is_none_or(|current_era| era_id < current_era) + { trace!(era = era_id.value(), "executed block in old era"); return effects; } - if block.header().is_switch_block() && !self.should_upgrade_after(&era_id) { - // if the block is a switch block, we have to get the validators for the new era and - // create it, before we can say we handled the block - let new_era_id = era_id.successor(); - let effect = get_booking_block_hash( - self.effect_builder, - new_era_id, - self.era_supervisor.protocol_config.auction_delay, - self.era_supervisor.protocol_config.last_activation_point, - ) - .event(|booking_block_hash| Event::CreateNewEra { - block: Box::new(block), - booking_block_hash: Ok(booking_block_hash), - }); - effects.extend(effect); + if block_header.next_era_validator_weights().is_some() { + if let Some(era) = self.open_eras.get_mut(&era_id) { + // This was the era's last block. Schedule deactivating this era. 
+ let delay = Timestamp::now() + .saturating_diff(block_header.timestamp()) + .into(); + let faulty_num = era.consensus.validators_with_evidence().len(); + let deactivate_era = move |_| Event::DeactivateEra { + era_id, + faulty_num, + delay, + }; + effects.extend(effect_builder.set_timeout(delay).event(deactivate_era)); + } } effects } - pub(super) fn handle_deactivate_era( + pub(super) fn handle_deactivate_era( &mut self, + effect_builder: EffectBuilder, era_id: EraId, old_faulty_num: usize, delay: Duration, - ) -> Effects> { - let era = if let Some(era) = self.era_supervisor.active_eras.get_mut(&era_id) { + ) -> Effects { + let era = if let Some(era) = self.open_eras.get_mut(&era_id) { era } else { warn!(era = era_id.value(), "trying to deactivate obsolete era"); @@ -774,11 +876,6 @@ where if faulty_num == old_faulty_num { info!(era = era_id.value(), "stop voting in era"); era.consensus.deactivate_validator(); - if self.should_upgrade_after(&era_id) { - // If the next era is at or after the upgrade activation point, stop the node. - info!(era = era_id.value(), "shutting down for upgrade"); - self.era_supervisor.stop_for_upgrade = true; - } Effects::new() } else { let deactivate_era = move |_| Event::DeactivateEra { @@ -786,184 +883,176 @@ where faulty_num, delay, }; - self.effect_builder.set_timeout(delay).event(deactivate_era) + effect_builder.set_timeout(delay).event(deactivate_era) } } - pub(super) fn handle_initialize_eras( - &mut self, - key_blocks: HashMap, - booking_blocks: HashMap, - validators: BTreeMap, - ) -> Effects> { - let result_map = - self.era_supervisor - .handle_initialize_eras(key_blocks, booking_blocks, validators); - - let effects = result_map - .into_iter() - .flat_map(|(era_id, results)| self.handle_consensus_outcomes(era_id, results)) - .collect(); - - info!("finished initializing era supervisor"); - info!(?self.era_supervisor, "current eras"); - - effects + /// Will deactivate voting for the current era. 
+ /// Does nothing if the current era doesn't exist or is inactive already. + pub(crate) fn deactivate_current_era(&mut self) -> Result { + let which_era = self + .current_era() + .ok_or_else(|| "attempt to deactivate an era with no eras instantiated!".to_string())?; + let era = self.era_mut(which_era); + if false == era.consensus.is_active() { + debug!(era_id=%which_era, "attempt to deactivate inactive era"); + return Ok(which_era); + } + era.consensus.deactivate_validator(); + Ok(which_era) } - /// Creates a new era. - pub(super) fn handle_create_new_era( + pub(super) fn resolve_validity( &mut self, - switch_block: Block, - booking_block_hash: BlockHash, - ) -> Effects> { - let (era_end, next_era_validators_weights) = match ( - switch_block.header().era_end(), - switch_block.header().next_era_validator_weights(), - ) { - (Some(era_end), Some(next_era_validator_weights)) => { - (era_end, next_era_validator_weights) - } - _ => { - return fatal!( - self.effect_builder, - "attempted to create a new era with a non-switch block: {}", - switch_block - ) - .ignore() - } - }; - let newly_slashed = era_end.equivocators.clone(); - let era_id = switch_block.header().era_id().successor(); - info!(era = era_id.value(), "era created"); - let seed = EraSupervisor::::era_seed( - booking_block_hash, - switch_block.header().accumulated_seed(), - ); - trace!(%seed, "the seed for {}: {}", era_id, seed); - let slashed = self - .era_supervisor - .iter_past_other(era_id, self.era_supervisor.bonded_eras()) - .flat_map(|e_id| &self.era_supervisor.active_eras[&e_id].newly_slashed) - .chain(&newly_slashed) - .cloned() - .collect(); - #[allow(clippy::integer_arithmetic)] // Block height should never reach u64::MAX. - let mut outcomes = self.era_supervisor.new_era( + effect_builder: EffectBuilder, + rng: &mut NodeRng, + resolve_validity: ResolveValidity, + ) -> Effects { + let ResolveValidity { era_id, - Timestamp::now(), // TODO: This should be passed in. 
- next_era_validators_weights.clone(), - newly_slashed, - slashed, - seed, - switch_block.header().timestamp(), - switch_block.height() + 1, - ); - outcomes.extend( - self.era_supervisor.active_eras[&era_id] - .consensus - .handle_is_current(), - ); - self.handle_consensus_outcomes(era_id, outcomes) - } - - pub(super) fn resolve_validity( - &mut self, - era_id: EraId, - sender: I, - proto_block: ProtoBlock, - parent: Option, - valid: bool, - ) -> Effects> { - self.era_supervisor.metrics.proposed_block(); + sender, + proposed_block, + maybe_error, + } = resolve_validity; + self.metrics.proposed_block(); let mut effects = Effects::new(); - if !valid { - warn!( - %sender, - era = %era_id.value(), - "invalid consensus value; disconnecting from the sender" - ); - effects.extend(self.disconnect(sender)); + let valid = maybe_error.is_none(); + if let Some(error) = maybe_error { + debug!(%era_id, %sender, ?error, "announcing block peer due to invalid proposal"); + effects.extend({ + effect_builder + .announce_block_peer_with_justification( + sender, + BlocklistJustification::SentInvalidProposal { era: era_id, error }, + ) + .ignore() + }); } - let candidate_blocks = if let Some(era) = self.era_supervisor.active_eras.get_mut(&era_id) { - era.resolve_validity(&proto_block, parent, valid) - } else { - return effects; - }; - for candidate_block in candidate_blocks { - effects.extend(self.delegate_to_era(era_id, |consensus| { - consensus.resolve_validity(&candidate_block, valid, Timestamp::now()) - })); + if self + .open_eras + .get_mut(&era_id) + .is_some_and(|era| era.resolve_validity(&proposed_block, valid)) + { + effects.extend( + self.delegate_to_era(effect_builder, rng, era_id, |consensus, _| { + consensus.resolve_validity(proposed_block.clone(), valid, Timestamp::now()) + }), + ); } effects } - fn handle_consensus_outcomes(&mut self, era_id: EraId, outcomes: T) -> Effects> + pub(crate) fn last_progress(&self) -> Timestamp { + self.last_progress + } + + fn 
handle_consensus_outcomes( + &mut self, + effect_builder: EffectBuilder, + rng: &mut NodeRng, + era_id: EraId, + outcomes: T, + ) -> Effects where - T: IntoIterator>, + T: IntoIterator>, { outcomes .into_iter() - .flat_map(|result| self.handle_consensus_outcome(era_id, result)) + .flat_map(|result| self.handle_consensus_outcome(effect_builder, rng, era_id, result)) .collect() } /// Returns `true` if any of the most recent eras has evidence against the validator with key /// `pub_key`. fn has_evidence(&self, era_id: EraId, pub_key: PublicKey) -> bool { - self.era_supervisor - .iter_past(era_id, self.era_supervisor.bonded_eras()) + self.iter_past(era_id, PAST_EVIDENCE_ERAS) .any(|eid| self.era(eid).consensus.has_evidence(&pub_key)) } /// Returns the era with the specified ID. Panics if it does not exist. - fn era(&self, era_id: EraId) -> &Era { - &self.era_supervisor.active_eras[&era_id] + fn era(&self, era_id: EraId) -> &Era { + &self.open_eras[&era_id] } /// Returns the era with the specified ID mutably. Panics if it does not exist. - fn era_mut(&mut self, era_id: EraId) -> &mut Era { - self.era_supervisor.active_eras.get_mut(&era_id).unwrap() + fn era_mut(&mut self, era_id: EraId) -> &mut Era { + self.open_eras.get_mut(&era_id).unwrap() } - #[allow(clippy::integer_arithmetic)] // Block height should never reach u64::MAX. - fn handle_consensus_outcome( + #[allow(clippy::arithmetic_side_effects)] // Block height should never reach u64::MAX. 
+ fn handle_consensus_outcome( &mut self, + effect_builder: EffectBuilder, + rng: &mut NodeRng, era_id: EraId, - consensus_result: ProtocolOutcome, - ) -> Effects> { - match consensus_result { - ProtocolOutcome::InvalidIncomingMessage(_, sender, error) => { - warn!( - %sender, - %error, - "invalid incoming message to consensus instance; disconnecting from the sender" - ); - self.disconnect(sender) + consensus_result: ProtocolOutcome, + ) -> Effects { + let current_era = match self.current_era() { + Some(current_era) => current_era, + None => { + error!("no current era"); + return Effects::new(); } + }; + match consensus_result { ProtocolOutcome::Disconnect(sender) => { warn!( %sender, "disconnecting from the sender of invalid data" ); - self.disconnect(sender) + { + effect_builder + .announce_block_peer_with_justification( + sender, + BlocklistJustification::BadConsensusBehavior, + ) + .ignore() + } } ProtocolOutcome::CreatedGossipMessage(payload) => { let message = ConsensusMessage::Protocol { era_id, payload }; - // TODO: we'll want to gossip instead of broadcast here - self.effect_builder - .broadcast_message(message.into()) - .ignore() + let delay_by = self.message_delay_failpoint.fire(rng).cloned(); + async move { + if let Some(delay) = delay_by { + effect_builder + .set_timeout(Duration::from_millis(delay)) + .await; + } + effect_builder + .broadcast_message_to_validators(message.into(), era_id) + .await + } + .ignore() } ProtocolOutcome::CreatedTargetedMessage(payload, to) => { let message = ConsensusMessage::Protocol { era_id, payload }; - self.effect_builder - .send_message(to, message.into()) - .ignore() + effect_builder.enqueue_message(to, message.into()).ignore() + } + ProtocolOutcome::CreatedMessageToRandomPeer(payload) => { + let message = ConsensusMessage::Protocol { era_id, payload }; + + async move { + let peers = effect_builder.get_fully_connected_peers(1).await; + if let Some(to) = peers.into_iter().next() { + effect_builder.enqueue_message(to, 
message.into()).await; + } + } + .ignore() + } + ProtocolOutcome::CreatedRequestToRandomPeer(payload) => { + let message = ConsensusRequestMessage { era_id, payload }; + + async move { + let peers = effect_builder.get_fully_connected_peers(1).await; + if let Some(to) = peers.into_iter().next() { + effect_builder.enqueue_message(to, message.into()).await; + } + } + .ignore() } ProtocolOutcome::ScheduleTimer(timestamp, timer_id) => { let timediff = timestamp.saturating_diff(Timestamp::now()); - self.effect_builder + effect_builder .set_timeout(timediff.into()) .event(move |_| Event::Timer { era_id, @@ -971,360 +1060,697 @@ where timer_id, }) } - ProtocolOutcome::QueueAction(action_id) => self - .effect_builder + ProtocolOutcome::QueueAction(action_id) => effect_builder .immediately() .event(move |()| Event::Action { era_id, action_id }), - ProtocolOutcome::CreateNewBlock { - block_context, - past_values, - parent_value, - } => { - let past_deploys = past_values - .iter() - .flat_map(|candidate| candidate.proto_block().deploys_and_transfers_iter()) + ProtocolOutcome::CreateNewBlock(block_context, proposal_expiry) => { + let signature_rewards_max_delay = + self.chainspec.core_config.signature_rewards_max_delay; + let current_block_height = self.proposed_block_height(&block_context, era_id); + let minimum_block_height = + current_block_height.saturating_sub(signature_rewards_max_delay); + + let awaitable_appendable_block = effect_builder.request_appendable_block( + block_context.timestamp(), + era_id, + proposal_expiry, + ); + let awaitable_blocks_with_metadata = async move { + effect_builder + .collect_past_blocks_with_metadata( + minimum_block_height..current_block_height, + false, + ) + .await + }; + let accusations = self + .iter_past(era_id, PAST_EVIDENCE_ERAS) + .flat_map(|e_id| self.era(e_id).consensus.validators_with_evidence()) + .unique() + .filter(|pub_key| !self.era(era_id).faulty.contains(pub_key)) .cloned() .collect(); - let parent = 
parent_value.as_ref().map(CandidateBlock::hash); - self.effect_builder - .request_proto_block( - block_context, - past_deploys, - self.era_supervisor.next_block_height, - self.rng.gen(), - ) - .event(move |(proto_block, block_context)| Event::NewProtoBlock { - era_id, - proto_block, - block_context, - parent, - }) + let random_bit = rng.gen(); + + let validator_matrix = self.validator_matrix.clone(); + + let delay_by = self.proposal_delay_failpoint.fire(rng).cloned(); + async move { + if let Some(delay) = delay_by { + effect_builder + .set_timeout(Duration::from_millis(delay)) + .await; + } + join_2(awaitable_appendable_block, awaitable_blocks_with_metadata).await + } + .event( + move |(appendable_block, maybe_past_blocks_with_metadata)| { + let rewarded_signatures = create_rewarded_signatures( + &maybe_past_blocks_with_metadata, + validator_matrix, + &block_context, + signature_rewards_max_delay, + ); + + let block_payload = Arc::new(appendable_block.into_block_payload( + accusations, + rewarded_signatures, + random_bit, + )); + + Event::NewBlockPayload(NewBlockPayload { + era_id, + block_payload, + block_context, + }) + }, + ) } ProtocolOutcome::FinalizedBlock(CpFinalizedBlock { value, timestamp, - height, + relative_height, terminal_block_data, equivocators, proposer, }) => { - let era = self.era_supervisor.active_eras.get_mut(&era_id).unwrap(); + if era_id != current_era { + debug!(era = era_id.value(), "finalized block in old era"); + return Effects::new(); + } + let era = self.open_eras.get_mut(&era_id).unwrap(); era.add_accusations(&equivocators); era.add_accusations(value.accusations()); // If this is the era's last block, it contains rewards. Everyone who is accused in - // the block or seen as equivocating via the consensus protocol gets slashed. - let era_end = terminal_block_data.map(|tbd| EraReport { - rewards: tbd.rewards, - // TODO: In the first 90 days we don't slash, and we just report all - // equivocators as "inactive" instead. 
Change this back 90 days after launch, - // and put era.accusations() into equivocators instead of inactive_validators. - equivocators: vec![], - inactive_validators: tbd - .inactive_validators - .into_iter() - .chain(era.accusations()) - .collect(), + // the block or seen as equivocating via the consensus protocol gets faulty. + + // TODO - add support for the `compute_rewards` chainspec parameter coming from + // private chain implementation in the 2.0 rewards scheme. + let _compute_rewards = self.chainspec.core_config.compute_rewards; + let report = terminal_block_data.map(|tbd| { + // If block rewards are disabled, zero them. + // if !compute_rewards { + // for reward in tbd.rewards.values_mut() { + // *reward = 0; + // } + // } + + InternalEraReport { + equivocators: era.accusations(), + inactive_validators: tbd.inactive_validators, + } }); + let proposed_block = Arc::try_unwrap(value).unwrap_or_else(|arc| (*arc).clone()); + let finalized_approvals: HashMap<_, _> = + proposed_block.all_transactions().cloned().collect(); + if let Some(era_report) = report.as_ref() { + info!( + inactive = %DisplayIter::new(&era_report.inactive_validators), + faulty = %DisplayIter::new(&era_report.equivocators), + era_id = era_id.value(), + "era end: inactive and faulty validators" + ); + } let finalized_block = FinalizedBlock::new( - value.into(), - era_end, + proposed_block, + report, + timestamp, era_id, - era.start_height + height, + era.start_height + relative_height, proposer, ); - self.era_supervisor - .metrics - .finalized_block(&finalized_block); - // Announce the finalized proto block. - let mut effects = self - .effect_builder + info!( + era_id = finalized_block.era_id.value(), + height = finalized_block.height, + timestamp = %finalized_block.timestamp, + "finalized block" + ); + self.metrics.finalized_block(&finalized_block); + // Announce the finalized block. 
+ let mut effects = effect_builder .announce_finalized_block(finalized_block.clone()) .ignore(); - self.era_supervisor.next_block_height = finalized_block.height() + 1; - if finalized_block.era_report().is_some() { - // This was the era's last block. Schedule deactivating this era. - let delay = Timestamp::now().saturating_diff(timestamp).into(); - let faulty_num = era.consensus.validators_with_evidence().len(); - let deactivate_era = move |_| Event::DeactivateEra { - era_id, - faulty_num, - delay, - }; - effects.extend(self.effect_builder.set_timeout(delay).event(deactivate_era)); - } + self.next_block_height = self.next_block_height.max(finalized_block.height + 1); // Request execution of the finalized block. - effects.extend(self.effect_builder.execute_block(finalized_block).ignore()); - self.era_supervisor.update_consensus_pause(); + effects.extend( + execute_finalized_block(effect_builder, finalized_approvals, finalized_block) + .ignore(), + ); + let effects_from_updating_pause = + self.update_consensus_pause(effect_builder, rng, era_id); + effects.extend(effects_from_updating_pause); effects } ProtocolOutcome::ValidateConsensusValue { sender, - consensus_value: candidate_block, - ancestor_values: ancestor_blocks, + proposed_block, } => { - if !self.era_supervisor.is_bonded(era_id) { - return Effects::new(); + if era_id.saturating_add(PAST_EVIDENCE_ERAS) < current_era + || !self.open_eras.contains_key(&era_id) + { + debug!(%sender, %era_id, "validate_consensus_value: skipping outdated era"); + return Effects::new(); // Outdated era; we don't need the value anymore. 
} - let proto_block = candidate_block.proto_block().clone(); - let timestamp = candidate_block.timestamp(); - let parent = candidate_block.parent().cloned(); - let missing_evidence: Vec = candidate_block + let missing_evidence: Vec = proposed_block + .value() .accusations() .iter() .filter(|pub_key| !self.has_evidence(era_id, (*pub_key).clone())) .cloned() .collect(); self.era_mut(era_id) - .add_candidate(candidate_block, missing_evidence.clone()); - let proto_block_deploys_set: BTreeSet = - proto_block.deploys_and_transfers_iter().cloned().collect(); - for ancestor_block in ancestor_blocks { - let ancestor_proto_block = ancestor_block.proto_block(); - for deploy in ancestor_proto_block.deploys_and_transfers_iter() { - if proto_block_deploys_set.contains(deploy) { - return self.resolve_validity( - era_id, - sender, - proto_block, - parent, - false, - ); - } - } + .add_block(proposed_block.clone(), missing_evidence.clone()); + if let Some(transaction_hash) = proposed_block.contains_replay() { + warn!(%sender, %transaction_hash, "block contains a replayed transaction"); + return self.resolve_validity( + effect_builder, + rng, + ResolveValidity { + era_id, + sender, + proposed_block, + maybe_error: Some(Box::new( + InvalidProposalError::AncestorTransactionReplay { + replayed_transaction_hash: transaction_hash, + }, + )), + }, + ); } let mut effects = Effects::new(); for pub_key in missing_evidence { let msg = ConsensusMessage::EvidenceRequest { era_id, pub_key }; - effects.extend( - self.effect_builder - .send_message(sender.clone(), msg.into()) - .ignore(), - ); + effects.extend(effect_builder.send_message(sender, msg.into()).ignore()); } - let effect_builder = self.effect_builder; + let proposed_block_height = + self.proposed_block_height(proposed_block.context(), era_id); effects.extend( async move { - match check_deploys_for_replay_in_previous_eras_and_validate_block( + check_txns_for_replay_in_previous_eras_and_validate_block( effect_builder, era_id, + 
proposed_block_height, sender, - proto_block, - timestamp, - parent, + proposed_block, ) .await - { - Ok(event) => Some(event), - Err(error) => { - effect_builder - .fatal(file!(), line!(), format!("{:?}", error)) - .await; - None - } - } } - .map_some(std::convert::identity), + .event(std::convert::identity), ); effects } + ProtocolOutcome::HandledProposedBlock(proposed_block) => effect_builder + .announce_proposed_block(proposed_block) + .ignore(), ProtocolOutcome::NewEvidence(pub_key) => { info!(%pub_key, era = era_id.value(), "validator equivocated"); - let mut effects = self - .effect_builder + let mut effects = effect_builder .announce_fault_event(era_id, pub_key.clone(), Timestamp::now()) .ignore(); - for e_id in self - .era_supervisor - .iter_future(era_id, self.era_supervisor.bonded_eras()) - { - let candidate_blocks = - if let Some(era) = self.era_supervisor.active_eras.get_mut(&e_id) { - era.resolve_evidence(&pub_key) - } else { - continue; - }; - for candidate_block in candidate_blocks { - effects.extend(self.delegate_to_era(e_id, |consensus| { - consensus.resolve_validity(&candidate_block, true, Timestamp::now()) - })); + for e_id in self.iter_future(era_id, PAST_EVIDENCE_ERAS) { + let proposed_blocks = if let Some(era) = self.open_eras.get_mut(&e_id) { + era.resolve_evidence_and_mark_faulty(&pub_key) + } else { + continue; + }; + for proposed_block in proposed_blocks { + effects.extend(self.delegate_to_era( + effect_builder, + rng, + e_id, + |consensus, _| { + consensus.resolve_validity(proposed_block, true, Timestamp::now()) + }, + )); } } effects } ProtocolOutcome::SendEvidence(sender, pub_key) => self - .era_supervisor - .iter_past_other(era_id, self.era_supervisor.bonded_eras()) + .iter_past_other(era_id, PAST_EVIDENCE_ERAS) .flat_map(|e_id| { - self.delegate_to_era(e_id, |consensus| { - consensus.request_evidence(sender.clone(), &pub_key) + self.delegate_to_era(effect_builder, rng, e_id, |consensus, _| { + consensus.send_evidence(sender, 
&pub_key) }) }) .collect(), ProtocolOutcome::WeAreFaulty => Default::default(), ProtocolOutcome::DoppelgangerDetected => Default::default(), - ProtocolOutcome::FttExceeded => { - let eb = self.effect_builder; - eb.set_timeout(Duration::from_millis(FTT_EXCEEDED_SHUTDOWN_DELAY_MILLIS)) - .then(move |_| fatal!(eb, "too many faulty validators")) - .ignore() - } - ProtocolOutcome::StandstillAlert => { - if era_id == self.era_supervisor.current_era { - warn!(era = %era_id.value(), "current era is stalled; shutting down"); - fatal!(self.effect_builder, "current era is stalled; please retry").ignore() - } else { - Effects::new() - } - } + ProtocolOutcome::FttExceeded => effect_builder + .set_timeout(Duration::from_millis(FTT_EXCEEDED_SHUTDOWN_DELAY_MILLIS)) + .then(move |_| fatal!(effect_builder, "too many faulty validators")) + .ignore(), } } - /// Handles registering an upgrade activation point. - pub(super) fn got_upgrade_activation_point( - &mut self, - activation_point: ActivationPoint, - ) -> Effects> { - debug!("got {}", activation_point); - self.era_supervisor.next_upgrade_activation_point = Some(activation_point); - Effects::new() - } - - pub(super) fn status( - &self, - responder: Responder)>>, - ) -> Effects> { - let public_key = self.era_supervisor.public_signing_key.clone(); + pub(super) fn status(&self, responder: Responder>) -> Effects { + let public_key = self.validator_matrix.public_signing_key().clone(); let round_length = self - .era_supervisor - .active_eras - .get(&self.era_supervisor.current_era) + .open_eras + .values() + .last() .and_then(|era| era.consensus.next_round_length()); - responder.respond(Some((public_key, round_length))).ignore() + responder + .respond(Some(ConsensusStatus::new(public_key, round_length))) + .ignore() } - fn disconnect(&self, sender: I) -> Effects> { - self.effect_builder - .announce_disconnect_from_peer(sender) - .ignore() + /// Get a reference to the era supervisor's open eras. 
+ pub(crate) fn open_eras(&self) -> &BTreeMap { + &self.open_eras } - pub(super) fn should_upgrade_after(&self, era_id: &EraId) -> bool { - match self.era_supervisor.next_upgrade_activation_point { - None => false, - Some(upgrade_point) => upgrade_point.should_upgrade(&era_id), - } + /// This node's public signing key. + pub(crate) fn public_key(&self) -> &PublicKey { + self.validator_matrix.public_signing_key() + } + + fn proposed_block_height(&self, block_context: &BlockContext, era_id: EraId) -> u64 { + let initial_era_height = self.era(era_id).start_height; + initial_era_height.saturating_add(block_context.ancestor_values().len() as u64) } } -/// Computes the instance ID for an era, given the era ID and the chainspec hash. -fn instance_id(protocol_config: &ProtocolConfig, era_id: EraId) -> Digest { - let mut result = [0; Digest::LENGTH]; - let mut hasher = VarBlake2b::new(Digest::LENGTH).expect("should create hasher"); +/// A serialized consensus network message. +/// +/// An entirely transparent newtype around raw bytes. Exists solely to avoid accidental +/// double-serialization of network messages, or serialization of unsuitable types. +/// +/// Note that this type fixates the encoding for all consensus implementations to one scheme. +#[derive(Clone, DataSize, Debug, Eq, PartialEq, Hash, Serialize, Deserialize)] +#[serde(transparent)] +#[repr(transparent)] +pub(crate) struct SerializedMessage(Vec); + +impl SerializedMessage { + /// Serialize the given message from a consensus protocol into bytes. + /// + /// # Panics + /// + /// Will panic if serialization fails (which must never happen -- ensure types are + /// serializable!). + pub(crate) fn from_message(msg: &T) -> Self + where + T: ConsensusNetworkMessage + Serialize, + { + SerializedMessage(bincode::serialize(msg).expect("should serialize message")) + } + + /// Attempt to deserialize a given type from incoming raw bytes. 
+ pub(crate) fn deserialize_incoming(&self) -> Result + where + T: ConsensusNetworkMessage + DeserializeOwned, + { + bincode::deserialize(&self.0) + } + + /// Returns the inner raw bytes. + pub(crate) fn into_raw(self) -> Vec { + self.0 + } - hasher.update(protocol_config.chainspec_hash.as_ref()); - hasher.update(era_id.to_le_bytes()); + /// Returns a reference to the inner raw bytes. + pub(crate) fn as_raw(&self) -> &[u8] { + &self.0 + } +} - hasher.finalize_variable(|slice| { - result.copy_from_slice(slice); - }); - result.into() +#[cfg(test)] +impl SerializedMessage { + /// Deserializes a message into the given value. + /// + /// # Panics + /// + /// Will panic if deserialization fails. + #[track_caller] + pub(crate) fn deserialize_expect(&self) -> T + where + T: ConsensusNetworkMessage + DeserializeOwned, + { + self.deserialize_incoming() + .expect("could not deserialize valid zug message from serialized message") + } } -/// The number of past eras whose validators are still bonded. After this many eras, a former -/// validator is allowed to withdraw their stake, so their signature can't be trusted anymore. -/// -/// A node keeps `2 * bonded_eras` past eras around, because the oldest bonded era could still -/// receive blocks that refer to `bonded_eras` before that. -fn bonded_eras(protocol_config: &ProtocolConfig) -> u64 { - protocol_config - .unbonding_delay - .saturating_sub(protocol_config.auction_delay) +async fn get_transactions( + effect_builder: EffectBuilder, + hashes: Vec, +) -> Vec +where + REv: From, +{ + let from_storage = effect_builder.get_transactions_from_storage(hashes).await; + + let mut ret = vec![]; + for item in from_storage { + match item { + Some((transaction, Some(approvals))) => { + ret.push(transaction.with_approvals(approvals)); + } + Some((transaction, None)) => { + ret.push(transaction); + } + None => continue, + } + } + + ret } -/// The oldest era whose validators are still bonded. 
-// This is public because it's used in reactor::validator::tests. -pub(crate) fn oldest_bonded_era(protocol_config: &ProtocolConfig, current_era: EraId) -> EraId { - current_era - .saturating_sub(bonded_eras(protocol_config)) - .max(protocol_config.last_activation_point) +async fn execute_finalized_block( + effect_builder: EffectBuilder, + finalized_approvals: HashMap>, + finalized_block: FinalizedBlock, +) where + REv: From + From + From, +{ + for (txn_hash, finalized_approvals) in finalized_approvals { + effect_builder + .store_finalized_approvals(txn_hash, finalized_approvals) + .await; + } + // Get all transactions in order they appear in the finalized block. + let transactions = get_transactions( + effect_builder, + finalized_block.all_transactions().copied().collect(), + ) + .await; + + let executable_block = + ExecutableBlock::from_finalized_block_and_transactions(finalized_block, transactions); + effect_builder + .enqueue_block_for_execution(executable_block, MetaBlockState::new()) + .await } -#[derive(thiserror::Error, Debug, derive_more::Display)] -pub enum ReplayCheckAndValidateBlockError { - BlockHashMissingFromStorage(BlockHash), +/// Computes the instance ID for an era, given the era ID and the chainspec hash. +fn instance_id(chainspec_hash: Digest, era_id: EraId, key_block_hash: BlockHash) -> Digest { + Digest::hash_pair( + key_block_hash.inner().value(), + Digest::hash_pair(chainspec_hash, era_id.to_le_bytes()).value(), + ) } -/// Checks that a [ProtoBlock] does not have deploys we have already included in blocks in previous -/// eras. This is done by repeatedly querying storage for deploy metadata. When metadata is found -/// storage is queried again to get the era id for the included deploy. That era id must *not* be -/// less than the current era, otherwise the deploy is a replay attack. 
-async fn check_deploys_for_replay_in_previous_eras_and_validate_block( +/// Checks that a `BlockPayload` does not have transactions we have already included in blocks in +/// previous eras. This is done by repeatedly querying storage for transaction metadata. When +/// metadata is found storage is queried again to get the era id for the included transaction. That +/// era id must *not* be less than the current era, otherwise the transaction is a replay attack. +async fn check_txns_for_replay_in_previous_eras_and_validate_block( effect_builder: EffectBuilder, - proto_block_era_id: EraId, - sender: I, - proto_block: ProtoBlock, - timestamp: Timestamp, - parent: Option, -) -> Result, ReplayCheckAndValidateBlockError> + proposed_block_era_id: EraId, + proposed_block_height: u64, + sender: NodeId, + proposed_block: ProposedBlock, +) -> Event where - REv: From> + From, - I: Clone + Send + 'static, + REv: From + From, { - for deploy_hash in proto_block.deploys_and_transfers_iter() { - let execution_results = match effect_builder - .get_deploy_and_metadata_from_storage(*deploy_hash) - .await - { - None => continue, - Some((_, DeployMetadata { execution_results })) => execution_results, - }; - // We have found the deploy in the database. If it was from a previous era, it was a - // replay attack. Get the block header for that deploy to check if it is provably a replay + let txns_era_ids = effect_builder + .get_transactions_era_ids( + proposed_block + .value() + .all_transactions() + .map(|(x, _)| *x) + .collect(), + ) + .await; + + for txn_era_id in txns_era_ids { + // If the stored transaction was executed in a previous era, it is a replay attack. + // + // If not, then it might be this is a transaction for a block on which we are currently + // coming to consensus, and we will rely on the immediate ancestors of the + // block_payload within the current era to determine if we are facing a replay // attack. 
- for (block_hash, _) in execution_results { - match effect_builder - .get_block_header_from_storage(block_hash) - .await - { - None => { - // The block hash referenced by the deploy does not exist. This is - // a critical database integrity failure. - return Err( - ReplayCheckAndValidateBlockError::BlockHashMissingFromStorage(block_hash), - ); - } - Some(block_header) => { - // If the deploy was included in a block which is from before the current era_id - // then this must have been a replay attack. - // - // If not, then it might be this is a deploy for a block we are currently - // coming to consensus, and we will rely on the immediate ancestors of the - // proto_block within the current era to determine if we are facing a replay - // attack. - if block_header.era_id() < proto_block_era_id { - return Ok(Event::ResolveValidity { - era_id: proto_block_era_id, - sender: sender.clone(), - proto_block: proto_block.clone(), - parent, - valid: false, - }); - } - } - } + if txn_era_id < proposed_block_era_id { + debug!(%sender, %txn_era_id, %proposed_block_era_id, "consensus replay detection: transaction from previous era"); + return Event::ResolveValidity(ResolveValidity { + era_id: proposed_block_era_id, + sender, + proposed_block: proposed_block.clone(), + maybe_error: Some(Box::new( + InvalidProposalError::TransactionReplayPreviousEra { + transaction_era_id: txn_era_id.value(), + proposed_block_era_id: proposed_block_era_id.value(), + }, + )), + }); } } - let sender_for_validate_block: I = sender.clone(); - let (valid, proto_block) = effect_builder - .validate_proto_block(sender_for_validate_block, proto_block.clone(), timestamp) - .await; - - Ok(Event::ResolveValidity { - era_id: proto_block_era_id, + let sender_for_validate_block: NodeId = sender; + let maybe_error = effect_builder + .validate_block( + sender_for_validate_block, + proposed_block_height, + proposed_block.clone(), + ) + .await + .err(); + + Event::ResolveValidity(ResolveValidity { + era_id: 
proposed_block_era_id, sender, - proto_block, - parent, - valid, + proposed_block, + maybe_error, }) } + +impl ProposedBlock { + /// If this block contains a transaction that's also present in an ancestor, this returns the + /// transaction hash, otherwise `None`. + fn contains_replay(&self) -> Option { + let block_txns_set: BTreeSet = + self.value().all_transaction_hashes().collect(); + self.context() + .ancestor_values() + .iter() + .flat_map(|ancestor| ancestor.all_transaction_hashes()) + .find(|typed_txn_hash| block_txns_set.contains(typed_txn_hash)) + } +} + +/// When `async move { join!(…) }` is used inline, it prevents rustfmt +/// to run on the chained `event` block. +async fn join_2( + t: T, + u: U, +) -> (::Output, ::Output) { + futures::join!(t, u) +} + +// The created RewardedSignatures should contain bit vectors for each of the block for which +// signatures are being cited. If we are eligible to cite 3 blocks, RewardsSignature will contain an +// at-most 3 vectors of bit vectors (Vec>). With `signature_rewards_max_delay = 3` The logic +// is - "we can cite signatures for the blocks parent, parents parent and parents parent parent". +// If we are close to genesis, the outer vector will obviously not have 3 entries. +// (At height 0 there is no parent, at height 1 there is no grandparent etc.) +// The `rewarded_signatures` vector will look something like: +// [[255, 64],[128, 0],[0, 0]] +// Entries in the outer vec are interpreted as: +// - on index 0 - the last finalized block +// - on index 1 - the penultimate finalized block +// - on index 2 - the penpenultimate finalized block +// There are at most `signature_rewards_max_delay` entries in this vector. if we are "close" to +// genesis there can be less (at height 0 there is no history, so there will be no cited blocks, at +// height 1 we can only cite signatures from one block etc.) Each entry in this vector is also a +// vector of u8 numbers. 
To interpret them we need to realize that if we concatenate all the bytes +// of the numbers, the nth bit will say that the nth validators signature was either cited (if the +// bit is 1) or not (if the bit is 0). To figure out which validator is on position n, we need to +// take all the validators relevant to the era of the particular block, fetch their public keys and +// sort them ascending. In the quoted example we see that: For the parent on the proposed block we +// cite signatures of validators on position 0, 1, 2, 3, 4, 5, 6, 7 and 9 For the grandparent on +// the proposed block we cite signatures of validators on position 0 For the grandgrandparent on +// the proposed block we cite no signatures Please note that due to using u8 as the "packing" +// mechanism it is possible that the byte vector will have more bits than there are validators - we +// round it up to 8 (ceiling(number_of_validators/8)), the remaining bits are only used as padding +// to full bytes. +fn create_rewarded_signatures( + maybe_past_blocks_with_metadata: &[Option], + validator_matrix: ValidatorMatrix, + block_context: &BlockContext, + signature_rewards_max_delay: u64, +) -> RewardedSignatures { + let num_ancestor_values = block_context.ancestor_values().len(); + let mut rewarded_signatures = + RewardedSignatures::new(maybe_past_blocks_with_metadata.iter().rev().map( + |maybe_past_block_with_metadata| { + maybe_past_block_with_metadata + .as_ref() + .and_then(|past_block_with_metadata| { + create_single_block_rewarded_signatures( + &validator_matrix, + past_block_with_metadata, + ) + }) + .unwrap_or_default() + }, + )); + + // exclude the signatures that were already included in ancestor blocks + for (past_index, ancestor_rewarded_signatures) in block_context + .ancestor_values() + .iter() + .map(|value| value.rewarded_signatures().clone()) + // the above will only cover the signatures from the same era - chain + // with signatures from the blocks read from storage + .chain( + 
maybe_past_blocks_with_metadata + .iter() + .rev() + // skip the blocks corresponding to heights covered by + // ancestor_values + .skip(num_ancestor_values) + .map(|maybe_past_block| { + maybe_past_block.as_ref().map_or_else( + // if we're missing a block, this could cause us to include duplicate + // signatures and make our proposal invalid - but this is covered by the + // requirement for a validator to have blocks spanning the max deploy TTL + // in the past + Default::default, + |past_block| past_block.block.rewarded_signatures().clone(), + ) + }), + ) + .enumerate() + .take(signature_rewards_max_delay as usize) + { + rewarded_signatures = rewarded_signatures + .difference(&ancestor_rewarded_signatures.left_padded(past_index.saturating_add(1))); + } + + rewarded_signatures +} + +#[cfg(test)] +mod tests { + use std::collections::{BTreeMap, BTreeSet}; + + use crate::{ + consensus::{ + era_supervisor::create_rewarded_signatures, + tests::utils::{ALICE_PUBLIC_KEY, ALICE_SECRET_KEY, BOB_PUBLIC_KEY, CAROL_PUBLIC_KEY}, + BlockContext, ClContext, + }, + types::{BlockWithMetadata, ValidatorMatrix}, + }; + use casper_types::{ + bytesrepr::{Bytes, ToBytes}, + testing::TestRng, + Block, BlockHash, BlockSignatures, BlockSignaturesV2, BlockV2, Digest, EraId, + ProtocolVersion, PublicKey, RewardedSignatures, Signature, SingleBlockRewardedSignatures, + Timestamp, U512, + }; + + #[test] + fn should_set_first_bit_if_earliest_key_cited() { + // The first bit in the bit list should be set to 1 if the "lowest" (in the sense of public + // key comparison) public key signature was cited. 
+ let mut rng = TestRng::new(); + + let mut bs_v2 = BlockSignaturesV2::random(&mut rng); + bs_v2.insert_signature( + ALICE_PUBLIC_KEY.clone(), + Signature::ed25519([44; Signature::ED25519_LENGTH]).unwrap(), + ); + let signatures = build_rewarded_signatures_without_historical_blocks(&mut rng, bs_v2); + assert_eq!( + signatures.to_bytes().unwrap(), + vec![Bytes::from(vec![128_u8])].to_bytes().unwrap() + ); + } + + #[test] + fn should_set_third_bit_if_the_first_validator_signature_cited() { + // Given there are three validators, if the first (by public key comparison) validator + // signature was cited - the third bit should be set to 1 + let mut rng = TestRng::new(); + + let mut bs_v2 = BlockSignaturesV2::random(&mut rng); + bs_v2.insert_signature( + BOB_PUBLIC_KEY.clone(), + Signature::ed25519([44; Signature::ED25519_LENGTH]).unwrap(), + ); + let signatures = build_rewarded_signatures_without_historical_blocks(&mut rng, bs_v2); + assert_eq!( + signatures.to_bytes().unwrap(), + vec![Bytes::from(vec![32_u8])].to_bytes().unwrap() + ); + } + + #[test] + fn should_set_second_bit_if_the_second_validator_signature_cited() { + // Given there are three validators, if the second (by public key comparison) validator + // signature was cited - the second bit should be set to 1 + let mut rng = TestRng::new(); + + let mut bs_v2 = BlockSignaturesV2::random(&mut rng); + bs_v2.insert_signature( + CAROL_PUBLIC_KEY.clone(), + Signature::ed25519([44; Signature::ED25519_LENGTH]).unwrap(), + ); + let signatures = build_rewarded_signatures_without_historical_blocks(&mut rng, bs_v2); + assert_eq!( + signatures.to_bytes().unwrap(), + vec![Bytes::from(vec![64_u8])].to_bytes().unwrap() + ); + } + + fn build_rewarded_signatures_without_historical_blocks( + rng: &mut TestRng, + bs_v2: BlockSignaturesV2, + ) -> RewardedSignatures { + assert!(*BOB_PUBLIC_KEY > *CAROL_PUBLIC_KEY && *CAROL_PUBLIC_KEY > *ALICE_PUBLIC_KEY); + let signatures_1 = BTreeSet::new(); + let mut validator_public_keys: 
BTreeMap = BTreeMap::new(); + // Making sure that Alice, Bob and Carols keys by stake have different ordering than + // by PublicKey + validator_public_keys.insert( + ALICE_PUBLIC_KEY.clone(), + U512::MAX.saturating_sub(100.into()), + ); + validator_public_keys.insert(BOB_PUBLIC_KEY.clone(), 1_u64.into()); + validator_public_keys.insert(CAROL_PUBLIC_KEY.clone(), U512::MAX); + + let past_rewarded_signatures = + RewardedSignatures::new(vec![SingleBlockRewardedSignatures::from_validator_set( + &signatures_1, + validator_public_keys.keys(), + )]); + + let block_v2 = BlockV2::new( + BlockHash::random(rng), + Digest::random(rng), + Digest::random(rng), + false, + None, + Timestamp::now(), + EraId::new(1), + 1010, + ProtocolVersion::V2_0_0, + PublicKey::random(rng), + BTreeMap::new(), + past_rewarded_signatures, + 1, + None, + ); + let block = Block::V2(block_v2); + + let block_1 = BlockWithMetadata { + block, + block_signatures: BlockSignatures::V2(bs_v2), + }; + let maybe_past_blocks_with_metadata = vec![Some(block_1)]; + let mut validator_matrix = ValidatorMatrix::new_with_validator(ALICE_SECRET_KEY.clone()); + validator_matrix.register_validator_weights(EraId::new(1), validator_public_keys); + let timestamp = Timestamp::now(); + let ancestor_values = vec![]; + let block_context = BlockContext::::new(timestamp, ancestor_values); + create_rewarded_signatures( + &maybe_past_blocks_with_metadata, + validator_matrix, + &block_context, + 1, + ) + } +} diff --git a/node/src/components/consensus/era_supervisor/debug.rs b/node/src/components/consensus/era_supervisor/debug.rs new file mode 100644 index 0000000000..f45079b9ff --- /dev/null +++ b/node/src/components/consensus/era_supervisor/debug.rs @@ -0,0 +1,70 @@ +//! Data types used solely for dumping of consensus data via the diagnostics port. 
+ +use std::{ + borrow::Cow, + collections::{BTreeMap, HashSet}, + fmt::{self, Display, Formatter}, +}; + +use casper_types::{EraId, PublicKey, Timestamp, U512}; +use serde::Serialize; + +use crate::components::consensus::{highway_core::State, ClContext, HighwayProtocol}; + +use super::Era; + +/// Debug dump of era used for serialization. +#[derive(Debug, Serialize)] +pub(crate) struct EraDump<'a> { + /// The era that is being dumped. + pub(crate) id: EraId, + + /// The scheduled starting time of this era. + pub(crate) start_time: Timestamp, + /// The height of this era's first block. + pub(crate) start_height: u64, + + // omitted: pending blocks + /// Validators that have been faulty in any of the recent BONDED_ERAS switch blocks. This + /// includes `new_faulty`. + pub(crate) faulty: &'a HashSet, + /// Validators that are excluded from proposing new blocks. + pub(crate) cannot_propose: &'a HashSet, + /// Accusations collected in this era so far. + pub(crate) accusations: &'a HashSet, + /// The validator weights. + pub(crate) validators: &'a BTreeMap, + + /// The state of the highway instance associated with the era. + pub(crate) highway_state: &'a State, +} + +impl Display for EraDump<'_> { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!(f, "era {}: TBD", self.id) + } +} + +impl<'a> EraDump<'a> { + /// Creates a new `EraDump` from a given era. 
+ pub(crate) fn dump_era(era: &'a Era, era_id: EraId) -> Result> { + let highway = era + .consensus + .as_any() + .downcast_ref::>() + .ok_or(Cow::Borrowed( + "could not downcast `ConsensusProtocol` into `HighwayProtocol`", + ))?; + + Ok(EraDump { + id: era_id, + start_time: era.start_time, + start_height: era.start_height, + faulty: &era.faulty, + cannot_propose: &era.cannot_propose, + accusations: &era.accusations, + validators: &era.validators, + highway_state: highway.highway().state(), + }) + } +} diff --git a/node/src/components/consensus/era_supervisor/era.rs b/node/src/components/consensus/era_supervisor/era.rs index bebed0aa78..92a7787024 100644 --- a/node/src/components/consensus/era_supervisor/era.rs +++ b/node/src/components/consensus/era_supervisor/era.rs @@ -1,23 +1,19 @@ use std::{ - collections::{BTreeMap, HashSet}, + collections::{BTreeMap, HashMap, HashSet}, env, }; use datasize::DataSize; use itertools::Itertools; use once_cell::sync::Lazy; -use tracing::{debug, warn}; - -use casper_types::{PublicKey, U512}; - -use crate::{ - components::consensus::{ - candidate_block::CandidateBlock, cl_context::ClContext, - consensus_protocol::ConsensusProtocol, protocols::highway::HighwayProtocol, - traits::ConsensusValueT, - }, - crypto::hash::Digest, - types::{ProtoBlock, Timestamp}, +use tracing::{debug, info, warn}; + +use casper_types::{PublicKey, Timestamp, U512}; + +use crate::components::consensus::{ + cl_context::ClContext, + consensus_protocol::{ConsensusProtocol, ProposedBlock}, + protocols::{highway::HighwayProtocol, zug::Zug}, }; const CASPER_ENABLE_DETAILED_CONSENSUS_METRICS_ENV_VAR: &str = @@ -25,21 +21,18 @@ const CASPER_ENABLE_DETAILED_CONSENSUS_METRICS_ENV_VAR: &str = static CASPER_ENABLE_DETAILED_CONSENSUS_METRICS: Lazy = Lazy::new(|| env::var(CASPER_ENABLE_DETAILED_CONSENSUS_METRICS_ENV_VAR).is_ok()); -/// A candidate block waiting for validation and dependencies. +/// A proposed block waiting for validation and dependencies. 
#[derive(DataSize)] -pub struct PendingCandidate { - /// The candidate, to be passed into the consensus instance once dependencies are resolved. - candidate: CandidateBlock, - /// Whether the proto block has been validated yet. +pub struct ValidationState { + /// Whether the block has been validated yet. validated: bool, /// A list of IDs of accused validators for which we are still missing evidence. missing_evidence: Vec, } -impl PendingCandidate { - fn new(candidate: CandidateBlock, missing_evidence: Vec) -> Self { - PendingCandidate { - candidate, +impl ValidationState { + fn new(missing_evidence: Vec) -> Self { + ValidationState { validated: false, missing_evidence, } @@ -50,119 +43,99 @@ impl PendingCandidate { } } -pub struct Era { +pub struct Era { /// The consensus protocol instance. - pub(crate) consensus: Box>, + pub(crate) consensus: Box>, /// The scheduled starting time of this era. pub(crate) start_time: Timestamp, /// The height of this era's first block. pub(crate) start_height: u64, - /// Pending candidate blocks, waiting for validation. The boolean is `true` if the proto block - /// has been validated; the vector contains the list of accused validators missing evidence. - candidates: Vec, - /// Validators banned in this and the next BONDED_ERAS eras, because they were slashed in the + /// Pending blocks, waiting for validation and dependencies. + pub(crate) validation_states: HashMap, ValidationState>, + /// Validators banned in this and the next BONDED_ERAS eras, because they were faulty in the /// previous switch block. - pub(crate) newly_slashed: Vec, - /// Validators that have been slashed in any of the recent BONDED_ERAS switch blocks. This - /// includes `newly_slashed`. - pub(crate) slashed: HashSet, + pub(crate) faulty: HashSet, + /// Validators that are excluded from proposing new blocks. + pub(crate) cannot_propose: HashSet, /// Accusations collected in this era so far. 
- accusations: HashSet, + pub(crate) accusations: HashSet, /// The validator weights. - validators: BTreeMap, + pub(crate) validators: BTreeMap, } -impl Era { +impl Era { pub(crate) fn new( - consensus: Box>, + consensus: Box>, start_time: Timestamp, start_height: u64, - newly_slashed: Vec, - slashed: HashSet, + faulty: HashSet, + cannot_propose: HashSet, validators: BTreeMap, ) -> Self { Era { consensus, start_time, start_height, - candidates: Vec::new(), - newly_slashed, - slashed, + validation_states: HashMap::new(), + faulty, + cannot_propose, accusations: HashSet::new(), validators, } } - /// Adds a new candidate block, together with the accusations for which we don't have evidence - /// yet. - pub(crate) fn add_candidate( + /// Adds a new block, together with the accusations for which we don't have evidence yet. + pub(crate) fn add_block( &mut self, - candidate: CandidateBlock, + proposed_block: ProposedBlock, missing_evidence: Vec, ) { - self.candidates - .push(PendingCandidate::new(candidate, missing_evidence)); + self.validation_states + .insert(proposed_block, ValidationState::new(missing_evidence)); } - /// Marks the dependencies of candidate blocks on evidence against validator `pub_key` as - /// resolved and returns all candidates that have no missing dependencies left. - pub(crate) fn resolve_evidence(&mut self, pub_key: &PublicKey) -> Vec { - for pc in &mut self.candidates { + /// Marks the dependencies of blocks on evidence against validator `pub_key` as resolved and + /// returns all valid blocks that have no missing dependencies left. 
+ pub(crate) fn resolve_evidence_and_mark_faulty( + &mut self, + pub_key: &PublicKey, + ) -> Vec> { + for pc in self.validation_states.values_mut() { pc.missing_evidence.retain(|pk| pk != pub_key); } self.consensus.mark_faulty(pub_key); - self.remove_complete_candidates() + let (complete, incomplete): (HashMap<_, _>, HashMap<_, _>) = self + .validation_states + .drain() + .partition(|(_, validation_state)| validation_state.is_complete()); + self.validation_states = incomplete; + complete.into_keys().collect() } - /// Marks the proto block as valid or invalid, and returns all candidates whose validity is now - /// fully determined. + /// Marks the block payload as valid or invalid. Returns `false` if the block was not present + /// or is still missing evidence. Otherwise, it returns `true`: The block can now be processed + /// by the consensus protocol. pub(crate) fn resolve_validity( &mut self, - proto_block: &ProtoBlock, - parent: Option, + proposed_block: &ProposedBlock, valid: bool, - ) -> Vec { + ) -> bool { if valid { - self.accept_proto_block(proto_block, parent) - } else { - self.reject_proto_block(proto_block, parent) - } - } - - /// Marks the dependencies of candidate blocks on the validity of the specified proto block as - /// resolved and returns all candidates that have no missing dependencies left. 
- pub(crate) fn accept_proto_block( - &mut self, - proto_block: &ProtoBlock, - parent: Option, - ) -> Vec { - for pc in &mut self.candidates { - if pc.candidate.proto_block() == proto_block && pc.candidate.parent() == parent.as_ref() - { - pc.validated = true; + if let Some(vs) = self.validation_states.get_mut(proposed_block) { + if !vs.missing_evidence.is_empty() { + info!("Cannot resolve validity of proposed block (timestamp {}) due to missing_evidence still present.", proposed_block.context().timestamp()); + vs.validated = true; + return false; + } } } - self.remove_complete_candidates() - } - - /// Removes and returns any candidate blocks depending on the validity of the specified proto - /// block. If it is invalid, all those candidates are invalid. - pub(crate) fn reject_proto_block( - &mut self, - proto_block: &ProtoBlock, - parent: Option, - ) -> Vec { - let (invalid, candidates): (Vec<_>, Vec<_>) = self.candidates.drain(..).partition(|pc| { - pc.candidate.proto_block() == proto_block && pc.candidate.parent() == parent.as_ref() - }); - self.candidates = candidates; - invalid.into_iter().map(|pc| pc.candidate).collect() + self.validation_states.remove(proposed_block).is_some() } /// Adds new accusations from a finalized block. pub(crate) fn add_accusations(&mut self, accusations: &[PublicKey]) { for pub_key in accusations { - if !self.slashed.contains(pub_key) { + if !self.faulty.contains(pub_key) { self.accusations.insert(pub_key.clone()); } } @@ -177,27 +150,9 @@ impl Era { pub(crate) fn validators(&self) -> &BTreeMap { &self.validators } - - /// Sets the pause status: While paused we don't create consensus messages other than pings. - pub(crate) fn set_paused(&mut self, paused: bool) { - self.consensus.set_paused(paused); - } - - /// Removes and returns all candidate blocks with no missing dependencies. - fn remove_complete_candidates(&mut self) -> Vec { - let (complete, candidates): (Vec<_>, Vec<_>) = self - .candidates - .drain(..) 
- .partition(PendingCandidate::is_complete); - self.candidates = candidates; - complete.into_iter().map(|pc| pc.candidate).collect() - } } -impl DataSize for Era -where - I: DataSize + 'static, -{ +impl DataSize for Era { const IS_DYNAMIC: bool = true; const STATIC_HEAP_SIZE: usize = 0; @@ -209,9 +164,9 @@ where consensus, start_time, start_height, - candidates, - newly_slashed, - slashed, + validation_states, + faulty, + cannot_propose, accusations, validators, } = self; @@ -222,7 +177,7 @@ where let consensus_heap_size = { let any_ref = consensus.as_any(); - if let Some(highway) = any_ref.downcast_ref::>() { + if let Some(highway) = any_ref.downcast_ref::>() { if *CASPER_ENABLE_DETAILED_CONSENSUS_METRICS { let detailed = (*highway).estimate_detailed_heap_size(); match serde_json::to_string(&detailed) { @@ -233,11 +188,19 @@ where } else { (*highway).estimate_heap_size() } + } else if let Some(zug) = any_ref.downcast_ref::>() { + if *CASPER_ENABLE_DETAILED_CONSENSUS_METRICS { + let detailed = (*zug).estimate_detailed_heap_size(); + match serde_json::to_string(&detailed) { + Ok(encoded) => debug!(%encoded, "consensus memory metrics"), + Err(err) => warn!(%err, "error encoding consensus memory metrics"), + } + detailed.total() + } else { + (*zug).estimate_heap_size() + } } else { - warn!( - "could not downcast consensus protocol to \ - HighwayProtocol to determine heap allocation size" - ); + warn!("could not downcast consensus protocol to determine heap allocation size"); 0 } }; @@ -245,9 +208,9 @@ where consensus_heap_size .saturating_add(start_time.estimate_heap_size()) .saturating_add(start_height.estimate_heap_size()) - .saturating_add(candidates.estimate_heap_size()) - .saturating_add(newly_slashed.estimate_heap_size()) - .saturating_add(slashed.estimate_heap_size()) + .saturating_add(validation_states.estimate_heap_size()) + .saturating_add(faulty.estimate_heap_size()) + .saturating_add(cannot_propose.estimate_heap_size()) 
.saturating_add(accusations.estimate_heap_size()) .saturating_add(validators.estimate_heap_size()) } diff --git a/node/src/components/consensus/error.rs b/node/src/components/consensus/error.rs new file mode 100644 index 0000000000..ab597a641b --- /dev/null +++ b/node/src/components/consensus/error.rs @@ -0,0 +1,26 @@ +use thiserror::Error; + +use casper_types::{BlockHeader, EraId}; + +#[derive(Error, Debug)] +pub enum CreateNewEraError { + #[error("Attempted to create era with no switch blocks.")] + AttemptedToCreateEraWithNoSwitchBlocks, + #[error("Attempted to create {era_id} with non-switch block {last_block_header:?}.")] + LastBlockHeaderNotASwitchBlock { + era_id: EraId, + last_block_header: Box, + }, + #[error("Attempted to create {era_id} with too few switch blocks {switch_blocks:?}.")] + InsufficientSwitchBlocks { + era_id: EraId, + switch_blocks: Vec, + }, + #[error( + "Attempted to create {era_id} with switch blocks from unexpected eras: {switch_blocks:?}." + )] + WrongSwitchBlockEra { + era_id: EraId, + switch_blocks: Vec, + }, +} diff --git a/node/src/components/consensus/highway_core.rs b/node/src/components/consensus/highway_core.rs index 0e55679058..c0cd2524e7 100644 --- a/node/src/components/consensus/highway_core.rs +++ b/node/src/components/consensus/highway_core.rs @@ -33,14 +33,17 @@ mod test_macros; pub(crate) mod active_validator; -pub(crate) mod finality_detector; -pub(crate) mod highway; +pub mod finality_detector; +pub mod highway; pub(crate) mod state; -pub(crate) mod validators; +pub(super) mod synchronizer; mod endorsement; mod evidence; #[cfg(test)] pub(crate) mod highway_testing; -pub(crate) use state::{State, Weight}; +pub use state::{Observation, Panorama, State}; + +// Enables the endorsement mechanism. 
+const ENABLE_ENDORSEMENTS: bool = false; diff --git a/node/src/components/consensus/highway_core/active_validator.rs b/node/src/components/consensus/highway_core/active_validator.rs index 9ff795a2ff..6eab639bb5 100644 --- a/node/src/components/consensus/highway_core/active_validator.rs +++ b/node/src/components/consensus/highway_core/active_validator.rs @@ -1,28 +1,26 @@ use std::{ fmt::{self, Debug}, - fs::{self, File}, - io::{self, Read, Write}, - path::{Path, PathBuf}, + iter, }; use datasize::DataSize; use tracing::{error, info, trace, warn}; +use casper_types::{TimeDiff, Timestamp}; + use super::{ endorsement::{Endorsement, SignedEndorsement}, evidence::Evidence, highway::{Ping, ValidVertex, Vertex, WireUnit}, - state::{self, Panorama, State, Unit, Weight}, - validators::ValidatorIndex, + state::{self, Panorama, State, Unit}, + ENABLE_ENDORSEMENTS, }; -use crate::{ - components::consensus::{ - consensus_protocol::BlockContext, - highway_core::{highway::SignedWireUnit, state::Fault}, - traits::{Context, ValidatorSecret}, - }, - types::{TimeDiff, Timestamp}, +use crate::components::consensus::{ + consensus_protocol::BlockContext, + highway_core::{highway::SignedWireUnit, state::Fault}, + traits::{Context, ValidatorSecret}, + utils::{ValidatorIndex, Weight}, }; /// An action taken by a validator. @@ -34,10 +32,9 @@ pub(crate) enum Effect { ScheduleTimer(Timestamp), /// `propose` needs to be called with a value for a new block with the specified block context /// and parent value. - RequestNewBlock { - block_context: BlockContext, - fork_choice: Option, - }, + /// The timestamp is the time at which the witness unit will be sent, which will invalidate the + /// proposal - so any response to this request has to be received before that time. + RequestNewBlock(BlockContext, Timestamp), /// This validator is faulty. /// /// When this is returned, the validator automatically deactivates. 
@@ -67,16 +64,12 @@ where vidx: ValidatorIndex, /// The validator's secret signing key. secret: C::ValidatorSecret, - /// The next round exponent: Our next round will be `1 << next_round_exp` milliseconds long. - next_round_exp: u8, + /// The next round length. + next_round_len: TimeDiff, /// The latest timer we scheduled. next_timer: Timestamp, - /// Panorama and timestamp for a block we are about to propose when we get a consensus value. - next_proposal: Option<(Timestamp, Panorama)>, - /// The path to the file storing the hash of our latest known unit (if any). - unit_file: Option, - /// The last known unit created by us. - own_last_unit: Option>, + /// Panorama and context for a block we are about to propose when we get a consensus value. + next_proposal: Option<(BlockContext, Panorama)>, /// The target fault tolerance threshold. The validator pauses (i.e. doesn't create new units) /// if not enough validators are online to finalize values at this FTT. target_ftt: Weight, @@ -88,7 +81,7 @@ impl Debug for ActiveValidator { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("ActiveValidator") .field("vidx", &self.vidx) - .field("next_round_exp", &self.next_round_exp) + .field("next_round_len", &self.next_round_len) .field("next_timer", &self.next_timer) .field("paused", &self.paused) .finish() @@ -104,28 +97,15 @@ impl ActiveValidator { current_time: Timestamp, start_time: Timestamp, state: &State, - unit_file: Option, target_ftt: Weight, instance_id: C::InstanceId, ) -> (Self, Vec>) { - let own_last_unit = unit_file - .as_ref() - .map(read_last_unit) - .transpose() - .map_err(|err| match err.kind() { - io::ErrorKind::NotFound => (), - _ => panic!("got an error reading unit file {:?}: {:?}", unit_file, err), - }) - .ok() - .flatten(); let mut av = ActiveValidator { vidx, secret, - next_round_exp: state.params().init_round_exp(), + next_round_len: state.params().init_round_len(), next_timer: state.params().start_timestamp(), next_proposal: None, 
- unit_file, - own_last_unit, target_ftt, paused: false, }; @@ -134,49 +114,9 @@ impl ActiveValidator { (av, effects) } - /// Returns whether validator's protocol state is fully synchronized and it's safe to start - /// creating units. - /// - /// If validator restarted within an era, it most likely had created units before that event. It - /// cannot start creating new units until its state is fully synchronized, otherwise it will - /// most likely equivocate. - fn can_vote(&self, state: &State) -> bool { - self.own_last_unit - .as_ref() - .map_or(true, |swunit| state.has_unit(&swunit.hash())) - } - - /// Returns whether validator's protocol state is synchronized up until the panorama of its own - /// last unit. - pub(crate) fn is_own_last_unit_panorama_sync(&self, state: &State) -> bool { - self.own_last_unit.as_ref().map_or(true, |swunit| { - swunit - .wire_unit() - .panorama - .iter_correct_hashes() - .all(|hash| state.has_unit(hash)) - }) - } - - pub(crate) fn take_own_last_unit(&mut self) -> Option> { - self.own_last_unit.take() - } - - /// Cleans up the validator disk state. - /// Deletes all unit files. - pub(crate) fn cleanup(&self) -> io::Result<()> { - let unit_file = if let Some(file) = self.unit_file.as_ref() { - file - } else { - return Ok(()); - }; - - fs::remove_file(unit_file) - } - - /// Sets the next round exponent to the new value. - pub(crate) fn set_round_exp(&mut self, new_round_exp: u8) { - self.next_round_exp = new_round_exp; + /// Sets the next round length to the new value. + pub(crate) fn set_round_len(&mut self, new_round_len: TimeDiff) { + self.next_round_len = new_round_len; } /// Sets the pause status: While paused we don't create any new units, just pings. 
@@ -201,34 +141,37 @@ impl ActiveValidator { warn!(%timestamp, "skipping outdated timer event"); return effects; } - let r_exp = self.round_exp(state, timestamp); - let r_id = state::round_id(timestamp, r_exp); - let r_len = state::round_len(r_exp); + let r_len = self.round_len(state, timestamp); + let r_id = state::round_id(timestamp, r_len); // Only create new units if enough validators are online. if !self.paused && self.enough_validators_online(state, timestamp) { if timestamp == r_id && state.leader(r_id) == self.vidx { - effects.extend(self.request_new_block(state, instance_id, timestamp)); + let expiry = r_id.saturating_add(self.proposal_request_expiry(r_len)); + effects.extend(self.request_new_block(state, instance_id, timestamp, expiry)); return effects; - } else if timestamp == r_id + self.witness_offset(r_len) { + } else if timestamp == r_id.saturating_add(self.witness_offset(r_len)) { let panorama = self.panorama_at(state, timestamp); - if panorama.has_correct() { - if let Some(witness_unit) = - self.new_unit(panorama, timestamp, None, state, instance_id) + if let Some(witness_unit) = + self.new_unit(panorama, timestamp, None, state, instance_id) + { + if self + .latest_unit(state) + .is_none_or(|latest_unit| latest_unit.round_id() != r_id) { - if self - .latest_unit(state) - .map_or(true, |latest_unit| latest_unit.round_id() != r_id) - { - info!(round_id = %r_id, "sending witness in round with no proposal"); - } - effects.push(Effect::NewVertex(ValidVertex(Vertex::Unit(witness_unit)))); - return effects; + info!(round_id = %r_id, "sending witness in round with no proposal"); } + effects.push(Effect::NewVertex(ValidVertex(Vertex::Unit(witness_unit)))); + return effects; } } } - // We are not creating a new unit. Send a ping if necessary, to show that we're online. - if !state.has_ping(self.vidx, timestamp) { + // We are not creating a new unit. Send a ping once per maximum-length round, to show that + // we're online. 
+ let one_max_round_ago = timestamp.saturating_sub(state.params().max_round_length()); + if !state.has_ping( + self.vidx, + one_max_round_ago.saturating_add(TimeDiff::from_millis(1)), + ) { warn!(%timestamp, "too many validators offline, sending ping"); effects.push(self.send_ping(timestamp, instance_id)); } @@ -245,6 +188,7 @@ impl ActiveValidator { /// tolerance threshold, always counting this validator as online. fn enough_validators_online(&self, state: &State, now: Timestamp) -> bool { // We divide before adding, because total_weight + target_fft could overflow u64. + #[allow(clippy::arithmetic_side_effects)] let target_quorum = state.total_weight() / 2 + self.target_ftt / 2; let online_weight: Weight = state .weights() @@ -295,6 +239,9 @@ impl ActiveValidator { evidence: &Evidence, state: &State, ) -> Vec> { + if !ENABLE_ENDORSEMENTS { + return Vec::new(); + } let vidx = evidence.perpetrator(); state .iter_correct_hashes() @@ -317,45 +264,44 @@ impl ActiveValidator { state: &State, instance_id: C::InstanceId, timestamp: Timestamp, + expiry: Timestamp, ) -> Option> { - if let Some((prop_time, _)) = self.next_proposal.take() { - warn!( - ?timestamp, - "no proposal received for {}; requesting new one", prop_time - ); + if let Some((prop_context, _)) = self.next_proposal.take() { + warn!(?prop_context, "no proposal received; requesting new one"); } let panorama = self.panorama_at(state, timestamp); let maybe_parent_hash = state.fork_choice(&panorama); - if maybe_parent_hash.map_or(false, |hash| state.is_terminal_block(hash)) { + // If the parent is a terminal block, just create a unit without a new block. 
+ if maybe_parent_hash.is_some_and(|hash| state.is_terminal_block(hash)) { return self .new_unit(panorama, timestamp, None, state, instance_id) .map(|proposal_unit| Effect::NewVertex(ValidVertex(Vertex::Unit(proposal_unit)))); } - let maybe_parent = maybe_parent_hash.map(|bh| state.block(bh)); - let height = maybe_parent.map_or(0, |block| block.height); - self.next_proposal = Some((timestamp, panorama)); - let block_context = BlockContext::new(timestamp, height); - Some(Effect::RequestNewBlock { - block_context, - fork_choice: maybe_parent_hash.cloned(), - }) + // Otherwise we need to request a new consensus value to propose. + let ancestor_values = match maybe_parent_hash { + None => vec![], + Some(parent_hash) => iter::once(parent_hash) + .chain(state.ancestor_hashes(parent_hash)) + .map(|bhash| state.block(bhash).value.clone()) + .collect(), + }; + let block_context = BlockContext::new(timestamp, ancestor_values); + self.next_proposal = Some((block_context.clone(), panorama)); + Some(Effect::RequestNewBlock(block_context, expiry)) } /// Proposes a new block with the given consensus value. pub(crate) fn propose( &mut self, value: C::ConsensusValue, - block_context: BlockContext, + block_context: BlockContext, state: &State, instance_id: C::InstanceId, ) -> Vec> { let timestamp = block_context.timestamp(); - let panorama = if let Some((prop_time, panorama)) = self.next_proposal.take() { - if prop_time != timestamp { - warn!( - ?timestamp, - "unexpected proposal; expected timestamp {}", prop_time - ); + let panorama = if let Some((expected_context, panorama)) = self.next_proposal.take() { + if expected_context != block_context { + warn!(?expected_context, ?block_context, "unexpected proposal"); return vec![]; } panorama @@ -371,6 +317,7 @@ impl ActiveValidator { warn!("Creator knows it's faulty. 
Won't create a message."); return vec![]; } + self.new_unit(panorama, timestamp, Some(value), state, instance_id) .map(|proposal_unit| Effect::NewVertex(ValidVertex(Vertex::Unit(proposal_unit)))) .into_iter() @@ -389,7 +336,7 @@ impl ActiveValidator { if unit.creator == self.vidx || self.is_faulty(state) || !state.is_correct_proposal(unit) { return false; } - let r_id = state::round_id(timestamp, self.round_exp(state, timestamp)); + let r_id = state::round_id(timestamp, self.round_len(state, timestamp)); if unit.timestamp != r_id { trace!( %unit.timestamp, %r_id, @@ -432,15 +379,11 @@ impl ActiveValidator { state: &State, instance_id: C::InstanceId, ) -> Option> { - if !self.can_vote(state) { - info!(?self.own_last_unit, "not voting - last own unit unknown"); - return None; + if value.is_none() && !panorama.has_correct() { + return None; // Wait for the first proposal before creating a unit without a value. } - if let Some((prop_time, _)) = self.next_proposal.take() { - warn!( - ?timestamp, - "canceling proposal for {} due to unit", prop_time - ); + if let Some((prop_context, _)) = self.next_proposal.take() { + warn!(?prop_context, "canceling proposal due to unit"); } for hash in panorama.iter_correct_hashes() { if timestamp < state.unit(hash).timestamp { @@ -460,6 +403,9 @@ impl ActiveValidator { } let seq_number = panorama.next_seq_num(state, self.vidx); let endorsed = state.seen_endorsed(&panorama); + #[allow(clippy::arithmetic_side_effects)] // min_round_length is guaranteed to be > 0. 
+ let round_exp = (self.round_len(state, timestamp) / state.params().min_round_length()) + .trailing_zeros() as u8; let hwunit = WireUnit { panorama, creator: self.vidx, @@ -467,17 +413,11 @@ impl ActiveValidator { value, seq_number, timestamp, - round_exp: self.round_exp(state, timestamp), + round_exp, endorsed, } .into_hashed(); let swunit = SignedWireUnit::new(hwunit, &self.secret); - write_last_unit(&self.unit_file, swunit.clone()).unwrap_or_else(|err| { - panic!( - "should successfully write unit's hash to {:?}, got {:?}", - self.unit_file, err - ) - }); Some(swunit) } @@ -490,18 +430,17 @@ impl ActiveValidator { if self.next_timer > timestamp { return Vec::new(); // We already scheduled the next call; nothing to do. } - let r_exp = self.round_exp(state, timestamp); - let r_id = state::round_id(timestamp, r_exp); - let r_len = state::round_len(r_exp); - self.next_timer = if timestamp < r_id + self.witness_offset(r_len) { - r_id + self.witness_offset(r_len) + let r_len = self.round_len(state, timestamp); + let r_id = state::round_id(timestamp, r_len); + self.next_timer = if timestamp < r_id.saturating_add(self.witness_offset(r_len)) { + r_id.saturating_add(self.witness_offset(r_len)) } else { - let next_r_id = r_id + r_len; + let next_r_id = r_id.saturating_add(r_len); if state.leader(next_r_id) == self.vidx { next_r_id } else { - let next_r_exp = self.round_exp(state, next_r_id); - next_r_id + self.witness_offset(state::round_len(next_r_exp)) + let next_r_len = self.round_len(state, next_r_id); + next_r_id.saturating_add(self.witness_offset(next_r_len)) } }; vec![Effect::ScheduleTimer(self.next_timer)] @@ -514,7 +453,8 @@ impl ActiveValidator { .map_or(state.params().start_timestamp(), |unit| { unit.previous().map_or(unit.timestamp, |vh2| { let unit2 = state.unit(vh2); - unit.timestamp.max(unit2.round_id() + unit2.round_len()) + unit.timestamp + .max(unit2.round_id().saturating_add(unit2.round_len())) }) }) } @@ -523,32 +463,47 @@ impl ActiveValidator { 
pub(crate) fn latest_unit<'a>(&self, state: &'a State) -> Option<&'a Unit> { state .panorama() - .get(self.vidx) + .get(self.vidx)? .correct() .map(|vh| state.unit(vh)) } /// Checks if validator knows it's faulty. fn is_faulty(&self, state: &State) -> bool { - state.panorama().get(self.vidx).is_faulty() + state + .panorama() + .get(self.vidx) + .is_some_and(|obs| obs.is_faulty()) } /// Returns the duration after the beginning of a round when the witness units are sent. + #[allow(clippy::arithmetic_side_effects)] // Round length will never be large enough to overflow. fn witness_offset(&self, round_len: TimeDiff) -> TimeDiff { round_len * 2 / 3 } - /// The round exponent of the round containing `timestamp`. + /// Returns the duration after the beginning of a round during which a response to a proposal + /// request has to be returned. + #[allow(clippy::arithmetic_side_effects)] // Round length will never be large enough to overflow. + fn proposal_request_expiry(&self, round_len: TimeDiff) -> TimeDiff { + // The time window is 1/6 of the round length - but no shorter than 500 ms, unless that's + // longer than the witness offset, in which case it's just the witness offset. + (round_len / 6) + .max(TimeDiff::from_millis(500)) + .min(self.witness_offset(round_len)) + } + + /// The round length of the round containing `timestamp`. /// - /// This returns `self.next_round_exp`, if that is a valid round exponent for a unit cast at - /// `timestamp`. Otherwise it returns the round exponent of our latest unit. - fn round_exp(&self, state: &State, timestamp: Timestamp) -> u8 { - self.latest_unit(state).map_or(self.next_round_exp, |unit| { - let max_re = self.next_round_exp.max(unit.round_exp); - if unit.timestamp < state::round_id(timestamp, max_re) { - self.next_round_exp + /// This returns `self.next_round_len`, if that is a valid round length for a unit cast at + /// `timestamp`. Otherwise it returns the round length of our latest unit. 
+ fn round_len(&self, state: &State, timestamp: Timestamp) -> TimeDiff { + self.latest_unit(state).map_or(self.next_round_len, |unit| { + let max_rl = self.next_round_len.max(unit.round_len); + if unit.timestamp < state::round_id(timestamp, max_rl) { + self.next_round_len } else { - unit.round_exp + unit.round_len } }) } @@ -558,6 +513,9 @@ impl ActiveValidator { /// We should endorse unit from honest validator that cites _an_ equivocator /// as honest and it cites some new message by that validator. fn should_endorse(&self, vhash: &C::Hash, state: &State) -> bool { + if !ENABLE_ENDORSEMENTS { + return false; + } let unit = state.unit(vhash); !state.is_faulty(unit.creator) && unit @@ -591,9 +549,6 @@ impl ActiveValidator { /// Returns whether the incoming vertex was signed by our key even though we don't have it yet. /// This can only happen if another node is running with the same signing key. pub(crate) fn is_doppelganger_vertex(&self, vertex: &Vertex, state: &State) -> bool { - if !self.can_vote(state) { - return false; - } match vertex { Vertex::Unit(swunit) => { // If we already have the unit in our local state, @@ -612,6 +567,8 @@ impl ActiveValidator { && !state.has_endorsement(endorsements.unit(), self.vidx) } Vertex::Ping(ping) => { + // If we get a ping from ourselves with a later timestamp than the latest one we + // know of, another node must be signing with our key. ping.creator() == self.vidx && !state.has_ping(self.vidx, ping.timestamp()) } Vertex::Evidence(_) => false, @@ -619,58 +576,24 @@ impl ActiveValidator { } pub(crate) fn next_round_length(&self) -> TimeDiff { - state::round_len(self.next_round_exp) + self.next_round_len } } -pub(crate) fn read_last_unit(path: P) -> io::Result> -where - C: Context, - P: AsRef, -{ - let mut file = File::open(path)?; - let mut bytes = Vec::new(); - file.read_to_end(&mut bytes)?; - Ok(serde_json::from_slice(&bytes)?) 
-} - -pub(crate) fn write_last_unit( - unit_file: &Option, - swunit: SignedWireUnit, -) -> io::Result<()> { - // If there is no unit_file set, do not write to it - let unit_file = if let Some(file) = unit_file.as_ref() { - file - } else { - return Ok(()); - }; - - // Create the file (and its parents) as necessary - if let Some(parent_directory) = unit_file.parent() { - fs::create_dir_all(parent_directory)?; - } - let mut file = File::create(unit_file)?; - - // Finally, write the data to file we created - let bytes = serde_json::to_vec(&swunit)?; - - file.write_all(&bytes) -} - #[cfg(test)] -#[allow(clippy::integer_arithmetic)] // Overflows in tests panic anyway. +#[allow(clippy::arithmetic_side_effects)] // Overflows in tests panic anyway. mod tests { use std::{collections::BTreeSet, fmt::Debug}; - use tempfile::tempdir; - use crate::components::consensus::highway_core::{ - highway_testing::TEST_INSTANCE_ID, validators::ValidatorMap, + use crate::components::consensus::{ + highway_core::highway_testing::TEST_INSTANCE_ID, + utils::{ValidatorMap, Weight}, }; use super::{ super::{ finality_detector::FinalityDetector, - state::{tests::*, State, Weight}, + state::{tests::*, State}, }, Vertex, *, }; @@ -685,14 +608,6 @@ mod tests { panic!("Unexpected effect: {:?}", self); } } - - fn unwrap_timer(self) -> Timestamp { - if let Eff::ScheduleTimer(timestamp) = self { - timestamp - } else { - panic!("expected `ScheduleTimer`, got: {:?}", self) - } - } } struct TestState { @@ -705,69 +620,66 @@ mod tests { impl TestState { fn new( - state: State, + mut state: State, start_time: Timestamp, instance_id: u64, fd: FinalityDetector, validators: Vec, ) -> Self { let mut timers = BTreeSet::new(); - let current_round_id = state::round_id(start_time, state.params().init_round_exp()); + let current_round_id = state::round_id(start_time, state.params().init_round_len()); let earliest_round_start = if start_time == current_round_id { start_time } else { - current_round_id + 
state::round_len(state.params().init_round_exp()) + current_round_id + state.params().init_round_len() }; let target_ftt = state.total_weight() / 3; - let active_validators = validators - .into_iter() - .map(|vidx| { - let secret = TestSecret(vidx.0); - let (av, effects) = ActiveValidator::new( - vidx, - secret, - start_time, - start_time, - &state, - None, - target_ftt, - TEST_INSTANCE_ID, - ); - - let timestamp = match &*effects { - [ - Effect::ScheduleTimer(timer), - Effect::NewVertex(ValidVertex(Vertex::Ping(_))) - ] => { *timer } - other => panic!("expected timer and ping effects, got={:?}", other), - }; - - if state.leader(earliest_round_start) == vidx { - assert_eq!( - timestamp, earliest_round_start, - "Invalid initial timer scheduled for {:?}.", - vidx, - ) - } else { - let witness_offset = - av.witness_offset(state::round_len(state.params().init_round_exp())); - let witness_timestamp = earliest_round_start + witness_offset; - assert_eq!( - timestamp, witness_timestamp, - "Invalid initial timer scheduled for {:?}.", - vidx, - ) + let mut active_validators = Vec::with_capacity(validators.len()); + for vidx in validators { + let secret = TestSecret(vidx.0); + let (av, effects) = ActiveValidator::new( + vidx, + secret, + start_time, + start_time, + &state, + target_ftt, + TEST_INSTANCE_ID, + ); + + let (timestamp, ping) = match &*effects { + [Effect::ScheduleTimer(timestamp), Effect::NewVertex(ValidVertex(Vertex::Ping(ping)))] => { + (*timestamp, ping) } - timers.insert((timestamp, vidx)); - av - }) - .collect(); + other => panic!("expected timer and ping effects, got={:?}", other), + }; + + state.add_ping(ping.creator(), ping.timestamp()); + + if state.leader(earliest_round_start) == vidx { + assert_eq!( + timestamp, earliest_round_start, + "Invalid initial timer scheduled for {:?}.", + vidx, + ) + } else { + let witness_offset = av.witness_offset(state.params().init_round_len()); + let witness_timestamp = earliest_round_start + witness_offset; + assert_eq!( 
+ timestamp, witness_timestamp, + "Invalid initial timer scheduled for {:?}.", + vidx, + ) + } + timers.insert((timestamp, vidx)); + active_validators.push(av); + } TestState { state, instance_id, fd, - active_validators, + active_validators: active_validators.into_iter().collect(), timers, } } @@ -795,7 +707,7 @@ mod tests { &mut self, vidx: ValidatorIndex, cv: ::ConsensusValue, - block_context: BlockContext, + block_context: BlockContext, ) -> (Vec>, SignedWireUnit) { let validator = &mut self.active_validators[vidx]; let proposal_timestamp = block_context.timestamp(); @@ -807,7 +719,7 @@ mod tests { self.state.add_unit(proposal_wunit.clone()).unwrap(); let effects = validator.on_new_unit( &prop_hash, - proposal_timestamp + 1.into(), + proposal_timestamp + TimeDiff::from_millis(1), &self.state, self.instance_id, ); @@ -824,7 +736,7 @@ mod tests { uhash: &::Hash, ) -> Vec> { let validator = &mut self.active_validators[vidx]; - let delivery_timestamp = self.state.unit(uhash).timestamp + 1.into(); + let delivery_timestamp = self.state.unit(uhash).timestamp + TimeDiff::from_millis(1); let effects = validator.on_new_unit(uhash, delivery_timestamp, &self.state, self.instance_id); self.schedule_timer(vidx, &effects); @@ -871,7 +783,7 @@ mod tests { match *new_units { [] => (), [unit] => { - let _ = self.state.add_unit(unit.clone()).unwrap(); + self.state.add_unit(unit.clone()).unwrap(); } _ => panic!( "Expected at most one timer to be scheduled: {:?}", @@ -912,10 +824,11 @@ mod tests { // 416, and the first witness tick 426. // Alice wants to propose a block, and also make her witness unit at 426. let bctx = match &*test.handle_timer(ALICE, 416.into()) { - [Eff::ScheduleTimer(timestamp), Eff::RequestNewBlock { - block_context: bctx, - .. 
- }] if *timestamp == 426.into() => *bctx, + [Eff::ScheduleTimer(timestamp), Eff::RequestNewBlock(bctx, expiry)] + if *timestamp == 426.into() && *expiry == 426.into() => + { + bctx.clone() + } effects => panic!("unexpected effects {:?}", effects), }; assert_eq!( @@ -965,7 +878,6 @@ mod tests { 410.into(), 410.into(), &state, - None, Weight(2), TEST_INSTANCE_ID, ); @@ -980,113 +892,26 @@ mod tests { } #[test] - fn waits_until_synchronized() -> Result<(), AddUnitError> { - let instance_id = TEST_INSTANCE_ID; + fn detects_doppelganger_ping() { let mut state = State::new_test(&[Weight(3)], 0); - let a0 = { - let a0 = add_unit!(state, ALICE, 0xB0; N)?; - state.wire_unit(&a0, instance_id).unwrap() - }; - let a1 = { - let a1 = add_unit!(state, ALICE, None; a0.hash())?; - state.wire_unit(&a1, instance_id).unwrap() - }; - let a2 = { - let a2 = add_unit!(state, ALICE, None; a1.hash())?; - state.wire_unit(&a2, instance_id).unwrap() - }; - // Clean state. We want Alice to synchronize first. - state.retain_evidence_only(); - - let unit_file = { - let tmp_dir = tempdir().unwrap(); - let unit_hashes_folder = tmp_dir.path().to_path_buf(); - Some(unit_hashes_folder.join(format!("unit_hash_{:?}.dat", instance_id))) - }; - - // Store `a2` unit as the Alice's last unit. - write_last_unit(&unit_file, a2.clone()).expect("storing unit should succeed"); - - // Alice's last unit is `a2` but `State` is empty. She must synchronize first. 
- let (mut alice, alice_init_effects) = ActiveValidator::new( + let (active_validator, _init_effects) = ActiveValidator::new( ALICE, - TestSecret(ALICE.0), + ALICE_SEC.clone(), 410.into(), 410.into(), &state, - unit_file, Weight(2), TEST_INSTANCE_ID, ); - let mut next_proposal_timer = match &*alice_init_effects { - &[Effect::ScheduleTimer(timestamp), Effect::NewVertex(ValidVertex(Vertex::Ping(_)))] - if timestamp == 416.into() => - { - timestamp - } - other => panic!("unexpected effects {:?}", other), - }; - - // Alice has to synchronize up until `a2` (including) before she starts proposing. - for unit in vec![a0, a1, a2.clone()] { - next_proposal_timer = - assert_no_proposal(&mut alice, &state, instance_id, next_proposal_timer); - state.add_unit(unit)?; - } - - // After synchronizing the protocol state up until `last_own_unit`, Alice can now propose a - // new block. - let bctx = match &*alice.handle_timer(next_proposal_timer, &state, instance_id) { - [Eff::ScheduleTimer(_), Eff::RequestNewBlock { - block_context: bctx, - .. - }] => *bctx, - effects => panic!("unexpected effects {:?}", effects), - }; - - let proposal_wunit = - unwrap_single(&alice.propose(0xC0FFEE, bctx, &state, instance_id)).unwrap_unit(); - assert_eq!( - proposal_wunit.wire_unit().seq_number, - a2.wire_unit().seq_number + 1, - "new unit should have correct seq_number" - ); - assert_eq!( - proposal_wunit.wire_unit().panorama, - panorama!(a2.hash()), - "new unit should cite the latest unit" - ); - - Ok(()) - } - - // Triggers new proposal by `validator` and verifies that it's empty – no block was proposed. - // Captuers the next witness timer and calls the `validator` with that to return the timer for - // the next proposal. 
- fn assert_no_proposal( - validator: &mut ActiveValidator, - state: &State, - instance_id: u64, - proposal_timer: Timestamp, - ) -> Timestamp { - let (witness_timestamp, bctx) = - match &*validator.handle_timer(proposal_timer, &state, instance_id) { - [Eff::ScheduleTimer(witness_timestamp), Eff::RequestNewBlock { - block_context: bctx, - .. - }] => (*witness_timestamp, *bctx), - effects => panic!("unexpected effects {:?}", effects), - }; - - let effects = validator.propose(0xC0FFEE, bctx, state, instance_id); - assert!( - effects.is_empty(), - "should not propose blocks until its dependencies are synchronized: {:?}", - effects - ); + let ping = Vertex::Ping(Ping::new(ALICE, 500.into(), TEST_INSTANCE_ID, &ALICE_SEC)); - unwrap_single(&validator.handle_timer(witness_timestamp, &state, instance_id)) - .unwrap_timer() + // The ping is suspicious if it is newer than the latest ping (or unit) that has been added + // to the state. + assert!(active_validator.is_doppelganger_vertex(&ping, &state)); + state.add_ping(ALICE, 499.into()); + assert!(active_validator.is_doppelganger_vertex(&ping, &state)); + state.add_ping(ALICE, 500.into()); + assert!(!active_validator.is_doppelganger_vertex(&ping, &state)); } } diff --git a/node/src/components/consensus/highway_core/endorsement.rs b/node/src/components/consensus/highway_core/endorsement.rs index 1ebb8d452f..99be3dbcee 100644 --- a/node/src/components/consensus/highway_core/endorsement.rs +++ b/node/src/components/consensus/highway_core/endorsement.rs @@ -2,12 +2,10 @@ use datasize::DataSize; use serde::{Deserialize, Serialize}; use thiserror::Error; -use crate::components::consensus::traits::Context; - -use super::validators::ValidatorIndex; +use crate::components::consensus::{traits::Context, utils::ValidatorIndex}; /// An error due to an invalid endorsement. 
-#[derive(Debug, Error, PartialEq)] +#[derive(Debug, Error, Eq, PartialEq)] pub(crate) enum EndorsementError { #[error("The creator is not a validator.")] Creator, @@ -26,7 +24,7 @@ pub(crate) enum EndorsementError { serialize = "C::Hash: Serialize", deserialize = "C::Hash: Deserialize<'de>", ))] -pub(crate) struct Endorsement +pub struct Endorsement where C: Context, { @@ -44,13 +42,32 @@ impl Endorsement { } } - pub(crate) fn hash(&self) -> C::Hash { + /// Returns the hash of the endorsement. + pub fn hash(&self) -> C::Hash { ::hash( &bincode::serialize(&(self.unit, self.creator)).expect("serialize endorsement"), ) } } +mod specimen_support { + use crate::{ + components::consensus::ClContext, + utils::specimen::{Cache, LargestSpecimen, SizeEstimator}, + }; + + use super::Endorsement; + + impl LargestSpecimen for Endorsement { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + Endorsement { + unit: LargestSpecimen::largest_specimen(estimator, cache), + creator: LargestSpecimen::largest_specimen(estimator, cache), + } + } + } +} + /// Testimony that creator of `unit` was seen honest /// by `endorser` at the moment of creating this endorsement. #[derive(Clone, DataSize, Debug, Eq, PartialEq, Hash, Serialize, Deserialize)] @@ -58,7 +75,7 @@ impl Endorsement { serialize = "C::Signature: Serialize", deserialize = "C::Signature: Deserialize<'de>", ))] -pub(crate) struct SignedEndorsement +pub struct SignedEndorsement where C: Context, { @@ -76,19 +93,23 @@ impl SignedEndorsement { } } - pub(crate) fn unit(&self) -> &C::Hash { + /// Returns the unit being endorsed. + pub fn unit(&self) -> &C::Hash { &self.endorsement.unit } - pub(crate) fn validator_idx(&self) -> ValidatorIndex { + /// Returns the creator of the endorsement. + pub fn validator_idx(&self) -> ValidatorIndex { self.endorsement.creator } - pub(crate) fn signature(&self) -> &C::Signature { + /// Returns the signature of the endorsement. 
+ pub fn signature(&self) -> &C::Signature { &self.signature } - pub(crate) fn hash(&self) -> C::Hash { + /// Returns the hash of the endorsement. + pub fn hash(&self) -> C::Hash { self.endorsement.hash() } } diff --git a/node/src/components/consensus/highway_core/evidence.rs b/node/src/components/consensus/highway_core/evidence.rs index 6b9e036cc5..2c58ec95bc 100644 --- a/node/src/components/consensus/highway_core/evidence.rs +++ b/node/src/components/consensus/highway_core/evidence.rs @@ -1,22 +1,17 @@ use std::iter; -use datasize::DataSize; use itertools::Itertools; -use serde::{Deserialize, Serialize}; use thiserror::Error; -use super::validators::ValidatorIndex; use crate::components::consensus::{ - highway_core::{ - endorsement::SignedEndorsement, highway::SignedWireUnit, state::Params, - validators::Validators, - }, + highway_core::{highway::SignedWireUnit, state::Params}, traits::Context, + utils::{ValidatorIndex, Validators}, }; /// An error due to invalid evidence. -#[derive(Debug, Error, PartialEq)] -pub(crate) enum EvidenceError { +#[derive(Debug, Error, Eq, PartialEq)] +pub enum EvidenceError { #[error("The sequence numbers in the equivocating units are different.")] EquivocationDifferentSeqNumbers, #[error("The creators in the equivocating units are different.")] @@ -39,38 +34,57 @@ pub(crate) enum EvidenceError { Signature, } -/// Evidence that a validator is faulty. -#[derive(Clone, DataSize, Debug, Eq, PartialEq, Serialize, Deserialize, Hash)] -#[serde(bound( - serialize = "C::Hash: Serialize", - deserialize = "C::Hash: Deserialize<'de>", -))] -pub(crate) enum Evidence -where - C: Context, -{ - /// The validator produced two units with the same sequence number. - Equivocation(SignedWireUnit, SignedWireUnit), - /// The validator endorsed two conflicting units. - Endorsements { - /// The endorsement for `unit1`. - endorsement1: SignedEndorsement, - /// The unit with the lower (or equal) sequence number. 
- unit1: SignedWireUnit, - /// The endorsement for `unit2`, by the same creator as endorsement1. - endorsement2: SignedEndorsement, - /// The unit with the higher (or equal) sequence number, on a conflicting fork of the same - /// creator as `unit1`. - unit2: SignedWireUnit, - /// The predecessors of `unit2`, back to the same sequence number as `unit1`, in reverse - /// chronological order. - swimlane2: Vec>, - }, +#[allow(clippy::arithmetic_side_effects)] +pub mod relaxed { + // This module exists solely to exempt the `EnumDiscriminants` macro generated code from the + // module-wide `clippy::arithmetic_side_effects` lint. + + use datasize::DataSize; + use serde::{Deserialize, Serialize}; + use strum::EnumDiscriminants; + + use crate::components::consensus::{ + highway_core::{endorsement::SignedEndorsement, highway::SignedWireUnit}, + traits::Context, + }; + + /// Evidence that a validator is faulty. + #[derive( + Clone, DataSize, Debug, Eq, PartialEq, Serialize, Deserialize, Hash, EnumDiscriminants, + )] + #[serde(bound( + serialize = "C::Hash: Serialize", + deserialize = "C::Hash: Deserialize<'de>", + ))] + #[strum_discriminants(derive(strum::EnumIter))] + pub enum Evidence + where + C: Context, + { + /// The validator produced two units with the same sequence number. + Equivocation(SignedWireUnit, SignedWireUnit), + /// The validator endorsed two conflicting units. + Endorsements { + /// The endorsement for `unit1`. + endorsement1: SignedEndorsement, + /// The unit with the lower (or equal) sequence number. + unit1: SignedWireUnit, + /// The endorsement for `unit2`, by the same creator as endorsement1. + endorsement2: SignedEndorsement, + /// The unit with the higher (or equal) sequence number, on a conflicting fork of the + /// same creator as `unit1`. + unit2: SignedWireUnit, + /// The predecessors of `unit2`, back to the same sequence number as `unit1`, in + /// reverse chronological order. 
+ swimlane2: Vec>, + }, + } } +pub use relaxed::{Evidence, EvidenceDiscriminants}; impl Evidence { /// Returns the ID of the faulty validator. - pub(crate) fn perpetrator(&self) -> ValidatorIndex { + pub fn perpetrator(&self) -> ValidatorIndex { match self { Evidence::Equivocation(unit1, _) => unit1.wire_unit().creator, Evidence::Endorsements { endorsement1, .. } => endorsement1.validator_idx(), @@ -81,8 +95,8 @@ impl Evidence { /// "Validation" can mean different things for different type of evidence. /// /// - For an equivocation, it checks whether the creators, sequence numbers and instance IDs of - /// the two units are the same. - pub(crate) fn validate( + /// the two units are the same. + pub fn validate( &self, validators: &Validators, instance_id: &C::InstanceId, @@ -122,8 +136,8 @@ impl Evidence { instance_id, validators, )?; - if !C::verify_signature(&endorsement1.hash(), v_id, &endorsement1.signature()) - || !C::verify_signature(&endorsement2.hash(), v_id, &endorsement2.signature()) + if !C::verify_signature(&endorsement1.hash(), v_id, endorsement1.signature()) + || !C::verify_signature(&endorsement2.hash(), v_id, endorsement2.signature()) { return Err(EvidenceError::Signature); } @@ -163,3 +177,48 @@ impl Evidence { Ok(()) } } + +mod specimen_support { + + use crate::{ + components::consensus::ClContext, + utils::specimen::{ + estimator_max_rounds_per_era, largest_variant, vec_of_largest_specimen, Cache, + LargestSpecimen, SizeEstimator, + }, + }; + + use super::{Evidence, EvidenceDiscriminants}; + + impl LargestSpecimen for Evidence { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + largest_variant::(estimator, |variant| match variant + { + EvidenceDiscriminants::Equivocation => Evidence::Equivocation( + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + ), + EvidenceDiscriminants::Endorsements => { + if estimator.parameter_bool("endorsements_enabled") { + 
Evidence::Endorsements { + endorsement1: LargestSpecimen::largest_specimen(estimator, cache), + unit1: LargestSpecimen::largest_specimen(estimator, cache), + endorsement2: LargestSpecimen::largest_specimen(estimator, cache), + unit2: LargestSpecimen::largest_specimen(estimator, cache), + swimlane2: vec_of_largest_specimen( + estimator, + estimator_max_rounds_per_era(estimator), + cache, + ), + } + } else { + Evidence::Equivocation( + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + ) + } + } + }) + } + } +} diff --git a/node/src/components/consensus/highway_core/finality_detector.rs b/node/src/components/consensus/highway_core/finality_detector.rs index 59ad4882f3..60b043e05b 100644 --- a/node/src/components/consensus/highway_core/finality_detector.rs +++ b/node/src/components/consensus/highway_core/finality_detector.rs @@ -1,22 +1,22 @@ +//! Functions for detecting finality of proposed blocks and calculating rewards. + mod horizon; -mod rewards; use std::iter; use datasize::DataSize; use tracing::{trace, warn}; -use crate::{ - components::consensus::{ - consensus_protocol::{FinalizedBlock, TerminalBlockData}, - highway_core::{ - highway::Highway, - state::{Observation, State, Unit, Weight}, - validators::ValidatorIndex, - }, - traits::Context, +use casper_types::Timestamp; + +use crate::components::consensus::{ + consensus_protocol::{FinalizedBlock, TerminalBlockData}, + highway_core::{ + highway::Highway, + state::{Observation, State, Unit}, }, - types::Timestamp, + traits::Context, + utils::{ValidatorIndex, Weight}, }; use horizon::Horizon; @@ -55,7 +55,9 @@ impl FinalityDetector { ) -> Result> + 'a, FttExceeded> { let state = highway.state(); let fault_w = state.faulty_weight(); - if fault_w >= self.ftt || fault_w > (state.total_weight() - Weight(1)) / 2 { + // TODO - remove `allow` once false positive ceases. + #[allow(clippy::arithmetic_side_effects)] // False positive on `/ 2`. 
+ if fault_w >= self.ftt || fault_w > (state.total_weight().saturating_sub(Weight(1))) / 2 { warn!(panorama = ?state.panorama(), "fault tolerance threshold exceeded"); return Err(FttExceeded(fault_w)); } @@ -71,7 +73,7 @@ impl FinalityDetector { let finalized_block = FinalizedBlock { value: block.value.clone(), timestamp: unit.timestamp, - height: block.height, + relative_height: block.height, terminal_block_data, equivocators: unit.panorama.iter_faulty().map(to_id).collect(), proposer: to_id(unit.creator), @@ -119,13 +121,13 @@ impl FinalityDetector { let total_w = state.total_weight(); let quorum = self.quorum_for_lvl(target_lvl, total_w); let latest = state.panorama().iter().map(Observation::correct).collect(); - let sec0 = Horizon::level0(candidate, &state, &latest); + let sec0 = Horizon::level0(candidate, state, &latest); let horizons_iter = iter::successors(Some(sec0), |sec| sec.next(quorum)); horizons_iter.skip(1).take(target_lvl).count() } /// Returns the quorum required by a summit with the specified level and the required FTT. - #[allow(clippy::integer_arithmetic)] // See comments. + #[allow(clippy::arithmetic_side_effects)] // See comments. fn quorum_for_lvl(&self, lvl: usize, total_w: Weight) -> Weight { // A level-lvl summit with quorum total_w/2 + t has relative FTT 2t(1 − 1/2^lvl). So: // quorum = total_w / 2 + ftt / 2 / (1 - 1/2^lvl) @@ -140,20 +142,25 @@ impl FinalityDetector { let denominator = 2 * pow_lvl - 2; // The numerator is positive because ftt > 0. // Since this is a lower bound for the quorum, we round up when dividing. - Weight(((numerator + denominator - 1) / denominator) as u64) + Weight( + numerator + .div_ceil(denominator) + .try_into() + .expect("quorum overflow"), + ) } /// Returns the next candidate for finalization, i.e. the lowest block in the fork choice that /// has not been finalized yet. 
fn next_candidate<'a>(&self, state: &'a State) -> Option<&'a C::Hash> { let fork_choice = state.fork_choice(state.panorama())?; - state.find_ancestor(fork_choice, self.next_height(state)) + state.find_ancestor_proposal(fork_choice, self.next_height(state)) } /// Returns the height of the next block that will be finalized. fn next_height(&self, state: &State) -> u64 { // In a trillion years, we need to make block height u128. - #[allow(clippy::integer_arithmetic)] + #[allow(clippy::arithmetic_side_effects)] let height_plus_1 = |bhash| state.block(bhash).height + 1; self.last_finalized.as_ref().map_or(0, height_plus_1) } @@ -179,17 +186,15 @@ impl FinalityDetector { let to_id = |vidx: ValidatorIndex| highway.validators().id(vidx).unwrap().clone(); let state = highway.state(); - // Compute the rewards, and replace each validator index with the validator ID. - let rewards = rewards::compute_rewards(state, bhash); - let rewards_iter = rewards.enumerate(); - let rewards = rewards_iter.map(|(vidx, r)| (to_id(vidx), *r)).collect(); - // Report inactive validators, but only if they had sufficient time to create a unit, i.e. // if at least one maximum-length round passed between the first and last block. // Safe to unwrap: Ancestor at height 0 always exists. - let first_bhash = state.find_ancestor(bhash, 0).unwrap(); - let sufficient_time_for_activity = - unit.timestamp >= state.unit(first_bhash).timestamp + state.params().max_round_length(); + let first_bhash = state.find_ancestor_proposal(bhash, 0).unwrap(); + let sufficient_time_for_activity = unit.timestamp + >= state + .unit(first_bhash) + .timestamp + .saturating_add(state.params().max_round_length()); let inactive_validators = if sufficient_time_for_activity { unit.panorama.iter_none().map(to_id).collect() } else { @@ -197,13 +202,12 @@ impl FinalityDetector { }; TerminalBlockData { - rewards, inactive_validators, } } } -#[allow(unused_qualifications)] // This is to suppress warnings originating in the test macros. 
+#[allow(unused_qualifications, clippy::arithmetic_side_effects)] // This is to suppress warnings originating in the test macros. #[cfg(test)] mod tests { use super::{ diff --git a/node/src/components/consensus/highway_core/finality_detector/horizon.rs b/node/src/components/consensus/highway_core/finality_detector/horizon.rs index b595634422..9c017da034 100644 --- a/node/src/components/consensus/highway_core/finality_detector/horizon.rs +++ b/node/src/components/consensus/highway_core/finality_detector/horizon.rs @@ -1,11 +1,9 @@ use std::collections::BTreeSet; use crate::components::consensus::{ - highway_core::{ - state::{State, Unit, Weight}, - validators::{ValidatorIndex, ValidatorMap}, - }, + highway_core::state::{State, Unit}, traits::Context, + utils::{ValidatorIndex, ValidatorMap, Weight}, }; type Committee = Vec; @@ -13,7 +11,7 @@ type Committee = Vec; /// A list containing the earliest level-n messages of each member of some committee, for some n. /// /// A summit is a sequence of committees of validators, where each member of the level-n committee -/// has produced a unit that can see level-(n-1) units by a quorum of the level-n committe. +/// has produced a unit that can see level-(n-1) units by a quorum of the level-n committee. /// /// The level-n horizon maps each validator of a level-n committee to their earliest level-n unit. /// From a level-n horizon, the level-(n+1) committee and horizon can be computed. @@ -41,7 +39,9 @@ impl<'a, C: Context> Horizon<'a, C> { let to_lvl0unit = |&maybe_vhash: &Option<&'a C::Hash>| { state .swimlane(maybe_vhash?) - .take_while(|(_, unit)| state.find_ancestor(&unit.block, height) == Some(candidate)) + .take_while(|(_, unit)| { + state.find_ancestor_proposal(&unit.block, height) == Some(candidate) + }) .last() .map(|(_, unit)| unit.seq_number) }; @@ -97,6 +97,7 @@ impl<'a, C: Context> Horizon<'a, C> { /// /// Panics if a member of the committee is not in `self.latest`. 
This can never happen if the /// committee was computed from a `Horizon` that originated from the same `level0` as this one. + #[allow(dead_code)] pub(super) fn committee_quorum(&self, committee: &[ValidatorIndex]) -> Option { let seen_weight = |idx: &ValidatorIndex| { self.seen_weight(self.state.unit(self.latest[*idx].unwrap()), committee) @@ -110,7 +111,7 @@ impl<'a, C: Context> Horizon<'a, C> { let find_first_lvl_n = |idx: &ValidatorIndex| { self.state .swimlane(self.latest[*idx]?) - .take_while(|(_, unit)| self.seen_weight(unit, &committee) >= quorum) + .take_while(|(_, unit)| self.seen_weight(unit, committee) >= quorum) .last() .map(|(_, unit)| (*idx, unit.seq_number)) }; @@ -136,12 +137,12 @@ impl<'a, C: Context> Horizon<'a, C> { /// Returns whether `unit` can see `idx`'s unit in `self`, where `unit` is considered to see /// itself. fn can_see(&self, unit: &Unit, idx: ValidatorIndex) -> bool { - self.sequence_numbers[idx].map_or(false, |self_sn| { + self.sequence_numbers[idx].is_some_and(|self_sn| { if unit.creator == idx { unit.seq_number >= self_sn } else { let sees_self_sn = |vhash| self.state.unit(vhash).seq_number >= self_sn; - unit.panorama.get(idx).correct().map_or(false, sees_self_sn) + unit.panorama[idx].correct().is_some_and(sees_self_sn) } }) } diff --git a/node/src/components/consensus/highway_core/finality_detector/rewards.rs b/node/src/components/consensus/highway_core/finality_detector/rewards.rs deleted file mode 100644 index 0f922b946b..0000000000 --- a/node/src/components/consensus/highway_core/finality_detector/rewards.rs +++ /dev/null @@ -1,322 +0,0 @@ -use super::Horizon; -use crate::{ - components::consensus::{ - highway_core::{ - state::{Observation, Panorama, State, Weight}, - validators::ValidatorMap, - }, - traits::Context, - }, - types::Timestamp, -}; - -/// Returns the map of rewards to be paid out when the block `bhash` gets finalized. 
-/// -/// This is the sum of all rewards for finalization of ancestors of `bhash`, as seen from `bhash`. -pub(crate) fn compute_rewards(state: &State, bhash: &C::Hash) -> ValidatorMap { - // The unit that introduced the payout block. - let payout_unit = state.unit(bhash); - // The panorama of the payout block: Rewards must only use this panorama, since it defines - // what everyone who has the block can already see. - let panorama = &payout_unit.panorama; - let mut rewards = ValidatorMap::from(vec![0u64; panorama.len()]); - for proposal_hash in state.ancestor_hashes(bhash) { - for (vidx, r) in compute_rewards_for(state, panorama, proposal_hash).enumerate() { - match rewards[vidx].checked_add(*r) { - Some(sum) => rewards[vidx] = sum, - // Rewards should not overflow. We use one trillion for a block reward, so the full - // rewards for 18 million blocks fit into a u64. - None => panic!( - "rewards for {:?}, {} + {}, overflow u64", - vidx, rewards[vidx], r - ), - } - } - } - rewards -} - -/// Returns the rewards for finalizing the block with hash `proposal_h`. -fn compute_rewards_for( - state: &State, - panorama: &Panorama, - proposal_h: &C::Hash, -) -> ValidatorMap { - let proposal_unit = state.unit(proposal_h); - let r_id = proposal_unit.round_id(); - - // Only consider messages in round `r_id` for the summit. To compute the assigned weight, we - // also include validators who didn't send a message in that round, but were supposed to. - let mut assigned_weight = Weight(0); - let mut latest = ValidatorMap::from(vec![None; panorama.len()]); - for (idx, obs) in panorama.enumerate() { - match round_participation(state, obs, r_id) { - RoundParticipation::Unassigned => continue, - RoundParticipation::No => (), - RoundParticipation::Yes(latest_vh) => latest[idx] = Some(latest_vh), - } - assigned_weight += state.weight(idx); - } - - if assigned_weight.is_zero() { - return ValidatorMap::from(vec![0; latest.len()]); - } - - // Find all level-1 summits. 
For each validator, store the highest quorum it is a part of. - let horizon = Horizon::level0(proposal_h, state, &latest); - let (mut committee, _) = horizon.prune_committee(Weight(1), latest.keys_some().collect()); - let mut max_quorum = ValidatorMap::from(vec![Weight(0); latest.len()]); - while let Some(quorum) = horizon.committee_quorum(&committee) { - // The current committee is a level-1 summit with `quorum`. Try to go higher: - let (new_committee, pruned) = horizon.prune_committee(quorum + Weight(1), committee); - committee = new_committee; - // Pruned validators are not part of any summit with a higher quorum than this. - for vidx in pruned { - max_quorum[vidx] = quorum; - } - } - - let faulty_w: Weight = panorama.iter_faulty().map(|vidx| state.weight(vidx)).sum(); - - // Collect the block rewards for each validator who is a member of at least one summit. - #[allow(clippy::integer_arithmetic)] // See inline comments. - max_quorum - .iter() - .zip(state.weights()) - .map(|(quorum, weight)| { - // If the summit's quorum was not enough to finalize the block, rewards are reduced. - // A level-1 summit with quorum q has FTT q - 50%, so we need q - 50% > f. - let finality_factor = if *quorum > (state.total_weight() / 2).saturating_add(faulty_w) { - state.params().block_reward() - } else { - state.params().reduced_block_reward() - }; - // Rewards are proportional to the quorum and to the validator's weight. - // Since quorum <= assigned_weight and weight <= total_weight, this won't overflow. - (u128::from(finality_factor) * u128::from(*quorum) / u128::from(assigned_weight) - * u128::from(*weight) - / u128::from(state.total_weight())) as u64 - }) - .collect() -} - -/// Information about how a validator participated in a particular round. -#[derive(Debug, PartialEq)] -enum RoundParticipation<'a, C: Context> { - /// The validator was not assigned: The round ID was not the beginning of one of their rounds. 
- Unassigned, - /// The validator was assigned but did not create any messages in that round. - No, - /// The validator participated, and this is their latest message in that round. - Yes(&'a C::Hash), -} - -/// Returns information about the participation of a validator with `obs` in round `r_id`. -fn round_participation<'a, C: Context>( - state: &'a State, - obs: &'a Observation, - r_id: Timestamp, -) -> RoundParticipation<'a, C> { - // Find the validator's latest unit in or before round `r_id`. - let maybe_unit = match obs { - Observation::Faulty => return RoundParticipation::Unassigned, - Observation::None => return RoundParticipation::No, - Observation::Correct(latest_vh) => state - .swimlane(latest_vh) - .find(|&(_, unit)| unit.round_id() <= r_id), - }; - maybe_unit.map_or(RoundParticipation::No, |(vh, unit)| { - if unit.round_exp > r_id.trailing_zeros() { - // Round length doesn't divide `r_id`, so the validator was not assigned to that round. - RoundParticipation::Unassigned - } else if unit.timestamp < r_id { - // The latest unit in or before `r_id` was before `r_id`, so they didn't participate. - RoundParticipation::No - } else { - RoundParticipation::Yes(vh) - } - }) -} - -#[allow(unused_qualifications)] // This is to suppress warnings originating in the test macros. -#[allow(clippy::integer_arithmetic)] // Overflows in tests would panic anyway. -#[cfg(test)] -mod tests { - use super::*; - use crate::components::consensus::highway_core::{ - highway_testing::{TEST_BLOCK_REWARD, TEST_ENDORSEMENT_EVIDENCE_LIMIT}, - state::{tests::*, Params}, - validators::ValidatorMap, - }; - - #[test] - fn round_participation_test() -> Result<(), AddUnitError> { - let mut state = State::new_test(&[Weight(5)], 0); // Alice is the only validator. - - // Round ID 0, length 32: Alice participates. 
- let p0 = add_unit!(state, ALICE, 0, 5u8, 0x1; N)?; // Proposal - let w0 = add_unit!(state, ALICE, 20, 5u8, None; p0)?; // Witness - - // Round ID 32, length 32: Alice partially participates. - let w32 = add_unit!(state, ALICE, 52, 5u8, None; w0)?; // Witness - - // Round ID 64, length 32: Alice doesn't participate. - - // Round ID 96, length 16: Alice participates. - let p96 = add_unit!(state, ALICE, 96, 4u8, 0x2; w32)?; - let w96 = add_unit!(state, ALICE, 106, 4u8, None; p96)?; - - let obs = Observation::Correct(w96); - let rp = |time: u64| round_participation(&state, &obs, Timestamp::from(time)); - assert_eq!(RoundParticipation::Yes(&w0), rp(0)); - assert_eq!(RoundParticipation::Unassigned, rp(16)); - assert_eq!(RoundParticipation::Yes(&w32), rp(32)); - assert_eq!(RoundParticipation::No, rp(64)); - assert_eq!(RoundParticipation::Unassigned, rp(80)); - assert_eq!(RoundParticipation::Yes(&w96), rp(96)); - assert_eq!(RoundParticipation::Unassigned, rp(106)); - assert_eq!(RoundParticipation::No, rp(112)); - Ok(()) - } - - // To keep the form of the reward formula, we spell out Carol's weight 1. - #[allow(clippy::identity_op)] - #[test] - fn compute_rewards_test() -> Result<(), AddUnitError> { - const ALICE_W: u64 = 4; - const BOB_W: u64 = 5; - const CAROL_W: u64 = 1; - const ALICE_BOB_W: u64 = ALICE_W + BOB_W; - const ALICE_CAROL_W: u64 = ALICE_W + CAROL_W; - const BOB_CAROL_W: u64 = BOB_W + CAROL_W; - - let params = Params::new( - 0, - TEST_BLOCK_REWARD, - TEST_BLOCK_REWARD / 5, - 3, - 19, - 3, - u64::MAX, - Timestamp::zero(), - Timestamp::from(u64::MAX), - TEST_ENDORSEMENT_EVIDENCE_LIMIT, - ); - let weights = &[Weight(ALICE_W), Weight(BOB_W), Weight(CAROL_W)]; - let mut state = State::new(weights, params, vec![]); - let total_weight = state.total_weight().0; - - // Round 0: Alice has round length 16, Bob and Carol 8. - // Bob and Alice cite each other, creating a summit with quorum 9. - // Carol only cites Bob, so she's only part of a quorum-6 summit. 
- assert_eq!(BOB, state.leader(0.into())); - let bp0 = add_unit!(state, BOB, 0, 3u8, 0xB00; N, N, N)?; - let ac0 = add_unit!(state, ALICE, 1, 4u8, None; N, bp0, N)?; - let cc0 = add_unit!(state, CAROL, 1, 3u8, None; N, bp0, N)?; - let bw0 = add_unit!(state, BOB, 5, 3u8, None; ac0, bp0, cc0)?; - let cw0 = add_unit!(state, CAROL, 5, 3u8, None; N, bp0, cc0)?; - let aw0 = add_unit!(state, ALICE, 10, 4u8, None; ac0, bp0, N)?; - - let assigned = total_weight; // Everyone is assigned to round 0. - let rewards0 = ValidatorMap::from(vec![ - TEST_BLOCK_REWARD * ALICE_BOB_W * ALICE_W / (assigned * total_weight), - TEST_BLOCK_REWARD * ALICE_BOB_W * BOB_W / (assigned * total_weight), - TEST_BLOCK_REWARD * BOB_CAROL_W * CAROL_W / (assigned * total_weight), - ]); - - // Round 8: Alice is not assigned (length 16). Bob and Carol make a summit. - assert_eq!(BOB, state.leader(8.into())); - let bp8 = add_unit!(state, BOB, 8, 3u8, 0xB08; ac0, bw0, cw0)?; - let cc8 = add_unit!(state, CAROL, 9, 3u8, None; ac0, bp8, cw0)?; - let bw8 = add_unit!(state, BOB, 13, 3u8, None; aw0, bp8, cc8)?; - let cw8 = add_unit!(state, CAROL, 13, 3u8, None; aw0, bp8, cc8)?; - - let assigned = BOB_CAROL_W; // Alice has round length 16, so she's not in round 8. - let rewards8 = ValidatorMap::from(vec![ - 0, - TEST_BLOCK_REWARD * BOB_CAROL_W * BOB_W / (assigned * total_weight), - TEST_BLOCK_REWARD * BOB_CAROL_W * CAROL_W / (assigned * total_weight), - ]); - - // Round 16: Carol slows down (length 16). Alice and Bob finalize with quorum 9. - // Carol cites only Alice and herself, so she's only in the non-finalizing quorum-5 summit. 
- assert_eq!(ALICE, state.leader(16.into())); - let ap16 = add_unit!(state, ALICE, 16, 4u8, 0xA16; aw0, bw8, cw8)?; - let bc16 = add_unit!(state, BOB, 17, 3u8, None; ap16, bw8, cw8)?; - let cc16 = add_unit!(state, CAROL, 17, 4u8, None; ap16, bw8, cw8)?; - let bw16 = add_unit!(state, BOB, 19, 3u8, None; ap16, bc16, cw8)?; - let aw16 = add_unit!(state, ALICE, 26, 4u8, None; ap16, bc16, cc16)?; - let cw16 = add_unit!(state, CAROL, 26, 4u8, None; ap16, bw8, cc16)?; - - let assigned = total_weight; // Everyone is assigned. - let reduced_reward = state.params().reduced_block_reward(); - let rewards16 = ValidatorMap::from(vec![ - TEST_BLOCK_REWARD * ALICE_BOB_W * ALICE_W / (assigned * total_weight), - TEST_BLOCK_REWARD * ALICE_BOB_W * BOB_W / (assigned * total_weight), - reduced_reward * ALICE_CAROL_W * CAROL_W / (assigned * total_weight), - ]); - - // Produce a block that can see all three rounds but doesn't see Carol equivocate. - let ap_last = add_unit!(state, ALICE, 0x0; aw16, bw16, cw16)?; - - // Alice's round-16 block can see the first two rounds. - let expected = ValidatorMap::from(vec![ - rewards0[ALICE] + rewards8[ALICE], - rewards0[BOB] + rewards8[BOB], - rewards0[CAROL] + rewards8[CAROL], - ]); - assert_eq!(expected, compute_rewards(&state, &ap16)); - - // Her next block can also see round 16. - let expected = ValidatorMap::from(vec![ - rewards0[ALICE] + rewards8[ALICE] + rewards16[ALICE], - rewards0[BOB] + rewards8[BOB] + rewards16[BOB], - rewards0[CAROL] + rewards8[CAROL] + rewards16[CAROL], - ]); - let pan = &state.unit(&ap_last).panorama; - assert_eq!(rewards0, compute_rewards_for(&state, &pan, &bp0)); - assert_eq!(rewards8, compute_rewards_for(&state, &pan, &bp8)); - assert_eq!(rewards16, compute_rewards_for(&state, &pan, &ap16)); - assert_eq!(expected, compute_rewards(&state, &ap_last)); - - // However, Carol also equivocated in round 16. And Bob saw her! 
- let _cw16e = add_unit!(state, CAROL, 26, 4u8, None; ap16, bc16, cc16)?; - let bp_last = add_unit!(state, ALICE, 0x0; aw16, bw16, F)?; - - let assigned = ALICE_BOB_W; // Carol is unassigned if she is seen as faulty. - let rewards0f = ValidatorMap::from(vec![ - TEST_BLOCK_REWARD * ALICE_BOB_W * ALICE_W / (assigned * total_weight), - TEST_BLOCK_REWARD * ALICE_BOB_W * BOB_W / (assigned * total_weight), - 0, - ]); - - // Bob alone has only 50% of the weight. Not enough to finalize, so only reduced reward. - let assigned = BOB_W; // Alice has round length 16 and Carol is faulty. - let rewards8f = ValidatorMap::from(vec![ - 0, - reduced_reward * BOB_W * BOB_W / (assigned * total_weight), - 0, - ]); - - let assigned = ALICE_BOB_W; // Carol is unassigned if she is seen as faulty. - let rewards16f = ValidatorMap::from(vec![ - TEST_BLOCK_REWARD * ALICE_BOB_W * ALICE_W / (assigned * total_weight), - TEST_BLOCK_REWARD * ALICE_BOB_W * BOB_W / (assigned * total_weight), - 0, - ]); - - // Bob's block sees Carol as faulty. - let expected = ValidatorMap::from(vec![ - rewards0f[ALICE] + rewards8f[ALICE] + rewards16f[ALICE], - rewards0f[BOB] + rewards8f[BOB] + rewards16f[BOB], - rewards0f[CAROL] + rewards8f[CAROL] + rewards16f[CAROL], - ]); - let pan = &state.unit(&bp_last).panorama; - assert_eq!(rewards0f, compute_rewards_for(&state, &pan, &bp0)); - assert_eq!(rewards8f, compute_rewards_for(&state, &pan, &bp8)); - assert_eq!(rewards16f, compute_rewards_for(&state, &pan, &ap16)); - assert_eq!(expected, compute_rewards(&state, &bp_last)); - - Ok(()) - } -} diff --git a/node/src/components/consensus/highway_core/highway.rs b/node/src/components/consensus/highway_core/highway.rs index aa17c5fb4a..4b657eb02f 100644 --- a/node/src/components/consensus/highway_core/highway.rs +++ b/node/src/components/consensus/highway_core/highway.rs @@ -1,34 +1,38 @@ +//! The implementation of the Highway consensus protocol. 
+ mod vertex; pub(crate) use crate::components::consensus::highway_core::state::Params; -pub(crate) use vertex::{ +pub use vertex::{ Dependency, Endorsements, HashedWireUnit, Ping, SignedWireUnit, Vertex, WireUnit, }; use std::path::PathBuf; use datasize::DataSize; +use serde::{Deserialize, Serialize}; use thiserror::Error; -use tracing::{debug, error, info, warn}; +use tracing::{debug, error, info, trace, warn}; -use crate::{ - components::consensus::{ - consensus_protocol::BlockContext, - highway_core::{ - active_validator::{ActiveValidator, Effect}, - evidence::EvidenceError, - state::{Fault, State, UnitError, Weight}, - validators::{Validator, Validators}, - }, - traits::Context, +use casper_types::{TimeDiff, Timestamp}; + +use crate::components::consensus::{ + consensus_protocol::BlockContext, + highway_core::{ + active_validator::{ActiveValidator, Effect}, + endorsement::{Endorsement, EndorsementError}, + evidence::{Evidence, EvidenceError}, + state::{Fault, Observation, State, UnitError}, + }, + traits::Context, + utils::{ + wal::{ReadWal, WalEntry, WriteWal}, + Validator, ValidatorIndex, Validators, Weight, }, - types::{TimeDiff, Timestamp}, }; -use super::{ - endorsement::{Endorsement, EndorsementError}, - evidence::Evidence, -}; +/// If a lot of rounds were skipped between two blocks, log at most this many. +const MAX_SKIPPED_PROPOSAL_LOGS: u64 = 10; /// An error due to an invalid vertex. #[derive(Debug, Error, PartialEq)] @@ -44,7 +48,7 @@ pub(crate) enum VertexError { } /// An error due to an invalid ping. -#[derive(Debug, Error, PartialEq)] +#[derive(Debug, Error, Eq, PartialEq)] pub(crate) enum PingError { #[error("The creator is not a validator.")] Creator, @@ -102,8 +106,12 @@ impl From> for Vertex { /// /// Note that this must only be added to the `Highway` instance that created it. Can cause a panic /// or inconsistent state otherwise. 
-#[derive(Clone, DataSize, Debug, Eq, PartialEq)] -pub(crate) struct ValidVertex(pub(super) Vertex) +#[derive(Clone, DataSize, Debug, Eq, PartialEq, Hash, Serialize, Deserialize)] +#[serde(bound( + serialize = "C::Hash: Serialize", + deserialize = "C::Hash: Deserialize<'de>", +))] +pub(crate) struct ValidVertex(pub(crate) Vertex) where C: Context; @@ -134,7 +142,19 @@ pub(crate) enum GetDepOutcome { Evidence(C::ValidatorId), } -/// A passive instance of the Highway protocol, containing its local state. +#[derive(Serialize, Deserialize, Debug, PartialEq)] +#[serde(bound( + serialize = "C::Hash: Serialize", + deserialize = "C::Hash: Deserialize<'de>", +))] +struct HighwayWalEntry { + vertex: ValidVertex, + timestamp: Timestamp, +} + +impl WalEntry for HighwayWalEntry {} + +/// An instance of the Highway protocol, containing its local state. /// /// Both observers and active validators must instantiate this, pass in all incoming vertices from /// peers, and use a [FinalityDetector](../finality_detector/struct.FinalityDetector.html) to @@ -152,6 +172,8 @@ where state: State, /// The state of an active validator, who is participating and creating new vertices. active_validator: Option>, + /// The path to the protocol state file. 
+ write_wal: Option>>, } impl Highway { @@ -167,16 +189,65 @@ impl Highway { instance_id: C::InstanceId, validators: Validators, params: Params, + protocol_state_file: Option, ) -> Highway { info!(%validators, instance=%instance_id, "creating Highway instance"); let weights = validators.iter().map(Validator::weight); let banned = validators.iter_banned_idx(); - let state = State::new(weights, params, banned); - Highway { + let cannot_propose = validators.iter_cannot_propose_idx(); + let state = State::new(weights, params, banned, cannot_propose); + let (write_wal, entries) = if let Some(protocol_state_file) = protocol_state_file.as_ref() { + let entries = Self::read_stored_vertices(protocol_state_file); + let write_wal = match WriteWal::>::new(protocol_state_file) { + Ok(wal) => Some(wal), + Err(err) => { + panic!("couldn't open WriteWal: {}", err); + } + }; + (write_wal, entries) + } else { + (None, vec![]) + }; + let mut result = Highway { instance_id, validators, state, active_validator: None, + write_wal, + }; + result.restore_state(entries); + result + } + + fn read_stored_vertices(protocol_state_file: &PathBuf) -> Vec> { + let mut read_wal = match ReadWal::>::new(protocol_state_file) { + Ok(wal) => wal, + Err(err) => { + panic!("couldn't open ReadWal: {}", err); + } + }; + let mut entries = vec![]; + loop { + match read_wal.read_next_entry() { + Ok(Some(entry)) => { + entries.push(entry); + } + Ok(None) => { + break; + } + Err(err) => { + panic!("error while reading ReadWal: {}", err); + } + } + } + entries + } + + fn restore_state(&mut self, entries: Vec>) { + for entry in entries { + // we can safely ignore the effects - they were properly processed when persisting the + // vertex + self.add_valid_vertex(entry.vertex, entry.timestamp); } } @@ -189,7 +260,7 @@ impl Highway { id: C::ValidatorId, secret: C::ValidatorSecret, current_time: Timestamp, - unit_hash_file: Option, + _unit_hash_file: Option, target_ftt: Weight, ) -> Vec> { if 
self.active_validator.is_some() { @@ -210,31 +281,22 @@ impl Highway { current_time, start_time, &self.state, - unit_hash_file, target_ftt, self.instance_id, ); self.active_validator = Some(av); - effects + self.add_new_own_vertices(effects, current_time) } /// Turns this instance into a passive observer, that does not create any new vertices. pub(crate) fn deactivate_validator(&mut self) { - if let Some(av) = self.active_validator.take() { - match av.cleanup() { - Ok(_) => {} - Err(err) => warn!( - ?err, - "error occurred when cleaning up active validator state" - ), - } - } + self.active_validator = None; } - /// Switches the active validator to a new round exponent. - pub(crate) fn set_round_exp(&mut self, new_round_exp: u8) { + /// Switches the active validator to a new round length. + pub(crate) fn set_round_len(&mut self, new_round_len: TimeDiff) { if let Some(ref mut av) = self.active_validator { - av.set_round_exp(new_round_exp); + av.set_round_len(new_round_len); } } @@ -252,7 +314,7 @@ impl Highway { /// Returns the next missing dependency, or `None` if all dependencies of `pvv` are satisfied. /// /// If this returns `None`, `validate_vertex` can be called. 
- pub(crate) fn missing_dependency(&self, pvv: &PreValidatedVertex) -> Option> { + pub(super) fn missing_dependency(&self, pvv: &PreValidatedVertex) -> Option> { match pvv.inner() { Vertex::Evidence(_) | Vertex::Ping(_) => None, Vertex::Endorsements(endorsements) => { @@ -299,6 +361,15 @@ impl Highway { now: Timestamp, ) -> Vec> { if !self.has_vertex(&vertex) { + if let Some(ref mut wal) = self.write_wal { + let entry = HighwayWalEntry { + vertex: ValidVertex(vertex.clone()), + timestamp: now, + }; + if let Err(err) = wal.record_entry(&entry) { + error!("error recording entry: {}", err); + } + } match vertex { Vertex::Unit(unit) => self.add_valid_unit(unit, now), Vertex::Evidence(evidence) => self.add_evidence(evidence), @@ -328,7 +399,7 @@ impl Highway { pub(crate) fn has_evidence(&self, vid: &C::ValidatorId) -> bool { self.validators .get_index(vid) - .map_or(false, |vidx| self.state.has_evidence(vidx)) + .is_some_and(|vidx| self.state.has_evidence(vidx)) } /// Marks the given validator as faulty, if it exists. @@ -376,13 +447,44 @@ impl Highway { } } + /// Returns a vertex by a validator with the requested sequence number. 
+ pub(crate) fn get_dependency_by_index( + &self, + vid: ValidatorIndex, + unit_seq: u64, + ) -> GetDepOutcome { + let obs = match self.state.panorama().get(vid) { + Some(obs) => obs, + None => return GetDepOutcome::None, + }; + match obs { + Observation::None => GetDepOutcome::None, + Observation::Faulty => match self.state.maybe_fault(vid) { + None | Some(Fault::Banned) => GetDepOutcome::None, + Some(Fault::Direct(ev)) => { + GetDepOutcome::Vertex(ValidVertex(Vertex::Evidence(ev.clone()))) + } + Some(Fault::Indirect) => match self.validators.id(vid) { + Some(vid) => GetDepOutcome::Evidence(vid.clone()), + None => GetDepOutcome::None, + }, + }, + Observation::Correct(last_seen) => self + .state + .find_in_swimlane(last_seen, unit_seq) + .and_then(|req_hash| self.state.wire_unit(req_hash, self.instance_id)) + .map(|swunit| GetDepOutcome::Vertex(ValidVertex(Vertex::Unit(swunit)))) + .unwrap_or_else(|| GetDepOutcome::None), + } + } + pub(crate) fn handle_timer(&mut self, timestamp: Timestamp) -> Vec> { let instance_id = self.instance_id; // Here we just use the timer's timestamp, and assume it's ~ Timestamp::now() // // This is because proposal units, i.e. new blocks, are - // supposed to thave the exact timestamp that matches the + // supposed to have the exact timestamp that matches the // beginning of the round (which we use as the "round ID"). 
// // But at least any discrepancy here can only come from event @@ -402,7 +504,7 @@ impl Highway { pub(crate) fn propose( &mut self, value: C::ConsensusValue, - block_context: BlockContext, + block_context: BlockContext, ) -> Vec> { let instance_id = self.instance_id; @@ -428,7 +530,7 @@ impl Highway { timestamp, ) .unwrap_or_else(|| { - debug!("ignoring `propose` event: validator has been deactivated"); + warn!("ignoring `propose` event: validator has been deactivated"); vec![] }) } @@ -504,18 +606,30 @@ impl Highway { F: FnOnce(&mut ActiveValidator, &State) -> Vec>, { let effects = f(self.active_validator.as_mut()?, &self.state); - let mut result = vec![]; + Some(self.add_new_own_vertices(effects, timestamp)) + } + + /// Handles all `NewVertex` effects and adds the vertices to the protocol state. + /// + /// This needs to be applied to all effects created by `ActiveValidator`, so that new vertices + /// are not interpreted as coming from a doppelgänger. + fn add_new_own_vertices( + &mut self, + effects: Vec>, + timestamp: Timestamp, + ) -> Vec> { + let mut result = Vec::with_capacity(effects.len()); for effect in &effects { match effect { Effect::NewVertex(vv) => { result.extend(self.add_valid_vertex(vv.clone(), timestamp)) } Effect::WeAreFaulty(_) => self.deactivate_validator(), - Effect::ScheduleTimer(_) | Effect::RequestNewBlock { .. } => (), + Effect::ScheduleTimer(_) | Effect::RequestNewBlock(_, _) => (), } } result.extend(effects); - Some(result) + result } /// Performs initial validation and returns an error if `vertex` is invalid. 
(See @@ -550,7 +664,7 @@ impl Highway { return Err(EndorsementError::Banned.into()); } let endorsement: Endorsement = Endorsement::new(unit, *creator); - if !C::verify_signature(&endorsement.hash(), v_id, &signature) { + if !C::verify_signature(&endorsement.hash(), v_id, signature) { return Err(EndorsementError::Signature.into()); } } @@ -588,6 +702,7 @@ impl Highway { let creator = swunit.wire_unit().creator; let was_honest = !self.state.is_faulty(creator); self.state.add_valid_unit(swunit); + self.log_if_missing_proposal(&unit_hash); let mut evidence_effects = self .state .maybe_evidence(creator) @@ -601,30 +716,9 @@ impl Highway { }) .unwrap_or_default(); evidence_effects.extend(self.on_new_unit(&unit_hash, now)); - evidence_effects.extend(self.add_own_last_unit(now)); evidence_effects } - /// If validator's protocol state is synchronized, adds its own last unit (if any) to the - /// protocol state - fn add_own_last_unit(&mut self, now: Timestamp) -> Vec> { - self.map_active_validator( - |av, state| { - if av.is_own_last_unit_panorama_sync(state) { - if let Some(own_last_unit) = av.take_own_last_unit() { - vec![Effect::NewVertex(ValidVertex(Vertex::Unit(own_last_unit)))] - } else { - vec![] - } - } else { - vec![] - } - }, - now, - ) - .unwrap_or_default() - } - /// Adds endorsements to the state. If there are conflicting endorsements, `NewVertex` effects /// are returned containing evidence to prove them faulty. fn add_endorsements(&mut self, endorsements: Endorsements) -> Vec> { @@ -649,7 +743,7 @@ impl Highway { pub(crate) fn is_doppelganger_vertex(&self, vertex: &Vertex) -> bool { self.active_validator .as_ref() - .map_or(false, |av| av.is_doppelganger_vertex(vertex, &self.state)) + .is_some_and(|av| av.is_doppelganger_vertex(vertex, &self.state)) } /// Returns whether this instance of protocol is an active validator. 
@@ -667,27 +761,83 @@ impl Highway { .as_ref() .map(|av| av.next_round_length()) } + + /// Logs a message if this is a block and any previous blocks were skipped. + fn log_if_missing_proposal(&self, unit_hash: &C::Hash) { + let state = &self.state; + let unit = state.unit(unit_hash); + let r_id = unit.round_id(); + if unit.timestamp != r_id + || unit.block != *unit_hash + || state.leader(r_id) != unit.creator + || state.is_faulty(unit.creator) + { + return; // Not a block by an honest validator. (Don't let faulty validators spam logs.) + } + + // Iterate over all rounds since the parent — or since the start time, if there is none. + let parent_timestamp = if let Some(parent_hash) = state.block(unit_hash).parent() { + state.unit(parent_hash).timestamp + } else { + state.params().start_timestamp() + }; + for skipped_r_id in (1..=MAX_SKIPPED_PROPOSAL_LOGS) + .filter_map(|i| r_id.checked_sub(state.params().min_round_length().checked_mul(i)?)) + .take_while(|skipped_r_id| *skipped_r_id > parent_timestamp) + { + let leader_index = state.leader(skipped_r_id); + let leader_id = match self.validators.id(leader_index) { + None => { + error!(?leader_index, "missing leader validator ID"); + return; + } + Some(leader_id) => leader_id, + }; + if state.is_faulty(leader_index) { + trace!( + ?leader_index, %leader_id, round_id = %skipped_r_id, + "missing proposal: faulty leader was skipped", + ); + } else { + let reason = state.panorama()[leader_index] + .correct() + .and_then(|leader_hash| { + state + .swimlane(leader_hash) + .find(|(_, unit)| unit.timestamp <= skipped_r_id) + .filter(|(_, unit)| unit.timestamp == skipped_r_id) + }) + .map_or("the leader missed their turn", |_| { + "the leader's proposal got orphaned" + }); + info!( + ?leader_index, %leader_id, round_id = %skipped_r_id, + "missing proposal: {}", reason, + ); + } + } + } } #[cfg(test)] +#[allow(clippy::arithmetic_side_effects)] pub(crate) mod tests { use std::{collections::BTreeSet, iter::FromIterator}; - use 
crate::{ - components::consensus::{ - highway_core::{ - evidence::{Evidence, EvidenceError}, - highway::{ - vertex::Ping, Dependency, Highway, SignedWireUnit, UnitError, Vertex, - VertexError, WireUnit, - }, - highway_testing::TEST_INSTANCE_ID, - state::{tests::*, Panorama, State}, - validators::Validators, + use casper_types::Timestamp; + + use crate::components::consensus::{ + highway_core::{ + evidence::{Evidence, EvidenceError}, + highway::{ + vertex::Ping, Dependency, Highway, SignedWireUnit, UnitError, Vertex, VertexError, + WireUnit, }, - traits::ValidatorSecret, + highway_testing::TEST_INSTANCE_ID, + state::{tests::*, Panorama, State}, }, - types::Timestamp, + traits::ValidatorSecret, + utils::Validators, }; pub(crate) fn test_validators() -> Validators { @@ -712,6 +862,7 @@ pub(crate) mod tests { validators: test_validators(), state, active_validator: None, + write_wal: None, }; let wunit = WireUnit { panorama: Panorama::new(WEIGHTS.len()), @@ -769,6 +920,7 @@ pub(crate) mod tests { validators: test_validators(), state: State::new_test(WEIGHTS, 0), active_validator: None, + write_wal: None, }; let vertex_end_a = Vertex::Endorsements(end_a); @@ -821,6 +973,7 @@ pub(crate) mod tests { validators: test_validators(), state, active_validator: None, + write_wal: None, }; let validate = |wunit0: &WireUnit, @@ -919,6 +1072,7 @@ pub(crate) mod tests { validators: test_validators(), state, active_validator: None, + write_wal: None, }; // Ping by validator that is not bonded, with an index that is outside of boundaries of the @@ -930,6 +1084,30 @@ pub(crate) mod tests { "should use validator that is not bonded" ); // Verify that sending a Ping from a non-existing validator does not panic. 
- assert_eq!(highway.has_vertex(&ping), false); + assert!(!highway.has_vertex(&ping)); + } + + #[test] + fn own_initial_ping_is_not_from_doppelganger() { + let now: Timestamp = 500.into(); + let later = 501.into(); + + let state: State = State::new_test(WEIGHTS, 0); + let target_ftt = state.total_weight() / 3; + let mut highway = Highway { + instance_id: TEST_INSTANCE_ID, + validators: test_validators(), + state, + active_validator: None, + write_wal: None, + }; + + let _effects = + highway.activate_validator(ALICE.0, ALICE_SEC.clone(), now, None, target_ftt); + + let ping = Vertex::Ping(Ping::new(ALICE, now, TEST_INSTANCE_ID, &ALICE_SEC)); + assert!(!highway.is_doppelganger_vertex(&ping)); + let ping = Vertex::Ping(Ping::new(ALICE, later, TEST_INSTANCE_ID, &ALICE_SEC)); + assert!(highway.is_doppelganger_vertex(&ping)); } } diff --git a/node/src/components/consensus/highway_core/highway/vertex.rs b/node/src/components/consensus/highway_core/highway/vertex.rs index 9670fc2e8c..59fb9aeec3 100644 --- a/node/src/components/consensus/highway_core/highway/vertex.rs +++ b/node/src/components/consensus/highway_core/highway/vertex.rs @@ -3,61 +3,101 @@ use std::{collections::BTreeSet, fmt::Debug}; use datasize::DataSize; use serde::{Deserialize, Deserializer, Serialize, Serializer}; -use crate::{ - components::consensus::{ - highway_core::{ - endorsement::SignedEndorsement, - evidence::Evidence, - highway::{PingError, VertexError}, - state::{self, Panorama}, - validators::{ValidatorIndex, Validators}, - }, - traits::{Context, ValidatorSecret}, +use casper_types::Timestamp; + +use crate::components::consensus::{ + highway_core::{ + endorsement::SignedEndorsement, + highway::{PingError, VertexError}, + state::Panorama, }, - types::Timestamp, + traits::{Context, ValidatorSecret}, + utils::{ValidatorIndex, Validators}, }; -/// A dependency of a `Vertex` that can be satisfied by one or more other vertices. 
-#[derive(Clone, DataSize, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, Serialize, Deserialize)] -#[serde(bound( - serialize = "C::Hash: Serialize", - deserialize = "C::Hash: Deserialize<'de>", -))] -pub(crate) enum Dependency -where - C: Context, -{ - Unit(C::Hash), - Evidence(ValidatorIndex), - Endorsement(C::Hash), - Ping(ValidatorIndex, Timestamp), +#[allow(clippy::arithmetic_side_effects)] +mod relaxed { + // This module exists solely to exempt the `EnumDiscriminants` macro generated code from the + // module-wide `clippy::arithmetic_side_effects` lint. + + use casper_types::Timestamp; + use datasize::DataSize; + use serde::{Deserialize, Serialize}; + use strum::EnumDiscriminants; + + use crate::components::consensus::{ + highway_core::evidence::Evidence, traits::Context, utils::ValidatorIndex, + }; + + use super::{Endorsements, Ping, SignedWireUnit}; + + /// A dependency of a `Vertex` that can be satisfied by one or more other vertices. + #[derive( + DataSize, + Clone, + Debug, + Eq, + PartialEq, + PartialOrd, + Ord, + Hash, + Serialize, + Deserialize, + EnumDiscriminants, + )] + #[serde(bound( + serialize = "C::Hash: Serialize", + deserialize = "C::Hash: Deserialize<'de>", + ))] + #[strum_discriminants(derive(strum::EnumIter))] + pub enum Dependency + where + C: Context, + { + /// The hash of a unit. + Unit(C::Hash), + /// The index of the validator against which evidence is needed. + Evidence(ValidatorIndex), + /// The hash of the unit to be endorsed. + Endorsement(C::Hash), + /// The ping by a particular validator for a particular timestamp. + Ping(ValidatorIndex, Timestamp), + } + + /// An element of the protocol state, that might depend on other elements. + /// + /// It is the vertex in a directed acyclic graph, whose edges are dependencies. 
+ #[derive( + DataSize, Clone, Debug, Eq, PartialEq, Serialize, Deserialize, Hash, EnumDiscriminants, + )] + #[serde(bound( + serialize = "C::Hash: Serialize", + deserialize = "C::Hash: Deserialize<'de>", + ))] + #[strum_discriminants(derive(strum::EnumIter))] + pub enum Vertex + where + C: Context, + { + /// A signed unit of the consensus DAG. + Unit(SignedWireUnit), + /// Evidence of a validator's transgression. + Evidence(Evidence), + /// Endorsements for a unit. + Endorsements(Endorsements), + /// A ping conveying the activity of its creator. + Ping(Ping), + } } +pub use relaxed::{Dependency, DependencyDiscriminants, Vertex, VertexDiscriminants}; impl Dependency { /// Returns whether this identifies a unit, as opposed to other types of vertices. - pub(crate) fn is_unit(&self) -> bool { + pub fn is_unit(&self) -> bool { matches!(self, Dependency::Unit(_)) } } -/// An element of the protocol state, that might depend on other elements. -/// -/// It is the vertex in a directed acyclic graph, whose edges are dependencies. -#[derive(Clone, DataSize, Debug, Eq, PartialEq, Serialize, Deserialize, Hash)] -#[serde(bound( - serialize = "C::Hash: Serialize", - deserialize = "C::Hash: Deserialize<'de>", -))] -pub(crate) enum Vertex -where - C: Context, -{ - Unit(SignedWireUnit), - Evidence(Evidence), - Endorsements(Endorsements), - Ping(Ping), -} - impl Vertex { /// Returns the consensus value mentioned in this vertex, if any. /// @@ -65,7 +105,7 @@ impl Vertex { /// `C::ConsensusValue` is a transaction, it should be validated first (correct signature, /// structure, gas limit, etc.). If it is a hash of a transaction, the transaction should be /// obtained _and_ validated. Only after that, the vertex can be considered valid. 
- pub(crate) fn value(&self) -> Option<&C::ConsensusValue> { + pub fn value(&self) -> Option<&C::ConsensusValue> { match self { Vertex::Unit(swunit) => swunit.wire_unit().value.as_ref(), Vertex::Evidence(_) | Vertex::Endorsements(_) | Vertex::Ping(_) => None, @@ -73,7 +113,7 @@ impl Vertex { } /// Returns the unit hash of this vertex (if it is a unit). - pub(crate) fn unit_hash(&self) -> Option { + pub fn unit_hash(&self) -> Option { match self { Vertex::Unit(swunit) => Some(swunit.hash()), Vertex::Evidence(_) | Vertex::Endorsements(_) | Vertex::Ping(_) => None, @@ -81,7 +121,7 @@ impl Vertex { } /// Returns the seq number of this vertex (if it is a unit). - pub(crate) fn unit_seq_number(&self) -> Option { + pub fn unit_seq_number(&self) -> Option { match self { Vertex::Unit(swunit) => Some(swunit.wire_unit().seq_number), _ => None, @@ -89,12 +129,12 @@ impl Vertex { } /// Returns whether this is evidence, as opposed to other types of vertices. - pub(crate) fn is_evidence(&self) -> bool { + pub fn is_evidence(&self) -> bool { matches!(self, Vertex::Evidence(_)) } /// Returns a `Timestamp` provided the vertex is a `Vertex::Unit` or `Vertex::Ping`. - pub(crate) fn timestamp(&self) -> Option { + pub fn timestamp(&self) -> Option { match self { Vertex::Unit(signed_wire_unit) => Some(signed_wire_unit.wire_unit().timestamp), Vertex::Ping(ping) => Some(ping.timestamp()), @@ -102,7 +142,8 @@ impl Vertex { } } - pub(crate) fn creator(&self) -> Option { + /// Returns the creator of this vertex, if one is defined. + pub fn creator(&self) -> Option { match self { Vertex::Unit(signed_wire_unit) => Some(signed_wire_unit.wire_unit().creator), Vertex::Ping(ping) => Some(ping.creator), @@ -110,7 +151,8 @@ impl Vertex { } } - pub(crate) fn id(&self) -> Dependency { + /// Returns the ID of this vertex. 
+ pub fn id(&self) -> Dependency { match self { Vertex::Unit(signed_wire_unit) => Dependency::Unit(signed_wire_unit.hash()), Vertex::Evidence(evidence) => Dependency::Evidence(evidence.perpetrator()), @@ -120,20 +162,154 @@ impl Vertex { } /// Returns a reference to the unit, or `None` if this is not a unit. - pub(crate) fn unit(&self) -> Option<&SignedWireUnit> { + pub fn unit(&self) -> Option<&SignedWireUnit> { match self { Vertex::Unit(signed_wire_unit) => Some(signed_wire_unit), _ => None, } } + + /// Returns true whether unit is a proposal. + pub fn is_proposal(&self) -> bool { + self.value().is_some() + } } -#[derive(Clone, DataSize, Debug, Eq, PartialEq, Serialize, Deserialize, Hash)] +mod specimen_support { + use super::{ + Dependency, DependencyDiscriminants, Endorsements, HashedWireUnit, Ping, SignedEndorsement, + SignedWireUnit, Vertex, VertexDiscriminants, WireUnit, + }; + use crate::{ + components::consensus::ClContext, + utils::specimen::{ + btree_set_distinct_from_prop, largest_variant, vec_prop_specimen, Cache, + LargestSpecimen, SizeEstimator, + }, + }; + + impl LargestSpecimen for Vertex { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + largest_variant::(estimator, |variant| match variant { + VertexDiscriminants::Unit => { + Vertex::Unit(LargestSpecimen::largest_specimen(estimator, cache)) + } + VertexDiscriminants::Evidence => { + Vertex::Evidence(LargestSpecimen::largest_specimen(estimator, cache)) + } + VertexDiscriminants::Endorsements => { + if estimator.parameter_bool("endorsements_enabled") { + Vertex::Endorsements(LargestSpecimen::largest_specimen(estimator, cache)) + } else { + Vertex::Ping(LargestSpecimen::largest_specimen(estimator, cache)) + } + } + VertexDiscriminants::Ping => { + Vertex::Ping(LargestSpecimen::largest_specimen(estimator, cache)) + } + }) + } + } + + impl LargestSpecimen for Dependency { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + largest_variant::(estimator, |variant| { + 
match variant { + DependencyDiscriminants::Unit => { + Dependency::Unit(LargestSpecimen::largest_specimen(estimator, cache)) + } + DependencyDiscriminants::Evidence => { + Dependency::Evidence(LargestSpecimen::largest_specimen(estimator, cache)) + } + DependencyDiscriminants::Endorsement => { + Dependency::Endorsement(LargestSpecimen::largest_specimen(estimator, cache)) + } + DependencyDiscriminants::Ping => Dependency::Ping( + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + ), + } + }) + } + } + + impl LargestSpecimen for SignedWireUnit { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + SignedWireUnit { + hashed_wire_unit: LargestSpecimen::largest_specimen(estimator, cache), + signature: LargestSpecimen::largest_specimen(estimator, cache), + } + } + } + + impl LargestSpecimen for Endorsements { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + Endorsements { + unit: LargestSpecimen::largest_specimen(estimator, cache), + endorsers: if estimator.parameter_bool("endorsements_enabled") { + vec_prop_specimen(estimator, "validator_count", cache) + } else { + Vec::new() + }, + } + } + } + + impl LargestSpecimen for SignedEndorsement { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + SignedEndorsement::new( + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + ) + } + } + + impl LargestSpecimen for Ping { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + Ping { + creator: LargestSpecimen::largest_specimen(estimator, cache), + timestamp: LargestSpecimen::largest_specimen(estimator, cache), + instance_id: LargestSpecimen::largest_specimen(estimator, cache), + signature: LargestSpecimen::largest_specimen(estimator, cache), + } + } + } + + impl LargestSpecimen for HashedWireUnit { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + if let Some(item) = cache.get::() { + 
return item.clone(); + } + + let hash = LargestSpecimen::largest_specimen(estimator, cache); + let wire_unit = LargestSpecimen::largest_specimen(estimator, cache); + cache.set(HashedWireUnit { hash, wire_unit }).clone() + } + } + + impl LargestSpecimen for WireUnit { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + WireUnit { + panorama: LargestSpecimen::largest_specimen(estimator, cache), + creator: LargestSpecimen::largest_specimen(estimator, cache), + instance_id: LargestSpecimen::largest_specimen(estimator, cache), + value: LargestSpecimen::largest_specimen(estimator, cache), + seq_number: LargestSpecimen::largest_specimen(estimator, cache), + timestamp: LargestSpecimen::largest_specimen(estimator, cache), + round_exp: LargestSpecimen::largest_specimen(estimator, cache), + endorsed: btree_set_distinct_from_prop(estimator, "validator_count", cache), + } + } + } +} + +/// A `WireUnit` together with its hash and a cryptographic signature by its creator. +#[derive(DataSize, Clone, Debug, Eq, PartialEq, Serialize, Deserialize, Hash)] #[serde(bound( serialize = "C::Hash: Serialize", deserialize = "C::Hash: Deserialize<'de>", ))] -pub(crate) struct SignedWireUnit +pub struct SignedWireUnit where C: Context, { @@ -153,17 +329,20 @@ impl SignedWireUnit { } } - pub(crate) fn wire_unit(&self) -> &WireUnit { + /// Returns the inner `WireUnit`. + pub fn wire_unit(&self) -> &WireUnit { self.hashed_wire_unit.wire_unit() } - pub(crate) fn hash(&self) -> C::Hash { + /// Returns this unit's hash. + pub fn hash(&self) -> C::Hash { self.hashed_wire_unit.hash() } } +/// A `WireUnit` together with its hash. #[derive(Clone, DataSize, Debug, Eq, PartialEq, Hash)] -pub(crate) struct HashedWireUnit +pub struct HashedWireUnit where C: Context, { @@ -181,15 +360,18 @@ where Self::new_with_hash(wire_unit, hash) } - pub(crate) fn into_inner(self) -> WireUnit { + /// Returns the inner `WireUnit`. 
+ pub fn into_inner(self) -> WireUnit { self.wire_unit } - pub(crate) fn wire_unit(&self) -> &WireUnit { + /// Returns a reference to the inner `WireUnit`. + pub fn wire_unit(&self) -> &WireUnit { &self.wire_unit } - pub(crate) fn hash(&self) -> C::Hash { + /// Returns this unit's hash. + pub fn hash(&self) -> C::Hash { self.hash } @@ -213,23 +395,31 @@ impl<'de, C: Context> Deserialize<'de> for HashedWireUnit { } /// A unit as it is sent over the wire, possibly containing a new block. -#[derive(Clone, DataSize, Eq, PartialEq, Serialize, Deserialize, Hash)] +#[derive(DataSize, Clone, Eq, PartialEq, Serialize, Deserialize, Hash)] #[serde(bound( serialize = "C::Hash: Serialize", deserialize = "C::Hash: Deserialize<'de>", ))] -pub(crate) struct WireUnit +pub struct WireUnit where C: Context, { - pub(crate) panorama: Panorama, - pub(crate) creator: ValidatorIndex, - pub(crate) instance_id: C::InstanceId, - pub(crate) value: Option, - pub(crate) seq_number: u64, - pub(crate) timestamp: Timestamp, - pub(crate) round_exp: u8, - pub(crate) endorsed: BTreeSet, + /// The panorama of cited units. + pub panorama: Panorama, + /// The index of the creator of this unit. + pub creator: ValidatorIndex, + /// The consensus instance ID for which this unit was created. + pub instance_id: C::InstanceId, + /// The consensus value included in the unit, if any. + pub value: Option, + /// The sequence number of this unit in the creator's swimlane. + pub seq_number: u64, + /// Timestamp of when the unit was created. + pub timestamp: Timestamp, + /// The current round exponent of the unit's creator. + pub round_exp: u8, + /// The units this unit endorses. 
+ pub endorsed: BTreeSet, } impl Debug for WireUnit { @@ -252,7 +442,6 @@ impl Debug for WireUnit { .field("panorama", self.panorama.as_ref()) .field("round_exp", &self.round_exp) .field("endorsed", &self.endorsed) - .field("round_id()", &self.round_id()) .finish() } } @@ -262,13 +451,8 @@ impl WireUnit { HashedWireUnit::new(self) } - /// Returns the time at which the round containing this unit began. - pub(crate) fn round_id(&self) -> Timestamp { - state::round_id(self.timestamp, self.round_exp) - } - /// Returns the creator's previous unit. - pub(crate) fn previous(&self) -> Option<&C::Hash> { + pub fn previous(&self) -> Option<&C::Hash> { self.panorama[self.creator].correct() } @@ -279,21 +463,24 @@ impl WireUnit { } } +/// A set of endorsements for a unit. #[derive(Clone, DataSize, Debug, Eq, PartialEq, Serialize, Deserialize, Hash)] #[serde(bound( serialize = "C::Hash: Serialize", deserialize = "C::Hash: Deserialize<'de>", ))] -pub(crate) struct Endorsements +pub struct Endorsements where C: Context, { - pub(crate) unit: C::Hash, - pub(crate) endorsers: Vec<(ValidatorIndex, C::Signature)>, + /// The endorsed unit. + pub unit: C::Hash, + /// The endorsements for the unit. + pub endorsers: Vec<(ValidatorIndex, C::Signature)>, } impl Endorsements { - /// Returns hash of the endorsed vode. + /// Returns hash of the endorsed vote. pub fn unit(&self) -> &C::Hash { &self.unit } @@ -320,7 +507,7 @@ impl From> for Endorsements { serialize = "C::Hash: Serialize", deserialize = "C::Hash: Deserialize<'de>", ))] -pub(crate) struct Ping +pub struct Ping where C: Context, { @@ -348,12 +535,12 @@ impl Ping { } /// The creator who signals that it is online. - pub(crate) fn creator(&self) -> ValidatorIndex { + pub fn creator(&self) -> ValidatorIndex { self.creator } /// The timestamp when the ping was created. 
- pub(crate) fn timestamp(&self) -> Timestamp { + pub fn timestamp(&self) -> Timestamp { self.timestamp } diff --git a/node/src/components/consensus/highway_core/highway_testing.rs b/node/src/components/consensus/highway_core/highway_testing.rs index 2780925dd5..3bf85ed3bf 100644 --- a/node/src/components/consensus/highway_core/highway_testing.rs +++ b/node/src/components/consensus/highway_core/highway_testing.rs @@ -1,10 +1,9 @@ -#![allow(clippy::integer_arithmetic)] // In tests, overflows panic anyway. +#![allow(clippy::arithmetic_side_effects)] // In tests, overflows panic anyway. use std::{ collections::{hash_map::DefaultHasher, HashMap, VecDeque}, fmt::{self, Debug, Display, Formatter}, hash::{Hash, Hasher}, - iter::FromIterator, }; use datasize::DataSize; @@ -14,6 +13,8 @@ use rand::Rng; use serde::{Deserialize, Serialize}; use tracing::{trace, warn}; +use casper_types::{TimeDiff, Timestamp}; + use super::{ active_validator::Effect, finality_detector::{FinalityDetector, FttExceeded}, @@ -22,8 +23,6 @@ use super::{ ValidVertex, Vertex, VertexError, }, state::Fault, - validators::Validators, - Weight, }; use crate::{ components::consensus::{ @@ -36,41 +35,30 @@ use crate::{ queue::QueueEntry, }, traits::{ConsensusValueT, Context, ValidatorSecret}, + utils::{Validators, Weight}, BlockContext, }, - types::Timestamp, NodeRng, }; -type ConsensusValue = Vec; +#[derive(Eq, PartialEq, Clone, Debug, Hash, Serialize, Deserialize, DataSize, Default)] +pub(crate) struct ConsensusValue(Vec); impl ConsensusValueT for ConsensusValue { - type Hash = u64; - fn needs_validation(&self) -> bool { - !self.is_empty() - } - - fn hash(&self) -> Self::Hash { - let mut hasher = DefaultHasher::new(); - std::hash::Hash::hash(&self, &mut hasher); - hasher.finish() - } - - fn timestamp(&self) -> Timestamp { - 0.into() // Not relevant for highway_core tests. + !self.0.is_empty() } +} - fn parent(&self) -> Option<&Self::Hash> { - None // Not relevant for highway_core tests. 
+impl Display for ConsensusValue { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!(f, "{:10}", HexFmt(&self.0)) } } -const TEST_MIN_ROUND_EXP: u8 = 12; -const TEST_MAX_ROUND_EXP: u8 = 19; +const TEST_MIN_ROUND_LEN: TimeDiff = TimeDiff::from_millis(1 << 12); +const TEST_MAX_ROUND_LEN: TimeDiff = TimeDiff::from_millis(1 << 19); const TEST_END_HEIGHT: u64 = 100000; -pub(crate) const TEST_BLOCK_REWARD: u64 = 1_000_000_000_000; -pub(crate) const TEST_REDUCED_BLOCK_REWARD: u64 = 200_000_000_000; pub(crate) const TEST_INSTANCE_ID: u64 = 42; pub(crate) const TEST_ENDORSEMENT_EVIDENCE_LIMIT: u64 = 20; @@ -78,12 +66,12 @@ pub(crate) const TEST_ENDORSEMENT_EVIDENCE_LIMIT: u64 = 20; enum HighwayMessage { Timer(Timestamp), NewVertex(Box>), - RequestBlock(BlockContext), + RequestBlock(BlockContext), WeAreFaulty(Box>), } impl Debug for HighwayMessage { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { match self { HighwayMessage::Timer(t) => f.debug_tuple("Timer").field(&t.millis()).finish(), HighwayMessage::RequestBlock(bc) => f @@ -114,8 +102,12 @@ impl HighwayMessage { } } - fn is_new_vertex(&self) -> bool { - matches!(self, HighwayMessage::NewVertex(_)) + fn is_new_unit(&self) -> bool { + if let HighwayMessage::NewVertex(vertex) = self { + matches!(**vertex, Vertex::Unit(_)) + } else { + false + } } } @@ -126,7 +118,7 @@ impl From> for HighwayMessage { // validators so for them it's just `Vertex` that needs to be validated. Effect::NewVertex(ValidVertex(v)) => HighwayMessage::NewVertex(Box::new(v)), Effect::ScheduleTimer(t) => HighwayMessage::Timer(t), - Effect::RequestNewBlock { block_context, .. 
} => { + Effect::RequestNewBlock(block_context, _expiry) => { HighwayMessage::RequestBlock(block_context) } Effect::WeAreFaulty(fault) => HighwayMessage::WeAreFaulty(Box::new(fault)), @@ -136,7 +128,7 @@ impl From> for HighwayMessage { impl PartialOrd for HighwayMessage { fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(&other)) + Some(self.cmp(other)) } } @@ -161,7 +153,7 @@ pub(crate) enum TestRunError { } impl Display for TestRunError { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { match self { TestRunError::NoMessages => write!( f, @@ -181,11 +173,11 @@ impl Display for TestRunError { enum Distribution { Uniform, - // TODO: Poisson(f64), https://casperlabs.atlassian.net/browse/HWY-116 + // TODO: Poisson(f64) } impl Distribution { - /// Returns vector of `count` elements of random values between `lower` and `uppwer`. + /// Returns vector of `count` elements of random values between `lower` and `upper`. fn gen_range_vec(&self, rng: &mut NodeRng, lower: u64, upper: u64, count: u8) -> Vec { match self { Distribution::Uniform => (0..count).map(|_| rng.gen_range(lower..upper)).collect(), @@ -198,7 +190,7 @@ trait DeliveryStrategy { &mut self, rng: &mut NodeRng, message: &HighwayMessage, - distributon: &Distribution, + distribution: &Distribution, base_delivery_timestamp: Timestamp, ) -> DeliverySchedule; } @@ -252,7 +244,20 @@ impl HighwayValidator { } } } - None | Some(DesFault::TemporarilyMute { .. }) | Some(DesFault::PermanentlyMute) => { + Some(DesFault::PermanentlyMute) => { + // For mute validators we add it to the state but not gossip. 
+ match msg { + HighwayMessage::NewVertex(_) => { + warn!("Validator is mute – won't gossip vertices in response"); + vec![] + } + HighwayMessage::Timer(_) | HighwayMessage::RequestBlock(_) => vec![msg], + HighwayMessage::WeAreFaulty(ev) => { + panic!("validator equivocated unexpectedly: {:?}", ev); + } + } + } + None | Some(DesFault::TemporarilyMute { .. }) => { // Honest validator. match &msg { HighwayMessage::NewVertex(_) @@ -273,8 +278,8 @@ impl HighwayValidator { // strategies. let mut wunit2 = swunit.wire_unit().clone(); match wunit2.value.as_mut() { - None => wunit2.timestamp += 1.into(), - Some(v) => v.push(0), + None => wunit2.timestamp += TimeDiff::from_millis(1), + Some(v) => v.0.push(0), } let secret = TestSecret(wunit2.creator.0.into()); let hwunit2 = wunit2.into_hashed(); @@ -391,8 +396,8 @@ where /// Helper for getting validator from the underlying virtual net. fn node_mut(&mut self, validator_id: &ValidatorId) -> TestResult<&mut HighwayNode> { self.virtual_net - .node_mut(&validator_id) - .ok_or_else(|| TestRunError::MissingValidator(*validator_id)) + .node_mut(validator_id) + .ok_or(TestRunError::MissingValidator(*validator_id)) } fn call_validator( @@ -450,7 +455,7 @@ where delivery_time, )? 
{ Ok(msgs) => { - trace!("{:?} successfuly added to the state.", v); + trace!("{:?} successfully added to the state.", v); msgs } Err((v, error)) => { @@ -497,7 +502,7 @@ where for FinalizedBlock { value, timestamp: _, - height, + relative_height, terminal_block_data, equivocators: _, proposer: _, @@ -511,11 +516,9 @@ where "" }, value, - height + relative_height, ); - if let Some(t) = terminal_block_data { - warn!(?t.rewards, "rewards and inactive validators are not verified yet"); - } + recipient.push_finalized(value); } @@ -686,13 +689,13 @@ fn crank_until_time( crank_until(hth, rng, |hth| { hth.virtual_net .peek_message() - .map_or(true, |qe| qe.delivery_time > timestamp) + .is_none_or(|qe| qe.delivery_time > timestamp) }) } struct MutableHandle<'a, DS: DeliveryStrategy>(&'a mut HighwayTestHarness); -impl<'a, DS: DeliveryStrategy> MutableHandle<'a, DS> { +impl MutableHandle<'_, DS> { /// Drops all messages from the queue. fn clear_message_queue(&mut self) { self.0.virtual_net.empty_queue(); @@ -713,11 +716,9 @@ impl<'a, DS: DeliveryStrategy> MutableHandle<'a, DS> { fn test_params() -> Params { Params::new( 0, // random seed - TEST_BLOCK_REWARD, - TEST_REDUCED_BLOCK_REWARD, - TEST_MIN_ROUND_EXP, - TEST_MAX_ROUND_EXP, - TEST_MIN_ROUND_EXP, + TEST_MIN_ROUND_LEN, + TEST_MAX_ROUND_LEN, + TEST_MIN_ROUND_LEN, TEST_END_HEIGHT, Timestamp::zero(), Timestamp::zero(), // Length depends only on block number. 
@@ -768,17 +769,17 @@ impl DeliveryStrategy for InstantDeliveryNoDropping { &mut self, _rng: &mut NodeRng, message: &HighwayMessage, - _distributon: &Distribution, + _distribution: &Distribution, base_delivery_timestamp: Timestamp, ) -> DeliverySchedule { match message { HighwayMessage::RequestBlock(bc) => DeliverySchedule::AtInstant(bc.timestamp()), HighwayMessage::Timer(t) => DeliverySchedule::AtInstant(*t), HighwayMessage::NewVertex(_) => { - DeliverySchedule::AtInstant(base_delivery_timestamp + 1.into()) + DeliverySchedule::AtInstant(base_delivery_timestamp + TimeDiff::from_millis(1)) } HighwayMessage::WeAreFaulty(_) => { - DeliverySchedule::AtInstant(base_delivery_timestamp + 1.into()) + DeliverySchedule::AtInstant(base_delivery_timestamp + TimeDiff::from_millis(1)) } } } @@ -842,7 +843,7 @@ impl HighwayTestHarnessBuilder { fn build(self, rng: &mut NodeRng) -> Result, BuilderError> { let consensus_values = (0..self.consensus_values_count) - .map(|el| vec![el]) + .map(|el| ConsensusValue(vec![el])) .collect::>(); let instance_id = 0; @@ -872,7 +873,7 @@ impl HighwayTestHarnessBuilder { // At least 2 validators total and at least one faulty. let faulty_num = rng.gen_range(1..self.max_faulty_validators + 1); - // Randomly (but within chosed range) assign weights to faulty nodes. + // Randomly (but within chosen range) assign weights to faulty nodes. 
let faulty_weights = self .weight_distribution .gen_range_vec(rng, lower, upper, faulty_num); @@ -882,8 +883,7 @@ impl HighwayTestHarnessBuilder { let honest_weights = { let faulty_sum = faulty_weights.iter().sum::(); let mut weights_to_distribute: u64 = - (faulty_sum * 100 + self.faulty_percent - 1) / self.faulty_percent - - faulty_sum; + (faulty_sum * 100).div_ceil(self.faulty_percent) - faulty_sum; let mut weights = vec![]; while weights_to_distribute > 0 { let weight = if weights_to_distribute < upper { @@ -909,13 +909,12 @@ impl HighwayTestHarnessBuilder { .chain(honest_weights.iter()) .sum::(); - let validators: Validators = Validators::from_iter( - faulty_weights - .iter() - .chain(honest_weights.iter()) - .enumerate() - .map(|(i, weight)| (ValidatorId(i as u64), *weight)), - ); + let validators: Validators = faulty_weights + .iter() + .chain(honest_weights.iter()) + .enumerate() + .map(|(i, weight)| (ValidatorId(i as u64), *weight)) + .collect(); trace!("Weights: {:?}", validators.iter().collect::>()); @@ -935,7 +934,8 @@ impl HighwayTestHarnessBuilder { |(vid, secrets): (ValidatorId, &mut HashMap)| { let v_sec = secrets.remove(&vid).expect("Secret key should exist."); - let mut highway = Highway::new(instance_id, validators.clone(), params.clone()); + let mut highway = + Highway::new(instance_id, validators.clone(), params.clone(), None); let effects = highway.activate_validator(vid, v_sec, start_time, None, Weight(ftt)); let finality_detector = FinalityDetector::new(Weight(ftt)); @@ -1008,7 +1008,7 @@ pub(crate) struct SignatureWrapper(u64); impl Debug for SignatureWrapper { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - write!(f, "{:10}", HexFmt(self.0.to_le_bytes())) + write!(f, "{:10}", HexFmt(&self.0.to_le_bytes())) } } @@ -1019,12 +1019,12 @@ pub(crate) struct HashWrapper(u64); impl Debug for HashWrapper { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - write!(f, "{:10}", HexFmt(self.0.to_le_bytes())) + write!(f, "{:10}", 
HexFmt(&self.0.to_le_bytes())) } } impl Display for HashWrapper { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { Debug::fmt(self, f) } } @@ -1067,18 +1067,16 @@ mod test_harness { use itertools::Itertools; + use casper_types::Timestamp; + use super::{ crank_until, crank_until_finalized, crank_until_time, test_params, ConsensusValue, HighwayTestHarness, HighwayTestHarnessBuilder, InstantDeliveryNoDropping, TestRunError, - TEST_MIN_ROUND_EXP, + TEST_MIN_ROUND_LEN, }; use crate::{ - components::consensus::{ - highway_core::state, - tests::consensus_des_testing::{Fault as DesFault, ValidatorId}, - }, + components::consensus::tests::consensus_des_testing::{Fault as DesFault, ValidatorId}, logging, - types::Timestamp, }; use logging::{LoggingConfig, LoggingFormat}; @@ -1136,28 +1134,27 @@ mod test_harness { let handle = highway_test_harness.mutable_handle(); let validators = handle.validators(); - let (finalized_values, vertices_produced): (Vec>, Vec) = - validators - .map(|v| { - ( - v.finalized_values().cloned().collect::>(), - v.messages_produced() - .cloned() - .filter(|hwm| hwm.is_new_vertex()) - .count(), - ) - }) - .unzip(); - - vertices_produced + let (finalized_values, units_produced): (Vec>, Vec) = validators + .map(|v| { + ( + v.finalized_values().cloned().collect::>(), + v.messages_produced() + .filter(|&hwm| hwm.is_new_unit()) + .cloned() + .count(), + ) + }) + .unzip(); + + units_produced .into_iter() .enumerate() - .for_each(|(v_idx, vertices_count)| { + .for_each(|(v_idx, units_count)| { // NOTE: Works only when all validators are honest and correct (no "mute" - // validators). Validator produces two `NewVertex` type messages per round. It may + // validators). Validator produces two units per round. It may // produce just one before lambda message is finalized. Add one in case it's just // one round (one consensus value) – 1 message. 1/2=0 but 3/2=1 b/c of the rounding. 
- let rounds_participated_in = (vertices_count as u8 + 1) / 2; + let rounds_participated_in = (units_count as u8 + 1) / 2; assert_eq!( rounds_participated_in, cv_count, @@ -1274,15 +1271,14 @@ mod test_harness { let mut rng = crate::new_rng(); let cv_count = 10u8; - let max_round_exp = TEST_MIN_ROUND_EXP + 1; - let max_round_len = state::round_len(max_round_exp); + let max_round_len = TEST_MIN_ROUND_LEN * 2; let start_mute = Timestamp::zero() + max_round_len * 2; let should_start_pause = start_mute + max_round_len * 4; let stop_mute = should_start_pause + max_round_len * 3; let params = test_params() - .with_max_round_exp(max_round_exp) + .with_max_round_len(max_round_len) .with_end_height(cv_count as u64); let mut test_harness = HighwayTestHarnessBuilder::new() .max_faulty_validators(3) diff --git a/node/src/components/consensus/highway_core/state.rs b/node/src/components/consensus/highway_core/state.rs index c9aaf9cff5..8e01f28358 100644 --- a/node/src/components/consensus/highway_core/state.rs +++ b/node/src/components/consensus/highway_core/state.rs @@ -1,45 +1,46 @@ mod block; +mod index_panorama; mod panorama; mod params; mod tallies; mod unit; -mod weight; #[cfg(test)] pub(crate) mod tests; pub(crate) use params::Params; use quanta::Clock; -pub(crate) use weight::Weight; +use serde::{Deserialize, Serialize}; -pub(crate) use panorama::{Observation, Panorama}; +pub(crate) use index_panorama::{IndexObservation, IndexPanorama}; +pub use panorama::{Observation, Panorama}; pub(super) use unit::Unit; use std::{ borrow::Borrow, collections::{BTreeMap, BTreeSet, HashMap, HashSet}, - convert::identity, iter, }; use datasize::DataSize; use itertools::Itertools; -use rand::{Rng, SeedableRng}; -use rand_chacha::ChaCha8Rng; use thiserror::Error; use tracing::{error, info, trace, warn}; +use casper_types::{TimeDiff, Timestamp}; + use crate::{ components::consensus::{ highway_core::{ endorsement::{Endorsement, SignedEndorsement}, evidence::Evidence, highway::{Endorsements, 
HashedWireUnit, SignedWireUnit, WireUnit}, - validators::{ValidatorIndex, ValidatorMap}, + ENABLE_ENDORSEMENTS, }, traits::Context, + utils::{ValidatorIndex, ValidatorMap, Weight}, + LeaderSequence, }, - types::{TimeDiff, Timestamp}, utils::ds, }; use block::Block; @@ -47,7 +48,7 @@ use tallies::Tallies; // TODO: The restart mechanism only persists and loads our own latest unit, so that we don't // equivocate after a restart. It doesn't yet persist our latest endorsed units, so we could -// accidentally endorse conflicting votes. Fix this and enable slashing for conflicting +// accidentally endorse conflicting votes. Fix this and enable detecting conflicting // endorsements again. pub(super) const TODO_ENDORSEMENT_EVIDENCE_DISABLED: bool = true; @@ -55,7 +56,7 @@ pub(super) const TODO_ENDORSEMENT_EVIDENCE_DISABLED: bool = true; /// from them. const PING_TIMEOUT: u64 = 3; -#[derive(Debug, Error, PartialEq, Clone)] +#[derive(Debug, Error, Eq, PartialEq, Clone)] pub(crate) enum UnitError { #[error("The unit is a ballot but doesn't cite any block.")] MissingBlock, @@ -77,12 +78,10 @@ pub(crate) enum UnitError { InstanceId, #[error("The signature is invalid.")] Signature, - #[error("The round length exponent has somehow changed within a round.")] - RoundLengthExpChangedWithinRound, - #[error("The round length exponent is less than the minimum allowed by the chain-spec.")] - RoundLengthExpLessThanMinimum, - #[error("The round length exponent is greater than the maximum allowed by the chain-spec.")] - RoundLengthExpGreaterThanMaximum, + #[error("The round length has somehow changed within a round.")] + RoundLengthChangedWithinRound, + #[error("The round length is greater than the maximum allowed by the chainspec.")] + RoundLengthGreaterThanMaximum, #[error("This would be the third unit in that round. 
Only two are allowed.")] ThreeUnitsInRound, #[error( @@ -111,13 +110,13 @@ pub(crate) enum UnitError { /// The `Banned` state is fixed from the beginning and can't be replaced. However, `Indirect` can /// be replaced with `Direct` evidence, which has the same effect but doesn't rely on information /// from other consensus protocol instances. -#[derive(Clone, DataSize, Debug, Eq, PartialEq, Hash)] -pub(crate) enum Fault +#[derive(Clone, DataSize, Debug, Deserialize, Eq, PartialEq, Hash, Serialize)] +pub enum Fault where C: Context, { - /// The validator was known to be faulty from the beginning. All their messages are considered - /// invalid in this Highway instance. + /// The validator was known to be malicious from the beginning. All their messages are + /// considered invalid in this Highway instance. Banned, /// We have direct evidence of the validator's fault. Direct(Evidence), @@ -126,7 +125,8 @@ where } impl Fault { - pub(crate) fn evidence(&self) -> Option<&Evidence> { + /// Returns the evidence included in this `Fault`. + pub fn evidence(&self) -> Option<&Evidence> { match self { Fault::Banned | Fault::Indirect => None, Fault::Direct(ev) => Some(ev), @@ -139,8 +139,8 @@ impl Fault { /// Both observers and active validators must instantiate this, pass in all incoming vertices from /// peers, and use a [FinalityDetector](../finality_detector/struct.FinalityDetector.html) to /// determine the outcome of the consensus process. -#[derive(Debug, Clone, DataSize)] -pub(crate) struct State +#[derive(Debug, Clone, DataSize, Serialize, Deserialize)] +pub struct State where C: Context, { @@ -148,9 +148,8 @@ where params: Params, /// The validator's voting weights. weights: ValidatorMap, - /// Cumulative validator weights: Entry `i` contains the sum of the weights of validators `0` - /// through `i`. - cumulative_w: ValidatorMap, + /// The pseudorandom sequence of round leaders. + leader_sequence: LeaderSequence, /// All units imported so far, by hash. 
/// This is a downward closed set: A unit must only be added here once all of its dependencies /// have been added as well, and it has been fully validated. @@ -180,28 +179,29 @@ where pings: ValidatorMap, /// Clock to measure time spent in fork choice computation. #[data_size(skip)] // Not implemented for Clock; probably negligible. + #[serde(skip, default)] + // Serialization is used by external tools only, which cannot make sense of `Clock`. clock: Clock, } impl State { - pub(crate) fn new(weights: I, params: Params, banned: IB) -> State + pub(crate) fn new( + weights: I, + params: Params, + banned: IB, + cannot_propose: IB2, + ) -> State where I: IntoIterator, I::Item: Borrow, IB: IntoIterator, + IB2: IntoIterator, { let weights = ValidatorMap::from(weights.into_iter().map(|w| *w.borrow()).collect_vec()); assert!( - weights.len() > 0, + !weights.is_empty(), "cannot initialize Highway with no validators" ); - let mut sum = Weight(0); - let add = |w: &Weight| { - sum = sum.checked_add(*w).expect("total weight must be < 2^64"); - sum - }; - let cumulative_w = weights.iter().map(add).collect(); - assert!(sum > Weight(0), "total weight must not be zero"); let mut panorama = Panorama::new(weights.len()); let faults: HashMap<_, _> = banned.into_iter().map(|idx| (idx, Fault::Banned)).collect(); for idx in faults.keys() { @@ -211,13 +211,22 @@ impl State { ); panorama[*idx] = Observation::Faulty; } + let mut can_propose: ValidatorMap = weights.iter().map(|_| true).collect(); + for idx in cannot_propose { + assert!( + idx.0 < weights.len() as u32, + "invalid validator index for exclusion from leader sequence" + ); + can_propose[idx] = false; + } + let leader_sequence = LeaderSequence::new(params.seed(), &weights, can_propose); let pings = iter::repeat(params.start_timestamp()) .take(weights.len()) .collect(); State { params, weights, - cumulative_w, + leader_sequence, units: HashMap::new(), blocks: HashMap::new(), faults, @@ -230,27 +239,27 @@ impl State { } /// Returns 
the fixed parameters. - pub(crate) fn params(&self) -> &Params { + pub fn params(&self) -> &Params { &self.params } /// Returns the number of validators. - pub(crate) fn validator_count(&self) -> usize { + pub fn validator_count(&self) -> usize { self.weights.len() } /// Returns the `idx`th validator's voting weight. - pub(crate) fn weight(&self, idx: ValidatorIndex) -> Weight { + pub fn weight(&self, idx: ValidatorIndex) -> Weight { self.weights[idx] } /// Returns the map of validator weights. - pub(crate) fn weights(&self) -> &ValidatorMap { + pub fn weights(&self) -> &ValidatorMap { &self.weights } /// Returns the total weight of all validators marked faulty in this panorama. - pub(crate) fn faulty_weight_in(&self, panorama: &Panorama) -> Weight { + pub fn faulty_weight_in(&self, panorama: &Panorama) -> Weight { panorama .iter() .zip(&self.weights) @@ -260,26 +269,22 @@ impl State { } /// Returns the total weight of all known-faulty validators. - pub(crate) fn faulty_weight(&self) -> Weight { + pub fn faulty_weight(&self) -> Weight { self.faulty_weight_in(self.panorama()) } /// Returns the sum of all validators' voting weights. - pub(crate) fn total_weight(&self) -> Weight { - *self - .cumulative_w - .as_ref() - .last() - .expect("weight list cannot be empty") + pub fn total_weight(&self) -> Weight { + self.leader_sequence.total_weight() } /// Returns evidence against validator nr. `idx`, if present. - pub(crate) fn maybe_evidence(&self, idx: ValidatorIndex) -> Option<&Evidence> { + pub fn maybe_evidence(&self, idx: ValidatorIndex) -> Option<&Evidence> { self.maybe_fault(idx).and_then(Fault::evidence) } /// Returns endorsements for `unit`, if any. 
- pub(crate) fn maybe_endorsements(&self, unit: &C::Hash) -> Option> { + pub fn maybe_endorsements(&self, unit: &C::Hash) -> Option> { self.endorsements.get(unit).map(|signatures| Endorsements { unit: *unit, endorsers: signatures.iter_some().map(|(i, sig)| (i, *sig)).collect(), @@ -287,12 +292,12 @@ impl State { } /// Returns whether evidence against validator nr. `idx` is known. - pub(crate) fn has_evidence(&self, idx: ValidatorIndex) -> bool { + pub fn has_evidence(&self, idx: ValidatorIndex) -> bool { self.maybe_evidence(idx).is_some() } /// Returns whether we have all endorsements for `unit`. - pub(crate) fn has_all_endorsements>( + pub fn has_all_endorsements>( &self, unit: &C::Hash, v_ids: I, @@ -308,91 +313,90 @@ impl State { /// Returns whether we have seen enough endorsements for the unit. /// Unit is endorsed when it has endorsements from more than 50% of the validators (by weight). - pub(crate) fn is_endorsed(&self, hash: &C::Hash) -> bool { + pub fn is_endorsed(&self, hash: &C::Hash) -> bool { self.endorsements.contains_key(hash) } /// Returns hash of unit that needs to be endorsed. - pub(crate) fn needs_endorsements(&self, unit: &SignedWireUnit) -> Option { + pub fn needs_endorsements(&self, unit: &SignedWireUnit) -> Option { unit.wire_unit() .endorsed .iter() - .find(|hash| !self.endorsements.contains_key(&hash)) + .find(|hash| !self.endorsements.contains_key(hash)) .cloned() } /// Returns the timestamp of the last ping or unit received from the validator, or the start /// timestamp if we haven't received anything yet. - pub(crate) fn last_seen(&self, idx: ValidatorIndex) -> Timestamp { + pub fn last_seen(&self, idx: ValidatorIndex) -> Timestamp { self.pings[idx] } /// Marks the given validator as faulty, unless it is already banned or we have direct evidence. 
- pub(crate) fn mark_faulty(&mut self, idx: ValidatorIndex) { + pub fn mark_faulty(&mut self, idx: ValidatorIndex) { self.panorama[idx] = Observation::Faulty; self.faults.entry(idx).or_insert(Fault::Indirect); } /// Returns the fault type of validator nr. `idx`, if it is known to be faulty. - pub(crate) fn maybe_fault(&self, idx: ValidatorIndex) -> Option<&Fault> { + pub fn maybe_fault(&self, idx: ValidatorIndex) -> Option<&Fault> { self.faults.get(&idx) } /// Returns whether validator nr. `idx` is known to be faulty. - pub(crate) fn is_faulty(&self, idx: ValidatorIndex) -> bool { + pub fn is_faulty(&self, idx: ValidatorIndex) -> bool { self.faults.contains_key(&idx) } /// Returns an iterator over all faulty validators. - pub(crate) fn faulty_validators(&self) -> impl Iterator + '_ { + pub fn faulty_validators(&self) -> impl Iterator + '_ { self.faults.keys().cloned() } /// Returns an iterator over latest unit hashes from honest validators. - pub(crate) fn iter_correct_hashes(&self) -> impl Iterator { + pub fn iter_correct_hashes(&self) -> impl Iterator { self.panorama.iter_correct_hashes() } /// Returns the unit with the given hash, if present. - pub(crate) fn maybe_unit(&self, hash: &C::Hash) -> Option<&Unit> { + pub fn maybe_unit(&self, hash: &C::Hash) -> Option<&Unit> { self.units.get(hash) } /// Returns whether the unit with the given hash is known. - pub(crate) fn has_unit(&self, hash: &C::Hash) -> bool { + pub fn has_unit(&self, hash: &C::Hash) -> bool { self.units.contains_key(hash) } /// Returns the unit with the given hash. Panics if not found. - pub(crate) fn unit(&self, hash: &C::Hash) -> &Unit { + pub fn unit(&self, hash: &C::Hash) -> &Unit { self.maybe_unit(hash).expect("unit hash must exist") } /// Returns the block contained in the unit with the given hash, if present. 
- pub(crate) fn maybe_block(&self, hash: &C::Hash) -> Option<&Block> { + pub fn maybe_block(&self, hash: &C::Hash) -> Option<&Block> { self.blocks.get(hash) } /// Returns the block contained in the unit with the given hash. Panics if not found. - pub(crate) fn block(&self, hash: &C::Hash) -> &Block { + pub fn block(&self, hash: &C::Hash) -> &Block { self.maybe_block(hash).expect("block hash must exist") } /// Returns the complete protocol state's latest panorama. - pub(crate) fn panorama(&self) -> &Panorama { + pub fn panorama(&self) -> &Panorama { &self.panorama } /// Returns the leader in the specified time slot. - pub(crate) fn leader(&self, timestamp: Timestamp) -> ValidatorIndex { - let seed = self.params.seed().wrapping_add(timestamp.millis()); - // We select a random one out of the `total_weight` weight units, starting numbering at 1. - let r = Weight(leader_prng(self.total_weight().0, seed)); - // The weight units are subdivided into intervals that belong to some validator. - // `cumulative_w[i]` denotes the last weight unit that belongs to validator `i`. - // `binary_search` returns the first `i` with `cumulative_w[i] >= r`, i.e. the validator - // who owns the randomly selected weight unit. - self.cumulative_w.binary_search(&r).unwrap_or_else(identity) + /// + /// First the assignment is computed ignoring the `can_propose` flags. Only if the selected + /// leader's entry is `false`, the computation is repeated, this time with the flagged + /// validators excluded. This ensures that once the validator set has been decided, correct + /// validators' slots never get reassigned to someone else, even if after the fact someone is + /// excluded as a leader. + pub fn leader(&self, timestamp: Timestamp) -> ValidatorIndex { + self.leader_sequence.leader(timestamp.millis()) } /// Adds the unit to the protocol state. @@ -418,7 +422,7 @@ impl State { // Update the panorama. 
let unit = self.unit(&hash); let creator = unit.creator; - let new_obs = match (self.panorama.get(creator), unit.panorama.get(creator)) { + let new_obs = match (&self.panorama[creator], &unit.panorama[creator]) { (Observation::Faulty, _) => Observation::Faulty, (obs0, obs1) if obs0 == obs1 => Observation::Correct(hash), (Observation::None, _) => panic!("missing creator's previous unit"), @@ -448,7 +452,7 @@ impl State { } let idx = evidence.perpetrator(); match self.faults.get(&idx) { - Some(&Fault::Banned) | Some(&Fault::Direct(_)) => return false, + Some(&Fault::Banned | &Fault::Direct(_)) => return false, None | Some(&Fault::Indirect) => (), } // TODO: Should use Display, not Debug! @@ -479,6 +483,8 @@ impl State { .map(|vidx| self.weight(*vidx)) .sum(); // Stake required to consider unit to be endorsed. + // TODO - remove `allow` once false positive ceases. + #[allow(clippy::arithmetic_side_effects)] // False positive on `/ 2`. let threshold = self.total_weight() / 2; if endorsed > threshold { info!(%uhash, "Unit endorsed by at least 1/2 of validators."); @@ -494,7 +500,7 @@ impl State { } /// Returns whether this state already includes an endorsement of `uhash` by `vidx`. - pub(crate) fn has_endorsement(&self, uhash: &C::Hash, vidx: ValidatorIndex) -> bool { + pub fn has_endorsement(&self, uhash: &C::Hash, vidx: ValidatorIndex) -> bool { self.endorsements .get(uhash) .map(|vmap| vmap[vidx].is_some()) @@ -511,20 +517,20 @@ impl State { self.pings[creator] = self.pings[creator].max(timestamp); } - /// Returns `true` if the latest timestamp we have is less than one maximum round length older - /// than the given timestamp. - /// - /// This is to prevent ping spam: If the incoming ping is only slightly newer than a unit or - /// ping we have already received, we drop it without forwarding it to our peers. 
- pub(crate) fn has_ping(&self, creator: ValidatorIndex, timestamp: Timestamp) -> bool { - self.pings.has(creator) && self.pings[creator] + self.params.max_round_length() > timestamp + /// Returns `true` if the latest timestamp we have is older than the given timestamp. + pub fn has_ping(&self, creator: ValidatorIndex, timestamp: Timestamp) -> bool { + self.pings + .get(creator) + .is_some_and(|ping_time| *ping_time >= timestamp) } /// Returns whether the validator's latest unit or ping is at most `PING_TIMEOUT` maximum round /// lengths old. pub(crate) fn is_online(&self, vidx: ValidatorIndex, now: Timestamp) -> bool { self.pings.has(vidx) - && self.pings[vidx] + self.params.max_round_length() * PING_TIMEOUT >= now + && self.pings[vidx] + .saturating_add(self.params.max_round_length().saturating_mul(PING_TIMEOUT)) + >= now } /// Creates new `Evidence` if the new endorsements contain any that conflict with existing @@ -532,7 +538,7 @@ impl State { /// /// Endorsements must be validated before calling this: The endorsers must exist, the /// signatures must be valid and the endorsed unit must be present in `self.units`. - pub(crate) fn find_conflicting_endorsements( + pub fn find_conflicting_endorsements( &self, endorsements: &Endorsements, instance_id: &C::InstanceId, @@ -541,7 +547,7 @@ impl State { return Vec::new(); } let uhash = endorsements.unit(); - let unit = self.unit(&uhash); + let unit = self.unit(uhash); if !self.has_evidence(unit.creator) { return vec![]; // There are no equivocations, so endorsements cannot conflict. 
} @@ -551,9 +557,9 @@ impl State { let is_new_endorsement = |&&(vidx, _): &&(ValidatorIndex, _)| { if self.has_evidence(vidx) { false - } else if let Some(known_endorsements) = self.endorsements.get(&uhash) { + } else if let Some(known_endorsements) = self.endorsements.get(uhash) { known_endorsements[vidx].is_none() - } else if let Some(known_endorsements) = self.incomplete_endorsements.get(&uhash) { + } else if let Some(known_endorsements) = self.incomplete_endorsements.get(uhash) { !known_endorsements.contains_key(&vidx) } else { true @@ -579,7 +585,7 @@ impl State { let unit2 = self.unit(uhash2); let ee_limit = self.params().endorsement_evidence_limit(); self.unit(uhash2).creator == unit.creator - && !self.is_compatible(&uhash, uhash2) + && !self.is_compatible(uhash, uhash2) && unit.seq_number.saturating_add(ee_limit) >= unit2.seq_number && unit2.seq_number.saturating_add(ee_limit) >= unit.seq_number }) @@ -615,7 +621,7 @@ impl State { } /// Returns the `SignedWireUnit` with the given hash, if it is present in the state. - pub(crate) fn wire_unit( + pub fn wire_unit( &self, hash: &C::Hash, instance_id: C::InstanceId, @@ -624,6 +630,9 @@ impl State { let maybe_block = self.maybe_block(hash); let value = maybe_block.map(|block| block.value.clone()); let endorsed = unit.claims_endorsed().cloned().collect(); + #[allow(clippy::arithmetic_side_effects)] // min_round_length is guaranteed to be > 0. + let round_exp = + (unit.round_len() / self.params().min_round_length()).trailing_zeros() as u8; let wunit = WireUnit { panorama: unit.panorama.clone(), creator: unit.creator, @@ -631,7 +640,7 @@ impl State { value, seq_number: unit.seq_number, timestamp: unit.timestamp, - round_exp: unit.round_exp, + round_exp, endorsed, }; Some(SignedWireUnit { @@ -646,7 +655,7 @@ impl State { /// all of its ancestors. 
At each level the block with the highest score is selected from the /// children of the previously selected block (or from all blocks at height 0), until a block /// is reached that has no children with any votes. - pub(crate) fn fork_choice<'a>(&'a self, pan: &Panorama) -> Option<&'a C::Hash> { + pub fn fork_choice<'a>(&'a self, pan: &Panorama) -> Option<&'a C::Hash> { let start = self.clock.start(); // Collect all correct votes in a `Tallies` map, sorted by height. let to_entry = |(obs, w): (&Observation, &Weight)| { @@ -671,7 +680,8 @@ impl State { /// Returns the ancestor of the block with the given `hash`, on the specified `height`, or /// `None` if the block's height is lower than that. - pub(crate) fn find_ancestor<'a>( + /// NOTE: Panics if used on non-proposal hashes. + pub fn find_ancestor_proposal<'a>( &'a self, hash: &'a C::Hash, height: u64, @@ -683,14 +693,14 @@ impl State { if block.height == height { return Some(hash); } - #[allow(clippy::integer_arithmetic)] // block.height > height, otherwise we returned. + #[allow(clippy::arithmetic_side_effects)] // block.height > height, otherwise we returned. let diff = block.height - height; // We want to make the greatest step 2^i such that 2^i <= diff. let max_i = log2(diff) as usize; // A block at height > 0 always has at least its parent entry in skip_idx. - #[allow(clippy::integer_arithmetic)] + #[allow(clippy::arithmetic_side_effects)] let i = max_i.min(block.skip_idx.len() - 1); - self.find_ancestor(&block.skip_idx[i], height) + self.find_ancestor_proposal(&block.skip_idx[i], height) } /// Returns an error if `swunit` is invalid. 
This can be called even if the dependencies are @@ -705,11 +715,12 @@ impl State { if Some(&Fault::Banned) == self.faults.get(&creator) { return Err(UnitError::Banned); } - if wunit.round_exp < self.params.min_round_exp() { - return Err(UnitError::RoundLengthExpLessThanMinimum); - } - if wunit.round_exp > self.params.max_round_exp() { - return Err(UnitError::RoundLengthExpGreaterThanMaximum); + let rl_millis = self.params.min_round_length().millis(); + #[allow(clippy::arithmetic_side_effects)] // We check for overflow before the left shift. + if wunit.round_exp as u32 > rl_millis.leading_zeros() + || rl_millis << wunit.round_exp > self.params.max_round_length().millis() + { + return Err(UnitError::RoundLengthGreaterThanMaximum); } if wunit.value.is_none() && !wunit.panorama.has_correct() { return Err(UnitError::MissingBlock); @@ -717,7 +728,7 @@ impl State { if wunit.panorama.len() != self.validator_count() { return Err(UnitError::PanoramaLength(wunit.panorama.len())); } - if wunit.panorama.get(creator).is_faulty() { + if wunit.panorama[creator].is_faulty() { return Err(UnitError::FaultyCreator); } Ok(()) @@ -739,15 +750,21 @@ impl State { if wunit.seq_number != panorama.next_seq_num(self, creator) { return Err(UnitError::SequenceNumber); } - let r_id = round_id(timestamp, wunit.round_exp); + #[allow(clippy::arithmetic_side_effects)] // We checked for overflow in pre_validate_unit. + let round_len = + TimeDiff::from_millis(self.params.min_round_length().millis() << wunit.round_exp); + let r_id = round_id(timestamp, round_len); let maybe_prev_unit = wunit.previous().map(|vh| self.unit(vh)); if let Some(prev_unit) = maybe_prev_unit { - if prev_unit.round_exp != wunit.round_exp { - // The round exponent must not change within a round: Even with respect to the - // greater of the two exponents, a round boundary must be between the units. 
- let max_re = prev_unit.round_exp.max(wunit.round_exp); - if prev_unit.timestamp >> max_re == timestamp >> max_re { - return Err(UnitError::RoundLengthExpChangedWithinRound); + if prev_unit.round_len() != round_len { + // The round length must not change within a round: Even with respect to the + // greater of the two lengths, a round boundary must be between the units. + let max_rl = prev_unit.round_len().max(round_len); + #[allow(clippy::arithmetic_side_effects)] // max_rl is always greater than 0. + if prev_unit.timestamp.millis() / max_rl.millis() + == timestamp.millis() / max_rl.millis() + { + return Err(UnitError::RoundLengthChangedWithinRound); } } // There can be at most two units per round: proposal/confirmation and witness. @@ -757,21 +774,31 @@ impl State { } } } - // All endorsed units from the panorama of this wunit. - let endorsements_in_panorama = panorama - .iter_correct_hashes() - .flat_map(|hash| self.unit(hash).claims_endorsed()) - .collect::>(); - if endorsements_in_panorama - .iter() - .any(|&e| !wunit.endorsed.iter().any(|h| h == e)) - { - return Err(UnitError::EndorsementsNotMonotonic); + if ENABLE_ENDORSEMENTS { + // All endorsed units from the panorama of this wunit. + let endorsements_in_panorama = panorama + .iter_correct_hashes() + .flat_map(|hash| self.unit(hash).claims_endorsed()) + .collect::>(); + if endorsements_in_panorama + .iter() + .any(|&e| !wunit.endorsed.iter().any(|h| h == e)) + { + return Err(UnitError::EndorsementsNotMonotonic); + } + for hash in &wunit.endorsed { + if !wunit.panorama.sees(self, hash) { + return Err(UnitError::EndorsedButUnseen { + hash: format!("{:?}", hash), + wire_unit: format!("{:?}", wunit), + }); + } + } } if wunit.value.is_some() { // If this unit is a block, it must be the first unit in this round, its timestamp must // match the round ID, and the creator must be the round leader. 
- if maybe_prev_unit.map_or(false, |pv| pv.round_id() == r_id) + if maybe_prev_unit.is_some_and(|pv| pv.round_id() == r_id) || timestamp != r_id || self.leader(r_id) != creator { @@ -779,18 +806,10 @@ impl State { } // It's not allowed to create a child block of a terminal block. let is_terminal = |hash: &C::Hash| self.is_terminal_block(hash); - if self.fork_choice(panorama).map_or(false, is_terminal) { + if self.fork_choice(panorama).is_some_and(is_terminal) { return Err(UnitError::ValueAfterTerminalBlock); } } - for hash in &wunit.endorsed { - if !wunit.panorama.sees(self, hash) { - return Err(UnitError::EndorsedButUnseen { - hash: format!("{:?}", hash), - wire_unit: format!("{:?}", wunit), - }); - } - } match self.validate_lnc(creator, panorama, &wunit.endorsed) { None => Ok(()), Some(vidx) => Err(UnitError::LncNaiveCitation(vidx)), @@ -799,7 +818,7 @@ impl State { /// Returns `true` if the `bhash` is a block that can have no children. pub(crate) fn is_terminal_block(&self, bhash: &C::Hash) -> bool { - self.blocks.get(bhash).map_or(false, |block| { + self.blocks.get(bhash).is_some_and(|block| { block.height.saturating_add(1) >= self.params.end_height() && self.unit(bhash).timestamp >= self.params.end_timestamp() }) @@ -809,12 +828,16 @@ impl State { pub(super) fn is_correct_proposal(&self, unit: &Unit) -> bool { !self.is_faulty(unit.creator) && self.leader(unit.timestamp) == unit.creator - && unit.timestamp == round_id(unit.timestamp, unit.round_exp) + && unit.timestamp == round_id(unit.timestamp, unit.round_len) } /// Returns the hash of the message with the given sequence number from the creator of `hash`, /// or `None` if the sequence number is higher than that of the unit with `hash`. 
- fn find_in_swimlane<'a>(&'a self, hash: &'a C::Hash, seq_number: u64) -> Option<&'a C::Hash> { + pub fn find_in_swimlane<'a>( + &'a self, + hash: &'a C::Hash, + seq_number: u64, + ) -> Option<&'a C::Hash> { let unit = self.unit(hash); match unit.seq_number.checked_sub(seq_number) { None => None, // There is no unit with seq_number in our swimlane. @@ -824,7 +847,7 @@ impl State { let max_i = log2(diff) as usize; // Log is safe because diff is not zero. // Diff is not zero, so the unit has a predecessor and skip_idx is not empty. - #[allow(clippy::integer_arithmetic)] + #[allow(clippy::arithmetic_side_effects)] let i = max_i.min(unit.skip_idx.len() - 1); self.find_in_swimlane(&unit.skip_idx[i], seq_number) } @@ -833,7 +856,7 @@ impl State { /// Returns an iterator over units (with hashes) by the same creator, in reverse chronological /// order, starting with the specified unit. Panics if no unit with `uhash` exists. - pub(crate) fn swimlane<'a>( + pub fn swimlane<'a>( &'a self, uhash: &'a C::Hash, ) -> impl Iterator)> { @@ -848,10 +871,7 @@ impl State { /// Returns an iterator over all hashes of ancestors of the block `bhash`, excluding `bhash` /// itself. Panics if `bhash` is not the hash of a known block. - pub(crate) fn ancestor_hashes<'a>( - &'a self, - bhash: &'a C::Hash, - ) -> impl Iterator { + pub fn ancestor_hashes<'a>(&'a self, bhash: &'a C::Hash) -> impl Iterator { let mut next = self.block(bhash).parent(); iter::from_fn(move || { let current = next?; @@ -860,11 +880,6 @@ impl State { }) } - /// Returns `true` if the state contains no units. - pub(crate) fn is_empty(&self) -> bool { - self.units.is_empty() - } - /// Returns the number of units received. #[cfg(test)] pub(crate) fn unit_count(&self) -> usize { @@ -872,7 +887,10 @@ impl State { } /// Returns the set of units (by hash) that are endorsed and seen from the panorama. 
- pub(crate) fn seen_endorsed(&self, pan: &Panorama) -> BTreeSet { + pub fn seen_endorsed(&self, pan: &Panorama) -> BTreeSet { + if !ENABLE_ENDORSEMENTS { + return Default::default(); + }; // First we collect all units that were already seen as endorsed by earlier units. let mut result: BTreeSet = pan .iter_correct_hashes() @@ -901,8 +919,8 @@ impl State { self.incomplete_endorsements.clear(); } - /// Validates whether a unit with the given panorama and `endorsed` set satsifies the - /// Limited Naïveté Criterion (LNC). + /// Validates whether a unit with the given panorama and `endorsed` set satisfies the + /// Limited Naivety Criterion (LNC). /// Returns index of the first equivocator that was cited naively in violation of the LNC, or /// `None` if the LNC is satisfied. fn validate_lnc( @@ -911,6 +929,9 @@ impl State { panorama: &Panorama, endorsed: &BTreeSet, ) -> Option { + if !ENABLE_ENDORSEMENTS { + return None; + } let violates_lnc = |eq_idx: &ValidatorIndex| !self.satisfies_lnc_for(creator, panorama, endorsed, *eq_idx); panorama.iter_faulty().find(violates_lnc) @@ -1007,7 +1028,7 @@ impl State { let unit = self.unit(hash); match &unit.panorama[eq_idx] { Observation::Correct(eq_hash) => { - if !seen_by_endorsed(eq_hash) && !self.is_compatible(eq_hash, &naive_fork) { + if !seen_by_endorsed(eq_hash) && !self.is_compatible(eq_hash, naive_fork) { return false; } } @@ -1034,12 +1055,12 @@ impl State { /// Returns whether the unit with `hash0` sees the one with `hash1` (i.e. `hash0 ≥ hash1`), /// and sees `hash1`'s creator as correct. - pub(crate) fn sees_correct(&self, hash0: &C::Hash, hash1: &C::Hash) -> bool { + pub fn sees_correct(&self, hash0: &C::Hash, hash1: &C::Hash) -> bool { hash0 == hash1 || self.unit(hash0).panorama.sees_correct(self, hash1) } /// Returns whether the unit with `hash0` sees the one with `hash1` (i.e. `hash0 ≥ hash1`). 
- pub(crate) fn sees(&self, hash0: &C::Hash, hash1: &C::Hash) -> bool { + pub fn sees(&self, hash0: &C::Hash, hash1: &C::Hash) -> bool { hash0 == hash1 || self.unit(hash0).panorama.sees(self, hash1) } @@ -1051,11 +1072,7 @@ impl State { } /// Returns the panorama of the confirmation for the leader unit `uhash`. - pub(crate) fn confirmation_panorama( - &self, - creator: ValidatorIndex, - uhash: &C::Hash, - ) -> Panorama { + pub fn confirmation_panorama(&self, creator: ValidatorIndex, uhash: &C::Hash) -> Panorama { self.valid_panorama(creator, self.inclusive_panorama(uhash)) } @@ -1063,11 +1080,7 @@ impl State { /// to the given one. It is only modified if necessary for validity: /// * Cite `creator`'s previous unit, i.e. don't equivocate. /// * Satisfy the LNC, i.e. don't add new naively cited forks. - pub(crate) fn valid_panorama( - &self, - creator: ValidatorIndex, - mut pan: Panorama, - ) -> Panorama { + pub fn valid_panorama(&self, creator: ValidatorIndex, mut pan: Panorama) -> Panorama { // Make sure the panorama sees the creator's own previous unit. let maybe_prev_uhash = self.panorama()[creator].correct(); if let Some(prev_uhash) = maybe_prev_uhash { @@ -1098,28 +1111,26 @@ impl State { } /// Returns panorama of a unit where latest entry of the creator is that unit's hash. - pub(crate) fn inclusive_panorama(&self, uhash: &C::Hash) -> Panorama { - let unit = self.unit(&uhash); + pub fn inclusive_panorama(&self, uhash: &C::Hash) -> Panorama { + let unit = self.unit(uhash); let mut pan = unit.panorama.clone(); pan[unit.creator] = Observation::Correct(*uhash); pan } } -/// Returns the round length, given the round exponent. -pub(crate) fn round_len(round_exp: u8) -> TimeDiff { - TimeDiff::from(1_u64.checked_shl(round_exp.into()).unwrap_or(u64::MAX)) -} - -/// Returns the time at which the round with the given timestamp and round exponent began. +/// Returns the time at which the round with the given timestamp and round length began. 
/// -/// The boundaries of rounds with length `1 << round_exp` are multiples of that length, in +/// The boundaries of rounds with length `l` are multiples of that length, in /// milliseconds since the epoch. So the beginning of the current round is the greatest multiple -/// of `1 << round_exp` that is less or equal to `timestamp`. -pub(crate) fn round_id(timestamp: Timestamp, round_exp: u8) -> Timestamp { - // The greatest multiple less or equal to the timestamp is the timestamp with the last - // `round_exp` bits set to zero. - (timestamp >> round_exp) << round_exp +/// of `l` that is less or equal to `timestamp`. +pub(crate) fn round_id(timestamp: Timestamp, round_len: TimeDiff) -> Timestamp { + if round_len.millis() == 0 { + error!("called round_id with round_len 0."); + return timestamp; + } + #[allow(clippy::arithmetic_side_effects)] // Checked for division by 0 above. + Timestamp::from((timestamp.millis() / round_len.millis()) * round_len.millis()) } /// Returns the base-2 logarithm of `x`, rounded down, i.e. the greatest `i` such that @@ -1133,10 +1144,3 @@ fn log2(x: u64) -> u32 { .trailing_zeros() .saturating_sub(1) } - -/// Returns a pseudorandom `u64` betweend `1` and `upper` (inclusive). -fn leader_prng(upper: u64, seed: u64) -> u64 { - ChaCha8Rng::seed_from_u64(seed) - .gen_range(0..upper) - .saturating_add(1) -} diff --git a/node/src/components/consensus/highway_core/state/block.rs b/node/src/components/consensus/highway_core/state/block.rs index d0b2118a18..18e1629545 100644 --- a/node/src/components/consensus/highway_core/state/block.rs +++ b/node/src/components/consensus/highway_core/state/block.rs @@ -1,23 +1,24 @@ use datasize::DataSize; +use serde::{Deserialize, Serialize}; use super::State; use crate::components::consensus::traits::Context; /// A block: Chains of blocks are the consensus values in the CBC Casper sense. 
-#[derive(Clone, DataSize, Debug, Eq, PartialEq)] -pub(crate) struct Block +#[derive(Clone, DataSize, Debug, Eq, PartialEq, Serialize, Deserialize)] +pub struct Block where C: Context, { /// The total number of ancestors, i.e. the height in the blockchain. - pub(crate) height: u64, + pub height: u64, /// The payload, e.g. a list of transactions. - pub(crate) value: C::ConsensusValue, + pub value: C::ConsensusValue, /// A skip list index of the block's ancestors. /// /// For every `p = 1 << i` that divides `height`, this contains an `i`-th entry pointing to the /// ancestor with `height - p`. - pub(crate) skip_idx: Vec, + pub skip_idx: Vec, } impl Block { @@ -32,7 +33,7 @@ impl Block { Some(hash) => (state.block(&hash), vec![hash]), }; // In a trillion years, we need to make block height u128. - #[allow(clippy::integer_arithmetic)] + #[allow(clippy::arithmetic_side_effects)] let height = parent.height + 1; for i in 0..height.trailing_zeros() as usize { let ancestor = state.block(&skip_idx[i]); @@ -46,7 +47,7 @@ impl Block { } /// Returns the block's parent, or `None` if it has height 0. - pub(crate) fn parent(&self) -> Option<&C::Hash> { + pub fn parent(&self) -> Option<&C::Hash> { self.skip_idx.first() } diff --git a/node/src/components/consensus/highway_core/state/index_panorama.rs b/node/src/components/consensus/highway_core/state/index_panorama.rs new file mode 100644 index 0000000000..ee175e7a80 --- /dev/null +++ b/node/src/components/consensus/highway_core/state/index_panorama.rs @@ -0,0 +1,69 @@ +use std::fmt::Debug; + +use datasize::DataSize; +use serde::{Deserialize, Serialize}; + +use crate::components::consensus::{ + highway_core::state::{Observation, Panorama, State}, + traits::Context, + utils::ValidatorMap, +}; + +pub(crate) type IndexPanorama = ValidatorMap; + +/// The observed behavior of a validator at some point in time. 
+#[derive(Clone, Copy, DataSize, Eq, PartialEq, Serialize, Deserialize, Hash)] +pub(crate) enum IndexObservation { + /// We have evidence that the validator is faulty. + Faulty, + /// The next sequence number we need, i.e. the lowest one that is missing from our protocol + /// state. This is equal to the total number of units we have from that validator, and one more + /// than the highest sequence number we have. + NextSeq(u64), +} + +impl Debug for IndexObservation { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + IndexObservation::Faulty => write!(f, "F"), + IndexObservation::NextSeq(next_seq) => write!(f, "{:?}", next_seq), + } + } +} + +impl IndexPanorama { + /// Creates an instance of `IndexPanorama` out of a panorama. + pub(crate) fn from_panorama<'a, C: Context>( + panorama: &'a Panorama, + state: &'a State, + ) -> Self { + let mut validator_map: ValidatorMap = + ValidatorMap::from(vec![IndexObservation::NextSeq(0); panorama.len()]); + for (vid, obs) in panorama.enumerate() { + let index_obs = match obs { + Observation::None => IndexObservation::NextSeq(0), + Observation::Correct(hash) => IndexObservation::NextSeq( + state + .maybe_unit(hash) + .map_or(0, |unit| unit.seq_number.saturating_add(1)), + ), + Observation::Faulty => IndexObservation::Faulty, + }; + validator_map[vid] = index_obs; + } + validator_map + } +} + +mod specimen_support { + use crate::utils::specimen::{Cache, LargestSpecimen, SizeEstimator}; + + use super::IndexObservation; + + impl LargestSpecimen for IndexObservation { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + // This is the largest variant since the other one is empty: + IndexObservation::NextSeq(LargestSpecimen::largest_specimen(estimator, cache)) + } + } +} diff --git a/node/src/components/consensus/highway_core/state/panorama.rs b/node/src/components/consensus/highway_core/state/panorama.rs index bb746fbf69..1a3fad41fb 100644 --- 
a/node/src/components/consensus/highway_core/state/panorama.rs +++ b/node/src/components/consensus/highway_core/state/panorama.rs @@ -1,38 +1,49 @@ use std::{collections::HashSet, fmt::Debug}; -use datasize::DataSize; use itertools::Itertools; -use serde::{Deserialize, Serialize}; - -use crate::{ - components::consensus::{ - highway_core::{ - highway::Dependency, - state::{State, Unit, UnitError}, - validators::{ValidatorIndex, ValidatorMap}, - }, - traits::Context, + +use casper_types::Timestamp; + +use crate::components::consensus::{ + highway_core::{ + highway::Dependency, + state::{State, Unit, UnitError}, }, - types::Timestamp, + traits::Context, + utils::{ValidatorIndex, ValidatorMap}, }; -/// The observed behavior of a validator at some point in time. -#[derive(Clone, DataSize, Eq, PartialEq, Serialize, Deserialize, Hash)] -#[serde(bound( - serialize = "C::Hash: Serialize", - deserialize = "C::Hash: Deserialize<'de>", -))] -pub(crate) enum Observation -where - C: Context, -{ - /// No unit by that validator was observed yet. - None, - /// The validator's latest unit. - Correct(C::Hash), - /// The validator has been seen - Faulty, +#[allow(clippy::arithmetic_side_effects)] +mod relaxed { + // This module exists solely to exempt the `EnumDiscriminants` macro generated code from the + // module-wide `clippy::arithmetic_side_effects` lint. + + use datasize::DataSize; + use serde::{Deserialize, Serialize}; + use strum::EnumDiscriminants; + + use crate::components::consensus::traits::Context; + + /// The observed behavior of a validator at some point in time. + #[derive(Clone, DataSize, Eq, PartialEq, Serialize, Deserialize, Hash, EnumDiscriminants)] + #[serde(bound( + serialize = "C::Hash: Serialize", + deserialize = "C::Hash: Deserialize<'de>", + ))] + #[strum_discriminants(derive(strum::EnumIter))] + pub enum Observation + where + C: Context, + { + /// No unit by that validator was observed yet. + None, + /// The validator's latest unit. 
+ Correct(C::Hash), + /// The validator has been seen + Faulty, + } } +pub use relaxed::{Observation, ObservationDiscriminants}; impl Debug for Observation where @@ -49,28 +60,31 @@ where impl Observation { /// Returns the unit hash, if this is a correct observation. - pub(crate) fn correct(&self) -> Option<&C::Hash> { + pub fn correct(&self) -> Option<&C::Hash> { match self { Self::None | Self::Faulty => None, Self::Correct(hash) => Some(hash), } } - pub(crate) fn is_correct(&self) -> bool { + /// Returns `true` if this `Observation` is an `Observation::Correct`. + pub fn is_correct(&self) -> bool { match self { Self::None | Self::Faulty => false, Self::Correct(_) => true, } } - pub(crate) fn is_faulty(&self) -> bool { + /// Returns `true` if this `Observation` is an `Observation::Faulty`. + pub fn is_faulty(&self) -> bool { match self { Self::Faulty => true, Self::None | Self::Correct(_) => false, } } - pub(crate) fn is_none(&self) -> bool { + /// Returns `true` if this `Observation` is an `Observation::None`. + pub fn is_none(&self) -> bool { match self { Self::None => true, Self::Faulty | Self::Correct(_) => false, @@ -99,7 +113,7 @@ impl Observation { } /// The observed behavior of all validators at some point in time. -pub(crate) type Panorama = ValidatorMap>; +pub type Panorama = ValidatorMap>; impl Panorama { /// Creates a new, empty panorama. @@ -108,33 +122,30 @@ impl Panorama { } /// Returns `true` if there is at least one correct observation. - pub(crate) fn has_correct(&self) -> bool { + pub fn has_correct(&self) -> bool { self.iter().any(Observation::is_correct) } /// Returns an iterator over all honest validators' latest units. 
- pub(crate) fn iter_correct<'a>( - &'a self, - state: &'a State, - ) -> impl Iterator> { + pub fn iter_correct<'a>(&'a self, state: &'a State) -> impl Iterator> { let to_unit = move |vh: &C::Hash| state.unit(vh); self.iter_correct_hashes().map(to_unit) } /// Returns an iterator over all honest validators' latest units' hashes. - pub(crate) fn iter_correct_hashes(&self) -> impl Iterator { + pub fn iter_correct_hashes(&self) -> impl Iterator { self.iter().filter_map(Observation::correct) } /// Returns an iterator over all faulty validators' indices. - pub(crate) fn iter_faulty(&self) -> impl Iterator + '_ { + pub fn iter_faulty(&self) -> impl Iterator + '_ { self.enumerate() .filter(|(_, obs)| obs.is_faulty()) .map(|(i, _)| i) } /// Returns an iterator over all faulty validators' indices. - pub(crate) fn iter_none(&self) -> impl Iterator + '_ { + pub fn iter_none(&self) -> impl Iterator + '_ { self.enumerate() .filter(|(_, obs)| obs.is_none()) .map(|(i, _)| i) @@ -143,22 +154,24 @@ impl Panorama { /// Returns the correct sequence number for a new unit by `vidx` with this panorama. pub(crate) fn next_seq_num(&self, state: &State, vidx: ValidatorIndex) -> u64 { // In a trillion years, we need to make seq number u128. - #[allow(clippy::integer_arithmetic)] + #[allow(clippy::arithmetic_side_effects)] let add1 = |vh: &C::Hash| state.unit(vh).seq_number + 1; self[vidx].correct().map_or(0, add1) } /// Returns `true` if `self` sees the creator of `hash` as correct, and sees that unit. - pub(crate) fn sees_correct(&self, state: &State, hash: &C::Hash) -> bool { + pub fn sees_correct(&self, state: &State, hash: &C::Hash) -> bool { let unit = state.unit(hash); let can_see = |latest_hash: &C::Hash| { Some(hash) == state.find_in_swimlane(latest_hash, unit.seq_number) }; - self.get(unit.creator).correct().map_or(false, can_see) + self.get(unit.creator) + .and_then(Observation::correct) + .is_some_and(can_see) } /// Returns `true` if `self` sees the unit with the specified `hash`. 
- pub(crate) fn sees(&self, state: &State, hash_to_be_found: &C::Hash) -> bool { + pub fn sees(&self, state: &State, hash_to_be_found: &C::Hash) -> bool { let unit_to_be_found = state.unit(hash_to_be_found); let mut visited = HashSet::new(); let mut to_visit: Vec<_> = self.iter_correct_hashes().collect(); @@ -195,14 +208,14 @@ impl Panorama { /// Returns the panorama seeing all units seen by `self` with a timestamp no later than /// `timestamp`. Accusations are preserved regardless of the evidence's timestamp. - pub(crate) fn cutoff(&self, state: &State, timestamp: Timestamp) -> Panorama { + pub fn cutoff(&self, state: &State, timestamp: Timestamp) -> Panorama { let obs_cutoff = |obs: &Observation| match obs { Observation::Correct(vhash) => state .swimlane(vhash) .find(|(_, unit)| unit.timestamp <= timestamp) .map(|(vh, _)| *vh) .map_or(Observation::None, Observation::Correct), - obs @ Observation::None | obs @ Observation::Faulty => obs.clone(), + obs @ (Observation::None | Observation::Faulty) => obs.clone(), }; Panorama::from(self.iter().map(obs_cutoff).collect_vec()) } @@ -215,7 +228,7 @@ impl Panorama { /// Returns whether `self` can possibly come later in time than `other`, i.e. it can see /// every honest message and every fault seen by `other`. 
- pub(super) fn geq(&self, state: &State, other: &Panorama) -> bool { + pub fn geq(&self, state: &State, other: &Panorama) -> bool { let mut pairs_iter = self.iter().zip(other); pairs_iter.all(|(obs_self, obs_other)| obs_self.geq(state, obs_other)) } @@ -238,3 +251,29 @@ impl Panorama { Ok(()) } } + +mod specimen_support { + use crate::{ + components::consensus::ClContext, + utils::specimen::{largest_variant, Cache, LargestSpecimen, SizeEstimator}, + }; + + use super::{Observation, ObservationDiscriminants}; + + impl LargestSpecimen for Observation { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + if let Some(item) = cache.get::() { + return item.clone(); + } + + let correct = LargestSpecimen::largest_specimen(estimator, cache); + cache + .set(largest_variant(estimator, |variant| match variant { + ObservationDiscriminants::None => Observation::None, + ObservationDiscriminants::Correct => Observation::Correct(correct), + ObservationDiscriminants::Faulty => Observation::Faulty, + })) + .clone() + } + } +} diff --git a/node/src/components/consensus/highway_core/state/params.rs b/node/src/components/consensus/highway_core/state/params.rs index a3e7f52f38..531507c922 100644 --- a/node/src/components/consensus/highway_core/state/params.rs +++ b/node/src/components/consensus/highway_core/state/params.rs @@ -1,16 +1,15 @@ use datasize::DataSize; +use serde::{Deserialize, Serialize}; -use super::{round_len, TimeDiff, Timestamp}; +use super::{TimeDiff, Timestamp}; /// Protocol parameters for Highway. 
-#[derive(Debug, DataSize, Clone)] -pub(crate) struct Params { +#[derive(Debug, DataSize, Clone, Serialize, Deserialize)] +pub struct Params { seed: u64, - block_reward: u64, - reduced_block_reward: u64, - min_round_exp: u8, - max_round_exp: u8, - init_round_exp: u8, + min_round_len: TimeDiff, + max_round_len: TimeDiff, + init_round_len: TimeDiff, end_height: u64, start_timestamp: Timestamp, end_timestamp: Timestamp, @@ -23,43 +22,28 @@ impl Params { /// Arguments: /// /// * `seed`: The random seed. - /// * `block_reward`: The total reward that is paid out for a finalized block. Validator rewards - /// for finalization must add up to this number or less. This should be large enough to allow - /// very precise fractions of a block reward while still leaving space for millions of full - /// rewards in a `u64`. - /// * `reduced_block_reward`: The reduced block reward that is paid out even if the heaviest - /// summit does not exceed half the total weight. - /// * `min_round_exp`: The minimum round exponent. `1 << min_round_exp` milliseconds is the - /// minimum round length. - /// * `max_round_exp`: The maximum round exponent. `1 << max_round_exp` milliseconds is the - /// maximum round length. + /// * `min_round_len`: The minimum round length. + /// * `max_round_len`: The maximum round length. /// * `end_height`, `end_timestamp`: The last block will be the first one that has at least the /// specified height _and_ is no earlier than the specified timestamp. No children of this /// block can be proposed. 
#[allow(clippy::too_many_arguments)] // FIXME pub(crate) fn new( seed: u64, - block_reward: u64, - reduced_block_reward: u64, - min_round_exp: u8, - max_round_exp: u8, - init_round_exp: u8, + min_round_len: TimeDiff, + max_round_len: TimeDiff, + init_round_len: TimeDiff, end_height: u64, start_timestamp: Timestamp, end_timestamp: Timestamp, endorsement_evidence_limit: u64, ) -> Params { - assert!( - reduced_block_reward <= block_reward, - "reduced block reward must not be greater than the reward for a finalized block" - ); + assert_ne!(min_round_len.millis(), 0); // Highway::new_boxed uses at least 1ms. Params { seed, - block_reward, - reduced_block_reward, - min_round_exp, - max_round_exp, - init_round_exp, + min_round_len, + max_round_len, + init_round_len, end_height, start_timestamp, end_timestamp, @@ -68,62 +52,44 @@ impl Params { } /// Returns the random seed. - pub(crate) fn seed(&self) -> u64 { + pub fn seed(&self) -> u64 { self.seed } - /// Returns the total reward for a finalized block. - pub(crate) fn block_reward(&self) -> u64 { - self.block_reward + /// Returns the minimum round length. This is always greater than 0. + pub fn min_round_length(&self) -> TimeDiff { + self.min_round_len } - /// Returns the reduced block reward that is paid out even if the heaviest summit does not - /// exceed half the total weight. This is at most `block_reward`. - pub(crate) fn reduced_block_reward(&self) -> u64 { - self.reduced_block_reward + /// Returns the maximum round length. + pub fn max_round_length(&self) -> TimeDiff { + self.max_round_len } - /// Returns the minimum round exponent. `1 << self.min_round_exp()` milliseconds is the minimum - /// round length. - pub(crate) fn min_round_exp(&self) -> u8 { - self.min_round_exp - } - - /// Returns the maximum round exponent. `1 << self.max_round_exp()` milliseconds is the maximum - /// round length. 
- pub(crate) fn max_round_exp(&self) -> u8 { - self.max_round_exp - } - - /// Returns the maximum round length, corresponding to the maximum round exponent. - pub(crate) fn max_round_length(&self) -> TimeDiff { - round_len(self.max_round_exp) - } - - /// Returns the initial round exponent. - pub(crate) fn init_round_exp(&self) -> u8 { - self.init_round_exp + /// Returns the initial round length. + pub fn init_round_len(&self) -> TimeDiff { + self.init_round_len } /// Returns the minimum height of the last block. - pub(crate) fn end_height(&self) -> u64 { + pub fn end_height(&self) -> u64 { self.end_height } /// Returns the start timestamp of the era. - pub(crate) fn start_timestamp(&self) -> Timestamp { + pub fn start_timestamp(&self) -> Timestamp { self.start_timestamp } /// Returns the minimum timestamp of the last block. - pub(crate) fn end_timestamp(&self) -> Timestamp { + pub fn end_timestamp(&self) -> Timestamp { self.end_timestamp } /// Returns the maximum number of additional units included in evidence for conflicting /// endorsements. If you endorse two conflicting forks at sequence numbers that differ by more /// than this, you get away with it and are not marked faulty. 
- pub(crate) fn endorsement_evidence_limit(&self) -> u64 { + pub fn endorsement_evidence_limit(&self) -> u64 { self.endorsement_evidence_limit } } @@ -135,8 +101,8 @@ impl Params { self } - pub(crate) fn with_max_round_exp(mut self, new_max_round_exp: u8) -> Params { - self.max_round_exp = new_max_round_exp; + pub(crate) fn with_max_round_len(mut self, new_max_round_len: TimeDiff) -> Params { + self.max_round_len = new_max_round_len; self } diff --git a/node/src/components/consensus/highway_core/state/tallies.rs b/node/src/components/consensus/highway_core/state/tallies.rs index bd56a58f40..9ada27227d 100644 --- a/node/src/components/consensus/highway_core/state/tallies.rs +++ b/node/src/components/consensus/highway_core/state/tallies.rs @@ -4,10 +4,7 @@ use std::{ ops::Index, }; -use crate::components::consensus::{ - highway_core::state::{State, Weight}, - traits::Context, -}; +use crate::components::consensus::{highway_core::state::State, traits::Context, utils::Weight}; /// A tally of votes at a specific height. This is never empty: It contains at least one vote. /// @@ -68,7 +65,7 @@ impl<'a, C: Context> Tally<'a, C> { /// Adds a vote for a block to the tally, possibly updating the current maximum. fn add(&mut self, bhash: &'a C::Hash, weight: Weight) { let w = self.votes.entry(bhash).or_default(); - *w += weight; + *w = (*w).saturating_add(weight); self.max = (*w, bhash).max(self.max); } @@ -98,7 +95,9 @@ impl<'a, C: Context> Tally<'a, C> { state: &'a State, ) -> Option { let iter = self.votes.into_iter(); - Self::try_from_iter(iter.filter(|&(b, _)| state.find_ancestor(b, height) == Some(bhash))) + Self::try_from_iter( + iter.filter(|&(b, _)| state.find_ancestor_proposal(b, height) == Some(bhash)), + ) } } @@ -109,7 +108,7 @@ impl<'a, C: Context> Tally<'a, C> { /// they most recently voted for. 
pub(crate) struct Tallies<'a, C: Context>(BTreeMap>); -impl<'a, C: Context> Default for Tallies<'a, C> { +impl Default for Tallies<'_, C> { fn default() -> Self { Tallies(BTreeMap::new()) } @@ -151,8 +150,9 @@ impl<'a, C: Context> Tallies<'a, C> { } // If any block received more than 50%, a decision can be made: Either that block is // the fork choice, or we can pick its highest scoring child from `prev_tally`. + #[allow(clippy::arithmetic_side_effects)] if h_tally.max_w() > total_weight / 2 { - #[allow(clippy::integer_arithmetic)] // height < max_height, so height < u64::MAX + // height < max_height, so height < u64::MAX return Some( match prev_tally.filter_descendants(height, h_tally.max_bhash(), state) { Some(filtered) => (height + 1, filtered.max_bhash()), @@ -197,13 +197,14 @@ impl<'a, C: Context> Tallies<'a, C> { } #[cfg(test)] +#[allow(clippy::arithmetic_side_effects)] mod tests { use super::{ super::{tests::*, State}, *, }; - impl<'a> Tallies<'a, TestContext> { + impl Tallies<'_, TestContext> { /// Returns the number of tallies. pub(crate) fn len(&self) -> usize { self.0.len() diff --git a/node/src/components/consensus/highway_core/state/tests.rs b/node/src/components/consensus/highway_core/state/tests.rs index 54003a907c..0566f4ef31 100644 --- a/node/src/components/consensus/highway_core/state/tests.rs +++ b/node/src/components/consensus/highway_core/state/tests.rs @@ -1,5 +1,5 @@ #![allow(unused_qualifications)] // This is to suppress warnings originating in the test macros. -#![allow(clippy::integer_arithmetic)] // Overflows in tests would panic anyway. +#![allow(clippy::arithmetic_side_effects)] // Overflows in tests would panic anyway. 
use std::{ collections::{hash_map::DefaultHasher, BTreeSet}, @@ -7,14 +7,13 @@ use std::{ }; use datasize::DataSize; -use rand::{Rng, RngCore}; use super::*; use crate::components::consensus::{ highway_core::{ evidence::EvidenceError, highway::Dependency, - highway_testing::{TEST_BLOCK_REWARD, TEST_ENDORSEMENT_EVIDENCE_LIMIT, TEST_INSTANCE_ID}, + highway_testing::{TEST_ENDORSEMENT_EVIDENCE_LIMIT, TEST_INSTANCE_ID}, }, traits::{ConsensusValueT, ValidatorSecret}, }; @@ -33,9 +32,9 @@ pub(crate) const HANNA: ValidatorIndex = ValidatorIndex(7); pub(crate) const N: Observation = Observation::None; pub(crate) const F: Observation = Observation::Faulty; -const TEST_MIN_ROUND_EXP: u8 = 4; -const TEST_MAX_ROUND_EXP: u8 = 19; -const TEST_INIT_ROUND_EXP: u8 = 4; +const TEST_MIN_ROUND_LEN: TimeDiff = TimeDiff::from_millis(1 << 4); +const TEST_MAX_ROUND_LEN: TimeDiff = TimeDiff::from_millis(1 << 19); +const TEST_INIT_ROUND_LEN: TimeDiff = TimeDiff::from_millis(1 << 4); const TEST_ERA_HEIGHT: u64 = 5; #[derive(Clone, DataSize, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] @@ -62,20 +61,6 @@ impl ConsensusValueT for u32 { fn needs_validation(&self) -> bool { false } - - type Hash = u32; - - fn hash(&self) -> Self::Hash { - *self - } - - fn timestamp(&self) -> Timestamp { - 0.into() // Not relevant for highway_core tests. - } - - fn parent(&self) -> Option<&Self::Hash> { - None // Not relevant for highway_core tests. - } } impl Context for TestContext { @@ -136,11 +121,9 @@ impl SignedWireUnit { pub(crate) fn test_params(seed: u64) -> Params { Params::new( seed, - TEST_BLOCK_REWARD, - TEST_BLOCK_REWARD / 5, - TEST_MIN_ROUND_EXP, - TEST_MAX_ROUND_EXP, - TEST_INIT_ROUND_EXP, + TEST_MIN_ROUND_LEN, + TEST_MAX_ROUND_LEN, + TEST_INIT_ROUND_LEN, TEST_ERA_HEIGHT, Timestamp::from(0), Timestamp::from(0), @@ -151,7 +134,7 @@ pub(crate) fn test_params(seed: u64) -> Params { impl State { /// Returns a new `State` with `TestContext` parameters suitable for tests. 
pub(crate) fn new_test(weights: &[Weight], seed: u64) -> Self { - State::new(weights, test_params(seed), vec![]) + State::new(weights, test_params(seed), vec![], vec![]) } /// Adds the unit to the protocol state, or returns an error if it is invalid. @@ -185,9 +168,9 @@ fn add_unit() -> Result<(), AddUnitError> { // \ / // Carol: c0 let a0 = add_unit!(state, ALICE, 0xA; N, N, N)?; - let b0 = add_unit!(state, BOB, 48, 4u8, 0xB; N, N, N)?; - let c0 = add_unit!(state, CAROL, 49, 4u8, None; N, b0, N)?; - let b1 = add_unit!(state, BOB, 49, 4u8, None; N, b0, c0)?; + let b0 = add_unit!(state, BOB, 48, 0u8, 0xB; N, N, N)?; + let c0 = add_unit!(state, CAROL, 49, 0u8, None; N, b0, N)?; + let b1 = add_unit!(state, BOB, 49, 0u8, None; N, b0, c0)?; let _a1 = add_unit!(state, ALICE, None; a0, b1, c0)?; // Wrong sequence number: Bob hasn't produced b2 yet. @@ -198,7 +181,7 @@ fn add_unit() -> Result<(), AddUnitError> { value: None, seq_number: 3, timestamp: 51.into(), - round_exp: 4u8, + round_exp: 0u8, endorsed: BTreeSet::new(), }; let unit = SignedWireUnit::new(wunit.clone().into_hashed(), &BOB_SEC); @@ -213,18 +196,18 @@ fn add_unit() -> Result<(), AddUnitError> { // Inconsistent panorama: If you see b1, you have to see c0, too. let maybe_err = add_unit!(state, CAROL, None; N, b1, N).err().map(unit_err); assert_eq!(Some(UnitError::InconsistentPanorama(BOB)), maybe_err); - // And you can't make the round exponent too small - let maybe_err = add_unit!(state, CAROL, 50, 5u8, None; N, b1, c0) + // You can't change the round length within a round. 
+ let maybe_err = add_unit!(state, CAROL, 50, 1u8, None; N, b1, c0) .err() .map(unit_err); - assert_eq!(Some(UnitError::RoundLengthExpChangedWithinRound), maybe_err); - // And you can't make the round exponent too big - let maybe_err = add_unit!(state, CAROL, 50, 40u8, None; N, b1, c0) + assert_eq!(Some(UnitError::RoundLengthChangedWithinRound), maybe_err); + // And you can't make the round length too big + let maybe_err = add_unit!(state, CAROL, 50, 36u8, None; N, b1, c0) .err() .map(unit_err); - assert_eq!(Some(UnitError::RoundLengthExpGreaterThanMaximum), maybe_err); + assert_eq!(Some(UnitError::RoundLengthGreaterThanMaximum), maybe_err); // After the round from 48 to 64 has ended, the exponent can change. - let c1 = add_unit!(state, CAROL, 65, 5u8, None; N, b1, c0)?; + let c1 = add_unit!(state, CAROL, 65, 1u8, None; N, b1, c0)?; // Alice has not equivocated yet, and not produced message A1. let missing = panorama!(F, b1, c0).missing_dependency(&state); @@ -254,18 +237,16 @@ fn add_unit() -> Result<(), AddUnitError> { fn ban_and_mark_faulty() -> Result<(), AddUnitError> { let params = Params::new( 0, - TEST_BLOCK_REWARD, - TEST_BLOCK_REWARD / 5, - 4, - 19, - 4, + TimeDiff::from_millis(1 << 4), + TimeDiff::from_millis(1 << 19), + TimeDiff::from_millis(1 << 4), u64::MAX, Timestamp::zero(), Timestamp::from(u64::MAX), TEST_ENDORSEMENT_EVIDENCE_LIMIT, ); // Everyone already knows Alice is faulty, so she is banned. - let mut state = State::new(WEIGHTS, params, vec![ALICE]); + let mut state = State::new(WEIGHTS, params, vec![ALICE], vec![]); assert_eq!(panorama![F, N, N], *state.panorama()); assert_eq!(Some(&Fault::Banned), state.maybe_fault(ALICE)); @@ -343,6 +324,9 @@ fn fork_choice() -> Result<(), AddUnitError> { #[test] fn validate_lnc_no_equivocation() -> Result<(), AddUnitError> { + if !ENABLE_ENDORSEMENTS { + return Ok(()); + } let mut state = State::new_test(WEIGHTS, 0); // No equivocations – incoming vote doesn't violate LNC. 
@@ -361,6 +345,9 @@ fn validate_lnc_no_equivocation() -> Result<(), AddUnitError> { #[test] fn validate_lnc_fault_seen_directly() -> Result<(), AddUnitError> { + if !ENABLE_ENDORSEMENTS { + return Ok(()); + } // Equivocation cited by one honest validator in the vote's panorama. // Does NOT violate LNC. // @@ -383,6 +370,9 @@ fn validate_lnc_fault_seen_directly() -> Result<(), AddUnitError> { #[test] fn validate_lnc_one_equivocator() -> Result<(), AddUnitError> { + if !ENABLE_ENDORSEMENTS { + return Ok(()); + } // Equivocation cited by two honest validators in the vote's panorama – their votes need to // be endorsed. // @@ -418,6 +408,9 @@ fn validate_lnc_one_equivocator() -> Result<(), AddUnitError> { #[test] fn validate_lnc_two_equivocators() -> Result<(), AddUnitError> { + if !ENABLE_ENDORSEMENTS { + return Ok(()); + } // Multiple equivocators and indirect equivocations. // Votes are seen as endorsed by `state` – does not violate LNC. // @@ -459,6 +452,9 @@ fn validate_lnc_two_equivocators() -> Result<(), AddUnitError> { #[test] fn validate_lnc_own_naive_citation() -> Result<(), AddUnitError> { + if !ENABLE_ENDORSEMENTS { + return Ok(()); + } // a0'<-----+ // Alice | // a0 <--+ | @@ -497,6 +493,9 @@ fn validate_lnc_own_naive_citation() -> Result<(), AddUnitError> { #[test] fn validate_lnc_mixed_citations() -> Result<(), AddUnitError> { + if !ENABLE_ENDORSEMENTS { + return Ok(()); + } // Eric's vote should not require an endorsement as his unit e0 cites equivocator Carol before // the fork. // @@ -505,7 +504,7 @@ fn validate_lnc_mixed_citations() -> Result<(), AddUnitError> { // || | // || | // Bob b0<---------+ | - // + | | + // + | | // | | | // +c1<--+ | | // Carol c0<--+ | | @@ -539,16 +538,19 @@ fn validate_lnc_mixed_citations() -> Result<(), AddUnitError> { #[test] fn validate_lnc_transitive_endorsement() -> Result<(), AddUnitError> { + if !ENABLE_ENDORSEMENTS { + return Ok(()); + } // Endorsements should be transitive to descendants. 
// c1 doesn't have to be endorsed, it is enough that c0 is. // // Alice a0<-----------+ - // + | + // + | // b0<----+ | // Bob | // | // b0'<---+ | - // + | + // + | // Carol c0<---+c1<----+ // | // | @@ -573,16 +575,19 @@ fn validate_lnc_transitive_endorsement() -> Result<(), AddUnitError #[test] fn validate_lnc_cite_descendant_of_equivocation() -> Result<(), AddUnitError> { - // a0 cites a descendant b1 of an eqiuvocation vote (b0 and b0'). + if !ENABLE_ENDORSEMENTS { + return Ok(()); + } + // a0 cites a descendant b1 of an equivocation vote (b0 and b0'). // This is still detected as violation of the LNC. // // Alice a0<----+ - // + | + // + | // b0<---+b1<----+ | // Bob | // | // b0'<---+ | - // + | + // + | // Carol c0 | // ^ + // Dan +----------+d0 @@ -605,6 +610,9 @@ fn validate_lnc_cite_descendant_of_equivocation() -> Result<(), AddUnitError Result<(), AddUnitError> { + if !ENABLE_ENDORSEMENTS { + return Ok(()); + } // Diagram of the DAG can be found under // /resources/test/dags/validate_lnc_endorse_mix_pairs.png // @@ -640,6 +648,9 @@ fn validate_lnc_endorse_mix_pairs() -> Result<(), AddUnitError> { #[test] fn validate_lnc_shared_equiv_unit() -> Result<(), AddUnitError> { + if !ENABLE_ENDORSEMENTS { + return Ok(()); + } // Diagram of the DAG can be found under // /resources/test/dags/validate_lnc_shared_equiv_unit.png let weights = &[ @@ -685,6 +696,9 @@ fn validate_lnc_shared_equiv_unit() -> Result<(), AddUnitError> { #[test] fn validate_lnc_four_forks() -> Result<(), AddUnitError> { + if !ENABLE_ENDORSEMENTS { + return Ok(()); + } // Diagram of the DAG can be found under // /resources/test/dags/validate_lnc_four_forks.png let weights = &[ @@ -860,38 +874,26 @@ fn test_log2() { } #[test] -fn test_leader_prng() { - let mut rng = crate::new_rng(); - - // Repeat a few times to make it likely that the inner loop runs more than once. 
- for _ in 0..10 { - let upper = rng.gen_range(1..u64::MAX); - let seed = rng.next_u64(); - - // This tests that the rand crate's gen_range implementation, which is used in - // leader_prng, doesn't change, and uses this algorithm: - // https://github.com/rust-random/rand/blob/73befa480c58dd0461da5f4469d5e04c564d4de3/src/distributions/uniform.rs#L515 - let mut prng = ChaCha8Rng::seed_from_u64(seed); - let zone = upper << upper.leading_zeros(); // A multiple of upper that fits into a u64. - let expected = loop { - // Multiply a random u64 by upper. This is between 0 and u64::MAX * upper. - let prod = (prng.next_u64() as u128) * (upper as u128); - // So prod >> 64 is between 0 and upper - 1. Each interval from (N << 64) to - // (N << 64) + zone contains the same number of such values. - // If the value is in such an interval, return N + 1; otherwise retry. - if (prod as u64) < zone { - break (prod >> 64) as u64 + 1; - } - }; - - assert_eq!(expected, leader_prng(upper, seed)); - } -} - -#[test] -fn test_leader_prng_values() { - // Test a few concrete values, to detect if the ChaCha8Rng impl changes. - assert_eq!(12578764544318200737, leader_prng(u64::MAX, 42)); - assert_eq!(12358540700710939054, leader_prng(u64::MAX, 1337)); - assert_eq!(4134160578770126600, leader_prng(u64::MAX, 0x1020304050607)); +fn test_leader() { + let weights = &[Weight(3), Weight(4), Weight(5), Weight(4), Weight(5)]; + + // All five validators get slots in the leader sequence. If 1, 2 and 4 are excluded, their slots + // get reassigned, but 0 and 3 keep their old slots. 
+ let before = vec![0, 2, 4, 3, 3, 1, 2, 1, 0, 0, 0, 2, 0, 2, 3, 2, 3, 3, 1, 2]; + let after = vec![0, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, 3]; + let excluded = vec![ValidatorIndex(1), ValidatorIndex(2), ValidatorIndex(4)]; + let state = State::::new(weights, test_params(0), vec![], vec![]); + assert_eq!( + before, + (0..20u64) + .map(|r_id| state.leader(r_id.into()).0) + .collect_vec() + ); + let state = State::::new(weights, test_params(0), vec![], excluded); + assert_eq!( + after, + (0..20u64) + .map(|r_id| state.leader(r_id.into()).0) + .collect_vec() + ); } diff --git a/node/src/components/consensus/highway_core/state/unit.rs b/node/src/components/consensus/highway_core/state/unit.rs index 009893fd87..e6f6f1fe16 100644 --- a/node/src/components/consensus/highway_core/state/unit.rs +++ b/node/src/components/consensus/highway_core/state/unit.rs @@ -1,57 +1,55 @@ use std::collections::BTreeSet; use datasize::DataSize; +use serde::{Deserialize, Serialize}; -use crate::{ - components::consensus::{ - highway_core::{ - highway::SignedWireUnit, - state::{self, Panorama, State}, - validators::ValidatorIndex, - }, - traits::Context, +use casper_types::{TimeDiff, Timestamp}; + +use crate::components::consensus::{ + highway_core::{ + highway::SignedWireUnit, + state::{self, Panorama, State}, }, - types::{TimeDiff, Timestamp}, + traits::Context, + utils::ValidatorIndex, }; /// A unit sent to or received from the network. /// /// This is only instantiated when it gets added to a `State`, and only once it has been validated. -#[derive(Clone, DataSize, Debug, Eq, PartialEq)] -pub(crate) struct Unit +#[derive(Clone, DataSize, Debug, Eq, PartialEq, Serialize, Deserialize)] +pub struct Unit where C: Context, { /// The list of latest units and faults observed by the creator of this message. /// The panorama must be valid, and this unit's creator must not be marked as faulty. 
- pub(crate) panorama: Panorama, + pub panorama: Panorama, /// The number of earlier messages by the same creator. /// This must be `0` if the creator's entry in the panorama is `None`. Otherwise it must be /// the previous unit's sequence number plus one. - pub(crate) seq_number: u64, + pub seq_number: u64, /// The validator who created and sent this unit. - pub(crate) creator: ValidatorIndex, + pub creator: ValidatorIndex, /// The block this unit votes for. Either it or its parent must be the fork choice. - pub(crate) block: C::Hash, + pub block: C::Hash, /// A skip list index of the creator's swimlane, i.e. the previous unit by the same creator. /// /// For every `p = 1 << i` that divides `seq_number`, this contains an `i`-th entry pointing to /// the older unit with `seq_number - p`. - pub(crate) skip_idx: Vec, + pub skip_idx: Vec, /// This unit's timestamp, in milliseconds since the epoch. This must not be earlier than the /// timestamp of any unit cited in the panorama. - pub(crate) timestamp: Timestamp, + pub timestamp: Timestamp, /// Original signature of the `SignedWireUnit`. - pub(crate) signature: C::Signature, - /// The round exponent of the current round, that this message belongs to. + pub signature: C::Signature, + /// The length of the current round, that this message belongs to. /// - /// The current round consists of all timestamps that agree with this one in all but the last - /// `round_exp` bits. All cited units by `creator` in the same round must have the same round - /// exponent. - pub(crate) round_exp: u8, + /// All cited units by `creator` in the same round must have the same round length. + pub round_len: TimeDiff, /// Units that this one claims are endorsed. /// All of these must be cited (directly or indirectly) by the panorama. 
- pub(crate) endorsed: BTreeSet, + pub endorsed: BTreeSet, } impl Unit { @@ -78,13 +76,16 @@ impl Unit { .expect("nonempty panorama has nonempty fork choice") }; let mut skip_idx = Vec::new(); - if let Some(hash) = wunit.panorama.get(wunit.creator).correct() { + if let Some(hash) = wunit.panorama[wunit.creator].correct() { skip_idx.push(*hash); for i in 0..wunit.seq_number.trailing_zeros() as usize { let old_unit = state.unit(&skip_idx[i]); skip_idx.push(old_unit.skip_idx[i]); } } + #[allow(clippy::arithmetic_side_effects)] // Only called with valid units. + let round_len = + TimeDiff::from_millis(state.params().min_round_length().millis() << wunit.round_exp); let unit = Unit { panorama: wunit.panorama, seq_number: wunit.seq_number, @@ -93,25 +94,25 @@ impl Unit { skip_idx, timestamp: wunit.timestamp, signature, - round_exp: wunit.round_exp, + round_len, endorsed: wunit.endorsed, }; (unit, wunit.value) } /// Returns the creator's previous unit. - pub(crate) fn previous(&self) -> Option<&C::Hash> { + pub fn previous(&self) -> Option<&C::Hash> { self.skip_idx.first() } /// Returns the time at which the round containing this unit began. - pub(crate) fn round_id(&self) -> Timestamp { - state::round_id(self.timestamp, self.round_exp) + pub fn round_id(&self) -> Timestamp { + state::round_id(self.timestamp, self.round_len) } /// Returns the length of the round containing this unit. - pub(crate) fn round_len(&self) -> TimeDiff { - state::round_len(self.round_exp) + pub fn round_len(&self) -> TimeDiff { + self.round_len } /// Returns whether `unit` cites a new unit from `vidx` in the last panorama. @@ -119,7 +120,7 @@ impl Unit { /// /// NOTE: Returns `false` if `vidx` is faulty or hasn't produced any units according to the /// creator of `vhash`. 
- pub(crate) fn new_hash_obs(&self, state: &State, vidx: ValidatorIndex) -> bool { + pub fn new_hash_obs(&self, state: &State, vidx: ValidatorIndex) -> bool { let latest_obs = self.panorama[vidx].correct(); let penultimate_obs = self .previous() @@ -131,7 +132,7 @@ impl Unit { } /// Returns an iterator over units this one claims are endorsed. - pub(crate) fn claims_endorsed(&self) -> impl Iterator { + pub fn claims_endorsed(&self) -> impl Iterator { self.endorsed.iter() } } diff --git a/node/src/components/consensus/highway_core/state/weight.rs b/node/src/components/consensus/highway_core/state/weight.rs deleted file mode 100644 index 8996a28663..0000000000 --- a/node/src/components/consensus/highway_core/state/weight.rs +++ /dev/null @@ -1,74 +0,0 @@ -use std::{ - iter::Sum, - ops::{Div, Mul}, -}; - -use datasize::DataSize; -use derive_more::{Add, AddAssign, From, Sub, SubAssign, Sum}; - -/// A vote weight. -#[derive( - Copy, - Clone, - DataSize, - Default, - Debug, - PartialEq, - Eq, - PartialOrd, - Ord, - Add, - Sub, - AddAssign, - SubAssign, - Sum, - From, -)] -pub(crate) struct Weight(pub(crate) u64); - -impl Weight { - /// Checked addition. Returns `None` if overflow occurred. - pub fn checked_add(self, rhs: Weight) -> Option { - Some(Weight(self.0.checked_add(rhs.0)?)) - } - - /// Saturating addition. Returns `Weight(u64::MAX)` if overflow would occur. - pub fn saturating_add(self, rhs: Weight) -> Weight { - Weight(self.0.saturating_add(rhs.0)) - } - - /// Returns `true` if this weight is zero. - pub fn is_zero(self) -> bool { - self.0 == 0 - } -} - -impl<'a> Sum<&'a Weight> for Weight { - fn sum>(iter: I) -> Self { - Weight(iter.map(|w| w.0).sum()) - } -} - -impl Mul for Weight { - type Output = Self; - - #[allow(clippy::integer_arithmetic)] // The caller needs to prevent overflows. 
- fn mul(self, rhs: u64) -> Self { - Weight(self.0 * rhs) - } -} - -impl Div for Weight { - type Output = Self; - - #[allow(clippy::integer_arithmetic)] // The caller needs to avoid dividing by zero. - fn div(self, rhs: u64) -> Self { - Weight(self.0 / rhs) - } -} - -impl From for u128 { - fn from(Weight(w): Weight) -> u128 { - u128::from(w) - } -} diff --git a/node/src/components/consensus/highway_core/synchronizer.rs b/node/src/components/consensus/highway_core/synchronizer.rs new file mode 100644 index 0000000000..fa4a34024f --- /dev/null +++ b/node/src/components/consensus/highway_core/synchronizer.rs @@ -0,0 +1,569 @@ +use std::{ + collections::{BTreeMap, HashMap, HashSet}, + fmt::Debug, + iter, +}; + +use datasize::DataSize; +use itertools::Itertools; +use rand::{thread_rng, RngCore}; +use tracing::{debug, info, trace}; + +use casper_types::Timestamp; + +use crate::{ + components::consensus::{ + consensus_protocol::{ProposedBlock, ProtocolOutcome, ProtocolOutcomes}, + era_supervisor::SerializedMessage, + protocols::highway::{HighwayMessage, ACTION_ID_VERTEX}, + traits::Context, + utils::ValidatorMap, + }, + types::NodeId, +}; + +use super::highway::{Dependency, Highway, PreValidatedVertex, ValidVertex, Vertex}; + +#[cfg(test)] +mod tests; + +/// Incoming pre-validated vertices that we haven't added to the protocol state yet, and the +/// timestamp when we received them. +#[derive(DataSize, Debug)] +pub(crate) struct PendingVertices(HashMap, HashMap>) +where + C: Context; + +impl Default for PendingVertices { + fn default() -> Self { + PendingVertices(Default::default()) + } +} + +impl PendingVertices { + /// Removes expired vertices. 
+ fn remove_expired(&mut self, oldest: Timestamp) -> Vec { + let mut removed = vec![]; + for time_by_sender in self.0.values_mut() { + time_by_sender.retain(|_, time_received| *time_received >= oldest); + } + self.0.retain(|pvv, time_by_peer| { + if time_by_peer.is_empty() { + removed.extend(pvv.inner().unit_hash()); + false + } else { + true + } + }); + removed + } + + /// Adds a vertex, or updates its timestamp. + fn add(&mut self, sender: NodeId, pvv: PreValidatedVertex, time_received: Timestamp) { + self.0 + .entry(pvv) + .or_default() + .entry(sender) + .and_modify(|timestamp| *timestamp = (*timestamp).max(time_received)) + .or_insert(time_received); + } + + /// Adds a holder to the vertex that satisfies `dep`. + fn add_holder(&mut self, dep: &Dependency, sender: NodeId, time_received: Timestamp) { + if let Some((_, holders)) = self.0.iter_mut().find(|(pvv, _)| pvv.inner().id() == *dep) { + holders.entry(sender).or_insert(time_received); + } + } + + /// Adds a vertex, or updates its timestamp. + fn push(&mut self, pv: PendingVertex) { + self.add(pv.sender, pv.pvv, pv.time_received) + } + + fn pop(&mut self) -> Option> { + let pvv = self.0.keys().next()?.clone(); + let (sender, timestamp, is_empty) = { + let time_by_sender = self.0.get_mut(&pvv)?; + let sender = *time_by_sender.keys().next()?; + let timestamp = time_by_sender.remove(&sender)?; + (sender, timestamp, time_by_sender.is_empty()) + }; + if is_empty { + self.0.remove(&pvv); + } + Some(PendingVertex::new(sender, pvv, timestamp)) + } + + /// Returns whether dependency exists in the pending vertices collection. + fn contains_dependency(&self, d: &Dependency) -> bool { + self.0.keys().any(|pvv| &pvv.inner().id() == d) + } + + /// Drops all pending vertices other than evidence. + pub(crate) fn retain_evidence_only(&mut self) { + self.0.retain(|pvv, _| pvv.inner().is_evidence()); + } + + /// Returns number of unique vertices pending in the queue. 
+ pub(crate) fn len(&self) -> u64 { + self.0.len() as u64 + } + + fn is_empty(&self) -> bool { + self.0.is_empty() + } +} + +impl Iterator for PendingVertices { + type Item = PendingVertex; + + fn next(&mut self) -> Option { + self.pop() + } +} + +/// An incoming pre-validated vertex that we haven't added to the protocol state yet. +#[derive(DataSize, Debug)] +pub(crate) struct PendingVertex +where + C: Context, +{ + /// The peer who sent it to us. + sender: NodeId, + /// The pre-validated vertex. + pvv: PreValidatedVertex, + /// The time when we received it. + time_received: Timestamp, +} + +impl PendingVertex { + /// Returns a new pending vertex with the current timestamp. + pub(crate) fn new( + sender: NodeId, + pvv: PreValidatedVertex, + time_received: Timestamp, + ) -> Self { + Self { + sender, + pvv, + time_received, + } + } + + /// Returns the peer from which we received this vertex. + pub(crate) fn sender(&self) -> &NodeId { + &self.sender + } + + /// Returns the vertex waiting to be added. + pub(crate) fn vertex(&self) -> &Vertex { + self.pvv.inner() + } + + /// Returns the pre-validated vertex. + pub(crate) fn pvv(&self) -> &PreValidatedVertex { + &self.pvv + } +} + +impl From> for PreValidatedVertex { + fn from(vertex: PendingVertex) -> Self { + vertex.pvv + } +} + +#[derive(DataSize, Debug)] +pub(crate) struct Synchronizer +where + C: Context, +{ + /// Incoming vertices we can't add yet because they are still missing a dependency. + vertices_awaiting_deps: BTreeMap, PendingVertices>, + /// The vertices that are scheduled to be processed at a later time. The keys of this + /// `BTreeMap` are timestamps when the corresponding vector of vertices will be added. + vertices_to_be_added_later: BTreeMap>, + /// Vertices that might be ready to add to the protocol state: We are not currently waiting for + /// a requested dependency. + vertices_no_deps: PendingVertices, + /// Instance ID of an era for which this synchronizer is constructed. 
+ instance_id: C::InstanceId, + /// Keeps track of the lowest/oldest seen unit per validator when syncing. + /// Used only for logging. + oldest_seen_panorama: ValidatorMap>, + /// Keeps track of the requests we've sent so far and the recipients. + /// Used to decide whether we should ask more nodes for a particular dependency. + requests_sent: BTreeMap, HashSet>, + /// Boolean flag indicating whether we're synchronizing current era. + pub(crate) current_era: bool, +} + +impl Synchronizer { + /// Creates a new synchronizer with the specified timeout for pending vertices. + pub(crate) fn new(validator_len: usize, instance_id: C::InstanceId) -> Self { + Synchronizer { + vertices_awaiting_deps: BTreeMap::new(), + vertices_to_be_added_later: BTreeMap::new(), + vertices_no_deps: Default::default(), + oldest_seen_panorama: iter::repeat(None).take(validator_len).collect(), + instance_id, + requests_sent: BTreeMap::new(), + current_era: true, + } + } + + /// Removes expired pending vertices from the queues, and schedules the next purge. + pub(crate) fn purge_vertices(&mut self, oldest: Timestamp) { + info!("purging synchronizer queues"); + let no_deps_expired = self.vertices_no_deps.remove_expired(oldest); + trace!(?no_deps_expired, "expired no dependencies"); + self.requests_sent.clear(); + let to_be_added_later_expired = + Self::remove_expired(&mut self.vertices_to_be_added_later, oldest); + trace!( + ?to_be_added_later_expired, + "expired to be added later dependencies" + ); + let awaiting_deps_expired = Self::remove_expired(&mut self.vertices_awaiting_deps, oldest); + trace!(?awaiting_deps_expired, "expired awaiting dependencies"); + } + + // Returns number of elements in the `vertices_to_be_added_later` queue. + // Every pending vertex is counted once, even if it has multiple senders. 
+ fn vertices_to_be_added_later_len(&self) -> u64 { + self.vertices_to_be_added_later + .values() + .map(|pv| pv.len()) + .sum() + } + + // Returns number of elements in `vertex_deps` queue. + fn vertices_awaiting_deps_len(&self) -> u64 { + self.vertices_awaiting_deps + .values() + .map(|pv| pv.len()) + .sum() + } + + // Returns number of elements in `vertices_to_be_added` queue. + fn vertices_no_deps_len(&self) -> u64 { + self.vertices_no_deps.len() + } + + pub(crate) fn log_len(&self) { + debug!( + era_id = ?self.instance_id, + vertices_to_be_added_later = self.vertices_to_be_added_later_len(), + vertices_no_deps = self.vertices_no_deps_len(), + vertices_awaiting_deps = self.vertices_awaiting_deps_len(), + "synchronizer queue lengths" + ); + // All units seen have seq_number == 0. + let all_lowest = self + .oldest_seen_panorama + .iter() + .all(|entry| entry.map(|seq_num| seq_num == 0).unwrap_or(false)); + if all_lowest { + debug!("all seen units while synchronization with seq_num=0"); + } else { + debug!(oldest_panorama=%self.oldest_seen_panorama, "oldest seen unit per validator"); + } + } + + /// Store a (pre-validated) vertex which will be added later. This creates a timer to be sent + /// to the reactor. The vertex be added using `Self::add_vertices` when that timer goes off. + pub(crate) fn store_vertex_for_addition_later( + &mut self, + future_timestamp: Timestamp, + now: Timestamp, + sender: NodeId, + pvv: PreValidatedVertex, + ) { + self.vertices_to_be_added_later + .entry(future_timestamp) + .or_default() + .add(sender, pvv, now); + } + + /// Schedules calls to `add_vertex` on any vertices in `vertices_to_be_added_later` which are + /// scheduled for after the given `transpired_timestamp`. In general the specified `timestamp` + /// is approximately `Timestamp::now()`. Vertices keyed by timestamps chronologically before + /// `transpired_timestamp` should all be added. 
+ pub(crate) fn add_past_due_stored_vertices( + &mut self, + timestamp: Timestamp, + ) -> ProtocolOutcomes { + let mut results = vec![]; + let past_due_timestamps: Vec = self + .vertices_to_be_added_later + .range(..=timestamp) // Inclusive range + .map(|(past_due_timestamp, _)| past_due_timestamp.to_owned()) + .collect(); + for past_due_timestamp in past_due_timestamps { + if let Some(vertices_to_add) = + self.vertices_to_be_added_later.remove(&past_due_timestamp) + { + results.extend(self.schedule_add_vertices(vertices_to_add)) + } + } + results + } + + /// Schedules a vertex to be added to the protocol state. + pub(crate) fn schedule_add_vertex( + &mut self, + sender: NodeId, + pvv: PreValidatedVertex, + now: Timestamp, + ) -> ProtocolOutcomes { + self.update_last_seen(&pvv); + let pv = PendingVertex::new(sender, pvv, now); + self.schedule_add_vertices(iter::once(pv)) + } + + fn update_last_seen(&mut self, pvv: &PreValidatedVertex) { + let v = pvv.inner(); + if let (Some(v_id), Some(seq_num)) = (v.creator(), v.unit_seq_number()) { + let prev_seq_num = self.oldest_seen_panorama[v_id].unwrap_or(u64::MAX); + self.oldest_seen_panorama[v_id] = Some(prev_seq_num.min(seq_num)); + } + } + + /// Moves all vertices whose known missing dependency is now satisfied into the + /// `vertices_to_be_added` queue. + pub(crate) fn remove_satisfied_deps(&mut self, highway: &Highway) -> ProtocolOutcomes { + let satisfied_deps = self + .vertices_awaiting_deps + .keys() + .filter(|dep| highway.has_dependency(dep)) + .cloned() + .collect_vec(); + // Safe to unwrap: We know the keys exist. + // TODO: Replace with BTreeMap::drain_filter once stable. + let pvs = satisfied_deps + .into_iter() + .flat_map(|dep| { + self.requests_sent.remove(&dep); + self.vertices_awaiting_deps.remove(&dep).unwrap() + }) + .collect_vec(); + self.schedule_add_vertices(pvs) + } + + /// Pops and returns the next entry from `vertices_to_be_added` that is not yet in the protocol + /// state. 
Also returns a `ProtocolOutcome` that schedules the next action to add a vertex, + /// unless the queue is empty, and `ProtocolOutcome`s to request missing dependencies. + pub(crate) fn pop_vertex_to_add( + &mut self, + highway: &Highway, + pending_values: &HashMap, HashSet<(ValidVertex, NodeId)>>, + max_requests_for_vertex: usize, + ) -> (Option>, ProtocolOutcomes) { + let mut outcomes = Vec::new(); + // Get the next vertex to be added; skip the ones that are already in the protocol state, + // and the ones that are still missing dependencies. + loop { + let pv = match self.vertices_no_deps.pop() { + None => return (None, outcomes), + Some(pv) if highway.has_vertex(pv.vertex()) => continue, + Some(pv) => pv, + }; + if let Some(dep) = highway.missing_dependency(pv.pvv()) { + let sender = *pv.sender(); + let time_received = pv.time_received; + // Find the first dependency that `pv` needs that we haven't synchronized yet + // and request it from the sender of `pv`. Since it relies on it, it should have + // it as well. + let transitive_dependency = + self.find_transitive_dependency(dep.clone(), &sender, time_received); + if self + .vertices_no_deps + .contains_dependency(&transitive_dependency) + { + // `dep` is already downloaded and waiting in the synchronizer queue to be + // added, we don't have to request it again. Add the `pv` + // back to the queue so that it can be retried later. `dep` does not wait for + // any of the dependencies currently so it should be retried soon. + self.add_missing_dependency(dep.clone(), pv); + continue; + } + // We are still missing a dependency. Store the vertex in the map and request + // the dependency from the sender. + // Make `pv` depend on the direct dependency `dep` and not `transitive_dependency` + // since there's a higher chance of adding `pv` to the protocol + // state after `dep` is added, rather than `transitive_dependency`. 
+ self.add_missing_dependency(dep.clone(), pv); + // If we already have the dependency and it is a proposal that is currently being + // handled by the block validator, and this sender is already known as a source, + // do nothing. + if pending_values + .values() + .flatten() + .any(|(vv, s)| vv.inner().id() == transitive_dependency && s == &sender) + { + continue; + } + // If we already have the dependency and it is a proposal that is currently being + // handled by the block validator, and this sender is not yet known as a source, + // we return the proposal as if this sender had sent it to us, so they get added. + if let Some((vv, _)) = pending_values + .values() + .flatten() + .find(|(vv, _)| vv.inner().id() == transitive_dependency) + { + debug!( + dependency = ?transitive_dependency, %sender, + "adding sender as a source for proposal" + ); + let dep_pv = PendingVertex::new(sender, vv.clone().into(), time_received); + // We found the next vertex to add. + if !self.vertices_no_deps.is_empty() { + // There are still vertices in the queue: schedule next call. + outcomes.push(ProtocolOutcome::QueueAction(ACTION_ID_VERTEX)); + } + return (Some(dep_pv), outcomes); + } + // If we have already requested the dependency from this peer, or from the maximum + // number of peers, do nothing. + let entry = self + .requests_sent + .entry(transitive_dependency.clone()) + .or_default(); + if entry.len() >= max_requests_for_vertex || !entry.insert(sender) { + continue; + } + // Otherwise request the missing dependency from the sender. + let uuid = thread_rng().next_u64(); + debug!(?uuid, dependency = ?transitive_dependency, %sender, "requesting dependency"); + let msg = HighwayMessage::RequestDependency(uuid, transitive_dependency); + outcomes.push(ProtocolOutcome::CreatedTargetedMessage( + SerializedMessage::from_message(&msg), + sender, + )); + continue; + } + // We found the next vertex to add. 
+ if !self.vertices_no_deps.is_empty() { + // There are still vertices in the queue: schedule next call. + outcomes.push(ProtocolOutcome::QueueAction(ACTION_ID_VERTEX)); + } + return (Some(pv), outcomes); + } + } + + // Finds the highest missing dependency (i.e. one that we are waiting to be downloaded) and + // returns it, if any. + fn find_transitive_dependency( + &mut self, + mut missing_dependency: Dependency, + sender: &NodeId, + time_received: Timestamp, + ) -> Dependency { + // If `missing_dependency` is already downloaded and waiting for its dependency to be + // resolved, we will follow that dependency until we find "the bottom" of the + // chain – when there are no more known dependency requests scheduled, + // and we request the last one in the chain. + while let Some((next_missing, pvs)) = self + .vertices_awaiting_deps + .iter_mut() + .find(|(_, pvs)| pvs.contains_dependency(&missing_dependency)) + { + pvs.add_holder(&missing_dependency, *sender, time_received); + missing_dependency = next_missing.clone(); + } + missing_dependency + } + + /// Adds a vertex with a known missing dependency to the queue. + fn add_missing_dependency(&mut self, dep: Dependency, pv: PendingVertex) { + self.vertices_awaiting_deps.entry(dep).or_default().push(pv) + } + + #[cfg(test)] + /// Returns `true` if no vertices are in the queues. + pub(crate) fn is_empty(&self) -> bool { + self.vertices_awaiting_deps.is_empty() + && self.vertices_no_deps.is_empty() + && self.vertices_to_be_added_later.is_empty() + } + + /// Returns `true` if there are any vertices waiting for the specified dependency. + pub(crate) fn is_dependency(&self, dep: &Dependency) -> bool { + self.vertices_awaiting_deps.contains_key(dep) + } + + /// Drops all vertices that (directly or indirectly) have the specified dependencies, and + /// returns the set of their senders. If the specified dependencies are known to be invalid, + /// those senders must be faulty. 
+ pub(crate) fn invalid_vertices(&mut self, mut vertices: Vec>) -> HashSet { + let mut senders = HashSet::new(); + while !vertices.is_empty() { + let (new_vertices, new_senders) = self.do_drop_dependent_vertices(vertices); + vertices = new_vertices; + senders.extend(new_senders); + } + senders + } + + /// Drops all pending vertices other than evidence. + pub(crate) fn retain_evidence_only(&mut self) { + self.vertices_awaiting_deps.clear(); + self.vertices_to_be_added_later.clear(); + self.vertices_no_deps.retain_evidence_only(); + self.requests_sent.clear(); + } + + /// Schedules vertices to be added to the protocol state. + fn schedule_add_vertices(&mut self, pending_vertices: T) -> ProtocolOutcomes + where + T: IntoIterator>, + { + let was_empty = self.vertices_no_deps.is_empty(); + for pv in pending_vertices { + self.vertices_no_deps.push(pv); + } + if was_empty && !self.vertices_no_deps.is_empty() { + vec![ProtocolOutcome::QueueAction(ACTION_ID_VERTEX)] + } else { + Vec::new() + } + } + + /// Drops all vertices that have the specified direct dependencies, and returns their IDs and + /// senders. + fn do_drop_dependent_vertices( + &mut self, + vertices: Vec>, + ) -> (Vec>, HashSet) { + // collect the vertices that depend on the ones we got in the argument and their senders + vertices + .into_iter() + // filtering by is_unit, so that we don't drop vertices depending on invalid evidence + // or endorsements - we can still get valid ones from someone else and eventually + // satisfy the dependency + .filter(|dep| dep.is_unit()) + .flat_map(|vertex| self.vertices_awaiting_deps.remove(&vertex)) + .flatten() + .map(|pv| (pv.pvv.inner().id(), pv.sender)) + .unzip() + } + + /// Removes all expired entries from a `BTreeMap` of `Vec`s. 
+ fn remove_expired( + map: &mut BTreeMap>, + oldest: Timestamp, + ) -> Vec { + let mut expired = vec![]; + for pvs in map.values_mut() { + expired.extend(pvs.remove_expired(oldest)); + } + let keys = map + .iter() + .filter(|(_, pvs)| pvs.is_empty()) + .map(|(key, _)| key.clone()) + .collect_vec(); + for key in keys { + map.remove(&key); + } + expired + } +} diff --git a/node/src/components/consensus/highway_core/synchronizer/tests.rs b/node/src/components/consensus/highway_core/synchronizer/tests.rs new file mode 100644 index 0000000000..672264f25e --- /dev/null +++ b/node/src/components/consensus/highway_core/synchronizer/tests.rs @@ -0,0 +1,383 @@ +#![allow(clippy::arithmetic_side_effects)] + +use std::collections::BTreeSet; + +use itertools::Itertools; + +use crate::{ + components::consensus::{ + highway_core::{ + highway::{tests::test_validators, ValidVertex}, + highway_testing::TEST_INSTANCE_ID, + state::{tests::*, State}, + }, + BlockContext, + }, + types::NodeId, +}; + +use super::*; + +#[test] +fn purge_vertices() { + let params = test_params(0); + let mut state = State::new(WEIGHTS, params.clone(), vec![], vec![]); + + // We use round exponent 0u8, so a round is 0x10 ms. With seed 0, Carol is the first leader. + // + // time: 0x00 0x0A 0x1A 0x2A 0x3A + // + // Carol c0 — c1 — c2 + // \ + // Bob ————————— b0 — b1 + let c0 = add_unit!(state, CAROL, 0x00, 0u8, 0xA; N, N, N).unwrap(); + let c1 = add_unit!(state, CAROL, 0x0A, 0u8, None; N, N, c0).unwrap(); + let c2 = add_unit!(state, CAROL, 0x1A, 0u8, None; N, N, c1).unwrap(); + let b0 = add_unit!(state, BOB, 0x2A, 0u8, None; N, N, c0).unwrap(); + let b1 = add_unit!(state, BOB, 0x3A, 0u8, None; N, b0, c0).unwrap(); + + // A Highway instance that's just used to create PreValidatedVertex instances below. + let util_highway = + Highway::::new(TEST_INSTANCE_ID, test_validators(), params.clone(), None); + + // Returns the WireUnit with the specified hash. 
+ let unit = |hash: u64| Vertex::Unit(state.wire_unit(&hash, TEST_INSTANCE_ID).unwrap()); + // Returns the PreValidatedVertex with the specified hash. + let pvv = |hash: u64| util_highway.pre_validate_vertex(unit(hash)).unwrap(); + + let peer0 = NodeId::from([0; 64]); + + // Create a synchronizer with a 0x20 ms timeout, and a Highway instance. + let max_requests_for_vertex = 5; + let mut sync = Synchronizer::::new(WEIGHTS.len(), TEST_INSTANCE_ID); + let mut highway = + Highway::::new(TEST_INSTANCE_ID, test_validators(), params, None); + + // At time 0x20, we receive c2, b0 and b1 — the latter ahead of their timestamp. + // Since c2 is the first entry in the main queue, processing is scheduled. + let now = 0x20.into(); + assert!(matches!( + *sync.schedule_add_vertex(peer0, pvv(c2), now), + [ProtocolOutcome::QueueAction(ACTION_ID_VERTEX)] + )); + sync.store_vertex_for_addition_later(unit(b1).timestamp().unwrap(), now, peer0, pvv(b1)); + sync.store_vertex_for_addition_later(unit(b0).timestamp().unwrap(), now, peer0, pvv(b0)); + + // At time 0x21, we receive c1. + let now = 0x21.into(); + assert!(sync.schedule_add_vertex(peer0, pvv(c1), now).is_empty()); + + // No new vertices can be added yet, because all are missing dependencies. + // The missing dependencies of c1 and c2 are requested. + let (maybe_pv, outcomes) = + sync.pop_vertex_to_add(&highway, &Default::default(), max_requests_for_vertex); + assert!(maybe_pv.is_none()); + assert_targeted_message(&unwrap_single(outcomes), &peer0, Dependency::Unit(c0)); + + // At 0x23, c0 gets enqueued and added. + // That puts c1 back into the main queue, since its dependency is satisfied. 
+ let now = 0x23.into(); + let outcomes = sync.schedule_add_vertex(peer0, pvv(c0), now); + assert!( + matches!(*outcomes, [ProtocolOutcome::QueueAction(ACTION_ID_VERTEX)]), + "unexpected outcomes: {:?}", + outcomes + ); + let (maybe_pv, outcomes) = + sync.pop_vertex_to_add(&highway, &Default::default(), max_requests_for_vertex); + assert_eq!(Dependency::Unit(c0), maybe_pv.unwrap().vertex().id()); + assert!(outcomes.is_empty()); + let vv_c0 = highway.validate_vertex(pvv(c0)).expect("c0 is valid"); + highway.add_valid_vertex(vv_c0, now); + let outcomes = sync.remove_satisfied_deps(&highway); + assert!( + matches!(*outcomes, [ProtocolOutcome::QueueAction(ACTION_ID_VERTEX)]), + "unexpected outcomes: {:?}", + outcomes + ); + + // At time 0x2A, the vertex b0 moves into the main queue. + let now = 0x2A.into(); + assert!(sync.add_past_due_stored_vertices(now).is_empty()); + + // At 0x41, all vertices received at 0x20 are expired, but c1 (received at 0x21) isn't. + // This will remove: + // * b1: still postponed due to future timestamp + // * b0: in the main queue + // * c2: waiting for dependency c1 to be added + let purge_vertex_timeout = 0x20; + #[allow(clippy::arithmetic_side_effects)] + sync.purge_vertices((0x41 - purge_vertex_timeout).into()); + + // The main queue should now contain only c1. If we remove it, the synchronizer is empty. + let (maybe_pv, outcomes) = + sync.pop_vertex_to_add(&highway, &Default::default(), max_requests_for_vertex); + assert_eq!(Dependency::Unit(c1), maybe_pv.unwrap().vertex().id()); + assert!(outcomes.is_empty()); + assert!(sync.is_empty()); +} + +#[test] +/// Test that when a vertex depends on a dependency that has already been synchronized, and is +/// waiting in the synchronizer queue state, but is not yet added to the protocol state – that we +/// don't request it again. 
+fn do_not_download_synchronized_dependencies() { + let params = test_params(0); + // A Highway and state instances that are used to create PreValidatedVertex instances below. + + let mut state = State::new(WEIGHTS, params.clone(), vec![], vec![]); + let util_highway = + Highway::::new(TEST_INSTANCE_ID, test_validators(), params.clone(), None); + + // We use round exponent 0u8, so a round is 0x40 ms. With seed 0, Carol is the first leader. + // + // time: 0x00 0x0A 0x1A 0x2A 0x3A + // + // Carol c0 — c1 — c2 + // \ + // Bob — b0 + + let c0 = add_unit!(state, CAROL, 0x00, 0u8, 0xA; N, N, N).unwrap(); + let c1 = add_unit!(state, CAROL, 0x0A, 0u8, None; N, N, c0).unwrap(); + let c2 = add_unit!(state, CAROL, 0x1A, 0u8, None; N, N, c1).unwrap(); + let b0 = add_unit!(state, BOB, 0x2A, 0u8, None; N, N, c1).unwrap(); + + // Returns the WireUnit with the specified hash. + let unit = |hash: u64| Vertex::Unit(state.wire_unit(&hash, TEST_INSTANCE_ID).unwrap()); + // Returns the PreValidatedVertex with the specified hash. + let pvv = |hash: u64| util_highway.pre_validate_vertex(unit(hash)).unwrap(); + + let peer0 = NodeId::from([0; 64]); + let peer1 = NodeId::from([1; 64]); + + // Create a synchronizer with a 0x20 ms timeout, and a Highway instance. + let max_requests_for_vertex = 5; + let mut sync = Synchronizer::::new(WEIGHTS.len(), TEST_INSTANCE_ID); + + let mut highway = + Highway::::new(TEST_INSTANCE_ID, test_validators(), params, None); + let now = 0x20.into(); + + assert!(matches!( + *sync.schedule_add_vertex(peer0, pvv(c2), now), + [ProtocolOutcome::QueueAction(ACTION_ID_VERTEX)] + )); + // `c2` can't be added to the protocol state yet b/c it's missing its `c1` dependency. 
+ let (pv, outcomes) = + sync.pop_vertex_to_add(&highway, &Default::default(), max_requests_for_vertex); + assert!(pv.is_none()); + assert_targeted_message(&unwrap_single(outcomes), &peer0, Dependency::Unit(c1)); + // Simulate `c1` being downloaded… + let c1_outcomes = sync.schedule_add_vertex(peer0, pvv(c1), now); + assert!(matches!( + *c1_outcomes, + [ProtocolOutcome::QueueAction(ACTION_ID_VERTEX)] + )); + // `b0` can't be added to the protocol state b/c it's missing its `c1` dependency, + // but `c1` has already been downloaded so we should not request it again. We will only request + // `c0` as that's what `c1` depends on. + let (pv, outcomes) = + sync.pop_vertex_to_add(&highway, &Default::default(), max_requests_for_vertex); + assert!(pv.is_none()); + assert_targeted_message(&unwrap_single(outcomes), &peer0, Dependency::Unit(c0)); + // `c1` is now part of the synchronizer's state, we should not try requesting it if other + // vertices depend on it. + assert!(matches!( + *sync.schedule_add_vertex(peer1, pvv(b0), now), + [ProtocolOutcome::QueueAction(ACTION_ID_VERTEX)] + )); + let (pv, outcomes) = + sync.pop_vertex_to_add(&highway, &Default::default(), max_requests_for_vertex); + assert!(pv.is_none()); + // `b0` depends on `c1`, that is already in the synchronizer's state, but it also depends on + // `c0` transitively that is not yet known. We should request it, even if we had already + // done that for `c1`. + assert_targeted_message(&unwrap_single(outcomes), &peer1, Dependency::Unit(c0)); + // "Download" the last dependency. + let _ = sync.schedule_add_vertex(peer0, pvv(c0), now); + // Now, the whole chain can be added to the protocol state. + let mut units: BTreeSet> = vec![c0, c1, b0, c2] + .into_iter() + .map(Dependency::Unit) + .collect(); + while let (Some(pv), outcomes) = + sync.pop_vertex_to_add(&highway, &Default::default(), max_requests_for_vertex) + { + // Verify that we don't request any dependency now. 
+ assert!( + !outcomes + .iter() + .any(|outcome| matches!(outcome, ProtocolOutcome::CreatedTargetedMessage(_, _))), + "unexpected dependency request {:?}", + outcomes + ); + let pv_dep = pv.vertex().id(); + assert!(units.remove(&pv_dep), "unexpected dependency"); + match pv_dep { + Dependency::Unit(hash) => { + let vv = highway + .validate_vertex(pvv(hash)) + .unwrap_or_else(|_| panic!("{:?} unit is valid", hash)); + highway.add_valid_vertex(vv, now); + let _ = sync.remove_satisfied_deps(&highway); + } + _ => panic!("expected unit"), + } + } + assert!(sync.is_empty()); +} + +#[test] +fn transitive_proposal_dependency() { + let params = test_params(0); + // A Highway and state instances that are used to create PreValidatedVertex instances below. + + let mut state = State::new(WEIGHTS, params.clone(), vec![], vec![]); + let util_highway = + Highway::::new(TEST_INSTANCE_ID, test_validators(), params.clone(), None); + + // Alice a0 — a1 + // / \ + // Bob / b0 + // / + // Carol c0 + + let a0 = add_unit!(state, ALICE, 0xA; N, N, N).unwrap(); + let c0 = add_unit!(state, CAROL, 0xC; N, N, N).unwrap(); + let a1 = add_unit!(state, ALICE, None; a0, N, c0).unwrap(); + let b0 = add_unit!(state, BOB, None; a1, N, c0).unwrap(); + + // Returns the WireUnit with the specified hash. + let unit = |hash: u64| Vertex::Unit(state.wire_unit(&hash, TEST_INSTANCE_ID).unwrap()); + // Returns the PreValidatedVertex with the specified hash. + let pvv = |hash: u64| util_highway.pre_validate_vertex(unit(hash)).unwrap(); + + let peer0 = NodeId::from([0; 64]); + let peer1 = NodeId::from([1; 64]); + + // Create a synchronizer with a 0x200 ms timeout, and a Highway instance. 
+ let max_requests_for_vertex = 5; + let mut sync = Synchronizer::::new(WEIGHTS.len(), TEST_INSTANCE_ID); + + let mut highway = + Highway::::new(TEST_INSTANCE_ID, test_validators(), params, None); + let now = 0x100.into(); + + assert!(matches!( + *sync.schedule_add_vertex(peer0, pvv(a1), now), + [ProtocolOutcome::QueueAction(ACTION_ID_VERTEX)] + )); + // `a1` can't be added to the protocol state yet b/c it's missing its `a0` dependency. + let (pv, outcomes) = + sync.pop_vertex_to_add(&highway, &Default::default(), max_requests_for_vertex); + assert!(pv.is_none()); + assert_targeted_message(&unwrap_single(outcomes), &peer0, Dependency::Unit(a0)); + + // "Download" and schedule addition of a0. + let a0_outcomes = sync.schedule_add_vertex(peer0, pvv(a0), now); + assert!(matches!( + *a0_outcomes, + [ProtocolOutcome::QueueAction(ACTION_ID_VERTEX)] + )); + // `a0` has no dependencies so we can try adding it to the protocol state. + let (maybe_pv, outcomes) = + sync.pop_vertex_to_add(&highway, &Default::default(), max_requests_for_vertex); + let pv = maybe_pv.expect("expected a0 vertex"); + assert_eq!(pv.vertex(), &unit(a0)); + assert!(outcomes.is_empty()); + + // `b0` can't be added either b/c it's relying on `a1` and `c0`. + assert!(matches!( + *sync.schedule_add_vertex(peer1, pvv(b0), now), + [ProtocolOutcome::QueueAction(ACTION_ID_VERTEX)] + )); + let a0_pending_values = { + let mut tmp = HashMap::new(); + let vv = ValidVertex(unit(a0)); + let proposed_block = ProposedBlock::new(1u32, BlockContext::new(now, Vec::new())); + let mut set = HashSet::new(); + set.insert((vv, peer0)); + tmp.insert(proposed_block, set); + tmp + }; + let (maybe_pv, outcomes) = + sync.pop_vertex_to_add(&highway, &a0_pending_values, max_requests_for_vertex); + // `peer1` is added as a holder of `a0`'s deploys due to the indirect dependency. 
+ let pv = maybe_pv.unwrap(); + assert!(pv.sender() == &peer1); + assert!(pv.vertex() == &unit(a0)); + // `b0` depends on `a0` transitively but `a0`'s deploys are being downloaded, + // so we don't re-request it. + assert!(outcomes.is_empty()); + + // If we add `a0` to the protocol state, `a1`'s dependency is satisfied. + // `a1`'s other dependency is `c0`. Since both peers have it we request it from both. + let vv = highway.validate_vertex(pvv(a0)).expect("a0 is valid"); + highway.add_valid_vertex(vv, now); + assert!(matches!( + *sync.remove_satisfied_deps(&highway), + [ProtocolOutcome::QueueAction(ACTION_ID_VERTEX)] + )); + let (maybe_pv, outcomes) = + sync.pop_vertex_to_add(&highway, &Default::default(), max_requests_for_vertex); + assert!(maybe_pv.is_none()); + match &*outcomes { + [ProtocolOutcome::CreatedTargetedMessage(msg0_serialized, p0), ProtocolOutcome::CreatedTargetedMessage(msg1_serialized, p1)] => + { + let msg0: HighwayMessage = msg0_serialized.deserialize_expect(); + let msg1: HighwayMessage = msg1_serialized.deserialize_expect(); + assert_eq!( + vec![&peer0, &peer1], + vec![p0, p1].into_iter().sorted().collect_vec(), + "expected to request dependency from exactly two different peers", + ); + + match (msg0, msg1) { + ( + HighwayMessage::RequestDependency(_, dep0), + HighwayMessage::RequestDependency(_, dep1), + ) => { + assert_eq!( + dep0, + Dependency::Unit(c0), + "unexpected dependency requested" + ); + assert_eq!( + dep0, dep1, + "we should have requested the same dependency from two different peers" + ); + } + other => panic!("unexpected HighwayMessage variant {:?}", other), + } + } + outcomes => panic!("unexpected outcomes: {:?}", outcomes), + } +} + +fn unwrap_single(vec: Vec) -> T { + assert_eq!( + vec.len(), + 1, + "expected single element in the vector {:?}", + vec + ); + vec.into_iter().next().unwrap() +} + +fn assert_targeted_message( + outcome: &ProtocolOutcome, + peer: &NodeId, + expected: Dependency, +) { + match outcome { + 
ProtocolOutcome::CreatedTargetedMessage(raw_msg, peer0) => { + assert_eq!(peer, peer0); + let msg = raw_msg.deserialize_expect(); + match msg { + HighwayMessage::RequestDependency(_, got) => assert_eq!(got, expected), + other => panic!("unexpected variant: {:?}", other), + } + } + _ => panic!("unexpected outcome: {:?}", outcome), + } +} diff --git a/node/src/components/consensus/highway_core/test_macros.rs b/node/src/components/consensus/highway_core/test_macros.rs index 8c60bdc4e6..a157cf5405 100644 --- a/node/src/components/consensus/highway_core/test_macros.rs +++ b/node/src/components/consensus/highway_core/test_macros.rs @@ -27,17 +27,19 @@ macro_rules! add_unit { highway::{SignedWireUnit, WireUnit}, highway_testing::TEST_INSTANCE_ID, }, - types::{TimeDiff, Timestamp}, }; + #[allow(unused_imports)] // These might be already imported at the call site. + use casper_types::{TimeDiff, Timestamp}; + let creator = $creator; let panorama = panorama!($($obs),*); let seq_number = panorama.next_seq_num(&$state, creator); let maybe_parent_hash = panorama[creator].correct(); - // Use our most recent round exponent, or the configured initial one. - let round_exp = maybe_parent_hash.map_or_else( - || $state.params().init_round_exp(), - |vh| $state.unit(vh).round_exp, + // Use our most recent round length, or the configured initial one. + let r_len = maybe_parent_hash.map_or_else( + || $state.params().init_round_len(), + |vh| $state.unit(vh).round_len(), ); let value = Option::from($val); // At most two units per round are allowed. @@ -48,19 +50,18 @@ macro_rules! add_unit { // And our timestamp must not be less than any justification's. let mut timestamp = panorama .iter_correct(&$state) - .map(|unit| unit.timestamp + TimeDiff::from(1)) + .map(|unit| unit.timestamp + TimeDiff::from_millis(1)) .chain(two_units_limit) .max() .unwrap_or($state.params().start_timestamp()); // If this is a block: Find the next time we're a leader. 
if value.is_some() { - #[allow(clippy::integer_arithmetic)] - let r_len = TimeDiff::from(1 << round_exp); - timestamp = state::round_id(timestamp + r_len - TimeDiff::from(1), round_exp); + timestamp = state::round_id(timestamp + r_len - TimeDiff::from_millis(1), r_len); while $state.leader(timestamp) != creator { timestamp += r_len; } } + let round_exp = (r_len / $state.params().min_round_length()).trailing_zeros() as u8; let wunit = WireUnit { panorama, creator, diff --git a/node/src/components/consensus/highway_core/validators.rs b/node/src/components/consensus/highway_core/validators.rs deleted file mode 100644 index 6c30a4ba76..0000000000 --- a/node/src/components/consensus/highway_core/validators.rs +++ /dev/null @@ -1,302 +0,0 @@ -use std::{ - collections::HashMap, - fmt, - hash::Hash, - iter::FromIterator, - ops::{Add, Index, IndexMut}, - slice, vec, -}; - -use datasize::DataSize; -use derive_more::{AsRef, From}; -use itertools::Itertools; -use serde::{Deserialize, Serialize}; - -use super::Weight; -use crate::utils::ds; - -/// The index of a validator, in a list of all validators, ordered by ID. -#[derive( - Copy, Clone, DataSize, Debug, Eq, PartialEq, Hash, Ord, PartialOrd, Serialize, Deserialize, -)] -pub(crate) struct ValidatorIndex(pub(crate) u32); - -impl From for ValidatorIndex { - fn from(idx: u32) -> Self { - ValidatorIndex(idx) - } -} - -/// Information about a validator: their ID and weight. -#[derive(Clone, DataSize, Debug, Eq, PartialEq)] -pub(crate) struct Validator { - weight: Weight, - id: VID, - banned: bool, -} - -impl> From<(VID, W)> for Validator { - fn from((id, weight): (VID, W)) -> Validator { - Validator { - id, - weight: weight.into(), - banned: false, - } - } -} - -impl Validator { - pub(crate) fn id(&self) -> &VID { - &self.id - } - - pub(crate) fn weight(&self) -> Weight { - self.weight - } -} - -/// The validator IDs and weight map. 
-#[derive(Debug, DataSize, Clone)] -pub(crate) struct Validators -where - VID: Eq + Hash, -{ - index_by_id: HashMap, - validators: Vec>, -} - -impl Validators { - pub(crate) fn total_weight(&self) -> Weight { - self.validators.iter().fold(Weight(0), |sum, v| { - sum.checked_add(v.weight()) - .expect("total weight must be < 2^64") - }) - } - - pub(crate) fn get_index(&self, id: &VID) -> Option { - self.index_by_id.get(id).cloned() - } - - /// Returns validator ID by index, or `None` if it doesn't exist. - pub(crate) fn id(&self, idx: ValidatorIndex) -> Option<&VID> { - self.validators.get(idx.0 as usize).map(Validator::id) - } - - /// Returns an iterator over all validators, sorted by ID. - pub(crate) fn iter(&self) -> impl Iterator> { - self.validators.iter() - } - - /// Marks the validator with that ID as banned, if it exists. - pub(crate) fn ban(&mut self, vid: &VID) { - if let Some(idx) = self.get_index(vid) { - self.validators[idx.0 as usize].banned = true; - } - } - - /// Returns an iterator of all indices of banned validators. 
- pub(crate) fn iter_banned_idx(&self) -> impl Iterator + '_ { - self.iter() - .enumerate() - .filter(|(_, v)| v.banned) - .map(|(idx, _)| ValidatorIndex::from(idx as u32)) - } - - pub(crate) fn enumerate_ids<'a>(&'a self) -> impl Iterator { - let to_idx = - |(idx, v): (usize, &'a Validator)| (ValidatorIndex::from(idx as u32), v.id()); - self.iter().enumerate().map(to_idx) - } -} - -impl> FromIterator<(VID, W)> for Validators { - fn from_iter>(ii: I) -> Validators { - let mut validators: Vec<_> = ii.into_iter().map(Validator::from).collect(); - validators.sort_by_cached_key(|val| val.id.clone()); - let index_by_id = validators - .iter() - .enumerate() - .map(|(idx, val)| (val.id.clone(), ValidatorIndex(idx as u32))) - .collect(); - Validators { - index_by_id, - validators, - } - } -} - -impl fmt::Display for Validators { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - writeln!(f, "Validators: index, ID, weight")?; - for (i, val) in self.validators.iter().enumerate() { - writeln!(f, "{:3}, {:?}, {}", i, val.id(), val.weight().0)? - } - Ok(()) - } -} - -#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize, AsRef, From, Hash)] -pub(crate) struct ValidatorMap(Vec); - -impl fmt::Display for ValidatorMap> -where - T: fmt::Display, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let view = self - .0 - .iter() - .map(|maybe_el| match maybe_el { - None => "N".to_string(), - Some(el) => format!("{}", el), - }) - .join(", "); - write!(f, "ValidatorMap({})", view)?; - Ok(()) - } -} - -impl DataSize for ValidatorMap -where - T: DataSize, -{ - const IS_DYNAMIC: bool = Vec::::IS_DYNAMIC; - - const STATIC_HEAP_SIZE: usize = Vec::::STATIC_HEAP_SIZE; - - fn estimate_heap_size(&self) -> usize { - ds::vec_sample(&self.0) - } -} - -impl ValidatorMap { - /// Returns the value for the given validator. Panics if the index is out of range. 
- pub(crate) fn get(&self, idx: ValidatorIndex) -> &T { - &self.0[idx.0 as usize] - } - - /// Returns the number of values. This must equal the number of validators. - pub(crate) fn len(&self) -> usize { - self.0.len() - } - - /// Returns an iterator over all values. - pub(crate) fn iter(&self) -> impl Iterator { - self.0.iter() - } - - /// Returns an iterator over mutable references to all values. - pub(crate) fn iter_mut(&mut self) -> impl Iterator { - self.0.iter_mut() - } - - /// Returns an iterator over all values, by validator index. - pub(crate) fn enumerate(&self) -> impl Iterator { - self.iter() - .enumerate() - .map(|(idx, value)| (ValidatorIndex(idx as u32), value)) - } - - /// Returns `true` if `self` has an entry for validator number `idx`. - pub(crate) fn has(&self, idx: ValidatorIndex) -> bool { - self.0.len() > idx.0 as usize - } - - /// Returns an iterator over all validator indices. - pub(crate) fn keys(&self) -> impl Iterator { - (0..self.len()).map(|idx| ValidatorIndex(idx as u32)) - } - - /// Binary searches this sorted `ValidatorMap` for `x`. - /// - /// If the value is found, `Ok` is returned, containing the index, otherwise `Err`, with the - /// first index at which the value is greater than `x`. 
- pub fn binary_search(&self, x: &T) -> Result - where - T: Ord, - { - self.0 - .binary_search(x) - .map(|i| ValidatorIndex(i as u32)) - .map_err(|i| ValidatorIndex(i as u32)) - } -} - -impl IntoIterator for ValidatorMap { - type Item = T; - type IntoIter = vec::IntoIter; - fn into_iter(self) -> Self::IntoIter { - self.0.into_iter() - } -} - -impl FromIterator for ValidatorMap { - fn from_iter>(ii: I) -> ValidatorMap { - ValidatorMap(ii.into_iter().collect()) - } -} - -impl Index for ValidatorMap { - type Output = T; - - fn index(&self, vidx: ValidatorIndex) -> &T { - &self.0[vidx.0 as usize] - } -} - -impl IndexMut for ValidatorMap { - fn index_mut(&mut self, vidx: ValidatorIndex) -> &mut T { - &mut self.0[vidx.0 as usize] - } -} - -impl<'a, T> IntoIterator for &'a ValidatorMap { - type Item = &'a T; - type IntoIter = slice::Iter<'a, T>; - - fn into_iter(self) -> Self::IntoIter { - self.0.iter() - } -} - -impl> Add> for ValidatorMap { - type Output = ValidatorMap; - fn add(mut self, rhs: ValidatorMap) -> Self::Output { - self.0 - .iter_mut() - .zip(rhs) - .for_each(|(lhs_val, rhs_val)| *lhs_val = *lhs_val + rhs_val); - self - } -} - -impl ValidatorMap> { - /// Returns the keys of all validators whose value is `Some`. - pub(crate) fn keys_some(&self) -> impl Iterator + '_ { - self.iter_some().map(|(vidx, _)| vidx) - } - - /// Returns an iterator over all values that are present, together with their index. 
- pub(crate) fn iter_some(&self) -> impl Iterator + '_ { - self.enumerate() - .filter_map(|(vidx, opt)| opt.as_ref().map(|val| (vidx, val))) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn from_iter() { - let weights = vec![ - ("Bob".to_string(), 5u64), - ("Carol".to_string(), 3), - ("Alice".to_string(), 4), - ]; - let validators = Validators::from_iter(weights); - assert_eq!(ValidatorIndex(0), validators.index_by_id["Alice"]); - assert_eq!(ValidatorIndex(1), validators.index_by_id["Bob"]); - assert_eq!(ValidatorIndex(2), validators.index_by_id["Carol"]); - } -} diff --git a/node/src/components/consensus/leader_sequence.rs b/node/src/components/consensus/leader_sequence.rs new file mode 100644 index 0000000000..c45b2bffb2 --- /dev/null +++ b/node/src/components/consensus/leader_sequence.rs @@ -0,0 +1,167 @@ +use datasize::DataSize; +use rand::{Rng, SeedableRng}; +use rand_chacha::ChaCha8Rng; +use serde::{Deserialize, Serialize}; +use tracing::error; + +use crate::components::consensus::utils::{ValidatorIndex, ValidatorMap, Weight}; + +/// A pseudorandom sequence of validator indices, distributed by weight. +#[derive(Debug, Clone, DataSize, Serialize, Deserialize)] +pub(crate) struct LeaderSequence { + /// Cumulative validator weights: Entry `i` contains the sum of the weights of validators `0` + /// through `i`. + cumulative_w: ValidatorMap, + /// Cumulative validator weights, but with the weight of banned validators set to `0`. + cumulative_w_leaders: ValidatorMap, + /// This is `false` for validators who have been excluded from the sequence. + leaders: ValidatorMap, + /// The PRNG seed. 
+ seed: u64, +} + +impl LeaderSequence { + pub(crate) fn new( + seed: u64, + weights: &ValidatorMap, + leaders: ValidatorMap, + ) -> LeaderSequence { + let sums = |mut sums: Vec, w: Weight| { + let sum = sums.last().copied().unwrap_or(Weight(0)); + sums.push(sum.checked_add(w).expect("total weight must be < 2^64")); + sums + }; + let cumulative_w = ValidatorMap::from(weights.iter().copied().fold(vec![], sums)); + assert!( + *cumulative_w.as_ref().last().unwrap() > Weight(0), + "total weight must not be zero" + ); + let cumulative_w_leaders = weights + .enumerate() + .map(|(idx, weight)| if leaders[idx] { *weight } else { Weight(0) }) + .fold(vec![], sums) + .into(); + LeaderSequence { + cumulative_w, + cumulative_w_leaders, + leaders, + seed, + } + } + + /// Returns the leader in the specified slot. + /// + /// First the assignment is computed ignoring the `leaders` flags. Only if the selected + /// leader's entry is `false`, the computation is repeated, this time with the flagged + /// validators excluded. This ensures that once the validator set has been decided, correct + /// validators' slots never get reassigned to someone else, even if after the fact someone is + /// excluded as a leader. + pub(crate) fn leader(&self, slot: u64) -> ValidatorIndex { + // The binary search cannot return None; if it does, it's a programming error. In that case, + // we want the tests to panic but production to pick a default. + let panic_or_0 = || { + if cfg!(test) { + panic!("random number out of range"); + } else { + error!("random number out of range"); + ValidatorIndex(0) + } + }; + let seed = self.seed.wrapping_add(slot); + // We select a random one out of the `total_weight` weight units, starting numbering at 1. + let r = Weight(leader_prng(self.total_weight().0, seed)); + // The weight units are subdivided into intervals that belong to some validator. + // `cumulative_w[i]` denotes the last weight unit that belongs to validator `i`. 
+ // `binary_search` returns the first `i` with `cumulative_w[i] >= r`, i.e. the validator + // who owns the randomly selected weight unit. + let leader_index = self + .cumulative_w + .binary_search(&r) + .unwrap_or_else(panic_or_0); + if self.leaders[leader_index] { + return leader_index; + } + // If the selected leader is excluded, we reassign the slot to someone else. This time we + // consider only the non-banned validators. + let total_w_leaders = *self.cumulative_w_leaders.as_ref().last().unwrap(); + let r = Weight(leader_prng(total_w_leaders.0, seed.wrapping_add(1))); + self.cumulative_w_leaders + .binary_search(&r) + .unwrap_or_else(panic_or_0) + } + + /// Returns the sum of all validators' voting weights. + pub(crate) fn total_weight(&self) -> Weight { + *self + .cumulative_w + .as_ref() + .last() + .expect("weight list cannot be empty") + } +} + +/// Returns a pseudorandom `u64` between `1` and `upper` (inclusive). +fn leader_prng(upper: u64, seed: u64) -> u64 { + ChaCha8Rng::seed_from_u64(seed) + .gen_range(0..upper) + .saturating_add(1) +} + +/// Returns a seed that with the given weights results in the desired leader sequence. +#[cfg(test)] +pub(crate) fn find_seed( + seq: &[ValidatorIndex], + weights: &ValidatorMap, + leaders: &ValidatorMap, +) -> u64 { + for seed in 0..1000 { + let ls = LeaderSequence::new(seed, weights, leaders.clone()); + if seq + .iter() + .enumerate() + .all(|(slot, &v_idx)| ls.leader(slot as u64) == v_idx) + { + return seed; + } + } + panic!("No suitable seed for leader sequence found"); +} + +#[test] +fn test_leader_prng() { + use rand::RngCore; + + let mut rng = crate::new_rng(); + + // Repeat a few times to make it likely that the inner loop runs more than once. 
+ for _ in 0..10 { + let upper = rng.gen_range(1..u64::MAX); + let seed = rng.next_u64(); + + // This tests that the rand crate's gen_range implementation, which is used in + // leader_prng, doesn't change, and uses this algorithm: + // https://github.com/rust-random/rand/blob/73befa480c58dd0461da5f4469d5e04c564d4de3/src/distributions/uniform.rs#L515 + let mut prng = ChaCha8Rng::seed_from_u64(seed); + let zone = upper << upper.leading_zeros(); // A multiple of upper that fits into a u64. + let expected = loop { + // Multiply a random u64 by upper. This is between 0 and u64::MAX * upper. + let prod = (prng.next_u64() as u128) * (upper as u128); + // So prod >> 64 is between 0 and upper - 1. Each interval from (N << 64) to + // (N << 64) + zone contains the same number of such values. + // If the value is in such an interval, return N + 1; otherwise retry. + if (prod as u64) < zone { + break (prod >> 64) as u64 + 1; + } + }; + + assert_eq!(expected, leader_prng(upper, seed)); + } +} + +#[test] +fn test_leader_prng_values() { + // Test a few concrete values, to detect if the ChaCha8Rng impl changes. + assert_eq!(12578764544318200737, leader_prng(u64::MAX, 42)); + assert_eq!(12358540700710939054, leader_prng(u64::MAX, 1337)); + assert_eq!(4134160578770126600, leader_prng(u64::MAX, 0x1020304050607)); +} diff --git a/node/src/components/consensus/metrics.rs b/node/src/components/consensus/metrics.rs index 67d292fb1b..5e918fb658 100644 --- a/node/src/components/consensus/metrics.rs +++ b/node/src/components/consensus/metrics.rs @@ -1,67 +1,67 @@ use prometheus::{Gauge, IntGauge, Registry}; -use crate::{ - types::{FinalizedBlock, Timestamp}, - unregister_metric, -}; +use casper_types::Timestamp; + +use crate::{types::FinalizedBlock, unregister_metric}; /// Network metrics to track Consensus #[derive(Debug)] -pub(super) struct ConsensusMetrics { +pub(super) struct Metrics { /// Gauge to track time between proposal and finalization. 
finalization_time: Gauge, /// Amount of finalized blocks. finalized_block_count: IntGauge, - /// Timestamp of the most recently accepted proto block. + /// Timestamp of the most recently accepted block payload. time_of_last_proposed_block: IntGauge, /// Timestamp of the most recently finalized block. time_of_last_finalized_block: IntGauge, - /// The Current era. - pub(super) current_era: IntGauge, - /// registry component. + /// The current era. + pub(super) consensus_current_era: IntGauge, + /// Registry component. registry: Registry, } -impl ConsensusMetrics { +impl Metrics { pub(super) fn new(registry: &Registry) -> Result { let finalization_time = Gauge::new( "finalization_time", - "the amount of time, in milliseconds, between proposal and finalization of a block", + "the amount of time, in milliseconds, between proposal and finalization of the latest finalized block", )?; let finalized_block_count = IntGauge::new("amount_of_blocks", "the number of blocks finalized so far")?; let time_of_last_proposed_block = IntGauge::new( - "time_of_last_proto_block", - "timestamp of the most recently accepted proto block", + "time_of_last_block_payload", + "timestamp of the most recently accepted block payload", )?; let time_of_last_finalized_block = IntGauge::new( "time_of_last_finalized_block", "timestamp of the most recently finalized block", )?; - let current_era = IntGauge::new("current_era", "The current era")?; + let consensus_current_era = + IntGauge::new("consensus_current_era", "the current era in consensus")?; registry.register(Box::new(finalization_time.clone()))?; registry.register(Box::new(finalized_block_count.clone()))?; - registry.register(Box::new(current_era.clone()))?; + registry.register(Box::new(consensus_current_era.clone()))?; registry.register(Box::new(time_of_last_proposed_block.clone()))?; registry.register(Box::new(time_of_last_finalized_block.clone()))?; - Ok(ConsensusMetrics { + Ok(Metrics { finalization_time, finalized_block_count, 
time_of_last_proposed_block, time_of_last_finalized_block, - current_era, + consensus_current_era, registry: registry.clone(), }) } /// Updates the metrics based on a newly finalized block. pub(super) fn finalized_block(&mut self, finalized_block: &FinalizedBlock) { - let time_since_proto_block = finalized_block.timestamp().elapsed().millis() as f64; - self.finalization_time.set(time_since_proto_block); + let time_since_block_payload = finalized_block.timestamp.elapsed().millis() as f64; + self.finalization_time.set(time_since_block_payload); self.time_of_last_finalized_block - .set(finalized_block.timestamp().millis() as i64); + .set(finalized_block.timestamp.millis() as i64); self.finalized_block_count - .set(finalized_block.height() as i64); + .set(finalized_block.height as i64); } /// Updates the metrics and records a newly proposed block. @@ -71,11 +71,11 @@ impl ConsensusMetrics { } } -impl Drop for ConsensusMetrics { +impl Drop for Metrics { fn drop(&mut self) { unregister_metric!(self.registry, self.finalization_time); unregister_metric!(self.registry, self.finalized_block_count); - unregister_metric!(self.registry, self.current_era); + unregister_metric!(self.registry, self.consensus_current_era); unregister_metric!(self.registry, self.time_of_last_finalized_block); unregister_metric!(self.registry, self.time_of_last_proposed_block); } diff --git a/node/src/components/consensus/protocols.rs b/node/src/components/consensus/protocols.rs index 8d6a23d8b1..dfbf9a6498 100644 --- a/node/src/components/consensus/protocols.rs +++ b/node/src/components/consensus/protocols.rs @@ -1 +1,5 @@ +//! Implementations of consensus protocols. + +pub mod common; pub(crate) mod highway; +pub(crate) mod zug; diff --git a/node/src/components/consensus/protocols/common.rs b/node/src/components/consensus/protocols/common.rs new file mode 100644 index 0000000000..a5f5a819bf --- /dev/null +++ b/node/src/components/consensus/protocols/common.rs @@ -0,0 +1,133 @@ +//! 
Utilities common to different consensus algorithms. + +use itertools::Itertools; +use num_rational::Ratio; +use std::collections::{BTreeMap, HashSet}; + +use num_traits::AsPrimitive; + +use crate::components::consensus::{ + traits::Context, + utils::{ValidatorMap, Validators, Weight}, +}; +use casper_types::U512; + +/// Computes the validator set given the stakes and the faulty and inactive +/// reports from the previous eras. +pub fn validators( + faulty: &HashSet, + inactive: &HashSet, + validator_stakes: BTreeMap, +) -> Validators { + let sum_stakes = safe_sum(validator_stakes.values().copied()).expect("should not overflow"); + // We use u64 weights. Scale down by floor(sum / u64::MAX) + 1. + // This guarantees that the resulting sum is greater than 0 and less than u64::MAX. + #[allow(clippy::arithmetic_side_effects)] // Divisor isn't 0 and addition can't overflow. + let scaling_factor: U512 = sum_stakes / U512::from(u64::MAX) + 1; + + // TODO sort validators by descending weight + #[allow(clippy::arithmetic_side_effects)] // Divisor isn't 0. + let mut validators: Validators = validator_stakes + .into_iter() + .map(|(key, stake)| (key, AsPrimitive::::as_(stake / scaling_factor))) + .collect(); + + for vid in faulty { + validators.ban(vid); + } + + for vid in inactive { + validators.set_cannot_propose(vid); + } + + assert!( + validators.ensure_nonzero_proposing_stake(), + "cannot start era with total weight 0" + ); + + validators +} + +/// Compute the validator weight map from the set of validators. 
+pub(crate) fn validator_weights( + validators: &Validators, +) -> ValidatorMap { + ValidatorMap::from(validators.iter().map(|v| v.weight()).collect_vec()) +} + +/// Computes the fault tolerance threshold for the protocol instance +pub(crate) fn ftt( + finality_threshold_fraction: Ratio, + validators: &Validators, +) -> Weight { + let total_weight = u128::from(validators.total_weight()); + assert!( + finality_threshold_fraction < 1.into(), + "finality threshold must be less than 100%" + ); + #[allow(clippy::arithmetic_side_effects)] // FTT is less than 1, so this can't overflow + let ftt = total_weight * *finality_threshold_fraction.numer() as u128 + / *finality_threshold_fraction.denom() as u128; + (ftt as u64).into() +} + +/// A U512 sum implementation that checks for overflow. +fn safe_sum(mut iterator: I) -> Option +where + I: Iterator, +{ + iterator.try_fold(U512::zero(), |acc, n| acc.checked_add(n)) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::components::consensus::ClContext; + use casper_types::{testing::TestRng, PublicKey}; + use rand::Rng; + + #[test] + #[should_panic] + fn ftt_panics_during_overflow() { + let rng = &mut TestRng::new(); + let mut validator_stakes = BTreeMap::new(); + validator_stakes.insert(PublicKey::random(rng), U512::MAX); + validator_stakes.insert(PublicKey::random(rng), U512::from(1_u32)); + + validators::(&Default::default(), &Default::default(), validator_stakes); + } + + #[test] + fn total_weights_less_than_u64_max() { + let mut rng = TestRng::new(); + + let (test_stake_1, test_stake_2) = (rng.gen(), rng.gen()); + + let mut test_stakes = |a: u64, b: u64| -> BTreeMap { + let mut result = BTreeMap::new(); + result.insert( + PublicKey::random(&mut rng), + U512::from(a) * U512::from(u128::MAX), + ); + result.insert( + PublicKey::random(&mut rng), + U512::from(b) * U512::from(u128::MAX), + ); + result + }; + + // First, we test with random values. 
+ let stakes = test_stakes(test_stake_1, test_stake_2); + let weights = validators::(&Default::default(), &Default::default(), stakes); + assert!(weights.total_weight().0 < u64::MAX); + + // Then, we test with values that were known to cause issues before. + let stakes = test_stakes(514, 771); + let weights = validators::(&Default::default(), &Default::default(), stakes); + assert!(weights.total_weight().0 < u64::MAX); + + let stakes = test_stakes(668, 614); + let weights = validators::(&Default::default(), &Default::default(), stakes); + assert!(weights.total_weight().0 < u64::MAX); + } +} diff --git a/node/src/components/consensus/protocols/highway.rs b/node/src/components/consensus/protocols/highway.rs index f1523068bc..84490200bb 100644 --- a/node/src/components/consensus/protocols/highway.rs +++ b/node/src/components/consensus/protocols/highway.rs @@ -1,7 +1,6 @@ pub(crate) mod config; mod participation; mod round_success_meter; -mod synchronizer; #[cfg(test)] mod tests; @@ -13,18 +12,19 @@ use std::{ path::PathBuf, }; +use casper_types::{Chainspec, TimeDiff, Timestamp, U512}; use datasize::DataSize; use itertools::Itertools; -use num_traits::AsPrimitive; -use serde::{Deserialize, Serialize}; -use tracing::{error, info, trace, warn}; - -use casper_types::{system::auction::BLOCK_REWARD, U512}; +use rand::RngCore; +use tracing::{debug, error, info, trace, warn}; use crate::{ components::consensus::{ - config::{Config, ProtocolConfig}, - consensus_protocol::{BlockContext, ConsensusProtocol, ProtocolOutcome, ProtocolOutcomes}, + config::Config, + consensus_protocol::{ + BlockContext, ConsensusProtocol, ProposedBlock, ProtocolOutcome, ProtocolOutcomes, + }, + era_supervisor::SerializedMessage, highway_core::{ active_validator::Effect as AvEffect, finality_detector::{FinalityDetector, FttExceeded}, @@ -32,18 +32,19 @@ use crate::{ Dependency, GetDepOutcome, Highway, Params, PreValidatedVertex, ValidVertex, Vertex, VertexError, }, - state, - state::{Observation, 
Panorama}, - validators::{ValidatorIndex, Validators}, + state::{IndexObservation, IndexPanorama, Observation}, + synchronizer::Synchronizer, }, - traits::{ConsensusValueT, Context, NodeIdT}, + protocols, + traits::{ConsensusValueT, Context}, + utils::ValidatorIndex, ActionId, TimerId, }, - types::{TimeDiff, Timestamp}, + types::NodeId, + NodeRng, }; -pub use self::config::Config as HighwayConfig; -use self::{round_success_meter::RoundSuccessMeter, synchronizer::Synchronizer}; +use self::round_success_meter::RoundSuccessMeter; /// Never allow more than this many units in a piece of evidence for conflicting endorsements, /// even if eras are longer than this. @@ -57,151 +58,129 @@ const TIMER_ID_VERTEX_WITH_FUTURE_TIMESTAMP: TimerId = TimerId(1); const TIMER_ID_PURGE_VERTICES: TimerId = TimerId(2); /// The timer for logging inactive validators. const TIMER_ID_LOG_PARTICIPATION: TimerId = TimerId(3); -/// The timer for an alert no progress was made in a long time. -const TIMER_ID_STANDSTILL_ALERT: TimerId = TimerId(4); /// The timer for logging synchronizer queue size. -const TIMER_ID_SYNCHRONIZER_LOG: TimerId = TimerId(5); -/// The timer for sending the latest panorama request. -const TIMER_ID_PANORAMA_REQUEST: TimerId = TimerId(6); +const TIMER_ID_SYNCHRONIZER_LOG: TimerId = TimerId(4); +/// The timer to request the latest state from a random peer. +const TIMER_ID_REQUEST_STATE: TimerId = TimerId(5); /// The action of adding a vertex from the `vertices_to_be_added` queue. -const ACTION_ID_VERTEX: ActionId = ActionId(0); +pub(crate) const ACTION_ID_VERTEX: ActionId = ActionId(0); #[derive(DataSize, Debug)] -pub(crate) struct HighwayProtocol +pub(crate) struct HighwayProtocol where - I: DataSize, C: Context, { /// Incoming blocks we can't add yet because we are waiting for validation. 
- pending_values: HashMap<::Hash, Vec>>, + pending_values: HashMap, HashSet<(ValidVertex, NodeId)>>, finality_detector: FinalityDetector, highway: Highway, - /// A tracker for whether we are keeping up with the current round exponent or not. + /// A tracker for whether we are keeping up with the current round length or not. round_success_meter: RoundSuccessMeter, - synchronizer: Synchronizer, + synchronizer: Synchronizer, pvv_cache: HashMap, PreValidatedVertex>, evidence_only: bool, - /// The panorama snapshot. This is updated periodically, and if it does not change for too - /// long, an alert is raised. - last_panorama: Panorama, - /// If the current era's protocol state has not progressed for this long, return - /// `ProtocolOutcome::StandstillAlert`. - standstill_timeout: TimeDiff, - /// Log inactive or faulty validators periodically, with this interval. - log_participation_interval: TimeDiff, + config: config::Config, } -impl HighwayProtocol { +impl HighwayProtocol { /// Creates a new boxed `HighwayProtocol` instance. 
#[allow(clippy::too_many_arguments, clippy::type_complexity)] pub(crate) fn new_boxed( instance_id: C::InstanceId, validator_stakes: BTreeMap, - slashed: &HashSet, - protocol_config: &ProtocolConfig, + faulty: &HashSet, + inactive: &HashSet, + chainspec: &Chainspec, config: &Config, - prev_cp: Option<&dyn ConsensusProtocol>, + prev_cp: Option<&dyn ConsensusProtocol>, era_start_time: Timestamp, seed: u64, now: Timestamp, - ) -> (Box>, ProtocolOutcomes) { + protocol_state_file: Option, + ) -> (Box>, ProtocolOutcomes) { let validators_count = validator_stakes.len(); - let sum_stakes: U512 = validator_stakes.iter().map(|(_, stake)| *stake).sum(); - assert!( - !sum_stakes.is_zero(), - "cannot start era with total weight 0" + let validators = protocols::common::validators::(faulty, inactive, validator_stakes); + let highway_config = &chainspec.highway_config; + let ftt = protocols::common::ftt::( + chainspec.core_config.finality_threshold_fraction, + &validators, ); - // For Highway, we need u64 weights. Scale down by sum / u64::MAX, rounded up. - // If we round up the divisor, the resulting sum is guaranteed to be <= u64::MAX. - let scaling_factor = (sum_stakes + U512::from(u64::MAX) - 1) / U512::from(u64::MAX); - let scale_stake = |(key, stake): (C::ValidatorId, U512)| { - (key, AsPrimitive::::as_(stake / scaling_factor)) - }; - let mut validators: Validators = - validator_stakes.into_iter().map(scale_stake).collect(); - - for vid in slashed { - validators.ban(vid); - } - let highway_config = &protocol_config.highway_config; - - let total_weight = u128::from(validators.total_weight()); - let ftt_fraction = highway_config.finality_threshold_fraction; - assert!( - ftt_fraction < 1.into(), - "finality threshold must be less than 100%" - ); - #[allow(clippy::integer_arithmetic)] // FTT is less than 1, so this can't overflow. 
- let ftt = total_weight * *ftt_fraction.numer() as u128 / *ftt_fraction.denom() as u128; - let ftt = (ftt as u64).into(); + let minimum_round_length = chainspec + .core_config + .minimum_block_time + .max(TimeDiff::from_millis(1)); + // The maximum round exponent x is such that 2^x * m is at most M, where m and M are min + // and max round length. So x is the floor of log_2(M / m). Thus the ceiling of + // log_2(M / m + 1) is always x + 1. + #[allow(clippy::arithmetic_side_effects)] // minimum_round_length is guaranteed to be > 0. + let maximum_round_exponent = (highway_config.maximum_round_length / minimum_round_length) + .saturating_add(1) + .next_power_of_two() + .trailing_zeros() + .saturating_sub(1) as u8; + // Doesn't overflow since it's at most highway_config.maximum_round_length. + #[allow(clippy::arithmetic_side_effects)] + let maximum_round_length = + TimeDiff::from_millis(minimum_round_length.millis() << maximum_round_exponent); let round_success_meter = prev_cp - .and_then(|cp| cp.as_any().downcast_ref::>()) - .map(|highway_proto| highway_proto.next_era_round_succ_meter(era_start_time)) + .and_then(|cp| cp.as_any().downcast_ref::>()) + .map(|highway_proto| highway_proto.next_era_round_succ_meter(era_start_time.max(now))) .unwrap_or_else(|| { RoundSuccessMeter::new( - highway_config.minimum_round_exponent, - highway_config.minimum_round_exponent, - highway_config.maximum_round_exponent, - era_start_time, + minimum_round_length, + minimum_round_length, + maximum_round_length, + era_start_time.max(now), config.into(), ) }); - // This will return the minimum round exponent if we just initialized the meter, i.e. if + // This will return the minimum round length if we just initialized the meter, i.e. if // there was no previous consensus instance or it had no round success meter. 
- let init_round_exp = round_success_meter.new_exponent(); + let init_round_len = round_success_meter.new_length(); info!( - %init_round_exp, + %init_round_len, "initializing Highway instance", ); // Allow about as many units as part of evidence for conflicting endorsements as we expect // a validator to create during an era. After that, they can endorse two conflicting forks - // without getting slashed. - let min_round_len = state::round_len(highway_config.minimum_round_exponent); - let min_rounds_per_era = protocol_config - .minimum_era_height - .max((TimeDiff::from(1) + protocol_config.era_duration) / min_round_len); - let endorsement_evidence_limit = min_rounds_per_era + // without getting faulty. + let max_rounds_per_era = max_rounds_per_era( + chainspec.core_config.minimum_era_height, + chainspec.core_config.era_duration, + minimum_round_length, + ); + let endorsement_evidence_limit = max_rounds_per_era .saturating_mul(2) .min(MAX_ENDORSEMENT_EVIDENCE_LIMIT); let params = Params::new( seed, - BLOCK_REWARD, - (highway_config.reduced_reward_multiplier * BLOCK_REWARD).to_integer(), - highway_config.minimum_round_exponent, - highway_config.maximum_round_exponent, - init_round_exp, - protocol_config.minimum_era_height, + minimum_round_length, + maximum_round_length, + init_round_len, + chainspec.core_config.minimum_era_height, era_start_time, - era_start_time + protocol_config.era_duration, + era_start_time.saturating_add(chainspec.core_config.era_duration), endorsement_evidence_limit, ); let outcomes = Self::initialize_timers(now, era_start_time, &config.highway); - let highway = Highway::new(instance_id, validators, params); - let last_panorama = highway.state().panorama().clone(); + let highway = Highway::new(instance_id, validators, params, protocol_state_file); let hw_proto = Box::new(HighwayProtocol { pending_values: HashMap::new(), finality_detector: FinalityDetector::new(ftt), highway, round_success_meter, - synchronizer: Synchronizer::new( - 
config.highway.pending_vertex_timeout, - config.highway.request_latest_state_timeout, - validators_count, - instance_id, - ), + synchronizer: Synchronizer::new(validators_count, instance_id), pvv_cache: Default::default(), evidence_only: false, - last_panorama, - standstill_timeout: config.highway.standstill_timeout, - log_participation_interval: config.highway.log_participation_interval, + config: config.highway.clone(), }); (hw_proto, outcomes) @@ -210,26 +189,28 @@ impl HighwayProtocol { fn initialize_timers( now: Timestamp, era_start_time: Timestamp, - highway_config: &HighwayConfig, - ) -> ProtocolOutcomes { - vec![ - ProtocolOutcome::ScheduleTimer( - now + highway_config.pending_vertex_timeout, - TIMER_ID_PURGE_VERTICES, - ), - ProtocolOutcome::ScheduleTimer( - now.max(era_start_time) + highway_config.log_participation_interval, + config: &config::Config, + ) -> ProtocolOutcomes { + let mut outcomes = vec![ProtocolOutcome::ScheduleTimer( + now.saturating_add(config.pending_vertex_timeout), + TIMER_ID_PURGE_VERTICES, + )]; + if let Some(interval) = config.log_participation_interval { + outcomes.push(ProtocolOutcome::ScheduleTimer( + now.max(era_start_time).saturating_add(interval), TIMER_ID_LOG_PARTICIPATION, - ), - ProtocolOutcome::ScheduleTimer( - now.max(era_start_time) + highway_config.standstill_timeout, - TIMER_ID_STANDSTILL_ALERT, - ), - ProtocolOutcome::ScheduleTimer(now + TimeDiff::from(5_000), TIMER_ID_SYNCHRONIZER_LOG), - ] + )); + } + if let Some(interval) = config.log_synchronizer_interval { + outcomes.push(ProtocolOutcome::ScheduleTimer( + now.saturating_add(interval), + TIMER_ID_SYNCHRONIZER_LOG, + )); + } + outcomes } - fn process_av_effects(&mut self, av_effects: E, now: Timestamp) -> ProtocolOutcomes + fn process_av_effects(&mut self, av_effects: E, now: Timestamp) -> ProtocolOutcomes where E: IntoIterator>, { @@ -239,10 +220,11 @@ impl HighwayProtocol { .collect() } - fn process_av_effect(&mut self, effect: AvEffect, now: Timestamp) -> 
ProtocolOutcomes { + fn process_av_effect(&mut self, effect: AvEffect, now: Timestamp) -> ProtocolOutcomes { match effect { AvEffect::NewVertex(vv) => { - self.calculate_round_exponent(&vv, now); + self.log_unit_size(vv.inner(), "sending new unit"); + self.calculate_round_length(&vv, now); self.process_new_vertex(vv) } AvEffect::ScheduleTimer(timestamp) => { @@ -251,19 +233,8 @@ impl HighwayProtocol { TIMER_ID_ACTIVE_VALIDATOR, )] } - AvEffect::RequestNewBlock { - block_context, - fork_choice, - } => { - let parent_value = fork_choice - .as_ref() - .map(|hash| self.highway.state().block(hash).value.clone()); - let past_values = self.non_finalized_values(fork_choice).cloned().collect(); - vec![ProtocolOutcome::CreateNewBlock { - block_context, - past_values, - parent_value, - }] + AvEffect::RequestNewBlock(block_context, expiry) => { + vec![ProtocolOutcome::CreateNewBlock(block_context, expiry)] } AvEffect::WeAreFaulty(fault) => { error!("this validator is faulty: {:?}", fault); @@ -272,7 +243,7 @@ impl HighwayProtocol { } } - fn process_new_vertex(&mut self, vv: ValidVertex) -> ProtocolOutcomes { + fn process_new_vertex(&mut self, vv: ValidVertex) -> ProtocolOutcomes { let mut outcomes = Vec::new(); if let Vertex::Evidence(ev) = vv.inner() { let v_id = self @@ -284,12 +255,14 @@ impl HighwayProtocol { outcomes.push(ProtocolOutcome::NewEvidence(v_id)); } let msg = HighwayMessage::NewVertex(vv.into()); - outcomes.push(ProtocolOutcome::CreatedGossipMessage(msg.serialize())); + outcomes.push(ProtocolOutcome::CreatedGossipMessage( + SerializedMessage::from_message(&msg), + )); outcomes.extend(self.detect_finality()); outcomes } - fn detect_finality(&mut self) -> ProtocolOutcomes { + fn detect_finality(&mut self) -> ProtocolOutcomes { let faulty_weight = match self.finality_detector.run(&self.highway) { Ok(iter) => return iter.map(ProtocolOutcome::FinalizedBlock).collect(), Err(FttExceeded(weight)) => weight.0, @@ -306,9 +279,12 @@ impl HighwayProtocol { /// Adds the 
given vertices to the protocol state, if possible, or requests missing /// dependencies or validation. Recursively schedules events to add everything that is /// unblocked now. - fn add_vertex(&mut self, now: Timestamp) -> ProtocolOutcomes { - let (maybe_pending_vertex, mut outcomes) = - self.synchronizer.pop_vertex_to_add(&self.highway); + fn add_vertex(&mut self, now: Timestamp) -> ProtocolOutcomes { + let (maybe_pending_vertex, mut outcomes) = self.synchronizer.pop_vertex_to_add( + &self.highway, + &self.pending_values, + self.config.max_requests_for_vertex, + ); let pending_vertex = match maybe_pending_vertex { None => return outcomes, Some(pending_vertex) => pending_vertex, @@ -318,6 +294,7 @@ impl HighwayProtocol { // validator. Continue processing the unit so that it can be added to the state. if self.highway.is_doppelganger_vertex(pending_vertex.vertex()) { error!( + vertex = ?pending_vertex.vertex(), "received vertex from a doppelganger. \ Are you running multiple nodes with the same validator key?", ); @@ -327,13 +304,13 @@ impl HighwayProtocol { // If the vertex is invalid, drop all vertices that depend on this one, and disconnect from // the faulty senders. 
- let sender = pending_vertex.sender().clone(); + let sender = *pending_vertex.sender(); let vv = match self.highway.validate_vertex(pending_vertex.into()) { Ok(vv) => vv, Err((pvv, err)) => { info!(?pvv, ?err, "invalid vertex"); let vertices = vec![pvv.inner().id()]; - let faulty_senders = self.synchronizer.drop_dependent_vertices(vertices); + let faulty_senders = self.synchronizer.invalid_vertices(vertices); outcomes.extend(faulty_senders.into_iter().map(ProtocolOutcome::Disconnect)); return outcomes; } @@ -346,38 +323,25 @@ impl HighwayProtocol { { let panorama = &swunit.wire_unit().panorama; let fork_choice = self.highway.state().fork_choice(panorama); - let parent_value = - fork_choice.map(|hash| self.highway.state().block(hash).value.hash()); - // The timestamp and parent are currently duplicated: The information in the consensus - // value must match the information in the Highway DAG. - if timestamp != value.timestamp() || parent_value.as_ref() != value.parent() { - info!( - timestamp = %value.timestamp(), consensus_timestamp = %timestamp, - parent = ?value.parent(), consensus_parent = ?parent_value, - "consensus value does not match vertex" - ); - let vertices = vec![vv.inner().id()]; - let faulty_senders = self.synchronizer.drop_dependent_vertices(vertices); - outcomes.extend(faulty_senders.into_iter().map(ProtocolOutcome::Disconnect)); - return outcomes; - } if value.needs_validation() { self.log_proposal(vertex, "requesting proposal validation"); let ancestor_values = self.ancestors(fork_choice).cloned().collect(); - let consensus_value = value.clone(); - self.pending_values - .entry(value.hash()) + let block_context = BlockContext::new(timestamp, ancestor_values); + let proposed_block = ProposedBlock::new(value.clone(), block_context); + if self + .pending_values + .entry(proposed_block.clone()) .or_default() - .push(vv); - outcomes.push(ProtocolOutcome::ValidateConsensusValue { - sender, - consensus_value, - ancestor_values, - }); + .insert((vv, 
sender)) + { + outcomes.push(ProtocolOutcome::ValidateConsensusValue { + sender, + proposed_block, + }); + } return outcomes; - } else { - self.log_proposal(vertex, "proposal does not need validation"); } + self.log_proposal(vertex, "proposal does not need validation"); } // Either consensus value doesn't need validation or it's not a proposal. @@ -391,10 +355,10 @@ impl HighwayProtocol { outcomes } - fn calculate_round_exponent(&mut self, vv: &ValidVertex, now: Timestamp) { - let new_round_exp = self + fn calculate_round_length(&mut self, vv: &ValidVertex, now: Timestamp) { + let new_round_len = self .round_success_meter - .calculate_new_exponent(self.highway.state()); + .calculate_new_length(self.highway.state()); // If the vertex contains a proposal, register it in the success meter. // It's important to do this _after_ the calculation above - otherwise we might try to // register the proposal before the meter is aware that a new round has started, and it @@ -408,49 +372,51 @@ impl HighwayProtocol { error!(?vertex, "proposal without unit hash and timestamp"); } } - self.highway.set_round_exp(new_round_exp); + self.highway.set_round_len(new_round_len); } - fn add_valid_vertex(&mut self, vv: ValidVertex, now: Timestamp) -> ProtocolOutcomes { + fn add_valid_vertex(&mut self, vv: ValidVertex, now: Timestamp) -> ProtocolOutcomes { if self.evidence_only && !vv.inner().is_evidence() { error!(vertex = ?vv.inner(), "unexpected vertex in evidence-only mode"); return vec![]; } + if self.highway.has_vertex(vv.inner()) { + return vec![]; + } + let mut outcomes = ProtocolOutcomes::new(); + if let (Some(value), Some(unit)) = (vv.inner().value(), vv.inner().unit()) { + // We are adding a proposed block to the protocol state, so we might use it as an + // ancestor in the future. Notify the reactor so we don't re-propose those deploys. 
+ let panorama = &unit.wire_unit().panorama; + let fork_choice = self.highway.state().fork_choice(panorama); + let ancestor_values = self.ancestors(fork_choice).cloned().collect(); + let block_context = BlockContext::new(unit.wire_unit().timestamp, ancestor_values); + let proposed_block = ProposedBlock::new(value.clone(), block_context); + outcomes.push(ProtocolOutcome::HandledProposedBlock(proposed_block)); + } else if let Some(hash) = vv.inner().unit_hash() { + trace!(?hash, "adding unit to the protocol state"); + } else { + trace!(vertex=?vv.inner(), "adding vertex to the protocol state"); + } + self.log_unit_size(vv.inner(), "adding new unit to the protocol state"); self.log_proposal(vv.inner(), "adding valid proposal to the protocol state"); let vertex_id = vv.inner().id(); - // Check whether we should change the round exponent. + // Check whether we should change the round length. // It's important to do it before the vertex is added to the state - this way if the last // round has finished, we now have all the vertices from that round in the state, and no // newer ones. - self.calculate_round_exponent(&vv, now); + self.calculate_round_length(&vv, now); let av_effects = self.highway.add_valid_vertex(vv, now); // Once vertex is added to the state, we can remove it from the cache. self.pvv_cache.remove(&vertex_id); - self.process_av_effects(av_effects, now) + outcomes.extend(self.process_av_effects(av_effects, now)); + outcomes } /// Returns an instance of `RoundSuccessMeter` for the new era: resetting the counters where /// appropriate. - fn next_era_round_succ_meter(&self, era_start_timestamp: Timestamp) -> RoundSuccessMeter { - self.round_success_meter.next_era(era_start_timestamp) - } - - /// Returns an iterator over all the values that are expected to become finalized, but are not - /// finalized yet. 
- fn non_finalized_values( - &self, - mut fork_choice: Option, - ) -> impl Iterator { - let last_finalized = self.finality_detector.last_finalized(); - iter::from_fn(move || { - if fork_choice.as_ref() == last_finalized { - return None; - } - let maybe_block = fork_choice.map(|bhash| self.highway.state().block(&bhash)); - let value = maybe_block.map(|block| &block.value); - fork_choice = maybe_block.and_then(|block| block.parent().cloned()); - value - }) + fn next_era_round_succ_meter(&self, timestamp: Timestamp) -> RoundSuccessMeter { + self.round_success_meter.next_era(timestamp) } /// Returns an iterator over all the values that are in parents of the given block. @@ -469,9 +435,21 @@ impl HighwayProtocol { /// Prints a log statement listing the inactive and faulty validators. fn log_participation(&self) { - let instance_id = self.highway.instance_id(); let participation = participation::Participation::new(&self.highway); - info!(?participation, %instance_id, "validator participation"); + info!(?participation, "validator participation"); + } + + /// Logs the vertex' (network) serialized size. + fn log_unit_size(&self, vertex: &Vertex, log_msg: &str) { + if self.config.log_unit_sizes { + if let Some(hash) = vertex.unit_hash() { + let size = + SerializedMessage::from_message(&HighwayMessage::NewVertex(vertex.clone())) + .into_raw() + .len(); + info!(size, %hash, "{}", log_msg); + } + } } /// Returns whether the switch block has already been finalized. @@ -479,23 +457,27 @@ impl HighwayProtocol { let is_switch = |block_hash: &C::Hash| self.highway.state().is_terminal_block(block_hash); self.finality_detector .last_finalized() - .map_or(false, is_switch) + .is_some_and(is_switch) } - /// Returns a `StandstillAlert` if no progress was made; otherwise schedules the next check. - fn handle_standstill_alert_timer(&mut self, now: Timestamp) -> ProtocolOutcomes { + /// Request the latest state from a random peer. 
+ fn handle_request_state_timer(&mut self, now: Timestamp) -> ProtocolOutcomes { if self.evidence_only || self.finalized_switch_block() { return vec![]; // Era has ended. No further progress is expected. } - if self.last_panorama == *self.highway.state().panorama() { - return vec![ProtocolOutcome::StandstillAlert]; // No progress within the timeout. + debug!( + instance_id = ?self.highway.instance_id(), + "requesting latest state from random peer", + ); + // Request latest state from a peer and schedule the next request. + let mut outcomes = self.latest_state_request(); + if let Some(interval) = self.config.request_state_interval { + outcomes.push(ProtocolOutcome::ScheduleTimer( + now.saturating_add(interval), + TIMER_ID_REQUEST_STATE, + )); } - // Record the current panorama and schedule the next standstill check. - self.last_panorama = self.highway.state().panorama().clone(); - vec![ProtocolOutcome::ScheduleTimer( - now + self.standstill_timeout, - TIMER_ID_STANDSTILL_ALERT, - )] + outcomes } /// Prints a log message if the vertex is a proposal unit. Otherwise returns `false`. @@ -511,16 +493,62 @@ impl HighwayProtocol { return true; }; info!( - %hash, + ?hash, ?creator, creator_index = wire_unit.creator.0, timestamp = %wire_unit.timestamp, round_exp = wire_unit.round_exp, + seq_number = wire_unit.seq_number, "{}", msg ); true } + // Logs the details about the received vertex. 
+ fn log_received_vertex(&self, vertex: &Vertex) { + match vertex { + Vertex::Unit(swu) => { + let creator = if let Some(creator) = vertex + .creator() + .and_then(|vid| self.highway.validators().id(vid)) + { + creator + } else { + error!(?vertex, "invalid creator"); + return; + }; + + let wire_unit = swu.wire_unit(); + let hash = swu.hash(); + + if vertex.is_proposal() { + info!( + ?hash, + ?creator, + creator_index = wire_unit.creator.0, + timestamp = %wire_unit.timestamp, + round_exp = wire_unit.round_exp, + seq_number = wire_unit.seq_number, + "received a proposal" + ); + } else { + trace!( + ?hash, + ?creator, + creator_index = wire_unit.creator.0, + timestamp = %wire_unit.timestamp, + round_exp = wire_unit.round_exp, + seq_number = wire_unit.seq_number, + "received a non-proposal unit" + ); + }; + } + Vertex::Evidence(evidence) => trace!(?evidence, "received an evidence"), + Vertex::Endorsements(endorsement) => trace!(?endorsement, "received an endorsement"), + Vertex::Ping(ping) => trace!(?ping, "received ping"), + } + } + /// Prevalidates the vertex but checks the cache for previously validated vertices. /// Avoids multiple validation of the same vertex. fn pre_validate_vertex( @@ -536,50 +564,194 @@ impl HighwayProtocol { Ok(pvv) } - /// Creates a message to be gossiped that sends the validator's panorama. - fn latest_panorama_request(&self) -> ProtocolOutcomes { - trace!(instance_id=?self.highway.instance_id(), "creating latest state request"); - let request = HighwayMessage::LatestStateRequest(self.highway.state().panorama().clone()); - vec![ProtocolOutcome::CreatedGossipMessage( - (&request).serialize(), + /// Creates a message to send our panorama to a random peer. 
+ fn latest_state_request(&self) -> ProtocolOutcomes { + let request: HighwayMessage = HighwayMessage::LatestStateRequest( + IndexPanorama::from_panorama(self.highway.state().panorama(), self.highway.state()), + ); + vec![ProtocolOutcome::CreatedMessageToRandomPeer( + SerializedMessage::from_message(&request), )] } -} -#[derive(Serialize, Deserialize, Debug)] -#[serde(bound( - serialize = "C::Hash: Serialize", - deserialize = "C::Hash: Deserialize<'de>", -))] -enum HighwayMessage { - NewVertex(Vertex), - RequestDependency(Dependency), - LatestStateRequest(Panorama), + /// Creates a batch of dependency requests if the peer has more units by the validator `vidx` + /// than we do; otherwise sends a batch of missing units to the peer. + fn batch_request( + &self, + rng: &mut NodeRng, + vid: ValidatorIndex, + our_next_seq: u64, + their_next_seq: u64, + ) -> Vec> { + let state = self.highway.state(); + if our_next_seq == their_next_seq { + return vec![]; + } + if our_next_seq < their_next_seq { + // We're behind. Request missing vertices. + (our_next_seq..their_next_seq) + .take(self.config.max_request_batch_size) + .map(|unit_seq_number| { + let uuid = rng.next_u64(); + debug!(?uuid, ?vid, ?unit_seq_number, "requesting dependency"); + HighwayMessage::RequestDependencyByHeight { + uuid, + vid, + unit_seq_number, + } + }) + .collect() + } else { + // We're ahead. 
+ match state.panorama().get(vid) { + None => { + warn!(?vid, "received a request for non-existing validator"); + vec![] + } + Some(observation) => match observation { + Observation::None => { + warn!( + ?vid, + our_next_seq, + ?observation, + "expected unit for validator but found none" + ); + vec![] + } + Observation::Faulty => { + let ev = match state.maybe_evidence(vid) { + Some(ev) => ev.clone(), + None => { + warn!( + ?vid, instance_id=?self.highway.instance_id(), + "panorama marked validator as faulty but no evidence was found" + ); + return vec![]; + } + }; + vec![HighwayMessage::NewVertex(Vertex::Evidence(ev))] + } + Observation::Correct(hash) => (their_next_seq..our_next_seq) + .take(self.config.max_request_batch_size) + .filter_map(|seq_num| { + let unit = state.find_in_swimlane(hash, seq_num).unwrap(); + state + .wire_unit(unit, *self.highway.instance_id()) + .map(|swu| HighwayMessage::NewVertex(Vertex::Unit(swu))) + }) + .collect(), + }, + } + } + } + + /// Grant read-only access to the internal `Highway` instance. + #[inline] + pub(crate) fn highway(&self) -> &Highway { + &self.highway + } } -impl HighwayMessage { - fn serialize(&self) -> Vec { - bincode::serialize(self).expect("should serialize message") +#[allow(clippy::arithmetic_side_effects)] +mod relaxed { + // This module exists solely to exempt the `EnumDiscriminants` macro generated code from the + // module-wide `clippy::arithmetic_side_effects` lint. 
+ + use datasize::DataSize; + use serde::{Deserialize, Serialize}; + use strum::EnumDiscriminants; + + use crate::components::consensus::{ + highway_core::{ + highway::{Dependency, Vertex}, + state::IndexPanorama, + }, + traits::{ConsensusNetworkMessage, Context}, + utils::ValidatorIndex, + }; + + #[derive( + DataSize, Clone, Serialize, Deserialize, Debug, PartialEq, Eq, Hash, EnumDiscriminants, + )] + #[serde(bound( + serialize = "C::Hash: Serialize", + deserialize = "C::Hash: Deserialize<'de>", + ))] + #[strum_discriminants(derive(strum::EnumIter))] + pub(crate) enum HighwayMessage + where + C: Context, + { + NewVertex(Vertex), + // A dependency request. u64 is a random UUID identifying the request. + RequestDependency(u64, Dependency), + RequestDependencyByHeight { + uuid: u64, + vid: ValidatorIndex, + unit_seq_number: u64, + }, + LatestStateRequest(IndexPanorama), + } + + impl ConsensusNetworkMessage for HighwayMessage {} +} +pub(crate) use relaxed::{HighwayMessage, HighwayMessageDiscriminants}; + +mod specimen_support { + use crate::{ + components::consensus::ClContext, + utils::specimen::{largest_variant, Cache, LargestSpecimen, SizeEstimator}, + }; + + use super::{HighwayMessage, HighwayMessageDiscriminants}; + + impl LargestSpecimen for HighwayMessage { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + largest_variant::(estimator, |variant| { + match variant { + HighwayMessageDiscriminants::NewVertex => HighwayMessage::NewVertex( + LargestSpecimen::largest_specimen(estimator, cache), + ), + HighwayMessageDiscriminants::RequestDependency => { + HighwayMessage::RequestDependency( + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + ) + } + HighwayMessageDiscriminants::RequestDependencyByHeight => { + HighwayMessage::RequestDependencyByHeight { + uuid: LargestSpecimen::largest_specimen(estimator, cache), + vid: LargestSpecimen::largest_specimen(estimator, cache), + unit_seq_number: 
LargestSpecimen::largest_specimen(estimator, cache), + } + } + HighwayMessageDiscriminants::LatestStateRequest => { + HighwayMessage::LatestStateRequest(LargestSpecimen::largest_specimen( + estimator, cache, + )) + } + } + }) + } } } -impl ConsensusProtocol for HighwayProtocol +impl ConsensusProtocol for HighwayProtocol where - I: NodeIdT, C: Context + 'static, { fn handle_message( &mut self, - sender: I, - msg: Vec, + rng: &mut NodeRng, + sender: NodeId, + msg: SerializedMessage, now: Timestamp, - ) -> ProtocolOutcomes { - match bincode::deserialize(msg.as_slice()) { - Err(err) => vec![ProtocolOutcome::InvalidIncomingMessage( - msg, - sender, - err.into(), - )], + ) -> ProtocolOutcomes { + match msg.deserialize_incoming() { + Err(err) => { + warn!(?err, "could not deserialize highway message"); + vec![ProtocolOutcome::Disconnect(sender)] + } Ok(HighwayMessage::NewVertex(v)) if self.highway.has_vertex(&v) || (self.evidence_only && !v.is_evidence()) => { @@ -600,16 +772,12 @@ where let pvv = match self.pre_validate_vertex(v) { Ok(pvv) => pvv, Err((_, err)) => { - trace!("received an invalid vertex"); // drop the vertices that might have depended on this one - let faulty_senders = self.synchronizer.drop_dependent_vertices(vec![v_id]); - return iter::once(ProtocolOutcome::InvalidIncomingMessage( - msg, - sender, - err.into(), - )) - .chain(faulty_senders.into_iter().map(ProtocolOutcome::Disconnect)) - .collect(); + let faulty_senders = self.synchronizer.invalid_vertices(vec![v_id]); + warn!(?err, ?sender, ?faulty_senders, "invalid incoming message"); + return iter::once(ProtocolOutcome::Disconnect(sender)) + .chain(faulty_senders.into_iter().map(ProtocolOutcome::Disconnect)) + .collect(); } }; // Keep track of whether the prevalidated vertex was from an equivocator @@ -625,7 +793,7 @@ where match pvv.timestamp() { Some(timestamp) - if timestamp > now + self.synchronizer.pending_vertex_timeout() => + if timestamp > now.saturating_add(self.config.pending_vertex_timeout) 
=> { trace!("received a vertex with a timestamp far in the future; dropping"); vec![] @@ -641,200 +809,227 @@ where _ => { // If it's not from an equivocator or it is a transitive dependency, add the // vertex - if !self.log_proposal(pvv.inner(), "received a proposal") { - trace!("received a valid vertex"); - } + self.log_received_vertex(pvv.inner()); self.synchronizer.schedule_add_vertex(sender, pvv, now) } } } - Ok(HighwayMessage::RequestDependency(dep)) => { - trace!("received a request for a dependency"); + Ok(HighwayMessage::RequestDependency(uuid, dep)) => { + trace!(?uuid, dependency=?dep, "received a request for a dependency"); match self.highway.get_dependency(&dep) { GetDepOutcome::None => { - info!(?dep, ?sender, "requested dependency doesn't exist"); + info!(?dep, peer_id=?sender, "requested dependency doesn't exist"); vec![] } GetDepOutcome::Evidence(vid) => { vec![ProtocolOutcome::SendEvidence(sender, vid)] } - // TODO: Should this be done via a gossip service? GetDepOutcome::Vertex(vv) => vec![ProtocolOutcome::CreatedTargetedMessage( - HighwayMessage::NewVertex(vv.into()).serialize(), + SerializedMessage::from_message(&HighwayMessage::NewVertex(vv.into())), sender, )], } } - Ok(HighwayMessage::LatestStateRequest(panorama)) => { + Ok(HighwayMessage::RequestDependencyByHeight { + uuid, + vid, + unit_seq_number, + }) => { + debug!( + ?uuid, + ?vid, + ?unit_seq_number, + "received a request for a dependency" + ); + match self.highway.get_dependency_by_index(vid, unit_seq_number) { + GetDepOutcome::None => { + info!( + ?vid, + ?unit_seq_number, + ?sender, + "requested dependency doesn't exist" + ); + vec![] + } + GetDepOutcome::Evidence(vid) => { + vec![ProtocolOutcome::SendEvidence(sender, vid)] + } + GetDepOutcome::Vertex(vv) => { + vec![ProtocolOutcome::CreatedTargetedMessage( + SerializedMessage::from_message(&HighwayMessage::NewVertex(vv.into())), + sender, + )] + } + } + } + Ok(HighwayMessage::LatestStateRequest(their_index_panorama)) => { 
trace!("received a request for the latest state"); let state = self.highway.state(); - let create_message = - |observations: ((ValidatorIndex, &Observation), &Observation)| { - let vid = observations.0 .0; - let observations = (observations.0 .1, observations.1); - match observations { - (obs0, obs1) if obs0 == obs1 => None, - - (Observation::None, Observation::None) => None, + let create_message = |((vid, our_obs), their_obs): ( + (ValidatorIndex, &IndexObservation), + &IndexObservation, + )| { + match (*our_obs, *their_obs) { + (our_obs, their_obs) if our_obs == their_obs => vec![], - (Observation::Faulty, _) => state.maybe_evidence(vid).map(|evidence| { + (IndexObservation::Faulty, _) => state + .maybe_evidence(vid) + .map(|evidence| { HighwayMessage::NewVertex(Vertex::Evidence(evidence.clone())) - }), - - (_, Observation::Faulty) => { - Some(HighwayMessage::RequestDependency(Dependency::Evidence(vid))) - } - - (Observation::None, Observation::Correct(hash)) => { - Some(HighwayMessage::RequestDependency(Dependency::Unit(*hash))) - } - - (Observation::Correct(hash), Observation::None) => state - .wire_unit(hash, *self.highway.instance_id()) - .map(|swu| HighwayMessage::NewVertex(Vertex::Unit(swu))), - - (Observation::Correct(our_hash), Observation::Correct(their_hash)) => { - if state.has_unit(their_hash) - && state.panorama().sees_correct(state, their_hash) - { - state - .wire_unit(our_hash, *self.highway.instance_id()) - .map(|swu| HighwayMessage::NewVertex(Vertex::Unit(swu))) - } else if !state.has_unit(their_hash) { - Some(HighwayMessage::RequestDependency(Dependency::Unit( - *their_hash, - ))) - } else { - None - } - } + }) + .into_iter() + .collect(), + + (_, IndexObservation::Faulty) => { + let dependency = Dependency::Evidence(vid); + let uuid = rng.next_u64(); + debug!(?uuid, "requesting evidence"); + vec![HighwayMessage::RequestDependency(uuid, dependency)] } - }; - state - .panorama() + ( + IndexObservation::NextSeq(our_next_seq), + 
IndexObservation::NextSeq(their_next_seq), + ) => self.batch_request(rng, vid, our_next_seq, their_next_seq), + } + }; + + IndexPanorama::from_panorama(state.panorama(), state) .enumerate() - .zip(&panorama) - .filter_map(create_message) - .map(|msg| { - ProtocolOutcome::CreatedTargetedMessage(msg.serialize(), sender.clone()) + .zip(&their_index_panorama) + .map(create_message) + .flat_map(|msgs| { + msgs.into_iter().map(|msg| { + ProtocolOutcome::CreatedTargetedMessage( + SerializedMessage::from_message(&msg), + sender, + ) + }) }) .collect() } } } - fn handle_timer(&mut self, now: Timestamp, timer_id: TimerId) -> ProtocolOutcomes { + fn handle_request_message( + &mut self, + _rng: &mut NodeRng, + sender: NodeId, + _msg: SerializedMessage, + _now: Timestamp, + ) -> (ProtocolOutcomes, Option) { + info!(?sender, "invalid incoming request"); + (vec![ProtocolOutcome::Disconnect(sender)], None) + } + + fn handle_timer( + &mut self, + timestamp: Timestamp, + _now: Timestamp, + timer_id: TimerId, + _rng: &mut NodeRng, + ) -> ProtocolOutcomes { match timer_id { TIMER_ID_ACTIVE_VALIDATOR => { - let effects = self.highway.handle_timer(now); - self.process_av_effects(effects, now) + let effects = self.highway.handle_timer(timestamp); + self.process_av_effects(effects, timestamp) } TIMER_ID_VERTEX_WITH_FUTURE_TIMESTAMP => { - self.synchronizer.add_past_due_stored_vertices(now) + self.synchronizer.add_past_due_stored_vertices(timestamp) } TIMER_ID_PURGE_VERTICES => { - self.synchronizer.purge_vertices(now); + let oldest = timestamp.saturating_sub(self.config.pending_vertex_timeout); + self.synchronizer.purge_vertices(oldest); self.pvv_cache.clear(); - let next_time = now + self.synchronizer.pending_vertex_timeout(); + let next_time = timestamp.saturating_add(self.config.pending_vertex_timeout); vec![ProtocolOutcome::ScheduleTimer(next_time, timer_id)] } - TIMER_ID_LOG_PARTICIPATION => { - self.log_participation(); - if !self.evidence_only && !self.finalized_switch_block() { - 
let next_time = now + self.log_participation_interval; - vec![ProtocolOutcome::ScheduleTimer(next_time, timer_id)] - } else { - vec![] + TIMER_ID_LOG_PARTICIPATION => match self.config.log_participation_interval { + Some(interval) if !self.evidence_only && !self.finalized_switch_block() => { + self.log_participation(); + vec![ProtocolOutcome::ScheduleTimer( + timestamp.saturating_add(interval), + timer_id, + )] } - } - TIMER_ID_STANDSTILL_ALERT => self.handle_standstill_alert_timer(now), + _ => vec![], + }, + TIMER_ID_REQUEST_STATE => self.handle_request_state_timer(timestamp), TIMER_ID_SYNCHRONIZER_LOG => { self.synchronizer.log_len(); - if !self.finalized_switch_block() { - let next_timer = Timestamp::now() + TimeDiff::from(5_000); - vec![ProtocolOutcome::ScheduleTimer(next_timer, timer_id)] - } else { - vec![] - } - } - TIMER_ID_PANORAMA_REQUEST => { - if !self.finalized_switch_block() { - let mut outcomes = self.latest_panorama_request(); - let next_timer = - Timestamp::now() + self.synchronizer.request_latest_state_timeout(); - outcomes.push(ProtocolOutcome::ScheduleTimer(next_timer, timer_id)); - outcomes - } else { - vec![] + match self.config.log_synchronizer_interval { + Some(interval) if !self.finalized_switch_block() => { + vec![ProtocolOutcome::ScheduleTimer( + timestamp.saturating_add(interval), + timer_id, + )] + } + _ => vec![], } } _ => unreachable!("unexpected timer ID"), } } - fn handle_is_current(&self) -> ProtocolOutcomes { + fn handle_is_current(&self, now: Timestamp) -> ProtocolOutcomes { // Request latest protocol state of the current era. - let mut outcomes = self.latest_panorama_request(); - outcomes.push(ProtocolOutcome::ScheduleTimer( - Timestamp::now() + self.synchronizer.request_latest_state_timeout(), - TIMER_ID_PANORAMA_REQUEST, - )); + let mut outcomes = self.latest_state_request(); + // If configured, schedule periodic latest state requests. 
+ if let Some(interval) = self.config.request_state_interval { + outcomes.push(ProtocolOutcome::ScheduleTimer( + now.saturating_add(interval), + TIMER_ID_REQUEST_STATE, + )); + } outcomes } - fn handle_action(&mut self, action_id: ActionId, now: Timestamp) -> ProtocolOutcomes { + fn handle_action(&mut self, action_id: ActionId, now: Timestamp) -> ProtocolOutcomes { match action_id { ACTION_ID_VERTEX => self.add_vertex(now), _ => unreachable!("unexpected action ID"), } } - fn propose( - &mut self, - value: C::ConsensusValue, - block_context: BlockContext, - now: Timestamp, - ) -> ProtocolOutcomes { + fn propose(&mut self, proposed_block: ProposedBlock, now: Timestamp) -> ProtocolOutcomes { + let (value, block_context) = proposed_block.destructure(); let effects = self.highway.propose(value, block_context); self.process_av_effects(effects, now) } fn resolve_validity( &mut self, - value: &C::ConsensusValue, + proposed_block: ProposedBlock, valid: bool, now: Timestamp, - ) -> ProtocolOutcomes { + ) -> ProtocolOutcomes { if valid { let mut outcomes = self .pending_values - .remove(&value.hash()) + .remove(&proposed_block) .into_iter() .flatten() - .flat_map(|vv| self.add_valid_vertex(vv, now)) + .flat_map(|(vv, _)| self.add_valid_vertex(vv, now)) .collect_vec(); outcomes.extend(self.synchronizer.remove_satisfied_deps(&self.highway)); outcomes.extend(self.detect_finality()); outcomes } else { - // TODO: Slash proposer? + // TODO: Report proposer as faulty? // Drop vertices dependent on the invalid value. 
- let dropped_vertices = self.pending_values.remove(&value.hash()); - warn!(?value, ?dropped_vertices, "consensus value is invalid"); + let dropped_vertices = self.pending_values.remove(&proposed_block); + warn!(?proposed_block, ?dropped_vertices, "proposal is invalid"); let dropped_vertex_ids = dropped_vertices .into_iter() .flatten() - .map(|vv| { + .map(|(vv, _)| { self.log_proposal(vv.inner(), "dropping invalid proposal"); vv.inner().id() }) .collect(); // recursively remove vertices depending on the dropped ones - let _faulty_senders = self - .synchronizer - .drop_dependent_vertices(dropped_vertex_ids); + let _faulty_senders = self.synchronizer.invalid_vertices(dropped_vertex_ids); // We don't disconnect from the faulty senders here: The block validator considers the // value "invalid" even if it just couldn't download the deploys, which could just be // because the original sender went offline. @@ -848,7 +1043,7 @@ where secret: C::ValidatorSecret, now: Timestamp, unit_hash_file: Option, - ) -> ProtocolOutcomes { + ) -> ProtocolOutcomes { let ftt = self.finality_detector.fault_tolerance_threshold(); let av_effects = self .highway @@ -877,7 +1072,7 @@ where self.highway.mark_faulty(vid); } - fn request_evidence(&self, sender: I, vid: &C::ValidatorId) -> ProtocolOutcomes { + fn send_evidence(&self, sender: NodeId, vid: &C::ValidatorId) -> ProtocolOutcomes { self.highway .validators() .get_index(vid) @@ -887,7 +1082,7 @@ where GetDepOutcome::Vertex(vv) => { let msg = HighwayMessage::NewVertex(vv.into()); Some(ProtocolOutcome::CreatedTargetedMessage( - msg.serialize(), + SerializedMessage::from_message(&msg), sender, )) } @@ -898,20 +1093,15 @@ where } /// Sets the pause status: While paused we don't create any new units, just pings. 
- fn set_paused(&mut self, paused: bool) { + fn set_paused(&mut self, paused: bool, _now: Timestamp) -> ProtocolOutcomes { self.highway.set_paused(paused); + vec![] } fn validators_with_evidence(&self) -> Vec<&C::ValidatorId> { self.highway.validators_with_evidence().collect() } - fn has_received_messages(&self) -> bool { - !self.highway.state().is_empty() - || !self.synchronizer.is_empty() - || !self.pending_values.is_empty() - } - fn as_any(&self) -> &dyn Any { self } @@ -928,3 +1118,19 @@ where self.highway.next_round_length() } } + +/// Maximum possible rounds in one era. +/// +/// It is the maximum of: +/// - The era duration divided by the minimum round length, that is the maximum number of blocks +/// that can fit within the duration of one era, +/// - The minimum era height, which is the minimum number of blocks for an era to be considered +/// complete. +pub fn max_rounds_per_era( + minimum_era_height: u64, + era_duration: TimeDiff, + minimum_round_length: TimeDiff, +) -> u64 { + #[allow(clippy::arithmetic_side_effects)] // minimum_round_length is guaranteed to be > 0. + minimum_era_height.max((era_duration.saturating_add(1)) / minimum_round_length) +} diff --git a/node/src/components/consensus/protocols/highway/config.rs b/node/src/components/consensus/protocols/highway/config.rs index c947feda4c..fbbf65c90d 100644 --- a/node/src/components/consensus/protocols/highway/config.rs +++ b/node/src/components/consensus/protocols/highway/config.rs @@ -1,42 +1,48 @@ -use std::path::PathBuf; - use serde::{Deserialize, Serialize}; use datasize::DataSize; -use crate::types::TimeDiff; +use casper_types::{serde_option_time_diff, TimeDiff}; use super::round_success_meter::config::Config as RSMConfig; /// Highway-specific configuration. /// NOTE: This is *NOT* protocol configuration that has to be the same on all nodes. 
#[derive(DataSize, Debug, Clone, Serialize, Deserialize)] +#[serde(deny_unknown_fields)] pub struct Config { - /// Path to the folder where unit hash files will be stored. - pub unit_hashes_folder: PathBuf, /// The duration for which incoming vertices with missing dependencies are kept in a queue. pub pending_vertex_timeout: TimeDiff, - /// The frequency at which we will ask peers for their latest state. - pub request_latest_state_timeout: TimeDiff, - /// If the current era's protocol state has not progressed for this long, shut down. - pub standstill_timeout: TimeDiff, + /// Request the latest protocol state from a random peer periodically, with this interval. + #[serde(with = "serde_option_time_diff")] + pub request_state_interval: Option, /// Log inactive or faulty validators periodically, with this interval. - pub log_participation_interval: TimeDiff, - /// The maximum number of blocks by which execution is allowed to lag behind finalization. - /// If it is more than that, consensus will pause, and resume once the executor has caught up. - pub max_execution_delay: u64, + #[serde(with = "serde_option_time_diff")] + pub log_participation_interval: Option, + /// Log synchronizer state periodically, with this interval. + #[serde(with = "serde_option_time_diff")] + pub log_synchronizer_interval: Option, + /// Log the size of every incoming and outgoing serialized unit. + pub log_unit_sizes: bool, + /// The maximum number of peers we request the same vertex from in parallel. + pub max_requests_for_vertex: usize, + /// The maximum number of dependencies we request per validator in a batch. + /// Limits requests per validator in panorama - in order to get a total number of + /// requests, multiply by # of validators. 
+ pub max_request_batch_size: usize, pub round_success_meter: RSMConfig, } impl Default for Config { fn default() -> Self { Config { - unit_hashes_folder: Default::default(), pending_vertex_timeout: "10sec".parse().unwrap(), - request_latest_state_timeout: "5sec".parse().unwrap(), - standstill_timeout: "1min".parse().unwrap(), - log_participation_interval: "10sec".parse().unwrap(), - max_execution_delay: 3, + request_state_interval: Some("10sec".parse().unwrap()), + log_participation_interval: Some("10sec".parse().unwrap()), + log_synchronizer_interval: Some("5sec".parse().unwrap()), + log_unit_sizes: false, + max_requests_for_vertex: 5, + max_request_batch_size: 20, round_success_meter: RSMConfig::default(), } } diff --git a/node/src/components/consensus/protocols/highway/participation.rs b/node/src/components/consensus/protocols/highway/participation.rs index 9ea5a9b180..29af88cedc 100644 --- a/node/src/components/consensus/protocols/highway/participation.rs +++ b/node/src/components/consensus/protocols/highway/participation.rs @@ -1,15 +1,16 @@ use std::cmp::Reverse; +use casper_types::Timestamp; + use crate::{ components::consensus::{ highway_core::{ highway::Highway, state::{Fault, State}, - validators::ValidatorIndex, }, traits::Context, + utils::ValidatorIndex, }, - types::Timestamp, utils::div_round, }; @@ -38,7 +39,11 @@ impl Status { if state.panorama()[idx].is_none() { return Some(Status::Inactive); } - if state.last_seen(idx) + state.params().max_round_length() < now { + if state + .last_seen(idx) + .saturating_add(state.params().max_round_length()) + < now + { let seconds = now.saturating_diff(state.last_seen(idx)).millis() / 1000; return Some(Status::LastSeenSecondsAgo(seconds)); } @@ -48,6 +53,8 @@ impl Status { /// A map of status (faulty, inactive) by validator ID. #[derive(Debug)] +// False positive, as the fields of this struct are all used in logging validator participation. 
+#[allow(dead_code)] pub(crate) struct Participation where C: Context, @@ -62,7 +69,7 @@ where impl Participation { /// Creates a new `Participation` map, showing validators seen as faulty or inactive by the /// Highway instance. - #[allow(clippy::integer_arithmetic)] // We use u128 to prevent overflows in weight calculation. + #[allow(clippy::arithmetic_side_effects)] // We use u128 to prevent overflows in weight calculation. pub(crate) fn new(highway: &Highway) -> Self { let now = Timestamp::now(); let state = highway.state(); diff --git a/node/src/components/consensus/protocols/highway/round_success_meter.rs b/node/src/components/consensus/protocols/highway/round_success_meter.rs index c86acc4e55..84c8ad73e9 100644 --- a/node/src/components/consensus/protocols/highway/round_success_meter.rs +++ b/node/src/components/consensus/protocols/highway/round_success_meter.rs @@ -1,14 +1,17 @@ +#[cfg(test)] +mod tests; + use std::{cmp::max, collections::VecDeque, mem}; use datasize::DataSize; -use tracing::trace; +use tracing::{error, trace}; -use crate::{ - components::consensus::{ - highway_core::{finality_detector::FinalityDetector, state, State, Weight}, - traits::Context, - }, - types::Timestamp, +use casper_types::{TimeDiff, Timestamp}; + +use crate::components::consensus::{ + highway_core::{finality_detector::FinalityDetector, state, State}, + traits::Context, + utils::Weight, }; pub(crate) mod config; @@ -24,43 +27,43 @@ where rounds: VecDeque, current_round_id: Timestamp, proposals: Vec, - min_round_exp: u8, - max_round_exp: u8, - current_round_exp: u8, + min_round_len: TimeDiff, + max_round_len: TimeDiff, + current_round_len: TimeDiff, config: Config, } impl RoundSuccessMeter { pub fn new( - round_exp: u8, - min_round_exp: u8, - max_round_exp: u8, + round_len: TimeDiff, + min_round_len: TimeDiff, + max_round_len: TimeDiff, timestamp: Timestamp, config: Config, ) -> Self { - let current_round_id = state::round_id(timestamp, round_exp); + let current_round_id = 
state::round_id(timestamp, round_len); Self { rounds: VecDeque::with_capacity(config.num_rounds_to_consider as usize), current_round_id, proposals: Vec::new(), - min_round_exp, - max_round_exp, - current_round_exp: round_exp, + min_round_len, + max_round_len, + current_round_len: round_len, config, } } - fn change_exponent(&mut self, new_exp: u8, timestamp: Timestamp) { + fn change_length(&mut self, new_len: TimeDiff, timestamp: Timestamp) { self.rounds = VecDeque::with_capacity(self.config.num_rounds_to_consider as usize); - self.current_round_exp = new_exp; - self.current_round_id = state::round_id(timestamp, new_exp); + self.current_round_len = new_len; + self.current_round_id = state::round_id(timestamp, new_len); self.proposals = Vec::new(); } fn check_proposals_success(&self, state: &State, proposal_h: &C::Hash) -> bool { let total_w = state.total_weight(); - #[allow(clippy::integer_arithmetic)] // FTT is less than 100%, so this can't overflow. + #[allow(clippy::arithmetic_side_effects)] // FTT is less than 100%, so this can't overflow. let finality_detector = FinalityDetector::::new(max( Weight( (u128::from(total_w) * *self.config.acceleration_ftt.numer() as u128 @@ -77,7 +80,7 @@ impl RoundSuccessMeter { /// be successful. pub fn new_proposal(&mut self, proposal_h: C::Hash, timestamp: Timestamp) { // only add proposals from within the current round - if state::round_id(timestamp, self.current_round_exp) == self.current_round_id { + if state::round_id(timestamp, self.current_round_len) == self.current_round_id { trace!( %self.current_round_id, timestamp = timestamp.millis(), @@ -88,7 +91,7 @@ impl RoundSuccessMeter { trace!( %self.current_round_id, timestamp = timestamp.millis(), - %self.current_round_exp, + %self.current_round_len, "trying to add proposal for a different round!" ); } @@ -99,19 +102,19 @@ impl RoundSuccessMeter { /// If there is a summit, the round is considered successful. Otherwise, it is considered /// failed. 
/// Next, a number of last rounds are being checked for success and if not enough of them are - /// successful, we return a higher round exponent for the future. - /// If the exponent shouldn't grow, and the round ID is divisible by a certain number, a lower - /// round exponent is returned. - pub fn calculate_new_exponent(&mut self, state: &State) -> u8 { + /// successful, we return a higher round length for the future. + /// If the length shouldn't grow, and the round ID is divisible by a certain number, a lower + /// round length is returned. + pub fn calculate_new_length(&mut self, state: &State) -> TimeDiff { let now = Timestamp::now(); // if the round hasn't finished, just return whatever we have now - if state::round_id(now, self.current_round_exp) <= self.current_round_id { - return self.new_exponent(); + if state::round_id(now, self.current_round_len) <= self.current_round_id { + return self.new_length(); } - trace!(%self.current_round_id, "calculating exponent"); - let current_round_index = round_index(self.current_round_id, self.current_round_exp); - let new_round_index = round_index(now, self.current_round_exp); + trace!(%self.current_round_id, "calculating length"); + let current_round_index = round_index(self.current_round_id, self.current_round_len); + let new_round_index = round_index(now, self.current_round_len); if mem::take(&mut self.proposals) .into_iter() @@ -134,38 +137,38 @@ impl RoundSuccessMeter { self.rounds.push_front(false); } - let round_len = state::round_len(self.current_round_exp); - self.current_round_id = Timestamp::zero() + round_len.saturating_mul(new_round_index); + self.current_round_id = Timestamp::zero() + .saturating_add(self.current_round_len.saturating_mul(new_round_index)); self.clean_old_rounds(); trace!( - %self.current_round_exp, + %self.current_round_len, "{} failures among the last {} rounds.", self.count_failures(), self.rounds.len() ); - let new_exp = self.new_exponent(); + let new_len = self.new_length(); - 
trace!(%new_exp, "new exponent calculated"); + trace!(%new_len, "new length calculated"); - if new_exp != self.current_round_exp { - self.change_exponent(new_exp, now); + if new_len != self.current_round_len { + self.change_length(new_len, now); } - new_exp + new_len } /// Returns an instance of `Self` for the new era: resetting the counters where appropriate. - pub fn next_era(&self, era_start_timestamp: Timestamp) -> Self { + pub fn next_era(&self, timestamp: Timestamp) -> Self { Self { rounds: self.rounds.clone(), - current_round_id: state::round_id(era_start_timestamp, self.current_round_exp), + current_round_id: state::round_id(timestamp, self.current_round_len), proposals: Default::default(), - min_round_exp: self.min_round_exp, - max_round_exp: self.max_round_exp, - current_round_exp: self.current_round_exp, + min_round_len: self.min_round_len, + max_round_len: self.max_round_len, + current_round_len: self.current_round_len, config: self.config, } } @@ -180,145 +183,35 @@ impl RoundSuccessMeter { self.rounds.iter().filter(|&success| !success).count() } - /// Returns the round exponent to be used in the next round, based on the previously used round - /// exponent and the current counts of successes and failures. - pub(super) fn new_exponent(&self) -> u8 { - let current_round_index = round_index(self.current_round_id, self.current_round_exp); + /// Returns the round length to be used in the next round, based on the previously used round + /// length and the current counts of successes and failures. + pub(super) fn new_length(&self) -> TimeDiff { + let current_round_index = round_index(self.current_round_id, self.current_round_len); let num_failures = self.count_failures() as u64; - #[allow(clippy::integer_arithmetic)] // The acceleration_parameter is not zero. + #[allow(clippy::arithmetic_side_effects)] // The acceleration_parameter is not zero. 
if num_failures > self.config.max_failed_rounds() - && self.current_round_exp < self.max_round_exp + && self.current_round_len * 2 <= self.max_round_len { - self.current_round_exp.saturating_add(1) + self.current_round_len * 2 } else if current_round_index % self.config.acceleration_parameter == 0 - && self.current_round_exp > self.min_round_exp + && self.current_round_len > self.min_round_len // we will only accelerate if we collected data about enough rounds && self.rounds.len() as u64 == self.config.num_rounds_to_consider && num_failures < self.config.max_failures_for_acceleration() { - self.current_round_exp.saturating_sub(1) + self.current_round_len / 2 } else { - self.current_round_exp + self.current_round_len } } } /// Returns the round index `i`, if `r_id` is the ID of the `i`-th round after the epoch. -fn round_index(r_id: Timestamp, round_exp: u8) -> u64 { - r_id.millis().checked_shr(u32::from(round_exp)).unwrap_or(0) -} - -#[cfg(test)] -mod tests { - use config::{Config, ACCELERATION_PARAMETER, MAX_FAILED_ROUNDS, NUM_ROUNDS_TO_CONSIDER}; - - use crate::{ - components::consensus::{ - cl_context::ClContext, - protocols::highway::round_success_meter::{config, round_index}, - }, - types::TimeDiff, - }; - - const TEST_ROUND_EXP: u8 = 13; - const TEST_MIN_ROUND_EXP: u8 = 8; - const TEST_MAX_ROUND_EXP: u8 = 19; - - #[test] - fn new_exponent_steady() { - let round_success_meter: super::RoundSuccessMeter = - super::RoundSuccessMeter::new( - TEST_ROUND_EXP, - TEST_MIN_ROUND_EXP, - TEST_MAX_ROUND_EXP, - crate::types::Timestamp::now(), - Config::default(), - ); - assert_eq!(round_success_meter.new_exponent(), TEST_ROUND_EXP); - } - - #[test] - fn new_exponent_slow_down() { - let mut round_success_meter: super::RoundSuccessMeter = - super::RoundSuccessMeter::new( - TEST_ROUND_EXP, - TEST_MIN_ROUND_EXP, - TEST_MAX_ROUND_EXP, - crate::types::Timestamp::now(), - Config::default(), - ); - // If there have been more rounds of failure than MAX_FAILED_ROUNDS, slow down - 
round_success_meter.rounds = vec![false; MAX_FAILED_ROUNDS + 1].into(); - assert_eq!(round_success_meter.new_exponent(), TEST_ROUND_EXP + 1); - } - - #[test] - fn new_exponent_can_not_slow_down_because_max_round_exp() { - // If the round exponent is the same as the maximum round exponent, can't go up - let mut round_success_meter: super::RoundSuccessMeter = - super::RoundSuccessMeter::new( - TEST_MAX_ROUND_EXP, - TEST_MIN_ROUND_EXP, - TEST_MAX_ROUND_EXP, - crate::types::Timestamp::now(), - Config::default(), - ); - // If there have been more rounds of failure than MAX_FAILED_ROUNDS, slow down -- but can't - // slow down because of ceiling - round_success_meter.rounds = vec![false; MAX_FAILED_ROUNDS + 1].into(); - assert_eq!(round_success_meter.new_exponent(), TEST_MAX_ROUND_EXP); - } - - #[test] - fn new_exponent_speed_up() { - // If there's been enough successful rounds and it's an acceleration round, speed up - let mut round_success_meter: super::RoundSuccessMeter = - super::RoundSuccessMeter::new( - TEST_ROUND_EXP, - TEST_MIN_ROUND_EXP, - TEST_MAX_ROUND_EXP, - crate::types::Timestamp::now(), - Config::default(), - ); - round_success_meter.rounds = vec![true; NUM_ROUNDS_TO_CONSIDER].into(); - // Increase our round index until we are at an acceleration round - loop { - let current_round_index = round_index( - round_success_meter.current_round_id, - round_success_meter.current_round_exp, - ); - if current_round_index % ACCELERATION_PARAMETER == 0 { - break; - }; - round_success_meter.current_round_id += TimeDiff::from(1); - } - assert_eq!(round_success_meter.new_exponent(), TEST_ROUND_EXP - 1); - } - - #[test] - fn new_exponent_can_not_speed_up_because_min_round_exp() { - // If there's been enough successful rounds and it's an acceleration round, but we are - // already at the smallest round exponent possible, stay at the current round exponent - let mut round_success_meter: super::RoundSuccessMeter = - super::RoundSuccessMeter::new( - TEST_MIN_ROUND_EXP, - 
TEST_MIN_ROUND_EXP, - TEST_MAX_ROUND_EXP, - crate::types::Timestamp::now(), - Config::default(), - ); - round_success_meter.rounds = vec![true; NUM_ROUNDS_TO_CONSIDER].into(); - // Increase our round index until we are at an acceleration round - loop { - let current_round_index = round_index( - round_success_meter.current_round_id, - round_success_meter.current_round_exp, - ); - if current_round_index % ACCELERATION_PARAMETER == 0 { - break; - }; - round_success_meter.current_round_id += TimeDiff::from(1); - } - assert_eq!(round_success_meter.new_exponent(), TEST_MIN_ROUND_EXP); +#[allow(clippy::arithmetic_side_effects)] // Checking for division by 0. +fn round_index(r_id: Timestamp, round_len: TimeDiff) -> u64 { + if round_len.millis() == 0 { + error!("called round_index with round_len 0."); + return r_id.millis(); } + r_id.millis() / round_len.millis() } diff --git a/node/src/components/consensus/protocols/highway/round_success_meter/config.rs b/node/src/components/consensus/protocols/highway/round_success_meter/config.rs index 484cc67c0f..8ff6d2f6c9 100644 --- a/node/src/components/consensus/protocols/highway/round_success_meter/config.rs +++ b/node/src/components/consensus/protocols/highway/round_success_meter/config.rs @@ -7,12 +7,12 @@ use serde::{Deserialize, Serialize}; /// The number of most recent rounds we will be keeping track of. pub(crate) const NUM_ROUNDS_TO_CONSIDER: usize = 40; /// The number of successful rounds that triggers us to slow down: With this many or fewer -/// successes per `NUM_ROUNDS_TO_CONSIDER`, we increase our round exponent. +/// successes per `NUM_ROUNDS_TO_CONSIDER`, we increase our round length. pub(crate) const NUM_ROUNDS_SLOWDOWN: usize = 10; /// The number of successful rounds that triggers us to speed up: With this many or more successes -/// per `NUM_ROUNDS_TO_CONSIDER`, we decrease our round exponent. +/// per `NUM_ROUNDS_TO_CONSIDER`, we decrease our round length. 
pub(crate) const NUM_ROUNDS_SPEEDUP: usize = 32; -/// We will try to accelerate (decrease our round exponent) every `ACCELERATION_PARAMETER` rounds if +/// We will try to accelerate (decrease our round length) every `ACCELERATION_PARAMETER` rounds if /// we have few enough failures. pub(crate) const ACCELERATION_PARAMETER: u64 = 40; /// The FTT, as a percentage (i.e. `THRESHOLD = 1` means 1% of the validators' total weight), which diff --git a/node/src/components/consensus/protocols/highway/round_success_meter/tests.rs b/node/src/components/consensus/protocols/highway/round_success_meter/tests.rs new file mode 100644 index 0000000000..85babde4d3 --- /dev/null +++ b/node/src/components/consensus/protocols/highway/round_success_meter/tests.rs @@ -0,0 +1,109 @@ +use config::{Config, ACCELERATION_PARAMETER, MAX_FAILED_ROUNDS, NUM_ROUNDS_TO_CONSIDER}; + +use casper_types::{TimeDiff, Timestamp}; + +use crate::components::consensus::{ + cl_context::ClContext, + protocols::highway::round_success_meter::{config, round_index}, +}; + +const TEST_ROUND_LEN: TimeDiff = TimeDiff::from_millis(1 << 13); +const TEST_MIN_ROUND_LEN: TimeDiff = TimeDiff::from_millis(1 << 8); +const TEST_MAX_ROUND_LEN: TimeDiff = TimeDiff::from_millis(1 << 19); + +#[test] +fn new_length_steady() { + let round_success_meter: super::RoundSuccessMeter = super::RoundSuccessMeter::new( + TEST_ROUND_LEN, + TEST_MIN_ROUND_LEN, + TEST_MAX_ROUND_LEN, + Timestamp::now(), + Config::default(), + ); + assert_eq!(round_success_meter.new_length(), TEST_ROUND_LEN); +} + +#[test] +fn new_length_slow_down() { + let mut round_success_meter: super::RoundSuccessMeter = + super::RoundSuccessMeter::new( + TEST_ROUND_LEN, + TEST_MIN_ROUND_LEN, + TEST_MAX_ROUND_LEN, + Timestamp::now(), + Config::default(), + ); + // If there have been more rounds of failure than MAX_FAILED_ROUNDS, slow down + round_success_meter.rounds = vec![false; MAX_FAILED_ROUNDS + 1].into(); + assert_eq!(round_success_meter.new_length(), TEST_ROUND_LEN * 
2); +} + +#[test] +fn new_length_can_not_slow_down_because_max_round_len() { + // If the round length is the same as the maximum round length, can't go up + let mut round_success_meter: super::RoundSuccessMeter = + super::RoundSuccessMeter::new( + TEST_MAX_ROUND_LEN, + TEST_MIN_ROUND_LEN, + TEST_MAX_ROUND_LEN, + Timestamp::now(), + Config::default(), + ); + // If there have been more rounds of failure than MAX_FAILED_ROUNDS, slow down -- but can't + // slow down because of ceiling + round_success_meter.rounds = vec![false; MAX_FAILED_ROUNDS + 1].into(); + assert_eq!(round_success_meter.new_length(), TEST_MAX_ROUND_LEN); +} + +#[test] +fn new_length_speed_up() { + // If there's been enough successful rounds and it's an acceleration round, speed up + let mut round_success_meter: super::RoundSuccessMeter = + super::RoundSuccessMeter::new( + TEST_ROUND_LEN, + TEST_MIN_ROUND_LEN, + TEST_MAX_ROUND_LEN, + Timestamp::now(), + Config::default(), + ); + round_success_meter.rounds = vec![true; NUM_ROUNDS_TO_CONSIDER].into(); + // Increase our round index until we are at an acceleration round + loop { + let current_round_index = round_index( + round_success_meter.current_round_id, + round_success_meter.current_round_len, + ); + if current_round_index % ACCELERATION_PARAMETER == 0 { + break; + }; + round_success_meter.current_round_id += TimeDiff::from_millis(1); + } + assert_eq!(round_success_meter.new_length(), TEST_ROUND_LEN / 2); +} + +#[test] +fn new_length_can_not_speed_up_because_min_round_len() { + // If there's been enough successful rounds and it's an acceleration round, but we are + // already at the smallest round length possible, stay at the current round length + let mut round_success_meter: super::RoundSuccessMeter = + super::RoundSuccessMeter::new( + TEST_MIN_ROUND_LEN, + TEST_MIN_ROUND_LEN, + TEST_MAX_ROUND_LEN, + Timestamp::now(), + Config::default(), + ); + round_success_meter.rounds = vec![true; NUM_ROUNDS_TO_CONSIDER].into(); + // Increase our round index 
until we are at an acceleration round + loop { + let current_round_index = round_index( + round_success_meter.current_round_id, + round_success_meter.current_round_len, + ); + if current_round_index % ACCELERATION_PARAMETER == 0 { + break; + }; + round_success_meter.current_round_id += TimeDiff::from_millis(1); + } + assert_eq!(round_success_meter.new_length(), TEST_MIN_ROUND_LEN); +} diff --git a/node/src/components/consensus/protocols/highway/synchronizer.rs b/node/src/components/consensus/protocols/highway/synchronizer.rs deleted file mode 100644 index b8235fd441..0000000000 --- a/node/src/components/consensus/protocols/highway/synchronizer.rs +++ /dev/null @@ -1,453 +0,0 @@ -use std::{ - collections::{BTreeMap, HashMap, HashSet}, - fmt::Debug, - iter, -}; - -use datasize::DataSize; -use itertools::Itertools; -use tracing::debug; - -use crate::{ - components::consensus::{ - consensus_protocol::ProtocolOutcome, - highway_core::{ - highway::{Dependency, Highway, PreValidatedVertex, Vertex}, - validators::ValidatorMap, - }, - traits::{Context, NodeIdT}, - }, - types::{TimeDiff, Timestamp}, -}; - -use super::{HighwayMessage, ProtocolOutcomes, ACTION_ID_VERTEX}; - -#[cfg(test)] -mod tests; - -/// Incoming pre-validated vertices that we haven't added to the protocol state yet, and the -/// timestamp when we received them. -#[derive(DataSize, Debug)] -pub(crate) struct PendingVertices(HashMap, HashMap>) -where - C: Context; - -impl Default for PendingVertices { - fn default() -> Self { - PendingVertices(Default::default()) - } -} - -impl PendingVertices { - /// Removes expired vertices. - fn remove_expired(&mut self, oldest: Timestamp) { - for time_by_sender in self.0.values_mut() { - time_by_sender.retain(|_, time_received| *time_received >= oldest); - } - self.0.retain(|_, time_by_peer| !time_by_peer.is_empty()) - } - - /// Adds a vertex, or updates its timestamp. 
- fn add(&mut self, sender: I, pvv: PreValidatedVertex, time_received: Timestamp) { - self.0 - .entry(pvv) - .or_default() - .entry(sender) - .and_modify(|timestamp| *timestamp = (*timestamp).max(time_received)) - .or_insert(time_received); - } - - /// Adds a vertex, or updates its timestamp. - fn push(&mut self, pv: PendingVertex) { - self.add(pv.sender, pv.pvv, pv.time_received) - } - - fn pop(&mut self) -> Option> { - let pvv = self.0.keys().next()?.clone(); - let (sender, timestamp, is_empty) = { - let time_by_sender = self.0.get_mut(&pvv)?; - let sender = time_by_sender.keys().next()?.clone(); - let timestamp = time_by_sender.remove(&sender)?; - (sender, timestamp, time_by_sender.is_empty()) - }; - if is_empty { - self.0.remove(&pvv); - } - Some(PendingVertex::new(sender, pvv, timestamp)) - } - - /// Drops all pending vertices other than evidence. - pub(crate) fn retain_evidence_only(&mut self) { - self.0.retain(|pvv, _| pvv.inner().is_evidence()); - } - - /// Returns number of unique vertices pending in the queue. - pub(crate) fn len(&self) -> u64 { - self.0.len() as u64 - } - - fn is_empty(&self) -> bool { - self.0.is_empty() - } -} - -impl Iterator for PendingVertices { - type Item = PendingVertex; - - fn next(&mut self) -> Option { - self.pop() - } -} - -/// An incoming pre-validated vertex that we haven't added to the protocol state yet. -#[derive(DataSize, Debug)] -pub(crate) struct PendingVertex -where - C: Context, -{ - /// The peer who sent it to us. - sender: I, - /// The pre-validated vertex. - pvv: PreValidatedVertex, - /// The time when we received it. - time_received: Timestamp, -} - -impl PendingVertex { - /// Returns a new pending vertex with the current timestamp. - pub(crate) fn new(sender: I, pvv: PreValidatedVertex, time_received: Timestamp) -> Self { - Self { - sender, - pvv, - time_received, - } - } - - /// Returns the peer from which we received this vertex. 
- pub(crate) fn sender(&self) -> &I { - &self.sender - } - - /// Returns the vertex waiting to be added. - pub(crate) fn vertex(&self) -> &Vertex { - self.pvv.inner() - } - - /// Returns the pre-validated vertex. - pub(crate) fn pvv(&self) -> &PreValidatedVertex { - &self.pvv - } -} - -impl Into> for PendingVertex { - fn into(self) -> PreValidatedVertex { - self.pvv - } -} - -#[derive(DataSize, Debug)] -pub(crate) struct Synchronizer -where - C: Context, -{ - /// Incoming vertices we can't add yet because they are still missing a dependency. - vertex_deps: BTreeMap, PendingVertices>, - /// The vertices that are scheduled to be processed at a later time. The keys of this - /// `BTreeMap` are timestamps when the corresponding vector of vertices will be added. - vertices_to_be_added_later: BTreeMap>, - /// Vertices that might be ready to add to the protocol state: We are not currently waiting for - /// a requested dependency. - vertices_to_be_added: PendingVertices, - /// The duration for which incoming vertices with missing dependencies are kept in a queue. - pending_vertex_timeout: TimeDiff, - /// The duration between two consecutive requests of the latest state. - request_latest_state_timeout: TimeDiff, - /// Instance ID of an era for which this synchronizer is constructed. - instance_id: C::InstanceId, - /// Keeps track of the lowest/oldest seen unit per validator when syncing. - /// Used only for logging. - oldest_seen_panorama: ValidatorMap>, - /// Boolean flag indicating whether we're synchronizing current era. - pub(crate) current_era: bool, -} - -impl Synchronizer { - /// Creates a new synchronizer with the specified timeout for pending vertices. 
- pub(crate) fn new( - pending_vertex_timeout: TimeDiff, - request_latest_state_timeout: TimeDiff, - validator_len: usize, - instance_id: C::InstanceId, - ) -> Self { - Synchronizer { - vertex_deps: BTreeMap::new(), - vertices_to_be_added_later: BTreeMap::new(), - vertices_to_be_added: Default::default(), - pending_vertex_timeout, - request_latest_state_timeout, - oldest_seen_panorama: iter::repeat(None).take(validator_len).collect(), - instance_id, - current_era: true, - } - } - - /// Removes expired pending vertices from the queues, and schedules the next purge. - pub(crate) fn purge_vertices(&mut self, now: Timestamp) { - let oldest = now.saturating_sub(self.pending_vertex_timeout); - self.vertices_to_be_added.remove_expired(oldest); - Self::remove_expired(&mut self.vertices_to_be_added_later, oldest); - Self::remove_expired(&mut self.vertex_deps, oldest); - } - - // Returns number of elements in the `verties_to_be_added_later` queue. - // Every pending vertex is counted once, even if it has multiple senders. - fn vertices_to_be_added_later_len(&self) -> u64 { - self.vertices_to_be_added_later - .iter() - .map(|(_, pv)| pv.len()) - .sum() - } - - // Returns number of elements in `vertex_deps` queue. - fn vertex_deps_len(&self) -> u64 { - self.vertex_deps.iter().map(|(_, pv)| pv.len()).sum() - } - - // Returns number of elements in `vertices_to_be_added` queue. - fn vertices_to_be_added_len(&self) -> u64 { - self.vertices_to_be_added.len() - } - - pub(crate) fn log_len(&self) { - debug!( - era_id = ?self.instance_id, - vertices_to_be_added_later = self.vertices_to_be_added_later_len(), - vertex_deps = self.vertex_deps_len(), - vertices_to_be_added = self.vertices_to_be_added_len(), - "synchronizer queue lengths" - ); - // All units seen have seq_number == 0. 
- let all_lowest = self - .oldest_seen_panorama - .iter() - .all(|entry| entry.map(|seq_num| seq_num == 0).unwrap_or(false)); - if all_lowest { - debug!("all seen units while synchronization with seq_num=0"); - } else { - debug!(oldest_panorama=%self.oldest_seen_panorama, "oldest seen unit per validator"); - } - } - - /// Store a (pre-validated) vertex which will be added later. This creates a timer to be sent - /// to the reactor. The vertex be added using `Self::add_vertices` when that timer goes off. - pub(crate) fn store_vertex_for_addition_later( - &mut self, - future_timestamp: Timestamp, - now: Timestamp, - sender: I, - pvv: PreValidatedVertex, - ) { - self.vertices_to_be_added_later - .entry(future_timestamp) - .or_default() - .add(sender, pvv, now); - } - - /// Schedules calls to `add_vertex` on any vertices in `vertices_to_be_added_later` which are - /// scheduled for after the given `transpired_timestamp`. In general the specified `timestamp` - /// is approximately `Timestamp::now()`. Vertices keyed by timestamps chronologically before - /// `transpired_timestamp` should all be added. - pub(crate) fn add_past_due_stored_vertices( - &mut self, - timestamp: Timestamp, - ) -> ProtocolOutcomes { - let mut results = vec![]; - let past_due_timestamps: Vec = self - .vertices_to_be_added_later - .range(..=timestamp) // Inclusive range - .map(|(past_due_timestamp, _)| past_due_timestamp.to_owned()) - .collect(); - for past_due_timestamp in past_due_timestamps { - if let Some(vertices_to_add) = - self.vertices_to_be_added_later.remove(&past_due_timestamp) - { - results.extend(self.schedule_add_vertices(vertices_to_add)) - } - } - results - } - - /// Schedules a vertex to be added to the protocol state. 
- pub(crate) fn schedule_add_vertex( - &mut self, - sender: I, - pvv: PreValidatedVertex, - now: Timestamp, - ) -> ProtocolOutcomes { - self.update_last_seen(&pvv); - let pv = PendingVertex::new(sender, pvv, now); - self.schedule_add_vertices(iter::once(pv)) - } - - fn update_last_seen(&mut self, pvv: &PreValidatedVertex) { - let v = pvv.inner(); - if let (Some(v_id), Some(seq_num)) = (v.creator(), v.unit_seq_number()) { - let prev_seq_num = self.oldest_seen_panorama[v_id].unwrap_or(u64::MAX); - self.oldest_seen_panorama[v_id] = Some(prev_seq_num.min(seq_num)); - } - } - - /// Moves all vertices whose known missing dependency is now satisfied into the - /// `vertices_to_be_added` queue. - pub(crate) fn remove_satisfied_deps(&mut self, highway: &Highway) -> ProtocolOutcomes { - let satisfied_deps = self - .vertex_deps - .keys() - .filter(|dep| highway.has_dependency(dep)) - .cloned() - .collect_vec(); - // Safe to unwrap: We know the keys exist. TODO: Replace with BTreeMap::retain once stable. - let pvs = satisfied_deps - .into_iter() - .flat_map(|dep| self.vertex_deps.remove(&dep).unwrap()) - .collect_vec(); - self.schedule_add_vertices(pvs) - } - - /// Pops and returns the next entry from `vertices_to_be_added` that is not yet in the protocol - /// state. Also returns a `ProtocolOutcome` that schedules the next action to add a vertex, - /// unless the queue is empty, and `ProtocolOutcome`s to request missing dependencies. - pub(crate) fn pop_vertex_to_add( - &mut self, - highway: &Highway, - ) -> (Option>, ProtocolOutcomes) { - let mut outcomes = Vec::new(); - // Get the next vertex to be added; skip the ones that are already in the protocol state, - // and the ones that are still missing dependencies. 
- loop { - let pv = match self.vertices_to_be_added.pop() { - None => return (None, outcomes), - Some(pv) if highway.has_vertex(pv.vertex()) => continue, - Some(pv) => pv, - }; - if let Some(dep) = highway.missing_dependency(pv.pvv()) { - // We are still missing a dependency. Store the vertex in the map and request - // the dependency from the sender. - let sender = pv.sender().clone(); - self.add_missing_dependency(dep.clone(), pv); - let ser_msg = HighwayMessage::RequestDependency(dep).serialize(); - outcomes.push(ProtocolOutcome::CreatedTargetedMessage(ser_msg, sender)); - continue; - } - // We found the next vertex to add. - if !self.vertices_to_be_added.is_empty() { - // There are still vertices in the queue: schedule next call. - outcomes.push(ProtocolOutcome::QueueAction(ACTION_ID_VERTEX)); - } - return (Some(pv), outcomes); - } - } - - /// Adds a vertex with a known missing dependency to the queue. - fn add_missing_dependency(&mut self, dep: Dependency, pv: PendingVertex) { - self.vertex_deps.entry(dep).or_default().push(pv) - } - - /// Returns `true` if no vertices are in the queues. - pub(crate) fn is_empty(&self) -> bool { - self.vertex_deps.is_empty() - && self.vertices_to_be_added.is_empty() - && self.vertices_to_be_added_later.is_empty() - } - - /// Returns `true` if there are any vertices waiting for the specified dependency. - pub(crate) fn is_dependency(&self, dep: &Dependency) -> bool { - self.vertex_deps.contains_key(dep) - } - - /// Returns the timeout for pending vertices: Entries older than this are purged periodically. - pub(crate) fn pending_vertex_timeout(&self) -> TimeDiff { - self.pending_vertex_timeout - } - - /// Returns the duration between two consecutive requests of the latest state. - pub(crate) fn request_latest_state_timeout(&self) -> TimeDiff { - self.request_latest_state_timeout - } - - /// Drops all vertices that (directly or indirectly) have the specified dependencies, and - /// returns the set of their senders. 
If the specified dependencies are known to be invalid, - /// those senders must be faulty. - pub(crate) fn drop_dependent_vertices( - &mut self, - mut vertices: Vec>, - ) -> HashSet { - let mut senders = HashSet::new(); - while !vertices.is_empty() { - let (new_vertices, new_senders) = self.do_drop_dependent_vertices(vertices); - vertices = new_vertices; - senders.extend(new_senders); - } - senders - } - - /// Drops all pending vertices other than evidence. - pub(crate) fn retain_evidence_only(&mut self) { - self.vertex_deps.clear(); - self.vertices_to_be_added_later.clear(); - self.vertices_to_be_added.retain_evidence_only(); - } - - /// Schedules vertices to be added to the protocol state. - fn schedule_add_vertices(&mut self, pending_vertices: T) -> ProtocolOutcomes - where - T: IntoIterator>, - { - let was_empty = self.vertices_to_be_added.is_empty(); - for pv in pending_vertices { - self.vertices_to_be_added.push(pv); - } - if was_empty && !self.vertices_to_be_added.is_empty() { - vec![ProtocolOutcome::QueueAction(ACTION_ID_VERTEX)] - } else { - Vec::new() - } - } - - /// Drops all vertices that have the specified direct dependencies, and returns their IDs and - /// senders. - fn do_drop_dependent_vertices( - &mut self, - vertices: Vec>, - ) -> (Vec>, HashSet) { - // collect the vertices that depend on the ones we got in the argument and their senders - vertices - .into_iter() - // filtering by is_unit, so that we don't drop vertices depending on invalid evidence - // or endorsements - we can still get valid ones from someone else and eventually - // satisfy the dependency - .filter(|dep| dep.is_unit()) - .flat_map(|vertex| self.vertex_deps.remove(&vertex)) - .flatten() - .map(|pv| (pv.pvv.inner().id(), pv.sender)) - .unzip() - } - - /// Removes all expired entries from a `BTreeMap` of `Vec`s. 
- fn remove_expired( - map: &mut BTreeMap>, - oldest: Timestamp, - ) { - for pvs in map.values_mut() { - pvs.remove_expired(oldest); - } - let keys = map - .iter() - .filter(|(_, pvs)| pvs.is_empty()) - .map(|(key, _)| key.clone()) - .collect_vec(); - for key in keys { - map.remove(&key); - } - } -} diff --git a/node/src/components/consensus/protocols/highway/synchronizer/tests.rs b/node/src/components/consensus/protocols/highway/synchronizer/tests.rs deleted file mode 100644 index f6bc8b5eda..0000000000 --- a/node/src/components/consensus/protocols/highway/synchronizer/tests.rs +++ /dev/null @@ -1,113 +0,0 @@ -use super::*; - -use crate::components::consensus::{ - highway_core::{ - highway::tests::test_validators, - highway_testing::TEST_INSTANCE_ID, - state::{tests::*, State}, - }, - protocols::highway::tests::NodeId, -}; - -#[test] -fn purge_vertices() { - let params = test_params(0); - let mut state = State::new(WEIGHTS, params.clone(), vec![]); - - // We use round exponent 4u8, so a round is 0x10 ms. With seed 0, Carol is the first leader. - // - // time: 0x00 0x0A 0x1A 0x2A 0x3A - // - // Carol c0 — c1 — c2 - // \ - // Bob ————————— b0 — b1 - let c0 = add_unit!(state, CAROL, 0x00, 4u8, 0xA; N, N, N).unwrap(); - let c1 = add_unit!(state, CAROL, 0x0A, 4u8, None; N, N, c0).unwrap(); - let c2 = add_unit!(state, CAROL, 0x1A, 4u8, None; N, N, c1).unwrap(); - let b0 = add_unit!(state, BOB, 0x2A, 4u8, None; N, N, c0).unwrap(); - let b1 = add_unit!(state, BOB, 0x3A, 4u8, None; N, b0, c0).unwrap(); - - // A Highway instance that's just used to create PreValidatedVertex instances below. - let util_highway = - Highway::::new(TEST_INSTANCE_ID, test_validators(), params.clone()); - - // Returns the WireUnit with the specified hash. - let unit = |hash: u64| Vertex::Unit(state.wire_unit(&hash, TEST_INSTANCE_ID).unwrap()); - // Returns the PreValidatedVertex with the specified hash. 
- let pvv = |hash: u64| util_highway.pre_validate_vertex(unit(hash)).unwrap(); - - let peer0 = NodeId(0); - - // Create a synchronizer with a 0x20 ms timeout, and a Highway instance. - let mut sync = Synchronizer::::new( - 0x20.into(), - 0x20.into(), - WEIGHTS.len(), - TEST_INSTANCE_ID, - ); - let mut highway = Highway::::new(TEST_INSTANCE_ID, test_validators(), params); - - // At time 0x20, we receive c2, b0 and b1 — the latter ahead of their timestamp. - // Since c2 is the first entry in the main queue, processing is scheduled. - let now = 0x20.into(); - assert!(matches!( - *sync.schedule_add_vertex(peer0, pvv(c2), now), - [ProtocolOutcome::QueueAction(ACTION_ID_VERTEX)] - )); - sync.store_vertex_for_addition_later(unit(b1).timestamp().unwrap(), now, peer0, pvv(b1)); - sync.store_vertex_for_addition_later(unit(b0).timestamp().unwrap(), now, peer0, pvv(b0)); - - // At time 0x21, we receive c1. - let now = 0x21.into(); - assert!(sync.schedule_add_vertex(peer0, pvv(c1), now).is_empty()); - - // No new vertices can be added yet, because all are missing dependencies. - // The missing dependencies of c1 and c2 are requested. - let (maybe_pv, outcomes) = sync.pop_vertex_to_add(&highway); - assert!(maybe_pv.is_none()); - assert!(matches!( - *outcomes, - [ - ProtocolOutcome::CreatedTargetedMessage(_, NodeId(0)), - ProtocolOutcome::CreatedTargetedMessage(_, NodeId(0)), - ] - )); - - // At 0x23, c0 gets enqueued and added. - // That puts c1 back into the main queue, since its dependency is satisfied. 
- let now = 0x23.into(); - let outcomes = sync.schedule_add_vertex(peer0, pvv(c0), now); - assert!( - matches!(*outcomes, [ProtocolOutcome::QueueAction(ACTION_ID_VERTEX)]), - "unexpected outcomes: {:?}", - outcomes - ); - let (maybe_pv, outcomes) = sync.pop_vertex_to_add(&highway); - assert_eq!(Dependency::Unit(c0), maybe_pv.unwrap().vertex().id()); - assert!(outcomes.is_empty()); - let vv_c0 = highway.validate_vertex(pvv(c0)).expect("c0 is valid"); - highway.add_valid_vertex(vv_c0, now); - let outcomes = sync.remove_satisfied_deps(&highway); - assert!( - matches!(*outcomes, [ProtocolOutcome::QueueAction(ACTION_ID_VERTEX)]), - "unexpected outcomes: {:?}", - outcomes - ); - - // At time 0x2A, the vertex b0 moves into the main queue. - let now = 0x2A.into(); - assert!(sync.add_past_due_stored_vertices(now).is_empty()); - - // At 0x41, all vertices received at 0x20 are expired, but c1 (received at 0x21) isn't. - // This will remove: - // * b1: still postponed due to future timestamp - // * b0: in the main queue - // * c2: waiting for dependency c1 to be added - sync.purge_vertices(0x41.into()); - - // The main queue should now contain only c1. If we remove it, the synchronizer is empty. 
- let (maybe_pv, outcomes) = sync.pop_vertex_to_add(&highway); - assert_eq!(Dependency::Unit(c1), maybe_pv.unwrap().vertex().id()); - assert!(outcomes.is_empty()); - assert!(sync.is_empty()); -} diff --git a/node/src/components/consensus/protocols/highway/tests.rs b/node/src/components/consensus/protocols/highway/tests.rs index 1a48feeff0..83e7e781da 100644 --- a/node/src/components/consensus/protocols/highway/tests.rs +++ b/node/src/components/consensus/protocols/highway/tests.rs @@ -1,13 +1,12 @@ -use std::{collections::BTreeSet, sync::Arc}; - -use datasize::DataSize; -use derive_more::Display; +use std::{ + collections::{BTreeMap, BTreeSet}, + sync::Arc, +}; -use casper_types::{PublicKey, U512}; +use casper_types::{testing::TestRng, PublicKey, TimeDiff, Timestamp, U512}; use crate::{ components::consensus::{ - candidate_block::CandidateBlock, cl_context::{ClContext, Keypair}, config::Config, consensus_protocol::{ConsensusProtocol, ProtocolOutcome}, @@ -15,52 +14,49 @@ use crate::{ highway::{SignedWireUnit, Vertex, WireUnit}, highway_testing, state::{self, tests::ALICE, Observation, Panorama}, - validators::ValidatorIndex, State, }, + max_rounds_per_era, protocols::highway::{ - config::Config as HighwayConfig, HighwayMessage, ACTION_ID_VERTEX, - TIMER_ID_STANDSTILL_ALERT, + config::Config as HighwayConfig, HighwayMessage, HighwayProtocol, ACTION_ID_VERTEX, + }, + tests::utils::{ + new_test_chainspec, ALICE_NODE_ID, ALICE_PUBLIC_KEY, ALICE_SECRET_KEY, BOB_PUBLIC_KEY, }, - tests::utils::{new_test_chainspec, ALICE_PUBLIC_KEY, ALICE_SECRET_KEY, BOB_PUBLIC_KEY}, traits::Context, - HighwayProtocol, + utils::{ValidatorIndex, Weight}, + SerializedMessage, }, - types::{ProtoBlock, TimeDiff, Timestamp}, + types::BlockPayload, }; -#[derive(DataSize, Debug, Ord, PartialOrd, Copy, Clone, Display, Hash, Eq, PartialEq)] -pub(crate) struct NodeId(pub u8); - /// Returns a new `State` with `ClContext` parameters suitable for tests. 
pub(crate) fn new_test_state(weights: I, seed: u64) -> State where I: IntoIterator, - T: Into, + T: Into, { + #[allow(clippy::arithmetic_side_effects)] // Left shift with small enough constants. let params = state::Params::new( seed, - highway_testing::TEST_BLOCK_REWARD, - highway_testing::TEST_BLOCK_REWARD / 5, - 14, - 19, - 4, + TimeDiff::from_millis(1 << 14), + TimeDiff::from_millis(1 << 19), + TimeDiff::from_millis(1 << 14), u64::MAX, 0.into(), - Timestamp::from(u64::MAX), + Timestamp::MAX, highway_testing::TEST_ENDORSEMENT_EVIDENCE_LIMIT, ); let weights = weights.into_iter().map(|w| w.into()).collect::>(); - state::State::new(weights, params, vec![]) + State::new(weights, params, vec![], vec![]) } const INSTANCE_ID_DATA: &[u8; 1] = &[123u8; 1]; -const STANDSTILL_TIMEOUT: &str = "1min"; pub(crate) fn new_test_highway_protocol( weights: I1, - init_slashed: I2, -) -> Box> + init_faulty: I2, +) -> Box> where I1: IntoIterator, I2: IntoIterator, @@ -72,73 +68,43 @@ where .collect::>(); let chainspec = new_test_chainspec(weights.clone()); let config = Config { - secret_key_path: Default::default(), + max_execution_delay: 3, highway: HighwayConfig { - unit_hashes_folder: Default::default(), pending_vertex_timeout: "1min".parse().unwrap(), - standstill_timeout: STANDSTILL_TIMEOUT.parse().unwrap(), - log_participation_interval: "10sec".parse().unwrap(), - max_execution_delay: 3, - round_success_meter: Default::default(), - request_latest_state_timeout: "10sec".parse().unwrap(), + log_participation_interval: Some("10sec".parse().unwrap()), + ..HighwayConfig::default() }, + ..Default::default() }; // Timestamp of the genesis era start and test start. 
let start_timestamp: Timestamp = 0.into(); - let (hw_proto, outcomes) = HighwayProtocol::::new_boxed( + let (hw_proto, outcomes) = HighwayProtocol::::new_boxed( ClContext::hash(INSTANCE_ID_DATA), weights.into_iter().collect(), - &init_slashed.into_iter().collect(), - &(&chainspec).into(), + &init_faulty.into_iter().collect(), + &None.into_iter().collect(), + &chainspec, &config, None, start_timestamp, 0, start_timestamp, + None, ); - // We expect for messages: + // We expect three messages: // * log participation timer, // * log synchronizer queue length timer, - // * purge synchronizer queue timer, - // * inactivity timer, + // * purge synchronizer queue timer // If there are more, the tests might need to handle them. - assert_eq!(4, outcomes.len()); + assert_eq!(3, outcomes.len()); hw_proto } -#[test] -fn test_highway_protocol_handle_message_parse_error() { - // Build a highway_protocol for instrumentation - let mut highway_protocol: Box> = - new_test_highway_protocol(vec![(ALICE_PUBLIC_KEY.clone(), 100)], vec![]); - - let now = Timestamp::zero(); - let sender = NodeId(123); - let msg = vec![]; - let mut effects: Vec> = - highway_protocol.handle_message(sender.to_owned(), msg.to_owned(), now); - - assert_eq!(effects.len(), 1); - - let maybe_protocol_outcome = effects.pop(); - - match &maybe_protocol_outcome { - None => panic!("We just checked that effects has length 1!"), - Some(ProtocolOutcome::InvalidIncomingMessage(invalid_msg, offending_sender, _err)) => { - assert_eq!( - invalid_msg, &msg, - "Invalid message is not message that was sent." 
- ); - assert_eq!(offending_sender, &sender, "Unexpected sender.") - } - Some(protocol_outcome) => panic!("Unexpected protocol outcome {:?}", protocol_outcome), - } -} - pub(crate) const N: Observation = Observation::None; #[test] fn send_a_wire_unit_with_too_small_a_round_exp() { + let mut rng = TestRng::new(); let creator: ValidatorIndex = ValidatorIndex(0); let validators = vec![(ALICE_PUBLIC_KEY.clone(), 100)]; let state: State = new_test_state(validators.iter().map(|(_pk, w)| *w), 0); @@ -155,105 +121,68 @@ fn send_a_wire_unit_with_too_small_a_round_exp() { round_exp: 0, endorsed: BTreeSet::new(), }; - let alice_keypair: Keypair = Keypair::from(Arc::new(ALICE_SECRET_KEY.clone())); + let alice_keypair: Keypair = Keypair::from(Arc::clone(&*ALICE_SECRET_KEY)); let highway_message: HighwayMessage = HighwayMessage::NewVertex(Vertex::Unit( SignedWireUnit::new(wunit.into_hashed(), &alice_keypair), )); let mut highway_protocol = new_test_highway_protocol(validators, vec![]); - let sender = NodeId(123); - let msg = bincode::serialize(&highway_message).unwrap(); - let mut outcomes = highway_protocol.handle_message(sender.to_owned(), msg.to_owned(), now); - assert_eq!(outcomes.len(), 1); - - let maybe_protocol_outcome = outcomes.pop(); - match &maybe_protocol_outcome { - None => unreachable!("We just checked that outcomes has length 1!"), - Some(ProtocolOutcome::InvalidIncomingMessage(invalid_msg, offending_sender, err)) => { - assert_eq!( - invalid_msg, &msg, - "Invalid message is not message that was sent." 
- ); - assert_eq!(offending_sender, &sender, "Unexpected sender."); - assert!( - format!("{:?}", err).starts_with( - "The vertex contains an invalid unit: `The round \ - length exponent is less than the minimum allowed by \ - the chain-spec.`" - ), - "Error message did not start as expected: {:?}", - err - ) - } - Some(protocol_outcome) => panic!("Unexpected protocol outcome {:?}", protocol_outcome), - } + let sender = *ALICE_NODE_ID; + let msg = SerializedMessage::from_message(&highway_message); + let outcomes = highway_protocol.handle_message(&mut rng, sender.to_owned(), msg, now); + assert_eq!(&*outcomes, [ProtocolOutcome::Disconnect(sender)]); } #[test] fn send_a_valid_wire_unit() { - let standstill_timeout: TimeDiff = STANDSTILL_TIMEOUT.parse().unwrap(); + let mut rng = TestRng::new(); let creator: ValidatorIndex = ValidatorIndex(0); let validators = vec![(ALICE_PUBLIC_KEY.clone(), 100)]; let state: State = new_test_state(validators.iter().map(|(_pk, w)| *w), 0); let panorama: Panorama = Panorama::from(vec![N]); let seq_number = panorama.next_seq_num(&state, creator); - let mut now = Timestamp::zero(); + let now = Timestamp::zero(); let wunit: WireUnit = WireUnit { panorama, creator, instance_id: ClContext::hash(INSTANCE_ID_DATA), - value: Some(CandidateBlock::new( - ProtoBlock::new(vec![], vec![], now, false), + value: Some(Arc::new(BlockPayload::new( + BTreeMap::new(), vec![], - None, - )), + Default::default(), + false, + 1u8, + ))), seq_number, timestamp: now, - round_exp: 14, + round_exp: 0, endorsed: BTreeSet::new(), }; - let alice_keypair: Keypair = Keypair::from(Arc::new(ALICE_SECRET_KEY.clone())); + let alice_keypair: Keypair = Keypair::from(Arc::clone(&*ALICE_SECRET_KEY)); let highway_message: HighwayMessage = HighwayMessage::NewVertex(Vertex::Unit( SignedWireUnit::new(wunit.into_hashed(), &alice_keypair), )); let mut highway_protocol = new_test_highway_protocol(validators, vec![]); - let sender = NodeId(123); - let msg = 
bincode::serialize(&highway_message).unwrap(); + let sender = *ALICE_NODE_ID; + let msg = SerializedMessage::from_message(&highway_message); - let mut outcomes = highway_protocol.handle_message(sender, msg, now); + let mut outcomes = highway_protocol.handle_message(&mut rng, sender, msg, now); while let Some(outcome) = outcomes.pop() { match outcome { - ProtocolOutcome::CreatedGossipMessage(_) | ProtocolOutcome::FinalizedBlock(_) => (), + ProtocolOutcome::CreatedGossipMessage(_) + | ProtocolOutcome::FinalizedBlock(_) + | ProtocolOutcome::HandledProposedBlock(_) => (), ProtocolOutcome::QueueAction(ACTION_ID_VERTEX) => { outcomes.extend(highway_protocol.handle_action(ACTION_ID_VERTEX, now)) } outcome => panic!("Unexpected outcome: {:?}", outcome), } } - - // Our protocol state has changed since initialization, so there is no alert. - now += standstill_timeout; - let outcomes = highway_protocol.handle_timer(now, TIMER_ID_STANDSTILL_ALERT); - match &*outcomes { - [ProtocolOutcome::ScheduleTimer(timestamp, timer_id)] => { - assert_eq!(*timestamp, now + standstill_timeout); - assert_eq!(*timer_id, TIMER_ID_STANDSTILL_ALERT); - } - _ => panic!("Unexpected outcomes: {:?}", outcomes), - } - - // If after another timeout, the state has not changed, an alert is raised. 
- now += standstill_timeout; - let outcomes = highway_protocol.handle_timer(now, TIMER_ID_STANDSTILL_ALERT); - assert!( - matches!(&*outcomes, [ProtocolOutcome::StandstillAlert]), - "Unexpected outcomes: {:?}", - outcomes - ); } #[test] fn detect_doppelganger() { + let mut rng = TestRng::new(); let creator: ValidatorIndex = ALICE; let validators = vec![ (ALICE_PUBLIC_KEY.clone(), 100), @@ -263,10 +192,15 @@ fn detect_doppelganger() { let panorama: Panorama = Panorama::from(vec![N, N]); let seq_number = panorama.next_seq_num(&state, creator); let instance_id = ClContext::hash(INSTANCE_ID_DATA); - let round_exp = 14; + let round_exp = 0; let now = Timestamp::zero(); - let proto_block = ProtoBlock::new(vec![], vec![], now, false); - let value = CandidateBlock::new(proto_block, vec![], None); + let value = Arc::new(BlockPayload::new( + BTreeMap::new(), + vec![], + Default::default(), + false, + 1u8, + )); let wunit: WireUnit = WireUnit { panorama, creator, @@ -277,20 +211,20 @@ fn detect_doppelganger() { round_exp, endorsed: BTreeSet::new(), }; - let alice_keypair: Keypair = Keypair::from(Arc::new(ALICE_SECRET_KEY.clone())); + let alice_keypair: Keypair = Keypair::from(Arc::clone(&*ALICE_SECRET_KEY)); let highway_message: HighwayMessage = HighwayMessage::NewVertex(Vertex::Unit( SignedWireUnit::new(wunit.into_hashed(), &alice_keypair), )); let mut highway_protocol = new_test_highway_protocol(validators, vec![]); // Activate ALICE as validator. let _ = highway_protocol.activate_validator(ALICE_PUBLIC_KEY.clone(), alice_keypair, now, None); - assert_eq!(highway_protocol.is_active(), true); - let sender = NodeId(123); - let msg = bincode::serialize(&highway_message).unwrap(); + assert!(highway_protocol.is_active()); + let sender = *ALICE_NODE_ID; + let msg = SerializedMessage::from_message(&highway_message); // "Send" a message created by ALICE to an instance of Highway where she's an active validator. 
// An incoming unit, created by the same validator, should be properly detected as a // doppelganger. - let mut outcomes = highway_protocol.handle_message(sender, msg, now); + let mut outcomes = highway_protocol.handle_message(&mut rng, sender, msg, now); while let Some(outcome) = outcomes.pop() { match outcome { ProtocolOutcome::DoppelgangerDetected => return, @@ -302,3 +236,14 @@ fn detect_doppelganger() { } panic!("failed to return DoppelgangerDetected effect"); } + +#[test] +fn max_rounds_per_era_returns_the_correct_value_for_prod_chainspec_value() { + let max_rounds_per_era = max_rounds_per_era( + 20, + TimeDiff::from_seconds(120 * 60), + TimeDiff::from_millis(32768), + ); + + assert_eq!(219, max_rounds_per_era); +} diff --git a/node/src/components/consensus/protocols/zug.rs b/node/src/components/consensus/protocols/zug.rs new file mode 100644 index 0000000000..eee90806ee --- /dev/null +++ b/node/src/components/consensus/protocols/zug.rs @@ -0,0 +1,2774 @@ +//! # The Zug consensus protocol. +//! +//! This protocol requires that at most _f_ out of _n > 3 f_ validators (by weight) are faulty. It +//! also assumes that there is an upper bound for the network delay: how long a message sent by a +//! correct validator can take before it is delivered. +//! +//! Under these conditions all correct nodes will reach agreement on a chain of _finalized_ blocks. +//! +//! A _quorum_ is a set of validators whose total weight is greater than _(n + f) / 2_. Thus any two +//! quorums always have a correct validator in common. Since _(n + f) / 2 < n - f_, the correct +//! validators constitute a quorum. +//! +//! +//! ## How it Works +//! +//! In every round the designated leader can sign a `Proposal` message to suggest a block. The +//! proposal also points to an earlier round in which the parent block was proposed. +//! +//! Each validator then signs an `Echo` message with the proposal's hash. Correct validators only +//! 
sign one `Echo` per round, so at most one proposal can get `Echo`s signed by a quorum. If there +//! is a quorum and some other conditions are met (see below), the proposal is _accepted_. The next +//! round's leader can now make a proposal that uses this one as a parent. +//! +//! Each validator that observes the proposal to be accepted in time signs a `Vote(true)` message. +//! If they time out waiting they sign `Vote(false)` instead. If a quorum signs `true`, the round is +//! _committed_ and the proposal and all its ancestors are finalized. If a quorum signs `false`, the +//! round is _skippable_: The next round's leader can now make a proposal with a parent from an +//! earlier round. Correct validators only sign either `true` or `false`, so a round can be either +//! committed or skippable but not both. +//! +//! If there is no accepted proposal all correct validators will eventually vote `false`, so the +//! round becomes skippable. This is what makes the protocol _live_: The next leader will eventually +//! be allowed to make a proposal, because either there is an accepted proposal that can be the +//! parent, or the round will eventually be skippable and an earlier round's proposal can be used as +//! a parent. If the timeout is long enough correct proposers' blocks will usually get finalized. +//! +//! For a proposal to be _accepted_, the parent proposal needs to also be accepted, and all rounds +//! between the parent and the current round must be skippable. This is what makes the protocol +//! _safe_: If two rounds are committed, their proposals must be ancestors of each other, +//! because they are not skippable. Thus no two conflicting blocks can become finalized. +//! +//! Of course there is also a first block: Whenever _all_ earlier rounds are skippable (in +//! particular in the first round) the leader may propose a block with no parent. +//! +//! +//! ## Syncing the State +//! +//! 
Every new signed message is optimistically sent directly to all peers. We want to guarantee that +//! it is eventually seen by all validators, even if they are not fully connected. This is +//! achieved via a pull-based randomized gossip mechanism: +//! +//! A `SyncRequest` message containing information about a random part of the local protocol state +//! is periodically sent to a random peer. The peer compares that to its local state, and responds +//! with all signed messages that it has and the other is missing. + +pub(crate) mod config; +#[cfg(test)] +mod des_testing; +mod fault; +mod message; +mod params; +mod participation; +mod proposal; +mod round; +#[cfg(test)] +mod tests; + +use std::{ + any::Any, + cmp::Reverse, + collections::{btree_map, BTreeMap, HashMap, HashSet}, + fmt::Debug, + iter, + path::PathBuf, +}; + +use datasize::DataSize; +use either::Either; +use itertools::Itertools; +use rand::{seq::IteratorRandom, Rng}; +use tracing::{debug, error, event, info, trace, warn, Level}; + +use casper_types::{Chainspec, TimeDiff, Timestamp, U512}; + +use crate::{ + components::consensus::{ + config::Config, + consensus_protocol::{ + BlockContext, ConsensusProtocol, FinalizedBlock, ProposedBlock, ProtocolOutcome, + ProtocolOutcomes, TerminalBlockData, + }, + era_supervisor::SerializedMessage, + protocols, + traits::{ConsensusValueT, Context}, + utils::{ + wal::{ReadWal, WalEntry, WriteWal}, + ValidatorIndex, ValidatorMap, Validators, Weight, + }, + ActionId, LeaderSequence, TimerId, + }, + types::NodeId, + utils, NodeRng, +}; +use fault::Fault; +use message::{Content, SignedMessage, SyncResponse}; +use params::Params; +use participation::{Participation, ParticipationStatus}; +use proposal::{HashedProposal, Proposal}; +use round::Round; +use serde::{Deserialize, Serialize}; + +pub(crate) use message::{Message, SyncRequest}; + +/// The timer for syncing with a random peer. +const TIMER_ID_SYNC_PEER: TimerId = TimerId(0); +/// The timer for calling `update`. 
+const TIMER_ID_UPDATE: TimerId = TimerId(1); +/// The timer for logging inactive validators. +const TIMER_ID_LOG_PARTICIPATION: TimerId = TimerId(2); + +/// The maximum number of future rounds we instantiate if we get messages from rounds that we +/// haven't started yet. +const MAX_FUTURE_ROUNDS: u32 = 7200; // Don't drop messages in 2-hour eras with 1-second rounds. + +/// Identifies a single [`Round`] in the protocol. +pub(crate) type RoundId = u32; + +type ProposalsAwaitingParent = HashSet<(RoundId, NodeId)>; +type ProposalsAwaitingValidation = HashSet<(RoundId, HashedProposal, NodeId)>; + +/// An entry in the Write-Ahead Log, storing a message we had added to our protocol state. +#[derive(Deserialize, Serialize, Debug, PartialEq)] +#[serde(bound( + serialize = "C::Hash: Serialize", + deserialize = "C::Hash: Deserialize<'de>", +))] +pub(crate) enum ZugWalEntry { + /// A signed echo or vote. + SignedMessage(SignedMessage), + /// A proposal. + Proposal(Proposal, RoundId), + /// Evidence of a validator double-signing. + Evidence(SignedMessage, Content, C::Signature), +} + +impl WalEntry for ZugWalEntry {} + +/// Contains the portion of the state required for an active validator to participate in the +/// protocol. +#[derive(DataSize)] +pub(crate) struct ActiveValidator +where + C: Context, +{ + idx: ValidatorIndex, + secret: C::ValidatorSecret, +} + +impl Debug for ActiveValidator { + fn fmt(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + formatter + .debug_struct("ActiveValidator") + .field("idx", &self.idx) + .field("secret", &"") + .finish() + } +} + +struct FaultySender(NodeId); + +/// Contains the state required for the protocol. 
+#[derive(Debug, DataSize)] +pub(crate) struct Zug +where + C: Context, +{ + /// Contains numerical parameters for the protocol + params: Params, + /// The timeout for the current round's proposal, in milliseconds + proposal_timeout_millis: f64, + /// The validators in this instantiation of the protocol + validators: Validators, + /// If we are a validator ourselves, we must know which index we + /// are in the [`Validators`] and have a private key for consensus. + active_validator: Option>, + /// When an era has already completed, sometimes we still need to keep + /// it around to provide evidence for equivocation in previous eras. + evidence_only: bool, + /// Proposals which have not yet had their parent accepted, by parent round ID. + proposals_waiting_for_parent: + HashMap, ProposalsAwaitingParent>>, + /// Incoming blocks we can't add yet because we are waiting for validation. + proposals_waiting_for_validation: HashMap, ProposalsAwaitingValidation>, + /// If we requested a new block from the block proposer component this contains the proposal's + /// round ID and the parent's round ID, if there is a parent. + pending_proposal: Option<(BlockContext, RoundId, Option)>, + leader_sequence: LeaderSequence, + /// The [`Round`]s of this protocol which we've instantiated. + rounds: BTreeMap>, + /// List of faulty validators and their type of fault. + faults: HashMap>, + /// The configuration for the protocol + config: config::Config, + /// This is a signed message for every validator we have received a signature from. + active: ValidatorMap>>, + /// The lowest round ID of a block that could still be finalized in the future. + first_non_finalized_round_id: RoundId, + /// The lowest round that needs to be considered in `upgrade`. + maybe_dirty_round_id: Option, + /// The lowest non-skippable round without an accepted value. + current_round: RoundId, + /// The time when the current round started. 
+ current_round_start: Timestamp, + /// Whether anything was recently added to the protocol state. + progress_detected: bool, + /// Whether or not the protocol is currently paused + paused: bool, + /// The next update we have set a timer for. This helps deduplicate redundant calls to + /// `update`. + next_scheduled_update: Timestamp, + /// The write-ahead log to prevent honest nodes from double-signing upon restart. + write_wal: Option>>, + /// A map of random IDs -> tipmestamp of when it has been created, allowing to + /// verify that a response has been asked for. + sent_sync_requests: registered_sync::RegisteredSync, +} + +impl Zug { + fn new_with_params( + validators: Validators, + params: Params, + config: &config::Config, + prev_cp: Option<&dyn ConsensusProtocol>, + seed: u64, + ) -> Zug { + let weights = protocols::common::validator_weights::(&validators); + let active: ValidatorMap<_> = weights.iter().map(|_| None).collect(); + + // Use the estimate from the previous era as the proposal timeout. Start with one minimum + // timeout times the grace period factor: This is what we would settle on if proposals + // always got accepted exactly after one minimum timeout. 
+ let proposal_timeout_millis = prev_cp + .and_then(|cp| cp.as_any().downcast_ref::>()) + .map(|zug| zug.proposal_timeout_millis) + .unwrap_or_else(|| { + config.proposal_timeout.millis() as f64 + * (config.proposal_grace_period as f64 / 100.0 + 1.0) + }); + + let mut can_propose: ValidatorMap = weights.iter().map(|_| true).collect(); + for vidx in validators.iter_cannot_propose_idx() { + can_propose[vidx] = false; + } + let faults: HashMap<_, _> = validators + .iter_banned_idx() + .map(|idx| (idx, Fault::Banned)) + .collect(); + + let leader_sequence = LeaderSequence::new(seed, &weights, can_propose); + + info!( + instance_id = %params.instance_id(), + era_start_time = %params.start_timestamp(), + %proposal_timeout_millis, + "initializing Zug instance", + ); + + Zug { + leader_sequence, + proposals_waiting_for_parent: HashMap::new(), + proposals_waiting_for_validation: HashMap::new(), + rounds: BTreeMap::new(), + first_non_finalized_round_id: 0, + maybe_dirty_round_id: None, + current_round: 0, + current_round_start: Timestamp::MAX, + evidence_only: false, + faults, + active, + config: config.clone(), + params, + proposal_timeout_millis, + validators, + active_validator: None, + pending_proposal: None, + progress_detected: false, + paused: false, + next_scheduled_update: Timestamp::MAX, + write_wal: None, + sent_sync_requests: Default::default(), + } + } + + /// Creates a new [`Zug`] instance. 
+ #[allow(clippy::too_many_arguments)] + fn new( + instance_id: C::InstanceId, + validator_stakes: BTreeMap, + faulty: &HashSet, + inactive: &HashSet, + chainspec: &Chainspec, + config: &Config, + prev_cp: Option<&dyn ConsensusProtocol>, + era_start_time: Timestamp, + seed: u64, + ) -> Zug { + let validators = protocols::common::validators::(faulty, inactive, validator_stakes); + let core_config = &chainspec.core_config; + + let params = Params::new( + instance_id, + core_config.minimum_block_time, + era_start_time, + core_config.minimum_era_height, + era_start_time.saturating_add(core_config.era_duration), + protocols::common::ftt::(core_config.finality_threshold_fraction, &validators), + ); + + Zug::new_with_params(validators, params, &config.zug, prev_cp, seed) + } + + /// Creates a new boxed [`Zug`] instance. + #[allow(clippy::too_many_arguments)] + pub(crate) fn new_boxed( + instance_id: C::InstanceId, + validator_stakes: BTreeMap, + faulty: &HashSet, + inactive: &HashSet, + chainspec: &Chainspec, + config: &Config, + prev_cp: Option<&dyn ConsensusProtocol>, + era_start_time: Timestamp, + seed: u64, + now: Timestamp, + wal_file: PathBuf, + ) -> (Box>, ProtocolOutcomes) { + let mut zug = Self::new( + instance_id, + validator_stakes, + faulty, + inactive, + chainspec, + config, + prev_cp, + era_start_time, + seed, + ); + + let outcomes = zug.open_wal(wal_file, now); + + (Box::new(zug), outcomes) + } + + /// Returns our validator index (if we are an active validator). + fn our_idx(&self) -> Option { + self.active_validator.as_ref().map(|av| av.idx.0) + } + + /// Prints a log statement listing the inactive and faulty validators. 
+ fn log_participation(&self) { + let mut inactive_w: u64 = 0; + let mut faulty_w: u64 = 0; + let total_w = self.validators.total_weight().0; + let mut inactive_validators = Vec::new(); + let mut faulty_validators = Vec::new(); + for (idx, v_id) in self.validators.enumerate_ids() { + if let Some(status) = ParticipationStatus::for_index(idx, self) { + match status { + ParticipationStatus::Equivocated + | ParticipationStatus::EquivocatedInOtherEra => { + faulty_w = faulty_w.saturating_add(self.validators.weight(idx).0); + faulty_validators.push((idx, v_id.clone(), status)); + } + ParticipationStatus::Inactive | ParticipationStatus::LastSeenInRound(_) => { + inactive_w = inactive_w.saturating_add(self.validators.weight(idx).0); + inactive_validators.push((idx, v_id.clone(), status)); + } + } + } + } + inactive_validators.sort_by_key(|(idx, _, status)| (Reverse(*status), *idx)); + faulty_validators.sort_by_key(|(idx, _, status)| (Reverse(*status), *idx)); + let inactive_w_100 = u128::from(inactive_w).saturating_mul(100); + let faulty_w_100 = u128::from(faulty_w).saturating_mul(100); + let participation = Participation:: { + instance_id: *self.instance_id(), + inactive_stake_percent: utils::div_round(inactive_w_100, u128::from(total_w)) as u8, + faulty_stake_percent: utils::div_round(faulty_w_100, u128::from(total_w)) as u8, + inactive_validators, + faulty_validators, + }; + info!( + our_idx = self.our_idx(), + ?participation, + "validator participation" + ); + } + + /// Returns whether the switch block has already been finalized. + fn finalized_switch_block(&self) -> bool { + if let Some(round_id) = self.first_non_finalized_round_id.checked_sub(1) { + self.accepted_switch_block(round_id) || self.accepted_dummy_proposal(round_id) + } else { + false + } + } + + /// Returns whether a block was accepted that, if finalized, would be the last one. 
+ fn accepted_switch_block(&self, round_id: RoundId) -> bool { + match self.round(round_id).and_then(Round::accepted_proposal) { + None => false, + Some((height, proposal)) => { + proposal.maybe_block().is_some() // not a dummy proposal + && height.saturating_add(1) >= self.params.end_height() // reached era height + && proposal.timestamp() >= self.params.end_timestamp() // minimum era duration + } + } + } + + /// Returns whether a proposal without a block was accepted, i.e. whether some ancestor of the + /// accepted proposal is a switch block. + fn accepted_dummy_proposal(&self, round_id: RoundId) -> bool { + match self.round(round_id).and_then(Round::accepted_proposal) { + None => false, + Some((_, proposal)) => proposal.maybe_block().is_none(), + } + } + + /// Returns whether the validator has already sent an `Echo` in this round. + fn has_echoed(&self, round_id: RoundId, validator_idx: ValidatorIndex) -> bool { + self.round(round_id) + .is_some_and(|round| round.has_echoed(validator_idx)) + } + + /// Returns whether the validator has already cast a `true` or `false` vote. + fn has_voted(&self, round_id: RoundId, validator_idx: ValidatorIndex) -> bool { + self.round(round_id) + .is_some_and(|round| round.has_voted(validator_idx)) + } + + /// Request the latest state from a random peer. + fn handle_sync_peer_timer(&mut self, now: Timestamp, rng: &mut NodeRng) -> ProtocolOutcomes { + if self.evidence_only || self.finalized_switch_block() { + return vec![]; // Era has ended. No further progress is expected. + } + trace!( + our_idx = self.our_idx(), + instance_id = ?self.instance_id(), + "syncing with random peer", + ); + // Inform a peer about our protocol state and schedule the next request. 
+ let first_validator_idx = ValidatorIndex(rng.gen_range(0..self.validators.len() as u32)); + let round_id = (self.first_non_finalized_round_id..=self.current_round) + .choose(rng) + .unwrap_or(self.current_round); + let payload = self.create_sync_request(rng, first_validator_idx, round_id); + let mut outcomes = vec![ProtocolOutcome::CreatedRequestToRandomPeer( + SerializedMessage::from_message(&payload), + )]; + // Periodically sync the state with a random peer. + if let Some(interval) = self.config.sync_state_interval { + outcomes.push(ProtocolOutcome::ScheduleTimer( + now.saturating_add(interval), + TIMER_ID_SYNC_PEER, + )); + } + outcomes + } + + /// Prints a log message if the message is a proposal. + fn log_proposal(&self, proposal: &HashedProposal, round_id: RoundId, msg: &str) { + let creator_index = self.leader(round_id); + let creator = if let Some(creator) = self.validators.id(creator_index) { + creator + } else { + error!( + our_idx = self.our_idx(), + ?creator_index, + ?round_id, + "{}: invalid creator", + msg + ); + return; + }; + info!( + our_idx = self.our_idx(), + hash = %proposal.hash(), + %creator, + creator_index = creator_index.0, + round_id, + timestamp = %proposal.timestamp(), + "{}", msg, + ); + } + + /// Creates a `SyncRequest` message to inform a peer about our view of the given round, so that + /// the peer can send us any data we are missing. + /// + /// If there are more than 128 validators, the information only covers echoes and votes of + /// validators with index in `first_validator_idx..=(first_validator_idx + 127)`. 
    fn create_sync_request(
        &mut self,
        rng: &mut NodeRng,
        first_validator_idx: ValidatorIndex,
        round_id: RoundId,
    ) -> SyncRequest {
        // Bit fields (window starting at `first_validator_idx`) summarizing which validators we
        // already know to be faulty or active; the peer only needs to send us what we lack.
        let faulty = self.validator_bit_field(first_validator_idx, self.faults.keys().cloned());
        let active = self.validator_bit_field(first_validator_idx, self.active.keys_some());
        let round = match self.round(round_id) {
            Some(round) => round,
            None => {
                // We have no state for this round yet: request everything, identified only by
                // the faulty/active bit fields computed above.
                return SyncRequest::new_empty_round(
                    round_id,
                    first_validator_idx,
                    faulty,
                    active,
                    *self.instance_id(),
                    self.sent_sync_requests.create_and_register_new_id(rng),
                );
            }
        };
        let true_votes =
            self.validator_bit_field(first_validator_idx, round.votes(true).keys_some());
        let false_votes =
            self.validator_bit_field(first_validator_idx, round.votes(false).keys_some());
        // We only request information about the proposal with the most echoes, by weight.
        // TODO: If there's no quorum, should we prefer the one for which we have the leader's echo?
        let proposal_hash = round.quorum_echoes().or_else(|| {
            round
                .echoes()
                .iter()
                .max_by_key(|(_, echo_map)| self.sum_weights(echo_map.keys()))
                .map(|(hash, _)| *hash)
        });
        // True if the proposal we store is exactly the one whose echoes we summarize below.
        let has_proposal = round.proposal().map(HashedProposal::hash) == proposal_hash.as_ref();
        let mut echoes = 0;
        if let Some(echo_map) = proposal_hash.and_then(|hash| round.echoes().get(&hash)) {
            echoes = self.validator_bit_field(first_validator_idx, echo_map.keys().cloned());
        }

        // We create a new ID that the responder will use to show it's allowed to do so:
        let sync_id = self.sent_sync_requests.create_and_register_new_id(rng);

        SyncRequest {
            round_id,
            proposal_hash,
            has_proposal,
            first_validator_idx,
            echoes,
            true_votes,
            false_votes,
            active,
            faulty,
            instance_id: *self.instance_id(),
            sync_id,
        }
    }

    /// Returns a bit field where each bit stands for a validator: the least significant one for
    /// `first_idx` and the most significant one for `first_idx + 127`, wrapping around at the total
    /// number of validators. The bits of the validators in `index_iter` that fall into that
    /// range are set to `1`, the others are `0`.
    fn validator_bit_field(
        &self,
        ValidatorIndex(first_idx): ValidatorIndex,
        index_iter: impl Iterator,
    ) -> u128 {
        let validator_count = self.validators.len() as u32;
        if first_idx >= validator_count {
            // Out-of-range window start: no validator can fall into it.
            return 0;
        }
        let mut bit_field: u128 = 0;
        for ValidatorIndex(v_idx) in index_iter {
            // The validator's bit is v_idx - first_idx, but we wrap around.
            let idx = match v_idx.overflowing_sub(first_idx) {
                (idx, false) => idx,
                // An underflow occurred. Add validator_count to wrap back around.
                (idx, true) => idx.wrapping_add(validator_count),
            };
            if idx < u128::BITS {
                bit_field |= 1_u128.wrapping_shl(idx); // Set bit number idx to 1.
            }
        }
        bit_field
    }

    /// Returns an iterator over all validator indexes whose bits in the `bit_field` are `1`, where
    /// the least significant one stands for `first_idx` and the most significant one for
    /// `first_idx + 127`, wrapping around.
    fn iter_validator_bit_field(
        &self,
        ValidatorIndex(mut idx): ValidatorIndex,
        mut bit_field: u128,
    ) -> impl Iterator {
        let validator_count = self.validators.len() as u32;
        iter::from_fn(move || {
            if bit_field == 0 || idx >= validator_count {
                return None; // No remaining bits with value 1.
            }
            let zeros = bit_field.trailing_zeros();
            // The index of the validator whose bit is 1. We shift the bits to the right so that the
            // least significant bit now corresponds to this one, then we output the index and set
            // the bit to 0.
            bit_field = bit_field.wrapping_shr(zeros);
            bit_field &= !1;
            idx = match idx.overflowing_add(zeros) {
                (i, false) => i,
                // If an overflow occurs, go back via an underflow, so the value modulo
                // validator_count is correct again.
                (i, true) => i
                    .checked_rem(validator_count)?
                    .wrapping_sub(validator_count),
            }
            // `checked_rem` yields `None` only when `validator_count` is 0; that also ends
            // the iterator.
            .checked_rem(validator_count)?;
            Some(ValidatorIndex(idx))
        })
    }

    /// Returns whether `v_idx` is covered by a validator index that starts at `first_idx`.
    fn validator_bit_field_includes(
        &self,
        ValidatorIndex(first_idx): ValidatorIndex,
        ValidatorIndex(v_idx): ValidatorIndex,
    ) -> bool {
        let validator_count = self.validators.len() as u32;
        if first_idx >= validator_count {
            return false;
        }
        let high_bit = u128::BITS.saturating_sub(1);
        // The overflow flag acts as the 33rd bit of the actual 32-bit sum.
        let (last_idx, last_idx_overflow) = first_idx.overflowing_add(high_bit);
        if v_idx >= first_idx {
            // v_idx is at least first_idx, so it's in the range unless it's higher than the last
            // index, taking into account its 33rd bit.
            last_idx_overflow || v_idx <= last_idx
        } else {
            // v_idx is less than first_idx. But if going from the first to the last index we wrap
            // around, we might still arrive at v_idx:
            let (v_idx2, v_idx2_overflow) = v_idx.overflowing_add(validator_count);
            if v_idx2_overflow == last_idx_overflow {
                v_idx2 <= last_idx
            } else {
                last_idx_overflow
            }
        }
    }

    /// Returns the leader in the specified round.
    pub(crate) fn leader(&self, round_id: RoundId) -> ValidatorIndex {
        // Prefer the leader cached in the round object, if the round already exists.
        if let Some(round) = self.round(round_id) {
            return round.leader();
        }
        // Otherwise derive it from the leader sequence.
        self.leader_sequence.leader(u64::from(round_id))
    }

    /// Signs the given `content` for `round_id` as the active validator, records the message in
    /// the WAL and adds it to the protocol state.
    ///
    /// Returns `None` if we are not an active validator, if the protocol is paused, if we already
    /// signed an equivalent message in this round, or if recording/adding the message fails.
    fn create_message(
        &mut self,
        round_id: RoundId,
        content: Content,
    ) -> Option> {
        let (validator_idx, secret_key) = if let Some(active_validator) = &self.active_validator {
            (active_validator.idx, &active_validator.secret)
        } else {
            return None; // Observer only; we must not sign anything.
        };
        if self.paused {
            return None;
        }
        // Never sign the same kind of content twice in one round: that would be a fault.
        let already_signed = match &content {
            Content::Echo(_) => self.has_echoed(round_id, validator_idx),
            Content::Vote(_) => self.has_voted(round_id, validator_idx),
        };
        if already_signed {
            return None;
        }
        let signed_msg = SignedMessage::sign_new(
            round_id,
            *self.instance_id(),
            content,
            validator_idx,
            secret_key,
        );
        // We only return the new message if we are able to record it. If that fails we
        // wouldn't know about our own message after a restart and risk double-signing.
        if self.record_entry(&ZugWalEntry::SignedMessage(signed_msg.clone()))
            && self.add_content(signed_msg.clone())
        {
            Some(signed_msg)
        } else {
            debug!(
                our_idx = self.our_idx(),
                %round_id,
                ?content,
                "couldn't record a signed message in the WAL or add it to the protocol state"
            );
            None
        }
    }

    /// If we are an active validator and it would be safe for us to sign this message and we
    /// haven't signed it before, we sign it, add it to our state and gossip it to the network.
    ///
    /// Does not call `update`!
+ fn create_and_gossip_message( + &mut self, + round_id: RoundId, + content: Content, + ) -> ProtocolOutcomes { + let maybe_signed_msg = self.create_message(round_id, content); + maybe_signed_msg + .into_iter() + .map(|signed_msg| { + let message = Message::Signed(signed_msg); + ProtocolOutcome::CreatedGossipMessage(SerializedMessage::from_message(&message)) + }) + .collect() + } + + /// When we receive evidence for a fault, we must notify the rest of the network of this + /// evidence. Beyond that, we can remove all of the faulty validator's previous information + /// from the protocol state. + fn handle_fault( + &mut self, + signed_msg: SignedMessage, + validator_id: C::ValidatorId, + content2: Content, + signature2: C::Signature, + now: Timestamp, + ) -> ProtocolOutcomes { + self.record_entry(&ZugWalEntry::Evidence( + signed_msg.clone(), + content2, + signature2, + )); + self.handle_fault_no_wal(signed_msg, validator_id, content2, signature2, now) + } + + /// Internal to handle_fault, documentation from that applies + fn handle_fault_no_wal( + &mut self, + signed_msg: SignedMessage, + validator_id: C::ValidatorId, + content2: Content, + signature2: C::Signature, + now: Timestamp, + ) -> ProtocolOutcomes { + let validator_idx = signed_msg.validator_idx; + warn!( + our_idx = self.our_idx(), + ?signed_msg, + ?content2, + id = %validator_id, + "validator double-signed" + ); + let fault = Fault::Direct(signed_msg, content2, signature2); + self.faults.insert(validator_idx, fault); + if Some(validator_idx) == self.active_validator.as_ref().map(|av| av.idx) { + error!(our_idx = validator_idx.0, "we are faulty; deactivating"); + self.active_validator = None; + } + self.active[validator_idx] = None; + self.progress_detected = true; + let mut outcomes = vec![ProtocolOutcome::NewEvidence(validator_id)]; + if self.faulty_weight() > self.params.ftt() { + outcomes.push(ProtocolOutcome::FttExceeded); + return outcomes; + } + + // Remove all votes and echoes from the faulty 
validator: They count towards every quorum + // now so nobody has to store their messages. + for round in self.rounds.values_mut() { + round.remove_votes_and_echoes(validator_idx); + } + + // Recompute quorums; if any new quorums are found, call `update`. + for round_id in + self.first_non_finalized_round_id..=self.rounds.keys().last().copied().unwrap_or(0) + { + if !self.rounds.contains_key(&round_id) { + continue; + } + if self.rounds[&round_id].quorum_echoes().is_none() { + let hashes = self.rounds[&round_id] + .echoes() + .keys() + .copied() + .collect_vec(); + if hashes + .into_iter() + .any(|hash| self.check_new_echo_quorum(round_id, hash)) + { + self.mark_dirty(round_id); + } + } + if self.check_new_vote_quorum(round_id, true) + || self.check_new_vote_quorum(round_id, false) + { + self.mark_dirty(round_id); + } + } + debug!(round_id = ?self.current_round, "Calling update after handle_fault_no_wal"); + outcomes.extend(self.update(now)); + outcomes + } + + /// When we receive a request to synchronize, we must take a careful diff of our state and the + /// state in the sync state to ensure we send them exactly what they need to get back up to + /// speed in the network. + fn handle_sync_request( + &self, + sync_request: SyncRequest, + sender: NodeId, + ) -> (ProtocolOutcomes, Option) { + let SyncRequest { + round_id, + mut proposal_hash, + mut has_proposal, + first_validator_idx, + mut echoes, + true_votes, + false_votes, + active, + faulty, + instance_id, + sync_id, + } = sync_request; + if first_validator_idx.0 >= self.validators.len() as u32 { + info!( + our_idx = self.our_idx(), + first_validator_idx = first_validator_idx.0, + %sender, + "invalid SyncRequest message" + ); + return (vec![ProtocolOutcome::Disconnect(sender)], None); + } + + // If we don't have that round we have no information the requester is missing. 
+ let round = match self.round(round_id) { + Some(round) => round, + None => return (vec![], None), + }; + + // If the peer has no or a wrong proposal we assume they don't have any echoes for the + // correct one. We don't send them the right proposal, though: they might already have it. + if round.quorum_echoes() != proposal_hash && round.quorum_echoes().is_some() { + has_proposal = true; + echoes = 0; + proposal_hash = round.quorum_echoes(); + } + + // The bit field of validators we know to be faulty. + let our_faulty = self.validator_bit_field(first_validator_idx, self.faults.keys().cloned()); + // The echo signatures and proposal/hash we will send in the response. + let mut proposal_or_hash = None; + let mut echo_sigs = BTreeMap::new(); + // The bit field of validators we have echoes from in this round. + let mut our_echoes: u128 = 0; + + if let Some(hash) = proposal_hash { + if let Some(echo_map) = round.echoes().get(&hash) { + // Send them echoes they are missing, but exclude faulty validators. + our_echoes = + self.validator_bit_field(first_validator_idx, echo_map.keys().cloned()); + let missing_echoes = our_echoes & !(echoes | faulty | our_faulty); + for v_idx in self.iter_validator_bit_field(first_validator_idx, missing_echoes) { + echo_sigs.insert(v_idx, echo_map[&v_idx]); + } + if has_proposal { + proposal_or_hash = Some(Either::Right(hash)); + } else { + // If they don't have the proposal make sure we include the leader's echo. + let leader_idx = round.leader(); + if !self.validator_bit_field_includes(first_validator_idx, leader_idx) { + if let Some(signature) = echo_map.get(&leader_idx) { + echo_sigs.insert(leader_idx, *signature); + } + } + if let Some(proposal) = round.proposal() { + if *proposal.hash() == hash { + proposal_or_hash = Some(Either::Left(proposal.inner().clone())); + } + } + } + } + } + + // Send them votes they are missing, but exclude faulty validators. 
If there already is a + // quorum omit the votes that go against the quorum, since they are irrelevant. + let our_true_votes: u128 = if round.quorum_votes() == Some(false) { + 0 + } else { + self.validator_bit_field(first_validator_idx, round.votes(true).keys_some()) + }; + let missing_true_votes = our_true_votes & !(true_votes | faulty | our_faulty); + let true_vote_sigs = self + .iter_validator_bit_field(first_validator_idx, missing_true_votes) + .map(|v_idx| (v_idx, round.votes(true)[v_idx].unwrap())) + .collect(); + let our_false_votes: u128 = if round.quorum_votes() == Some(true) { + 0 + } else { + self.validator_bit_field(first_validator_idx, round.votes(false).keys_some()) + }; + let missing_false_votes = our_false_votes & !(false_votes | faulty | our_faulty); + let false_vote_sigs = self + .iter_validator_bit_field(first_validator_idx, missing_false_votes) + .map(|v_idx| (v_idx, round.votes(false)[v_idx].unwrap())) + .collect(); + + let mut outcomes = vec![]; + + // Add evidence for validators they don't know are faulty. + let missing_faulty = our_faulty & !faulty; + let mut evidence = vec![]; + for v_idx in self.iter_validator_bit_field(first_validator_idx, missing_faulty) { + match &self.faults[&v_idx] { + Fault::Banned => { + info!( + our_idx = self.our_idx(), + validator_index = v_idx.0, + %sender, + "peer disagrees about banned validator; disconnecting" + ); + return (vec![ProtocolOutcome::Disconnect(sender)], None); + } + Fault::Direct(signed_msg, content2, signature2) => { + evidence.push((signed_msg.clone(), *content2, *signature2)); + } + Fault::Indirect => { + let vid = self.validators.id(v_idx).unwrap().clone(); + outcomes.push(ProtocolOutcome::SendEvidence(sender, vid)); + } + } + } + + // Send any signed messages that prove a validator is not completely inactive. We only + // need to do this for validators that the requester doesn't know are active, and that + // we haven't already included any signature from in our votes, echoes or evidence. 
+ let our_active = self.validator_bit_field(first_validator_idx, self.active.keys_some()); + let missing_active = + our_active & !(active | our_echoes | our_true_votes | our_false_votes | our_faulty); + let signed_messages = self + .iter_validator_bit_field(first_validator_idx, missing_active) + .filter_map(|v_idx| self.active[v_idx].clone()) + .collect(); + + // Send the serialized sync response to the requester + let sync_response = SyncResponse { + round_id, + proposal_or_hash, + echo_sigs, + true_vote_sigs, + false_vote_sigs, + signed_messages, + evidence, + instance_id, + sync_id, + }; + ( + outcomes, + Some(SerializedMessage::from_message(&Message::SyncResponse( + sync_response, + ))), + ) + } + + /// The response containing the parts from the sender's protocol state that we were missing. + fn handle_sync_response( + &mut self, + sync_response: SyncResponse, + sender: NodeId, + now: Timestamp, + ) -> ProtocolOutcomes { + let SyncResponse { + round_id, + proposal_or_hash, + echo_sigs, + true_vote_sigs, + false_vote_sigs, + signed_messages, + evidence, + instance_id, + sync_id, + } = sync_response; + + // We have not asked for any sync response: + if self.sent_sync_requests.try_remove_id(sync_id).is_none() { + debug!( + ?round_id, + ?sync_id, + "Disconnecting from peer due to unwanted sync response" + ); + return vec![ProtocolOutcome::Disconnect(sender)]; + } + + // `echo_sigs`, `true_vote_sigs` and `false_vote_sigs` ought not to have more items than the + // amount of validators. In such a case, the sender is malicious. 
+ if echo_sigs + .len() + .max(true_vote_sigs.len()) + .max(false_vote_sigs.len()) + > self.validators.len() + { + debug!( + ?round_id, + ?sync_id, + "Disconnecting from peer due to mismatching echos number" + ); + return vec![ProtocolOutcome::Disconnect(sender)]; + } + + let local_round_id = self.current_round; + let (proposal_hash, proposal) = match proposal_or_hash { + Some(Either::Left(proposal)) => { + let hashed_prop = HashedProposal::new(proposal); + let hash = hashed_prop.hash(); + debug!(?hash, ?round_id, ?local_round_id, "Got proposal from peer"); + (Some(*hash), Some(hashed_prop.into_inner())) + } + Some(Either::Right(hash)) => { + debug!( + ?hash, + ?round_id, + ?local_round_id, + "Got proposal hash from peer" + ); + (Some(hash), None) + } + None => { + debug!( + ?round_id, + ?local_round_id, + "Got no proposal or hash from peer" + ); + (None, None) + } + }; + + // `signed_messages` is now the previous `signed_messages` + all the messages from + // `echo_sigs`, `true_vote_sigs` and `false_vote_sigs`: + let signed_messages = { + let echo_sigs = proposal_hash + .map(move |hash| { + echo_sigs + .into_iter() + .map(move |(validator_idx, signature)| { + (validator_idx, Content::Echo(hash), signature) + }) + }) + .into_iter() + .flatten(); + let true_vote_sigs = true_vote_sigs + .into_iter() + .map(|(validator_idx, signature)| (validator_idx, Content::Vote(true), signature)); + let false_vote_sigs = false_vote_sigs + .into_iter() + .map(|(validator_idx, signature)| (validator_idx, Content::Vote(false), signature)); + + let sigs = echo_sigs.chain(true_vote_sigs).chain(false_vote_sigs).map( + |(validator_idx, content, signature)| SignedMessage { + round_id, + instance_id, + content, + validator_idx, + signature, + }, + ); + + signed_messages.into_iter().chain(sigs) + }; + + let handle_outcomes = move || -> Result<_, FaultySender> { + let mut outcomes = vec![]; + for signed_msg in signed_messages { + outcomes.extend(self.handle_signed_message(signed_msg, sender, 
now)?); + } + for (signed_msg, content2, signature2) in evidence { + outcomes + .extend(self.handle_evidence(signed_msg, content2, signature2, sender, now)?); + } + if let Some(proposal) = proposal { + outcomes.extend(self.handle_proposal(round_id, proposal, sender, now)?); + } + Ok(outcomes) + }; + + outcomes_or_disconnect(handle_outcomes()) + } + + /// The main entry point for signed echoes or votes. This function mostly authenticates + /// and authorizes the message, passing it to [`add_content`] if it passes snuff for the + /// main protocol logic. + fn handle_signed_message( + &mut self, + signed_msg: SignedMessage, + sender: NodeId, + now: Timestamp, + ) -> Result, FaultySender> { + let our_idx = self.our_idx(); + let validator_idx = signed_msg.validator_idx; + let validator_id = if let Some(validator_id) = self.validators.id(validator_idx) { + validator_id.clone() + } else { + warn!( + our_idx, + ?signed_msg, + %sender, + "invalid incoming message: validator index out of range", + ); + return Err(FaultySender(sender)); + }; + + if self.faults.contains_key(&validator_idx) { + debug!( + our_idx, + ?validator_id, + "ignoring message from faulty validator" + ); + return Ok(vec![]); + } + + if signed_msg.round_id > self.current_round.saturating_add(MAX_FUTURE_ROUNDS) { + debug!(our_idx, ?signed_msg, "dropping message from future round"); + return Ok(vec![]); + } + + if self.evidence_only { + debug!(our_idx, ?signed_msg, "received an irrelevant message"); + return Ok(vec![]); + } + + if let Some(round) = self.round(signed_msg.round_id) { + if round.contains(&signed_msg.content, validator_idx) { + debug!(our_idx, ?signed_msg, %sender, "received a duplicated message"); + return Ok(vec![]); + } + } + + if !signed_msg.verify_signature(&validator_id) { + warn!(our_idx, ?signed_msg, %sender, "invalid signature",); + return Err(FaultySender(sender)); + } + + if let Some((content2, signature2)) = self.detect_fault(&signed_msg) { + let evidence_msg = 
Message::Evidence(signed_msg.clone(), content2, signature2); + let mut outcomes = + self.handle_fault(signed_msg, validator_id, content2, signature2, now); + outcomes.push(ProtocolOutcome::CreatedGossipMessage( + SerializedMessage::from_message(&evidence_msg), + )); + return Ok(outcomes); + } + + if self.faults.contains_key(&signed_msg.validator_idx) { + debug!( + our_idx, + ?signed_msg, + "dropping message from faulty validator" + ); + return Ok(vec![]); + } + + self.record_entry(&ZugWalEntry::SignedMessage(signed_msg.clone())); + if self.add_content(signed_msg) { + debug!(round_id = ?self.current_round, "Calling update after add_content"); + Ok(self.update(now)) + } else { + Ok(vec![]) + } + } + + /// Verifies an evidence message that is supposed to contain two conflicting sigantures by the + /// same validator, and then calls `handle_fault`. + fn handle_evidence( + &mut self, + signed_msg: SignedMessage, + content2: Content, + signature2: C::Signature, + sender: NodeId, + now: Timestamp, + ) -> Result, FaultySender> { + let our_idx = self.our_idx(); + let validator_idx = signed_msg.validator_idx; + if let Some(Fault::Direct(..)) = self.faults.get(&validator_idx) { + return Ok(vec![]); // Validator is already known to be faulty. 
+ } + let validator_id = if let Some(validator_id) = self.validators.id(validator_idx) { + validator_id.clone() + } else { + warn!( + our_idx, + ?signed_msg, + %sender, + "invalid incoming evidence: validator index out of range", + ); + return Err(FaultySender(sender)); + }; + if !signed_msg.content.contradicts(&content2) { + warn!( + our_idx, + ?signed_msg, + ?content2, + %sender, + "invalid evidence: contents don't conflict", + ); + return Err(FaultySender(sender)); + } + if !signed_msg.verify_signature(&validator_id) + || !signed_msg + .with(content2, signature2) + .verify_signature(&validator_id) + { + warn!( + our_idx, + ?signed_msg, + ?content2, + %sender, + "invalid signature in evidence", + ); + return Err(FaultySender(sender)); + } + Ok(self.handle_fault(signed_msg, validator_id, content2, signature2, now)) + } + + /// Checks whether an incoming proposal should be added to the protocol state and starts + /// validation. + fn handle_proposal( + &mut self, + round_id: RoundId, + proposal: Proposal, + sender: NodeId, + now: Timestamp, + ) -> Result, FaultySender> { + let leader_idx = self.leader(round_id); + let our_idx = self.our_idx(); + + macro_rules! log_proposal { + ($lvl:expr, $prop:expr, $msg:expr $(,)?) 
=> { + event!( + $lvl, + our_idx, + round_id, + parent = $prop.maybe_parent_round_id, + timestamp = %$prop.timestamp, + leader_idx = leader_idx.0, + ?sender, + "{}", + $msg + ); + } + } + + if let Some(parent_round_id) = proposal.maybe_parent_round_id { + if parent_round_id >= round_id { + log_proposal!( + Level::WARN, + proposal, + "invalid proposal: parent is not from an earlier round", + ); + return Err(FaultySender(sender)); + } + } + + if proposal.timestamp > now.saturating_add(self.config.clock_tolerance) { + log_proposal!( + Level::TRACE, + proposal, + "received a proposal with a timestamp far in the future; dropping", + ); + return Ok(vec![]); + } + if proposal.timestamp > now { + log_proposal!( + Level::TRACE, + proposal, + "received a proposal with a timestamp slightly in the future", + ); + } + if (proposal.maybe_parent_round_id.is_none() || proposal.maybe_block.is_none()) + != proposal.inactive.is_none() + { + log_proposal!( + Level::WARN, + proposal, + "invalid proposal: inactive must be present in all except the first and dummy proposals", + ); + return Err(FaultySender(sender)); + } + if let Some(inactive) = &proposal.inactive { + if inactive + .iter() + .any(|idx| *idx == leader_idx || self.validators.id(*idx).is_none()) + { + log_proposal!( + Level::WARN, + proposal, + "invalid proposal: invalid inactive validator index", + ); + return Err(FaultySender(sender)); + } + } + + let hashed_prop = HashedProposal::new(proposal); + + if self + .round(round_id) + .is_none_or(|round| !round.has_echoes_for_proposal(hashed_prop.hash())) + { + log_proposal!( + Level::DEBUG, + hashed_prop.inner(), + "dropping proposal: missing echoes" + ); + return Ok(vec![]); + } + + if self.round(round_id).and_then(Round::proposal) == Some(&hashed_prop) { + log_proposal!( + Level::DEBUG, + hashed_prop.inner(), + "dropping proposal: we already have it" + ); + return Ok(vec![]); + } + + let ancestor_values = if let Some(parent_round_id) = hashed_prop.maybe_parent_round_id() { + 
if let Some(ancestor_values) = self.ancestor_values(parent_round_id) { + ancestor_values + } else { + log_proposal!( + Level::DEBUG, + hashed_prop.inner(), + "storing proposal for later; still missing ancestors", + ); + self.proposals_waiting_for_parent + .entry(parent_round_id) + .or_default() + .entry(hashed_prop) + .or_default() + .insert((round_id, sender)); + return Ok(vec![]); + } + } else { + vec![] + }; + + let mut outcomes = self.validate_proposal(round_id, hashed_prop, ancestor_values, sender); + debug!(round_id = ?self.current_round, "Calling update after handle_proposal"); + outcomes.extend(self.update(now)); + Ok(outcomes) + } + + /// Updates the round's outcome and returns `true` if there is a new quorum of echoes for the + /// given hash. + fn check_new_echo_quorum(&mut self, round_id: RoundId, hash: C::Hash) -> bool { + if self.rounds.contains_key(&round_id) + && self.rounds[&round_id].quorum_echoes().is_none() + && self.is_quorum(self.rounds[&round_id].echoes()[&hash].keys().copied()) + { + self.round_mut(round_id).set_quorum_echoes(hash); + return true; + } + false + } + + /// Updates the round's outcome and returns `true` if there is a new quorum of votes with the + /// given value. + fn check_new_vote_quorum(&mut self, round_id: RoundId, vote: bool) -> bool { + if self.rounds.contains_key(&round_id) + && self.rounds[&round_id].quorum_votes().is_none() + && self.is_quorum(self.rounds[&round_id].votes(vote).keys_some()) + { + self.round_mut(round_id).set_quorum_votes(vote); + let our_idx = self.our_idx(); + if !vote { + info!(our_idx, %round_id, "round is now skippable"); + } else if self.rounds[&round_id].accepted_proposal().is_none() { + info!(our_idx, %round_id, "round committed; no accepted proposal yet"); + } + return true; + } + false + } + + /// Adds a signed message to the WAL such that we can avoid double signing upon recovery if the + /// node shuts down. Returns `true` if the message was added successfully. 
    fn record_entry(&mut self, entry: &ZugWalEntry) -> bool {
        match self.write_wal.as_mut().map(|ww| ww.record_entry(entry)) {
            // No WAL configured (observer mode, or it was dropped after an earlier error):
            // treat as failure so callers don't sign anything they couldn't replay.
            None => false,
            Some(Ok(())) => true,
            Some(Err(err)) => {
                // A write error means we could no longer prove what we signed after a restart,
                // so stop validating and drop the WAL handle.
                self.active_validator = None;
                self.write_wal = None;
                error!(
                    our_idx = self.our_idx(),
                    %err,
                    "could not record a signed message to the WAL; deactivating"
                );
                false
            }
        }
    }

    /// Consumes all of the signed messages we've previously recorded in our write ahead log, and
    /// sets up the log for appending future messages. If it fails it prints an error log and
    /// the WAL remains `None`: That way we can still observe the protocol but not participate as
    /// a validator.
    pub(crate) fn open_wal(&mut self, wal_file: PathBuf, now: Timestamp) -> ProtocolOutcomes {
        let our_idx = self.our_idx();
        // Open the file for reading.
        let mut read_wal = match ReadWal::>::new(&wal_file) {
            Ok(read_wal) => read_wal,
            Err(err) => {
                error!(our_idx, %err, "could not create a ReadWal using this file");
                return vec![];
            }
        };

        let mut outcomes = vec![];

        // Read all messages recorded in the file.
        loop {
            match read_wal.read_next_entry() {
                Ok(Some(next_entry)) => match next_entry {
                    ZugWalEntry::SignedMessage(next_message) => {
                        // Replay our own (and others') recorded signatures into the state.
                        if !self.add_content(next_message) {
                            error!(our_idx, "Could not add content from WAL.");
                            return outcomes;
                        }
                    }
                    ZugWalEntry::Proposal(next_proposal, corresponding_round_id) => {
                        // Skip proposals the state already contains.
                        if self
                            .round(corresponding_round_id)
                            .and_then(Round::proposal)
                            .map(HashedProposal::inner)
                            == Some(&next_proposal)
                        {
                            warn!(our_idx, "Proposal from WAL is duplicated.");
                            continue;
                        }
                        // Walk the parent chain starting at the direct parent, collecting
                        // ancestor blocks; every ancestor must itself have an echo quorum.
                        let mut ancestor_values = vec![];
                        if let Some(mut round_id) = next_proposal.maybe_parent_round_id {
                            loop {
                                let proposal = if let Some(proposal) =
                                    self.round(round_id).and_then(Round::proposal)
                                {
                                    proposal
                                } else {
                                    error!(our_idx, "Proposal from WAL is missing ancestors.");
                                    return outcomes;
                                };
                                if self.round(round_id).and_then(Round::quorum_echoes)
                                    != Some(*proposal.hash())
                                {
                                    error!(our_idx, "Proposal from WAL has unaccepted ancestor.");
                                    return outcomes;
                                }
                                ancestor_values.extend(proposal.maybe_block().cloned());
                                match proposal.maybe_parent_round_id() {
                                    None => break, // Reached the first proposal in the chain.
                                    Some(parent_round_id) => round_id = parent_round_id,
                                }
                            }
                        }
                        if self
                            .round_mut(corresponding_round_id)
                            .insert_proposal(HashedProposal::new(next_proposal.clone()))
                        {
                            self.mark_dirty(corresponding_round_id);
                            // Re-announce the proposed block so downstream components see it
                            // again after the restart.
                            if let Some(block) = next_proposal.maybe_block {
                                let block_context =
                                    BlockContext::new(next_proposal.timestamp, ancestor_values);
                                let proposed_block = ProposedBlock::new(block, block_context);
                                outcomes
                                    .push(ProtocolOutcome::HandledProposedBlock(proposed_block));
                            }
                        }
                    }
                    ZugWalEntry::Evidence(
                        conflicting_message,
                        conflicting_message_content,
                        conflicting_signature,
                    ) => {
                        let validator_id = {
                            if let Some(validator_id) =
                                self.validators.id(conflicting_message.validator_idx)
                            {
                                validator_id.clone()
                            } else {
                                warn!(
                                    our_idx,
                                    index = conflicting_message.validator_idx.0,
                                    "No validator present at this index, despite holding \
                                    conflicting messages for it in the WAL"
                                );
                                continue;
                            }
                        };
                        // Replay the fault without re-recording it in the WAL.
                        let new_outcomes = self.handle_fault_no_wal(
                            conflicting_message,
                            validator_id,
                            conflicting_message_content,
                            conflicting_signature,
                            now,
                        );
                        // Ignore most outcomes: These have been processed before the restart.
                        outcomes.extend(new_outcomes.into_iter().filter(|outcome| match outcome {
                            ProtocolOutcome::FttExceeded
                            | ProtocolOutcome::WeAreFaulty
                            | ProtocolOutcome::FinalizedBlock(_)
                            | ProtocolOutcome::ValidateConsensusValue { .. }
                            | ProtocolOutcome::HandledProposedBlock(..)
                            | ProtocolOutcome::NewEvidence(_) => true,
                            ProtocolOutcome::SendEvidence(_, _)
                            | ProtocolOutcome::CreatedGossipMessage(_)
                            | ProtocolOutcome::CreatedTargetedMessage(_, _)
                            | ProtocolOutcome::CreatedMessageToRandomPeer(_)
                            | ProtocolOutcome::CreatedRequestToRandomPeer(_)
                            | ProtocolOutcome::ScheduleTimer(_, _)
                            | ProtocolOutcome::QueueAction(_)
                            | ProtocolOutcome::CreateNewBlock(_, _)
                            | ProtocolOutcome::DoppelgangerDetected
                            | ProtocolOutcome::Disconnect(_) => false,
                        }));
                    }
                },
                Ok(None) => {
                    break; // End of WAL reached cleanly.
                }
                Err(err) => {
                    error!(
                        our_idx,
                        ?err,
                        "couldn't read a message from the WAL: was this node recently shut down?"
                    );
                    return outcomes; // Not setting WAL file; won't actively participate.
                }
            }
        }

        // Open the file for appending.
        match WriteWal::new(&wal_file) {
            Ok(write_wal) => self.write_wal = Some(write_wal),
            Err(err) => error!(
                our_idx,
                ?err,
                ?wal_file,
                "could not create a WAL using this file"
            ),
        }
        outcomes
    }

    /// Adds a signed message content to the state.
    /// Does not call `update` and does not detect faults.
    fn add_content(&mut self, signed_msg: SignedMessage) -> bool {
        // Track the newest message per validator for participation bookkeeping.
        if self.active[signed_msg.validator_idx]
            .as_ref()
            .is_none_or(|old_msg| old_msg.round_id < signed_msg.round_id)
        {
            if self.active[signed_msg.validator_idx].is_none() {
                // We considered this validator inactive until now, and didn't accept proposals that
                // didn't have them in the `inactive` field. Mark all relevant rounds as dirty so
                // that the next `update` call checks all proposals again.
                self.mark_dirty(self.first_non_finalized_round_id);
            }
            // Save the latest signed message for participation tracking purposes.
            self.active[signed_msg.validator_idx] = Some(signed_msg.clone());
        }
        // Unpack the message; only round, content, validator and signature are used below.
        let SignedMessage {
            round_id,
            instance_id: _,
            content,
            validator_idx,
            signature,
        } = signed_msg;
        let our_idx = self.our_idx();
        match content {
            Content::Echo(hash) => {
                // `insert_echo` returns `true` only if the echo was newly inserted.
                if self
                    .round_mut(round_id)
                    .insert_echo(hash, validator_idx, signature)
                {
                    debug!(our_idx, round_id, %hash, validator = validator_idx.0, "inserted echo");
                    self.progress_detected = true;
                    // A new echo may complete a quorum; if so, the round must be re-evaluated.
                    if self.check_new_echo_quorum(round_id, hash) {
                        self.mark_dirty(round_id);
                    }
                    return true;
                }
            }
            Content::Vote(vote) => {
                // `insert_vote` returns `true` only if the vote was newly inserted.
                if self
                    .round_mut(round_id)
                    .insert_vote(vote, validator_idx, signature)
                {
                    debug!(
                        our_idx,
                        round_id,
                        vote,
                        validator = validator_idx.0,
                        "inserted vote"
                    );
                    self.progress_detected = true;
                    // A new vote may complete a quorum; if so, the round must be re-evaluated.
                    if self.check_new_vote_quorum(round_id, vote) {
                        self.mark_dirty(round_id);
                    }
                    return true;
                }
            }
        }
        // Nothing was inserted: the content was already present.
        false
    }

    /// If there is a signature for conflicting content, returns the content and signature.
+ fn detect_fault(&self, signed_msg: &SignedMessage) -> Option<(Content, C::Signature)> { + let round = self.round(signed_msg.round_id)?; + match &signed_msg.content { + Content::Echo(hash) => round.echoes().iter().find_map(|(hash2, echo_map)| { + if hash2 == hash { + return None; + } + echo_map + .get(&signed_msg.validator_idx) + .map(|sig| (Content::Echo(*hash2), *sig)) + }), + Content::Vote(vote) => { + round.votes(!vote)[signed_msg.validator_idx].map(|sig| (Content::Vote(!vote), sig)) + } + } + } + + /// Sets an update timer for the given timestamp, unless an earlier timer is already set. + fn schedule_update(&mut self, timestamp: Timestamp) -> ProtocolOutcomes { + debug!(our_idx = self.our_idx(), %timestamp, "schedule update"); + if self.next_scheduled_update > timestamp { + self.next_scheduled_update = timestamp; + vec![ProtocolOutcome::ScheduleTimer(timestamp, TIMER_ID_UPDATE)] + } else { + vec![] + } + } + + /// Updates the state and sends appropriate messages after a signature has been added to a + /// round. + fn update(&mut self, now: Timestamp) -> ProtocolOutcomes { + let mut outcomes = vec![]; + if self.finalized_switch_block() || self.faulty_weight() > self.params.ftt() { + return outcomes; // This era has ended or the FTT was exceeded. + } + if let Some(dirty_round_id) = self.maybe_dirty_round_id { + for round_id in dirty_round_id.. { + outcomes.extend(self.update_round(round_id, now)); + if round_id >= self.current_round { + break; + } + } + } + self.maybe_dirty_round_id = None; + outcomes + } + + /// Updates a round and sends appropriate messages. + fn update_round(&mut self, round_id: RoundId, now: Timestamp) -> ProtocolOutcomes { + self.create_round(round_id); + let mut outcomes = vec![]; + let mut voted_on_round_outcome = false; + + // If we have a proposal, echo it. 
+ if let Some(&hash) = self.rounds[&round_id].proposal().map(HashedProposal::hash) { + outcomes.extend(self.create_and_gossip_message(round_id, Content::Echo(hash))); + } + + // Update the round outcome if there is a new accepted proposal. + if self.update_accepted_proposal(round_id) { + if round_id == self.current_round { + self.update_proposal_timeout(now); + } + // Vote for finalizing this proposal. + outcomes.extend(self.create_and_gossip_message(round_id, Content::Vote(true))); + voted_on_round_outcome = true; + // Proposed descendants of this proposal can now be validated. + if let Some(proposals) = self.proposals_waiting_for_parent.remove(&round_id) { + let ancestor_values = self + .ancestor_values(round_id) + .expect("missing ancestors of accepted proposal"); + for (proposal, rounds_and_senders) in proposals { + for (proposal_round_id, sender) in rounds_and_senders { + outcomes.extend(self.validate_proposal( + proposal_round_id, + proposal.clone(), + ancestor_values.clone(), + sender, + )); + } + } + } + } + + if round_id == self.current_round { + let our_idx = self.our_idx(); + let current_round_start = self.current_round_start; + let current_timeout = current_round_start.saturating_add(self.proposal_timeout()); + if now >= current_timeout { + debug!(?round_id, "Voting false due to timeout"); + let msg_outcomes = self.create_and_gossip_message(round_id, Content::Vote(false)); + voted_on_round_outcome = true; + // Only update the proposal timeout if this is the first time we timed out in this + // round + if !msg_outcomes.is_empty() { + self.update_proposal_timeout(now); + } + outcomes.extend(msg_outcomes); + } else if self.faults.contains_key(&self.leader(round_id)) { + debug!(?round_id, "Voting false due to faults"); + outcomes.extend(self.create_and_gossip_message(round_id, Content::Vote(false))); + voted_on_round_outcome = true; + } + if self.is_skippable_round(round_id) || self.has_accepted_proposal(round_id) { + self.current_round_start = 
Timestamp::MAX; + self.current_round = self.current_round.saturating_add(1); + info!( + our_idx, + round_id = self.current_round, + leader = self.leader(self.current_round).0, + "started a new round" + ); + } else if let Some((maybe_parent_round_id, timestamp)) = self.suitable_parent_round(now) + { + if now < timestamp { + // The first opportunity to make a proposal is in the future; check again at + // that time. + debug!(our_idx, %now, %timestamp, "update_round - schedule update 1"); + outcomes.extend(self.schedule_update(timestamp)); + } else if self.current_round_start > now { + // A proposal could be made now. Start the timer and propose if leader. + self.current_round_start = now; + outcomes.extend(self.propose_if_leader(maybe_parent_round_id, now)); + let current_timeout = self + .current_round_start + .saturating_add(self.proposal_timeout()); + if current_timeout > now { + debug!(our_idx, %now, %current_timeout, "update_round - schedule update 2"); + outcomes.extend(self.schedule_update(current_timeout)); + } + } else if !voted_on_round_outcome { + // If we weren't able to come to a voting conclusion we need to reschedule + // the check in future. + debug!(round_id, "Scheduling proposal recheck"); + let updated_timestamp = now.saturating_add(self.proposal_timeout()); + outcomes.extend(self.schedule_update(updated_timestamp)); + } + } else { + error!(our_idx, "No suitable parent for current round"); + } + } + + // If the round has an accepted proposal and is committed, it is finalized. + if self.has_accepted_proposal(round_id) && self.is_committed_round(round_id) { + outcomes.extend(self.finalize_round(round_id)); + } + outcomes + } + + /// If a new proposal is accepted in that round, adds it to the round outcome and returns + /// `true`. + fn update_accepted_proposal(&mut self, round_id: RoundId) -> bool { + if self.has_accepted_proposal(round_id) { + return false; // We already have an accepted proposal. 
+ } + let proposal = if let Some(proposal) = self.round(round_id).and_then(Round::proposal) { + proposal + } else { + return false; // We don't have a proposal. + }; + if self.round(round_id).and_then(Round::quorum_echoes) != Some(*proposal.hash()) { + return false; // We don't have a quorum of echoes. + } + if let Some(inactive) = proposal.inactive() { + for (idx, _) in self.validators.enumerate_ids() { + if !inactive.contains(&idx) + && self.active[idx].is_none() + && !self.faults.contains_key(&idx) + { + // The proposal claims validator idx is active but we haven't seen anything from + // them yet. + return false; + } + } + } + let (first_skipped_round_id, rel_height) = + if let Some(parent_round_id) = proposal.maybe_parent_round_id() { + if let Some((parent_height, _)) = self + .round(parent_round_id) + .and_then(Round::accepted_proposal) + { + ( + parent_round_id.saturating_add(1), + parent_height.saturating_add(1), + ) + } else { + return false; // Parent is not accepted yet. + } + } else { + (0, 0) + }; + if (first_skipped_round_id..round_id) + .any(|skipped_round_id| !self.is_skippable_round(skipped_round_id)) + { + return false; // A skipped round is not skippable yet. + } + + // We have a proposal with accepted parent, a quorum of echoes, and all rounds since the + // parent are skippable. That means the proposal is now accepted. + self.round_mut(round_id) + .set_accepted_proposal_height(rel_height); + true + } + + /// Sends a proposal to the `BlockValidator` component for validation. If no validation is + /// needed, immediately calls `insert_proposal`. 
+ fn validate_proposal( + &mut self, + round_id: RoundId, + proposal: HashedProposal, + ancestor_values: Vec, + sender: NodeId, + ) -> ProtocolOutcomes { + let our_idx = self.our_idx(); + if proposal.timestamp() < self.params.start_timestamp() { + info!( + our_idx, + "rejecting proposal with timestamp earlier than era start" + ); + return vec![]; + } + if let Some((_, parent_proposal)) = proposal + .maybe_parent_round_id() + .and_then(|parent_round_id| self.accepted_proposal(parent_round_id)) + { + let min_block_time = self.params.min_block_time(); + if proposal.timestamp() < parent_proposal.timestamp().saturating_add(min_block_time) { + info!( + our_idx, + "rejecting proposal with timestamp earlier than the parent" + ); + return vec![]; + } + if let (Some(inactive), Some(parent_inactive)) = + (proposal.inactive(), parent_proposal.inactive()) + { + if !inactive.is_subset(parent_inactive) { + info!( + our_idx, + "rejecting proposal with more inactive validators than parent" + ); + return vec![]; + } + } + } + let block_context = BlockContext::new(proposal.timestamp(), ancestor_values); + if let Some(block) = proposal + .maybe_block() + .filter(|value| value.needs_validation()) + .cloned() + { + self.log_proposal(&proposal, round_id, "requesting proposal validation"); + let proposed_block = ProposedBlock::new(block, block_context); + if self + .proposals_waiting_for_validation + .entry(proposed_block.clone()) + .or_default() + .insert((round_id, proposal, sender)) + { + return vec![ProtocolOutcome::ValidateConsensusValue { + sender, + proposed_block, + }]; + } + } else { + self.log_proposal(&proposal, round_id, "proposal does not need validation"); + if self.round_mut(round_id).insert_proposal(proposal.clone()) { + self.record_entry(&ZugWalEntry::Proposal(proposal.inner().clone(), round_id)); + self.progress_detected = true; + self.mark_dirty(round_id); + if let Some(block) = proposal.maybe_block().cloned() { + let proposed_block = ProposedBlock::new(block, 
block_context); + return vec![ProtocolOutcome::HandledProposedBlock(proposed_block)]; + } + } + } + vec![] // Proposal was already known. + } + + /// Finalizes the round, notifying the rest of the node of the finalized block + /// if it contained one. + fn finalize_round(&mut self, round_id: RoundId) -> ProtocolOutcomes { + let mut outcomes = vec![]; + if round_id < self.first_non_finalized_round_id { + return outcomes; // This round was already finalized. + } + let (relative_height, proposal) = if let Some((height, proposal)) = + self.round(round_id).and_then(Round::accepted_proposal) + { + (height, proposal.clone()) + } else { + error!( + our_idx = self.our_idx(), + round_id, "missing finalized proposal; this is a bug" + ); + return outcomes; + }; + if let Some(parent_round_id) = proposal.maybe_parent_round_id() { + // Output the parent first if it isn't already finalized. + outcomes.extend(self.finalize_round(parent_round_id)); + } + for prune_round_id in self.first_non_finalized_round_id..round_id { + info!( + our_idx = self.our_idx(), + round_id = prune_round_id, + "skipped round" + ); + self.round_mut(prune_round_id).prune_skipped(); + } + self.first_non_finalized_round_id = round_id.saturating_add(1); + let value = if let Some(block) = proposal.maybe_block() { + block.clone() + } else { + return outcomes; // This era's last block is already finalized. 
+ }; + let proposer = self + .validators + .id(self.leader(round_id)) + .expect("validator not found") + .clone(); + let terminal_block_data = self.accepted_switch_block(round_id).then(|| { + let inactive_validators = proposal.inactive().map_or_else(Vec::new, |inactive| { + inactive + .iter() + .filter_map(|idx| self.validators.id(*idx)) + .cloned() + .collect() + }); + TerminalBlockData { + inactive_validators, + } + }); + let finalized_block = FinalizedBlock { + value, + timestamp: proposal.timestamp(), + relative_height, + // Faulty validators are already reported to the era supervisor via + // validators_with_evidence. + // TODO: Is this field entirely obsoleted by accusations? + equivocators: vec![], + terminal_block_data, + proposer, + }; + outcomes.push(ProtocolOutcome::FinalizedBlock(finalized_block)); + outcomes + } + + /// Makes a new proposal if we are the current round leader. + fn propose_if_leader( + &mut self, + maybe_parent_round_id: Option, + now: Timestamp, + ) -> ProtocolOutcomes { + match &self.active_validator { + Some(active_validator) if active_validator.idx == self.leader(self.current_round) => {} + _ => return vec![], // Not the current round leader. + } + match self.pending_proposal { + // We already requested a block to propose. + Some((_, round_id, _)) if round_id == self.current_round => return vec![], + _ => {} + } + if self.round_mut(self.current_round).has_proposal() { + return vec![]; // We already made a proposal. + } + let ancestor_values = match maybe_parent_round_id { + Some(parent_round_id) + if self.accepted_switch_block(parent_round_id) + || self.accepted_dummy_proposal(parent_round_id) => + { + // One of the ancestors is the switch block, so this proposal has no block. + return self.create_echo_and_proposal(Proposal::dummy(now, parent_round_id)); + } + Some(parent_round_id) => self + .ancestor_values(parent_round_id) + .expect("missing ancestor value"), + None => vec![], + }; + // Request a block payload to propose. 
+ let block_context = BlockContext::new(now, ancestor_values); + self.pending_proposal = Some(( + block_context.clone(), + self.current_round, + maybe_parent_round_id, + )); + vec![ProtocolOutcome::CreateNewBlock( + block_context, + now.saturating_add(TimeDiff::from_millis(self.proposal_timeout_millis as u64)), + )] + } + + /// Creates a new proposal message in the current round, and a corresponding signed echo, + /// inserts them into our protocol state and gossips them. + fn create_echo_and_proposal(&mut self, proposal: Proposal) -> ProtocolOutcomes { + let round_id = self.current_round; + let hashed_prop = HashedProposal::new(proposal.clone()); + let echo_content = Content::Echo(*hashed_prop.hash()); + let echo = if let Some(echo) = self.create_message(round_id, echo_content) { + echo + } else { + return vec![]; + }; + let prop_msg = Message::Proposal { + round_id, + proposal, + instance_id: *self.instance_id(), + echo, + }; + if !self.record_entry(&ZugWalEntry::Proposal( + hashed_prop.inner().clone(), + round_id, + )) { + error!( + our_idx = self.our_idx(), + "could not record own proposal in WAL" + ); + vec![] + } else if self.round_mut(round_id).insert_proposal(hashed_prop) { + self.mark_dirty(round_id); + vec![ProtocolOutcome::CreatedGossipMessage( + SerializedMessage::from_message(&prop_msg), + )] + } else { + vec![] + } + } + + /// Returns a parent if a block with that parent could be proposed in the current round, and the + /// earliest possible timestamp for a new proposal. + fn suitable_parent_round(&self, now: Timestamp) -> Option<(Option, Timestamp)> { + let min_block_time = self.params.min_block_time(); + let mut maybe_parent = None; + // We iterate through the rounds before the current one, in reverse order. + for round_id in (0..self.current_round).rev() { + if let Some((_, parent)) = self.accepted_proposal(round_id) { + // All rounds higher than this one are skippable. 
When the accepted proposal's + // timestamp is old enough it can be used as a parent. + let timestamp = parent.timestamp().saturating_add(min_block_time); + if now >= timestamp { + return Some((Some(round_id), timestamp)); + } + if maybe_parent.is_none_or(|(_, timestamp2)| timestamp2 > timestamp) { + maybe_parent = Some((Some(round_id), timestamp)); + } + } + if !self.is_skippable_round(round_id) { + return maybe_parent; + } + } + // All rounds are skippable. When the era starts block 0 can be proposed. + Some((None, self.params.start_timestamp())) + } + + /// Returns whether a quorum has voted for `false`. + fn is_skippable_round(&self, round_id: RoundId) -> bool { + self.rounds.get(&round_id).and_then(Round::quorum_votes) == Some(false) + } + + /// Returns whether a quorum has voted for `true`. + fn is_committed_round(&self, round_id: RoundId) -> bool { + self.rounds.get(&round_id).and_then(Round::quorum_votes) == Some(true) + } + + /// Returns whether a round has an accepted proposal. + fn has_accepted_proposal(&self, round_id: RoundId) -> bool { + self.round(round_id) + .and_then(Round::accepted_proposal) + .is_some() + } + + /// Returns the accepted proposal, if any, together with its height. + fn accepted_proposal(&self, round_id: RoundId) -> Option<(u64, &HashedProposal)> { + self.round(round_id)?.accepted_proposal() + } + + /// Returns the current proposal timeout as a `TimeDiff`. + fn proposal_timeout(&self) -> TimeDiff { + TimeDiff::from_millis(self.proposal_timeout_millis as u64) + } + + /// Updates our `proposal_timeout` based on the latest measured actual delay from the start of + /// the current round until a proposal was accepted or we voted to skip the round. 
+ fn update_proposal_timeout(&mut self, now: Timestamp) {
+ let proposal_delay_millis = now.saturating_diff(self.current_round_start).millis() as f64;
+ let grace_period_factor = self.config.proposal_grace_period as f64 / 100.0 + 1.0;
+ let target_timeout = proposal_delay_millis * grace_period_factor;
+ let inertia = self.config.proposal_timeout_inertia as f64;
+ let ftt = self.params.ftt().0 as f64 / self.validators.total_weight().0 as f64;
+ if target_timeout > self.proposal_timeout_millis {
+ self.proposal_timeout_millis *= (1.0 / (inertia * (1.0 - ftt))).exp2();
+ self.proposal_timeout_millis = self.proposal_timeout_millis.min(target_timeout);
+ } else {
+ self.proposal_timeout_millis *= (-1.0 / (inertia * (1.0 + ftt))).exp2();
+ let min_timeout = (self.config.proposal_timeout.millis() as f64).max(target_timeout);
+ self.proposal_timeout_millis = self.proposal_timeout_millis.max(min_timeout);
+ }
+ debug!(our_idx = self.our_idx(), %self.proposal_timeout_millis, "proposal timeout updated");
+ }
+
+ /// Returns `true` if the given validators, together with all faulty validators, form a quorum.
+ fn is_quorum(&self, vidxs: impl Iterator) -> bool {
+ let mut sum = self.faulty_weight();
+ let quorum_threshold = self.quorum_threshold();
+ if sum > quorum_threshold {
+ return true;
+ }
+ for vidx in vidxs {
+ if !self.faults.contains_key(&vidx) {
+ sum = sum.saturating_add(self.validators.weight(vidx));
+ if sum > quorum_threshold {
+ return true;
+ }
+ }
+ }
+ false
+ }
+
+ /// Returns the accepted value from the given round and all its ancestors, or `None` if there is
+ /// no accepted value in any of those rounds.
+ fn ancestor_values(&self, mut round_id: RoundId) -> Option> { + let mut ancestor_values = vec![]; + loop { + let (_, proposal) = self.accepted_proposal(round_id)?; + ancestor_values.extend(proposal.maybe_block().cloned()); + match proposal.maybe_parent_round_id() { + None => return Some(ancestor_values), + Some(parent_round_id) => round_id = parent_round_id, + } + } + } + + /// Returns the greatest weight such that two sets of validators with this weight can + /// intersect in only faulty validators, i.e. have an intersection of weight `<= ftt`. That is + /// `(total_weight + ftt) / 2`, rounded down. A _quorum_ is any set with a weight strictly + /// greater than this, so any two quorums have at least one correct validator in common. + fn quorum_threshold(&self) -> Weight { + let total_weight = self.validators.total_weight().0; + let ftt = self.params.ftt().0; + // sum_overflow is the 33rd bit of the addition's actual result, representing 2^32. + let (sum, sum_overflow) = total_weight.overflowing_add(ftt); + if sum_overflow { + Weight((sum / 2) | 1u64.reverse_bits()) // Add 2^31. + } else { + Weight(sum / 2) + } + } + + /// Returns the total weight of validators known to be faulty. + fn faulty_weight(&self) -> Weight { + self.sum_weights(self.faults.keys()) + } + + /// Returns the sum of the weights of the given validators. + fn sum_weights<'a>(&self, vidxs: impl Iterator) -> Weight { + vidxs.map(|vidx| self.validators.weight(*vidx)).sum() + } + + /// Retrieves a shared reference to the round. + fn round(&self, round_id: RoundId) -> Option<&Round> { + self.rounds.get(&round_id) + } + + /// Retrieves a mutable reference to the round. + /// If the round doesn't exist yet, it creates an empty one. 
+ fn round_mut(&mut self, round_id: RoundId) -> &mut Round {
+ match self.rounds.entry(round_id) {
+ btree_map::Entry::Occupied(entry) => entry.into_mut(),
+ btree_map::Entry::Vacant(entry) => {
+ let leader_idx = self.leader_sequence.leader(u64::from(round_id));
+ entry.insert(Round::new(self.validators.len(), leader_idx))
+ }
+ }
+ }
+
+ /// Creates a round if it doesn't exist yet.
+ fn create_round(&mut self, round_id: RoundId) {
+ self.round_mut(round_id); // This creates a round as a side effect.
+ }
+
+ /// Marks a round as dirty so that the next `update` call will reevaluate it.
+ fn mark_dirty(&mut self, round_id: RoundId) {
+ if round_id <= self.current_round
+ && self.maybe_dirty_round_id.is_none_or(|r_id| r_id > round_id)
+ {
+ self.maybe_dirty_round_id = Some(round_id);
+ }
+ }
+}
+
+impl ConsensusProtocol for Zug
+where
+ C: Context + 'static,
+{
+ fn handle_message(
+ &mut self,
+ _rng: &mut NodeRng,
+ sender: NodeId,
+ msg: SerializedMessage,
+ now: Timestamp,
+ ) -> ProtocolOutcomes {
+ let our_idx = self.our_idx();
+ match msg.deserialize_incoming() {
+ Err(err) => {
+ warn!(%sender, %err, "failed to deserialize Zug message");
+ vec![ProtocolOutcome::Disconnect(sender)]
+ }
+ Ok(zug_msg) if zug_msg.instance_id() != self.instance_id() => {
+ let instance_id = zug_msg.instance_id();
+ warn!(our_idx, ?instance_id, %sender, "wrong instance ID; disconnecting");
+ vec![ProtocolOutcome::Disconnect(sender)]
+ }
+ Ok(Message::SyncResponse(sync_response)) => {
+ self.handle_sync_response(sync_response, sender, now)
+ }
+ Ok(Message::Proposal {
+ round_id,
+ instance_id: _,
+ proposal,
+ echo,
+ }) => {
+ // TODO: make sure that `echo` is indeed an echo
+ debug!(our_idx, %sender, %proposal, %round_id, "handling proposal with echo");
+
+ let outcomes = || {
+ let mut outcomes = self.handle_signed_message(echo, sender, now)?;
+ outcomes.extend(self.handle_proposal(round_id, proposal, sender, now)?);
+ Ok(outcomes)
+ };
+
+ outcomes_or_disconnect(outcomes())
+
} + Ok(Message::Signed(signed_msg)) => { + outcomes_or_disconnect(self.handle_signed_message(signed_msg, sender, now)) + } + Ok(Message::Evidence(signed_msg, content2, signature2)) => outcomes_or_disconnect( + self.handle_evidence(signed_msg, content2, signature2, sender, now), + ), + } + } + + /// Handles an incoming request message and returns an optional response. + fn handle_request_message( + &mut self, + _rng: &mut NodeRng, + sender: NodeId, + msg: SerializedMessage, + _now: Timestamp, + ) -> (ProtocolOutcomes, Option) { + let our_idx = self.our_idx(); + match msg.deserialize_incoming::>() { + Err(err) => { + warn!( + our_idx, + %sender, + %err, + "could not deserialize Zug message" + ); + (vec![ProtocolOutcome::Disconnect(sender)], None) + } + Ok(sync_request) if sync_request.instance_id != *self.instance_id() => { + let instance_id = sync_request.instance_id; + warn!(our_idx, ?instance_id, %sender, "wrong instance ID; disconnecting"); + (vec![ProtocolOutcome::Disconnect(sender)], None) + } + Ok(sync_request) => self.handle_sync_request(sync_request, sender), + } + } + + /// Handles the firing of various timers in the protocol. 
+ fn handle_timer( + &mut self, + timestamp: Timestamp, + now: Timestamp, + timer_id: TimerId, + rng: &mut NodeRng, + ) -> ProtocolOutcomes { + match timer_id { + TIMER_ID_SYNC_PEER => self.handle_sync_peer_timer(now, rng), + TIMER_ID_UPDATE => { + if timestamp >= self.next_scheduled_update { + self.next_scheduled_update = Timestamp::MAX; + } + let current_round = self.current_round; + self.mark_dirty(current_round); + debug!(?current_round, "TIMER_ID_UPDATE"); + self.update(now) + } + TIMER_ID_LOG_PARTICIPATION => { + self.log_participation(); + match self.config.log_participation_interval { + Some(interval) if !self.evidence_only && !self.finalized_switch_block() => { + vec![ProtocolOutcome::ScheduleTimer( + now.saturating_add(interval), + timer_id, + )] + } + _ => vec![], + } + } + // TIMER_ID_VERTEX_WITH_FUTURE_TIMESTAMP => { + // self.synchronizer.add_past_due_stored_vertices(now) + // } + timer_id => { + error!( + our_idx = self.our_idx(), + timer_id = timer_id.0, + "unexpected timer ID" + ); + vec![] + } + } + } + + fn handle_is_current(&self, now: Timestamp) -> ProtocolOutcomes { + let mut outcomes = vec![]; + if let Some(interval) = self.config.sync_state_interval { + outcomes.push(ProtocolOutcome::ScheduleTimer( + now.max(self.params.start_timestamp()) + .saturating_add(interval), + TIMER_ID_SYNC_PEER, + )); + } + if let Some(interval) = self.config.log_participation_interval { + outcomes.push(ProtocolOutcome::ScheduleTimer( + now.max(self.params.start_timestamp()) + .saturating_add(interval), + TIMER_ID_LOG_PARTICIPATION, + )); + } + outcomes + } + + fn handle_action(&mut self, action_id: ActionId, now: Timestamp) -> ProtocolOutcomes { + error!(our_idx = self.our_idx(), ?action_id, %now, "unexpected action"); + vec![] + } + + fn propose(&mut self, proposed_block: ProposedBlock, now: Timestamp) -> ProtocolOutcomes { + let maybe_parent_round_id = if let Some((block_context, round_id, maybe_parent_round_id)) = + self.pending_proposal.take() + { + if 
block_context != *proposed_block.context() || round_id != self.current_round { + warn!(our_idx = self.our_idx(), %proposed_block, "skipping outdated proposal"); + self.pending_proposal = Some((block_context, round_id, maybe_parent_round_id)); + return vec![]; + } + maybe_parent_round_id + } else { + error!(our_idx = self.our_idx(), "unexpected call to propose"); + return vec![]; + }; + let inactive = self + .validators + .enumerate_ids() + .map(|(idx, _)| idx) + .filter(|idx| self.active[*idx].is_none() && !self.faults.contains_key(idx)); + let proposal = Proposal::with_block(&proposed_block, maybe_parent_round_id, inactive); + let mut outcomes = self.create_echo_and_proposal(proposal); + let round_id = self.current_round; + warn!(?round_id, "Calling update after proposal"); + outcomes.extend(self.update(now)); + outcomes + } + + fn resolve_validity( + &mut self, + proposed_block: ProposedBlock, + valid: bool, + now: Timestamp, + ) -> ProtocolOutcomes { + let rounds_and_node_ids = self + .proposals_waiting_for_validation + .remove(&proposed_block) + .into_iter() + .flatten(); + let mut outcomes = vec![]; + if valid { + for (round_id, proposal, _sender) in rounds_and_node_ids { + info!(our_idx = self.our_idx(), %round_id, %proposal, "handling valid proposal"); + if self.round_mut(round_id).insert_proposal(proposal.clone()) { + self.record_entry(&ZugWalEntry::Proposal(proposal.into_inner(), round_id)); + self.mark_dirty(round_id); + self.progress_detected = true; + outcomes.push(ProtocolOutcome::HandledProposedBlock( + proposed_block.clone(), + )); + } + } + outcomes.extend(self.update(now)); + } else { + for (round_id, proposal, sender) in rounds_and_node_ids { + // We don't disconnect from the faulty sender here: The block validator considers + // the value "invalid" even if it just couldn't download the deploys, which could + // just be because the original sender went offline. 
+ let validator_index = self.leader(round_id).0; + info!( + our_idx = self.our_idx(), + %validator_index, + %round_id, + %sender, + %proposal, + "dropping invalid proposal" + ); + } + } + outcomes + } + + fn activate_validator( + &mut self, + our_id: C::ValidatorId, + secret: C::ValidatorSecret, + now: Timestamp, + wal_file: Option, + ) -> ProtocolOutcomes { + let mut outcomes = vec![]; + if self.write_wal.is_none() { + if let Some(wal_file) = wal_file { + outcomes.extend(self.open_wal(wal_file, now)); + } + if self.write_wal.is_none() { + error!(?our_id, "missing WAL file; not activating"); + return vec![]; + } + } + if let Some(idx) = self.validators.get_index(&our_id) { + if self.faults.contains_key(&idx) { + error!(our_idx = idx.0, "we are faulty; not activating"); + return outcomes; + } + info!(our_idx = idx.0, "start voting"); + self.active_validator = Some(ActiveValidator { idx, secret }); + debug!( + our_idx = idx.0, + %now, + start_timestamp=%self.params.start_timestamp(), + "activate_validator - schedule update" + ); + outcomes.extend(self.schedule_update(self.params.start_timestamp().max(now))); + } else { + error!( + ?our_id, + "we are not a validator in this era; not activating" + ); + } + outcomes + } + + fn deactivate_validator(&mut self) { + self.active_validator = None; + } + + fn set_evidence_only(&mut self) { + self.evidence_only = true; + self.rounds.clear(); + self.proposals_waiting_for_parent.clear(); + self.proposals_waiting_for_validation.clear(); + } + + fn has_evidence(&self, vid: &C::ValidatorId) -> bool { + self.validators + .get_index(vid) + .and_then(|idx| self.faults.get(&idx)) + .is_some_and(Fault::is_direct) + } + + fn mark_faulty(&mut self, vid: &C::ValidatorId) { + if let Some(idx) = self.validators.get_index(vid) { + self.faults.entry(idx).or_insert(Fault::Indirect); + } + } + + fn send_evidence(&self, peer: NodeId, vid: &C::ValidatorId) -> ProtocolOutcomes { + self.validators + .get_index(vid) + .and_then(|idx| 
self.faults.get(&idx)) + .cloned() + .map(|fault| match fault { + Fault::Direct(msg, content, sign) => { + vec![ProtocolOutcome::CreatedTargetedMessage( + SerializedMessage::from_message(&Message::Evidence(msg, content, sign)), + peer, + )] + } + _ => vec![], + }) + .unwrap_or_default() + } + + fn set_paused(&mut self, paused: bool, now: Timestamp) -> ProtocolOutcomes { + if self.paused && !paused { + info!( + our_idx = self.our_idx(), + current_round = self.current_round, + "unpausing consensus" + ); + self.paused = paused; + // Reset the timeout to give the proposer another chance, after the pause. + self.current_round_start = Timestamp::MAX; + let round_id = self.current_round; + self.mark_dirty(round_id); + debug!(?round_id, "Calling update after unpausing"); + self.update(now) + } else { + if self.paused != paused { + info!( + our_idx = self.our_idx(), + current_round = self.current_round, + "pausing consensus" + ); + } + self.paused = paused; + vec![] + } + } + + fn validators_with_evidence(&self) -> Vec<&C::ValidatorId> { + self.faults + .iter() + .filter(|(_, fault)| fault.is_direct()) + .filter_map(|(vidx, _)| self.validators.id(*vidx)) + .collect() + } + + fn as_any(&self) -> &dyn Any { + self + } + + fn is_active(&self) -> bool { + self.active_validator.is_some() + } + + fn instance_id(&self) -> &C::InstanceId { + self.params.instance_id() + } + + fn next_round_length(&self) -> Option { + Some(self.params.min_block_time()) + } +} + +fn outcomes_or_disconnect( + result: Result, FaultySender>, +) -> ProtocolOutcomes { + result.unwrap_or_else(|sender| vec![ProtocolOutcome::Disconnect(sender.0)]) +} + +mod specimen_support { + use std::collections::BTreeSet; + + use crate::{ + components::consensus::{utils::ValidatorIndex, ClContext}, + utils::specimen::{ + btree_map_distinct_from_prop, btree_set_distinct_from_prop, largest_variant, + vec_prop_specimen, Cache, LargeUniqueSequence, LargestSpecimen, SizeEstimator, + }, + }; + + use super::{ + message::{ + 
Content, ContentDiscriminants, Message, MessageDiscriminants, SignedMessage, + SyncResponse, + }, + proposal::Proposal, + SyncRequest, + }; + + impl LargestSpecimen for Message { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + largest_variant::( + estimator, + |variant| match variant { + MessageDiscriminants::SyncResponse => { + Message::SyncResponse(LargestSpecimen::largest_specimen(estimator, cache)) + } + MessageDiscriminants::Proposal => Message::Proposal { + round_id: LargestSpecimen::largest_specimen(estimator, cache), + instance_id: LargestSpecimen::largest_specimen(estimator, cache), + proposal: LargestSpecimen::largest_specimen(estimator, cache), + echo: LargestSpecimen::largest_specimen(estimator, cache), + }, + MessageDiscriminants::Signed => { + Message::Signed(LargestSpecimen::largest_specimen(estimator, cache)) + } + MessageDiscriminants::Evidence => Message::Evidence( + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + ), + }, + ) + } + } + + impl LargestSpecimen for SyncRequest { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + SyncRequest { + round_id: LargestSpecimen::largest_specimen(estimator, cache), + proposal_hash: LargestSpecimen::largest_specimen(estimator, cache), + has_proposal: LargestSpecimen::largest_specimen(estimator, cache), + first_validator_idx: LargestSpecimen::largest_specimen(estimator, cache), + echoes: LargestSpecimen::largest_specimen(estimator, cache), + true_votes: LargestSpecimen::largest_specimen(estimator, cache), + false_votes: LargestSpecimen::largest_specimen(estimator, cache), + active: LargestSpecimen::largest_specimen(estimator, cache), + faulty: LargestSpecimen::largest_specimen(estimator, cache), + instance_id: LargestSpecimen::largest_specimen(estimator, cache), + sync_id: LargestSpecimen::largest_specimen(estimator, cache), + } + } + } + + impl 
LargeUniqueSequence for ValidatorIndex + where + E: SizeEstimator, + { + fn large_unique_sequence( + _estimator: &E, + count: usize, + _cache: &mut Cache, + ) -> BTreeSet { + Iterator::map((0..u32::MAX).rev(), ValidatorIndex::from) + .take(count) + .collect() + } + } + + impl LargestSpecimen for SyncResponse { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + SyncResponse { + round_id: LargestSpecimen::largest_specimen(estimator, cache), + proposal_or_hash: LargestSpecimen::largest_specimen(estimator, cache), + echo_sigs: btree_map_distinct_from_prop(estimator, "validator_count", cache), + true_vote_sigs: btree_map_distinct_from_prop(estimator, "validator_count", cache), + false_vote_sigs: btree_map_distinct_from_prop(estimator, "validator_count", cache), + signed_messages: vec_prop_specimen(estimator, "validator_count", cache), + evidence: vec_prop_specimen(estimator, "validator_count", cache), + instance_id: LargestSpecimen::largest_specimen(estimator, cache), + sync_id: LargestSpecimen::largest_specimen(estimator, cache), + } + } + } + + impl LargestSpecimen for Proposal { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + Proposal { + timestamp: LargestSpecimen::largest_specimen(estimator, cache), + maybe_block: LargestSpecimen::largest_specimen(estimator, cache), + maybe_parent_round_id: LargestSpecimen::largest_specimen(estimator, cache), + inactive: Some(btree_set_distinct_from_prop( + estimator, + "validator_count", + cache, + )), + } + } + } + + impl LargestSpecimen for ValidatorIndex { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + u32::largest_specimen(estimator, cache).into() + } + } + + impl LargestSpecimen for SignedMessage { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + SignedMessage::sign_new( + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + 
LargestSpecimen::largest_specimen(estimator, cache), + &LargestSpecimen::largest_specimen(estimator, cache), + ) + } + } + + impl LargestSpecimen for Content { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + if let Some(item) = cache.get::() { + return *item; + } + + let item = largest_variant::(estimator, |variant| { + match variant { + ContentDiscriminants::Echo => { + Content::Echo(LargestSpecimen::largest_specimen(estimator, cache)) + } + ContentDiscriminants::Vote => { + Content::Vote(LargestSpecimen::largest_specimen(estimator, cache)) + } + } + }); + *cache.set(item) + } + } +} + +mod registered_sync { + use crate::{ + types::{DataSize, NodeRng}, + utils::specimen::{Cache, LargestSpecimen, SizeEstimator}, + }; + use casper_types::{TimeDiff, Timestamp}; + use rand::Rng as _; + use serde::{Deserialize, Serialize}; + use std::collections::BTreeMap; + + #[derive(Default, DataSize, Clone, Serialize, Deserialize, Debug, PartialEq, Eq, Hash)] + pub struct RegisteredSync(BTreeMap); + + #[derive( + DataSize, Copy, Clone, Serialize, Deserialize, Debug, PartialEq, Eq, Hash, Ord, PartialOrd, + )] + pub struct RandomId(u64); + + impl RegisteredSync { + /// Prunes entries older than one minute. + fn prune_old(&mut self) { + const ONE_MIN: TimeDiff = TimeDiff::from_seconds(60); + + self.0.retain(|_, timestamp| timestamp.elapsed() < ONE_MIN); + } + + pub fn create_and_register_new_id(&mut self, rng: &mut NodeRng) -> RandomId { + self.prune_old(); + + let id = loop { + let id = RandomId::new(rng); + + if self.0.contains_key(&id) == false { + break id; + } + }; + + self.0.insert(id, Timestamp::now()); + + id + } + + /// Tries and remove the random ID from the stored IDs and returns it if it was present. 
+ pub fn try_remove_id(&mut self, id: RandomId) -> Option { + self.0.remove(&id)?; + + Some(id) + } + } + + impl RandomId { + pub fn new(rng: &mut NodeRng) -> Self { + RandomId(rng.gen()) + } + } + + impl LargestSpecimen for RandomId { + fn largest_specimen(_estimator: &E, _cache: &mut Cache) -> Self { + RandomId(u64::MAX) + } + } +} diff --git a/node/src/components/consensus/protocols/zug/config.rs b/node/src/components/consensus/protocols/zug/config.rs new file mode 100644 index 0000000000..3b832d5f7c --- /dev/null +++ b/node/src/components/consensus/protocols/zug/config.rs @@ -0,0 +1,45 @@ +use serde::{Deserialize, Serialize}; + +use datasize::DataSize; + +use casper_types::{serde_option_time_diff, TimeDiff}; + +/// `Zug`-specific configuration. +/// *Note*: This is *not* protocol configuration that has to be the same on all nodes. +#[derive(DataSize, Debug, Clone, Serialize, Deserialize)] +#[serde(deny_unknown_fields)] +pub struct Config { + /// Request the latest protocol state from a random peer periodically, with this interval. 0 + /// means disabled. + #[serde(with = "serde_option_time_diff")] + pub sync_state_interval: Option, + /// Log inactive or faulty validators periodically, with this interval. 0 means disabled. + #[serde(with = "serde_option_time_diff")] + pub log_participation_interval: Option, + /// The minimal and initial timeout for a proposal. + pub proposal_timeout: TimeDiff, + /// The additional proposal delay that is still considered fast enough, in percent. This should + /// take into account variables like empty vs. full blocks, network traffic etc. + /// E.g. if proposing a full block while under heavy load takes 50% longer than an empty one + /// while idle this should be at least 50, meaning that the timeout is 50% longer than + /// necessary for a quorum of recent proposals, approximately. + pub proposal_grace_period: u16, + /// The average number of rounds after which the proposal timeout adapts by a factor of 2. 
+ /// Note: It goes up faster than it goes down: it takes fewer rounds to double than to halve. + pub proposal_timeout_inertia: u16, + /// Incoming proposals whose timestamps lie further in the future are rejected. + pub clock_tolerance: TimeDiff, +} + +impl Default for Config { + fn default() -> Self { + Config { + sync_state_interval: Some("1sec".parse().unwrap()), + log_participation_interval: Some("10sec".parse().unwrap()), + proposal_timeout: "1sec".parse().unwrap(), + clock_tolerance: "1sec".parse().unwrap(), + proposal_grace_period: 200, + proposal_timeout_inertia: 10, + } + } +} diff --git a/node/src/components/consensus/protocols/zug/des_testing.rs b/node/src/components/consensus/protocols/zug/des_testing.rs new file mode 100644 index 0000000000..0f000a3204 --- /dev/null +++ b/node/src/components/consensus/protocols/zug/des_testing.rs @@ -0,0 +1,1240 @@ +#![allow(clippy::arithmetic_side_effects)] // In tests, overflows panic anyway. + +use std::{ + collections::{hash_map::DefaultHasher, HashMap, VecDeque}, + fmt::{self, Debug, Display, Formatter}, + hash::{Hash, Hasher}, +}; + +use datasize::DataSize; +use hex_fmt::HexFmt; +use itertools::Itertools; +use rand::{prelude::IteratorRandom, Rng}; +use serde::{Deserialize, Serialize}; +use tracing::{trace, warn}; + +use casper_types::{TimeDiff, Timestamp}; + +use super::{ + config::Config, + message::{Content, Message as ZugProtocolMessage, SignedMessage}, + Params, Zug, +}; +use crate::{ + components::consensus::{ + consensus_protocol::{ + ConsensusProtocol, FinalizedBlock, ProposedBlock, ProtocolOutcome, ProtocolOutcomes, + }, + tests::{ + consensus_des_testing::{ + DeliverySchedule, Fault as DesFault, Message, Node, Target, TargetedMessage, + ValidatorId, VirtualNet, + }, + queue::QueueEntry, + }, + traits::{ConsensusValueT, Context, ValidatorSecret}, + utils::{Validators, Weight}, + ActionId, BlockContext, SerializedMessage, TimerId, + }, + types::NodeId, + NodeRng, +}; + +#[derive(Eq, PartialEq, Clone, 
Debug, Hash, Serialize, Deserialize, DataSize, Default)] +pub(crate) struct ConsensusValue(Vec); + +impl ConsensusValueT for ConsensusValue { + fn needs_validation(&self) -> bool { + !self.0.is_empty() + } +} + +impl Display for ConsensusValue { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!(f, "{:10}", HexFmt(&self.0)) + } +} + +const TEST_MIN_ROUND_LEN: TimeDiff = TimeDiff::from_millis(1 << 12); +const TEST_END_HEIGHT: u64 = 100000; +pub(crate) const TEST_INSTANCE_ID: u64 = 42; + +#[derive(Debug, Clone, Eq, PartialEq, Hash)] +enum ZugMessage { + GossipMessage(SerializedMessage), + TargetedMessage(SerializedMessage, NodeId), + MessageToRandomPeer(SerializedMessage), + RequestToRandomPeer(SerializedMessage), + Timer(Timestamp, TimerId), + QueueAction(ActionId), + RequestNewBlock(BlockContext), + FinalizedBlock(FinalizedBlock), + ValidateConsensusValue(NodeId, ProposedBlock), + NewEvidence(ValidatorId), + SendEvidence(NodeId, ValidatorId), + WeAreFaulty, + DoppelgangerDetected, + FttExceeded, + Disconnect(NodeId), + HandledProposedBlock(ProposedBlock), +} + +impl ZugMessage { + fn is_signed_gossip_message(&self) -> bool { + if let ZugMessage::GossipMessage(raw) = self { + let deserialized: super::Message = + raw.deserialize_incoming().expect("message not valid"); + matches!(deserialized, ZugProtocolMessage::Signed(_)) + } else { + false + } + } + + fn is_proposal(&self) -> bool { + if let ZugMessage::GossipMessage(raw) = self { + let deserialized: super::Message = + raw.deserialize_incoming().expect("message not valid"); + matches!(deserialized, ZugProtocolMessage::Proposal { .. 
}) + } else { + false + } + } +} + +impl PartialOrd for ZugMessage { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for ZugMessage { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + let mut hasher0 = DefaultHasher::new(); + let mut hasher1 = DefaultHasher::new(); + self.hash(&mut hasher0); + other.hash(&mut hasher1); + hasher0.finish().cmp(&hasher1.finish()) + } +} + +impl From> for ZugMessage { + fn from(outcome: ProtocolOutcome) -> ZugMessage { + match outcome { + ProtocolOutcome::CreatedGossipMessage(msg) => ZugMessage::GossipMessage(msg), + ProtocolOutcome::CreatedTargetedMessage(msg, target) => { + ZugMessage::TargetedMessage(msg, target) + } + ProtocolOutcome::CreatedMessageToRandomPeer(msg) => { + ZugMessage::MessageToRandomPeer(msg) + } + ProtocolOutcome::CreatedRequestToRandomPeer(request) => { + ZugMessage::RequestToRandomPeer(request) + } + ProtocolOutcome::ScheduleTimer(timestamp, timer_id) => { + ZugMessage::Timer(timestamp, timer_id) + } + ProtocolOutcome::QueueAction(action_id) => ZugMessage::QueueAction(action_id), + ProtocolOutcome::CreateNewBlock(block_ctx, _expiry) => { + ZugMessage::RequestNewBlock(block_ctx) + } + ProtocolOutcome::FinalizedBlock(finalized_block) => { + ZugMessage::FinalizedBlock(finalized_block) + } + ProtocolOutcome::ValidateConsensusValue { + sender, + proposed_block, + } => ZugMessage::ValidateConsensusValue(sender, proposed_block), + ProtocolOutcome::NewEvidence(vid) => ZugMessage::NewEvidence(vid), + ProtocolOutcome::SendEvidence(target, vid) => ZugMessage::SendEvidence(target, vid), + ProtocolOutcome::WeAreFaulty => ZugMessage::WeAreFaulty, + ProtocolOutcome::DoppelgangerDetected => ZugMessage::DoppelgangerDetected, + ProtocolOutcome::FttExceeded => ZugMessage::FttExceeded, + ProtocolOutcome::Disconnect(sender) => ZugMessage::Disconnect(sender), + ProtocolOutcome::HandledProposedBlock(proposed_block) => { + ZugMessage::HandledProposedBlock(proposed_block) + } + } + } +} 
+ +#[derive(Debug, Eq, PartialEq)] +pub(crate) enum TestRunError { + /// VirtualNet was missing a validator when it was expected to exist. + MissingValidator(ValidatorId), + /// No more messages in the message queue. + NoMessages, +} + +impl Display for TestRunError { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self { + TestRunError::NoMessages => write!( + f, + "Test finished prematurely due to lack of messages in the queue" + ), + TestRunError::MissingValidator(id) => { + write!(f, "Virtual net is missing validator {:?}.", id) + } + } + } +} + +enum Distribution { + Uniform, +} + +impl Distribution { + /// Returns vector of `count` elements of random values between `lower` and `upper`. + fn gen_range_vec(&self, rng: &mut NodeRng, lower: u64, upper: u64, count: u8) -> Vec { + match self { + Distribution::Uniform => (0..count).map(|_| rng.gen_range(lower..upper)).collect(), + } + } +} + +trait DeliveryStrategy { + fn gen_delay( + &mut self, + rng: &mut NodeRng, + message: &ZugMessage, + distribution: &Distribution, + base_delivery_timestamp: Timestamp, + ) -> DeliverySchedule; +} + +struct ZugValidator { + zug: Zug, + fault: Option, +} + +impl ZugValidator { + fn new(zug: Zug, fault: Option) -> Self { + ZugValidator { zug, fault } + } + + fn zug_mut(&mut self) -> &mut Zug { + &mut self.zug + } + + fn zug(&self) -> &Zug { + &self.zug + } + + fn post_hook(&mut self, delivery_time: Timestamp, msg: ZugMessage) -> Vec { + match self.fault.as_ref() { + Some(DesFault::TemporarilyMute { from, till }) + if *from <= delivery_time && delivery_time <= *till => + { + // For mute validators we drop the generated messages to be sent, if the delivery + // time is in the interval in which they are muted. 
+ match msg { + ZugMessage::GossipMessage(_) + | ZugMessage::TargetedMessage(_, _) + | ZugMessage::MessageToRandomPeer(_) + | ZugMessage::RequestToRandomPeer(_) + | ZugMessage::SendEvidence(_, _) => { + warn!("Validator is mute – won't send messages in response"); + vec![] + } + ZugMessage::Timer(_, _) + | ZugMessage::QueueAction(_) + | ZugMessage::RequestNewBlock(_) + | ZugMessage::FinalizedBlock(_) + | ZugMessage::ValidateConsensusValue(_, _) + | ZugMessage::NewEvidence(_) + | ZugMessage::Disconnect(_) + | ZugMessage::HandledProposedBlock(_) => vec![msg], + ZugMessage::WeAreFaulty => { + panic!("validator equivocated unexpectedly"); + } + ZugMessage::DoppelgangerDetected => { + panic!("unexpected doppelganger detected"); + } + ZugMessage::FttExceeded => { + panic!("unexpected FTT exceeded"); + } + } + } + Some(DesFault::PermanentlyMute) => { + // For permanently mute validators we drop the generated messages to be sent + match msg { + ZugMessage::GossipMessage(_) + | ZugMessage::TargetedMessage(_, _) + | ZugMessage::MessageToRandomPeer(_) + | ZugMessage::RequestToRandomPeer(_) + | ZugMessage::SendEvidence(_, _) => { + warn!("Validator is mute – won't send messages in response"); + vec![] + } + ZugMessage::Timer(_, _) + | ZugMessage::QueueAction(_) + | ZugMessage::RequestNewBlock(_) + | ZugMessage::FinalizedBlock(_) + | ZugMessage::ValidateConsensusValue(_, _) + | ZugMessage::NewEvidence(_) + | ZugMessage::Disconnect(_) + | ZugMessage::HandledProposedBlock(_) => vec![msg], + ZugMessage::WeAreFaulty => { + panic!("validator equivocated unexpectedly"); + } + ZugMessage::DoppelgangerDetected => { + panic!("unexpected doppelganger detected"); + } + ZugMessage::FttExceeded => { + panic!("unexpected FTT exceeded"); + } + } + } + None | Some(DesFault::TemporarilyMute { .. }) => { + // Honest validator. 
+ match &msg { + ZugMessage::WeAreFaulty => { + panic!("validator equivocated unexpectedly"); + } + ZugMessage::DoppelgangerDetected => { + panic!("unexpected doppelganger detected"); + } + ZugMessage::FttExceeded => { + panic!("unexpected FTT exceeded"); + } + _ => vec![msg], + } + } + Some(DesFault::Equivocate) => match msg { + ZugMessage::GossipMessage(ref serialized_msg) => { + match serialized_msg.deserialize_incoming::>() { + Ok(ZugProtocolMessage::Signed( + signed_msg @ SignedMessage { content, .. }, + )) => match content { + Content::Echo(hash) => { + let conflicting_message = SignedMessage::sign_new( + signed_msg.round_id, + signed_msg.instance_id, + Content::::Echo(HashWrapper( + hash.0.wrapping_add(1), + )), + signed_msg.validator_idx, + &TestSecret(signed_msg.validator_idx.0.into()), + ); + vec![ + ZugMessage::GossipMessage(SerializedMessage::from_message( + &ZugProtocolMessage::Signed(conflicting_message), + )), + msg, + ] + } + Content::Vote(vote) => { + let conflicting_message = SignedMessage::sign_new( + signed_msg.round_id, + signed_msg.instance_id, + Content::::Vote(!vote), + signed_msg.validator_idx, + &TestSecret(signed_msg.validator_idx.0.into()), + ); + vec![ + ZugMessage::GossipMessage(SerializedMessage::from_message( + &ZugProtocolMessage::Signed(conflicting_message), + )), + msg, + ] + } + }, + _ => vec![msg], + } + } + _ => vec![msg], + }, + } + } +} + +type ZugNode = Node; + +type ZugNet = VirtualNet; + +struct ZugTestHarness +where + DS: DeliveryStrategy, +{ + virtual_net: ZugNet, + /// Consensus values to be proposed. + /// Order of values in the vector defines the order in which they will be proposed. + consensus_values: VecDeque, + /// A strategy to pseudo randomly change the message delivery times. + delivery_time_strategy: DS, + /// Distribution of delivery times. 
+ delivery_time_distribution: Distribution, + /// Mapping of validator IDs to node IDs + vid_to_node_id: HashMap, + /// Mapping of node IDs to validator IDs + node_id_to_vid: HashMap, +} + +type TestResult = Result; + +impl ZugTestHarness +where + DS: DeliveryStrategy, +{ + /// Advance the test by one message. + /// + /// Pops one message from the message queue (if there are any) + /// and pass it to the recipient validator for execution. + /// Messages returned from the execution are scheduled for later delivery. + pub(crate) fn crank(&mut self, rng: &mut NodeRng) -> TestResult<()> { + let QueueEntry { + delivery_time, + recipient, + message, + } = self + .virtual_net + .pop_message() + .ok_or(TestRunError::NoMessages)?; + + let span = tracing::trace_span!("crank", validator = %recipient); + let _enter = span.enter(); + trace!( + "Processing: tick {}, sender validator={}, payload {:?}", + delivery_time, + message.sender, + message.payload(), + ); + + let messages = self.process_message(rng, recipient, message, delivery_time)?; + + let targeted_messages = messages + .into_iter() + .filter_map(|zm| { + let delivery = self.delivery_time_strategy.gen_delay( + rng, + &zm, + &self.delivery_time_distribution, + delivery_time, + ); + match delivery { + DeliverySchedule::Drop => { + trace!("{:?} message is dropped.", zm); + None + } + DeliverySchedule::AtInstant(timestamp) => { + trace!("{:?} scheduled for {:?}", zm, timestamp); + self.convert_into_targeted(zm, recipient, rng) + .map(|targeted| (targeted, timestamp)) + } + } + }) + .collect(); + + self.virtual_net.dispatch_messages(targeted_messages); + Ok(()) + } + + fn convert_into_targeted( + &self, + zm: ZugMessage, + creator: ValidatorId, + rng: &mut NodeRng, + ) -> Option> { + let create_msg = |zm: ZugMessage| Message::new(creator, zm); + + match zm { + ZugMessage::GossipMessage(_) => Some(TargetedMessage::new( + create_msg(zm), + Target::AllExcept(creator), + )), + ZugMessage::TargetedMessage(_, target) => self + 
.node_id_to_vid + .get(&target) + .map(|vid| TargetedMessage::new(create_msg(zm), Target::SingleValidator(*vid))), + ZugMessage::MessageToRandomPeer(_) | ZugMessage::RequestToRandomPeer(_) => self + .virtual_net + .validators_ids() + .choose(rng) + .map(|random_vid| { + TargetedMessage::new(create_msg(zm), Target::SingleValidator(*random_vid)) + }), + ZugMessage::Timer(_, _) + | ZugMessage::QueueAction(_) + | ZugMessage::RequestNewBlock(_) + | ZugMessage::FinalizedBlock(_) + | ZugMessage::ValidateConsensusValue(_, _) + | ZugMessage::NewEvidence(_) + | ZugMessage::Disconnect(_) + | ZugMessage::HandledProposedBlock(_) + | ZugMessage::SendEvidence(_, _) + | ZugMessage::WeAreFaulty + | ZugMessage::DoppelgangerDetected + | ZugMessage::FttExceeded => Some(TargetedMessage::new( + create_msg(zm), + Target::SingleValidator(creator), + )), + } + } + + fn next_consensus_value(&mut self, height: u64) -> ConsensusValue { + self.consensus_values + .get(height as usize) + .cloned() + .unwrap_or_default() + } + + /// Helper for getting validator from the underlying virtual net. + fn node_mut(&mut self, validator_id: &ValidatorId) -> TestResult<&mut ZugNode> { + self.virtual_net + .node_mut(validator_id) + .ok_or(TestRunError::MissingValidator(*validator_id)) + } + + fn call_validator( + &mut self, + delivery_time: Timestamp, + validator_id: &ValidatorId, + f: F, + ) -> TestResult> + where + F: FnOnce(&mut ZugValidator) -> ProtocolOutcomes, + { + let validator_node = self.node_mut(validator_id)?; + let res = f(validator_node.validator_mut()); + let messages = res + .into_iter() + .flat_map(|outcome| { + validator_node + .validator_mut() + .post_hook(delivery_time, ZugMessage::from(outcome)) + }) + .collect(); + Ok(messages) + } + + /// Processes a message sent to `validator_id`. + /// Returns a vector of messages produced by the `validator` in reaction to processing a + /// message. 
+ fn process_message( + &mut self, + rng: &mut NodeRng, + validator_id: ValidatorId, + message: Message, + delivery_time: Timestamp, + ) -> TestResult> { + self.node_mut(&validator_id)? + .push_messages_received(vec![message.clone()]); + + let messages = { + let sender_id = message.sender; + + let zm = message.payload().clone(); + + match zm { + ZugMessage::GossipMessage(msg) + | ZugMessage::TargetedMessage(msg, _) + | ZugMessage::MessageToRandomPeer(msg) => { + let sender = *self + .vid_to_node_id + .get(&sender_id) + .ok_or(TestRunError::MissingValidator(sender_id))?; + self.call_validator(delivery_time, &validator_id, |consensus| { + consensus + .zug_mut() + .handle_message(rng, sender, msg, delivery_time) + })? + } + ZugMessage::RequestToRandomPeer(req) => { + let sender = *self + .vid_to_node_id + .get(&sender_id) + .ok_or(TestRunError::MissingValidator(sender_id))?; + self.call_validator(delivery_time, &validator_id, |consensus| { + let (mut outcomes, maybe_msg) = consensus.zug_mut().handle_request_message( + rng, + sender, + req, + delivery_time, + ); + outcomes.extend( + maybe_msg + .into_iter() + .map(|msg| ProtocolOutcome::CreatedTargetedMessage(msg, sender)), + ); + outcomes + })? + } + ZugMessage::Timer(timestamp, timer_id) => { + self.call_validator(delivery_time, &validator_id, |consensus| { + consensus + .zug_mut() + .handle_timer(timestamp, delivery_time, timer_id, rng) + })? + } + ZugMessage::QueueAction(_) => vec![], // not used in Zug + ZugMessage::RequestNewBlock(block_context) => { + let consensus_value = self.next_consensus_value(block_context.height()); + let proposed_block = ProposedBlock::new(consensus_value, block_context); + + self.call_validator(delivery_time, &validator_id, |consensus| { + consensus.zug_mut().propose(proposed_block, delivery_time) + })? 
+ } + ZugMessage::FinalizedBlock(FinalizedBlock { + value, + timestamp: _, + relative_height, + terminal_block_data, + equivocators: _, + proposer: _, + }) => { + trace!( + "{}consensus value finalized: {:?}, height: {:?}", + if terminal_block_data.is_some() { + "last " + } else { + "" + }, + value, + relative_height, + ); + self.node_mut(&validator_id)?.push_finalized(value); + vec![] + } + ZugMessage::ValidateConsensusValue(_, proposed_block) => { + self.call_validator(delivery_time, &validator_id, |consensus| { + consensus + .zug_mut() + .resolve_validity(proposed_block, true, delivery_time) + })? + } + ZugMessage::NewEvidence(_) => vec![], // irrelevant to consensus + ZugMessage::Disconnect(target) => { + if let Some(vid) = self.node_id_to_vid.get(&target) { + warn!("{} wants to disconnect from {}", validator_id, vid); + } + vec![] // TODO: register the disconnect attempt somehow? + } + ZugMessage::HandledProposedBlock(_) => vec![], // irrelevant to consensus + ZugMessage::WeAreFaulty => { + warn!("{} detected that it is faulty", validator_id); + vec![] // TODO: stop the node or something? + } + ZugMessage::DoppelgangerDetected => { + warn!("{} detected a doppelganger", validator_id); + vec![] // TODO: stop the node or something? + } + ZugMessage::FttExceeded => { + warn!("{} detected FTT exceeded", validator_id); + vec![] // TODO: stop the node or something? + } + ZugMessage::SendEvidence(node_id, vid) => { + self.call_validator(delivery_time, &validator_id, |consensus| { + consensus.zug_mut().send_evidence(node_id, &vid) + })? + } + } + }; + + let recipient = self.node_mut(&validator_id)?; + recipient.push_messages_produced(messages.clone()); + + Ok(messages) + } + + /// Returns a `MutableHandle` on the `ZugTestHarness` object + /// that allows for manipulating internal state of the test state. 
+ fn mutable_handle(&mut self) -> MutableHandle { + MutableHandle(self) + } +} + +fn crank_until( + zth: &mut ZugTestHarness, + rng: &mut NodeRng, + f: F, +) -> TestResult<()> +where + F: Fn(&ZugTestHarness) -> bool, +{ + while !f(zth) { + zth.crank(rng)?; + } + Ok(()) +} + +struct MutableHandle<'a, DS: DeliveryStrategy>(&'a mut ZugTestHarness); + +impl MutableHandle<'_, DS> { + /// Drops all messages from the queue. + fn clear_message_queue(&mut self) { + self.0.virtual_net.empty_queue(); + } + + fn validators(&self) -> impl Iterator { + self.0.virtual_net.validators() + } +} + +#[derive(Debug)] +enum BuilderError { + WeightLimits, +} + +struct ZugTestHarnessBuilder { + /// Maximum number of faulty validators in the network. + /// Defaults to 10. + max_faulty_validators: u8, + /// Percentage of faulty validators' (i.e. equivocators) weight. + /// Defaults to 0 (network is perfectly secure). + faulty_percent: u64, + fault_type: Option, + /// FTT value for the finality detector. + /// If not given, defaults to 1/3 of total validators' weight. + ftt: Option, + /// Number of consensus values to be proposed by the nodes in the network. + /// Those will be generated by the test framework. + /// Defaults to 10. + consensus_values_count: u8, + /// Distribution of message delivery (delaying, dropping) delays.. + delivery_distribution: Distribution, + delivery_strategy: DS, + /// Upper and lower limits for validators' weights. + weight_limits: (u64, u64), + /// Time when the test era starts at. + /// Defaults to 0. + start_time: Timestamp, + /// Era end height. + end_height: u64, + /// Type of discrete distribution of validators' weights. + /// Defaults to uniform. + weight_distribution: Distribution, + /// Zug protocol config + config: Config, +} + +// Default strategy for message delivery. 
+struct InstantDeliveryNoDropping; + +impl DeliveryStrategy for InstantDeliveryNoDropping { + fn gen_delay( + &mut self, + _rng: &mut NodeRng, + message: &ZugMessage, + _distribution: &Distribution, + base_delivery_timestamp: Timestamp, + ) -> DeliverySchedule { + match message { + ZugMessage::RequestNewBlock(bc) => DeliverySchedule::AtInstant(bc.timestamp()), + ZugMessage::Timer(t, _) => DeliverySchedule::AtInstant(*t), + ZugMessage::GossipMessage(_) + | ZugMessage::TargetedMessage(_, _) + | ZugMessage::MessageToRandomPeer(_) + | ZugMessage::RequestToRandomPeer(_) + | ZugMessage::QueueAction(_) + | ZugMessage::FinalizedBlock(_) + | ZugMessage::ValidateConsensusValue(_, _) + | ZugMessage::NewEvidence(_) + | ZugMessage::Disconnect(_) + | ZugMessage::HandledProposedBlock(_) + | ZugMessage::WeAreFaulty + | ZugMessage::DoppelgangerDetected + | ZugMessage::FttExceeded + | ZugMessage::SendEvidence(_, _) => { + DeliverySchedule::AtInstant(base_delivery_timestamp + TimeDiff::from_millis(1)) + } + } + } +} + +impl ZugTestHarnessBuilder { + fn new() -> Self { + ZugTestHarnessBuilder { + max_faulty_validators: 10, + faulty_percent: 0, + fault_type: None, + ftt: None, + consensus_values_count: 10, + delivery_distribution: Distribution::Uniform, + delivery_strategy: InstantDeliveryNoDropping, + weight_limits: (1, 100), + start_time: Timestamp::zero(), + end_height: TEST_END_HEIGHT, + weight_distribution: Distribution::Uniform, + config: Default::default(), + } + } +} + +impl ZugTestHarnessBuilder { + /// Sets a percentage of weight that will be assigned to malicious nodes. + /// `faulty_weight` must be a value between 0 (inclusive) and 33 (inclusive). 
+ pub(crate) fn faulty_weight_perc(mut self, faulty_weight: u64) -> Self { + self.faulty_percent = faulty_weight; + self + } + + fn fault_type(mut self, fault_type: DesFault) -> Self { + self.fault_type = Some(fault_type); + self + } + + pub(crate) fn consensus_values_count(mut self, count: u8) -> Self { + assert!(count > 0); + self.consensus_values_count = count; + self + } + + pub(crate) fn weight_limits(mut self, lower: u64, upper: u64) -> Self { + assert!( + lower >= 100, + "Lower limit has to be higher than 100 to avoid rounding problems." + ); + self.weight_limits = (lower, upper); + self + } + + fn max_faulty_validators(mut self, max_faulty_count: u8) -> Self { + self.max_faulty_validators = max_faulty_count; + self + } + + fn build(self, rng: &mut NodeRng) -> Result, BuilderError> { + let consensus_values = (0..self.consensus_values_count) + .map(|el| ConsensusValue(vec![el])) + .collect::>(); + + let instance_id = TEST_INSTANCE_ID; + let start_time = self.start_time; + + let (lower, upper) = { + let (l, u) = self.weight_limits; + if l >= u { + return Err(BuilderError::WeightLimits); + } + (l, u) + }; + + let (faulty_weights, honest_weights): (Vec, Vec) = { + if self.faulty_percent == 0 { + // All validators are honest. + let validators_num = rng.gen_range(2..self.max_faulty_validators + 1); + let honest_validators: Vec = self + .weight_distribution + .gen_range_vec(rng, lower, upper, validators_num) + .into_iter() + .map(Weight) + .collect(); + + (vec![], honest_validators) + } else { + // At least 2 validators total and at least one faulty. + let faulty_num = rng.gen_range(1..self.max_faulty_validators + 1); + + // Randomly (but within chosen range) assign weights to faulty nodes. + let faulty_weights = self + .weight_distribution + .gen_range_vec(rng, lower, upper, faulty_num); + + // Assign enough weights to honest nodes so that we reach expected + // `faulty_percentage` ratio. 
+ let honest_weights = { + let faulty_sum = faulty_weights.iter().sum::(); + let mut weights_to_distribute: u64 = + (faulty_sum * 100).div_ceil(self.faulty_percent) - faulty_sum; + let mut weights = vec![]; + while weights_to_distribute > 0 { + let weight = if weights_to_distribute < upper { + weights_to_distribute + } else { + rng.gen_range(lower..upper) + }; + weights.push(weight); + weights_to_distribute -= weight + } + weights + }; + + ( + faulty_weights.into_iter().map(Weight).collect(), + honest_weights.into_iter().map(Weight).collect(), + ) + } + }; + + let weights_sum = faulty_weights + .iter() + .chain(honest_weights.iter()) + .sum::(); + + let validators: Validators = faulty_weights + .iter() + .chain(honest_weights.iter()) + .enumerate() + .map(|(i, weight)| (ValidatorId(i as u64), *weight)) + .collect(); + + trace!("Weights: {:?}", validators.iter().collect::>()); + + let mut secrets = validators + .iter() + .map(|validator| (*validator.id(), TestSecret(validator.id().0))) + .collect(); + + let ftt = self + .ftt + .map(|p| p * weights_sum.0 / 100) + .unwrap_or_else(|| (weights_sum.0 - 1) / 3); + + let params = Params::new( + instance_id, + TEST_MIN_ROUND_LEN, + start_time, + self.end_height, + start_time, // Length depends only on block number. + ftt.into(), + ); + + // Local function creating an instance of `ZugConsensus` for a single validator. 
+ let zug_consensus = + |(vid, secrets): (ValidatorId, &mut HashMap)| { + let v_sec = secrets.remove(&vid).expect("Secret key should exist."); + + let mut zug = Zug::new_with_params( + validators.clone(), + params.clone(), + &self.config, + None, + 0, // random seed + ); + let tmpdir = tempfile::tempdir().expect("could not create tempdir"); + let wal_file = tmpdir.path().join("wal_file.dat"); + let effects = zug.activate_validator(vid, v_sec, start_time, Some(wal_file)); + + (zug, effects.into_iter().map(ZugMessage::from).collect_vec()) + }; + + let faulty_num = faulty_weights.len(); + + let (validators, init_messages) = { + let mut validators_loc = vec![]; + let mut init_messages = vec![]; + + for validator in validators.iter() { + let vid = *validator.id(); + let fault = if vid.0 < faulty_num as u64 { + self.fault_type + } else { + None + }; + let (zug, msgs) = zug_consensus((vid, &mut secrets)); + let zug_consensus = ZugValidator::new(zug, fault); + let validator = Node::new(vid, zug_consensus); + let qm: Vec> = msgs + .into_iter() + .map(|zm| { + // These are messages crated on the start of the network. + // They are sent from validator to himself. 
+ QueueEntry::new(start_time, vid, Message::new(vid, zm)) + }) + .collect(); + init_messages.extend(qm); + validators_loc.push(validator); + } + + (validators_loc, init_messages) + }; + + let delivery_time_strategy = self.delivery_strategy; + + let delivery_time_distribution = self.delivery_distribution; + + let vid_to_node_id: HashMap<_, _> = validators + .iter() + .map(|validator| (validator.id, NodeId::random(rng))) + .collect(); + + let node_id_to_vid: HashMap<_, _> = vid_to_node_id + .iter() + .map(|(vid, node_id)| (*node_id, *vid)) + .collect(); + + let virtual_net = VirtualNet::new(validators, init_messages); + + let zth = ZugTestHarness { + virtual_net, + consensus_values, + delivery_time_strategy, + delivery_time_distribution, + vid_to_node_id, + node_id_to_vid, + }; + + Ok(zth) + } +} + +#[derive(Clone, DataSize, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub(crate) struct TestContext; + +#[derive(Clone, DataSize, Debug, Eq, PartialEq)] +pub(crate) struct TestSecret(pub(crate) u64); + +// Newtype wrapper for test signature. +// Added so that we can use custom Debug impl. +#[derive(Clone, DataSize, Copy, Hash, PartialOrd, Ord, Eq, PartialEq, Serialize, Deserialize)] +pub(crate) struct SignatureWrapper(u64); + +impl Debug for SignatureWrapper { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!(f, "{:10}", HexFmt(&self.0.to_le_bytes())) + } +} + +// Newtype wrapper for test hash. +// Added so that we can use custom Debug impl. 
+#[derive(Clone, Copy, DataSize, Hash, Ord, PartialOrd, Eq, PartialEq, Serialize, Deserialize)] +pub(crate) struct HashWrapper(u64); + +impl Debug for HashWrapper { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!(f, "{:10}", HexFmt(&self.0.to_le_bytes())) + } +} + +impl Display for HashWrapper { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + Debug::fmt(self, f) + } +} + +impl ValidatorSecret for TestSecret { + type Hash = HashWrapper; + type Signature = SignatureWrapper; + + fn sign(&self, data: &Self::Hash) -> Self::Signature { + SignatureWrapper(data.0 + self.0) + } +} + +impl Context for TestContext { + type ConsensusValue = ConsensusValue; + type ValidatorId = ValidatorId; + type ValidatorSecret = TestSecret; + type Signature = SignatureWrapper; + type Hash = HashWrapper; + type InstanceId = u64; + + fn hash(data: &[u8]) -> Self::Hash { + let mut hasher = DefaultHasher::new(); + hasher.write(data); + HashWrapper(hasher.finish()) + } + + fn verify_signature( + hash: &Self::Hash, + public_key: &Self::ValidatorId, + signature: &::Signature, + ) -> bool { + let computed_signature = hash.0 + public_key.0; + computed_signature == signature.0 + } +} + +mod test_harness { + use std::{collections::HashSet, fmt::Debug}; + + use super::{ + crank_until, ConsensusValue, InstantDeliveryNoDropping, TestRunError, ZugTestHarness, + ZugTestHarnessBuilder, + }; + use crate::{ + components::consensus::{ + consensus_protocol::ConsensusProtocol, + tests::consensus_des_testing::{Fault as DesFault, ValidatorId}, + }, + logging, + }; + use logging::{LoggingConfig, LoggingFormat}; + + #[test] + fn on_empty_queue_error() { + let mut rng = crate::new_rng(); + let mut zug_test_harness: ZugTestHarness = + ZugTestHarnessBuilder::new() + .consensus_values_count(1) + .weight_limits(100, 120) + .build(&mut rng) + .expect("Construction was successful"); + + zug_test_harness.mutable_handle().clear_message_queue(); + + assert_eq!( + zug_test_harness.crank(&mut rng), + 
Err(TestRunError::NoMessages), + "Expected the test run to stop." + ); + } + + // Test that all elements of the vector all equal. + fn assert_eq_vectors(coll: Vec, error_msg: &str) { + let mut iter = coll.into_iter(); + let reference = iter.next().unwrap(); + + iter.for_each(|v| assert_eq!(v, reference, "{}", error_msg)); + } + + #[test] + fn liveness_test_no_faults() { + let _ = logging::init_with_config(&LoggingConfig::new(LoggingFormat::Text, true, true)); + + let mut rng = crate::new_rng(); + let cv_count = 10; + + let mut zug_test_harness = ZugTestHarnessBuilder::new() + .max_faulty_validators(3) + .consensus_values_count(cv_count) + .weight_limits(100, 120) + .build(&mut rng) + .expect("Construction was successful"); + + crank_until(&mut zug_test_harness, &mut rng, |zth| { + // Stop the test when each node finalized expected number of consensus values. + // Note that we're not testing the order of finalization here. + // It will be tested later – it's not the condition for stopping the test run. + zth.virtual_net + .validators() + .all(|v| v.finalized_count() == cv_count as usize) + }) + .unwrap(); + + let handle = zug_test_harness.mutable_handle(); + let validators = handle.validators(); + + let (finalized_values, msgs_produced): (Vec>, Vec) = validators + .map(|v| { + ( + v.finalized_values().cloned().collect::>(), + v.messages_produced() + .filter(|&zm| zm.is_signed_gossip_message() || zm.is_proposal()) + .cloned() + .count(), + ) + }) + .unzip(); + + msgs_produced + .into_iter() + .enumerate() + .for_each(|(v_idx, units_count)| { + // NOTE: Works only when all validators are honest and correct (no "mute" + // validators). Validator produces two units per round. It may + // produce just one before lambda message is finalized. Add one in case it's just + // one round (one consensus value) – 1 message. 1/2=0 but 3/2=1 b/c of the rounding. 
+ let expected_msgs = cv_count as usize * 2; + + assert_eq!( + units_count, expected_msgs, + "Expected that validator={} produced {} messages.", + v_idx, expected_msgs + ) + }); + + assert_eq_vectors( + finalized_values, + "Nodes finalized different consensus values.", + ); + } + + #[test] + fn liveness_test_some_mute() { + let _ = logging::init_with_config(&LoggingConfig::new(LoggingFormat::Text, true, true)); + + let mut rng = crate::new_rng(); + let cv_count = 10; + let fault_perc = 30; + + let mut zug_test_harness = ZugTestHarnessBuilder::new() + .max_faulty_validators(3) + .faulty_weight_perc(fault_perc) + .fault_type(DesFault::PermanentlyMute) + .consensus_values_count(cv_count) + .weight_limits(100, 120) + .build(&mut rng) + .expect("Construction was successful"); + + crank_until(&mut zug_test_harness, &mut rng, |zth| { + // Stop the test when each node finalized expected number of consensus values. + // Note that we're not testing the order of finalization here. + // It will be tested later – it's not the condition for stopping the test run. 
+ zth.virtual_net + .validators() + .all(|v| v.finalized_count() == cv_count as usize) + }) + .unwrap(); + + let handle = zug_test_harness.mutable_handle(); + let validators = handle.validators(); + + let finalized_values: Vec> = validators + .map(|v| v.finalized_values().cloned().collect::>()) + .collect(); + + assert_eq_vectors( + finalized_values, + "Nodes finalized different consensus values.", + ); + } + + #[test] + fn liveness_test_some_equivocate() { + let _ = logging::init_with_config(&LoggingConfig::new(LoggingFormat::Text, true, true)); + + let mut rng = crate::new_rng(); + let cv_count = 10; + let fault_perc = 10; + + let mut zug_test_harness = ZugTestHarnessBuilder::new() + .max_faulty_validators(3) + .faulty_weight_perc(fault_perc) + .fault_type(DesFault::Equivocate) + .consensus_values_count(cv_count) + .weight_limits(100, 150) + .build(&mut rng) + .expect("Construction was successful"); + + crank_until(&mut zug_test_harness, &mut rng, |zth| { + // Stop the test when each node finalized expected number of consensus values. + // Note that we're not testing the order of finalization here. + // It will be tested later – it's not the condition for stopping the test run. 
+ zth.virtual_net + .validators() + .all(|v| v.finalized_count() == cv_count as usize) + }) + .unwrap(); + + let handle = zug_test_harness.mutable_handle(); + let validators = handle.validators(); + + let (finalized_values, equivocators_seen): ( + Vec>, + Vec>, + ) = validators + .map(|v| { + ( + v.finalized_values().cloned().collect::>(), + v.validator() + .zug() + .validators_with_evidence() + .into_iter() + .cloned() + .collect::>(), + ) + }) + .unzip(); + + assert_eq_vectors( + finalized_values, + "Nodes finalized different consensus values.", + ); + assert_eq_vectors( + equivocators_seen, + "Nodes saw different set of equivocators.", + ); + } +} diff --git a/node/src/components/consensus/protocols/zug/fault.rs b/node/src/components/consensus/protocols/zug/fault.rs new file mode 100644 index 0000000000..725b909a94 --- /dev/null +++ b/node/src/components/consensus/protocols/zug/fault.rs @@ -0,0 +1,31 @@ +use datasize::DataSize; + +use crate::components::consensus::{ + protocols::zug::{Content, SignedMessage}, + traits::Context, +}; + +/// A reason for a validator to be marked as faulty. +/// +/// The `Banned` state is fixed from the beginning and can't be replaced. However, `Indirect` can +/// be replaced with `Direct` evidence, which has the same effect but doesn't rely on information +/// from other consensus protocol instances. +#[derive(DataSize, Clone, Debug, PartialEq)] +pub(crate) enum Fault +where + C: Context, +{ + /// The validator was known to be malicious from the beginning. All their messages are + /// considered invalid in this `Zug` instance. + Banned, + /// We have direct evidence of the validator's fault: two conflicting signatures. + Direct(SignedMessage, Content, C::Signature), + /// The validator is known to be faulty, but the evidence is not in this era. 
+ Indirect, +} + +impl Fault { + pub(super) fn is_direct(&self) -> bool { + matches!(self, Fault::Direct(..)) + } +} diff --git a/node/src/components/consensus/protocols/zug/message.rs b/node/src/components/consensus/protocols/zug/message.rs new file mode 100644 index 0000000000..9274b54d61 --- /dev/null +++ b/node/src/components/consensus/protocols/zug/message.rs @@ -0,0 +1,297 @@ +use std::{collections::BTreeMap, fmt::Debug}; + +use datasize::DataSize; +use serde::{Deserialize, Serialize}; + +use either::Either; + +use crate::{ + components::consensus::{ + protocols::zug::{Proposal, RoundId}, + traits::{ConsensusNetworkMessage, Context, ValidatorSecret}, + utils::ValidatorIndex, + }, + utils::ds, +}; + +#[allow(clippy::arithmetic_side_effects)] +mod relaxed { + // This module exists solely to exempt the `EnumDiscriminants` macro generated code from the + // module-wide `clippy::arithmetic_side_effects` lint. + + use datasize::DataSize; + use serde::{Deserialize, Serialize}; + use strum::EnumDiscriminants; + + use crate::components::consensus::{ + protocols::zug::{proposal::Proposal, RoundId}, + traits::{ConsensusNetworkMessage, Context}, + }; + + use super::{SignedMessage, SyncResponse}; + + /// The content of a message in the main protocol, as opposed to the proposal, and to sync + /// messages, which are somewhat decoupled from the rest of the protocol. These messages, + /// along with the instance and round ID, are signed by the active validators. + #[derive( + Clone, Serialize, Deserialize, Debug, PartialEq, Eq, Hash, DataSize, EnumDiscriminants, + )] + #[serde(bound( + serialize = "C::Hash: Serialize", + deserialize = "C::Hash: Deserialize<'de>", + ))] + #[strum_discriminants(derive(strum::EnumIter))] + pub(crate) enum Content + where + C: Context, + { + /// By signing the echo of a proposal hash a validator affirms that this is the first (and + /// usually only) proposal by the round leader that they have received. 
A quorum of echoes + /// is a requirement for a proposal to become accepted. + Echo(C::Hash), + /// By signing a `true` vote a validator confirms that they have accepted a proposal in + /// this round before the timeout. If there is a quorum of `true` votes, the + /// proposal becomes finalized, together with its ancestors. + /// + /// A `false` vote means they timed out waiting for a proposal to get accepted. A quorum of + /// `false` votes allows the next round's leader to make a proposal without waiting for + /// this round's. + Vote(bool), + } + + /// All messages of the protocol. + #[derive( + DataSize, Clone, Serialize, Deserialize, Debug, PartialEq, Eq, Hash, EnumDiscriminants, + )] + #[serde(bound( + serialize = "C::Hash: Serialize", + deserialize = "C::Hash: Deserialize<'de>", + ))] + #[strum_discriminants(derive(strum::EnumIter))] + pub(crate) enum Message + where + C: Context, + { + /// Signatures, proposals and evidence the requester was missing. + SyncResponse(SyncResponse), + /// A proposal for a new block. This does not contain any signature; instead, the proposer + /// is expected to sign an echo with the proposal hash. Validators will drop any + /// proposal they receive unless they either have a signed echo by the proposer and + /// the proposer has not double-signed, or they have a quorum of echoes. + Proposal { + round_id: RoundId, + instance_id: C::InstanceId, + proposal: Proposal, + echo: SignedMessage, + }, + /// An echo or vote signed by an active validator. + Signed(SignedMessage), + /// Two conflicting signatures by the same validator. + Evidence(SignedMessage, Content, C::Signature), + } + + impl ConsensusNetworkMessage for Message {} +} +pub(crate) use relaxed::{Content, ContentDiscriminants, Message, MessageDiscriminants}; + +use super::registered_sync::RandomId; + +impl Content { + /// Returns whether the two contents contradict each other. 
A correct validator is expected to + /// never sign two contradictory contents in the same round. + pub(crate) fn contradicts(&self, other: &Content) -> bool { + match (self, other) { + (Content::Vote(vote0), Content::Vote(vote1)) => vote0 != vote1, + (Content::Echo(hash0), Content::Echo(hash1)) => hash0 != hash1, + _ => false, + } + } +} + +// This has to be implemented manually because of the generic parameter, which isn't +// necessarily `Copy` and that breaks the derive. +impl Copy for Content {} + +/// A vote or echo with a signature. +#[derive(Clone, Serialize, Deserialize, Debug, PartialEq, Eq, Hash, DataSize)] +#[serde(bound( + serialize = "C::Hash: Serialize", + deserialize = "C::Hash: Deserialize<'de>", +))] +pub(crate) struct SignedMessage +where + C: Context, +{ + pub(super) round_id: RoundId, + pub(super) instance_id: C::InstanceId, + pub(super) content: Content, + pub(super) validator_idx: ValidatorIndex, + pub(super) signature: C::Signature, +} + +impl SignedMessage { + /// Creates a new signed message with a valid signature. + pub(crate) fn sign_new( + round_id: RoundId, + instance_id: C::InstanceId, + content: Content, + validator_idx: ValidatorIndex, + secret: &C::ValidatorSecret, + ) -> SignedMessage { + let hash = Self::hash_fields(round_id, &instance_id, &content, validator_idx); + SignedMessage { + round_id, + instance_id, + content, + validator_idx, + signature: secret.sign(&hash), + } + } + + /// Creates a new signed message with the alternative content and signature. + pub(crate) fn with(&self, content: Content, signature: C::Signature) -> SignedMessage { + SignedMessage { + content, + signature, + ..*self + } + } + + /// Returns whether the signature is valid. 
+ pub(crate) fn verify_signature(&self, validator_id: &C::ValidatorId) -> bool { + let hash = Self::hash_fields( + self.round_id, + &self.instance_id, + &self.content, + self.validator_idx, + ); + C::verify_signature(&hash, validator_id, &self.signature) + } + + /// Returns the hash of all fields except the signature. + fn hash_fields( + round_id: RoundId, + instance_id: &C::InstanceId, + content: &Content, + validator_idx: ValidatorIndex, + ) -> C::Hash { + let serialized_fields = + bincode::serialize(&(round_id, instance_id, content, validator_idx)) + .expect("failed to serialize fields"); + ::hash(&serialized_fields) + } +} + +/// Partial information about the sender's protocol state. The receiver should send missing data. +/// +/// The sender chooses a random peer and a random era, and includes in its `SyncRequest` message +/// information about received proposals, echoes and votes. The idea is to set the `i`-th bit +/// in the `u128` fields to `1` if we have a signature from the `i`-th validator. +/// +/// To keep the size of these messages constant even if there are more than 128 validators, a +/// random interval is selected and only information about validators in that interval is +/// included: The bit with the lowest significance corresponds to validator number +/// `first_validator_idx`, and the one with the highest to +/// `(first_validator_idx + 127) % validator_count`. +/// +/// For example if there are 500 validators and `first_validator_idx` is 450, the `u128`'s bits +/// refer to validators 450, 451, ..., 499, 0, 1, ..., 77. +#[derive(DataSize, Clone, Serialize, Deserialize, Debug, PartialEq, Eq, Hash)] +#[serde(bound( + serialize = "C::Hash: Serialize", + deserialize = "C::Hash: Deserialize<'de>", +))] +pub(crate) struct SyncRequest +where + C: Context, +{ + /// The round the information refers to. + pub(crate) round_id: RoundId, + /// The proposal hash with the most echoes (by weight). 
+ pub(crate) proposal_hash: Option, + /// Whether the sender has the proposal with that hash. + pub(crate) has_proposal: bool, + /// The index of the first validator covered by the bit fields below. + pub(crate) first_validator_idx: ValidatorIndex, + /// A bit field with 1 for every validator the sender has an echo from. + pub(crate) echoes: u128, + /// A bit field with 1 for every validator the sender has a `true` vote from. + pub(crate) true_votes: u128, + /// A bit field with 1 for every validator the sender has a `false` vote from. + pub(crate) false_votes: u128, + /// A bit field with 1 for every validator the sender has any signed message from. + pub(crate) active: u128, + /// A bit field with 1 for every validator the sender has evidence against. + pub(crate) faulty: u128, + pub(crate) instance_id: C::InstanceId, + pub(crate) sync_id: RandomId, +} + +impl ConsensusNetworkMessage for SyncRequest {} + +impl SyncRequest { + /// Creates a `SyncRequest` for a round in which we haven't received any messages yet. + pub(super) fn new_empty_round( + round_id: RoundId, + first_validator_idx: ValidatorIndex, + faulty: u128, + active: u128, + instance_id: C::InstanceId, + sync_id: RandomId, + ) -> Self { + SyncRequest { + round_id, + proposal_hash: None, + has_proposal: false, + first_validator_idx, + echoes: 0, + true_votes: 0, + false_votes: 0, + active, + faulty, + instance_id, + sync_id, + } + } +} + +/// The response to a `SyncRequest`, containing proposals, signatures and evidence the requester is +/// missing. +#[derive(DataSize, Clone, Serialize, Deserialize, Debug, PartialEq, Eq, Hash)] +#[serde(bound( + serialize = "C::Hash: Serialize", + deserialize = "C::Hash: Deserialize<'de>", +))] +pub(crate) struct SyncResponse +where + C: Context, +{ + /// The round the information refers to. + pub(crate) round_id: RoundId, + /// The proposal in this round, or its hash. 
+ #[data_size(with = ds::maybe_either)] + pub(crate) proposal_or_hash: Option, C::Hash>>, + /// Echo signatures the requester is missing. + pub(crate) echo_sigs: BTreeMap, + /// Vote signatures for `true` the requester is missing. + pub(crate) true_vote_sigs: BTreeMap, + /// Vote signatures for `false` the requester is missing. + pub(crate) false_vote_sigs: BTreeMap, + /// Signed messages that prove that a validator was active. + pub(crate) signed_messages: Vec>, + /// Evidence against faulty validators. + pub(crate) evidence: Vec<(SignedMessage, Content, C::Signature)>, + pub(crate) instance_id: C::InstanceId, + pub(crate) sync_id: RandomId, +} + +impl Message { + pub(super) fn instance_id(&self) -> &C::InstanceId { + match self { + Message::SyncResponse(SyncResponse { instance_id, .. }) + | Message::Signed(SignedMessage { instance_id, .. }) + | Message::Proposal { instance_id, .. } + | Message::Evidence(SignedMessage { instance_id, .. }, ..) => instance_id, + } + } +} diff --git a/node/src/components/consensus/protocols/zug/params.rs b/node/src/components/consensus/protocols/zug/params.rs new file mode 100644 index 0000000000..f96d70368f --- /dev/null +++ b/node/src/components/consensus/protocols/zug/params.rs @@ -0,0 +1,71 @@ +use datasize::DataSize; +use serde::Serialize; + +use casper_types::{TimeDiff, Timestamp}; + +use crate::components::consensus::{traits::Context, utils::Weight}; + +/// Protocol parameters for `Zug`. +#[derive(Debug, DataSize, Clone, Serialize)] +pub(crate) struct Params +where + C: Context, +{ + instance_id: C::InstanceId, + min_block_time: TimeDiff, + start_timestamp: Timestamp, + end_height: u64, + end_timestamp: Timestamp, + ftt: Weight, +} + +impl Params { + /// Creates a new set of `Zug` protocol parameters. 
+ pub(crate) fn new( + instance_id: C::InstanceId, + min_block_time: TimeDiff, + start_timestamp: Timestamp, + end_height: u64, + end_timestamp: Timestamp, + ftt: Weight, + ) -> Params { + Params { + instance_id, + min_block_time, + start_timestamp, + end_height, + end_timestamp, + ftt, + } + } + + /// Returns the unique identifier for this protocol instance. + pub(crate) fn instance_id(&self) -> &C::InstanceId { + &self.instance_id + } + + /// Returns the minimum difference between a block's and its child's timestamp. + pub(crate) fn min_block_time(&self) -> TimeDiff { + self.min_block_time + } + + /// Returns the start timestamp of the era. + pub(crate) fn start_timestamp(&self) -> Timestamp { + self.start_timestamp + } + + /// Returns the minimum height of the last block. + pub(crate) fn end_height(&self) -> u64 { + self.end_height + } + + /// Returns the minimum timestamp of the last block. + pub(crate) fn end_timestamp(&self) -> Timestamp { + self.end_timestamp + } + + /// The threshold weight above which we are not fault tolerant any longer. + pub(crate) fn ftt(&self) -> Weight { + self.ftt + } +} diff --git a/node/src/components/consensus/protocols/zug/participation.rs b/node/src/components/consensus/protocols/zug/participation.rs new file mode 100644 index 0000000000..f91435a17c --- /dev/null +++ b/node/src/components/consensus/protocols/zug/participation.rs @@ -0,0 +1,62 @@ +use std::fmt::Debug; + +use crate::components::consensus::{ + protocols::zug::{Fault, RoundId, Zug}, + traits::Context, + utils::ValidatorIndex, +}; + +/// A map of status (faulty, inactive) by validator ID. +#[derive(Debug)] +// False positive, as the fields of this struct are all used in logging validator participation. 
+#[allow(dead_code)] +pub(super) struct Participation +where + C: Context, +{ + pub(super) instance_id: C::InstanceId, + pub(super) faulty_stake_percent: u8, + pub(super) inactive_stake_percent: u8, + pub(super) inactive_validators: Vec<(ValidatorIndex, C::ValidatorId, ParticipationStatus)>, + pub(super) faulty_validators: Vec<(ValidatorIndex, C::ValidatorId, ParticipationStatus)>, +} + +/// A validator's participation status: whether they are faulty or inactive. +#[derive(Copy, Clone, Debug, Ord, PartialOrd, Eq, PartialEq)] +pub(super) enum ParticipationStatus { + LastSeenInRound(RoundId), + Inactive, + EquivocatedInOtherEra, + Equivocated, +} + +impl ParticipationStatus { + /// Returns a `Status` for a validator unless they are honest and online. + pub(super) fn for_index( + idx: ValidatorIndex, + zug: &Zug, + ) -> Option { + if let Some(fault) = zug.faults.get(&idx) { + return Some(match fault { + Fault::Banned | Fault::Indirect => ParticipationStatus::EquivocatedInOtherEra, + Fault::Direct(..) 
=> ParticipationStatus::Equivocated, + }); + } + + let last_seen_round = zug + .active + .get(idx) + .and_then(Option::as_ref) + .map(|signed_msg| signed_msg.round_id); + match last_seen_round { + // not seen at all + None => Some(ParticipationStatus::Inactive), + // seen, but not within last 2 rounds + Some(r_id) if r_id.saturating_add(2) < zug.current_round => { + Some(ParticipationStatus::LastSeenInRound(r_id)) + } + // seen recently + _ => None, + } + } +} diff --git a/node/src/components/consensus/protocols/zug/proposal.rs b/node/src/components/consensus/protocols/zug/proposal.rs new file mode 100644 index 0000000000..f5f8d0b909 --- /dev/null +++ b/node/src/components/consensus/protocols/zug/proposal.rs @@ -0,0 +1,129 @@ +use std::{collections::BTreeSet, fmt}; + +use datasize::DataSize; +use serde::{Deserialize, Serialize}; + +use casper_types::Timestamp; + +use crate::components::consensus::{ + consensus_protocol::ProposedBlock, protocols::zug::RoundId, traits::Context, + utils::ValidatorIndex, +}; + +/// A proposal in the consensus protocol. +#[derive(Clone, Hash, Serialize, Deserialize, Debug, PartialEq, Eq, DataSize)] +#[serde(bound( + serialize = "C::Hash: Serialize", + deserialize = "C::Hash: Deserialize<'de>", +))] +pub(crate) struct Proposal +where + C: Context, +{ + /// The timestamp when the proposal was created. If finalized, this will be the block's + /// timestamp. + pub(super) timestamp: Timestamp, + /// The proposed block. This must be `None` after the switch block. + pub(super) maybe_block: Option, + /// The parent round. This is `None` if the proposed block has no parent in this era. + pub(super) maybe_parent_round_id: Option, + /// The set of validators that appear to be inactive in this era. + /// This is `None` in round 0 and in dummy blocks. + pub(super) inactive: Option>, +} + +impl Proposal { + /// Creates a new proposal with no block. 
This must be used if an ancestor would be the + /// switch block, since no blocks can come after the switch block. + pub(super) fn dummy(timestamp: Timestamp, parent_round_id: RoundId) -> Self { + Proposal { + timestamp, + maybe_block: None, + maybe_parent_round_id: Some(parent_round_id), + inactive: None, + } + } + + /// Creates a new proposal with the given block and parent round. If the parent round is none + /// it is proposed as the first block in this era. + pub(super) fn with_block( + proposed_block: &ProposedBlock, + maybe_parent_round_id: Option, + inactive: impl Iterator, + ) -> Self { + Proposal { + maybe_block: Some(proposed_block.value().clone()), + timestamp: proposed_block.context().timestamp(), + maybe_parent_round_id, + inactive: maybe_parent_round_id.map(|_| inactive.collect()), + } + } + + /// Returns the proposal hash. + #[cfg(test)] // Only used in tests; in production use HashedProposal below. + pub(super) fn hash(&self) -> C::Hash { + let serialized = bincode::serialize(&self).expect("failed to serialize fields"); + ::hash(&serialized) + } +} + +/// A proposal with its memoized hash. 
+#[derive(Clone, Hash, Debug, PartialEq, Eq, DataSize)] +pub(crate) struct HashedProposal +where + C: Context, +{ + hash: C::Hash, + proposal: Proposal, +} + +impl HashedProposal { + pub(crate) fn new(proposal: Proposal) -> Self { + let serialized = bincode::serialize(&proposal).expect("failed to serialize fields"); + let hash = ::hash(&serialized); + HashedProposal { hash, proposal } + } + + pub(crate) fn hash(&self) -> &C::Hash { + &self.hash + } + + pub(crate) fn inner(&self) -> &Proposal { + &self.proposal + } + + pub(crate) fn into_inner(self) -> Proposal { + self.proposal + } + + pub(crate) fn maybe_block(&self) -> Option<&C::ConsensusValue> { + self.proposal.maybe_block.as_ref() + } + + pub(crate) fn timestamp(&self) -> Timestamp { + self.proposal.timestamp + } + + pub(crate) fn inactive(&self) -> Option<&BTreeSet> { + self.proposal.inactive.as_ref() + } + + pub(crate) fn maybe_parent_round_id(&self) -> Option { + self.proposal.maybe_parent_round_id + } +} + +impl fmt::Display for Proposal { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match &self.maybe_block { + None => write!(f, "dummy proposal at {}", self.timestamp), + Some(block) => write!(f, "proposal at {}: {}", self.timestamp, block), + } + } +} + +impl fmt::Display for HashedProposal { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}, hash {}", self.proposal, self.hash) + } +} diff --git a/node/src/components/consensus/protocols/zug/round.rs b/node/src/components/consensus/protocols/zug/round.rs new file mode 100644 index 0000000000..b0d8eacf9b --- /dev/null +++ b/node/src/components/consensus/protocols/zug/round.rs @@ -0,0 +1,239 @@ +use std::{ + collections::{BTreeMap, HashMap}, + fmt::Debug, +}; + +use datasize::DataSize; +use serde::{Deserialize, Serialize}; + +use crate::{ + components::consensus::{ + protocols::zug::{Content, HashedProposal}, + traits::Context, + utils::{ValidatorIndex, ValidatorMap}, + }, + utils::ds, +}; + +/// The protocol 
proceeds in rounds, for each of which we must +/// keep track of proposals, echoes, votes, and the current outcome +/// of the round. +#[derive(Debug, DataSize, PartialEq)] +pub(crate) struct Round +where + C: Context, +{ + /// The leader, who is allowed to create a proposal in this round. + leader_idx: ValidatorIndex, + /// The unique proposal signed by the leader, or the unique proposal with a quorum of echoes. + proposal: Option>, + /// The echoes we've received for each proposal so far. + #[data_size(with = ds::hashmap_sample)] + echoes: HashMap>, + /// The votes we've received for this round so far. + votes: BTreeMap>>, + /// The memoized results in this round. + outcome: RoundOutcome, +} + +impl Round { + /// Creates a new [`Round`] with no proposals, echoes, votes, and empty + /// round outcome. + pub(super) fn new(validator_count: usize, leader_idx: ValidatorIndex) -> Round { + let mut votes = BTreeMap::new(); + votes.insert(false, vec![None; validator_count].into()); + votes.insert(true, vec![None; validator_count].into()); + Round { + leader_idx, + proposal: None, + echoes: HashMap::new(), + votes, + outcome: RoundOutcome::default(), + } + } + + /// Returns the map of all proposals sent to us this round from the leader + pub(super) fn proposal(&self) -> Option<&HashedProposal> { + self.proposal.as_ref() + } + + /// Returns whether we have received at least one proposal. + pub(super) fn has_proposal(&self) -> bool { + self.proposal.is_some() + } + + /// Returns whether this proposal is justified by an echo signature from the round leader or by + /// a quorum of echoes. 
+ pub(super) fn has_echoes_for_proposal(&self, hash: &C::Hash) -> bool { + match (self.quorum_echoes(), self.echoes.get(hash)) { + (Some(quorum_hash), _) => quorum_hash == *hash, + (None, Some(echo_map)) => echo_map.contains_key(&self.leader_idx), + (None, None) => false, + } + } + + /// Inserts a `Proposal` and returns `false` if we already had it or it cannot be added due to + /// missing echoes. + pub(super) fn insert_proposal(&mut self, proposal: HashedProposal) -> bool { + let hash = proposal.hash(); + if self.has_echoes_for_proposal(hash) && self.proposal.as_ref() != Some(&proposal) { + self.proposal = Some(proposal); + true + } else { + false + } + } + + /// Returns the echoes we've received for each proposal so far. + pub(super) fn echoes(&self) -> &HashMap> { + &self.echoes + } + + /// Inserts an `Echo`; returns `false` if we already had it. + pub(super) fn insert_echo( + &mut self, + hash: C::Hash, + validator_idx: ValidatorIndex, + signature: C::Signature, + ) -> bool { + self.echoes + .entry(hash) + .or_default() + .insert(validator_idx, signature) + .is_none() + } + + /// Returns whether the validator has already sent an `Echo` in this round. + pub(super) fn has_echoed(&self, validator_idx: ValidatorIndex) -> bool { + self.echoes + .values() + .any(|echo_map| echo_map.contains_key(&validator_idx)) + } + + /// Stores in the outcome that we have a quorum of echoes for this hash. + pub(super) fn set_quorum_echoes(&mut self, hash: C::Hash) { + self.outcome.quorum_echoes = Some(hash); + if self + .proposal + .as_ref() + .is_some_and(|proposal| *proposal.hash() != hash) + { + self.proposal = None; + } + } + + /// Returns the hash for which we have a quorum of echoes, if any. + pub(super) fn quorum_echoes(&self) -> Option { + self.outcome.quorum_echoes + } + + /// Returns the votes we've received for this round so far. 
+ pub(super) fn votes(&self, vote: bool) -> &ValidatorMap> { + &self.votes[&vote] + } + + /// Inserts a `Vote`; returns `false` if we already had it. + pub(super) fn insert_vote( + &mut self, + vote: bool, + validator_idx: ValidatorIndex, + signature: C::Signature, + ) -> bool { + // Safe to unwrap: Both `true` and `false` entries were created in `new`. + let votes_map = self.votes.get_mut(&vote).unwrap(); + if votes_map[validator_idx].is_none() { + votes_map[validator_idx] = Some(signature); + true + } else { + false + } + } + + /// Returns whether the validator has already cast a `true` or `false` vote. + pub(super) fn has_voted(&self, validator_idx: ValidatorIndex) -> bool { + self.votes(true)[validator_idx].is_some() || self.votes(false)[validator_idx].is_some() + } + + /// Stores in the outcome that we have a quorum of votes for this value. + pub(super) fn set_quorum_votes(&mut self, vote: bool) { + self.outcome.quorum_votes = Some(vote); + } + + /// Returns the value for which we have a quorum of votes, if any. + pub(super) fn quorum_votes(&self) -> Option { + self.outcome.quorum_votes + } + + /// Removes all votes and echoes from the given validator. + pub(super) fn remove_votes_and_echoes(&mut self, validator_idx: ValidatorIndex) { + self.votes.get_mut(&false).unwrap()[validator_idx] = None; + self.votes.get_mut(&true).unwrap()[validator_idx] = None; + self.echoes.retain(|_, echo_map| { + echo_map.remove(&validator_idx); + !echo_map.is_empty() + }); + } + + /// Updates the outcome and marks the proposal that has a quorum of echoes as accepted. It also + /// stores the proposal's block height. + pub(super) fn set_accepted_proposal_height(&mut self, height: u64) { + self.outcome.accepted_proposal_height = Some(height); + } + + /// Returns the accepted proposal, if any, together with its height. 
+ pub(super) fn accepted_proposal(&self) -> Option<(u64, &HashedProposal)> { + let height = self.outcome.accepted_proposal_height?; + let proposal = self.proposal.as_ref()?; + Some((height, proposal)) + } + + /// Check if the round has already received this message. + pub(super) fn contains(&self, content: &Content, validator_idx: ValidatorIndex) -> bool { + match content { + Content::Echo(hash) => self + .echoes + .get(hash) + .is_some_and(|echo_map| echo_map.contains_key(&validator_idx)), + Content::Vote(vote) => self.votes[vote][validator_idx].is_some(), + } + } + + /// Removes the proposal: This round was skipped and will never become finalized. + pub(super) fn prune_skipped(&mut self) { + self.proposal = None; + self.outcome.accepted_proposal_height = None; + } + + /// Returns the validator index of this round's leader. + pub(super) fn leader(&self) -> ValidatorIndex { + self.leader_idx + } +} + +/// Indicates the outcome of a given round. +#[derive(Clone, Serialize, Deserialize, Debug, PartialEq, Eq, DataSize)] +#[serde(bound( + serialize = "C::Hash: Serialize", + deserialize = "C::Hash: Deserialize<'de>", +))] +pub(crate) struct RoundOutcome +where + C: Context, +{ + /// This is `Some(h)` if there is an accepted proposal with relative height `h`, i.e. there is + /// a quorum of echoes, `h` accepted ancestors, and all rounds since the parent's are + /// skippable. 
+ accepted_proposal_height: Option, + quorum_echoes: Option, + quorum_votes: Option, +} + +impl Default for RoundOutcome { + fn default() -> RoundOutcome { + RoundOutcome { + accepted_proposal_height: None, + quorum_echoes: None, + quorum_votes: None, + } + } +} diff --git a/node/src/components/consensus/protocols/zug/tests.rs b/node/src/components/consensus/protocols/zug/tests.rs new file mode 100644 index 0000000000..1fd2c9470c --- /dev/null +++ b/node/src/components/consensus/protocols/zug/tests.rs @@ -0,0 +1,1053 @@ +use super::{registered_sync::RandomId, *}; + +use std::{collections::BTreeSet, sync::Arc}; + +use casper_types::{PublicKey, SecretKey, Timestamp, U512}; +use tempfile::tempdir; +use tracing::info; + +use crate::{ + components::consensus::{ + cl_context::{ClContext, Keypair}, + config::Config, + consensus_protocol::{ConsensusProtocol, ProtocolOutcome}, + leader_sequence, + protocols::common, + tests::utils::{ + new_test_chainspec, ALICE_NODE_ID, ALICE_PUBLIC_KEY, ALICE_SECRET_KEY, BOB_PUBLIC_KEY, + BOB_SECRET_KEY, CAROL_PUBLIC_KEY, CAROL_SECRET_KEY, + }, + traits::Context, + }, + testing, + types::BlockPayload, +}; + +const INSTANCE_ID_DATA: &[u8; 1] = &[123u8; 1]; + +/// Creates a new `Zug` instance. +/// +/// The random seed is selected so that the leader sequence starts with `seq`. 
+pub(crate) fn new_test_zug( + weights: I1, + init_faulty: I2, + seq: &[ValidatorIndex], +) -> Zug +where + I1: IntoIterator, + I2: IntoIterator, + T: Into, +{ + let weights = weights + .into_iter() + .map(|(pk, w)| (pk, w.into())) + .collect::>(); + let mut chainspec = new_test_chainspec(weights.clone()); + chainspec.core_config.minimum_era_height = 3; + let config = Config::default(); + let validators = common::validators::( + &Default::default(), + &Default::default(), + weights.iter().cloned().collect(), + ); + let weights_vmap = common::validator_weights::(&validators); + let leaders = weights.iter().map(|_| true).collect(); + let seed = leader_sequence::find_seed(seq, &weights_vmap, &leaders); + // Timestamp of the genesis era start and test start. + let start_timestamp: Timestamp = 0.into(); + Zug::::new( + ClContext::hash(INSTANCE_ID_DATA), + weights.into_iter().collect(), + &init_faulty.into_iter().collect(), + &None.into_iter().collect(), + &chainspec, + &config, + None, + start_timestamp, + seed, + ) +} + +/// Creates a `signed_message` +fn create_signed_message( + validators: &Validators, + round_id: RoundId, + content: Content, + keypair: &Keypair, +) -> SignedMessage { + let validator_idx = validators.get_index(keypair.public_key()).unwrap(); + let instance_id = ClContext::hash(INSTANCE_ID_DATA); + SignedMessage::sign_new(round_id, instance_id, content, validator_idx, keypair) +} + +/// Creates a `Message::Signed`. 
+fn create_message( + validators: &Validators, + round_id: RoundId, + content: Content, + keypair: &Keypair, +) -> SerializedMessage { + let signed_msg = create_signed_message(validators, round_id, content, keypair); + SerializedMessage::from_message(&Message::Signed(signed_msg)) +} + +/// Creates a `Message::Proposal` +fn create_proposal_message( + round_id: RoundId, + proposal: &Proposal, + validators: &Validators, + keypair: &Keypair, +) -> SerializedMessage { + let hashed_proposal = HashedProposal::new(proposal.clone()); + let echo_content = Content::Echo(*hashed_proposal.hash()); + let echo = create_signed_message(validators, round_id, echo_content, keypair); + SerializedMessage::from_message(&Message::Proposal { + round_id, + instance_id: ClContext::hash(INSTANCE_ID_DATA), + proposal: proposal.clone(), + echo, + }) +} + +/// Removes all `CreatedGossipMessage`s from `outcomes` and returns the messages, after +/// verifying the signatures and instance ID. +fn remove_gossip( + validators: &Validators, + outcomes: &mut ProtocolOutcomes, +) -> Vec> { + let mut result = Vec::new(); + let expected_instance_id = ClContext::hash(INSTANCE_ID_DATA); + outcomes.retain(|outcome| { + let msg = match outcome { + ProtocolOutcome::CreatedGossipMessage(serialized_msg) => { + serialized_msg.deserialize_expect::>() + } + _ => return true, + }; + assert_eq!(*msg.instance_id(), expected_instance_id); + if let Message::Signed(ref signed_msg) = msg { + let public_key = validators + .id(signed_msg.validator_idx) + .expect("validator ID") + .clone(); + assert!(signed_msg.verify_signature(&public_key)); + } + result.push(msg); + false + }); + result +} + +/// Removes the expected signed message; returns `true` if found. 
+fn remove_signed( + gossip: &mut Vec>, + expected_round_id: RoundId, + expected_validator_idx: ValidatorIndex, + expected_content: Content, +) -> bool { + let maybe_pos = gossip.iter().position(|message| { + if let Message::Signed(SignedMessage { + round_id, + instance_id: _, + content, + validator_idx, + signature: _, + }) = &message + { + *round_id == expected_round_id + && *validator_idx == expected_validator_idx + && *content == expected_content + } else { + false + } + }); + if let Some(pos) = maybe_pos { + gossip.remove(pos); + true + } else { + false + } +} + +/// Removes the expected proposal message; returns `true` if found. +fn remove_proposal( + gossip: &mut Vec>, + expected_round_id: RoundId, + expected_proposal: &Proposal, +) -> bool { + let maybe_pos = gossip.iter().position(|message| { + if let Message::Proposal { + round_id, + instance_id: _, + proposal, + echo: _, + } = &message + { + *round_id == expected_round_id && proposal == expected_proposal + } else { + false + } + }); + if let Some(pos) = maybe_pos { + gossip.remove(pos); + true + } else { + false + } +} + +/// Removes all `CreatedRequestToRandomPeer`s from `outcomes` and returns the deserialized messages. +fn remove_requests_to_random( + outcomes: &mut ProtocolOutcomes, +) -> Vec> { + let mut result = Vec::new(); + let expected_instance_id = ClContext::hash(INSTANCE_ID_DATA); + outcomes.retain(|outcome| { + let msg: SyncRequest = match outcome { + ProtocolOutcome::CreatedRequestToRandomPeer(msg) => msg.deserialize_expect(), + _ => return true, + }; + assert_eq!(msg.instance_id, expected_instance_id); + result.push(msg); + false + }); + result +} + +/// Removes all `CreatedTargetedMessage`s from `outcomes` and returns the content of +/// all `Message::Signed`, after verifying the signatures. 
+fn remove_targeted_messages( + validators: &Validators, + expected_peer: NodeId, + outcomes: &mut ProtocolOutcomes, +) -> Vec> { + let mut result = Vec::new(); + let expected_instance_id = ClContext::hash(INSTANCE_ID_DATA); + outcomes.retain(|outcome| { + let (msg, peer) = match outcome { + ProtocolOutcome::CreatedTargetedMessage(serialized_message, peer) => ( + serialized_message.deserialize_expect::>(), + *peer, + ), + _ => return true, + }; + if peer != expected_peer { + return true; + } + assert_eq!(*msg.instance_id(), expected_instance_id); + if let Message::Signed(ref signed_msg) = msg { + let public_key = validators + .id(signed_msg.validator_idx) + .expect("validator ID") + .clone(); + assert!(signed_msg.verify_signature(&public_key)); + } + result.push(msg); + false + }); + result +} + +/// Expects exactly one `CreateNewBlock` in `outcomes`, removes and returns it. +fn remove_create_new_block(outcomes: &mut ProtocolOutcomes) -> BlockContext { + let mut result = None; + outcomes.retain(|outcome| match outcome { + ProtocolOutcome::CreateNewBlock(block_context, _) => { + if let Some(other_context) = result.replace(block_context.clone()) { + panic!( + "got multiple CreateNewBlock outcomes: {:?}, {:?}", + other_context, block_context + ); + } + false + } + _ => true, + }); + result.expect("missing CreateNewBlock outcome") +} + +/// Checks that the `proposals` match the `FinalizedBlock` outcomes. 
+fn expect_finalized( + outcomes: &ProtocolOutcomes, + proposals: &[(&Proposal, u64)], +) { + let mut proposals_iter = proposals.iter(); + for outcome in outcomes { + if let ProtocolOutcome::FinalizedBlock(fb) = outcome { + if let Some(&(proposal, rel_height)) = proposals_iter.next() { + assert_eq!(fb.relative_height, rel_height); + assert_eq!(fb.timestamp, proposal.timestamp); + assert_eq!(Some(&fb.value), proposal.maybe_block.as_ref()); + } else { + panic!("unexpected finalized block {:?}", fb); + } + } + } + assert_eq!(None, proposals_iter.next(), "missing finalized proposal"); +} + +/// Checks that `outcomes` contains no `FinalizedBlock`, `CreateNewBlock` or `CreatedGossipMessage`. +fn expect_no_gossip_block_finalized(outcomes: ProtocolOutcomes) { + for outcome in outcomes { + match outcome { + ProtocolOutcome::FinalizedBlock(fb) => panic!("unexpected finalized block: {:?}", fb), + ProtocolOutcome::CreatedGossipMessage(msg) => { + panic!("unexpected gossip message {:?}", msg); + } + ProtocolOutcome::CreateNewBlock(block_context, expiry) => { + panic!( + "unexpected CreateNewBlock: {:?} exp. {}", + block_context, expiry + ); + } + _ => {} + } + } +} + +/// Checks that the expected timer was requested by the protocol. +fn expect_timer(outcomes: &ProtocolOutcomes, timestamp: Timestamp, timer_id: TimerId) { + assert!( + outcomes.contains(&ProtocolOutcome::ScheduleTimer(timestamp, timer_id)), + "missing timer {} for {:?} from {:?}", + timer_id.0, + timestamp, + outcomes + ); +} + +/// Creates a new payload with the given random bit and no deploys or transfers. 
+fn new_payload(random_bit: bool) -> Arc { + Arc::new(BlockPayload::new( + BTreeMap::new(), + vec![], + Default::default(), + random_bit, + 1u8, + )) +} + +fn vote(v: bool) -> Content { + Content::Vote(v) +} + +fn echo(hash: ::Hash) -> Content { + Content::Echo(hash) +} + +fn abc_weights( + alice_w: u64, + bob_w: u64, + carol_w: u64, +) -> (Vec<(PublicKey, U512)>, Validators) { + let weights: Vec<(PublicKey, U512)> = vec![ + (ALICE_PUBLIC_KEY.clone(), U512::from(alice_w)), + (BOB_PUBLIC_KEY.clone(), U512::from(bob_w)), + (CAROL_PUBLIC_KEY.clone(), U512::from(carol_w)), + ]; + let validators = common::validators::( + &Default::default(), + &Default::default(), + weights.iter().cloned().collect(), + ); + (weights, validators) +} + +/// Tests the core logic of the consensus protocol, i.e. the criteria for sending votes and echoes +/// and finalizing blocks. +/// +/// In this scenario Alice has 60%, Bob 30% and Carol 10% of the weight, and we create Carol's +/// consensus instance. Bob makes a proposal in round 0. Alice doesn't see it and makes a proposal +/// without a parent (skipping round 0) in round 1, and proposes a child of that one in round 2. +/// +/// The fork is resolved in Alice's favor: Round 0 becomes skippable and round 2 committed, so +/// Alice's two blocks become finalized. +#[test] +fn zug_no_fault() { + testing::init_logging(); + let mut rng = crate::new_rng(); + let (weights, validators) = abc_weights(60, 30, 10); + let alice_idx = validators.get_index(&*ALICE_PUBLIC_KEY).unwrap(); + let bob_idx = validators.get_index(&*BOB_PUBLIC_KEY).unwrap(); + let carol_idx = validators.get_index(&*CAROL_PUBLIC_KEY).unwrap(); + let sender = *ALICE_NODE_ID; + + let mut timestamp = Timestamp::from(100000); + + // The first round leaders are Bob, Alice, Alice, Carol, Carol. 
+ let leader_seq = &[bob_idx, alice_idx, alice_idx, carol_idx, carol_idx]; + let mut sc_c = new_test_zug(weights.clone(), vec![], leader_seq); + let dir = tempdir().unwrap(); + sc_c.open_wal(dir.path().join("wal"), timestamp); + + let alice_kp = Keypair::from(ALICE_SECRET_KEY.clone()); + let bob_kp = Keypair::from(BOB_SECRET_KEY.clone()); + let carol_kp = Keypair::from(CAROL_SECRET_KEY.clone()); + + sc_c.activate_validator(CAROL_PUBLIC_KEY.clone(), carol_kp, Timestamp::now(), None); + + let block_time = sc_c.params.min_block_time(); + let proposal_timeout = sc_c.proposal_timeout(); + + let proposal0 = Proposal:: { + timestamp, + maybe_block: Some(new_payload(false)), + maybe_parent_round_id: None, + inactive: None, + }; + let hash0 = proposal0.hash(); + + let proposal1 = Proposal { + timestamp: proposal0.timestamp + block_time, + maybe_block: Some(new_payload(true)), + maybe_parent_round_id: None, + inactive: None, + }; + let hash1 = proposal1.hash(); + + let proposal2 = Proposal { + timestamp: proposal1.timestamp + block_time, + maybe_block: Some(new_payload(true)), + maybe_parent_round_id: Some(1), + inactive: Some(Default::default()), + }; + let hash2 = proposal2.hash(); + + let proposal3 = Proposal { + timestamp: proposal2.timestamp + block_time, + maybe_block: Some(new_payload(false)), + maybe_parent_round_id: Some(2), + inactive: Some(Default::default()), + }; + let hash3 = proposal3.hash(); + + let proposal4 = Proposal:: { + timestamp: proposal3.timestamp + block_time, + maybe_block: None, + maybe_parent_round_id: Some(3), + inactive: None, + }; + + // Carol's node joins a bit late, and gets some messages out of order. + timestamp += block_time; + + // Alice makes a proposal in round 2 with parent in round 1. Alice and Bob echo it. 
+ let msg = create_proposal_message(2, &proposal2, &validators, &alice_kp); + expect_no_gossip_block_finalized(sc_c.handle_message(&mut rng, sender, msg, timestamp)); + let msg = create_message(&validators, 2, echo(hash2), &bob_kp); + expect_no_gossip_block_finalized(sc_c.handle_message(&mut rng, sender, msg, timestamp)); + + // Alice and Bob even vote for it, so the round is committed! + // But without an accepted parent it isn't finalized yet. + let msg = create_message(&validators, 2, vote(true), &alice_kp); + expect_no_gossip_block_finalized(sc_c.handle_message(&mut rng, sender, msg, timestamp)); + let msg = create_message(&validators, 2, vote(true), &bob_kp); + expect_no_gossip_block_finalized(sc_c.handle_message(&mut rng, sender, msg, timestamp)); + + // Alice makes a proposal in round 1 with no parent, and echoes it. + let msg = create_proposal_message(1, &proposal1, &validators, &alice_kp); + expect_no_gossip_block_finalized(sc_c.handle_message(&mut rng, sender, msg, timestamp)); + + // Now Carol receives Bob's proposal in round 0. Carol echoes it. + let msg = create_proposal_message(0, &proposal0, &validators, &bob_kp); + let mut outcomes = sc_c.handle_message(&mut rng, sender, msg, timestamp); + let mut gossip = remove_gossip(&validators, &mut outcomes); + assert!(remove_signed(&mut gossip, 0, carol_idx, echo(hash0))); + assert!(gossip.is_empty(), "unexpected gossip: {:?}", gossip); + expect_no_gossip_block_finalized(outcomes); + + timestamp += block_time; + + // The first proposal message Carol received had a timestamp in the future, so she didn't store + // the proposal. Re-send it to her so that she has a chance to store it now. + let msg = create_proposal_message(2, &proposal2, &validators, &alice_kp); + expect_no_gossip_block_finalized(sc_c.handle_message(&mut rng, sender, msg, timestamp)); + + // On timeout, Carol votes to make round 0 skippable. 
+ let mut outcomes = sc_c.handle_timer(timestamp, timestamp, TIMER_ID_UPDATE, &mut rng); + let mut gossip = remove_gossip(&validators, &mut outcomes); + assert!(remove_signed(&mut gossip, 0, carol_idx, vote(false))); + expect_no_gossip_block_finalized(outcomes); + + // Alice also echoes Bob's round 0 proposal, so it has a quorum and is accepted. With that round + // 1 becomes current and Carol echoes Alice's proposal. That makes a quorum, but since round + // 0 is not skippable round 1 is not yet accepted and thus round 2 is not yet current. + let msg = create_message(&validators, 0, echo(hash0), &alice_kp); + let mut outcomes = sc_c.handle_message(&mut rng, sender, msg, timestamp); + let mut gossip = remove_gossip(&validators, &mut outcomes); + assert!(remove_signed(&mut gossip, 1, carol_idx, echo(hash1))); + assert!(gossip.is_empty(), "unexpected gossip: {:?}", gossip); + let timeout = timestamp + sc_c.proposal_timeout(); + expect_timer(&outcomes, timeout, TIMER_ID_UPDATE); + + // Bob votes false in round 0. That's not a quorum yet. + let msg = create_message(&validators, 0, vote(false), &bob_kp); + expect_no_gossip_block_finalized(sc_c.handle_message(&mut rng, sender, msg, timestamp)); + + // On timeout, Carol votes to make round 1 skippable. + // TODO: Come up with a better test scenario where timestamps are in order. + let mut outcomes = sc_c.handle_timer( + timestamp + proposal_timeout * 2, + timestamp + proposal_timeout * 2, + TIMER_ID_UPDATE, + &mut rng, + ); + let mut gossip = remove_gossip(&validators, &mut outcomes); + assert!(remove_signed(&mut gossip, 1, carol_idx, vote(false))); + assert!(gossip.is_empty(), "unexpected gossip: {:?}", gossip); + + // But with Alice's vote round 0 becomes skippable. That means rounds 1 and 2 are now accepted + // and Carol votes for them. Since round 2 is already committed, both 1 and 2 are finalized. + // Since round 2 became current, Carol echoes the proposal, too. 
+ let msg = create_message(&validators, 0, vote(false), &alice_kp); + let mut outcomes = sc_c.handle_message(&mut rng, sender, msg, timestamp); + let mut gossip = remove_gossip(&validators, &mut outcomes); + assert!(remove_signed(&mut gossip, 2, carol_idx, echo(hash2))); + assert!(remove_signed(&mut gossip, 2, carol_idx, vote(true))); + assert!(gossip.is_empty(), "unexpected gossip: {:?}", gossip); + expect_finalized(&outcomes, &[(&proposal1, 0), (&proposal2, 1)]); + expect_timer(&outcomes, timestamp + block_time, TIMER_ID_UPDATE); + + timestamp += block_time; + + // In round 3 Carol is the leader, so she creates a new block to propose. + let mut outcomes = sc_c.handle_timer(timestamp, timestamp, TIMER_ID_UPDATE, &mut rng); + let block_context = remove_create_new_block(&mut outcomes); + expect_no_gossip_block_finalized(outcomes); + assert_eq!(block_context.timestamp(), timestamp); + assert_eq!(block_context.ancestor_values().len(), 2); + + let proposed_block = ProposedBlock::new(new_payload(false), block_context); + let mut outcomes = sc_c.propose(proposed_block, timestamp); + let mut gossip = remove_gossip(&validators, &mut outcomes); + assert!(remove_proposal(&mut gossip, 3, &proposal3)); + assert!(gossip.is_empty(), "unexpected gossip: {:?}", gossip); + + timestamp += block_time; + + // Once Alice echoes Carol's proposal, she can go on to propose in round 4, too. + // Since the round height is 3, the 4th proposal does not contain a block. + let msg = create_message(&validators, 3, echo(hash3), &alice_kp); + let mut outcomes = sc_c.handle_message(&mut rng, sender, msg, timestamp); + let mut gossip = remove_gossip(&validators, &mut outcomes); + assert!(remove_signed(&mut gossip, 3, carol_idx, vote(true))); + assert!(remove_proposal(&mut gossip, 4, &proposal4)); + assert!(gossip.is_empty(), "unexpected gossip: {:?}", gossip); + + // Only when Alice also votes for the switch block is it finalized. 
+ assert!(!sc_c.finalized_switch_block()); + let msg = create_message(&validators, 3, vote(true), &alice_kp); + let mut outcomes = sc_c.handle_message(&mut rng, sender, msg, timestamp); + let gossip = remove_gossip(&validators, &mut outcomes); + assert!(gossip.is_empty(), "unexpected gossip: {:?}", gossip); + expect_finalized(&outcomes, &[(&proposal3, 2)]); + assert!(sc_c.finalized_switch_block()); + + info!("restoring protocol now"); + + let mut zug = new_test_zug(weights, vec![], leader_seq); + zug.open_wal(dir.path().join("wal"), timestamp); + let outcomes = zug.handle_timer(timestamp, timestamp, TIMER_ID_UPDATE, &mut rng); + let proposals123 = [(&proposal1, 0), (&proposal2, 1), (&proposal3, 2)]; + expect_finalized(&outcomes, &proposals123); + assert!(zug.finalized_switch_block()); +} + +/// Tests that a faulty validator counts towards every quorum. +/// +/// In this scenario Alice has 60% of the weight, Bob 10% and Carol 30%. Carol is offline and Bob is +/// faulty. Alice proposes a few blocks but can't finalize them alone. Once Bob double-signs, he +/// counts towards every quorum and Alice's messages suffice to finalize her blocks. +#[test] +fn zug_faults() { + let mut rng = crate::new_rng(); + let (weights, validators) = abc_weights(60, 10, 30); + let alice_idx = validators.get_index(&*ALICE_PUBLIC_KEY).unwrap(); + let carol_idx = validators.get_index(&*CAROL_PUBLIC_KEY).unwrap(); + + // The first round leaders are Carol, Alice, Alice. 
+ let mut zug = new_test_zug(weights, vec![], &[carol_idx, alice_idx, alice_idx]); + + let alice_kp = Keypair::from(ALICE_SECRET_KEY.clone()); + let bob_kp = Keypair::from(BOB_SECRET_KEY.clone()); + let carol_kp = Keypair::from(CAROL_SECRET_KEY.clone()); + + let sender = *ALICE_NODE_ID; + let mut timestamp = Timestamp::now(); + + let proposal1 = Proposal { + timestamp, + maybe_block: Some(new_payload(true)), + maybe_parent_round_id: None, + inactive: None, + }; + + let proposal2 = Proposal { + timestamp: timestamp + zug.params.min_block_time(), + maybe_block: Some(new_payload(true)), + maybe_parent_round_id: Some(1), + inactive: Some(iter::once(carol_idx).collect()), + }; + + timestamp += zug.params.min_block_time(); + + // Alice makes sproposals in rounds 1 and 2, echoes and votes for them. + let msg = create_proposal_message(1, &proposal1, &validators, &alice_kp); + expect_no_gossip_block_finalized(zug.handle_message(&mut rng, sender, msg, timestamp)); + let msg = create_message(&validators, 1, vote(true), &alice_kp); + expect_no_gossip_block_finalized(zug.handle_message(&mut rng, sender, msg, timestamp)); + let msg = create_proposal_message(2, &proposal2, &validators, &alice_kp); + expect_no_gossip_block_finalized(zug.handle_message(&mut rng, sender, msg, timestamp)); + let msg = create_message(&validators, 2, vote(true), &alice_kp); + expect_no_gossip_block_finalized(zug.handle_message(&mut rng, sender, msg, timestamp)); + + // Since Carol did not make a proposal Alice votes to make round 0 skippable. + let msg = create_message(&validators, 0, vote(false), &alice_kp); + expect_no_gossip_block_finalized(zug.handle_message(&mut rng, sender, msg, timestamp)); + + // Carol is offline and Alice alone does not have a quorum. + // But if Bob equivocates, he counts towards every quorum, so the blocks get finalized. 
+ let msg = create_message(&validators, 3, vote(true), &bob_kp); + expect_no_gossip_block_finalized(zug.handle_message(&mut rng, sender, msg, timestamp)); + let msg = create_message(&validators, 3, vote(false), &bob_kp); + let outcomes = zug.handle_message(&mut rng, sender, msg, timestamp); + expect_finalized(&outcomes, &[(&proposal1, 0), (&proposal2, 1)]); + + // Now Carol starts two nodes by mistake, and equivocates. That crosses the FTT. + let msg = create_message(&validators, 3, vote(true), &carol_kp); + expect_no_gossip_block_finalized(zug.handle_message(&mut rng, sender, msg, timestamp)); + let msg = create_message(&validators, 3, vote(false), &carol_kp); + let outcomes = zug.handle_message(&mut rng, sender, msg, timestamp); + assert!(outcomes.contains(&ProtocolOutcome::FttExceeded)); +} + +/// Tests that a `SyncRequest` message is periodically sent to a random peer. +#[test] +fn zug_sends_sync_request() { + let mut rng = crate::new_rng(); + let (weights, validators) = abc_weights(50, 40, 10); + let alice_idx = validators.get_index(&*ALICE_PUBLIC_KEY).unwrap(); + let bob_idx = validators.get_index(&*BOB_PUBLIC_KEY).unwrap(); + let carol_idx = validators.get_index(&*CAROL_PUBLIC_KEY).unwrap(); + + // The first round leader is Alice. 
+ let mut zug = new_test_zug(weights, vec![], &[alice_idx]); + + let alice_kp = Keypair::from(ALICE_SECRET_KEY.clone()); + let bob_kp = Keypair::from(BOB_SECRET_KEY.clone()); + let carol_kp = Keypair::from(CAROL_SECRET_KEY.clone()); + + let timeout = zug.config.sync_state_interval.expect("request state timer"); + let sender = *ALICE_NODE_ID; + let mut timestamp = Timestamp::from(100000); + + let proposal0 = Proposal:: { + timestamp, + maybe_block: Some(new_payload(false)), + maybe_parent_round_id: None, + inactive: None, + }; + let hash0 = proposal0.hash(); + + let outcomes = zug.handle_is_current(timestamp); + expect_timer(&outcomes, timestamp + timeout, TIMER_ID_SYNC_PEER); + + timestamp += timeout; + + // The protocol state is empty and the SyncRequest should reflect that. + let mut outcomes = zug.handle_timer(timestamp, timestamp, TIMER_ID_SYNC_PEER, &mut rng); + expect_timer(&outcomes, timestamp + timeout, TIMER_ID_SYNC_PEER); + let mut msg_iter = remove_requests_to_random(&mut outcomes).into_iter(); + match (msg_iter.next(), msg_iter.next()) { + ( + Some(SyncRequest { + round_id: 0, + proposal_hash: None, + has_proposal: false, + first_validator_idx: _, + echoes: 0, + true_votes: 0, + false_votes: 0, + active: 0, + faulty: 0, + instance_id: _, + sync_id: _, + }), + None, + ) => {} + (msg0, msg1) => panic!("unexpected messages: {:?}, {:?}", msg0, msg1), + } + + timestamp += timeout; + + // Now we get a proposal and echo from Alice, one false vote from Bob, and Carol double-signs. 
+ let msg = create_proposal_message(0, &proposal0, &validators, &alice_kp); + zug.handle_message(&mut rng, sender, msg, timestamp); + let msg = create_message(&validators, 0, vote(false), &bob_kp); + zug.handle_message(&mut rng, sender, msg, timestamp); + let msg = create_message(&validators, 0, vote(true), &carol_kp); + zug.handle_message(&mut rng, sender, msg, timestamp); + let msg = create_message(&validators, 0, vote(false), &carol_kp); + zug.handle_message(&mut rng, sender, msg, timestamp); + + // The next SyncRequest message must include all the new information. + let mut outcomes = zug.handle_timer(timestamp, timestamp, TIMER_ID_SYNC_PEER, &mut rng); + expect_timer(&outcomes, timestamp + timeout, TIMER_ID_SYNC_PEER); + let mut msg_iter = remove_requests_to_random(&mut outcomes).into_iter(); + match (msg_iter.next(), msg_iter.next()) { + ( + Some(SyncRequest { + round_id: 0, + proposal_hash: Some(hash), + has_proposal: true, + first_validator_idx, + echoes, + true_votes: 0, + false_votes, + active, + faulty, + instance_id: _, + sync_id: _, + }), + None, + ) => { + assert_eq!(hash0, hash); + let mut faulty_iter = zug.iter_validator_bit_field(first_validator_idx, faulty); + assert_eq!(Some(carol_idx), faulty_iter.next()); + assert_eq!(None, faulty_iter.next()); + let mut echoes_iter = zug.iter_validator_bit_field(first_validator_idx, echoes); + assert_eq!(Some(alice_idx), echoes_iter.next()); + assert_eq!(None, echoes_iter.next()); + let mut false_iter = zug.iter_validator_bit_field(first_validator_idx, false_votes); + assert_eq!(Some(bob_idx), false_iter.next()); + assert_eq!(None, false_iter.next()); + // When we marked Carol as faulty we removed her entry from the active list. 
+ let expected_active = + zug.validator_bit_field(first_validator_idx, vec![alice_idx, bob_idx].into_iter()); + assert_eq!(active, expected_active); + } + (msg0, msg1) => panic!("unexpected messages: {:?}, {:?}", msg0, msg1), + } +} + +/// Tests that we respond to a `SyncRequest` message with the missing signatures. +#[test] +fn zug_handles_sync_request() { + let mut rng = crate::new_rng(); + let (weights, validators) = abc_weights(50, 40, 10); + let alice_idx = validators.get_index(&*ALICE_PUBLIC_KEY).unwrap(); + let bob_idx = validators.get_index(&*BOB_PUBLIC_KEY).unwrap(); + let carol_idx = validators.get_index(&*CAROL_PUBLIC_KEY).unwrap(); + + // The first round leader is Alice. + let mut zug = new_test_zug(weights.clone(), vec![], &[alice_idx]); + + let alice_kp = Keypair::from(ALICE_SECRET_KEY.clone()); + let bob_kp = Keypair::from(BOB_SECRET_KEY.clone()); + let carol_kp = Keypair::from(CAROL_SECRET_KEY.clone()); + + let sender = *ALICE_NODE_ID; + let timestamp = Timestamp::from(100000); + + let proposal0 = Proposal { + timestamp, + maybe_block: Some(new_payload(false)), + maybe_parent_round_id: None, + inactive: None, + }; + let hash0 = proposal0.hash(); + + let proposal1 = Proposal:: { + timestamp, + maybe_block: Some(new_payload(true)), + maybe_parent_round_id: None, + inactive: None, + }; + let hash1 = proposal1.hash(); + + // We get a proposal, echo and true vote from Alice, one echo and false vote from Bob, and + // Carol double-signs. 
+ let msg = create_proposal_message(0, &proposal0, &validators, &alice_kp); + zug.handle_message(&mut rng, sender, msg, timestamp); + let msg = create_message(&validators, 0, echo(hash0), &bob_kp); + zug.handle_message(&mut rng, sender, msg, timestamp); + let msg = create_message(&validators, 0, vote(false), &bob_kp); + zug.handle_message(&mut rng, sender, msg, timestamp); + let msg = create_message(&validators, 0, vote(true), &alice_kp); + zug.handle_message(&mut rng, sender, msg, timestamp); + let msg = create_message(&validators, 0, vote(true), &carol_kp); + zug.handle_message(&mut rng, sender, msg, timestamp); + let msg = create_message(&validators, 0, vote(false), &carol_kp); + zug.handle_message(&mut rng, sender, msg, timestamp); + + let first_validator_idx = ValidatorIndex(rng.gen_range(0..3)); + let sync_id = RandomId::new(&mut rng); + + // The sender has everything we have except the proposal itself. + let msg = SyncRequest:: { + round_id: 0, + proposal_hash: Some(hash0), + has_proposal: false, + first_validator_idx, + echoes: zug.validator_bit_field(first_validator_idx, vec![alice_idx, bob_idx].into_iter()), + true_votes: zug + .validator_bit_field(first_validator_idx, vec![alice_idx, bob_idx].into_iter()), + false_votes: zug + .validator_bit_field(first_validator_idx, vec![alice_idx, bob_idx].into_iter()), + active: zug.validator_bit_field( + first_validator_idx, + vec![alice_idx, bob_idx, carol_idx].into_iter(), + ), + faulty: zug.validator_bit_field(first_validator_idx, vec![carol_idx].into_iter()), + instance_id: *zug.instance_id(), + sync_id, + }; + let (outcomes, response) = zug.handle_request_message( + &mut rng, + sender, + SerializedMessage::from_message(&msg), + timestamp, + ); + assert_eq!( + response + .expect("response") + .deserialize_expect::>(), + Message::SyncResponse(SyncResponse { + round_id: 0, + proposal_or_hash: Some(Either::Left(proposal0)), + echo_sigs: BTreeMap::new(), + true_vote_sigs: BTreeMap::new(), + false_vote_sigs: 
BTreeMap::new(), + signed_messages: Vec::new(), + evidence: Vec::new(), + instance_id: *zug.instance_id(), + sync_id, + }) + ); + expect_no_gossip_block_finalized(outcomes); + + // But if there are missing messages, these are sent back. + let sync_id = RandomId::new(&mut rng); + let msg = SyncRequest:: { + round_id: 0, + proposal_hash: Some(hash1), // Wrong proposal! + has_proposal: true, + first_validator_idx, + echoes: zug.validator_bit_field(first_validator_idx, vec![alice_idx].into_iter()), + true_votes: zug + .validator_bit_field(first_validator_idx, vec![bob_idx, alice_idx].into_iter()), + false_votes: zug.validator_bit_field(first_validator_idx, vec![].into_iter()), + active: zug.validator_bit_field(first_validator_idx, vec![alice_idx, bob_idx].into_iter()), + faulty: zug.validator_bit_field(first_validator_idx, vec![].into_iter()), + instance_id: *zug.instance_id(), + sync_id, + }; + let (mut outcomes, response) = zug.handle_request_message( + &mut rng, + sender, + SerializedMessage::from_message(&msg), + timestamp, + ); + assert_eq!( + remove_targeted_messages(&validators, sender, &mut outcomes), + vec![] + ); + expect_no_gossip_block_finalized(outcomes); + + let sync_response = match response.expect("response").deserialize_expect() { + Message::SyncResponse(sync_response) => sync_response, + result => panic!("unexpected message: {:?}", result), + }; + + assert_eq!(sync_response.round_id, 0); + assert_eq!(sync_response.proposal_or_hash, Some(Either::Right(hash0))); + assert_eq!( + sync_response.echo_sigs, + zug.round(0).unwrap().echoes()[&hash0] + ); + assert_eq!(sync_response.true_vote_sigs, BTreeMap::new()); + assert_eq!(sync_response.false_vote_sigs.len(), 1); + assert_eq!( + Some(sync_response.false_vote_sigs[&bob_idx]), + zug.round(0).unwrap().votes(false)[bob_idx] + ); + assert_eq!(sync_response.signed_messages, vec![]); + assert_eq!(sync_response.evidence.len(), 1); + assert_eq!(sync_response.sync_id, sync_id); + match (&sync_response.evidence[0], 
&zug.faults[&carol_idx]) { + ( + (signed_msg, content2, sig2), + Fault::Direct(expected_signed_msg, expected_content2, expected_sig2), + ) => { + assert_eq!(signed_msg, expected_signed_msg); + assert_eq!(content2, expected_content2); + assert_eq!(sig2, expected_sig2); + } + (evidence, fault) => panic!("unexpected evidence: {:?}, {:?}", evidence, fault), + } + + // Create a new instance that doesn't have any data yet, let it send two sync requests to Zug, + // and handle the responses. + let mut zug2 = new_test_zug(weights, vec![], &[alice_idx]); + for _ in 0..2 { + let mut outcomes = zug2.handle_timer(timestamp, timestamp, TIMER_ID_SYNC_PEER, &mut rng); + let msg = loop { + if let ProtocolOutcome::CreatedRequestToRandomPeer(payload) = + outcomes.pop().expect("expected request to random peer") + { + break payload; + } + }; + let (_outcomes, response) = zug.handle_request_message(&mut rng, sender, msg, timestamp); + if let Some(msg) = response { + let mut _outcomes = zug2.handle_message(&mut rng, sender, msg, timestamp); + } + } + + // They should be synced up now: + assert_eq!(zug.rounds, zug2.rounds); + assert_eq!(zug.faults, zug2.faults); + assert_eq!(zug.active, zug2.active); +} + +#[test] +fn test_validator_bit_field() { + fn test_roundtrip(zug: &Zug, first: u32, indexes: Vec, expected: Vec) { + let field = zug.validator_bit_field( + ValidatorIndex(first), + indexes.iter().map(|i| ValidatorIndex(*i)), + ); + let new_indexes: BTreeSet = zug + .iter_validator_bit_field(ValidatorIndex(first), field) + .map(|ValidatorIndex(i)| i) + .collect(); + assert_eq!(expected.into_iter().collect::>(), new_indexes); + } + + let weights100: Vec<(PublicKey, U512)> = (0u8..100) + .map(|i| { + let sk = SecretKey::ed25519_from_bytes([i; SecretKey::ED25519_LENGTH]).unwrap(); + (PublicKey::from(&sk), U512::from(100)) + }) + .collect(); + + let weights250: Vec<(PublicKey, U512)> = (0u8..250) + .map(|i| { + let sk = SecretKey::ed25519_from_bytes([i; SecretKey::ED25519_LENGTH]).unwrap(); 
+ (PublicKey::from(&sk), U512::from(100)) + }) + .collect(); + + let sc100 = new_test_zug(weights100, vec![], &[]); + let sc250 = new_test_zug(weights250, vec![], &[]); + + test_roundtrip(&sc100, 50, vec![], vec![]); + test_roundtrip(&sc250, 50, vec![], vec![]); + test_roundtrip(&sc250, 200, vec![], vec![]); + + test_roundtrip(&sc100, 50, vec![0, 1, 49, 50, 99], vec![50, 99, 0, 1, 49]); + test_roundtrip(&sc250, 50, vec![0, 49, 50, 177, 178, 249], vec![50, 177]); + test_roundtrip( + &sc250, + 200, + vec![0, 77, 78, 200, 249], + vec![200, 249, 0, 77], + ); +} + +#[test] +fn test_quorum() { + // Alice has almost 2/3 of the weight, Bob almost 1/3, and Carol 1. + let weights_without_overflow = (66, 33, 1); + // A similar distribution, but the quorum calculation would overflow if it naively added the + // total weight to the ftt. + let weights_with_overflow = (1 << 63, 1 << 62, 1); + for (a, b, c) in [weights_without_overflow, weights_with_overflow] { + let (weights, validators) = abc_weights(a, b, c); + let alice_idx = validators.get_index(&*ALICE_PUBLIC_KEY).unwrap(); + let bob_idx = validators.get_index(&*BOB_PUBLIC_KEY).unwrap(); + let carol_idx = validators.get_index(&*CAROL_PUBLIC_KEY).unwrap(); + + let mut zug = new_test_zug(weights, vec![], &[]); + + // The threshold is the highest number that's below 2/3 of the weight. + assert_eq!(a, zug.quorum_threshold().0); + + // Alice alone is not a quorum, but with Carol she is. + assert!(!zug.is_quorum(vec![].into_iter())); + assert!(!zug.is_quorum(vec![alice_idx].into_iter())); + assert!(zug.is_quorum(vec![alice_idx, carol_idx].into_iter())); + assert!(zug.is_quorum(vec![alice_idx, bob_idx, carol_idx].into_iter())); + + // If Carol is known to be faulty, she counts towards every quorum. + zug.mark_faulty(&CAROL_PUBLIC_KEY); + + // So now Alice's vote alone is sufficient. 
+ assert!(!zug.is_quorum(vec![].into_iter())); + assert!(zug.is_quorum(vec![alice_idx].into_iter())); + } +} + +#[test] +fn update_proposal_timeout() { + macro_rules! assert_approx { + ($val0:expr, $val1:expr) => { + let v0: f64 = $val0; + let v1: f64 = $val1; + let diff = (v1 - v0).abs(); + let min = v1.abs().min(v0.abs()); + assert!(diff < min * 0.1, "not approximately equal: {}, {}", v0, v1); + }; + } + + let mut rng = crate::new_rng(); + + let (weights, _validators) = abc_weights(1, 2, 3); + let mut zug = new_test_zug(weights, vec![], &[]); + let _outcomes = zug.handle_timer( + Timestamp::from(100000), + Timestamp::from(100000), + TIMER_ID_UPDATE, + &mut rng, + ); + + let round_start = zug.current_round_start; + let grace_factor = zug.config.proposal_grace_period as f64 / 100.0 + 1.0; + let inertia = zug.config.proposal_timeout_inertia; + let initial_timeout = zug.config.proposal_timeout.millis() as f64 * grace_factor; + + let timeout = zug.proposal_timeout().millis() as f64; + + assert_approx!(initial_timeout, timeout); + + // Within 2 * inertia blocks the timeout should double and go back down again, if rounds + // without proposals come before rounds with fast proposals and the fraction of rounds with + // fast proposals is (1 + ftt) / 2, i.e. 2/3. + let fail_rounds = (inertia as f64 * 2.0 / 3.0).round() as u16; + let success_rounds = 2 * inertia - fail_rounds; + for _ in 0..fail_rounds { + zug.update_proposal_timeout(round_start + TimeDiff::from_seconds(10000)); + } + assert_approx!( + 2.0 * initial_timeout, + zug.proposal_timeout().millis() as f64 + ); + for _ in 0..success_rounds { + zug.update_proposal_timeout(round_start + TimeDiff::from_millis(1)); + } + assert_approx!(initial_timeout, zug.proposal_timeout().millis() as f64); + + // If the proposal delay is consistently t, the timeout will settle on t * grace_factor + // within 2 * inertia rounds. 
+ let min_delay = (zug.proposal_timeout().millis() as f64 / grace_factor) as u64; + for _ in 0..10 { + let delay = TimeDiff::from_millis(rng.gen_range(min_delay..(min_delay * 2))); + for _ in 0..(2 * inertia) { + zug.update_proposal_timeout(round_start + delay); + } + assert_eq!( + delay.millis() as f64 * grace_factor, + zug.proposal_timeout().millis() as f64 + ); + } +} diff --git a/node/src/components/consensus/tests/consensus_des_testing.rs b/node/src/components/consensus/tests/consensus_des_testing.rs index 0dafe5fa0b..d47a5a9e08 100644 --- a/node/src/components/consensus/tests/consensus_des_testing.rs +++ b/node/src/components/consensus/tests/consensus_des_testing.rs @@ -6,8 +6,9 @@ use std::{ use datasize::DataSize; +use casper_types::Timestamp; + use super::queue::{MessageT, Queue, QueueEntry}; -use crate::types::Timestamp; /// Enum defining recipients of the message. #[derive(Debug)] diff --git a/node/src/components/consensus/tests/queue.rs b/node/src/components/consensus/tests/queue.rs index e52c4905f2..209f297205 100644 --- a/node/src/components/consensus/tests/queue.rs +++ b/node/src/components/consensus/tests/queue.rs @@ -1,7 +1,9 @@ -use super::consensus_des_testing::{Message, ValidatorId}; -use crate::types::Timestamp; use std::{cmp::Ordering, collections::BinaryHeap, fmt::Debug}; +use casper_types::Timestamp; + +use super::consensus_des_testing::{Message, ValidatorId}; + pub(crate) trait MessageT: PartialEq + Eq + Ord + Clone + Debug {} impl MessageT for T where T: PartialEq + Eq + Ord + Clone + Debug {} diff --git a/node/src/components/consensus/tests/utils.rs b/node/src/components/consensus/tests/utils.rs index 8653bacdfb..f988ff37e9 100644 --- a/node/src/components/consensus/tests/utils.rs +++ b/node/src/components/consensus/tests/utils.rs @@ -1,24 +1,42 @@ +use std::sync::Arc; + use num::Zero; use once_cell::sync::Lazy; -use casper_execution_engine::shared::motes::Motes; -use casper_types::{system::auction::DelegationRate, PublicKey, SecretKey, 
U512}; +use casper_types::{ + system::auction::DelegationRate, AccountConfig, AccountsConfig, ActivationPoint, Chainspec, + ChainspecRawBytes, Motes, PublicKey, SecretKey, TimeDiff, Timestamp, ValidatorConfig, U512, +}; use crate::{ - types::{ - chainspec::{AccountConfig, AccountsConfig, ValidatorConfig}, - ActivationPoint, Chainspec, Timestamp, - }, + tls::{KeyFingerprint, Sha512}, + types::NodeId, utils::Loadable, }; -pub static ALICE_SECRET_KEY: Lazy = - Lazy::new(|| SecretKey::ed25519_from_bytes([0; SecretKey::ED25519_LENGTH]).unwrap()); -pub static ALICE_PUBLIC_KEY: Lazy = Lazy::new(|| PublicKey::from(&*ALICE_SECRET_KEY)); +pub static ALICE_SECRET_KEY: Lazy> = + Lazy::new(|| Arc::new(SecretKey::ed25519_from_bytes([0; SecretKey::ED25519_LENGTH]).unwrap())); +pub static ALICE_PUBLIC_KEY: Lazy = Lazy::new(|| PublicKey::from(&**ALICE_SECRET_KEY)); +pub static ALICE_NODE_ID: Lazy = Lazy::new(|| { + NodeId::from(KeyFingerprint::from(Sha512::new(match *ALICE_PUBLIC_KEY { + PublicKey::Ed25519(pub_key) => pub_key, + _ => panic!("ALICE_PUBLIC_KEY is Ed25519"), + }))) +}); + +pub static BOB_SECRET_KEY: Lazy> = + Lazy::new(|| Arc::new(SecretKey::ed25519_from_bytes([1; SecretKey::ED25519_LENGTH]).unwrap())); +pub static BOB_PUBLIC_KEY: Lazy = Lazy::new(|| PublicKey::from(&**BOB_SECRET_KEY)); +pub static BOB_NODE_ID: Lazy = Lazy::new(|| { + NodeId::from(KeyFingerprint::from(Sha512::new(match *BOB_PUBLIC_KEY { + PublicKey::Ed25519(pub_key) => pub_key, + _ => panic!("BOB_PUBLIC_KEY is Ed25519"), + }))) +}); -pub static BOB_PRIVATE_KEY: Lazy = - Lazy::new(|| SecretKey::ed25519_from_bytes([1; SecretKey::ED25519_LENGTH]).unwrap()); -pub static BOB_PUBLIC_KEY: Lazy = Lazy::new(|| PublicKey::from(&*BOB_PRIVATE_KEY)); +pub static CAROL_SECRET_KEY: Lazy> = + Lazy::new(|| Arc::new(SecretKey::ed25519_from_bytes([2; SecretKey::ED25519_LENGTH]).unwrap())); +pub static CAROL_PUBLIC_KEY: Lazy = Lazy::new(|| PublicKey::from(&**CAROL_SECRET_KEY)); /// Loads the local chainspec and overrides 
timestamp and genesis account with the given stakes. /// The test `Chainspec` returned has eras with exactly two blocks. @@ -27,21 +45,23 @@ where I: IntoIterator, T: Into, { - let mut chainspec = Chainspec::from_resources("local"); + let (mut chainspec, _) = <(Chainspec, ChainspecRawBytes)>::from_resources("local"); let accounts = stakes .into_iter() .map(|(pk, stake)| { - let motes = Motes::new(stake.into()); + let motes = Motes::new(stake); let validator_config = ValidatorConfig::new(motes, DelegationRate::zero()); AccountConfig::new(pk, motes, Some(validator_config)) }) .collect(); let delegators = vec![]; - chainspec.network_config.accounts_config = AccountsConfig::new(accounts, delegators); + let administrators = vec![]; + chainspec.network_config.accounts_config = + AccountsConfig::new(accounts, delegators, administrators); chainspec.protocol_config.activation_point = ActivationPoint::Genesis(Timestamp::now()); // Every era has exactly two blocks. chainspec.core_config.minimum_era_height = 2; - chainspec.core_config.era_duration = 0.into(); + chainspec.core_config.era_duration = TimeDiff::from_millis(0); chainspec } diff --git a/node/src/components/consensus/traits.rs b/node/src/components/consensus/traits.rs index d9361c5955..aad921e46d 100644 --- a/node/src/components/consensus/traits.rs +++ b/node/src/components/consensus/traits.rs @@ -6,36 +6,21 @@ use std::{ use datasize::DataSize; use serde::{de::DeserializeOwned, Serialize}; -use crate::types::Timestamp; - -pub trait NodeIdT: Clone + Display + Debug + Send + Eq + Hash + DataSize + 'static {} -impl NodeIdT for I where I: Clone + Display + Debug + Send + Eq + Hash + DataSize + 'static {} - /// A validator identifier. 
-pub(crate) trait ValidatorIdT: Eq + Ord + Clone + Debug + Hash + Send + DataSize {} -impl ValidatorIdT for VID where VID: Eq + Ord + Clone + Debug + Hash + Send + DataSize {} +pub trait ValidatorIdT: Eq + Ord + Clone + Debug + Hash + Send + DataSize + Display {} +impl ValidatorIdT for VID where VID: Eq + Ord + Clone + Debug + Hash + Send + DataSize + Display +{} /// The consensus value type, e.g. a list of transactions. -pub(crate) trait ConsensusValueT: - Eq + Clone + Debug + Hash + Serialize + DeserializeOwned + Send + DataSize +pub trait ConsensusValueT: + Eq + Clone + Debug + Display + Hash + Serialize + DeserializeOwned + Send + DataSize { - type Hash: HashT; - - /// Returns hash of self. - fn hash(&self) -> Self::Hash; - /// Returns whether the consensus value needs validation. fn needs_validation(&self) -> bool; - - /// Returns the value's timestamp. - fn timestamp(&self) -> Timestamp; - - /// Returns the parent value, or `None` if this is the first block in this era. - fn parent(&self) -> Option<&Self::Hash>; } /// A hash, as an identifier for a block or unit. -pub(crate) trait HashT: +pub trait HashT: Eq + Ord + Copy + Clone + DataSize + Debug + Display + Hash + Serialize + DeserializeOwned + Send { } @@ -55,7 +40,7 @@ impl HashT for H where } /// A validator's secret signing key. -pub(crate) trait ValidatorSecret: Send + DataSize { +pub trait ValidatorSecret: Send + DataSize { type Hash: DataSize; type Signature: Eq + PartialEq + Clone + Debug + Hash + Serialize + DeserializeOwned + DataSize; @@ -66,7 +51,7 @@ pub(crate) trait ValidatorSecret: Send + DataSize { /// The collection of types the user can choose for cryptography, IDs, transactions, etc. // TODO: These trait bounds make `#[derive(...)]` work for types with a `C: Context` type // parameter. Split this up or replace the derives with explicit implementations. 
-pub(crate) trait Context: Clone + DataSize + Debug + Eq + Ord + Hash + Send { +pub trait Context: Clone + DataSize + Debug + Eq + Ord + Hash + Send { /// The consensus value type, e.g. a list of transactions. type ConsensusValue: ConsensusValueT; /// Unique identifiers for validators. @@ -96,3 +81,10 @@ pub(crate) trait Context: Clone + DataSize + Debug + Eq + Ord + Hash + Send { signature: &::Signature, ) -> bool; } + +/// A marker trait indicating that the given type is a valid consensus message to be sent across the +/// network. +/// +/// Only implement this for types that are native to the consensus module and never for `Vec`, +/// as this would break accidental double-serialization protection. +pub trait ConsensusNetworkMessage {} diff --git a/node/src/components/consensus/utils.rs b/node/src/components/consensus/utils.rs new file mode 100644 index 0000000000..bcbda1b361 --- /dev/null +++ b/node/src/components/consensus/utils.rs @@ -0,0 +1,8 @@ +//! Various utilities relevant to consensus. + +mod validators; +pub(crate) mod wal; +mod weight; + +pub use validators::{Validator, ValidatorIndex, ValidatorMap, Validators}; +pub use weight::Weight; diff --git a/node/src/components/consensus/utils/validators.rs b/node/src/components/consensus/utils/validators.rs new file mode 100644 index 0000000000..b1215de8f5 --- /dev/null +++ b/node/src/components/consensus/utils/validators.rs @@ -0,0 +1,404 @@ +use std::{ + collections::HashMap, + fmt, + hash::Hash, + iter::FromIterator, + ops::{Add, Index, IndexMut}, + slice, vec, +}; + +use datasize::DataSize; +use derive_more::{AsRef, From}; +use itertools::Itertools; +use serde::{Deserialize, Serialize}; +use tracing::warn; + +use super::Weight; +use crate::utils::ds; + +/// The index of a validator, in a list of all validators, ordered by ID. 
+#[derive( + Copy, Clone, DataSize, Debug, Eq, PartialEq, Hash, Ord, PartialOrd, Serialize, Deserialize, +)] +pub struct ValidatorIndex(pub u32); + +impl From for ValidatorIndex { + fn from(idx: u32) -> Self { + ValidatorIndex(idx) + } +} + +/// Information about a validator: their ID and weight. +#[derive(Clone, DataSize, Debug, Eq, PartialEq)] +pub struct Validator { + weight: Weight, + id: VID, + banned: bool, + can_propose: bool, +} + +impl> From<(VID, W)> for Validator { + fn from((id, weight): (VID, W)) -> Validator { + Validator { + id, + weight: weight.into(), + banned: false, + can_propose: true, + } + } +} + +impl Validator { + /// Returns the validator's ID. + pub fn id(&self) -> &VID { + &self.id + } + + /// Returns the validator's weight. + pub fn weight(&self) -> Weight { + self.weight + } +} + +/// The validator IDs and weight map. +#[derive(Debug, DataSize, Clone)] +pub struct Validators +where + VID: Eq + Hash, +{ + index_by_id: HashMap, + validators: Vec>, + total_weight: Weight, +} + +impl Validators { + /// Returns the total weight of the set of validators. + pub fn total_weight(&self) -> Weight { + self.total_weight + } + + /// Returns the weight of the validator with the given index. + /// + /// *Panics* if the validator index does not exist. + pub fn weight(&self, idx: ValidatorIndex) -> Weight { + self.validators[idx.0 as usize].weight + } + + /// Returns `true` if the map is empty. + pub fn is_empty(&self) -> bool { + self.validators.is_empty() + } + + /// Returns the number of validators. + pub fn len(&self) -> usize { + self.validators.len() + } + + /// Gets the index of a validator with the given ID. Returns `None` if no such validator is in + /// the set. + pub fn get_index(&self, id: &VID) -> Option { + self.index_by_id.get(id).cloned() + } + + /// Returns validator ID by index, or `None` if it doesn't exist. 
+ pub fn id(&self, idx: ValidatorIndex) -> Option<&VID> { + self.validators.get(idx.0 as usize).map(Validator::id) + } + + /// Returns an iterator over all validators, sorted by ID. + pub fn iter(&self) -> impl Iterator> { + self.validators.iter() + } + + /// Marks the validator with that ID as banned, if it exists, and excludes it from the leader + /// sequence. + pub fn ban(&mut self, vid: &VID) { + if let Some(idx) = self.get_index(vid) { + self.validators[idx.0 as usize].banned = true; + self.validators[idx.0 as usize].can_propose = false; + } + } + + /// Marks the validator as excluded from the leader sequence. + pub fn set_cannot_propose(&mut self, vid: &VID) { + if let Some(idx) = self.get_index(vid) { + self.validators[idx.0 as usize].can_propose = false; + } + } + + /// Returns an iterator of all indices of banned validators. + pub fn iter_banned_idx(&self) -> impl Iterator + '_ { + self.iter() + .enumerate() + .filter(|(_, v)| v.banned) + .map(|(idx, _)| ValidatorIndex::from(idx as u32)) + } + + /// Returns an iterator of all indices of validators that are not allowed to propose values. + pub fn iter_cannot_propose_idx(&self) -> impl Iterator + '_ { + self.iter() + .enumerate() + .filter(|(_, v)| !v.can_propose) + .map(|(idx, _)| ValidatorIndex::from(idx as u32)) + } + + /// Returns an iterator of pairs (validator index, validator ID). 
+ pub fn enumerate_ids<'a>(&'a self) -> impl Iterator { + let to_idx = + |(idx, v): (usize, &'a Validator)| (ValidatorIndex::from(idx as u32), v.id()); + self.iter().enumerate().map(to_idx) + } + + pub(crate) fn ensure_nonzero_proposing_stake(&mut self) -> bool { + if self.total_weight.is_zero() { + return false; + } + if self.iter().all(|v| v.banned || v.weight.is_zero()) { + warn!("everyone is banned; admitting banned validators anyway"); + for validator in &mut self.validators { + validator.can_propose = true; + validator.banned = false; + } + } else if self.iter().all(|v| !v.can_propose || v.weight.is_zero()) { + warn!("everyone is excluded; allowing proposers who are currently inactive"); + for validator in &mut self.validators { + if !validator.banned { + validator.can_propose = true; + } + } + } + true + } +} + +impl> FromIterator<(VID, W)> for Validators { + fn from_iter>(ii: I) -> Validators { + let mut validators: Vec<_> = ii.into_iter().map(Validator::from).collect(); + let total_weight = validators.iter().fold(Weight(0), |sum, v| { + sum.checked_add(v.weight()) + .expect("total weight must be < 2^64") + }); + validators.sort_by_cached_key(|val| val.id.clone()); + let index_by_id = validators + .iter() + .enumerate() + .map(|(idx, val)| (val.id.clone(), ValidatorIndex(idx as u32))) + .collect(); + Validators { + index_by_id, + validators, + total_weight, + } + } +} + +impl fmt::Display for Validators { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + writeln!(f, "Validators: index, ID, weight")?; + for (i, val) in self.validators.iter().enumerate() { + writeln!(f, "{:3}, {:?}, {}", i, val.id(), val.weight().0)? + } + Ok(()) + } +} + +/// A map from the set of validators to some values. 
+#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize, AsRef, From, Hash)] +pub struct ValidatorMap(Vec); + +impl fmt::Display for ValidatorMap> +where + T: fmt::Display, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let view = self + .0 + .iter() + .map(|maybe_el| match maybe_el { + None => "N".to_string(), + Some(el) => format!("{}", el), + }) + .join(", "); + write!(f, "ValidatorMap({})", view)?; + Ok(()) + } +} + +impl DataSize for ValidatorMap +where + T: DataSize, +{ + const IS_DYNAMIC: bool = Vec::::IS_DYNAMIC; + + const STATIC_HEAP_SIZE: usize = Vec::::STATIC_HEAP_SIZE; + + fn estimate_heap_size(&self) -> usize { + ds::vec_sample(&self.0) + } +} + +impl ValidatorMap { + /// Returns the value for the given validator, or `None` if the index is out of range. + pub fn get(&self, idx: ValidatorIndex) -> Option<&T> { + self.0.get(idx.0 as usize) + } + + /// Returns the number of values. This must equal the number of validators. + pub fn len(&self) -> usize { + self.0.len() + } + + /// Returns `true` if this ValidatorMap is empty. + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } + + /// Returns an iterator over all values. + pub fn iter(&self) -> impl Iterator { + self.0.iter() + } + + /// Returns an iterator over mutable references to all values. + pub fn iter_mut(&mut self) -> impl Iterator { + self.0.iter_mut() + } + + /// Returns an iterator over all values, by validator index. + pub fn enumerate(&self) -> impl Iterator { + self.iter() + .enumerate() + .map(|(idx, value)| (ValidatorIndex(idx as u32), value)) + } + + /// Returns `true` if `self` has an entry for validator number `idx`. + pub fn has(&self, idx: ValidatorIndex) -> bool { + self.0.len() > idx.0 as usize + } + + /// Returns an iterator over all validator indices. + pub fn keys(&self) -> impl Iterator { + (0..self.len()).map(|idx| ValidatorIndex(idx as u32)) + } + + /// Binary searches this sorted `ValidatorMap` for `x`. 
+ /// + /// Returns the lowest index of an entry `>= x`, or `None` if `x` is greater than all entries. + pub fn binary_search(&self, x: &T) -> Option + where + T: Ord, + { + match self.0.binary_search(x) { + // The standard library's binary search returns `Ok(i)` if it found `x` at index `i`, + // but `i` is not necessarily the lowest such index. + Ok(i) => Some(ValidatorIndex( + (0..i) + .rev() + .take_while(|j| self.0[*j] == *x) + .last() + .unwrap_or(i) as u32, + )), + // It returns `Err(i)` if `x` was not found but `i` is the index where `x` would have to + // be inserted to keep the list. This is either the lowest index of an entry `>= x`... + Err(i) if i < self.len() => Some(ValidatorIndex(i as u32)), + // ...or the end of the list if `x` is greater than all entries. + Err(_) => None, + } + } +} + +impl IntoIterator for ValidatorMap { + type Item = T; + type IntoIter = vec::IntoIter; + fn into_iter(self) -> Self::IntoIter { + self.0.into_iter() + } +} + +impl FromIterator for ValidatorMap { + fn from_iter>(ii: I) -> ValidatorMap { + ValidatorMap(ii.into_iter().collect()) + } +} + +impl Index for ValidatorMap { + type Output = T; + + fn index(&self, vidx: ValidatorIndex) -> &T { + &self.0[vidx.0 as usize] + } +} + +impl IndexMut for ValidatorMap { + fn index_mut(&mut self, vidx: ValidatorIndex) -> &mut T { + &mut self.0[vidx.0 as usize] + } +} + +impl<'a, T> IntoIterator for &'a ValidatorMap { + type Item = &'a T; + type IntoIter = slice::Iter<'a, T>; + + fn into_iter(self) -> Self::IntoIter { + self.0.iter() + } +} + +impl> Add> for ValidatorMap { + type Output = ValidatorMap; + fn add(mut self, rhs: ValidatorMap) -> Self::Output { + #[allow(clippy::arithmetic_side_effects)] + self.0 + .iter_mut() + .zip(rhs) + .for_each(|(lhs_val, rhs_val)| *lhs_val = *lhs_val + rhs_val); + self + } +} + +impl ValidatorMap> { + /// Returns the keys of all validators whose value is `Some`. 
+ pub fn keys_some(&self) -> impl Iterator + '_ { + self.iter_some().map(|(vidx, _)| vidx) + } + + /// Returns an iterator over all values that are present, together with their index. + pub fn iter_some(&self) -> impl Iterator + '_ { + self.enumerate() + .filter_map(|(vidx, opt)| opt.as_ref().map(|val| (vidx, val))) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn from_iter() { + let weights = vec![ + ("Bob".to_string(), 5u64), + ("Carol".to_string(), 3), + ("Alice".to_string(), 4), + ]; + let validators = Validators::from_iter(weights); + assert_eq!(ValidatorIndex(0), validators.index_by_id["Alice"]); + assert_eq!(ValidatorIndex(1), validators.index_by_id["Bob"]); + assert_eq!(ValidatorIndex(2), validators.index_by_id["Carol"]); + } + + #[test] + fn binary_search() { + let list = ValidatorMap::from(vec![2, 3, 5, 5, 5, 5, 5, 9]); + // Searching for 5 returns the first index, even if the standard library doesn't. + assert!( + list.0.binary_search(&5).expect("5 is in the list") > 2, + "test case where the std's search would return a higher index" + ); + assert_eq!(Some(ValidatorIndex(2)), list.binary_search(&5)); + // Searching for 4 also returns 2, since that is the first index of a value >= 4. + assert_eq!(Some(ValidatorIndex(2)), list.binary_search(&4)); + // 3 is found again, at index 1. + assert_eq!(Some(ValidatorIndex(1)), list.binary_search(&3)); + // 10 is bigger than all entries. 
+ assert_eq!(None, list.binary_search(&10)); + } +} diff --git a/node/src/components/consensus/utils/wal.rs b/node/src/components/consensus/utils/wal.rs new file mode 100644 index 0000000000..c617feb5f9 --- /dev/null +++ b/node/src/components/consensus/utils/wal.rs @@ -0,0 +1,237 @@ +use std::{ + fs::{File, OpenOptions}, + io::{self, BufReader, BufWriter, Read, Seek, Write}, + marker::PhantomData, + path::PathBuf, +}; + +use datasize::DataSize; +use serde::{Deserialize, Serialize}; +use thiserror::Error; +use tracing::warn; + +pub(crate) trait WalEntry: Serialize + for<'de> Deserialize<'de> {} + +/// A Write-Ahead Log to store every message on disk when we add it to the protocol state. +#[derive(Debug)] +pub(crate) struct WriteWal { + writer: BufWriter, + phantom_context: PhantomData, +} + +impl DataSize for WriteWal { + const IS_DYNAMIC: bool = true; + + const STATIC_HEAP_SIZE: usize = 0; + + fn estimate_heap_size(&self) -> usize { + self.writer.capacity() + } +} + +#[derive(Error, Debug)] +pub(crate) enum WriteWalError { + #[error("Could not get serialized message size: {0}")] + CouldntGetSerializedSize(bincode::Error), + #[error("Could not serialize size: {0}")] + CouldntSerializeSizeIntoWriter(io::Error), + #[error("Could not serialize message: {0}")] + CouldntSerializeMessageIntoWriter(bincode::Error), + #[error("Could not flush message to disk: {0}")] + CouldntFlushMessageToDisk(io::Error), + #[error("Could not open file: {0}")] + FileCouldntBeOpened(io::Error), +} + +impl WriteWal { + pub(crate) fn new(wal_path: &PathBuf) -> Result { + let file = OpenOptions::new() + .append(true) + .create(true) + .open(wal_path) + .map_err(WriteWalError::FileCouldntBeOpened)?; + Ok(WriteWal { + writer: BufWriter::new(file), + phantom_context: PhantomData, + }) + } + + pub(crate) fn record_entry(&mut self, entry: &E) -> Result<(), WriteWalError> { + // First write the size of the entry as a serialized u64. 
+ let entry_size = + bincode::serialized_size(entry).map_err(WriteWalError::CouldntGetSerializedSize)?; + self.writer + .write_all(&entry_size.to_le_bytes()) + .map_err(WriteWalError::CouldntSerializeSizeIntoWriter)?; + // Write the serialized entry itself. + bincode::serialize_into(&mut self.writer, entry) + .map_err(WriteWalError::CouldntSerializeMessageIntoWriter)?; + self.writer + .flush() + .map_err(WriteWalError::CouldntFlushMessageToDisk)?; + Ok(()) + } +} + +/// A buffer to read a Write-Ahead Log from disk and deserialize its messages. +#[derive(Debug)] +pub(crate) struct ReadWal { + pub(crate) reader: BufReader, + pub(crate) phantom_context: PhantomData, +} + +#[derive(Error, Debug)] +pub(crate) enum ReadWalError { + #[error("Could not create file at {0}: {1}")] + FileCouldntBeCreated(PathBuf, io::Error), + #[error(transparent)] + OtherIOError(#[from] io::Error), + #[error("could not deserialize WAL entry: {0}")] + CouldNotDeserialize(bincode::Error), +} + +impl ReadWal { + pub(crate) fn new(wal_path: &PathBuf) -> Result { + let file = OpenOptions::new() + .create(true) + .truncate(false) + .read(true) + .write(true) + .open(wal_path) + .map_err(|err| ReadWalError::FileCouldntBeCreated(wal_path.clone(), err))?; + Ok(ReadWal { + reader: BufReader::new(file), + phantom_context: PhantomData, + }) + } +} + +impl ReadWal { + /// Reads the next entry from the WAL, or returns an error. + /// If there are 0 bytes left it returns `Ok(None)`. + pub(crate) fn read_next_entry(&mut self) -> Result, ReadWalError> { + // Remember the current position: If we encounter an unreadable entry we trim the file at + // this point so we can continue appending entries after it. + let position = self.reader.stream_position()?; + + // Deserialize the size of the entry, in bytes, as a u64. 
+ let mut entry_size_buf = [0u8; size_of::()]; + if let Err(err) = self.reader.read_exact(&mut entry_size_buf) { + if err.kind() == io::ErrorKind::UnexpectedEof { + self.trim_file(position)?; + return Ok(None); + } + return Err(ReadWalError::OtherIOError(err)); + } + let entry_size = u64::from_le_bytes(entry_size_buf) as usize; + + // Read the serialized entry itself. + let mut entry_buf = vec![0; entry_size]; + if let Err(err) = self.reader.read_exact(&mut entry_buf) { + if err.kind() == io::ErrorKind::UnexpectedEof { + self.trim_file(position)?; + return Ok(None); + } + return Err(ReadWalError::OtherIOError(err)); + } + + // Deserialize and return the entry. + let entry = bincode::deserialize(&entry_buf).map_err(ReadWalError::CouldNotDeserialize)?; + Ok(Some(entry)) + } + + /// Trims the file to the given length and logs a warning if any bytes were removed. + /// + /// This should be called with the position where the last complete entry ended. Incomplete + /// entries can safely be removed because we only send messages after writing them and + /// flushing the buffer, so we won't remove any messages that we already sent. + fn trim_file(&mut self, position: u64) -> Result<(), ReadWalError> { + if self.reader.stream_position()? > position { + warn!("removing incomplete entry from WAL"); + self.reader.get_mut().set_len(position)?; + } + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use std::iter::from_fn; + + use casper_types::Timestamp; + use serde::{Deserialize, Serialize}; + use tempfile::tempdir; + + use super::*; + + #[derive(Serialize, Deserialize, Debug, PartialEq)] + enum TestWalEntry { + Variant1(u32), + Variant2(Timestamp), + } + + impl WalEntry for TestWalEntry {} + + #[test] + // Tests the functionality of the ReadWal and WriteWal by constructing one and manipulating it. 
+ fn test_read_write_wal() { + // Create a bunch of test entries + let mut entries = vec![ + TestWalEntry::Variant1(0), + TestWalEntry::Variant1(1), + TestWalEntry::Variant1(2), + TestWalEntry::Variant2(Timestamp::zero()), + ]; + + // Create a temporary directory which will be removed upon dropping the dir variable, + // using it to store the WAL file. + let dir = tempdir().unwrap(); + let path = dir.path().join("wal"); + + let read_entries = || { + let mut read_wal: ReadWal = ReadWal::new(&path).unwrap(); + from_fn(move || read_wal.read_next_entry().unwrap()).collect::>() + }; + + assert_eq!(read_entries(), vec![]); + + // Record all of the test entries into the WAL file + let mut write_wal: WriteWal = WriteWal::new(&path).unwrap(); + + entries.iter().for_each(move |entry| { + write_wal.record_entry(entry).unwrap(); + }); + + // Assure that the entries were properly written + assert_eq!(entries, read_entries()); + + // Now, we go through and corrupt each entry and ensure that its actually removed by the + // ReadWal when we fail to read it. 
+ loop { + // If there are no more entries, we're done + if entries.is_empty() { + break; + } + + // We create a File in order to drop the last byte from the file + let mut file = OpenOptions::new() + .append(true) + .create(true) + .open(&path) + .unwrap(); + + file.seek(io::SeekFrom::End(-1)).unwrap(); + let position = file.stream_position().unwrap(); + file.set_len(position).unwrap(); + + // We pop the entry off from our in-memory list of entries, then check if that equals + // the on-disk WAL + entries.pop().unwrap(); + + assert_eq!(entries, read_entries()); + } + + // Finally, we assure that there are no more entries at all in the WAL + assert_eq!(entries, read_entries()); + } +} diff --git a/node/src/components/consensus/utils/weight.rs b/node/src/components/consensus/utils/weight.rs new file mode 100644 index 0000000000..dfe836d2ba --- /dev/null +++ b/node/src/components/consensus/utils/weight.rs @@ -0,0 +1,83 @@ +use std::{ + iter::Sum, + ops::{Div, Mul}, +}; + +use datasize::DataSize; +use derive_more::{Add, AddAssign, From, Sub, SubAssign, Sum}; +use serde::{Deserialize, Serialize}; + +/// A vote weight. +#[derive( + Copy, + Clone, + DataSize, + Default, + Debug, + PartialEq, + Eq, + PartialOrd, + Ord, + Add, + Serialize, + Deserialize, + Sub, + AddAssign, + SubAssign, + Sum, + From, +)] +pub struct Weight(pub u64); + +impl Weight { + /// Checked addition. Returns `None` if overflow occurred. + pub fn checked_add(self, rhs: Weight) -> Option { + Some(Weight(self.0.checked_add(rhs.0)?)) + } + + /// Saturating addition. Returns `Weight(u64::MAX)` if overflow would occur. + #[allow(dead_code)] + pub fn saturating_add(self, rhs: Weight) -> Weight { + Weight(self.0.saturating_add(rhs.0)) + } + + /// Saturating subtraction. Returns `Weight(0)` if underflow would occur. + pub fn saturating_sub(self, rhs: Weight) -> Weight { + Weight(self.0.saturating_sub(rhs.0)) + } + + /// Returns `true` if this weight is zero. 
+ pub fn is_zero(self) -> bool { + self.0 == 0 + } +} + +impl<'a> Sum<&'a Weight> for Weight { + fn sum>(iter: I) -> Self { + Weight(iter.map(|w| w.0).sum()) + } +} + +impl Mul for Weight { + type Output = Self; + + #[allow(clippy::arithmetic_side_effects)] // The caller needs to prevent overflows. + fn mul(self, rhs: u64) -> Self { + Weight(self.0 * rhs) + } +} + +impl Div for Weight { + type Output = Self; + + #[allow(clippy::arithmetic_side_effects)] // The caller needs to avoid dividing by zero. + fn div(self, rhs: u64) -> Self { + Weight(self.0 / rhs) + } +} + +impl From for u128 { + fn from(Weight(w): Weight) -> u128 { + u128::from(w) + } +} diff --git a/node/src/components/consensus/validator_change.rs b/node/src/components/consensus/validator_change.rs new file mode 100644 index 0000000000..4f8a362e53 --- /dev/null +++ b/node/src/components/consensus/validator_change.rs @@ -0,0 +1,329 @@ +use std::collections::HashSet; + +use casper_types::{PublicKey, ValidatorChange}; + +use super::era_supervisor::Era; + +pub(super) struct ValidatorChanges(pub(super) Vec<(PublicKey, ValidatorChange)>); + +impl ValidatorChanges { + pub(super) fn new(era0: &Era, era1: &Era) -> Self { + let era0_metadata = EraMetadata::from(era0); + let era1_metadata = EraMetadata::from(era1); + Self::new_from_metadata(era0_metadata, era1_metadata) + } + + fn new_from_metadata(era0_metadata: EraMetadata, era1_metadata: EraMetadata) -> Self { + // Validators in `era0` but not `era1` are labeled `Removed`. + let removed_iter = era0_metadata + .validators + .difference(&era1_metadata.validators) + .map(|&public_key| (public_key.clone(), ValidatorChange::Removed)); + + // Validators in `era1` but not `era0` are labeled `Added`. + let added_iter = era1_metadata + .validators + .difference(&era0_metadata.validators) + .map(|&public_key| (public_key.clone(), ValidatorChange::Added)); + + // Only those seen as faulty in `era1` are labeled `SeenAsFaulty`. 
+ let faulty_iter = era1_metadata + .seen_as_faulty + .iter() + .map(|&public_key| (public_key.clone(), ValidatorChange::SeenAsFaulty)); + + // Faulty peers in `era1` but not `era0` which are also validators in `era1` are labeled + // `Banned`. + let banned_iter = era1_metadata + .faulty + .difference(era0_metadata.faulty) + .filter_map(|public_key| { + if era1_metadata.validators.contains(public_key) { + Some((public_key.clone(), ValidatorChange::Banned)) + } else { + None + } + }); + + // Peers which cannot propose in `era1` but can in `era0` and which are also validators in + // `era1` are labeled `CannotPropose`. + let cannot_propose_iter = era1_metadata + .cannot_propose + .difference(era0_metadata.cannot_propose) + .filter_map(|public_key| { + if era1_metadata.validators.contains(public_key) { + Some((public_key.clone(), ValidatorChange::CannotPropose)) + } else { + None + } + }); + + ValidatorChanges( + removed_iter + .chain(faulty_iter) + .chain(added_iter) + .chain(banned_iter) + .chain(cannot_propose_iter) + .collect(), + ) + } +} + +#[derive(Clone)] +struct EraMetadata<'a> { + validators: HashSet<&'a PublicKey>, + seen_as_faulty: Vec<&'a PublicKey>, + faulty: &'a HashSet, + cannot_propose: &'a HashSet, +} + +impl<'a> From<&'a Era> for EraMetadata<'a> { + fn from(era: &'a Era) -> Self { + let seen_as_faulty = era + .consensus + .validators_with_evidence() + .into_iter() + .collect(); + + let validators = era.validators().keys().collect(); + let faulty = &era.faulty; + let cannot_propose = &era.cannot_propose; + Self { + validators, + seen_as_faulty, + faulty, + cannot_propose, + } + } +} + +#[cfg(test)] +mod tests { + use std::iter; + + use casper_types::testing::TestRng; + + use super::*; + + fn preset_validators(rng: &mut TestRng) -> HashSet { + iter::repeat_with(|| PublicKey::random(rng)) + .take(5) + .collect() + } + + #[test] + fn should_report_added() { + let mut rng = crate::new_rng(); + let validators = preset_validators(&mut rng); + + let 
era0_metadata = EraMetadata { + validators: validators.iter().collect(), + seen_as_faulty: vec![], + faulty: &Default::default(), + cannot_propose: &Default::default(), + }; + + let mut era1_metadata = era0_metadata.clone(); + let added_validator = PublicKey::random(&mut rng); + let expected_change = vec![(added_validator.clone(), ValidatorChange::Added)]; + era1_metadata.validators.insert(&added_validator); + + let actual_change = ValidatorChanges::new_from_metadata(era0_metadata, era1_metadata); + assert_eq!(expected_change, actual_change.0); + } + + #[test] + fn should_report_removed() { + let mut rng = crate::new_rng(); + let validators = preset_validators(&mut rng); + + let era1_metadata = EraMetadata { + validators: validators.iter().collect(), + seen_as_faulty: vec![], + faulty: &Default::default(), + cannot_propose: &Default::default(), + }; + + let mut era0_metadata = era1_metadata.clone(); + let removed_validator = PublicKey::random(&mut rng); + let expected_change = vec![(removed_validator.clone(), ValidatorChange::Removed)]; + era0_metadata.validators.insert(&removed_validator); + + let actual_change = ValidatorChanges::new_from_metadata(era0_metadata, era1_metadata); + assert_eq!(expected_change, actual_change.0) + } + + #[test] + fn should_report_seen_as_faulty_in_new_era() { + let mut rng = crate::new_rng(); + + let seen_as_faulty_in_old_era = PublicKey::random(&mut rng); + let era0_metadata = EraMetadata { + validators: Default::default(), + seen_as_faulty: vec![&seen_as_faulty_in_old_era], + faulty: &Default::default(), + cannot_propose: &Default::default(), + }; + let seen_as_faulty_in_new_era = PublicKey::random(&mut rng); + let era1_metadata = EraMetadata { + validators: Default::default(), + seen_as_faulty: vec![&seen_as_faulty_in_new_era], + faulty: &Default::default(), + cannot_propose: &Default::default(), + }; + + let actual_change = ValidatorChanges::new_from_metadata(era0_metadata, era1_metadata); + let expected_change = 
vec![(seen_as_faulty_in_new_era, ValidatorChange::SeenAsFaulty)]; + assert_eq!(expected_change, actual_change.0) + } + + #[test] + fn should_report_banned() { + let mut rng = crate::new_rng(); + let validators = preset_validators(&mut rng); + + let faulty = validators.iter().next().unwrap(); + + let era0_metadata = EraMetadata { + validators: validators.iter().collect(), + seen_as_faulty: vec![], + faulty: &Default::default(), + cannot_propose: &Default::default(), + }; + + let mut era1_metadata = era0_metadata.clone(); + let faulty_set = iter::once(faulty.clone()).collect(); + era1_metadata.faulty = &faulty_set; + + let expected_change = vec![(faulty.clone(), ValidatorChange::Banned)]; + let actual_change = ValidatorChanges::new_from_metadata(era0_metadata, era1_metadata); + assert_eq!(expected_change, actual_change.0) + } + + #[test] + fn should_not_report_banned_if_in_both_eras() { + let mut rng = crate::new_rng(); + let validators = preset_validators(&mut rng); + + let faulty = validators.iter().next().unwrap(); + + let era0_metadata = EraMetadata { + validators: validators.iter().collect(), + seen_as_faulty: vec![], + faulty: &iter::once(faulty.clone()).collect(), + cannot_propose: &Default::default(), + }; + let era1_metadata = era0_metadata.clone(); + + let actual_change = ValidatorChanges::new_from_metadata(era0_metadata, era1_metadata); + assert!(actual_change.0.is_empty()); + } + + #[test] + fn should_not_report_banned_if_not_a_validator_in_new_era() { + let mut rng = crate::new_rng(); + let validators = preset_validators(&mut rng); + + let faulty = PublicKey::random(&mut rng); + + let era0_metadata = EraMetadata { + validators: validators.iter().collect(), + seen_as_faulty: vec![], + faulty: &Default::default(), + cannot_propose: &Default::default(), + }; + + let mut era1_metadata = era0_metadata.clone(); + let faulty_set = iter::once(faulty).collect(); + era1_metadata.faulty = &faulty_set; + + let actual_change = 
ValidatorChanges::new_from_metadata(era0_metadata, era1_metadata); + assert!(actual_change.0.is_empty()); + } + + #[test] + fn should_report_cannot_propose() { + let mut rng = crate::new_rng(); + let validators = preset_validators(&mut rng); + + let cannot_propose = validators.iter().next().unwrap(); + + let era0_metadata = EraMetadata { + validators: validators.iter().collect(), + seen_as_faulty: vec![], + faulty: &Default::default(), + cannot_propose: &Default::default(), + }; + + let mut era1_metadata = era0_metadata.clone(); + let cannot_propose_set = iter::once(cannot_propose.clone()).collect(); + era1_metadata.cannot_propose = &cannot_propose_set; + + let expected_change = vec![(cannot_propose.clone(), ValidatorChange::CannotPropose)]; + let actual_change = ValidatorChanges::new_from_metadata(era0_metadata, era1_metadata); + assert_eq!(expected_change, actual_change.0) + } + + #[test] + fn should_not_report_cannot_propose_if_in_both_eras() { + let mut rng = crate::new_rng(); + let validators = preset_validators(&mut rng); + + let cannot_propose = validators.iter().next().unwrap(); + + let era0_metadata = EraMetadata { + validators: validators.iter().collect(), + seen_as_faulty: vec![], + faulty: &Default::default(), + cannot_propose: &iter::once(cannot_propose.clone()).collect(), + }; + let era1_metadata = era0_metadata.clone(); + + let actual_change = ValidatorChanges::new_from_metadata(era0_metadata, era1_metadata); + assert!(actual_change.0.is_empty()); + } + + #[test] + fn should_not_report_cannot_propose_if_not_a_validator_in_new_era() { + let mut rng = crate::new_rng(); + let validators = preset_validators(&mut rng); + + let cannot_propose = PublicKey::random(&mut rng); + + let era0_metadata = EraMetadata { + validators: validators.iter().collect(), + seen_as_faulty: vec![], + faulty: &Default::default(), + cannot_propose: &Default::default(), + }; + + let mut era1_metadata = era0_metadata.clone(); + let cannot_propose_set = 
iter::once(cannot_propose).collect(); + era1_metadata.cannot_propose = &cannot_propose_set; + + let actual_change = ValidatorChanges::new_from_metadata(era0_metadata, era1_metadata); + assert!(actual_change.0.is_empty()); + } + + #[test] + fn should_report_no_status_change() { + let mut rng = crate::new_rng(); + let validators = preset_validators(&mut rng); + + let era0_metadata = EraMetadata { + validators: validators.iter().collect(), + seen_as_faulty: validators.iter().collect(), + faulty: &validators, + cannot_propose: &validators, + }; + let era1_metadata = EraMetadata { + validators: validators.iter().collect(), + seen_as_faulty: vec![], + faulty: &validators, + cannot_propose: &validators, + }; + + let actual_change = ValidatorChanges::new_from_metadata(era0_metadata, era1_metadata); + assert!(actual_change.0.is_empty()); + } +} diff --git a/node/src/components/contract_runtime.rs b/node/src/components/contract_runtime.rs index a6df74c0c8..0c7e8eae1c 100644 --- a/node/src/components/contract_runtime.rs +++ b/node/src/components/contract_runtime.rs @@ -1,158 +1,110 @@ //! Contract Runtime component. 
+ mod config; +mod error; +mod event; +mod exec_queue; +mod metrics; mod operations; +mod rewards; +#[cfg(test)] +mod tests; mod types; +mod utils; use std::{ - collections::{BTreeMap, HashMap, VecDeque}, + cmp::Ordering, + collections::BTreeMap, + convert::TryInto, fmt::{self, Debug, Formatter}, - sync::Arc, + path::Path, + sync::{Arc, Mutex}, time::Instant, }; -pub use config::Config; -use smallvec::SmallVec; - -pub use types::{EraValidatorsRequest, ValidatorWeightsByEraIdRequest}; - +use casper_executor_wasm::{ExecutorConfigBuilder, ExecutorKind, ExecutorV2}; use datasize::DataSize; -use derive_more::From; use lmdb::DatabaseFlags; -use prometheus::{self, Histogram, HistogramOpts, IntGauge, Registry}; -use thiserror::Error; -use tracing::{debug, error, trace}; - -use casper_execution_engine::{ - core::engine_state::{ - self, genesis::GenesisResult, step::EvictItem, DeployItem, EngineConfig, EngineState, - ExecuteRequest, GetEraValidatorsError, GetEraValidatorsRequest, RewardItem, SlashItem, - StepRequest, StepResult, +use prometheus::Registry; +use tracing::{debug, error, info, trace}; + +use casper_execution_engine::engine_state::{EngineConfigBuilder, ExecutionEngineV1}; +use casper_storage::{ + data_access_layer::{ + AddressableEntityRequest, AddressableEntityResult, BlockStore, DataAccessLayer, + EntryPointExistsRequest, ExecutionResultsChecksumRequest, FlushRequest, FlushResult, + GenesisRequest, GenesisResult, TrieRequest, }, - shared::newtypes::{Blake2bHash, CorrelationId}, - storage::{ - error::lmdb::Error as StorageLmdbError, global_state::lmdb::LmdbGlobalState, - protocol_data_store::lmdb::LmdbProtocolDataStore, - transaction_source::lmdb::LmdbEnvironment, trie_store::lmdb::LmdbTrieStore, + global_state::{ + state::{lmdb::LmdbGlobalState, CommitProvider, StateProvider}, + transaction_source::lmdb::LmdbEnvironment, + trie_store::lmdb::LmdbTrieStore, }, + system::genesis::GenesisError, + tracking_copy::TrackingCopyError, }; use casper_types::{ - 
system::auction::ValidatorWeights, ExecutionResult, ProtocolVersion, PublicKey, U512, + account::AccountHash, ActivationPoint, Chainspec, ChainspecRawBytes, ChainspecRegistry, + EntityAddr, EraId, Key, PublicKey, }; use crate::{ - components::Component, - crypto::hash::Digest, + components::{fetcher::FetchResponse, Component, ComponentState}, + contract_runtime::{types::EraPrice, utils::handle_protocol_upgrade}, effect::{ - announcements::ContractRuntimeAnnouncement, - requests::{ConsensusRequest, ContractRuntimeRequest, LinearChainRequest, StorageRequest}, + announcements::{ + ContractRuntimeAnnouncement, FatalAnnouncement, MetaBlockAnnouncement, + UnexecutedBlockAnnouncement, + }, + incoming::{TrieDemand, TrieRequest as TrieRequestMessage, TrieRequestIncoming}, + requests::{ContractRuntimeRequest, NetworkRequest, StorageRequest}, EffectBuilder, EffectExt, Effects, }, + fatal, + protocol::Message, types::{ - Block, BlockHash, BlockHeader, Chainspec, Deploy, DeployHash, DeployHeader, FinalizedBlock, - NodeId, + BlockPayload, ExecutableBlock, FinalizedBlock, InternalEraReport, MetaBlockState, + TrieOrChunk, TrieOrChunkId, }, - utils::WithDir, - NodeRng, StorageConfig, + NodeRng, }; +pub(crate) use config::Config; +pub(crate) use error::{BlockExecutionError, ConfigError, ContractRuntimeError, StateResultError}; +pub(crate) use event::Event; +use exec_queue::{ExecQueue, QueueItem}; +use metrics::Metrics; +#[cfg(test)] +pub(crate) use operations::compute_execution_results_checksum; +pub use operations::execute_finalized_block; +use operations::speculatively_execute; +pub(crate) use types::{ + BlockAndExecutionArtifacts, ExecutionArtifact, ExecutionPreState, SpeculativeExecutionResult, + StepOutcome, +}; +use utils::{exec_and_check_next, run_intensive_task}; -/// Contract runtime component event. -#[derive(Debug, From)] -pub enum Event { - /// A request made for the contract runtime component. 
- #[from] - Request(Box), - /// Indicates that block has already been finalized and executed in the past. - BlockAlreadyExists(Box), - /// Indicates that a block is not known yet, and needs to be executed. - BlockIsNew(Box), - - /// Results received by the contract runtime. - #[from] - Result(Box), -} - -/// Contract runtime component event. -#[derive(Debug, From)] -pub enum ContractRuntimeResult { - /// Received all requested deploys. - GetDeploysResult { - /// The block that needs the deploys for execution. - finalized_block: FinalizedBlock, - /// Contents of deploys. All deploys are expected to be present in the storage component. - deploys: VecDeque, - }, - /// Received a parent result. - GetParentResult { - /// The block that needs the deploys for execution. - finalized_block: FinalizedBlock, - /// Contents of deploys. All deploys are expected to be present in the storage component. - deploys: VecDeque, - /// Parent of the newly finalized block. - /// If it's the first block after Genesis then `parent` is `None`. - parent: Option<(BlockHash, Digest, Digest)>, - }, - /// The result of running the step on a switch block. - RunStepResult { - /// State of this request. - state: Box, - /// The result. - result: Result, - }, - /// Once a block is executed and committed, re-enter evented flow. - ExecutedAndCommitted(Box), -} - -/// Convenience trait for ContractRuntime's accepted event types. 
-pub trait ReactorEventT: - From - + From - + From> - + From - + From - + From - + Send -{ -} - -impl ReactorEventT for REv where - REv: From - + From - + From> - + From - + From - + From - + Send -{ -} - -#[derive(DataSize, Debug)] -struct ExecutedBlockSummary { - hash: BlockHash, - state_root_hash: Digest, - accumulated_seed: Digest, -} +const COMPONENT_NAME: &str = "contract_runtime"; -type BlockHeight = u64; +pub(crate) const APPROVALS_CHECKSUM_NAME: &str = "approvals_checksum"; +pub(crate) const EXECUTION_RESULTS_CHECKSUM_NAME: &str = "execution_results_checksum"; /// The contract runtime components. #[derive(DataSize)] -pub struct ContractRuntime { - initial_state: InitialState, - engine_state: Arc>, - metrics: Arc, - - protocol_version: ProtocolVersion, - - /// A mapping from proto block to executed block's ID and post-state hash, to allow - /// identification of a parent block's details once a finalized block has been executed. - /// - /// The key is a tuple of block's height (it's a linear chain so it's monotonically - /// increasing), and the `ExecutedBlockSummary` is derived from the executed block which is - /// created from that proto block. - parent_map: HashMap, - +pub(crate) struct ContractRuntime { + state: ComponentState, + execution_pre_state: Arc>, + #[data_size(skip)] + execution_engine_v1: Arc, + #[data_size(skip)] + execution_engine_v2: ExecutorV2, + metrics: Arc, /// Finalized blocks waiting for their pre-state hash to start executing. - exec_queue: HashMap)>, + exec_queue: ExecQueue, + /// The chainspec. + chainspec: Arc, + #[data_size(skip)] + data_access_layer: Arc>, + current_gas_price: EraPrice, } impl Debug for ContractRuntime { @@ -161,969 +113,760 @@ impl Debug for ContractRuntime { } } -/// Metrics for the contract runtime component. 
-#[derive(Debug)] -pub struct ContractRuntimeMetrics { - run_execute: Histogram, - apply_effect: Histogram, - commit_upgrade: Histogram, - run_query: Histogram, - commit_step: Histogram, - get_balance: Histogram, - get_validator_weights: Histogram, - get_era_validators: Histogram, - get_era_validator_weights_by_era_id: Histogram, - get_bids: Histogram, - missing_trie_keys: Histogram, - put_trie: Histogram, - read_trie: Histogram, - /// The current chain height. - pub chain_height: IntGauge, -} - -/// Value of upper bound of histogram. -const EXPONENTIAL_BUCKET_START: f64 = 0.01; - -/// Multiplier of previous upper bound for next bound. -const EXPONENTIAL_BUCKET_FACTOR: f64 = 2.0; - -/// Bucket count, with the last bucket going to +Inf which will not be included in the results. -/// - start = 0.01, factor = 2.0, count = 10 -/// - start * factor ^ count = 0.01 * 2.0 ^ 10 = 10.24 -/// - Values above 10.24 (f64 seconds here) will not fall in a bucket that is kept. -const EXPONENTIAL_BUCKET_COUNT: usize = 10; - -const RUN_EXECUTE_NAME: &str = "contract_runtime_run_execute"; -const RUN_EXECUTE_HELP: &str = "tracking run of engine_state.run_execute in seconds."; -const APPLY_EFFECT_NAME: &str = "contract_runtime_apply_commit"; -const APPLY_EFFECT_HELP: &str = "tracking run of engine_state.apply_effect in seconds."; -const RUN_QUERY_NAME: &str = "contract_runtime_run_query"; -const RUN_QUERY_HELP: &str = "tracking run of engine_state.run_query in seconds."; -const COMMIT_STEP_NAME: &str = "contract_runtime_commit_step"; -const COMMIT_STEP_HELP: &str = "tracking run of engine_state.commit_step in seconds."; -const COMMIT_UPGRADE_NAME: &str = "contract_runtime_commit_upgrade"; -const COMMIT_UPGRADE_HELP: &str = "tracking run of engine_state.commit_upgrade in seconds"; -const GET_BALANCE_NAME: &str = "contract_runtime_get_balance"; -const GET_BALANCE_HELP: &str = "tracking run of engine_state.get_balance in seconds."; -const GET_VALIDATOR_WEIGHTS_NAME: &str = 
"contract_runtime_get_validator_weights"; -const GET_VALIDATOR_WEIGHTS_HELP: &str = - "tracking run of engine_state.get_validator_weights in seconds."; -const GET_ERA_VALIDATORS_NAME: &str = "contract_runtime_get_era_validators"; -const GET_ERA_VALIDATORS_HELP: &str = "tracking run of engine_state.get_era_validators in seconds."; -const GET_ERA_VALIDATORS_WEIGHT_BY_ERA_ID_NAME: &str = - "contract_runtime_get_era_validator_weights_by_era_id"; -const GET_ERA_VALIDATORS_WEIGHT_BY_ERA_ID_HELP: &str = - "tracking run of engine_state.get_era_validator_weights_by_era_id in seconds."; -const GET_BIDS_NAME: &str = "contract_runtime_get_bids"; -const GET_BIDS_HELP: &str = "tracking run of engine_state.get_bids in seconds."; -const READ_TRIE_NAME: &str = "contract_runtime_read_trie"; -const READ_TRIE_HELP: &str = "tracking run of engine_state.read_trie in seconds."; -const PUT_TRIE_NAME: &str = "contract_runtime_put_trie"; -const PUT_TRIE_HELP: &str = "tracking run of engine_state.put_trie in seconds."; -const MISSING_TRIE_KEYS_NAME: &str = "contract_runtime_missing_trie_keys"; -const MISSING_TRIE_KEYS_HELP: &str = "tracking run of engine_state.missing_trie_keys in seconds."; - -/// Create prometheus Histogram and register. -fn register_histogram_metric( - registry: &Registry, - metric_name: &str, - metric_help: &str, -) -> Result { - let common_buckets = prometheus::exponential_buckets( - EXPONENTIAL_BUCKET_START, - EXPONENTIAL_BUCKET_FACTOR, - EXPONENTIAL_BUCKET_COUNT, - )?; - let histogram_opts = HistogramOpts::new(metric_name, metric_help).buckets(common_buckets); - let histogram = Histogram::with_opts(histogram_opts)?; - registry.register(Box::new(histogram.clone()))?; - Ok(histogram) -} - -impl ContractRuntimeMetrics { - /// Constructor of metrics which creates and registers metrics objects for use. 
- fn new(registry: &Registry) -> Result { - let chain_height = IntGauge::new("chain_height", "current chain height")?; - registry.register(Box::new(chain_height.clone()))?; - Ok(ContractRuntimeMetrics { - chain_height, - run_execute: register_histogram_metric(registry, RUN_EXECUTE_NAME, RUN_EXECUTE_HELP)?, - apply_effect: register_histogram_metric( - registry, - APPLY_EFFECT_NAME, - APPLY_EFFECT_HELP, - )?, - run_query: register_histogram_metric(registry, RUN_QUERY_NAME, RUN_QUERY_HELP)?, - commit_step: register_histogram_metric(registry, COMMIT_STEP_NAME, COMMIT_STEP_HELP)?, - commit_upgrade: register_histogram_metric( - registry, - COMMIT_UPGRADE_NAME, - COMMIT_UPGRADE_HELP, - )?, - get_balance: register_histogram_metric(registry, GET_BALANCE_NAME, GET_BALANCE_HELP)?, - get_validator_weights: register_histogram_metric( - registry, - GET_VALIDATOR_WEIGHTS_NAME, - GET_VALIDATOR_WEIGHTS_HELP, - )?, - get_era_validators: register_histogram_metric( - registry, - GET_ERA_VALIDATORS_NAME, - GET_ERA_VALIDATORS_HELP, - )?, - get_era_validator_weights_by_era_id: register_histogram_metric( - registry, - GET_ERA_VALIDATORS_WEIGHT_BY_ERA_ID_NAME, - GET_ERA_VALIDATORS_WEIGHT_BY_ERA_ID_HELP, - )?, - get_bids: register_histogram_metric(registry, GET_BIDS_NAME, GET_BIDS_HELP)?, - read_trie: register_histogram_metric(registry, READ_TRIE_NAME, READ_TRIE_HELP)?, - put_trie: register_histogram_metric(registry, PUT_TRIE_NAME, PUT_TRIE_HELP)?, - missing_trie_keys: register_histogram_metric( - registry, - MISSING_TRIE_KEYS_NAME, - MISSING_TRIE_KEYS_HELP, - )?, - }) - } -} - -impl Component for ContractRuntime -where - REv: From + Send, -{ - type Event = Event; - type ConstructionError = ConfigError; - - fn handle_event( - &mut self, - effect_builder: EffectBuilder, - _rng: &mut NodeRng, - event: Self::Event, - ) -> Effects { - match event { - Event::Request(request) => { - match *request { - ContractRuntimeRequest::GetProtocolData { - protocol_version, - responder, - } => { - let result 
= self - .engine_state - .get_protocol_data(protocol_version) - .map(|inner| inner.map(Box::new)); - - responder.respond(result).ignore() - } - ContractRuntimeRequest::CommitGenesis { - chainspec, - responder, - } => { - let result = self.commit_genesis(chainspec); - responder.respond(result).ignore() - } - ContractRuntimeRequest::Upgrade { - upgrade_config, - responder, - } => { - trace!(?upgrade_config, "upgrade"); - let engine_state = Arc::clone(&self.engine_state); - let metrics = Arc::clone(&self.metrics); - async move { - let correlation_id = CorrelationId::new(); - let start = Instant::now(); - let result = - engine_state.commit_upgrade(correlation_id, *upgrade_config); - metrics - .commit_upgrade - .observe(start.elapsed().as_secs_f64()); - trace!(?result, "upgrade result"); - responder.respond(result).await - } - .ignore() - } - ContractRuntimeRequest::Query { - query_request, - responder, - } => { - trace!(?query_request, "query"); - let engine_state = Arc::clone(&self.engine_state); - let metrics = Arc::clone(&self.metrics); - async move { - let correlation_id = CorrelationId::new(); - let start = Instant::now(); - let result = engine_state.run_query(correlation_id, query_request); - metrics.run_query.observe(start.elapsed().as_secs_f64()); - trace!(?result, "query result"); - responder.respond(result).await - } - .ignore() - } - ContractRuntimeRequest::GetBalance { - balance_request, - responder, - } => { - trace!(?balance_request, "balance"); - let engine_state = Arc::clone(&self.engine_state); - let metrics = Arc::clone(&self.metrics); - async move { - let correlation_id = CorrelationId::new(); - let start = Instant::now(); - let result = engine_state.get_purse_balance( - correlation_id, - balance_request.state_hash(), - balance_request.purse_uref(), - ); - metrics.get_balance.observe(start.elapsed().as_secs_f64()); - trace!(?result, "balance result"); - responder.respond(result).await - } - .ignore() - } - ContractRuntimeRequest::IsBonded { - 
state_root_hash, - era_id, - protocol_version, - public_key: validator_key, - responder, - } => { - trace!(era=%era_id, public_key = %validator_key, "is validator bonded request"); - let engine_state = Arc::clone(&self.engine_state); - let metrics = Arc::clone(&self.metrics); - let request = - GetEraValidatorsRequest::new(state_root_hash.into(), protocol_version); - async move { - let correlation_id = CorrelationId::new(); - let start = Instant::now(); - let era_validators = - engine_state.get_era_validators(correlation_id, request); - metrics - .get_validator_weights - .observe(start.elapsed().as_secs_f64()); - trace!(?era_validators, "is validator bonded result"); - let is_bonded = era_validators.and_then(|validator_map| { - match validator_map.get(&era_id) { - None => Err(GetEraValidatorsError::EraValidatorsMissing), - Some(era_validators) => { - Ok(era_validators.contains_key(&validator_key)) - } - } - }); - responder.respond(is_bonded).await - } - .ignore() - } - ContractRuntimeRequest::GetEraValidators { request, responder } => { - trace!(?request, "get era validators request"); - let engine_state = Arc::clone(&self.engine_state); - let metrics = Arc::clone(&self.metrics); - // Increment the counter to track the amount of times GetEraValidators was - // requested. 
- async move { - let correlation_id = CorrelationId::new(); - let start = Instant::now(); - let era_validators = - engine_state.get_era_validators(correlation_id, request.into()); - metrics - .get_era_validators - .observe(start.elapsed().as_secs_f64()); - trace!(?era_validators, "get era validators response"); - responder.respond(era_validators).await - } - .ignore() - } - ContractRuntimeRequest::GetValidatorWeightsByEraId { request, responder } => { - trace!(?request, "get validator weights by era id request"); - let engine_state = Arc::clone(&self.engine_state); - let metrics = Arc::clone(&self.metrics); - // Increment the counter to track the amount of times - // GetEraValidatorsByEraId was requested. - async move { - let correlation_id = CorrelationId::new(); - let start = Instant::now(); - let era_id = request.era_id(); - let era_validators = - engine_state.get_era_validators(correlation_id, request.into()); - let result: Result, GetEraValidatorsError> = - match era_validators { - Ok(era_validators) => { - let validator_weights = - era_validators.get(&era_id).cloned(); - Ok(validator_weights) - } - Err(GetEraValidatorsError::EraValidatorsMissing) => Ok(None), - Err(error) => Err(error), - }; - metrics - .get_era_validator_weights_by_era_id - .observe(start.elapsed().as_secs_f64()); - trace!(?result, "get validator weights by era id response"); - responder.respond(result).await - } - .ignore() - } - ContractRuntimeRequest::Step { - step_request, - responder, - } => { - trace!(?step_request, "step request"); - let engine_state = Arc::clone(&self.engine_state); - let metrics = Arc::clone(&self.metrics); - async move { - let correlation_id = CorrelationId::new(); - let start = Instant::now(); - let result = engine_state.commit_step(correlation_id, step_request); - metrics.commit_step.observe(start.elapsed().as_secs_f64()); - trace!(?result, "step response"); - responder.respond(result).await - } - .ignore() - } - ContractRuntimeRequest::ReadTrie { - trie_key, - 
responder, - } => { - trace!(?trie_key, "read_trie request"); - let engine_state = Arc::clone(&self.engine_state); - let metrics = Arc::clone(&self.metrics); - async move { - let correlation_id = CorrelationId::new(); - let start = Instant::now(); - let result = engine_state.read_trie(correlation_id, trie_key); - metrics.read_trie.observe(start.elapsed().as_secs_f64()); - let result = match result { - Ok(result) => result, - Err(error) => { - error!(?error, "read_trie_request"); - None - } - }; - trace!(?result, "read_trie response"); - responder.respond(result).await - } - .ignore() - } - ContractRuntimeRequest::PutTrie { trie, responder } => { - trace!(?trie, "put_trie request"); - let engine_state = Arc::clone(&self.engine_state); - let metrics = Arc::clone(&self.metrics); - async move { - let correlation_id = CorrelationId::new(); - let start = Instant::now(); - let result = engine_state - .put_trie_and_find_missing_descendant_trie_keys( - correlation_id, - &*trie, - ); - metrics.put_trie.observe(start.elapsed().as_secs_f64()); - trace!(?result, "put_trie response"); - responder.respond(result).await - } - .ignore() - } - ContractRuntimeRequest::ExecuteBlock(finalized_block) => { - debug!(?finalized_block, "execute block"); - effect_builder - .get_block_at_height_local(finalized_block.height()) - .event(move |maybe_block| { - maybe_block.map(Box::new).map_or_else( - || Event::BlockIsNew(Box::new(finalized_block)), - Event::BlockAlreadyExists, - ) - }) - } - ContractRuntimeRequest::GetBids { - get_bids_request, - responder, - } => { - trace!(?get_bids_request, "get bids request"); - let engine_state = Arc::clone(&self.engine_state); - let metrics = Arc::clone(&self.metrics); - async move { - let correlation_id = CorrelationId::new(); - let start = Instant::now(); - let result = engine_state.get_bids(correlation_id, get_bids_request); - metrics.get_bids.observe(start.elapsed().as_secs_f64()); - trace!(?result, "get bids result"); - responder.respond(result).await 
- } - .ignore() - } - ContractRuntimeRequest::MissingTrieKeys { - trie_key, - responder, - } => { - trace!(?trie_key, "missing_trie_keys request"); - let engine_state = Arc::clone(&self.engine_state); - let metrics = Arc::clone(&self.metrics); - async move { - let correlation_id = CorrelationId::new(); - let start = Instant::now(); - let result = - engine_state.missing_trie_keys(correlation_id, vec![trie_key]); - metrics.read_trie.observe(start.elapsed().as_secs_f64()); - trace!(?result, "missing_trie_keys response"); - responder.respond(result).await - } - .ignore() - } - } - } - Event::BlockAlreadyExists(block) => effect_builder - .announce_block_already_executed(*block) - .ignore(), - // If we haven't executed the block before in the past (for example during - // joining), do it now. - Event::BlockIsNew(finalized_block) => { - self.get_deploys(effect_builder, *finalized_block) - } - Event::Result(contract_runtime_result) => match *contract_runtime_result { - ContractRuntimeResult::GetDeploysResult { - finalized_block, - deploys, - } => { - trace!(total = %deploys.len(), ?deploys, "fetched deploys"); - self.handle_get_deploys_result(effect_builder, finalized_block, deploys) - } - - ContractRuntimeResult::GetParentResult { - finalized_block, - deploys, - parent, - } => { - trace!(parent_found = %parent.is_some(), finalized_height = %finalized_block.height(), "fetched parent"); - let parent_summary = parent.map(|(hash, accumulated_seed, state_root_hash)| { - ExecutedBlockSummary { - hash, - state_root_hash, - accumulated_seed, - } - }); - self.handle_get_parent_result( - effect_builder, - finalized_block, - deploys, - parent_summary, - ) - } - - ContractRuntimeResult::RunStepResult { mut state, result } => { - trace!(?result, "run step result"); - match result { - Ok(StepResult::Success { - post_state_hash, - next_era_validators, - }) => { - state.state_root_hash = post_state_hash.into(); - self.finalize_block_execution( - effect_builder, - state, - 
Some(next_era_validators), - ) - } - _ => { - // When step fails, the auction process is broken and we should panic. - error!(?result, "run step failed - internal contract runtime error"); - panic!("unable to run step"); - } - } - } - ContractRuntimeResult::ExecutedAndCommitted(state) => { - self.execute_all_deploys_or_finalize_block_or_step(effect_builder, state) - } - }, - } - } -} - -/// Error returned from mis-configuring the contract runtime component. -#[derive(Debug, Error)] -pub enum ConfigError { - /// Error initializing the LMDB environment. - #[error("failed to initialize LMDB environment for contract runtime: {0}")] - Lmdb(#[from] StorageLmdbError), - /// Error initializing metrics. - #[error("failed to initialize metrics for contract runtime: {0}")] - Prometheus(#[from] prometheus::Error), -} - impl ContractRuntime { pub(crate) fn new( - initial_state_root_hash: Digest, - initial_block_header: Option<&BlockHeader>, - protocol_version: ProtocolVersion, - storage_config: WithDir, + storage_dir: &Path, contract_runtime_config: &Config, + chainspec: Arc, registry: &Registry, ) -> Result { - let initial_state = InitialState::new(initial_state_root_hash, initial_block_header); - let path = storage_config.with_dir(storage_config.value().path.clone()); - let environment = Arc::new(LmdbEnvironment::new( - path.as_path(), - contract_runtime_config.max_global_state_size(), - contract_runtime_config.max_readers(), - )?); + let execution_pre_state = Arc::new(Mutex::new(ExecutionPreState::default())); - let trie_store = Arc::new(LmdbTrieStore::new( - &environment, - None, - DatabaseFlags::empty(), - )?); - - let protocol_data_store = Arc::new(LmdbProtocolDataStore::new( - &environment, - None, - DatabaseFlags::empty(), - )?); + let current_gas_price = match chainspec.protocol_config.activation_point { + ActivationPoint::EraId(era_id) => { + EraPrice::new(era_id, chainspec.vacancy_config.min_gas_price) + } + ActivationPoint::Genesis(_) => { + 
EraPrice::new(EraId::new(0), chainspec.vacancy_config.min_gas_price) + } + }; + let enable_addressable_entity = chainspec.core_config.enable_addressable_entity; + let engine_config = EngineConfigBuilder::new() + .with_max_query_depth(contract_runtime_config.max_query_depth_or_default()) + .with_max_associated_keys(chainspec.core_config.max_associated_keys) + .with_max_runtime_call_stack_height(chainspec.core_config.max_runtime_call_stack_height) + .with_minimum_delegation_amount(chainspec.core_config.minimum_delegation_amount) + .with_maximum_delegation_amount(chainspec.core_config.maximum_delegation_amount) + .with_strict_argument_checking(chainspec.core_config.strict_argument_checking) + .with_vesting_schedule_period_millis( + chainspec.core_config.vesting_schedule_period.millis(), + ) + .with_max_delegators_per_validator(chainspec.core_config.max_delegators_per_validator) + .with_wasm_config(chainspec.wasm_config) + .with_system_config(chainspec.system_costs_config) + .with_administrative_accounts(chainspec.core_config.administrators.clone()) + .with_allow_auction_bids(chainspec.core_config.allow_auction_bids) + .with_allow_unrestricted_transfers(chainspec.core_config.allow_unrestricted_transfers) + .with_refund_handling(chainspec.core_config.refund_handling) + .with_fee_handling(chainspec.core_config.fee_handling) + .with_enable_entity(enable_addressable_entity) + .with_trap_on_ambiguous_entity_version( + chainspec.core_config.trap_on_ambiguous_entity_version, + ) + .with_protocol_version(chainspec.protocol_version()) + .with_storage_costs(chainspec.storage_costs) + .with_minimum_bid_amount(chainspec.core_config.minimum_bid_amount) + .build(); + + let data_access_layer = Arc::new( + Self::new_data_access_layer( + storage_dir, + contract_runtime_config, + enable_addressable_entity, + ) + .map_err(ConfigError::GlobalState)?, + ); - let global_state = LmdbGlobalState::empty(environment, trie_store, protocol_data_store)?; - let engine_config = EngineConfig::new(); 
+ let execution_engine_v1 = Arc::new(ExecutionEngineV1::new(engine_config)); + + let executor_v2 = { + let executor_config = ExecutorConfigBuilder::default() + .with_memory_limit(chainspec.wasm_config.v2().max_memory()) + .with_executor_kind(ExecutorKind::Compiled) + .with_wasm_config(*chainspec.wasm_config.v2()) + .with_storage_costs(chainspec.storage_costs) + .with_message_limits(chainspec.wasm_config.messages_limits()) + .build() + .expect("Should build"); + ExecutorV2::new(executor_config, Arc::clone(&execution_engine_v1)) + }; - let engine_state = Arc::new(EngineState::new(global_state, engine_config)); + let metrics = Arc::new(Metrics::new(registry)?); - let metrics = Arc::new(ContractRuntimeMetrics::new(registry)?); Ok(ContractRuntime { - initial_state, - protocol_version, - parent_map: HashMap::new(), - exec_queue: HashMap::new(), - engine_state, + state: ComponentState::Initialized, + execution_pre_state, + execution_engine_v1, + execution_engine_v2: executor_v2, metrics, + exec_queue: Default::default(), + chainspec, + data_access_layer, + current_gas_price, }) } - /// Commits a genesis using a chainspec - fn commit_genesis( - &self, - chainspec: Arc, - ) -> Result { - let correlation_id = CorrelationId::new(); - let genesis_config_hash = chainspec.hash(); - let protocol_version = chainspec.protocol_config.version; - // Transforms a chainspec into a valid genesis config for execution engine. - let ee_config = chainspec.as_ref().into(); - self.engine_state.commit_genesis( - correlation_id, - genesis_config_hash.into(), - protocol_version, - &ee_config, - ) - } + pub(crate) fn set_initial_state(&mut self, sequential_block_state: ExecutionPreState) { + let next_block_height = sequential_block_state.next_block_height(); + let mut execution_pre_state = self.execution_pre_state.lock().unwrap(); + *execution_pre_state = sequential_block_state; - /// Retrieve trie keys for the integrity check. 
- pub fn trie_store_check(&self, trie_keys: Vec) -> Vec { - let correlation_id = CorrelationId::new(); - match self - .engine_state - .missing_trie_keys(correlation_id, trie_keys) - { - Ok(keys) => keys, - Err(error) => panic!("Error in retrieving keys for DB check: {:?}", error), - } + let new_len = self + .exec_queue + .remove_older_then(execution_pre_state.next_block_height()); + self.metrics.exec_queue_size.set(new_len); + debug!(next_block_height, "ContractRuntime: set initial state"); } - pub(crate) fn set_initial_state( - &mut self, - initial_state_root_hash: Digest, - initial_block_header: Option<&BlockHeader>, - ) { - self.initial_state = InitialState::new(initial_state_root_hash, initial_block_header); + fn new_data_access_layer( + storage_dir: &Path, + contract_runtime_config: &Config, + enable_addressable_entity: bool, + ) -> Result, casper_storage::global_state::error::Error> { + let data_access_layer = { + let environment = Arc::new(LmdbEnvironment::new( + storage_dir, + contract_runtime_config.max_global_state_size_or_default(), + contract_runtime_config.max_readers_or_default(), + contract_runtime_config.manual_sync_enabled_or_default(), + )?); + + let trie_store = Arc::new(LmdbTrieStore::new( + &environment, + None, + DatabaseFlags::empty(), + )?); + + let block_store = BlockStore::new(); + + let max_query_depth = contract_runtime_config.max_query_depth_or_default(); + let global_state = LmdbGlobalState::empty( + environment, + trie_store, + max_query_depth, + enable_addressable_entity, + )?; + + DataAccessLayer { + state: global_state, + block_store, + max_query_depth, + enable_addressable_entity, + } + }; + Ok(data_access_layer) } - /// Adds the "parent map" to the instance of `ContractRuntime`. - /// - /// When transitioning from `joiner` to `validator` states we need - /// to carry over the last finalized block so that the next blocks in the linear chain - /// have the state to build on. 
- pub(crate) fn set_parent_map_from_block(&mut self, lfb: Option) { - let parent_map = lfb - .into_iter() - .map(|block| { - ( - block.height(), - ExecutedBlockSummary { - hash: *block.hash(), - state_root_hash: *block.state_root_hash(), - accumulated_seed: block.header().accumulated_seed(), - }, - ) - }) - .collect(); - self.parent_map = parent_map; + /// How many blocks are backed up in the queue + pub(crate) fn queue_depth(&self) -> usize { + self.exec_queue.len() } - /// Gets the deploy(s) of the given finalized block from storage. - fn get_deploys( - &mut self, - effect_builder: EffectBuilder, - finalized_block: FinalizedBlock, - ) -> Effects { - let deploy_hashes = finalized_block - .deploys_and_transfers_iter() - .copied() - .collect::>(); - if deploy_hashes.is_empty() { - let result_event = move |_| { - Event::Result(Box::new(ContractRuntimeResult::GetDeploysResult { - finalized_block, - deploys: VecDeque::new(), - })) - }; - return effect_builder.immediately().event(result_event); - } - - let era_id = finalized_block.era_id(); - let height = finalized_block.height(); - - // Get all deploys in order they appear in the finalized block. - effect_builder - .get_deploys_from_storage(deploy_hashes) - .event(move |result| { - Event::Result(Box::new(ContractRuntimeResult::GetDeploysResult { - finalized_block, - deploys: result - .into_iter() - // Assumes all deploys are present - .map(|maybe_deploy| { - maybe_deploy.unwrap_or_else(|| { - panic!( - "deploy for block in era={} and height={} is expected to exist \ - in the storage", - era_id, height - ) - }) - }) - .collect(), - })) - }) - } + /// Commits a genesis request. 
+ pub(crate) fn commit_genesis( + &self, + chainspec: &Chainspec, + chainspec_raw_bytes: &ChainspecRawBytes, + ) -> GenesisResult { + debug!("commit_genesis"); + let start = Instant::now(); + let protocol_version = chainspec.protocol_config.version; + let chainspec_hash = chainspec.hash(); + let genesis_config = chainspec.into(); + let account_bytes = match chainspec_raw_bytes.maybe_genesis_accounts_bytes() { + Some(bytes) => bytes, + None => { + error!("failed to provide genesis account bytes in commit genesis"); + return GenesisResult::Failure(GenesisError::MissingGenesisAccounts); + } + }; - /// Creates and announces the linear chain block. - fn finalize_block_execution( - &mut self, - effect_builder: EffectBuilder, - state: Box, - next_era_validator_weights: Option>, - ) -> Effects { - // The state hash of the last execute-commit cycle is used as the block's post state - // hash. - let next_height = state.finalized_block.height() + 1; - // Update the metric. - self.metrics - .chain_height - .set(state.finalized_block.height() as i64); - let block = self.create_block( - state.finalized_block, - state.state_root_hash, - next_era_validator_weights, + let chainspec_registry = ChainspecRegistry::new_with_genesis( + chainspec_raw_bytes.chainspec_bytes(), + account_bytes, ); - let mut effects = effect_builder - .announce_linear_chain_block(block, state.execution_results) - .ignore(); - // If the child is already finalized, start execution. 
- if let Some((finalized_block, deploys)) = self.exec_queue.remove(&next_height) { - effects.extend(self.handle_get_deploys_result( - effect_builder, - finalized_block, - deploys, - )); - } - effects - } + let genesis_request = GenesisRequest::new( + chainspec_hash, + protocol_version, + genesis_config, + chainspec_registry, + ); - fn execute_all_deploys_or_finalize_block_or_step( - &mut self, - effect_builder: EffectBuilder, - state: Box, - ) -> Effects { - if state.remaining_deploys.is_empty() { - self.finalize_block_or_step(effect_builder, state) - } else { - self.execute_all_deploys_in_block(state) + let data_access_layer = Arc::clone(&self.data_access_layer); + let result = data_access_layer.genesis(genesis_request); + self.metrics + .commit_genesis + .observe(start.elapsed().as_secs_f64()); + debug!(?result, "upgrade result"); + if result.is_success() { + let flush_req = FlushRequest::new(); + if let FlushResult::Failure(err) = data_access_layer.flush(flush_req) { + return GenesisResult::Failure(GenesisError::TrackingCopy( + TrackingCopyError::Storage(err), + )); + } } + result } - fn finalize_block_or_step( + /// Handles a contract runtime request. 
+ fn handle_contract_runtime_request( &mut self, effect_builder: EffectBuilder, - state: Box, - ) -> Effects { - let era_end = match state.finalized_block.era_report() { - Some(era_end) => era_end, - // Not at a switch block, so we don't need to have next_era_validators when - // constructing the next block - None => return self.finalize_block_execution(effect_builder, state, None), - }; - let reward_items = era_end - .rewards - .iter() - .map(|(vid, &value)| RewardItem::new(vid.clone(), value)) - .collect(); - let slash_items = era_end - .equivocators - .iter() - .map(|vid| SlashItem::new(vid.clone())) - .collect(); - let evict_items = era_end - .inactive_validators - .iter() - .map(|vid| EvictItem::new(vid.clone())) - .collect(); - let era_end_timestamp_millis = state.finalized_block.timestamp().millis(); - let request = StepRequest { - pre_state_hash: state.state_root_hash.into(), - protocol_version: self.protocol_version, - reward_items, - slash_items, - evict_items, - run_auction: true, - next_era_id: state.finalized_block.era_id().successor(), - era_end_timestamp_millis, - }; - effect_builder.run_step(request).event(|result| { - Event::Result(Box::new(ContractRuntimeResult::RunStepResult { - state, - result, - })) - }) - } - - fn execute_all_deploys_in_block(&mut self, mut state: Box) -> Effects { - let engine_state = Arc::clone(&self.engine_state); - let metrics = Arc::clone(&self.metrics); - let protocol_version = self.protocol_version; - let block_time = state.finalized_block.timestamp().millis(); - let proposer = state.finalized_block.proposer(); - async move { - for deploy in state.remaining_deploys.drain(..) 
{ - let deploy_hash = *deploy.id(); - let deploy_header = deploy.header().clone(); - let deploy_item = DeployItem::from(deploy); - - let execute_request = ExecuteRequest::new( - state.state_root_hash.into(), - block_time, - vec![deploy_item], - protocol_version, - proposer.clone(), - ); - - // TODO: this is currently working coincidentally because we are passing only one - // deploy_item per exec. The execution results coming back from the ee lacks the - // mapping between deploy_hash and execution result, and this outer logic is - // enriching it with the deploy hash. If we were passing multiple deploys per exec - // the relation between the deploy and the execution results would be lost. - let result = - operations::execute(engine_state.clone(), metrics.clone(), execute_request) + _rng: &mut NodeRng, + request: ContractRuntimeRequest, + ) -> Effects + where + REv: From + + From + + From + + From + + From + + From + + Send, + { + match request { + ContractRuntimeRequest::Query { + request: query_request, + responder, + } => { + trace!(?query_request, "query"); + let metrics = Arc::clone(&self.metrics); + let data_access_layer = Arc::clone(&self.data_access_layer); + async move { + let start = Instant::now(); + let result = data_access_layer.query(query_request); + metrics.run_query.observe(start.elapsed().as_secs_f64()); + trace!(?result, "query result"); + responder.respond(result).await + } + .ignore() + } + ContractRuntimeRequest::QueryByPrefix { + request: query_request, + responder, + } => { + trace!(?query_request, "query by prefix"); + let metrics = Arc::clone(&self.metrics); + let data_access_layer = Arc::clone(&self.data_access_layer); + async move { + let start = Instant::now(); + + let result = data_access_layer.prefixed_values(query_request); + metrics.run_query.observe(start.elapsed().as_secs_f64()); + trace!(?result, "query by prefix result"); + responder.respond(result).await + } + .ignore() + } + ContractRuntimeRequest::GetBalance { + request: 
balance_request, + responder, + } => { + trace!(?balance_request, "balance"); + let metrics = Arc::clone(&self.metrics); + let data_access_layer = Arc::clone(&self.data_access_layer); + async move { + let start = Instant::now(); + let result = data_access_layer.balance(balance_request); + metrics.get_balance.observe(start.elapsed().as_secs_f64()); + trace!(?result, "balance result"); + responder.respond(result).await + } + .ignore() + } + ContractRuntimeRequest::GetEraValidators { + request: era_validators_request, + responder, + } => { + trace!(?era_validators_request, "get era validators request"); + let metrics = Arc::clone(&self.metrics); + let data_access_layer = Arc::clone(&self.data_access_layer); + async move { + let start = Instant::now(); + let result = data_access_layer.era_validators(era_validators_request); + metrics + .get_era_validators + .observe(start.elapsed().as_secs_f64()); + trace!(?result, "era validators result"); + responder.respond(result).await + } + .ignore() + } + ContractRuntimeRequest::GetSeigniorageRecipients { request, responder } => { + trace!(?request, "get seigniorage recipients request"); + let metrics = Arc::clone(&self.metrics); + let data_access_layer = Arc::clone(&self.data_access_layer); + async move { + let start = Instant::now(); + let result = data_access_layer.seigniorage_recipients(request); + metrics + .get_seigniorage_recipients + .observe(start.elapsed().as_secs_f64()); + trace!(?result, "seigniorage recipients result"); + responder.respond(result).await + } + .ignore() + } + ContractRuntimeRequest::GetExecutionResultsChecksum { + state_root_hash, + responder, + } => { + trace!(?state_root_hash, "get execution results checksum request"); + let metrics = Arc::clone(&self.metrics); + let data_access_layer = Arc::clone(&self.data_access_layer); + async move { + let start = Instant::now(); + let request = ExecutionResultsChecksumRequest::new(state_root_hash); + let result = 
data_access_layer.execution_result_checksum(request); + metrics + .execution_results_checksum + .observe(start.elapsed().as_secs_f64()); + trace!(?result, "execution result checksum"); + responder.respond(result).await + } + .ignore() + } + ContractRuntimeRequest::GetAddressableEntity { + state_root_hash, + entity_addr, + responder, + } => { + trace!(?state_root_hash, "get addressable entity"); + let metrics = Arc::clone(&self.metrics); + let data_access_layer = Arc::clone(&self.data_access_layer); + async move { + let start = Instant::now(); + let entity_key = match entity_addr { + EntityAddr::SmartContract(_) | EntityAddr::System(_) => Key::AddressableEntity(entity_addr), + EntityAddr::Account(account) => Key::Account(AccountHash::new(account)), + }; + let request = AddressableEntityRequest::new(state_root_hash, entity_key); + let result = data_access_layer.addressable_entity(request); + let result = match &result { + AddressableEntityResult::ValueNotFound(msg) => { + if entity_addr.is_contract() { + trace!(%msg, "can not read addressable entity by Key::AddressableEntity or Key::Account, will try by Key::Hash"); + let entity_key = Key::Hash(entity_addr.value()); + let request = AddressableEntityRequest::new(state_root_hash, entity_key); + data_access_layer.addressable_entity(request) + } + else { + result + } + }, + AddressableEntityResult::RootNotFound | + AddressableEntityResult::Success { .. 
} | + AddressableEntityResult::Failure(_) => result, + }; + + metrics + .addressable_entity + .observe(start.elapsed().as_secs_f64()); + trace!(?result, "get addressable entity"); + responder.respond(result).await + } + .ignore() + } + ContractRuntimeRequest::GetEntryPointExists { + state_root_hash, + contract_hash, + entry_point_name, + responder, + } => { + trace!(?state_root_hash, "get entry point"); + let metrics = Arc::clone(&self.metrics); + let data_access_layer = Arc::clone(&self.data_access_layer); + async move { + let start = Instant::now(); + let request = EntryPointExistsRequest::new( + state_root_hash, + entry_point_name, + contract_hash, + ); + let result = data_access_layer.entry_point_exists(request); + metrics.entry_points.observe(start.elapsed().as_secs_f64()); + trace!(?result, "get addressable entity"); + responder.respond(result).await + } + .ignore() + } + ContractRuntimeRequest::GetTaggedValues { + request: tagged_values_request, + responder, + } => { + trace!(?tagged_values_request, "tagged values request"); + let metrics = Arc::clone(&self.metrics); + let data_access_layer = Arc::clone(&self.data_access_layer); + async move { + let start = Instant::now(); + let result = data_access_layer.tagged_values(tagged_values_request); + metrics + .get_all_values + .observe(start.elapsed().as_secs_f64()); + trace!(?result, "get all values result"); + responder.respond(result).await + } + .ignore() + } + // trie related events + ContractRuntimeRequest::GetTrie { + request: trie_request, + responder, + } => { + trace!(?trie_request, "trie request"); + let metrics = Arc::clone(&self.metrics); + let data_access_layer = Arc::clone(&self.data_access_layer); + async move { + let start = Instant::now(); + let result = data_access_layer.trie(trie_request); + metrics.get_trie.observe(start.elapsed().as_secs_f64()); + trace!(?result, "trie response"); + responder.respond(result).await + } + .ignore() + } + ContractRuntimeRequest::PutTrie { + request: 
put_trie_request, + responder, + } => { + trace!(?put_trie_request, "put trie request"); + let metrics = Arc::clone(&self.metrics); + let data_access_layer = Arc::clone(&self.data_access_layer); + async move { + let start = Instant::now(); + let result = data_access_layer.put_trie(put_trie_request); + let flush_req = FlushRequest::new(); + // PERF: consider flushing periodically. + if let FlushResult::Failure(gse) = data_access_layer.flush(flush_req) { + fatal!(effect_builder, "error flushing data environment {:?}", gse).await; + } + metrics.put_trie.observe(start.elapsed().as_secs_f64()); + trace!(?result, "put trie response"); + responder.respond(result).await + } + .ignore() + } + ContractRuntimeRequest::UpdatePreState { new_pre_state } => { + let next_block_height = new_pre_state.next_block_height(); + self.set_initial_state(new_pre_state); + let current_price = self.current_gas_price.gas_price(); + async move { + let block_header = match effect_builder + .get_highest_complete_block_header_from_storage() + .await + { + Some(header) + if header.is_switch_block() + && (header.height() + 1 == next_block_height) => + { + header + } + Some(_) => { + return fatal!( + effect_builder, + "Latest complete block is not a switch block to update state" + ) + .await; + } + None => { + return fatal!( + effect_builder, + "No complete block header found to update post upgrade state" + ) + .await; + } + }; + + let payload = BlockPayload::new( + BTreeMap::new(), + vec![], + Default::default(), + false, + current_price, + ); + + let finalized_block = FinalizedBlock::new( + payload, + Some(InternalEraReport::default()), + block_header.timestamp(), + block_header.next_block_era_id(), + next_block_height, + PublicKey::System, + ); + + info!("Enqueuing block for execution post state refresh"); + + effect_builder + .enqueue_block_for_execution( + ExecutableBlock::from_finalized_block_and_transactions( + finalized_block, + vec![], + ), + MetaBlockState::new_not_to_be_gossiped(), + ) 
.await; - - trace!(%deploy_hash, ?result, "deploy execution result"); - // As for now a given state is expected to exist. - let execution_results = result.unwrap(); - match operations::commit_execution_effects( - engine_state.clone(), - metrics.clone(), - state.state_root_hash, - deploy_hash, - execution_results, - ) - .await - { - Ok((state_hash, execution_result)) => { - state - .execution_results - .insert(deploy_hash, (deploy_header, execution_result)); - state.state_root_hash = state_hash; + } + .ignore() + } + ContractRuntimeRequest::DoProtocolUpgrade { + protocol_upgrade_config, + next_block_height, + parent_hash, + parent_seed, + } => { + let mut effects = Effects::new(); + let data_access_layer = Arc::clone(&self.data_access_layer); + let metrics = Arc::clone(&self.metrics); + effects.extend( + handle_protocol_upgrade( + effect_builder, + data_access_layer, + metrics, + protocol_upgrade_config, + next_block_height, + parent_hash, + parent_seed, + ) + .ignore(), + ); + effects + } + ContractRuntimeRequest::EnqueueBlockForExecution { + executable_block, + key_block_height_for_activation_point, + meta_block_state, + } => { + let mut effects = Effects::new(); + let mut exec_queue = self.exec_queue.clone(); + let finalized_block_height = executable_block.height; + let era_id = executable_block.era_id; + let current_pre_state = self.execution_pre_state.lock().unwrap(); + let next_block_height = current_pre_state.next_block_height(); + match finalized_block_height.cmp(&next_block_height) { + // An old block: it won't be enqueued: + Ordering::Less => { + debug!( + %era_id, + "ContractRuntime: finalized block({}) precedes expected next block({})", + finalized_block_height, + next_block_height, + ); + effects.extend( + effect_builder + .announce_not_enqueuing_old_executable_block(finalized_block_height) + .ignore(), + ); } - // When commit fails we panic as we'll not be able to execute the next - // block. 
- Err(_err) => panic!("unable to commit"), + // This is a future block, we store it into exec_queue, to be executed later: + Ordering::Greater => { + debug!( + %era_id, + "ContractRuntime: enqueuing({}) waiting for({})", + finalized_block_height, next_block_height + ); + info!( + "ContractRuntime: enqueuing finalized block({}) with {} transactions \ + for execution", + finalized_block_height, + executable_block.transactions.len() + ); + exec_queue.insert(QueueItem { + executable_block, + meta_block_state, + }); + } + // This is the next block to be executed, we do it right away: + Ordering::Equal => { + info!( + "ContractRuntime: execute finalized block({}) with {} transactions", + finalized_block_height, + executable_block.transactions.len() + ); + let data_access_layer = Arc::clone(&self.data_access_layer); + let execution_engine_v1 = Arc::clone(&self.execution_engine_v1); + let execution_engine_v2 = self.execution_engine_v2.clone(); + let chainspec = Arc::clone(&self.chainspec); + let metrics = Arc::clone(&self.metrics); + let shared_pre_state = Arc::clone(&self.execution_pre_state); + // the way this works is inobvious. if the current executable block + // executes and its child is enqueued the underlying logic will + // update the pre-state to refer to the child, pop the child from the queue, + // and send a new event of this kind with the child. it will then get into + // this match arm and get executed without being re-enqueued. 
+ effects.extend( + exec_and_check_next( + data_access_layer, + execution_engine_v1, + execution_engine_v2, + chainspec, + metrics, + exec_queue, + shared_pre_state, + current_pre_state.clone(), + effect_builder, + executable_block, + key_block_height_for_activation_point, + meta_block_state, + ) + .ignore(), + ) + } + } + self.metrics + .exec_queue_size + .set(self.exec_queue.len().try_into().unwrap_or(i64::MIN)); + effects + } + ContractRuntimeRequest::SpeculativelyExecute { + block_header, + transaction, + responder, + } => { + let chainspec = Arc::clone(&self.chainspec); + let data_access_layer = Arc::clone(&self.data_access_layer); + let execution_engine_v1 = Arc::clone(&self.execution_engine_v1); + async move { + let result = run_intensive_task(move || { + speculatively_execute( + data_access_layer.as_ref(), + chainspec.as_ref(), + execution_engine_v1.as_ref(), + *block_header, + *transaction, + ) + }) + .await; + responder.respond(result).await } + .ignore() + } + ContractRuntimeRequest::GetEraGasPrice { era_id, responder } => responder + .respond(self.current_gas_price.maybe_gas_price_for_era_id(era_id)) + .ignore(), + ContractRuntimeRequest::UpdateRuntimePrice(era_id, new_gas_price) => { + self.current_gas_price = EraPrice::new(era_id, new_gas_price); + Effects::new() } - state } - .event(|state| Event::Result(Box::new(ContractRuntimeResult::ExecutedAndCommitted(state)))) } - fn handle_get_deploys_result( - &mut self, + /// Handles an incoming request to get a trie. + fn handle_trie_request( + &self, effect_builder: EffectBuilder, - finalized_block: FinalizedBlock, - deploys: VecDeque, - ) -> Effects { - if let Some(state_root_hash) = self.pre_state_hash(&finalized_block) { - let state = Box::new(RequestState { - finalized_block, - remaining_deploys: deploys, - execution_results: HashMap::new(), - state_root_hash, - }); - self.execute_all_deploys_or_finalize_block_or_step(effect_builder, state) - } else { - // Didn't find parent in the `parent_map` cache. 
- // Read it from the storage. - let height = finalized_block.height(); - effect_builder - .get_block_at_height_local(height - 1) - .event(|parent| { - Event::Result(Box::new(ContractRuntimeResult::GetParentResult { - finalized_block, - deploys, - parent: parent.map(|b| { - ( - *b.hash(), - b.header().accumulated_seed(), - *b.state_root_hash(), - ) - }), - })) - }) + TrieRequestIncoming { sender, message }: TrieRequestIncoming, + ) -> Effects + where + REv: From> + Send, + { + let TrieRequestMessage(ref serialized_id) = *message; + let fetch_response = match self.fetch_trie_local(serialized_id) { + Ok(fetch_response) => fetch_response, + Err(error) => { + debug!("failed to get trie: {}", error); + return Effects::new(); + } + }; + + match Message::new_get_response(&fetch_response) { + Ok(message) => effect_builder.send_message(sender, message).ignore(), + Err(error) => { + error!("failed to create get-response: {}", error); + Effects::new() + } } } - fn handle_get_parent_result( - &mut self, - effect_builder: EffectBuilder, - finalized_block: FinalizedBlock, - deploys: VecDeque, - parent: Option, + /// Handles an incoming demand for a trie. + fn handle_trie_demand( + &self, + TrieDemand { + request_msg, + auto_closing_responder, + .. + }: TrieDemand, ) -> Effects { - match parent { - None => { - let height = finalized_block.height(); - debug!("no pre-state hash for height {}", height); - // re-check the parent map - the parent might have been executed in the meantime! - if let Some(state_root_hash) = self.pre_state_hash(&finalized_block) { - let state = Box::new(RequestState { - finalized_block, - remaining_deploys: deploys, - execution_results: HashMap::new(), - state_root_hash, - }); - self.execute_all_deploys_or_finalize_block_or_step(effect_builder, state) - } else { - // The parent block has not been executed yet; delay handling. 
- self.exec_queue.insert(height, (finalized_block, deploys)); - Effects::new() - } + let TrieRequestMessage(ref serialized_id) = *request_msg; + let fetch_response = match self.fetch_trie_local(serialized_id) { + Ok(fetch_response) => fetch_response, + Err(error) => { + // Something is wrong in our trie store, but be courteous and still send a reply. + debug!("failed to get trie: {}", error); + return auto_closing_responder.respond_none().ignore(); } - Some(parent_summary) => { - // Parent found in the storage. - // Insert into `parent_map` cache. - // It will be removed in `create_block` method. - self.parent_map - .insert(finalized_block.height().saturating_sub(1), parent_summary); - self.handle_get_deploys_result(effect_builder, finalized_block, deploys) + }; + + match Message::new_get_response(&fetch_response) { + Ok(message) => auto_closing_responder.respond(message).ignore(), + Err(error) => { + // This should never happen, but if it does, we let the peer know we cannot help. + error!("failed to create get-response: {}", error); + auto_closing_responder.respond_none().ignore() } } } - fn create_block( - &mut self, - finalized_block: FinalizedBlock, - state_root_hash: Digest, - next_era_validator_weights: Option>, - ) -> Block { - let (parent_summary_hash, parent_seed) = if self.is_initial_block_child(&finalized_block) { - // The first block after the initial one: get initial block summary if we have one, or - // if not, this should be the genesis child and so we take the default values. 
- ( - self.initial_state - .block_summary - .as_ref() - .map(|summary| summary.hash) - .unwrap_or_else(|| BlockHash::new(Digest::default())), - self.initial_state - .block_summary - .as_ref() - .map(|summary| summary.accumulated_seed) - .unwrap_or_default(), - ) - } else { - let parent_block_height = finalized_block.height() - 1; - let summary = self - .parent_map - .remove(&parent_block_height) - .unwrap_or_else(|| panic!("failed to take {:?}", parent_block_height)); - (summary.hash, summary.accumulated_seed) - }; - let block_height = finalized_block.height(); - let block = Block::new( - parent_summary_hash, - parent_seed, - state_root_hash, - finalized_block, - next_era_validator_weights, - self.protocol_version, - ); - let summary = ExecutedBlockSummary { - hash: *block.hash(), - state_root_hash, - accumulated_seed: block.header().accumulated_seed(), + /// Reads the trie (or chunk of a trie) under the given key and index. + fn fetch_trie_local( + &self, + serialized_id: &[u8], + ) -> Result, ContractRuntimeError> { + trace!(?serialized_id, "get_trie"); + let trie_or_chunk_id: TrieOrChunkId = bincode::deserialize(serialized_id)?; + let data_access_layer = Arc::clone(&self.data_access_layer); + let maybe_trie = { + let start = Instant::now(); + let TrieOrChunkId(chunk_index, trie_key) = trie_or_chunk_id; + let req = TrieRequest::new(trie_key, Some(chunk_index)); + let maybe_raw = data_access_layer + .trie(req) + .into_raw() + .map_err(ContractRuntimeError::FailedToRetrieveTrieById)?; + let ret = match maybe_raw { + Some(raw) => Some(TrieOrChunk::new(raw.into(), chunk_index)?), + None => None, + }; + self.metrics.get_trie.observe(start.elapsed().as_secs_f64()); + ret }; - let _ = self.parent_map.insert(block_height, summary); - block + Ok(FetchResponse::from_opt(trie_or_chunk_id, maybe_trie)) } - fn pre_state_hash(&mut self, finalized_block: &FinalizedBlock) -> Option { - if self.is_initial_block_child(finalized_block) { - Some(self.initial_state.state_root_hash) - 
} else { - // Try to get the parent's post-state-hash from the `parent_map`. - // We're subtracting 1 from the height as we want to get _parent's_ post-state hash. - let parent_block_height = finalized_block.height() - 1; - self.parent_map - .get(&parent_block_height) - .map(|summary| summary.state_root_hash) - } + /// Returns data_access_layer, for testing only. + #[cfg(test)] + pub(crate) fn data_access_layer(&self) -> Arc> { + Arc::clone(&self.data_access_layer) } - /// Returns true if the `finalized_block` is an immediate child of the initial block, ie. - /// either genesis or the highest known block at the time of initializing the component. - fn is_initial_block_child(&self, finalized_block: &FinalizedBlock) -> bool { - finalized_block.height() == self.initial_state.child_height + #[cfg(test)] + pub(crate) fn current_era_price(&self) -> EraPrice { + self.current_gas_price } } -/// Holds the state of an ongoing execute-commit cycle spawned from a given `Event::Request`. -#[derive(Debug)] -pub struct RequestState { - /// Finalized block for this request. - pub finalized_block: FinalizedBlock, - /// Deploys which have still to be executed. - pub remaining_deploys: VecDeque, - /// A collection of results of executing the deploys. - pub execution_results: HashMap, - /// Current state root hash of global storage. Is initialized with the parent block's - /// state hash, and is updated after each commit. - pub state_root_hash: Digest, -} +impl Component for ContractRuntime +where + REv: From + + From + + From> + + From + + From + + From + + From + + Send, +{ + type Event = Event; -#[derive(DataSize, Debug, Default)] -struct InitialState { - /// Height of the child of the highest known block at the time of initializing the component. - /// Required for the block executor to know when to stop looking for parent blocks when getting - /// the pre-state hash for execution. With upgrades, we could get a wrong hash if we went too - /// far. 
- child_height: u64, - /// Summary of the highest known block. - block_summary: Option, - /// Initial state root hash. - state_root_hash: Digest, -} + fn name(&self) -> &str { + COMPONENT_NAME + } -impl InitialState { - fn new(state_root_hash: Digest, block_header: Option<&BlockHeader>) -> Self { - let block_summary = block_header.map(|hdr| ExecutedBlockSummary { - hash: hdr.hash(), - state_root_hash, - accumulated_seed: hdr.accumulated_seed(), - }); - Self { - child_height: block_header.map_or(0, |hdr| hdr.height() + 1), - block_summary, - state_root_hash, + fn handle_event( + &mut self, + effect_builder: EffectBuilder, + rng: &mut NodeRng, + event: Event, + ) -> Effects { + match event { + Event::ContractRuntimeRequest(request) => { + self.handle_contract_runtime_request(effect_builder, rng, request) + } + Event::TrieRequestIncoming(request) => { + self.handle_trie_request(effect_builder, request) + } + Event::TrieDemand(demand) => self.handle_trie_demand(demand), } } } diff --git a/node/src/components/contract_runtime/config.rs b/node/src/components/contract_runtime/config.rs index 877fc7e7fb..6381685f24 100644 --- a/node/src/components/contract_runtime/config.rs +++ b/node/src/components/contract_runtime/config.rs @@ -1,10 +1,13 @@ use datasize::DataSize; use serde::{Deserialize, Serialize}; +use tracing::warn; -use casper_execution_engine::shared::utils; +use casper_types::OS_PAGE_SIZE; const DEFAULT_MAX_GLOBAL_STATE_SIZE: usize = 805_306_368_000; // 750 GiB const DEFAULT_MAX_READERS: u32 = 512; +const DEFAULT_MAX_QUERY_DEPTH: u64 = 5; +const DEFAULT_MANUAL_SYNC_ENABLED: bool = true; /// Contract runtime configuration. #[derive(Clone, Copy, DataSize, Debug, Deserialize, Serialize)] @@ -16,25 +19,51 @@ pub struct Config { /// Defaults to 805,306,368,000 == 750 GiB. /// /// The size should be a multiple of the OS page size. - max_global_state_size: Option, + pub max_global_state_size: Option, /// The maximum number of readers to use for the global state store. 
/// /// Defaults to 512. - max_readers: Option, + pub max_readers: Option, + /// The limit of depth of recursive global state queries. + /// + /// Defaults to 5. + pub max_query_depth: Option, + /// Enable synchronizing to disk only after each block is written. + /// + /// Defaults to `true`. + pub enable_manual_sync: Option, } impl Config { - pub(crate) fn max_global_state_size(&self) -> usize { + /// Max global state size in bytes. + pub fn max_global_state_size_or_default(&self) -> usize { let value = self .max_global_state_size .unwrap_or(DEFAULT_MAX_GLOBAL_STATE_SIZE); - utils::check_multiple_of_page_size(value); + if value % *OS_PAGE_SIZE != 0 { + warn!( + "maximum global state database size {} is not multiple of system page size {}", + value, *OS_PAGE_SIZE + ); + } value } - pub(crate) fn max_readers(&self) -> u32 { + /// Max lmdb readers. + pub fn max_readers_or_default(&self) -> u32 { self.max_readers.unwrap_or(DEFAULT_MAX_READERS) } + + /// Max query depth. + pub fn max_query_depth_or_default(&self) -> u64 { + self.max_query_depth.unwrap_or(DEFAULT_MAX_QUERY_DEPTH) + } + + /// Is manual sync enabled. + pub fn manual_sync_enabled_or_default(&self) -> bool { + self.enable_manual_sync + .unwrap_or(DEFAULT_MANUAL_SYNC_ENABLED) + } } impl Default for Config { @@ -42,6 +71,8 @@ impl Default for Config { Config { max_global_state_size: Some(DEFAULT_MAX_GLOBAL_STATE_SIZE), max_readers: Some(DEFAULT_MAX_READERS), + max_query_depth: Some(DEFAULT_MAX_QUERY_DEPTH), + enable_manual_sync: Some(DEFAULT_MANUAL_SYNC_ENABLED), } } } diff --git a/node/src/components/contract_runtime/error.rs b/node/src/components/contract_runtime/error.rs new file mode 100644 index 0000000000..09af7a8736 --- /dev/null +++ b/node/src/components/contract_runtime/error.rs @@ -0,0 +1,184 @@ +//! Errors that the contract runtime component may raise. 
+use derive_more::From; +use std::collections::BTreeMap; + +use serde::Serialize; +use thiserror::Error; + +use casper_execution_engine::engine_state::Error as EngineStateError; +use casper_storage::{ + data_access_layer::{ + forced_undelegate::ForcedUndelegateError, BlockRewardsError, FeeError, StepError, + }, + global_state::error::Error as GlobalStateError, + tracking_copy::TrackingCopyError, +}; +use casper_types::{bytesrepr, CLValueError, Digest, EraId, PublicKey, U512}; + +use crate::{ + components::contract_runtime::ExecutionPreState, + types::{ChunkingError, ExecutableBlock, InternalEraReport}, +}; + +/// Common state result errors. +#[derive(Debug, Error)] +pub(crate) enum StateResultError { + /// Invalid state root hash. + #[error("invalid state root hash")] + RootNotFound, + /// Value not found. + #[error("{0}")] + ValueNotFound(String), + /// Failure result. + #[error("{0}")] + Failure(TrackingCopyError), +} + +/// An error returned from mis-configuring the contract runtime component. +#[derive(Debug, Error)] +pub(crate) enum ConfigError { + /// Error initializing the LMDB environment. + #[error("failed to initialize LMDB environment for contract runtime: {0}")] + GlobalState(#[from] GlobalStateError), + /// Error initializing metrics. + #[error("failed to initialize metrics for contract runtime: {0}")] + Prometheus(#[from] prometheus::Error), +} + +/// An enum that represents all possible error conditions of a `contract_runtime` component. +#[derive(Debug, Error, From)] +pub(crate) enum ContractRuntimeError { + /// The provided serialized id cannot be deserialized properly. + #[error("error deserializing id: {0}")] + InvalidSerializedId(#[source] bincode::Error), + // It was not possible to get trie with the specified id + #[error("error retrieving trie by id: {0}")] + FailedToRetrieveTrieById(#[source] GlobalStateError), + /// Chunking error. 
+ #[error("failed to chunk the data {0}")] + ChunkingError(#[source] ChunkingError), +} + +/// An error during block execution. +#[derive(Debug, Error, Serialize)] +pub enum BlockExecutionError { + /// Currently the contract runtime can only execute one commit at a time, so we cannot handle + /// more than one execution result. + #[error("more than one execution result")] + MoreThanOneExecutionResult, + /// Both the block to be executed and the execution pre-state specify the height of the next + /// block. These must agree and this error will be thrown if they do not. + #[error( + "block's height does not agree with execution pre-state. \ + block: {executable_block:?}, \ + execution pre-state: {execution_pre_state:?}" + )] + WrongBlockHeight { + /// The finalized block the system attempted to execute. + executable_block: Box, + /// The state of the block chain prior to block execution that was to be used. + execution_pre_state: Box, + }, + /// A core error thrown by the execution engine. + #[error(transparent)] + EngineState( + #[from] + #[serde(skip_serializing)] + EngineStateError, + ), + /// An error that occurred when trying to run the auction contract. + #[error(transparent)] + Step( + #[from] + #[serde(skip_serializing)] + StepError, + ), + #[error(transparent)] + DistributeFees( + #[from] + #[serde(skip_serializing)] + FeeError, + ), + #[error(transparent)] + DistributeBlockRewards( + #[from] + #[serde(skip_serializing)] + BlockRewardsError, + ), + #[error(transparent)] + ForcedUndelegate( + #[from] + #[serde(skip_serializing)] + ForcedUndelegateError, + ), + /// Failed to compute the approvals checksum. + #[error("failed to compute approvals checksum: {0}")] + FailedToComputeApprovalsChecksum(bytesrepr::Error), + /// Failed to compute the execution results checksum. + #[error("failed to compute execution results checksum: {0}")] + FailedToComputeExecutionResultsChecksum(bytesrepr::Error), + /// Failed to convert the checksum registry to a `CLValue`. 
+ #[error("failed to convert the checksum registry to a clvalue: {0}")] + ChecksumRegistryToCLValue(CLValueError), + /// `EraEnd`s need both an `EraReport` present and a map of the next era validator weights. + /// If one of them is not present while trying to construct an `EraEnd`, this error is + /// produced. + #[error( + "cannot create era end unless we have both an era report and next era validators. \ + era report: {maybe_era_report:?}, \ + next era validator weights: {maybe_next_era_validator_weights:?}" + )] + FailedToCreateEraEnd { + /// An optional `EraReport` we tried to use to construct an `EraEnd`. + maybe_era_report: Option, + /// An optional map of the next era validator weights used to construct an `EraEnd`. + maybe_next_era_validator_weights: Option<(BTreeMap, u8)>, + }, + /// An error that occurred while interacting with lmdb. + #[error(transparent)] + Lmdb( + #[from] + #[serde(skip_serializing)] + GlobalStateError, + ), + /// An error that occurred while getting era validators. + #[error(transparent)] + GetEraValidators( + #[from] + #[serde(skip_serializing)] + TrackingCopyError, + ), + /// A root state hash was not found. + #[error("Root state hash not found in global state.")] + RootNotFound(Digest), + /// Missing checksum registry. + #[error("Missing checksum registry")] + MissingChecksumRegistry, + #[error("Failed to get new era gas price when executing switch block")] + FailedToGetNewEraGasPrice { era_id: EraId }, + // Payment error. + #[error("Error while trying to set up payment for transaction: {0}")] + PaymentError(String), + // Error attempting to set block global data. 
+ #[error("Error while attempting to store block global data: {0}")] + BlockGlobal(String), + #[error("No switch block header available for era: {0}")] + /// No switch block available + NoSwitchBlockHash(u64), + #[error("Unsupported execution kind: {0}")] + /// Unsupported execution kind + UnsupportedTransactionKind(u8), + #[error("Error while converting transaction to internal representation: {0}")] + TransactionConversion(String), + /// Invalid gas limit amount. + #[error("Invalid gas limit amount: {0}")] + InvalidGasLimit(U512), + /// Invalid transaction variant. + #[error("Invalid transaction variant")] + InvalidTransactionVariant, + /// Invalid transaction arguments. + #[error("Invalid transaction arguments")] + InvalidTransactionArgs, + #[error("Data Access Layer conflicts with chainspec setting: {0}")] + InvalidAESetting(bool), +} diff --git a/node/src/components/contract_runtime/event.rs b/node/src/components/contract_runtime/event.rs new file mode 100644 index 0000000000..edef0a937c --- /dev/null +++ b/node/src/components/contract_runtime/event.rs @@ -0,0 +1,36 @@ +use std::{ + fmt, + fmt::{Display, Formatter}, +}; + +use derive_more::From; +use serde::Serialize; + +use crate::effect::{ + incoming::{TrieDemand, TrieRequestIncoming}, + requests::ContractRuntimeRequest, +}; + +#[derive(Debug, From, Serialize)] +pub(crate) enum Event { + #[from] + ContractRuntimeRequest(ContractRuntimeRequest), + + #[from] + TrieRequestIncoming(TrieRequestIncoming), + + #[from] + TrieDemand(TrieDemand), +} + +impl Display for Event { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self { + Event::ContractRuntimeRequest(req) => { + write!(f, "contract runtime request: {}", req) + } + Event::TrieRequestIncoming(req) => write!(f, "trie request incoming: {}", req), + Event::TrieDemand(demand) => write!(f, "trie demand: {}", demand), + } + } +} diff --git a/node/src/components/contract_runtime/exec_queue.rs b/node/src/components/contract_runtime/exec_queue.rs new 
file mode 100644 index 0000000000..f5fb118638 --- /dev/null +++ b/node/src/components/contract_runtime/exec_queue.rs @@ -0,0 +1,56 @@ +use datasize::DataSize; +use std::{ + collections::BTreeMap, + sync::{Arc, Mutex}, +}; + +use crate::types::{ExecutableBlock, MetaBlockState}; + +#[derive(Default, Clone, DataSize)] +pub(super) struct ExecQueue(Arc>>); + +impl ExecQueue { + /// How many blocks are backed up in the queue + pub fn len(&self) -> usize { + self.0 + .lock() + .expect( + "components::contract_runtime: couldn't get execution queue size; mutex poisoned", + ) + .len() + } + + pub fn remove(&mut self, height: u64) -> Option { + self.0 + .lock() + .expect("components::contract_runtime: couldn't remove from the queue; mutex poisoned") + .remove(&height) + } + + pub fn insert(&mut self, item: QueueItem) { + let height = item.executable_block.height; + self.0 + .lock() + .expect("components::contract_runtime: couldn't insert into the queue; mutex poisoned") + .insert(height, item); + } + + /// Remove every entry older than the given height, and return the new len. + pub fn remove_older_then(&mut self, height: u64) -> i64 { + let mut locked_queue = self.0 + .lock() + .expect( + "components::contract_runtime: couldn't initialize contract runtime block execution queue; mutex poisoned" + ); + + *locked_queue = locked_queue.split_off(&height); + + TryInto::try_into(locked_queue.len()).unwrap_or(i64::MIN) + } +} + +// Should it be an enum? +pub(super) struct QueueItem { + pub executable_block: ExecutableBlock, + pub meta_block_state: MetaBlockState, +} diff --git a/node/src/components/contract_runtime/metrics.rs b/node/src/components/contract_runtime/metrics.rs new file mode 100644 index 0000000000..0216626c43 --- /dev/null +++ b/node/src/components/contract_runtime/metrics.rs @@ -0,0 +1,395 @@ +use prometheus::{self, Gauge, Histogram, IntGauge, Registry}; + +use crate::{unregister_metric, utils}; + +/// Value of upper bound of histogram. 
+const EXPONENTIAL_BUCKET_START: f64 = 0.2; + +/// Multiplier of previous upper bound for next bound. +const EXPONENTIAL_BUCKET_FACTOR: f64 = 2.0; + +/// Bucket count, with the last bucket going to +Inf which will not be included in the results. +/// - start = 0.01, factor = 2.0, count = 10 +/// - start * factor ^ count = 0.01 * 2.0 ^ 10 = 10.24 +/// - Values above 10.24 (f64 seconds here) will not fall in a bucket that is kept. +const EXPONENTIAL_BUCKET_COUNT: usize = 10; + +const EXEC_WASM_V1_NAME: &str = "contract_runtime_exec_wasm_v1"; +const EXEC_WASM_V1_HELP: &str = "time in seconds to execute wasm using the v1 exec engine"; + +const EXEC_BLOCK_PRE_PROCESSING_NAME: &str = "contract_runtime_exec_block_pre_proc"; +const EXEC_BLOCK_PRE_PROCESSING_HELP: &str = + "processing time in seconds before any transactions have processed"; + +const EXEC_BLOCK_POST_PROCESSING_NAME: &str = "contract_runtime_exec_block_post_proc"; +const EXEC_BLOCK_POST_PROCESSING_HELP: &str = + "processing time in seconds after all transactions have processed"; + +const EXEC_BLOCK_STEP_PROCESSING_NAME: &str = "contract_runtime_exec_block_step_proc"; +const EXEC_BLOCK_STEP_PROCESSING_HELP: &str = "processing time in seconds of the end of era step"; + +const EXEC_BLOCK_TOTAL_NAME: &str = "contract_runtime_exec_block_total_proc"; +const EXEC_BLOCK_TOTAL_HELP: &str = + "processing time in seconds for block execution (total elapsed)"; + +const COMMIT_GENESIS_NAME: &str = "contract_runtime_commit_genesis"; +const COMMIT_GENESIS_HELP: &str = "time in seconds to commit an genesis"; + +const COMMIT_UPGRADE_NAME: &str = "contract_runtime_commit_upgrade"; +const COMMIT_UPGRADE_HELP: &str = "time in seconds to commit an upgrade"; + +const RUN_QUERY_NAME: &str = "contract_runtime_run_query"; +const RUN_QUERY_HELP: &str = "time in seconds to run a query in global state"; + +const RUN_QUERY_BY_PREFIX_NAME: &str = "contract_runtime_run_query_by_prefix"; +const RUN_QUERY_BY_PREFIX_HELP: &str = "time in 
seconds to run a query by prefix in global state"; + +const COMMIT_STEP_NAME: &str = "contract_runtime_commit_step"; +const COMMIT_STEP_HELP: &str = "time in seconds to commit the step at era end"; + +const GET_BALANCE_NAME: &str = "contract_runtime_get_balance"; +const GET_BALANCE_HELP: &str = "time in seconds to get the balance of a purse from global state"; + +const GET_TOTAL_SUPPLY_NAME: &str = "contract_runtime_get_total_supply"; +const GET_TOTAL_SUPPLY_HELP: &str = "time in seconds to get the total supply from global state"; + +const GET_ROUND_SEIGNIORAGE_RATE_NAME: &str = "contract_runtime_get_round_seigniorage_rate"; +const GET_ROUND_SEIGNIORAGE_RATE_HELP: &str = + "time in seconds to get the round seigniorage rate from global state"; + +const GET_ERA_VALIDATORS_NAME: &str = "contract_runtime_get_era_validators"; +const GET_ERA_VALIDATORS_HELP: &str = + "time in seconds to get validators for a given era from global state"; + +const GET_SEIGNIORAGE_RECIPIENTS_NAME: &str = "contract_runtime_get_seigniorage_recipients"; +const GET_SEIGNIORAGE_RECIPIENTS_HELP: &str = + "time in seconds to get seigniorage recipients from global state"; + +const GET_ALL_VALUES_NAME: &str = "contract_runtime_get_all_values"; +const GET_ALL_VALUES_NAME_HELP: &str = + "time in seconds to get all values under a give key from global state"; + +const EXECUTION_RESULTS_CHECKSUM_NAME: &str = "contract_runtime_execution_results_checksum"; +const EXECUTION_RESULTS_CHECKSUM_HELP: &str = "contract_runtime_execution_results_checksum"; + +const ADDRESSABLE_ENTITY_NAME: &str = "contract_runtime_addressable_entity"; +const ADDRESSABLE_ENTITY_HELP: &str = "contract_runtime_addressable_entity"; + +const ENTRY_POINT_NAME: &str = "contract_runtime_entry_point"; +const ENTRY_POINT_HELP: &str = "contract_runtime_entry_point"; + +const PUT_TRIE_NAME: &str = "contract_runtime_put_trie"; +const PUT_TRIE_HELP: &str = "time in seconds to put a trie"; + +const GET_TRIE_NAME: &str = 
"contract_runtime_get_trie"; +const GET_TRIE_HELP: &str = "time in seconds to get a trie"; + +const EXEC_BLOCK_TNX_PROCESSING_NAME: &str = "contract_runtime_execute_block"; +const EXEC_BLOCK_TNX_PROCESSING_HELP: &str = "time in seconds to execute all deploys in a block"; + +const LATEST_COMMIT_STEP_NAME: &str = "contract_runtime_latest_commit_step"; +const LATEST_COMMIT_STEP_HELP: &str = "duration in seconds of latest commit step at era end"; + +const EXEC_QUEUE_SIZE_NAME: &str = "execution_queue_size"; +const EXEC_QUEUE_SIZE_HELP: &str = + "number of blocks that are currently enqueued and waiting for execution"; + +const TXN_APPROVALS_HASHES: &str = "contract_runtime_txn_approvals_hashes_calculation"; +const TXN_APPROVALS_HASHES_HELP: &str = + "time in seconds to get calculate approvals hashes for executed transactions"; + +const BLOCK_REWARDS_PAYOUT: &str = "contract_runtime_block_rewards_payout"; +const BLOCK_REWARDS_PAYOUT_HELP: &str = "time in seconds to get process rewards payouts"; + +const BATCH_PRUNING_TIME: &str = "contract_runtime_batch_pruning_time"; +const BATCH_PRUNING_TIME_HELP: &str = "time in seconds to perform batch pruning"; + +const DB_FLUSH_TIME: &str = "contract_runtime_db_flush_time"; +const DB_FLUSH_TIME_HELP: &str = "time in seconds to flush changes to the database"; + +const SCRATCH_LMDB_WRITE_TIME: &str = "contract_runtime_scratch_lmdb_write_time"; +const SCRATCH_LMDB_WRITE_TIME_HELP: &str = "time in seconds to write changes to the database"; + +const SEIGNIORAGE_TARGET_FRACTION: &str = "contract_runtime_seigniorage_target_fraction"; +const SEIGNIORAGE_TARGET_FRACTION_HELP: &str = "fraction of target seigniorage minted in era"; + +/// Metrics for the contract runtime component. 
+#[derive(Debug)] +pub struct Metrics { + pub(super) exec_block_pre_processing: Histogram, + // elapsed before tnx processing + pub(super) exec_block_tnx_processing: Histogram, + // tnx processing elapsed + pub(super) exec_wasm_v1: Histogram, + // ee_v1 execution elapsed + pub(super) exec_block_step_processing: Histogram, + // step processing elapsed + pub(super) exec_block_post_processing: Histogram, + // elapsed after tnx processing + pub(super) exec_block_total: Histogram, + // total elapsed + pub(super) commit_genesis: Histogram, + pub(super) commit_upgrade: Histogram, + pub(super) run_query: Histogram, + pub(super) run_query_by_prefix: Histogram, + pub(super) commit_step: Histogram, + pub(super) get_balance: Histogram, + pub(super) get_total_supply: Histogram, + pub(super) get_round_seigniorage_rate: Histogram, + pub(super) get_era_validators: Histogram, + pub(super) get_seigniorage_recipients: Histogram, + pub(super) get_all_values: Histogram, + pub(super) execution_results_checksum: Histogram, + pub(super) addressable_entity: Histogram, + pub(super) entry_points: Histogram, + pub(super) put_trie: Histogram, + pub(super) get_trie: Histogram, + pub(super) latest_commit_step: Gauge, + pub(super) exec_queue_size: IntGauge, + pub(super) txn_approvals_hashes_calculation: Histogram, + pub(super) block_rewards_payout: Histogram, + pub(super) pruning_time: Histogram, + pub(super) database_flush_time: Histogram, + pub(super) scratch_lmdb_write_time: Histogram, + pub(super) seigniorage_target_fraction: Gauge, + registry: Registry, +} + +impl Metrics { + /// Constructor of metrics which creates and registers metrics objects for use. 
+ pub(super) fn new(registry: &Registry) -> Result { + let common_buckets = prometheus::exponential_buckets( + EXPONENTIAL_BUCKET_START, + EXPONENTIAL_BUCKET_FACTOR, + EXPONENTIAL_BUCKET_COUNT, + )?; + + // make wider buckets for operations that might take longer + let wider_buckets = prometheus::exponential_buckets( + EXPONENTIAL_BUCKET_START * 8.0, + EXPONENTIAL_BUCKET_FACTOR, + EXPONENTIAL_BUCKET_COUNT, + )?; + + // Start from 1 millisecond + // Factor by 2 + // After 10 elements we get to 1s. + // Anything above that should be a warning signal. + let tiny_buckets = prometheus::exponential_buckets(0.001, 2.0, 10)?; + + let latest_commit_step = Gauge::new(LATEST_COMMIT_STEP_NAME, LATEST_COMMIT_STEP_HELP)?; + registry.register(Box::new(latest_commit_step.clone()))?; + + let exec_queue_size = IntGauge::new(EXEC_QUEUE_SIZE_NAME, EXEC_QUEUE_SIZE_HELP)?; + registry.register(Box::new(exec_queue_size.clone()))?; + + let seigniorage_target_fraction = Gauge::new( + SEIGNIORAGE_TARGET_FRACTION, + SEIGNIORAGE_TARGET_FRACTION_HELP, + )?; + registry.register(Box::new(seigniorage_target_fraction.clone()))?; + + Ok(Metrics { + exec_block_pre_processing: utils::register_histogram_metric( + registry, + EXEC_BLOCK_PRE_PROCESSING_NAME, + EXEC_BLOCK_PRE_PROCESSING_HELP, + common_buckets.clone(), + )?, + exec_block_tnx_processing: utils::register_histogram_metric( + registry, + EXEC_BLOCK_TNX_PROCESSING_NAME, + EXEC_BLOCK_TNX_PROCESSING_HELP, + common_buckets.clone(), + )?, + exec_wasm_v1: utils::register_histogram_metric( + registry, + EXEC_WASM_V1_NAME, + EXEC_WASM_V1_HELP, + common_buckets.clone(), + )?, + exec_block_post_processing: utils::register_histogram_metric( + registry, + EXEC_BLOCK_POST_PROCESSING_NAME, + EXEC_BLOCK_POST_PROCESSING_HELP, + common_buckets.clone(), + )?, + exec_block_step_processing: utils::register_histogram_metric( + registry, + EXEC_BLOCK_STEP_PROCESSING_NAME, + EXEC_BLOCK_STEP_PROCESSING_HELP, + common_buckets.clone(), + )?, + exec_block_total: 
utils::register_histogram_metric( + registry, + EXEC_BLOCK_TOTAL_NAME, + EXEC_BLOCK_TOTAL_HELP, + wider_buckets.clone(), + )?, + run_query: utils::register_histogram_metric( + registry, + RUN_QUERY_NAME, + RUN_QUERY_HELP, + common_buckets.clone(), + )?, + run_query_by_prefix: utils::register_histogram_metric( + registry, + RUN_QUERY_BY_PREFIX_NAME, + RUN_QUERY_BY_PREFIX_HELP, + common_buckets.clone(), + )?, + commit_step: utils::register_histogram_metric( + registry, + COMMIT_STEP_NAME, + COMMIT_STEP_HELP, + common_buckets.clone(), + )?, + commit_genesis: utils::register_histogram_metric( + registry, + COMMIT_GENESIS_NAME, + COMMIT_GENESIS_HELP, + common_buckets.clone(), + )?, + commit_upgrade: utils::register_histogram_metric( + registry, + COMMIT_UPGRADE_NAME, + COMMIT_UPGRADE_HELP, + common_buckets.clone(), + )?, + get_balance: utils::register_histogram_metric( + registry, + GET_BALANCE_NAME, + GET_BALANCE_HELP, + common_buckets.clone(), + )?, + get_total_supply: utils::register_histogram_metric( + registry, + GET_TOTAL_SUPPLY_NAME, + GET_TOTAL_SUPPLY_HELP, + common_buckets.clone(), + )?, + get_round_seigniorage_rate: utils::register_histogram_metric( + registry, + GET_ROUND_SEIGNIORAGE_RATE_NAME, + GET_ROUND_SEIGNIORAGE_RATE_HELP, + common_buckets.clone(), + )?, + get_era_validators: utils::register_histogram_metric( + registry, + GET_ERA_VALIDATORS_NAME, + GET_ERA_VALIDATORS_HELP, + common_buckets.clone(), + )?, + get_seigniorage_recipients: utils::register_histogram_metric( + registry, + GET_SEIGNIORAGE_RECIPIENTS_NAME, + GET_SEIGNIORAGE_RECIPIENTS_HELP, + common_buckets.clone(), + )?, + get_all_values: utils::register_histogram_metric( + registry, + GET_ALL_VALUES_NAME, + GET_ALL_VALUES_NAME_HELP, + common_buckets.clone(), + )?, + execution_results_checksum: utils::register_histogram_metric( + registry, + EXECUTION_RESULTS_CHECKSUM_NAME, + EXECUTION_RESULTS_CHECKSUM_HELP, + common_buckets.clone(), + )?, + addressable_entity: utils::register_histogram_metric( 
+ registry, + ADDRESSABLE_ENTITY_NAME, + ADDRESSABLE_ENTITY_HELP, + common_buckets.clone(), + )?, + entry_points: utils::register_histogram_metric( + registry, + ENTRY_POINT_NAME, + ENTRY_POINT_HELP, + common_buckets.clone(), + )?, + get_trie: utils::register_histogram_metric( + registry, + GET_TRIE_NAME, + GET_TRIE_HELP, + tiny_buckets.clone(), + )?, + put_trie: utils::register_histogram_metric( + registry, + PUT_TRIE_NAME, + PUT_TRIE_HELP, + tiny_buckets, + )?, + latest_commit_step, + exec_queue_size, + txn_approvals_hashes_calculation: utils::register_histogram_metric( + registry, + TXN_APPROVALS_HASHES, + TXN_APPROVALS_HASHES_HELP, + common_buckets.clone(), + )?, + block_rewards_payout: utils::register_histogram_metric( + registry, + BLOCK_REWARDS_PAYOUT, + BLOCK_REWARDS_PAYOUT_HELP, + wider_buckets.clone(), + )?, + pruning_time: utils::register_histogram_metric( + registry, + BATCH_PRUNING_TIME, + BATCH_PRUNING_TIME_HELP, + common_buckets.clone(), + )?, + database_flush_time: utils::register_histogram_metric( + registry, + DB_FLUSH_TIME, + DB_FLUSH_TIME_HELP, + wider_buckets.clone(), + )?, + scratch_lmdb_write_time: utils::register_histogram_metric( + registry, + SCRATCH_LMDB_WRITE_TIME, + SCRATCH_LMDB_WRITE_TIME_HELP, + wider_buckets.clone(), + )?, + seigniorage_target_fraction, + registry: registry.clone(), + }) + } +} + +impl Drop for Metrics { + fn drop(&mut self) { + unregister_metric!(self.registry, self.exec_block_pre_processing); + unregister_metric!(self.registry, self.exec_block_tnx_processing); + unregister_metric!(self.registry, self.exec_wasm_v1); + unregister_metric!(self.registry, self.exec_block_post_processing); + unregister_metric!(self.registry, self.exec_block_step_processing); + unregister_metric!(self.registry, self.exec_block_total); + unregister_metric!(self.registry, self.commit_genesis); + unregister_metric!(self.registry, self.commit_upgrade); + unregister_metric!(self.registry, self.run_query); + unregister_metric!(self.registry, 
self.run_query_by_prefix); + unregister_metric!(self.registry, self.commit_step); + unregister_metric!(self.registry, self.get_balance); + unregister_metric!(self.registry, self.get_total_supply); + unregister_metric!(self.registry, self.get_round_seigniorage_rate); + unregister_metric!(self.registry, self.get_era_validators); + unregister_metric!(self.registry, self.get_seigniorage_recipients); + unregister_metric!(self.registry, self.get_all_values); + unregister_metric!(self.registry, self.execution_results_checksum); + unregister_metric!(self.registry, self.put_trie); + unregister_metric!(self.registry, self.get_trie); + unregister_metric!(self.registry, self.latest_commit_step); + unregister_metric!(self.registry, self.exec_queue_size); + unregister_metric!(self.registry, self.entry_points); + unregister_metric!(self.registry, self.txn_approvals_hashes_calculation); + unregister_metric!(self.registry, self.block_rewards_payout); + unregister_metric!(self.registry, self.pruning_time); + unregister_metric!(self.registry, self.database_flush_time); + unregister_metric!(self.registry, self.scratch_lmdb_write_time); + unregister_metric!(self.registry, self.seigniorage_target_fraction); + } +} diff --git a/node/src/components/contract_runtime/operations.rs b/node/src/components/contract_runtime/operations.rs index 901b5bebb1..8974fac752 100644 --- a/node/src/components/contract_runtime/operations.rs +++ b/node/src/components/contract_runtime/operations.rs @@ -1,101 +1,1512 @@ -use std::{collections::VecDeque, sync::Arc, time::Instant}; +pub(crate) mod wasm_v2_request; -use super::ContractRuntimeMetrics; -use crate::{crypto::hash::Digest, types::DeployHash}; -use casper_execution_engine::{ - core::engine_state::{ - self, EngineState, ExecutionResult as EngineExecutionResult, ExecutionResults, +use casper_executor_wasm::ExecutorV2; +use itertools::Itertools; +use std::{collections::BTreeMap, convert::TryInto, sync::Arc, time::Instant}; +use tracing::{debug, error, 
info, trace, warn}; +use wasm_v2_request::{WasmV2Request, WasmV2Result}; + +use casper_execution_engine::engine_state::{ + BlockInfo, ExecutionEngineV1, WasmV1Request, WasmV1Result, +}; +use casper_storage::{ + block_store::types::ApprovalsHashes, + data_access_layer::{ + balance::BalanceHandling, + mint::{BalanceIdentifierTransferArgs, BurnRequest}, + AuctionMethod, BalanceHoldKind, BalanceHoldRequest, BalanceIdentifier, + BalanceIdentifierPurseRequest, BalanceIdentifierPurseResult, BalanceRequest, + BiddingRequest, BlockGlobalRequest, BlockGlobalResult, BlockRewardsRequest, + BlockRewardsResult, DataAccessLayer, EntryPointRequest, EntryPointResult, + EraValidatorsRequest, EraValidatorsResult, EvictItem, FeeRequest, FeeResult, FlushRequest, + HandleFeeMode, HandleFeeRequest, HandleRefundMode, HandleRefundRequest, + InsufficientBalanceHandling, ProofHandling, PruneRequest, PruneResult, StepRequest, + StepResult, TransferRequest, }, - shared::{additive_map::AdditiveMap, newtypes::CorrelationId, transform::Transform}, - storage::global_state::{lmdb::LmdbGlobalState, CommitResult}, + global_state::state::{ + lmdb::LmdbGlobalState, scratch::ScratchGlobalState, CommitProvider, ScratchProvider, + StateProvider, StateReader, + }, + system::runtime_native::Config as NativeRuntimeConfig, +}; +use casper_types::{ + bytesrepr::{self, ToBytes, U32_SERIALIZED_LENGTH}, + execution::{Effects, ExecutionResult, TransformKindV2, TransformV2}, + system::handle_payment::ARG_AMOUNT, + BlockHash, BlockHeader, BlockTime, BlockV2, CLValue, Chainspec, ChecksumRegistry, Digest, + EntityAddr, EraEndV2, EraId, FeeHandling, Gas, InvalidTransaction, InvalidTransactionV1, Key, + ProtocolVersion, PublicKey, RefundHandling, Transaction, TransactionEntryPoint, + AUCTION_LANE_ID, MINT_LANE_ID, U512, }; -use casper_types::{ExecutionResult, Key}; -use engine_state::ExecuteRequest; -use itertools::Itertools; -use tracing::{debug, error, trace}; -/// Commits the execution effects. 
-pub(super) async fn commit_execution_effects( - engine_state: Arc>, - metrics: Arc, - state_root_hash: Digest, - deploy_hash: DeployHash, - execution_results: ExecutionResults, -) -> Result<(Digest, ExecutionResult), ()> { - let ee_execution_result = execution_results - .into_iter() - .exactly_one() - .expect("should only be one exec result"); - let execution_result = ExecutionResult::from(&ee_execution_result); - - let execution_effect = match ee_execution_result { - EngineExecutionResult::Success { effect, cost, .. } => { - // We do want to see the deploy hash and cost in the logs. - // We don't need to see the effects in the logs. - debug!(?deploy_hash, %cost, "execution succeeded"); - effect - } - EngineExecutionResult::Failure { - error, - effect, - cost, - .. +use super::{ + types::{SpeculativeExecutionResult, StepOutcome}, + utils::{self, calculate_prune_eras}, + BlockAndExecutionArtifacts, BlockExecutionError, ExecutionPreState, Metrics, StateResultError, + APPROVALS_CHECKSUM_NAME, EXECUTION_RESULTS_CHECKSUM_NAME, +}; +use crate::{ + components::fetcher::FetchItem, + contract_runtime::types::ExecutionArtifactBuilder, + types::{self, Chunkable, ExecutableBlock, InternalEraReport, MetaTransaction}, +}; + +/// Executes a finalized block. 
+#[allow(clippy::too_many_arguments)] +pub fn execute_finalized_block( + data_access_layer: &DataAccessLayer, + execution_engine_v1: &ExecutionEngineV1, + execution_engine_v2: ExecutorV2, + chainspec: &Chainspec, + metrics: Option>, + execution_pre_state: ExecutionPreState, + executable_block: ExecutableBlock, + key_block_height_for_activation_point: u64, + current_gas_price: u8, + next_era_gas_price: Option, + last_switch_block_hash: Option, +) -> Result { + let block_height = executable_block.height; + if block_height != execution_pre_state.next_block_height() { + return Err(BlockExecutionError::WrongBlockHeight { + executable_block: Box::new(executable_block), + execution_pre_state: Box::new(execution_pre_state), + }); + } + if executable_block.era_report.is_some() && next_era_gas_price.is_none() { + return Err(BlockExecutionError::FailedToGetNewEraGasPrice { + era_id: executable_block.era_id.successor(), + }); + } + let start = Instant::now(); + let protocol_version = chainspec.protocol_version(); + let activation_point_era_id = chainspec.protocol_config.activation_point.era_id(); + let prune_batch_size = chainspec.core_config.prune_batch_size; + let native_runtime_config = NativeRuntimeConfig::from_chainspec(chainspec); + let addressable_entity_enabled = chainspec.core_config.enable_addressable_entity(); + + if addressable_entity_enabled != data_access_layer.enable_addressable_entity { + return Err(BlockExecutionError::InvalidAESetting( + data_access_layer.enable_addressable_entity, + )); + } + + // scrape variables from execution pre state + let parent_hash = execution_pre_state.parent_hash(); + let parent_seed = execution_pre_state.parent_seed(); + let parent_block_hash = execution_pre_state.parent_hash(); + let pre_state_root_hash = execution_pre_state.pre_state_root_hash(); + let mut state_root_hash = pre_state_root_hash; // initial state root is parent's state root + + let payment_balance_addr = + match 
data_access_layer.balance_purse(BalanceIdentifierPurseRequest::new( + state_root_hash, + protocol_version, + BalanceIdentifier::Payment, + )) { + BalanceIdentifierPurseResult::RootNotFound => { + return Err(BlockExecutionError::RootNotFound(state_root_hash)) + } + BalanceIdentifierPurseResult::Failure(tce) => { + return Err(BlockExecutionError::BlockGlobal(format!("{:?}", tce))); + } + BalanceIdentifierPurseResult::Success { purse_addr } => purse_addr, + }; + + // scrape variables from executable block + let block_time = BlockTime::new(executable_block.timestamp.millis()); + + let proposer = executable_block.proposer.clone(); + let era_id = executable_block.era_id; + let mut artifacts = Vec::with_capacity(executable_block.transactions.len()); + + // set up accounting variables / settings + let insufficient_balance_handling = InsufficientBalanceHandling::HoldRemaining; + let refund_handling = chainspec.core_config.refund_handling; + let fee_handling = chainspec.core_config.fee_handling; + let baseline_motes_amount = chainspec.core_config.baseline_motes_amount_u512(); + let balance_handling = BalanceHandling::Available; + + // get scratch state, which must be used for all processing and post-processing data + // requirements. + let scratch_state = data_access_layer.get_scratch_global_state(); + + // pre-processing is finished + if let Some(metrics) = metrics.as_ref() { + metrics + .exec_block_pre_processing + .observe(start.elapsed().as_secs_f64()); + } + + // grabbing transaction id's now to avoid cloning transactions + let transaction_ids = executable_block + .transactions + .iter() + .map(Transaction::fetch_id) + .collect_vec(); + + // transaction processing starts now + let txn_processing_start = Instant::now(); + + // put block_time to global state + // NOTE this must occur prior to any block processing as subsequent logic + // will refer to the block time value being written to GS now. 
+ match scratch_state.block_global(BlockGlobalRequest::block_time( + state_root_hash, + protocol_version, + block_time, + )) { + BlockGlobalResult::RootNotFound => { + return Err(BlockExecutionError::RootNotFound(state_root_hash)); + } + BlockGlobalResult::Failure(err) => { + return Err(BlockExecutionError::BlockGlobal(format!("{:?}", err))); + } + BlockGlobalResult::Success { + post_state_hash, .. } => { - // Failure to execute a contract is a user error, not a system error. - // We do want to see the deploy hash, error, and cost in the logs. - // We don't need to see the effects in the logs. - debug!(?deploy_hash, ?error, %cost, "execution failure"); - effect + state_root_hash = post_state_hash; + } + } + + // put protocol version to global state + match scratch_state.block_global(BlockGlobalRequest::set_protocol_version( + state_root_hash, + protocol_version, + )) { + BlockGlobalResult::RootNotFound => { + return Err(BlockExecutionError::RootNotFound(state_root_hash)); + } + BlockGlobalResult::Failure(err) => { + return Err(BlockExecutionError::BlockGlobal(format!("{:?}", err))); + } + BlockGlobalResult::Success { + post_state_hash, .. + } => { + state_root_hash = post_state_hash; + } + } + + // put enable addressable entity flag to global state + match scratch_state.block_global(BlockGlobalRequest::set_addressable_entity( + state_root_hash, + protocol_version, + addressable_entity_enabled, + )) { + BlockGlobalResult::RootNotFound => { + return Err(BlockExecutionError::RootNotFound(state_root_hash)); + } + BlockGlobalResult::Failure(err) => { + return Err(BlockExecutionError::BlockGlobal(format!("{:?}", err))); + } + BlockGlobalResult::Success { + post_state_hash, .. 
+ } => { + state_root_hash = post_state_hash; + } + } + + let transaction_config = &chainspec.transaction_config; + + for stored_transaction in executable_block.transactions { + let mut artifact_builder = ExecutionArtifactBuilder::new( + &stored_transaction, + baseline_motes_amount, // <-- default minimum cost, may be overridden later in logic + current_gas_price, + ); + let transaction = MetaTransaction::from_transaction( + &stored_transaction, + chainspec.core_config.pricing_handling, + transaction_config, + ) + .map_err(|err| BlockExecutionError::TransactionConversion(err.to_string()))?; + let initiator_addr = transaction.initiator_addr(); + let transaction_hash = transaction.hash(); + let transaction_args = transaction.session_args().clone(); + let entry_point = transaction.entry_point(); + let authorization_keys = transaction.signers(); + + /* + we solve for halting state using a `gas limit` which is the maximum amount of + computation we will allow a given transaction to consume. the transaction itself + provides a function to determine this if provided with the current cost tables + gas_limit is ALWAYS calculated with price == 1. + + next there is the actual cost, i.e. how much we charge for that computation + this is calculated by multiplying the gas limit by the current `gas_price` + gas price has a floor of 1, and the ceiling is configured in the chainspec + NOTE: when the gas price is 1, the gas limit and the cost are coincidentally + equal because x == x * 1; thus it is recommended to run tests with + price >1 to avoid being confused by this. + + the third important value is the amount of computation consumed by executing a + transaction for native transactions there is no wasm and the consumed always + equals the limit for bytecode / wasm based transactions the consumed is based on + what opcodes were executed and can range from >=0 to <=gas_limit. + consumed is determined after execution and is used for refund & fee post-processing. 
+ + we check these top level concerns early so that we can skip if there is an error + */ + + // NOTE: this is the allowed computation limit (gas limit) + let gas_limit = + match stored_transaction.gas_limit(chainspec, transaction.transaction_lane()) { + Ok(gas) => gas, + Err(ite) => { + debug!(%transaction_hash, %ite, "invalid transaction (gas limit)"); + artifact_builder.with_invalid_transaction(&ite); + artifacts.push(artifact_builder.build()); + continue; + } + }; + artifact_builder.with_gas_limit(gas_limit); + + // NOTE: this is the actual adjusted cost that we charge for (gas limit * gas price) + let cost = match stored_transaction.gas_cost( + chainspec, + transaction.transaction_lane(), + current_gas_price, + ) { + Ok(motes) => motes.value(), + Err(ite) => { + debug!(%transaction_hash, "invalid transaction (motes conversion)"); + artifact_builder.with_invalid_transaction(&ite); + artifacts.push(artifact_builder.build()); + continue; + } + }; + artifact_builder.with_added_cost(cost); + + let is_standard_payment = transaction.is_standard_payment(); + let is_custom_payment = !is_standard_payment && transaction.is_custom_payment(); + let is_v1_wasm = transaction.is_v1_wasm(); + let is_v2_wasm = transaction.is_v2_wasm(); + let refund_purse_active = is_custom_payment; + if refund_purse_active { + // if custom payment before doing any processing, initialize the initiator's main purse + // to be the refund purse for this transaction. + // NOTE: when executed, custom payment logic has the option to call set_refund_purse + // on the handle payment contract to set up a different refund purse, if desired. 
+ let handle_refund_request = HandleRefundRequest::new( + native_runtime_config.clone(), + state_root_hash, + protocol_version, + transaction_hash, + HandleRefundMode::SetRefundPurse { + target: Box::new(initiator_addr.clone().into()), + }, + ); + let handle_refund_result = scratch_state.handle_refund(handle_refund_request); + if let Err(root_not_found) = + artifact_builder.with_set_refund_purse_result(&handle_refund_result) + { + if root_not_found { + return Err(BlockExecutionError::RootNotFound(state_root_hash)); + } + artifacts.push(artifact_builder.build()); + continue; // don't commit effects, move on + } + state_root_hash = scratch_state + .commit_effects(state_root_hash, handle_refund_result.effects().clone())?; + } + + { + // Ensure the initiator's main purse can cover the penalty payment before proceeding. + let initial_balance_result = scratch_state.balance(BalanceRequest::new( + state_root_hash, + protocol_version, + initiator_addr.clone().into(), + balance_handling, + ProofHandling::NoProofs, + )); + + if let Err(root_not_found) = artifact_builder + .with_initial_balance_result(initial_balance_result.clone(), baseline_motes_amount) + { + if root_not_found { + return Err(BlockExecutionError::RootNotFound(state_root_hash)); + } + trace!(%transaction_hash, "insufficient initial balance"); + debug!(%transaction_hash, ?initial_balance_result, %baseline_motes_amount, "insufficient initial balance"); + artifacts.push(artifact_builder.build()); + // only reads have happened so far, and we can't charge due + // to insufficient balance, so move on with no effects committed + continue; + } + } + + let mut balance_identifier = { + if is_standard_payment { + let contract_might_pay = + addressable_entity_enabled && transaction.is_contract_by_hash_invocation(); + + if contract_might_pay { + match invoked_contract_will_pay(&scratch_state, state_root_hash, &transaction) { + Ok(Some(entity_addr)) => BalanceIdentifier::Entity(entity_addr), + Ok(None) => { + // the 
initiating account pays using its main purse + trace!(%transaction_hash, "direct invocation with account payment"); + initiator_addr.clone().into() + } + Err(err) => { + trace!(%transaction_hash, "failed to resolve contract self payment"); + artifact_builder + .with_state_result_error(err) + .map_err(|_| BlockExecutionError::RootNotFound(state_root_hash))?; + BalanceIdentifier::PenalizedAccount( + initiator_addr.clone().account_hash(), + ) + } + } + } else { + // the initiating account pays using its main purse + trace!(%transaction_hash, "account session with standard payment"); + initiator_addr.clone().into() + } + } else if is_v2_wasm { + // vm2 does not support custom payment, so it MUST be standard payment + // if transaction runtime is v2 then the initiating account will pay using + // the refund purse + initiator_addr.clone().into() + } else if is_custom_payment { + // this is the custom payment flow + // the initiating account will pay, but wants to do so with a different purse or + // in a custom way. 
If anything goes wrong, penalize the sender, do not execute + let custom_payment_gas_limit = + Gas::new(chainspec.transaction_config.native_transfer_minimum_motes * 5); + let pay_result = match WasmV1Request::new_custom_payment( + BlockInfo::new( + state_root_hash, + block_time, + parent_block_hash, + block_height, + protocol_version, + ), + custom_payment_gas_limit, + &transaction.to_payment_input_data(), + ) { + Ok(mut pay_request) => { + pay_request + .args + .insert(ARG_AMOUNT, cost) + .map_err(|e| BlockExecutionError::PaymentError(e.to_string()))?; + execution_engine_v1.execute(&scratch_state, pay_request) + } + Err(error) => { + WasmV1Result::invalid_executable_item(custom_payment_gas_limit, error) + } + }; + + let insufficient_payment_deposited = + !pay_result.balance_increased_by_amount(payment_balance_addr, cost); + + if insufficient_payment_deposited || pay_result.error().is_some() { + // Charge initiator for the penalty payment amount + // the most expedient way to do this that aligns with later code + // is to transfer from the initiator's main purse to the payment purse + let transfer_result = scratch_state.transfer(TransferRequest::new_indirect( + native_runtime_config.clone(), + state_root_hash, + protocol_version, + transaction_hash, + initiator_addr.clone(), + authorization_keys.clone(), + BalanceIdentifierTransferArgs::new( + None, + initiator_addr.clone().into(), + BalanceIdentifier::Payment, + baseline_motes_amount, + None, + ), + )); + + let msg = match pay_result.error() { + Some(err) => format!("{}", err), + None => { + if insufficient_payment_deposited { + "Insufficient custom payment".to_string() + } else { + // this should be unreachable due to guard condition above + let unk = "Unknown custom payment issue"; + warn!(%transaction_hash, unk); + debug_assert!(false, "{}", unk); + unk.to_string() + } + } + }; + // commit penalty payment effects + state_root_hash = scratch_state + .commit_effects(state_root_hash, 
transfer_result.effects().clone())?; + artifact_builder + .with_error_message(msg) + .with_transfer_result(transfer_result) + .map_err(|_| BlockExecutionError::RootNotFound(state_root_hash))?; + trace!(%transaction_hash, balance_identifier=?BalanceIdentifier::PenalizedPayment, "account session with custom payment failed"); + BalanceIdentifier::PenalizedPayment + } else { + // commit successful effects + state_root_hash = scratch_state + .commit_effects(state_root_hash, pay_result.effects().clone())?; + artifact_builder + .with_wasm_v1_result(pay_result) + .map_err(|_| BlockExecutionError::RootNotFound(state_root_hash))?; + trace!(%transaction_hash, balance_identifier=?BalanceIdentifier::Payment, "account session with custom payment success"); + BalanceIdentifier::Payment + } + } else { + BalanceIdentifier::PenalizedAccount(initiator_addr.clone().account_hash()) + } + }; + + let post_payment_balance_result = scratch_state.balance(BalanceRequest::new( + state_root_hash, + protocol_version, + balance_identifier.clone(), + balance_handling, + ProofHandling::NoProofs, + )); + + let lane_id = transaction.transaction_lane(); + + let allow_execution = { + let is_not_penalized = !balance_identifier.is_penalty(); + // in the case of custom payment, we do all payment processing up front after checking + // if the initiator can cover the penalty payment, and then either charge the full + // amount in the happy path or the penalty amount in the sad path...in whichever case + // the sad path is handled by is_penalty and the balance in the payment purse is + // the penalty payment or the full amount but is 'sufficient' either way + let is_sufficient_balance = + is_custom_payment || post_payment_balance_result.is_sufficient(cost); + let is_allowed_by_chainspec = chainspec.is_supported(lane_id); + let allow = is_not_penalized && is_sufficient_balance && is_allowed_by_chainspec; + if !allow { + if artifact_builder.error_message().is_none() { + 
artifact_builder.with_error_message(format!( + "penalized: {}, sufficient balance: {}, allowed by chainspec: {}", + !is_not_penalized, is_sufficient_balance, is_allowed_by_chainspec + )); + } + info!(%transaction_hash, ?balance_identifier, ?is_sufficient_balance, ?is_not_penalized, ?is_allowed_by_chainspec, "payment preprocessing unsuccessful"); + } else { + debug!(%transaction_hash, ?balance_identifier, ?is_sufficient_balance, ?is_not_penalized, ?is_allowed_by_chainspec, "payment preprocessing successful"); + } + allow + }; + + if allow_execution { + debug!(%transaction_hash, ?allow_execution, "execution allowed"); + if is_standard_payment { + // place a processing hold on the paying account to prevent double spend. + let hold_amount = cost; + let hold_request = BalanceHoldRequest::new_processing_hold( + state_root_hash, + protocol_version, + balance_identifier.clone(), + hold_amount, + insufficient_balance_handling, + ); + let hold_result = scratch_state.balance_hold(hold_request); + state_root_hash = + scratch_state.commit_effects(state_root_hash, hold_result.effects().clone())?; + artifact_builder + .with_balance_hold_result(&hold_result) + .map_err(|_| BlockExecutionError::RootNotFound(state_root_hash))?; + } + + trace!(%transaction_hash, ?lane_id, "eligible for execution"); + match lane_id { + lane_id if lane_id == MINT_LANE_ID => { + let runtime_args = transaction_args + .as_named() + .ok_or(BlockExecutionError::InvalidTransactionArgs)?; + let entry_point = transaction.entry_point(); + if let TransactionEntryPoint::Transfer = entry_point { + let transfer_result = + scratch_state.transfer(TransferRequest::with_runtime_args( + native_runtime_config.clone(), + state_root_hash, + protocol_version, + transaction_hash, + initiator_addr.clone(), + authorization_keys, + runtime_args.clone(), + )); + state_root_hash = scratch_state + .commit_effects(state_root_hash, transfer_result.effects().clone())?; + artifact_builder + .with_min_cost(gas_limit.value()) + 
.with_added_consumed(gas_limit) + .with_transfer_result(transfer_result) + .map_err(|_| BlockExecutionError::RootNotFound(state_root_hash))?; + } else if let TransactionEntryPoint::Burn = entry_point { + let burn_result = scratch_state.burn(BurnRequest::with_runtime_args( + native_runtime_config.clone(), + state_root_hash, + protocol_version, + transaction_hash, + initiator_addr.clone(), + authorization_keys, + runtime_args.clone(), + )); + state_root_hash = scratch_state + .commit_effects(state_root_hash, burn_result.effects().clone())?; + artifact_builder + .with_min_cost(gas_limit.value()) + .with_added_consumed(gas_limit) + .with_burn_result(burn_result) + .map_err(|_| BlockExecutionError::RootNotFound(state_root_hash))?; + } else { + artifact_builder.with_error_message(format!( + "Attempt to call unsupported native mint entrypoint: {}", + entry_point + )); + } + } + lane_id if lane_id == AUCTION_LANE_ID => { + let runtime_args = transaction_args + .as_named() + .ok_or(BlockExecutionError::InvalidTransactionArgs)?; + match AuctionMethod::from_parts(entry_point, runtime_args, chainspec) { + Ok(auction_method) => { + let bidding_result = scratch_state.bidding(BiddingRequest::new( + native_runtime_config.clone(), + state_root_hash, + protocol_version, + transaction_hash, + initiator_addr.clone(), + authorization_keys, + auction_method, + )); + state_root_hash = scratch_state.commit_effects( + state_root_hash, + bidding_result.effects().clone(), + )?; + artifact_builder + .with_min_cost(gas_limit.value()) + .with_added_consumed(gas_limit) + .with_bidding_result(bidding_result) + .map_err(|_| BlockExecutionError::RootNotFound(state_root_hash))?; + } + Err(ame) => { + error!( + %transaction_hash, + ?ame, + "failed to determine auction method" + ); + artifact_builder.with_auction_method_error(&ame); + } + }; + } + _ if is_v1_wasm => { + let wasm_v1_start = Instant::now(); + let session_input_data = transaction.to_session_input_data(); + match 
WasmV1Request::new_session( + BlockInfo::new( + state_root_hash, + block_time, + parent_block_hash, + block_height, + protocol_version, + ), + gas_limit, + &session_input_data, + ) { + Ok(wasm_v1_request) => { + trace!(%transaction_hash, ?lane_id, ?wasm_v1_request, "able to get wasm v1 request"); + let wasm_v1_result = + execution_engine_v1.execute(&scratch_state, wasm_v1_request); + trace!(%transaction_hash, ?lane_id, ?wasm_v1_result, "able to get wasm v1 result"); + state_root_hash = scratch_state.commit_effects( + state_root_hash, + wasm_v1_result.effects().clone(), + )?; + // note: consumed is scraped from wasm_v1_result along w/ other fields + artifact_builder + .with_wasm_v1_result(wasm_v1_result) + .map_err(|_| BlockExecutionError::RootNotFound(state_root_hash))?; + } + Err(ire) => { + debug!(%transaction_hash, ?lane_id, ?ire, "unable to get wasm v1 request"); + artifact_builder.with_invalid_wasm_v1_request(&ire); + } + }; + if let Some(metrics) = metrics.as_ref() { + metrics + .exec_wasm_v1 + .observe(wasm_v1_start.elapsed().as_secs_f64()); + } + } + _ if is_v2_wasm => match WasmV2Request::new( + gas_limit, + chainspec.network_config.name.clone(), + state_root_hash, + parent_block_hash, + block_height, + &transaction, + ) { + Ok(wasm_v2_request) => { + match wasm_v2_request.execute( + &execution_engine_v2, + state_root_hash, + &scratch_state, + ) { + Ok(wasm_v2_result) => { + match &wasm_v2_result { + WasmV2Result::Install(install_result) => { + info!( + contract_hash=base16::encode_lower(&install_result.smart_contract_addr()), + pre_state_root_hash=%state_root_hash, + post_state_root_hash=%install_result.post_state_hash(), + "install contract result"); + } + + WasmV2Result::Execute(execute_result) => { + info!( + pre_state_root_hash=%state_root_hash, + post_state_root_hash=%execute_result.post_state_hash(), + host_error=?execute_result.host_error.as_ref(), + "execute contract result"); + } + } + + state_root_hash = wasm_v2_result.post_state_hash(); + 
artifact_builder.with_wasm_v2_result(wasm_v2_result); + } + Err(wasm_v2_error) => { + artifact_builder.with_wasm_v2_error(wasm_v2_error); + } + } + } + Err(ire) => { + debug!(%transaction_hash, ?lane_id, ?ire, "unable to get wasm v2 request"); + artifact_builder.with_invalid_wasm_v2_request(ire); + } + }, + _ => { + // it is currently not possible to specify a vm other than v1 or v2 on the + // transaction itself, so this should be unreachable + unreachable!("Unknown VM target") + } + } + } + + // clear all holds on the balance_identifier purse before payment processing + { + let hold_request = BalanceHoldRequest::new_clear( + state_root_hash, + protocol_version, + BalanceHoldKind::All, + balance_identifier.clone(), + ); + let hold_result = scratch_state.balance_hold(hold_request); + state_root_hash = + scratch_state.commit_effects(state_root_hash, hold_result.effects().clone())?; + artifact_builder + .with_balance_hold_result(&hold_result) + .map_err(|_| BlockExecutionError::RootNotFound(state_root_hash))?; } + + // handle refunds per the chainspec determined setting. + let refund_amount = { + let consumed = + if balance_identifier.is_penalty() || artifact_builder.error_message().is_some() { + artifact_builder.cost_to_use() // no refund for penalty + } else { + artifact_builder.consumed() + }; + + let refund_mode = match refund_handling { + RefundHandling::NoRefund => { + if fee_handling.is_no_fee() && is_custom_payment { + // in no fee mode, we need to return the motes to the refund purse, + // and then point the balance_identifier to the refund purse + // this will result in the downstream no fee handling logic + // placing a hold on the correct purse. 
+ balance_identifier = BalanceIdentifier::Refund; + Some(HandleRefundMode::RefundNoFeeCustomPayment { + initiator_addr: Box::new(initiator_addr.clone()), + limit: gas_limit.value(), + gas_price: current_gas_price, + cost, + }) + } else { + None + } + } + RefundHandling::Burn { refund_ratio } => Some(HandleRefundMode::Burn { + limit: gas_limit.value(), + gas_price: current_gas_price, + cost, + consumed, + source: Box::new(balance_identifier.clone()), + ratio: refund_ratio, + }), + RefundHandling::Refund { refund_ratio } => { + let source = Box::new(balance_identifier.clone()); + if is_custom_payment { + // in custom payment we have to do all payment handling up front. + // therefore, if refunds are turned on we have to transfer the refunded + // amount back to the specified refund purse. + + // the refund purse for a given transaction is set to the initiator's main + // purse by default, but the custom payment provided by the initiator can + // set a different purse when executed. thus, the handle payment system + // contract tracks a refund purse and is handled internally at processing + // time. Outer logic should never assume or refer to a specific purse for + // purposes of refund. instead, `BalanceIdentifier::Refund` is used by outer + // logic, which is interpreted by inner logic to use the currently set + // refund purse. + let target = Box::new(BalanceIdentifier::Refund); + Some(HandleRefundMode::Refund { + initiator_addr: Box::new(initiator_addr.clone()), + limit: gas_limit.value(), + gas_price: current_gas_price, + consumed, + cost, + ratio: refund_ratio, + source, + target, + }) + } else { + // in normal payment handling we put a temporary processing hold + // on the paying purse rather than take the token up front. + // thus, here we only want to determine the refund amount rather than + // attempt to process a refund on something we haven't actually taken yet. 
+ // later in the flow when the processing hold is released and payment is + // finalized we reduce the amount taken by the refunded amount. This avoids + // the churn of taking the token up front via transfer (which writes + // multiple permanent records) and then transfer some of it back (which + // writes more permanent records). + Some(HandleRefundMode::CalculateAmount { + limit: gas_limit.value(), + gas_price: current_gas_price, + consumed, + cost, + ratio: refund_ratio, + source, + }) + } + } + }; + match refund_mode { + Some(refund_mode) => { + let handle_refund_request = HandleRefundRequest::new( + native_runtime_config.clone(), + state_root_hash, + protocol_version, + transaction_hash, + refund_mode, + ); + let handle_refund_result = scratch_state.handle_refund(handle_refund_request); + let refunded_amount = handle_refund_result.refund_amount(); + state_root_hash = scratch_state + .commit_effects(state_root_hash, handle_refund_result.effects().clone())?; + artifact_builder + .with_handle_refund_result(&handle_refund_result) + .map_err(|_| BlockExecutionError::RootNotFound(state_root_hash))?; + + refunded_amount + } + None => U512::zero(), + } + }; + artifact_builder.with_refund_amount(refund_amount); + // handle fees per the chainspec determined setting. + let handle_fee_result = match fee_handling { + FeeHandling::NoFee => { + // in this mode, a gas hold is placed on the payer's purse. 
+ let amount = cost.saturating_sub(refund_amount); + let hold_request = BalanceHoldRequest::new_gas_hold( + state_root_hash, + protocol_version, + balance_identifier, + amount, + insufficient_balance_handling, + ); + let hold_result = scratch_state.balance_hold(hold_request); + state_root_hash = + scratch_state.commit_effects(state_root_hash, hold_result.effects().clone())?; + artifact_builder + .with_balance_hold_result(&hold_result) + .map_err(|_| BlockExecutionError::RootNotFound(state_root_hash))?; + let handle_fee_request = HandleFeeRequest::new( + native_runtime_config.clone(), + state_root_hash, + protocol_version, + transaction_hash, + HandleFeeMode::credit(proposer.clone(), amount, era_id), + ); + scratch_state.handle_fee(handle_fee_request) + } + FeeHandling::Burn => { + // in this mode, the fee portion is burned. + let amount = cost.saturating_sub(refund_amount); + let handle_fee_request = HandleFeeRequest::new( + native_runtime_config.clone(), + state_root_hash, + protocol_version, + transaction_hash, + HandleFeeMode::burn(balance_identifier, Some(amount)), + ); + scratch_state.handle_fee(handle_fee_request) + } + FeeHandling::PayToProposer => { + // in this mode, the consumed gas is paid as a fee to the block proposer + let amount = cost.saturating_sub(refund_amount); + let handle_fee_request = HandleFeeRequest::new( + native_runtime_config.clone(), + state_root_hash, + protocol_version, + transaction_hash, + HandleFeeMode::pay( + Box::new(initiator_addr.clone()), + balance_identifier, + BalanceIdentifier::Public(*(proposer.clone())), + amount, + ), + ); + scratch_state.handle_fee(handle_fee_request) + } + FeeHandling::Accumulate => { + // in this mode, consumed gas is accumulated into a single purse + // for later distribution + let amount = cost.saturating_sub(refund_amount); + let handle_fee_request = HandleFeeRequest::new( + native_runtime_config.clone(), + state_root_hash, + protocol_version, + transaction_hash, + HandleFeeMode::pay( + 
Box::new(initiator_addr.clone()), + balance_identifier, + BalanceIdentifier::Accumulate, + amount, + ), + ); + scratch_state.handle_fee(handle_fee_request) + } + }; + + state_root_hash = + scratch_state.commit_effects(state_root_hash, handle_fee_result.effects().clone())?; + + artifact_builder + .with_handle_fee_result(&handle_fee_result) + .map_err(|_| BlockExecutionError::RootNotFound(state_root_hash))?; + + // clear refund purse if it was set + if refund_purse_active { + // if refunds are turned on we initialize the refund purse to the initiator's main + // purse before doing any processing. NOTE: when executed, custom payment logic + // has the option to call set_refund_purse on the handle payment contract to set + // up a different refund purse, if desired. + let handle_refund_request = HandleRefundRequest::new( + native_runtime_config.clone(), + state_root_hash, + protocol_version, + transaction_hash, + HandleRefundMode::ClearRefundPurse, + ); + let handle_refund_result = scratch_state.handle_refund(handle_refund_request); + if let Err(root_not_found) = + artifact_builder.with_clear_refund_purse_result(&handle_refund_result) + { + if root_not_found { + return Err(BlockExecutionError::RootNotFound(state_root_hash)); + } + warn!( + "{}", + artifact_builder.error_message().unwrap_or( + "unknown error encountered when attempting to clear refund purse" + .to_string() + ) + ); + } + state_root_hash = scratch_state + .commit_effects(state_root_hash, handle_refund_result.effects().clone())?; + } + + artifacts.push(artifact_builder.build()); + } + + // transaction processing is finished + if let Some(metrics) = metrics.as_ref() { + metrics + .exec_block_tnx_processing + .observe(txn_processing_start.elapsed().as_secs_f64()); + } + + // post-processing starts now + let post_processing_start = Instant::now(); + + // calculate and store checksums for approvals and execution effects across the transactions in + // the block we do this so that the full set of approvals and 
the full set of effect metadata + // can be verified if necessary for a given block. the block synchronizer in particular + // depends on the existence of such checksums. + let transaction_approvals_hashes = { + let approvals_checksum = types::compute_approvals_checksum(transaction_ids.clone()) + .map_err(BlockExecutionError::FailedToComputeApprovalsChecksum)?; + let execution_results_checksum = compute_execution_results_checksum( + artifacts.iter().map(|artifact| &artifact.execution_result), + )?; + let mut checksum_registry = ChecksumRegistry::new(); + checksum_registry.insert(APPROVALS_CHECKSUM_NAME, approvals_checksum); + checksum_registry.insert(EXECUTION_RESULTS_CHECKSUM_NAME, execution_results_checksum); + + let mut effects = Effects::new(); + effects.push(TransformV2::new( + Key::ChecksumRegistry, + TransformKindV2::Write( + CLValue::from_t(checksum_registry) + .map_err(BlockExecutionError::ChecksumRegistryToCLValue)? + .into(), + ), + )); + scratch_state.commit_effects(state_root_hash, effects)?; + transaction_ids + .into_iter() + .map(|id| id.approvals_hash()) + .collect() + }; + + if let Some(metrics) = metrics.as_ref() { + metrics + .txn_approvals_hashes_calculation + .observe(post_processing_start.elapsed().as_secs_f64()); + } + + // Pay out ̶b̶l̶o̶c̶k̶ e͇r͇a͇ rewards + // NOTE: despite the name, these rewards are currently paid out per ERA not per BLOCK + // at one point, they were going to be paid out per block (and might be in the future) + // but it ended up settling on per era. the behavior is driven by Some / None + // thus if in future the calling logic passes rewards per block it should just work as is. + // This auto-commits. + if let Some(rewards) = &executable_block.rewards { + let block_rewards_payout_start = Instant::now(); + // Pay out block fees, if relevant. 
This auto-commits + { + let fee_req = FeeRequest::new( + native_runtime_config.clone(), + state_root_hash, + protocol_version, + block_time, + ); + debug!(?fee_req, "distributing fees"); + match scratch_state.distribute_fees(fee_req) { + FeeResult::RootNotFound => { + return Err(BlockExecutionError::RootNotFound(state_root_hash)); + } + FeeResult::Failure(fer) => return Err(BlockExecutionError::DistributeFees(fer)), + FeeResult::Success { + post_state_hash, .. + } => { + debug!("fee distribution success"); + state_root_hash = post_state_hash; + } + } + } + + let rewards_req = BlockRewardsRequest::new( + native_runtime_config.clone(), + state_root_hash, + protocol_version, + block_time, + rewards.clone(), + ); + debug!(?rewards_req, "distributing rewards"); + match scratch_state.distribute_block_rewards(rewards_req) { + BlockRewardsResult::RootNotFound => { + return Err(BlockExecutionError::RootNotFound(state_root_hash)); + } + BlockRewardsResult::Failure(bre) => { + return Err(BlockExecutionError::DistributeBlockRewards(bre)); + } + BlockRewardsResult::Success { + post_state_hash, .. + } => { + debug!("rewards distribution success"); + state_root_hash = post_state_hash; + } + } + if let Some(metrics) = metrics.as_ref() { + metrics + .block_rewards_payout + .observe(block_rewards_payout_start.elapsed().as_secs_f64()); + } + } + + // if era report is some, this is a switch block. a series of end-of-era extra processing must + // transpire before this block is entirely finished. 
+ let step_outcome = if let Some(era_report) = &executable_block.era_report { + // step processing starts now + let step_processing_start = Instant::now(); + + debug!("committing step"); + let step_effects = match commit_step( + native_runtime_config, + &scratch_state, + metrics.clone(), + protocol_version, + state_root_hash, + era_report.clone(), + block_time.value(), + executable_block.era_id.successor(), + ) { + StepResult::RootNotFound => { + return Err(BlockExecutionError::RootNotFound(state_root_hash)); + } + StepResult::Failure(err) => return Err(BlockExecutionError::Step(err)), + StepResult::Success { + effects, + post_state_hash, + .. + } => { + state_root_hash = post_state_hash; + effects + } + }; + debug!("step committed"); + + let era_validators_req = EraValidatorsRequest::new(state_root_hash); + let era_validators_result = data_access_layer.era_validators(era_validators_req); + + let upcoming_era_validators = match era_validators_result { + EraValidatorsResult::RootNotFound => { + panic!("root not found"); + } + EraValidatorsResult::AuctionNotFound => { + panic!("auction not found"); + } + EraValidatorsResult::ValueNotFound(msg) => { + panic!("validator snapshot not found: {}", msg); + } + EraValidatorsResult::Failure(tce) => { + return Err(BlockExecutionError::GetEraValidators(tce)); + } + EraValidatorsResult::Success { era_validators } => era_validators, + }; + + // step processing is finished + if let Some(metrics) = metrics.as_ref() { + metrics + .exec_block_step_processing + .observe(step_processing_start.elapsed().as_secs_f64()); + } + Some(StepOutcome { + step_effects, + upcoming_era_validators, + }) + } else { + None }; - let commit_result = commit( - engine_state, - metrics, + + // Pruning -- this is orthogonal to the contents of the block, but we deliberately do it + // at the end to avoid a read ordering issue during block execution. 
+ if let Some(previous_block_height) = block_height.checked_sub(1) { + if let Some(keys_to_prune) = calculate_prune_eras( + activation_point_era_id, + key_block_height_for_activation_point, + previous_block_height, + prune_batch_size, + ) { + let pruning_start = Instant::now(); + + let first_key = keys_to_prune.first().copied(); + let last_key = keys_to_prune.last().copied(); + info!( + previous_block_height, + %key_block_height_for_activation_point, + %state_root_hash, + first_key=?first_key, + last_key=?last_key, + "commit prune: preparing prune config" + ); + let request = PruneRequest::new(state_root_hash, keys_to_prune); + match scratch_state.prune(request) { + PruneResult::RootNotFound => { + error!( + previous_block_height, + %state_root_hash, + "commit prune: root not found" + ); + panic!( + "Root {} not found while performing a prune.", + state_root_hash + ); + } + PruneResult::MissingKey => { + warn!( + previous_block_height, + %state_root_hash, + "commit prune: key does not exist" + ); + } + PruneResult::Success { + post_state_hash, .. + } => { + info!( + previous_block_height, + %key_block_height_for_activation_point, + %state_root_hash, + %post_state_hash, + first_key=?first_key, + last_key=?last_key, + "commit prune: success" + ); + state_root_hash = post_state_hash; + } + PruneResult::Failure(tce) => { + error!(?tce, "commit prune: failure"); + return Err(tce.into()); + } + } + if let Some(metrics) = metrics.as_ref() { + metrics + .pruning_time + .observe(pruning_start.elapsed().as_secs_f64()); + } + } + } + + { + let database_write_start = Instant::now(); + // Finally, the new state-root-hash from the cumulative changes to global state is + // returned when they are written to LMDB. 
+ state_root_hash = data_access_layer.write_scratch_to_db(state_root_hash, scratch_state)?; + if let Some(metrics) = metrics.as_ref() { + metrics + .scratch_lmdb_write_time + .observe(database_write_start.elapsed().as_secs_f64()); + } + + // Flush once, after all data mutation. + let database_flush_start = Instant::now(); + let flush_req = FlushRequest::new(); + let flush_result = data_access_layer.flush(flush_req); + if let Err(gse) = flush_result.as_error() { + error!("failed to flush lmdb"); + return Err(BlockExecutionError::Lmdb(gse)); + } + if let Some(metrics) = metrics.as_ref() { + metrics + .database_flush_time + .observe(database_flush_start.elapsed().as_secs_f64()); + } + } + + // the rest of this is post process, picking out data bits to return to caller + let next_era_id = executable_block.era_id.successor(); + let maybe_next_era_validator_weights: Option<(BTreeMap, u8)> = + match step_outcome.as_ref() { + None => None, + Some(effects_and_validators) => { + match effects_and_validators + .upcoming_era_validators + .get(&next_era_id) + .cloned() + { + Some(validators) => next_era_gas_price.map(|gas_price| (validators, gas_price)), + None => None, + } + } + }; + + let era_end = match ( + executable_block.era_report, + maybe_next_era_validator_weights, + ) { + (None, None) => None, + ( + Some(InternalEraReport { + equivocators, + inactive_validators, + }), + Some((next_era_validator_weights, next_era_gas_price)), + ) => Some(EraEndV2::new( + equivocators, + inactive_validators, + next_era_validator_weights, + executable_block.rewards.unwrap_or_default(), + next_era_gas_price, + )), + (maybe_era_report, maybe_next_era_validator_weights) => { + if maybe_era_report.is_none() { + error!( + "era_end {}: maybe_era_report is none", + executable_block.era_id + ); + } + if maybe_next_era_validator_weights.is_none() { + error!( + "era_end {}: maybe_next_era_validator_weights is none", + executable_block.era_id + ); + } + return 
Err(BlockExecutionError::FailedToCreateEraEnd { + maybe_era_report, + maybe_next_era_validator_weights, + }); + } + }; + + let block = Arc::new(BlockV2::new( + parent_hash, + parent_seed, state_root_hash, - execution_effect.transforms, - ) - .await; - trace!(?commit_result, "commit result"); - match commit_result { - Ok(CommitResult::Success { state_root }) => { - debug!(?state_root, "commit succeeded"); - Ok((state_root.into(), execution_result)) - } - _ => { - error!( - ?commit_result, - "commit failed - internal contract runtime error" + executable_block.random_bit, + era_end, + executable_block.timestamp, + executable_block.era_id, + block_height, + protocol_version, + (*proposer).clone(), + executable_block.transaction_map, + executable_block.rewarded_signatures, + current_gas_price, + last_switch_block_hash, + )); + + let proof_of_checksum_registry = match data_access_layer.tracking_copy(state_root_hash)? { + Some(tc) => match tc.reader().read_with_proof(&Key::ChecksumRegistry)? { + Some(proof) => proof, + None => return Err(BlockExecutionError::MissingChecksumRegistry), + }, + None => return Err(BlockExecutionError::RootNotFound(state_root_hash)), + }; + + let approvals_hashes = Box::new(ApprovalsHashes::new( + *block.hash(), + transaction_approvals_hashes, + proof_of_checksum_registry, + )); + + // processing is finished now + if let Some(metrics) = metrics.as_ref() { + metrics + .exec_block_post_processing + .observe(post_processing_start.elapsed().as_secs_f64()); + metrics + .exec_block_total + .observe(start.elapsed().as_secs_f64()); + } + + Ok(BlockAndExecutionArtifacts { + block, + approvals_hashes, + execution_artifacts: artifacts, + step_outcome, + }) +} + +/// Execute the transaction without committing the effects. +/// Intended to be used for discovery operations on read-only nodes. +/// +/// Returns effects of the execution. 
+pub(super) fn speculatively_execute( + state_provider: &S, + chainspec: &Chainspec, + execution_engine_v1: &ExecutionEngineV1, + block_header: BlockHeader, + input_transaction: Transaction, +) -> SpeculativeExecutionResult +where + S: StateProvider, +{ + let transaction_config = &chainspec.transaction_config; + let maybe_transaction = MetaTransaction::from_transaction( + &input_transaction, + chainspec.core_config.pricing_handling, + transaction_config, + ); + if let Err(error) = maybe_transaction { + return SpeculativeExecutionResult::invalid_transaction(error); + } + let transaction = maybe_transaction.unwrap(); + let state_root_hash = block_header.state_root_hash(); + let parent_block_hash = block_header.block_hash(); + let block_height = block_header.height(); + let block_time = block_header + .timestamp() + .saturating_add(chainspec.core_config.minimum_block_time); + let gas_limit = match input_transaction.gas_limit(chainspec, transaction.transaction_lane()) { + Ok(gas_limit) => gas_limit, + Err(_) => { + return SpeculativeExecutionResult::invalid_gas_limit(input_transaction); + } + }; + + if transaction.is_deploy_transaction() { + if transaction.is_native() { + let limit = Gas::from(chainspec.system_costs_config.mint_costs().transfer); + let protocol_version = chainspec.protocol_version(); + let native_runtime_config = NativeRuntimeConfig::from_chainspec(chainspec); + let transaction_hash = transaction.hash(); + let initiator_addr = transaction.initiator_addr(); + let authorization_keys = transaction.authorization_keys(); + let runtime_args = match transaction.session_args().as_named() { + Some(runtime_args) => runtime_args.clone(), + None => { + return SpeculativeExecutionResult::InvalidTransaction(InvalidTransaction::V1( + InvalidTransactionV1::ExpectedNamedArguments, + )); + } + }; + + let result = state_provider.transfer(TransferRequest::with_runtime_args( + native_runtime_config.clone(), + *state_root_hash, + protocol_version, + transaction_hash, + 
initiator_addr.clone(), + authorization_keys, + runtime_args, + )); + SpeculativeExecutionResult::WasmV1(Box::new(utils::spec_exec_from_transfer_result( + limit, + result, + block_header.block_hash(), + ))) + } else { + let block_info = BlockInfo::new( + *state_root_hash, + block_time.into(), + parent_block_hash, + block_height, + execution_engine_v1.config().protocol_version(), ); - Err(()) + let session_input_data = transaction.to_session_input_data(); + let wasm_v1_result = + match WasmV1Request::new_session(block_info, gas_limit, &session_input_data) { + Ok(wasm_v1_request) => { + execution_engine_v1.execute(state_provider, wasm_v1_request) + } + Err(error) => WasmV1Result::invalid_executable_item(gas_limit, error), + }; + SpeculativeExecutionResult::WasmV1(Box::new(utils::spec_exec_from_wasm_v1_result( + wasm_v1_result, + block_header.block_hash(), + ))) } + } else { + SpeculativeExecutionResult::ReceivedV1Transaction } } -pub(super) async fn commit( - engine_state: Arc>, - metrics: Arc, +fn invoked_contract_will_pay( + state_provider: &ScratchGlobalState, state_root_hash: Digest, - effects: AdditiveMap, -) -> Result { - trace!(?state_root_hash, ?effects, "commit"); - let correlation_id = CorrelationId::new(); - let start = Instant::now(); - let result = engine_state.apply_effect(correlation_id, state_root_hash.into(), effects); - metrics.apply_effect.observe(start.elapsed().as_secs_f64()); - trace!(?result, "commit result"); - result + transaction: &MetaTransaction, +) -> Result, StateResultError> { + let (hash_addr, entry_point_name) = match transaction.contract_direct_address() { + None => { + return Err(StateResultError::ValueNotFound( + "contract direct address not found".to_string(), + )) + } + Some((hash_addr, entry_point_name)) => (hash_addr, entry_point_name), + }; + let entity_addr = EntityAddr::new_smart_contract(hash_addr); + let entry_point_request = EntryPointRequest::new(state_root_hash, entry_point_name, hash_addr); + let entry_point_response = 
state_provider.entry_point(entry_point_request); + match entry_point_response { + EntryPointResult::RootNotFound => Err(StateResultError::RootNotFound), + EntryPointResult::ValueNotFound(msg) => Err(StateResultError::ValueNotFound(msg)), + EntryPointResult::Failure(tce) => Err(StateResultError::Failure(tce)), + EntryPointResult::Success { entry_point } => { + if entry_point.will_pay_direct_invocation() { + Ok(Some(entity_addr)) + } else { + Ok(None) + } + } + } } -pub(super) async fn execute( - engine_state: Arc>, - metrics: Arc, - execute_request: ExecuteRequest, -) -> Result, engine_state::Error> { - trace!(?execute_request, "execute"); - let correlation_id = CorrelationId::new(); +#[allow(clippy::too_many_arguments)] +fn commit_step( + native_runtime_config: NativeRuntimeConfig, + scratch_state: &ScratchGlobalState, + maybe_metrics: Option>, + protocol_version: ProtocolVersion, + state_hash: Digest, + InternalEraReport { + equivocators, + inactive_validators, + }: InternalEraReport, + era_end_timestamp_millis: u64, + next_era_id: EraId, +) -> StepResult { + // Both inactive validators and equivocators are evicted + let evict_items = inactive_validators + .into_iter() + .chain(equivocators) + .map(EvictItem::new) + .collect(); + + let step_request = StepRequest::new( + native_runtime_config, + state_hash, + protocol_version, + vec![], // <-- casper mainnet currently does not slash + evict_items, + next_era_id, + era_end_timestamp_millis, + ); + + // Commit the step. 
let start = Instant::now(); - let result = engine_state.run_execute(correlation_id, execute_request); - metrics.run_execute.observe(start.elapsed().as_secs_f64()); - trace!(?result, "execute result"); + let result = scratch_state.step(step_request); + debug_assert!(result.is_success(), "{:?}", result); + if let Some(metrics) = maybe_metrics { + let elapsed = start.elapsed().as_secs_f64(); + metrics.commit_step.observe(elapsed); + metrics.latest_commit_step.set(elapsed); + } + trace!(?result, "step response"); result } + +/// Computes the checksum of the given set of execution results. +/// +/// This will either be a simple hash of the bytesrepr-encoded results (in the case that the +/// serialized results are not greater than `ChunkWithProof::CHUNK_SIZE_BYTES`), or otherwise will +/// be a Merkle root hash of the chunks derived from the serialized results. +pub(crate) fn compute_execution_results_checksum<'a>( + execution_results_iter: impl Iterator + Clone, +) -> Result { + // Serialize the execution results as if they were `Vec`. 
+ let serialized_length = U32_SERIALIZED_LENGTH + + execution_results_iter + .clone() + .map(|exec_result| exec_result.serialized_length()) + .sum::(); + let mut serialized = vec![]; + serialized + .try_reserve_exact(serialized_length) + .map_err(|_| { + BlockExecutionError::FailedToComputeApprovalsChecksum(bytesrepr::Error::OutOfMemory) + })?; + let item_count: u32 = execution_results_iter + .clone() + .count() + .try_into() + .map_err(|_| { + BlockExecutionError::FailedToComputeApprovalsChecksum( + bytesrepr::Error::NotRepresentable, + ) + })?; + item_count + .write_bytes(&mut serialized) + .map_err(BlockExecutionError::FailedToComputeExecutionResultsChecksum)?; + for execution_result in execution_results_iter { + execution_result + .write_bytes(&mut serialized) + .map_err(BlockExecutionError::FailedToComputeExecutionResultsChecksum)?; + } + + // Now hash the serialized execution results, using the `Chunkable` trait's `hash` method to + // chunk if required. + serialized.hash().map_err(|_| { + BlockExecutionError::FailedToComputeExecutionResultsChecksum(bytesrepr::Error::OutOfMemory) + }) +} diff --git a/node/src/components/contract_runtime/operations/wasm_v2_request.rs b/node/src/components/contract_runtime/operations/wasm_v2_request.rs new file mode 100644 index 0000000000..0fae7680d3 --- /dev/null +++ b/node/src/components/contract_runtime/operations/wasm_v2_request.rs @@ -0,0 +1,309 @@ +use std::sync::Arc; + +use bytes::Bytes; +use casper_executor_wasm::{ + install::{ + InstallContractError, InstallContractRequest, InstallContractRequestBuilder, + InstallContractResult, + }, + ExecutorV2, +}; +use casper_executor_wasm_interface::{ + executor::{ + ExecuteRequest, ExecuteRequestBuilder, ExecuteWithProviderError, ExecuteWithProviderResult, + ExecutionKind, + }, + GasUsage, +}; +use casper_storage::{ + global_state::state::{CommitProvider, StateProvider}, + AddressGeneratorBuilder, +}; +use casper_types::{ + execution::Effects, BlockHash, Digest, Gas, Key, 
TransactionEntryPoint, + TransactionInvocationTarget, TransactionRuntimeParams, TransactionTarget, U512, +}; +use thiserror::Error; +use tracing::info; + +use super::MetaTransaction; + +/// The request to execute a Wasm contract. +pub(crate) enum WasmV2Request { + /// The request to install a Wasm contract. + Install(InstallContractRequest), + /// The request to execute a Wasm contract. + Execute(ExecuteRequest), +} + +/// The result of executing a Wasm contract. +pub(crate) enum WasmV2Result { + /// The result of installing a Wasm contract. + Install(InstallContractResult), + /// The result of executing a Wasm contract. + Execute(ExecuteWithProviderResult), +} + +impl WasmV2Result { + /// Returns the gas usage of the contract execution. + pub(crate) fn gas_usage(&self) -> &GasUsage { + match self { + WasmV2Result::Install(result) => result.gas_usage(), + WasmV2Result::Execute(result) => result.gas_usage(), + } + } + + /// Returns the effects of the contract execution. + pub(crate) fn effects(&self) -> &Effects { + match self { + WasmV2Result::Install(result) => result.effects(), + WasmV2Result::Execute(result) => result.effects(), + } + } + + pub(crate) fn post_state_hash(&self) -> Digest { + match self { + WasmV2Result::Install(result) => result.post_state_hash(), + WasmV2Result::Execute(result) => result.post_state_hash(), + } + } +} + +#[derive(Error, Debug)] +pub(crate) enum WasmV2Error { + #[error(transparent)] + Install(InstallContractError), + #[error(transparent)] + Execute(ExecuteWithProviderError), +} + +#[derive(Clone, Eq, PartialEq, Error, Debug)] +pub(crate) enum InvalidRequest { + #[error("Expected bytes arguments")] + ExpectedBytesArguments, + #[error("Expected target")] + ExpectedTarget, + #[error("Invalid gas limit: {0}")] + InvalidGasLimit(U512), + #[error("Expected transferred value")] + ExpectedTransferredValue, + #[error("Expected V2 runtime")] + ExpectedV2Runtime, +} + +impl WasmV2Request { + pub(crate) fn new( + gas_limit: Gas, + 
network_name: impl Into>, + state_root_hash: Digest, + parent_block_hash: BlockHash, + block_height: u64, + transaction: &MetaTransaction, + ) -> Result { + let transaction_hash = transaction.hash(); + let initiator_addr = transaction.initiator_addr(); + + let gas_limit: u64 = gas_limit + .value() + .try_into() + .map_err(|_| InvalidRequest::InvalidGasLimit(gas_limit.value()))?; + + let address_generator = AddressGeneratorBuilder::default() + .seed_with(transaction_hash.as_ref()) + .build(); + + let session_args = transaction.session_args(); + + let input_data = session_args + .as_bytesrepr() + .ok_or(InvalidRequest::ExpectedBytesArguments)?; + + let value = transaction + .transferred_value() + .ok_or(InvalidRequest::ExpectedTransferredValue)?; + + enum Target { + Install { + module_bytes: Bytes, + entry_point: String, + transferred_value: u64, + seed: Option<[u8; 32]>, + }, + Session { + module_bytes: Bytes, + }, + Stored { + id: TransactionInvocationTarget, + entry_point: String, + }, + } + + let transaction_target = transaction.target().ok_or(InvalidRequest::ExpectedTarget)?; + let target = match transaction_target { + TransactionTarget::Native => todo!(), // + TransactionTarget::Stored { id, runtime: _ } => match transaction.entry_point() { + TransactionEntryPoint::Custom(entry_point) => Target::Stored { + id: id.clone(), + entry_point: entry_point.clone(), + }, + _ => todo!(), + }, + + TransactionTarget::Session { + module_bytes: _, + runtime: TransactionRuntimeParams::VmCasperV1, + is_install_upgrade: _, // TODO: Handle this + } => { + return Err(InvalidRequest::ExpectedV2Runtime); + } + TransactionTarget::Session { + module_bytes, + runtime: + TransactionRuntimeParams::VmCasperV2 { + transferred_value, + seed, + }, + is_install_upgrade: _, // TODO: Handle this + } => match transaction.entry_point() { + TransactionEntryPoint::Call => Target::Session { + module_bytes: module_bytes.clone().take_inner().into(), + }, + TransactionEntryPoint::Custom(entry_point) 
=> Target::Install { + module_bytes: module_bytes.clone().take_inner().into(), + entry_point: entry_point.to_string(), + transferred_value, + seed, + }, + _ => todo!(), + }, + }; + + info!(%transaction_hash, "executing v1 contract"); + + match target { + Target::Install { + module_bytes, + entry_point, + transferred_value, + seed, + } => { + let mut builder = InstallContractRequestBuilder::default(); + + let entry_point = (!entry_point.is_empty()).then_some(entry_point); + + match entry_point { + Some(entry_point) => { + builder = builder + .with_entry_point(entry_point.clone()) + // Args only matter if there is a constructor to be called. + .with_input(input_data.clone().take_inner().into()); + } + None => { + // No input data expected if there is no entry point. This should be + // validated in transaction acceptor. + assert!(input_data.is_empty()); + } + } + + if let Some(seed) = seed { + builder = builder.with_seed(seed); + } + + // Value is expected to be the same as transferred value, it's just taken through + // different API. + debug_assert_eq!(transferred_value, value); + + let install_request = builder + .with_initiator(initiator_addr.account_hash()) + .with_gas_limit(gas_limit) + .with_transaction_hash(transaction_hash) + .with_wasm_bytes(module_bytes) + .with_address_generator(address_generator) + .with_transferred_value(value) + .with_chain_name(network_name) + .with_block_time(transaction.timestamp().into()) + .with_state_hash(state_root_hash) + .with_parent_block_hash(parent_block_hash) + .with_block_height(block_height) + .build() + .expect("should build"); + + Ok(Self::Install(install_request)) + } + Target::Session { .. } | Target::Stored { .. 
} => { + let mut builder = ExecuteRequestBuilder::default(); + + let initiator_account_hash = &initiator_addr.account_hash(); + + let initiator_key = Key::Account(*initiator_account_hash); + + builder = builder + .with_address_generator(address_generator) + .with_gas_limit(gas_limit) + .with_transaction_hash(transaction_hash) + .with_initiator(*initiator_account_hash) + .with_caller_key(initiator_key) + .with_chain_name(network_name) + .with_transferred_value(value) + .with_block_time(transaction.timestamp().into()) + .with_input(input_data.clone().take_inner().into()) + .with_state_hash(state_root_hash) + .with_parent_block_hash(parent_block_hash) + .with_block_height(block_height); + let execution_kind = match target { + Target::Session { module_bytes } => ExecutionKind::SessionBytes(module_bytes), + Target::Stored { + id: TransactionInvocationTarget::ByHash(smart_contract_addr), + entry_point, + } => ExecutionKind::Stored { + address: smart_contract_addr, + entry_point: entry_point.clone(), + }, + Target::Stored { id, entry_point } => { + todo!("Unsupported target {entry_point} {id:?}") + } + Target::Install { .. } => unreachable!(), + }; + + builder = builder.with_target(execution_kind); + + let execute_request = builder.build().expect("should build"); + + Ok(Self::Execute(execute_request)) + } + } + } + + pub(crate) fn execute

( + self, + engine: &ExecutorV2, + state_root_hash: Digest, + state_provider: &P, + ) -> Result + where + P: StateProvider + CommitProvider, +

::Reader: 'static, + { + match self { + WasmV2Request::Install(install_request) => { + match engine.install_contract(state_root_hash, state_provider, install_request) { + Ok(result) => Ok(WasmV2Result::Install(result)), + Err(error) => Err(WasmV2Error::Install(error)), + } + } + WasmV2Request::Execute(execute_request) => { + match engine.execute_with_provider(state_root_hash, state_provider, execute_request) + { + Ok(result) => Ok(WasmV2Result::Execute(result)), + Err(error) => Err(WasmV2Error::Execute(error)), + } + } + } + } +} + +#[cfg(test)] +mod tests { + #[test] + fn smoke_test() {} +} diff --git a/node/src/components/contract_runtime/rewards.rs b/node/src/components/contract_runtime/rewards.rs new file mode 100644 index 0000000000..a8cd4a1f8b --- /dev/null +++ b/node/src/components/contract_runtime/rewards.rs @@ -0,0 +1,666 @@ +#[cfg(test)] +mod tests; + +use std::{collections::BTreeMap, ops::Range, sync::Arc}; + +use casper_storage::{ + data_access_layer::{ + DataAccessLayer, EraValidatorsRequest, RoundSeigniorageRateRequest, + RoundSeigniorageRateResult, TotalSupplyRequest, TotalSupplyResult, + }, + global_state::state::{lmdb::LmdbGlobalState, StateProvider}, +}; +use futures::stream::{self, StreamExt as _, TryStreamExt as _}; + +use itertools::Itertools; +use num_rational::Ratio; +use num_traits::{CheckedAdd, CheckedMul, ToPrimitive}; +use thiserror::Error; +use tracing::trace; + +use crate::{ + contract_runtime::metrics::Metrics, + effect::{ + requests::{ContractRuntimeRequest, StorageRequest}, + EffectBuilder, + }, + types::ExecutableBlock, +}; +use casper_types::{ + Block, Chainspec, CoreConfig, Digest, EraId, ProtocolVersion, PublicKey, RewardedSignatures, + U512, +}; + +pub(crate) trait ReactorEventT: + Send + From + From +{ +} + +impl ReactorEventT for T where T: Send + From + From {} + +#[derive(Debug)] +pub(crate) struct CitedBlock { + protocol_version: ProtocolVersion, + height: u64, + era_id: EraId, + proposer: PublicKey, + rewarded_signatures: 
RewardedSignatures, + state_root_hash: Digest, + is_switch_block: bool, + is_genesis: bool, +} + +impl CitedBlock { + fn from_executable_block(block: ExecutableBlock, protocol_version: ProtocolVersion) -> Self { + Self { + protocol_version, + era_id: block.era_id, + height: block.height, + proposer: *block.proposer, + rewarded_signatures: block.rewarded_signatures, + state_root_hash: Digest::default(), + is_switch_block: block.era_report.is_some(), + is_genesis: block.era_id.is_genesis(), + } + } +} + +#[derive(Debug)] +pub(crate) struct RewardsInfo { + eras_info: BTreeMap, + cited_blocks: Vec, + cited_block_height_start: u64, +} + +/// The era information needed in the rewards computation: +#[derive(Debug, Clone)] +pub(crate) struct EraInfo { + weights: BTreeMap, + total_weights: U512, + reward_per_round: Ratio, +} + +#[derive(Error, Debug)] +pub enum RewardsError { + /// We got a block height which is not in the era range it should be in (should not happen). + #[error("block height {0} is not in the era range")] + HeightNotInEraRange(u64), + /// The era is not in the range we have (should not happen). + #[error("era {0} is not in the era range")] + EraIdNotInEraRange(EraId), + /// The validator public key is not in the era it should be in (should not happen). + #[error("validator key {0:?} is not in the era")] + ValidatorKeyNotInEra(Box), + /// We didn't have a required switch block. + #[error("missing switch block for era {0}")] + MissingSwitchBlock(EraId), + /// We got an overflow while computing something. + #[error("arithmetic overflow")] + ArithmeticOverflow, + #[error("failed to fetch block with height {0}")] + FailedToFetchBlockWithHeight(u64), + #[error("failed to fetch era {0}")] + FailedToFetchEra(String), + /// Fetching the era validators succedeed, but no info is present (should not happen). + /// The `Digest` is the one that was queried. 
+ #[error("failed to fetch era validators for {0}")] + FailedToFetchEraValidators(Digest), + #[error("failed to fetch total supply")] + FailedToFetchTotalSupply, + #[error("failed to fetch seigniorage rate")] + FailedToFetchSeigniorageRate, +} + +impl RewardsInfo { + pub async fn new( + effect_builder: EffectBuilder, + data_access_layer: Arc>, + protocol_version: ProtocolVersion, + activation_era_id: EraId, + maybe_upgraded_validators: Option<&BTreeMap>, + signature_rewards_max_delay: u64, + executable_block: ExecutableBlock, + ) -> Result { + let current_era_id = executable_block.era_id; + // All the blocks that may appear as a signed block. They are collected upfront, so that we + // don't have to worry about doing it one by one later. + // + // They are sorted from the oldest to the newest: + + let cited_block_height_start = { + let previous_era_id = current_era_id.saturating_sub(1); + let previous_era_switch_block_header = effect_builder + .get_switch_block_header_by_era_id_from_storage(previous_era_id) + .await + .ok_or(RewardsError::MissingSwitchBlock(previous_era_id))?; + + if previous_era_id.is_genesis() || previous_era_id == activation_era_id { + // We do not attempt to reward blocks from before an upgrade! + previous_era_switch_block_header.height() + } else { + // Here we do not substract 1, because we want one block more: + previous_era_switch_block_header + .height() + .saturating_sub(signature_rewards_max_delay) + } + }; + + // We need just one block from before the upgrade to determine the validators in + // the following era. 
+ let range_to_fetch = cited_block_height_start.saturating_sub(1)..executable_block.height; + let mut cited_blocks = + collect_past_blocks_batched(effect_builder, range_to_fetch.clone()).await?; + + tracing::info!( + current_era_id = %current_era_id.value(), + range_requested = ?range_to_fetch, + num_fetched_blocks = %cited_blocks.len(), + "blocks fetched", + ); + + let eras_info = Self::create_eras_info( + data_access_layer, + activation_era_id, + current_era_id, + maybe_upgraded_validators, + cited_blocks.iter(), + )?; + + cited_blocks.push(CitedBlock::from_executable_block( + executable_block, + protocol_version, + )); + + Ok(RewardsInfo { + eras_info, + cited_blocks, + cited_block_height_start, + }) + } + + #[cfg(test)] + pub fn new_testing(eras_info: BTreeMap, cited_blocks: Vec) -> Self { + let cited_block_height_start = cited_blocks.first().map(|block| block.height).unwrap_or(0); + Self { + eras_info, + cited_blocks, + cited_block_height_start, + } + } + + /// `block_hashs` is an iterator over the era ID to get the information about + the block + /// hash to query to have such information (which may not be from the same era). + fn create_eras_info<'a>( + data_access_layer: Arc>, + activation_era_id: EraId, + current_era_id: EraId, + maybe_upgraded_validators: Option<&BTreeMap>, + mut cited_blocks: impl Iterator, + ) -> Result, RewardsError> { + let oldest_block = cited_blocks.next(); + + // If the oldest block is genesis, we add the validator information for genesis (era 0) from + // era 1, because it's the same: + let oldest_block_is_genesis = oldest_block.is_some_and(|block| block.is_genesis); + + // Here, we gather a list of all of the era ID we need to fetch to calculate the rewards, + // as well as the state root hash allowing to query this information. 
+ // + // To get all of the needed era IDs, we take the very first block, then every switch block + // We take the first block, because we need it for the first cited era, then every switch + // block for every subsequent eras. + // If the first block is itself a switch block, that's fine, because we fetch one block more + // in the first place to handle this case. + let eras_and_state_root_hashes: Vec<_> = oldest_block + .into_iter() + .chain(cited_blocks.filter(|&block| block.is_switch_block)) + .map(|block| { + let state_root_hash = block.state_root_hash; + let protocol_version = block.protocol_version; + let era = if block.is_switch_block { + block.era_id.successor() + } else { + block.era_id + }; + (era, protocol_version, state_root_hash) + }) + .collect(); + + let num_eras_to_fetch = + eras_and_state_root_hashes.len() + usize::from(oldest_block_is_genesis); + + let data_access_layer = &data_access_layer; + + let mut eras_info: BTreeMap<_, _> = eras_and_state_root_hashes + .into_iter() + .map(|(era_id, protocol_version, state_root_hash)| { + let weights = if let (true, Some(upgraded_validators)) = + (era_id == activation_era_id, maybe_upgraded_validators) + { + upgraded_validators.clone() + } else { + let request = EraValidatorsRequest::new(state_root_hash); + let era_validators_result = data_access_layer.era_validators(request); + let msg = format!("{}", era_validators_result); + era_validators_result + .take_era_validators() + .ok_or(msg) + .map_err(RewardsError::FailedToFetchEra)? + // We consume the map to not clone the value: + .into_iter() + .find(|(key, _)| key == &era_id) + .ok_or(RewardsError::FailedToFetchEraValidators(state_root_hash))? 
+ .1 + }; + + let total_supply_request = + TotalSupplyRequest::new(state_root_hash, protocol_version); + let total_supply = match data_access_layer.total_supply(total_supply_request) { + TotalSupplyResult::RootNotFound + | TotalSupplyResult::MintNotFound + | TotalSupplyResult::ValueNotFound(_) + | TotalSupplyResult::Failure(_) => { + return Err(RewardsError::FailedToFetchTotalSupply) + } + TotalSupplyResult::Success { total_supply } => total_supply, + }; + + let seigniorage_rate_request = + RoundSeigniorageRateRequest::new(state_root_hash, protocol_version); + let seigniorage_rate = + match data_access_layer.round_seigniorage_rate(seigniorage_rate_request) { + RoundSeigniorageRateResult::RootNotFound + | RoundSeigniorageRateResult::MintNotFound + | RoundSeigniorageRateResult::ValueNotFound(_) + | RoundSeigniorageRateResult::Failure(_) => { + return Err(RewardsError::FailedToFetchSeigniorageRate); + } + RoundSeigniorageRateResult::Success { rate } => rate, + }; + + let reward_per_round = seigniorage_rate * total_supply; + let total_weights = weights.values().copied().sum(); + + Ok::<_, RewardsError>(( + era_id, + EraInfo { + weights, + total_weights, + reward_per_round, + }, + )) + }) + .try_collect()?; + + // We cannot get the genesis info from a root hash, so we copy it from era 1 when needed. + if oldest_block_is_genesis { + let era_1 = EraId::from(1); + let era_1_info = eras_info + .get(&era_1) + .ok_or(RewardsError::EraIdNotInEraRange(era_1))?; + eras_info.insert(EraId::from(0), era_1_info.clone()); + } + + { + let era_ids: Vec<_> = eras_info.keys().map(|id| id.value()).collect(); + tracing::info!( + current_era_id = %current_era_id.value(), + %num_eras_to_fetch, + eras_fetched = ?era_ids, + ); + } + + Ok(eras_info) + } + + /// Returns the validators from a given era. + pub fn validator_keys( + &self, + era_id: EraId, + ) -> Result + '_, RewardsError> { + let keys = self + .eras_info + .get(&era_id) + .ok_or(RewardsError::EraIdNotInEraRange(era_id))? 
+ .weights + .keys() + .cloned(); + + Ok(keys) + } + + /// Returns the total potential reward per block. + /// Since it is per block, we do not care about the expected number of blocks per era. + pub fn reward(&self, era_id: EraId) -> Result, RewardsError> { + Ok(self + .eras_info + .get(&era_id) + .ok_or(RewardsError::EraIdNotInEraRange(era_id))? + .reward_per_round) + } + + /// Returns the weight ratio for a given validator for a given era. + pub fn weight_ratio( + &self, + era_id: EraId, + validator: &PublicKey, + ) -> Result, RewardsError> { + let era = self + .eras_info + .get(&era_id) + .ok_or(RewardsError::EraIdNotInEraRange(era_id))?; + let weight = era + .weights + .get(validator) + .ok_or_else(|| RewardsError::ValidatorKeyNotInEra(Box::new(validator.clone())))?; + + Ok(Ratio::new(*weight, era.total_weights)) + } + + /// Returns the era in which is the given block height. + pub fn era_for_block_height(&self, height: u64) -> Result { + self.cited_blocks + .iter() + .find_map(|block| (block.height == height).then_some(block.era_id)) + .ok_or(RewardsError::HeightNotInEraRange(height)) + } + + /// Returns all the blocks belonging to an era. + pub fn blocks_from_era(&self, era_id: EraId) -> impl Iterator { + self.cited_blocks + .iter() + .filter(move |block| block.era_id == era_id) + } +} + +impl EraInfo { + #[cfg(test)] + pub fn new_testing(weights: BTreeMap, reward_per_round: Ratio) -> Self { + let total_weights = weights.values().copied().sum(); + Self { + weights, + total_weights, + reward_per_round, + } + } +} + +/// First create the `RewardsInfo` structure, then compute the rewards. +/// It is done in 2 steps so that it is easier to unit test the rewards calculation. 
+pub(crate) async fn fetch_data_and_calculate_rewards_for_era( + effect_builder: EffectBuilder, + data_access_layer: Arc>, + chainspec: &Chainspec, + metrics: &Arc, + executable_block: ExecutableBlock, +) -> Result>, RewardsError> { + let current_era_id = executable_block.era_id; + tracing::info!( + current_era_id = %current_era_id.value(), + "starting the rewards calculation" + ); + + if current_era_id.is_genesis() + || current_era_id == chainspec.protocol_config.activation_point.era_id() + { + // Special case: genesis block and immediate switch blocks do not yield any reward, because + // there is no block producer, and no signatures from previous blocks to be rewarded: + Ok(BTreeMap::new()) + } else { + let rewards_info = RewardsInfo::new( + effect_builder, + data_access_layer, + chainspec.protocol_version(), + chainspec.protocol_config.activation_point.era_id(), + chainspec + .protocol_config + .global_state_update + .as_ref() + .and_then(|gsu| gsu.validators.as_ref()), + chainspec.core_config.signature_rewards_max_delay, + executable_block, + ) + .await?; + + let cited_blocks_count_current_era = rewards_info.blocks_from_era(current_era_id).count(); + + let reward_per_round_current_era = rewards_info + .eras_info + .get(¤t_era_id) + .expect("expected EraInfo") + .reward_per_round; + + let rewards = rewards_for_era(rewards_info, current_era_id, &chainspec.core_config); + + // Calculate and push reward metric(s) + if let Ok(rewards_map) = &rewards { + let expected_total_seigniorage = reward_per_round_current_era + .to_integer() + .saturating_mul(U512::from(cited_blocks_count_current_era as u64)); + let actual_total_seigniorage = + rewards_map + .iter() + .fold(U512::zero(), |acc, (_, rewards_vec)| { + let current_era_reward = rewards_vec + .first() + .expect("expected current era reward amount"); + acc.saturating_add(*current_era_reward) + }); + let seigniorage_target_fraction = Ratio::new( + actual_total_seigniorage.low_u128(), + 
expected_total_seigniorage.low_u128(), + ); + let gauge_value = match Ratio::to_f64(&seigniorage_target_fraction) { + Some(v) => v, + None => f64::NAN, + }; + metrics.seigniorage_target_fraction.set(gauge_value) + } + + rewards + } +} + +pub(crate) fn rewards_for_era( + rewards_info: RewardsInfo, + current_era_id: EraId, + core_config: &CoreConfig, +) -> Result>, RewardsError> { + fn to_ratio_u512(ratio: Ratio) -> Ratio { + Ratio::new(U512::from(*ratio.numer()), U512::from(*ratio.denom())) + } + + let ratio_u512_zero = Ratio::new(U512::zero(), U512::one()); + let zero_for_current_era = { + let mut map = BTreeMap::new(); + map.insert(current_era_id, ratio_u512_zero); + map + }; + let mut full_reward_for_validators: BTreeMap<_, _> = rewards_info + .validator_keys(current_era_id)? + .map(|key| (key, zero_for_current_era.clone())) + .collect(); + + let mut increase_value_for_key_and_era = + |key: PublicKey, era: EraId, value: Ratio| -> Result<(), RewardsError> { + match full_reward_for_validators.entry(key) { + std::collections::btree_map::Entry::Vacant(entry) => { + let mut map = BTreeMap::new(); + map.insert(era, value); + entry.insert(map); + } + std::collections::btree_map::Entry::Occupied(mut entry) => { + let old_value = entry.get().get(&era).unwrap_or(&ratio_u512_zero); + let new_value = old_value + .checked_add(&value) + .ok_or(RewardsError::ArithmeticOverflow)?; + entry.get_mut().insert(era, new_value); + } + } + + Ok(()) + }; + + // Rules out a special case: genesis block does not yield any reward, + // because there is no block producer, and no previous blocks whose + // signatures are to be rewarded: + debug_assert!( + current_era_id.is_genesis() == false, + "the genesis block should be handled as a special case" + ); + + let collection_proportion = to_ratio_u512(core_config.collection_rewards_proportion()); + let contribution_proportion = to_ratio_u512(core_config.contribution_rewards_proportion()); + + // Reward for producing a block from this era: + let 
production_reward = to_ratio_u512(core_config.production_rewards_proportion()) + .checked_mul(&rewards_info.reward(current_era_id)?) + .ok_or(RewardsError::ArithmeticOverflow)?; + + // Collect all rewards as a ratio: + for block in rewards_info.blocks_from_era(current_era_id) { + // Transfer the block production reward for this block proposer: + trace!( + proposer=?block.proposer, + amount=%production_reward.to_integer(), + block=%block.height, + "proposer reward" + ); + increase_value_for_key_and_era(block.proposer.clone(), current_era_id, production_reward)?; + + // Now, let's compute the reward attached to each signed block reported by the block + // we examine: + for (signature_rewards, signed_block_height) in block + .rewarded_signatures + .iter() + .zip((rewards_info.cited_block_height_start..block.height).rev()) + { + let signed_block_era = rewards_info.era_for_block_height(signed_block_height)?; + let validators_providing_signature = + signature_rewards.to_validator_set(rewards_info.validator_keys(signed_block_era)?); + + for signing_validator in validators_providing_signature { + // Reward for contributing to the finality signature, ie signing this block: + let contribution_reward = rewards_info + .weight_ratio(signed_block_era, &signing_validator)? + .checked_mul(&contribution_proportion) + .ok_or(RewardsError::ArithmeticOverflow)? + .checked_mul(&rewards_info.reward(signed_block_era)?) + .ok_or(RewardsError::ArithmeticOverflow)?; + // Reward for gathering this signature. It is both weighted by the block + // producing/signature collecting validator, and the signing validator: + let collection_reward = rewards_info + .weight_ratio(signed_block_era, &signing_validator)? + .checked_mul(&collection_proportion) + .ok_or(RewardsError::ArithmeticOverflow)? + .checked_mul(&rewards_info.reward(signed_block_era)?) 
+ .ok_or(RewardsError::ArithmeticOverflow)?; + + trace!( + signer=?signing_validator, + amount=%contribution_reward.to_integer(), + block=%block.height, + signed_block=%signed_block_height, + "signature contribution reward" + ); + trace!( + collector=?block.proposer, + signer=?signing_validator, + amount=%collection_reward.to_integer(), + block=%block.height, + signed_block=%signed_block_height, + "signature collection reward" + ); + increase_value_for_key_and_era( + signing_validator, + signed_block_era, + contribution_reward, + )?; + increase_value_for_key_and_era( + block.proposer.clone(), + current_era_id, + collection_reward, + )?; + } + } + } + + let rewards_map_to_vec = |rewards_map: BTreeMap>| { + let min_era = rewards_map + .iter() + .find(|(_era, &amount)| !amount.numer().is_zero()) + .map(|(era, _amount)| era) + .copied() + .unwrap_or(current_era_id); + EraId::iter_range_inclusive(min_era, current_era_id) + .rev() + .map(|era_id| { + rewards_map + .get(&era_id) + .copied() + .unwrap_or(ratio_u512_zero) + .to_integer() + }) + .collect() + }; + + // Return the rewards as plain U512: + Ok(full_reward_for_validators + .into_iter() + .map(|(key, amounts)| (key, rewards_map_to_vec(amounts))) + .collect()) +} + +/// Query all the blocks from the given range with a batch mechanism. 
+async fn collect_past_blocks_batched>( + effect_builder: EffectBuilder, + era_height_span: Range, +) -> Result, RewardsError> { + const STEP: usize = 100; + let only_from_available_block_range = false; + + let batches = { + let range_end = era_height_span.end; + + era_height_span + .step_by(STEP) + .map(move |internal_start| internal_start..range_end.min(internal_start + STEP as u64)) + }; + + stream::iter(batches) + .then(|range| async move { + stream::iter( + effect_builder + .collect_past_blocks_with_metadata( + range.clone(), + only_from_available_block_range, + ) + .await + .into_iter() + .zip(range) + .map(|(maybe_block_with_metadata, height)| { + maybe_block_with_metadata + .ok_or(RewardsError::FailedToFetchBlockWithHeight(height)) + .map(|b| CitedBlock::from(b.block)) + }), + ) + }) + .flatten() + .try_collect() + .await +} + +impl From for CitedBlock { + fn from(block: Block) -> Self { + Self { + protocol_version: block.protocol_version(), + era_id: block.era_id(), + height: block.height(), + proposer: block.proposer().clone(), + rewarded_signatures: block.rewarded_signatures().clone(), + state_root_hash: *block.state_root_hash(), + is_switch_block: block.is_switch_block(), + is_genesis: block.is_genesis(), + } + } +} diff --git a/node/src/components/contract_runtime/rewards/tests.rs b/node/src/components/contract_runtime/rewards/tests.rs new file mode 100644 index 0000000000..59dfb54a2e --- /dev/null +++ b/node/src/components/contract_runtime/rewards/tests.rs @@ -0,0 +1,859 @@ +use crate::testing::{map, set}; +use casper_types::{ + testing::TestRng, AsymmetricType as _, EraId, RewardedSignatures, TestBlockBuilder, +}; +use once_cell::sync::Lazy; +use std::{iter, ops::Deref}; + +use self::constructors::RewardsInfoConstructor; + +use super::*; +use convert::ratio; + +fn val(n: u8) -> PublicKey { + let mut buf = [0; 32]; + (0..22).for_each(|i| buf[i] = n); + PublicKey::ed25519_from_bytes(buf).unwrap() +} + +static VALIDATOR_1: Lazy = Lazy::new(|| val(1)); 
+static VALIDATOR_2: Lazy = Lazy::new(|| val(2)); +static VALIDATOR_3: Lazy = Lazy::new(|| val(3)); +static VALIDATOR_4: Lazy = Lazy::new(|| val(4)); + +fn core_config( + rng: &mut TestRng, + percent_signatures: u64, + percent_finders: u64, + minimum_era_height: u64, + signature_rewards_max_delay: u64, +) -> CoreConfig { + CoreConfig { + finality_signature_proportion: Ratio::new(percent_signatures, 100), + finders_fee: Ratio::new(percent_finders, 100), + signature_rewards_max_delay, + minimum_era_height, + ..CoreConfig::random(rng) + } +} + +#[test] +fn production_payout_increases_with_the_supply() { + let rng = &mut TestRng::new(); + let percent_signatures = 40; + let percent_finders = 20; + let blocks_per_era = 3; + let signature_rewards_max_delay = 6; + let core_config = core_config( + rng, + percent_signatures, + percent_finders, + blocks_per_era, + signature_rewards_max_delay, + ); + + // Eras info: + + let era_1_reward_per_round = 300; + let era_2_reward_per_round = 400; + + let weights = map! { + VALIDATOR_1.clone() => U512::from(100_u64), + VALIDATOR_2.clone() => U512::from(190_u64), + VALIDATOR_3.clone() => U512::from(30_u64), + }; + + let constructor = RewardsInfoConstructor::new( + &core_config, + map! 
{ + EraId::new(0) => (weights.clone(), era_1_reward_per_round, vec![ + (0, VALIDATOR_1.deref(), vec![]) + ]), + EraId::new(1) => (weights.clone(), era_1_reward_per_round, vec![ + (1, VALIDATOR_1.deref(), vec![]), + (2, VALIDATOR_2.deref(), vec![]), + (3, VALIDATOR_3.deref(), vec![]), + ]), + EraId::new(2) => (weights, era_2_reward_per_round, vec![ + (4, VALIDATOR_3.deref(), vec![]), + (5, VALIDATOR_1.deref(), vec![]), + (6, VALIDATOR_2.deref(), vec![]), + ]), + }, + ); + + // Era payouts: + + let rewards_for_era_1 = + rewards_for_era(constructor.for_era(rng, 1), EraId::new(1), &core_config).unwrap(); + let rewards_for_era_2 = + rewards_for_era(constructor.for_era(rng, 2), EraId::new(2), &core_config).unwrap(); + + // Checks: + + for ((recipient_1, amounts_1), (recipient_2, amounts_2)) in + iter::zip(rewards_for_era_1, rewards_for_era_2) + { + let amount_1: U512 = amounts_1.into_iter().sum(); + let amount_2: U512 = amounts_2.into_iter().sum(); + assert_eq!( + ratio(amount_1), + ratio(era_1_reward_per_round) * ratio(core_config.production_rewards_proportion()) + ); + assert_eq!( + ratio(amount_2), + ratio(era_2_reward_per_round) * ratio(core_config.production_rewards_proportion()) + ); + assert_eq!(recipient_1, recipient_2); + assert_eq!(amount_1 * 4 / 3, amount_2); + } +} + +#[test] +fn production_payout_depends_on_the_blocks_produced() { + let rng = &mut TestRng::new(); + let percent_signatures = 33; + let percent_finders = 20; + let blocks_per_era = 3; + let signature_rewards_max_delay = 4; + let core_config = core_config( + rng, + percent_signatures, + percent_finders, + blocks_per_era, + signature_rewards_max_delay, + ); + + // Eras info: + + let era_1_reward_per_round = 300; + let era_2_reward_per_round = 400; + + let weights_1 = map! { + VALIDATOR_1.clone() => U512::from(100_u64), + VALIDATOR_2.clone() => U512::from(190_u64), + VALIDATOR_3.clone() => U512::from(30_u64), + VALIDATOR_4.clone() => U512::from(89_u64), + }; + + let weights_2 = map! 
{ + VALIDATOR_1.clone() => U512::from(100_u64), + VALIDATOR_2.clone() => U512::from(190_u64), + VALIDATOR_3.clone() => U512::from(70_u64), + VALIDATOR_4.clone() => U512::from(89_u64), + }; + + let constructor = RewardsInfoConstructor::new( + &core_config, + map! { + EraId::new(0) => (weights_1.clone(), era_1_reward_per_round, vec![ + (0, VALIDATOR_1.deref(), vec![]) + ]), + EraId::new(1) => (weights_1, era_1_reward_per_round, vec![ + (1, VALIDATOR_1.deref(), vec![]), + (2, VALIDATOR_1.deref(), vec![]), + (3, VALIDATOR_3.deref(), vec![]), + ]), + EraId::new(2) => (weights_2, era_2_reward_per_round, vec![ + (4, VALIDATOR_2.deref(), vec![]), + (5, VALIDATOR_3.deref(), vec![]), + (6, VALIDATOR_4.deref(), vec![]), + ]), + }, + ); + + // Era 1 payouts: + + let rewards = + rewards_for_era(constructor.for_era(rng, 1), EraId::new(1), &core_config).unwrap(); + + assert_eq!( + rewards, + map! { + VALIDATOR_1.deref().clone() => vec![(ratio(2 * era_1_reward_per_round) * ratio(core_config.production_rewards_proportion())).to_integer()], + VALIDATOR_2.deref().clone() => vec![U512::zero()], + VALIDATOR_3.deref().clone() => vec![(ratio(era_1_reward_per_round) * ratio(core_config.production_rewards_proportion())).to_integer()], + VALIDATOR_4.deref().clone() => vec![U512::zero()], + } + ); + + // Era 2 payouts: + + let rewards = + rewards_for_era(constructor.for_era(rng, 2), EraId::new(2), &core_config).unwrap(); + + assert_eq!( + rewards, + map! { + VALIDATOR_1.deref().clone() => vec![U512::zero()], + VALIDATOR_2.deref().clone() => vec![(ratio(era_2_reward_per_round) * ratio(core_config.production_rewards_proportion())).to_integer()], + VALIDATOR_3.deref().clone() => vec![(ratio(era_2_reward_per_round) * ratio(core_config.production_rewards_proportion())).to_integer()], + VALIDATOR_4.deref().clone() => vec![(ratio(era_2_reward_per_round) * ratio(core_config.production_rewards_proportion())).to_integer()], + } + ); +} + +/// Only production & collection fee. 
+#[test] +fn all_signatures_rewards_without_contribution_fee() { + let rng = &mut TestRng::new(); + let percent_signatures = 40; + let percent_finders = 100; + let blocks_per_era = 3; + let signature_rewards_max_delay = 4; + let core_config = core_config( + rng, + percent_signatures, + percent_finders, + blocks_per_era, + signature_rewards_max_delay, + ); + + // Eras info: + + let era_1_reward_per_round = 300; + let era_2_reward_per_round = 400; + + let weights = map! { + VALIDATOR_1.clone() => U512::from(100_u64), + VALIDATOR_2.clone() => U512::from(190_u64), + VALIDATOR_3.clone() => U512::from(30_u64), + }; + + // Simple scenario: each validators sign the block finality directly (no "lag"): + let constructor = RewardsInfoConstructor::new( + &core_config, + map! { + EraId::new(0) => (weights.clone(), era_1_reward_per_round, vec![ + (0, VALIDATOR_1.deref(), vec![]) // No reward for genesis + ]), + EraId::new(1) => (weights.clone(), era_1_reward_per_round, vec![ + (1, VALIDATOR_1.deref(), vec![set!{}]), // Nobody signed the genesis finality + (2, VALIDATOR_2.deref(), vec![set!{VALIDATOR_1.clone(),VALIDATOR_2.clone(),VALIDATOR_3.clone()}, set!{}]), + (3, VALIDATOR_3.deref(), vec![set!{VALIDATOR_1.clone(),VALIDATOR_2.clone(),VALIDATOR_3.clone()}, set!{}, set!{}]), + ]), + EraId::new(2) => (weights, era_2_reward_per_round, vec![ + (4, VALIDATOR_1.deref(), vec![set!{VALIDATOR_1.clone(),VALIDATOR_2.clone(),VALIDATOR_3.clone()}, set!{}, set!{}, set!{}]), + (5, VALIDATOR_2.deref(), vec![set!{VALIDATOR_1.clone(),VALIDATOR_2.clone(),VALIDATOR_3.clone()}, set!{}, set!{}, set!{}]), + (6, VALIDATOR_3.deref(), vec![set!{VALIDATOR_1.clone(),VALIDATOR_2.clone(),VALIDATOR_3.clone()}, set!{}, set!{}, set!{}]), + ]), + }, + ); + + // Era 1 payouts: + + let rewards_for_era_1 = + rewards_for_era(constructor.for_era(rng, 1), EraId::new(1), &core_config).unwrap(); + + let validator_1_expected_payout = { + // 1 block produced: + ratio(1) * 
ratio(core_config.production_rewards_proportion()) + // No finality signature collected: + + ratio(0) * ratio(core_config.collection_rewards_proportion()) + } * ratio(era_1_reward_per_round); + let validator_2_expected_payout = { + // 1 block produced: + ratio(1) * ratio(core_config.production_rewards_proportion()) + // All finality signatures collected: + + ratio(core_config.collection_rewards_proportion()) + } * ratio(era_1_reward_per_round); + let validator_3_expected_payout = { + // 1 block produced: + ratio(1) * ratio(core_config.production_rewards_proportion()) + // All finality signatures collected: + + ratio(core_config.collection_rewards_proportion()) + } * ratio(era_1_reward_per_round); + + assert_eq!( + map! { + VALIDATOR_1.clone() => vec![validator_1_expected_payout.to_integer()], + VALIDATOR_2.clone() => vec![validator_2_expected_payout.to_integer()], + VALIDATOR_3.clone() => vec![validator_3_expected_payout.to_integer()], + }, + rewards_for_era_1, + ); + + // Era 2 payouts: + + let rewards_for_era_2 = + rewards_for_era(constructor.for_era(rng, 2), EraId::new(2), &core_config).unwrap(); + + let validator_1_expected_payout = { + // 1 block produced: + ratio(1) + * ratio(era_2_reward_per_round) + * ratio(core_config.production_rewards_proportion()) + // All finality signature collected (paid out in era 2): + + ratio(era_1_reward_per_round) * ratio(core_config.collection_rewards_proportion()) + }; + let validator_2_expected_payout = { + // 1 block produced: + ratio(1) * ratio(core_config.production_rewards_proportion()) + // All finality signatures collected: + + ratio(core_config.collection_rewards_proportion()) + } * ratio(era_2_reward_per_round); + let validator_3_expected_payout = { + // 1 block produced: + ratio(1) * ratio(core_config.production_rewards_proportion()) + // All finality signatures collected: + + ratio(core_config.collection_rewards_proportion()) + } * ratio(era_2_reward_per_round); + + assert_eq!( + map! 
{ + VALIDATOR_1.clone() => vec![validator_1_expected_payout.to_integer()], + VALIDATOR_2.clone() => vec![validator_2_expected_payout.to_integer()], + VALIDATOR_3.clone() => vec![validator_3_expected_payout.to_integer()], + }, + rewards_for_era_2, + ); +} + +/// Only production & contribution fee. +#[test] +fn all_signatures_rewards_without_finder_fee() { + let rng = &mut TestRng::new(); + let percent_signatures = 40; + let percent_finders = 0; + let blocks_per_era = 3; + let signature_rewards_max_delay = 4; + let core_config = core_config( + rng, + percent_signatures, + percent_finders, + blocks_per_era, + signature_rewards_max_delay, + ); + + // Eras info: + + let era_1_reward_per_round = 300; + let era_2_reward_per_round = 400; + + let weights = map! { + VALIDATOR_1.clone() => U512::from(100_u64), + VALIDATOR_2.clone() => U512::from(190_u64), + VALIDATOR_3.clone() => U512::from(30_u64), + }; + + // Simple scenario: each validators sign the block finality directly (no "lag"): + let constructor = RewardsInfoConstructor::new( + &core_config, + map! 
{ + EraId::new(0) => (weights.clone(), era_1_reward_per_round, vec![ + (0, VALIDATOR_1.deref(), vec![]) // No reward for genesis + ]), + EraId::new(1) => (weights.clone(), era_1_reward_per_round, vec![ + (1, VALIDATOR_1.deref(), vec![set!{}]), // Nobody signed the genesis finality + (2, VALIDATOR_2.deref(), vec![set!{VALIDATOR_1.clone(),VALIDATOR_2.clone(),VALIDATOR_3.clone()}, set!{}]), + (3, VALIDATOR_3.deref(), vec![set!{VALIDATOR_1.clone(),VALIDATOR_2.clone(),VALIDATOR_3.clone()}, set!{}, set!{}]), + ]), + EraId::new(2) => (weights, era_2_reward_per_round, vec![ + (4, VALIDATOR_1.deref(), vec![set!{VALIDATOR_1.clone(),VALIDATOR_2.clone(),VALIDATOR_3.clone()}, set!{}, set!{}, set!{}]), + (5, VALIDATOR_2.deref(), vec![set!{VALIDATOR_1.clone(),VALIDATOR_2.clone(),VALIDATOR_3.clone()}, set!{}, set!{}, set!{}]), + (6, VALIDATOR_3.deref(), vec![set!{VALIDATOR_1.clone(),VALIDATOR_2.clone(),VALIDATOR_3.clone()}, set!{}, set!{}, set!{}]), + ]), + }, + ); + + // Era 1 payouts: + + let rewards_for_era_1 = + rewards_for_era(constructor.for_era(rng, 1), EraId::new(1), &core_config).unwrap(); + + let validator_1_expected_payout = { + // 1 block produced: + ratio(1) * ratio(core_config.production_rewards_proportion()) + // 2 finality signed: + + ratio(2) * ratio(core_config.contribution_rewards_proportion()) * constructor.weight(1, VALIDATOR_1.deref()) + } * ratio(era_1_reward_per_round); + let validator_2_expected_payout = { + // 1 block produced: + ratio(1) * ratio(core_config.production_rewards_proportion()) + // 2 finality signed: + + ratio(2) * ratio(core_config.contribution_rewards_proportion()) * constructor.weight(1, VALIDATOR_2.deref()) + } * ratio(era_1_reward_per_round); + let validator_3_expected_payout = { + // 1 block produced: + ratio(1) * ratio(core_config.production_rewards_proportion()) + // 2 finality signed: + + ratio(2) * ratio(core_config.contribution_rewards_proportion()) * constructor.weight(1, VALIDATOR_3.deref()) + } * ratio(era_1_reward_per_round); 
+ + assert_eq!( + rewards_for_era_1, + map! { + VALIDATOR_1.clone() => vec![validator_1_expected_payout.to_integer()], + VALIDATOR_2.clone() => vec![validator_2_expected_payout.to_integer()], + VALIDATOR_3.clone() => vec![validator_3_expected_payout.to_integer()], + } + ); +} + +#[test] +fn all_signatures_rewards() { + let rng = &mut TestRng::new(); + let percent_signatures = 40; + let percent_finders = 15; + let blocks_per_era = 3; + let signature_rewards_max_delay = 4; + let core_config = core_config( + rng, + percent_signatures, + percent_finders, + blocks_per_era, + signature_rewards_max_delay, + ); + + // Eras info: + + let era_1_reward_per_round = 300; + let era_2_reward_per_round = 400; + + let weights = map! { + VALIDATOR_1.clone() => U512::from(100_u64), + VALIDATOR_2.clone() => U512::from(190_u64), + VALIDATOR_3.clone() => U512::from(30_u64), + }; + + // Simple scenario: each validators sign the block finality directly (no "lag"): + let constructor = RewardsInfoConstructor::new( + &core_config, + map! 
{ + EraId::new(0) => (weights.clone(), era_1_reward_per_round, vec![ + (0, VALIDATOR_1.deref(), vec![]) // No reward for genesis + ]), + EraId::new(1) => (weights.clone(), era_1_reward_per_round, vec![ + (1, VALIDATOR_1.deref(), vec![set!{}]), // Nobody signed the genesis finality + (2, VALIDATOR_2.deref(), vec![set!{VALIDATOR_1.clone(),VALIDATOR_2.clone(),VALIDATOR_3.clone()}, set!{}]), + (3, VALIDATOR_3.deref(), vec![set!{VALIDATOR_1.clone(),VALIDATOR_2.clone(),VALIDATOR_3.clone()}, set!{}, set!{}]), + ]), + EraId::new(2) => (weights, era_2_reward_per_round, vec![ + (4, VALIDATOR_1.deref(), vec![set!{VALIDATOR_1.clone(),VALIDATOR_2.clone(),VALIDATOR_3.clone()}, set!{}, set!{}, set!{}]), + (5, VALIDATOR_2.deref(), vec![set!{VALIDATOR_1.clone(),VALIDATOR_2.clone(),VALIDATOR_3.clone()}, set!{}, set!{}, set!{}]), + (6, VALIDATOR_3.deref(), vec![set!{VALIDATOR_1.clone(),VALIDATOR_2.clone(),VALIDATOR_3.clone()}, set!{}, set!{}, set!{}]), + ]), + }, + ); + + // Era 1 payouts: + + let rewards_for_era_1 = + rewards_for_era(constructor.for_era(rng, 1), EraId::new(1), &core_config).unwrap(); + + let validator_1_expected_payout = { + // 1 block produced: + ratio(1) * ratio(core_config.production_rewards_proportion()) + // No finality signature collected: + + ratio(0) * ratio(core_config.collection_rewards_proportion()) + // 2 finality signed: + + ratio(2) * ratio(core_config.contribution_rewards_proportion()) * constructor.weight(1, VALIDATOR_1.deref()) + } * ratio(era_1_reward_per_round); + let validator_2_expected_payout = { + // 1 block produced: + ratio(1) * ratio(core_config.production_rewards_proportion()) + // All finality signatures collected: + + ratio(core_config.collection_rewards_proportion()) + // 2 finality signed: + + ratio(2) * ratio(core_config.contribution_rewards_proportion()) * constructor.weight(1, VALIDATOR_2.deref()) + } * ratio(era_1_reward_per_round); + let validator_3_expected_payout = { + // 1 block produced: + ratio(1) * 
ratio(core_config.production_rewards_proportion()) + // All finality signatures collected: + + ratio(core_config.collection_rewards_proportion()) + // 2 finality signed: + + ratio(2) * ratio(core_config.contribution_rewards_proportion()) * constructor.weight(1, VALIDATOR_3.deref()) + } * ratio(era_1_reward_per_round); + + assert_eq!( + rewards_for_era_1, + map! { + VALIDATOR_1.clone() => vec![validator_1_expected_payout.to_integer()], + VALIDATOR_2.clone() => vec![validator_2_expected_payout.to_integer()], + VALIDATOR_3.clone() => vec![validator_3_expected_payout.to_integer()], + } + ); +} + +#[test] +fn mixed_signatures_pattern() { + let rng = &mut TestRng::new(); + let percent_signatures = 30; + let percent_finders = 27; + let blocks_per_era = 4; + let signature_rewards_max_delay = 4; + let core_config = core_config( + rng, + percent_signatures, + percent_finders, + blocks_per_era, + signature_rewards_max_delay, + ); + + let production = ratio(core_config.production_rewards_proportion()); + let collection = ratio(core_config.collection_rewards_proportion()); + let contribution = ratio(core_config.contribution_rewards_proportion()); + + // Eras info: + + let era_1_reward_per_round = 300; + let era_2_reward_per_round = 400; + + let weights_1 = map! { + VALIDATOR_1.clone() => U512::from(100_u64), + VALIDATOR_2.clone() => U512::from(190_u64), + VALIDATOR_3.clone() => U512::from(30_u64), + }; + + let weights_2 = map! { + VALIDATOR_1.clone() => U512::from(93_u64), + VALIDATOR_2.clone() => U512::from(190_u64), + VALIDATOR_3.clone() => U512::from(69_u64), + VALIDATOR_4.clone() => U512::from(212_u64), + }; + + // Complex scenario: + // - not all validators sign + // - in era 2, signatures are reported from era 1 + let constructor = RewardsInfoConstructor::new( + &core_config, + map! 
{ + EraId::new(0) => (weights_1.clone(), era_1_reward_per_round, vec![ + (0, VALIDATOR_1.deref(), vec![]) // No reward for genesis + ]), + EraId::new(1) => (weights_1, era_1_reward_per_round, vec![ + (1, VALIDATOR_2.deref(), vec![set!{}]), // Nobody signed the genesis finality + (2, VALIDATOR_2.deref(), vec![set!{VALIDATOR_1.clone(),VALIDATOR_3.clone()}, set!{}]), + (3, VALIDATOR_1.deref(), vec![set!{VALIDATOR_1.clone(),VALIDATOR_2.clone(),VALIDATOR_3.clone()}, set!{VALIDATOR_2.clone()}, set!{}]), // the validator 2 signature is fetched later + (4, VALIDATOR_1.deref(), vec![set!{VALIDATOR_1.clone(),VALIDATOR_2.clone()}, set!{}, set!{}]), // validator 3 doesn't sign the block 3 + ]), + EraId::new(2) => (weights_2, era_2_reward_per_round, vec![ + (5, VALIDATOR_2.deref(), vec![set!{}, set!{}, set!{}, set!{}]), + (6, VALIDATOR_3.deref(), vec![set!{VALIDATOR_1.clone(),VALIDATOR_2.clone(),VALIDATOR_3.clone(),VALIDATOR_4.clone()}, set!{VALIDATOR_1.clone()}, set!{}, set!{}]), + (7, VALIDATOR_4.deref(), vec![set!{VALIDATOR_1.clone(),VALIDATOR_2.clone(),VALIDATOR_3.clone()}, set!{VALIDATOR_3.clone(),VALIDATOR_4.clone()}, set!{VALIDATOR_3.clone()}, set!{}]), + (8, VALIDATOR_1.deref(), vec![set!{VALIDATOR_1.clone(),VALIDATOR_2.clone()}, set!{}, set!{}, set!{}]), + ]), + }, + ); + + // Era 1 payouts: + { + let era = EraId::new(1); + let rewards_for_era_1 = + rewards_for_era(constructor.for_era(rng, era), era, &core_config).unwrap(); + + let validator_1_expected_payout = { + // 2 blocks produced: + ratio(2) * production + // 6 finality signatures collected: + + collection * ( + ratio(2) * constructor.weight(era, VALIDATOR_1.deref()) + + ratio(3) * constructor.weight(era, VALIDATOR_2.deref()) + + ratio(1) * constructor.weight(era, VALIDATOR_3.deref()) + ) + // 3 finality signed: + + ratio(3) * contribution * constructor.weight(era, VALIDATOR_1.deref()) + } * ratio(era_1_reward_per_round); + let validator_2_expected_payout = { + // 2 blocks produced: + ratio(2) * production + // 2 
finality signatures collected: + + collection * ( + ratio(1) * constructor.weight(era, VALIDATOR_1.deref()) + + ratio(0) * constructor.weight(era, VALIDATOR_2.deref()) + + ratio(1) * constructor.weight(era, VALIDATOR_3.deref()) + ) + // 3 finality signed: + + ratio(3) * contribution * constructor.weight(era, VALIDATOR_2.deref()) + } * ratio(era_1_reward_per_round); + let validator_3_expected_payout = { + // No block produced: + ratio(0) * production + // No finality signatures collected: + + ratio(0) * collection + // 2 finality signed: + + ratio(2) * contribution * constructor.weight(era, VALIDATOR_3.deref()) + } * ratio(era_1_reward_per_round); + + assert_eq!( + rewards_for_era_1, + map! { + VALIDATOR_1.clone() => vec![validator_1_expected_payout.to_integer()], + VALIDATOR_2.clone() => vec![validator_2_expected_payout.to_integer()], + VALIDATOR_3.clone() => vec![validator_3_expected_payout.to_integer()], + } + ); + } + + // Era 2 payouts: + { + let era = EraId::new(2); + let rewards_for_era_2 = + rewards_for_era(constructor.for_era(rng, era), era, &core_config).unwrap(); + + let validator_1_expected_payout = vec![ + // 1 block produced: + (production * ratio(1) * ratio(era_2_reward_per_round) + // 2 finality signatures collected: + + collection * { + ratio(1) * constructor.weight(era, VALIDATOR_1.deref()) + + ratio(1) * constructor.weight(era, VALIDATOR_2.deref()) + } * ratio(era_2_reward_per_round) + // Finality signed: + + contribution * { + // 3 in current era: + ratio(3) * ratio(era_2_reward_per_round) * constructor.weight(era, VALIDATOR_1.deref()) + }).to_integer(), + // 1 contributed in previous era: + (contribution * { + ratio(1) * ratio(era_1_reward_per_round) * constructor.weight(1, VALIDATOR_1.deref()) + }).to_integer() + ]; + + let validator_2_expected_payout = vec![ + // 1 block produced: + (ratio(1) * production * ratio(era_2_reward_per_round) + // No finality signature collected: + // 3 finality signed: + + ratio(3) * contribution * 
ratio(era_2_reward_per_round) * constructor.weight(era, VALIDATOR_2.deref())) + .to_integer() + ]; + + let validator_3_expected_payout = vec![ + // 1 block produced: + (ratio(1) * production * ratio(era_2_reward_per_round) + // 2 finality signatures collected: + + collection * { + ( + ratio(1) * constructor.weight(era, VALIDATOR_1.deref()) + + ratio(1) * constructor.weight(era, VALIDATOR_2.deref()) + + ratio(1) * constructor.weight(era, VALIDATOR_3.deref()) + + ratio(1) * constructor.weight(era, VALIDATOR_4.deref()) + ) * ratio(era_2_reward_per_round) + // collected one signature from era 1 + + ( + ratio(1) * constructor.weight(1, VALIDATOR_1.deref()) + ) * ratio(era_1_reward_per_round) + } + // Finality signed: + + contribution * { + // 3 in current era: + ratio(3) * ratio(era_2_reward_per_round) * constructor.weight(era, VALIDATOR_3.deref()) + }).to_integer(), + // for era 1 + (contribution * { + // 1 in previous era: + ratio(1) * ratio(era_1_reward_per_round) * constructor.weight(1, VALIDATOR_3.deref()) + }).to_integer() + ]; + + let validator_4_expected_payout = vec![ + // 1 block produced: + (ratio(1) * production * ratio(era_2_reward_per_round) + // 6 finality signatures collected: + + collection * { + ( + ratio(1) * constructor.weight(era, VALIDATOR_1.deref()) + + ratio(1) * constructor.weight(era, VALIDATOR_2.deref()) + + ratio(2) * constructor.weight(era, VALIDATOR_3.deref()) + + ratio(1) * constructor.weight(era, VALIDATOR_4.deref()) + ) * ratio(era_2_reward_per_round) + // collected one signature from era 1 + + ( + ratio(1) * constructor.weight(1, VALIDATOR_3.deref()) + ) * ratio(era_1_reward_per_round) + } + // 3 finality signed: + + ratio(2) * contribution * ratio(era_2_reward_per_round) * constructor.weight(era, VALIDATOR_4.deref())) + .to_integer(), + ]; + + assert_eq!( + rewards_for_era_2, + map! 
{ + VALIDATOR_1.clone() => validator_1_expected_payout, + VALIDATOR_2.clone() => validator_2_expected_payout, + VALIDATOR_3.clone() => validator_3_expected_payout, + VALIDATOR_4.clone() => validator_4_expected_payout, + } + ); + } +} + +mod constructors { + use casper_types::SingleBlockRewardedSignatures; + + use super::*; + use std::collections::BTreeSet; + + type Weights = BTreeMap; + type RewardPerRound = u64; + type BlockInfo<'a> = (u64, &'a PublicKey, Vec>); + + pub(super) struct RewardsInfoConstructor<'a> { + signature_rewards_max_delay: u64, + blocks: BTreeMap>)>, + /// A cache with the validators for each era + validators: BTreeMap>, + } + + impl<'a> RewardsInfoConstructor<'a> { + pub(super) fn new( + core_config: &'a CoreConfig, + blocks: BTreeMap>)>, + ) -> Self { + let validators = blocks + .iter() + .map(|(era_id, (weights, _, _))| (*era_id, weights.keys().cloned().collect())) + .collect(); + + Self { + signature_rewards_max_delay: core_config.signature_rewards_max_delay, + blocks, + validators, + } + } + + /// Returns the relative weight for a validator. 
+ pub(super) fn weight( + &self, + era_id: impl Into, + validator: &PublicKey, + ) -> Ratio { + let weights = &self.blocks[&era_id.into()].0; + let total = weights.values().copied().sum(); + let weight = weights[validator]; + + Ratio::new(weight, total) + } + + pub(super) fn for_era(&self, rng: &mut TestRng, era_id: impl Into) -> RewardsInfo { + let era_id = era_id.into(); + let number_blocks = { + let era_size = self.blocks[&era_id].2.len(); + self.signature_rewards_max_delay as usize + era_size + }; + + let cited_blocks: Vec<_> = self + .blocks + .range(EraId::new(0)..=era_id) + .rev() + .flat_map(|(era_id, (_, _, blocks))| { + let switch_height = blocks.iter().map(|b| b.0).max().unwrap(); + // Blocks are being read in reverse, era by era, so that we can build only the + // latest needed: + blocks.clone().into_iter().rev().map( + move |(height, proposer, rewarded_signatures)| { + let rewarded_signatures = RewardedSignatures::new( + rewarded_signatures.into_iter().enumerate().map( + |(height_offset, signing_validators)| { + let height = + height.saturating_sub(height_offset as u64 + 1); + let era_id = self + .blocks + .iter() + .find_map(|(era_id, (_, _, blocks))| { + blocks + .iter() + .find(|(h, _, _)| h == &height) + .map(|_| era_id) + }) + .unwrap_or_else(|| { + panic!("height {} must be provided", height) + }); + let era_validators = self + .validators + .get(era_id) + .expect("the info for the era to be provided"); + SingleBlockRewardedSignatures::from_validator_set( + &signing_validators, + era_validators, + ) + }, + ), + ); + TestBlockBuilder::new() + .height(height) + .era(*era_id) + .proposer(proposer.clone()) + .rewarded_signatures(rewarded_signatures) + .switch_block(height == switch_height) + }, + ) + }) + .map(move |block_builder| CitedBlock::from(Block::from(block_builder.build(rng)))) + .take(number_blocks) + .collect(); + let cited_blocks: Vec<_> = cited_blocks.into_iter().rev().collect(); + + let first_block = cited_blocks.first().expect("at 
least one cited block"); + assert!( + cited_blocks.len() >= number_blocks || first_block.is_genesis, + "Not enough blocks provided" + ); + + let eras_info = self + .blocks + .range(first_block.era_id..=era_id) + .map(|(era_id, (weights, reward_per_round, _))| { + ( + *era_id, + EraInfo::new_testing(weights.clone(), ratio(*reward_per_round)), + ) + }) + .collect(); + + RewardsInfo::new_testing(eras_info, cited_blocks) + } + } +} + +mod convert { + use super::*; + use std::convert::TryFrom; + + pub(super) fn ratio(n: impl IntoRatioU512) -> Ratio { + n.into() + } + + pub(super) trait IntoRatioU512 { + fn into(self) -> Ratio; + } + + impl IntoRatioU512 for u64 { + fn into(self) -> Ratio { + Ratio::new(U512::from(self), U512::one()) + } + } + + impl IntoRatioU512 for usize { + fn into(self) -> Ratio { + Ratio::new(U512::from(self), U512::one()) + } + } + + impl IntoRatioU512 for U512 { + fn into(self) -> Ratio { + Ratio::new(self, U512::one()) + } + } + + impl IntoRatioU512 for i32 { + fn into(self) -> Ratio { + Ratio::new(U512::from(u32::try_from(self).unwrap()), U512::one()) + } + } + + impl IntoRatioU512 for Ratio { + fn into(self) -> Ratio { + Ratio::new(U512::from(*self.numer()), U512::from(*self.denom())) + } + } + + impl IntoRatioU512 for Ratio { + fn into(self) -> Ratio { + self + } + } +} diff --git a/node/src/components/contract_runtime/tests.rs b/node/src/components/contract_runtime/tests.rs new file mode 100644 index 0000000000..490f14ef56 --- /dev/null +++ b/node/src/components/contract_runtime/tests.rs @@ -0,0 +1,1158 @@ +use std::{collections::BTreeMap, iter, path::PathBuf, sync::Arc, time::Duration}; + +use casper_storage::data_access_layer::{QueryRequest, QueryResult}; +use derive_more::{Display, From}; +use fs_extra::dir; +use prometheus::Registry; +use rand::RngCore; +use serde::Serialize; +use tempfile::TempDir; + +use casper_types::{ + bytesrepr::Bytes, contracts::ProtocolVersionMajor, runtime_args, BlockHash, Chainspec, + ChainspecRawBytes, Deploy, 
Digest, EntityVersion, EraId, ExecutableDeployItem, PackageHash, + PricingMode, PublicKey, RuntimeArgs, SecretKey, TimeDiff, Timestamp, Transaction, + TransactionConfig, TransactionRuntimeParams, MINT_LANE_ID, U512, +}; + +use super::*; +use crate::{ + components::{ + network::Identity as NetworkIdentity, + storage::{self, Storage}, + }, + effect::announcements::{ContractRuntimeAnnouncement, ControlAnnouncement, FatalAnnouncement}, + protocol::Message, + reactor::{self, EventQueueHandle, ReactorEvent, Runner}, + testing::{self, network::NetworkedReactor, ConditionCheckReactor}, + types::{ + transaction::{ + calculate_transaction_lane_for_transaction, + transaction_v1_builder::TransactionV1Builder, + }, + BlockPayload, ExecutableBlock, FinalizedBlock, InternalEraReport, MetaBlockState, + }, + utils::{Loadable, WithDir, RESOURCES_PATH}, + NodeRng, +}; + +const FIXTURES_DIRECTORY: &str = "../execution_engine_testing/tests/fixtures"; +fn path_to_lmdb_fixtures() -> PathBuf { + Path::new(env!("CARGO_MANIFEST_DIR")).join(FIXTURES_DIRECTORY) +} + +const RECENT_ERA_COUNT: u64 = 5; +const MAX_TTL: TimeDiff = TimeDiff::from_seconds(86400); +const TEST_TIMEOUT: Duration = Duration::from_secs(10); + +/// Top-level event for the reactor. 
+#[derive(Debug, From, Serialize, Display)] +#[must_use] +enum Event { + #[from] + ContractRuntime(super::Event), + #[from] + ContractRuntimeRequest(ContractRuntimeRequest), + #[from] + ContractRuntimeAnnouncement(ContractRuntimeAnnouncement), + #[from] + Storage(storage::Event), + #[from] + StorageRequest(StorageRequest), + #[from] + MetaBlockAnnouncement(MetaBlockAnnouncement), +} + +impl ReactorEvent for Event { + fn is_control(&self) -> bool { + false + } + + fn try_into_control(self) -> Option { + None + } +} + +trait Unhandled {} + +impl From for Event { + fn from(_: T) -> Self { + unimplemented!("not handled in contract runtime tests") + } +} + +impl Unhandled for ControlAnnouncement {} + +impl Unhandled for FatalAnnouncement {} + +impl Unhandled for NetworkRequest {} + +impl Unhandled for UnexecutedBlockAnnouncement {} + +struct TestConfig { + config: Config, + fixture_name: Option, +} + +struct Reactor { + storage: Storage, + contract_runtime: ContractRuntime, + _storage_tempdir: TempDir, +} + +impl reactor::Reactor for Reactor { + type Event = Event; + type Config = TestConfig; + type Error = ConfigError; + + fn new( + config: Self::Config, + chainspec: Arc, + _chainspec_raw_bytes: Arc, + _network_identity: NetworkIdentity, + registry: &Registry, + _event_queue: EventQueueHandle, + _rng: &mut NodeRng, + ) -> Result<(Self, Effects), Self::Error> { + let (storage_config, storage_tempdir) = storage::Config::new_for_tests(1); + if let Some(fixture_name) = config.fixture_name { + let source = path_to_lmdb_fixtures().join(&fixture_name); + fs_extra::copy_items(&[source], &storage_tempdir, &dir::CopyOptions::default()) + .expect("should copy global state fixture"); + } + + let storage_withdir = WithDir::new(storage_tempdir.path(), storage_config); + let storage = Storage::new( + &storage_withdir, + None, + chainspec.protocol_version(), + EraId::default(), + "test", + MAX_TTL.into(), + RECENT_ERA_COUNT, + Some(registry), + false, + TransactionConfig::default(), + 
) + .unwrap(); + + let contract_runtime = + ContractRuntime::new(storage.root_path(), &config.config, chainspec, registry)?; + + let reactor = Reactor { + storage, + contract_runtime, + _storage_tempdir: storage_tempdir, + }; + + Ok((reactor, Effects::new())) + } + + fn dispatch_event( + &mut self, + effect_builder: EffectBuilder, + rng: &mut NodeRng, + event: Event, + ) -> Effects { + trace!(?event); + match event { + Event::ContractRuntime(event) => reactor::wrap_effects( + Event::ContractRuntime, + self.contract_runtime + .handle_event(effect_builder, rng, event), + ), + Event::ContractRuntimeRequest(req) => reactor::wrap_effects( + Event::ContractRuntime, + self.contract_runtime + .handle_event(effect_builder, rng, req.into()), + ), + Event::ContractRuntimeAnnouncement(announcement) => { + info!("{announcement}"); + Effects::new() + } + Event::Storage(event) => reactor::wrap_effects( + Event::Storage, + self.storage.handle_event(effect_builder, rng, event), + ), + Event::StorageRequest(req) => reactor::wrap_effects( + Event::Storage, + self.storage.handle_event(effect_builder, rng, req.into()), + ), + Event::MetaBlockAnnouncement(announcement) => { + info!("{announcement}"); + Effects::new() + } + } + } +} + +impl NetworkedReactor for Reactor {} + +/// Schedule the given block and its deploys to be executed by the contract runtime. +fn execute_block( + executable_block: ExecutableBlock, +) -> impl FnOnce(EffectBuilder) -> Effects { + |effect_builder| { + effect_builder + .enqueue_block_for_execution(executable_block, MetaBlockState::new()) + .ignore() + } +} + +/// A function to be used a condition check, indicating that execution has started. +fn execution_started(event: &Event) -> bool { + matches!( + event, + Event::ContractRuntimeRequest(ContractRuntimeRequest::EnqueueBlockForExecution { .. }) + ) +} + +/// A function to be used a condition check, indicating that execution has completed. 
+fn execution_completed(event: &Event) -> bool { + matches!(event, Event::MetaBlockAnnouncement(_)) +} + +#[tokio::test] +async fn should_not_set_shared_pre_state_to_lower_block_height() { + testing::init_logging(); + + let config = Config { + max_global_state_size: Some(100 * 1024 * 1024), + ..Config::default() + }; + let config = TestConfig { + config, + fixture_name: None, + }; + let (chainspec, chainspec_raw_bytes) = + <(Chainspec, ChainspecRawBytes)>::from_resources("local"); + let chainspec = Arc::new(chainspec); + let chainspec_raw_bytes = Arc::new(chainspec_raw_bytes); + + let mut rng = crate::new_rng(); + let rng = &mut rng; + + let mut runner: Runner> = Runner::new( + config, + Arc::clone(&chainspec), + Arc::clone(&chainspec_raw_bytes), + rng, + ) + .await + .unwrap(); + + // Commit genesis to set up initial global state. + let post_commit_genesis_state_hash = runner + .reactor() + .inner() + .contract_runtime + .commit_genesis(chainspec.as_ref(), chainspec_raw_bytes.as_ref()) + .as_legacy() + .unwrap() + .0; + + let initial_pre_state = ExecutionPreState::new( + 0, + post_commit_genesis_state_hash, + BlockHash::default(), + Digest::default(), + ); + runner + .reactor_mut() + .inner_mut() + .contract_runtime + .set_initial_state(initial_pre_state); + + // Create the genesis immediate switch block. + let block_0 = ExecutableBlock::from_finalized_block_and_transactions( + FinalizedBlock::new( + BlockPayload::default(), + Some(InternalEraReport::default()), + Timestamp::now(), + EraId::new(0), + 0, + PublicKey::System, + ), + vec![], + ); + + runner + .process_injected_effects(execute_block(block_0)) + .await; + runner + .crank_until(rng, execution_completed, TEST_TIMEOUT) + .await; + + // Create the first block of era 1. 
+ let block_1 = ExecutableBlock::from_finalized_block_and_transactions( + FinalizedBlock::new( + BlockPayload::default(), + None, + Timestamp::now(), + EraId::new(1), + 1, + PublicKey::System, + ), + vec![], + ); + runner + .process_injected_effects(execute_block(block_1)) + .await; + runner + .crank_until(rng, execution_completed, TEST_TIMEOUT) + .await; + + // Check that the next block height expected by the contract runtime is 2. + assert_eq!( + runner + .reactor() + .inner() + .contract_runtime + .execution_pre_state + .lock() + .unwrap() + .next_block_height(), + 2 + ); + + // Prepare to create a block which will take a while to execute, i.e. loaded with many deploys + // transferring from node-1's main account to new random public keys. + let node_1_secret_key = SecretKey::from_file( + RESOURCES_PATH + .join("local") + .join("secret_keys") + .join("node-1.pem"), + ) + .unwrap(); + let timestamp = Timestamp::now(); + let ttl = TimeDiff::from_seconds(100); + let gas_price = 1; + let chain_name = chainspec.network_config.name.clone(); + let payment = ExecutableDeployItem::ModuleBytes { + module_bytes: Bytes::new(), + args: runtime_args! { + "amount" => U512::from(chainspec.system_costs_config.mint_costs().transfer), + }, + }; + + let txns: Vec = iter::repeat_with(|| { + let target_public_key = PublicKey::random(rng); + let session = ExecutableDeployItem::Transfer { + args: runtime_args! 
{ + "amount" => U512::from(chainspec.transaction_config.native_transfer_minimum_motes), + "target" => target_public_key, + "id" => Some(9_u64), + }, + }; + Transaction::Deploy(Deploy::new_signed( + timestamp, + ttl, + gas_price, + vec![], + chain_name.clone(), + payment.clone(), + session, + &node_1_secret_key, + None, + )) + }) + .take(200) + .collect(); + + let mut txn_set = BTreeMap::new(); + let val = txns + .iter() + .map(|transaction| { + let hash = transaction.hash(); + let approvals = transaction.approvals(); + (hash, approvals) + }) + .collect(); + txn_set.insert(MINT_LANE_ID, val); + let block_payload = BlockPayload::new(txn_set, vec![], Default::default(), true, 1u8); + let block_2 = ExecutableBlock::from_finalized_block_and_transactions( + FinalizedBlock::new( + block_payload, + None, + Timestamp::now(), + EraId::new(1), + 2, + PublicKey::System, + ), + txns, + ); + runner + .process_injected_effects(execute_block(block_2)) + .await; + + // Crank until execution is scheduled. + runner + .crank_until(rng, execution_started, TEST_TIMEOUT) + .await; + + // While executing this block, set the execution pre-state to a later block (as if we had sync + // leaped and skipped ahead). + let next_block_height = 9; + tokio::time::sleep(Duration::from_millis(50)).await; + runner + .reactor_mut() + .inner_mut() + .contract_runtime + .set_initial_state(ExecutionPreState::new( + next_block_height, + Digest::hash(rng.next_u64().to_le_bytes()), + BlockHash::random(rng), + Digest::hash(rng.next_u64().to_le_bytes()), + )); + + runner + .crank_until(rng, execution_completed, TEST_TIMEOUT) + .await; + + let actual = runner + .reactor() + .inner() + .contract_runtime + .execution_pre_state + .lock() + .unwrap() + .next_block_height(); + + let expected = next_block_height; + + // Check that the next block height expected by the contract runtime is `next_block_height` and + // not 3. 
+ assert_eq!(actual, expected); +} + +fn valid_wasm_txn( + initiator: &SecretKey, + chain_name: &str, + pricing_mode: PricingMode, + name: &str, + runtime_args: RuntimeArgs, +) -> Transaction { + let contract_file = RESOURCES_PATH + .join("..") + .join("target") + .join("wasm32-unknown-unknown") + .join("release") + .join(format!("{name}.wasm")); + let module_bytes = Bytes::from(std::fs::read(contract_file).expect("cannot read module bytes")); + let mut txn = Transaction::from( + TransactionV1Builder::new_session_with_runtime_args( + true, + module_bytes, + TransactionRuntimeParams::VmCasperV1, + runtime_args, + ) + .with_chain_name(chain_name) + .with_pricing_mode(pricing_mode) + .with_initiator_addr(PublicKey::from(initiator)) + .build() + .unwrap(), + ); + txn.sign(initiator); + txn +} + +#[allow(clippy::too_many_arguments)] +fn valid_versioned_call_txn( + initiator: &SecretKey, + chain_name: &str, + pricing_mode: PricingMode, + entry_point: &str, + package_hash: PackageHash, + runtime_args: RuntimeArgs, + version: Option, + protocol_version_major: Option, +) -> Transaction { + let mut txn = Transaction::from( + TransactionV1Builder::new_targeting_package_with_runtime_args( + package_hash, + version, + protocol_version_major, + entry_point, + TransactionRuntimeParams::VmCasperV1, + runtime_args, + ) + .with_chain_name(chain_name) + .with_pricing_mode(pricing_mode) + .with_initiator_addr(PublicKey::from(initiator)) + .build() + .unwrap(), + ); + txn.sign(initiator); + txn +} + +#[tokio::test] +async fn should_correctly_manage_entity_version_calls() { + testing::init_logging(); + + let config = Config { + max_global_state_size: Some(100 * 1024 * 1024), + ..Config::default() + }; + let config = TestConfig { + config, + fixture_name: None, + }; + let (chainspec, chainspec_raw_bytes) = + <(Chainspec, ChainspecRawBytes)>::from_resources("local"); + let chainspec = Arc::new(chainspec); + let chainspec_raw_bytes = Arc::new(chainspec_raw_bytes); + + let mut rng = 
crate::new_rng(); + let rng = &mut rng; + + let mut runner: Runner> = Runner::new( + config, + Arc::clone(&chainspec), + Arc::clone(&chainspec_raw_bytes), + rng, + ) + .await + .unwrap(); + + // Commit genesis to set up initial global state. + let post_commit_genesis_state_hash = runner + .reactor() + .inner() + .contract_runtime + .commit_genesis(chainspec.as_ref(), chainspec_raw_bytes.as_ref()) + .as_legacy() + .unwrap() + .0; + + let initial_pre_state = ExecutionPreState::new( + 0, + post_commit_genesis_state_hash, + BlockHash::default(), + Digest::default(), + ); + runner + .reactor_mut() + .inner_mut() + .contract_runtime + .set_initial_state(initial_pre_state); + + // Create the genesis immediate switch block. + let block_0 = ExecutableBlock::from_finalized_block_and_transactions( + FinalizedBlock::new( + BlockPayload::default(), + Some(InternalEraReport::default()), + Timestamp::now(), + EraId::new(0), + 0, + PublicKey::System, + ), + vec![], + ); + + runner + .process_injected_effects(execute_block(block_0)) + .await; + runner + .crank_until(rng, execution_completed, TEST_TIMEOUT) + .await; + + // Create the first block of era 1. + let block_1 = ExecutableBlock::from_finalized_block_and_transactions( + FinalizedBlock::new( + BlockPayload::default(), + None, + Timestamp::now(), + EraId::new(1), + 1, + PublicKey::System, + ), + vec![], + ); + runner + .process_injected_effects(execute_block(block_1)) + .await; + runner + .crank_until(rng, execution_completed, TEST_TIMEOUT) + .await; + + // Prepare to create a block which will take a while to execute, i.e. loaded with many deploys + // transferring from node-1's main account to new random public keys. 
+ let node_1_secret_key = SecretKey::from_file( + RESOURCES_PATH + .join("local") + .join("secret_keys") + .join("node-1.pem"), + ) + .unwrap(); + + let node_1_public_key = PublicKey::from(&node_1_secret_key); + let chain_name = chainspec.network_config.name.clone(); + let installer_transaction = valid_wasm_txn( + &node_1_secret_key, + &chain_name, + PricingMode::PaymentLimited { + payment_amount: 250_000_000_000, + gas_price_tolerance: 3, + standard_payment: true, + }, + "purse_holder_stored", + runtime_args! { + "is_locked" => false + }, + ); + + let lane_id = + calculate_transaction_lane_for_transaction(&installer_transaction, &chainspec).unwrap(); + + let mut txn_set = BTreeMap::new(); + let txn_hash = installer_transaction.hash(); + let approvals = installer_transaction.approvals(); + + txn_set.insert(lane_id, vec![(txn_hash, approvals)]); + + let block_payload = BlockPayload::new(txn_set, vec![], Default::default(), true, 1u8); + let block_2 = ExecutableBlock::from_finalized_block_and_transactions( + FinalizedBlock::new( + block_payload, + None, + Timestamp::now(), + EraId::new(1), + 2, + PublicKey::System, + ), + vec![installer_transaction], + ); + runner + .process_injected_effects(execute_block(block_2)) + .await; + + // Crank until execution is scheduled. + runner + .crank_until(rng, execution_completed, TEST_TIMEOUT) + .await; + + let pre_state_hash = { + let prestate = runner + .reactor() + .inner() + .contract_runtime + .execution_pre_state + .lock() + .expect("must get lock"); + prestate.pre_state_root_hash() + }; + + let key = Key::Account(node_1_public_key.to_account_hash()); + let query_request = QueryRequest::new(pre_state_hash, key, vec![]); + + let package_key = if let QueryResult::Success { value, .. 
} = runner + .reactor() + .inner() + .contract_runtime + .data_access_layer + .query(query_request) + { + *value + .as_account() + .expect("must get account") + .named_keys() + .get("purse_holder") + .expect("must get package key") + } else { + panic!("query failed"); + }; + + let package_hash = package_key + .into_hash_addr() + .map(PackageHash::new) + .expect("must get package hash"); + + let upgrader_transaction = valid_wasm_txn( + &node_1_secret_key, + &chain_name, + PricingMode::PaymentLimited { + payment_amount: 250_000_000_000, + gas_price_tolerance: 3, + standard_payment: true, + }, + "purse_holder_stored_upgrader", + runtime_args! { + "contract_package" => package_hash + }, + ); + + let lane_id = + calculate_transaction_lane_for_transaction(&upgrader_transaction, &chainspec).unwrap(); + + let mut txn_set = BTreeMap::new(); + let txn_hash = upgrader_transaction.hash(); + let approvals = upgrader_transaction.approvals(); + + txn_set.insert(lane_id, vec![(txn_hash, approvals)]); + + let block_payload = BlockPayload::new(txn_set, vec![], Default::default(), true, 1u8); + let block_2 = ExecutableBlock::from_finalized_block_and_transactions( + FinalizedBlock::new( + block_payload, + None, + Timestamp::now(), + EraId::new(1), + 3, + PublicKey::System, + ), + vec![upgrader_transaction], + ); + runner + .process_injected_effects(execute_block(block_2)) + .await; + + // Crank until execution is scheduled. + runner + .crank_until(rng, execution_completed, TEST_TIMEOUT) + .await; + + let pre_state_hash = { + let prestate = runner + .reactor() + .inner() + .contract_runtime + .execution_pre_state + .lock() + .expect("must get lock"); + prestate.pre_state_root_hash() + }; + + let query_request = QueryRequest::new(pre_state_hash, package_key, vec![]); + if let QueryResult::Success { value, .. 
} = runner + .reactor() + .inner() + .contract_runtime + .data_access_layer + .query(query_request) + { + let versions = value + .as_contract_package() + .expect("must get account") + .versions(); + + assert_eq!(2, versions.len()) + } else { + panic!("query failed"); + }; + + let call_by_entity_version_1 = valid_versioned_call_txn( + &node_1_secret_key, + &chain_name, + PricingMode::PaymentLimited { + payment_amount: 250_000_000_000, + gas_price_tolerance: 3, + standard_payment: true, + }, + "add_named_purse", + package_hash, + runtime_args! { + "purse_name" => "purse" + }, + Some(1), + None, + ); + + let call_by_major_version_and_entity_version = valid_versioned_call_txn( + &node_1_secret_key, + &chain_name, + PricingMode::PaymentLimited { + payment_amount: 250_000_000_000, + gas_price_tolerance: 3, + standard_payment: true, + }, + "add_named_purse", + package_hash, + runtime_args! { + "purse_name" => "purse" + }, + Some(1), + Some(2), + ); + + let call_by_major_version = valid_versioned_call_txn( + &node_1_secret_key, + &chain_name, + PricingMode::PaymentLimited { + payment_amount: 250_000_000_000, + gas_price_tolerance: 3, + standard_payment: true, + }, + "add", + package_hash, + runtime_args! 
{ + "purse_name" => "purse" + }, + None, + Some(2), + ); + + let lane_id = + calculate_transaction_lane_for_transaction(&call_by_entity_version_1, &chainspec).unwrap(); + + let txns = vec![ + call_by_entity_version_1, + call_by_major_version_and_entity_version, + call_by_major_version, + ]; + + let mut txn_set = BTreeMap::new(); + let val = txns + .iter() + .map(|txn| { + let hash = txn.hash(); + let approvals = txn.approvals(); + (hash, approvals) + }) + .collect(); + + txn_set.insert(lane_id, val); + + let block_payload = BlockPayload::new(txn_set, vec![], Default::default(), true, 1u8); + let block_3 = ExecutableBlock::from_finalized_block_and_transactions( + FinalizedBlock::new( + block_payload, + None, + Timestamp::now(), + EraId::new(1), + 4, + PublicKey::System, + ), + txns.clone(), + ); + runner + .process_injected_effects(execute_block(block_3)) + .await; + + // Crank until execution is scheduled. + runner + .crank_until(rng, execution_completed, TEST_TIMEOUT) + .await; + + for txn in txns.iter() { + let hash = txn.hash(); + let results = runner + .reactor() + .inner() + .storage + .read_execution_result(&hash) + .unwrap(); + + assert!(results.error_message().is_none()) + } +} + +#[cfg(test)] +mod test_mod { + use std::sync::Arc; + + use prometheus::Registry; + use rand::Rng; + use tempfile::tempdir; + + use casper_storage::{ + data_access_layer::{EntryPointExistsRequest, EntryPointExistsResult}, + global_state::{ + state::{CommitProvider, StateProvider}, + trie::Trie, + }, + }; + use casper_types::{ + account::AccountHash, + bytesrepr, + contracts::{ContractPackageHash, EntryPoint, EntryPoints}, + execution::{TransformKindV2, TransformV2}, + global_state::Pointer, + testing::TestRng, + ActivationPoint, CLType, CLValue, Chainspec, ChunkWithProof, Contract, ContractWasmHash, + CoreConfig, Digest, EntityAddr, EntryPointAccess, EntryPointAddr, EntryPointPayment, + EntryPointType, EntryPointValue, EraId, HashAddr, Key, NamedKeys, ProtocolConfig, + 
ProtocolVersion, StoredValue, TimeDiff, DEFAULT_FEE_HANDLING, DEFAULT_GAS_HOLD_INTERVAL, + DEFAULT_REFUND_HANDLING, + }; + + use super::{Config as ContractRuntimeConfig, ContractRuntime}; + use crate::{ + components::fetcher::FetchResponse, + contract_runtime::ContractRuntimeError, + types::{ChunkingError, TrieOrChunk, TrieOrChunkId, ValueOrChunk}, + }; + + #[derive(Debug, Clone)] + struct TestPair(Key, StoredValue); + + fn create_pre_condor_contract( + rng: &mut TestRng, + contract_hash: Key, + entry_point_name: &str, + protocol_version: ProtocolVersion, + ) -> Vec { + let mut entry_points = EntryPoints::new(); + let entry_point = EntryPoint::new( + entry_point_name, + vec![], + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Caller, + ); + entry_points.add_entry_point(entry_point); + + let contract_package_hash = ContractPackageHash::new(rng.gen()); + let contract_wasm_hash = ContractWasmHash::new(rng.gen()); + let named_keys = NamedKeys::new(); + let contract = Contract::new( + contract_package_hash, + contract_wasm_hash, + named_keys, + entry_points, + protocol_version, + ); + vec![TestPair(contract_hash, StoredValue::Contract(contract))] + } + + fn create_entry_point(entity_addr: EntityAddr, entry_point_name: &str) -> Vec { + let mut entry_points = EntryPoints::new(); + let entry_point = EntryPoint::new( + entry_point_name, + vec![], + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Caller, + ); + entry_points.add_entry_point(entry_point); + let key = Key::EntryPoint( + EntryPointAddr::new_v1_entry_point_addr(entity_addr, entry_point_name).unwrap(), + ); + let entry_point = casper_types::EntityEntryPoint::new( + entry_point_name, + vec![], + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Caller, + EntryPointPayment::Caller, + ); + let entry_point_value = EntryPointValue::V1CasperVm(entry_point); + vec![TestPair(key, StoredValue::EntryPoint(entry_point_value))] + } + + // Creates the test pairs that contain data of size + // 
greater than the chunk limit. + fn create_test_pairs_with_large_data() -> Vec { + let val = CLValue::from_t( + String::from_utf8(vec![b'a'; ChunkWithProof::CHUNK_SIZE_BYTES * 2]).unwrap(), + ) + .unwrap(); + vec![ + TestPair( + Key::Account(AccountHash::new([1_u8; 32])), + StoredValue::CLValue(val.clone()), + ), + TestPair( + Key::Account(AccountHash::new([2_u8; 32])), + StoredValue::CLValue(val), + ), + ] + } + + fn extract_next_hash_from_trie(trie_or_chunk: TrieOrChunk) -> Digest { + let next_hash = if let TrieOrChunk::Value(trie_bytes) = trie_or_chunk { + if let Trie::Node { pointer_block } = bytesrepr::deserialize::>( + trie_bytes.into_inner().into_inner().into(), + ) + .expect("Could not parse trie bytes") + { + if pointer_block.child_count() == 0 { + panic!("expected children"); + } + let (_, ptr) = pointer_block.as_indexed_pointers().next().unwrap(); + match ptr { + Pointer::LeafPointer(ptr) | Pointer::NodePointer(ptr) => ptr, + } + } else { + panic!("expected `Node`"); + } + } else { + panic!("expected `Trie`"); + }; + next_hash + } + + // Creates a test ContractRuntime and feeds the underlying GlobalState with `test_pair`. + // Returns [`ContractRuntime`] instance and the new Merkle root after applying the `test_pair`. 
+ fn create_test_state(rng: &mut TestRng, test_pair: Vec) -> (ContractRuntime, Digest) { + let temp_dir = tempdir().unwrap(); + let chainspec = Chainspec { + protocol_config: ProtocolConfig { + activation_point: ActivationPoint::EraId(EraId::from(2)), + ..ProtocolConfig::random(rng) + }, + core_config: CoreConfig { + max_associated_keys: 10, + max_runtime_call_stack_height: 10, + minimum_delegation_amount: 10, + prune_batch_size: 5, + strict_argument_checking: true, + vesting_schedule_period: TimeDiff::from_millis(1), + max_delegators_per_validator: 0, + allow_auction_bids: true, + allow_unrestricted_transfers: true, + fee_handling: DEFAULT_FEE_HANDLING, + refund_handling: DEFAULT_REFUND_HANDLING, + gas_hold_interval: DEFAULT_GAS_HOLD_INTERVAL, + ..CoreConfig::random(rng) + }, + wasm_config: Default::default(), + system_costs_config: Default::default(), + ..Chainspec::random(rng) + }; + let contract_runtime = ContractRuntime::new( + temp_dir.path(), + &ContractRuntimeConfig::default(), + Arc::new(chainspec), + &Registry::default(), + ) + .unwrap(); + let empty_state_root = contract_runtime.data_access_layer().empty_root(); + let mut effects = casper_types::execution::Effects::new(); + for TestPair(key, value) in test_pair { + effects.push(TransformV2::new(key, TransformKindV2::Write(value))); + } + let post_state_hash = &contract_runtime + .data_access_layer() + .as_ref() + .commit_effects(empty_state_root, effects) + .expect("applying effects to succeed"); + (contract_runtime, *post_state_hash) + } + + fn read_trie(contract_runtime: &ContractRuntime, id: TrieOrChunkId) -> TrieOrChunk { + let serialized_id = bincode::serialize(&id).unwrap(); + match contract_runtime + .fetch_trie_local(&serialized_id) + .expect("expected a successful read") + { + FetchResponse::Fetched(found) => found, + FetchResponse::NotProvided(_) | FetchResponse::NotFound(_) => { + panic!("expected to find the trie") + } + } + } + + #[test] + fn fetching_enty_points_falls_back_to_contract() { + 
let rng = &mut TestRng::new(); + let hash_addr: HashAddr = rng.gen(); + let contract_hash = Key::Hash(hash_addr); + let entry_point_name = "ep1"; + let initial_state = create_pre_condor_contract( + rng, + contract_hash, + entry_point_name, + ProtocolVersion::V2_0_0, + ); + let (contract_runtime, state_hash) = create_test_state(rng, initial_state); + let request = + EntryPointExistsRequest::new(state_hash, entry_point_name.to_string(), hash_addr); + let res = contract_runtime + .data_access_layer() + .entry_point_exists(request); + assert!(matches!(res, EntryPointExistsResult::Success)); + } + + #[test] + fn fetching_enty_points_fetches_entry_point_from_v2() { + let rng = &mut TestRng::new(); + let hash_addr: HashAddr = rng.gen(); + let entity_addr = EntityAddr::new_smart_contract(hash_addr); + let entry_point_name = "ep1"; + let initial_state = create_entry_point(entity_addr, entry_point_name); + let (contract_runtime, state_hash) = create_test_state(rng, initial_state); + let request = + EntryPointExistsRequest::new(state_hash, entry_point_name.to_string(), hash_addr); + let res = contract_runtime + .data_access_layer() + .entry_point_exists(request); + assert!(matches!(res, EntryPointExistsResult::Success)); + } + + #[test] + fn fetching_enty_points_fetches_fail_when_asking_for_non_existing() { + let rng = &mut TestRng::new(); + let hash_addr: HashAddr = rng.gen(); + let entity_addr = EntityAddr::new_smart_contract(hash_addr); + let initial_state = create_entry_point(entity_addr, "ep1"); + let (contract_runtime, state_hash) = create_test_state(rng, initial_state); + let request = EntryPointExistsRequest::new(state_hash, "ep2".to_string(), hash_addr); + let res = contract_runtime + .data_access_layer() + .entry_point_exists(request); + assert!(matches!(res, EntryPointExistsResult::ValueNotFound { .. 
})); + } + + #[test] + fn returns_trie_or_chunk() { + let rng = &mut TestRng::new(); + let (contract_runtime, root_hash) = + create_test_state(rng, create_test_pairs_with_large_data()); + + // Expect `Trie` with NodePointer when asking with a root hash. + let trie = read_trie(&contract_runtime, TrieOrChunkId(0, root_hash)); + assert!(matches!(trie, ValueOrChunk::Value(_))); + + // Expect another `Trie` with two LeafPointers. + let trie = read_trie( + &contract_runtime, + TrieOrChunkId(0, extract_next_hash_from_trie(trie)), + ); + assert!(matches!(trie, TrieOrChunk::Value(_))); + + // Now, the next hash will point to the actual leaf, which as we expect + // contains large data, so we expect to get `ChunkWithProof`. + let hash = extract_next_hash_from_trie(trie); + let chunk = match read_trie(&contract_runtime, TrieOrChunkId(0, hash)) { + TrieOrChunk::ChunkWithProof(chunk) => chunk, + other => panic!("expected ChunkWithProof, got {:?}", other), + }; + + assert_eq!(chunk.proof().root_hash(), hash); + + // try to read all the chunks + let count = chunk.proof().count(); + let mut chunks = vec![chunk]; + for i in 1..count { + let chunk = match read_trie(&contract_runtime, TrieOrChunkId(i, hash)) { + TrieOrChunk::ChunkWithProof(chunk) => chunk, + other => panic!("expected ChunkWithProof, got {:?}", other), + }; + chunks.push(chunk); + } + + // there should be no chunk with index `count` + let serialized_id = bincode::serialize(&TrieOrChunkId(count, hash)).unwrap(); + assert!(matches!( + contract_runtime.fetch_trie_local(&serialized_id), + Err(ContractRuntimeError::ChunkingError( + ChunkingError::MerkleConstruction(_) + )) + )); + + // all chunks should be valid + assert!(chunks.iter().all(|chunk| chunk.verify().is_ok())); + + let data: Vec = chunks + .into_iter() + .flat_map(|chunk| chunk.into_chunk()) + .collect(); + + let trie: Trie = + bytesrepr::deserialize(data).expect("trie should deserialize correctly"); + + // should be deserialized to a leaf + 
assert!(matches!(trie, Trie::Leaf { .. })); + } +} diff --git a/node/src/components/contract_runtime/types.rs b/node/src/components/contract_runtime/types.rs index fdc94d7714..94f9813895 100644 --- a/node/src/components/contract_runtime/types.rs +++ b/node/src/components/contract_runtime/types.rs @@ -1,19 +1,42 @@ -use casper_execution_engine::{ - core::engine_state::GetEraValidatorsRequest, shared::newtypes::Blake2bHash, +use std::{collections::BTreeMap, sync::Arc}; + +use crate::{contract_runtime::StateResultError, types::TransactionHeader}; +use casper_types::{InitiatorAddr, Transfer}; +use datasize::DataSize; +use serde::Serialize; + +use casper_execution_engine::engine_state::{ + Error, InvalidRequest as InvalidWasmV1Request, WasmV1Result, +}; +use casper_storage::{ + block_store::types::ApprovalsHashes, + data_access_layer::{ + auction::AuctionMethodError, mint::BurnResult, BalanceHoldResult, BalanceResult, + BiddingResult, EraValidatorsRequest, HandleFeeResult, HandleRefundResult, TransferResult, + }, }; -use casper_types::{EraId, ProtocolVersion}; +use casper_types::{ + contract_messages::Messages, + execution::{Effects, ExecutionResult, ExecutionResultV2}, + BlockHash, BlockHeaderV2, BlockV2, Digest, EraId, Gas, InvalidDeploy, InvalidTransaction, + InvalidTransactionV1, ProtocolVersion, PublicKey, Transaction, TransactionHash, U512, +}; + +use self::wasm_v2_request::{WasmV2Error, WasmV2Result}; + +use super::operations::wasm_v2_request; /// Request for validator weights for a specific era. #[derive(Debug, Clone, PartialEq, Eq)] pub struct ValidatorWeightsByEraIdRequest { - state_hash: Blake2bHash, + state_hash: Digest, era_id: EraId, protocol_version: ProtocolVersion, } impl ValidatorWeightsByEraIdRequest { /// Constructs a new ValidatorWeightsByEraIdRequest. 
- pub fn new(state_hash: Blake2bHash, era_id: EraId, protocol_version: ProtocolVersion) -> Self { + pub fn new(state_hash: Digest, era_id: EraId, protocol_version: ProtocolVersion) -> Self { ValidatorWeightsByEraIdRequest { state_hash, era_id, @@ -22,7 +45,7 @@ impl ValidatorWeightsByEraIdRequest { } /// Get the state hash. - pub fn state_hash(&self) -> Blake2bHash { + pub fn state_hash(&self) -> Digest { self.state_hash } @@ -37,41 +60,559 @@ impl ValidatorWeightsByEraIdRequest { } } -impl From for GetEraValidatorsRequest { +impl From for EraValidatorsRequest { fn from(input: ValidatorWeightsByEraIdRequest) -> Self { - GetEraValidatorsRequest::new(input.state_hash, input.protocol_version) + EraValidatorsRequest::new(input.state_hash) } } -/// Request for era validators. -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct EraValidatorsRequest { - state_hash: Blake2bHash, - protocol_version: ProtocolVersion, +#[derive(Clone, Debug)] +pub(crate) struct ExecutionArtifactBuilder { + effects: Effects, + hash: TransactionHash, + header: TransactionHeader, + error_message: Option, + messages: Messages, + transfers: Vec, + initiator: InitiatorAddr, + current_price: u8, + cost: U512, + limit: Gas, + consumed: Gas, + refund: U512, + size_estimate: u64, + min_cost: U512, } -impl EraValidatorsRequest { - /// Constructs a new EraValidatorsRequest. 
- pub fn new(state_hash: Blake2bHash, protocol_version: ProtocolVersion) -> Self { - EraValidatorsRequest { - state_hash, - protocol_version, +impl ExecutionArtifactBuilder { + pub fn new(transaction: &Transaction, min_cost: U512, current_price: u8) -> Self { + ExecutionArtifactBuilder { + effects: Effects::new(), + hash: transaction.hash(), + header: transaction.into(), + error_message: None, + transfers: vec![], + messages: Default::default(), + initiator: transaction.initiator_addr(), + current_price, + cost: U512::zero(), + limit: Gas::zero(), + consumed: Gas::zero(), + refund: U512::zero(), + size_estimate: transaction.size_estimate() as u64, + min_cost, } } - /// Get the state hash. - pub fn state_hash(&self) -> Blake2bHash { - self.state_hash + pub fn error_message(&self) -> Option { + self.error_message.clone() } - /// Get the protocol version. - pub fn protocol_version(&self) -> ProtocolVersion { - self.protocol_version + pub fn consumed(&self) -> U512 { + self.consumed.value() + } + + pub fn cost_to_use(&self) -> U512 { + // to prevent do-nothing exhaustion and other 0 cost scenarios, + // we raise cost to min_cost if less than that + let cost = self.cost; + if cost < self.min_cost { + self.min_cost + } else { + cost + } + } + + pub fn with_added_consumed(&mut self, consumed: Gas) -> &mut Self { + self.consumed = self.consumed.saturating_add(consumed); + self + } + + pub fn with_appended_transfers(&mut self, transfers: &mut Vec) -> &mut Self { + self.transfers.append(transfers); + self + } + + pub fn with_appended_effects(&mut self, effects: Effects) -> &mut Self { + self.effects.append(effects); + self + } + + pub fn with_appended_messages(&mut self, messages: &mut Messages) -> &mut Self { + self.messages.append(messages); + self + } + + pub fn with_state_result_error(&mut self, error: StateResultError) -> Result<&mut Self, ()> { + if let StateResultError::RootNotFound = error { + return Err(()); + } + if self.error_message.is_none() { + 
self.error_message = Some(format!("{:?}", error)); + } + Ok(self) + } + + pub fn with_initial_balance_result( + &mut self, + balance_result: BalanceResult, + minimum_amount: U512, + ) -> Result<&mut Self, bool> { + if let BalanceResult::RootNotFound = balance_result { + return Err(true); + } + if let (None, Some(err)) = (&self.error_message, balance_result.error()) { + self.error_message = Some(format!("{}", err)); + return Err(false); + } + if let Some(purse) = balance_result.purse_addr() { + let is_sufficient = balance_result.is_sufficient(minimum_amount); + if !is_sufficient { + self.error_message = Some(format!( + "Purse {} has less than {}", + base16::encode_lower(&purse), + minimum_amount + )); + return Ok(self); + } + } + Ok(self) + } + + pub fn with_wasm_v1_result(&mut self, wasm_v1_result: WasmV1Result) -> Result<&mut Self, ()> { + if let Some(Error::RootNotFound(_)) = wasm_v1_result.error() { + return Err(()); + } + self.with_added_consumed(wasm_v1_result.consumed()); + + if let Some(err) = wasm_v1_result.error() { + self.error_message = Some(format!("{}", err)); + } else if wasm_v1_result.consumed() == Gas::zero() { + self.error_message = Some("Wasm consumed 0 gas".to_string()); + } + + if self.error_message.is_some() { + return Ok(self); + } + + self.with_appended_transfers(&mut wasm_v1_result.transfers().clone()) + .with_appended_messages(&mut wasm_v1_result.messages().clone()) + .with_appended_effects(wasm_v1_result.effects().clone()); + Ok(self) + } + + pub fn with_error_message(&mut self, error_message: String) -> &mut Self { + self.error_message = Some(error_message); + self + } + + pub fn with_set_refund_purse_result( + &mut self, + handle_refund_result: &HandleRefundResult, + ) -> Result<&mut Self, bool> { + if let HandleRefundResult::RootNotFound = handle_refund_result { + return Err(true); + } + if let HandleRefundResult::Success { + effects, transfers, .. 
+ } = handle_refund_result + { + self.with_appended_transfers(&mut transfers.clone()) + .with_appended_effects(effects.clone()); + } + if let (None, HandleRefundResult::Failure(_)) = (&self.error_message, handle_refund_result) + { + self.error_message = handle_refund_result.error_message(); + return Err(false); + } + Ok(self) + } + + pub fn with_clear_refund_purse_result( + &mut self, + handle_refund_result: &HandleRefundResult, + ) -> Result<&mut Self, bool> { + if let HandleRefundResult::RootNotFound = handle_refund_result { + return Err(true); + } + if let HandleRefundResult::Success { + effects, transfers, .. + } = handle_refund_result + { + self.with_appended_transfers(&mut transfers.clone()) + .with_appended_effects(effects.clone()); + } + if let (None, HandleRefundResult::Failure(_)) = (&self.error_message, handle_refund_result) + { + self.error_message = handle_refund_result.error_message(); + return Err(false); + } + Ok(self) } + + pub fn with_handle_refund_result( + &mut self, + handle_refund_result: &HandleRefundResult, + ) -> Result<&mut Self, ()> { + if let HandleRefundResult::RootNotFound = handle_refund_result { + return Err(()); + } + if let HandleRefundResult::Success { + effects, transfers, .. 
+ } = handle_refund_result + { + self.with_appended_transfers(&mut transfers.clone()) + .with_appended_effects(effects.clone()); + } + if let (None, HandleRefundResult::Failure(_)) = (&self.error_message, handle_refund_result) + { + self.error_message = handle_refund_result.error_message(); + return Ok(self); + } + Ok(self) + } + + pub fn with_handle_fee_result( + &mut self, + handle_fee_result: &HandleFeeResult, + ) -> Result<&mut Self, ()> { + if let HandleFeeResult::RootNotFound = handle_fee_result { + return Err(()); + } + if let (None, HandleFeeResult::Failure(err)) = (&self.error_message, handle_fee_result) { + self.error_message = Some(format!("{}", err)); + return Ok(self); + } + self.with_appended_effects(handle_fee_result.effects()); + Ok(self) + } + + pub fn with_balance_hold_result( + &mut self, + hold_result: &BalanceHoldResult, + ) -> Result<&mut Self, ()> { + if let BalanceHoldResult::RootNotFound = hold_result { + return Err(()); + } + if let (None, BalanceHoldResult::Failure(err)) = (&self.error_message, hold_result) { + self.error_message = Some(format!("{}", err)); + return Ok(self); + } + self.with_appended_effects(hold_result.effects()); + Ok(self) + } + + pub fn with_added_cost(&mut self, cost: U512) -> &mut Self { + self.cost = self.cost.saturating_add(cost); + self + } + + pub fn with_min_cost(&mut self, min_cost: U512) -> &mut Self { + self.min_cost = min_cost; + self + } + + pub fn with_gas_limit(&mut self, limit: Gas) -> &mut Self { + self.limit = limit; + self + } + + pub fn with_refund_amount(&mut self, refund: U512) -> &mut Self { + self.refund = refund; + self + } + + pub fn with_invalid_transaction( + &mut self, + invalid_transaction: &InvalidTransaction, + ) -> &mut Self { + if self.error_message.is_none() { + self.error_message = Some(format!("{}", invalid_transaction)); + } + self + } + + pub fn with_invalid_wasm_v1_request( + &mut self, + invalid_request: &InvalidWasmV1Request, + ) -> &mut Self { + if self.error_message.is_none() 
{ + self.error_message = Some(format!("{}", invalid_request)); + } + self + } + + pub fn with_auction_method_error( + &mut self, + auction_method_error: &AuctionMethodError, + ) -> &mut Self { + if self.error_message.is_none() { + self.error_message = Some(format!("{}", auction_method_error)); + } + self + } + + pub fn with_transfer_result( + &mut self, + transfer_result: TransferResult, + ) -> Result<&mut Self, ()> { + if let TransferResult::RootNotFound = transfer_result { + return Err(()); + } + if let (None, TransferResult::Failure(err)) = (&self.error_message, &transfer_result) { + self.error_message = Some(format!("{}", err)); + } + if let TransferResult::Success { + effects, + transfers, + cache: _, + } = transfer_result + { + self.with_appended_transfers(&mut transfers.clone()) + .with_appended_effects(effects); + } + Ok(self) + } + + pub fn with_burn_result(&mut self, burn_result: BurnResult) -> Result<&mut Self, ()> { + if let BurnResult::RootNotFound = burn_result { + return Err(()); + } + if let (None, BurnResult::Failure(err)) = (&self.error_message, &burn_result) { + self.error_message = Some(format!("{}", err)); + } + if let BurnResult::Success { effects, cache: _ } = burn_result { + self.with_appended_effects(effects); + } + Ok(self) + } + + pub fn with_bidding_result(&mut self, bidding_result: BiddingResult) -> Result<&mut Self, ()> { + if let BiddingResult::RootNotFound = bidding_result { + return Err(()); + } + if let (None, BiddingResult::Failure(err)) = (&self.error_message, &bidding_result) { + self.error_message = Some(format!("{}", err)); + } + if let BiddingResult::Success { + effects, transfers, .. 
+ } = bidding_result + { + self.with_appended_transfers(&mut transfers.clone()) + .with_appended_effects(effects); + } + Ok(self) + } + + #[allow(unused)] + pub fn with_initiator_addr(&mut self, initiator_addr: InitiatorAddr) -> &mut Self { + self.initiator = initiator_addr; + self + } + + pub(crate) fn build(self) -> ExecutionArtifact { + let actual_cost = self.cost_to_use(); + let result = ExecutionResultV2 { + effects: self.effects, + transfers: self.transfers, + initiator: self.initiator, + refund: self.refund, + limit: self.limit, + consumed: self.consumed, + cost: actual_cost, + current_price: self.current_price, + size_estimate: self.size_estimate, + error_message: self.error_message, + }; + let execution_result = ExecutionResult::V2(Box::new(result)); + ExecutionArtifact::new(self.hash, self.header, execution_result, self.messages) + } + + /// Adds the error message from a `InvalidRequest` to the artifact. + pub(crate) fn with_invalid_wasm_v2_request( + &mut self, + ire: wasm_v2_request::InvalidRequest, + ) -> &mut Self { + if self.error_message.is_none() { + self.error_message = Some(format!("{}", ire)); + } + self + } + + /// Adds the result from a `WasmV2Result` to the artifact. + pub(crate) fn with_wasm_v2_result(&mut self, result: WasmV2Result) -> &mut Self { + self.with_added_consumed(Gas::from(result.gas_usage().gas_spent())); + + // TODO: Use system message to notify about contract hash + + self.with_appended_effects(result.effects().clone()); + + self + } + + /// Adds the error message from a `WasmV2Error` to the artifact. + #[inline] + pub(crate) fn with_wasm_v2_error(&mut self, error: WasmV2Error) -> &mut Self { + self.with_error_message(error.to_string()); + self + } +} + +/// Effects from running step and the next era validators that are gathered when an era ends. +#[derive(Clone, Debug, DataSize)] +pub(crate) struct StepOutcome { + /// Validator sets for all upcoming eras that have already been determined. 
+ pub(crate) upcoming_era_validators: BTreeMap>, + /// An [`Effects`] created by an era ending. + pub(crate) step_effects: Effects, } -impl From for GetEraValidatorsRequest { - fn from(input: EraValidatorsRequest) -> Self { - GetEraValidatorsRequest::new(input.state_hash, input.protocol_version) +#[derive(Clone, Debug, DataSize, PartialEq, Eq, Serialize)] +pub(crate) struct ExecutionArtifact { + pub(crate) transaction_hash: TransactionHash, + pub(crate) transaction_header: TransactionHeader, + pub(crate) execution_result: ExecutionResult, + pub(crate) messages: Messages, +} + +impl ExecutionArtifact { + pub(crate) fn new( + transaction_hash: TransactionHash, + transaction_header: TransactionHeader, + execution_result: ExecutionResult, + messages: Messages, + ) -> Self { + Self { + transaction_hash, + transaction_header, + execution_result, + messages, + } + } +} + +#[doc(hidden)] +/// A [`Block`] that was the result of execution in the `ContractRuntime` along with any execution +/// effects it may have. +#[derive(Clone, Debug, DataSize)] +pub struct BlockAndExecutionArtifacts { + /// The [`Block`] the contract runtime executed. + pub(crate) block: Arc, + /// The [`ApprovalsHashes`] for the transactions in this block. + pub(crate) approvals_hashes: Box, + /// The results from executing the transactions in the block. + pub(crate) execution_artifacts: Vec, + /// The [`Effects`] and the upcoming validator sets determined by the `step` + pub(crate) step_outcome: Option, +} + +/// Type representing results of the speculative execution. 
+#[derive(Debug)] +pub enum SpeculativeExecutionResult { + InvalidTransaction(InvalidTransaction), + WasmV1(Box), + ReceivedV1Transaction, +} + +impl SpeculativeExecutionResult { + pub fn invalid_gas_limit(transaction: Transaction) -> Self { + match transaction { + Transaction::Deploy(_) => SpeculativeExecutionResult::InvalidTransaction( + InvalidTransaction::Deploy(InvalidDeploy::UnableToCalculateGasLimit), + ), + Transaction::V1(_) => SpeculativeExecutionResult::InvalidTransaction( + InvalidTransaction::V1(InvalidTransactionV1::UnableToCalculateGasLimit), + ), + } + } + + pub fn invalid_transaction(error: InvalidTransaction) -> Self { + SpeculativeExecutionResult::InvalidTransaction(error) + } +} + +/// State to use to construct the next block in the blockchain. Includes the state root hash for the +/// execution engine as well as certain values the next header will be based on. +#[derive(DataSize, Default, Debug, Clone, Serialize)] +pub struct ExecutionPreState { + /// The height of the next `Block` to be constructed. Note that this must match the height of + /// the `FinalizedBlock` used to generate the block. + next_block_height: u64, + /// The state root to use when executing deploys. + pre_state_root_hash: Digest, + /// The parent hash of the next `Block`. + parent_hash: BlockHash, + /// The accumulated seed for the pseudo-random number generator to be incorporated into the + /// next `Block`, where additional entropy will be introduced. + parent_seed: Digest, +} + +impl ExecutionPreState { + pub(crate) fn new( + next_block_height: u64, + pre_state_root_hash: Digest, + parent_hash: BlockHash, + parent_seed: Digest, + ) -> Self { + ExecutionPreState { + next_block_height, + pre_state_root_hash, + parent_hash, + parent_seed, + } + } + + /// Creates instance of `ExecutionPreState` from given block header nad Merkle tree hash + /// activation point. 
+ pub fn from_block_header(block_header: &BlockHeaderV2) -> Self { + ExecutionPreState { + pre_state_root_hash: *block_header.state_root_hash(), + next_block_height: block_header.height() + 1, + parent_hash: block_header.block_hash(), + parent_seed: *block_header.accumulated_seed(), + } + } + + // The height of the next `Block` to be constructed. Note that this must match the height of + /// the `FinalizedBlock` used to generate the block. + pub fn next_block_height(&self) -> u64 { + self.next_block_height + } + /// The state root to use when executing deploys. + pub fn pre_state_root_hash(&self) -> Digest { + self.pre_state_root_hash + } + /// The parent hash of the next `Block`. + pub fn parent_hash(&self) -> BlockHash { + self.parent_hash + } + /// The accumulated seed for the pseudo-random number generator to be incorporated into the + /// next `Block`, where additional entropy will be introduced. + pub fn parent_seed(&self) -> Digest { + self.parent_seed + } +} + +#[derive(Clone, Copy, Ord, Eq, PartialOrd, PartialEq, DataSize, Debug)] +pub(crate) struct EraPrice { + era_id: EraId, + gas_price: u8, +} + +impl EraPrice { + pub(crate) fn new(era_id: EraId, gas_price: u8) -> Self { + Self { era_id, gas_price } + } + + pub(crate) fn gas_price(&self) -> u8 { + self.gas_price + } + + pub(crate) fn maybe_gas_price_for_era_id(&self, era_id: EraId) -> Option { + if self.era_id == era_id { + return Some(self.gas_price); + } + + None } } diff --git a/node/src/components/contract_runtime/utils.rs b/node/src/components/contract_runtime/utils.rs new file mode 100644 index 0000000000..7230b069a9 --- /dev/null +++ b/node/src/components/contract_runtime/utils.rs @@ -0,0 +1,939 @@ +use casper_executor_wasm::ExecutorV2; +use num_rational::Ratio; +use once_cell::sync::Lazy; +use std::{ + cmp, + collections::{BTreeMap, HashMap}, + fmt::Debug, + ops::Range, + sync::{Arc, Mutex}, + time::Instant, +}; +use tracing::{debug, error, info}; + +use crate::{ + contract_runtime::{ + 
exec_queue::{ExecQueue, QueueItem}, + execute_finalized_block, + metrics::Metrics, + rewards, BlockAndExecutionArtifacts, BlockExecutionError, ExecutionPreState, StepOutcome, + }, + effect::{ + announcements::{ContractRuntimeAnnouncement, FatalAnnouncement, MetaBlockAnnouncement}, + requests::{ContractRuntimeRequest, StorageRequest}, + EffectBuilder, + }, + fatal, + types::{ExecutableBlock, MetaBlock, MetaBlockState}, +}; + +use casper_binary_port::SpeculativeExecutionResult; +use casper_execution_engine::engine_state::{ExecutionEngineV1, WasmV1Result}; +use casper_storage::{ + data_access_layer::{ + DataAccessLayer, FlushRequest, FlushResult, ProtocolUpgradeRequest, ProtocolUpgradeResult, + TransferResult, + }, + global_state::state::{lmdb::LmdbGlobalState, CommitProvider, StateProvider}, +}; +use casper_types::{BlockHash, Chainspec, Digest, EraId, Gas, Key, ProtocolUpgradeConfig}; + +/// Maximum number of resource intensive tasks that can be run in parallel. +/// +/// TODO: Fine tune this constant to the machine executing the node. +const MAX_PARALLEL_INTENSIVE_TASKS: usize = 4; +/// Semaphore enforcing maximum number of parallel resource intensive tasks. +static INTENSIVE_TASKS_SEMAPHORE: Lazy = + Lazy::new(|| tokio::sync::Semaphore::new(MAX_PARALLEL_INTENSIVE_TASKS)); + +/// Asynchronously runs a resource intensive task. +/// At most `MAX_PARALLEL_INTENSIVE_TASKS` are being run in parallel at any time. +/// +/// The task is a closure that takes no arguments and returns a value. +/// This function returns a future for that value. +pub(super) async fn run_intensive_task(task: T) -> V +where + T: 'static + Send + FnOnce() -> V, + V: 'static + Send + Debug, +{ + // This will never panic since the semaphore is never closed. 
+ let _permit = INTENSIVE_TASKS_SEMAPHORE.acquire().await.unwrap(); + let result = tokio::task::spawn_blocking(task).await; + match result { + Ok(ret) => ret, + Err(err) => { + error!("{:?}", err); + panic!("intensive contract runtime task errored: {:?}", err); + } + } +} + +// Maybe era end processing instructions. +#[derive(Debug)] +enum EraEndInstruction { + // Is not a switch block. + ExecNonSwitch, + // Is a switch block, and we can calc next era gas price, thus we can exec. + ExecSwitch { next_gas_price: u8 }, + // Is a switch block, but we cannot calc a new gas price. + NoExec, + // Fatal with error string. + Fatal(String), +} + +/// This currently handles reward and dynamic gas price calculation. If in future +/// similar end of era determinations need to be made, they should potentially +/// be added here. +async fn handle_era_end( + data_access_layer: Arc>, + chainspec: Arc, + metrics: Arc, + effect_builder: EffectBuilder, + executable_block: &mut ExecutableBlock, +) -> EraEndInstruction +where + REv: From + + From + + From + + From + + From + + Send, +{ + if executable_block.era_report.is_none() { + return EraEndInstruction::ExecNonSwitch; + } + // this logic could be further broken down to each part if desired + + // reward stuff + if executable_block.rewards.is_none() { + executable_block.rewards = Some(if chainspec.core_config.compute_rewards { + let rewards = match rewards::fetch_data_and_calculate_rewards_for_era( + effect_builder, + data_access_layer.clone(), + chainspec.as_ref(), + &metrics, + executable_block.clone(), + ) + .await + { + Ok(rewards) => rewards, + Err(e) => { + return EraEndInstruction::Fatal(format!( + "Failed to compute the rewards: {e:?}" + )); + } + }; + + debug!("rewards successfully computed"); + + rewards + } else { + BTreeMap::new() + }); + } + + // dynamic gas price stuff + let era_id = executable_block.era_id; + let block_height = executable_block.height; + info!(%era_id, %block_height, "End of era calculating new gas 
price"); + + if let Some(next_gas_price) = executable_block.next_era_gas_price { + // keep up nodes are executing a block as determined by validators + // and the next era gas price is already determined + return EraEndInstruction::ExecSwitch { next_gas_price }; + } + // we need to calculate the utilization of the block we are about to execute + // and include it in the tally of the utilization for the entire era. + let executable_block_utilization_score = + match executable_block.calc_utilization_score(&chainspec) { + Some(score) => score, + None => { + return EraEndInstruction::Fatal(format!( + "could not calc utilization of executable block {}", + block_height + )); + } + }; + + // BLOCKING CALL + match effect_builder + .get_era_utilization(era_id, block_height, executable_block_utilization_score) + .await + { + Some((utilization, block_count, total_block_count)) => { + if block_count != total_block_count { + return EraEndInstruction::NoExec; + } + + let current_gas_price = executable_block.current_gas_price; + let era_score = { Ratio::new(utilization, block_count).to_integer() }; + + let go_up = chainspec.vacancy_config.upper_threshold; + let go_down = chainspec.vacancy_config.lower_threshold; + let max = chainspec.vacancy_config.max_gas_price; + let min = chainspec.vacancy_config.min_gas_price; + let next_gas_price = if era_score >= go_up { + current_gas_price.saturating_add(1).min(max) + } else if era_score <= go_down { + current_gas_price.saturating_sub(1).max(min) + } else { + current_gas_price + }; + info!(%next_gas_price, "Calculated new gas price"); + EraEndInstruction::ExecSwitch { next_gas_price } + } + None => { + let error = BlockExecutionError::FailedToGetNewEraGasPrice { era_id }; + EraEndInstruction::Fatal(format!("{}", error)) + } + } +} + +/// This function can fatal. 
+#[allow(clippy::too_many_arguments)] +pub(super) async fn exec_and_check_next( + data_access_layer: Arc>, + execution_engine_v1: Arc, + execution_engine_v2: ExecutorV2, + chainspec: Arc, + metrics: Arc, + mut exec_queue: ExecQueue, + shared_pre_state: Arc>, + current_pre_state: ExecutionPreState, + effect_builder: EffectBuilder, + mut executable_block: ExecutableBlock, + key_block_height_for_activation_point: u64, + mut meta_block_state: MetaBlockState, +) where + REv: From + + From + + From + + From + + From + + Send, +{ + debug!("ContractRuntime: execute_finalized_block_or_requeue"); + + // FIRST determine if we are aware of the last switch block header + let era_id = executable_block.era_id; + let last_switch_block_hash = match era_id.predecessor() { + Some(previous_era) => { + let switch_block_header = effect_builder + .get_switch_block_header_by_era_id_from_storage(previous_era) + .await; + if switch_block_header.is_none() { + return fatal!( + effect_builder, + "switch block header can only be none for genesis era" + ) + .await; + } + switch_block_header.map(|header| header.block_hash()) + } + None => { + // genesis era + None + } + }; + + let era_end_instruction = handle_era_end( + data_access_layer.clone(), + chainspec.clone(), + metrics.clone(), + effect_builder, + &mut executable_block, + ) + .await; + debug!(?era_end_instruction, "era_end_instruction"); + let maybe_next_era_gas_price = match era_end_instruction { + EraEndInstruction::ExecNonSwitch => None, + EraEndInstruction::ExecSwitch { next_gas_price } => Some(next_gas_price), + EraEndInstruction::NoExec => { + info!("ContractRuntime: unable to execute - try again later"); + exec_queue.insert(QueueItem { + meta_block_state, + executable_block, + }); + return; + } + EraEndInstruction::Fatal(msg) => { + return fatal!(effect_builder, "{}", msg).await; + } + }; + + let current_gas_price = executable_block.current_gas_price; + let contract_runtime_metrics = metrics.clone(); + let task = move || { + 
debug!("ContractRuntime: execute_finalized_block"); + execute_finalized_block( + data_access_layer.as_ref(), + execution_engine_v1.as_ref(), + execution_engine_v2, + chainspec.as_ref(), + Some(contract_runtime_metrics), + current_pre_state, + executable_block, + key_block_height_for_activation_point, + current_gas_price, + maybe_next_era_gas_price, + last_switch_block_hash, + ) + }; + let BlockAndExecutionArtifacts { + block, + approvals_hashes, + execution_artifacts, + step_outcome: maybe_step_outcome, + } = match run_intensive_task(task).await { + Ok(ret) => ret, + Err(error) => { + error!(%error, "failed to execute block"); + return fatal!(effect_builder, "{}", error).await; + } + }; + + // from this point onward we are dealing with the block we just created by executing + let new_execution_pre_state = ExecutionPreState::from_block_header(block.header()); + { + // The `shared_pre_state` could have been set to a block we just fully synced after + // doing a sync leap (via a call to `set_initial_state`). We should not allow a block + // which completed execution just after this to set the `shared_pre_state` back to an + // earlier block height. 
+ let mut shared_pre_state = shared_pre_state.lock().unwrap();
+ if shared_pre_state.next_block_height() < new_execution_pre_state.next_block_height() {
+ debug!(
+ next_block_height = new_execution_pre_state.next_block_height(),
+ "ContractRuntime: updating shared pre-state",
+ );
+ *shared_pre_state = new_execution_pre_state.clone();
+ } else {
+ debug!(
+ current_next_block_height = shared_pre_state.next_block_height(),
+ attempted_next_block_height = new_execution_pre_state.next_block_height(),
+ "ContractRuntime: not updating shared pre-state to older state"
+ );
+ }
+ }
+
+ let current_era_id = block.era_id();
+ let block_height = block.height();
+
+ if let Some(StepOutcome {
+ step_effects,
+ mut upcoming_era_validators,
+ }) = maybe_step_outcome
+ {
+ effect_builder
+ .announce_commit_step_success(current_era_id, step_effects)
+ .await;
+
+ if current_era_id.is_genesis() {
+ match upcoming_era_validators
+ .get(&current_era_id.successor())
+ .cloned()
+ {
+ Some(era_validators) => {
+ upcoming_era_validators.insert(EraId::default(), era_validators);
+ }
+ None => {
+ fatal!(effect_builder, "Missing era 1 validators").await;
+ }
+ }
+ }
+
+ effect_builder
+ .announce_upcoming_era_validators(current_era_id, upcoming_era_validators)
+ .await;
+ }
+
+ debug!(
+ block_hash = %block.hash(),
+ height = block.height(),
+ era = block.era_id().value(),
+ is_switch_block = block.is_switch_block(),
+ "executed block"
+ );
+
+ let artifacts_map: HashMap<_, _> = execution_artifacts
+ .iter()
+ .cloned()
+ .map(|artifact| (artifact.transaction_hash, artifact.execution_result))
+ .collect();
+
+ if meta_block_state.register_as_stored().was_updated() {
+ debug!(
+ %era_id,
+ %block_height,
+ "Storing block after execution"
+ );
+ effect_builder
+ .put_executed_block_to_storage(Arc::clone(&block), approvals_hashes, artifacts_map)
+ .await;
+ } else {
+ debug!(
+ %era_id,
+ %block_height,
+ "Block was already stored before execution, storing approvals"
+ );
+ effect_builder
.put_approvals_hashes_to_storage(approvals_hashes) + .await; + effect_builder + .put_execution_artifacts_to_storage( + *block.hash(), + block.height(), + block.era_id(), + artifacts_map, + ) + .await; + } + + // TODO: if it is an error why allow it in the first place? + if meta_block_state + .register_as_executed() + .was_already_registered() + { + error!( + block_hash = %block.hash(), + block_height = block.height(), + ?meta_block_state, + "should not execute the same block more than once" + ); + } + + if let Some(next_era_gas_price) = maybe_next_era_gas_price { + effect_builder + .announce_new_era_gas_price(current_era_id.successor(), next_era_gas_price) + .await; + } + let meta_block = MetaBlock::new_forward(block, execution_artifacts, meta_block_state); + effect_builder.announce_meta_block(meta_block).await; + + let next_block = exec_queue.remove(new_execution_pre_state.next_block_height()); + + // We schedule the next block from the queue to be executed, if available. + if let Some(QueueItem { + executable_block, + meta_block_state, + }) = next_block + { + metrics.exec_queue_size.dec(); + debug!("ContractRuntime: next block enqueue_block_for_execution"); + effect_builder + .enqueue_block_for_execution(executable_block, meta_block_state) + .await; + } +} + +pub(super) async fn handle_protocol_upgrade( + effect_builder: EffectBuilder, + data_access_layer: Arc>, + metrics: Arc, + upgrade_config: ProtocolUpgradeConfig, + next_block_height: u64, + parent_hash: BlockHash, + parent_seed: Digest, +) where + REv: From + + From + + From + + From + + From + + Send, +{ + debug!(?upgrade_config, "upgrade"); + let start = Instant::now(); + let upgrade_request = ProtocolUpgradeRequest::new(upgrade_config); + + let result = run_intensive_task(move || { + let result = data_access_layer.protocol_upgrade(upgrade_request); + if result.is_success() { + info!("committed upgrade"); + metrics + .commit_upgrade + .observe(start.elapsed().as_secs_f64()); + let flush_req = 
FlushRequest::new(); + if let FlushResult::Failure(err) = data_access_layer.flush(flush_req) { + return Err(format!("{:?}", err)); + } + } + + Ok(result) + }) + .await; + + match result { + Err(error_msg) => { + // The only way this happens is if there is a problem in the flushing. + error!(%error_msg, ":Error in post upgrade flush"); + fatal!(effect_builder, "{}", error_msg).await; + } + Ok(result) => match result { + ProtocolUpgradeResult::RootNotFound => { + let error_msg = "Root not found for protocol upgrade"; + fatal!(effect_builder, "{}", error_msg).await; + } + ProtocolUpgradeResult::Failure(err) => { + fatal!(effect_builder, "{:?}", err).await; + } + ProtocolUpgradeResult::Success { + post_state_hash, .. + } => { + let post_upgrade_state = ExecutionPreState::new( + next_block_height, + post_state_hash, + parent_hash, + parent_seed, + ); + + effect_builder + .update_contract_runtime_state(post_upgrade_state) + .await + } + }, + } +} + +fn generate_range_by_index( + highest_era: u64, + batch_size: u64, + batch_index: u64, +) -> Option> { + let start = batch_index.checked_mul(batch_size)?; + let end = cmp::min(start.checked_add(batch_size)?, highest_era); + Some(start..end) +} + +/// Calculates era keys to be pruned. +/// +/// Outcomes: +/// * Ok(Some(range)) -- these keys should be pruned +/// * Ok(None) -- nothing to do, either done, or there is not enough eras to prune +pub(super) fn calculate_prune_eras( + activation_era_id: EraId, + activation_height: u64, + current_height: u64, + batch_size: u64, +) -> Option> { + if batch_size == 0 { + // Nothing to do, the batch size is 0. 
+ return None; + } + + let nth_chunk: u64 = match current_height.checked_sub(activation_height) { + Some(nth_chunk) => nth_chunk, + None => { + // Time went backwards, programmer error, etc + error!( + %activation_era_id, + activation_height, + current_height, + batch_size, + "unable to calculate eras to prune (activation height higher than the block height)" + ); + panic!("activation height higher than the block height"); + } + }; + + let range = generate_range_by_index(activation_era_id.value(), batch_size, nth_chunk)?; + + if range.is_empty() { + return None; + } + + Some(range.map(EraId::new).map(Key::EraInfo).collect()) +} + +pub(crate) fn spec_exec_from_transfer_result( + limit: Gas, + transfer_result: TransferResult, + block_hash: BlockHash, +) -> SpeculativeExecutionResult { + let transfers = transfer_result.transfers().to_owned(); + let consumed = limit; + let effects = transfer_result.effects().to_owned(); + let messages = vec![]; + let error_msg = transfer_result + .error() + .to_owned() + .map(|err| format!("{:?}", err)); + + SpeculativeExecutionResult::new( + block_hash, transfers, limit, consumed, effects, messages, error_msg, + ) +} + +pub(crate) fn spec_exec_from_wasm_v1_result( + wasm_v1_result: WasmV1Result, + block_hash: BlockHash, +) -> SpeculativeExecutionResult { + let transfers = wasm_v1_result.transfers().to_owned(); + let limit = wasm_v1_result.limit().to_owned(); + let consumed = wasm_v1_result.consumed().to_owned(); + let effects = wasm_v1_result.effects().to_owned(); + let messages = wasm_v1_result.messages().to_owned(); + let error_msg = wasm_v1_result + .error() + .to_owned() + .map(|err| format!("{:?}", err)); + + SpeculativeExecutionResult::new( + block_hash, transfers, limit, consumed, effects, messages, error_msg, + ) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn calculation_is_safe_with_invalid_input() { + assert_eq!(calculate_prune_eras(EraId::new(0), 0, 0, 0), None); + 
assert_eq!(calculate_prune_eras(EraId::new(0), 0, 0, 5), None); + assert_eq!(calculate_prune_eras(EraId::new(u64::MAX), 0, 0, 0), None); + assert_eq!( + calculate_prune_eras(EraId::new(u64::MAX), 1, u64::MAX, u64::MAX), + None + ); + } + + #[test] + fn calculation_is_lazy() { + // NOTE: Range of EraInfos is lazy, so it does not consume memory, but getting the last + // batch out of u64::MAX of era info needs to iterate over all chunks. + assert!(calculate_prune_eras(EraId::new(u64::MAX), 0, u64::MAX, 100,).is_none(),); + assert_eq!( + calculate_prune_eras(EraId::new(u64::MAX), 1, 100, 100) + .unwrap() + .len(), + 100 + ); + } + + #[test] + fn should_calculate_prune_eras() { + let activation_height = 50; + let current_height = 50; + const ACTIVATION_POINT_ERA_ID: EraId = EraId::new(5); + + // batch size 1 + + assert_eq!( + calculate_prune_eras( + ACTIVATION_POINT_ERA_ID, + activation_height, + current_height, + 1, + ), + Some(vec![Key::EraInfo(EraId::new(0))]) + ); + assert_eq!( + calculate_prune_eras( + ACTIVATION_POINT_ERA_ID, + activation_height, + current_height + 1, + 1, + ), + Some(vec![Key::EraInfo(EraId::new(1))]) + ); + assert_eq!( + calculate_prune_eras( + ACTIVATION_POINT_ERA_ID, + activation_height, + current_height + 2, + 1, + ), + Some(vec![Key::EraInfo(EraId::new(2))]) + ); + assert_eq!( + calculate_prune_eras( + ACTIVATION_POINT_ERA_ID, + activation_height, + current_height + 3, + 1, + ), + Some(vec![Key::EraInfo(EraId::new(3))]) + ); + assert_eq!( + calculate_prune_eras( + ACTIVATION_POINT_ERA_ID, + activation_height, + current_height + 4, + 1, + ), + Some(vec![Key::EraInfo(EraId::new(4))]) + ); + assert_eq!( + calculate_prune_eras( + ACTIVATION_POINT_ERA_ID, + activation_height, + current_height + 5, + 1 + ), + None, + ); + assert_eq!( + calculate_prune_eras(ACTIVATION_POINT_ERA_ID, activation_height, u64::MAX, 1), + None, + ); + + // batch size 2 + + assert_eq!( + calculate_prune_eras( + ACTIVATION_POINT_ERA_ID, + activation_height, + 
current_height, + 2, + ), + Some(vec![ + Key::EraInfo(EraId::new(0)), + Key::EraInfo(EraId::new(1)), + ]) + ); + assert_eq!( + calculate_prune_eras( + ACTIVATION_POINT_ERA_ID, + activation_height, + current_height + 1, + 2, + ), + Some(vec![ + Key::EraInfo(EraId::new(2)), + Key::EraInfo(EraId::new(3)), + ]) + ); + assert_eq!( + calculate_prune_eras( + ACTIVATION_POINT_ERA_ID, + activation_height, + current_height + 2, + 2, + ), + Some(vec![Key::EraInfo(EraId::new(4))]) + ); + assert_eq!( + calculate_prune_eras( + ACTIVATION_POINT_ERA_ID, + activation_height, + current_height + 3, + 2, + ), + None + ); + assert_eq!( + calculate_prune_eras(ACTIVATION_POINT_ERA_ID, activation_height, u64::MAX, 2), + None, + ); + + // batch size 3 + + assert_eq!( + calculate_prune_eras( + ACTIVATION_POINT_ERA_ID, + activation_height, + current_height, + 3, + ), + Some(vec![ + Key::EraInfo(EraId::new(0)), + Key::EraInfo(EraId::new(1)), + Key::EraInfo(EraId::new(2)), + ]) + ); + assert_eq!( + calculate_prune_eras( + ACTIVATION_POINT_ERA_ID, + activation_height, + current_height + 1, + 3, + ), + Some(vec![ + Key::EraInfo(EraId::new(3)), + Key::EraInfo(EraId::new(4)), + ]) + ); + + assert_eq!( + calculate_prune_eras( + ACTIVATION_POINT_ERA_ID, + activation_height, + current_height + 2, + 3, + ), + None + ); + assert_eq!( + calculate_prune_eras(ACTIVATION_POINT_ERA_ID, activation_height, u64::MAX, 3), + None, + ); + + // batch size 4 + + assert_eq!( + calculate_prune_eras( + ACTIVATION_POINT_ERA_ID, + activation_height, + current_height, + 4, + ), + Some(vec![ + Key::EraInfo(EraId::new(0)), + Key::EraInfo(EraId::new(1)), + Key::EraInfo(EraId::new(2)), + Key::EraInfo(EraId::new(3)), + ]) + ); + assert_eq!( + calculate_prune_eras( + ACTIVATION_POINT_ERA_ID, + activation_height, + current_height + 1, + 4, + ), + Some(vec![Key::EraInfo(EraId::new(4))]) + ); + + assert_eq!( + calculate_prune_eras( + ACTIVATION_POINT_ERA_ID, + activation_height, + current_height + 2, + 4, + ), + None + ); + 
assert_eq!( + calculate_prune_eras(ACTIVATION_POINT_ERA_ID, activation_height, u64::MAX, 4), + None, + ); + + // batch size 5 + + assert_eq!( + calculate_prune_eras( + ACTIVATION_POINT_ERA_ID, + activation_height, + current_height, + 5, + ), + Some(vec![ + Key::EraInfo(EraId::new(0)), + Key::EraInfo(EraId::new(1)), + Key::EraInfo(EraId::new(2)), + Key::EraInfo(EraId::new(3)), + Key::EraInfo(EraId::new(4)), + ]) + ); + assert_eq!( + calculate_prune_eras( + ACTIVATION_POINT_ERA_ID, + activation_height, + current_height + 1, + 5 + ), + None, + ); + + assert_eq!( + calculate_prune_eras( + ACTIVATION_POINT_ERA_ID, + activation_height, + current_height + 2, + 5, + ), + None + ); + assert_eq!( + calculate_prune_eras(ACTIVATION_POINT_ERA_ID, activation_height, u64::MAX, 5), + None, + ); + + // batch size 6 + + assert_eq!( + calculate_prune_eras( + ACTIVATION_POINT_ERA_ID, + activation_height, + current_height, + 6, + ), + Some(vec![ + Key::EraInfo(EraId::new(0)), + Key::EraInfo(EraId::new(1)), + Key::EraInfo(EraId::new(2)), + Key::EraInfo(EraId::new(3)), + Key::EraInfo(EraId::new(4)), + ]) + ); + assert_eq!( + calculate_prune_eras( + ACTIVATION_POINT_ERA_ID, + activation_height, + current_height + 1, + 6 + ), + None, + ); + + assert_eq!( + calculate_prune_eras( + ACTIVATION_POINT_ERA_ID, + activation_height, + current_height + 2, + 6, + ), + None + ); + assert_eq!( + calculate_prune_eras(ACTIVATION_POINT_ERA_ID, activation_height, u64::MAX, 6), + None, + ); + + // batch size max + + assert_eq!( + calculate_prune_eras( + ACTIVATION_POINT_ERA_ID, + activation_height, + current_height, + u64::MAX, + ), + Some(vec![ + Key::EraInfo(EraId::new(0)), + Key::EraInfo(EraId::new(1)), + Key::EraInfo(EraId::new(2)), + Key::EraInfo(EraId::new(3)), + Key::EraInfo(EraId::new(4)), + ]) + ); + assert_eq!( + calculate_prune_eras( + ACTIVATION_POINT_ERA_ID, + activation_height, + current_height + 1, + u64::MAX, + ), + None, + ); + + assert_eq!( + calculate_prune_eras( + 
ACTIVATION_POINT_ERA_ID, + activation_height, + current_height + 2, + u64::MAX, + ), + None + ); + assert_eq!( + calculate_prune_eras( + ACTIVATION_POINT_ERA_ID, + activation_height, + u64::MAX, + u64::MAX, + ), + None, + ); + } +} diff --git a/node/src/components/deploy_acceptor.rs b/node/src/components/deploy_acceptor.rs deleted file mode 100644 index e474aef25d..0000000000 --- a/node/src/components/deploy_acceptor.rs +++ /dev/null @@ -1,248 +0,0 @@ -mod config; -mod event; - -use std::{convert::Infallible, fmt::Debug}; - -use thiserror::Error; -use tracing::{debug, error, info}; - -use crate::{ - components::Component, - effect::{ - announcements::DeployAcceptorAnnouncement, - requests::{ContractRuntimeRequest, StorageRequest}, - EffectBuilder, EffectExt, Effects, - }, - types::{chainspec::DeployConfig, Chainspec, Deploy, DeployValidationFailure, NodeId}, - utils::Source, - NodeRng, -}; -use casper_types::Key; - -use crate::effect::Responder; -pub use config::Config; -pub use event::Event; - -#[derive(Debug, Error)] -pub enum Error { - /// An invalid deploy was received from the client. - #[error("invalid deploy: {0}")] - InvalidDeploy(DeployValidationFailure), - /// An invalid account sent a deploy. - #[error("invalid account")] - InvalidAccount, - /// A deploy was sent from account with insufficient balance. - #[error("insufficient balance")] - InsufficientBalance, -} - -/// A helper trait constraining `DeployAcceptor` compatible reactor events. -pub trait ReactorEventT: - From - + From> - + From - + From - + Send -{ -} - -impl ReactorEventT for REv where - REv: From - + From> - + From - + From - + Send -{ -} - -/// The `DeployAcceptor` is the component which handles all new `Deploy`s immediately after they're -/// received by this node, regardless of whether they were provided by a peer or a client. -/// -/// It validates a new `Deploy` as far as possible, stores it if valid, then announces the newly- -/// accepted `Deploy`. 
-#[derive(Debug)] -pub struct DeployAcceptor { - chain_name: String, - deploy_config: DeployConfig, - verify_accounts: bool, -} - -impl DeployAcceptor { - pub(crate) fn new(config: Config, chainspec: &Chainspec) -> Self { - DeployAcceptor { - chain_name: chainspec.network_config.name.clone(), - deploy_config: chainspec.deploy_config, - verify_accounts: config.verify_accounts(), - } - } - - /// Handles receiving a new `Deploy` from a peer or client. - /// In the case of a peer, there should be no responder and the variant should be `None` - /// In the case of a client, there should be a responder to communicate the validity of the - /// deploy and the variant will be `Some` - fn accept( - &mut self, - effect_builder: EffectBuilder, - deploy: Box, - source: Source, - maybe_responder: Option>>, - ) -> Effects { - let mut cloned_deploy = deploy.clone(); - let mut effects = Effects::new(); - let is_acceptable = cloned_deploy.is_acceptable(&self.chain_name, &self.deploy_config); - if let Err(error) = is_acceptable { - // The client has submitted an invalid deploy. Return an error to the RPC component via - // the responder. - if let Some(responder) = maybe_responder { - effects.extend(responder.respond(Err(Error::InvalidDeploy(error))).ignore()); - } - effects.extend( - effect_builder - .announce_invalid_deploy(deploy, source) - .ignore(), - ); - return effects; - } - - let account_key = deploy.header().account().to_account_hash().into(); - - // Verify account if deploy received from client and node is configured to do so. 
- if source.from_client() && self.verify_accounts { - return effect_builder - .is_verified_account(account_key) - .event(move |verified| Event::AccountVerificationResult { - deploy, - source, - account_key, - verified, - maybe_responder, - }); - } - - effect_builder - .immediately() - .event(move |_| Event::AccountVerificationResult { - deploy, - source, - account_key, - verified: Some(true), - maybe_responder, - }) - } - - fn account_verification( - &mut self, - effect_builder: EffectBuilder, - deploy: Box, - source: Source, - account_key: Key, - verified: Option, - maybe_responder: Option>>, - ) -> Effects { - let mut effects = Effects::new(); - - match verified { - Some(true) => { - // The client submitted a valid deploy. Return an Ok status to the RPC component via - // the responder. - if let Some(responder) = maybe_responder { - effects.extend(responder.respond(Ok(())).ignore()); - } - - effects.extend(effect_builder.put_deploy_to_storage(deploy.clone()).event( - move |is_new| Event::PutToStorageResult { - deploy, - source, - is_new, - }, - )); - - return effects; - } - - Some(false) => { - info! { - "Received deploy from account {} that does not have minimum balance required", account_key - }; - // The client has submitted a deploy from an account that does not have minimum - // balance required. Return an error message to the RPC component via the responder. - if let Some(responder) = maybe_responder { - effects.extend(responder.respond(Err(Error::InsufficientBalance)).ignore()); - } - } - - None => { - // The client has submitted an invalid deploy. Return an error message to the RPC - // component via the responder. - info! 
{ - "Received deploy from invalid account using {}", account_key - }; - if let Some(responder) = maybe_responder { - effects.extend(responder.respond(Err(Error::InvalidAccount)).ignore()); - } - } - } - - effects.extend( - effect_builder - .announce_invalid_deploy(deploy, source) - .ignore(), - ); - effects - } - - fn handle_put_to_storage( - &mut self, - effect_builder: EffectBuilder, - deploy: Box, - source: Source, - is_new: bool, - ) -> Effects { - if is_new { - return effect_builder - .announce_new_deploy_accepted(deploy, source) - .ignore(); - } - Effects::new() - } -} - -impl Component for DeployAcceptor { - type Event = Event; - type ConstructionError = Infallible; - - fn handle_event( - &mut self, - effect_builder: EffectBuilder, - _rng: &mut NodeRng, - event: Self::Event, - ) -> Effects { - debug!(?event, "handling event"); - match event { - Event::Accept { - deploy, - source, - responder, - } => self.accept(effect_builder, deploy, source, responder), - Event::PutToStorageResult { - deploy, - source, - is_new, - } => self.handle_put_to_storage(effect_builder, deploy, source, is_new), - Event::AccountVerificationResult { - deploy, - source, - account_key, - verified, - maybe_responder, - } => self.account_verification( - effect_builder, - deploy, - source, - account_key, - verified, - maybe_responder, - ), - } - } -} diff --git a/node/src/components/deploy_acceptor/config.rs b/node/src/components/deploy_acceptor/config.rs deleted file mode 100644 index 450c3fbfc1..0000000000 --- a/node/src/components/deploy_acceptor/config.rs +++ /dev/null @@ -1,28 +0,0 @@ -use datasize::DataSize; -use serde::{Deserialize, Serialize}; - -/// Configuration options for fetching. -#[derive(Copy, Clone, DataSize, Debug, Deserialize, Serialize)] -pub struct Config { - verify_accounts: bool, -} - -impl Config { - /// Constructor for deploy_acceptor config. - pub fn new(verify_accounts: bool) -> Self { - Config { verify_accounts } - } - - /// Get verify_accounts setting. 
- pub(crate) fn verify_accounts(&self) -> bool { - self.verify_accounts - } -} - -impl Default for Config { - fn default() -> Self { - Config { - verify_accounts: true, - } - } -} diff --git a/node/src/components/deploy_acceptor/event.rs b/node/src/components/deploy_acceptor/event.rs deleted file mode 100644 index 9261e47a93..0000000000 --- a/node/src/components/deploy_acceptor/event.rs +++ /dev/null @@ -1,80 +0,0 @@ -use std::fmt::{self, Display, Formatter}; - -use serde::Serialize; - -use super::Source; -use crate::{ - components::deploy_acceptor::Error, - effect::{announcements::RpcServerAnnouncement, Responder}, - types::{Deploy, NodeId}, -}; -use casper_types::Key; - -/// `DeployAcceptor` events. -#[derive(Debug, Serialize)] -pub enum Event { - /// The initiating event to accept a new `Deploy`. - Accept { - deploy: Box, - source: Source, - responder: Option>>, - }, - /// The result of the `DeployAcceptor` putting a `Deploy` to the storage component. - PutToStorageResult { - deploy: Box, - source: Source, - is_new: bool, - }, - /// The result of verifying `Account` exists and has meets minimum balance requirements. - AccountVerificationResult { - deploy: Box, - source: Source, - account_key: Key, - verified: Option, - maybe_responder: Option>>, - }, -} - -impl From for Event { - fn from(announcement: RpcServerAnnouncement) -> Self { - match announcement { - RpcServerAnnouncement::DeployReceived { deploy, responder } => Event::Accept { - deploy, - source: Source::::Client, - responder, - }, - } - } -} - -impl Display for Event { - fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { - match self { - Event::Accept { deploy, source, .. } => { - write!(formatter, "accept {} from {}", deploy.id(), source) - } - Event::PutToStorageResult { deploy, is_new, .. 
} => { - if *is_new { - write!(formatter, "put new {} to storage", deploy.id()) - } else { - write!(formatter, "had already stored {}", deploy.id()) - } - } - Event::AccountVerificationResult { - deploy, - account_key, - verified, - .. - } => { - let prefix = if verified.unwrap_or(false) { "" } else { "in" }; - write!( - formatter, - "{}valid deploy {} from account {}", - prefix, - deploy.id(), - account_key - ) - } - } - } -} diff --git a/node/src/components/diagnostics_port.rs b/node/src/components/diagnostics_port.rs new file mode 100644 index 0000000000..682750c9b2 --- /dev/null +++ b/node/src/components/diagnostics_port.rs @@ -0,0 +1,326 @@ +//! Diagnostics port component. +//! +//! The diagnostics port listens on a configurable unix socket for incoming connections and allows +//! deep debug access to a running node via special commands. + +mod command; +mod stop_at; +mod tasks; +mod util; + +use std::{ + fmt::{self, Display, Formatter}, + fs, io, + path::{Path, PathBuf}, +}; + +use datasize::DataSize; +use serde::{Deserialize, Serialize}; +use thiserror::Error; +use tokio::{net::UnixListener, sync::watch}; +use tracing::{debug, error, info, warn}; + +use crate::{ + components::{Component, ComponentState, InitializedComponent, PortBoundComponent}, + effect::{ + announcements::ControlAnnouncement, + diagnostics_port::DumpConsensusStateRequest, + requests::{NetworkInfoRequest, SetNodeStopRequest}, + EffectBuilder, EffectExt, Effects, + }, + reactor::main_reactor::MainEvent, + types::NodeRng, + utils::umask, + WithDir, +}; +pub(crate) use stop_at::StopAtSpec; +pub use tasks::FileSerializer; +use util::ShowUnixAddr; + +const COMPONENT_NAME: &str = "diagnostics_port"; + +/// Diagnostics port configuration. +#[derive(Clone, DataSize, Debug, Serialize, Deserialize)] +pub struct Config { + /// Whether or not the diagnostics port is enabled. + pub enabled: bool, + /// Path to listen on. + pub socket_path: PathBuf, + /// `umask` to apply before creating the socket. 
+ pub socket_umask: u16, +} + +impl Default for Config { + fn default() -> Self { + Self { + enabled: false, + socket_path: "debug.socket".into(), + socket_umask: 0o077, + } + } +} + +/// Diagnostics port component. +#[derive(Debug, DataSize)] +pub(crate) struct DiagnosticsPort { + state: ComponentState, + /// Sender which will cause server and client connections to exit when dropped. + #[data_size(skip)] + _shutdown_sender: Option>, // only used for its `Drop` impl + config: WithDir, +} + +impl DiagnosticsPort { + /// Creates a new diagnostics port component. + pub(crate) fn new(config: WithDir) -> Self { + DiagnosticsPort { + state: ComponentState::Uninitialized, + config, + _shutdown_sender: None, + } + } +} + +/// Diagnostics port event. +#[derive(Debug, Serialize)] +pub(crate) enum Event { + Initialize, +} + +impl Display for Event { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.write_str("diagnostics port event") + } +} + +/// A diagnostics port initialization error. +#[derive(Debug, Error)] +pub(crate) enum Error { + /// Error setting up the diagnostics port's unix socket listener. 
+ #[error("could not setup diagnostics port listener")] + SetupListener(#[from] io::Error), +} + +impl Component for DiagnosticsPort +where + REv: From + + From + + From + + From + + From + + Send, +{ + type Event = Event; + + fn handle_event( + &mut self, + effect_builder: EffectBuilder, + _rng: &mut NodeRng, + event: Event, + ) -> Effects { + match &self.state { + ComponentState::Fatal(msg) => { + error!( + msg, + ?event, + name = >::name(self), + "should not handle this event when this component has fatal error" + ); + Effects::new() + } + ComponentState::Uninitialized => { + warn!( + ?event, + name = >::name(self), + "should not handle this event when component is uninitialized" + ); + Effects::new() + } + ComponentState::Initializing => match event { + Event::Initialize => { + if self.state != ComponentState::Initializing { + return Effects::new(); + } + let (effects, state) = self.bind(self.config.value().enabled, effect_builder); + >::set_state(self, state); + effects + } + }, + ComponentState::Initialized => Effects::new(), + } + } + + fn name(&self) -> &str { + COMPONENT_NAME + } +} + +impl InitializedComponent for DiagnosticsPort +where + REv: From + + From + + From + + From + + From + + Send, +{ + fn state(&self) -> &ComponentState { + &self.state + } + + fn set_state(&mut self, new_state: ComponentState) { + info!( + ?new_state, + name = >::name(self), + "component state changed" + ); + + self.state = new_state; + } +} + +impl PortBoundComponent for DiagnosticsPort +where + REv: From + + From + + From + + From + + From + + Send, +{ + type Error = Error; + type ComponentEvent = Event; + + fn listen( + &mut self, + effect_builder: EffectBuilder, + ) -> Result, Self::Error> { + let (shutdown_sender, shutdown_receiver) = watch::channel(()); + + self._shutdown_sender = Some(shutdown_sender); + + let cfg = self.config.value(); + + let socket_path = self.config.with_dir(cfg.socket_path.clone()); + let listener = setup_listener( + &socket_path, + // Mac OS X / 
Linux use different types for the mask, so we need to call .into() here. + #[allow(clippy::useless_conversion)] + cfg.socket_umask.into(), + )?; + let server = tasks::server(effect_builder, socket_path, listener, shutdown_receiver); + Ok(server.ignore()) + } +} + +/// Sets up a UNIX socket listener at the given path. +/// +/// If the socket already exists, an attempt to delete it is made. Errors during deletion are +/// ignored, but may cause the subsequent socket opening to fail. +fn setup_listener>(path: P, socket_umask: umask::Mode) -> io::Result { + let socket_path = path.as_ref(); + + // This would be racy, but no one is racing us for the socket, so we'll just do a naive + // check-then-delete :). + if socket_path.exists() { + debug!(socket_path=%socket_path.display(), "found stale socket file, trying to remove"); + match fs::remove_file(socket_path) { + Ok(_) => { + debug!("stale socket file removed"); + } + Err(err) => { + // This happens if a background program races us for the removal, as it usually + // means the file is already gone. We can ignore this, but make note of it in the + // log. + warn!(%err, "could not remove stale socket file, assuming race with other process"); + } + } + } + + // This is not thread-safe, as it will set the umask for the entire process, but we assume that + // initialization happens "sufficiently single-threaded". 
+ let umask_guard = umask::temp_umask(socket_umask); + let listener = UnixListener::bind(socket_path)?; + drop(umask_guard); + + debug!(local_addr=%ShowUnixAddr(&listener.local_addr()?), "diagnostics port listening"); + + Ok(listener) +} + +#[cfg(test)] +mod tests { + use std::{ + fs, + os::unix::prelude::{FileTypeExt, PermissionsExt}, + }; + + use tokio::{ + io::{AsyncReadExt, AsyncWriteExt}, + net::UnixStream, + }; + + use super::setup_listener; + + #[tokio::test] + async fn setup_listener_creates_listener() { + const TEST_MESSAGE: &[u8] = b"hello, world!"; + + let tmpdir = tempfile::tempdir().expect("could not create tempdir"); + let socket_path = tmpdir.path().join("test.socket"); + + // We give it a strict umask to check. + let listener = setup_listener(&socket_path, 0o077).expect("could not setup listener"); + + let meta = fs::metadata(&socket_path).expect("could not get metadata"); + // With the given umask, world and group permissions should be 0. + assert_eq!(meta.permissions().mode() & 0o077, 0); + + // Attempt to connect. 
+ tokio::spawn(async move { + let mut stream = UnixStream::connect(socket_path) + .await + .expect("could not connect to listener"); + stream + .write_all(TEST_MESSAGE) + .await + .expect("could not write to listener"); + }); + + let (mut stream, _socket_addr) = listener + .accept() + .await + .expect("could not accept connection"); + + let mut buffer = Vec::new(); + stream + .read_to_end(&mut buffer) + .await + .expect("failed to read to end"); + assert_eq!(TEST_MESSAGE, buffer.as_slice()); + } + + #[tokio::test] + async fn setup_listener_removes_previous_listener() { + let tmpdir = tempfile::tempdir().expect("could not create tempdir"); + let socket_path = tmpdir.path().join("overwrite-me.socket"); + + fs::write(&socket_path, b"this-file-should-be-deleted-soon") + .expect("could not write to socket-blocking temporary file"); + + let meta = fs::metadata(&socket_path).expect("could not get metadata"); + assert!( + !meta.file_type().is_socket(), + "temporary file created should not be a socket" + ); + + // Creating the listener should remove the underlying file. + let _listener = setup_listener(&socket_path, 0o022).expect("could not setup listener"); + + let meta = fs::metadata(&socket_path).expect("could not get metadata"); + assert!( + meta.file_type().is_socket(), + "did not overwrite previous file" + ); + } +} diff --git a/node/src/components/diagnostics_port/command.rs b/node/src/components/diagnostics_port/command.rs new file mode 100644 index 0000000000..d482157beb --- /dev/null +++ b/node/src/components/diagnostics_port/command.rs @@ -0,0 +1,166 @@ +use std::{ + fmt::{self, Display, Formatter}, + str::FromStr, +}; + +use serde::Serialize; +use structopt::StructOpt; +use thiserror::Error; + +use super::StopAtSpec; + +/// Command processing error. +/// +/// Failures that occur when trying to parse an incoming client message. +#[derive(Debug, Error)] +pub(super) enum Error { + /// Error processing a line using the shell-like lexer. 
+ #[error("failed to split line using shell lexing rules")] + ShlexFailure, + /// Not a valid command input. + #[error(transparent)] + Invalid(#[from] structopt::clap::Error), +} + +/// Output format information is sent back to the client it. +#[derive(Clone, Copy, Debug, Eq, PartialEq, Serialize, Default)] +pub(super) enum OutputFormat { + /// Human-readable interactive format. + /// + /// No string form, utilizes the `Display` implementation of types passed in. + #[default] + Interactive, + /// JSON, pretty-printed. + Json, + /// Binary using bincode. + Bincode, +} + +impl Display for OutputFormat { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self { + OutputFormat::Interactive => f.write_str("interactive"), + OutputFormat::Json => f.write_str("json"), + OutputFormat::Bincode => f.write_str("bincode"), + } + } +} + +impl FromStr for OutputFormat { + type Err = &'static str; + + fn from_str(s: &str) -> Result { + match s.to_lowercase().as_str() { + "interactive" | "i" => Ok(OutputFormat::Interactive), + "json" | "j" => Ok(OutputFormat::Json), + "bincode" | "b" => Ok(OutputFormat::Bincode), + _ => Err("invalid output format, must be one of 'interactive', 'json', 'bincode'"), + } + } +} + +/// Action to perform. +#[derive(Debug, StructOpt)] +pub(super) enum Action { + /// Retrieve the active diagnostics port session information. + Session, + /// Set options on active diagnostics port session. + Set { + /// Whether or not to omit command confirmation after every command sent. Defaults to off, + /// meaning commands WILL send confirmations. + #[structopt(short, long)] + quiet: Option, + /// Output format for any type of response, one of `interactive`, `json` or `bincode`. + /// Defaults to `interactive`. + #[structopt(short, long)] + output: Option, + }, + /// Show the current log filter configuration. + GetLogFilter, + /// Change the current log filter configuration. 
+ SetLogFilter { directive: String }, + /// Dump the state of the consensus component. + /// + /// It is recommended to set the output format to `bincode` if the data is to be visualized + /// after. + DumpConsensus { + /// Era to dump. If omitted, dumps the latest era. + era: Option, + }, + /// Dump the event queues. + DumpQueues, + /// Get detailed networking insights. + NetInfo, + /// Stop the node at a certain condition. + Stop { + /// When to stop the node. + /// + /// Supports `block:12345` for block height, `era:123` for eras, `block:next` / `era:end` + /// to stop on an upcoming block or switch block, or `now` to stop immediately. Defaults to + /// `block:next`." + /// + /// Returns the previously set stopping point, if any. + #[structopt(short, long, default_value)] + at: StopAtSpec, + /// Ignore all further options to stop and clear any currently scheduled stops. + #[structopt(short, long)] + clear: bool, + }, + /// Activate or clear a failpoint. + /// + /// Failpoint syntax is as follows: `key(,meta:meta_value)*(=value)?`, with `key` being the + /// identifier of the failpoint, `meta` being additional settings, and `value` JSON encoded. + /// + /// If `value` is not set, the failpoint is cleared instead of being set. + /// + /// The following `meta` values are understood: + /// + /// * `sub` sets the subkey (example: `sub:e4c2a1f`) + /// * `p` sets the probability, must be between `0.0` and `1.0` (example: `p:0.1`) + /// * `once` has no value and indicates the failpoint should only be fired once. + /// + /// No colons or commas are allowed in `key`, `meta` or `meta_value`. + /// + /// Examples: + /// + /// * `foobar` clears the failpoint with key "foobar". + /// * `foobar,sub:example value,p:0.123,once={"hello": "world"}` sets the failpoint "foobar", + /// with a subkey of "example value", a probability of 12.3%, to be fired only once, and a + /// JSON encoded value of `{"hello": "world"}`. + SetFailpoint { + /// The failpoint activation/deactivation. 
+ activation: String, + }, + /// Close connection server-side. + Quit, +} + +/// A command to be performed on the node's diagnostic port. +#[derive(Debug, StructOpt)] +pub(super) struct Command { + #[structopt(subcommand)] + pub(super) action: Action, +} + +impl Command { + /// Parses a line of input into a `Command`. + pub(super) fn from_line(line: &str) -> Result { + let mut parts = vec!["casper-diagnostics-port".to_owned()]; + parts.extend(shlex::split(line).ok_or(Error::ShlexFailure)?); + Ok(Self::from_iter_safe(parts.into_iter())?) + } +} + +#[cfg(test)] +mod tests { + use crate::components::diagnostics_port::command::{Action, Command}; + + #[test] + fn can_parse_simple_commands() { + let cmd = Command::from_line("dump-consensus 123").expect("command parsing failed"); + assert!(matches!(cmd.action, Action::DumpConsensus { era } if era == Some(123))); + + let cmd = Command::from_line("dump-queues").expect("command parsing failed"); + assert!(matches!(cmd.action, Action::DumpQueues)); + } +} diff --git a/node/src/components/diagnostics_port/stop_at.rs b/node/src/components/diagnostics_port/stop_at.rs new file mode 100644 index 0000000000..ac80142617 --- /dev/null +++ b/node/src/components/diagnostics_port/stop_at.rs @@ -0,0 +1,106 @@ +use std::{ + fmt::{self, Display, Formatter}, + str::FromStr, +}; + +use casper_types::EraId; +use datasize::DataSize; +use serde::Serialize; + +/// A specification for a stopping point. +#[derive(Copy, Clone, DataSize, Debug, Eq, PartialEq, Serialize, Default)] +#[cfg_attr(test, derive(proptest_derive::Arbitrary))] +pub(crate) enum StopAtSpec { + /// Stop after completion of the current block. + #[default] + NextBlock, + /// Stop after the completion of the next switch block. + EndOfCurrentEra, + /// Stop immediately. + Immediately, + /// Stop at a given block height. + BlockHeight(u64), + /// Stop at a given era id. 
+ EraId(EraId), +} + +impl Display for StopAtSpec { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self { + StopAtSpec::NextBlock => f.write_str("block:next"), + StopAtSpec::EndOfCurrentEra => f.write_str("era:end"), + StopAtSpec::Immediately => f.write_str("now"), + StopAtSpec::BlockHeight(height) => write!(f, "block:{}", height), + StopAtSpec::EraId(era_id) => write!(f, "era:{}", era_id.value()), + } + } +} + +impl FromStr for StopAtSpec { + type Err = String; + + fn from_str(s: &str) -> Result { + match s { + "block:next" => Ok(StopAtSpec::NextBlock), + "era:end" => Ok(StopAtSpec::EndOfCurrentEra), + "now" => Ok(StopAtSpec::Immediately), + val if val.starts_with("block:") => u64::from_str(&val[6..]) + .map_err(|err| format!("could not parse block height: {}", err)) + .map(StopAtSpec::BlockHeight), + val if val.starts_with("era:") => u64::from_str(&val[4..]) + .map_err(|err| format!("could not parse era id: {}", err)) + .map(EraId::new) + .map(StopAtSpec::EraId), + _ => Err("invalid stop-at specification".to_string()), + } + } +} + +#[cfg(test)] +mod tests { + use std::str::FromStr; + + use super::StopAtSpec; + use casper_types::EraId; + use proptest::proptest; + + proptest! 
{ + #[test] + fn roundtrip_stop_at_spec(stop_at: StopAtSpec) { + let rendered = stop_at.to_string(); + let parsed = StopAtSpec::from_str(rendered.as_str()).expect("failed to roundtrip"); + assert_eq!(stop_at, parsed); + } + + #[test] + fn string_fuzz_stop_at_spec(input in ".*") { + let _outcome = StopAtSpec::from_str(&input); + } + + #[test] + fn prefixed_examples(input in "(era|block):.*") { + let _outcome = StopAtSpec::from_str(&input); + } + } + + #[test] + fn known_good_examples() { + assert_eq!( + Ok(StopAtSpec::NextBlock), + StopAtSpec::from_str("block:next") + ); + assert_eq!( + Ok(StopAtSpec::EndOfCurrentEra), + StopAtSpec::from_str("era:end") + ); + assert_eq!(Ok(StopAtSpec::Immediately), StopAtSpec::from_str("now")); + assert_eq!( + Ok(StopAtSpec::BlockHeight(123)), + StopAtSpec::from_str("block:123") + ); + assert_eq!( + Ok(StopAtSpec::EraId(EraId::new(123))), + StopAtSpec::from_str("era:123") + ); + } +} diff --git a/node/src/components/diagnostics_port/tasks.rs b/node/src/components/diagnostics_port/tasks.rs new file mode 100644 index 0000000000..12b62eabd5 --- /dev/null +++ b/node/src/components/diagnostics_port/tasks.rs @@ -0,0 +1,869 @@ +use std::{ + borrow::Cow, + fmt::{self, Debug, Display, Formatter}, + fs::{self, File}, + io, + path::PathBuf, + str::FromStr, +}; + +use bincode::{ + config::{AllowTrailing, FixintEncoding, WithOtherIntEncoding, WithOtherTrailing}, + DefaultOptions, Options, +}; +use erased_serde::Serializer as ErasedSerializer; +use futures::future::{self, Either}; +use serde::Serialize; +use thiserror::Error; +use tokio::{ + io::{AsyncBufReadExt, AsyncRead, AsyncWriteExt, BufReader}, + net::{unix::OwnedWriteHalf, UnixListener, UnixStream}, + sync::watch, +}; +use tracing::{debug, info, info_span, warn, Instrument}; + +use casper_types::EraId; +use tracing_subscriber::{filter::ParseError, EnvFilter}; + +use super::{ + command::{Action, Command, OutputFormat}, + util::ShowUnixAddr, +}; +use crate::{ + 
components::consensus::EraDump, + effect::{ + announcements::{ControlAnnouncement, QueueDumpFormat}, + diagnostics_port::DumpConsensusStateRequest, + requests::{NetworkInfoRequest, SetNodeStopRequest}, + EffectBuilder, + }, + failpoints::FailpointActivation, + logging, + utils::{display_error, opt_display::OptDisplay}, +}; + +/// Success or failure response. +/// +/// This response is sent back to clients after every operation (unless suppressed in quiet mode), +/// indicating the outcome of the operation. +#[derive(Debug, Serialize)] +enum Outcome { + /// Operation succeeded. + Success { + /// Human-readable message giving additional info and/or stating the effect. + msg: String, + }, + /// Operation failed. + Failure { + /// Human-readable message describing the failure that occurred. + reason: String, + }, +} + +impl Outcome { + /// Constructs a new successful outcome. + fn success(msg: S) -> Self { + Outcome::Success { + msg: msg.to_string(), + } + } + + /// Constructs a new failed outcome. + fn failed(reason: S) -> Self { + Outcome::Failure { + reason: reason.to_string(), + } + } +} + +impl Display for Outcome { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self { + Outcome::Success { msg } => { + write!(f, "OK {}", msg) + } + Outcome::Failure { reason } => { + write!(f, "ERR {}", reason) + } + } + } +} + +/// Configuration for a connection diagnostics port session. +#[derive(Copy, Clone, Debug, Default, Serialize)] +struct Session { + /// Whether or not to suppress the operation outcome. + quiet: bool, + /// Output format to send to client. + output: OutputFormat, +} + +impl Display for Session { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + Debug::fmt(self, f) + } +} + +/// A serializer supporting multiple format variants that writes into a file. +pub enum FileSerializer { + /// JSON-format serializer. + Json(serde_json::Serializer), + /// Bincode-format serializer. 
+ Bincode( + bincode::Serializer< + File, + WithOtherTrailing, AllowTrailing>, + >, + ), +} + +impl FileSerializer { + /// Converts the temp file serializer into an actual erased serializer. + pub fn as_serializer<'a>(&'a mut self) -> Box { + match self { + FileSerializer::Json(json) => Box::new(::erase(json)), + FileSerializer::Bincode(bincode) => { + Box::new(::erase(bincode)) + } + } + } +} + +/// Error obtaining a queue dump. +#[derive(Debug, Error)] +enum ObtainDumpError { + /// Error trying to create a temporary directory. + #[error("could not create temporary directory")] + CreateTempDir(#[source] io::Error), + /// Error trying to create a file in the temporary directory. + #[error("could not create file in temporary directory")] + CreateTempFile(#[source] io::Error), + /// Error trying to reopen the file in the temporary directory after writing. + #[error("could not reopen file in temporary directory")] + ReopenTempFile(#[source] io::Error), +} + +impl Session { + /// Creates a serializer for an `EraDump`. + fn create_era_dump_serializer(&self) -> fn(&EraDump<'_>) -> Result, Cow<'static, str>> { + match self.output { + OutputFormat::Interactive => |data: &EraDump| { + let mut buf = data.to_string().into_bytes(); + buf.push(b'\n'); + Ok(buf) + }, + OutputFormat::Json => |data: &EraDump| { + let mut buf = serde_json::to_vec(&data).map_err(|err| { + Cow::Owned(format!("failed to serialize era dump as JSON: {}", err)) + })?; + buf.push(b'\n'); + Ok(buf) + }, + OutputFormat::Bincode => |data: &EraDump| { + bincode::serialize(&data).map_err(|err| { + Cow::Owned(format!("failed to serialize era dump as bincode: {}", err)) + }) + }, + } + } + + /// Creates a generic serializer that is writing to a temporary file. + /// + /// The resulting serializer will write to the given file. 
+ fn create_queue_dump_format(&self, file: File) -> QueueDumpFormat { + match self.output { + OutputFormat::Interactive => QueueDumpFormat::debug(file), + OutputFormat::Json => { + QueueDumpFormat::serde(FileSerializer::Json(serde_json::Serializer::new(file))) + } + OutputFormat::Bincode => { + QueueDumpFormat::serde(FileSerializer::Bincode(bincode::Serializer::new( + file, + // TODO: Do not use `bincode::serialize` above, but rather always instantiate + // options across the file to ensure it is always the same. + DefaultOptions::new() + .with_fixint_encoding() + .allow_trailing_bytes(), + ))) + } + } + } + + /// Processes a single command line sent from a client. + async fn process_line( + &mut self, + effect_builder: EffectBuilder, + writer: &mut OwnedWriteHalf, + line: &str, + ) -> io::Result + where + REv: From + + From + + From + + From + + Send, + { + debug!(%line, "line received"); + match Command::from_line(line) { + Ok(ref cmd) => { + info!(?cmd, "processing command"); + match cmd.action { + Action::Session => { + self.send_outcome(writer, &Outcome::success("showing session info")) + .await?; + self.send_to_client(writer, &self).await?; + } + Action::Set { quiet, output } => { + let mut changed = false; + + if let Some(quiet) = quiet { + changed |= self.quiet != quiet; + self.quiet = quiet; + } + + if let Some(output) = output { + changed |= self.output != output; + self.output = output; + } + + if changed { + self.send_outcome(writer, &Outcome::success("session updated")) + .await?; + } else { + self.send_outcome(writer, &Outcome::success("session unchanged")) + .await?; + } + } + Action::GetLogFilter => match logging::display_global_env_filter() { + Ok(formatted) => { + self.send_outcome(writer, &Outcome::success("found log filter")) + .await?; + self.send_to_client(writer, &formatted).await?; + } + Err(err) => { + self.send_outcome( + writer, + &Outcome::failed(format!("failed to retrieve log filter: {}", err)), + ) + .await?; + } + }, + 
Action::SetLogFilter { ref directive } => match set_log_filter(directive) { + Ok(()) => { + self.send_outcome( + writer, + &Outcome::success("new logging directive set"), + ) + .await?; + } + Err(err) => { + self.send_outcome( + writer, + &Outcome::failed(format!( + "failed to set new logging directive: {}", + err + )), + ) + .await?; + } + }, + Action::DumpConsensus { era } => { + let output = effect_builder + .diagnostics_port_dump_consensus_state( + era.map(EraId::new), + self.create_era_dump_serializer(), + ) + .await; + + match output { + Ok(ref data) => { + self.send_outcome( + writer, + &Outcome::success("dumping consensus state"), + ) + .await?; + writer.write_all(data).await?; + } + Err(err) => { + self.send_outcome(writer, &Outcome::failed(err)).await?; + } + } + } + Action::DumpQueues => { + // Note: The preferable approach would be to use a tempfile instead of a + // named one in a temporary directory, and return it through the + // responder. This is currently hamstrung by `bincode` not allowing + // the retrival of the inner writer from its serializer. 
+ + match self.obtain_queue_dump(effect_builder).await { + Ok(file) => { + self.send_outcome(writer, &Outcome::success("dumping queues")) + .await?; + + let mut tokio_file = tokio::fs::File::from_std(file); + self.stream_to_client(writer, &mut tokio_file).await?; + } + Err(err) => { + self.send_outcome( + writer, + &Outcome::failed(format!( + "failed to obtain dump: {}", + display_error(&err) + )), + ) + .await?; + } + }; + } + Action::NetInfo => { + self.send_outcome(writer, &Outcome::success("collecting insights")) + .await?; + let insights = effect_builder.get_network_insights().await; + self.send_to_client(writer, &insights).await?; + } + Action::Stop { at, clear } => { + let (msg, stop_at) = if clear { + ("clearing stopping point", None) + } else { + ("setting new stopping point", Some(at)) + }; + let prev = effect_builder.set_node_stop_at(stop_at).await; + self.send_outcome(writer, &Outcome::success(msg)).await?; + self.send_to_client( + writer, + &OptDisplay::new(prev, "no previous stop-at spec"), + ) + .await?; + } + Action::SetFailpoint { ref activation } => { + match FailpointActivation::from_str(activation) { + Ok(fp_activation) => { + effect_builder.activate_failpoint(fp_activation).await; + + self.send_outcome( + writer, + &Outcome::success("failpoint activation sent".to_string()), + ) + .await?; + } + Err(ref err) => { + self.send_outcome( + writer, + &Outcome::failed(format!( + "invalid failpoint activation: {}", + display_error(err) + )), + ) + .await?; + } + } + } + Action::Quit => { + self.send_outcome(writer, &Outcome::success("goodbye!")) + .await?; + return Ok(false); + } + }; + } + Err(err) => { + self.send_outcome(writer, &Outcome::failed(err.to_string().as_str())) + .await? + } + } + + Ok(true) + } + + /// Obtains a queue dump from the reactor. + /// + /// Returns an open file that contains the entire dump. 
+ async fn obtain_queue_dump( + &self, + effect_builder: EffectBuilder, + ) -> Result + where + REv: From + Send, + { + // Note: The preferable approach would be to use a tempfile instead of a + // named one in a temporary directory, and return it through the + // responder. This is currently hamstrung since `bincode` does not + // allow retrieving the inner writer from its serializer. + + let tempdir = tempfile::tempdir().map_err(ObtainDumpError::CreateTempDir)?; + let tempfile_path = tempdir.path().join("queue-dump"); + + let tempfile = File::create(&tempfile_path).map_err(ObtainDumpError::CreateTempFile)?; + + effect_builder + .diagnostics_port_dump_queue(self.create_queue_dump_format(tempfile)) + .await; + + // We can now reopen the file and return it. + let reopened_tempfile = + File::open(tempfile_path).map_err(ObtainDumpError::ReopenTempFile)?; + Ok(reopened_tempfile) + } + + /// Sends an operation outcome. + /// + /// The outcome will be silently dropped if the session is in quiet mode. + async fn send_outcome( + &self, + writer: &mut OwnedWriteHalf, + response: &Outcome, + ) -> io::Result<()> { + if self.quiet { + return Ok(()); + } + + self.send_to_client(writer, response).await + } + + /// Sends a message to the client. + /// + /// Any type of message can be sent to a client, as long as it has a `Display` (use for + /// `interactive` encoding) and `Serialize` (used for `bincode` and `json`) implementation. 
+ async fn send_to_client(&self, writer: &mut OwnedWriteHalf, response: &T) -> io::Result<()> + where + T: Display + Serialize, + { + match self.output { + OutputFormat::Interactive => { + writer.write_all(response.to_string().as_bytes()).await?; + writer.write_all(b"\n").await?; + } + OutputFormat::Json => { + info!("sending json"); + let buf = serde_json::to_string_pretty(response).map_err(|err| { + warn!(%err, "error outputting JSON string"); + io::Error::new(io::ErrorKind::Other, err) + })?; + writer.write_all(buf.as_bytes()).await?; + writer.write_all(b"\n").await?; + } + OutputFormat::Bincode => { + let buf = bincode::serialize(response) + .map_err(|err| io::Error::new(io::ErrorKind::Other, err))?; + writer.write_all(&buf).await?; + } + } + + Ok(()) + } + + /// Streams data from a source to the client. + /// + /// Returns the number of bytes sent. + async fn stream_to_client( + &self, + writer: &mut OwnedWriteHalf, + src: &mut R, + ) -> io::Result { + tokio::io::copy(src, writer).await + } +} + +/// Error while trying to set the global log filter. +#[derive(Debug, Error)] +enum SetLogFilterError { + /// Failed to parse the given directive (the `RUST_LOG=...directive` string). + #[error("could not parse filter directive")] + ParseError(ParseError), + /// Failure setting the correctly parsed filter. + #[error("failed to set global filter")] + SetFailed(anyhow::Error), +} + +/// Sets the global log using the given new directive. +fn set_log_filter(filter_str: &str) -> Result<(), SetLogFilterError> { + let new_filter = EnvFilter::try_new(filter_str).map_err(SetLogFilterError::ParseError)?; + + logging::reload_global_env_filter(new_filter).map_err(SetLogFilterError::SetFailed) +} + +/// Handler for client connection. +/// +/// The core loop for the diagnostics port; reads commands via unix socket and processes them. +/// +/// # Security +/// +/// The handler itself will buffer an unlimited amount of data if no newline is encountered in the +/// input stream. 
For this reason ensure that only trusted client connect to the socket producing +/// the passed in `stream`. +async fn handler( + effect_builder: EffectBuilder, + stream: UnixStream, + mut shutdown_receiver: watch::Receiver<()>, +) -> io::Result<()> +where + REv: From + + From + + From + + From + + Send, +{ + debug!("accepted new connection on diagnostics port"); + + let (reader, mut writer) = stream.into_split(); + let mut lines = BufReader::new(reader).lines(); + let mut session = Session::default(); + + let mut keep_going = true; + while keep_going { + let shutdown_messages = async { while shutdown_receiver.changed().await.is_ok() {} }; + + match future::select(Box::pin(shutdown_messages), Box::pin(lines.next_line())).await { + Either::Left(_) => { + info!("shutting down diagnostics port connection to client"); + return Ok(()); + } + Either::Right((line_result, _)) => { + if let Some(line) = line_result? { + keep_going = session + .process_line(effect_builder, &mut writer, line.as_str()) + .await?; + } else { + info!("client closed diagnostics port connection"); + return Ok(()); + } + } + } + } + + Ok(()) +} + +/// Server task for diagnostics port. 
+pub(super) async fn server( + effect_builder: EffectBuilder, + socket_path: PathBuf, + listener: UnixListener, + mut shutdown_receiver: watch::Receiver<()>, +) where + REv: From + + From + + From + + From + + Send, +{ + let handling_shutdown_receiver = shutdown_receiver.clone(); + let mut next_client_id: u64 = 0; + let accept_connections = async move { + loop { + match listener.accept().await { + Ok((stream, client_addr)) => { + let client_id = next_client_id; + + let span = info_span!("diagnostics_port", client_id,); + + span.in_scope(|| { + info!(client_addr = %ShowUnixAddr(&client_addr), "accepted connection"); + }); + + next_client_id += 1; + + tokio::spawn( + handler(effect_builder, stream, handling_shutdown_receiver.clone()) + .instrument(span), + ); + } + Err(err) => { + info!(%err, "failed to accept incoming connection on diagnostics port"); + } + } + } + }; + + let shutdown_messages = async move { while shutdown_receiver.changed().await.is_ok() {} }; + + // Now we can wait for either the `shutdown` channel's remote end to do be dropped or the + // infinite loop to terminate, which never happens. + match future::select(Box::pin(shutdown_messages), Box::pin(accept_connections)).await { + Either::Left(_) => info!("shutting down diagnostics port"), + Either::Right(_) => unreachable!("server accept returns `!`"), + } + + // When we're shutting down, we try to delete the socket, but only warn in case of failure. 
+ match fs::remove_file(&socket_path) { + Ok(_) => { + debug!(socket_path=%socket_path.display(), "removed socket file"); + } + Err(_) => { + warn!(socket_path=%socket_path.display(), "could not remove socket file"); + } + } +} + +#[cfg(test)] +mod tests { + use std::{ + fmt::{self, Debug, Display, Formatter}, + path::{Path, PathBuf}, + sync::Arc, + time::Duration, + }; + + use derive_more::From; + use prometheus::Registry; + use serde::Serialize; + use tokio::{ + io::{AsyncReadExt, AsyncWriteExt}, + net::UnixStream, + sync::Notify, + }; + + use casper_types::{testing::TestRng, Chainspec, ChainspecRawBytes}; + + use crate::{ + components::{ + diagnostics_port::{self, Config as DiagnosticsPortConfig, DiagnosticsPort}, + network::{self, Identity as NetworkIdentity}, + Component, InitializedComponent, + }, + effect::{ + announcements::ControlAnnouncement, + diagnostics_port::DumpConsensusStateRequest, + requests::{NetworkInfoRequest, SetNodeStopRequest}, + EffectBuilder, EffectExt, Effects, + }, + reactor::{ + self, main_reactor::MainEvent, EventQueueHandle, QueueKind, Reactor as ReactorTrait, + ReactorEvent, + }, + testing::{ + self, + network::{NetworkedReactor, TestingNetwork}, + }, + utils::WeightedRoundRobin, + NodeRng, WithDir, + }; + + pub struct TestReactorConfig { + base_dir: PathBuf, + diagnostics_port: DiagnosticsPortConfig, + } + + impl TestReactorConfig { + /// Creates a new test reactor configuration with a given base dir and index. 
+ fn new>(base_dir: P, idx: usize) -> Self { + TestReactorConfig { + base_dir: base_dir.as_ref().to_owned(), + diagnostics_port: DiagnosticsPortConfig { + enabled: true, + socket_path: format!("node_{}.socket", idx).into(), + socket_umask: 0o022, + }, + } + } + + fn socket_path(&self) -> PathBuf { + self.base_dir.join(&self.diagnostics_port.socket_path) + } + } + + #[derive(Debug)] + struct Error; + + impl From for Error { + fn from(_: prometheus::Error) -> Self { + Self + } + } + + #[derive(Serialize, Debug, From)] + enum Event { + #[from] + DiagnosticsConsole(diagnostics_port::Event), + #[from] + DumpConsensusStateRequest(DumpConsensusStateRequest), + #[from] + ControlAnnouncement(ControlAnnouncement), + #[from] + NetworkInfoRequest(NetworkInfoRequest), + #[from] + SetNodeStopRequest(SetNodeStopRequest), + } + + impl Display for Event { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + Debug::fmt(self, f) + } + } + + impl ReactorEvent for Event { + fn is_control(&self) -> bool { + matches!(self, Event::ControlAnnouncement(_)) + } + + fn try_into_control(self) -> Option { + match self { + Event::ControlAnnouncement(ctrl_ann) => Some(ctrl_ann), + _ => None, + } + } + } + + #[derive(Debug)] + struct Reactor { + diagnostics_console: DiagnosticsPort, + } + + impl ReactorTrait for Reactor { + type Event = Event; + type Error = Error; + type Config = TestReactorConfig; + + fn dispatch_event( + &mut self, + effect_builder: EffectBuilder, + rng: &mut NodeRng, + event: Event, + ) -> Effects { + match event { + Event::DiagnosticsConsole(event) => reactor::wrap_effects( + Event::DiagnosticsConsole, + self.diagnostics_console + .handle_event(effect_builder, rng, event), + ), + Event::DumpConsensusStateRequest(_) + | Event::SetNodeStopRequest(_) + | Event::ControlAnnouncement(_) + | Event::NetworkInfoRequest(_) => { + panic!("unexpected: {}", event) + } + } + } + + fn new( + cfg: TestReactorConfig, + _chainspec: Arc, + _chainspec_raw_bytes: Arc, + _network_identity: 
NetworkIdentity, + _registry: &Registry, + _event_queue: EventQueueHandle, + _rng: &mut NodeRng, + ) -> Result<(Self, Effects), Error> { + let mut diagnostics_console = + DiagnosticsPort::new(WithDir::new(cfg.base_dir.clone(), cfg.diagnostics_port)); + >::start_initialization( + &mut diagnostics_console, + ); + let reactor = Reactor { + diagnostics_console, + }; + let effects = reactor::wrap_effects( + Event::DiagnosticsConsole, + async {}.event(|()| diagnostics_port::Event::Initialize), + ); + + Ok((reactor, effects)) + } + } + + impl NetworkedReactor for Reactor {} + + /// Runs a single mini-node with a diagnostics console and requests a dump of the (empty) + /// event queue, then returns it. + async fn run_single_node_console_and_dump_events(dump_format: &'static str) -> String { + let mut network = TestingNetwork::::new(); + let mut rng = TestRng::new(); + + let base_dir = tempfile::tempdir().expect("could not create tempdir"); + + // We just add a single node to the network. + let cfg = TestReactorConfig::new(base_dir.path(), 0); + let socket_path = cfg.socket_path(); + let (_node_id, _runner) = network.add_node_with_config(cfg, &mut rng).await.unwrap(); + + // Wait for the listening socket to initialize. + network + .settle(&mut rng, Duration::from_millis(500), Duration::from_secs(5)) + .await; + + let ready = Arc::new(Notify::new()); + + // Start a background task that connects to the unix socket and sends a few requests down. 
+ let client_ready = ready.clone(); + let join_handle = tokio::spawn(async move { + let mut stream = UnixStream::connect(socket_path) + .await + .expect("could not connect to socket path of node"); + + let commands = format!("set -o {} -q true\ndump-queues\nquit\n", dump_format); + stream + .write_all(commands.as_bytes()) + .await + .expect("could not write to listener"); + stream.flush().await.expect("flushing failed"); + + client_ready.notify_one(); + + let mut buffer = Vec::new(); + stream + .read_to_end(&mut buffer) + .await + .expect("could not read console output to end"); + + String::from_utf8(buffer).expect("could not parse output as UTF8") + }); + + // Wait for all the commands to be buffered. + ready.notified().await; + + // Give the node a chance to satisfy the dump. + network + .settle(&mut rng, Duration::from_secs(1), Duration::from_secs(10)) + .await; + + join_handle.await.expect("error joining client task") + } + + #[tokio::test] + async fn ensure_diagnostics_port_can_dump_events_in_json_format() { + testing::init_logging(); + + let output = run_single_node_console_and_dump_events("json").await; + + // The output will be empty queues, albeit formatted as JSON. Just check if there is a + // proper JSON header present. + assert!(output.starts_with(r#"{"queues":{""#)); + } + + #[tokio::test] + async fn ensure_diagnostics_port_can_dump_events_in_interactive_format() { + testing::init_logging(); + + let output = run_single_node_console_and_dump_events("interactive").await; + + // The output will be empty queues in debug format. We only look at the start of the output, + // since some time-triggered output may have already been included. + assert!(output.starts_with(r#"QueueDump { queues: {"#)); + } + + #[tokio::test] + async fn can_dump_actual_events_from_scheduler() { + // Create a scheduler with a few synthetic events. 
+ let scheduler = WeightedRoundRobin::new(QueueKind::weights(), None); + scheduler + .push( + MainEvent::Network(network::Event::SweepOutgoing), + QueueKind::Network, + ) + .await; + scheduler + .push( + MainEvent::Network(network::Event::GossipOurAddress), + QueueKind::Gossip, + ) + .await; + + // Construct the debug representation and compare as strings to avoid issues with missing + // `PartialEq` implementations. + scheduler + .dump(|dump| { + let debug_repr = format!("{:?}", dump); + assert!(debug_repr.starts_with(r#"QueueDump { queues: {"#)); + }) + .await; + } +} diff --git a/node/src/components/diagnostics_port/util.rs b/node/src/components/diagnostics_port/util.rs new file mode 100644 index 0000000000..3c6eb5d328 --- /dev/null +++ b/node/src/components/diagnostics_port/util.rs @@ -0,0 +1,21 @@ +//! Renderer for unix socket addresses. + +use std::fmt::{self, Display, Formatter}; + +use tokio::net::unix::SocketAddr; + +/// Unix socket address `Display` wrapper. +/// +/// Allows displaying a unix socket address. +#[derive(Debug)] +pub(super) struct ShowUnixAddr<'a>(pub &'a SocketAddr); + +impl Display for ShowUnixAddr<'_> { + #[inline] + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + match self.0.as_pathname() { + Some(path) => path.display().fmt(f), + None => f.write_str(""), + } + } +} diff --git a/node/src/components/event_stream_server.rs b/node/src/components/event_stream_server.rs index e0cd0e623d..e8d9820e3c 100644 --- a/node/src/components/event_stream_server.rs +++ b/node/src/components/event_stream_server.rs @@ -16,35 +16,53 @@ //! This component uses a ring buffer for outbound events providing some robustness against //! unintended subscriber disconnects, if a disconnected subscriber re-subscribes before the buffer //! has advanced past their last received event. -//! -//! For details about the SSE model and a list of supported SSEs, see: -//! 
mod config; mod event; +mod event_indexer; mod http_server; mod sse_server; +#[cfg(test)] +mod tests; -use std::{convert::Infallible, fmt::Debug}; +use std::{fmt::Debug, net::SocketAddr, path::PathBuf}; use datasize::DataSize; use tokio::sync::{ mpsc::{self, UnboundedSender}, oneshot, }; -use tracing::{info, warn}; +use tracing::{error, info, warn}; +use warp::Filter; -use casper_types::ProtocolVersion; +use casper_types::{InitiatorAddr, ProtocolVersion}; use super::Component; use crate::{ + components::{ComponentState, InitializedComponent, PortBoundComponent}, effect::{EffectBuilder, Effects}, + reactor::main_reactor::MainEvent, + types::TransactionHeader, utils::{self, ListeningError}, NodeRng, }; pub use config::Config; pub(crate) use event::Event; -pub use sse_server::SseData; +use event_indexer::{EventIndex, EventIndexer}; +use sse_server::ChannelsAndFilter; +pub(crate) use sse_server::SseData; + +const COMPONENT_NAME: &str = "event_stream_server"; + +/// This is used to define the number of events to buffer in the tokio broadcast channel to help +/// slower clients to try to avoid missing events (See +/// for further details). The +/// resulting broadcast channel size is `ADDITIONAL_PERCENT_FOR_BROADCAST_CHANNEL_SIZE` percent +/// greater than `config.event_stream_buffer_length`. +/// +/// We always want the broadcast channel size to be greater than the event stream buffer length so +/// that a new client can retrieve the entire set of buffered events if desired. +const ADDITIONAL_PERCENT_FOR_BROADCAST_CHANNEL_SIZE: u32 = 20; /// A helper trait whose bounds represent the requirements for a reactor event that `run_server` can /// work with. @@ -53,107 +71,319 @@ pub trait ReactorEventT: From + Send {} impl ReactorEventT for REv where REv: From + Send + 'static {} #[derive(DataSize, Debug)] -pub(crate) struct EventStreamServer { +struct InnerServer { /// Channel sender to pass event-stream data to the event-stream server. // TODO - this should not be skipped. 
Awaiting support for `UnboundedSender` in datasize crate. #[data_size(skip)] - sse_data_sender: UnboundedSender, + sse_data_sender: UnboundedSender<(EventIndex, SseData)>, + event_indexer: EventIndexer, + listening_address: SocketAddr, +} + +#[derive(DataSize, Debug)] +pub(crate) struct EventStreamServer { + state: ComponentState, + config: Config, + storage_path: PathBuf, + api_version: ProtocolVersion, + sse_server: Option, } impl EventStreamServer { - pub(crate) fn new( - config: Config, - api_version: ProtocolVersion, - ) -> Result { - let required_address = utils::resolve_address(&config.address).map_err(|error| { + pub(crate) fn new(config: Config, storage_path: PathBuf, api_version: ProtocolVersion) -> Self { + EventStreamServer { + state: ComponentState::Uninitialized, + config, + storage_path, + api_version, + sse_server: None, + } + } + + fn listen(&mut self) -> Result<(), ListeningError> { + let required_address = utils::resolve_address(&self.config.address).map_err(|error| { warn!( %error, - address=%config.address, + address=%self.config.address, "failed to start event stream server, cannot parse address" ); ListeningError::ResolveAddress(error) })?; - let (sse_data_sender, sse_data_receiver) = mpsc::unbounded_channel(); - // Event stream channels and filter. 
- let (broadcaster, new_subscriber_info_receiver, sse_filter) = - sse_server::create_channels_and_filter(config.broadcast_channel_size); - - let (shutdown_sender, shutdown_receiver) = oneshot::channel::<()>(); - - let (actual_address, server_with_shutdown) = warp::serve(sse_filter) - .try_bind_with_graceful_shutdown(required_address, async { - shutdown_receiver.await.ok(); - }) - .map_err(|error| ListeningError::Listen { - address: required_address, - error: Box::new(error), - })?; - info!(address=%actual_address, "started event stream server"); - - tokio::spawn(http_server::run( - config, - api_version, - server_with_shutdown, - shutdown_sender, - sse_data_receiver, - broadcaster, + let broadcast_channel_size = self.config.event_stream_buffer_length + * (100 + ADDITIONAL_PERCENT_FOR_BROADCAST_CHANNEL_SIZE) + / 100; + + let ChannelsAndFilter { + event_broadcaster, new_subscriber_info_receiver, - )); + sse_filter, + } = ChannelsAndFilter::new( + broadcast_channel_size as usize, + self.config.max_concurrent_subscribers, + ); + + let (server_shutdown_sender, shutdown_receiver) = oneshot::channel::<()>(); + + let (sse_data_sender, sse_data_receiver) = mpsc::unbounded_channel(); + + let listening_address = match self.config.cors_origin.as_str() { + "" => { + let (listening_address, server_with_shutdown) = warp::serve(sse_filter) + .try_bind_with_graceful_shutdown(required_address, async { + shutdown_receiver.await.ok(); + }) + .map_err(|error| ListeningError::Listen { + address: required_address, + error: Box::new(error), + })?; + + tokio::spawn(http_server::run( + self.config.clone(), + self.api_version, + server_with_shutdown, + server_shutdown_sender, + sse_data_receiver, + event_broadcaster, + new_subscriber_info_receiver, + )); + listening_address + } + "*" => { + let (listening_address, server_with_shutdown) = + warp::serve(sse_filter.with(warp::cors().allow_any_origin())) + .try_bind_with_graceful_shutdown(required_address, async { + shutdown_receiver.await.ok(); 
+ }) + .map_err(|error| ListeningError::Listen { + address: required_address, + error: Box::new(error), + })?; + + tokio::spawn(http_server::run( + self.config.clone(), + self.api_version, + server_with_shutdown, + server_shutdown_sender, + sse_data_receiver, + event_broadcaster, + new_subscriber_info_receiver, + )); + listening_address + } + _ => { + let (listening_address, server_with_shutdown) = warp::serve( + sse_filter.with(warp::cors().allow_origin(self.config.cors_origin.as_str())), + ) + .try_bind_with_graceful_shutdown(required_address, async { + shutdown_receiver.await.ok(); + }) + .map_err(|error| ListeningError::Listen { + address: required_address, + error: Box::new(error), + })?; + + tokio::spawn(http_server::run( + self.config.clone(), + self.api_version, + server_with_shutdown, + server_shutdown_sender, + sse_data_receiver, + event_broadcaster, + new_subscriber_info_receiver, + )); + listening_address + } + }; + + info!(address=%listening_address, "started event stream server"); - Ok(EventStreamServer { sse_data_sender }) + let event_indexer = EventIndexer::new(self.storage_path.clone()); + + self.sse_server = Some(InnerServer { + sse_data_sender, + event_indexer, + listening_address, + }); + Ok(()) } /// Broadcasts the SSE data to all clients connected to the event stream. 
fn broadcast(&mut self, sse_data: SseData) -> Effects { - let _ = self.sse_data_sender.send(sse_data); + if let Some(server) = self.sse_server.as_mut() { + let event_index = server.event_indexer.next_index(); + let _ = server.sse_data_sender.send((event_index, sse_data)); + } Effects::new() } } +impl Drop for EventStreamServer { + fn drop(&mut self) { + let _ = self.broadcast(SseData::Shutdown); + } +} + impl Component for EventStreamServer where REv: ReactorEventT, { type Event = Event; - type ConstructionError = Infallible; fn handle_event( &mut self, - _effect_builder: EffectBuilder, + effect_builder: EffectBuilder, _rng: &mut NodeRng, event: Self::Event, ) -> Effects { - match event { - Event::BlockAdded(block) => self.broadcast(SseData::BlockAdded { - block_hash: *block.hash(), - block: Box::new(*block), - }), - Event::DeployProcessed { - deploy_hash, - deploy_header, - block_hash, - execution_result, - } => self.broadcast(SseData::DeployProcessed { - deploy_hash: Box::new(deploy_hash), - account: Box::new(deploy_header.account().clone()), - timestamp: deploy_header.timestamp(), - ttl: deploy_header.ttl(), - dependencies: deploy_header.dependencies().clone(), - block_hash: Box::new(block_hash), - execution_result, - }), - Event::Fault { - era_id, - public_key, - timestamp, - } => self.broadcast(SseData::Fault { - era_id, - public_key, - timestamp, - }), - Event::FinalitySignature(fs) => self.broadcast(SseData::FinalitySignature(fs)), + match &self.state { + ComponentState::Fatal(msg) => { + error!( + msg, + ?event, + name = >::name(self), + "should not handle this event when this component has fatal error" + ); + Effects::new() + } + ComponentState::Uninitialized => { + warn!( + ?event, + name = >::name(self), + "should not handle this event when component is uninitialized" + ); + Effects::new() + } + ComponentState::Initializing => match event { + Event::Initialize => { + let (effects, state) = self.bind(self.config.enable_server, effect_builder); + 
>::set_state(self, state); + effects + } + Event::BlockAdded(_) + | Event::TransactionAccepted(_) + | Event::TransactionProcessed { .. } + | Event::TransactionsExpired(_) + | Event::Fault { .. } + | Event::FinalitySignature(_) + | Event::Step { .. } => { + warn!( + ?event, + name = >::name(self), + "should not handle this event when component is pending initialization" + ); + Effects::new() + } + }, + ComponentState::Initialized => match event { + Event::Initialize => { + error!( + ?event, + name = >::name(self), + "component already initialized" + ); + Effects::new() + } + Event::BlockAdded(block) => self.broadcast(SseData::BlockAdded { + block_hash: *block.hash(), + block: Box::new((*block).clone()), + }), + Event::TransactionAccepted(transaction) => { + self.broadcast(SseData::TransactionAccepted { transaction }) + } + Event::TransactionProcessed { + transaction_hash, + transaction_header, + block_hash, + execution_result, + messages, + } => { + let (initiator_addr, timestamp, ttl) = match *transaction_header { + TransactionHeader::Deploy(deploy_header) => ( + InitiatorAddr::PublicKey(deploy_header.account().clone()), + deploy_header.timestamp(), + deploy_header.ttl(), + ), + TransactionHeader::V1(metadata) => ( + metadata.initiator_addr().clone(), + metadata.timestamp(), + metadata.ttl(), + ), + }; + self.broadcast(SseData::TransactionProcessed { + transaction_hash: Box::new(transaction_hash), + initiator_addr: Box::new(initiator_addr), + timestamp, + ttl, + block_hash: Box::new(block_hash), + execution_result, + messages, + }) + } + Event::TransactionsExpired(transaction_hashes) => transaction_hashes + .into_iter() + .flat_map(|transaction_hash| { + self.broadcast(SseData::TransactionExpired { transaction_hash }) + }) + .collect(), + Event::Fault { + era_id, + public_key, + timestamp, + } => self.broadcast(SseData::Fault { + era_id, + public_key, + timestamp, + }), + Event::FinalitySignature(fs) => self.broadcast(SseData::FinalitySignature(fs)), + Event::Step 
{ + era_id, + execution_effects, + } => self.broadcast(SseData::Step { + era_id, + execution_effects, + }), + }, } } + + fn name(&self) -> &str { + COMPONENT_NAME + } +} + +impl InitializedComponent for EventStreamServer +where + REv: ReactorEventT, +{ + fn state(&self) -> &ComponentState { + &self.state + } + + fn set_state(&mut self, new_state: ComponentState) { + info!( + ?new_state, + name = >::name(self), + "component state changed" + ); + + self.state = new_state; + } +} + +impl PortBoundComponent for EventStreamServer +where + REv: ReactorEventT, +{ + type Error = ListeningError; + type ComponentEvent = Event; + + fn listen( + &mut self, + _effect_builder: EffectBuilder, + ) -> Result, Self::Error> { + self.listen()?; + Ok(Effects::new()) + } } diff --git a/node/src/components/event_stream_server/config.rs b/node/src/components/event_stream_server/config.rs index 84b9e73450..995f478742 100644 --- a/node/src/components/event_stream_server/config.rs +++ b/node/src/components/event_stream_server/config.rs @@ -7,42 +7,44 @@ use serde::{Deserialize, Serialize}; const DEFAULT_ADDRESS: &str = "0.0.0.0:0"; /// Default number of SSEs to buffer. -const DEFAULT_EVENT_STREAM_BUFFER_LENGTH: u32 = 100; +const DEFAULT_EVENT_STREAM_BUFFER_LENGTH: u32 = 5000; -/// Default broadcast channel size. -const DEFAULT_BROADCAST_CHANNEL_SIZE: usize = 100; +/// Default maximum number of subscribers. +const DEFAULT_MAX_CONCURRENT_SUBSCRIBERS: u32 = 100; -/// Default rate limit in qps. -const DEFAULT_QPS_LIMIT: u64 = 100; +/// Default CORS origin. +const DEFAULT_CORS_ORIGIN: &str = ""; /// SSE HTTP server configuration. #[derive(Clone, DataSize, Debug, Deserialize, Serialize)] // Disallow unknown fields to ensure config files and command-line overrides contain valid keys. #[serde(deny_unknown_fields)] pub struct Config { + /// Setting to enable the HTTP server. + pub enable_server: bool, + /// Address to bind event stream SSE HTTP server to. 
pub address: String, /// Number of SSEs to buffer. pub event_stream_buffer_length: u32, - /// The number of events to buffer in the tokio broadcast channel to help slower clients to try - /// to avoid missing events. See - /// for further details. - pub broadcast_channel_size: usize, + /// Default maximum number of subscribers across all event streams permitted at any one time. + pub max_concurrent_subscribers: u32, - /// Rate limit for queries per second. - pub qps_limit: u64, + /// CORS origin. + pub cors_origin: String, } impl Config { /// Creates a default instance for `EventStreamServer`. pub fn new() -> Self { Config { + enable_server: true, address: DEFAULT_ADDRESS.to_string(), event_stream_buffer_length: DEFAULT_EVENT_STREAM_BUFFER_LENGTH, - broadcast_channel_size: DEFAULT_BROADCAST_CHANNEL_SIZE, - qps_limit: DEFAULT_QPS_LIMIT, + max_concurrent_subscribers: DEFAULT_MAX_CONCURRENT_SUBSCRIBERS, + cors_origin: DEFAULT_CORS_ORIGIN.to_string(), } } } diff --git a/node/src/components/event_stream_server/event.rs b/node/src/components/event_stream_server/event.rs index 95d5c9a5c4..d9414ded2b 100644 --- a/node/src/components/event_stream_server/event.rs +++ b/node/src/components/event_stream_server/event.rs @@ -1,32 +1,61 @@ -use std::fmt::{self, Display, Formatter}; +use std::{ + fmt::{self, Display, Formatter}, + sync::Arc, +}; -use casper_types::{EraId, ExecutionResult, PublicKey}; +use crate::types::TransactionHeader; +use itertools::Itertools; -use crate::types::{Block, BlockHash, DeployHash, DeployHeader, FinalitySignature, Timestamp}; +use casper_types::{ + contract_messages::Messages, + execution::{Effects, ExecutionResult}, + Block, BlockHash, EraId, FinalitySignature, PublicKey, Timestamp, Transaction, TransactionHash, +}; #[derive(Debug)] pub enum Event { - BlockAdded(Box), - DeployProcessed { - deploy_hash: DeployHash, - deploy_header: Box, + Initialize, + BlockAdded(Arc), + TransactionAccepted(Arc), + TransactionProcessed { + transaction_hash: 
TransactionHash, + transaction_header: Box, block_hash: BlockHash, execution_result: Box, + messages: Messages, }, + TransactionsExpired(Vec), Fault { era_id: EraId, - public_key: PublicKey, + public_key: Box, timestamp: Timestamp, }, FinalitySignature(Box), + Step { + era_id: EraId, + execution_effects: Effects, + }, } impl Display for Event { fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { match self { + Event::Initialize => write!(formatter, "initialize"), Event::BlockAdded(block) => write!(formatter, "block added {}", block.hash()), - Event::DeployProcessed { deploy_hash, .. } => { - write!(formatter, "deploy processed {}", deploy_hash) + Event::TransactionAccepted(transaction_hash) => { + write!(formatter, "transaction accepted {}", transaction_hash) + } + Event::TransactionProcessed { + transaction_hash, .. + } => { + write!(formatter, "transaction processed {}", transaction_hash) + } + Event::TransactionsExpired(transaction_hashes) => { + write!( + formatter, + "transactions expired: {}", + transaction_hashes.iter().join(", ") + ) } Event::Fault { era_id, @@ -38,6 +67,7 @@ impl Display for Event { public_key, timestamp, era_id, ), Event::FinalitySignature(fs) => write!(formatter, "finality signature {}", fs), + Event::Step { era_id, .. 
} => write!(formatter, "step committed for {}", era_id), } } } diff --git a/node/src/components/event_stream_server/event_indexer.rs b/node/src/components/event_stream_server/event_indexer.rs new file mode 100644 index 0000000000..e6dd7f5778 --- /dev/null +++ b/node/src/components/event_stream_server/event_indexer.rs @@ -0,0 +1,168 @@ +use std::{fs, path::PathBuf}; + +use datasize::DataSize; +use tracing::{debug, warn}; + +const CACHE_FILENAME: &str = "sse_index"; + +pub(super) type EventIndex = u32; + +#[derive(Debug, DataSize)] +pub(super) struct EventIndexer { + index: EventIndex, + persistent_cache: PathBuf, +} + +impl EventIndexer { + pub(super) fn new(storage_path: PathBuf) -> Self { + let persistent_cache = storage_path.join(CACHE_FILENAME); + let mut bytes = EventIndex::default().to_le_bytes(); + match fs::read(&persistent_cache) { + Err(error) => { + if persistent_cache.exists() { + warn!( + file = %persistent_cache.display(), + %error, + "failed to read sse cache file" + ); + } + } + Ok(cached_bytes) => { + if cached_bytes.len() == bytes.len() { + bytes.copy_from_slice(cached_bytes.as_slice()); + } else { + warn!( + file = %persistent_cache.display(), + byte_count = %cached_bytes.len(), + "failed to parse sse cache file" + ); + } + } + } + + let index = EventIndex::from_le_bytes(bytes); + debug!(%index, "initialized sse index"); + + EventIndexer { + index, + persistent_cache, + } + } + + pub(super) fn next_index(&mut self) -> EventIndex { + let index = self.index; + self.index = index.wrapping_add(1); + index + } + + #[cfg(test)] + pub(super) fn current_index(&self) -> EventIndex { + self.index + } +} + +impl Drop for EventIndexer { + fn drop(&mut self) { + match fs::write(&self.persistent_cache, self.index.to_le_bytes()) { + Err(error) => warn!( + file = %self.persistent_cache.display(), + %error, + "failed to write sse cache file" + ), + Ok(_) => debug!( + file = %self.persistent_cache.display(), + index = %self.index, + "cached sse index to file" + ), 
+ } + } +} + +#[cfg(test)] +mod tests { + use std::iter; + + use super::*; + use crate::logging; + + #[test] + fn should_persist_in_cache() { + let _ = logging::init(); + let tempdir = tempfile::tempdir().unwrap(); + + // This represents a single session where five events are produced before the session ends. + let init_and_increment_by_five = |expected_first_index: EventIndex| { + let mut event_indexer = EventIndexer::new(tempdir.path().to_path_buf()); + for i in 0..5 { + assert_eq!(event_indexer.next_index(), expected_first_index + i); + } + // Explicitly drop, just to be clear that the cache write is being triggered. + drop(event_indexer); + }; + + // Should start at 0 when no cache file exists. + init_and_increment_by_five(0); + + // Should keep reading and writing to cache over ten subsequent sessions. + for session in 1..11 { + init_and_increment_by_five(session * 5); + } + } + + #[test] + fn should_wrap() { + let _ = logging::init(); + let tempdir = tempfile::tempdir().unwrap(); + + let mut event_indexer = EventIndexer::new(tempdir.path().to_path_buf()); + event_indexer.index = EventIndex::MAX; + + assert_eq!(event_indexer.next_index(), EventIndex::MAX); + assert_eq!(event_indexer.next_index(), 0); + } + + #[test] + fn should_reset_index_on_cache_read_failure() { + let _ = logging::init(); + let tempdir = tempfile::tempdir().unwrap(); + + // Create a folder with the same name as the cache file to cause reading to fail. + fs::create_dir(tempdir.path().join(CACHE_FILENAME)).unwrap(); + let mut event_indexer = EventIndexer::new(tempdir.path().to_path_buf()); + assert_eq!(event_indexer.next_index(), 0); + } + + #[test] + fn should_reset_index_on_corrupt_cache() { + let _ = logging::init(); + let tempdir = tempfile::tempdir().unwrap(); + + { + // Create the cache file with too few bytes to be parsed as an `Index`. 
+ let index: EventIndex = 1; + fs::write( + tempdir.path().join(CACHE_FILENAME), + &index.to_le_bytes()[1..], + ) + .unwrap(); + + let mut event_indexer = EventIndexer::new(tempdir.path().to_path_buf()); + assert_eq!(event_indexer.next_index(), 0); + } + + { + // Create the cache file with too many bytes to be parsed as an `Index`. + let index: EventIndex = 1; + let bytes: Vec = index + .to_le_bytes() + .iter() + .chain(iter::once(&0)) + .copied() + .collect(); + fs::write(tempdir.path().join(CACHE_FILENAME), bytes).unwrap(); + + let mut event_indexer = EventIndexer::new(tempdir.path().to_path_buf()); + assert_eq!(event_indexer.next_index(), 0); + } + } +} diff --git a/node/src/components/event_stream_server/http_server.rs b/node/src/components/event_stream_server/http_server.rs index ef240cfb7e..1712f50ff1 100644 --- a/node/src/components/event_stream_server/http_server.rs +++ b/node/src/components/event_stream_server/http_server.rs @@ -10,8 +10,8 @@ use wheelbuf::WheelBuf; use casper_types::ProtocolVersion; use super::{ - sse_server::{BroadcastChannelMessage, NewSubscriberInfo, ServerSentEvent}, - Config, SseData, + sse_server::{BroadcastChannelMessage, Id, NewSubscriberInfo, ServerSentEvent}, + Config, EventIndex, SseData, }; /// Run the HTTP server. @@ -30,14 +30,13 @@ pub(super) async fn run( api_version: ProtocolVersion, server_with_shutdown: impl Future + Send + 'static, server_shutdown_sender: oneshot::Sender<()>, - mut data_receiver: mpsc::UnboundedReceiver, + mut data_receiver: mpsc::UnboundedReceiver<(EventIndex, SseData)>, broadcaster: broadcast::Sender, mut new_subscriber_info_receiver: mpsc::UnboundedReceiver, ) { let server_joiner = task::spawn(server_with_shutdown); // Initialize the index and buffer for the SSEs. 
- let mut event_index = 0_u32; let mut buffer = WheelBuf::new(vec![ ServerSentEvent::initial_event(api_version); config.event_stream_buffer_length as usize @@ -58,10 +57,30 @@ pub(super) async fn run( // If the client supplied a "start_from" index, provide the buffered events. // If they requested more than is buffered, just provide the whole buffer. if let Some(start_index) = subscriber.start_from { - for event in buffer + // If the buffer's first event ID is in the range [0, buffer size) or + // (Id::MAX - buffer size, Id::MAX], then the events in the buffer are + // considered to have their IDs wrapping round, or that was recently the + // case. In this case, we add `buffer.capacity()` to `start_index` and + // the buffered events' IDs when considering which events to include in + // the requested initial events, effectively shifting all the IDs past + // the wrapping transition. + let buffer_size = buffer.capacity() as Id; + let in_wraparound_zone = buffer .iter() - .skip_while(|event| event.id.unwrap() < start_index) - { + .next() + .map(|event| { + let id = event.id.unwrap(); + id > Id::MAX - buffer_size || id < buffer_size + }) + .unwrap_or_default(); + for event in buffer.iter().skip_while(|event| { + if in_wraparound_zone { + event.id.unwrap().wrapping_add(buffer_size) + < start_index.wrapping_add(buffer_size) + } else { + event.id.unwrap() < start_index + } + }) { // As per sending `SSE_INITIAL_EVENT`, we don't care if this errors. let _ = subscriber.initial_events_sender.send(event.clone()); } @@ -71,7 +90,7 @@ pub(super) async fn run( maybe_data = data_receiver.recv() => { match maybe_data { - Some(data) => { + Some((event_index, data)) => { // Buffer the data and broadcast it to subscribed clients. trace!("Event stream server received {:?}", data); let event = ServerSentEvent { id: Some(event_index), data }; @@ -80,7 +99,6 @@ pub(super) async fn run( // This can validly fail if there are no connected clients, so don't log // the error. 
let _ = broadcaster.send(message); - event_index = event_index.wrapping_add(1); } None => { // The data sender has been dropped - exit the loop. diff --git a/node/src/components/event_stream_server/sse_server.rs b/node/src/components/event_stream_server/sse_server.rs index 67cfddd190..051d0fd762 100644 --- a/node/src/components/event_stream_server/sse_server.rs +++ b/node/src/components/event_stream_server/sse_server.rs @@ -1,7 +1,18 @@ //! Types and functions used by the http server to manage the event-stream. +use std::{ + collections::{HashMap, HashSet}, + net::SocketAddr, + sync::{Arc, RwLock}, +}; + use datasize::DataSize; -use futures::{Stream, StreamExt}; +use futures::{future, Stream, StreamExt}; +use http::StatusCode; +use hyper::Body; +#[cfg(test)] +use rand::Rng; +use schemars::JsonSchema; use serde::{Deserialize, Serialize}; use tokio::sync::{ broadcast::{self, error::RecvError}, @@ -10,25 +21,38 @@ use tokio::sync::{ use tokio_stream::wrappers::{ errors::BroadcastStreamRecvError, BroadcastStream, UnboundedReceiverStream, }; -use tracing::{error, info, trace}; +use tracing::{debug, error, info, warn}; use warp::{ + addr, filters::BoxedFilter, + path, + reject::Rejection, + reply::Response, sse::{self, Event as WarpServerSentEvent}, Filter, Reply, }; -use casper_types::{EraId, ExecutionResult, ProtocolVersion, PublicKey}; - -use crate::types::{Block, BlockHash, DeployHash, FinalitySignature, TimeDiff, Timestamp}; +use casper_types::{ + contract_messages::Messages, + execution::{Effects, ExecutionResult}, + Block, BlockHash, EraId, FinalitySignature, InitiatorAddr, ProtocolVersion, PublicKey, + TimeDiff, Timestamp, Transaction, TransactionHash, +}; +#[cfg(test)] +use casper_types::{ + execution::ExecutionResultV2, testing::TestRng, Deploy, TestBlockBuilder, TransactionV1, +}; -/// The URL path. +/// The URL root path. pub const SSE_API_PATH: &str = "events"; +/// The URL query string field name. 
+pub const QUERY_FIELD: &str = "start_from"; /// The "id" field of the events sent on the event stream to clients. -type Id = u32; +pub type Id = u32; /// The "data" field of the events sent on the event stream to clients. -#[derive(Clone, PartialEq, Eq, Serialize, Deserialize, Debug, DataSize)] +#[derive(Clone, PartialEq, Eq, Serialize, Deserialize, Debug, DataSize, JsonSchema)] pub enum SseData { /// The version of this node's API server. This event will always be the first sent to a new /// client, and will have no associated event ID provided. @@ -39,25 +63,128 @@ pub enum SseData { block_hash: BlockHash, block: Box, }, - /// The given deploy has been executed, committed and forms part of the given block. - DeployProcessed { - deploy_hash: Box, - account: Box, + /// The given transaction has been newly-accepted by this node. + TransactionAccepted { + #[schemars(with = "Transaction", description = "a transaction")] + transaction: Arc, + }, + /// The given transaction has been executed, committed and forms part of the given block. + TransactionProcessed { + transaction_hash: Box, + initiator_addr: Box, timestamp: Timestamp, ttl: TimeDiff, - dependencies: Vec, block_hash: Box, - #[data_size(skip)] + //#[data_size(skip)] execution_result: Box, + messages: Messages, }, + /// The given transaction has expired. + TransactionExpired { transaction_hash: TransactionHash }, /// Generic representation of validator's fault in an era. Fault { era_id: EraId, - public_key: PublicKey, + public_key: Box, timestamp: Timestamp, }, /// New finality signature received. FinalitySignature(Box), + /// The execution effects produced by a `StepRequest`. + Step { + era_id: EraId, + execution_effects: Effects, + }, + /// The node is about to shut down. + Shutdown, +} + +#[cfg(test)] +impl SseData { + /// Returns a random `SseData::BlockAdded`. 
+ pub(super) fn random_block_added(rng: &mut TestRng) -> Self { + let block = TestBlockBuilder::new().build(rng); + SseData::BlockAdded { + block_hash: *block.hash(), + block: Box::new(block.into()), + } + } + + /// Returns a random `SseData::TransactionAccepted`, along with the random `Transaction`. + pub(super) fn random_transaction_accepted(rng: &mut TestRng) -> (Self, Transaction) { + let txn = Transaction::random(rng); + let event = SseData::TransactionAccepted { + transaction: Arc::new(txn.clone()), + }; + (event, txn) + } + + /// Returns a random `SseData::TransactionProcessed`. + pub(super) fn random_transaction_processed(rng: &mut TestRng) -> Self { + let txn = Transaction::random(rng); + let (timestamp, ttl) = match &txn { + Transaction::Deploy(deploy) => (deploy.timestamp(), deploy.ttl()), + Transaction::V1(txn) => (txn.timestamp(), txn.ttl()), + }; + let message_count = rng.gen_range(0..6); + let messages = std::iter::repeat_with(|| rng.gen()) + .take(message_count) + .collect(); + + SseData::TransactionProcessed { + transaction_hash: Box::new(txn.hash()), + initiator_addr: Box::new(txn.initiator_addr()), + timestamp, + ttl, + block_hash: Box::new(BlockHash::random(rng)), + execution_result: Box::new(ExecutionResult::from(ExecutionResultV2::random(rng))), + messages, + } + } + + /// Returns a random `SseData::TransactionExpired` + pub(super) fn random_transaction_expired(rng: &mut TestRng) -> Self { + let timestamp = Timestamp::now() - TimeDiff::from_seconds(20); + let ttl = TimeDiff::from_seconds(10); + let txn = if rng.gen() { + Transaction::from(Deploy::random_with_timestamp_and_ttl(rng, timestamp, ttl)) + } else { + let txn = TransactionV1::random_with_timestamp_and_ttl(rng, Some(timestamp), Some(ttl)); + Transaction::from(txn) + }; + + SseData::TransactionExpired { + transaction_hash: txn.hash(), + } + } + + /// Returns a random `SseData::Fault`. 
+ pub(super) fn random_fault(rng: &mut TestRng) -> Self { + SseData::Fault { + era_id: EraId::new(rng.gen()), + public_key: Box::new(PublicKey::random(rng)), + timestamp: Timestamp::random(rng), + } + } + + /// Returns a random `SseData::FinalitySignature`. + pub(super) fn random_finality_signature(rng: &mut TestRng) -> Self { + SseData::FinalitySignature(Box::new(FinalitySignature::random(rng))) + } + + /// Returns a random `SseData::Step`. + pub(super) fn random_step(rng: &mut TestRng) -> Self { + let execution_effects = ExecutionResultV2::random(rng).effects; + SseData::Step { + era_id: EraId::new(rng.gen()), + execution_effects, + } + } +} + +#[derive(Serialize)] +#[serde(rename_all = "PascalCase")] +pub(super) struct TransactionAccepted { + pub(super) transaction_accepted: Arc, } /// The components of a single SSE. @@ -80,6 +207,7 @@ impl ServerSentEvent { /// The messages sent via the tokio broadcast channel to the handler of each client's SSE stream. #[derive(Clone, PartialEq, Eq, Debug)] +#[allow(clippy::large_enum_variant)] pub(super) enum BroadcastChannelMessage { /// The message should be sent to the client as an SSE with an optional ID. The ID should only /// be `None` where the `data` is `SseData::ApiVersion`. @@ -100,39 +228,160 @@ pub(super) struct NewSubscriberInfo { pub(super) initial_events_sender: mpsc::UnboundedSender, } -/// The endpoint's query string, e.g. `http://localhost:22777/events?start_from=999` -#[derive(Deserialize, Debug)] -struct Query { - start_from: Option, -} - -/// Creates the message-passing channels required to run the event-stream server and the warp filter -/// for the event-stream server. -pub(super) fn create_channels_and_filter( - broadcast_channel_size: usize, -) -> ( - broadcast::Sender, - mpsc::UnboundedReceiver, - BoxedFilter<(impl Reply,)>, -) { - // Create a channel to broadcast new events to all subscribed clients' streams. 
- let (broadcaster, _) = broadcast::channel(broadcast_channel_size); - let cloned_broadcaster = broadcaster.clone(); - - // Create a channel for `NewSubscriberInfo`s to pass the information required to handle a new - // client subscription. - let (new_subscriber_info_sender, new_subscriber_info_receiver) = mpsc::unbounded_channel(); - - let filter = warp::get() - .and(warp::path(SSE_API_PATH)) - .and(warp::query().map(move |query: Query| { +/// Maps the `event` to a warp event, or `None` if it's a malformed event (ie.: `ApiVersion` event +/// with `id` set or event other than `ApiVersion` without `id`) +fn map_server_sent_event( + event: &ServerSentEvent, +) -> Option> { + let id = match event.id { + Some(id) => { + if matches!(&event.data, &SseData::ApiVersion { .. }) { + error!("ApiVersion should have no event ID"); + return None; + } + id.to_string() + } + None => { + if !matches!(&event.data, &SseData::ApiVersion { .. }) { + error!("only ApiVersion may have no event ID"); + return None; + } + String::new() + } + }; + + match &event.data { + &SseData::ApiVersion { .. } => Some(Ok(WarpServerSentEvent::default() + .json_data(&event.data) + .unwrap_or_else(|error| { + warn!(%error, ?event, "failed to jsonify sse event"); + WarpServerSentEvent::default() + }))), + + &SseData::BlockAdded { .. } + | &SseData::TransactionProcessed { .. } + | &SseData::TransactionExpired { .. } + | &SseData::Fault { .. } + | &SseData::Step { .. 
} + | &SseData::FinalitySignature(_) + | &SseData::Shutdown => Some(Ok(WarpServerSentEvent::default() + .json_data(&event.data) + .unwrap_or_else(|error| { + warn!(%error, ?event, "failed to jsonify sse event"); + WarpServerSentEvent::default() + }) + .id(id))), + + SseData::TransactionAccepted { transaction } => Some(Ok(WarpServerSentEvent::default() + .json_data(&TransactionAccepted { + transaction_accepted: Arc::clone(transaction), + }) + .unwrap_or_else(|error| { + warn!(%error, "failed to jsonify sse event"); + WarpServerSentEvent::default() + }) + .id(event.id.unwrap().to_string()))), + } +} + +/// Extracts the starting event ID from the provided query, or `None` if `query` is empty. +/// +/// If `query` is not empty, returns a 422 response if `query` doesn't have exactly one entry, +/// "starts_from" mapped to a value representing an event ID. +fn parse_query(query: &HashMap) -> Result, Response> { + if query.is_empty() { + return Ok(None); + } + + if query.len() > 1 { + return Err(create_422()); + } + + match query + .get(QUERY_FIELD) + .and_then(|id_str| id_str.parse::().ok()) + { + Some(id) => Ok(Some(id)), + None => Err(create_422()), + } +} + +/// Creates a 404 response with a useful error message in the body. +fn create_404() -> Response { + let mut response = Response::new(Body::from(format!( + "invalid path: expected '/{root}'\n", + root = SSE_API_PATH, + ))); + *response.status_mut() = StatusCode::NOT_FOUND; + response +} + +/// Creates a 422 response with a useful error message in the body for use in case of a bad query +/// string. +fn create_422() -> Response { + let mut response = Response::new(Body::from(format!( + "invalid query: expected single field '{}='\n", + QUERY_FIELD + ))); + *response.status_mut() = StatusCode::UNPROCESSABLE_ENTITY; + response +} + +/// Creates a 503 response (Service Unavailable) to be returned if the server has too many +/// subscribers. 
+fn create_503() -> Response { + let mut response = Response::new(Body::from("server has reached limit of subscribers")); + *response.status_mut() = StatusCode::SERVICE_UNAVAILABLE; + response +} + +pub(super) struct ChannelsAndFilter { + pub(super) event_broadcaster: broadcast::Sender, + pub(super) new_subscriber_info_receiver: mpsc::UnboundedReceiver, + pub(super) sse_filter: BoxedFilter<(Response,)>, +} + +impl ChannelsAndFilter { + /// Creates the message-passing channels required to run the event-stream server and the warp + /// filter for the event-stream server. + pub(super) fn new(broadcast_channel_size: usize, max_concurrent_subscribers: u32) -> Self { + // Create a channel to broadcast new events to all subscribed clients' streams. + let (event_broadcaster, _) = broadcast::channel(broadcast_channel_size); + let cloned_broadcaster = event_broadcaster.clone(); + + // Create a channel for `NewSubscriberInfo`s to pass the information required to handle a + // new client subscription. + let (new_subscriber_info_sender, new_subscriber_info_receiver) = mpsc::unbounded_channel(); + + let serve = move |query: HashMap, + maybe_remote_address: Option| { + let remote_address = match maybe_remote_address { + Some(address) => address.to_string(), + None => "unknown".to_string(), + }; + + // If we already have the maximum number of subscribers, reject this new one. + if cloned_broadcaster.receiver_count() >= max_concurrent_subscribers as usize { + info!( + %remote_address, + %max_concurrent_subscribers, + "event stream server has max subscribers: rejecting new one" + ); + return create_503(); + } + + let start_from = match parse_query(&query) { + Ok(maybe_id) => maybe_id, + Err(error_response) => return error_response, + }; + // Create a channel for the client's handler to receive the stream of initial events. 
let (initial_events_sender, initial_events_receiver) = mpsc::unbounded_channel(); // Supply the server with the sender part of the channel along with the client's // requested starting point. let new_subscriber_info = NewSubscriberInfo { - start_from: query.start_from, + start_from, initial_events_sender, }; if new_subscriber_info_sender @@ -148,11 +397,26 @@ pub(super) fn create_channels_and_filter( sse::reply(sse::keep_alive().stream(stream_to_client( initial_events_receiver, ongoing_events_receiver, + remote_address, ))) - })) - .boxed(); + .into_response() + }; + + let sse_filter = warp::get() + .and(path(SSE_API_PATH)) + .and(path::end()) + .and(warp::query()) + .and(addr::remote()) + .map(serve) + .or_else(|_| async move { Ok::<_, Rejection>((create_404(),)) }) + .boxed(); - (broadcaster, new_subscriber_info_receiver, filter) + ChannelsAndFilter { + event_broadcaster, + new_subscriber_info_receiver, + sse_filter, + } + } } /// This takes the two channel receivers and turns them into a stream of SSEs to the subscribed @@ -166,39 +430,186 @@ pub(super) fn create_channels_and_filter( /// either the client disconnects, or the server shuts down (indicated by sending a `Shutdown` /// variant via the channel). This channel will receive all SSEs created from the moment the client /// subscribed to the server's event stream. +/// +/// It also takes an `EventFilter` which causes events to which the client didn't subscribe to be +/// skipped. fn stream_to_client( initial_events: mpsc::UnboundedReceiver, ongoing_events: broadcast::Receiver, + remote_address: String, ) -> impl Stream> + 'static { - UnboundedReceiverStream::new(initial_events) - .map(|event| Ok(BroadcastChannelMessage::ServerSentEvent(event))) - .chain(BroadcastStream::new(ongoing_events)) - .map(|result| { - trace!(?result); - match result { - Ok(BroadcastChannelMessage::ServerSentEvent(event)) => { - match (event.id, &event.data) { - (None, &SseData::ApiVersion { .. 
}) => Ok(WarpServerSentEvent::default() - .json_data(event.data) - .unwrap_or_default()), - (Some(id), &SseData::BlockAdded { .. }) - | (Some(id), &SseData::DeployProcessed { .. }) - | (Some(id), &SseData::FinalitySignature(_)) - | (Some(id), &SseData::Fault { .. }) => Ok(WarpServerSentEvent::default() - .json_data(event.data) - .unwrap_or_default() - .id(id.to_string())), - _ => unreachable!("only ApiVersion may have no event ID"), + // Keep a record of the IDs of the events delivered via the `initial_events` receiver. + let initial_stream_ids = Arc::new(RwLock::new(HashSet::new())); + let cloned_initial_ids = Arc::clone(&initial_stream_ids); + + // Map the events arriving after the initial stream to the correct error type, filtering out any + // that have already been sent in the initial stream. + let ongoing_stream = BroadcastStream::new(ongoing_events) + .filter_map(move |result| { + let cloned_initial_ids = Arc::clone(&cloned_initial_ids); + let remote_address = remote_address.clone(); + async move { + match result { + Ok(BroadcastChannelMessage::ServerSentEvent(event)) => { + if let Some(id) = event.id { + if cloned_initial_ids.read().unwrap().contains(&id) { + debug!(event_id=%id, "skipped duplicate event"); + return None; + } + } + Some(Ok(event)) + } + Ok(BroadcastChannelMessage::Shutdown) => Some(Err(RecvError::Closed)), + Err(BroadcastStreamRecvError::Lagged(lagged_count)) => { + info!( + %remote_address, + %lagged_count, + "client lagged: dropping event stream connection to client", + ); + Some(Err(RecvError::Lagged(lagged_count))) } } - Ok(BroadcastChannelMessage::Shutdown) => Err(RecvError::Closed), - Err(BroadcastStreamRecvError::Lagged(amount)) => { - info!( - "client lagged by {} events - dropping event stream connection to client", - amount - ); - Err(RecvError::Lagged(amount)) - } } }) + .take_while(|result| future::ready(!matches!(result, Err(RecvError::Closed)))); + + // Serve the initial events followed by the ongoing ones, filtering as 
dictated by the + // `event_filter`. + UnboundedReceiverStream::new(initial_events) + .map(move |event| { + if let Some(id) = event.id { + let _ = initial_stream_ids.write().unwrap().insert(id); + } + Ok(event) + }) + .chain(ongoing_stream) + .filter_map(move |result| async move { + match result { + Ok(event) => map_server_sent_event(&event), + Err(error) => Some(Err(error)), + } + }) +} + +#[cfg(test)] +mod tests { + use std::iter; + + use casper_types::testing::TestRng; + + use super::*; + use crate::logging; + + /// This test checks that events from the initial stream which are duplicated in the + /// ongoing stream are filtered out. + #[tokio::test] + async fn should_filter_duplicate_events() { + // Returns `count` SSE events. The events will have sequential IDs starting from `start_id`. + fn make_events(rng: &mut TestRng, start_id: Id, count: usize) -> Vec { + (start_id..(start_id + count as u32)) + .map(|id| ServerSentEvent { + id: Some(id), + data: SseData::random_finality_signature(rng), + }) + .collect() + } + + // Returns `NUM_ONGOING_EVENTS` SSE events containing duplicates taken from the end of the + // initial stream. Allows for the full initial stream to be duplicated except for + // its first event (the `ApiVersion` one) which has no ID. + fn make_ongoing_events( + rng: &mut TestRng, + duplicate_count: usize, + initial_events: &[ServerSentEvent], + ) -> Vec { + assert!(duplicate_count < initial_events.len()); + let initial_skip_count = initial_events.len() - duplicate_count; + let unique_start_id = initial_events.len() as Id - 1; + let unique_count = NUM_ONGOING_EVENTS - duplicate_count; + initial_events + .iter() + .skip(initial_skip_count) + .cloned() + .chain(make_events(rng, unique_start_id, unique_count)) + .collect() + } + + // The number of events in the initial stream, excluding the very first `ApiVersion` one. 
+ const NUM_INITIAL_EVENTS: usize = 10; + // The number of events in the ongoing stream, including any duplicated from the initial + // stream. + const NUM_ONGOING_EVENTS: usize = 20; + + let _ = logging::init(); + let mut rng = crate::new_rng(); + + let initial_events: Vec = + iter::once(ServerSentEvent::initial_event(ProtocolVersion::V1_0_0)) + .chain(make_events(&mut rng, 0, NUM_INITIAL_EVENTS)) + .collect(); + + // Run three cases; where only a single event is duplicated, where five are duplicated, and + // where the whole initial stream (except the `ApiVersion`) is duplicated. + for duplicate_count in &[1, 5, NUM_INITIAL_EVENTS] { + // Create the events with the requisite duplicates at the start of the collection. + let ongoing_events = make_ongoing_events(&mut rng, *duplicate_count, &initial_events); + + let (initial_events_sender, initial_events_receiver) = mpsc::unbounded_channel(); + let (ongoing_events_sender, ongoing_events_receiver) = + broadcast::channel(NUM_INITIAL_EVENTS + NUM_ONGOING_EVENTS + 1); + + // Send all the events. + for event in initial_events.iter().cloned() { + initial_events_sender.send(event).unwrap(); + } + for event in ongoing_events.iter().cloned() { + let _ = ongoing_events_sender + .send(BroadcastChannelMessage::ServerSentEvent(event)) + .unwrap(); + } + // Drop the channel senders so that the chained receiver streams can both complete. + drop(initial_events_sender); + drop(ongoing_events_sender); + + // Collect the events emitted by `stream_to_client()` - should not contain duplicates. + let received_events: Vec> = stream_to_client( + initial_events_receiver, + ongoing_events_receiver, + "127.0.0.1:3456".to_string(), + ) + .collect() + .await; + + // Create the expected collection of emitted events. 
+ let deduplicated_events: Vec = initial_events + .iter() + .take(initial_events.len() - duplicate_count) + .cloned() + .chain(ongoing_events) + .collect(); + + assert_eq!(received_events.len(), deduplicated_events.len()); + + // Iterate the received and expected collections, asserting that each matches. As we + // don't have access to the internals of the `WarpServerSentEvent`s, assert using their + // `String` representations. + for (received_event, deduplicated_event) in + received_events.iter().zip(deduplicated_events.iter()) + { + let received_event = received_event.as_ref().unwrap(); + let expected_data_string = serde_json::to_string(&deduplicated_event.data).unwrap(); + + let expected_id_string = if let Some(id) = deduplicated_event.id { + format!("\nid:{}", id) + } else { + String::new() + }; + + let expected_string = + format!("data:{}{}", expected_data_string, expected_id_string); + + assert_eq!(received_event.to_string().trim(), expected_string) + } + } + } } diff --git a/node/src/components/event_stream_server/tests.rs b/node/src/components/event_stream_server/tests.rs new file mode 100644 index 0000000000..5f6b985e13 --- /dev/null +++ b/node/src/components/event_stream_server/tests.rs @@ -0,0 +1,1077 @@ +use std::{ + collections::HashMap, + error::Error, + fs, io, + iter::{self, FromIterator}, + str, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, + time::Duration, +}; + +use bytes::{Buf, Bytes}; +use futures::{join, StreamExt}; +use http::StatusCode; +use pretty_assertions::assert_eq; +use reqwest::Response; +use schemars::schema_for; +use tempfile::TempDir; +use tokio::{ + sync::{Barrier, Notify}, + task::{self, JoinHandle}, + time, +}; +use tracing::debug; + +use casper_types::testing::TestRng; + +use super::*; +use crate::{logging, testing::assert_schema}; +use sse_server::{Id, TransactionAccepted, QUERY_FIELD, SSE_API_PATH as ROOT_PATH}; + +/// The total number of random events `EventStreamServer` will emit by default, excluding the +/// 
initial `ApiVersion` event. +const EVENT_COUNT: u32 = 100; +/// The maximum number of random events `EventStreamServer` will emit, excluding the initial +/// `ApiVersion` event. +const MAX_EVENT_COUNT: u32 = 100_000_000; +/// The event stream buffer length, set in the server's config. Set to half of the total event +/// count to allow for the buffer purging events in the test. +const BUFFER_LENGTH: u32 = EVENT_COUNT / 2; +/// The maximum amount of time to wait for a test server to complete. If this time is exceeded, the +/// test has probably hung, and should be deemed to have failed. +const MAX_TEST_TIME: Duration = Duration::from_secs(2); +/// The duration of the sleep called between each event being sent by the server. +const DELAY_BETWEEN_EVENTS: Duration = Duration::from_millis(1); + +/// A helper to allow the synchronization of a single client joining the SSE server. +/// +/// It provides the primitives to allow the client to connect to the server just before a specific +/// event is emitted by the server. +#[derive(Clone)] +struct ClientSyncBehavior { + /// The event ID before which the server should wait at the barrier for the client to join. + join_before_event: Id, + /// The barrier to sync the client joining the server. + barrier: Arc, +} + +impl ClientSyncBehavior { + fn new(join_before_event: Id) -> (Self, Arc) { + let barrier = Arc::new(Barrier::new(2)); + let behavior = ClientSyncBehavior { + join_before_event, + barrier: Arc::clone(&barrier), + }; + (behavior, barrier) + } +} + +/// A helper defining the behavior of the server. +#[derive(Clone)] +struct ServerBehavior { + /// Whether the server should have a delay between sending events, to allow a client to keep up + /// and not be disconnected for lagging. + has_delay_between_events: bool, + /// Whether the server should send all events once, or keep repeating the batch up until + /// `MAX_EVENT_COUNT` have been sent. 
+ repeat_events: bool, + /// If `Some`, sets the `max_concurrent_subscribers` server config value, otherwise uses the + /// config default. + max_concurrent_subscribers: Option, + clients: Vec, +} + +impl ServerBehavior { + /// Returns a default new `ServerBehavior`. + /// + /// It has a small delay between events, and sends the collection of random events once. + fn new() -> Self { + ServerBehavior { + has_delay_between_events: true, + repeat_events: false, + max_concurrent_subscribers: None, + clients: Vec::new(), + } + } + + /// Returns a new `ServerBehavior` suitable for testing lagging clients. + /// + /// It has no delay between events, and sends the collection of random events repeatedly up to a + /// maximum of `MAX_EVENT_COUNT` events. + fn new_for_lagging_test() -> Self { + ServerBehavior { + has_delay_between_events: false, + repeat_events: true, + max_concurrent_subscribers: None, + clients: Vec::new(), + } + } + + /// Adds a client sync behavior, specified for the client to connect to the server just before + /// `id` is emitted. + fn add_client_sync_before_event(&mut self, id: Id) -> Arc { + let (client_behavior, barrier) = ClientSyncBehavior::new(id); + self.clients.push(client_behavior); + barrier + } + + /// Sets the `max_concurrent_subscribers` server config value. + fn set_max_concurrent_subscribers(&mut self, count: u32) { + self.max_concurrent_subscribers = Some(count); + } + + /// Waits for all clients which specified they wanted to join just before the given event ID. + async fn wait_for_clients(&self, id: Id) { + for client_behavior in &self.clients { + if client_behavior.join_before_event == id { + debug!("server waiting before event {}", id); + client_behavior.barrier.wait().await; + debug!("server waiting for client to connect before event {}", id); + client_behavior.barrier.wait().await; + debug!("server finished waiting before event {}", id); + } + } + } + + /// Sleeps if `self` was set to enable delays between events. 
+ async fn sleep_if_required(&self) { + if self.has_delay_between_events { + time::sleep(DELAY_BETWEEN_EVENTS).await; + } else { + task::yield_now().await; + } + } +} + +/// A helper to allow the server to be kept alive until a specific call to stop it. +#[derive(Clone)] +struct ServerStopper { + should_stop: Arc, + notifier: Arc, +} + +impl ServerStopper { + fn new() -> Self { + ServerStopper { + should_stop: Arc::new(AtomicBool::new(false)), + notifier: Arc::new(Notify::new()), + } + } + + /// Returns whether the server should stop now or not. + fn should_stop(&self) -> bool { + self.should_stop.load(Ordering::SeqCst) + } + + /// Waits until the server should stop. + async fn wait(&self) { + while !self.should_stop() { + self.notifier.notified().await; + } + } + + /// Tells the server to stop. + fn stop(&self) { + self.should_stop.store(true, Ordering::SeqCst); + self.notifier.notify_one(); + } +} + +impl Drop for ServerStopper { + fn drop(&mut self) { + self.stop(); + } +} + +struct TestFixture { + storage_dir: TempDir, + protocol_version: ProtocolVersion, + events: Vec, + first_event_id: Id, + server_join_handle: Option>, + server_stopper: ServerStopper, +} + +impl TestFixture { + /// Constructs a new `TestFixture` including `EVENT_COUNT` random events ready to be served. 
+ fn new(rng: &mut TestRng) -> Self { + const DISTINCT_EVENTS_COUNT: u32 = 7; + + let _ = logging::init(); + let storage_dir = tempfile::tempdir().unwrap(); + fs::create_dir_all(&storage_dir).unwrap(); + let protocol_version = ProtocolVersion::from_parts(1, 2, 3); + + let mut txns = HashMap::new(); + let events = (0..EVENT_COUNT) + .map(|i| match i % DISTINCT_EVENTS_COUNT { + 0 => SseData::random_block_added(rng), + 1 => { + let (event, txn) = SseData::random_transaction_accepted(rng); + assert!(txns.insert(txn.hash(), txn).is_none()); + event + } + 2 => SseData::random_transaction_processed(rng), + 3 => SseData::random_transaction_expired(rng), + 4 => SseData::random_fault(rng), + 5 => SseData::random_step(rng), + 6 => SseData::random_finality_signature(rng), + _ => unreachable!(), + }) + .collect(); + + TestFixture { + storage_dir, + protocol_version, + events, + first_event_id: 0, + server_join_handle: None, + server_stopper: ServerStopper::new(), + } + } + + /// Creates a new `EventStreamServer` and runs it in a tokio task, returning the actual address + /// the server is listening on. + /// + /// Only one server can be run at a time; this panics if there is already a server task running. + /// + /// The server emits a clone of each of the random events held by the `TestFixture`, in the + /// order in which they're held in the `TestFixture`. + /// + /// The server runs until `TestFixture::stop_server()` is called, or the `TestFixture` is + /// dropped. + async fn run_server(&mut self, server_behavior: ServerBehavior) -> SocketAddr { + if self.server_join_handle.is_some() { + panic!("one `TestFixture` can only run one server at a time"); + } + self.server_stopper = ServerStopper::new(); + + // Set the server to use a channel buffer of half the total events it will emit, unless + // we're running with no delay between events, in which case set a minimal buffer as we're + // trying to cause clients to get ejected for lagging. 
+ let config = Config { + event_stream_buffer_length: if server_behavior.has_delay_between_events { + BUFFER_LENGTH + } else { + 1 + }, + max_concurrent_subscribers: server_behavior + .max_concurrent_subscribers + .unwrap_or(Config::default().max_concurrent_subscribers), + ..Default::default() + }; + let mut server = EventStreamServer::new( + config, + self.storage_dir.path().to_path_buf(), + self.protocol_version, + ); + server.listen().unwrap(); + assert!(server.sse_server.is_some()); + + self.first_event_id = server + .sse_server + .as_ref() + .unwrap() + .event_indexer + .current_index(); + + let first_event_id = server + .sse_server + .as_ref() + .unwrap() + .event_indexer + .current_index(); + let server_address = server.sse_server.as_ref().unwrap().listening_address; + let events = self.events.clone(); + let server_stopper = self.server_stopper.clone(); + + let join_handle = tokio::spawn(async move { + let event_count = if server_behavior.repeat_events { + MAX_EVENT_COUNT + } else { + EVENT_COUNT + }; + for (id, event) in events.iter().cycle().enumerate().take(event_count as usize) { + if server_stopper.should_stop() { + debug!("stopping server early"); + return; + } + server_behavior + .wait_for_clients((id as Id).wrapping_add(first_event_id)) + .await; + let _ = server.broadcast(event.clone()); + server_behavior.sleep_if_required().await; + } + + // Keep the server running until told to stop. Clients connecting from now will only + // receive keepalives. + debug!("server finished sending all events"); + server_stopper.wait().await; + debug!("server stopped"); + }); + + self.server_join_handle = Some(join_handle); + + server_address + } + + /// Stops the currently-running server, if any, panicking if unable to stop the server within + /// `MAX_TEST_TIME`. + /// + /// Must be called and awaited before starting a new server with this particular `TestFixture`. 
+ /// + /// Should be called in every test where a server has been started, since this will ensure + /// failed tests won't hang indefinitely. + async fn stop_server(&mut self) { + let join_handle = match self.server_join_handle.take() { + Some(join_handle) => join_handle, + None => return, + }; + self.server_stopper.stop(); + time::timeout(MAX_TEST_TIME, join_handle) + .await + .expect("stopping server timed out (test hung)") + .expect("server task should not error"); + } + + /// Returns all the events which would have been received by a client, where the client + /// connected just before `from` was emitted from the server. This includes the initial + /// `ApiVersion` event. + /// + /// Also returns the last event's ID, + fn events_filtered_by_id(&self, from: Id) -> (Vec, Id) { + // Convert the IDs to `u128`s to cater for wrapping and add `Id::MAX + 1` to `from` if the + // buffer wrapped and `from` represents an event from after the wrap. + let threshold = Id::MAX - EVENT_COUNT; + let from = if self.first_event_id >= threshold && from < threshold { + from as u128 + Id::MAX as u128 + 1 + } else { + from as u128 + }; + + let id_filter = |id: u128, event: &SseData| -> Option { + if id < from { + return None; + } + + let data = match event { + SseData::TransactionAccepted { transaction } => { + serde_json::to_string(&TransactionAccepted { + transaction_accepted: Arc::clone(transaction), + }) + .unwrap() + } + _ => serde_json::to_string(event).unwrap(), + }; + + Some(ReceivedEvent { + id: Some(id as Id), + data, + }) + }; + + let api_version_event = ReceivedEvent { + id: None, + data: serde_json::to_string(&SseData::ApiVersion(self.protocol_version)).unwrap(), + }; + + let events: Vec<_> = iter::once(api_version_event) + .chain(self.events.iter().enumerate().filter_map(|(id, event)| { + let id = id as u128 + self.first_event_id as u128; + id_filter(id, event) + })) + .collect(); + + let final_id = events + .last() + .expect("should have events") + .id + 
.expect("should have ID"); + + (events, final_id) + } + + /// Returns all the events which would have been received by a client connected from server + /// startup, including the initial `ApiVersion` event. + /// + /// Also returns the last event's ID. + fn all_events(&self) -> (Vec, Id) { + self.events_filtered_by_id(self.first_event_id) + } +} + +/// Returns the URL for a client to use to connect to the server at the given address. +/// +/// The URL is `/events` with `?start_from=X` query string appended if +/// `maybe_start_from` is `Some`. +fn make_url(server_address: SocketAddr, maybe_start_from: Option) -> String { + format!( + "http://{}/{}/{}", + server_address, + ROOT_PATH, + match maybe_start_from { + Some(start_from) => format!("?{}={}", QUERY_FIELD, start_from), + None => String::new(), + } + ) +} + +/// The representation of an SSE event as received by a subscribed client. +#[derive(Clone, Debug, Eq, PartialEq)] +struct ReceivedEvent { + id: Option, + data: String, +} + +/// Runs a client, consuming all SSE events until the server has emitted the event with ID +/// `final_event_id`. +/// +/// If the client receives a keepalive (i.e. `:`), it panics, as the server has no further events to +/// emit. +/// +/// The client waits at the barrier before connecting to the server, and then again immediately +/// after connecting to ensure the server doesn't start sending events before the client is +/// connected. +async fn subscribe( + url: &str, + barrier: Arc, + final_event_id: Id, + client_id: &str, +) -> Result, reqwest::Error> { + debug!("{} waiting before connecting via {}", client_id, url); + barrier.wait().await; + let response = reqwest::get(url).await?; + debug!("{} waiting after connecting", client_id); + barrier.wait().await; + debug!("{} finished waiting", client_id); + handle_response(response, final_event_id, client_id).await +} + +/// Runs a client, consuming all SSE events until the server has emitted the event with ID +/// `final_event_id`. 
+/// +/// If the client receives a keepalive (i.e. `:`), it panics, as the server has no further events to +/// emit. +/// +/// There is no synchronization between client and server regarding the client joining. In most +/// tests such synchronization is required, in which case `subscribe()` should be used. +async fn subscribe_no_sync( + url: &str, + final_event_id: Id, + client_id: &str, +) -> Result, reqwest::Error> { + debug!("{} about to connect via {}", client_id, url); + let response = reqwest::get(url).await?; + debug!("{} has connected", client_id); + handle_response(response, final_event_id, client_id).await +} + +/// Converts some bytes to a `String`. +/// +/// If `maybe_previous_bytes` is `Some`, these bytes are prepended to `new_bytes`. If a string +/// cannot be constructed from the resulting bytes, the bytes are returned as an `Err`. +fn bytes_to_string( + maybe_previous_bytes: &mut Option, + new_bytes: Bytes, +) -> Result { + let bytes = if let Some(previous_bytes) = maybe_previous_bytes.take() { + Bytes::from_iter(previous_bytes.chain(new_bytes)) + } else { + new_bytes + }; + str::from_utf8(bytes.as_ref()) + .map(ToString::to_string) + .map_err(|_| bytes) +} + +/// Handles a response from the server. +async fn handle_response( + response: Response, + final_event_id: Id, + client_id: &str, +) -> Result, reqwest::Error> { + if response.status() == StatusCode::SERVICE_UNAVAILABLE { + debug!("{} rejected by server: too many clients", client_id); + assert_eq!( + response.text().await.unwrap(), + "server has reached limit of subscribers" + ); + return Ok(Vec::new()); + } + + // The stream from the server is not always chunked into events, so gather the stream into a + // single `String` until we receive a keepalive. 
+ let mut response_text = String::new(); + let mut stream = response.bytes_stream(); + let final_id_line = format!("id:{}", final_event_id); + let keepalive = ":"; + let mut temp_bytes: Option = None; + while let Some(item) = stream.next().await { + // If the server crashes or returns an error in the stream, it is caught here as `item` + // will be an `Err`. + let new_bytes = item?; + let chunk = match bytes_to_string(&mut temp_bytes, new_bytes) { + Ok(chunk) => chunk, + Err(bytes) => { + // We got a chunk splitting a unicode scalar value - dump the data to `temp_bytes` + // and get the next chunk from the stream. + temp_bytes = Some(bytes); + continue; + } + }; + response_text.push_str(&chunk); + if let Some(line) = response_text + .lines() + .find(|&line| line == final_id_line || line == keepalive) + { + if line == keepalive { + panic!("{} received keepalive", client_id); + } + debug!( + "{} received final event ID {}: exiting", + client_id, final_event_id + ); + break; + } + } + + Ok(parse_response(response_text, client_id)) +} + +/// Iterate the lines of the response body. Each line should be one of +/// * an SSE event: line starts with "data:" and the remainder of the line is a JSON object +/// * an SSE event ID: line starts with "id:" and the remainder is a decimal encoded `u32` +/// * empty +/// * a keepalive: line contains exactly ":" +/// +/// The expected order is: +/// * data: (note, no ID line follows this first event) then the +/// following three repeated for as many events as are applicable to that stream: +/// * data: +/// * id: +/// * empty line +/// +/// then finally, repeated keepalive lines until the server is shut down. 
+fn parse_response(response_text: String, client_id: &str) -> Vec { + let mut received_events = Vec::new(); + let mut line_itr = response_text.lines(); + while let Some(data_line) = line_itr.next() { + let data = match data_line.strip_prefix("data:") { + Some(data_str) => data_str.to_string(), + None => { + if data_line.trim().is_empty() || data_line.trim() == ":" { + continue; + } else { + panic!( + "{}: data line should start with 'data:'\n{}", + client_id, data_line + ) + } + } + }; + + let id_line = match line_itr.next() { + Some(line) => line, + None => break, + }; + + let id = match id_line.strip_prefix("id:") { + Some(id_str) => Some(id_str.parse().unwrap_or_else(|_| { + panic!("{}: failed to get ID line from:\n{}", client_id, id_line) + })), + None => { + if id_line.trim().is_empty() && received_events.is_empty() { + None + } else if id_line.trim() == ":" { + continue; + } else { + panic!( + "{}: every event must have an ID except the first one", + client_id + ); + } + } + }; + + received_events.push(ReceivedEvent { id, data }); + } + received_events +} + +/// Client setup: +/// * `/events` +/// * no `?start_from=` query +/// * connected before first event +/// +/// Expected to receive all events depending on `filter`. 
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn should_serve_events_with_no_query() { + let mut rng = crate::new_rng(); + let mut fixture = TestFixture::new(&mut rng); + + let mut server_behavior = ServerBehavior::new(); + let barrier = server_behavior.add_client_sync_before_event(0); + let server_address = fixture.run_server(server_behavior).await; + + let url = make_url(server_address, None); + let (expected_events, final_id) = fixture.all_events(); + let received_events = subscribe(&url, barrier, final_id, "client").await.unwrap(); + fixture.stop_server().await; + + assert_eq!(received_events, expected_events); +} + +/// Client setup: +/// * `/events?start_from=25` +/// * connected just before event ID 50 +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn should_serve_events_with_query() { + let mut rng = crate::new_rng(); + let mut fixture = TestFixture::new(&mut rng); + + let connect_at_event_id = BUFFER_LENGTH; + let start_from_event_id = BUFFER_LENGTH / 2; + + let mut server_behavior = ServerBehavior::new(); + let barrier = server_behavior.add_client_sync_before_event(connect_at_event_id); + let server_address = fixture.run_server(server_behavior).await; + + let url = make_url(server_address, Some(start_from_event_id)); + let (expected_events, final_id) = fixture.events_filtered_by_id(start_from_event_id); + let received_events = subscribe(&url, barrier, final_id, "client").await.unwrap(); + fixture.stop_server().await; + + assert_eq!(received_events, expected_events); +} + +/// Client setup: +/// * `/events?start_from=0` +/// * connected just before event ID 75 +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn should_serve_remaining_events_with_query() { + let mut rng = crate::new_rng(); + let mut fixture = TestFixture::new(&mut rng); + + let connect_at_event_id = BUFFER_LENGTH * 3 / 2; + let start_from_event_id = 0; + + let mut server_behavior = ServerBehavior::new(); + let barrier = 
server_behavior.add_client_sync_before_event(connect_at_event_id); + let server_address = fixture.run_server(server_behavior).await; + + let url = make_url(server_address, Some(start_from_event_id)); + let expected_first_event = connect_at_event_id - BUFFER_LENGTH; + let (expected_events, final_id) = fixture.events_filtered_by_id(expected_first_event); + let received_events = subscribe(&url, barrier, final_id, "client").await.unwrap(); + fixture.stop_server().await; + + assert_eq!(received_events, expected_events); +} + +/// Client setup: +/// * `/events?start_from=25` +/// * connected before first event +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn should_serve_events_with_query_for_future_event() { + let mut rng = crate::new_rng(); + let mut fixture = TestFixture::new(&mut rng); + + let mut server_behavior = ServerBehavior::new(); + let barrier = server_behavior.add_client_sync_before_event(0); + let server_address = fixture.run_server(server_behavior).await; + + let url = make_url(server_address, Some(25)); + let (expected_events, final_id) = fixture.all_events(); + let received_events = subscribe(&url, barrier, final_id, "client").await.unwrap(); + fixture.stop_server().await; + + assert_eq!(received_events, expected_events); +} + +/// Checks that when a server is shut down (e.g. for a node upgrade), connected clients don't have +/// an error while handling the HTTP response. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn server_exit_should_gracefully_shut_down_stream() { + let mut rng = crate::new_rng(); + let mut fixture = TestFixture::new(&mut rng); + + // Start the server, waiting for three clients to connect. 
+ let mut server_behavior = ServerBehavior::new(); + let barrier1 = server_behavior.add_client_sync_before_event(0); + let barrier2 = server_behavior.add_client_sync_before_event(0); + let barrier3 = server_behavior.add_client_sync_before_event(0); + let server_address = fixture.run_server(server_behavior).await; + + let url1 = make_url(server_address, None); + + // Run the three clients, and stop the server after a short delay. + let (received_events1, received_events2, received_events3, _) = join!( + subscribe(&url1, barrier1, EVENT_COUNT, "client 1"), + subscribe(&url1, barrier2, EVENT_COUNT, "client 2"), + subscribe(&url1, barrier3, EVENT_COUNT, "client 3"), + async { + time::sleep(DELAY_BETWEEN_EVENTS * EVENT_COUNT / 2).await; + fixture.stop_server().await + } + ); + + // Ensure all clients' streams terminated without error. + let received_events1 = received_events1.unwrap(); + let received_events2 = received_events2.unwrap(); + let received_events3 = received_events3.unwrap(); + + // Ensure all clients received some events... + assert!(!received_events1.is_empty()); + assert!(!received_events2.is_empty()); + assert!(!received_events3.is_empty()); + + // ...but not the full set they would have if the server hadn't stopped early. + assert!(received_events1.len() < fixture.all_events().0.len()); + assert!(received_events2.len() < fixture.all_events().0.len()); + assert!(received_events3.len() < fixture.all_events().0.len()); + + // Ensure all clients received a `Shutdown` event as the final one. + assert_eq!( + received_events1.last().unwrap().data, + serde_json::to_string(&SseData::Shutdown).unwrap() + ); + assert_eq!( + received_events2.last().unwrap().data, + serde_json::to_string(&SseData::Shutdown).unwrap() + ); + assert_eq!( + received_events3.last().unwrap().data, + serde_json::to_string(&SseData::Shutdown).unwrap() + ); +} + +/// Checks that clients which don't consume the events in a timely manner are forcibly disconnected +/// by the server. 
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn lagging_clients_should_be_disconnected() { + // Similar to the `subscribe()` function, except this has a long pause at the start and short + // pauses after each read. + // + // The objective is to create backpressure by filling the client's receive buffer, then filling + // the server's send buffer, which in turn causes the server's internal broadcast channel to + // deem that client as lagging. + async fn subscribe_slow( + url: &str, + barrier: Arc, + client_id: &str, + ) -> Result<(), reqwest::Error> { + barrier.wait().await; + let response = reqwest::get(url).await.unwrap(); + barrier.wait().await; + + time::sleep(Duration::from_secs(5)).await; + + let mut stream = response.bytes_stream(); + let pause_between_events = Duration::from_secs(100) / MAX_EVENT_COUNT; + let mut temp_bytes: Option = None; + while let Some(item) = stream.next().await { + // The function is expected to exit here with an `UnexpectedEof` error. + let new_bytes = item?; + let chunk = match bytes_to_string(&mut temp_bytes, new_bytes) { + Ok(chunk) => chunk, + Err(bytes) => { + temp_bytes = Some(bytes); + continue; + } + }; + if chunk.lines().any(|line| line == ":") { + debug!("{} received keepalive: exiting", client_id); + break; + } + time::sleep(pause_between_events).await; + } + + Ok(()) + } + + let mut rng = crate::new_rng(); + let mut fixture = TestFixture::new(&mut rng); + + // Start the server, setting it to run with no delay between sending each event. It will send + // at most `MAX_EVENT_COUNT` events, but the clients' futures should return before that, having + // been disconnected for lagging. + let mut server_behavior = ServerBehavior::new_for_lagging_test(); + let barrier = server_behavior.add_client_sync_before_event(0); + let server_address = fixture.run_server(server_behavior).await; + + let url = make_url(server_address, None); + + // Run the slow clients, then stop the server. 
+ let result_slow = subscribe_slow(&url, barrier, "client 1").await; + fixture.stop_server().await; + + // Ensure both slow clients' streams terminated with an `UnexpectedEof` error. + let check_error = |result: Result<(), reqwest::Error>| { + let kind = result + .unwrap_err() + .source() + .expect("reqwest::Error should have source") + .downcast_ref::() + .expect("reqwest::Error's source should be a hyper::Error") + .source() + .expect("hyper::Error should have source") + .downcast_ref::() + .expect("hyper::Error's source should be a std::io::Error") + .kind(); + assert!(matches!(kind, io::ErrorKind::UnexpectedEof)); + }; + check_error(result_slow); +} + +/// Checks that clients using the correct but wrong path get a helpful error response. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn should_handle_bad_url_path() { + let mut rng = crate::new_rng(); + let mut fixture = TestFixture::new(&mut rng); + + let server_address = fixture.run_server(ServerBehavior::new()).await; + + #[rustfmt::skip] + let urls = [ + format!("http://{}", server_address), + format!("http://{}?{}=0", server_address, QUERY_FIELD), + format!("http://{}/bad", server_address), + format!("http://{}/bad?{}=0", server_address, QUERY_FIELD), + format!("http://{}/{}?{}=0", server_address, QUERY_FIELD, ROOT_PATH), + format!("http://{}/{}/bad", server_address, ROOT_PATH), + format!("http://{}/{}/bad?{}=0", server_address, QUERY_FIELD, ROOT_PATH), + ]; + + let expected_body = format!("invalid path: expected '/{0}'", ROOT_PATH); + for url in &urls { + let response = reqwest::get(url).await.unwrap(); + assert_eq!(response.status(), StatusCode::NOT_FOUND, "URL: {}", url); + assert_eq!( + response.text().await.unwrap().trim(), + &expected_body, + "URL: {}", + url + ); + } + + fixture.stop_server().await; +} + +/// Checks that clients using the correct but wrong query get a helpful error +/// response. 
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn should_handle_bad_url_query() { + let mut rng = crate::new_rng(); + let mut fixture = TestFixture::new(&mut rng); + + let server_address = fixture.run_server(ServerBehavior::new()).await; + + let url = format!("http://{}/{}", server_address, ROOT_PATH); + let urls = [ + format!("{}?not-a-kv-pair", url), + format!("{}?start_fro=0", url), + format!("{}?{}=not-integer", url, QUERY_FIELD), + format!("{}?{}='0'", url, QUERY_FIELD), + format!("{}?{}=0&extra=1", url, QUERY_FIELD), + ]; + + let expected_body = format!( + "invalid query: expected single field '{}='", + QUERY_FIELD + ); + for url in &urls { + let response = reqwest::get(url).await.unwrap(); + assert_eq!( + response.status(), + StatusCode::UNPROCESSABLE_ENTITY, + "URL: {}", + url + ); + assert_eq!( + response.text().await.unwrap().trim(), + &expected_body, + "URL: {}", + url + ); + } + + fixture.stop_server().await; +} + +/// Check that a server which restarts continues from the previous numbering of event IDs. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn should_persist_event_ids() { + let mut rng = crate::new_rng(); + let mut fixture = TestFixture::new(&mut rng); + + let first_run_final_id = { + // Run the first server to emit the 100 events. + let mut server_behavior = ServerBehavior::new(); + let barrier = server_behavior.add_client_sync_before_event(0); + let server_address = fixture.run_server(server_behavior).await; + + // Consume these and stop the server. + let url = make_url(server_address, None); + let (_expected_events, final_id) = fixture.all_events(); + let _ = subscribe(&url, barrier, final_id, "client 1") + .await + .unwrap(); + fixture.stop_server().await; + final_id + }; + + assert!(first_run_final_id > 0); + + { + // Start a new server with a client barrier set for just before event ID 100 + 1 (the extra + // event being the `Shutdown`). 
+ let mut server_behavior = ServerBehavior::new(); + let barrier = server_behavior.add_client_sync_before_event(EVENT_COUNT + 1); + let server_address = fixture.run_server(server_behavior).await; + + // Check the test fixture has set the server's first event ID to at least + // `first_run_final_id`. + assert!(fixture.first_event_id >= first_run_final_id); + + // Consume the events and assert their IDs are all >= `first_run_final_id`. + let url = make_url(server_address, None); + let (expected_events, final_id) = fixture.events_filtered_by_id(EVENT_COUNT + 1); + let received_events = subscribe(&url, barrier, final_id, "client 2") + .await + .unwrap(); + fixture.stop_server().await; + + assert_eq!(received_events, expected_events); + assert!(received_events + .iter() + .skip(1) + .all(|event| event.id.unwrap() >= first_run_final_id)); + } +} + +/// Check that a server handles wrapping round past the maximum value for event IDs. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn should_handle_wrapping_past_max_event_id() { + let mut rng = crate::new_rng(); + let mut fixture = TestFixture::new(&mut rng); + + // Set up an `EventIndexer` cache file as if the server previously stopped at an event with ID + // just less than the maximum. + let start_index = Id::MAX - (BUFFER_LENGTH / 2); + fs::write( + fixture.storage_dir.path().join("sse_index"), + start_index.to_le_bytes(), + ) + .unwrap(); + + // Set up a client which will connect at the start of the stream, and another two for once the + // IDs have wrapped past the maximum value. 
+ let mut server_behavior = ServerBehavior::new(); + let barrier1 = server_behavior.add_client_sync_before_event(start_index); + let barrier2 = server_behavior.add_client_sync_before_event(BUFFER_LENGTH / 2); + let barrier3 = server_behavior.add_client_sync_before_event(BUFFER_LENGTH / 2); + let server_address = fixture.run_server(server_behavior).await; + assert_eq!(fixture.first_event_id, start_index); + + // The first client doesn't need a query string, but the second will request to start from an ID + // from before they wrapped past the maximum value, and the third from event 0. + let url1 = make_url(server_address, None); + let url2 = make_url(server_address, Some(start_index + 1)); + let url3 = make_url(server_address, Some(0)); + let (expected_events1, final_id1) = fixture.all_events(); + let (expected_events2, final_id2) = fixture.events_filtered_by_id(start_index + 1); + let (expected_events3, final_id3) = fixture.events_filtered_by_id(0); + let (received_events1, received_events2, received_events3) = join!( + subscribe(&url1, barrier1, final_id1, "client 1"), + subscribe(&url2, barrier2, final_id2, "client 2"), + subscribe(&url3, barrier3, final_id3, "client 3"), + ); + fixture.stop_server().await; + + assert_eq!(received_events1.unwrap(), expected_events1); + assert_eq!(received_events2.unwrap(), expected_events2); + assert_eq!(received_events3.unwrap(), expected_events3); +} + +/// Checks that a server rejects new clients with an HTTP 503 when it already has the specified +/// limit of connected clients. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn should_limit_concurrent_subscribers() { + let mut rng = crate::new_rng(); + let mut fixture = TestFixture::new(&mut rng); + + // Start the server with `max_concurrent_subscribers == 3`, and set to wait for three clients to + // connect at event 0 and another three at event 1. 
+ let mut server_behavior = ServerBehavior::new(); + server_behavior.set_max_concurrent_subscribers(3); + let barrier1 = server_behavior.add_client_sync_before_event(0); + let barrier2 = server_behavior.add_client_sync_before_event(0); + let barrier3 = server_behavior.add_client_sync_before_event(0); + let barrier4 = server_behavior.add_client_sync_before_event(1); + let barrier5 = server_behavior.add_client_sync_before_event(1); + let barrier6 = server_behavior.add_client_sync_before_event(1); + let server_address = fixture.run_server(server_behavior).await; + + let url = make_url(server_address, None); + + let (expected_events, final_id) = fixture.all_events(); + + // Run the six clients. + let ( + received_events_1, + received_events_2, + received_events_3, + empty_events_1, + empty_events_2, + empty_events_3, + ) = join!( + subscribe(&url, barrier1, final_id, "client 1"), + subscribe(&url, barrier2, final_id, "client 2"), + subscribe(&url, barrier3, final_id, "client 3"), + subscribe(&url, barrier4, final_id, "client 4"), + subscribe(&url, barrier5, final_id, "client 5"), + subscribe(&url, barrier6, final_id, "client 6"), + ); + + // Check the first three received all expected events. + assert_eq!(received_events_1.unwrap(), expected_events); + assert_eq!(received_events_2.unwrap(), expected_events); + assert_eq!(received_events_3.unwrap(), expected_events); + + // Check the second three received no events. + assert!(empty_events_1.unwrap().is_empty()); + assert!(empty_events_2.unwrap().is_empty()); + assert!(empty_events_3.unwrap().is_empty()); + + // Check that now the first clients have all disconnected, three new clients can connect. Have + // them start from event 80 to allow them to actually pull some events off the stream (as the + // server has by now stopped creating any new events). 
+ let start_id = EVENT_COUNT - 20; + + let url = make_url(server_address, Some(start_id)); + + let (expected_events, final_id) = fixture.events_filtered_by_id(start_id); + + let received_events = subscribe_no_sync(&url, final_id, "client 7").await; + + // Check the last three clients' received events are as expected. + assert_eq!(received_events.unwrap(), expected_events); + + fixture.stop_server().await; +} + +/// Rather than being a test proper, this is more a means to easily determine differences between +/// versions of the events emitted by the SSE server by comparing the contents of +/// `resources/test/sse_data_schema.json` across different versions of the codebase. +#[test] +fn json_schema_check() { + let schema_path = format!( + "{}/../resources/test/sse_data_schema.json", + env!("CARGO_MANIFEST_DIR") + ); + let pretty = serde_json::to_string_pretty(&schema_for!(SseData)).unwrap(); + assert_schema(schema_path, pretty); +} diff --git a/node/src/components/fetcher.rs b/node/src/components/fetcher.rs index dcf9d6b1d8..4f84f78273 100644 --- a/node/src/components/fetcher.rs +++ b/node/src/components/fetcher.rs @@ -1,319 +1,99 @@ mod config; +mod error; mod event; +mod fetch_item; +mod fetch_response; +mod fetched_data; +mod fetcher_impls; +mod item_fetcher; +mod item_handle; mod metrics; +mod tag; mod tests; use std::{collections::HashMap, fmt::Debug, time::Duration}; use datasize::DataSize; use prometheus::Registry; -use smallvec::smallvec; -use tracing::{debug, error, info}; - -use casper_execution_engine::{ - shared::{newtypes::Blake2bHash, stored_value::StoredValue}, - storage::trie::Trie, -}; -use casper_types::Key; +use tracing::trace; use crate::{ - components::{fetcher::event::FetchResponder, Component}, + components::Component, effect::{ - requests::{ContractRuntimeRequest, LinearChainRequest, NetworkRequest, StorageRequest}, - EffectBuilder, EffectExt, Effects, + announcements::{ + FetchedNewBlockAnnouncement, FetchedNewFinalitySignatureAnnouncement, 
+ PeerBehaviorAnnouncement, + }, + requests::{ + BlockAccumulatorRequest, ContractRuntimeRequest, FetcherRequest, NetworkRequest, + StorageRequest, + }, + EffectBuilder, EffectExt, Effects, Responder, }, protocol::Message, - types::{Block, BlockByHeight, BlockHash, Deploy, DeployHash, Item, NodeId}, + types::NodeId, utils::Source, NodeRng, }; -pub use config::Config; -pub use event::{Event, FetchResult}; -use metrics::FetcherMetrics; - -/// A helper trait constraining `Fetcher` compatible reactor events. -pub trait ReactorEventT: - From> - + From> - + From - + From - // Won't be needed when we implement "get block by height" feature in storage. - + From> - + Send - + 'static -where - T: Item + 'static, - ::Id: 'static, -{ -} - -impl ReactorEventT for REv -where - T: Item + 'static, - ::Id: 'static, - REv: From> - + From> - + From - + From - + From> - + Send - + 'static, -{ -} - -pub trait ItemFetcher { - fn responders(&mut self) -> &mut HashMap>>>; - - fn peer_timeout(&self) -> Duration; - - /// We've been asked to fetch the item by another component of this node. We'll try to get it - /// from our own storage component first, and if that fails, we'll send a request to `peer` for - /// the item. - fn fetch>( - &mut self, - effect_builder: EffectBuilder, - id: T::Id, - peer: NodeId, - responder: FetchResponder, - ) -> Effects> { - // Capture responder for later signalling. - let responders = self.responders(); - responders - .entry(id) - .or_default() - .entry(peer) - .or_default() - .push(responder); - - // Get the item from the storage component. - self.get_from_storage(effect_builder, id, peer) - } - - // Handles attempting to get the item from storage. - fn get_from_storage>( - &mut self, - effect_builder: EffectBuilder, - id: T::Id, - peer: NodeId, - ) -> Effects>; - - /// Handles the `Ok` case for a `Result` of attempting to get the item from the storage - /// component in order to send it to the requester. 
- fn got_from_storage(&mut self, item: T, peer: NodeId) -> Effects> { - self.signal( - item.id(), - Some(FetchResult::FromStorage(Box::new(item))), - peer, - ) - } - - /// Handles the `Err` case for a `Result` of attempting to get the item from the storage - /// component. - fn failed_to_get_from_storage>( - &mut self, - effect_builder: EffectBuilder, - id: T::Id, - peer: NodeId, - ) -> Effects> { - match Message::new_get_request::(&id) { - Ok(message) => { - let mut effects = effect_builder.send_message(peer, message).ignore(); - - effects.extend( - effect_builder - .set_timeout(self.peer_timeout()) - .event(move |_| Event::TimeoutPeer { id, peer }), - ); - - effects - } - Err(error) => { - error!("failed to construct get request: {}", error); - self.signal(id, None, peer) - } - } - } - - /// Handles signalling responders with the item or `None`. - fn signal( - &mut self, - id: T::Id, - result: Option>, - peer: NodeId, - ) -> Effects> { - let mut effects = Effects::new(); - let mut all_responders = self.responders().remove(&id).unwrap_or_default(); - match result { - Some(ret) => { - // signal all responders waiting for this item - for (_, responders) in all_responders { - for responder in responders { - effects.extend(responder.respond(Some(ret.clone())).ignore()); - } - } - } - None => { - // remove only the peer specific responders for this id - if let Some(responders) = all_responders.remove(&peer) { - for responder in responders { - effects.extend(responder.respond(None).ignore()); - } - } - if !all_responders.is_empty() { - self.responders().insert(id, all_responders); - } - } - } - effects - } -} - -/// The component which fetches an item from local storage or asks a peer if it's not in storage. 
+pub(crate) use config::Config; +pub(crate) use error::Error; +pub(crate) use event::Event; +pub(crate) use fetch_item::{EmptyValidationMetadata, FetchItem}; +pub(crate) use fetch_response::FetchResponse; +pub(crate) use fetched_data::FetchedData; +use item_fetcher::{ItemFetcher, StoringState}; +use item_handle::ItemHandle; +use metrics::Metrics; +pub(crate) use tag::Tag; + +pub(crate) type FetchResult = Result, Error>; +pub(crate) type FetchResponder = Responder>; + +/// The component which fetches an item from local component(s) or asks a peer if it's not +/// available locally. #[derive(DataSize, Debug)] -pub struct Fetcher +pub(crate) struct Fetcher where - T: Item + 'static, + T: FetchItem, { get_from_peer_timeout: Duration, - responders: HashMap>>>, + item_handles: HashMap>>, + #[data_size(skip)] + name: &'static str, #[data_size(skip)] - metrics: FetcherMetrics, + metrics: Metrics, } -impl Fetcher { +impl Fetcher { pub(crate) fn new( - name: &str, - config: Config, + name: &'static str, + config: &Config, registry: &Registry, ) -> Result { Ok(Fetcher { - get_from_peer_timeout: Duration::from_secs(config.get_from_peer_timeout()), - responders: HashMap::new(), - metrics: FetcherMetrics::new(name, registry)?, + get_from_peer_timeout: config.get_from_peer_timeout().into(), + item_handles: HashMap::new(), + name, + metrics: Metrics::new(name, registry)?, }) } } -impl ItemFetcher for Fetcher { - fn responders( - &mut self, - ) -> &mut HashMap>>> { - &mut self.responders - } - - fn peer_timeout(&self) -> Duration { - self.get_from_peer_timeout - } - - /// Gets a `Deploy` from the storage component. 
- fn get_from_storage>( - &mut self, - effect_builder: EffectBuilder, - id: DeployHash, - peer: NodeId, - ) -> Effects> { - effect_builder - .get_deploys_from_storage(smallvec![id]) - .event(move |mut results| Event::GetFromStorageResult { - id, - peer, - maybe_item: Box::new(results.pop().expect("can only contain one result")), - }) - } -} - -impl ItemFetcher for Fetcher { - fn responders( - &mut self, - ) -> &mut HashMap>>> { - &mut self.responders - } - - fn peer_timeout(&self) -> Duration { - self.get_from_peer_timeout - } - - fn get_from_storage>( - &mut self, - effect_builder: EffectBuilder, - id: BlockHash, - peer: NodeId, - ) -> Effects> { - effect_builder - .get_block_from_storage(id) - .event(move |result| Event::GetFromStorageResult { - id, - peer, - maybe_item: Box::new(result), - }) - } -} - -impl ItemFetcher for Fetcher { - fn responders( - &mut self, - ) -> &mut HashMap>>> { - &mut self.responders - } - - fn peer_timeout(&self) -> Duration { - self.get_from_peer_timeout - } - - fn get_from_storage>( - &mut self, - effect_builder: EffectBuilder, - id: u64, - peer: NodeId, - ) -> Effects> { - effect_builder - .get_block_at_height_from_storage(id) - .event(move |result| Event::GetFromStorageResult { - id, - peer, - maybe_item: Box::new(result.map(Into::into)), - }) - } -} - -type GlobalStorageTrie = Trie; - -impl ItemFetcher for Fetcher { - fn responders( - &mut self, - ) -> &mut HashMap>>> { - &mut self.responders - } - - fn peer_timeout(&self) -> Duration { - self.get_from_peer_timeout - } - - fn get_from_storage>( - &mut self, - effect_builder: EffectBuilder, - id: Blake2bHash, - peer: NodeId, - ) -> Effects> { - effect_builder - .read_trie(id) - .event(move |maybe_trie| Event::GetFromStorageResult { - id, - peer, - maybe_item: Box::new(maybe_trie), - }) - } -} - impl Component for Fetcher where Fetcher: ItemFetcher, - T: Item + 'static, - REv: ReactorEventT, + T: FetchItem + 'static, + REv: From + + From + + From + + From> + + From + + From + + From 
+ + Send, { type Event = Event; - type ConstructionError = prometheus::Error; fn handle_event( &mut self, @@ -321,47 +101,82 @@ where _rng: &mut NodeRng, event: Self::Event, ) -> Effects { - debug!(?event, "handling event"); + trace!(?event, "Fetcher: handling event"); match event { - Event::Fetch { + Event::Fetch(FetcherRequest { id, peer, + validation_metadata, responder, - } => self.fetch(effect_builder, id, peer, responder), - Event::GetFromStorageResult { + }) => self.fetch(effect_builder, id, peer, validation_metadata, responder), + Event::GetLocallyResult { id, peer, + validation_metadata, maybe_item, - } => match *maybe_item { + responder, + } => match maybe_item { Some(item) => { - self.metrics.found_in_storage.inc(); - self.got_from_storage(item, peer) + self.metrics().found_in_storage.inc(); + responder + .respond(Ok(FetchedData::from_storage(item))) + .ignore() } - None => self.failed_to_get_from_storage(effect_builder, id, peer), + None => self.failed_to_get_locally( + effect_builder, + id, + peer, + validation_metadata, + responder, + ), }, - Event::GotRemotely { item, source } => { - match source { - Source::Peer(peer) => { - self.metrics.found_on_peer.inc(); - self.signal(item.id(), Some(FetchResult::FromPeer(item, peer)), peer) - } - Source::Client | Source::Ourself => { - // TODO - we could possibly also handle this case - Effects::new() - } + Event::GotRemotely { item, source } => match source { + Source::PeerGossiped(peer) | Source::Peer(peer) => { + self.got_from_peer(effect_builder, peer, item) } - } - // We do nothing in the case of having an incoming deploy rejected. - Event::RejectedRemotely { .. } => Effects::new(), + Source::Client | Source::SpeculativeExec | Source::Ourself => Effects::new(), + }, + Event::GotInvalidRemotely { .. 
} => Effects::new(), Event::AbsentRemotely { id, peer } => { - info!(%id, %peer, "element absent on the remote node"); - self.signal(id, None, peer) + trace!(TAG=%T::TAG, %id, %peer, "item absent on the remote node"); + self.signal( + id.clone(), + Err(Error::Absent { + id: Box::new(id), + peer, + }), + peer, + ) + } + Event::RejectedRemotely { id, peer } => { + trace!(TAG=%T::TAG, %id, %peer, "peer rejected fetch request"); + self.signal( + id.clone(), + Err(Error::Rejected { + id: Box::new(id), + peer, + }), + peer, + ) } - Event::TimeoutPeer { id, peer } => { - info!(%id, %peer, "request timed out"); - self.metrics.timeouts.inc(); - self.signal(id, None, peer) + Event::TimeoutPeer { id, peer } => self.signal( + id.clone(), + Err(Error::TimedOut { + id: Box::new(id), + peer, + }), + peer, + ), + Event::PutToStorage { item, peer } => { + let mut effects = + Self::announce_fetched_new_item(effect_builder, (*item).clone(), peer).ignore(); + effects.extend(self.signal(item.fetch_id(), Ok(*item), peer)); + effects } } } + + fn name(&self) -> &str { + self.name + } } diff --git a/node/src/components/fetcher/config.rs b/node/src/components/fetcher/config.rs index b690171202..c256c23bca 100644 --- a/node/src/components/fetcher/config.rs +++ b/node/src/components/fetcher/config.rs @@ -1,16 +1,21 @@ +use std::str::FromStr; + use datasize::DataSize; use serde::{Deserialize, Serialize}; -const DEFAULT_GET_FROM_PEER_TIMEOUT_SECS: u64 = 3; +use casper_types::TimeDiff; + +const DEFAULT_GET_FROM_PEER_TIMEOUT: &str = "3sec"; /// Configuration options for fetching. #[derive(Copy, Clone, DataSize, Debug, Deserialize, Serialize)] pub struct Config { - get_from_peer_timeout: u64, + get_from_peer_timeout: TimeDiff, } impl Config { - pub(crate) fn get_from_peer_timeout(&self) -> u64 { + /// Returns `get_from_peer` timeout. 
+ pub fn get_from_peer_timeout(&self) -> TimeDiff { self.get_from_peer_timeout } } @@ -18,7 +23,7 @@ impl Config { impl Default for Config { fn default() -> Self { Config { - get_from_peer_timeout: DEFAULT_GET_FROM_PEER_TIMEOUT_SECS, + get_from_peer_timeout: TimeDiff::from_str(DEFAULT_GET_FROM_PEER_TIMEOUT).unwrap(), } } } diff --git a/node/src/components/fetcher/error.rs b/node/src/components/fetcher/error.rs new file mode 100644 index 0000000000..e548d03c89 --- /dev/null +++ b/node/src/components/fetcher/error.rs @@ -0,0 +1,84 @@ +use datasize::DataSize; +use serde::Serialize; +use thiserror::Error; +use tracing::error; + +use crate::{components::fetcher::FetchItem, types::NodeId}; + +#[derive(Clone, Debug, Error, PartialEq, Eq, Serialize)] +pub(crate) enum Error { + #[error("item with id {id:?} absent on peer {peer:?}")] + Absent { id: Box, peer: NodeId }, + + #[error("peer {peer:?} rejected fetch request for item with id {id:?}")] + Rejected { id: Box, peer: NodeId }, + + #[error("timed out getting item with id {id:?} from peer {peer:?}")] + TimedOut { id: Box, peer: NodeId }, + + #[error("could not construct get request for item with id {id:?} for peer {peer:?}")] + CouldNotConstructGetRequest { id: Box, peer: NodeId }, + + #[error( + "ongoing fetch for {id} from {peer} has different validation metadata ({current:?}) to \ + that given in new fetch attempt ({new:?})" + )] + ValidationMetadataMismatch { + id: Box, + peer: NodeId, + current: Box, + new: Box, + }, +} + +impl Error { + pub(crate) fn is_peer_fault(&self) -> bool { + match self { + // The peer claimed to have the item, so it should not be absent. + Error::Absent { .. } | Error::Rejected { .. } | Error::TimedOut { .. } => true, + Error::CouldNotConstructGetRequest { .. } + | Error::ValidationMetadataMismatch { .. } => false, + } + } + + pub(crate) fn id(&self) -> &T::Id { + match self { + Error::Absent { id, .. } => id, + Error::Rejected { id, .. } => id, + Error::TimedOut { id, .. 
} => id, + Error::CouldNotConstructGetRequest { id, .. } => id, + Error::ValidationMetadataMismatch { id, .. } => id, + } + } + + pub(crate) fn peer(&self) -> &NodeId { + match self { + Error::Absent { peer, .. } + | Error::Rejected { peer, .. } + | Error::TimedOut { peer, .. } + | Error::CouldNotConstructGetRequest { peer, .. } + | Error::ValidationMetadataMismatch { peer, .. } => peer, + } + } +} + +impl DataSize for Error +where + T::Id: DataSize, +{ + const IS_DYNAMIC: bool = ::IS_DYNAMIC; + + const STATIC_HEAP_SIZE: usize = ::STATIC_HEAP_SIZE; + + fn estimate_heap_size(&self) -> usize { + match self { + Error::Absent { id, .. } + | Error::Rejected { id, .. } + | Error::TimedOut { id, .. } + | Error::CouldNotConstructGetRequest { id, .. } => id.estimate_heap_size(), + Error::ValidationMetadataMismatch { + id, current, new, .. + } => id.estimate_heap_size() + current.estimate_heap_size() + new.estimate_heap_size(), + } + } +} diff --git a/node/src/components/fetcher/event.rs b/node/src/components/fetcher/event.rs index 0cc0fbbd91..5401c79a22 100644 --- a/node/src/components/fetcher/event.rs +++ b/node/src/components/fetcher/event.rs @@ -1,99 +1,101 @@ -use std::fmt::{self, Display, Formatter}; +use std::fmt::{self, Debug, Display, Formatter}; -use datasize::DataSize; use serde::Serialize; +use tracing::error; -use super::Item; +use casper_types::Transaction; + +use super::{FetchItem, FetchResponder, FetchResponse}; use crate::{ - effect::{announcements::DeployAcceptorAnnouncement, requests::FetcherRequest, Responder}, - types::{Deploy, NodeId}, + effect::{announcements::TransactionAcceptorAnnouncement, requests::FetcherRequest}, + types::NodeId, utils::Source, }; -#[derive(Clone, DataSize, Debug, PartialEq)] -pub enum FetchResult { - FromStorage(Box), - FromPeer(Box, I), -} - -pub(crate) type FetchResponder = Responder>>; - /// `Fetcher` events. 
#[derive(Debug, Serialize)] -pub enum Event { +pub(crate) enum Event { /// The initiating event to fetch an item by its id. - Fetch { - id: T::Id, - peer: NodeId, - responder: FetchResponder, - }, + Fetch(FetcherRequest), /// The result of the `Fetcher` getting a item from the storage component. If the /// result is `None`, the item should be requested from the peer. - GetFromStorageResult { + GetLocallyResult { id: T::Id, peer: NodeId, - maybe_item: Box>, + validation_metadata: Box, + maybe_item: Option>, + responder: FetchResponder, }, /// An announcement from a different component that we have accepted and stored the given item. - GotRemotely { - item: Box, - source: Source, - }, + GotRemotely { item: Box, source: Source }, + /// The result of putting the item to storage. + PutToStorage { item: Box, peer: NodeId }, /// A different component rejected an item. - // TODO: If having this event is not desirable, the `DeployAcceptorAnnouncement` needs to be - // split in two instead. - RejectedRemotely { - item: Box, - source: Source, - }, + GotInvalidRemotely { id: T::Id, source: Source }, /// An item was not available on the remote peer. AbsentRemotely { id: T::Id, peer: NodeId }, + /// An item was available on the remote peer, but it chose to not provide it. + RejectedRemotely { id: T::Id, peer: NodeId }, /// The timeout has elapsed and we should clean up state. 
TimeoutPeer { id: T::Id, peer: NodeId }, } -impl From> for Event { - fn from(request: FetcherRequest) -> Self { - match request { - FetcherRequest::Fetch { - id, - peer, - responder, - } => Event::Fetch { - id, - peer, - responder, - }, +impl Event { + pub(crate) fn from_get_response_serialized_item( + peer: NodeId, + serialized_item: &[u8], + ) -> Option { + match bincode::deserialize::>(serialized_item) { + Ok(FetchResponse::Fetched(item)) => Some(Event::GotRemotely { + item: Box::new(item), + source: Source::Peer(peer), + }), + Ok(FetchResponse::NotFound(id)) => Some(Event::AbsentRemotely { id, peer }), + Ok(FetchResponse::NotProvided(id)) => Some(Event::RejectedRemotely { id, peer }), + Err(error) => { + error!("failed to decode {:?} from {}: {:?}", T::TAG, peer, error); + None + } } } } -// A deploy fetcher knows how to update its state if deploys are coming in via the deploy acceptor. -impl From> for Event { - #[inline] - fn from(announcement: DeployAcceptorAnnouncement) -> Self { +impl From> for Event { + fn from(fetcher_request: FetcherRequest) -> Self { + Event::Fetch(fetcher_request) + } +} + +// A transaction fetcher knows how to update its state if transactions are coming in via the +// transaction acceptor. 
+impl From for Event { + fn from(announcement: TransactionAcceptorAnnouncement) -> Self { match announcement { - DeployAcceptorAnnouncement::AcceptedNewDeploy { deploy, source } => { - Event::GotRemotely { - item: deploy, - source, - } - } - DeployAcceptorAnnouncement::InvalidDeploy { deploy, source } => { - Event::RejectedRemotely { - item: deploy, - source, - } - } + TransactionAcceptorAnnouncement::AcceptedNewTransaction { + transaction, + source, + } => Event::GotRemotely { + item: Box::new((*transaction).clone()), + source, + }, + TransactionAcceptorAnnouncement::InvalidTransaction { + transaction, + source, + } => Event::GotInvalidRemotely { + id: transaction.fetch_id(), + source, + }, } } } -impl Display for Event { +impl Display for Event { fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { match self { - Event::Fetch { id, .. } => write!(formatter, "request to fetch item at hash {}", id), - Event::GetFromStorageResult { id, maybe_item, .. } => { + Event::Fetch(FetcherRequest { id, .. }) => { + write!(formatter, "request to fetch item at hash {}", id) + } + Event::GetLocallyResult { id, maybe_item, .. 
} => { if maybe_item.is_some() { write!(formatter, "got {} from storage", id) } else { @@ -101,21 +103,28 @@ impl Display for Event { } } Event::GotRemotely { item, source } => { - write!(formatter, "got {} from {}", item.id(), source) + write!(formatter, "got {} from {}", item.fetch_id(), source) + } + Event::GotInvalidRemotely { id, source } => { + write!(formatter, "invalid item {} from {}", id, source) } - Event::RejectedRemotely { item, source } => write!( - formatter, - "other component rejected {} from {}", - item.id(), - source - ), Event::TimeoutPeer { id, peer } => write!( formatter, "check get from peer timeout for {} with {}", id, peer ), Event::AbsentRemotely { id, peer } => { - write!(formatter, "Item {} was not available on {}", id, peer) + write!(formatter, "item {} was not available on {}", id, peer) + } + Event::RejectedRemotely { id, peer } => { + write!( + formatter, + "request to fetch item {} was rejected by {}", + id, peer + ) + } + Event::PutToStorage { item, .. } => { + write!(formatter, "item {} was put to storage", item.fetch_id()) } } } diff --git a/node/src/components/fetcher/fetch_item.rs b/node/src/components/fetcher/fetch_item.rs new file mode 100644 index 0000000000..34f45acf38 --- /dev/null +++ b/node/src/components/fetcher/fetch_item.rs @@ -0,0 +1,40 @@ +use std::{ + error::Error as StdError, + fmt::{self, Debug, Display, Formatter}, + hash::Hash, +}; + +use datasize::DataSize; +use serde::{de::DeserializeOwned, Serialize}; + +use super::Tag; + +#[derive(Clone, Copy, Eq, PartialEq, Serialize, Debug, DataSize)] +pub(crate) struct EmptyValidationMetadata; + +impl Display for EmptyValidationMetadata { + fn fmt(&self, formatter: &mut Formatter<'_>) -> Result<(), fmt::Error> { + write!(formatter, "no validation metadata") + } +} + +/// A trait which allows an implementing type to be used by a fetcher component. 
+pub(crate) trait FetchItem: + Clone + Serialize + DeserializeOwned + Send + Sync + Debug + Display + Eq +{ + /// The type of ID of the item. + type Id: Clone + Eq + Hash + Serialize + DeserializeOwned + Send + Sync + Debug + Display; + /// The error type returned when validating to get the ID of the item. + type ValidationError: StdError + Debug + Display; + /// The type of the metadata provided when validating the item. + type ValidationMetadata: Eq + Clone + Serialize + Debug + DataSize + Send; + + /// The tag representing the type of the item. + const TAG: Tag; + + /// The ID of the specific item. + fn fetch_id(&self) -> Self::Id; + + /// Checks validity of the item, and returns an error if invalid. + fn validate(&self, metadata: &Self::ValidationMetadata) -> Result<(), Self::ValidationError>; +} diff --git a/node/src/components/fetcher/fetch_response.rs b/node/src/components/fetcher/fetch_response.rs new file mode 100644 index 0000000000..23f37b6872 --- /dev/null +++ b/node/src/components/fetcher/fetch_response.rs @@ -0,0 +1,66 @@ +use serde::{Deserialize, Serialize}; + +/// Message to be returned by a peer. Indicates if the item could be fetched or not. +#[derive(Debug, Serialize, Deserialize, strum::EnumDiscriminants)] +#[strum_discriminants(derive(strum::EnumIter))] +pub enum FetchResponse { + /// The requested item. + Fetched(T), + /// The sender does not have the requested item available. + NotFound(Id), + /// The sender chose to not provide the requested item. + NotProvided(Id), +} + +impl FetchResponse { + /// Constructs a fetched or not found from an option and an id. + pub(crate) fn from_opt(id: Id, item: Option) -> Self { + match item { + Some(item) => FetchResponse::Fetched(item), + None => FetchResponse::NotFound(id), + } + } + + /// Returns whether this response is a positive (fetched / "found") one. 
+ pub(crate) fn was_found(&self) -> bool { + matches!(self, FetchResponse::Fetched(_)) + } +} + +impl FetchResponse +where + Self: Serialize, +{ + /// The canonical serialization for the inner encoding of the `FetchResponse` response (see + /// [`Message::GetResponse`]). + pub(crate) fn to_serialized(&self) -> Result, bincode::Error> { + bincode::serialize(self) + } +} + +mod specimen_support { + use crate::utils::specimen::{largest_variant, Cache, LargestSpecimen, SizeEstimator}; + use serde::Serialize; + + use super::{FetchResponse, FetchResponseDiscriminants}; + + impl LargestSpecimen + for FetchResponse + { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + largest_variant::(estimator, |variant| { + match variant { + FetchResponseDiscriminants::Fetched => { + FetchResponse::Fetched(LargestSpecimen::largest_specimen(estimator, cache)) + } + FetchResponseDiscriminants::NotFound => { + FetchResponse::NotFound(LargestSpecimen::largest_specimen(estimator, cache)) + } + FetchResponseDiscriminants::NotProvided => FetchResponse::NotProvided( + LargestSpecimen::largest_specimen(estimator, cache), + ), + } + }) + } + } +} diff --git a/node/src/components/fetcher/fetched_data.rs b/node/src/components/fetcher/fetched_data.rs new file mode 100644 index 0000000000..3e27a9d8b8 --- /dev/null +++ b/node/src/components/fetcher/fetched_data.rs @@ -0,0 +1,63 @@ +use std::fmt::{self, Display, Formatter}; + +use datasize::DataSize; +use serde::Serialize; + +use crate::{components::fetcher::FetchItem, types::NodeId}; + +#[derive(Clone, DataSize, Debug, PartialEq, Serialize)] +pub(crate) enum FetchedData { + FromStorage { item: Box }, + FromPeer { item: Box, peer: NodeId }, +} + +impl FetchedData { + pub(crate) fn from_storage(item: Box) -> Self { + FetchedData::FromStorage { item } + } + + pub(crate) fn from_peer(item: T, peer: NodeId) -> Self { + FetchedData::FromPeer { + item: Box::new(item), + peer, + } + } + + pub(crate) fn convert(self) -> FetchedData + where + 
T: Into, + { + match self { + FetchedData::FromStorage { item } => FetchedData::FromStorage { + item: Box::new((*item).into()), + }, + FetchedData::FromPeer { item, peer } => FetchedData::FromPeer { + item: Box::new((*item).into()), + peer, + }, + } + } +} + +impl FetchedData { + pub(crate) fn id(&self) -> T::Id { + match self { + FetchedData::FromStorage { item } | FetchedData::FromPeer { peer: _, item } => { + item.fetch_id() + } + } + } +} + +impl Display for FetchedData { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self { + FetchedData::FromStorage { item } => { + write!(f, "fetched {} from storage", item.fetch_id()) + } + FetchedData::FromPeer { item, peer } => { + write!(f, "fetched {} from {}", item.fetch_id(), peer) + } + } + } +} diff --git a/node/src/components/fetcher/fetcher_impls.rs b/node/src/components/fetcher/fetcher_impls.rs new file mode 100644 index 0000000000..668c9c74ed --- /dev/null +++ b/node/src/components/fetcher/fetcher_impls.rs @@ -0,0 +1,9 @@ +mod approvals_hashes_fetcher; +mod block_execution_results_or_chunk_fetcher; +mod block_fetcher; +mod block_header_fetcher; +mod finality_signature_fetcher; +mod legacy_deploy_fetcher; +mod sync_leap_fetcher; +mod transaction_fetcher; +mod trie_or_chunk_fetcher; diff --git a/node/src/components/fetcher/fetcher_impls/approvals_hashes_fetcher.rs b/node/src/components/fetcher/fetcher_impls/approvals_hashes_fetcher.rs new file mode 100644 index 0000000000..f08fc0773e --- /dev/null +++ b/node/src/components/fetcher/fetcher_impls/approvals_hashes_fetcher.rs @@ -0,0 +1,76 @@ +use std::{collections::HashMap, time::Duration}; + +use async_trait::async_trait; +use casper_storage::block_store::types::{ApprovalsHashes, ApprovalsHashesValidationError}; +use futures::FutureExt; + +use casper_types::{Block, BlockHash}; + +use crate::{ + components::fetcher::{ + metrics::Metrics, FetchItem, Fetcher, ItemFetcher, ItemHandle, StoringState, Tag, + }, + effect::{requests::StorageRequest, 
EffectBuilder}, + types::NodeId, +}; + +impl FetchItem for ApprovalsHashes { + type Id = BlockHash; + type ValidationError = ApprovalsHashesValidationError; + type ValidationMetadata = Block; + + const TAG: Tag = Tag::ApprovalsHashes; + + fn fetch_id(&self) -> Self::Id { + *self.block_hash() + } + + fn validate(&self, block: &Block) -> Result<(), Self::ValidationError> { + self.verify(block) + } +} + +#[async_trait] +impl ItemFetcher for Fetcher { + const SAFE_TO_RESPOND_TO_ALL: bool = false; + + fn item_handles( + &mut self, + ) -> &mut HashMap>> { + &mut self.item_handles + } + + fn metrics(&mut self) -> &Metrics { + &self.metrics + } + + fn peer_timeout(&self) -> Duration { + self.get_from_peer_timeout + } + + async fn get_locally + Send>( + effect_builder: EffectBuilder, + id: BlockHash, + ) -> Option { + effect_builder.get_approvals_hashes_from_storage(id).await + } + + fn put_to_storage<'a, REv: From + Send>( + effect_builder: EffectBuilder, + item: ApprovalsHashes, + ) -> StoringState<'a, ApprovalsHashes> { + StoringState::Enqueued( + effect_builder + .put_approvals_hashes_to_storage(Box::new(item)) + .map(|_| ()) + .boxed(), + ) + } + + async fn announce_fetched_new_item( + _effect_builder: EffectBuilder, + _item: ApprovalsHashes, + _peer: NodeId, + ) { + } +} diff --git a/node/src/components/fetcher/fetcher_impls/block_execution_results_or_chunk_fetcher.rs b/node/src/components/fetcher/fetcher_impls/block_execution_results_or_chunk_fetcher.rs new file mode 100644 index 0000000000..cc58f1e602 --- /dev/null +++ b/node/src/components/fetcher/fetcher_impls/block_execution_results_or_chunk_fetcher.rs @@ -0,0 +1,55 @@ +use std::{collections::HashMap, time::Duration}; + +use async_trait::async_trait; + +use crate::{ + components::fetcher::{metrics::Metrics, Fetcher, ItemFetcher, ItemHandle, StoringState}, + effect::{requests::StorageRequest, EffectBuilder}, + types::{BlockExecutionResultsOrChunk, BlockExecutionResultsOrChunkId, NodeId}, +}; + +#[async_trait] 
+impl ItemFetcher for Fetcher { + const SAFE_TO_RESPOND_TO_ALL: bool = true; + + fn item_handles( + &mut self, + ) -> &mut HashMap< + BlockExecutionResultsOrChunkId, + HashMap>, + > { + &mut self.item_handles + } + + fn metrics(&mut self) -> &Metrics { + &self.metrics + } + + fn peer_timeout(&self) -> Duration { + self.get_from_peer_timeout + } + + async fn get_locally + Send>( + effect_builder: EffectBuilder, + id: BlockExecutionResultsOrChunkId, + ) -> Option { + effect_builder + .get_block_execution_results_or_chunk_from_storage(id) + .await + } + + fn put_to_storage<'a, REv>( + _effect_builder: EffectBuilder, + item: BlockExecutionResultsOrChunk, + ) -> StoringState<'a, BlockExecutionResultsOrChunk> { + // Stored by the BlockSynchronizer once all chunks are fetched. + StoringState::WontStore(item) + } + + async fn announce_fetched_new_item( + _effect_builder: EffectBuilder, + _item: BlockExecutionResultsOrChunk, + _peer: NodeId, + ) { + } +} diff --git a/node/src/components/fetcher/fetcher_impls/block_fetcher.rs b/node/src/components/fetcher/fetcher_impls/block_fetcher.rs new file mode 100644 index 0000000000..a17e0cad10 --- /dev/null +++ b/node/src/components/fetcher/fetcher_impls/block_fetcher.rs @@ -0,0 +1,81 @@ +use std::{collections::HashMap, sync::Arc, time::Duration}; + +use async_trait::async_trait; +use futures::FutureExt; + +use casper_types::{Block, BlockHash, BlockValidationError}; + +use crate::{ + components::fetcher::{ + metrics::Metrics, EmptyValidationMetadata, FetchItem, Fetcher, ItemFetcher, ItemHandle, + StoringState, Tag, + }, + effect::{ + announcements::FetchedNewBlockAnnouncement, + requests::{BlockAccumulatorRequest, StorageRequest}, + EffectBuilder, + }, + types::NodeId, +}; + +impl FetchItem for Block { + type Id = BlockHash; + type ValidationError = BlockValidationError; + type ValidationMetadata = EmptyValidationMetadata; + + const TAG: Tag = Tag::Block; + + fn fetch_id(&self) -> Self::Id { + *self.hash() + } + + fn validate(&self, 
_metadata: &EmptyValidationMetadata) -> Result<(), Self::ValidationError> { + self.verify() + } +} + +#[async_trait] +impl ItemFetcher for Fetcher { + const SAFE_TO_RESPOND_TO_ALL: bool = false; + + fn item_handles(&mut self) -> &mut HashMap>> { + &mut self.item_handles + } + + fn metrics(&mut self) -> &Metrics { + &self.metrics + } + + fn peer_timeout(&self) -> Duration { + self.get_from_peer_timeout + } + + async fn get_locally + From + Send>( + effect_builder: EffectBuilder, + id: BlockHash, + ) -> Option { + effect_builder.get_block_from_storage(id).await + } + + fn put_to_storage<'a, REv: From + Send>( + effect_builder: EffectBuilder, + item: Block, + ) -> StoringState<'a, Block> { + StoringState::Enqueued( + effect_builder + .put_block_to_storage(Arc::new(item)) + .map(|_| ()) + .boxed(), + ) + } + + async fn announce_fetched_new_item + Send>( + effect_builder: EffectBuilder, + item: Block, + peer: NodeId, + ) { + effect_builder + .announce_fetched_new_block(Arc::new(item), peer) + .await + } +} diff --git a/node/src/components/fetcher/fetcher_impls/block_header_fetcher.rs b/node/src/components/fetcher/fetcher_impls/block_header_fetcher.rs new file mode 100644 index 0000000000..819487b4e0 --- /dev/null +++ b/node/src/components/fetcher/fetcher_impls/block_header_fetcher.rs @@ -0,0 +1,82 @@ +use std::{collections::HashMap, convert::Infallible, time::Duration}; + +use async_trait::async_trait; +use futures::FutureExt; + +use casper_types::{BlockHash, BlockHeader}; + +use crate::{ + components::fetcher::{ + metrics::Metrics, EmptyValidationMetadata, FetchItem, Fetcher, ItemFetcher, ItemHandle, + StoringState, Tag, + }, + effect::{requests::StorageRequest, EffectBuilder}, + types::NodeId, +}; + +impl FetchItem for BlockHeader { + type Id = BlockHash; + type ValidationError = Infallible; + type ValidationMetadata = EmptyValidationMetadata; + + const TAG: Tag = Tag::BlockHeader; + + fn fetch_id(&self) -> Self::Id { + self.block_hash() + } + + fn validate(&self, 
_metadata: &EmptyValidationMetadata) -> Result<(), Self::ValidationError> { + // No need for further validation. The received header has necessarily had its hash + // computed to be the same value we used for the fetch ID if we got here. + Ok(()) + } +} + +#[async_trait] +impl ItemFetcher for Fetcher { + const SAFE_TO_RESPOND_TO_ALL: bool = true; + + fn item_handles( + &mut self, + ) -> &mut HashMap>> { + &mut self.item_handles + } + + fn metrics(&mut self) -> &Metrics { + &self.metrics + } + + fn peer_timeout(&self) -> Duration { + self.get_from_peer_timeout + } + + async fn get_locally + Send>( + effect_builder: EffectBuilder, + id: BlockHash, + ) -> Option { + // Requests from fetcher are not restricted by the block availability index. + let only_from_available_block_range = false; + effect_builder + .get_block_header_from_storage(id, only_from_available_block_range) + .await + } + + fn put_to_storage<'a, REv: From + Send>( + effect_builder: EffectBuilder, + item: BlockHeader, + ) -> StoringState<'a, BlockHeader> { + StoringState::Enqueued( + effect_builder + .put_block_header_to_storage(Box::new(item)) + .map(|_| ()) + .boxed(), + ) + } + + async fn announce_fetched_new_item( + _effect_builder: EffectBuilder, + _item: BlockHeader, + _peer: NodeId, + ) { + } +} diff --git a/node/src/components/fetcher/fetcher_impls/finality_signature_fetcher.rs b/node/src/components/fetcher/fetcher_impls/finality_signature_fetcher.rs new file mode 100644 index 0000000000..591612d6da --- /dev/null +++ b/node/src/components/fetcher/fetcher_impls/finality_signature_fetcher.rs @@ -0,0 +1,92 @@ +use std::{collections::HashMap, time::Duration}; + +use async_trait::async_trait; +use futures::FutureExt; + +use casper_types::{crypto, FinalitySignature, FinalitySignatureId}; + +use crate::{ + components::fetcher::{ + metrics::Metrics, EmptyValidationMetadata, FetchItem, Fetcher, ItemFetcher, ItemHandle, + StoringState, Tag, + }, + effect::{ + 
announcements::FetchedNewFinalitySignatureAnnouncement, + requests::{BlockAccumulatorRequest, StorageRequest}, + EffectBuilder, + }, + types::NodeId, +}; + +impl FetchItem for FinalitySignature { + type Id = Box; + type ValidationError = crypto::Error; + type ValidationMetadata = EmptyValidationMetadata; + + const TAG: Tag = Tag::FinalitySignature; + + fn fetch_id(&self) -> Self::Id { + Box::new(FinalitySignatureId::new( + *self.block_hash(), + self.era_id(), + self.public_key().clone(), + )) + } + + fn validate(&self, _metadata: &EmptyValidationMetadata) -> Result<(), Self::ValidationError> { + self.is_verified() + } +} + +#[async_trait] +impl ItemFetcher for Fetcher { + const SAFE_TO_RESPOND_TO_ALL: bool = true; + + fn item_handles( + &mut self, + ) -> &mut HashMap, HashMap>> + { + &mut self.item_handles + } + + fn metrics(&mut self) -> &Metrics { + &self.metrics + } + + fn peer_timeout(&self) -> Duration { + self.get_from_peer_timeout + } + + async fn get_locally + From + Send>( + effect_builder: EffectBuilder, + id: Box, + ) -> Option { + effect_builder + .get_signature_from_storage(*id.block_hash(), id.public_key().clone()) + .await + } + + fn put_to_storage<'a, REv: From + Send>( + effect_builder: EffectBuilder, + item: FinalitySignature, + ) -> StoringState<'a, FinalitySignature> { + StoringState::Enqueued( + effect_builder + .put_finality_signature_to_storage(item) + .map(|_| ()) + .boxed(), + ) + } + + async fn announce_fetched_new_item( + effect_builder: EffectBuilder, + item: FinalitySignature, + peer: NodeId, + ) where + REv: From + Send, + { + effect_builder + .announce_fetched_new_finality_signature(Box::new(item.clone()), peer) + .await + } +} diff --git a/node/src/components/fetcher/fetcher_impls/legacy_deploy_fetcher.rs b/node/src/components/fetcher/fetcher_impls/legacy_deploy_fetcher.rs new file mode 100644 index 0000000000..bfe8b7d35f --- /dev/null +++ b/node/src/components/fetcher/fetcher_impls/legacy_deploy_fetcher.rs @@ -0,0 +1,57 @@ +use 
std::{collections::HashMap, time::Duration}; + +use async_trait::async_trait; +use futures::FutureExt; + +use casper_types::{Deploy, DeployHash, Transaction}; + +use crate::{ + components::fetcher::{metrics::Metrics, Fetcher, ItemFetcher, ItemHandle, StoringState}, + effect::{requests::StorageRequest, EffectBuilder}, + types::{LegacyDeploy, NodeId}, +}; + +#[async_trait] +impl ItemFetcher for Fetcher { + const SAFE_TO_RESPOND_TO_ALL: bool = true; + + fn item_handles( + &mut self, + ) -> &mut HashMap>> { + &mut self.item_handles + } + + fn metrics(&mut self) -> &Metrics { + &self.metrics + } + + fn peer_timeout(&self) -> Duration { + self.get_from_peer_timeout + } + + async fn get_locally + Send>( + effect_builder: EffectBuilder, + id: DeployHash, + ) -> Option { + effect_builder.get_stored_legacy_deploy(id).await + } + + fn put_to_storage<'a, REv: From + Send>( + effect_builder: EffectBuilder, + item: LegacyDeploy, + ) -> StoringState<'a, LegacyDeploy> { + StoringState::Enqueued( + effect_builder + .put_transaction_to_storage(Transaction::from(Deploy::from(item))) + .map(|_| ()) + .boxed(), + ) + } + + async fn announce_fetched_new_item( + _effect_builder: EffectBuilder, + _item: LegacyDeploy, + _peer: NodeId, + ) { + } +} diff --git a/node/src/components/fetcher/fetcher_impls/sync_leap_fetcher.rs b/node/src/components/fetcher/fetcher_impls/sync_leap_fetcher.rs new file mode 100644 index 0000000000..8211edae58 --- /dev/null +++ b/node/src/components/fetcher/fetcher_impls/sync_leap_fetcher.rs @@ -0,0 +1,68 @@ +use std::{collections::HashMap, time::Duration}; + +use async_trait::async_trait; +use futures::FutureExt; + +use crate::{ + components::fetcher::{metrics::Metrics, Fetcher, ItemFetcher, ItemHandle, StoringState}, + effect::{requests::StorageRequest, EffectBuilder}, + types::{NodeId, SyncLeap, SyncLeapIdentifier}, +}; + +#[async_trait] +impl ItemFetcher for Fetcher { + // We want the fetcher to ask all the peers we give to it separately, and return their + // 
responses separately, not just respond with the first SyncLeap it successfully gets from a + // single peer. + const SAFE_TO_RESPOND_TO_ALL: bool = false; + + fn item_handles( + &mut self, + ) -> &mut HashMap>> { + &mut self.item_handles + } + + fn metrics(&mut self) -> &Metrics { + &self.metrics + } + + fn peer_timeout(&self) -> Duration { + self.get_from_peer_timeout + } + + async fn get_locally( + _effect_builder: EffectBuilder, + _id: SyncLeapIdentifier, + ) -> Option { + // We never get a SyncLeap we requested from our own storage. + None + } + + fn put_to_storage<'a, REv: From + Send>( + effect_builder: EffectBuilder, + item: SyncLeap, + ) -> StoringState<'a, SyncLeap> { + StoringState::Enqueued( + async move { + for header in item.headers() { + effect_builder + .put_block_header_to_storage(Box::new(header.clone())) + .await; + } + for block_header in item.block_headers_with_signatures { + effect_builder + .put_signatures_to_storage(block_header.block_signatures().clone()) + .await; + } + } + .boxed(), + ) + } + + async fn announce_fetched_new_item( + _effect_builder: EffectBuilder, + _item: SyncLeap, + _peer: NodeId, + ) { + } +} diff --git a/node/src/components/fetcher/fetcher_impls/transaction_fetcher.rs b/node/src/components/fetcher/fetcher_impls/transaction_fetcher.rs new file mode 100644 index 0000000000..1e79c7a9c3 --- /dev/null +++ b/node/src/components/fetcher/fetcher_impls/transaction_fetcher.rs @@ -0,0 +1,86 @@ +use std::{collections::HashMap, time::Duration}; + +use async_trait::async_trait; +use futures::FutureExt; + +use casper_types::{InvalidTransaction, Transaction, TransactionId}; + +use crate::{ + components::fetcher::{ + metrics::Metrics, EmptyValidationMetadata, FetchItem, Fetcher, ItemFetcher, ItemHandle, + StoringState, Tag, + }, + effect::{requests::StorageRequest, EffectBuilder}, + types::NodeId, +}; + +impl FetchItem for Transaction { + type Id = TransactionId; + type ValidationError = InvalidTransaction; + type ValidationMetadata = 
EmptyValidationMetadata; + + const TAG: Tag = Tag::Transaction; + + fn fetch_id(&self) -> Self::Id { + self.compute_id() + } + + fn validate(&self, _metadata: &EmptyValidationMetadata) -> Result<(), Self::ValidationError> { + self.verify() + } +} + +#[async_trait] +impl ItemFetcher for Fetcher { + const SAFE_TO_RESPOND_TO_ALL: bool = true; + + fn item_handles( + &mut self, + ) -> &mut HashMap>> { + &mut self.item_handles + } + + fn metrics(&mut self) -> &Metrics { + &self.metrics + } + + fn peer_timeout(&self) -> Duration { + self.get_from_peer_timeout + } + + async fn get_locally + Send>( + effect_builder: EffectBuilder, + id: TransactionId, + ) -> Option { + effect_builder.get_stored_transaction(id).await + } + + fn put_to_storage<'a, REv: From + Send>( + effect_builder: EffectBuilder, + item: Transaction, + ) -> StoringState<'a, Transaction> { + StoringState::Enqueued( + async move { + let is_new = effect_builder + .put_transaction_to_storage(item.clone()) + .await; + // If `is_new` is `false`, the transaction was previously stored, and the incoming + // transaction could have a different set of approvals to the one already stored. + // We can treat the incoming approvals as finalized and now try and store them. 
+ if !is_new { + effect_builder + .store_finalized_approvals(item.hash(), item.approvals()) + .await; + } + } + .boxed(), + ) + } + + async fn announce_fetched_new_item( + _effect_builder: EffectBuilder, + _item: Transaction, + _peer: NodeId, + ) { + } +} diff --git a/node/src/components/fetcher/fetcher_impls/trie_or_chunk_fetcher.rs b/node/src/components/fetcher/fetcher_impls/trie_or_chunk_fetcher.rs new file mode 100644 index 0000000000..0439db71e0 --- /dev/null +++ b/node/src/components/fetcher/fetcher_impls/trie_or_chunk_fetcher.rs @@ -0,0 +1,79 @@ +use std::{collections::HashMap, time::Duration}; + +use async_trait::async_trait; +use tracing::error; + +use casper_storage::data_access_layer::{TrieElement, TrieRequest, TrieResult}; + +use crate::{ + components::fetcher::{metrics::Metrics, Fetcher, ItemFetcher, ItemHandle, StoringState}, + effect::{requests::ContractRuntimeRequest, EffectBuilder}, + types::{NodeId, TrieOrChunk, TrieOrChunkId}, +}; + +#[async_trait] +impl ItemFetcher for Fetcher { + const SAFE_TO_RESPOND_TO_ALL: bool = true; + + fn item_handles( + &mut self, + ) -> &mut HashMap>> { + &mut self.item_handles + } + + fn metrics(&mut self) -> &Metrics { + &self.metrics + } + + fn peer_timeout(&self) -> Duration { + self.get_from_peer_timeout + } + + async fn get_locally + Send>( + effect_builder: EffectBuilder, + id: TrieOrChunkId, + ) -> Option { + let TrieOrChunkId(chunk_index, trie_key) = id; + let request = TrieRequest::new(trie_key, Some(chunk_index)); + let result = effect_builder.get_trie(request).await; + match result { + TrieResult::ValueNotFound(_) => None, + TrieResult::Failure(err) => { + error!(%err, "failed to get trie element locally"); + None + } + TrieResult::Success { element } => match element { + TrieElement::Raw(raw) => match TrieOrChunk::new(raw.into(), 0) { + Ok(voc) => Some(voc), + Err(err) => { + error!(%err, "raw chunking error"); + None + } + }, + TrieElement::Chunked(raw, chunk_id) => match TrieOrChunk::new(raw.into(), 
chunk_id) + { + Ok(voc) => Some(voc), + Err(err) => { + error!(%err, "chunking error"); + None + } + }, + }, + } + } + + fn put_to_storage<'a, REv>( + _effect_builder: EffectBuilder, + item: TrieOrChunk, + ) -> StoringState<'a, TrieOrChunk> { + // Stored by the GlobalStateSynchronizer once all chunks are fetched. + StoringState::WontStore(item) + } + + async fn announce_fetched_new_item( + _effect_builder: EffectBuilder, + _item: TrieOrChunk, + _peer: NodeId, + ) { + } +} diff --git a/node/src/components/fetcher/item_fetcher.rs b/node/src/components/fetcher/item_fetcher.rs new file mode 100644 index 0000000000..56f6119bbc --- /dev/null +++ b/node/src/components/fetcher/item_fetcher.rs @@ -0,0 +1,296 @@ +use std::{ + collections::{hash_map::Entry, HashMap}, + time::Duration, +}; + +use async_trait::async_trait; +use futures::future::BoxFuture; +use tracing::{debug, error, trace}; + +use super::{Error, Event, FetchResponder, FetchedData, ItemHandle, Metrics}; +use crate::{ + components::{fetcher::FetchItem, network::blocklist::BlocklistJustification}, + effect::{ + announcements::{ + FetchedNewBlockAnnouncement, FetchedNewFinalitySignatureAnnouncement, + PeerBehaviorAnnouncement, + }, + requests::{ + BlockAccumulatorRequest, ContractRuntimeRequest, NetworkRequest, StorageRequest, + }, + EffectBuilder, EffectExt, Effects, + }, + protocol::Message, + types::NodeId, +}; + +pub(super) enum StoringState<'a, T> { + Enqueued(BoxFuture<'a, ()>), + WontStore(T), +} + +#[async_trait] +pub(super) trait ItemFetcher { + /// Indicator on whether it is safe to respond to all of our responders. For example, [Deploy]s + /// and [BlockHeader]s are safe because their [Item::id] is all that is needed for + /// authentication. But other structures have _finality signatures_ or have substructures that + /// require validation. These are not infallible, and only the responders corresponding to the + /// node queried may be responded to. 
+ const SAFE_TO_RESPOND_TO_ALL: bool; + + fn item_handles(&mut self) -> &mut HashMap>>; + + fn metrics(&mut self) -> &Metrics; + + fn peer_timeout(&self) -> Duration; + + /// We've been asked to fetch the item by another component of this node. We'll try to get it + /// locally first (generally from our own storage component), and if that fails, we'll send a + /// request to `peer` for the item. + fn fetch( + &self, + effect_builder: EffectBuilder, + id: T::Id, + peer: NodeId, + validation_metadata: Box, + responder: FetchResponder, + ) -> Effects> + where + REv: From + + From + + From + + Send, + { + Self::get_locally(effect_builder, id.clone()).event(move |result| Event::GetLocallyResult { + id, + peer, + validation_metadata, + maybe_item: result.map(Box::new), + responder, + }) + } + + /// Handles attempting to get the item locally. + async fn get_locally(effect_builder: EffectBuilder, id: T::Id) -> Option + where + REv: From + + From + + From + + Send; + + /// Handles the `Err` case for a `Result` of attempting to get the item locally. + fn failed_to_get_locally( + &mut self, + effect_builder: EffectBuilder, + id: T::Id, + peer: NodeId, + validation_metadata: Box, + responder: FetchResponder, + ) -> Effects> + where + ::Id: 'static, + REv: From> + Send, + { + let peer_timeout = self.peer_timeout(); + // Capture responder for later signalling. 
+ let item_handles = self.item_handles(); + match item_handles.entry(id.clone()).or_default().entry(peer) { + Entry::Occupied(mut entry) => { + let handle = entry.get_mut(); + if handle.validation_metadata() != &*validation_metadata { + let error = Error::ValidationMetadataMismatch { + id: Box::new(id), + peer, + current: Box::new(handle.validation_metadata().clone()), + new: validation_metadata, + }; + error!(%error, "failed to fetch"); + return responder.respond(Err(error)).ignore(); + } + handle.push_responder(responder); + } + Entry::Vacant(entry) => { + entry.insert(ItemHandle::new(validation_metadata, responder)); + } + } + match Message::new_get_request::(&id) { + Ok(message) => { + self.metrics().fetch_total.inc(); + async move { + effect_builder.send_message(peer, message).await; + effect_builder.set_timeout(peer_timeout).await + } + } + .event(move |_| Event::TimeoutPeer { id, peer }), + Err(error) => { + error!(%peer, %error, "failed to construct get request"); + + self.signal( + id.clone(), + Err(Error::CouldNotConstructGetRequest { + id: Box::new(id), + peer, + }), + peer, + ) + } + } + } + + fn got_from_peer( + &mut self, + effect_builder: EffectBuilder, + peer: NodeId, + item: Box, + ) -> Effects> + where + REv: From + From + Send, + { + self.metrics().found_on_peer.inc(); + + let validation_metadata = match self + .item_handles() + .get(&item.fetch_id()) + .and_then(|item_handles| item_handles.get(&peer)) + { + Some(item_handle) => item_handle.validation_metadata(), + None => { + debug!(item_id = %item.fetch_id(), tag = ?T::TAG, %peer, "got unexpected item from peer"); + return Effects::new(); + } + }; + + if let Err(err) = item.validate(validation_metadata) { + debug!(%peer, %err, ?item, "peer sent invalid item"); + effect_builder + .announce_block_peer_with_justification( + peer, + BlocklistJustification::SentInvalidItem { + tag: T::TAG, + error_msg: err.to_string(), + }, + ) + .ignore() + } else { + match Self::put_to_storage(effect_builder, 
*item.clone()) { + StoringState::WontStore(item) => self.signal(item.fetch_id(), Ok(item), peer), + StoringState::Enqueued(store_future) => { + store_future.event(move |_| Event::PutToStorage { item, peer }) + } + } + } + } + + /// Sends fetched data to all responders + fn respond_to_all(&mut self, id: T::Id, fetched_data: FetchedData) -> Effects> { + let mut effects = Effects::new(); + let item_handles = self.item_handles().remove(&id).unwrap_or_default(); + for (_peer, item_handle) in item_handles { + for responder in item_handle.take_responders() { + effects.extend(responder.respond(Ok(fetched_data.clone())).ignore()); + } + } + effects + } + + /// Responds to all responders corresponding to a specific item-peer combination with a result. + fn send_response_from_peer( + &mut self, + id: T::Id, + result: Result>, + peer: NodeId, + ) -> Effects> { + let mut effects = Effects::new(); + let mut item_handles = self.item_handles().remove(&id).unwrap_or_default(); + match result { + Ok(item) => { + // Since this is a success, we can safely respond to all awaiting processes. + for responder in item_handles + .remove(&peer) + .map(ItemHandle::take_responders) + .unwrap_or_default() + { + effects.extend( + responder + .respond(Ok(FetchedData::from_peer(item.clone(), peer))) + .ignore(), + ); + } + } + Err(error @ Error::TimedOut { .. }) => { + // We take just one responder as only one request had timed out. We want to avoid + // prematurely failing too many waiting processes since other requests may still + // succeed before timing out. + let should_remove_item_handle = match item_handles.get_mut(&peer) { + Some(item_handle) => { + if let Some(responder) = item_handle.pop_front_responder() { + effects.extend(responder.respond(Err(error)).ignore()); + // Only if there's still a responder waiting for the item we increment + // the metric. Otherwise we will count every request as timed out, even + // if the item had been fetched. 
+ trace!(TAG=%T::TAG, %id, %peer, "request timed out"); + self.metrics().timeouts.inc(); + } + item_handle.has_no_responders() + } + None => false, + }; + if should_remove_item_handle { + item_handles.remove(&peer); + } + } + Err( + error @ (Error::Absent { .. } + | Error::Rejected { .. } + | Error::CouldNotConstructGetRequest { .. } + | Error::ValidationMetadataMismatch { .. }), + ) => { + // For all other error variants we can safely respond with failure as there's no + // chance for the request to succeed. + for responder in item_handles + .remove(&peer) + .map(ItemHandle::take_responders) + .unwrap_or_default() + { + effects.extend(responder.respond(Err(error.clone())).ignore()); + } + } + } + if !item_handles.is_empty() { + self.item_handles().insert(id, item_handles); + } + effects + } + + fn put_to_storage<'a, REv>( + _effect_builder: EffectBuilder, + _item: T, + ) -> StoringState<'a, T> + where + REv: From + Send; + + async fn announce_fetched_new_item( + _effect_builder: EffectBuilder, + item: T, + peer: NodeId, + ) where + REv: From + + From + + Send; + + /// Handles signalling responders with the item or an error. 
+ fn signal( + &mut self, + id: T::Id, + result: Result>, + peer: NodeId, + ) -> Effects> { + match result { + Ok(fetched_item) if Self::SAFE_TO_RESPOND_TO_ALL => { + self.respond_to_all(id, FetchedData::from_peer(fetched_item, peer)) + } + Ok(_) => self.send_response_from_peer(id, result, peer), + Err(_) => self.send_response_from_peer(id, result, peer), + } + } +} diff --git a/node/src/components/fetcher/item_handle.rs b/node/src/components/fetcher/item_handle.rs new file mode 100644 index 0000000000..18560c76b9 --- /dev/null +++ b/node/src/components/fetcher/item_handle.rs @@ -0,0 +1,47 @@ +use datasize::DataSize; + +use super::{FetchItem, FetchResponder}; + +#[derive(Debug, DataSize)] +pub(crate) struct ItemHandle +where + T: FetchItem, +{ + validation_metadata: Box, + responders: Vec>, +} + +impl ItemHandle { + pub(super) fn new( + validation_metadata: Box, + responder: FetchResponder, + ) -> Self { + Self { + validation_metadata, + responders: vec![responder], + } + } + + pub(super) fn validation_metadata(&self) -> &T::ValidationMetadata { + &self.validation_metadata + } + + pub(super) fn push_responder(&mut self, responder: FetchResponder) { + self.responders.push(responder) + } + + pub(super) fn pop_front_responder(&mut self) -> Option> { + if self.responders.is_empty() { + return None; + } + Some(self.responders.remove(0)) + } + + pub(super) fn take_responders(self) -> Vec> { + self.responders + } + + pub(super) fn has_no_responders(&self) -> bool { + self.responders.is_empty() + } +} diff --git a/node/src/components/fetcher/metrics.rs b/node/src/components/fetcher/metrics.rs index aae495c067..35c403d633 100644 --- a/node/src/components/fetcher/metrics.rs +++ b/node/src/components/fetcher/metrics.rs @@ -3,51 +3,60 @@ use prometheus::{IntCounter, Registry}; use crate::unregister_metric; #[derive(Debug)] -pub(super) struct FetcherMetrics { +pub(crate) struct Metrics { /// Number of fetch requests that found an item in the storage. 
- pub(super) found_in_storage: IntCounter, + pub found_in_storage: IntCounter, /// Number of fetch requests that fetched an item from peer. - pub(super) found_on_peer: IntCounter, + pub found_on_peer: IntCounter, /// Number of fetch requests that timed out. - pub(super) timeouts: IntCounter, + pub timeouts: IntCounter, + /// Number of total fetch requests made. + pub fetch_total: IntCounter, /// Reference to the registry for unregistering. registry: Registry, } -impl FetcherMetrics { +impl Metrics { pub(super) fn new(name: &str, registry: &Registry) -> Result { let found_in_storage = IntCounter::new( format!("{}_found_in_storage", name), format!( - "number of fetch requests that found {} in the storage.", + "number of fetch requests that found {} in local storage", name ), )?; let found_on_peer = IntCounter::new( format!("{}_found_on_peer", name), - format!("number of fetch requests that fetched {} from peer.", name), + format!("number of fetch requests that fetched {} from peer", name), )?; let timeouts = IntCounter::new( format!("{}_timeouts", name), format!("number of {} fetch requests that timed out", name), )?; + let fetch_total = IntCounter::new( + format!("{}_fetch_total", name), + format!("number of {} all fetch requests made", name), + )?; registry.register(Box::new(found_in_storage.clone()))?; registry.register(Box::new(found_on_peer.clone()))?; registry.register(Box::new(timeouts.clone()))?; + registry.register(Box::new(fetch_total.clone()))?; - Ok(FetcherMetrics { + Ok(Metrics { found_in_storage, found_on_peer, timeouts, + fetch_total, registry: registry.clone(), }) } } -impl Drop for FetcherMetrics { +impl Drop for Metrics { fn drop(&mut self) { unregister_metric!(self.registry, self.found_in_storage); unregister_metric!(self.registry, self.found_on_peer); unregister_metric!(self.registry, self.timeouts); + unregister_metric!(self.registry, self.fetch_total); } } diff --git a/node/src/components/fetcher/tag.rs b/node/src/components/fetcher/tag.rs new 
file mode 100644 index 0000000000..980f0b1896 --- /dev/null +++ b/node/src/components/fetcher/tag.rs @@ -0,0 +1,56 @@ +use std::hash::Hash; + +use datasize::DataSize; +use derive_more::Display; +use serde_repr::{Deserialize_repr, Serialize_repr}; +use strum::EnumIter; + +/// An identifier for a specific type implementing the `Item` trait. Each different implementing +/// type should have a unique `Tag` variant. +#[derive( + Clone, + Copy, + DataSize, + PartialEq, + Eq, + PartialOrd, + Ord, + Hash, + Serialize_repr, + Deserialize_repr, + Debug, + Display, + EnumIter, +)] +#[repr(u8)] +pub enum Tag { + /// A transaction identified by its hash and its approvals hash. + #[display(fmt = "transaction")] + Transaction, + /// A legacy deploy identified by its hash alone. + #[display(fmt = "legacy deploy")] + LegacyDeploy, + /// A block. + #[display(fmt = "block")] + Block, + /// A block header. + #[display(fmt = "block header")] + BlockHeader, + /// A trie or chunk of a trie from global state. + #[display(fmt = "trie or chunk")] + TrieOrChunk, + /// A finality signature for a block. + #[display(fmt = "finality signature")] + FinalitySignature, + /// Headers and signatures required to prove that if a given trusted block hash is on the + /// correct chain, then so is a later header, which should be the most recent one according + /// to the sender. + #[display(fmt = "sync leap")] + SyncLeap, + /// The hashes of the finalized deploy approvals sets for a single block. + #[display(fmt = "approvals hashes")] + ApprovalsHashes, + /// The execution results for a single block. 
+ #[display(fmt = "block execution results")] + BlockExecutionResults, +} diff --git a/node/src/components/fetcher/tests.rs b/node/src/components/fetcher/tests.rs index fbe025577c..d45c1c2e19 100644 --- a/node/src/components/fetcher/tests.rs +++ b/node/src/components/fetcher/tests.rs @@ -1,31 +1,50 @@ #![cfg(test)] -#![allow(unreachable_code)] -use std::sync::{Arc, Mutex}; +use std::{ + fmt::{self, Display, Formatter}, + sync::{Arc, Mutex}, +}; -use casper_node_macros::reactor; +use derive_more::From; use futures::FutureExt; +use serde::Serialize; use tempfile::TempDir; use thiserror::Error; -use tokio::time; -use casper_types::ProtocolVersion; +use casper_types::{ + testing::TestRng, BlockV2, Chainspec, ChainspecRawBytes, FinalitySignatureV2, Transaction, + TransactionConfig, TransactionHash, TransactionId, +}; use super::*; use crate::{ - components::{deploy_acceptor, in_memory_network::NetworkController, storage}, + components::{ + consensus::ConsensusRequestMessage, + fetcher, + in_memory_network::{self, InMemoryNetwork, NetworkController}, + network::{GossipedAddress, Identity as NetworkIdentity}, + storage::{self, Storage}, + transaction_acceptor, + }, effect::{ - announcements::{DeployAcceptorAnnouncement, NetworkAnnouncement}, - Responder, + announcements::{ControlAnnouncement, FatalAnnouncement, TransactionAcceptorAnnouncement}, + incoming::{ + ConsensusMessageIncoming, DemandIncoming, FinalitySignatureIncoming, GossiperIncoming, + NetRequestIncoming, NetResponse, NetResponseIncoming, TrieDemand, TrieRequestIncoming, + TrieResponseIncoming, + }, + requests::{AcceptTransactionRequest, MarkBlockCompletedRequest}, }, + fatal, protocol::Message, - reactor::{Reactor as ReactorTrait, Runner}, + reactor::{self, EventQueueHandle, Reactor as ReactorTrait, ReactorEvent, Runner}, testing::{ - network::{Network, NetworkedReactor}, - ConditionCheckReactor, TestRng, + self, + network::{NetworkedReactor, TestingNetwork}, + ConditionCheckReactor, FakeTransactionAcceptor, 
}, - types::{Deploy, DeployHash, NodeId}, - utils::{WithDir, RESOURCES_PATH}, + types::NodeId, + utils::WithDir, }; const TIMEOUT: Duration = Duration::from_secs(1); @@ -47,199 +66,368 @@ impl Drop for Reactor { pub struct FetcherTestConfig { fetcher_config: Config, storage_config: storage::Config, - deploy_acceptor_config: deploy_acceptor::Config, temp_dir: TempDir, } impl Default for FetcherTestConfig { fn default() -> Self { - let (storage_config, temp_dir) = storage::Config::default_for_tests(); + let (storage_config, temp_dir) = storage::Config::new_for_tests(1); FetcherTestConfig { fetcher_config: Default::default(), storage_config, - deploy_acceptor_config: deploy_acceptor::Config::new(false), temp_dir, } } } -reactor!(Reactor { - type Config = FetcherTestConfig; +#[derive(Debug, From, Serialize)] +enum Event { + #[from] + ControlAnnouncement(ControlAnnouncement), + #[from] + FatalAnnouncement(FatalAnnouncement), + #[from] + Network(in_memory_network::Event), + #[from] + Storage(storage::Event), + #[from] + FakeTransactionAcceptor(transaction_acceptor::Event), + #[from] + TransactionFetcher(fetcher::Event), + #[from] + NetworkRequestMessage(NetworkRequest), + #[from] + StorageRequest(StorageRequest), + #[from] + FetcherRequestTransaction(FetcherRequest), + #[from] + BlockAccumulatorRequest(BlockAccumulatorRequest), + #[from] + AcceptTransactionRequest(AcceptTransactionRequest), + #[from] + TransactionAcceptorAnnouncement(TransactionAcceptorAnnouncement), + #[from] + FetchedNewFinalitySignatureAnnouncement(FetchedNewFinalitySignatureAnnouncement), + #[from] + FetchedNewBlockAnnouncement(FetchedNewBlockAnnouncement), + #[from] + NetRequestIncoming(NetRequestIncoming), + #[from] + NetResponseIncoming(NetResponseIncoming), + #[from] + BlocklistAnnouncement(PeerBehaviorAnnouncement), + #[from] + MarkBlockCompletedRequest(MarkBlockCompletedRequest), + #[from] + TrieDemand(TrieDemand), + #[from] + ContractRuntimeRequest(ContractRuntimeRequest), + #[from] + 
GossiperIncomingTransaction(GossiperIncoming), + #[from] + GossiperIncomingBlock(GossiperIncoming), + #[from] + GossiperIncomingFinalitySignature(GossiperIncoming), + #[from] + GossiperIncomingGossipedAddress(GossiperIncoming), + #[from] + TrieRequestIncoming(TrieRequestIncoming), + #[from] + TrieResponseIncoming(TrieResponseIncoming), + #[from] + ConsensusMessageIncoming(ConsensusMessageIncoming), + #[from] + ConsensusDemandIncoming(DemandIncoming), + #[from] + FinalitySignatureIncoming(FinalitySignatureIncoming), +} - components: { - chainspec_loader = has_effects ChainspecLoader( - &RESOURCES_PATH.join("local"), - effect_builder - ); - network = infallible InMemoryNetwork::(event_queue, rng); - storage = Storage( - &WithDir::new(cfg.temp_dir.path(), cfg.storage_config), - chainspec_loader.hard_reset_to_start_of_era(), - ProtocolVersion::from_parts(1, 0, 0), - ); - deploy_acceptor = infallible DeployAcceptor(cfg.deploy_acceptor_config, &*chainspec_loader.chainspec()); - deploy_fetcher = Fetcher::("deploy", cfg.fetcher_config, registry); +impl Display for Event { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + Debug::fmt(self, f) } +} - events: { - network = Event; - deploy_fetcher = Event; +impl ReactorEvent for Event { + fn is_control(&self) -> bool { + matches!(self, Event::ControlAnnouncement(_)) } - requests: { - // This test contains no linear chain requests, so we panic if we receive any. 
- LinearChainRequest -> !; - NetworkRequest -> network; - StorageRequest -> storage; - StateStoreRequest -> storage; - FetcherRequest -> deploy_fetcher; + fn try_into_control(self) -> Option { + match self { + Event::ControlAnnouncement(ctrl_ann) => Some(ctrl_ann), + _ => None, + } + } +} + +struct Reactor { + network: InMemoryNetwork, + storage: Storage, + fake_transaction_acceptor: FakeTransactionAcceptor, + transaction_fetcher: Fetcher, +} + +impl ReactorTrait for Reactor { + type Event = Event; + type Config = FetcherTestConfig; + type Error = Error; - // The only contract runtime request will be the commit of genesis, which we discard. - ContractRuntimeRequest -> #; + fn dispatch_event( + &mut self, + effect_builder: EffectBuilder, + rng: &mut NodeRng, + event: Self::Event, + ) -> Effects { + match event { + Event::Network(event) => reactor::wrap_effects( + Event::Network, + self.network.handle_event(effect_builder, rng, event), + ), + Event::Storage(event) => reactor::wrap_effects( + Event::Storage, + self.storage.handle_event(effect_builder, rng, event), + ), + Event::FakeTransactionAcceptor(event) => reactor::wrap_effects( + Event::FakeTransactionAcceptor, + self.fake_transaction_acceptor + .handle_event(effect_builder, rng, event), + ), + Event::TransactionFetcher(event) => reactor::wrap_effects( + Event::TransactionFetcher, + self.transaction_fetcher + .handle_event(effect_builder, rng, event), + ), + Event::NetworkRequestMessage(request) => reactor::wrap_effects( + Event::Network, + self.network + .handle_event(effect_builder, rng, request.into()), + ), + Event::StorageRequest(request) => reactor::wrap_effects( + Event::Storage, + self.storage + .handle_event(effect_builder, rng, request.into()), + ), + Event::FetcherRequestTransaction(request) => reactor::wrap_effects( + Event::TransactionFetcher, + self.transaction_fetcher + .handle_event(effect_builder, rng, request.into()), + ), + Event::TransactionAcceptorAnnouncement(announcement) => { + let event 
= fetcher::Event::from(announcement); + reactor::wrap_effects( + Event::TransactionFetcher, + self.transaction_fetcher + .handle_event(effect_builder, rng, event), + ) + } + Event::AcceptTransactionRequest(AcceptTransactionRequest { + transaction, + is_speculative, + responder, + }) => { + assert!(!is_speculative); + let event = transaction_acceptor::Event::Accept { + transaction, + source: Source::Client, + maybe_responder: Some(responder), + }; + reactor::wrap_effects( + Event::FakeTransactionAcceptor, + self.fake_transaction_acceptor + .handle_event(effect_builder, rng, event), + ) + } + Event::NetRequestIncoming(announcement) => reactor::wrap_effects( + Event::Storage, + self.storage + .handle_event(effect_builder, rng, announcement.into()), + ), + Event::NetResponseIncoming(announcement) => { + let mut announcement_effects = Effects::new(); + let effects = self.handle_net_response(effect_builder, rng, announcement); + announcement_effects.extend(effects); + announcement_effects + } + Event::MarkBlockCompletedRequest(request) => reactor::wrap_effects( + Event::Storage, + self.storage + .handle_event(effect_builder, rng, request.into()), + ), + Event::TrieDemand(_) + | Event::ContractRuntimeRequest(_) + | Event::BlockAccumulatorRequest(_) + | Event::BlocklistAnnouncement(_) + | Event::GossiperIncomingTransaction(_) + | Event::GossiperIncomingBlock(_) + | Event::GossiperIncomingFinalitySignature(_) + | Event::GossiperIncomingGossipedAddress(_) + | Event::TrieRequestIncoming(_) + | Event::TrieResponseIncoming(_) + | Event::ConsensusMessageIncoming(_) + | Event::ConsensusDemandIncoming(_) + | Event::FinalitySignatureIncoming(_) + | Event::FetchedNewBlockAnnouncement(_) + | Event::FetchedNewFinalitySignatureAnnouncement(_) + | Event::ControlAnnouncement(_) + | Event::FatalAnnouncement(_) => panic!("unexpected: {}", event), + } } - announcements: { - // The deploy fetcher needs to be notified about new deploys. 
- DeployAcceptorAnnouncement -> [deploy_fetcher]; - NetworkAnnouncement -> [fn handle_message]; - // Currently the RpcServerAnnouncement is misnamed - it solely tells of new deploys arriving - // from a client. - RpcServerAnnouncement -> [deploy_acceptor]; - ChainspecLoaderAnnouncement -> [!]; + fn new( + cfg: Self::Config, + chainspec: Arc, + _chainspec_raw_bytes: Arc, + _network_identity: NetworkIdentity, + registry: &Registry, + event_queue: EventQueueHandle, + rng: &mut NodeRng, + ) -> Result<(Self, Effects), Self::Error> { + let network = InMemoryNetwork::::new(event_queue, rng); + + let storage = Storage::new( + &WithDir::new(cfg.temp_dir.path(), cfg.storage_config), + chainspec.hard_reset_to_start_of_era(), + chainspec.protocol_config.version, + chainspec.protocol_config.activation_point.era_id(), + &chainspec.network_config.name, + chainspec.transaction_config.max_ttl.into(), + chainspec.core_config.unbonding_delay, + Some(registry), + false, + TransactionConfig::default(), + ) + .unwrap(); + + let fake_transaction_acceptor = FakeTransactionAcceptor::new(); + let transaction_fetcher = + Fetcher::::new("transaction", &cfg.fetcher_config, registry).unwrap(); + let reactor = Reactor { + network, + storage, + fake_transaction_acceptor, + transaction_fetcher, + }; + Ok((reactor, Effects::new())) } -}); +} impl Reactor { - fn handle_message( + fn handle_net_response( &mut self, - effect_builder: EffectBuilder, + effect_builder: EffectBuilder, rng: &mut NodeRng, - network_announcement: NetworkAnnouncement, - ) -> Effects { - // TODO: Make this manual routing disappear and supply appropriate - // announcements. - match network_announcement { - NetworkAnnouncement::MessageReceived { sender, payload } => match payload { - Message::GetRequest { serialized_id, .. 
} => { - let deploy_hash = match bincode::deserialize(&serialized_id) { - Ok(hash) => hash, - Err(error) => { - error!( - "failed to decode {:?} from {}: {}", - serialized_id, sender, error - ); - return Effects::new(); - } - }; - - match self - .storage - .handle_legacy_direct_deploy_request(deploy_hash) - { - // This functionality was moved out of the storage component and - // should be refactored ASAP. - Some(deploy) => match Message::new_get_response(&deploy) { - Ok(message) => effect_builder.send_message(sender, message).ignore(), - Err(error) => { - error!("failed to create get-response: {}", error); - Effects::new() - } - }, - None => { - debug!("failed to get {} for {}", deploy_hash, sender); - Effects::new() - } + response: NetResponseIncoming, + ) -> Effects { + match *response.message { + NetResponse::Transaction(ref serialized_item) => { + let transaction = match bincode::deserialize::< + FetchResponse, + >(serialized_item) + { + Ok(FetchResponse::Fetched(txn)) => txn, + Ok(FetchResponse::NotFound(txn_hash)) => { + return fatal!( + effect_builder, + "peer did not have transaction with hash {}: {}", + txn_hash, + response.sender, + ) + .ignore(); } - } - - Message::GetResponse { - serialized_item, .. 
- } => { - let deploy = match bincode::deserialize(&serialized_item) { - Ok(deploy) => Box::new(deploy), - Err(error) => { - error!("failed to decode deploy from {}: {}", sender, error); - return Effects::new(); - } - }; - - self.dispatch_event( - effect_builder, - rng, - ReactorEvent::DeployAcceptor(deploy_acceptor::Event::Accept { - deploy, - source: Source::Peer(sender), - responder: None, - }), - ) - } - msg => panic!("should not get {}", msg), - }, - ann => panic!("should not received any network announcements: {:?}", ann), + Ok(FetchResponse::NotProvided(txn_hash)) => { + return fatal!( + effect_builder, + "peer refused to provide transaction with hash {}: {}", + txn_hash, + response.sender, + ) + .ignore(); + } + Err(error) => { + return fatal!( + effect_builder, + "failed to decode transaction from {}: {}", + response.sender, + error + ) + .ignore(); + } + }; + + self.dispatch_event( + effect_builder, + rng, + Event::FakeTransactionAcceptor(transaction_acceptor::Event::Accept { + transaction, + source: Source::Peer(response.sender), + maybe_responder: None, + }), + ) + } + _ => fatal!( + effect_builder, + "no support for anything but transaction responses in fetcher test" + ) + .ignore(), } } } impl NetworkedReactor for Reactor { - type NodeId = NodeId; - fn node_id(&self) -> NodeId { self.network.node_id() } } -fn announce_deploy_received( - deploy: Deploy, - responder: Option>>, -) -> impl FnOnce(EffectBuilder) -> Effects { - |effect_builder: EffectBuilder| { - effect_builder - .announce_deploy_received(Box::new(deploy), responder) - .ignore() +fn announce_transaction_received( + txn: Transaction, +) -> impl FnOnce(EffectBuilder) -> Effects { + |effect_builder: EffectBuilder| { + effect_builder.try_accept_transaction(txn, false).ignore() } } -type FetchedDeployResult = Arc>)>>; +type FetchedTransactionResult = Arc>)>>; -fn fetch_deploy( - deploy_hash: DeployHash, +fn fetch_txn( + txn_id: TransactionId, node_id: NodeId, - fetched: FetchedDeployResult, -) 
-> impl FnOnce(EffectBuilder) -> Effects { - move |effect_builder: EffectBuilder| { + fetched: FetchedTransactionResult, +) -> impl FnOnce(EffectBuilder) -> Effects { + move |effect_builder: EffectBuilder| { effect_builder - .fetch_deploy(deploy_hash, node_id) - .then(move |maybe_deploy| async move { + .fetch::(txn_id, node_id, Box::new(EmptyValidationMetadata)) + .then(move |txn| async move { let mut result = fetched.lock().unwrap(); result.0 = true; - result.1 = maybe_deploy; + result.1 = Some(txn); }) .ignore() } } -/// Store a deploy on a target node. -async fn store_deploy( - deploy: &Deploy, +/// Store a transaction on a target node. +async fn store_txn( + txn: &Transaction, node_id: &NodeId, - network: &mut Network, - responder: Option>>, - mut rng: &mut TestRng, + network: &mut TestingNetwork, + rng: &mut TestRng, ) { network - .process_injected_effect_on(node_id, announce_deploy_received(deploy.clone(), responder)) + .process_injected_effect_on(node_id, announce_transaction_received(txn.clone())) .await; - // cycle to deploy acceptor announcement + // cycle to transaction acceptor announcement network .crank_until( node_id, - &mut rng, - move |event: &ReactorEvent| { + rng, + move |event: &Event| { matches!( event, - ReactorEvent::DeployAcceptorAnnouncement( - DeployAcceptorAnnouncement::AcceptedNewDeploy { .. }, + Event::TransactionAcceptorAnnouncement( + TransactionAcceptorAnnouncement::AcceptedNewTransaction { .. 
}, ) ) }, @@ -248,12 +436,24 @@ async fn store_deploy( .await; } +#[derive(Debug)] +enum ExpectedFetchedTransactionResult { + TimedOut, + FromStorage { + expected_txn: Box, + }, + FromPeer { + expected_txn: Box, + expected_peer: NodeId, + }, +} + async fn assert_settled( node_id: &NodeId, - deploy_hash: DeployHash, - expected_result: Option>, - fetched: FetchedDeployResult, - network: &mut Network, + txn_id: TransactionId, + expected_result: ExpectedFetchedTransactionResult, + fetched: FetchedTransactionResult, + network: &mut TestingNetwork, rng: &mut TestRng, timeout: Duration, ) { @@ -263,17 +463,49 @@ async fn assert_settled( network.settle_on(rng, has_responded, timeout).await; - let maybe_stored_deploy = network + let maybe_stored_txn = network .nodes() .get(node_id) .unwrap() .reactor() .inner() .storage - .get_deploy_by_hash(deploy_hash); - - assert_eq!(expected_result.is_some(), maybe_stored_deploy.is_some()); - assert_eq!(fetched.lock().unwrap().1, expected_result) + .get_transaction_by_hash(txn_id.transaction_hash()); + + let actual_fetcher_result = fetched.lock().unwrap().1.clone(); + match (expected_result, actual_fetcher_result, maybe_stored_txn) { + // Timed-out case: despite the delayed response causing a timeout, the response does arrive, + // and the TestTransactionAcceptor unconditionally accepts the txn and stores it. For the + // test, we don't care whether it was stored or not, just that the TimedOut event fired. + ( + ExpectedFetchedTransactionResult::TimedOut, + Some(Err(fetcher::Error::TimedOut { .. })), + _, + ) => {} + // FromStorage case: expect txn to correspond to item fetched, as well as stored item. + ( + ExpectedFetchedTransactionResult::FromStorage { expected_txn }, + Some(Ok(FetchedData::FromStorage { item })), + Some(stored_txn), + ) if expected_txn == item && stored_txn == *item => {} + // FromPeer case: txns should correspond, storage should be present and correspond, and + // peers should correspond. 
+ ( + ExpectedFetchedTransactionResult::FromPeer { + expected_txn, + expected_peer, + }, + Some(Ok(FetchedData::FromPeer { item, peer })), + Some(stored_txn), + ) if expected_txn == item && stored_txn == *item && expected_peer == peer => {} + // Sad path case + (expected_result, actual_fetcher_result, maybe_stored_txn) => { + panic!( + "Expected result type {:?} but found {:?} (stored transaction is {:?})", + expected_result, actual_fetcher_result, maybe_stored_txn + ) + } + } } #[tokio::test] @@ -282,34 +514,33 @@ async fn should_fetch_from_local() { NetworkController::::create_active(); let (mut network, mut rng, node_ids) = { - let mut network = Network::::new(); + let mut network = TestingNetwork::::new(); let mut rng = TestRng::new(); let node_ids = network.add_nodes(&mut rng, NETWORK_SIZE).await; (network, rng, node_ids) }; - // Create a random deploy. - let deploy = Deploy::random(&mut rng); + // Create a random txn. + let txn = Transaction::random(&mut rng); - // Store deploy on a node. + // Store txn on a node. let node_to_store_on = &node_ids[0]; - store_deploy(&deploy, node_to_store_on, &mut network, None, &mut rng).await; + store_txn(&txn, node_to_store_on, &mut network, &mut rng).await; - // Try to fetch the deploy from a node that holds it. + // Try to fetch the txn from a node that holds it. 
let node_id = node_ids[0]; - let deploy_hash = *deploy.id(); + let txn_id = txn.fetch_id(); let fetched = Arc::new(Mutex::new((false, None))); network - .process_injected_effect_on( - &node_id, - fetch_deploy(deploy_hash, node_id, Arc::clone(&fetched)), - ) + .process_injected_effect_on(&node_id, fetch_txn(txn_id, node_id, Arc::clone(&fetched))) .await; - let expected_result = Some(FetchResult::FromStorage(Box::new(deploy))); + let expected_result = ExpectedFetchedTransactionResult::FromStorage { + expected_txn: Box::new(txn), + }; assert_settled( &node_id, - deploy_hash, + txn_id, expected_result, fetched, &mut network, @@ -327,35 +558,38 @@ async fn should_fetch_from_peer() { NetworkController::::create_active(); let (mut network, mut rng, node_ids) = { - let mut network = Network::::new(); + let mut network = TestingNetwork::::new(); let mut rng = TestRng::new(); let node_ids = network.add_nodes(&mut rng, NETWORK_SIZE).await; (network, rng, node_ids) }; - // Create a random deploy. - let deploy = Deploy::random(&mut rng); + // Create a random txn. + let txn = Transaction::random(&mut rng); - // Store deploy on a node. - let node_with_deploy = node_ids[0]; - store_deploy(&deploy, &node_with_deploy, &mut network, None, &mut rng).await; + // Store txn on a node. + let node_with_txn = node_ids[0]; + store_txn(&txn, &node_with_txn, &mut network, &mut rng).await; - let node_without_deploy = node_ids[1]; - let deploy_hash = *deploy.id(); + let node_without_txn = node_ids[1]; + let txn_id = txn.fetch_id(); let fetched = Arc::new(Mutex::new((false, None))); - // Try to fetch the deploy from a node that does not hold it; should get from peer. + // Try to fetch the txn from a node that does not hold it; should get from peer. 
network .process_injected_effect_on( - &node_without_deploy, - fetch_deploy(deploy_hash, node_with_deploy, Arc::clone(&fetched)), + &node_without_txn, + fetch_txn(txn_id, node_with_txn, Arc::clone(&fetched)), ) .await; - let expected_result = Some(FetchResult::FromPeer(Box::new(deploy), node_with_deploy)); + let expected_result = ExpectedFetchedTransactionResult::FromPeer { + expected_txn: Box::new(txn), + expected_peer: node_with_txn, + }; assert_settled( - &node_without_deploy, - deploy_hash, + &node_without_txn, + txn_id, expected_result, fetched, &mut network, @@ -373,28 +607,28 @@ async fn should_timeout_fetch_from_peer() { NetworkController::::create_active(); let (mut network, mut rng, node_ids) = { - let mut network = Network::::new(); + let mut network = TestingNetwork::::new(); let mut rng = TestRng::new(); let node_ids = network.add_nodes(&mut rng, NETWORK_SIZE).await; (network, rng, node_ids) }; - // Create a random deploy. - let deploy = Deploy::random(&mut rng); - let deploy_hash = *deploy.id(); + // Create a random txn. + let txn = Transaction::random(&mut rng); + let txn_id = txn.fetch_id(); let holding_node = node_ids[0]; let requesting_node = node_ids[1]; - // Store deploy on holding node. - store_deploy(&deploy, &holding_node, &mut network, None, &mut rng).await; + // Store txn on holding node. + store_txn(&txn, &holding_node, &mut network, &mut rng).await; - // Initiate requesting node asking for deploy from holding node. + // Initiate requesting node asking for txn from holding node. 
let fetched = Arc::new(Mutex::new((false, None))); network .process_injected_effect_on( &requesting_node, - fetch_deploy(deploy_hash, holding_node, Arc::clone(&fetched)), + fetch_txn(txn_id, holding_node, Arc::clone(&fetched)), ) .await; @@ -403,8 +637,8 @@ async fn should_timeout_fetch_from_peer() { .crank_until( &requesting_node, &mut rng, - move |event: &ReactorEvent| { - if let ReactorEvent::NetworkRequest(NetworkRequest::SendMessage { + move |event: &Event| { + if let Event::NetworkRequestMessage(NetworkRequest::SendMessage { payload, .. }) = event { @@ -422,8 +656,8 @@ async fn should_timeout_fetch_from_peer() { .crank_until( &holding_node, &mut rng, - move |event: &ReactorEvent| { - if let ReactorEvent::NetworkRequest(NetworkRequest::SendMessage { + move |event: &Event| { + if let Event::NetworkRequestMessage(NetworkRequest::SendMessage { payload, .. }) = event { @@ -437,16 +671,15 @@ async fn should_timeout_fetch_from_peer() { .await; // Advance time. - let secs_to_advance = Config::default().get_from_peer_timeout(); - time::pause(); - time::advance(Duration::from_secs(secs_to_advance + 10)).await; - time::resume(); + let duration_to_advance: Duration = Config::default().get_from_peer_timeout().into(); + let duration_to_advance = duration_to_advance + Duration::from_secs(10); + testing::advance_time(duration_to_advance).await; // Settle the network, allowing timeout to avoid panic. 
- let expected_result = None; + let expected_result = ExpectedFetchedTransactionResult::TimedOut; assert_settled( &requesting_node, - deploy_hash, + txn_id, expected_result, fetched, &mut network, diff --git a/node/src/components/gossiper.rs b/node/src/components/gossiper.rs index 1de52d8b50..7bc533278c 100644 --- a/node/src/components/gossiper.rs +++ b/node/src/components/gossiper.rs @@ -1,209 +1,135 @@ mod config; +#[cfg(test)] mod error; mod event; +mod gossip_item; mod gossip_table; +mod item_provider; mod message; mod metrics; +mod provider_impls; mod tests; -use datasize::DataSize; -use futures::FutureExt; -use prometheus::Registry; -use smallvec::smallvec; use std::{ collections::HashSet, - convert::Infallible, fmt::{self, Debug, Formatter}, time::Duration, }; -use tracing::{debug, error, warn}; + +use datasize::DataSize; +use prometheus::Registry; +use tracing::{debug, error, trace, warn}; use crate::{ components::Component, effect::{ announcements::GossiperAnnouncement, - requests::{NetworkRequest, StorageRequest}, - EffectBuilder, EffectExt, Effects, + incoming::GossiperIncoming, + requests::{BeginGossipRequest, NetworkRequest, StorageRequest}, + EffectBuilder, EffectExt, Effects, GossipTarget, }, - protocol::Message as NodeMessage, - types::{Deploy, DeployHash, Item, NodeId}, + types::NodeId, utils::Source, NodeRng, }; -pub use config::Config; -pub use error::Error; -pub use event::Event; +pub(crate) use config::Config; +pub(crate) use event::Event; +pub(crate) use gossip_item::{GossipItem, LargeGossipItem, SmallGossipItem}; use gossip_table::{GossipAction, GossipTable}; -pub use message::Message; -use metrics::GossiperMetrics; - -/// A helper trait whose bounds represent the requirements for a reactor event that `Gossiper` can -/// work with. 
-pub trait ReactorEventT: - From> - + From>> - + From> - + From - + From> - + Send - + 'static -where - T: Item + 'static, - ::Id: 'static, -{ -} - -impl ReactorEventT for REv -where - T: Item + 'static, - ::Id: 'static, - REv: From> - + From>> - + From> - + From - + From> - + Send - + 'static, -{ -} - -/// This function can be passed in to `Gossiper::new()` as the `get_from_holder` arg when -/// constructing a `Gossiper`. -pub(crate) fn get_deploy_from_storage>( - effect_builder: EffectBuilder, - deploy_hash: DeployHash, - sender: NodeId, -) -> Effects> { - effect_builder - .get_deploys_from_storage(smallvec![deploy_hash]) - .event(move |mut results| { - let result = if results.len() == 1 { - results - .pop() - .unwrap() - .ok_or_else(|| String::from("failed to get deploy from storage")) - } else { - Err(String::from("expected a single result")) - }; - Event::GetFromHolderResult { - item_id: deploy_hash, - requester: sender, - result: Box::new(result), - } - }) -} +use item_provider::ItemProvider; +pub(crate) use message::Message; +use metrics::Metrics; /// The component which gossips to peers and handles incoming gossip messages from peers. #[allow(clippy::type_complexity)] -#[derive(DataSize)] -pub(crate) struct Gossiper +pub(crate) struct Gossiper where - T: Item + 'static, - REv: ReactorEventT, + T: GossipItem + 'static, { table: GossipTable, gossip_timeout: Duration, get_from_peer_timeout: Duration, - #[data_size(skip)] // Not well supported by datasize. - get_from_holder: - Box, T::Id, NodeId) -> Effects> + Send + 'static>, - #[data_size(skip)] - metrics: GossiperMetrics, + validate_and_store_timeout: Duration, + name: &'static str, + metrics: Metrics, } -impl> Gossiper { - /// Constructs a new gossiper component for use where `T::ID_IS_COMPLETE_ITEM == false`, i.e. - /// where the gossip messages themselves don't contain the actual data being gossiped, they - /// contain just the identifiers. 
- /// - /// `get_from_holder` is called by the gossiper when handling either a `Message::GossipResponse` - /// where the sender indicates it needs the full item, or a `Message::GetRequest`. - /// - /// For an example of how `get_from_holder` should be implemented, see - /// `gossiper::get_deploy_from_store()` which is used by `Gossiper`. +impl Gossiper { + /// Constructs a new gossiper component. /// /// Must be supplied with a name, which should be a snake-case identifier to disambiguate the /// specific gossiper from other potentially present gossipers. - pub(crate) fn new_for_partial_items( - name: &str, + pub(crate) fn new( + name: &'static str, config: Config, - get_from_holder: impl Fn(EffectBuilder, T::Id, NodeId) -> Effects> - + Send - + 'static, registry: &Registry, ) -> Result { - assert!( - !T::ID_IS_COMPLETE_ITEM, - "this should only be called for types where T::ID_IS_COMPLETE_ITEM is false" - ); Ok(Gossiper { table: GossipTable::new(config), - gossip_timeout: Duration::from_secs(config.gossip_request_timeout_secs()), - get_from_peer_timeout: Duration::from_secs(config.get_remainder_timeout_secs()), - get_from_holder: Box::new(get_from_holder), - metrics: GossiperMetrics::new(name, registry)?, + gossip_timeout: config.gossip_request_timeout().into(), + get_from_peer_timeout: config.get_remainder_timeout().into(), + validate_and_store_timeout: config.validate_and_store_timeout().into(), + name, + metrics: Metrics::new(name, registry)?, }) } - /// Constructs a new gossiper component for use where `T::ID_IS_COMPLETE_ITEM == true`, i.e. - /// where the gossip messages themselves contain the actual data being gossiped. - /// - /// Must be supplied with a name, which should be a snake-case identifier to disambiguate the - /// specific gossiper from other potentially present gossipers. 
- pub(crate) fn new_for_complete_items( - name: &str, - config: Config, - registry: &Registry, - ) -> Result { - assert!( - T::ID_IS_COMPLETE_ITEM, - "this should only be called for types where T::ID_IS_COMPLETE_ITEM is true" - ); - Ok(Gossiper { - table: GossipTable::new(config), - gossip_timeout: Duration::from_secs(config.gossip_request_timeout_secs()), - get_from_peer_timeout: Duration::from_secs(config.get_remainder_timeout_secs()), - get_from_holder: Box::new(|_, item, _| { - panic!("gossiper should never try to get {}", item) - }), - metrics: GossiperMetrics::new(name, registry)?, - }) - } - - /// Handles a new item received from a peer or client for which we should begin gossiping. - /// - /// Note that this doesn't include items gossiped to us; those are handled in `handle_gossip()`. - fn handle_item_received( + /// This could be the first time we've encountered this item in the gossiper (e.g. the + /// `Network` component requesting that we gossip an address, or the `TransactionAcceptor` + /// having accepted a transaction which we received from a client), or it could be the result + /// of this gossiper having requested the complete data from a peer, announcing it, and that + /// complete item having been deemed valid by the relevant component and stored is now ready to + /// be gossiped onwards by us. 
+ fn handle_item_received( &mut self, effect_builder: EffectBuilder, item_id: T::Id, - source: Source, - ) -> Effects> { + source: Source, + target: GossipTarget, + ) -> Effects> + where + REv: From>> + From> + Send, + { debug!(item=%item_id, %source, "received new gossip item"); - if let Some(should_gossip) = self.table.new_complete_data(&item_id, source.node_id()) { - self.metrics.items_received.inc(); - self.gossip( - effect_builder, - item_id, - should_gossip.count, - should_gossip.exclude_peers, - ) - } else { - Effects::new() + match self + .table + .new_complete_data(&item_id, source.node_id(), target) + { + GossipAction::ShouldGossip(should_gossip) => { + self.metrics.items_received.inc(); + Self::gossip( + effect_builder, + item_id, + should_gossip.target, + should_gossip.count, + should_gossip.exclude_peers, + ) + } + GossipAction::Noop => Effects::new(), + GossipAction::AnnounceFinished => { + effect_builder.announce_finished_gossiping(item_id).ignore() + } + GossipAction::GetRemainder { .. } | GossipAction::AwaitingRemainder => { + error!("can't be waiting for remainder since we hold the complete data"); + Effects::new() + } } } /// Gossips the given item ID to `count` random peers excluding the indicated ones. - fn gossip( - &mut self, + fn gossip( effect_builder: EffectBuilder, item_id: T::Id, + gossip_target: GossipTarget, count: usize, exclude_peers: HashSet, - ) -> Effects> { - let message = Message::Gossip(item_id); + ) -> Effects> + where + REv: From>> + Send, + { + let message = Message::Gossip(item_id.clone()); effect_builder - .gossip_message(message, count, exclude_peers) + .gossip_message(message, gossip_target, count, exclude_peers) .event(move |peers| Event::GossipedTo { item_id, requested_count: count, @@ -212,58 +138,77 @@ impl> Gossiper { } /// Handles the response from the network component detailing which peers it gossiped to. 
- fn gossiped_to( + fn gossiped_to( &mut self, effect_builder: EffectBuilder, item_id: T::Id, requested_count: usize, peers: HashSet, - ) -> Effects> { + ) -> Effects> + where + REv: From> + Send, + { self.metrics.times_gossiped.inc_by(peers.len() as u64); // We don't have any peers to gossip to, so pause the process, which will eventually result // in the entry being removed. if peers.is_empty() { self.metrics.times_ran_out_of_peers.inc(); - - self.table.pause(&item_id); - debug!(item=%item_id, "paused gossiping since no more peers to gossip to"); - return Effects::new(); } // We didn't gossip to as many peers as was requested. Reduce the table entry's in-flight // count. - if peers.len() < requested_count { - self.table - .reduce_in_flight_count(&item_id, requested_count - peers.len()); + let mut effects = Effects::new(); + if peers.len() < requested_count + && self + .table + .reduce_in_flight_count(&item_id, requested_count - peers.len()) + { + effects.extend( + effect_builder + .announce_finished_gossiping(item_id.clone()) + .ignore(), + ); } + // Remember which peers we *tried* to infect. + self.table + .register_infection_attempt(&item_id, peers.iter()); + // Set timeouts to check later that the specified peers all responded. - peers - .into_iter() - .map(|peer| { + for peer in peers { + let item_id = item_id.clone(); + effects.extend( effect_builder .set_timeout(self.gossip_timeout) - .map(move |_| smallvec![Event::CheckGossipTimeout { item_id, peer }]) - .boxed() - }) - .collect() + .event(move |_| Event::CheckGossipTimeout { item_id, peer }), + ) + } + + effects } /// Checks that the given peer has responded to a previous gossip request we sent it. 
- fn check_gossip_timeout( + fn check_gossip_timeout( &mut self, effect_builder: EffectBuilder, item_id: T::Id, peer: NodeId, - ) -> Effects> { + ) -> Effects> + where + REv: From>> + From> + Send, + { match self.table.check_timeout(&item_id, peer) { - GossipAction::ShouldGossip(should_gossip) => self.gossip( + GossipAction::ShouldGossip(should_gossip) => Self::gossip( effect_builder, item_id, + should_gossip.target, should_gossip.count, should_gossip.exclude_peers, ), GossipAction::Noop => Effects::new(), + GossipAction::AnnounceFinished => { + effect_builder.announce_finished_gossiping(item_id).ignore() + } GossipAction::GetRemainder { .. } | GossipAction::AwaitingRemainder => { warn!( "can't have gossiped if we don't hold the complete data - likely the timeout \ @@ -274,35 +219,30 @@ impl> Gossiper { } } - /// Checks that the given peer has responded to a previous gossip response or `GetRequest` we + /// Checks that the given peer has responded to a previous `GossipResponse` or `GetItem` we /// sent it indicating we wanted to get the full item from it. - fn check_get_from_peer_timeout( + fn check_get_from_peer_timeout( &mut self, effect_builder: EffectBuilder, item_id: T::Id, peer: NodeId, - ) -> Effects> { + ) -> Effects> + where + REv: From>> + From> + Send, + { match self.table.remove_holder_if_unresponsive(&item_id, peer) { - GossipAction::ShouldGossip(should_gossip) => self.gossip( + GossipAction::ShouldGossip(should_gossip) => Self::gossip( effect_builder, item_id, + should_gossip.target, should_gossip.count, should_gossip.exclude_peers, ), GossipAction::GetRemainder { holder } => { // The previous peer failed to provide the item, so we still need to get it. Send - // a `GetRequest` to a different holder and set a timeout to check we got the - // response. 
- let request = match NodeMessage::new_get_request::(&item_id) { - Ok(request) => request, - Err(error) => { - error!("failed to create get-request: {}", error); - // Treat this as if the holder didn't respond - i.e. try to get from a - // different holder. - return self.check_get_from_peer_timeout(effect_builder, item_id, holder); - } - }; + // a `GetItem` to a different holder and set a timeout to check we got the response. + let request = Message::GetItem(item_id.clone()); let mut effects = effect_builder.send_message(holder, request).ignore(); effects.extend( effect_builder @@ -315,114 +255,167 @@ impl> Gossiper { effects } + GossipAction::AnnounceFinished => { + effect_builder.announce_finished_gossiping(item_id).ignore() + } + GossipAction::Noop | GossipAction::AwaitingRemainder => Effects::new(), } } - /// Handles an incoming gossip request from a peer on the network. - fn handle_gossip( + /// Handles an incoming gossip request from a peer on the network, after having registered the + /// item in the gossip table. + fn handle_gossip( &mut self, effect_builder: EffectBuilder, item_id: T::Id, sender: NodeId, - ) -> Effects> { - let action = if T::ID_IS_COMPLETE_ITEM { - self.table - .new_complete_data(&item_id, Some(sender)) - .map_or_else(|| GossipAction::Noop, GossipAction::ShouldGossip) - } else { - self.table.new_partial_data(&item_id, sender) - }; - - debug!(item=%item_id, %sender, %action, "received gossip request"); - - match action { + action: GossipAction, + ) -> Effects> + where + REv: From>> + From> + Send, + { + let mut effects = match action { GossipAction::ShouldGossip(should_gossip) => { + debug!(item=%item_id, %sender, %should_gossip, "received gossip request"); self.metrics.items_received.inc(); // Gossip the item ID. 
- let mut effects = self.gossip( + let mut effects = Self::gossip( effect_builder, - item_id, + item_id.clone(), + should_gossip.target, should_gossip.count, should_gossip.exclude_peers, ); // If this is a new complete item to us, announce it. - if T::ID_IS_COMPLETE_ITEM && !should_gossip.is_already_held { + if ID_IS_COMPLETE_ITEM && !should_gossip.is_already_held { debug!(item=%item_id, "announcing new complete gossip item received"); effects.extend( effect_builder - .announce_complete_item_received_via_gossip(item_id) + .announce_complete_item_received_via_gossip(item_id.clone()) .ignore(), ); } // Send a response to the sender indicating whether we already hold the item. let reply = Message::GossipResponse { - item_id, + item_id: item_id.clone(), is_already_held: should_gossip.is_already_held, }; effects.extend(effect_builder.send_message(sender, reply).ignore()); effects } GossipAction::GetRemainder { .. } => { + debug!(item=%item_id, %sender, %action, "received gossip request"); self.metrics.items_received.inc(); // Send a response to the sender indicating we want the full item from them, and set // a timeout for this response. let reply = Message::GossipResponse { - item_id, + item_id: item_id.clone(), is_already_held: false, }; let mut effects = effect_builder.send_message(sender, reply).ignore(); + let item_id_clone = item_id.clone(); effects.extend( effect_builder .set_timeout(self.get_from_peer_timeout) .event(move |_| Event::CheckGetFromPeerTimeout { - item_id, + item_id: item_id_clone, peer: sender, }), ); effects } - GossipAction::Noop | GossipAction::AwaitingRemainder => { + GossipAction::Noop + | GossipAction::AwaitingRemainder + | GossipAction::AnnounceFinished => { + trace!(item=%item_id, %sender, %action, "received gossip request"); // Send a response to the sender indicating we already hold the item. 
let reply = Message::GossipResponse { - item_id, + item_id: item_id.clone(), is_already_held: true, }; - effect_builder.send_message(sender, reply).ignore() + let mut effects = effect_builder.send_message(sender, reply).ignore(); + + if action == GossipAction::AnnounceFinished { + effects.extend( + effect_builder + .announce_finished_gossiping(item_id.clone()) + .ignore(), + ); + } + + effects } + }; + if T::REQUIRES_GOSSIP_RECEIVED_ANNOUNCEMENT { + effects.extend( + effect_builder + .announce_gossip_received(item_id, sender) + .ignore(), + ); } + effects } /// Handles an incoming gossip response from a peer on the network. - fn handle_gossip_response( + fn handle_gossip_response( &mut self, effect_builder: EffectBuilder, item_id: T::Id, is_already_held: bool, sender: NodeId, - ) -> Effects> { + ) -> Effects> + where + REv: From>> + + From + + From> + + Send, + Self: ItemProvider, + { let mut effects: Effects<_> = Effects::new(); + if !self.table.has_entry(&item_id) { + debug!( + item = %item_id, + %sender, + "got a gossip response for an item we're not gossiping" + ); + return effects; + } + let action = if is_already_held { self.table.already_infected(&item_id, sender) } else { - if !T::ID_IS_COMPLETE_ITEM { + if !ID_IS_COMPLETE_ITEM { // `sender` doesn't hold the full item; get the item from the component responsible // for holding it, then send it to `sender`. 
- effects.extend((self.get_from_holder)(effect_builder, item_id, sender)); + let cloned_id = item_id.clone(); + effects.extend( + Self::get_from_storage(effect_builder, item_id.clone()).event( + move |maybe_item| Event::GetFromStorageResult { + item_id: cloned_id, + requester: sender, + maybe_item, + }, + ), + ); } self.table.we_infected(&item_id, sender) }; match action { - GossipAction::ShouldGossip(should_gossip) => effects.extend(self.gossip( + GossipAction::ShouldGossip(should_gossip) => effects.extend(Self::gossip( effect_builder, item_id, + should_gossip.target, should_gossip.count, should_gossip.exclude_peers, )), GossipAction::Noop => (), + GossipAction::AnnounceFinished => { + effects.extend(effect_builder.announce_finished_gossiping(item_id).ignore()) + } GossipAction::GetRemainder { .. } => { error!("shouldn't try to get remainder as result of receiving a gossip response"); } @@ -437,31 +430,113 @@ impl> Gossiper { effects } - /// Handles the `Ok` case for a `Result` of attempting to get the item from the component - /// responsible for holding it, in order to send it to the requester. - fn got_from_holder( + /// Handles the `Some` case when attempting to get the item from storage in order to send it to + /// the requester. + fn got_from_storage( + effect_builder: EffectBuilder, + item: Box, + requester: NodeId, + ) -> Effects> + where + REv: From>> + Send, + { + let message = Message::Item(item); + effect_builder.send_message(requester, message).ignore() + } + + /// Handles the `None` case when attempting to get the item from storage. 
+ fn failed_to_get_from_storage( &mut self, effect_builder: EffectBuilder, - item: T, + item_id: T::Id, + ) -> Effects> + where + REv: From> + Send, + { + error!( + "finished gossiping {} since failed to get from storage", + item_id + ); + + if self.table.force_finish(&item_id) { + return effect_builder.announce_finished_gossiping(item_id).ignore(); + } + + Effects::new() + } + + fn handle_get_item_request( + &self, + effect_builder: EffectBuilder, + item_id: T::Id, requester: NodeId, - ) -> Effects> { - match NodeMessage::new_get_response(&item) { - Ok(message) => effect_builder.send_message(requester, message).ignore(), - Err(error) => { - error!("failed to create get-response: {}", error); - Effects::new() - } + ) -> Effects> + where + REv: From + Send, + Self: ItemProvider, + { + if !self.table.has_entry(&item_id) { + debug!( + item = %item_id, + %requester, + "got a gossip get-item request for an item we're not gossiping" + ); + return Effects::new(); } + + Self::get_from_storage(effect_builder, item_id.clone()).event(move |maybe_item| { + Event::GetFromStorageResult { + item_id, + requester, + maybe_item, + } + }) } - /// Handles the `Err` case for a `Result` of attempting to get the item from the component - /// responsible for holding it. 
- fn failed_to_get_from_holder(&mut self, item_id: T::Id, error: String) -> Effects> { - self.table.pause(&item_id); - error!( - "paused gossiping {} since failed to get from store: {}", - item_id, error + fn handle_item_received_from_peer( + &self, + effect_builder: EffectBuilder, + item: Box, + sender: NodeId, + ) -> Effects> + where + REv: From> + Send, + { + let item_id = item.gossip_id(); + if !self.table.has_entry(&item_id) { + debug!( + item = %item_id, + %sender, + "got a full gossip item for an item we're not gossiping" + ); + return Effects::new(); + } + + let mut effects = effect_builder + .announce_item_body_received_via_gossip(item, sender) + .ignore(); + effects.extend( + effect_builder + .set_timeout(self.validate_and_store_timeout) + .event(move |_| Event::CheckItemReceivedTimeout { item_id }), ); + effects + } + + /// Checks that having made a `NewItemBody` announcement (in `handle_item_received_from_peer`) + /// we have subsequently received an `ItemReceived` for the item from whichever component is + /// responsible for validating and storing the item. + fn check_item_received_timeout( + &mut self, + effect_builder: EffectBuilder, + item_id: T::Id, + ) -> Effects> + where + REv: From> + Send, + { + if self.table.finish_if_not_held_by_us(&item_id) { + return effect_builder.announce_finished_gossiping(item_id).ignore(); + } Effects::new() } @@ -473,19 +548,20 @@ impl> Gossiper { self.metrics .table_items_finished .set(self.table.items_finished() as i64); - self.metrics - .table_items_paused - .set(self.table.items_paused() as i64); } } -impl Component for Gossiper +/// Impl for gossipers of large items, i.e. where `T::ID_IS_COMPLETE_ITEM` is false. 
+impl Component for Gossiper where - T: Item + 'static, - REv: ReactorEventT, + T: LargeGossipItem + 'static, + REv: From>> + + From + + From> + + Send, + Self: ItemProvider, { type Event = Event; - type ConstructionError = Infallible; fn handle_event( &mut self, @@ -494,9 +570,22 @@ where event: Self::Event, ) -> Effects { let effects = match event { - Event::ItemReceived { item_id, source } => { - self.handle_item_received(effect_builder, item_id, source) + Event::BeginGossipRequest(BeginGossipRequest { + item_id, + source, + target, + responder, + }) => { + let mut effects = + self.handle_item_received(effect_builder, item_id, source, target); + effects.extend(responder.respond(()).ignore()); + effects } + Event::ItemReceived { + item_id, + source, + target, + } => self.handle_item_received(effect_builder, item_id, source, target), Event::GossipedTo { item_id, requested_count, @@ -508,34 +597,197 @@ where Event::CheckGetFromPeerTimeout { item_id, peer } => { self.check_get_from_peer_timeout(effect_builder, item_id, peer) } - Event::MessageReceived { message, sender } => match message { - Message::Gossip(item_id) => self.handle_gossip(effect_builder, item_id, sender), + Event::Incoming(GossiperIncoming:: { sender, message }) => match *message { + Message::Gossip(item_id) => { + Self::is_stored(effect_builder, item_id.clone()).event(move |result| { + Event::IsStoredResult { + item_id, + sender, + result, + } + }) + } Message::GossipResponse { item_id, is_already_held, } => self.handle_gossip_response(effect_builder, item_id, is_already_held, sender), + Message::GetItem(item_id) => { + self.handle_get_item_request(effect_builder, item_id, sender) + } + Message::Item(item) => { + self.handle_item_received_from_peer(effect_builder, item, sender) + } }, - Event::GetFromHolderResult { + Event::CheckItemReceivedTimeout { item_id } => { + self.check_item_received_timeout(effect_builder, item_id) + } + Event::IsStoredResult { + item_id, + sender, + result: 
is_stored_locally, + } => { + let action = if self.table.has_entry(&item_id) || !is_stored_locally { + self.table.new_data_id(&item_id, sender) + } else { + // We're not already handling this item, and we do have the full item stored, so + // don't initiate gossiping for it. + GossipAction::Noop + }; + self.handle_gossip(effect_builder, item_id, sender, action) + } + Event::GetFromStorageResult { item_id, requester, - result, - } => match *result { - Ok(item) => self.got_from_holder(effect_builder, item, requester), - Err(error) => self.failed_to_get_from_holder(item_id, error), + maybe_item, + } => match maybe_item { + Some(item) => Self::got_from_storage(effect_builder, item, requester), + None => self.failed_to_get_from_storage(effect_builder, item_id), + }, + }; + self.update_gossip_table_metrics(); + effects + } + + fn name(&self) -> &str { + self.name + } +} + +/// Impl for gossipers of small items, i.e. where `T::ID_IS_COMPLETE_ITEM` is true. +impl Component for Gossiper +where + T: SmallGossipItem + 'static, + REv: From>> + + From + + From> + + Send, + Self: ItemProvider, +{ + type Event = Event; + + fn handle_event( + &mut self, + effect_builder: EffectBuilder, + _rng: &mut NodeRng, + event: Self::Event, + ) -> Effects { + let effects = match event { + Event::BeginGossipRequest(BeginGossipRequest { + item_id, + source, + target, + responder, + }) => { + let mut effects = + self.handle_item_received(effect_builder, item_id, source, target); + effects.extend(responder.respond(()).ignore()); + effects + } + Event::ItemReceived { + item_id, + source, + target, + } => self.handle_item_received(effect_builder, item_id, source, target), + Event::GossipedTo { + item_id, + requested_count, + peers, + } => self.gossiped_to(effect_builder, item_id, requested_count, peers), + Event::CheckGossipTimeout { item_id, peer } => { + self.check_gossip_timeout(effect_builder, item_id, peer) + } + Event::CheckGetFromPeerTimeout { item_id, peer } => { + error!(%item_id, %peer, 
"should not timeout getting small item from peer"); + Effects::new() + } + Event::Incoming(GossiperIncoming:: { sender, message }) => match *message { + Message::Gossip(item_id) => { + let target = ::id_as_item(&item_id).gossip_target(); + let action = self.table.new_complete_data(&item_id, Some(sender), target); + self.handle_gossip(effect_builder, item_id, sender, action) + } + Message::GossipResponse { + item_id, + is_already_held, + } => self.handle_gossip_response(effect_builder, item_id, is_already_held, sender), + Message::GetItem(item_id) => { + debug!(%item_id, %sender, "unexpected get request for small item"); + Effects::new() + } + Message::Item(item) => { + let item_id = item.gossip_id(); + debug!(%item_id, %sender, "unexpected get response for small item"); + Effects::new() + } }, + Event::CheckItemReceivedTimeout { item_id } => { + error!(%item_id, "should not timeout item-received for small item"); + Effects::new() + } + event @ Event::IsStoredResult { .. } => { + error!(%event, "unexpected is-stored result for small item"); + Effects::new() + } + Event::GetFromStorageResult { + item_id, + requester, + maybe_item, + } => { + error!( + %item_id, %requester, ?maybe_item, + "unexpected get-from-storage result for small item" + ); + Effects::new() + } }; self.update_gossip_table_metrics(); effects } + + fn name(&self) -> &str { + self.name + } } -impl> Debug for Gossiper { +impl Debug + for Gossiper +{ fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { formatter - .debug_struct("Gossiper") + .debug_struct(self.name) .field("table", &self.table) .field("gossip_timeout", &self.gossip_timeout) .field("get_from_peer_timeout", &self.get_from_peer_timeout) + .field( + "validate_and_store_timeout", + &self.validate_and_store_timeout, + ) .finish() } } + +impl DataSize + for Gossiper +{ + const IS_DYNAMIC: bool = true; + + const STATIC_HEAP_SIZE: usize = 0; + + #[inline] + fn estimate_heap_size(&self) -> usize { + let Gossiper { + table, + gossip_timeout, 
+ get_from_peer_timeout, + validate_and_store_timeout, + name, + metrics: _, + } = self; + + table.estimate_heap_size() + + gossip_timeout.estimate_heap_size() + + get_from_peer_timeout.estimate_heap_size() + + validate_and_store_timeout.estimate_heap_size() + + name.estimate_heap_size() + } +} diff --git a/node/src/components/gossiper/config.rs b/node/src/components/gossiper/config.rs index 1e4ba90da0..8027d895c3 100644 --- a/node/src/components/gossiper/config.rs +++ b/node/src/components/gossiper/config.rs @@ -1,3 +1,5 @@ +use std::str::FromStr; + use datasize::DataSize; use serde::{ de::{Deserializer, Error as SerdeError, Unexpected}, @@ -5,27 +7,32 @@ use serde::{ }; use tracing::error; +use casper_types::TimeDiff; + #[cfg(test)] -use super::Error; +use super::error::Error; const DEFAULT_INFECTION_TARGET: u8 = 3; const DEFAULT_SATURATION_LIMIT_PERCENT: u8 = 80; pub(super) const MAX_SATURATION_LIMIT_PERCENT: u8 = 99; -pub(super) const DEFAULT_FINISHED_ENTRY_DURATION_SECS: u64 = 60; -const DEFAULT_GOSSIP_REQUEST_TIMEOUT_SECS: u64 = 10; -const DEFAULT_GET_REMAINDER_TIMEOUT_SECS: u64 = 60; +pub(super) const DEFAULT_FINISHED_ENTRY_DURATION: &str = "60sec"; +const DEFAULT_GOSSIP_REQUEST_TIMEOUT: &str = "10sec"; +const DEFAULT_GET_REMAINDER_TIMEOUT: &str = "60sec"; +const DEFAULT_VALIDATE_AND_STORE_TIMEOUT: &str = "60sec"; +#[cfg(test)] +const SMALL_TIMEOUTS_FINISHED_ENTRY_DURATION: &str = "2sec"; #[cfg(test)] -const SMALL_TIMEOUTS_FINISHED_ENTRY_DURATION_SECS: u64 = 2; +const SMALL_TIMEOUTS_GOSSIP_REQUEST_TIMEOUT: &str = "1sec"; #[cfg(test)] -const SMALL_TIMEOUTS_GOSSIP_REQUEST_TIMEOUT_SECS: u64 = 1; +const SMALL_TIMEOUTS_GET_REMAINDER_TIMEOUT: &str = "1sec"; #[cfg(test)] -const SMALL_TIMEOUTS_GET_REMAINDER_TIMEOUT_SECS: u64 = 1; +const SMALL_TIMEOUTS_VALIDATE_AND_STORE_TIMEOUT: &str = "1sec"; /// Configuration options for gossiping. 
#[derive(Copy, Clone, DataSize, Debug, Deserialize, Serialize)] pub struct Config { /// Target number of peers to infect with a given piece of data. - infection_target: u8, + pub infection_target: u8, /// The saturation limit as a percentage, with a maximum value of 99. Used as a termination /// condition. /// @@ -33,19 +40,22 @@ pub struct Config { /// don't manage to newly infect 3 peers. We will stop gossiping once we know of more than 15 /// holders excluding us since 80% saturation would imply 3 new infections in 15 peers. #[serde(deserialize_with = "deserialize_saturation_limit_percent")] - saturation_limit_percent: u8, + pub saturation_limit_percent: u8, /// The maximum duration in seconds for which to keep finished entries. /// /// The longer they are retained, the lower the likelihood of re-gossiping a piece of data. /// However, the longer they are retained, the larger the list of finished entries can grow. - finished_entry_duration_secs: u64, + pub finished_entry_duration: TimeDiff, /// The timeout duration in seconds for a single gossip request, i.e. for a single gossip /// message sent from this node, it will be considered timed out if the expected response from /// that peer is not received within this specified duration. - gossip_request_timeout_secs: u64, + pub gossip_request_timeout: TimeDiff, /// The timeout duration in seconds for retrieving the remaining part(s) of newly-discovered /// data from a peer which gossiped information about that data to this node. - get_remainder_timeout_secs: u64, + pub get_remainder_timeout: TimeDiff, + /// The timeout duration for a newly-received, gossiped item to be validated and stored by + /// another component before the gossiper abandons waiting to gossip the item onwards. 
+ pub validate_and_store_timeout: TimeDiff, } impl Config { @@ -53,9 +63,10 @@ impl Config { pub(crate) fn new( infection_target: u8, saturation_limit_percent: u8, - finished_entry_duration_secs: u64, - gossip_request_timeout_secs: u64, - get_remainder_timeout_secs: u64, + finished_entry_duration: TimeDiff, + gossip_request_timeout: TimeDiff, + get_remainder_timeout: TimeDiff, + validate_and_store_timeout: TimeDiff, ) -> Result { if saturation_limit_percent > MAX_SATURATION_LIMIT_PERCENT { return Err(Error::InvalidSaturationLimit); @@ -63,18 +74,26 @@ impl Config { Ok(Config { infection_target, saturation_limit_percent, - finished_entry_duration_secs, - gossip_request_timeout_secs, - get_remainder_timeout_secs, + finished_entry_duration, + gossip_request_timeout, + get_remainder_timeout, + validate_and_store_timeout, }) } #[cfg(test)] pub(crate) fn new_with_small_timeouts() -> Self { Config { - finished_entry_duration_secs: SMALL_TIMEOUTS_FINISHED_ENTRY_DURATION_SECS, - gossip_request_timeout_secs: SMALL_TIMEOUTS_GOSSIP_REQUEST_TIMEOUT_SECS, - get_remainder_timeout_secs: SMALL_TIMEOUTS_GET_REMAINDER_TIMEOUT_SECS, + finished_entry_duration: TimeDiff::from_str(SMALL_TIMEOUTS_FINISHED_ENTRY_DURATION) + .unwrap(), + gossip_request_timeout: TimeDiff::from_str(SMALL_TIMEOUTS_GOSSIP_REQUEST_TIMEOUT) + .unwrap(), + get_remainder_timeout: TimeDiff::from_str(SMALL_TIMEOUTS_GET_REMAINDER_TIMEOUT) + .unwrap(), + validate_and_store_timeout: TimeDiff::from_str( + SMALL_TIMEOUTS_VALIDATE_AND_STORE_TIMEOUT, + ) + .unwrap(), ..Default::default() } } @@ -87,16 +106,20 @@ impl Config { self.saturation_limit_percent } - pub(crate) fn finished_entry_duration_secs(&self) -> u64 { - self.finished_entry_duration_secs + pub(crate) fn finished_entry_duration(&self) -> TimeDiff { + self.finished_entry_duration + } + + pub(crate) fn gossip_request_timeout(&self) -> TimeDiff { + self.gossip_request_timeout } - pub(crate) fn gossip_request_timeout_secs(&self) -> u64 { - 
self.gossip_request_timeout_secs + pub(crate) fn get_remainder_timeout(&self) -> TimeDiff { + self.get_remainder_timeout } - pub(crate) fn get_remainder_timeout_secs(&self) -> u64 { - self.get_remainder_timeout_secs + pub(crate) fn validate_and_store_timeout(&self) -> TimeDiff { + self.validate_and_store_timeout } } @@ -105,9 +128,11 @@ impl Default for Config { Config { infection_target: DEFAULT_INFECTION_TARGET, saturation_limit_percent: DEFAULT_SATURATION_LIMIT_PERCENT, - finished_entry_duration_secs: DEFAULT_FINISHED_ENTRY_DURATION_SECS, - gossip_request_timeout_secs: DEFAULT_GOSSIP_REQUEST_TIMEOUT_SECS, - get_remainder_timeout_secs: DEFAULT_GET_REMAINDER_TIMEOUT_SECS, + finished_entry_duration: TimeDiff::from_str(DEFAULT_FINISHED_ENTRY_DURATION).unwrap(), + gossip_request_timeout: TimeDiff::from_str(DEFAULT_GOSSIP_REQUEST_TIMEOUT).unwrap(), + get_remainder_timeout: TimeDiff::from_str(DEFAULT_GET_REMAINDER_TIMEOUT).unwrap(), + validate_and_store_timeout: TimeDiff::from_str(DEFAULT_VALIDATE_AND_STORE_TIMEOUT) + .unwrap(), } } } @@ -142,9 +167,11 @@ mod tests { let invalid_config = Config { infection_target: 3, saturation_limit_percent: MAX_SATURATION_LIMIT_PERCENT + 1, - finished_entry_duration_secs: DEFAULT_FINISHED_ENTRY_DURATION_SECS, - gossip_request_timeout_secs: DEFAULT_GOSSIP_REQUEST_TIMEOUT_SECS, - get_remainder_timeout_secs: DEFAULT_GET_REMAINDER_TIMEOUT_SECS, + finished_entry_duration: TimeDiff::from_str(DEFAULT_FINISHED_ENTRY_DURATION).unwrap(), + gossip_request_timeout: TimeDiff::from_str(DEFAULT_GOSSIP_REQUEST_TIMEOUT).unwrap(), + get_remainder_timeout: TimeDiff::from_str(DEFAULT_GET_REMAINDER_TIMEOUT).unwrap(), + validate_and_store_timeout: TimeDiff::from_str(DEFAULT_VALIDATE_AND_STORE_TIMEOUT) + .unwrap(), }; // Parsing should fail. 
@@ -155,9 +182,10 @@ mod tests { assert!(Config::new( 3, MAX_SATURATION_LIMIT_PERCENT + 1, - DEFAULT_FINISHED_ENTRY_DURATION_SECS, - DEFAULT_GOSSIP_REQUEST_TIMEOUT_SECS, - DEFAULT_GET_REMAINDER_TIMEOUT_SECS, + TimeDiff::from_str(DEFAULT_FINISHED_ENTRY_DURATION).unwrap(), + TimeDiff::from_str(DEFAULT_GOSSIP_REQUEST_TIMEOUT).unwrap(), + TimeDiff::from_str(DEFAULT_GET_REMAINDER_TIMEOUT).unwrap(), + TimeDiff::from_str(DEFAULT_VALIDATE_AND_STORE_TIMEOUT).unwrap() ) .is_err()) } diff --git a/node/src/components/gossiper/error.rs b/node/src/components/gossiper/error.rs index b611e76b89..e555679448 100644 --- a/node/src/components/gossiper/error.rs +++ b/node/src/components/gossiper/error.rs @@ -4,15 +4,11 @@ use super::config::MAX_SATURATION_LIMIT_PERCENT; /// Error returned by a `GossipTable`. #[derive(Debug, Error)] -pub enum Error { +pub(crate) enum Error { /// Invalid configuration value for `saturation_limit_percent`. #[error( "invalid saturation_limit_percent - should be between 0 and {} inclusive", MAX_SATURATION_LIMIT_PERCENT )] InvalidSaturationLimit, - - /// Attempted to reset data which had not been paused. - #[error("gossiping is not paused for this data")] - NotPaused, } diff --git a/node/src/components/gossiper/event.rs b/node/src/components/gossiper/event.rs index 4e70993b84..b78dbf6d89 100644 --- a/node/src/components/gossiper/event.rs +++ b/node/src/components/gossiper/event.rs @@ -3,21 +3,29 @@ use std::{ fmt::{self, Display, Formatter}, }; +use derive_more::From; use serde::Serialize; -use super::{Item, Message}; +use casper_types::DisplayIter; + +use super::GossipItem; use crate::{ + effect::{incoming::GossiperIncoming, requests::BeginGossipRequest, GossipTarget}, types::NodeId, - utils::{DisplayIter, Source}, + utils::Source, }; /// `Gossiper` events. -#[derive(Debug, Serialize)] -pub enum Event { +#[derive(Debug, From, Serialize)] +pub(crate) enum Event { + /// A request to gossip an item has been made. 
+ #[from] + BeginGossipRequest(BeginGossipRequest), /// A new item has been received to be gossiped. ItemReceived { item_id: T::Id, - source: Source, + source: Source, + target: GossipTarget, }, /// The network component gossiped to the included peers. GossipedTo { @@ -32,20 +40,41 @@ pub enum Event { /// arrived. CheckGetFromPeerTimeout { item_id: T::Id, peer: NodeId }, /// An incoming gossip network message. - MessageReceived { sender: NodeId, message: Message }, - /// The result of the gossiper getting an item from the component responsible for holding it. - /// If the result is `Ok`, the item should be sent to the requesting peer. - GetFromHolderResult { + #[from] + Incoming(GossiperIncoming), + /// The timeout for waiting for a different component to validate and store the item has + /// elapsed and we should check that `ItemReceived` has been called by now. + CheckItemReceivedTimeout { item_id: T::Id }, + /// The result of the gossiper checking if an item exists in storage. + IsStoredResult { + item_id: T::Id, + sender: NodeId, + result: bool, + }, + /// The result of the gossiper getting an item from storage. If the result is `Some`, the item + /// should be sent to the requesting peer. + GetFromStorageResult { item_id: T::Id, requester: NodeId, - result: Box>, + maybe_item: Option>, }, } -impl Display for Event { +impl Display for Event { fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { match self { - Event::ItemReceived { item_id, source } => { + Event::BeginGossipRequest(BeginGossipRequest { + item_id, source, .. + }) => { + write!( + formatter, + "begin gossping new item {} received from {}", + item_id, source + ) + } + Event::ItemReceived { + item_id, source, .. + } => { write!(formatter, "new item {} received from {}", item_id, source) } Event::GossipedTo { item_id, peers, .. 
} => write!( @@ -64,16 +93,32 @@ impl Display for Event { "check get from peer timeout for {} with {}", item_id, peer ), - Event::MessageReceived { sender, message } => { - write!(formatter, "{} received from {}", message, sender) + Event::Incoming(incoming) => { + write!(formatter, "incoming: {}", incoming) } - Event::GetFromHolderResult { - item_id, result, .. + Event::CheckItemReceivedTimeout { item_id } => { + write!(formatter, "check item received timeout for {}", item_id,) + } + Event::IsStoredResult { + item_id, + sender, + result, + } => { + write!( + formatter, + "{} is stored for gossip message from {}: {}", + item_id, sender, result + ) + } + Event::GetFromStorageResult { + item_id, + maybe_item, + .. } => { - if result.is_ok() { - write!(formatter, "got {} from holder component", item_id) + if maybe_item.is_some() { + write!(formatter, "got {} from storage", item_id) } else { - write!(formatter, "failed to get {} from holder component", item_id) + write!(formatter, "failed to get {} from storage", item_id) } } } diff --git a/node/src/components/gossiper/gossip_item.rs b/node/src/components/gossiper/gossip_item.rs new file mode 100644 index 0000000000..11f00bc20d --- /dev/null +++ b/node/src/components/gossiper/gossip_item.rs @@ -0,0 +1,34 @@ +use std::{ + fmt::{Debug, Display}, + hash::Hash, +}; + +use serde::{de::DeserializeOwned, Serialize}; + +use crate::effect::GossipTarget; + +/// A trait which allows an implementing type to be used by a gossiper component. +pub(crate) trait GossipItem: + Clone + Serialize + DeserializeOwned + Send + Sync + Debug + Display + Eq +{ + /// The type of ID of the item. + type Id: Clone + Eq + Hash + Serialize + DeserializeOwned + Send + Sync + Debug + Display; + + /// Whether the item's ID _is_ the complete item or not. + const ID_IS_COMPLETE_ITEM: bool; + /// Whether the arrival of a new gossip message should be announced or not. + const REQUIRES_GOSSIP_RECEIVED_ANNOUNCEMENT: bool; + + /// The ID of the specific item. 
+ fn gossip_id(&self) -> Self::Id; + + /// Identifies the kind of peers which should be targeted for onwards gossiping. + fn gossip_target(&self) -> GossipTarget; +} + +pub(crate) trait LargeGossipItem: GossipItem {} + +pub(crate) trait SmallGossipItem: GossipItem { + /// Convert a `Self::Id` into `Self`. + fn id_as_item(id: &Self::Id) -> &Self; +} diff --git a/node/src/components/gossiper/gossip_table.rs b/node/src/components/gossiper/gossip_table.rs index 2e5fa75fd2..a7ce14d921 100644 --- a/node/src/components/gossiper/gossip_table.rs +++ b/node/src/components/gossiper/gossip_table.rs @@ -10,15 +10,15 @@ use std::{ use datasize::DataSize; #[cfg(test)] use fake_instant::FakeClock as Instant; -use tracing::{debug, error, warn}; +use tracing::{error, trace, warn}; + +use casper_types::DisplayIter; use super::Config; -#[cfg(test)] -use super::Error; -use crate::{types::NodeId, utils::DisplayIter}; +use crate::{effect::GossipTarget, types::NodeId}; #[derive(Debug, PartialEq, Eq)] -pub(crate) enum GossipAction { +pub(super) enum GossipAction { /// This is new data, previously unknown by us, and for which we don't yet hold everything /// required to allow us start gossiping it onwards. We should get the remaining parts from /// the provided holder and not gossip the ID onwards yet. @@ -31,6 +31,9 @@ pub(crate) enum GossipAction { ShouldGossip(ShouldGossip), /// We hold the data locally, and we shouldn't gossip the ID onwards. Noop, + /// We just finished gossiping the data: no need to gossip further, but an announcement that we + /// have finished gossiping this data should be made. 
+ AnnounceFinished, } impl Display for GossipAction { @@ -42,6 +45,7 @@ impl Display for GossipAction { GossipAction::AwaitingRemainder => write!(formatter, "awaiting remainder"), GossipAction::ShouldGossip(should_gossip) => Display::fmt(should_gossip, formatter), GossipAction::Noop => write!(formatter, "should do nothing"), + GossipAction::AnnounceFinished => write!(formatter, "finished gossiping"), } } } @@ -49,13 +53,15 @@ impl Display for GossipAction { /// Used as a return type from API methods to indicate that the caller should continue to gossip the /// given data. #[derive(Debug, PartialEq, Eq)] -pub(crate) struct ShouldGossip { +pub(super) struct ShouldGossip { /// The number of copies of the gossip message to send. - pub(crate) count: usize, + pub(super) count: usize, /// Peers we should avoid gossiping this data to, since they already hold it. - pub(crate) exclude_peers: HashSet, + pub(super) exclude_peers: HashSet, /// Whether we already held the full data or not. - pub(crate) is_already_held: bool, + pub(super) is_already_held: bool, + /// Who to gossip this to. + pub(super) target: GossipTarget, } impl Display for ShouldGossip { @@ -81,48 +87,57 @@ impl Display for ShouldGossip { } #[derive(DataSize, Debug, Default)] -pub(crate) struct State { +pub(super) struct State { /// The peers excluding us which hold the data. holders: HashSet, - /// Whether we hold the full data locally yet or not. - held_by_us: bool, /// The subset of `holders` we have infected. Not just a count so we don't attribute the same /// peer multiple times. infected_by_us: HashSet, /// The count of in-flight gossip messages sent by us for this data. in_flight_count: usize, + /// The relevant target for this data, if known yet. + target: Option, + /// The set of peers we attempted to infect. + attempted_to_infect: HashSet, } impl State { + /// Whether we hold the full data locally yet or not. 
+ fn held_by_us(&self) -> bool { + self.target.is_some() + } + /// Returns whether we should finish gossiping this data. - fn is_finished(&self, infection_target: usize, holders_limit: usize) -> bool { - self.infected_by_us.len() >= infection_target || self.holders.len() >= holders_limit + fn is_finished(&self, infection_target: usize, attempted_to_infect_limit: usize) -> bool { + self.infected_by_us.len() >= infection_target + || self.attempted_to_infect.len() >= attempted_to_infect_limit } /// Returns a `GossipAction` derived from the given state. fn action( &mut self, infection_target: usize, - holders_limit: usize, + attempted_to_infect_limit: usize, is_new: bool, ) -> GossipAction { - if self.is_finished(infection_target, holders_limit) { + if self.is_finished(infection_target, attempted_to_infect_limit) { return GossipAction::Noop; } - if self.held_by_us { + if let Some(target) = self.target { + // The item is held by us, decide whether we should gossip it or not. let count = infection_target.saturating_sub(self.in_flight_count + self.infected_by_us.len()); if count > 0 { self.in_flight_count += count; return GossipAction::ShouldGossip(ShouldGossip { count, - exclude_peers: self.holders.clone(), + target, + exclude_peers: self.attempted_to_infect.clone(), is_already_held: !is_new, }); - } else { - return GossipAction::Noop; } + return GossipAction::Noop; } if is_new { @@ -139,7 +154,7 @@ impl State { } #[derive(DataSize, Debug)] -pub(crate) struct Timeouts { +pub(super) struct Timeouts { values: Vec<(Instant, T)>, } @@ -170,75 +185,63 @@ impl Timeouts { } #[derive(DataSize, Debug)] -pub(crate) struct GossipTable { +pub(super) struct GossipTable { /// Data IDs for which gossiping is still ongoing. current: HashMap, /// Data IDs for which gossiping is complete. finished: HashSet, /// Timeouts for removal of items from the `finished` cache. 
- finished_timeouts: Timeouts, - /// Data IDs for which gossiping has been paused (likely due to detecting that the data was not - /// correct as per our current knowledge). Such data could later be decided as still requiring - /// to be gossiped, so we retain the `State` part here in order to resume gossiping. - paused: HashMap, - /// Timeouts for removal of items from the `paused` cache. - paused_timeouts: Timeouts, + timeouts: Timeouts, /// See `Config::infection_target`. infection_target: usize, /// Derived from `Config::saturation_limit_percent` - we gossip data while the number of - /// holders doesn't exceed `holders_limit`. - holders_limit: usize, + /// attempts to infect doesn't exceed `attempted_to_infect_limit`. + attempted_to_infect_limit: usize, /// See `Config::finished_entry_duration`. finished_entry_duration: Duration, } impl GossipTable { /// Number of items currently being gossiped. - pub fn items_current(&self) -> usize { + pub(super) fn items_current(&self) -> usize { self.current.len() } /// Number of items that are kept but are finished gossiping. - pub fn items_finished(&self) -> usize { + pub(super) fn items_finished(&self) -> usize { self.finished.len() } - - /// Number of items for which gossipping is currently paused. - pub fn items_paused(&self) -> usize { - self.paused.len() - } } -impl GossipTable { +impl GossipTable { /// Returns a new `GossipTable` using the provided configuration. 
- pub(crate) fn new(config: Config) -> Self { - let holders_limit = (100 * usize::from(config.infection_target())) + pub(super) fn new(config: Config) -> Self { + let attempted_to_infect_limit = (100 * usize::from(config.infection_target())) / (100 - usize::from(config.saturation_limit_percent())); GossipTable { current: HashMap::new(), finished: HashSet::new(), - finished_timeouts: Timeouts::new(), - paused: HashMap::new(), - paused_timeouts: Timeouts::new(), + timeouts: Timeouts::new(), infection_target: usize::from(config.infection_target()), - holders_limit, - finished_entry_duration: Duration::from_secs(config.finished_entry_duration_secs()), + attempted_to_infect_limit, + finished_entry_duration: config.finished_entry_duration().into(), } } /// We received knowledge about potentially new data with given ID from the given peer. This - /// should only be called where we don't already hold everything locally we need to be able to - /// gossip it onwards. If we are able to gossip the data already, call `new_data` instead. + /// should only be called where we don't already hold everything locally needed to be able to + /// gossip it onwards. If we are able to gossip the data already, call `new_complete_data` + /// instead. /// /// Once we have retrieved everything we need in order to begin gossiping onwards, call - /// `new_data`. + /// `new_complete_data`. /// /// Returns whether we should gossip it, and a list of peers to exclude. 
- pub(crate) fn new_partial_data(&mut self, data_id: &T, holder: NodeId) -> GossipAction { + pub(super) fn new_data_id(&mut self, data_id: &T, holder: NodeId) -> GossipAction { self.purge_finished(); if self.finished.contains(data_id) { - debug!(item=%data_id, "no further action: item already finished"); + trace!(item=%data_id, "no further action: item already finished"); return GossipAction::Noop; } @@ -247,22 +250,21 @@ impl GossipTable { }; if let Some(action) = self.update_current(data_id, update) { - debug!(item=%data_id, %action, "item is currently being gossiped"); + trace!(item=%data_id, %action, "item is currently being gossiped"); return action; } - if let Some(action) = self.update_paused(data_id, update) { - debug!(item=%data_id, %action, "gossiping item is paused"); - return action; - } - - // This isn't in finished, current, or paused - add a new entry to current. + // This isn't in finished or current - add a new entry to current. let mut state = State::default(); update(&mut state); let is_new = true; - let action = state.action(self.infection_target, self.holders_limit, is_new); - let _ = self.current.insert(*data_id, state); - debug!(item=%data_id, %action, "gossiping new item should begin"); + let action = state.action( + self.infection_target, + self.attempted_to_infect_limit, + is_new, + ); + let _ = self.current.insert(data_id.clone(), state); + trace!(item=%data_id, %action, "gossiping new item should begin"); action } @@ -270,54 +272,55 @@ impl GossipTable { /// its ID should be passed in `maybe_holder`. If received from a client or generated on this /// node, `maybe_holder` should be `None`. /// - /// This should only be called once we hold everything locally we need to be able to gossip it - /// onwards. If we aren't able to gossip this data yet, call `new_partial_data` instead. + /// This should only be called once we hold everything locally needed to be able to gossip it + /// onwards. 
If we aren't able to gossip this data yet, call `new_data_id` instead. /// /// Returns whether we should gossip it, and a list of peers to exclude. - pub(crate) fn new_complete_data( + pub(super) fn new_complete_data( &mut self, data_id: &T, maybe_holder: Option, - ) -> Option { + target: GossipTarget, + ) -> GossipAction { self.purge_finished(); if self.finished.contains(data_id) { - debug!(item=%data_id, "no further action: item already finished"); - return None; + trace!(item=%data_id, "no further action: item already finished"); + return GossipAction::Noop; } let update = |state: &mut State| { state.holders.extend(maybe_holder); - state.held_by_us = true; - }; - - // Converts the returned action to an optional `ShouldGossip`. - let convert_action = |action: GossipAction| match action { - GossipAction::ShouldGossip(should_gossip) => Some(should_gossip), - GossipAction::Noop => None, - GossipAction::GetRemainder { .. } | GossipAction::AwaitingRemainder => { - unreachable!("can't be waiting for remainder since we hold the complete data") - } + state.target = Some(target); }; if let Some(action) = self.update_current(data_id, update) { - debug!(item=%data_id, %action, "item is currently being gossiped"); - return convert_action(action); - } - - if let Some(action) = self.update_paused(data_id, update) { - debug!(item=%data_id, %action, "gossiping item is paused"); - return convert_action(action); + trace!(item=%data_id, %action, "item is currently being gossiped"); + return action; } - // This isn't in finished, current, or paused - add a new entry to current. + // This isn't in finished or current - add a new entry to current. 
let mut state = State::default(); update(&mut state); let is_new = true; - let action = state.action(self.infection_target, self.holders_limit, is_new); - let _ = self.current.insert(*data_id, state); - debug!(item=%data_id, %action, "gossiping new item should begin"); - convert_action(action) + let action = state.action( + self.infection_target, + self.attempted_to_infect_limit, + is_new, + ); + let _ = self.current.insert(data_id.clone(), state); + trace!(item=%data_id, %action, "gossiping new item should begin"); + action + } + + pub(super) fn register_infection_attempt<'a>( + &'a mut self, + item_id: &T, + peers: impl Iterator, + ) { + if let Some(state) = self.current.get_mut(item_id) { + state.attempted_to_infect.extend(peers); + } } /// We got a response from a peer we gossiped to indicating we infected it (it didn't previously @@ -326,7 +329,7 @@ impl GossipTable { /// If the given `data_id` is not a member of the current entries (those not deemed finished), /// then `GossipAction::Noop` will be returned under the assumption that the data has already /// finished being gossiped. - pub(crate) fn we_infected(&mut self, data_id: &T, peer: NodeId) -> GossipAction { + pub(super) fn we_infected(&mut self, data_id: &T, peer: NodeId) -> GossipAction { let infected_by_us = true; self.infected(data_id, peer, infected_by_us) } @@ -337,17 +340,17 @@ impl GossipTable { /// If the given `data_id` is not a member of the current entries (those not deemed finished), /// then `GossipAction::Noop` will be returned under the assumption that the data has already /// finished being gossiped. 
- pub(crate) fn already_infected(&mut self, data_id: &T, peer: NodeId) -> GossipAction { + pub(super) fn already_infected(&mut self, data_id: &T, peer: NodeId) -> GossipAction { let infected_by_us = false; self.infected(data_id, peer, infected_by_us) } fn infected(&mut self, data_id: &T, peer: NodeId, by_us: bool) -> GossipAction { let update = |state: &mut State| { - if !state.held_by_us { + if !state.held_by_us() { warn!( item=%data_id, - %peer, "shouldn't have received a gossip response for partial data" + %peer, "shouldn't have received a gossip response for data we don't hold" ); return; } @@ -358,45 +361,53 @@ impl GossipTable { state.in_flight_count = state.in_flight_count.saturating_sub(1); }; - if let Some(action) = self.update_current(data_id, update) { - return action; - } - - self.update_paused(data_id, update) + self.update_current(data_id, update) .unwrap_or(GossipAction::Noop) } /// Directly reduces the in-flight count of gossip requests for the given item by the given /// amount. /// + /// Returns `true` if there was a current entry for this data and it is now finished. + /// /// This should be called if, after trying to gossip to a given number of peers, we find that /// we've not been able to select enough peers. Without this reduction, the given gossip item - /// would never move from `current` to `finished` or `paused`, and hence would never be purged. - pub(crate) fn reduce_in_flight_count(&mut self, data_id: &T, reduce_by: usize) { - if let Some(state) = self.current.get_mut(data_id) { + /// would never move from `current` to `finished`, and hence would never be purged. 
+ pub(super) fn reduce_in_flight_count(&mut self, data_id: &T, reduce_by: usize) -> bool { + let should_finish = if let Some(state) = self.current.get_mut(data_id) { state.in_flight_count = state.in_flight_count.saturating_sub(reduce_by); - debug!( + trace!( item=%data_id, in_flight_count=%state.in_flight_count, "reduced in-flight count for item" ); + state.in_flight_count == 0 + } else { + false + }; + + if should_finish { + trace!(item=%data_id, "finished gossiping since no more peers to gossip to"); + return self.force_finish(data_id); } + + false } /// Checks if gossip request we sent timed out. /// /// If the peer is already counted as a holder, it has previously responded and this method /// returns Noop. Otherwise it has timed out and we return the appropriate action to take. - pub(crate) fn check_timeout(&mut self, data_id: &T, peer: NodeId) -> GossipAction { + pub(super) fn check_timeout(&mut self, data_id: &T, peer: NodeId) -> GossipAction { let update = |state: &mut State| { debug_assert!( - state.held_by_us, - "shouldn't check timeout for a gossip response for partial data" + state.held_by_us(), + "shouldn't check timeout for a gossip response for data we don't hold" ); - if !state.held_by_us { + if !state.held_by_us() { error!( item=%data_id, - %peer, "shouldn't check timeout for a gossip response for partial data" + %peer, "shouldn't check timeout for a gossip response for data we don't hold" ); return; } @@ -408,11 +419,7 @@ impl GossipTable { } }; - if let Some(action) = self.update_current(data_id, update) { - return action; - } - - self.update_paused(data_id, update) + self.update_current(data_id, update) .unwrap_or(GossipAction::Noop) } @@ -421,60 +428,67 @@ impl GossipTable { /// /// If this causes the list of holders to become empty, and we also don't hold the full data, /// then this entry is removed as if we'd never heard of it. 
- pub(crate) fn remove_holder_if_unresponsive( + pub(super) fn remove_holder_if_unresponsive( &mut self, data_id: &T, peer: NodeId, ) -> GossipAction { if let Some(mut state) = self.current.remove(data_id) { - if !state.held_by_us { + if !state.held_by_us() { let _ = state.holders.remove(&peer); - debug!(item=%data_id, %peer, "removed peer as a holder of the item"); + trace!(item=%data_id, %peer, "removed peer as a holder of the item"); if state.holders.is_empty() { - // We don't hold the full data, and we don't know any holders - pause the entry - debug!(item=%data_id, "no further action: item now paused as no holders"); + // We don't hold the full data, and we don't know any holders - remove the entry + trace!(item=%data_id, "no further action: item now removed as no holders"); return GossipAction::Noop; } } - let is_new = !state.held_by_us; - let action = state.action(self.infection_target, self.holders_limit, is_new); - let _ = self.current.insert(*data_id, state); - debug!(item=%data_id, %action, "assuming peer response did not timeout"); + let is_new = !state.held_by_us(); + let action = state.action( + self.infection_target, + self.attempted_to_infect_limit, + is_new, + ); + let _ = self.current.insert(data_id.clone(), state); + trace!(item=%data_id, %action, "assuming peer response did not timeout"); return action; } - if let Some(state) = self.paused.get_mut(data_id) { - if !state.held_by_us { - let _ = state.holders.remove(&peer); - debug!(item=%data_id, %peer, "removed peer as a holder of the item"); - } - } - GossipAction::Noop } - /// We have deemed the data not suitable for gossiping further. If left in paused state, the - /// entry will eventually be purged, as for finished entries. 
- pub(crate) fn pause(&mut self, data_id: &T) { - if let Some(mut state) = self.current.remove(data_id) { - state.in_flight_count = 0; - let timeout = Instant::now() + self.finished_entry_duration; - let _ = self.paused.insert(*data_id, state); - let _ = self.paused_timeouts.push(timeout, *data_id); + /// We have deemed the data not suitable for gossiping further. The entry will be marked as + /// `finished` and eventually be purged. + /// + /// Returns `true` if there was a current entry for this data. + pub(super) fn force_finish(&mut self, data_id: &T) -> bool { + if self.current.remove(data_id).is_some() { + self.insert_to_finished(data_id); + return true; } + false } - /// Resumes gossiping of paused entry. + /// If the data has not been deemed valid by the component responsible for it (i.e. + /// `state.held_by_us` is false) it should not be gossiped onwards by us. The entry will be + /// marked as `finished` and eventually be purged. /// - /// Returns an error if gossiping this data is not in a paused state. - // TODO - remove lint relaxation once the method is used. - #[cfg(test)] - pub(crate) fn resume(&mut self, data_id: &T) -> Result { - let mut state = self.paused.remove(data_id).ok_or(Error::NotPaused)?; - let is_new = !state.held_by_us; - let action = state.action(self.infection_target, self.holders_limit, is_new); - let _ = self.current.insert(*data_id, state); - Ok(action) + /// Returns `true` if such an entry was found and marked `finished`. + pub(super) fn finish_if_not_held_by_us(&mut self, data_id: &T) -> bool { + if self + .current + .get(data_id) + .map(|state| !state.held_by_us()) + .unwrap_or(false) + { + return self.force_finish(data_id); + } + false + } + + /// Returns `true` if the given ID is in `current` or `finished`. 
+ pub(super) fn has_entry(&self, data_id: &T) -> bool { + self.current.contains_key(data_id) || self.finished.contains(data_id) } /// Updates the entry under `data_id` in `self.current` and returns the action we should now @@ -488,66 +502,58 @@ impl GossipTable { ) -> Option { let mut state = self.current.remove(data_id)?; update(&mut state); - if state.is_finished(self.infection_target, self.holders_limit) { - let timeout = Instant::now() + self.finished_entry_duration; - let _ = self.finished.insert(*data_id); - let _ = self.finished_timeouts.push(timeout, *data_id); - return Some(GossipAction::Noop); + if state.is_finished(self.infection_target, self.attempted_to_infect_limit) { + self.insert_to_finished(data_id); + return Some(GossipAction::AnnounceFinished); } let is_new = false; - let action = state.action(self.infection_target, self.holders_limit, is_new); - let _ = self.current.insert(*data_id, state); + let action = state.action( + self.infection_target, + self.attempted_to_infect_limit, + is_new, + ); + let _ = self.current.insert(data_id.clone(), state); Some(action) } - /// Updates the entry under `data_id` in `self.paused` and returns the action we should now - /// take, or `None` if the entry does not exist. - /// - /// If the entry becomes finished, it is moved from `self.paused` to `self.finished`. 
- fn update_paused(&mut self, data_id: &T, update: F) -> Option { - let mut state = self.paused.remove(data_id)?; - update(&mut state); - if state.is_finished(self.infection_target, self.holders_limit) { - let timeout = Instant::now() + self.finished_entry_duration; - let _ = self.finished.insert(*data_id); - let _ = self.finished_timeouts.push(timeout, *data_id); - } else { - let _ = self.paused.insert(*data_id, state); - } - - Some(GossipAction::Noop) + fn insert_to_finished(&mut self, data_id: &T) { + let timeout = Instant::now() + self.finished_entry_duration; + let _ = self.finished.insert(data_id.clone()); + self.timeouts.push(timeout, data_id.clone()); } /// Retains only those finished entries which still haven't timed out. fn purge_finished(&mut self) { let now = Instant::now(); - for expired_finished in self.finished_timeouts.purge(&now) { + for expired_finished in self.timeouts.purge(&now) { let _ = self.finished.remove(&expired_finished); } + } - for expired_paused in self.paused_timeouts.purge(&now) { - let _ = self.paused.remove(&expired_paused); - } + #[cfg(test)] + pub(super) fn is_empty(&self) -> bool { + self.current.is_empty() && self.finished.is_empty() } } #[cfg(test)] mod tests { - use std::{collections::BTreeSet, iter}; + use std::{collections::BTreeSet, iter, str::FromStr}; use rand::Rng; - use test::Bencher; - use super::{super::config::DEFAULT_FINISHED_ENTRY_DURATION_SECS, *}; - use crate::{crypto::hash::Digest, testing::TestRng, types::DeployHash, utils::DisplayIter}; + use casper_types::{testing::TestRng, DisplayIter, TimeDiff}; + + use super::{super::config::DEFAULT_FINISHED_ENTRY_DURATION, *}; + use crate::logging; const EXPECTED_DEFAULT_INFECTION_TARGET: usize = 3; - const EXPECTED_DEFAULT_HOLDERS_LIMIT: usize = 15; + const EXPECTED_DEFAULT_ATTEMPTED_TO_INFECT_LIMIT: usize = 15; fn random_node_ids(rng: &mut TestRng) -> Vec { iter::repeat_with(|| NodeId::random(rng)) - .take(EXPECTED_DEFAULT_HOLDERS_LIMIT + 3) + 
.take(EXPECTED_DEFAULT_ATTEMPTED_TO_INFECT_LIMIT + 3) .collect() } @@ -556,7 +562,6 @@ mod tests { let actual: BTreeSet<_> = gossip_table .current .get(data_id) - .or_else(|| gossip_table.paused.get(data_id)) .map_or_else(BTreeSet::new, |state| state.holders.iter().collect()); assert!( expected == actual, @@ -567,7 +572,8 @@ mod tests { } #[test] - fn new_partial_data() { + fn new_data_id() { + let _ = logging::init(); let mut rng = crate::new_rng(); let node_ids = random_node_ids(&mut rng); let data_id: u64 = rng.gen(); @@ -577,56 +583,48 @@ mod tests { EXPECTED_DEFAULT_INFECTION_TARGET, gossip_table.infection_target ); - assert_eq!(EXPECTED_DEFAULT_HOLDERS_LIMIT, gossip_table.holders_limit); + assert_eq!( + EXPECTED_DEFAULT_ATTEMPTED_TO_INFECT_LIMIT, + gossip_table.attempted_to_infect_limit + ); - // Check new partial data causes `GetRemainder` to be returned. - let action = gossip_table.new_partial_data(&data_id, node_ids[0]); + // Check new data ID causes `GetRemainder` to be returned. + let action = gossip_table.new_data_id(&data_id, node_ids[0]); let expected = GossipAction::GetRemainder { holder: node_ids[0], }; assert_eq!(expected, action); check_holders(&node_ids[..1], &gossip_table, &data_id); - // Check same partial data from same source causes `AwaitingRemainder` to be returned. - let action = gossip_table.new_partial_data(&data_id, node_ids[0]); + // Check same data ID from same source causes `AwaitingRemainder` to be returned. + let action = gossip_table.new_data_id(&data_id, node_ids[0]); assert_eq!(GossipAction::AwaitingRemainder, action); check_holders(&node_ids[..1], &gossip_table, &data_id); - // Check same partial data from different source causes `AwaitingRemainder` to be returned + // Check same data ID from different source causes `AwaitingRemainder` to be returned // and holders updated. 
- let action = gossip_table.new_partial_data(&data_id, node_ids[1]); + let action = gossip_table.new_data_id(&data_id, node_ids[1]); assert_eq!(GossipAction::AwaitingRemainder, action); check_holders(&node_ids[..2], &gossip_table, &data_id); - // Pause gossiping and check same partial data from third source causes `Noop` to be - // returned and holders updated. - gossip_table.pause(&data_id); - let action = gossip_table.new_partial_data(&data_id, node_ids[2]); - assert_eq!(GossipAction::Noop, action); - check_holders(&node_ids[..3], &gossip_table, &data_id); - - // Reset the data and check same partial data from fourth source causes `AwaitingRemainder` - // to be returned and holders updated. - gossip_table.resume(&data_id).unwrap(); - let action = gossip_table.new_partial_data(&data_id, node_ids[3]); - assert_eq!(GossipAction::AwaitingRemainder, action); - check_holders(&node_ids[..4], &gossip_table, &data_id); - - // Finish the gossip by reporting three infections, then check same partial data causes + // Finish the gossip by reporting three infections, then check same data ID causes // `Noop` to be returned and holders cleared. 
- let _ = gossip_table.new_complete_data(&data_id, Some(node_ids[0])); - let limit = 4 + EXPECTED_DEFAULT_INFECTION_TARGET; - for node_id in &node_ids[4..limit] { + let _ = gossip_table.new_complete_data(&data_id, Some(node_ids[0]), GossipTarget::All); + let limit = 3 + EXPECTED_DEFAULT_INFECTION_TARGET; + for node_id in &node_ids[3..limit] { let _ = gossip_table.we_infected(&data_id, *node_id); } - let action = gossip_table.new_partial_data(&data_id, node_ids[limit]); + let action = gossip_table.new_data_id(&data_id, node_ids[limit]); assert_eq!(GossipAction::Noop, action); check_holders(&node_ids[..0], &gossip_table, &data_id); - // Time the finished data out, then check same partial data causes `GetRemainder` to be + // Time the finished data out, then check same data ID causes `GetRemainder` to be // returned as per a completely new entry. - Instant::advance_time(DEFAULT_FINISHED_ENTRY_DURATION_SECS * 1_000 + 1); - let action = gossip_table.new_partial_data(&data_id, node_ids[0]); + let millis = TimeDiff::from_str(DEFAULT_FINISHED_ENTRY_DURATION) + .unwrap() + .millis(); + Instant::advance_time(millis + 1); + let action = gossip_table.new_data_id(&data_id, node_ids[0]); let expected = GossipAction::GetRemainder { holder: node_ids[0], }; @@ -635,14 +633,15 @@ mod tests { } #[test] - fn should_noop_if_we_have_partial_data_and_get_gossip_response() { + fn should_noop_if_we_dont_hold_data_and_get_gossip_response() { + let _ = logging::init(); let mut rng = crate::new_rng(); let node_id = NodeId::random(&mut rng); let data_id: u64 = rng.gen(); let mut gossip_table = GossipTable::new(Config::default()); - let _ = gossip_table.new_partial_data(&data_id, node_id); + let _ = gossip_table.new_data_id(&data_id, node_id); let action = gossip_table.we_infected(&data_id, node_id); assert_eq!(GossipAction::AwaitingRemainder, action); @@ -653,6 +652,7 @@ mod tests { #[test] fn new_complete_data() { + let _ = logging::init(); let mut rng = crate::new_rng(); let node_ids = 
random_node_ids(&mut rng); let data_id: u64 = rng.gen(); @@ -660,9 +660,10 @@ mod tests { let mut gossip_table = GossipTable::new(Config::default()); // Check new complete data from us causes `ShouldGossip` to be returned. - let action = gossip_table.new_complete_data(&data_id, None); - let expected = Some(ShouldGossip { + let action = gossip_table.new_complete_data(&data_id, None, GossipTarget::All); + let expected = GossipAction::ShouldGossip(ShouldGossip { count: EXPECTED_DEFAULT_INFECTION_TARGET, + target: GossipTarget::All, exclude_peers: HashSet::new(), is_already_held: false, }); @@ -671,59 +672,52 @@ mod tests { // Check same complete data from other source causes `Noop` to be returned since we still // have all gossip requests in flight. Check it updates holders. - let action = gossip_table.new_complete_data(&data_id, Some(node_ids[0])); - assert!(action.is_none()); + gossip_table.register_infection_attempt(&data_id, iter::once(&node_ids[0])); + let action = gossip_table.new_complete_data(&data_id, Some(node_ids[0]), GossipTarget::All); + assert_eq!(GossipAction::Noop, action); check_holders(&node_ids[..1], &gossip_table, &data_id); // Check receiving a gossip response, causes `ShouldGossip` to be returned and holders // updated. + gossip_table.register_infection_attempt(&data_id, iter::once(&node_ids[1])); let action = gossip_table.already_infected(&data_id, node_ids[1]); let expected = GossipAction::ShouldGossip(ShouldGossip { count: 1, + target: GossipTarget::All, exclude_peers: node_ids[..2].iter().cloned().collect(), is_already_held: true, }); assert_eq!(expected, action); check_holders(&node_ids[..2], &gossip_table, &data_id); - // Pause gossiping and check same complete data from third source causes `Noop` to be - // returned and holders updated. 
- gossip_table.pause(&data_id); - let action = gossip_table.new_complete_data(&data_id, Some(node_ids[2])); - assert!(action.is_none()); + gossip_table.register_infection_attempt(&data_id, iter::once(&node_ids[2])); + let action = gossip_table.new_complete_data(&data_id, Some(node_ids[2]), GossipTarget::All); + assert_eq!(GossipAction::Noop, action); check_holders(&node_ids[..3], &gossip_table, &data_id); - // Reset the data and check same complete data from fourth source causes Noop` to be - // returned since we still have all gossip requests in flight. Check it updates holders. - let action = gossip_table.resume(&data_id).unwrap(); - let expected = GossipAction::ShouldGossip(ShouldGossip { - count: EXPECTED_DEFAULT_INFECTION_TARGET, - exclude_peers: node_ids[..3].iter().cloned().collect(), - is_already_held: true, - }); - assert_eq!(expected, action); - - let action = gossip_table.new_complete_data(&data_id, Some(node_ids[3])); - assert!(action.is_none()); - check_holders(&node_ids[..4], &gossip_table, &data_id); - // Finish the gossip by reporting enough non-infections, then check same complete data // causes `Noop` to be returned and holders cleared. - let limit = 4 + EXPECTED_DEFAULT_INFECTION_TARGET; - for node_id in &node_ids[4..limit] { + let limit = 3 + EXPECTED_DEFAULT_INFECTION_TARGET; + for node_id in &node_ids[3..limit] { + gossip_table.register_infection_attempt(&data_id, iter::once(node_id)); let _ = gossip_table.we_infected(&data_id, *node_id); } - let action = gossip_table.new_complete_data(&data_id, None); - assert!(action.is_none()); + let action = gossip_table.new_complete_data(&data_id, None, GossipTarget::All); + assert_eq!(GossipAction::Noop, action); check_holders(&node_ids[..0], &gossip_table, &data_id); // Time the finished data out, then check same complete data causes `ShouldGossip` to be // returned as per a completely new entry. 
- Instant::advance_time(DEFAULT_FINISHED_ENTRY_DURATION_SECS * 1_000 + 1); - let action = gossip_table.new_complete_data(&data_id, Some(node_ids[0])); - let expected = Some(ShouldGossip { + let millis = TimeDiff::from_str(DEFAULT_FINISHED_ENTRY_DURATION) + .unwrap() + .millis(); + Instant::advance_time(millis + 1); + + let action = gossip_table.new_complete_data(&data_id, Some(node_ids[0]), GossipTarget::All); + let expected = GossipAction::ShouldGossip(ShouldGossip { count: EXPECTED_DEFAULT_INFECTION_TARGET, - exclude_peers: node_ids[..1].iter().cloned().collect(), + target: GossipTarget::All, + exclude_peers: HashSet::new(), // We didn't infect anyone yet. is_already_held: false, }); assert_eq!(expected, action); @@ -732,6 +726,7 @@ mod tests { #[test] fn should_terminate_via_infection_limit() { + let _ = logging::init(); let mut rng = crate::new_rng(); let node_ids = random_node_ids(&mut rng); let data_id: u64 = rng.gen(); @@ -740,9 +735,10 @@ mod tests { // Add new complete data from us and check two infections doesn't cause us to stop // gossiping. - let _ = gossip_table.new_complete_data(&data_id, None); + let _ = gossip_table.new_complete_data(&data_id, None, GossipTarget::All); let limit = EXPECTED_DEFAULT_INFECTION_TARGET - 1; for node_id in node_ids.iter().take(limit) { + gossip_table.register_infection_attempt(&data_id, iter::once(node_id)); let action = gossip_table.we_infected(&data_id, *node_id); assert_eq!(GossipAction::Noop, action); assert!(!gossip_table.finished.contains(&data_id)); @@ -750,9 +746,11 @@ mod tests { // Check recording an infection from an already-recorded infectee doesn't cause us to stop // gossiping. 
+ gossip_table.register_infection_attempt(&data_id, iter::once(&node_ids[limit - 1])); let action = gossip_table.we_infected(&data_id, node_ids[limit - 1]); let expected = GossipAction::ShouldGossip(ShouldGossip { count: 1, + target: GossipTarget::All, exclude_peers: node_ids[..limit].iter().cloned().collect(), is_already_held: true, }); @@ -760,13 +758,15 @@ mod tests { assert!(!gossip_table.finished.contains(&data_id)); // Check third new infection does cause us to stop gossiping. + gossip_table.register_infection_attempt(&data_id, iter::once(&node_ids[limit])); let action = gossip_table.we_infected(&data_id, node_ids[limit]); - assert_eq!(GossipAction::Noop, action); + assert_eq!(GossipAction::AnnounceFinished, action); assert!(gossip_table.finished.contains(&data_id)); } #[test] - fn should_terminate_via_incoming_gossip() { + fn should_not_terminate_via_incoming_gossip() { + let _ = logging::init(); let mut rng = crate::new_rng(); let node_ids = random_node_ids(&mut rng); let data_id1: u64 = rng.gen(); @@ -776,27 +776,36 @@ mod tests { // Take the two items close to the termination condition of holder count by simulating // receiving several incoming gossip requests. Each should remain unfinished. - let limit = EXPECTED_DEFAULT_HOLDERS_LIMIT - 1; + let limit = EXPECTED_DEFAULT_ATTEMPTED_TO_INFECT_LIMIT - 1; for node_id in node_ids.iter().take(limit) { - let _ = gossip_table.new_partial_data(&data_id1, *node_id); + let _ = gossip_table.new_data_id(&data_id1, *node_id); assert!(!gossip_table.finished.contains(&data_id1)); - let _ = gossip_table.new_complete_data(&data_id2, Some(*node_id)); + let _ = gossip_table.new_complete_data(&data_id2, Some(*node_id), GossipTarget::All); assert!(!gossip_table.finished.contains(&data_id2)); } // Simulate receiving a final gossip request for each, which should cause them both to be // moved to the `finished` collection. 
- let _ = gossip_table.new_partial_data(&data_id1, node_ids[EXPECTED_DEFAULT_HOLDERS_LIMIT]); - assert!(gossip_table.finished.contains(&data_id1)); + let action = gossip_table.new_data_id( + &data_id1, + node_ids[EXPECTED_DEFAULT_ATTEMPTED_TO_INFECT_LIMIT], + ); + assert!(!gossip_table.finished.contains(&data_id1)); + assert_eq!(GossipAction::AwaitingRemainder, action); - let _ = gossip_table - .new_complete_data(&data_id2, Some(node_ids[EXPECTED_DEFAULT_HOLDERS_LIMIT])); - assert!(gossip_table.finished.contains(&data_id2)); + let action = gossip_table.new_complete_data( + &data_id2, + Some(node_ids[EXPECTED_DEFAULT_ATTEMPTED_TO_INFECT_LIMIT]), + GossipTarget::All, + ); + assert!(!gossip_table.finished.contains(&data_id2)); + assert_eq!(GossipAction::Noop, action); } #[test] fn should_terminate_via_checking_timeout() { + let _ = logging::init(); let mut rng = crate::new_rng(); let node_ids = random_node_ids(&mut rng); let data_id: u64 = rng.gen(); @@ -805,20 +814,55 @@ mod tests { // Take the item close to the termination condition of holder count by simulating receiving // several incoming gossip requests. It should remain unfinished. - let limit = EXPECTED_DEFAULT_HOLDERS_LIMIT - 1; + let limit = EXPECTED_DEFAULT_ATTEMPTED_TO_INFECT_LIMIT - 1; for node_id in node_ids.iter().take(limit) { - let _ = gossip_table.new_complete_data(&data_id, Some(*node_id)); + let _ = gossip_table.new_complete_data(&data_id, Some(*node_id), GossipTarget::All); + gossip_table.register_infection_attempt(&data_id, iter::once(node_id)); assert!(!gossip_table.finished.contains(&data_id)); } // Simulate a gossip response timing out, which should cause the item to be moved to the // `finished` collection. 
- let _ = gossip_table.check_timeout(&data_id, node_ids[EXPECTED_DEFAULT_HOLDERS_LIMIT]); + gossip_table.register_infection_attempt( + &data_id, + iter::once(&node_ids[EXPECTED_DEFAULT_ATTEMPTED_TO_INFECT_LIMIT]), + ); + let action = gossip_table.check_timeout( + &data_id, + node_ids[EXPECTED_DEFAULT_ATTEMPTED_TO_INFECT_LIMIT], + ); + assert!(gossip_table.finished.contains(&data_id)); + assert_eq!(GossipAction::AnnounceFinished, action); + } + + #[test] + fn should_terminate_via_reducing_in_flight_count() { + let _ = logging::init(); + let mut rng = crate::new_rng(); + let data_id: u64 = rng.gen(); + + let mut gossip_table = GossipTable::new(Config::default()); + + // Take the item close to the termination condition of in-flight count reaching 0. It + // should remain unfinished. + let _ = gossip_table.new_complete_data(&data_id, None, GossipTarget::All); + let limit = EXPECTED_DEFAULT_INFECTION_TARGET - 1; + assert!(!gossip_table.reduce_in_flight_count(&data_id, limit)); + assert!(!gossip_table.finished.contains(&data_id)); + + // Reduce the in-flight count to 0, which should cause the item to be moved to the + // `finished` collection. + assert!(gossip_table.reduce_in_flight_count(&data_id, 1)); + assert!(gossip_table.finished.contains(&data_id)); + + // Check that calling this again has no effect and continues to return `false`. + assert!(!gossip_table.reduce_in_flight_count(&data_id, 1)); assert!(gossip_table.finished.contains(&data_id)); } #[test] fn should_terminate_via_saturation() { + let _ = logging::init(); let mut rng = crate::new_rng(); let node_ids = random_node_ids(&mut rng); let data_id: u64 = rng.gen(); @@ -827,12 +871,14 @@ mod tests { // Add new complete data with 14 non-infections and check this doesn't cause us to stop // gossiping. 
- let _ = gossip_table.new_complete_data(&data_id, None); - let limit = EXPECTED_DEFAULT_HOLDERS_LIMIT - 1; + let _ = gossip_table.new_complete_data(&data_id, None, GossipTarget::All); + let limit = EXPECTED_DEFAULT_ATTEMPTED_TO_INFECT_LIMIT - 1; for (index, node_id) in node_ids.iter().enumerate().take(limit) { + gossip_table.register_infection_attempt(&data_id, iter::once(node_id)); let action = gossip_table.already_infected(&data_id, *node_id); let expected = GossipAction::ShouldGossip(ShouldGossip { count: 1, + target: GossipTarget::All, exclude_peers: node_ids[..(index + 1)].iter().cloned().collect(), is_already_held: true, }); @@ -841,21 +887,25 @@ mod tests { // Check recording a non-infection from an already-recorded holder doesn't cause us to stop // gossiping. + gossip_table.register_infection_attempt(&data_id, iter::once(&node_ids[0])); let action = gossip_table.already_infected(&data_id, node_ids[0]); let expected = GossipAction::ShouldGossip(ShouldGossip { count: 1, + target: GossipTarget::All, exclude_peers: node_ids[..limit].iter().cloned().collect(), is_already_held: true, }); assert_eq!(expected, action); // Check 15th non-infection does cause us to stop gossiping. + gossip_table.register_infection_attempt(&data_id, iter::once(&node_ids[limit])); let action = gossip_table.we_infected(&data_id, node_ids[limit]); - assert_eq!(GossipAction::Noop, action); + assert_eq!(GossipAction::AnnounceFinished, action); } #[test] fn should_not_terminate_below_infection_limit_and_saturation() { + let _ = logging::init(); let mut rng = crate::new_rng(); let node_ids = random_node_ids(&mut rng); let data_id: u64 = rng.gen(); @@ -863,22 +913,30 @@ mod tests { let mut gossip_table = GossipTable::new(Config::default()); // Add new complete data with 2 infections and 11 non-infections. 
- let _ = gossip_table.new_complete_data(&data_id, None); + let _ = gossip_table.new_complete_data(&data_id, None, GossipTarget::All); let infection_limit = EXPECTED_DEFAULT_INFECTION_TARGET - 1; for node_id in &node_ids[0..infection_limit] { + gossip_table.register_infection_attempt(&data_id, iter::once(node_id)); let _ = gossip_table.we_infected(&data_id, *node_id); } - let holders_limit = EXPECTED_DEFAULT_HOLDERS_LIMIT - 2; - for node_id in &node_ids[infection_limit..holders_limit] { + let attempted_to_infect = EXPECTED_DEFAULT_ATTEMPTED_TO_INFECT_LIMIT - 2; + for node_id in &node_ids[infection_limit..attempted_to_infect] { + gossip_table.register_infection_attempt(&data_id, iter::once(node_id)); let _ = gossip_table.already_infected(&data_id, *node_id); } // Check adding 12th non-infection doesn't cause us to stop gossiping. - let action = gossip_table.already_infected(&data_id, node_ids[holders_limit]); + gossip_table + .register_infection_attempt(&data_id, iter::once(&node_ids[attempted_to_infect])); + let action = gossip_table.already_infected(&data_id, node_ids[attempted_to_infect]); let expected = GossipAction::ShouldGossip(ShouldGossip { count: 1, - exclude_peers: node_ids[..(holders_limit + 1)].iter().cloned().collect(), + target: GossipTarget::All, + exclude_peers: node_ids[..(attempted_to_infect + 1)] + .iter() + .cloned() + .collect(), is_already_held: true, }); assert_eq!(expected, action); @@ -886,6 +944,7 @@ mod tests { #[test] fn check_timeout_should_detect_holder() { + let _ = logging::init(); let mut rng = crate::new_rng(); let node_ids = random_node_ids(&mut rng); let data_id: u64 = rng.gen(); @@ -893,17 +952,20 @@ mod tests { let mut gossip_table = GossipTable::new(Config::default()); // Add new complete data and get a response from node 0 only. 
- let _ = gossip_table.new_complete_data(&data_id, None); + let _ = gossip_table.new_complete_data(&data_id, None, GossipTarget::All); let _ = gossip_table.we_infected(&data_id, node_ids[0]); // check_timeout for node 0 should return Noop, and for node 1 it should represent a timed // out response and return ShouldGossip. + gossip_table.register_infection_attempt(&data_id, iter::once(&node_ids[0])); let action = gossip_table.check_timeout(&data_id, node_ids[0]); assert_eq!(GossipAction::Noop, action); + gossip_table.register_infection_attempt(&data_id, iter::once(&node_ids[1])); let action = gossip_table.check_timeout(&data_id, node_ids[1]); let expected = GossipAction::ShouldGossip(ShouldGossip { count: 1, + target: GossipTarget::All, exclude_peers: node_ids[..=1].iter().cloned().collect(), is_already_held: true, }); @@ -913,29 +975,33 @@ mod tests { #[test] #[cfg_attr( debug_assertions, - should_panic(expected = "shouldn't check timeout for a gossip response for partial data") + should_panic( + expected = "shouldn't check timeout for a gossip response for data we don't hold" + ) )] - fn check_timeout_should_panic_for_partial_copy() { + fn check_timeout_should_panic_for_data_we_dont_hold() { + let _ = logging::init(); let mut rng = crate::new_rng(); let node_ids = random_node_ids(&mut rng); let data_id: u64 = rng.gen(); let mut gossip_table = GossipTable::new(Config::default()); - let _ = gossip_table.new_partial_data(&data_id, node_ids[0]); + let _ = gossip_table.new_data_id(&data_id, node_ids[0]); let _ = gossip_table.check_timeout(&data_id, node_ids[0]); } #[test] fn should_remove_holder_if_unresponsive() { + let _ = logging::init(); let mut rng = crate::new_rng(); let node_ids = random_node_ids(&mut rng); let data_id: u64 = rng.gen(); let mut gossip_table = GossipTable::new(Config::default()); - // Add new partial data from nodes 0 and 1. 
- let _ = gossip_table.new_partial_data(&data_id, node_ids[0]); - let _ = gossip_table.new_partial_data(&data_id, node_ids[1]); + // Add new data ID from nodes 0 and 1. + let _ = gossip_table.new_data_id(&data_id, node_ids[0]); + let _ = gossip_table.new_data_id(&data_id, node_ids[1]); // Node 0 should be removed from the holders since it hasn't provided us with the full data, // and we should be told to get the remainder from node 1. @@ -950,83 +1016,51 @@ mod tests { // and the entry should be removed since there are no more holders. let action = gossip_table.remove_holder_if_unresponsive(&data_id, node_ids[1]); assert_eq!(GossipAction::Noop, action); - check_holders(&node_ids[..0], &gossip_table, &data_id); assert!(!gossip_table.current.contains_key(&data_id)); - assert!(!gossip_table.paused.contains_key(&data_id)); - - // Add new partial data from node 2 and check gossiping has been resumed. - let action = gossip_table.new_partial_data(&data_id, node_ids[2]); - let expected = GossipAction::GetRemainder { - holder: node_ids[2], - }; - assert_eq!(expected, action); - check_holders(&node_ids[2..3], &gossip_table, &data_id); - - // Node 2 should be removed from the holders since it hasn't provided us with the full data, - // and the entry should be paused since there are no more holders. - let action = gossip_table.remove_holder_if_unresponsive(&data_id, node_ids[2]); - assert_eq!(GossipAction::Noop, action); - check_holders(&node_ids[..0], &gossip_table, &data_id); - assert!(!gossip_table.current.contains_key(&data_id)); - assert!(!gossip_table.paused.contains_key(&data_id)); - - // Add new complete data from node 3 and check gossiping has been resumed. 
- let action = gossip_table.new_complete_data(&data_id, Some(node_ids[3])); - let expected = Some(ShouldGossip { - count: EXPECTED_DEFAULT_INFECTION_TARGET, - exclude_peers: iter::once(node_ids[3]).collect(), - is_already_held: false, - }); - assert_eq!(expected, action); - check_holders(&node_ids[3..4], &gossip_table, &data_id); + assert!(!gossip_table.finished.contains(&data_id)); } #[test] fn should_not_remove_holder_if_responsive() { + let _ = logging::init(); let mut rng = crate::new_rng(); let node_ids = random_node_ids(&mut rng); let data_id: u64 = rng.gen(); let mut gossip_table = GossipTable::new(Config::default()); - // Add new partial data from node 0 and record that we have received the full data from it. - let _ = gossip_table.new_partial_data(&data_id, node_ids[0]); - let _ = gossip_table.new_complete_data(&data_id, Some(node_ids[0])); + // Add new data ID from node 0 and record that we have received the full data from it. + let _ = gossip_table.new_data_id(&data_id, node_ids[0]); + let _ = gossip_table.new_complete_data(&data_id, Some(node_ids[0]), GossipTarget::All); // Node 0 should remain as a holder since we now hold the complete data. let action = gossip_table.remove_holder_if_unresponsive(&data_id, node_ids[0]); assert_eq!(GossipAction::Noop, action); // Noop as all RPCs are still in-flight check_holders(&node_ids[..1], &gossip_table, &data_id); assert!(gossip_table.current.contains_key(&data_id)); - assert!(!gossip_table.paused.contains_key(&data_id)); } #[test] - fn should_not_auto_resume_manually_paused() { + fn should_force_finish() { + let _ = logging::init(); let mut rng = crate::new_rng(); let node_ids = random_node_ids(&mut rng); let data_id: u64 = rng.gen(); let mut gossip_table = GossipTable::new(Config::default()); - // Add new partial data from node 0, manually pause gossiping, then record that node 0 - // failed to provide the full data. 
- let _ = gossip_table.new_partial_data(&data_id, node_ids[0]); - gossip_table.pause(&data_id); - let action = gossip_table.remove_holder_if_unresponsive(&data_id, node_ids[0]); - assert_eq!(GossipAction::Noop, action); - check_holders(&node_ids[..0], &gossip_table, &data_id); + // Add new data ID from node 0, then forcibly finish gossiping. + let _ = gossip_table.new_data_id(&data_id, node_ids[0]); + assert!(gossip_table.force_finish(&data_id)); + assert!(gossip_table.finished.contains(&data_id)); - // Add new partial data from node 1 and check gossiping has not been resumed. - let action = gossip_table.new_partial_data(&data_id, node_ids[1]); - assert_eq!(GossipAction::Noop, action); - check_holders(&node_ids[1..2], &gossip_table, &data_id); - assert!(!gossip_table.current.contains_key(&data_id)); - assert!(gossip_table.paused.contains_key(&data_id)); + // Ensure forcibly finishing the same data returns `false`. + assert!(!gossip_table.force_finish(&data_id)); } #[test] fn should_purge() { + let _ = logging::init(); let mut rng = crate::new_rng(); let node_ids = random_node_ids(&mut rng); let data_id: u64 = rng.gen(); @@ -1034,48 +1068,113 @@ mod tests { let mut gossip_table = GossipTable::new(Config::default()); // Add new complete data and finish via infection limit. - let _ = gossip_table.new_complete_data(&data_id, None); + let _ = gossip_table.new_complete_data(&data_id, None, GossipTarget::All); for node_id in &node_ids[0..EXPECTED_DEFAULT_INFECTION_TARGET] { let _ = gossip_table.we_infected(&data_id, *node_id); } assert!(gossip_table.finished.contains(&data_id)); // Time the finished data out and check it has been purged. - Instant::advance_time(DEFAULT_FINISHED_ENTRY_DURATION_SECS * 1_000 + 1); + let millis = TimeDiff::from_str(DEFAULT_FINISHED_ENTRY_DURATION) + .unwrap() + .millis(); + Instant::advance_time(millis + 1); gossip_table.purge_finished(); assert!(!gossip_table.finished.contains(&data_id)); - // Add new complete data and pause. 
- let _ = gossip_table.new_complete_data(&data_id, None); - gossip_table.pause(&data_id); - assert!(gossip_table.paused.contains_key(&data_id)); + // Add new complete data and forcibly finish. + let _ = gossip_table.new_complete_data(&data_id, None, GossipTarget::All); + assert!(gossip_table.force_finish(&data_id)); + assert!(gossip_table.finished.contains(&data_id)); - // Time the paused data out and check it has been purged. - Instant::advance_time(DEFAULT_FINISHED_ENTRY_DURATION_SECS * 1_000 + 1); + // Time the finished data out and check it has been purged. + Instant::advance_time(millis + 1); gossip_table.purge_finished(); - assert!(!gossip_table.paused.contains_key(&data_id)); + assert!(!gossip_table.finished.contains(&data_id)); } - #[bench] - fn benchmark_purging(bencher: &mut Bencher) { - const ENTRY_COUNT: usize = 10_000; - let mut rng = crate::new_rng(); - let node_ids = random_node_ids(&mut rng); - let deploy_ids = iter::repeat_with(|| DeployHash::new(Digest::random(&mut rng))) - .take(ENTRY_COUNT) - .collect::>(); + #[test] + fn timeouts_purge_in_order() { + let mut timeouts = Timeouts::new(); + let now = Instant::now(); + let later_100 = now + Duration::from_millis(100); + let later_200 = now + Duration::from_millis(200); - let mut gossip_table = GossipTable::new(Config::default()); + // Timeouts are added and purged in chronological order. + timeouts.push(now, 0); + timeouts.push(later_100, 1); + timeouts.push(later_200, 2); - // Add new complete data and finish via infection limit. 
- for deploy_id in &deploy_ids { - let _ = gossip_table.new_complete_data(deploy_id, None); - for node_id in &node_ids[0..EXPECTED_DEFAULT_INFECTION_TARGET] { - let _ = gossip_table.we_infected(deploy_id, *node_id); - } - assert!(gossip_table.finished.contains(&deploy_id)); - } + let now_after_time_travel = now + Duration::from_millis(10); + let purged = timeouts.purge(&now_after_time_travel).collect::>(); - bencher.iter(|| gossip_table.purge_finished()); + assert_eq!(purged, vec![0]); + } + + #[test] + fn timeouts_depends_on_binary_search_by_implementation() { + // This test is meant to document the dependency of + // Timeouts::purge on https://doc.rust-lang.org/std/vec/struct.Vec.html#method.binary_search_by. + // If this test is failing then it's reasonable to believe that the implementation of + // binary_search_by has been updated. + let mut timeouts = Timeouts::new(); + let now = Instant::now(); + let later_100 = now + Duration::from_millis(100); + let later_200 = now + Duration::from_millis(200); + let later_300 = now + Duration::from_millis(300); + let later_400 = now + Duration::from_millis(400); + let later_500 = now + Duration::from_millis(500); + let later_600 = now + Duration::from_millis(600); + + timeouts.push(later_100, 1); + timeouts.push(later_200, 2); + timeouts.push(later_300, 3); + + // If a node's system time was changed to a time earlier than + // the earliest timeout, and a new timeout is added with an instant + // corresponding to this new early time, then this would make the earliest + // timeout the LAST timeout in the vec. + // [100 < 200 < 300 > 0] + timeouts.push(now, 0); + + let now_after_time_travel = now + Duration::from_millis(10); + // Intuitively, we would expect [1,2,3,0] to be in the "purged" vec here. + // This is not the case because we're using binary_search_by, which (currently) + // is implemented with logic that checks if a, b, ... z are in a consistent order. 
+ // in this case, the order that we've established is a < b < ... < z for each element in the + // vec, but we broke that order by inserting '0' last, and for some reason, + // binary_search_by won't find this unless there is a number > n occurring AFTER n + // in the vec. + + let purged = timeouts.purge(&now_after_time_travel).collect::>(); + let empty: Vec = vec![]; + + // This isn't a problem and the order will eventually + // be restored. + assert_eq!(purged, empty); + + timeouts.push(later_400, 4); + timeouts.push(later_500, 5); + timeouts.push(later_600, 6); + + // Now, we advance time another 10 ms and purge again. + // In this scenario, timeouts with a later time are added after our + // improperly ordered "now" timeout + // [100 < 200 < 300 > 0 < 400 < 500 < 600] + let now_after_time_travel = now + Duration::from_millis(20); + let purged = timeouts.purge(&now_after_time_travel).collect::>(); + let expected = [1, 2, 3, 0]; + + assert_eq!(purged, expected); + + // After the previous purge, an order is restored where a < b for consecutive elements in + // the vec. [400 < 500 < 600], so, purging timeouts up to 610 will properly clear + // the vec. 
+ let now_after_time_travel = now + Duration::from_millis(610); + let purged = timeouts.purge(&now_after_time_travel).collect::>(); + let expected = [4, 5, 6]; + + assert_eq!(purged, expected); + assert_eq!(0, timeouts.values.len()); } } diff --git a/node/src/components/gossiper/item_provider.rs b/node/src/components/gossiper/item_provider.rs new file mode 100644 index 0000000000..ae9960ab80 --- /dev/null +++ b/node/src/components/gossiper/item_provider.rs @@ -0,0 +1,17 @@ +use async_trait::async_trait; + +use super::GossipItem; +use crate::effect::{requests::StorageRequest, EffectBuilder}; + +#[async_trait] +pub(super) trait ItemProvider { + async fn is_stored + Send>( + effect_builder: EffectBuilder, + item_id: T::Id, + ) -> bool; + + async fn get_from_storage + Send>( + effect_builder: EffectBuilder, + item_id: T::Id, + ) -> Option>; +} diff --git a/node/src/components/gossiper/message.rs b/node/src/components/gossiper/message.rs index c3c4c4455d..d3078b2c7c 100644 --- a/node/src/components/gossiper/message.rs +++ b/node/src/components/gossiper/message.rs @@ -1,23 +1,34 @@ -use std::fmt::{self, Display, Formatter}; +use std::{ + boxed::Box, + fmt::{self, Display, Formatter}, +}; use serde::{Deserialize, Serialize}; +use strum::EnumDiscriminants; -use super::Item; +use super::GossipItem; -#[derive(Clone, Debug, Deserialize, Serialize)] +#[derive(Clone, Debug, Deserialize, Serialize, EnumDiscriminants)] +#[strum_discriminants(derive(strum::EnumIter))] #[serde(bound = "for<'a> T: Deserialize<'a>")] -pub enum Message { +pub(crate) enum Message { /// Gossiped out to random peers to notify them of an item we hold. Gossip(T::Id), /// Response to a `Gossip` message. If `is_already_held` is false, the recipient should treat - /// this as a `GetRequest` and send a `GetResponse` containing the item. + /// this as a `GetItem` message and send an `Item` message containing the item. 
GossipResponse { item_id: T::Id, is_already_held: bool, }, + /// Request to get an item we were previously told about, but the peer timed out and we never + /// received it. + GetItem(T::Id), + /// Response to either a `GossipResponse` with `is_already_held` set to `false` or to a + /// `GetItem` message. Contains the actual item requested. + Item(Box), } -impl Display for Message { +impl Display for Message { fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { match self { Message::Gossip(item_id) => write!(formatter, "gossip({})", item_id), @@ -29,6 +40,44 @@ impl Display for Message { "gossip-response({}, {})", item_id, is_already_held ), + Message::GetItem(item_id) => write!(formatter, "gossip-get-item({})", item_id), + Message::Item(item) => write!(formatter, "gossip-item({})", item.gossip_id()), + } + } +} + +mod specimen_support { + use crate::{ + components::gossiper::GossipItem, + utils::specimen::{largest_variant, Cache, LargestSpecimen, SizeEstimator}, + }; + + use super::{Message, MessageDiscriminants}; + + impl LargestSpecimen for Message + where + T: GossipItem + LargestSpecimen, + ::Id: LargestSpecimen, + { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + largest_variant::( + estimator, + |variant| match variant { + MessageDiscriminants::Gossip => { + Message::Gossip(LargestSpecimen::largest_specimen(estimator, cache)) + } + MessageDiscriminants::GossipResponse => Message::GossipResponse { + item_id: LargestSpecimen::largest_specimen(estimator, cache), + is_already_held: LargestSpecimen::largest_specimen(estimator, cache), + }, + MessageDiscriminants::GetItem => { + Message::GetItem(LargestSpecimen::largest_specimen(estimator, cache)) + } + MessageDiscriminants::Item => { + Message::Item(LargestSpecimen::largest_specimen(estimator, cache)) + } + }, + ) } } } diff --git a/node/src/components/gossiper/metrics.rs b/node/src/components/gossiper/metrics.rs index 681c0c3eb1..2bf9d2e900 100644 --- 
a/node/src/components/gossiper/metrics.rs +++ b/node/src/components/gossiper/metrics.rs @@ -4,15 +4,13 @@ use crate::unregister_metric; /// Metrics for the gossiper component. #[derive(Debug)] -pub struct GossiperMetrics { +pub(super) struct Metrics { /// Total number of items received by the gossiper. pub(super) items_received: IntCounter, /// Total number of gossip requests sent to peers. pub(super) times_gossiped: IntCounter, /// Number of times the process had to pause due to running out of peers. pub(super) times_ran_out_of_peers: IntCounter, - /// Number of items in the gossip table that are paused. - pub(super) table_items_paused: IntGauge, /// Number of items in the gossip table that are currently being gossiped. pub(super) table_items_current: IntGauge, /// Number of items in the gossip table that are finished. @@ -21,7 +19,7 @@ pub struct GossiperMetrics { registry: Registry, } -impl GossiperMetrics { +impl Metrics { /// Creates a new instance of gossiper metrics, using the given prefix. 
pub fn new(name: &str, registry: &Registry) -> Result { let items_received = IntCounter::new( @@ -39,13 +37,6 @@ impl GossiperMetrics { name ), )?; - let table_items_paused = IntGauge::new( - format!("{}_table_items_paused", name), - format!( - "number of items in the gossip table of {} in state paused", - name - ), - )?; let table_items_current = IntGauge::new( format!("{}_table_items_current", name), format!( @@ -64,15 +55,13 @@ impl GossiperMetrics { registry.register(Box::new(items_received.clone()))?; registry.register(Box::new(times_gossiped.clone()))?; registry.register(Box::new(times_ran_out_of_peers.clone()))?; - registry.register(Box::new(table_items_paused.clone()))?; registry.register(Box::new(table_items_current.clone()))?; registry.register(Box::new(table_items_finished.clone()))?; - Ok(GossiperMetrics { + Ok(Metrics { items_received, times_gossiped, times_ran_out_of_peers, - table_items_paused, table_items_current, table_items_finished, registry: registry.clone(), @@ -80,12 +69,11 @@ impl GossiperMetrics { } } -impl Drop for GossiperMetrics { +impl Drop for Metrics { fn drop(&mut self) { unregister_metric!(self.registry, self.items_received); unregister_metric!(self.registry, self.times_gossiped); unregister_metric!(self.registry, self.times_ran_out_of_peers); - unregister_metric!(self.registry, self.table_items_paused); unregister_metric!(self.registry, self.table_items_current); unregister_metric!(self.registry, self.table_items_finished); } diff --git a/node/src/components/gossiper/provider_impls.rs b/node/src/components/gossiper/provider_impls.rs new file mode 100644 index 0000000000..0d73bae263 --- /dev/null +++ b/node/src/components/gossiper/provider_impls.rs @@ -0,0 +1,4 @@ +mod address_provider; +mod block_provider; +mod finality_signature_provider; +mod transaction_provider; diff --git a/node/src/components/gossiper/provider_impls/address_provider.rs b/node/src/components/gossiper/provider_impls/address_provider.rs new file mode 100644 index 
0000000000..885a824cf9 --- /dev/null +++ b/node/src/components/gossiper/provider_impls/address_provider.rs @@ -0,0 +1,31 @@ +use async_trait::async_trait; +use tracing::error; + +use crate::{ + components::{ + gossiper::{GossipItem, Gossiper, ItemProvider}, + network::GossipedAddress, + }, + effect::EffectBuilder, +}; + +#[async_trait] +impl ItemProvider + for Gossiper<{ GossipedAddress::ID_IS_COMPLETE_ITEM }, GossipedAddress> +{ + async fn is_stored( + _effect_builder: EffectBuilder, + item_id: GossipedAddress, + ) -> bool { + error!(%item_id, "address gossiper should never try to check if item is stored"); + false + } + + async fn get_from_storage( + _effect_builder: EffectBuilder, + item_id: GossipedAddress, + ) -> Option> { + error!(%item_id, "address gossiper should never try to get from storage"); + None + } +} diff --git a/node/src/components/gossiper/provider_impls/block_provider.rs b/node/src/components/gossiper/provider_impls/block_provider.rs new file mode 100644 index 0000000000..a66bd3daa8 --- /dev/null +++ b/node/src/components/gossiper/provider_impls/block_provider.rs @@ -0,0 +1,47 @@ +use async_trait::async_trait; +use std::convert::TryInto; + +use casper_types::{BlockHash, BlockV2}; + +use crate::{ + components::gossiper::{GossipItem, GossipTarget, Gossiper, ItemProvider, LargeGossipItem}, + effect::{requests::StorageRequest, EffectBuilder}, +}; + +impl GossipItem for BlockV2 { + type Id = BlockHash; + + const ID_IS_COMPLETE_ITEM: bool = false; + const REQUIRES_GOSSIP_RECEIVED_ANNOUNCEMENT: bool = true; + + fn gossip_id(&self) -> Self::Id { + *self.hash() + } + + fn gossip_target(&self) -> GossipTarget { + GossipTarget::Mixed(self.era_id()) + } +} + +impl LargeGossipItem for BlockV2 {} + +#[async_trait] +impl ItemProvider for Gossiper<{ BlockV2::ID_IS_COMPLETE_ITEM }, BlockV2> { + async fn is_stored + Send>( + effect_builder: EffectBuilder, + item_id: BlockHash, + ) -> bool { + effect_builder.is_block_stored(item_id).await + } + + async fn 
get_from_storage + Send>( + effect_builder: EffectBuilder, + item_id: BlockHash, + ) -> Option> { + if let Some(block) = effect_builder.get_block_from_storage(item_id).await { + block.try_into().ok().map(Box::new) + } else { + None + } + } +} diff --git a/node/src/components/gossiper/provider_impls/finality_signature_provider.rs b/node/src/components/gossiper/provider_impls/finality_signature_provider.rs new file mode 100644 index 0000000000..7bebca95a6 --- /dev/null +++ b/node/src/components/gossiper/provider_impls/finality_signature_provider.rs @@ -0,0 +1,55 @@ +use async_trait::async_trait; + +use casper_types::{FinalitySignature, FinalitySignatureId, FinalitySignatureV2}; + +use crate::{ + components::gossiper::{GossipItem, GossipTarget, Gossiper, ItemProvider, LargeGossipItem}, + effect::{requests::StorageRequest, EffectBuilder}, +}; + +impl GossipItem for FinalitySignatureV2 { + type Id = Box; + + const ID_IS_COMPLETE_ITEM: bool = false; + const REQUIRES_GOSSIP_RECEIVED_ANNOUNCEMENT: bool = true; + + fn gossip_id(&self) -> Self::Id { + Box::new(FinalitySignatureId::new( + *self.block_hash(), + self.era_id(), + self.public_key().clone(), + )) + } + + fn gossip_target(&self) -> GossipTarget { + GossipTarget::Mixed(self.era_id()) + } +} + +impl LargeGossipItem for FinalitySignatureV2 {} + +#[async_trait] +impl ItemProvider + for Gossiper<{ FinalitySignatureV2::ID_IS_COMPLETE_ITEM }, FinalitySignatureV2> +{ + async fn is_stored + Send>( + effect_builder: EffectBuilder, + item_id: Box, + ) -> bool { + effect_builder.is_finality_signature_stored(item_id).await + } + + async fn get_from_storage + Send>( + effect_builder: EffectBuilder, + item_id: Box, + ) -> Option> { + if let Some(FinalitySignature::V2(sig)) = effect_builder + .get_finality_signature_from_storage(item_id) + .await + { + Some(Box::new(sig)) + } else { + None + } + } +} diff --git a/node/src/components/gossiper/provider_impls/transaction_provider.rs 
b/node/src/components/gossiper/provider_impls/transaction_provider.rs new file mode 100644 index 0000000000..042d9037d6 --- /dev/null +++ b/node/src/components/gossiper/provider_impls/transaction_provider.rs @@ -0,0 +1,45 @@ +use async_trait::async_trait; + +use casper_types::{Transaction, TransactionId}; + +use crate::{ + components::gossiper::{GossipItem, GossipTarget, Gossiper, ItemProvider, LargeGossipItem}, + effect::{requests::StorageRequest, EffectBuilder}, +}; + +impl GossipItem for Transaction { + type Id = TransactionId; + + const ID_IS_COMPLETE_ITEM: bool = false; + const REQUIRES_GOSSIP_RECEIVED_ANNOUNCEMENT: bool = false; + + fn gossip_id(&self) -> Self::Id { + self.compute_id() + } + + fn gossip_target(&self) -> GossipTarget { + GossipTarget::All + } +} + +impl LargeGossipItem for Transaction {} + +#[async_trait] +impl ItemProvider for Gossiper<{ Transaction::ID_IS_COMPLETE_ITEM }, Transaction> { + async fn is_stored + Send>( + effect_builder: EffectBuilder, + item_id: TransactionId, + ) -> bool { + effect_builder.is_transaction_stored(item_id).await + } + + async fn get_from_storage + Send>( + effect_builder: EffectBuilder, + item_id: TransactionId, + ) -> Option> { + effect_builder + .get_stored_transaction(item_id) + .await + .map(Box::new) + } +} diff --git a/node/src/components/gossiper/tests.rs b/node/src/components/gossiper/tests.rs index 63596bc6ec..78c657bbe6 100644 --- a/node/src/components/gossiper/tests.rs +++ b/node/src/components/gossiper/tests.rs @@ -1,11 +1,13 @@ +// Unrestricted event size is okay in tests. 
+#![allow(clippy::large_enum_variant)] #![cfg(test)] use std::{ collections::{BTreeSet, HashMap}, - fmt::{self, Debug, Display, Formatter}, iter, + sync::Arc, }; -use derive_more::From; +use derive_more::{Display, From}; use prometheus::Registry; use rand::Rng; use reactor::ReactorEvent; @@ -15,135 +17,110 @@ use thiserror::Error; use tokio::time; use tracing::debug; -use casper_types::ProtocolVersion; +use casper_types::{ + testing::TestRng, BlockV2, Chainspec, ChainspecRawBytes, EraId, FinalitySignatureV2, + ProtocolVersion, TimeDiff, Transaction, TransactionConfig, +}; use super::*; use crate::{ components::{ - contract_runtime::{self, ContractRuntime}, - deploy_acceptor::{self, DeployAcceptor}, in_memory_network::{self, InMemoryNetwork, NetworkController}, + network::{GossipedAddress, Identity as NetworkIdentity}, storage::{self, Storage}, + transaction_acceptor, }, - crypto::hash::Digest, effect::{ announcements::{ - ContractRuntimeAnnouncement, ControlAnnouncement, DeployAcceptorAnnouncement, - GossiperAnnouncement, NetworkAnnouncement, RpcServerAnnouncement, + ControlAnnouncement, FatalAnnouncement, GossiperAnnouncement, + TransactionAcceptorAnnouncement, + }, + incoming::{ + ConsensusDemand, ConsensusMessageIncoming, FinalitySignatureIncoming, + NetRequestIncoming, NetResponseIncoming, TrieDemand, TrieRequestIncoming, + TrieResponseIncoming, }, - requests::{ConsensusRequest, ContractRuntimeRequest, LinearChainRequest}, - Responder, + requests::AcceptTransactionRequest, }, protocol::Message as NodeMessage, - reactor::{self, EventQueueHandle, Runner}, + reactor::{self, EventQueueHandle, QueueKind, Runner, TryCrankOutcome}, testing::{ - network::{Network, NetworkedReactor}, - ConditionCheckReactor, TestRng, + self, + network::{NetworkedReactor, TestingNetwork}, + ConditionCheckReactor, FakeTransactionAcceptor, }, - types::{Chainspec, Deploy, NodeId, Tag}, - utils::{Loadable, WithDir}, + types::NodeId, + utils::WithDir, NodeRng, }; +const RECENT_ERA_COUNT: u64 
= 5; +const MAX_TTL: TimeDiff = TimeDiff::from_seconds(86400); +const EXPECTED_GOSSIP_TARGET: GossipTarget = GossipTarget::All; + /// Top-level event for the reactor. -#[derive(Debug, From, Serialize)] +#[derive(Debug, From, Serialize, Display)] #[must_use] enum Event { #[from] Network(in_memory_network::Event), #[from] - Storage(#[serde(skip_serializing)] storage::Event), + Storage(storage::Event), #[from] - DeployAcceptor(#[serde(skip_serializing)] deploy_acceptor::Event), + TransactionAcceptor(#[serde(skip_serializing)] transaction_acceptor::Event), #[from] - DeployGossiper(super::Event), + TransactionGossiper(super::Event), #[from] - NetworkRequest(NetworkRequest), + NetworkRequest(NetworkRequest), #[from] - ControlAnnouncement(ControlAnnouncement), + StorageRequest(StorageRequest), #[from] - NetworkAnnouncement(#[serde(skip_serializing)] NetworkAnnouncement), + AcceptTransactionRequest(AcceptTransactionRequest), #[from] - RpcServerAnnouncement(#[serde(skip_serializing)] RpcServerAnnouncement), + TransactionAcceptorAnnouncement(#[serde(skip_serializing)] TransactionAcceptorAnnouncement), #[from] - DeployAcceptorAnnouncement(#[serde(skip_serializing)] DeployAcceptorAnnouncement), + TransactionGossiperAnnouncement(#[serde(skip_serializing)] GossiperAnnouncement), #[from] - DeployGossiperAnnouncement(#[serde(skip_serializing)] GossiperAnnouncement), - #[from] - ContractRuntime(#[serde(skip_serializing)] contract_runtime::Event), + TransactionGossiperIncoming(GossiperIncoming), } impl ReactorEvent for Event { - fn as_control(&self) -> Option<&ControlAnnouncement> { - if let Self::ControlAnnouncement(ref ctrl_ann) = self { - Some(ctrl_ann) - } else { - None - } + fn is_control(&self) -> bool { + false } -} -impl From for Event { - fn from(request: StorageRequest) -> Self { - Event::Storage(storage::Event::from(request)) + fn try_into_control(self) -> Option { + None } } -impl From for Event { - fn from(request: ContractRuntimeRequest) -> Self { - 
Event::ContractRuntime(contract_runtime::Event::Request(Box::new(request))) - } -} - -impl From>> for Event { - fn from(request: NetworkRequest>) -> Self { +impl From>> for Event { + fn from(request: NetworkRequest>) -> Self { Event::NetworkRequest(request.map_payload(NodeMessage::from)) } } -impl From for Event { - fn from(_request: ConsensusRequest) -> Self { - unimplemented!("not implemented for gossiper tests") - } -} +trait Unhandled {} -impl From> for Event { - fn from(_request: LinearChainRequest) -> Self { - unimplemented!("not implemented for gossiper tests") +impl From for Event { + fn from(_: T) -> Self { + unimplemented!("not handled in gossiper tests") } } -impl From for Event { - fn from(_request: ContractRuntimeAnnouncement) -> Self { - unimplemented!("not implemented for gossiper tests") - } -} - -impl Display for Event { - fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { - match self { - Event::Network(event) => write!(formatter, "event: {}", event), - Event::Storage(event) => write!(formatter, "storage: {}", event), - Event::DeployAcceptor(event) => write!(formatter, "deploy acceptor: {}", event), - Event::DeployGossiper(event) => write!(formatter, "deploy gossiper: {}", event), - Event::NetworkRequest(req) => write!(formatter, "network request: {}", req), - Event::ControlAnnouncement(ctrl_ann) => write!(formatter, "control: {}", ctrl_ann), - Event::NetworkAnnouncement(ann) => write!(formatter, "network announcement: {}", ann), - Event::RpcServerAnnouncement(ann) => { - write!(formatter, "api server announcement: {}", ann) - } - Event::DeployAcceptorAnnouncement(ann) => { - write!(formatter, "deploy-acceptor announcement: {}", ann) - } - Event::DeployGossiperAnnouncement(ann) => { - write!(formatter, "deploy-gossiper announcement: {}", ann) - } - Event::ContractRuntime(event) => { - write!(formatter, "contract-runtime event: {:?}", event) - } - } - } -} +impl Unhandled for ConsensusDemand {} +impl Unhandled for ControlAnnouncement {} 
+impl Unhandled for FatalAnnouncement {} +impl Unhandled for ConsensusMessageIncoming {} +impl Unhandled for GossiperIncoming {} +impl Unhandled for GossiperIncoming {} +impl Unhandled for GossiperIncoming {} +impl Unhandled for NetRequestIncoming {} +impl Unhandled for NetResponseIncoming {} +impl Unhandled for TrieRequestIncoming {} +impl Unhandled for TrieDemand {} +impl Unhandled for TrieResponseIncoming {} +impl Unhandled for FinalitySignatureIncoming {} /// Error type returned by the test reactor. #[derive(Debug, Error)] @@ -155,9 +132,8 @@ enum Error { struct Reactor { network: InMemoryNetwork, storage: Storage, - deploy_acceptor: DeployAcceptor, - deploy_gossiper: Gossiper, - contract_runtime: ContractRuntime, + fake_transaction_acceptor: FakeTransactionAcceptor, + transaction_gossiper: Gossiper<{ Transaction::ID_IS_COMPLETE_ITEM }, Transaction>, _storage_tempdir: TempDir, } @@ -174,51 +150,46 @@ impl reactor::Reactor for Reactor { fn new( config: Self::Config, + _chainspec: Arc, + _chainspec_raw_bytes: Arc, + _network_identity: NetworkIdentity, registry: &Registry, event_queue: EventQueueHandle, rng: &mut NodeRng, ) -> Result<(Self, Effects), Self::Error> { - let network = NetworkController::create_node(event_queue, rng); - - let (storage_config, storage_tempdir) = storage::Config::default_for_tests(); + let (storage_config, storage_tempdir) = storage::Config::new_for_tests(1); let storage_withdir = WithDir::new(storage_tempdir.path(), storage_config); - let storage = - Storage::new(&storage_withdir, None, ProtocolVersion::from_parts(1, 0, 0)).unwrap(); - - let contract_runtime_config = contract_runtime::Config::default(); - let contract_runtime = ContractRuntime::new( - Digest::random(rng), + let storage = Storage::new( + &storage_withdir, None, ProtocolVersion::from_parts(1, 0, 0), - storage_withdir, - &contract_runtime_config, - ®istry, + EraId::default(), + "test", + MAX_TTL.into(), + RECENT_ERA_COUNT, + Some(registry), + false, + 
TransactionConfig::default(), ) .unwrap(); - let deploy_acceptor = DeployAcceptor::new( - deploy_acceptor::Config::new(false), - &Chainspec::from_resources("local"), - ); - let deploy_gossiper = Gossiper::new_for_partial_items( - "deploy_gossiper", + let fake_transaction_acceptor = FakeTransactionAcceptor::new(); + let transaction_gossiper = Gossiper::<{ Transaction::ID_IS_COMPLETE_ITEM }, _>::new( + "transaction_gossiper", config, - get_deploy_from_storage, registry, )?; + let network = NetworkController::create_node(event_queue, rng); let reactor = Reactor { network, storage, - deploy_acceptor, - deploy_gossiper, - contract_runtime, + fake_transaction_acceptor, + transaction_gossiper, _storage_tempdir: storage_tempdir, }; - let effects = Effects::new(); - - Ok((reactor, effects)) + Ok((reactor, Effects::new())) } fn dispatch_event( @@ -227,205 +198,193 @@ impl reactor::Reactor for Reactor { rng: &mut NodeRng, event: Event, ) -> Effects { + trace!(?event); match event { Event::Storage(event) => reactor::wrap_effects( Event::Storage, self.storage.handle_event(effect_builder, rng, event), ), - Event::DeployAcceptor(event) => reactor::wrap_effects( - Event::DeployAcceptor, - self.deploy_acceptor + Event::TransactionAcceptor(event) => reactor::wrap_effects( + Event::TransactionAcceptor, + self.fake_transaction_acceptor .handle_event(effect_builder, rng, event), ), - Event::DeployGossiper(event) => reactor::wrap_effects( - Event::DeployGossiper, - self.deploy_gossiper + Event::TransactionGossiper(super::Event::ItemReceived { + item_id, + source, + target, + }) => { + // Ensure the correct target type for transactions is provided. 
+ assert_eq!(target, EXPECTED_GOSSIP_TARGET); + let event = super::Event::ItemReceived { + item_id, + source, + target, + }; + reactor::wrap_effects( + Event::TransactionGossiper, + self.transaction_gossiper + .handle_event(effect_builder, rng, event), + ) + } + Event::TransactionGossiper(event) => reactor::wrap_effects( + Event::TransactionGossiper, + self.transaction_gossiper .handle_event(effect_builder, rng, event), ), + Event::NetworkRequest(NetworkRequest::Gossip { + payload, + gossip_target, + count, + exclude, + auto_closing_responder, + }) => { + // Ensure the correct target type for transactions is carried through to the + // `Network`. + assert_eq!(gossip_target, EXPECTED_GOSSIP_TARGET); + let request = NetworkRequest::Gossip { + payload, + gossip_target, + count, + exclude, + auto_closing_responder, + }; + reactor::wrap_effects( + Event::Network, + self.network + .handle_event(effect_builder, rng, request.into()), + ) + } Event::NetworkRequest(request) => reactor::wrap_effects( Event::Network, self.network .handle_event(effect_builder, rng, request.into()), ), - Event::ControlAnnouncement(ctrl_ann) => { - unreachable!("unhandled control announcement: {}", ctrl_ann) - } - Event::NetworkAnnouncement(NetworkAnnouncement::MessageReceived { - sender, - payload, - }) => { - let reactor_event = match payload { - NodeMessage::GetRequest { - tag: Tag::Deploy, - serialized_id, - } => { - // Note: This is copied almost verbatim from the validator reactor and - // needs to be refactored. - - let deploy_hash = match bincode::deserialize(&serialized_id) { - Ok(hash) => hash, - Err(error) => { - error!( - "failed to decode {:?} from {}: {}", - serialized_id, sender, error - ); - return Effects::new(); - } - }; - match self - .storage - .handle_legacy_direct_deploy_request(deploy_hash) - { - // This functionality was moved out of the storage component and - // should be refactored ASAP. 
- Some(deploy) => { - match NodeMessage::new_get_response(&deploy) { - Ok(message) => { - return effect_builder - .send_message(sender, message) - .ignore(); - } - Err(error) => { - error!("failed to create get-response: {}", error); - return Effects::new(); - } - }; - } - None => { - debug!("failed to get {} for {}", deploy_hash, sender); - return Effects::new(); - } - } - } - NodeMessage::GetResponse { - tag: Tag::Deploy, - serialized_item, - } => { - let deploy = match bincode::deserialize(&serialized_item) { - Ok(deploy) => Box::new(deploy), - Err(error) => { - error!("failed to decode deploy from {}: {}", sender, error); - return Effects::new(); - } - }; - Event::DeployAcceptor(deploy_acceptor::Event::Accept { - deploy, - source: Source::Peer(sender), - responder: None, - }) - } - NodeMessage::DeployGossiper(message) => { - Event::DeployGossiper(super::Event::MessageReceived { sender, message }) - } - msg => panic!("should not get {}", msg), - }; - self.dispatch_event(effect_builder, rng, reactor_event) - } - Event::NetworkAnnouncement(NetworkAnnouncement::GossipOurAddress(_)) => { - unreachable!("should not receive announcements of type GossipOurAddress"); - } - Event::NetworkAnnouncement(NetworkAnnouncement::NewPeer(_)) => { - // We do not care about new peers in the gossiper test. 
- Effects::new() - } - Event::RpcServerAnnouncement(RpcServerAnnouncement::DeployReceived { - deploy, + Event::StorageRequest(request) => reactor::wrap_effects( + Event::Storage, + self.storage + .handle_event(effect_builder, rng, request.into()), + ), + Event::AcceptTransactionRequest(AcceptTransactionRequest { + transaction, + is_speculative, responder, }) => { - let event = deploy_acceptor::Event::Accept { - deploy, - source: Source::::Client, - responder, + assert!(!is_speculative); + let event = transaction_acceptor::Event::Accept { + transaction, + source: Source::Client, + maybe_responder: Some(responder), }; - self.dispatch_event(effect_builder, rng, Event::DeployAcceptor(event)) + self.dispatch_event(effect_builder, rng, Event::TransactionAcceptor(event)) } - Event::DeployAcceptorAnnouncement(DeployAcceptorAnnouncement::AcceptedNewDeploy { - deploy, - source, - }) => { + Event::TransactionAcceptorAnnouncement( + TransactionAcceptorAnnouncement::AcceptedNewTransaction { + transaction, + source, + }, + ) => { let event = super::Event::ItemReceived { - item_id: *deploy.id(), + item_id: transaction.gossip_id(), source, + target: transaction.gossip_target(), }; - self.dispatch_event(effect_builder, rng, Event::DeployGossiper(event)) - } - Event::DeployAcceptorAnnouncement(DeployAcceptorAnnouncement::InvalidDeploy { - deploy: _, - source: _, - }) => Effects::new(), - Event::DeployGossiperAnnouncement(_ann) => { - unreachable!("the deploy gossiper should never make an announcement") + self.dispatch_event(effect_builder, rng, Event::TransactionGossiper(event)) } + Event::TransactionAcceptorAnnouncement( + TransactionAcceptorAnnouncement::InvalidTransaction { + transaction: _, + source: _, + }, + ) => Effects::new(), + Event::TransactionGossiperAnnouncement(GossiperAnnouncement::NewItemBody { + item, + sender, + }) => reactor::wrap_effects( + Event::TransactionAcceptor, + self.fake_transaction_acceptor.handle_event( + effect_builder, + rng, + 
transaction_acceptor::Event::Accept { + transaction: *item, + source: Source::Peer(sender), + maybe_responder: None, + }, + ), + ), + Event::TransactionGossiperAnnouncement(_ann) => Effects::new(), Event::Network(event) => reactor::wrap_effects( Event::Network, self.network.handle_event(effect_builder, rng, event), ), - Event::ContractRuntime(event) => reactor::wrap_effects( - Event::ContractRuntime, - self.contract_runtime - .handle_event(effect_builder, rng, event), + Event::TransactionGossiperIncoming(incoming) => reactor::wrap_effects( + Event::TransactionGossiper, + self.transaction_gossiper + .handle_event(effect_builder, rng, incoming.into()), ), } } - - fn maybe_exit(&self) -> Option { - unimplemented!() - } } impl NetworkedReactor for Reactor { - type NodeId = NodeId; - fn node_id(&self) -> NodeId { self.network.node_id() } } -fn announce_deploy_received( - deploy: Box, - responder: Option>>, +fn announce_transaction_received( + transaction: &Transaction, ) -> impl FnOnce(EffectBuilder) -> Effects { + let txn = transaction.clone(); |effect_builder: EffectBuilder| { - effect_builder - .announce_deploy_received(deploy, responder) - .ignore() + effect_builder.try_accept_transaction(txn, false).ignore() } } -async fn run_gossip(rng: &mut TestRng, network_size: usize, deploy_count: usize) { - const TIMEOUT: Duration = Duration::from_secs(20); +async fn run_gossip(rng: &mut TestRng, network_size: usize, txn_count: usize) { + const TIMEOUT: Duration = Duration::from_secs(30); const QUIET_FOR: Duration = Duration::from_millis(50); NetworkController::::create_active(); - let mut network = Network::::new(); + let mut network = TestingNetwork::::new(); // Add `network_size` nodes. let node_ids = network.add_nodes(rng, network_size).await; - // Create `deploy_count` random deploys. 
- let (all_deploy_hashes, mut deploys): (BTreeSet<_>, Vec<_>) = iter::repeat_with(|| { - let deploy = Box::new(Deploy::random(rng)); - (*deploy.id(), deploy) + // Create `txn_count` random transactions. + let (all_txn_hashes, mut txns): (BTreeSet<_>, Vec<_>) = iter::repeat_with(|| { + let txn = Transaction::random(rng); + (txn.hash(), txn) }) - .take(deploy_count) + .take(txn_count) .unzip(); - // Give each deploy to a randomly-chosen node to be gossiped. - for deploy in deploys.drain(..) { + // Give each transaction to a randomly-chosen node to be gossiped. + for txn in txns.drain(..) { let index: usize = rng.gen_range(0..network_size); network - .process_injected_effect_on(&node_ids[index], announce_deploy_received(deploy, None)) + .process_injected_effect_on(&node_ids[index], announce_transaction_received(&txn)) .await; } - // Check every node has every deploy stored locally. - let all_deploys_held = |nodes: &HashMap>>| { + // Check every node has every transaction stored locally. + let all_txns_held = |nodes: &HashMap>>| { nodes.values().all(|runner| { - let hashes = runner.reactor().inner().storage.get_all_deploy_hashes(); - all_deploy_hashes == hashes + for hash in all_txn_hashes.iter() { + if runner + .reactor() + .inner() + .storage + .get_transaction_by_hash(*hash) + .is_none() + { + return false; + } + } + true }) }; - network.settle_on(rng, all_deploys_held, TIMEOUT).await; + network.settle_on(rng, all_txns_held, TIMEOUT).await; // Ensure all responders are called before dropping the network. 
network.settle(rng, QUIET_FOR, TIMEOUT).await; @@ -435,14 +394,14 @@ async fn run_gossip(rng: &mut TestRng, network_size: usize, deploy_count: usize) #[tokio::test] async fn should_gossip() { - const NETWORK_SIZES: [usize; 3] = [2, 5, 20]; - const DEPLOY_COUNTS: [usize; 3] = [1, 10, 30]; + const NETWORK_SIZES: [usize; 3] = [2, 5, 10]; + const TXN_COUNTS: [usize; 3] = [1, 10, 30]; - let mut rng = crate::new_rng(); + let rng = &mut TestRng::new(); for network_size in &NETWORK_SIZES { - for deploy_count in &DEPLOY_COUNTS { - run_gossip(&mut rng, *network_size, *deploy_count).await + for txn_count in &TXN_COUNTS { + run_gossip(rng, *network_size, *txn_count).await } } } @@ -454,20 +413,20 @@ async fn should_get_from_alternate_source() { const TIMEOUT: Duration = Duration::from_secs(2); NetworkController::::create_active(); - let mut network = Network::::new(); - let mut rng = crate::new_rng(); + let mut network = TestingNetwork::::new(); + let rng = &mut TestRng::new(); // Add `NETWORK_SIZE` nodes. - let node_ids = network.add_nodes(&mut rng, NETWORK_SIZE).await; + let node_ids = network.add_nodes(rng, NETWORK_SIZE).await; - // Create random deploy. - let deploy = Box::new(Deploy::random(&mut rng)); - let deploy_id = *deploy.id(); + // Create random transaction. + let txn = Transaction::random(rng); + let txn_hash = txn.hash(); - // Give the deploy to nodes 0 and 1 to be gossiped. + // Give the transaction to nodes 0 and 1 to be gossiped. for node_id in node_ids.iter().take(2) { network - .process_injected_effect_on(&node_id, announce_deploy_received(deploy.clone(), None)) + .process_injected_effect_on(node_id, announce_transaction_received(&txn)) .await; } @@ -476,7 +435,7 @@ async fn should_get_from_alternate_source() { matches!(event, Event::NetworkRequest(NetworkRequest::Gossip { .. 
})) }; network - .crank_until(&node_ids[0], &mut rng, made_gossip_request, TIMEOUT) + .crank_until(&node_ids[0], rng, made_gossip_request, TIMEOUT) .await; assert!(network.remove_node(&node_ids[0]).is_some()); debug!("removed node {}", &node_ids[0]); @@ -486,7 +445,8 @@ async fn should_get_from_alternate_source() { let sent_gossip_response = move |event: &Event| -> bool { match event { Event::NetworkRequest(NetworkRequest::SendMessage { dest, payload, .. }) => { - if let NodeMessage::DeployGossiper(Message::GossipResponse { .. }) = **payload { + if let NodeMessage::TransactionGossiper(Message::GossipResponse { .. }) = **payload + { **dest == node_id_0 } else { false @@ -496,31 +456,28 @@ async fn should_get_from_alternate_source() { } }; network - .crank_until(&node_ids[2], &mut rng, sent_gossip_response, TIMEOUT) + .crank_until(&node_ids[2], rng, sent_gossip_response, TIMEOUT) .await; - // Run nodes 1 and 2 until settled. Node 2 will be waiting for the deploy from node 0. - network.settle(&mut rng, POLL_DURATION, TIMEOUT).await; + // Run nodes 1 and 2 until settled. Node 2 will be waiting for the transaction from node 0. + network.settle(rng, POLL_DURATION, TIMEOUT).await; - // Advance time to trigger node 2's timeout causing it to request the deploy from node 1. - let secs_to_advance = Config::default().get_remainder_timeout_secs(); - time::pause(); - time::advance(Duration::from_secs(secs_to_advance)).await; - time::resume(); - debug!("advanced time by {} secs", secs_to_advance); + // Advance time to trigger node 2's timeout causing it to request the transaction from node 1. + let duration_to_advance = Config::default().get_remainder_timeout(); + testing::advance_time(duration_to_advance.into()).await; - // Check node 0 has the deploy stored locally. - let deploy_held = |nodes: &HashMap>>| { + // Check node 0 has the transaction stored locally. 
+ let txn_held = |nodes: &HashMap>>| { let runner = nodes.get(&node_ids[2]).unwrap(); runner .reactor() .inner() .storage - .get_deploy_by_hash(deploy_id) - .map(|retrieved_deploy| retrieved_deploy == *deploy) + .get_transaction_by_hash(txn_hash) + .map(|retrieved_txn| retrieved_txn == txn) .unwrap_or_default() }; - network.settle_on(&mut rng, deploy_held, TIMEOUT).await; + network.settle_on(rng, txn_held, TIMEOUT).await; NetworkController::::remove_active(); } @@ -531,35 +488,33 @@ async fn should_timeout_gossip_response() { const TIMEOUT: Duration = Duration::from_secs(2); NetworkController::::create_active(); - let mut network = Network::::new(); - let mut rng = crate::new_rng(); + let mut network = TestingNetwork::::new(); + let rng = &mut TestRng::new(); // The target number of peers to infect with a given piece of data. let infection_target = Config::default().infection_target(); // Add `infection_target + 1` nodes. - let mut node_ids = network - .add_nodes(&mut rng, infection_target as usize + 1) - .await; + let mut node_ids = network.add_nodes(rng, infection_target as usize + 1).await; - // Create random deploy. - let deploy = Box::new(Deploy::random(&mut rng)); - let deploy_id = *deploy.id(); + // Create random transaction. + let txn = Transaction::random(rng); + let txn_hash = txn.hash(); - // Give the deploy to node 0 to be gossiped. + // Give the transaction to node 0 to be gossiped. network - .process_injected_effect_on(&node_ids[0], announce_deploy_received(deploy.clone(), None)) + .process_injected_effect_on(&node_ids[0], announce_transaction_received(&txn)) .await; // Run node 0 until it has sent the gossip requests. let made_gossip_request = |event: &Event| -> bool { matches!( event, - Event::DeployGossiper(super::Event::GossipedTo { .. }) + Event::TransactionGossiper(super::Event::GossipedTo { .. 
}) ) }; network - .crank_until(&node_ids[0], &mut rng, made_gossip_request, TIMEOUT) + .crank_until(&node_ids[0], rng, made_gossip_request, TIMEOUT) .await; // Give node 0 time to set the timeouts before advancing the clock. time::sleep(PAUSE_DURATION).await; @@ -570,30 +525,242 @@ async fn should_timeout_gossip_response() { debug!("removed node {}", node_id); } for _ in 0..infection_target { - let (node_id, _runner) = network.add_node(&mut rng).await.unwrap(); + let (node_id, _runner) = network.add_node(rng).await.unwrap(); node_ids.push(node_id); } // Advance time to trigger node 0's timeout causing it to gossip to the new nodes. - let secs_to_advance = Config::default().gossip_request_timeout_secs(); - time::pause(); - time::advance(Duration::from_secs(secs_to_advance)).await; - time::resume(); - debug!("advanced time by {} secs", secs_to_advance); - - // Check every node has every deploy stored locally. - let deploy_held = |nodes: &HashMap>>| { + let duration_to_advance = Config::default().gossip_request_timeout(); + testing::advance_time(duration_to_advance.into()).await; + + // Check every node has every transaction stored locally. 
+ let txn_held = |nodes: &HashMap>>| { nodes.values().all(|runner| { runner .reactor() .inner() .storage - .get_deploy_by_hash(deploy_id) - .map(|retrieved_deploy| retrieved_deploy == *deploy) + .get_transaction_by_hash(txn_hash) + .map(|retrieved_txn| retrieved_txn == txn) .unwrap_or_default() }) }; - network.settle_on(&mut rng, deploy_held, TIMEOUT).await; + network.settle_on(rng, txn_held, TIMEOUT).await; + + NetworkController::::remove_active(); +} + +#[tokio::test] +async fn should_timeout_new_item_from_peer() { + const NETWORK_SIZE: usize = 2; + const VALIDATE_AND_STORE_TIMEOUT: Duration = Duration::from_secs(1); + const TIMEOUT: Duration = Duration::from_secs(5); + + NetworkController::::create_active(); + let mut network = TestingNetwork::::new(); + let rng = &mut TestRng::new(); + + let node_ids = network.add_nodes(rng, NETWORK_SIZE).await; + let node_0 = node_ids[0]; + let node_1 = node_ids[1]; + // Set the timeout on node 0 low for testing. + let reactor_0 = network + .nodes_mut() + .get_mut(&node_0) + .unwrap() + .reactor_mut() + .inner_mut(); + reactor_0.transaction_gossiper.validate_and_store_timeout = VALIDATE_AND_STORE_TIMEOUT; + // Switch off the fake transaction acceptor on node 0 so that once the new transaction is + // received, no component triggers the `ItemReceived` event. + reactor_0.fake_transaction_acceptor.set_active(false); + + let txn = Transaction::random(rng); + + // Give the transaction to node 1 to gossip to node 0. + network + .process_injected_effect_on(&node_1, announce_transaction_received(&txn)) + .await; + + // Run the network until node 1 has sent the gossip request and node 0 has handled it to the + // point where the `NewItemBody` announcement has been received). + let got_new_item_body_announcement = |event: &Event| -> bool { + matches!( + event, + Event::TransactionGossiperAnnouncement(GossiperAnnouncement::NewItemBody { .. 
}) + ) + }; + network + .crank_all_until(&node_0, rng, got_new_item_body_announcement, TIMEOUT) + .await; + + // Run node 0 until it receives its own `CheckItemReceivedTimeout` event. + let received_timeout_event = |event: &Event| -> bool { + matches!( + event, + Event::TransactionGossiper(super::Event::CheckItemReceivedTimeout { .. }) + ) + }; + network + .crank_until(&node_0, rng, received_timeout_event, TIMEOUT) + .await; + + // Ensure node 0 makes a `FinishedGossiping` announcement. + let made_finished_gossiping_announcement = |event: &Event| -> bool { + matches!( + event, + Event::TransactionGossiperAnnouncement(GossiperAnnouncement::FinishedGossiping(_)) + ) + }; + network + .crank_until(&node_0, rng, made_finished_gossiping_announcement, TIMEOUT) + .await; + + NetworkController::::remove_active(); +} + +#[tokio::test] +async fn should_not_gossip_old_stored_item_again() { + const NETWORK_SIZE: usize = 2; + const TIMEOUT: Duration = Duration::from_secs(2); + + NetworkController::::create_active(); + let mut network = TestingNetwork::::new(); + let rng = &mut TestRng::new(); + + let node_ids = network.add_nodes(rng, NETWORK_SIZE).await; + let node_0 = node_ids[0]; + + let txn = Transaction::random(rng); + + // Store the transaction on node 0. + let store_txn = |effect_builder: EffectBuilder| { + effect_builder + .put_transaction_to_storage(txn.clone()) + .ignore() + }; + network.process_injected_effect_on(&node_0, store_txn).await; + + // Node 1 sends a gossip message to node 0. + network + .process_injected_effect_on(&node_0, |effect_builder| { + let event = Event::TransactionGossiperIncoming(GossiperIncoming { + sender: node_ids[1], + message: Box::new(Message::Gossip(txn.gossip_id())), + }); + effect_builder + .into_inner() + .schedule(event, QueueKind::Gossip) + .ignore() + }) + .await; + + // Run node 0 until it has handled the gossip message and checked if the transaction is already + // stored. 
+ let checked_if_stored = |event: &Event| -> bool { + matches!( + event, + Event::TransactionGossiper(super::Event::IsStoredResult { .. }) + ) + }; + network + .crank_until(&node_0, rng, checked_if_stored, TIMEOUT) + .await; + // Assert the message did not cause a new entry in the gossip table and spawned no new events. + assert!(network + .nodes() + .get(&node_0) + .unwrap() + .reactor() + .inner() + .transaction_gossiper + .table + .is_empty()); + assert!(matches!( + network.crank(&node_0, rng).await, + TryCrankOutcome::NoEventsToProcess + )); NetworkController::::remove_active(); } + +enum Unexpected { + Response, + GetItem, + Item, +} + +async fn should_ignore_unexpected_message(message_type: Unexpected) { + const NETWORK_SIZE: usize = 2; + const TIMEOUT: Duration = Duration::from_secs(2); + + NetworkController::::create_active(); + let mut network = TestingNetwork::::new(); + let rng = &mut TestRng::new(); + + let node_ids = network.add_nodes(rng, NETWORK_SIZE).await; + let node_0 = node_ids[0]; + + let txn = Box::new(Transaction::random(rng)); + + let message = match message_type { + Unexpected::Response => Message::GossipResponse { + item_id: txn.gossip_id(), + is_already_held: false, + }, + Unexpected::GetItem => Message::GetItem(txn.gossip_id()), + Unexpected::Item => Message::Item(txn), + }; + + // Node 1 sends an unexpected message to node 0. + network + .process_injected_effect_on(&node_0, |effect_builder| { + let event = Event::TransactionGossiperIncoming(GossiperIncoming { + sender: node_ids[1], + message: Box::new(message), + }); + effect_builder + .into_inner() + .schedule(event, QueueKind::Gossip) + .ignore() + }) + .await; + + // Run node 0 until it has handled the gossip message. 
+ let received_gossip_message = + |event: &Event| -> bool { matches!(event, Event::TransactionGossiperIncoming(..)) }; + network + .crank_until(&node_0, rng, received_gossip_message, TIMEOUT) + .await; + // Assert the message did not cause a new entry in the gossip table and spawned no new events. + assert!(network + .nodes() + .get(&node_0) + .unwrap() + .reactor() + .inner() + .transaction_gossiper + .table + .is_empty()); + assert!(matches!( + network.crank(&node_0, rng).await, + TryCrankOutcome::NoEventsToProcess + )); + + NetworkController::::remove_active(); +} + +#[tokio::test] +async fn should_ignore_unexpected_response_message() { + should_ignore_unexpected_message(Unexpected::Response).await +} + +#[tokio::test] +async fn should_ignore_unexpected_get_item_message() { + should_ignore_unexpected_message(Unexpected::GetItem).await +} + +#[tokio::test] +async fn should_ignore_unexpected_item_message() { + should_ignore_unexpected_message(Unexpected::Item).await +} diff --git a/node/src/components/in_memory_network.rs b/node/src/components/in_memory_network.rs index 849cbfb5b1..6610e13d9d 100644 --- a/node/src/components/in_memory_network.rs +++ b/node/src/components/in_memory_network.rs @@ -76,10 +76,10 @@ //! } //! //! #[derive(Debug, From)] -//! enum ShouterEvent { +//! enum ShouterEvent { //! #[from] //! // We received a new message via the network. -//! Net(NetworkAnnouncement), +//! Net(NetworkAnnouncement), //! // Ready to send another message. //! #[from] //! ReadyToSend, @@ -87,8 +87,8 @@ //! //! impl Shouter { //! /// Creates a new shouter. -//! fn new(effect_builder: EffectBuilder) -//! -> (Self, Effects>) { +//! fn new(effect_builder: EffectBuilder) +//! -> (Self, Effects>) { //! (Shouter { //! whispers: Vec::new(), //! shouts: Vec::new(), @@ -100,9 +100,9 @@ //! // Besides its own events, the shouter is capable of receiving network messages. //! impl Component for Shouter //! where -//! REv: From> + Send, +//! REv: From> + Send, //! { -//! 
type Event = ShouterEvent; +//! type Event = ShouterEvent; //! //! fn handle_event(&mut self, //! effect_builder: EffectBuilder, @@ -148,13 +148,13 @@ //! enum Event { //! /// Asked to perform a network action. //! #[from] -//! Request(NetworkRequest), +//! Request(NetworkRequest), //! /// Event for the shouter. //! #[from] -//! Shouter(ShouterEvent), +//! Shouter(ShouterEvent), //! /// Notified of some network event. //! #[from] -//! Announcement(NetworkAnnouncement) +//! Announcement(NetworkAnnouncement) //! }; //! # //! # impl Display for Event { @@ -163,9 +163,8 @@ //! # } //! # } //! # -//! # impl Display for ShouterEvent -//! # where I: Debug, -//! # P: Debug +//! # impl

Display for ShouterEvent

+//! # where P: Debug, //! # { //! # fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result { //! # Debug::fmt(self, fmt) @@ -212,8 +211,6 @@ //! } //! //! impl NetworkedReactor for Reactor { -//! type NodeId = NodeId; -//! //! fn node_id(&self) -> NodeId { //! self.net.node_id() //! } @@ -283,7 +280,6 @@ use std::{ any::Any, cell::RefCell, collections::{HashMap, HashSet}, - convert::Infallible, fmt::{self, Display, Formatter}, sync::{Arc, RwLock}, }; @@ -293,28 +289,30 @@ use serde::Serialize; use tokio::sync::mpsc::{self, error::SendError}; use tracing::{debug, error, info, warn}; +use casper_types::testing::TestRng; + use crate::{ components::Component, - effect::{ - announcements::NetworkAnnouncement, requests::NetworkRequest, EffectBuilder, EffectExt, - Effects, - }, + effect::{requests::NetworkRequest, EffectBuilder, EffectExt, Effects}, logging, reactor::{EventQueueHandle, QueueKind}, - testing::TestRng, types::NodeId, NodeRng, }; +use super::network::FromIncoming; + +const COMPONENT_NAME: &str = "in_memory_network"; + /// A network. type Network

= Arc>>>; /// An in-memory network events. #[derive(Debug, Serialize)] -pub struct Event

(NetworkRequest); +pub(crate) struct Event

(NetworkRequest

); -impl

From> for Event

{ - fn from(req: NetworkRequest) -> Self { +impl

From> for Event

{ + fn from(req: NetworkRequest

) -> Self { Event(req) } } @@ -335,7 +333,7 @@ thread_local! { /// The network controller is used to control the network topology (e.g. adding and removing nodes). #[derive(Debug, Default)] -pub struct NetworkController

{ +pub(crate) struct NetworkController

{ /// Channels for network communication. nodes: Network

, } @@ -357,7 +355,7 @@ where /// # Panics /// /// Panics if the internal lock has been poisoned. - pub fn create_active() { + pub(crate) fn create_active() { let _ = logging::init(); ACTIVE_NETWORK .with(|active_network| active_network.borrow_mut().replace(Box::new(Self::new()))); @@ -368,8 +366,8 @@ where /// # Panics /// /// Panics if the internal lock has been poisoned, a network with the wrong type of message was - /// removed or if there was no network at at all. - pub fn remove_active() { + /// removed or if there was no network at all. + pub(crate) fn remove_active() { assert!( ACTIVE_NETWORK.with(|active_network| { active_network @@ -388,12 +386,12 @@ where /// /// Panics if the internal lock has been poisoned, there is no active network or the active /// network is not of the correct message type. - pub fn create_node( + pub(crate) fn create_node( event_queue: EventQueueHandle, rng: &mut TestRng, ) -> InMemoryNetwork

where - REv: From> + Send, + REv: Send + FromIncoming

, { ACTIVE_NETWORK.with(|active_network| { active_network @@ -412,7 +410,7 @@ where /// /// Panics if the internal lock has been poisoned, the active network is not of the correct /// message type, or the node to remove doesn't exist. - pub fn remove_node(node_id: &NodeId) { + pub(crate) fn remove_node(node_id: &NodeId) { ACTIVE_NETWORK.with(|active_network| { if let Some(active_network) = active_network.borrow_mut().as_mut() { active_network @@ -436,7 +434,7 @@ where rng: &mut TestRng, ) -> InMemoryNetwork

where - REv: From> + Send, + REv: Send + FromIncoming

, { InMemoryNetwork::new_with_data(event_queue, NodeId::random(rng), self.nodes.clone()) } @@ -444,7 +442,7 @@ where /// Networking component connected to an in-memory network. #[derive(Debug)] -pub struct InMemoryNetwork

{ +pub(crate) struct InMemoryNetwork

{ /// Our node id. node_id: NodeId, @@ -459,9 +457,9 @@ where /// Creates a new in-memory network node. /// /// This function is an alias of `NetworkController::create_node_local`. - pub fn new(event_queue: EventQueueHandle, rng: &mut NodeRng) -> Self + pub(crate) fn new(event_queue: EventQueueHandle, rng: &mut NodeRng) -> Self where - REv: From> + Send, + REv: Send + FromIncoming

, { NetworkController::create_node(event_queue, rng) } @@ -473,7 +471,7 @@ where nodes: Network

, ) -> Self where - REv: From> + Send, + REv: Send + FromIncoming

, { let (sender, receiver) = mpsc::unbounded_channel(); @@ -491,7 +489,7 @@ where /// Returns this node's ID. #[inline] - pub fn node_id(&self) -> NodeId { + pub(crate) fn node_id(&self) -> NodeId { self.node_id } } @@ -529,7 +527,6 @@ where P: Display + Clone, { type Event = Event

; - type ConstructionError = Infallible; fn handle_event( &mut self, @@ -541,7 +538,8 @@ where NetworkRequest::SendMessage { dest, payload, - responder, + respond_after_queueing: _, + auto_closing_responder, } => { if *dest == self.node_id { panic!("can't send message to self"); @@ -553,9 +551,13 @@ where error!("network lock has been poisoned") }; - responder.respond(()).ignore() + auto_closing_responder.respond(()).ignore() } - NetworkRequest::Broadcast { payload, responder } => { + NetworkRequest::ValidatorBroadcast { + payload, + auto_closing_responder, + era_id: _, + } => { if let Ok(guard) = self.nodes.read() { for dest in guard.keys().filter(|&node_id| node_id != &self.node_id) { self.send(&guard, *dest, *payload.clone()); @@ -564,13 +566,14 @@ where error!("network lock has been poisoned") }; - responder.respond(()).ignore() + auto_closing_responder.respond(()).ignore() } NetworkRequest::Gossip { payload, count, exclude, - responder, + auto_closing_responder, + gossip_target: _, } => { if let Ok(guard) = self.nodes.read() { let chosen: HashSet<_> = guard @@ -584,25 +587,29 @@ where for dest in chosen.iter() { self.send(&guard, *dest, *payload.clone()); } - responder.respond(chosen).ignore() + auto_closing_responder.respond(chosen).ignore() } else { error!("network lock has been poisoned"); - responder.respond(Default::default()).ignore() + auto_closing_responder.respond(Default::default()).ignore() } } } } + + fn name(&self) -> &str { + COMPONENT_NAME + } } async fn receiver_task( event_queue: EventQueueHandle, mut receiver: mpsc::UnboundedReceiver<(NodeId, P)>, ) where - REv: From>, + REv: FromIncoming

, P: 'static + Send, { while let Some((sender, payload)) = receiver.recv().await { - let announce = NetworkAnnouncement::MessageReceived { sender, payload }; + let announce: REv = REv::from_incoming(sender, payload); event_queue .schedule(announce, QueueKind::NetworkIncoming) diff --git a/node/src/components/linear_chain.rs b/node/src/components/linear_chain.rs deleted file mode 100644 index 4d6808ea1b..0000000000 --- a/node/src/components/linear_chain.rs +++ /dev/null @@ -1,225 +0,0 @@ -mod event; -mod metrics; -mod pending_signatures; -mod signature; -mod signature_cache; -mod state; - -use datasize::DataSize; -use std::{convert::Infallible, fmt::Display, marker::PhantomData}; - -use itertools::Itertools; -use prometheus::Registry; -use tracing::{debug, error}; - -use self::{ - metrics::LinearChainMetrics, - state::{Outcome, Outcomes}, -}; -use super::Component; -use crate::{ - effect::{ - announcements::LinearChainAnnouncement, - requests::{ - ChainspecLoaderRequest, ContractRuntimeRequest, LinearChainRequest, NetworkRequest, - StorageRequest, - }, - EffectBuilder, EffectExt, EffectResultExt, Effects, - }, - protocol::Message, - types::BlockByHeight, - NodeRng, -}; -use casper_types::ProtocolVersion; - -pub use event::Event; -use state::LinearChain; - -#[derive(DataSize, Debug)] -pub(crate) struct LinearChainComponent { - linear_chain_state: LinearChain, - #[data_size(skip)] - metrics: LinearChainMetrics, - _marker: PhantomData, -} - -impl LinearChainComponent { - pub(crate) fn new( - registry: &Registry, - protocol_version: ProtocolVersion, - auction_delay: u64, - unbonding_delay: u64, - ) -> Result { - let metrics = LinearChainMetrics::new(registry)?; - let linear_chain_state = LinearChain::new(protocol_version, auction_delay, unbonding_delay); - Ok(LinearChainComponent { - linear_chain_state, - metrics, - _marker: PhantomData, - }) - } -} - -fn outcomes_to_effects( - effect_builder: EffectBuilder, - outcomes: Outcomes, -) -> Effects> -where - REv: From - + 
From> - + From - + From - + From - + Send, - I: Display + Send + 'static, -{ - outcomes - .into_iter() - .map(|outcome| match outcome { - Outcome::StoreBlockSignatures(block_signatures) => effect_builder - .put_signatures_to_storage(block_signatures) - .ignore(), - Outcome::StoreExecutionResults(block_hash, execution_results) => effect_builder - .put_execution_results_to_storage(block_hash, execution_results) - .ignore(), - Outcome::StoreBlock(block) => effect_builder - .put_block_to_storage(block.clone()) - .event(move |_| Event::PutBlockResult { block }), - Outcome::Gossip(fs) => { - let message = Message::FinalitySignature(fs); - effect_builder.broadcast_message(message).ignore() - } - Outcome::AnnounceSignature(fs) => { - effect_builder.announce_finality_signature(fs).ignore() - } - Outcome::AnnounceBlock(block) => effect_builder.announce_block_added(block).ignore(), - Outcome::LoadSignatures(fs) => effect_builder - .get_signatures_from_storage(fs.block_hash) - .event(move |maybe_signatures| { - Event::GetStoredFinalitySignaturesResult(fs, maybe_signatures.map(Box::new)) - }), - Outcome::VerifyIfBonded { - new_fs, - known_fs, - protocol_version, - latest_state_root_hash, - } => effect_builder - .is_bonded_validator( - new_fs.public_key.clone(), - new_fs.era_id, - latest_state_root_hash, - protocol_version, - ) - .result( - |is_bonded| Event::IsBonded(known_fs, new_fs, is_bonded), - |error| { - error!(%error, "checking in future eras returned an error."); - panic!("couldn't check if validator is bonded") - }, - ), - }) - .concat() -} - -impl Component for LinearChainComponent -where - REv: From - + From> - + From - + From - + From - + Send, - I: Display + Send + 'static, -{ - type Event = Event; - type ConstructionError = Infallible; - - fn handle_event( - &mut self, - effect_builder: EffectBuilder, - _rng: &mut NodeRng, - event: Self::Event, - ) -> Effects { - match event { - Event::Request(LinearChainRequest::BlockRequest(block_hash, sender)) => async move { - 
match effect_builder.get_block_from_storage(block_hash).await { - None => debug!("failed to get {} for {}", block_hash, sender), - Some(block) => match Message::new_get_response(&block) { - Ok(message) => effect_builder.send_message(sender, message).await, - Err(error) => error!("failed to create get-response {}", error), - }, - } - } - .ignore(), - Event::Request(LinearChainRequest::BlockAtHeightLocal(height, responder)) => { - async move { - let block = effect_builder - .get_block_at_height_from_storage(height) - .await; - responder.respond(block).await - } - .ignore() - } - Event::Request(LinearChainRequest::BlockAtHeight(height, sender)) => async move { - let block_by_height = match effect_builder - .get_block_at_height_from_storage(height) - .await - { - None => { - debug!("failed to get {} for {}", height, sender); - BlockByHeight::Absent(height) - } - Some(block) => BlockByHeight::new(block), - }; - match Message::new_get_response(&block_by_height) { - Ok(message) => effect_builder.send_message(sender, message).await, - Err(error) => { - error!("failed to create get-response {}", error); - } - } - } - .ignore(), - Event::NewLinearChainBlock { - block, - execution_results, - } => { - let outcomes = self - .linear_chain_state - .handle_new_block(block, execution_results); - outcomes_to_effects(effect_builder, outcomes) - } - Event::PutBlockResult { block } => { - let completion_duration = block.header().timestamp().elapsed().millis(); - self.metrics - .block_completion_duration - .set(completion_duration as i64); - let outcomes = self.linear_chain_state.handle_put_block(block); - outcomes_to_effects(effect_builder, outcomes) - } - Event::FinalitySignatureReceived(fs, gossiped) => { - let outcomes = self - .linear_chain_state - .handle_finality_signature(fs, gossiped); - outcomes_to_effects(effect_builder, outcomes) - } - Event::GetStoredFinalitySignaturesResult(fs, maybe_signatures) => { - let outcomes = self - .linear_chain_state - 
.handle_cached_signatures(maybe_signatures, fs); - outcomes_to_effects(effect_builder, outcomes) - } - Event::IsBonded(maybe_known_signatures, new_fs, is_bonded) => { - let outcomes = self.linear_chain_state.handle_is_bonded( - maybe_known_signatures, - new_fs, - is_bonded, - ); - outcomes_to_effects(effect_builder, outcomes) - } - Event::KnownLinearChainBlock(block) => { - self.linear_chain_state.set_latest_block(*block); - Effects::new() - } - } - } -} diff --git a/node/src/components/linear_chain/event.rs b/node/src/components/linear_chain/event.rs deleted file mode 100644 index 96238155b0..0000000000 --- a/node/src/components/linear_chain/event.rs +++ /dev/null @@ -1,75 +0,0 @@ -use std::{ - collections::HashMap, - fmt::{self, Display, Formatter}, -}; - -use casper_types::ExecutionResult; -use derive_more::From; - -use crate::{ - effect::requests::LinearChainRequest, - types::{Block, BlockSignatures, DeployHash, FinalitySignature}, -}; - -#[derive(Debug, From)] -pub enum Event { - /// A linear chain request issued by another node in the network. - #[from] - Request(LinearChainRequest), - /// New linear chain block has been produced. - NewLinearChainBlock { - /// The block. - block: Box, - /// The deploys' execution results. - execution_results: HashMap, - }, - /// Linear chain block we already know but we may refinalize it when syncing protocol state. - KnownLinearChainBlock(Box), - /// Finality signature received. - /// Not necessarily _new_ finality signature. - FinalitySignatureReceived(Box, bool), - /// The result of putting a block to storage. - PutBlockResult { - /// The block. - block: Box, - }, - /// The result of requesting finality signatures from storage to add pending signatures. - GetStoredFinalitySignaturesResult(Box, Option>), - /// Result of testing if creator of the finality signature is bonded validator. 
- IsBonded(Option>, Box, bool), -} - -impl Display for Event { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - match self { - Event::Request(req) => write!(f, "linear chain request: {}", req), - Event::NewLinearChainBlock { block, .. } => { - write!(f, "linear chain new block: {}", block.hash()) - } - Event::KnownLinearChainBlock(block) => { - write!(f, "linear chain known block: {}", block.hash()) - } - Event::FinalitySignatureReceived(fs, gossiped) => write!( - f, - "linear-chain new finality signature for block: {}, from: {}, external: {}", - fs.block_hash, fs.public_key, gossiped - ), - Event::PutBlockResult { .. } => write!(f, "linear-chain put-block result"), - Event::GetStoredFinalitySignaturesResult(finality_signature, maybe_signatures) => { - write!( - f, - "linear chain get-stored-finality-signatures result for {} found: {}", - finality_signature.block_hash, - maybe_signatures.is_some(), - ) - } - Event::IsBonded(_block, fs, is_bonded) => { - write!( - f, - "linear chain is-bonded for era {} validator {}, is_bonded: {}", - fs.era_id, fs.public_key, is_bonded - ) - } - } - } -} diff --git a/node/src/components/linear_chain/metrics.rs b/node/src/components/linear_chain/metrics.rs deleted file mode 100644 index 25d05076d8..0000000000 --- a/node/src/components/linear_chain/metrics.rs +++ /dev/null @@ -1,30 +0,0 @@ -use prometheus::{IntGauge, Registry}; - -use crate::unregister_metric; - -#[derive(Debug)] -pub(super) struct LinearChainMetrics { - pub(super) block_completion_duration: IntGauge, - /// Prometheus registry used to publish metrics. 
- registry: Registry, -} - -impl LinearChainMetrics { - pub(super) fn new(registry: &Registry) -> Result { - let block_completion_duration = IntGauge::new( - "block_completion_duration", - "duration of time from consensus through execution for a block", - )?; - registry.register(Box::new(block_completion_duration.clone()))?; - Ok(Self { - block_completion_duration, - registry: registry.clone(), - }) - } -} - -impl Drop for LinearChainMetrics { - fn drop(&mut self) { - unregister_metric!(self.registry, self.block_completion_duration); - } -} diff --git a/node/src/components/linear_chain/pending_signatures.rs b/node/src/components/linear_chain/pending_signatures.rs deleted file mode 100644 index 6301930363..0000000000 --- a/node/src/components/linear_chain/pending_signatures.rs +++ /dev/null @@ -1,176 +0,0 @@ -use datasize::DataSize; -use itertools::Itertools; -use std::collections::HashMap; -use tracing::warn; - -use super::signature::Signature; -use crate::types::BlockHash; -use casper_types::PublicKey; - -/// The maximum number of finality signatures from a single validator we keep in memory while -/// waiting for their block. -const MAX_PENDING_FINALITY_SIGNATURES_PER_VALIDATOR: usize = 1000; - -/// Finality signatures to be inserted in a block once it is available. -/// Keyed by public key of the creator to limit the maximum amount of pending signatures. -#[derive(DataSize, Debug, Default)] -pub(super) struct PendingSignatures { - pending_finality_signatures: HashMap>, -} - -impl PendingSignatures { - pub(super) fn new() -> Self { - PendingSignatures { - pending_finality_signatures: HashMap::new(), - } - } - - // Checks if we have already enqueued that finality signature. - pub(super) fn has_finality_signature( - &self, - creator: &PublicKey, - block_hash: &BlockHash, - ) -> bool { - self.pending_finality_signatures - .get(creator) - .map_or(false, |sigs| sigs.contains_key(block_hash)) - } - - /// Returns signatures for `block_hash` that are still pending. 
- pub(super) fn collect_pending(&mut self, block_hash: &BlockHash) -> Vec { - let pending_sigs = self - .pending_finality_signatures - .values_mut() - .filter_map(|sigs| sigs.remove(&block_hash)) - .collect_vec(); - self.remove_empty_entries(); - pending_sigs - } - - /// Adds finality signature to the pending collection. - /// Returns `true` if it was added. - pub(super) fn add(&mut self, signature: Signature) -> bool { - let public_key = signature.public_key(); - let block_hash = signature.block_hash(); - let sigs = self - .pending_finality_signatures - .entry(public_key.clone()) - .or_default(); - // Limit the memory we use for storing unknown signatures from each validator. - if sigs.len() >= MAX_PENDING_FINALITY_SIGNATURES_PER_VALIDATOR { - warn!( - %block_hash, %public_key, - "received too many finality signatures for unknown blocks" - ); - return false; - } - // Add the pending signature. - sigs.insert(block_hash, signature); - true - } - - pub(super) fn remove( - &mut self, - public_key: &PublicKey, - block_hash: &BlockHash, - ) -> Option { - let validator_sigs = self.pending_finality_signatures.get_mut(public_key)?; - let sig = validator_sigs.remove(&block_hash); - self.remove_empty_entries(); - sig - } - - /// Removes all entries for which there are no finality signatures. 
- fn remove_empty_entries(&mut self) { - self.pending_finality_signatures - .retain(|_, sigs| !sigs.is_empty()); - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{crypto::generate_ed25519_keypair, testing::TestRng, types::FinalitySignature}; - use casper_types::EraId; - - use std::collections::BTreeMap; - - #[test] - fn membership_test() { - let mut rng = TestRng::new(); - let mut pending_sigs = PendingSignatures::new(); - let block_hash = BlockHash::random(&mut rng); - let block_hash_other = BlockHash::random(&mut rng); - let sig_a = FinalitySignature::random_for_block(block_hash, 0); - let sig_b = FinalitySignature::random_for_block(block_hash_other, 0); - let public_key = sig_a.public_key.clone(); - let public_key_other = sig_b.public_key; - assert!(pending_sigs.add(Signature::External(Box::new(sig_a)))); - assert!(pending_sigs.has_finality_signature(&public_key, &block_hash)); - assert!(!pending_sigs.has_finality_signature(&public_key_other, &block_hash)); - assert!(!pending_sigs.has_finality_signature(&public_key, &block_hash_other)); - } - - #[test] - fn collect_pending() { - let mut rng = TestRng::new(); - let mut pending_sigs = PendingSignatures::new(); - let block_hash = BlockHash::random(&mut rng); - let block_hash_other = BlockHash::random(&mut rng); - let sig_a1 = FinalitySignature::random_for_block(block_hash, 0); - let sig_a2 = FinalitySignature::random_for_block(block_hash, 0); - let sig_b = FinalitySignature::random_for_block(block_hash_other, 0); - assert!(pending_sigs.add(Signature::External(Box::new(sig_a1.clone())))); - assert!(pending_sigs.add(Signature::External(Box::new(sig_a2.clone())))); - assert!(pending_sigs.add(Signature::External(Box::new(sig_b)))); - let collected_sigs: BTreeMap = pending_sigs - .collect_pending(&block_hash) - .into_iter() - .map(|sig| (sig.public_key(), *sig.take())) - .collect(); - let expected_sigs = vec![sig_a1.clone(), sig_a2.clone()] - .into_iter() - .map(|sig| (sig.public_key.clone(), sig)) - 
.collect(); - assert_eq!(collected_sigs, expected_sigs); - assert!( - !pending_sigs.has_finality_signature(&sig_a1.public_key, &sig_a1.block_hash), - "collecting should remove the signature" - ); - assert!( - !pending_sigs.has_finality_signature(&sig_a2.public_key, &sig_a2.block_hash), - "collecting should remove the signature" - ); - } - - #[test] - fn remove_signature() { - let mut rng = TestRng::new(); - let mut pending_sigs = PendingSignatures::new(); - let block_hash = BlockHash::random(&mut rng); - let sig = FinalitySignature::random_for_block(block_hash, 0); - assert!(pending_sigs.add(Signature::External(Box::new(sig.clone())))); - let removed_sig = pending_sigs.remove(&sig.public_key, &sig.block_hash); - assert!(removed_sig.is_some()); - assert!(!pending_sigs.has_finality_signature(&sig.public_key, &sig.block_hash)); - assert!(pending_sigs - .remove(&sig.public_key, &sig.block_hash) - .is_none()); - } - - #[test] - fn max_limit_respected() { - let mut rng = TestRng::new(); - let mut pending_sigs = PendingSignatures::new(); - let (sec_key, pub_key) = generate_ed25519_keypair(); - let era_id = EraId::new(0); - for _ in 0..MAX_PENDING_FINALITY_SIGNATURES_PER_VALIDATOR { - let block_hash = BlockHash::random(&mut rng); - let sig = FinalitySignature::new(block_hash, era_id, &sec_key, pub_key.clone()); - assert!(pending_sigs.add(Signature::External(Box::new(sig)))); - } - let block_hash = BlockHash::random(&mut rng); - let sig = FinalitySignature::new(block_hash, era_id, &sec_key, pub_key); - assert!(!pending_sigs.add(Signature::External(Box::new(sig)))); - } -} diff --git a/node/src/components/linear_chain/signature.rs b/node/src/components/linear_chain/signature.rs deleted file mode 100644 index 29e9d3ab55..0000000000 --- a/node/src/components/linear_chain/signature.rs +++ /dev/null @@ -1,41 +0,0 @@ -use casper_types::PublicKey; -use datasize::DataSize; - -use crate::types::{BlockHash, FinalitySignature}; - -#[derive(DataSize, Debug)] -pub(super) enum Signature 
{ - Local(Box), - External(Box), -} - -impl Signature { - pub(super) fn to_inner(&self) -> &FinalitySignature { - match self { - Signature::Local(fs) => fs, - Signature::External(fs) => fs, - } - } - - pub(super) fn public_key(&self) -> PublicKey { - self.to_inner().public_key.clone() - } - - pub(super) fn block_hash(&self) -> BlockHash { - self.to_inner().block_hash - } - - pub(super) fn signature(&self) -> casper_types::Signature { - self.to_inner().signature - } - - pub(super) fn take(self) -> Box { - match self { - Signature::Local(fs) | Signature::External(fs) => fs, - } - } - - pub(super) fn is_local(&self) -> bool { - matches!(self, Signature::Local(_)) - } -} diff --git a/node/src/components/linear_chain/signature_cache.rs b/node/src/components/linear_chain/signature_cache.rs deleted file mode 100644 index d4621d2afe..0000000000 --- a/node/src/components/linear_chain/signature_cache.rs +++ /dev/null @@ -1,119 +0,0 @@ -use std::collections::{hash_map::Entry, HashMap}; - -use casper_types::{EraId, PublicKey}; -use datasize::DataSize; - -use crate::types::{BlockHash, BlockSignatures}; - -#[derive(DataSize, Debug)] -pub(super) struct SignatureCache { - curr_era: EraId, - signatures: HashMap, -} - -impl SignatureCache { - pub(super) fn new() -> Self { - SignatureCache { - curr_era: EraId::from(0), - signatures: Default::default(), - } - } - - pub(super) fn get(&self, hash: &BlockHash) -> Option { - self.signatures.get(hash).cloned() - } - - pub(super) fn insert(&mut self, block_signature: BlockSignatures) { - // We optimistically assume that most of the signatures that arrive in close temporal - // proximity refer to the same era. 
- if self.curr_era < block_signature.era_id { - self.signatures.clear(); - self.curr_era = block_signature.era_id; - } - match self.signatures.entry(block_signature.block_hash) { - Entry::Occupied(mut entry) => { - entry.get_mut().proofs.extend(block_signature.proofs); - } - Entry::Vacant(entry) => { - entry.insert(block_signature); - } - } - } - - /// Returns whether finality signature is known already. - pub(super) fn known_signature(&self, block_hash: &BlockHash, public_key: &PublicKey) -> bool { - self.signatures - .get(block_hash) - .map_or(false, |bs| bs.has_proof(public_key)) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{testing::TestRng, types::FinalitySignature}; - use casper_types::{EraId, Signature}; - - use std::collections::BTreeMap; - - #[test] - fn adding_signatures() { - let mut rng = TestRng::new(); - let block_hash = BlockHash::random(&mut rng); - let mut cache = SignatureCache::new(); - - // Add first signature for the block. - let mut block_signatures_a = BlockSignatures::new(block_hash, EraId::new(0)); - let sig_a = FinalitySignature::random_for_block(block_hash, 0); - block_signatures_a.insert_proof(sig_a.public_key.clone(), sig_a.signature); - cache.insert(block_signatures_a.clone()); - // Verify that the first signature is cached. - assert!(cache.known_signature(&block_hash, &sig_a.public_key)); - let returned_signatures_a = cache.get(&block_hash).unwrap(); - assert_eq!(block_signatures_a, returned_signatures_a); - - // Adding more signatures for the same block. - let mut block_signatures_b = BlockSignatures::new(block_hash, EraId::new(0)); - let sig_b = FinalitySignature::random_for_block(block_hash, 0); - block_signatures_b.insert_proof(sig_b.public_key.clone(), sig_b.signature); - cache.insert(block_signatures_b.clone()); - // Verify that the second signature is cached. 
- assert!(cache.known_signature(&block_hash, &sig_b.public_key)); - let returned_signatures_b = cache.get(&block_hash).unwrap(); - - // Cache should extend previously stored signatures with the new ones. - let expected: BTreeMap = block_signatures_a - .proofs - .into_iter() - .chain(block_signatures_b.proofs.into_iter()) - .collect(); - assert_eq!(expected, returned_signatures_b.proofs); - } - - #[test] - fn purge_cache() { - // Cache is purged when it receives a signature for the newer era than the currently cached - // signatures. - let mut rng = TestRng::new(); - let block_hash = BlockHash::random(&mut rng); - let mut cache = SignatureCache::new(); - - // Add signature for a block in era-0. - let mut block_signatures_a = BlockSignatures::new(block_hash, EraId::new(0)); - let sig_a = FinalitySignature::random_for_block(block_hash, 0); - block_signatures_a.insert_proof(sig_a.public_key.clone(), sig_a.signature); - cache.insert(block_signatures_a); - - // Add a signature for a block in era-1. - let mut block_signatures_b = BlockSignatures::new(block_hash, EraId::new(1)); - let sig_b = FinalitySignature::random_for_block(block_hash, 1); - block_signatures_b.insert_proof(sig_b.public_key.clone(), sig_b.signature); - cache.insert(block_signatures_b); - - // Verify that era-0 signature is not cached anymore. - assert!(!cache.known_signature(&block_hash, &sig_a.public_key)); - - // Verify that era-1 signature is cached. 
- assert!(cache.known_signature(&block_hash, &sig_b.public_key)); - } -} diff --git a/node/src/components/linear_chain/state.rs b/node/src/components/linear_chain/state.rs deleted file mode 100644 index 5873015e11..0000000000 --- a/node/src/components/linear_chain/state.rs +++ /dev/null @@ -1,512 +0,0 @@ -use std::collections::HashMap; - -use datasize::DataSize; -use itertools::Itertools; -use tracing::{debug, warn}; - -use crate::{ - crypto::hash::Digest, - types::{Block, BlockHash, BlockSignatures, DeployHash, FinalitySignature}, -}; -use casper_types::{ExecutionResult, ProtocolVersion}; - -use super::{ - pending_signatures::PendingSignatures, signature::Signature, signature_cache::SignatureCache, -}; -#[derive(DataSize, Debug)] -pub(crate) struct LinearChain { - /// The most recently added block. - latest_block: Option, - /// Finality signatures to be inserted in a block once it is available. - pending_finality_signatures: PendingSignatures, - signature_cache: SignatureCache, - /// Current protocol version of the network. - protocol_version: ProtocolVersion, - auction_delay: u64, - unbonding_delay: u64, -} - -#[derive(Debug)] -pub(super) enum Outcome { - // Store block signatures to storage. - StoreBlockSignatures(BlockSignatures), - // Store execution results to storage. - StoreExecutionResults(BlockHash, HashMap), - // Store block. - StoreBlock(Box), - // Read finality signatures for the block from storage. - LoadSignatures(Box), - // Gossip finality signature to peers. - Gossip(Box), - // Create a reactor announcement about new (valid) finality signatures. - AnnounceSignature(Box), - // Create a reactor announcement about new (valid) block. - AnnounceBlock(Box), - // Check if creator of `new_fs` is known trusted validator. - // Carries additional context necessary to create the corresponding event. 
- VerifyIfBonded { - new_fs: Box, - known_fs: Option>, - protocol_version: ProtocolVersion, - latest_state_root_hash: Option, - }, -} - -pub(super) type Outcomes = Vec; - -impl LinearChain { - pub(crate) fn new( - protocol_version: ProtocolVersion, - auction_delay: u64, - unbonding_delay: u64, - ) -> Self { - LinearChain { - latest_block: None, - pending_finality_signatures: PendingSignatures::new(), - signature_cache: SignatureCache::new(), - protocol_version, - auction_delay, - unbonding_delay, - } - } - - /// Returns whether we have already enqueued that finality signature. - fn is_pending(&self, fs: &FinalitySignature) -> bool { - let creator = fs.public_key.clone(); - let block_hash = fs.block_hash; - self.pending_finality_signatures - .has_finality_signature(&creator, &block_hash) - } - - /// Returns whether we have already seen and stored the finality signature. - fn is_new(&self, fs: &FinalitySignature) -> bool { - let FinalitySignature { - block_hash, - public_key, - .. - } = fs; - !self.signature_cache.known_signature(block_hash, public_key) - } - - // New linear chain block received. Collect any pending finality signatures that - // were waiting for that block. - fn new_block(&mut self, block: &Block) -> Vec { - let signatures = self.collect_pending_finality_signatures(block.hash()); - if signatures.is_empty() { - return vec![]; - } - let mut block_signatures = BlockSignatures::new(*block.hash(), block.header().era_id()); - for sig in signatures.iter() { - block_signatures.insert_proof(sig.public_key(), sig.signature()); - } - // Cache the signatures as we expect more finality signatures for the new block to - // arrive soon. - self.cache_signatures(block_signatures); - signatures - } - - /// Tries to add the finality signature to the collection of pending finality signatures. - /// Returns true if added successfully, otherwise false. 
- fn add_pending_finality_signature(&mut self, fs: FinalitySignature, gossiped: bool) -> bool { - let FinalitySignature { - block_hash, - public_key, - era_id, - .. - } = fs.clone(); - if let Some(latest_block) = self.latest_block.as_ref() { - // If it's a switch block it has already forgotten its own era's validators, - // unbonded some old validators, and determined new ones. In that case, we - // should add 1 to last_block_era. - let current_era = latest_block.header().era_id() - + if latest_block.header().is_switch_block() { - 1 - } else { - 0 - }; - let lowest_acceptable_era_id = - (current_era + self.auction_delay).saturating_sub(self.unbonding_delay); - let highest_acceptable_era_id = current_era + self.auction_delay; - if era_id < lowest_acceptable_era_id || era_id > highest_acceptable_era_id { - warn!( - era_id=%era_id.value(), - %public_key, - %block_hash, - "received finality signature for not bonded era." - ); - return false; - } - } - if self.is_pending(&fs) { - debug!(block_hash=%fs.block_hash, public_key=%fs.public_key, - "finality signature already pending"); - return false; - } - if !self.is_new(&fs) { - debug!(block_hash=%fs.block_hash, public_key=%fs.public_key, - "finality signature is already known"); - return false; - } - if let Err(err) = fs.verify() { - warn!(%block_hash, %public_key, %err, "received invalid finality signature"); - return false; - } - debug!(%block_hash, %public_key, "received new finality signature"); - let signature = if gossiped { - Signature::External(Box::new(fs)) - } else { - Signature::Local(Box::new(fs)) - }; - self.pending_finality_signatures.add(signature) - } - - /// Removes finality signature from the pending collection. 
- fn remove_from_pending_fs(&mut self, fs: &FinalitySignature) -> Option { - let FinalitySignature { - block_hash, - era_id: _era_id, - signature: _signature, - public_key, - } = fs; - debug!(%block_hash, %public_key, "removing finality signature from pending collection"); - self.pending_finality_signatures - .remove(&public_key, &block_hash) - } - - /// Caches the signature. - fn cache_signatures(&mut self, mut signatures: BlockSignatures) { - // Merge already-known signatures and the new ones. - self.get_signatures(&signatures.block_hash) - .iter() - .for_each(|bs| { - for (pk, sig) in bs.proofs.iter() { - signatures.insert_proof((*pk).clone(), *sig); - } - }); - self.signature_cache.insert(signatures); - } - - /// Returns cached finality signatures that we have already validated and stored. - fn get_signatures(&self, block_hash: &BlockHash) -> Option { - self.signature_cache.get(block_hash) - } - - fn current_protocol_version(&self) -> ProtocolVersion { - self.protocol_version - } - - pub(super) fn set_latest_block(&mut self, block: Block) { - self.latest_block = Some(block); - } - - fn latest_block(&self) -> &Option { - &self.latest_block - } - - /// Returns finality signatures for `block_hash`. - fn collect_pending_finality_signatures(&mut self, block_hash: &BlockHash) -> Vec { - self.pending_finality_signatures - .collect_pending(block_hash) - .into_iter() - .filter(|sig| { - let FinalitySignature { - block_hash, - public_key, - .. 
- } = sig.to_inner(); - !self.signature_cache.known_signature(block_hash, public_key) - }) - .collect_vec() - } - - pub(super) fn handle_new_block( - &mut self, - block: Box, - execution_results: HashMap, - ) -> Outcomes { - let mut outcomes = vec![]; - let signatures = self.new_block(&*block); - if !signatures.is_empty() { - let mut block_signatures = BlockSignatures::new(*block.hash(), block.header().era_id()); - for sig in signatures.iter() { - block_signatures.insert_proof(sig.public_key(), sig.signature()); - } - outcomes.push(Outcome::StoreBlockSignatures(block_signatures)); - for signature in signatures { - if signature.is_local() { - outcomes.push(Outcome::Gossip(Box::new(signature.to_inner().clone()))); - } - outcomes.push(Outcome::AnnounceSignature(signature.take())); - } - }; - let block_hash = *block.hash(); - outcomes.push(Outcome::StoreBlock(block)); - outcomes.push(Outcome::StoreExecutionResults( - block_hash, - execution_results, - )); - outcomes - } - - pub(super) fn handle_put_block(&mut self, block: Box) -> Outcomes { - self.set_latest_block(*block.clone()); - vec![Outcome::AnnounceBlock(block)] - } - - pub(super) fn handle_finality_signature( - &mut self, - fs: Box, - gossiped: bool, - ) -> Outcomes { - let FinalitySignature { block_hash, .. } = *fs; - if !self.add_pending_finality_signature(*fs.clone(), gossiped) { - // If we did not add the signature it means it's either incorrect or we already - // know it. - return vec![]; - } - match self.get_signatures(&block_hash) { - // Not found in the cache, look in the storage. 
- None => vec![Outcome::LoadSignatures(fs)], - Some(signatures) => self.handle_cached_signatures(Some(Box::new(signatures)), fs), - } - } - - pub(super) fn handle_cached_signatures( - &mut self, - signatures: Option>, - fs: Box, - ) -> Outcomes { - if let Some(known_signatures) = &signatures { - // If the newly-received finality signature does not match the era of previously - // validated signatures reject it as they can't both be - // correct – block was created in a specific era so the IDs have to match. - if known_signatures.era_id != fs.era_id { - warn!(public_key = %fs.public_key, - expected = %known_signatures.era_id, - got = %fs.era_id, - "finality signature with invalid era id."); - self.remove_from_pending_fs(&*fs); - // TODO: Disconnect from the sender. - return vec![]; - } - if known_signatures.has_proof(&fs.public_key) { - self.remove_from_pending_fs(&fs); - return vec![]; - } - // Populate cache so that next incoming signatures don't trigger read from the - // storage. If `known_signatures` are already from cache then this will be a - // noop. - self.cache_signatures(*known_signatures.clone()); - } - // Check if the validator is bonded in the era in which the block was created. - // TODO: Use protocol version that is valid for the block's height. - let protocol_version = self.current_protocol_version(); - let latest_state_root_hash = self - .latest_block() - .as_ref() - .map(|block| *block.header().state_root_hash()); - vec![Outcome::VerifyIfBonded { - new_fs: fs, - known_fs: signatures, - protocol_version, - latest_state_root_hash, - }] - } - - pub(super) fn handle_is_bonded( - &mut self, - maybe_known_signatures: Option>, - new_fs: Box, - is_bonded: bool, - ) -> Outcomes { - if !is_bonded { - // Creator of the finality signature (`new_fs`) is not known to be a trusted - // validator. Neither in the current era nor in the eras for which - // we have already run auctions for. 
- self.remove_from_pending_fs(&new_fs); - let FinalitySignature { - public_key, - block_hash, - .. - } = *new_fs; - warn!( - validator = %public_key, - %block_hash, - "Received a signature from a validator that is not bonded." - ); - // TODO: Disconnect from the sender. - return vec![]; - } - - match maybe_known_signatures { - None => { - // Unknown block but validator is bonded. - // We should finalize the same block eventually. Either in this or in the - // next eras. New signature is already cached for later. - vec![] - } - Some(mut known_signatures) => { - // New finality signature from a bonded validator. - known_signatures.insert_proof(new_fs.public_key.clone(), new_fs.signature); - // Cache the results in case we receive the same finality signature before we - // manage to store it in the database. - self.cache_signatures(*known_signatures.clone()); - debug!(hash = %known_signatures.block_hash, "storing finality signatures"); - // Announce new finality signatures for other components to pick up. - let mut outcomes = vec![Outcome::AnnounceSignature(new_fs.clone())]; - if let Some(signature) = self.remove_from_pending_fs(&*new_fs) { - // This shouldn't return `None` as we added the `fs` to the pending collection - // when we received it. If it _is_ `None` then a concurrent - // flow must have already removed it. If it's a signature - // created by this node, gossip it. 
- if signature.is_local() { - outcomes.push(Outcome::Gossip(new_fs.clone())); - } - }; - outcomes.push(Outcome::StoreBlockSignatures(*known_signatures)); - outcomes - } - } - } -} - -#[cfg(test)] -mod tests { - use crate::{crypto::generate_ed25519_keypair, logging, testing::TestRng}; - use casper_types::EraId; - - use super::*; - - #[test] - fn new_block_no_sigs() { - let mut rng = TestRng::new(); - let protocol_version = ProtocolVersion::V1_0_0; - let mut lc = LinearChain::new(protocol_version, 1u64, 1u64); - let block = Block::random(&mut rng); - let execution_results = HashMap::new(); - let new_block_outcomes = - lc.handle_new_block(Box::new(block.clone()), execution_results.clone()); - let block_hash = *block.hash(); - match &*new_block_outcomes { - [Outcome::StoreBlock(outcome_block), Outcome::StoreExecutionResults(outcome_block_hash, outcome_execution_results)] => - { - assert_eq!(&**outcome_block, &block); - assert_eq!(outcome_block_hash, &block_hash); - assert_eq!(outcome_execution_results, &execution_results); - } - others => panic!("unexpected outcome: {:?}", others), - } - let block_stored_outcomes = lc.handle_put_block(Box::new(block.clone())); - match &*block_stored_outcomes { - [Outcome::AnnounceBlock(announced_block)] => { - assert_eq!(&**announced_block, &block); - } - others => panic!("unexpected outcome: {:?}", others), - } - assert_eq!( - lc.latest_block(), - &Some(block), - "should update the latest block" - ); - } - - #[test] - fn pending_sig_rejected() { - let mut rng = TestRng::new(); - let protocol_version = ProtocolVersion::V1_0_0; - let mut lc = LinearChain::new(protocol_version, 1u64, 1u64); - let block_hash = BlockHash::random(&mut rng); - let valid_sig = FinalitySignature::random_for_block(block_hash, 0); - let handle_sig_outcomes = lc.handle_finality_signature(Box::new(valid_sig.clone()), false); - assert!(matches!( - &*handle_sig_outcomes, - &[Outcome::LoadSignatures(_)] - )); - assert!( - 
lc.handle_finality_signature(Box::new(valid_sig), false) - .is_empty(), - "adding already-pending signature should be a no-op" - ); - } - - // Forces caching of the finality signature. Requires confirming that creator is known to be - // bonded. - fn cache_signature(lc: &mut LinearChain, fs: FinalitySignature) { - // We need to signal that block is known. Otherwise we won't cache the signature. - let block_signatures = BlockSignatures::new(fs.block_hash, fs.era_id); - let outcomes = - lc.handle_cached_signatures(Some(Box::new(block_signatures)), Box::new(fs.clone())); - match &*outcomes { - [Outcome::VerifyIfBonded { - new_fs, known_fs, .. - }] => { - assert_eq!(&fs, &**new_fs); - let outcomes = lc.handle_is_bonded(known_fs.clone(), Box::new(fs), true); - // After confirming that signature is valid and block known, we want to store the - // signature and announce it. - assert!(matches!( - &*outcomes, - &[ - Outcome::AnnounceSignature(_), - Outcome::StoreBlockSignatures(_) - ] - )); - } - others => panic!("unexpected outcomes {:?}", others), - } - } - - #[test] - fn known_sig_rejected() { - let _ = logging::init(); - let mut rng = TestRng::new(); - let protocol_version = ProtocolVersion::V1_0_0; - let mut lc = LinearChain::new(protocol_version, 1u64, 1u64); - let block = Block::random(&mut rng); - let valid_sig = - FinalitySignature::random_for_block(*block.hash(), block.header().era_id().value()); - cache_signature(&mut lc, valid_sig.clone()); - let outcomes = lc.handle_finality_signature(Box::new(valid_sig), false); - assert!( - outcomes.is_empty(), - "adding already-known signature should be a no-op" - ); - } - - #[test] - fn invalid_sig_rejected() { - let _ = logging::init(); - let mut rng = TestRng::new(); - let protocol_version = ProtocolVersion::V1_0_0; - let auction_delay = 1; - let unbonding_delay = 2; - let mut lc = LinearChain::new(protocol_version, auction_delay, unbonding_delay); - // Set the latest known block so that we can trigger the following 
checks. - let block = Block::random_with_specifics(&mut rng, EraId::new(3), 10, false); - let block_hash = *block.hash(); - let block_era = block.header().era_id(); - let put_block_outcomes = lc.handle_put_block(Box::new(block.clone())); - assert_eq!(put_block_outcomes.len(), 1); - assert_eq!( - lc.latest_block(), - &Some(block), - "should update the latest block" - ); - // signature's era either too low or too high - let era_too_low_sig = FinalitySignature::random_for_block(block_hash, 0); - let outcomes = lc.handle_finality_signature(Box::new(era_too_low_sig), false); - assert!(outcomes.is_empty()); - let era_too_high_sig = - FinalitySignature::random_for_block(block_hash, block_era.value() + auction_delay + 1); - let outcomes = lc.handle_finality_signature(Box::new(era_too_high_sig), false); - assert!(outcomes.is_empty()); - // signature is not valid - let block_hash = BlockHash::random(&mut rng); - let (_, pub_key) = generate_ed25519_keypair(); - let mut invalid_sig = FinalitySignature::random_for_block(block_hash, block_era.value()); - // replace the public key so that the verification fails. - invalid_sig.public_key = pub_key; - let outcomes = lc.handle_finality_signature(Box::new(invalid_sig), false); - assert!(outcomes.is_empty()) - } -} diff --git a/node/src/components/linear_chain_fast_sync.rs b/node/src/components/linear_chain_fast_sync.rs deleted file mode 100644 index 73b89df331..0000000000 --- a/node/src/components/linear_chain_fast_sync.rs +++ /dev/null @@ -1,617 +0,0 @@ -//! Fast linear chain synchronizer. 
-mod event; -mod metrics; -mod peers; -mod state; -mod traits; - -use std::{collections::BTreeMap, convert::Infallible, fmt::Display, mem}; - -use datasize::DataSize; -use prometheus::Registry; -use tracing::{debug, error, info, trace, warn}; - -use casper_types::{PublicKey, U512}; - -use self::event::{BlockByHashResult, DeploysResult}; - -use super::{ - fetcher::FetchResult, - storage::{self, Storage}, - Component, -}; -use crate::{ - effect::{EffectBuilder, EffectExt, EffectOptionExt, Effects}, - types::{ - ActivationPoint, Block, BlockByHeight, BlockHash, BlockHeader, Chainspec, FinalizedBlock, - }, - NodeRng, -}; -use event::BlockByHeightResult; -pub use event::Event; -pub use metrics::LinearChainSyncMetrics; -pub use peers::PeersState; -pub use state::State; -pub use traits::ReactorEventT; - -#[derive(DataSize, Debug)] -pub(crate) struct LinearChainFastSync { - peers: PeersState, - state: State, - #[data_size(skip)] - metrics: LinearChainSyncMetrics, -} - -#[allow(dead_code)] -impl LinearChainFastSync { - #[allow(clippy::too_many_arguments)] - pub fn new( - registry: &Registry, - _effect_builder: EffectBuilder, - _chainspec: &Chainspec, - _storage: &Storage, - init_hash: Option, - _highest_block: Option, - genesis_validator_weights: BTreeMap, - _next_upgrade_activation_point: Option, - ) -> Result<(Self, Effects>), Err> - where - Err: From + From, - { - let no_effects = Effects::new(); - let state = init_hash.map_or(State::None, |init_hash| { - State::sync_trusted_hash(init_hash, genesis_validator_weights) - }); - let fast_sync = LinearChainFastSync { - peers: PeersState::new(), - state, - metrics: LinearChainSyncMetrics::new(registry)?, - }; - Ok((fast_sync, no_effects)) - } - - /// Add new block to linear chain. - fn add_block(&mut self, block: Block) { - match &mut self.state { - State::None | State::Done => {} - State::SyncingTrustedHash { linear_chain, .. } => linear_chain.push(block), - State::SyncingDescendants { latest_block, .. 
} => **latest_block = block, - }; - } - - /// Returns `true` if we have finished syncing linear chain. - pub fn is_synced(&self) -> bool { - matches!(self.state, State::None | State::Done) - } - - /// Fast sync won't shut down for an upgrade. - pub fn stopped_for_upgrade(&self) -> bool { - false - } - - fn block_downloaded( - &mut self, - rng: &mut NodeRng, - effect_builder: EffectBuilder, - block: &Block, - ) -> Effects> - where - I: Send + 'static, - REv: ReactorEventT, - { - self.peers.reset(rng); - self.state.block_downloaded(block.header()); - self.add_block(block.clone()); - match &mut self.state { - State::None | State::Done => panic!("Downloaded block when in {} state.", self.state), - State::SyncingTrustedHash { - trusted_hash, - trusted_header, - .. - } => { - if *block.hash() == *trusted_hash { - *trusted_header = Some(Box::new(block.header().clone())); - } - if block.header().is_genesis_child() { - info!("linear chain downloaded. Start downloading deploys."); - effect_builder - .immediately() - .event(move |_| Event::StartDownloadingDeploys) - } else { - self.fetch_next_block(effect_builder, rng, block.header()) - } - } - State::SyncingDescendants { .. } => { - // When synchronizing descendants, we want to download block and execute it - // before trying to download the next block in linear chain. - self.fetch_next_block_deploys(effect_builder) - } - } - } - - fn mark_done(&mut self) { - self.state = State::Done; - } - - /// Handles an event indicating that a linear chain block has been executed and handled by - /// consensus component. This is a signal that we can safely continue with the next blocks, - /// without worrying about timing and/or ordering issues. - /// Returns effects that are created as a response to that event. 
- fn block_handled( - &mut self, - rng: &mut NodeRng, - effect_builder: EffectBuilder, - block: Block, - ) -> Effects> - where - I: Send + 'static, - REv: ReactorEventT, - { - let height = block.height(); - let hash = block.hash(); - trace!(%hash, %height, "Downloaded linear chain block."); - // Reset peers before creating new requests. - self.peers.reset(rng); - let block_height = block.height(); - let mut curr_state = mem::replace(&mut self.state, State::None); - match curr_state { - State::None | State::Done => panic!("Block handled when in {:?} state.", &curr_state), - State::SyncingTrustedHash { - highest_block_seen, - trusted_header: None, - .. - } if highest_block_seen == block_height => panic!("Should always have trusted header"), - // If the block we are handling is the highest block seen, transition to syncing - // descendants - State::SyncingTrustedHash { - highest_block_seen, - trusted_hash, - trusted_header, - ref latest_block, - validator_weights, - .. - } if highest_block_seen == block_height => { - // TODO: Fail gracefully in these cases - match latest_block.as_ref() { - Some(expected) => assert_eq!( - expected, &block, - "Block execution result doesn't match received block." - ), - None => panic!("Unexpected block execution results."), - } - let trusted_header = trusted_header.expect("trusted header must be present"); - - info!(%block_height, "Finished synchronizing linear chain up until trusted hash."); - let peer = self.peers.random_unsafe(); - // Kick off syncing trusted hash descendants. - self.state = - State::sync_descendants(trusted_hash, trusted_header, block, validator_weights); - fetch_block_at_height(effect_builder, peer, block_height + 1) - } - // Keep syncing from genesis if we haven't reached the trusted block hash - State::SyncingTrustedHash { - ref mut validator_weights, - ref latest_block, - .. 
- } => { - match latest_block.as_ref() { - Some(expected) => assert_eq!( - expected, &block, - "Block execution result doesn't match received block." - ), - None => panic!("Unexpected block execution results."), - } - if let Some(validator_weights_for_new_era) = - block.header().next_era_validator_weights() - { - *validator_weights = validator_weights_for_new_era.clone(); - } - self.state = curr_state; - self.fetch_next_block_deploys(effect_builder) - } - State::SyncingDescendants { - ref latest_block, - ref mut validators_for_latest_block, - .. - } => { - assert_eq!( - **latest_block, block, - "Block execution result doesn't match received block." - ); - match block.header().next_era_validator_weights() { - None => (), - Some(validators_for_next_era) => { - *validators_for_latest_block = validators_for_next_era.clone(); - } - } - self.state = curr_state; - self.fetch_next_block(effect_builder, rng, &block.header()) - } - } - } - - /// Returns effects for fetching next block's deploys. - fn fetch_next_block_deploys( - &mut self, - effect_builder: EffectBuilder, - ) -> Effects> - where - I: Send + 'static, - REv: ReactorEventT, - { - let peer = self.peers.random_unsafe(); - - let next_block = match &mut self.state { - State::None | State::Done => { - panic!("Tried fetching next block when in {:?} state.", self.state) - } - State::SyncingTrustedHash { - linear_chain, - latest_block, - .. - } => match linear_chain.pop() { - None => None, - Some(block) => { - // Update `latest_block` so that we can verify whether result of execution - // matches the expected value. - latest_block.replace(block.clone()); - Some(block) - } - }, - State::SyncingDescendants { latest_block, .. 
} => Some((**latest_block).clone()), - }; - - next_block.map_or_else( - || { - warn!("tried fetching next block deploys when there was no block."); - Effects::new() - }, - |block| { - self.metrics.reset_start_time(); - fetch_block_deploys(effect_builder, peer, block) - }, - ) - } - - fn fetch_next_block( - &mut self, - effect_builder: EffectBuilder, - rng: &mut NodeRng, - block_header: &BlockHeader, - ) -> Effects> - where - I: Send + 'static, - REv: ReactorEventT, - { - self.peers.reset(rng); - let peer = self.peers.random_unsafe(); - match self.state { - State::SyncingTrustedHash { .. } => { - let parent_hash = *block_header.parent_hash(); - self.metrics.reset_start_time(); - fetch_block_by_hash(effect_builder, peer, parent_hash) - } - State::SyncingDescendants { .. } => { - let next_height = block_header.height() + 1; - self.metrics.reset_start_time(); - fetch_block_at_height(effect_builder, peer, next_height) - } - State::Done | State::None => { - panic!("Tried fetching block when in {:?} state", self.state) - } - } - } - - pub(crate) fn latest_block(&self) -> Option<&Block> { - match &self.state { - State::SyncingTrustedHash { latest_block, .. } => Option::as_ref(&*latest_block), - State::SyncingDescendants { latest_block, .. } => Some(&*latest_block), - State::Done | State::None => None, - } - } -} - -impl Component for LinearChainFastSync -where - I: Display + Clone + Send + PartialEq + 'static, - REv: ReactorEventT, -{ - type Event = Event; - type ConstructionError = Infallible; - - fn handle_event( - &mut self, - effect_builder: EffectBuilder, - rng: &mut NodeRng, - event: Self::Event, - ) -> Effects { - match event { - Event::Start(init_peer) => { - match self.state { - State::None => { - // No syncing configured. - trace!("received `Start` event when in {} state.", self.state); - Effects::new() - } - State::Done | State::SyncingDescendants { .. } => { - // Illegal states for syncing start. 
- error!( - "should not have received `Start` event when in {} state.", - self.state - ); - Effects::new() - } - State::SyncingTrustedHash { trusted_hash, .. } => { - trace!(?trusted_hash, "start synchronization"); - // Start synchronization. - self.metrics.reset_start_time(); - fetch_block_by_hash(effect_builder, init_peer, trusted_hash) - } - } - } - Event::GetBlockHeightResult(block_height, fetch_result) => { - match fetch_result { - BlockByHeightResult::Absent(peer) => { - self.metrics.observe_get_block_by_height(); - trace!(%block_height, %peer, "failed to download block by height. Trying next peer"); - self.peers.failure(&peer); - match self.peers.random() { - None => { - // `block_height` not found on any of the peers. - // We have synchronized all, currently existing, descendants of - // trusted hash. - self.mark_done(); - info!("finished synchronizing descendants of the trusted hash."); - Effects::new() - } - Some(peer) => { - self.metrics.reset_start_time(); - fetch_block_at_height(effect_builder, peer, block_height) - } - } - } - BlockByHeightResult::FromStorage(block) => { - // We shouldn't get invalid data from the storage. - // If we do, it's a bug. - assert_eq!(block.height(), block_height, "Block height mismatch."); - trace!(%block_height, "Linear block found in the local storage."); - // When syncing descendants of a trusted hash, we might have some of them in - // our local storage. If that's the case, just - // continue. 
- self.block_downloaded(rng, effect_builder, &*block) - } - BlockByHeightResult::FromPeer(block, peer) => { - self.metrics.observe_get_block_by_height(); - trace!(%block_height, %peer, "linear chain block downloaded from a peer"); - if block.height() != block_height - || *block.header().parent_hash() != *self.latest_block().unwrap().hash() - { - warn!( - %peer, - got_height = block.height(), - expected_height = block_height, - got_parent = %block.header().parent_hash(), - expected_parent = %self.latest_block().unwrap().hash(), - "block mismatch", - ); - // NOTE: Signal misbehaving validator to networking layer. - self.peers.ban(&peer); - return self.handle_event( - effect_builder, - rng, - Event::GetBlockHeightResult( - block_height, - BlockByHeightResult::Absent(peer), - ), - ); - } - self.peers.success(peer); - self.block_downloaded(rng, effect_builder, &*block) - } - } - } - Event::GetBlockHashResult(block_hash, fetch_result) => { - match fetch_result { - BlockByHashResult::Absent(peer) => { - self.metrics.observe_get_block_by_hash(); - trace!(%block_hash, %peer, "failed to download block by hash. Trying next peer"); - self.peers.failure(&peer); - match self.peers.random() { - None => { - error!(%block_hash, "Could not download linear block from any of the peers."); - panic!("Failed to download linear chain.") - } - Some(peer) => { - self.metrics.reset_start_time(); - fetch_block_by_hash(effect_builder, peer, block_hash) - } - } - } - BlockByHashResult::FromStorage(block) => { - // We shouldn't get invalid data from the storage. - // If we do, it's a bug. 
- assert_eq!(*block.hash(), block_hash, "Block hash mismatch."); - trace!(%block_hash, "linear block found in the local storage."); - self.block_downloaded(rng, effect_builder, &*block) - } - BlockByHashResult::FromPeer(block, peer) => { - self.metrics.observe_get_block_by_hash(); - trace!(%block_hash, %peer, "linear chain block downloaded from a peer"); - let header_hash = block.header().hash(); - if header_hash != block_hash || header_hash != *block.hash() { - warn!( - "Block hash mismatch. Expected {} got {} from {}.\ - Block claims to have hash {}. Disconnecting.", - block_hash, - header_hash, - block.hash(), - peer - ); - // NOTE: Signal misbehaving validator to networking layer. - self.peers.ban(&peer); - return self.handle_event( - effect_builder, - rng, - Event::GetBlockHashResult( - block_hash, - BlockByHashResult::Absent(peer), - ), - ); - } - self.peers.success(peer); - self.block_downloaded(rng, effect_builder, &*block) - } - } - } - Event::GetDeploysResult(fetch_result) => { - self.metrics.observe_get_deploys(); - match fetch_result { - event::DeploysResult::Found(block) => { - let block_hash = block.hash(); - trace!(%block_hash, "deploys for linear chain block found"); - // Reset used peers so we can download next block with the full set. - self.peers.reset(rng); - // Execute block - let finalized_block: FinalizedBlock = (*block).into(); - effect_builder.execute_block(finalized_block).ignore() - } - event::DeploysResult::NotFound(block, peer) => { - let block_hash = block.hash(); - trace!(%block_hash, %peer, "deploy for linear chain block not found. 
Trying next peer"); - self.peers.failure(&peer); - match self.peers.random() { - None => { - error!(%block_hash, - "could not download deploys from linear chain block."); - panic!("Failed to download linear chain deploys.") - } - Some(peer) => { - self.metrics.reset_start_time(); - fetch_block_deploys(effect_builder, peer, *block) - } - } - } - } - } - Event::StartDownloadingDeploys => { - // Start downloading deploys from the first block of the linear chain. - self.peers.reset(rng); - self.fetch_next_block_deploys(effect_builder) - } - Event::NewPeerConnected(peer_id) => { - trace!(%peer_id, "new peer connected"); - // Add to the set of peers we can request things from. - let mut effects = Effects::new(); - if self.peers.is_empty() { - // First peer connected, start downloading. - let cloned_peer_id = peer_id.clone(); - effects.extend( - effect_builder - .immediately() - .event(move |_| Event::Start(cloned_peer_id)), - ); - } - self.peers.push(peer_id); - effects - } - Event::BlockHandled(block) => { - let block_height = block.height(); - let block_hash = *block.hash(); - let effects = self.block_handled(rng, effect_builder, *block); - trace!(%block_height, %block_hash, "block handled"); - effects - } - Event::GotUpgradeActivationPoint(next_upgrade_activation_point) => { - debug!( - ?next_upgrade_activation_point, - "new activation point ignored" - ); - Effects::new() - } - } - } -} - -fn fetch_block_deploys( - effect_builder: EffectBuilder, - peer: I, - block: Block, -) -> Effects> -where - REv: ReactorEventT, -{ - let block_timestamp = block.header().timestamp(); - effect_builder - .validate_block(peer.clone(), block, block_timestamp) - .event(move |(found, block)| { - if found { - Event::GetDeploysResult(DeploysResult::Found(Box::new(block))) - } else { - Event::GetDeploysResult(DeploysResult::NotFound(Box::new(block), peer)) - } - }) -} - -fn fetch_block_by_hash( - effect_builder: EffectBuilder, - peer: I, - block_hash: BlockHash, -) -> Effects> -where - REv: 
ReactorEventT, -{ - let cloned = peer.clone(); - effect_builder.fetch_block(block_hash, peer).map_or_else( - move |fetch_result| match fetch_result { - FetchResult::FromStorage(block) => { - Event::GetBlockHashResult(block_hash, BlockByHashResult::FromStorage(block)) - } - FetchResult::FromPeer(block, peer) => { - Event::GetBlockHashResult(block_hash, BlockByHashResult::FromPeer(block, peer)) - } - }, - move || Event::GetBlockHashResult(block_hash, BlockByHashResult::Absent(cloned)), - ) -} - -fn fetch_block_at_height( - effect_builder: EffectBuilder, - peer: I, - block_height: u64, -) -> Effects> -where - REv: ReactorEventT, -{ - let cloned = peer.clone(); - effect_builder - .fetch_block_by_height(block_height, peer.clone()) - .map_or_else( - move |fetch_result| match fetch_result { - FetchResult::FromPeer(result, _) => match *result { - BlockByHeight::Absent(ret_height) => { - warn!( - "Fetcher returned result for invalid height. Expected {}, got {}", - block_height, ret_height - ); - Event::GetBlockHeightResult(block_height, BlockByHeightResult::Absent(peer)) - } - BlockByHeight::Block(block) => Event::GetBlockHeightResult( - block_height, - BlockByHeightResult::FromPeer(block, peer), - ), - }, - FetchResult::FromStorage(result) => match *result { - BlockByHeight::Absent(_) => { - // Fetcher should try downloading the block from a peer - // when it can't find it in the storage. 
- panic!("Should not return `Absent` in `FromStorage`.") - } - BlockByHeight::Block(block) => Event::GetBlockHeightResult( - block_height, - BlockByHeightResult::FromStorage(block), - ), - }, - }, - move || Event::GetBlockHeightResult(block_height, BlockByHeightResult::Absent(cloned)), - ) -} diff --git a/node/src/components/linear_chain_fast_sync/event.rs b/node/src/components/linear_chain_fast_sync/event.rs deleted file mode 100644 index d32cfd6150..0000000000 --- a/node/src/components/linear_chain_fast_sync/event.rs +++ /dev/null @@ -1,69 +0,0 @@ -use crate::types::{ActivationPoint, Block, BlockHash}; - -use std::fmt::{Debug, Display}; - -#[derive(Debug)] -pub enum Event { - Start(I), - GetBlockHashResult(BlockHash, BlockByHashResult), - GetBlockHeightResult(u64, BlockByHeightResult), - GetDeploysResult(DeploysResult), - StartDownloadingDeploys, - NewPeerConnected(I), - BlockHandled(Box), - GotUpgradeActivationPoint(ActivationPoint), -} - -#[derive(Debug)] -pub enum DeploysResult { - Found(Box), - NotFound(Box, I), -} - -#[derive(Debug)] -pub enum BlockByHashResult { - Absent(I), - FromStorage(Box), - FromPeer(Box, I), -} - -#[derive(Debug)] -pub enum BlockByHeightResult { - Absent(I), - FromStorage(Box), - FromPeer(Box, I), -} - -impl Display for Event -where - I: Debug + Display, -{ - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - Event::Start(init_peer) => write!(f, "Start syncing from peer {}.", init_peer), - Event::GetBlockHashResult(block_hash, r) => { - write!(f, "Get block result for {}: {:?}", block_hash, r) - } - Event::GetDeploysResult(result) => { - write!(f, "Get deploys for block result {:?}", result) - } - Event::StartDownloadingDeploys => write!(f, "Start downloading deploys event."), - Event::NewPeerConnected(peer_id) => write!(f, "A new peer connected: {}", peer_id), - Event::BlockHandled(block) => { - let hash = block.hash(); - let height = block.height(); - write!( - f, - "Block has been handled by 
consensus. Hash {}, height {}", - hash, height - ) - } - Event::GetBlockHeightResult(height, res) => { - write!(f, "Get block result for height {}: {:?}", height, res) - } - Event::GotUpgradeActivationPoint(activation_point) => { - write!(f, "new upgrade activation point: {:?}", activation_point) - } - } - } -} diff --git a/node/src/components/linear_chain_fast_sync/metrics.rs b/node/src/components/linear_chain_fast_sync/metrics.rs deleted file mode 100644 index 9108b6abbc..0000000000 --- a/node/src/components/linear_chain_fast_sync/metrics.rs +++ /dev/null @@ -1,80 +0,0 @@ -use std::time::Instant; - -use prometheus::{Histogram, HistogramOpts, Registry}; - -#[derive(Debug)] -pub struct LinearChainSyncMetrics { - get_block_by_hash: Histogram, - get_block_by_height: Histogram, - get_deploys: Histogram, - request_start: Instant, -} - -const GET_BLOCK_BY_HASH: &str = "linear_chain_sync_get_block_by_hash"; -const GET_BLOCK_BY_HASH_HELP: &str = "histogram of linear_chain_sync get_block_by_hash request"; -const GET_BLOCK_BY_HEIGHT: &str = "linear_chain_sync_get_block_by_height"; -const GET_BLOCK_BY_HEIGHT_HELP: &str = "histogram of linear_chain_sync get_block_by_height request"; -const GET_DEPLOYS: &str = "linear_chain_sync_get_deploys"; -const GET_DEPLOYS_HELP: &str = "histogram of linear_chain_sync get_deploys request"; - -/// Value of upper bound of histogram. -const EXPONENTIAL_BUCKET_START: f64 = 0.01; -/// Multiplier of previous upper bound for next bound. -const EXPONENTIAL_BUCKET_FACTOR: f64 = 2.0; -/// Bucket count, with last going to +Inf. -const EXPONENTIAL_BUCKET_COUNT: usize = 6; - -/// Create prometheus Histogram and register. 
-fn register_histogram_metric( - registry: &Registry, - metric_name: &str, - metric_help: &str, -) -> Result { - let common_buckets = prometheus::exponential_buckets( - EXPONENTIAL_BUCKET_START, - EXPONENTIAL_BUCKET_FACTOR, - EXPONENTIAL_BUCKET_COUNT, - )?; - let histogram_opts = HistogramOpts::new(metric_name, metric_help).buckets(common_buckets); - let histogram = Histogram::with_opts(histogram_opts)?; - registry.register(Box::new(histogram.clone()))?; - Ok(histogram) -} - -impl LinearChainSyncMetrics { - pub fn new(registry: &Registry) -> Result { - Ok(LinearChainSyncMetrics { - get_block_by_hash: register_histogram_metric( - registry, - GET_BLOCK_BY_HASH, - GET_BLOCK_BY_HASH_HELP, - )?, - get_block_by_height: register_histogram_metric( - registry, - GET_BLOCK_BY_HEIGHT, - GET_BLOCK_BY_HEIGHT_HELP, - )?, - get_deploys: register_histogram_metric(registry, GET_DEPLOYS, GET_DEPLOYS_HELP)?, - request_start: Instant::now(), - }) - } - - pub fn reset_start_time(&mut self) { - self.request_start = Instant::now(); - } - - pub fn observe_get_block_by_hash(&mut self) { - self.get_block_by_hash - .observe(self.request_start.elapsed().as_secs_f64()); - } - - pub fn observe_get_block_by_height(&mut self) { - self.get_block_by_height - .observe(self.request_start.elapsed().as_secs_f64()); - } - - pub fn observe_get_deploys(&mut self) { - self.get_deploys - .observe(self.request_start.elapsed().as_secs_f64()); - } -} diff --git a/node/src/components/linear_chain_fast_sync/peers.rs b/node/src/components/linear_chain_fast_sync/peers.rs deleted file mode 100644 index e0041b9ed0..0000000000 --- a/node/src/components/linear_chain_fast_sync/peers.rs +++ /dev/null @@ -1,88 +0,0 @@ -use std::collections::VecDeque; - -use datasize::DataSize; -use rand::{seq::SliceRandom, Rng}; - -#[derive(DataSize, Debug)] -pub struct PeersState { - // Set of peers that we can request blocks from. - peers: Vec, - // Peers we have not yet requested current block from. 
- // NOTE: Maybe use a bitmask to decide which peers were tried? - peers_to_try: Vec, - // Peers we successfuly downloaded data from previously. - // Have higher chance of having the next data. - succ_peers: VecDeque, - succ_attempts: u8, - succ_attempts_max: u8, -} - -impl PeersState { - pub fn new() -> Self { - PeersState { - peers: Default::default(), - peers_to_try: Default::default(), - succ_peers: Default::default(), - succ_attempts: 0, - succ_attempts_max: 5, - } - } - - /// Resets `peers_to_try` back to all `peers` we know of. - pub(crate) fn reset(&mut self, rng: &mut R) { - self.peers_to_try = self.peers.clone(); - self.peers_to_try.as_mut_slice().shuffle(rng); - } - - /// Returns a random peer. - pub(crate) fn random(&mut self) -> Option { - if self.succ_attempts < self.succ_attempts_max { - self.next_succ().or_else(|| self.peers_to_try.pop()) - } else { - self.succ_attempts = 0; - self.peers_to_try.pop().or_else(|| self.next_succ()) - } - } - - /// Unsafe version of `random_peer`. - /// Panics if no peer is available for querying. - pub(crate) fn random_unsafe(&mut self) -> I { - self.random().expect("At least one peer available.") - } - - /// Peer misbehaved (returned us invalid data). - /// Remove it from the set of nodes we request data from. - pub(crate) fn ban(&mut self, peer: &I) { - self.peers.retain(|p| p != peer); - self.succ_peers.retain(|p| p != peer); - } - - /// Returns whether known peer set is empty. - pub(crate) fn is_empty(&self) -> bool { - self.peers.is_empty() - } - - /// Adds a new peer. - pub(crate) fn push(&mut self, peer: I) { - self.peers.push(peer) - } - - /// Returns the next peer, if any, that we downloaded data the previous time. - /// Keeps the peer in the set of `succ_peers`. - fn next_succ(&mut self) -> Option { - let peer = self.succ_peers.pop_front()?; - self.succ_peers.push_back(peer.clone()); - Some(peer) - } - - /// Peer didn't respond or didn't have the data we asked for. 
- pub(crate) fn failure(&mut self, peer: &I) { - self.succ_peers.retain(|id| id != peer); - } - - /// Peer had the data we asked for. - pub(crate) fn success(&mut self, peer: I) { - self.succ_attempts += 1; - self.succ_peers.push_back(peer); - } -} diff --git a/node/src/components/linear_chain_fast_sync/state.rs b/node/src/components/linear_chain_fast_sync/state.rs deleted file mode 100644 index 956627e46e..0000000000 --- a/node/src/components/linear_chain_fast_sync/state.rs +++ /dev/null @@ -1,113 +0,0 @@ -use std::{collections::BTreeMap, fmt::Display}; - -use datasize::DataSize; - -use crate::types::{Block, BlockHash, BlockHeader}; -use casper_types::{PublicKey, U512}; - -#[derive(DataSize, Debug)] -pub enum State { - /// No syncing of the linear chain configured. - None, - /// Synchronizing the linear chain up until trusted hash. - SyncingTrustedHash { - /// Linear chain block to start sync from. - trusted_hash: BlockHash, - /// Block header corresponding to the trusted hash - trusted_header: Option>, - /// During synchronization we might see new eras being created. - /// Track the highest height and wait until it's handled by consensus. - highest_block_seen: u64, - /// Chain of downloaded blocks from the linear chain. - /// We will `pop()` when executing blocks. - linear_chain: Vec, - /// The most recent block we started to execute. This is updated whenever we start - /// downloading deploys for the next block to be executed. - latest_block: Box>, - /// The weights of the validators for latest block being added. - validator_weights: BTreeMap, - }, - /// Synchronizing the descendants of the trusted hash. - SyncingDescendants { - trusted_hash: BlockHash, - /// Block header corresponding to the trusted hash - trusted_header: Box, - /// The most recent block we started to execute. This is updated whenever we start - /// downloading deploys for the next block to be executed. - latest_block: Box, - /// During synchronization we might see new eras being created. 
- /// Track the highest height and wait until it's handled by consensus. - highest_block_seen: u64, - /// The validator set for the most recent block being synchronized. - validators_for_latest_block: BTreeMap, - }, - /// Synchronizing done. - Done, -} - -impl Display for State { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - State::None => write!(f, "None"), - State::SyncingTrustedHash { trusted_hash, .. } => { - write!(f, "SyncingTrustedHash(trusted_hash: {:?})", trusted_hash) - } - State::SyncingDescendants { - highest_block_seen, .. - } => write!( - f, - "SyncingDescendants(highest_block_seen: {})", - highest_block_seen - ), - State::Done => write!(f, "Done"), - } - } -} - -impl State { - pub fn sync_trusted_hash( - trusted_hash: BlockHash, - validator_weights: BTreeMap, - ) -> Self { - State::SyncingTrustedHash { - trusted_hash, - highest_block_seen: 0, - linear_chain: Vec::new(), - latest_block: Box::new(None), - validator_weights, - trusted_header: None, - } - } - - pub fn sync_descendants( - trusted_hash: BlockHash, - trusted_header: Box, - latest_block: Block, - validators_for_latest_block: BTreeMap, - ) -> Self { - State::SyncingDescendants { - trusted_hash, - trusted_header, - latest_block: Box::new(latest_block), - highest_block_seen: 0, - validators_for_latest_block, - } - } - - pub fn block_downloaded(&mut self, block: &BlockHeader) { - match self { - State::None | State::Done => {} - State::SyncingTrustedHash { - highest_block_seen, .. - } - | State::SyncingDescendants { - highest_block_seen, .. 
- } => { - let curr_height = block.height(); - if curr_height > *highest_block_seen { - *highest_block_seen = curr_height; - } - } - }; - } -} diff --git a/node/src/components/linear_chain_fast_sync/traits.rs b/node/src/components/linear_chain_fast_sync/traits.rs deleted file mode 100644 index e072695ad3..0000000000 --- a/node/src/components/linear_chain_fast_sync/traits.rs +++ /dev/null @@ -1,25 +0,0 @@ -use crate::{ - effect::requests::{ - BlockValidationRequest, ContractRuntimeRequest, FetcherRequest, StorageRequest, - }, - types::{Block, BlockByHeight}, -}; -pub trait ReactorEventT: - From - + From> - + From> - + From> - + From - + Send -{ -} - -impl ReactorEventT for REv where - REv: From - + From> - + From> - + From> - + From - + Send -{ -} diff --git a/node/src/components/linear_chain_sync.rs b/node/src/components/linear_chain_sync.rs deleted file mode 100644 index c2defa2983..0000000000 --- a/node/src/components/linear_chain_sync.rs +++ /dev/null @@ -1,949 +0,0 @@ -//! Linear chain synchronizer. -//! -//! Synchronizes the linear chain when node joins the network. -//! -//! Steps are: -//! 1. Fetch blocks up to initial, trusted hash (blocks are downloaded starting from trusted hash up -//! until Genesis). -//! 2. Fetch deploys of the lowest height block. -//! 3. Execute that block. -//! 4. Repeat steps 2-3 until trusted hash is reached. -//! 5. Transition to `SyncingDescendants` state. -//! 6. Fetch child block of highest block. -//! 7. Fetch deploys of that block. -//! 8. Execute that block. -//! 9. Repeat steps 6-8 as long as there's a child in the linear chain. -//! -//! The order of "download block – download deploys – execute" block steps differ, -//! in order to increase the chances of catching up with the linear chain quicker. -//! When synchronizing linear chain up to the trusted hash we cannot execute later blocks without -//! earlier ones. When we're syncing descendants, on the other hand, we can and we want to do it -//! 
ASAP so that we can start participating in consensus. That's why deploy fetching and block -//! execution is interleaved. If we had downloaded the whole chain, and then deploys, and then -//! execute (as we do in the first, SynchronizeTrustedHash, phase) it would have taken more time and -//! we might miss more eras. - -mod event; -mod metrics; -mod peers; -mod state; -mod traits; - -use std::{collections::BTreeMap, convert::Infallible, fmt::Display, mem, str::FromStr}; - -use datasize::DataSize; -use prometheus::Registry; -use tracing::{error, info, trace, warn}; - -use self::event::{BlockByHashResult, DeploysResult}; -use casper_types::{EraId, ProtocolVersion, PublicKey, U512}; - -use super::{ - fetcher::FetchResult, - storage::{self, Storage}, - Component, -}; -use crate::{ - effect::{EffectBuilder, EffectExt, EffectOptionExt, Effects}, - fatal, - types::{ - ActivationPoint, Block, BlockByHeight, BlockHash, Chainspec, FinalizedBlock, TimeDiff, - }, - NodeRng, -}; -use event::BlockByHeightResult; -pub use event::Event; -pub use metrics::LinearChainSyncMetrics; -pub use peers::PeersState; -pub use state::State; -pub use traits::ReactorEventT; - -#[derive(DataSize, Debug)] -pub(crate) struct LinearChainSync { - peers: PeersState, - state: State, - #[data_size(skip)] - metrics: LinearChainSyncMetrics, - /// The next upgrade activation point. - /// When we download the switch block of an era immediately before the activation point, - /// we need to shut down for an upgrade. - next_upgrade_activation_point: Option, - stop_for_upgrade: bool, - /// Key for storing the linear chain sync state. - state_key: Vec, - /// Acceptable drift between the block creation and now. - /// If less than than this has passed we will consider syncing as finished. - acceptable_drift: TimeDiff, - /// Shortest era that is allowed with the given protocol configuration. - shortest_era: TimeDiff, - /// Flag indicating whether we managed to sync at least one block. 
- started_syncing: bool, - /// The protocol version the node is currently running with. - protocol_version: ProtocolVersion, -} - -impl LinearChainSync { - // TODO: fix this - #[allow(clippy::too_many_arguments)] - pub fn new( - registry: &Registry, - effect_builder: EffectBuilder, - chainspec: &Chainspec, - storage: &Storage, - init_hash: Option, - highest_block: Option, - _genesis_validator_weights: BTreeMap, - next_upgrade_activation_point: Option, - ) -> Result<(Self, Effects>), Err> - where - REv: From> + Send, - Err: From + From, - { - // set timeout to 5 minutes after now. - let five_minutes = TimeDiff::from_str("5minutes").unwrap(); - let timeout_event = effect_builder - .set_timeout(five_minutes.into()) - .event(|_| Event::InitializeTimeout); - let protocol_version = chainspec.protocol_config.version; - if let Some(state) = read_init_state(storage, chainspec)? { - let linear_chain_sync = LinearChainSync::from_state( - registry, - chainspec, - state, - next_upgrade_activation_point, - protocol_version, - )?; - Ok((linear_chain_sync, timeout_event)) - } else { - let acceptable_drift = chainspec.highway_config.max_round_length(); - // Shortest era is the maximum of the two. 
- let shortest_era: TimeDiff = std::cmp::max( - chainspec.highway_config.min_round_length() - * chainspec.core_config.minimum_era_height, - chainspec.core_config.era_duration, - ); - let state = match init_hash { - Some(init_hash) => State::sync_trusted_hash( - init_hash, - highest_block.map(|block| block.take_header()), - ), - None => State::Done(highest_block.map(Box::new)), - }; - let state_key = create_state_key(&chainspec); - let linear_chain_sync = LinearChainSync { - peers: PeersState::new(), - state, - metrics: LinearChainSyncMetrics::new(registry)?, - next_upgrade_activation_point, - stop_for_upgrade: false, - state_key, - acceptable_drift, - shortest_era, - started_syncing: false, - protocol_version, - }; - Ok((linear_chain_sync, timeout_event)) - } - } - - /// Initialize `LinearChainSync` component from preloaded `State`. - fn from_state( - registry: &Registry, - chainspec: &Chainspec, - state: State, - next_upgrade_activation_point: Option, - protocol_version: ProtocolVersion, - ) -> Result { - let state_key = create_state_key(chainspec); - info!(?state, "reusing previous state"); - let acceptable_drift = chainspec.highway_config.max_round_length(); - // Shortest era is the maximum of the two. - let shortest_era: TimeDiff = std::cmp::max( - chainspec.highway_config.min_round_length() * chainspec.core_config.minimum_era_height, - chainspec.core_config.era_duration, - ); - Ok(LinearChainSync { - peers: PeersState::new(), - state, - metrics: LinearChainSyncMetrics::new(registry)?, - next_upgrade_activation_point, - stop_for_upgrade: false, - state_key, - acceptable_drift, - shortest_era, - started_syncing: false, - protocol_version, - }) - } - - /// Add new block to linear chain. - fn add_block(&mut self, block: Block) { - self.started_syncing = true; - match &mut self.state { - State::None | State::Done(_) => {} - State::SyncingTrustedHash { linear_chain, .. } => linear_chain.push(block), - State::SyncingDescendants { latest_block, .. 
} => **latest_block = block, - }; - } - - /// Returns `true` if we have finished syncing linear chain. - pub fn is_synced(&self) -> bool { - matches!(self.state, State::Done(_)) - } - - /// Returns `true` if we should stop for upgrade. - pub fn stopped_for_upgrade(&self) -> bool { - self.stop_for_upgrade - } - - fn block_downloaded( - &mut self, - rng: &mut NodeRng, - effect_builder: EffectBuilder, - block: &Block, - ) -> Effects> - where - I: Send + 'static, - REv: ReactorEventT, - { - self.peers.reset(rng); - self.state.block_downloaded(block); - self.add_block(block.clone()); - match &self.state { - State::None | State::Done(_) => { - error!(state=?self.state, "block downloaded when in incorrect state."); - fatal!(effect_builder, "block downloaded in incorrect state").ignore() - } - State::SyncingTrustedHash { - highest_block_header, - .. - } => { - let should_start_downloading_deploys = highest_block_header - .as_ref() - .map(|hdr| hdr.hash() == *block.header().parent_hash()) - .unwrap_or(false) - || block.header().is_genesis_child(); - if should_start_downloading_deploys { - info!("linear chain downloaded. Start downloading deploys."); - effect_builder - .immediately() - .event(move |_| Event::StartDownloadingDeploys) - } else { - self.fetch_next_block(effect_builder, rng, block) - } - } - State::SyncingDescendants { .. } => { - // When synchronizing descendants, we want to download block and execute it - // before trying to download the next block in linear chain. - self.fetch_next_block_deploys(effect_builder) - } - } - } - - fn mark_done(&mut self, latest_block: Option) { - let latest_block = latest_block.map(Box::new); - self.state = State::Done(latest_block); - } - - /// Handles an event indicating that a linear chain block has been executed and handled by - /// consensus component. This is a signal that we can safely continue with the next blocks, - /// without worrying about timing and/or ordering issues. 
- /// Returns effects that are created as a response to that event. - fn block_handled( - &mut self, - rng: &mut NodeRng, - effect_builder: EffectBuilder, - block: Block, - ) -> Effects> - where - I: Send + 'static, - REv: ReactorEventT, - { - let height = block.height(); - let hash = block.hash(); - trace!(%hash, %height, "downloaded linear chain block."); - if block.header().is_switch_block() { - self.state.new_switch_block(&block); - } - if block.header().is_switch_block() && self.should_upgrade(block.header().era_id()) { - info!( - era = block.header().era_id().value(), - "shutting down for upgrade" - ); - return effect_builder - .immediately() - .event(|_| Event::InitUpgradeShutdown); - } - // Reset peers before creating new requests. - self.peers.reset(rng); - let block_height = block.height(); - let curr_state = mem::replace(&mut self.state, State::None); - match curr_state { - State::None | State::Done(_) => { - error!(state=?self.state, "block handled when in incorrect state."); - fatal!(effect_builder, "block handled in incorrect state").ignore() - } - // Keep syncing from genesis if we haven't reached the trusted block hash - State::SyncingTrustedHash { - highest_block_seen, - ref latest_block, - .. - } if highest_block_seen != block_height => { - match latest_block.as_ref() { - Some(expected) if expected != &block => { - error!( - ?expected, got=?block, - "block execution result doesn't match received block" - ); - return fatal!(effect_builder, "unexpected block execution result") - .ignore(); - } - None => { - error!("block execution results received when not expected"); - return fatal!(effect_builder, "unexpected block execution results.") - .ignore(); - } - Some(_) => (), - } - self.state = curr_state; - self.fetch_next_block_deploys(effect_builder) - } - // Otherwise transition to State::SyncingDescendants - State::SyncingTrustedHash { - highest_block_seen, - trusted_hash, - ref latest_block, - maybe_switch_block, - .. 
- } => { - assert_eq!(highest_block_seen, block_height); - match latest_block.as_ref() { - Some(expected) if expected != &block => { - error!( - ?expected, got=?block, - "block execution result doesn't match received block" - ); - return fatal!(effect_builder, "unexpected block execution result") - .ignore(); - } - None => { - error!("block execution results received when not expected"); - return fatal!(effect_builder, "unexpected block execution results.") - .ignore(); - } - Some(_) => (), - } - info!(%block_height, "Finished synchronizing linear chain up until trusted hash."); - let peer = self.peers.random_unsafe(); - // Kick off syncing trusted hash descendants. - self.state = State::sync_descendants(trusted_hash, block, maybe_switch_block); - fetch_block_at_height(effect_builder, peer, block_height + 1) - } - State::SyncingDescendants { - ref latest_block, - ref maybe_switch_block, - .. - } => { - if latest_block.as_ref() != &block { - error!( - expected=?*latest_block, got=?block, - "block execution result doesn't match received block" - ); - return fatal!(effect_builder, "unexpected block execution result").ignore(); - } - if self.is_recent_block(&block) { - info!( - hash=?block.hash(), - height=?block.header().height(), - era=block.header().era_id().value(), - "downloaded recent block. finished synchronization" - ); - self.mark_done(Some(*latest_block.clone())); - return Effects::new(); - } - if self.is_currently_active_era(&maybe_switch_block) { - info!( - hash=?block.hash(), - height=?block.header().height(), - era=block.header().era_id().value(), - "downloaded switch block of a new era. finished synchronization" - ); - self.mark_done(Some(*latest_block.clone())); - return Effects::new(); - } - self.state = curr_state; - self.fetch_next_block(effect_builder, rng, &block) - } - } - } - - // Returns whether `block` can be considered the tip of the chain. - fn is_recent_block(&self, block: &Block) -> bool { - // Check if block was created "recently". 
- block.header().timestamp().elapsed() <= self.acceptable_drift - } - - // Returns whether we've just downloaded a switch block of a currently active era. - fn is_currently_active_era(&self, maybe_switch_block: &Option>) -> bool { - match maybe_switch_block { - Some(switch_block) => switch_block.header().timestamp().elapsed() < self.shortest_era, - None => false, - } - } - - /// Returns effects for fetching next block's deploys. - fn fetch_next_block_deploys( - &mut self, - effect_builder: EffectBuilder, - ) -> Effects> - where - I: Send + 'static, - REv: ReactorEventT, - { - let peer = self.peers.random_unsafe(); - - let next_block = match &mut self.state { - State::None | State::Done(_) => { - error!(state=?self.state, "tried fetching next block when in wrong state"); - return fatal!( - effect_builder, - "tried fetching next block when in wrong state" - ) - .ignore(); - } - State::SyncingTrustedHash { - linear_chain, - latest_block, - .. - } => match linear_chain.pop() { - None => None, - Some(block) => { - // Update `latest_block` so that we can verify whether result of execution - // matches the expected value. - latest_block.replace(block.clone()); - Some(block) - } - }, - State::SyncingDescendants { latest_block, .. } => Some((**latest_block).clone()), - }; - - next_block.map_or_else( - || { - warn!("tried fetching next block deploys when there was no block."); - Effects::new() - }, - |block| { - self.metrics.reset_start_time(); - fetch_block_deploys(effect_builder, peer, block) - }, - ) - } - - fn fetch_next_block( - &mut self, - effect_builder: EffectBuilder, - rng: &mut NodeRng, - block: &Block, - ) -> Effects> - where - I: Send + 'static, - REv: ReactorEventT, - { - self.peers.reset(rng); - let peer = self.peers.random_unsafe(); - match self.state { - State::SyncingTrustedHash { .. 
} => { - let parent_hash = *block.header().parent_hash(); - self.metrics.reset_start_time(); - fetch_block_by_hash(effect_builder, peer, parent_hash) - } - State::SyncingDescendants { .. } => { - let next_height = block.height() + 1; - self.metrics.reset_start_time(); - fetch_block_at_height(effect_builder, peer, next_height) - } - State::Done(_) | State::None => { - error!(state=?self.state, "tried fetching next block when in wrong state"); - fatal!( - effect_builder, - "tried fetching next block when in wrong state" - ) - .ignore() - } - } - } - - fn handle_upgrade_shutdown( - &mut self, - effect_builder: EffectBuilder, - ) -> Effects> - where - I: Send + 'static, - REv: ReactorEventT, - { - if self.state.is_done() || self.state.is_none() { - error!(state=?self.state, "shutdown for upgrade initiated when in wrong state"); - return fatal!( - effect_builder, - "shutdown for upgrade initiated when in wrong state" - ) - .ignore(); - } - effect_builder - .save_state(self.state_key.clone().into(), Some(self.state.clone())) - .event(|_| Event::Shutdown(true)) - } - - pub(crate) fn latest_block(&self) -> Option<&Block> { - match &self.state { - State::SyncingTrustedHash { latest_block, .. } => Option::as_ref(&*latest_block), - State::SyncingDescendants { latest_block, .. } => Some(&*latest_block), - State::Done(latest_block) => latest_block.as_deref(), - State::None => None, - } - } - - fn should_upgrade(&self, era_id: EraId) -> bool { - match self.next_upgrade_activation_point { - None => false, - Some(activation_point) => activation_point.should_upgrade(&era_id), - } - } - - fn set_last_block_if_syncing_trusted_hash(&mut self, block: &Block) { - if let State::SyncingTrustedHash { - ref mut latest_block, - .. 
- } = &mut self.state - { - *latest_block = Box::new(Some(block.clone())); - } - self.state.block_downloaded(block); - } -} - -impl Component for LinearChainSync -where - I: Display + Clone + Send + PartialEq + 'static, - REv: ReactorEventT, -{ - type Event = Event; - type ConstructionError = Infallible; - - fn handle_event( - &mut self, - effect_builder: EffectBuilder, - rng: &mut NodeRng, - event: Self::Event, - ) -> Effects { - match event { - Event::Start(init_peer) => { - match &self.state { - State::None => { - // No syncing configured. - trace!("received `Start` event when in {} state.", self.state); - Effects::new() - } - State::Done(_) => { - // Illegal states for syncing start. - error!("should not have received `Start` event when in `Done` state.",); - Effects::new() - } - State::SyncingDescendants { latest_block, .. } => { - let next_block_height = latest_block.height() + 1; - info!(?next_block_height, "start synchronization"); - self.metrics.reset_start_time(); - fetch_block_at_height(effect_builder, init_peer, next_block_height) - } - State::SyncingTrustedHash { trusted_hash, .. } => { - trace!(?trusted_hash, "start synchronization"); - // Start synchronization. - self.metrics.reset_start_time(); - fetch_block_by_hash(effect_builder, init_peer, *trusted_hash) - } - } - } - Event::GetBlockHeightResult(block_height, fetch_result) => { - match fetch_result { - BlockByHeightResult::Absent(peer) => { - self.metrics.observe_get_block_by_height(); - trace!( - %block_height, %peer, - "failed to download block by height. Trying next peer" - ); - self.peers.failure(&peer); - match self.peers.random() { - None => { - // `block_height` not found on any of the peers. - // We have synchronized all, currently existing, descendants of - // trusted hash. - info!( - "finished synchronizing descendants of the trusted hash. \ - cleaning state." 
- ); - self.mark_done(self.latest_block().cloned()); - Effects::new() - } - Some(peer) => { - self.metrics.reset_start_time(); - fetch_block_at_height(effect_builder, peer, block_height) - } - } - } - BlockByHeightResult::FromStorage(block) => { - // We shouldn't get invalid data from the storage. - // If we do, it's a bug. - assert_eq!(block.height(), block_height, "Block height mismatch."); - assert_eq!( - block.protocol_version(), - self.protocol_version, - "block protocol version mismatch" - ); - trace!(%block_height, "Linear block found in the local storage."); - // When syncing descendants of a trusted hash, we might have some of - // them in our local storage. If that's the case, just continue. - self.block_downloaded(rng, effect_builder, &block) - } - BlockByHeightResult::FromPeer(block, peer) => { - self.metrics.observe_get_block_by_height(); - trace!(%block_height, %peer, "linear chain block downloaded from a peer"); - if block.height() != block_height - || *block.header().parent_hash() != *self.latest_block().unwrap().hash() - { - warn!( - %peer, - got_height = block.height(), - expected_height = block_height, - got_parent = %block.header().parent_hash(), - expected_parent = %self.latest_block().unwrap().hash(), - "block mismatch", - ); - // NOTE: Signal misbehaving validator to networking layer. - self.peers.ban(&peer); - return self.handle_event( - effect_builder, - rng, - Event::GetBlockHeightResult( - block_height, - BlockByHeightResult::Absent(peer), - ), - ); - } - if block.protocol_version() != self.protocol_version { - warn!( - %peer, - protocol_version = %self.protocol_version, - block_version = %block.protocol_version(), - "block protocol version mismatch", - ); - // NOTE: Signal misbehaving validator to networking layer. 
- self.peers.ban(&peer); - return self.handle_event( - effect_builder, - rng, - Event::GetBlockHeightResult( - block_height, - BlockByHeightResult::Absent(peer), - ), - ); - } - self.peers.success(peer); - self.block_downloaded(rng, effect_builder, &block) - } - } - } - Event::GetBlockHashResult(block_hash, fetch_result) => { - match fetch_result { - BlockByHashResult::Absent(peer) => { - self.metrics.observe_get_block_by_hash(); - trace!( - %block_hash, %peer, - "failed to download block by hash. Trying next peer" - ); - self.peers.failure(&peer); - match self.peers.random() { - None if self.started_syncing => { - error!( - %block_hash, - "could not download linear block from any of the peers." - ); - fatal!(effect_builder, "failed to synchronize linear chain") - .ignore() - } - None => { - warn!( - "run out of peers before managed to start syncing. \ - Resetting peers' list and continuing" - ); - self.peers.reset(rng); - self.metrics.reset_start_time(); - fetch_block_by_hash(effect_builder, peer, block_hash) - } - Some(peer) => { - self.metrics.reset_start_time(); - fetch_block_by_hash(effect_builder, peer, block_hash) - } - } - } - BlockByHashResult::FromStorage(block) => { - // We shouldn't get invalid data from the storage. - // If we do, it's a bug. - assert_eq!(*block.hash(), block_hash, "Block hash mismatch."); - trace!(%block_hash, "Linear block found in the local storage."); - // We hit a block that we already had in the storage - which should mean - // that we also have all of its ancestors, so we switch to traversing the - // chain forwards and downloading the deploys. 
- // We don't want to download and execute a block we already have, so - // instead of calling self.block_downloaded(), we take a shortcut: - self.set_last_block_if_syncing_trusted_hash(&block); - self.block_handled(rng, effect_builder, *block) - } - BlockByHashResult::FromPeer(block, peer) => { - self.metrics.observe_get_block_by_hash(); - trace!(%block_hash, %peer, "linear chain block downloaded from a peer"); - let header_hash = block.header().hash(); - if header_hash != block_hash || header_hash != *block.hash() { - warn!( - "Block hash mismatch. Expected {} got {} from {}.\ - Block claims to have hash {}. Disconnecting.", - block_hash, - header_hash, - block.hash(), - peer - ); - // NOTE: Signal misbehaving validator to networking layer. - self.peers.ban(&peer); - return self.handle_event( - effect_builder, - rng, - Event::GetBlockHashResult( - block_hash, - BlockByHashResult::Absent(peer), - ), - ); - } - self.peers.success(peer); - self.block_downloaded(rng, effect_builder, &block) - } - } - } - Event::GetDeploysResult(fetch_result) => { - self.metrics.observe_get_deploys(); - match fetch_result { - event::DeploysResult::Found(block) => { - let block_hash = block.hash(); - trace!(%block_hash, "deploys for linear chain block found"); - // Reset used peers so we can download next block with the full set. - self.peers.reset(rng); - // Execute block - let finalized_block: FinalizedBlock = (*block).into(); - effect_builder.execute_block(finalized_block).ignore() - } - event::DeploysResult::NotFound(block, peer) => { - let block_hash = block.hash(); - trace!( - %block_hash, %peer, - "deploy for linear chain block not found. Trying next peer" - ); - self.peers.failure(&peer); - match self.peers.random() { - None => { - error!( - %block_hash, - "could not download deploys from linear chain block." 
- ); - fatal!(effect_builder, "failed to download linear chain deploys") - .ignore() - } - Some(peer) => { - self.metrics.reset_start_time(); - fetch_block_deploys(effect_builder, peer, *block) - } - } - } - } - } - Event::StartDownloadingDeploys => { - // Start downloading deploys from the first block of the linear chain. - self.peers.reset(rng); - self.fetch_next_block_deploys(effect_builder) - } - Event::NewPeerConnected(peer_id) => { - trace!(%peer_id, "new peer connected"); - // Add to the set of peers we can request things from. - let mut effects = Effects::new(); - if self.peers.is_empty() { - // First peer connected, start downloading. - let cloned_peer_id = peer_id.clone(); - effects.extend( - effect_builder - .immediately() - .event(move |_| Event::Start(cloned_peer_id)), - ); - } - self.peers.push(peer_id); - effects - } - Event::BlockHandled(block) => { - let block_height = block.height(); - let block_hash = *block.hash(); - let effects = self.block_handled(rng, effect_builder, *block); - trace!(%block_height, %block_hash, "block handled"); - effects - } - Event::GotUpgradeActivationPoint(next_upgrade_activation_point) => { - trace!(%next_upgrade_activation_point, "new activation point"); - self.next_upgrade_activation_point = Some(next_upgrade_activation_point); - Effects::new() - } - Event::InitUpgradeShutdown => { - info!("shutdown initiated"); - // Serialize and store state. - self.handle_upgrade_shutdown(effect_builder) - } - Event::Shutdown(upgrade) => { - info!(%upgrade, "ready for shutdown"); - self.stop_for_upgrade = upgrade; - Effects::new() - } - Event::InitializeTimeout => { - if !self.started_syncing { - info!("hasn't downloaded any blocks in expected time window. 
Shutting down…"); - fatal!(effect_builder, "no syncing progress, shutting down…").ignore() - } else { - Effects::new() - } - } - } - } -} - -fn fetch_block_deploys( - effect_builder: EffectBuilder, - peer: I, - block: Block, -) -> Effects> -where - REv: ReactorEventT, -{ - let block_timestamp = block.header().timestamp(); - effect_builder - .validate_block(peer.clone(), block, block_timestamp) - .event(move |(found, block)| { - if found { - Event::GetDeploysResult(DeploysResult::Found(Box::new(block))) - } else { - Event::GetDeploysResult(DeploysResult::NotFound(Box::new(block), peer)) - } - }) -} - -fn fetch_block_by_hash( - effect_builder: EffectBuilder, - peer: I, - block_hash: BlockHash, -) -> Effects> -where - REv: ReactorEventT, -{ - let cloned = peer.clone(); - effect_builder.fetch_block(block_hash, peer).map_or_else( - move |fetch_result| match fetch_result { - FetchResult::FromStorage(block) => { - Event::GetBlockHashResult(block_hash, BlockByHashResult::FromStorage(block)) - } - FetchResult::FromPeer(block, peer) => { - Event::GetBlockHashResult(block_hash, BlockByHashResult::FromPeer(block, peer)) - } - }, - move || Event::GetBlockHashResult(block_hash, BlockByHashResult::Absent(cloned)), - ) -} - -fn fetch_block_at_height( - effect_builder: EffectBuilder, - peer: I, - block_height: u64, -) -> Effects> -where - REv: ReactorEventT, -{ - let cloned = peer.clone(); - effect_builder - .fetch_block_by_height(block_height, peer.clone()) - .map_or_else( - move |fetch_result| match fetch_result { - FetchResult::FromPeer(result, _) => match *result { - BlockByHeight::Absent(ret_height) => { - warn!( - "Fetcher returned result for invalid height. 
Expected {}, got {}", - block_height, ret_height - ); - Event::GetBlockHeightResult(block_height, BlockByHeightResult::Absent(peer)) - } - BlockByHeight::Block(block) => Event::GetBlockHeightResult( - block_height, - BlockByHeightResult::FromPeer(block, peer), - ), - }, - FetchResult::FromStorage(result) => match *result { - BlockByHeight::Absent(_) => { - // Fetcher should try downloading the block from a peer - // when it can't find it in the storage. - panic!("Should not return `Absent` in `FromStorage`.") - } - BlockByHeight::Block(block) => Event::GetBlockHeightResult( - block_height, - BlockByHeightResult::FromStorage(block), - ), - }, - }, - move || Event::GetBlockHeightResult(block_height, BlockByHeightResult::Absent(cloned)), - ) -} - -/// Returns key in the database, under which the LinearChainSync's state is stored. -fn create_state_key(chainspec: &Chainspec) -> Vec { - format!( - "linear_chain_sync:network_name={}", - chainspec.network_config.name.clone() - ) - .into() -} - -/// Deserialized vector of bytes into `LinearChainSync::State`. -/// Panics on deserialization errors. -fn deserialize_state(serialized_state: &[u8]) -> Option { - bincode::deserialize(&serialized_state).unwrap_or_else(|error| { - // Panicking here should not corrupt the state of any component as it's done in the - // constructor. - panic!( - "could not deserialize state from storage, error {:?}", - error - ) - }) -} - -/// Reads the `LinearChainSync's` state from storage, if any. -/// Panics on deserialization errors. -pub(crate) fn read_init_state( - storage: &Storage, - chainspec: &Chainspec, -) -> Result, storage::Error> { - let key = create_state_key(&chainspec); - if let Some(bytes) = storage.read_state_store(&key)? { - Ok(deserialize_state(&bytes)) - } else { - Ok(None) - } -} - -/// Cleans the linear chain state storage. -/// May fail with storage error. 
-pub(crate) fn clean_linear_chain_state( - storage: &Storage, - chainspec: &Chainspec, -) -> Result { - let key = create_state_key(&chainspec); - storage.del_state_store(key) -} diff --git a/node/src/components/linear_chain_sync/event.rs b/node/src/components/linear_chain_sync/event.rs deleted file mode 100644 index ab1c916c2d..0000000000 --- a/node/src/components/linear_chain_sync/event.rs +++ /dev/null @@ -1,80 +0,0 @@ -use crate::types::{ActivationPoint, Block, BlockHash}; - -use std::fmt::{Debug, Display}; - -#[derive(Debug)] -pub enum Event { - Start(I), - GetBlockHashResult(BlockHash, BlockByHashResult), - GetBlockHeightResult(u64, BlockByHeightResult), - GetDeploysResult(DeploysResult), - StartDownloadingDeploys, - NewPeerConnected(I), - BlockHandled(Box), - GotUpgradeActivationPoint(ActivationPoint), - InitUpgradeShutdown, - /// An event instructing us to shutdown if we haven't downloaded any blocks. - InitializeTimeout, - Shutdown(bool), -} - -#[derive(Debug)] -pub enum DeploysResult { - Found(Box), - NotFound(Box, I), -} - -#[derive(Debug)] -pub enum BlockByHashResult { - Absent(I), - FromStorage(Box), - FromPeer(Box, I), -} - -#[derive(Debug)] -pub enum BlockByHeightResult { - Absent(I), - FromStorage(Box), - FromPeer(Box, I), -} - -impl Display for Event -where - I: Debug + Display, -{ - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - Event::Start(init_peer) => write!(f, "Start syncing from peer {}.", init_peer), - Event::GetBlockHashResult(block_hash, r) => { - write!(f, "Get block result for {}: {:?}", block_hash, r) - } - Event::GetDeploysResult(result) => { - write!(f, "Get deploys for block result {:?}", result) - } - Event::StartDownloadingDeploys => write!(f, "Start downloading deploys event."), - Event::NewPeerConnected(peer_id) => write!(f, "A new peer connected: {}", peer_id), - Event::BlockHandled(block) => { - let hash = block.hash(); - let height = block.height(); - write!( - f, - "Block has been 
handled by consensus. Hash {}, height {}", - hash, height - ) - } - Event::GetBlockHeightResult(height, res) => { - write!(f, "Get block result for height {}: {:?}", height, res) - } - Event::GotUpgradeActivationPoint(activation_point) => { - write!(f, "new upgrade activation point: {:?}", activation_point) - } - Event::InitUpgradeShutdown => write!(f, "shutdown for upgrade initiatied"), - Event::Shutdown(upgrade) => write!( - f, - "linear chain sync is ready for shutdown. upgrade: {}", - upgrade - ), - Event::InitializeTimeout => write!(f, "Initialize timeout"), - } - } -} diff --git a/node/src/components/linear_chain_sync/metrics.rs b/node/src/components/linear_chain_sync/metrics.rs deleted file mode 100644 index 9108b6abbc..0000000000 --- a/node/src/components/linear_chain_sync/metrics.rs +++ /dev/null @@ -1,80 +0,0 @@ -use std::time::Instant; - -use prometheus::{Histogram, HistogramOpts, Registry}; - -#[derive(Debug)] -pub struct LinearChainSyncMetrics { - get_block_by_hash: Histogram, - get_block_by_height: Histogram, - get_deploys: Histogram, - request_start: Instant, -} - -const GET_BLOCK_BY_HASH: &str = "linear_chain_sync_get_block_by_hash"; -const GET_BLOCK_BY_HASH_HELP: &str = "histogram of linear_chain_sync get_block_by_hash request"; -const GET_BLOCK_BY_HEIGHT: &str = "linear_chain_sync_get_block_by_height"; -const GET_BLOCK_BY_HEIGHT_HELP: &str = "histogram of linear_chain_sync get_block_by_height request"; -const GET_DEPLOYS: &str = "linear_chain_sync_get_deploys"; -const GET_DEPLOYS_HELP: &str = "histogram of linear_chain_sync get_deploys request"; - -/// Value of upper bound of histogram. -const EXPONENTIAL_BUCKET_START: f64 = 0.01; -/// Multiplier of previous upper bound for next bound. -const EXPONENTIAL_BUCKET_FACTOR: f64 = 2.0; -/// Bucket count, with last going to +Inf. -const EXPONENTIAL_BUCKET_COUNT: usize = 6; - -/// Create prometheus Histogram and register. 
-fn register_histogram_metric( - registry: &Registry, - metric_name: &str, - metric_help: &str, -) -> Result { - let common_buckets = prometheus::exponential_buckets( - EXPONENTIAL_BUCKET_START, - EXPONENTIAL_BUCKET_FACTOR, - EXPONENTIAL_BUCKET_COUNT, - )?; - let histogram_opts = HistogramOpts::new(metric_name, metric_help).buckets(common_buckets); - let histogram = Histogram::with_opts(histogram_opts)?; - registry.register(Box::new(histogram.clone()))?; - Ok(histogram) -} - -impl LinearChainSyncMetrics { - pub fn new(registry: &Registry) -> Result { - Ok(LinearChainSyncMetrics { - get_block_by_hash: register_histogram_metric( - registry, - GET_BLOCK_BY_HASH, - GET_BLOCK_BY_HASH_HELP, - )?, - get_block_by_height: register_histogram_metric( - registry, - GET_BLOCK_BY_HEIGHT, - GET_BLOCK_BY_HEIGHT_HELP, - )?, - get_deploys: register_histogram_metric(registry, GET_DEPLOYS, GET_DEPLOYS_HELP)?, - request_start: Instant::now(), - }) - } - - pub fn reset_start_time(&mut self) { - self.request_start = Instant::now(); - } - - pub fn observe_get_block_by_hash(&mut self) { - self.get_block_by_hash - .observe(self.request_start.elapsed().as_secs_f64()); - } - - pub fn observe_get_block_by_height(&mut self) { - self.get_block_by_height - .observe(self.request_start.elapsed().as_secs_f64()); - } - - pub fn observe_get_deploys(&mut self) { - self.get_deploys - .observe(self.request_start.elapsed().as_secs_f64()); - } -} diff --git a/node/src/components/linear_chain_sync/peers.rs b/node/src/components/linear_chain_sync/peers.rs deleted file mode 100644 index e0041b9ed0..0000000000 --- a/node/src/components/linear_chain_sync/peers.rs +++ /dev/null @@ -1,88 +0,0 @@ -use std::collections::VecDeque; - -use datasize::DataSize; -use rand::{seq::SliceRandom, Rng}; - -#[derive(DataSize, Debug)] -pub struct PeersState { - // Set of peers that we can request blocks from. - peers: Vec, - // Peers we have not yet requested current block from. 
- // NOTE: Maybe use a bitmask to decide which peers were tried? - peers_to_try: Vec, - // Peers we successfuly downloaded data from previously. - // Have higher chance of having the next data. - succ_peers: VecDeque, - succ_attempts: u8, - succ_attempts_max: u8, -} - -impl PeersState { - pub fn new() -> Self { - PeersState { - peers: Default::default(), - peers_to_try: Default::default(), - succ_peers: Default::default(), - succ_attempts: 0, - succ_attempts_max: 5, - } - } - - /// Resets `peers_to_try` back to all `peers` we know of. - pub(crate) fn reset(&mut self, rng: &mut R) { - self.peers_to_try = self.peers.clone(); - self.peers_to_try.as_mut_slice().shuffle(rng); - } - - /// Returns a random peer. - pub(crate) fn random(&mut self) -> Option { - if self.succ_attempts < self.succ_attempts_max { - self.next_succ().or_else(|| self.peers_to_try.pop()) - } else { - self.succ_attempts = 0; - self.peers_to_try.pop().or_else(|| self.next_succ()) - } - } - - /// Unsafe version of `random_peer`. - /// Panics if no peer is available for querying. - pub(crate) fn random_unsafe(&mut self) -> I { - self.random().expect("At least one peer available.") - } - - /// Peer misbehaved (returned us invalid data). - /// Remove it from the set of nodes we request data from. - pub(crate) fn ban(&mut self, peer: &I) { - self.peers.retain(|p| p != peer); - self.succ_peers.retain(|p| p != peer); - } - - /// Returns whether known peer set is empty. - pub(crate) fn is_empty(&self) -> bool { - self.peers.is_empty() - } - - /// Adds a new peer. - pub(crate) fn push(&mut self, peer: I) { - self.peers.push(peer) - } - - /// Returns the next peer, if any, that we downloaded data the previous time. - /// Keeps the peer in the set of `succ_peers`. - fn next_succ(&mut self) -> Option { - let peer = self.succ_peers.pop_front()?; - self.succ_peers.push_back(peer.clone()); - Some(peer) - } - - /// Peer didn't respond or didn't have the data we asked for. 
- pub(crate) fn failure(&mut self, peer: &I) { - self.succ_peers.retain(|id| id != peer); - } - - /// Peer had the data we asked for. - pub(crate) fn success(&mut self, peer: I) { - self.succ_attempts += 1; - self.succ_peers.push_back(peer); - } -} diff --git a/node/src/components/linear_chain_sync/state.rs b/node/src/components/linear_chain_sync/state.rs deleted file mode 100644 index 9b61927640..0000000000 --- a/node/src/components/linear_chain_sync/state.rs +++ /dev/null @@ -1,145 +0,0 @@ -use std::fmt::Display; - -use datasize::DataSize; -use serde::{Deserialize, Serialize}; - -use crate::types::{Block, BlockHash, BlockHeader}; - -#[derive(Clone, DataSize, Debug, Serialize, Deserialize)] -pub enum State { - /// No syncing of the linear chain configured. - None, - /// Synchronizing the linear chain up until trusted hash. - SyncingTrustedHash { - /// Linear chain block to start sync from. - trusted_hash: BlockHash, - /// The header of the highest block we have in storage (if any). - highest_block_header: Option>, - /// During synchronization we might see new eras being created. - /// Track the highest height and wait until it's handled by consensus. - highest_block_seen: u64, - /// Chain of downloaded blocks from the linear chain. - /// We will `pop()` when executing blocks. - linear_chain: Vec, - /// The most recent block we started to execute. This is updated whenever we start - /// downloading deploys for the next block to be executed. - latest_block: Box>, - /// Switch block of the current era. - /// Updated whenever we see a new switch block. - maybe_switch_block: Option>, - }, - /// Synchronizing the descendants of the trusted hash. - SyncingDescendants { - trusted_hash: BlockHash, - /// The most recent block we started to execute. This is updated whenever we start - /// downloading deploys for the next block to be executed. - latest_block: Box, - /// During synchronization we might see new eras being created. 
- /// Track the highest height and wait until it's handled by consensus. - highest_block_seen: u64, - /// Switch block of the current era. - /// Updated whenever we see a new switch block. - maybe_switch_block: Option>, - }, - /// Synchronizing done. The single field contains the highest block seen during the - /// synchronization process. - Done(Option>), -} - -impl Display for State { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - State::None => write!(f, "None"), - State::Done(latest_block) => write!(f, "Done(latest_block={})", - if let Some(block) = latest_block { - format!("{{ hash={}, height={} }}", block.hash(), block.height()) - } else { - "None".to_string() - }), - State::SyncingTrustedHash { trusted_hash, highest_block_seen, .. } => { - write!(f, "SyncingTrustedHash(trusted_hash={}, highest_block_seen={})", trusted_hash, highest_block_seen) - }, - State::SyncingDescendants { - trusted_hash, - latest_block, - .. - } => write!( - f, - "SyncingDescendants(trusted_hash={}, latest_block_hash={}, latest_block_height={}, latest_block_era={})", - trusted_hash, - latest_block.header().hash(), - latest_block.header().height(), - latest_block.header().era_id(), - ), - } - } -} - -impl State { - pub fn sync_trusted_hash( - trusted_hash: BlockHash, - highest_block_header: Option, - ) -> Self { - State::SyncingTrustedHash { - trusted_hash, - highest_block_header: highest_block_header.map(Box::new), - highest_block_seen: 0, - linear_chain: Vec::new(), - latest_block: Box::new(None), - maybe_switch_block: None, - } - } - - pub fn sync_descendants( - trusted_hash: BlockHash, - latest_block: Block, - maybe_switch_block: Option>, - ) -> Self { - State::SyncingDescendants { - trusted_hash, - latest_block: Box::new(latest_block), - highest_block_seen: 0, - maybe_switch_block, - } - } - - pub fn block_downloaded(&mut self, block: &Block) { - match self { - State::None | State::Done(_) => {} - State::SyncingTrustedHash { - 
highest_block_seen, .. - } - | State::SyncingDescendants { - highest_block_seen, .. - } => { - let curr_height = block.height(); - if curr_height > *highest_block_seen { - *highest_block_seen = curr_height; - } - } - }; - } - - /// Returns whether in `Done` state. - pub(crate) fn is_done(&self) -> bool { - matches!(self, State::Done(_)) - } - - /// Returns whether in `None` state. - pub(crate) fn is_none(&self) -> bool { - matches!(self, State::None) - } - - /// Updates the state with a new switch block. - pub(crate) fn new_switch_block(&mut self, block: &Block) { - match self { - State::SyncingDescendants { - maybe_switch_block, .. - } - | State::SyncingTrustedHash { - maybe_switch_block, .. - } => *maybe_switch_block = Some(Box::new(block.clone())), - _ => {} - } - } -} diff --git a/node/src/components/linear_chain_sync/traits.rs b/node/src/components/linear_chain_sync/traits.rs deleted file mode 100644 index 2d39383231..0000000000 --- a/node/src/components/linear_chain_sync/traits.rs +++ /dev/null @@ -1,33 +0,0 @@ -use crate::{ - effect::{ - announcements::ControlAnnouncement, - requests::{ - BlockValidationRequest, ContractRuntimeRequest, FetcherRequest, StateStoreRequest, - StorageRequest, - }, - }, - types::{Block, BlockByHeight}, -}; -pub trait ReactorEventT: - From - + From> - + From> - + From> - + From - + From - + From - + Send -{ -} - -impl ReactorEventT for REv where - REv: From - + From> - + From> - + From> - + From - + From - + From - + Send -{ -} diff --git a/node/src/components/metrics.rs b/node/src/components/metrics.rs index bb50f8f70b..505d7b8e32 100644 --- a/node/src/components/metrics.rs +++ b/node/src/components/metrics.rs @@ -14,16 +14,14 @@ //! Creation and instantiation of this component happens inside the `reactor::Reactor::new` //! function, which is passed in a `prometheus::Registry` (see 2.). //! -//! 2. Instantiation of an `XYZMetrics` struct should always be combined with registering all of -//! the metrics on a registry. 
For this reason it is advisable to have the `XYZMetrics::new` -//! method take a `prometheus::Registry` and register it directly. +//! 2. Instantiation of an `XYZMetrics` struct should always be combined with registering all of the +//! metrics on a registry. For this reason it is advisable to have the `XYZMetrics::new` method +//! take a `prometheus::Registry` and register it directly. //! //! 3. Updating metrics is done inside the `handle_event` function by simply calling methods on the //! fields of `self.metrics` (`: XYZMetrics`). **Important**: Metrics should never be read to //! prevent any actual logic depending on them. If a counter is being increment as a metric and -//! also required for busines logic, a second counter should be kept in the component's state. - -use std::convert::Infallible; +//! also required for business logic, a second counter should be kept in the component's state. use datasize::DataSize; use prometheus::{Encoder, Registry, TextEncoder}; @@ -35,6 +33,8 @@ use crate::{ NodeRng, }; +const COMPONENT_NAME: &str = "metrics"; + /// The metrics component. #[derive(DataSize, Debug)] pub(crate) struct Metrics { @@ -45,7 +45,6 @@ pub(crate) struct Metrics { impl Component for Metrics { type Event = MetricsRequest; - type ConstructionError = Infallible; fn handle_event( &mut self, @@ -72,6 +71,10 @@ impl Component for Metrics { } } } + + fn name(&self) -> &str { + COMPONENT_NAME + } } impl Metrics { diff --git a/node/src/components/network.rs b/node/src/components/network.rs index 898e19d50a..afa4b9d7cc 100644 --- a/node/src/components/network.rs +++ b/node/src/components/network.rs @@ -1,879 +1,1134 @@ -mod behavior; +//! Fully connected overlay network +//! +//! The *network component* is an overlay network where each node participating is attempting to +//! maintain a connection to every other node identified on the same network. The component does not +//! guarantee message delivery, so in between reconnections, messages may be lost. +//! 
+//! # Node IDs +//! +//! Each node has a self-generated node ID based on its self-signed TLS certificate. Whenever a +//! connection is made to another node, it verifies the "server"'s certificate to check that it +//! connected to a valid node and sends its own certificate during the TLS handshake, establishing +//! identity. +//! +//! # Connection +//! +//! Every node has an ID and a public listening address. The objective of each node is to constantly +//! maintain an outgoing connection to each other node (and thus have an incoming connection from +//! these nodes as well). +//! +//! Any incoming connection is, after a handshake process, strictly read from, while any outgoing +//! connection is strictly used for sending messages, also after a handshake. +//! +//! Nodes gossip their public listening addresses periodically, and will try to establish and +//! maintain an outgoing connection to any new address learned. + +mod bincode_format; +pub(crate) mod blocklist; +mod chain_info; mod config; +mod counting_format; mod error; mod event; -mod gossip; -mod one_way_messaging; -mod peer_discovery; -mod protocol_id; +mod gossiped_address; +mod health; +mod identity; +mod insights; +mod limiter; +mod message; +mod message_pack_format; +mod metrics; +mod outgoing; +mod symmetry; +pub(crate) mod tasks; #[cfg(test)] mod tests; -#[cfg(test)] -mod tests_bulk_gossip; use std::{ - collections::{HashMap, HashSet}, - env, + collections::{ + hash_map::{Entry, HashMap}, + BTreeMap, BTreeSet, HashSet, + }, fmt::{self, Debug, Display, Formatter}, - marker::PhantomData, - num::NonZeroU32, - sync::{Arc, Mutex}, - time::Duration, + io, + net::{SocketAddr, TcpListener}, + sync::{Arc, Weak}, + time::{Duration, Instant}, }; use datasize::DataSize; -use futures::{future::BoxFuture, FutureExt}; -use libp2p::{ - core::{connection::ConnectedPoint, upgrade}, - gossipsub::GossipsubEvent, - identify::IdentifyEvent, - identity::Keypair, - kad::KademliaEvent, - mplex::{MaxBufferBehaviour, 
MplexConfig}, - noise::{self, NoiseConfig, X25519Spec}, - request_response::{RequestResponseEvent, RequestResponseMessage}, - swarm::{SwarmBuilder, SwarmEvent}, - tcp::TokioTcpConfig, - Multiaddr, PeerId, Swarm, Transport, +use itertools::Itertools; +use prometheus::Registry; +use rand::{ + seq::{IteratorRandom, SliceRandom}, + Rng, }; -use prometheus::{IntGauge, Registry}; -use rand::seq::IteratorRandom; use serde::{Deserialize, Serialize}; -use tokio::{select, sync::watch, task::JoinHandle, time}; -use tracing::{debug, error, info, trace, warn}; +use tokio::{ + net::TcpStream, + sync::{ + mpsc::{self, UnboundedSender}, + watch, + }, + task::JoinHandle, +}; +use tokio_openssl::SslStream; +use tokio_util::codec::LengthDelimitedCodec; +use tracing::{debug, error, info, trace, warn, Instrument, Span}; -pub(crate) use self::event::Event; +#[cfg(test)] +use futures::{future::BoxFuture, FutureExt}; + +use casper_types::{EraId, PublicKey, SecretKey}; + +pub(crate) use self::{ + bincode_format::BincodeFormat, + config::{Config, IdentityConfig}, + error::Error, + event::Event, + gossiped_address::GossipedAddress, + identity::Identity, + insights::NetworkInsights, + message::{ + within_message_size_limit_tolerance, EstimatorWeights, FromIncoming, Message, MessageKind, + Payload, + }, +}; use self::{ - behavior::{Behavior, SwarmBehaviorEvent}, - gossip::GossipMessage, - one_way_messaging::{Codec as OneWayCodec, Outgoing as OneWayOutgoingMessage}, - protocol_id::ProtocolId, + blocklist::BlocklistJustification, + chain_info::ChainInfo, + counting_format::{ConnectionId, CountingFormat, Role}, + error::{ConnectionError, Result}, + event::{IncomingConnection, OutgoingConnection}, + health::{HealthConfig, TaggedTimestamp}, + limiter::Limiter, + message::NodeKeyPair, + metrics::Metrics, + outgoing::{DialOutcome, DialRequest, OutgoingConfig, OutgoingManager}, + symmetry::ConnectionSymmetry, + tasks::{MessageQueueItem, NetworkContext}, }; -pub use self::{config::Config, 
error::Error}; use crate::{ - components::{networking_metrics::NetworkingMetrics, Component}, + components::{gossiper::GossipItem, Component, ComponentState, InitializedComponent}, effect::{ - announcements::NetworkAnnouncement, - requests::{NetworkInfoRequest, NetworkRequest}, - EffectBuilder, EffectExt, Effects, + announcements::PeerBehaviorAnnouncement, + requests::{BeginGossipRequest, NetworkInfoRequest, NetworkRequest, StorageRequest}, + AutoClosingResponder, EffectBuilder, EffectExt, Effects, GossipTarget, }, - fatal, - reactor::{EventQueueHandle, Finalize, QueueKind, ReactorEvent}, - types::{Chainspec, NodeId}, - utils::{self, ds, CountingReceiver, CountingSender, DisplayIter}, + reactor::ReactorEvent, + tls, + types::{NodeId, ValidatorMatrix}, + utils::{self, display_error, Source}, NodeRng, }; -/// Env var which, if it's defined at runtime, enables the network (libp2p based) component. -pub(crate) const ENABLE_LIBP2P_NET_ENV_VAR: &str = "CASPER_ENABLE_LIBP2P_NET"; +const COMPONENT_NAME: &str = "network"; -/// How long to sleep before reconnecting -const RECONNECT_DELAY: Duration = Duration::from_millis(500); +/// How often to keep attempting to reconnect to a node before giving up. Note that reconnection +/// delays increase exponentially! +const RECONNECTION_ATTEMPTS: u8 = 8; -/// A helper trait whose bounds represent the requirements for a payload that `Network` can -/// work with. -pub trait PayloadT: - Serialize + for<'de> Deserialize<'de> + Clone + Debug + Display + Send + 'static -{ -} +/// Basic reconnection timeout. +/// +/// The first reconnection attempt will be made after 2x this timeout. +const BASE_RECONNECTION_TIMEOUT: Duration = Duration::from_secs(1); -impl

PayloadT for P where - P: Serialize + for<'de> Deserialize<'de> + Clone + Debug + Display + Send + 'static -{ -} +/// Interval during which to perform outgoing manager housekeeping. +const OUTGOING_MANAGER_SWEEP_INTERVAL: Duration = Duration::from_secs(1); -/// A helper trait whose bounds represent the requirements for a reactor event that `Network` can -/// work with. -pub trait ReactorEventT: - ReactorEvent + From> + From> + Send + 'static -{ -} +/// How often to send a ping down a healthy connection. +const PING_INTERVAL: Duration = Duration::from_secs(30); -impl ReactorEventT

for REv
-where
-    P: PayloadT,
-    REv: ReactorEvent + From> + From> + Send + 'static,
-{
-}
+/// Maximum time to wait for a ping response before connections are severed.
+///
+/// If you are running a network under very extreme conditions, it may make sense to alter these
+/// values, but usually these values should not require changing.
+///
+/// `PING_TIMEOUT` should be less than `PING_INTERVAL` at all times.
+const PING_TIMEOUT: Duration = Duration::from_secs(6);
+
+/// How many pings to send before giving up and dropping the connection.
+const PING_RETRIES: u16 = 5;
 
-#[derive(PartialEq, Eq, Debug, DataSize)]
-enum ConnectionState {
-    Pending,
-    Connected,
-    Failed,
+#[derive(Clone, DataSize, Debug)]
+pub(crate) struct OutgoingHandle

{ + #[data_size(skip)] // Unfortunately, there is no way to inspect an `UnboundedSender`. + sender: UnboundedSender>, + peer_addr: SocketAddr, } -// TODO: Get rid of the `Arc>` ASAP. -fn estimate_known_addresses(map: &Arc>>) -> usize { - ds::hash_map_fixed_size(&*(map.lock().expect("lock poisoned"))) +impl

Display for OutgoingHandle

{ + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!(f, "outgoing handle to {}", self.peer_addr) + } } #[derive(DataSize)] -pub struct Network { +pub(crate) struct Network +where + REv: 'static, + P: Payload, +{ + /// Initial configuration values. + cfg: Config, + /// Read-only networking information shared across tasks. + context: Arc>, + + /// Outgoing connections manager. + outgoing_manager: OutgoingManager, ConnectionError>, + /// Tracks whether a connection is symmetric or not. + connection_symmetries: HashMap, + + /// Tracks nodes that have announced themselves as nodes that are syncing. + syncing_nodes: HashSet, #[data_size(skip)] - network_identity: NetworkIdentity, - our_id: NodeId, - /// The set of peers which are current connected to our node. Kept in sync with libp2p - /// internals. - // DataSize note: Connected point contains `Arc`'ed Vecs internally, this is better than - // skipping at least. - #[data_size(with = ds::hash_map_fixed_size)] - peers: HashMap, - /// The set of peers whose address we currently know. Kept in sync with the internal Kademlia - /// routing table. - // DataSize note: `PeerId`s can likely be estimated using `mem::size_of`. - #[data_size(with = ds::hash_set_fixed_size)] - seen_peers: HashSet, - #[data_size(with = ds::vec_fixed_size)] - listening_addresses: Vec, - /// The addresses of known peers to be used for bootstrapping, and their connection states. - /// Wrapped in a [Mutex] so it can be shared with [SwarmEvent] handling (which runs in a - /// separate thread). - #[data_size(with = estimate_known_addresses)] - known_addresses_mut: Arc>>, - /// Whether this node is a bootstrap node or not. - is_bootstrap_node: bool, - /// The channel through which to send outgoing one-way requests. - one_way_message_sender: CountingSender, - max_one_way_message_size: u32, - /// The channel through which to send new messages for gossiping. 
- gossip_message_sender: CountingSender, - max_gossip_message_size: u32, - /// Channel signaling a shutdown of the network component. + channel_management: Option, + + /// Networking metrics. #[data_size(skip)] - shutdown_sender: Option>, + net_metrics: Arc, + + /// The outgoing bandwidth limiter. #[data_size(skip)] - server_join_handle: Option>, + outgoing_limiter: Limiter, - /// Networking metrics. + /// The limiter for incoming resource usage. + /// + /// This is not incoming bandwidth but an independent resource estimate. #[data_size(skip)] - net_metrics: NetworkingMetrics, + incoming_limiter: Limiter, + + /// The era that is considered the active era by the network component. + active_era: EraId, - _phantom: PhantomData<(REv, P)>, + /// The state of this component. + state: ComponentState, } -impl, P: PayloadT> Network { - /// Creates a new small network component instance. - /// - /// If `notify` is set to `false`, no systemd notifications will be sent, regardless of - /// configuration. +struct ChannelManagement { + /// Channel signaling a shutdown of the network. + // Note: This channel is closed when `Network` is dropped, signalling the receivers that + // they should cease operation. + #[allow(dead_code)] + shutdown_sender: Option>, + + /// Join handle for the server thread. + #[allow(dead_code)] + server_join_handle: Option>, + + /// Channel signaling a shutdown of the incoming connections. + // Note: This channel is closed when we finished syncing, so the `Network` can close all + // connections. When they are re-established, the proper value of the now updated `is_syncing` + // flag will be exchanged on handshake. + #[allow(dead_code)] + close_incoming_sender: Option>, + + /// Handle used by the `message_reader` task to receive a notification that incoming + /// connections should be closed. + close_incoming_receiver: watch::Receiver<()>, +} + +impl Network +where + P: Payload + 'static, + REv: ReactorEvent + + From> + + FromIncoming

+ + From + + From> + + From + + From>, +{ + /// Creates a new network component instance. #[allow(clippy::type_complexity)] - pub(crate) fn new( - event_queue: EventQueueHandle, - config: Config, + pub(crate) fn new>( + cfg: Config, + our_identity: Identity, + node_key_pair: Option<(Arc, PublicKey)>, registry: &Registry, - network_identity: NetworkIdentity, - chainspec: &Chainspec, - notify: bool, - ) -> Result<(Network, Effects>), Error> { - let our_peer_id = PeerId::from(&network_identity); - let our_id = NodeId::from(&network_identity); - - // Convert the known addresses to multiaddr format and prepare the shutdown signal. - let known_addresses = config - .known_addresses - .iter() - .map(|address| { - let multiaddr = address_str_to_multiaddr(address.as_str()); - (multiaddr, ConnectionState::Pending) - }) - .collect::>(); + chain_info_source: C, + validator_matrix: ValidatorMatrix, + allow_handshake: bool, + ) -> Result> { + let net_metrics = Arc::new(Metrics::new(registry)?); + + let outgoing_limiter = Limiter::new( + cfg.max_outgoing_byte_rate_non_validators, + net_metrics.accumulated_outgoing_limiter_delay.clone(), + validator_matrix.clone(), + ); + + let incoming_limiter = Limiter::new( + cfg.max_incoming_message_rate_non_validators, + net_metrics.accumulated_incoming_limiter_delay.clone(), + validator_matrix, + ); + + let outgoing_manager = OutgoingManager::with_metrics( + OutgoingConfig { + retry_attempts: RECONNECTION_ATTEMPTS, + base_timeout: BASE_RECONNECTION_TIMEOUT, + unblock_after_min: cfg.blocklist_retain_min_duration.into(), + unblock_after_max: cfg.blocklist_retain_max_duration.into(), + sweep_timeout: cfg.max_addr_pending_time.into(), + health: HealthConfig { + ping_interval: PING_INTERVAL, + ping_timeout: PING_TIMEOUT, + ping_retries: PING_RETRIES, + pong_limit: (1 + PING_RETRIES as u32) * 2, + }, + }, + net_metrics.create_outgoing_metrics(), + ); + + let context = Arc::new(NetworkContext::new( + &cfg, + our_identity, + 
node_key_pair.map(NodeKeyPair::new), + chain_info_source.into(), + &net_metrics, + allow_handshake, + )); + + let component = Network { + cfg, + context, + outgoing_manager, + connection_symmetries: HashMap::new(), + syncing_nodes: HashSet::new(), + channel_management: None, + net_metrics, + outgoing_limiter, + incoming_limiter, + // We start with an empty set of validators for era 0 and expect to be updated. + active_era: EraId::new(0), + state: ComponentState::Uninitialized, + }; + + Ok(component) + } + + fn initialize(&mut self, effect_builder: EffectBuilder) -> Result>> { + let mut known_addresses = HashSet::new(); + for address in &self.cfg.known_addresses { + match utils::resolve_address(address) { + Ok(known_address) => { + if !known_addresses.insert(known_address) { + warn!(%address, resolved=%known_address, "ignoring duplicated known address"); + }; + } + Err(ref err) => { + warn!(%address, err=display_error(err), "failed to resolve known address"); + } + } + } // Assert we have at least one known address in the config. if known_addresses.is_empty() { - warn!("{}: no known addresses provided via config", our_id); - return Err(Error::NoKnownAddress); + warn!("no known addresses provided via config or all failed DNS resolution"); + return Err(Error::EmptyKnownHosts); } - let (one_way_message_sender, one_way_message_receiver) = - utils::counting_unbounded_channel(); - let (gossip_message_sender, gossip_message_receiver) = utils::counting_unbounded_channel(); - let (server_shutdown_sender, server_shutdown_receiver) = watch::channel(()); + let mut public_addr = + utils::resolve_address(&self.cfg.public_address).map_err(Error::ResolveAddr)?; - // If the env var "CASPER_ENABLE_LIBP2P_NET" is defined, start the server and exit. 
- if env::var(ENABLE_LIBP2P_NET_ENV_VAR).is_err() { - let network = Network { - network_identity, - our_id, - peers: HashMap::new(), - seen_peers: HashSet::new(), - listening_addresses: vec![], - known_addresses_mut: Arc::new(Mutex::new(known_addresses)), - is_bootstrap_node: config.is_bootstrap_node, - one_way_message_sender, - max_one_way_message_size: 0, - gossip_message_sender, - max_gossip_message_size: 0, - shutdown_sender: Some(server_shutdown_sender), - server_join_handle: None, - net_metrics: NetworkingMetrics::new(&Registry::default())?, - _phantom: PhantomData, - }; - return Ok((network, Effects::new())); - } + // We can now create a listener. + let bind_address = + utils::resolve_address(&self.cfg.bind_address).map_err(Error::ResolveAddr)?; + let listener = TcpListener::bind(bind_address) + .map_err(|error| Error::ListenerCreation(error, bind_address))?; + // We must set non-blocking to `true` or else the tokio task hangs forever. + listener + .set_nonblocking(true) + .map_err(Error::ListenerSetNonBlocking)?; - let net_metrics = NetworkingMetrics::new(registry).map_err(Error::MetricsError)?; + let local_addr = listener.local_addr().map_err(Error::ListenerAddr)?; - if notify { - debug!("our node id: {}", our_id); + // Substitute the actually bound port if set to 0. + if public_addr.port() == 0 { + public_addr.set_port(local_addr.port()); } - // Create a keypair for authenticated encryption of the transport. - let noise_keys = noise::Keypair::::new() - .into_authentic(&network_identity.keypair) - .map_err(Error::StaticKeypairSigning)?; - - let mut mplex_config = MplexConfig::default(); - mplex_config.set_max_buffer_behaviour(MaxBufferBehaviour::Block); - - // Create a tokio-based TCP transport. Use `noise` for authenticated encryption and `mplex` - // for multiplexing of substreams on a TCP stream. 
- let transport = TokioTcpConfig::new() - .nodelay(true) - .upgrade(upgrade::Version::V1) - .authenticate(NoiseConfig::xx(noise_keys).into_authenticated()) - .multiplex(MplexConfig::default()) - .timeout(config.connection_setup_timeout.into()) - .boxed(); - - // Create a Swarm to manage peers and events. - let behavior = Behavior::new( - &config, - &net_metrics, - chainspec, - network_identity.keypair.public(), + Arc::get_mut(&mut self.context) + .expect("should be no other pointers") + .initialize(public_addr, effect_builder.into_inner()); + + let protocol_version = self.context.chain_info().protocol_version; + // Run the server task. + // We spawn it ourselves instead of through an effect to get a hold of the join handle, + // which we need to shutdown cleanly later on. + info!(%local_addr, %public_addr, %protocol_version, "starting server background task"); + + let (server_shutdown_sender, server_shutdown_receiver) = watch::channel(()); + let (close_incoming_sender, close_incoming_receiver) = watch::channel(()); + + let context = self.context.clone(); + let server_join_handle = tokio::spawn( + tasks::server( + context, + tokio::net::TcpListener::from_std(listener).map_err(Error::ListenerConversion)?, + server_shutdown_receiver, + ) + .in_current_span(), ); - let mut swarm = SwarmBuilder::new(transport, behavior, our_peer_id) - .executor(Box::new(|future| { - tokio::spawn(future); - })) - .build(); - - // Specify listener. - let listening_address = address_str_to_multiaddr(config.bind_address.as_str()); - Swarm::listen_on(&mut swarm, listening_address.clone()).map_err(|error| Error::Listen { - address: listening_address.clone(), - error, - })?; - info!(%our_id, %listening_address, "network component started listening"); - - // Schedule connection attempts to known peers. 
- for address in known_addresses.keys() { - debug!(%our_id, %address, "dialing known address"); - Swarm::dial_addr(&mut swarm, address.clone()).map_err(|error| Error::DialPeer { - address: address.clone(), - error, - })?; - } - // Wrap the known_addresses in a mutex so we can share it with the server task. - let known_addresses_mut = Arc::new(Mutex::new(known_addresses)); - let is_bootstrap_node = config.is_bootstrap_node; - - // Start the server task. - let server_join_handle = Some(tokio::spawn(server_task( - event_queue, - one_way_message_receiver, - gossip_message_receiver, - server_shutdown_receiver, - swarm, - known_addresses_mut.clone(), - is_bootstrap_node, - net_metrics.queued_messages.clone(), - ))); - - let network = Network { - network_identity, - our_id, - peers: HashMap::new(), - seen_peers: HashSet::new(), - listening_addresses: vec![], - known_addresses_mut, - is_bootstrap_node, - one_way_message_sender, - max_one_way_message_size: config.max_one_way_message_size, - gossip_message_sender, - max_gossip_message_size: config.max_gossip_message_size, + let channel_management = ChannelManagement { shutdown_sender: Some(server_shutdown_sender), - server_join_handle, - net_metrics, - _phantom: PhantomData, + server_join_handle: Some(server_join_handle), + close_incoming_sender: Some(close_incoming_sender), + close_incoming_receiver, }; - Ok((network, Effects::new())) - } - fn handle_connection_established( - &mut self, - effect_builder: EffectBuilder, - peer_id: NodeId, - endpoint: ConnectedPoint, - num_established: NonZeroU32, - ) -> Effects> { - debug!(%peer_id, ?endpoint, %num_established,"{}: connection established", self.our_id); - - if let ConnectedPoint::Dialer { ref address } = endpoint { - let mut known_addresses = match self.known_addresses_mut.lock() { - Ok(known_addresses) => known_addresses, - Err(err) => { - return fatal!( - effect_builder, - "Could not acquire `known_addresses_mut` mutex: {:?}", - err - ) - .ignore() - } - }; - if let 
Some(state) = known_addresses.get_mut(address) { - if *state == ConnectionState::Pending { - *state = ConnectionState::Connected - } - } - }; + self.channel_management = Some(channel_management); + + // Learn all known addresses and mark them as unforgettable. + let now = Instant::now(); + let dial_requests: Vec<_> = known_addresses + .into_iter() + .filter_map(|addr| self.outgoing_manager.learn_addr(addr, true, now)) + .collect(); + + let mut effects = self.process_dial_requests(dial_requests); - let _ = self.peers.insert(peer_id, endpoint); + // Start broadcasting our public listening address. + effects.extend( + effect_builder + .set_timeout(self.cfg.initial_gossip_delay.into()) + .event(|_| Event::GossipOurAddress), + ); + + // Start regular housekeeping of the outgoing connections. + effects.extend( + effect_builder + .set_timeout(OUTGOING_MANAGER_SWEEP_INTERVAL) + .event(|_| Event::SweepOutgoing), + ); - self.net_metrics.peers.set(self.peers.len() as i64); - // TODO - see if this can be removed. The announcement is only used by the joiner reactor. - effect_builder.announce_new_peer(peer_id).ignore() + >::set_state(self, ComponentState::Initialized); + Ok(effects) } - /// Queues a message to be sent to a specific node. - fn send_message(&self, destination: NodeId, payload: P) { - let outgoing_message = match OneWayOutgoingMessage::new( - destination, - &payload, - self.max_one_way_message_size, - ) { - Ok(msg) => msg, - Err(error) => { - warn!(%error, %payload, "{}: failed to construct outgoing message", self.our_id); - return; - } - }; - if let Err(error) = self.one_way_message_sender.send_datasized(outgoing_message) { - warn!(%error, "{}: dropped outgoing message, server has shut down", self.our_id); - } else { - // `queued_message` might become -1 for a short amount of time, which is fine. - self.net_metrics.queued_messages.inc(); - } + /// Should only be called after component has been initialized. 
+ fn channel_management(&self) -> &ChannelManagement { + self.channel_management + .as_ref() + .expect("component not initialized properly") } - /// Queues a message to be sent to all nodes. - fn gossip_message(&self, payload: P) { - let gossip_message = match GossipMessage::new(&payload, self.max_gossip_message_size) { - Ok(msg) => msg, - Err(error) => { - warn!(%error, %payload, "{}: failed to construct new gossip message", self.our_id); - return; + /// Queues a message to be sent to validator nodes in the given era. + fn broadcast_message_to_validators(&self, msg: Arc>, era_id: EraId) { + self.net_metrics.broadcast_requests.inc(); + + let mut total_connected_validators_in_era = 0; + let mut total_outgoing_manager_connected_peers = 0; + + for peer_id in self.outgoing_manager.connected_peers() { + total_outgoing_manager_connected_peers += 1; + if self.outgoing_limiter.is_validator_in_era(era_id, &peer_id) { + total_connected_validators_in_era += 1; + self.send_message(peer_id, msg.clone(), None); } - }; - if let Err(error) = self.gossip_message_sender.send_datasized(gossip_message) { - warn!(%error, "{}: dropped new gossip message, server has shut down", self.our_id); } + + debug!( + msg = %msg, + era = era_id.value(), + total_connected_validators_in_era, + total_outgoing_manager_connected_peers, + "broadcast_message_to_validators" + ); } /// Queues a message to `count` random nodes on the network. 
- fn send_message_to_n_peers( + fn gossip_message( &self, rng: &mut NodeRng, - payload: P, + msg: Arc>, + gossip_target: GossipTarget, count: usize, - exclude: HashSet, + exclude: &HashSet, ) -> HashSet { - let peer_ids = self - .peers - .keys() - .filter(|&peer_id| !exclude.contains(peer_id)) - .choose_multiple(rng, count); - + let is_validator_in_era = + |era: EraId, peer_id: &NodeId| self.outgoing_limiter.is_validator_in_era(era, peer_id); + let peer_ids = choose_gossip_peers( + rng, + gossip_target, + count, + exclude, + self.outgoing_manager.connected_peers(), + is_validator_in_era, + ); if peer_ids.len() != count { - // TODO - set this to `warn!` once we are normally testing with networks large enough to - // make it a meaningful and infrequent log message. - trace!( - wanted = count, - selected = peer_ids.len(), - "{}: could not select enough random nodes for gossiping, not enough non-excluded \ - outgoing connections", - self.our_id - ); + let not_excluded = self + .outgoing_manager + .connected_peers() + .filter(|peer_id| !exclude.contains(peer_id)) + .count(); + if not_excluded > 0 { + let connected = self.outgoing_manager.connected_peers().count(); + debug!( + our_id=%self.context.our_id(), + %gossip_target, + wanted = count, + connected, + not_excluded, + selected = peer_ids.len(), + "could not select enough random nodes for gossiping" + ); + } } for &peer_id in &peer_ids { - self.send_message(*peer_id, payload.clone()); + self.send_message(peer_id, msg.clone(), None); } - peer_ids.into_iter().copied().collect() + peer_ids.into_iter().collect() } - /// Returns the node id of this network node. - #[cfg(test)] - pub(crate) fn node_id(&self) -> NodeId { - self.our_id - } + /// Queues a message to be sent to a specific node. + fn send_message( + &self, + dest: NodeId, + msg: Arc>, + opt_responder: Option>, + ) { + // Try to send the message. 
+ if let Some(connection) = self.outgoing_manager.get_route(dest) { + if msg.payload_is_unsafe_for_syncing_nodes() && self.syncing_nodes.contains(&dest) { + // We should never attempt to send an unsafe message to a peer that we know is still + // syncing. Since "unsafe" does usually not mean immediately catastrophic, we + // attempt to carry on, but warn loudly. + error!(kind=%msg.classify(), node_id=%dest, "sending unsafe message to syncing node"); + } - /// Returns the set of known addresses. - #[cfg(test)] - pub(crate) fn seen_peers(&self) -> &HashSet { - &self.seen_peers + if let Err(msg) = connection.sender.send((msg, opt_responder)) { + // We lost the connection, but that fact has not reached us yet. + warn!(our_id=%self.context.our_id(), %dest, ?msg, "dropped outgoing message, lost connection"); + } else { + self.net_metrics.queued_messages.inc(); + } + } else { + // We are not connected, so the reconnection is likely already in progress. + debug!(our_id=%self.context.our_id(), %dest, ?msg, "dropped outgoing message, no connection"); + } } -} - -fn our_id(swarm: &Swarm) -> NodeId { - NodeId::P2p(*Swarm::local_peer_id(swarm)) -} - -// TODO: Already refactored in branch. -#[allow(clippy::too_many_arguments)] -async fn server_task, P: PayloadT>( - event_queue: EventQueueHandle, - // Receives outgoing one-way messages to be sent out via libp2p. - mut one_way_outgoing_message_receiver: CountingReceiver, - // Receives new gossip messages to be sent out via libp2p. - mut gossip_message_receiver: CountingReceiver, - // Receives notification to shut down the server loop. - mut shutdown_receiver: watch::Receiver<()>, - mut swarm: Swarm, - known_addresses_mut: Arc>>, - is_bootsrap_node: bool, - queued_messages: IntGauge, -) { - //let our_id = our - async move { - loop { - // Note that `select!` will cancel all futures on branches not eventually selected by - // dropping them. Each future inside this macro must be cancellation-safe. - select! 
{ - // `swarm.next_event()` is cancellation-safe - see - // https://github.com/libp2p/rust-libp2p/issues/1876 - swarm_event = swarm.next_event() => { - trace!("{}: {:?}", our_id(&swarm), swarm_event); - handle_swarm_event(&mut swarm, event_queue, swarm_event, &known_addresses_mut, is_bootsrap_node).await; - } - - // `UnboundedReceiver::recv()` is cancellation safe - see - // https://tokio.rs/tokio/tutorial/select#cancellation - maybe_outgoing_message = one_way_outgoing_message_receiver.recv() => { - match maybe_outgoing_message { - Some(outgoing_message) => { - queued_messages.dec(); - // We've received a one-way request to send to a peer. - swarm.send_one_way_message(outgoing_message); - } - None => { - // The data sender has been dropped - exit the loop. - info!("{}: exiting network server task", our_id(&swarm)); - break; - } - } - } - - // `UnboundedReceiver::recv()` is cancellation safe - see - // https://tokio.rs/tokio/tutorial/select#cancellation - maybe_gossip_message = gossip_message_receiver.recv() => { - match maybe_gossip_message { - Some(gossip_message) => { - // We've received a new message to be gossiped. - swarm.gossip(gossip_message); - } - None => { - // The data sender has been dropped - exit the loop. - info!("{}: exiting network server task", our_id(&swarm)); - break; + fn handle_incoming_connection( + &mut self, + incoming: Box>, + span: Span, + ) -> Effects> { + span.clone().in_scope(|| match *incoming { + IncomingConnection::FailedEarly { + peer_addr: _, + ref error, + } => { + // Failed without much info, there is little we can do about this. 
+ debug!(err=%display_error(error), "incoming connection failed early"); + Effects::new() + } + IncomingConnection::Failed { + peer_addr: _, + peer_id: _, + ref error, + } => { + debug!( + err = display_error(error), + "incoming connection failed after TLS setup" + ); + Effects::new() + } + IncomingConnection::Loopback => { + // Loopback connections are closed immediately, but will be marked as such by the + // outgoing manager. We still record that it succeeded in the log, but this should + // be the only time per component instantiation that this happens. + info!("successful incoming loopback connection, will be dropped"); + Effects::new() + } + IncomingConnection::Established { + peer_addr, + public_addr, + peer_id, + peer_consensus_public_key, + stream, + } => { + if self.cfg.max_incoming_peer_connections != 0 { + if let Some(symmetries) = self.connection_symmetries.get(&peer_id) { + let incoming_count = symmetries + .incoming_addrs() + .map(BTreeSet::len) + .unwrap_or_default(); + + if incoming_count >= self.cfg.max_incoming_peer_connections as usize { + info!(%public_addr, + %peer_id, + count=incoming_count, + limit=self.cfg.max_incoming_peer_connections, + "rejecting new incoming connection, limit for peer exceeded" + ); + return Effects::new(); } } } - maybe_shutdown = shutdown_receiver.changed() => { - // Since a `watch` channel is always constructed with an initial value enqueued, - // ignore this (and any others) from the `shutdown_receiver`. + info!(%public_addr, "new incoming connection established"); + + // Learn the address the peer gave us. + let dial_requests = + self.outgoing_manager + .learn_addr(public_addr, false, Instant::now()); + let mut effects = self.process_dial_requests(dial_requests); + + // Update connection symmetries. 
+ if self + .connection_symmetries + .entry(peer_id) + .or_default() + .add_incoming(peer_addr, Instant::now()) + { + self.connection_completed(peer_id); + + // We should NOT update the syncing set when we receive an incoming connection, + // because the `message_sender` which is handling the corresponding outgoing + // connection will not receive the update of the syncing state of the remote + // peer. // - // When the receiver yields an `Err`, the sender has been dropped, indicating we - // should exit this loop. - if maybe_shutdown.is_err() { - info!("{}: shutting down libp2p", our_id(&swarm)); - break; - } + // Such desync may cause the node to try to send "unsafe" requests to the + // syncing node, because the outgoing connection may outlive the + // incoming one, i.e. it may take some time to drop "our" outgoing + // connection after a peer has closed the corresponding incoming connection. } + + // Now we can start the message reader. + let boxed_span = Box::new(span.clone()); + effects.extend( + tasks::message_reader( + self.context.clone(), + stream, + self.incoming_limiter + .create_handle(peer_id, peer_consensus_public_key), + self.channel_management().close_incoming_receiver.clone(), + peer_id, + span.clone(), + ) + .instrument(span) + .event(move |result| Event::IncomingClosed { + result, + peer_id: Box::new(peer_id), + peer_addr, + span: boxed_span, + }), + ); + + effects } - } + }) } - .await; -} -async fn handle_swarm_event, P: PayloadT, E: Display>( - swarm: &mut Swarm, - event_queue: EventQueueHandle, - swarm_event: SwarmEvent, - known_addresses_mut: &Arc>>, - is_bootstrap_node: bool, -) { - let event = match swarm_event { - SwarmEvent::ConnectionEstablished { - peer_id, - endpoint, - num_established, - } => { - // If we dialed the peer, add their listening address to our kademlia instance. 
- if endpoint.is_dialer() { - swarm.add_discovered_peer(&peer_id, vec![endpoint.get_remote_address().clone()]); + fn handle_incoming_closed( + &mut self, + result: io::Result<()>, + peer_id: NodeId, + peer_addr: SocketAddr, + span: Span, + ) -> Effects> { + span.in_scope(|| { + // Log the outcome. + match result { + Ok(()) => info!("regular connection closing"), + Err(ref err) => warn!(err = display_error(err), "connection dropped"), } - Event::ConnectionEstablished { - peer_id: Box::new(NodeId::from(peer_id)), - endpoint, - num_established, + + // Update the connection symmetries. + if let Entry::Occupied(mut entry) = self.connection_symmetries.entry(peer_id) { + if entry.get_mut().remove_incoming(peer_addr, Instant::now()) { + entry.remove(); + } } - } - SwarmEvent::ConnectionClosed { - peer_id, - endpoint, - num_established, - cause, - } => { - // If we lost the final connection to this peer, do a random kademlia lookup to - // discover any new/replacement peers. - if num_established == 0 { - swarm.discover_peers() + + Effects::new() + }) + } + + /// Determines whether an outgoing peer should be blocked based on the connection error. + fn is_blockable_offense_for_outgoing( + error: &ConnectionError, + ) -> Option { + match error { + // Potentially transient failures. + // + // Note that incompatible versions need to be considered transient, since they occur + // during regular upgrades. + ConnectionError::TlsInitialization(_) + | ConnectionError::TcpConnection(_) + | ConnectionError::TcpNoDelay(_) + | ConnectionError::TlsHandshake(_) + | ConnectionError::HandshakeSend(_) + | ConnectionError::HandshakeRecv(_) + | ConnectionError::HandshakeNotAllowed + | ConnectionError::IncompatibleVersion(_) => None, + + // These errors are potential bugs on our side. 
+ ConnectionError::HandshakeSenderCrashed(_) + | ConnectionError::FailedToReuniteHandshakeSinkAndStream + | ConnectionError::CouldNotEncodeOurHandshake(_) => None, + + // These could be candidates for blocking, but for now we decided not to. + ConnectionError::NoPeerCertificate + | ConnectionError::PeerCertificateInvalid(_) + | ConnectionError::DidNotSendHandshake + | ConnectionError::InvalidRemoteHandshakeMessage(_) + | ConnectionError::InvalidConsensusCertificate(_) => None, + + // Definitely something we want to avoid. + ConnectionError::WrongNetwork(peer_network_name) => { + Some(BlocklistJustification::WrongNetwork { + peer_network_name: peer_network_name.clone(), + }) } - Event::ConnectionClosed { - peer_id: Box::new(NodeId::from(peer_id)), - endpoint, - num_established, - cause: cause.map(|error| error.to_string()), + ConnectionError::WrongChainspecHash(peer_chainspec_hash) => { + Some(BlocklistJustification::WrongChainspecHash { + peer_chainspec_hash: *peer_chainspec_hash, + }) + } + ConnectionError::MissingChainspecHash => { + Some(BlocklistJustification::MissingChainspecHash) } } - SwarmEvent::UnreachableAddr { - peer_id, - address, - error, - attempts_remaining, - } => Event::UnreachableAddress { - peer_id: Box::new(NodeId::from(peer_id)), - address, - error, - attempts_remaining, - }, - SwarmEvent::UnknownPeerUnreachableAddr { address, error } => { - debug!(%address, %error, "{}: failed to connect", our_id(&swarm)); - let we_are_isolated = match known_addresses_mut.lock() { - Err(err) => { - panic!("Could not acquire `known_addresses_mut` mutex: {:?}", err) + } + + /// Sets up an established outgoing connection. + /// + /// Initiates sending of the handshake as soon as the connection is established. + #[allow(clippy::redundant_clone)] + fn handle_outgoing_connection( + &mut self, + outgoing: OutgoingConnection

, + span: Span, + rng: &mut NodeRng, + ) -> Effects> { + let now = Instant::now(); + span.clone().in_scope(|| match outgoing { + OutgoingConnection::FailedEarly { peer_addr, error } + | OutgoingConnection::Failed { + peer_addr, + peer_id: _, + error, + } => { + debug!(err=%display_error(&error), "outgoing connection failed"); + // We perform blocking first, to not trigger a reconnection before blocking. + let mut requests = Vec::new(); + + if let Some(justification) = Self::is_blockable_offense_for_outgoing(&error) { + requests.extend(self.outgoing_manager.block_addr( + peer_addr, + now, + justification, + rng, + )); } - Ok(mut known_addresses) => { - if let Some(state) = known_addresses.get_mut(&address) { - if *state == ConnectionState::Pending { - *state = ConnectionState::Failed - } - } - network_is_isolated(&*known_addresses) + + // Now we can proceed with the regular updates. + requests.extend( + self.outgoing_manager + .handle_dial_outcome(DialOutcome::Failed { + addr: peer_addr, + error, + when: now, + }), + ); + + self.process_dial_requests(requests) + } + OutgoingConnection::Loopback { peer_addr } => { + // Loopback connections are marked, but closed. + info!("successful outgoing loopback connection, will be dropped"); + let request = self + .outgoing_manager + .handle_dial_outcome(DialOutcome::Loopback { addr: peer_addr }); + self.process_dial_requests(request) + } + OutgoingConnection::Established { + peer_addr, + peer_id, + peer_consensus_public_key, + sink, + is_syncing, + } => { + info!("new outgoing connection established"); + + let (sender, receiver) = mpsc::unbounded_channel(); + let handle = OutgoingHandle { sender, peer_addr }; + + let request = self + .outgoing_manager + .handle_dial_outcome(DialOutcome::Successful { + addr: peer_addr, + handle, + node_id: peer_id, + when: now, + }); + + let mut effects = self.process_dial_requests(request); + + // Update connection symmetries. 
+ if self + .connection_symmetries + .entry(peer_id) + .or_default() + .mark_outgoing(now) + { + self.connection_completed(peer_id); + self.update_syncing_nodes_set(peer_id, is_syncing); } - }; + effects.extend( + tasks::message_sender( + receiver, + sink, + self.outgoing_limiter + .create_handle(peer_id, peer_consensus_public_key), + self.net_metrics.queued_messages.clone(), + ) + .instrument(span) + .event(move |_| Event::OutgoingDropped { + peer_id: Box::new(peer_id), + peer_addr, + }), + ); - if we_are_isolated { - if is_bootstrap_node { - info!( - "{}: failed to bootstrap to any other nodes, but continuing to run as we \ - are a bootstrap node", - our_id(&swarm) - ); - } else { - // (Re)schedule connection attempts to known peers. + effects + } + }) + } - // Before reconnecting wait RECONNECT_DELAY - time::sleep(RECONNECT_DELAY).await; + fn handle_network_request( + &self, + request: NetworkRequest

, + rng: &mut NodeRng, + ) -> Effects> { + match request { + NetworkRequest::SendMessage { + dest, + payload, + respond_after_queueing, + auto_closing_responder, + } => { + // We're given a message to send. Pass on the responder so that confirmation + // can later be given once the message has actually been buffered. + self.net_metrics.direct_message_requests.inc(); - // Now that we've slept and re-awoken, grab the mutex again - match known_addresses_mut.lock() { - Err(err) => { - panic!("Could not acquire `known_addresses_mut` mutex: {:?}", err) - } - Ok(known_addresses) => { - for address in known_addresses.keys() { - let our_id = our_id(&swarm); - debug!(%our_id, %address, "dialing known address"); - Swarm::dial_addr(swarm, address.clone()).unwrap_or_else(|err| { - error!(%our_id, %address, - "Swarm error when rescheduling connection: {:?}", - err) - }); - } - } - }; + if respond_after_queueing { + self.send_message(*dest, Arc::new(Message::Payload(*payload)), None); + auto_closing_responder.respond(()).ignore() + } else { + self.send_message( + *dest, + Arc::new(Message::Payload(*payload)), + Some(auto_closing_responder), + ); + Effects::new() } } - return; - } - SwarmEvent::NewListenAddr(address) => Event::NewListenAddress(address), - SwarmEvent::ExpiredListenAddr(address) => Event::ExpiredListenAddress(address), - SwarmEvent::ListenerClosed { addresses, reason } => { - Event::ListenerClosed { addresses, reason } - } - SwarmEvent::ListenerError { error } => Event::ListenerError { error }, - SwarmEvent::Behaviour(SwarmBehaviorEvent::OneWayMessaging(event)) => { - return handle_one_way_messaging_event(swarm, event_queue, event).await; - } - SwarmEvent::Behaviour(SwarmBehaviorEvent::Gossiper(event)) => { - return handle_gossip_event(swarm, event_queue, event).await; - } - SwarmEvent::Behaviour(SwarmBehaviorEvent::Kademlia(KademliaEvent::RoutingUpdated { - peer, - old_peer, - .. 
- })) => Event::RoutingTableUpdated { peer, old_peer }, - SwarmEvent::Behaviour(SwarmBehaviorEvent::Kademlia(event)) => { - debug!(?event, "{}: new kademlia event", our_id(swarm)); - return; + NetworkRequest::ValidatorBroadcast { + payload, + era_id, + auto_closing_responder, + } => { + // We're given a message to broadcast. + self.broadcast_message_to_validators(Arc::new(Message::Payload(*payload)), era_id); + auto_closing_responder.respond(()).ignore() + } + NetworkRequest::Gossip { + payload, + gossip_target, + count, + exclude, + auto_closing_responder, + } => { + // We're given a message to gossip. + let sent_to = self.gossip_message( + rng, + Arc::new(Message::Payload(*payload)), + gossip_target, + count, + &exclude, + ); + auto_closing_responder.respond(sent_to).ignore() + } } - SwarmEvent::Behaviour(SwarmBehaviorEvent::Identify(event)) => { - return handle_identify_event(swarm, event); + } + + fn handle_outgoing_dropped( + &mut self, + peer_id: NodeId, + peer_addr: SocketAddr, + ) -> Effects> { + let requests = self + .outgoing_manager + .handle_connection_drop(peer_addr, Instant::now()); + + if let Entry::Occupied(mut entry) = self.connection_symmetries.entry(peer_id) { + if entry.get_mut().unmark_outgoing(Instant::now()) { + entry.remove(); + } } - SwarmEvent::IncomingConnection { .. } - | SwarmEvent::IncomingConnectionError { .. } - | SwarmEvent::BannedPeer { .. } - | SwarmEvent::Dialing(_) => return, - }; - event_queue.schedule(event, QueueKind::Network).await; -} -/// Takes the known_addresses of a node and returns if it is isolated. -/// -/// An isolated node has no chance of recovering a connection to the network and is not -/// connected to any peer. 
-fn network_is_isolated(known_addresses: &HashMap) -> bool { - known_addresses - .values() - .all(|state| *state == ConnectionState::Failed) -} + self.outgoing_limiter.remove_connected_validator(&peer_id); -async fn handle_one_way_messaging_event, P: PayloadT>( - swarm: &mut Swarm, - event_queue: EventQueueHandle, - event: RequestResponseEvent, ()>, -) { - match event { - RequestResponseEvent::Message { - peer, - message: RequestResponseMessage::Request { request, .. }, - } => { - // We've received a one-way request from a peer: announce it via the reactor on the - // `NetworkIncoming` queue. - let sender = NodeId::from(peer); - match bincode::deserialize::

(&request) { - Ok(payload) => { - debug!(%sender, %payload, "{}: incoming one-way message received", our_id(swarm)); - event_queue - .schedule( - NetworkAnnouncement::MessageReceived { sender, payload }, - QueueKind::NetworkIncoming, - ) - .await; - } - Err(error) => { - warn!( - %sender, - %error, - "{}: failed to deserialize incoming one-way message", - our_id(swarm) - ); + self.process_dial_requests(requests) + } + + /// Processes a set of `DialRequest`s, updating the component and emitting needed effects. + fn process_dial_requests(&mut self, requests: T) -> Effects> + where + T: IntoIterator>>, + { + let mut effects = Effects::new(); + + for request in requests { + trace!(%request, "processing dial request"); + match request { + DialRequest::Dial { addr, span } => effects.extend( + tasks::connect_outgoing(self.context.clone(), addr) + .instrument(span.clone()) + .event(|outgoing| Event::OutgoingConnection { + outgoing: Box::new(outgoing), + span, + }), + ), + DialRequest::Disconnect { handle: _, span } => { + // Dropping the `handle` is enough to signal the connection to shutdown. + span.in_scope(|| { + debug!("dropping connection, as requested"); + }); } + DialRequest::SendPing { + peer_id, + nonce, + span, + } => span.in_scope(|| { + trace!("enqueuing ping to be sent"); + self.send_message(peer_id, Arc::new(Message::Ping { nonce }), None); + }), } } - RequestResponseEvent::Message { - message: RequestResponseMessage::Response { .. }, - .. - } => { - // Note that a response will still be emitted immediately after the request has been - // sent, since `RequestResponseCodec::read_response` for the one-way Codec does not - // actually read anything from the given I/O stream. 
- } - RequestResponseEvent::OutboundFailure { - peer, - request_id, - error, - } => { - warn!( - ?peer, - ?request_id, - ?error, - "{}: outbound failure", - our_id(swarm) - ) - } - RequestResponseEvent::InboundFailure { - peer, - request_id, - error, - } => { - warn!( - ?peer, - ?request_id, - ?error, - "{}: inbound failure", - our_id(swarm) - ) - } - RequestResponseEvent::ResponseSent { peer, request_id } => { - warn!( - ?peer, - ?request_id, - "{}: response should not have been sent for a one-way message", - our_id(swarm) - ) - } + + effects } -} -async fn handle_gossip_event, P: PayloadT>( - swarm: &mut Swarm, - event_queue: EventQueueHandle, - event: GossipsubEvent, -) { - match event { - GossipsubEvent::Message { - propagation_source, - message, - .. - } => { - // We've received a gossiped message: announce it via the reactor on the - // `NetworkIncoming` queue. - let sender = match message.source { - Some(source) => NodeId::from(source), - None => { - warn!(sender=%propagation_source, ?message, "{}: libp2p gossiped message without source", our_id(swarm)); - return; - } - }; - match bincode::deserialize::

(&message.data) { - Ok(payload) => { - debug!(%sender, %payload, "{}: libp2p gossiped message received", our_id(swarm)); - event_queue - .schedule( - NetworkAnnouncement::MessageReceived { sender, payload }, - QueueKind::NetworkIncoming, - ) - .await; - } - Err(error) => { - warn!( - %sender, - %error, - "{}: failed to deserialize gossiped message", - our_id(swarm) + /// Handles a received message. + fn handle_incoming_message( + &mut self, + effect_builder: EffectBuilder, + peer_id: NodeId, + msg: Message

, + span: Span, + ) -> Effects> + where + REv: FromIncoming

+ From, + { + span.in_scope(|| match msg { + Message::Handshake { .. } => { + // We should never receive a handshake message on an established connection. Simply + // discard it. This may be too lenient, so we may consider simply dropping the + // connection in the future instead. + warn!("received unexpected handshake"); + Effects::new() + } + Message::Ping { nonce } => { + // Send a pong. Incoming pings and pongs are rate limited. + + self.send_message(peer_id, Arc::new(Message::Pong { nonce }), None); + Effects::new() + } + Message::Pong { nonce } => { + // Record the time the pong arrived and forward it to outgoing. + let pong = TaggedTimestamp::from_parts(Instant::now(), nonce); + if self.outgoing_manager.record_pong(peer_id, pong) { + // Note: We no longer block peers here with a `PongLimitExceeded` for failed + // pongs, merely warn. + info!( + "peer {} exceeded failed pong limit, or allowed number of pongs", + peer_id // Redundant information due to span, but better safe than sorry. ); } + + Effects::new() } - } - GossipsubEvent::Subscribed { peer_id, .. } => { - debug!(%peer_id, "{}: new gossip subscriber", our_id(swarm)); - } - GossipsubEvent::Unsubscribed { peer_id, .. } => { - debug!(%peer_id, "{}: peer unsubscribed from gossip", our_id(swarm)); - } + Message::Payload(payload) => { + effect_builder.announce_incoming(peer_id, payload).ignore() + } + }) } -} -fn handle_identify_event(swarm: &mut Swarm, event: IdentifyEvent) { - match event { - IdentifyEvent::Received { - peer_id, - info, - observed_addr, - } => { - debug!( - %peer_id, - %info.protocol_version, - %info.agent_version, - ?info.listen_addrs, - ?info.protocols, - %observed_addr, - "{}: identifying info received", - our_id(swarm) - ); - // We've received identifying information from a peer, so add its listening addresses to - // our kademlia instance. - swarm.add_discovered_peer(&peer_id, info.listen_addrs); + /// Emits an announcement that a connection has been completed. 
+ fn connection_completed(&self, peer_id: NodeId) { + trace!(num_peers = self.peers().len(), new_peer=%peer_id, "connection complete"); + self.net_metrics.peers.set(self.peers().len() as i64); + } + + /// Updates a set of known joining nodes. + /// If we've just connected to a non-joining node that peer will be removed from the set. + fn update_syncing_nodes_set(&mut self, peer_id: NodeId, is_syncing: bool) { + // Update set of syncing peers. + if is_syncing { + debug!(%peer_id, "is syncing"); + self.syncing_nodes.insert(peer_id); + } else { + debug!(%peer_id, "is no longer syncing"); + self.syncing_nodes.remove(&peer_id); } - IdentifyEvent::Sent { peer_id } => { - debug!( - "{}: sent our identifying info to {}", - our_id(swarm), - peer_id - ); + } + + /// Returns the set of connected nodes. + pub(crate) fn peers(&self) -> BTreeMap { + let mut ret = BTreeMap::new(); + for node_id in self.outgoing_manager.connected_peers() { + if let Some(connection) = self.outgoing_manager.get_route(node_id) { + ret.insert(node_id, connection.peer_addr.to_string()); + } else { + // This should never happen unless the state of `OutgoingManager` is corrupt. + warn!(%node_id, "route disappeared unexpectedly") + } } - IdentifyEvent::Error { peer_id, error } => { - warn!(%peer_id, %error, "{}: error while attempting to identify peer", our_id(swarm)); + + for (node_id, sym) in &self.connection_symmetries { + if let Some(addrs) = sym.incoming_addrs() { + for addr in addrs { + ret.entry(*node_id).or_insert_with(|| addr.to_string()); + } + } } + + ret } -} -/// Converts a string of the form "127.0.0.1:34553" into a Multiaddr equivalent to -/// "/ip4/127.0.0.1/tcp/34553". 
-fn address_str_to_multiaddr(address: &str) -> Multiaddr { - let mut parts_itr = address.split(':'); - let multiaddr_str = if address - .chars() - .next() - .expect("cannot convert empty address") - .is_numeric() - { - format!( - "/ip4/{}/tcp/{}", - parts_itr.next().expect("address should contain IP segment"), - parts_itr - .next() - .expect("address should contain port segment") - ) - } else { - format!( - "/dns/{}/tcp/{}", - parts_itr - .next() - .expect("address should contain DNS name segment"), - parts_itr - .next() - .expect("address should contain port segment") - ) - }; - // OK to `expect` for now as this method will become redundant once small_network is removed. - multiaddr_str - .parse() - .expect("address should parse as a multiaddr") + pub(crate) fn fully_connected_peers_random( + &self, + rng: &mut NodeRng, + count: usize, + ) -> Vec { + self.connection_symmetries + .iter() + .filter(|(_, sym)| matches!(sym, ConnectionSymmetry::Symmetric { .. })) + .map(|(node_id, _)| *node_id) + .choose_multiple(rng, count) + } + + pub(crate) fn has_sufficient_fully_connected_peers(&self) -> bool { + self.connection_symmetries + .iter() + .filter(|(_node_id, sym)| matches!(sym, ConnectionSymmetry::Symmetric { .. })) + .count() + >= self.cfg.min_peers_for_initialization as usize + } + + #[cfg(test)] + /// Returns the node id of this network node. + pub(crate) fn node_id(&self) -> NodeId { + self.context.our_id() + } } -impl Finalize for Network { +#[cfg(test)] +const MAX_METRICS_DROP_ATTEMPTS: usize = 25; + +#[cfg(test)] +const DROP_RETRY_DELAY: Duration = Duration::from_millis(100); + +#[cfg(test)] +impl crate::reactor::Finalize for Network +where + REv: Send + 'static, + P: Payload, +{ fn finalize(mut self) -> BoxFuture<'static, ()> { async move { - // Close the shutdown socket, causing the server to exit. - drop(self.shutdown_sender.take()); - - // Wait for the server to exit cleanly. 
- if let Some(join_handle) = self.server_join_handle.take() { - match join_handle.await { - Ok(_) => debug!("{}: server exited cleanly", self.our_id), - Err(err) => error!(%err, "{}: could not join server task cleanly", self.our_id), + if let Some(mut channel_management) = self.channel_management.take() { + // Close the shutdown socket, causing the server to exit. + drop(channel_management.shutdown_sender.take()); + drop(channel_management.close_incoming_sender.take()); + + // Wait for the server to exit cleanly. + if let Some(join_handle) = channel_management.server_join_handle.take() { + match join_handle.await { + Ok(_) => debug!(our_id=%self.context.our_id(), "server exited cleanly"), + Err(ref err) => { + error!( + our_id=%self.context.our_id(), + err=display_error(err), + "could not join server task cleanly" + ); + } + } } - } else if env::var(ENABLE_LIBP2P_NET_ENV_VAR).is_ok() { - warn!("{}: server shutdown while already shut down", self.our_id) } + + // Ensure there are no ongoing metrics updates. 
+ utils::wait_for_arc_drop( + self.net_metrics, + MAX_METRICS_DROP_ATTEMPTS, + DROP_RETRY_DELAY, + ) + .await; } .boxed() } } -impl Debug for Network { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - f.debug_struct("Network") - .field("our_id", &self.our_id) - .field("peers", &self.peers) - .field("listening_addresses", &self.listening_addresses) - .field("known_addresses", &self.known_addresses_mut) - .finish() +fn choose_gossip_peers( + rng: &mut NodeRng, + gossip_target: GossipTarget, + count: usize, + exclude: &HashSet, + connected_peers: impl Iterator, + is_validator_in_era: F, +) -> HashSet +where + F: Fn(EraId, &NodeId) -> bool, +{ + let filtered_peers = connected_peers.filter(|peer_id| !exclude.contains(peer_id)); + match gossip_target { + GossipTarget::Mixed(era_id) => { + let (validators, non_validators): (Vec<_>, Vec<_>) = + filtered_peers.partition(|node_id| is_validator_in_era(era_id, node_id)); + + let (first, second) = if rng.gen() { + (validators, non_validators) + } else { + (non_validators, validators) + }; + + first + .choose_multiple(rng, count) + .interleave(second.iter().choose_multiple(rng, count)) + .take(count) + .copied() + .collect() + } + GossipTarget::All => filtered_peers + .choose_multiple(rng, count) + .into_iter() + .collect(), } } -impl, P: PayloadT> Component for Network { +impl Component for Network +where + REv: ReactorEvent + + From> + + From> + + FromIncoming

+ + From + + From> + + From, + P: Payload, +{ type Event = Event

; - type ConstructionError = Error; + + fn name(&self) -> &str { + COMPONENT_NAME + } fn handle_event( &mut self, @@ -881,187 +1136,543 @@ impl, P: PayloadT> Component for Network { rng: &mut NodeRng, event: Self::Event, ) -> Effects { - trace!("{}: {:?}", self.our_id, event); - match event { - Event::ConnectionEstablished { - peer_id, - endpoint, - num_established, - } => self.handle_connection_established( - effect_builder, - *peer_id, - endpoint, - num_established, - ), - Event::ConnectionClosed { - peer_id, - endpoint, - num_established, - cause, - } => { - if num_established == 0 { - let _ = self.peers.remove(&peer_id); - } - debug!(%peer_id, ?endpoint, %num_established, ?cause, "{}: connection closed", self.our_id); - - // Note: We count multiple connections to the same peer as a single connection. - self.net_metrics.peers.set(self.peers.len() as i64); - - Effects::new() - } - Event::UnreachableAddress { - peer_id, - address, - error, - attempts_remaining, - } => { - debug!(%peer_id, %address, %error, %attempts_remaining, "{}: failed to connect", self.our_id); - Effects::new() - } - Event::NewListenAddress(address) => { - self.listening_addresses.push(address); - info!( - "{}: listening on {}", - self.our_id, - DisplayIter::new(self.listening_addresses.iter()) + match &self.state { + ComponentState::Fatal(msg) => { + error!( + msg, + ?event, + name = >::name(self), + "should not handle this event when this component has fatal error" ); Effects::new() } - Event::ExpiredListenAddress(address) => { - self.listening_addresses.retain(|addr| *addr != address); - if self.listening_addresses.is_empty() { - return fatal!(effect_builder, "no remaining listening addresses").ignore(); - } - debug!(%address, "{}: listening address expired", self.our_id); + ComponentState::Uninitialized => { + warn!( + ?event, + name = >::name(self), + "should not handle this event when component is uninitialized" + ); Effects::new() } - Event::ListenerClosed { reason, .. 
} => { - // If the listener closed without an error, we're already shutting down the server. - // Otherwise, we need to kill the node as it cannot function without a listener. - match reason { - Err(error) => fatal!(effect_builder, "listener closed: {}", error).ignore(), - Ok(()) => { - debug!("{}: listener closed", self.our_id); + ComponentState::Initializing => match event { + Event::Initialize => match self.initialize(effect_builder) { + Ok(effects) => effects, + Err(error) => { + error!(%error, "failed to initialize network component"); + >::set_state( + self, + ComponentState::Fatal(error.to_string()), + ); Effects::new() } + }, + Event::IncomingConnection { .. } + | Event::IncomingMessage { .. } + | Event::IncomingClosed { .. } + | Event::OutgoingConnection { .. } + | Event::OutgoingDropped { .. } + | Event::NetworkRequest { .. } + | Event::NetworkInfoRequest { .. } + | Event::GossipOurAddress + | Event::PeerAddressReceived(_) + | Event::SweepOutgoing + | Event::BlocklistAnnouncement(_) => { + warn!( + ?event, + name = >::name(self), + "should not handle this event when component is pending initialization" + ); + Effects::new() } - } - Event::ListenerError { error } => { - debug!(%error, "{}: non-fatal listener error", self.our_id); - Effects::new() - } - Event::RoutingTableUpdated { peer, old_peer } => { - if let Some(ref old_peer_id) = old_peer { - self.seen_peers.remove(old_peer_id); + }, + ComponentState::Initialized => match event { + Event::Initialize => { + error!( + ?event, + name = >::name(self), + "component already initialized" + ); + Effects::new() } - self.seen_peers.insert(peer); - - debug!( - inserted = ?peer, - removed = ?old_peer, - new_size = self.seen_peers.len(), - "kademlia routing table updated" - ); - - Effects::new() - } - - Event::NetworkRequest { - request: - NetworkRequest::SendMessage { - dest, - payload, - responder, - }, - } => { - self.send_message(*dest, *payload); - responder.respond(()).ignore() - } - Event::NetworkRequest { 
- request: NetworkRequest::Broadcast { payload, responder }, - } => { - self.net_metrics.broadcast_requests.inc(); - self.gossip_message(*payload); - responder.respond(()).ignore() - } - Event::NetworkRequest { request } => match request { - NetworkRequest::SendMessage { - dest, - payload, - responder, - } => { - self.net_metrics.direct_message_requests.inc(); - self.send_message(*dest, *payload); - responder.respond(()).ignore() + Event::IncomingConnection { incoming, span } => { + self.handle_incoming_connection(incoming, span) } - NetworkRequest::Broadcast { payload, responder } => { - self.gossip_message(*payload); - responder.respond(()).ignore() + Event::IncomingMessage { peer_id, msg, span } => { + self.handle_incoming_message(effect_builder, *peer_id, *msg, span) } - NetworkRequest::Gossip { - payload, - count, - exclude, - responder, - } => { - let sent_to = self.send_message_to_n_peers(rng, *payload, count, exclude); - responder.respond(sent_to).ignore() + Event::IncomingClosed { + result, + peer_id, + peer_addr, + span, + } => self.handle_incoming_closed(result, *peer_id, peer_addr, *span), + Event::OutgoingConnection { outgoing, span } => { + self.handle_outgoing_connection(*outgoing, span, rng) } - }, - Event::NetworkInfoRequest { info_request } => match info_request { - NetworkInfoRequest::GetPeers { responder } => { - let peers = self - .peers - .iter() - .map(|(node_id, endpoint)| { - (*node_id, endpoint.get_remote_address().to_string()) - }) - .collect(); - responder.respond(peers).ignore() + Event::OutgoingDropped { peer_id, peer_addr } => { + self.handle_outgoing_dropped(*peer_id, peer_addr) + } + Event::NetworkRequest { req: request } => { + self.handle_network_request(*request, rng) } + Event::NetworkInfoRequest { req } => match *req { + NetworkInfoRequest::Peers { responder } => { + responder.respond(self.peers()).ignore() + } + NetworkInfoRequest::FullyConnectedPeers { count, responder } => responder + 
.respond(self.fully_connected_peers_random(rng, count)) + .ignore(), + NetworkInfoRequest::Insight { responder } => responder + .respond(NetworkInsights::collect_from_component(self)) + .ignore(), + }, + Event::GossipOurAddress => { + let our_address = GossipedAddress::new( + self.context + .public_addr() + .expect("component not initialized properly"), + ); + + let mut effects = effect_builder + .begin_gossip(our_address, Source::Ourself, our_address.gossip_target()) + .ignore(); + effects.extend( + effect_builder + .set_timeout(self.cfg.gossip_interval.into()) + .event(|_| Event::GossipOurAddress), + ); + effects + } + Event::PeerAddressReceived(gossiped_address) => { + let requests = self.outgoing_manager.learn_addr( + gossiped_address.into(), + false, + Instant::now(), + ); + self.process_dial_requests(requests) + } + Event::SweepOutgoing => { + let now = Instant::now(); + let requests = self.outgoing_manager.perform_housekeeping(rng, now); + + let mut effects = self.process_dial_requests(requests); + + effects.extend( + effect_builder + .set_timeout(OUTGOING_MANAGER_SWEEP_INTERVAL) + .event(|_| Event::SweepOutgoing), + ); + + effects + } + Event::BlocklistAnnouncement(announcement) => match announcement { + PeerBehaviorAnnouncement::OffenseCommitted { + offender, + justification, + } => { + // Note: We do not have a proper by-node-ID blocklist, but rather only block + // the current outgoing address of a peer. + info!(%offender, %justification, "adding peer to blocklist after transgression"); + + if let Some(addr) = self.outgoing_manager.get_addr(*offender) { + let requests = self.outgoing_manager.block_addr( + addr, + Instant::now(), + *justification, + rng, + ); + self.process_dial_requests(requests) + } else { + // Peer got away with it, no longer an outgoing connection. 
+ Effects::new() + } + } + }, }, } } } -/// An ephemeral [libp2p::identity::Keypair] which uniquely identifies this node -#[derive(Clone)] -pub struct NetworkIdentity { - keypair: Keypair, -} +impl InitializedComponent for Network +where + REv: ReactorEvent + + From> + + From> + + FromIncoming

+ + From + + From> + + From, + P: Payload, +{ + fn state(&self) -> &ComponentState { + &self.state + } + + fn set_state(&mut self, new_state: ComponentState) { + info!( + ?new_state, + name = >::name(self), + "component state changed" + ); -impl Debug for NetworkIdentity { - fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), fmt::Error> { - write!( - f, - "NetworkIdentity(public key: {:?})", - self.keypair.public() - ) + self.state = new_state; } } -impl NetworkIdentity { - #[allow(clippy::new_without_default)] - pub fn new() -> Self { - let keypair = Keypair::generate_ed25519(); - NetworkIdentity { keypair } - } +/// Transport type alias for base encrypted connections. +type Transport = SslStream; + +/// A framed transport for `Message`s. +pub(crate) type FullTransport

= tokio_serde::Framed< + FramedTransport, + Message

, + Arc>, + CountingFormat, +>; + +pub(crate) type FramedTransport = tokio_util::codec::Framed; + +/// Constructs a new full transport on a stream. +/// +/// A full transport contains the framing as well as the encoding scheme used to send messages. +fn full_transport

( + metrics: Weak, + connection_id: ConnectionId, + framed: FramedTransport, + role: Role, +) -> FullTransport

+where + for<'de> P: Serialize + Deserialize<'de>, + for<'de> Message

: Serialize + Deserialize<'de>, +{ + tokio_serde::Framed::new( + framed, + CountingFormat::new(metrics, connection_id, role, BincodeFormat::default()), + ) } -impl From<&Network> for NetworkIdentity { - fn from(network: &Network) -> Self { - network.network_identity.clone() - } +/// Constructs a framed transport. +fn framed_transport(transport: Transport, maximum_net_message_size: u32) -> FramedTransport { + tokio_util::codec::Framed::new( + transport, + LengthDelimitedCodec::builder() + .max_frame_length(maximum_net_message_size as usize) + .new_codec(), + ) } -impl From<&NetworkIdentity> for PeerId { - fn from(network_identity: &NetworkIdentity) -> Self { - PeerId::from(network_identity.keypair.public()) +impl Debug for Network +where + P: Payload, +{ + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + // We output only the most important fields of the component, as it gets unwieldy quite fast + // otherwise. + f.debug_struct("Network") + .field("our_id", &self.context.our_id()) + .field("state", &self.state) + .field("public_addr", &self.context.public_addr()) + .finish() } } -impl From<&NetworkIdentity> for NodeId { - fn from(network_identity: &NetworkIdentity) -> Self { - NodeId::from(PeerId::from(network_identity)) +#[cfg(test)] +mod gossip_target_tests { + use std::{collections::BTreeSet, iter}; + + use static_assertions::const_assert; + + use casper_types::testing::TestRng; + + use super::*; + + const VALIDATOR_COUNT: usize = 10; + const NON_VALIDATOR_COUNT: usize = 20; + // The tests assume that we have fewer validators than non-validators. 
+ const_assert!(VALIDATOR_COUNT < NON_VALIDATOR_COUNT); + + struct Fixture { + validators: BTreeSet, + non_validators: BTreeSet, + all_peers: Vec, + } + + impl Fixture { + fn new(rng: &mut TestRng) -> Self { + let validators: BTreeSet = iter::repeat_with(|| NodeId::random(rng)) + .take(VALIDATOR_COUNT) + .collect(); + let non_validators: BTreeSet = iter::repeat_with(|| NodeId::random(rng)) + .take(NON_VALIDATOR_COUNT) + .collect(); + + let mut all_peers: Vec = validators + .iter() + .copied() + .chain(non_validators.iter().copied()) + .collect(); + all_peers.shuffle(rng); + + Fixture { + validators, + non_validators, + all_peers, + } + } + + fn is_validator_in_era(&self) -> impl Fn(EraId, &NodeId) -> bool + '_ { + move |_era_id: EraId, node_id: &NodeId| self.validators.contains(node_id) + } + + fn num_validators<'a>(&self, input: impl Iterator) -> usize { + input + .filter(move |&node_id| self.validators.contains(node_id)) + .count() + } + + fn num_non_validators<'a>(&self, input: impl Iterator) -> usize { + input + .filter(move |&node_id| self.non_validators.contains(node_id)) + .count() + } + } + + #[test] + fn should_choose_mixed() { + const TARGET: GossipTarget = GossipTarget::Mixed(EraId::new(1)); + + let mut rng = TestRng::new(); + let fixture = Fixture::new(&mut rng); + + // Choose more than total count from all peers, exclude none, should return all peers. + let chosen = choose_gossip_peers( + &mut rng, + TARGET, + VALIDATOR_COUNT + NON_VALIDATOR_COUNT + 1, + &HashSet::new(), + fixture.all_peers.iter().copied(), + fixture.is_validator_in_era(), + ); + assert_eq!(chosen.len(), fixture.all_peers.len()); + + // Choose total count from all peers, exclude none, should return all peers. 
+ let chosen = choose_gossip_peers( + &mut rng, + TARGET, + VALIDATOR_COUNT + NON_VALIDATOR_COUNT, + &HashSet::new(), + fixture.all_peers.iter().copied(), + fixture.is_validator_in_era(), + ); + assert_eq!(chosen.len(), fixture.all_peers.len()); + + // Choose 2 * VALIDATOR_COUNT from all peers, exclude none, should return all validators and + // VALIDATOR_COUNT non-validators. + let chosen = choose_gossip_peers( + &mut rng, + TARGET, + 2 * VALIDATOR_COUNT, + &HashSet::new(), + fixture.all_peers.iter().copied(), + fixture.is_validator_in_era(), + ); + assert_eq!(chosen.len(), 2 * VALIDATOR_COUNT); + assert_eq!(fixture.num_validators(chosen.iter()), VALIDATOR_COUNT); + assert_eq!(fixture.num_non_validators(chosen.iter()), VALIDATOR_COUNT); + + // Choose VALIDATOR_COUNT from all peers, exclude none, should return VALIDATOR_COUNT peers, + // half validators and half non-validators. + let chosen = choose_gossip_peers( + &mut rng, + TARGET, + VALIDATOR_COUNT, + &HashSet::new(), + fixture.all_peers.iter().copied(), + fixture.is_validator_in_era(), + ); + assert_eq!(chosen.len(), VALIDATOR_COUNT); + assert_eq!(fixture.num_validators(chosen.iter()), VALIDATOR_COUNT / 2); + assert_eq!( + fixture.num_non_validators(chosen.iter()), + VALIDATOR_COUNT / 2 + ); + + // Choose two from all peers, exclude none, should return two peers, one validator and one + // non-validator. + let chosen = choose_gossip_peers( + &mut rng, + TARGET, + 2, + &HashSet::new(), + fixture.all_peers.iter().copied(), + fixture.is_validator_in_era(), + ); + assert_eq!(chosen.len(), 2); + assert_eq!(fixture.num_validators(chosen.iter()), 1); + assert_eq!(fixture.num_non_validators(chosen.iter()), 1); + + // Choose one from all peers, exclude none, should return one peer with 50-50 chance of + // being a validator. 
+ let mut got_validator = false; + let mut got_non_validator = false; + let mut attempts = 0; + while !got_validator || !got_non_validator { + let chosen = choose_gossip_peers( + &mut rng, + TARGET, + 1, + &HashSet::new(), + fixture.all_peers.iter().copied(), + fixture.is_validator_in_era(), + ); + assert_eq!(chosen.len(), 1); + let node_id = chosen.iter().next().unwrap(); + got_validator |= fixture.validators.contains(node_id); + got_non_validator |= fixture.non_validators.contains(node_id); + attempts += 1; + assert!(attempts < 1_000_000); + } + + // Choose VALIDATOR_COUNT from all peers, exclude all but one validator, should return the + // one validator and VALIDATOR_COUNT - 1 non-validators. + let exclude: HashSet<_> = fixture + .validators + .iter() + .copied() + .take(VALIDATOR_COUNT - 1) + .collect(); + let chosen = choose_gossip_peers( + &mut rng, + TARGET, + VALIDATOR_COUNT, + &exclude, + fixture.all_peers.iter().copied(), + fixture.is_validator_in_era(), + ); + assert_eq!(chosen.len(), VALIDATOR_COUNT); + assert_eq!(fixture.num_validators(chosen.iter()), 1); + assert_eq!( + fixture.num_non_validators(chosen.iter()), + VALIDATOR_COUNT - 1 + ); + assert!(exclude.is_disjoint(&chosen)); + + // Choose 3 from all peers, exclude all non-validators, should return 3 validators. + let exclude: HashSet<_> = fixture.non_validators.iter().copied().collect(); + let chosen = choose_gossip_peers( + &mut rng, + TARGET, + 3, + &exclude, + fixture.all_peers.iter().copied(), + fixture.is_validator_in_era(), + ); + assert_eq!(chosen.len(), 3); + assert_eq!(fixture.num_validators(chosen.iter()), 3); + assert!(exclude.is_disjoint(&chosen)); + } + + #[test] + fn should_choose_all() { + const TARGET: GossipTarget = GossipTarget::All; + + let mut rng = TestRng::new(); + let fixture = Fixture::new(&mut rng); + + // Choose more than total count from all peers, exclude none, should return all peers. 
+ let chosen = choose_gossip_peers( + &mut rng, + TARGET, + VALIDATOR_COUNT + NON_VALIDATOR_COUNT + 1, + &HashSet::new(), + fixture.all_peers.iter().copied(), + fixture.is_validator_in_era(), + ); + assert_eq!(chosen.len(), fixture.all_peers.len()); + + // Choose total count from all peers, exclude none, should return all peers. + let chosen = choose_gossip_peers( + &mut rng, + TARGET, + VALIDATOR_COUNT + NON_VALIDATOR_COUNT, + &HashSet::new(), + fixture.all_peers.iter().copied(), + fixture.is_validator_in_era(), + ); + assert_eq!(chosen.len(), fixture.all_peers.len()); + + // Choose VALIDATOR_COUNT from only validators, exclude none, should return all validators. + let chosen = choose_gossip_peers( + &mut rng, + TARGET, + VALIDATOR_COUNT, + &HashSet::new(), + fixture.validators.iter().copied(), + fixture.is_validator_in_era(), + ); + assert_eq!(chosen.len(), VALIDATOR_COUNT); + assert_eq!(fixture.num_validators(chosen.iter()), VALIDATOR_COUNT); + + // Choose VALIDATOR_COUNT from only non-validators, exclude none, should return + // VALIDATOR_COUNT non-validators. + let chosen = choose_gossip_peers( + &mut rng, + TARGET, + VALIDATOR_COUNT, + &HashSet::new(), + fixture.non_validators.iter().copied(), + fixture.is_validator_in_era(), + ); + assert_eq!(chosen.len(), VALIDATOR_COUNT); + assert_eq!(fixture.num_non_validators(chosen.iter()), VALIDATOR_COUNT); + + // Choose VALIDATOR_COUNT from all peers, exclude all but VALIDATOR_COUNT from all peers, + // should return all the non-excluded peers. 
+ let exclude: HashSet<_> = fixture + .all_peers + .iter() + .copied() + .take(NON_VALIDATOR_COUNT) + .collect(); + let chosen = choose_gossip_peers( + &mut rng, + TARGET, + VALIDATOR_COUNT, + &exclude, + fixture.all_peers.iter().copied(), + fixture.is_validator_in_era(), + ); + assert_eq!(chosen.len(), VALIDATOR_COUNT); + assert!(exclude.is_disjoint(&chosen)); + + // Choose one from all peers, exclude enough non-validators to have an even chance of + // returning a validator as a non-validator, should return one peer with 50-50 chance of + // being a validator. + let exclude: HashSet<_> = fixture + .non_validators + .iter() + .copied() + .take(NON_VALIDATOR_COUNT - VALIDATOR_COUNT) + .collect(); + let mut got_validator = false; + let mut got_non_validator = false; + let mut attempts = 0; + while !got_validator || !got_non_validator { + let chosen = choose_gossip_peers( + &mut rng, + TARGET, + 1, + &exclude, + fixture.all_peers.iter().copied(), + fixture.is_validator_in_era(), + ); + assert_eq!(chosen.len(), 1); + assert!(exclude.is_disjoint(&chosen)); + let node_id = chosen.iter().next().unwrap(); + got_validator |= fixture.validators.contains(node_id); + got_non_validator |= fixture.non_validators.contains(node_id); + attempts += 1; + assert!(attempts < 1_000_000); + } } } diff --git a/node/src/components/network/behavior.rs b/node/src/components/network/behavior.rs deleted file mode 100644 index 1975f349c2..0000000000 --- a/node/src/components/network/behavior.rs +++ /dev/null @@ -1,174 +0,0 @@ -use std::{ - collections::VecDeque, - task::{Context, Poll}, -}; - -use derive_more::From; -use libp2p::{ - core::PublicKey, - gossipsub::{Gossipsub, GossipsubEvent}, - identify::{Identify, IdentifyEvent}, - kad::{record::store::MemoryStore, Kademlia, KademliaEvent}, - request_response::{RequestResponse, RequestResponseEvent}, - swarm::{NetworkBehaviourAction, NetworkBehaviourEventProcess, PollParameters}, - Multiaddr, NetworkBehaviour, PeerId, -}; -use tracing::{debug, 
trace, warn}; - -use super::{ - gossip::{self, TOPIC}, - one_way_messaging, peer_discovery, Config, GossipMessage, OneWayCodec, OneWayOutgoingMessage, -}; -use crate::{ - components::networking_metrics::NetworkingMetrics, - types::{Chainspec, NodeId}, -}; - -/// An enum defining the top-level events passed to the swarm's handler. This will be received in -/// the swarm's handler wrapped in a `SwarmEvent::Behaviour`. -#[derive(Debug, From)] -pub(super) enum SwarmBehaviorEvent { - OneWayMessaging(RequestResponseEvent, ()>), - Gossiper(GossipsubEvent), - Kademlia(KademliaEvent), - Identify(IdentifyEvent), -} - -/// The top-level behavior used in the libp2p swarm. It holds all subordinate behaviors required to -/// operate the network component. -#[derive(NetworkBehaviour)] -#[behaviour(out_event = "SwarmBehaviorEvent", poll_method = "custom_poll")] -pub(super) struct Behavior { - one_way_message_behavior: RequestResponse, - gossip_behavior: Gossipsub, - kademlia_behavior: Kademlia, - identify_behavior: Identify, - #[behaviour(ignore)] - our_id: NodeId, - /// Events generated by the behavior that are pending a poll. - #[behaviour(ignore)] - events: VecDeque, -} - -impl Behavior { - pub(super) fn new( - config: &Config, - net_metrics: &NetworkingMetrics, - chainspec: &Chainspec, - our_public_key: PublicKey, - ) -> Self { - let one_way_message_behavior = - one_way_messaging::new_behavior(config, net_metrics, chainspec); - - let gossip_behavior = gossip::new_behavior(config, chainspec, our_public_key.clone()); - - let (kademlia_behavior, identify_behavior) = - peer_discovery::new_behaviors(config, chainspec, our_public_key.clone()); - - Behavior { - one_way_message_behavior, - gossip_behavior, - kademlia_behavior, - identify_behavior, - our_id: NodeId::P2p(PeerId::from(our_public_key)), - events: VecDeque::new(), - } - } - - /// Sends the given message out. 
- pub(super) fn send_one_way_message(&mut self, outgoing_message: OneWayOutgoingMessage) { - let request_id = self - .one_way_message_behavior - .send_request(&outgoing_message.destination, outgoing_message.message); - trace!("{}: sent one-way message {}", self.our_id, request_id); - } - - /// Adds the given peer's details to the kademlia routing table and bootstraps kademlia if this - /// is the first peer added. - /// - /// While bootstrapping is not strictly required, it will normally greatly speed up the process - /// of populating the routing table's k-buckets. - /// - /// We assume that calling bootstrap multiple times will not be problematic, although this will - /// not normally happen. - pub(super) fn add_discovered_peer( - &mut self, - peer_id: &PeerId, - listening_addresses: Vec, - ) { - let should_bootstrap = self - .kademlia_behavior - .kbuckets() - .map(|k_bucket| k_bucket.num_entries()) - .sum::() - == 1; - - for address in listening_addresses { - self.kademlia_behavior.add_address(peer_id, address); - } - - if should_bootstrap { - debug!("{}: bootstrapping kademlia", self.our_id); - if self.kademlia_behavior.bootstrap().is_err() { - warn!( - "{}: could not bootstrap kademlia due to lost connection leaving no peers", - self.our_id - ) - } - } - } - - /// Performs a random kademlia lookup in order to refresh the routing table. - pub(super) fn discover_peers(&mut self) { - let random_address = PeerId::random(); - let query_id = self.kademlia_behavior.get_closest_peers(random_address); - debug!( - "{}: random kademlia lookup for peers closest to {} with {:?}", - self.our_id, random_address, query_id - ); - } - - /// Initiates gossiping the given message. - pub(super) fn gossip(&mut self, message: GossipMessage) { - if let Err(error) = self.gossip_behavior.publish(TOPIC.clone(), message) { - warn!(?error, "{}: failed to gossip new message", self.our_id); - } - } - - /// Polls the behavior for new events. 
- fn custom_poll( - &mut self, - _context: &mut Context, - _parameterss: &mut impl PollParameters, - ) -> Poll> { - if let Some(event) = self.events.pop_back() { - Poll::Ready(NetworkBehaviourAction::GenerateEvent(event)) - } else { - Poll::Pending - } - } -} - -impl NetworkBehaviourEventProcess, ()>> for Behavior { - fn inject_event(&mut self, event: RequestResponseEvent, ()>) { - self.events.push_front(SwarmBehaviorEvent::from(event)); - } -} - -impl NetworkBehaviourEventProcess for Behavior { - fn inject_event(&mut self, event: GossipsubEvent) { - self.events.push_front(SwarmBehaviorEvent::from(event)); - } -} - -impl NetworkBehaviourEventProcess for Behavior { - fn inject_event(&mut self, event: KademliaEvent) { - self.events.push_front(SwarmBehaviorEvent::from(event)); - } -} - -impl NetworkBehaviourEventProcess for Behavior { - fn inject_event(&mut self, event: IdentifyEvent) { - self.events.push_front(SwarmBehaviorEvent::from(event)); - } -} diff --git a/node/src/components/network/bincode_format.rs b/node/src/components/network/bincode_format.rs new file mode 100644 index 0000000000..0d6e47b344 --- /dev/null +++ b/node/src/components/network/bincode_format.rs @@ -0,0 +1,92 @@ +//! Bincode wire format encoder. +//! +//! An encoder for `Bincode` messages with our specific settings pinned. + +use std::{fmt::Debug, io, pin::Pin, sync::Arc}; + +use bincode::{ + config::{ + RejectTrailing, VarintEncoding, WithOtherEndian, WithOtherIntEncoding, WithOtherLimit, + WithOtherTrailing, + }, + Options, +}; +use bytes::{Bytes, BytesMut}; +use serde::{Deserialize, Serialize}; +use tokio_serde::{Deserializer, Serializer}; + +use super::Message; + +/// bincode encoder/decoder for messages. +#[allow(clippy::type_complexity)] +pub struct BincodeFormat( + // Note: `bincode` encodes its options at the type level. The exact shape is determined by + // `BincodeFormat::default()`. 
+ pub(crate) WithOtherTrailing< + WithOtherIntEncoding< + WithOtherEndian< + WithOtherLimit, + bincode::config::LittleEndian, + >, + VarintEncoding, + >, + RejectTrailing, + >, +); + +impl BincodeFormat { + /// Serializes an arbitrary serializable value with the networking bincode serializer. + #[inline] + pub(crate) fn serialize_arbitrary(&self, item: &T) -> io::Result> + where + T: Serialize, + { + self.0 + .serialize(item) + .map_err(|err| io::Error::new(io::ErrorKind::InvalidData, err)) + } +} + +impl Debug for BincodeFormat { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.write_str("BincodeFormat") + } +} + +impl Default for BincodeFormat { + fn default() -> Self { + let opts = bincode::options() + .with_no_limit() // We rely on framed tokio transports to impose limits. + .with_little_endian() // Default at the time of this writing, we are merely pinning it. + .with_varint_encoding() // Same as above. + .reject_trailing_bytes(); // There is no reason for us not to reject trailing bytes. + BincodeFormat(opts) + } +} + +impl

Serializer>> for BincodeFormat +where + Message

: Serialize, +{ + type Error = io::Error; + + #[inline] + fn serialize(self: Pin<&mut Self>, item: &Arc>) -> Result { + let msg = &**item; + self.serialize_arbitrary(msg).map(Into::into) + } +} + +impl

Deserializer> for BincodeFormat +where + for<'de> Message

: Deserialize<'de>, +{ + type Error = io::Error; + + #[inline] + fn deserialize(self: Pin<&mut Self>, src: &BytesMut) -> Result, Self::Error> { + self.0 + .deserialize(src) + .map_err(|err| io::Error::new(io::ErrorKind::InvalidData, err)) + } +} diff --git a/node/src/components/network/blocklist.rs b/node/src/components/network/blocklist.rs new file mode 100644 index 0000000000..f91eab7f3f --- /dev/null +++ b/node/src/components/network/blocklist.rs @@ -0,0 +1,116 @@ +//! Blocklisting support. +//! +//! Blocked peers are prevented from interacting with the node through a variety of means. + +use std::fmt::{self, Display, Formatter}; + +use casper_types::{Digest, EraId}; +use datasize::DataSize; +use serde::Serialize; + +use crate::{ + components::{block_accumulator, fetcher::Tag}, + types::InvalidProposalError, +}; + +/// Reasons why a peer was blocked. +#[derive(DataSize, Debug, Serialize)] +pub(crate) enum BlocklistJustification { + /// Peer sent incorrect item. + SentBadItem { tag: Tag }, + /// Peer sent an item which failed validation. + SentInvalidItem { tag: Tag, error_msg: String }, + /// A finality signature that was sent is invalid. + SentBadFinalitySignature { + /// Error reported by block accumulator. + #[serde(skip_serializing)] + #[data_size(skip)] + error: block_accumulator::Error, + }, + /// A block that was sent is invalid. + SentBadBlock { + /// Error reported by block accumulator. + #[serde(skip_serializing)] + #[data_size(skip)] + error: block_accumulator::Error, + }, + /// An invalid proposal was received. + SentInvalidProposal { + /// The era for which the invalid value was destined. + era: EraId, + /// The specific error. + #[serde(skip_serializing)] + error: Box, + }, + /// Too many unasked or expired pongs were sent by the peer. + #[allow(dead_code)] // Disabled as per 1.5.5 for stability reasons. + PongLimitExceeded, + /// Peer misbehaved during consensus and is blocked for it. + BadConsensusBehavior, + /// Peer is on the wrong network. 
+ WrongNetwork { + /// The network name reported by the peer. + peer_network_name: String, + }, + /// Peer presented the wrong chainspec hash. + WrongChainspecHash { + /// The chainspec hash reported by the peer. + peer_chainspec_hash: Digest, + }, + /// Peer did not present a chainspec hash. + MissingChainspecHash, + /// Peer is considered dishonest. + DishonestPeer, + /// Peer sent too many finality signatures. + SentTooManyFinalitySignatures { max_allowed: u32 }, +} + +impl Display for BlocklistJustification { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self { + BlocklistJustification::SentBadItem { tag } => { + write!(f, "sent a {} we couldn't parse", tag) + } + BlocklistJustification::SentInvalidItem { tag, error_msg } => { + write!(f, "sent a {} which failed validation ({})", tag, error_msg) + } + BlocklistJustification::SentBadFinalitySignature { error } => write!( + f, + "sent a finality signature that is invalid or unexpected ({})", + error + ), + BlocklistJustification::SentInvalidProposal { era, error } => { + write!(f, "sent an invalid proposal in {} ({:?})", era, error) + } + BlocklistJustification::PongLimitExceeded => { + f.write_str("wrote too many expired or invalid pongs") + } + BlocklistJustification::BadConsensusBehavior => { + f.write_str("sent invalid data in consensus") + } + BlocklistJustification::WrongNetwork { peer_network_name } => write!( + f, + "reported to be on the wrong network ({:?})", + peer_network_name + ), + BlocklistJustification::WrongChainspecHash { + peer_chainspec_hash, + } => write!( + f, + "reported a mismatched chainspec hash ({})", + peer_chainspec_hash + ), + BlocklistJustification::MissingChainspecHash => { + f.write_str("sent handshake without chainspec hash") + } + BlocklistJustification::SentBadBlock { error } => { + write!(f, "sent a block that is invalid or unexpected ({})", error) + } + BlocklistJustification::DishonestPeer => f.write_str("dishonest peer"), + 
BlocklistJustification::SentTooManyFinalitySignatures { max_allowed } => write!( + f, + "sent too many finality signatures: maximum {max_allowed} signatures are allowed" + ), + } + } +} diff --git a/node/src/components/network/chain_info.rs b/node/src/components/network/chain_info.rs new file mode 100644 index 0000000000..4221e0955a --- /dev/null +++ b/node/src/components/network/chain_info.rs @@ -0,0 +1,75 @@ +//! Network-related chain identification information. + +// TODO: This module and `ChainId` should disappear in its entirety and the actual chainspec be made +// available. + +use std::net::SocketAddr; + +use casper_types::{Chainspec, Digest, ProtocolVersion}; +use datasize::DataSize; + +use super::{ + counting_format::ConnectionId, + message::{ConsensusCertificate, NodeKeyPair}, + Message, +}; + +/// Data retained from the chainspec by the networking component. +/// +/// Typically this information is used for creating handshakes. +#[derive(DataSize, Debug)] +pub(crate) struct ChainInfo { + /// Name of the network we participate in. We only remain connected to peers with the same + /// network name as us. + pub(super) network_name: String, + /// The maximum message size for a network message, as supplied from the chainspec. + pub(super) maximum_net_message_size: u32, + /// The protocol version. + pub(super) protocol_version: ProtocolVersion, + /// The hash of the chainspec. + pub(super) chainspec_hash: Digest, +} + +impl ChainInfo { + /// Create an instance of `ChainInfo` for testing. + #[cfg(test)] + pub fn create_for_testing() -> Self { + let network_name = "rust-tests-network"; + ChainInfo { + network_name: network_name.to_string(), + maximum_net_message_size: 24 * 1024 * 1024, // Hardcoded at 24M. + protocol_version: ProtocolVersion::V1_0_0, + chainspec_hash: Digest::hash(format!("{}-chainspec", network_name)), + } + } + + /// Create a handshake based on chain identification data. + pub(super) fn create_handshake

( + &self, + public_addr: SocketAddr, + consensus_keys: Option<&NodeKeyPair>, + connection_id: ConnectionId, + is_syncing: bool, + ) -> Message

{ + Message::Handshake { + network_name: self.network_name.clone(), + public_addr, + protocol_version: self.protocol_version, + consensus_certificate: consensus_keys + .map(|key_pair| ConsensusCertificate::create(connection_id, key_pair)), + is_syncing, + chainspec_hash: Some(self.chainspec_hash), + } + } +} + +impl From<&Chainspec> for ChainInfo { + fn from(chainspec: &Chainspec) -> Self { + ChainInfo { + network_name: chainspec.network_config.name.clone(), + maximum_net_message_size: chainspec.network_config.maximum_net_message_size, + protocol_version: chainspec.protocol_version(), + chainspec_hash: chainspec.hash(), + } + } +} diff --git a/node/src/components/network/config.rs b/node/src/components/network/config.rs index fd6140743f..fe96467aff 100644 --- a/node/src/components/network/config.rs +++ b/node/src/components/network/config.rs @@ -1,143 +1,167 @@ #[cfg(test)] use std::net::{Ipv4Addr, SocketAddr}; -use std::str::FromStr; +use std::path::PathBuf; +use casper_types::{ProtocolVersion, TimeDiff}; use datasize::DataSize; -use libp2p::request_response::RequestResponseConfig; use serde::{Deserialize, Serialize}; -use crate::{components::small_network, types::TimeDiff}; - -// TODO - remove these defaults once small_network's config has been replaced by this one. -mod temp { - pub(super) const CONNECTION_SETUP_TIMEOUT: &str = "10seconds"; - // TODO - set to reasonable limit, or remove. - pub(super) const MAX_ONE_WAY_MESSAGE_SIZE: u32 = u32::max_value(); - pub(super) const REQUEST_TIMEOUT: &str = "10seconds"; - pub(super) const CONNECTION_KEEP_ALIVE: &str = "10seconds"; - pub(super) const GOSSIP_HEARTBEAT_INTERVAL: &str = "1second"; - // TODO - set to reasonable limit, or remove. - pub(super) const MAX_GOSSIP_MESSAGE_SIZE: u32 = u32::max_value(); - pub(super) const GOSSIP_DUPLICATE_CACHE_TIMEOUT: &str = "1minute"; +use super::EstimatorWeights; + +/// Default binding address. +/// +/// Uses a fixed port per node, but binds on any interface. 
+const DEFAULT_BIND_ADDRESS: &str = "0.0.0.0:34553"; + +/// Default public address. +/// +/// Automatically sets the port, but defaults publishing localhost as the public address. +const DEFAULT_PUBLIC_ADDRESS: &str = "127.0.0.1:0"; + +const DEFAULT_MIN_PEERS_FOR_INITIALIZATION: u16 = 1; + +/// Default interval for gossiping network addresses. +const DEFAULT_GOSSIP_INTERVAL: TimeDiff = TimeDiff::from_seconds(30); + +/// Default delay until initial round of address gossiping starts. +const DEFAULT_INITIAL_GOSSIP_DELAY: TimeDiff = TimeDiff::from_seconds(5); + +/// Default time limit for an address to be in the pending set. +const DEFAULT_MAX_ADDR_PENDING_TIME: TimeDiff = TimeDiff::from_seconds(60); + +/// Default timeout during which the handshake needs to be completed. +const DEFAULT_HANDSHAKE_TIMEOUT: TimeDiff = TimeDiff::from_seconds(20); + +impl Default for Config { + fn default() -> Self { + Config { + bind_address: DEFAULT_BIND_ADDRESS.to_string(), + public_address: DEFAULT_PUBLIC_ADDRESS.to_string(), + known_addresses: Vec::new(), + min_peers_for_initialization: DEFAULT_MIN_PEERS_FOR_INITIALIZATION, + gossip_interval: DEFAULT_GOSSIP_INTERVAL, + initial_gossip_delay: DEFAULT_INITIAL_GOSSIP_DELAY, + max_addr_pending_time: DEFAULT_MAX_ADDR_PENDING_TIME, + handshake_timeout: DEFAULT_HANDSHAKE_TIMEOUT, + max_incoming_peer_connections: 0, + max_outgoing_byte_rate_non_validators: 0, + max_incoming_message_rate_non_validators: 0, + estimator_weights: Default::default(), + tarpit_version_threshold: None, + tarpit_duration: TimeDiff::from_seconds(600), + tarpit_chance: 0.2, + max_in_flight_demands: 50, + blocklist_retain_min_duration: TimeDiff::from_seconds(600), + blocklist_retain_max_duration: TimeDiff::from_seconds(1600), + identity: None, + } + } } -const DEFAULT_BIND_ADDRESS: &str = "0.0.0.0:22777"; -#[cfg(test)] -/// Address used to bind all local testing networking to by default. 
-const TEST_BIND_INTERFACE: Ipv4Addr = Ipv4Addr::LOCALHOST; +/// Network identity configuration. +#[derive(DataSize, Debug, Clone, Deserialize, Serialize)] +// Disallow unknown fields to ensure config files and command-line overrides contain valid keys. +#[serde(deny_unknown_fields)] +pub struct IdentityConfig { + /// Path to a signed certificate + pub tls_certificate: PathBuf, + /// Path to a secret key. + pub secret_key: PathBuf, + /// Path to a certificate authority certificate + pub ca_certificate: PathBuf, +} -/// Peer-to-peer network configuration. +/// Network configuration. #[derive(DataSize, Debug, Clone, Deserialize, Serialize)] // Disallow unknown fields to ensure config files and command-line overrides contain valid keys. #[serde(deny_unknown_fields)] pub struct Config { /// Address to bind to. pub bind_address: String, + /// Publicly advertised address, in case the node has a different external IP. + /// + /// If the port is specified as `0`, it will be replaced with the actually bound port. + pub public_address: String, /// Known address of a node on the network used for joining. pub known_addresses: Vec, - /// Whether this node is a bootstrap node or not. A boostrap node will continue to run even if - /// it has no peer connections, and is intended to be amongst the first nodes started on a - /// network. - pub is_bootstrap_node: bool, - /// Enable systemd startup notification. - pub systemd_support: bool, - /// The timeout for connection setup (including upgrades) for all inbound and outbound - /// connections. - pub connection_setup_timeout: TimeDiff, - /// The maximum serialized one-way message size in bytes. - pub max_one_way_message_size: u32, - /// The timeout for inbound and outbound requests. - pub request_timeout: TimeDiff, - /// The keep-alive timeout of idle connections. - pub connection_keep_alive: TimeDiff, - /// Interval used for gossip heartbeats. 
- pub gossip_heartbeat_interval: TimeDiff, - /// Maximum serialized gossip message size in bytes. - pub max_gossip_message_size: u32, - /// Time for which to retain a cached gossip message ID to prevent duplicates being gossiped. - pub gossip_duplicate_cache_timeout: TimeDiff, + /// Minimum number of fully-connected peers to consider component initialized. + pub min_peers_for_initialization: u16, + /// Interval in milliseconds used for gossiping. + pub gossip_interval: TimeDiff, + /// Initial delay before the first round of gossip. + pub initial_gossip_delay: TimeDiff, + /// Maximum allowed time for an address to be kept in the pending set. + pub max_addr_pending_time: TimeDiff, + /// Maximum allowed time for handshake completion. + pub handshake_timeout: TimeDiff, + /// Maximum number of incoming connections per unique peer. Unlimited if `0`. + pub max_incoming_peer_connections: u16, + /// Maximum number of bytes per second allowed for non-validating peers. Unlimited if 0. + pub max_outgoing_byte_rate_non_validators: u32, + /// Maximum of requests answered from non-validating peers. Unlimited if 0. + pub max_incoming_message_rate_non_validators: u32, + /// Weight distribution for the payload impact estimator. + pub estimator_weights: EstimatorWeights, + /// The protocol version at which (or under) tarpitting is enabled. + pub tarpit_version_threshold: Option, + /// If tarpitting is enabled, duration for which connections should be kept open. + pub tarpit_duration: TimeDiff, + /// The chance, expressed as a number between 0.0 and 1.0, of triggering the tarpit. + pub tarpit_chance: f32, + /// Maximum number of demands for objects that can be in-flight. + pub max_in_flight_demands: u32, + /// Minimum time a peer is kept on block list before being redeemed. The actual + /// timeout duration is calculated by selecting a random value between + /// . + pub blocklist_retain_min_duration: TimeDiff, + /// Maximum time a peer is kept on block list before being redeemed. 
The actual + /// timeout duration is calculated by selecting a random value between + /// . + pub blocklist_retain_max_duration: TimeDiff, + /// Network identity configuration option. + /// + /// An identity will be automatically generated when starting up a node if this option is + /// unspecified. + pub identity: Option, } -impl Default for Config { - fn default() -> Self { - Config { - bind_address: DEFAULT_BIND_ADDRESS.to_string(), - known_addresses: Vec::new(), - is_bootstrap_node: false, - systemd_support: false, - connection_setup_timeout: TimeDiff::from_str(temp::CONNECTION_SETUP_TIMEOUT).unwrap(), - max_one_way_message_size: temp::MAX_ONE_WAY_MESSAGE_SIZE, - request_timeout: TimeDiff::from_str(temp::REQUEST_TIMEOUT).unwrap(), - connection_keep_alive: TimeDiff::from_str(temp::CONNECTION_KEEP_ALIVE).unwrap(), - gossip_heartbeat_interval: TimeDiff::from_str(temp::GOSSIP_HEARTBEAT_INTERVAL).unwrap(), - max_gossip_message_size: temp::MAX_GOSSIP_MESSAGE_SIZE, - gossip_duplicate_cache_timeout: TimeDiff::from_str( - temp::GOSSIP_DUPLICATE_CACHE_TIMEOUT, - ) - .unwrap(), - } - } -} +#[cfg(test)] +/// Reduced gossip interval for local testing. +const DEFAULT_TEST_GOSSIP_INTERVAL: TimeDiff = TimeDiff::from_seconds(1); + +#[cfg(test)] +/// Address used to bind all local testing networking to by default. +const TEST_BIND_INTERFACE: Ipv4Addr = Ipv4Addr::LOCALHOST; #[cfg(test)] impl Config { /// Construct a configuration suitable for testing with no known address that binds to a /// specific address. - pub(super) fn new(bind_address: SocketAddr, is_bootstrap_node: bool) -> Self { + pub(super) fn new(bind_address: SocketAddr) -> Self { Config { bind_address: bind_address.to_string(), + public_address: bind_address.to_string(), known_addresses: vec![bind_address.to_string()], - is_bootstrap_node, + gossip_interval: DEFAULT_TEST_GOSSIP_INTERVAL, ..Default::default() } } /// Constructs a `Config` suitable for use by the first node of a testnet on a single machine. 
pub(crate) fn default_local_net_first_node(bind_port: u16) -> Self { - Config::new((TEST_BIND_INTERFACE, bind_port).into(), true) + Config::new((TEST_BIND_INTERFACE, bind_port).into()) } /// Constructs a `Config` suitable for use by a node joining a testnet on a single machine. pub(crate) fn default_local_net(known_peer_port: u16) -> Self { Config { bind_address: SocketAddr::from((TEST_BIND_INTERFACE, 0)).to_string(), + public_address: SocketAddr::from((TEST_BIND_INTERFACE, 0)).to_string(), known_addresses: vec![ SocketAddr::from((TEST_BIND_INTERFACE, known_peer_port)).to_string() ], + gossip_interval: DEFAULT_TEST_GOSSIP_INTERVAL, ..Default::default() } } } - -impl From<&small_network::Config> for Config { - fn from(config: &small_network::Config) -> Self { - let public_ip = config - .public_address - .split(':') - .next() - .expect("should get IP from public_address"); - let bind_port = config - .bind_address - .split(':') - .nth(1) - .expect("should get port from bind_address"); - let public_address = format!("{}:{}", public_ip, bind_port); - let is_bootstrap_node = config.known_addresses.contains(&public_address); - Config { - bind_address: config.bind_address.clone(), - known_addresses: config.known_addresses.clone(), - is_bootstrap_node, - systemd_support: config.systemd_support, - ..Default::default() - } - } -} - -impl From<&Config> for RequestResponseConfig { - fn from(config: &Config) -> Self { - let mut request_response_config = RequestResponseConfig::default(); - request_response_config.set_request_timeout(config.request_timeout.into()); - request_response_config.set_connection_keep_alive(config.connection_keep_alive.into()); - request_response_config - } -} diff --git a/node/src/components/network/counting_format.rs b/node/src/components/network/counting_format.rs new file mode 100644 index 0000000000..86e19f9edd --- /dev/null +++ b/node/src/components/network/counting_format.rs @@ -0,0 +1,380 @@ +//! 
Observability for network serialization/deserialization. +//! +//! This module introduces two IDs: [`ConnectionId`] and [`TraceId`]. The [`ConnectionId`] is a +//! unique ID per established connection that can be independently derive by peers on either of a +//! connection. [`TraceId`] identifies a single message, distinguishing even messages that are sent +//! to the same peer with equal contents. + +use std::{ + convert::TryFrom, + fmt::{self, Display, Formatter}, + pin::Pin, + sync::{Arc, Weak}, +}; + +use bytes::{Bytes, BytesMut}; +use openssl::ssl::SslRef; +use pin_project::pin_project; +#[cfg(test)] +use rand::RngCore; +use static_assertions::const_assert; +use tokio_serde::{Deserializer, Serializer}; +use tracing::{trace, warn}; + +#[cfg(test)] +use casper_types::testing::TestRng; +use casper_types::Digest; + +use super::{tls::KeyFingerprint, Message, Metrics, Payload}; +use crate::{types::NodeId, utils}; + +/// Lazily-evaluated network message ID generator. +/// +/// Calculates a hash for the wrapped value when `Display::fmt` is called. +#[derive(Copy, Clone, Debug, Eq, PartialEq)] +struct TraceId([u8; 8]); + +impl Display for TraceId { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.write_str(&base16::encode_lower(&self.0)) + } +} + +/// A metric-updating serializer/deserializer wrapper for network messages. +/// +/// Classifies each message given and updates the `NetworkingMetrics` accordingly. Also emits a +/// TRACE-level message to the `net_out` and `net_in` target with a per-message unique hash when +/// a message is sent or received. +#[pin_project] +#[derive(Debug)] +pub struct CountingFormat { + /// The actual serializer performing the work. + #[pin] + inner: F, + /// Identifier for the connection. + connection_id: ConnectionId, + /// Counter for outgoing messages. + out_count: u64, + /// Counter for incoming messages. + in_count: u64, + /// Our role in the connection. + role: Role, + /// Metrics to update. 
+ metrics: Weak, +} + +impl CountingFormat { + /// Creates a new counting formatter. + #[inline] + pub(super) fn new( + metrics: Weak, + connection_id: ConnectionId, + role: Role, + inner: F, + ) -> Self { + Self { + metrics, + connection_id, + out_count: 0, + in_count: 0, + role, + inner, + } + } +} + +impl Serializer>> for CountingFormat +where + F: Serializer>>, + P: Payload, +{ + type Error = F::Error; + + #[inline] + fn serialize(self: Pin<&mut Self>, item: &Arc>) -> Result { + let this = self.project(); + let projection: Pin<&mut F> = this.inner; + + let serialized = F::serialize(projection, item)?; + let msg_size = serialized.len() as u64; + let msg_kind = item.classify(); + Metrics::record_payload_out(this.metrics, msg_kind, msg_size); + + let trace_id = this + .connection_id + .create_trace_id(this.role.out_flag(), *this.out_count); + *this.out_count += 1; + + trace!(target: "net_out", + msg_id = %trace_id, + msg_size, + msg_kind = %msg_kind, "sending"); + + Ok(serialized) + } +} + +impl Deserializer> for CountingFormat +where + F: Deserializer>, + P: Payload, +{ + type Error = F::Error; + + #[inline] + fn deserialize(self: Pin<&mut Self>, src: &BytesMut) -> Result, Self::Error> { + let this = self.project(); + let projection: Pin<&mut F> = this.inner; + + let msg_size = src.len() as u64; + + let deserialized = F::deserialize(projection, src)?; + let msg_kind = deserialized.classify(); + Metrics::record_payload_in(this.metrics, msg_kind, msg_size); + + let trace_id = this + .connection_id + .create_trace_id(this.role.in_flag(), *this.in_count); + *this.in_count += 1; + + trace!(target: "net_in", + msg_id = %trace_id, + msg_size, + msg_kind = %msg_kind, "received"); + + Ok(deserialized) + } +} + +/// An ID identifying a connection. +/// +/// The ID is guaranteed to be the same on both ends of the connection, but not guaranteed to be +/// unique or sufficiently random. Do not use it for any cryptographic/security related purposes. 
+#[derive(Copy, Clone, Debug, Eq, PartialEq)] +pub(super) struct ConnectionId([u8; Digest::LENGTH]); + +// Invariant assumed by `ConnectionId`, `Digest` must be <= than `KeyFingerprint`. +const_assert!(KeyFingerprint::LENGTH >= Digest::LENGTH); +// We also assume it is at least 12 bytes. +const_assert!(Digest::LENGTH >= 12); + +/// Random data derived from TLS connections. +#[derive(Copy, Clone, Debug)] +pub(super) struct TlsRandomData { + /// Random data extract from the client of the connection. + combined_random: [u8; 12], +} + +/// Zero-randomness. +/// +/// Used to check random data. +const ZERO_RANDOMNESS: [u8; 12] = [0; 12]; + +impl TlsRandomData { + /// Collects random data from an existing SSL collection. + /// + /// Ideally we would use the TLS session ID, but it is not available on outgoing connections at + /// the times we need it. Instead, we use the `server_random` and `client_random` nonces, which + /// will be the same on both ends of the connection. + fn collect(ssl: &SslRef) -> Self { + // We are using only the first 12 bytes of these 32 byte values here, just in case we missed + // something in our assessment that hashing these should be safe. Additionally, these values + // are XOR'd, not concatenated. All this is done to prevent leaking information about these + // numbers. + // + // Some SSL implementations use timestamps for the first four bytes, so to be sufficiently + // random, we use 4 + 8 bytes of the nonces. + let mut server_random = [0; 12]; + let mut client_random = [0; 12]; + + ssl.server_random(&mut server_random); + + if server_random == ZERO_RANDOMNESS { + warn!("TLS server random is all zeros"); + } + + ssl.client_random(&mut client_random); + + if server_random == ZERO_RANDOMNESS { + warn!("TLS client random is all zeros"); + } + + // Combine using XOR. + utils::xor(&mut server_random, &client_random); + + Self { + combined_random: server_random, + } + } + + /// Creates random `TlsRandomData`. 
+ #[cfg(test)] + fn random(rng: &mut TestRng) -> Self { + let mut buffer = [0u8; 12]; + + rng.fill_bytes(&mut buffer); + + Self { + combined_random: buffer, + } + } +} + +impl ConnectionId { + /// Creates a new connection ID, based on random values from server and client, as well as + /// node IDs. + fn create(random_data: TlsRandomData, our_id: NodeId, their_id: NodeId) -> ConnectionId { + // Hash the resulting random values. + let mut id = Digest::hash(random_data.combined_random).value(); + + // We XOR in a hashes of server and client fingerprint, to ensure that in the case of an + // accidental collision (e.g. when `server_random` and `client_random` turn out to be all + // zeros), we still have a chance of producing a reasonable ID. + utils::xor(&mut id, &our_id.hash_bytes()[0..Digest::LENGTH]); + utils::xor(&mut id, &their_id.hash_bytes()[0..Digest::LENGTH]); + + ConnectionId(id) + } + + /// Creates a new [`TraceID`] based on the message count. + /// + /// The `flag` should be created using the [`Role::in_flag`] or [`Role::out_flag`] method and + /// must be created accordingly (`out_flag` when serializing, `in_flag` when deserializing). + fn create_trace_id(&self, flag: u8, count: u64) -> TraceId { + // Copy the basic network ID. + let mut buffer = self.0; + + // Direction set on first byte. + buffer[0] ^= flag; + + // XOR in message count. + utils::xor(&mut buffer[4..12], &count.to_ne_bytes()); + + // Hash again and truncate. + let full_hash = Digest::hash(buffer); + + // Safe to expect here, as we assert earlier that `Digest` is at least 12 bytes. + let truncated = TryFrom::try_from(&full_hash.value()[0..8]).expect("buffer size mismatch"); + + TraceId(truncated) + } + + #[inline] + /// Returns a reference to the raw bytes of the connection ID. + pub(crate) fn as_bytes(&self) -> &[u8] { + &self.0 + } + + /// Creates a new connection ID from an existing SSL connection. 
+ #[inline] + pub(crate) fn from_connection(ssl: &SslRef, our_id: NodeId, their_id: NodeId) -> Self { + Self::create(TlsRandomData::collect(ssl), our_id, their_id) + } + + /// Creates a random `ConnectionId`. + #[cfg(test)] + pub(super) fn random(rng: &mut TestRng) -> Self { + ConnectionId::create( + TlsRandomData::random(rng), + NodeId::random(rng), + NodeId::random(rng), + ) + } +} + +/// Message sending direction. +#[derive(Copy, Clone, Debug)] +#[repr(u8)] +pub(super) enum Role { + /// Dialer, i.e. initiator of the connection. + Dialer, + /// Listener, acceptor of the connection. + Listener, +} + +impl Role { + /// Returns a flag suitable for hashing incoming messages. + #[inline] + fn in_flag(self) -> u8 { + !(self.out_flag()) + } + + /// Returns a flag suitable for hashing outgoing messages. + #[inline] + fn out_flag(self) -> u8 { + // The magic flag uses 50% of the bits, to be XOR'd into the hash later. + const MAGIC_FLAG: u8 = 0b10101010; + + match self { + Role::Dialer => MAGIC_FLAG, + Role::Listener => !MAGIC_FLAG, + } + } +} + +#[cfg(test)] +mod tests { + use crate::types::NodeId; + + use super::{ConnectionId, Role, TlsRandomData, TraceId}; + + #[test] + fn trace_id_has_16_character() { + let data = [0, 1, 2, 3, 4, 5, 6, 7]; + + let output = format!("{}", TraceId(data)); + + assert_eq!(output.len(), 16); + } + + #[test] + fn can_create_deterministic_trace_id() { + let mut rng = crate::new_rng(); + + // Scenario: Nodes A and B are connecting to each other. Both connections are established. + let node_a = NodeId::random(&mut rng); + let node_b = NodeId::random(&mut rng); + + // We get two connections, with different Tls random data, but it will be the same on both + // ends of the connection. + let a_to_b_random = TlsRandomData::random(&mut rng); + let a_to_b = ConnectionId::create(a_to_b_random, node_a, node_b); + let a_to_b_alt = ConnectionId::create(a_to_b_random, node_b, node_a); + + // Ensure that either peer ends up with the same connection id. 
+ assert_eq!(a_to_b, a_to_b_alt); + + let b_to_a_random = TlsRandomData::random(&mut rng); + let b_to_a = ConnectionId::create(b_to_a_random, node_b, node_a); + let b_to_a_alt = ConnectionId::create(b_to_a_random, node_a, node_b); + assert_eq!(b_to_a, b_to_a_alt); + + // The connection IDs must be distinct though. + assert_ne!(a_to_b, b_to_a); + + // We are only looking at messages sent on the `a_to_b` connection, although from both ends. + // In our example example, `node_a` is the dialing node, `node_b` the listener. + + // Trace ID on A, after sending to B. + let msg_ab_0_on_a = a_to_b.create_trace_id(Role::Dialer.out_flag(), 0); + + // The same message on B. + let msg_ab_0_on_b = a_to_b.create_trace_id(Role::Listener.in_flag(), 0); + + // These trace IDs must match. + assert_eq!(msg_ab_0_on_a, msg_ab_0_on_b); + + // The second message must have a distinct trace ID. + let msg_ab_1_on_a = a_to_b.create_trace_id(Role::Dialer.out_flag(), 1); + let msg_ab_1_on_b = a_to_b.create_trace_id(Role::Listener.in_flag(), 1); + assert_eq!(msg_ab_1_on_a, msg_ab_1_on_b); + assert_ne!(msg_ab_0_on_a, msg_ab_1_on_a); + + // Sending a message on the **same connection** in a **different direction** also must yield + // a different message id. 
+ let msg_ba_0_on_b = a_to_b.create_trace_id(Role::Listener.out_flag(), 0); + let msg_ba_0_on_a = a_to_b.create_trace_id(Role::Dialer.in_flag(), 0); + assert_eq!(msg_ba_0_on_b, msg_ba_0_on_a); + assert_ne!(msg_ba_0_on_b, msg_ab_0_on_b); + } +} diff --git a/node/src/components/network/error.rs b/node/src/components/network/error.rs index cad6f25112..ef7e4bdf81 100644 --- a/node/src/components/network/error.rs +++ b/node/src/components/network/error.rs @@ -1,46 +1,219 @@ -use std::io; +use std::{error, io, net::SocketAddr, result}; -use libp2p::{core::connection::ConnectionLimit, noise::NoiseError, Multiaddr, TransportError}; +use datasize::DataSize; +use openssl::{error::ErrorStack, ssl}; +use serde::Serialize; use thiserror::Error; +use casper_types::{crypto, Digest, ProtocolVersion}; + +use crate::{ + tls::{LoadCertError, ValidationError}, + utils::ResolveAddressError, +}; + +pub(super) type Result = result::Result; + /// Error type returned by the `Network` component. -#[derive(Debug, Error)] +#[derive(Debug, Error, Serialize)] pub enum Error { - /// Invalid configuration: must have at least one known address. - #[error("config must have at least one known address")] - NoKnownAddress, - - /// Signing libp2p-noise static ID keypair failed. - #[error("signing libp2p-noise static ID keypair failed: {0}")] - StaticKeypairSigning(NoiseError), - - /// Failed to listen. - #[error("failed to listen on {address}: {error}")] - Listen { - address: Multiaddr, - error: TransportError, - }, - - /// Failed to dial the given peer. - #[error("failed to dial the peer on {address}: {error}")] - DialPeer { - address: Multiaddr, - error: ConnectionLimit, - }, - - /// Failed to serialize a message. - #[error("failed to serialize: {0}")] - Serialization(bincode::ErrorKind), - - /// Failed to deserialize a message. - #[error("failed to deserialize: {0}")] - Deserialization(bincode::ErrorKind), - - /// Message too large. 
- #[error("message of {actual_size} bytes exceeds limit of {max_size} bytes")] - MessageTooLarge { max_size: u32, actual_size: u64 }, - + /// We do not have any known hosts. + #[error("could not resolve at least one known host (or none provided)")] + EmptyKnownHosts, + /// Failed to create a TCP listener. + #[error("failed to create listener on {1}")] + ListenerCreation( + #[serde(skip_serializing)] + #[source] + io::Error, + SocketAddr, + ), + /// Failed to get TCP listener address. + #[error("failed to get listener addr")] + ListenerAddr( + #[serde(skip_serializing)] + #[source] + io::Error, + ), + /// Failed to set listener to non-blocking. + #[error("failed to set listener to non-blocking")] + ListenerSetNonBlocking( + #[serde(skip_serializing)] + #[source] + io::Error, + ), + /// Failed to convert std TCP listener to tokio TCP listener. + #[error("failed to convert listener to tokio")] + ListenerConversion( + #[serde(skip_serializing)] + #[source] + io::Error, + ), + /// Could not resolve root node address. + #[error("failed to resolve network address")] + ResolveAddr( + #[serde(skip_serializing)] + #[source] + ResolveAddressError, + ), /// Instantiating metrics failed. #[error(transparent)] - MetricsError(#[from] prometheus::Error), + Metrics( + #[serde(skip_serializing)] + #[from] + prometheus::Error, + ), + /// Failed to load a certificate. + #[error("failed to load a certificate: {0}")] + LoadCertificate( + #[serde(skip_serializing)] + #[from] + LoadCertError, + ), +} + +// Manual implementation for `DataSize` - the type contains too many FFI variants that are hard to +// size, so we give up on estimating it altogether. 
+impl DataSize for Error { + const IS_DYNAMIC: bool = false; + + const STATIC_HEAP_SIZE: usize = 0; + + fn estimate_heap_size(&self) -> usize { + 0 + } +} + +impl DataSize for ConnectionError { + const IS_DYNAMIC: bool = false; + + const STATIC_HEAP_SIZE: usize = 0; + + fn estimate_heap_size(&self) -> usize { + 0 + } +} + +/// An error related to an incoming or outgoing connection. +#[derive(Debug, Error, Serialize)] +pub enum ConnectionError { + /// Failed to create TLS acceptor. + #[error("failed to create TLS acceptor/connector")] + TlsInitialization( + #[serde(skip_serializing)] + #[source] + ErrorStack, + ), + /// TCP connection failed. + #[error("TCP connection failed")] + TcpConnection( + #[serde(skip_serializing)] + #[source] + io::Error, + ), + /// Did not succeed setting TCP_NODELAY on the connection. + #[error("Could not set TCP_NODELAY on outgoing connection")] + TcpNoDelay( + #[serde(skip_serializing)] + #[source] + io::Error, + ), + /// Handshaking error. + #[error("TLS handshake error")] + TlsHandshake( + #[serde(skip_serializing)] + #[source] + ssl::Error, + ), + /// Remote failed to present a client/server certificate. + #[error("no client certificate presented")] + NoPeerCertificate, + /// TLS validation error. + #[error("TLS validation error of peer certificate")] + PeerCertificateInvalid(#[source] ValidationError), + /// Failed to send handshake. + #[error("handshake send failed")] + HandshakeSend( + #[serde(skip_serializing)] + #[source] + IoError, + ), + /// Failed to receive handshake. + #[error("handshake receive failed")] + HandshakeRecv( + #[serde(skip_serializing)] + #[source] + IoError, + ), + /// Peer reported a network name that does not match ours. + #[error("peer is on different network: {0}")] + WrongNetwork(String), + /// Peer reported an incompatible version. + #[error("peer is running incompatible version: {0}")] + IncompatibleVersion(ProtocolVersion), + /// Peer is using a different chainspec. 
+ #[error("peer is using a different chainspec, hash: {0}")] + WrongChainspecHash(Digest), + /// Peer should have included the chainspec hash in the handshake message, + /// but didn't. + #[error("peer did not include chainspec hash in the handshake when it was required")] + MissingChainspecHash, + /// Peer did not send any message, or a non-handshake as its first message. + #[error("peer did not send handshake")] + DidNotSendHandshake, + /// Failed to encode our handshake. + #[error("could not encode our handshake")] + CouldNotEncodeOurHandshake( + #[serde(skip_serializing)] + #[source] + io::Error, + ), + /// A background sender for our handshake panicked or crashed. + /// + /// This is usually a bug. + #[error("handshake sender crashed")] + HandshakeSenderCrashed( + #[serde(skip_serializing)] + #[source] + tokio::task::JoinError, + ), + /// Could not deserialize the message that is supposed to contain the remotes handshake. + #[error("could not decode remote handshake message")] + InvalidRemoteHandshakeMessage( + #[serde(skip_serializing)] + #[source] + io::Error, + ), + /// The peer sent a consensus certificate, but it was invalid. + #[error("invalid consensus certificate")] + InvalidConsensusCertificate( + #[serde(skip_serializing)] + #[source] + crypto::Error, + ), + /// Failed to reunite handshake sink/stream. + /// + /// This is usually a bug. + #[error("handshake sink/stream could not be reunited")] + FailedToReuniteHandshakeSinkAndStream, + /// Handshake not allowed (Isolated mode) + #[error("handshake not allowed (Isolated mode)")] + HandshakeNotAllowed, +} + +/// IO operation that can time out or close. +#[derive(Debug, Error)] +pub enum IoError +where + E: error::Error + 'static, +{ + /// IO operation timed out. + #[error("io timeout")] + Timeout, + /// Non-timeout IO error. + #[error(transparent)] + Error(#[from] E), + /// Unexpected close/end-of-file. 
+ #[error("closed unexpectedly")] + UnexpectedEof, } diff --git a/node/src/components/network/event.rs b/node/src/components/network/event.rs index 4ceb70a12d..90570c8216 100644 --- a/node/src/components/network/event.rs +++ b/node/src/components/network/event.rs @@ -1,180 +1,289 @@ use std::{ fmt::{self, Debug, Display, Formatter}, io, - num::NonZeroU32, + mem::size_of, + net::SocketAddr, + sync::Arc, }; use derive_more::From; -use libp2p::{ - core::connection::{ConnectedPoint, PendingConnectionError}, - Multiaddr, -}; +use futures::stream::{SplitSink, SplitStream}; use serde::Serialize; use static_assertions::const_assert; +use tracing::Span; + +use casper_types::PublicKey; +use super::{error::ConnectionError, FullTransport, GossipedAddress, Message, NodeId}; use crate::{ - effect::requests::{NetworkInfoRequest, NetworkRequest}, - protocol::Message, - types::NodeId, + effect::{ + announcements::PeerBehaviorAnnouncement, + requests::{NetworkInfoRequest, NetworkRequest}, + }, + protocol::Message as ProtocolMessage, }; -use core::mem; -const _NETWORK_EVENT_SIZE: usize = mem::size_of::>(); -const_assert!(_NETWORK_EVENT_SIZE < 178); +const_assert!(size_of::>() < 65); +/// A network event. #[derive(Debug, From, Serialize)] -#[repr(u8)] -pub enum Event

{ - // ========== Events triggered by the libp2p network behavior ========== - /// A connection to the given peer has been opened. - ConnectionEstablished { - /// Identity of the peer that we have connected to. - peer_id: Box, - /// Endpoint of the connection that has been opened. - #[serde(skip_serializing)] - endpoint: ConnectedPoint, - /// Number of established connections to this peer, including the one that has just been - /// opened. - num_established: NonZeroU32, +pub(crate) enum Event

{ + Initialize, + + /// The TLS handshake completed on the incoming connection. + IncomingConnection { + incoming: Box>, + #[serde(skip)] + span: Span, }, - /// A connection with the given peer has been closed, possibly as a result of an error. - ConnectionClosed { - /// Identity of the peer that we have connected to. + + /// Received network message. + IncomingMessage { peer_id: Box, - /// Endpoint of the connection that has been closed. - #[serde(skip_serializing)] - endpoint: ConnectedPoint, - /// Number of other remaining connections to this same peer. - num_established: u32, - /// Reason for the disconnection, if it was not a successful active close. - cause: Option, + msg: Box>, + #[serde(skip)] + span: Span, }, - /// Tried to dial an address but it ended up being unreachable. - UnreachableAddress { - /// `NodeId` that we were trying to reach. - peer_id: Box, - /// Address that we failed to reach. - address: Multiaddr, - /// Error that has been encountered. + + /// Incoming connection closed. + IncomingClosed { #[serde(skip_serializing)] - error: PendingConnectionError, - /// Number of remaining connection attempts that are being tried for this peer. - attempts_remaining: u32, - }, - /// One of our listeners has reported a new local listening address. - NewListenAddress(Multiaddr), - /// One of our listeners has reported the expiration of a listening address. - ExpiredListenAddress(Multiaddr), - /// One of the listeners gracefully closed. - ListenerClosed { - /// The addresses that the listener was listening on. These addresses are now considered - /// expired, similar to if a [`ExpiredListenAddress`](Event::ExpiredListenAddress) event - /// has been generated for each of them. - addresses: Vec, - /// Reason for the closure. Contains `Ok(())` if the stream produced `None`, or `Err` if - /// the stream produced an error. 
+ result: io::Result<()>, + peer_id: Box, + peer_addr: SocketAddr, #[serde(skip_serializing)] - reason: Result<(), io::Error>, + span: Box, }, - /// One of the listeners reported a non-fatal error. - ListenerError { - /// The listener error. + + /// A new outgoing connection was successfully established. + OutgoingConnection { + outgoing: Box>, #[serde(skip_serializing)] - error: io::Error, + span: Span, }, - /// A new entry was added/updated in the Kademlia routing table. - RoutingTableUpdated { - /// New peer. - #[serde(skip_serializing)] - peer: libp2p::PeerId, - // Note: `addresses` is omitted, as we are not interested in this information currently. - /// Potentially evicted peer (to make room in the routing table). - #[serde(skip_serializing)] - old_peer: Option, + + /// An established connection was terminated. + OutgoingDropped { + peer_id: Box, + peer_addr: SocketAddr, }, - // ========== Other events ========== - /// A network request made by a different component. + /// Incoming network request. #[from] NetworkRequest { #[serde(skip_serializing)] - request: NetworkRequest, + req: Box>, }, /// Incoming network info request. #[from] NetworkInfoRequest { #[serde(skip_serializing)] - info_request: NetworkInfoRequest, + req: Box, }, + + /// The node should gossip its own public listening address. + GossipOurAddress, + + /// We received a peer's public listening address via gossip. + PeerAddressReceived(GossipedAddress), + + /// Housekeeping for the outgoing manager. + SweepOutgoing, + + /// Blocklist announcement. + #[from] + BlocklistAnnouncement(PeerBehaviorAnnouncement), +} + +impl From> for Event { + fn from(req: NetworkRequest) -> Self { + Self::NetworkRequest { req: Box::new(req) } + } +} + +impl From for Event { + fn from(req: NetworkInfoRequest) -> Self { + Self::NetworkInfoRequest { req: Box::new(req) } + } } impl Display for Event

{ fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { match self { - Event::ConnectionEstablished { - peer_id, - endpoint, - num_established, - } => write!( - f, - "connection {} to {} at {:?} established", - num_established, peer_id, endpoint - ), - Event::ConnectionClosed { - peer_id, - endpoint, - num_established, - cause: Some(error), - } => write!( - f, - "connection to {} at {:?} closed, {} remaining: {}", - peer_id, endpoint, num_established, error - ), - Event::ConnectionClosed { - peer_id, - endpoint, - num_established, - cause: None, - } => write!( - f, - "connection to {} at {:?} closed, {} remaining", - peer_id, endpoint, num_established - ), - Event::UnreachableAddress { + Event::Initialize => write!(f, "initialize"), + Event::IncomingConnection { incoming, span: _ } => { + write!(f, "incoming connection: {}", incoming) + } + Event::IncomingMessage { + peer_id: node_id, + msg, + span: _, + } => write!(f, "msg from {}: {}", node_id, msg), + Event::IncomingClosed { peer_addr, .. } => { + write!(f, "closed connection from {}", peer_addr) + } + Event::OutgoingConnection { outgoing, span: _ } => { + write!(f, "outgoing connection: {}", outgoing) + } + Event::OutgoingDropped { peer_id, peer_addr } => { + write!(f, "dropped outgoing {} {}", peer_id, peer_addr) + } + Event::NetworkRequest { req } => write!(f, "request: {}", req), + Event::NetworkInfoRequest { req } => write!(f, "request: {}", req), + Event::GossipOurAddress => write!(f, "gossip our address"), + Event::PeerAddressReceived(gossiped_address) => { + write!(f, "received gossiped peer address {}", gossiped_address) + } + Event::BlocklistAnnouncement(ann) => { + write!(f, "handling blocklist announcement: {}", ann) + } + Event::SweepOutgoing => { + write!(f, "sweep outgoing connections") + } + } + } +} + +/// Outcome of an incoming connection negotiation. +#[derive(Debug, Serialize)] +pub(crate) enum IncomingConnection

{ + /// The connection failed early on, before even a peer's [`NodeId`] could be determined. + FailedEarly { + /// Remote port the peer dialed us from. + peer_addr: SocketAddr, + /// Error causing the failure. + error: ConnectionError, + }, + /// Connection failed after TLS was successfully established; thus we have a valid [`NodeId`]. + Failed { + /// Remote port the peer dialed us from. + peer_addr: SocketAddr, + /// Peer's [`NodeId`]. + peer_id: NodeId, + /// Error causing the failure. + error: ConnectionError, + }, + /// Connection turned out to be a loopback connection. + Loopback, + /// Connection successfully established. + Established { + /// Remote port the peer dialed us from. + peer_addr: SocketAddr, + /// Public address advertised by the peer. + public_addr: SocketAddr, + /// Peer's [`NodeId`]. + peer_id: NodeId, + /// The public key the peer is validating with, if any. + peer_consensus_public_key: Option, + /// Stream of incoming messages. for incoming connections. + #[serde(skip_serializing)] + stream: SplitStream>, + }, +} + +impl

Display for IncomingConnection

{ + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self { + IncomingConnection::FailedEarly { peer_addr, error } => { + write!(f, "early failure from {}: {}", peer_addr, error) + } + IncomingConnection::Failed { + peer_addr, peer_id, - address, error, - attempts_remaining, - } => write!( - f, - "failed to connect to {} at {}, {} attempts remaining: {}", - peer_id, address, attempts_remaining, error - ), - Event::NewListenAddress(address) => write!(f, "new listening address {}", address), - Event::ExpiredListenAddress(address) => { - write!(f, "expired listening address {}", address) - } - Event::ListenerClosed { - addresses, - reason: Ok(()), - } => write!(f, "closed listener {:?}", addresses), - Event::ListenerClosed { - addresses, - reason: Err(error), - } => write!(f, "closed listener {:?}: {}", addresses, error), - Event::ListenerError { error } => write!(f, "non-fatal listener error: {}", error), - Event::RoutingTableUpdated { peer, old_peer } => { - write!(f, "added {} to routing table", peer)?; - if let Some(old_peer_id) = old_peer { - write!(f, " (replaces {})", old_peer_id)?; + } => write!(f, "failure from {}/{}: {}", peer_addr, peer_id, error), + IncomingConnection::Loopback => f.write_str("loopback"), + IncomingConnection::Established { + peer_addr, + public_addr, + peer_id, + peer_consensus_public_key, + stream: _, + } => { + write!( + f, + "connection established from {}/{}; public: {}", + peer_addr, peer_id, public_addr + )?; + + if let Some(public_key) = peer_consensus_public_key { + write!(f, " [{}]", public_key) + } else { + f.write_str(" [no validator id]") } - Ok(()) } + } + } +} + +/// Outcome of an outgoing connection attempt. +#[derive(Debug, Serialize)] +pub(crate) enum OutgoingConnection

{ + /// The outgoing connection failed early on, before a peer's [`NodeId`] could be determined. + FailedEarly { + /// Address that was dialed. + peer_addr: SocketAddr, + /// Error causing the failure. + error: ConnectionError, + }, + /// Connection failed after TLS was successfully established; thus we have a valid [`NodeId`]. + Failed { + /// Address that was dialed. + peer_addr: SocketAddr, + /// Peer's [`NodeId`]. + peer_id: NodeId, + /// Error causing the failure. + error: ConnectionError, + }, + /// Connection turned out to be a loopback connection. + Loopback { peer_addr: SocketAddr }, + /// Connection successfully established. + Established { + /// Address that was dialed. + peer_addr: SocketAddr, + /// Peer's [`NodeId`]. + peer_id: NodeId, + /// The public key the peer is validating with, if any. + peer_consensus_public_key: Option, + /// Sink for outgoing messages. + #[serde(skip_serializing)] + sink: SplitSink, Arc>>, + /// Holds the information whether the remote node is syncing. + is_syncing: bool, + }, +} - Event::NetworkRequest { request } => write!(f, "request: {}", request), - Event::NetworkInfoRequest { info_request } => { - write!(f, "info request: {}", info_request) +impl

Display for OutgoingConnection

{ + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self { + OutgoingConnection::FailedEarly { peer_addr, error } => { + write!(f, "early failure to {}: {}", peer_addr, error) + } + OutgoingConnection::Failed { + peer_addr, + peer_id, + error, + } => write!(f, "failure to {}/{}: {}", peer_addr, peer_id, error), + OutgoingConnection::Loopback { peer_addr } => write!(f, "loopback to {}", peer_addr), + OutgoingConnection::Established { + peer_addr, + peer_id, + peer_consensus_public_key, + sink: _, + is_syncing, + } => { + write!( + f, + "connection established to {}/{}, is_syncing: {}", + peer_addr, peer_id, is_syncing + )?; + + if let Some(public_key) = peer_consensus_public_key { + write!(f, " [{}]", public_key) + } else { + f.write_str(" [no validator id]") + } } } } diff --git a/node/src/components/network/gossip.rs b/node/src/components/network/gossip.rs deleted file mode 100644 index 4d01a69c4f..0000000000 --- a/node/src/components/network/gossip.rs +++ /dev/null @@ -1,68 +0,0 @@ -//! This module is home to types/functions related to using libp2p's `GossipSub` behavior, used for -//! gossiping data to subscribed peers. 
- -use datasize::DataSize; -use libp2p::{ - core::PublicKey, - gossipsub::{ - Gossipsub, GossipsubConfigBuilder, IdentTopic, MessageAuthenticity, ValidationMode, - }, - PeerId, -}; -use once_cell::sync::Lazy; - -use super::{Config, Error, PayloadT}; -use crate::types::Chainspec; - -pub(super) static TOPIC: Lazy = Lazy::new(|| IdentTopic::new("all".to_string())); - -#[derive(DataSize, Debug)] -pub(super) struct GossipMessage(pub Vec); - -impl GossipMessage { - pub(super) fn new(payload: &P, max_size: u32) -> Result { - let serialized_message = - bincode::serialize(payload).map_err(|error| Error::Serialization(*error))?; - - if serialized_message.len() > max_size as usize { - return Err(Error::MessageTooLarge { - max_size, - actual_size: serialized_message.len() as u64, - }); - } - - Ok(GossipMessage(serialized_message)) - } -} - -impl From for Vec { - fn from(message: GossipMessage) -> Self { - message.0 - } -} - -/// Constructs a new libp2p behavior suitable for gossiping. -pub(super) fn new_behavior( - config: &Config, - _chainspec: &Chainspec, - our_public_key: PublicKey, -) -> Gossipsub { - let gossipsub_config = GossipsubConfigBuilder::default() - // TODO - consider not using the default protocol ID prefix. 
- // .protocol_id(ProtocolId::new(chainspec, "validator/gossip").protocol_name().to_vec()) - .heartbeat_interval(config.gossip_heartbeat_interval.into()) - .max_transmit_size(config.max_gossip_message_size as usize) - .duplicate_cache_time(config.gossip_duplicate_cache_timeout.into()) - .validation_mode(ValidationMode::Permissive) - .build() - .unwrap_or_else(|error| panic!("should construct gossipsub config: {}", error)); - let our_peer_id = PeerId::from(our_public_key); - // TODO - remove `expect` - let mut gossipsub = Gossipsub::new(MessageAuthenticity::Author(our_peer_id), gossipsub_config) - .expect("should construct a new gossipsub behavior"); - // TODO - remove `expect` - gossipsub - .subscribe(&*TOPIC) - .expect("should subscribe to topic"); - gossipsub -} diff --git a/node/src/components/network/gossiped_address.rs b/node/src/components/network/gossiped_address.rs new file mode 100644 index 0000000000..ade3ac93b0 --- /dev/null +++ b/node/src/components/network/gossiped_address.rs @@ -0,0 +1,69 @@ +use std::{ + fmt::{self, Display, Formatter}, + net::SocketAddr, +}; + +use datasize::DataSize; +use serde::{Deserialize, Serialize}; + +use crate::{ + components::gossiper::{GossipItem, SmallGossipItem}, + effect::GossipTarget, +}; + +/// Used to gossip our public listening address to peers. 
+#[derive( + Copy, Clone, DataSize, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize, Debug, +)] +pub struct GossipedAddress(SocketAddr); + +impl GossipedAddress { + pub(super) fn new(address: SocketAddr) -> Self { + GossipedAddress(address) + } +} + +impl Display for GossipedAddress { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + write!(formatter, "gossiped-address {}", self.0) + } +} + +impl GossipItem for GossipedAddress { + const ID_IS_COMPLETE_ITEM: bool = true; + const REQUIRES_GOSSIP_RECEIVED_ANNOUNCEMENT: bool = false; + + type Id = GossipedAddress; + + fn gossip_id(&self) -> Self::Id { + *self + } + + fn gossip_target(&self) -> GossipTarget { + GossipTarget::All + } +} + +impl SmallGossipItem for GossipedAddress { + fn id_as_item(id: &Self::Id) -> &Self { + id + } +} + +impl From for SocketAddr { + fn from(gossiped_address: GossipedAddress) -> Self { + gossiped_address.0 + } +} + +mod specimen_support { + use crate::utils::specimen::{Cache, LargestSpecimen, SizeEstimator}; + + use super::GossipedAddress; + + impl LargestSpecimen for GossipedAddress { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + GossipedAddress::new(LargestSpecimen::largest_specimen(estimator, cache)) + } + } +} diff --git a/node/src/components/network/health.rs b/node/src/components/network/health.rs new file mode 100644 index 0000000000..18d018f12e --- /dev/null +++ b/node/src/components/network/health.rs @@ -0,0 +1,825 @@ +//! Health-check state machine. +//! +//! Health checks perform periodic pings to remote peers to ensure the connection is still alive. It +//! has somewhat complicated logic that is encoded in the `ConnectionHealth` struct, which has +//! multiple implicit states. 
+ +use std::{ + fmt::{self, Display, Formatter}, + time::{Duration, Instant}, +}; + +use datasize::DataSize; +use rand::Rng; +use serde::{Deserialize, Serialize}; + +use crate::utils::specimen::{Cache, LargestSpecimen, SizeEstimator}; + +/// Connection health information. +/// +/// All data related to the ping/pong functionality used to verify a peer's networking liveness. +#[derive(Clone, Copy, DataSize, Debug)] +pub(crate) struct ConnectionHealth { + /// The moment the connection was established. + pub(crate) connected_since: Instant, + /// The last ping that was requested to be sent. + pub(crate) last_ping_sent: Option, + /// The most recent pong received. + pub(crate) last_pong_received: Option, + /// Number of invalid pongs received, reset upon receiving a valid pong. + pub(crate) invalid_pong_count: u32, + /// Number of pings that timed out. + pub(crate) ping_timeouts: u32, +} + +/// Health check configuration. +#[derive(DataSize, Debug)] +pub(crate) struct HealthConfig { + /// How often to send a ping to ensure a connection is established. + /// + /// Determines how soon after connecting or a successful ping another ping is sent. + pub(crate) ping_interval: Duration, + /// Duration during which a ping must succeed to be considered successful. + pub(crate) ping_timeout: Duration, + /// Number of retries before giving up and disconnecting a peer due to too many failed pings. + pub(crate) ping_retries: u16, + /// How many spurious pongs to tolerate before banning a peer. + pub(crate) pong_limit: u32, +} + +/// A timestamp with an associated nonce. +#[derive(Clone, Copy, DataSize, Debug)] +pub(crate) struct TaggedTimestamp { + /// The actual timestamp. + timestamp: Instant, + /// The nonce of the timestamp. + nonce: Nonce, +} + +impl TaggedTimestamp { + /// Creates a new tagged timestamp with a random nonce. 
+ pub(crate) fn new(rng: &mut R, timestamp: Instant) -> Self { + Self { + timestamp, + nonce: rng.gen(), + } + } + + /// Creates a new tagged timestamp from parts. + pub(crate) fn from_parts(timestamp: Instant, nonce: Nonce) -> Self { + TaggedTimestamp { nonce, timestamp } + } + + /// Returns the actual timestamp. + pub(crate) fn timestamp(&self) -> Instant { + self.timestamp + } + + /// Returns the nonce inside the timestamp. + pub(crate) fn nonce(self) -> Nonce { + self.nonce + } +} + +/// A number-used-once, specifically one used in pings. +// Note: This nonce used to be a `u32`, but that is too small - since we immediately disconnect when +// a duplicate ping is generated, a `u32` has a ~ 1/(2^32) chance of a consecutive collision. +// +// If we ping every 5 seconds, this is a ~ 0.01% chance over a month, which is too high over +// thousands over nodes. At 64 bits, in theory the upper bound is 0.0000000002%, which is +// better (the period of the RNG used should be >> 64 bits). +// +// While we do check for consecutive ping nonces being generated, we still like the lower +// collision chance for repeated pings being sent. +#[derive(Clone, Copy, DataSize, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)] +pub(crate) struct Nonce(u64); + +impl Display for Nonce { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!(f, "{:016X}", self.0) + } +} + +impl rand::distributions::Distribution for rand::distributions::Standard { + #[inline(always)] + fn sample(&self, rng: &mut R) -> Nonce { + Nonce(rng.gen()) + } +} + +impl ConnectionHealth { + /// Creates a new connection health instance, recording when the connection was established. + pub(crate) fn new(connected_since: Instant) -> Self { + Self { + connected_since, + last_ping_sent: None, + last_pong_received: None, + invalid_pong_count: 0, + ping_timeouts: 0, + } + } +} + +impl ConnectionHealth { + /// Calculate the round-trip time, if possible. 
+ pub(crate) fn calc_rrt(&self) -> Option { + match (self.last_ping_sent, self.last_pong_received) { + (Some(last_ping), Some(last_pong)) if last_ping.nonce == last_pong.nonce => { + Some(last_pong.timestamp.duration_since(last_ping.timestamp)) + } + _ => None, + } + } + + /// Check current health status. + /// + /// This function must be polled periodically and returns a potential action to be performed. + pub(crate) fn update_health( + &mut self, + rng: &mut R, + cfg: &HealthConfig, + now: Instant, + ) -> HealthCheckOutcome { + // Having received too many pongs should always result in a disconnect. + if self.invalid_pong_count > cfg.pong_limit { + return HealthCheckOutcome::GiveUp; + } + + // Our honeymoon period is from first establishment of the connection until we send a ping. + if now.saturating_duration_since(self.connected_since) < cfg.ping_interval { + return HealthCheckOutcome::DoNothing; + } + + let send_ping = match self.last_ping_sent { + Some(last_ping) => { + match self.last_pong_received { + Some(prev_pong) if prev_pong.nonce() == last_ping.nonce() => { + // Normal operation. The next ping should be sent in a regular interval + // after receiving the last pong. + now >= prev_pong.timestamp() + cfg.ping_interval + } + + _ => { + // No matching pong on record. Check if we need to timeout the ping. + if now >= last_ping.timestamp() + cfg.ping_timeout { + self.ping_timeouts += 1; + // Clear the `last_ping_sent`, schedule another to be sent. + self.last_ping_sent = None; + true + } else { + false + } + } + } + } + None => true, + }; + + if send_ping { + if self.ping_timeouts > cfg.ping_retries as u32 { + // We have exceeded the timeouts and will give up as a result. + return HealthCheckOutcome::GiveUp; + } + + let ping = loop { + let candidate = TaggedTimestamp::new(rng, now); + + if let Some(prev) = self.last_ping_sent { + if prev.nonce() == candidate.nonce() { + // Ensure we don't produce consecutive pings. 
+ continue; + } + } + + break candidate; + }; + + self.last_ping_sent = Some(ping); + HealthCheckOutcome::SendPing(ping.nonce()) + } else { + HealthCheckOutcome::DoNothing + } + } + + /// Records a pong that has been sent. + /// + /// If `true`, the maximum number of pongs has been exceeded and the peer should be banned. + pub(crate) fn record_pong(&mut self, cfg: &HealthConfig, tt: TaggedTimestamp) -> bool { + let is_valid_pong = match self.last_ping_sent { + Some(last_ping) if last_ping.nonce() == tt.nonce => { + // Check if we already received a pong for this ping, which is a protocol violation. + if self + .last_pong_received + .map(|existing| existing.nonce() == tt.nonce) + .unwrap_or(false) + { + // Ping is a collsion, ban. + return true; + } + + if last_ping.timestamp() > tt.timestamp() { + // Ping is from the past somehow, ignore it (probably a bug on our side). + return false; + } + + // The ping is valid if it is within the timeout period. + last_ping.timestamp() + cfg.ping_timeout >= tt.timestamp() + } + _ => { + // Either the nonce did not match, or the nonce mismatched. + false + } + }; + + if is_valid_pong { + // Our pong is valid, reset invalid and ping count, then record it. + self.invalid_pong_count = 0; + self.ping_timeouts = 0; + self.last_pong_received = Some(tt); + false + } else { + self.invalid_pong_count += 1; + // If we have exceeded the invalid pong limit, ban. + self.invalid_pong_count > cfg.pong_limit + } + } +} + +/// The outcome of periodic health check. +#[derive(Clone, Copy, Debug)] + +pub(crate) enum HealthCheckOutcome { + /// Do nothing, as we recently took action. + DoNothing, + /// Send a ping with the given nonce. + SendPing(Nonce), + /// Give up on (i.e. terminate) the connection, as we exceeded the allowable ping limit. 
+ GiveUp, +} + +impl LargestSpecimen for Nonce { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + Self(LargestSpecimen::largest_specimen(estimator, cache)) + } +} + +#[cfg(test)] +mod tests { + use std::{collections::HashSet, time::Duration}; + + use assert_matches::assert_matches; + use rand::Rng; + + use super::{ConnectionHealth, HealthCheckOutcome, HealthConfig}; + use crate::{ + components::network::health::TaggedTimestamp, testing::test_clock::TestClock, + types::NodeRng, + }; + + impl HealthConfig { + pub(crate) fn test_config() -> Self { + // Note: These values are assumed in tests, so do not change them. + HealthConfig { + ping_interval: Duration::from_secs(5), + ping_timeout: Duration::from_secs(2), + ping_retries: 3, + pong_limit: 6, + } + } + } + + struct Fixtures { + clock: TestClock, + cfg: HealthConfig, + rng: NodeRng, + health: ConnectionHealth, + } + + /// Sets up fixtures used in almost every test. + fn fixtures() -> Fixtures { + let clock = TestClock::new(); + let cfg = HealthConfig::test_config(); + let rng = crate::new_rng(); + + let health = ConnectionHealth::new(clock.now()); + + Fixtures { + clock, + cfg, + rng, + health, + } + } + + #[test] + fn scenario_no_response() { + let Fixtures { + mut clock, + cfg, + mut rng, + mut health, + } = fixtures(); + + assert_matches!( + health.update_health(&mut rng, &cfg, clock.now()), + HealthCheckOutcome::DoNothing + ); + + // Repeated checks should not change the outcome. + assert_matches!( + health.update_health(&mut rng, &cfg, clock.now()), + HealthCheckOutcome::DoNothing + ); + assert_matches!( + health.update_health(&mut rng, &cfg, clock.now()), + HealthCheckOutcome::DoNothing + ); + + // After 4.9 seconds, we still do not send a ping. + clock.advance(Duration::from_millis(4900)); + + assert_matches!( + health.update_health(&mut rng, &cfg, clock.now()), + HealthCheckOutcome::DoNothing + ); + + // At 5, we expect our first ping. 
+ clock.advance(Duration::from_millis(100)); + + assert_matches!( + health.update_health(&mut rng, &cfg, clock.now()), + HealthCheckOutcome::SendPing(_) + ); + + // Checking health again should not result in another ping. + assert_matches!( + health.update_health(&mut rng, &cfg, clock.now()), + HealthCheckOutcome::DoNothing + ); + + clock.advance(Duration::from_millis(100)); + assert_matches!( + health.update_health(&mut rng, &cfg, clock.now()), + HealthCheckOutcome::DoNothing + ); + + // After two seconds, we expect another ping to be sent, due to timeouts. + clock.advance(Duration::from_millis(2000)); + assert_matches!( + health.update_health(&mut rng, &cfg, clock.now()), + HealthCheckOutcome::SendPing(_) + ); + + // At this point, two pings have been sent. Configuration says to retry 3 times, so a total + // of five pings is expected. + clock.advance(Duration::from_millis(2000)); + assert_matches!( + health.update_health(&mut rng, &cfg, clock.now()), + HealthCheckOutcome::SendPing(_) + ); + + clock.advance(Duration::from_millis(2000)); + assert_matches!( + health.update_health(&mut rng, &cfg, clock.now()), + HealthCheckOutcome::SendPing(_) + ); + + // Finally, without receiving a ping at all, we give up. 
+ clock.advance(Duration::from_millis(2000)); + assert_matches!( + health.update_health(&mut rng, &cfg, clock.now()), + HealthCheckOutcome::GiveUp + ); + } + + #[test] + fn pings_use_different_nonces() { + let Fixtures { + mut clock, + cfg, + mut rng, + mut health, + } = fixtures(); + + assert_matches!( + health.update_health(&mut rng, &cfg, clock.now()), + HealthCheckOutcome::DoNothing + ); + clock.advance(Duration::from_secs(5)); + + let mut nonce_set = HashSet::new(); + + nonce_set.insert(assert_matches!( + health.update_health(&mut rng, &cfg, clock.now()), + HealthCheckOutcome::SendPing(nonce) => nonce + )); + clock.advance(Duration::from_secs(2)); + + nonce_set.insert(assert_matches!( + health.update_health(&mut rng, &cfg, clock.now()), + HealthCheckOutcome::SendPing(nonce) => nonce + )); + clock.advance(Duration::from_secs(2)); + + nonce_set.insert(assert_matches!( + health.update_health(&mut rng, &cfg, clock.now()), + HealthCheckOutcome::SendPing(nonce) => nonce + )); + clock.advance(Duration::from_secs(2)); + + nonce_set.insert(assert_matches!( + health.update_health(&mut rng, &cfg, clock.now()), + HealthCheckOutcome::SendPing(nonce) => nonce + )); + + // Since it is a set, we expect less than 4 items if there were any duplicates. + assert_eq!(nonce_set.len(), 4); + } + + #[test] + fn scenario_all_working() { + let Fixtures { + mut clock, + cfg, + mut rng, + mut health, + } = fixtures(); + + assert_matches!( + health.update_health(&mut rng, &cfg, clock.now()), + HealthCheckOutcome::DoNothing + ); + + // At 5 seconds, we expect our first ping. + clock.advance(Duration::from_secs(5)); + + let nonce_1 = assert_matches!( + health.update_health(&mut rng, &cfg, clock.now()), + HealthCheckOutcome::SendPing(nonce) => nonce + ); + + // Record a reply 500 ms later. + clock.advance(Duration::from_millis(500)); + assert!(!health.record_pong(&cfg, TaggedTimestamp::from_parts(clock.now(), nonce_1))); + + // Our next pong should be 5 seconds later, not 4.5. 
+ clock.advance(Duration::from_millis(4500)); + assert_matches!( + health.update_health(&mut rng, &cfg, clock.now()), + HealthCheckOutcome::DoNothing + ); + clock.advance(Duration::from_millis(500)); + + let nonce_2 = assert_matches!( + health.update_health(&mut rng, &cfg, clock.now()), + HealthCheckOutcome::SendPing(nonce) => nonce + ); + + // We test an edge case here where we use the same timestamp for the received pong. + clock.advance(Duration::from_millis(500)); + assert!(!health.record_pong(&cfg, TaggedTimestamp::from_parts(clock.now(), nonce_2))); + + // Afterwards, no ping should be sent. + assert_matches!( + health.update_health(&mut rng, &cfg, clock.now()), + HealthCheckOutcome::DoNothing + ); + + // Do 1000 additional ping/pongs. + for _ in 0..1000 { + clock.advance(Duration::from_millis(5000)); + let nonce = assert_matches!( + health.update_health(&mut rng, &cfg, clock.now()), + HealthCheckOutcome::SendPing(nonce) => nonce + ); + assert_matches!( + health.update_health(&mut rng, &cfg, clock.now()), + HealthCheckOutcome::DoNothing + ); + + clock.advance(Duration::from_millis(250)); + assert!(!health.record_pong(&cfg, TaggedTimestamp::from_parts(clock.now(), nonce))); + + assert_matches!( + health.update_health(&mut rng, &cfg, clock.now()), + HealthCheckOutcome::DoNothing + ); + } + } + + #[test] + fn scenario_intermittent_failures() { + let Fixtures { + mut clock, + cfg, + mut rng, + mut health, + } = fixtures(); + + // We miss two pings initially, before recovering. 
+ clock.advance(Duration::from_secs(5)); + + assert_matches!( + health.update_health(&mut rng, &cfg, clock.now()), + HealthCheckOutcome::SendPing(_) + ); + + clock.advance(Duration::from_secs(2)); + + assert_matches!( + health.update_health(&mut rng, &cfg, clock.now()), + HealthCheckOutcome::SendPing(_) + ); + + clock.advance(Duration::from_secs(2)); + + let nonce_1 = assert_matches!( + health.update_health(&mut rng, &cfg, clock.now()), + HealthCheckOutcome::SendPing(nonce) => nonce + ); + + clock.advance(Duration::from_secs(1)); + assert!(!health.record_pong(&cfg, TaggedTimestamp::from_parts(clock.now(), nonce_1))); + + // We successfully "recovered", this should reset our ping counts. Miss three pings before + // successfully receiving a pong from 4th from here on out. + clock.advance(Duration::from_millis(5500)); + assert_matches!( + health.update_health(&mut rng, &cfg, clock.now()), + HealthCheckOutcome::SendPing(_) + ); + clock.advance(Duration::from_millis(2500)); + assert_matches!( + health.update_health(&mut rng, &cfg, clock.now()), + HealthCheckOutcome::SendPing(_) + ); + clock.advance(Duration::from_millis(2500)); + assert_matches!( + health.update_health(&mut rng, &cfg, clock.now()), + HealthCheckOutcome::SendPing(_) + ); + clock.advance(Duration::from_millis(2500)); + let nonce_2 = assert_matches!( + health.update_health(&mut rng, &cfg, clock.now()), + HealthCheckOutcome::SendPing(nonce) => nonce + ); + clock.advance(Duration::from_millis(500)); + assert!(!health.record_pong(&cfg, TaggedTimestamp::from_parts(clock.now(), nonce_2))); + + // This again should reset. We miss four more pings and are disconnected. 
+ clock.advance(Duration::from_millis(5500)); + assert_matches!( + health.update_health(&mut rng, &cfg, clock.now()), + HealthCheckOutcome::SendPing(_) + ); + clock.advance(Duration::from_millis(2500)); + assert_matches!( + health.update_health(&mut rng, &cfg, clock.now()), + HealthCheckOutcome::SendPing(_) + ); + clock.advance(Duration::from_millis(2500)); + assert_matches!( + health.update_health(&mut rng, &cfg, clock.now()), + HealthCheckOutcome::SendPing(_) + ); + clock.advance(Duration::from_millis(2500)); + assert_matches!( + health.update_health(&mut rng, &cfg, clock.now()), + HealthCheckOutcome::SendPing(_) + ); + clock.advance(Duration::from_millis(2500)); + assert_matches!( + health.update_health(&mut rng, &cfg, clock.now()), + HealthCheckOutcome::GiveUp + ); + } + + #[test] + fn ignores_unwanted_pongs() { + let Fixtures { + mut clock, + cfg, + mut rng, + mut health, + } = fixtures(); + + clock.advance(Duration::from_secs(5)); + + assert_matches!( + health.update_health(&mut rng, &cfg, clock.now()), + HealthCheckOutcome::SendPing(_) + ); + + // Make the `ConnectionHealth` receive some unasked pongs, without exceeding the unasked + // pong limit. + assert!(!health.record_pong(&cfg, TaggedTimestamp::from_parts(clock.now(), rng.gen()))); + assert!(!health.record_pong(&cfg, TaggedTimestamp::from_parts(clock.now(), rng.gen()))); + assert!(!health.record_pong(&cfg, TaggedTimestamp::from_parts(clock.now(), rng.gen()))); + assert!(!health.record_pong(&cfg, TaggedTimestamp::from_parts(clock.now(), rng.gen()))); + assert!(!health.record_pong(&cfg, TaggedTimestamp::from_parts(clock.now(), rng.gen()))); + + // The retry delay is 2 seconds (instead of 5 for the next pong after success), so ensure + // we retry due to not having received the correct nonce in the pong. 
+ + clock.advance(Duration::from_secs(2)); + assert_matches!( + health.update_health(&mut rng, &cfg, clock.now()), + HealthCheckOutcome::SendPing(_) + ); + } + + #[test] + fn ensure_excessive_pongs_result_in_ban() { + let Fixtures { + mut clock, + cfg, + mut rng, + mut health, + } = fixtures(); + + clock.advance(Duration::from_secs(5)); + + assert_matches!( + health.update_health(&mut rng, &cfg, clock.now()), + HealthCheckOutcome::SendPing(_) + ); + + // Make the `ConnectionHealth` receive some unasked pongs, without exceeding the unasked + // pong limit. + assert!(!health.record_pong(&cfg, TaggedTimestamp::from_parts(clock.now(), rng.gen()))); + assert!(!health.record_pong(&cfg, TaggedTimestamp::from_parts(clock.now(), rng.gen()))); + assert!(!health.record_pong(&cfg, TaggedTimestamp::from_parts(clock.now(), rng.gen()))); + assert!(!health.record_pong(&cfg, TaggedTimestamp::from_parts(clock.now(), rng.gen()))); + assert!(!health.record_pong(&cfg, TaggedTimestamp::from_parts(clock.now(), rng.gen()))); + assert!(!health.record_pong(&cfg, TaggedTimestamp::from_parts(clock.now(), rng.gen()))); + // 6 unasked pongs is still okay. + + assert_matches!( + health.update_health(&mut rng, &cfg, clock.now()), + HealthCheckOutcome::DoNothing + ); + + assert!(health.record_pong(&cfg, TaggedTimestamp::from_parts(clock.now(), rng.gen()))); + // 7 is too much. + + // For good measure, we expect the health check to also output a disconnect instruction. + assert_matches!( + health.update_health(&mut rng, &cfg, clock.now()), + HealthCheckOutcome::GiveUp + ); + } + + #[test] + fn time_reversal_does_not_crash_but_is_ignored() { + // Usually a pong for a given (or any) nonce should always be received with a timestamp + // equal or later than the ping sent out. Due to a programming error or a lucky attacker + + // scheduling issue, there is a very minute chance this can actually happen. 
+ // + // In these cases, the pongs should just be discarded, not crashing due to a underflow in + // the comparison. + let Fixtures { + mut clock, + cfg, + mut rng, + mut health, + } = fixtures(); + + clock.advance(Duration::from_secs(5)); // t = 5 + + let nonce_1 = assert_matches!( + health.update_health(&mut rng, &cfg, clock.now()), + HealthCheckOutcome::SendPing(nonce) => nonce + ); + + // Ignore the nonce if sent in the past (and also don't crash). + clock.rewind(Duration::from_secs(1)); // t = 4 + assert!(!health.record_pong(&cfg, TaggedTimestamp::from_parts(clock.now(), nonce_1))); + assert!(!health.record_pong(&cfg, TaggedTimestamp::from_parts(clock.now(), rng.gen()))); + + // Another ping should be sent out, since `nonce_1` was ignored. + clock.advance(Duration::from_secs(3)); // t = 7 + let nonce_2 = assert_matches!( + health.update_health(&mut rng, &cfg, clock.now()), + HealthCheckOutcome::SendPing(nonce) => nonce + ); + + // Nonce 2 will be received seemingly before the connection was even established. + clock.rewind(Duration::from_secs(3600)); + assert!(!health.record_pong(&cfg, TaggedTimestamp::from_parts(clock.now(), nonce_2))); + } + + #[test] + fn handles_missed_health_checks() { + let Fixtures { + mut clock, + cfg, + mut rng, + mut health, + } = fixtures(); + + clock.advance(Duration::from_secs(15)); + + // We initially exceed our scheduled first ping by 10 seconds. This will cause the ping to + // be sent right there and then. + assert_matches!( + health.update_health(&mut rng, &cfg, clock.now()), + HealthCheckOutcome::SendPing(_) + ); + + // Going forward 1 second should not change anything. + clock.advance(Duration::from_secs(1)); + + assert_matches!( + health.update_health(&mut rng, &cfg, clock.now()), + HealthCheckOutcome::DoNothing + ); + + // After another second, two seconds have passed since sending the first ping in total, so + // send another once. 
+ clock.advance(Duration::from_secs(1)); + assert_matches!( + health.update_health(&mut rng, &cfg, clock.now()), + HealthCheckOutcome::SendPing(_) + ); + + // We have missed two pings total, now wait an hour. This will trigger the third ping. + clock.advance(Duration::from_secs(3600)); + assert_matches!( + health.update_health(&mut rng, &cfg, clock.now()), + HealthCheckOutcome::SendPing(_) + ); + + // Fourth right after + clock.advance(Duration::from_secs(2)); + assert_matches!( + health.update_health(&mut rng, &cfg, clock.now()), + HealthCheckOutcome::SendPing(_) + ); + + // Followed by a disconnect. + clock.advance(Duration::from_secs(2)); + assert_matches!( + health.update_health(&mut rng, &cfg, clock.now()), + HealthCheckOutcome::GiveUp + ); + } + + #[test] + fn ignores_time_travel() { + // Any call of the health update with timestamps that are provably from the past (i.e. + // before a recorded timestamp like a previous ping) should be ignored. + + let Fixtures { + mut clock, + cfg, + mut rng, + mut health, + } = fixtures(); + + clock.advance(Duration::from_secs(5)); // t = 5 + assert_matches!( + health.update_health(&mut rng, &cfg, clock.now()), + HealthCheckOutcome::SendPing(_) + ); + + clock.rewind(Duration::from_secs(3)); // t = 2 + assert_matches!( + health.update_health(&mut rng, &cfg, clock.now()), + HealthCheckOutcome::DoNothing + ); + + clock.advance(Duration::from_secs(4)); // t = 6 + assert_matches!( + health.update_health(&mut rng, &cfg, clock.now()), + HealthCheckOutcome::DoNothing + ); + clock.advance(Duration::from_secs(1)); // t = 7 + + assert_matches!( + health.update_health(&mut rng, &cfg, clock.now()), + HealthCheckOutcome::SendPing(_) + ); + } + + #[test] + fn duplicate_pong_immediately_terminates() { + let Fixtures { + mut clock, + cfg, + mut rng, + mut health, + } = fixtures(); + + clock.advance(Duration::from_secs(5)); + let nonce_1 = assert_matches!( + health.update_health(&mut rng, &cfg, clock.now()), + 
HealthCheckOutcome::SendPing(nonce) => nonce + ); + + clock.advance(Duration::from_secs(1)); + + // Recording the pong once is fine, but the second time should result in a ban. + assert!(!health.record_pong(&cfg, TaggedTimestamp::from_parts(clock.now(), nonce_1))); + assert!(health.record_pong(&cfg, TaggedTimestamp::from_parts(clock.now(), nonce_1))); + } +} diff --git a/node/src/components/network/identity.rs b/node/src/components/network/identity.rs new file mode 100644 index 0000000000..81a592fcd4 --- /dev/null +++ b/node/src/components/network/identity.rs @@ -0,0 +1,86 @@ +use std::sync::Arc; + +use datasize::DataSize; +use openssl::{ + error::ErrorStack as OpenSslErrorStack, + pkey::{PKey, Private}, + x509::X509, +}; +use thiserror::Error; +use tracing::warn; + +use super::{Config, IdentityConfig}; +use crate::{ + tls::{self, LoadCertError, LoadSecretKeyError, TlsCert, ValidationError}, + types::NodeId, + WithDir, +}; + +#[derive(Debug, Error)] +pub(crate) enum Error { + #[error("could not generate TLS certificate: {0}")] + CouldNotGenerateTlsCertificate(OpenSslErrorStack), + #[error(transparent)] + Validation(#[from] ValidationError), + #[error(transparent)] + LoadCert(#[from] LoadCertError), + #[error(transparent)] + LoadSecretKey(#[from] LoadSecretKeyError), +} + +/// An ephemeral [PKey] and [TlsCert] that identifies this node +#[derive(DataSize, Debug, Clone)] +pub(crate) struct Identity { + pub(super) secret_key: Arc>, + pub(super) tls_certificate: Arc, + pub(super) network_ca: Option>, +} + +impl Identity { + fn new(secret_key: PKey, tls_certificate: TlsCert, network_ca: Option) -> Self { + Self { + secret_key: Arc::new(secret_key), + tls_certificate: Arc::new(tls_certificate), + network_ca: network_ca.map(Arc::new), + } + } + + pub(crate) fn from_config(config: WithDir) -> Result { + match &config.value().identity { + Some(identity) => Self::from_identity_config(identity), + None => Self::with_generated_certs(), + } + } + + fn 
from_identity_config(identity_config: &IdentityConfig) -> Result { + let not_yet_validated_x509_cert = tls::load_cert(&identity_config.tls_certificate)?; + let secret_key = tls::load_secret_key(&identity_config.secret_key)?; + let x509_cert = tls::tls_cert_from_x509(not_yet_validated_x509_cert)?; + + // Load a ca certificate (if present) + let network_ca = tls::load_cert(&identity_config.ca_certificate)?; + + // A quick sanity check for the loaded cert against supplied CA. + tls::validate_cert_with_authority(x509_cert.as_x509().clone(), &network_ca).map_err( + |error| { + warn!(%error, "the given node certificate is not signed by the network CA"); + Error::Validation(error) + }, + )?; + + Ok(Identity::new(secret_key, x509_cert, Some(network_ca))) + } + + pub(crate) fn with_generated_certs() -> Result { + let (not_yet_validated_x509_cert, secret_key) = + tls::generate_node_cert().map_err(Error::CouldNotGenerateTlsCertificate)?; + let tls_certificate = tls::validate_self_signed_cert(not_yet_validated_x509_cert)?; + Ok(Identity::new(secret_key, tls_certificate, None)) + } +} + +impl From<&Identity> for NodeId { + fn from(identity: &Identity) -> Self { + NodeId::from(identity.tls_certificate.public_key_fingerprint()) + } +} diff --git a/node/src/components/network/insights.rs b/node/src/components/network/insights.rs new file mode 100644 index 0000000000..8d2b5f4dde --- /dev/null +++ b/node/src/components/network/insights.rs @@ -0,0 +1,390 @@ +//! Networking debug insights. +//! +//! The `insights` module exposes some internals of the networking component, mainly for inspection +//! through the diagnostics console. It should specifically not be used for any business logic and +//! affordances made in other corners of the `network` module to allow collecting these +//! insights should neither be abused just because they are available. 
+ +use std::{ + collections::{BTreeSet, HashSet}, + fmt::{self, Debug, Display, Formatter}, + net::SocketAddr, + sync::atomic::Ordering, + time::{Duration, SystemTime}, +}; + +use casper_types::{DisplayIter, EraId, PublicKey}; +use serde::Serialize; + +use crate::{ + types::NodeId, + utils::{opt_display::OptDisplay, TimeAnchor}, +}; + +use super::{ + error::ConnectionError, outgoing::OutgoingState, symmetry::ConnectionSymmetry, Network, + OutgoingHandle, Payload, +}; + +/// A collection of insights into the active networking component. +#[derive(Debug, Serialize)] +pub(crate) struct NetworkInsights { + /// The nodes current ID. + our_id: NodeId, + /// Whether or not a network CA was present (is a private network). + network_ca: bool, + /// The public address of the node. + public_addr: Option, + /// Whether or not the node is syncing. + is_syncing: bool, + /// The active era as seen by the networking component. + net_active_era: EraId, + /// The list of node IDs that are being preferred due to being active validators. + privileged_active_outgoing_nodes: Option>, + /// The list of node IDs that are being preferred due to being upcoming validators. + privileged_upcoming_outgoing_nodes: Option>, + /// The amount of bandwidth allowance currently buffered, ready to be spent. + unspent_bandwidth_allowance_bytes: Option, + /// Map of outgoing connections, along with their current state. + outgoing_connections: Vec<(SocketAddr, OutgoingInsight)>, + /// Map of incoming connections. + connection_symmetries: Vec<(NodeId, ConnectionSymmetryInsight)>, +} + +/// Insight into an outgoing connection. +#[derive(Debug, Serialize)] +struct OutgoingInsight { + /// Whether or not the address is marked unforgettable. + unforgettable: bool, + /// The current connection state. + state: OutgoingStateInsight, +} + +/// The state of an outgoing connection, reduced to exportable insights. 
+#[derive(Debug, Serialize)] +enum OutgoingStateInsight { + Connecting { + failures_so_far: u8, + since: SystemTime, + }, + Waiting { + failures_so_far: u8, + error: Option, + last_failure: SystemTime, + }, + Connected { + peer_id: NodeId, + peer_addr: SocketAddr, + last_ping_sent: Option, + last_pong_received: Option, + invalid_pong_count: u32, + rtt: Option, + }, + Blocked { + since: SystemTime, + justification: String, + until: SystemTime, + }, + Loopback, +} + +fn time_delta(now: SystemTime, then: SystemTime) -> impl Display { + OptDisplay::new( + now.duration_since(then) + .map(humantime::format_duration) + .ok(), + "err", + ) +} + +impl OutgoingStateInsight { + /// Constructs a new outgoing state insight from a given outgoing state. + fn from_outgoing_state

( + anchor: &TimeAnchor, + state: &OutgoingState, ConnectionError>, + ) -> Self { + match state { + OutgoingState::Connecting { + failures_so_far, + since, + } => OutgoingStateInsight::Connecting { + failures_so_far: *failures_so_far, + since: anchor.convert(*since), + }, + OutgoingState::Waiting { + failures_so_far, + error, + last_failure, + } => OutgoingStateInsight::Waiting { + failures_so_far: *failures_so_far, + error: error.as_ref().map(ToString::to_string), + last_failure: anchor.convert(*last_failure), + }, + OutgoingState::Connected { + peer_id, + handle, + health, + } => OutgoingStateInsight::Connected { + peer_id: *peer_id, + peer_addr: handle.peer_addr, + last_ping_sent: health + .last_ping_sent + .map(|tt| anchor.convert(tt.timestamp())), + last_pong_received: health + .last_pong_received + .map(|tt| anchor.convert(tt.timestamp())), + invalid_pong_count: health.invalid_pong_count, + rtt: health.calc_rrt(), + }, + OutgoingState::Blocked { + since, + justification, + until, + } => OutgoingStateInsight::Blocked { + since: anchor.convert(*since), + justification: justification.to_string(), + until: anchor.convert(*until), + }, + OutgoingState::Loopback => OutgoingStateInsight::Loopback, + } + } + + /// Formats the outgoing state insight with times relative to a given timestamp. 
+ fn fmt_time_relative(&self, now: SystemTime, f: &mut Formatter<'_>) -> fmt::Result { + match self { + OutgoingStateInsight::Connecting { + failures_so_far, + since, + } => write!( + f, + "connecting (fails: {}), since {}", + failures_so_far, + time_delta(now, *since) + ), + OutgoingStateInsight::Waiting { + failures_so_far, + error, + last_failure, + } => write!( + f, + "waiting (fails: {}, last error: {}), since {}", + failures_so_far, + OptDisplay::new(error.as_ref(), "none"), + time_delta(now, *last_failure) + ), + OutgoingStateInsight::Connected { + peer_id, + peer_addr, + last_ping_sent, + last_pong_received, + invalid_pong_count, + rtt, + } => { + let rtt_ms = rtt.map(|duration| duration.as_millis()); + + write!( + f, + "connected -> {} @ {} (rtt {}, invalid {}, last ping/pong {}/{})", + peer_id, + peer_addr, + OptDisplay::new(rtt_ms, "?"), + invalid_pong_count, + OptDisplay::new(last_ping_sent.map(|t| time_delta(now, t)), "-"), + OptDisplay::new(last_pong_received.map(|t| time_delta(now, t)), "-"), + ) + } + OutgoingStateInsight::Blocked { + since, + justification, + until, + } => { + write!( + f, + "blocked since {}, until {}: {}", + time_delta(now, *since), + time_delta(now, *until), + justification + ) + } + OutgoingStateInsight::Loopback => f.write_str("loopback"), + } + } +} + +/// Describes whether a connection is uni- or bi-directional. +#[derive(Debug, Serialize)] +pub(super) enum ConnectionSymmetryInsight { + IncomingOnly { + since: SystemTime, + peer_addrs: BTreeSet, + }, + OutgoingOnly { + since: SystemTime, + }, + Symmetric { + peer_addrs: BTreeSet, + }, + Gone, +} + +impl ConnectionSymmetryInsight { + /// Creates a new insight from a given connection symmetry. 
+ fn from_connection_symmetry(anchor: &TimeAnchor, sym: &ConnectionSymmetry) -> Self { + match sym { + ConnectionSymmetry::IncomingOnly { since, peer_addrs } => { + ConnectionSymmetryInsight::IncomingOnly { + since: anchor.convert(*since), + peer_addrs: peer_addrs.clone(), + } + } + ConnectionSymmetry::OutgoingOnly { since } => ConnectionSymmetryInsight::OutgoingOnly { + since: anchor.convert(*since), + }, + ConnectionSymmetry::Symmetric { peer_addrs } => ConnectionSymmetryInsight::Symmetric { + peer_addrs: peer_addrs.clone(), + }, + ConnectionSymmetry::Gone => ConnectionSymmetryInsight::Gone, + } + } + + /// Formats the connection symmetry insight with times relative to a given timestamp. + fn fmt_time_relative(&self, now: SystemTime, f: &mut Formatter<'_>) -> fmt::Result { + match self { + ConnectionSymmetryInsight::IncomingOnly { since, peer_addrs } => write!( + f, + "<- {} (since {})", + DisplayIter::new(peer_addrs.iter()), + time_delta(now, *since) + ), + ConnectionSymmetryInsight::OutgoingOnly { since } => { + write!(f, "-> (since {})", time_delta(now, *since)) + } + ConnectionSymmetryInsight::Symmetric { peer_addrs } => { + write!(f, "<> {}", DisplayIter::new(peer_addrs.iter())) + } + ConnectionSymmetryInsight::Gone => f.write_str("gone"), + } + } +} + +impl NetworkInsights { + /// Collect networking insights from a given networking component. + pub(super) fn collect_from_component(net: &Network) -> Self + where + P: Payload, + { + // Since we are at the top level of the component, we gain access to inner values of the + // respective structs. We abuse this to gain debugging insights. Note: If limiters are no + // longer a `trait`, the trait methods can be removed as well in favor of direct access. 
+ let (privileged_active_outgoing_nodes, privileged_upcoming_outgoing_nodes) = net + .outgoing_limiter + .debug_inspect_validators(&net.active_era) + .map(|(a, b)| (Some(a), Some(b))) + .unwrap_or_default(); + + let anchor = TimeAnchor::now(); + + let outgoing_connections = net + .outgoing_manager + .outgoing + .iter() + .map(|(addr, outgoing)| { + let state = OutgoingStateInsight::from_outgoing_state(&anchor, &outgoing.state); + ( + *addr, + OutgoingInsight { + unforgettable: outgoing.is_unforgettable, + state, + }, + ) + }) + .collect(); + + let connection_symmetries = net + .connection_symmetries + .iter() + .map(|(id, sym)| { + ( + *id, + ConnectionSymmetryInsight::from_connection_symmetry(&anchor, sym), + ) + }) + .collect(); + + NetworkInsights { + our_id: net.context.our_id(), + network_ca: net.context.network_ca().is_some(), + public_addr: net.context.public_addr(), + is_syncing: net.context.is_syncing().load(Ordering::Relaxed), + net_active_era: net.active_era, + privileged_active_outgoing_nodes, + privileged_upcoming_outgoing_nodes, + unspent_bandwidth_allowance_bytes: net + .outgoing_limiter + .debug_inspect_unspent_allowance(), + outgoing_connections, + connection_symmetries, + } + } +} + +impl Display for NetworkInsights { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + let now = SystemTime::now(); + + if !self.network_ca { + f.write_str("Public ")?; + } else { + f.write_str("Private ")?; + } + writeln!( + f, + "node {} @ {:?} (syncing: {})", + self.our_id, self.public_addr, self.is_syncing + )?; + writeln!( + f, + "active era: {} unspent_bandwidth_allowance_bytes: {}", + self.net_active_era, + OptDisplay::new(self.unspent_bandwidth_allowance_bytes, "inactive"), + )?; + let active = self + .privileged_active_outgoing_nodes + .as_ref() + .map(HashSet::iter) + .map(DisplayIter::new); + writeln!( + f, + "privileged active: {}", + OptDisplay::new(active, "inactive") + )?; + let upcoming = self + .privileged_upcoming_outgoing_nodes + .as_ref() + 
.map(HashSet::iter) + .map(DisplayIter::new); + writeln!( + f, + "privileged upcoming: {}", + OptDisplay::new(upcoming, "inactive") + )?; + + f.write_str("outgoing connections:\n")?; + writeln!(f, "address uf state")?; + for (addr, outgoing) in &self.outgoing_connections { + write!(f, "{:23} {:5} ", addr, outgoing.unforgettable,)?; + outgoing.state.fmt_time_relative(now, f)?; + f.write_str("\n")?; + } + + f.write_str("connection symmetries:\n")?; + writeln!(f, "peer ID symmetry")?; + for (peer_id, symmetry) in &self.connection_symmetries { + write!(f, "{:10} ", peer_id)?; + symmetry.fmt_time_relative(now, f)?; + f.write_str("\n")?; + } + + Ok(()) + } +} diff --git a/node/src/components/network/limiter.rs b/node/src/components/network/limiter.rs new file mode 100644 index 0000000000..963e3406f9 --- /dev/null +++ b/node/src/components/network/limiter.rs @@ -0,0 +1,552 @@ +//! Resource limiters +//! +//! Resource limiters restrict the usable amount of a resource through slowing down the request rate +//! by making each user request an allowance first. + +use std::{ + collections::{HashMap, HashSet}, + sync::{Arc, RwLock}, + time::{Duration, Instant}, +}; + +use prometheus::Counter; +use tokio::{runtime::Handle, sync::Mutex, task}; +use tracing::{error, trace, warn}; + +use casper_types::{EraId, PublicKey}; + +use crate::types::{NodeId, ValidatorMatrix}; + +/// Amount of resource allowed to buffer in `Limiter`. +const STORED_BUFFER_SECS: Duration = Duration::from_secs(2); + +/// A limiter dividing resources into two classes based on their validator status. +/// +/// Any consumer of a specific resource is expected to call `create_handle` for every peer and use +/// the returned handle to request a access to a resource. +/// +/// Imposes a limit on non-validator resources while not limiting active validator resources at all. +#[derive(Debug)] +pub(super) struct Limiter { + /// Shared data across all handles. 
+ data: Arc, + /// Set of active and upcoming validators shared across all handles. + validator_matrix: ValidatorMatrix, +} + +impl Limiter { + /// Creates a new class based limiter. + /// + /// Starts the background worker task as well. + pub(super) fn new( + resources_per_second: u32, + wait_time_sec: Counter, + validator_matrix: ValidatorMatrix, + ) -> Self { + Limiter { + data: Arc::new(LimiterData::new(resources_per_second, wait_time_sec)), + validator_matrix, + } + } + + /// Create a handle for a connection using the given peer and optional consensus key. + pub(super) fn create_handle( + &self, + peer_id: NodeId, + consensus_key: Option, + ) -> LimiterHandle { + if let Some(public_key) = consensus_key.as_ref().cloned() { + match self.data.connected_validators.write() { + Ok(mut connected_validators) => { + let _ = connected_validators.insert(peer_id, public_key); + } + Err(_) => { + error!( + "could not update connected validator data set of limiter, lock poisoned" + ); + } + } + } + LimiterHandle { + data: self.data.clone(), + validator_matrix: self.validator_matrix.clone(), + consumer_id: ConsumerId { + _peer_id: peer_id, + consensus_key, + }, + } + } + + pub(super) fn remove_connected_validator(&self, peer_id: &NodeId) { + match self.data.connected_validators.write() { + Ok(mut connected_validators) => { + let _ = connected_validators.remove(peer_id); + } + Err(_) => { + error!( + "could not remove connected validator from data set of limiter, lock poisoned" + ); + } + } + } + + pub(super) fn is_validator_in_era(&self, era: EraId, peer_id: &NodeId) -> bool { + let public_key = match self.data.connected_validators.read() { + Ok(connected_validators) => match connected_validators.get(peer_id) { + None => return false, + Some(public_key) => public_key.clone(), + }, + Err(_) => { + error!("could not read from connected_validators of limiter, lock poisoned"); + return false; + } + }; + + match self.validator_matrix.is_validator_in_era(era, &public_key) { + None 
=> { + warn!(%era, "missing validator weights for given era"); + false + } + Some(is_validator) => is_validator, + } + } + + pub(super) fn debug_inspect_unspent_allowance(&self) -> Option { + Some(task::block_in_place(move || { + Handle::current().block_on(async move { self.data.resources.lock().await.available }) + })) + } + + pub(super) fn debug_inspect_validators( + &self, + current_era: &EraId, + ) -> Option<(HashSet, HashSet)> { + Some(( + self.validator_keys_for_era(current_era), + self.validator_keys_for_era(¤t_era.successor()), + )) + } + + fn validator_keys_for_era(&self, era: &EraId) -> HashSet { + self.validator_matrix + .validator_weights(*era) + .map(|validator_weights| validator_weights.validator_public_keys().cloned().collect()) + .unwrap_or_default() + } +} + +/// The limiter's state. +#[derive(Debug)] +struct LimiterData { + /// Number of resource units to allow for non-validators per second. + resources_per_second: u32, + /// A mapping from node IDs to public keys of validators to which we have an outgoing + /// connection. + connected_validators: RwLock>, + /// Information about available resources. + resources: Mutex, + /// Total time spent waiting. + wait_time_sec: Counter, +} + +/// Resource data. +#[derive(Debug)] +struct ResourceData { + /// How many resource units are buffered. + /// + /// May go negative in the case of a deficit. + available: i64, + /// Last time resource data was refilled. + last_refill: Instant, +} + +impl LimiterData { + /// Creates a new set of class based limiter data. + /// + /// Initial resources will be initialized to 0, with the last refill set to the current time. + fn new(resources_per_second: u32, wait_time_sec: Counter) -> Self { + LimiterData { + resources_per_second, + connected_validators: Default::default(), + resources: Mutex::new(ResourceData { + available: 0, + last_refill: Instant::now(), + }), + wait_time_sec, + } + } +} + +/// Peer class for the `Limiter`. +enum PeerClass { + /// A validator. 
+ Validator, + /// Unclassified/low-priority peer. + NonValidator, +} + +/// A per-peer handle for `Limiter`. +#[derive(Debug)] +pub(super) struct LimiterHandle { + /// Data shared between handles and limiter. + data: Arc, + /// Set of active and upcoming validators. + validator_matrix: ValidatorMatrix, + /// Consumer ID for the sender holding this handle. + consumer_id: ConsumerId, +} + +impl LimiterHandle { + /// Waits until the requester is allocated `amount` additional resources. + pub(super) async fn request_allowance(&self, amount: u32) { + // As a first step, determine the peer class by checking if our id is in the validator set. + + if self.validator_matrix.is_empty() { + // It is likely that we have not been initialized, thus no node is getting the + // reserved resources. In this case, do not limit at all. + trace!("empty set of validators, not limiting resources at all"); + + return; + } + + let peer_class = if let Some(ref public_key) = self.consumer_id.consensus_key { + if self + .validator_matrix + .is_active_or_upcoming_validator(public_key) + { + PeerClass::Validator + } else { + PeerClass::NonValidator + } + } else { + PeerClass::NonValidator + }; + + match peer_class { + PeerClass::Validator => { + // No limit imposed on validators. + } + PeerClass::NonValidator => { + if self.data.resources_per_second == 0 { + return; + } + + let max_stored_resource = ((self.data.resources_per_second as f64) + * STORED_BUFFER_SECS.as_secs_f64()) + as u32; + + // We are a low-priority sender. Obtain a lock on the resources and wait an + // appropriate amount of time to fill them up. + { + let mut resources = self.data.resources.lock().await; + + while resources.available < 0 { + // Determine time delta since last refill. + let now = Instant::now(); + let elapsed = now - resources.last_refill; + resources.last_refill = now; + + // Add appropriate amount of resources, capped at `max_stored_bytes`. 
We + // are still maintaining the lock here to avoid issues with other + // low-priority requestors. + resources.available += ((elapsed.as_nanos() + * self.data.resources_per_second as u128) + / 1_000_000_000) as i64; + resources.available = resources.available.min(max_stored_resource as i64); + + // If we do not have enough resources available, sleep until we do. + if resources.available < 0 { + let estimated_time_remaining = Duration::from_millis( + (-resources.available) as u64 * 1000 + / self.data.resources_per_second as u64, + ); + + // Note: This sleep call is the reason we are using a tokio mutex + // instead of a regular `std` one, as we are holding it across the + // await point here. + tokio::time::sleep(estimated_time_remaining).await; + self.data + .wait_time_sec + .inc_by(estimated_time_remaining.as_secs_f64()); + } + } + + // Subtract the amount. If available resources go negative as a result, it + // is the next sender's problem. + resources.available -= amount as i64; + } + } + } + } +} + +/// An identity for a consumer. +#[derive(Debug)] +struct ConsumerId { + /// The peer's ID. + _peer_id: NodeId, + /// The remote node's public consensus key. + consensus_key: Option, +} + +#[cfg(test)] +mod tests { + use std::{sync::Arc, time::Duration}; + + use casper_types::{ChainNameDigest, EraId, SecretKey}; + use num_rational::Ratio; + use prometheus::Counter; + use tokio::time::Instant; + + use super::{Limiter, NodeId, PublicKey}; + use crate::{testing::init_logging, types::ValidatorMatrix}; + + /// Something that happens almost immediately, with some allowance for test jitter. + const SHORT_TIME: Duration = Duration::from_millis(250); + + /// Creates a new counter for testing. 
+ fn new_wait_time_sec() -> Counter { + Counter::new("test_time_waiting", "wait time counter used in tests") + .expect("could not create new counter") + } + + #[tokio::test] + async fn unlimited_limiter_is_unlimited() { + let mut rng = crate::new_rng(); + + // We insert one unrelated active validator to avoid triggering the automatic disabling of + // the limiter in case there are no active validators. + let validator_matrix = + ValidatorMatrix::new_with_validator(Arc::new(SecretKey::random(&mut rng))); + let limiter = Limiter::new(0, new_wait_time_sec(), validator_matrix); + + // Try with non-validators or unknown nodes. + let handles = vec![ + limiter.create_handle(NodeId::random(&mut rng), Some(PublicKey::random(&mut rng))), + limiter.create_handle(NodeId::random(&mut rng), None), + ]; + + for handle in handles { + let start = Instant::now(); + handle.request_allowance(0).await; + handle.request_allowance(u32::MAX).await; + handle.request_allowance(1).await; + assert!(start.elapsed() < SHORT_TIME); + } + } + + #[tokio::test] + async fn active_validator_is_unlimited() { + let mut rng = crate::new_rng(); + + let secret_key = SecretKey::random(&mut rng); + let consensus_key = PublicKey::from(&secret_key); + let validator_matrix = ValidatorMatrix::new_with_validator(Arc::new(secret_key)); + let limiter = Limiter::new(1_000, new_wait_time_sec(), validator_matrix); + + let handle = limiter.create_handle(NodeId::random(&mut rng), Some(consensus_key)); + + let start = Instant::now(); + handle.request_allowance(0).await; + handle.request_allowance(u32::MAX).await; + handle.request_allowance(1).await; + assert!(start.elapsed() < SHORT_TIME); + } + + #[tokio::test] + async fn inactive_validator_limited() { + let rng = &mut crate::new_rng(); + + // We insert one unrelated active validator to avoid triggering the automatic disabling of + // the limiter in case there are no active validators. 
+ let validator_matrix = + ValidatorMatrix::new_with_validator(Arc::new(SecretKey::random(rng))); + let peers = [ + (NodeId::random(rng), Some(PublicKey::random(rng))), + (NodeId::random(rng), None), + ]; + + let limiter = Limiter::new(1_000, new_wait_time_sec(), validator_matrix); + + for (peer, maybe_public_key) in peers { + let start = Instant::now(); + let handle = limiter.create_handle(peer, maybe_public_key); + + // Send 9_0001 bytes, we expect this to take roughly 15 seconds. + handle.request_allowance(1000).await; + handle.request_allowance(1000).await; + handle.request_allowance(1000).await; + handle.request_allowance(2000).await; + handle.request_allowance(4000).await; + handle.request_allowance(1).await; + let elapsed = start.elapsed(); + + assert!( + elapsed >= Duration::from_secs(9), + "{}s", + elapsed.as_secs_f64() + ); + assert!( + elapsed <= Duration::from_secs(10), + "{}s", + elapsed.as_secs_f64() + ); + } + } + + #[tokio::test] + async fn nonvalidators_parallel_limited() { + let mut rng = crate::new_rng(); + + let wait_metric = new_wait_time_sec(); + + // We insert one unrelated active validator to avoid triggering the automatic disabling of + // the limiter in case there are no active validators. + let validator_matrix = + ValidatorMatrix::new_with_validator(Arc::new(SecretKey::random(&mut rng))); + let limiter = Limiter::new(1_000, wait_metric.clone(), validator_matrix); + + let start = Instant::now(); + + // Parallel test, 5 non-validators sharing 1000 bytes per second. Each sends 1001 bytes, so + // total time is expected to be just over 5 seconds. 
+ let join_handles = (0..5) + .map(|_| { + limiter.create_handle(NodeId::random(&mut rng), Some(PublicKey::random(&mut rng))) + }) + .map(|handle| { + tokio::spawn(async move { + handle.request_allowance(500).await; + handle.request_allowance(150).await; + handle.request_allowance(350).await; + handle.request_allowance(1).await; + }) + }); + + for join_handle in join_handles { + join_handle.await.expect("could not join task"); + } + + let elapsed = start.elapsed(); + assert!(elapsed >= Duration::from_secs(5)); + assert!(elapsed <= Duration::from_secs(6)); + + // Ensure metrics recorded the correct number of seconds. + assert!( + wait_metric.get() <= 6.0, + "wait metric is too large: {}", + wait_metric.get() + ); + + // Note: The limiting will not apply to all data, so it should be slightly below 5 seconds. + assert!( + wait_metric.get() >= 4.5, + "wait metric is too small: {}", + wait_metric.get() + ); + } + + #[tokio::test] + async fn inactive_validators_unlimited_when_no_validators_known() { + init_logging(); + + let mut rng = crate::new_rng(); + + let secret_key = SecretKey::random(&mut rng); + let consensus_key = PublicKey::from(&secret_key); + let wait_metric = new_wait_time_sec(); + let limiter = Limiter::new( + 1_000, + wait_metric.clone(), + ValidatorMatrix::new( + Ratio::new(1, 3), + ChainNameDigest::from_chain_name("casper-example"), + None, + EraId::from(0), + Arc::new(secret_key), + consensus_key.clone(), + 2, + 3, + ), + ); + + // Try with non-validators or unknown nodes. + let handles = vec![ + limiter.create_handle(NodeId::random(&mut rng), Some(PublicKey::random(&mut rng))), + limiter.create_handle(NodeId::random(&mut rng), None), + ]; + + for handle in handles { + let start = Instant::now(); + + // Send 9_0001 bytes, should now finish instantly. 
+ handle.request_allowance(1000).await; + handle.request_allowance(1000).await; + handle.request_allowance(1000).await; + handle.request_allowance(2000).await; + handle.request_allowance(4000).await; + handle.request_allowance(1).await; + assert!(start.elapsed() < SHORT_TIME); + } + + // There should have been no time spent waiting. + assert!( + wait_metric.get() < SHORT_TIME.as_secs_f64(), + "wait_metric is too large: {}", + wait_metric.get() + ); + } + + /// Regression test for #2929. + #[tokio::test] + async fn throttling_of_non_validators_does_not_affect_validators() { + init_logging(); + + let mut rng = crate::new_rng(); + + let secret_key = SecretKey::random(&mut rng); + let consensus_key = PublicKey::from(&secret_key); + let validator_matrix = ValidatorMatrix::new_with_validator(Arc::new(secret_key)); + let limiter = Limiter::new(1_000, new_wait_time_sec(), validator_matrix); + + let non_validator_handle = limiter.create_handle(NodeId::random(&mut rng), None); + let validator_handle = limiter.create_handle(NodeId::random(&mut rng), Some(consensus_key)); + + // We request a large resource at once using a non-validator handle. At the same time, + // validator requests should be still served, even while waiting for the long-delayed + // request still blocking. + let start = Instant::now(); + let background_nv_request = tokio::spawn(async move { + non_validator_handle.request_allowance(5000).await; + non_validator_handle.request_allowance(5000).await; + + Instant::now() + }); + + // Allow for a little bit of time to pass to ensure the background task is running. 
+ tokio::time::sleep(Duration::from_secs(1)).await; + + validator_handle.request_allowance(10000).await; + validator_handle.request_allowance(10000).await; + + let v_finished = Instant::now(); + + let nv_finished = background_nv_request + .await + .expect("failed to join background nv task"); + + let nv_completed = nv_finished.duration_since(start); + assert!( + nv_completed >= Duration::from_millis(4500), + "non-validator did not delay sufficiently: {:?}", + nv_completed + ); + + let v_completed = v_finished.duration_since(start); + assert!( + v_completed <= Duration::from_millis(1500), + "validator did not finish quickly enough: {:?}", + v_completed + ); + } +} diff --git a/node/src/components/network/message.rs b/node/src/components/network/message.rs new file mode 100644 index 0000000000..9c63b4b46d --- /dev/null +++ b/node/src/components/network/message.rs @@ -0,0 +1,1034 @@ +use std::{ + fmt::{self, Debug, Display, Formatter}, + net::SocketAddr, + sync::Arc, +}; + +use datasize::DataSize; +use futures::future::BoxFuture; +use serde::{ + de::{DeserializeOwned, Error as SerdeError}, + Deserialize, Deserializer, Serialize, Serializer, +}; +use strum::EnumDiscriminants; + +#[cfg(test)] +use casper_types::testing::TestRng; +use casper_types::{ + crypto, AsymmetricType, Chainspec, Digest, ProtocolVersion, PublicKey, SecretKey, Signature, + AUCTION_LANE_ID, INSTALL_UPGRADE_LANE_ID, MINT_LANE_ID, +}; + +use super::{counting_format::ConnectionId, health::Nonce, BincodeFormat}; +use crate::{ + effect::EffectBuilder, + protocol, + types::NodeId, + utils::{ + opt_display::OptDisplay, + specimen::{Cache, LargestSpecimen, SizeEstimator}, + }, +}; + +use tracing::warn; + +// Additional overhead accounted for (eg. lower level networking packet encapsulation). +const NETWORK_MESSAGE_LIMIT_SAFETY_MARGIN: usize = 256; + +/// The default protocol version to use in absence of one in the protocol version field. 
+#[inline] +fn default_protocol_version() -> ProtocolVersion { + ProtocolVersion::V1_0_0 +} + +#[derive(Clone, Debug, Deserialize, Serialize, EnumDiscriminants)] +#[strum_discriminants(derive(strum::EnumIter))] +#[allow(clippy::large_enum_variant)] +pub(crate) enum Message

{ + Handshake { + /// Network we are connected to. + network_name: String, + /// The public address of the node connecting. + public_addr: SocketAddr, + /// Protocol version the node is speaking. + #[serde(default = "default_protocol_version")] + protocol_version: ProtocolVersion, + /// A self-signed certificate indicating validator status. + #[serde(default)] + consensus_certificate: Option, + /// True if the node is syncing. + #[serde(default)] + is_syncing: bool, + /// Hash of the chainspec the node is running. + #[serde(default)] + chainspec_hash: Option, + }, + /// A ping request. + Ping { + /// The nonce to be returned with the pong. + nonce: Nonce, + }, + /// A pong response. + Pong { + /// Nonce to match pong to ping. + nonce: Nonce, + }, + Payload(P), +} + +impl Message

{ + /// Classifies a message based on its payload. + #[inline] + pub(super) fn classify(&self) -> MessageKind { + match self { + Message::Handshake { .. } | Message::Ping { .. } | Message::Pong { .. } => { + MessageKind::Protocol + } + Message::Payload(payload) => payload.message_kind(), + } + } + + /// Determines whether or not a message is low priority. + #[inline] + pub(super) fn is_low_priority(&self) -> bool { + match self { + Message::Handshake { .. } | Message::Ping { .. } | Message::Pong { .. } => false, + Message::Payload(payload) => payload.is_low_priority(), + } + } + + /// Returns the incoming resource estimate of the payload. + #[inline] + pub(super) fn payload_incoming_resource_estimate(&self, weights: &EstimatorWeights) -> u32 { + match self { + Message::Handshake { .. } => 0, + // Ping and Pong have a hardcoded weights. Since every ping will result in a pong being + // sent as a reply, it has a higher weight. + Message::Ping { .. } => 2, + Message::Pong { .. } => 1, + Message::Payload(payload) => payload.incoming_resource_estimate(weights), + } + } + + /// Returns whether or not the payload is unsafe for syncing node consumption. + #[inline] + pub(super) fn payload_is_unsafe_for_syncing_nodes(&self) -> bool { + match self { + Message::Handshake { .. } | Message::Ping { .. } | Message::Pong { .. } => false, + Message::Payload(payload) => payload.is_unsafe_for_syncing_peers(), + } + } + + /// Attempts to create a demand-event from this message. + /// + /// Succeeds if the outer message contains a payload that can be converted into a demand. + pub(super) fn try_into_demand( + self, + effect_builder: EffectBuilder, + sender: NodeId, + ) -> Result<(REv, BoxFuture<'static, Option

>), Box> + where + REv: FromIncoming

+ Send, + { + match self { + Message::Handshake { .. } | Message::Ping { .. } | Message::Pong { .. } => { + Err(self.into()) + } + Message::Payload(payload) => { + // Note: For now, the wrapping/unwrap of the payload is a bit unfortunate here. + REv::try_demand_from_incoming(effect_builder, sender, payload) + .map_err(|err| Message::Payload(err).into()) + } + } + } +} + +/// A pair of secret keys used by consensus. +pub(super) struct NodeKeyPair { + secret_key: Arc, + public_key: PublicKey, +} + +impl NodeKeyPair { + /// Creates a new key pair for consensus signing. + pub(super) fn new(key_pair: (Arc, PublicKey)) -> Self { + Self { + secret_key: key_pair.0, + public_key: key_pair.1, + } + } + + /// Sign a value using this keypair. + fn sign>(&self, value: T) -> Signature { + crypto::sign(value, &self.secret_key, &self.public_key) + } +} + +/// Certificate used to indicate that the peer is a validator using the specified public key. +/// +/// Note that this type has custom `Serialize` and `Deserialize` implementations to allow the +/// `public_key` and `signature` fields to be encoded to all-lowercase hex, hence circumventing the +/// checksummed-hex encoding used by `PublicKey` and `Signature` in versions 1.4.2 and 1.4.3. +#[derive(Clone, Debug, Eq, PartialEq)] +pub(crate) struct ConsensusCertificate { + public_key: PublicKey, + signature: Signature, +} + +impl ConsensusCertificate { + /// Creates a new consensus certificate from a connection ID and key pair. + pub(super) fn create(connection_id: ConnectionId, key_pair: &NodeKeyPair) -> Self { + let signature = key_pair.sign(connection_id.as_bytes()); + ConsensusCertificate { + public_key: key_pair.public_key.clone(), + signature, + } + } + + /// Validates a certificate, returning a `PublicKey` if valid. 
+ pub(super) fn validate(self, connection_id: ConnectionId) -> Result { + crypto::verify(connection_id.as_bytes(), &self.signature, &self.public_key)?; + Ok(self.public_key) + } + + /// Creates a random `ConnectionId`. + #[cfg(test)] + fn random(rng: &mut TestRng) -> Self { + let secret_key = SecretKey::random(rng); + let public_key = PublicKey::from(&secret_key); + ConsensusCertificate::create( + ConnectionId::random(rng), + &NodeKeyPair::new((Arc::new(secret_key), public_key)), + ) + } +} + +impl Display for ConsensusCertificate { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!(f, "key:{}", self.public_key) + } +} + +/// This type and the `NonHumanReadableCertificate` are helper structs only used in the `Serialize` +/// and `Deserialize` implementations of `ConsensusCertificate` to allow handshaking between nodes +/// running the casper-node v1.4.2 and v1.4.3 software versions. +/// +/// Checksummed-hex encoding was introduced in 1.4.2 and was applied to `PublicKey` and `Signature` +/// types, affecting the encoding of `ConsensusCertificate` since handshaking uses a human-readable +/// type of encoder/decoder. +/// +/// The 1.4.3 version immediately after 1.4.2 used a slightly different style of checksummed-hex +/// encoding which is incompatible with the 1.4.2 style. To effectively disable checksummed-hex +/// encoding, we need to use an all-lowercase form of hex encoding for the `PublicKey` and +/// `Signature` types. +/// +/// The `HumanReadableCertificate` enables that by explicitly being constructed from all-lowercase +/// hex encoded types, while the `NonHumanReadableCertificate` is a simple mirror of +/// `ConsensusCertificate` to allow us to derive `Serialize` and `Deserialize`, avoiding complex +/// hand-written implementations for the non-human-readable case. 
+#[derive(Serialize, Deserialize)] +struct HumanReadableCertificate { + public_key: String, + signature: String, +} + +#[derive(Serialize, Deserialize)] +struct NonHumanReadableCertificate { + public_key: PublicKey, + signature: Signature, +} + +impl Serialize for ConsensusCertificate { + fn serialize(&self, serializer: S) -> Result { + if serializer.is_human_readable() { + let human_readable_certificate = HumanReadableCertificate { + public_key: self.public_key.to_hex().to_lowercase(), + signature: self.signature.to_hex().to_lowercase(), + }; + + return human_readable_certificate.serialize(serializer); + } + + let non_human_readable_certificate = NonHumanReadableCertificate { + public_key: self.public_key.clone(), + signature: self.signature, + }; + non_human_readable_certificate.serialize(serializer) + } +} + +impl<'de> Deserialize<'de> for ConsensusCertificate { + fn deserialize>(deserializer: D) -> Result { + if deserializer.is_human_readable() { + let human_readable_certificate = HumanReadableCertificate::deserialize(deserializer)?; + let public_key = PublicKey::from_hex( + human_readable_certificate + .public_key + .to_lowercase() + .as_bytes(), + ) + .map_err(D::Error::custom)?; + let signature = Signature::from_hex( + human_readable_certificate + .signature + .to_lowercase() + .as_bytes(), + ) + .map_err(D::Error::custom)?; + return Ok(ConsensusCertificate { + public_key, + signature, + }); + } + + let non_human_readable_certificate = + NonHumanReadableCertificate::deserialize(deserializer)?; + Ok(ConsensusCertificate { + public_key: non_human_readable_certificate.public_key, + signature: non_human_readable_certificate.signature, + }) + } +} + +impl Display for Message

{ + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self { + Message::Handshake { + network_name, + public_addr, + protocol_version, + consensus_certificate, + is_syncing, + chainspec_hash, + } => { + write!( + f, + "handshake: {}, public addr: {}, protocol_version: {}, consensus_certificate: {}, is_syncing: {}, chainspec_hash: {}", + network_name, + public_addr, + protocol_version, + OptDisplay::new(consensus_certificate.as_ref(), "none"), + is_syncing, + OptDisplay::new(chainspec_hash.as_ref(), "none") + ) + } + Message::Ping { nonce } => write!(f, "ping({})", nonce), + Message::Pong { nonce } => write!(f, "pong({})", nonce), + Message::Payload(payload) => write!(f, "payload: {}", payload), + } + } +} + +/// A classification system for networking messages. +#[derive(Copy, Clone, Debug)] +pub(crate) enum MessageKind { + /// Non-payload messages, like handshakes. + Protocol, + /// Messages directly related to consensus. + Consensus, + /// Transactions being gossiped. + TransactionGossip, + /// Blocks being gossiped. + BlockGossip, + /// Finality signatures being gossiped. + FinalitySignatureGossip, + /// Addresses being gossiped. + AddressGossip, + /// Transactions being transferred directly (via requests). + TransactionTransfer, + /// Blocks for finality signatures being transferred directly (via requests and other means). + BlockTransfer, + /// Tries transferred, usually as part of chain syncing. + TrieTransfer, + /// Any other kind of payload (or missing classification). 
+ Other, +} + +impl Display for MessageKind { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self { + MessageKind::Protocol => f.write_str("protocol"), + MessageKind::Consensus => f.write_str("consensus"), + MessageKind::TransactionGossip => f.write_str("transaction_gossip"), + MessageKind::BlockGossip => f.write_str("block_gossip"), + MessageKind::FinalitySignatureGossip => f.write_str("finality_signature_gossip"), + MessageKind::AddressGossip => f.write_str("address_gossip"), + MessageKind::TransactionTransfer => f.write_str("transaction_transfer"), + MessageKind::BlockTransfer => f.write_str("block_transfer"), + MessageKind::TrieTransfer => f.write_str("trie_transfer"), + MessageKind::Other => f.write_str("other"), + } + } +} + +/// Network message payload. +/// +/// Payloads are what is transferred across the network outside of control messages from the +/// networking component itself. +pub(crate) trait Payload: + Serialize + DeserializeOwned + Clone + Debug + Display + Send + Sync + 'static +{ + /// Classifies the payload based on its contents. + fn message_kind(&self) -> MessageKind; + + /// The penalty for resource usage of a message to be applied when processed as incoming. + fn incoming_resource_estimate(&self, _weights: &EstimatorWeights) -> u32; + + /// Determines if the payload should be considered low priority. + fn is_low_priority(&self) -> bool { + false + } + + /// Indicates a message is not safe to send to a syncing node. + /// + /// This functionality should be removed once multiplexed networking lands. + fn is_unsafe_for_syncing_peers(&self) -> bool; +} + +/// Network message conversion support. +pub(crate) trait FromIncoming

{ + /// Creates a new value from a received payload. + fn from_incoming(sender: NodeId, payload: P) -> Self; + + /// Tries to convert a payload into a demand. + /// + /// This function can optionally be called before `from_incoming` to attempt to convert an + /// incoming payload into a potential demand. + fn try_demand_from_incoming( + _effect_builder: EffectBuilder, + _sender: NodeId, + payload: P, + ) -> Result<(Self, BoxFuture<'static, Option

>), P> + where + Self: Sized + Send, + { + Err(payload) + } +} + +/// A generic configuration for payload weights. +/// +/// Implementors of `Payload` are free to interpret this as they see fit. +/// +/// The default implementation sets all weights to zero. +#[derive(DataSize, Debug, Default, Clone, Deserialize, Serialize)] +pub struct EstimatorWeights { + pub consensus: u32, + pub block_gossip: u32, + pub transaction_gossip: u32, + pub finality_signature_gossip: u32, + pub address_gossip: u32, + pub finality_signature_broadcasts: u32, + pub transaction_requests: u32, + pub transaction_responses: u32, + pub legacy_deploy_requests: u32, + pub legacy_deploy_responses: u32, + pub block_requests: u32, + pub block_responses: u32, + pub block_header_requests: u32, + pub block_header_responses: u32, + pub trie_requests: u32, + pub trie_responses: u32, + pub finality_signature_requests: u32, + pub finality_signature_responses: u32, + pub sync_leap_requests: u32, + pub sync_leap_responses: u32, + pub approvals_hashes_requests: u32, + pub approvals_hashes_responses: u32, + pub execution_results_requests: u32, + pub execution_results_responses: u32, +} + +mod specimen_support { + use std::iter; + + use serde::Serialize; + + use crate::utils::specimen::{ + largest_variant, Cache, LargestSpecimen, SizeEstimator, HIGHEST_UNICODE_CODEPOINT, + }; + + use super::{ConsensusCertificate, Message, MessageDiscriminants}; + + impl

LargestSpecimen for Message

+ where + P: Serialize + LargestSpecimen, + { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + let largest_network_name = estimator.parameter("network_name_limit"); + + largest_variant::( + estimator, + |variant| match variant { + MessageDiscriminants::Handshake => Message::Handshake { + network_name: iter::repeat(HIGHEST_UNICODE_CODEPOINT) + .take(largest_network_name) + .collect(), + public_addr: LargestSpecimen::largest_specimen(estimator, cache), + protocol_version: LargestSpecimen::largest_specimen(estimator, cache), + consensus_certificate: LargestSpecimen::largest_specimen(estimator, cache), + is_syncing: LargestSpecimen::largest_specimen(estimator, cache), + chainspec_hash: LargestSpecimen::largest_specimen(estimator, cache), + }, + MessageDiscriminants::Ping => Message::Ping { + nonce: LargestSpecimen::largest_specimen(estimator, cache), + }, + MessageDiscriminants::Pong => Message::Pong { + nonce: LargestSpecimen::largest_specimen(estimator, cache), + }, + MessageDiscriminants::Payload => { + Message::Payload(LargestSpecimen::largest_specimen(estimator, cache)) + } + }, + ) + } + } + + impl LargestSpecimen for ConsensusCertificate { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + ConsensusCertificate { + public_key: LargestSpecimen::largest_specimen(estimator, cache), + signature: LargestSpecimen::largest_specimen(estimator, cache), + } + } + } +} + +/// An estimator that uses the serialized network representation as a measure of size. +#[derive(Clone, Debug)] +pub(crate) struct NetworkMessageEstimator<'a> { + /// The chainspec to retrieve estimation values from. + chainspec: &'a Chainspec, +} + +impl<'a> NetworkMessageEstimator<'a> { + /// Creates a new network message estimator. + pub(crate) fn new(chainspec: &'a Chainspec) -> Self { + Self { chainspec } + } + + /// Returns a parameter by name as `i64`. 
+ fn get_parameter(&self, name: &'static str) -> Option { + let max_transaction_size = self + .chainspec + .transaction_config + .transaction_v1_config + .get_max_serialized_length(INSTALL_UPGRADE_LANE_ID); + Some(match name { + // The name limit will be larger than the actual name, so it is a safe upper bound. + "network_name_limit" => self.chainspec.network_config.name.len() as i64, + // These limits are making deploys bigger than they actually are, since many items + // have both a `contract_name` and an `entry_point`. We accept 2X as an upper bound. + "contract_name_limit" => max_transaction_size as i64, + "entry_point_limit" => max_transaction_size as i64, + "recent_era_count" => { + (self.chainspec.core_config.unbonding_delay + - self.chainspec.core_config.auction_delay) as i64 + } + "validator_count" => self.chainspec.core_config.validator_slots as i64, + "minimum_era_height" => self.chainspec.core_config.minimum_era_height as i64, + "era_duration_ms" => self.chainspec.core_config.era_duration.millis() as i64, + "minimum_round_length_ms" => self + .chainspec + .core_config + .minimum_block_time + .millis() + .max(1) as i64, + "max_transaction_size" => max_transaction_size as i64, + "approvals_hashes" => self + .chainspec + .transaction_config + .transaction_v1_config + .get_max_block_count() as i64, + "max_mint_per_block" => self + .chainspec + .transaction_config + .transaction_v1_config + .get_max_transaction_count(MINT_LANE_ID) as i64, + "max_auctions_per_block" => { + self.chainspec + .transaction_config + .transaction_v1_config + .get_max_transaction_count(AUCTION_LANE_ID) as i64 + } + "max_install_upgrade_transactions_per_block" => { + self.chainspec + .transaction_config + .transaction_v1_config + .get_max_transaction_count(INSTALL_UPGRADE_LANE_ID) as i64 + } + "max_standard_transactions_per_block" => { + self.chainspec + .transaction_config + .transaction_v1_config + .get_max_wasm_transaction_count() as i64 + } + 
"average_approvals_per_transaction_in_block" => { + let max_total_txns = self + .chainspec + .transaction_config + .transaction_v1_config + .get_max_block_count() as i64; + + // Note: The +1 is to overestimate, as depending on the serialization format chosen, + // spreading out the approvals can increase or decrease the size. For + // example, in a length-prefixed encoding, putting them all in one may result + // in a smaller size if variable size integer encoding it used. In a format + // using separators without trailing separators (e.g. commas in JSON), + // spreading out will reduce the total number of bytes. + ((self.chainspec.transaction_config.block_max_approval_count as i64 + + max_total_txns + - 1) + / max_total_txns) + .max(0) + + 1 + } + "max_accusations_per_block" => self.chainspec.core_config.validator_slots as i64, + // `RADIX` from EE. + "max_pointer_per_node" => 255, + // Endorsements are currently hard-disabled (via code). If ever re-enabled, this + // parameter should ideally be removed entirely. + "endorsements_enabled" => 0, + "signature_rewards_max_delay" => { + self.chainspec.core_config.signature_rewards_max_delay as i64 + } + _ => return None, + }) + } +} + +/// Encoding helper function. +/// +/// Encodes a message in the same manner the network component would before sending it. +fn serialize_net_message(data: &T) -> Vec +where + T: Serialize, +{ + BincodeFormat::default() + .serialize_arbitrary(data) + .expect("did not expect serialization to fail") +} + +/// Creates a serialized specimen of the largest possible networking message. +fn generate_largest_message(chainspec: &Chainspec) -> Message { + let estimator = &NetworkMessageEstimator::new(chainspec); + let cache = &mut Cache::default(); + + Message::largest_specimen(estimator, cache) +} + +/// Enforces chainspec configured message size limit. 
+pub(crate) fn within_message_size_limit_tolerance(chainspec: &Chainspec) -> bool { + // Ensure the size of the largest message generated under these chainspec settings does not + // exceed the configured message size limit. + let configured_maximum = chainspec.network_config.maximum_net_message_size as usize; + let serialized = serialize_net_message(&generate_largest_message(chainspec)); + let calculated_size = serialized.len(); + let within_tolerance = + calculated_size + NETWORK_MESSAGE_LIMIT_SAFETY_MARGIN <= configured_maximum; + if !within_tolerance { + warn!( + calculated_size, + configured_maximum, + "config value [network][maximum_net_message_size] is too small to accommodate the \ + maximum message size" + ); + } + within_tolerance +} + +impl SizeEstimator for NetworkMessageEstimator<'_> { + fn estimate(&self, val: &T) -> usize { + serialize_net_message(&val).len() + } + + fn parameter>(&self, name: &'static str) -> T { + let value = self + .get_parameter(name) + .unwrap_or_else(|| panic!("missing parameter \"{}\" for specimen estimation", name)); + + T::try_from(value).unwrap_or_else(|_| { + panic!( + "Failed to convert the parameter `{name}` of value `{value}` to the type `{}`", + core::any::type_name::() + ) + }) + } +} + +#[cfg(test)] +// We use a variety of weird names in these tests. +#[allow(non_camel_case_types)] +mod tests { + use std::{ + net::{Ipv4Addr, SocketAddr}, + pin::Pin, + }; + + use assert_matches::assert_matches; + use bytes::BytesMut; + use casper_types::ProtocolVersion; + use serde::{de::DeserializeOwned, Deserialize, Serialize}; + use tokio_serde::{Deserializer, Serializer}; + + use crate::{components::network::message_pack_format::MessagePackFormat, protocol}; + + use super::*; + + /// Version 1.0.0 network level message. + /// + /// Note that the message itself may go out of sync over time as `protocol::Message` changes. + /// The test further below ensures that the handshake is accurate in the meantime. 
+ #[derive(Clone, Debug, Deserialize, Serialize)] + pub(crate) enum V1_0_0_Message { + Handshake { + /// Network we are connected to. + network_name: String, + /// The public address of the node connecting. + public_address: SocketAddr, + }, + Payload(protocol::Message), + } + + /// A "conserved" version 1.0.0 handshake. + /// + /// NEVER CHANGE THIS CONSTANT TO MAKE TESTS PASS, AS IT IS BASED ON MAINNET DATA. + const V1_0_0_HANDSHAKE: &[u8] = &[ + 129, 0, 146, 178, 115, 101, 114, 105, 97, 108, 105, 122, 97, 116, 105, 111, 110, 45, 116, + 101, 115, 116, 177, 49, 50, 46, 51, 52, 46, 53, 54, 46, 55, 56, 58, 49, 50, 51, 52, 54, + ]; + + /// A "conserved" version 1.4.2 handshake. + /// + /// NEVER CHANGE THIS CONSTANT TO MAKE TESTS PASS, AS IT IS BASED ON TESTNET DATA. + const V1_4_2_HANDSHAKE: &[u8] = &[ + 129, 0, 148, 177, 101, 120, 97, 109, 112, 108, 101, 45, 104, 97, 110, 100, 115, 104, 97, + 107, 101, 177, 49, 50, 46, 51, 52, 46, 53, 54, 46, 55, 56, 58, 49, 50, 51, 52, 54, 165, 49, + 46, 52, 46, 50, 146, 217, 68, 48, 50, 48, 50, 56, 51, 99, 48, 68, 54, 56, 55, 57, 51, 51, + 69, 98, 50, 48, 97, 53, 52, 49, 67, 56, 53, 52, 48, 52, 55, 56, 56, 55, 55, 56, 54, 49, + 101, 100, 69, 52, 65, 70, 102, 65, 102, 48, 52, 97, 54, 56, 101, 97, 49, 57, 52, 66, 55, + 65, 52, 48, 48, 52, 54, 52, 50, 52, 101, 217, 130, 48, 50, 99, 68, 70, 65, 51, 51, 51, 99, + 49, 56, 56, 57, 51, 100, 57, 102, 51, 54, 48, 51, 53, 97, 51, 98, 55, 55, 48, 50, 51, 52, + 56, 97, 67, 102, 70, 48, 70, 68, 53, 65, 50, 65, 69, 57, 99, 66, 67, 48, 69, 52, 56, 69, + 53, 57, 100, 100, 48, 56, 53, 53, 56, 49, 97, 54, 48, 49, 53, 57, 66, 55, 102, 99, 67, 99, + 53, 52, 68, 68, 48, 70, 65, 57, 52, 52, 51, 100, 50, 69, 51, 53, 55, 51, 51, 55, 56, 68, + 54, 49, 69, 97, 49, 54, 101, 54, 53, 57, 68, 49, 54, 100, 48, 48, 48, 57, 65, 52, 48, 66, + 55, 55, 53, 48, 66, 67, 67, 69, 65, 69, + ]; + + /// A "conserved" version 1.4.3 handshake. 
+ /// + /// NEVER CHANGE THIS CONSTANT TO MAKE TESTS PASS, AS IT IS BASED ON MAINNET DATA. + const V1_4_3_HANDSHAKE: &[u8] = &[ + 129, 0, 148, 177, 101, 120, 97, 109, 112, 108, 101, 45, 104, 97, 110, 100, 115, 104, 97, + 107, 101, 177, 49, 50, 46, 51, 52, 46, 53, 54, 46, 55, 56, 58, 49, 50, 51, 52, 54, 165, 49, + 46, 52, 46, 51, 146, 217, 68, 48, 50, 48, 51, 51, 49, 101, 102, 98, 102, 55, 99, 99, 51, + 51, 56, 49, 53, 49, 53, 97, 55, 50, 50, 57, 102, 57, 99, 51, 101, 55, 57, 55, 48, 51, 48, + 50, 50, 56, 99, 97, 97, 49, 56, 57, 102, 98, 50, 97, 49, 48, 50, 56, 97, 100, 101, 48, 52, + 101, 50, 57, 55, 48, 102, 55, 52, 99, 53, 217, 130, 48, 50, 55, 54, 54, 52, 56, 54, 54, 55, + 57, 52, 98, 97, 99, 99, 101, 52, 52, 49, 51, 57, 50, 102, 52, 51, 50, 100, 98, 97, 50, 100, + 101, 54, 55, 100, 97, 51, 98, 97, 55, 56, 53, 101, 53, 57, 99, 57, 52, 56, 48, 102, 49, 50, + 54, 55, 57, 52, 101, 100, 55, 56, 98, 56, 101, 53, 50, 57, 57, 57, 55, 54, 49, 99, 48, 56, + 49, 53, 56, 50, 56, 53, 53, 56, 48, 98, 52, 97, 54, 55, 98, 55, 101, 51, 52, 51, 99, 50, + 50, 56, 49, 51, 51, 99, 52, 49, 100, 52, 50, 53, 48, 98, 102, 55, 57, 100, 55, 56, 54, 100, + 55, 99, 49, 57, 57, 99, 97, 57, 55, 55, + ]; + + // Note: MessagePack messages can be visualized using the message pack visualizer at + // https://sugendran.github.io/msgpack-visualizer/. Rust arrays can be copy&pasted and converted + // to base64 using the following one-liner: `import base64; base64.b64encode(bytes([129, 0, + // ...]))` + + // It is very important to note that different versions of the message pack codec crate set the + // human-readable flag in a different manner. Thus the V1.0.0 handshake can be serialized in two + // different ways, with "human readable" enabled and without. + // + // Our V1.0.0 protocol uses the "human readable" enabled version, they key difference being that + // the `SocketAddr` is encoded as a string instead of a two-item array. 
+ + /// A pseudo-1.0.0 handshake, where the serde human readable flag has been changed due to an + /// `rmp` version mismatch. + const BROKEN_V1_0_0_HANDSHAKE: &[u8] = &[ + 129, 0, 146, 178, 115, 101, 114, 105, 97, 108, 105, 122, 97, 116, 105, 111, 110, 45, 116, + 101, 115, 116, 129, 0, 146, 148, 12, 34, 56, 78, 205, 48, 58, + ]; + + const TEST_SOCKET_ADDR: SocketAddr = SocketAddr::V4(std::net::SocketAddrV4::new( + Ipv4Addr::new(12, 34, 56, 78), + 12346, + )); + + /// Serialize a message using the standard serialization method for handshakes. + fn serialize_message(msg: &M) -> Vec { + let mut serializer = MessagePackFormat; + + Pin::new(&mut serializer) + .serialize(&msg) + .expect("handshake serialization failed") + .into_iter() + .collect() + } + + /// Deserialize a message using the standard deserialization method for handshakes. + fn deserialize_message(serialized: &[u8]) -> M { + let mut deserializer = MessagePackFormat; + + Pin::new(&mut deserializer) + .deserialize(&BytesMut::from(serialized)) + .expect("message deserialization failed") + } + + /// Given a message `from` of type `F`, serializes it, then deserializes it as `T`. + fn roundtrip_message(from: &F) -> T + where + F: Serialize, + T: DeserializeOwned, + { + let serialized = serialize_message(from); + deserialize_message(&serialized) + } + + // This test ensure that the serialization of the `V_1_0_0_Message` has not changed and that the + // serialization/deserialization methods for message in this test are likely accurate. 
+ #[test] + fn v1_0_0_handshake_is_as_expected() { + let handshake = V1_0_0_Message::Handshake { + network_name: "serialization-test".to_owned(), + public_address: TEST_SOCKET_ADDR, + }; + + let serialized = serialize_message::(&handshake); + + assert_eq!(&serialized, V1_0_0_HANDSHAKE); + assert_ne!(&serialized, BROKEN_V1_0_0_HANDSHAKE); + + let deserialized: V1_0_0_Message = deserialize_message(&serialized); + + match deserialized { + V1_0_0_Message::Handshake { + network_name, + public_address, + } => { + assert_eq!(network_name, "serialization-test"); + assert_eq!(public_address, TEST_SOCKET_ADDR); + } + other => { + panic!("did not expect {:?} as the deserialized product", other); + } + } + } + + #[test] + fn v1_0_0_can_decode_current_handshake() { + let mut rng = crate::new_rng(); + let modern_handshake = Message::::Handshake { + network_name: "example-handshake".to_string(), + public_addr: TEST_SOCKET_ADDR, + protocol_version: ProtocolVersion::from_parts(5, 6, 7), + consensus_certificate: Some(ConsensusCertificate::random(&mut rng)), + is_syncing: false, + chainspec_hash: Some(Digest::hash("example-chainspec")), + }; + + let legacy_handshake: V1_0_0_Message = roundtrip_message(&modern_handshake); + + match legacy_handshake { + V1_0_0_Message::Handshake { + network_name, + public_address, + } => { + assert_eq!(network_name, "example-handshake"); + assert_eq!(public_address, TEST_SOCKET_ADDR); + } + V1_0_0_Message::Payload(_) => { + panic!("did not expect legacy handshake to deserialize to payload") + } + } + } + + #[test] + fn current_handshake_decodes_from_v1_0_0() { + let legacy_handshake = V1_0_0_Message::Handshake { + network_name: "example-handshake".to_string(), + public_address: TEST_SOCKET_ADDR, + }; + + let modern_handshake: Message = roundtrip_message(&legacy_handshake); + + if let Message::Handshake { + network_name, + public_addr, + protocol_version, + consensus_certificate, + is_syncing, + chainspec_hash, + } = modern_handshake + { + 
assert_eq!(network_name, "example-handshake"); + assert_eq!(public_addr, TEST_SOCKET_ADDR); + assert_eq!(protocol_version, ProtocolVersion::V1_0_0); + assert!(consensus_certificate.is_none()); + assert!(!is_syncing); + assert!(chainspec_hash.is_none()) + } else { + panic!("did not expect modern handshake to deserialize to anything but") + } + } + + #[test] + fn current_handshake_decodes_from_historic_v1_0_0() { + let modern_handshake: Message = deserialize_message(V1_0_0_HANDSHAKE); + + if let Message::Handshake { + network_name, + public_addr, + protocol_version, + consensus_certificate, + is_syncing, + chainspec_hash, + } = modern_handshake + { + assert!(!is_syncing); + assert_eq!(network_name, "serialization-test"); + assert_eq!(public_addr, TEST_SOCKET_ADDR); + assert_eq!(protocol_version, ProtocolVersion::V1_0_0); + assert!(consensus_certificate.is_none()); + assert!(!is_syncing); + assert!(chainspec_hash.is_none()) + } else { + panic!("did not expect modern handshake to deserialize to anything but") + } + } + + #[test] + fn current_handshake_decodes_from_historic_v1_4_2() { + let modern_handshake: Message = deserialize_message(V1_4_2_HANDSHAKE); + + if let Message::Handshake { + network_name, + public_addr, + protocol_version, + consensus_certificate, + is_syncing, + chainspec_hash, + } = modern_handshake + { + assert_eq!(network_name, "example-handshake"); + assert_eq!(public_addr, TEST_SOCKET_ADDR); + assert_eq!(protocol_version, ProtocolVersion::from_parts(1, 4, 2)); + assert!(!is_syncing); + let ConsensusCertificate { + public_key, + signature, + } = consensus_certificate.unwrap(); + + assert_eq!( + public_key, + PublicKey::from_hex( + "020283c0d687933eb20a541c8540478877861ede4affaf04a68ea194b7a40046424e" + ) + .unwrap() + ); + assert_eq!( + signature, + Signature::from_hex( + "02cdfa333c18893d9f36035a3b7702348acff0fd5a2ae9cbc0e48e59dd085581a6015\ + 9b7fccc54dd0fa9443d2e3573378d61ea16e659d16d0009a40b7750bcceae" + ) + .unwrap() + ); + assert!(!is_syncing); 
+ assert!(chainspec_hash.is_none()) + } else { + panic!("did not expect modern handshake to deserialize to anything but") + } + } + + #[test] + fn current_handshake_decodes_from_historic_v1_4_3() { + let modern_handshake: Message = deserialize_message(V1_4_3_HANDSHAKE); + + if let Message::Handshake { + network_name, + public_addr, + protocol_version, + consensus_certificate, + is_syncing, + chainspec_hash, + } = modern_handshake + { + assert!(!is_syncing); + assert_eq!(network_name, "example-handshake"); + assert_eq!(public_addr, TEST_SOCKET_ADDR); + assert_eq!(protocol_version, ProtocolVersion::from_parts(1, 4, 3)); + let ConsensusCertificate { + public_key, + signature, + } = consensus_certificate.unwrap(); + + assert_eq!( + public_key, + PublicKey::from_hex( + "020331efbf7cc3381515a7229f9c3e797030228caa189fb2a1028ade04e2970f74c5" + ) + .unwrap() + ); + assert_eq!( + signature, + Signature::from_hex( + "027664866794bacce441392f432dba2de67da3ba785e59c9480f126794ed78b8e5299\ + 9761c08158285580b4a67b7e343c228133c41d4250bf79d786d7c199ca977" + ) + .unwrap() + ); + assert!(!is_syncing); + assert!(chainspec_hash.is_none()) + } else { + panic!("did not expect modern handshake to deserialize to anything but") + } + } + + fn roundtrip_certificate(use_human_readable: bool) { + let mut rng = crate::new_rng(); + let certificate = ConsensusCertificate::random(&mut rng); + + let deserialized = if use_human_readable { + let serialized = serde_json::to_string(&certificate).unwrap(); + serde_json::from_str(&serialized).unwrap() + } else { + let serialized = bincode::serialize(&certificate).unwrap(); + bincode::deserialize(&serialized).unwrap() + }; + assert_eq!(certificate, deserialized); + } + + #[test] + fn serde_json_roundtrip_certificate() { + roundtrip_certificate(true) + } + + #[test] + fn bincode_roundtrip_certificate() { + roundtrip_certificate(false) + } + + #[test] + fn assert_the_largest_specimen_type_and_size() { + let (chainspec, _) = 
crate::utils::Loadable::from_resources("production"); + let specimen = generate_largest_message(&chainspec); + + assert_matches!( + specimen, + Message::Payload(protocol::Message::GetResponse { .. }), + "the type of the largest possible network message based on the production chainspec has changed" + ); + + let serialized = serialize_net_message(&specimen); + + assert_eq!( + serialized.len(), + 8_388_736, + "the size of the largest possible network message based on the production chainspec has changed" + ); + } +} diff --git a/node/src/components/network/message_pack_format.rs b/node/src/components/network/message_pack_format.rs new file mode 100644 index 0000000000..b991c6639e --- /dev/null +++ b/node/src/components/network/message_pack_format.rs @@ -0,0 +1,43 @@ +//! Message pack wire format encoder. +//! +//! This module is used to pin the correct version of message pack used throughout the codebase to +//! our network decoder via `Cargo.toml`; using `tokio_serde::MessagePack` would instead tie it +//! to the dependency specified in `tokio_serde`'s `Cargo.toml`. + +use std::{io, pin::Pin}; + +use bytes::{Bytes, BytesMut}; +use serde::{Deserialize, Serialize}; +use tokio_serde::{Deserializer, Serializer}; + +/// msgpack encoder/decoder for messages. +#[derive(Debug)] +pub struct MessagePackFormat; + +impl Serializer for MessagePackFormat +where + M: Serialize, +{ + // Note: We cast to `io::Error` because of the `Codec::Error: Into` + // requirement. 
+ type Error = io::Error; + + #[inline] + fn serialize(self: Pin<&mut Self>, item: &M) -> Result { + rmp_serde::to_vec(item) + .map(Into::into) + .map_err(|err| io::Error::new(io::ErrorKind::InvalidData, err)) + } +} + +impl Deserializer for MessagePackFormat +where + for<'de> M: Deserialize<'de>, +{ + type Error = io::Error; + + #[inline] + fn deserialize(self: Pin<&mut Self>, src: &BytesMut) -> Result { + rmp_serde::from_read_ref(src).map_err(|err| io::Error::new(io::ErrorKind::InvalidData, err)) + } +} diff --git a/node/src/components/network/metrics.rs b/node/src/components/network/metrics.rs new file mode 100644 index 0000000000..3f296f7a86 --- /dev/null +++ b/node/src/components/network/metrics.rs @@ -0,0 +1,652 @@ +use std::sync::Weak; + +use prometheus::{Counter, IntCounter, IntGauge, Registry}; +use tracing::debug; + +use super::{outgoing::OutgoingMetrics, MessageKind}; +use crate::unregister_metric; + +/// Network-type agnostic networking metrics. +#[derive(Debug)] +pub(super) struct Metrics { + /// How often a request was made by a component to broadcast. + pub(super) broadcast_requests: IntCounter, + /// How often a request to send a message directly to a peer was made. + pub(super) direct_message_requests: IntCounter, + /// Number of messages still waiting to be sent out (broadcast and direct). + pub(super) queued_messages: IntGauge, + /// Number of connected peers. + pub(super) peers: IntGauge, + + /// Count of outgoing messages that are protocol overhead. + pub(super) out_count_protocol: IntCounter, + /// Count of outgoing messages with consensus payload. + pub(super) out_count_consensus: IntCounter, + /// Count of outgoing messages with deploy gossiper payload. + pub(super) out_count_deploy_gossip: IntCounter, + pub(super) out_count_block_gossip: IntCounter, + pub(super) out_count_finality_signature_gossip: IntCounter, + /// Count of outgoing messages with address gossiper payload. 
+ pub(super) out_count_address_gossip: IntCounter, + /// Count of outgoing messages with deploy request/response payload. + pub(super) out_count_deploy_transfer: IntCounter, + /// Count of outgoing messages with block request/response payload. + pub(super) out_count_block_transfer: IntCounter, + /// Count of outgoing messages with trie request/response payload. + pub(super) out_count_trie_transfer: IntCounter, + /// Count of outgoing messages with other payload. + pub(super) out_count_other: IntCounter, + + /// Volume in bytes of outgoing messages that are protocol overhead. + pub(super) out_bytes_protocol: IntCounter, + /// Volume in bytes of outgoing messages with consensus payload. + pub(super) out_bytes_consensus: IntCounter, + /// Volume in bytes of outgoing messages with deploy gossiper payload. + pub(super) out_bytes_deploy_gossip: IntCounter, + pub(super) out_bytes_block_gossip: IntCounter, + pub(super) out_bytes_finality_signature_gossip: IntCounter, + /// Volume in bytes of outgoing messages with address gossiper payload. + pub(super) out_bytes_address_gossip: IntCounter, + /// Volume in bytes of outgoing messages with deploy request/response payload. + pub(super) out_bytes_deploy_transfer: IntCounter, + /// Volume in bytes of outgoing messages with block request/response payload. + pub(super) out_bytes_block_transfer: IntCounter, + /// Volume in bytes of outgoing messages with block request/response payload. + pub(super) out_bytes_trie_transfer: IntCounter, + /// Volume in bytes of outgoing messages with other payload. + pub(super) out_bytes_other: IntCounter, + + /// Number of outgoing connections in connecting state. + pub(super) out_state_connecting: IntGauge, + /// Number of outgoing connections in waiting state. + pub(super) out_state_waiting: IntGauge, + /// Number of outgoing connections in connected state. + pub(super) out_state_connected: IntGauge, + /// Number of outgoing connections in blocked state. 
+ pub(super) out_state_blocked: IntGauge, + /// Number of outgoing connections in loopback state. + pub(super) out_state_loopback: IntGauge, + + /// Volume in bytes of incoming messages that are protocol overhead. + pub(super) in_bytes_protocol: IntCounter, + /// Volume in bytes of incoming messages with consensus payload. + pub(super) in_bytes_consensus: IntCounter, + /// Volume in bytes of incoming messages with deploy gossiper payload. + pub(super) in_bytes_deploy_gossip: IntCounter, + pub(super) in_bytes_block_gossip: IntCounter, + pub(super) in_bytes_finality_signature_gossip: IntCounter, + /// Volume in bytes of incoming messages with address gossiper payload. + pub(super) in_bytes_address_gossip: IntCounter, + /// Volume in bytes of incoming messages with deploy request/response payload. + pub(super) in_bytes_deploy_transfer: IntCounter, + /// Volume in bytes of incoming messages with block request/response payload. + pub(super) in_bytes_block_transfer: IntCounter, + /// Volume in bytes of incoming messages with block request/response payload. + pub(super) in_bytes_trie_transfer: IntCounter, + /// Volume in bytes of incoming messages with other payload. + pub(super) in_bytes_other: IntCounter, + + /// Count of incoming messages that are protocol overhead. + pub(super) in_count_protocol: IntCounter, + /// Count of incoming messages with consensus payload. + pub(super) in_count_consensus: IntCounter, + /// Count of incoming messages with deploy gossiper payload. + pub(super) in_count_deploy_gossip: IntCounter, + pub(super) in_count_block_gossip: IntCounter, + pub(super) in_count_finality_signature_gossip: IntCounter, + /// Count of incoming messages with address gossiper payload. + pub(super) in_count_address_gossip: IntCounter, + /// Count of incoming messages with deploy request/response payload. + pub(super) in_count_deploy_transfer: IntCounter, + /// Count of incoming messages with block request/response payload. 
+ pub(super) in_count_block_transfer: IntCounter, + /// Count of incoming messages with trie request/response payload. + pub(super) in_count_trie_transfer: IntCounter, + /// Count of incoming messages with other payload. + pub(super) in_count_other: IntCounter, + + /// Number of trie requests accepted for processing. + pub(super) requests_for_trie_accepted: IntCounter, + /// Number of trie requests finished (successful or unsuccessful). + pub(super) requests_for_trie_finished: IntCounter, + + /// Total time spent delaying outgoing traffic to non-validators due to limiter, in seconds. + pub(super) accumulated_outgoing_limiter_delay: Counter, + /// Total time spent delaying incoming traffic from non-validators due to limiter, in seconds. + pub(super) accumulated_incoming_limiter_delay: Counter, + + /// Registry instance. + registry: Registry, +} + +impl Metrics { + /// Creates a new instance of networking metrics. + pub(super) fn new(registry: &Registry) -> Result { + let broadcast_requests = + IntCounter::new("net_broadcast_requests", "number of broadcasting requests")?; + let direct_message_requests = IntCounter::new( + "net_direct_message_requests", + "number of requests to send a message directly to a peer", + )?; + let queued_messages = IntGauge::new( + "net_queued_direct_messages", + "number of messages waiting to be sent out", + )?; + let peers = IntGauge::new("peers", "number of connected peers")?; + + let out_count_protocol = IntCounter::new( + "net_out_count_protocol", + "count of outgoing messages that are protocol overhead", + )?; + let out_count_consensus = IntCounter::new( + "net_out_count_consensus", + "count of outgoing messages with consensus payload", + )?; + let out_count_deploy_gossip = IntCounter::new( + "net_out_count_deploy_gossip", + "count of outgoing messages with deploy gossiper payload", + )?; + let out_count_block_gossip = IntCounter::new( + "net_out_count_block_gossip", + "count of outgoing messages with block gossiper payload", + )?; + 
let out_count_finality_signature_gossip = IntCounter::new( + "net_out_count_finality_signature_gossip", + "count of outgoing messages with finality signature gossiper payload", + )?; + let out_count_address_gossip = IntCounter::new( + "net_out_count_address_gossip", + "count of outgoing messages with address gossiper payload", + )?; + let out_count_deploy_transfer = IntCounter::new( + "net_out_count_deploy_transfer", + "count of outgoing messages with deploy request/response payload", + )?; + let out_count_block_transfer = IntCounter::new( + "net_out_count_block_transfer", + "count of outgoing messages with block request/response payload", + )?; + let out_count_trie_transfer = IntCounter::new( + "net_out_count_trie_transfer", + "count of outgoing messages with trie payloads", + )?; + let out_count_other = IntCounter::new( + "net_out_count_other", + "count of outgoing messages with other payload", + )?; + + let out_bytes_protocol = IntCounter::new( + "net_out_bytes_protocol", + "volume in bytes of outgoing messages that are protocol overhead", + )?; + let out_bytes_consensus = IntCounter::new( + "net_out_bytes_consensus", + "volume in bytes of outgoing messages with consensus payload", + )?; + let out_bytes_deploy_gossip = IntCounter::new( + "net_out_bytes_deploy_gossip", + "volume in bytes of outgoing messages with deploy gossiper payload", + )?; + let out_bytes_block_gossip = IntCounter::new( + "net_out_bytes_block_gossip", + "volume in bytes of outgoing messages with block gossiper payload", + )?; + let out_bytes_finality_signature_gossip = IntCounter::new( + "net_out_bytes_finality_signature_gossip", + "volume in bytes of outgoing messages with finality signature gossiper payload", + )?; + let out_bytes_address_gossip = IntCounter::new( + "net_out_bytes_address_gossip", + "volume in bytes of outgoing messages with address gossiper payload", + )?; + let out_bytes_deploy_transfer = IntCounter::new( + "net_out_bytes_deploy_transfer", + "volume in bytes of outgoing 
messages with deploy request/response payload", + )?; + let out_bytes_block_transfer = IntCounter::new( + "net_out_bytes_block_transfer", + "volume in bytes of outgoing messages with block request/response payload", + )?; + let out_bytes_trie_transfer = IntCounter::new( + "net_out_bytes_trie_transfer", + "volume in bytes of outgoing messages with trie payloads", + )?; + let out_bytes_other = IntCounter::new( + "net_out_bytes_other", + "volume in bytes of outgoing messages with other payload", + )?; + + let out_state_connecting = IntGauge::new( + "out_state_connecting", + "number of connections in the connecting state", + )?; + let out_state_waiting = IntGauge::new( + "out_state_waiting", + "number of connections in the waiting state", + )?; + let out_state_connected = IntGauge::new( + "out_state_connected", + "number of connections in the connected state", + )?; + let out_state_blocked = IntGauge::new( + "out_state_blocked", + "number of connections in the blocked state", + )?; + let out_state_loopback = IntGauge::new( + "out_state_loopback", + "number of connections in the loopback state", + )?; + + let in_count_protocol = IntCounter::new( + "net_in_count_protocol", + "count of incoming messages that are protocol overhead", + )?; + let in_count_consensus = IntCounter::new( + "net_in_count_consensus", + "count of incoming messages with consensus payload", + )?; + let in_count_deploy_gossip = IntCounter::new( + "net_in_count_deploy_gossip", + "count of incoming messages with deploy gossiper payload", + )?; + let in_count_block_gossip = IntCounter::new( + "net_in_count_block_gossip", + "count of incoming messages with block gossiper payload", + )?; + let in_count_finality_signature_gossip = IntCounter::new( + "net_in_count_finality_signature_gossip", + "count of incoming messages with finality signature gossiper payload", + )?; + let in_count_address_gossip = IntCounter::new( + "net_in_count_address_gossip", + "count of incoming messages with address gossiper 
payload", + )?; + let in_count_deploy_transfer = IntCounter::new( + "net_in_count_deploy_transfer", + "count of incoming messages with deploy request/response payload", + )?; + let in_count_block_transfer = IntCounter::new( + "net_in_count_block_transfer", + "count of incoming messages with block request/response payload", + )?; + let in_count_trie_transfer = IntCounter::new( + "net_in_count_trie_transfer", + "count of incoming messages with trie payloads", + )?; + let in_count_other = IntCounter::new( + "net_in_count_other", + "count of incoming messages with other payload", + )?; + + let in_bytes_protocol = IntCounter::new( + "net_in_bytes_protocol", + "volume in bytes of incoming messages that are protocol overhead", + )?; + let in_bytes_consensus = IntCounter::new( + "net_in_bytes_consensus", + "volume in bytes of incoming messages with consensus payload", + )?; + let in_bytes_deploy_gossip = IntCounter::new( + "net_in_bytes_deploy_gossip", + "volume in bytes of incoming messages with deploy gossiper payload", + )?; + let in_bytes_block_gossip = IntCounter::new( + "net_in_bytes_block_gossip", + "volume in bytes of incoming messages with block gossiper payload", + )?; + let in_bytes_finality_signature_gossip = IntCounter::new( + "net_in_bytes_finality_signature_gossip", + "volume in bytes of incoming messages with finality signature gossiper payload", + )?; + let in_bytes_address_gossip = IntCounter::new( + "net_in_bytes_address_gossip", + "volume in bytes of incoming messages with address gossiper payload", + )?; + let in_bytes_deploy_transfer = IntCounter::new( + "net_in_bytes_deploy_transfer", + "volume in bytes of incoming messages with deploy request/response payload", + )?; + let in_bytes_block_transfer = IntCounter::new( + "net_in_bytes_block_transfer", + "volume in bytes of incoming messages with block request/response payload", + )?; + let in_bytes_trie_transfer = IntCounter::new( + "net_in_bytes_trie_transfer", + "volume in bytes of incoming messages 
with trie payloads", + )?; + let in_bytes_other = IntCounter::new( + "net_in_bytes_other", + "volume in bytes of incoming messages with other payload", + )?; + + let requests_for_trie_accepted = IntCounter::new( + "requests_for_trie_accepted", + "number of trie requests accepted for processing", + )?; + let requests_for_trie_finished = IntCounter::new( + "requests_for_trie_finished", + "number of trie requests finished, successful or not", + )?; + + let accumulated_outgoing_limiter_delay = Counter::new( + "accumulated_outgoing_limiter_delay", + "seconds spent delaying outgoing traffic to non-validators due to limiter, in seconds", + )?; + let accumulated_incoming_limiter_delay = Counter::new( + "accumulated_incoming_limiter_delay", + "seconds spent delaying incoming traffic from non-validators due to limiter, in seconds." + )?; + + registry.register(Box::new(broadcast_requests.clone()))?; + registry.register(Box::new(direct_message_requests.clone()))?; + registry.register(Box::new(queued_messages.clone()))?; + registry.register(Box::new(peers.clone()))?; + + registry.register(Box::new(out_count_protocol.clone()))?; + registry.register(Box::new(out_count_consensus.clone()))?; + registry.register(Box::new(out_count_deploy_gossip.clone()))?; + registry.register(Box::new(out_count_block_gossip.clone()))?; + registry.register(Box::new(out_count_finality_signature_gossip.clone()))?; + registry.register(Box::new(out_count_address_gossip.clone()))?; + registry.register(Box::new(out_count_deploy_transfer.clone()))?; + registry.register(Box::new(out_count_block_transfer.clone()))?; + registry.register(Box::new(out_count_trie_transfer.clone()))?; + registry.register(Box::new(out_count_other.clone()))?; + + registry.register(Box::new(out_bytes_protocol.clone()))?; + registry.register(Box::new(out_bytes_consensus.clone()))?; + registry.register(Box::new(out_bytes_deploy_gossip.clone()))?; + registry.register(Box::new(out_bytes_block_gossip.clone()))?; + 
registry.register(Box::new(out_bytes_finality_signature_gossip.clone()))?; + registry.register(Box::new(out_bytes_address_gossip.clone()))?; + registry.register(Box::new(out_bytes_deploy_transfer.clone()))?; + registry.register(Box::new(out_bytes_block_transfer.clone()))?; + registry.register(Box::new(out_bytes_trie_transfer.clone()))?; + registry.register(Box::new(out_bytes_other.clone()))?; + + registry.register(Box::new(out_state_connecting.clone()))?; + registry.register(Box::new(out_state_waiting.clone()))?; + registry.register(Box::new(out_state_connected.clone()))?; + registry.register(Box::new(out_state_blocked.clone()))?; + registry.register(Box::new(out_state_loopback.clone()))?; + + registry.register(Box::new(in_count_protocol.clone()))?; + registry.register(Box::new(in_count_consensus.clone()))?; + registry.register(Box::new(in_count_deploy_gossip.clone()))?; + registry.register(Box::new(in_count_block_gossip.clone()))?; + registry.register(Box::new(in_count_finality_signature_gossip.clone()))?; + registry.register(Box::new(in_count_address_gossip.clone()))?; + registry.register(Box::new(in_count_deploy_transfer.clone()))?; + registry.register(Box::new(in_count_block_transfer.clone()))?; + registry.register(Box::new(in_count_trie_transfer.clone()))?; + registry.register(Box::new(in_count_other.clone()))?; + + registry.register(Box::new(in_bytes_protocol.clone()))?; + registry.register(Box::new(in_bytes_consensus.clone()))?; + registry.register(Box::new(in_bytes_deploy_gossip.clone()))?; + registry.register(Box::new(in_bytes_block_gossip.clone()))?; + registry.register(Box::new(in_bytes_finality_signature_gossip.clone()))?; + registry.register(Box::new(in_bytes_address_gossip.clone()))?; + registry.register(Box::new(in_bytes_deploy_transfer.clone()))?; + registry.register(Box::new(in_bytes_block_transfer.clone()))?; + registry.register(Box::new(in_bytes_trie_transfer.clone()))?; + registry.register(Box::new(in_bytes_other.clone()))?; + + 
registry.register(Box::new(requests_for_trie_accepted.clone()))?; + registry.register(Box::new(requests_for_trie_finished.clone()))?; + + registry.register(Box::new(accumulated_outgoing_limiter_delay.clone()))?; + registry.register(Box::new(accumulated_incoming_limiter_delay.clone()))?; + + Ok(Metrics { + broadcast_requests, + direct_message_requests, + queued_messages, + peers, + out_count_protocol, + out_count_consensus, + out_count_deploy_gossip, + out_count_block_gossip, + out_count_finality_signature_gossip, + out_count_address_gossip, + out_count_deploy_transfer, + out_count_block_transfer, + out_count_trie_transfer, + out_count_other, + out_bytes_protocol, + out_bytes_consensus, + out_bytes_deploy_gossip, + out_bytes_block_gossip, + out_bytes_finality_signature_gossip, + out_bytes_address_gossip, + out_bytes_deploy_transfer, + out_bytes_block_transfer, + out_bytes_trie_transfer, + out_bytes_other, + out_state_connecting, + out_state_waiting, + out_state_connected, + out_state_blocked, + out_state_loopback, + in_count_protocol, + in_count_consensus, + in_count_deploy_gossip, + in_count_block_gossip, + in_count_finality_signature_gossip, + in_count_address_gossip, + in_count_deploy_transfer, + in_count_block_transfer, + in_count_trie_transfer, + in_count_other, + in_bytes_protocol, + in_bytes_consensus, + in_bytes_deploy_gossip, + in_bytes_block_gossip, + in_bytes_finality_signature_gossip, + in_bytes_address_gossip, + in_bytes_deploy_transfer, + in_bytes_block_transfer, + in_bytes_trie_transfer, + in_bytes_other, + requests_for_trie_accepted, + requests_for_trie_finished, + accumulated_outgoing_limiter_delay, + accumulated_incoming_limiter_delay, + registry: registry.clone(), + }) + } + + /// Records an outgoing payload. 
+ pub(crate) fn record_payload_out(this: &Weak, kind: MessageKind, size: u64) { + if let Some(metrics) = this.upgrade() { + match kind { + MessageKind::Protocol => { + metrics.out_bytes_protocol.inc_by(size); + metrics.out_count_protocol.inc(); + } + MessageKind::Consensus => { + metrics.out_bytes_consensus.inc_by(size); + metrics.out_count_consensus.inc(); + } + MessageKind::TransactionGossip => { + metrics.out_bytes_deploy_gossip.inc_by(size); + metrics.out_count_deploy_gossip.inc(); + } + MessageKind::BlockGossip => { + metrics.out_bytes_block_gossip.inc_by(size); + metrics.out_count_block_gossip.inc(); + } + MessageKind::FinalitySignatureGossip => { + metrics.out_bytes_finality_signature_gossip.inc_by(size); + metrics.out_count_finality_signature_gossip.inc(); + } + MessageKind::AddressGossip => { + metrics.out_bytes_address_gossip.inc_by(size); + metrics.out_count_address_gossip.inc(); + } + MessageKind::TransactionTransfer => { + metrics.out_bytes_deploy_transfer.inc_by(size); + metrics.out_count_deploy_transfer.inc(); + } + MessageKind::BlockTransfer => { + metrics.out_bytes_block_transfer.inc_by(size); + metrics.out_count_block_transfer.inc(); + } + MessageKind::TrieTransfer => { + metrics.out_bytes_trie_transfer.inc_by(size); + metrics.out_count_trie_transfer.inc(); + } + MessageKind::Other => { + metrics.out_bytes_other.inc_by(size); + metrics.out_count_other.inc(); + } + } + } else { + debug!("not recording metrics, component already shut down"); + } + } + + /// Records an incoming payload. 
+ pub(crate) fn record_payload_in(this: &Weak, kind: MessageKind, size: u64) { + if let Some(metrics) = this.upgrade() { + match kind { + MessageKind::Protocol => { + metrics.in_bytes_protocol.inc_by(size); + metrics.in_count_protocol.inc(); + } + MessageKind::Consensus => { + metrics.in_bytes_consensus.inc_by(size); + metrics.in_count_consensus.inc(); + } + MessageKind::TransactionGossip => { + metrics.in_bytes_deploy_gossip.inc_by(size); + metrics.in_count_deploy_gossip.inc(); + } + MessageKind::BlockGossip => { + metrics.in_bytes_block_gossip.inc_by(size); + metrics.in_count_block_gossip.inc(); + } + MessageKind::FinalitySignatureGossip => { + metrics.in_bytes_finality_signature_gossip.inc_by(size); + metrics.in_count_finality_signature_gossip.inc(); + } + MessageKind::AddressGossip => { + metrics.in_bytes_address_gossip.inc_by(size); + metrics.in_count_address_gossip.inc(); + } + MessageKind::TransactionTransfer => { + metrics.in_bytes_deploy_transfer.inc_by(size); + metrics.in_count_deploy_transfer.inc(); + } + MessageKind::BlockTransfer => { + metrics.in_bytes_block_transfer.inc_by(size); + metrics.in_count_block_transfer.inc(); + } + MessageKind::TrieTransfer => { + metrics.in_bytes_trie_transfer.inc_by(size); + metrics.in_count_trie_transfer.inc(); + } + MessageKind::Other => { + metrics.in_bytes_other.inc_by(size); + metrics.in_count_other.inc(); + } + } + } else { + debug!("not recording metrics, component already shut down"); + } + } + + /// Creates a set of outgoing metrics that is connected to this set of metrics. + pub(super) fn create_outgoing_metrics(&self) -> OutgoingMetrics { + OutgoingMetrics { + out_state_connecting: self.out_state_connecting.clone(), + out_state_waiting: self.out_state_waiting.clone(), + out_state_connected: self.out_state_connected.clone(), + out_state_blocked: self.out_state_blocked.clone(), + out_state_loopback: self.out_state_loopback.clone(), + } + } + + /// Records that a trie request has been started. 
+ pub(super) fn record_trie_request_start(this: &Weak) { + if let Some(metrics) = this.upgrade() { + metrics.requests_for_trie_accepted.inc(); + } else { + debug!("not recording metrics, component already shut down"); + } + } + + /// Records that a trie request has ended. + pub(super) fn record_trie_request_end(this: &Weak) { + if let Some(metrics) = this.upgrade() { + metrics.requests_for_trie_finished.inc(); + } else { + debug!("not recording metrics, component already shut down"); + } + } +} + +impl Drop for Metrics { + fn drop(&mut self) { + unregister_metric!(self.registry, self.broadcast_requests); + unregister_metric!(self.registry, self.direct_message_requests); + unregister_metric!(self.registry, self.queued_messages); + unregister_metric!(self.registry, self.peers); + + unregister_metric!(self.registry, self.out_count_protocol); + unregister_metric!(self.registry, self.out_count_consensus); + unregister_metric!(self.registry, self.out_count_deploy_gossip); + unregister_metric!(self.registry, self.out_count_block_gossip); + unregister_metric!(self.registry, self.out_count_finality_signature_gossip); + unregister_metric!(self.registry, self.out_count_address_gossip); + unregister_metric!(self.registry, self.out_count_deploy_transfer); + unregister_metric!(self.registry, self.out_count_block_transfer); + unregister_metric!(self.registry, self.out_count_trie_transfer); + unregister_metric!(self.registry, self.out_count_other); + + unregister_metric!(self.registry, self.out_bytes_protocol); + unregister_metric!(self.registry, self.out_bytes_consensus); + unregister_metric!(self.registry, self.out_bytes_deploy_gossip); + unregister_metric!(self.registry, self.out_bytes_block_gossip); + unregister_metric!(self.registry, self.out_bytes_finality_signature_gossip); + unregister_metric!(self.registry, self.out_bytes_address_gossip); + unregister_metric!(self.registry, self.out_bytes_deploy_transfer); + unregister_metric!(self.registry, 
self.out_bytes_block_transfer); + unregister_metric!(self.registry, self.out_bytes_trie_transfer); + unregister_metric!(self.registry, self.out_bytes_other); + + unregister_metric!(self.registry, self.out_state_connecting); + unregister_metric!(self.registry, self.out_state_waiting); + unregister_metric!(self.registry, self.out_state_connected); + unregister_metric!(self.registry, self.out_state_blocked); + unregister_metric!(self.registry, self.out_state_loopback); + + unregister_metric!(self.registry, self.in_count_protocol); + unregister_metric!(self.registry, self.in_count_consensus); + unregister_metric!(self.registry, self.in_count_deploy_gossip); + unregister_metric!(self.registry, self.in_count_block_gossip); + unregister_metric!(self.registry, self.in_count_finality_signature_gossip); + unregister_metric!(self.registry, self.in_count_address_gossip); + unregister_metric!(self.registry, self.in_count_deploy_transfer); + unregister_metric!(self.registry, self.in_count_block_transfer); + unregister_metric!(self.registry, self.in_count_trie_transfer); + unregister_metric!(self.registry, self.in_count_other); + + unregister_metric!(self.registry, self.in_bytes_protocol); + unregister_metric!(self.registry, self.in_bytes_consensus); + unregister_metric!(self.registry, self.in_bytes_deploy_gossip); + unregister_metric!(self.registry, self.in_bytes_block_gossip); + unregister_metric!(self.registry, self.in_bytes_finality_signature_gossip); + unregister_metric!(self.registry, self.in_bytes_address_gossip); + unregister_metric!(self.registry, self.in_bytes_deploy_transfer); + unregister_metric!(self.registry, self.in_bytes_block_transfer); + unregister_metric!(self.registry, self.in_bytes_trie_transfer); + unregister_metric!(self.registry, self.in_bytes_other); + + unregister_metric!(self.registry, self.requests_for_trie_accepted); + unregister_metric!(self.registry, self.requests_for_trie_finished); + + unregister_metric!(self.registry, 
self.accumulated_outgoing_limiter_delay); + unregister_metric!(self.registry, self.accumulated_incoming_limiter_delay); + } +} diff --git a/node/src/components/network/one_way_messaging.rs b/node/src/components/network/one_way_messaging.rs deleted file mode 100644 index 56be87c199..0000000000 --- a/node/src/components/network/one_way_messaging.rs +++ /dev/null @@ -1,239 +0,0 @@ -//! This module is home to the infrastructure to support "one-way" messages, i.e. requests which -//! expect no response. -//! -//! For now, as a side-effect of the original small_network component, all peer-to-peer messages -//! defined outside of the network component are one-way. - -use std::{fmt::Debug, future::Future, io, iter, pin::Pin}; - -use datasize::DataSize; -use futures::{AsyncReadExt, AsyncWriteExt, FutureExt}; -use futures_io::{AsyncRead, AsyncWrite}; -use libp2p::{ - request_response::{ - ProtocolSupport, RequestResponse, RequestResponseCodec, RequestResponseConfig, - }, - PeerId, -}; - -use super::{Config, Error, PayloadT, ProtocolId}; -use crate::{ - components::networking_metrics::NetworkingMetrics, - types::{Chainspec, NodeId}, -}; - -/// The inner portion of the `ProtocolId` for the one-way message behavior. A standard prefix and -/// suffix will be applied to create the full protocol name. -const PROTOCOL_NAME_INNER: &str = "validator/one-way"; - -/// Constructs a new libp2p behavior suitable for use by one-way messaging. 
-pub(super) fn new_behavior( - config: &Config, - net_metrics: &NetworkingMetrics, - chainspec: &Chainspec, -) -> RequestResponse { - let codec = Codec::new(config, net_metrics); - let protocol_id = ProtocolId::new(chainspec, PROTOCOL_NAME_INNER); - let request_response_config = RequestResponseConfig::from(config); - RequestResponse::new( - codec, - iter::once((protocol_id, ProtocolSupport::Full)), - request_response_config, - ) -} - -#[derive(DataSize, Debug)] -pub(super) struct Outgoing { - // Datasize note: `PeerId` can be skipped, as in our case it should be 100% stack allocated. - #[data_size(skip)] - pub destination: PeerId, - pub message: Vec, -} - -impl Outgoing { - pub(super) fn new( - destination: NodeId, - payload: &P, - max_size: u32, - ) -> Result { - let serialized_message = - bincode::serialize(payload).map_err(|error| Error::Serialization(*error))?; - - if serialized_message.len() > max_size as usize { - return Err(Error::MessageTooLarge { - max_size, - actual_size: serialized_message.len() as u64, - }); - } - - match &destination { - NodeId::P2p(destination) => Ok(Outgoing { - destination: *destination, - message: serialized_message, - }), - destination => { - unreachable!( - "can't send to {} (small_network node ID) via libp2p", - destination - ) - } - } - } -} - -impl From for Vec { - fn from(outgoing: Outgoing) -> Self { - outgoing.message - } -} - -/// Implements libp2p `RequestResponseCodec` for one-way messages, i.e. requests which expect no -/// response. 
-#[derive(Debug, Clone)] -pub(super) struct Codec { - max_message_size: u32, - read_futures_in_flight: prometheus::Gauge, - read_futures_total: prometheus::Gauge, - write_futures_in_flight: prometheus::Gauge, - write_futures_total: prometheus::Gauge, -} - -impl Codec { - pub(super) fn new(config: &Config, net_metrics: &NetworkingMetrics) -> Self { - Self { - max_message_size: config.max_one_way_message_size, - read_futures_in_flight: net_metrics.read_futures_in_flight.clone(), - read_futures_total: net_metrics.read_futures_total.clone(), - write_futures_in_flight: net_metrics.write_futures_in_flight.clone(), - write_futures_total: net_metrics.write_futures_total.clone(), - } - } -} - -impl RequestResponseCodec for Codec { - type Protocol = ProtocolId; - type Request = Vec; - type Response = (); - - fn read_request<'life0, 'life1, 'life2, 'async_trait, T>( - &'life0 mut self, - _protocol: &'life1 Self::Protocol, - io: &'life2 mut T, - ) -> Pin> + 'async_trait + Send>> - where - 'life0: 'async_trait, - 'life1: 'async_trait, - 'life2: 'async_trait, - Self: 'async_trait, - T: AsyncRead + Unpin + Send + 'async_trait, - { - async move { - // Read the length. - let mut buffer = [0; 4]; - io.read(&mut buffer[..]) - .await - .map_err(|err| io::Error::new(io::ErrorKind::InvalidInput, err))?; - let length = u32::from_le_bytes(buffer); - if length > self.max_message_size { - return Err(io::Error::new( - io::ErrorKind::InvalidInput, - format!( - "message size exceeds limit: {} > {}", - length, self.max_message_size - ), - )); - } - - // Read the payload. 
- let mut buffer = vec![0; length as usize]; - io.read_exact(&mut buffer).await?; - Ok(buffer) - } - .boxed() - } - - fn read_response<'life0, 'life1, 'life2, 'async_trait, T>( - &'life0 mut self, - _protocol: &'life1 Self::Protocol, - _io: &'life2 mut T, - ) -> Pin> + 'async_trait + Send>> - where - 'life0: 'async_trait, - 'life1: 'async_trait, - 'life2: 'async_trait, - Self: 'async_trait, - T: AsyncRead + Unpin + Send + 'async_trait, - { - self.read_futures_in_flight.inc(); - self.read_futures_total.inc(); - let gauge = self.read_futures_in_flight.clone(); - - // For one-way messages, where no response will be sent by the peer, just return Ok(()). - async move { - gauge.dec(); - Ok(()) - } - .boxed() - } - - fn write_request<'life0, 'life1, 'life2, 'async_trait, T>( - &'life0 mut self, - _protocol: &'life1 Self::Protocol, - io: &'life2 mut T, - request: Self::Request, - ) -> Pin> + 'async_trait + Send>> - where - 'life0: 'async_trait, - 'life1: 'async_trait, - 'life2: 'async_trait, - Self: 'async_trait, - T: AsyncWrite + Unpin + Send + 'async_trait, - { - async move { - // Write the length. - if request.len() > self.max_message_size as usize { - return Err(io::Error::new( - io::ErrorKind::InvalidInput, - format!( - "message size exceeds limit: {} > {}", - request.len(), - self.max_message_size - ), - )); - } - let length = request.len() as u32; - io.write_all(&length.to_le_bytes()).await?; - - // Write the payload. 
- io.write_all(&request).await?; - - io.close().await?; - Ok(()) - } - .boxed() - } - - fn write_response<'life0, 'life1, 'life2, 'async_trait, T>( - &'life0 mut self, - _protocol: &'life1 Self::Protocol, - _io: &'life2 mut T, - _response: Self::Response, - ) -> Pin> + 'async_trait + Send>> - where - 'life0: 'async_trait, - 'life1: 'async_trait, - 'life2: 'async_trait, - Self: 'async_trait, - T: AsyncWrite + Unpin + Send + 'async_trait, - { - self.write_futures_in_flight.inc(); - self.write_futures_total.inc(); - let gauge = self.write_futures_in_flight.clone(); - // For one-way messages, where no response will be sent by the peer, just return Ok(()). - async move { - gauge.dec(); - Ok(()) - } - .boxed() - } -} diff --git a/node/src/components/network/outgoing.rs b/node/src/components/network/outgoing.rs new file mode 100644 index 0000000000..e90fc8a0ce --- /dev/null +++ b/node/src/components/network/outgoing.rs @@ -0,0 +1,1885 @@ +//! Management of outgoing connections. +//! +//! This module implements outgoing connection management, decoupled from the underlying transport +//! or any higher-level level parts. It encapsulates the reconnection and blocklisting logic on the +//! `SocketAddr` level. +//! +//! # Basic structure +//! +//! Core of this module is the `OutgoingManager`, which supports the following functionality: +//! +//! * Handed a `SocketAddr`s via the `learn_addr` function, it will permanently maintain a +//! connection to the given address, only giving up if retry thresholds are exceeded, after which +//! it will be forgotten. +//! * `block_addr` and `redeem_addr` can be used to maintain a `SocketAddr`-keyed block list. +//! * `OutgoingManager` maintains an internal routing table. The `get_route` function can be used to +//! retrieve a "route" (typically a `sync::channel` accepting network messages) to a remote peer +//! by `NodeId`. +//! +//! # Requirements +//! +//! 
`OutgoingManager` is decoupled from the underlying protocol, all of its interactions are +//! performed through [`DialRequest`] and [`DialOutcome`]s. This frees the `OutgoingManager` from +//! having to worry about protocol specifics. +//! +//! Three conditions not expressed in code must be fulfilled for the `OutgoingManager` to function: +//! +//! * The `Dialer` is expected to produce `DialOutcomes` for every dial [`DialRequest::Dial`] +//! eventually. These must be forwarded to the `OutgoingManager` via the `handle_dial_outcome` +//! function. +//! * The `perform_housekeeping` method must be called periodically to give the `OutgoingManager` a +//! chance to initiate reconnections and collect garbage. +//! * When a connection is dropped, the connection manager must be notified via +//! `handle_connection_drop`. +//! +//! # Lifecycle +//! +//! The following chart illustrates the lifecycle of an outgoing connection. +//! +//! ```text +//! forget (after n tries) +//! ┌────────────────────────────────────┐ +//! │ learn ▼ +//! │ ┌────────────── unknown/forgotten +//! │ │ (implicit state) +//! │ │ +//! │ │ │ +//! │ │ │ block +//! │ │ │ +//! │ │ │ +//! │ │ ▼ +//! ┌────┴────┐ │ ┌─────────┐ +//! │ │ fail │ block │ │ +//! │ Waiting │◄───────┐ │ ┌─────►│ Blocked │◄──────────┐ +//! ┌───┤ │ │ │ │ │ │ │ +//! │ └────┬────┘ │ │ │ └────┬────┘ │ +//! │ block │ │ │ │ │ │ +//! │ │ timeout │ ▼ │ │ redeem, │ +//! │ │ ┌────┴─────┴───┐ │ block timeout │ +//! │ │ │ │ │ │ +//! │ └───────►│ Connecting │◄──────┘ │ +//! │ │ │ │ +//! │ └─────┬────┬───┘ │ +//! │ │ ▲ │ │ +//! │ success │ │ │ detect │ +//! │ │ │ │ ┌──────────┐ │ +//! │ ┌───────────┐ │ │ │ │ │ │ +//! │ │ │◄────────┘ │ │ │ Loopback │ │ +//! │ │ Connected │ │ └─────►│ │ │ +//! │ │ │ dropped/ │ └──────────┘ │ +//! │ └─────┬─────┴───────────┘ │ +//! │ │ timeout │ +//! │ │ block │ +//! └───────┴─────────────────────────────────────────────────┘ +//! ``` +//! +//! # Timeouts/safety +//! +//! 
The `sweep` transition for connections usually does not happen during normal operations. Three +//! causes are typical for it: +//! +//! * A configured TCP timeout above [`OutgoingConfig::sweep_timeout`]. +//! * Very slow responses from remote peers (similar to a Slowloris-attack) +//! * Faulty handling by the driver of the [`OutgoingManager`], i.e. the outside component. +//! +//! Should a dial attempt exceed a certain timeout, it is considered failed and put into the waiting +//! state again. +//! +//! If a conflict (multiple successful dial results) occurs, the more recent connection takes +//! precedence over the previous one. This prevents problems when a notification of a terminated +//! connection is overtaken by the new connection announcement. + +use std::{ + collections::{hash_map::Entry, HashMap}, + error::Error, + fmt::{self, Debug, Display, Formatter}, + mem, + net::SocketAddr, + time::{Duration, Instant}, +}; + +use datasize::DataSize; +use prometheus::IntGauge; +use rand::Rng; +use tracing::{debug, error, error_span, field::Empty, info, trace, warn, Span}; + +use super::{ + blocklist::BlocklistJustification, + display_error, + health::{ConnectionHealth, HealthCheckOutcome, HealthConfig, Nonce, TaggedTimestamp}, + NodeId, +}; + +/// An outgoing connection/address in various states. +#[derive(DataSize, Debug)] +pub struct Outgoing +where + H: DataSize, + E: DataSize, +{ + /// Whether or not the address is unforgettable, see `learn_addr` for details. + pub(super) is_unforgettable: bool, + /// The current state the connection/address is in. + pub(super) state: OutgoingState, +} + +/// Active state for a connection/address. +#[derive(DataSize, Debug)] +pub(crate) enum OutgoingState +where + H: DataSize, + E: DataSize, +{ + /// The outgoing address has been known for the first time and we are currently connecting. + Connecting { + /// Number of attempts that failed, so far. + failures_so_far: u8, + /// Time when the connection attempt was instantiated. 
+ since: Instant, + }, + /// The connection has failed at least one connection attempt and is waiting for a retry. + Waiting { + /// Number of attempts that failed, so far. + failures_so_far: u8, + /// The most recent connection error. + /// + /// If not given, the connection was put into a `Waiting` state due to a sweep timeout. + error: Option, + /// The precise moment when the last connection attempt failed. + last_failure: Instant, + }, + /// An established outgoing connection. + Connected { + /// The peers remote ID. + peer_id: NodeId, + /// Handle to a communication channel that can be used to send data to the peer. + /// + /// Can be a channel to decouple sending, or even a direct connection handle. + handle: H, + /// Health of the connection. + health: ConnectionHealth, + }, + /// The address was blocked and will not be retried. + Blocked { + /// Since when the block took effect. + since: Instant, + /// The justification given for blocking. + justification: BlocklistJustification, + /// Until when the block took effect. + until: Instant, + }, + /// The address is owned by ourselves and will not be tried again. + Loopback, +} + +impl Display for OutgoingState +where + H: DataSize, + E: DataSize, +{ + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self { + OutgoingState::Connecting { + failures_so_far, .. + } => { + write!(f, "connecting({})", failures_so_far) + } + OutgoingState::Waiting { + failures_so_far, .. + } => write!(f, "waiting({})", failures_so_far), + OutgoingState::Connected { .. } => write!(f, "connected"), + OutgoingState::Blocked { .. } => write!(f, "blocked"), + OutgoingState::Loopback => write!(f, "loopback"), + } + } +} + +/// The result of dialing `SocketAddr`. +#[derive(Debug)] +pub enum DialOutcome { + /// A connection was successfully established. + Successful { + /// The address dialed. + addr: SocketAddr, + /// A handle to send data down the connection. + handle: H, + /// The remote peer's authenticated node ID. 
+ node_id: NodeId, + /// The moment the connection was established. + when: Instant, + }, + /// The connection attempt failed. + Failed { + /// The address dialed. + addr: SocketAddr, + /// The error encountered while dialing. + error: E, + /// The moment the connection attempt failed. + when: Instant, + }, + /// The connection was aborted, because the remote peer turned out to be a loopback. + Loopback { + /// The address used to connect. + addr: SocketAddr, + }, +} + +impl DialOutcome { + /// Retrieves the socket address from the `DialOutcome`. + fn addr(&self) -> SocketAddr { + match self { + DialOutcome::Successful { addr, .. } + | DialOutcome::Failed { addr, .. } + | DialOutcome::Loopback { addr, .. } => *addr, + } + } +} + +/// A request made for dialing. +#[derive(Clone, Debug)] +#[must_use] +pub(crate) enum DialRequest { + /// Attempt to connect to the outgoing socket address. + /// + /// For every time this request is emitted, there must be a corresponding call to + /// `handle_dial_outcome` eventually. + /// + /// Any logging of connection issues should be done in the context of `span` for better log + /// output. + Dial { addr: SocketAddr, span: Span }, + + /// Disconnects a potentially existing connection. + /// + /// Used when a peer has been blocked or should be disconnected for other reasons. Note that + /// this request can immediately be followed by a connection request, as in the case of a ping + /// timeout. + Disconnect { handle: H, span: Span }, + + /// Send a ping to a peer. + SendPing { + peer_id: NodeId, + nonce: Nonce, + span: Span, + }, +} + +impl Display for DialRequest +where + H: Display, +{ + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self { + DialRequest::Dial { addr, .. } => { + write!(f, "dial: {}", addr) + } + DialRequest::Disconnect { handle, .. } => { + write!(f, "disconnect: {}", handle) + } + DialRequest::SendPing { peer_id, nonce, .. 
} => { + write!(f, "ping[{}]: {}", nonce, peer_id) + } + } + } +} + +#[derive(DataSize, Debug)] +/// Connection settings for the outgoing connection manager. +pub struct OutgoingConfig { + /// The maximum number of attempts before giving up and forgetting an address, if permitted. + pub(crate) retry_attempts: u8, + /// The basic time slot for exponential backoff when reconnecting. + pub(crate) base_timeout: Duration, + /// Time until an outgoing address is unblocked. + pub(crate) unblock_after_min: Duration, + pub(crate) unblock_after_max: Duration, + /// Safety timeout, after which a connection is no longer expected to finish dialing. + pub(crate) sweep_timeout: Duration, + /// Health check configuration. + pub(crate) health: HealthConfig, +} + +impl OutgoingConfig { + /// Calculates the backoff time. + /// + /// `failed_attempts` (n) is the number of previous attempts *before* the current failure (thus + /// starting at 0). The backoff time will be double for each attempt. + fn calc_backoff(&self, failed_attempts: u8) -> Duration { + (1u32 << failed_attempts as u32) * self.base_timeout + } +} + +/// Manager of outbound connections. +/// +/// See the module documentation for usage suggestions. +#[derive(DataSize, Debug)] +pub struct OutgoingManager +where + H: DataSize, + E: DataSize, +{ + /// Outgoing connections subsystem configuration. + config: OutgoingConfig, + /// Mapping of address to their current connection state. + pub(super) outgoing: HashMap>, + /// Routing table. + /// + /// Contains a mapping from node IDs to connected socket addresses. A missing entry means that + /// the destination is not connected. + routes: HashMap, + /// A set of outgoing metrics. + #[data_size(skip)] + metrics: OutgoingMetrics, +} + +/// A set of metrics used by the outgoing component. +#[derive(Clone, Debug)] +pub(super) struct OutgoingMetrics { + /// Number of outgoing connections in connecting state. 
+ pub(super) out_state_connecting: IntGauge, + /// Number of outgoing connections in waiting state. + pub(super) out_state_waiting: IntGauge, + /// Number of outgoing connections in connected state. + pub(super) out_state_connected: IntGauge, + /// Number of outgoing connections in blocked state. + pub(super) out_state_blocked: IntGauge, + /// Number of outgoing connections in loopback state. + pub(super) out_state_loopback: IntGauge, +} + +// Note: We only implement `Default` here for use in testing with `OutgoingManager::new`. +#[cfg(test)] +impl Default for OutgoingMetrics { + fn default() -> Self { + Self { + out_state_connecting: IntGauge::new( + "out_state_connecting", + "internal out_state_connecting", + ) + .unwrap(), + out_state_waiting: IntGauge::new("out_state_waiting", "internal out_state_waiting") + .unwrap(), + out_state_connected: IntGauge::new( + "out_state_connected", + "internal out_state_connected", + ) + .unwrap(), + out_state_blocked: IntGauge::new("out_state_blocked", "internal out_state_blocked") + .unwrap(), + out_state_loopback: IntGauge::new("out_state_loopback", "internal loopback").unwrap(), + } + } +} + +impl OutgoingManager +where + H: DataSize, + E: DataSize, +{ + /// Creates a new outgoing manager with a set of metrics that is not connected to any registry. + #[cfg(test)] + #[inline] + pub(super) fn new(config: OutgoingConfig) -> Self { + Self::with_metrics(config, Default::default()) + } + + /// Creates a new outgoing manager with an already existing set of metrics. + pub(super) fn with_metrics(config: OutgoingConfig, metrics: OutgoingMetrics) -> Self { + Self { + config, + outgoing: Default::default(), + routes: Default::default(), + metrics, + } + } + + /// Returns a reference to the internal metrics. + #[cfg(test)] + fn metrics(&self) -> &OutgoingMetrics { + &self.metrics + } +} + +/// Creates a logging span for a specific connection. 
+#[inline] +fn make_span(addr: SocketAddr, outgoing: Option<&Outgoing>) -> Span +where + H: DataSize, + E: DataSize, +{ + // Note: The jury is still out on whether we want to create a single span per connection and + // cache it, or create a new one (with the same connection ID) each time this is called. The + // advantage of the former is external tools have it easier correlating all related + // information, while the drawback is not being able to change the parent span link, which + // might be awkward. + + if let Some(outgoing) = outgoing { + match outgoing.state { + OutgoingState::Connected { peer_id, .. } => { + error_span!("outgoing", %addr, state=%outgoing.state, %peer_id, consensus_key=Empty) + } + _ => { + error_span!("outgoing", %addr, state=%outgoing.state, peer_id=Empty, consensus_key=Empty) + } + } + } else { + error_span!("outgoing", %addr, state = "-") + } +} + +impl OutgoingManager +where + H: DataSize + Clone, + E: DataSize + Error, +{ + /// Changes the state of an outgoing connection. + /// + /// Will trigger an update of the routing table if necessary. Does not emit any other + /// side-effects. + /// + /// Returns the new state, as well as any residual handle. + fn change_outgoing_state( + &mut self, + addr: SocketAddr, + mut new_state: OutgoingState, + ) -> (&mut Outgoing, Option) { + let (prev_state, new_outgoing) = match self.outgoing.entry(addr) { + Entry::Vacant(vacant) => { + let inserted = vacant.insert(Outgoing { + state: new_state, + is_unforgettable: false, + }); + + (None, inserted) + } + + Entry::Occupied(occupied) => { + let prev = occupied.into_mut(); + + mem::swap(&mut prev.state, &mut new_state); + + // `new_state` and `prev.state` are swapped now. + (Some(new_state), prev) + } + }; + + // Update the routing table. + match (&prev_state, &new_outgoing.state) { + (Some(OutgoingState::Connected { .. }), OutgoingState::Connected { .. 
}) => { + trace!("route unchanged, already connected"); + } + + // Dropping from connected to any other state requires clearing the route. + (Some(OutgoingState::Connected { peer_id, .. }), _) => { + debug!(%peer_id, "route removed"); + self.routes.remove(peer_id); + } + + // Otherwise we have established a new route. + (_, OutgoingState::Connected { peer_id, .. }) => { + debug!(%peer_id, "route added"); + self.routes.insert(*peer_id, addr); + } + + _ => { + trace!("route unchanged"); + } + } + + // Update the metrics, decreasing the count of the state that was left, while increasing + // the new state. Note that this will lead to a non-atomic dec/inc if the previous state + // was the same as before. + match prev_state { + Some(OutgoingState::Blocked { .. }) => self.metrics.out_state_blocked.dec(), + Some(OutgoingState::Connected { .. }) => self.metrics.out_state_connected.dec(), + Some(OutgoingState::Connecting { .. }) => self.metrics.out_state_connecting.dec(), + Some(OutgoingState::Loopback) => self.metrics.out_state_loopback.dec(), + Some(OutgoingState::Waiting { .. }) => self.metrics.out_state_waiting.dec(), + None => { + // Nothing to do, there was no previous state. + } + } + + match new_outgoing.state { + OutgoingState::Blocked { .. } => self.metrics.out_state_blocked.inc(), + OutgoingState::Connected { .. } => self.metrics.out_state_connected.inc(), + OutgoingState::Connecting { .. } => self.metrics.out_state_connecting.inc(), + OutgoingState::Loopback => self.metrics.out_state_loopback.inc(), + OutgoingState::Waiting { .. } => self.metrics.out_state_waiting.inc(), + } + + // Finally, deconstruct the previous state in case we need to preserve the handle. + let handle = if let Some(OutgoingState::Connected { handle, .. }) = prev_state { + Some(handle) + } else { + None + }; + + (new_outgoing, handle) + } + + /// Retrieves the address by peer. 
+ pub(crate) fn get_addr(&self, peer_id: NodeId) -> Option { + self.routes.get(&peer_id).copied() + } + + /// Retrieves a handle to a peer. + /// + /// Primary function to send data to peers; clients retrieve a handle to it which can then + /// be used to send data. + pub(crate) fn get_route(&self, peer_id: NodeId) -> Option<&H> { + let outgoing = self.outgoing.get(self.routes.get(&peer_id)?)?; + + if let OutgoingState::Connected { ref handle, .. } = outgoing.state { + Some(handle) + } else { + None + } + } + + /// Iterates over all connected peer IDs. + pub(crate) fn connected_peers(&'_ self) -> impl Iterator + '_ { + self.routes.keys().copied() + } + + /// Notify about a potentially new address that has been discovered. + /// + /// Immediately triggers the connection process to said address if it was not known before. + /// + /// A connection marked `unforgettable` will never be evicted but reset instead when it exceeds + /// the retry limit. + pub(crate) fn learn_addr( + &mut self, + addr: SocketAddr, + unforgettable: bool, + now: Instant, + ) -> Option> { + let span = make_span(addr, self.outgoing.get(&addr)); + span.clone() + .in_scope(move || match self.outgoing.entry(addr) { + Entry::Occupied(_) => { + trace!("ignoring already known address"); + None + } + Entry::Vacant(_vacant) => { + info!("connecting to newly learned address"); + let (outgoing, _) = self.change_outgoing_state( + addr, + OutgoingState::Connecting { + failures_so_far: 0, + since: now, + }, + ); + if outgoing.is_unforgettable != unforgettable { + outgoing.is_unforgettable = unforgettable; + debug!(unforgettable, "marked"); + } + Some(DialRequest::Dial { addr, span }) + } + }) + } + + pub(crate) fn block_addr( + &mut self, + addr: SocketAddr, + now: Instant, + justification: BlocklistJustification, + rng: &mut R, + ) -> Option> { + let span = make_span(addr, self.outgoing.get(&addr)); + span.clone() + .in_scope(move || match self.outgoing.entry(addr) { + Entry::Vacant(_vacant) => { + 
info!("unknown address blocked"); + let until = self.calculate_block_until(now, rng); + self.change_outgoing_state( + addr, + OutgoingState::Blocked { + since: now, + justification, + until, + }, + ); + None + } + Entry::Occupied(occupied) => match occupied.get().state { + OutgoingState::Blocked { .. } => { + debug!("address already blocked"); + None + } + OutgoingState::Loopback => { + warn!("loopback address block ignored"); + None + } + OutgoingState::Connected { ref handle, .. } => { + info!("connected address blocked, disconnecting"); + let handle = handle.clone(); + let until = self.calculate_block_until(now, rng); + self.change_outgoing_state( + addr, + OutgoingState::Blocked { + since: now, + justification, + until, + }, + ); + Some(DialRequest::Disconnect { span, handle }) + } + OutgoingState::Waiting { .. } | OutgoingState::Connecting { .. } => { + let until = self.calculate_block_until(now, rng); + info!("address blocked"); + self.change_outgoing_state( + addr, + OutgoingState::Blocked { + since: now, + justification, + until, + }, + ); + None + } + }, + }) + } + + /// Checks if an address is blocked. + #[cfg(test)] + pub(crate) fn is_blocked(&self, addr: SocketAddr) -> bool { + match self.outgoing.get(&addr) { + Some(outgoing) => matches!(outgoing.state, OutgoingState::Blocked { .. }), + None => false, + } + } + + /// Removes an address from the block list. + /// + /// Does nothing if the address was not blocked. + // This function is currently not in use by `network` itself. + #[allow(dead_code)] + pub(crate) fn redeem_addr(&mut self, addr: SocketAddr, now: Instant) -> Option> { + let span = make_span(addr, self.outgoing.get(&addr)); + span.clone() + .in_scope(move || match self.outgoing.entry(addr) { + Entry::Vacant(_) => { + debug!("unknown address redeemed"); + None + } + Entry::Occupied(occupied) => match occupied.get().state { + OutgoingState::Blocked { .. 
} => { + self.change_outgoing_state( + addr, + OutgoingState::Connecting { + failures_so_far: 0, + since: now, + }, + ); + Some(DialRequest::Dial { addr, span }) + } + _ => { + debug!("address redemption ignored, not blocked"); + None + } + }, + }) + } + + /// Records a pong being received. + pub(super) fn record_pong(&mut self, peer_id: NodeId, pong: TaggedTimestamp) -> bool { + let addr = if let Some(addr) = self.routes.get(&peer_id) { + *addr + } else { + debug!(%peer_id, nonce=%pong.nonce(), "ignoring pong received from peer without route"); + return false; + }; + + if let Some(outgoing) = self.outgoing.get_mut(&addr) { + if let OutgoingState::Connected { ref mut health, .. } = outgoing.state { + health.record_pong(&self.config.health, pong) + } else { + debug!(%peer_id, nonce=%pong.nonce(), "ignoring pong received from peer that is not in connected state"); + false + } + } else { + debug!(%peer_id, nonce=%pong.nonce(), "ignoring pong received from peer without route"); + false + } + } + + /// Performs housekeeping like reconnection or unblocking peers. + /// + /// This function must periodically be called. A good interval is every second. + pub(super) fn perform_housekeeping( + &mut self, + rng: &mut R, + now: Instant, + ) -> Vec> { + let mut to_forget = Vec::new(); + let mut to_fail = Vec::new(); + let mut to_ping_timeout = Vec::new(); + let mut to_reconnect = Vec::new(); + let mut to_ping = Vec::new(); + + for (&addr, outgoing) in &mut self.outgoing { + // Note: `Span::in_scope` is no longer serviceable here due to borrow limitations. + let _span_guard = make_span(addr, Some(outgoing)).entered(); + + match outgoing.state { + // Decide whether to attempt reconnecting a failed-waiting address. + OutgoingState::Waiting { + failures_so_far, + last_failure, + .. + } => { + if failures_so_far > self.config.retry_attempts { + if outgoing.is_unforgettable { + // Unforgettable addresses simply have their timer reset. 
+ info!("unforgettable address reset"); + + to_reconnect.push((addr, 0)); + } else { + // Address had too many attempts at reconnection, we will forget + // it after exiting this closure. + to_forget.push(addr); + + info!("address forgotten"); + } + } else { + // The address has not exceeded the limit, so check if it is due. + let due = last_failure + self.config.calc_backoff(failures_so_far); + if now >= due { + debug!(attempts = failures_so_far, "address reconnecting"); + + to_reconnect.push((addr, failures_so_far)); + } + } + } + + OutgoingState::Blocked { until, .. } => { + if now >= until { + info!("address unblocked"); + to_reconnect.push((addr, 0)); + } + } + + OutgoingState::Connecting { + since, + failures_so_far, + } => { + let timeout = since + self.config.sweep_timeout; + if now >= timeout { + // The outer component has not called us with a `DialOutcome` in a + // reasonable amount of time. This should happen very rarely, ideally + // never. + warn!("address timed out connecting, was swept"); + + // Count the timeout as a failure against the connection. + to_fail.push((addr, failures_so_far + 1)); + } + } + OutgoingState::Connected { + peer_id, + ref mut health, + .. + } => { + // Check if we need to send a ping, or give up and disconnect. + let health_outcome = health.update_health(rng, &self.config.health, now); + + match health_outcome { + HealthCheckOutcome::DoNothing => { + // Nothing to do. + } + HealthCheckOutcome::SendPing(nonce) => { + trace!(%nonce, "sending ping"); + to_ping.push((peer_id, addr, nonce)); + } + HealthCheckOutcome::GiveUp => { + info!("disconnecting after ping retries were exhausted"); + to_ping_timeout.push(addr); + } + } + } + OutgoingState::Loopback => { + // Entry is ignored. Not outputting any `trace` because this is log spam even at + // the `trace` level. + } + } + } + + // Remove all addresses marked for forgetting. 
+ for addr in to_forget { + self.outgoing.remove(&addr); + } + + // Fail connections that are taking way too long to connect. + for (addr, failures_so_far) in to_fail { + let span = make_span(addr, self.outgoing.get(&addr)); + + span.in_scope(|| { + self.change_outgoing_state( + addr, + OutgoingState::Waiting { + failures_so_far, + error: None, + last_failure: now, + }, + ) + }); + } + + let mut dial_requests = Vec::new(); + + // Request disconnection from failed pings. + for addr in to_ping_timeout { + let span = make_span(addr, self.outgoing.get(&addr)); + + let (_, opt_handle) = span.clone().in_scope(|| { + self.change_outgoing_state( + addr, + OutgoingState::Connecting { + failures_so_far: 0, + since: now, + }, + ) + }); + + if let Some(handle) = opt_handle { + dial_requests.push(DialRequest::Disconnect { + handle, + span: span.clone(), + }); + } else { + error!("did not expect connection under ping timeout to not have a residual connection handle. this is a bug"); + } + dial_requests.push(DialRequest::Dial { addr, span }); + } + + // Reconnect others. + dial_requests.extend(to_reconnect.into_iter().map(|(addr, failures_so_far)| { + let span = make_span(addr, self.outgoing.get(&addr)); + + span.clone().in_scope(|| { + self.change_outgoing_state( + addr, + OutgoingState::Connecting { + failures_so_far, + since: now, + }, + ) + }); + + DialRequest::Dial { addr, span } + })); + + // Finally, schedule pings. + dial_requests.extend(to_ping.into_iter().map(|(peer_id, addr, nonce)| { + let span = make_span(addr, self.outgoing.get(&addr)); + DialRequest::SendPing { + peer_id, + nonce, + span, + } + })); + + dial_requests + } + + /// Handles the outcome of a dialing attempt. + /// + /// Note that reconnects will earliest happen on the next `perform_housekeeping` call. 
+ pub(crate) fn handle_dial_outcome( + &mut self, + dial_outcome: DialOutcome, + ) -> Option> { + let addr = dial_outcome.addr(); + let span = make_span(addr, self.outgoing.get(&addr)); + + span.clone().in_scope(move || match dial_outcome { + DialOutcome::Successful { + addr, + handle, + node_id, + when + } => { + info!("established outgoing connection"); + + if let Some(Outgoing{ + state: OutgoingState::Blocked { .. }, .. + }) = self.outgoing.get(&addr) { + // If we connected to a blocked address, do not go into connected, but stay + // blocked instead. + Some(DialRequest::Disconnect{ + handle, span + }) + } else { + // Otherwise, just record the connected state. + self.change_outgoing_state( + addr, + OutgoingState::Connected { + peer_id: node_id, + handle, + health: ConnectionHealth::new(when), + }, + ); + None + } + } + + DialOutcome::Failed { addr, error, when } => { + info!(err = display_error(&error), "outgoing connection failed"); + + if let Some(outgoing) = self.outgoing.get(&addr) { + match outgoing.state { + OutgoingState::Connecting { failures_so_far,.. } => { + self.change_outgoing_state( + addr, + OutgoingState::Waiting { + failures_so_far: failures_so_far + 1, + error: Some(error), + last_failure: when, + }, + ); + None + } + OutgoingState::Blocked { .. } => { + debug!("failed dial outcome after block ignored"); + + // We do not set the connection to "waiting" if an out-of-order failed + // connection arrives, but continue to honor the blocking. + None + } + OutgoingState::Waiting { .. } | + OutgoingState::Connected { .. } | + OutgoingState::Loopback => { + warn!( + "processing dial outcome on a connection that was not marked as connecting or blocked" + ); + + None + } + } + } else { + warn!("processing dial outcome non-existent connection"); + + // If the connection does not exist, do not introduce it! 
+ None + } + } + DialOutcome::Loopback { addr } => { + info!("found loopback address"); + self.change_outgoing_state(addr, OutgoingState::Loopback); + None + } + }) + } + + /// Notifies the connection manager about a dropped connection. + /// + /// This will usually result in an immediate reconnection. + pub(crate) fn handle_connection_drop( + &mut self, + addr: SocketAddr, + now: Instant, + ) -> Option> { + let span = make_span(addr, self.outgoing.get(&addr)); + + span.clone().in_scope(move || { + if let Some(outgoing) = self.outgoing.get(&addr) { + match outgoing.state { + OutgoingState::Waiting { .. } + | OutgoingState::Loopback + | OutgoingState::Connecting { .. } => { + // We should, under normal circumstances, not receive drop notifications for + // any of these. Connection failures are handled by the dialer. + warn!("unexpected drop notification"); + None + } + OutgoingState::Connected { .. } => { + // Drop the handle, immediately initiate a reconnection. + self.change_outgoing_state( + addr, + OutgoingState::Connecting { + failures_so_far: 0, + since: now, + }, + ); + Some(DialRequest::Dial { addr, span }) + } + OutgoingState::Blocked { .. } => { + // Blocked addresses ignore connection drops. 
+ debug!("received drop notification for blocked connection"); + None + } + } + } else { + warn!("received connection drop notification for unknown connection"); + None + } + }) + } + + fn calculate_block_until(&self, now: Instant, rng: &mut R) -> Instant { + let min = self.config.unblock_after_min; + let max = self.config.unblock_after_max; + if min == max { + return now + min; + } + let block_duration = rng.gen_range(min..=max); + now + block_duration + } +} + +#[cfg(test)] +mod tests { + use std::{net::SocketAddr, time::Duration}; + + use assert_matches::assert_matches; + use datasize::DataSize; + use rand::Rng; + use thiserror::Error; + + use super::{DialOutcome, DialRequest, NodeId, OutgoingConfig, OutgoingManager}; + use crate::{ + components::network::{ + blocklist::BlocklistJustification, + health::{HealthConfig, TaggedTimestamp}, + }, + testing::{init_logging, test_clock::TestClock}, + }; + + /// Error for test dialer. + /// + /// Tracks a configurable id for the error. + #[derive(DataSize, Debug, Error)] + #[error("test dialer error({})", id)] + struct TestDialerError { + id: u32, + } + + /// Setup an outgoing configuration for testing. + fn test_config() -> OutgoingConfig { + OutgoingConfig { + retry_attempts: 3, + base_timeout: Duration::from_secs(1), + unblock_after_min: Duration::from_secs(60), + unblock_after_max: Duration::from_secs(60), + sweep_timeout: Duration::from_secs(45), + health: HealthConfig::test_config(), + } + } + + /// Setup an outgoing configuration for testing. + fn config_variant_unblock() -> OutgoingConfig { + OutgoingConfig { + retry_attempts: 3, + base_timeout: Duration::from_secs(1), + unblock_after_min: Duration::from_secs(60), + unblock_after_max: Duration::from_secs(80), + sweep_timeout: Duration::from_secs(45), + health: HealthConfig::test_config(), + } + } + + /// Helper function that checks if a given dial request actually dials the expected address. 
+ fn dials<'a, H, T>(expected: SocketAddr, requests: T) -> bool + where + T: IntoIterator> + 'a, + H: 'a, + { + for req in requests.into_iter() { + if let DialRequest::Dial { addr, .. } = req { + if *addr == expected { + return true; + } + } + } + + false + } + + /// Helper function that checks if a given dial request actually disconnects the expected + /// address. + fn disconnects<'a, H, T>(expected: H, requests: T) -> bool + where + T: IntoIterator> + 'a, + H: 'a + PartialEq, + { + for req in requests.into_iter() { + if let DialRequest::Disconnect { handle, .. } = req { + if *handle == expected { + return true; + } + } + } + + false + } + + #[test] + fn successful_lifecycle() { + init_logging(); + + let mut rng = crate::new_rng(); + let mut clock = TestClock::new(); + + let addr_a: SocketAddr = "1.2.3.4:1234".parse().unwrap(); + let id_a = NodeId::random(&mut rng); + + let mut manager = OutgoingManager::::new(test_config()); + + // We begin by learning a single, regular address, triggering a dial request. + assert!(dials( + addr_a, + &manager.learn_addr(addr_a, false, clock.now()) + )); + assert_eq!(manager.metrics().out_state_connecting.get(), 1); + + // Our first connection attempt fails. The connection should now be in waiting state, but + // not reconnect, since the minimum delay is 2 seconds (2*base_timeout). + assert!(manager + .handle_dial_outcome(DialOutcome::Failed { + addr: addr_a, + error: TestDialerError { id: 1 }, + when: clock.now(), + },) + .is_none()); + assert_eq!(manager.metrics().out_state_connecting.get(), 0); + assert_eq!(manager.metrics().out_state_waiting.get(), 1); + + // Performing housekeeping multiple times should not make a difference. 
+ assert!(manager + .perform_housekeeping(&mut rng, clock.now()) + .is_empty()); + assert!(manager + .perform_housekeeping(&mut rng, clock.now()) + .is_empty()); + assert!(manager + .perform_housekeeping(&mut rng, clock.now()) + .is_empty()); + assert!(manager + .perform_housekeeping(&mut rng, clock.now()) + .is_empty()); + + // Advancing the clock will trigger a reconnection on the next housekeeping. + clock.advance_time(2_000); + assert!(dials( + addr_a, + &manager.perform_housekeeping(&mut rng, clock.now()) + )); + assert_eq!(manager.metrics().out_state_connecting.get(), 1); + assert_eq!(manager.metrics().out_state_waiting.get(), 0); + + // This time the connection succeeds. + assert!(manager + .handle_dial_outcome(DialOutcome::Successful { + addr: addr_a, + handle: 99, + node_id: id_a, + when: clock.now(), + },) + .is_none()); + assert_eq!(manager.metrics().out_state_connecting.get(), 0); + assert_eq!(manager.metrics().out_state_connected.get(), 1); + + // The routing table should have been updated and should return the handle. + assert_eq!(manager.get_route(id_a), Some(&99)); + assert_eq!(manager.get_addr(id_a), Some(addr_a)); + + // Time passes, and our connection drops. Reconnecting should be immediate. + assert!(manager + .perform_housekeeping(&mut rng, clock.now()) + .is_empty()); + clock.advance_time(20_000); + assert!(dials( + addr_a, + &manager.handle_connection_drop(addr_a, clock.now()) + )); + assert_eq!(manager.metrics().out_state_connecting.get(), 1); + assert_eq!(manager.metrics().out_state_waiting.get(), 0); + + // The route should have been cleared. + assert!(manager.get_route(id_a).is_none()); + assert!(manager.get_addr(id_a).is_none()); + + // Reconnection is already in progress, so we do not expect another request on housekeeping. 
+ assert!(manager + .perform_housekeeping(&mut rng, clock.now()) + .is_empty()); + } + + #[test] + fn connections_forgotten_after_too_many_tries() { + init_logging(); + + let mut rng = crate::new_rng(); + let mut clock = TestClock::new(); + + let addr_a: SocketAddr = "1.2.3.4:1234".parse().unwrap(); + // Address `addr_b` will be a known address. + let addr_b: SocketAddr = "5.6.7.8:5678".parse().unwrap(); + + let mut manager = OutgoingManager::::new(test_config()); + + // First, attempt to connect. Tests are set to 3 retries after 2, 4 and 8 seconds. + assert!(dials( + addr_a, + &manager.learn_addr(addr_a, false, clock.now()) + )); + assert!(dials( + addr_b, + &manager.learn_addr(addr_b, true, clock.now()) + )); + + // Fail the first connection attempts, not triggering a retry (timeout not reached yet). + assert!(manager + .handle_dial_outcome(DialOutcome::Failed { + addr: addr_a, + error: TestDialerError { id: 10 }, + when: clock.now(), + },) + .is_none()); + assert!(manager + .handle_dial_outcome(DialOutcome::Failed { + addr: addr_b, + error: TestDialerError { id: 11 }, + when: clock.now(), + },) + .is_none()); + + // Learning the address again should not cause a reconnection. + assert!(manager.learn_addr(addr_a, false, clock.now()).is_none()); + assert!(manager.learn_addr(addr_b, false, clock.now()).is_none()); + + assert!(manager + .perform_housekeeping(&mut rng, clock.now()) + .is_empty()); + assert!(manager.learn_addr(addr_a, false, clock.now()).is_none()); + assert!(manager.learn_addr(addr_b, false, clock.now()).is_none()); + + // After 1.999 seconds, reconnection should still be delayed. + clock.advance_time(1_999); + assert!(manager + .perform_housekeeping(&mut rng, clock.now()) + .is_empty()); + + // Adding 0.001 seconds finally is enough to reconnect. 
+ clock.advance_time(1); + let requests = manager.perform_housekeeping(&mut rng, clock.now()); + assert!(dials(addr_a, &requests)); + assert!(dials(addr_b, &requests)); + + // Waiting for more than the reconnection delay should not be harmful or change + // anything, as we are currently connecting. + clock.advance_time(6_000); + + assert!(manager + .perform_housekeeping(&mut rng, clock.now()) + .is_empty()); + + // Fail the connection again, wait 3.999 seconds, expecting no reconnection. + assert!(manager + .handle_dial_outcome(DialOutcome::Failed { + addr: addr_a, + error: TestDialerError { id: 40 }, + when: clock.now(), + },) + .is_none()); + assert!(manager + .handle_dial_outcome(DialOutcome::Failed { + addr: addr_b, + error: TestDialerError { id: 41 }, + when: clock.now(), + },) + .is_none()); + + clock.advance_time(3_999); + assert!(manager + .perform_housekeeping(&mut rng, clock.now()) + .is_empty()); + + // Adding 0.001 seconds finally again pushes us over the threshold. + clock.advance_time(1); + let requests = manager.perform_housekeeping(&mut rng, clock.now()); + assert!(dials(addr_a, &requests)); + assert!(dials(addr_b, &requests)); + + // Fail the connection quickly. + clock.advance_time(25); + assert!(manager + .handle_dial_outcome(DialOutcome::Failed { + addr: addr_a, + error: TestDialerError { id: 10 }, + when: clock.now(), + },) + .is_none()); + assert!(manager + .handle_dial_outcome(DialOutcome::Failed { + addr: addr_b, + error: TestDialerError { id: 10 }, + when: clock.now(), + },) + .is_none()); + assert!(manager + .perform_housekeeping(&mut rng, clock.now()) + .is_empty()); + + // The last attempt should happen 8 seconds after the error, not the last attempt. 
+ clock.advance_time(7_999); + assert!(manager + .perform_housekeeping(&mut rng, clock.now()) + .is_empty()); + + clock.advance_time(1); + let requests = manager.perform_housekeeping(&mut rng, clock.now()); + assert!(dials(addr_a, &requests)); + assert!(dials(addr_b, &requests)); + + // Fail the last attempt. No more reconnections should be happening. + assert!(manager + .handle_dial_outcome(DialOutcome::Failed { + addr: addr_a, + error: TestDialerError { id: 10 }, + when: clock.now(), + },) + .is_none()); + assert!(manager + .handle_dial_outcome(DialOutcome::Failed { + addr: addr_b, + error: TestDialerError { id: 10 }, + when: clock.now(), + },) + .is_none()); + + // Only the unforgettable address should be reconnecting. + let requests = manager.perform_housekeeping(&mut rng, clock.now()); + assert!(!dials(addr_a, &requests)); + assert!(dials(addr_b, &requests)); + + // But not `addr_a`, even after a long wait. + clock.advance_time(1_000_000_000); + assert!(manager + .perform_housekeeping(&mut rng, clock.now()) + .is_empty()); + } + + #[test] + fn blocking_works() { + init_logging(); + + let mut rng = crate::new_rng(); + let mut clock = TestClock::new(); + + let addr_a: SocketAddr = "1.2.3.4:1234".parse().unwrap(); + // We use `addr_b` as an unforgettable address, which does not mean it cannot be blocked! + let addr_b: SocketAddr = "5.6.7.8:5678".parse().unwrap(); + let addr_c: SocketAddr = "9.0.1.2:9012".parse().unwrap(); + let id_a = NodeId::random(&mut rng); + let id_b = NodeId::random(&mut rng); + let id_c = NodeId::random(&mut rng); + + let mut manager = OutgoingManager::::new(test_config()); + + // Block `addr_a` from the start. + assert!(manager + .block_addr( + addr_a, + clock.now(), + BlocklistJustification::MissingChainspecHash, + &mut rng, + ) + .is_none()); + + // Learning both `addr_a` and `addr_b` should only trigger a connection to `addr_b` now. 
+ assert!(manager.learn_addr(addr_a, false, clock.now()).is_none()); + assert!(dials( + addr_b, + &manager.learn_addr(addr_b, true, clock.now()) + )); + + assert!(manager + .perform_housekeeping(&mut rng, clock.now()) + .is_empty()); + + // Fifteen seconds later we succeed in connecting to `addr_b`. + clock.advance_time(15_000); + assert!(manager + .handle_dial_outcome(DialOutcome::Successful { + addr: addr_b, + handle: 101, + node_id: id_b, + when: clock.now(), + },) + .is_none()); + assert_eq!(manager.get_route(id_b), Some(&101)); + + // Invariant through housekeeping. + assert!(manager + .perform_housekeeping(&mut rng, clock.now()) + .is_empty()); + + assert_eq!(manager.get_route(id_b), Some(&101)); + + // Another fifteen seconds later, we block `addr_b`. + clock.advance_time(15_000); + assert!(disconnects( + 101, + &manager.block_addr( + addr_b, + clock.now(), + BlocklistJustification::MissingChainspecHash, + &mut rng, + ) + )); + + // `addr_c` will be blocked during the connection phase. + assert!(dials( + addr_c, + &manager.learn_addr(addr_c, false, clock.now()) + )); + assert!(manager + .block_addr( + addr_c, + clock.now(), + BlocklistJustification::MissingChainspecHash, + &mut rng, + ) + .is_none()); + + // We are still expect to provide a dial outcome, but afterwards, there should be no + // route to C and an immediate disconnection should be queued. + assert!(disconnects( + 42, + &manager.handle_dial_outcome(DialOutcome::Successful { + addr: addr_c, + handle: 42, + node_id: id_c, + when: clock.now(), + },) + )); + + assert!(manager + .perform_housekeeping(&mut rng, clock.now()) + .is_empty()); + + assert!(manager.get_route(id_c).is_none()); + + // At this point, we have blocked all three addresses. 30 seconds later, the first one is + // unblocked due to the block timing out. 
+ + clock.advance_time(30_000); + assert!(dials( + addr_a, + &manager.perform_housekeeping(&mut rng, clock.now()) + )); + + // Fifteen seconds later, B and C are still blocked, but we redeem B early. + clock.advance_time(15_000); + assert!(manager + .perform_housekeeping(&mut rng, clock.now()) + .is_empty()); + + assert!(dials(addr_b, &manager.redeem_addr(addr_b, clock.now()))); + + // Succeed both connections, and ensure we have routes to both. + assert!(manager + .handle_dial_outcome(DialOutcome::Successful { + addr: addr_b, + handle: 77, + node_id: id_b, + when: clock.now(), + },) + .is_none()); + assert!(manager + .handle_dial_outcome(DialOutcome::Successful { + addr: addr_a, + handle: 66, + node_id: id_a, + when: clock.now(), + },) + .is_none()); + + assert_eq!(manager.get_route(id_a), Some(&66)); + assert_eq!(manager.get_route(id_b), Some(&77)); + } + + #[test] + fn loopback_handled_correctly() { + init_logging(); + + let mut rng = crate::new_rng(); + let mut clock = TestClock::new(); + + let loopback_addr: SocketAddr = "1.2.3.4:1234".parse().unwrap(); + + let mut manager = OutgoingManager::::new(test_config()); + + // Loopback addresses are connected to only once, and then marked as loopback forever. + assert!(dials( + loopback_addr, + &manager.learn_addr(loopback_addr, false, clock.now()) + )); + + assert!(manager + .handle_dial_outcome(DialOutcome::Loopback { + addr: loopback_addr, + },) + .is_none()); + + assert!(manager + .perform_housekeeping(&mut rng, clock.now()) + .is_empty()); + + // Learning loopbacks again should not trigger another connection + assert!(manager + .learn_addr(loopback_addr, false, clock.now()) + .is_none()); + + // Blocking loopbacks does not result in a block, since regular blocks would clear after + // some time. 
+ assert!(manager + .block_addr( + loopback_addr, + clock.now(), + BlocklistJustification::MissingChainspecHash, + &mut rng, + ) + .is_none()); + + clock.advance_time(1_000_000_000); + + assert!(manager + .perform_housekeeping(&mut rng, clock.now()) + .is_empty()); + } + + #[test] + fn connected_peers_works() { + init_logging(); + + let mut rng = crate::new_rng(); + let clock = TestClock::new(); + + let addr_a: SocketAddr = "1.2.3.4:1234".parse().unwrap(); + let addr_b: SocketAddr = "5.6.7.8:5678".parse().unwrap(); + + let id_a = NodeId::random(&mut rng); + let id_b = NodeId::random(&mut rng); + + let mut manager = OutgoingManager::::new(test_config()); + + manager.learn_addr(addr_a, false, clock.now()); + manager.learn_addr(addr_b, true, clock.now()); + + manager.handle_dial_outcome(DialOutcome::Successful { + addr: addr_a, + handle: 22, + node_id: id_a, + when: clock.now(), + }); + manager.handle_dial_outcome(DialOutcome::Successful { + addr: addr_b, + handle: 33, + node_id: id_b, + when: clock.now(), + }); + + let mut peer_ids: Vec<_> = manager.connected_peers().collect(); + let mut expected = vec![id_a, id_b]; + + peer_ids.sort(); + expected.sort(); + + assert_eq!(peer_ids, expected); + } + + #[test] + fn sweeping_works() { + init_logging(); + + let mut rng = crate::new_rng(); + let mut clock = TestClock::new(); + + let addr_a: SocketAddr = "1.2.3.4:1234".parse().unwrap(); + + let id_a = NodeId::random(&mut rng); + + let mut manager = OutgoingManager::::new(test_config()); + + // Trigger a new connection via learning an address. + assert!(dials( + addr_a, + &manager.learn_addr(addr_a, false, clock.now()) + )); + + // We now let enough time pass to cause the connection to be considered failed aborted. + // No effects are expected at this point. + clock.advance_time(50_000); + assert!(manager + .perform_housekeeping(&mut rng, clock.now()) + .is_empty()); + + // The connection will now experience a regular failure. 
Since this is the first connection + // failure, it should reconnect after 2 seconds. + clock.advance_time(2_000); + assert!(dials( + addr_a, + &manager.perform_housekeeping(&mut rng, clock.now()) + )); + + // We now simulate the second connection (`handle: 2`) succeeding first, after 1 second. + clock.advance_time(1_000); + assert!(manager + .handle_dial_outcome(DialOutcome::Successful { + addr: addr_a, + handle: 2, + node_id: id_a, + when: clock.now(), + }) + .is_none()); + + // A route should now be established. + assert_eq!(manager.get_route(id_a), Some(&2)); + + // More time passes and the first connection attempt finally finishes. + clock.advance_time(30_000); + assert!(manager + .handle_dial_outcome(DialOutcome::Successful { + addr: addr_a, + handle: 1, + node_id: id_a, + when: clock.now(), + }) + .is_none()); + + // We now expect to be connected through the first connection (see documentation). + assert_eq!(manager.get_route(id_a), Some(&1)); + } + + #[test] + fn blocking_not_overridden_by_racing_failed_connections() { + init_logging(); + + let mut rng = crate::new_rng(); + let mut clock = TestClock::new(); + + let addr_a: SocketAddr = "1.2.3.4:1234".parse().unwrap(); + + let mut manager = OutgoingManager::::new(test_config()); + + assert!(!manager.is_blocked(addr_a)); + + // Block `addr_a` from the start. + assert!(manager + .block_addr( + addr_a, + clock.now(), + BlocklistJustification::MissingChainspecHash, + &mut rng, + ) + .is_none()); + assert!(manager.is_blocked(addr_a)); + + clock.advance_time(60); + + // Receive an "illegal" dial outcome, even though we did not dial. + assert!(manager + .handle_dial_outcome(DialOutcome::Failed { + addr: addr_a, + error: TestDialerError { id: 12345 }, + + // The moment the connection attempt failed. + when: clock.now(), + }) + .is_none()); + + // The failed connection should _not_ have reset the block! 
+ assert!(manager.is_blocked(addr_a)); + clock.advance_time(60); + assert!(manager.is_blocked(addr_a)); + + assert!(manager + .perform_housekeeping(&mut rng, clock.now()) + .is_empty()); + assert!(manager.is_blocked(addr_a)); + } + + #[test] + fn emits_and_accepts_pings() { + init_logging(); + + let mut rng = crate::new_rng(); + let mut clock = TestClock::new(); + + let addr: SocketAddr = "1.2.3.4:1234".parse().unwrap(); + let id = NodeId::random(&mut rng); + + // Setup a connection and put it into the connected state. + let mut manager = OutgoingManager::::new(test_config()); + + // Trigger a new connection via learning an address. + assert!(dials(addr, &manager.learn_addr(addr, false, clock.now()))); + + assert!(manager + .handle_dial_outcome(DialOutcome::Successful { + addr, + handle: 1, + node_id: id, + when: clock.now(), + }) + .is_none()); + + // Initial housekeeping should do nothing. + assert!(manager + .perform_housekeeping(&mut rng, clock.now()) + .is_empty()); + + // Go through 50 pings, which should be happening every 5 seconds. + for _ in 0..50 { + clock.advance(Duration::from_secs(3)); + assert!(manager + .perform_housekeeping(&mut rng, clock.now()) + .is_empty()); + clock.advance(Duration::from_secs(2)); + + let (_first_nonce, peer_id) = assert_matches!( + manager + .perform_housekeeping(&mut rng, clock.now()) + .as_slice(), + &[DialRequest::SendPing { nonce, peer_id, .. }] => (nonce, peer_id) + ); + assert_eq!(peer_id, id); + + // After a second, nothing should have changed. + assert!(manager + .perform_housekeeping(&mut rng, clock.now()) + .is_empty()); + + clock.advance(Duration::from_secs(1)); + // Waiting another second (two in total) should trigger another ping. + clock.advance(Duration::from_secs(1)); + + let (second_nonce, peer_id) = assert_matches!( + manager + .perform_housekeeping(&mut rng, clock.now()) + .as_slice(), + &[DialRequest::SendPing { nonce, peer_id, .. }] => (nonce, peer_id) + ); + + // Ensure the ID is correct. 
+ assert_eq!(peer_id, id); + + // Pong arrives 1 second later. + clock.advance(Duration::from_secs(1)); + + // We now feed back the ping with the correct nonce. This should not result in a ban. + assert!(!manager.record_pong( + peer_id, + TaggedTimestamp::from_parts(clock.now(), second_nonce), + )); + + // This resets the "cycle", the next ping is due in 5 seconds. + } + + // Now we are going to miss 4 pings in a row and expect a disconnect. + clock.advance(Duration::from_secs(5)); + assert_matches!( + manager + .perform_housekeeping(&mut rng, clock.now()) + .as_slice(), + &[DialRequest::SendPing { .. }] + ); + clock.advance(Duration::from_secs(2)); + assert_matches!( + manager + .perform_housekeeping(&mut rng, clock.now()) + .as_slice(), + &[DialRequest::SendPing { .. }] + ); + clock.advance(Duration::from_secs(2)); + assert_matches!( + manager + .perform_housekeeping(&mut rng, clock.now()) + .as_slice(), + &[DialRequest::SendPing { .. }] + ); + clock.advance(Duration::from_secs(2)); + assert_matches!( + manager + .perform_housekeeping(&mut rng, clock.now()) + .as_slice(), + &[DialRequest::SendPing { .. }] + ); + + // This results in a disconnect, followed by a reconnect. + clock.advance(Duration::from_secs(2)); + let dial_addr = assert_matches!( + manager + .perform_housekeeping(&mut rng, clock.now()) + .as_slice(), + &[DialRequest::Disconnect { .. }, DialRequest::Dial { addr, .. }] => addr + ); + + assert_eq!(dial_addr, addr); + } + + #[test] + fn indicates_issue_when_excessive_pongs_are_encountered() { + let mut rng = crate::new_rng(); + let mut clock = TestClock::new(); + + let addr: SocketAddr = "1.2.3.4:1234".parse().unwrap(); + let id = NodeId::random(&mut rng); + + // Ensure we have one connected node. 
+ let mut manager = OutgoingManager::::new(test_config()); + + assert!(dials(addr, &manager.learn_addr(addr, false, clock.now()))); + assert!(manager + .handle_dial_outcome(DialOutcome::Successful { + addr, + handle: 1, + node_id: id, + when: clock.now(), + }) + .is_none()); + + clock.advance(Duration::from_millis(50)); + + // We can now receive excessive pongs. + assert!(!manager.record_pong(id, TaggedTimestamp::from_parts(clock.now(), rng.gen()))); + assert!(!manager.record_pong(id, TaggedTimestamp::from_parts(clock.now(), rng.gen()))); + assert!(!manager.record_pong(id, TaggedTimestamp::from_parts(clock.now(), rng.gen()))); + assert!(!manager.record_pong(id, TaggedTimestamp::from_parts(clock.now(), rng.gen()))); + assert!(!manager.record_pong(id, TaggedTimestamp::from_parts(clock.now(), rng.gen()))); + assert!(!manager.record_pong(id, TaggedTimestamp::from_parts(clock.now(), rng.gen()))); + assert!(manager.record_pong(id, TaggedTimestamp::from_parts(clock.now(), rng.gen()))); + } + + #[test] + fn unblocking_in_variant_block_time() { + init_logging(); + + let mut rng = crate::new_rng(); + let mut clock = TestClock::new(); + let addr_a: SocketAddr = "1.2.3.4:1234".parse().unwrap(); + let mut manager = OutgoingManager::::new(config_variant_unblock()); + + assert!(!manager.is_blocked(addr_a)); + + // Block `addr_a` from the start. 
+ assert!(manager + .block_addr( + addr_a, + clock.now(), + BlocklistJustification::MissingChainspecHash, + &mut rng, + ) + .is_none()); + assert!(manager.is_blocked(addr_a)); + + clock.advance_time(config_variant_unblock().unblock_after_max.as_millis() as u64 + 1); + assert!(dials( + addr_a, + &manager.perform_housekeeping(&mut rng, clock.now()) + )); + assert!(!manager.is_blocked(addr_a)); + } +} diff --git a/node/src/components/network/peer_discovery.rs b/node/src/components/network/peer_discovery.rs deleted file mode 100644 index 93f4625767..0000000000 --- a/node/src/components/network/peer_discovery.rs +++ /dev/null @@ -1,54 +0,0 @@ -//! This module is home to types/functions related to using libp2p's `Kademlia` and `Identify` -//! behaviors, used for peer discovery. - -use libp2p::{ - core::{ProtocolName, PublicKey}, - identify::Identify, - kad::{ - record::store::{MemoryStore, MemoryStoreConfig}, - Kademlia, KademliaConfig, - }, - PeerId, -}; - -use super::{Config, ProtocolId}; -use crate::types::Chainspec; - -/// The inner portion of the `ProtocolId` for the kademlia behavior. A standard prefix and suffix -/// will be applied to create the full protocol name. -const KADEMLIA_PROTOCOL_NAME_INNER: &str = "kademlia-peer-discovery"; - -/// Constructs new libp2p kademlia and identify behaviors suitable for peer-discovery. -pub(super) fn new_behaviors( - config: &Config, - chainspec: &Chainspec, - our_public_key: PublicKey, -) -> (Kademlia, Identify) { - let our_peer_id = PeerId::from(our_public_key.clone()); - - // We don't intend to actually store anything in the Kademlia DHT, so configure accordingly. 
- let memory_store_config = MemoryStoreConfig { - max_records: 0, - max_value_bytes: 0, - ..Default::default() - }; - let memory_store = MemoryStore::with_config(our_peer_id, memory_store_config); - - let protocol_id = ProtocolId::new(chainspec, KADEMLIA_PROTOCOL_NAME_INNER); - let mut kademlia_config = KademliaConfig::default(); - kademlia_config - .set_protocol_name(protocol_id.protocol_name().to_vec()) - // Require iterative queries to use disjoint paths for increased security. - .disjoint_query_paths(true) - // Closes the connection if it's idle for this amount of time. - .set_connection_idle_timeout(config.connection_keep_alive.into()); - let kademlia = Kademlia::with_config(our_peer_id, memory_store, kademlia_config); - - // Protocol version and agent version are separate to the protocol ID for the Identify behavior. - // See https://github.com/libp2p/specs/tree/master/identify for further details. - let protocol_version = format!("/casper/{}", chainspec.protocol_config.version); - let agent_version = format!("{}/{}", env!("CARGO_PKG_NAME"), env!("CARGO_PKG_VERSION")); - let identify = Identify::new(protocol_version, agent_version, our_public_key); - - (kademlia, identify) -} diff --git a/node/src/components/network/protocol_id.rs b/node/src/components/network/protocol_id.rs deleted file mode 100644 index 1d8b907911..0000000000 --- a/node/src/components/network/protocol_id.rs +++ /dev/null @@ -1,36 +0,0 @@ -use libp2p::core::ProtocolName; - -use crate::types::Chainspec; - -/// The max length of protocol ID supported by libp2p. See -/// https://docs.rs/libp2p/0.22.0/libp2p/core/trait.ProtocolName.html#tymethod.protocol_name -const MAX_PROTOCOL_ID_LENGTH: usize = 140; - -/// A protocol ID. 
-#[derive(Clone, Debug)] -pub(super) struct ProtocolId { - id: String, -} - -impl ProtocolId { - pub(super) fn new(chainspec: &Chainspec, name: &str) -> Self { - let id = format!( - "/casper/{}/{}/{}", - chainspec.network_config.name, name, chainspec.protocol_config.version - ); - - assert!( - id.as_bytes().len() <= MAX_PROTOCOL_ID_LENGTH, - "Protocol IDs must not exceed {} bytes in length", - MAX_PROTOCOL_ID_LENGTH - ); - - ProtocolId { id } - } -} - -impl ProtocolName for ProtocolId { - fn protocol_name(&self) -> &[u8] { - self.id.as_bytes() - } -} diff --git a/node/src/components/network/symmetry.rs b/node/src/components/network/symmetry.rs new file mode 100644 index 0000000000..37433fd24a --- /dev/null +++ b/node/src/components/network/symmetry.rs @@ -0,0 +1,300 @@ +//! Connection symmetry management. +//! +//! Tracks the state of connections, which may be uni- or bi-directional, depending on whether a +//! peer has connected back to us. Asymmetric connections are usually removed periodically. + +use std::{collections::BTreeSet, mem, net::SocketAddr, time::Instant}; + +use datasize::DataSize; +use tracing::{debug, warn}; + +/// Describes whether a connection is uni- or bi-directional. +#[derive(DataSize, Debug, Default)] +pub(super) enum ConnectionSymmetry { + /// We have only seen an incoming connection. + IncomingOnly { + /// Time this connection remained incoming only. + since: Instant, + /// The outgoing address of the peer that is connected to us. + peer_addrs: BTreeSet, + }, + /// We have only seen an outgoing connection. + OutgoingOnly { + /// Time this connection remained outgoing only. + since: Instant, + }, + /// The connection is fully symmetric. + Symmetric { + /// The outgoing address on the peer that is connected to us. + peer_addrs: BTreeSet, + }, + /// The connection is invalid/missing and should be removed. + #[default] + Gone, +} + +impl ConnectionSymmetry { + /// A new incoming connection has been registered. 
+ /// + /// Returns true, if the connection achieved symmetry with this change. + pub(super) fn add_incoming(&mut self, peer_addr: SocketAddr, since: Instant) -> bool { + match self { + ConnectionSymmetry::IncomingOnly { + ref mut peer_addrs, .. + } => { + // Already incoming connection, just add it to the pile. + peer_addrs.insert(peer_addr); + debug!( + total_incoming_count = peer_addrs.len(), + "added additional incoming connection on non-symmetric" + ); + false + } + ConnectionSymmetry::OutgoingOnly { .. } => { + // Outgoing graduates to Symmetric when we receive an incoming connection. + let mut peer_addrs = BTreeSet::new(); + peer_addrs.insert(peer_addr); + *self = ConnectionSymmetry::Symmetric { peer_addrs }; + debug!("added incoming connection, now symmetric"); + true + } + ConnectionSymmetry::Symmetric { peer_addrs } => { + // Just record an additional incoming connection. + peer_addrs.insert(peer_addr); + debug!( + total_incoming_count = peer_addrs.len(), + "added additional incoming connection on symmetric" + ); + false + } + ConnectionSymmetry::Gone => { + let mut peer_addrs = BTreeSet::new(); + peer_addrs.insert(peer_addr); + *self = ConnectionSymmetry::IncomingOnly { peer_addrs, since }; + debug!("added incoming connection, now incoming only"); + false + } + } + } + + /// An incoming address has been removed. + /// + /// Returns `false` if the `ConnectionSymmetry` should be removed after this. + pub(super) fn remove_incoming(&mut self, peer_addr: SocketAddr, now: Instant) -> bool { + match self { + ConnectionSymmetry::IncomingOnly { peer_addrs, .. } => { + // Remove the incoming connection, warn if it didn't exist. + if !peer_addrs.remove(&peer_addr) { + warn!("tried to remove non-existent incoming connection from symmetry"); + } + + // Indicate removal if this was the last incoming connection. 
+ if peer_addrs.is_empty() { + *self = ConnectionSymmetry::Gone; + debug!("removed incoming connection, now gone"); + + false + } else { + debug!( + total_incoming_count = peer_addrs.len(), + "removed incoming connection, still has remaining incoming" + ); + + true + } + } + ConnectionSymmetry::OutgoingOnly { .. } => { + warn!("cannot remove incoming connection from outgoing-only"); + true + } + ConnectionSymmetry::Symmetric { peer_addrs } => { + if !peer_addrs.remove(&peer_addr) { + warn!("tried to remove non-existent symmetric connection from symmetry"); + } + if peer_addrs.is_empty() { + *self = ConnectionSymmetry::OutgoingOnly { since: now }; + debug!("removed incoming connection, now incoming-only"); + } + true + } + ConnectionSymmetry::Gone => { + // This is just an error. + warn!("removing incoming connection from already gone symmetry"); + false + } + } + } + + /// Marks a connection as having an outgoing connection. + /// + /// Returns true, if the connection achieved symmetry with this change. + pub(super) fn mark_outgoing(&mut self, now: Instant) -> bool { + match self { + ConnectionSymmetry::IncomingOnly { peer_addrs, .. } => { + // Connection is now complete. + debug!("incoming connection marked outgoing, now complete"); + *self = ConnectionSymmetry::Symmetric { + peer_addrs: mem::take(peer_addrs), + }; + true + } + ConnectionSymmetry::OutgoingOnly { .. } => { + warn!("outgoing connection marked outgoing"); + false + } + ConnectionSymmetry::Symmetric { .. } => { + warn!("symmetric connection marked outgoing"); + false + } + ConnectionSymmetry::Gone => { + *self = ConnectionSymmetry::OutgoingOnly { since: now }; + debug!("absent connection marked outgoing"); + false + } + } + } + + /// Unmarks a connection as having an outgoing connection. + /// + /// Returns `false` if the `ConnectionSymmetry` should be removed after this. + pub(super) fn unmark_outgoing(&mut self, now: Instant) -> bool { + match self { + ConnectionSymmetry::IncomingOnly { .. 
} => { + warn!("incoming-only unmarked outgoing"); + true + } + ConnectionSymmetry::OutgoingOnly { .. } => { + // With neither incoming, nor outgoing connections, the symmetry is finally gone. + *self = ConnectionSymmetry::Gone; + debug!("outgoing connection unmarked, now gone"); + + false + } + ConnectionSymmetry::Symmetric { peer_addrs } => { + *self = ConnectionSymmetry::IncomingOnly { + peer_addrs: mem::take(peer_addrs), + since: now, + }; + debug!("symmetric connection unmarked, now outgoing only"); + + true + } + ConnectionSymmetry::Gone => { + warn!("gone marked outgoing"); + false + } + } + } + + /// Returns the set of incoming addresses, if any. + pub(super) fn incoming_addrs(&self) -> Option<&BTreeSet> { + match self { + ConnectionSymmetry::IncomingOnly { peer_addrs, .. } + | ConnectionSymmetry::Symmetric { peer_addrs, .. } => Some(peer_addrs), + ConnectionSymmetry::OutgoingOnly { .. } | ConnectionSymmetry::Gone => None, + } + } +} + +#[cfg(test)] +mod tests { + use std::{ + collections::BTreeSet, + net::SocketAddr, + time::{Duration, Instant}, + }; + + use crate::testing::test_clock::TestClock; + + use super::ConnectionSymmetry; + + /// Indicates whether or not a connection should be cleaned up. + fn should_be_reaped( + connection_symmetry: &ConnectionSymmetry, + now: Instant, + max_time_asymmetric: Duration, + ) -> bool { + match connection_symmetry { + ConnectionSymmetry::IncomingOnly { since, .. } => now >= *since + max_time_asymmetric, + ConnectionSymmetry::OutgoingOnly { since } => now >= *since + max_time_asymmetric, + ConnectionSymmetry::Symmetric { .. } => false, + ConnectionSymmetry::Gone => true, + } + } + + #[test] + fn symmetry_successful_lifecycles() { + let mut clock = TestClock::new(); + + let max_time_asymmetric = Duration::from_secs(240); + let peer_addr: SocketAddr = "1.2.3.4:1234".parse().unwrap(); + + let mut sym = ConnectionSymmetry::default(); + + // Symmetries that have just been initialized are always reaped instantly. 
+ assert!(should_be_reaped(&sym, clock.now(), max_time_asymmetric)); + + // Adding an incoming address. + sym.add_incoming(peer_addr, clock.now()); + assert!(!should_be_reaped(&sym, clock.now(), max_time_asymmetric)); + + // Add an outgoing address. + clock.advance(Duration::from_secs(20)); + sym.mark_outgoing(clock.now()); + + // The connection will now never be reaped, as it is symmetrical. + clock.advance(Duration::from_secs(1_000_000)); + assert!(!should_be_reaped(&sym, clock.now(), max_time_asymmetric)); + } + + #[test] + fn symmetry_lifecycle_reaps_incoming_only() { + let mut clock = TestClock::new(); + + let max_time_asymmetric = Duration::from_secs(240); + let peer_addr: SocketAddr = "1.2.3.4:1234".parse().unwrap(); + let peer_addr2: SocketAddr = "1.2.3.4:1234".parse().unwrap(); + + let mut sym = ConnectionSymmetry::default(); + + // Adding an incoming address prevents it from being reaped. + sym.add_incoming(peer_addr, clock.now()); + assert!(!should_be_reaped(&sym, clock.now(), max_time_asymmetric)); + + // Adding another incoming address does not change the timeout. + clock.advance(Duration::from_secs(120)); + sym.add_incoming(peer_addr2, clock.now()); + assert!(!should_be_reaped(&sym, clock.now(), max_time_asymmetric)); + + // We also expected `peer_addr` and `peer_addr2` to be the incoming addresses now. + let mut expected = BTreeSet::new(); + expected.insert(peer_addr); + expected.insert(peer_addr2); + assert_eq!(sym.incoming_addrs(), Some(&expected)); + + // After 240 seconds since the first incoming connection, we finally are due reaping. + clock.advance(Duration::from_secs(120)); + assert!(should_be_reaped(&sym, clock.now(), max_time_asymmetric)); + } + + #[test] + fn symmetry_lifecycle_reaps_outgoing_only() { + let mut clock = TestClock::new(); + + let max_time_asymmetric = Duration::from_secs(240); + + let mut sym = ConnectionSymmetry::default(); + + // Mark as outgoing, to prevent reaping. 
+ sym.mark_outgoing(clock.now()); + assert!(!should_be_reaped(&sym, clock.now(), max_time_asymmetric)); + + // Marking as outgoing again is usually an error, but should not affect the timeout. + clock.advance(Duration::from_secs(120)); + assert!(!should_be_reaped(&sym, clock.now(), max_time_asymmetric)); + + // After 240 seconds we finally are reaping. + clock.advance(Duration::from_secs(120)); + assert!(should_be_reaped(&sym, clock.now(), max_time_asymmetric)); + } +} diff --git a/node/src/components/network/tasks.rs b/node/src/components/network/tasks.rs new file mode 100644 index 0000000000..9700ddbbb6 --- /dev/null +++ b/node/src/components/network/tasks.rs @@ -0,0 +1,856 @@ +//! Tasks run by the component. + +use std::{ + error::Error as StdError, + fmt::Display, + io, + net::SocketAddr, + pin::Pin, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, Weak, + }, + time::Duration, +}; + +use bincode::Options; +use futures::{ + future::{self, Either}, + stream::{SplitSink, SplitStream}, + Future, SinkExt, StreamExt, +}; +use openssl::{ + pkey::{PKey, Private}, + ssl::Ssl, + x509::X509, +}; +use prometheus::IntGauge; +use rand::Rng; +use serde::{de::DeserializeOwned, Deserialize, Serialize}; +use tokio::{ + net::TcpStream, + sync::{mpsc::UnboundedReceiver, watch, Semaphore}, +}; +use tokio_openssl::SslStream; +use tokio_serde::{Deserializer, Serializer}; +use tracing::{ + debug, error, error_span, + field::{self, Empty}, + info, trace, warn, Instrument, Span, +}; + +use casper_types::{ProtocolVersion, PublicKey, TimeDiff}; + +use super::{ + chain_info::ChainInfo, + counting_format::{ConnectionId, Role}, + error::{ConnectionError, IoError}, + event::{IncomingConnection, OutgoingConnection}, + full_transport, + limiter::LimiterHandle, + message::NodeKeyPair, + message_pack_format::MessagePackFormat, + EstimatorWeights, Event, FramedTransport, FullTransport, Identity, Message, Metrics, Payload, + Transport, +}; +use crate::{ + components::network::{framed_transport, 
BincodeFormat, Config, FromIncoming}, + effect::{ + announcements::PeerBehaviorAnnouncement, requests::NetworkRequest, AutoClosingResponder, + EffectBuilder, + }, + reactor::{EventQueueHandle, QueueKind}, + tls::{self, TlsCert, ValidationError}, + types::NodeId, + utils::display_error, +}; + +/// An item on the internal outgoing message queue. +/// +/// Contains a reference counted message and an optional responder to call once the message has been +/// successfully handed over to the kernel for sending. +pub(super) type MessageQueueItem

= (Arc>, Option>); + +/// The outcome of the handshake process. +struct HandshakeOutcome { + /// A framed transport for peer. + framed_transport: FramedTransport, + /// Public address advertised by the peer. + public_addr: SocketAddr, + /// The public key the peer is validating with, if any. + peer_consensus_public_key: Option, + /// Holds the information whether the remote node is syncing. + is_peer_syncing: bool, +} + +/// Low-level TLS connection function. +/// +/// Performs the actual TCP+TLS connection setup. +async fn tls_connect( + context: &NetworkContext, + peer_addr: SocketAddr, +) -> Result<(NodeId, Transport), ConnectionError> +where + REv: 'static, +{ + let stream = TcpStream::connect(peer_addr) + .await + .map_err(ConnectionError::TcpConnection)?; + + stream + .set_nodelay(true) + .map_err(ConnectionError::TcpNoDelay)?; + + let mut transport = tls::create_tls_connector(context.our_cert.as_x509(), &context.secret_key) + .and_then(|connector| connector.configure()) + .and_then(|mut config| { + config.set_verify_hostname(false); + config.into_ssl("this-will-not-be-checked.example.com") + }) + .and_then(|ssl| SslStream::new(ssl, stream)) + .map_err(ConnectionError::TlsInitialization)?; + + SslStream::connect(Pin::new(&mut transport)) + .await + .map_err(ConnectionError::TlsHandshake)?; + + let peer_cert = transport + .ssl() + .peer_certificate() + .ok_or(ConnectionError::NoPeerCertificate)?; + + let validated_peer_cert = context + .validate_peer_cert(peer_cert) + .map_err(ConnectionError::PeerCertificateInvalid)?; + + let peer_id = NodeId::from(validated_peer_cert.public_key_fingerprint()); + + Ok((peer_id, transport)) +} + +/// Initiates a TLS connection to a remote address. +pub(super) async fn connect_outgoing( + context: Arc>, + peer_addr: SocketAddr, +) -> OutgoingConnection

+where + REv: 'static, + P: Payload, +{ + let (peer_id, transport) = match tls_connect(&context, peer_addr).await { + Ok(value) => value, + Err(error) => return OutgoingConnection::FailedEarly { peer_addr, error }, + }; + + // Register the `peer_id` on the [`Span`]. + Span::current().record("peer_id", field::display(peer_id)); + + if peer_id == context.our_id { + info!("outgoing loopback connection"); + return OutgoingConnection::Loopback { peer_addr }; + } + + debug!("Outgoing TLS connection established"); + + // Setup connection id and framed transport. + let connection_id = ConnectionId::from_connection(transport.ssl(), context.our_id, peer_id); + let framed_transport = framed_transport(transport, context.chain_info.maximum_net_message_size); + + // Negotiate the handshake, concluding the incoming connection process. + match negotiate_handshake::(&context, framed_transport, connection_id).await { + Ok(HandshakeOutcome { + framed_transport, + public_addr, + peer_consensus_public_key, + is_peer_syncing: is_syncing, + }) => { + if let Some(ref public_key) = peer_consensus_public_key { + Span::current().record("consensus_key", field::display(public_key)); + } + + if public_addr != peer_addr { + // We don't need the `public_addr`, as we already connected, but warn anyway. + warn!(%public_addr, %peer_addr, "peer advertises a different public address than what we connected to"); + } + + // Setup full framed transport, then close down receiving end of the transport. + let full_transport = full_transport::

( + context.net_metrics.clone(), + connection_id, + framed_transport, + Role::Dialer, + ); + let (sink, _stream) = full_transport.split(); + + OutgoingConnection::Established { + peer_addr, + peer_id, + peer_consensus_public_key, + sink, + is_syncing, + } + } + Err(error) => OutgoingConnection::Failed { + peer_addr, + peer_id, + error, + }, + } +} + +/// A context holding all relevant information for networking communication shared across tasks. +pub(crate) struct NetworkContext +where + REv: 'static, +{ + /// The handle to the reactor's event queue, used by incoming message handlers to put events + /// onto the queue. + event_queue: Option>, + /// Our own [`NodeId`]. + our_id: NodeId, + /// TLS certificate associated with this node's identity. + our_cert: Arc, + /// TLS certificate authority associated with this node's identity. + network_ca: Option>, + /// Secret key associated with `our_cert`. + secret_key: Arc>, + /// Weak reference to the networking metrics shared by all sender/receiver tasks. + net_metrics: Weak, + /// Chain info extract from chainspec. + chain_info: ChainInfo, + /// Optional set of signing keys, to identify as a node during handshake. + node_key_pair: Option, + /// Our own public listening address. + public_addr: Option, + /// Timeout for handshake completion. + handshake_timeout: TimeDiff, + /// Weights to estimate payloads with. + payload_weights: EstimatorWeights, + /// The protocol version at which (or under) tarpitting is enabled. + tarpit_version_threshold: Option, + /// If tarpitting is enabled, duration for which connections should be kept open. + tarpit_duration: TimeDiff, + /// The chance, expressed as a number between 0.0 and 1.0, of triggering the tarpit. + tarpit_chance: f32, + /// Maximum number of demands allowed to be running at once. If 0, no limit is enforced. + max_in_flight_demands: usize, + /// Flag indicating whether this node is syncing. + is_syncing: AtomicBool, + /// If false, will not allow handshake. 
+ allow_handshake: bool, +} + +impl NetworkContext { + pub(super) fn new( + cfg: &Config, + our_identity: Identity, + node_key_pair: Option, + chain_info: ChainInfo, + net_metrics: &Arc, + allow_handshake: bool, + ) -> Self { + // Set the demand max from configuration, regarding `0` as "unlimited". + let max_in_flight_demands = if cfg.max_in_flight_demands == 0 { + usize::MAX + } else { + cfg.max_in_flight_demands as usize + }; + + let Identity { + secret_key, + tls_certificate, + network_ca, + } = our_identity; + let our_id = NodeId::from(tls_certificate.public_key_fingerprint()); + + NetworkContext { + our_id, + public_addr: None, + event_queue: None, + our_cert: tls_certificate, + network_ca, + secret_key, + net_metrics: Arc::downgrade(net_metrics), + chain_info, + node_key_pair, + handshake_timeout: cfg.handshake_timeout, + payload_weights: cfg.estimator_weights.clone(), + tarpit_version_threshold: cfg.tarpit_version_threshold, + tarpit_duration: cfg.tarpit_duration, + tarpit_chance: cfg.tarpit_chance, + max_in_flight_demands, + is_syncing: AtomicBool::new(false), + allow_handshake, + } + } + + pub(super) fn initialize( + &mut self, + our_public_addr: SocketAddr, + event_queue: EventQueueHandle, + ) { + self.public_addr = Some(our_public_addr); + self.event_queue = Some(event_queue); + } + + /// Our own [`NodeId`]. + pub(super) fn our_id(&self) -> NodeId { + self.our_id + } + + /// Our own public listening address. + pub(super) fn public_addr(&self) -> Option { + self.public_addr + } + + /// Chain info extract from chainspec. 
+ pub(super) fn chain_info(&self) -> &ChainInfo { + &self.chain_info + } + + pub(crate) fn validate_peer_cert(&self, peer_cert: X509) -> Result { + match &self.network_ca { + Some(ca_cert) => tls::validate_cert_with_authority(peer_cert, ca_cert), + None => tls::validate_self_signed_cert(peer_cert), + } + } + + pub(crate) fn network_ca(&self) -> Option<&Arc> { + self.network_ca.as_ref() + } + + pub(crate) fn is_syncing(&self) -> &AtomicBool { + &self.is_syncing + } +} + +/// Handles an incoming connection. +/// +/// Sets up a TLS stream and performs the protocol handshake. +async fn handle_incoming( + context: Arc>, + stream: TcpStream, + peer_addr: SocketAddr, +) -> IncomingConnection

+where + REv: From> + 'static, + P: Payload, + for<'de> P: Serialize + Deserialize<'de>, + for<'de> Message

: Serialize + Deserialize<'de>, +{ + let (peer_id, transport) = match server_setup_tls(&context, stream).await { + Ok(value) => value, + Err(error) => { + return IncomingConnection::FailedEarly { peer_addr, error }; + } + }; + + // Register the `peer_id` on the [`Span`] for logging the ID from here on out. + Span::current().record("peer_id", field::display(peer_id)); + + if peer_id == context.our_id { + info!("incoming loopback connection"); + return IncomingConnection::Loopback; + } + + debug!("Incoming TLS connection established"); + + // Setup connection id and framed transport. + let connection_id = ConnectionId::from_connection(transport.ssl(), context.our_id, peer_id); + let framed_transport = framed_transport(transport, context.chain_info.maximum_net_message_size); + + // Negotiate the handshake, concluding the incoming connection process. + match negotiate_handshake::(&context, framed_transport, connection_id).await { + Ok(HandshakeOutcome { + framed_transport, + public_addr, + peer_consensus_public_key, + is_peer_syncing: _, + }) => { + if !context.allow_handshake { + return IncomingConnection::Failed { + peer_addr, + peer_id, + error: ConnectionError::HandshakeNotAllowed, + }; + } + + if let Some(ref public_key) = peer_consensus_public_key { + Span::current().record("consensus_key", field::display(public_key)); + } + + // Establish full transport and close the receiving end. + let full_transport = full_transport::

( + context.net_metrics.clone(), + connection_id, + framed_transport, + Role::Listener, + ); + + let (_sink, stream) = full_transport.split(); + + IncomingConnection::Established { + peer_addr, + public_addr, + peer_id, + peer_consensus_public_key, + stream, + } + } + Err(error) => IncomingConnection::Failed { + peer_addr, + peer_id, + error, + }, + } +} + +/// Server-side TLS setup. +/// +/// This function groups the TLS setup into a convenient function, enabling the `?` operator. +pub(super) async fn server_setup_tls( + context: &NetworkContext, + stream: TcpStream, +) -> Result<(NodeId, Transport), ConnectionError> { + let mut tls_stream = tls::create_tls_acceptor( + context.our_cert.as_x509().as_ref(), + context.secret_key.as_ref(), + ) + .and_then(|ssl_acceptor| Ssl::new(ssl_acceptor.context())) + .and_then(|ssl| SslStream::new(ssl, stream)) + .map_err(ConnectionError::TlsInitialization)?; + + SslStream::accept(Pin::new(&mut tls_stream)) + .await + .map_err(ConnectionError::TlsHandshake)?; + + // We can now verify the certificate. + let peer_cert = tls_stream + .ssl() + .peer_certificate() + .ok_or(ConnectionError::NoPeerCertificate)?; + + let validated_peer_cert = context + .validate_peer_cert(peer_cert) + .map_err(ConnectionError::PeerCertificateInvalid)?; + + Ok(( + NodeId::from(validated_peer_cert.public_key_fingerprint()), + tls_stream, + )) +} + +/// Performs an IO-operation that can time out. +async fn io_timeout(duration: Duration, future: F) -> Result> +where + F: Future>, + E: StdError + 'static, +{ + tokio::time::timeout(duration, future) + .await + .map_err(|_elapsed| IoError::Timeout)? + .map_err(IoError::Error) +} + +/// Performs an IO-operation that can time out or result in a closed connection. 
+async fn io_opt_timeout(duration: Duration, future: F) -> Result> +where + F: Future>>, + E: StdError + 'static, +{ + let item = tokio::time::timeout(duration, future) + .await + .map_err(|_elapsed| IoError::Timeout)?; + + match item { + Some(Ok(value)) => Ok(value), + Some(Err(err)) => Err(IoError::Error(err)), + None => Err(IoError::UnexpectedEof), + } +} + +/// Negotiates a handshake between two peers. +async fn negotiate_handshake( + context: &NetworkContext, + framed: FramedTransport, + connection_id: ConnectionId, +) -> Result +where + P: Payload, +{ + let mut encoder = MessagePackFormat; + + // Manually encode a handshake. + let handshake_message = context.chain_info.create_handshake::

( + context.public_addr.expect("component not initialized"), + context.node_key_pair.as_ref(), + connection_id, + context.is_syncing.load(Ordering::SeqCst), + ); + + let serialized_handshake_message = Pin::new(&mut encoder) + .serialize(&Arc::new(handshake_message)) + .map_err(ConnectionError::CouldNotEncodeOurHandshake)?; + + // To ensure we are not dead-locking, we split the framed transport here and send the handshake + // in a background task before awaiting one ourselves. This ensures we can make progress + // regardless of the size of the outgoing handshake. + let (mut sink, mut stream) = framed.split(); + + let handshake_send = tokio::spawn(io_timeout(context.handshake_timeout.into(), async move { + sink.send(serialized_handshake_message).await?; + Ok(sink) + })); + + // The remote's message should be a handshake, but can technically be any message. We receive, + // deserialize and check it. + let remote_message_raw = io_opt_timeout(context.handshake_timeout.into(), stream.next()) + .await + .map_err(ConnectionError::HandshakeRecv)?; + + // Ensure the handshake was sent correctly. + let sink = handshake_send + .await + .map_err(ConnectionError::HandshakeSenderCrashed)? + .map_err(ConnectionError::HandshakeSend)?; + + let remote_message: Message

= Pin::new(&mut encoder) + .deserialize(&remote_message_raw) + .map_err(ConnectionError::InvalidRemoteHandshakeMessage)?; + + if let Message::Handshake { + network_name, + public_addr, + protocol_version, + consensus_certificate, + is_syncing, + chainspec_hash, + } = remote_message + { + debug!(%protocol_version, "handshake received"); + + // The handshake was valid, we can check the network name. + if network_name != context.chain_info.network_name { + return Err(ConnectionError::WrongNetwork(network_name)); + } + + // If there is a version mismatch, we treat it as a connection error. We do not ban peers + // for this error, but instead rely on exponential backoff, as bans would result in issues + // during upgrades where nodes may have a legitimate reason for differing versions. + // + // Since we are not using SemVer for versioning, we cannot make any assumptions about + // compatibility, so we allow only exact version matches. + if protocol_version != context.chain_info.protocol_version { + if let Some(threshold) = context.tarpit_version_threshold { + if protocol_version <= threshold { + let mut rng = crate::new_rng(); + + if rng.gen_bool(context.tarpit_chance as f64) { + // If tarpitting is enabled, we hold open the connection for a specific + // amount of time, to reduce load on other nodes and keep them from + // reconnecting. + info!(duration=?context.tarpit_duration, "randomly tarpitting node"); + tokio::time::sleep(Duration::from(context.tarpit_duration)).await; + } else { + debug!(p = context.tarpit_chance, "randomly not tarpitting node"); + } + } + } + return Err(ConnectionError::IncompatibleVersion(protocol_version)); + } + + // We check the chainspec hash to ensure peer is using the same chainspec as us. + // The remote message should always have a chainspec hash at this point since + // we checked the protocol version previously. 
+ let peer_chainspec_hash = chainspec_hash.ok_or(ConnectionError::MissingChainspecHash)?; + if peer_chainspec_hash != context.chain_info.chainspec_hash { + return Err(ConnectionError::WrongChainspecHash(peer_chainspec_hash)); + } + + let peer_consensus_public_key = consensus_certificate + .map(|cert| { + cert.validate(connection_id) + .map_err(ConnectionError::InvalidConsensusCertificate) + }) + .transpose()?; + + let framed_transport = sink + .reunite(stream) + .map_err(|_| ConnectionError::FailedToReuniteHandshakeSinkAndStream)?; + + Ok(HandshakeOutcome { + framed_transport, + public_addr, + peer_consensus_public_key, + is_peer_syncing: is_syncing, + }) + } else { + // Received a non-handshake, this is an error. + Err(ConnectionError::DidNotSendHandshake) + } +} + +/// Runs the server core acceptor loop. +pub(super) async fn server( + context: Arc>, + listener: tokio::net::TcpListener, + mut shutdown_receiver: watch::Receiver<()>, +) where + REv: From> + Send, + P: Payload, +{ + // The server task is a bit tricky, since it has to wait on incoming connections while at the + // same time shut down if the networking component is dropped, otherwise the TCP socket will + // stay open, preventing reuse. + + // We first create a future that never terminates, handling incoming connections: + let accept_connections = async { + let event_queue = context.event_queue.expect("component not initialized"); + loop { + // We handle accept errors here, since they can be caused by a temporary resource + // shortage or the remote side closing the connection while it is waiting in + // the queue. + match listener.accept().await { + Ok((stream, peer_addr)) => { + // The span setup here is used throughout the entire lifetime of the connection. 
+ let span = + error_span!("incoming", %peer_addr, peer_id=Empty, consensus_key=Empty); + + let context = context.clone(); + let handler_span = span.clone(); + tokio::spawn( + async move { + let incoming = + handle_incoming(context.clone(), stream, peer_addr).await; + event_queue + .schedule( + Event::IncomingConnection { + incoming: Box::new(incoming), + span, + }, + QueueKind::NetworkIncoming, + ) + .await; + } + .instrument(handler_span), + ); + } + + // TODO: Handle resource errors gracefully. + // In general, two kinds of errors occur here: Local resource exhaustion, + // which should be handled by waiting a few milliseconds, or remote connection + // errors, which can be dropped immediately. + // + // The code in its current state will consume 100% CPU if local resource + // exhaustion happens, as no distinction is made and no delay introduced. + Err(ref err) => { + warn!(%context.our_id, err=display_error(err), "dropping incoming connection during accept") + } + } + } + }; + + let shutdown_messages = async move { while shutdown_receiver.changed().await.is_ok() {} }; + + // Now we can wait for either the `shutdown` channel's remote end to do be dropped or the + // infinite loop to terminate, which never happens. + match future::select(Box::pin(shutdown_messages), Box::pin(accept_connections)).await { + Either::Left(_) => info!( + %context.our_id, + "shutting down socket, no longer accepting incoming connections" + ), + Either::Right(_) => unreachable!(), + } +} + +/// Network message reader. +/// +/// Schedules all received messages until the stream is closed or an error occurs. +pub(super) async fn message_reader( + context: Arc>, + mut stream: SplitStream>, + limiter: LimiterHandle, + mut close_incoming_receiver: watch::Receiver<()>, + peer_id: NodeId, + span: Span, +) -> io::Result<()> +where + P: DeserializeOwned + Send + Display + Payload, + REv: From> + + FromIncoming

+ + From> + + From + + Send, +{ + let demands_in_flight = Arc::new(Semaphore::new(context.max_in_flight_demands)); + let event_queue = context.event_queue.expect("component not initialized"); + + let read_messages = async move { + while let Some(msg_result) = stream.next().await { + match msg_result { + Ok(msg) => { + trace!(%msg, "message received"); + + let effect_builder = EffectBuilder::new(event_queue); + + match msg.try_into_demand(effect_builder, peer_id) { + Ok((event, wait_for_response)) => { + // Note: For now, demands bypass the limiter, as we expect the + // backpressure to handle this instead. + + // Acquire a permit. If we are handling too many demands at this + // time, this will block, halting the processing of new message, + // thus letting the peer they have reached their maximum allowance. + let in_flight = demands_in_flight + .clone() + .acquire_owned() + .await + // Note: Since the semaphore is reference counted, it must + // explicitly be closed for acquisition to fail, which we + // never do. If this happens, there is a bug in the code; + // we exit with an error and close the connection. + .map_err(|_| { + io::Error::new( + io::ErrorKind::Other, + "demand limiter semaphore closed unexpectedly", + ) + })?; + + Metrics::record_trie_request_start(&context.net_metrics); + + let net_metrics = context.net_metrics.clone(); + // Spawn a future that will eventually send the returned message. It + // will essentially buffer the response. + tokio::spawn(async move { + if let Some(payload) = wait_for_response.await { + // Send message and await its return. `send_message` should + // only return when the message has been buffered, if the + // peer is not accepting data, we will block here until the + // send buffer has sufficient room. + effect_builder.send_message(peer_id, payload).await; + + // Note: We could short-circuit the event queue here and + // directly insert into the outgoing message queue, + // which may be potential performance improvement. 
+ } + + // Missing else: The handler of the demand did not deem it + // worthy a response. Just drop it. + + // After we have either successfully buffered the message for + // sending, failed to do so or did not have a message to send + // out, we consider the request handled and free up the permit. + Metrics::record_trie_request_end(&net_metrics); + drop(in_flight); + }); + + // Schedule the created event. + event_queue + .schedule::(event, QueueKind::NetworkDemand) + .await; + } + Err(msg) => { + // We've received a non-demand message. Ensure we have the proper amount + // of resources, then push it to the reactor. + limiter + .request_allowance( + msg.payload_incoming_resource_estimate( + &context.payload_weights, + ), + ) + .await; + + let queue_kind = if msg.is_low_priority() { + QueueKind::NetworkLowPriority + } else { + QueueKind::NetworkIncoming + }; + + event_queue + .schedule( + Event::IncomingMessage { + peer_id: Box::new(peer_id), + msg, + span: span.clone(), + }, + queue_kind, + ) + .await; + } + } + } + Err(err) => { + warn!( + err = display_error(&err), + "receiving message failed, closing connection" + ); + return Err(err); + } + } + } + Ok(()) + }; + + let shutdown_messages = async move { while close_incoming_receiver.changed().await.is_ok() {} }; + + // Now we can wait for either the `shutdown` channel's remote end to do be dropped or the + // while loop to terminate. + match future::select(Box::pin(shutdown_messages), Box::pin(read_messages)).await { + Either::Left(_) => info!("shutting down incoming connection message reader"), + Either::Right(_) => (), + } + Ok(()) +} + +/// Network message sender. +/// +/// Reads from a channel and sends all messages, until the stream is closed or an error occurs. +pub(super) async fn message_sender

( + mut queue: UnboundedReceiver>, + mut sink: SplitSink, Arc>>, + limiter: LimiterHandle, + counter: IntGauge, +) where + P: Payload, +{ + while let Some((message, opt_responder)) = queue.recv().await { + counter.dec(); + + let estimated_wire_size = match BincodeFormat::default().0.serialized_size(&*message) { + Ok(size) => size as u32, + Err(error) => { + error!( + error = display_error(&error), + "failed to get serialized size of outgoing message, closing outgoing connection" + ); + break; + } + }; + limiter.request_allowance(estimated_wire_size).await; + + let mut outcome = sink.send(message).await; + + // Notify via responder that the message has been buffered by the kernel. + if let Some(auto_closing_responder) = opt_responder { + // Since someone is interested in the message, flush the socket to ensure it was sent. + outcome = outcome.and(sink.flush().await); + auto_closing_responder.respond(()).await; + } + + // We simply error-out if the sink fails, it means that our connection broke. + if let Err(ref err) = outcome { + info!( + err = display_error(err), + "message send failed, closing outgoing connection" + ); + + // To ensure, metrics are up to date, we close the queue and drain it. + queue.close(); + while queue.recv().await.is_some() { + counter.dec(); + } + + break; + }; + } +} diff --git a/node/src/components/network/tests.rs b/node/src/components/network/tests.rs index bdff43047d..73bd10a1ab 100644 --- a/node/src/components/network/tests.rs +++ b/node/src/components/network/tests.rs @@ -1,36 +1,51 @@ +//! Tests for the `network` component. +//! +//! Calling these "unit tests" would be a bit of a misnomer, since they deal mostly with multiple +//! instances of `net` arranged in a network. 
+ use std::{ collections::{HashMap, HashSet}, - env, fmt::{self, Debug, Display, Formatter}, + sync::Arc, time::{Duration, Instant}, }; use derive_more::From; -use pnet::datalink; +use futures::FutureExt; use prometheus::Registry; use reactor::ReactorEvent; -use serde::Serialize; +use serde::{Deserialize, Serialize}; +use smallvec::smallvec; use tracing::{debug, info}; +use casper_types::{Chainspec, ChainspecRawBytes, SecretKey}; + use super::{ - network_is_isolated, Config, Event as NetworkEvent, Network as NetworkComponent, - ENABLE_LIBP2P_NET_ENV_VAR, + chain_info::ChainInfo, Event as NetworkEvent, FromIncoming, GossipedAddress, Identity, + MessageKind, Network, Payload, }; use crate::{ - components::{network::NetworkIdentity, Component}, + components::{ + gossiper::{self, GossipItem, Gossiper}, + network, Component, InitializedComponent, + }, effect::{ - announcements::{ControlAnnouncement, NetworkAnnouncement}, - requests::NetworkRequest, + announcements::{ControlAnnouncement, GossiperAnnouncement, PeerBehaviorAnnouncement}, + incoming::GossiperIncoming, + requests::{ + BeginGossipRequest, ChainspecRawBytesRequest, ContractRuntimeRequest, NetworkRequest, + StorageRequest, + }, EffectBuilder, Effects, }, protocol, - reactor::{self, EventQueueHandle, Finalize, Reactor, Runner}, + reactor::{self, main_reactor::Config, EventQueueHandle, Finalize, Reactor, Runner}, testing::{ self, init_logging, - network::{Network, NetworkedReactor}, + network::{NetworkedReactor, Nodes, TestingNetwork}, ConditionCheckReactor, }, - types::{Chainspec, NodeId}, + types::{NodeId, SyncHandling, ValidatorMatrix}, NodeRng, }; @@ -38,18 +53,30 @@ use crate::{ #[derive(Debug, From, Serialize)] enum Event { #[from] - Network(#[serde(skip_serializing)] NetworkEvent), + Net(#[serde(skip_serializing)] NetworkEvent), #[from] - NetworkRequest(#[serde(skip_serializing)] NetworkRequest), + AddressGossiper(#[serde(skip_serializing)] gossiper::Event), + #[from] + 
NetworkRequest(#[serde(skip_serializing)] NetworkRequest), #[from] ControlAnnouncement(ControlAnnouncement), #[from] - NetworkAnnouncement(#[serde(skip_serializing)] NetworkAnnouncement), + AddressGossiperAnnouncement(#[serde(skip_serializing)] GossiperAnnouncement), + #[from] + BeginAddressGossipRequest(BeginGossipRequest), + /// An incoming network message with an address gossiper protocol message. + AddressGossiperIncoming(GossiperIncoming), + #[from] + BlocklistAnnouncement(PeerBehaviorAnnouncement), } impl ReactorEvent for Event { - fn as_control(&self) -> Option<&ControlAnnouncement> { - if let Self::ControlAnnouncement(ref ctrl_ann) = self { + fn is_control(&self) -> bool { + matches!(self, Event::ControlAnnouncement(_)) + } + + fn try_into_control(self) -> Option { + if let Self::ControlAnnouncement(ctrl_ann) = self { Some(ctrl_ann) } else { None @@ -57,24 +84,97 @@ impl ReactorEvent for Event { } } -impl From> for Event { - fn from(_request: NetworkRequest) -> Self { +impl From>> for Event { + fn from(request: NetworkRequest>) -> Self { + Event::NetworkRequest(request.map_payload(Message::from)) + } +} + +impl From> for NetworkEvent { + fn from(request: NetworkRequest) -> NetworkEvent { + NetworkEvent::NetworkRequest { + req: Box::new(request), + } + } +} + +impl From> for Event { + fn from(_request: NetworkRequest) -> Self { + unreachable!() + } +} + +impl From for Event { + fn from(_request: StorageRequest) -> Self { unreachable!() } } +impl From for Event { + fn from(_request: ChainspecRawBytesRequest) -> Self { + unreachable!() + } +} + +impl From for Event { + fn from(_request: ContractRuntimeRequest) -> Self { + unreachable!() + } +} + +impl FromIncoming for Event { + fn from_incoming(sender: NodeId, payload: Message) -> Self { + match payload { + Message::AddressGossiper(message) => Event::AddressGossiperIncoming(GossiperIncoming { + sender, + message: Box::new(message), + }), + } + } +} + impl Display for Event { fn fmt(&self, f: &mut 
Formatter<'_>) -> fmt::Result { Debug::fmt(self, f) } } +#[derive(Clone, Debug, Deserialize, Serialize, From)] +enum Message { + #[from] + AddressGossiper(gossiper::Message), +} + +impl Display for Message { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + Debug::fmt(self, f) + } +} + +impl Payload for Message { + #[inline] + fn message_kind(&self) -> MessageKind { + match self { + Message::AddressGossiper(_) => MessageKind::AddressGossip, + } + } + + fn incoming_resource_estimate(&self, _weights: &super::EstimatorWeights) -> u32 { + 0 + } + + fn is_unsafe_for_syncing_peers(&self) -> bool { + false + } +} + /// Test reactor. /// /// Runs a single network. #[derive(Debug)] struct TestReactor { - network_component: NetworkComponent, + net: Network, + address_gossiper: Gossiper<{ GossipedAddress::ID_IS_COMPLETE_ITEM }, GossipedAddress>, } impl Reactor for TestReactor { @@ -82,29 +182,6 @@ impl Reactor for TestReactor { type Config = Config; type Error = anyhow::Error; - fn new( - config: Self::Config, - registry: &Registry, - event_queue: EventQueueHandle, - rng: &mut NodeRng, - ) -> anyhow::Result<(Self, Effects)> { - let chainspec = Chainspec::random(rng); - let network_identity = NetworkIdentity::new(); - let (network_component, effects) = NetworkComponent::new( - event_queue, - config, - registry, - network_identity, - &chainspec, - false, - )?; - - Ok(( - TestReactor { network_component }, - reactor::wrap_effects(Event::Network, effects), - )) - } - fn dispatch_event( &mut self, effect_builder: EffectBuilder, @@ -112,97 +189,150 @@ impl Reactor for TestReactor { event: Self::Event, ) -> Effects { match event { - Event::Network(event) => reactor::wrap_effects( - Event::Network, - self.network_component + Event::Net(ev) => { + reactor::wrap_effects(Event::Net, self.net.handle_event(effect_builder, rng, ev)) + } + Event::AddressGossiper(event) => reactor::wrap_effects( + Event::AddressGossiper, + self.address_gossiper .handle_event(effect_builder, rng, 
event), ), - Event::NetworkRequest(request) => self.dispatch_event( - effect_builder, - rng, - Event::Network(NetworkEvent::from(request)), + Event::NetworkRequest(req) => reactor::wrap_effects( + Event::Net, + self.net.handle_event(effect_builder, rng, req.into()), ), Event::ControlAnnouncement(ctrl_ann) => { unreachable!("unhandled control announcement: {}", ctrl_ann) } - Event::NetworkAnnouncement(NetworkAnnouncement::MessageReceived { - sender, - payload, - }) => { - todo!("{} -- {}", sender, payload); + Event::AddressGossiperAnnouncement(GossiperAnnouncement::NewCompleteItem( + gossiped_address, + )) => reactor::wrap_effects( + Event::Net, + self.net.handle_event( + effect_builder, + rng, + NetworkEvent::PeerAddressReceived(gossiped_address), + ), + ), + + Event::AddressGossiperAnnouncement(GossiperAnnouncement::GossipReceived { .. }) => { + // We do not care about the announcement of a new gossiped item in this test. + Effects::new() } - Event::NetworkAnnouncement(NetworkAnnouncement::GossipOurAddress( - _gossiped_address, - )) => { - unreachable!(); + Event::AddressGossiperAnnouncement(GossiperAnnouncement::FinishedGossiping(_)) => { + // We do not care about the announcement of gossiping finished in this test. + Effects::new() } - Event::NetworkAnnouncement(NetworkAnnouncement::NewPeer(_)) => { - // We do not care about the announcement of new peers in this test. + Event::AddressGossiperAnnouncement(GossiperAnnouncement::NewItemBody { .. }) => { + // Addresses shouldn't have an item body when gossiped. 
Effects::new() } + Event::BeginAddressGossipRequest(ev) => reactor::wrap_effects( + Event::AddressGossiper, + self.address_gossiper + .handle_event(effect_builder, rng, ev.into()), + ), + Event::AddressGossiperIncoming(incoming) => reactor::wrap_effects( + Event::AddressGossiper, + self.address_gossiper + .handle_event(effect_builder, rng, incoming.into()), + ), + Event::BlocklistAnnouncement(_announcement) => Effects::new(), } } - fn maybe_exit(&self) -> Option { - unimplemented!() + fn new( + cfg: Self::Config, + _chainspec: Arc, + _chainspec_raw_bytes: Arc, + our_identity: Identity, + registry: &Registry, + _event_queue: EventQueueHandle, + rng: &mut NodeRng, + ) -> anyhow::Result<(Self, Effects)> { + let secret_key = SecretKey::random(rng); + let allow_handshake = cfg.node.sync_handling != SyncHandling::Isolated; + let mut net = Network::new( + cfg.network.clone(), + our_identity, + None, + registry, + ChainInfo::create_for_testing(), + ValidatorMatrix::new_with_validator(Arc::new(secret_key)), + allow_handshake, + )?; + let gossiper_config = gossiper::Config::new_with_small_timeouts(); + let address_gossiper = Gossiper::<{ GossipedAddress::ID_IS_COMPLETE_ITEM }, _>::new( + "address_gossiper", + gossiper_config, + registry, + )?; + + net.start_initialization(); + let effects = smallvec![async { smallvec![Event::Net(NetworkEvent::Initialize)] }.boxed()]; + + Ok(( + TestReactor { + net, + address_gossiper, + }, + effects, + )) } } impl NetworkedReactor for TestReactor { - type NodeId = NodeId; - fn node_id(&self) -> NodeId { - self.network_component.node_id() + self.net.node_id() } } impl Finalize for TestReactor { fn finalize(self) -> futures::future::BoxFuture<'static, ()> { - self.network_component.finalize() + self.net.finalize() } } -/// Checks whether or not a given network with a unhealthy node is completely connected. +/// Checks whether or not a given network with potentially blocked nodes is completely connected. 
fn network_is_complete( blocklist: &HashSet, nodes: &HashMap>>, ) -> bool { - // We need at least one node. - if nodes.is_empty() { - return false; - } - - if nodes.len() == 1 { - let nodes = &nodes.values().collect::>(); - let network_component = &nodes[0].reactor().inner().network_component; - if network_is_isolated( - &*network_component - .known_addresses_mut - .lock() - .expect("Could not lock known_addresses_mut"), - ) { - return true; - } - } + // Collect expected nodes. + let expected: HashSet<_> = nodes + .keys() + .filter(|&node_id| !blocklist.contains(node_id)) + .copied() + .collect(); for (node_id, node) in nodes { - if blocklist.contains(node_id) { - // ignore blocklisted node - continue; + let net = &node.reactor().inner().net; + // TODO: Ensure the connections are symmetrical. + let peers: HashSet<_> = net.peers().into_keys().collect(); + + let mut missing = expected.difference(&peers); + + if let Some(first_missing) = missing.next() { + // We only allow loopbacks to be missing. + if first_missing != node_id { + return false; + } } - if node.reactor().inner().network_component.peers.is_empty() { + + if missing.next().is_some() { + // We have at least two missing, which cannot be. return false; } } - true } /// Checks whether or not a given network has at least one other node in it -fn network_started(net: &Network) -> bool { +fn network_started(net: &TestingNetwork) -> bool { net.nodes() .iter() - .all(|(_, runner)| !runner.reactor().inner().network_component.peers.is_empty()) + .map(|(_, runner)| runner.reactor().inner().net.peers()) + .all(|peers| !peers.is_empty()) } /// Run a two-node network five times. @@ -210,11 +340,6 @@ fn network_started(net: &Network) -> bool { /// Ensures that network cleanup and basic networking works. #[tokio::test] async fn run_two_node_network_five_times() { - // If the env var "CASPER_ENABLE_LIBP2P_NET" is not defined, exit without running the test. 
- if env::var(ENABLE_LIBP2P_NET_ENV_VAR).is_err() { - return; - } - let mut rng = crate::new_rng(); // The networking port used by the tests for the root node. @@ -225,16 +350,18 @@ async fn run_two_node_network_five_times() { for i in 0..5 { info!("two-network test round {}", i); - let mut net = Network::new(); + let mut net = TestingNetwork::new(); let start = Instant::now(); - net.add_node_with_config( - Config::default_local_net_first_node(first_node_port), - &mut rng, - ) - .await - .unwrap(); - net.add_node_with_config(Config::default_local_net(first_node_port), &mut rng) + + let cfg = Config::default().with_network_config( + network::Config::default_local_net_first_node(first_node_port), + ); + net.add_node_with_config(cfg, &mut rng).await.unwrap(); + + let cfg = Config::default() + .with_network_config(network::Config::default_local_net(first_node_port)); + net.add_node_with_config(cfg.clone(), &mut rng) .await .unwrap(); let end = Instant::now(); @@ -274,18 +401,14 @@ async fn run_two_node_network_five_times() { /// Sanity check that we can bind to a real network. /// /// Very unlikely to ever fail on a real machine. +#[cfg(not(target_os = "macos"))] #[tokio::test] async fn bind_to_real_network_interface() { - // If the env var "CASPER_ENABLE_LIBP2P_NET" is not defined, exit without running the test. 
- if env::var(ENABLE_LIBP2P_NET_ENV_VAR).is_err() { - return; - } - init_logging(); let mut rng = crate::new_rng(); - let iface = datalink::interfaces() + let iface = pnet::datalink::interfaces() .into_iter() .find(|net| !net.ips.is_empty() && !net.ips.iter().any(|ip| ip.ip().is_loopback())) .expect("could not find a single networking interface that isn't localhost"); @@ -298,12 +421,11 @@ async fn bind_to_real_network_interface() { .ip(); let port = testing::unused_port_on_localhost(); - let local_net_config = Config::new((local_addr, port).into(), true); + let cfg = + Config::default().with_network_config(network::Config::new((local_addr, port).into())); - let mut net = Network::::new(); - net.add_node_with_config(local_net_config, &mut rng) - .await - .unwrap(); + let mut net = TestingNetwork::::new(); + net.add_node_with_config(cfg, &mut rng).await.unwrap(); // The network should be fully connected. let timeout = Duration::from_secs(2); @@ -321,11 +443,6 @@ async fn bind_to_real_network_interface() { /// Check that a network of varying sizes will connect all nodes properly. #[tokio::test] async fn check_varying_size_network_connects() { - // If the env var "CASPER_ENABLE_LIBP2P_NET" is not defined, exit without running the test. - if env::var(ENABLE_LIBP2P_NET_ENV_VAR).is_err() { - return; - } - init_logging(); let mut rng = crate::new_rng(); @@ -334,21 +451,20 @@ async fn check_varying_size_network_connects() { for &number_of_nodes in &[2u16, 3, 5, 9, 15] { let timeout = Duration::from_secs(3 * number_of_nodes as u64); - let mut net = Network::new(); + let mut net = TestingNetwork::new(); // Pick a random port in the higher ranges that is likely to be unused. 
let first_node_port = testing::unused_port_on_localhost(); + let cfg = Config::default().with_network_config( + network::Config::default_local_net_first_node(first_node_port), + ); - let _ = net - .add_node_with_config( - Config::default_local_net_first_node(first_node_port), - &mut rng, - ) - .await - .unwrap(); + let _ = net.add_node_with_config(cfg, &mut rng).await.unwrap(); + let cfg = Config::default() + .with_network_config(network::Config::default_local_net(first_node_port)); for _ in 1..number_of_nodes { - net.add_node_with_config(Config::default_local_net(first_node_port), &mut rng) + net.add_node_with_config(cfg.clone(), &mut rng) .await .unwrap(); } @@ -369,7 +485,56 @@ async fn check_varying_size_network_connects() { "network did not stay connected after being settled" ); + // Now the network should have an appropriate number of peers. + // This test will run multiple times, so ensure we cleanup all ports. net.finalize().await; } } + +/// Check that a network of varying sizes will connect all nodes properly. +#[tokio::test] +async fn ensure_peers_metric_is_correct() { + init_logging(); + + let mut rng = crate::new_rng(); + + // Larger networks can potentially become more unreliable, so we try with small sizes only. + for &number_of_nodes in &[2u16, 3, 5] { + let timeout = Duration::from_secs(3 * number_of_nodes as u64); + + let mut net = TestingNetwork::new(); + + // Pick a random port in the higher ranges that is likely to be unused. 
+ let first_node_port = testing::unused_port_on_localhost(); + + let cfg = Config::default().with_network_config( + network::Config::default_local_net_first_node(first_node_port), + ); + + let _ = net.add_node_with_config(cfg, &mut rng).await.unwrap(); + + let cfg = Config::default() + .with_network_config(network::Config::default_local_net(first_node_port)); + + for _ in 1..number_of_nodes { + net.add_node_with_config(cfg.clone(), &mut rng) + .await + .unwrap(); + } + + net.settle_on( + &mut rng, + |nodes: &Nodes| { + nodes.values().all(|runner| { + runner.reactor().inner().net.net_metrics.peers.get() + == number_of_nodes as i64 - 1 + }) + }, + timeout, + ) + .await; + + net.finalize().await; + } +} diff --git a/node/src/components/network/tests_bulk_gossip.rs b/node/src/components/network/tests_bulk_gossip.rs deleted file mode 100644 index 637a770abc..0000000000 --- a/node/src/components/network/tests_bulk_gossip.rs +++ /dev/null @@ -1,275 +0,0 @@ -#![allow(unreachable_code)] // TODO: Figure out why this warning triggers. - -use std::{ - collections::{HashMap, HashSet}, - convert::TryFrom, - env, fmt, - fmt::{Debug, Display, Formatter}, - sync::Arc, - thread, - time::Duration, -}; - -use libp2p::kad::kbucket::K_VALUE; -use rand::{distributions::Standard, Rng}; -use serde::{Deserialize, Serialize}; -use tracing::info; - -use casper_node_macros::reactor; - -use super::ENABLE_LIBP2P_NET_ENV_VAR; -use crate::{ - components::{ - collector::Collectable, - network::{Config as NetworkComponentConfig, NetworkIdentity}, - }, - effect::EffectExt, - reactor::Runner, - testing::{ - self, - network::{Network as TestingNetwork, NetworkedReactor}, - ConditionCheckReactor, TestRng, - }, - types::{Chainspec, NodeId}, - utils::read_env, -}; - -// Reactor for load testing, whose networking component just sends dummy payloads around. 
-reactor!(LoadTestingReactor { - type Config = TestReactorConfig; - - components: { - net = has_effects Network::( - event_queue, cfg.network_config, registry, NetworkIdentity::new(), &cfg.chainspec, false - ); - collector = infallible Collector::(); - } - - events: { - net = Event; - collector = Event; - } - - requests: { - NetworkRequest -> net; - } - - announcements: { - NetworkAnnouncement -> [collector]; - } -}); - -impl NetworkedReactor for LoadTestingReactor { - type NodeId = NodeId; - - fn node_id(&self) -> Self::NodeId { - self.net.node_id() - } -} - -/// Configuration for the test reactor. -#[derive(Debug)] -pub struct TestReactorConfig { - /// The fixed chainspec used in testing. - chainspec: Arc, - /// Network configuration used in testing. - network_config: NetworkComponentConfig, -} - -/// A dummy payload. -#[derive(Clone, Eq, Deserialize, Hash, PartialEq, Serialize)] -pub struct DummyPayload(Vec); - -const DUMMY_PAYLOAD_ID_LEN: usize = 16; - -/// ID of a dummy payload. -type DummyPayloadId = [u8; DUMMY_PAYLOAD_ID_LEN]; - -impl DummyPayload { - /// Creates a new randomly generated payload. - /// - /// # Panics - /// - /// Panics if `sz` is less than `DUMMY_PAYLOAD_ID_LEN` bytes. - fn random_with_size(rng: &mut TestRng, sz: usize) -> Self { - assert!( - sz >= DUMMY_PAYLOAD_ID_LEN, - "payload must be large enough to derive ID" - ); - - DummyPayload(rng.sample_iter(Standard).take(sz).collect()) - } - - /// Returns the ID of the payload. 
- fn id(&self) -> DummyPayloadId { - TryFrom::try_from(&self.0[..DUMMY_PAYLOAD_ID_LEN]) - .expect("could not get ID data from buffer slice") - } -} - -impl Collectable for DummyPayload { - type CollectedType = DummyPayloadId; - - fn into_collectable(self) -> Self::CollectedType { - self.id() - } -} - -impl Debug for DummyPayload { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - write!( - f, - "payload ({} bytes: {:?}...)", - self.0.len(), - &self.0[0..self.0.len().min(10)] - ) - } -} - -impl Display for DummyPayload { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - Debug::fmt(self, f) - } -} - -// TODO - investigate why this fails on CI. -// DONE - probably because we are not running with --release! -#[ignore] -#[tokio::test] -async fn send_large_message_across_network() { - testing::init_logging(); - - if env::var(ENABLE_LIBP2P_NET_ENV_VAR).is_err() { - eprintln!("{} set, skipping test", ENABLE_LIBP2P_NET_ENV_VAR); - return; - } - - // This can, on a decent machine, be set to 30, 50, maybe even 100 nodes. The default is set to - // 5 to avoid overloading CI. - let node_count: usize = read_env("TEST_NODE_COUNT").unwrap_or(5); - - // Fully connecting a 20 node network takes ~ 3 seconds. This should be ample time for gossip - // and connecting. - let timeout = Duration::from_secs(60); - let payload_size: usize = read_env("TEST_PAYLOAD_SIZE").unwrap_or(1024 * 1024 * 4); - let payload_count: usize = read_env("TEST_PAYLOAD_COUNT").unwrap_or(1); - - let mut rng = crate::new_rng(); - - // Port for first node, other will connect to it. - let first_node_port = testing::unused_port_on_localhost() + 1; - - let mut net = TestingNetwork::::new(); - let chainspec = Arc::new(Chainspec::random(&mut rng)); - - // Create the root node. 
- let cfg = TestReactorConfig { - chainspec: Arc::clone(&chainspec), - network_config: NetworkComponentConfig::default_local_net_first_node(first_node_port), - }; - - net.add_node_with_config(cfg, &mut rng).await.unwrap(); - - // Hack to get network component to connect. This gives the libp2p thread (which is independent - // of cranking) a little time to bind to the socket. - thread::sleep(Duration::from_secs(2)); - - // Create `node_count-1` additional node instances. - for _ in 1..node_count { - let cfg = TestReactorConfig { - chainspec: Arc::clone(&chainspec), - network_config: NetworkComponentConfig::default_local_net(first_node_port), - }; - - net.add_node_with_config(cfg, &mut rng).await.unwrap(); - } - - info!("Network setup, waiting for discovery to complete"); - net.settle_on(&mut rng, network_online, timeout).await; - info!("Discovery complete"); - - // At this point each node has at least one other peer. Assuming no split, we can now start - // gossiping large payloads. We gossip one on each node. - let node_ids: Vec<_> = net.nodes().keys().cloned().collect(); - for (index, sender) in node_ids.iter().enumerate() { - // Clear all collectors at the beginning of a round. - net.reactors_mut() - .for_each(|reactor| reactor.collector.payloads.clear()); - - let mut dummy_payloads = HashSet::new(); - let mut dummy_payload_ids = HashSet::new(); - - // Prepare a set of dummy payloads. - for _ in 0..payload_count { - let payload = DummyPayload::random_with_size(&mut rng, payload_size); - dummy_payload_ids.insert(payload.id()); - dummy_payloads.insert(payload); - } - - for dummy_payload in &dummy_payloads { - // Calling `broadcast_message` actually triggers libp2p gossping. 
- net.process_injected_effect_on(sender, |effect_builder| { - effect_builder - .broadcast_message(dummy_payload.clone()) - .ignore() - }) - .await; - } - - info!(?sender, num_payloads = %dummy_payloads.len(), round=index, total_rounds=node_ids.len(), - "Started broadcast/gossip of payloads, waiting for all nodes to receive it"); - net.settle_on( - &mut rng, - others_received(dummy_payload_ids, *sender), - timeout, - ) - .await; - info!(?sender, "Completed gossip test for sender") - } -} - -/// Checks if all nodes are connected to at least one other node. -fn network_online( - nodes: &HashMap>>, -) -> bool { - assert!( - nodes.len() >= 2, - "cannot check for an online network with less than 3 nodes" - ); - - let k_value = usize::from(K_VALUE); - - // Sanity check of K_VALUE. - assert!( - k_value >= 7, - "K_VALUE is really small, expected it to be at least 7" - ); - - // The target of known nodes to go for. This has a hard bound of `K_VALUE`, since if all nodes - // end up in the same bucket, we will start evicting them. In general, we go for K_VALUE/2 for - // reasonable interconnection, or the network size - 1, which is another bound. - let known_nodes_target = (k_value / 2).min(nodes.len() - 1); - - // Checks if all nodes have reached the known nodes target. - nodes - .values() - .all(|runner| runner.reactor().inner().net.seen_peers().len() >= known_nodes_target) -} - -/// Checks whether or not every node except `sender` on the network received the given payload. -fn others_received( - payloads: HashSet, - sender: NodeId, -) -> impl Fn(&HashMap>>) -> bool { - move |nodes| { - nodes - .values() - // We're only interested in the inner reactor. - .map(|runner| runner.reactor().inner()) - // Skip the sender. - .filter(|reactor| reactor.node_id() != sender) - // Ensure others all have received the payload. - // Note: `std` HashSet short circuits on length, so this should be fine. 
- .all(|reactor| reactor.collector.payloads == payloads) - } -} diff --git a/node/src/components/networking_metrics.rs b/node/src/components/networking_metrics.rs deleted file mode 100644 index b68317d204..0000000000 --- a/node/src/components/networking_metrics.rs +++ /dev/null @@ -1,104 +0,0 @@ -use prometheus::{IntCounter, IntGauge, Registry}; - -use crate::unregister_metric; - -/// Network-type agnostic networking metrics. -pub(super) struct NetworkingMetrics { - /// How often a request was made by a component to broadcast. - pub(super) broadcast_requests: IntCounter, - /// How often a request to send a message directly to a peer was made. - pub(super) direct_message_requests: IntCounter, - /// Current number of open connections. - pub(super) open_connections: IntGauge, - /// Number of messages still waiting to be sent out (broadcast and direct). - pub(super) queued_messages: IntGauge, - /// Number of connected peers. - pub(super) peers: IntGauge, - - // Potentially temporary metrics, not supported by all networking components: - /// Number of do-nothing futures that have not finished executing for read requests. - pub(super) read_futures_in_flight: prometheus::Gauge, - /// Number of do-nothing futures created total (read). - pub(super) read_futures_total: prometheus::Gauge, - /// Number of do-nothing futures that have not finished executing for write responses. - pub(super) write_futures_in_flight: prometheus::Gauge, - /// Number of do-nothing futures created total (write). - pub(super) write_futures_total: prometheus::Gauge, - - /// Registry instance. - registry: Registry, -} - -impl NetworkingMetrics { - /// Creates a new instance of networking metrics. 
- pub(super) fn new(registry: &Registry) -> Result { - let broadcast_requests = - IntCounter::new("net_broadcast_requests", "number of broadcasting requests")?; - let direct_message_requests = IntCounter::new( - "net_direct_message_requests", - "number of requests to send a message directly to a peer", - )?; - let open_connections = - IntGauge::new("net_open_connections", "number of established connections")?; - let queued_messages = IntGauge::new( - "net_queued_direct_messages", - "number of messages waiting to be sent out", - )?; - let peers = IntGauge::new("peers", "Number of connected peers.")?; - - let read_futures_in_flight = prometheus::Gauge::new( - "owm_read_futures_in_flight", - "number of do-nothing futures in flight created by `Codec::read_response`", - )?; - let read_futures_total = prometheus::Gauge::new( - "owm_read_futures_total", - "number of do-nothing futures total created by `Codec::read_response`", - )?; - let write_futures_in_flight = prometheus::Gauge::new( - "owm_write_futures_in_flight", - "number of do-nothing futures in flight created by `Codec::write_response`", - )?; - let write_futures_total = prometheus::Gauge::new( - "owm_write_futures_total", - "number of do-nothing futures total created by `Codec::write_response`", - )?; - - registry.register(Box::new(broadcast_requests.clone()))?; - registry.register(Box::new(direct_message_requests.clone()))?; - registry.register(Box::new(open_connections.clone()))?; - registry.register(Box::new(queued_messages.clone()))?; - registry.register(Box::new(peers.clone()))?; - - registry.register(Box::new(read_futures_in_flight.clone()))?; - registry.register(Box::new(read_futures_total.clone()))?; - registry.register(Box::new(write_futures_in_flight.clone()))?; - registry.register(Box::new(write_futures_total.clone()))?; - - Ok(NetworkingMetrics { - broadcast_requests, - direct_message_requests, - open_connections, - queued_messages, - peers, - read_futures_in_flight, - read_futures_total, - 
write_futures_in_flight, - write_futures_total, - registry: registry.clone(), - }) - } -} - -impl Drop for NetworkingMetrics { - fn drop(&mut self) { - unregister_metric!(self.registry, self.broadcast_requests); - unregister_metric!(self.registry, self.direct_message_requests); - unregister_metric!(self.registry, self.open_connections); - unregister_metric!(self.registry, self.queued_messages); - unregister_metric!(self.registry, self.peers); - unregister_metric!(self.registry, self.read_futures_in_flight); - unregister_metric!(self.registry, self.read_futures_total); - unregister_metric!(self.registry, self.write_futures_in_flight); - unregister_metric!(self.registry, self.write_futures_total); - } -} diff --git a/node/src/components/rest_server.rs b/node/src/components/rest_server.rs index b80c4c0a48..bc8e786ab3 100644 --- a/node/src/components/rest_server.rs +++ b/node/src/components/rest_server.rs @@ -14,103 +14,125 @@ //! //! Currently this component supports two endpoints, each of which takes no arguments: //! /status : a human readable JSON equivalent of the info-get-status rpc method. -//! example: curl -X GET 'http://:8888/status' +//! example: curl -X GET 'http://IP:8888/status' //! /metrics : time series data collected from the internals of the node being queried. -//! example: curl -X GET 'http://:8888/metrics' +//! 
example: curl -X GET 'http://IP:8888/metrics' mod config; +mod docs; mod event; mod filters; mod http_server; +mod info; -use std::{convert::Infallible, fmt::Debug}; +use std::{net::SocketAddr, sync::Arc}; use datasize::DataSize; -use futures::{future::BoxFuture, join, FutureExt}; +use futures::join; +use once_cell::sync::OnceCell; use tokio::{sync::oneshot, task::JoinHandle}; -use tracing::{debug, error, warn}; +use tracing::{error, info, warn}; + +#[cfg(test)] +use futures::{future::BoxFuture, FutureExt}; + +#[cfg(test)] +use tracing::debug; use casper_types::ProtocolVersion; -use super::Component; +use super::{Component, ComponentState, InitializedComponent}; use crate::{ + components::PortBoundComponent, effect::{ requests::{ - ChainspecLoaderRequest, ConsensusRequest, MetricsRequest, NetworkInfoRequest, - StorageRequest, + BlockSynchronizerRequest, ChainspecRawBytesRequest, ConsensusRequest, MetricsRequest, + NetworkInfoRequest, ReactorInfoRequest, RestRequest, StorageRequest, + UpgradeWatcherRequest, }, EffectBuilder, EffectExt, Effects, }, - reactor::Finalize, - types::{NodeId, StatusFeed}, + reactor::main_reactor::MainEvent, + types::{ChainspecInfo, StatusFeed}, utils::{self, ListeningError}, NodeRng, }; - -use crate::effect::requests::RestRequest; pub use config::Config; +pub use docs::DocExample; +pub(crate) use docs::DOCS_EXAMPLE_PROTOCOL_VERSION; pub(crate) use event::Event; +pub(crate) use info::{GetChainspecResult, GetValidatorChangesResult}; + +const COMPONENT_NAME: &str = "rest_server"; /// A helper trait capturing all of this components Request type dependencies. 
-pub trait ReactorEventT: +pub(crate) trait ReactorEventT: From - + From> - + From> + + From + + From + From - + From + + From + + From + From + From + + From + + From + Send { } impl ReactorEventT for REv where REv: From - + From> - + From> + + From + + From + From - + From + + From + + From + From + From + + From + + From + Send + 'static { } #[derive(DataSize, Debug)] -pub(crate) struct RestServer { +pub(crate) struct InnerRestServer { /// When the message is sent, it signals the server loop to exit cleanly. #[data_size(skip)] + #[allow(dead_code)] shutdown_sender: oneshot::Sender<()>, + /// The address the server is listening on. + local_addr: Arc>, /// The task handle which will only join once the server loop has exited. #[data_size(skip)] + #[allow(dead_code)] server_join_handle: Option>, + /// The network name, as specified in the chainspec + network_name: String, } -impl RestServer { - pub(crate) fn new( - config: Config, - effect_builder: EffectBuilder, - api_version: ProtocolVersion, - ) -> Result - where - REv: ReactorEventT, - { - let (shutdown_sender, shutdown_receiver) = oneshot::channel::<()>(); +#[derive(DataSize, Debug)] +pub(crate) struct RestServer { + /// The component state. + state: ComponentState, + config: Config, + api_version: ProtocolVersion, + network_name: String, + /// Inner server is present only when enabled in the config. 
+ inner_rest: Option, +} - let builder = utils::start_listening(&config.address)?; - let server_join_handle = tokio::spawn(http_server::run( - builder, - effect_builder, +impl RestServer { + pub(crate) fn new(config: Config, api_version: ProtocolVersion, network_name: String) -> Self { + RestServer { + state: ComponentState::Uninitialized, + config, api_version, - shutdown_receiver, - config.qps_limit, - )); - - Ok(RestServer { - shutdown_sender, - server_join_handle: Some(server_join_handle), - }) + network_name, + inner_rest: None, + } } } @@ -119,7 +141,6 @@ where REv: ReactorEventT, { type Event = Event; - type ConstructionError = Infallible; fn handle_event( &mut self, @@ -127,48 +148,249 @@ where _rng: &mut NodeRng, event: Self::Event, ) -> Effects { - match event { - Event::RestRequest(RestRequest::GetStatus { responder }) => async move { - let (last_added_block, peers, chainspec_info, consensus_status) = join!( - effect_builder.get_highest_block_from_storage(), - effect_builder.network_peers(), - effect_builder.get_chainspec_info(), - effect_builder.consensus_status() + match &self.state { + ComponentState::Fatal(msg) => { + error!( + msg, + ?event, + name = >::name(self), + "should not handle this event when this component has fatal error" + ); + Effects::new() + } + ComponentState::Uninitialized => { + warn!( + ?event, + name = >::name(self), + "should not handle this event when component is uninitialized" ); - let status_feed = - StatusFeed::new(last_added_block, peers, chainspec_info, consensus_status); - responder.respond(status_feed).await; + Effects::new() } - .ignore(), - Event::RestRequest(RestRequest::GetMetrics { responder }) => effect_builder - .get_metrics() - .event(move |text| Event::GetMetricsResult { + ComponentState::Initializing => match event { + Event::Initialize => { + let (effects, state) = self.bind(self.config.enable_server, effect_builder); + >::set_state(self, state); + effects + } + Event::RestRequest(_) | Event::GetMetricsResult 
{ .. } => { + warn!( + ?event, + name = >::name(self), + "should not handle this event when component is pending initialization" + ); + Effects::new() + } + }, + ComponentState::Initialized => match event { + Event::Initialize => { + error!( + ?event, + name = >::name(self), + "component already initialized" + ); + Effects::new() + } + Event::RestRequest(RestRequest::Status { responder }) => { + let network_name = self.network_name.clone(); + async move { + let ( + last_added_block, + peers, + next_upgrade, + consensus_status, + reactor_state, + last_progress, + node_uptime, + available_block_range, + block_sync, + latest_switch_block_header, + ) = join!( + effect_builder.get_highest_complete_block_from_storage(), + effect_builder.network_peers(), + effect_builder.get_next_upgrade(), + effect_builder.consensus_status(), + effect_builder.get_reactor_state(), + effect_builder.get_last_progress(), + effect_builder.get_uptime(), + effect_builder.get_available_block_range_from_storage(), + effect_builder.get_block_synchronizer_status(), + effect_builder.get_latest_switch_block_header_from_storage() + ); + let starting_state_root_hash = effect_builder + .get_block_header_at_height_from_storage( + available_block_range.low(), + true, + ) + .await + .map(|header| *header.state_root_hash()) + .unwrap_or_default(); + let status_feed = StatusFeed::new( + last_added_block, + peers, + ChainspecInfo::new(network_name, next_upgrade), + consensus_status, + node_uptime.into(), + reactor_state, + last_progress.into_inner(), + available_block_range, + block_sync, + starting_state_root_hash, + latest_switch_block_header.map(|header| header.block_hash()), + ); + responder.respond(status_feed).await; + } + } + .ignore(), + Event::RestRequest(RestRequest::Metrics { responder }) => effect_builder + .get_metrics() + .event(move |text| Event::GetMetricsResult { + text, + main_responder: responder, + }), + Event::GetMetricsResult { text, - main_responder: responder, - }), - 
Event::GetMetricsResult { - text, - main_responder, - } => main_responder.respond(text).ignore(), + main_responder, + } => main_responder.respond(text).ignore(), + }, } } + + fn name(&self) -> &str { + COMPONENT_NAME + } +} + +impl InitializedComponent for RestServer +where + REv: ReactorEventT, +{ + fn state(&self) -> &ComponentState { + &self.state + } + + fn set_state(&mut self, new_state: ComponentState) { + info!( + ?new_state, + name = >::name(self), + "component state changed" + ); + + self.state = new_state; + } +} + +impl PortBoundComponent for RestServer +where + REv: ReactorEventT, +{ + type Error = ListeningError; + type ComponentEvent = Event; + + fn listen( + &mut self, + effect_builder: EffectBuilder, + ) -> Result, Self::Error> { + let cfg = &self.config; + let (shutdown_sender, shutdown_receiver) = oneshot::channel::<()>(); + + let builder = utils::start_listening(&cfg.address)?; + let local_addr: Arc> = Default::default(); + + let server_join_handle = if cfg.cors_origin.is_empty() { + Some(tokio::spawn(http_server::run( + builder, + effect_builder, + self.api_version, + shutdown_receiver, + cfg.qps_limit, + local_addr.clone(), + ))) + } else { + Some(tokio::spawn(http_server::run_with_cors( + builder, + effect_builder, + self.api_version, + shutdown_receiver, + cfg.qps_limit, + local_addr.clone(), + cfg.cors_origin.clone(), + ))) + }; + + let network_name = self.network_name.clone(); + self.inner_rest = Some(InnerRestServer { + local_addr, + shutdown_sender, + server_join_handle, + network_name, + }); + + Ok(Effects::new()) + } } -impl Finalize for RestServer { - fn finalize(mut self) -> BoxFuture<'static, ()> { +#[cfg(test)] +impl crate::reactor::Finalize for RestServer { + fn finalize(self) -> BoxFuture<'static, ()> { async { - let _ = self.shutdown_sender.send(()); + if let Some(mut rest_server) = self.inner_rest { + let _ = rest_server.shutdown_sender.send(()); - // Wait for the server to exit cleanly. 
- if let Some(join_handle) = self.server_join_handle.take() { - match join_handle.await { - Ok(_) => debug!("rest server exited cleanly"), - Err(error) => error!(%error, "could not join rest server task cleanly"), + // Wait for the server to exit cleanly. + if let Some(join_handle) = rest_server.server_join_handle.take() { + match join_handle.await { + Ok(_) => debug!("rest server exited cleanly"), + Err(error) => error!(%error, "could not join rest server task cleanly"), + } + } else { + warn!("rest server shutdown while already shut down") } } else { - warn!("rest server shutdown while already shut down") + info!("rest server was disabled in config, no shutdown performed") } } .boxed() } } + +#[cfg(test)] +mod schema_tests { + use crate::{testing::assert_schema, types::GetStatusResult}; + use schemars::schema_for; + + use super::{GetChainspecResult, GetValidatorChangesResult}; + + #[test] + fn json_schema_status_check() { + let schema_path = format!( + "{}/../resources/test/rest_schema_status.json", + env!("CARGO_MANIFEST_DIR") + ); + let pretty = serde_json::to_string_pretty(&schema_for!(GetStatusResult)).unwrap(); + assert_schema(schema_path, pretty); + } + + #[test] + fn json_schema_validator_changes_check() { + let schema_path = format!( + "{}/../resources/test/rest_schema_validator_changes.json", + env!("CARGO_MANIFEST_DIR") + ); + assert_schema( + schema_path, + serde_json::to_string_pretty(&schema_for!(GetValidatorChangesResult)).unwrap(), + ); + } + + #[test] + fn json_schema_chainspec_bytes_check() { + let schema_path = format!( + "{}/../resources/test/rest_schema_chainspec_bytes.json", + env!("CARGO_MANIFEST_DIR") + ); + assert_schema( + schema_path, + serde_json::to_string_pretty(&schema_for!(GetChainspecResult)).unwrap(), + ); + } +} diff --git a/node/src/components/rest_server/config.rs b/node/src/components/rest_server/config.rs index 2a05a76c56..d98710269e 100644 --- a/node/src/components/rest_server/config.rs +++ 
b/node/src/components/rest_server/config.rs @@ -7,25 +7,35 @@ use serde::{Deserialize, Serialize}; const DEFAULT_ADDRESS: &str = "0.0.0.0:0"; /// Default rate limit in qps. const DEFAULT_QPS_LIMIT: u64 = 100; +/// Default CORS origin. +const DEFAULT_CORS_ORIGIN: &str = ""; /// REST HTTP server configuration. #[derive(Clone, DataSize, Debug, Deserialize, Serialize)] // Disallow unknown fields to ensure config files and command-line overrides contain valid keys. #[serde(deny_unknown_fields)] pub struct Config { + /// Setting to enable the HTTP server. + pub enable_server: bool, + /// Address to bind REST HTTP server to. pub address: String, /// Max rate limit in qps. pub qps_limit: u64, + + /// CORS origin. + pub cors_origin: String, } impl Config { /// Creates a default instance for `RestServer`. pub fn new() -> Self { Config { + enable_server: true, address: DEFAULT_ADDRESS.to_string(), qps_limit: DEFAULT_QPS_LIMIT, + cors_origin: DEFAULT_CORS_ORIGIN.to_string(), } } } diff --git a/node/src/components/rest_server/docs.rs b/node/src/components/rest_server/docs.rs new file mode 100644 index 0000000000..1c6ae930da --- /dev/null +++ b/node/src/components/rest_server/docs.rs @@ -0,0 +1,40 @@ +use casper_types::{ProtocolVersion, PublicKey, SecretKey, Timestamp}; +use once_cell::sync::Lazy; + +use crate::types::InternalEraReport; + +pub(crate) const DOCS_EXAMPLE_PROTOCOL_VERSION: ProtocolVersion = + ProtocolVersion::from_parts(1, 5, 3); + +/// A trait used to generate a static hardcoded example of `Self`. +pub trait DocExample { + /// Generates a hardcoded example of `Self`. 
+ fn doc_example() -> &'static Self; +} + +impl DocExample for Timestamp { + fn doc_example() -> &'static Self { + Timestamp::example() + } +} + +static INTERNAL_ERA_REPORT: Lazy = Lazy::new(|| { + let secret_key_1 = SecretKey::ed25519_from_bytes([0; 32]).unwrap(); + let public_key_1 = PublicKey::from(&secret_key_1); + let equivocators = vec![public_key_1]; + + let secret_key_3 = SecretKey::ed25519_from_bytes([2; 32]).unwrap(); + let public_key_3 = PublicKey::from(&secret_key_3); + let inactive_validators = vec![public_key_3]; + + InternalEraReport { + equivocators, + inactive_validators, + } +}); + +impl DocExample for InternalEraReport { + fn doc_example() -> &'static Self { + &INTERNAL_ERA_REPORT + } +} diff --git a/node/src/components/rest_server/event.rs b/node/src/components/rest_server/event.rs index 8fe9a0565c..49fade40b0 100644 --- a/node/src/components/rest_server/event.rs +++ b/node/src/components/rest_server/event.rs @@ -1,23 +1,18 @@ -use std::{ - fmt::{self, Display, Formatter}, - mem, -}; +use std::fmt::{self, Display, Formatter}; use derive_more::From; use static_assertions::const_assert; -use crate::{ - effect::{requests::RestRequest, Responder}, - types::NodeId, -}; +use crate::effect::{requests::RestRequest, Responder}; -const _REST_EVENT_SIZE: usize = mem::size_of::(); +const _REST_EVENT_SIZE: usize = size_of::(); const_assert!(_REST_EVENT_SIZE < 89); #[derive(Debug, From)] -pub enum Event { +pub(crate) enum Event { + Initialize, #[from] - RestRequest(RestRequest), + RestRequest(RestRequest), GetMetricsResult { text: Option, main_responder: Responder>, @@ -27,6 +22,7 @@ pub enum Event { impl Display for Event { fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { match self { + Event::Initialize => write!(formatter, "initialize"), Event::RestRequest(request) => write!(formatter, "{}", request), Event::GetMetricsResult { text, .. 
} => match text { Some(txt) => write!(formatter, "get metrics ({} bytes)", txt.len()), diff --git a/node/src/components/rest_server/filters.rs b/node/src/components/rest_server/filters.rs index 3d63bed2e7..a1fd0cb000 100644 --- a/node/src/components/rest_server/filters.rs +++ b/node/src/components/rest_server/filters.rs @@ -12,7 +12,7 @@ use warp::{ use casper_types::ProtocolVersion; -use super::ReactorEventT; +use super::{GetChainspecResult, GetValidatorChangesResult, ReactorEventT}; use crate::{ effect::{requests::RestRequest, EffectBuilder}, reactor::QueueKind, @@ -25,6 +25,12 @@ pub const STATUS_API_PATH: &str = "status"; /// The metrics URL path. pub const METRICS_API_PATH: &str = "metrics"; +/// The validator information URL path. +pub const VALIDATOR_CHANGES_API_PATH: &str = "validator-changes"; + +/// The chainspec file URL path. +pub const CHAINSPEC_API_PATH: &str = "chainspec"; + pub(super) fn create_status_filter( effect_builder: EffectBuilder, api_version: ProtocolVersion, @@ -34,7 +40,7 @@ pub(super) fn create_status_filter( .and_then(move || { effect_builder .make_request( - |responder| RestRequest::GetStatus { responder }, + |responder| RestRequest::Status { responder }, QueueKind::Api, ) .map(move |status_feed| { @@ -53,7 +59,7 @@ pub(super) fn create_metrics_filter( .and_then(move || { effect_builder .make_request( - |responder| RestRequest::GetMetrics { responder }, + |responder| RestRequest::Metrics { responder }, QueueKind::Api, ) .map(|maybe_metrics| match maybe_metrics { @@ -72,3 +78,37 @@ pub(super) fn create_metrics_filter( }) .boxed() } + +pub(super) fn create_validator_changes_filter( + effect_builder: EffectBuilder, + api_version: ProtocolVersion, +) -> BoxedFilter<(Response,)> { + warp::get() + .and(warp::path(VALIDATOR_CHANGES_API_PATH)) + .and_then(move || { + effect_builder + .get_consensus_validator_changes() + .map(move |changes| { + let result = GetValidatorChangesResult::new(api_version, changes); + Ok::<_, 
Rejection>(reply::json(&result).into_response()) + }) + }) + .boxed() +} + +pub(super) fn create_chainspec_filter( + effect_builder: EffectBuilder, + api_version: ProtocolVersion, +) -> BoxedFilter<(Response,)> { + warp::get() + .and(warp::path(CHAINSPEC_API_PATH)) + .and_then(move || { + effect_builder + .get_chainspec_raw_bytes() + .map(move |chainspec_bytes| { + let result = GetChainspecResult::new(api_version, (*chainspec_bytes).clone()); + Ok::<_, Rejection>(reply::json(&result).into_response()) + }) + }) + .boxed() +} diff --git a/node/src/components/rest_server/http_server.rs b/node/src/components/rest_server/http_server.rs index 7efbd7c8b6..55de370dd3 100644 --- a/node/src/components/rest_server/http_server.rs +++ b/node/src/components/rest_server/http_server.rs @@ -1,7 +1,8 @@ -use std::{convert::Infallible, time::Duration}; +use std::{convert::Infallible, net::SocketAddr, sync::Arc, time::Duration}; use futures::{future, TryFutureExt}; use hyper::server::{conn::AddrIncoming, Builder}; +use once_cell::sync::OnceCell; use tokio::sync::oneshot; use tower::builder::ServiceBuilder; use tracing::{info, warn}; @@ -21,12 +22,21 @@ pub(super) async fn run( api_version: ProtocolVersion, shutdown_receiver: oneshot::Receiver<()>, qps_limit: u64, + local_addr: Arc>, ) { // REST filters. let rest_status = filters::create_status_filter(effect_builder, api_version); let rest_metrics = filters::create_metrics_filter(effect_builder); + let rest_validator_changes = + filters::create_validator_changes_filter(effect_builder, api_version); + let rest_chainspec_filter = filters::create_chainspec_filter(effect_builder, api_version); - let service = warp::service(rest_status.or(rest_metrics)); + let service = warp::service( + rest_status + .or(rest_metrics) + .or(rest_validator_changes) + .or(rest_chainspec_filter), + ); // Start the server, passing a oneshot receiver to allow the server to be shut down gracefully. 
let make_svc = @@ -37,11 +47,69 @@ pub(super) async fn run( .service(make_svc); let server = builder.serve(rate_limited_service); + if let Err(err) = local_addr.set(server.local_addr()) { + warn!(%err, "failed to set local addr for reflection"); + } info!(address = %server.local_addr(), "started REST server"); // Shutdown the server gracefully. let _ = server - .with_graceful_shutdown(async { + .with_graceful_shutdown(async move { + shutdown_receiver.await.ok(); + }) + .map_err(|error| { + warn!(%error, "error running REST server"); + }) + .await; +} + +/// Run the REST HTTP server with CORS enabled. +/// +/// A message received on `shutdown_receiver` will cause the server to exit cleanly. +pub(super) async fn run_with_cors( + builder: Builder, + effect_builder: EffectBuilder, + api_version: ProtocolVersion, + shutdown_receiver: oneshot::Receiver<()>, + qps_limit: u64, + local_addr: Arc>, + cors_origin: String, +) { + // REST filters. + let rest_status = filters::create_status_filter(effect_builder, api_version); + let rest_metrics = filters::create_metrics_filter(effect_builder); + let rest_validator_changes = + filters::create_validator_changes_filter(effect_builder, api_version); + let rest_chainspec_filter = filters::create_chainspec_filter(effect_builder, api_version); + + let service = warp::service( + rest_status + .or(rest_metrics) + .or(rest_validator_changes) + .or(rest_chainspec_filter) + .with(match cors_origin.as_str() { + "*" => warp::cors().allow_any_origin(), + origin => warp::cors().allow_origin(origin), + }), + ); + + // Start the server, passing a oneshot receiver to allow the server to be shut down gracefully. 
+ let make_svc = + hyper::service::make_service_fn(move |_| future::ok::<_, Infallible>(service.clone())); + + let rate_limited_service = ServiceBuilder::new() + .rate_limit(qps_limit, Duration::from_secs(1)) + .service(make_svc); + + let server = builder.serve(rate_limited_service); + if let Err(err) = local_addr.set(server.local_addr()) { + warn!(%err, "failed to set local addr for reflection"); + } + info!(address = %server.local_addr(), "started REST server"); + + // Shutdown the server gracefully. + let _ = server + .with_graceful_shutdown(async move { shutdown_receiver.await.ok(); }) .map_err(|error| { diff --git a/node/src/components/rest_server/info.rs b/node/src/components/rest_server/info.rs new file mode 100644 index 0000000000..8b0bb2ddef --- /dev/null +++ b/node/src/components/rest_server/info.rs @@ -0,0 +1,102 @@ +use std::str; + +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use casper_binary_port::ConsensusValidatorChanges; + +use casper_types::{ChainspecRawBytes, EraId, ProtocolVersion, PublicKey, ValidatorChange}; + +/// A single change to a validator's status in the given era. +#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct JsonValidatorStatusChange { + /// The era in which the change occurred. + era_id: EraId, + /// The change in validator status. + validator_change: ValidatorChange, +} + +impl JsonValidatorStatusChange { + pub(crate) fn new(era_id: EraId, validator_change: ValidatorChange) -> Self { + JsonValidatorStatusChange { + era_id, + validator_change, + } + } +} + +/// The changes in a validator's status. +#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct JsonValidatorChanges { + /// The public key of the validator. + public_key: PublicKey, + /// The set of changes to the validator's status. 
+ status_changes: Vec, +} + +impl JsonValidatorChanges { + pub(crate) fn new( + public_key: PublicKey, + status_changes: Vec, + ) -> Self { + JsonValidatorChanges { + public_key, + status_changes, + } + } +} + +/// Result for the "info_get_validator_changes" RPC. +#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)] +#[serde(deny_unknown_fields)] +pub struct GetValidatorChangesResult { + /// The RPC API version. + #[schemars(with = "String")] + pub api_version: ProtocolVersion, + /// The validators' status changes. + pub changes: Vec, +} + +impl GetValidatorChangesResult { + pub(crate) fn new(api_version: ProtocolVersion, changes: ConsensusValidatorChanges) -> Self { + let changes = changes + .into_inner() + .into_iter() + .map(|(public_key, mut validator_changes)| { + validator_changes.sort(); + let status_changes = validator_changes + .into_iter() + .map(|(era_id, validator_change)| { + JsonValidatorStatusChange::new(era_id, validator_change) + }) + .collect(); + JsonValidatorChanges::new(public_key, status_changes) + }) + .collect(); + GetValidatorChangesResult { + api_version, + changes, + } + } +} + +/// Result for the "info_get_chainspec" RPC. +#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)] +pub struct GetChainspecResult { + /// The RPC API version. + #[schemars(with = "String")] + pub api_version: ProtocolVersion, + /// The chainspec file bytes. + pub chainspec_bytes: ChainspecRawBytes, +} + +impl GetChainspecResult { + pub(crate) fn new(api_version: ProtocolVersion, chainspec_bytes: ChainspecRawBytes) -> Self { + Self { + api_version, + chainspec_bytes, + } + } +} diff --git a/node/src/components/rpc_server.rs b/node/src/components/rpc_server.rs deleted file mode 100644 index 49babcef6a..0000000000 --- a/node/src/components/rpc_server.rs +++ /dev/null @@ -1,349 +0,0 @@ -//! JSON-RPC server -//! -//! The JSON-RPC server provides clients with an API for querying state and -//! sending commands to the node. -//! -//! 
The actual server is run in backgrounded tasks. RPCs requests are translated into reactor -//! requests to various components. -//! -//! This module currently provides both halves of what is required for an API server: -//! a component implementation that interfaces with other components via being plugged into a -//! reactor, and an external facing http server that exposes various uri routes and converts -//! JSON-RPC requests into the appropriate component events. -//! -//! For the list of supported RPC methods, see: -//! - -mod config; -mod event; -mod http_server; -pub mod rpcs; - -use std::{convert::Infallible, fmt::Debug}; - -use datasize::DataSize; -use futures::join; - -use casper_execution_engine::{ - core::engine_state::{ - self, BalanceRequest, BalanceResult, GetBidsRequest, GetEraValidatorsError, QueryRequest, - QueryResult, - }, - storage::protocol_data::ProtocolData, -}; -use casper_types::{system::auction::EraValidators, Key, ProtocolVersion, URef}; - -use self::rpcs::chain::BlockIdentifier; - -use super::Component; -use crate::{ - components::contract_runtime::EraValidatorsRequest, - crypto::hash::Digest, - effect::{ - announcements::RpcServerAnnouncement, - requests::{ - ChainspecLoaderRequest, ConsensusRequest, ContractRuntimeRequest, LinearChainRequest, - MetricsRequest, NetworkInfoRequest, RpcRequest, StorageRequest, - }, - EffectBuilder, EffectExt, Effects, Responder, - }, - types::{NodeId, StatusFeed}, - utils::{self, ListeningError}, - NodeRng, -}; - -pub use config::Config; -pub(crate) use event::Event; - -/// A helper trait capturing all of this components Request type dependencies. 
-pub trait ReactorEventT: - From - + From> - + From - + From - + From - + From - + From> - + From - + From> - + From - + Send -{ -} - -impl ReactorEventT for REv where - REv: From - + From> - + From - + From - + From - + From - + From> - + From - + From> - + From - + Send - + 'static -{ -} - -#[derive(DataSize, Debug)] -pub(crate) struct RpcServer {} - -impl RpcServer { - pub(crate) fn new( - config: Config, - effect_builder: EffectBuilder, - api_version: ProtocolVersion, - ) -> Result - where - REv: ReactorEventT, - { - let builder = utils::start_listening(&config.address)?; - tokio::spawn(http_server::run( - builder, - effect_builder, - api_version, - config.qps_limit, - )); - - Ok(RpcServer {}) - } -} - -impl RpcServer { - fn handle_protocol_data( - &mut self, - effect_builder: EffectBuilder, - protocol_version: ProtocolVersion, - responder: Responder>, engine_state::Error>>, - ) -> Effects { - effect_builder - .get_protocol_data(protocol_version) - .event(move |result| Event::QueryProtocolDataResult { - result, - main_responder: responder, - }) - } - - fn handle_query( - &mut self, - effect_builder: EffectBuilder, - state_root_hash: Digest, - base_key: Key, - path: Vec, - responder: Responder>, - ) -> Effects { - let query = QueryRequest::new(state_root_hash.into(), base_key, path); - effect_builder - .query_global_state(query) - .event(move |result| Event::QueryGlobalStateResult { - result, - main_responder: responder, - }) - } - - fn handle_era_validators( - &mut self, - effect_builder: EffectBuilder, - state_root_hash: Digest, - protocol_version: ProtocolVersion, - responder: Responder>, - ) -> Effects { - let request = EraValidatorsRequest::new(state_root_hash.into(), protocol_version); - effect_builder - .get_era_validators_from_contract_runtime(request) - .event(move |result| Event::QueryEraValidatorsResult { - result, - main_responder: responder, - }) - } - - fn handle_get_balance( - &mut self, - effect_builder: EffectBuilder, - state_root_hash: Digest, 
- purse_uref: URef, - responder: Responder>, - ) -> Effects { - let query = BalanceRequest::new(state_root_hash.into(), purse_uref); - effect_builder - .get_balance(query) - .event(move |result| Event::GetBalanceResult { - result, - main_responder: responder, - }) - } -} - -impl Component for RpcServer -where - REv: ReactorEventT, -{ - type Event = Event; - type ConstructionError = Infallible; - - fn handle_event( - &mut self, - effect_builder: EffectBuilder, - _rng: &mut NodeRng, - event: Self::Event, - ) -> Effects { - match event { - Event::RpcRequest(RpcRequest::SubmitDeploy { deploy, responder }) => effect_builder - .announce_deploy_received(deploy, Some(responder)) - .ignore(), - Event::RpcRequest(RpcRequest::GetBlock { - maybe_id: Some(BlockIdentifier::Hash(hash)), - responder, - }) => effect_builder - .get_block_with_metadata_from_storage(hash) - .event(move |result| Event::GetBlockResult { - maybe_id: Some(BlockIdentifier::Hash(hash)), - result: Box::new(result), - main_responder: responder, - }), - Event::RpcRequest(RpcRequest::GetBlock { - maybe_id: Some(BlockIdentifier::Height(height)), - responder, - }) => effect_builder - .get_block_at_height_with_metadata_from_storage(height) - .event(move |result| Event::GetBlockResult { - maybe_id: Some(BlockIdentifier::Height(height)), - result: Box::new(result), - main_responder: responder, - }), - Event::RpcRequest(RpcRequest::GetBlock { - maybe_id: None, - responder, - }) => effect_builder - .get_highest_block_with_metadata_from_storage() - .event(move |result| Event::GetBlockResult { - maybe_id: None, - result: Box::new(result), - main_responder: responder, - }), - Event::RpcRequest(RpcRequest::GetBlockTransfers { - block_hash, - responder, - }) => effect_builder - .get_block_transfers_from_storage(block_hash) - .event(move |result| Event::GetBlockTransfersResult { - block_hash, - result: Box::new(result), - main_responder: responder, - }), - Event::RpcRequest(RpcRequest::QueryProtocolData { - 
protocol_version, - responder, - }) => self.handle_protocol_data(effect_builder, protocol_version, responder), - Event::RpcRequest(RpcRequest::QueryGlobalState { - state_root_hash, - base_key, - path, - responder, - }) => self.handle_query(effect_builder, state_root_hash, base_key, path, responder), - Event::RpcRequest(RpcRequest::QueryEraValidators { - state_root_hash, - protocol_version, - responder, - }) => self.handle_era_validators( - effect_builder, - state_root_hash, - protocol_version, - responder, - ), - Event::RpcRequest(RpcRequest::GetBids { - state_root_hash, - responder, - }) => { - let get_bids_request = GetBidsRequest::new(state_root_hash.into()); - effect_builder - .get_bids(get_bids_request) - .event(move |result| Event::GetBidsResult { - result, - main_responder: responder, - }) - } - Event::RpcRequest(RpcRequest::GetBalance { - state_root_hash, - purse_uref, - responder, - }) => self.handle_get_balance(effect_builder, state_root_hash, purse_uref, responder), - Event::RpcRequest(RpcRequest::GetDeploy { hash, responder }) => effect_builder - .get_deploy_and_metadata_from_storage(hash) - .event(move |result| Event::GetDeployResult { - hash, - result: Box::new(result), - main_responder: responder, - }), - Event::RpcRequest(RpcRequest::GetPeers { responder }) => effect_builder - .network_peers() - .event(move |peers| Event::GetPeersResult { - peers, - main_responder: responder, - }), - Event::RpcRequest(RpcRequest::GetStatus { responder }) => async move { - let (last_added_block, peers, chainspec_info, consensus_status) = join!( - effect_builder.get_highest_block_from_storage(), - effect_builder.network_peers(), - effect_builder.get_chainspec_info(), - effect_builder.consensus_status() - ); - let status_feed = - StatusFeed::new(last_added_block, peers, chainspec_info, consensus_status); - responder.respond(status_feed).await; - } - .ignore(), - Event::RpcRequest(RpcRequest::GetMetrics { responder }) => effect_builder - .get_metrics() - .event(move 
|text| Event::GetMetricsResult { - text, - main_responder: responder, - }), - Event::GetBlockResult { - maybe_id: _, - result, - main_responder, - } => main_responder.respond(*result).ignore(), - Event::GetBlockTransfersResult { - result, - main_responder, - .. - } => main_responder.respond(*result).ignore(), - Event::QueryProtocolDataResult { - result, - main_responder, - } => main_responder.respond(result).ignore(), - Event::QueryGlobalStateResult { - result, - main_responder, - } => main_responder.respond(result).ignore(), - Event::QueryEraValidatorsResult { - result, - main_responder, - } => main_responder.respond(result).ignore(), - Event::GetBidsResult { - result, - main_responder, - } => main_responder.respond(result).ignore(), - Event::GetBalanceResult { - result, - main_responder, - } => main_responder.respond(result).ignore(), - Event::GetDeployResult { - hash: _, - result, - main_responder, - } => main_responder.respond(*result).ignore(), - Event::GetPeersResult { - peers, - main_responder, - } => main_responder.respond(peers).ignore(), - Event::GetMetricsResult { - text, - main_responder, - } => main_responder.respond(text).ignore(), - } - } -} diff --git a/node/src/components/rpc_server/config.rs b/node/src/components/rpc_server/config.rs deleted file mode 100644 index 4169f42c5e..0000000000 --- a/node/src/components/rpc_server/config.rs +++ /dev/null @@ -1,37 +0,0 @@ -use datasize::DataSize; -use serde::{Deserialize, Serialize}; - -/// Default binding address for the JSON-RPC HTTP server. -/// -/// Uses a fixed port per node, but binds on any interface. -const DEFAULT_ADDRESS: &str = "0.0.0.0:0"; -/// Default rate limit in qps. -const DEFAULT_QPS_LIMIT: u64 = 100; - -/// JSON-RPC HTTP server configuration. -#[derive(Clone, DataSize, Debug, Deserialize, Serialize)] -// Disallow unknown fields to ensure config files and command-line overrides contain valid keys. 
-#[serde(deny_unknown_fields)] -pub struct Config { - /// Address to bind JSON-RPC HTTP server to. - pub address: String, - - /// Max rate limit in qps. - pub qps_limit: u64, -} - -impl Config { - /// Creates a default instance for `RpcServer`. - pub fn new() -> Self { - Config { - address: DEFAULT_ADDRESS.to_string(), - qps_limit: DEFAULT_QPS_LIMIT, - } - } -} - -impl Default for Config { - fn default() -> Self { - Config::new() - } -} diff --git a/node/src/components/rpc_server/event.rs b/node/src/components/rpc_server/event.rs deleted file mode 100644 index f5d92fee61..0000000000 --- a/node/src/components/rpc_server/event.rs +++ /dev/null @@ -1,120 +0,0 @@ -use std::{ - collections::BTreeMap, - fmt::{self, Display, Formatter}, -}; - -use derive_more::From; - -use casper_execution_engine::{ - core::engine_state::{self, BalanceResult, GetBidsResult, GetEraValidatorsError, QueryResult}, - storage::protocol_data::ProtocolData, -}; -use casper_types::{system::auction::EraValidators, Transfer}; - -use crate::{ - effect::{requests::RpcRequest, Responder}, - rpcs::chain::BlockIdentifier, - types::{Block, BlockHash, BlockSignatures, Deploy, DeployHash, DeployMetadata, NodeId}, -}; - -#[derive(Debug, From)] -pub enum Event { - #[from] - RpcRequest(RpcRequest), - GetBlockResult { - maybe_id: Option, - result: Box>, - main_responder: Responder>, - }, - GetBlockTransfersResult { - block_hash: BlockHash, - result: Box>>, - main_responder: Responder>>, - }, - QueryProtocolDataResult { - result: Result>, engine_state::Error>, - main_responder: Responder>, engine_state::Error>>, - }, - QueryGlobalStateResult { - result: Result, - main_responder: Responder>, - }, - QueryEraValidatorsResult { - result: Result, - main_responder: Responder>, - }, - GetBidsResult { - result: Result, - main_responder: Responder>, - }, - GetDeployResult { - hash: DeployHash, - result: Box>, - main_responder: Responder>, - }, - GetPeersResult { - peers: BTreeMap, - main_responder: Responder>, - }, - 
GetMetricsResult { - text: Option, - main_responder: Responder>, - }, - GetBalanceResult { - result: Result, - main_responder: Responder>, - }, -} - -impl Display for Event { - fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { - match self { - Event::RpcRequest(request) => write!(formatter, "{}", request), - Event::GetBlockResult { - maybe_id: Some(BlockIdentifier::Hash(hash)), - result, - .. - } => write!(formatter, "get block result for {}: {:?}", hash, result), - Event::GetBlockResult { - maybe_id: Some(BlockIdentifier::Height(height)), - result, - .. - } => write!(formatter, "get block result for {}: {:?}", height, result), - Event::GetBlockResult { - maybe_id: None, - result, - .. - } => write!(formatter, "get latest block result: {:?}", result), - Event::GetBlockTransfersResult { - block_hash, result, .. - } => write!( - formatter, - "get block transfers result for block_hash {}: {:?}", - block_hash, result - ), - Event::QueryProtocolDataResult { result, .. } => { - write!(formatter, "query protocol data result: {:?}", result) - } - Event::QueryGlobalStateResult { result, .. } => { - write!(formatter, "query result: {:?}", result) - } - Event::QueryEraValidatorsResult { result, .. } => { - write!(formatter, "query era validators result: {:?}", result) - } - Event::GetBidsResult { result, .. } => { - write!(formatter, "get bids result: {:?}", result) - } - Event::GetBalanceResult { result, .. } => { - write!(formatter, "balance result: {:?}", result) - } - Event::GetDeployResult { hash, result, .. } => { - write!(formatter, "get deploy result for {}: {:?}", hash, result) - } - Event::GetPeersResult { peers, .. } => write!(formatter, "get peers: {}", peers.len()), - Event::GetMetricsResult { text, .. 
} => match text { - Some(txt) => write!(formatter, "get metrics ({} bytes)", txt.len()), - None => write!(formatter, "get metrics (failed)"), - }, - } - } -} diff --git a/node/src/components/rpc_server/http_server.rs b/node/src/components/rpc_server/http_server.rs deleted file mode 100644 index 7955eb4764..0000000000 --- a/node/src/components/rpc_server/http_server.rs +++ /dev/null @@ -1,132 +0,0 @@ -use std::{convert::Infallible, time::Duration}; - -use futures::future; -use http::{Response, StatusCode}; -use hyper::{ - server::{conn::AddrIncoming, Builder}, - Body, -}; -use serde::Serialize; -use tokio::sync::oneshot; -use tower::builder::ServiceBuilder; -use tracing::{info, trace}; -use warp::{Filter, Rejection}; - -use casper_types::ProtocolVersion; - -use super::{ - rpcs::{self, RpcWithOptionalParamsExt, RpcWithParamsExt, RpcWithoutParamsExt, RPC_API_PATH}, - ReactorEventT, -}; -use crate::effect::EffectBuilder; - -// This is a workaround for not being able to create a `warp_json_rpc::Response` without a -// `warp_json_rpc::Builder`. -fn new_error_response(error: warp_json_rpc::Error) -> Response { - #[derive(Serialize)] - struct JsonRpcErrorResponse { - jsonrpc: String, - id: Option<()>, - error: warp_json_rpc::Error, - } - - let json_response = JsonRpcErrorResponse { - jsonrpc: "2.0".to_string(), - id: None, - error, - }; - - let body = Body::from(serde_json::to_vec(&json_response).unwrap()); - Response::builder() - .status(StatusCode::OK) - .header("Content-Type", "application/json") - .body(body) - .unwrap() -} - -/// Run the JSON-RPC server. -pub(super) async fn run( - builder: Builder, - effect_builder: EffectBuilder, - api_version: ProtocolVersion, - qps_limit: u64, -) { - // RPC filters. 
- let rpc_put_deploy = rpcs::account::PutDeploy::create_filter(effect_builder, api_version); - let rpc_get_block = rpcs::chain::GetBlock::create_filter(effect_builder, api_version); - let rpc_get_block_transfers = - rpcs::chain::GetBlockTransfers::create_filter(effect_builder, api_version); - let rpc_get_state_root_hash = - rpcs::chain::GetStateRootHash::create_filter(effect_builder, api_version); - let rpc_get_item = rpcs::state::GetItem::create_filter(effect_builder, api_version); - let rpc_get_balance = rpcs::state::GetBalance::create_filter(effect_builder, api_version); - let rpc_get_deploy = rpcs::info::GetDeploy::create_filter(effect_builder, api_version); - let rpc_get_peers = rpcs::info::GetPeers::create_filter(effect_builder, api_version); - let rpc_get_status = rpcs::info::GetStatus::create_filter(effect_builder, api_version); - let rpc_get_era_info = - rpcs::chain::GetEraInfoBySwitchBlock::create_filter(effect_builder, api_version); - let rpc_get_auction_info = - rpcs::state::GetAuctionInfo::create_filter(effect_builder, api_version); - let rpc_get_rpcs = rpcs::docs::ListRpcs::create_filter(effect_builder, api_version); - - // Catch requests where the method is not one we handle. - let unknown_method = warp::path(RPC_API_PATH) - .and(warp_json_rpc::filters::json_rpc()) - .and_then(move |response_builder: warp_json_rpc::Builder| async move { - response_builder - .error(warp_json_rpc::Error::METHOD_NOT_FOUND) - .map_err(|_| warp::reject()) - }); - - // Catch requests which don't parse as JSON. - let parse_failure = warp::path(RPC_API_PATH).and_then(move || async move { - let error_response = new_error_response(warp_json_rpc::Error::PARSE_ERROR); - Ok::<_, Rejection>(error_response) - }); - - // TODO - we can't catch cases where we should return `warp_json_rpc::Error::INVALID_REQUEST` - // (i.e. where the request is JSON, but not valid JSON-RPC). This will require an - // update to or move away from warp_json_rpc. 
- let service = warp_json_rpc::service( - rpc_put_deploy - .or(rpc_get_block) - .or(rpc_get_block_transfers) - .or(rpc_get_state_root_hash) - .or(rpc_get_item) - .or(rpc_get_balance) - .or(rpc_get_deploy) - .or(rpc_get_peers) - .or(rpc_get_status) - .or(rpc_get_era_info) - .or(rpc_get_auction_info) - .or(rpc_get_rpcs) - .or(unknown_method) - .or(parse_failure), - ); - - // Start the server, passing a oneshot receiver to allow the server to be shut down gracefully. - let make_svc = - hyper::service::make_service_fn(move |_| future::ok::<_, Infallible>(service.clone())); - - let make_svc = ServiceBuilder::new() - .rate_limit(qps_limit, Duration::from_secs(1)) - .service(make_svc); - - let (shutdown_sender, shutdown_receiver) = oneshot::channel::<()>(); - - let server = builder.serve(make_svc); - info!(address = %server.local_addr(), "started JSON-RPC server"); - - let server_with_shutdown = server.with_graceful_shutdown(async { - shutdown_receiver.await.ok(); - }); - - let server_joiner = tokio::spawn(server_with_shutdown); - - let _ = server_joiner.await; - - // Shut down the server. - let _ = shutdown_sender.send(()); - - trace!("JSON-RPC server stopped"); -} diff --git a/node/src/components/rpc_server/rpcs.rs b/node/src/components/rpc_server/rpcs.rs deleted file mode 100644 index 09697b2e48..0000000000 --- a/node/src/components/rpc_server/rpcs.rs +++ /dev/null @@ -1,327 +0,0 @@ -//! The set of JSON-RPCs which the API server handles. -//! -//! See for info. 
- -pub mod account; -pub mod chain; -pub mod docs; -pub mod info; -pub mod state; - -use std::str; - -use futures::{future::BoxFuture, TryFutureExt}; -use http::Response; -use hyper::Body; -use schemars::JsonSchema; -use serde::{Deserialize, Serialize}; -use serde_json::Value; -use warp::{ - filters::BoxedFilter, - reject::{self, Reject}, - Filter, -}; -use warp_json_rpc::{filters, Builder}; - -use casper_types::ProtocolVersion; - -use super::{ReactorEventT, RpcRequest}; -use crate::effect::EffectBuilder; -use docs::DocExample; - -/// The URL path. -pub const RPC_API_PATH: &str = "rpc"; - -/// Error code returned if the JSON-RPC response indicates failure. -/// -/// See for details. -#[repr(i64)] -enum ErrorCode { - NoSuchDeploy = -32000, - NoSuchBlock = -32001, - ParseQueryKey = -32002, - QueryFailed = -32003, - QueryFailedToExecute = -32004, - ParseGetBalanceURef = -32005, - GetBalanceFailed = -32006, - GetBalanceFailedToExecute = -32007, - InvalidDeploy = -32008, -} - -#[derive(Debug)] -pub(super) struct Error(String); - -impl Reject for Error {} - -impl From for Error { - fn from(error: anyhow::Error) -> Self { - Error(error.to_string()) - } -} - -/// A JSON-RPC requiring the "params" field to be present. -pub trait RpcWithParams { - /// The JSON-RPC "method" name. - const METHOD: &'static str; - - /// The JSON-RPC request's "params" type. - type RequestParams: Serialize - + for<'de> Deserialize<'de> - + JsonSchema - + DocExample - + Send - + 'static; - - /// The JSON-RPC response's "result" type. - type ResponseResult: Serialize - + for<'de> Deserialize<'de> - + JsonSchema - + DocExample - + Send - + 'static; -} - -/// A trait for creating a JSON-RPC filter where the request is required to have "params". -pub(super) trait RpcWithParamsExt: RpcWithParams { - /// Creates the warp filter for this particular RPC. 
- fn create_filter( - effect_builder: EffectBuilder, - api_version: ProtocolVersion, - ) -> BoxedFilter<(Response,)> { - let with_valid_params = warp::path(RPC_API_PATH) - .and(filters::json_rpc()) - .and(filters::method(Self::METHOD)) - .and(filters::params::()) - .and_then( - move |response_builder: Builder, params: Self::RequestParams| { - Self::handle_request(effect_builder, response_builder, params, api_version) - .map_err(reject::custom) - }, - ); - let with_invalid_params = warp::path(RPC_API_PATH) - .and(filters::json_rpc()) - .and(filters::method(Self::METHOD)) - .and(filters::params::()) - .and_then( - move |response_builder: Builder, _params: Value| async move { - response_builder - .error(warp_json_rpc::Error::INVALID_PARAMS) - .map_err(|_| reject::reject()) - }, - ); - let with_missing_params = warp::path(RPC_API_PATH) - .and(filters::json_rpc()) - .and(filters::method(Self::METHOD)) - .and_then(move |response_builder: Builder| async move { - response_builder - .error(warp_json_rpc::Error::INVALID_PARAMS) - .map_err(|_| reject::reject()) - }); - with_valid_params - .or(with_invalid_params) - .unify() - .or(with_missing_params) - .unify() - .boxed() - } - - /// Handles the incoming RPC request. - fn handle_request( - effect_builder: EffectBuilder, - response_builder: Builder, - params: Self::RequestParams, - api_version: ProtocolVersion, - ) -> BoxFuture<'static, Result, Error>>; -} - -/// A JSON-RPC requiring the "params" field to be absent. -pub trait RpcWithoutParams { - /// The JSON-RPC "method" name. - const METHOD: &'static str; - - /// The JSON-RPC response's "result" type. - type ResponseResult: Serialize - + for<'de> Deserialize<'de> - + JsonSchema - + DocExample - + Send - + 'static; -} - -/// A trait for creating a JSON-RPC filter where the request is not required to have "params". -pub(super) trait RpcWithoutParamsExt: RpcWithoutParams { - /// Creates the warp filter for this particular RPC. 
- fn create_filter( - effect_builder: EffectBuilder, - api_version: ProtocolVersion, - ) -> BoxedFilter<(Response,)> { - let with_no_params = warp::path(RPC_API_PATH) - .and(filters::json_rpc()) - .and(filters::method(Self::METHOD)) - .and_then(move |response_builder: Builder| { - Self::handle_request(effect_builder, response_builder, api_version) - .map_err(reject::custom) - }); - let with_params = warp::path(RPC_API_PATH) - .and(filters::json_rpc()) - .and(filters::method(Self::METHOD)) - .and(filters::params::()) - .and_then( - move |response_builder: Builder, _params: Value| async move { - response_builder - .error(warp_json_rpc::Error::INVALID_PARAMS) - .map_err(|_| reject::reject()) - }, - ); - with_no_params.or(with_params).unify().boxed() - } - - /// Handles the incoming RPC request. - fn handle_request( - effect_builder: EffectBuilder, - response_builder: Builder, - api_version: ProtocolVersion, - ) -> BoxFuture<'static, Result, Error>>; -} - -/// A JSON-RPC with the "params" field optional. -pub trait RpcWithOptionalParams { - /// The JSON-RPC "method" name. - const METHOD: &'static str; - - /// The JSON-RPC request's "params" type. This will be passed to the handler wrapped in an - /// `Option`. - type OptionalRequestParams: Serialize - + for<'de> Deserialize<'de> - + JsonSchema - + DocExample - + Send - + 'static; - - /// The JSON-RPC response's "result" type. - type ResponseResult: Serialize - + for<'de> Deserialize<'de> - + JsonSchema - + DocExample - + Send - + 'static; -} - -/// A trait for creating a JSON-RPC filter where the request may optionally have "params". -pub(super) trait RpcWithOptionalParamsExt: RpcWithOptionalParams { - /// Creates the warp filter for this particular RPC. 
- fn create_filter( - effect_builder: EffectBuilder, - api_version: ProtocolVersion, - ) -> BoxedFilter<(Response,)> { - let with_params = warp::path(RPC_API_PATH) - .and(filters::json_rpc()) - .and(filters::method(Self::METHOD)) - .and(filters::params::()) - .and_then( - move |response_builder: Builder, params: Self::OptionalRequestParams| { - Self::handle_request( - effect_builder, - response_builder, - Some(params), - api_version, - ) - .map_err(reject::custom) - }, - ); - let with_invalid_params = warp::path(RPC_API_PATH) - .and(filters::json_rpc()) - .and(filters::method(Self::METHOD)) - .and(filters::params::()) - .and_then( - move |response_builder: Builder, _params: Value| async move { - response_builder - .error(warp_json_rpc::Error::INVALID_PARAMS) - .map_err(|_| reject::reject()) - }, - ); - let without_params = warp::path(RPC_API_PATH) - .and(filters::json_rpc()) - .and(filters::method(Self::METHOD)) - .and_then(move |response_builder: Builder| { - Self::handle_request(effect_builder, response_builder, None, api_version) - .map_err(reject::custom) - }); - with_params - .or(without_params) - .unify() - .or(with_invalid_params) - .unify() - .boxed() - } - - /// Handles the incoming RPC request. 
- fn handle_request( - effect_builder: EffectBuilder, - response_builder: Builder, - maybe_params: Option, - api_version: ProtocolVersion, - ) -> BoxFuture<'static, Result, Error>>; -} - -mod common { - use std::convert::TryFrom; - - use once_cell::sync::Lazy; - - use casper_execution_engine::core::engine_state::{self, QueryResult}; - use casper_types::bytesrepr::ToBytes; - - use super::ErrorCode; - use crate::types::json_compatibility::StoredValue; - - pub(super) static MERKLE_PROOF: Lazy = Lazy::new(|| { - String::from( - "01000000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a72536147614625016ef2e0949ac76e\ - 55812421f755abe129b6244fe7168b77f47a72536147614625000000003529cde5c621f857f75f3810611eb4af3\ - f998caaa9d4a3413cf799f99c67db0307010000006ef2e0949ac76e55812421f755abe129b6244fe7168b77f47a\ - 7253614761462501010102000000006e06000000000074769d28aac597a36a03a932d4b43e4f10bf0403ee5c41d\ - d035102553f5773631200b9e173e8f05361b681513c14e25e3138639eb03232581db7557c9e8dbbc83ce9450022\ - 6a9a7fe4f2b7b88d5103a4fc7400f02bf89c860c9ccdd56951a2afe9be0e0267006d820fb5676eb2960e15722f7\ - 725f3f8f41030078f8b2e44bf0dc03f71b176d6e800dc5ae9805068c5be6da1a90b2528ee85db0609cc0fb4bd60\ - bbd559f497a98b67f500e1e3e846592f4918234647fca39830b7e1e6ad6f5b7a99b39af823d82ba1873d0000030\ - 00000010186ff500f287e9b53f823ae1582b1fa429dfede28015125fd233a31ca04d5012002015cc42669a55467\ - a1fdf49750772bfc1aed59b9b085558eb81510e9b015a7c83b0301e3cf4a34b1db6bfa58808b686cb8fe21ebe0c\ - 1bcbcee522649d2b135fe510fe3") - }); - - // Extract the EE `(StoredValue, Vec>)` from the result. 
- pub(super) fn extract_query_result( - query_result: Result, - ) -> Result<(StoredValue, Vec), (ErrorCode, String)> { - let (value, proof) = match query_result { - Ok(QueryResult::Success { value, proofs }) => (value, proofs), - Ok(query_result) => { - let error_msg = format!("state query failed: {:?}", query_result); - return Err((ErrorCode::QueryFailed, error_msg)); - } - Err(error) => { - let error_msg = format!("state query failed to execute: {:?}", error); - return Err((ErrorCode::QueryFailedToExecute, error_msg)); - } - }; - - let value_compat = match StoredValue::try_from(&*value) { - Ok(value_compat) => value_compat, - Err(error) => { - let error_msg = format!("failed to encode stored value: {:?}", error); - return Err((ErrorCode::QueryFailed, error_msg)); - } - }; - - let proof_bytes = match proof.to_bytes() { - Ok(proof_bytes) => proof_bytes, - Err(error) => { - let error_msg = format!("failed to encode stored value: {:?}", error); - return Err((ErrorCode::QueryFailed, error_msg)); - } - }; - - Ok((value_compat, proof_bytes)) - } -} diff --git a/node/src/components/rpc_server/rpcs/account.rs b/node/src/components/rpc_server/rpcs/account.rs deleted file mode 100644 index 42e162f100..0000000000 --- a/node/src/components/rpc_server/rpcs/account.rs +++ /dev/null @@ -1,125 +0,0 @@ -//! RPCs related to accounts. - -// TODO - remove once schemars stops causing warning. 
-#![allow(clippy::field_reassign_with_default)] - -use std::str; - -use futures::{future::BoxFuture, FutureExt}; -use http::Response; -use hyper::Body; -use once_cell::sync::Lazy; -use schemars::JsonSchema; -use serde::{Deserialize, Serialize}; -use tracing::info; -use warp_json_rpc::Builder; - -use casper_types::ProtocolVersion; - -use super::{ - docs::{DocExample, DOCS_EXAMPLE_PROTOCOL_VERSION}, - Error, ReactorEventT, RpcRequest, RpcWithParams, RpcWithParamsExt, -}; -use crate::{ - components::rpc_server::rpcs::ErrorCode, - effect::EffectBuilder, - reactor::QueueKind, - types::{Deploy, DeployHash}, -}; - -static PUT_DEPLOY_PARAMS: Lazy = Lazy::new(|| PutDeployParams { - deploy: Deploy::doc_example().clone(), -}); -static PUT_DEPLOY_RESULT: Lazy = Lazy::new(|| PutDeployResult { - api_version: DOCS_EXAMPLE_PROTOCOL_VERSION, - deploy_hash: *Deploy::doc_example().id(), -}); - -/// Params for "account_put_deploy" RPC request. -#[derive(Serialize, Deserialize, Debug, JsonSchema)] -#[serde(deny_unknown_fields)] -pub struct PutDeployParams { - /// The `Deploy`. - pub deploy: Deploy, -} - -impl DocExample for PutDeployParams { - fn doc_example() -> &'static Self { - &*PUT_DEPLOY_PARAMS - } -} - -/// Result for "account_put_deploy" RPC response. -#[derive(Serialize, Deserialize, Debug, JsonSchema)] -#[serde(deny_unknown_fields)] -pub struct PutDeployResult { - /// The RPC API version. - #[schemars(with = "String")] - pub api_version: ProtocolVersion, - /// The deploy hash. 
- pub deploy_hash: DeployHash, -} - -impl DocExample for PutDeployResult { - fn doc_example() -> &'static Self { - &*PUT_DEPLOY_RESULT - } -} - -/// "account_put_deploy" RPC -pub struct PutDeploy {} - -impl RpcWithParams for PutDeploy { - const METHOD: &'static str = "account_put_deploy"; - type RequestParams = PutDeployParams; - type ResponseResult = PutDeployResult; -} - -impl RpcWithParamsExt for PutDeploy { - fn handle_request( - effect_builder: EffectBuilder, - response_builder: Builder, - params: Self::RequestParams, - api_version: ProtocolVersion, - ) -> BoxFuture<'static, Result, Error>> { - async move { - let deploy_hash = *params.deploy.id(); - - // Submit the new deploy to be announced. - let put_deploy_result = effect_builder - .make_request( - |responder| RpcRequest::SubmitDeploy { - deploy: Box::new(params.deploy), - responder, - }, - QueueKind::Api, - ) - .await; - - match put_deploy_result { - Ok(_) => { - info!(%deploy_hash, - "deploy was stored" - ); - let result = Self::ResponseResult { - api_version, - deploy_hash, - }; - Ok(response_builder.success(result)?) - } - Err(error) => { - info!( - %deploy_hash, - %error, - "the deploy submitted by the client was invalid", - ); - Ok(response_builder.error(warp_json_rpc::Error::custom( - ErrorCode::InvalidDeploy as i64, - error.to_string(), - ))?) - } - } - } - .boxed() - } -} diff --git a/node/src/components/rpc_server/rpcs/chain.rs b/node/src/components/rpc_server/rpcs/chain.rs deleted file mode 100644 index 0d6dae1ae8..0000000000 --- a/node/src/components/rpc_server/rpcs/chain.rs +++ /dev/null @@ -1,477 +0,0 @@ -//! RPCs related to the block chain. - -// TODO - remove once schemars stops causing warning. 
-#![allow(clippy::field_reassign_with_default)] - -mod era_summary; - -use std::str; - -use futures::{future::BoxFuture, FutureExt}; -use http::Response; -use hyper::Body; -use once_cell::sync::Lazy; -use schemars::JsonSchema; -use serde::{Deserialize, Serialize}; -use tracing::info; -use warp_json_rpc::Builder; - -use casper_types::{Key, ProtocolVersion, Transfer}; - -use super::{ - docs::{DocExample, DOCS_EXAMPLE_PROTOCOL_VERSION}, - Error, ErrorCode, ReactorEventT, RpcRequest, RpcWithOptionalParams, RpcWithOptionalParamsExt, -}; -use crate::{ - crypto::hash::Digest, - effect::EffectBuilder, - reactor::QueueKind, - rpcs::common::{self}, - types::{Block, BlockHash, BlockSignatures, Item, JsonBlock}, -}; -pub use era_summary::EraSummary; -use era_summary::ERA_SUMMARY; - -static GET_BLOCK_PARAMS: Lazy = Lazy::new(|| GetBlockParams { - block_identifier: BlockIdentifier::Hash(Block::doc_example().id()), -}); -static GET_BLOCK_RESULT: Lazy = Lazy::new(|| GetBlockResult { - api_version: DOCS_EXAMPLE_PROTOCOL_VERSION, - block: Some(JsonBlock::doc_example().clone()), -}); -static GET_BLOCK_TRANSFERS_PARAMS: Lazy = - Lazy::new(|| GetBlockTransfersParams { - block_identifier: BlockIdentifier::Hash(Block::doc_example().id()), - }); -static GET_BLOCK_TRANSFERS_RESULT: Lazy = - Lazy::new(|| GetBlockTransfersResult { - api_version: DOCS_EXAMPLE_PROTOCOL_VERSION, - block_hash: Some(Block::doc_example().id()), - transfers: Some(vec![Transfer::default()]), - }); -static GET_STATE_ROOT_HASH_PARAMS: Lazy = - Lazy::new(|| GetStateRootHashParams { - block_identifier: BlockIdentifier::Height(Block::doc_example().header().height()), - }); -static GET_STATE_ROOT_HASH_RESULT: Lazy = - Lazy::new(|| GetStateRootHashResult { - api_version: DOCS_EXAMPLE_PROTOCOL_VERSION, - state_root_hash: Some(*Block::doc_example().header().state_root_hash()), - }); -static GET_ERA_INFO_PARAMS: Lazy = Lazy::new(|| GetEraInfoParams { - block_identifier: BlockIdentifier::Hash(Block::doc_example().id()), -}); 
-static GET_ERA_INFO_RESULT: Lazy = Lazy::new(|| GetEraInfoResult { - api_version: DOCS_EXAMPLE_PROTOCOL_VERSION, - era_summary: Some(ERA_SUMMARY.clone()), -}); - -/// Identifier for possible ways to retrieve a block. -#[derive(Serialize, Deserialize, Debug, Clone, Copy, JsonSchema)] -#[serde(deny_unknown_fields)] -pub enum BlockIdentifier { - /// Identify and retrieve the block with its hash. - Hash(BlockHash), - /// Identify and retrieve the block with its height. - Height(u64), -} - -/// Params for "chain_get_block" RPC request. -#[derive(Serialize, Deserialize, Debug, JsonSchema)] -#[serde(deny_unknown_fields)] -pub struct GetBlockParams { - /// The block hash. - pub block_identifier: BlockIdentifier, -} - -impl DocExample for GetBlockParams { - fn doc_example() -> &'static Self { - &*GET_BLOCK_PARAMS - } -} - -/// Result for "chain_get_block" RPC response. -#[derive(Serialize, Deserialize, Debug, JsonSchema)] -#[serde(deny_unknown_fields)] -pub struct GetBlockResult { - /// The RPC API version. - #[schemars(with = "String")] - pub api_version: ProtocolVersion, - /// The block, if found. - pub block: Option, -} - -impl DocExample for GetBlockResult { - fn doc_example() -> &'static Self { - &*GET_BLOCK_RESULT - } -} - -/// "chain_get_block" RPC. -pub struct GetBlock {} - -impl RpcWithOptionalParams for GetBlock { - const METHOD: &'static str = "chain_get_block"; - type OptionalRequestParams = GetBlockParams; - type ResponseResult = GetBlockResult; -} - -impl RpcWithOptionalParamsExt for GetBlock { - fn handle_request( - effect_builder: EffectBuilder, - response_builder: Builder, - maybe_params: Option, - api_version: ProtocolVersion, - ) -> BoxFuture<'static, Result, Error>> { - async move { - // Get the block. 
- let maybe_block_id = maybe_params.map(|params| params.block_identifier); - let (block, signatures) = - match get_block_with_metadata(maybe_block_id, effect_builder).await { - Ok(Some((block, signatures))) => (block, signatures), - Ok(None) => { - let error = warp_json_rpc::Error::custom( - ErrorCode::NoSuchBlock as i64, - "block not known", - ); - return Ok(response_builder.error(error)?); - } - Err(error) => return Ok(response_builder.error(error)?), - }; - - let json_block = JsonBlock::new(block, signatures); - - // Return the result. - let result = Self::ResponseResult { - api_version, - block: Some(json_block), - }; - Ok(response_builder.success(result)?) - } - .boxed() - } -} - -/// Params for "chain_get_block_transfers" RPC request. -#[derive(Serialize, Deserialize, Debug, JsonSchema)] -#[serde(deny_unknown_fields)] -pub struct GetBlockTransfersParams { - /// The block hash. - pub block_identifier: BlockIdentifier, -} - -impl DocExample for GetBlockTransfersParams { - fn doc_example() -> &'static Self { - &*GET_BLOCK_TRANSFERS_PARAMS - } -} - -/// Result for "chain_get_block_transfers" RPC response. -#[derive(Serialize, Deserialize, Debug, JsonSchema)] -#[serde(deny_unknown_fields)] -pub struct GetBlockTransfersResult { - /// The RPC API version. - #[schemars(with = "String")] - pub api_version: ProtocolVersion, - /// The block hash, if found. - pub block_hash: Option, - /// The block's transfers, if found. - pub transfers: Option>, -} - -impl GetBlockTransfersResult { - /// Create an instance of GetBlockTransfersResult. - pub fn new( - api_version: ProtocolVersion, - block_hash: Option, - transfers: Option>, - ) -> Self { - GetBlockTransfersResult { - api_version, - block_hash, - transfers, - } - } -} - -impl DocExample for GetBlockTransfersResult { - fn doc_example() -> &'static Self { - &*GET_BLOCK_TRANSFERS_RESULT - } -} - -/// "chain_get_block_transfers" RPC. 
-pub struct GetBlockTransfers {} - -impl RpcWithOptionalParams for GetBlockTransfers { - const METHOD: &'static str = "chain_get_block_transfers"; - type OptionalRequestParams = GetBlockTransfersParams; - type ResponseResult = GetBlockTransfersResult; -} - -impl RpcWithOptionalParamsExt for GetBlockTransfers { - fn handle_request( - effect_builder: EffectBuilder, - response_builder: Builder, - maybe_params: Option, - api_version: ProtocolVersion, - ) -> BoxFuture<'static, Result, Error>> { - async move { - // Get the block. - let maybe_block_id = maybe_params.map(|params| params.block_identifier); - let block_hash = match get_block(maybe_block_id, effect_builder).await { - Ok(Some(block)) => *block.hash(), - Ok(None) => { - return Ok(response_builder.success(Self::ResponseResult::new( - api_version, - None, - None, - ))?) - } - Err(error) => return Ok(response_builder.error(error)?), - }; - - let transfers = effect_builder - .make_request( - |responder| RpcRequest::GetBlockTransfers { - block_hash, - responder, - }, - QueueKind::Api, - ) - .await; - - // Return the result. - let result = Self::ResponseResult::new(api_version, Some(block_hash), transfers); - Ok(response_builder.success(result)?) - } - .boxed() - } -} - -/// Params for "chain_get_state_root_hash" RPC request. -#[derive(Serialize, Deserialize, Debug, JsonSchema)] -#[serde(deny_unknown_fields)] -pub struct GetStateRootHashParams { - /// The block hash. - pub block_identifier: BlockIdentifier, -} - -impl DocExample for GetStateRootHashParams { - fn doc_example() -> &'static Self { - &*GET_STATE_ROOT_HASH_PARAMS - } -} - -/// Result for "chain_get_state_root_hash" RPC response. -#[derive(Serialize, Deserialize, Debug, JsonSchema)] -#[serde(deny_unknown_fields)] -pub struct GetStateRootHashResult { - /// The RPC API version. - #[schemars(with = "String")] - pub api_version: ProtocolVersion, - /// Hex-encoded hash of the state root. 
- pub state_root_hash: Option, -} - -impl DocExample for GetStateRootHashResult { - fn doc_example() -> &'static Self { - &*GET_STATE_ROOT_HASH_RESULT - } -} - -/// "chain_get_state_root_hash" RPC. -pub struct GetStateRootHash {} - -impl RpcWithOptionalParams for GetStateRootHash { - const METHOD: &'static str = "chain_get_state_root_hash"; - type OptionalRequestParams = GetStateRootHashParams; - type ResponseResult = GetStateRootHashResult; -} - -impl RpcWithOptionalParamsExt for GetStateRootHash { - fn handle_request( - effect_builder: EffectBuilder, - response_builder: Builder, - maybe_params: Option, - api_version: ProtocolVersion, - ) -> BoxFuture<'static, Result, Error>> { - async move { - // Get the block. - let maybe_block_id = maybe_params.map(|params| params.block_identifier); - let maybe_block = match get_block(maybe_block_id, effect_builder).await { - Ok(maybe_block) => maybe_block, - Err(error) => return Ok(response_builder.error(error)?), - }; - - // Return the result. - let result = Self::ResponseResult { - api_version, - state_root_hash: maybe_block.map(|block| *block.state_root_hash()), - }; - Ok(response_builder.success(result)?) - } - .boxed() - } -} - -/// Params for "chain_get_era_info" RPC request. -#[derive(Serialize, Deserialize, Debug, JsonSchema)] -#[serde(deny_unknown_fields)] -pub struct GetEraInfoParams { - /// The block identifier. - pub block_identifier: BlockIdentifier, -} - -impl DocExample for GetEraInfoParams { - fn doc_example() -> &'static Self { - &*GET_ERA_INFO_PARAMS - } -} - -/// Result for "chain_get_era_info" RPC response. -#[derive(Serialize, Deserialize, Debug, JsonSchema)] -#[serde(deny_unknown_fields)] -pub struct GetEraInfoResult { - /// The RPC API version. - #[schemars(with = "String")] - pub api_version: ProtocolVersion, - /// The era summary. 
- pub era_summary: Option, -} - -impl DocExample for GetEraInfoResult { - fn doc_example() -> &'static Self { - &*GET_ERA_INFO_RESULT - } -} - -/// "chain_get_era_info_by_switch_block" RPC -pub struct GetEraInfoBySwitchBlock {} - -impl RpcWithOptionalParams for GetEraInfoBySwitchBlock { - const METHOD: &'static str = "chain_get_era_info_by_switch_block"; - type OptionalRequestParams = GetEraInfoParams; - type ResponseResult = GetEraInfoResult; -} - -impl RpcWithOptionalParamsExt for GetEraInfoBySwitchBlock { - fn handle_request( - effect_builder: EffectBuilder, - response_builder: Builder, - maybe_params: Option, - api_version: ProtocolVersion, - ) -> BoxFuture<'static, Result, Error>> { - async move { - // TODO: decide if/how to handle era id - let maybe_block_id = maybe_params.map(|params| params.block_identifier); - let maybe_block = match get_block(maybe_block_id, effect_builder).await { - Ok(maybe_block) => maybe_block, - Err(error) => return Ok(response_builder.error(error)?), - }; - - let block = match maybe_block { - Some(block) => block, - None => { - return Ok(response_builder.success(Self::ResponseResult { - api_version, - era_summary: None, - })?) - } - }; - - let era_id = match block.header().era_end() { - Some(_) => block.header().era_id(), - None => { - return Ok(response_builder.success(Self::ResponseResult { - api_version, - era_summary: None, - })?) 
- } - }; - - let state_root_hash = block.state_root_hash().to_owned(); - let base_key = Key::EraInfo(era_id); - let path = Vec::new(); - let query_result = effect_builder - .make_request( - |responder| RpcRequest::QueryGlobalState { - state_root_hash, - base_key, - path, - responder, - }, - QueueKind::Api, - ) - .await; - - let (stored_value, proof_bytes) = match common::extract_query_result(query_result) { - Ok(tuple) => tuple, - Err((error_code, error_msg)) => { - info!("{}", error_msg); - return Ok(response_builder - .error(warp_json_rpc::Error::custom(error_code as i64, error_msg))?); - } - }; - - let block_hash = block.hash().to_owned(); - - let result = Self::ResponseResult { - api_version, - era_summary: Some(EraSummary { - block_hash, - era_id, - stored_value, - state_root_hash, - merkle_proof: hex::encode(proof_bytes), - }), - }; - - Ok(response_builder.success(result)?) - } - .boxed() - } -} - -async fn get_block( - maybe_id: Option, - effect_builder: EffectBuilder, -) -> Result, warp_json_rpc::Error> { - match get_block_with_metadata(maybe_id, effect_builder).await { - Ok(Some((block, _))) => Ok(Some(block)), - Ok(None) => { - return Err(warp_json_rpc::Error::custom( - ErrorCode::NoSuchBlock as i64, - "block not known", - )) - } - Err(error) => Err(error), - } -} - -async fn get_block_with_metadata( - maybe_id: Option, - effect_builder: EffectBuilder, -) -> Result, warp_json_rpc::Error> { - // Get the block from storage or the latest from the linear chain. 
- let getting_specific_block = maybe_id.is_some(); - let maybe_result = effect_builder - .make_request( - |responder| RpcRequest::GetBlock { - maybe_id, - responder, - }, - QueueKind::Api, - ) - .await; - - if maybe_result.is_none() && getting_specific_block { - info!("failed to get {:?} from storage", maybe_id.unwrap()); - return Err(warp_json_rpc::Error::custom( - ErrorCode::NoSuchBlock as i64, - "block not known", - )); - } - - Ok(maybe_result) -} diff --git a/node/src/components/rpc_server/rpcs/chain/era_summary.rs b/node/src/components/rpc_server/rpcs/chain/era_summary.rs deleted file mode 100644 index 1600f948db..0000000000 --- a/node/src/components/rpc_server/rpcs/chain/era_summary.rs +++ /dev/null @@ -1,35 +0,0 @@ -use once_cell::sync::Lazy; -use schemars::JsonSchema; -use serde::{Deserialize, Serialize}; - -use casper_types::{system::auction::EraInfo, EraId}; - -use crate::{ - crypto::hash::Digest, - rpcs::{common::MERKLE_PROOF, docs::DocExample}, - types::{json_compatibility::StoredValue, Block, BlockHash, Item}, -}; - -pub(super) static ERA_SUMMARY: Lazy = Lazy::new(|| EraSummary { - block_hash: Block::doc_example().id(), - era_id: EraId::from(42), - stored_value: StoredValue::EraInfo(EraInfo::new()), - state_root_hash: *Block::doc_example().header().state_root_hash(), - merkle_proof: MERKLE_PROOF.clone(), -}); - -/// The summary of an era -#[derive(Clone, Serialize, Deserialize, Debug, JsonSchema)] -#[serde(deny_unknown_fields)] -pub struct EraSummary { - /// The block hash - pub block_hash: BlockHash, - /// The era id - pub era_id: EraId, - /// The StoredValue containing era information - pub stored_value: StoredValue, - /// Hex-encoded hash of the state root - pub state_root_hash: Digest, - /// The merkle proof - pub merkle_proof: String, -} diff --git a/node/src/components/rpc_server/rpcs/docs.rs b/node/src/components/rpc_server/rpcs/docs.rs deleted file mode 100644 index 89287a67ea..0000000000 --- a/node/src/components/rpc_server/rpcs/docs.rs +++ 
/dev/null @@ -1,429 +0,0 @@ -//! RPCs related to finding information about currently supported RPCs. - -// TODO - remove once schemars stops causing warning. -#![allow(clippy::field_reassign_with_default)] - -use futures::{future::BoxFuture, FutureExt}; -use http::Response; -use hyper::Body; -use once_cell::sync::Lazy; -use schemars::{ - gen::{SchemaGenerator, SchemaSettings}, - schema::Schema, - JsonSchema, Map, MapEntry, -}; -use serde::{Deserialize, Serialize}; -use serde_json::{json, Value}; -use warp_json_rpc::Builder; - -use casper_types::ProtocolVersion; - -use super::{ - account::PutDeploy, - chain::{GetBlock, GetBlockTransfers, GetStateRootHash}, - info::{GetDeploy, GetPeers, GetStatus}, - state::{GetAuctionInfo, GetBalance, GetItem}, - Error, ReactorEventT, RpcWithOptionalParams, RpcWithParams, RpcWithoutParams, - RpcWithoutParamsExt, -}; -use crate::{effect::EffectBuilder, rpcs::chain::GetEraInfoBySwitchBlock}; - -pub(crate) const DOCS_EXAMPLE_PROTOCOL_VERSION: ProtocolVersion = - ProtocolVersion::from_parts(1, 0, 0); - -const DEFINITIONS_PATH: &str = "#/components/schemas/"; - -// As per https://spec.open-rpc.org/#service-discovery-method. -static OPEN_RPC_SCHEMA: Lazy = Lazy::new(|| { - let contact = OpenRpcContactField { - name: "CasperLabs".to_string(), - url: "https://casperlabs.io".to_string(), - }; - let license = OpenRpcLicenseField { - name: "CasperLabs Open Source License Version 1.0".to_string(), - url: "https://raw.githubusercontent.com/CasperLabs/casper-node/master/LICENSE".to_string(), - }; - let info = OpenRpcInfoField { - version: DOCS_EXAMPLE_PROTOCOL_VERSION.to_string(), - title: "Client API of Casper Node".to_string(), - description: "This describes the JSON-RPC 2.0 API of a node on the Casper network." 
- .to_string(), - contact, - license, - }; - - let server = OpenRpcServerEntry { - name: "any Casper Network node".to_string(), - url: "http://IP:PORT/rpc/".to_string(), - }; - - let mut schema = OpenRpcSchema { - openrpc: "1.0.0-rc1".to_string(), - info, - servers: vec![server], - methods: vec![], - components: Components { - schemas: Map::new(), - }, - }; - - schema.push_with_params::("receives a Deploy to be executed by the network"); - schema.push_with_params::("returns a Deploy from the network"); - schema.push_without_params::("returns a list of peers connected to the node"); - schema.push_without_params::("returns the current status of the node"); - schema.push_with_optional_params::("returns a Block from the network"); - schema.push_with_optional_params::( - "returns all transfers for a Block from the network", - ); - schema.push_with_optional_params::( - "returns a state root hash at a given Block", - ); - schema.push_with_params::("returns a stored value from the network"); - schema.push_with_params::("returns a purse's balance from the network"); - schema.push_with_optional_params::( - "returns an EraInfo from the network", - ); - schema.push_without_params::( - "returns the bids and validators as of the most recently added Block", - ); - - schema -}); -static LIST_RPCS_RESULT: Lazy = Lazy::new(|| ListRpcsResult { - api_version: DOCS_EXAMPLE_PROTOCOL_VERSION, - name: "OpenRPC Schema".to_string(), - schema: OPEN_RPC_SCHEMA.clone(), -}); - -/// A trait used to generate a static hardcoded example of `Self`. -pub trait DocExample { - /// Generates a hardcoded example of `Self`. - fn doc_example() -> &'static Self; -} - -/// The main schema for the casper node's RPC server, compliant with https://spec.open-rpc.org. 
-#[derive(Clone, Serialize, Deserialize, Debug)] -struct OpenRpcSchema { - openrpc: String, - info: OpenRpcInfoField, - servers: Vec, - methods: Vec, - components: Components, -} - -impl OpenRpcSchema { - fn new_generator() -> SchemaGenerator { - let settings = SchemaSettings::default().with(|settings| { - settings.definitions_path = DEFINITIONS_PATH.to_string(); - }); - settings.into_generator() - } - - fn push_with_params(&mut self, summary: &str) { - let mut generator = Self::new_generator(); - - let params_schema = T::RequestParams::json_schema(&mut generator); - let params = Self::make_params(params_schema); - - let result_schema = T::ResponseResult::json_schema(&mut generator); - let result = ResponseResult { - name: format!("{}_result", T::METHOD), - schema: result_schema, - }; - - let examples = vec![Example::from_rpc_with_params::()]; - - let method = Method { - name: T::METHOD.to_string(), - summary: summary.to_string(), - params, - result, - examples, - }; - - self.methods.push(method); - self.update_schemas::(); - self.update_schemas::(); - } - - fn push_without_params(&mut self, summary: &str) { - let mut generator = Self::new_generator(); - - let result_schema = T::ResponseResult::json_schema(&mut generator); - let result = ResponseResult { - name: format!("{}_result", T::METHOD), - schema: result_schema, - }; - - let examples = vec![Example::from_rpc_without_params::()]; - - let method = Method { - name: T::METHOD.to_string(), - summary: summary.to_string(), - params: vec![], - result, - examples, - }; - - self.methods.push(method); - self.update_schemas::(); - } - - fn push_with_optional_params(&mut self, summary: &str) { - let mut generator = Self::new_generator(); - - let params_schema = T::OptionalRequestParams::json_schema(&mut generator); - let params = Self::make_params(params_schema); - - let result_schema = T::ResponseResult::json_schema(&mut generator); - let result = ResponseResult { - name: format!("{}_result", T::METHOD), - schema: 
result_schema, - }; - - let examples = vec![Example::from_rpc_with_optional_params::()]; - - // TODO - handle adding a description that the params may be omitted if desired. - let method = Method { - name: T::METHOD.to_string(), - summary: summary.to_string(), - params, - result, - examples, - }; - - self.methods.push(method); - self.update_schemas::(); - self.update_schemas::(); - } - - /// Convert the schema for the params type for T into the OpenRpc-compatible map of name, value - /// pairs. - /// - /// As per the standard, the required params must be sorted before the optional ones. - fn make_params(schema: Schema) -> Vec { - let schema_object = schema.into_object().object.expect("should be object"); - let mut required_params = schema_object - .properties - .iter() - .filter(|(name, _)| schema_object.required.contains(*name)) - .map(|(name, schema)| SchemaParam { - name: name.clone(), - schema: schema.clone(), - required: true, - }) - .collect::>(); - let optional_params = schema_object - .properties - .iter() - .filter(|(name, _)| !schema_object.required.contains(*name)) - .map(|(name, schema)| SchemaParam { - name: name.clone(), - schema: schema.clone(), - required: false, - }) - .collect::>(); - required_params.extend(optional_params); - required_params - } - - /// Insert the new entries into the #/components/schemas/ map. Panic if we try to overwrite an - /// entry with a different value. - fn update_schemas(&mut self) { - let generator = Self::new_generator(); - let mut root_schema = generator.into_root_schema_for::(); - for (key, value) in root_schema.definitions.drain(..) 
{ - match self.components.schemas.entry(key) { - MapEntry::Occupied(current_value) => { - assert_eq!( - current_value.get().clone().into_object().metadata, - value.into_object().metadata - ) - } - MapEntry::Vacant(vacant) => { - let _ = vacant.insert(value); - } - } - } - } -} - -#[derive(Clone, Serialize, Deserialize, Debug)] -struct OpenRpcInfoField { - version: String, - title: String, - description: String, - contact: OpenRpcContactField, - license: OpenRpcLicenseField, -} - -#[derive(Clone, Serialize, Deserialize, Debug)] -struct OpenRpcContactField { - name: String, - url: String, -} - -#[derive(Clone, Serialize, Deserialize, Debug)] -struct OpenRpcLicenseField { - name: String, - url: String, -} - -#[derive(Clone, Serialize, Deserialize, Debug)] -struct OpenRpcServerEntry { - name: String, - url: String, -} - -/// The struct containing the documentation for the RPCs. -#[derive(Clone, Serialize, Deserialize, Debug)] -pub struct Method { - name: String, - summary: String, - params: Vec, - result: ResponseResult, - examples: Vec, -} - -#[derive(Clone, Serialize, Deserialize, Debug)] -struct SchemaParam { - name: String, - schema: Schema, - required: bool, -} - -#[derive(Clone, Serialize, Deserialize, Debug)] -struct ResponseResult { - name: String, - schema: Schema, -} - -/// An example pair of request params and response result. -#[derive(Clone, Serialize, Deserialize, Debug)] -pub struct Example { - name: String, - params: Vec, - result: ExampleResult, -} - -impl Example { - fn new(method_name: &str, maybe_params_obj: Option, result_value: Value) -> Self { - // Break the params struct into an array of param name and value pairs. 
- let params = match maybe_params_obj { - Some(params_obj) => params_obj - .as_object() - .unwrap() - .iter() - .map(|(name, value)| ExampleParam { - name: name.clone(), - value: value.clone(), - }) - .collect(), - None => vec![], - }; - - Example { - name: format!("{}_example", method_name), - params, - result: ExampleResult { - name: format!("{}_example_result", method_name), - value: result_value, - }, - } - } - - fn from_rpc_with_params() -> Self { - Self::new( - T::METHOD, - Some(json!(T::RequestParams::doc_example())), - json!(T::ResponseResult::doc_example()), - ) - } - - fn from_rpc_without_params() -> Self { - Self::new(T::METHOD, None, json!(T::ResponseResult::doc_example())) - } - - fn from_rpc_with_optional_params() -> Self { - Self::new( - T::METHOD, - Some(json!(T::OptionalRequestParams::doc_example())), - json!(T::ResponseResult::doc_example()), - ) - } -} - -#[derive(Clone, Serialize, Deserialize, Debug)] -struct ExampleParam { - name: String, - value: Value, -} - -#[derive(Clone, Serialize, Deserialize, Debug)] -struct ExampleResult { - name: String, - value: Value, -} - -#[derive(Clone, Serialize, Deserialize, Debug)] -struct Components { - schemas: Map, -} - -/// Result for "rpc.discover" RPC response. -// -// Fields named as per https://spec.open-rpc.org/#service-discovery-method. -#[derive(Clone, Serialize, Deserialize, JsonSchema, Debug)] -#[serde(deny_unknown_fields)] -pub struct ListRpcsResult { - /// The RPC API version. - #[schemars(with = "String")] - api_version: ProtocolVersion, - name: String, - /// The list of supported RPCs. - #[schemars(skip)] - schema: OpenRpcSchema, -} - -impl DocExample for ListRpcsResult { - fn doc_example() -> &'static Self { - &*LIST_RPCS_RESULT - } -} - -/// "rpc.discover" RPC. -#[derive(Clone, Serialize, Deserialize, Debug)] -pub struct ListRpcs {} - -impl RpcWithoutParams for ListRpcs { - // Named as per https://spec.open-rpc.org/#service-discovery-method. 
- const METHOD: &'static str = "rpc.discover"; - type ResponseResult = ListRpcsResult; -} - -impl RpcWithoutParamsExt for ListRpcs { - fn handle_request( - _effect_builder: EffectBuilder, - response_builder: Builder, - _api_version: ProtocolVersion, - ) -> BoxFuture<'static, Result, Error>> { - async move { Ok(response_builder.success(ListRpcsResult::doc_example().clone())?) }.boxed() - } -} - -#[cfg(test)] -mod tests { - use crate::{types::Chainspec, utils::Loadable}; - - use super::*; - - #[test] - fn check_docs_example_version() { - let chainspec = Chainspec::from_resources("production"); - assert_eq!( - DOCS_EXAMPLE_PROTOCOL_VERSION, chainspec.protocol_config.version, - "DOCS_EXAMPLE_VERSION needs to be updated to match the [protocol.version] in \ - 'resources/production/chainspec.toml'" - ); - } -} diff --git a/node/src/components/rpc_server/rpcs/info.rs b/node/src/components/rpc_server/rpcs/info.rs deleted file mode 100644 index 7ba3925e90..0000000000 --- a/node/src/components/rpc_server/rpcs/info.rs +++ /dev/null @@ -1,227 +0,0 @@ -//! RPCs returning ancillary information. - -// TODO - remove once schemars stops causing warning. 
-#![allow(clippy::field_reassign_with_default)] - -use std::str; - -use futures::{future::BoxFuture, FutureExt}; -use http::Response; -use hyper::Body; -use once_cell::sync::Lazy; -use schemars::JsonSchema; -use serde::{Deserialize, Serialize}; -use tracing::info; -use warp_json_rpc::Builder; - -use casper_types::{ExecutionResult, ProtocolVersion}; - -use super::{ - docs::{DocExample, DOCS_EXAMPLE_PROTOCOL_VERSION}, - Error, ErrorCode, ReactorEventT, RpcRequest, RpcWithParams, RpcWithParamsExt, RpcWithoutParams, - RpcWithoutParamsExt, -}; -use crate::{ - effect::EffectBuilder, - reactor::QueueKind, - types::{Block, BlockHash, Deploy, DeployHash, GetStatusResult, Item, PeersMap}, -}; - -static GET_DEPLOY_PARAMS: Lazy = Lazy::new(|| GetDeployParams { - deploy_hash: *Deploy::doc_example().id(), -}); -static GET_DEPLOY_RESULT: Lazy = Lazy::new(|| GetDeployResult { - api_version: DOCS_EXAMPLE_PROTOCOL_VERSION, - deploy: Deploy::doc_example().clone(), - execution_results: vec![JsonExecutionResult { - block_hash: Block::doc_example().id(), - result: ExecutionResult::example().clone(), - }], -}); -static GET_PEERS_RESULT: Lazy = Lazy::new(|| GetPeersResult { - api_version: DOCS_EXAMPLE_PROTOCOL_VERSION, - peers: GetStatusResult::doc_example().peers.clone(), -}); - -/// Params for "info_get_deploy" RPC request. -#[derive(Serialize, Deserialize, Debug, JsonSchema)] -#[serde(deny_unknown_fields)] -pub struct GetDeployParams { - /// The deploy hash. - pub deploy_hash: DeployHash, -} - -impl DocExample for GetDeployParams { - fn doc_example() -> &'static Self { - &*GET_DEPLOY_PARAMS - } -} - -/// The execution result of a single deploy. -#[derive(Serialize, Deserialize, Debug, JsonSchema)] -#[serde(deny_unknown_fields)] -pub struct JsonExecutionResult { - /// The block hash. - pub block_hash: BlockHash, - /// Execution result. - pub result: ExecutionResult, -} - -/// Result for "info_get_deploy" RPC response. 
-#[derive(Serialize, Deserialize, Debug, JsonSchema)] -#[serde(deny_unknown_fields)] -pub struct GetDeployResult { - /// The RPC API version. - #[schemars(with = "String")] - pub api_version: ProtocolVersion, - /// The deploy. - pub deploy: Deploy, - /// The map of block hash to execution result. - pub execution_results: Vec, -} - -impl DocExample for GetDeployResult { - fn doc_example() -> &'static Self { - &*GET_DEPLOY_RESULT - } -} - -/// "info_get_deploy" RPC. -pub struct GetDeploy {} - -impl RpcWithParams for GetDeploy { - const METHOD: &'static str = "info_get_deploy"; - type RequestParams = GetDeployParams; - type ResponseResult = GetDeployResult; -} - -impl RpcWithParamsExt for GetDeploy { - fn handle_request( - effect_builder: EffectBuilder, - response_builder: Builder, - params: Self::RequestParams, - api_version: ProtocolVersion, - ) -> BoxFuture<'static, Result, Error>> { - async move { - // Try to get the deploy and metadata from storage. - let maybe_deploy_and_metadata = effect_builder - .make_request( - |responder| RpcRequest::GetDeploy { - hash: params.deploy_hash, - responder, - }, - QueueKind::Api, - ) - .await; - - let (deploy, metadata) = match maybe_deploy_and_metadata { - Some((deploy, metadata)) => (deploy, metadata), - None => { - info!( - "failed to get {} and metadata from storage", - params.deploy_hash - ); - return Ok(response_builder.error(warp_json_rpc::Error::custom( - ErrorCode::NoSuchDeploy as i64, - "deploy not known", - ))?); - } - }; - - // Return the result. - let execution_results = metadata - .execution_results - .into_iter() - .map(|(block_hash, result)| JsonExecutionResult { block_hash, result }) - .collect(); - - let result = Self::ResponseResult { - api_version, - deploy, - execution_results, - }; - Ok(response_builder.success(result)?) - } - .boxed() - } -} - -/// Result for "info_get_peers" RPC response. 
-#[derive(Serialize, Deserialize, Debug, JsonSchema)] -#[serde(deny_unknown_fields)] -pub struct GetPeersResult { - /// The RPC API version. - #[schemars(with = "String")] - pub api_version: ProtocolVersion, - /// The node ID and network address of each connected peer. - pub peers: PeersMap, -} - -impl DocExample for GetPeersResult { - fn doc_example() -> &'static Self { - &*GET_PEERS_RESULT - } -} - -/// "info_get_peers" RPC. -pub struct GetPeers {} - -impl RpcWithoutParams for GetPeers { - const METHOD: &'static str = "info_get_peers"; - type ResponseResult = GetPeersResult; -} - -impl RpcWithoutParamsExt for GetPeers { - fn handle_request( - effect_builder: EffectBuilder, - response_builder: Builder, - api_version: ProtocolVersion, - ) -> BoxFuture<'static, Result, Error>> { - async move { - let peers = effect_builder - .make_request( - |responder| RpcRequest::GetPeers { responder }, - QueueKind::Api, - ) - .await; - - let result = Self::ResponseResult { - api_version, - peers: PeersMap::from(peers), - }; - Ok(response_builder.success(result)?) - } - .boxed() - } -} - -/// "info_get_status" RPC. -pub struct GetStatus {} - -impl RpcWithoutParams for GetStatus { - const METHOD: &'static str = "info_get_status"; - type ResponseResult = GetStatusResult; -} - -impl RpcWithoutParamsExt for GetStatus { - fn handle_request( - effect_builder: EffectBuilder, - response_builder: Builder, - api_version: ProtocolVersion, - ) -> BoxFuture<'static, Result, Error>> { - async move { - // Get the status. - let status_feed = effect_builder - .make_request( - |responder| RpcRequest::GetStatus { responder }, - QueueKind::Api, - ) - .await; - - // Convert to `ResponseResult` and send. - let body = Self::ResponseResult::new(status_feed, api_version); - Ok(response_builder.success(body)?) 
- } - .boxed() - } -} diff --git a/node/src/components/rpc_server/rpcs/state.rs b/node/src/components/rpc_server/rpcs/state.rs deleted file mode 100644 index d9c5c6636c..0000000000 --- a/node/src/components/rpc_server/rpcs/state.rs +++ /dev/null @@ -1,390 +0,0 @@ -//! RPCs related to the state. - -// TODO - remove once schemars stops causing warning. -#![allow(clippy::field_reassign_with_default)] - -use std::str; - -use futures::{future::BoxFuture, FutureExt}; -use http::Response; -use hyper::Body; -use once_cell::sync::Lazy; -use schemars::JsonSchema; -use serde::{Deserialize, Serialize}; -use tracing::info; -use warp_json_rpc::Builder; - -use casper_execution_engine::core::engine_state::{BalanceResult, GetBidsResult}; -use casper_types::{bytesrepr::ToBytes, CLValue, Key, ProtocolVersion, URef, U512}; - -use super::{ - docs::{DocExample, DOCS_EXAMPLE_PROTOCOL_VERSION}, - Error, ErrorCode, ReactorEventT, RpcRequest, RpcWithParams, RpcWithParamsExt, -}; -use crate::{ - crypto::hash::Digest, - effect::EffectBuilder, - reactor::QueueKind, - rpcs::{ - common::{self, MERKLE_PROOF}, - RpcWithoutParams, RpcWithoutParamsExt, - }, - types::{ - json_compatibility::{AuctionState, StoredValue}, - Block, - }, -}; - -static GET_ITEM_PARAMS: Lazy = Lazy::new(|| GetItemParams { - state_root_hash: *Block::doc_example().header().state_root_hash(), - key: "deploy-af684263911154d26fa05be9963171802801a0b6aff8f199b7391eacb8edc9e1".to_string(), - path: vec!["inner".to_string()], -}); -static GET_ITEM_RESULT: Lazy = Lazy::new(|| GetItemResult { - api_version: DOCS_EXAMPLE_PROTOCOL_VERSION, - stored_value: StoredValue::CLValue(CLValue::from_t(1u64).unwrap()), - merkle_proof: MERKLE_PROOF.clone(), -}); -static GET_BALANCE_PARAMS: Lazy = Lazy::new(|| GetBalanceParams { - state_root_hash: *Block::doc_example().header().state_root_hash(), - purse_uref: "uref-09480c3248ef76b603d386f3f4f8a5f87f597d4eaffd475433f861af187ab5db-007" - .to_string(), -}); -static GET_BALANCE_RESULT: Lazy = 
Lazy::new(|| GetBalanceResult { - api_version: DOCS_EXAMPLE_PROTOCOL_VERSION, - balance_value: U512::from(123_456), - merkle_proof: MERKLE_PROOF.clone(), -}); -static GET_AUCTION_INFO_RESULT: Lazy = Lazy::new(|| GetAuctionInfoResult { - api_version: DOCS_EXAMPLE_PROTOCOL_VERSION, - auction_state: AuctionState::doc_example().clone(), -}); - -/// Params for "state_get_item" RPC request. -#[derive(Serialize, Deserialize, Debug, JsonSchema)] -#[serde(deny_unknown_fields)] -pub struct GetItemParams { - /// Hash of the state root. - pub state_root_hash: Digest, - /// `casper_types::Key` as formatted string. - pub key: String, - /// The path components starting from the key as base. - #[serde(default)] - pub path: Vec, -} - -impl DocExample for GetItemParams { - fn doc_example() -> &'static Self { - &*GET_ITEM_PARAMS - } -} - -/// Result for "state_get_item" RPC response. -#[derive(Serialize, Deserialize, Debug, JsonSchema)] -#[serde(deny_unknown_fields)] -pub struct GetItemResult { - /// The RPC API version. - #[schemars(with = "String")] - pub api_version: ProtocolVersion, - /// The stored value. - pub stored_value: StoredValue, - /// The merkle proof. - pub merkle_proof: String, -} - -impl DocExample for GetItemResult { - fn doc_example() -> &'static Self { - &*GET_ITEM_RESULT - } -} - -/// "state_get_item" RPC. -pub struct GetItem {} - -impl RpcWithParams for GetItem { - const METHOD: &'static str = "state_get_item"; - type RequestParams = GetItemParams; - type ResponseResult = GetItemResult; -} - -impl RpcWithParamsExt for GetItem { - fn handle_request( - effect_builder: EffectBuilder, - response_builder: Builder, - params: Self::RequestParams, - api_version: ProtocolVersion, - ) -> BoxFuture<'static, Result, Error>> { - async move { - // Try to parse a `casper_types::Key` from the params. 
- let base_key = match Key::from_formatted_str(¶ms.key) - .map_err(|error| format!("failed to parse key: {:?}", error)) - { - Ok(key) => key, - Err(error_msg) => { - info!("{}", error_msg); - return Ok(response_builder.error(warp_json_rpc::Error::custom( - ErrorCode::ParseQueryKey as i64, - error_msg, - ))?); - } - }; - - // Run the query. - let query_result = effect_builder - .make_request( - |responder| RpcRequest::QueryGlobalState { - state_root_hash: params.state_root_hash, - base_key, - path: params.path, - responder, - }, - QueueKind::Api, - ) - .await; - - let (stored_value, proof_bytes) = match common::extract_query_result(query_result) { - Ok(tuple) => tuple, - Err((error_code, error_msg)) => { - info!("{}", error_msg); - return Ok(response_builder - .error(warp_json_rpc::Error::custom(error_code as i64, error_msg))?); - } - }; - - let result = Self::ResponseResult { - api_version, - stored_value, - merkle_proof: hex::encode(proof_bytes), - }; - - Ok(response_builder.success(result)?) - } - .boxed() - } -} - -/// Params for "state_get_balance" RPC request. -#[derive(Serialize, Deserialize, Debug, JsonSchema)] -#[serde(deny_unknown_fields)] -pub struct GetBalanceParams { - /// The hash of state root. - pub state_root_hash: Digest, - /// Formatted URef. - pub purse_uref: String, -} - -impl DocExample for GetBalanceParams { - fn doc_example() -> &'static Self { - &*GET_BALANCE_PARAMS - } -} - -/// Result for "state_get_balance" RPC response. -#[derive(Serialize, Deserialize, Debug, JsonSchema)] -#[serde(deny_unknown_fields)] -pub struct GetBalanceResult { - /// The RPC API version. - #[schemars(with = "String")] - pub api_version: ProtocolVersion, - /// The balance value. - pub balance_value: U512, - /// The merkle proof. - pub merkle_proof: String, -} - -impl DocExample for GetBalanceResult { - fn doc_example() -> &'static Self { - &*GET_BALANCE_RESULT - } -} - -/// "state_get_balance" RPC. 
-pub struct GetBalance {} - -impl RpcWithParams for GetBalance { - const METHOD: &'static str = "state_get_balance"; - type RequestParams = GetBalanceParams; - type ResponseResult = GetBalanceResult; -} - -impl RpcWithParamsExt for GetBalance { - fn handle_request( - effect_builder: EffectBuilder, - response_builder: Builder, - params: Self::RequestParams, - api_version: ProtocolVersion, - ) -> BoxFuture<'static, Result, Error>> { - async move { - // Try to parse the purse's URef from the params. - let purse_uref = match URef::from_formatted_str(¶ms.purse_uref) - .map_err(|error| format!("failed to parse purse_uref: {:?}", error)) - { - Ok(uref) => uref, - Err(error_msg) => { - info!("{}", error_msg); - return Ok(response_builder.error(warp_json_rpc::Error::custom( - ErrorCode::ParseGetBalanceURef as i64, - error_msg, - ))?); - } - }; - - // Get the balance. - let balance_result = effect_builder - .make_request( - |responder| RpcRequest::GetBalance { - state_root_hash: params.state_root_hash, - purse_uref, - responder, - }, - QueueKind::Api, - ) - .await; - - let (balance_value, balance_proof) = match balance_result { - Ok(BalanceResult::Success { motes, proof }) => (motes, proof), - Ok(balance_result) => { - let error_msg = format!("get-balance failed: {:?}", balance_result); - info!("{}", error_msg); - return Ok(response_builder.error(warp_json_rpc::Error::custom( - ErrorCode::GetBalanceFailed as i64, - error_msg, - ))?); - } - Err(error) => { - let error_msg = format!("get-balance failed to execute: {}", error); - info!("{}", error_msg); - return Ok(response_builder.error(warp_json_rpc::Error::custom( - ErrorCode::GetBalanceFailedToExecute as i64, - error_msg, - ))?); - } - }; - - let proof_bytes = match balance_proof.to_bytes() { - Ok(proof_bytes) => proof_bytes, - Err(error) => { - info!("failed to encode stored value: {}", error); - return Ok(response_builder.error(warp_json_rpc::Error::INTERNAL_ERROR)?); - } - }; - - let merkle_proof = 
hex::encode(proof_bytes); - - // Return the result. - let result = Self::ResponseResult { - api_version, - balance_value, - merkle_proof, - }; - Ok(response_builder.success(result)?) - } - .boxed() - } -} - -/// Result for "state_get_auction_info" RPC response. -#[derive(Serialize, Deserialize, Debug, JsonSchema)] -#[serde(deny_unknown_fields)] -pub struct GetAuctionInfoResult { - /// The RPC API version. - #[schemars(with = "String")] - pub api_version: ProtocolVersion, - /// The auction state. - pub auction_state: AuctionState, -} - -impl DocExample for GetAuctionInfoResult { - fn doc_example() -> &'static Self { - &*GET_AUCTION_INFO_RESULT - } -} - -/// "state_get_auction_info" RPC. -pub struct GetAuctionInfo {} - -impl RpcWithoutParams for GetAuctionInfo { - const METHOD: &'static str = "state_get_auction_info"; - type ResponseResult = GetAuctionInfoResult; -} - -impl RpcWithoutParamsExt for GetAuctionInfo { - fn handle_request( - effect_builder: EffectBuilder, - response_builder: Builder, - api_version: ProtocolVersion, - ) -> BoxFuture<'static, Result, Error>> { - async move { - let block: Block = { - let maybe_block = effect_builder - .make_request( - |responder| RpcRequest::GetBlock { - maybe_id: None, - responder, - }, - QueueKind::Api, - ) - .await; - - match maybe_block { - None => { - let error_msg = - "get-auction-info failed to get last added block".to_string(); - info!("{}", error_msg); - return Ok(response_builder.error(warp_json_rpc::Error::custom( - ErrorCode::NoSuchBlock as i64, - error_msg, - ))?); - } - Some((block, _)) => block, - } - }; - - let protocol_version = api_version; - - // the global state hash of the last block - let state_root_hash = *block.header().state_root_hash(); - // the block height of the last added block - let block_height = block.header().height(); - - let get_bids_result = effect_builder - .make_request( - |responder| RpcRequest::GetBids { - state_root_hash, - responder, - }, - QueueKind::Api, - ) - .await; - - let 
maybe_bids = if let Ok(GetBidsResult::Success { bids, .. }) = get_bids_result { - Some(bids) - } else { - None - }; - - let era_validators_result = effect_builder - .make_request( - |responder| RpcRequest::QueryEraValidators { - state_root_hash, - protocol_version, - responder, - }, - QueueKind::Api, - ) - .await; - - let era_validators = era_validators_result.ok(); - - let auction_state = - AuctionState::new(state_root_hash, block_height, era_validators, maybe_bids); - - let result = Self::ResponseResult { - api_version, - auction_state, - }; - Ok(response_builder.success(result)?) - } - .boxed() - } -} diff --git a/node/src/components/shutdown_trigger.rs b/node/src/components/shutdown_trigger.rs new file mode 100644 index 0000000000..81e03eca8c --- /dev/null +++ b/node/src/components/shutdown_trigger.rs @@ -0,0 +1,182 @@ +//! Shutdown trigger control. +//! +//! A component that can be primed with a [`StopAtSpec`] and will monitor the node, until it +//! detects a specific spec has been triggered. If so, it instructs the system to shut down through +//! a [`ControlAnnouncement`]. + +use std::{fmt::Display, mem}; + +use datasize::DataSize; +use derive_more::From; +use serde::Serialize; +use tracing::{info, trace}; + +use casper_types::EraId; + +use crate::{ + effect::{ + announcements::ControlAnnouncement, requests::SetNodeStopRequest, EffectBuilder, EffectExt, + Effects, + }, + types::NodeRng, +}; + +use super::{diagnostics_port::StopAtSpec, Component}; + +#[derive(DataSize, Debug, Serialize)] +pub(crate) struct CompletedBlockInfo { + height: u64, + era: EraId, + is_switch_block: bool, +} + +impl CompletedBlockInfo { + pub(crate) fn new(height: u64, era: EraId, is_switch_block: bool) -> Self { + Self { + height, + era, + is_switch_block, + } + } +} + +/// The shutdown trigger component's event. +#[derive(DataSize, Debug, From, Serialize)] +pub(crate) enum Event { + /// An announcement that a block has been completed. 
+ CompletedBlock(CompletedBlockInfo), + /// A request to trigger a shutdown. + #[from] + SetNodeStopRequest(SetNodeStopRequest), +} + +impl Display for Event { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Event::CompletedBlock(block_info) => { + write!( + f, + "completed block: height {}, era {}, switch_block {}", + block_info.height, block_info.era, block_info.is_switch_block + ) + } + Event::SetNodeStopRequest(inner) => { + write!(f, "set node stop request: {}", inner) + } + } + } +} + +const COMPONENT_NAME: &str = "shutdown_trigger"; + +/// Shutdown trigger component. +#[derive(DataSize, Debug)] +pub(crate) struct ShutdownTrigger { + /// The currently active spec for shutdown triggers. + active_spec: Option, + /// The highest block height seen, if any. + /// + /// Constantly kept up to date, so that requests for shutting down on `block:next` can be + /// answered without additional requests. + highest_block_height_seen: Option, +} + +impl ShutdownTrigger { + /// Creates a new instance of the shutdown trigger component. + pub(crate) fn new() -> Self { + Self { + active_spec: None, + highest_block_height_seen: None, + } + } +} + +impl Component for ShutdownTrigger +where + REv: Send + From, +{ + type Event = Event; + + fn handle_event( + &mut self, + effect_builder: EffectBuilder, + _: &mut NodeRng, + event: Self::Event, + ) -> Effects { + match event { + Event::CompletedBlock(block_info) => { + // We ignore every block that is older than one we already possess. + let prev_height = self.highest_block_height_seen.unwrap_or_default(); + if block_info.height > prev_height { + self.highest_block_height_seen = Some(block_info.height); + } + + // Once the updating is done, check if we need to emit shutdown announcements. 
+ let active_spec = if let Some(spec) = self.active_spec { + spec + } else { + trace!("received block, but no active stop-at spec, ignoring"); + return Effects::new(); + }; + + let should_shutdown = match active_spec { + StopAtSpec::BlockHeight(trigger_height) => block_info.height >= trigger_height, + StopAtSpec::EraId(trigger_era_id) => block_info.era >= trigger_era_id, + StopAtSpec::Immediately => { + // Immediate stops are handled when the request is received. + false + } + StopAtSpec::NextBlock => { + // Any block that is newer than one we already saw is a "next" block. + block_info.height > prev_height + } + StopAtSpec::EndOfCurrentEra => { + // We require that the block we just finished is a switch block. + block_info.height > prev_height && block_info.is_switch_block + } + }; + + if should_shutdown { + info!( + block_height = block_info.height, + block_era = block_info.era.value(), + is_switch_block = block_info.is_switch_block, + %active_spec, + "shutdown triggered due to fulfilled stop-at spec" + ); + effect_builder.announce_user_shutdown_request().ignore() + } else { + trace!( + block_height = block_info.height, + block_era = block_info.era.value(), + is_switch_block = block_info.is_switch_block, + %active_spec, + "not shutting down" + ); + Effects::new() + } + } + + Event::SetNodeStopRequest(SetNodeStopRequest { + mut stop_at, + responder, + }) => { + mem::swap(&mut self.active_spec, &mut stop_at); + + let mut effects = responder.respond(stop_at).ignore(); + + // If we received an immediate shutdown request, send out the control announcement + // directly, instead of waiting for another block. 
+ if matches!(self.active_spec, Some(StopAtSpec::Immediately)) { + effects.extend(effect_builder.announce_user_shutdown_request().ignore()); + } + + effects + } + } + } + + fn name(&self) -> &str { + COMPONENT_NAME + } +} diff --git a/node/src/components/small_network.rs b/node/src/components/small_network.rs deleted file mode 100644 index a7cf7c63ee..0000000000 --- a/node/src/components/small_network.rs +++ /dev/null @@ -1,1394 +0,0 @@ -//! Fully connected overlay network -//! -//! The *small network* is an overlay network where each node participating is connected to every -//! other node on the network. The *small* portion of the name stems from the fact that this -//! approach is not scalable, as it requires at least $O(n)$ network connections and broadcast will -//! result in $O(n^2)$ messages. -//! -//! # Node IDs -//! -//! Each node has a self-generated node ID based on its self-signed TLS certificate. Whenever a -//! connection is made to another node, it verifies the "server"'s certificate to check that it -//! connected to the correct node and sends its own certificate during the TLS handshake, -//! establishing identity. -//! -//! # Messages and payloads -//! -//! The network itself is best-effort, during regular operation, no messages should be lost. -//! -//! # Connection -//! -//! Every node has an ID and a public listening address. The objective of each node is to constantly -//! maintain an outgoing connection to each other node (and thus have an incoming connection from -//! these nodes as well). -//! -//! Any incoming connection is strictly read from, while any outgoing connection is strictly used -//! for sending messages. -//! -//! Nodes gossip their public listening addresses periodically, and on learning of a new address, -//! a node will try to establish an outgoing connection. -//! -//! On losing an incoming or outgoing connection for a given peer, the other connection is closed. -//! No explicit reconnect is attempted. 
Instead, if the peer is still online, the normal gossiping -//! process will cause both peers to connect again. - -mod chain_info; -mod config; -mod error; -mod event; -mod gossiped_address; -mod message; -#[cfg(test)] -mod tests; - -use std::{ - collections::{BTreeMap, HashMap, HashSet}, - convert::Infallible, - env, - fmt::{self, Debug, Display, Formatter}, - io, - net::{SocketAddr, TcpListener}, - pin::Pin, - result, - sync::{ - atomic::{AtomicBool, Ordering}, - Arc, - }, - time::{Duration, Instant}, -}; - -use anyhow::Context; -use datasize::DataSize; -use futures::{ - future::{self, BoxFuture, Either}, - stream::{SplitSink, SplitStream}, - FutureExt, SinkExt, StreamExt, -}; -use once_cell::sync::Lazy; -use openssl::{error::ErrorStack as OpenSslErrorStack, pkey, ssl::Ssl}; -use pkey::{PKey, Private}; -use prometheus::{IntGauge, Registry}; -use rand::seq::IteratorRandom; -use serde::{de::DeserializeOwned, Serialize}; -use thiserror::Error; -use tokio::{ - net::TcpStream, - sync::{ - mpsc::{self, UnboundedReceiver, UnboundedSender}, - watch, - }, - task::JoinHandle, -}; -use tokio_openssl::SslStream; -use tokio_serde::{formats::SymmetricalBincode, SymmetricallyFramed}; -use tokio_util::codec::{Framed, LengthDelimitedCodec}; -use tracing::{debug, error, info, trace, warn}; - -use self::error::Result; -pub(crate) use self::{event::Event, gossiped_address::GossipedAddress, message::Message}; -use crate::{ - components::{ - network::ENABLE_LIBP2P_NET_ENV_VAR, networking_metrics::NetworkingMetrics, Component, - }, - effect::{ - announcements::{BlocklistAnnouncement, NetworkAnnouncement}, - requests::{NetworkInfoRequest, NetworkRequest}, - EffectBuilder, EffectExt, EffectResultExt, Effects, - }, - reactor::{EventQueueHandle, Finalize, QueueKind, ReactorEvent}, - tls::{self, TlsCert, ValidationError}, - types::{NodeId, TimeDiff, Timestamp}, - utils, NodeRng, -}; -use chain_info::ChainInfo; -pub use config::Config; -pub use error::Error; - -const 
MAX_ASYMMETRIC_CONNECTION_SEEN: u16 = 4; -static BLOCKLIST_RETAIN_DURATION: Lazy = - Lazy::new(|| Duration::from_secs(60 * 10).into()); - -#[derive(DataSize, Debug)] -pub(crate) struct OutgoingConnection

{ - #[data_size(skip)] // Unfortunately, there is no way to inspect an `UnboundedSender`. - sender: UnboundedSender>, - peer_address: SocketAddr, - - // for keeping track of connection asymmetry, tracking the number of times we've seen this - // connection be asymmetric. - times_seen_asymmetric: u16, -} - -#[derive(DataSize, Debug)] -pub(crate) struct IncomingConnection { - peer_address: SocketAddr, - - // for keeping track of connection asymmetry, tracking the number of times we've seen this - // connection be asymmetric. - times_seen_asymmetric: u16, -} - -#[derive(DataSize)] -pub(crate) struct SmallNetwork -where - REv: 'static, -{ - /// Initial configuration values. - cfg: Config, - /// Server certificate. - certificate: Arc, - /// Server secret key. - secret_key: Arc>, - /// Our public listening address. - public_address: SocketAddr, - /// Our node ID, - our_id: NodeId, - /// If we connect to ourself, this flag is set to true. - is_bootstrap_node: bool, - /// Handle to event queue. - event_queue: EventQueueHandle, - - /// Incoming network connection addresses. - incoming: HashMap, - /// Outgoing network connections' messages. - outgoing: HashMap>, - - /// List of addresses which this node will avoid connecting to and the time they were added. - blocklist: HashMap, - - /// Pending outgoing connections: ones for which we are currently trying to make a connection. - pending: HashMap, - - /// Information retained from the chainspec required for operating the networking component. - chain_info: Arc, - - /// Channel signaling a shutdown of the small network. - // Note: This channel is closed when `SmallNetwork` is dropped, signalling the receivers that - // they should cease operation. - #[data_size(skip)] - shutdown_sender: Option>, - /// A clone of the receiver is passed to the message reader for all new incoming connections in - /// order that they can be gracefully terminated. 
- #[data_size(skip)] - shutdown_receiver: watch::Receiver<()>, - /// Flag to indicate the server has stopped running. - is_stopped: Arc, - /// Join handle for the server thread. - #[data_size(skip)] - server_join_handle: Option>, - - /// Networking metrics. - #[data_size(skip)] - net_metrics: NetworkingMetrics, - - /// Known addresses for this node. - known_addresses: HashSet, -} - -impl SmallNetwork -where - P: Serialize + DeserializeOwned + Clone + Debug + Display + Send + 'static, - REv: ReactorEvent + From> + From>, -{ - /// Creates a new small network component instance. - /// - /// If `notify` is set to `false`, no systemd notifications will be sent, regardless of - /// configuration. - #[allow(clippy::type_complexity)] - pub(crate) fn new>( - event_queue: EventQueueHandle, - cfg: Config, - registry: &Registry, - small_network_identity: SmallNetworkIdentity, - chain_info_source: C, - notify: bool, - ) -> Result<(SmallNetwork, Effects>)> { - let mut known_addresses = HashSet::new(); - for address in &cfg.known_addresses { - match utils::resolve_address(address) { - Ok(known_address) => { - if !known_addresses.insert(known_address) { - warn!(%address, resolved=%known_address, "ignoring duplicated known address"); - }; - } - Err(err) => { - warn!(%address, %err, "failed to resolve known address"); - } - } - } - - // Assert we have at least one known address in the config. - if known_addresses.is_empty() { - warn!("no known addresses provided via config or all failed DNS resolution"); - return Err(Error::InvalidConfig); - } - - let mut public_address = - utils::resolve_address(&cfg.public_address).map_err(Error::ResolveAddr)?; - - let our_id = NodeId::from(&small_network_identity); - let secret_key = small_network_identity.secret_key; - let certificate = small_network_identity.tls_certificate; - - let chain_info = Arc::new(chain_info_source.into()); - - // If the env var "CASPER_ENABLE_LIBP2P_NET" is defined, exit without starting the - // server. 
- if env::var(ENABLE_LIBP2P_NET_ENV_VAR).is_ok() { - let model = SmallNetwork { - cfg, - known_addresses, - certificate, - secret_key, - public_address, - our_id, - is_bootstrap_node: false, - event_queue, - incoming: HashMap::new(), - outgoing: HashMap::new(), - pending: HashMap::new(), - blocklist: HashMap::new(), - chain_info, - shutdown_sender: None, - shutdown_receiver: watch::channel(()).1, - server_join_handle: None, - is_stopped: Arc::new(AtomicBool::new(true)), - net_metrics: NetworkingMetrics::new(&Registry::default())?, - }; - return Ok((model, Effects::new())); - } - - let net_metrics = NetworkingMetrics::new(®istry)?; - - // We can now create a listener. - let bind_address = utils::resolve_address(&cfg.bind_address).map_err(Error::ResolveAddr)?; - let listener = TcpListener::bind(bind_address) - .map_err(|error| Error::ListenerCreation(error, bind_address))?; - // We must set non-blocking to `true` or else the tokio task hangs forever. - listener - .set_nonblocking(true) - .map_err(Error::ListenerSetNonBlocking)?; - - // Once the port has been bound, we can notify systemd if instructed to do so. - if notify { - if cfg.systemd_support { - if sd_notify::booted().map_err(Error::SystemD)? { - info!("notifying systemd that the network is ready to receive connections"); - sd_notify::notify(true, &[sd_notify::NotifyState::Ready]) - .map_err(Error::SystemD)?; - } else { - warn!("systemd_support enabled but not booted with systemd, ignoring"); - } - } else { - debug!("systemd_support disabled, not notifying"); - } - } - let local_address = listener.local_addr().map_err(Error::ListenerAddr)?; - - // Substitute the actually bound port if set to 0. - if public_address.port() == 0 { - public_address.set_port(local_address.port()); - } - - // Run the server task. - // We spawn it ourselves instead of through an effect to get a hold of the join handle, - // which we need to shutdown cleanly later on. 
- info!(%local_address, %public_address, "{}: starting server background task", our_id); - let (server_shutdown_sender, server_shutdown_receiver) = watch::channel(()); - let shutdown_receiver = server_shutdown_receiver.clone(); - let server_join_handle = tokio::spawn(server_task( - event_queue, - tokio::net::TcpListener::from_std(listener).map_err(Error::ListenerConversion)?, - server_shutdown_receiver, - our_id, - )); - - let mut model = SmallNetwork { - cfg, - known_addresses, - certificate, - secret_key, - public_address, - our_id, - is_bootstrap_node: false, - event_queue, - incoming: HashMap::new(), - outgoing: HashMap::new(), - pending: HashMap::new(), - blocklist: HashMap::new(), - chain_info, - shutdown_sender: Some(server_shutdown_sender), - shutdown_receiver, - server_join_handle: Some(server_join_handle), - is_stopped: Arc::new(AtomicBool::new(false)), - net_metrics, - }; - - // Bootstrap process. - let effect_builder = EffectBuilder::new(event_queue); - - // We kick things off by adding effects to connect to all known addresses. This will - // automatically attempt to repeat the connection process if it fails (see - // `connect_to_known_addresses` for details). - let mut effects = model.connect_to_known_addresses(); - - // Start broadcasting our public listening address. - effects.extend( - effect_builder - .set_timeout(model.cfg.initial_gossip_delay.into()) - .event(|_| Event::GossipOurAddress), - ); - - Ok((model, effects)) - } - - /// Queues a message to be sent to all nodes. - fn broadcast_message(&self, msg: Message

) { - for peer_id in self.outgoing.keys() { - self.send_message(*peer_id, msg.clone()); - } - } - - /// Try to establish a connection to all known addresses in the configuration. - /// - /// Will schedule another reconnection if no DNS addresses could be resolved. - fn connect_to_known_addresses(&mut self) -> Effects> { - let mut effects = Effects::new(); - - let now = Instant::now(); - for &address in &self.known_addresses { - self.pending.insert(address, now); - - // Add an effect to connect to the known address. - effects.extend( - connect_outgoing( - address, - Arc::clone(&self.certificate), - Arc::clone(&self.secret_key), - Arc::clone(&self.is_stopped), - ) - .result( - move |(peer_id, transport)| Event::OutgoingEstablished { - peer_id: Box::new(peer_id), - transport, - }, - move |error| Event::OutgoingFailed { - peer_address: Box::new(address), - peer_id: Box::new(None), - error: Box::new(Some(error)), - }, - ), - ); - } - - effects - } - - /// Queues a message to `count` random nodes on the network. - fn gossip_message( - &self, - rng: &mut NodeRng, - msg: Message

, - count: usize, - exclude: HashSet, - ) -> HashSet { - let peer_ids = self - .outgoing - .keys() - .filter(|&peer_id| !exclude.contains(peer_id)) - .choose_multiple(rng, count); - - if peer_ids.len() != count { - // TODO - set this to `warn!` once we are normally testing with networks large enough to - // make it a meaningful and infrequent log message. - trace!( - our_id=%self.our_id, - wanted = count, - selected = peer_ids.len(), - "could not select enough random nodes for gossiping, not enough non-excluded \ - outgoing connections" - ); - } - - for &&peer_id in &peer_ids { - self.send_message(peer_id, msg.clone()); - } - - peer_ids.into_iter().copied().collect() - } - - /// Queues a message to be sent to a specific node. - fn send_message(&self, dest: NodeId, msg: Message

) { - // Try to send the message. - if let Some(connection) = self.outgoing.get(&dest) { - if let Err(msg) = connection.sender.send(msg) { - // We lost the connection, but that fact has not reached us yet. - warn!(our_id=%self.our_id, %dest, ?msg, "dropped outgoing message, lost connection"); - } else { - self.net_metrics.queued_messages.inc(); - } - } else { - // We are not connected, so the reconnection is likely already in progress. - debug!(our_id=%self.our_id, %dest, ?msg, "dropped outgoing message, no connection"); - } - } - - /// Sweep and timeout pending connections. - /// - /// This is a reliability measure that sweeps pending connections, since leftover entries will - /// block any renewed connection attempts to a specific address. - /// - /// In 100% bug free code, this would not be necessary, so this is in here as a stop-gap measure - /// to avoid locking up a node with unremoved pending connections. - /// - /// Connections will only be removed from pending, they will NOT be forcefully disconnected. - fn sweep_pending_connections( - &mut self, - effect_builder: EffectBuilder, - ) -> Effects> { - let now = Instant::now(); - let max_addr_pending_time: Duration = self.cfg.max_addr_pending_time.into(); - - // Remove pending connections that have been pending for a long time. Ideally, we would use - // `drain_filter` here, but it is still unstable, so just collect the keys. 
- let outdated_keys: Vec<_> = self - .pending - .iter() - .filter_map(|(&addr, ×tamp)| { - if now - timestamp > max_addr_pending_time { - Some(addr) - } else { - None - } - }) - .collect(); - - outdated_keys.iter().for_each(|key| { - warn!(addr=%key, "swept pending address"); - self.pending.remove(key); - }); - - effect_builder - .set_timeout(max_addr_pending_time / 2) - .event(|_| Event::SweepPending) - } - - fn handle_incoming_tls_handshake_completed( - &mut self, - effect_builder: EffectBuilder, - result: Result<(NodeId, Transport)>, - peer_address: SocketAddr, - ) -> Effects> { - match result { - Ok((peer_id, transport)) => { - // If we have connected to ourself, allow the connection to drop. - if peer_id == self.our_id { - self.is_bootstrap_node = true; - debug!( - our_id=%self.our_id, - %peer_address, - local_address=?transport.get_ref().local_addr(), - "connected incoming to ourself - closing connection" - ); - return Effects::new(); - } - - // If the peer has already disconnected, allow the connection to drop. - if let Err(error) = transport.get_ref().peer_addr() { - debug!( - our_id=%self.our_id, - %peer_address, - local_address=?transport.get_ref().local_addr(), - %error, - "incoming connection dropped", - ); - return Effects::new(); - } - - info!(our_id=%self.our_id, %peer_id, %peer_address, "established incoming connection"); - // The sink is only used to send a single handshake message, then dropped. - let (mut sink, stream) = - framed::

(transport, self.chain_info.maximum_net_message_size).split(); - let handshake = self.chain_info.create_handshake(self.public_address); - let mut effects = async move { - let _ = sink.send(handshake).await; - } - .ignore::>(); - - let _ = self.incoming.insert( - peer_id, - IncomingConnection { - peer_address, - times_seen_asymmetric: 0, - }, - ); - self.net_metrics - .open_connections - .set(self.incoming.len() as i64); - - // If the connection is now complete, announce the new peer before starting reader. - effects.extend(self.check_connection_complete(effect_builder, peer_id)); - - effects.extend( - message_reader( - self.event_queue, - stream, - self.shutdown_receiver.clone(), - self.our_id, - peer_id, - ) - .event(move |result| Event::IncomingClosed { - result, - peer_id: Box::new(peer_id), - peer_address: Box::new(peer_address), - }), - ); - - effects - } - Err(err) => { - warn!(our_id=%self.our_id, %peer_address, %err, "TLS handshake failed"); - Effects::new() - } - } - } - - /// Sets up an established outgoing connection. - fn setup_outgoing( - &mut self, - effect_builder: EffectBuilder, - peer_id: NodeId, - transport: Transport, - ) -> Effects> { - // This connection is send-only, we only use the sink. - let peer_address = match transport.get_ref().peer_addr() { - Ok(peer_addr) => peer_addr, - Err(err) => { - // The peer address disappeared, likely because the connection was closed while - // we are setting up. - warn!(%peer_id, %err, "peer connection terminated while setting up outgoing connection, dropping"); - - // We still need to clean up any trace of the connection. - return self.remove(effect_builder, &peer_id, false); - } - }; - - // Remove from pending connection set, but ignore if it is missing. - self.pending.remove(&peer_address); - - // If we have connected to ourself, allow the connection to drop. 
- if peer_id == self.our_id { - self.is_bootstrap_node = true; - debug!( - our_id=%self.our_id, - peer_address=?transport.get_ref().peer_addr(), - local_address=?transport.get_ref().local_addr(), - "connected outgoing to ourself - closing connection", - ); - return self.reconnect_if_not_connected_to_any_known_addresses(effect_builder); - } - - // The stream is only used to receive a single handshake message and then dropped. - let (sink, stream) = - framed::

(transport, self.chain_info.maximum_net_message_size).split(); - debug!(our_id=%self.our_id, %peer_id, %peer_address, "established outgoing connection"); - - let (sender, receiver) = mpsc::unbounded_channel(); - let connection = OutgoingConnection { - peer_address, - sender, - times_seen_asymmetric: 0, - }; - if self.outgoing.insert(peer_id, connection).is_some() { - // We assume that for a reconnect to have happened, the outgoing entry must have - // been either non-existent yet or cleaned up by the handler of the connection - // closing event. If this is not the case, an assumed invariant has been violated. - error!(our_id=%self.our_id, %peer_id, "did not expect leftover channel in outgoing map"); - } - - let mut effects = self.check_connection_complete(effect_builder, peer_id); - - let handshake = self.chain_info.create_handshake(self.public_address); - - effects.extend( - message_sender( - receiver, - sink, - self.net_metrics.queued_messages.clone(), - handshake, - ) - .event(move |result| Event::OutgoingFailed { - peer_id: Box::new(Some(peer_id)), - peer_address: Box::new(peer_address), - error: Box::new(result.err().map(Into::into)), - }), - ); - effects.extend( - handshake_reader(self.event_queue, stream, self.our_id, peer_id, peer_address) - .ignore::>(), - ); - - effects - } - - fn handle_outgoing_lost( - &mut self, - effect_builder: EffectBuilder, - peer_id: Option, - peer_address: SocketAddr, - error: Option, - ) -> Effects> { - let _ = self.pending.remove(&peer_address); - - if let Some(peer_id) = peer_id { - if let Some(ref err) = error { - warn!( - our_id=%self.our_id, - %peer_id, - %peer_address, - %err, - "outgoing connection failed" - ); - } else { - warn!(our_id=%self.our_id, %peer_id, %peer_address, "outgoing connection closed"); - } - return self.remove(effect_builder, &peer_id, false); - } - - // If we don't have the node ID passed in here, it was never added as an - // outgoing connection, hence no need to call `self.remove()`. 
- if let Some(ref err) = error { - warn!( - our_id=%self.our_id, - %peer_address, - %err, - "outgoing connection to known address failed" - ); - } else { - warn!( - our_id=%self.our_id, - %peer_address, - "outgoing connection to known address closed" - ); - } - // Since we are not calling `self.remove()`, call the reconnection check explicitly. - self.reconnect_if_not_connected_to_any_known_addresses(effect_builder) - } - - fn remove( - &mut self, - effect_builder: EffectBuilder, - peer_id: &NodeId, - add_to_blocklist: bool, - ) -> Effects> { - if let Some(incoming) = self.incoming.remove(&peer_id) { - trace!(our_id=%self.our_id, %peer_id, "removing peer from the incoming connections"); - let _ = self.pending.remove(&incoming.peer_address); - - self.net_metrics - .open_connections - .set(self.incoming.len() as i64); - } - if let Some(outgoing) = self.outgoing.remove(&peer_id) { - trace!(our_id=%self.our_id, %peer_id, "removing peer from the outgoing connections"); - if add_to_blocklist && !self.known_addresses.contains(&outgoing.peer_address) { - info!(our_id=%self.our_id, %peer_id, "blocklisting peer"); - self.blocklist - .insert(outgoing.peer_address, Timestamp::now()); - } - } - - self.reconnect_if_not_connected_to_any_known_addresses(effect_builder) - } - - /// Gossips our public listening address, and schedules the next such gossip round. - fn gossip_our_address(&mut self, effect_builder: EffectBuilder) -> Effects> { - let our_address = GossipedAddress::new(self.public_address); - let mut effects = effect_builder - .announce_gossip_our_address(our_address) - .ignore(); - effects.extend( - effect_builder - .set_timeout(self.cfg.gossip_interval) - .event(|_| Event::GossipOurAddress), - ); - effects - } - - /// Marks connections as asymmetric (only incoming or only outgoing) and removes them if they - /// pass the upper limit for this. Connections that are symmetrical are reset to 0. 
- fn enforce_symmetric_connections( - &mut self, - effect_builder: EffectBuilder, - ) -> Effects> { - let mut remove = Vec::new(); - for (node_id, conn) in self.incoming.iter_mut() { - if !self.outgoing.contains_key(node_id) { - if conn.times_seen_asymmetric >= MAX_ASYMMETRIC_CONNECTION_SEEN { - remove.push(*node_id); - } else { - conn.times_seen_asymmetric += 1; - } - } else { - conn.times_seen_asymmetric = 0; - } - } - for (node_id, conn) in self.outgoing.iter_mut() { - if !self.incoming.contains_key(node_id) { - if conn.times_seen_asymmetric >= MAX_ASYMMETRIC_CONNECTION_SEEN { - remove.push(*node_id); - } else { - conn.times_seen_asymmetric += 1; - } - } else { - conn.times_seen_asymmetric = 0; - } - } - let mut effects = Effects::new(); - for node_id in remove { - effects.extend(self.remove(effect_builder, &node_id, true)); - } - effects - } - - /// Handles a received message. - fn handle_message( - &mut self, - effect_builder: EffectBuilder, - peer_id: NodeId, - msg: Message

, - ) -> Effects> - where - REv: From>, - { - match msg { - Message::Handshake { - network_name, - public_address, - protocol_version, - } => { - if network_name != self.chain_info.network_name { - info!( - our_id=%self.our_id, - %peer_id, - our_network=?self.chain_info.network_name, - their_network=?network_name, - our_protocol_version=%self.chain_info.protocol_version, - their_protocol_version=%protocol_version, - "dropping connection due to network name mismatch" - ); - let remove = self.remove(effect_builder, &peer_id, false); - self.update_peers_metric(); - return remove; - } - - // This speeds up the connection process, but masks potential bugs in the gossiper. - let effects = self.connect_to_peer_if_required(public_address); - self.update_peers_metric(); - - effects - } - Message::Payload(payload) => effect_builder - .announce_message_received(peer_id, payload) - .ignore(), - } - } - - fn update_peers_metric(&mut self) { - self.net_metrics.peers.set(self.peers().len() as i64); - } - - fn connect_to_peer_if_required(&mut self, peer_address: SocketAddr) -> Effects> { - let now = Timestamp::now(); - self.blocklist - .retain(|_, ts| *ts > now - *BLOCKLIST_RETAIN_DURATION); - if self.pending.contains_key(&peer_address) - || self.blocklist.contains_key(&peer_address) - || self - .outgoing - .iter() - .any(|(_peer_id, connection)| connection.peer_address == peer_address) - { - // We're already trying to connect, are connected, or the connection is on the blocklist - // - do nothing. - Effects::new() - } else { - // We need to connect. 
- let now = Instant::now(); - self.pending.insert(peer_address, now); - connect_outgoing( - peer_address, - Arc::clone(&self.certificate), - Arc::clone(&self.secret_key), - Arc::clone(&self.is_stopped), - ) - .result( - move |(peer_id, transport)| Event::OutgoingEstablished { - peer_id: Box::new(peer_id), - transport, - }, - move |error| Event::OutgoingFailed { - peer_id: Box::new(None), - peer_address: Box::new(peer_address), - error: Box::new(Some(error)), - }, - ) - } - } - - /// Checks whether a connection has been established fully, i.e. with an incoming and outgoing - /// connection. - /// - /// Returns either no effect or an announcement that a new peer has connected. - fn check_connection_complete( - &self, - effect_builder: EffectBuilder, - peer_id: NodeId, - ) -> Effects> { - if self.outgoing.contains_key(&peer_id) && self.incoming.contains_key(&peer_id) { - debug!(%peer_id, "connection to peer is now complete"); - effect_builder.announce_new_peer(peer_id).ignore() - } else { - Effects::new() - } - } - - /// If we are isolated, try to reconnect to all known nodes. - fn reconnect_if_not_connected_to_any_known_addresses( - &self, - effect_builder: EffectBuilder, - ) -> Effects> { - if self.is_not_connected_to_any_known_address() { - info!(delay=?self.cfg.isolation_reconnect_delay, "we are isolated. will attempt to reconnect to all known nodes after a delay"); - - effect_builder - .set_timeout(self.cfg.isolation_reconnect_delay.into()) - .event(|_| Event::IsolationReconnection) - } else { - Effects::new() - } - } - - /// Returns the set of connected nodes. 
- pub(crate) fn peers(&self) -> BTreeMap { - let mut ret = BTreeMap::new(); - for (node_id, connection) in &self.outgoing { - ret.insert(*node_id, connection.peer_address.to_string()); - } - for (node_id, connection) in &self.incoming { - ret.entry(*node_id) - .or_insert_with(|| connection.peer_address.to_string()); - } - ret - } - - /// Returns whether or not this node has been disconnected from all known nodes. - fn is_not_connected_to_any_known_address(&self) -> bool { - for &known_address in &self.known_addresses { - if self.pending.contains_key(&known_address) { - return false; - } - - if self - .outgoing - .values() - .any(|outgoing_connection| outgoing_connection.peer_address == known_address) - { - return false; - } - } - - true - } - - /// Returns the node id of this network node. - /// - Used in validator test. - #[cfg(test)] - pub(crate) fn node_id(&self) -> NodeId { - self.our_id - } -} - -impl Finalize for SmallNetwork -where - REv: Send + 'static, - P: Send + 'static, -{ - fn finalize(mut self) -> BoxFuture<'static, ()> { - async move { - // Close the shutdown socket, causing the server to exit. - drop(self.shutdown_sender.take()); - - // Set the flag to true, ensuring any ongoing attempts to establish outgoing TLS - // connections return errors. - self.is_stopped.store(true, Ordering::SeqCst); - - // Wait for the server to exit cleanly. - if let Some(join_handle) = self.server_join_handle.take() { - match join_handle.await { - Ok(_) => debug!(our_id=%self.our_id, "server exited cleanly"), - Err(err) => error!(%self.our_id,%err, "could not join server task cleanly"), - } - } else if env::var(ENABLE_LIBP2P_NET_ENV_VAR).is_err() { - warn!(our_id=%self.our_id, "server shutdown while already shut down") - } - } - .boxed() - } -} - -impl Component for SmallNetwork -where - REv: ReactorEvent + From> + From>, - P: Serialize + DeserializeOwned + Clone + Debug + Display + Send + 'static, -{ - type Event = Event

; - type ConstructionError = Infallible; - - fn handle_event( - &mut self, - effect_builder: EffectBuilder, - rng: &mut NodeRng, - event: Self::Event, - ) -> Effects { - match event { - Event::IsolationReconnection => { - if self.is_not_connected_to_any_known_address() { - info!("still isolated after grace time, attempting to reconnect to all known_nodes"); - self.connect_to_known_addresses() - } else { - info!("would attempt to reconnect, but no longer isolated. not reconnecting"); - Effects::new() - } - } - Event::IncomingNew { - stream, - peer_address, - } => { - debug!(our_id=%self.our_id, %peer_address, "incoming connection, starting TLS handshake"); - - setup_tls(stream, self.certificate.clone(), self.secret_key.clone()) - .boxed() - .event(move |result| Event::IncomingHandshakeCompleted { - result: Box::new(result), - peer_address, - }) - } - Event::IncomingHandshakeCompleted { - result, - peer_address, - } => { - self.handle_incoming_tls_handshake_completed(effect_builder, *result, *peer_address) - } - Event::IncomingMessage { peer_id, msg } => { - self.handle_message(effect_builder, *peer_id, *msg) - } - Event::IncomingClosed { - result, - peer_id, - peer_address, - } => { - match result { - Ok(()) => { - info!(our_id=%self.our_id, %peer_id, %peer_address, "connection closed",) - } - Err(err) => { - warn!(our_id=%self.our_id, %peer_id, %peer_address, %err, "connection dropped") - } - } - self.remove(effect_builder, &peer_id, false) - } - Event::OutgoingEstablished { peer_id, transport } => { - self.setup_outgoing(effect_builder, *peer_id, transport) - } - Event::OutgoingFailed { - peer_id, - peer_address, - error, - } => self.handle_outgoing_lost(effect_builder, *peer_id, *peer_address, *error), - Event::SweepPending => self.sweep_pending_connections(effect_builder), - Event::NetworkRequest { req } => { - match *req { - NetworkRequest::SendMessage { - dest, - payload, - responder, - } => { - // We're given a message to send out. 
- self.net_metrics.direct_message_requests.inc(); - self.send_message(*dest, Message::Payload(*payload)); - responder.respond(()).ignore() - } - NetworkRequest::Broadcast { payload, responder } => { - // We're given a message to broadcast. - self.net_metrics.broadcast_requests.inc(); - self.broadcast_message(Message::Payload(*payload)); - responder.respond(()).ignore() - } - NetworkRequest::Gossip { - payload, - count, - exclude, - responder, - } => { - // We're given a message to gossip. - let sent_to = - self.gossip_message(rng, Message::Payload(*payload), count, exclude); - responder.respond(sent_to).ignore() - } - } - } - Event::NetworkInfoRequest { req } => match *req { - NetworkInfoRequest::GetPeers { responder } => { - responder.respond(self.peers()).ignore() - } - }, - Event::GossipOurAddress => { - let mut effects = self.gossip_our_address(effect_builder); - effects.extend(self.enforce_symmetric_connections(effect_builder)); - effects - } - Event::PeerAddressReceived(gossiped_address) => { - self.connect_to_peer_if_required(gossiped_address.into()) - } - Event::BlocklistAnnouncement(BlocklistAnnouncement::OffenseCommitted(ref peer_id)) => { - warn!(%peer_id, "adding peer to blocklist after transgression"); - self.remove(effect_builder, peer_id, true) - } - } - } -} - -/// Core accept loop for the networking server. -/// -/// Never terminates. -async fn server_task( - event_queue: EventQueueHandle, - listener: tokio::net::TcpListener, - mut shutdown_receiver: watch::Receiver<()>, - our_id: NodeId, -) where - REv: From>, -{ - // The server task is a bit tricky, since it has to wait on incoming connections while at the - // same time shut down if the networking component is dropped, otherwise the TCP socket will - // stay open, preventing reuse. 
- - // We first create a future that never terminates, handling incoming connections: - let accept_connections = async move { - loop { - // We handle accept errors here, since they can be caused by a temporary resource - // shortage or the remote side closing the connection while it is waiting in - // the queue. - match listener.accept().await { - Ok((stream, peer_address)) => { - // Move the incoming connection to the event queue for handling. - let event = Event::IncomingNew { - stream, - peer_address: Box::new(peer_address), - }; - event_queue - .schedule(event, QueueKind::NetworkIncoming) - .await; - } - // TODO: Handle resource errors gracefully. - // In general, two kinds of errors occur here: Local resource exhaustion, - // which should be handled by waiting a few milliseconds, or remote connection - // errors, which can be dropped immediately. - // - // The code in its current state will consume 100% CPU if local resource - // exhaustion happens, as no distinction is made and no delay introduced. - Err(err) => { - warn!(%our_id, %err, "dropping incoming connection during accept") - } - } - } - }; - - let shutdown_messages = async move { while shutdown_receiver.changed().await.is_ok() {} }; - - // Now we can wait for either the `shutdown` channel's remote end to do be dropped or the - // infinite loop to terminate, which never happens. 
- match future::select(Box::pin(shutdown_messages), Box::pin(accept_connections)).await { - Either::Left(_) => info!( - %our_id, - "shutting down socket, no longer accepting incoming connections" - ), - Either::Right(_) => unreachable!(), - } -} - -#[derive(Debug, Error)] -pub enum SmallNetworkIdentityError { - #[error("could not generate TLS certificate: {0}")] - CouldNotGenerateTlsCertificate(OpenSslErrorStack), - #[error(transparent)] - ValidationError(#[from] ValidationError), -} - -/// An ephemeral [PKey] and [TlsCert] that identifies this node -#[derive(DataSize, Debug, Clone)] -pub struct SmallNetworkIdentity { - secret_key: Arc>, - tls_certificate: Arc, -} - -impl SmallNetworkIdentity { - pub fn new() -> result::Result { - let (not_yet_validated_x509_cert, secret_key) = tls::generate_node_cert() - .map_err(SmallNetworkIdentityError::CouldNotGenerateTlsCertificate)?; - let tls_certificate = tls::validate_cert(not_yet_validated_x509_cert)?; - Ok(SmallNetworkIdentity { - secret_key: Arc::new(secret_key), - tls_certificate: Arc::new(tls_certificate), - }) - } -} - -impl From<&SmallNetwork> for SmallNetworkIdentity { - fn from(small_network: &SmallNetwork) -> Self { - SmallNetworkIdentity { - secret_key: small_network.secret_key.clone(), - tls_certificate: small_network.certificate.clone(), - } - } -} - -impl From<&SmallNetworkIdentity> for NodeId { - fn from(small_network_identity: &SmallNetworkIdentity) -> Self { - NodeId::from( - small_network_identity - .tls_certificate - .public_key_fingerprint(), - ) - } -} - -/// Server-side TLS handshake. -/// -/// This function groups the TLS handshake into a convenient function, enabling the `?` operator. 
-async fn setup_tls( - stream: TcpStream, - cert: Arc, - secret_key: Arc>, -) -> Result<(NodeId, Transport)> { - let mut tls_stream = tls::create_tls_acceptor(&cert.as_x509().as_ref(), &secret_key.as_ref()) - .and_then(|ssl_acceptor| Ssl::new(ssl_acceptor.context())) - .and_then(|ssl| SslStream::new(ssl, stream)) - .map_err(Error::AcceptorCreation)?; - - SslStream::accept(Pin::new(&mut tls_stream)) - .await - .map_err(Error::Handshake)?; - - // We can now verify the certificate. - let peer_cert = tls_stream - .ssl() - .peer_certificate() - .ok_or(Error::NoClientCertificate)?; - - Ok(( - NodeId::from(tls::validate_cert(peer_cert)?.public_key_fingerprint()), - tls_stream, - )) -} - -/// Network handshake reader for single handshake message received by outgoing connection. -async fn handshake_reader( - event_queue: EventQueueHandle, - mut stream: SplitStream>, - our_id: NodeId, - peer_id: NodeId, - peer_address: SocketAddr, -) where - P: DeserializeOwned + Send + Display, - REv: From>, -{ - if let Some(Ok(msg @ Message::Handshake { .. })) = stream.next().await { - debug!(%our_id, %msg, %peer_id, "handshake received"); - return event_queue - .schedule( - Event::IncomingMessage { - peer_id: Box::new(peer_id), - msg: Box::new(msg), - }, - QueueKind::NetworkIncoming, - ) - .await; - } - warn!(%our_id, %peer_id, "receiving handshake failed, closing connection"); - event_queue - .schedule( - Event::OutgoingFailed { - peer_id: Box::new(Some(peer_id)), - peer_address: Box::new(peer_address), - error: Box::new(None), - }, - QueueKind::Network, - ) - .await -} - -/// Network message reader. -/// -/// Schedules all received messages until the stream is closed or an error occurs. 
-async fn message_reader( - event_queue: EventQueueHandle, - mut stream: SplitStream>, - mut shutdown_receiver: watch::Receiver<()>, - our_id: NodeId, - peer_id: NodeId, -) -> io::Result<()> -where - P: DeserializeOwned + Send + Display, - REv: From>, -{ - let read_messages = async move { - while let Some(msg_result) = stream.next().await { - match msg_result { - Ok(msg) => { - debug!(%our_id, %msg, %peer_id, "message received"); - // We've received a message, push it to the reactor. - event_queue - .schedule( - Event::IncomingMessage { - peer_id: Box::new(peer_id), - msg: Box::new(msg), - }, - QueueKind::NetworkIncoming, - ) - .await; - } - Err(err) => { - warn!(%our_id, %err, %peer_id, "receiving message failed, closing connection"); - return Err(err); - } - } - } - Ok(()) - }; - - let shutdown_messages = async move { while shutdown_receiver.changed().await.is_ok() {} }; - - // Now we can wait for either the `shutdown` channel's remote end to do be dropped or the - // while loop to terminate. - match future::select(Box::pin(shutdown_messages), Box::pin(read_messages)).await { - Either::Left(_) => info!( - %our_id, - %peer_id, - "shutting down incoming connection message reader" - ), - Either::Right(_) => (), - } - - Ok(()) -} - -/// Network message sender. -/// -/// Reads from a channel and sends all messages, until the stream is closed or an error occurs. -/// -/// Initially sends a handshake including the `chainspec_hash` as a final handshake step. If the -/// recipient's `chainspec_hash` doesn't match, the connection will be closed. -async fn message_sender

( - mut queue: UnboundedReceiver>, - mut sink: SplitSink, Message

>, - counter: IntGauge, - handshake: Message

, -) -> Result<()> -where - P: Serialize + Send, -{ - sink.send(handshake).await.map_err(Error::MessageNotSent)?; - while let Some(payload) = queue.recv().await { - counter.dec(); - // We simply error-out if the sink fails, it means that our connection broke. - sink.send(payload).await.map_err(Error::MessageNotSent)?; - } - - Ok(()) -} - -/// Transport type alias for base encrypted connections. -type Transport = SslStream; - -/// A framed transport for `Message`s. -type FramedTransport

= SymmetricallyFramed< - Framed, - Message

, - SymmetricalBincode>, ->; - -/// Constructs a new framed transport on a stream. -fn framed

(stream: Transport, maximum_net_message_size: u32) -> FramedTransport

{ - let length_delimited = Framed::new( - stream, - LengthDelimitedCodec::builder() - .max_frame_length(maximum_net_message_size as usize) - .new_codec(), - ); - SymmetricallyFramed::new( - length_delimited, - SymmetricalBincode::>::default(), - ) -} - -/// Initiates a TLS connection to a remote address. -async fn connect_outgoing( - peer_address: SocketAddr, - our_certificate: Arc, - secret_key: Arc>, - server_is_stopped: Arc, -) -> Result<(NodeId, Transport)> { - let ssl = tls::create_tls_connector(&our_certificate.as_x509(), &secret_key) - .context("could not create TLS connector")? - .configure() - .and_then(|mut config| { - config.set_verify_hostname(false); - config.into_ssl("this-will-not-be-checked.example.com") - }) - .map_err(Error::ConnectorConfiguration)?; - - let stream = TcpStream::connect(peer_address) - .await - .context("TCP connection failed")?; - - let mut tls_stream = SslStream::new(ssl, stream).context("tls handshake failed")?; - SslStream::connect(Pin::new(&mut tls_stream)).await?; - - let peer_cert = tls_stream - .ssl() - .peer_certificate() - .ok_or(Error::NoServerCertificate)?; - - let peer_id = tls::validate_cert(peer_cert)?.public_key_fingerprint(); - - if server_is_stopped.load(Ordering::SeqCst) { - debug!( - our_id=%our_certificate.public_key_fingerprint(), - %peer_address, - "server stopped - aborting outgoing TLS connection" - ); - Err(Error::ServerStopped) - } else { - Ok((NodeId::from(peer_id), tls_stream)) - } -} - -impl Debug for SmallNetwork -where - P: Debug, -{ - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - f.debug_struct("SmallNetwork") - .field("our_id", &self.our_id) - .field("certificate", &"") - .field("secret_key", &"") - .field("public_address", &self.public_address) - .field("event_queue", &"") - .field("incoming", &self.incoming) - .field("outgoing", &self.outgoing) - .field("pending", &self.pending) - .finish() - } -} diff --git a/node/src/components/small_network/chain_info.rs 
b/node/src/components/small_network/chain_info.rs deleted file mode 100644 index f35218d172..0000000000 --- a/node/src/components/small_network/chain_info.rs +++ /dev/null @@ -1,57 +0,0 @@ -//! Network-related chain identification information. - -// TODO: This module and `ChainId` should disappear in its entirety and the actual chainspec be made -// available. - -use std::net::SocketAddr; - -use casper_types::ProtocolVersion; -use datasize::DataSize; - -use super::Message; -use crate::types::Chainspec; - -/// Data retained from the chainspec by the small networking component. -/// -/// Typically this information is used for creating handshakes. -#[derive(DataSize, Debug)] -pub(crate) struct ChainInfo { - /// Name of the network we participate in. We only remain connected to peers with the same - /// network name as us. - pub(super) network_name: String, - /// The maximum message size for a network message, as supplied from the chainspec. - pub(super) maximum_net_message_size: u32, - /// The protocol version. - pub(super) protocol_version: ProtocolVersion, -} - -impl ChainInfo { - /// Create an instance of `ChainInfo` for testing. - #[cfg(test)] - pub fn create_for_testing() -> Self { - ChainInfo { - network_name: "rust-tests-network".to_string(), - maximum_net_message_size: 22 * 1024 * 1024, // Hardcoded at 22M. - protocol_version: ProtocolVersion::V1_0_0, - } - } - - /// Create a handshake based on chain identification data. - pub(super) fn create_handshake

(&self, public_address: SocketAddr) -> Message

{ - Message::Handshake { - network_name: self.network_name.clone(), - public_address, - protocol_version: self.protocol_version, - } - } -} - -impl From<&Chainspec> for ChainInfo { - fn from(chainspec: &Chainspec) -> Self { - ChainInfo { - network_name: chainspec.network_config.name.clone(), - maximum_net_message_size: chainspec.network_config.maximum_net_message_size, - protocol_version: chainspec.protocol_version(), - } - } -} diff --git a/node/src/components/small_network/config.rs b/node/src/components/small_network/config.rs deleted file mode 100644 index 0db2dd1b52..0000000000 --- a/node/src/components/small_network/config.rs +++ /dev/null @@ -1,106 +0,0 @@ -#[cfg(test)] -use std::net::{Ipv4Addr, SocketAddr}; -use std::time::Duration; - -use datasize::DataSize; -use serde::{Deserialize, Serialize}; - -use crate::types::TimeDiff; - -/// Default binding address. -/// -/// Uses a fixed port per node, but binds on any interface. -const DEFAULT_BIND_ADDRESS: &str = "0.0.0.0:34553"; - -/// Default public address. -/// -/// Automatically sets the port, but defaults publishing localhost as the public address. -const DEFAULT_PUBLIC_ADDRESS: &str = "127.0.0.1:0"; - -/// Default interval for gossiping network addresses. -const DEFAULT_GOSSIP_INTERVAL: Duration = Duration::from_secs(30); - -// Default values for networking configuration: -impl Default for Config { - fn default() -> Self { - Config { - bind_address: DEFAULT_BIND_ADDRESS.to_string(), - public_address: DEFAULT_PUBLIC_ADDRESS.to_string(), - known_addresses: Vec::new(), - gossip_interval: DEFAULT_GOSSIP_INTERVAL, - systemd_support: false, - isolation_reconnect_delay: TimeDiff::from_seconds(2), - initial_gossip_delay: TimeDiff::from_seconds(5), - max_addr_pending_time: TimeDiff::from_seconds(60), - } - } -} - -/// Small network configuration. -#[derive(DataSize, Debug, Clone, Deserialize, Serialize)] -// Disallow unknown fields to ensure config files and command-line overrides contain valid keys. 
-#[serde(deny_unknown_fields)] -pub struct Config { - /// Address to bind to. - pub bind_address: String, - /// Publicly advertised address, in case the node has a different external IP. - /// - /// If the port is specified as `0`, it will be replaced with the actually bound port. - pub public_address: String, - /// Known address of a node on the network used for joining. - pub known_addresses: Vec, - /// Interval in milliseconds used for gossiping. - #[serde(with = "crate::utils::milliseconds")] - pub gossip_interval: Duration, - /// Enable systemd startup notification. - pub systemd_support: bool, - /// Minimum amount of time that has to pass before attempting to reconnect after isolation. - pub isolation_reconnect_delay: TimeDiff, - /// Initial delay before the first round of gossip. - pub initial_gossip_delay: TimeDiff, - /// Maximum allowed time for an address to be kept in the pending set. - pub max_addr_pending_time: TimeDiff, -} - -#[cfg(test)] -/// Reduced gossip interval for local testing. -const DEFAULT_TEST_GOSSIP_INTERVAL: Duration = Duration::from_secs(1); - -#[cfg(test)] -/// Address used to bind all local testing networking to by default. -const TEST_BIND_INTERFACE: Ipv4Addr = Ipv4Addr::LOCALHOST; - -#[cfg(test)] -impl Config { - /// Construct a configuration suitable for testing with no known address that binds to a - /// specific address. - pub(super) fn new(bind_address: SocketAddr) -> Self { - Config { - bind_address: bind_address.to_string(), - public_address: bind_address.to_string(), - known_addresses: vec![bind_address.to_string()], - gossip_interval: DEFAULT_TEST_GOSSIP_INTERVAL, - systemd_support: false, - ..Default::default() - } - } - - /// Constructs a `Config` suitable for use by the first node of a testnet on a single machine. 
- pub(crate) fn default_local_net_first_node(bind_port: u16) -> Self { - Config::new((TEST_BIND_INTERFACE, bind_port).into()) - } - - /// Constructs a `Config` suitable for use by a node joining a testnet on a single machine. - pub(crate) fn default_local_net(known_peer_port: u16) -> Self { - Config { - bind_address: SocketAddr::from((TEST_BIND_INTERFACE, 0)).to_string(), - public_address: SocketAddr::from((TEST_BIND_INTERFACE, 0)).to_string(), - known_addresses: vec![ - SocketAddr::from((TEST_BIND_INTERFACE, known_peer_port)).to_string() - ], - gossip_interval: DEFAULT_TEST_GOSSIP_INTERVAL, - systemd_support: false, - ..Default::default() - } - } -} diff --git a/node/src/components/small_network/error.rs b/node/src/components/small_network/error.rs deleted file mode 100644 index c617353c03..0000000000 --- a/node/src/components/small_network/error.rs +++ /dev/null @@ -1,136 +0,0 @@ -use std::{io, net::SocketAddr, result, time::SystemTimeError}; - -use openssl::{error::ErrorStack, ssl}; -use serde::Serialize; -use thiserror::Error; - -use crate::{tls::ValidationError, utils::ResolveAddressError}; - -pub(super) type Result = result::Result; - -/// Error type returned by the `SmallNetwork` component. -#[derive(Debug, Error, Serialize)] -pub enum Error { - /// Server failed to present certificate. - #[error("no server certificate presented")] - NoServerCertificate, - /// Client failed to present certificate. - #[error("no client certificate presented")] - NoClientCertificate, - /// Peer ID presented does not match the expected one. - #[error("remote node has wrong ID")] - WrongId, - /// The config must have both or neither of certificate and secret key, and must have at least - /// one known address. - #[error( - "need both or none of cert, secret_key in network config, and at least one known address" - )] - InvalidConfig, - /// Our own certificate is not valid. 
- #[error("own certificate invalid")] - OwnCertificateInvalid(#[source] ValidationError), - /// Failed to create a TCP listener. - #[error("failed to create listener on {1}")] - ListenerCreation( - #[serde(skip_serializing)] - #[source] - io::Error, - SocketAddr, - ), - /// Failed to get TCP listener address. - #[error("failed to get listener addr")] - ListenerAddr( - #[serde(skip_serializing)] - #[source] - io::Error, - ), - /// Failed to set listener to non-blocking. - #[error("failed to set listener to non-blocking")] - ListenerSetNonBlocking( - #[serde(skip_serializing)] - #[source] - io::Error, - ), - /// Failed to convert std TCP listener to tokio TCP listener. - #[error("failed to convert listener to tokio")] - ListenerConversion( - #[serde(skip_serializing)] - #[source] - io::Error, - ), - /// Could not resolve root node address. - #[error("failed to resolve network address")] - ResolveAddr( - #[serde(skip_serializing)] - #[source] - ResolveAddressError, - ), - /// Failed to send message. - // TODO: Inclusion of the cause is a workaround, we should actually be printing cause-traces - // when logging errors. - #[error("failed to send message: {0}")] - MessageNotSent( - #[serde(skip_serializing)] - #[source] - io::Error, - ), - /// Failed to create TLS acceptor. - #[error("failed to create acceptor")] - AcceptorCreation( - #[serde(skip_serializing)] - #[source] - ErrorStack, - ), - /// Failed to create configuration for TLS connector. - #[error("failed to configure connector")] - ConnectorConfiguration( - #[serde(skip_serializing)] - #[source] - ErrorStack, - ), - /// Failed to generate node TLS certificate. - #[error("failed to generate cert")] - CertificateGeneration( - #[serde(skip_serializing)] - #[source] - ErrorStack, - ), - /// Handshaking error. - #[error("handshake error: {0}")] - Handshake( - #[serde(skip_serializing)] - #[from] - ssl::Error, - ), - /// TLS validation error. 
- #[error("TLS validation error: {0}")] - TlsValidation(#[from] ValidationError), - /// System time error. - #[error("system time error: {0}")] - SystemTime( - #[serde(skip_serializing)] - #[from] - SystemTimeError, - ), - /// Systemd notification error - #[error("could not interact with systemd: {0}")] - SystemD(#[serde(skip_serializing)] io::Error), - /// Other error. - #[error(transparent)] - Anyhow( - #[serde(skip_serializing)] - #[from] - anyhow::Error, - ), - /// Server has stopped. - #[error("failed to create outgoing connection as server has stopped")] - ServerStopped, - - /// Instantiating metrics failed. - #[error(transparent)] - MetricsError( - #[serde(skip_serializing)] - #[from] - prometheus::Error, - ), -} diff --git a/node/src/components/small_network/event.rs b/node/src/components/small_network/event.rs deleted file mode 100644 index 23596c3a66..0000000000 --- a/node/src/components/small_network/event.rs +++ /dev/null @@ -1,161 +0,0 @@ -use std::{ - fmt::{self, Debug, Display, Formatter}, - io, mem, - net::SocketAddr, -}; - -use derive_more::From; -use serde::Serialize; -use static_assertions::const_assert; -use tokio::net::TcpStream; - -use super::{Error, GossipedAddress, Message, NodeId, Transport}; -use crate::{ - effect::{ - announcements::BlocklistAnnouncement, - requests::{NetworkInfoRequest, NetworkRequest}, - }, - protocol::Message as ProtocolMessage, -}; - -const _SMALL_NETWORK_EVENT_SIZE: usize = mem::size_of::>(); -const_assert!(_SMALL_NETWORK_EVENT_SIZE < 89); - -#[derive(Debug, From, Serialize)] -pub enum Event

{ - /// We were isolated and have waited the appropriate time. - IsolationReconnection, - /// A new TCP connection has been established from an incoming connection. - IncomingNew { - #[serde(skip_serializing)] - stream: TcpStream, - peer_address: Box, - }, - /// The TLS handshake completed on the incoming connection. - IncomingHandshakeCompleted { - #[serde(skip_serializing)] - result: Box>, - peer_address: Box, - }, - /// Received network message. - IncomingMessage { - peer_id: Box, - msg: Box>, - }, - /// Incoming connection closed. - IncomingClosed { - #[serde(skip_serializing)] - result: io::Result<()>, - peer_id: Box, - peer_address: Box, - }, - - /// A new outgoing connection was successfully established. - OutgoingEstablished { - peer_id: Box, - #[serde(skip_serializing)] - transport: Transport, - }, - /// An outgoing connection failed to connect or was terminated. - OutgoingFailed { - peer_id: Box>, - peer_address: Box, - error: Box>, - }, - /// Triggers the sweep of the pending addresses. - SweepPending, - - /// Incoming network request. - #[from] - NetworkRequest { - #[serde(skip_serializing)] - req: Box>, - }, - - /// Incoming network info request. - #[from] - NetworkInfoRequest { - #[serde(skip_serializing)] - req: Box>, - }, - - /// The node should gossip its own public listening address. - GossipOurAddress, - /// We received a peer's public listening address via gossip. - PeerAddressReceived(GossipedAddress), - - /// Blocklist announcement - #[from] - BlocklistAnnouncement(BlocklistAnnouncement), -} - -impl From> for Event { - fn from(req: NetworkRequest) -> Self { - Self::NetworkRequest { req: Box::new(req) } - } -} - -impl From> for Event { - fn from(req: NetworkInfoRequest) -> Self { - Self::NetworkInfoRequest { req: Box::new(req) } - } -} - -impl Display for Event

{ - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - match self { - Event::IsolationReconnection => write!(f, "perform reconnection after isolation"), - Event::IncomingNew { peer_address, .. } => { - write!(f, "incoming connection from {}", peer_address) - } - Event::IncomingHandshakeCompleted { - result, - peer_address, - } => write!( - f, - "handshake from {}, is_err {}", - peer_address, - result.is_err() - ), - Event::IncomingMessage { - peer_id: node_id, - msg, - } => write!(f, "msg from {}: {}", node_id, msg), - Event::IncomingClosed { peer_address, .. } => { - write!(f, "closed connection from {}", peer_address) - } - Event::OutgoingEstablished { - peer_id: node_id, .. - } => write!(f, "established outgoing to {}", node_id), - Event::OutgoingFailed { - peer_id, - peer_address, - error, - } => match &**peer_id { - Some(node_id) => write!( - f, - "failed outgoing {} {}: (is_err {})", - node_id, - peer_address, - error.is_some() - ), - None => write!( - f, - "failed outgoing {}: (is_err {})", - peer_address, - error.is_some() - ), - }, - Event::SweepPending => write!(f, "sweep pending"), - Event::NetworkRequest { req } => write!(f, "request: {}", req), - Event::NetworkInfoRequest { req } => write!(f, "request: {}", req), - Event::GossipOurAddress => write!(f, "gossip our address"), - Event::PeerAddressReceived(gossiped_address) => { - write!(f, "received gossiped peer address {}", gossiped_address) - } - Event::BlocklistAnnouncement(ann) => { - write!(f, "handling blocklist announcement: {}", ann) - } - } - } -} diff --git a/node/src/components/small_network/gossiped_address.rs b/node/src/components/small_network/gossiped_address.rs deleted file mode 100644 index c919eeae5d..0000000000 --- a/node/src/components/small_network/gossiped_address.rs +++ /dev/null @@ -1,43 +0,0 @@ -use std::{ - fmt::{self, Display, Formatter}, - net::SocketAddr, -}; - -use datasize::DataSize; -use serde::{Deserialize, Serialize}; - -use crate::types::{Item, Tag}; - -/// Used to 
gossip our public listening address to peers. -#[derive( - Copy, Clone, DataSize, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize, Debug, -)] -pub struct GossipedAddress(SocketAddr); - -impl GossipedAddress { - pub(super) fn new(address: SocketAddr) -> Self { - GossipedAddress(address) - } -} - -impl Display for GossipedAddress { - fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { - write!(formatter, "gossiped-address {}", self.0) - } -} - -impl Item for GossipedAddress { - type Id = GossipedAddress; - const TAG: Tag = Tag::GossipedAddress; - const ID_IS_COMPLETE_ITEM: bool = true; - - fn id(&self) -> Self::Id { - *self - } -} - -impl From for SocketAddr { - fn from(gossiped_address: GossipedAddress) -> Self { - gossiped_address.0 - } -} diff --git a/node/src/components/small_network/message.rs b/node/src/components/small_network/message.rs deleted file mode 100644 index b839cda34e..0000000000 --- a/node/src/components/small_network/message.rs +++ /dev/null @@ -1,223 +0,0 @@ -use std::{ - fmt::{self, Debug, Display, Formatter}, - net::SocketAddr, -}; - -use casper_types::ProtocolVersion; -use serde::{Deserialize, Serialize}; - -/// The default protocol version to use in absence of one in the protocol version field. -#[inline] -fn default_protocol_version() -> ProtocolVersion { - ProtocolVersion::V1_0_0 -} - -#[derive(Clone, Debug, Deserialize, Serialize)] -pub enum Message

{ - Handshake { - /// Network we are connected to. - network_name: String, - /// The public address of the node connecting. - public_address: SocketAddr, - /// Protocol version the node is speaking. - #[serde(default = "default_protocol_version")] - protocol_version: ProtocolVersion, - }, - Payload(P), -} - -impl Display for Message

{ - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - match self { - Message::Handshake { - network_name, - public_address, - protocol_version, - } => write!( - f, - "handshake: {}, public addr: {}, protocol_version: {}", - network_name, public_address, protocol_version, - ), - Message::Payload(payload) => write!(f, "payload: {}", payload), - } - } -} - -#[cfg(test)] -// We use a variety of weird names in these tests. -#[allow(non_camel_case_types)] -mod tests { - use std::net::SocketAddr; - - use casper_types::ProtocolVersion; - use serde::{de::DeserializeOwned, Deserialize, Serialize}; - - use crate::protocol; - - use super::Message; - - /// Version 1.0.0 network level message. - /// - /// Note that the message itself may go out of sync over time as `protocol::Message` changes. - /// The test further below ensures that the handshake is accurate in the meantime. - #[derive(Clone, Debug, Deserialize, Serialize)] - pub enum V1_0_0_Message { - Handshake { - /// Network we are connected to. - network_name: String, - /// The public address of the node connecting. - public_address: SocketAddr, - }, - Payload(protocol::Message), - } - - /// A "conserved" version 1.0.0 handshake. - /// - /// NEVER CHANGE THIS CONSTANT TO MAKE TESTS PASS, AS IT IS BASED ON MAINNET DATA. - const V1_0_0_HANDSHAKE: &[u8] = &[ - 129, 0, 146, 178, 115, 101, 114, 105, 97, 108, 105, 122, 97, 116, 105, 111, 110, 45, 116, - 101, 115, 116, 177, 49, 50, 46, 51, 52, 46, 53, 54, 46, 55, 56, 58, 49, 50, 51, 52, 54, - ]; - - // Note: MessagePack messages can be visualized using the message pack visualizer at - // https://sugendran.github.io/msgpack-visualizer/. Rust arrays can be copy&pasted and converted - // to base64 using the following one-liner: `import base64; base64.b64encode(bytes([129, 0, - // ...]))` - - // It is very important to note that different versions of the message pack codec crate set the - // human-readable flag in a different manner. 
Thus the V1.0.0 handshake can be serialized in two - // different ways, with "human readable" enabled and without. - // - // Our V1.0.0 protocol uses the "human readable" enabled version, they key difference being that - // the `SocketAddr` is encoded as a string instead of a two-item array. - - /// A pseudo-1.0.0 handshake, where the serde human readable flag has been changed due to an - /// `rmp` version mismatch. - const BROKEN_V1_0_0_HANDSHAKE: &[u8] = &[ - 129, 0, 146, 178, 115, 101, 114, 105, 97, 108, 105, 122, 97, 116, 105, 111, 110, 45, 116, - 101, 115, 116, 129, 0, 146, 148, 12, 34, 56, 78, 205, 48, 58, - ]; - - /// Serialize a message using the standard serialization method for handshakes. - fn serialize_message(msg: &M) -> Vec { - // The actual serialization/deserialization code can be found at - // https://github.com/carllerche/tokio-serde/blob/f3c3d69ce049437973468118c9d01b46e0b1ade5/src/lib.rs#L426-L450 - - rmp_serde::to_vec(&msg).expect("handshake serialization failed") - } - - /// Deserialize a message using the standard deserialization method for handshakes. - fn deserialize_message(serialized: &[u8]) -> M { - rmp_serde::from_read(std::io::Cursor::new(&serialized)) - .expect("handshake deserialization failed") - } - - /// Given a message `from` of type `F`, serializes it, then deserializes it as `T`. - fn roundtrip_message(from: &F) -> T - where - F: Serialize, - T: DeserializeOwned, - { - let serialized = serialize_message(from); - deserialize_message(&serialized) - } - - // This test ensure that the serialization of the `V_1_0_0_Message` has not changed and that the - // serialization/deserialization methods for message in this test are likely accurate. 
- #[test] - fn v1_0_0_handshake_is_as_expected() { - let handshake = V1_0_0_Message::Handshake { - network_name: "serialization-test".to_owned(), - public_address: ([12, 34, 56, 78], 12346).into(), - }; - - let serialized = serialize_message::(&handshake); - - assert_eq!(&serialized, V1_0_0_HANDSHAKE); - assert_ne!(&serialized, BROKEN_V1_0_0_HANDSHAKE); - - let deserialized: V1_0_0_Message = deserialize_message(&serialized); - - match deserialized { - V1_0_0_Message::Handshake { - network_name, - public_address, - } => { - assert_eq!(network_name, "serialization-test"); - assert_eq!(public_address, ([12, 34, 56, 78], 12346).into()); - } - other => { - panic!("did not expect {:?} as the deserialized product", other); - } - } - } - - #[test] - fn v1_0_0_can_decode_current_handshake() { - let modern_handshake = Message::::Handshake { - network_name: "example-handshake".to_string(), - public_address: ([12, 34, 56, 78], 12346).into(), - protocol_version: ProtocolVersion::from_parts(5, 6, 7), - }; - - let legacy_handshake: V1_0_0_Message = roundtrip_message(&modern_handshake); - - match legacy_handshake { - V1_0_0_Message::Handshake { - network_name, - public_address, - } => { - assert_eq!(network_name, "example-handshake"); - assert_eq!(public_address, ([12, 34, 56, 78], 12346).into()); - } - V1_0_0_Message::Payload(_) => { - panic!("did not expect legacy handshake to deserialize to payload") - } - } - } - - #[test] - fn current_handshake_decodes_from_v1_0_0() { - let legacy_handshake = V1_0_0_Message::Handshake { - network_name: "example-handshake".to_string(), - public_address: ([12, 34, 56, 78], 12346).into(), - }; - - let modern_handshake: Message = roundtrip_message(&legacy_handshake); - - match modern_handshake { - Message::Handshake { - network_name, - public_address, - protocol_version, - } => { - assert_eq!(network_name, "example-handshake"); - assert_eq!(public_address, ([12, 34, 56, 78], 12346).into()); - assert_eq!(protocol_version, ProtocolVersion::V1_0_0); 
- } - Message::Payload(_) => { - panic!("did not expect modern handshake to deserialize to payload") - } - } - } - - #[test] - fn current_handshake_decodes_from_historic_v1_0_0() { - let modern_handshake: Message = deserialize_message(&V1_0_0_HANDSHAKE); - - match modern_handshake { - Message::Handshake { - network_name, - public_address, - protocol_version, - } => { - assert_eq!(network_name, "serialization-test"); - assert_eq!(public_address, ([12, 34, 56, 78], 12346).into()); - assert_eq!(protocol_version, ProtocolVersion::V1_0_0); - } - Message::Payload(_) => { - panic!("did not expect modern handshake to deserialize to payload") - } - } - } -} diff --git a/node/src/components/small_network/tests.rs b/node/src/components/small_network/tests.rs deleted file mode 100644 index 2d10434675..0000000000 --- a/node/src/components/small_network/tests.rs +++ /dev/null @@ -1,452 +0,0 @@ -//! Tests for the `small_network` component. -//! -//! Calling these "unit tests" would be a bit of a misnomer, since they deal mostly with multiple -//! instances of `small_net` arranged in a network. 
- -use std::{ - collections::{HashMap, HashSet}, - env, - fmt::{self, Debug, Display, Formatter}, - time::{Duration, Instant}, -}; - -use derive_more::From; -use pnet::datalink; -use prometheus::Registry; -use reactor::ReactorEvent; -use serde::{Deserialize, Serialize}; -use tracing::{debug, info}; - -use super::{ - chain_info::ChainInfo, Config, Event as SmallNetworkEvent, GossipedAddress, SmallNetwork, -}; -use crate::{ - components::{ - gossiper::{self, Gossiper}, - network::ENABLE_LIBP2P_NET_ENV_VAR, - small_network::SmallNetworkIdentity, - Component, - }, - effect::{ - announcements::{ControlAnnouncement, GossiperAnnouncement, NetworkAnnouncement}, - requests::{NetworkRequest, StorageRequest}, - EffectBuilder, Effects, - }, - protocol, - reactor::{self, EventQueueHandle, Finalize, Reactor, Runner}, - testing::{ - self, init_logging, - network::{Network, NetworkedReactor}, - ConditionCheckReactor, - }, - types::NodeId, - utils::Source, - NodeRng, -}; - -/// Test-reactor event. -#[derive(Debug, From, Serialize)] -enum Event { - #[from] - SmallNet(#[serde(skip_serializing)] SmallNetworkEvent), - #[from] - AddressGossiper(#[serde(skip_serializing)] gossiper::Event), - #[from] - NetworkRequest(#[serde(skip_serializing)] NetworkRequest), - #[from] - ControlAnnouncement(ControlAnnouncement), - #[from] - NetworkAnnouncement(#[serde(skip_serializing)] NetworkAnnouncement), - #[from] - AddressGossiperAnnouncement(#[serde(skip_serializing)] GossiperAnnouncement), -} - -impl ReactorEvent for Event { - fn as_control(&self) -> Option<&ControlAnnouncement> { - if let Self::ControlAnnouncement(ref ctrl_ann) = self { - Some(ctrl_ann) - } else { - None - } - } -} - -impl From>> for Event { - fn from(request: NetworkRequest>) -> Self { - Event::NetworkRequest(request.map_payload(Message::from)) - } -} - -impl From> for SmallNetworkEvent { - fn from(request: NetworkRequest) -> SmallNetworkEvent { - SmallNetworkEvent::NetworkRequest { - req: Box::new(request), - } - } -} - -impl 
From> for Event { - fn from(_request: NetworkRequest) -> Self { - unreachable!() - } -} - -impl From for Event { - fn from(_request: StorageRequest) -> Self { - unreachable!() - } -} - -impl Display for Event { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - Debug::fmt(self, f) - } -} - -#[derive(Clone, Debug, Deserialize, Serialize, From)] -enum Message { - #[from] - AddressGossiper(gossiper::Message), -} - -impl Display for Message { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - Debug::fmt(self, f) - } -} - -/// Test reactor. -/// -/// Runs a single small network. -#[derive(Debug)] -struct TestReactor { - net: SmallNetwork, - address_gossiper: Gossiper, -} - -impl Reactor for TestReactor { - type Event = Event; - type Config = Config; - type Error = anyhow::Error; - - fn new( - cfg: Self::Config, - registry: &Registry, - event_queue: EventQueueHandle, - _rng: &mut NodeRng, - ) -> anyhow::Result<(Self, Effects)> { - let small_network_identity = SmallNetworkIdentity::new()?; - let (net, effects) = SmallNetwork::new( - event_queue, - cfg, - registry, - small_network_identity, - ChainInfo::create_for_testing(), - false, - )?; - let gossiper_config = gossiper::Config::new_with_small_timeouts(); - let address_gossiper = - Gossiper::new_for_complete_items("address_gossiper", gossiper_config, registry)?; - - Ok(( - TestReactor { - net, - address_gossiper, - }, - reactor::wrap_effects(Event::SmallNet, effects), - )) - } - - fn dispatch_event( - &mut self, - effect_builder: EffectBuilder, - rng: &mut NodeRng, - event: Self::Event, - ) -> Effects { - match event { - Event::SmallNet(ev) => reactor::wrap_effects( - Event::SmallNet, - self.net.handle_event(effect_builder, rng, ev), - ), - Event::AddressGossiper(event) => reactor::wrap_effects( - Event::AddressGossiper, - self.address_gossiper - .handle_event(effect_builder, rng, event), - ), - Event::NetworkRequest(req) => self.dispatch_event( - effect_builder, - rng, - 
Event::SmallNet(SmallNetworkEvent::from(req)), - ), - Event::ControlAnnouncement(ctrl_ann) => { - unreachable!("unhandled control announcement: {}", ctrl_ann) - } - Event::NetworkAnnouncement(NetworkAnnouncement::MessageReceived { - sender, - payload, - }) => { - let reactor_event = match payload { - Message::AddressGossiper(message) => { - Event::AddressGossiper(gossiper::Event::MessageReceived { sender, message }) - } - }; - self.dispatch_event(effect_builder, rng, reactor_event) - } - Event::NetworkAnnouncement(NetworkAnnouncement::GossipOurAddress(gossiped_address)) => { - let event = gossiper::Event::ItemReceived { - item_id: gossiped_address, - source: Source::::Ourself, - }; - self.dispatch_event(effect_builder, rng, Event::AddressGossiper(event)) - } - Event::NetworkAnnouncement(NetworkAnnouncement::NewPeer(_)) => { - // We do not care about the announcement of new peers in this test. - Effects::new() - } - Event::AddressGossiperAnnouncement(ann) => { - let GossiperAnnouncement::NewCompleteItem(gossiped_address) = ann; - let reactor_event = - Event::SmallNet(SmallNetworkEvent::PeerAddressReceived(gossiped_address)); - self.dispatch_event(effect_builder, rng, reactor_event) - } - } - } - - fn maybe_exit(&self) -> Option { - unimplemented!() - } -} - -impl NetworkedReactor for TestReactor { - type NodeId = NodeId; - - fn node_id(&self) -> NodeId { - self.net.node_id() - } -} - -impl Finalize for TestReactor { - fn finalize(self) -> futures::future::BoxFuture<'static, ()> { - self.net.finalize() - } -} - -/// Checks whether or not a given network with a unhealthy node is completely connected. -fn network_is_complete( - blocklist: &HashSet, - nodes: &HashMap>>, -) -> bool { - // We need at least one node. 
- if nodes.is_empty() { - return false; - } - - if nodes.len() == 1 { - let nodes = &nodes.values().collect::>(); - let net = &nodes[0].reactor().inner().net; - if net.is_not_connected_to_any_known_address() { - return true; - } - } - - for (node_id, node) in nodes { - let net = &node.reactor().inner().net; - if blocklist.contains(node_id) { - // ignore blocklisted node - continue; - } - let outgoing = net.outgoing.keys().collect::>(); - let incoming = net.incoming.keys().collect::>(); - let difference = incoming - .symmetric_difference(&outgoing) - .collect::>(); - - // All nodes should be connected to every other node, except itself, so we add it to the - // set of nodes and pretend we have a loopback connection. - if !difference.is_empty() { - return false; - } - - if outgoing.is_empty() && incoming.is_empty() { - return false; - } - } - true -} - -/// Checks whether or not a given network has at least one other node in it -fn network_started(net: &Network) -> bool { - net.nodes() - .iter() - .map(|(_, runner)| runner.reactor().inner().net.peers()) - .all(|peers| !peers.is_empty()) -} - -/// Run a two-node network five times. -/// -/// Ensures that network cleanup and basic networking works. -#[tokio::test] -async fn run_two_node_network_five_times() { - // If the env var "CASPER_ENABLE_LIBP2P_NET" is defined, exit without running the test. - if env::var(ENABLE_LIBP2P_NET_ENV_VAR).is_ok() { - return; - } - - let mut rng = crate::new_rng(); - - // The networking port used by the tests for the root node. 
- let first_node_port = testing::unused_port_on_localhost() + 1; - - init_logging(); - - for i in 0..5 { - info!("two-network test round {}", i); - - let mut net = Network::new(); - - let start = Instant::now(); - net.add_node_with_config( - Config::default_local_net_first_node(first_node_port), - &mut rng, - ) - .await - .unwrap(); - net.add_node_with_config(Config::default_local_net(first_node_port), &mut rng) - .await - .unwrap(); - let end = Instant::now(); - - debug!( - total_time_ms = (end - start).as_millis() as u64, - "finished setting up networking nodes" - ); - - let timeout = Duration::from_secs(20); - let blocklist = HashSet::new(); - net.settle_on( - &mut rng, - |nodes| network_is_complete(&blocklist, nodes), - timeout, - ) - .await; - - assert!( - network_started(&net), - "each node is connected to at least one other node" - ); - - let quiet_for = Duration::from_millis(25); - let timeout = Duration::from_secs(2); - net.settle(&mut rng, quiet_for, timeout).await; - - assert!( - network_is_complete(&blocklist, net.nodes()), - "network did not stay connected" - ); - - net.finalize().await; - } -} - -/// Sanity check that we can bind to a real network. -/// -/// Very unlikely to ever fail on a real machine. -#[tokio::test] -async fn bind_to_real_network_interface() { - // If the env var "CASPER_ENABLE_LIBP2P_NET" is defined, exit without running the test. 
- if env::var(ENABLE_LIBP2P_NET_ENV_VAR).is_ok() { - return; - } - - init_logging(); - - let mut rng = crate::new_rng(); - - let iface = datalink::interfaces() - .into_iter() - .find(|net| !net.ips.is_empty() && !net.ips.iter().any(|ip| ip.ip().is_loopback())) - .expect("could not find a single networking interface that isn't localhost"); - - let local_addr = iface - .ips - .into_iter() - .next() - .expect("found a interface with no ips") - .ip(); - let port = testing::unused_port_on_localhost(); - - let local_net_config = Config::new((local_addr, port).into()); - - let mut net = Network::::new(); - net.add_node_with_config(local_net_config, &mut rng) - .await - .unwrap(); - - // The network should be fully connected. - let timeout = Duration::from_secs(2); - let blocklist = HashSet::new(); - net.settle_on( - &mut rng, - |nodes| network_is_complete(&blocklist, nodes), - timeout, - ) - .await; - - net.finalize().await; -} - -/// Check that a network of varying sizes will connect all nodes properly. -#[tokio::test] -async fn check_varying_size_network_connects() { - // If the env var "CASPER_ENABLE_LIBP2P_NET" is defined, exit without running the test. - if env::var(ENABLE_LIBP2P_NET_ENV_VAR).is_ok() { - return; - } - - init_logging(); - - let mut rng = crate::new_rng(); - - // Try with a few predefined sets of network sizes. - for &number_of_nodes in &[2u16, 3, 5, 9, 15] { - let timeout = Duration::from_secs(3 * number_of_nodes as u64); - - let mut net = Network::new(); - - // Pick a random port in the higher ranges that is likely to be unused. - let first_node_port = testing::unused_port_on_localhost(); - - let _ = net - .add_node_with_config( - Config::default_local_net_first_node(first_node_port), - &mut rng, - ) - .await - .unwrap(); - - for _ in 1..number_of_nodes { - net.add_node_with_config(Config::default_local_net(first_node_port), &mut rng) - .await - .unwrap(); - } - - // The network should be fully connected. 
- let blocklist = HashSet::new(); - net.settle_on( - &mut rng, - |nodes| network_is_complete(&blocklist, nodes), - timeout, - ) - .await; - - let blocklist = HashSet::new(); - // This should not make a difference at all, but we're paranoid, so check again. - assert!( - network_is_complete(&blocklist, net.nodes()), - "network did not stay connected after being settled" - ); - - // This test will run multiple times, so ensure we cleanup all ports. - net.finalize().await; - } -} diff --git a/node/src/components/storage.rs b/node/src/components/storage.rs index e7e29f44f2..f72b46bdac 100644 --- a/node/src/components/storage.rs +++ b/node/src/components/storage.rs @@ -5,8 +5,7 @@ //! //! * storing and loading blocks, //! * storing and loading deploys, -//! * [temporary until refactored] holding `DeployMetadata` for each deploy, -//! * holding a read-only copy of the chainspec, +//! * [temporary until refactored] holding `DeployExecutionInfo` for each deploy, //! * keeping an index of blocks by height and //! * [unimplemented] managing disk usage by pruning blocks and deploys from storage. //! @@ -26,185 +25,166 @@ //! * Storing a deploy or block that already exists (same hash) is fine and will silently be //! accepted. //! -//! ## Indices -//! -//! The current implementation keeps only in-memory indices, which are not persisted, based upon the -//! estimate that they are reasonably quick to rebuild on start-up and do not take up much memory. -//! //! ## Errors //! //! The storage component itself is panic free and in general reports three classes of errors: //! Corruption, temporary resource exhaustion and potential bugs. 
-mod lmdb_ext; +mod config; +pub(crate) mod disjoint_sequences; +mod error; +mod event; +mod metrics; +mod object_pool; #[cfg(test)] mod tests; +mod utils; + +use casper_storage::block_store::{ + lmdb::{IndexedLmdbBlockStore, LmdbBlockStore}, + types::{ + ApprovalsHashes, BlockExecutionResults, BlockHashHeightAndEra, BlockHeight, BlockTransfers, + LatestSwitchBlock, StateStore, StateStoreKey, Tip, TransactionFinalizedApprovals, + }, + BlockStoreError, BlockStoreProvider, BlockStoreTransaction, DataReader, DataWriter, +}; -#[cfg(test)] -use std::{collections::BTreeSet, convert::TryFrom}; use std::{ - collections::{btree_map::Entry, BTreeMap, HashSet}, + borrow::Cow, + collections::{BTreeMap, BTreeSet, HashMap, HashSet}, + convert::TryInto, fmt::{self, Display, Formatter}, - fs, io, mem, - path::PathBuf, + fs::{self, OpenOptions}, + io::ErrorKind, + path::{Path, PathBuf}, + sync::Arc, }; -use datasize::DataSize; -use derive_more::From; -use lmdb::{ - Cursor, Database, DatabaseFlags, Environment, EnvironmentFlags, Transaction, WriteFlags, -}; -use serde::{Deserialize, Serialize}; -use static_assertions::const_assert; +use casper_storage::DbRawBytesSpec; #[cfg(test)] -use tempfile::TempDir; -use thiserror::Error; -use tracing::{debug, error, info}; - -use casper_execution_engine::shared::newtypes::Blake2bHash; -use casper_types::{EraId, ExecutionResult, ProtocolVersion, Transfer, Transform}; +use casper_types::BlockWithSignatures; +use casper_types::{ + bytesrepr::{FromBytes, ToBytes}, + execution::{execution_result_v1, ExecutionResult, ExecutionResultV1}, + Approval, ApprovalsHash, AvailableBlockRange, Block, BlockBody, BlockHash, BlockHeader, + BlockHeaderWithSignatures, BlockSignatures, BlockSignaturesV1, BlockSignaturesV2, BlockV2, + ChainNameDigest, DeployHash, EraId, ExecutionInfo, FinalitySignature, ProtocolVersion, + Timestamp, Transaction, TransactionConfig, TransactionHash, TransactionId, Transfer, U512, +}; +use datasize::DataSize; +use num_rational::Ratio; 
+use prometheus::Registry; +use smallvec::SmallVec; +use tracing::{debug, error, info, warn}; -use super::Component; -#[cfg(test)] -use crate::crypto::hash::Digest; use crate::{ + components::{ + fetcher::{FetchItem, FetchResponse}, + Component, + }, effect::{ - requests::{StateStoreRequest, StorageRequest}, + announcements::FatalAnnouncement, + incoming::{NetRequest, NetRequestIncoming}, + requests::{MarkBlockCompletedRequest, NetworkRequest, StorageRequest}, EffectBuilder, EffectExt, Effects, }, fatal, - reactor::ReactorEvent, + protocol::Message, types::{ - Block, BlockBody, BlockHash, BlockHeader, BlockHeaderWithMetadata, BlockSignatures, Deploy, - DeployHash, DeployHeader, DeployMetadata, TimeDiff, + BlockExecutionResultsOrChunk, BlockExecutionResultsOrChunkId, BlockWithMetadata, + ExecutableBlock, LegacyDeploy, MaxTtl, NodeId, NodeRng, SyncLeap, SyncLeapIdentifier, + TransactionHeader, VariantMismatch, }, - utils::WithDir, - NodeRng, + utils::{display_error, WithDir}, }; -use lmdb_ext::{LmdbExtError, TransactionExt, WriteTransactionExt}; - -/// Filename for the LMDB database created by the Storage component. -const STORAGE_DB_FILENAME: &str = "storage.lmdb"; - -/// We can set this very low, as there is only a single reader/writer accessing the component at any -/// one time. -const MAX_TRANSACTIONS: u32 = 1; - -/// One Gibibyte. -const GIB: usize = 1024 * 1024 * 1024; - -/// Default max block store size. -const DEFAULT_MAX_BLOCK_STORE_SIZE: usize = 450 * GIB; -/// Default max deploy store size. -const DEFAULT_MAX_DEPLOY_STORE_SIZE: usize = 300 * GIB; -/// Default max deploy metadata store size. -const DEFAULT_MAX_DEPLOY_METADATA_STORE_SIZE: usize = 300 * GIB; -/// Default max state store size. -const DEFAULT_MAX_STATE_STORE_SIZE: usize = 10 * GIB; -/// Maximum number of allowed dbs. -const MAX_DB_COUNT: u32 = 7; - -/// OS-specific lmdb flags. 
-#[cfg(not(target_os = "macos"))] -const OS_FLAGS: EnvironmentFlags = EnvironmentFlags::WRITE_MAP; - -/// OS-specific lmdb flags. -/// -/// Mac OS X exhibits performance regressions when `WRITE_MAP` is used. -#[cfg(target_os = "macos")] -const OS_FLAGS: EnvironmentFlags = EnvironmentFlags::empty(); -const _STORAGE_EVENT_SIZE: usize = mem::size_of::(); -const_assert!(_STORAGE_EVENT_SIZE <= 96); - -#[derive(Debug, From, Serialize)] -#[repr(u8)] -pub enum Event { - /// Incoming storage request. - #[from] - StorageRequest(StorageRequest), - /// Incoming state storage request. - #[from] - StateStoreRequest(StateStoreRequest), -} - -/// A storage component initialization error. -#[derive(Debug, Error)] -pub enum Error { - /// Failure to create the root database directory. - #[error("failed to create database directory `{}`: {}", .0.display(), .1)] - CreateDatabaseDirectory(PathBuf, io::Error), - /// Found a duplicate block-at-height index entry. - #[error("duplicate entries for block at height {height}: {first} / {second}")] - DuplicateBlockIndex { - /// Height at which duplicate was found. - height: u64, - /// First block hash encountered at `height`. - first: BlockHash, - /// Second block hash encountered at `height`. - second: BlockHash, - }, - /// Found a duplicate switch-block-at-era-id index entry. - #[error("duplicate entries for switch block at era id {era_id}: {first} / {second}")] - DuplicateEraIdIndex { - /// Era ID at which duplicate was found. - era_id: EraId, - /// First block hash encountered at `era_id`. - first: BlockHash, - /// Second block hash encountered at `era_id`. - second: BlockHash, - }, - /// LMDB error while operating. - #[error("internal database error: {0}")] - InternalStorage(#[from] LmdbExtError), -} - -// We wholesale wrap lmdb errors and treat them as internal errors here. 
-impl From for Error { - fn from(err: lmdb::Error) -> Self { - LmdbExtError::from(err).into() - } -} +pub use config::Config; +use disjoint_sequences::{DisjointSequences, Sequence}; +pub use error::FatalStorageError; +use error::GetRequestError; +pub(crate) use event::Event; +use metrics::Metrics; +use object_pool::ObjectPool; + +const COMPONENT_NAME: &str = "storage"; + +/// Key under which completed blocks are to be stored. +const COMPLETED_BLOCKS_STORAGE_KEY: &[u8] = b"completed_blocks_disjoint_sequences"; +/// Name of the file created when initializing a force resync. +const FORCE_RESYNC_FILE_NAME: &str = "force_resync"; + +const STORAGE_FILES: [&str; 5] = [ + "data.lmdb", + "data.lmdb-lock", + "storage.lmdb", + "storage.lmdb-lock", + "sse_index", +]; + +/// The storage component. #[derive(DataSize, Debug)] pub struct Storage { /// Storage location. root: PathBuf, - /// Environment holding LMDB databases. - #[data_size(skip)] - env: Environment, - /// The block header database. - #[data_size(skip)] - block_header_db: Database, - /// The block body database. - #[data_size(skip)] - block_body_db: Database, - /// The block metadata db. - #[data_size(skip)] - block_metadata_db: Database, - /// The deploy database. - #[data_size(skip)] - deploy_db: Database, - /// The deploy metadata database. - #[data_size(skip)] - deploy_metadata_db: Database, - /// The transfer database. - #[data_size(skip)] - transfer_db: Database, - /// The state storage database. + /// Block store + pub(crate) block_store: IndexedLmdbBlockStore, + /// Runs of completed blocks known in storage. + completed_blocks: DisjointSequences, + /// The activation point era of the current protocol version. + activation_era: EraId, + /// The height of the final switch block of the previous protocol version. + key_block_height_for_activation_point: Option, + /// Whether or not memory deduplication is enabled. + enable_mem_deduplication: bool, + /// An in-memory pool of already loaded serialized items. 
+ /// + /// Keyed by serialized item ID, contains the serialized item. + serialized_item_pool: ObjectPool>, + /// The number of eras relative to the highest block's era which are considered as recent for + /// the purpose of deciding how to respond to a `NetRequest::SyncLeap`. + recent_era_count: u64, #[data_size(skip)] - state_store_db: Database, - /// A map of block height to block ID. - block_height_index: BTreeMap, - /// A map of era ID to switch block ID. - switch_block_era_id_index: BTreeMap, + metrics: Option, + /// The maximum TTL of a deploy. + max_ttl: MaxTtl, + /// The hash of the chain name. + chain_name_hash: ChainNameDigest, + /// The transaction config as specified by the chainspec. + transaction_config: TransactionConfig, + /// The utilization of blocks. + utilization_tracker: BTreeMap>, +} + +pub(crate) enum HighestOrphanedBlockResult { + MissingHighestSequence, + Orphan(BlockHeader), + MissingHeader(u64), +} + +impl Display for HighestOrphanedBlockResult { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self { + HighestOrphanedBlockResult::MissingHighestSequence => { + write!(f, "missing highest sequence") + } + HighestOrphanedBlockResult::Orphan(block_header) => write!( + f, + "orphan, height={}, hash={}", + block_header.height(), + block_header.block_hash() + ), + HighestOrphanedBlockResult::MissingHeader(height) => { + write!(f, "missing header for block at height: {}", height) + } + } + } } impl Component for Storage where - REv: ReactorEvent, + REv: From + From> + Send, { type Event = Event; - type ConstructionError = Error; fn handle_event( &mut self, @@ -213,9 +193,31 @@ where event: Self::Event, ) -> Effects { let result = match event { - Event::StorageRequest(req) => self.handle_storage_request::(req), - Event::StateStoreRequest(req) => { - self.handle_state_store_request::(effect_builder, req) + Event::StorageRequest(req) => self.handle_storage_request(*req), + Event::NetRequestIncoming(ref incoming) => { + match 
self.handle_net_request_incoming::(effect_builder, incoming) { + Ok(effects) => Ok(effects), + Err(GetRequestError::Fatal(fatal_error)) => Err(fatal_error), + Err(ref other_err) => { + warn!( + sender=%incoming.sender, + err=display_error(other_err), + "error handling net request" + ); + // We could still send the requester a "not found" message, and could do + // so even in the fatal case, but it is safer to not do so at the + // moment, giving less surface area for possible amplification attacks. + Ok(Effects::new()) + } + } + } + Event::MarkBlockCompletedRequest(req) => self.handle_mark_block_completed_request(req), + Event::MakeBlockExecutableRequest(req) => { + let ret = self.make_executable_block(&req.block_hash); + match ret { + Ok(maybe) => Ok(req.responder.respond(maybe).ignore()), + Err(err) => Err(err), + } } }; @@ -227,1073 +229,2178 @@ where Err(err) => fatal!(effect_builder, "storage error: {}", err).ignore(), } } + + fn name(&self) -> &str { + COMPONENT_NAME + } } impl Storage { /// Creates a new storage component. - pub(crate) fn new( + #[allow(clippy::too_many_arguments)] + pub fn new( cfg: &WithDir, hard_reset_to_start_of_era: Option, protocol_version: ProtocolVersion, - ) -> Result { + activation_era: EraId, + network_name: &str, + max_ttl: MaxTtl, + recent_era_count: u64, + registry: Option<&Registry>, + force_resync: bool, + transaction_config: TransactionConfig, + ) -> Result { let config = cfg.value(); // Create the database directory. - let root = cfg.with_dir(config.path.clone()); - if !root.exists() { - fs::create_dir_all(&root) - .map_err(|err| Error::CreateDatabaseDirectory(root.clone(), err))?; + let mut root = cfg.with_dir(config.path.clone()); + let network_subdir = root.join(network_name); + + if !network_subdir.exists() { + fs::create_dir_all(&network_subdir).map_err(|err| { + FatalStorageError::CreateDatabaseDirectory(network_subdir.clone(), err) + })?; + } + + if should_move_storage_files_to_network_subdir(&root, &STORAGE_FILES)? 
{ + move_storage_files_to_network_subdir(&root, &network_subdir, &STORAGE_FILES)?; } + root = network_subdir; + // Calculate the upper bound for the memory map that is potentially used. let total_size = config .max_block_store_size .saturating_add(config.max_deploy_store_size) .saturating_add(config.max_deploy_metadata_store_size); - // Creates the environment and databases. - let env = Environment::new() - .set_flags( - OS_FLAGS | - // We manage our own directory. - EnvironmentFlags::NO_SUB_DIR - // Disable thread local storage, strongly suggested for operation with tokio. - | EnvironmentFlags::NO_TLS, - ) - .set_max_readers(MAX_TRANSACTIONS) - .set_max_dbs(MAX_DB_COUNT) - .set_map_size(total_size) - .open(&root.join(STORAGE_DB_FILENAME))?; - - let block_header_db = env.create_db(Some("block_header"), DatabaseFlags::empty())?; - let block_metadata_db = env.create_db(Some("block_metadata"), DatabaseFlags::empty())?; - let deploy_db = env.create_db(Some("deploys"), DatabaseFlags::empty())?; - let deploy_metadata_db = env.create_db(Some("deploy_metadata"), DatabaseFlags::empty())?; - let transfer_db = env.create_db(Some("transfer"), DatabaseFlags::empty())?; - let state_store_db = env.create_db(Some("state_store"), DatabaseFlags::empty())?; - let block_body_db = env.create_db(Some("block_body"), DatabaseFlags::empty())?; - - // We now need to restore the block-height index. Log messages allow timing here. - info!("reindexing block store"); - let mut block_height_index = BTreeMap::new(); - let mut switch_block_era_id_index = BTreeMap::new(); - let mut block_txn = env.begin_rw_txn()?; - let mut cursor = block_txn.open_rw_cursor(block_header_db)?; - - let mut deleted_block_hashes = HashSet::new(); - // Note: `iter_start` has an undocumented panic if called on an empty database. We rely on - // the iterator being at the start when created. 
- for (raw_key, raw_val) in cursor.iter() { - let block: BlockHeader = lmdb_ext::deserialize(raw_val)?; - if let Some(invalid_era) = hard_reset_to_start_of_era { - // Remove blocks that are in to-be-upgraded eras, but have obsolete protocol - // versions - they were most likely created before the upgrade and should be - // reverted. - if block.era_id() >= invalid_era && block.protocol_version() < protocol_version { - let _ = deleted_block_hashes.insert(block.hash()); - cursor.del(WriteFlags::empty())?; - continue; + let block_store = LmdbBlockStore::new(root.as_path(), total_size)?; + let indexed_block_store = + IndexedLmdbBlockStore::new(block_store, hard_reset_to_start_of_era, protocol_version)?; + + let metrics = registry.map(Metrics::new).transpose()?; + + let mut component = Self { + root, + block_store: indexed_block_store, + completed_blocks: Default::default(), + activation_era, + key_block_height_for_activation_point: None, + enable_mem_deduplication: config.enable_mem_deduplication, + serialized_item_pool: ObjectPool::new(config.mem_pool_prune_interval), + recent_era_count, + max_ttl, + utilization_tracker: BTreeMap::new(), + metrics, + chain_name_hash: ChainNameDigest::from_chain_name(network_name), + transaction_config, + }; + + if force_resync { + let force_resync_file_path = component.root_path().join(FORCE_RESYNC_FILE_NAME); + // Check if resync is already in progress. Force resync will kick + // in only when the marker file didn't exist before. + // Use `OpenOptions::create_new` to atomically check for the file + // presence and create it if necessary. + match OpenOptions::new() + .create_new(true) + .write(true) + .open(&force_resync_file_path) + { + Ok(_file) => { + // When the force resync marker file was not present and + // is now created, initialize force resync. + info!("initializing force resync"); + // Default `storage.completed_blocks`. 
+ component.completed_blocks = Default::default(); + component.persist_completed_blocks()?; + // Exit the initialization function early. + return Ok(component); + } + Err(io_err) if io_err.kind() == ErrorKind::AlreadyExists => { + info!("skipping force resync as marker file exists"); + } + Err(io_err) => { + warn!( + "couldn't operate on the force resync marker file at path {}: {}", + force_resync_file_path.to_string_lossy(), + io_err + ); } } - // We use the opportunity for a small integrity check. - assert_eq!( - raw_key, - block.hash().as_ref(), - "found corrupt block in database" - ); - insert_to_block_header_indices( - &mut block_height_index, - &mut switch_block_era_id_index, - &block, - )?; } - info!("block store reindexing complete"); - drop(cursor); - block_txn.commit()?; - let deleted_block_hashes_raw = deleted_block_hashes.iter().map(BlockHash::as_ref).collect(); + { + let ro_txn = component.block_store.checkout_ro()?; + let maybe_state_store: Option> = ro_txn.read(StateStoreKey::new( + Cow::Borrowed(COMPLETED_BLOCKS_STORAGE_KEY), + ))?; + match maybe_state_store { + Some(raw) => { + let (mut sequences, _) = DisjointSequences::from_vec(raw) + .map_err(FatalStorageError::UnexpectedDeserializationFailure)?; + + // Truncate the sequences in case we removed blocks via a hard reset. + if let Some(header) = DataReader::::read(&ro_txn, Tip)? { + sequences.truncate(header.height()); + } - initialize_block_body_db(&env, &block_body_db, &deleted_block_hashes_raw)?; - initialize_block_metadata_db(&env, &block_metadata_db, &deleted_block_hashes_raw)?; - initialize_deploy_metadata_db(&env, &deploy_metadata_db, &deleted_block_hashes)?; + component.completed_blocks = sequences; + } + None => { + // No state so far. We can make the following observations: + // + // 1. 
Any block already in storage from versions prior to 1.5 (no fast-sync) + // MUST have the corresponding global state in contract + // runtime due to the way sync worked previously, so with + // the potential exception of finality signatures, we can + // consider all these blocks complete. 2. Any block acquired + // from that point onwards was subject to the insertion of the + // appropriate announcements (`BlockCompletedAnnouncement`), which would have + // caused the creation of the completed blocks index, thus would not have + // resulted in a `None` value here. + // + // Note that a previous run of this version which aborted early could have + // stored some blocks and/or block-headers without + // completing the sync process. Hence, when setting the + // `completed_blocks` in this None case, we'll only consider blocks + // from a previous protocol version as complete. + + let maybe_block_header: Option = ro_txn.read(Tip)?; + if let Some(highest_block_header) = maybe_block_header { + for height in (0..=highest_block_header.height()).rev() { + let maybe_header: Option = ro_txn.read(height)?; + match maybe_header { + Some(header) if header.protocol_version() < protocol_version => { + component.completed_blocks = + DisjointSequences::new(Sequence::new(0, header.height())); + break; + } + _ => {} + } + } + }; + } + } + } + component.persist_completed_blocks()?; + Ok(component) + } - Ok(Storage { - root, - env, - block_header_db, - block_body_db, - block_metadata_db, - deploy_db, - deploy_metadata_db, - transfer_db, - state_store_db, - block_height_index, - switch_block_era_id_index, - }) + /// Returns the path to the storage folder. + pub(crate) fn root_path(&self) -> &Path { + &self.root } - /// Handles a state store request. 
- fn handle_state_store_request( + fn handle_net_request_incoming( &mut self, - _effect_builder: EffectBuilder, - req: StateStoreRequest, - ) -> Result, Error> + effect_builder: EffectBuilder, + incoming: &NetRequestIncoming, + ) -> Result, GetRequestError> where - Self: Component, + REv: From> + Send, { - // Incoming requests are fairly simple database write. Errors are handled one level above on - // the call stack, so all we have to do is load or store a value. - match req { - StateStoreRequest::Save { - key, - data, - responder, - } => { - let mut txn = self.env.begin_rw_txn()?; - txn.put(self.state_store_db, &key, &data, WriteFlags::default())?; - txn.commit()?; - Ok(responder.respond(()).ignore()) - } - StateStoreRequest::Load { key, responder } => { - let txn = self.env.begin_ro_txn()?; - let bytes = match txn.get(self.state_store_db, &key) { - Ok(slice) => Some(slice.to_owned()), - Err(lmdb::Error::NotFound) => None, - Err(err) => return Err(err.into()), - }; - Ok(responder.respond(bytes).ignore()) + if self.enable_mem_deduplication { + let unique_id = incoming.message.unique_id(); + + if let Some(serialized_item) = self + .serialized_item_pool + .get(AsRef::<[u8]>::as_ref(&unique_id)) + { + // We found an item in the pool. We can short-circuit all + // deserialization/serialization and return the canned item + // immediately. + let found = Message::new_get_response_from_serialized( + incoming.message.tag(), + serialized_item, + ); + return Ok(effect_builder.send_message(incoming.sender, found).ignore()); } } - } - /// Reads from the state storage DB. - /// If key is non-empty, returns bytes from under the key. Otherwise returns `Ok(None)`. - /// May also fail with storage errors. 
- #[cfg(not(feature = "fast-sync"))] - pub(crate) fn read_state_store(&self, key: &K) -> Result>, Error> - where - K: AsRef<[u8]>, - { - let txn = self.env.begin_ro_txn()?; - let bytes = match txn.get(self.state_store_db, &key) { - Ok(slice) => Some(slice.to_owned()), - Err(lmdb::Error::NotFound) => None, - Err(err) => return Err(err.into()), - }; - Ok(bytes) - } + match *(incoming.message) { + NetRequest::Transaction(ref serialized_id) => { + let id = decode_item_id::(serialized_id)?; + let opt_item = self.get_transaction_by_id(id)?; + let fetch_response = FetchResponse::from_opt(id, opt_item); + + Ok(self.update_pool_and_send( + effect_builder, + incoming.sender, + serialized_id, + fetch_response, + )?) + } + NetRequest::LegacyDeploy(ref serialized_id) => { + let id = decode_item_id::(serialized_id)?; + let opt_item = self.get_legacy_deploy(id)?; + let fetch_response = FetchResponse::from_opt(id, opt_item); + + Ok(self.update_pool_and_send( + effect_builder, + incoming.sender, + serialized_id, + fetch_response, + )?) + } + NetRequest::Block(ref serialized_id) => { + let id = decode_item_id::(serialized_id)?; + let opt_item: Option = self + .block_store + .checkout_ro() + .map_err(FatalStorageError::from)? + .read(id) + .map_err(FatalStorageError::from)?; + let fetch_response = FetchResponse::from_opt(id, opt_item); + + Ok(self.update_pool_and_send( + effect_builder, + incoming.sender, + serialized_id, + fetch_response, + )?) + } + NetRequest::BlockHeader(ref serialized_id) => { + let item_id = decode_item_id::(serialized_id)?; + let opt_item: Option = self + .block_store + .checkout_ro() + .map_err(FatalStorageError::from)? + .read(item_id) + .map_err(FatalStorageError::from)?; + let fetch_response = FetchResponse::from_opt(item_id, opt_item); + + Ok(self.update_pool_and_send( + effect_builder, + incoming.sender, + serialized_id, + fetch_response, + )?) 
+ } + NetRequest::FinalitySignature(ref serialized_id) => { + let id = decode_item_id::(serialized_id)?; + let opt_item = self + .block_store + .checkout_ro() + .map_err(FatalStorageError::from)? + .read(*id.block_hash()) + .map_err(FatalStorageError::from)? + .and_then(|block_signatures: BlockSignatures| { + block_signatures.finality_signature(id.public_key()) + }); - /// Deletes value living under the key from the state storage DB. - #[cfg(not(feature = "fast-sync"))] - pub(crate) fn del_state_store(&self, key: K) -> Result - where - K: AsRef<[u8]>, - { - let mut txn = self.env.begin_rw_txn()?; - let result = match txn.del(self.state_store_db, &key, None) { - Ok(_) => Ok(true), - Err(lmdb::Error::NotFound) => Ok(false), - Err(err) => Err(err), - }?; - txn.commit()?; - Ok(result) + if let Some(item) = opt_item.as_ref() { + if item.block_hash() != id.block_hash() || item.era_id() != id.era_id() { + return Err(GetRequestError::FinalitySignatureIdMismatch { + requested_id: id, + finality_signature: Box::new(item.clone()), + }); + } + } + let fetch_response = FetchResponse::from_opt(id, opt_item); + + Ok(self.update_pool_and_send( + effect_builder, + incoming.sender, + serialized_id, + fetch_response, + )?) + } + NetRequest::SyncLeap(ref serialized_id) => { + let item_id = decode_item_id::(serialized_id)?; + let fetch_response = self.get_sync_leap(item_id)?; + + Ok(self.update_pool_and_send( + effect_builder, + incoming.sender, + serialized_id, + fetch_response, + )?) + } + NetRequest::ApprovalsHashes(ref serialized_id) => { + let item_id = decode_item_id::(serialized_id)?; + let opt_item: Option = self + .block_store + .checkout_ro() + .map_err(FatalStorageError::from)? + .read(item_id) + .map_err(FatalStorageError::from)?; + let fetch_response = FetchResponse::from_opt(item_id, opt_item); + + Ok(self.update_pool_and_send( + effect_builder, + incoming.sender, + serialized_id, + fetch_response, + )?) 
+ } + NetRequest::BlockExecutionResults(ref serialized_id) => { + let item_id = decode_item_id::(serialized_id)?; + let opt_item = self.read_block_execution_results_or_chunk(&item_id)?; + let fetch_response = FetchResponse::from_opt(item_id, opt_item); + + Ok(self.update_pool_and_send( + effect_builder, + incoming.sender, + serialized_id, + fetch_response, + )?) + } + } } /// Handles a storage request. - fn handle_storage_request(&mut self, req: StorageRequest) -> Result, Error> - where - Self: Component, - { + fn handle_storage_request( + &mut self, + req: StorageRequest, + ) -> Result, FatalStorageError> { // Note: Database IO is handled in a blocking fashion on purpose throughout this function. // The rationale is that long IO operations are very rare and cache misses frequent, so on // average the actual execution time will be very low. Ok(match req { StorageRequest::PutBlock { block, responder } => { - let mut txn = self.env.begin_rw_txn()?; - if !txn.put_value( - self.block_body_db, - block.header().body_hash(), - block.body(), - true, - )? { - error!("Could not insert block body for block: {}", block); - txn.abort(); - return Ok(responder.respond(false).ignore()); - } - if !txn.put_value(self.block_header_db, block.hash(), block.header(), true)? 
{ - error!("Could not insert block header for block: {}", block); - txn.abort(); - return Ok(responder.respond(false).ignore()); - } - txn.commit()?; - insert_to_block_header_indices( - &mut self.block_height_index, - &mut self.switch_block_era_id_index, - block.header(), - )?; + let mut rw_txn = self.block_store.checkout_rw()?; + let _ = rw_txn.write(&*block)?; + rw_txn.commit()?; + responder.respond(true).ignore() + } + StorageRequest::PutApprovalsHashes { + approvals_hashes, + responder, + } => { + let mut rw_txn = self.block_store.checkout_rw()?; + let _ = rw_txn.write(&*approvals_hashes)?; + rw_txn.commit()?; responder.respond(true).ignore() } StorageRequest::GetBlock { block_hash, responder, - } => responder - .respond(self.get_single_block(&mut self.env.begin_ro_txn()?, &block_hash)?) - .ignore(), - StorageRequest::GetBlockHeaderAtHeight { height, responder } => responder - .respond(self.get_block_header_by_height(&mut self.env.begin_ro_txn()?, height)?) - .ignore(), - StorageRequest::GetBlockAtHeight { height, responder } => responder - .respond(self.get_block_by_height(&mut self.env.begin_ro_txn()?, height)?) - .ignore(), - StorageRequest::GetHighestBlock { responder } => { - let mut txn = self.env.begin_ro_txn()?; + } => { + let maybe_block = self.block_store.checkout_ro()?.read(block_hash)?; + responder.respond(maybe_block).ignore() + } + StorageRequest::IsBlockStored { + block_hash, + responder, + } => { + let txn = self.block_store.checkout_ro()?; responder - .respond(self.get_highest_block(&mut txn)?) + .respond(DataReader::::exists(&txn, block_hash)?) .ignore() } - StorageRequest::GetSwitchBlockHeaderAtEraId { era_id, responder } => responder - .respond( - self.get_switch_block_header_by_era_id(&mut self.env.begin_ro_txn()?, era_id)?, - ) + StorageRequest::GetApprovalsHashes { + block_hash, + responder, + } => responder + .respond(self.block_store.checkout_ro()?.read(block_hash)?) 
.ignore(), - StorageRequest::GetSwitchBlockAtEraId { era_id, responder } => responder - .respond(self.get_switch_block_by_era_id(&mut self.env.begin_ro_txn()?, era_id)?) + StorageRequest::GetHighestCompleteBlock { responder } => responder + .respond(self.get_highest_complete_block()?) .ignore(), - StorageRequest::GetHighestSwitchBlock { responder } => { - let mut txn = self.env.begin_ro_txn()?; - responder - .respond( - self.switch_block_era_id_index - .keys() - .last() - .and_then(|&era_id| { - self.get_switch_block_by_era_id(&mut txn, era_id) - .transpose() - }) - .transpose()?, - ) - .ignore() + StorageRequest::GetHighestCompleteBlockHeader { responder } => responder + .respond(self.get_highest_complete_block_header()?) + .ignore(), + StorageRequest::GetTransactionsEraIds { + transaction_hashes, + responder, + } => { + let mut era_ids = HashSet::new(); + let txn = self.block_store.checkout_ro()?; + for transaction_hash in &transaction_hashes { + let maybe_block_info: Option = + txn.read(*transaction_hash)?; + if let Some(block_info) = maybe_block_info { + era_ids.insert(block_info.era_id); + } + } + responder.respond(era_ids).ignore() } StorageRequest::GetBlockHeader { block_hash, + only_from_available_block_range, responder, - } => responder - // TODO: Find a solution for efficiently retrieving the blocker header without the - // block. Deserialization that allows trailing bytes could be a possible solution. - .respond( - self.get_single_block(&mut self.env.begin_ro_txn()?, &block_hash)? - .map(|block| block.header().clone()), - ) - .ignore(), + } => { + let txn = self.block_store.checkout_ro()?; + responder + .respond(self.get_single_block_header_restricted( + &txn, + &block_hash, + only_from_available_block_range, + )?) + .ignore() + } StorageRequest::GetBlockTransfers { block_hash, responder, - } => responder - .respond(self.get_transfers(&mut self.env.begin_ro_txn()?, &block_hash)?) 
- .ignore(), - StorageRequest::PutDeploy { deploy, responder } => { - let mut txn = self.env.begin_rw_txn()?; - let outcome = txn.put_value(self.deploy_db, deploy.id(), &deploy, false)?; - txn.commit()?; - responder.respond(outcome).ignore() + } => { + let maybe_transfers = self.get_transfers(&block_hash)?; + responder.respond(maybe_transfers).ignore() } - StorageRequest::GetDeploys { - deploy_hashes, + StorageRequest::PutTransaction { + transaction, responder, - } => responder - .respond(self.get_deploys(&mut self.env.begin_ro_txn()?, deploy_hashes.as_slice())?) - .ignore(), - StorageRequest::GetDeployHeaders { - deploy_hashes, + } => { + let mut rw_txn = self.block_store.checkout_rw()?; + if DataReader::::exists(&rw_txn, transaction.hash())? + { + responder.respond(false).ignore() + } else { + let _ = rw_txn.write(&*transaction)?; + rw_txn.commit()?; + responder.respond(true).ignore() + } + } + StorageRequest::GetTransactions { + transaction_hashes, responder, } => responder - .respond( - // TODO: Similarly to getting block headers, requires optimized function. - self.get_deploys(&mut self.env.begin_ro_txn()?, deploy_hashes.as_slice())? - .into_iter() - .map(|opt| opt.map(|deploy| deploy.header().clone())) - .collect(), - ) + .respond(self.get_transactions_with_finalized_approvals(transaction_hashes.iter())?) .ignore(), - StorageRequest::PutExecutionResults { - block_hash, - execution_results, + StorageRequest::GetLegacyDeploy { + deploy_hash, responder, } => { - let mut txn = self.env.begin_rw_txn()?; - - let mut transfers: Vec = vec![]; - - for (deploy_hash, execution_result) in execution_results { - let mut metadata = self - .get_deploy_metadata(&mut txn, &deploy_hash)? - .unwrap_or_default(); - - // If we have a previous execution result, we can continue if it is the same. 
- if let Some(prev) = metadata.execution_results.get(&block_hash) { - if prev == &execution_result { - continue; + let maybe_legacy_deploy = self.get_legacy_deploy(deploy_hash)?; + responder.respond(maybe_legacy_deploy).ignore() + } + StorageRequest::GetTransaction { + transaction_id, + responder, + } => { + let ro_txn = self.block_store.checkout_ro()?; + let maybe_transaction = match Self::get_transaction_with_finalized_approvals( + &ro_txn, + &transaction_id.transaction_hash(), + )? { + None => None, + Some((transaction, maybe_approvals)) => { + let transaction = if let Some(approvals) = maybe_approvals { + transaction.with_approvals(approvals) } else { - debug!(%deploy_hash, %block_hash, "different execution result"); - } + transaction + }; + (transaction.fetch_id() == transaction_id).then_some(transaction) } - - if let ExecutionResult::Success { effect, .. } = execution_result.clone() { - for transform_entry in effect.transforms { - if let Transform::WriteTransfer(transfer) = transform_entry.transform { - transfers.push(transfer); + }; + responder.respond(maybe_transaction).ignore() + } + StorageRequest::GetTransactionAndExecutionInfo { + transaction_hash, + with_finalized_approvals, + responder, + } => { + let ro_txn = self.block_store.checkout_ro()?; + + let transaction = if with_finalized_approvals { + match Self::get_transaction_with_finalized_approvals( + &ro_txn, + &transaction_hash, + )? { + Some((transaction, maybe_approvals)) => { + if let Some(approvals) = maybe_approvals { + transaction.with_approvals(approvals) + } else { + transaction } } + None => return Ok(responder.respond(None).ignore()), + } + } else { + match ro_txn.read(transaction_hash)? { + Some(transaction) => transaction, + None => return Ok(responder.respond(None).ignore()), } + }; - // TODO: this is currently done like this because rpc get_deploy returns the - // data, but the organization of deploy, block_hash, and - // execution_result is incorrectly represented. 
it should be - // inverted; for a given block_hash 0n deploys and each deploy has exactly 1 - // result (aka deploy_metadata in this context). - - // Update metadata and write back to db. - metadata - .execution_results - .insert(*block_hash, execution_result); - let was_written = - txn.put_value(self.deploy_metadata_db, &deploy_hash, &metadata, true)?; - assert!( - was_written, - "failed to write deploy metadata for block_hash {} deploy_hash {}", - block_hash, deploy_hash - ); - } + let block_hash_height_and_era: BlockHashHeightAndEra = + match ro_txn.read(transaction_hash)? { + Some(value) => value, + None => return Ok(responder.respond(Some((transaction, None))).ignore()), + }; - let was_written = - txn.put_value(self.transfer_db, &*block_hash, &transfers, true)?; - assert!( - was_written, - "failed to write transfers for block_hash {}", - block_hash - ); + let execution_result = ro_txn.read(transaction_hash)?; + let execution_info = ExecutionInfo { + block_hash: block_hash_height_and_era.block_hash, + block_height: block_hash_height_and_era.block_height, + execution_result, + }; - txn.commit()?; - responder.respond(()).ignore() + responder + .respond(Some((transaction, Some(execution_info)))) + .ignore() } - StorageRequest::GetDeployAndMetadata { - deploy_hash, + StorageRequest::IsTransactionStored { + transaction_id, responder, } => { - let mut txn = self.env.begin_ro_txn()?; - - // A missing deploy causes an early `None` return. - let deploy: Deploy = - if let Some(deploy) = txn.get_value(self.deploy_db, &deploy_hash)? { - deploy - } else { - return Ok(responder.respond(None).ignore()); - }; - - // Missing metadata is filled using a default. - let metadata = self - .get_deploy_metadata(&mut txn, &deploy_hash)? 
- .unwrap_or_default(); - responder.respond(Some((deploy, metadata))).ignore() + let txn = self.block_store.checkout_ro()?; + let has_transaction = DataReader::::exists( + &txn, + transaction_id.transaction_hash(), + )?; + responder.respond(has_transaction).ignore() } - StorageRequest::GetBlockAndMetadataByHash { + StorageRequest::GetExecutionResults { block_hash, responder, } => { - let mut txn = self.env.begin_ro_txn()?; - - let block: Block = - if let Some(block) = self.get_single_block(&mut txn, &block_hash)? { - block - } else { - return Ok(responder.respond(None).ignore()); - }; - // Check that the hash of the block retrieved is correct. - assert_eq!(&block_hash, block.hash()); - let signatures = match self.get_finality_signatures(&mut txn, &block_hash)? { - Some(signatures) => signatures, - None => BlockSignatures::new(block_hash, block.header().era_id()), - }; - assert!(signatures.verify().is_ok()); - - responder.respond(Some((block, signatures))).ignore() + let txn = self.block_store.checkout_ro()?; + responder + .respond(Self::get_execution_results_with_transaction_headers( + &txn, + &block_hash, + )?) + .ignore() + } + StorageRequest::GetBlockExecutionResultsOrChunk { id, responder } => responder + .respond(self.read_block_execution_results_or_chunk(&id)?) + .ignore(), + StorageRequest::PutExecutionResults { + block_hash, + block_height, + era_id, + execution_results, + responder, + } => { + let mut rw_txn = self.block_store.checkout_rw()?; + let _ = rw_txn.write(&BlockExecutionResults { + block_info: BlockHashHeightAndEra::new(*block_hash, block_height, era_id), + exec_results: execution_results, + })?; + rw_txn.commit()?; + responder.respond(()).ignore() + } + StorageRequest::GetFinalitySignature { id, responder } => { + let maybe_sig = self + .block_store + .checkout_ro()? + .read(*id.block_hash())? 
+ .and_then(|sigs: BlockSignatures| sigs.finality_signature(id.public_key())) + .filter(|sig| sig.era_id() == id.era_id()); + responder.respond(maybe_sig).ignore() + } + StorageRequest::IsFinalitySignatureStored { id, responder } => { + let has_signature = self + .block_store + .checkout_ro()? + .read(*id.block_hash())? + .map(|sigs: BlockSignatures| sigs.has_finality_signature(id.public_key())) + .unwrap_or(false); + responder.respond(has_signature).ignore() } StorageRequest::GetBlockAndMetadataByHeight { block_height, + only_from_available_block_range, responder, } => { - let mut txn = self.env.begin_ro_txn()?; + if !(self.should_return_block(block_height, only_from_available_block_range)) { + return Ok(responder.respond(None).ignore()); + } - let block: Block = - if let Some(block) = self.get_block_by_height(&mut txn, block_height)? { + let ro_txn = self.block_store.checkout_ro()?; + + let block: Block = { + if let Some(block) = ro_txn.read(block_height)? { block } else { return Ok(responder.respond(None).ignore()); - }; + } + }; let hash = block.hash(); - let signatures = match self.get_finality_signatures(&mut txn, hash)? { - Some(signatures) => signatures, - None => BlockSignatures::new(*hash, block.header().era_id()), - }; - responder.respond(Some((block, signatures))).ignore() - } - StorageRequest::GetHighestBlockWithMetadata { responder } => { - let mut txn = self.env.begin_ro_txn()?; - let highest_block: Block = if let Some(block) = self - .block_height_index - .keys() - .last() - .and_then(|&height| self.get_block_by_height(&mut txn, height).transpose()) - .transpose()? - { - block - } else { - return Ok(responder.respond(None).ignore()); - }; - let hash = highest_block.hash(); - let signatures = match self.get_finality_signatures(&mut txn, hash)? { + let block_signatures = match ro_txn.read(*hash)? 
{ Some(signatures) => signatures, - None => BlockSignatures::new(*hash, highest_block.header().era_id()), + None => self.get_default_block_signatures(&block), }; responder - .respond(Some((highest_block, signatures))) + .respond(Some(BlockWithMetadata { + block, + block_signatures, + })) .ignore() } StorageRequest::PutBlockSignatures { signatures, responder, } => { - let mut txn = self.env.begin_rw_txn()?; - let old_data: Option = - txn.get_value(self.block_metadata_db, &signatures.block_hash)?; + if signatures.is_empty() { + error!( + ?signatures, + "should not attempt to store empty collection of block signatures" + ); + return Ok(responder.respond(false).ignore()); + } + let mut txn = self.block_store.checkout_rw()?; + let old_data: Option = txn.read(*signatures.block_hash())?; let new_data = match old_data { None => signatures, Some(mut data) => { - for (pk, sig) in signatures.proofs { - data.insert_proof(pk, sig); + if let Err(error) = data.merge(signatures) { + error!(%error, "failed to put block signatures"); + return Ok(responder.respond(false).ignore()); } data } }; - let outcome = txn.put_value( - self.block_metadata_db, - &new_data.block_hash, - &new_data, - true, - )?; + let _ = txn.write(&new_data)?; txn.commit()?; - responder.respond(outcome).ignore() + responder.respond(true).ignore() + } + StorageRequest::PutFinalitySignature { + signature, + responder, + } => { + let mut rw_txn = self.block_store.checkout_rw()?; + let block_hash = signature.block_hash(); + let mut block_signatures: BlockSignatures = + if let Some(existing_signatures) = rw_txn.read(*block_hash)? 
{ + existing_signatures + } else { + match &*signature { + FinalitySignature::V1(signature) => { + BlockSignaturesV1::new(*signature.block_hash(), signature.era_id()) + .into() + } + FinalitySignature::V2(signature) => BlockSignaturesV2::new( + *signature.block_hash(), + signature.block_height(), + signature.era_id(), + signature.chain_name_hash(), + ) + .into(), + } + }; + match (&mut block_signatures, *signature) { + ( + BlockSignatures::V1(ref mut block_signatures), + FinalitySignature::V1(signature), + ) => { + block_signatures.insert_signature( + signature.public_key().clone(), + *signature.signature(), + ); + } + ( + BlockSignatures::V2(ref mut block_signatures), + FinalitySignature::V2(signature), + ) => { + block_signatures.insert_signature( + signature.public_key().clone(), + *signature.signature(), + ); + } + (block_signatures, signature) => { + let mismatch = + VariantMismatch(Box::new((block_signatures.clone(), signature))); + return Err(FatalStorageError::from(mismatch)); + } + } + + let _ = rw_txn.write(&block_signatures); + rw_txn.commit()?; + responder.respond(true).ignore() } - StorageRequest::GetBlockSignatures { + StorageRequest::GetBlockSignature { block_hash, + public_key, responder, } => { - let result = - self.get_finality_signatures(&mut self.env.begin_ro_txn()?, &block_hash)?; - responder.respond(result).ignore() + let maybe_signatures: Option = + self.block_store.checkout_ro()?.read(block_hash)?; + responder + .respond( + maybe_signatures + .and_then(|signatures| signatures.finality_signature(&public_key)), + ) + .ignore() } - StorageRequest::GetFinalizedDeploys { ttl, responder } => { - responder.respond(self.get_finalized_deploys(ttl)?).ignore() + StorageRequest::GetBlockHeaderByHeight { + block_height, + only_from_available_block_range, + responder, + } => { + let maybe_header = self + .read_block_header_by_height(block_height, only_from_available_block_range)?; + responder.respond(maybe_header).ignore() } - }) - } - - /// Retrieves 
single block header by height by looking it up in the index and returning it. - fn get_block_header_and_metadata_by_height( - &self, - tx: &mut Tx, - height: u64, - ) -> Result, Error> { - let block_hash = match self.block_height_index.get(&height) { - None => return Ok(None), - Some(block_hash) => block_hash, - }; - let block_header = match self.get_single_block_header(tx, block_hash)? { - None => return Ok(None), - Some(block_header) => block_header, - }; - let block_signatures = match self.get_finality_signatures(tx, block_hash)? { - None => BlockSignatures::new(*block_hash, block_header.era_id()), - Some(signatures) => signatures, - }; - Ok(Some(BlockHeaderWithMetadata { - block_header, - block_signatures, - })) + StorageRequest::GetLatestSwitchBlockHeader { responder } => { + let txn = self.block_store.checkout_ro()?; + let maybe_header = txn.read(LatestSwitchBlock)?; + responder.respond(maybe_header).ignore() + } + StorageRequest::GetSwitchBlockHeaderByEra { era_id, responder } => { + let txn = self.block_store.checkout_ro()?; + let maybe_header = txn.read(era_id)?; + responder.respond(maybe_header).ignore() + } + StorageRequest::PutBlockHeader { + block_header, + responder, + } => { + let mut rw_txn = self.block_store.checkout_rw()?; + let _ = rw_txn.write(&*block_header)?; + rw_txn.commit()?; + responder.respond(true).ignore() + } + StorageRequest::GetAvailableBlockRange { responder } => { + responder.respond(self.get_available_block_range()).ignore() + } + StorageRequest::StoreFinalizedApprovals { + ref transaction_hash, + ref finalized_approvals, + responder, + } => { + info!(txt=?transaction_hash, count=finalized_approvals.len(), "storing finalized approvals {:?}", finalized_approvals); + responder + .respond(self.store_finalized_approvals(transaction_hash, finalized_approvals)?) 
+ .ignore() + } + StorageRequest::PutExecutedBlock { + block, + approvals_hashes, + execution_results, + responder, + } => { + let block: Block = (*block).clone().into(); + let transaction_config = self.transaction_config.clone(); + responder + .respond(self.put_executed_block( + transaction_config, + &block, + &approvals_hashes, + execution_results, + )?) + .ignore() + } + StorageRequest::GetKeyBlockHeightForActivationPoint { responder } => { + // If we haven't already cached the height, try to retrieve the key block header. + if self.key_block_height_for_activation_point.is_none() { + let key_block_era = self.activation_era.predecessor().unwrap_or_default(); + let txn = self.block_store.checkout_ro()?; + let key_block_header: BlockHeader = match txn.read(key_block_era)? { + Some(block_header) => block_header, + None => return Ok(responder.respond(None).ignore()), + }; + self.key_block_height_for_activation_point = Some(key_block_header.height()); + } + responder + .respond(self.key_block_height_for_activation_point) + .ignore() + } + StorageRequest::GetRawData { + key, + responder, + record_id, + } => { + let db_table_id = utils::db_table_id_from_record_id(record_id) + .map_err(|_| FatalStorageError::UnexpectedRecordId(record_id))?; + let txn = self.block_store.checkout_ro()?; + let maybe_data: Option = txn.read((db_table_id, key))?; + match maybe_data { + None => responder.respond(None).ignore(), + Some(db_raw) => responder.respond(Some(db_raw)).ignore(), + } + } + StorageRequest::GetEraUtilizationScore { + era_id, + block_height, + switch_block_utilization, + responder, + } => { + let utilization = + self.get_era_utilization_score(era_id, block_height, switch_block_utilization); + + responder.respond(utilization).ignore() + } + }) } - // Retrieves a block header to handle a network request. 
- pub fn read_block_header_and_finality_signatures_by_height( + pub(crate) fn read_block_header_by_height( &self, - height: u64, - ) -> Result, Error> { - let mut txn = self.env.begin_ro_txn()?; - let maybe_block_header_and_finality_signatures = - self.get_block_header_and_metadata_by_height(&mut txn, height)?; - drop(txn); - Ok(maybe_block_header_and_finality_signatures) + block_height: u64, + only_from_available_block_range: bool, + ) -> Result, FatalStorageError> { + if !(self.should_return_block(block_height, only_from_available_block_range)) { + Ok(None) + } else { + let txn = self.block_store.checkout_ro()?; + txn.read(block_height).map_err(FatalStorageError::from) + } } - /// Retrieves single block header by height by looking it up in the index and returning it. - fn get_block_header_by_height( + pub(crate) fn get_switch_block_by_era_id( &self, - tx: &mut Tx, - height: u64, - ) -> Result, LmdbExtError> { - self.block_height_index - .get(&height) - .and_then(|block_hash| self.get_single_block_header(tx, block_hash).transpose()) - .transpose() + era_id: &EraId, + ) -> Result, FatalStorageError> { + let txn = self.block_store.checkout_ro()?; + txn.read(*era_id).map_err(FatalStorageError::from) } - /// Retrieves single block by height by looking it up in the index and returning it. - fn get_block_by_height( + /// Retrieves a set of transactions, along with their potential finalized approvals. 
+ #[allow(clippy::type_complexity)] + fn get_transactions_with_finalized_approvals<'a>( &self, - tx: &mut Tx, - height: u64, - ) -> Result, LmdbExtError> { - self.block_height_index - .get(&height) - .and_then(|block_hash| self.get_single_block(tx, block_hash).transpose()) - .transpose() + transaction_hashes: impl Iterator, + ) -> Result>)>; 1]>, FatalStorageError> + { + let ro_txn = self.block_store.checkout_ro()?; + + transaction_hashes + .map(|transaction_hash| { + Self::get_transaction_with_finalized_approvals(&ro_txn, transaction_hash) + }) + .collect() } - /// Retrieves single switch block header by era ID by looking it up in the index and returning - /// it. - fn get_switch_block_header_by_era_id( - &self, - tx: &mut Tx, - era_id: EraId, - ) -> Result, LmdbExtError> { - self.switch_block_era_id_index - .get(&era_id) - .and_then(|block_hash| self.get_single_block_header(tx, block_hash).transpose()) - .transpose() + pub(crate) fn put_executed_block( + &mut self, + transaction_config: TransactionConfig, + block: &Block, + approvals_hashes: &ApprovalsHashes, + execution_results: HashMap, + ) -> Result { + let mut txn = self.block_store.checkout_rw()?; + let era_id = block.era_id(); + let block_utilization_score = block.block_utilization(transaction_config.clone()); + let has_hit_slot_limit = block.has_hit_slot_capacity(transaction_config.clone()); + let block_hash = txn.write(block)?; + let _ = txn.write(approvals_hashes)?; + let block_info = BlockHashHeightAndEra::new(block_hash, block.height(), block.era_id()); + + let utilization = if has_hit_slot_limit { + debug!("Block is at slot capacity, using slot utilization score"); + block_utilization_score + } else if execution_results.is_empty() { + 0u64 + } else { + let total_gas_utilization = { + let total_gas_limit: U512 = execution_results + .values() + .map(|results| match results { + ExecutionResult::V1(v1_result) => match v1_result { + ExecutionResultV1::Failure { cost, .. 
} => *cost, + ExecutionResultV1::Success { cost, .. } => *cost, + }, + ExecutionResult::V2(v2_result) => v2_result.limit.value(), + }) + .sum(); + + let consumed: u64 = total_gas_limit.as_u64(); + let block_gas_limit = transaction_config.block_gas_limit; + + Ratio::new(consumed * 100u64, block_gas_limit).to_integer() + }; + debug!("Gas utilization at {total_gas_utilization}"); + + let total_size_utilization = { + let size_used: u64 = execution_results + .values() + .map(|results| { + if let ExecutionResult::V2(result) = results { + result.size_estimate + } else { + 0u64 + } + }) + .sum(); + + let block_size_limit = transaction_config.max_block_size as u64; + Ratio::new(size_used * 100, block_size_limit).to_integer() + }; + + debug!("Storage utilization at {total_size_utilization}"); + + let scores = [ + block_utilization_score, + total_size_utilization, + total_gas_utilization, + ]; + + match scores.iter().max() { + Some(max_utlization) => *max_utlization, + None => { + // This should never happen as we just created the scores vector to find the + // max value + warn!("Unable to determine max utilization, marking 0 utilization"); + 0u64 + } + } + }; + + debug!("Utilization for block is {utilization}"); + + let _ = txn.write(&BlockExecutionResults { + block_info, + exec_results: execution_results, + })?; + txn.commit()?; + + match self.utilization_tracker.get_mut(&era_id) { + Some(block_score) => { + block_score.insert(block.height(), utilization); + } + None => { + let mut block_score = BTreeMap::new(); + block_score.insert(block.height(), utilization); + self.utilization_tracker.insert(era_id, block_score); + } + } + + Ok(true) } - /// Retrieves the highest block from the storage, if one exists. - /// May return an LMDB error. 
- fn get_highest_block( - &self, - txn: &mut Tx, - ) -> Result, LmdbExtError> { - self.block_height_index - .keys() - .last() - .and_then(|&height| self.get_block_by_height(txn, height).transpose()) - .transpose() + /// Handles a [`BlockCompletedAnnouncement`]. + fn handle_mark_block_completed_request( + &mut self, + MarkBlockCompletedRequest { + block_height, + responder, + }: MarkBlockCompletedRequest, + ) -> Result, FatalStorageError> { + let is_new = self.mark_block_complete(block_height)?; + Ok(responder.respond(is_new).ignore()) } - /// Returns vector blocks that satisfy the predicate, starting from the latest one and following - /// the ancestry chain. - fn get_blocks_while( + /// Marks the block at height `block_height` as complete by inserting it + /// into the `completed_blocks` index and storing it to disk. + fn mark_block_complete(&mut self, block_height: u64) -> Result { + let is_new = self.completed_blocks.insert(block_height); + if is_new { + self.persist_completed_blocks()?; + info!( + "Storage: marked block {} complete: {}", + block_height, + self.get_available_block_range() + ); + self.update_chain_height_metrics(); + } else { + debug!( + "Storage: tried to mark already-complete block {} complete", + block_height + ); + } + Ok(is_new) + } + + /// Persists the completed blocks disjoint sequences state to the database. + fn persist_completed_blocks(&mut self) -> Result<(), FatalStorageError> { + let serialized = self + .completed_blocks + .to_bytes() + .map_err(FatalStorageError::UnexpectedSerializationFailure)?; + let mut rw_txn = self.block_store.checkout_rw()?; + rw_txn.write(&StateStore { + key: Cow::Borrowed(COMPLETED_BLOCKS_STORAGE_KEY), + value: serialized, + })?; + rw_txn.commit().map_err(FatalStorageError::from) + } + + /// Retrieves the height of the highest complete block (if any). 
+ pub(crate) fn highest_complete_block_height(&self) -> Option { + self.completed_blocks.highest_sequence().map(Sequence::high) + } + + /// Retrieves the contiguous segment of the block chain starting at the highest known switch + /// block such that the blocks' timestamps cover a duration of at least the max TTL for deploys + /// (a chainspec setting). + /// + /// If storage doesn't hold enough blocks to cover the specified duration, it will still return + /// the highest contiguous segment starting at the highest switch block which it does hold. + pub(crate) fn read_blocks_for_replay_protection( &self, - txn: &mut Tx, - predicate: F, - ) -> Result, LmdbExtError> - where - F: Fn(&Block) -> bool, - { - let mut next_block = self.get_highest_block(txn)?; + ) -> Result, FatalStorageError> { + let ro_txn = self.block_store.checkout_ro()?; + + let timestamp = + match DataReader::::read(&ro_txn, LatestSwitchBlock)? { + Some(last_era_header) => last_era_header + .timestamp() + .saturating_sub(self.max_ttl.value()), + None => Timestamp::now(), + }; + let mut blocks = Vec::new(); - loop { - match next_block { - None => break, - Some(block) if !predicate(&block) => break, - Some(block) => { - next_block = match block.parent() { - None => None, - Some(parent_hash) => self.get_single_block(txn, &parent_hash)?, - }; - blocks.push(block); + for sequence in self.completed_blocks.sequences().iter().rev() { + let hi = sequence.high(); + let low = sequence.low(); + for idx in (low..=hi).rev() { + let maybe_block: Result, BlockStoreError> = ro_txn.read(idx); + match maybe_block { + Ok(Some(block)) => { + let should_continue = block.timestamp() >= timestamp; + blocks.push(block); + if false == should_continue { + return Ok(blocks); + } + } + Ok(None) => { + continue; + } + Err(err) => return Err(FatalStorageError::BlockStoreError(err)), } } } Ok(blocks) } - /// Returns the vector of deploys whose TTL hasn't expired yet. - fn get_finalized_deploys( + /// Returns an executable block. 
+ pub(crate) fn make_executable_block( &self, - ttl: TimeDiff, - ) -> Result, LmdbExtError> { - let mut txn = self.env.begin_ro_txn()?; - // We're interested in deploys whose TTL hasn't expired yet. - let ttl_expired = |block: &Block| block.timestamp().elapsed() < ttl; - let mut deploys = Vec::new(); - for block in self.get_blocks_while(&mut txn, ttl_expired)? { - for deploy_hash in block - .body() - .deploy_hashes() + block_hash: &BlockHash, + ) -> Result, FatalStorageError> { + let (block, transactions) = + match self.read_block_and_finalized_transactions_by_hash(*block_hash)? { + Some(block_and_finalized_transactions) => block_and_finalized_transactions, + None => { + error!( + ?block_hash, + "Storage: unable to make_executable_block for {}", block_hash + ); + return Ok(None); + } + }; + let maybe_finalized_approvals: Option = + self.block_store.checkout_ro()?.read(*block.hash())?; + if let Some(finalized_approvals) = maybe_finalized_approvals { + if transactions.len() != finalized_approvals.approvals_hashes().len() { + error!( + ?block_hash, + "Storage: transaction hashes length mismatch {}", block_hash + ); + return Err(FatalStorageError::ApprovalsHashesLengthMismatch { + block_hash: *block_hash, + expected: transactions.len(), + actual: finalized_approvals.approvals_hashes().len(), + }); + } + for (transaction, hash) in transactions .iter() - .chain(block.body().transfer_hashes()) + .zip(finalized_approvals.approvals_hashes()) { - let deploy_header = self - .get_deploy_header(&mut txn, &deploy_hash)? - .expect("deploy to exist in storage"); - // If block's deploy has already expired, ignore it. - // It may happen that deploy was not expired at the time of proposing a block but it - // is now. 
- if deploy_header.timestamp().elapsed() > ttl { + let computed_hash = transaction.compute_approvals_hash().map_err(|error| { + error!(%error, "failed to serialize approvals"); + FatalStorageError::UnexpectedSerializationFailure(error) + })?; + if computed_hash == hash { continue; } - deploys.push((*deploy_hash, deploy_header)); + // This should be unreachable as the `BlockSynchronizer` should ensure we have the + // correct approvals before it then calls this method. By returning `Ok(None)` the + // node would be stalled at this block, but should eventually sync leap due to lack + // of progress. It would then backfill this block without executing it. + error!(?block_hash, "Storage: transaction with incorrect approvals"); + return Ok(None); } } - Ok(deploys) + + let executable_block = ExecutableBlock::from_block_and_transactions(block, transactions); + info!(%block_hash, "Storage: created {}", executable_block); + Ok(Some(executable_block)) } - /// Retrieves single switch block by era ID by looking it up in the index and returning it. - fn get_switch_block_by_era_id( + /// Retrieves single block and all of its deploys, with the finalized approvals. + /// If any of the deploys can't be found, returns `Ok(None)`. + fn read_block_and_finalized_transactions_by_hash( &self, - tx: &mut Tx, - era_id: EraId, - ) -> Result, LmdbExtError> { - self.switch_block_era_id_index - .get(&era_id) - .and_then(|block_hash| self.get_single_block(tx, block_hash).transpose()) - .transpose() + block_hash: BlockHash, + ) -> Result)>, FatalStorageError> { + let txn = self.block_store.checkout_ro()?; + + let Some(block) = txn.read(block_hash)? 
else { + debug!( + ?block_hash, + "Storage: read_block_and_finalized_transactions_by_hash failed to get block for {}", + block_hash + ); + return Ok(None); + }; + + let Block::V2(block) = block else { + debug!( + ?block_hash, + "Storage: read_block_and_finalized_transactions_by_hash expected block V2 {}", + block_hash + ); + return Ok(None); + }; + + let mut transactions = vec![]; + for (transaction, _) in (self + .get_transactions_with_finalized_approvals(block.all_transactions())?) + .into_iter() + .flatten() + { + transactions.push(transaction); + } + + Ok(Some((block, transactions))) } - /// Retrieves the state root hashes from storage to check the integrity of the trie store. - pub fn get_state_root_hashes_for_trie_check(&self) -> Option> { - let mut blake_hashes: Vec = Vec::new(); - let txn = - self.env.begin_ro_txn().ok().unwrap_or_else(|| { - panic!("could not open storage transaction for trie store check") - }); - let mut cursor = txn - .open_ro_cursor(self.block_header_db) - .ok() - .unwrap_or_else(|| panic!("could not create cursor for trie store check")); - for (_, raw_val) in cursor.iter() { - let header: BlockHeader = lmdb_ext::deserialize(raw_val).ok()?; - let blake_hash = Blake2bHash::from(*header.state_root_hash()); - blake_hashes.push(blake_hash); + /// Retrieves the highest complete block header from storage, if one exists. May return an + /// LMDB error. + fn get_highest_complete_block_header(&self) -> Result, FatalStorageError> { + let highest_complete_block_height = match self.completed_blocks.highest_sequence() { + Some(sequence) => sequence.high(), + None => { + return Ok(None); + } + }; + + let txn = self.block_store.checkout_ro()?; + txn.read(highest_complete_block_height) + .map_err(FatalStorageError::from) + } + + /// Retrieves the highest block header with metadata from storage, if one exists. May return an + /// LMDB error. 
+ fn get_highest_complete_block_header_with_signatures( + &self, + txn: &(impl DataReader + DataReader), + ) -> Result, FatalStorageError> { + let highest_complete_block_height = match self.completed_blocks.highest_sequence() { + Some(sequence) => sequence.high(), + None => { + return Ok(None); + } + }; + + let block_header: Option = txn.read(highest_complete_block_height)?; + match block_header { + Some(header) => { + let block_header_hash = header.block_hash(); + let block_signatures: BlockSignatures = match txn.read(block_header_hash)? { + Some(signatures) => signatures, + None => match &header { + BlockHeader::V1(header) => BlockSignatures::V1(BlockSignaturesV1::new( + header.block_hash(), + header.era_id(), + )), + BlockHeader::V2(header) => BlockSignatures::V2(BlockSignaturesV2::new( + header.block_hash(), + header.height(), + header.era_id(), + self.chain_name_hash, + )), + }, + }; + Ok(Some(BlockHeaderWithSignatures::new( + header, + block_signatures, + ))) + } + None => Ok(None), } + } - blake_hashes.sort(); - blake_hashes.dedup(); + /// Retrieves the highest complete block from storage, if one exists. May return an LMDB error. + pub fn get_highest_complete_block(&self) -> Result, FatalStorageError> { + let highest_complete_block_height = match self.highest_complete_block_height() { + Some(height) => height, + None => { + return Ok(None); + } + }; - Some(blake_hashes) + let txn = self.block_store.checkout_ro()?; + txn.read(highest_complete_block_height) + .map_err(FatalStorageError::from) } - /// Retrieves a single block header in a separate transaction from storage. - fn get_single_block_header( + /// Retrieves a single block header in a given transaction from storage + /// respecting the possible restriction on whether the block + /// should be present in the available blocks index. 
+ fn get_single_block_header_restricted( &self, - tx: &mut Tx, + txn: &impl DataReader, block_hash: &BlockHash, - ) -> Result, LmdbExtError> { - let block_header: BlockHeader = match tx.get_value(self.block_header_db, &block_hash)? { - Some(block_header) => block_header, + only_from_available_block_range: bool, + ) -> Result, FatalStorageError> { + let block_header = match txn.read(*block_hash)? { + Some(header) => header, None => return Ok(None), }; - let found_block_header_hash = block_header.hash(); - if found_block_header_hash != *block_hash { - return Err(LmdbExtError::BlockHeaderNotStoredUnderItsHash { - queried_block_hash: *block_hash, - found_block_header_hash, - }); - }; + + if !(self.should_return_block(block_header.height(), only_from_available_block_range)) { + return Ok(None); + } + Ok(Some(block_header)) } - // Retrieves a block header to handle a network request. - pub fn read_block_header_by_hash( + /// Returns headers of complete blocks of the trusted block's ancestors, back to the most + /// recent switch block. + fn get_trusted_ancestor_headers( &self, - block_hash: &BlockHash, - ) -> Result, LmdbExtError> { - let mut txn = self.env.begin_ro_txn()?; - let maybe_block_header = self.get_single_block_header(&mut txn, block_hash)?; - drop(txn); - Ok(maybe_block_header) + txn: &impl DataReader, + trusted_block_header: &BlockHeader, + ) -> Result>, FatalStorageError> { + if trusted_block_header.is_genesis() { + return Ok(Some(vec![])); + } + let available_block_range = self.get_available_block_range(); + let mut result = vec![]; + let mut current_trusted_block_header = trusted_block_header.clone(); + loop { + let parent_hash = current_trusted_block_header.parent_hash(); + let parent_block_header: BlockHeader = match txn.read(*parent_hash)? 
{ + Some(block_header) => block_header, + None => { + warn!(%parent_hash, "block header not found"); + return Ok(None); + } + }; + + if !available_block_range.contains(parent_block_header.height()) { + debug!(%parent_hash, "block header not complete"); + return Ok(None); + } + + result.push(parent_block_header.clone()); + if parent_block_header.is_switch_block() || parent_block_header.is_genesis() { + break; + } + current_trusted_block_header = parent_block_header; + } + Ok(Some(result)) } - /// Retrieves a single block in a separate transaction from storage. - fn get_single_block( + /// Returns headers of all known switch blocks after the trusted block but before + /// highest block, with signatures, plus the signed highest block. + fn get_block_headers_with_signatures( &self, - tx: &mut Tx, + txn: &(impl DataReader + DataReader), + trusted_block_header: &BlockHeader, + highest_block_header_with_signatures: &BlockHeaderWithSignatures, + ) -> Result>, FatalStorageError> { + if trusted_block_header.block_hash() + == highest_block_header_with_signatures + .block_header() + .block_hash() + { + return Ok(Some(vec![])); + } + + let start_era_id: u64 = trusted_block_header.next_block_era_id().into(); + let current_era_id: u64 = highest_block_header_with_signatures + .block_header() + .era_id() + .into(); + + let mut result = vec![]; + + for era_id in start_era_id..current_era_id { + let maybe_block_header: Option = txn.read(EraId::from(era_id))?; + match maybe_block_header { + Some(block_header) => { + let block_signatures = match txn.read(block_header.block_hash())? 
{ + Some(signatures) => signatures, + None => match &block_header { + BlockHeader::V1(header) => BlockSignatures::V1(BlockSignaturesV1::new( + header.block_hash(), + header.era_id(), + )), + BlockHeader::V2(header) => BlockSignatures::V2(BlockSignaturesV2::new( + header.block_hash(), + header.height(), + header.era_id(), + self.chain_name_hash, + )), + }, + }; + result.push(BlockHeaderWithSignatures::new( + block_header, + block_signatures, + )); + } + None => return Ok(None), + } + } + result.push(highest_block_header_with_signatures.clone()); + + Ok(Some(result)) + } + + /// Stores a set of finalized approvals if they are different to the approvals in the original + /// transaction and if they are different to existing finalized approvals if any. + /// + /// Returns `true` if the provided approvals were stored. + fn store_finalized_approvals( + &mut self, + transaction_hash: &TransactionHash, + finalized_approvals: &BTreeSet, + ) -> Result { + let mut txn = self.block_store.checkout_rw()?; + let original_transaction: Transaction = txn.read(*transaction_hash)?.ok_or({ + FatalStorageError::UnexpectedFinalizedApprovals { + transaction_hash: *transaction_hash, + } + })?; + + // Only store the finalized approvals if they are different from the original ones. + let maybe_existing_finalized_approvals: Option> = + txn.read(*transaction_hash)?; + if maybe_existing_finalized_approvals.as_ref() == Some(finalized_approvals) { + return Ok(false); + } + + let original_approvals = original_transaction.approvals(); + if &original_approvals != finalized_approvals { + let _ = txn.write(&TransactionFinalizedApprovals { + transaction_hash: *transaction_hash, + finalized_approvals: finalized_approvals.clone(), + })?; + txn.commit()?; + return Ok(true); + } + + Ok(false) + } + + /// Retrieves successful transfers associated with block. 
+ /// + /// If there is no record of successful transfers for this block, then the list will be built + /// from the execution results and stored to `transfer_db`. The record could have been missing + /// or incorrectly set to an empty collection due to previous synchronization and storage + /// issues. See https://github.com/casper-network/casper-node/issues/4255 and + /// https://github.com/casper-network/casper-node/issues/4268 for further info. + fn get_transfers( + &mut self, block_hash: &BlockHash, - ) -> Result, LmdbExtError> { - let block_header: BlockHeader = match self.get_single_block_header(tx, block_hash)? { - Some(block_header) => block_header, + ) -> Result>, FatalStorageError> { + let mut rw_txn = self.block_store.checkout_rw()?; + let maybe_transfers: Option> = rw_txn.read(*block_hash)?; + if let Some(transfers) = maybe_transfers { + if !transfers.is_empty() { + return Ok(Some(transfers)); + } + } + + let block: Block = match rw_txn.read(*block_hash)? { + Some(block) => block, None => return Ok(None), }; - let block_body: BlockBody = - match tx.get_value(self.block_body_db, block_header.body_hash())? { - Some(block_header) => block_header, + + let deploy_hashes: Vec = match block.clone_body() { + BlockBody::V1(v1) => v1.deploy_and_transfer_hashes().copied().collect(), + BlockBody::V2(v2) => v2 + .all_transactions() + .filter_map(|transaction_hash| match transaction_hash { + TransactionHash::Deploy(deploy_hash) => Some(*deploy_hash), + TransactionHash::V1(_) => None, + }) + .collect(), + }; + + let mut transfers: Vec = vec![]; + for deploy_hash in deploy_hashes { + let transaction_hash = TransactionHash::Deploy(deploy_hash); + let successful_xfers = match rw_txn.read(transaction_hash)? 
{ + Some(exec_result) => successful_transfers(&exec_result), + None => { + error!(%deploy_hash, %block_hash, "should have exec result"); + vec![] + } + }; + transfers.extend(successful_xfers); + } + rw_txn.write(&BlockTransfers { + block_hash: *block_hash, + transfers: transfers.clone(), + })?; + rw_txn.commit()?; + Ok(Some(transfers)) + } + + /// Retrieves a deploy from the deploy store by deploy hash. + fn get_legacy_deploy( + &self, + deploy_hash: DeployHash, + ) -> Result, FatalStorageError> { + let transaction_hash = TransactionHash::from(deploy_hash); + let txn = self.block_store.checkout_ro()?; + let transaction = + match Self::get_transaction_with_finalized_approvals(&txn, &transaction_hash)? { + Some((transaction, maybe_approvals)) => { + if let Some(approvals) = maybe_approvals { + transaction.with_approvals(approvals) + } else { + transaction + } + } None => return Ok(None), }; - let found_block_body_hash = block_body.hash(); - if found_block_body_hash != *block_header.body_hash() { - return Err(LmdbExtError::BlockBodyNotStoredUnderItsHash { - queried_block_body_hash: *block_header.body_hash(), - found_block_body_hash, - }); + + match transaction { + Transaction::Deploy(deploy) => Ok(Some(LegacyDeploy::from(deploy))), + transaction @ Transaction::V1(_) => { + let mismatch = VariantMismatch(Box::new((transaction_hash, transaction))); + error!(%mismatch, "failed getting legacy deploy"); + Err(FatalStorageError::from(mismatch)) + } } - let block = Block::new_from_header_and_body(block_header, block_body); - Ok(Some(block)) } - /// Retrieves a set of deploys from storage. - fn get_deploys( + /// Retrieves a transaction by transaction ID. 
+ fn get_transaction_by_id( &self, - tx: &mut Tx, - deploy_hashes: &[DeployHash], - ) -> Result>, LmdbExtError> { - deploy_hashes - .iter() - .map(|deploy_hash| tx.get_value(self.deploy_db, deploy_hash)) - .collect() + transaction_id: TransactionId, + ) -> Result, FatalStorageError> { + let transaction_hash = transaction_id.transaction_hash(); + let txn = self.block_store.checkout_ro()?; + + let maybe_transaction: Option = txn.read(transaction_hash)?; + let transaction: Transaction = match maybe_transaction { + None => return Ok(None), + Some(transaction) if transaction.fetch_id() == transaction_id => { + return Ok(Some(transaction)); + } + Some(transaction) => transaction, + }; + + let finalized_approvals = match txn.read(transaction_hash)? { + None => return Ok(None), + Some(approvals) => approvals, + }; + + match ( + transaction_id.approvals_hash(), + finalized_approvals, + transaction, + ) { + (approvals_hash, finalized_approvals, Transaction::Deploy(deploy)) => { + match ApprovalsHash::compute(&finalized_approvals) { + Ok(computed_approvals_hash) if computed_approvals_hash == approvals_hash => { + let deploy = deploy.with_approvals(finalized_approvals); + Ok(Some(Transaction::from(deploy))) + } + Ok(_computed_approvals_hash) => Ok(None), + Err(error) => { + error!(%error, "failed to calculate finalized deploy approvals hash"); + Err(FatalStorageError::UnexpectedSerializationFailure(error)) + } + } + } + (approvals_hash, finalized_approvals, Transaction::V1(transaction_v1)) => { + match ApprovalsHash::compute(&finalized_approvals) { + Ok(computed_approvals_hash) if computed_approvals_hash == approvals_hash => { + let transaction_v1 = transaction_v1.with_approvals(finalized_approvals); + Ok(Some(Transaction::from(transaction_v1))) + } + Ok(_computed_approvals_hash) => Ok(None), + Err(error) => { + error!(%error, "failed to calculate finalized transaction approvals hash"); + Err(FatalStorageError::UnexpectedSerializationFailure(error)) + } + } + } + } + } + + /// 
Retrieves a single transaction along with its finalized approvals. + #[allow(clippy::type_complexity)] + fn get_transaction_with_finalized_approvals( + txn: &(impl DataReader + + DataReader>), + transaction_hash: &TransactionHash, + ) -> Result>)>, FatalStorageError> { + let maybe_transaction: Option = txn.read(*transaction_hash)?; + let transaction = match maybe_transaction { + Some(transaction) => transaction, + None => return Ok(None), + }; + + let maybe_finalized_approvals: Option> = txn.read(*transaction_hash)?; + let ret = (transaction, maybe_finalized_approvals); + + Ok(Some(ret)) } - /// Returns the deploy's header. - fn get_deploy_header( + pub(crate) fn get_sync_leap( &self, - txn: &mut Tx, - deploy_hash: &DeployHash, - ) -> Result, LmdbExtError> { - let maybe_deploy: Option = txn.get_value(self.deploy_db, deploy_hash)?; - Ok(maybe_deploy.map(|deploy| deploy.header().clone())) + sync_leap_identifier: SyncLeapIdentifier, + ) -> Result, FatalStorageError> { + let block_hash = sync_leap_identifier.block_hash(); + + let txn = self.block_store.checkout_ro()?; + + let only_from_available_block_range = true; + let trusted_block_header = match self.get_single_block_header_restricted( + &txn, + &block_hash, + only_from_available_block_range, + )? { + Some(trusted_block_header) => trusted_block_header, + None => return Ok(FetchResponse::NotFound(sync_leap_identifier)), + }; + + let trusted_ancestor_headers = + match self.get_trusted_ancestor_headers(&txn, &trusted_block_header)? 
{ + Some(trusted_ancestor_headers) => trusted_ancestor_headers, + None => return Ok(FetchResponse::NotFound(sync_leap_identifier)), + }; + + // highest block and signatures are not requested + if sync_leap_identifier.trusted_ancestor_only() { + return Ok(FetchResponse::Fetched(SyncLeap { + trusted_ancestor_only: true, + trusted_block_header, + trusted_ancestor_headers, + block_headers_with_signatures: vec![], + })); + } + + let highest_complete_block_header = + match self.get_highest_complete_block_header_with_signatures(&txn)? { + Some(highest_complete_block_header) => highest_complete_block_header, + None => return Ok(FetchResponse::NotFound(sync_leap_identifier)), + }; + + if highest_complete_block_header + .block_header() + .era_id() + .saturating_sub(trusted_block_header.era_id().into()) + > self.recent_era_count.into() + { + return Ok(FetchResponse::NotProvided(sync_leap_identifier)); + } + + if highest_complete_block_header.block_header().height() == 0 { + return Ok(FetchResponse::Fetched(SyncLeap { + trusted_ancestor_only: false, + trusted_block_header, + trusted_ancestor_headers: vec![], + block_headers_with_signatures: vec![], + })); + } + + // The `highest_complete_block_header` and `trusted_block_header` are both within the + // highest complete block range, thus so are all the switch blocks between them. + if let Some(block_headers_with_signatures) = self.get_block_headers_with_signatures( + &txn, + &trusted_block_header, + &highest_complete_block_header, + )? { + return Ok(FetchResponse::Fetched(SyncLeap { + trusted_ancestor_only: false, + trusted_block_header, + trusted_ancestor_headers, + block_headers_with_signatures, + })); + } + + Ok(FetchResponse::NotFound(sync_leap_identifier)) } - /// Retrieves deploy metadata associated with deploy. + /// Creates a serialized representation of a `FetchResponse` and the resulting message. + /// + /// If the given item is `Some`, returns a serialization of `FetchResponse::Fetched`. 
If + /// enabled, the given serialization is also added to the in-memory pool. /// - /// If no deploy metadata is stored for the specific deploy, an empty metadata instance will be - /// created, but not stored. - fn get_deploy_metadata( + /// If the given item is `None`, returns a non-pooled serialization of + /// `FetchResponse::NotFound`. + fn update_pool_and_send( + &mut self, + effect_builder: EffectBuilder, + sender: NodeId, + serialized_id: &[u8], + fetch_response: FetchResponse, + ) -> Result, FatalStorageError> + where + REv: From> + Send, + T: FetchItem, + { + let serialized = fetch_response + .to_serialized() + .map_err(FatalStorageError::StoredItemSerializationFailure)?; + let shared: Arc<[u8]> = serialized.into(); + + if self.enable_mem_deduplication && fetch_response.was_found() { + self.serialized_item_pool + .put(serialized_id.into(), Arc::downgrade(&shared)); + } + + let message = Message::new_get_response_from_serialized(::TAG, shared); + Ok(effect_builder.send_message(sender, message).ignore()) + } + + /// Returns `true` if the storage should attempt to return a block. Depending on the + /// `only_from_available_block_range` flag it should be unconditional or restricted by the + /// available block range. + fn should_return_block( &self, - tx: &mut Tx, - deploy_hash: &DeployHash, - ) -> Result, Error> { - Ok(tx.get_value(self.deploy_metadata_db, deploy_hash)?) + block_height: u64, + only_from_available_block_range: bool, + ) -> bool { + if only_from_available_block_range { + self.get_available_block_range().contains(block_height) + } else { + true + } } - /// Retrieves transfers associated with block. - /// - /// If no transfers are stored for the block, an empty transfers instance will be - /// created, but not stored. 
- fn get_transfers( + pub(crate) fn get_available_block_range(&self) -> AvailableBlockRange { + match self.completed_blocks.highest_sequence() { + Some(&seq) => seq.into(), + None => AvailableBlockRange::RANGE_0_0, + } + } + + pub(crate) fn get_highest_orphaned_block_header(&self) -> HighestOrphanedBlockResult { + match self.completed_blocks.highest_sequence() { + None => HighestOrphanedBlockResult::MissingHighestSequence, + Some(seq) => { + let low = seq.low(); + let txn = self + .block_store + .checkout_ro() + .expect("Could not start transaction for lmdb"); + + match txn.read(low) { + Ok(Some(block)) => match block { + Block::V1(_) | Block::V2(_) => { + HighestOrphanedBlockResult::Orphan(block.clone_header()) + } + }, + Ok(None) | Err(_) => HighestOrphanedBlockResult::MissingHeader(low), + } + } + } + } + + /// Returns `count` highest switch block headers, sorted from lowest (oldest) to highest. + pub(crate) fn read_highest_switch_block_headers( &self, - tx: &mut Tx, - block_hash: &BlockHash, - ) -> Result>, Error> { - Ok(tx.get_value(self.transfer_db, block_hash)?) + count: u64, + ) -> Result, FatalStorageError> { + let txn = self.block_store.checkout_ro()?; + if let Some(last_era_header) = + DataReader::::read(&txn, LatestSwitchBlock)? + { + let mut result = vec![]; + let last_era_id = last_era_header.era_id(); + result.push(last_era_header); + for era_id in (0..last_era_id.value()) + .rev() + .take(count as usize) + .map(EraId::new) + { + match txn.read(era_id)? { + None => break, + Some(header) => result.push(header), + } + } + result.reverse(); + debug!( + ?result, + "Storage: read_highest_switch_block_headers count:({})", count + ); + Ok(result) + } else { + Ok(vec![]) + } } - /// Retrieves finality signatures for a block with a given block hash - fn get_finality_signatures( + fn read_block_execution_results_or_chunk( &self, - tx: &mut Tx, - block_hash: &BlockHash, - ) -> Result, Error> { - Ok(tx.get_value(self.block_metadata_db, block_hash)?) 
+ request: &BlockExecutionResultsOrChunkId, + ) -> Result, FatalStorageError> { + let txn = self.block_store.checkout_ro()?; + + let execution_results = match Self::get_execution_results(&txn, request.block_hash())? { + Some(execution_results) => execution_results + .into_iter() + .map(|(_deploy_hash, execution_result)| execution_result) + .collect(), + None => return Ok(None), + }; + Ok(BlockExecutionResultsOrChunk::new( + *request.block_hash(), + request.chunk_index(), + execution_results, + )) } - /// Get the lmdb environment - #[cfg(test)] - pub(crate) fn env(&self) -> &Environment { - &self.env + fn get_default_block_signatures(&self, block: &Block) -> BlockSignatures { + match block { + Block::V1(block) => BlockSignaturesV1::new(*block.hash(), block.era_id()).into(), + Block::V2(block) => BlockSignaturesV2::new( + *block.hash(), + block.height(), + block.era_id(), + self.chain_name_hash, + ) + .into(), + } } -} -/// Inserts the relevant entries to the two indices. -/// -/// If a duplicate entry is encountered, neither index is updated and an error is returned. 
-fn insert_to_block_header_indices( - block_height_index: &mut BTreeMap, - switch_block_era_id_index: &mut BTreeMap, - block_header: &BlockHeader, -) -> Result<(), Error> { - let block_hash = block_header.hash(); - if let Some(first) = block_height_index.get(&block_header.height()) { - if *first != block_hash { - return Err(Error::DuplicateBlockIndex { - height: block_header.height(), - first: *first, - second: block_hash, - }); + fn update_chain_height_metrics(&self) { + if let Some(metrics) = self.metrics.as_ref() { + if let Some(sequence) = self.completed_blocks.highest_sequence() { + let highest_available_block: i64 = sequence.high().try_into().unwrap_or(i64::MIN); + let lowest_available_block: i64 = sequence.low().try_into().unwrap_or(i64::MIN); + metrics.chain_height.set(highest_available_block); + metrics.highest_available_block.set(highest_available_block); + metrics.lowest_available_block.set(lowest_available_block); + } } } - if block_header.is_switch_block() { - match switch_block_era_id_index.entry(block_header.era_id()) { - Entry::Vacant(entry) => { - let _ = entry.insert(block_hash); - } - Entry::Occupied(entry) => { - if *entry.get() != block_hash { - return Err(Error::DuplicateEraIdIndex { - era_id: block_header.era_id(), - first: *entry.get(), - second: block_hash, - }); + pub(crate) fn read_block_header_by_hash( + &self, + block_hash: &BlockHash, + ) -> Result, FatalStorageError> { + let ro_txn = self.block_store.checkout_ro()?; + + ro_txn.read(*block_hash).map_err(FatalStorageError::from) + } + + fn get_execution_results( + txn: &(impl DataReader + DataReader), + block_hash: &BlockHash, + ) -> Result>, FatalStorageError> { + let block = txn.read(*block_hash)?; + + let block_body = match block { + Some(block) => block.take_body(), + None => return Ok(None), + }; + + let transaction_hashes: Vec = match block_body { + BlockBody::V1(v1) => v1 + .deploy_and_transfer_hashes() + .map(TransactionHash::from) + .collect(), + BlockBody::V2(v2) => 
v2.all_transactions().copied().collect(), + }; + let mut execution_results = vec![]; + for transaction_hash in transaction_hashes { + match txn.read(transaction_hash)? { + None => { + debug!( + %block_hash, + %transaction_hash, + "retrieved block but execution result for given transaction is absent" + ); + return Ok(None); + } + Some(execution_result) => { + execution_results.push((transaction_hash, execution_result)); } } } + Ok(Some(execution_results)) } - let _ = block_height_index.insert(block_header.height(), block_hash); - Ok(()) -} - -/// On-disk storage configuration. -#[derive(Clone, DataSize, Debug, Deserialize, Serialize)] -#[serde(deny_unknown_fields)] -pub struct Config { - /// The path to the folder where any files created or read by the storage component will exist. - /// - /// If the folder doesn't exist, it and any required parents will be created. - pub path: PathBuf, - /// The maximum size of the database to use for the block store. - /// - /// The size should be a multiple of the OS page size. - max_block_store_size: usize, - /// The maximum size of the database to use for the deploy store. - /// - /// The size should be a multiple of the OS page size. - max_deploy_store_size: usize, - /// The maximum size of the database to use for the deploy metadata store. - /// - /// The size should be a multiple of the OS page size. - max_deploy_metadata_store_size: usize, - /// The maximum size of the database to use for the component state store. - /// - /// The size should be a multiple of the OS page size. - max_state_store_size: usize, -} + #[allow(clippy::type_complexity)] + fn get_execution_results_with_transaction_headers( + txn: &(impl DataReader + + DataReader + + DataReader), + block_hash: &BlockHash, + ) -> Result>, FatalStorageError> + { + let execution_results = match Self::get_execution_results(txn, block_hash)? 
{ + Some(execution_results) => execution_results, + None => return Ok(None), + }; -impl Default for Config { - fn default() -> Self { - Config { - // No one should be instantiating a config with storage set to default. - path: "/dev/null".into(), - max_block_store_size: DEFAULT_MAX_BLOCK_STORE_SIZE, - max_deploy_store_size: DEFAULT_MAX_DEPLOY_STORE_SIZE, - max_deploy_metadata_store_size: DEFAULT_MAX_DEPLOY_METADATA_STORE_SIZE, - max_state_store_size: DEFAULT_MAX_STATE_STORE_SIZE, + let mut ret = Vec::with_capacity(execution_results.len()); + for (transaction_hash, execution_result) in execution_results { + match txn.read(transaction_hash)? { + None => { + error!( + %block_hash, + %transaction_hash, + "missing transaction" + ); + return Ok(None); + } + Some(Transaction::Deploy(deploy)) => ret.push(( + transaction_hash, + deploy.take_header().into(), + execution_result, + )), + Some(Transaction::V1(transaction_v1)) => { + ret.push((transaction_hash, (&transaction_v1).into(), execution_result)) + } + }; } + Ok(Some(ret)) } -} -impl Config { - /// Returns a default `Config` suitable for tests, along with a `TempDir` which must be kept - /// alive for the duration of the test since its destructor removes the dir from the filesystem. 
- #[cfg(test)] - pub(crate) fn default_for_tests() -> (Self, TempDir) { - let tempdir = tempfile::tempdir().expect("should get tempdir"); - let path = tempdir.path().join("lmdb"); - - let config = Config { - path, - ..Default::default() + fn get_era_utilization_score( + &mut self, + era_id: EraId, + block_height: u64, + block_utilization: u64, + ) -> Option<(u64, u64, u64)> { + let ret = match self.utilization_tracker.get_mut(&era_id) { + Some(utilization) => { + utilization.entry(block_height).or_insert(block_utilization); + + let era_utilization = utilization.values().sum(); + let block_count = utilization.keys().len() as u64; + let total_blocks_for_era = match era_id.predecessor() { + Some(previous_era) => { + let previous_switch_block_height = + match self.get_switch_block_by_era_id(&previous_era) { + Ok(Some(block)) => block.height(), + Ok(None) | Err(_) => return None, + }; + // Determine expected number of blocks from the block_height + // minus the height of the previous switch block + // sw-e1 -> b1 b2 b3 b4 sw-e2 + // 11 12 13 14 15 16 + // answer: 5 (16-11) + block_height.saturating_sub(previous_switch_block_height) + } + // Genesis case + None => block_height, + }; + + Some((era_utilization, block_count, total_blocks_for_era)) + } + None => { + let mut utilization = BTreeMap::new(); + utilization.insert(block_height, block_utilization); + + self.utilization_tracker.insert(era_id, utilization); + + let block_count = 1u64; + let total_blocks_for_era = block_count; + Some((block_utilization, block_count, total_blocks_for_era)) + } }; - (config, tempdir) + + self.utilization_tracker + .retain(|key_era_id, _| key_era_id.value() + 2 >= era_id.value()); + + ret } } -impl Display for Event { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - match self { - Event::StorageRequest(req) => req.fmt(f), - Event::StateStoreRequest(req) => req.fmt(f), +/// Decodes an item's ID, typically from an incoming request. 
+fn decode_item_id(raw: &[u8]) -> Result +where + T: FetchItem, +{ + bincode::deserialize(raw).map_err(GetRequestError::MalformedIncomingItemId) +} + +fn should_move_storage_files_to_network_subdir( + root: &Path, + file_names: &[&str], +) -> Result { + let mut files_found = vec![]; + let mut files_not_found = vec![]; + + for file_name in file_names { + let file_path = root.join(file_name); + + if file_path.exists() { + files_found.push(file_path); + } else { + files_not_found.push(file_path); } } + + let should_move_files = files_found.len() == file_names.len(); + + if !should_move_files && !files_found.is_empty() { + error!( + "found storage files: {:?}, missing storage files: {:?}", + files_found, files_not_found + ); + + return Err(FatalStorageError::MissingStorageFiles { + missing_files: files_not_found, + }); + } + + Ok(should_move_files) } -// Legacy code follows. -// -// The functionality about for requests directly from the incoming network was previously present in -// the validator reactor's routing code. It is slated for an overhaul, but for the time being the -// code below provides a backwards-compatible interface for this functionality. DO NOT EXPAND, RELY -// ON OR BUILD UPON THIS CODE. +fn move_storage_files_to_network_subdir( + root: &Path, + subdir: &Path, + file_names: &[&str], +) -> Result<(), FatalStorageError> { + file_names + .iter() + .map(|file_name| { + let source_path = root.join(file_name); + let dest_path = subdir.join(file_name); + fs::rename(&source_path, &dest_path).map_err(|original_error| { + FatalStorageError::UnableToMoveFile { + source_path, + dest_path, + original_error, + } + }) + }) + .collect::, FatalStorageError>>()?; -impl Storage { - // Retrieves a deploy from the deploy store to handle a legacy network request. - pub fn handle_legacy_direct_deploy_request(&self, deploy_hash: DeployHash) -> Option { - // NOTE: This function was formerly called `get_deploy_for_peer` and used to create an event - // directly. 
This caused a dependency of the storage component on networking functionality, - // which is highly problematic. For this reason, the code to send a reply has been moved to - // the dispatching code (which should be removed anyway) as to not taint the interface. - self.env - .begin_ro_txn() - .map_err(Into::into) - .and_then(|mut tx| tx.get_value(self.deploy_db, &deploy_hash)) - .expect("legacy direct deploy request failed") + info!( + "moved files: {:?} from: {:?} to: {:?}", + file_names, root, subdir + ); + Ok(()) +} + +/// Returns all `Transform::WriteTransfer`s from the execution effects if this is an +/// `ExecutionResult::Success`, or an empty `Vec` if `ExecutionResult::Failure`. +fn successful_transfers(execution_result: &ExecutionResult) -> Vec { + let mut all_transfers: Vec = vec![]; + match execution_result { + ExecutionResult::V1(ExecutionResultV1::Success { effect, .. }) => { + for transform_v1 in &effect.transforms { + if let execution_result_v1::TransformKindV1::WriteTransfer(transfer_v1) = + &transform_v1.transform + { + all_transfers.push(Transfer::V1(transfer_v1.clone())); + } + } + } + ExecutionResult::V2(execution_result_v2) => { + if execution_result_v2.error_message.is_none() { + for transfer in &execution_result_v2.transfers { + all_transfers.push(transfer.clone()); + } + } + // else no-op: we only record transfers from successful executions. + } + ExecutionResult::V1(ExecutionResultV1::Failure { .. }) => { + // No-op: we only record transfers from successful executions. + } } + all_transfers } // Testing code. The functions below allow direct inspection of the storage component and should // only ever be used when writing tests. #[cfg(test)] impl Storage { - /// Directly returns a deploy from internal store. + /// Directly returns a transaction with finalized approvals from internal store. /// /// # Panics /// /// Panics if an IO error occurs. 
- pub fn get_deploy_by_hash(&self, deploy_hash: DeployHash) -> Option { - let mut txn = self - .env - .begin_ro_txn() + pub(crate) fn get_transaction_with_finalized_approvals_by_hash( + &self, + transaction_hash: &TransactionHash, + ) -> Option<(Transaction, Option>)> { + let txn = self + .block_store + .checkout_ro() .expect("could not create RO transaction"); - txn.get_value(self.deploy_db, &deploy_hash) - .expect("could not retrieve value from storage") + Self::get_transaction_with_finalized_approvals(&txn, transaction_hash) + .expect("could not retrieve a transaction with finalized approvals from storage") } - /// Reads all known deploy hashes from the internal store. + /// Directly returns an execution result from internal store. /// /// # Panics /// - /// Panics on any IO or db corruption error. - pub fn get_all_deploy_hashes(&self) -> BTreeSet { - let txn = self - .env - .begin_ro_txn() - .expect("could not create RO transaction"); - - let mut cursor = txn - .open_ro_cursor(self.deploy_db) - .expect("could not create cursor"); - - cursor - .iter() - .map(|(raw_key, _)| { - DeployHash::new(Digest::try_from(raw_key).expect("malformed deploy hash in DB")) - }) - .collect() + /// Panics if an IO error occurs. + pub(crate) fn read_execution_result( + &self, + transaction_hash: &TransactionHash, + ) -> Option { + self.block_store + .checkout_ro() + .expect("could not create RO transaction") + .read(*transaction_hash) + .expect("could not retrieve execution result from storage") } - /// Get the switch block for a specified era number in a read-only LMDB database transaction. + /// Directly returns a transaction from internal store. /// /// # Panics /// - /// Panics on any IO or db corruption error. - pub fn transactional_get_switch_block_by_era_id( + /// Panics if an IO error occurs. 
+ pub(crate) fn get_transaction_by_hash( &self, - switch_block_era_num: u64, - ) -> Option { - let mut read_only_lmdb_transaction = self - .env() - .begin_ro_txn() - .expect("Could not start read only transaction for lmdb"); - let switch_block = self - .get_switch_block_by_era_id( - &mut read_only_lmdb_transaction, - EraId::from(switch_block_era_num), - ) - .expect("LMDB panicked trying to get switch block"); - read_only_lmdb_transaction - .commit() - .expect("Could not commit transaction"); - switch_block + transaction_hash: TransactionHash, + ) -> Option { + self.block_store + .checkout_ro() + .expect("could not create RO transaction") + .read(transaction_hash) + .expect("could not retrieve value from storage") } -} -/// Checks the integrity of the block body database and purges stale entries. -fn initialize_block_body_db( - env: &Environment, - block_body_db: &Database, - deleted_block_hashes: &HashSet<&[u8]>, -) -> Result<(), LmdbExtError> { - info!("initializing block body database"); - let mut txn = env.begin_rw_txn()?; - let mut cursor = txn.open_rw_cursor(*block_body_db)?; - - for (raw_key, raw_val) in cursor.iter() { - if deleted_block_hashes.contains(raw_key) { - cursor.del(WriteFlags::empty())?; - continue; - } + pub(crate) fn read_block_by_hash(&self, block_hash: BlockHash) -> Option { + self.block_store + .checkout_ro() + .expect("could not create RO transaction") + .read(block_hash) + .expect("could not retrieve value from storage") + } - let body: BlockBody = lmdb_ext::deserialize(raw_val)?; - assert_eq!( - raw_key, - body.hash().as_ref(), - "found corrupt block body in database" - ); + pub(crate) fn read_block_by_height(&self, height: u64) -> Option { + self.block_store + .checkout_ro() + .expect("could not create RO transaction") + .read(height) + .expect("could not retrieve value from storage") } - drop(cursor); - txn.commit()?; + pub(crate) fn read_highest_block(&self) -> Option { + self.block_store + .checkout_ro() + .expect("could not create RO 
transaction") + .read(Tip) + .expect("could not retrieve value from storage") + } - info!("block body database initialized"); - Ok(()) -} + pub(crate) fn read_highest_block_header(&self) -> Option { + self.block_store + .checkout_ro() + .expect("could not create RO transaction") + .read(Tip) + .expect("could not retrieve value from storage") + } -/// Checks the integrity of the block metadata database and purges stale entries. -fn initialize_block_metadata_db( - env: &Environment, - block_metadata_db: &Database, - deleted_block_hashes: &HashSet<&[u8]>, -) -> Result<(), LmdbExtError> { - info!("initializing block metadata database"); - let mut txn = env.begin_rw_txn()?; - let mut cursor = txn.open_rw_cursor(*block_metadata_db)?; - - for (raw_key, raw_val) in cursor.iter() { - if deleted_block_hashes.contains(raw_key) { - cursor.del(WriteFlags::empty())?; - continue; - } + pub(crate) fn get_finality_signatures_for_block( + &self, + block_hash: BlockHash, + ) -> Option { + let txn = self + .block_store + .checkout_ro() + .expect("could not create RO transaction"); + let res: Option = txn + .read(block_hash) + .expect("could not retrieve value from storage"); + txn.commit().expect("Could not commit transaction"); + res + } - let signatures: BlockSignatures = lmdb_ext::deserialize(raw_val)?; + pub(crate) fn read_switch_block_by_era_id(&self, era_id: EraId) -> Option { + self.block_store + .checkout_ro() + .expect("could not create RO transaction") + .read(era_id) + .expect("could not retrieve value from storage") + } - // Signature verification could be very slow process - // It iterates over every signature and verifies them. - match signatures.verify() { - Ok(_) => assert_eq!( - raw_key, - signatures.block_hash.as_ref(), - "Corruption in block_metadata_db" - ), - Err(error) => panic!( - "Error: {} in signature verification. 
Corruption in database", - error - ), + pub(crate) fn read_block_with_signatures_by_hash( + &self, + block_hash: BlockHash, + only_from_available_block_range: bool, + ) -> Option { + let ro_txn = self + .block_store + .checkout_ro() + .expect("should create ro txn"); + let block: Block = ro_txn.read(block_hash).expect("should read block")?; + + if !(self.should_return_block(block.height(), only_from_available_block_range)) { + return None; + } + if block_hash != *block.hash() { + error!( + queried_block_hash = ?block_hash, + actual_block_hash = ?block.hash(), + "block not stored under hash" + ); + debug_assert_eq!(&block_hash, block.hash()); + return None; + } + let block_signatures = ro_txn + .read(block_hash) + .expect("should read block signatures") + .unwrap_or_else(|| self.get_default_block_signatures(&block)); + if block_signatures.is_verified().is_err() { + error!(?block, "invalid block signatures for block"); + debug_assert!(block_signatures.is_verified().is_ok()); + return None; } + Some(BlockWithSignatures::new(block, block_signatures)) } - drop(cursor); - txn.commit()?; - - info!("block metadata database initialized"); - Ok(()) -} - -/// Purges stale entries from the deploy metadata database. -fn initialize_deploy_metadata_db( - env: &Environment, - deploy_metadata_db: &Database, - deleted_block_hashes: &HashSet, -) -> Result<(), LmdbExtError> { - info!("initializing deploy metadata database"); - let mut txn = env.begin_rw_txn()?; - let mut cursor = txn.open_rw_cursor(*deploy_metadata_db)?; - - for (raw_key, raw_val) in cursor.iter() { - let mut deploy_metadata: DeployMetadata = lmdb_ext::deserialize(raw_val)?; - let len_before = deploy_metadata.execution_results.len(); - - deploy_metadata.execution_results = deploy_metadata - .execution_results - .drain() - .filter(|(block_hash, _)| !deleted_block_hashes.contains(block_hash)) - .collect(); - - // If the deploy's execution results are now empty, we just remove them entirely. 
- if deploy_metadata.execution_results.is_empty() { - cursor.del(WriteFlags::empty())?; - } else if len_before != deploy_metadata.execution_results.len() { - let buffer = lmdb_ext::serialize(&deploy_metadata)?; - cursor.put(&raw_key, &buffer, WriteFlags::empty())?; + pub(crate) fn read_block_with_signatures_by_height( + &self, + height: u64, + only_from_available_block_range: bool, + ) -> Option { + if !(self.should_return_block(height, only_from_available_block_range)) { + return None; } + let ro_txn = self + .block_store + .checkout_ro() + .expect("should create ro txn"); + let block: Block = ro_txn.read(height).expect("should read block")?; + let hash = block.hash(); + let block_signatures = ro_txn + .read(*hash) + .expect("should read block signatures") + .unwrap_or_else(|| self.get_default_block_signatures(&block)); + Some(BlockWithSignatures::new(block, block_signatures)) } - drop(cursor); - txn.commit()?; + pub(crate) fn read_highest_block_with_signatures( + &self, + only_from_available_block_range: bool, + ) -> Option { + let ro_txn = self + .block_store + .checkout_ro() + .expect("should create ro txn"); + let highest_block = if only_from_available_block_range { + let height = self.highest_complete_block_height()?; + ro_txn.read(height).expect("should read block")? + } else { + DataReader::::read(&ro_txn, Tip).expect("should read block")? 
+ }; + let hash = highest_block.hash(); + let block_signatures = match ro_txn.read(*hash).expect("should read block signatures") { + Some(signatures) => signatures, + None => self.get_default_block_signatures(&highest_block), + }; + Some(BlockWithSignatures::new(highest_block, block_signatures)) + } - info!("deploy metadata database initialized"); - Ok(()) + pub(crate) fn read_execution_info( + &self, + transaction_hash: TransactionHash, + ) -> Option { + let txn = self + .block_store + .checkout_ro() + .expect("should create ro txn"); + let block_hash_and_height: BlockHashHeightAndEra = txn + .read(transaction_hash) + .expect("should read block hash and height")?; + let execution_result = txn + .read(transaction_hash) + .expect("should read execution result"); + Some(ExecutionInfo { + block_hash: block_hash_and_height.block_hash, + block_height: block_hash_and_height.block_height, + execution_result, + }) + } + + pub(crate) fn delete_block_utilization_score_by_block_hash(&mut self, block_hash: BlockHash) { + let txn = self.block_store.checkout_ro().expect("mut get read only"); + let block_header: BlockHeader = txn + .read(block_hash) + .expect("should read") + .expect("must have header"); + + let era = block_header.era_id(); + let height = block_header.height(); + + let era_score = self + .utilization_tracker + .get_mut(&era) + .expect("must have era tracker"); + era_score + .remove(&height) + .expect("must have previous entry for this height"); + } } diff --git a/node/src/components/storage/config.rs b/node/src/components/storage/config.rs new file mode 100644 index 0000000000..75573d4a58 --- /dev/null +++ b/node/src/components/storage/config.rs @@ -0,0 +1,82 @@ +use std::path::PathBuf; + +use datasize::DataSize; +use serde::{Deserialize, Serialize}; +#[cfg(test)] +use tempfile::TempDir; + +const GIB: usize = 1024 * 1024 * 1024; +const DEFAULT_MAX_BLOCK_STORE_SIZE: usize = 450 * GIB; +const DEFAULT_MAX_DEPLOY_STORE_SIZE: usize = 300 * GIB; +const 
DEFAULT_MAX_DEPLOY_METADATA_STORE_SIZE: usize = 300 * GIB; +const DEFAULT_MAX_STATE_STORE_SIZE: usize = 10 * GIB; + +/// On-disk storage configuration. +#[derive(Clone, DataSize, Debug, Deserialize, Serialize)] +#[serde(deny_unknown_fields)] +pub struct Config { + /// The path to the folder where any files created or read by the storage component will exist. + /// + /// If the folder doesn't exist, it and any required parents will be created. + pub path: PathBuf, + /// The maximum size of the database to use for the block store. + /// + /// The size should be a multiple of the OS page size. + pub max_block_store_size: usize, + /// The maximum size of the database to use for the deploy store. + /// + /// The size should be a multiple of the OS page size. + pub max_deploy_store_size: usize, + /// The maximum size of the database to use for the deploy metadata store. + /// + /// The size should be a multiple of the OS page size. + pub max_deploy_metadata_store_size: usize, + /// The maximum size of the database to use for the component state store. + /// + /// The size should be a multiple of the OS page size. + pub max_state_store_size: usize, + /// Whether or not memory deduplication is enabled. + pub enable_mem_deduplication: bool, + /// How many loads before memory duplication checks for dead references. + pub mem_pool_prune_interval: u16, +} + +impl Default for Config { + fn default() -> Self { + Config { + // No one should be instantiating a config with storage set to default. 
+ path: "/dev/null".into(), + max_block_store_size: DEFAULT_MAX_BLOCK_STORE_SIZE, + max_deploy_store_size: DEFAULT_MAX_DEPLOY_STORE_SIZE, + max_deploy_metadata_store_size: DEFAULT_MAX_DEPLOY_METADATA_STORE_SIZE, + max_state_store_size: DEFAULT_MAX_STATE_STORE_SIZE, + enable_mem_deduplication: true, + mem_pool_prune_interval: 4096, + } + } +} + +impl Config { + /// Returns a `Config` suitable for tests, along with a `TempDir` which must be kept alive for + /// the duration of the test since its destructor removes the dir from the filesystem. + /// + /// `size_multiplier` is used to multiply the default DB sizes. + #[cfg(test)] + pub(crate) fn new_for_tests(size_multiplier: u8) -> (Self, TempDir) { + if size_multiplier == 0 { + panic!("size_multiplier cannot be zero"); + } + let tempdir = tempfile::tempdir().expect("should get tempdir"); + let path = tempdir.path().join("lmdb"); + + let config = Config { + path, + max_block_store_size: 1024 * 1024 * size_multiplier as usize, + max_deploy_store_size: 1024 * 1024 * size_multiplier as usize, + max_deploy_metadata_store_size: 1024 * 1024 * size_multiplier as usize, + max_state_store_size: 12 * 1024 * size_multiplier as usize, + ..Default::default() + }; + (config, tempdir) + } +} diff --git a/node/src/components/storage/disjoint_sequences.rs b/node/src/components/storage/disjoint_sequences.rs new file mode 100644 index 0000000000..3e10e803f1 --- /dev/null +++ b/node/src/components/storage/disjoint_sequences.rs @@ -0,0 +1,580 @@ +use std::fmt::{self, Display, Formatter}; + +use casper_types::{ + bytesrepr::{self, FromBytes, ToBytes}, + AvailableBlockRange, +}; +use datasize::DataSize; +use itertools::Itertools; +use tracing::trace; + +/// The outcome of an attempt to insert a value into a `Sequence`. +#[derive(Copy, Clone, Debug, Eq, PartialEq)] +enum InsertOutcome { + /// The value was greater than `Sequence::high + 1` and wasn't inserted. 
+ TooHigh, + /// The value was inserted at the high end, and is now `Sequence::high`. + ExtendedHigh, + /// The value was a duplicate; inserted and didn't affect the high or low values. + AlreadyInSequence, + /// The value was inserted at the low end, and is now `Sequence::low`. + ExtendedLow, + /// The value was less than `Sequence::low - 1` and wasn't inserted. + TooLow, +} + +/// Represents a continuous sequence of `u64`s. +#[derive(Copy, Clone, Debug, Eq, PartialEq, DataSize, Ord, PartialOrd)] +pub(crate) struct Sequence { + /// The upper bound (inclusive) of the sequence. + high: u64, + /// The lower bound (inclusive) of the sequence. + low: u64, +} + +impl Sequence { + /// Constructs a new sequence using the bounds of `a` and `b`. + /// + /// `low` and `high` will be automatically determined. + pub(super) fn new(a: u64, b: u64) -> Self { + let (low, high) = if a <= b { (a, b) } else { (b, a) }; + Sequence { low, high } + } + + /// Constructs a new sequence containing only `value`. + fn single(value: u64) -> Self { + Sequence { + high: value, + low: value, + } + } + + /// Tries to insert `value` into the sequence. + /// + /// Returns an outcome which indicates where the value was inserted if at all. + fn try_insert(&mut self, value: u64) -> InsertOutcome { + if value == self.high + 1 { + self.high = value; + InsertOutcome::ExtendedHigh + } else if value >= self.low && value <= self.high { + InsertOutcome::AlreadyInSequence + } else if value + 1 == self.low { + self.low = value; + InsertOutcome::ExtendedLow + } else if value > self.high { + InsertOutcome::TooHigh + } else { + InsertOutcome::TooLow + } + } + + /// Returns the inclusive high end of the sequence. + pub(crate) fn high(&self) -> u64 { + self.high + } + + /// Returns the inclusive low end of the sequence. 
+ pub(crate) fn low(&self) -> u64 { + self.low + } +} + +impl From for AvailableBlockRange { + fn from(sequence: Sequence) -> Self { + AvailableBlockRange::new(sequence.low(), sequence.high()) + } +} + +/// Represents a collection of disjoint sequences of `u64`s. +/// +/// The collection is kept ordered from high to low, and each entry represents a discrete portion of +/// the space from [0, u64::MAX] with a gap of at least 1 between each. +/// +/// The collection is ordered this way to optimize insertion for the normal use case: adding +/// monotonically increasing values representing the latest block height. +/// +/// As values are inserted, if two separate sequences become contiguous, they are merged into a +/// single sequence. +/// +/// For example, if `sequences` contains `[9,9], [7,3]` and `8` is inserted, then `sequences` will +/// be reduced to `[9,3]`. +#[derive(Default, Debug, DataSize)] +#[cfg_attr(test, derive(Clone))] +pub(super) struct DisjointSequences { + sequences: Vec, +} + +impl DisjointSequences { + /// Constructs disjoint sequences from one initial sequence. + /// + /// Note: Use [`Default::default()`] to create an empty set of sequences. + pub(super) fn new(initial_sequence: Sequence) -> Self { + DisjointSequences { + sequences: vec![initial_sequence], + } + } + + /// Inserts `value` into the appropriate sequence and merges sequences if required. + /// + /// Returns `true` if `value` was not previously contained in the disjoint sequences. + /// + /// Note, this method is efficient where `value` is one greater than the current highest value. + /// However, it's not advisable to use this method in a loop to rebuild a `DisjointSequences` + /// from a large collection of randomly-ordered values. In that case, it is very much more + /// efficient to use `DisjointSequences::from(mut input: Vec)`. 
+ pub(super) fn insert(&mut self, value: u64) -> bool { + let mut iter_mut = self.sequences.iter_mut().enumerate().peekable(); + + // The index at which to add a new `Sequence` containing only `value`. + let mut maybe_insertion_index = Some(0); + // The index of a `Sequence` to be removed due to the insertion of `value` causing two + // consecutive sequences to become contiguous. + let mut maybe_removal_index = None; + let mut added_new_value = true; + while let Some((index, sequence)) = iter_mut.next() { + match sequence.try_insert(value) { + InsertOutcome::ExtendedHigh => { + // We should exit the loop, and we don't need to add a new sequence; we only + // need to check for merges of sequences when we get `ExtendedLow` since we're + // iterating the sequences from high to low. + maybe_insertion_index = None; + break; + } + InsertOutcome::AlreadyInSequence => { + // We should exit the loop, and we don't need to add a new sequence. + maybe_insertion_index = None; + added_new_value = false; + break; + } + InsertOutcome::TooHigh => { + // We should exit the loop and we need to add a new sequence at this index. + maybe_insertion_index = Some(index); + break; + } + InsertOutcome::TooLow => { + // We need to add a new sequence immediately after this one if this is the last + // sequence. Continue iterating in case this is not the last sequence. + maybe_insertion_index = Some(index + 1); + } + InsertOutcome::ExtendedLow => { + // We should exit the loop, and we don't need to add a new sequence. + maybe_insertion_index = None; + // If the next sequence is now contiguous with this one, update this one's low + // value and set the next sequence to be removed. 
+ if let Some((next_index, next_sequence)) = iter_mut.peek() { + if next_sequence.high + 1 == sequence.low { + sequence.low = next_sequence.low; + maybe_removal_index = Some(*next_index); + } + } + break; + } + }; + } + + if let Some(index_to_insert) = maybe_insertion_index { + self.sequences + .insert(index_to_insert, Sequence::single(value)); + } + + if let Some(index_to_remove) = maybe_removal_index { + let _ = self.sequences.remove(index_to_remove); + } + + trace!(%self, "current state of disjoint sequences"); + added_new_value + } + + /// Returns the highest sequence, or `None` if there are no sequences. + pub(super) fn highest_sequence(&self) -> Option<&Sequence> { + self.sequences.first() + } + + /// Returns all the sequences, if any. + pub(super) fn sequences(&self) -> &Vec { + &self.sequences + } + + /// Reduces the sequence(s), keeping all entries below and including `max_value`. If + /// `max_value` is not already included in a sequence, it will not be added. + /// + /// If the current highest value is lower than `max_value`, or if there are no sequences, this + /// has no effect. + pub(super) fn truncate(&mut self, max_value: u64) { + self.sequences.retain_mut(|sequence| { + if sequence.high <= max_value { + // Keep this sequence unchanged. + return true; + } + + if sequence.low > max_value { + // Delete this entire sequence. + return false; + } + + // This sequence contains `max_value`, so keep the sequence, but reduce its high value. + sequence.high = max_value; + true + }) + } +} +#[cfg(test)] +impl DisjointSequences { + /// Inserts multiple values produced by the given iterator. + fn extend>(&mut self, iter: T) { + iter.into_iter().for_each(|height| { + self.insert(height); + }) + } + + /// Returns `true` if `value` exists in the disjoint sequences. 
+ fn contains(&self, value: u64) -> bool { + self.sequences + .iter() + .any(|sequence| value >= sequence.low && value <= sequence.high) + } +} + +impl FromBytes for Sequence { + #[inline] + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (high, bytes) = u64::from_bytes(bytes)?; + let (low, bytes) = u64::from_bytes(bytes)?; + + Ok((Sequence { high, low }, bytes)) + } +} + +impl ToBytes for Sequence { + #[inline] + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buf = Vec::new(); + self.write_bytes(&mut buf)?; + Ok(buf) + } + + #[inline] + fn serialized_length(&self) -> usize { + self.high.serialized_length() + self.low.serialized_length() + } + + #[inline] + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.high.write_bytes(writer)?; + self.low.write_bytes(writer)?; + Ok(()) + } +} + +impl FromBytes for DisjointSequences { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + Vec::::from_bytes(bytes) + .map(|(sequences, remainder)| (DisjointSequences { sequences }, remainder)) + } + + #[inline] + fn from_vec(bytes: Vec) -> Result<(Self, Vec), bytesrepr::Error> { + Vec::::from_vec(bytes) + .map(|(sequences, remainder)| (DisjointSequences { sequences }, remainder)) + } +} + +impl ToBytes for DisjointSequences { + #[inline] + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.sequences.to_bytes() + } + + #[inline] + fn serialized_length(&self) -> usize { + self.sequences.serialized_length() + } + + fn into_bytes(self) -> Result, bytesrepr::Error> + where + Self: Sized, + { + self.sequences.into_bytes() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.sequences.write_bytes(writer) + } +} + +/// This impl is provided to allow for efficient re-building of a `DisjointSequences` from a large, +/// randomly-ordered set of values. 
+impl From> for DisjointSequences { + fn from(mut input: Vec) -> Self { + input.sort_unstable(); + + let sequences = input + .drain(..) + .peekable() + .batching(|iter| match iter.next() { + None => None, + Some(low) => { + let mut sequence = Sequence::single(low); + while let Some(i) = iter.peek() { + if *i == sequence.high + 1 { + sequence.high = iter.next().unwrap(); + } + } + Some(sequence) + } + }) + .collect(); + + DisjointSequences { sequences } + } +} + +impl Display for DisjointSequences { + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + let mut iter = self.sequences.iter().peekable(); + while let Some(sequence) = iter.next() { + write!(formatter, "[{}, {}]", sequence.high, sequence.low)?; + if iter.peek().is_some() { + write!(formatter, ", ")?; + } + } + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use std::collections::BTreeSet; + + use rand::{seq::SliceRandom, Rng}; + + use super::*; + + fn new_sequence(a: u64, b: u64) -> Sequence { + let (low, high) = if a <= b { (a, b) } else { (b, a) }; + assert!(low <= high); + Sequence { low, high } + } + + fn assert_matches(actual: &DisjointSequences, expected: &BTreeSet) { + let mut actual_set = BTreeSet::new(); + for sequence in &actual.sequences { + for i in sequence.low..=sequence.high { + assert!(actual_set.insert(i)); + } + } + assert_eq!(&actual_set, expected) + } + + #[test] + fn should_insert_all_u8s_including_duplicates() { + let mut rng = crate::new_rng(); + + let mut disjoint_sequences = DisjointSequences::default(); + let mut expected = BTreeSet::new(); + + while disjoint_sequences.sequences != vec![Sequence { high: 255, low: 0 }] { + let value = rng.gen::() as u64; + let insertion_result = !disjoint_sequences.contains(value); + assert_eq!(insertion_result, disjoint_sequences.insert(value)); + expected.insert(value); + assert_matches(&disjoint_sequences, &expected); + } + } + + #[test] + fn should_extend() { + let to_be_inserted = vec![5_u64, 4, 3, 2, 1]; + let mut expected = 
BTreeSet::new(); + expected.extend(to_be_inserted.clone()); + + let mut disjoint_sequences = DisjointSequences::default(); + disjoint_sequences.extend(to_be_inserted); + assert_matches(&disjoint_sequences, &expected); + + // Extending with empty set should not modify the sequences. + disjoint_sequences.extend(Vec::::new()); + assert_matches(&disjoint_sequences, &expected); + } + + #[test] + fn should_insert_with_no_duplicates() { + const MAX: u64 = 1000; + + let mut rng = crate::new_rng(); + + let mut values = (0..=MAX).collect::>(); + values.shuffle(&mut rng); + + let mut disjoint_sequences = DisjointSequences::default(); + let mut expected = BTreeSet::new(); + + for value in values { + assert!(disjoint_sequences.insert(value)); + expected.insert(value); + assert_matches(&disjoint_sequences, &expected); + } + + assert_eq!( + disjoint_sequences.sequences, + vec![Sequence { high: MAX, low: 0 }] + ); + } + + #[test] + fn should_construct_from_random_set() { + const MAX: u64 = 2_000_000; + + let mut rng = crate::new_rng(); + + let mut values = (0..=MAX).collect::>(); + values.shuffle(&mut rng); + + let disjoint_sequences = DisjointSequences::from(values); + assert_eq!( + disjoint_sequences.sequences, + vec![Sequence { high: MAX, low: 0 }] + ); + } + + #[test] + fn should_get_highest_sequence() { + let mut disjoint_sequences = DisjointSequences::default(); + assert_eq!(disjoint_sequences.highest_sequence(), None); + + disjoint_sequences.extend([1]); + assert_eq!( + disjoint_sequences.highest_sequence(), + Some(&Sequence { low: 1, high: 1 }) + ); + + disjoint_sequences.extend([5, 6]); + assert_eq!( + disjoint_sequences.highest_sequence(), + Some(&Sequence { low: 5, high: 6 }) + ); + + disjoint_sequences.extend([8, 9]); + assert_eq!( + disjoint_sequences.highest_sequence(), + Some(&Sequence { low: 8, high: 9 }) + ); + } + + #[test] + fn should_truncate() { + const SEQ_HIGH: Sequence = Sequence { high: 11, low: 9 }; + const SEQ_MID: Sequence = Sequence { high: 6, low: 6 
}; + const SEQ_LOW: Sequence = Sequence { high: 3, low: 1 }; + let initial_sequences = DisjointSequences { + sequences: vec![SEQ_HIGH, SEQ_MID, SEQ_LOW], + }; + + // Truncate with `max_value` greater or equal to current highest value should be a no-op. + let mut disjoint_sequences = initial_sequences.clone(); + disjoint_sequences.truncate(12); + assert_eq!(disjoint_sequences.sequences, initial_sequences.sequences); + disjoint_sequences.truncate(11); + assert_eq!(disjoint_sequences.sequences, initial_sequences.sequences); + + // Truncate with `max_value` between two sequences should cause the higher sequences to get + // removed and the lower ones retained unchanged. + disjoint_sequences = initial_sequences.clone(); + disjoint_sequences.truncate(SEQ_HIGH.low - 1); + assert_eq!(disjoint_sequences.sequences, vec![SEQ_MID, SEQ_LOW]); + + disjoint_sequences = initial_sequences.clone(); + disjoint_sequences.truncate(SEQ_MID.high); + assert_eq!(disjoint_sequences.sequences, vec![SEQ_MID, SEQ_LOW]); + + disjoint_sequences = initial_sequences.clone(); + disjoint_sequences.truncate(SEQ_MID.low - 1); + assert_eq!(disjoint_sequences.sequences, vec![SEQ_LOW]); + + disjoint_sequences = initial_sequences.clone(); + disjoint_sequences.truncate(SEQ_LOW.high); + assert_eq!(disjoint_sequences.sequences, vec![SEQ_LOW]); + + // Truncate with `max_value` lower than the lowest value should cause all sequences to get + // removed. + disjoint_sequences = initial_sequences.clone(); + disjoint_sequences.truncate(SEQ_LOW.low - 1); + assert!(disjoint_sequences.sequences.is_empty()); + + // Truncate with `max_value` within a sequence should cause that sequence to get updated, + // any higher sequences to get removed, and any lower ones retained unchanged. 
+ disjoint_sequences = initial_sequences.clone(); + let max_value = SEQ_HIGH.high - 1; + disjoint_sequences.truncate(max_value); + assert_eq!( + disjoint_sequences.sequences, + vec![new_sequence(max_value, SEQ_HIGH.low), SEQ_MID, SEQ_LOW] + ); + + disjoint_sequences = initial_sequences.clone(); + let max_value = SEQ_HIGH.low; + disjoint_sequences.truncate(max_value); + assert_eq!( + disjoint_sequences.sequences, + vec![new_sequence(max_value, SEQ_HIGH.low), SEQ_MID, SEQ_LOW] + ); + + disjoint_sequences = initial_sequences.clone(); + let max_value = SEQ_MID.low; + disjoint_sequences.truncate(max_value); + assert_eq!(disjoint_sequences.sequences, vec![SEQ_MID, SEQ_LOW]); + + disjoint_sequences = initial_sequences.clone(); + let max_value = SEQ_LOW.high - 1; + disjoint_sequences.truncate(max_value); + assert_eq!( + disjoint_sequences.sequences, + vec![new_sequence(max_value, SEQ_LOW.low)] + ); + + disjoint_sequences = initial_sequences; + let max_value = SEQ_LOW.low; + disjoint_sequences.truncate(max_value); + assert_eq!( + disjoint_sequences.sequences, + vec![new_sequence(max_value, SEQ_LOW.low)] + ); + + // Truncate on an empty set of sequences should have no effect. + disjoint_sequences = DisjointSequences::default(); + assert!(disjoint_sequences.sequences.is_empty()); + disjoint_sequences.truncate(100); + assert!(disjoint_sequences.sequences.is_empty()); + } + + #[test] + fn roundtrip_to_bytes() { + let mut disjoint_sequences = DisjointSequences::default(); + + disjoint_sequences.extend([4, 5, 6, 7, 8]); + disjoint_sequences.extend([15, 16, 17, 18, 19, 20]); + + // should be represented logically as [(20 to 15), (8 to 4)] and serialize to a sequence of + // `2u32 20u64 15u64 8u64 4u64`. 
+ + let expected = [ + 0x02, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + ]; + let actual = disjoint_sequences.to_bytes().expect("serialization failed"); + assert_eq!(expected.as_slice(), &actual); + + let expected_inner_state = disjoint_sequences.sequences; + let (restored, remainder) = + DisjointSequences::from_bytes(&actual).expect("deserialization failed"); + assert!(remainder.is_empty()); + + let (restored2, remainder) = + DisjointSequences::from_vec(actual).expect("deserialization failed"); + assert!(remainder.is_empty()); + + assert_eq!(restored.sequences, expected_inner_state); + assert_eq!(restored2.sequences, expected_inner_state); + } +} diff --git a/node/src/components/storage/error.rs b/node/src/components/storage/error.rs new file mode 100644 index 0000000000..9ba2fcf5f5 --- /dev/null +++ b/node/src/components/storage/error.rs @@ -0,0 +1,196 @@ +use std::{fmt::Debug, io, path::PathBuf}; + +use casper_binary_port::RecordId; +use thiserror::Error; +use tracing::error; + +use casper_types::{ + bytesrepr, crypto, BlockBody, BlockHash, BlockHeader, BlockValidationError, DeployHash, Digest, + EraId, FinalitySignature, FinalitySignatureId, TransactionHash, +}; + +use crate::types::VariantMismatch; +use casper_storage::block_store::BlockStoreError; + +/// A fatal storage component error. +/// +/// An error of this kinds indicates that storage is corrupted or otherwise irrecoverably broken, at +/// least for the moment. It should usually be followed by swift termination of the node. +#[derive(Debug, Error)] +pub enum FatalStorageError { + /// Failure to create the root database directory. + #[error("failed to create database directory `{}`: {}", .0.display(), .1)] + CreateDatabaseDirectory(PathBuf, io::Error), + /// Found a duplicate switch-block-at-era-id index entry. 
+ #[error("duplicate entries for switch block at era id {era_id}: {first} / {second}")] + DuplicateEraIdIndex { + /// Era ID at which duplicate was found. + era_id: EraId, + /// First block hash encountered at `era_id`. + first: BlockHash, + /// Second block hash encountered at `era_id`. + second: BlockHash, + }, + /// An internal DB error - blocks should be overwritten. + #[error("failed overwriting block")] + FailedToOverwriteBlock, + /// Record specified in raw request has not been found in the storage module. + #[error("unable to find db for record: {0}")] + DatabaseNotFound(RecordId), + /// Filesystem error while trying to move file. + #[error("unable to move file {source_path} to {dest_path}: {original_error}")] + UnableToMoveFile { + /// The path to the file that should have been moved. + source_path: PathBuf, + /// The path where the file should have been moved to. + dest_path: PathBuf, + /// The original `io::Error` from `fs::rename`. + original_error: io::Error, + }, + /// Mix of missing and found storage files. + #[error("expected files to exist: {missing_files:?}.")] + MissingStorageFiles { + /// The files that were not be found in the storage directory. + missing_files: Vec, + }, + /// Error when validating a block. + #[error(transparent)] + BlockValidation(#[from] BlockValidationError), + /// A block header was not stored under its hash. + #[error( + "Block header not stored under its hash. \ + Queried block hash bytes: {queried_block_hash_bytes:x?}, \ + Found block header hash bytes: {found_block_header_hash:x?}, \ + Block header: {block_header}" + )] + BlockHeaderNotStoredUnderItsHash { + /// The queried block hash. + queried_block_hash_bytes: Vec, + /// The actual header of the block hash. + found_block_header_hash: BlockHash, + /// The block header found in storage. + block_header: Box, + }, + /// Block body did not have a block header. + #[error( + "No block header corresponding to block body found in LMDB. 
\ + Block body hash: {block_body_hash:?}, \ + Block body: {block_body:?}" + )] + NoBlockHeaderForBlockBody { + /// The block body hash. + block_body_hash: Digest, + /// The block body. + block_body: Box, + }, + /// Could not verify finality signatures for block. + #[error("{0} in signature verification. Database is corrupted.")] + SignatureVerification(crypto::Error), + /// Corrupted block signature index. + #[error( + "Block signatures not indexed by their block hash. \ + Key bytes in LMDB: {raw_key:x?}, \ + Block hash bytes in record: {block_hash_bytes:x?}" + )] + CorruptedBlockSignatureIndex { + /// The key in the block signature index. + raw_key: Vec, + /// The block hash of the signatures found in the index. + block_hash_bytes: Vec, + }, + /// Switch block does not contain era end. + #[error("switch block does not contain era end: {0:?}")] + InvalidSwitchBlock(Box), + /// A block body was found to have more parts than expected. + #[error( + "Found an unexpected part of a block body in the database: \ + {part_hash:?}" + )] + UnexpectedBlockBodyPart { + /// The block body with the issue. + block_body_hash: Digest, + /// The hash of the superfluous body part. + part_hash: Digest, + }, + /// Failed to serialize an item that was found in local storage. + #[error("failed to serialized stored item")] + StoredItemSerializationFailure(#[source] bincode::Error), + /// We tried to store finalized approvals for a nonexistent transaction. + #[error("Tried to store FinalizedApprovals for a nonexistent transaction {transaction_hash}")] + UnexpectedFinalizedApprovals { + /// The missing transaction hash. + transaction_hash: TransactionHash, + }, + /// `ToBytes` serialization failure of an item that should never fail to serialize. + #[error("unexpected serialization failure: {0}")] + UnexpectedSerializationFailure(bytesrepr::Error), + /// `ToBytes` deserialization failure of an item that should never fail to serialize. 
+ #[error("unexpected deserialization failure: {0}")] + UnexpectedDeserializationFailure(bytesrepr::Error), + /// Stored finalized approvals hashes count doesn't match number of deploys. + #[error( + "stored finalized approvals hashes count doesn't match number of deploys: \ + block hash: {block_hash}, expected: {expected}, actual: {actual}" + )] + ApprovalsHashesLengthMismatch { + /// The block hash. + block_hash: BlockHash, + /// The number of deploys in the block. + expected: usize, + /// The number of approvals hashes. + actual: usize, + }, + /// V1 execution results hashmap doesn't have exactly one entry. + #[error( + "stored v1 execution results doesn't have exactly one entry: deploy: {deploy_hash}, number \ + of entries: {results_length}" + )] + InvalidExecutionResultsV1Length { + /// The deploy hash. + deploy_hash: DeployHash, + /// The number of execution results. + results_length: usize, + }, + /// Error initializing metrics. + #[error("failed to initialize metrics for storage: {0}")] + Prometheus(#[from] prometheus::Error), + /// Type mismatch indicating programmer error. + #[error(transparent)] + VariantMismatch(#[from] VariantMismatch), + /// BlockStoreError + #[error(transparent)] + BlockStoreError(#[from] BlockStoreError), + /// BlockStoreError + #[error("unexpected record id {0}")] + UnexpectedRecordId(RecordId), +} + +impl From> for FatalStorageError { + fn from(err: Box) -> Self { + Self::BlockValidation(*err) + } +} + +/// An error that may occur when handling a get request. +/// +/// Wraps a fatal error, callers should check whether the variant is of the fatal or non-fatal kind. +#[derive(Debug, Error)] +pub(super) enum GetRequestError { + /// A fatal error occurred. + #[error(transparent)] + Fatal(#[from] FatalStorageError), + /// Failed to serialized an item ID on an incoming item request. 
+ #[error("failed to deserialize incoming item id")] + MalformedIncomingItemId(#[source] bincode::Error), + #[error( + "id information not matching the finality signature: \ + requested id: {requested_id},\ + signature: {finality_signature}" + )] + FinalitySignatureIdMismatch { + // the ID requested + requested_id: Box, + // the finality signature read from storage + finality_signature: Box, + }, +} diff --git a/node/src/components/storage/event.rs b/node/src/components/storage/event.rs new file mode 100644 index 0000000000..8f02790c9a --- /dev/null +++ b/node/src/components/storage/event.rs @@ -0,0 +1,62 @@ +use std::fmt::{self, Display, Formatter}; + +use derive_more::From; +use serde::Serialize; +use static_assertions::const_assert; + +use crate::effect::{ + incoming::NetRequestIncoming, + requests::{MakeBlockExecutableRequest, MarkBlockCompletedRequest, StorageRequest}, +}; + +const _STORAGE_EVENT_SIZE: usize = size_of::(); +const_assert!(_STORAGE_EVENT_SIZE <= 32); + +/// A storage component event. +#[derive(Debug, From, Serialize)] +#[repr(u8)] +pub(crate) enum Event { + /// Storage request. + #[from] + StorageRequest(Box), + /// Incoming net request. + NetRequestIncoming(Box), + /// Mark block completed request. + #[from] + MarkBlockCompletedRequest(MarkBlockCompletedRequest), + /// Make block executable request. 
+ #[from] + MakeBlockExecutableRequest(Box), +} + +impl Display for Event { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self { + Event::StorageRequest(req) => req.fmt(f), + Event::NetRequestIncoming(incoming) => incoming.fmt(f), + Event::MarkBlockCompletedRequest(req) => req.fmt(f), + Event::MakeBlockExecutableRequest(req) => req.fmt(f), + } + } +} + +impl From for Event { + #[inline] + fn from(incoming: NetRequestIncoming) -> Self { + Event::NetRequestIncoming(Box::new(incoming)) + } +} + +impl From for Event { + #[inline] + fn from(request: StorageRequest) -> Self { + Event::StorageRequest(Box::new(request)) + } +} + +impl From for Event { + #[inline] + fn from(request: MakeBlockExecutableRequest) -> Self { + Event::MakeBlockExecutableRequest(Box::new(request)) + } +} diff --git a/node/src/components/storage/lmdb_ext.rs b/node/src/components/storage/lmdb_ext.rs deleted file mode 100644 index a2cca5dbeb..0000000000 --- a/node/src/components/storage/lmdb_ext.rs +++ /dev/null @@ -1,173 +0,0 @@ -//! LMDB extensions. -//! -//! Various traits and helper functions to extend the lower level LMDB functions. Unifies -//! lower-level storage errors from lmdb and serialization issues. -//! -//! ## Serialization -//! -//! The module also centralizes settings and methods for serialization for all parts of storage. -//! -//! Serialization errors are unified into a generic, type erased `std` error to allow for easy -//! interchange of the serialization format if desired. - -use crate::{crypto::hash::Digest, types::BlockHash}; -use lmdb::{Database, RwTransaction, Transaction, WriteFlags}; -use serde::{de::DeserializeOwned, Serialize}; -use thiserror::Error; - -/// Error wrapper for lower-level storage errors. -/// -/// Used to classify storage errors, allowing more accurate reporting on potential issues and -/// crashes. Indicates how to proceed (clearing storage entirely or just restarting) in most cases. 
-/// -/// Note that accessing a storage with an incompatible version of this software is also considered a -/// case of corruption. -#[derive(Debug, Error)] -pub enum LmdbExtError { - /// The internal database is corrupted and can probably not be salvaged. - #[error("internal storage corrupted: {0}")] - LmdbCorrupted(lmdb::Error), - /// The data stored inside the internal database is corrupted or formatted wrong. - #[error("internal data corrupted: {0}")] - DataCorrupted(Box), - /// A resource has been exhausted at runtime, restarting (potentially with different settings) - /// might fix the problem. Storage integrity is still intact. - #[error("storage exhausted resource (but still intact): {0}")] - ResourceExhausted(lmdb::Error), - /// Error neither corruption nor resource exhaustion occurred, likely a programming error. - #[error("unknown LMDB or serialization error, likely from a bug: {0}")] - Other(Box), - /// The internal database is corrupted and can probably not be salvaged. - #[error( - "Block header not stored under its hash. \ - Queried block hash: {queried_block_hash}, \ - Found block header hash: {found_block_header_hash}" - )] - BlockHeaderNotStoredUnderItsHash { - queried_block_hash: BlockHash, - found_block_header_hash: BlockHash, - }, - #[error( - "Block body not stored under the hash in its header. \ - Queried block body hash: {queried_block_body_hash}, \ - Found block body hash: {found_block_body_hash}" - )] - BlockBodyNotStoredUnderItsHash { - queried_block_body_hash: Digest, - found_block_body_hash: Digest, - }, -} - -// Classifies an `lmdb::Error` according to our scheme. This one of the rare cases where we accept a -// blanked `From<>` implementation for error type conversion. 
-impl From for LmdbExtError { - fn from(lmdb_error: lmdb::Error) -> Self { - match lmdb_error { - lmdb::Error::PageNotFound - | lmdb::Error::Corrupted - | lmdb::Error::Panic - | lmdb::Error::VersionMismatch - | lmdb::Error::Invalid - | lmdb::Error::Incompatible => LmdbExtError::LmdbCorrupted(lmdb_error), - - lmdb::Error::MapFull - | lmdb::Error::DbsFull - | lmdb::Error::ReadersFull - | lmdb::Error::TlsFull - | lmdb::Error::TxnFull - | lmdb::Error::CursorFull - | lmdb::Error::PageFull - | lmdb::Error::MapResized => LmdbExtError::ResourceExhausted(lmdb_error), - - lmdb::Error::NotFound - | lmdb::Error::BadRslot - | lmdb::Error::BadTxn - | lmdb::Error::BadValSize - | lmdb::Error::BadDbi - | lmdb::Error::KeyExist - | lmdb::Error::Other(_) => LmdbExtError::Other(Box::new(lmdb_error)), - } - } -} - -/// Additional methods on transaction. -pub(super) trait TransactionExt { - /// Helper function to load a value from a database. - fn get_value, V: DeserializeOwned>( - &mut self, - db: Database, - key: &K, - ) -> Result, LmdbExtError>; -} - -/// Additional methods on write transactions. -pub(super) trait WriteTransactionExt { - /// Helper function to write a value to a database. - /// - /// Returns `true` if the value has actually been written, `false` if the key already existed. - /// - /// Setting `overwrite` to true will cause the value to always be written instead. - fn put_value, V: Serialize>( - &mut self, - db: Database, - key: &K, - value: &V, - overwrite: bool, - ) -> Result; -} - -impl TransactionExt for T -where - T: Transaction, -{ - #[inline] - fn get_value, V: DeserializeOwned>( - &mut self, - db: Database, - key: &K, - ) -> Result, LmdbExtError> { - match self.get(db, key) { - // Deserialization failures are likely due to storage corruption. 
- Ok(raw) => deserialize(raw).map(Some), - Err(lmdb::Error::NotFound) => Ok(None), - Err(err) => Err(err.into()), - } - } -} - -impl WriteTransactionExt for RwTransaction<'_> { - fn put_value, V: Serialize>( - &mut self, - db: Database, - key: &K, - value: &V, - overwrite: bool, - ) -> Result { - let buffer = serialize(value)?; - - let flags = if overwrite { - WriteFlags::empty() - } else { - WriteFlags::NO_OVERWRITE - }; - - match self.put(db, key, &buffer, flags) { - Ok(()) => Ok(true), - // If we did not add the value due to it already existing, just return `false`. - Err(lmdb::Error::KeyExist) => Ok(false), - Err(err) => Err(err.into()), - } - } -} - -/// Deserializes from a buffer. -#[inline(always)] -pub(super) fn deserialize(raw: &[u8]) -> Result { - bincode::deserialize(raw).map_err(|err| LmdbExtError::DataCorrupted(Box::new(err))) -} - -/// Serializes into a buffer. -#[inline(always)] -pub(super) fn serialize(value: &T) -> Result, LmdbExtError> { - bincode::serialize(value).map_err(|err| LmdbExtError::Other(Box::new(err))) -} diff --git a/node/src/components/storage/metrics.rs b/node/src/components/storage/metrics.rs new file mode 100644 index 0000000000..b6ee022b65 --- /dev/null +++ b/node/src/components/storage/metrics.rs @@ -0,0 +1,54 @@ +use prometheus::{self, IntGauge, Registry}; + +use crate::unregister_metric; + +const CHAIN_HEIGHT_NAME: &str = "chain_height"; +const CHAIN_HEIGHT_HELP: &str = "highest complete block (DEPRECATED)"; + +const HIGHEST_AVAILABLE_BLOCK_NAME: &str = "highest_available_block_height"; +const HIGHEST_AVAILABLE_BLOCK_HELP: &str = + "highest height of the available block range (the highest contiguous chain of complete blocks)"; + +const LOWEST_AVAILABLE_BLOCK_NAME: &str = "lowest_available_block_height"; +const LOWEST_AVAILABLE_BLOCK_HELP: &str = + "lowest height of the available block range (the highest contiguous chain of complete blocks)"; + +/// Metrics for the storage component. 
+#[derive(Debug)] +pub struct Metrics { + // deprecated - replaced by `highest_available_block` + pub(super) chain_height: IntGauge, + pub(super) highest_available_block: IntGauge, + pub(super) lowest_available_block: IntGauge, + registry: Registry, +} + +impl Metrics { + /// Constructor of metrics which creates and registers metrics objects for use. + pub(super) fn new(registry: &Registry) -> Result { + let chain_height = IntGauge::new(CHAIN_HEIGHT_NAME, CHAIN_HEIGHT_HELP)?; + let highest_available_block = + IntGauge::new(HIGHEST_AVAILABLE_BLOCK_NAME, HIGHEST_AVAILABLE_BLOCK_HELP)?; + let lowest_available_block = + IntGauge::new(LOWEST_AVAILABLE_BLOCK_NAME, LOWEST_AVAILABLE_BLOCK_HELP)?; + + registry.register(Box::new(chain_height.clone()))?; + registry.register(Box::new(highest_available_block.clone()))?; + registry.register(Box::new(lowest_available_block.clone()))?; + + Ok(Metrics { + chain_height, + highest_available_block, + lowest_available_block, + registry: registry.clone(), + }) + } +} + +impl Drop for Metrics { + fn drop(&mut self) { + unregister_metric!(self.registry, self.chain_height); + unregister_metric!(self.registry, self.highest_available_block); + unregister_metric!(self.registry, self.lowest_available_block); + } +} diff --git a/node/src/components/storage/object_pool.rs b/node/src/components/storage/object_pool.rs new file mode 100644 index 0000000000..cbf7c89770 --- /dev/null +++ b/node/src/components/storage/object_pool.rs @@ -0,0 +1,216 @@ +//! A reference pool for items/objects. +//! +//! Its core responsibility is to deduplicate potentially expensive loads by keeping a weak +//! reference to any loaded object around, so that any load request for an object that is currently +//! in active use can be satisfied using the already existing copy. +//! +//! It differs from a cache in that it does not hold strong references to an item itself -- once an +//! item is no longer used, it will not be kept in the pool for a later request. 
As a consequence +//! the memory pool will never consume significantly more memory than what would otherwise be +//! required by the loaded objects that are in active use anyway and thus has an "infinite" +//! capacity. +use std::{ + borrow::Borrow, + collections::HashMap, + hash::Hash, + sync::{Arc, Weak}, +}; + +use datasize::DataSize; + +/// A pool of items/objects. +/// +/// Maintains a pool of weak references and automatically purges them in configurable intervals. +/// +/// # DataSize +/// +/// Typically shared references like `Arc`s are not counted when using `DataSize`, however +/// `ObjectPool` counts its items in "regular" manner, as it is assumed to be the virtual owner. + +#[derive(Debug)] +pub(super) struct ObjectPool { + /// The actual object pool. + items: HashMap>, + /// Interval for garbage collection, will remove dead references on every n-th `put()`. + garbage_collect_interval: u16, + /// Counts how many objects have been added since the last garbage collect interval. + put_count: u16, +} + +impl ObjectPool { + /// Creates a new object pool. + pub(super) fn new(garbage_collect_interval: u16) -> Self { + Self { + items: HashMap::new(), + garbage_collect_interval, + put_count: 0, + } + } +} + +// Note: There is currently a design issue in the `datasize` crate where it does not gracefully +// handle unsized types like slices, thus the derivation for any implementation of `DataSize +// for Box<[T]>` based on `DataSize for Box` and `DataSize for [T]` is bound to be +// incorrect. +// +// Since we currently only use very few different `T`s for `ObjectPool`, we opt to +// implement it manually here and gain a chance to optimize as well. +impl DataSize for ObjectPool> { + const IS_DYNAMIC: bool = true; + + const STATIC_HEAP_SIZE: usize = 0; + + fn estimate_heap_size(&self) -> usize { + // See https://docs.rs/datasize/0.2.9/src/datasize/std.rs.html#213-224 for details. 
+ let base = self.items.capacity() + * (size_of::>() + size_of::>() + size_of::()); + + base + self + .items + .iter() + .map(|(key, value)| { + // Unfortunately we have to check every instance by upgrading. + let value_size = value.upgrade().map(|v| v.len()).unwrap_or_default(); + key.len() + value_size + }) + .sum::() + } +} + +impl ObjectPool +where + I: Hash + Eq, +{ + /// Stores a serialized object in the pool. + /// + /// At configurable intervals (see `garbage_collect_interval`), the entire pool will be checked + /// and dead references pruned. + pub(super) fn put(&mut self, id: I, item: Weak<[u8]>) { + self.items.insert(id, item); + + if self.put_count >= self.garbage_collect_interval { + self.items.retain(|_, item| item.strong_count() > 0); + + self.put_count = 0; + } + + self.put_count += 1; + } + + /// Retrieves an object from the pool, if present. + pub(super) fn get(&self, id: &Q) -> Option> + where + I: Borrow, + Q: Hash + Eq + ?Sized, + { + self.items.get(id).and_then(Weak::upgrade) + } +} + +#[cfg(test)] +mod tests { + use std::sync::Arc; + + use datasize::DataSize; + + use casper_types::Transaction; + + use super::ObjectPool; + use crate::components::fetcher::FetchItem; + + impl ObjectPool + where + I: DataSize, + { + fn num_entries(&self) -> usize { + self.items.len() + } + } + + #[test] + fn can_load_and_store_items() { + let mut pool: ObjectPool<::Id> = ObjectPool::new(5); + let mut rng = crate::new_rng(); + + let txn1 = Transaction::random(&mut rng); + let txn2 = Transaction::random(&mut rng); + let txn1_id = txn1.fetch_id(); + let txn2_id = txn2.fetch_id(); + let txn1_serialized = bincode::serialize(&txn1).expect("could not serialize first deploy"); + let txn2_serialized = bincode::serialize(&txn2).expect("could not serialize second deploy"); + + let txn1_shared = txn1_serialized.into(); + let txn2_shared = txn2_serialized.into(); + + assert!(pool.get(&txn1_id).is_none()); + assert!(pool.get(&txn2_id).is_none()); + + pool.put(txn1_id, 
Arc::downgrade(&txn1_shared)); + assert!(Arc::ptr_eq( + &pool.get(&txn1_id).expect("did not find d1"), + &txn1_shared + )); + assert!(pool.get(&txn2_id).is_none()); + + pool.put(txn2_id, Arc::downgrade(&txn2_shared)); + assert!(Arc::ptr_eq( + &pool.get(&txn1_id).expect("did not find d1"), + &txn1_shared + )); + assert!(Arc::ptr_eq( + &pool.get(&txn2_id).expect("did not find d1"), + &txn2_shared + )); + } + + #[test] + fn frees_memory_after_reference_loss() { + let mut pool: ObjectPool<::Id> = ObjectPool::new(5); + let mut rng = crate::new_rng(); + + let txn1 = Transaction::random(&mut rng); + let txn1_id = txn1.fetch_id(); + let txn1_serialized = bincode::serialize(&txn1).expect("could not serialize first deploy"); + + let txn1_shared = txn1_serialized.into(); + + assert!(pool.get(&txn1_id).is_none()); + + pool.put(txn1_id, Arc::downgrade(&txn1_shared)); + assert!(Arc::ptr_eq( + &pool.get(&txn1_id).expect("did not find d1"), + &txn1_shared + )); + + drop(txn1_shared); + assert!(pool.get(&txn1_id).is_none()); + } + + #[test] + fn garbage_is_collected() { + let mut pool: ObjectPool<::Id> = ObjectPool::new(5); + let mut rng = crate::new_rng(); + + assert_eq!(pool.num_entries(), 0); + + for i in 0..5 { + let txn = Transaction::random(&mut rng); + let id = txn.fetch_id(); + let serialized = bincode::serialize(&txn).expect("could not serialize first deploy"); + let shared = serialized.into(); + pool.put(id, Arc::downgrade(&shared)); + assert_eq!(pool.num_entries(), i + 1); + drop(shared); + assert_eq!(pool.num_entries(), i + 1); + } + + let txn = Transaction::random(&mut rng); + let id = txn.fetch_id(); + let serialized = bincode::serialize(&txn).expect("could not serialize first deploy"); + let shared = serialized.into(); + pool.put(id, Arc::downgrade(&shared)); + assert_eq!(pool.num_entries(), 1); + drop(shared); + assert_eq!(pool.num_entries(), 1); + } +} diff --git a/node/src/components/storage/tests.rs b/node/src/components/storage/tests.rs index 
62eed7d908..4ec42a91ef 100644 --- a/node/src/components/storage/tests.rs +++ b/node/src/components/storage/tests.rs @@ -1,30 +1,59 @@ //! Unit tests for the storage component. -use std::{borrow::Cow, collections::HashMap}; +use std::{ + collections::{BTreeMap, BTreeSet, HashMap, HashSet}, + convert::TryInto, + fs::{self, File}, + io, + iter::{self}, + path::{Path, PathBuf}, + sync::Arc, +}; -use lmdb::Transaction; +use once_cell::sync::Lazy; use rand::{prelude::SliceRandom, Rng}; -use serde::{de::DeserializeOwned, Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use smallvec::smallvec; -use casper_types::{EraId, ExecutionResult, ProtocolVersion, PublicKey, SecretKey}; +use casper_storage::block_store::{ + types::{ApprovalsHashes, BlockHashHeightAndEra, BlockTransfers}, + BlockStoreProvider, BlockStoreTransaction, DataReader, DataWriter, +}; +use casper_types::{ + execution::{Effects, ExecutionResult, ExecutionResultV2}, + generate_ed25519_keypair, + testing::TestRng, + ApprovalsHash, AvailableBlockRange, Block, BlockHash, BlockHeader, BlockHeaderWithSignatures, + BlockSignatures, BlockSignaturesV2, BlockV2, ChainNameDigest, Chainspec, ChainspecRawBytes, + Deploy, DeployHash, Digest, EraId, ExecutionInfo, FinalitySignature, FinalitySignatureV2, Gas, + InitiatorAddr, ProtocolVersion, PublicKey, SecretKey, TestBlockBuilder, TestBlockV1Builder, + TimeDiff, Transaction, TransactionConfig, TransactionHash, TransactionV1Hash, Transfer, + TransferV2, U512, +}; +use tempfile::tempdir; -use super::{Config, Storage}; +use super::{ + move_storage_files_to_network_subdir, should_move_storage_files_to_network_subdir, Config, + Storage, FORCE_RESYNC_FILE_NAME, +}; use crate::{ - components::storage::lmdb_ext::WriteTransactionExt, - crypto::AsymmetricKeyExt, + components::fetcher::{FetchItem, FetchResponse}, effect::{ - requests::{StateStoreRequest, StorageRequest}, + requests::{MarkBlockCompletedRequest, StorageRequest}, Multiple, }, - testing::{ComponentHarness, 
TestRng, UnitTestEvent}, + storage::TransactionHeader, + testing::{ComponentHarness, UnitTestEvent}, types::{ - Block, BlockHash, BlockHeader, BlockSignatures, Deploy, DeployHash, DeployMetadata, - FinalitySignature, + sync_leap_validation_metadata::SyncLeapValidationMetaData, BlockWithMetadata, + SyncLeapIdentifier, }, - utils::WithDir, + utils::{Loadable, WithDir}, }; +const RECENT_ERA_COUNT: u64 = 7; +const MAX_TTL: TimeDiff = TimeDiff::from_seconds(86400); + fn new_config(harness: &ComponentHarness) -> Config { const MIB: usize = 1024 * 1024; @@ -35,9 +64,119 @@ fn new_config(harness: &ComponentHarness) -> Config { max_deploy_store_size: 50 * MIB, max_deploy_metadata_store_size: 50 * MIB, max_state_store_size: 50 * MIB, + enable_mem_deduplication: true, + mem_pool_prune_interval: 4, } } +fn block_headers_into_heights(block_headers: &[BlockHeader]) -> Vec { + block_headers + .iter() + .map(|block_header| block_header.height()) + .collect() +} + +fn block_headers_with_signatures_into_heights( + block_headers_with_signatures: &[BlockHeaderWithSignatures], +) -> Vec { + block_headers_with_signatures + .iter() + .map(|block_header_with_signatures| block_header_with_signatures.block_header().height()) + .collect() +} + +fn create_sync_leap_test_chain( + non_signed_blocks: &[u64], // indices of blocks to not be signed + include_switch_block_at_tip: bool, + maybe_recent_era_count: Option, // if Some, override default `RECENT_ERA_COUNT` +) -> (Storage, Chainspec, Vec) { + // Test chain: + // S0 S1 B2 B3 S4 B5 B6 S7 B8 B9 S10 B11 B12 + // era 0 | era 1 | era 2 | era 3 | era 4 | era 5 ... + // where + // S - switch block + // B - non-switch block + + // If `include_switch_block_at_tip`, the additional switch block of height 13 will be added at + // the tip of the chain. 
+ let (chainspec, _) = <(Chainspec, ChainspecRawBytes)>::from_resources("local"); + let mut harness = ComponentHarness::default(); + let mut storage = storage_fixture_from_parts( + &harness, + None, + Some(chainspec.protocol_version()), + None, + None, + maybe_recent_era_count, + ); + + let mut trusted_validator_weights = BTreeMap::new(); + + let (validator_secret_key, validator_public_key) = generate_ed25519_keypair(); + trusted_validator_weights.insert(validator_public_key, U512::from(2000000000000u64)); + + let mut blocks: Vec = vec![]; + let block_count = 13 + include_switch_block_at_tip as u64; + (0_u64..block_count).for_each(|height| { + let is_switch = height == 0 || height % 3 == 1; + let era_id = EraId::from(match height { + 0 => 0, + 1 => 1, + _ => (height + 4) / 3, + }); + let parent_hash = if height == 0 { + BlockHash::new(Digest::default()) + } else { + *blocks.get((height - 1) as usize).unwrap().hash() + }; + + let block = TestBlockBuilder::new() + .era(era_id) + .height(height) + .protocol_version(chainspec.protocol_version()) + .parent_hash(parent_hash) + .validator_weights(trusted_validator_weights.clone()) + .switch_block(is_switch) + .build_versioned(&mut harness.rng); + + blocks.push(block); + }); + blocks.iter().for_each(|block| { + assert!(put_block( + &mut harness, + &mut storage, + Arc::new(block.clone()), + )); + + let fs = FinalitySignatureV2::create( + *block.hash(), + block.height(), + block.era_id(), + chainspec.name_hash(), + &validator_secret_key, + ); + assert!(fs.is_verified().is_ok()); + + let mut block_signatures = BlockSignaturesV2::new( + *block.hash(), + block.height(), + block.era_id(), + chainspec.name_hash(), + ); + block_signatures.insert_signature(fs.public_key().clone(), *fs.signature()); + + if !non_signed_blocks.contains(&block.height()) { + assert!(put_block_signatures( + &mut harness, + &mut storage, + block_signatures.into(), + )); + storage.completed_blocks.insert(block.height()); + } + }); + (storage, chainspec, 
blocks) +} + /// Storage component test fixture. /// /// Creates a storage component in a temporary directory. @@ -51,79 +190,114 @@ fn storage_fixture(harness: &ComponentHarness) -> Storage { &WithDir::new(harness.tmp.path(), cfg), None, ProtocolVersion::from_parts(1, 0, 0), + EraId::default(), + "test", + MAX_TTL.into(), + RECENT_ERA_COUNT, + None, + false, + TransactionConfig::default(), ) .expect("could not create storage component fixture") } /// Storage component test fixture. /// -/// Creates a storage component in a temporary directory, but with a hard reset to a specified era. +/// Creates a storage component in a temporary directory. /// /// # Panics /// /// Panics if setting up the storage fixture fails. -fn storage_fixture_with_hard_reset( +fn storage_fixture_from_parts( harness: &ComponentHarness, - reset_era_id: EraId, + hard_reset_to_start_of_era: Option, + protocol_version: Option, + network_name: Option<&str>, + max_ttl: Option, + recent_era_count: Option, ) -> Storage { let cfg = new_config(harness); Storage::new( &WithDir::new(harness.tmp.path(), cfg), - Some(reset_era_id), - ProtocolVersion::from_parts(1, 1, 0), + hard_reset_to_start_of_era, + protocol_version.unwrap_or(ProtocolVersion::V1_0_0), + EraId::default(), + network_name.unwrap_or("test"), + max_ttl.unwrap_or(MAX_TTL).into(), + recent_era_count.unwrap_or(RECENT_ERA_COUNT), + None, + false, + TransactionConfig::default(), + ) + .expect("could not create storage component fixture from parts") +} + +/// Storage component test fixture with force resync enabled. +/// +/// Creates a storage component in a given temporary directory. +/// +/// # Panics +/// +/// Panics if setting up the storage fixture fails. 
+fn storage_fixture_with_force_resync(cfg: &WithDir) -> Storage { + Storage::new( + cfg, + None, + ProtocolVersion::from_parts(1, 0, 0), + EraId::default(), + "test", + MAX_TTL.into(), + RECENT_ERA_COUNT, + None, + true, + TransactionConfig::default(), ) .expect("could not create storage component fixture") } -/// Creates a random block with a specific block height. -fn random_block_at_height(rng: &mut TestRng, height: u64) -> Box { - let mut block = Box::new(Block::random(rng)); - block.set_height(height); - block +/// Storage component test fixture. +/// +/// Creates a storage component in a temporary directory, but with a hard reset to a specified era. +/// +/// # Panics +/// +/// Panics if setting up the storage fixture fails. +fn storage_fixture_with_hard_reset( + harness: &ComponentHarness, + reset_era_id: EraId, +) -> Storage { + storage_fixture_from_parts( + harness, + Some(reset_era_id), + Some(ProtocolVersion::from_parts(1, 1, 0)), + None, + None, + None, + ) } /// Creates 3 random signatures for the given block. 
-fn random_signatures(rng: &mut TestRng, block: &Block) -> BlockSignatures { - let block_hash = *block.hash(); - let era_id = block.header().era_id(); - let mut block_signatures = BlockSignatures::new(block_hash, era_id); +fn random_signatures( + rng: &mut TestRng, + block_hash: BlockHash, + block_height: u64, + era_id: EraId, + chain_name_hash: ChainNameDigest, +) -> BlockSignatures { + let mut block_signatures = + BlockSignaturesV2::new(block_hash, block_height, era_id, chain_name_hash); for _ in 0..3 { let secret_key = SecretKey::random(rng); - let signature = FinalitySignature::new( + let signature = FinalitySignatureV2::create( block_hash, + block_height, era_id, + chain_name_hash, &secret_key, - PublicKey::from(&secret_key), ); - block_signatures.insert_proof(signature.public_key, signature.signature); + block_signatures.insert_signature(signature.public_key().clone(), *signature.signature()); } - block_signatures -} - -/// Requests block header at a specific height from a storage component. -fn get_block_header_at_height( - harness: &mut ComponentHarness, - storage: &mut Storage, - height: u64, -) -> Option { - let response = harness.send_request(storage, |responder| { - StorageRequest::GetBlockHeaderAtHeight { height, responder }.into() - }); - assert!(harness.is_idle()); - response -} - -/// Requests block at a specific height from a storage component. -fn get_block_at_height( - harness: &mut ComponentHarness, - storage: &mut Storage, - height: u64, -) -> Option { - let response = harness.send_request(storage, |responder| { - StorageRequest::GetBlockAtHeight { height, responder }.into() - }); - assert!(harness.is_idle()); - response + block_signatures.into() } /// Loads a block from a storage component. @@ -143,14 +317,13 @@ fn get_block( response } -/// Loads a block's signatures from a storage component. 
-fn get_block_signatures( +fn is_block_stored( harness: &mut ComponentHarness, storage: &mut Storage, block_hash: BlockHash, -) -> Option { +) -> bool { let response = harness.send_request(storage, move |responder| { - StorageRequest::GetBlockSignatures { + StorageRequest::IsBlockStored { block_hash, responder, } @@ -160,15 +333,19 @@ fn get_block_signatures( response } -/// Loads a set of deploys from a storage component. -fn get_deploys( +/// Loads a block header by height from a storage component. +/// Requesting a block header by height is required currently by the RPC +/// component. +fn get_block_header_by_height( harness: &mut ComponentHarness, storage: &mut Storage, - deploy_hashes: Multiple, -) -> Vec> { + block_height: u64, + only_from_available_block_range: bool, +) -> Option { let response = harness.send_request(storage, move |responder| { - StorageRequest::GetDeploys { - deploy_hashes: deploy_hashes.to_vec(), + StorageRequest::GetBlockHeaderByHeight { + block_height, + only_from_available_block_range, responder, } .into() @@ -177,58 +354,139 @@ fn get_deploys( response } -/// Loads a deploy with associated metadata from the storage component. -fn get_deploy_and_metadata( +/// Loads a set of `Transaction`s from a storage component. +/// +/// Applies `into_naive` to all loaded `Transaction`s. 
+fn get_naive_transactions( harness: &mut ComponentHarness, storage: &mut Storage, - deploy_hash: DeployHash, -) -> Option<(Deploy, DeployMetadata)> { - let response = harness.send_request(storage, |responder| { - StorageRequest::GetDeployAndMetadata { - deploy_hash, + transaction_hashes: Multiple, +) -> Vec> { + let response = harness.send_request(storage, move |responder| { + StorageRequest::GetTransactions { + transaction_hashes: transaction_hashes.to_vec(), responder, } .into() }); assert!(harness.is_idle()); response + .into_iter() + .map(|opt_twfa| { + if let Some((transaction, maybe_approvals)) = opt_twfa { + let txn = match maybe_approvals { + None => transaction, + Some(approvals) => transaction.with_approvals(approvals), + }; + Some(txn) + } else { + None + } + }) + .collect() +} + +/// Loads a deploy with associated execution info from the storage component. +/// +/// Any potential finalized approvals are discarded. +fn get_naive_transaction_and_execution_info( + storage: &mut Storage, + transaction_hash: TransactionHash, +) -> Option<(Transaction, Option)> { + let transaction = storage.get_transaction_by_hash(transaction_hash)?; + let execution_info = storage.read_execution_info(transaction.hash()); + Some((transaction, execution_info)) } -/// Requests the highest block from a storage component. -fn get_highest_block( +/// Requests the highest complete block from a storage component. +fn get_highest_complete_block( harness: &mut ComponentHarness, storage: &mut Storage, ) -> Option { let response = harness.send_request(storage, |responder| { - StorageRequest::GetHighestBlock { responder }.into() + StorageRequest::GetHighestCompleteBlock { responder }.into() + }); + assert!(harness.is_idle()); + response +} + +/// Requests the highest complete block header from a storage component. 
+fn get_highest_complete_block_header( + harness: &mut ComponentHarness, + storage: &mut Storage, +) -> Option { + let response = harness.send_request(storage, |responder| { + StorageRequest::GetHighestCompleteBlockHeader { responder }.into() + }); + assert!(harness.is_idle()); + response +} + +/// Get the era ids of multiple transactions. +fn get_transactions_era_ids( + harness: &mut ComponentHarness, + storage: &mut Storage, + transaction_hashes: HashSet, +) -> HashSet { + let response = harness.send_request(storage, |responder| { + StorageRequest::GetTransactionsEraIds { + transaction_hashes, + responder, + } + .into() }); assert!(harness.is_idle()); response } -/// Loads state from the storage component. -fn load_state( +/// Stores a block in a storage component. +fn put_complete_block( harness: &mut ComponentHarness, storage: &mut Storage, - key: Cow<'static, [u8]>, -) -> Option -where - T: DeserializeOwned, -{ - let response: Option> = harness.send_request(storage, move |responder| { - StateStoreRequest::Load { key, responder }.into() + block: Block, +) -> bool { + let block_height = block.height(); + let response = harness.send_request(storage, move |responder| { + StorageRequest::PutBlock { + block: Arc::new(block), + responder, + } + .into() + }); + assert!(harness.is_idle()); + harness.send_request(storage, move |responder| { + MarkBlockCompletedRequest { + block_height, + responder, + } + .into() }); assert!(harness.is_idle()); + response +} - // NOTE: Unfortunately, the deserialization logic is duplicated here from the effect builder. 
- response.map(|raw| bincode::deserialize(&raw).expect("deserialization failed")) +// Mark block complete +fn mark_block_complete( + harness: &mut ComponentHarness, + storage: &mut Storage, + block_height: u64, +) -> bool { + let response = harness.send_request(storage, move |responder| { + MarkBlockCompletedRequest { + block_height, + responder, + } + .into() + }); + assert!(harness.is_idle()); + response } /// Stores a block in a storage component. fn put_block( harness: &mut ComponentHarness, storage: &mut Storage, - block: Box, + block: Arc, ) -> bool { let response = harness.send_request(storage, move |responder| { StorageRequest::PutBlock { block, responder }.into() @@ -254,14 +512,36 @@ fn put_block_signatures( response } -/// Stores a deploy in a storage component. -fn put_deploy( +/// Stores a finality signature in a storage component. +fn put_finality_signature( + harness: &mut ComponentHarness, + storage: &mut Storage, + signature: Box, +) -> bool { + let response = harness.send_request(storage, move |responder| { + StorageRequest::PutFinalitySignature { + signature, + responder, + } + .into() + }); + assert!(harness.is_idle()); + response +} + +/// Stores a `Transaction` in a storage component. 
+fn put_transaction( harness: &mut ComponentHarness, storage: &mut Storage, - deploy: Box, + transaction: &Transaction, ) -> bool { + let transaction = Arc::new(transaction.clone()); let response = harness.send_request(storage, move |responder| { - StorageRequest::PutDeploy { deploy, responder }.into() + StorageRequest::PutTransaction { + transaction, + responder, + } + .into() }); assert!(harness.is_idle()); response @@ -272,40 +552,135 @@ fn put_execution_results( harness: &mut ComponentHarness, storage: &mut Storage, block_hash: BlockHash, - execution_results: HashMap, + block_height: u64, + era_id: EraId, + execution_results: HashMap, ) { - let response = harness.send_request(storage, move |responder| { + harness.send_request(storage, move |responder| { StorageRequest::PutExecutionResults { block_hash: Box::new(block_hash), + block_height, + era_id, execution_results, responder, } .into() }); assert!(harness.is_idle()); +} + +/// Gets available block range from storage. +fn get_available_block_range( + harness: &mut ComponentHarness, + storage: &mut Storage, +) -> AvailableBlockRange { + let response = harness.send_request(storage, move |responder| { + StorageRequest::GetAvailableBlockRange { responder }.into() + }); + assert!(harness.is_idle()); response } -/// Saves state from the storage component. -fn save_state( +fn get_approvals_hashes( harness: &mut ComponentHarness, storage: &mut Storage, - key: Cow<'static, [u8]>, - value: &T, -) where - T: Serialize, -{ - // NOTE: Unfortunately, the serialization logic is duplicated here from the effect builder. 
- let data = bincode::serialize(value).expect("serialization failed"); - harness.send_request(storage, move |responder| { - StateStoreRequest::Save { - key, + block_hash: BlockHash, +) -> Option { + let response = harness.send_request(storage, move |responder| { + StorageRequest::GetApprovalsHashes { + block_hash, + responder, + } + .into() + }); + assert!(harness.is_idle()); + response +} + +fn get_block_header( + harness: &mut ComponentHarness, + storage: &mut Storage, + block_hash: BlockHash, + only_from_available_block_range: bool, +) -> Option { + let response = harness.send_request(storage, move |responder| { + StorageRequest::GetBlockHeader { + block_hash, + only_from_available_block_range, + responder, + } + .into() + }); + assert!(harness.is_idle()); + response +} + +fn get_block_transfers( + harness: &mut ComponentHarness, + storage: &mut Storage, + block_hash: BlockHash, +) -> Option> { + let response = harness.send_request(storage, move |responder| { + StorageRequest::GetBlockTransfers { + block_hash, + responder, + } + .into() + }); + assert!(harness.is_idle()); + response +} + +fn get_block_and_metadata_by_height( + harness: &mut ComponentHarness, + storage: &mut Storage, + block_height: u64, + only_from_available_block_range: bool, +) -> Option { + let response = harness.send_request(storage, move |responder| { + StorageRequest::GetBlockAndMetadataByHeight { + block_height, + only_from_available_block_range, + responder, + } + .into() + }); + assert!(harness.is_idle()); + response +} + +fn get_execution_results( + harness: &mut ComponentHarness, + storage: &mut Storage, + block_hash: BlockHash, +) -> Option> { + let response = harness.send_request(storage, move |responder| { + StorageRequest::GetExecutionResults { + block_hash, + responder, + } + .into() + }); + assert!(harness.is_idle()); + response +} + +fn get_block_signature( + harness: &mut ComponentHarness, + storage: &mut Storage, + block_hash: BlockHash, + public_key: Box, +) -> Option { + 
let response = harness.send_request(storage, move |responder| { + StorageRequest::GetBlockSignature { + block_hash, + public_key, responder, - data, } .into() }); assert!(harness.is_idle()); + response } #[test] @@ -321,234 +696,204 @@ fn get_block_of_non_existing_block_returns_none() { } #[test] -fn can_put_and_get_block() { +fn read_block_by_height_with_available_block_range() { let mut harness = ComponentHarness::default(); - let mut storage = storage_fixture(&harness); - // Create a random block, store and load it. - let block = Box::new(Block::random(&mut harness.rng)); + // Create a random block, load and store it. + let block_33 = TestBlockBuilder::new() + .era(1) + .height(33) + .protocol_version(ProtocolVersion::from_parts(1, 5, 0)) + .switch_block(true) + .build_versioned(&mut harness.rng); - let was_new = put_block(&mut harness, &mut storage, block.clone()); - assert!(was_new, "putting block should have returned `true`"); + let mut storage = storage_fixture(&harness); + assert!(get_block_header_by_height(&mut harness, &mut storage, 0, false).is_none()); + assert!(get_block_header_by_height(&mut harness, &mut storage, 0, true).is_none()); - // Storing the same block again should work, but yield a result of `true`. - let was_new_second_time = put_block(&mut harness, &mut storage, block.clone()); - assert!( - was_new_second_time, - "storing block the second time should have returned `true`" + let was_new = put_complete_block(&mut harness, &mut storage, block_33.clone()); + assert!(was_new); + + assert_eq!( + get_block_header_by_height(&mut harness, &mut storage, 33, false).as_ref(), + Some(&block_33.clone_header()) + ); + assert_eq!( + get_block_header_by_height(&mut harness, &mut storage, 33, true).as_ref(), + Some(&block_33.clone_header()) ); - let response = get_block(&mut harness, &mut storage, *block.hash()); - assert_eq!(response.as_ref(), Some(&*block)); + // Create a random block as a different height, load and store it. 
+ let block_14 = TestBlockBuilder::new() + .era(1) + .height(14) + .protocol_version(ProtocolVersion::from_parts(1, 5, 0)) + .switch_block(false) + .build_versioned(&mut harness.rng); - // Also ensure we can retrieve just the header. - let response = harness.send_request(&mut storage, |responder| { - StorageRequest::GetBlockHeader { - block_hash: *block.hash(), - responder, - } - .into() - }); - - assert_eq!(response.as_ref(), Some(block.header())); -} - -#[test] -fn test_get_block_header_and_finality_signatures_by_height() { - let mut harness = ComponentHarness::default(); - let mut storage = storage_fixture(&harness); - - // Create a random block, store and load it. - let block = Block::random(&mut harness.rng); - let mut block_signatures = BlockSignatures::new(block.header().hash(), block.header().era_id()); - - { - let alice_secret_key = - SecretKey::ed25519_from_bytes([1; SecretKey::ED25519_LENGTH]).unwrap(); - let FinalitySignature { - public_key, - signature, - .. - } = FinalitySignature::new( - block.header().hash(), - block.header().era_id(), - &alice_secret_key, - PublicKey::from(&alice_secret_key), - ); - block_signatures.insert_proof(public_key, signature); - } - - { - let bob_secret_key = SecretKey::ed25519_from_bytes([2; SecretKey::ED25519_LENGTH]).unwrap(); - let FinalitySignature { - public_key, - signature, - .. 
- } = FinalitySignature::new( - block.header().hash(), - block.header().era_id(), - &bob_secret_key, - PublicKey::from(&bob_secret_key), - ); - block_signatures.insert_proof(public_key, signature); - } - - let was_new = put_block(&mut harness, &mut storage, Box::new(block.clone())); - assert!(was_new, "putting block should have returned `true`"); + let was_new = put_complete_block(&mut harness, &mut storage, block_14.clone()); + assert!(was_new); - let mut txn = storage - .env - .begin_rw_txn() - .expect("Could not start transaction"); - let was_new = txn - .put_value( - storage.block_metadata_db, - &block.hash(), - &block_signatures, - true, - ) - .expect("should put value into LMDB"); - assert!( - was_new, - "putting block signatures should have returned `true`" + assert_eq!( + get_block_header_by_height(&mut harness, &mut storage, 14, false).as_ref(), + Some(&block_14.clone_header()) ); - txn.commit().expect("Could not commit transaction"); - - { - let block_header = storage - .read_block_header_by_hash(block.hash()) - .expect("should not throw exception") - .expect("should not be None"); - assert_eq!( - block_header, - block.header().clone(), - "Should have retrieved expected block header" - ); - } - - { - let block_header_with_metadata = storage - .read_block_header_and_finality_signatures_by_height(block.header().height()) - .expect("should not throw exception") - .expect("should not be None"); - assert_eq!( - block_header_with_metadata.block_header, - block.header().clone(), - "Should have retrieved expected block header" - ); - assert_eq!( - block_header_with_metadata.block_signatures, block_signatures, - "Should have retrieved expected block signatures" - ); - } + assert!(get_block_header_by_height(&mut harness, &mut storage, 14, true).is_none()); } #[test] fn can_retrieve_block_by_height() { let mut harness = ComponentHarness::default(); - let mut storage = storage_fixture(&harness); - // Create a random block, load and store it. 
- let block_33 = random_block_at_height(&mut harness.rng, 33); - let block_14 = random_block_at_height(&mut harness.rng, 14); - let block_99 = random_block_at_height(&mut harness.rng, 99); + // Create some random blocks, load and store them. + let block_33 = TestBlockBuilder::new() + .era(1) + .height(33) + .protocol_version(ProtocolVersion::from_parts(1, 5, 0)) + .switch_block(true) + .build_versioned(&mut harness.rng); + let block_14 = TestBlockBuilder::new() + .era(1) + .height(14) + .protocol_version(ProtocolVersion::from_parts(1, 5, 0)) + .switch_block(false) + .build_versioned(&mut harness.rng); + let block_99 = TestBlockBuilder::new() + .era(2) + .height(99) + .protocol_version(ProtocolVersion::from_parts(1, 5, 0)) + .switch_block(true) + .build_versioned(&mut harness.rng); + + let mut storage = storage_fixture(&harness); // Both block at ID and highest block should return `None` initially. - assert!(get_block_at_height(&mut harness, &mut storage, 0).is_none()); - assert!(get_block_header_at_height(&mut harness, &mut storage, 0).is_none()); - assert!(get_highest_block(&mut harness, &mut storage).is_none()); - assert!(get_block_at_height(&mut harness, &mut storage, 14).is_none()); - assert!(get_block_header_at_height(&mut harness, &mut storage, 14).is_none()); - assert!(get_block_at_height(&mut harness, &mut storage, 33).is_none()); - assert!(get_block_header_at_height(&mut harness, &mut storage, 33).is_none()); - assert!(get_block_at_height(&mut harness, &mut storage, 99).is_none()); - assert!(get_block_header_at_height(&mut harness, &mut storage, 99).is_none()); + assert!(get_block_and_metadata_by_height(&mut harness, &mut storage, 0, false).is_none()); + assert!(get_block_header_by_height(&mut harness, &mut storage, 0, false).is_none()); + assert!(get_highest_complete_block(&mut harness, &mut storage).is_none()); + assert!(get_highest_complete_block_header(&mut harness, &mut storage).is_none()); + assert!(get_block_and_metadata_by_height(&mut harness, &mut 
storage, 14, false).is_none()); + assert!(get_block_header_by_height(&mut harness, &mut storage, 14, false).is_none()); + assert!(get_block_and_metadata_by_height(&mut harness, &mut storage, 33, false).is_none()); + assert!(get_block_header_by_height(&mut harness, &mut storage, 33, false).is_none()); + assert!(get_block_and_metadata_by_height(&mut harness, &mut storage, 99, false).is_none()); + assert!(get_block_header_by_height(&mut harness, &mut storage, 99, false).is_none()); // Inserting 33 changes this. - let was_new = put_block(&mut harness, &mut storage, block_33.clone()); + let was_new = put_complete_block(&mut harness, &mut storage, block_33.clone()); assert!(was_new); assert_eq!( - get_highest_block(&mut harness, &mut storage).as_ref(), - Some(&*block_33) + get_highest_complete_block(&mut harness, &mut storage).as_ref(), + Some(&block_33) + ); + assert_eq!( + get_highest_complete_block_header(&mut harness, &mut storage).as_ref(), + Some(&block_33.clone_header()) ); - assert!(get_block_at_height(&mut harness, &mut storage, 0).is_none()); - assert!(get_block_header_at_height(&mut harness, &mut storage, 0).is_none()); - assert!(get_block_at_height(&mut harness, &mut storage, 14).is_none()); - assert!(get_block_header_at_height(&mut harness, &mut storage, 14).is_none()); + assert!(get_block_and_metadata_by_height(&mut harness, &mut storage, 0, false).is_none()); + assert!(get_block_header_by_height(&mut harness, &mut storage, 0, false).is_none()); + assert!(get_block_and_metadata_by_height(&mut harness, &mut storage, 14, false).is_none()); + assert!(get_block_header_by_height(&mut harness, &mut storage, 14, false).is_none()); assert_eq!( - get_block_at_height(&mut harness, &mut storage, 33).as_ref(), - Some(&*block_33) + get_block_and_metadata_by_height(&mut harness, &mut storage, 33, false) + .map(|blk| blk.block) + .as_ref(), + Some(&block_33) ); assert_eq!( - get_block_header_at_height(&mut harness, &mut storage, 33).as_ref(), - Some(block_33.header()) + 
get_block_header_by_height(&mut harness, &mut storage, 33, true).as_ref(), + Some(&block_33.clone_header()) ); - assert!(get_block_at_height(&mut harness, &mut storage, 99).is_none()); - assert!(get_block_header_at_height(&mut harness, &mut storage, 99).is_none()); + assert!(get_block_and_metadata_by_height(&mut harness, &mut storage, 99, false).is_none()); + assert!(get_block_header_by_height(&mut harness, &mut storage, 99, false).is_none()); // Inserting block with height 14, no change in highest. - let was_new = put_block(&mut harness, &mut storage, block_14.clone()); + let was_new = put_complete_block(&mut harness, &mut storage, block_14.clone()); assert!(was_new); assert_eq!( - get_highest_block(&mut harness, &mut storage).as_ref(), - Some(&*block_33) + get_highest_complete_block(&mut harness, &mut storage).as_ref(), + Some(&block_33) + ); + assert_eq!( + get_highest_complete_block_header(&mut harness, &mut storage).as_ref(), + Some(&block_33.clone_header()) + ); + assert!(get_block_and_metadata_by_height(&mut harness, &mut storage, 0, false).is_none()); + assert!(get_block_header_by_height(&mut harness, &mut storage, 0, false).is_none()); + assert_eq!( + get_block_and_metadata_by_height(&mut harness, &mut storage, 14, false) + .map(|blk| blk.block) + .as_ref(), + Some(&block_14) ); - assert!(get_block_at_height(&mut harness, &mut storage, 0).is_none()); - assert!(get_block_header_at_height(&mut harness, &mut storage, 0).is_none()); assert_eq!( - get_block_at_height(&mut harness, &mut storage, 14).as_ref(), - Some(&*block_14) + get_block_header_by_height(&mut harness, &mut storage, 14, true).as_ref(), + None ); assert_eq!( - get_block_header_at_height(&mut harness, &mut storage, 14).as_ref(), - Some(block_14.header()) + get_block_header_by_height(&mut harness, &mut storage, 14, false).as_ref(), + Some(&block_14.clone_header()) ); assert_eq!( - get_block_at_height(&mut harness, &mut storage, 33).as_ref(), - Some(&*block_33) + 
get_block_and_metadata_by_height(&mut harness, &mut storage, 33, false) + .map(|blk| blk.block) + .as_ref(), + Some(&block_33) ); assert_eq!( - get_block_header_at_height(&mut harness, &mut storage, 33).as_ref(), - Some(block_33.header()) + get_block_header_by_height(&mut harness, &mut storage, 33, false).as_ref(), + Some(&block_33.clone_header()) ); - assert!(get_block_at_height(&mut harness, &mut storage, 99).is_none()); - assert!(get_block_header_at_height(&mut harness, &mut storage, 99).is_none()); + assert!(get_block_and_metadata_by_height(&mut harness, &mut storage, 9, false).is_none()); + assert!(get_block_header_by_height(&mut harness, &mut storage, 99, false).is_none()); // Inserting block with height 99, changes highest. - let was_new = put_block(&mut harness, &mut storage, block_99.clone()); + let was_new = put_complete_block(&mut harness, &mut storage, block_99.clone()); + // Mark block 99 as complete. + storage.completed_blocks.insert(99); assert!(was_new); assert_eq!( - get_highest_block(&mut harness, &mut storage).as_ref(), - Some(&*block_99) + get_highest_complete_block(&mut harness, &mut storage).as_ref(), + Some(&block_99) ); - assert!(get_block_at_height(&mut harness, &mut storage, 0).is_none()); - assert!(get_block_header_at_height(&mut harness, &mut storage, 0).is_none()); assert_eq!( - get_block_at_height(&mut harness, &mut storage, 14).as_ref(), - Some(&*block_14) + get_highest_complete_block_header(&mut harness, &mut storage).as_ref(), + Some(&block_99.clone_header()) ); + assert!(get_block_and_metadata_by_height(&mut harness, &mut storage, 0, false).is_none()); + assert!(get_block_header_by_height(&mut harness, &mut storage, 0, false).is_none()); assert_eq!( - get_block_header_at_height(&mut harness, &mut storage, 14).as_ref(), - Some(block_14.header()) + get_block_and_metadata_by_height(&mut harness, &mut storage, 14, false) + .map(|blk| blk.block) + .as_ref(), + Some(&block_14) ); assert_eq!( - get_block_at_height(&mut harness, &mut 
storage, 33).as_ref(), - Some(&*block_33) + get_block_header_by_height(&mut harness, &mut storage, 14, false).as_ref(), + Some(&block_14.clone_header()) ); assert_eq!( - get_block_header_at_height(&mut harness, &mut storage, 33).as_ref(), - Some(block_33.header()) + get_block_and_metadata_by_height(&mut harness, &mut storage, 33, false) + .map(|blk| blk.block) + .as_ref(), + Some(&block_33) ); assert_eq!( - get_block_at_height(&mut harness, &mut storage, 99).as_ref(), - Some(&*block_99) + get_block_header_by_height(&mut harness, &mut storage, 33, false).as_ref(), + Some(&block_33.clone_header()) ); assert_eq!( - get_block_header_at_height(&mut harness, &mut storage, 99).as_ref(), - Some(block_99.header()) + get_block_and_metadata_by_height(&mut harness, &mut storage, 99, false) + .map(|blk| blk.block) + .as_ref(), + Some(&block_99) + ); + assert_eq!( + get_block_header_by_height(&mut harness, &mut storage, 99, false).as_ref(), + Some(&block_99.clone_header()) ); } @@ -559,157 +904,311 @@ fn different_block_at_height_is_fatal() { let mut storage = storage_fixture(&harness); // Create two different blocks at the same height. - let block_44_a = random_block_at_height(&mut harness.rng, 44); - let block_44_b = random_block_at_height(&mut harness.rng, 44); - - let was_new = put_block(&mut harness, &mut storage, block_44_a.clone()); + let block_44_a = TestBlockBuilder::new() + .era(1) + .height(44) + .switch_block(false) + .build_versioned(&mut harness.rng); + let block_44_b = TestBlockBuilder::new() + .era(1) + .height(44) + .switch_block(false) + .build_versioned(&mut harness.rng); + + let was_new = put_complete_block(&mut harness, &mut storage, block_44_a.clone()); assert!(was_new); - let was_new = put_block(&mut harness, &mut storage, block_44_a); + let was_new = put_complete_block(&mut harness, &mut storage, block_44_a); assert!(was_new); // Putting a different block with the same height should now crash. 
- put_block(&mut harness, &mut storage, block_44_b); + put_complete_block(&mut harness, &mut storage, block_44_b); } #[test] -fn get_vec_of_non_existing_deploy_returns_nones() { +fn get_vec_of_non_existing_transaction_returns_nones() { let mut harness = ComponentHarness::default(); let mut storage = storage_fixture(&harness); - let deploy_id = DeployHash::random(&mut harness.rng); - let response = get_deploys(&mut harness, &mut storage, smallvec![deploy_id]); + let transaction_id = Transaction::random(&mut harness.rng).hash(); + let response = get_naive_transactions(&mut harness, &mut storage, smallvec![transaction_id]); assert_eq!(response, vec![None]); - // Also verify that we can retrieve using an empty set of deploy hashes. - let response = get_deploys(&mut harness, &mut storage, smallvec![]); + // Also verify that we can retrieve using an empty set of transaction hashes. + let response = get_naive_transactions(&mut harness, &mut storage, smallvec![]); assert!(response.is_empty()); } #[test] -fn can_retrieve_store_and_load_deploys() { +fn can_retrieve_store_and_load_transactions() { let mut harness = ComponentHarness::default(); let mut storage = storage_fixture(&harness); // Create a random deploy, store and load it. - let deploy = Box::new(Deploy::random(&mut harness.rng)); + let transaction = Transaction::random(&mut harness.rng); + + let was_new = put_transaction(&mut harness, &mut storage, &transaction); + let block_hash_height_and_era = BlockHashHeightAndEra::new( + BlockHash::random(&mut harness.rng), + harness.rng.gen(), + EraId::random(&mut harness.rng), + ); - let was_new = put_deploy(&mut harness, &mut storage, deploy.clone()); - assert!(was_new, "putting deploy should have returned `true`"); + assert!(was_new, "putting transaction should have returned `true`"); // Storing the same deploy again should work, but yield a result of `false`. 
- let was_new_second_time = put_deploy(&mut harness, &mut storage, deploy.clone()); + let was_new_second_time = put_transaction(&mut harness, &mut storage, &transaction); assert!( !was_new_second_time, - "storing deploy the second time should have returned `false`" + "storing transaction the second time should have returned `false`" ); - // Retrieve the stored deploy. - let response = get_deploys(&mut harness, &mut storage, smallvec![*deploy.id()]); - assert_eq!(response, vec![Some(deploy.as_ref().clone())]); + // Retrieve the stored transaction. + let response = + get_naive_transactions(&mut harness, &mut storage, smallvec![transaction.hash()]); + assert_eq!(response, vec![Some(transaction.clone())]); - // Also ensure we can retrieve just the header. - let response = harness.send_request(&mut storage, |responder| { - StorageRequest::GetDeployHeaders { - deploy_hashes: vec![*deploy.id()], - responder, + let mut execution_results: HashMap = HashMap::new(); + execution_results.insert( + transaction.hash(), + ExecutionResult::from(ExecutionResultV2::random(&mut harness.rng)), + ); + put_execution_results( + &mut harness, + &mut storage, + block_hash_height_and_era.block_hash, + block_hash_height_and_era.block_height, + block_hash_height_and_era.era_id, + execution_results, + ); + + // Finally try to get the execution info as well. Since we did not store any, we expect to get + // the block hash and height from the indices. 
+ let (transaction_response, exec_info_response) = + get_naive_transaction_and_execution_info(&mut storage, transaction.hash()) + .expect("no transaction with execution info returned"); + + assert_eq!(transaction_response, transaction); + match exec_info_response { + Some(ExecutionInfo { + block_hash, + block_height, + execution_result: Some(_), + }) => { + assert_eq!(block_hash_height_and_era.block_hash, block_hash); + assert_eq!(block_hash_height_and_era.block_height, block_height); } - .into() - }); - assert_eq!(response, vec![Some(deploy.header().clone())]); - - // Finally try to get the metadata as well. Since we did not store any, we expect empty default - // metadata to present. - let (deploy_response, metadata_response) = harness - .send_request(&mut storage, |responder| { - StorageRequest::GetDeployAndMetadata { - deploy_hash: *deploy.id(), - responder, - } - .into() - }) - .expect("no deploy with metadata returned"); + Some(ExecutionInfo { + execution_result: None, + .. + }) => { + panic!("We didn't receive any execution info but even though we previously stored it.") + } + None => panic!( + "We stored block info in the deploy hash index but we received nothing in the response." + ), + } + + // Create a random transaction, store and load it. + let transaction = Transaction::random(&mut harness.rng); + + assert!(put_transaction(&mut harness, &mut storage, &transaction)); + // Don't insert to the transaction hash index. Since we have no execution results + // either, we should receive a `None` execution info response. 
+ let (transaction_response, exec_info_response) = + get_naive_transaction_and_execution_info(&mut storage, transaction.hash()) + .expect("no transaction with execution info returned"); - assert_eq!(deploy_response, *deploy); - assert_eq!(metadata_response, DeployMetadata::default()); + assert_eq!(transaction_response, transaction); + assert!( + exec_info_response.is_none(), + "We didn't store any block info in the index but we received it in the response." + ); } #[test] -fn storing_and_loading_a_lot_of_deploys_does_not_exhaust_handles() { +fn should_retrieve_transactions_era_ids() { let mut harness = ComponentHarness::default(); let mut storage = storage_fixture(&harness); - let total = 1000; - let batch_size = 25; - - let mut deploy_hashes = Vec::new(); - - for _ in 0..total { - let deploy = Box::new(Deploy::random(&mut harness.rng)); - deploy_hashes.push(*deploy.id()); - put_deploy(&mut harness, &mut storage, deploy); + // Populate the `transaction_hash_index` with 5 transactions from a block in era 1. + let era_1_transactions: Vec = + iter::repeat_with(|| Transaction::random(&mut harness.rng)) + .take(5) + .collect(); + let block_hash_height_and_era = BlockHashHeightAndEra::new( + BlockHash::random(&mut harness.rng), + harness.rng.gen(), + EraId::new(1), + ); + let mut execution_results: HashMap = HashMap::new(); + for transaction in era_1_transactions.clone() { + let _ = put_transaction(&mut harness, &mut storage, &transaction); + execution_results.insert( + transaction.hash(), + ExecutionResult::from(ExecutionResultV2::random(&mut harness.rng)), + ); } + put_execution_results( + &mut harness, + &mut storage, + block_hash_height_and_era.block_hash, + block_hash_height_and_era.block_height, + block_hash_height_and_era.era_id, + execution_results, + ); - // Shuffle deploy hashes around to get a random order. - deploy_hashes.as_mut_slice().shuffle(&mut harness.rng); - - // Retrieve all from storage, ensuring they are found. 
- for chunk in deploy_hashes.chunks(batch_size) { - let result = get_deploys(&mut harness, &mut storage, chunk.iter().cloned().collect()); - assert!(result.iter().all(Option::is_some)); + // Further populate the `transaction_hash_index` with 5 deploys from a block in era 2. + let era_2_transactions: Vec = + iter::repeat_with(|| Transaction::random(&mut harness.rng)) + .take(5) + .collect(); + let block_hash_height_and_era = BlockHashHeightAndEra::new( + BlockHash::random(&mut harness.rng), + harness.rng.gen(), + EraId::new(2), + ); + let mut execution_results: HashMap = HashMap::new(); + for transaction in era_2_transactions.clone() { + let _ = put_transaction(&mut harness, &mut storage, &transaction); + execution_results.insert( + transaction.hash(), + ExecutionResult::from(ExecutionResultV2::random(&mut harness.rng)), + ); } -} + put_execution_results( + &mut harness, + &mut storage, + block_hash_height_and_era.block_hash, + block_hash_height_and_era.block_height, + block_hash_height_and_era.era_id, + execution_results, + ); -#[test] -fn store_execution_results_for_two_blocks() { - let mut harness = ComponentHarness::default(); - let mut storage = storage_fixture(&harness); + // Check we get an empty set for deploys not yet executed. + let random_transaction_hashes: HashSet = iter::repeat_with(|| { + if harness.rng.gen() { + TransactionHash::Deploy(DeployHash::random(&mut harness.rng)) + } else { + TransactionHash::V1(TransactionV1Hash::random(&mut harness.rng)) + } + }) + .take(5) + .collect(); + assert!(get_transactions_era_ids( + &mut harness, + &mut storage, + random_transaction_hashes.clone(), + ) + .is_empty()); - let deploy = Deploy::random(&mut harness.rng); + // Check we get back only era 1 for all of the era 1 deploys and similarly for era 2 ones. 
+ let era_1_transaction_hashes: HashSet<_> = era_1_transactions + .iter() + .map(|transaction| transaction.hash()) + .collect(); + let era1: HashSet = iter::once(EraId::new(1)).collect(); + assert_eq!( + get_transactions_era_ids(&mut harness, &mut storage, era_1_transaction_hashes.clone()), + era1 + ); + let era_2_transaction_hashes: HashSet<_> = era_2_transactions + .iter() + .map(|transaction| transaction.hash()) + .collect(); + let era2: HashSet = iter::once(EraId::new(2)).collect(); + assert_eq!( + get_transactions_era_ids(&mut harness, &mut storage, era_2_transaction_hashes.clone()), + era2 + ); - let block_hash_a = BlockHash::random(&mut harness.rng); - let block_hash_b = BlockHash::random(&mut harness.rng); + // Check we get back both eras if we use some from each collection. + let both_eras = HashSet::from_iter([EraId::new(1), EraId::new(2)]); + assert_eq!( + get_transactions_era_ids( + &mut harness, + &mut storage, + era_1_transaction_hashes + .iter() + .take(3) + .chain(era_2_transaction_hashes.iter().take(3)) + .copied() + .collect(), + ), + both_eras + ); - // Store the deploy. - put_deploy(&mut harness, &mut storage, Box::new(deploy.clone())); + // Check we get back only era 1 for era 1 deploys interspersed with unexecuted deploys, and + // similarly for era 2 ones. + assert_eq!( + get_transactions_era_ids( + &mut harness, + &mut storage, + era_1_transaction_hashes + .iter() + .take(1) + .chain(random_transaction_hashes.iter().take(3)) + .copied() + .collect(), + ), + era1 + ); + assert_eq!( + get_transactions_era_ids( + &mut harness, + &mut storage, + era_2_transaction_hashes + .iter() + .take(1) + .chain(random_transaction_hashes.iter().take(3)) + .copied() + .collect(), + ), + era2 + ); - // Ensure deploy exists. + // Check we get back both eras if we use some from each collection and also some unexecuted. 
assert_eq!( - get_deploys(&mut harness, &mut storage, smallvec![*deploy.id()]), - vec![Some(deploy.clone())] + get_transactions_era_ids( + &mut harness, + &mut storage, + era_1_transaction_hashes + .iter() + .take(3) + .chain(era_2_transaction_hashes.iter().take(3)) + .chain(random_transaction_hashes.iter().take(3)) + .copied() + .collect(), + ), + both_eras ); +} + +#[test] +fn storing_and_loading_a_lot_of_transactions_does_not_exhaust_handles() { + let mut harness = ComponentHarness::default(); + let mut storage = storage_fixture(&harness); + + let total = 1000; + let batch_size = 25; - // Put first execution result. - let first_result: ExecutionResult = harness.rng.gen(); - let mut first_results = HashMap::new(); - first_results.insert(*deploy.id(), first_result.clone()); - put_execution_results(&mut harness, &mut storage, block_hash_a, first_results); + let mut transaction_hashes = Vec::new(); - // Retrieve and check if correct. - let (first_deploy, first_metadata) = - get_deploy_and_metadata(&mut harness, &mut storage, *deploy.id()) - .expect("missing on first attempt"); - assert_eq!(first_deploy, deploy); - let mut expected_per_block_results = HashMap::new(); - expected_per_block_results.insert(block_hash_a, first_result); - assert_eq!(first_metadata.execution_results, expected_per_block_results); + for _ in 0..total { + let transaction = Transaction::random(&mut harness.rng); + transaction_hashes.push(transaction.hash()); + put_transaction(&mut harness, &mut storage, &transaction); + } - // Add second result for the same deploy, different block. - let second_result: ExecutionResult = harness.rng.gen(); - let mut second_results = HashMap::new(); - second_results.insert(*deploy.id(), second_result.clone()); - put_execution_results(&mut harness, &mut storage, block_hash_b, second_results); + // Shuffle transaction hashes around to get a random order. 
+ transaction_hashes.as_mut_slice().shuffle(&mut harness.rng); - // Retrieve the deploy again, should now contain both. - let (second_deploy, second_metadata) = - get_deploy_and_metadata(&mut harness, &mut storage, *deploy.id()) - .expect("missing on second attempt"); - assert_eq!(second_deploy, deploy); - expected_per_block_results.insert(block_hash_b, second_result); - assert_eq!( - second_metadata.execution_results, - expected_per_block_results - ); + // Retrieve all from storage, ensuring they are found. + for chunk in transaction_hashes.chunks(batch_size) { + let result = + get_naive_transactions(&mut harness, &mut storage, chunk.iter().cloned().collect()); + assert!(result.iter().all(Option::is_some)); + } } #[test] @@ -717,76 +1216,57 @@ fn store_random_execution_results() { let mut harness = ComponentHarness::default(); let mut storage = storage_fixture(&harness); - // We store results for two different blocks. Each block will have five deploys executed in it, - // with two of these deploys being shared by both blocks, while the remaining three are unique - // per block. + // We store results for two different blocks. Each block will have five deploys executed in it. let block_hash_a = BlockHash::random(&mut harness.rng); let block_hash_b = BlockHash::random(&mut harness.rng); - // Create the shared deploys. - let shared_deploys = vec![ - Deploy::random(&mut harness.rng), - Deploy::random(&mut harness.rng), - ]; - - // Store shared deploys. - for deploy in &shared_deploys { - put_deploy(&mut harness, &mut storage, Box::new(deploy.clone())); - } - // We collect the expected result per deploy in parallel to adding them. 
let mut expected_outcome = HashMap::new(); fn setup_block( harness: &mut ComponentHarness, storage: &mut Storage, - expected_outcome: &mut HashMap>, + expected_outcome: &mut HashMap, block_hash: &BlockHash, - shared_deploys: &[Deploy], + block_height: u64, + era_id: EraId, ) { - let unique_count = 3; + let transaction_count = 5; // Results for a single block. let mut block_results = HashMap::new(); - // Add three unique deploys to block. - for _ in 0..unique_count { - let deploy = Deploy::random(&mut harness.rng); + // Add deploys to block. + for _ in 0..transaction_count { + let transaction = Transaction::random(&mut harness.rng); - // Store unique deploy. - put_deploy(harness, storage, Box::new(deploy.clone())); + // Store deploy. + put_transaction(harness, storage, &transaction.clone()); - let execution_result: ExecutionResult = harness.rng.gen(); + let execution_result = + ExecutionResult::from(ExecutionResultV2::random(&mut harness.rng)); + let execution_info = ExecutionInfo { + block_hash: *block_hash, + block_height, + execution_result: Some(execution_result.clone()), + }; // Insert deploy results for the unique block-deploy combination. - let mut map = HashMap::new(); - map.insert(*block_hash, execution_result.clone()); - expected_outcome.insert(*deploy.id(), map); + expected_outcome.insert(transaction.hash(), execution_info); // Add to our expected outcome. - block_results.insert(*deploy.id(), execution_result); - } - - // Insert the shared deploys as well. - for shared_deploy in shared_deploys { - let execution_result: ExecutionResult = harness.rng.gen(); - - // Insert the new result and ensure it is not present yet. - let result = block_results.insert(*shared_deploy.id(), execution_result.clone()); - assert!(result.is_none()); - - // Insert into expected outcome. 
- let deploy_expected = expected_outcome.entry(*shared_deploy.id()).or_default(); - let prev = deploy_expected.insert(*block_hash, execution_result.clone()); - // Ensure we are not replacing something. - assert!(prev.is_none()); + block_results.insert(transaction.hash(), execution_result); } - // We should have all results for our block collected for the input. - assert_eq!(block_results.len(), unique_count + shared_deploys.len()); - // Now we can submit the block's execution results. - put_execution_results(harness, storage, *block_hash, block_results); + put_execution_results( + harness, + storage, + *block_hash, + block_height, + era_id, + block_results, + ); } setup_block( @@ -794,7 +1274,8 @@ fn store_random_execution_results() { &mut storage, &mut expected_outcome, &block_hash_a, - &shared_deploys, + 1, + EraId::new(1), ); setup_block( @@ -802,18 +1283,19 @@ fn store_random_execution_results() { &mut storage, &mut expected_outcome, &block_hash_b, - &shared_deploys, + 2, + EraId::new(1), ); // At this point, we are all set up and ready to receive results. Iterate over every deploy and // see if its execution-data-per-block matches our expectations. 
- for (deploy_hash, raw_meta) in expected_outcome.iter() { - let (deploy, metadata) = get_deploy_and_metadata(&mut harness, &mut storage, *deploy_hash) - .expect("missing deploy"); + for (txn_hash, expected_exec_info) in expected_outcome.into_iter() { + let (transaction, maybe_exec_info) = + get_naive_transaction_and_execution_info(&mut storage, txn_hash) + .expect("missing transaction"); - assert_eq!(deploy_hash, deploy.id()); - - assert_eq!(raw_meta, &metadata.execution_results); + assert_eq!(txn_hash, transaction.hash()); + assert_eq!(maybe_exec_info, Some(expected_exec_info)); } } @@ -823,18 +1305,87 @@ fn store_execution_results_twice_for_same_block_deploy_pair() { let mut storage = storage_fixture(&harness); let block_hash = BlockHash::random(&mut harness.rng); - let deploy_hash = DeployHash::random(&mut harness.rng); + let block_height = harness.rng.gen(); + let era_id = EraId::random(&mut harness.rng); + let transaction = Transaction::random(&mut harness.rng); + let transaction_hash = transaction.hash(); + + put_transaction(&mut harness, &mut storage, &transaction); let mut exec_result_1 = HashMap::new(); - exec_result_1.insert(deploy_hash, harness.rng.gen()); + exec_result_1.insert( + transaction_hash, + ExecutionResult::from(ExecutionResultV2::random(&mut harness.rng)), + ); let mut exec_result_2 = HashMap::new(); - exec_result_2.insert(deploy_hash, harness.rng.gen()); + let new_exec_result = ExecutionResult::from(ExecutionResultV2::random(&mut harness.rng)); + exec_result_2.insert(transaction_hash, new_exec_result.clone()); + + put_execution_results( + &mut harness, + &mut storage, + block_hash, + block_height, + era_id, + exec_result_1, + ); + + // Storing a second execution result for the same deploy on the same block should overwrite the + // first. 
+ put_execution_results( + &mut harness, + &mut storage, + block_hash, + block_height, + era_id, + exec_result_2, + ); + + let (returned_transaction, returned_exec_info) = + get_naive_transaction_and_execution_info(&mut storage, transaction_hash) + .expect("missing deploy"); + let expected_exec_info = Some(ExecutionInfo { + block_hash, + block_height, + execution_result: Some(new_exec_result), + }); - put_execution_results(&mut harness, &mut storage, block_hash, exec_result_1); + assert_eq!(returned_transaction, transaction); + assert_eq!(returned_exec_info, expected_exec_info); +} - // Storing a second execution result for the same deploy on the same block should panic. - put_execution_results(&mut harness, &mut storage, block_hash, exec_result_2); +fn prepare_exec_result_with_transfer( + rng: &mut TestRng, + txn_hash: &TransactionHash, +) -> (ExecutionResult, Transfer) { + let initiator_addr = InitiatorAddr::random(rng); + let transfer = Transfer::V2(TransferV2::new( + *txn_hash, + initiator_addr.clone(), + Some(rng.gen()), + rng.gen(), + rng.gen(), + rng.gen(), + Gas::from(rng.gen::()), + Some(rng.gen()), + )); + let limit = Gas::new(rng.gen::()); + let current_price = 1; + let refund = U512::zero(); + let exec_result = ExecutionResult::V2(Box::new(ExecutionResultV2 { + initiator: initiator_addr, + error_message: None, + current_price, + limit, + cost: limit.value(), + consumed: limit, + refund, + transfers: vec![transfer.clone()], + effects: Effects::new(), + size_estimate: rng.gen(), + })); + (exec_result, transfer) } #[test] @@ -842,140 +1393,216 @@ fn store_identical_execution_results() { let mut harness = ComponentHarness::default(); let mut storage = storage_fixture(&harness); - let block_hash = BlockHash::random(&mut harness.rng); - let deploy_hash = DeployHash::random(&mut harness.rng); + let deploy = Deploy::random_valid_native_transfer(&mut harness.rng); + let deploy_hash = *deploy.hash(); + let transaction: Transaction = deploy.into(); + let block = 
Arc::new(Block::V2( + TestBlockBuilder::new() + .transactions(Some(&transaction)) + .build(&mut harness.rng), + )); + put_transaction(&mut harness, &mut storage, &transaction); + put_block(&mut harness, &mut storage, block.clone()); + let block_hash = *block.hash(); - let mut exec_result = HashMap::new(); - exec_result.insert(deploy_hash, harness.rng.gen()); + let (exec_result, transfer) = + prepare_exec_result_with_transfer(&mut harness.rng, &TransactionHash::Deploy(deploy_hash)); + let mut exec_results = HashMap::new(); + exec_results.insert(TransactionHash::from(deploy_hash), exec_result.clone()); - put_execution_results(&mut harness, &mut storage, block_hash, exec_result.clone()); + put_execution_results( + &mut harness, + &mut storage, + block_hash, + block.height(), + block.era_id(), + exec_results.clone(), + ); + { + let retrieved_results = get_execution_results(&mut harness, &mut storage, block_hash) + .expect("should return Some"); + assert_eq!(retrieved_results.len(), 1); + assert_eq!(retrieved_results[0].0, TransactionHash::from(deploy_hash)); + assert_eq!(retrieved_results[0].2, exec_result); + } + let retrieved_transfers = + get_block_transfers(&mut harness, &mut storage, block_hash).expect("should return Some"); + assert_eq!(retrieved_transfers.len(), 1); + assert_eq!(retrieved_transfers[0], transfer); // We should be fine storing the exact same result twice. - put_execution_results(&mut harness, &mut storage, block_hash, exec_result); -} - -/// Example state used in storage. 
-#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] -struct StateData { - a: Vec, - b: i32, + put_execution_results( + &mut harness, + &mut storage, + block_hash, + block.height(), + block.era_id(), + exec_results, + ); + { + let retrieved_results = get_execution_results(&mut harness, &mut storage, block_hash) + .expect("should return Some"); + assert_eq!(retrieved_results.len(), 1); + assert_eq!(retrieved_results[0].0, TransactionHash::from(deploy_hash)); + assert_eq!(retrieved_results[0].2, exec_result); + } + let retrieved_transfers = + get_block_transfers(&mut harness, &mut storage, block_hash).expect("should return Some"); + assert_eq!(retrieved_transfers.len(), 1); + assert_eq!(retrieved_transfers[0], transfer); } +/// This is a regression test for the issue where `Transfer`s under a block with no deploys could be +/// returned as `None` rather than the expected `Some(vec![])`. The fix should ensure that if no +/// Transfers are found, storage will respond with an empty collection and store the correct value +/// for future requests. +/// +/// See https://github.com/casper-network/casper-node/issues/4255 for further info. #[test] -fn store_and_load_state_data() { - let key1 = b"sample-key-1".to_vec(); - let key2 = b"exkey-2".to_vec(); - +fn should_provide_transfers_if_not_stored() { let mut harness = ComponentHarness::default(); let mut storage = storage_fixture(&harness); - // Initially, both keys should return nothing. - let load1 = load_state::(&mut harness, &mut storage, key1.clone().into()); - let load2 = load_state::(&mut harness, &mut storage, key2.clone().into()); - - assert!(load1.is_none()); - assert!(load2.is_none()); - - let data1 = StateData { a: vec![1], b: -1 }; - let data2 = StateData { a: vec![], b: 2 }; - - // Store one after another. 
- save_state(&mut harness, &mut storage, key1.clone().into(), &data1); - let load1 = load_state::(&mut harness, &mut storage, key1.clone().into()); - let load2 = load_state::(&mut harness, &mut storage, key2.clone().into()); - - assert_eq!(load1, Some(data1.clone())); - assert!(load2.is_none()); - - save_state(&mut harness, &mut storage, key2.clone().into(), &data2); - let load1 = load_state::(&mut harness, &mut storage, key1.clone().into()); - let load2 = load_state::(&mut harness, &mut storage, key2.clone().into()); - - assert_eq!(load1, Some(data1)); - assert_eq!(load2, Some(data2.clone())); + let block_v2 = TestBlockBuilder::new() + .transactions(None) + .build(&mut harness.rng); + assert_eq!(block_v2.all_transactions().count(), 0); + let block = Arc::new(Block::V2(block_v2)); + let block_hash = *block.hash(); + put_block(&mut harness, &mut storage, block); - // Overwrite `data1` in store. - save_state(&mut harness, &mut storage, key1.clone().into(), &data2); - let load1 = load_state::(&mut harness, &mut storage, key1.into()); - let load2 = load_state::(&mut harness, &mut storage, key2.into()); + // Check an empty collection is returned. + let retrieved_transfers = + get_block_transfers(&mut harness, &mut storage, block_hash).expect("should return Some"); + assert!(retrieved_transfers.is_empty()); - assert_eq!(load1, Some(data2.clone())); - assert_eq!(load2, Some(data2)); + // Check the empty collection has been stored. + let reader = storage.block_store.checkout_rw().unwrap(); + let maybe_transfers: Option> = reader.read(block_hash).unwrap(); + assert_eq!(Some(vec![]), maybe_transfers); } +/// This is a regression test for the issue where a valid collection of `Transfer`s under a given +/// block could be erroneously replaced with an empty collection. The fix should ensure that if an +/// empty collection of Transfers is found, storage will replace it with the correct collection and +/// store the correct value for future requests. 
+/// +/// See https://github.com/casper-network/casper-node/issues/4268 for further info. #[test] -fn persist_state_data() { - let key = b"sample-key-1".to_vec(); - +fn should_provide_transfers_after_emptied() { let mut harness = ComponentHarness::default(); let mut storage = storage_fixture(&harness); - let load = load_state::(&mut harness, &mut storage, key.clone().into()); - assert!(load.is_none()); + let deploy = Deploy::random_valid_native_transfer(&mut harness.rng); + let deploy_hash = *deploy.hash(); + let block = Block::V2( + TestBlockBuilder::new() + .transactions(Some(&Transaction::Deploy(deploy))) + .build(&mut harness.rng), + ); + let block_hash = *block.hash(); + put_block(&mut harness, &mut storage, Arc::new(block.clone())); - let data = StateData { - a: vec![1, 2, 3, 4, 5, 6], - b: -1, - }; + let (exec_result, transfer) = + prepare_exec_result_with_transfer(&mut harness.rng, &TransactionHash::Deploy(deploy_hash)); + let mut exec_results = HashMap::new(); + exec_results.insert(TransactionHash::from(deploy_hash), exec_result); - // Store one after another. - save_state(&mut harness, &mut storage, key.clone().into(), &data); - let load = load_state::(&mut harness, &mut storage, key.clone().into()); - assert_eq!(load, Some(data.clone())); + put_execution_results( + &mut harness, + &mut storage, + block_hash, + block.height(), + block.era_id(), + exec_results.clone(), + ); - let (on_disk, rng) = harness.into_parts(); - let mut harness = ComponentHarness::builder() - .on_disk(on_disk) - .rng(rng) - .build(); - let mut storage = storage_fixture(&harness); + // Replace the valid collection with an empty one. + let mut writer = storage.block_store.checkout_rw().unwrap(); + let empty_transfers = BlockTransfers { + block_hash, + transfers: Vec::::new(), + }; + assert_eq!(writer.write(&empty_transfers).unwrap(), block_hash); + writer.commit().unwrap(); + + // Check the correct value is returned. 
+ let retrieved_transfers = + get_block_transfers(&mut harness, &mut storage, block_hash).expect("should return Some"); + assert_eq!(retrieved_transfers.len(), 1); + assert_eq!(retrieved_transfers[0], transfer); + + // Check the correct value has been stored. + let reader = storage.block_store.checkout_rw().unwrap(); + let maybe_transfers: Option> = reader.read(block_hash).unwrap(); + assert_eq!(Some(vec![transfer]), maybe_transfers); +} - let load = load_state::(&mut harness, &mut storage, key.into()); - assert_eq!(load, Some(data)); +/// Example state used in storage. +#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] +struct StateData { + a: Vec, + b: i32, } +/* TODO: we can't write the legacy db anymore so this test needs to be refactored. #[test] fn test_legacy_interface() { let mut harness = ComponentHarness::default(); - let mut storage = storage_fixture(&harness); + let storage = storage_fixture(&harness); - let deploy = Box::new(Deploy::random(&mut harness.rng)); - let was_new = put_deploy(&mut harness, &mut storage, deploy.clone()); + let deploy = Deploy::random(&mut harness.rng); + let was_new = storage.write_legacy_deploy(&deploy); assert!(was_new); // Ensure we get the deploy we expect. - let result = storage.handle_legacy_direct_deploy_request(*deploy.id()); - assert_eq!(result, Some(*deploy)); + let result = storage + .get_legacy_deploy(*deploy.hash()) + .expect("should get deploy"); + assert_eq!(result, Some(LegacyDeploy::from(deploy))); // A non-existent deploy should simply return `None`. 
assert!(storage - .handle_legacy_direct_deploy_request(DeployHash::random(&mut harness.rng)) + .get_legacy_deploy(DeployHash::random(&mut harness.rng)) + .expect("should get deploy") .is_none()) } +*/ #[test] -fn persist_blocks_deploys_and_deploy_metadata_across_instantiations() { +fn persist_blocks_txns_and_execution_info_across_instantiations() { let mut harness = ComponentHarness::default(); let mut storage = storage_fixture(&harness); // Create some sample data. - let deploy = Deploy::random(&mut harness.rng); - let block = random_block_at_height(&mut harness.rng, 42); - let execution_result: ExecutionResult = harness.rng.gen(); - - put_deploy(&mut harness, &mut storage, Box::new(deploy.clone())); - put_block(&mut harness, &mut storage, block.clone()); + let transaction = Transaction::random(&mut harness.rng); + let block: Block = TestBlockBuilder::new() + .transactions(Some(&transaction)) + .build_versioned(&mut harness.rng); + + let block_height = block.height(); + let execution_result = ExecutionResult::from(ExecutionResultV2::random(&mut harness.rng)); + put_transaction(&mut harness, &mut storage, &transaction); + put_complete_block(&mut harness, &mut storage, block.clone()); let mut execution_results = HashMap::new(); - execution_results.insert(*deploy.id(), execution_result.clone()); - put_execution_results(&mut harness, &mut storage, *block.hash(), execution_results); - + execution_results.insert(transaction.hash(), execution_result.clone()); + put_execution_results( + &mut harness, + &mut storage, + *block.hash(), + block.height(), + block.era_id(), + execution_results, + ); assert_eq!( - get_block_at_height(&mut harness, &mut storage, 42).expect("block not indexed properly"), - *block + get_block_and_metadata_by_height(&mut harness, &mut storage, block_height, false) + .expect("block not indexed properly") + .block, + block ); - // After storing everything, destroy the harness and component, then rebuild using the same - // directory as backing. 
+ // After storing everything, destroy the harness and component, then rebuild using the + // same directory as backing. let (on_disk, rng) = harness.into_parts(); let mut harness = ComponentHarness::builder() .on_disk(on_disk) @@ -985,21 +1612,26 @@ fn persist_blocks_deploys_and_deploy_metadata_across_instantiations() { let actual_block = get_block(&mut harness, &mut storage, *block.hash()) .expect("missing block we stored earlier"); - assert_eq!(actual_block, *block); + assert_eq!(actual_block, block); + let actual_txns = + get_naive_transactions(&mut harness, &mut storage, smallvec![transaction.hash()]); + assert_eq!(actual_txns, vec![Some(transaction.clone())]); - let actual_deploys = get_deploys(&mut harness, &mut storage, smallvec![*deploy.id()]); - assert_eq!(actual_deploys, vec![Some(deploy.clone())]); + let (_, maybe_exec_info) = + get_naive_transaction_and_execution_info(&mut storage, transaction.hash()) + .expect("missing deploy we stored earlier"); - let (_, deploy_metadata) = get_deploy_and_metadata(&mut harness, &mut storage, *deploy.id()) - .expect("missing deploy we stored earlier"); - - let execution_results = deploy_metadata.execution_results; - assert_eq!(execution_results.len(), 1); - assert_eq!(execution_results[block.hash()], execution_result); + let retrieved_execution_result = maybe_exec_info + .expect("should have execution info") + .execution_result + .expect("should have execution result"); + assert_eq!(retrieved_execution_result, execution_result); assert_eq!( - get_block_at_height(&mut harness, &mut storage, 42).expect("block index was not restored"), - *block + get_block_and_metadata_by_height(&mut harness, &mut storage, block_height, false) + .expect("block index was not restored") + .block, + block ); } @@ -1009,62 +1641,81 @@ fn should_hard_reset() { let blocks_per_era = 3; let mut harness = ComponentHarness::default(); let mut storage = storage_fixture(&harness); + let chain_name_hash = ChainNameDigest::random(&mut harness.rng); + 
+ let random_txns: Vec<_> = iter::repeat_with(|| Transaction::random(&mut harness.rng)) + .take(blocks_count) + .collect(); // Create and store 8 blocks, 0-2 in era 0, 3-5 in era 1, and 6,7 in era 2. - let blocks: Vec = (0..blocks_count) + let blocks: Vec<_> = (0..blocks_count) .map(|height| { let is_switch = height % blocks_per_era == blocks_per_era - 1; - Block::random_with_specifics( - &mut harness.rng, - EraId::from(height as u64 / 3), - height as u64, - is_switch, - ) + TestBlockBuilder::new() + .era(height as u64 / 3) + .height(height as u64) + .switch_block(is_switch) + .transactions(iter::once( + &random_txns.get(height).expect("should_have_deploy").clone(), + )) + .build_versioned(&mut harness.rng) }) .collect(); for block in &blocks { - assert!(put_block( + assert!(put_complete_block( &mut harness, &mut storage, - Box::new(block.clone()) + block.clone(), )); } // Create and store signatures for these blocks. for block in &blocks { - let block_signatures = random_signatures(&mut harness.rng, block); + let block_signatures = random_signatures( + &mut harness.rng, + *block.hash(), + block.height(), + block.era_id(), + chain_name_hash, + ); assert!(put_block_signatures( &mut harness, &mut storage, - block_signatures + block_signatures, )); } // Add execution results to deploys; deploy 0 will be executed in block 0, deploy 1 in block 1, // and so on. 
- let mut deploys = vec![]; + let mut transactions = vec![]; let mut execution_results = vec![]; - for block_hash in blocks.iter().map(|block| block.hash()) { - let deploy = Deploy::random(&mut harness.rng); - let execution_result: ExecutionResult = harness.rng.gen(); + for (index, (block_hash, block_height, era_id)) in blocks + .iter() + .map(|block| (block.hash(), block.height(), block.era_id())) + .enumerate() + { + let transaction = random_txns.get(index).expect("should have deploys"); + let execution_result = ExecutionResult::from(ExecutionResultV2::random(&mut harness.rng)); + put_transaction(&mut harness, &mut storage, &transaction.clone()); let mut exec_results = HashMap::new(); - exec_results.insert(*deploy.id(), execution_result); - put_deploy(&mut harness, &mut storage, Box::new(deploy.clone())); + exec_results.insert(transaction.hash(), execution_result); put_execution_results( &mut harness, &mut storage, *block_hash, + block_height, + era_id, exec_results.clone(), ); - deploys.push(deploy); + transactions.push(transaction); execution_results.push(exec_results); } // Check the highest block is #7. assert_eq!( Some(blocks[blocks_count - 1].clone()), - get_highest_block(&mut harness, &mut storage) + get_highest_complete_block(&mut harness, &mut storage) ); // The closure doing the actual checks. @@ -1074,10 +1725,10 @@ fn should_hard_reset() { let mut storage = storage_fixture_with_hard_reset(&harness, EraId::from(reset_era as u64)); // Check highest block is the last from the previous era, or `None` if resetting to era 0. - let highest_block = get_highest_block(&mut harness, &mut storage); + let highest_block = get_highest_complete_block(&mut harness, &mut storage); if reset_era > 0 { assert_eq!( - blocks[blocks_per_era * reset_era - 1], + blocks[blocks_per_era * reset_era - 1].clone(), highest_block.unwrap() ); } else { @@ -1093,20 +1744,28 @@ fn should_hard_reset() { // Check signatures of deleted blocks can't be retrieved. 
for (index, block) in blocks.iter().enumerate() { - let result = get_block_signatures(&mut harness, &mut storage, *block.hash()); + let result = storage.read_block_with_signatures_by_hash(*block.hash(), false); let should_get_sigs = index < blocks_per_era * reset_era; - assert_eq!(should_get_sigs, result.is_some()); + if should_get_sigs { + assert!(!result.unwrap().block_signatures().is_empty()) + } else if let Some(signed_block) = result { + assert!(signed_block.block_signatures().is_empty()) + } } // Check execution results in deleted blocks have been removed. - for (index, deploy) in deploys.iter().enumerate() { - let (_deploy, metadata) = - get_deploy_and_metadata(&mut harness, &mut storage, *deploy.id()).unwrap(); + for (index, transaction) in transactions.iter().enumerate() { + let (_, maybe_exec_info) = + get_naive_transaction_and_execution_info(&mut storage, transaction.hash()).unwrap(); let should_have_exec_results = index < blocks_per_era * reset_era; - assert_eq!( - should_have_exec_results, - !metadata.execution_results.is_empty() - ); + match maybe_exec_info { + Some(ExecutionInfo { + execution_result, .. + }) => { + assert_eq!(should_have_exec_results, execution_result.is_some()); + } + None => assert!(!should_have_exec_results), + }; } }; @@ -1117,3 +1776,1351 @@ fn should_hard_reset() { // Test with a hard reset to era 0, deleting all blocks and associated data. 
check(0); } + +#[test] +fn should_create_subdir_named_after_network() { + let harness = ComponentHarness::default(); + let cfg = new_config(&harness); + + let network_name = "test"; + let storage = Storage::new( + &WithDir::new(harness.tmp.path(), cfg.clone()), + None, + ProtocolVersion::from_parts(1, 0, 0), + EraId::default(), + network_name, + MAX_TTL.into(), + RECENT_ERA_COUNT, + None, + false, + TransactionConfig::default(), + ) + .unwrap(); + + let expected_path = cfg.path.join(network_name); + + assert!(expected_path.exists()); + assert_eq!(expected_path, storage.root_path()); +} + +#[test] +fn should_not_try_to_move_nonexistent_files() { + let harness = ComponentHarness::default(); + let cfg = new_config(&harness); + let file_names = ["temp.txt"]; + + let expected = should_move_storage_files_to_network_subdir(&cfg.path, &file_names).unwrap(); + + assert!(!expected); +} + +#[test] +fn should_move_files_if_they_exist() { + let harness = ComponentHarness::default(); + let cfg = new_config(&harness); + let file_names = ["temp1.txt", "temp2.txt", "temp3.txt"]; + + // Storage will create this in the constructor, + // doing this manually since we're not calling the constructor in this test. + fs::create_dir(cfg.path.clone()).unwrap(); + + // create empty files for testing. + File::create(cfg.path.join(file_names[0])).unwrap(); + File::create(cfg.path.join(file_names[1])).unwrap(); + File::create(cfg.path.join(file_names[2])).unwrap(); + + let expected = should_move_storage_files_to_network_subdir(&cfg.path, &file_names).unwrap(); + + assert!(expected); +} + +#[test] +fn should_return_error_if_files_missing() { + let harness = ComponentHarness::default(); + let cfg = new_config(&harness); + let file_names = ["temp1.txt", "temp2.txt", "temp3.txt"]; + + // Storage will create this in the constructor, + // doing this manually since we're not calling the constructor in this test. 
+ fs::create_dir(cfg.path.clone()).unwrap(); + + // create empty files for testing, but not all of the files. + File::create(cfg.path.join(file_names[1])).unwrap(); + File::create(cfg.path.join(file_names[2])).unwrap(); + + let actual = should_move_storage_files_to_network_subdir(&cfg.path, &file_names); + + assert!(actual.is_err()); +} + +#[test] +fn should_actually_move_specified_files() { + let harness = ComponentHarness::default(); + let cfg = new_config(&harness); + let file_names = ["temp1.txt", "temp2.txt", "temp3.txt"]; + let root = cfg.path; + let subdir = root.join("test"); + let src_path1 = root.join(file_names[0]); + let src_path2 = root.join(file_names[1]); + let src_path3 = root.join(file_names[2]); + let dest_path1 = subdir.join(file_names[0]); + let dest_path2 = subdir.join(file_names[1]); + let dest_path3 = subdir.join(file_names[2]); + + // Storage will create this in the constructor, + // doing this manually since we're not calling the constructor in this test. + fs::create_dir_all(subdir.clone()).unwrap(); + + // create empty files for testing. + File::create(src_path1.clone()).unwrap(); + File::create(src_path2.clone()).unwrap(); + File::create(src_path3.clone()).unwrap(); + + assert!(src_path1.exists()); + assert!(src_path2.exists()); + assert!(src_path3.exists()); + + let result = move_storage_files_to_network_subdir(&root, &subdir, &file_names); + + assert!(result.is_ok()); + assert!(!src_path1.exists()); + assert!(!src_path2.exists()); + assert!(!src_path3.exists()); + assert!(dest_path1.exists()); + assert!(dest_path2.exists()); + assert!(dest_path3.exists()); +} + +#[test] +fn can_put_and_get_block() { + let mut harness = ComponentHarness::default(); + + // This test is not restricted by the block availability index. + let only_from_available_block_range = false; + + // Create a random block, store and load it. 
+ let block = TestBlockBuilder::new().build(&mut harness.rng); + + let mut storage = storage_fixture(&harness); + + let was_new = put_complete_block(&mut harness, &mut storage, block.clone().into()); + assert!(was_new, "putting block should have returned `true`"); + + // Storing the same block again should work, but yield a result of `true`. + let was_new_second_time = put_complete_block(&mut harness, &mut storage, block.clone().into()); + assert!( + was_new_second_time, + "storing block the second time should have returned `true`" + ); + + let response = + get_block(&mut harness, &mut storage, *block.hash()).expect("should get response"); + let response: BlockV2 = response.try_into().expect("should get BlockV2"); + assert_eq!(response, block); + + // Also ensure we can retrieve just the header. + let response = harness.send_request(&mut storage, |responder| { + StorageRequest::GetBlockHeader { + block_hash: *block.hash(), + only_from_available_block_range, + responder, + } + .into() + }); + + assert_eq!(response.as_ref(), Some(&block.header().clone().into())); +} + +#[test] +fn should_get_trusted_ancestor_headers() { + let (storage, _, blocks) = create_sync_leap_test_chain(&[], false, None); + + let get_results = |requested_height: usize| -> Vec { + let txn = storage.block_store.checkout_ro().unwrap(); + let requested_block_header = blocks.get(requested_height).unwrap().clone_header(); + storage + .get_trusted_ancestor_headers(&txn, &requested_block_header) + .unwrap() + .unwrap() + .iter() + .map(|block_header| block_header.height()) + .collect() + }; + + assert_eq!(get_results(7), &[6, 5, 4]); + assert_eq!(get_results(9), &[8, 7]); + assert_eq!(get_results(5), &[4]); +} + +#[test] +fn should_get_block_headers_with_signatures() { + let (storage, _, blocks) = create_sync_leap_test_chain(&[], false, None); + + let get_results = |requested_height: usize| -> Vec { + let txn = storage.block_store.checkout_ro().unwrap(); + let requested_block_header = 
blocks.get(requested_height).unwrap().clone_header(); + let highest_block_header_with_sufficient_signatures = storage + .get_highest_complete_block_header_with_signatures(&txn) + .unwrap() + .unwrap(); + storage + .get_block_headers_with_signatures( + &txn, + &requested_block_header, + &highest_block_header_with_sufficient_signatures, + ) + .unwrap() + .unwrap() + .iter() + .map(|block_header_with_signatures| { + block_header_with_signatures.block_header().height() + }) + .collect() + }; + + assert!( + get_results(12).is_empty(), + "should return empty set if asked for a most recent signed block" + ); + assert_eq!(get_results(5), &[7, 10, 12]); + assert_eq!(get_results(2), &[4, 7, 10, 12]); + assert_eq!(get_results(1), &[4, 7, 10, 12]); + assert_eq!( + get_results(10), + &[12], + "should return only tip if asked for a most recent switch block" + ); + assert_eq!( + get_results(7), + &[10, 12], + "should not include switch block that was directly requested" + ); +} + +#[test] +fn should_get_block_headers_with_signatures_when_no_sufficient_finality_in_most_recent_block() { + let (storage, _, blocks) = create_sync_leap_test_chain(&[12], false, None); + + let get_results = |requested_height: usize| -> Vec { + let txn = storage.block_store.checkout_ro().unwrap(); + let requested_block_header = blocks.get(requested_height).unwrap().clone_header(); + let highest_block_header_with_sufficient_signatures = storage + .get_highest_complete_block_header_with_signatures(&txn) + .unwrap() + .unwrap(); + + storage + .get_block_headers_with_signatures( + &txn, + &requested_block_header, + &highest_block_header_with_sufficient_signatures, + ) + .unwrap() + .unwrap() + .iter() + .map(|block_header_with_signatures| { + block_header_with_signatures.block_header().height() + }) + .collect() + }; + + assert!( + get_results(11).is_empty(), + "should return empty set if asked for a most recent signed block", + ); + assert_eq!(get_results(5), &[7, 10, 11]); + assert_eq!(get_results(2), &[4, 
7, 10, 11]); + assert_eq!(get_results(1), &[4, 7, 10, 11]); + assert_eq!( + get_results(10), + &[11], + "should return only tip if asked for a most recent switch block" + ); + assert_eq!( + get_results(7), + &[10, 11], + "should not include switch block that was directly requested" + ); +} + +#[test] +fn should_get_sync_leap() { + let (storage, chainspec, blocks) = create_sync_leap_test_chain(&[], false, None); + + let requested_block_hash = *blocks.get(6).unwrap().hash(); + let sync_leap_identifier = SyncLeapIdentifier::sync_to_tip(requested_block_hash); + let sync_leap_result = storage.get_sync_leap(sync_leap_identifier).unwrap(); + + let sync_leap = match sync_leap_result { + FetchResponse::Fetched(sync_leap) => sync_leap, + _ => panic!("should have leap sync"), + }; + + assert_eq!(sync_leap.trusted_block_header.height(), 6); + assert_eq!( + block_headers_into_heights(&sync_leap.trusted_ancestor_headers), + vec![5, 4], + ); + assert_eq!( + block_headers_with_signatures_into_heights(&sync_leap.block_headers_with_signatures), + vec![7, 10, 12] + ); + + sync_leap + .validate(&SyncLeapValidationMetaData::from_chainspec(&chainspec)) + .unwrap(); +} + +#[test] +fn sync_leap_block_headers_with_signatures_should_be_empty_when_asked_for_a_tip() { + let (storage, chainspec, blocks) = create_sync_leap_test_chain(&[], false, None); + + let requested_block_hash = *blocks.get(12).unwrap().hash(); + let sync_leap_identifier = SyncLeapIdentifier::sync_to_tip(requested_block_hash); + let sync_leap_result = storage.get_sync_leap(sync_leap_identifier).unwrap(); + + let sync_leap = match sync_leap_result { + FetchResponse::Fetched(sync_leap) => sync_leap, + _ => panic!("should have leap sync"), + }; + + assert_eq!(sync_leap.trusted_block_header.height(), 12); + assert_eq!( + block_headers_into_heights(&sync_leap.trusted_ancestor_headers), + vec![11, 10], + ); + assert!( + block_headers_with_signatures_into_heights(&sync_leap.block_headers_with_signatures) + .is_empty() + ); + + 
sync_leap + .validate(&SyncLeapValidationMetaData::from_chainspec(&chainspec)) + .unwrap(); +} + +#[test] +fn sync_leap_should_populate_trusted_ancestor_headers_if_tip_is_a_switch_block() { + let (storage, chainspec, blocks) = create_sync_leap_test_chain(&[], true, None); + + let requested_block_hash = *blocks.get(13).unwrap().hash(); + let sync_leap_identifier = SyncLeapIdentifier::sync_to_tip(requested_block_hash); + let sync_leap_result = storage.get_sync_leap(sync_leap_identifier).unwrap(); + + let sync_leap = match sync_leap_result { + FetchResponse::Fetched(sync_leap) => sync_leap, + _ => panic!("should have leap sync"), + }; + + assert_eq!(sync_leap.trusted_block_header.height(), 13); + assert_eq!( + block_headers_into_heights(&sync_leap.trusted_ancestor_headers), + vec![12, 11, 10], + ); + assert!( + block_headers_with_signatures_into_heights(&sync_leap.block_headers_with_signatures) + .is_empty() + ); + + sync_leap + .validate(&SyncLeapValidationMetaData::from_chainspec(&chainspec)) + .unwrap(); +} + +#[test] +fn should_respect_allowed_era_diff_in_get_sync_leap() { + let maybe_recent_era_count = Some(1); + let (storage, _, blocks) = create_sync_leap_test_chain(&[], false, maybe_recent_era_count); + + let requested_block_hash = *blocks.get(6).unwrap().hash(); + let sync_leap_identifier = SyncLeapIdentifier::sync_to_tip(requested_block_hash); + let sync_leap_result = storage.get_sync_leap(sync_leap_identifier).unwrap(); + + assert!( + matches!(sync_leap_result, FetchResponse::NotProvided(_)), + "should not have sync leap" + ); +} + +#[test] +fn should_restrict_returned_blocks() { + let mut harness = ComponentHarness::default(); + let mut storage = storage_fixture(&harness); + + // Create the following disjoint sequences: 1-2 4-5 + IntoIterator::into_iter([1, 2, 4, 5]).for_each(|height| { + let block = TestBlockBuilder::new() + .era(1) + .height(height) + .protocol_version(ProtocolVersion::from_parts(1, 5, 0)) + .switch_block(false) + .build_versioned(&mut 
harness.rng); + + let was_new = put_complete_block(&mut harness, &mut storage, block); + assert!(was_new); + }); + + // Without restriction, the node should attempt to return any requested block + // regardless if it is in the disjoint sequences. + assert!(storage.should_return_block(0, false)); + assert!(storage.should_return_block(1, false)); + assert!(storage.should_return_block(2, false)); + assert!(storage.should_return_block(3, false)); + assert!(storage.should_return_block(4, false)); + assert!(storage.should_return_block(5, false)); + assert!(storage.should_return_block(6, false)); + + // With restriction, the node should attempt to return only the blocks that are + // on the highest disjoint sequence, i.e blocks 4 and 5 only. + assert!(!storage.should_return_block(0, true)); + assert!(!storage.should_return_block(1, true)); + assert!(!storage.should_return_block(2, true)); + assert!(!storage.should_return_block(3, true)); + assert!(storage.should_return_block(4, true)); + assert!(storage.should_return_block(5, true)); + assert!(!storage.should_return_block(6, true)); +} + +#[test] +fn should_get_block_header_by_height() { + let mut harness = ComponentHarness::default(); + let mut storage = storage_fixture(&harness); + + let block = TestBlockBuilder::new().build_versioned(&mut harness.rng); + let expected_header = block.clone_header(); + let height = block.height(); + + // Requesting the block header before it is in storage should return None. + assert!(get_block_header_by_height(&mut harness, &mut storage, height, false).is_none()); + + let was_new = put_complete_block(&mut harness, &mut storage, block); + assert!(was_new); + + // Requesting the block header after it is in storage should return the block header. 
+ let maybe_block_header = get_block_header_by_height(&mut harness, &mut storage, height, false); + assert!(maybe_block_header.is_some()); + assert_eq!(expected_header, maybe_block_header.unwrap()); +} + +#[ignore] +#[test] +fn check_force_resync_with_marker_file() { + let mut harness = ComponentHarness::default(); + let mut storage = storage_fixture(&harness); + let cfg = WithDir::new(harness.tmp.path(), new_config(&harness)); + let force_resync_file_path = storage.root_path().join(FORCE_RESYNC_FILE_NAME); + assert!(!force_resync_file_path.exists()); + + // Add a couple of blocks into storage. + let first_block = TestBlockBuilder::new().build_versioned(&mut harness.rng); + put_complete_block(&mut harness, &mut storage, first_block.clone()); + let second_block = loop { + // We need to make sure that the second random block has different height than the first + // one. + let block = TestBlockBuilder::new().build_versioned(&mut harness.rng); + if block.height() != first_block.height() { + break block; + } + }; + put_complete_block(&mut harness, &mut storage, second_block); + // Make sure the completed blocks are not the default anymore. + assert_ne!( + storage.get_available_block_range(), + AvailableBlockRange::RANGE_0_0 + ); + storage.persist_completed_blocks().unwrap(); + drop(storage); + + // The force resync marker file should not exist yet. + assert!(!force_resync_file_path.exists()); + // Reinitialize storage with force resync enabled. + let mut storage = storage_fixture_with_force_resync(&cfg); + // The marker file should be there now. + assert!(force_resync_file_path.exists()); + // Completed blocks has now been defaulted. + assert_eq!( + storage.get_available_block_range(), + AvailableBlockRange::RANGE_0_0 + ); + let first_block_height = first_block.height(); + // Add a block into storage. 
+ put_complete_block(&mut harness, &mut storage, first_block); + assert_eq!( + storage.get_available_block_range(), + AvailableBlockRange::new(first_block_height, first_block_height) + ); + storage.persist_completed_blocks().unwrap(); + drop(storage); + + // We didn't remove the marker file, so it should still be there. + assert!(force_resync_file_path.exists()); + // Reinitialize storage with force resync enabled. + let storage = storage_fixture_with_force_resync(&cfg); + assert!(force_resync_file_path.exists()); + // The completed blocks didn't default this time as the marker file was + // present. + assert_eq!( + storage.get_available_block_range(), + AvailableBlockRange::new(first_block_height, first_block_height) + ); + drop(storage); + // Remove the marker file. + fs::remove_file(&force_resync_file_path).unwrap(); + assert!(!force_resync_file_path.exists()); + + // Reinitialize storage with force resync enabled. + let storage = storage_fixture_with_force_resync(&cfg); + // The marker file didn't exist, so it was created. + assert!(force_resync_file_path.exists()); + // Completed blocks was defaulted again. + assert_eq!( + storage.get_available_block_range(), + AvailableBlockRange::RANGE_0_0 + ); +} + +// Clippy complains because there's a `OnceCell` in `FinalitySignature`, hence it should not be used +// as a key in `BTreeSet`. However, we don't change the content of the cell during the course of the +// test so there's no risk the hash or order of keys will change. 
+#[allow(clippy::mutable_key_type)] +#[track_caller] +fn assert_signatures(storage: &Storage, block_hash: BlockHash, expected: Vec) { + let actual = storage.get_finality_signatures_for_block(block_hash); + let actual = actual.map_or(BTreeSet::new(), |signatures| { + signatures.finality_signatures().collect() + }); + let expected: BTreeSet<_> = expected.into_iter().collect(); + assert_eq!(actual, expected); +} + +#[test] +fn store_and_purge_signatures() { + let mut harness = ComponentHarness::default(); + let mut storage = storage_fixture(&harness); + let chain_name_hash = ChainNameDigest::random(&mut harness.rng); + + let block_1 = TestBlockBuilder::new().build(&mut harness.rng); + let fs_1_1 = FinalitySignatureV2::random_for_block( + *block_1.hash(), + block_1.height(), + block_1.header().era_id(), + chain_name_hash, + &mut harness.rng, + ); + let fs_1_2 = FinalitySignatureV2::random_for_block( + *block_1.hash(), + block_1.height(), + block_1.header().era_id(), + chain_name_hash, + &mut harness.rng, + ); + + let block_2 = TestBlockBuilder::new().build(&mut harness.rng); + let fs_2_1 = FinalitySignatureV2::random_for_block( + *block_2.hash(), + block_2.height(), + block_2.header().era_id(), + chain_name_hash, + &mut harness.rng, + ); + let fs_2_2 = FinalitySignatureV2::random_for_block( + *block_2.hash(), + block_2.height(), + block_2.header().era_id(), + chain_name_hash, + &mut harness.rng, + ); + + let block_3 = TestBlockBuilder::new().build(&mut harness.rng); + let fs_3_1 = FinalitySignatureV2::random_for_block( + *block_3.hash(), + block_3.height(), + block_3.header().era_id(), + chain_name_hash, + &mut harness.rng, + ); + let fs_3_2 = FinalitySignatureV2::random_for_block( + *block_3.hash(), + block_3.height(), + block_3.header().era_id(), + chain_name_hash, + &mut harness.rng, + ); + + let block_4 = TestBlockBuilder::new().build(&mut harness.rng); + + let _ = put_finality_signature(&mut harness, &mut storage, Box::new(fs_1_1.clone().into())); + let _ = 
put_finality_signature(&mut harness, &mut storage, Box::new(fs_1_2.clone().into())); + let _ = put_finality_signature(&mut harness, &mut storage, Box::new(fs_2_1.clone().into())); + let _ = put_finality_signature(&mut harness, &mut storage, Box::new(fs_2_2.clone().into())); + let _ = put_finality_signature(&mut harness, &mut storage, Box::new(fs_3_1.clone().into())); + let _ = put_finality_signature(&mut harness, &mut storage, Box::new(fs_3_2.clone().into())); + + assert_signatures( + &storage, + *block_1.hash(), + vec![fs_1_1.into(), fs_1_2.into()], + ); + assert_signatures( + &storage, + *block_2.hash(), + vec![fs_2_1.clone().into(), fs_2_2.clone().into()], + ); + assert_signatures( + &storage, + *block_3.hash(), + vec![fs_3_1.clone().into(), fs_3_2.clone().into()], + ); + assert_signatures(&storage, *block_4.hash(), vec![]); + + // Purging for block_1 should leave sigs for block_2 and block_3 intact. + let mut writer = storage.block_store.checkout_rw().unwrap(); + let _ = DataWriter::::delete(&mut writer, *block_1.hash()); + writer.commit().unwrap(); + assert_signatures(&storage, *block_1.hash(), vec![]); + assert_signatures( + &storage, + *block_2.hash(), + vec![fs_2_1.clone().into(), fs_2_2.clone().into()], + ); + assert_signatures( + &storage, + *block_3.hash(), + vec![fs_3_1.clone().into(), fs_3_2.clone().into()], + ); + assert_signatures(&storage, *block_4.hash(), vec![]); + + // Purging for block_4 (which has no signatures) should not modify state. + let mut writer = storage.block_store.checkout_rw().unwrap(); + let _ = DataWriter::::delete(&mut writer, *block_4.hash()); + writer.commit().unwrap(); + assert_signatures(&storage, *block_1.hash(), vec![]); + assert_signatures( + &storage, + *block_2.hash(), + vec![fs_2_1.into(), fs_2_2.into()], + ); + assert_signatures( + &storage, + *block_3.hash(), + vec![fs_3_1.into(), fs_3_2.into()], + ); + assert_signatures(&storage, *block_4.hash(), vec![]); + + // Purging for all blocks should leave no signatures. 
+ let mut writer = storage.block_store.checkout_rw().unwrap(); + let _ = DataWriter::::delete(&mut writer, *block_1.hash()); + let _ = DataWriter::::delete(&mut writer, *block_2.hash()); + let _ = DataWriter::::delete(&mut writer, *block_3.hash()); + let _ = DataWriter::::delete(&mut writer, *block_4.hash()); + writer.commit().unwrap(); + + assert_signatures(&storage, *block_1.hash(), vec![]); + assert_signatures(&storage, *block_2.hash(), vec![]); + assert_signatures(&storage, *block_3.hash(), vec![]); + assert_signatures(&storage, *block_4.hash(), vec![]); +} + +fn copy_dir_recursive(src: impl AsRef, dest: impl AsRef) -> io::Result<()> { + fs::create_dir_all(&dest)?; + for entry in fs::read_dir(src)? { + let entry = entry?; + if entry.file_type()?.is_dir() { + copy_dir_recursive(entry.path(), dest.as_ref().join(entry.file_name()))?; + } else { + fs::copy(entry.path(), dest.as_ref().join(entry.file_name()))?; + } + } + Ok(()) +} + +#[test] +fn can_retrieve_block_by_height_with_different_block_versions() { + let mut harness = ComponentHarness::default(); + + // BlockV1 as a versioned Block + let block_14 = TestBlockV1Builder::new() + .era(1) + .height(14) + .switch_block(false) + .build(&mut harness.rng); + + // BlockV2 as a versioned Block + let block_v2_33 = TestBlockBuilder::new() + .era(1) + .height(33) + .switch_block(true) + .build_versioned(&mut harness.rng); + let block_33: Block = block_v2_33.clone(); + + // BlockV2 + let block_v2_99 = TestBlockBuilder::new() + .era(2) + .height(99) + .switch_block(true) + .build_versioned(&mut harness.rng); + let block_99: Block = block_v2_99.clone(); + + let mut storage = storage_fixture(&harness); + + assert!(get_block(&mut harness, &mut storage, *block_14.hash()).is_none()); + assert!(get_block(&mut harness, &mut storage, *block_v2_33.hash()).is_none()); + assert!(get_block(&mut harness, &mut storage, *block_v2_99.hash()).is_none()); + assert!(!is_block_stored( + &mut harness, + &mut storage, + *block_14.hash(), + )); 
+ assert!(!is_block_stored( + &mut harness, + &mut storage, + *block_v2_33.hash(), + )); + assert!(!is_block_stored( + &mut harness, + &mut storage, + *block_v2_99.hash(), + )); + + let was_new = put_block(&mut harness, &mut storage, Arc::new(block_33.clone())); + assert!(was_new); + assert!(mark_block_complete( + &mut harness, + &mut storage, + block_v2_33.height(), + )); + + // block is of the current version so it should be returned + let block = + get_block(&mut harness, &mut storage, *block_v2_33.hash()).expect("should have block"); + assert!(matches!(block, Block::V2(_))); + + // block is stored since it was returned before + assert!(is_block_stored( + &mut harness, + &mut storage, + *block_v2_33.hash(), + )); + + assert_eq!( + get_highest_complete_block(&mut harness, &mut storage).as_ref(), + Some(&block_33) + ); + assert_eq!( + get_highest_complete_block_header(&mut harness, &mut storage).as_ref(), + Some(&block_v2_33.clone_header()) + ); + assert!(get_block_and_metadata_by_height(&mut harness, &mut storage, 0, false).is_none()); + assert!(get_block_header_by_height(&mut harness, &mut storage, 0, false).is_none()); + assert!(get_block_and_metadata_by_height(&mut harness, &mut storage, 14, false).is_none()); + assert!(get_block_header_by_height(&mut harness, &mut storage, 14, false).is_none()); + assert_eq!( + get_block_and_metadata_by_height(&mut harness, &mut storage, 33, false) + .unwrap() + .block, + block_33 + ); + assert_eq!( + get_block_header_by_height(&mut harness, &mut storage, 33, true).as_ref(), + Some(&block_v2_33.clone_header()) + ); + assert!(get_block_and_metadata_by_height(&mut harness, &mut storage, 99, false).is_none()); + assert!(get_block_header_by_height(&mut harness, &mut storage, 99, false).is_none()); + + let was_new = put_block( + &mut harness, + &mut storage, + Arc::new(Block::from(block_14.clone())), + ); + assert!(was_new); + + // block is not of the current version so don't return it + let block = get_block(&mut harness, &mut 
storage, *block_14.hash()).expect("should have block"); + assert!(matches!(block, Block::V1(_))); + + // block should be stored as versioned and should be returned + assert!(get_block(&mut harness, &mut storage, *block_14.hash()).is_some()); + + // block is stored since it was returned before + assert!(is_block_stored( + &mut harness, + &mut storage, + *block_14.hash(), + )); + + assert_eq!( + get_highest_complete_block(&mut harness, &mut storage).as_ref(), + Some(&block_33) + ); + assert_eq!( + get_highest_complete_block_header(&mut harness, &mut storage).as_ref(), + Some(&block_v2_33.clone_header()) + ); + assert!(get_block_and_metadata_by_height(&mut harness, &mut storage, 0, false).is_none()); + assert!(get_block_header_by_height(&mut harness, &mut storage, 0, false).is_none()); + assert_eq!( + get_block_and_metadata_by_height(&mut harness, &mut storage, 14, false) + .unwrap() + .block, + Block::from(block_14.clone()) + ); + assert_eq!( + get_block_header_by_height(&mut harness, &mut storage, 14, true).as_ref(), + None + ); + assert_eq!( + get_block_header_by_height(&mut harness, &mut storage, 14, false).as_ref(), + Some(&block_14.header().clone().into()) + ); + assert_eq!( + get_block_and_metadata_by_height(&mut harness, &mut storage, 33, false) + .unwrap() + .block, + block_33 + ); + assert_eq!( + get_block_header_by_height(&mut harness, &mut storage, 33, false).as_ref(), + Some(&block_v2_33.clone_header()) + ); + assert!(get_block_and_metadata_by_height(&mut harness, &mut storage, 99, false).is_none()); + assert!(get_block_header_by_height(&mut harness, &mut storage, 99, false).is_none()); + + // Inserting block with height 99, changes highest. + let was_new = put_complete_block(&mut harness, &mut storage, block_v2_99.clone()); + // Mark block 99 as complete. 
+ storage.completed_blocks.insert(99); + assert!(was_new); + + assert_eq!( + get_highest_complete_block(&mut harness, &mut storage).as_ref(), + Some(&(block_v2_99)) + ); + assert_eq!( + get_highest_complete_block_header(&mut harness, &mut storage).as_ref(), + Some(&block_v2_99.clone_header()) + ); + assert!(get_block_and_metadata_by_height(&mut harness, &mut storage, 0, false).is_none()); + assert!(get_block_header_by_height(&mut harness, &mut storage, 0, false).is_none()); + assert_eq!( + get_block_and_metadata_by_height(&mut harness, &mut storage, 14, false) + .unwrap() + .block, + Block::from(block_14.clone()) + ); + assert_eq!( + get_block_header_by_height(&mut harness, &mut storage, 14, false).as_ref(), + Some(&block_14.header().clone().into()) + ); + assert_eq!( + get_block_and_metadata_by_height(&mut harness, &mut storage, 33, false) + .unwrap() + .block, + block_33 + ); + assert_eq!( + get_block_header_by_height(&mut harness, &mut storage, 33, false).as_ref(), + Some(&block_v2_33.clone_header()) + ); + assert_eq!( + get_block_and_metadata_by_height(&mut harness, &mut storage, 99, false) + .unwrap() + .block, + block_99 + ); + assert_eq!( + get_block_header_by_height(&mut harness, &mut storage, 99, false).as_ref(), + Some(&block_v2_99.clone_header()) + ); +} + +static TEST_STORAGE_DIR_1_5_2: Lazy = Lazy::new(|| { + PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("../resources/test/storage/1.5.2/storage-1") +}); +static STORAGE_INFO_FILE_NAME: &str = "storage_info.json"; + +#[derive(Serialize, Deserialize, Debug)] +struct Node1_5_2BlockInfo { + height: u64, + era: EraId, + approvals_hashes: Option>, + signatures: Option, + deploy_hashes: Vec, +} + +// Summary information about the context of a database +#[derive(Serialize, Deserialize, Debug)] +struct Node1_5_2StorageInfo { + net_name: String, + protocol_version: ProtocolVersion, + block_range: (u64, u64), + blocks: HashMap, + deploys: Vec, +} + +impl Node1_5_2StorageInfo { + fn from_file(path: impl AsRef) -> 
Result { + Ok(serde_json::from_slice(fs::read(path)?.as_slice()).expect("Malformed JSON")) + } +} + +// Use the storage component APIs to determine if a block is or is not in storage. +fn assert_block_exists_in_storage( + harness: &mut ComponentHarness, + storage: &mut Storage, + block_hash: &BlockHash, + block_height: u64, + only_from_available_block_range: bool, + expect_exists_as_latest_version: bool, + expect_exists_as_versioned: bool, +) { + let expect_exists = expect_exists_as_latest_version || expect_exists_as_versioned; + + // Check if the block is stored at all + assert_eq!( + is_block_stored(harness, storage, *block_hash), + expect_exists + ); + + // GetBlock should return only blocks from storage that are of the current version. + assert_eq!( + get_block(harness, storage, *block_hash).is_some_and(|block| matches!(block, Block::V2(_))), + expect_exists_as_latest_version + ); + + // Check if we can get the block as a versioned Block. + let block = get_block(harness, storage, *block_hash); + assert_eq!(block.is_some_and(|_| true), expect_exists_as_versioned); + + // Check if the header can be fetched from storage. 
+ assert_eq!( + get_block_header( + harness, + storage, + *block_hash, + only_from_available_block_range, + ) + .is_some_and(|_| true), + expect_exists + ); + assert_eq!( + get_block_header(harness, storage, *block_hash, false).is_some_and(|_| true), + expect_exists + ); + assert_eq!( + storage + .read_block_header_by_hash(block_hash) + .unwrap() + .is_some_and(|_| true), + expect_exists + ); + + assert_eq!( + get_block_header_by_height( + harness, + storage, + block_height, + only_from_available_block_range, + ) + .is_some_and(|_| true), + expect_exists + ); + assert_eq!( + storage + .read_block_header_by_height(block_height, only_from_available_block_range) + .unwrap() + .is_some_and(|_| true), + expect_exists + ); + assert_eq!( + storage + .read_block_header_by_height(block_height, false) + .unwrap() + .is_some_and(|_| true), + expect_exists + ); + + if expect_exists { + assert_eq!( + storage + .read_block_with_signatures_by_height(block_height, false) + .unwrap() + .block() + .hash(), + block_hash + ); + assert_eq!( + storage + .read_block_with_signatures_by_hash(*block_hash, only_from_available_block_range) + .unwrap() + .block() + .height(), + block_height + ); + assert_eq!( + storage + .read_block_with_signatures_by_height(block_height, false) + .unwrap() + .block() + .hash(), + block_hash + ); + assert_eq!( + storage + .read_block_with_signatures_by_height(block_height, only_from_available_block_range) + .unwrap() + .block() + .hash(), + block_hash + ); + } +} + +// Use the storage component APIs to determine if the highest stored block is the one expected. 
+fn assert_highest_block_in_storage( + harness: &mut ComponentHarness, + storage: &mut Storage, + only_from_available_block_range: bool, + expected_block_hash: &BlockHash, + expected_block_height: u64, +) { + assert_eq!( + get_highest_complete_block_header(harness, storage) + .unwrap() + .height(), + expected_block_height + ); + let highest_block_header = storage.read_highest_block_header().unwrap(); + assert_eq!(highest_block_header.block_hash(), *expected_block_hash); + assert_eq!(highest_block_header.height(), expected_block_height); + assert_eq!( + get_highest_complete_block(harness, storage).unwrap().hash(), + expected_block_hash + ); + assert_eq!( + get_block_and_metadata_by_height(harness, storage, expected_block_height, false) + .unwrap() + .block + .hash(), + expected_block_hash + ); + + if only_from_available_block_range { + assert_eq!( + storage + .read_highest_block_with_signatures(true) + .unwrap() + .block() + .hash(), + expected_block_hash + ); + + assert_eq!( + get_highest_complete_block(harness, storage).unwrap().hash(), + expected_block_hash + ); + } + assert_eq!( + storage + .read_highest_block_with_signatures(false) + .unwrap() + .block() + .hash(), + expected_block_hash + ); +} + +#[test] +// Starting with node 2.0, the `Block` struct is versioned. +// Since this change impacts the storage APIs, create a test to prove that we can still access old +// unversioned blocks through the new APIs and also check that both versioned and unversioned blocks +// can co-exist in storage. 
+#[ignore = "stop ignoring once decision around Transfer type is made"] +fn check_block_operations_with_node_1_5_2_storage() { + let rng: TestRng = TestRng::new(); + + let temp_dir = tempdir().unwrap(); + copy_dir_recursive(TEST_STORAGE_DIR_1_5_2.as_path(), temp_dir.path()).unwrap(); + let storage_info = + Node1_5_2StorageInfo::from_file(temp_dir.path().join(STORAGE_INFO_FILE_NAME)).unwrap(); + let mut harness = ComponentHarness::builder() + .on_disk(temp_dir) + .rng(rng) + .build(); + let mut storage = storage_fixture_from_parts( + &harness, + None, + Some(ProtocolVersion::from_parts(2, 0, 0)), + Some(storage_info.net_name.as_str()), + None, + None, + ); + let chain_name_hash = ChainNameDigest::random(&mut harness.rng); + + // Check that legacy blocks appear in the available range + let available_range = get_available_block_range(&mut harness, &mut storage); + assert_eq!(available_range.low(), storage_info.block_range.0); + assert_eq!(available_range.high(), storage_info.block_range.1); + + // Check that all legacy blocks can be read as Versioned blocks with version set to V1 + for (hash, block_info) in storage_info.blocks.iter() { + // Since all blocks in this db are V1, the blocks should exist as versioned blocks only. 
+ assert_block_exists_in_storage( + &mut harness, + &mut storage, + hash, + block_info.height, + true, + false, + true, + ); + + // Check version + let block = get_block(&mut harness, &mut storage, *hash).unwrap(); + assert!(matches!(block, Block::V1(_))); + + assert_eq!(block.height(), block_info.height); + + let approvals_hashes = get_approvals_hashes(&mut harness, &mut storage, *hash); + if let Some(expected_approvals_hashes) = &block_info.approvals_hashes { + let stored_approvals_hashes = approvals_hashes.unwrap().approvals_hashes().to_vec(); + assert_eq!(stored_approvals_hashes, expected_approvals_hashes.clone()); + } + + let transfers = get_block_transfers(&mut harness, &mut storage, *hash); + if !block_info.deploy_hashes.is_empty() { + let mut stored_transfers: Vec = transfers + .unwrap() + .iter() + .map(|transfer| match transfer { + Transfer::V1(transfer_v1) => transfer_v1.deploy_hash, + _ => panic!("expected transfer v1 variant"), + }) + .collect(); + stored_transfers.sort(); + let mut expected_deploys = block_info.deploy_hashes.clone(); + expected_deploys.sort(); + assert_eq!(stored_transfers, expected_deploys); + } + + if let Some(expected_signatures) = &block_info.signatures { + for expected_signature in expected_signatures.finality_signatures() { + let stored_signature = get_block_signature( + &mut harness, + &mut storage, + *hash, + Box::new(expected_signature.public_key().clone()), + ) + .unwrap(); + assert_eq!(stored_signature, expected_signature); + } + } + } + + let highest_expected_block_hash = storage_info + .blocks + .iter() + .find_map(|(hash, info)| (info.height == storage_info.block_range.1).then_some(*hash)) + .unwrap(); + + assert_highest_block_in_storage( + &mut harness, + &mut storage, + true, + &highest_expected_block_hash, + storage_info.block_range.1, + ); + + assert!(storage.read_highest_block().is_some()); + assert!(storage.get_highest_complete_block().unwrap().is_some()); + assert!(get_highest_complete_block(&mut harness, &mut 
storage).is_some()); + assert!(storage.read_highest_block().is_some()); + assert!(get_highest_complete_block_header(&mut harness, &mut storage).is_some()); + assert!(storage.read_highest_block_header().is_some()); + assert_eq!( + storage.read_highest_block().unwrap().height(), + storage_info.block_range.1 + ); + + let mut lowest_stored_block_height = storage_info.block_range.0; + for height in 0..storage_info.block_range.0 { + if get_block_header_by_height(&mut harness, &mut storage, height, false).is_some() { + lowest_stored_block_height = height; + break; + } + } + + // Now add some blocks and test if they can be retrieved correctly + if let Some(new_lowest_height) = lowest_stored_block_height.checked_sub(1) { + // Add a BlockV1 that precedes the lowest available block + let new_lowest_block: Arc = Arc::new( + TestBlockV1Builder::new() + .era(1) + .height(new_lowest_height) + .switch_block(false) + .build_versioned(&mut harness.rng), + ); + + // First check that the block doesn't exist. + assert_block_exists_in_storage( + &mut harness, + &mut storage, + new_lowest_block.hash(), + new_lowest_height, + false, + false, + false, + ); + + // Put the block to storage. + let was_new = put_block(&mut harness, &mut storage, new_lowest_block.clone()); + assert!(was_new); + + let block_signatures = random_signatures( + &mut harness.rng, + *new_lowest_block.hash(), + new_lowest_block.height(), + new_lowest_block.era_id(), + chain_name_hash, + ); + assert!(put_block_signatures( + &mut harness, + &mut storage, + block_signatures, + )); + + // Check that the block was stored and can be fetched as a versioned Block. 
+ assert_block_exists_in_storage( + &mut harness, + &mut storage, + new_lowest_block.hash(), + new_lowest_height, + false, + false, + true, + ); + } + + { + let new_highest_block_height = storage.read_highest_block().unwrap().height() + 1; + + // Add a BlockV2 as a versioned block + let new_highest_block: Arc = Arc::new( + TestBlockBuilder::new() + .era(50) + .height(new_highest_block_height) + .switch_block(true) + .build_versioned(&mut harness.rng), + ); + + // First check that the block doesn't exist. + assert_block_exists_in_storage( + &mut harness, + &mut storage, + new_highest_block.hash(), + new_highest_block_height, + false, + false, + false, + ); + + let was_new = put_block(&mut harness, &mut storage, new_highest_block.clone()); + assert!(was_new); + + let block_signatures = random_signatures( + &mut harness.rng, + *new_highest_block.hash(), + new_highest_block.height(), + new_highest_block.era_id(), + chain_name_hash, + ); + assert!(put_block_signatures( + &mut harness, + &mut storage, + block_signatures, + )); + + // Check that the block was stored and can be fetched as a versioned Block or + // as a block at the latest version. + assert_block_exists_in_storage( + &mut harness, + &mut storage, + new_highest_block.hash(), + new_highest_block_height, + false, + true, + true, + ); + + assert_eq!( + storage.read_highest_block().unwrap().height(), + new_highest_block_height + ); + } + + { + let new_highest_block_height = storage.read_highest_block().unwrap().height() + 1; + + // Add a BlockV2 as a unversioned block + let new_highest_block = TestBlockBuilder::new() + .era(51) + .height(new_highest_block_height) + .switch_block(false) + .build(&mut harness.rng); + + // First check that the block doesn't exist. + assert_block_exists_in_storage( + &mut harness, + &mut storage, + new_highest_block.hash(), + new_highest_block_height, + false, + false, + false, + ); + + // Insert the block and mark it complete. 
+ let was_new = + put_complete_block(&mut harness, &mut storage, new_highest_block.clone().into()); + assert!(was_new); + let block_signatures = random_signatures( + &mut harness.rng, + *new_highest_block.hash(), + new_highest_block.height(), + new_highest_block.era_id(), + chain_name_hash, + ); + assert!(put_block_signatures( + &mut harness, + &mut storage, + block_signatures, + )); + + // Check that the block was stored and can be fetched as a versioned Block or + // as a block at the latest version. + assert_block_exists_in_storage( + &mut harness, + &mut storage, + new_highest_block.hash(), + new_highest_block_height, + true, + true, + true, + ); + + assert_eq!( + storage.read_highest_block().unwrap().height(), + new_highest_block_height + ); + + let available_range = get_available_block_range(&mut harness, &mut storage); + assert_eq!(available_range.high(), new_highest_block_height); + + assert_highest_block_in_storage( + &mut harness, + &mut storage, + true, + new_highest_block.hash(), + new_highest_block_height, + ); + } +} diff --git a/node/src/components/storage/utils.rs b/node/src/components/storage/utils.rs new file mode 100644 index 0000000000..0c16f942b7 --- /dev/null +++ b/node/src/components/storage/utils.rs @@ -0,0 +1,9 @@ +use casper_binary_port::RecordId; +use casper_storage::{DbTableId, UnknownDbTableId}; +use std::convert::TryFrom; + +pub(crate) fn db_table_id_from_record_id( + record_id: RecordId, +) -> Result { + DbTableId::try_from(record_id as u16) +} diff --git a/node/src/components/sync_leaper.rs b/node/src/components/sync_leaper.rs new file mode 100644 index 0000000000..cb35f22ebe --- /dev/null +++ b/node/src/components/sync_leaper.rs @@ -0,0 +1,349 @@ +//! 
The Sync Leaper +mod error; +mod event; +mod leap_activity; +mod leap_state; +mod metrics; +#[cfg(test)] +mod tests; + +use std::{sync::Arc, time::Instant}; + +use datasize::DataSize; +use prometheus::Registry; +use thiserror::Error; +use tracing::{debug, error, info, warn}; + +use casper_types::Chainspec; + +use crate::{ + components::{ + fetcher::{self, FetchResult, FetchedData}, + Component, + }, + effect::{requests::FetcherRequest, EffectBuilder, EffectExt, Effects}, + types::{ + sync_leap_validation_metadata::SyncLeapValidationMetaData, NodeId, SyncLeap, + SyncLeapIdentifier, + }, + NodeRng, +}; +pub(crate) use error::LeapActivityError; +pub(crate) use event::Event; +pub(crate) use leap_state::LeapState; + +use metrics::Metrics; + +use self::leap_activity::LeapActivity; + +const COMPONENT_NAME: &str = "sync_leaper"; + +#[derive(Clone, Debug, DataSize, Eq, PartialEq)] +pub(crate) enum PeerState { + RequestSent, + Rejected, + CouldntFetch, + Fetched(Box), +} + +#[derive(Debug)] +enum RegisterLeapAttemptOutcome { + DoNothing, + FetchSyncLeapFromPeers(Vec), +} + +#[derive(Debug, Error)] +enum Error { + #[error("fetched a sync leap from storage - {0}")] + FetchedSyncLeapFromStorage(SyncLeapIdentifier), + #[error("received a sync leap response while no requests were in progress - {0}")] + UnexpectedSyncLeapResponse(SyncLeapIdentifier), + #[error("block hash in the response '{actual}' doesn't match the one requested '{expected}'")] + SyncLeapIdentifierMismatch { + expected: SyncLeapIdentifier, + actual: SyncLeapIdentifier, + }, + #[error( + "received a sync leap response from an unknown peer - {peer} - {sync_leap_identifier}" + )] + ResponseFromUnknownPeer { + peer: NodeId, + sync_leap_identifier: SyncLeapIdentifier, + }, +} + +#[derive(Debug, DataSize)] +pub(crate) struct SyncLeaper { + leap_activity: Option, + chainspec: Arc, + #[data_size(skip)] + metrics: Metrics, +} + +impl SyncLeaper { + pub(crate) fn new( + chainspec: Arc, + registry: &Registry, + ) -> Result 
{ + Ok(SyncLeaper { + leap_activity: None, + chainspec, + metrics: Metrics::new(registry)?, + }) + } + + /// Returns whether a sync leap is ongoing or completed and its state if so. + /// + /// If a sync leap has been completed, successfully or not, the results are returned and the + /// attempt is removed, effectively making the component idle. + pub(crate) fn leap_status(&mut self) -> LeapState { + match &self.leap_activity { + None => LeapState::Idle, + Some(activity) => { + let result = activity.status(); + if result.active() == false { + match result { + LeapState::Received { .. } | LeapState::Failed { .. } => { + self.metrics + .sync_leap_duration + .observe(activity.leap_start().elapsed().as_secs_f64()); + } + LeapState::Idle | LeapState::Awaiting { .. } => { + // should be unreachable + error!(status = %result, ?activity, "sync leaper has inconsistent status"); + } + } + self.leap_activity = None; + } + result + } + } + } + + /// Causes any ongoing sync leap attempt to be abandoned, i.e. results gathered so far are + /// dropped and responses received later for this attempt are ignored. + pub(crate) fn purge(&mut self) { + if let Some(activity) = self.leap_activity.take() { + debug!(identifier = %activity.sync_leap_identifier(), "purging sync leap"); + } + } + + #[cfg_attr(doc, aquamarine::aquamarine)] + /// ```mermaid + /// flowchart TD + /// style Start fill:#66ccff,stroke:#333,stroke-width:4px + /// style End fill:#66ccff,stroke:#333,stroke-width:4px + /// + /// title[SyncLeap process - AttemptLeap] + /// title---Start + /// style title fill:#FFF,stroke:#FFF + /// linkStyle 0 stroke-width:0; + /// + /// Start --> A{have at least
one peer?} + /// A -->|Yes| B{is other sync
leap in progress?} + /// A -->|No| End + /// B -->|Yes| C{do sync leap
identifiers match?} + /// C -->|No| End + /// C -->|Yes| D[fetch SyncLeap from potentially
newly learned peers] + /// B -->|No| G[fetch SyncLeap
from all peers] + /// G --> E + /// D --> E[SyncLeap arrives] + /// E --> F[SyncLeap is stored] + /// F --> End + /// ``` + fn register_leap_attempt( + &mut self, + sync_leap_identifier: SyncLeapIdentifier, + peers_to_ask: Vec, + ) -> RegisterLeapAttemptOutcome { + info!(%sync_leap_identifier, "registering leap attempt"); + if peers_to_ask.is_empty() { + error!("tried to start fetching a sync leap without peers to ask"); + return RegisterLeapAttemptOutcome::DoNothing; + } + if let Some(leap_activity) = self.leap_activity.as_mut() { + if leap_activity.sync_leap_identifier() != &sync_leap_identifier { + error!( + current_sync_leap_identifier = %leap_activity.sync_leap_identifier(), + requested_sync_leap_identifier = %sync_leap_identifier, + "tried to start fetching a sync leap for a different sync_leap_identifier" + ); + return RegisterLeapAttemptOutcome::DoNothing; + } + + let peers_not_asked_yet: Vec<_> = peers_to_ask + .iter() + .filter_map(|peer| leap_activity.register_peer(*peer)) + .collect(); + + return if peers_not_asked_yet.is_empty() { + debug!(%sync_leap_identifier, "peers_not_asked_yet.is_empty()"); + RegisterLeapAttemptOutcome::DoNothing + } else { + debug!(%sync_leap_identifier, "fetching sync leap from {} peers not asked yet", peers_not_asked_yet.len()); + RegisterLeapAttemptOutcome::FetchSyncLeapFromPeers(peers_not_asked_yet) + }; + } + + debug!(%sync_leap_identifier, "fetching sync leap from {} peers", peers_to_ask.len()); + self.leap_activity = Some(LeapActivity::new( + sync_leap_identifier, + peers_to_ask + .iter() + .map(|peer| (*peer, PeerState::RequestSent)) + .collect(), + Instant::now(), + )); + RegisterLeapAttemptOutcome::FetchSyncLeapFromPeers(peers_to_ask) + } + + fn fetch_received( + &mut self, + sync_leap_identifier: SyncLeapIdentifier, + fetch_result: FetchResult, + ) -> Result<(), Error> { + let leap_activity = match &mut self.leap_activity { + Some(leap_activity) => leap_activity, + None => { + return 
Err(Error::UnexpectedSyncLeapResponse(sync_leap_identifier)); + } + }; + + if leap_activity.sync_leap_identifier() != &sync_leap_identifier { + return Err(Error::SyncLeapIdentifierMismatch { + actual: sync_leap_identifier, + expected: *leap_activity.sync_leap_identifier(), + }); + } + + match fetch_result { + Ok(FetchedData::FromStorage { .. }) => { + Err(Error::FetchedSyncLeapFromStorage(sync_leap_identifier)) + } + Ok(FetchedData::FromPeer { item, peer, .. }) => { + let peer_state = match leap_activity.peers_mut().get_mut(&peer) { + Some(state) => state, + None => { + return Err(Error::ResponseFromUnknownPeer { + peer, + sync_leap_identifier, + }); + } + }; + *peer_state = PeerState::Fetched(Box::new(*item)); + self.metrics.sync_leap_fetched_from_peer.inc(); + Ok(()) + } + Err(fetcher::Error::Rejected { peer, .. }) => { + let peer_state = match leap_activity.peers_mut().get_mut(&peer) { + Some(state) => state, + None => { + return Err(Error::ResponseFromUnknownPeer { + peer, + sync_leap_identifier, + }); + } + }; + info!(%peer, %sync_leap_identifier, "peer rejected our request for a sync leap"); + *peer_state = PeerState::Rejected; + self.metrics.sync_leap_rejected_by_peer.inc(); + Ok(()) + } + Err(error) => { + let peer = error.peer(); + info!(?error, %peer, %sync_leap_identifier, "failed to fetch a sync leap from peer"); + let peer_state = match leap_activity.peers_mut().get_mut(peer) { + Some(state) => state, + None => { + return Err(Error::ResponseFromUnknownPeer { + peer: *peer, + sync_leap_identifier, + }); + } + }; + *peer_state = PeerState::CouldntFetch; + self.metrics.sync_leap_cant_fetch.inc(); + Ok(()) + } + } + } +} + +impl Component for SyncLeaper +where + REv: From> + Send, +{ + type Event = Event; + + fn handle_event( + &mut self, + effect_builder: EffectBuilder, + _rng: &mut NodeRng, + event: Self::Event, + ) -> Effects { + match event { + Event::AttemptLeap { + sync_leap_identifier, + peers_to_ask, + } => match 
self.register_leap_attempt(sync_leap_identifier, peers_to_ask) { + RegisterLeapAttemptOutcome::DoNothing => Effects::new(), + RegisterLeapAttemptOutcome::FetchSyncLeapFromPeers(peers) => { + let mut effects = Effects::new(); + peers.into_iter().for_each(|peer| { + effects.extend( + effect_builder + .fetch::( + sync_leap_identifier, + peer, + Box::new(SyncLeapValidationMetaData::from_chainspec( + self.chainspec.as_ref(), + )), + ) + .event(move |fetch_result| Event::FetchedSyncLeapFromPeer { + sync_leap_identifier, + fetch_result, + }), + ) + }); + effects + } + }, + Event::FetchedSyncLeapFromPeer { + sync_leap_identifier, + fetch_result, + } => { + // Log potential error with proper severity and continue processing. + if let Err(error) = self.fetch_received(sync_leap_identifier, fetch_result) { + match error { + Error::FetchedSyncLeapFromStorage(_) => error!(%error), + Error::UnexpectedSyncLeapResponse(_) + | Error::SyncLeapIdentifierMismatch { .. } + | Error::ResponseFromUnknownPeer { .. 
} => warn!(%error), + } + } + Effects::new() + } + } + } + + fn name(&self) -> &str { + COMPONENT_NAME + } +} + +#[cfg(test)] +impl SyncLeaper { + fn peers(&self) -> Option> { + self.leap_activity + .as_ref() + .and_then(|leap_activity| { + let peers = leap_activity.peers(); + if leap_activity.peers().is_empty() { + None + } else { + Some(peers.clone()) + } + }) + .map(|peers| peers.into_iter().collect::>()) + } +} diff --git a/node/src/components/sync_leaper/error.rs b/node/src/components/sync_leaper/error.rs new file mode 100644 index 0000000000..f321179cf4 --- /dev/null +++ b/node/src/components/sync_leaper/error.rs @@ -0,0 +1,38 @@ +use datasize::DataSize; +use std::{ + fmt, + fmt::{Display, Formatter}, +}; + +use crate::types::{NodeId, SyncLeapIdentifier}; + +#[derive(Debug, Clone, DataSize)] +pub(crate) enum LeapActivityError { + TooOld(SyncLeapIdentifier, Vec), + Unobtainable(SyncLeapIdentifier, Vec), + NoPeers(SyncLeapIdentifier), +} + +impl Display for LeapActivityError { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + LeapActivityError::TooOld(sync_leap_identifier, ..) => { + write!(formatter, "too old: {}", sync_leap_identifier) + } + LeapActivityError::Unobtainable(sync_leap_identifier, ..) 
=> { + write!( + formatter, + "unable to acquire data for: {}", + sync_leap_identifier + ) + } + LeapActivityError::NoPeers(sync_leap_identifier) => { + write!( + formatter, + "sync leaper has no peers for: {}", + sync_leap_identifier + ) + } + } + } +} diff --git a/node/src/components/sync_leaper/event.rs b/node/src/components/sync_leaper/event.rs new file mode 100644 index 0000000000..1ba53e845e --- /dev/null +++ b/node/src/components/sync_leaper/event.rs @@ -0,0 +1,43 @@ +use std::fmt::{Display, Formatter}; + +use serde::Serialize; + +use crate::{ + components::fetcher::FetchResult, + types::{NodeId, SyncLeap, SyncLeapIdentifier}, +}; + +#[derive(Debug, Serialize)] +pub(crate) enum Event { + AttemptLeap { + sync_leap_identifier: SyncLeapIdentifier, + peers_to_ask: Vec, + }, + FetchedSyncLeapFromPeer { + sync_leap_identifier: SyncLeapIdentifier, + fetch_result: FetchResult, + }, +} + +impl Display for Event { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self { + Event::AttemptLeap { + sync_leap_identifier, + peers_to_ask, + } => write!( + f, + "sync pulling sync leap: {:?} {:?}", + sync_leap_identifier, peers_to_ask + ), + Event::FetchedSyncLeapFromPeer { + sync_leap_identifier, + fetch_result, + } => write!( + f, + "fetched sync leap from peer: {} {:?}", + sync_leap_identifier, fetch_result + ), + } + } +} diff --git a/node/src/components/sync_leaper/leap_activity.rs b/node/src/components/sync_leaper/leap_activity.rs new file mode 100644 index 0000000000..8e54e7c9a1 --- /dev/null +++ b/node/src/components/sync_leaper/leap_activity.rs @@ -0,0 +1,515 @@ +use std::{cmp::Ordering, collections::HashMap, time::Instant}; + +use datasize::DataSize; + +use crate::types::{NodeId, SyncLeap, SyncLeapIdentifier}; + +use super::{leap_state::LeapState, LeapActivityError, PeerState}; + +#[derive(Debug, DataSize)] +pub(crate) struct LeapActivity { + sync_leap_identifier: SyncLeapIdentifier, + peers: HashMap, + leap_start: Instant, +} + +impl LeapActivity 
{ + pub(crate) fn new( + sync_leap_identifier: SyncLeapIdentifier, + peers: HashMap, + leap_start: Instant, + ) -> Self { + Self { + sync_leap_identifier, + peers, + leap_start, + } + } + + pub(super) fn status(&self) -> LeapState { + let sync_leap_identifier = self.sync_leap_identifier; + let in_flight = self + .peers + .values() + .filter(|state| matches!(state, PeerState::RequestSent)) + .count(); + let responsed = self.peers.len() - in_flight; + + if in_flight == 0 && responsed == 0 { + return LeapState::Failed { + sync_leap_identifier, + in_flight, + error: LeapActivityError::NoPeers(sync_leap_identifier), + from_peers: vec![], + }; + } + if in_flight > 0 && responsed == 0 { + return LeapState::Awaiting { + sync_leap_identifier, + in_flight, + }; + } + match self.best_response() { + Ok((best_available, from_peers)) => LeapState::Received { + in_flight, + best_available: Box::new(best_available), + from_peers, + }, + // `Unobtainable` means we couldn't download it from any peer so far - don't treat it + // as a failure if there are still requests in flight + Err(LeapActivityError::Unobtainable(_, _)) if in_flight > 0 => LeapState::Awaiting { + sync_leap_identifier, + in_flight, + }, + Err(error) => LeapState::Failed { + sync_leap_identifier, + from_peers: vec![], + in_flight, + error, + }, + } + } + + fn best_response(&self) -> Result<(SyncLeap, Vec), LeapActivityError> { + let reject_count = self + .peers + .values() + .filter(|peer_state| matches!(peer_state, PeerState::Rejected)) + .count(); + + let mut peers = vec![]; + let mut maybe_ret = None; + for (peer, peer_state) in &self.peers { + match peer_state { + PeerState::Fetched(sync_leap) => match &maybe_ret { + None => { + maybe_ret = Some(sync_leap); + peers.push(*peer); + } + Some(current_ret) => { + match current_ret + .highest_block_height() + .cmp(&sync_leap.highest_block_height()) + { + Ordering::Less => { + maybe_ret = Some(sync_leap); + peers = vec![*peer]; + } + Ordering::Equal => { + 
peers.push(*peer); + } + Ordering::Greater => {} + } + } + }, + PeerState::RequestSent | PeerState::Rejected | PeerState::CouldntFetch => {} + } + } + + match maybe_ret { + Some(sync_leap) => Ok((*sync_leap.clone(), peers)), + None => { + if reject_count > 0 { + Err(LeapActivityError::TooOld(self.sync_leap_identifier, peers)) + } else { + Err(LeapActivityError::Unobtainable( + self.sync_leap_identifier, + peers, + )) + } + } + } + } + + pub(crate) fn leap_start(&self) -> Instant { + self.leap_start + } + + pub(crate) fn sync_leap_identifier(&self) -> &SyncLeapIdentifier { + &self.sync_leap_identifier + } + + pub(crate) fn peers(&self) -> &HashMap { + &self.peers + } + + pub(crate) fn peers_mut(&mut self) -> &mut HashMap { + &mut self.peers + } + + /// Registers new leap activity if it wasn't already registered for specified peer. + pub(crate) fn register_peer(&mut self, peer: NodeId) -> Option { + (!self.peers().contains_key(&peer)).then(|| { + self.peers.insert(peer, PeerState::RequestSent); + peer + }) + } +} + +#[cfg(test)] +mod tests { + use std::{ + collections::{BTreeSet, HashMap}, + time::Instant, + }; + + use rand::seq::SliceRandom; + + use casper_types::{testing::TestRng, BlockHash, BlockHeader, BlockV2, TestBlockBuilder}; + + use crate::{ + components::sync_leaper::{ + leap_activity::LeapActivity, tests::make_test_sync_leap, LeapActivityError, LeapState, + PeerState, + }, + types::{NodeId, SyncLeap, SyncLeapIdentifier}, + }; + + fn make_random_block_with_height(rng: &mut TestRng, height: u64) -> BlockV2 { + TestBlockBuilder::new() + .era(0) + .height(height) + .switch_block(false) + .build(rng) + } + + fn make_sync_leap_with_trusted_block_header(trusted_block_header: BlockHeader) -> SyncLeap { + SyncLeap { + trusted_ancestor_only: false, + trusted_block_header, + trusted_ancestor_headers: vec![], + block_headers_with_signatures: vec![], + } + } + + fn assert_peers(expected_peers: I, leap_activity: &LeapActivity) + where + I: IntoIterator, + { + let 
expected_peers: BTreeSet<_> = expected_peers.into_iter().collect(); + let actual_peers: BTreeSet<_> = leap_activity + .peers() + .iter() + .map(|(node_id, _)| *node_id) + .collect(); + assert_eq!(expected_peers, actual_peers); + } + + #[test] + fn best_response_with_single_peer() { + let mut rng = TestRng::new(); + + let sync_leap_identifier = SyncLeapIdentifier::sync_to_tip(BlockHash::random(&mut rng)); + + let sync_leap = make_test_sync_leap(&mut rng); + let peer_1 = ( + NodeId::random(&mut rng), + PeerState::Fetched(Box::new(sync_leap.clone())), + ); + + let mut leap_activity = LeapActivity { + sync_leap_identifier, + peers: [peer_1.clone()].iter().cloned().collect(), + leap_start: Instant::now(), + }; + + let (actual_sync_leap, actual_peers) = leap_activity.best_response().unwrap(); + + assert!(!actual_peers.is_empty()); + assert_eq!(actual_peers.first().unwrap(), &peer_1.0); + assert_eq!(actual_sync_leap, sync_leap); + + // Adding peers in other states does not change the result. + let peer_request_sent = (NodeId::random(&mut rng), PeerState::RequestSent); + let peer_couldnt_fetch = (NodeId::random(&mut rng), PeerState::CouldntFetch); + let peer_rejected = (NodeId::random(&mut rng), PeerState::Rejected); + leap_activity.peers.extend( + [peer_request_sent, peer_couldnt_fetch, peer_rejected] + .iter() + .cloned(), + ); + + let (actual_sync_leap, actual_peers) = leap_activity.best_response().unwrap(); + + assert_eq!(actual_peers.len(), 1); + assert_eq!(actual_peers.first().unwrap(), &peer_1.0); + assert_eq!(actual_sync_leap, sync_leap); + } + + #[test] + fn best_response_with_multiple_peers() { + let mut rng = TestRng::new(); + + // Create 10 sync leaps, each with a distinct height. The height is not greater than 10. 
+ let sync_leap_identifier = SyncLeapIdentifier::sync_to_tip(BlockHash::random(&mut rng)); + let mut heights: Vec = (0..10).collect(); + heights.shuffle(&mut rng); + let mut peers_with_sync_leaps: HashMap<_, _> = heights + .iter() + .map(|height| { + let block = make_random_block_with_height(&mut rng, *height); + let sync_leap = + make_sync_leap_with_trusted_block_header(block.header().clone().into()); + ( + NodeId::random(&mut rng), + PeerState::Fetched(Box::new(sync_leap)), + ) + }) + .collect(); + + // Add another peer with the best response. + let block = make_random_block_with_height(&mut rng, 500); + let best_sync_leap = + make_sync_leap_with_trusted_block_header(block.header().clone().into()); + let peer_1_best_node_id = NodeId::random(&mut rng); + peers_with_sync_leaps.insert( + peer_1_best_node_id, + PeerState::Fetched(Box::new(best_sync_leap.clone())), + ); + let mut leap_activity = LeapActivity { + sync_leap_identifier, + peers: peers_with_sync_leaps.clone(), + leap_start: Instant::now(), + }; + + let (actual_sync_leap, actual_peers) = leap_activity.best_response().unwrap(); + + // Expect only a single peer with the best sync leap. + assert_eq!(actual_peers.len(), 1); + assert_eq!(actual_peers.first().unwrap(), &peer_1_best_node_id); + assert_eq!(actual_sync_leap, best_sync_leap); + + // Add two more peers with even better response. 
+ let block = make_random_block_with_height(&mut rng, 1000); + let best_sync_leap = + make_sync_leap_with_trusted_block_header(block.header().clone().into()); + let peer_2_best_node_id = NodeId::random(&mut rng); + let peer_3_best_node_id = NodeId::random(&mut rng); + leap_activity.peers.extend( + [ + ( + peer_2_best_node_id, + PeerState::Fetched(Box::new(best_sync_leap.clone())), + ), + ( + peer_3_best_node_id, + PeerState::Fetched(Box::new(best_sync_leap.clone())), + ), + ] + .iter() + .cloned(), + ); + + let (actual_sync_leap, mut actual_peers) = leap_activity.best_response().unwrap(); + + // Expect two recently added peers with best sync leap to be reported. + let mut expected_peers = vec![peer_2_best_node_id, peer_3_best_node_id]; + actual_peers.sort_unstable(); + expected_peers.sort_unstable(); + + assert_eq!(actual_peers.len(), 2); + assert_eq!(actual_peers, expected_peers); + assert_eq!(actual_sync_leap, best_sync_leap); + + // Add two more peers with worse response. + let block = make_random_block_with_height(&mut rng, 1); + let worse_sync_leap = + make_sync_leap_with_trusted_block_header(block.header().clone().into()); + let peer_3_worse_node_id = NodeId::random(&mut rng); + let peer_4_worse_node_id = NodeId::random(&mut rng); + leap_activity.peers.extend( + [ + ( + peer_3_worse_node_id, + PeerState::Fetched(Box::new(worse_sync_leap.clone())), + ), + ( + peer_4_worse_node_id, + PeerState::Fetched(Box::new(worse_sync_leap)), + ), + ] + .iter() + .cloned(), + ); + + let (actual_sync_leap, mut actual_peers) = leap_activity.best_response().unwrap(); + + // Expect two previously added best peers with best sync leap to be reported. + let mut expected_peers = vec![peer_2_best_node_id, peer_3_best_node_id]; + actual_peers.sort_unstable(); + expected_peers.sort_unstable(); + + assert_eq!(actual_peers.len(), 2); + assert_eq!(actual_peers, expected_peers); + assert_eq!(actual_sync_leap, best_sync_leap); + + // Adding peers in other states does not change the result. 
+ let peer_request_sent = (NodeId::random(&mut rng), PeerState::RequestSent); + let peer_couldnt_fetch = (NodeId::random(&mut rng), PeerState::CouldntFetch); + let peer_rejected = (NodeId::random(&mut rng), PeerState::Rejected); + leap_activity.peers.extend( + [peer_request_sent, peer_couldnt_fetch, peer_rejected] + .iter() + .cloned(), + ); + + let (actual_sync_leap, mut actual_peers) = leap_activity.best_response().unwrap(); + let mut expected_peers = vec![peer_2_best_node_id, peer_3_best_node_id]; + actual_peers.sort_unstable(); + expected_peers.sort_unstable(); + assert_eq!(actual_peers.len(), 2); + assert_eq!(actual_peers, expected_peers); + assert_eq!(actual_sync_leap, best_sync_leap); + } + + #[test] + fn best_response_failed() { + let mut rng = TestRng::new(); + + let sync_leap_identifier = SyncLeapIdentifier::sync_to_tip(BlockHash::random(&mut rng)); + + let peer_couldnt_fetch = (NodeId::random(&mut rng), PeerState::CouldntFetch); + let peer_request_sent = (NodeId::random(&mut rng), PeerState::RequestSent); + + let mut leap_activity = LeapActivity { + sync_leap_identifier, + peers: [peer_couldnt_fetch, peer_request_sent] + .iter() + .cloned() + .collect(), + leap_start: Instant::now(), + }; + + let best_response_error = leap_activity.best_response().unwrap_err(); + assert!(matches!( + best_response_error, + LeapActivityError::Unobtainable(_, _) + )); + + leap_activity + .peers + .insert(NodeId::random(&mut rng), PeerState::Rejected); + let best_response_error = leap_activity.best_response().unwrap_err(); + assert!(matches!( + best_response_error, + LeapActivityError::TooOld(_, _) + )); + } + + #[test] + fn leap_activity_status_failed() { + let mut rng = TestRng::new(); + + let sync_leap_identifier = SyncLeapIdentifier::sync_to_tip(BlockHash::random(&mut rng)); + + let leap_activity = LeapActivity { + sync_leap_identifier, + peers: HashMap::new(), + leap_start: Instant::now(), + }; + assert!(matches!( + leap_activity.status(), + LeapState::Failed { error, 
.. } if matches!(error, LeapActivityError::NoPeers(_)) + )); + + let peer_1 = (NodeId::random(&mut rng), PeerState::CouldntFetch); + let leap_activity = LeapActivity { + sync_leap_identifier, + peers: [peer_1].iter().cloned().collect(), + leap_start: Instant::now(), + }; + assert!(matches!( + leap_activity.status(), + LeapState::Failed { error, .. } if matches!(error, LeapActivityError::Unobtainable(_, _)) + )); + } + + #[test] + fn leap_activity_status_awaiting() { + let mut rng = TestRng::new(); + + let sync_leap_identifier = SyncLeapIdentifier::sync_to_tip(BlockHash::random(&mut rng)); + + let peer_1 = (NodeId::random(&mut rng), PeerState::RequestSent); + let peer_2 = (NodeId::random(&mut rng), PeerState::RequestSent); + let mut leap_activity = LeapActivity { + sync_leap_identifier, + peers: [peer_1, peer_2].iter().cloned().collect(), + leap_start: Instant::now(), + }; + assert!(matches!(leap_activity.status(), LeapState::Awaiting { .. })); + + leap_activity + .peers + .insert(NodeId::random(&mut rng), PeerState::CouldntFetch); + assert!(matches!(leap_activity.status(), LeapState::Awaiting { .. })); + } + + #[test] + fn leap_activity_status_received() { + let mut rng = TestRng::new(); + + let sync_leap_identifier = SyncLeapIdentifier::sync_to_tip(BlockHash::random(&mut rng)); + + let sync_leap = make_test_sync_leap(&mut rng); + let peer_1 = ( + NodeId::random(&mut rng), + PeerState::Fetched(Box::new(sync_leap)), + ); + let mut leap_activity = LeapActivity { + sync_leap_identifier, + peers: [peer_1].iter().cloned().collect(), + leap_start: Instant::now(), + }; + assert!(matches!(leap_activity.status(), LeapState::Received { .. })); + + // Adding peers in other states does not change the result. 
+ let peer_request_sent = (NodeId::random(&mut rng), PeerState::RequestSent); + let peer_couldnt_fetch = (NodeId::random(&mut rng), PeerState::CouldntFetch); + let peer_rejected = (NodeId::random(&mut rng), PeerState::Rejected); + leap_activity.peers.extend( + [peer_request_sent, peer_couldnt_fetch, peer_rejected] + .iter() + .cloned(), + ); + + assert!(matches!(leap_activity.status(), LeapState::Received { .. })); + } + + #[test] + fn register_peer() { + let mut rng = TestRng::new(); + + let sync_leap_identifier = SyncLeapIdentifier::sync_to_tip(BlockHash::random(&mut rng)); + + let peer_1 = (NodeId::random(&mut rng), PeerState::RequestSent); + + let mut leap_activity = LeapActivity { + sync_leap_identifier, + peers: [peer_1.clone()].iter().cloned().collect(), + leap_start: Instant::now(), + }; + + // Expect the single peer specified on creation. + assert_peers([peer_1.0], &leap_activity); + + // Registering the same peer the second time does not register. + let maybe_registered_peer = leap_activity.register_peer(peer_1.0); + assert!(maybe_registered_peer.is_none()); + + // Still expect only the single peer. + assert_peers([peer_1.0], &leap_activity); + + // Registering additional peer should succeed. + let peer_2 = NodeId::random(&mut rng); + let maybe_registered_peer = leap_activity.register_peer(peer_2); + assert_eq!(maybe_registered_peer, Some(peer_2)); + + // But registering it for the second time should be a noop. + let maybe_registered_peer = leap_activity.register_peer(peer_2); + assert_eq!(maybe_registered_peer, None); + + // Expect two added peers. 
+ assert_peers([peer_1.0, peer_2], &leap_activity); + } +} diff --git a/node/src/components/sync_leaper/leap_state.rs b/node/src/components/sync_leaper/leap_state.rs new file mode 100644 index 0000000000..d66477d38b --- /dev/null +++ b/node/src/components/sync_leaper/leap_state.rs @@ -0,0 +1,156 @@ +use std::fmt::{Display, Formatter}; + +use datasize::DataSize; + +use crate::types::{NodeId, SyncLeap, SyncLeapIdentifier}; + +use super::LeapActivityError; + +#[derive(Debug, DataSize)] +pub(crate) enum LeapState { + Idle, + Awaiting { + sync_leap_identifier: SyncLeapIdentifier, + in_flight: usize, + }, + Received { + best_available: Box, + from_peers: Vec, + in_flight: usize, + }, + Failed { + sync_leap_identifier: SyncLeapIdentifier, + error: LeapActivityError, + from_peers: Vec, + in_flight: usize, + }, +} + +impl Display for LeapState { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self { + LeapState::Idle => { + write!(f, "Idle") + } + LeapState::Awaiting { + sync_leap_identifier, + in_flight, + } => { + write!( + f, + "Awaiting {} responses for {}", + in_flight, + sync_leap_identifier.block_hash(), + ) + } + LeapState::Received { + best_available, + from_peers, + in_flight, + } => { + write!( + f, + "Received {} from {} peers, awaiting {} responses", + best_available.highest_block_hash(), + from_peers.len(), + in_flight + ) + } + LeapState::Failed { + sync_leap_identifier, + error, + .. + } => { + write!( + f, + "Failed leap for {} {}", + sync_leap_identifier.block_hash(), + error + ) + } + } + } +} + +impl LeapState { + pub(super) fn in_flight(&self) -> usize { + match self { + LeapState::Idle => 0, + LeapState::Awaiting { in_flight, .. } + | LeapState::Received { in_flight, .. } + | LeapState::Failed { in_flight, .. 
} => *in_flight, + } + } + + pub(super) fn active(&self) -> bool { + self.in_flight() > 0 + } +} + +#[cfg(test)] +mod tests { + use casper_types::{testing::TestRng, BlockHash}; + + use crate::{ + components::sync_leaper::{tests::make_test_sync_leap, LeapActivityError, LeapState}, + types::SyncLeapIdentifier, + }; + + #[test] + fn leap_state() { + let mut rng = TestRng::new(); + + let leap_state = LeapState::Idle; + assert!(!leap_state.active()); + + let sync_leap_identifier = SyncLeapIdentifier::sync_to_tip(BlockHash::random(&mut rng)); + let leap_state = LeapState::Awaiting { + sync_leap_identifier, + in_flight: 0, + }; + assert!(!leap_state.active()); + assert_eq!(leap_state.in_flight(), 0); + + let leap_state = LeapState::Awaiting { + sync_leap_identifier, + in_flight: 1, + }; + assert!(leap_state.active()); + assert_eq!(leap_state.in_flight(), 1); + + let leap_state = LeapState::Failed { + sync_leap_identifier, + in_flight: 0, + error: LeapActivityError::NoPeers(sync_leap_identifier), + from_peers: vec![], + }; + assert!(!leap_state.active()); + assert_eq!(leap_state.in_flight(), 0); + + let leap_state = LeapState::Failed { + sync_leap_identifier, + in_flight: 1, + error: LeapActivityError::NoPeers(sync_leap_identifier), + from_peers: vec![], + }; + assert!(leap_state.active()); + assert_eq!(leap_state.in_flight(), 1); + + let sync_leap = make_test_sync_leap(&mut rng); + let leap_state = LeapState::Received { + best_available: Box::new(sync_leap.clone()), + from_peers: vec![], + in_flight: 0, + }; + assert!(!leap_state.active()); + assert_eq!(leap_state.in_flight(), 0); + + let leap_state = LeapState::Received { + best_available: Box::new(sync_leap), + from_peers: vec![], + in_flight: 1, + }; + assert!(leap_state.active()); + assert_eq!(leap_state.in_flight(), 1); + } +} diff --git a/node/src/components/sync_leaper/metrics.rs b/node/src/components/sync_leaper/metrics.rs new file mode 100644 index 0000000000..04443d493a --- /dev/null +++ 
b/node/src/components/sync_leaper/metrics.rs @@ -0,0 +1,77 @@ +use prometheus::{Histogram, IntCounter, Registry}; + +use crate::{unregister_metric, utils}; + +const SYNC_LEAP_DURATION_NAME: &str = "sync_leap_duration_seconds"; +const SYNC_LEAP_DURATION_HELP: &str = "duration (in sec) to perform a successful sync leap"; + +// We use linear buckets to observe the time it takes to do a sync leap. +// Buckets have 1s widths and cover up to 4s durations with this granularity. +const LINEAR_BUCKET_START: f64 = 1.0; +const LINEAR_BUCKET_WIDTH: f64 = 1.0; +const LINEAR_BUCKET_COUNT: usize = 4; + +/// Metrics for the sync leap component. +#[derive(Debug)] +pub(super) struct Metrics { + /// Time duration to perform a sync leap. + pub(super) sync_leap_duration: Histogram, + /// Number of successful sync leap responses that were received from peers. + pub(super) sync_leap_fetched_from_peer: IntCounter, + /// Number of requests that were rejected by peers. + pub(super) sync_leap_rejected_by_peer: IntCounter, + /// Number of requests that couldn't be fetched from peers. + pub(super) sync_leap_cant_fetch: IntCounter, + + registry: Registry, +} + +impl Metrics { + /// Creates a new instance of the block accumulator metrics, using the given prefix. 
+ pub fn new(registry: &Registry) -> Result { + let buckets = prometheus::linear_buckets( + LINEAR_BUCKET_START, + LINEAR_BUCKET_WIDTH, + LINEAR_BUCKET_COUNT, + )?; + + let sync_leap_fetched_from_peer = IntCounter::new( + "sync_leap_fetched_from_peer_total".to_string(), + "number of successful sync leap responses that were received from peers".to_string(), + )?; + let sync_leap_rejected_by_peer = IntCounter::new( + "sync_leap_rejected_by_peer_total".to_string(), + "number of sync leap requests that were rejected by peers".to_string(), + )?; + let sync_leap_cant_fetch = IntCounter::new( + "sync_leap_cant_fetch_total".to_string(), + "number of sync leap requests that couldn't be fetched from peers".to_string(), + )?; + + registry.register(Box::new(sync_leap_fetched_from_peer.clone()))?; + registry.register(Box::new(sync_leap_rejected_by_peer.clone()))?; + registry.register(Box::new(sync_leap_cant_fetch.clone()))?; + + Ok(Metrics { + sync_leap_duration: utils::register_histogram_metric( + registry, + SYNC_LEAP_DURATION_NAME, + SYNC_LEAP_DURATION_HELP, + buckets, + )?, + sync_leap_fetched_from_peer, + sync_leap_rejected_by_peer, + sync_leap_cant_fetch, + registry: registry.clone(), + }) + } +} + +impl Drop for Metrics { + fn drop(&mut self) { + unregister_metric!(self.registry, self.sync_leap_duration); + unregister_metric!(self.registry, self.sync_leap_cant_fetch); + unregister_metric!(self.registry, self.sync_leap_fetched_from_peer); + unregister_metric!(self.registry, self.sync_leap_rejected_by_peer); + } +} diff --git a/node/src/components/sync_leaper/tests.rs b/node/src/components/sync_leaper/tests.rs new file mode 100644 index 0000000000..25c4a0cfc7 --- /dev/null +++ b/node/src/components/sync_leaper/tests.rs @@ -0,0 +1,375 @@ +use std::{collections::BTreeSet, sync::Arc}; + +use prometheus::Registry; + +use casper_types::{testing::TestRng, BlockHash, Chainspec, TestBlockBuilder}; + +use crate::{ + components::{ + fetcher::{self, FetchResult, FetchedData}, + 
sync_leaper::{LeapState, PeerState, RegisterLeapAttemptOutcome}, + }, + types::{NodeId, SyncLeap, SyncLeapIdentifier}, +}; + +use super::{Error, SyncLeaper}; + +pub(crate) fn make_test_sync_leap(rng: &mut TestRng) -> SyncLeap { + let block = TestBlockBuilder::new().build_versioned(rng); + SyncLeap { + trusted_ancestor_only: false, + trusted_block_header: block.clone_header(), + trusted_ancestor_headers: vec![], + block_headers_with_signatures: vec![], + } +} + +fn make_sync_leaper(rng: &mut TestRng) -> SyncLeaper { + let chainspec = Chainspec::random(rng); + let registry = Registry::new(); + SyncLeaper::new(Arc::new(chainspec), ®istry).unwrap() +} + +fn assert_peers(expected: &[NodeId], actual: &Vec<(NodeId, PeerState)>) { + // Assert that all new peers are in `RequestSent` state. + for (_, peer_state) in actual { + assert!(matches!(peer_state, &PeerState::RequestSent)); + } + + // Assert that we have the expected list of peers. + let expected: BTreeSet<_> = expected.iter().collect(); + let actual: BTreeSet<_> = actual.iter().map(|(node_id, _)| node_id).collect(); + assert_eq!(expected, actual); +} + +fn assert_peer(sync_leaper: SyncLeaper, (expected_peer, expected_peer_state): (NodeId, PeerState)) { + let peers = sync_leaper.peers().unwrap(); + let (node_id, actual_peer_state) = peers.first().unwrap(); + assert_eq!(node_id, &expected_peer); + assert_eq!(actual_peer_state, &expected_peer_state); +} + +#[test] +fn new_sync_leaper_has_no_activity() { + let mut rng = TestRng::new(); + + let mut sync_leaper = make_sync_leaper(&mut rng); + + assert!(matches!(sync_leaper.leap_status(), LeapState::Idle)); +} + +#[test] +fn register_leap_attempt_no_peers() { + let mut rng = TestRng::new(); + + let mut sync_leaper = make_sync_leaper(&mut rng); + + let sync_leap_identifier = SyncLeapIdentifier::sync_to_tip(BlockHash::random(&mut rng)); + let peers_to_ask = vec![]; + + let outcome = sync_leaper.register_leap_attempt(sync_leap_identifier, peers_to_ask); + 
assert!(matches!(outcome, RegisterLeapAttemptOutcome::DoNothing)); + assert!(sync_leaper.peers().is_none()); +} + +#[test] +fn register_leap_attempt_reattempt_for_different_leap_identifier() { + let mut rng = TestRng::new(); + + let mut sync_leaper = make_sync_leaper(&mut rng); + + let sync_leap_identifier = SyncLeapIdentifier::sync_to_tip(BlockHash::random(&mut rng)); + + let peer_1 = NodeId::random(&mut rng); + let peers_to_ask = vec![peer_1]; + + // Start with a single peer. + let outcome = sync_leaper.register_leap_attempt(sync_leap_identifier, peers_to_ask.clone()); + // Expect that we should fetch SyncLeap from that peer. + assert!(matches!( + outcome, + RegisterLeapAttemptOutcome::FetchSyncLeapFromPeers(peers) if peers == peers_to_ask + )); + let expected_peers = vec![peer_1]; + let actual_peers = sync_leaper.peers().unwrap(); + assert_peers(&expected_peers, &actual_peers); + + // Request another sync leap, but for new sync leap identifier. + let sync_leap_identifier = SyncLeapIdentifier::sync_to_historical(BlockHash::random(&mut rng)); + let outcome = sync_leaper.register_leap_attempt(sync_leap_identifier, peers_to_ask); + // Expect that we should do nothing as the identifiers mismatch. + assert!(matches!(outcome, RegisterLeapAttemptOutcome::DoNothing)); + let expected_peers = vec![peer_1]; + let actual_peers = sync_leaper.peers().unwrap(); + assert_peers(&expected_peers, &actual_peers); +} + +#[test] +fn register_leap_attempt_with_reattempt_for_the_same_leap_identifier() { + let mut rng = TestRng::new(); + + let mut sync_leaper = make_sync_leaper(&mut rng); + + let sync_leap_identifier = SyncLeapIdentifier::sync_to_tip(BlockHash::random(&mut rng)); + + let peer_1 = NodeId::random(&mut rng); + let peers_to_ask = vec![peer_1]; + + // Start with a single peer. + let outcome = sync_leaper.register_leap_attempt(sync_leap_identifier, peers_to_ask.clone()); + // Expect that we should fetch SyncLeap from that peer. 
+ assert!(matches!( + outcome, + RegisterLeapAttemptOutcome::FetchSyncLeapFromPeers(peers) if peers == peers_to_ask + )); + let expected_peers = vec![peer_1]; + let actual_peers = sync_leaper.peers().unwrap(); + assert_peers(&expected_peers, &actual_peers); + + // Try to register the same peer. + let outcome = sync_leaper.register_leap_attempt(sync_leap_identifier, peers_to_ask); + // Expect that we should do nothing as the SyncLeap from this peer has already been requested. + assert!(matches!(outcome, RegisterLeapAttemptOutcome::DoNothing)); + let expected_peers = vec![peer_1]; + let actual_peers = sync_leaper.peers().unwrap(); + assert_peers(&expected_peers, &actual_peers); + + // Try to register one new peer. + let peer_2 = NodeId::random(&mut rng); + let peers_to_ask = vec![peer_2]; + let outcome = sync_leaper.register_leap_attempt(sync_leap_identifier, peers_to_ask.clone()); + // Expect that we should fetch SyncLeap from the new peer only. + assert!(matches!( + outcome, + RegisterLeapAttemptOutcome::FetchSyncLeapFromPeers(peers) if peers == peers_to_ask + )); + let expected_peers = vec![peer_1, peer_2]; + let actual_peers = sync_leaper.peers().unwrap(); + assert_peers(&expected_peers, &actual_peers); + + // Try to register two already existing peers. + let mut peers_to_ask = vec![peer_1, peer_2]; + let outcome = sync_leaper.register_leap_attempt(sync_leap_identifier, peers_to_ask.clone()); + // Expect that we should do nothing as the SyncLeap from both these peers has already been + // requested. + assert!(matches!(outcome, RegisterLeapAttemptOutcome::DoNothing)); + let expected_peers = vec![peer_1, peer_2]; + let actual_peers = sync_leaper.peers().unwrap(); + assert_peers(&expected_peers, &actual_peers); + + // Add two new peers for a total set of four, among which two are already registered. 
+ let peer_3 = NodeId::random(&mut rng); + let peer_4 = NodeId::random(&mut rng); + peers_to_ask.push(peer_3); + peers_to_ask.push(peer_4); + let outcome = sync_leaper.register_leap_attempt(sync_leap_identifier, peers_to_ask.clone()); + // Expect that we should fetch SyncLeap from the two new peers only. + assert!(matches!( + outcome, + RegisterLeapAttemptOutcome::FetchSyncLeapFromPeers(peers) if peers == vec![peer_3, peer_4] + )); + let expected_peers = vec![peer_1, peer_2, peer_3, peer_4]; + let actual_peers = sync_leaper.peers().unwrap(); + assert_peers(&expected_peers, &actual_peers); +} + +#[test] +fn fetch_received_from_storage() { + let mut rng = TestRng::new(); + + let mut sync_leaper = make_sync_leaper(&mut rng); + let sync_leap = make_test_sync_leap(&mut rng); + let sync_leap_identifier = SyncLeapIdentifier::sync_to_tip(BlockHash::random(&mut rng)); + + let peer_1 = NodeId::random(&mut rng); + let peers_to_ask = vec![peer_1]; + sync_leaper.register_leap_attempt(sync_leap_identifier, peers_to_ask); + + let fetch_result: FetchResult = Ok(FetchedData::from_storage(Box::new(sync_leap))); + + let actual = sync_leaper + .fetch_received(sync_leap_identifier, fetch_result) + .unwrap_err(); + assert!(matches!(actual, Error::FetchedSyncLeapFromStorage(_))); +} + +#[test] +fn fetch_received_identifier_mismatch() { + let mut rng = TestRng::new(); + + let mut sync_leaper = make_sync_leaper(&mut rng); + let sync_leap = make_test_sync_leap(&mut rng); + let sync_leap_identifier = SyncLeapIdentifier::sync_to_tip(BlockHash::random(&mut rng)); + + let peer = NodeId::random(&mut rng); + let peers_to_ask = vec![peer]; + sync_leaper.register_leap_attempt(sync_leap_identifier, peers_to_ask); + + let fetch_result: FetchResult = Ok(FetchedData::from_peer(sync_leap, peer)); + + let different_sync_leap_identifier = + SyncLeapIdentifier::sync_to_historical(BlockHash::random(&mut rng)); + + let actual = sync_leaper + .fetch_received(different_sync_leap_identifier, fetch_result) + 
.unwrap_err(); + + assert!(matches!(actual, Error::SyncLeapIdentifierMismatch { .. })); +} + +#[test] +fn fetch_received_unexpected_response() { + let mut rng = TestRng::new(); + + let mut sync_leaper = make_sync_leaper(&mut rng); + let sync_leap = make_test_sync_leap(&mut rng); + let sync_leap_identifier = SyncLeapIdentifier::sync_to_tip(BlockHash::random(&mut rng)); + + let peer = NodeId::random(&mut rng); + let fetch_result: FetchResult = Ok(FetchedData::from_peer(sync_leap, peer)); + + let actual = sync_leaper + .fetch_received(sync_leap_identifier, fetch_result) + .unwrap_err(); + assert!(matches!(actual, Error::UnexpectedSyncLeapResponse(_))); + + let peers = sync_leaper.peers(); + assert!(peers.is_none()); +} + +#[test] +fn fetch_received_from_unknown_peer() { + let mut rng = TestRng::new(); + + let mut sync_leaper = make_sync_leaper(&mut rng); + let sync_leap = make_test_sync_leap(&mut rng); + let sync_leap_identifier = SyncLeapIdentifier::sync_to_tip(BlockHash::random(&mut rng)); + + let peer = NodeId::random(&mut rng); + let peers_to_ask = vec![peer]; + sync_leaper.register_leap_attempt(sync_leap_identifier, peers_to_ask); + + let unknown_peer = NodeId::random(&mut rng); + let fetch_result: FetchResult = Ok(FetchedData::from_peer(sync_leap, unknown_peer)); + + let actual = sync_leaper + .fetch_received(sync_leap_identifier, fetch_result) + .unwrap_err(); + assert!(matches!(actual, Error::ResponseFromUnknownPeer { .. 
})); + + assert_peer(sync_leaper, (peer, PeerState::RequestSent)); +} + +#[test] +fn fetch_received_correctly() { + let mut rng = TestRng::new(); + + let mut sync_leaper = make_sync_leaper(&mut rng); + let sync_leap = make_test_sync_leap(&mut rng); + let sync_leap_identifier = SyncLeapIdentifier::sync_to_tip(BlockHash::random(&mut rng)); + + let peer = NodeId::random(&mut rng); + let peers_to_ask = vec![peer]; + sync_leaper.register_leap_attempt(sync_leap_identifier, peers_to_ask); + + let fetch_result: FetchResult = Ok(FetchedData::from_peer(sync_leap.clone(), peer)); + + let actual = sync_leaper.fetch_received(sync_leap_identifier, fetch_result); + assert!(actual.is_ok()); + + assert_peer(sync_leaper, (peer, PeerState::Fetched(Box::new(sync_leap)))); +} + +#[test] +fn fetch_received_peer_rejected() { + let mut rng = TestRng::new(); + + let mut sync_leaper = make_sync_leaper(&mut rng); + let sync_leap_identifier = SyncLeapIdentifier::sync_to_tip(BlockHash::random(&mut rng)); + + let peer = NodeId::random(&mut rng); + let peers_to_ask = vec![peer]; + sync_leaper.register_leap_attempt(sync_leap_identifier, peers_to_ask); + + let fetch_result: FetchResult = Err(fetcher::Error::Rejected { + id: Box::new(sync_leap_identifier), + peer, + }); + + let actual = sync_leaper.fetch_received(sync_leap_identifier, fetch_result); + assert!(actual.is_ok()); + + assert_peer(sync_leaper, (peer, PeerState::Rejected)); +} + +#[test] +fn fetch_received_from_unknown_peer_rejected() { + let mut rng = TestRng::new(); + + let mut sync_leaper = make_sync_leaper(&mut rng); + let sync_leap_identifier = SyncLeapIdentifier::sync_to_tip(BlockHash::random(&mut rng)); + + let peer = NodeId::random(&mut rng); + let peers_to_ask = vec![peer]; + sync_leaper.register_leap_attempt(sync_leap_identifier, peers_to_ask); + + let unknown_peer = NodeId::random(&mut rng); + let fetch_result: FetchResult = Err(fetcher::Error::Rejected { + id: Box::new(sync_leap_identifier), + peer: unknown_peer, + }); + + let 
actual = sync_leaper + .fetch_received(sync_leap_identifier, fetch_result) + .unwrap_err(); + assert!(matches!(actual, Error::ResponseFromUnknownPeer { .. })); + + assert_peer(sync_leaper, (peer, PeerState::RequestSent)); +} + +#[test] +fn fetch_received_other_error() { + let mut rng = TestRng::new(); + + let mut sync_leaper = make_sync_leaper(&mut rng); + let sync_leap_identifier = SyncLeapIdentifier::sync_to_tip(BlockHash::random(&mut rng)); + + let peer = NodeId::random(&mut rng); + let peers_to_ask = vec![peer]; + sync_leaper.register_leap_attempt(sync_leap_identifier, peers_to_ask); + + let fetch_result: FetchResult = Err(fetcher::Error::TimedOut { + id: Box::new(sync_leap_identifier), + peer, + }); + + let actual = sync_leaper.fetch_received(sync_leap_identifier, fetch_result); + assert!(actual.is_ok()); + + assert_peer(sync_leaper, (peer, PeerState::CouldntFetch)); +} + +#[test] +fn fetch_received_from_unknown_peer_other_error() { + let mut rng = TestRng::new(); + + let mut sync_leaper = make_sync_leaper(&mut rng); + let sync_leap_identifier = SyncLeapIdentifier::sync_to_tip(BlockHash::random(&mut rng)); + + let peer = NodeId::random(&mut rng); + let peers_to_ask = vec![peer]; + sync_leaper.register_leap_attempt(sync_leap_identifier, peers_to_ask); + + let unknown_peer = NodeId::random(&mut rng); + let fetch_result: FetchResult = Err(fetcher::Error::TimedOut { + id: Box::new(sync_leap_identifier), + peer: unknown_peer, + }); + + let actual = sync_leaper + .fetch_received(sync_leap_identifier, fetch_result) + .unwrap_err(); + assert!(matches!(actual, Error::ResponseFromUnknownPeer { .. 
})); + + assert_peer(sync_leaper, (peer, PeerState::RequestSent)); +} diff --git a/node/src/components/transaction_acceptor.rs b/node/src/components/transaction_acceptor.rs new file mode 100644 index 0000000000..37caf3d38d --- /dev/null +++ b/node/src/components/transaction_acceptor.rs @@ -0,0 +1,1174 @@ +mod config; +mod error; +mod event; +mod metrics; +mod tests; + +use std::{collections::BTreeSet, fmt::Debug, sync::Arc}; + +use casper_types::{ + contracts::ProtocolVersionMajor, ContractRuntimeTag, InvalidTransaction, InvalidTransactionV1, +}; +use datasize::DataSize; +use prometheus::Registry; +use tracing::{debug, error, trace}; + +use casper_storage::data_access_layer::{balance::BalanceHandling, BalanceRequest, ProofHandling}; +use casper_types::{ + account::AccountHash, addressable_entity::AddressableEntity, system::auction::ARG_AMOUNT, + AddressableEntityHash, AddressableEntityIdentifier, BlockHeader, Chainspec, EntityAddr, + EntityKind, EntityVersion, EntityVersionKey, ExecutableDeployItem, + ExecutableDeployItemIdentifier, InitiatorAddr, Package, PackageAddr, PackageHash, + PackageIdentifier, Timestamp, Transaction, TransactionEntryPoint, TransactionInvocationTarget, + TransactionTarget, DEFAULT_ENTRY_POINT_NAME, U512, +}; + +use crate::{ + components::Component, + effect::{ + announcements::{FatalAnnouncement, TransactionAcceptorAnnouncement}, + requests::{ContractRuntimeRequest, StorageRequest}, + EffectBuilder, EffectExt, Effects, Responder, + }, + fatal, + types::MetaTransaction, + utils::Source, + NodeRng, +}; + +pub(crate) use config::Config; +pub(crate) use error::{DeployParameterFailure, Error, ParameterFailure}; +pub(crate) use event::{Event, EventMetadata}; + +const COMPONENT_NAME: &str = "transaction_acceptor"; + +const ARG_TARGET: &str = "target"; + +/// A helper trait constraining `TransactionAcceptor` compatible reactor events. 
+pub(crate) trait ReactorEventT: + From + + From + + From + + From + + From + + Send +{ +} + +impl ReactorEventT for REv where + REv: From + + From + + From + + From + + From + + Send +{ +} + +/// The `TransactionAcceptor` is the component which handles all new `Transaction`s immediately +/// after they're received by this node, regardless of whether they were provided by a peer or a +/// client, unless they were actively retrieved by this node via a fetch request (in which case the +/// fetcher performs the necessary validation and stores it). +/// +/// It validates a new `Transaction` as far as possible, stores it if valid, then announces the +/// newly-accepted `Transaction`. +#[derive(Debug, DataSize)] +pub struct TransactionAcceptor { + acceptor_config: Config, + chainspec: Arc, + administrators: BTreeSet, + #[data_size(skip)] + metrics: metrics::Metrics, + balance_hold_interval: u64, +} + +impl TransactionAcceptor { + pub(crate) fn new( + acceptor_config: Config, + chainspec: Arc, + registry: &Registry, + ) -> Result { + let administrators = chainspec + .core_config + .administrators + .iter() + .map(|public_key| public_key.to_account_hash()) + .collect(); + let balance_hold_interval = chainspec.core_config.gas_hold_interval.millis(); + Ok(TransactionAcceptor { + acceptor_config, + chainspec, + administrators, + metrics: metrics::Metrics::new(registry)?, + balance_hold_interval, + }) + } + + /// Handles receiving a new `Transaction` from the given source. 
+ fn accept( + &mut self, + effect_builder: EffectBuilder, + input_transaction: Transaction, + source: Source, + maybe_responder: Option>>, + ) -> Effects { + trace!(%source, %input_transaction, "checking transaction before accepting"); + let verification_start_timestamp = Timestamp::now(); + let transaction_config = &self.chainspec.as_ref().transaction_config; + let maybe_meta_transaction = MetaTransaction::from_transaction( + &input_transaction, + self.chainspec.as_ref().core_config.pricing_handling, + transaction_config, + ); + let meta_transaction = match maybe_meta_transaction { + Ok(transaction) => transaction, + Err(err) => { + return self.reject_transaction_direct( + effect_builder, + input_transaction, + source, + maybe_responder, + verification_start_timestamp, + Error::InvalidTransaction(err), + ); + } + }; + + let event_metadata = Box::new(EventMetadata::new( + input_transaction, + meta_transaction.clone(), + source, + maybe_responder, + verification_start_timestamp, + )); + + if meta_transaction.is_install_or_upgrade() + && meta_transaction.is_v2_wasm() + && meta_transaction.seed().is_none() + { + return self.reject_transaction( + effect_builder, + *event_metadata, + Error::InvalidTransaction(InvalidTransaction::V1( + InvalidTransactionV1::MissingSeed, + )), + ); + } + + let is_config_compliant = event_metadata + .meta_transaction + .is_config_compliant( + &self.chainspec, + self.acceptor_config.timestamp_leeway, + verification_start_timestamp, + ) + .map_err(Error::InvalidTransaction); + + if let Err(error) = is_config_compliant { + return self.reject_transaction(effect_builder, *event_metadata, error); + } + + // We only perform expiry checks on transactions received from the client. 
+ let current_node_timestamp = event_metadata.verification_start_timestamp; + if event_metadata.source.is_client() + && event_metadata.transaction.expired(current_node_timestamp) + { + let expiry_timestamp = event_metadata.transaction.expires(); + return self.reject_transaction( + effect_builder, + *event_metadata, + Error::Expired { + expiry_timestamp, + current_node_timestamp, + }, + ); + } + + effect_builder + .get_highest_complete_block_header_from_storage() + .event(move |maybe_block_header| Event::GetBlockHeaderResult { + event_metadata, + maybe_block_header: maybe_block_header.map(Box::new), + }) + } + + fn handle_get_block_header_result( + &mut self, + effect_builder: EffectBuilder, + event_metadata: Box, + maybe_block_header: Option>, + ) -> Effects { + let mut effects = Effects::new(); + + let block_header = match maybe_block_header { + Some(block_header) => block_header, + None => { + // this should be unreachable per current design of the system + if let Some(responder) = event_metadata.maybe_responder { + effects.extend(responder.respond(Err(Error::EmptyBlockchain)).ignore()); + } + return effects; + } + }; + + if event_metadata.source.is_client() { + let account_hash = match event_metadata.transaction.initiator_addr() { + InitiatorAddr::PublicKey(public_key) => public_key.to_account_hash(), + InitiatorAddr::AccountHash(account_hash) => account_hash, + }; + let entity_addr = EntityAddr::Account(account_hash.value()); + effect_builder + .get_addressable_entity(*block_header.state_root_hash(), entity_addr) + .event(move |result| Event::GetAddressableEntityResult { + event_metadata, + maybe_entity: result.into_option(), + block_header, + }) + } else { + self.verify_payment(effect_builder, event_metadata, block_header) + } + } + + fn handle_get_entity_result( + &mut self, + effect_builder: EffectBuilder, + event_metadata: Box, + block_header: Box, + maybe_entity: Option, + ) -> Effects { + match maybe_entity { + None => { + let initiator_addr = 
event_metadata.transaction.initiator_addr(); + let error = Error::parameter_failure( + &block_header, + ParameterFailure::NoSuchAddressableEntity { initiator_addr }, + ); + self.reject_transaction(effect_builder, *event_metadata, error) + } + Some(entity) => { + if let Err(parameter_failure) = + is_authorized_entity(&entity, &self.administrators, &event_metadata) + { + let error = Error::parameter_failure(&block_header, parameter_failure); + return self.reject_transaction(effect_builder, *event_metadata, error); + } + let protocol_version = block_header.protocol_version(); + let balance_handling = BalanceHandling::Available; + let proof_handling = ProofHandling::NoProofs; + let balance_request = BalanceRequest::from_purse( + *block_header.state_root_hash(), + protocol_version, + entity.main_purse(), + balance_handling, + proof_handling, + ); + effect_builder + .get_balance(balance_request) + .event(move |balance_result| Event::GetBalanceResult { + event_metadata, + block_header, + maybe_balance: balance_result.available_balance().copied(), + }) + } + } + } + + fn handle_get_balance_result( + &self, + effect_builder: EffectBuilder, + event_metadata: Box, + block_header: Box, + maybe_balance: Option, + ) -> Effects { + if !event_metadata.source.is_client() { + // This would only happen due to programmer error and should crash the node. Balance + // checks for transactions received from a peer will cause the network to stall. + return fatal!( + effect_builder, + "Balance checks for transactions received from peers should never occur." 
+ ) + .ignore(); + } + match maybe_balance { + None => { + let initiator_addr = event_metadata.transaction.initiator_addr(); + let error = Error::parameter_failure( + &block_header, + ParameterFailure::UnknownBalance { initiator_addr }, + ); + self.reject_transaction(effect_builder, *event_metadata, error) + } + Some(balance) => { + let has_minimum_balance = + balance >= self.chainspec.core_config.baseline_motes_amount_u512(); + if !has_minimum_balance { + let initiator_addr = event_metadata.transaction.initiator_addr(); + let error = Error::parameter_failure( + &block_header, + ParameterFailure::InsufficientBalance { initiator_addr }, + ); + self.reject_transaction(effect_builder, *event_metadata, error) + } else { + self.verify_payment(effect_builder, event_metadata, block_header) + } + } + } + } + + fn verify_payment( + &self, + effect_builder: EffectBuilder, + event_metadata: Box, + block_header: Box, + ) -> Effects { + // Only deploys need their payment code checked. + let payment_identifier = if let Transaction::Deploy(deploy) = &event_metadata.transaction { + if let Err(error) = deploy_payment_is_valid(deploy.payment(), &block_header) { + return self.reject_transaction(effect_builder, *event_metadata, error); + } + deploy.payment().identifier() + } else { + return self.verify_body(effect_builder, event_metadata, block_header); + }; + + match payment_identifier { + // We skip validation if the identifier is a named key, since that could yield a + // validation success at block X, then a validation failure at block X+1 (e.g. if the + // named key is deleted, or updated to point to an item which will fail subsequent + // validation). + ExecutableDeployItemIdentifier::Module + | ExecutableDeployItemIdentifier::Transfer + | ExecutableDeployItemIdentifier::AddressableEntity( + AddressableEntityIdentifier::Name(_), + ) + | ExecutableDeployItemIdentifier::Package(PackageIdentifier::Name { .. 
}) + | ExecutableDeployItemIdentifier::Package(PackageIdentifier::NameWithMajorVersion { + .. + }) => self.verify_body(effect_builder, event_metadata, block_header), + ExecutableDeployItemIdentifier::AddressableEntity( + AddressableEntityIdentifier::Hash(contract_hash), + ) => { + let entity_addr = EntityAddr::SmartContract(contract_hash.value()); + effect_builder + .get_addressable_entity(*block_header.state_root_hash(), entity_addr) + .event(move |result| Event::GetContractResult { + event_metadata, + block_header, + is_payment: true, + contract_hash, + maybe_entity: result.into_option(), + }) + } + ExecutableDeployItemIdentifier::AddressableEntity( + AddressableEntityIdentifier::Addr(entity_addr), + ) => effect_builder + .get_addressable_entity(*block_header.state_root_hash(), entity_addr) + .event(move |result| Event::GetAddressableEntityResult { + event_metadata, + block_header, + maybe_entity: result.into_option(), + }), + ExecutableDeployItemIdentifier::Package( + ref contract_package_identifier @ PackageIdentifier::Hash { package_hash, .. }, + ) + | ExecutableDeployItemIdentifier::Package( + ref contract_package_identifier @ PackageIdentifier::HashWithMajorVersion { + package_hash, + .. 
+ }, + ) => { + let maybe_entity_version = contract_package_identifier.version(); + let maybe_protocol_version_major = + contract_package_identifier.protocol_version_major(); + effect_builder + .get_package(*block_header.state_root_hash(), package_hash.value()) + .event(move |maybe_package| Event::GetPackageResult { + event_metadata, + block_header, + is_payment: true, + package_hash, + maybe_entity_version, + maybe_protocol_version_major, + maybe_package, + }) + } + } + } + + fn verify_body( + &self, + effect_builder: EffectBuilder, + event_metadata: Box, + block_header: Box, + ) -> Effects { + match &event_metadata.meta_transaction { + MetaTransaction::Deploy(_) => { + self.verify_deploy_session(effect_builder, event_metadata, block_header) + } + MetaTransaction::V1(_) => { + self.verify_transaction_v1_body(effect_builder, event_metadata, block_header) + } + } + } + + fn verify_deploy_session( + &self, + effect_builder: EffectBuilder, + event_metadata: Box, + block_header: Box, + ) -> Effects { + let session = match &event_metadata.meta_transaction { + MetaTransaction::Deploy(meta_deploy) => meta_deploy.session(), + MetaTransaction::V1(txn) => { + error!(%txn, "should only handle deploys in verify_deploy_session"); + return self.reject_transaction( + effect_builder, + *event_metadata, + Error::ExpectedDeploy, + ); + } + }; + + match session { + ExecutableDeployItem::Transfer { args } => { + // We rely on the `Deploy::is_config_compliant` to check + // that the transfer amount arg is present and is a valid U512. + if args.get(ARG_TARGET).is_none() { + let error = Error::parameter_failure( + &block_header, + DeployParameterFailure::MissingTransferTarget.into(), + ); + return self.reject_transaction(effect_builder, *event_metadata, error); + } + } + ExecutableDeployItem::ModuleBytes { module_bytes, .. 
} => { + if module_bytes.is_empty() { + let error = Error::parameter_failure( + &block_header, + DeployParameterFailure::MissingModuleBytes.into(), + ); + return self.reject_transaction(effect_builder, *event_metadata, error); + } + } + ExecutableDeployItem::StoredContractByHash { .. } + | ExecutableDeployItem::StoredContractByName { .. } + | ExecutableDeployItem::StoredVersionedContractByHash { .. } + | ExecutableDeployItem::StoredVersionedContractByName { .. } => (), + } + + match session.identifier() { + // We skip validation if the identifier is a named key, since that could yield a + // validation success at block X, then a validation failure at block X+1 (e.g. if the + // named key is deleted, or updated to point to an item which will fail subsequent + // validation). + ExecutableDeployItemIdentifier::Module + | ExecutableDeployItemIdentifier::Transfer + | ExecutableDeployItemIdentifier::AddressableEntity( + AddressableEntityIdentifier::Name(_), + ) + | ExecutableDeployItemIdentifier::Package(PackageIdentifier::Name { .. }) + | ExecutableDeployItemIdentifier::Package(PackageIdentifier::NameWithMajorVersion { + .. 
+ }) => self.validate_transaction_cryptography(effect_builder, event_metadata), + ExecutableDeployItemIdentifier::AddressableEntity( + AddressableEntityIdentifier::Hash(entity_hash), + ) => { + let entity_addr = EntityAddr::SmartContract(entity_hash.value()); + effect_builder + .get_addressable_entity(*block_header.state_root_hash(), entity_addr) + .event(move |result| Event::GetContractResult { + event_metadata, + block_header, + is_payment: false, + contract_hash: entity_hash, + maybe_entity: result.into_option(), + }) + } + ExecutableDeployItemIdentifier::AddressableEntity( + AddressableEntityIdentifier::Addr(entity_addr), + ) => effect_builder + .get_addressable_entity(*block_header.state_root_hash(), entity_addr) + .event(move |result| Event::GetAddressableEntityResult { + event_metadata, + block_header, + maybe_entity: result.into_option(), + }), + ExecutableDeployItemIdentifier::Package( + ref package_identifier @ PackageIdentifier::Hash { package_hash, .. }, + ) + | ExecutableDeployItemIdentifier::Package( + ref package_identifier @ PackageIdentifier::HashWithMajorVersion { + package_hash, .. 
+ }, + ) => { + let maybe_package_version = package_identifier.version(); + effect_builder + .get_package(*block_header.state_root_hash(), package_hash.value()) + .event(move |maybe_package| Event::GetPackageResult { + event_metadata, + block_header, + is_payment: false, + package_hash, + maybe_entity_version: maybe_package_version, + maybe_protocol_version_major: None, + maybe_package, + }) + } + } + } + + fn verify_transaction_v1_body( + &self, + effect_builder: EffectBuilder, + event_metadata: Box, + block_header: Box, + ) -> Effects { + enum NextStep { + GetContract(EntityAddr), + GetPackage( + PackageAddr, + Option, + Option, + ), + CryptoValidation, + } + + let next_step = match &event_metadata.meta_transaction { + MetaTransaction::Deploy(meta_deploy) => { + let deploy_hash = meta_deploy.deploy().hash(); + error!( + %deploy_hash, + "should only handle version 1 transactions in verify_transaction_v1_body" + ); + return self.reject_transaction( + effect_builder, + *event_metadata, + Error::ExpectedTransactionV1, + ); + } + MetaTransaction::V1(txn) => match txn.target() { + TransactionTarget::Stored { id, .. } => match id { + TransactionInvocationTarget::ByHash(entity_addr) => { + NextStep::GetContract(EntityAddr::SmartContract(*entity_addr)) + } + TransactionInvocationTarget::ByPackageHash { + addr, + version, + protocol_version_major, + } => NextStep::GetPackage(*addr, *version, *protocol_version_major), + TransactionInvocationTarget::ByName(_) + | TransactionInvocationTarget::ByPackageName { .. } => { + NextStep::CryptoValidation + } + }, + TransactionTarget::Native | TransactionTarget::Session { .. } => { + NextStep::CryptoValidation + } + }, + }; + + match next_step { + NextStep::GetContract(entity_addr) => { + // Use `Key::Hash` variant so that we try to retrieve the entity as either an + // AddressableEntity, or fall back to retrieving an un-migrated Contract. 
+ effect_builder + .get_addressable_entity(*block_header.state_root_hash(), entity_addr) + .event(move |result| Event::GetContractResult { + event_metadata, + block_header, + is_payment: false, + contract_hash: AddressableEntityHash::new(entity_addr.value()), + maybe_entity: result.into_option(), + }) + } + NextStep::GetPackage( + package_addr, + maybe_entity_version, + maybe_protocol_version_major, + ) => effect_builder + .get_package(*block_header.state_root_hash(), package_addr) + .event(move |maybe_package| Event::GetPackageResult { + event_metadata, + block_header, + is_payment: false, + package_hash: PackageHash::new(package_addr), + maybe_entity_version, + maybe_protocol_version_major, + maybe_package, + }), + NextStep::CryptoValidation => { + self.validate_transaction_cryptography(effect_builder, event_metadata) + } + } + } + + fn handle_get_contract_result( + &self, + effect_builder: EffectBuilder, + event_metadata: Box, + block_header: Box, + is_payment: bool, + contract_hash: AddressableEntityHash, + maybe_contract: Option, + ) -> Effects { + let addressable_entity = match maybe_contract { + Some(addressable_entity) => addressable_entity, + None => { + let error = Error::parameter_failure( + &block_header, + ParameterFailure::NoSuchContractAtHash { contract_hash }, + ); + return self.reject_transaction(effect_builder, *event_metadata, error); + } + }; + + let maybe_entry_point_name = match &event_metadata.meta_transaction { + MetaTransaction::Deploy(meta_deploy) if is_payment => Some( + meta_deploy + .deploy() + .payment() + .entry_point_name() + .to_string(), + ), + MetaTransaction::Deploy(meta_deploy) => Some( + meta_deploy + .deploy() + .session() + .entry_point_name() + .to_string(), + ), + MetaTransaction::V1(_) if is_payment => { + error!("should not fetch a contract to validate payment logic for transaction v1s"); + None + } + MetaTransaction::V1(txn) => match txn.entry_point() { + TransactionEntryPoint::Call => 
Some(DEFAULT_ENTRY_POINT_NAME.to_owned()), + TransactionEntryPoint::Custom(name) => Some(name.clone()), + TransactionEntryPoint::Transfer + | TransactionEntryPoint::Burn + | TransactionEntryPoint::AddBid + | TransactionEntryPoint::WithdrawBid + | TransactionEntryPoint::Delegate + | TransactionEntryPoint::Undelegate + | TransactionEntryPoint::Redelegate + | TransactionEntryPoint::ActivateBid + | TransactionEntryPoint::ChangeBidPublicKey + | TransactionEntryPoint::AddReservations + | TransactionEntryPoint::CancelReservations => None, + }, + }; + + match maybe_entry_point_name { + Some(entry_point_name) => effect_builder + .does_entry_point_exist( + *block_header.state_root_hash(), + contract_hash.value(), + entry_point_name.clone(), + ) + .event(move |entry_point_result| Event::GetEntryPointResult { + event_metadata, + block_header, + is_payment, + entry_point_name, + addressable_entity, + entry_point_exists: entry_point_result.is_success(), + }), + + None => { + if is_payment { + return self.verify_body(effect_builder, event_metadata, block_header); + } + self.validate_transaction_cryptography(effect_builder, event_metadata) + } + } + } + + #[allow(clippy::too_many_arguments)] + fn handle_get_entry_point_result( + &self, + effect_builder: EffectBuilder, + event_metadata: Box, + block_header: Box, + is_payment: bool, + entry_point_name: String, + addressable_entity: AddressableEntity, + entry_point_exist: bool, + ) -> Effects { + match addressable_entity.kind() { + EntityKind::SmartContract(ContractRuntimeTag::VmCasperV1) + | EntityKind::Account(_) + | EntityKind::System(_) => { + if !entry_point_exist { + let error = Error::parameter_failure( + &block_header, + ParameterFailure::NoSuchEntryPoint { entry_point_name }, + ); + return self.reject_transaction(effect_builder, *event_metadata, error); + } + if is_payment { + return self.verify_body(effect_builder, event_metadata, block_header); + } + self.validate_transaction_cryptography(effect_builder, event_metadata) + 
} + EntityKind::SmartContract(ContractRuntimeTag::VmCasperV2) => { + // Engine V2 does not store entrypoint information on chain and relies entirely on + // the Wasm itself. + self.validate_transaction_cryptography(effect_builder, event_metadata) + } + } + } + + #[allow(clippy::too_many_arguments)] + fn handle_get_package_result( + &self, + effect_builder: EffectBuilder, + event_metadata: Box, + block_header: Box, + is_payment: bool, + package_hash: PackageHash, + maybe_contract_version: Option, + maybe_protocol_version_major: Option, + maybe_package: Option>, + ) -> Effects { + let package = match maybe_package { + Some(package) => package, + None => { + let error = Error::parameter_failure( + &block_header, + ParameterFailure::NoSuchPackageAtHash { package_hash }, + ); + return self.reject_transaction(effect_builder, *event_metadata, error); + } + }; + + let maybe_entity_version_key = match self.resolve_entity_version_key( + package.as_ref(), + maybe_contract_version, + maybe_protocol_version_major, + &block_header, + ) { + Ok(maybe) => maybe, + Err(err) => return self.reject_transaction(effect_builder, *event_metadata, *err), + }; + let entity_version_key = match maybe_entity_version_key { + Some(version) => version, + None => { + // We continue to the next step in None case due to the subjective + // nature of global state. 
+ if is_payment { + return self.verify_body(effect_builder, event_metadata, block_header); + } + return self.validate_transaction_cryptography(effect_builder, event_metadata); + } + }; + + if package.is_version_missing(entity_version_key) { + let error = Error::parameter_failure( + &block_header, + ParameterFailure::MissingEntityAtVersion { entity_version_key }, + ); + return self.reject_transaction(effect_builder, *event_metadata, error); + } + + if !package.is_version_enabled(entity_version_key) { + let error = Error::parameter_failure( + &block_header, + ParameterFailure::DisabledEntityAtVersion { entity_version_key }, + ); + return self.reject_transaction(effect_builder, *event_metadata, error); + } + + match package.lookup_entity_hash(entity_version_key) { + Some(&entity_addr) => { + let contract_hash = AddressableEntityHash::new(entity_addr.value()); + effect_builder + .get_addressable_entity(*block_header.state_root_hash(), entity_addr) + .event(move |result| Event::GetContractResult { + event_metadata, + block_header, + is_payment, + contract_hash, + maybe_entity: result.into_option(), + }) + } + None => { + let error = Error::parameter_failure( + &block_header, + ParameterFailure::InvalidEntityAtVersion { entity_version_key }, + ); + self.reject_transaction(effect_builder, *event_metadata, error) + } + } + } + + /// Resolves EntityVersionKey for a given contract. Returning Some(k) means that k is an enabled + /// version matching the criteria. 
Returning None doesn't mean there is no fit - it means + /// that we can't for sure determine the version key since the state at execution might be + /// different - we must assume that a valid EntityVersionKey might be present for the package or + /// error out during execution + fn resolve_entity_version_key( + &self, + package: &Package, + maybe_entity_version: Option, + maybe_protocol_version_major: Option, + block_header: &BlockHeader, + ) -> Result, Box> { + let entity_version_key = match (maybe_entity_version, maybe_protocol_version_major) { + (Some(entity_version), Some(major)) => EntityVersionKey::new(major, entity_version), + (Some(_), None) | (None, Some(_)) | (None, None) => return Ok(None), /* In this case + * the runtime + * needs to do + * the + * determination, at this point we can't be sure which versions will be available on execution */ + }; + + if package.is_version_missing(entity_version_key) { + return Err(Box::new(Error::parameter_failure( + block_header, + ParameterFailure::MissingEntityAtVersion { entity_version_key }, + ))); + } + + if !package.is_version_enabled(entity_version_key) { + return Err(Box::new(Error::parameter_failure( + block_header, + ParameterFailure::DisabledEntityAtVersion { entity_version_key }, + ))); + } + Ok(Some(entity_version_key)) + } + + fn validate_transaction_cryptography( + &self, + effect_builder: EffectBuilder, + event_metadata: Box, + ) -> Effects { + let is_valid = match &event_metadata.meta_transaction { + MetaTransaction::Deploy(meta_deploy) => meta_deploy + .deploy() + .is_valid() + .map_err(|err| Error::InvalidTransaction(err.into())), + MetaTransaction::V1(txn) => txn + .verify() + .map_err(|err| Error::InvalidTransaction(err.into())), + }; + if let Err(error) = is_valid { + return self.reject_transaction(effect_builder, *event_metadata, error); + } + + // If this has been received from the speculative exec server, we just want to call the + // responder and finish. 
Otherwise store the transaction and announce it if required. + if let Source::SpeculativeExec = event_metadata.source { + if let Some(responder) = event_metadata.maybe_responder { + return responder.respond(Ok(())).ignore(); + } + error!("speculative exec source should always have a responder"); + return Effects::new(); + } + + effect_builder + .put_transaction_to_storage(event_metadata.transaction.clone()) + .event(move |is_new| Event::PutToStorageResult { + event_metadata, + is_new, + }) + } + + fn reject_transaction( + &self, + effect_builder: EffectBuilder, + event_metadata: EventMetadata, + error: Error, + ) -> Effects { + let EventMetadata { + meta_transaction: _, + transaction, + source, + maybe_responder, + verification_start_timestamp, + } = event_metadata; + self.reject_transaction_direct( + effect_builder, + transaction, + source, + maybe_responder, + verification_start_timestamp, + error, + ) + } + + fn reject_transaction_direct( + &self, + effect_builder: EffectBuilder, + transaction: Transaction, + source: Source, + maybe_responder: Option>>, + verification_start_timestamp: Timestamp, + error: Error, + ) -> Effects { + trace!(%error, transaction = %transaction, "rejected transaction"); + self.metrics.observe_rejected(verification_start_timestamp); + let mut effects = Effects::new(); + if let Some(responder) = maybe_responder { + // The client has submitted an invalid transaction + // Return an error to the RPC component via the responder. 
+ effects.extend(responder.respond(Err(error)).ignore()); + } + + effects.extend( + effect_builder + .announce_invalid_transaction(transaction, source) + .ignore(), + ); + effects + } + + fn handle_put_to_storage( + &self, + effect_builder: EffectBuilder, + event_metadata: Box, + is_new: bool, + ) -> Effects { + let mut effects = Effects::new(); + if is_new { + debug!(transaction = %event_metadata.transaction, "accepted transaction"); + effects.extend( + effect_builder + .announce_new_transaction_accepted( + Arc::new(event_metadata.transaction), + event_metadata.source, + ) + .ignore(), + ); + } else if matches!(event_metadata.source, Source::Peer(_)) { + // If `is_new` is `false`, the transaction was previously stored. If the source is + // `Peer`, we got here as a result of a `Fetch` or `Fetch`, and + // the incoming transaction could have a different set of approvals to the one already + // stored. We can treat the incoming approvals as finalized and now try and store them. + // If storing them returns `true`, (indicating the approvals are different to any + // previously stored) we can announce a new transaction accepted, causing the fetcher + // to be notified. 
+ return effect_builder + .store_finalized_approvals( + event_metadata.transaction.hash(), + event_metadata.transaction.approvals(), + ) + .event(move |is_new| Event::StoredFinalizedApprovals { + event_metadata, + is_new, + }); + } + self.metrics + .observe_accepted(event_metadata.verification_start_timestamp); + + if let Some(responder) = event_metadata.maybe_responder { + effects.extend(responder.respond(Ok(())).ignore()); + } + effects + } + + fn handle_stored_finalized_approvals( + &self, + effect_builder: EffectBuilder, + event_metadata: Box, + is_new: bool, + ) -> Effects { + let EventMetadata { + meta_transaction: _, + transaction, + source, + maybe_responder, + verification_start_timestamp, + } = *event_metadata; + debug!(%transaction, "accepted transaction"); + self.metrics.observe_accepted(verification_start_timestamp); + let mut effects = Effects::new(); + if is_new { + effects.extend( + effect_builder + .announce_new_transaction_accepted(Arc::new(transaction), source) + .ignore(), + ); + } + + if let Some(responder) = maybe_responder { + effects.extend(responder.respond(Ok(())).ignore()); + } + effects + } +} + +impl Component for TransactionAcceptor { + type Event = Event; + + fn name(&self) -> &str { + COMPONENT_NAME + } + + fn handle_event( + &mut self, + effect_builder: EffectBuilder, + _rng: &mut NodeRng, + event: Self::Event, + ) -> Effects { + trace!(?event, "TransactionAcceptor: handling event"); + match event { + Event::Accept { + transaction, + source, + maybe_responder: responder, + } => self.accept(effect_builder, transaction, source, responder), + Event::GetBlockHeaderResult { + event_metadata, + maybe_block_header, + } => self.handle_get_block_header_result( + effect_builder, + event_metadata, + maybe_block_header, + ), + Event::GetAddressableEntityResult { + event_metadata, + block_header, + maybe_entity, + } => self.handle_get_entity_result( + effect_builder, + event_metadata, + block_header, + maybe_entity, + ), + 
Event::GetBalanceResult { + event_metadata, + block_header, + maybe_balance, + } => self.handle_get_balance_result( + effect_builder, + event_metadata, + block_header, + maybe_balance, + ), + Event::GetContractResult { + event_metadata, + block_header, + is_payment, + contract_hash, + maybe_entity, + } => self.handle_get_contract_result( + effect_builder, + event_metadata, + block_header, + is_payment, + contract_hash, + maybe_entity, + ), + Event::GetPackageResult { + event_metadata, + block_header, + is_payment, + package_hash, + maybe_entity_version, + maybe_protocol_version_major, + maybe_package, + } => self.handle_get_package_result( + effect_builder, + event_metadata, + block_header, + is_payment, + package_hash, + maybe_entity_version, + maybe_protocol_version_major, + maybe_package, + ), + Event::GetEntryPointResult { + event_metadata, + block_header, + is_payment, + entry_point_name, + addressable_entity, + entry_point_exists, + } => self.handle_get_entry_point_result( + effect_builder, + event_metadata, + block_header, + is_payment, + entry_point_name, + addressable_entity, + entry_point_exists, + ), + Event::PutToStorageResult { + event_metadata, + is_new, + } => self.handle_put_to_storage(effect_builder, event_metadata, is_new), + Event::StoredFinalizedApprovals { + event_metadata, + is_new, + } => self.handle_stored_finalized_approvals(effect_builder, event_metadata, is_new), + } + } +} + +// `allow` can be removed once https://github.com/casper-network/casper-node/issues/3063 is fixed. 
+#[allow(clippy::result_large_err)] +fn is_authorized_entity( + addressable_entity: &AddressableEntity, + administrators: &BTreeSet, + event_metadata: &EventMetadata, +) -> Result<(), ParameterFailure> { + let authorization_keys = event_metadata.transaction.signers(); + + if administrators + .intersection(&authorization_keys) + .next() + .is_some() + { + return Ok(()); + } + + if !addressable_entity.can_authorize(&authorization_keys) { + return Err(ParameterFailure::InvalidAssociatedKeys); + } + + if !addressable_entity.can_deploy_with(&authorization_keys) { + return Err(ParameterFailure::InsufficientSignatureWeight); + } + + Ok(()) +} + +// `allow` can be removed once https://github.com/casper-network/casper-node/issues/3063 is fixed. +#[allow(clippy::result_large_err)] +fn deploy_payment_is_valid( + payment: &ExecutableDeployItem, + block_header: &BlockHeader, +) -> Result<(), Error> { + match payment { + ExecutableDeployItem::Transfer { .. } => { + return Err(Error::parameter_failure( + block_header, + DeployParameterFailure::InvalidPaymentVariant.into(), + )); + } + ExecutableDeployItem::ModuleBytes { module_bytes, args } => { + // module bytes being empty implies the payment executable is standard payment. + if module_bytes.is_empty() { + if let Some(value) = args.get(ARG_AMOUNT) { + if value.to_t::().is_err() { + return Err(Error::parameter_failure( + block_header, + DeployParameterFailure::FailedToParsePaymentAmount.into(), + )); + } + } else { + return Err(Error::parameter_failure( + block_header, + DeployParameterFailure::MissingPaymentAmount.into(), + )); + } + } + } + ExecutableDeployItem::StoredContractByHash { .. } + | ExecutableDeployItem::StoredContractByName { .. } + | ExecutableDeployItem::StoredVersionedContractByHash { .. } + | ExecutableDeployItem::StoredVersionedContractByName { .. 
} => (), + } + Ok(()) +} diff --git a/node/src/components/transaction_acceptor/config.rs b/node/src/components/transaction_acceptor/config.rs new file mode 100644 index 0000000000..3d07446a94 --- /dev/null +++ b/node/src/components/transaction_acceptor/config.rs @@ -0,0 +1,29 @@ +use std::str::FromStr; + +use datasize::DataSize; +use serde::{Deserialize, Serialize}; + +use casper_types::TimeDiff; + +const DEFAULT_TIMESTAMP_LEEWAY: &str = "2sec"; + +/// Configuration options for accepting transactions. +#[derive(Copy, Clone, Serialize, Deserialize, Debug, DataSize)] +pub struct Config { + /// The leeway allowed when considering whether a transaction is future-dated or not. + /// + /// To accommodate minor clock drift, transactions whose timestamps are within + /// `timestamp_leeway` in the future are still acceptable. + /// + /// The maximum value to which `timestamp_leeway` can be set is defined by the chainspec + /// setting `transactions.max_timestamp_leeway`. + pub timestamp_leeway: TimeDiff, +} + +impl Default for Config { + fn default() -> Self { + Config { + timestamp_leeway: TimeDiff::from_str(DEFAULT_TIMESTAMP_LEEWAY).unwrap(), + } + } +} diff --git a/node/src/components/transaction_acceptor/error.rs b/node/src/components/transaction_acceptor/error.rs new file mode 100644 index 0000000000..1ce7409fcd --- /dev/null +++ b/node/src/components/transaction_acceptor/error.rs @@ -0,0 +1,197 @@ +use datasize::DataSize; +use serde::Serialize; +use thiserror::Error; + +use casper_binary_port::ErrorCode as BinaryPortErrorCode; +use casper_types::{ + AddressableEntityHash, BlockHash, BlockHeader, Digest, EntityVersionKey, InitiatorAddr, + InvalidTransaction, PackageHash, Timestamp, +}; + +// `allow` can be removed once https://github.com/casper-network/casper-node/issues/3063 is fixed. +#[allow(clippy::large_enum_variant)] +#[derive(Debug, Error, Serialize)] +pub(crate) enum Error { + /// The block chain has no blocks. 
+ #[error("block chain has no blocks")] + EmptyBlockchain, + + /// The deploy has an invalid transaction. + #[error("invalid transaction: {0}")] + InvalidTransaction(#[from] InvalidTransaction), + + /// The transaction is invalid due to missing or otherwise invalid parameters. + #[error( + "{failure} at state root hash {:?} of block {:?} at height {block_height}", + state_root_hash, + block_hash.inner(), + )] + Parameters { + state_root_hash: Digest, + block_hash: BlockHash, + block_height: u64, + failure: ParameterFailure, + }, + + /// The transaction received by the node from the client has expired. + #[error( + "transaction received by the node expired at {expiry_timestamp} with node's time at \ + {current_node_timestamp}" + )] + Expired { + /// The timestamp when the transaction expired. + expiry_timestamp: Timestamp, + /// The timestamp when the node validated the expiry timestamp. + current_node_timestamp: Timestamp, + }, + + /// Component state error: expected a deploy. + #[error("internal error: expected a deploy")] + ExpectedDeploy, + + /// Component state error: expected a version 1 transaction. + #[error("internal error: expected a transaction")] + ExpectedTransactionV1, +} + +impl Error { + pub(super) fn parameter_failure(block_header: &BlockHeader, failure: ParameterFailure) -> Self { + Error::Parameters { + state_root_hash: *block_header.state_root_hash(), + block_hash: block_header.block_hash(), + block_height: block_header.height(), + failure, + } + } +} + +impl From for BinaryPortErrorCode { + fn from(err: Error) -> Self { + match err { + Error::EmptyBlockchain => BinaryPortErrorCode::EmptyBlockchain, + Error::ExpectedDeploy => BinaryPortErrorCode::ExpectedDeploy, + Error::ExpectedTransactionV1 => BinaryPortErrorCode::ExpectedTransaction, + Error::Expired { .. } => BinaryPortErrorCode::TransactionExpired, + Error::Parameters { failure, .. } => match failure { + ParameterFailure::NoSuchAddressableEntity { .. 
} => { + BinaryPortErrorCode::NoSuchAddressableEntity + } + ParameterFailure::NoSuchContractAtHash { .. } => { + BinaryPortErrorCode::NoSuchContractAtHash + } + ParameterFailure::NoSuchEntryPoint { .. } => BinaryPortErrorCode::NoSuchEntryPoint, + ParameterFailure::NoSuchPackageAtHash { .. } => { + BinaryPortErrorCode::NoSuchPackageAtHash + } + ParameterFailure::InvalidEntityAtVersion { .. } => { + BinaryPortErrorCode::InvalidEntityAtVersion + } + ParameterFailure::DisabledEntityAtVersion { .. } => { + BinaryPortErrorCode::DisabledEntityAtVersion + } + ParameterFailure::MissingEntityAtVersion { .. } => { + BinaryPortErrorCode::MissingEntityAtVersion + } + ParameterFailure::InvalidAssociatedKeys => { + BinaryPortErrorCode::InvalidAssociatedKeys + } + ParameterFailure::InsufficientSignatureWeight => { + BinaryPortErrorCode::InsufficientSignatureWeight + } + ParameterFailure::InsufficientBalance { .. } => { + BinaryPortErrorCode::InsufficientBalance + } + ParameterFailure::UnknownBalance { .. } => BinaryPortErrorCode::UnknownBalance, + ParameterFailure::Deploy(deploy_failure) => match deploy_failure { + DeployParameterFailure::InvalidPaymentVariant => { + BinaryPortErrorCode::DeployInvalidPaymentVariant + } + DeployParameterFailure::MissingPaymentAmount => { + BinaryPortErrorCode::DeployMissingPaymentAmount + } + DeployParameterFailure::FailedToParsePaymentAmount => { + BinaryPortErrorCode::DeployFailedToParsePaymentAmount + } + DeployParameterFailure::MissingTransferTarget => { + BinaryPortErrorCode::DeployMissingTransferTarget + } + DeployParameterFailure::MissingModuleBytes => { + BinaryPortErrorCode::DeployMissingModuleBytes + } + }, + }, + Error::InvalidTransaction(invalid_transaction) => { + BinaryPortErrorCode::from(invalid_transaction) + } + } + } +} + +/// A representation of the way in which a transaction failed parameter checks. 
+#[derive(Clone, DataSize, Ord, PartialOrd, Eq, PartialEq, Hash, Debug, Error, Serialize)]
+pub(crate) enum ParameterFailure {
+    /// No such addressable entity.
+    #[error("addressable entity under {initiator_addr} does not exist")]
+    NoSuchAddressableEntity { initiator_addr: InitiatorAddr },
+    /// No such contract at given hash.
+    #[error("contract at {contract_hash} does not exist")]
+    NoSuchContractAtHash {
+        contract_hash: AddressableEntityHash,
+    },
+    /// No such contract entrypoint.
+    #[error("contract does not have entry point '{entry_point_name}'")]
+    NoSuchEntryPoint { entry_point_name: String },
+    /// No such package.
+    #[error("package at {package_hash} does not exist")]
+    NoSuchPackageAtHash { package_hash: PackageHash },
+    /// Invalid entity at the given version.
+    #[error("invalid entity at version key: {entity_version_key}")]
+    InvalidEntityAtVersion {
+        entity_version_key: EntityVersionKey,
+    },
+    /// Disabled entity at the given version.
+    #[error("disabled entity at version key: {entity_version_key}")]
+    DisabledEntityAtVersion {
+        entity_version_key: EntityVersionKey,
+    },
+    /// Missing entity at the given version.
+    #[error("missing entity at version key: {entity_version_key}")]
+    MissingEntityAtVersion {
+        entity_version_key: EntityVersionKey,
+    },
+    /// Invalid associated keys.
+    #[error("account authorization invalid")]
+    InvalidAssociatedKeys,
+    /// Insufficient transaction signature weight.
+    #[error("insufficient transaction signature weight")]
+    InsufficientSignatureWeight,
+    /// The transaction's addressable entity has insufficient balance.
+    #[error("insufficient balance in {initiator_addr}")]
+    InsufficientBalance { initiator_addr: InitiatorAddr },
+    /// The balance of the transaction's addressable entity cannot be read.
+    #[error("unable to determine balance for {initiator_addr}")]
+    UnknownBalance { initiator_addr: InitiatorAddr },
+    /// Error specific to `Deploy` parameters.
+ #[error(transparent)] + Deploy(#[from] DeployParameterFailure), +} + +/// A representation of the way in which a deploy failed validation checks. +#[derive(Clone, DataSize, Ord, PartialOrd, Eq, PartialEq, Hash, Debug, Error, Serialize)] +pub(crate) enum DeployParameterFailure { + /// Transfer is not valid for payment code. + #[error("transfer is not valid for payment code")] + InvalidPaymentVariant, + /// Missing payment "amount" runtime argument. + #[error("missing payment 'amount' runtime argument")] + MissingPaymentAmount, + /// Failed to parse payment "amount" runtime argument. + #[error("failed to parse payment 'amount' runtime argument as U512")] + FailedToParsePaymentAmount, + /// Missing transfer "target" runtime argument. + #[error("missing transfer 'target' runtime argument")] + MissingTransferTarget, + /// Module bytes for session code cannot be empty. + #[error("module bytes for session code cannot be empty")] + MissingModuleBytes, +} diff --git a/node/src/components/transaction_acceptor/event.rs b/node/src/components/transaction_acceptor/event.rs new file mode 100644 index 0000000000..d03c949e3a --- /dev/null +++ b/node/src/components/transaction_acceptor/event.rs @@ -0,0 +1,217 @@ +use std::fmt::{self, Display, Formatter}; + +use serde::Serialize; + +use casper_types::{ + contracts::ProtocolVersionMajor, AddressableEntity, AddressableEntityHash, BlockHeader, + EntityVersion, Package, PackageHash, Timestamp, Transaction, U512, +}; + +use super::{Error, Source}; +use crate::{effect::Responder, types::MetaTransaction}; + +/// A utility struct to hold duplicated information across events. 
+#[derive(Debug, Serialize)] +pub(crate) struct EventMetadata { + pub(crate) transaction: Transaction, + pub(crate) meta_transaction: MetaTransaction, + pub(crate) source: Source, + pub(crate) maybe_responder: Option>>, + pub(crate) verification_start_timestamp: Timestamp, +} + +impl EventMetadata { + pub(crate) fn new( + transaction: Transaction, + meta_transaction: MetaTransaction, + source: Source, + maybe_responder: Option>>, + verification_start_timestamp: Timestamp, + ) -> Self { + EventMetadata { + transaction, + meta_transaction, + source, + maybe_responder, + verification_start_timestamp, + } + } +} + +/// `TransactionAcceptor` events. +#[derive(Debug, Serialize)] +pub(crate) enum Event { + /// The initiating event to accept a new `Transaction`. + Accept { + transaction: Transaction, + source: Source, + maybe_responder: Option>>, + }, + /// The result of the `TransactionAcceptor` putting a `Transaction` to the storage + /// component. + PutToStorageResult { + event_metadata: Box, + is_new: bool, + }, + /// The result of the `TransactionAcceptor` storing the approvals from a `Transaction` + /// provided by a peer. + StoredFinalizedApprovals { + event_metadata: Box, + is_new: bool, + }, + /// The result of querying the highest available `BlockHeader` from the storage component. + GetBlockHeaderResult { + event_metadata: Box, + maybe_block_header: Option>, + }, + /// The result of querying global state for the `AddressableEntity` associated with the + /// `Transaction`'s execution context (previously known as the account). + GetAddressableEntityResult { + event_metadata: Box, + block_header: Box, + maybe_entity: Option, + }, + /// The result of querying the balance of the `AddressableEntity` associated with the + /// `Transaction`. + GetBalanceResult { + event_metadata: Box, + block_header: Box, + maybe_balance: Option, + }, + /// The result of querying global state for a `Contract` to verify the executable logic. 
+ GetContractResult { + event_metadata: Box, + block_header: Box, + is_payment: bool, + contract_hash: AddressableEntityHash, + maybe_entity: Option, + }, + /// The result of querying global state for a `Package` to verify the executable logic. + GetPackageResult { + event_metadata: Box, + block_header: Box, + is_payment: bool, + package_hash: PackageHash, + maybe_entity_version: Option, + maybe_protocol_version_major: Option, + maybe_package: Option>, + }, + /// The result of querying global state for an `EntryPoint` to verify the executable logic. + GetEntryPointResult { + event_metadata: Box, + block_header: Box, + is_payment: bool, + entry_point_name: String, + addressable_entity: AddressableEntity, + entry_point_exists: bool, + }, +} + +impl Display for Event { + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + match self { + Event::Accept { + transaction, + source, + .. + } => { + write!(formatter, "accept {} from {}", transaction.hash(), source) + } + Event::PutToStorageResult { + event_metadata, + is_new, + .. + } => { + if *is_new { + write!( + formatter, + "put new {} to storage", + event_metadata.transaction.hash() + ) + } else { + write!( + formatter, + "had already stored {}", + event_metadata.transaction.hash() + ) + } + } + Event::StoredFinalizedApprovals { + event_metadata, + is_new, + .. + } => { + if *is_new { + write!( + formatter, + "put new finalized approvals {} to storage", + event_metadata.transaction.hash() + ) + } else { + write!( + formatter, + "had already stored finalized approvals for {}", + event_metadata.transaction.hash() + ) + } + } + Event::GetBlockHeaderResult { event_metadata, .. } => { + write!( + formatter, + "received highest block from storage to validate transaction with hash {}", + event_metadata.transaction.hash() + ) + } + Event::GetAddressableEntityResult { event_metadata, .. 
} => { + write!( + formatter, + "verifying addressable entity to validate transaction with hash {}", + event_metadata.transaction.hash() + ) + } + Event::GetBalanceResult { event_metadata, .. } => { + write!( + formatter, + "verifying account balance to validate transaction with hash {}", + event_metadata.transaction.hash() + ) + } + Event::GetContractResult { + event_metadata, + block_header, + .. + } => { + write!( + formatter, + "verifying contract to validate transaction with hash {} with state hash {}", + event_metadata.transaction.hash(), + block_header.state_root_hash() + ) + } + Event::GetPackageResult { + event_metadata, + block_header, + .. + } => { + write!( + formatter, + "verifying package to validate transaction with hash {} with state hash {}", + event_metadata.transaction.hash(), + block_header.state_root_hash() + ) + } + Event::GetEntryPointResult { + event_metadata, + block_header, + .. + } => { + write!( + formatter, + "verifying entry point to validate transaction with hash {} with state hash {}", + event_metadata.transaction.hash(), + block_header.state_root_hash(), + ) + } + } + } +} diff --git a/node/src/components/transaction_acceptor/metrics.rs b/node/src/components/transaction_acceptor/metrics.rs new file mode 100644 index 0000000000..9f21f905cd --- /dev/null +++ b/node/src/components/transaction_acceptor/metrics.rs @@ -0,0 +1,71 @@ +use prometheus::{Histogram, Registry}; + +use casper_types::Timestamp; + +use crate::{unregister_metric, utils}; + +const TRANSACTION_ACCEPTED_NAME: &str = "transaction_acceptor_accepted_transaction"; +const TRANSACTION_ACCEPTED_HELP: &str = + "time in seconds to accept a transaction in the transaction acceptor"; +const TRANSACTION_REJECTED_NAME: &str = "transaction_acceptor_rejected_transaction"; +const TRANSACTION_REJECTED_HELP: &str = + "time in seconds to reject a transaction in the transaction acceptor"; + +/// Value of upper bound of the first bucket. In ms. 
+const EXPONENTIAL_BUCKET_START_MS: f64 = 10.0; + +/// Multiplier of previous upper bound for next bound. +const EXPONENTIAL_BUCKET_FACTOR: f64 = 2.0; + +/// Bucket count, with the last bucket going to +Inf which will not be included in the results. +const EXPONENTIAL_BUCKET_COUNT: usize = 10; + +#[derive(Debug)] +pub(super) struct Metrics { + transaction_accepted: Histogram, + transaction_rejected: Histogram, + registry: Registry, +} + +impl Metrics { + pub(super) fn new(registry: &Registry) -> Result { + let common_buckets = prometheus::exponential_buckets( + EXPONENTIAL_BUCKET_START_MS, + EXPONENTIAL_BUCKET_FACTOR, + EXPONENTIAL_BUCKET_COUNT, + )?; + + Ok(Self { + transaction_accepted: utils::register_histogram_metric( + registry, + TRANSACTION_ACCEPTED_NAME, + TRANSACTION_ACCEPTED_HELP, + common_buckets.clone(), + )?, + transaction_rejected: utils::register_histogram_metric( + registry, + TRANSACTION_REJECTED_NAME, + TRANSACTION_REJECTED_HELP, + common_buckets, + )?, + registry: registry.clone(), + }) + } + + pub(super) fn observe_rejected(&self, start: Timestamp) { + self.transaction_rejected + .observe(start.elapsed().millis() as f64); + } + + pub(super) fn observe_accepted(&self, start: Timestamp) { + self.transaction_accepted + .observe(start.elapsed().millis() as f64); + } +} + +impl Drop for Metrics { + fn drop(&mut self) { + unregister_metric!(self.registry, self.transaction_accepted); + unregister_metric!(self.registry, self.transaction_rejected); + } +} diff --git a/node/src/components/transaction_acceptor/tests.rs b/node/src/components/transaction_acceptor/tests.rs new file mode 100644 index 0000000000..ee77a15fd6 --- /dev/null +++ b/node/src/components/transaction_acceptor/tests.rs @@ -0,0 +1,3067 @@ +#![cfg(test)] + +use std::{ + collections::{BTreeMap, VecDeque}, + fmt::{self, Debug, Display, Formatter}, + iter, + sync::Arc, + time::Duration, +}; + +use derive_more::From; +use futures::{ + channel::oneshot::{self, Sender}, + FutureExt, +}; +use 
prometheus::Registry; +use reactor::ReactorEvent; +use serde::Serialize; +use tempfile::TempDir; +use thiserror::Error; +use tokio::time; + +use casper_storage::{ + data_access_layer::{ + AddressableEntityResult, BalanceIdentifier, BalanceResult, EntryPointExistsResult, + ProofsResult, QueryResult, + }, + tracking_copy::TrackingCopyError, +}; +use casper_types::{ + account::{Account, AccountHash, ActionThresholds, AssociatedKeys, Weight}, + addressable_entity::AddressableEntity, + bytesrepr::Bytes, + contracts::{ + ContractHash, ContractPackage, ContractPackageStatus, ContractVersionKey, NamedKeys, + }, + global_state::TrieMerkleProof, + testing::TestRng, + Block, BlockV2, CLValue, Chainspec, ChainspecRawBytes, Contract, Deploy, EraId, Groups, + HashAddr, InvalidDeploy, InvalidTransaction, InvalidTransactionV1, Key, PackageAddr, + PricingHandling, PricingMode, ProtocolVersion, PublicKey, SecretKey, StoredValue, + TestBlockBuilder, TimeDiff, Timestamp, Transaction, TransactionArgs, TransactionConfig, + TransactionRuntimeParams, TransactionV1, URef, DEFAULT_BASELINE_MOTES_AMOUNT, +}; + +use super::*; +use crate::{ + components::{ + network::Identity as NetworkIdentity, + storage::{self, Storage}, + }, + consensus::tests::utils::{ALICE_PUBLIC_KEY, BOB_PUBLIC_KEY, CAROL_PUBLIC_KEY}, + effect::{ + announcements::{ControlAnnouncement, TransactionAcceptorAnnouncement}, + requests::{ + ContractRuntimeRequest, MakeBlockExecutableRequest, MarkBlockCompletedRequest, + NetworkRequest, + }, + Responder, + }, + logging, + protocol::Message, + reactor::{self, EventQueueHandle, QueueKind, Runner, TryCrankOutcome}, + testing::ConditionCheckReactor, + types::{transaction::transaction_v1_builder::TransactionV1Builder, NodeId}, + utils::{Loadable, WithDir}, + NodeRng, +}; + +const POLL_INTERVAL: Duration = Duration::from_millis(10); +const TIMEOUT: Duration = Duration::from_secs(30); + +/// Top-level event for the reactor. 
+#[derive(Debug, From, Serialize)] +#[allow(clippy::large_enum_variant)] +#[must_use] +enum Event { + #[from] + Storage(#[serde(skip_serializing)] storage::Event), + #[from] + TransactionAcceptor(#[serde(skip_serializing)] super::Event), + ControlAnnouncement(ControlAnnouncement), + #[from] + FatalAnnouncement(FatalAnnouncement), + #[from] + TransactionAcceptorAnnouncement(#[serde(skip_serializing)] TransactionAcceptorAnnouncement), + #[from] + ContractRuntime(#[serde(skip_serializing)] ContractRuntimeRequest), + #[from] + StorageRequest(StorageRequest), + #[from] + NetworkRequest(NetworkRequest), +} + +impl From for Event { + fn from(request: MakeBlockExecutableRequest) -> Self { + Event::Storage(storage::Event::MakeBlockExecutableRequest(Box::new( + request, + ))) + } +} + +impl From for Event { + fn from(request: MarkBlockCompletedRequest) -> Self { + Event::Storage(storage::Event::MarkBlockCompletedRequest(request)) + } +} + +impl From for Event { + fn from(control_announcement: ControlAnnouncement) -> Self { + Event::ControlAnnouncement(control_announcement) + } +} + +impl ReactorEvent for Event { + fn is_control(&self) -> bool { + matches!(self, Event::ControlAnnouncement(_)) + } + + fn try_into_control(self) -> Option { + if let Self::ControlAnnouncement(ctrl_ann) = self { + Some(ctrl_ann) + } else { + None + } + } +} + +impl Display for Event { + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + match self { + Event::Storage(event) => write!(formatter, "storage: {}", event), + Event::TransactionAcceptor(event) => { + write!(formatter, "transaction acceptor: {}", event) + } + Event::ControlAnnouncement(ctrl_ann) => write!(formatter, "control: {}", ctrl_ann), + Event::FatalAnnouncement(fatal_ann) => write!(formatter, "fatal: {}", fatal_ann), + Event::TransactionAcceptorAnnouncement(ann) => { + write!(formatter, "transaction-acceptor announcement: {}", ann) + } + + Event::ContractRuntime(event) => { + write!(formatter, "contract-runtime event: 
{:?}", event) + } + Event::StorageRequest(request) => write!(formatter, "storage request: {:?}", request), + Event::NetworkRequest(request) => write!(formatter, "network request: {:?}", request), + } + } +} + +/// Error type returned by the test reactor. +#[derive(Debug, Error)] +enum Error { + #[error("prometheus (metrics) error: {0}")] + Metrics(#[from] prometheus::Error), +} + +#[derive(Clone, PartialEq, Eq, Debug)] +enum ContractScenario { + Valid, + MissingContractAtHash, + MissingContractAtName, + MissingEntryPoint, +} + +#[derive(Clone, PartialEq, Eq, Debug)] +enum HashOrName { + Hash, + Name, +} +#[derive(Clone, PartialEq, Eq, Debug)] +enum ContractVersionExistance { + PackageDoesNotExist, + PackageExists( + bool, + BTreeMap, + BTreeSet, + ), +} + +#[derive(Clone, PartialEq, Eq, Debug)] +enum ContractPackageScenario { + Valid, + MissingPackageAtHash, + MissingPackageAtName, + MissingContractVersion, +} + +#[derive(Clone, PartialEq, Eq, Debug)] +enum TxnType { + Deploy, + V1, +} + +#[derive(Clone, PartialEq, Eq, Debug)] +enum TestScenario { + FromPeerInvalidTransaction(TxnType), + FromPeerInvalidTransactionZeroPayment(TxnType), + FromPeerExpired(TxnType), + FromPeerValidTransaction(TxnType), + FromPeerRepeatedValidTransaction(TxnType), + FromPeerMissingAccount(TxnType), + FromPeerAccountWithInsufficientWeight(TxnType), + FromPeerAccountWithInvalidAssociatedKeys(TxnType), + FromPeerCustomPaymentContract(ContractScenario), + FromPeerCustomPaymentContractPackage(ContractPackageScenario), + FromPeerSessionContract(TxnType, ContractScenario), + FromPeerSessionContractPackage(TxnType, ContractPackageScenario), + FromClientInvalidTransaction(TxnType), + FromClientInvalidTransactionZeroPayment(TxnType), + FromClientSlightlyFutureDatedTransaction(TxnType), + FromClientFutureDatedTransaction(TxnType), + FromClientExpired(TxnType), + FromClientMissingAccount(TxnType), + FromClientInsufficientBalance(TxnType), + FromClientValidTransaction(TxnType), + 
FromClientRepeatedValidTransaction(TxnType), + FromClientAccountWithInsufficientWeight(TxnType), + FromClientAccountWithInvalidAssociatedKeys(TxnType), + AccountWithUnknownBalance, + FromClientCustomPaymentContract(ContractScenario), + FromClientCustomPaymentContractPackage(ContractPackageScenario), + FromClientSessionContract(TxnType, ContractScenario), + FromClientSessionContractPackage(TxnType, ContractPackageScenario), + FromClientSignedByAdmin(TxnType), + DeployWithNativeTransferInPayment, + DeployWithEmptySessionModuleBytes, + DeployWithoutPaymentAmount, + DeployWithMangledPaymentAmount, + DeployWithMangledTransferAmount, + DeployWithoutTransferTarget, + DeployWithoutTransferAmount, + DeployWithPaymentOne, + BalanceCheckForDeploySentByPeer, + InvalidPricingModeForTransactionV1, + TooLowGasPriceToleranceForTransactionV1, + TransactionWithPaymentOne, + TooLowGasPriceToleranceForDeploy, + InvalidFields, + InvalidFieldsFromPeer, + InvalidArgumentsKind, + WasmTransactionWithTooBigPayment, + WasmDeployWithTooBigPayment, + RedelegateExceedingMaximumDelegation, + DelegateExceedingMaximumDelegation, + V1ByPackage( + HashOrName, + Option, + Option, + ContractVersionExistance, + ), + VmCasperV2ByPackageHash, +} + +impl TestScenario { + fn source(&self, rng: &mut NodeRng) -> Source { + match self { + TestScenario::FromPeerInvalidTransaction(_) + | TestScenario::FromPeerInvalidTransactionZeroPayment(_) + | TestScenario::FromPeerExpired(_) + | TestScenario::FromPeerValidTransaction(_) + | TestScenario::FromPeerRepeatedValidTransaction(_) + | TestScenario::BalanceCheckForDeploySentByPeer + | TestScenario::FromPeerMissingAccount(_) + | TestScenario::FromPeerAccountWithInsufficientWeight(_) + | TestScenario::FromPeerAccountWithInvalidAssociatedKeys(_) + | TestScenario::FromPeerCustomPaymentContract(_) + | TestScenario::FromPeerCustomPaymentContractPackage(_) + | TestScenario::FromPeerSessionContract(..) + | TestScenario::FromPeerSessionContractPackage(..) 
+ | TestScenario::InvalidFieldsFromPeer => Source::Peer(NodeId::random(rng)), + TestScenario::FromClientInvalidTransaction(_) + | TestScenario::FromClientInvalidTransactionZeroPayment(_) + | TestScenario::FromClientSlightlyFutureDatedTransaction(_) + | TestScenario::FromClientFutureDatedTransaction(_) + | TestScenario::FromClientExpired(_) + | TestScenario::FromClientMissingAccount(_) + | TestScenario::FromClientInsufficientBalance(_) + | TestScenario::FromClientValidTransaction(_) + | TestScenario::FromClientRepeatedValidTransaction(_) + | TestScenario::FromClientAccountWithInsufficientWeight(_) + | TestScenario::FromClientAccountWithInvalidAssociatedKeys(_) + | TestScenario::AccountWithUnknownBalance + | TestScenario::DeployWithoutPaymentAmount + | TestScenario::DeployWithMangledPaymentAmount + | TestScenario::DeployWithMangledTransferAmount + | TestScenario::DeployWithoutTransferAmount + | TestScenario::DeployWithPaymentOne + | TestScenario::DeployWithoutTransferTarget + | TestScenario::FromClientCustomPaymentContract(_) + | TestScenario::FromClientCustomPaymentContractPackage(_) + | TestScenario::FromClientSessionContract(..) + | TestScenario::FromClientSessionContractPackage(..) + | TestScenario::FromClientSignedByAdmin(_) + | TestScenario::DeployWithEmptySessionModuleBytes + | TestScenario::DeployWithNativeTransferInPayment + | TestScenario::InvalidPricingModeForTransactionV1 + | TestScenario::TooLowGasPriceToleranceForTransactionV1 + | TestScenario::TooLowGasPriceToleranceForDeploy + | TestScenario::TransactionWithPaymentOne + | TestScenario::InvalidFields + | TestScenario::InvalidArgumentsKind + | TestScenario::WasmTransactionWithTooBigPayment + | TestScenario::WasmDeployWithTooBigPayment + | TestScenario::RedelegateExceedingMaximumDelegation + | TestScenario::DelegateExceedingMaximumDelegation + | TestScenario::VmCasperV2ByPackageHash + | TestScenario::V1ByPackage(..) 
=> Source::Client, + } + } + + fn transaction(&self, rng: &mut TestRng, admin: &SecretKey) -> Transaction { + let secret_key = SecretKey::random(rng); + match self { + TestScenario::FromPeerInvalidTransaction(TxnType::Deploy) + | TestScenario::FromClientInvalidTransaction(TxnType::Deploy) => { + let mut deploy = Deploy::random_valid_native_transfer(rng); + deploy.invalidate(); + Transaction::from(deploy) + } + TestScenario::FromPeerInvalidTransaction(TxnType::V1) + | TestScenario::FromClientInvalidTransaction(TxnType::V1) => { + let mut txn = TransactionV1::random(rng); + txn.invalidate(); + Transaction::from(txn) + } + TestScenario::FromClientInvalidTransactionZeroPayment(TxnType::V1) => { + let txn = TransactionV1Builder::new_session( + false, + Bytes::from(vec![1]), + TransactionRuntimeParams::VmCasperV1, + ) + .with_pricing_mode(PricingMode::PaymentLimited { + standard_payment: true, + gas_price_tolerance: 5, + payment_amount: 0, + }) + .with_chain_name("casper-example") + .with_timestamp(Timestamp::now()) + .with_secret_key(&secret_key) + .build() + .unwrap(); + Transaction::from(txn) + } + TestScenario::FromPeerInvalidTransactionZeroPayment(TxnType::V1) => { + let txn = TransactionV1Builder::new_session( + false, + Bytes::from(vec![1]), + TransactionRuntimeParams::VmCasperV1, + ) + .with_pricing_mode(PricingMode::PaymentLimited { + standard_payment: true, + gas_price_tolerance: 5, + payment_amount: 0, + }) + .with_chain_name("casper-example") + .with_timestamp(Timestamp::now()) + .with_secret_key(&secret_key) + .build() + .unwrap(); + Transaction::from(txn) + } + TestScenario::FromClientInvalidTransactionZeroPayment(TxnType::Deploy) => { + Transaction::from(Deploy::random_without_payment_amount(rng)) + } + TestScenario::FromPeerInvalidTransactionZeroPayment(TxnType::Deploy) => { + Transaction::from(Deploy::random_without_payment_amount(rng)) + } + TestScenario::FromPeerExpired(TxnType::Deploy) + | TestScenario::FromClientExpired(TxnType::Deploy) => { + 
Transaction::from(Deploy::random_expired_deploy(rng)) + } + TestScenario::FromPeerExpired(TxnType::V1) + | TestScenario::FromClientExpired(TxnType::V1) => { + let txn = TransactionV1Builder::new_session( + false, + Bytes::from(vec![1]), + TransactionRuntimeParams::VmCasperV1, + ) + .with_chain_name("casper-example") + .with_timestamp(Timestamp::zero()) + .with_secret_key(&secret_key) + .build() + .unwrap(); + Transaction::from(txn) + } + TestScenario::FromPeerValidTransaction(txn_type) + | TestScenario::FromPeerRepeatedValidTransaction(txn_type) + | TestScenario::FromPeerMissingAccount(txn_type) + | TestScenario::FromPeerAccountWithInvalidAssociatedKeys(txn_type) + | TestScenario::FromPeerAccountWithInsufficientWeight(txn_type) + | TestScenario::FromClientMissingAccount(txn_type) + | TestScenario::FromClientInsufficientBalance(txn_type) + | TestScenario::FromClientValidTransaction(txn_type) + | TestScenario::FromClientRepeatedValidTransaction(txn_type) + | TestScenario::FromClientAccountWithInvalidAssociatedKeys(txn_type) + | TestScenario::FromClientAccountWithInsufficientWeight(txn_type) => match txn_type { + TxnType::Deploy => Transaction::from(Deploy::random_valid_native_transfer(rng)), + TxnType::V1 => { + let txn = TransactionV1Builder::new_session( + false, + Bytes::from(vec![1]), + TransactionRuntimeParams::VmCasperV1, + ) + .with_chain_name("casper-example") + .with_timestamp(Timestamp::now()) + .with_secret_key(&secret_key) + .build() + .unwrap(); + Transaction::from(txn) + } + }, + TestScenario::FromClientSignedByAdmin(TxnType::Deploy) => { + let mut deploy = Deploy::random_valid_native_transfer(rng); + deploy.sign(admin); + Transaction::from(deploy) + } + TestScenario::FromClientSignedByAdmin(TxnType::V1) => { + let txn = TransactionV1Builder::new_session( + false, + Bytes::from(vec![1]), + TransactionRuntimeParams::VmCasperV1, + ) + .with_chain_name("casper-example") + .with_timestamp(Timestamp::now()) + .with_secret_key(admin) + .build() + .unwrap(); + 
Transaction::from(txn) + } + TestScenario::AccountWithUnknownBalance + | TestScenario::BalanceCheckForDeploySentByPeer => { + Transaction::from(Deploy::random_valid_native_transfer(rng)) + } + TestScenario::DeployWithoutPaymentAmount => { + Transaction::from(Deploy::random_without_payment_amount(rng)) + } + TestScenario::DeployWithMangledPaymentAmount => { + Transaction::from(Deploy::random_with_mangled_payment_amount(rng)) + } + TestScenario::DeployWithoutTransferTarget => { + Transaction::from(Deploy::random_without_transfer_target(rng)) + } + TestScenario::DeployWithoutTransferAmount => { + Transaction::from(Deploy::random_without_transfer_amount(rng)) + } + TestScenario::DeployWithMangledTransferAmount => { + Transaction::from(Deploy::random_with_mangled_transfer_amount(rng)) + } + TestScenario::DeployWithPaymentOne => { + Transaction::from(Deploy::random_with_payment_one(rng)) + } + TestScenario::TransactionWithPaymentOne => { + let timestamp = Timestamp::now() + + Config::default().timestamp_leeway + + TimeDiff::from_millis(1000); + let ttl = TimeDiff::from_seconds(300); + let txn = TransactionV1Builder::new_session( + false, + Bytes::from(vec![1]), + TransactionRuntimeParams::VmCasperV1, + ) + .with_pricing_mode(PricingMode::PaymentLimited { + payment_amount: 1u64, + gas_price_tolerance: 2, + standard_payment: true, + }) + .with_chain_name("casper-example") + .with_timestamp(timestamp) + .with_ttl(ttl) + .with_secret_key(&secret_key) + .build() + .unwrap(); + Transaction::from(txn) + } + TestScenario::FromPeerCustomPaymentContract(contract_scenario) + | TestScenario::FromClientCustomPaymentContract(contract_scenario) => { + match contract_scenario { + ContractScenario::Valid | ContractScenario::MissingContractAtName => { + Transaction::from( + Deploy::random_with_valid_custom_payment_contract_by_name(rng), + ) + } + ContractScenario::MissingEntryPoint => Transaction::from( + Deploy::random_with_missing_entry_point_in_payment_contract(rng), + ), + 
ContractScenario::MissingContractAtHash => { + Transaction::from(Deploy::random_with_missing_payment_contract_by_hash(rng)) + } + } + } + TestScenario::FromPeerCustomPaymentContractPackage(contract_package_scenario) + | TestScenario::FromClientCustomPaymentContractPackage(contract_package_scenario) => { + match contract_package_scenario { + ContractPackageScenario::Valid + | ContractPackageScenario::MissingPackageAtName => Transaction::from( + Deploy::random_with_valid_custom_payment_package_by_name(rng), + ), + ContractPackageScenario::MissingPackageAtHash => { + Transaction::from(Deploy::random_with_missing_payment_package_by_hash(rng)) + } + ContractPackageScenario::MissingContractVersion => Transaction::from( + Deploy::random_with_nonexistent_contract_version_in_payment_package(rng), + ), + } + } + TestScenario::FromPeerSessionContract(TxnType::Deploy, contract_scenario) + | TestScenario::FromClientSessionContract(TxnType::Deploy, contract_scenario) => { + match contract_scenario { + ContractScenario::Valid | ContractScenario::MissingContractAtName => { + Transaction::from(Deploy::random_with_valid_session_contract_by_name(rng)) + } + ContractScenario::MissingContractAtHash => { + Transaction::from(Deploy::random_with_missing_session_contract_by_hash(rng)) + } + ContractScenario::MissingEntryPoint => Transaction::from( + Deploy::random_with_missing_entry_point_in_session_contract(rng), + ), + } + } + TestScenario::FromPeerSessionContract(TxnType::V1, contract_scenario) + | TestScenario::FromClientSessionContract(TxnType::V1, contract_scenario) => { + match contract_scenario { + ContractScenario::Valid | ContractScenario::MissingContractAtName => { + let txn = TransactionV1Builder::new_targeting_invocable_entity_via_alias( + "Test", + "call", + TransactionRuntimeParams::VmCasperV1, + ) + .with_chain_name("casper-example") + .with_timestamp(Timestamp::now()) + .with_secret_key(&secret_key) + .build() + .unwrap(); + Transaction::from(txn) + } + 
ContractScenario::MissingContractAtHash => { + let txn = TransactionV1Builder::new_targeting_invocable_entity( + AddressableEntityHash::new(HashAddr::default()), + "call", + TransactionRuntimeParams::VmCasperV1, + ) + .with_chain_name("casper-example") + .with_timestamp(Timestamp::now()) + .with_secret_key(&secret_key) + .build() + .unwrap(); + Transaction::from(txn) + } + ContractScenario::MissingEntryPoint => { + let txn = TransactionV1Builder::new_targeting_invocable_entity( + AddressableEntityHash::new(HashAddr::default()), + "non-existent-entry-point", + TransactionRuntimeParams::VmCasperV1, + ) + .with_chain_name("casper-example") + .with_timestamp(Timestamp::now()) + .with_secret_key(&secret_key) + .build() + .unwrap(); + Transaction::from(txn) + } + } + } + TestScenario::FromPeerSessionContractPackage( + TxnType::Deploy, + contract_package_scenario, + ) + | TestScenario::FromClientSessionContractPackage( + TxnType::Deploy, + contract_package_scenario, + ) => match contract_package_scenario { + ContractPackageScenario::Valid | ContractPackageScenario::MissingPackageAtName => { + Transaction::from(Deploy::random_with_valid_session_package_by_name(rng)) + } + ContractPackageScenario::MissingPackageAtHash => { + Transaction::from(Deploy::random_with_missing_session_package_by_hash(rng)) + } + ContractPackageScenario::MissingContractVersion => Transaction::from( + Deploy::random_with_nonexistent_contract_version_in_payment_package(rng), + ), + }, + TestScenario::FromPeerSessionContractPackage( + TxnType::V1, + contract_package_scenario, + ) + | TestScenario::FromClientSessionContractPackage( + TxnType::V1, + contract_package_scenario, + ) => match contract_package_scenario { + ContractPackageScenario::Valid | ContractPackageScenario::MissingPackageAtName => { + let txn = TransactionV1Builder::new_targeting_package_via_alias( + "Test", + None, + None, + "call", + TransactionRuntimeParams::VmCasperV1, + ) + .with_chain_name("casper-example") + 
.with_timestamp(Timestamp::now()) + .with_secret_key(&secret_key) + .build() + .unwrap(); + Transaction::from(txn) + } + ContractPackageScenario::MissingPackageAtHash => { + let txn = TransactionV1Builder::new_targeting_package( + PackageHash::new(PackageAddr::default()), + None, + None, + "call", + TransactionRuntimeParams::VmCasperV1, + ) + .with_chain_name("casper-example") + .with_timestamp(Timestamp::now()) + .with_secret_key(&secret_key) + .build() + .unwrap(); + Transaction::from(txn) + } + ContractPackageScenario::MissingContractVersion => { + let txn = TransactionV1Builder::new_targeting_package( + PackageHash::new(PackageAddr::default()), + Some(6), + Some(2), + "call", + TransactionRuntimeParams::VmCasperV1, + ) + .with_chain_name("casper-example") + .with_timestamp(Timestamp::now()) + .with_secret_key(&secret_key) + .build() + .unwrap(); + Transaction::from(txn) + } + }, + TestScenario::DeployWithEmptySessionModuleBytes => { + Transaction::from(Deploy::random_with_empty_session_module_bytes(rng)) + } + TestScenario::DeployWithNativeTransferInPayment => { + Transaction::from(Deploy::random_with_native_transfer_in_payment_logic(rng)) + } + TestScenario::FromClientSlightlyFutureDatedTransaction(txn_type) => { + let timestamp = Timestamp::now() + (Config::default().timestamp_leeway / 2); + let ttl = TimeDiff::from_seconds(300); + match txn_type { + TxnType::Deploy => Transaction::from( + Deploy::random_valid_native_transfer_with_timestamp_and_ttl( + rng, timestamp, ttl, + ), + ), + TxnType::V1 => { + let txn = TransactionV1Builder::new_session( + false, + Bytes::from(vec![1]), + TransactionRuntimeParams::VmCasperV1, + ) + .with_chain_name("casper-example") + .with_timestamp(timestamp) + .with_ttl(ttl) + .with_secret_key(&secret_key) + .build() + .unwrap(); + Transaction::from(txn) + } + } + } + TestScenario::FromClientFutureDatedTransaction(txn_type) => { + let timestamp = Timestamp::now() + + Config::default().timestamp_leeway + + 
TimeDiff::from_millis(1000); + let ttl = TimeDiff::from_seconds(300); + match txn_type { + TxnType::Deploy => Transaction::from( + Deploy::random_valid_native_transfer_with_timestamp_and_ttl( + rng, timestamp, ttl, + ), + ), + TxnType::V1 => { + let txn = TransactionV1Builder::new_session( + false, + Bytes::from(vec![1]), + TransactionRuntimeParams::VmCasperV1, + ) + .with_chain_name("casper-example") + .with_timestamp(timestamp) + .with_ttl(ttl) + .with_secret_key(&secret_key) + .build() + .unwrap(); + Transaction::from(txn) + } + } + } + TestScenario::InvalidPricingModeForTransactionV1 => { + let payment_limited_mode_transaction = TransactionV1Builder::new_random(rng) + .with_pricing_mode(PricingMode::Fixed { + gas_price_tolerance: 5, + additional_computation_factor: 0, + }) + .with_chain_name("casper-example") + .build() + .expect("must create payment limited transaction"); + Transaction::from(payment_limited_mode_transaction) + } + TestScenario::TooLowGasPriceToleranceForTransactionV1 => { + const TOO_LOW_GAS_PRICE_TOLERANCE: u8 = 0; + + let fixed_mode_transaction = TransactionV1Builder::new_random(rng) + .with_pricing_mode(PricingMode::Fixed { + gas_price_tolerance: TOO_LOW_GAS_PRICE_TOLERANCE, + additional_computation_factor: 0, + }) + .with_chain_name("casper-example") + .build() + .expect("must create fixed mode transaction"); + Transaction::from(fixed_mode_transaction) + } + TestScenario::TooLowGasPriceToleranceForDeploy => { + const TOO_LOW_GAS_PRICE_TOLERANCE: u64 = 0; + + let deploy = Deploy::random_with_gas_price(rng, TOO_LOW_GAS_PRICE_TOLERANCE); + Transaction::from(deploy) + } + TestScenario::InvalidFields | TestScenario::InvalidFieldsFromPeer => { + let mut additional_fields = BTreeMap::new(); + additional_fields.insert(42, Bytes::from(vec![1])); + let txn = TransactionV1Builder::new_session( + false, + Bytes::from(vec![1]), + TransactionRuntimeParams::VmCasperV1, + ) + .with_chain_name("casper-example") + .with_ttl(TimeDiff::from_seconds(300)) + 
.with_secret_key(&secret_key) + .with_additional_fields(additional_fields) + .build() + .unwrap(); + Transaction::from(txn) + } + TestScenario::InvalidArgumentsKind => { + let timestamp = Timestamp::now() + + Config::default().timestamp_leeway + + TimeDiff::from_millis(1000); + let ttl = TimeDiff::from_seconds(300); + let txn = TransactionV1Builder::new_session( + false, + Bytes::from(vec![1]), + TransactionRuntimeParams::VmCasperV1, + ) + .with_transaction_args(TransactionArgs::Bytesrepr(Bytes::from(vec![1, 2, 3]))) + .with_chain_name("casper-example") + .with_timestamp(timestamp) + .with_ttl(ttl) + .with_secret_key(&secret_key) + .build() + .unwrap(); + Transaction::from(txn) + } + TestScenario::WasmTransactionWithTooBigPayment => { + let ttl = TimeDiff::from_seconds(300); + let txn = TransactionV1Builder::new_session( + false, + Bytes::from(vec![1]), + TransactionRuntimeParams::VmCasperV1, + ) + .with_pricing_mode(PricingMode::PaymentLimited { + payment_amount: u64::MAX, /* make sure it's a big value that doesn't match + * any wasm lane */ + gas_price_tolerance: 2, + standard_payment: true, + }) + .with_chain_name("casper-example") + .with_timestamp(Timestamp::now()) + .with_ttl(ttl) + .with_secret_key(&secret_key) + .build() + .unwrap(); + Transaction::from(txn) + } + TestScenario::WasmDeployWithTooBigPayment => { + Transaction::from(Deploy::random_with_oversized_payment_amount(rng)) + } + TestScenario::RedelegateExceedingMaximumDelegation => { + let txn = TransactionV1Builder::new_redelegate( + ALICE_PUBLIC_KEY.clone(), + BOB_PUBLIC_KEY.clone(), + 1_000_000_000_000_000_001_u64, /* This is 1 mote more than the + * maximum_delegation_amount in local + * chainspec */ + CAROL_PUBLIC_KEY.clone(), + ) + .unwrap() + .with_chain_name("casper-example") + .with_timestamp(Timestamp::now()) + .with_secret_key(&secret_key) + .build() + .unwrap(); + Transaction::from(txn) + } + TestScenario::DelegateExceedingMaximumDelegation => { + let ttl = TimeDiff::from_seconds(300); + 
let txn = TransactionV1Builder::new_delegate( + ALICE_PUBLIC_KEY.clone(), + BOB_PUBLIC_KEY.clone(), + 1_000_000_000_000_000_001_u64, /* This is 1 mote more than the + * maximum_delegation_amount in local + * chainspec */ + ) + .unwrap() + .with_chain_name("casper-example") + .with_timestamp(Timestamp::now()) + .with_ttl(ttl) + .with_secret_key(&secret_key) + .build() + .unwrap(); + Transaction::from(txn) + } + TestScenario::VmCasperV2ByPackageHash => { + let txn = TransactionV1Builder::new_targeting_stored( + TransactionInvocationTarget::ByPackageHash { + addr: [1; 32], + version: None, + protocol_version_major: None, + }, + "x", + TransactionRuntimeParams::VmCasperV2 { + transferred_value: 0, + seed: None, + }, + ) + .with_chain_name("casper-example") + .with_secret_key(&secret_key) + .with_transaction_args(TransactionArgs::Bytesrepr(Bytes::new())) + .build() + .unwrap(); + Transaction::from(txn) + } + TestScenario::V1ByPackage(hash_or_name, maybe_version, maybe_protocol_version, ..) => { + let id = match hash_or_name { + HashOrName::Hash => TransactionInvocationTarget::ByPackageHash { + addr: [1; 32], + version: *maybe_version, + protocol_version_major: *maybe_protocol_version, + }, + HashOrName::Name => TransactionInvocationTarget::ByPackageName { + name: "xyz".to_owned(), + version: *maybe_version, + protocol_version_major: *maybe_protocol_version, + }, + }; + let txn = TransactionV1Builder::new_targeting_stored( + id, + "x", + TransactionRuntimeParams::VmCasperV1, + ) + .with_chain_name("casper-example") + .with_secret_key(&secret_key) + .build() + .unwrap(); + Transaction::from(txn) + } + } + } + + fn is_valid_transaction_case(&self) -> bool { + match self { + TestScenario::FromPeerRepeatedValidTransaction(_) + | TestScenario::FromPeerExpired(_) + | TestScenario::FromPeerValidTransaction(_) + | TestScenario::FromPeerMissingAccount(_) // account check skipped if from peer + | TestScenario::FromPeerAccountWithInsufficientWeight(_) // account check skipped if 
from peer + | TestScenario::FromPeerAccountWithInvalidAssociatedKeys(_) // account check skipped if from peer + | TestScenario::FromClientRepeatedValidTransaction(_) + | TestScenario::FromClientValidTransaction(_) + | TestScenario::FromClientSlightlyFutureDatedTransaction(_) + | TestScenario::FromClientSignedByAdmin(..) => true, + TestScenario::FromPeerInvalidTransaction(_) + | TestScenario::FromPeerInvalidTransactionZeroPayment(_) + | TestScenario::FromClientInsufficientBalance(_) + | TestScenario::FromClientMissingAccount(_) + | TestScenario::FromClientInvalidTransaction(_) + | TestScenario::FromClientInvalidTransactionZeroPayment(_) + | TestScenario::FromClientFutureDatedTransaction(_) + | TestScenario::FromClientAccountWithInsufficientWeight(_) + | TestScenario::FromClientAccountWithInvalidAssociatedKeys(_) + | TestScenario::AccountWithUnknownBalance + | TestScenario::DeployWithEmptySessionModuleBytes + | TestScenario::DeployWithNativeTransferInPayment + | TestScenario::DeployWithoutPaymentAmount + | TestScenario::DeployWithMangledPaymentAmount + | TestScenario::DeployWithMangledTransferAmount + | TestScenario::DeployWithoutTransferAmount + | TestScenario::DeployWithoutTransferTarget + | TestScenario::DeployWithPaymentOne + | TestScenario::BalanceCheckForDeploySentByPeer + | TestScenario::FromClientExpired(_) => false, + TestScenario::FromPeerCustomPaymentContract(contract_scenario) + | TestScenario::FromPeerSessionContract(_, contract_scenario) + | TestScenario::FromClientCustomPaymentContract(contract_scenario) + | TestScenario::FromClientSessionContract(_, contract_scenario) => match contract_scenario + { + ContractScenario::Valid + | ContractScenario::MissingContractAtName => true, + | ContractScenario::MissingContractAtHash + | ContractScenario::MissingEntryPoint => false, + }, + TestScenario::FromPeerCustomPaymentContractPackage(contract_package_scenario) + | TestScenario::FromPeerSessionContractPackage(_, contract_package_scenario) + | 
TestScenario::FromClientCustomPaymentContractPackage(contract_package_scenario) + | TestScenario::FromClientSessionContractPackage(_, contract_package_scenario) => { + match contract_package_scenario { + ContractPackageScenario::Valid + | ContractPackageScenario::MissingPackageAtName => true, + | ContractPackageScenario::MissingPackageAtHash + | ContractPackageScenario::MissingContractVersion => false, + } + }, + TestScenario::InvalidPricingModeForTransactionV1 + | TestScenario::TooLowGasPriceToleranceForTransactionV1 + | TestScenario::TransactionWithPaymentOne + | TestScenario::TooLowGasPriceToleranceForDeploy + | TestScenario::InvalidFields + | TestScenario::InvalidFieldsFromPeer + | TestScenario::InvalidArgumentsKind + | TestScenario::WasmTransactionWithTooBigPayment + | TestScenario::WasmDeployWithTooBigPayment + | TestScenario::RedelegateExceedingMaximumDelegation { .. } + | TestScenario::DelegateExceedingMaximumDelegation { .. } + | TestScenario::VmCasperV2ByPackageHash => false, + TestScenario::V1ByPackage(hash_or_name, _, _, scenario, ..) => { + match hash_or_name { + HashOrName::Hash => match scenario { + ContractVersionExistance::PackageDoesNotExist | ContractVersionExistance::PackageExists(false, ..) => false, + ContractVersionExistance::PackageExists(true, ..) 
=> true, + }, + HashOrName::Name => true, + } + }, + } + } + + fn is_repeated_transaction_case(&self) -> bool { + matches!( + self, + TestScenario::FromClientRepeatedValidTransaction(_) + | TestScenario::FromPeerRepeatedValidTransaction(_) + ) + } + + fn contract_scenario(&self) -> Option { + match self { + TestScenario::FromPeerCustomPaymentContract(contract_scenario) + | TestScenario::FromPeerSessionContract(_, contract_scenario) + | TestScenario::FromClientCustomPaymentContract(contract_scenario) + | TestScenario::FromClientSessionContract(_, contract_scenario) => { + Some(contract_scenario.clone()) + } + _ => None, + } + } + + fn is_v2_casper_vm(&self) -> bool { + matches!(self, TestScenario::VmCasperV2ByPackageHash) + } +} + +fn create_account(account_hash: AccountHash, test_scenario: &TestScenario) -> Account { + match test_scenario { + TestScenario::FromPeerAccountWithInvalidAssociatedKeys(_) + | TestScenario::FromClientAccountWithInvalidAssociatedKeys(_) => { + Account::create(AccountHash::default(), NamedKeys::new(), URef::default()) + } + TestScenario::FromPeerAccountWithInsufficientWeight(_) + | TestScenario::FromClientAccountWithInsufficientWeight(_) => { + let invalid_action_threshold = + ActionThresholds::new(Weight::new(100u8), Weight::new(100u8)) + .expect("should create action threshold"); + Account::new( + account_hash, + NamedKeys::new(), + URef::default(), + AssociatedKeys::new(account_hash, Weight::new(1)), + invalid_action_threshold, + ) + } + _ => Account::create(account_hash, NamedKeys::new(), URef::default()), + } +} + +struct Reactor { + storage: Storage, + transaction_acceptor: TransactionAcceptor, + _storage_tempdir: TempDir, + test_scenario: TestScenario, +} + +impl reactor::Reactor for Reactor { + type Event = Event; + type Config = TestScenario; + type Error = Error; + + fn dispatch_event( + &mut self, + effect_builder: EffectBuilder, + rng: &mut NodeRng, + event: Event, + ) -> Effects { + debug!("{event:?}"); + match event { + 
Event::Storage(event) => reactor::wrap_effects( + Event::Storage, + self.storage.handle_event(effect_builder, rng, event), + ), + Event::StorageRequest(req) => reactor::wrap_effects( + Event::Storage, + self.storage.handle_event(effect_builder, rng, req.into()), + ), + Event::TransactionAcceptor(event) => reactor::wrap_effects( + Event::TransactionAcceptor, + self.transaction_acceptor + .handle_event(effect_builder, rng, event), + ), + Event::ControlAnnouncement(ctrl_ann) => { + panic!("unhandled control announcement: {}", ctrl_ann) + } + Event::FatalAnnouncement(fatal_ann) => { + panic!("unhandled fatal announcement: {}", fatal_ann) + } + Event::TransactionAcceptorAnnouncement(_) => { + // We do not care about transaction acceptor announcements in the acceptor tests. + Effects::new() + } + Event::ContractRuntime(event) => match event { + ContractRuntimeRequest::Query { + request: query_request, + responder, + } => { + let query_result = if let Key::Hash(_) | Key::SmartContract(_) = + query_request.key() + { + match &self.test_scenario { + TestScenario::FromPeerCustomPaymentContractPackage( + ContractPackageScenario::MissingPackageAtHash, + ) + | TestScenario::FromPeerSessionContractPackage( + _, + ContractPackageScenario::MissingPackageAtHash, + ) + | TestScenario::FromClientCustomPaymentContractPackage( + ContractPackageScenario::MissingPackageAtHash, + ) + | TestScenario::FromClientSessionContractPackage( + _, + ContractPackageScenario::MissingPackageAtHash, + ) => QueryResult::ValueNotFound(String::new()), + TestScenario::FromPeerCustomPaymentContractPackage( + ContractPackageScenario::MissingContractVersion, + ) + | TestScenario::FromPeerSessionContractPackage( + _, + ContractPackageScenario::MissingContractVersion, + ) + | TestScenario::FromClientCustomPaymentContractPackage( + ContractPackageScenario::MissingContractVersion, + ) + | TestScenario::FromClientSessionContractPackage( + _, + ContractPackageScenario::MissingContractVersion, + ) + | 
TestScenario::VmCasperV2ByPackageHash => QueryResult::Success { + value: Box::new(StoredValue::ContractPackage( + ContractPackage::default(), + )), + proofs: vec![], + }, + TestScenario::V1ByPackage( + hash_or_name, + _, + _, + scenario + ) => { + match hash_or_name { + HashOrName::Hash => match scenario { + ContractVersionExistance::PackageDoesNotExist => QueryResult::ValueNotFound("xyz".to_owned()), + ContractVersionExistance::PackageExists(_, versions, disabled_versions) => { + let contract_package = ContractPackage::new( + URef::default(), + versions.clone(), + disabled_versions.clone(), + Groups::default(), + ContractPackageStatus::Unlocked, + ); + QueryResult::Success { + value: Box::new(StoredValue::ContractPackage( + contract_package, + )), + proofs: vec![], + } + }, + }, + HashOrName::Name => unreachable!("Calling contract by name should not result in a package fetch in transaction acceptor"), + } + } + _ => panic!( + "unexpected query: {query_request:?} in {:?}", + self.test_scenario + ), + } + } else { + panic!("expect only queries using Key::Package variant"); + }; + responder.respond(query_result).ignore() + } + ContractRuntimeRequest::GetBalance { + request: balance_request, + responder, + } => { + let key = match balance_request.identifier() { + BalanceIdentifier::Purse(uref) => Key::URef(*uref), + BalanceIdentifier::Public(public_key) => { + Key::Account(public_key.to_account_hash()) + } + BalanceIdentifier::Account(account_hash) + | BalanceIdentifier::PenalizedAccount(account_hash) => { + Key::Account(*account_hash) + } + BalanceIdentifier::Entity(entity_addr) => { + Key::AddressableEntity(*entity_addr) + } + BalanceIdentifier::Internal(addr) => Key::Balance(*addr), + BalanceIdentifier::Refund => { + responder + .respond(BalanceResult::Failure( + TrackingCopyError::NamedKeyNotFound("refund".to_string()), + )) + .ignore::(); + return Effects::new(); + } + BalanceIdentifier::Payment | BalanceIdentifier::PenalizedPayment => { + responder + 
.respond(BalanceResult::Failure( + TrackingCopyError::NamedKeyNotFound("payment".to_string()), + )) + .ignore::(); + return Effects::new(); + } + BalanceIdentifier::Accumulate => { + responder + .respond(BalanceResult::Failure( + TrackingCopyError::NamedKeyNotFound("accumulate".to_string()), + )) + .ignore::(); + return Effects::new(); + } + }; + let purse_addr = match balance_request.identifier().as_purse_addr() { + Some(purse_addr) => purse_addr, + None => { + responder + .respond(BalanceResult::Failure( + TrackingCopyError::UnexpectedKeyVariant(key), + )) + .ignore::(); + return Effects::new(); + } + }; + + let proof = TrieMerkleProof::new( + key, + StoredValue::CLValue(CLValue::from_t(()).expect("should get CLValue")), + VecDeque::new(), + ); + let baseline_amount = U512::from(DEFAULT_BASELINE_MOTES_AMOUNT); + let motes = if matches!( + self.test_scenario, + TestScenario::FromClientInsufficientBalance(_) + ) { + baseline_amount - 1 + } else { + baseline_amount + }; + let balance_result = + if self.test_scenario == TestScenario::AccountWithUnknownBalance { + BalanceResult::RootNotFound + } else { + let proofs_result = ProofsResult::Proofs { + total_balance_proof: Box::new(proof), + balance_holds: Default::default(), + }; + BalanceResult::Success { + purse_addr, + total_balance: Default::default(), + available_balance: motes, + proofs_result, + } + }; + responder.respond(balance_result).ignore() + } + ContractRuntimeRequest::GetAddressableEntity { + state_root_hash: _, + entity_addr, + responder, + } => { + let result = if matches!( + self.test_scenario, + TestScenario::FromClientMissingAccount(_) + ) || matches!( + self.test_scenario, + TestScenario::FromPeerMissingAccount(_) + ) { + AddressableEntityResult::ValueNotFound("missing account".to_string()) + } else if let EntityAddr::Account(account_hash) = entity_addr { + let account = + create_account(AccountHash::new(account_hash), &self.test_scenario); + AddressableEntityResult::Success { + entity: 
AddressableEntity::from(account), + } + } else if let EntityAddr::SmartContract(..) = entity_addr { + match self.test_scenario { + TestScenario::FromPeerCustomPaymentContract( + ContractScenario::MissingContractAtHash, + ) + | TestScenario::FromPeerSessionContract( + _, + ContractScenario::MissingContractAtHash, + ) + | TestScenario::FromClientCustomPaymentContract( + ContractScenario::MissingContractAtHash, + ) + | TestScenario::FromClientSessionContract( + _, + ContractScenario::MissingContractAtHash, + ) => AddressableEntityResult::ValueNotFound( + "missing contract".to_string(), + ), + TestScenario::FromPeerCustomPaymentContract( + ContractScenario::MissingEntryPoint, + ) + | TestScenario::FromPeerSessionContract( + _, + ContractScenario::MissingEntryPoint, + ) + | TestScenario::FromClientCustomPaymentContract( + ContractScenario::MissingEntryPoint, + ) + | TestScenario::FromClientSessionContract( + _, + ContractScenario::MissingEntryPoint, + ) => { + let contract = Contract::default(); + AddressableEntityResult::Success { + entity: AddressableEntity::from(contract), + } + } + TestScenario::V1ByPackage(_, _, _, _) => { + let contract = Contract::default(); + AddressableEntityResult::Success { + entity: AddressableEntity::from(contract), + } + } + _ => panic!("unexpected GetAddressableEntity: {:?}", entity_addr), + } + } else { + panic!( + "should GetAddressableEntity using Account or SmartContract variant" + ); + }; + responder.respond(result).ignore() + } + ContractRuntimeRequest::GetEntryPointExists { + state_root_hash: _, + responder, + .. 
+ } => { + if matches!(self.test_scenario, TestScenario::V1ByPackage(..)) { + let result = EntryPointExistsResult::Success; + responder.respond(result).ignore() + } else { + let contract_scenario = self + .test_scenario + .contract_scenario() + .expect("must get contract scenario"); + let result = match contract_scenario { + ContractScenario::Valid => EntryPointExistsResult::Success, + ContractScenario::MissingContractAtHash + | ContractScenario::MissingContractAtName + | ContractScenario::MissingEntryPoint => { + EntryPointExistsResult::ValueNotFound( + "entry point not found".to_string(), + ) + } + }; + responder.respond(result).ignore() + } + } + _ => panic!("should not receive {:?}", event), + }, + Event::NetworkRequest(_) => panic!("test does not handle network requests"), + } + } + + fn new( + config: Self::Config, + chainspec: Arc, + _chainspec_raw_bytes: Arc, + _network_identity: NetworkIdentity, + registry: &Registry, + _event_queue: EventQueueHandle, + _rng: &mut NodeRng, + ) -> Result<(Self, Effects), Self::Error> { + let (storage_config, storage_tempdir) = storage::Config::new_for_tests(1); + let storage_with_dir = WithDir::new(storage_tempdir.path(), storage_config); + + let transaction_acceptor = + TransactionAcceptor::new(Config::default(), Arc::clone(&chainspec), registry)?; + + let storage = Storage::new( + &storage_with_dir, + None, + ProtocolVersion::from_parts(1, 0, 0), + EraId::default(), + "test", + chainspec.transaction_config.max_ttl.into(), + chainspec.core_config.recent_era_count(), + Some(registry), + false, + TransactionConfig::default(), + ) + .unwrap(); + + let reactor = Reactor { + storage, + transaction_acceptor, + _storage_tempdir: storage_tempdir, + test_scenario: config, + }; + + let effects = Effects::new(); + + Ok((reactor, effects)) + } +} + +fn put_block_to_storage_and_mark_complete( + block: Arc, + result_sender: Sender, +) -> impl FnOnce(EffectBuilder) -> Effects { + |effect_builder: EffectBuilder| { + async move { + let 
block_height = block.height(); + let block: Block = (*block).clone().into(); + let result = effect_builder.put_block_to_storage(Arc::new(block)).await; + effect_builder.mark_block_completed(block_height).await; + result_sender + .send(result) + .expect("receiver should not be dropped yet"); + } + .ignore() + } +} + +fn put_transaction_to_storage( + txn: &Transaction, + result_sender: Sender, +) -> impl FnOnce(EffectBuilder) -> Effects { + let txn = txn.clone(); + |effect_builder: EffectBuilder| { + effect_builder + .put_transaction_to_storage(txn) + .map(|result| { + result_sender + .send(result) + .expect("receiver should not be dropped yet") + }) + .ignore() + } +} + +fn schedule_accept_transaction( + txn: &Transaction, + source: Source, + responder: Responder>, +) -> impl FnOnce(EffectBuilder) -> Effects { + let transaction = txn.clone(); + |effect_builder: EffectBuilder| { + effect_builder + .into_inner() + .schedule( + super::Event::Accept { + transaction, + source, + maybe_responder: Some(responder), + }, + QueueKind::Validation, + ) + .ignore() + } +} + +fn inject_balance_check_for_peer( + txn: &Transaction, + source: Source, + rng: &mut TestRng, + responder: Responder>, + chainspec: &Chainspec, +) -> impl FnOnce(EffectBuilder) -> Effects { + let txn = txn.clone(); + let block = TestBlockBuilder::new().build(rng); + let block_header = Box::new(block.header().clone().into()); + let meta_transaction = MetaTransaction::from_transaction( + &txn, + chainspec.core_config.pricing_handling, + &chainspec.transaction_config, + ) + .unwrap(); + |effect_builder: EffectBuilder| { + let event_metadata = Box::new(EventMetadata::new( + txn, + meta_transaction, + source, + Some(responder), + Timestamp::now(), + )); + effect_builder + .into_inner() + .schedule( + super::Event::GetBalanceResult { + event_metadata, + block_header, + maybe_balance: None, + }, + QueueKind::ContractRuntime, + ) + .ignore() + } +} + +async fn run_transaction_acceptor_without_timeout( + 
test_scenario: TestScenario, +) -> Result<(), super::Error> { + let _ = logging::init(); + let rng = &mut TestRng::new(); + + let admin = SecretKey::random(rng); + let (mut chainspec, chainspec_raw_bytes) = + <(Chainspec, ChainspecRawBytes)>::from_resources("local"); + let mut chainspec = match &test_scenario { + TestScenario::TooLowGasPriceToleranceForTransactionV1 => { + chainspec.with_pricing_handling(PricingHandling::Fixed); + chainspec + } + test_scenario if test_scenario.is_v2_casper_vm() => { + chainspec.with_vm_casper_v2(true); + chainspec + } + _ => chainspec, + }; + chainspec.core_config.administrators = iter::once(PublicKey::from(&admin)).collect(); + + let chainspec = Arc::new(chainspec); + let mut runner: Runner> = Runner::new( + test_scenario.clone(), + chainspec.clone(), + Arc::new(chainspec_raw_bytes), + rng, + ) + .await + .unwrap(); + + let block = Arc::new(TestBlockBuilder::new().build(rng)); + // Create a channel to assert that the block was successfully injected into storage. + let (result_sender, result_receiver) = oneshot::channel(); + + runner + .process_injected_effects(put_block_to_storage_and_mark_complete(block, result_sender)) + .await; + + // There are two scheduled events, so we only need to try cranking until the second time it + // returns `Some`. + for _ in 0..2 { + while runner.try_crank(rng).await == TryCrankOutcome::NoEventsToProcess { + time::sleep(POLL_INTERVAL).await; + } + } + assert!(result_receiver.await.unwrap()); + + // Create a responder to assert the validity of the transaction + let (txn_sender, txn_receiver) = oneshot::channel(); + let txn_responder = Responder::without_shutdown(txn_sender); + + // Create a transaction specific to the test scenario + let txn = test_scenario.transaction(rng, &admin); + // Mark the source as either a peer or a client depending on the scenario. + let source = test_scenario.source(rng); + + { + // Inject the transaction artificially into storage to simulate a previously seen one. 
+ if test_scenario.is_repeated_transaction_case() { + let (result_sender, result_receiver) = oneshot::channel(); + runner + .process_injected_effects(put_transaction_to_storage(&txn, result_sender)) + .await; + while runner.try_crank(rng).await == TryCrankOutcome::NoEventsToProcess { + time::sleep(POLL_INTERVAL).await; + } + // Check that the "previously seen" transaction is present in storage. + assert!(result_receiver.await.unwrap()); + } + + if test_scenario == TestScenario::BalanceCheckForDeploySentByPeer { + let (txn_sender, _) = oneshot::channel(); + let txn_responder = Responder::without_shutdown(txn_sender); + let chainspec = chainspec.as_ref().clone(); + runner + .process_injected_effects(inject_balance_check_for_peer( + &txn, + source.clone(), + rng, + txn_responder, + &chainspec, + )) + .await; + while runner.try_crank(rng).await == TryCrankOutcome::NoEventsToProcess { + time::sleep(POLL_INTERVAL).await; + } + } + } + + runner + .process_injected_effects(schedule_accept_transaction(&txn, source, txn_responder)) + .await; + let test_scenario_clone = test_scenario.clone(); + // Tests where the transaction is already in storage will not trigger any transaction acceptor + // announcement, so use the transaction acceptor `PutToStorage` event as the condition. + let stopping_condition = move |event: &Event| -> bool { + match &test_scenario_clone { + // Check that invalid transactions sent by a client raise the `InvalidTransaction` + // announcement with the appropriate source. 
+ TestScenario::FromClientInvalidTransaction(_) + | TestScenario::FromClientInvalidTransactionZeroPayment(_) + | TestScenario::FromClientFutureDatedTransaction(_) + | TestScenario::FromClientMissingAccount(_) + | TestScenario::FromClientInsufficientBalance(_) + | TestScenario::FromClientAccountWithInvalidAssociatedKeys(_) + | TestScenario::FromClientAccountWithInsufficientWeight(_) + | TestScenario::DeployWithEmptySessionModuleBytes + | TestScenario::AccountWithUnknownBalance + | TestScenario::DeployWithNativeTransferInPayment + | TestScenario::DeployWithoutPaymentAmount + | TestScenario::DeployWithMangledPaymentAmount + | TestScenario::DeployWithMangledTransferAmount + | TestScenario::DeployWithoutTransferTarget + | TestScenario::DeployWithoutTransferAmount + | TestScenario::DeployWithPaymentOne + | TestScenario::InvalidPricingModeForTransactionV1 + | TestScenario::FromClientExpired(_) + | TestScenario::TooLowGasPriceToleranceForTransactionV1 + | TestScenario::TransactionWithPaymentOne + | TestScenario::TooLowGasPriceToleranceForDeploy + | TestScenario::InvalidFields + | TestScenario::InvalidArgumentsKind + | TestScenario::WasmTransactionWithTooBigPayment + | TestScenario::WasmDeployWithTooBigPayment + | TestScenario::RedelegateExceedingMaximumDelegation { .. } + | TestScenario::DelegateExceedingMaximumDelegation { .. } + | TestScenario::VmCasperV2ByPackageHash => { + matches!( + event, + Event::TransactionAcceptorAnnouncement( + TransactionAcceptorAnnouncement::InvalidTransaction { + source: Source::Client, + .. + } + ) + ) + } + // Check that executable items with valid contracts are successfully stored. Conversely, + // ensure that invalid contracts will raise the invalid transaction announcement. 
+ TestScenario::FromPeerCustomPaymentContract(contract_scenario) + | TestScenario::FromPeerSessionContract(_, contract_scenario) + | TestScenario::FromClientCustomPaymentContract(contract_scenario) + | TestScenario::FromClientSessionContract(_, contract_scenario) => { + match contract_scenario { + ContractScenario::Valid | ContractScenario::MissingContractAtName => matches!( + event, + Event::TransactionAcceptorAnnouncement( + TransactionAcceptorAnnouncement::AcceptedNewTransaction { .. } + ) + ), + ContractScenario::MissingContractAtHash + | ContractScenario::MissingEntryPoint => { + matches!( + event, + Event::TransactionAcceptorAnnouncement( + TransactionAcceptorAnnouncement::InvalidTransaction { .. } + ) + ) + } + } + } + // Check that executable items with valid contract packages are successfully stored. + // Conversely, ensure that invalid contract packages will raise the invalid transaction + // announcement. + TestScenario::FromPeerCustomPaymentContractPackage(contract_package_scenario) + | TestScenario::FromPeerSessionContractPackage(_, contract_package_scenario) + | TestScenario::FromClientCustomPaymentContractPackage(contract_package_scenario) + | TestScenario::FromClientSessionContractPackage(_, contract_package_scenario) => { + match contract_package_scenario { + ContractPackageScenario::Valid + | ContractPackageScenario::MissingPackageAtName => matches!( + event, + Event::TransactionAcceptorAnnouncement( + TransactionAcceptorAnnouncement::AcceptedNewTransaction { .. } + ) + ), + ContractPackageScenario::MissingContractVersion + | ContractPackageScenario::MissingPackageAtHash => matches!( + event, + Event::TransactionAcceptorAnnouncement( + TransactionAcceptorAnnouncement::InvalidTransaction { .. } + ) + ), + } + } + // Check that invalid transactions sent by a peer raise the `InvalidTransaction` + // announcement with the appropriate source. 
+ TestScenario::FromPeerInvalidTransaction(_) + | TestScenario::FromPeerInvalidTransactionZeroPayment(_) + | TestScenario::BalanceCheckForDeploySentByPeer + | TestScenario::InvalidFieldsFromPeer => { + matches!( + event, + Event::TransactionAcceptorAnnouncement( + TransactionAcceptorAnnouncement::InvalidTransaction { + source: Source::Peer(_) | Source::PeerGossiped(_), + .. + } + ) + ) + } + // Check that a new and valid, transaction sent by a peer raises an + // `AcceptedNewTransaction` announcement with the appropriate source. + TestScenario::FromPeerValidTransaction(_) + | TestScenario::FromPeerMissingAccount(_) + | TestScenario::FromPeerAccountWithInvalidAssociatedKeys(_) + | TestScenario::FromPeerAccountWithInsufficientWeight(_) + | TestScenario::FromPeerExpired(_) => { + matches!( + event, + Event::TransactionAcceptorAnnouncement( + TransactionAcceptorAnnouncement::AcceptedNewTransaction { + source: Source::Peer(_), + .. + } + ) + ) || matches!( + event, + Event::TransactionAcceptorAnnouncement( + TransactionAcceptorAnnouncement::AcceptedNewTransaction { + source: Source::PeerGossiped(_), + .. + } + ) + ) + } + // Check that a new and valid transaction sent by a client raises an + // `AcceptedNewTransaction` announcement with the appropriate source. + TestScenario::FromClientValidTransaction(_) + | TestScenario::FromClientSlightlyFutureDatedTransaction(_) + | TestScenario::FromClientSignedByAdmin(_) => { + matches!( + event, + Event::TransactionAcceptorAnnouncement( + TransactionAcceptorAnnouncement::AcceptedNewTransaction { + source: Source::Client, + .. + } + ) + ) + } + // Check that repeated valid transactions from a client raises `PutToStorageResult` + // with the `is_new` flag as false. + TestScenario::FromClientRepeatedValidTransaction(_) => matches!( + event, + Event::TransactionAcceptor(super::Event::PutToStorageResult { is_new: false, .. 
}) + ), + // Check that repeated valid transactions from a peer raises `StoredFinalizedApprovals` + // with the `is_new` flag as false. + TestScenario::FromPeerRepeatedValidTransaction(_) => matches!( + event, + Event::TransactionAcceptor(super::Event::StoredFinalizedApprovals { + is_new: false, + .. + }) + ), + TestScenario::V1ByPackage( + hash_or_name, + entity_version, + protocol_version_major, + scenario, + ) => match hash_or_name { + HashOrName::Hash => match scenario { + ContractVersionExistance::PackageDoesNotExist => { + matches!( + event, + Event::TransactionAcceptorAnnouncement( + TransactionAcceptorAnnouncement::InvalidTransaction { + source: Source::Client, + .. + } + ) + ) + } + ContractVersionExistance::PackageExists(false, ..) => { + if entity_version.is_none() && protocol_version_major.is_none() { + return matches!( + event, + Event::TransactionAcceptorAnnouncement( + TransactionAcceptorAnnouncement::AcceptedNewTransaction { + source: Source::Client, + .. + } + ) + ); + } + matches!( + event, + Event::TransactionAcceptorAnnouncement( + TransactionAcceptorAnnouncement::InvalidTransaction { + source: Source::Client, + .. + } + ) + ) + } + ContractVersionExistance::PackageExists(true, ..) => { + matches!( + event, + Event::TransactionAcceptorAnnouncement( + TransactionAcceptorAnnouncement::AcceptedNewTransaction { + source: Source::Client, + .. + } + ) + ) + } + }, + HashOrName::Name => matches!( + event, + Event::TransactionAcceptorAnnouncement( + TransactionAcceptorAnnouncement::AcceptedNewTransaction { + source: Source::Client, + .. 
+ } + ) + ), + }, + } + }; + runner + .reactor_mut() + .set_condition_checker(Box::new(stopping_condition)); + + loop { + match runner.try_crank(rng).await { + TryCrankOutcome::ProcessedAnEvent => { + if runner.reactor().condition_result() { + break; + } + } + TryCrankOutcome::NoEventsToProcess => time::sleep(POLL_INTERVAL).await, + TryCrankOutcome::ShouldExit(exit_code) => panic!("should not exit: {:?}", exit_code), + TryCrankOutcome::Exited => unreachable!(), + } + } + + { + // Assert that the transaction is present in the case of a valid transaction. + // Conversely, assert its absence in the invalid case. + let is_in_storage = runner + .reactor() + .inner() + .storage + .get_transaction_by_hash(txn.hash()) + .is_some(); + + if test_scenario.is_valid_transaction_case() { + assert!(is_in_storage) + } else { + assert!(!is_in_storage) + } + } + + txn_receiver.await.unwrap() +} + +async fn run_transaction_acceptor(test_scenario: TestScenario) -> Result<(), super::Error> { + time::timeout( + TIMEOUT, + run_transaction_acceptor_without_timeout(test_scenario), + ) + .await + .unwrap() +} + +#[tokio::test] +async fn should_accept_valid_deploy_from_peer() { + let result = + run_transaction_acceptor(TestScenario::FromPeerValidTransaction(TxnType::Deploy)).await; + assert!(result.is_ok()) +} + +#[tokio::test] +async fn should_accept_valid_transaction_v1_from_peer() { + let result = + run_transaction_acceptor(TestScenario::FromPeerValidTransaction(TxnType::V1)).await; + assert!(result.is_ok()) +} + +#[tokio::test] +async fn should_reject_invalid_deploy_from_peer() { + let result = + run_transaction_acceptor(TestScenario::FromPeerInvalidTransaction(TxnType::Deploy)).await; + assert!(matches!( + result, + Err(super::Error::InvalidTransaction( + InvalidTransaction::Deploy(_) + )) + )) +} + +#[tokio::test] +async fn should_reject_invalid_transaction_v1_from_peer() { + let result = + run_transaction_acceptor(TestScenario::FromPeerInvalidTransaction(TxnType::V1)).await; + 
assert!(matches!( + result, + Err(super::Error::InvalidTransaction(InvalidTransaction::V1(_))) + )) +} + +#[tokio::test] +async fn should_reject_zero_payment_transaction_v1_from_peer() { + let result = run_transaction_acceptor(TestScenario::FromPeerInvalidTransactionZeroPayment( + TxnType::V1, + )) + .await; + + assert!(matches!( + result, + Err(super::Error::InvalidTransaction(InvalidTransaction::V1( + InvalidTransactionV1::InvalidPaymentAmount + ))) + )) +} + +#[tokio::test] +async fn should_accept_valid_deploy_from_peer_for_missing_account() { + let result = + run_transaction_acceptor(TestScenario::FromPeerMissingAccount(TxnType::Deploy)).await; + assert!(result.is_ok()) +} + +#[tokio::test] +async fn should_accept_valid_transaction_v1_from_peer_for_missing_account() { + let result = run_transaction_acceptor(TestScenario::FromPeerMissingAccount(TxnType::V1)).await; + assert!(result.is_ok()) +} + +#[tokio::test] +async fn should_accept_valid_deploy_from_peer_for_account_with_invalid_associated_keys() { + let result = run_transaction_acceptor(TestScenario::FromPeerAccountWithInvalidAssociatedKeys( + TxnType::Deploy, + )) + .await; + assert!(result.is_ok()) +} + +#[tokio::test] +async fn should_accept_valid_transaction_v1_from_peer_for_account_with_invalid_associated_keys() { + let result = run_transaction_acceptor(TestScenario::FromPeerAccountWithInvalidAssociatedKeys( + TxnType::V1, + )) + .await; + assert!(result.is_ok()) +} + +#[tokio::test] +async fn should_accept_valid_deploy_from_peer_for_account_with_insufficient_weight() { + let result = run_transaction_acceptor(TestScenario::FromPeerAccountWithInsufficientWeight( + TxnType::Deploy, + )) + .await; + assert!(result.is_ok()) +} + +#[tokio::test] +async fn should_accept_valid_transaction_v1_from_peer_for_account_with_insufficient_weight() { + let result = run_transaction_acceptor(TestScenario::FromPeerAccountWithInsufficientWeight( + TxnType::V1, + )) + .await; + assert!(result.is_ok()) +} + +#[tokio::test] 
+async fn should_accept_valid_deploy_from_client() { + let result = + run_transaction_acceptor(TestScenario::FromClientValidTransaction(TxnType::Deploy)).await; + assert!(result.is_ok()) +} + +#[tokio::test] +async fn should_accept_valid_transaction_v1_from_client() { + let result = + run_transaction_acceptor(TestScenario::FromClientValidTransaction(TxnType::V1)).await; + assert!(result.is_ok()) +} + +#[tokio::test] +async fn should_reject_invalid_deploy_from_client() { + let result = + run_transaction_acceptor(TestScenario::FromClientInvalidTransaction(TxnType::Deploy)).await; + assert!(matches!( + result, + Err(super::Error::InvalidTransaction( + InvalidTransaction::Deploy(_) + )) + )) +} + +#[tokio::test] +async fn should_reject_invalid_transaction_v1_from_client() { + let result = + run_transaction_acceptor(TestScenario::FromClientInvalidTransaction(TxnType::V1)).await; + assert!(matches!( + result, + Err(super::Error::InvalidTransaction(InvalidTransaction::V1(_))) + )) +} + +#[tokio::test] +async fn should_reject_invalid_transaction_v1_zero_payment_from_client() { + let result = run_transaction_acceptor(TestScenario::FromClientInvalidTransactionZeroPayment( + TxnType::V1, + )) + .await; + assert!(matches!( + result, + Err(super::Error::InvalidTransaction(InvalidTransaction::V1( + InvalidTransactionV1::InvalidPaymentAmount + ))) + )) +} + +#[tokio::test] +async fn should_accept_slightly_future_dated_deploy_from_client() { + let result = run_transaction_acceptor(TestScenario::FromClientSlightlyFutureDatedTransaction( + TxnType::Deploy, + )) + .await; + assert!(result.is_ok()) +} + +#[tokio::test] +async fn should_accept_slightly_future_dated_transaction_v1_from_client() { + let result = run_transaction_acceptor(TestScenario::FromClientSlightlyFutureDatedTransaction( + TxnType::V1, + )) + .await; + assert!(result.is_ok()) +} + +#[tokio::test] +async fn should_reject_future_dated_deploy_from_client() { + let result = 
run_transaction_acceptor(TestScenario::FromClientFutureDatedTransaction( + TxnType::Deploy, + )) + .await; + assert!(matches!( + result, + Err(super::Error::InvalidTransaction( + InvalidTransaction::Deploy(InvalidDeploy::TimestampInFuture { .. }) + )) + )) +} + +#[tokio::test] +async fn should_reject_future_dated_transaction_v1_from_client() { + let result = + run_transaction_acceptor(TestScenario::FromClientFutureDatedTransaction(TxnType::V1)).await; + assert!(matches!( + result, + Err(super::Error::InvalidTransaction(InvalidTransaction::V1( + InvalidTransactionV1::TimestampInFuture { .. } + ))) + )) +} + +#[tokio::test] +async fn should_reject_valid_deploy_from_client_for_missing_account() { + let result = + run_transaction_acceptor(TestScenario::FromClientMissingAccount(TxnType::Deploy)).await; + assert!(matches!( + result, + Err(super::Error::Parameters { + failure: ParameterFailure::NoSuchAddressableEntity { .. }, + .. + }) + )) +} + +#[tokio::test] +async fn should_reject_valid_transaction_v1_from_client_for_missing_account() { + let result = + run_transaction_acceptor(TestScenario::FromClientMissingAccount(TxnType::V1)).await; + assert!(matches!( + result, + Err(super::Error::Parameters { + failure: ParameterFailure::NoSuchAddressableEntity { .. }, + .. + }) + )) +} + +#[tokio::test] +async fn should_reject_valid_deploy_from_client_for_account_with_invalid_associated_keys() { + let result = run_transaction_acceptor( + TestScenario::FromClientAccountWithInvalidAssociatedKeys(TxnType::Deploy), + ) + .await; + assert!(matches!( + result, + Err(super::Error::Parameters { + failure: ParameterFailure::InvalidAssociatedKeys, + .. 
+ }) + )) +} + +#[tokio::test] +async fn should_reject_valid_transaction_v1_from_client_for_account_with_invalid_associated_keys() { + let result = run_transaction_acceptor( + TestScenario::FromClientAccountWithInvalidAssociatedKeys(TxnType::V1), + ) + .await; + assert!(matches!( + result, + Err(super::Error::Parameters { + failure: ParameterFailure::InvalidAssociatedKeys, + .. + }) + )) +} + +#[tokio::test] +async fn should_reject_valid_deploy_from_client_for_account_with_insufficient_weight() { + let result = run_transaction_acceptor(TestScenario::FromClientAccountWithInsufficientWeight( + TxnType::Deploy, + )) + .await; + assert!(matches!( + result, + Err(super::Error::Parameters { + failure: ParameterFailure::InsufficientSignatureWeight, + .. + }) + )) +} + +#[tokio::test] +async fn should_reject_valid_transaction_v1_from_client_for_account_with_insufficient_weight() { + let result = run_transaction_acceptor(TestScenario::FromClientAccountWithInsufficientWeight( + TxnType::V1, + )) + .await; + assert!(matches!( + result, + Err(super::Error::Parameters { + failure: ParameterFailure::InsufficientSignatureWeight, + .. + }) + )) +} + +#[tokio::test] +async fn should_reject_valid_deploy_from_client_for_insufficient_balance() { + let result = + run_transaction_acceptor(TestScenario::FromClientInsufficientBalance(TxnType::Deploy)) + .await; + assert!(matches!( + result, + Err(super::Error::Parameters { + failure: ParameterFailure::InsufficientBalance { .. }, + .. + }) + )) +} + +#[tokio::test] +async fn should_reject_valid_transaction_v1_from_client_for_insufficient_balance() { + let result = + run_transaction_acceptor(TestScenario::FromClientInsufficientBalance(TxnType::V1)).await; + assert!(matches!( + result, + Err(super::Error::Parameters { + failure: ParameterFailure::InsufficientBalance { .. }, + .. 
+ }) + )) +} + +#[tokio::test] +async fn should_reject_valid_deploy_from_client_for_unknown_balance() { + let result = run_transaction_acceptor(TestScenario::AccountWithUnknownBalance).await; + assert!(matches!( + result, + Err(super::Error::Parameters { + failure: ParameterFailure::UnknownBalance { .. }, + .. + }) + )) +} + +#[tokio::test] +async fn should_accept_repeated_valid_deploy_from_peer() { + let result = run_transaction_acceptor(TestScenario::FromPeerRepeatedValidTransaction( + TxnType::Deploy, + )) + .await; + assert!(result.is_ok()) +} + +#[tokio::test] +async fn should_accept_repeated_valid_transaction_v1_from_peer() { + let result = + run_transaction_acceptor(TestScenario::FromPeerRepeatedValidTransaction(TxnType::V1)).await; + assert!(result.is_ok()) +} + +#[tokio::test] +async fn should_accept_repeated_valid_deploy_from_client() { + let result = run_transaction_acceptor(TestScenario::FromClientRepeatedValidTransaction( + TxnType::Deploy, + )) + .await; + assert!(result.is_ok()) +} + +#[tokio::test] +async fn should_accept_repeated_valid_transaction_v1_from_client() { + let result = run_transaction_acceptor(TestScenario::FromClientRepeatedValidTransaction( + TxnType::V1, + )) + .await; + assert!(result.is_ok()) +} + +#[tokio::test] +async fn should_accept_deploy_with_valid_custom_payment_from_client() { + let test_scenario = TestScenario::FromClientCustomPaymentContract(ContractScenario::Valid); + let result = run_transaction_acceptor(test_scenario).await; + assert!(result.is_ok()) +} + +#[tokio::test] +async fn should_accept_deploy_with_missing_custom_payment_contract_by_name_from_client() { + let test_scenario = + TestScenario::FromClientCustomPaymentContract(ContractScenario::MissingContractAtName); + let result = run_transaction_acceptor(test_scenario).await; + assert!(result.is_ok()) +} + +#[tokio::test] +async fn should_reject_deploy_with_missing_custom_payment_contract_by_hash_from_client() { + let test_scenario = + 
TestScenario::FromClientCustomPaymentContract(ContractScenario::MissingContractAtHash); + let result = run_transaction_acceptor(test_scenario).await; + assert!(matches!( + result, + Err(super::Error::Parameters { + failure: ParameterFailure::NoSuchContractAtHash { .. }, + .. + }) + )) +} + +#[tokio::test] +async fn should_reject_deploy_with_missing_entry_point_custom_payment_from_client() { + let test_scenario = + TestScenario::FromClientCustomPaymentContract(ContractScenario::MissingEntryPoint); + let result = run_transaction_acceptor(test_scenario).await; + assert!(matches!( + result, + Err(super::Error::Parameters { + failure: ParameterFailure::NoSuchEntryPoint { .. }, + .. + }) + )) +} + +#[tokio::test] +async fn should_accept_deploy_with_valid_payment_contract_package_by_name_from_client() { + let test_scenario = + TestScenario::FromClientCustomPaymentContractPackage(ContractPackageScenario::Valid); + let result = run_transaction_acceptor(test_scenario).await; + assert!(result.is_ok()) +} + +#[tokio::test] +async fn should_accept_deploy_with_missing_payment_contract_package_at_name_from_client() { + let test_scenario = TestScenario::FromClientCustomPaymentContractPackage( + ContractPackageScenario::MissingPackageAtName, + ); + let result = run_transaction_acceptor(test_scenario).await; + assert!(result.is_ok()) +} + +#[tokio::test] +async fn should_reject_deploy_with_missing_payment_contract_package_at_hash_from_client() { + let test_scenario = TestScenario::FromClientCustomPaymentContractPackage( + ContractPackageScenario::MissingPackageAtHash, + ); + let result = run_transaction_acceptor(test_scenario).await; + assert!(matches!( + result, + Err(super::Error::Parameters { + failure: ParameterFailure::NoSuchPackageAtHash { .. }, + .. 
+ }) + )) +} + +#[tokio::test] +async fn should_accept_deploy_with_valid_session_contract_from_client() { + let test_scenario = + TestScenario::FromClientSessionContract(TxnType::Deploy, ContractScenario::Valid); + let result = run_transaction_acceptor(test_scenario).await; + assert!(result.is_ok()) +} + +#[tokio::test] +async fn should_accept_transaction_v1_with_valid_session_contract_from_client() { + let test_scenario = + TestScenario::FromClientSessionContract(TxnType::V1, ContractScenario::Valid); + let result = run_transaction_acceptor(test_scenario).await; + assert!(result.is_ok()) +} + +#[tokio::test] +async fn should_accept_deploy_with_missing_session_contract_by_name_from_client() { + let test_scenario = TestScenario::FromClientSessionContract( + TxnType::Deploy, + ContractScenario::MissingContractAtName, + ); + let result = run_transaction_acceptor(test_scenario).await; + assert!(result.is_ok()) +} + +#[tokio::test] +async fn should_accept_transaction_v1_with_missing_session_contract_by_name_from_client() { + let test_scenario = TestScenario::FromClientSessionContract( + TxnType::V1, + ContractScenario::MissingContractAtName, + ); + let result = run_transaction_acceptor(test_scenario).await; + assert!(result.is_ok()) +} + +#[tokio::test] +async fn should_reject_deploy_with_missing_session_contract_by_hash_from_client() { + let test_scenario = TestScenario::FromClientSessionContract( + TxnType::Deploy, + ContractScenario::MissingContractAtHash, + ); + let result = run_transaction_acceptor(test_scenario).await; + assert!(matches!( + result, + Err(super::Error::Parameters { + failure: ParameterFailure::NoSuchContractAtHash { .. }, + .. 
+ }) + )) +} + +#[tokio::test] +async fn should_reject_transaction_v1_with_missing_session_contract_by_hash_from_client() { + let test_scenario = TestScenario::FromClientSessionContract( + TxnType::V1, + ContractScenario::MissingContractAtHash, + ); + let result = run_transaction_acceptor(test_scenario).await; + assert!(matches!( + result, + Err(super::Error::Parameters { + failure: ParameterFailure::NoSuchContractAtHash { .. }, + .. + }) + )) +} + +#[tokio::test] +async fn should_reject_deploy_with_missing_entry_point_in_session_contract_from_client() { + let test_scenario = TestScenario::FromClientSessionContract( + TxnType::Deploy, + ContractScenario::MissingEntryPoint, + ); + let result = run_transaction_acceptor(test_scenario).await; + assert!(matches!( + result, + Err(super::Error::Parameters { + failure: ParameterFailure::NoSuchEntryPoint { .. }, + .. + }) + )) +} + +#[tokio::test] +async fn should_reject_transaction_v1_with_missing_entry_point_in_session_contract_from_client() { + let test_scenario = + TestScenario::FromClientSessionContract(TxnType::V1, ContractScenario::MissingEntryPoint); + let result = run_transaction_acceptor(test_scenario).await; + assert!(matches!( + result, + Err(super::Error::Parameters { + failure: ParameterFailure::NoSuchEntryPoint { .. }, + .. 
+ }) + )) +} + +#[tokio::test] +async fn should_accept_deploy_with_valid_session_contract_package_from_client() { + let test_scenario = TestScenario::FromClientSessionContractPackage( + TxnType::Deploy, + ContractPackageScenario::Valid, + ); + let result = run_transaction_acceptor(test_scenario).await; + assert!(result.is_ok()) +} + +#[tokio::test] +async fn should_accept_transaction_v1_with_valid_session_contract_package_from_client() { + let test_scenario = + TestScenario::FromClientSessionContractPackage(TxnType::V1, ContractPackageScenario::Valid); + let result = run_transaction_acceptor(test_scenario).await; + assert!(result.is_ok()) +} + +#[tokio::test] +async fn should_accept_deploy_with_missing_session_contract_package_at_name_from_client() { + let test_scenario = TestScenario::FromClientSessionContractPackage( + TxnType::Deploy, + ContractPackageScenario::MissingPackageAtName, + ); + let result = run_transaction_acceptor(test_scenario).await; + assert!(result.is_ok()) +} + +#[tokio::test] +async fn should_accept_transaction_v1_with_missing_session_contract_package_at_name_from_client() { + let test_scenario = TestScenario::FromClientSessionContractPackage( + TxnType::V1, + ContractPackageScenario::MissingPackageAtName, + ); + let result = run_transaction_acceptor(test_scenario).await; + assert!(result.is_ok()) +} + +#[tokio::test] +async fn should_reject_deploy_with_missing_session_contract_package_at_hash_from_client() { + let test_scenario = TestScenario::FromClientSessionContractPackage( + TxnType::Deploy, + ContractPackageScenario::MissingPackageAtHash, + ); + let result = run_transaction_acceptor(test_scenario).await; + assert!(matches!( + result, + Err(super::Error::Parameters { + failure: ParameterFailure::NoSuchPackageAtHash { .. }, + .. 
+ }) + )) +} + +#[tokio::test] +async fn should_reject_transaction_v1_with_missing_session_contract_package_at_hash_from_client() { + let test_scenario = TestScenario::FromClientSessionContractPackage( + TxnType::V1, + ContractPackageScenario::MissingPackageAtHash, + ); + let result = run_transaction_acceptor(test_scenario).await; + assert!(matches!( + result, + Err(super::Error::Parameters { + failure: ParameterFailure::NoSuchPackageAtHash { .. }, + .. + }) + )) +} + +#[tokio::test] +async fn should_reject_transaction_v1_with_missing_version_in_session_contract_package_from_client() +{ + let test_scenario = TestScenario::FromClientSessionContractPackage( + TxnType::V1, + ContractPackageScenario::MissingContractVersion, + ); + let result = run_transaction_acceptor(test_scenario).await; + assert!(matches!( + result, + Err(super::Error::Parameters { + failure: ParameterFailure::MissingEntityAtVersion { .. }, + .. + }) + )) +} + +#[tokio::test] +async fn should_accept_deploy_with_valid_custom_payment_from_peer() { + let test_scenario = TestScenario::FromPeerCustomPaymentContract(ContractScenario::Valid); + let result = run_transaction_acceptor(test_scenario).await; + assert!(result.is_ok()) +} + +#[tokio::test] +async fn should_accept_deploy_with_missing_custom_payment_contract_by_name_from_peer() { + let test_scenario = + TestScenario::FromPeerCustomPaymentContract(ContractScenario::MissingContractAtName); + let result = run_transaction_acceptor(test_scenario).await; + assert!(result.is_ok()) +} + +#[tokio::test] +async fn should_reject_deploy_with_missing_custom_payment_contract_by_hash_from_peer() { + let test_scenario = + TestScenario::FromPeerCustomPaymentContract(ContractScenario::MissingContractAtHash); + let result = run_transaction_acceptor(test_scenario).await; + assert!(matches!( + result, + Err(super::Error::Parameters { + failure: ParameterFailure::NoSuchContractAtHash { .. }, + .. 
+ }) + )) +} + +#[tokio::test] +async fn should_reject_deploy_with_missing_entry_point_custom_payment_from_peer() { + let test_scenario = + TestScenario::FromPeerCustomPaymentContract(ContractScenario::MissingEntryPoint); + let result = run_transaction_acceptor(test_scenario).await; + assert!(matches!( + result, + Err(super::Error::Parameters { + failure: ParameterFailure::NoSuchEntryPoint { .. }, + .. + }) + )) +} + +#[tokio::test] +async fn should_accept_deploy_with_valid_payment_contract_package_by_name_from_peer() { + let test_scenario = + TestScenario::FromPeerCustomPaymentContractPackage(ContractPackageScenario::Valid); + let result = run_transaction_acceptor(test_scenario).await; + assert!(result.is_ok()) +} + +#[tokio::test] +async fn should_accept_deploy_with_missing_payment_contract_package_at_name_from_peer() { + let test_scenario = TestScenario::FromPeerCustomPaymentContractPackage( + ContractPackageScenario::MissingPackageAtName, + ); + let result = run_transaction_acceptor(test_scenario).await; + assert!(result.is_ok()) +} + +#[tokio::test] +async fn should_reject_deploy_with_missing_payment_contract_package_at_hash_from_peer() { + let test_scenario = TestScenario::FromPeerCustomPaymentContractPackage( + ContractPackageScenario::MissingPackageAtHash, + ); + let result = run_transaction_acceptor(test_scenario).await; + assert!(matches!( + result, + Err(super::Error::Parameters { + failure: ParameterFailure::NoSuchPackageAtHash { .. }, + .. 
+ }) + )) +} + +#[tokio::test] +async fn should_accept_deploy_with_valid_session_contract_from_peer() { + let test_scenario = + TestScenario::FromPeerSessionContract(TxnType::Deploy, ContractScenario::Valid); + let result = run_transaction_acceptor(test_scenario).await; + assert!(result.is_ok()) +} + +#[tokio::test] +async fn should_accept_transaction_v1_with_valid_session_contract_from_peer() { + let test_scenario = TestScenario::FromPeerSessionContract(TxnType::V1, ContractScenario::Valid); + let result = run_transaction_acceptor(test_scenario).await; + assert!(result.is_ok()) +} + +#[tokio::test] +async fn should_accept_deploy_with_missing_session_contract_by_name_from_peer() { + let test_scenario = TestScenario::FromPeerSessionContract( + TxnType::Deploy, + ContractScenario::MissingContractAtName, + ); + let result = run_transaction_acceptor(test_scenario).await; + assert!(result.is_ok()) +} + +#[tokio::test] +async fn should_accept_transaction_v1_with_missing_session_contract_by_name_from_peer() { + let test_scenario = + TestScenario::FromPeerSessionContract(TxnType::V1, ContractScenario::MissingContractAtName); + let result = run_transaction_acceptor(test_scenario).await; + assert!(result.is_ok()) +} + +#[tokio::test] +async fn should_reject_deploy_with_missing_session_contract_by_hash_from_peer() { + let test_scenario = TestScenario::FromPeerSessionContract( + TxnType::Deploy, + ContractScenario::MissingContractAtHash, + ); + let result = run_transaction_acceptor(test_scenario).await; + assert!(matches!( + result, + Err(super::Error::Parameters { + failure: ParameterFailure::NoSuchContractAtHash { .. }, + .. 
+ }) + )) +} + +#[tokio::test] +async fn should_reject_transaction_v1_with_missing_session_contract_by_hash_from_peer() { + let test_scenario = + TestScenario::FromPeerSessionContract(TxnType::V1, ContractScenario::MissingContractAtHash); + let result = run_transaction_acceptor(test_scenario).await; + assert!(matches!( + result, + Err(super::Error::Parameters { + failure: ParameterFailure::NoSuchContractAtHash { .. }, + .. + }) + )) +} + +#[tokio::test] +async fn should_reject_deploy_with_missing_entry_point_in_session_contract_from_peer() { + let test_scenario = + TestScenario::FromPeerSessionContract(TxnType::Deploy, ContractScenario::MissingEntryPoint); + let result = run_transaction_acceptor(test_scenario).await; + assert!(matches!( + result, + Err(super::Error::Parameters { + failure: ParameterFailure::NoSuchEntryPoint { .. }, + .. + }) + )) +} + +#[tokio::test] +async fn should_reject_transaction_v1_with_missing_entry_point_in_session_contract_from_peer() { + let test_scenario = + TestScenario::FromPeerSessionContract(TxnType::V1, ContractScenario::MissingEntryPoint); + let result = run_transaction_acceptor(test_scenario).await; + assert!(matches!( + result, + Err(super::Error::Parameters { + failure: ParameterFailure::NoSuchEntryPoint { .. }, + .. 
+ }) + )) +} + +#[tokio::test] +async fn should_accept_deploy_with_valid_session_contract_package_from_peer() { + let test_scenario = TestScenario::FromPeerSessionContractPackage( + TxnType::Deploy, + ContractPackageScenario::Valid, + ); + let result = run_transaction_acceptor(test_scenario).await; + assert!(result.is_ok()) +} + +#[tokio::test] +async fn should_accept_transaction_v1_with_valid_session_contract_package_from_peer() { + let test_scenario = + TestScenario::FromPeerSessionContractPackage(TxnType::V1, ContractPackageScenario::Valid); + let result = run_transaction_acceptor(test_scenario).await; + assert!(result.is_ok()) +} + +#[tokio::test] +async fn should_accept_deploy_with_missing_session_contract_package_at_name_from_peer() { + let test_scenario = TestScenario::FromPeerSessionContractPackage( + TxnType::Deploy, + ContractPackageScenario::MissingPackageAtName, + ); + let result = run_transaction_acceptor(test_scenario).await; + assert!(result.is_ok()) +} + +#[tokio::test] +async fn should_accept_transaction_v1_with_missing_session_contract_package_at_name_from_peer() { + let test_scenario = TestScenario::FromPeerSessionContractPackage( + TxnType::V1, + ContractPackageScenario::MissingPackageAtName, + ); + let result = run_transaction_acceptor(test_scenario).await; + assert!(result.is_ok()) +} + +#[tokio::test] +async fn should_reject_deploy_with_missing_session_contract_package_at_hash_from_peer() { + let test_scenario = TestScenario::FromPeerSessionContractPackage( + TxnType::Deploy, + ContractPackageScenario::MissingPackageAtHash, + ); + let result = run_transaction_acceptor(test_scenario).await; + assert!(matches!( + result, + Err(super::Error::Parameters { + failure: ParameterFailure::NoSuchPackageAtHash { .. }, + .. 
+ }) + )) +} + +#[tokio::test] +async fn should_reject_transaction_v1_with_missing_session_contract_package_at_hash_from_peer() { + let test_scenario = TestScenario::FromPeerSessionContractPackage( + TxnType::V1, + ContractPackageScenario::MissingPackageAtHash, + ); + let result = run_transaction_acceptor(test_scenario).await; + assert!(matches!( + result, + Err(super::Error::Parameters { + failure: ParameterFailure::NoSuchPackageAtHash { .. }, + .. + }) + )) +} + +#[tokio::test] +async fn should_reject_transaction_v1_with_missing_version_in_session_contract_package_from_peer() { + let test_scenario = TestScenario::FromPeerSessionContractPackage( + TxnType::V1, + ContractPackageScenario::MissingContractVersion, + ); + let result = run_transaction_acceptor(test_scenario).await; + assert!(matches!( + result, + Err(super::Error::Parameters { + failure: ParameterFailure::MissingEntityAtVersion { .. }, + .. + }) + )) +} + +#[tokio::test] +async fn should_reject_deploy_with_empty_module_bytes_in_session() { + let test_scenario = TestScenario::DeployWithEmptySessionModuleBytes; + let result = run_transaction_acceptor(test_scenario).await; + assert!(matches!( + result, + Err(super::Error::Parameters { + failure: ParameterFailure::Deploy(DeployParameterFailure::MissingModuleBytes), + .. 
+ }) + )) +} + +#[tokio::test] +async fn should_reject_deploy_with_insufficient_payment() { + let test_scenario = TestScenario::DeployWithPaymentOne; + let result = run_transaction_acceptor(test_scenario).await; + assert!(matches!( + result, + Err(super::Error::InvalidTransaction( + InvalidTransaction::Deploy(InvalidDeploy::InvalidPaymentAmount) + )) + )) +} + +#[tokio::test] +async fn should_reject_deploy_with_transfer_in_payment() { + let test_scenario = TestScenario::DeployWithNativeTransferInPayment; + let result = run_transaction_acceptor(test_scenario).await; + assert!(matches!( + result, + Err(super::Error::Parameters { + failure: ParameterFailure::Deploy(DeployParameterFailure::InvalidPaymentVariant), + .. + }) + )) +} + +#[tokio::test] +async fn should_reject_deploy_without_payment_amount() { + let test_scenario = TestScenario::DeployWithoutPaymentAmount; + let result = run_transaction_acceptor(test_scenario).await; + assert!(matches!( + result, + Err(super::Error::Parameters { + failure: ParameterFailure::Deploy(DeployParameterFailure::MissingPaymentAmount), + .. + }) + )) +} + +#[tokio::test] +async fn should_reject_deploy_with_mangled_payment_amount() { + let test_scenario = TestScenario::DeployWithMangledPaymentAmount; + let result = run_transaction_acceptor(test_scenario).await; + assert!(matches!( + result, + Err(super::Error::Parameters { + failure: ParameterFailure::Deploy(DeployParameterFailure::FailedToParsePaymentAmount), + .. 
+ }) + )) +} + +#[tokio::test] +async fn should_reject_deploy_without_transfer_amount() { + let test_scenario = TestScenario::DeployWithoutTransferAmount; + let result = run_transaction_acceptor(test_scenario).await; + assert!(matches!( + result, + Err(super::Error::InvalidTransaction( + InvalidTransaction::Deploy(InvalidDeploy::MissingTransferAmount) + )) + )) +} + +#[tokio::test] +async fn should_reject_deploy_without_transfer_target() { + let test_scenario = TestScenario::DeployWithoutTransferTarget; + let result = run_transaction_acceptor(test_scenario).await; + assert!(matches!( + result, + Err(super::Error::Parameters { + failure: ParameterFailure::Deploy(DeployParameterFailure::MissingTransferTarget), + .. + }) + )) +} + +#[tokio::test] +async fn should_reject_deploy_with_mangled_transfer_amount() { + let test_scenario = TestScenario::DeployWithMangledTransferAmount; + let result = run_transaction_acceptor(test_scenario).await; + assert!(matches!( + result, + Err(super::Error::InvalidTransaction( + InvalidTransaction::Deploy(InvalidDeploy::FailedToParseTransferAmount) + )) + )) +} + +#[tokio::test] +async fn should_reject_expired_deploy_from_client() { + let test_scenario = TestScenario::FromClientExpired(TxnType::Deploy); + let result = run_transaction_acceptor(test_scenario).await; + assert!(matches!(result, Err(super::Error::Expired { .. }))) +} + +#[tokio::test] +async fn should_reject_expired_transaction_v1_from_client() { + let test_scenario = TestScenario::FromClientExpired(TxnType::V1); + let result = run_transaction_acceptor(test_scenario).await; + assert!(matches!(result, Err(super::Error::Expired { .. 
}))) +} + +#[tokio::test] +async fn should_accept_expired_deploy_from_peer() { + let test_scenario = TestScenario::FromPeerExpired(TxnType::Deploy); + let result = run_transaction_acceptor(test_scenario).await; + assert!(result.is_ok()) +} + +#[tokio::test] +async fn should_accept_expired_transaction_v1_from_peer() { + let test_scenario = TestScenario::FromPeerExpired(TxnType::V1); + let result = run_transaction_acceptor(test_scenario).await; + assert!(result.is_ok()) +} + +#[tokio::test] +#[should_panic] +async fn should_panic_when_balance_checking_for_deploy_sent_by_peer() { + let test_scenario = TestScenario::BalanceCheckForDeploySentByPeer; + let result = run_transaction_acceptor(test_scenario).await; + assert!(result.is_ok()) +} + +#[tokio::test] +async fn should_accept_deploy_signed_by_admin_from_client() { + let test_scenario = TestScenario::FromClientSignedByAdmin(TxnType::Deploy); + let result = run_transaction_acceptor(test_scenario).await; + assert!(result.is_ok()) +} + +#[tokio::test] +async fn should_accept_transaction_v1_signed_by_admin_from_client() { + let test_scenario = TestScenario::FromClientSignedByAdmin(TxnType::V1); + let result = run_transaction_acceptor(test_scenario).await; + assert!(result.is_ok()) +} + +#[tokio::test] +async fn should_reject_transaction_v1_with_invalid_pricing_mode() { + let test_scenario = TestScenario::InvalidPricingModeForTransactionV1; + let result = run_transaction_acceptor(test_scenario).await; + assert!(matches!( + result, + Err(super::Error::InvalidTransaction(InvalidTransaction::V1( + InvalidTransactionV1::InvalidPricingMode { .. 
} + ))) + )) +} + +#[tokio::test] +async fn should_reject_transaction_v1_with_too_low_gas_price_tolerance() { + let test_scenario = TestScenario::TooLowGasPriceToleranceForTransactionV1; + let result = run_transaction_acceptor(test_scenario).await; + assert!(matches!( + result, + Err(super::Error::InvalidTransaction(InvalidTransaction::V1( + InvalidTransactionV1::GasPriceToleranceTooLow { .. } + ))) + )) +} + +#[tokio::test] +async fn should_reject_transaction_v1_with_insufficient_payment() { + let test_scenario = TestScenario::TransactionWithPaymentOne; + let result = run_transaction_acceptor(test_scenario).await; + assert!(matches!( + result, + Err(super::Error::InvalidTransaction(InvalidTransaction::V1( + InvalidTransactionV1::InvalidPaymentAmount + ))) + )) +} + +#[tokio::test] +async fn should_reject_deploy_with_too_low_gas_price_tolerance() { + let test_scenario = TestScenario::TooLowGasPriceToleranceForDeploy; + let result = run_transaction_acceptor(test_scenario).await; + assert!(matches!( + result, + Err(super::Error::InvalidTransaction( + InvalidTransaction::Deploy(InvalidDeploy::GasPriceToleranceTooLow { .. 
}) + )) + )) +} + +#[tokio::test] +async fn should_reject_transaction_with_unexpected_fields() { + let result = run_transaction_acceptor(TestScenario::InvalidFields).await; + assert!(matches!( + result, + Err(super::Error::InvalidTransaction(InvalidTransaction::V1( + InvalidTransactionV1::UnexpectedTransactionFieldEntries + ))) + )) +} + +#[tokio::test] +async fn should_reject_transaction_from_peer_with_unexpected_fields() { + let result = run_transaction_acceptor(TestScenario::InvalidFieldsFromPeer).await; + assert!(matches!( + result, + Err(super::Error::InvalidTransaction(InvalidTransaction::V1( + InvalidTransactionV1::UnexpectedTransactionFieldEntries + ))) + )) +} + +#[tokio::test] +async fn should_reject_transaction_with_invalid_transaction_args() { + let result = run_transaction_acceptor(TestScenario::InvalidArgumentsKind).await; + assert!(matches!( + result, + Err(super::Error::InvalidTransaction(InvalidTransaction::V1( + InvalidTransactionV1::ExpectedNamedArguments + ))) + )); +} + +#[tokio::test] +async fn should_reject_wasm_transaction_with_limited_too_big_payment() { + let result = run_transaction_acceptor(TestScenario::WasmTransactionWithTooBigPayment).await; + assert!(matches!( + result, + Err(super::Error::InvalidTransaction(InvalidTransaction::V1( + InvalidTransactionV1::NoLaneMatch + ))) + )); +} + +#[tokio::test] +async fn should_reject_deploy_with_payment_amount_larger_than_max_wasm_lane_limit() { + let result = run_transaction_acceptor(TestScenario::WasmDeployWithTooBigPayment).await; + assert!(matches!( + result, + Err(super::Error::InvalidTransaction( + InvalidTransaction::Deploy(InvalidDeploy::NoLaneMatch) + )) + )); +} + +#[tokio::test] +async fn should_reject_native_delegate_with_exceeding_amount() { + let result = run_transaction_acceptor(TestScenario::DelegateExceedingMaximumDelegation).await; + assert!(matches!( + result, + Err(super::Error::InvalidTransaction(InvalidTransaction::V1( + InvalidTransactionV1::InvalidDelegationAmount { .. 
} + ))) + )); +} + +#[tokio::test] +async fn should_reject_native_redelegate_with_exceeding_amount() { + let result = run_transaction_acceptor(TestScenario::RedelegateExceedingMaximumDelegation).await; + assert!(matches!( + result, + Err(super::Error::InvalidTransaction(InvalidTransaction::V1( + InvalidTransactionV1::InvalidDelegationAmount { .. } + ))) + )); +} + +#[tokio::test] +async fn foobar() { + let result = run_transaction_acceptor(TestScenario::VmCasperV2ByPackageHash).await; + assert!( + matches!( + result, + Err(super::Error::InvalidTransaction(InvalidTransaction::V1( + InvalidTransactionV1::UnsupportedInvocationTarget { id: Some(_) } + ))) + ), + "{result:?}" + ); +} + +#[tokio::test] +async fn should_fail_if_package_doesnt_exist_by_hash() { + let result = run_transaction_acceptor(TestScenario::V1ByPackage( + HashOrName::Hash, + None, + None, + ContractVersionExistance::PackageDoesNotExist, + )) + .await; + assert!(matches!( + result, + Err(super::Error::Parameters { + failure: ParameterFailure::NoSuchPackageAtHash { .. }, + .. 
+ }) + )); +} + +#[tokio::test] +async fn should_not_fail_if_package_doesnt_exist_by_name() { + let result = run_transaction_acceptor(TestScenario::V1ByPackage( + HashOrName::Name, + None, + None, + ContractVersionExistance::PackageDoesNotExist, + )) + .await; + assert!(result.is_ok()); +} + +#[tokio::test] +async fn should_approve_if_transaction_references_no_version_or_major() { + let result = run_transaction_acceptor(TestScenario::V1ByPackage( + HashOrName::Hash, + None, + None, + ContractVersionExistance::PackageExists(true, BTreeMap::new(), BTreeSet::new()), + )) + .await; + assert!(result.is_ok()); +} + +#[tokio::test] +async fn should_approve_if_transaction_references_package_by_name() { + let result = run_transaction_acceptor(TestScenario::V1ByPackage( + HashOrName::Name, + None, + None, + ContractVersionExistance::PackageExists(true, BTreeMap::new(), BTreeSet::new()), + )) + .await; + assert!(result.is_ok()); +} + +#[tokio::test] +async fn should_approve_if_transaction_references_version_and_no_major() { + let result = run_transaction_acceptor(TestScenario::V1ByPackage( + HashOrName::Hash, + Some(1), + None, + ContractVersionExistance::PackageExists(true, BTreeMap::new(), BTreeSet::new()), + )) + .await; + assert!(result.is_ok()); +} + +#[tokio::test] +async fn should_fail_when_asking_for_non_active_exact_version() { + let versions = BTreeMap::from([ + (ContractVersionKey::new(1, 1), ContractHash::from([2; 32])), + (ContractVersionKey::new(2, 1), ContractHash::from([3; 32])), + ]); + let disabled = BTreeSet::from_iter(vec![ContractVersionKey::new(1, 1)].into_iter()); + let result = run_transaction_acceptor(TestScenario::V1ByPackage( + HashOrName::Hash, + Some(2), + Some(2), //Assuming current protocol version >= 2 + ContractVersionExistance::PackageExists(false, versions, disabled), + )) + .await; + assert!(matches!( + result, + Err(super::Error::Parameters { + failure: ParameterFailure::MissingEntityAtVersion { .. }, + .. 
+ }) + )) +} + +#[tokio::test] +async fn should_succeed_when_asking_for_active_exact_version() { + let versions = BTreeMap::from([ + (ContractVersionKey::new(1, 1), ContractHash::from([2; 32])), + (ContractVersionKey::new(2, 1), ContractHash::from([3; 32])), + (ContractVersionKey::new(2, 2), ContractHash::from([4; 32])), + ]); + let disabled = BTreeSet::from_iter(vec![ContractVersionKey::new(1, 1)].into_iter()); + let result = run_transaction_acceptor(TestScenario::V1ByPackage( + HashOrName::Hash, + Some(2), + Some(2), //Assuming current protocol version >= 2 + ContractVersionExistance::PackageExists(true, versions, disabled), + )) + .await; + assert!(result.is_ok()) +} diff --git a/node/src/components/transaction_buffer.rs b/node/src/components/transaction_buffer.rs new file mode 100644 index 0000000000..f1f066d000 --- /dev/null +++ b/node/src/components/transaction_buffer.rs @@ -0,0 +1,813 @@ +mod config; +mod event; +mod metrics; +#[cfg(test)] +mod tests; + +use std::{ + collections::{btree_map, BTreeMap, HashMap, HashSet, VecDeque}, + convert::TryInto, + iter::FromIterator, + mem, + sync::Arc, +}; + +use datasize::DataSize; +use futures::FutureExt; +use itertools::Itertools; +use prometheus::Registry; +use smallvec::smallvec; +use tracing::{debug, error, info, warn}; + +use casper_types::{ + Block, BlockV2, Chainspec, Digest, DisplayIter, EraId, Timestamp, Transaction, TransactionHash, + TransactionId, AUCTION_LANE_ID, INSTALL_UPGRADE_LANE_ID, MINT_LANE_ID, +}; + +use crate::{ + components::{ + consensus::{ClContext, ProposedBlock}, + Component, ComponentState, InitializedComponent, + }, + effect::{ + announcements::TransactionBufferAnnouncement, + requests::{StorageRequest, TransactionBufferRequest}, + EffectBuilder, EffectExt, Effects, + }, + fatal, + reactor::main_reactor::MainEvent, + storage::Storage, + types::{ + appendable_block::{AddError, AppendableBlock}, + FinalizedBlock, TransactionFootprint, + }, + NodeRng, +}; +pub(crate) use config::Config; 
+pub(crate) use event::Event; + +use crate::effect::{requests::ContractRuntimeRequest, Responder}; +use metrics::Metrics; + +const COMPONENT_NAME: &str = "transaction_buffer"; + +#[derive(DataSize, Debug)] +pub(crate) struct TransactionBuffer { + state: ComponentState, + cfg: Config, + chainspec: Arc, + // Keeps track of all transactions the buffer is currently aware of. + // + // `hold` and `dead` are used to filter it on demand as necessary. + // + // The timestamp is the time when the transaction expires. + // Expired items are removed via a self-perpetuating expire event. + buffer: HashMap)>, + // When a maybe-block is in flight, we pause inclusion of the transactions within it in other + // proposed blocks. If the maybe-block becomes an actual block the transaction hashes will get + // put to self.dead, otherwise, the hold will be released and the transactions will become + // eligible to propose again. + hold: BTreeMap>, + // Transaction hashes that should not be proposed, ever. + dead: HashSet, + prices: BTreeMap, + #[data_size(skip)] + metrics: Metrics, +} + +impl TransactionBuffer { + /// Create a transaction buffer. 
+ pub(crate) fn new( + chainspec: Arc, + cfg: Config, + registry: &Registry, + ) -> Result { + Ok(TransactionBuffer { + state: ComponentState::Uninitialized, + cfg, + chainspec, + buffer: HashMap::new(), + hold: BTreeMap::new(), + dead: HashSet::new(), + prices: BTreeMap::new(), + metrics: Metrics::new(registry)?, + }) + } + + pub(crate) fn initialize_component( + &mut self, + effect_builder: EffectBuilder, + storage: &Storage, + ) -> Option> { + if >::is_uninitialized(self) { + info!( + "pending initialization of {}", + >::name(self) + ); + >::set_state( + self, + ComponentState::Initializing, + ); + let blocks = match storage.read_blocks_for_replay_protection() { + Ok(blocks) => blocks, + Err(err) => { + return Some( + fatal!( + effect_builder, + "fatal block store error when attempting to read highest blocks: {}", + err + ) + .ignore(), + ); + } + }; + debug!( + blocks = ?blocks.iter().map(Block::height).collect_vec(), + "TransactionBuffer: initialization" + ); + info!("initialized {}", >::name(self)); + let event = Event::Initialize(blocks); + return Some(smallvec![async { + smallvec![MainEvent::TransactionBuffer(event)] + } + .boxed()]); + } + if >::is_fatal(self) { + return Some( + fatal!( + effect_builder, + "{} failed to initialize", + >::name(self) + ) + .ignore(), + ); + } + None + } + + /// Manages cache ejection. 
+ fn expire(&mut self, effect_builder: EffectBuilder) -> Effects + where + REv: From + From + Send, + { + let now = Timestamp::now(); + let (buffer, mut freed): (HashMap<_, _>, _) = mem::take(&mut self.buffer) + .into_iter() + .partition(|(_, (expiry_time, _))| *expiry_time >= now); + + if !freed.is_empty() { + info!("TransactionBuffer: purging {} transaction(s)", freed.len()); + } + + // clear expired transaction from all holds, then clear any entries that have no items + // remaining + self.hold.iter_mut().for_each(|(_, held_transactions)| { + held_transactions.retain(|transaction_hash| !freed.contains_key(transaction_hash)); + }); + self.hold.retain(|_, remaining| !remaining.is_empty()); + + // retain all those in `dead` which are not in `freed`, at the same time reducing `freed` to + // only those entries not also in `dead` - i.e. transactions which expired without being + // included in a block + self.dead + .retain(|transaction_hash| freed.remove(transaction_hash).is_none()); + self.buffer = buffer; + + if !freed.is_empty() { + info!( + "TransactionBuffer: expiring without executing {} transaction(s)", + freed.len() + ); + debug!( + "TransactionBuffer: expiring without executing {}", + DisplayIter::new(freed.keys()) + ); + } + + if let Some(era_id) = self.prices.keys().max() { + let updated = self + .prices + .clone() + .into_iter() + .filter(|(price_era_id, _)| price_era_id.successor() >= *era_id) + .collect(); + + self.prices = updated; + } + + let mut effects = effect_builder + .announce_expired_transactions(freed.keys().cloned().collect()) + .ignore(); + effects.extend( + effect_builder + .set_timeout(self.cfg.expiry_check_interval().into()) + .event(move |_| Event::Expire), + ); + self.update_all_metrics(); + effects + } + + fn register_transaction_gossiped( + transaction_id: TransactionId, + effect_builder: EffectBuilder, + ) -> Effects + where + REv: From + From + Send, + { + debug!(%transaction_id, "TransactionBuffer: registering gossiped 
transaction"); + effect_builder + .get_stored_transaction(transaction_id) + .event(move |maybe_transaction| { + Event::StoredTransaction(transaction_id, maybe_transaction.map(Box::new)) + }) + } + + fn handle_get_appendable_block( + &mut self, + effect_builder: EffectBuilder, + timestamp: Timestamp, + era_id: EraId, + request_expiry: Timestamp, + responder: Responder, + ) -> Effects + where + REv: From + Send, + { + if !self.prices.contains_key(&era_id) { + info!("Empty prices field, requesting gas price from contract runtime"); + return effect_builder + .get_current_gas_price(era_id) + .event(move |maybe_gas_price| { + Event::GetGasPriceResult( + maybe_gas_price, + era_id, + timestamp, + request_expiry, + responder, + ) + }); + } + + responder + .respond(self.appendable_block(timestamp, era_id, request_expiry)) + .ignore() + } + + /// Update buffer considering new stored transaction. + fn register_transaction(&mut self, transaction: Transaction) { + let transaction_hash = transaction.hash(); + if let Err(error) = transaction.verify() { + error!(%transaction_hash, ?error, "TransactionBuffer: invalid transaction must not be buffered"); + return; + } + + if self + .hold + .values() + .any(|ths| ths.contains(&transaction_hash)) + { + info!(%transaction_hash, "TransactionBuffer: attempt to register already held transaction"); + return; + } + + let footprint = match TransactionFootprint::new(&self.chainspec, &transaction) { + Ok(footprint) => footprint, + Err(invalid_transaction_error) => { + error!(%transaction_hash, ?invalid_transaction_error, "TransactionBuffer: unable to created transaction footprint"); + return; + } + }; + let expiry_time = transaction.expires(); + match self + .buffer + .insert(transaction_hash, (expiry_time, Some(footprint))) + { + Some(prev) => { + warn!(%transaction_hash, ?prev, "TransactionBuffer: transaction upserted"); + } + None => { + debug!(%transaction_hash, "TransactionBuffer: new transaction buffered"); + 
self.metrics.total_transactions.inc(); + } + } + } + + /// Update holds considering new proposed block. + fn register_block_proposed(&mut self, proposed_block: ProposedBlock) { + let timestamp = &proposed_block.context().timestamp(); + if let Some(hold_set) = self.hold.get_mut(timestamp) { + debug!(%timestamp, "TransactionBuffer: existing hold timestamp extended"); + hold_set.extend( + proposed_block + .value() + .all_transactions() + .map(|(transaction_hash, _)| *transaction_hash), + ); + } else { + debug!(%timestamp, "TransactionBuffer: new hold timestamp inserted"); + self.hold.insert( + *timestamp, + HashSet::from_iter( + proposed_block + .value() + .all_transactions() + .map(|(transaction_hash, _)| *transaction_hash), + ), + ); + } + self.metrics.held_transactions.set( + self.hold + .values() + .map(|transactions| transactions.len()) + .sum::() + .try_into() + .unwrap_or(i64::MIN), + ); + } + + fn register_transactions<'a>( + &mut self, + timestamp: Timestamp, + transaction_hashes: impl Iterator, + ) { + let expiry_timestamp = timestamp.saturating_add(self.chainspec.transaction_config.max_ttl); + + for transaction_hash in transaction_hashes { + if !self.buffer.contains_key(transaction_hash) { + self.buffer + .insert(*transaction_hash, (expiry_timestamp, None)); + } + self.dead.insert(*transaction_hash); + } + // Transactions held for proposed blocks which did not get finalized in time are eligible + // again + let (hold, _) = mem::take(&mut self.hold) + .into_iter() + .partition(|(ts, _)| *ts > timestamp); + self.hold = hold; + self.update_all_metrics(); + } + + /// Update buffer and holds considering new added block. 
+ fn register_block(&mut self, block: &BlockV2) { + let block_height = block.height(); + let timestamp = block.timestamp(); + debug!(%timestamp, "TransactionBuffer: register_block({}) timestamp finalized", block_height); + self.register_transactions(timestamp, block.all_transactions()); + } + + /// When initializing the buffer, register past blocks in order to provide replay protection. + fn register_versioned_block(&mut self, block: &Block) { + let block_height = block.height(); + let timestamp = block.timestamp(); + debug!( + %timestamp, + "TransactionBuffer: register_versioned_block({}) timestamp finalized", + block_height + ); + match block { + Block::V1(v1_block) => { + let transaction_hashes: Vec = v1_block + .deploy_and_transfer_hashes() + .map(|deploy_hash| TransactionHash::Deploy(*deploy_hash)) + .collect(); + self.register_transactions(timestamp, transaction_hashes.iter()) + } + Block::V2(v2_block) => { + self.register_transactions(timestamp, v2_block.all_transactions()); + } + } + } + + /// Update buffer and holds considering new finalized block. + fn register_block_finalized(&mut self, finalized_block: &FinalizedBlock) { + let block_height = finalized_block.height; + let timestamp = finalized_block.timestamp; + debug!(%timestamp, "TransactionBuffer: register_block_finalized({}) timestamp finalized", block_height); + self.register_transactions(timestamp, finalized_block.all_transactions()); + } + + /// Returns eligible transactions that are buffered and not held or dead. 
+ fn proposable( + &self, + current_era_gas_price: u8, + ) -> impl Iterator { + debug!("TransactionBuffer: getting proposable transactions"); + self.buffer + .iter() + .filter(move |(th, _)| !self.hold.values().any(|hs| hs.contains(th))) + .filter(move |(th, _)| !self.dead.contains(th)) + .filter_map(|(th, (_, maybe_footprint))| { + maybe_footprint.as_ref().map(|footprint| (th, footprint)) + }) + .filter(move |(_, footprint)| footprint.gas_price_tolerance() >= current_era_gas_price) + } + + fn buckets( + &mut self, + current_era_gas_price: u8, + ) -> HashMap<&Digest, Vec<(TransactionHash, &TransactionFootprint)>> { + let proposable = self.proposable(current_era_gas_price); + + let mut buckets: HashMap<_, Vec<_>> = HashMap::new(); + for (transaction_hash, footprint) in proposable { + buckets + .entry(&footprint.payload_hash) + .and_modify(|vec| vec.push((*transaction_hash, footprint))) + .or_insert(vec![(*transaction_hash, footprint)]); + } + buckets + } + + /// Returns a right-sized payload of transactions that can be proposed. 
+ fn appendable_block( + &mut self, + timestamp: Timestamp, + era_id: EraId, + request_expiry: Timestamp, + ) -> AppendableBlock { + let current_era_gas_price = match self.prices.get(&era_id) { + Some(gas_price) => *gas_price, + None => { + return AppendableBlock::new( + self.chainspec.transaction_config.clone(), + self.chainspec.vacancy_config.min_gas_price, + timestamp, + ); + } + }; + let mut ret = AppendableBlock::new( + self.chainspec.transaction_config.clone(), + current_era_gas_price, + timestamp, + ); + if Timestamp::now() >= request_expiry { + debug!("TransactionBuffer: request expiry reached, returning empty proposal"); + return ret; + } + + let mut holds = HashSet::new(); + + let mut have_hit_mint_limit = false; + let mut have_hit_wasm_limit = false; + let mut have_hit_install_upgrade_limit = false; + let mut have_hit_auction_limit = false; + + #[cfg(test)] + let mut iter_counter = 0; + #[cfg(test)] + let iter_limit = self.buffer.len() * 4; + + let mut buckets = self.buckets(current_era_gas_price); + let mut payload_hashes_queue: VecDeque<_> = buckets.keys().cloned().collect(); + + while let Some(payload_hash) = payload_hashes_queue.pop_front() { + if Timestamp::now() > request_expiry { + debug!("TransactionBuffer: request expiry reached, returning proposal"); + break; + } + #[cfg(test)] + { + iter_counter += 1; + assert!( + iter_counter < iter_limit, + "the number of iterations shouldn't be too large" + ); + } + + let Some((transaction_hash, footprint)) = + buckets.get_mut(payload_hash).and_then(Vec::<_>::pop) + else { + continue; + }; + + // bucket wasn't empty - push the hash back into the queue to be processed again on the + // next pass + payload_hashes_queue.push_back(payload_hash); + + if footprint.is_mint() && have_hit_mint_limit { + continue; + } + if footprint.is_install_upgrade() && have_hit_install_upgrade_limit { + continue; + } + if footprint.is_auction() && have_hit_auction_limit { + continue; + } + if footprint.is_wasm_based() && 
have_hit_wasm_limit { + continue; + } + + let has_multiple_approvals = footprint.approvals.len() > 1; + match ret.add_transaction(footprint) { + Ok(_) => { + debug!(%transaction_hash, "TransactionBuffer: proposing transaction"); + holds.insert(transaction_hash); + } + Err(error) => { + match error { + AddError::Duplicate => { + // it should be physically impossible for a duplicate transaction or + // transaction to be in the transaction buffer, thus this should be + // unreachable + warn!( + ?transaction_hash, + "TransactionBuffer: duplicated transaction or transfer in transaction buffer" + ); + } + AddError::Expired => { + info!( + ?transaction_hash, + "TransactionBuffer: expired transaction or transfer in transaction buffer" + ); + } + AddError::Count(lane_id) => { + match lane_id { + lane_id if lane_id == MINT_LANE_ID => { + have_hit_mint_limit = true; + } + lane_id if lane_id == AUCTION_LANE_ID => { + have_hit_auction_limit = true; + } + lane_id if lane_id == INSTALL_UPGRADE_LANE_ID => { + have_hit_install_upgrade_limit = true; + } + _ => { + have_hit_wasm_limit = true; + } + } + if have_hit_wasm_limit + && have_hit_auction_limit + && have_hit_install_upgrade_limit + && have_hit_mint_limit + { + info!( + ?transaction_hash, + "TransactionBuffer: block fully saturated" + ); + break; + } + } + AddError::ApprovalCount if has_multiple_approvals => { + // keep iterating, we can maybe fit in a deploy with fewer approvals + } + AddError::ApprovalCount | AddError::GasLimit | AddError::BlockSize => { + info!( + ?transaction_hash, + %error, + "TransactionBuffer: a block limit has been reached" + ); + // a block limit has been reached + break; + } + AddError::VariantMismatch(mismatch) => { + error!(?transaction_hash, %mismatch, + "TransactionBuffer: data mismatch when adding transaction" + ); + // keep iterating + } + AddError::ExcessiveTtl => { + error!( + ?transaction_hash, + "TransactionBuffer: skipping transaction with excessive ttl" + ); + // keep iterating + } + 
AddError::FutureDatedDeploy => { + error!( + ?transaction_hash, + %footprint.timestamp, + "TransactionBuffer: skipping transaction with future dated deploy" + ); + // keep iterating + } + } + } + } + } + + // Put a hold on all proposed transactions / transfers and update metrics + match self.hold.entry(timestamp) { + btree_map::Entry::Vacant(entry) => { + entry.insert(holds); + } + btree_map::Entry::Occupied(mut entry) => { + entry.get_mut().extend(holds); + } + } + self.update_all_metrics(); + + info!( + "produced {}, buffer has {} held, {} dead, {} total", + ret, + self.hold + .values() + .map(|transactions| transactions.len()) + .sum::(), + self.dead.len(), + self.buffer.len() + ); + + ret + } + + /// Updates all transaction count metrics based on the size of the internal structs. + fn update_all_metrics(&mut self) { + // if number of elements is too high to fit, we overflow the metric + // intentionally in order to get some indication that something is wrong. + self.metrics.held_transactions.set( + self.hold + .values() + .map(|transactions| transactions.len()) + .sum::() + .try_into() + .unwrap_or(i64::MIN), + ); + self.metrics + .dead_transactions + .set(self.dead.len().try_into().unwrap_or(i64::MIN)); + self.metrics + .total_transactions + .set(self.buffer.len().try_into().unwrap_or(i64::MIN)); + } +} + +impl InitializedComponent for TransactionBuffer +where + REv: From + + From + + From + + From + + Send + + 'static, +{ + fn state(&self) -> &ComponentState { + &self.state + } + + fn set_state(&mut self, new_state: ComponentState) { + info!( + ?new_state, + name = >::name(self), + "component state changed" + ); + + self.state = new_state; + } +} + +impl Component for TransactionBuffer +where + REv: From + + From + + From + + From + + Send + + 'static, +{ + type Event = Event; + + fn handle_event( + &mut self, + effect_builder: EffectBuilder, + _rng: &mut NodeRng, + event: Self::Event, + ) -> Effects { + match &self.state { + ComponentState::Fatal(msg) => { + 
error!( + msg, + ?event, + name = >::name(self), + "should not handle this event when this component has fatal error" + ); + Effects::new() + } + ComponentState::Uninitialized => { + warn!( + ?event, + name = >::name(self), + "should not handle this event when component is uninitialized" + ); + Effects::new() + } + ComponentState::Initializing => { + match event { + Event::Initialize(blocks) => { + for block in blocks { + self.register_versioned_block(&block); + } + >::set_state( + self, + ComponentState::Initialized, + ); + // start self-expiry management on initialization + effect_builder + .set_timeout(self.cfg.expiry_check_interval().into()) + .event(move |_| Event::Expire) + } + Event::Request(_) + | Event::ReceiveTransactionGossiped(_) + | Event::StoredTransaction(_, _) + | Event::BlockProposed(_) + | Event::Block(_) + | Event::VersionedBlock(_) + | Event::BlockFinalized(_) + | Event::Expire + | Event::UpdateEraGasPrice { .. } + | Event::GetGasPriceResult(_, _, _, _, _) => { + warn!( + ?event, + name = >::name(self), + "should not handle this event when component is pending initialization" + ); + Effects::new() + } + } + } + ComponentState::Initialized => match event { + Event::Initialize(_) => { + error!( + ?event, + name = >::name(self), + "component already initialized" + ); + Effects::new() + } + Event::Request(TransactionBufferRequest::GetAppendableBlock { + timestamp, + era_id, + responder, + request_expiry, + }) => self.handle_get_appendable_block( + effect_builder, + timestamp, + era_id, + request_expiry, + responder, + ), + + Event::GetGasPriceResult( + maybe_gas_price, + era_id, + timestamp, + request_expiry, + responder, + ) => match maybe_gas_price { + None => responder + .respond(AppendableBlock::new( + self.chainspec.transaction_config.clone(), + self.chainspec.vacancy_config.min_gas_price, + timestamp, + )) + .ignore(), + Some(gas_price) => { + self.prices.insert(era_id, gas_price); + responder + .respond(self.appendable_block(timestamp, 
era_id, request_expiry)) + .ignore() + } + }, + Event::BlockFinalized(finalized_block) => { + self.register_block_finalized(&finalized_block); + Effects::new() + } + Event::Block(block) => { + self.register_block(&block); + Effects::new() + } + Event::VersionedBlock(block) => { + self.register_versioned_block(&block); + Effects::new() + } + Event::BlockProposed(proposed) => { + self.register_block_proposed(*proposed); + Effects::new() + } + Event::ReceiveTransactionGossiped(transaction_id) => { + Self::register_transaction_gossiped(transaction_id, effect_builder) + } + Event::StoredTransaction(transaction_id, maybe_transaction) => { + match maybe_transaction { + Some(transaction) => { + self.register_transaction(*transaction); + } + None => { + debug!("cannot register un-stored transaction({})", transaction_id); + } + } + Effects::new() + } + Event::Expire => self.expire(effect_builder), + Event::UpdateEraGasPrice(era_id, next_era_gas_price) => { + self.prices.insert(era_id, next_era_gas_price); + Effects::new() + } + }, + } + } + + fn name(&self) -> &str { + COMPONENT_NAME + } +} diff --git a/node/src/components/transaction_buffer/config.rs b/node/src/components/transaction_buffer/config.rs new file mode 100644 index 0000000000..7f9f03f4c2 --- /dev/null +++ b/node/src/components/transaction_buffer/config.rs @@ -0,0 +1,28 @@ +use datasize::DataSize; +use serde::{Deserialize, Serialize}; + +use casper_types::TimeDiff; + +const DEFAULT_EXPIRY_CHECK_INTERVAL: &str = "1min"; + +#[derive(Copy, Clone, DataSize, Debug, Deserialize, Serialize)] +#[serde(deny_unknown_fields)] +pub struct Config { + /// The interval of checking for expired transactions. + pub expiry_check_interval: TimeDiff, +} + +impl Config { + /// Returns the interval of checking for expired transactions. 
+ pub fn expiry_check_interval(&self) -> TimeDiff { + self.expiry_check_interval + } +} + +impl Default for Config { + fn default() -> Self { + Config { + expiry_check_interval: DEFAULT_EXPIRY_CHECK_INTERVAL.parse().unwrap(), + } + } +} diff --git a/node/src/components/transaction_buffer/event.rs b/node/src/components/transaction_buffer/event.rs new file mode 100644 index 0000000000..bc201e9f13 --- /dev/null +++ b/node/src/components/transaction_buffer/event.rs @@ -0,0 +1,90 @@ +use std::{ + fmt::{self, Display, Formatter}, + sync::Arc, +}; + +use datasize::DataSize; +use derive_more::From; + +use casper_types::{Block, BlockV2, EraId, Timestamp, Transaction, TransactionId}; + +use crate::{ + components::consensus::{ClContext, ProposedBlock}, + effect::{requests::TransactionBufferRequest, Responder}, + types::{appendable_block::AppendableBlock, FinalizedBlock}, +}; + +#[derive(Debug, From, DataSize)] +pub(crate) enum Event { + Initialize(Vec), + #[from] + Request(TransactionBufferRequest), + ReceiveTransactionGossiped(TransactionId), + StoredTransaction(TransactionId, Option>), + BlockProposed(Box>), + Block(Arc), + VersionedBlock(Arc), + BlockFinalized(Box), + Expire, + UpdateEraGasPrice(EraId, u8), + GetGasPriceResult( + Option, + EraId, + Timestamp, + Timestamp, + Responder, + ), +} + +impl Display for Event { + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + match self { + Event::Initialize(blocks) => { + write!(formatter, "initialize, {} blocks", blocks.len()) + } + Event::Request(TransactionBufferRequest::GetAppendableBlock { .. 
}) => { + write!(formatter, "get appendable block request") + } + Event::ReceiveTransactionGossiped(transaction_id) => { + write!(formatter, "receive transaction gossiped {}", transaction_id) + } + Event::StoredTransaction(transaction_id, maybe_transaction) => { + write!( + formatter, + "{} stored: {:?}", + transaction_id, + maybe_transaction.is_some() + ) + } + Event::BlockProposed(_) => { + write!(formatter, "proposed block") + } + Event::BlockFinalized(finalized_block) => { + write!( + formatter, + "finalized block at height {}", + finalized_block.height + ) + } + Event::Block(_) => { + write!(formatter, "block") + } + Event::VersionedBlock(_) => { + write!(formatter, "versioned block") + } + Event::Expire => { + write!(formatter, "expire transactions") + } + Event::UpdateEraGasPrice(era_id, next_era_gas_price) => { + write!( + formatter, + "gas price {} for era {}", + next_era_gas_price, era_id + ) + } + Event::GetGasPriceResult(_, era_id, _, _, _) => { + write!(formatter, "retrieving gas price for era {}", era_id) + } + } + } +} diff --git a/node/src/components/transaction_buffer/metrics.rs b/node/src/components/transaction_buffer/metrics.rs new file mode 100644 index 0000000000..706076db1f --- /dev/null +++ b/node/src/components/transaction_buffer/metrics.rs @@ -0,0 +1,52 @@ +use prometheus::{IntGauge, Registry}; + +use crate::unregister_metric; + +/// Metrics for the transaction_buffer component. +#[derive(Debug)] +pub(super) struct Metrics { + /// Total number of transactions contained in the transaction buffer. + pub(super) total_transactions: IntGauge, + /// Number of transactions contained in in-flight proposed blocks. + pub(super) held_transactions: IntGauge, + /// Number of transactions that should not be included in future proposals ever again. + pub(super) dead_transactions: IntGauge, + registry: Registry, +} + +impl Metrics { + /// Creates a new instance of the transaction buffer metrics, using the given prefix. 
+ pub fn new(registry: &Registry) -> Result { + let total_transactions = IntGauge::new( + "transaction_buffer_total_transactions".to_string(), + "total number of transactions contained in the transaction buffer.".to_string(), + )?; + let held_transactions = IntGauge::new( + "transaction_buffer_held_transactions".to_string(), + "number of transactions included in in-flight proposed blocks.".to_string(), + )?; + let dead_transactions = IntGauge::new( + "transaction_buffer_dead_transactions".to_string(), + "number of transactions that should not be included in future proposals.".to_string(), + )?; + + registry.register(Box::new(total_transactions.clone()))?; + registry.register(Box::new(held_transactions.clone()))?; + registry.register(Box::new(dead_transactions.clone()))?; + + Ok(Metrics { + total_transactions, + held_transactions, + dead_transactions, + registry: registry.clone(), + }) + } +} + +impl Drop for Metrics { + fn drop(&mut self) { + unregister_metric!(self.registry, self.total_transactions); + unregister_metric!(self.registry, self.held_transactions); + unregister_metric!(self.registry, self.dead_transactions); + } +} diff --git a/node/src/components/transaction_buffer/tests.rs b/node/src/components/transaction_buffer/tests.rs new file mode 100644 index 0000000000..17926637a3 --- /dev/null +++ b/node/src/components/transaction_buffer/tests.rs @@ -0,0 +1,1539 @@ +use std::iter; + +use prometheus::Registry; +use rand::{seq::SliceRandom, Rng}; + +use super::*; +use crate::{ + effect::announcements::TransactionBufferAnnouncement::{self, TransactionsExpired}, + reactor::{EventQueueHandle, QueueKind, Scheduler}, + testing::LARGE_WASM_LANE_ID, + types::{transaction::transaction_v1_builder::TransactionV1Builder, FinalizedBlock}, + utils, +}; +use casper_types::{ + testing::TestRng, Deploy, EraId, SecretKey, TestBlockBuilder, TimeDiff, Transaction, + TransactionConfig, TransactionLaneDefinition, TransactionV1Config, + DEFAULT_LARGE_TRANSACTION_GAS_LIMIT, +}; + 
+const ERA_ONE: EraId = EraId::new(1u64); +const GAS_PRICE_TOLERANCE: u8 = 1; +const DEFAULT_MINIMUM_GAS_PRICE: u8 = 1; + +fn get_appendable_block( + rng: &mut TestRng, + transaction_buffer: &mut TransactionBuffer, + categories: impl Iterator, + transaction_limit: usize, +) { + let transactions: Vec<_> = categories + .take(transaction_limit) + .map(|category| create_valid_transaction(rng, category, None, None)) + .collect(); + transactions + .iter() + .for_each(|transaction| transaction_buffer.register_transaction(transaction.clone())); + assert_container_sizes(transaction_buffer, transactions.len(), 0, 0); + + // now check how many transfers were added in the block; should not exceed the config limits. + let timestamp = Timestamp::now(); + let expiry = timestamp.saturating_add(TimeDiff::from_seconds(1)); + let appendable_block = transaction_buffer.appendable_block(Timestamp::now(), ERA_ONE, expiry); + assert!(appendable_block.transaction_hashes().len() <= transaction_limit); + assert_eq!(transaction_buffer.hold.len(), 1); + assert_container_sizes( + transaction_buffer, + transactions.len(), + 0, + appendable_block.transaction_hashes().len(), + ); +} + +// Generates valid transactions +fn create_valid_transaction( + rng: &mut TestRng, + transaction_lane: u8, + strict_timestamp: Option, + with_ttl: Option, +) -> Transaction { + let transaction_ttl = match with_ttl { + Some(ttl) => ttl, + None => TimeDiff::from_seconds(rng.gen_range(30..100)), + }; + let transaction_timestamp = match strict_timestamp { + Some(timestamp) => timestamp, + None => Timestamp::now(), + }; + + match transaction_lane { + transaction_lane if transaction_lane == MINT_LANE_ID => { + if rng.gen() { + let transaction_v1 = + TransactionV1Builder::new_random_with_category_and_timestamp_and_ttl( + rng, + MINT_LANE_ID, + strict_timestamp, + with_ttl, + ) + .build() + .unwrap(); + Transaction::V1(transaction_v1) + } else { + 
Transaction::Deploy(Deploy::random_valid_native_transfer_with_timestamp_and_ttl( + rng, + transaction_timestamp, + transaction_ttl, + )) + } + } + transaction_lane if transaction_lane == INSTALL_UPGRADE_LANE_ID => Transaction::V1( + TransactionV1Builder::new_random_with_category_and_timestamp_and_ttl( + rng, + INSTALL_UPGRADE_LANE_ID, + strict_timestamp, + with_ttl, + ) + .build() + .unwrap(), + ), + transaction_lane if transaction_lane == AUCTION_LANE_ID => Transaction::V1( + TransactionV1Builder::new_random_with_category_and_timestamp_and_ttl( + rng, + AUCTION_LANE_ID, + strict_timestamp, + with_ttl, + ) + .build() + .unwrap(), + ), + _ => { + if rng.gen() { + Transaction::Deploy(match (strict_timestamp, with_ttl) { + (Some(timestamp), Some(ttl)) if Timestamp::now() > timestamp + ttl => { + Deploy::random_expired_deploy(rng) + } + _ => Deploy::random_with_valid_session_package_by_name(rng), + }) + } else { + Transaction::V1( + TransactionV1Builder::new_random_with_category_and_timestamp_and_ttl( + rng, + LARGE_WASM_LANE_ID, + strict_timestamp, + with_ttl, + ) + .build() + .unwrap(), + ) + } + } + } +} + +/// Checks sizes of the transaction_buffer containers. Also checks the metrics recorded. 
+#[track_caller] +fn assert_container_sizes( + transaction_buffer: &TransactionBuffer, + expected_buffer: usize, + expected_dead: usize, + expected_held: usize, +) { + assert_eq!( + transaction_buffer.buffer.len(), + expected_buffer, + "buffer.len {} != expected {}", + transaction_buffer.buffer.len(), + expected_buffer + ); + assert_eq!( + transaction_buffer.dead.len(), + expected_dead, + "dead.len {} != expected {}", + transaction_buffer.dead.len(), + expected_dead + ); + let hold_len = transaction_buffer + .hold + .values() + .map(|transactions| transactions.len()) + .sum::(); + assert_eq!( + hold_len, expected_held, + "hold.len {} != expected {}", + hold_len, expected_held, + ); + assert_eq!( + transaction_buffer.metrics.total_transactions.get(), + expected_buffer as i64, + "metrics total {} != expected {}", + transaction_buffer.metrics.total_transactions.get(), + expected_buffer, + ); + assert_eq!( + transaction_buffer.metrics.held_transactions.get(), + expected_held as i64, + "metrics held {} != expected {}", + transaction_buffer.metrics.held_transactions.get(), + expected_held, + ); + assert_eq!( + transaction_buffer.metrics.dead_transactions.get(), + expected_dead as i64, + "metrics dead {} != expected {}", + transaction_buffer.metrics.dead_transactions.get(), + expected_dead, + ); +} + +const fn all_categories() -> [u8; 4] { + [ + MINT_LANE_ID, + INSTALL_UPGRADE_LANE_ID, + AUCTION_LANE_ID, + LARGE_WASM_LANE_ID, + ] +} + +#[test] +fn register_transaction_and_check_size() { + let mut rng = TestRng::new(); + let chainspec = Chainspec::default(); + for category in all_categories() { + let mut transaction_buffer = TransactionBuffer::new( + Arc::new(chainspec.clone()), + Config::default(), + &Registry::new(), + ) + .unwrap(); + + // Try to register valid transactions + let num_valid_transactions: usize = rng.gen_range(50..500); + let valid_transactions: Vec<_> = (0..num_valid_transactions) + .map(|_| create_valid_transaction(&mut rng, category, None, None)) + 
.collect(); + valid_transactions + .iter() + .for_each(|transaction| transaction_buffer.register_transaction(transaction.clone())); + assert_container_sizes(&transaction_buffer, valid_transactions.len(), 0, 0); + + // Try to register a duplicate transaction + let duplicate_transaction = valid_transactions + .get(rng.gen_range(0..num_valid_transactions)) + .unwrap() + .clone(); + transaction_buffer.register_transaction(duplicate_transaction.clone()); + assert_container_sizes(&transaction_buffer, valid_transactions.len(), 0, 0); + + // Insert transaction without footprint + let bad_transaction = { + let mut deploy = Deploy::random_valid_native_transfer(&mut rng); + deploy.invalidate(); + Transaction::from(deploy) + }; + assert!(bad_transaction.verify().is_err()); + transaction_buffer.register_transaction(bad_transaction); + assert_container_sizes(&transaction_buffer, valid_transactions.len(), 0, 0); + } +} + +#[test] +fn register_block_with_valid_transactions() { + let mut rng = TestRng::new(); + + for category in all_categories() { + let mut transaction_buffer = TransactionBuffer::new( + Arc::new(Chainspec::default()), + Config::default(), + &Registry::new(), + ) + .unwrap(); + + let txns: Vec<_> = (0..10) + .map(|_| create_valid_transaction(&mut rng, category, None, None)) + .collect(); + let era_id = EraId::new(rng.gen_range(0..6)); + let height = era_id.value() * 10 + rng.gen_range(0..10); + let is_switch = rng.gen_bool(0.1); + let block = TestBlockBuilder::new() + .era(era_id) + .height(height) + .switch_block(is_switch) + .transactions(&txns) + .build(&mut rng); + + transaction_buffer.register_block(&block); + assert_container_sizes(&transaction_buffer, txns.len(), txns.len(), 0); + } +} + +#[test] +fn register_finalized_block_with_valid_transactions() { + let mut rng = TestRng::new(); + + for category in all_categories() { + let mut transaction_buffer = TransactionBuffer::new( + Arc::new(Chainspec::default()), + Config::default(), + &Registry::new(), + ) + 
.unwrap(); + + let txns: Vec<_> = (0..10) + .map(|_| create_valid_transaction(&mut rng, category, None, None)) + .collect(); + let block = FinalizedBlock::random(&mut rng, &txns); + + transaction_buffer.register_block_finalized(&block); + assert_container_sizes(&transaction_buffer, txns.len(), txns.len(), 0); + } +} + +#[test] +fn get_proposable_transactions() { + let mut rng = TestRng::new(); + + for category in all_categories() { + let mut transaction_buffer = TransactionBuffer::new( + Arc::new(Chainspec::default()), + Config::default(), + &Registry::new(), + ) + .unwrap(); + + transaction_buffer + .prices + .insert(ERA_ONE, DEFAULT_MINIMUM_GAS_PRICE); + + // populate transaction buffer with some transactions + let transactions: Vec<_> = (0..50) + .map(|_| create_valid_transaction(&mut rng, category, None, None)) + .collect(); + transactions + .iter() + .for_each(|transaction| transaction_buffer.register_transaction(transaction.clone())); + assert_container_sizes(&transaction_buffer, transactions.len(), 0, 0); + + // Create a block with some transactions and register it with the transaction_buffer + let block_transactions: Vec<_> = (0..10) + .map(|_| create_valid_transaction(&mut rng, category, None, None)) + .collect(); + let txns: Vec<_> = block_transactions.to_vec(); + let block = FinalizedBlock::random(&mut rng, &txns); + transaction_buffer.register_block_finalized(&block); + assert_container_sizes( + &transaction_buffer, + transactions.len() + block_transactions.len(), + block_transactions.len(), + 0, + ); + + // Check which transactions are proposable. Should return the transactions that were not + // included in the block since those should be dead. 
+ let proposable: Vec<_> = transaction_buffer + .proposable(DEFAULT_MINIMUM_GAS_PRICE) + .collect(); + assert_eq!(proposable.len(), transactions.len()); + let proposable_transaction_hashes: HashSet<_> = + proposable.iter().map(|(th, _)| *th).collect(); + for transaction in transactions.iter() { + assert!(proposable_transaction_hashes.contains(&transaction.hash())); + } + + // Get an appendable block. This should put the deploys on hold. + let timestamp = Timestamp::now(); + let expiry = timestamp.saturating_add(TimeDiff::from_seconds(1)); + let appendable_block = + transaction_buffer.appendable_block(Timestamp::now(), ERA_ONE, expiry); + assert_eq!(transaction_buffer.hold.len(), 1); + assert_container_sizes( + &transaction_buffer, + transactions.len() + block_transactions.len(), + block_transactions.len(), + appendable_block.transaction_hashes().len(), + ); + + // Check that held blocks are not proposable + let proposable: Vec<_> = transaction_buffer + .proposable(DEFAULT_MINIMUM_GAS_PRICE) + .collect(); + assert_eq!( + proposable.len(), + transactions.len() - appendable_block.transaction_hashes().len() + ); + for transaction in proposable { + assert!(!appendable_block + .transaction_hashes() + .contains(transaction.0)); + } + } +} + +#[test] +fn get_appendable_block_when_transfers_are_of_one_lane() { + let mut rng = TestRng::new(); + + let transaction_v1_config = + TransactionV1Config::default().with_count_limits(Some(200), Some(0), Some(0), Some(10)); + + let transaction_config = TransactionConfig { + block_max_approval_count: 210, + block_gas_limit: u64::MAX, // making sure this test does not hit gas limit first + transaction_v1_config, + ..Default::default() + }; + + let chainspec = Arc::new(Chainspec { + transaction_config: transaction_config.clone(), + ..Default::default() + }); + let mut transaction_buffer = + TransactionBuffer::new(chainspec, Config::default(), &Registry::new()).unwrap(); + + transaction_buffer + .prices + .insert(ERA_ONE, 
DEFAULT_MINIMUM_GAS_PRICE); + get_appendable_block( + &mut rng, + &mut transaction_buffer, + iter::repeat_with(|| MINT_LANE_ID), + transaction_config + .transaction_v1_config + .get_max_transaction_count(MINT_LANE_ID) as usize + + 50, + ); +} + +#[test] +fn get_appendable_block_when_transfers_are_both_legacy_and_v1() { + let mut rng = TestRng::new(); + + let transaction_v1_config = + TransactionV1Config::default().with_count_limits(Some(200), Some(0), Some(0), Some(10)); + + let transaction_config = TransactionConfig { + block_max_approval_count: 210, + block_gas_limit: u64::MAX, // making sure this test does not hit gas limit first + transaction_v1_config, + ..Default::default() + }; + + let chainspec = Arc::new(Chainspec { + transaction_config: transaction_config.clone(), + ..Default::default() + }); + + let mut transaction_buffer = + TransactionBuffer::new(chainspec, Config::default(), &Registry::new()).unwrap(); + + transaction_buffer + .prices + .insert(ERA_ONE, DEFAULT_MINIMUM_GAS_PRICE); + get_appendable_block( + &mut rng, + &mut transaction_buffer, + vec![MINT_LANE_ID].into_iter(), + transaction_config + .transaction_v1_config + .get_max_transaction_count(MINT_LANE_ID) as usize + + 50, + ); +} + +#[test] +fn get_appendable_block_when_standards_are_of_one_lane() { + let large_lane_id: u8 = 3; + let mut rng = TestRng::new(); + + let transaction_v1_config = + TransactionV1Config::default().with_count_limits(Some(200), Some(0), Some(0), Some(10)); + + let transaction_config = TransactionConfig { + block_max_approval_count: 210, + block_gas_limit: u64::MAX, // making sure this test does not hit gas limit first + transaction_v1_config, + ..Default::default() + }; + + let chainspec = Arc::new(Chainspec { + transaction_config: transaction_config.clone(), + ..Default::default() + }); + let mut transaction_buffer = + TransactionBuffer::new(chainspec, Config::default(), &Registry::new()).unwrap(); + transaction_buffer + .prices + .insert(ERA_ONE, 
DEFAULT_MINIMUM_GAS_PRICE); + get_appendable_block( + &mut rng, + &mut transaction_buffer, + iter::repeat_with(|| large_lane_id), + transaction_config + .transaction_v1_config + .get_max_transaction_count(large_lane_id) as usize + + 50, + ); +} + +#[test] +fn get_appendable_block_when_standards_are_both_legacy_and_v1() { + let large_lane_id: u8 = 3; + let mut rng = TestRng::new(); + + let transaction_v1_config = + TransactionV1Config::default().with_count_limits(Some(200), Some(0), Some(0), Some(10)); + + let transaction_config = TransactionConfig { + block_max_approval_count: 210, + block_gas_limit: u64::MAX, // making sure this test does not hit gas limit first + transaction_v1_config, + ..Default::default() + }; + + let chainspec = Arc::new(Chainspec { + transaction_config: transaction_config.clone(), + ..Default::default() + }); + + let mut transaction_buffer = + TransactionBuffer::new(chainspec, Config::default(), &Registry::new()).unwrap(); + + transaction_buffer + .prices + .insert(ERA_ONE, DEFAULT_MINIMUM_GAS_PRICE); + + get_appendable_block( + &mut rng, + &mut transaction_buffer, + vec![MINT_LANE_ID].into_iter(), + transaction_config + .transaction_v1_config + .get_max_transaction_count(large_lane_id) as usize + + 5, + ); +} + +#[test] +fn block_fully_saturated() { + let mut rng = TestRng::new(); + + let max_transfers = rng.gen_range(0..20); + let max_staking = rng.gen_range(0..20); + let max_install_upgrade = rng.gen_range(0..20); + let max_standard = rng.gen_range(0..20); + + let total_allowed = max_transfers + max_staking + max_install_upgrade + max_standard; + + let transaction_v1_config = TransactionV1Config::default().with_count_limits( + Some(max_transfers), + Some(max_staking), + Some(max_install_upgrade), + Some(max_standard), + ); + + let transaction_config = TransactionConfig { + transaction_v1_config, + block_max_approval_count: 210, + block_gas_limit: u64::MAX, // making sure this test does not hit gas limit first + ..Default::default() + }; + 
+ let chainspec = Chainspec { + transaction_config, + ..Default::default() + }; + + let mut transaction_buffer = + TransactionBuffer::new(Arc::new(chainspec), Config::default(), &Registry::new()).unwrap(); + + transaction_buffer + .prices + .insert(ERA_ONE, DEFAULT_MINIMUM_GAS_PRICE); + + // Try to register 10 more transactions per each category as allowed by the config. + let (transfers, stakings, install_upgrades, standards) = generate_and_register_transactions( + &mut transaction_buffer, + max_transfers + 20, + max_staking + 20, + max_install_upgrade + 20, + max_standard + 20, + &mut rng, + ); + let (transfers_hashes, stakings_hashes, install_upgrades_hashes, standards_hashes) = ( + transfers + .iter() + .map(|transaction| transaction.hash()) + .collect_vec(), + stakings + .iter() + .map(|transaction| transaction.hash()) + .collect_vec(), + install_upgrades + .iter() + .map(|transaction| transaction.hash()) + .collect_vec(), + standards + .iter() + .map(|transaction| transaction.hash()) + .collect_vec(), + ); + + // Check that we really generated the required number of transactions. + assert_eq!( + transfers.len() + stakings.len() + install_upgrades.len() + standards.len(), + total_allowed as usize + 20 * 4 + ); + + // Ensure that only 'total_allowed' transactions are proposed. + let timestamp = Timestamp::now(); + let expiry = timestamp.saturating_add(TimeDiff::from_seconds(60)); + let appendable_block = transaction_buffer.appendable_block(Timestamp::now(), ERA_ONE, expiry); + + assert_eq!( + appendable_block.transaction_hashes().len(), + total_allowed as usize + ); + + // Assert the number of proposed transaction types, block should be fully saturated. 
+ let mut proposed_transfers = 0; + let mut proposed_stakings = 0; + let mut proposed_install_upgrades = 0; + let mut proposed_standards = 0; + appendable_block + .transaction_hashes() + .iter() + .for_each(|transaction_hash| { + if transfers_hashes.contains(transaction_hash) { + proposed_transfers += 1; + } else if stakings_hashes.contains(transaction_hash) { + proposed_stakings += 1; + } else if install_upgrades_hashes.contains(transaction_hash) { + proposed_install_upgrades += 1; + } else if standards_hashes.contains(transaction_hash) { + proposed_standards += 1; + } + }); + let mut has_hit_any_limit = false; + if proposed_transfers == max_transfers { + has_hit_any_limit = true; + } + if proposed_stakings == max_staking { + has_hit_any_limit = true; + } + if proposed_install_upgrades == max_install_upgrade { + has_hit_any_limit = true; + } + if proposed_standards == max_standard { + has_hit_any_limit = true; + } + assert!(has_hit_any_limit) +} + +#[test] +fn block_not_fully_saturated() { + let mut rng = TestRng::new(); + + const MIN_COUNT: u64 = 10; + + let max_transfers = rng.gen_range(MIN_COUNT..20); + let max_staking = rng.gen_range(MIN_COUNT..20); + let max_install_upgrade = rng.gen_range(MIN_COUNT..20); + let max_standard = rng.gen_range(MIN_COUNT..20); + + let total_allowed = max_transfers + max_staking + max_install_upgrade + max_standard; + + let transaction_v1_config = TransactionV1Config::default().with_count_limits( + Some(max_transfers), + Some(max_staking), + Some(max_install_upgrade), + Some(max_standard), + ); + + let transaction_config = TransactionConfig { + transaction_v1_config, + block_max_approval_count: 210, + block_gas_limit: u64::MAX, // making sure this test does not hit gas limit first + ..Default::default() + }; + + let chainspec = Chainspec { + transaction_config, + ..Default::default() + }; + + let mut transaction_buffer = + TransactionBuffer::new(Arc::new(chainspec), Config::default(), &Registry::new()).unwrap(); + + 
transaction_buffer + .prices + .insert(ERA_ONE, DEFAULT_MINIMUM_GAS_PRICE); + + // Try to register less than max capacity per each category as allowed by the config. + let actual_transfer_count = rng.gen_range(0..MIN_COUNT - 1); + let actual_stakings_count = rng.gen_range(0..MIN_COUNT - 1); + let actual_install_upgrade_count = rng.gen_range(0..MIN_COUNT - 1); + let actual_standard_count = rng.gen_range(0..MIN_COUNT - 1); + let (transfers, stakings, install_upgrades, standards) = generate_and_register_transactions( + &mut transaction_buffer, + actual_transfer_count, + actual_stakings_count, + actual_install_upgrade_count, + actual_standard_count, + &mut rng, + ); + let (transfers_hashes, stakings_hashes, install_upgrades_hashes, standards_hashes) = ( + transfers + .iter() + .map(|transaction| transaction.hash()) + .collect_vec(), + stakings + .iter() + .map(|transaction| transaction.hash()) + .collect_vec(), + install_upgrades + .iter() + .map(|transaction| transaction.hash()) + .collect_vec(), + standards + .iter() + .map(|transaction| transaction.hash()) + .collect_vec(), + ); + + // Check that we really generated the required number of transactions. + assert_eq!( + transfers.len() + stakings.len() + install_upgrades.len() + standards.len(), + actual_transfer_count as usize + + actual_stakings_count as usize + + actual_install_upgrade_count as usize + + actual_standard_count as usize + ); + + // Ensure that not more than 'total_allowed' transactions are proposed. + let timestamp = Timestamp::now(); + let expiry = timestamp.saturating_add(TimeDiff::from_seconds(1)); + let appendable_block = transaction_buffer.appendable_block(Timestamp::now(), ERA_ONE, expiry); + assert!(appendable_block.transaction_hashes().len() <= total_allowed as usize); + + // Assert the number of proposed transaction types, block should not be fully saturated. 
+ let mut proposed_transfers = 0; + let mut proposed_stakings = 0; + let mut proposed_install_upgrades = 0; + let mut proposed_standards = 0; + appendable_block + .transaction_hashes() + .iter() + .for_each(|transaction_hash| { + if transfers_hashes.contains(transaction_hash) { + proposed_transfers += 1; + } else if stakings_hashes.contains(transaction_hash) { + proposed_stakings += 1; + } else if install_upgrades_hashes.contains(transaction_hash) { + proposed_install_upgrades += 1; + } else if standards_hashes.contains(transaction_hash) { + proposed_standards += 1; + } + }); + assert_eq!(proposed_transfers, actual_transfer_count); + assert_eq!(proposed_stakings, actual_stakings_count); + assert_eq!(proposed_install_upgrades, actual_install_upgrade_count); + assert_eq!(proposed_standards, actual_standard_count); +} + +fn generate_and_register_transactions( + transaction_buffer: &mut TransactionBuffer, + transfer_count: u64, + stakings_count: u64, + install_upgrade_count: u64, + standard_count: u64, + rng: &mut TestRng, +) -> ( + Vec, + Vec, + Vec, + Vec, +) { + let transfers: Vec<_> = (0..transfer_count) + .map(|_| create_valid_transaction(rng, MINT_LANE_ID, None, None)) + .collect(); + let stakings: Vec<_> = (0..stakings_count) + .map(|_| create_valid_transaction(rng, AUCTION_LANE_ID, None, None)) + .collect(); + let installs_upgrades: Vec<_> = (0..install_upgrade_count) + .map(|_| create_valid_transaction(rng, INSTALL_UPGRADE_LANE_ID, None, None)) + .collect(); + let standards: Vec<_> = (0..standard_count) + .map(|_| create_valid_transaction(rng, LARGE_WASM_LANE_ID, None, None)) + .collect(); + transfers + .iter() + .chain( + stakings + .iter() + .chain(installs_upgrades.iter().chain(standards.iter())), + ) + .for_each(|transaction| transaction_buffer.register_transaction(transaction.clone())); + + (transfers, stakings, installs_upgrades, standards) +} + +#[test] +fn register_transactions_and_blocks() { + let mut rng = TestRng::new(); + let mut transaction_buffer 
= TransactionBuffer::new( + Arc::new(Chainspec::default()), + Config::default(), + &Registry::new(), + ) + .unwrap(); + + transaction_buffer + .prices + .insert(ERA_ONE, DEFAULT_MINIMUM_GAS_PRICE); + + // try to register valid transactions + let num_valid_transactions: usize = rng.gen_range(50..500); + let category = rng.gen_range(0..4u8); + let valid_transactions: Vec<_> = (0..num_valid_transactions) + .map(|_| create_valid_transaction(&mut rng, category, None, None)) + .collect(); + valid_transactions + .iter() + .for_each(|transaction| transaction_buffer.register_transaction(transaction.clone())); + assert_container_sizes(&transaction_buffer, valid_transactions.len(), 0, 0); + + // register a block with transactions + let category = rng.gen_range(0..4u8); + let block_transaction: Vec<_> = (0..5) + .map(|_| create_valid_transaction(&mut rng, category, None, None)) + .collect(); + let txns: Vec<_> = block_transaction.to_vec(); + let era = rng.gen_range(0..6); + let height = era * 10 + rng.gen_range(0..10); + let is_switch = rng.gen_bool(0.1); + + let block = TestBlockBuilder::new() + .era(era) + .height(height) + .switch_block(is_switch) + .transactions(&txns) + .build(&mut rng); + + transaction_buffer.register_block(&block); + assert_container_sizes( + &transaction_buffer, + block_transaction.len() + valid_transactions.len(), + block_transaction.len(), + 0, + ); + + // try to register the transactions of the block again. Should not work since those transactions + // are dead. + block_transaction + .iter() + .for_each(|transaction| transaction_buffer.register_transaction(transaction.clone())); + assert_container_sizes( + &transaction_buffer, + block_transaction.len() + valid_transactions.len(), + block_transaction.len(), + 0, + ); + + let pre_proposal_timestamp = Timestamp::now(); + + // get an appendable block. This should put the transactions on hold. 
+ let timestamp = Timestamp::now(); + let expiry = timestamp.saturating_add(TimeDiff::from_seconds(1)); + let appendable_block = transaction_buffer.appendable_block(Timestamp::now(), ERA_ONE, expiry); + assert_eq!(transaction_buffer.hold.len(), 1); + assert_container_sizes( + &transaction_buffer, + block_transaction.len() + valid_transactions.len(), + block_transaction.len(), + appendable_block.transaction_hashes().len(), + ); + + // try to register held transactions again. + let mut held_transactions = valid_transactions + .iter() + .filter(|&transaction| { + appendable_block + .transaction_hashes() + .contains(&transaction.hash()) + }) + .cloned() + .peekable(); + assert!(held_transactions.peek().is_some()); + held_transactions + .for_each(|transaction| transaction_buffer.register_transaction(transaction.clone())); + assert_container_sizes( + &transaction_buffer, + block_transaction.len() + valid_transactions.len(), + block_transaction.len(), + appendable_block.transaction_hashes().len(), + ); + + // test if transactions held for proposed blocks which did not get finalized in time + // are eligible again + let count = rng.gen_range(1..11); + let txns: Vec<_> = iter::repeat_with(|| Transaction::Deploy(Deploy::random(&mut rng))) + .take(count) + .collect(); + let block = FinalizedBlock::random_with_specifics( + &mut rng, + EraId::from(2), + 25, + false, + pre_proposal_timestamp, + &txns, + ); + transaction_buffer.register_block_finalized(&block); + assert_container_sizes( + &transaction_buffer, + block_transaction.len() + valid_transactions.len() + block.all_transactions().count(), + block_transaction.len() + block.all_transactions().count(), + 0, + ); +} + +/// Event for the mock reactor. 
+#[derive(Debug)] +enum ReactorEvent { + TransactionBufferAnnouncement(TransactionBufferAnnouncement), + Event(#[allow(dead_code)] Event), +} + +impl From for ReactorEvent { + fn from(req: TransactionBufferAnnouncement) -> ReactorEvent { + ReactorEvent::TransactionBufferAnnouncement(req) + } +} + +impl From for ReactorEvent { + fn from(req: Event) -> ReactorEvent { + ReactorEvent::Event(req) + } +} + +struct MockReactor { + scheduler: &'static Scheduler, +} + +impl MockReactor { + fn new() -> Self { + MockReactor { + scheduler: utils::leak(Scheduler::new(QueueKind::weights(), None)), + } + } + + async fn expect_transaction_buffer_expire_announcement( + &self, + should_be_expired: &HashSet, + ) { + let ((_ancestor, reactor_event), _) = self.scheduler.pop().await; + match reactor_event { + ReactorEvent::TransactionBufferAnnouncement(TransactionsExpired(expired)) => { + let expired_set = HashSet::from_iter(expired); + assert_eq!(&expired_set, should_be_expired); + } + _ => { + unreachable!(); + } + }; + } +} + +#[tokio::test] +async fn expire_transactions_and_check_announcement_when_transactions_are_of_one_lane() { + let mut rng = TestRng::new(); + + for category in all_categories() { + let mut transaction_buffer = TransactionBuffer::new( + Arc::new(Chainspec::default()), + Config::default(), + &Registry::new(), + ) + .unwrap(); + transaction_buffer + .prices + .insert(ERA_ONE, DEFAULT_MINIMUM_GAS_PRICE); + + let reactor = MockReactor::new(); + let event_queue_handle = EventQueueHandle::without_shutdown(reactor.scheduler); + let effect_builder = EffectBuilder::new(event_queue_handle); + + // generate and register some already expired transactions + let ttl = TimeDiff::from_seconds(rng.gen_range(30..300)); + let past_timestamp = Timestamp::now() + .saturating_sub(ttl) + .saturating_sub(TimeDiff::from_seconds(5)); + + let num_transactions: usize = rng.gen_range(5..50); + let expired_transactions: Vec<_> = (0..num_transactions) + .map(|_| create_valid_transaction(&mut 
rng, category, Some(past_timestamp), Some(ttl))) + .collect(); + + expired_transactions + .iter() + .for_each(|transaction| transaction_buffer.register_transaction(transaction.clone())); + assert_container_sizes(&transaction_buffer, expired_transactions.len(), 0, 0); + + // include the last expired transaction in a block and register it + let era = rng.gen_range(0..6); + let expired_txns: Vec<_> = expired_transactions.to_vec(); + let block = TestBlockBuilder::new() + .era(era) + .height(era * 10 + rng.gen_range(0..10)) + .transactions(expired_txns.last()) + .build(&mut rng); + + transaction_buffer.register_block(&block); + assert_container_sizes(&transaction_buffer, expired_transactions.len(), 1, 0); + + // generate and register some valid transactions + let transactions: Vec<_> = (0..num_transactions) + .map(|_| create_valid_transaction(&mut rng, category, None, None)) + .collect(); + transactions + .iter() + .for_each(|transaction| transaction_buffer.register_transaction(transaction.clone())); + assert_container_sizes( + &transaction_buffer, + transactions.len() + expired_transactions.len(), + 1, + 0, + ); + + // expire transactions and check that they were announced as expired + let mut effects = transaction_buffer.expire(effect_builder); + tokio::spawn(effects.remove(0)).await.unwrap(); + + // the transactions which should be announced as expired are all the expired ones not in a + // block, i.e. 
all but the last one of `expired_transactions` + let expired_transaction_hashes: HashSet<_> = expired_transactions + .iter() + .take(expired_transactions.len() - 1) + .map(|transaction| transaction.hash()) + .collect(); + reactor + .expect_transaction_buffer_expire_announcement(&expired_transaction_hashes) + .await; + + // the valid transactions should still be in the buffer + assert_container_sizes(&transaction_buffer, transactions.len(), 0, 0); + } +} + +#[tokio::test] +async fn expire_transactions_and_check_announcement_when_transactions_are_of_random_categories() { + let mut rng = TestRng::new(); + + let mut transaction_buffer = TransactionBuffer::new( + Arc::new(Chainspec::default()), + Config::default(), + &Registry::new(), + ) + .unwrap(); + transaction_buffer + .prices + .insert(ERA_ONE, DEFAULT_MINIMUM_GAS_PRICE); + + let reactor = MockReactor::new(); + let event_queue_handle = EventQueueHandle::without_shutdown(reactor.scheduler); + let effect_builder = EffectBuilder::new(event_queue_handle); + + // generate and register some already expired transactions + let ttl = TimeDiff::from_seconds(rng.gen_range(30..300)); + let past_timestamp = Timestamp::now() + .saturating_sub(ttl) + .saturating_sub(TimeDiff::from_seconds(5)); + + let num_transactions: usize = rng.gen_range(5..50); + let expired_transactions: Vec<_> = (0..num_transactions) + .map(|_| { + let random_lane = *all_categories().choose(&mut rng).unwrap(); + create_valid_transaction(&mut rng, random_lane, Some(past_timestamp), Some(ttl)) + }) + .collect(); + + expired_transactions + .iter() + .for_each(|transaction| transaction_buffer.register_transaction(transaction.clone())); + assert_container_sizes(&transaction_buffer, expired_transactions.len(), 0, 0); + + // include the last expired transaction in a block and register it + let era = rng.gen_range(0..6); + let expired_txns: Vec<_> = expired_transactions.to_vec(); + let block = TestBlockBuilder::new() + .era(era) + .height(era * 10 + 
rng.gen_range(0..10)) + .transactions(expired_txns.last()) + .build(&mut rng); + + transaction_buffer.register_block(&block); + assert_container_sizes(&transaction_buffer, expired_transactions.len(), 1, 0); + + // generate and register some valid transactions + let transactions: Vec<_> = (0..num_transactions) + .map(|_| { + let random_lane = *all_categories().choose(&mut rng).unwrap(); + create_valid_transaction(&mut rng, random_lane, None, None) + }) + .collect(); + transactions + .iter() + .for_each(|transaction| transaction_buffer.register_transaction(transaction.clone())); + assert_container_sizes( + &transaction_buffer, + transactions.len() + expired_transactions.len(), + 1, + 0, + ); + + // expire transactions and check that they were announced as expired + let mut effects = transaction_buffer.expire(effect_builder); + tokio::spawn(effects.remove(0)).await.unwrap(); + + // the transactions which should be announced as expired are all the expired ones not in a + // block, i.e. all but the last one of `expired_transactions` + let expired_transaction_hashes: HashSet<_> = expired_transactions + .iter() + .take(expired_transactions.len() - 1) + .map(|transaction| transaction.hash()) + .collect(); + reactor + .expect_transaction_buffer_expire_announcement(&expired_transaction_hashes) + .await; + + // the valid transactions should still be in the buffer + assert_container_sizes(&transaction_buffer, transactions.len(), 0, 0); +} + +fn make_test_chainspec(max_standard_count: u64, max_mint_count: u64) -> Arc { + // These tests uses legacy deploys which always go on the Large lane + const WASM_LANE: u64 = 3; // Large + let large_lane = vec![ + WASM_LANE, + 1_048_576, + 1024, + DEFAULT_LARGE_TRANSACTION_GAS_LIMIT, + max_standard_count, + ]; + let mut transaction_v1_config = TransactionV1Config::default(); + transaction_v1_config.native_mint_lane = + TransactionLaneDefinition::try_from(vec![0, 1024, 1024, 65_000_000_000, max_mint_count]) + .unwrap(); + 
transaction_v1_config.set_wasm_lanes(vec![ + TransactionLaneDefinition::try_from(large_lane).unwrap() + ]); + + let transaction_config = TransactionConfig { + transaction_v1_config, + block_max_approval_count: (max_standard_count + max_mint_count) as u32, + ..Default::default() + }; + Arc::new(Chainspec { + transaction_config, + ..Default::default() + }) +} + +#[test] +fn should_have_one_bucket_per_distinct_body_hash() { + let mut rng = TestRng::new(); + let max_standard_count = 2; + let max_mint_count = 0; + + let chainspec = make_test_chainspec(max_standard_count, max_mint_count); + + let mut transaction_buffer = + TransactionBuffer::new(chainspec, Config::default(), &Registry::new()).unwrap(); + transaction_buffer + .prices + .insert(ERA_ONE, DEFAULT_MINIMUM_GAS_PRICE); + + let secret_key1 = SecretKey::random(&mut rng); + let ttl = TimeDiff::from_seconds(30); + let deploy1 = Deploy::random_contract_by_name( + &mut rng, + Some(secret_key1), + None, + None, + Some(Timestamp::now()), + Some(ttl), + ); + let deploy1_body_hash = *deploy1.header().body_hash(); + transaction_buffer.register_transaction(deploy1.into()); + + let secret_key2 = SecretKey::random(&mut rng); // different signer + let deploy2 = Deploy::random_contract_by_name( + &mut rng, + Some( + SecretKey::from_pem(secret_key2.to_pem().expect("should pemify")) + .expect("should un-pemify"), + ), + None, + None, + Some(Timestamp::now()), // different timestamp + Some(ttl), + ); + assert_eq!( + &deploy1_body_hash, + deploy2.header().body_hash(), + "1 & 2 should have same body hashes" + ); + transaction_buffer.register_transaction(deploy2.into()); + + let buckets = transaction_buffer.buckets(GAS_PRICE_TOLERANCE); + assert!(buckets.len() == 1, "should be 1 bucket"); + + let deploy3 = Deploy::random_contract_by_name( + &mut rng, + Some( + SecretKey::from_pem(secret_key2.to_pem().expect("should pemify")) + .expect("should un-pemify"), + ), + None, + None, + Some(Timestamp::now()), // different timestamp + 
Some(ttl), + ); + assert_eq!( + &deploy1_body_hash, + deploy3.header().body_hash(), + "1 & 3 should have same body hashes" + ); + transaction_buffer.register_transaction(deploy3.into()); + let buckets = transaction_buffer.buckets(GAS_PRICE_TOLERANCE); + assert!(buckets.len() == 1, "should still be 1 bucket"); + + let deploy4 = Deploy::random_contract_by_name( + &mut rng, + Some( + SecretKey::from_pem(secret_key2.to_pem().expect("should pemify")) + .expect("should un-pemify"), + ), + Some("some other contract name".to_string()), + None, + Some(Timestamp::now()), // different timestamp + Some(ttl), + ); + assert_ne!( + &deploy1_body_hash, + deploy4.header().body_hash(), + "1 & 4 should have different body hashes" + ); + transaction_buffer.register_transaction(deploy4.into()); + let buckets = transaction_buffer.buckets(GAS_PRICE_TOLERANCE); + assert!(buckets.len() == 2, "should be 2 buckets"); + + let transfer5 = Deploy::random_valid_native_transfer_with_timestamp_and_ttl( + &mut rng, + Timestamp::now(), + ttl, + ); + assert_ne!( + &deploy1_body_hash, + transfer5.header().body_hash(), + "1 & 5 should have different body hashes" + ); + transaction_buffer.register_transaction(transfer5.into()); + let buckets = transaction_buffer.buckets(GAS_PRICE_TOLERANCE); + assert!(buckets.len() == 3, "should be 3 buckets"); +} + +#[test] +fn should_have_diverse_proposable_blocks_with_stocked_buffer() { + let rng = &mut TestRng::new(); + let max_standard_count = 50; + let max_mint_count = 5; + let chainspec = make_test_chainspec(max_standard_count, max_mint_count); + let mut transaction_buffer = + TransactionBuffer::new(chainspec, Config::default(), &Registry::new()).unwrap(); + transaction_buffer + .prices + .insert(ERA_ONE, DEFAULT_MINIMUM_GAS_PRICE); + + let cap = (max_standard_count * 100) as usize; + + let secret_keys: Vec = iter::repeat_with(|| SecretKey::random(rng)) + .take(10) + .collect(); + + let contract_names = ["a", "b", "c", "d", "e"]; + let contract_entry_points = 
["foo", "bar"]; + + fn ttl(rng: &mut TestRng) -> TimeDiff { + TimeDiff::from_seconds(rng.gen_range(60..3600)) + } + + let mut last_timestamp = Timestamp::now(); + for i in 0..cap { + let ttl = ttl(rng); + let secret_key = Some( + SecretKey::from_pem( + secret_keys[rng.gen_range(0..secret_keys.len())] + .to_pem() + .expect("should pemify"), + ) + .expect("should un-pemify"), + ); + let contract_name = Some(contract_names[rng.gen_range(0..contract_names.len())].into()); + let contract_entry_point = + Some(contract_entry_points[rng.gen_range(0..contract_entry_points.len())].into()); + let deploy = Deploy::random_contract_by_name( + rng, + secret_key, + contract_name, + contract_entry_point, + Some(last_timestamp), + Some(ttl), + ); + transaction_buffer.register_transaction(deploy.into()); + assert_eq!( + transaction_buffer.buffer.len(), + i + 1, + "failed to buffer deploy {i}" + ); + last_timestamp += TimeDiff::from_millis(1); + } + + for i in 0..max_mint_count { + let ttl = ttl(rng); + transaction_buffer.register_transaction( + Deploy::random_valid_native_transfer_with_timestamp_and_ttl(rng, last_timestamp, ttl) + .into(), + ); + assert_eq!( + transaction_buffer.buffer.len(), + i as usize + 1 + cap, + "failed to buffer transfer {i}" + ); + last_timestamp += TimeDiff::from_millis(1); + } + + let expected_count = cap + (max_mint_count as usize); + assert_container_sizes(&transaction_buffer, expected_count, 0, 0); + + let buckets1: HashMap<_, _> = transaction_buffer + .buckets(GAS_PRICE_TOLERANCE) + .into_iter() + .map(|(digest, footprints)| { + ( + *digest, + footprints + .into_iter() + .map(|(hash, footprint)| (hash, footprint.clone())) + .collect_vec(), + ) + }) + .collect(); + assert!( + buckets1.len() > 1, + "should be multiple buckets with this much state" + ); + let buckets2: HashMap<_, _> = transaction_buffer + .buckets(GAS_PRICE_TOLERANCE) + .into_iter() + .map(|(digest, footprints)| { + ( + *digest, + footprints + .into_iter() + .map(|(hash, footprint)| (hash, 
footprint.clone())) + .collect_vec(), + ) + }) + .collect(); + + assert_eq!( + buckets1, buckets2, + "with same state should get same buckets every time" + ); + + // while it is not impossible to get identical appendable blocks over an unchanged buffer + // using this strategy, it should be very unlikely...the below brute forces a check for this + let expected_eq_tolerance = 1; + let mut actual_eq_count = 0; + let expiry = last_timestamp.saturating_add(TimeDiff::from_seconds(240)); + for _ in 0..10 { + let appendable1 = transaction_buffer.appendable_block(last_timestamp, ERA_ONE, expiry); + let appendable2 = transaction_buffer.appendable_block(last_timestamp, ERA_ONE, expiry); + if appendable1 == appendable2 { + actual_eq_count += 1; + } + } + assert!( + actual_eq_count <= expected_eq_tolerance, + "{} matches exceeded tolerance of {}", + actual_eq_count, + expected_eq_tolerance + ); +} + +#[test] +fn should_be_empty_if_no_time_until_expiry() { + let mut rng = TestRng::new(); + let max_standard_count = 1; + let max_mint_count = 1; + let chainspec = make_test_chainspec(max_standard_count, max_mint_count); + let mut transaction_buffer = + TransactionBuffer::new(chainspec, Config::default(), &Registry::new()).unwrap(); + transaction_buffer + .prices + .insert(ERA_ONE, DEFAULT_MINIMUM_GAS_PRICE); + + let secret_key1 = SecretKey::random(&mut rng); + let ttl = TimeDiff::from_seconds(30); + let deploy1 = Deploy::random_contract_by_name( + &mut rng, + Some(secret_key1), + None, + None, + Some(Timestamp::now()), + Some(ttl), + ); + let deploy1_body_hash = *deploy1.header().body_hash(); + transaction_buffer.register_transaction(deploy1.into()); + + let buckets = transaction_buffer.buckets(GAS_PRICE_TOLERANCE); + assert!(buckets.len() == 1, "should be 1 buckets"); + + let transfer2 = Deploy::random_valid_native_transfer_with_timestamp_and_ttl( + &mut rng, + Timestamp::now(), + ttl, + ); + assert_ne!( + &deploy1_body_hash, + transfer2.header().body_hash(), + "1 & 2 should have 
different body hashes" + ); + transaction_buffer.register_transaction(transfer2.into()); + let buckets = transaction_buffer.buckets(GAS_PRICE_TOLERANCE); + assert!(buckets.len() == 2, "should be 2 buckets"); + + let timestamp = Timestamp::now(); + let appendable = transaction_buffer.appendable_block(timestamp, ERA_ONE, timestamp); + let count = appendable.transaction_count(); + assert!(count == 0, "expected 0 found {}", count); + + // logic should tolerate invalid expiry + let appendable = transaction_buffer.appendable_block( + timestamp, + ERA_ONE, + timestamp.saturating_sub(TimeDiff::from_millis(1)), + ); + let count = appendable.transaction_count(); + assert!(count == 0, "expected 0 found {}", count); +} + +fn register_random_deploys_unique_hashes( + transaction_buffer: &mut TransactionBuffer, + num_deploys: usize, + rng: &mut TestRng, +) { + let deploys = iter::repeat_with(|| { + let name = format!("{}", rng.gen::()); + let call = format!("{}", rng.gen::()); + Deploy::random_contract_by_name( + rng, + None, + Some(name), + Some(call), + Some(Timestamp::now()), // different timestamp + None, + ) + }) + .take(num_deploys); + for deploy in deploys { + transaction_buffer.register_transaction(deploy.into()); + } +} + +fn register_random_deploys_same_hash( + transaction_buffer: &mut TransactionBuffer, + num_deploys: usize, + rng: &mut TestRng, +) { + let deploys = iter::repeat_with(|| { + let name = "test".to_owned(); + let call = "test".to_owned(); + Deploy::random_contract_by_name( + rng, + None, + Some(name), + Some(call), + Some(Timestamp::now()), // different timestamp + None, + ) + }) + .take(num_deploys); + for deploy in deploys { + transaction_buffer.register_transaction(deploy.into()); + } +} + +#[test] +fn test_buckets_single_hash() { + let mut rng = TestRng::new(); + let max_standard_count = 100; + let max_mint_count = 1000; + let chainspec = make_test_chainspec(max_standard_count, max_mint_count); + let mut transaction_buffer = + 
TransactionBuffer::new(chainspec, Config::default(), &Registry::new()).unwrap(); + transaction_buffer + .prices + .insert(ERA_ONE, DEFAULT_MINIMUM_GAS_PRICE); + + register_random_deploys_same_hash(&mut transaction_buffer, 64000, &mut rng); + + let _block = transaction_buffer.appendable_block( + Timestamp::now(), + ERA_ONE, + Timestamp::now() + TimeDiff::from_millis(16384 / 6), + ); +} + +#[test] +fn test_buckets_unique_hashes() { + let mut rng = TestRng::new(); + let max_standard_count = 100; + let max_mint_count = 1000; + let chainspec = make_test_chainspec(max_standard_count, max_mint_count); + let mut transaction_buffer = + TransactionBuffer::new(chainspec, Config::default(), &Registry::new()).unwrap(); + transaction_buffer + .prices + .insert(ERA_ONE, DEFAULT_MINIMUM_GAS_PRICE); + + register_random_deploys_unique_hashes(&mut transaction_buffer, 64000, &mut rng); + + let _block = transaction_buffer.appendable_block( + Timestamp::now(), + ERA_ONE, + Timestamp::now() + TimeDiff::from_millis(16384 / 6), + ); +} + +#[test] +fn test_buckets_mixed_load() { + let mut rng = TestRng::new(); + let max_standard_count = 100; + let max_mint_count = 1000; + let chainspec = make_test_chainspec(max_standard_count, max_mint_count); + let mut transaction_buffer = + TransactionBuffer::new(chainspec, Config::default(), &Registry::new()).unwrap(); + transaction_buffer + .prices + .insert(ERA_ONE, DEFAULT_MINIMUM_GAS_PRICE); + + register_random_deploys_unique_hashes(&mut transaction_buffer, 60000, &mut rng); + register_random_deploys_same_hash(&mut transaction_buffer, 4000, &mut rng); + + let _block = transaction_buffer.appendable_block( + Timestamp::now(), + ERA_ONE, + Timestamp::now() + TimeDiff::from_millis(16384 / 6), + ); +} diff --git a/node/src/components/upgrade_watcher.rs b/node/src/components/upgrade_watcher.rs new file mode 100644 index 0000000000..e2fba63c4f --- /dev/null +++ b/node/src/components/upgrade_watcher.rs @@ -0,0 +1,648 @@ +//! Chainspec loader component. +//! 
+//! The chainspec loader initializes a node by reading information from the chainspec or an +//! upgrade_point, and committing it to the permanent storage. +//! +//! See +//! +//! for full details. + +use std::{ + fmt::{self, Display, Formatter}, + fs, io, + path::{Path, PathBuf}, + str::FromStr, +}; + +use datasize::DataSize; +use derive_more::From; +use serde::{Deserialize, Serialize}; +use thiserror::Error; +use tokio::task; +use tracing::{debug, error, info, trace, warn}; + +use casper_types::{ + file_utils::{self, ReadFileError}, + Chainspec, EraId, NextUpgrade, ProtocolConfig, ProtocolVersion, TimeDiff, +}; + +use crate::{ + components::{Component, ComponentState, InitializedComponent}, + effect::{ + announcements::UpgradeWatcherAnnouncement, requests::UpgradeWatcherRequest, EffectBuilder, + EffectExt, Effects, + }, + reactor::main_reactor::MainEvent, + utils::chain_specification::parse_toml::CHAINSPEC_FILENAME, + NodeRng, +}; + +const COMPONENT_NAME: &str = "upgrade_watcher"; + +const DEFAULT_UPGRADE_CHECK_INTERVAL: &str = "30sec"; + +#[derive(Copy, Clone, DataSize, Debug, Deserialize, Serialize)] +pub struct Config { + /// How often to scan file system for available upgrades. + pub upgrade_check_interval: TimeDiff, +} + +impl Default for Config { + fn default() -> Self { + Config { + upgrade_check_interval: DEFAULT_UPGRADE_CHECK_INTERVAL.parse().unwrap(), + } + } +} + +/// `ChainspecHandler` events. +#[derive(Debug, From, Serialize)] +pub(crate) enum Event { + /// Start checking for installed upgrades. + Initialize, + #[from] + Request(UpgradeWatcherRequest), + /// Check config dir to see if an upgrade activation point is available, and if so announce it. + CheckForNextUpgrade, + /// If the result of checking for an upgrade is successful, it is passed here. 
+ GotNextUpgrade(Option), +} + +impl Display for Event { + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + match self { + Event::Initialize => { + write!(formatter, "start checking for installed upgrades") + } + Event::Request(_) => { + write!(formatter, "upgrade watcher request") + } + Event::CheckForNextUpgrade => { + write!(formatter, "check for next upgrade") + } + Event::GotNextUpgrade(Some(next_upgrade)) => { + write!(formatter, "got {}", next_upgrade) + } + Event::GotNextUpgrade(None) => { + write!(formatter, "no upgrade detected") + } + } + } +} + +#[derive(Debug, Error)] +pub(crate) enum Error { + /// Error while decoding the chainspec from TOML format. + #[error("decoding from TOML error: {0}")] + DecodingFromToml(#[from] toml::de::Error), + + #[error("chainspec directory does not have a parent")] + NoChainspecDirParent, + + /// Error loading the upgrade point. + #[error("could not load upgrade point: {0}")] + LoadUpgradePoint(ReadFileError), + + /// Failed to read the given directory. + #[error("failed to read dir {}: {error}", dir.display())] + ReadDir { + /// The directory which could not be read. + dir: PathBuf, + /// The underlying error. + error: io::Error, + }, + + /// No subdirectory representing a semver version was found in the given directory. + #[error("failed to get a valid version from subdirs in {}", dir.display())] + NoVersionSubdirFound { + /// The searched directory. + dir: PathBuf, + }, +} + +#[derive(Clone, DataSize, Debug)] +pub(crate) struct UpgradeWatcher { + current_version: ProtocolVersion, + config: Config, + /// The path to the folder where all chainspec and upgrade_point files will be stored in + /// subdirs corresponding to their versions. 
+ root_dir: PathBuf, + state: ComponentState, + next_upgrade: Option, +} + +impl UpgradeWatcher { + pub(crate) fn new>( + chainspec: &Chainspec, + config: Config, + chainspec_dir: P, + ) -> Result { + let root_dir = chainspec_dir + .as_ref() + .parent() + .map(|path| path.to_path_buf()) + .ok_or(Error::NoChainspecDirParent)?; + + let current_version = chainspec.protocol_config.version; + let next_upgrade = next_upgrade(root_dir.clone(), current_version); + + let upgrade_watcher = UpgradeWatcher { + current_version, + config, + root_dir, + state: ComponentState::Uninitialized, + next_upgrade, + }; + + Ok(upgrade_watcher) + } + + pub(crate) fn should_upgrade_after(&self, era_id: EraId) -> bool { + self.next_upgrade + .as_ref() + .is_some_and(|upgrade| upgrade.activation_point().should_upgrade(&era_id)) + } + + pub(crate) fn next_upgrade_activation_point(&self) -> Option { + self.next_upgrade + .map(|next_upgrade| next_upgrade.activation_point().era_id()) + } + + fn start_checking_for_upgrades( + &mut self, + effect_builder: EffectBuilder, + ) -> Effects + where + REv: From + Send, + { + if self.state != ComponentState::Initializing { + return Effects::new(); + } + >::set_state(self, ComponentState::Initialized); + self.check_for_next_upgrade(effect_builder) + } + + fn check_for_next_upgrade(&self, effect_builder: EffectBuilder) -> Effects + where + REv: From + Send, + { + let root_dir = self.root_dir.clone(); + let current_version = self.current_version; + let mut effects = async move { + let maybe_next_upgrade = + task::spawn_blocking(move || next_upgrade(root_dir, current_version)) + .await + .unwrap_or_else(|error| { + warn!(%error, "failed to join tokio task"); + None + }); + effect_builder + .upgrade_watcher_announcement(maybe_next_upgrade) + .await + } + .ignore(); + + effects.extend( + effect_builder + .set_timeout(self.config.upgrade_check_interval.into()) + .event(|_| Event::CheckForNextUpgrade), + ); + + effects + } + + fn handle_got_next_upgrade( + &mut 
self, + maybe_next_upgrade: Option, + ) -> Effects { + trace!("got {:?}", maybe_next_upgrade); + if self.next_upgrade != maybe_next_upgrade { + let new_point = match &maybe_next_upgrade { + Some(next_upgrade) => next_upgrade.to_string(), + None => "none".to_string(), + }; + let current_point = match &self.next_upgrade { + Some(next_upgrade) => next_upgrade.to_string(), + None => "none".to_string(), + }; + info!( + %new_point, + %current_point, + "changing upgrade activation point" + ); + } + + self.next_upgrade = maybe_next_upgrade; + Effects::new() + } +} + +impl Component for UpgradeWatcher +where + REv: From + From + Send, +{ + type Event = Event; + + fn handle_event( + &mut self, + effect_builder: EffectBuilder, + _rng: &mut NodeRng, + event: Self::Event, + ) -> Effects { + match &self.state { + ComponentState::Fatal(msg) => { + error!( + msg, + ?event, + name = >::name(self), + "should not handle this event when this component has fatal error" + ); + Effects::new() + } + ComponentState::Uninitialized => { + warn!( + ?event, + name = >::name(self), + "should not handle this event when component is uninitialized" + ); + Effects::new() + } + ComponentState::Initializing => match event { + Event::Initialize => self.start_checking_for_upgrades(effect_builder), + Event::Request(_) | Event::CheckForNextUpgrade | Event::GotNextUpgrade(_) => { + warn!( + ?event, + name = >::name(self), + "should not handle this event when component is pending initialization" + ); + Effects::new() + } + }, + ComponentState::Initialized => match event { + Event::Initialize => { + error!( + ?event, + name = >::name(self), + "component already initialized" + ); + Effects::new() + } + Event::Request(request) => request.0.respond(self.next_upgrade).ignore(), + Event::CheckForNextUpgrade => self.check_for_next_upgrade(effect_builder), + Event::GotNextUpgrade(next_upgrade) => self.handle_got_next_upgrade(next_upgrade), + }, + } + } + + fn name(&self) -> &str { + COMPONENT_NAME + } +} + +impl 
InitializedComponent for UpgradeWatcher +where + REv: From + From + Send, +{ + fn state(&self) -> &ComponentState { + &self.state + } + + fn set_state(&mut self, new_state: ComponentState) { + info!( + ?new_state, + name = >::name(self), + "component state changed" + ); + + self.state = new_state; + } +} + +/// This struct can be parsed from a TOML-encoded chainspec file. It means that as the +/// chainspec format changes over versions, as long as we maintain the protocol config in this form +/// in the chainspec file, it can continue to be parsed as an `UpgradePoint`. +#[derive(Deserialize)] +struct UpgradePoint { + #[serde(rename = "protocol")] + pub(crate) protocol_config: ProtocolConfig, +} + +impl UpgradePoint { + /// Parses a chainspec file at the given path as an `UpgradePoint`. + fn from_chainspec_path + fmt::Debug>(path: P) -> Result { + let bytes = file_utils::read_file(path.as_ref().join(CHAINSPEC_FILENAME)) + .map_err(Error::LoadUpgradePoint)?; + Ok(toml::from_str(std::str::from_utf8(&bytes).unwrap())?) + } +} + +fn dir_name_from_version(version: ProtocolVersion) -> PathBuf { + PathBuf::from(version.to_string().replace('.', "_")) +} + +/// Iterates the given path, returning the subdir representing the immediate next SemVer version +/// after `current_version`. If no higher version than `current_version` is found, then +/// `current_version` is returned. +/// +/// Subdir names should be semvers with dots replaced with underscores. +fn next_installed_version( + dir: &Path, + current_version: ProtocolVersion, +) -> Result { + let max_version = ProtocolVersion::from_parts(u32::MAX, u32::MAX, u32::MAX); + + let mut next_version = max_version; + let mut read_version = false; + for entry in fs::read_dir(dir).map_err(|error| Error::ReadDir { + dir: dir.to_path_buf(), + error, + })? 
{ + let path = match entry { + Ok(dir_entry) => dir_entry.path(), + Err(error) => { + debug!(dir=%dir.display(), %error, "bad entry while reading dir"); + continue; + } + }; + + let subdir_name = match path.file_name() { + Some(name) => name.to_string_lossy().replace('_', "."), + None => continue, + }; + + let version = match ProtocolVersion::from_str(&subdir_name) { + Ok(version) => version, + Err(error) => { + trace!(%error, path=%path.display(), "UpgradeWatcher: failed to get a version"); + continue; + } + }; + + if version > current_version && version < next_version { + next_version = version; + } + read_version = true; + } + + if !read_version { + return Err(Error::NoVersionSubdirFound { + dir: dir.to_path_buf(), + }); + } + + if next_version == max_version { + next_version = current_version; + } + + Ok(next_version) +} + +/// Uses `next_installed_version()` to find the next versioned subdir. If it exists, reads the +/// UpgradePoint file from there and returns its version and activation point. Returns `None` if +/// there is no greater version available, or if any step errors. 
+fn next_upgrade(dir: PathBuf, current_version: ProtocolVersion) -> Option { + let next_version = match next_installed_version(&dir, current_version) { + Ok(version) => version, + Err(_error) => { + #[cfg(not(test))] + warn!(dir=%dir.display(), error=%_error, "failed to get a valid version from subdirs"); + return None; + } + }; + + if next_version <= current_version { + return None; + } + + let subdir = dir.join(dir_name_from_version(next_version)); + let upgrade_point = match UpgradePoint::from_chainspec_path(&subdir) { + Ok(upgrade_point) => upgrade_point, + Err(error) => { + debug!(subdir=%subdir.display(), %error, "failed to load upgrade point"); + return None; + } + }; + + if upgrade_point.protocol_config.version != next_version { + warn!( + upgrade_point_version=%upgrade_point.protocol_config.version, + subdir_version=%next_version, + "next chainspec installed to wrong subdir" + ); + return None; + } + + Some(NextUpgrade::from(upgrade_point.protocol_config)) +} + +#[cfg(test)] +mod tests { + use casper_types::{testing::TestRng, ActivationPoint, ChainspecRawBytes}; + + use super::*; + use crate::{logging, utils::Loadable}; + + const V0_0_0: ProtocolVersion = ProtocolVersion::from_parts(0, 0, 0); + const V0_9_9: ProtocolVersion = ProtocolVersion::from_parts(0, 9, 9); + const V1_0_0: ProtocolVersion = ProtocolVersion::from_parts(1, 0, 0); + const V1_0_3: ProtocolVersion = ProtocolVersion::from_parts(1, 0, 3); + const V1_2_3: ProtocolVersion = ProtocolVersion::from_parts(1, 2, 3); + const V2_2_2: ProtocolVersion = ProtocolVersion::from_parts(2, 2, 2); + + #[test] + fn should_get_next_installed_version() { + let tempdir = tempfile::tempdir().expect("should create temp dir"); + + let get_next_version = |current_version: ProtocolVersion| { + next_installed_version(tempdir.path(), current_version).unwrap() + }; + + // Should get next version (major version bump). 
+ fs::create_dir(tempdir.path().join("1_0_0")).unwrap(); + assert_eq!(get_next_version(V0_0_0), V1_0_0); + + // Should get next version (minor version bump). + fs::create_dir(tempdir.path().join("1_2_3")).unwrap(); + assert_eq!(get_next_version(V1_0_0), V1_2_3); + + // Should report current as next version if only lower versions staged. + fs::create_dir(tempdir.path().join("1_0_3")).unwrap(); + assert_eq!(get_next_version(V1_2_3), V1_2_3); + + // Should report lower of two higher versions. + fs::create_dir(tempdir.path().join("2_2_2")).unwrap(); + fs::create_dir(tempdir.path().join("3_3_3")).unwrap(); + assert_eq!(get_next_version(V1_2_3), V2_2_2); + + // If higher versions unstaged, should report current again. + fs::remove_dir_all(tempdir.path().join("2_2_2")).unwrap(); + fs::remove_dir_all(tempdir.path().join("3_3_3")).unwrap(); + assert_eq!(get_next_version(V1_2_3), V1_2_3); + } + + #[test] + fn should_ignore_invalid_versions() { + let tempdir = tempfile::tempdir().expect("should create temp dir"); + + // Executes `next_installed_version()` and asserts the resulting error as a string starts + // with the given text. + let min_version = V0_0_0; + let assert_error_starts_with = |path: &Path, expected: String| { + let error_msg = next_installed_version(path, min_version) + .unwrap_err() + .to_string(); + assert!( + error_msg.starts_with(&expected), + "Error message expected to start with \"{}\"\nActual error message: \"{}\"", + expected, + error_msg + ); + }; + + // Try with a non-existent dir. + let non_existent_dir = Path::new("not_a_dir"); + assert_error_starts_with( + non_existent_dir, + format!("failed to read dir {}", non_existent_dir.display()), + ); + + // Try with a dir which has no subdirs. + assert_error_starts_with( + tempdir.path(), + format!( + "failed to get a valid version from subdirs in {}", + tempdir.path().display() + ), + ); + + // Try with a dir which has one subdir which is not a valid version representation. 
+ fs::create_dir(tempdir.path().join("not_a_version")).unwrap(); + assert_error_starts_with( + tempdir.path(), + format!( + "failed to get a valid version from subdirs in {}", + tempdir.path().display() + ), + ); + + // Try with a dir which has a valid and invalid subdir - the invalid one should be ignored. + fs::create_dir(tempdir.path().join("1_2_3")).unwrap(); + assert_eq!( + next_installed_version(tempdir.path(), min_version).unwrap(), + V1_2_3 + ); + } + + /// Creates the appropriate subdir in `root_dir`, and adds a random chainspec.toml with the + /// protocol_config.version field set to `version`. + fn install_chainspec( + rng: &mut TestRng, + root_dir: &Path, + version: ProtocolVersion, + ) -> Chainspec { + let mut chainspec = Chainspec::random(rng); + chainspec.protocol_config.version = version; + + let subdir = root_dir.join(dir_name_from_version(version)); + fs::create_dir(&subdir).unwrap(); + + let path = subdir.join(CHAINSPEC_FILENAME); + + let pretty = toml::to_string_pretty(&chainspec); + fs::write(path, pretty.expect("should encode to toml")).expect("should install chainspec"); + chainspec + } + + #[test] + fn should_get_next_upgrade() { + let tempdir = tempfile::tempdir().expect("should create temp dir"); + + let next_point = |current_version: ProtocolVersion| { + next_upgrade(tempdir.path().to_path_buf(), current_version).unwrap() + }; + + let mut rng = crate::new_rng(); + + let mut current = ProtocolVersion::from_parts(1, 9, 9); + let v2_0_0 = ProtocolVersion::from_parts(2, 0, 0); + let chainspec_v2_0_0 = install_chainspec(&mut rng, tempdir.path(), v2_0_0); + assert_eq!(next_point(current), chainspec_v2_0_0.protocol_config.into()); + + current = v2_0_0; + let v2_0_3 = ProtocolVersion::from_parts(2, 0, 3); + let chainspec_v2_0_3 = install_chainspec(&mut rng, tempdir.path(), v2_0_3); + assert_eq!(next_point(current), chainspec_v2_0_3.protocol_config.into()); + + let chainspec_v1_0_0 = install_chainspec(&mut rng, tempdir.path(), V1_0_0); + 
assert_eq!(next_point(V0_9_9), chainspec_v1_0_0.protocol_config.into()); + + let chainspec_v1_0_3 = install_chainspec(&mut rng, tempdir.path(), V1_0_3); + assert_eq!(next_point(V1_0_0), chainspec_v1_0_3.protocol_config.into()); + } + + #[test] + fn should_not_get_old_or_invalid_upgrade() { + let tempdir = tempfile::tempdir().expect("should create temp dir"); + + let maybe_next_point = |current_version: ProtocolVersion| { + next_upgrade(tempdir.path().to_path_buf(), current_version) + }; + + let mut rng = crate::new_rng(); + + // Check we return `None` if there are no version subdirs. + assert!(maybe_next_point(V1_0_0).is_none()); + + // Check we return `None` if current_version == next_version. + let chainspec_v1_0_0 = install_chainspec(&mut rng, tempdir.path(), V1_0_0); + assert!(maybe_next_point(V1_0_0).is_none()); + + // Check we return `None` if current_version > next_version. + assert!(maybe_next_point(V2_2_2).is_none()); + + // Check we return `None` if we find an upgrade file where the protocol_config.version field + // doesn't match the subdir name. + assert!(maybe_next_point(V0_9_9).is_some()); + + let mut chainspec_v0_9_9 = chainspec_v1_0_0; + chainspec_v0_9_9.protocol_config.version = V0_9_9; + let path_v1_0_0 = tempdir + .path() + .join(dir_name_from_version(V1_0_0)) + .join(CHAINSPEC_FILENAME); + fs::write( + &path_v1_0_0, + toml::to_string_pretty(&chainspec_v0_9_9).expect("should encode to toml"), + ) + .expect("should install upgrade point"); + assert!(maybe_next_point(V0_9_9).is_none()); + + // Check we return `None` if the next version upgrade_point file is corrupt. + fs::write(&path_v1_0_0, "bad data".as_bytes()).unwrap(); + assert!(maybe_next_point(V0_9_9).is_none()); + + // Check we return `None` if the next version upgrade_point file is missing. 
+ fs::remove_file(&path_v1_0_0).unwrap(); + assert!(maybe_next_point(V0_9_9).is_none()); + } + + #[test] + fn should_register_unstaged_upgrade() { + let _ = logging::init(); + let tempdir = tempfile::tempdir().expect("should create temp dir"); + let (chainspec, _) = <(Chainspec, ChainspecRawBytes)>::from_resources("local"); + let mut upgrade_watcher = + UpgradeWatcher::new(&chainspec, Config::default(), tempdir.path()).unwrap(); + assert!(upgrade_watcher.next_upgrade.is_none()); + + let next_upgrade = NextUpgrade::new( + ActivationPoint::EraId(EraId::MAX), + ProtocolVersion::from_parts(9, 9, 9), + ); + let _ = upgrade_watcher.handle_got_next_upgrade(Some(next_upgrade)); + assert_eq!(Some(next_upgrade), upgrade_watcher.next_upgrade); + + let _ = upgrade_watcher.handle_got_next_upgrade(None); + assert!(upgrade_watcher.next_upgrade.is_none()); + } +} diff --git a/node/src/config_migration.rs b/node/src/config_migration.rs index db074b9f5a..86a6bf2ef2 100644 --- a/node/src/config_migration.rs +++ b/node/src/config_migration.rs @@ -1,6 +1,6 @@ use thiserror::Error; -use crate::{reactor::validator::Config, utils::WithDir}; +use crate::{reactor::main_reactor::Config, utils::WithDir}; // This will be changed in favour of an actual old config type when the migration is not a no-op. type OldConfig = Config; @@ -13,7 +13,7 @@ pub enum Error {} /// /// This should be executed after a new version is available, but before the casper-node has been /// run in validator mode using the new version. -pub fn migrate_config( +pub(crate) fn migrate_config( _old_config: WithDir, _new_config: WithDir, ) -> Result<(), Error> { diff --git a/node/src/crypto.rs b/node/src/crypto.rs deleted file mode 100644 index 7dd637f145..0000000000 --- a/node/src/crypto.rs +++ /dev/null @@ -1,10 +0,0 @@ -//! Cryptographic types and functions. 
- -mod asymmetric_key; -mod asymmetric_key_ext; -mod error; -pub mod hash; - -pub use asymmetric_key::{generate_ed25519_keypair, sign, verify}; -pub use asymmetric_key_ext::AsymmetricKeyExt; -pub use error::{Error, Result}; diff --git a/node/src/crypto/asymmetric_key.rs b/node/src/crypto/asymmetric_key.rs deleted file mode 100644 index b4b7b78af6..0000000000 --- a/node/src/crypto/asymmetric_key.rs +++ /dev/null @@ -1,774 +0,0 @@ -//! Asymmetric-key types and functions. - -// TODO - remove once schemars stops causing warning. -#![allow(clippy::field_reassign_with_default)] - -use ed25519_dalek::ExpandedSecretKey; -use k256::ecdsa::{ - signature::{Signer, Verifier}, - Signature as Secp256k1Signature, VerifyingKey as Secp256k1PublicKey, -}; -#[cfg(test)] -use k256::elliptic_curve::sec1::ToEncodedPoint; - -use casper_types::{PublicKey, SecretKey, Signature}; - -pub use super::{Error, Result}; -use crate::crypto::AsymmetricKeyExt; - -/// Generates an Ed25519 keypair using the operating system's cryptographically secure random number -/// generator. -pub fn generate_ed25519_keypair() -> (SecretKey, PublicKey) { - let secret_key = SecretKey::generate_ed25519().unwrap(); - let public_key = PublicKey::from(&secret_key); - (secret_key, public_key) -} - -/// Signs the given message using the given key pair. 
-pub fn sign>( - message: T, - secret_key: &SecretKey, - public_key: &PublicKey, -) -> Signature { - match (secret_key, public_key) { - (SecretKey::System, PublicKey::System) => { - panic!("cannot create signature with system keys",) - } - (SecretKey::Ed25519(secret_key), PublicKey::Ed25519(public_key)) => { - let expanded_secret_key = ExpandedSecretKey::from(secret_key); - let signature = expanded_secret_key.sign(message.as_ref(), public_key); - Signature::Ed25519(signature) - } - (SecretKey::Secp256k1(secret_key), PublicKey::Secp256k1(_public_key)) => { - let signer = secret_key; - let signature: Secp256k1Signature = signer - .try_sign(message.as_ref()) - .expect("should create signature"); - Signature::Secp256k1(signature) - } - _ => panic!("secret and public key types must match"), - } -} - -/// Verifies the signature of the given message against the given public key. -pub fn verify>( - message: T, - signature: &Signature, - public_key: &PublicKey, -) -> Result<()> { - match (signature, public_key) { - (Signature::System, _) => Err(Error::AsymmetricKey(String::from( - "signatures based on the system key cannot be verified", - ))), - (Signature::Ed25519(signature), PublicKey::Ed25519(public_key)) => public_key - .verify_strict(message.as_ref(), signature) - .map_err(|_| Error::AsymmetricKey(String::from("failed to verify Ed25519 signature"))), - (Signature::Secp256k1(signature), PublicKey::Secp256k1(public_key)) => { - let verifier: &Secp256k1PublicKey = public_key; - verifier - .verify(message.as_ref(), signature) - .map_err(|error| { - Error::AsymmetricKey(format!("failed to verify secp256k1 signature: {}", error)) - }) - } - _ => Err(Error::AsymmetricKey(format!( - "type mismatch between {} and {}", - signature, public_key - ))), - } -} - -#[cfg(test)] -mod tests { - use std::{ - cmp::Ordering, - collections::hash_map::DefaultHasher, - hash::{Hash, Hasher}, - iter, - }; - - use rand::RngCore; - - use openssl::pkey::{PKey, Private, Public}; - - use 
casper_types::{bytesrepr, AsymmetricType, Tagged}; - - use super::*; - use crate::{crypto::AsymmetricKeyExt, testing::TestRng}; - - type OpenSSLSecretKey = PKey; - type OpenSSLPublicKey = PKey; - - // `SecretKey` does not implement `PartialEq`, so just compare derived `PublicKey`s. - fn assert_secret_keys_equal(lhs: &SecretKey, rhs: &SecretKey) { - assert_eq!(PublicKey::from(lhs), PublicKey::from(rhs)); - } - - fn secret_key_der_roundtrip(secret_key: SecretKey) { - let der_encoded = secret_key.to_der().unwrap(); - let decoded = SecretKey::from_der(&der_encoded).unwrap(); - assert_secret_keys_equal(&secret_key, &decoded); - assert_eq!(secret_key.tag(), decoded.tag()); - - // Ensure malformed encoded version fails to decode. - SecretKey::from_der(&der_encoded[1..]).unwrap_err(); - } - - fn secret_key_pem_roundtrip(secret_key: SecretKey) { - let pem_encoded = secret_key.to_pem().unwrap(); - let decoded = SecretKey::from_pem(pem_encoded.as_bytes()).unwrap(); - assert_secret_keys_equal(&secret_key, &decoded); - assert_eq!(secret_key.tag(), decoded.tag()); - - // Check PEM-encoded can be decoded by openssl. - let _ = OpenSSLSecretKey::private_key_from_pem(pem_encoded.as_bytes()).unwrap(); - - // Ensure malformed encoded version fails to decode. 
- SecretKey::from_pem(&pem_encoded[1..]).unwrap_err(); - } - - fn known_secret_key_to_pem(expected_key: &SecretKey, known_key_pem: &str, expected_tag: u8) { - let decoded = SecretKey::from_pem(known_key_pem.as_bytes()).unwrap(); - assert_secret_keys_equal(expected_key, &decoded); - assert_eq!(expected_tag, decoded.tag()); - } - - fn secret_key_file_roundtrip(secret_key: SecretKey) { - let tempdir = tempfile::tempdir().unwrap(); - let path = tempdir.path().join("test_secret_key.pem"); - - secret_key.to_file(&path).unwrap(); - let decoded = SecretKey::from_file(&path).unwrap(); - assert_secret_keys_equal(&secret_key, &decoded); - assert_eq!(secret_key.tag(), decoded.tag()); - } - - fn public_key_serialization_roundtrip(public_key: PublicKey) { - // Try to/from bincode. - let serialized = bincode::serialize(&public_key).unwrap(); - let deserialized = bincode::deserialize(&serialized).unwrap(); - assert_eq!(public_key, deserialized); - assert_eq!(public_key.tag(), deserialized.tag()); - - // Try to/from JSON. - let serialized = serde_json::to_vec_pretty(&public_key).unwrap(); - let deserialized = serde_json::from_slice(&serialized).unwrap(); - assert_eq!(public_key, deserialized); - assert_eq!(public_key.tag(), deserialized.tag()); - - // Using bytesrepr. - bytesrepr::test_serialization_roundtrip(&public_key); - } - - fn public_key_der_roundtrip(public_key: PublicKey) { - let der_encoded = public_key.to_der().unwrap(); - let decoded = PublicKey::from_der(&der_encoded).unwrap(); - assert_eq!(public_key, decoded); - - // Check DER-encoded can be decoded by openssl. - let _ = OpenSSLPublicKey::public_key_from_der(&der_encoded).unwrap(); - - // Ensure malformed encoded version fails to decode. 
- PublicKey::from_der(&der_encoded[1..]).unwrap_err(); - } - - fn public_key_pem_roundtrip(public_key: PublicKey) { - let pem_encoded = public_key.to_pem().unwrap(); - let decoded = PublicKey::from_pem(pem_encoded.as_bytes()).unwrap(); - assert_eq!(public_key, decoded); - assert_eq!(public_key.tag(), decoded.tag()); - - // Check PEM-encoded can be decoded by openssl. - let _ = OpenSSLPublicKey::public_key_from_pem(pem_encoded.as_bytes()).unwrap(); - - // Ensure malformed encoded version fails to decode. - PublicKey::from_pem(&pem_encoded[1..]).unwrap_err(); - } - - fn known_public_key_to_pem(known_key_hex: &str, known_key_pem: &str) { - let key_bytes = hex::decode(known_key_hex).unwrap(); - let decoded = PublicKey::from_pem(known_key_pem.as_bytes()).unwrap(); - assert_eq!(key_bytes, Into::>::into(decoded)); - } - - fn public_key_file_roundtrip(public_key: PublicKey) { - let tempdir = tempfile::tempdir().unwrap(); - let path = tempdir.path().join("test_public_key.pem"); - - public_key.to_file(&path).unwrap(); - let decoded = PublicKey::from_file(&path).unwrap(); - assert_eq!(public_key, decoded); - } - - fn public_key_hex_roundtrip(public_key: PublicKey) { - let hex_encoded = public_key.to_hex(); - let decoded = PublicKey::from_hex(&hex_encoded).unwrap(); - assert_eq!(public_key, decoded); - assert_eq!(public_key.tag(), decoded.tag()); - - // Ensure malformed encoded version fails to decode. - PublicKey::from_hex(&hex_encoded[..1]).unwrap_err(); - PublicKey::from_hex(&hex_encoded[1..]).unwrap_err(); - } - - fn signature_serialization_roundtrip(signature: Signature) { - // Try to/from bincode. - let serialized = bincode::serialize(&signature).unwrap(); - let deserialized: Signature = bincode::deserialize(&serialized).unwrap(); - assert_eq!(signature, deserialized); - assert_eq!(signature.tag(), deserialized.tag()); - - // Try to/from JSON. 
- let serialized = serde_json::to_vec_pretty(&signature).unwrap(); - let deserialized = serde_json::from_slice(&serialized).unwrap(); - assert_eq!(signature, deserialized); - assert_eq!(signature.tag(), deserialized.tag()); - - // Try to/from using bytesrepr. - let serialized = bytesrepr::serialize(signature).unwrap(); - let deserialized = bytesrepr::deserialize(serialized).unwrap(); - assert_eq!(signature, deserialized); - assert_eq!(signature.tag(), deserialized.tag()) - } - - fn signature_hex_roundtrip(signature: Signature) { - let hex_encoded = signature.to_hex(); - let decoded = Signature::from_hex(hex_encoded.as_bytes()).unwrap(); - assert_eq!(signature, decoded); - assert_eq!(signature.tag(), decoded.tag()); - - // Ensure malformed encoded version fails to decode. - Signature::from_hex(&hex_encoded[..1]).unwrap_err(); - Signature::from_hex(&hex_encoded[1..]).unwrap_err(); - } - - fn hash(data: &T) -> u64 { - let mut hasher = DefaultHasher::new(); - data.hash(&mut hasher); - hasher.finish() - } - - fn check_ord_and_hash(low: T, high: T) { - let low_copy = low.clone(); - - assert_eq!(hash(&low), hash(&low_copy)); - assert_ne!(hash(&low), hash(&high)); - - assert_eq!(Ordering::Less, low.cmp(&high)); - assert_eq!(Some(Ordering::Less), low.partial_cmp(&high)); - - assert_eq!(Ordering::Greater, high.cmp(&low)); - assert_eq!(Some(Ordering::Greater), high.partial_cmp(&low)); - - assert_eq!(Ordering::Equal, low.cmp(&low_copy)); - assert_eq!(Some(Ordering::Equal), low.partial_cmp(&low_copy)); - } - - mod ed25519 { - use rand::Rng; - - use casper_types::ED25519_TAG; - - use super::*; - use crate::crypto::AsymmetricKeyExt; - - const SECRET_KEY_LENGTH: usize = SecretKey::ED25519_LENGTH; - const PUBLIC_KEY_LENGTH: usize = PublicKey::ED25519_LENGTH; - const SIGNATURE_LENGTH: usize = Signature::ED25519_LENGTH; - - #[test] - fn secret_key_from_bytes() { - // Secret key should be `SecretKey::ED25519_LENGTH` bytes. 
- let bytes = [0; SECRET_KEY_LENGTH + 1]; - assert!(SecretKey::ed25519_from_bytes(&bytes[..]).is_err()); - assert!(SecretKey::ed25519_from_bytes(&bytes[2..]).is_err()); - - // Check the same bytes but of the right length succeeds. - assert!(SecretKey::ed25519_from_bytes(&bytes[1..]).is_ok()); - } - - #[test] - fn secret_key_to_and_from_der() { - let mut rng = crate::new_rng(); - let secret_key = SecretKey::random_ed25519(&mut rng); - let der_encoded = secret_key.to_der().unwrap(); - secret_key_der_roundtrip(secret_key); - - // Check DER-encoded can be decoded by openssl. - let _ = OpenSSLSecretKey::private_key_from_der(&der_encoded).unwrap(); - } - - #[test] - fn secret_key_to_and_from_pem() { - let mut rng = crate::new_rng(); - let secret_key = SecretKey::random_ed25519(&mut rng); - secret_key_pem_roundtrip(secret_key); - } - - #[test] - fn known_secret_key_to_pem() { - // Example values taken from https://tools.ietf.org/html/rfc8410#section-10.3 - const KNOWN_KEY_PEM: &str = r#"-----BEGIN PRIVATE KEY----- -MC4CAQAwBQYDK2VwBCIEINTuctv5E1hK1bbY8fdp+K06/nwoy/HU++CXqI9EdVhC ------END PRIVATE KEY-----"#; - let key_bytes = - hex::decode("d4ee72dbf913584ad5b6d8f1f769f8ad3afe7c28cbf1d4fbe097a88f44755842") - .unwrap(); - let expected_key = SecretKey::ed25519_from_bytes(key_bytes).unwrap(); - super::known_secret_key_to_pem(&expected_key, KNOWN_KEY_PEM, ED25519_TAG); - } - - #[test] - fn secret_key_to_and_from_file() { - let mut rng = crate::new_rng(); - let secret_key = SecretKey::random_ed25519(&mut rng); - secret_key_file_roundtrip(secret_key); - } - - #[test] - fn public_key_serialization_roundtrip() { - let mut rng = crate::new_rng(); - let public_key = PublicKey::random_ed25519(&mut rng); - super::public_key_serialization_roundtrip(public_key); - } - - #[test] - fn public_key_from_bytes() { - // Public key should be `PublicKey::ED25519_LENGTH` bytes. Create vec with an extra - // byte. 
- let mut rng = crate::new_rng(); - let public_key = PublicKey::random_ed25519(&mut rng); - let bytes: Vec = iter::once(rng.gen()) - .chain(Into::>::into(public_key)) - .collect::>(); - - assert!(PublicKey::ed25519_from_bytes(&bytes[..]).is_err()); - assert!(PublicKey::ed25519_from_bytes(&bytes[2..]).is_err()); - - // Check the same bytes but of the right length succeeds. - assert!(PublicKey::ed25519_from_bytes(&bytes[1..]).is_ok()); - } - - #[test] - fn public_key_to_and_from_der() { - let mut rng = crate::new_rng(); - let public_key = PublicKey::random_ed25519(&mut rng); - public_key_der_roundtrip(public_key); - } - - #[test] - fn public_key_to_and_from_pem() { - let mut rng = crate::new_rng(); - let public_key = PublicKey::random_ed25519(&mut rng); - public_key_pem_roundtrip(public_key); - } - - #[test] - fn known_public_key_to_pem() { - // Example values taken from https://tools.ietf.org/html/rfc8410#section-10.1 - const KNOWN_KEY_HEX: &str = - "19bf44096984cdfe8541bac167dc3b96c85086aa30b6b6cb0c5c38ad703166e1"; - const KNOWN_KEY_PEM: &str = r#"-----BEGIN PUBLIC KEY----- -MCowBQYDK2VwAyEAGb9ECWmEzf6FQbrBZ9w7lshQhqowtrbLDFw4rXAxZuE= ------END PUBLIC KEY-----"#; - super::known_public_key_to_pem(KNOWN_KEY_HEX, KNOWN_KEY_PEM); - } - - #[test] - fn public_key_to_and_from_file() { - let mut rng = crate::new_rng(); - let public_key = PublicKey::random_ed25519(&mut rng); - public_key_file_roundtrip(public_key); - } - - #[test] - fn public_key_to_and_from_hex() { - let mut rng = crate::new_rng(); - let public_key = PublicKey::random_ed25519(&mut rng); - public_key_hex_roundtrip(public_key); - } - - #[test] - fn signature_serialization_roundtrip() { - let mut rng = crate::new_rng(); - let secret_key = SecretKey::random_ed25519(&mut rng); - let public_key = PublicKey::from(&secret_key); - let data = b"data"; - let signature = sign(data, &secret_key, &public_key); - super::signature_serialization_roundtrip(signature); - } - - #[test] - fn signature_from_bytes() { - // 
Signature should be < ~2^(252.5). - let invalid_bytes = [255; SIGNATURE_LENGTH]; - assert!(Signature::ed25519_from_bytes(&invalid_bytes[..]).is_err()); - - // Signature should be `Signature::ED25519_LENGTH` bytes. - let bytes = [2; SIGNATURE_LENGTH + 1]; - assert!(Signature::ed25519_from_bytes(&bytes[..]).is_err()); - assert!(Signature::ed25519_from_bytes(&bytes[2..]).is_err()); - - // Check the same bytes but of the right length succeeds. - assert!(Signature::ed25519_from_bytes(&bytes[1..]).is_ok()); - } - - #[test] - fn signature_key_to_and_from_hex() { - let mut rng = crate::new_rng(); - let secret_key = SecretKey::random_ed25519(&mut rng); - let public_key = PublicKey::from(&secret_key); - let data = b"data"; - let signature = sign(data, &secret_key, &public_key); - signature_hex_roundtrip(signature); - } - - #[test] - fn public_key_traits() { - let public_key_low = PublicKey::ed25519_from_bytes([1; PUBLIC_KEY_LENGTH]).unwrap(); - let public_key_high = PublicKey::ed25519_from_bytes([3; PUBLIC_KEY_LENGTH]).unwrap(); - check_ord_and_hash(public_key_low, public_key_high) - } - - #[test] - fn public_key_to_account_hash() { - let public_key_high = PublicKey::ed25519_from_bytes([255; PUBLIC_KEY_LENGTH]).unwrap(); - assert_ne!( - public_key_high.to_account_hash().as_ref(), - Into::>::into(public_key_high) - ); - } - - #[test] - fn signature_traits() { - let signature_low = Signature::ed25519([1; SIGNATURE_LENGTH]).unwrap(); - let signature_high = Signature::ed25519([3; SIGNATURE_LENGTH]).unwrap(); - check_ord_and_hash(signature_low, signature_high) - } - - #[test] - fn sign_and_verify() { - let mut rng = crate::new_rng(); - let secret_key = SecretKey::random_ed25519(&mut rng); - - let public_key = PublicKey::from(&secret_key); - let other_public_key = PublicKey::random_ed25519(&mut rng); - let wrong_type_public_key = PublicKey::random_secp256k1(&mut rng); - - let message = b"message"; - let signature = sign(message, &secret_key, &public_key); - - 
assert!(verify(message, &signature, &public_key).is_ok()); - assert!(verify(message, &signature, &other_public_key).is_err()); - assert!(verify(message, &signature, &wrong_type_public_key).is_err()); - assert!(verify(&message[1..], &signature, &public_key).is_err()); - } - - #[test] - fn bytesrepr_roundtrip_signature() { - let mut rng = TestRng::new(); - let ed25519_secret_key = SecretKey::random_ed25519(&mut rng); - let public_key = PublicKey::from(&ed25519_secret_key); - let data = b"data"; - let signature = sign(data, &ed25519_secret_key, &public_key); - bytesrepr::test_serialization_roundtrip(&signature); - } - } - - mod secp256k1 { - use rand::Rng; - - use casper_types::SECP256K1_TAG; - - use super::*; - use crate::crypto::AsymmetricKeyExt; - - const SECRET_KEY_LENGTH: usize = SecretKey::SECP256K1_LENGTH; - const SIGNATURE_LENGTH: usize = Signature::SECP256K1_LENGTH; - - #[test] - fn secret_key_from_bytes() { - // Secret key should be `SecretKey::SECP256K1_LENGTH` bytes. - // The k256 library will ensure that a byte stream of a length not equal to - // `SECP256K1_LENGTH` will fail due to an assertion internal to the library. - // We can check that invalid byte streams e.g [0;32] does not generate a valid key. - let bytes = [0; SECRET_KEY_LENGTH]; - assert!(SecretKey::secp256k1_from_bytes(&bytes[..]).is_err()); - - // Check that a valid byte stream produces a valid key - let bytes = [1; SECRET_KEY_LENGTH]; - assert!(SecretKey::secp256k1_from_bytes(&bytes[..]).is_ok()); - } - - #[test] - fn secret_key_to_and_from_der() { - let mut rng = crate::new_rng(); - let secret_key = SecretKey::random_secp256k1(&mut rng); - secret_key_der_roundtrip(secret_key); - } - - #[test] - fn secret_key_to_and_from_pem() { - let mut rng = crate::new_rng(); - let secret_key = SecretKey::random_secp256k1(&mut rng); - secret_key_pem_roundtrip(secret_key); - } - - #[test] - fn known_secret_key_to_pem() { - // Example values taken from Python client. 
- const KNOWN_KEY_PEM: &str = r#"-----BEGIN EC PRIVATE KEY----- -MHQCAQEEIL3fqaMKAfXSK1D2PnVVbZlZ7jTv133nukq4+95s6kmcoAcGBSuBBAAK -oUQDQgAEQI6VJjFv0fje9IDdRbLMcv/XMnccnOtdkv+kBR5u4ISEAkuc2TFWQHX0 -Yj9oTB9fx9+vvQdxJOhMtu46kGo0Uw== ------END EC PRIVATE KEY-----"#; - let key_bytes = - hex::decode("bddfa9a30a01f5d22b50f63e75556d9959ee34efd77de7ba4ab8fbde6cea499c") - .unwrap(); - let expected_key = SecretKey::secp256k1_from_bytes(key_bytes).unwrap(); - super::known_secret_key_to_pem(&expected_key, KNOWN_KEY_PEM, SECP256K1_TAG); - } - - #[test] - fn secret_key_to_and_from_file() { - let mut rng = crate::new_rng(); - let secret_key = SecretKey::random_secp256k1(&mut rng); - secret_key_file_roundtrip(secret_key); - } - - #[test] - fn public_key_serialization_roundtrip() { - let mut rng = crate::new_rng(); - let public_key = PublicKey::random_secp256k1(&mut rng); - super::public_key_serialization_roundtrip(public_key); - } - - #[test] - fn public_key_from_bytes() { - // Public key should be `PublicKey::SECP256K1_LENGTH` bytes. Create vec with an extra - // byte. - let mut rng = crate::new_rng(); - let public_key = PublicKey::random_secp256k1(&mut rng); - let bytes: Vec = iter::once(rng.gen()) - .chain(Into::>::into(public_key)) - .collect::>(); - - assert!(PublicKey::secp256k1_from_bytes(&bytes[..]).is_err()); - assert!(PublicKey::secp256k1_from_bytes(&bytes[2..]).is_err()); - - // Check the same bytes but of the right length succeeds. - assert!(PublicKey::secp256k1_from_bytes(&bytes[1..]).is_ok()); - } - - #[test] - fn public_key_to_and_from_der() { - let mut rng = crate::new_rng(); - let public_key = PublicKey::random_secp256k1(&mut rng); - public_key_der_roundtrip(public_key); - } - - #[test] - fn public_key_to_and_from_pem() { - let mut rng = crate::new_rng(); - let public_key = PublicKey::random_secp256k1(&mut rng); - public_key_pem_roundtrip(public_key); - } - - #[test] - fn known_public_key_to_pem() { - // Example values taken from Python client. 
- const KNOWN_KEY_HEX: &str = - "03408e9526316fd1f8def480dd45b2cc72ffd732771c9ceb5d92ffa4051e6ee084"; - const KNOWN_KEY_PEM: &str = r#"-----BEGIN PUBLIC KEY----- -MFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEQI6VJjFv0fje9IDdRbLMcv/XMnccnOtd -kv+kBR5u4ISEAkuc2TFWQHX0Yj9oTB9fx9+vvQdxJOhMtu46kGo0Uw== ------END PUBLIC KEY-----"#; - super::known_public_key_to_pem(KNOWN_KEY_HEX, KNOWN_KEY_PEM); - } - - #[test] - fn public_key_to_and_from_file() { - let mut rng = crate::new_rng(); - let public_key = PublicKey::random_secp256k1(&mut rng); - public_key_file_roundtrip(public_key); - } - - #[test] - fn public_key_to_and_from_hex() { - let mut rng = crate::new_rng(); - let public_key = PublicKey::random_secp256k1(&mut rng); - public_key_hex_roundtrip(public_key); - } - - #[test] - fn signature_serialization_roundtrip() { - let mut rng = crate::new_rng(); - let secret_key = SecretKey::random_secp256k1(&mut rng); - let public_key = PublicKey::from(&secret_key); - let data = b"data"; - let signature = sign(data, &secret_key, &public_key); - super::signature_serialization_roundtrip(signature); - } - - #[test] - fn bytesrepr_roundtrip_signature() { - let mut rng = TestRng::new(); - let secret_key = SecretKey::random_secp256k1(&mut rng); - let public_key = PublicKey::from(&secret_key); - let data = b"data"; - let signature = sign(data, &secret_key, &public_key); - bytesrepr::test_serialization_roundtrip(&signature); - } - - #[test] - fn signature_from_bytes() { - // Signature should be `Signature::SECP256K1_LENGTH` bytes. - let bytes = [2; SIGNATURE_LENGTH + 1]; - assert!(Signature::secp256k1_from_bytes(&bytes[..]).is_err()); - assert!(Signature::secp256k1_from_bytes(&bytes[2..]).is_err()); - - // Check the same bytes but of the right length succeeds. 
- assert!(Signature::secp256k1_from_bytes(&bytes[1..]).is_ok()); - } - - #[test] - fn signature_key_to_and_from_hex() { - let mut rng = crate::new_rng(); - let secret_key = SecretKey::random_secp256k1(&mut rng); - let public_key = PublicKey::from(&secret_key); - let data = b"data"; - let signature = sign(data, &secret_key, &public_key); - signature_hex_roundtrip(signature); - } - - #[test] - fn public_key_traits() { - let mut rng = crate::new_rng(); - let public_key1 = PublicKey::random_secp256k1(&mut rng); - let public_key2 = PublicKey::random_secp256k1(&mut rng); - if Into::>::into(public_key1.clone()) - < Into::>::into(public_key2.clone()) - { - check_ord_and_hash(public_key1, public_key2) - } else { - check_ord_and_hash(public_key2, public_key1) - } - } - - #[test] - fn public_key_to_account_hash() { - let mut rng = crate::new_rng(); - let public_key = PublicKey::random_secp256k1(&mut rng); - assert_ne!( - public_key.to_account_hash().as_ref(), - Into::>::into(public_key) - ); - } - - #[test] - fn signature_traits() { - let signature_low = Signature::secp256k1([1; SIGNATURE_LENGTH]).unwrap(); - let signature_high = Signature::secp256k1([3; SIGNATURE_LENGTH]).unwrap(); - check_ord_and_hash(signature_low, signature_high) - } - } - - #[test] - fn public_key_traits() { - let mut rng = crate::new_rng(); - let ed25519_public_key = PublicKey::random_ed25519(&mut rng); - let secp256k1_public_key = PublicKey::random_secp256k1(&mut rng); - check_ord_and_hash(ed25519_public_key, secp256k1_public_key); - } - - #[test] - fn signature_traits() { - let signature_low = Signature::ed25519([3; Signature::ED25519_LENGTH]).unwrap(); - let signature_high = Signature::secp256k1([1; Signature::SECP256K1_LENGTH]).unwrap(); - check_ord_and_hash(signature_low, signature_high) - } - - #[test] - fn sign_and_verify() { - let mut rng = crate::new_rng(); - let ed25519_secret_key = SecretKey::random_ed25519(&mut rng); - let secp256k1_secret_key = SecretKey::random_secp256k1(&mut rng); - - let 
ed25519_public_key = PublicKey::from(&ed25519_secret_key); - let secp256k1_public_key = PublicKey::from(&secp256k1_secret_key); - - let other_ed25519_public_key = PublicKey::random_ed25519(&mut rng); - let other_secp256k1_public_key = PublicKey::random_secp256k1(&mut rng); - - let message = b"message"; - let ed25519_signature = sign(message, &ed25519_secret_key, &ed25519_public_key); - let secp256k1_signature = sign(message, &secp256k1_secret_key, &secp256k1_public_key); - - assert!(verify(message, &ed25519_signature, &ed25519_public_key).is_ok()); - assert!(verify(message, &secp256k1_signature, &secp256k1_public_key).is_ok()); - - assert!(verify(message, &ed25519_signature, &other_ed25519_public_key).is_err()); - assert!(verify(message, &secp256k1_signature, &other_secp256k1_public_key).is_err()); - - assert!(verify(message, &ed25519_signature, &secp256k1_public_key).is_err()); - assert!(verify(message, &secp256k1_signature, &ed25519_public_key).is_err()); - - assert!(verify(&message[1..], &ed25519_signature, &ed25519_public_key).is_err()); - assert!(verify(&message[1..], &secp256k1_signature, &secp256k1_public_key).is_err()); - } - - #[test] - fn should_construct_secp256k1_from_uncompressed_bytes() { - let mut rng = crate::new_rng(); - - let mut secret_key_bytes = [0u8; SecretKey::SECP256K1_LENGTH]; - rng.fill_bytes(&mut secret_key_bytes[..]); - - // Construct a secp256k1 secret key and use that to construct a public key. - let secp256k1_secret_key = k256::SecretKey::from_bytes(secret_key_bytes).unwrap(); - let secp256k1_public_key = secp256k1_secret_key.public_key(); - - // Construct a CL secret key and public key from that (which will be a compressed key). 
- let secret_key = SecretKey::secp256k1_from_bytes(secret_key_bytes).unwrap(); - let public_key = PublicKey::from(&secret_key); - assert_eq!( - Into::>::into(public_key.clone()).len(), - PublicKey::SECP256K1_LENGTH - ); - assert_ne!( - secp256k1_public_key - .to_encoded_point(false) - .as_bytes() - .len(), - PublicKey::SECP256K1_LENGTH - ); - - // Construct a CL public key from uncompressed public key bytes and ensure it's compressed. - let from_uncompressed_bytes = PublicKey::secp256k1_from_bytes( - secp256k1_public_key.to_encoded_point(false).as_bytes(), - ) - .unwrap(); - assert_eq!(public_key, from_uncompressed_bytes); - - // Construct a CL public key from the uncompressed one's hex representation and ensure it's - // compressed. - let uncompressed_hex = format!( - "02{}", - hex::encode(secp256k1_public_key.to_encoded_point(false).as_bytes()) - ); - let from_uncompressed_hex = PublicKey::from_hex(&uncompressed_hex).unwrap(); - assert_eq!(public_key, from_uncompressed_hex); - } - - #[test] - fn generate_ed25519_should_generate_an_ed25519_key() { - let secret_key = SecretKey::generate_ed25519().unwrap(); - assert!(matches!(secret_key, SecretKey::Ed25519(_))) - } - - #[test] - fn generate_secp256k1_should_generate_an_secp256k1_key() { - let secret_key = SecretKey::generate_secp256k1().unwrap(); - assert!(matches!(secret_key, SecretKey::Secp256k1(_))) - } -} diff --git a/node/src/crypto/asymmetric_key_ext.rs b/node/src/crypto/asymmetric_key_ext.rs deleted file mode 100644 index 5edb731e35..0000000000 --- a/node/src/crypto/asymmetric_key_ext.rs +++ /dev/null @@ -1,468 +0,0 @@ -//! 
Additional operations an asymmetric key - -use std::path::Path; - -use derp::{Der, Tag}; -use once_cell::sync::Lazy; -use pem::Pem; -#[cfg(test)] -use rand::{Rng, RngCore}; -use untrusted::Input; - -use casper_types::{AsymmetricType, PublicKey, SecretKey, ED25519_TAG, SECP256K1_TAG, SYSTEM_TAG}; - -#[cfg(test)] -use crate::testing::TestRng; -use crate::{crypto::Error, utils}; - -// See https://tools.ietf.org/html/rfc8410#section-10.3 -const ED25519_OBJECT_IDENTIFIER: [u8; 3] = [43, 101, 112]; -const ED25519_PEM_SECRET_KEY_TAG: &str = "PRIVATE KEY"; -const ED25519_PEM_PUBLIC_KEY_TAG: &str = "PUBLIC KEY"; - -// Ref? -const SECP256K1_OBJECT_IDENTIFIER: [u8; 5] = [43, 129, 4, 0, 10]; -const SECP256K1_PEM_SECRET_KEY_TAG: &str = "EC PRIVATE KEY"; -const SECP256K1_PEM_PUBLIC_KEY_TAG: &str = "PUBLIC KEY"; - -// See https://www.secg.org/sec1-v2.pdf#subsection.C.4 -const EC_PUBLIC_KEY_OBJECT_IDENTIFIER: [u8; 7] = [42, 134, 72, 206, 61, 2, 1]; - -static ED25519_SECRET_KEY: Lazy = Lazy::new(|| { - let bytes = [15u8; SecretKey::ED25519_LENGTH]; - SecretKey::ed25519_from_bytes(bytes).unwrap() -}); - -static ED25519_PUBLIC_KEY: Lazy = Lazy::new(|| { - let bytes = [15u8; SecretKey::ED25519_LENGTH]; - let secret_key = SecretKey::ed25519_from_bytes(bytes).unwrap(); - PublicKey::from(secret_key) -}); - -/// Additional operations an asymmetric key -pub trait AsymmetricKeyExt: Sized { - /// Constructs a new ed25519 variant using the operating system's cryptographically secure - /// random number generator. - fn generate_ed25519() -> Result; - - /// Constructs a new secp256k1 variant using the operating system's cryptographically secure - /// random number generator. - fn generate_secp256k1() -> Result; - - /// Attempts to write the key bytes to the configured file path. - fn to_file>(&self, file: P) -> Result<(), Error>; - - /// Attempts to read the key bytes from configured file path. - fn from_file>(file: P) -> Result; - - /// DER encodes a key. 
- fn to_der(&self) -> Result, Error>; - - /// Decodes a key from a DER-encoded slice. - fn from_der>(input: T) -> Result; - - /// PEM encodes a key. - fn to_pem(&self) -> Result; - - /// Decodes a secret key from a PEM-encoded slice. - fn from_pem>(input: T) -> Result; - - /// Duplicates a secret key. - /// - /// Only available for testing and named other than `clone` to prevent accidental use. - #[cfg(test)] - fn duplicate(&self) -> Self; - - /// Generates a random instance using a `TestRng`. - #[cfg(test)] - fn random(rng: &mut TestRng) -> Self; - - /// Generates a random ed25519 instance using a `TestRng`. - #[cfg(test)] - fn random_ed25519(rng: &mut TestRng) -> Self; - - /// Generates a random secp256k1 instance using a `TestRng`. - #[cfg(test)] - fn random_secp256k1(rng: &mut TestRng) -> Self; - - /// Returns an example value for documentation purposes. - fn doc_example() -> &'static Self; -} - -impl AsymmetricKeyExt for SecretKey { - fn generate_ed25519() -> Result { - let mut bytes = [0u8; Self::ED25519_LENGTH]; - getrandom::getrandom(&mut bytes[..])?; - Ok(SecretKey::ed25519_from_bytes(bytes)?) - } - - fn generate_secp256k1() -> Result { - let mut bytes = [0u8; Self::SECP256K1_LENGTH]; - getrandom::getrandom(&mut bytes[..])?; - Ok(SecretKey::secp256k1_from_bytes(bytes)?) 
- } - - fn to_file>(&self, file: P) -> Result<(), Error> { - utils::write_private_file(file, self.to_pem()?).map_err(Error::SecretKeySave) - } - - fn from_file>(file: P) -> Result { - let data = utils::read_file(file).map_err(Error::SecretKeyLoad)?; - Self::from_pem(data) - } - - fn to_der(&self) -> Result, Error> { - match self { - SecretKey::System => Err(Error::System(String::from("to_der"))), - SecretKey::Ed25519(secret_key) => { - // See https://tools.ietf.org/html/rfc8410#section-10.3 - let mut key_bytes = vec![]; - let mut der = Der::new(&mut key_bytes); - der.octet_string(secret_key.as_ref())?; - - let mut encoded = vec![]; - der = Der::new(&mut encoded); - der.sequence(|der| { - der.integer(&[0])?; - der.sequence(|der| der.oid(&ED25519_OBJECT_IDENTIFIER))?; - der.octet_string(&key_bytes) - })?; - Ok(encoded) - } - SecretKey::Secp256k1(secret_key) => { - // See https://www.secg.org/sec1-v2.pdf#subsection.C.4 - let mut oid_bytes = vec![]; - let mut der = Der::new(&mut oid_bytes); - der.oid(&SECP256K1_OBJECT_IDENTIFIER)?; - - let mut encoded = vec![]; - der = Der::new(&mut encoded); - der.sequence(|der| { - der.integer(&[1])?; - der.octet_string(secret_key.to_bytes().as_slice())?; - der.element(Tag::ContextSpecificConstructed0, &oid_bytes) - })?; - Ok(encoded) - } - } - } - - fn from_der>(input: T) -> Result { - let input = Input::from(input.as_ref()); - - let (key_type_tag, raw_bytes) = input.read_all(derp::Error::Read, |input| { - derp::nested(input, Tag::Sequence, |input| { - // Safe to ignore the first value which should be an integer. - let version_slice = - derp::expect_tag_and_get_value(input, Tag::Integer)?.as_slice_less_safe(); - if version_slice.len() != 1 { - return Err(derp::Error::NonZeroUnusedBits); - } - let version = version_slice[0]; - - // Read the next value. - let (tag, value) = derp::read_tag_and_get_value(input)?; - if tag == Tag::Sequence as u8 { - // Expecting an Ed25519 key. 
- if version != 0 { - return Err(derp::Error::WrongValue); - } - - // The sequence should have one element: an object identifier defining Ed25519. - let object_identifier = value.read_all(derp::Error::Read, |input| { - derp::expect_tag_and_get_value(input, Tag::Oid) - })?; - if object_identifier.as_slice_less_safe() != ED25519_OBJECT_IDENTIFIER { - return Err(derp::Error::WrongValue); - } - - // The third and final value should be the raw bytes of the secret key as an - // octet string in an octet string. - let raw_bytes = derp::nested(input, Tag::OctetString, |input| { - derp::expect_tag_and_get_value(input, Tag::OctetString) - })? - .as_slice_less_safe(); - - return Ok((ED25519_TAG, raw_bytes)); - } else if tag == Tag::OctetString as u8 { - // Expecting a secp256k1 key. - if version != 1 { - return Err(derp::Error::WrongValue); - } - - // The octet string is the secret key. - let raw_bytes = value.as_slice_less_safe(); - - // The object identifier is next. - let parameter0 = - derp::expect_tag_and_get_value(input, Tag::ContextSpecificConstructed0)?; - let object_identifier = parameter0.read_all(derp::Error::Read, |input| { - derp::expect_tag_and_get_value(input, Tag::Oid) - })?; - if object_identifier.as_slice_less_safe() != SECP256K1_OBJECT_IDENTIFIER { - return Err(derp::Error::WrongValue); - } - - // There might be an optional public key as the final value, but we're not - // interested in parsing that. Read it to ensure `input.read_all` doesn't fail - // with unused bytes error. 
- let _ = derp::read_tag_and_get_value(input); - - return Ok((SECP256K1_TAG, raw_bytes)); - } - - Err(derp::Error::WrongValue) - }) - })?; - - match key_type_tag { - SYSTEM_TAG => Err(Error::AsymmetricKey("cannot construct variant".to_string())), - ED25519_TAG => SecretKey::ed25519_from_bytes(raw_bytes).map_err(Into::into), - SECP256K1_TAG => SecretKey::secp256k1_from_bytes(raw_bytes).map_err(Into::into), - _ => Err(Error::AsymmetricKey("unknown type tag".to_string())), - } - } - - fn to_pem(&self) -> Result { - let tag = match self { - SecretKey::System => return Err(Error::System(String::from("to_pem"))), - SecretKey::Ed25519(_) => ED25519_PEM_SECRET_KEY_TAG.to_string(), - SecretKey::Secp256k1(_) => SECP256K1_PEM_SECRET_KEY_TAG.to_string(), - }; - let contents = self.to_der()?; - let pem = Pem { tag, contents }; - Ok(pem::encode(&pem)) - } - - fn from_pem>(input: T) -> Result { - let pem = pem::parse(input)?; - - let secret_key = Self::from_der(&pem.contents)?; - - let bad_tag = |expected_tag: &str| { - Error::FromPem(format!( - "invalid tag: expected {}, got {}", - expected_tag, pem.tag - )) - }; - - match secret_key { - SecretKey::System => return Err(Error::System(String::from("from_pem"))), - SecretKey::Ed25519(_) => { - if pem.tag != ED25519_PEM_SECRET_KEY_TAG { - return Err(bad_tag(ED25519_PEM_SECRET_KEY_TAG)); - } - } - SecretKey::Secp256k1(_) => { - if pem.tag != SECP256K1_PEM_SECRET_KEY_TAG { - return Err(bad_tag(SECP256K1_PEM_SECRET_KEY_TAG)); - } - } - } - - Ok(secret_key) - } - - #[cfg(test)] - fn duplicate(&self) -> Self { - match self { - SecretKey::System => SecretKey::System, - SecretKey::Ed25519(secret_key) => { - Self::ed25519_from_bytes(secret_key.as_ref()).expect("could not copy secret key") - } - SecretKey::Secp256k1(secret_key) => { - Self::secp256k1_from_bytes(secret_key.to_bytes().as_slice()) - .expect("could not copy secret key") - } - } - } - - #[cfg(test)] - fn random(rng: &mut TestRng) -> Self { - if rng.gen() { - 
Self::random_ed25519(rng) - } else { - Self::random_secp256k1(rng) - } - } - - #[cfg(test)] - fn random_ed25519(rng: &mut TestRng) -> Self { - let mut bytes = [0u8; Self::ED25519_LENGTH]; - rng.fill_bytes(&mut bytes[..]); - SecretKey::ed25519_from_bytes(bytes).unwrap() - } - - #[cfg(test)] - fn random_secp256k1(rng: &mut TestRng) -> Self { - let mut bytes = [0u8; Self::SECP256K1_LENGTH]; - rng.fill_bytes(&mut bytes[..]); - SecretKey::secp256k1_from_bytes(bytes).unwrap() - } - - fn doc_example() -> &'static Self { - &*ED25519_SECRET_KEY - } -} - -impl AsymmetricKeyExt for PublicKey { - fn generate_ed25519() -> Result { - let mut bytes = [0u8; Self::ED25519_LENGTH]; - getrandom::getrandom(&mut bytes[..]).expect("RNG failure!"); - PublicKey::ed25519_from_bytes(bytes).map_err(Into::into) - } - - fn generate_secp256k1() -> Result { - let mut bytes = [0u8; Self::SECP256K1_LENGTH]; - getrandom::getrandom(&mut bytes[..]).expect("RNG failure!"); - PublicKey::secp256k1_from_bytes(bytes).map_err(Into::into) - } - - fn to_file>(&self, file: P) -> Result<(), Error> { - utils::write_file(file, self.to_pem()?).map_err(Error::PublicKeySave) - } - - fn from_file>(file: P) -> Result { - let data = utils::read_file(file).map_err(Error::PublicKeyLoad)?; - Self::from_pem(data) - } - - fn to_der(&self) -> Result, Error> { - match self { - PublicKey::System => Err(Error::System(String::from("to_der"))), - PublicKey::Ed25519(public_key) => { - // See https://tools.ietf.org/html/rfc8410#section-10.1 - let mut encoded = vec![]; - let mut der = Der::new(&mut encoded); - der.sequence(|der| { - der.sequence(|der| der.oid(&ED25519_OBJECT_IDENTIFIER))?; - der.bit_string(0, public_key.as_ref()) - })?; - Ok(encoded) - } - PublicKey::Secp256k1(public_key) => { - // See https://www.secg.org/sec1-v2.pdf#subsection.C.3 - let mut encoded = vec![]; - let mut der = Der::new(&mut encoded); - der.sequence(|der| { - der.sequence(|der| { - der.oid(&EC_PUBLIC_KEY_OBJECT_IDENTIFIER)?; - 
der.oid(&SECP256K1_OBJECT_IDENTIFIER) - })?; - der.bit_string(0, &public_key.to_bytes()) - })?; - Ok(encoded) - } - } - } - - fn from_der>(input: T) -> Result { - let input = Input::from(input.as_ref()); - - let mut key_type_tag = ED25519_TAG; - let raw_bytes = input.read_all(derp::Error::Read, |input| { - derp::nested(input, Tag::Sequence, |input| { - derp::nested(input, Tag::Sequence, |input| { - // Read the first value. - let object_identifier = - derp::expect_tag_and_get_value(input, Tag::Oid)?.as_slice_less_safe(); - if object_identifier == ED25519_OBJECT_IDENTIFIER { - key_type_tag = ED25519_TAG; - Ok(()) - } else if object_identifier == EC_PUBLIC_KEY_OBJECT_IDENTIFIER { - // Assert the next object identifier is the secp256k1 ID. - let next_object_identifier = - derp::expect_tag_and_get_value(input, Tag::Oid)?.as_slice_less_safe(); - if next_object_identifier != SECP256K1_OBJECT_IDENTIFIER { - return Err(derp::Error::WrongValue); - } - - key_type_tag = SECP256K1_TAG; - Ok(()) - } else { - Err(derp::Error::WrongValue) - } - })?; - Ok(derp::bit_string_with_no_unused_bits(input)?.as_slice_less_safe()) - }) - })?; - - match key_type_tag { - ED25519_TAG => PublicKey::ed25519_from_bytes(raw_bytes).map_err(Into::into), - SECP256K1_TAG => PublicKey::secp256k1_from_bytes(raw_bytes).map_err(Into::into), - _ => unreachable!(), - } - } - - fn to_pem(&self) -> Result { - let tag = match self { - PublicKey::System => return Err(Error::System(String::from("to_pem"))), - PublicKey::Ed25519(_) => ED25519_PEM_PUBLIC_KEY_TAG.to_string(), - PublicKey::Secp256k1(_) => SECP256K1_PEM_PUBLIC_KEY_TAG.to_string(), - }; - let contents = self.to_der()?; - let pem = Pem { tag, contents }; - Ok(pem::encode(&pem)) - } - - fn from_pem>(input: T) -> Result { - let pem = pem::parse(input)?; - let public_key = Self::from_der(&pem.contents)?; - let bad_tag = |expected_tag: &str| { - Error::FromPem(format!( - "invalid tag: expected {}, got {}", - expected_tag, pem.tag - )) - }; - match 
public_key { - PublicKey::System => return Err(Error::System(String::from("from_pem"))), - PublicKey::Ed25519(_) => { - if pem.tag != ED25519_PEM_PUBLIC_KEY_TAG { - return Err(bad_tag(ED25519_PEM_PUBLIC_KEY_TAG)); - } - } - PublicKey::Secp256k1(_) => { - if pem.tag != SECP256K1_PEM_PUBLIC_KEY_TAG { - return Err(bad_tag(SECP256K1_PEM_PUBLIC_KEY_TAG)); - } - } - } - Ok(public_key) - } - - #[cfg(test)] - fn duplicate(&self) -> Self { - match self { - PublicKey::System => PublicKey::System, - PublicKey::Ed25519(public_key) => { - Self::ed25519_from_bytes(public_key.as_ref()).expect("could not copy public key") - } - PublicKey::Secp256k1(public_key) => { - Self::secp256k1_from_bytes(public_key.to_bytes().as_ref()) - .expect("could not copy public key") - } - } - } - - #[cfg(test)] - fn random(rng: &mut TestRng) -> Self { - let secret_key = SecretKey::random(rng); - PublicKey::from(&secret_key) - } - - #[cfg(test)] - fn random_ed25519(rng: &mut TestRng) -> Self { - let secret_key = SecretKey::random_ed25519(rng); - PublicKey::from(&secret_key) - } - - #[cfg(test)] - fn random_secp256k1(rng: &mut TestRng) -> Self { - let secret_key = SecretKey::random_secp256k1(rng); - PublicKey::from(&secret_key) - } - - fn doc_example() -> &'static Self { - &*ED25519_PUBLIC_KEY - } -} diff --git a/node/src/crypto/error.rs b/node/src/crypto/error.rs deleted file mode 100644 index 47ce220a74..0000000000 --- a/node/src/crypto/error.rs +++ /dev/null @@ -1,82 +0,0 @@ -use std::result; - -use base64::DecodeError; -use hex::FromHexError; -use pem::PemError; -use signature::Error as SignatureError; -use thiserror::Error; - -use crate::utils::{ReadFileError, WriteFileError}; -use casper_types::crypto; - -/// A specialized `std::result::Result` type for cryptographic errors. -pub type Result = result::Result; - -/// Cryptographic errors. -#[derive(Debug, Error)] -pub enum Error { - /// Error resulting from creating or using asymmetric key types. 
- #[error("asymmetric key error: {0}")] - AsymmetricKey(String), - - /// Error resulting when decoding a type from a hex-encoded representation. - #[error("parsing from hex: {0}")] - FromHex(#[from] FromHexError), - - /// Error trying to read a secret key. - #[error("secret key load failed: {0}")] - SecretKeyLoad(ReadFileError), - - /// Error trying to read a public key. - #[error("public key load failed: {0}")] - PublicKeyLoad(ReadFileError), - - /// Error resulting when decoding a type from a base64 representation. - #[error("decoding error: {0}")] - FromBase64(#[from] DecodeError), - - /// Pem format error. - #[error("pem error: {0}")] - FromPem(String), - - /// DER format error. - #[error("der error: {0}")] - FromDer(#[from] derp::Error), - - /// Error trying to write a secret key. - #[error("secret key save failed: {0}")] - SecretKeySave(WriteFileError), - - /// Error trying to write a public key. - #[error("public key save failed: {0}")] - PublicKeySave(WriteFileError), - - /// Error trying to manipulate the system key. - #[error("invalid operation on system key: {0}")] - System(String), - - /// Error related to the underlying signature crate. - #[error("error in signature")] - Signature(SignatureError), - - /// Error in getting random bytes from the system's preferred random number source. 
- #[error("failed to get random bytes: {0}")] - GetRandomBytes(#[from] getrandom::Error), -} - -impl From for Error { - fn from(error: PemError) -> Self { - Error::FromPem(error.to_string()) - } -} - -impl From for Error { - fn from(error: crypto::Error) -> Self { - match error { - crypto::Error::AsymmetricKey(string) => Error::AsymmetricKey(string), - crypto::Error::FromHex(error) => Error::FromHex(error), - crypto::Error::FromBase64(error) => Error::FromBase64(error), - crypto::Error::SignatureError(error) => Error::Signature(error), - } - } -} diff --git a/node/src/crypto/hash.rs b/node/src/crypto/hash.rs deleted file mode 100644 index 409ef23c6b..0000000000 --- a/node/src/crypto/hash.rs +++ /dev/null @@ -1,273 +0,0 @@ -//! Cryptographic hash type and function. - -// TODO - remove once schemars stops causing warning. -#![allow(clippy::field_reassign_with_default)] - -use std::{ - array::TryFromSliceError, - convert::TryFrom, - fmt::{self, Debug, Display, Formatter, LowerHex, UpperHex}, -}; - -use blake2::{ - digest::{Update, VariableOutput}, - VarBlake2b, -}; -use datasize::DataSize; -use hex_buffer_serde::{Hex, HexForm}; -use hex_fmt::HexFmt; -#[cfg(test)] -use rand::Rng; -use schemars::JsonSchema; -use serde::{Deserialize, Serialize}; - -use casper_execution_engine::shared::newtypes::Blake2bHash; -use casper_types::bytesrepr::{self, FromBytes, ToBytes}; - -use super::Error; -#[cfg(test)] -use crate::testing::TestRng; - -/// The hash digest; a wrapped `u8` array. -#[derive( - Copy, - Clone, - DataSize, - Ord, - PartialOrd, - Eq, - PartialEq, - Hash, - Serialize, - Deserialize, - Default, - JsonSchema, -)] -#[serde(deny_unknown_fields)] -#[schemars(with = "String", description = "Hex-encoded hash digest.")] -pub struct Digest( - #[serde(with = "HexForm::<[u8; Digest::LENGTH]>")] - #[schemars(skip, with = "String")] - [u8; Digest::LENGTH], -); - -impl Digest { - /// Length of `Digest` in bytes. 
- pub const LENGTH: usize = 32; - - /// Returns a copy of the wrapped `u8` array. - pub fn to_array(&self) -> [u8; Digest::LENGTH] { - self.0 - } - - /// Returns a copy of the wrapped `u8` array as a `Vec` - pub fn to_vec(&self) -> Vec { - self.0.to_vec() - } - - /// Returns a `Digest` parsed from a hex-encoded `Digest`. - pub fn from_hex>(hex_input: T) -> Result { - let mut inner = [0; Digest::LENGTH]; - hex::decode_to_slice(hex_input, &mut inner)?; - Ok(Digest(inner)) - } - - /// Generates a random instance using a `TestRng`. - #[cfg(test)] - pub fn random(rng: &mut TestRng) -> Self { - Digest(rng.gen::<[u8; Digest::LENGTH]>()) - } -} - -impl AsRef<[u8]> for Digest { - fn as_ref(&self) -> &[u8] { - self.0.as_ref() - } -} - -impl From<[u8; Digest::LENGTH]> for Digest { - fn from(inner: [u8; Digest::LENGTH]) -> Self { - Digest(inner) - } -} - -impl TryFrom<&[u8]> for Digest { - type Error = TryFromSliceError; - - fn try_from(slice: &[u8]) -> Result { - <[u8; Digest::LENGTH]>::try_from(slice).map(Digest) - } -} - -impl Debug for Digest { - fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { - write!(formatter, "{}", HexFmt(&self.0)) - } -} - -impl Display for Digest { - fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { - write!(formatter, "{:10}", HexFmt(&self.0)) - } -} -impl LowerHex for Digest { - fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { - if formatter.alternate() { - write!(formatter, "0x{}", HexFmt(&self.0)) - } else { - write!(formatter, "{}", HexFmt(&self.0)) - } - } -} - -impl UpperHex for Digest { - fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { - if formatter.alternate() { - write!(formatter, "0x{:X}", HexFmt(&self.0)) - } else { - write!(formatter, "{:X}", HexFmt(&self.0)) - } - } -} - -impl ToBytes for Digest { - fn to_bytes(&self) -> Result, bytesrepr::Error> { - self.0.to_bytes() - } - - fn serialized_length(&self) -> usize { - self.0.serialized_length() - } -} - -impl FromBytes for Digest { - fn 
from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { - FromBytes::from_bytes(bytes).map(|(inner, remainder)| (Digest(inner), remainder)) - } -} - -/// Returns the hash of `data`. -pub fn hash>(data: T) -> Digest { - let mut result = [0; Digest::LENGTH]; - - let mut hasher = VarBlake2b::new(Digest::LENGTH).expect("should create hasher"); - hasher.update(data); - hasher.finalize_variable(|slice| { - result.copy_from_slice(slice); - }); - Digest(result) -} - -impl From for Blake2bHash { - fn from(digest: Digest) -> Self { - let digest_bytes = digest.to_array(); - Blake2bHash::from(digest_bytes) - } -} - -impl From for Digest { - fn from(blake2bhash: Blake2bHash) -> Self { - let bytes = blake2bhash.value(); - Digest::from(bytes) - } -} - -#[cfg(test)] -mod test { - use std::iter::{self, FromIterator}; - - use super::*; - - #[test] - fn blake2b_hash_known() { - let inputs_and_digests = [ - ( - "", - "0e5751c026e543b2e8ab2eb06099daa1d1e5df47778f7787faab45cdf12fe3a8", - ), - ( - "abc", - "bddd813c634239723171ef3fee98579b94964e3bb1cb3e427262c8c068d52319", - ), - ( - "The quick brown fox jumps over the lazy dog", - "01718cec35cd3d796dd00020e0bfecb473ad23457d063b75eff29c0ffa2e58a9", - ), - ]; - for (known_input, expected_digest) in &inputs_and_digests { - let known_input: &[u8] = known_input.as_ref(); - assert_eq!(*expected_digest, format!("{:?}", hash(known_input))); - } - } - - #[test] - fn from_valid_hex_should_succeed() { - for char in "abcdefABCDEF0123456789".chars() { - let input = String::from_iter(iter::repeat(char).take(64)); - assert!(Digest::from_hex(input).is_ok()); - } - } - - #[test] - fn from_hex_invalid_length_should_fail() { - for len in &[2_usize, 62, 63, 65, 66] { - let input = String::from_iter(iter::repeat('f').take(*len)); - assert!(Digest::from_hex(input).is_err()); - } - } - - #[test] - fn from_hex_invalid_char_should_fail() { - for char in "g %-".chars() { - let input = 
String::from_iter(iter::repeat('f').take(63).chain(iter::once(char))); - assert!(Digest::from_hex(input).is_err()); - } - } - - #[test] - fn should_display_digest_in_hex() { - let hash = Digest([0u8; 32]); - let hash_hex = format!("{:?}", hash); - assert_eq!( - hash_hex, - "0000000000000000000000000000000000000000000000000000000000000000" - ); - } - - #[test] - fn should_print_digest_lower_hex() { - let hash = Digest([10u8; 32]); - let hash_lower_hex = format!("{:x}", hash); - assert_eq!( - hash_lower_hex, - "0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a" - ) - } - - #[test] - fn should_print_digest_upper_hex() { - let hash = Digest([10u8; 32]); - let hash_lower_hex = format!("{:X}", hash); - assert_eq!( - hash_lower_hex, - "0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A" - ) - } - - #[test] - fn alternate_should_prepend_0x() { - let hash = Digest([0u8; 32]); - let hash_hex_alt = format!("{:#x}", hash); - assert_eq!( - hash_hex_alt, - "0x0000000000000000000000000000000000000000000000000000000000000000" - ) - } - - #[test] - fn bytesrepr_roundtrip() { - let mut rng = crate::new_rng(); - let hash = Digest::random(&mut rng); - bytesrepr::test_serialization_roundtrip(&hash); - } -} diff --git a/node/src/data_migration.rs b/node/src/data_migration.rs index ba224db48a..94320a489b 100644 --- a/node/src/data_migration.rs +++ b/node/src/data_migration.rs @@ -1,23 +1,20 @@ -use std::{env, fs, io, path::PathBuf}; +use std::{env, fs, io, path::PathBuf, sync::Arc}; use serde::{Deserialize, Serialize}; use thiserror::Error; -use toml::de::Error as TomlDecodeError; use tracing::info; -use casper_execution_engine::shared::newtypes::Blake2bHash; -use casper_types::{ProtocolVersion, PublicKey, SecretKey, Signature}; +use casper_types::{ + crypto, Chainspec, ChainspecRawBytes, Digest, ProtocolVersion, PublicKey, SecretKey, Signature, +}; use crate::{ - crypto, - reactor::validator::Config, - types::{chainspec, Chainspec}, - utils::{LoadError, 
Loadable, WithDir}, + reactor::main_reactor::Config, + utils::{ + chain_specification::error::Error as LoadChainspecError, LoadError, Loadable, WithDir, + }, }; -// This will be changed in favour of an actual old config type when the migration is not a no-op. -type OldConfig = toml::Value; - /// The name of the file for recording the new global state hash after a data migration. const POST_MIGRATION_STATE_HASH_FILENAME: &str = "post-migration-state-hash"; /// The folder under which the post-migration-state-hash file is written. @@ -27,7 +24,7 @@ const CONFIG_ROOT_DIR_OVERRIDE: &str = "CASPER_CONFIG_DIR"; /// Error returned as a result of migrating data. #[derive(Debug, Error)] -pub enum Error { +pub(crate) enum Error { /// Error serializing state hash info. #[error("error serializing state hash info: {0}")] SerializeStateHashInfo(bincode::Error), @@ -58,36 +55,18 @@ pub enum Error { #[error("invalid signature of state hash info")] InvalidSignatureOfStateHashInfo, - /// Error reading config file. - #[error("error reading config from {path}: {error}")] - ReadConfig { - /// The file path. - path: String, - /// The IO error. - error: io::Error, - }, - - /// Error decoding config file. - #[error("error reading config from {path}: {error}")] - DecodeConfig { - /// The file path. - path: String, - /// The TOML error. - error: TomlDecodeError, - }, - /// Error loading the secret key. #[error("error loading secret key: {0}")] - LoadSecretKey(LoadError), + LoadSecretKey(LoadError), /// Error loading the chainspec. 
#[error("error loading chainspec: {0}")] - LoadChainspec(chainspec::Error), + LoadChainspec(LoadChainspecError), } #[derive(Serialize, Deserialize)] struct PostMigrationInfo { - state_hash: Blake2bHash, + state_hash: Digest, protocol_version: ProtocolVersion, } @@ -107,7 +86,7 @@ struct SignedPostMigrationInfo { pub(crate) fn read_post_migration_info( protocol_version: ProtocolVersion, public_key: &PublicKey, -) -> Result, Error> { +) -> Result, Error> { do_read_post_migration_info(protocol_version, public_key, info_path()) } @@ -117,7 +96,7 @@ fn do_read_post_migration_info( protocol_version: ProtocolVersion, public_key: &PublicKey, path: PathBuf, -) -> Result, Error> { +) -> Result, Error> { // If the file doesn't exist, return `Ok(None)`. if !path.is_file() { return Ok(None); @@ -135,7 +114,7 @@ fn do_read_post_migration_info( crypto::verify( &signed_info.serialized_info, &signed_info.signature, - &public_key, + public_key, ) .map_err(|_| Error::InvalidSignatureOfStateHashInfo)?; @@ -156,7 +135,7 @@ fn do_read_post_migration_info( /// This must be called after a data migration in order to allow the node to read in the new root /// state on restart. fn write_post_migration_info( - state_hash: Blake2bHash, + state_hash: Digest, new_protocol_version: ProtocolVersion, secret_key: &SecretKey, path: PathBuf, @@ -196,25 +175,26 @@ fn info_path() -> PathBuf { } /// Migrates data from that specified in the old config file to that specified in the new one. -pub fn migrate_data( - _old_config: WithDir, +pub(crate) fn migrate_data( + _old_config: WithDir, new_config: WithDir, ) -> Result<(), Error> { let (new_root, new_config) = new_config.into_parts(); - let new_protocol_version = Chainspec::from_path(&new_root) + let new_protocol_version = <(Chainspec, ChainspecRawBytes)>::from_path(&new_root) .map_err(Error::LoadChainspec)? 
+ .0 .protocol_config .version; - let secret_key = new_config + let secret_key: Arc = new_config .consensus .secret_key_path .load(&new_root) .map_err(Error::LoadSecretKey)?; // Get this by actually migrating the global state data. - let state_hash = Blake2bHash::default(); + let state_hash = Digest::default(); - if state_hash != Blake2bHash::default() { + if state_hash != Digest::default() { write_post_migration_info(state_hash, new_protocol_version, &secret_key, info_path())?; } @@ -226,7 +206,6 @@ mod tests { use rand::Rng; use super::*; - use crate::crypto::AsymmetricKeyExt; #[test] fn should_write_then_read_info() { @@ -234,7 +213,7 @@ mod tests { let info_path = tempdir.path().join(POST_MIGRATION_STATE_HASH_FILENAME); let mut rng = crate::new_rng(); - let state_hash = Blake2bHash::new(&[rng.gen()]); + let state_hash = Digest::hash([rng.gen()]); let protocol_version = ProtocolVersion::from_parts(rng.gen(), rng.gen(), rng.gen()); let secret_key = SecretKey::random(&mut rng); @@ -262,7 +241,7 @@ mod tests { assert!(maybe_hash.is_none()); // Create the info file and check we can read it. - let state_hash = Blake2bHash::new(&[rng.gen()]); + let state_hash = Digest::hash([rng.gen()]); write_post_migration_info(state_hash, protocol_version, &secret_key, info_path.clone()) .unwrap(); assert!( @@ -295,7 +274,7 @@ mod tests { // Should return `Err` if the signature is invalid. let other_secret_key = SecretKey::random(&mut rng); - let state_hash = Blake2bHash::new(&[rng.gen()]); + let state_hash = Digest::hash([rng.gen()]); write_post_migration_info( state_hash, protocol_version, diff --git a/node/src/effect.rs b/node/src/effect.rs index 0427c6a60f..2347ed0a4c 100644 --- a/node/src/effect.rs +++ b/node/src/effect.rs @@ -1,15 +1,45 @@ //! Effects subsystem. //! //! Effects describe things that the creator of the effect intends to happen, producing a value upon -//! completion. They are, in fact, futures. +//! completion (they actually are boxed futures). //! //! 
A pinned, boxed future returning an event is called an effect and typed as an `Effect`, -//! where `Ev` is the event's type. Generally, `Ev` is an Event enum defined at the top level of -//! each component in the `crate::components` module. +//! where `Ev` is the event's type, as every effect must have its return value either wrapped in an +//! event through [`EffectExt::event`](EffectExt::event) or ignored using +//! [`EffectExt::ignore`](EffectExt::ignore). As an example, the +//! [`handle_event`](crate::components::Component::handle_event) function of a component always +//! returns `Effect`. //! -//! ## Using effects +//! # A primer on events //! -//! To create an effect, an `EffectBuilder` will be passed in from the relevant reactor. For +//! There are three distinct groups of events found around the node: +//! +//! * (unbound) events: These events are not associated with a particular reactor or component and +//! represent information or requests by themselves. An example is the +//! [`PeerBehaviorAnnouncement`](`crate::effect::announcements::PeerBehaviorAnnouncement`), it can +//! be emitted through an effect by different components and contains the ID of a peer that should +//! be shunned. It is not associated with a particular reactor or component though. +//! +//! While the node is running, these unbound events cannot exist on their own, instead they are +//! typically converted into a concrete reactor event by the effect builder as soon as they are +//! created. +//! +//! * reactor events: A running reactor has a single event type that encompasses all possible +//! unbound events that can occur during its operation and all component events of components it +//! is made of. Usually they are implemented as one large `enum` with only newtype-variants. +//! +//! * component events: Every component defines its own set of events, typically for internal use. +//! If the component is able to process unbound events like announcements or requests, it will +//! 
have a `From` implementation that allows converting them into a suitable component event. +//! +//! Component events are also created from the return values of effects: While effects do not +//! return events themselves when called, their return values are turned first into component +//! events through the [`event`](EffectExt) method. In a second step, inside the +//! reactors routing code, `wrap_effect` will then convert from component to reactor event. +//! +//! # Using effects +//! +//! To create an effect, an `EffectBuilder` will be passed in by the calling reactor runner. For //! example, given an effect builder `effect_builder`, we can create a `set_timeout` future and turn //! it into an effect: //! @@ -17,6 +47,7 @@ //! use std::time::Duration; //! use casper_node::effect::EffectExt; //! +//! // Note: This is our "component" event. //! enum Event { //! ThreeSecondsElapsed(Duration) //! } @@ -30,43 +61,47 @@ //! `Event::ThreeSecondsElapsed`. Note that effects do nothing on their own, they need to be passed //! to a [`reactor`](../reactor/index.html) to be executed. //! -//! ## Arbitrary effects +//! # Arbitrary effects //! -//! While it is technically possible to turn any future into an effect, it is advisable to only use -//! the effects explicitly listed in this module through traits to create them. Post-processing on -//! effects to turn them into events should also be kept brief. +//! While it is technically possible to turn any future into an effect, it is in general advisable +//! to only use the methods on [`EffectBuilder`] or short, anonymous futures to create effects. //! -//! ## Announcements and effects +//! # Announcements and requests //! -//! Some effects can be further classified into either announcements or requests, although these -//! properties are not reflected in the type system. +//! Events are usually classified into either announcements or requests, although these properties +//! are not reflected in the type system. //! -//! 
**Announcements** are effects emitted by components that are essentially "fire-and-forget"; the -//! component will never expect an answer for these and does not rely on them being handled. It is -//! also conceivable that they are being cloned and dispatched to multiple components by the -//! reactor. +//! **Announcements** are events that are essentially "fire-and-forget"; the component that created +//! the effect resulting in the creation of the announcement will never expect an "answer". +//! Announcements are often dispatched to multiple components by the reactor; since that usually +//! involves a [`clone`](`Clone::clone`), they should be kept light. //! -//! A good example is the arrival of a new deploy passed in by a client. Depending on the setup it -//! may be stored, buffered or, in certain testing setups, just discarded. None of this is a concern -//! of the component that talks to the client and deserializes the incoming deploy though, which -//! considers the deploy no longer its concern after it has returned an announcement effect. +//! A good example is the arrival of a new transaction passed in by a client. Depending on the setup +//! it may be stored, buffered or, in certain testing setups, just discarded. None of this is a +//! concern of the component that talks to the client and deserializes the incoming transaction +//! though, instead it simply returns an effect that produces an announcement. //! -//! **Requests** are complex effects that are used when a component needs something from -//! outside of itself (typically to be provided by another component); a request requires an -//! eventual response. +//! **Requests** are complex events that are used when a component needs something from other +//! components. Typically, an effect (which uses [`EffectBuilder::make_request`] in its +//! implementation) is called resulting in the actual request being scheduled and handled. In +//! 
contrast to announcements, requests must always be handled by exactly one component. //! -//! A request **must** have a `Responder` field, which a handler of a request **must** call at -//! some point. Failing to do so will result in a resource leak. +//! Every request has a [`Responder`]-typed field, which a handler of a request calls to produce +//! another effect that will send the return value to the original requesting component. Failing to +//! call the [`Responder::respond`] function will result in a runtime warning. -pub mod announcements; -pub mod requests; +pub(crate) mod announcements; +pub(crate) mod diagnostics_port; +pub(crate) mod incoming; +pub(crate) mod requests; use std::{ any::type_name, borrow::Cow, - collections::{BTreeMap, HashMap, HashSet}, + collections::{BTreeMap, BTreeSet, HashMap, HashSet}, fmt::{self, Debug, Display, Formatter}, future::Future, + mem, sync::Arc, time::{Duration, Instant}, }; @@ -74,71 +109,85 @@ use std::{ use datasize::DataSize; use futures::{channel::oneshot, future::BoxFuture, FutureExt}; use once_cell::sync::Lazy; -use serde::{de::DeserializeOwned, Serialize}; +use serde::{Serialize, Serializer}; use smallvec::{smallvec, SmallVec}; use tokio::{sync::Semaphore, time}; -use tracing::error; -#[cfg(not(feature = "fast-sync"))] -use tracing::warn; +use tracing::{debug, error, warn}; -use casper_execution_engine::{ - core::engine_state::{ - self, - era_validators::GetEraValidatorsError, - genesis::GenesisResult, - step::{StepRequest, StepResult}, - upgrade::{UpgradeConfig, UpgradeResult}, - BalanceRequest, BalanceResult, GetBidsRequest, GetBidsResult, QueryRequest, QueryResult, - MAX_PAYMENT, +use casper_binary_port::{ + ConsensusStatus, ConsensusValidatorChanges, LastProgress, NetworkName, RecordId, Uptime, +}; +use casper_storage::{ + block_store::types::ApprovalsHashes, + data_access_layer::{ + prefixed_values::{PrefixedValuesRequest, PrefixedValuesResult}, + tagged_values::{TaggedValuesRequest, TaggedValuesResult}, + 
AddressableEntityResult, BalanceRequest, BalanceResult, EraValidatorsRequest, + EraValidatorsResult, ExecutionResultsChecksumResult, PutTrieRequest, PutTrieResult, + QueryRequest, QueryResult, SeigniorageRecipientsRequest, SeigniorageRecipientsResult, + TrieRequest, TrieResult, }, - shared::{newtypes::Blake2bHash, stored_value::StoredValue}, - storage::{protocol_data::ProtocolData, trie::Trie}, + DbRawBytesSpec, }; use casper_types::{ - system::auction::EraValidators, EraId, ExecutionResult, Key, ProtocolVersion, PublicKey, - Transfer, U512, + execution::{Effects as ExecutionEffects, ExecutionResult}, + Approval, AvailableBlockRange, Block, BlockHash, BlockHeader, BlockSignatures, + BlockSynchronizerStatus, BlockV2, ChainspecRawBytes, DeployHash, Digest, EntityAddr, EraId, + ExecutionInfo, FinalitySignature, FinalitySignatureId, FinalitySignatureV2, HashAddr, Key, + NextUpgrade, Package, PackageAddr, ProtocolUpgradeConfig, PublicKey, TimeDiff, Timestamp, + Transaction, TransactionHash, TransactionId, Transfer, U512, }; use crate::{ components::{ - chainspec_loader::{CurrentRunInfo, NextUpgrade}, - consensus::BlockContext, - contract_runtime::EraValidatorsRequest, - deploy_acceptor, - fetcher::FetchResult, - small_network::GossipedAddress, + block_synchronizer::{ + GlobalStateSynchronizerError, GlobalStateSynchronizerResponse, TrieAccumulatorError, + TrieAccumulatorResponse, + }, + consensus::{ClContext, EraDump, ProposedBlock}, + contract_runtime::SpeculativeExecutionResult, + diagnostics_port::StopAtSpec, + fetcher::{FetchItem, FetchResult}, + gossiper::GossipItem, + network::{blocklist::BlocklistJustification, FromIncoming, NetworkInsights}, + transaction_acceptor, }, - crypto::hash::Digest, - effect::requests::LinearChainRequest, - reactor::{EventQueueHandle, QueueKind}, + contract_runtime::ExecutionPreState, + failpoints::FailpointActivation, + reactor::{main_reactor::ReactorState, EventQueueHandle, QueueKind}, types::{ - Block, BlockByHeight, BlockHash, 
BlockHeader, BlockSignatures, Chainspec, ChainspecInfo, - Deploy, DeployHash, DeployHeader, DeployMetadata, FinalitySignature, FinalizedBlock, Item, - ProtoBlock, TimeDiff, Timestamp, + appendable_block::AppendableBlock, BlockExecutionResultsOrChunk, + BlockExecutionResultsOrChunkId, BlockWithMetadata, ExecutableBlock, FinalizedBlock, + InvalidProposalError, LegacyDeploy, MetaBlock, MetaBlockState, NodeId, TransactionHeader, }, - utils::Source, + utils::{fmt_limit::FmtLimit, SharedFlag, Source}, }; use announcements::{ - ChainspecLoaderAnnouncement, ConsensusAnnouncement, ContractRuntimeAnnouncement, - ControlAnnouncement, DeployAcceptorAnnouncement, GossiperAnnouncement, LinearChainAnnouncement, - NetworkAnnouncement, RpcServerAnnouncement, + BlockAccumulatorAnnouncement, ConsensusAnnouncement, ContractRuntimeAnnouncement, + ControlAnnouncement, FatalAnnouncement, FetchedNewBlockAnnouncement, + FetchedNewFinalitySignatureAnnouncement, GossiperAnnouncement, MetaBlockAnnouncement, + PeerBehaviorAnnouncement, QueueDumpFormat, TransactionAcceptorAnnouncement, + TransactionBufferAnnouncement, UnexecutedBlockAnnouncement, UpgradeWatcherAnnouncement, }; +use casper_storage::data_access_layer::EntryPointExistsResult; +use diagnostics_port::DumpConsensusStateRequest; use requests::{ - BlockProposerRequest, BlockValidationRequest, ChainspecLoaderRequest, ConsensusRequest, - ContractRuntimeRequest, FetcherRequest, MetricsRequest, NetworkInfoRequest, NetworkRequest, - ProtoBlockRequest, StateStoreRequest, StorageRequest, + AcceptTransactionRequest, BeginGossipRequest, BlockAccumulatorRequest, + BlockSynchronizerRequest, BlockValidationRequest, ChainspecRawBytesRequest, ConsensusRequest, + ContractRuntimeRequest, FetcherRequest, MakeBlockExecutableRequest, MarkBlockCompletedRequest, + MetricsRequest, NetworkInfoRequest, NetworkRequest, ReactorInfoRequest, SetNodeStopRequest, + StorageRequest, SyncGlobalStateRequest, TransactionBufferRequest, TrieAccumulatorRequest, + 
UpgradeWatcherRequest, }; -use self::announcements::BlocklistAnnouncement; - /// A resource that will never be available, thus trying to acquire it will wait forever. -static UNOBTAINIUM: Lazy = Lazy::new(|| Semaphore::new(0)); +static UNOBTAINABLE: Lazy = Lazy::new(|| Semaphore::new(0)); /// A pinned, boxed future that produces one or more events. -pub type Effect = BoxFuture<'static, Multiple>; +pub(crate) type Effect = BoxFuture<'static, Multiple>; /// Multiple effects in a container. -pub type Effects = Multiple>; +pub(crate) type Effects = Multiple>; /// A small collection of rarely more than two items. /// @@ -146,40 +195,130 @@ pub type Effects = Multiple>; /// size of two items is chosen because one item is the most common use case, and large items are /// typically boxed. In the latter case two pointers and one enum variant discriminator is almost /// the same size as an empty vec, which is two pointers. -pub type Multiple = SmallVec<[T; 2]>; +pub(crate) type Multiple = SmallVec<[T; 2]>; + +/// The type of peers that should receive the gossip message. +#[derive(Debug, Serialize, PartialEq, Eq, Hash, Copy, Clone, DataSize)] +pub(crate) enum GossipTarget { + /// Both validators and non validators. + Mixed(EraId), + /// All peers. + All, +} + +impl Display for GossipTarget { + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + match self { + GossipTarget::Mixed(era_id) => write!(formatter, "gossip target mixed for {}", era_id), + GossipTarget::All => write!(formatter, "gossip target all"), + } + } +} /// A responder satisfying a request. #[must_use] #[derive(DataSize)] -pub struct Responder(Option>); +pub(crate) struct Responder { + /// Sender through which the response ultimately should be sent. + sender: Option>, + /// Reactor flag indicating shutdown. + is_shutting_down: SharedFlag, +} + +/// A responder that will automatically send a `None` on drop. 
+#[must_use] +#[derive(DataSize, Debug)] +pub(crate) struct AutoClosingResponder(Responder>); + +impl AutoClosingResponder { + /// Creates a new auto closing responder from a responder of `Option`. + pub(crate) fn from_opt_responder(responder: Responder>) -> Self { + AutoClosingResponder(responder) + } + + /// Extracts the inner responder. + fn into_inner(mut self) -> Responder> { + let is_shutting_down = self.0.is_shutting_down; + mem::replace( + &mut self.0, + Responder { + sender: None, + is_shutting_down, + }, + ) + } +} + +impl AutoClosingResponder { + /// Send `Some(data)` to the origin of the request. + pub(crate) async fn respond(self, data: T) { + self.into_inner().respond(Some(data)).await; + } + + /// Send `None` to the origin of the request. + pub(crate) async fn respond_none(self) { + self.into_inner().respond(None).await; + } +} + +impl Drop for AutoClosingResponder { + fn drop(&mut self) { + if let Some(sender) = self.0.sender.take() { + debug!( + sending_value = %self.0, + "responding None by dropping auto-close responder" + ); + // We still haven't answered, send an answer. + if let Err(_unsent_value) = sender.send(None) { + debug!( + unsent_value = %self.0, + "failed to auto-close responder, ignoring" + ); + } + } + } +} impl Responder { /// Creates a new `Responder`. #[inline] - fn new(sender: oneshot::Sender) -> Self { - Responder(Some(sender)) + fn new(sender: oneshot::Sender, is_shutting_down: SharedFlag) -> Self { + Responder { + sender: Some(sender), + is_shutting_down, + } } /// Helper method for tests. /// - /// Allows creating a responder manually. This function should not be used, unless you are - /// writing alternative infrastructure, e.g. for tests. + /// Allows creating a responder manually, without observing the shutdown flag. This function + /// should not be used, unless you are writing alternative infrastructure, e.g. for tests. 
#[cfg(test)] #[inline] - pub(crate) fn create(sender: oneshot::Sender) -> Self { - Responder::new(sender) + pub(crate) fn without_shutdown(sender: oneshot::Sender) -> Self { + Responder::new(sender, SharedFlag::global_shared()) } } -impl Responder { +impl Responder { /// Send `data` to the origin of the request. - pub async fn respond(mut self, data: T) { - if let Some(sender) = self.0.take() { - if sender.send(data).is_err() { - error!("could not send response to request down oneshot channel"); + pub(crate) async fn respond(mut self, data: T) { + if let Some(sender) = self.sender.take() { + if let Err(data) = sender.send(data) { + // If we cannot send a response down the channel, it means the original requester is + // no longer interested in our response. This typically happens during shutdowns, or + // in cases where an originating external request has been cancelled. + + debug!( + data=?FmtLimit::new(1000, &data), + "ignored failure to send response to request down oneshot channel" + ); } } else { - error!("tried to send a value down a responder channel, but it was already used"); + error!( + data=?FmtLimit::new(1000, &data), + "tried to send a value down a responder channel, but it was already used" + ); } } } @@ -198,29 +337,40 @@ impl Display for Responder { impl Drop for Responder { fn drop(&mut self) { - if self.0.is_some() { - // This is usually a very serious error, as another component will now be stuck. - error!( - "{} dropped without being responded to --- \ - this is always a bug and will likely cause another component to be stuck!", - self - ); + if self.sender.is_some() { + if self.is_shutting_down.is_set() { + debug!( + responder=?self, + "ignored dropping of responder during shutdown" + ); + } else { + // This is usually a very serious error, as another component will now be stuck. + // + // See the code `make_request` for more details. 
+ error!( + responder=?self, + "dropped without being responded to outside of shutdown" + ); + } } } } impl Serialize for Responder { - fn serialize(&self, serializer: S) -> Result - where - S: serde::Serializer, - { + fn serialize(&self, serializer: S) -> Result { serializer.serialize_str(&format!("{:?}", self)) } } +impl Serialize for AutoClosingResponder { + fn serialize(&self, serializer: S) -> Result { + self.0.serialize(serializer) + } +} + /// Effect extension for futures, used to convert futures into actual effects. -pub trait EffectExt: Future + Send { - /// Finalizes a future into an effect that returns an event. +pub(crate) trait EffectExt: Future + Send { + /// Finalizes a future into an effect that returns a single event. /// /// The function `f` is used to translate the returned value from an effect into an event. fn event(self, f: F) -> Effects @@ -235,7 +385,7 @@ pub trait EffectExt: Future + Send { /// Effect extension for futures, used to convert futures returning a `Result` into two different /// effects. -pub trait EffectResultExt { +pub(crate) trait EffectResultExt { /// The type the future will return if `Ok`. type Value; /// The type the future will return if `Err`. @@ -252,33 +402,7 @@ pub trait EffectResultExt { U: 'static; } -/// Effect extension for futures, used to convert futures returning an `Option` into two different -/// effects. -pub trait EffectOptionExt { - /// The type the future will return if `Some`. - type Value; - - /// Finalizes a future returning an `Option` into two different effects. - /// - /// The function `f_some` is used to translate the returned value from an effect into an event, - /// while the function `f_none` does the same for a returned `None`. - fn map_or_else(self, f_some: F, f_none: G) -> Effects - where - F: FnOnce(Self::Value) -> U + 'static + Send, - G: FnOnce() -> U + 'static + Send, - U: 'static; - - /// Finalizes a future returning an `Option` into two different effects. 
- /// - /// The function `f` is used to translate the returned value from an effect into an event, - /// In the case of `None`, empty vector of effects is returned. - fn map_some(self, f: F) -> Effects - where - F: FnOnce(Self::Value) -> U + 'static + Send, - U: 'static; -} - -impl EffectExt for T +impl EffectExt for T where T: Future + Send + 'static + Sized, { @@ -298,7 +422,6 @@ where impl EffectResultExt for T where T: Future> + Send + 'static + Sized, - T: ?Sized, { type Value = V; type Error = E; @@ -316,53 +439,23 @@ where } } -impl EffectOptionExt for T -where - T: Future> + Send + 'static + Sized, - T: ?Sized, -{ - type Value = V; - - fn map_or_else(self, f_some: F, f_none: G) -> Effects - where - F: FnOnce(V) -> U + 'static + Send, - G: FnOnce() -> U + 'static + Send, - U: 'static, - { - smallvec![self - .map(|option| option.map_or_else(f_none, f_some)) - .map(|item| smallvec![item]) - .boxed()] - } - - /// Finalizes a future returning an `Option`. - /// - /// The function `f` is used to translate the returned value from an effect into an event, - /// In the case of `None`, empty vector is returned. - fn map_some(self, f: F) -> Effects - where - F: FnOnce(Self::Value) -> U + 'static + Send, - U: 'static, - { - smallvec![self - .map(|option| option - .map(|el| smallvec![f(el)]) - .unwrap_or_else(|| smallvec![])) - .boxed()] - } -} - /// A builder for [`Effect`](type.Effect.html)s. /// -/// Provides methods allowing the creation of effects which need to be scheduled -/// on the reactor's event queue, without giving direct access to this queue. +/// Provides methods allowing the creation of effects which need to be scheduled on the reactor's +/// event queue, without giving direct access to this queue. +/// +/// The `REv` type parameter indicates which reactor event effects created by this builder will +/// produce as side effects. 
#[derive(Debug)] -pub struct EffectBuilder(EventQueueHandle); +pub(crate) struct EffectBuilder { + /// A handle to the referenced event queue. + event_queue: EventQueueHandle, +} // Implement `Clone` and `Copy` manually, as `derive` will make it depend on `REv` otherwise. impl Clone for EffectBuilder { fn clone(&self) -> Self { - EffectBuilder(self.0) + *self } } @@ -370,27 +463,51 @@ impl Copy for EffectBuilder {} impl EffectBuilder { /// Creates a new effect builder. - pub fn new(event_queue_handle: EventQueueHandle) -> Self { - EffectBuilder(event_queue_handle) + pub(crate) fn new(event_queue: EventQueueHandle) -> Self { + EffectBuilder { event_queue } } /// Extract the event queue handle out of the effect builder. - #[cfg(test)] - pub fn into_inner(self) -> EventQueueHandle { - self.0 + pub(crate) fn into_inner(self) -> EventQueueHandle { + self.event_queue } /// Performs a request. /// - /// Given a request `Q`, that when completed will yield a result of `T`, produces a future - /// that will + /// Given a request `Q`, that when completed will yield a result of `T`, produces a future that + /// will /// /// 1. create an event to send the request to the respective component (thus `Q: Into`), - /// 2. waits for a response and returns it. + /// 2. wait for a response and return it. /// - /// This function is usually only used internally by effects implement on the effects builder, + /// This function is usually only used internally by effects implemented on the effects builder, /// but IO components may also make use of it. + /// + /// # Cancellation safety + /// + /// This future is cancellation safe: If it is dropped without being polled, it indicates + /// that the original requester is no longer interested in the result, which will be discarded. 
pub(crate) async fn make_request(self, f: F, queue_kind: QueueKind) -> T + where + T: Send + 'static, + Q: Into, + F: FnOnce(Responder) -> Q, + { + let (event, wait_future) = self.create_request_parts(f); + + // Schedule the request before awaiting the response. + self.event_queue.schedule(event, queue_kind).await; + wait_future.await + } + + /// Creates the part necessary to make a request. + /// + /// A request usually consists of two parts: The request event that needs to be scheduled on the + /// reactor queue and associated future that allows waiting for the response. This function + /// creates both of them without processing or spawning either. + /// + /// Usually you will want to call the higher level `make_request` function. + pub(crate) fn create_request_parts(self, f: F) -> (REv, impl Future) where T: Send + 'static, Q: Into, @@ -400,26 +517,36 @@ impl EffectBuilder { let (sender, receiver) = oneshot::channel(); // Create response function. - let responder = Responder::new(sender); + let responder = Responder::new(sender, self.event_queue.shutdown_flag()); // Now inject the request event into the event loop. let request_event = f(responder).into(); - self.0.schedule(request_event, queue_kind).await; - - match receiver.await { - Ok(value) => value, - Err(err) => { - // The channel should never be closed, ever. If it is, we pretend nothing happened - // though, instead of crashing. - error!(%err, ?queue_kind, "request for {} channel closed, this may be a bug? \ - check if a component is stuck from now on ", type_name::()); - - // We cannot produce any value to satisfy the request, so we just abandon this task - // by waiting on a resource we can never acquire. 
- let _ = UNOBTAINIUM.acquire().await; - panic!("should never obtain unobtainium semaphore"); + + let fut = async move { + match receiver.await { + Ok(value) => value, + Err(err) => { + // The channel should usually not be closed except during shutdowns, as it + // indicates a panic or disappearance of the remote that is + // supposed to process the request. + // + // If it does happen, we pretend nothing happened instead of crashing. + if self.event_queue.shutdown_flag().is_set() { + debug!(%err, channel=?type_name::(), "ignoring closed channel due to shutdown"); + } else { + error!(%err, channel=?type_name::(), "request for channel closed, this may be a bug? \ + check if a component is stuck from now on"); + } + + // We cannot produce any value to satisfy the request, so we just abandon this + // task by waiting on a resource we can never acquire. + let _ = UNOBTAINABLE.acquire().await; + panic!("should never obtain unobtainable semaphore"); + } } - } + }; + + (request_event, fut) } /// Run and end effect immediately. @@ -428,7 +555,7 @@ impl EffectBuilder { /// "do nothing", as it will still cause a task to be spawned. #[inline(always)] #[allow(clippy::manual_async_fn)] - pub fn immediately(self) -> impl Future + Send { + pub(crate) fn immediately(self) -> impl Future + Send { // Note: This function is implemented manually without `async` sugar because the `Send` // inference seems to not work in all cases otherwise. async {} @@ -437,26 +564,20 @@ impl EffectBuilder { /// Reports a fatal error. Normally called via the `crate::fatal!()` macro. /// /// Usually causes the node to cease operations quickly and exit/crash. - // - // Note: This function is implemented manually without `async` sugar because the `Send` - // inference seems to not work in all cases otherwise. 
- pub async fn fatal(self, file: &'static str, line: u32, msg: String) + pub(crate) async fn fatal(self, file: &'static str, line: u32, msg: String) where - REv: From, + REv: From, { - self.0 - .schedule( - ControlAnnouncement::FatalError { file, line, msg }, - QueueKind::Control, - ) - .await + self.event_queue + .schedule(FatalAnnouncement { file, line, msg }, QueueKind::Control) + .await; } /// Sets a timeout. pub(crate) async fn set_timeout(self, timeout: Duration) -> Duration { let then = Instant::now(); time::sleep(timeout).await; - Instant::now() - then + then.elapsed() } /// Retrieve a snapshot of the nodes current metrics formatted as string. @@ -473,52 +594,63 @@ impl EffectBuilder { .await } - /// Retrieves block at `height` from the Linear Chain component. - pub(crate) async fn get_block_at_height_local(self, height: u64) -> Option + /// Sends a network message. + /// + /// The message is queued and sent, but no delivery guaranteed. Will return after the message + /// has been buffered in the outgoing kernel buffer and thus is subject to backpressure. + pub(crate) async fn send_message

(self, dest: NodeId, payload: P) where - REv: From>, + REv: From>, { self.make_request( - |responder| LinearChainRequest::BlockAtHeightLocal(height, responder), - QueueKind::Regular, + |responder| NetworkRequest::SendMessage { + dest: Box::new(dest), + payload: Box::new(payload), + respond_after_queueing: false, + auto_closing_responder: AutoClosingResponder::from_opt_responder(responder), + }, + QueueKind::Network, ) - .await + .await; } - /// Sends a network message. + /// Enqueues a network message. /// /// The message is queued in "fire-and-forget" fashion, there is no guarantee that the peer - /// will receive it. - pub(crate) async fn send_message(self, dest: I, payload: P) + /// will receive it. Returns as soon as the message is queued inside the networking component. + pub(crate) async fn enqueue_message

(self, dest: NodeId, payload: P) where - REv: From>, + REv: From>, { self.make_request( |responder| NetworkRequest::SendMessage { dest: Box::new(dest), payload: Box::new(payload), - responder, + respond_after_queueing: true, + auto_closing_responder: AutoClosingResponder::from_opt_responder(responder), }, QueueKind::Network, ) - .await + .await; } - /// Broadcasts a network message. - /// - /// Broadcasts a network message to all peers connected at the time the message is sent. - pub async fn broadcast_message(self, payload: P) + /// Broadcasts a network message to validator peers in the given era. + pub(crate) async fn broadcast_message_to_validators

(self, payload: P, era_id: EraId) where - REv: From>, + REv: From>, { self.make_request( - |responder| NetworkRequest::Broadcast { - payload: Box::new(payload), - responder, + |responder| { + debug!("validator broadcast for {}", era_id); + NetworkRequest::ValidatorBroadcast { + payload: Box::new(payload), + era_id, + auto_closing_responder: AutoClosingResponder::from_opt_responder(responder), + } }, QueueKind::Network, ) - .await + .await; } /// Gossips a network message. @@ -527,83 +659,95 @@ impl EffectBuilder { /// excluding the indicated ones, and sends each a copy of the message. /// /// Returns the IDs of the chosen nodes. - pub async fn gossip_message( + pub(crate) async fn gossip_message

( self, payload: P, + gossip_target: GossipTarget, count: usize, - exclude: HashSet, - ) -> HashSet + exclude: HashSet, + ) -> HashSet where - REv: From>, - I: Send + 'static, + REv: From>, P: Send, { self.make_request( |responder| NetworkRequest::Gossip { payload: Box::new(payload), + gossip_target, count, exclude, - responder, + auto_closing_responder: AutoClosingResponder::from_opt_responder(responder), }, QueueKind::Network, ) .await + .unwrap_or_default() + } + + /// Gets a structure describing the current network status. + pub(crate) async fn get_network_insights(self) -> NetworkInsights + where + REv: From, + { + self.make_request( + |responder| NetworkInfoRequest::Insight { responder }, + QueueKind::Regular, + ) + .await } - /// Gets connected network peers. - pub async fn network_peers(self) -> BTreeMap + /// Gets a map of the current network peers to their socket addresses. + pub(crate) async fn network_peers(self) -> BTreeMap where - REv: From>, - I: Send + 'static, + REv: From, { self.make_request( - |responder| NetworkInfoRequest::GetPeers { responder }, + |responder| NetworkInfoRequest::Peers { responder }, QueueKind::Api, ) .await } - /// Announces that a network message has been received. - pub(crate) async fn announce_message_received(self, sender: I, payload: P) + /// Gets up to `count` fully-connected network peers in random order. + pub async fn get_fully_connected_peers(self, count: usize) -> Vec where - REv: From>, + REv: From, { - self.0 - .schedule( - NetworkAnnouncement::MessageReceived { sender, payload }, - QueueKind::NetworkIncoming, - ) - .await; + self.make_request( + |responder| NetworkInfoRequest::FullyConnectedPeers { count, responder }, + QueueKind::NetworkInfo, + ) + .await } - /// Announces that we should gossip our own public listening address. - pub(crate) async fn announce_gossip_our_address(self, our_address: GossipedAddress) + /// Announces which transactions have expired. 
+ pub(crate) async fn announce_expired_transactions(self, hashes: Vec) where - REv: From>, + REv: From, { - self.0 + self.event_queue .schedule( - NetworkAnnouncement::GossipOurAddress(our_address), - QueueKind::Regular, + TransactionBufferAnnouncement::TransactionsExpired(hashes), + QueueKind::Validation, ) .await; } - /// Announces that a new peer has connected. - pub(crate) async fn announce_new_peer(self, peer_id: I) + /// Announces an incoming network message. + pub(crate) async fn announce_incoming

(self, sender: NodeId, payload: P) where - REv: From>, + REv: FromIncoming

, { - self.0 + self.event_queue .schedule( - NetworkAnnouncement::NewPeer(peer_id), + >::from_incoming(sender, payload), QueueKind::NetworkIncoming, ) .await; } /// Announces that a gossiper has received a new item, where the item's ID is the complete item. - pub(crate) async fn announce_complete_item_received_via_gossip(self, item: T::Id) + pub(crate) async fn announce_complete_item_received_via_gossip(self, item: T::Id) where REv: From>, { @@ -612,633 +756,1143 @@ impl EffectBuilder { "{} must be an item where the ID _is_ the complete item", item ); - self.0 + self.event_queue .schedule( GossiperAnnouncement::NewCompleteItem(item), - QueueKind::Regular, + QueueKind::Gossip, ) .await; } - /// Announces that the HTTP API server has received a deploy. - pub(crate) async fn announce_deploy_received( + /// Announces that a gossiper has received a full item, where the item's ID is NOT the complete + /// item. + pub(crate) async fn announce_item_body_received_via_gossip( self, - deploy: Box, - responder: Option>>, + item: Box, + sender: NodeId, ) where - REv: From, + REv: From>, { - self.0 + self.event_queue .schedule( - RpcServerAnnouncement::DeployReceived { deploy, responder }, - QueueKind::Api, + GossiperAnnouncement::NewItemBody { item, sender }, + QueueKind::Gossip, ) .await; } - /// Announces that a deploy not previously stored has now been accepted and stored. - pub(crate) fn announce_new_deploy_accepted( - self, - deploy: Box, - source: Source, - ) -> impl Future - where - REv: From>, - { - self.0.schedule( - DeployAcceptorAnnouncement::AcceptedNewDeploy { deploy, source }, - QueueKind::Regular, - ) - } - - /// Announces that an invalid deploy has been received. - pub(crate) fn announce_invalid_deploy( - self, - deploy: Box, - source: Source, - ) -> impl Future - where - REv: From>, - { - self.0.schedule( - DeployAcceptorAnnouncement::InvalidDeploy { deploy, source }, - QueueKind::Regular, - ) - } - - /// Announce new block has been created. 
- pub(crate) async fn announce_linear_chain_block( + /// Announces that the block accumulator has received and stored a new finality signature. + pub(crate) async fn announce_finality_signature_accepted( self, - block: Block, - execution_results: HashMap, + finality_signature: Box, ) where - REv: From, - { - self.0 - .schedule( - ContractRuntimeAnnouncement::linear_chain_block(block, execution_results), - QueueKind::Regular, - ) - .await - } - - /// Announce that a block had been executed before. - pub(crate) async fn announce_block_already_executed(self, block: Block) - where - REv: From, - { - self.0 - .schedule( - ContractRuntimeAnnouncement::block_already_executed(block), - QueueKind::Regular, - ) - .await - } - - /// Announce upgrade activation point read. - pub(crate) async fn announce_upgrade_activation_point_read(self, next_upgrade: NextUpgrade) - where - REv: From, + REv: From, { - self.0 + self.event_queue .schedule( - ChainspecLoaderAnnouncement::UpgradeActivationPointRead(next_upgrade), - QueueKind::Regular, + BlockAccumulatorAnnouncement::AcceptedNewFinalitySignature { finality_signature }, + QueueKind::FinalitySignature, ) - .await + .await; } - /// Puts the given block into the linear block store. - pub(crate) async fn put_block_to_storage(self, block: Box) -> bool + /// Request that a block be made executable, if able to: `ExecutableBlock`. + /// + /// Completion means that the block can be enqueued for processing by the execution engine via + /// the contract_runtime component. + pub(crate) async fn make_block_executable( + self, + block_hash: BlockHash, + ) -> Option where - REv: From, + REv: From, { self.make_request( - |responder| StorageRequest::PutBlock { block, responder }, - QueueKind::Regular, + |responder| MakeBlockExecutableRequest { + block_hash, + responder, + }, + QueueKind::FromStorage, ) .await } - /// Gets the requested block from the linear block store. 
- pub(crate) async fn get_block_from_storage(self, block_hash: BlockHash) -> Option + /// Request that a block with a specific height be marked completed. + /// + /// Completion means that the block itself (along with its header) and all of its transactions + /// have been persisted to storage and its global state root hash is missing no dependencies + /// in the global state. + pub(crate) async fn mark_block_completed(self, block_height: u64) -> bool where - REv: From, + REv: From, { self.make_request( - |responder| StorageRequest::GetBlock { - block_hash, + |responder| MarkBlockCompletedRequest { + block_height, responder, }, - QueueKind::Regular, + QueueKind::FromStorage, ) .await } - /// Gets the requested block header from the linear block store. - pub(crate) async fn get_block_header_from_storage( + /// Try to accept a transaction received from the JSON-RPC server. + pub(crate) async fn try_accept_transaction( self, - block_hash: BlockHash, - ) -> Option + transaction: Transaction, + is_speculative: bool, + ) -> Result<(), transaction_acceptor::Error> where - REv: From, + REv: From, { self.make_request( - |responder| StorageRequest::GetBlockHeader { - block_hash, + |responder| AcceptTransactionRequest { + transaction, + is_speculative, responder, }, - QueueKind::Regular, + QueueKind::Api, ) .await } - /// Gets the requested signatures for a given block hash. - pub(crate) async fn get_signatures_from_storage( + /// Announces that a transaction not previously stored has now been accepted and stored. 
+ pub(crate) fn announce_new_transaction_accepted( self, - block_hash: BlockHash, - ) -> Option + transaction: Arc, + source: Source, + ) -> impl Future where - REv: From, + REv: From, { - self.make_request( - |responder| StorageRequest::GetBlockSignatures { - block_hash, - responder, + self.event_queue.schedule( + TransactionAcceptorAnnouncement::AcceptedNewTransaction { + transaction, + source, }, - QueueKind::Regular, + QueueKind::Validation, ) - .await } - /// Puts the requested finality signatures into storage. - pub(crate) async fn put_signatures_to_storage(self, signatures: BlockSignatures) -> bool + /// Announces that we have received a gossip message from this peer, + /// implying the peer holds the indicated item. + pub(crate) async fn announce_gossip_received(self, item_id: T::Id, sender: NodeId) where - REv: From, + REv: From>, + T: GossipItem, { - self.make_request( - |responder| StorageRequest::PutBlockSignatures { - signatures, - responder, - }, - QueueKind::Regular, - ) - .await + self.event_queue + .schedule( + GossiperAnnouncement::GossipReceived { item_id, sender }, + QueueKind::Gossip, + ) + .await; } - /// Gets the requested block's transfers from storage. - pub(crate) async fn get_block_transfers_from_storage( - self, - block_hash: BlockHash, - ) -> Option> + /// Announces that we have finished gossiping the indicated item. + pub(crate) async fn announce_finished_gossiping(self, item_id: T::Id) where - REv: From, + REv: From>, + T: GossipItem, { - self.make_request( - |responder| StorageRequest::GetBlockTransfers { - block_hash, - responder, - }, - QueueKind::Regular, - ) - .await + self.event_queue + .schedule( + GossiperAnnouncement::FinishedGossiping(item_id), + QueueKind::Gossip, + ) + .await; } - /// Requests the block header at the given height. 
- pub(crate) async fn get_block_header_at_height_from_storage( + pub(crate) fn announce_invalid_transaction( self, - height: u64, - ) -> Option + transaction: Transaction, + source: Source, + ) -> impl Future where - REv: From, + REv: From, + { + self.event_queue.schedule( + TransactionAcceptorAnnouncement::InvalidTransaction { + transaction, + source, + }, + QueueKind::Validation, + ) + } + + /// Announces upgrade activation point read. + pub(crate) async fn upgrade_watcher_announcement(self, maybe_next_upgrade: Option) + where + REv: From, + { + self.event_queue + .schedule( + UpgradeWatcherAnnouncement(maybe_next_upgrade), + QueueKind::Control, + ) + .await; + } + + /// Announces a committed Step success. + pub(crate) async fn announce_commit_step_success(self, era_id: EraId, effects: ExecutionEffects) + where + REv: From, + { + self.event_queue + .schedule( + ContractRuntimeAnnouncement::CommitStepSuccess { era_id, effects }, + QueueKind::ContractRuntime, + ) + .await; + } + + pub(crate) async fn update_contract_runtime_state(self, new_pre_state: ExecutionPreState) + where + REv: From, + { + self.event_queue + .schedule( + ContractRuntimeRequest::UpdatePreState { new_pre_state }, + QueueKind::ContractRuntime, + ) + .await; + } + + /// Announces validators for upcoming era. + pub(crate) async fn announce_upcoming_era_validators( + self, + era_that_is_ending: EraId, + upcoming_era_validators: BTreeMap>, + ) where + REv: From, + { + self.event_queue + .schedule( + ContractRuntimeAnnouncement::UpcomingEraValidators { + era_that_is_ending, + upcoming_era_validators, + }, + QueueKind::ContractRuntime, + ) + .await; + } + + pub(crate) async fn announce_new_era_gas_price(self, era_id: EraId, next_era_gas_price: u8) + where + REv: From, + { + self.event_queue + .schedule( + ContractRuntimeAnnouncement::NextEraGasPrice { + era_id, + next_era_gas_price, + }, + QueueKind::ContractRuntime, + ) + .await; + } + + /// Begins gossiping an item. 
+ pub(crate) async fn begin_gossip(self, item_id: T::Id, source: Source, target: GossipTarget) + where + T: GossipItem, + REv: From>, { self.make_request( - |responder| StorageRequest::GetBlockHeaderAtHeight { height, responder }, - QueueKind::Regular, + |responder| BeginGossipRequest { + item_id, + source, + target, + responder, + }, + QueueKind::Gossip, + ) + .await; + } + + /// Puts the given block into the linear block store. + pub(crate) async fn put_block_to_storage(self, block: Arc) -> bool + where + REv: From, + { + self.make_request( + |responder| StorageRequest::PutBlock { block, responder }, + QueueKind::ToStorage, ) .await } - /// Requests the block at the given height. - pub(crate) async fn get_block_at_height_from_storage(self, height: u64) -> Option + /// Puts the given approvals hashes into the linear block store. + pub(crate) async fn put_approvals_hashes_to_storage( + self, + approvals_hashes: Box, + ) -> bool where REv: From, { self.make_request( - |responder| StorageRequest::GetBlockAtHeight { height, responder }, - QueueKind::Regular, + |responder| StorageRequest::PutApprovalsHashes { + approvals_hashes, + responder, + }, + QueueKind::ToStorage, ) .await } - /// Requests the highest block. - pub(crate) async fn get_highest_block_from_storage(self) -> Option + /// Puts the given block and approvals hashes into the linear block store. + pub(crate) async fn put_executed_block_to_storage( + self, + block: Arc, + approvals_hashes: Box, + execution_results: HashMap, + ) -> bool where REv: From, { self.make_request( - |responder| StorageRequest::GetHighestBlock { responder }, - QueueKind::Regular, + |responder| StorageRequest::PutExecutedBlock { + block, + approvals_hashes, + execution_results, + responder, + }, + QueueKind::ToStorage, + ) + .await + } + + /// Gets the requested block from the linear block store. 
+ pub(crate) async fn get_block_from_storage(self, block_hash: BlockHash) -> Option + where + REv: From, + { + self.make_request( + |responder| StorageRequest::GetBlock { + block_hash, + responder, + }, + QueueKind::FromStorage, ) .await } - /// Requests the header of the switch block at the given era ID. - pub(crate) async fn get_switch_block_header_at_era_id_from_storage( + pub(crate) async fn get_era_utilization( self, era_id: EraId, + block_height: u64, + transaction_count: u64, + ) -> Option<(u64, u64, u64)> + where + REv: From, + { + self.make_request( + |responder| StorageRequest::GetEraUtilizationScore { + era_id, + block_height, + switch_block_utilization: transaction_count, + responder, + }, + QueueKind::FromStorage, + ) + .await + } + + pub(crate) async fn is_block_stored(self, block_hash: BlockHash) -> bool + where + REv: From, + { + self.make_request( + |responder| StorageRequest::IsBlockStored { + block_hash, + responder, + }, + QueueKind::FromStorage, + ) + .await + } + + /// Gets the requested `ApprovalsHashes` from storage. + pub(crate) async fn get_approvals_hashes_from_storage( + self, + block_hash: BlockHash, + ) -> Option + where + REv: From, + { + self.make_request( + |responder| StorageRequest::GetApprovalsHashes { + block_hash, + responder, + }, + QueueKind::FromStorage, + ) + .await + } + + pub(crate) async fn get_raw_data( + self, + record_id: RecordId, + key: Vec, + ) -> Option + where + REv: From, + { + self.make_request( + |responder| StorageRequest::GetRawData { + record_id, + key, + responder, + }, + QueueKind::FromStorage, + ) + .await + } + + /// Gets the requested block header from the linear block store. 
+ pub(crate) async fn get_block_header_from_storage( + self, + block_hash: BlockHash, + only_from_available_block_range: bool, ) -> Option where REv: From, { self.make_request( - |responder| StorageRequest::GetSwitchBlockHeaderAtEraId { era_id, responder }, - QueueKind::Regular, + |responder| StorageRequest::GetBlockHeader { + block_hash, + only_from_available_block_range, + responder, + }, + QueueKind::FromStorage, ) .await } - /// Requests the switch block at the given era ID. - pub(crate) async fn get_switch_block_at_era_id_from_storage( + pub(crate) async fn get_block_header_at_height_from_storage( self, - era_id: EraId, - ) -> Option + block_height: u64, + only_from_available_block_range: bool, + ) -> Option where REv: From, { self.make_request( - |responder| StorageRequest::GetSwitchBlockAtEraId { era_id, responder }, - QueueKind::Regular, + |responder| StorageRequest::GetBlockHeaderByHeight { + block_height, + only_from_available_block_range, + responder, + }, + QueueKind::FromStorage, + ) + .await + } + + pub(crate) async fn get_latest_switch_block_header_from_storage(self) -> Option + where + REv: From, + { + self.make_request( + |responder| StorageRequest::GetLatestSwitchBlockHeader { responder }, + QueueKind::FromStorage, ) .await } - /// Requests the key block header for the given era ID, ie. the header of the switch block at - /// the era before (if one exists). - pub(crate) async fn get_key_block_header_for_era_id_from_storage( + pub(crate) async fn get_switch_block_header_by_era_id_from_storage( self, era_id: EraId, ) -> Option where REv: From, { - let era_before = era_id.checked_sub(1)?; - self.get_switch_block_header_at_era_id_from_storage(era_before) - .await + self.make_request( + |responder| StorageRequest::GetSwitchBlockHeaderByEra { era_id, responder }, + QueueKind::FromStorage, + ) + .await } - /// Requests the key block for the given era ID, ie. the switch block at the era before - /// (if one exists). 
- pub(crate) async fn get_key_block_for_era_id_from_storage(self, era_id: EraId) -> Option + /// Gets the requested signature for a given block hash. + pub(crate) async fn get_signature_from_storage( + self, + block_hash: BlockHash, + public_key: PublicKey, + ) -> Option where REv: From, { - let era_before = era_id.checked_sub(1)?; - self.get_switch_block_at_era_id_from_storage(era_before) - .await + self.make_request( + |responder| StorageRequest::GetBlockSignature { + block_hash, + public_key: Box::new(public_key), + responder, + }, + QueueKind::FromStorage, + ) + .await } - /// Requests the highest switch block. - // TODO - remove once used. - #[allow(unused)] - pub(crate) async fn get_highest_switch_block_from_storage(self) -> Option + pub(crate) async fn get_execution_results_from_storage( + self, + block_hash: BlockHash, + ) -> Option> where REv: From, { self.make_request( - |responder| StorageRequest::GetHighestSwitchBlock { responder }, - QueueKind::Regular, + |responder| StorageRequest::GetExecutionResults { + block_hash, + responder, + }, + QueueKind::FromStorage, ) .await } - /// Read a trie by its hash key - pub(crate) async fn read_trie(self, trie_key: Blake2bHash) -> Option> + /// Puts a block header to storage. + pub(crate) async fn put_block_header_to_storage(self, block_header: Box) -> bool where - REv: From, + REv: From, { self.make_request( - |responder| ContractRuntimeRequest::ReadTrie { - trie_key, + |responder| StorageRequest::PutBlockHeader { + block_header, responder, }, + QueueKind::ToStorage, + ) + .await + } + + /// Puts the requested block signatures into storage. + /// + /// If `signatures.proofs` is empty, no attempt to store will be made, an error will be logged, + /// and this function will return `false`. 
+ pub(crate) async fn put_signatures_to_storage(self, signatures: BlockSignatures) -> bool + where + REv: From, + { + self.make_request( + |responder| StorageRequest::PutBlockSignatures { + signatures, + responder, + }, + QueueKind::ToStorage, + ) + .await + } + + pub(crate) async fn put_finality_signature_to_storage( + self, + signature: FinalitySignature, + ) -> bool + where + REv: From, + { + self.make_request( + |responder| StorageRequest::PutFinalitySignature { + signature: Box::new(signature), + responder, + }, + QueueKind::ToStorage, + ) + .await + } + + /// Gets the requested block's transfers from storage. + pub(crate) async fn get_block_transfers_from_storage( + self, + block_hash: BlockHash, + ) -> Option> + where + REv: From, + { + self.make_request( + |responder| StorageRequest::GetBlockTransfers { + block_hash, + responder, + }, + QueueKind::FromStorage, + ) + .await + } + + /// Returns the era IDs of the blocks in which the given transactions were executed. If none + /// of the transactions have been executed yet, an empty set will be returned. + pub(crate) async fn get_transactions_era_ids( + self, + transaction_hashes: HashSet, + ) -> HashSet + where + REv: From, + { + self.make_request( + |responder| StorageRequest::GetTransactionsEraIds { + transaction_hashes, + responder, + }, + QueueKind::FromStorage, + ) + .await + } + + /// Requests the highest complete block. + pub(crate) async fn get_highest_complete_block_from_storage(self) -> Option + where + REv: From, + { + self.make_request( + |responder| StorageRequest::GetHighestCompleteBlock { responder }, + QueueKind::FromStorage, + ) + .await + } + + /// Requests the highest complete block header. 
+ pub(crate) async fn get_highest_complete_block_header_from_storage(self) -> Option + where + REv: From, + { + self.make_request( + |responder| StorageRequest::GetHighestCompleteBlockHeader { responder }, + QueueKind::FromStorage, + ) + .await + } + + /// Requests the height range of fully available blocks (not just block headers). + pub(crate) async fn get_available_block_range_from_storage(self) -> AvailableBlockRange + where + REv: From, + { + self.make_request( + |responder| StorageRequest::GetAvailableBlockRange { responder }, + QueueKind::FromStorage, + ) + .await + } + + /// Synchronize global state under the given root hash. + pub(crate) async fn sync_global_state( + self, + block_hash: BlockHash, + state_root_hash: Digest, + ) -> Result + where + REv: From, + { + self.make_request( + |responder| SyncGlobalStateRequest { + block_hash, + state_root_hash, + responder, + }, + QueueKind::SyncGlobalState, + ) + .await + } + + /// Get a trie or chunk by its ID. + pub(crate) async fn get_trie(self, request: TrieRequest) -> TrieResult + where + REv: From, + { + self.make_request( + |responder| ContractRuntimeRequest::GetTrie { request, responder }, + QueueKind::ContractRuntime, + ) + .await + } + + /// This is currently used for reporting purposes (node status). It should not be used + /// for load bearing determinations, as the reactor state can change between asking for it + /// and being notified about it due to event processing latency. 
+ pub(crate) async fn get_reactor_state(self) -> ReactorState + where + REv: From, + { + self.make_request( + |responder| ReactorInfoRequest::ReactorState { responder }, + QueueKind::Regular, + ) + .await + } + + pub(crate) async fn get_last_progress(self) -> LastProgress + where + REv: From, + { + self.make_request( + |responder| ReactorInfoRequest::LastProgress { responder }, + QueueKind::Regular, + ) + .await + } + + pub(crate) async fn get_uptime(self) -> Uptime + where + REv: From, + { + self.make_request( + |responder| ReactorInfoRequest::Uptime { responder }, + QueueKind::Regular, + ) + .await + } + + pub(crate) async fn get_network_name(self) -> NetworkName + where + REv: From, + { + self.make_request( + |responder| ReactorInfoRequest::NetworkName { responder }, QueueKind::Regular, ) .await } - /// Puts a trie into the trie store and asynchronously returns any missing descendant trie keys. #[allow(unused)] - pub(crate) async fn put_trie_and_find_missing_descendant_trie_keys( + pub(crate) async fn get_balance_holds_interval(self) -> TimeDiff + where + REv: From, + { + self.make_request( + |responder| ReactorInfoRequest::BalanceHoldsInterval { responder }, + QueueKind::Regular, + ) + .await + } + + pub(crate) async fn get_block_synchronizer_status(self) -> BlockSynchronizerStatus + where + REv: From, + { + self.make_request( + |responder| BlockSynchronizerRequest::Status { responder }, + QueueKind::Regular, + ) + .await + } + + /// Puts a trie into the trie store; succeeds only if all the children of the trie are already + /// present in the store. + /// Returns the digest under which the trie was stored if successful. 
+ pub(crate) async fn put_trie_if_all_children_present( self, - trie: Box>, - ) -> Result, engine_state::Error> + request: PutTrieRequest, + ) -> PutTrieResult where REv: From, { self.make_request( - |responder| ContractRuntimeRequest::PutTrie { trie, responder }, - QueueKind::Regular, + |responder| ContractRuntimeRequest::PutTrie { request, responder }, + QueueKind::ContractRuntime, + ) + .await + } + + pub(crate) async fn get_current_gas_price(self, era_id: EraId) -> Option + where + REv: From, + { + self.make_request( + |responder| ContractRuntimeRequest::GetEraGasPrice { era_id, responder }, + QueueKind::ContractRuntime, ) .await } - /// Puts the given deploy into the deploy store. - pub(crate) async fn put_deploy_to_storage(self, deploy: Box) -> bool + pub(crate) async fn put_transaction_to_storage(self, transaction: Transaction) -> bool where REv: From, { self.make_request( - |responder| StorageRequest::PutDeploy { deploy, responder }, - QueueKind::Regular, + |responder| StorageRequest::PutTransaction { + transaction: Arc::new(transaction), + responder, + }, + QueueKind::ToStorage, ) .await } - /// Gets the requested deploys from the deploy store. - pub(crate) async fn get_deploys_from_storage( + /// Gets the requested transactions from storage. + /// + /// Returns the "original" transactions, which are the first received by the node, along with a + /// potentially different set of approvals used during execution of the recorded block. 
+ pub(crate) async fn get_transactions_from_storage( self, - deploy_hashes: Multiple, - ) -> Vec> + transaction_hashes: Vec, + ) -> SmallVec<[Option<(Transaction, Option>)>; 1]> where REv: From, { self.make_request( - |responder| StorageRequest::GetDeploys { - deploy_hashes: deploy_hashes.to_vec(), + |responder| StorageRequest::GetTransactions { + transaction_hashes, responder, }, - QueueKind::Regular, + QueueKind::FromStorage, ) .await } - /// Stores the given execution results for the deploys in the given block in the linear block - /// store. - pub(crate) async fn put_execution_results_to_storage( + /// Gets the requested transaction and its execution info from storage by TransactionHash. + pub(crate) async fn get_transaction_and_exec_info_from_storage( + self, + transaction_hash: TransactionHash, + with_finalized_approvals: bool, + ) -> Option<(Transaction, Option)> + where + REv: From, + { + self.make_request( + |responder| StorageRequest::GetTransactionAndExecutionInfo { + transaction_hash, + with_finalized_approvals, + responder, + }, + QueueKind::FromStorage, + ) + .await + } + + /// Gets the requested legacy deploy from the legacy deploy store by DeployHash only. + /// + /// Returns the legacy deploy containing the set of approvals used during execution of the + /// recorded block, if known. + pub(crate) async fn get_stored_legacy_deploy( + self, + deploy_hash: DeployHash, + ) -> Option + where + REv: From, + { + self.make_request( + |responder| StorageRequest::GetLegacyDeploy { + deploy_hash, + responder, + }, + QueueKind::FromStorage, + ) + .await + } + + /// Gets the requested transaction from storage by TransactionId. + /// + /// Returns the "original" transaction, which is the first received by the node, along with a + /// potentially different set of approvals used during execution of the recorded block. 
+ pub(crate) async fn get_stored_transaction( + self, + transaction_id: TransactionId, + ) -> Option + where + REv: From, + { + self.make_request( + |responder| StorageRequest::GetTransaction { + transaction_id, + responder, + }, + QueueKind::FromStorage, + ) + .await + } + + pub(crate) async fn is_transaction_stored(self, transaction_id: TransactionId) -> bool + where + REv: From, + { + self.make_request( + |responder| StorageRequest::IsTransactionStored { + transaction_id, + responder, + }, + QueueKind::FromStorage, + ) + .await + } + + /// Stores the given execution results for the transactions in the given block in the linear + /// block store. + pub(crate) async fn put_execution_artifacts_to_storage( self, block_hash: BlockHash, - execution_results: HashMap, + block_height: u64, + era_id: EraId, + execution_results: HashMap, ) where REv: From, { self.make_request( |responder| StorageRequest::PutExecutionResults { block_hash: Box::new(block_hash), + block_height, + era_id, execution_results, responder, }, - QueueKind::Regular, + QueueKind::ToStorage, ) - .await + .await; } - /// Gets the requested deploys from the deploy store. - pub(crate) async fn get_deploy_and_metadata_from_storage( + /// Gets the requested block and its finality signatures. + pub(crate) async fn get_block_at_height_with_metadata_from_storage( self, - deploy_hash: DeployHash, - ) -> Option<(Deploy, DeployMetadata)> + block_height: u64, + only_from_available_block_range: bool, + ) -> Option where REv: From, { self.make_request( - |responder| StorageRequest::GetDeployAndMetadata { - deploy_hash, + |responder| StorageRequest::GetBlockAndMetadataByHeight { + block_height, + only_from_available_block_range, responder, }, - QueueKind::Regular, + QueueKind::FromStorage, ) .await } - /// Gets the requested block and its associated metadata. 
- pub(crate) async fn get_block_at_height_with_metadata_from_storage( + pub(crate) async fn collect_past_blocks_with_metadata( self, - block_height: u64, - ) -> Option<(Block, BlockSignatures)> + range: std::ops::Range, + only_from_available_block_range: bool, + ) -> Vec> where REv: From, { - self.make_request( - |responder| StorageRequest::GetBlockAndMetadataByHeight { + futures::future::join_all(range.into_iter().map(|block_height| { + self.get_block_at_height_with_metadata_from_storage( block_height, - responder, - }, - QueueKind::Regular, - ) + only_from_available_block_range, + ) + })) .await + .into_iter() + .collect() } - /// Gets the requested block by hash with its associated metadata. - pub(crate) async fn get_block_with_metadata_from_storage( + /// Gets the requested finality signature from storage. + pub(crate) async fn get_finality_signature_from_storage( self, - block_hash: BlockHash, - ) -> Option<(Block, BlockSignatures)> + id: Box, + ) -> Option where REv: From, { self.make_request( - |responder| StorageRequest::GetBlockAndMetadataByHash { - block_hash, - responder, - }, - QueueKind::Regular, + |responder| StorageRequest::GetFinalitySignature { id, responder }, + QueueKind::FromStorage, ) .await } - /// Get the highest block with its associated metadata. - pub(crate) async fn get_highest_block_with_metadata_from_storage( - self, - ) -> Option<(Block, BlockSignatures)> + pub(crate) async fn is_finality_signature_stored(self, id: Box) -> bool where REv: From, { self.make_request( - |responder| StorageRequest::GetHighestBlockWithMetadata { responder }, - QueueKind::Regular, + |responder| StorageRequest::IsFinalitySignatureStored { id, responder }, + QueueKind::FromStorage, ) .await } - /// Gets the requested deploy using the `DeployFetcher`. - pub(crate) async fn fetch_deploy( + /// Fetches an item from a fetcher. 
+ pub(crate) async fn fetch( self, - deploy_hash: DeployHash, - peer: I, - ) -> Option> + id: T::Id, + peer: NodeId, + validation_metadata: Box, + ) -> FetchResult where - REv: From>, - I: Send + 'static, + REv: From>, + T: FetchItem + 'static, { self.make_request( - |responder| FetcherRequest::Fetch { - id: deploy_hash, + |responder| FetcherRequest { + id, peer, + validation_metadata, responder, }, - QueueKind::Regular, + QueueKind::Fetch, ) .await } - /// Gets the requested block using the `BlockFetcher` - pub(crate) async fn fetch_block( + pub(crate) async fn fetch_trie( self, - block_hash: BlockHash, - peer: I, - ) -> Option> + hash: Digest, + peers: Vec, + ) -> Result where - REv: From>, - I: Send + 'static, + REv: From, { self.make_request( - |responder| FetcherRequest::Fetch { - id: block_hash, - peer, + |responder| TrieAccumulatorRequest { + hash, + peers, responder, }, - QueueKind::Regular, + QueueKind::SyncGlobalState, ) .await } - /// Requests a linear chain block at `block_height`. - pub(crate) async fn fetch_block_by_height( + /// Passes the timestamp of a future block for which transactions are to be proposed. + pub(crate) async fn request_appendable_block( self, - block_height: u64, - peer: I, - ) -> Option> + timestamp: Timestamp, + era_id: EraId, + request_expiry: Timestamp, + ) -> AppendableBlock where - REv: From>, - I: Send + 'static, + REv: From, { self.make_request( - |responder| FetcherRequest::Fetch { - id: block_height, - peer, + |responder| TransactionBufferRequest::GetAppendableBlock { + timestamp, + era_id, + request_expiry, responder, }, - QueueKind::Regular, + QueueKind::Consensus, ) .await } - /// Passes the timestamp of a future block for which deploys are to be proposed. - pub(crate) async fn request_proto_block( + /// Enqueues a finalized block execution. 
+ pub(crate) async fn enqueue_block_for_execution( self, - block_context: BlockContext, - past_deploys: HashSet, - next_finalized: u64, - random_bit: bool, - ) -> (ProtoBlock, BlockContext) - where - REv: From, + executable_block: ExecutableBlock, + meta_block_state: MetaBlockState, + ) where + REv: From + From, { - let proto_block = self + // Get the key block height for the current protocol version's activation point, i.e. the + // height of the final block of the previous protocol version. + let key_block_height_for_activation_point = self .make_request( - |responder| { - BlockProposerRequest::RequestProtoBlock(ProtoBlockRequest { - current_instant: block_context.timestamp(), - past_deploys, - next_finalized, - responder, - random_bit, - }) + |responder| StorageRequest::GetKeyBlockHeightForActivationPoint { responder }, + QueueKind::FromStorage, + ) + .await + .unwrap_or_else(|| { + warn!("key block height for current activation point unknown"); + 0 + }); + + self.event_queue + .schedule( + ContractRuntimeRequest::EnqueueBlockForExecution { + executable_block, + key_block_height_for_activation_point, + meta_block_state, }, - QueueKind::Regular, + QueueKind::ContractRuntime, ) .await; - (proto_block, block_context) } - /// Passes a finalized proto-block to the block executor component to execute it. - pub(crate) async fn execute_block(self, finalized_block: FinalizedBlock) - where + pub(crate) async fn enqueue_protocol_upgrade( + self, + upgrade_config: ProtocolUpgradeConfig, + next_block_height: u64, + parent_hash: BlockHash, + parent_seed: Digest, + ) where REv: From, { - self.0 + self.event_queue .schedule( - ContractRuntimeRequest::ExecuteBlock(finalized_block), - QueueKind::Regular, + ContractRuntimeRequest::DoProtocolUpgrade { + protocol_upgrade_config: upgrade_config, + next_block_height, + parent_hash, + parent_seed, + }, + QueueKind::Control, ) - .await + .await; } - /// Checks whether the deploys included in the block exist on the network. 
This includes - /// the block's timestamp, in order that it be checked against the timestamp of the deploys - /// within the block. - pub(crate) async fn validate_block( + /// Checks whether the transactions included in the block exist on the network and that + /// the block is valid. + pub(crate) async fn validate_block( self, - sender: I, - block: Block, - block_timestamp: Timestamp, - ) -> (bool, Block) + sender: NodeId, + proposed_block_height: u64, + block: ProposedBlock, + ) -> Result<(), Box> where - REv: From>, + REv: From, { self.make_request( |responder| BlockValidationRequest { + proposed_block_height, block, sender, responder, - block_timestamp, }, QueueKind::Regular, ) .await } - /// Checks whether the deploys included in the proto block exist on the network. This includes - /// the block's timestamp, in order that it be checked against the timestamp of the deploys - /// within the block. - pub(crate) async fn validate_proto_block( - self, - sender: I, - block: ProtoBlock, - block_timestamp: Timestamp, - ) -> (bool, ProtoBlock) + /// Announces that a block has been proposed. + pub(crate) async fn announce_proposed_block(self, proposed_block: ProposedBlock) where - REv: From>, + REv: From, { - self.make_request( - |responder| BlockValidationRequest { - block, - sender, - responder, - block_timestamp, - }, - QueueKind::Regular, - ) - .await + self.event_queue + .schedule( + ConsensusAnnouncement::Proposed(Box::new(proposed_block)), + QueueKind::Consensus, + ) + .await; } - /// Announces that a proto block has been finalized. + /// Announces that a block has been finalized. pub(crate) async fn announce_finalized_block(self, finalized_block: FinalizedBlock) where REv: From, { - self.0 + self.event_queue .schedule( ConsensusAnnouncement::Finalized(Box::new(finalized_block)), - QueueKind::Regular, + QueueKind::Consensus, ) - .await + .await; } - /// Announces that a finality signature has been created. 
- pub(crate) async fn announce_created_finality_signature( - self, - finality_signature: FinalitySignature, - ) where - REv: From, + /// Announces that a meta block has been created or its state has changed. + pub(crate) async fn announce_meta_block(self, meta_block: MetaBlock) + where + REv: From, + { + self.event_queue + .schedule(MetaBlockAnnouncement(meta_block), QueueKind::Regular) + .await; + } + + /// Announces that a finalized block has been created, but it was not + /// executed. + pub(crate) async fn announce_not_enqueuing_old_executable_block(self, block_height: u64) + where + REv: From, { - self.0 + self.event_queue .schedule( - ConsensusAnnouncement::CreatedFinalitySignature(Box::new(finality_signature)), + UnexecutedBlockAnnouncement(block_height), QueueKind::Regular, ) - .await + .await; } /// An equivocation has been detected. @@ -1250,471 +1904,440 @@ impl EffectBuilder { ) where REv: From, { - self.0 + self.event_queue .schedule( ConsensusAnnouncement::Fault { era_id, public_key: Box::new(public_key), timestamp, }, - QueueKind::Regular, + QueueKind::Consensus, ) - .await + .await; } - /// Announce the intent to disconnect from a specific peer, which consensus thinks is faulty. - pub(crate) async fn announce_disconnect_from_peer(self, peer: I) - where - REv: From>, + /// Blocks a specific peer due to a transgression. + /// + /// This function will also emit a log message for the block. + pub(crate) async fn announce_block_peer_with_justification( + self, + offender: NodeId, + justification: BlocklistJustification, + ) where + REv: From, { - self.0 + warn!(%offender, %justification, "banning peer"); + self.event_queue .schedule( - BlocklistAnnouncement::OffenseCommitted(Box::new(peer)), - QueueKind::Regular, + PeerBehaviorAnnouncement::OffenseCommitted { + offender: Box::new(offender), + justification: Box::new(justification), + }, + QueueKind::NetworkInfo, ) - .await + .await; } - /// The linear chain has stored a newly-created block. 
- pub(crate) async fn announce_block_added(self, block: Box) + /// Gets the next scheduled upgrade, if any. + pub(crate) async fn get_next_upgrade(self) -> Option where - REv: From, + REv: From + Send, { - self.0 - .schedule( - LinearChainAnnouncement::BlockAdded(block), - QueueKind::Regular, - ) + self.make_request(UpgradeWatcherRequest, QueueKind::Control) .await } - /// The linear chain has stored a new finality signature. - pub(crate) async fn announce_finality_signature(self, fs: Box) + /// Requests a query be executed on the Contract Runtime component. + pub(crate) async fn query_global_state(self, request: QueryRequest) -> QueryResult where - REv: From, + REv: From, { - self.0 - .schedule( - LinearChainAnnouncement::NewFinalitySignature(fs), - QueueKind::Regular, - ) - .await + self.make_request( + |responder| ContractRuntimeRequest::Query { request, responder }, + QueueKind::ContractRuntime, + ) + .await } - /// Runs the genesis process on the contract runtime. - pub(crate) async fn commit_genesis( + /// Retrieves an `AddressableEntity` from under the given entity address (or key, if the former + /// is not found) in global state. + pub(crate) async fn get_addressable_entity( self, - chainspec: Arc, - ) -> Result + state_root_hash: Digest, + entity_addr: EntityAddr, + ) -> AddressableEntityResult where REv: From, { self.make_request( - |responder| ContractRuntimeRequest::CommitGenesis { - chainspec, + |responder| ContractRuntimeRequest::GetAddressableEntity { + state_root_hash, + entity_addr, responder, }, - QueueKind::Regular, + QueueKind::ContractRuntime, ) .await } - /// Runs the upgrade process on the contract runtime. - pub(crate) async fn upgrade_contract_runtime( + /// Retrieves an `EntryPointValue` from under the given key in global state if present. 
+ pub(crate) async fn does_entry_point_exist( self, - upgrade_config: Box, - ) -> Result + state_root_hash: Digest, + contract_hash: HashAddr, + entry_point_name: String, + ) -> EntryPointExistsResult where REv: From, { self.make_request( - |responder| ContractRuntimeRequest::Upgrade { - upgrade_config, + |responder| ContractRuntimeRequest::GetEntryPointExists { + state_root_hash, + contract_hash, + entry_point_name, responder, }, - QueueKind::Regular, + QueueKind::ContractRuntime, ) .await } - /// Gets the requested chainspec info from the chainspec loader. - pub(crate) async fn get_chainspec_info(self) -> ChainspecInfo + /// Retrieves a `Package` from under the given key in global state if present. + pub(crate) async fn get_package( + self, + state_root_hash: Digest, + package_addr: PackageAddr, + ) -> Option> where - REv: From + Send, + REv: From, { - self.make_request(ChainspecLoaderRequest::GetChainspecInfo, QueueKind::Regular) - .await + let key = Key::Hash(package_addr); + let query_request = QueryRequest::new(state_root_hash, key, vec![]); + + match self.query_global_state(query_request).await { + QueryResult::RootNotFound | QueryResult::Failure(_) => None, + QueryResult::ValueNotFound(_) => { + let query_request = + QueryRequest::new(state_root_hash, Key::SmartContract(package_addr), vec![]); + debug!("requesting under different key"); + if let QueryResult::Success { value, .. } = + self.query_global_state(query_request).await + { + value.into_package().map(Box::new) + } else { + None + } + } + QueryResult::Success { value, .. } => value + .into_contract_package() + .map(Package::from) + .map(Box::new), + } } - /// Gets the information about the current run of the node software. - pub(crate) async fn get_current_run_info(self) -> CurrentRunInfo + /// Requests a query be executed on the Contract Runtime component. 
+ pub(crate) async fn get_balance(self, request: BalanceRequest) -> BalanceResult where - REv: From, + REv: From, { self.make_request( - ChainspecLoaderRequest::GetCurrentRunInfo, - QueueKind::Regular, + |responder| ContractRuntimeRequest::GetBalance { request, responder }, + QueueKind::ContractRuntime, ) .await } - /// Loads potentially previously stored state from storage. - /// - /// Key must be a unique key across the the application, as all keys share a common namespace. + /// Returns a map of validators weights for all eras as known from `root_hash`. /// - /// If an error occurs during state loading or no data is found, returns `None`. - #[allow(unused)] - pub(crate) async fn load_state(self, key: Cow<'static, [u8]>) -> Option + /// This operation is read only. + pub(crate) async fn get_era_validators_from_contract_runtime( + self, + request: EraValidatorsRequest, + ) -> EraValidatorsResult where - REv: From, - T: DeserializeOwned, + REv: From, { - // There is an ugly truth hidden in here: Due to object safety issues, we cannot ship the - // actual values around, but only the serialized bytes. For this reason this function - // retrieves raw bytes from storage and perform deserialization here. - // - // Errors are prominently logged but not treated further in any way. self.make_request( - move |responder| StateStoreRequest::Load { key, responder }, - QueueKind::Regular, + |responder| ContractRuntimeRequest::GetEraValidators { request, responder }, + QueueKind::ContractRuntime, ) .await - .map(|data| bincode::deserialize(&data)) - .transpose() - .unwrap_or_else(|err| { - let type_name = type_name::(); - panic!( - "could not deserialize state from storage type name {:?} err {:?}", - type_name, err - ); - }) } - /// Retrieves finalized deploys from blocks that were created more recently than the TTL. 
- pub(crate) async fn get_finalized_deploys( + pub(crate) async fn get_seigniorage_recipients_snapshot_from_contract_runtime( self, - ttl: TimeDiff, - ) -> Vec<(DeployHash, DeployHeader)> + request: SeigniorageRecipientsRequest, + ) -> SeigniorageRecipientsResult where - REv: From, + REv: From, { self.make_request( - move |responder| StorageRequest::GetFinalizedDeploys { ttl, responder }, - QueueKind::Regular, + |responder| ContractRuntimeRequest::GetSeigniorageRecipients { request, responder }, + QueueKind::ContractRuntime, ) .await } - /// Save state to storage. - /// - /// Key must be a unique key across the the application, as all keys share a common namespace. - /// - /// Returns whether or not storing the state was successful. A component that requires state to - /// be successfully stored should check the return value and act accordingly. - #[cfg(not(feature = "fast-sync"))] - pub(crate) async fn save_state(self, key: Cow<'static, [u8]>, value: T) -> bool - where - REv: From, - T: Serialize, - { - match bincode::serialize(&value) { - Ok(data) => { - self.make_request( - move |responder| StateStoreRequest::Save { - key, - data, - responder, - }, - QueueKind::Regular, - ) - .await; - true - } - Err(err) => { - let type_name = type_name::(); - warn!(%type_name, %err, "Error serializing state"); - false - } - } - } - /// Requests a query be executed on the Contract Runtime component. 
- pub(crate) async fn query_global_state( - self, - query_request: QueryRequest, - ) -> Result + pub(crate) async fn get_tagged_values(self, request: TaggedValuesRequest) -> TaggedValuesResult where REv: From, { self.make_request( - |responder| ContractRuntimeRequest::Query { - query_request, - responder, - }, - QueueKind::Regular, + |responder| ContractRuntimeRequest::GetTaggedValues { request, responder }, + QueueKind::ContractRuntime, ) .await } - pub(crate) async fn is_verified_account(self, account_key: Key) -> Option + pub(crate) async fn get_prefixed_values( + self, + request: PrefixedValuesRequest, + ) -> PrefixedValuesResult where REv: From, - REv: From, { - if let Some(block) = self.get_highest_block_from_storage().await { - let state_hash = (*block.state_root_hash()).into(); - let query_request = QueryRequest::new(state_hash, account_key, vec![]); - if let Ok(QueryResult::Success { value, .. }) = - self.query_global_state(query_request).await - { - if let StoredValue::Account(account) = *value { - let purse_uref = account.main_purse(); - let balance_request = BalanceRequest::new(state_hash, purse_uref); - if let Ok(balance_result) = self.get_balance(balance_request).await { - if let Some(motes) = balance_result.motes() { - return Some(motes >= &*MAX_PAYMENT); - } - } - } - } - } - None + self.make_request( + |responder| ContractRuntimeRequest::QueryByPrefix { request, responder }, + QueueKind::ContractRuntime, + ) + .await } - /// Requests a query be executed on the Contract Runtime component. - pub(crate) async fn get_balance( + /// Returns the value of the execution results checksum stored in the ChecksumRegistry for the + /// given state root hash. 
+ pub(crate) async fn get_execution_results_checksum( self, - balance_request: BalanceRequest, - ) -> Result + state_root_hash: Digest, + ) -> ExecutionResultsChecksumResult where REv: From, { self.make_request( - |responder| ContractRuntimeRequest::GetBalance { - balance_request, + |responder| ContractRuntimeRequest::GetExecutionResultsChecksum { + state_root_hash, responder, }, - QueueKind::Regular, + QueueKind::ContractRuntime, ) .await } - /// Returns `ProtocolData` by `ProtocolVersion`. - /// - /// This operation is read only. - pub(crate) async fn get_protocol_data( + /// Get our public key from consensus, and if we're a validator, the next round length. + pub(crate) async fn consensus_status(self) -> Option + where + REv: From, + { + self.make_request(ConsensusRequest::Status, QueueKind::Consensus) + .await + } + + /// Returns a list of validator status changes, by public key. + pub(crate) async fn get_consensus_validator_changes(self) -> ConsensusValidatorChanges + where + REv: From, + { + self.make_request(ConsensusRequest::ValidatorChanges, QueueKind::Consensus) + .await + } + + /// Dump consensus state for a specific era, using the supplied function to serialize the + /// output. + pub(crate) async fn diagnostics_port_dump_consensus_state( self, - protocol_version: ProtocolVersion, - ) -> Result>, engine_state::Error> + era_id: Option, + serialize: fn(&EraDump<'_>) -> Result, Cow<'static, str>>, + ) -> Result, Cow<'static, str>> where - REv: From, + REv: From, { self.make_request( - |responder| ContractRuntimeRequest::GetProtocolData { - protocol_version, + |responder| DumpConsensusStateRequest { + era_id, + serialize, responder, }, - QueueKind::Regular, + QueueKind::Control, ) .await } - /// Returns a map of validators weights for all eras as known from `root_hash`. - /// - /// This operation is read only. 
- pub(crate) async fn get_era_validators_from_contract_runtime( + /// Dump the event queue contents to the diagnostics port, using the given serializer. + pub(crate) async fn diagnostics_port_dump_queue(self, dump_format: QueueDumpFormat) + where + REv: From, + { + self.make_request( + |responder| ControlAnnouncement::QueueDumpRequest { + dump_format, + finished: responder, + }, + QueueKind::Control, + ) + .await; + } + + /// Activates/deactivates a failpoint from a given activation. + pub(crate) async fn activate_failpoint(self, activation: FailpointActivation) + where + REv: From, + { + self.event_queue + .schedule( + ControlAnnouncement::ActivateFailpoint { activation }, + QueueKind::Control, + ) + .await; + } + + /// Announce that the node be shut down due to a request from a user. + pub(crate) async fn announce_user_shutdown_request(self) + where + REv: From, + { + self.event_queue + .schedule( + ControlAnnouncement::ShutdownDueToUserRequest, + QueueKind::Control, + ) + .await; + } + + /// Announce that a block which wasn't previously stored on this node has been fetched and + /// stored. + pub(crate) async fn announce_fetched_new_block(self, block: Arc, peer: NodeId) + where + REv: From, + { + self.event_queue + .schedule( + FetchedNewBlockAnnouncement { block, peer }, + QueueKind::Fetch, + ) + .await; + } + + /// Announce that a finality signature which wasn't previously stored on this node has been + /// fetched and stored. + pub(crate) async fn announce_fetched_new_finality_signature( self, - request: EraValidatorsRequest, - ) -> Result + finality_signature: Box, + peer: NodeId, + ) where + REv: From, + { + self.event_queue + .schedule( + FetchedNewFinalitySignatureAnnouncement { + finality_signature, + peer, + }, + QueueKind::Fetch, + ) + .await; + } + + /// Get the bytes for the chainspec file and genesis_accounts + /// and global_state bytes if the files are present. 
+ pub(crate) async fn get_chainspec_raw_bytes(self) -> Arc where - REv: From, + REv: From + Send, { self.make_request( - |responder| ContractRuntimeRequest::GetEraValidators { request, responder }, - QueueKind::Regular, + ChainspecRawBytesRequest::GetChainspecRawBytes, + QueueKind::NetworkInfo, ) .await } - /// Requests a query be executed on the Contract Runtime component. - pub(crate) async fn get_bids( + /// Stores a set of given finalized approvals in storage. + /// + /// Any previously stored finalized approvals for the given hash are quietly overwritten + pub(crate) async fn store_finalized_approvals( self, - get_bids_request: GetBidsRequest, - ) -> Result + transaction_hash: TransactionHash, + finalized_approvals: BTreeSet, + ) -> bool where - REv: From, + REv: From, { self.make_request( - |responder| ContractRuntimeRequest::GetBids { - get_bids_request, + |responder| StorageRequest::StoreFinalizedApprovals { + transaction_hash, + finalized_approvals, responder, }, - QueueKind::Regular, + QueueKind::ToStorage, ) .await } - /// Runs the end of era step using the system smart contract. - pub(crate) async fn run_step( + /// Requests execution of a single transaction, without committing its effects. Intended to be + /// used for debugging & discovery purposes. + pub(crate) async fn speculatively_execute( self, - step_request: StepRequest, - ) -> Result + block_header: Box, + transaction: Box, + ) -> SpeculativeExecutionResult where REv: From, { self.make_request( - |responder| ContractRuntimeRequest::Step { - step_request, + |responder| ContractRuntimeRequest::SpeculativelyExecute { + block_header, + transaction, responder, }, - QueueKind::Regular, + QueueKind::ContractRuntime, ) .await } - /// Gets the correct era validators set for the given era. - /// Takes upgrades and emergency restarts into account based on the `initial_state_root_hash` - /// and `activation_era_id` parameters. 
- pub(crate) async fn get_era_validators(self, era_id: EraId) -> Option> + /// Reads block execution results (or chunk) from Storage component. + pub(crate) async fn get_block_execution_results_or_chunk_from_storage( + self, + id: BlockExecutionResultsOrChunkId, + ) -> Option where - REv: From + From + From, + REv: From, { - let CurrentRunInfo { - activation_point, - protocol_version, - initial_state_root_hash, - } = self.get_current_run_info().await; - let activation_era_id = activation_point.era_id(); - if era_id < activation_era_id { - // we don't support getting the validators from before the last upgrade - return None; - } - if era_id == activation_era_id { - // in the activation era, we read the validators from the global state; we use the - // global state hash of the first block in the era, if it exists - if we can't get it, - // we use the initial_state_root_hash passed from the chainspec loader - let root_hash = if era_id.is_genesis() { - // genesis era - use block at height 0 - self.get_block_header_at_height_from_storage(0) - .await - .map(|hdr| *hdr.state_root_hash()) - .unwrap_or(initial_state_root_hash) - } else { - // non-genesis - calculate the height based on the key block - let maybe_key_block_header = self - .get_key_block_header_for_era_id_from_storage(era_id) - .await; - // this has to be a match because `Option::and_then` can't deal with async closures - match maybe_key_block_header { - None => None, - Some(kb_hdr) => { - self.get_block_header_at_height_from_storage(kb_hdr.height() + 1) - .await - } - } - // default to the initial_state_root_hash if there was no key block or no block - // above the key block for the era - .map_or(initial_state_root_hash, |hdr| *hdr.state_root_hash()) - }; - let req = EraValidatorsRequest::new(root_hash.into(), protocol_version); - self.get_era_validators_from_contract_runtime(req) - .await - .ok() - .and_then(|era_validators| era_validators.get(&era_id).cloned()) - } else { - // in other eras, we just use the 
validators from the key block - self.get_key_block_header_for_era_id_from_storage(era_id) - .await - .and_then(|kb_hdr| kb_hdr.next_era_validator_weights().cloned()) - } + self.make_request( + |responder| StorageRequest::GetBlockExecutionResultsOrChunk { id, responder }, + QueueKind::FromStorage, + ) + .await } - /// Checks whether the given validator is bonded in the given era. - pub(crate) async fn is_bonded_validator( + /// Gets peers for a given block from the block accumulator. + pub(crate) async fn get_block_accumulated_peers( self, - validator: PublicKey, - era_id: EraId, - latest_state_root_hash: Option, - protocol_version: ProtocolVersion, - ) -> Result - where - REv: From + From + From, - { - // try just reading the era validators first - let maybe_era_validators = self.get_era_validators(era_id).await; - let maybe_is_currently_bonded = - maybe_era_validators.map(|validators| validators.contains_key(&validator)); - - match maybe_is_currently_bonded { - // if we know whether the validator is bonded, just return that - Some(is_bonded) => Ok(is_bonded), - // if not, try checking future eras with the latest state root hash - None => match latest_state_root_hash { - // no root hash later than initial -> we just assume the validator is not bonded - None => Ok(false), - // otherwise, check with contract runtime - Some(state_root_hash) => self - .make_request( - |responder| ContractRuntimeRequest::IsBonded { - state_root_hash, - era_id, - protocol_version, - public_key: validator, - responder, - }, - QueueKind::Regular, - ) - .await - .or_else(|error| { - // Promote this error to a non-error case. - // It's not an error that we can't find the era that was requested. - if error.is_era_validators_missing() { - Ok(false) - } else { - Err(error) - } - }), - }, - } - } - - /// Get our public key from consensus, and if we're a validator, the next round length. 
- pub(crate) async fn consensus_status(self) -> Option<(PublicKey, Option)> + block_hash: BlockHash, + ) -> Option> where - REv: From, + REv: From, { - self.make_request(ConsensusRequest::Status, QueueKind::Regular) - .await + self.make_request( + |responder| BlockAccumulatorRequest::GetPeersForBlock { + block_hash, + responder, + }, + QueueKind::NetworkInfo, + ) + .await } - /// Collects the key blocks for the eras identified by provided era IDs. Returns - /// `Some(HashMap(era_id → block_header))` if all the blocks have been read correctly, and - /// `None` if at least one was missing. The header for EraId `n` is from the key block for that - /// era, that is, the switch block of era `n-1`, ie. it contains the data necessary for - /// initialization of era `n`. - pub(crate) async fn collect_key_blocks>( - self, - era_ids: I, - ) -> Option> + /// Set a new stopping point for the node. + /// + /// Returns a potentially previously set stop-at spec. + pub(crate) async fn set_node_stop_at(self, stop_at: Option) -> Option where - REv: From, + REv: From, { - futures::future::join_all( - era_ids - .into_iter() - // we would get None for era 0 and that would make it seem like the entire - // function failed - .filter(|era_id| !era_id.is_genesis()) - .map(|era_id| { - self.get_key_block_for_era_id_from_storage(era_id) - .map(move |maybe_block| { - maybe_block.map(|block| (era_id, block.take_header())) - }) - }), + self.make_request( + |responder| SetNodeStopRequest { stop_at, responder }, + QueueKind::Control, ) .await - .into_iter() - .collect() } } @@ -1725,6 +2348,6 @@ impl EffectBuilder { #[macro_export] macro_rules! 
fatal { ($effect_builder:expr, $($arg:tt)*) => { - $effect_builder.fatal(file!(), line!(), format_args!($($arg)*).to_string()) + $effect_builder.fatal(file!(), line!(), format!($($arg)*)) }; } diff --git a/node/src/effect/announcements.rs b/node/src/effect/announcements.rs index 7826634009..4171c77679 100644 --- a/node/src/effect/announcements.rs +++ b/node/src/effect/announcements.rs @@ -4,22 +4,32 @@ //! module documentation for details. use std::{ - collections::HashMap, - fmt::{self, Display, Formatter}, + collections::BTreeMap, + fmt::{self, Debug, Display, Formatter}, + fs::File, + sync::Arc, }; +use datasize::DataSize; +use itertools::Itertools; use serde::Serialize; -use casper_types::{EraId, ExecutionResult, PublicKey}; +use casper_types::{ + execution::Effects, Block, EraId, FinalitySignature, FinalitySignatureV2, NextUpgrade, + PublicKey, Timestamp, Transaction, TransactionHash, U512, +}; use crate::{ components::{ - chainspec_loader::NextUpgrade, deploy_acceptor::Error, small_network::GossipedAddress, + consensus::{ClContext, ProposedBlock}, + diagnostics_port::FileSerializer, + fetcher::FetchItem, + gossiper::GossipItem, + network::blocklist::BlocklistJustification, }, effect::Responder, - types::{ - Block, Deploy, DeployHash, DeployHeader, FinalitySignature, FinalizedBlock, Item, Timestamp, - }, + failpoints::FailpointActivation, + types::{FinalizedBlock, MetaBlock, NodeId}, utils::Source, }; @@ -31,125 +41,208 @@ use crate::{ /// Control announcements also use a priority queue to ensure that a component that reports a fatal /// error is given as few follow-up events as possible. However, there currently is no guarantee /// that this happens. -#[derive(Debug, Serialize)] +#[derive(Serialize)] #[must_use] -pub enum ControlAnnouncement { +pub(crate) enum ControlAnnouncement { + /// A shutdown has been requested by the user. + ShutdownDueToUserRequest, + + /// The node should shut down with exit code 0 in readiness for the next binary to start. 
+ ShutdownForUpgrade, + + /// The node started in catch up and shutdown mode has caught up to tip and can now exit. + ShutdownAfterCatchingUp, + /// The component has encountered a fatal error and cannot continue. /// - /// This usually triggers a shutdown of the component, reactor or whole application. + /// This usually triggers a shutdown of the application. FatalError { - /// File the fatal error occurred in. file: &'static str, - /// Line number where the fatal error occurred. line: u32, - /// Error message. msg: String, }, + /// An external event queue dump has been requested. + QueueDumpRequest { + /// The format to dump the queue in. + #[serde(skip)] + dump_format: QueueDumpFormat, + /// Responder called when the dump has been finished. + finished: Responder<()>, + }, + /// Activates/deactivates a failpoint. + ActivateFailpoint { + /// The failpoint activation to process. + activation: FailpointActivation, + }, +} + +impl Debug for ControlAnnouncement { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self { + ControlAnnouncement::ShutdownDueToUserRequest => write!(f, "ShutdownDueToUserRequest"), + ControlAnnouncement::ShutdownForUpgrade => write!(f, "ShutdownForUpgrade"), + ControlAnnouncement::ShutdownAfterCatchingUp => write!(f, "ShutdownAfterCatchingUp"), + ControlAnnouncement::FatalError { file, line, msg } => f + .debug_struct("FatalError") + .field("file", file) + .field("line", line) + .field("msg", msg) + .finish(), + ControlAnnouncement::QueueDumpRequest { .. 
} => { + f.debug_struct("QueueDump").finish_non_exhaustive() + } + ControlAnnouncement::ActivateFailpoint { activation } => f + .debug_struct("ActivateFailpoint") + .field("activation", activation) + .finish(), + } + } } impl Display for ControlAnnouncement { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { match self { + ControlAnnouncement::ShutdownDueToUserRequest => { + write!(f, "shutdown due to user request") + } + ControlAnnouncement::ShutdownForUpgrade => write!(f, "shutdown for upgrade"), + ControlAnnouncement::ShutdownAfterCatchingUp => write!(f, "shutdown after catching up"), ControlAnnouncement::FatalError { file, line, msg } => { write!(f, "fatal error [{}:{}]: {}", file, line, msg) } + ControlAnnouncement::QueueDumpRequest { .. } => { + write!(f, "dump event queue") + } + ControlAnnouncement::ActivateFailpoint { activation } => { + write!(f, "failpoint activation: {}", activation) + } } } } -/// A networking layer announcement. -#[derive(Debug, Serialize)] +/// A component has encountered a fatal error and cannot continue. +/// +/// This usually triggers a shutdown of the application. +#[derive(Serialize, Debug)] #[must_use] -pub enum NetworkAnnouncement { - /// A payload message has been received from a peer. - MessageReceived { - /// The sender of the message - sender: I, - /// The message payload - payload: P, - }, - /// Our public listening address should be gossiped across the network. - GossipOurAddress(GossipedAddress), - /// A new peer connection was established. - /// - /// IMPORTANT NOTE: This announcement is a work-around for some short-term functionality. Do - /// not rely on or use this for anything without asking anyone that has written - /// this section of the code first! 
- NewPeer(I), +pub(crate) struct FatalAnnouncement { + pub(crate) file: &'static str, + pub(crate) line: u32, + pub(crate) msg: String, } -impl Display for NetworkAnnouncement -where - I: Display, - P: Display, -{ - fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { - match self { - NetworkAnnouncement::MessageReceived { sender, payload } => { - write!(formatter, "received from {}: {}", sender, payload) - } - NetworkAnnouncement::GossipOurAddress(_) => write!(formatter, "gossip our address"), - NetworkAnnouncement::NewPeer(id) => { - write!(formatter, "new peer connection established to {}", id) - } - } +impl Display for FatalAnnouncement { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!(f, "fatal error [{}:{}]: {}", self.file, self.line, self.msg) } } -/// An RPC API server announcement. -#[derive(Debug, Serialize)] -#[must_use] -pub enum RpcServerAnnouncement { - /// A new deploy received. - DeployReceived { - /// The received deploy. - deploy: Box, - /// A client responder in the case where a client submits a deploy. - responder: Option>>, - }, +#[derive(DataSize, Serialize, Debug)] +pub(crate) struct MetaBlockAnnouncement(pub(crate) MetaBlock); + +impl Display for MetaBlockAnnouncement { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!( + f, + "announcement for meta block {} at height {}", + self.0.hash(), + self.0.height(), + ) + } } -impl Display for RpcServerAnnouncement { - fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { - match self { - RpcServerAnnouncement::DeployReceived { deploy, .. } => { - write!(formatter, "api server received {}", deploy.id()) - } - } +#[derive(DataSize, Serialize, Debug)] +pub(crate) struct UnexecutedBlockAnnouncement(pub(crate) u64); + +impl Display for UnexecutedBlockAnnouncement { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!( + f, + "announcement for unexecuted finalized block at height {}", + self.0, + ) } } -/// A `DeployAcceptor` announcement. 
+/// Queue dump format with handler. +#[derive(Serialize)] +pub(crate) enum QueueDumpFormat { + /// Dump using given serde serializer. + Serde(#[serde(skip)] FileSerializer), + /// Dump writing debug output to file. + Debug(#[serde(skip)] File), +} + +impl QueueDumpFormat { + /// Creates a new queue dump serde format. + pub(crate) fn serde(serializer: FileSerializer) -> Self { + QueueDumpFormat::Serde(serializer) + } + + /// Creates a new queue dump debug format. + pub(crate) fn debug(file: File) -> Self { + QueueDumpFormat::Debug(file) + } +} + +/// A `TransactionAcceptor` announcement. #[derive(Debug, Serialize)] -pub enum DeployAcceptorAnnouncement { - /// A deploy which wasn't previously stored on this node has been accepted and stored. - AcceptedNewDeploy { - /// The new deploy. - deploy: Box, - /// The source (peer or client) of the deploy. - source: Source, +pub(crate) enum TransactionAcceptorAnnouncement { + /// A transaction which wasn't previously stored on this node has been accepted and stored. + AcceptedNewTransaction { + /// The new transaction. + transaction: Arc, + /// The source (peer or client) of the transaction. + source: Source, }, - /// An invalid deploy was received. - InvalidDeploy { - /// The invalid deploy. - deploy: Box, - /// The source (peer or client) of the deploy. - source: Source, + /// An invalid transaction was received. + InvalidTransaction { + /// The invalid transaction. + transaction: Transaction, + /// The source (peer or client) of the transaction. 
+ source: Source, }, } -impl Display for DeployAcceptorAnnouncement { +impl Display for TransactionAcceptorAnnouncement { fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { match self { - DeployAcceptorAnnouncement::AcceptedNewDeploy { deploy, source } => write!( + TransactionAcceptorAnnouncement::AcceptedNewTransaction { + transaction, + source, + } => write!( formatter, - "accepted new deploy {} from {}", - deploy.id(), + "accepted new transaction {} from {}", + transaction.hash(), source ), - DeployAcceptorAnnouncement::InvalidDeploy { deploy, source } => { - write!(formatter, "invalid deploy {} from {}", deploy.id(), source) + TransactionAcceptorAnnouncement::InvalidTransaction { + transaction, + source, + } => { + write!( + formatter, + "invalid transaction {} from {}", + transaction.hash(), + source + ) + } + } + } +} + +#[derive(Debug, Serialize)] +pub(crate) enum TransactionBufferAnnouncement { + /// Hashes of the transactions that expired. + TransactionsExpired(Vec), +} + +impl Display for TransactionBufferAnnouncement { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self { + TransactionBufferAnnouncement::TransactionsExpired(hashes) => { + write!(f, "pruned hashes: {}", hashes.iter().join(", ")) } } } @@ -157,11 +250,11 @@ impl Display for DeployAcceptorAnnouncement { /// A consensus announcement. #[derive(Debug)] -pub enum ConsensusAnnouncement { +pub(crate) enum ConsensusAnnouncement { + /// A block was proposed. + Proposed(Box>), /// A block was finalized. Finalized(Box), - /// A finality signature was created. - CreatedFinalitySignature(Box), /// An equivocation has been detected. 
Fault { /// The Id of the era in which the equivocation was detected @@ -176,11 +269,11 @@ pub enum ConsensusAnnouncement { impl Display for ConsensusAnnouncement { fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { match self { - ConsensusAnnouncement::Finalized(block) => { - write!(formatter, "finalized proto block {}", block) + ConsensusAnnouncement::Proposed(block) => { + write!(formatter, "proposed block payload {}", block) } - ConsensusAnnouncement::CreatedFinalitySignature(fs) => { - write!(formatter, "signed an executed block: {}", fs) + ConsensusAnnouncement::Finalized(block) => { + write!(formatter, "finalized block payload {}", block) } ConsensusAnnouncement::Fault { era_id, @@ -188,135 +281,193 @@ impl Display for ConsensusAnnouncement { timestamp, } => write!( formatter, - "Validator fault with public key: {} has been identified at time: {} in era: {}", + "Validator fault with public key: {} has been identified at time: {} in {}", public_key, timestamp, era_id, ), } } } -/// A block-list related announcement. +/// Notable / unexpected peer behavior has been detected by some part of the system. #[derive(Debug, Serialize)] -pub enum BlocklistAnnouncement { +pub(crate) enum PeerBehaviorAnnouncement { /// A given peer committed a blockable offense. - OffenseCommitted(Box), + OffenseCommitted { + /// The peer ID of the offending node. + offender: Box, + /// Justification for blocking the peer. + justification: Box, + }, } -impl Display for BlocklistAnnouncement -where - I: Display, -{ +impl Display for PeerBehaviorAnnouncement { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { match self { - BlocklistAnnouncement::OffenseCommitted(peer) => { - write!(f, "peer {} committed offense", peer) + PeerBehaviorAnnouncement::OffenseCommitted { + offender, + justification, + } => { + write!(f, "peer {} committed offense: {}", offender, justification) } } } } -/// A ContractRuntime announcement. +/// A Gossiper announcement. 
#[derive(Debug)] -pub enum ContractRuntimeAnnouncement { - /// A new block from the linear chain was produced. - LinearChainBlock(Box), - /// A block was requested to be executed, but it had been executed before. - BlockAlreadyExecuted(Box), +pub(crate) enum GossiperAnnouncement { + /// A new gossip has been received, but not necessarily the full item. + GossipReceived { item_id: T::Id, sender: NodeId }, + + /// A new item has been received, where the item's ID is the complete item. + NewCompleteItem(T::Id), + + /// A new item has been received where the item's ID is NOT the complete item. + NewItemBody { item: Box, sender: NodeId }, + + /// Finished gossiping about the indicated item. + FinishedGossiping(T::Id), } -impl ContractRuntimeAnnouncement { - /// Create a ContractRuntimeAnnouncement::LinearChainBlock from it's parts. - pub fn linear_chain_block( - block: Block, - execution_results: HashMap, - ) -> Self { - Self::LinearChainBlock(Box::new(LinearChainBlock { - block, - execution_results, - })) +impl Display for GossiperAnnouncement { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self { + GossiperAnnouncement::GossipReceived { item_id, sender } => { + write!(f, "new gossiped item {} from sender {}", item_id, sender) + } + GossiperAnnouncement::NewCompleteItem(item) => write!(f, "new complete item {}", item), + GossiperAnnouncement::NewItemBody { item, sender } => { + write!(f, "new item body {} from {}", item.gossip_id(), sender) + } + GossiperAnnouncement::FinishedGossiping(item_id) => { + write!(f, "finished gossiping {}", item_id) + } + } } - /// Create a ContractRuntimeAnnouncement::BlockAlreadyExecuted from a Block. 
- pub fn block_already_executed(block: Block) -> Self { - Self::BlockAlreadyExecuted(Box::new(block)) +} + +#[derive(Debug, Serialize)] +pub(crate) struct UpgradeWatcherAnnouncement(pub(crate) Option); + +impl Display for UpgradeWatcherAnnouncement { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match &self.0 { + Some(next_upgrade) => write!(f, "read {}", next_upgrade), + None => write!(f, "no upgrade staged"), + } } } -/// A ContractRuntimeAnnouncement's block. -#[derive(Debug)] -pub struct LinearChainBlock { - /// The block. - pub block: Block, - /// The results of executing the deploys in this block. - pub execution_results: HashMap, +/// A ContractRuntime announcement. +#[derive(Debug, Serialize)] +pub(crate) enum ContractRuntimeAnnouncement { + /// A step was committed successfully and has altered global state. + CommitStepSuccess { + /// The era id in which the step was committed to global state. + era_id: EraId, + /// The operations and transforms committed to global state. + effects: Effects, + }, + /// New era validators. + UpcomingEraValidators { + /// The era id in which the step was committed to global state. + era_that_is_ending: EraId, + /// The validators for the eras after the `era_that_is_ending` era. + upcoming_era_validators: BTreeMap>, + }, + /// New gas price for an upcoming era has been determined. + NextEraGasPrice { + /// The era id for which the gas price has been determined + era_id: EraId, + /// The gas price as determined by chain utilization. + next_era_gas_price: u8, + }, } impl Display for ContractRuntimeAnnouncement { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { match self { - ContractRuntimeAnnouncement::LinearChainBlock(linear_chain_block) => { + ContractRuntimeAnnouncement::CommitStepSuccess { era_id, .. } => { + write!(f, "commit step completed for {}", era_id) + } + ContractRuntimeAnnouncement::UpcomingEraValidators { + era_that_is_ending, .. 
+ } => { write!( f, - "created linear chain block {}", - linear_chain_block.block.hash() + "upcoming era validators after current {}.", + era_that_is_ending, ) } - ContractRuntimeAnnouncement::BlockAlreadyExecuted(block) => { - write!(f, "block had been executed before: {}", block.hash()) + ContractRuntimeAnnouncement::NextEraGasPrice { + era_id, + next_era_gas_price, + } => { + write!( + f, + "Calculated gas price {} for era {}", + next_era_gas_price, era_id + ) } } } } -/// A Gossiper announcement. -#[derive(Debug)] -pub enum GossiperAnnouncement { - /// A new item has been received, where the item's ID is the complete item. - NewCompleteItem(T::Id), +#[derive(Debug, Serialize)] +pub(crate) enum BlockAccumulatorAnnouncement { + /// A finality signature which wasn't previously stored on this node has been accepted and + /// stored. + AcceptedNewFinalitySignature { + finality_signature: Box, + }, } -impl Display for GossiperAnnouncement { +impl Display for BlockAccumulatorAnnouncement { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { match self { - GossiperAnnouncement::NewCompleteItem(item) => write!(f, "new complete item {}", item), + BlockAccumulatorAnnouncement::AcceptedNewFinalitySignature { finality_signature } => { + write!( + f, + "finality signature {} accepted", + finality_signature.gossip_id() + ) + } } } } -/// A linear chain announcement. -#[derive(Debug)] -pub enum LinearChainAnnouncement { - /// A new block has been created and stored locally. - BlockAdded(Box), - /// New finality signature received. - NewFinalitySignature(Box), +/// A block which wasn't previously stored on this node has been fetched and stored. 
+#[derive(Debug, Serialize)] +pub(crate) struct FetchedNewBlockAnnouncement { + pub(crate) block: Arc, + pub(crate) peer: NodeId, } -impl Display for LinearChainAnnouncement { +impl Display for FetchedNewBlockAnnouncement { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - match self { - LinearChainAnnouncement::BlockAdded(block) => { - write!(f, "block added {}", block.hash()) - } - LinearChainAnnouncement::NewFinalitySignature(fs) => { - write!(f, "new finality signature {}", fs.block_hash) - } - } + write!( + f, + "new block {} fetched from {}", + self.block.fetch_id(), + self.peer + ) } } -/// A chainspec loader announcement. +/// A finality signature which wasn't previously stored on this node has been fetched and stored. #[derive(Debug, Serialize)] -pub enum ChainspecLoaderAnnouncement { - /// New upgrade recognized. - UpgradeActivationPointRead(NextUpgrade), +pub(crate) struct FetchedNewFinalitySignatureAnnouncement { + pub(crate) finality_signature: Box, + pub(crate) peer: NodeId, } -impl Display for ChainspecLoaderAnnouncement { +impl Display for FetchedNewFinalitySignatureAnnouncement { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - match self { - ChainspecLoaderAnnouncement::UpgradeActivationPointRead(next_upgrade) => { - write!(f, "read {}", next_upgrade) - } - } + write!( + f, + "new finality signature {} fetched from {}", + self.finality_signature.fetch_id(), + self.peer + ) } } diff --git a/node/src/effect/diagnostics_port.rs b/node/src/effect/diagnostics_port.rs new file mode 100644 index 0000000000..86c9a093bb --- /dev/null +++ b/node/src/effect/diagnostics_port.rs @@ -0,0 +1,60 @@ +use std::{ + borrow::Cow, + fmt::{Debug, Display}, +}; + +use casper_types::EraId; +use datasize::DataSize; +use futures::Future; +use serde::Serialize; + +use super::Responder; +use crate::components::consensus::EraDump; + +/// A request to dump the internal consensus state of a specific era. 
+#[derive(DataSize, Serialize)] +pub(crate) struct DumpConsensusStateRequest { + /// Era to serialize. + /// + /// If not given, use active era. + pub(crate) era_id: Option, + /// Serialization function to serialize the given era with. + #[data_size(skip)] + #[serde(skip)] + pub(crate) serialize: fn(&EraDump) -> Result, Cow<'static, str>>, + /// Responder to send the serialized representation into. + pub(crate) responder: Responder, Cow<'static, str>>>, +} + +impl DumpConsensusStateRequest { + pub(crate) fn answer( + self, + value: Result<&EraDump, Cow<'static, str>>, + ) -> impl Future { + let answer = match value { + Ok(data) => (self.serialize)(data), + Err(err) => Err(err), + }; + + self.responder.respond(answer) + } +} + +impl Display for DumpConsensusStateRequest { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "dump consensus state for ")?; + if let Some(ref era_id) = self.era_id { + Display::fmt(era_id, f) + } else { + f.write_str(" latest era") + } + } +} + +impl Debug for DumpConsensusStateRequest { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("DumpConsensusStateRequest") + .field("era_id", &self.era_id) + .finish_non_exhaustive() + } +} diff --git a/node/src/effect/incoming.rs b/node/src/effect/incoming.rs new file mode 100644 index 0000000000..3572371d47 --- /dev/null +++ b/node/src/effect/incoming.rs @@ -0,0 +1,233 @@ +//! Announcements of incoming network messages. +//! +//! Any event suffixed -`Incoming` is usually the arrival of a specific network message. + +use std::{ + fmt::{self, Display, Formatter}, + sync::Arc, +}; + +use datasize::DataSize; +use serde::Serialize; + +use casper_types::FinalitySignatureV2; + +use super::AutoClosingResponder; +use crate::{ + components::{consensus, fetcher::Tag, gossiper}, + protocol::Message, + types::{NodeId, TrieOrChunkIdDisplay}, +}; + +/// An envelope for an incoming message, attaching a sender address. 
+#[derive(DataSize, Debug, Serialize)] +pub struct MessageIncoming { + pub(crate) sender: NodeId, + pub(crate) message: Box, +} + +impl Display for MessageIncoming +where + M: Display, +{ + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!(f, "incoming from {}: {}", self.sender, self.message) + } +} + +/// An envelope for an incoming demand, attaching a sender address and responder. +#[derive(DataSize, Debug, Serialize)] +pub struct DemandIncoming { + /// The sender from which the demand originated. + pub(crate) sender: NodeId, + /// The wrapped demand. + pub(crate) request_msg: Box, + /// Responder to send the answer down through. + pub(crate) auto_closing_responder: AutoClosingResponder, +} + +impl Display for DemandIncoming +where + M: Display, +{ + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!(f, "demand from {}: {}", self.sender, self.request_msg) + } +} + +/// A new consensus message arrived. +pub(crate) type ConsensusMessageIncoming = MessageIncoming; + +/// A new message from a gossiper arrived. +pub(crate) type GossiperIncoming = MessageIncoming>; + +/// A new message requesting various objects arrived. +pub(crate) type NetRequestIncoming = MessageIncoming; + +/// A new message responding to a request arrived. +pub(crate) type NetResponseIncoming = MessageIncoming; + +/// A new message requesting a trie arrived. +pub(crate) type TrieRequestIncoming = MessageIncoming; + +/// A demand for a trie that should be answered. +pub(crate) type TrieDemand = DemandIncoming; + +/// A demand for consensus protocol data that should be answered. +pub(crate) type ConsensusDemand = DemandIncoming; + +/// A new message responding to a trie request arrived. +pub(crate) type TrieResponseIncoming = MessageIncoming; + +/// A new finality signature arrived over the network. +pub(crate) type FinalitySignatureIncoming = MessageIncoming; + +/// A request for an object out of storage arrived. 
+/// +/// Note: The variants here are grouped under a common enum, since they are usually handled by the +/// same component. If this changes, split up this type (see `TrieRequestIncoming` for an +/// example). +#[derive(DataSize, Debug, Serialize)] +#[repr(u8)] +pub(crate) enum NetRequest { + Transaction(Vec), + LegacyDeploy(Vec), + Block(Vec), + BlockHeader(Vec), + FinalitySignature(Vec), + SyncLeap(Vec), + ApprovalsHashes(Vec), + BlockExecutionResults(Vec), +} + +impl Display for NetRequest { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self { + NetRequest::Transaction(_) => f.write_str("request for transaction"), + NetRequest::LegacyDeploy(_) => f.write_str("request for legacy deploy"), + NetRequest::Block(_) => f.write_str("request for block"), + NetRequest::BlockHeader(_) => f.write_str("request for block header"), + NetRequest::FinalitySignature(_) => { + f.write_str("request for gossiped finality signature") + } + NetRequest::SyncLeap(_) => f.write_str("request for sync leap"), + NetRequest::ApprovalsHashes(_) => f.write_str("request for approvals hashes"), + NetRequest::BlockExecutionResults(_) => { + f.write_str("request for block execution results") + } + } + } +} + +impl NetRequest { + /// Returns a unique identifier of the requested object. + pub(crate) fn unique_id(&self) -> Vec { + let id = match self { + NetRequest::Transaction(ref id) + | NetRequest::LegacyDeploy(ref id) + | NetRequest::Block(ref id) + | NetRequest::BlockHeader(ref id) + | NetRequest::FinalitySignature(ref id) + | NetRequest::SyncLeap(ref id) + | NetRequest::ApprovalsHashes(ref id) + | NetRequest::BlockExecutionResults(ref id) => id, + }; + let mut unique_id = Vec::with_capacity(id.len() + 1); + unique_id.push(self.tag() as u8); + unique_id.extend(id); + + unique_id + } + + /// Returns the tag associated with the request. 
+ pub(crate) fn tag(&self) -> Tag { + match self { + NetRequest::Transaction(_) => Tag::Transaction, + NetRequest::LegacyDeploy(_) => Tag::LegacyDeploy, + NetRequest::Block(_) => Tag::Block, + NetRequest::BlockHeader(_) => Tag::BlockHeader, + NetRequest::FinalitySignature(_) => Tag::FinalitySignature, + NetRequest::SyncLeap(_) => Tag::SyncLeap, + NetRequest::ApprovalsHashes(_) => Tag::ApprovalsHashes, + NetRequest::BlockExecutionResults(_) => Tag::BlockExecutionResults, + } + } +} + +/// A response for a net request. +/// +/// See `NetRequest` for notes. +#[derive(Debug, Serialize)] +pub(crate) enum NetResponse { + Transaction(Arc<[u8]>), + LegacyDeploy(Arc<[u8]>), + Block(Arc<[u8]>), + BlockHeader(Arc<[u8]>), + FinalitySignature(Arc<[u8]>), + SyncLeap(Arc<[u8]>), + ApprovalsHashes(Arc<[u8]>), + BlockExecutionResults(Arc<[u8]>), +} + +// `NetResponse` uses `Arcs`, so we count all data as 0. +impl DataSize for NetResponse { + const IS_DYNAMIC: bool = false; + + const STATIC_HEAP_SIZE: usize = 0; + + fn estimate_heap_size(&self) -> usize { + 0 + } +} + +impl Display for NetResponse { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self { + NetResponse::Transaction(_) => f.write_str("response, transaction"), + NetResponse::LegacyDeploy(_) => f.write_str("response, legacy deploy"), + NetResponse::Block(_) => f.write_str("response, block"), + NetResponse::BlockHeader(_) => f.write_str("response, block header"), + NetResponse::FinalitySignature(_) => f.write_str("response, finality signature"), + NetResponse::SyncLeap(_) => f.write_str("response for sync leap"), + NetResponse::ApprovalsHashes(_) => f.write_str("response for approvals hashes"), + NetResponse::BlockExecutionResults(_) => { + f.write_str("response for block execution results") + } + } + } +} + +/// A request for a trie. 
+#[derive(DataSize, Debug, Serialize)] +pub(crate) struct TrieRequest(pub(crate) Vec); + +impl Display for TrieRequest { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!(f, "request for trie {}", TrieOrChunkIdDisplay(&self.0)) + } +} + +/// A response to a request for a trie. +#[derive(DataSize, Debug, Serialize)] +pub(crate) struct TrieResponse(pub(crate) Vec); + +impl Display for TrieResponse { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.write_str("response, trie") + } +} + +#[cfg(test)] +mod tests { + use super::NetRequest; + + #[test] + fn unique_id_is_unique_across_variants() { + let inner_id = b"example".to_vec(); + + let a = NetRequest::Transaction(inner_id.clone()); + let b = NetRequest::Block(inner_id); + + assert_ne!(a.unique_id(), b.unique_id()); + } +} diff --git a/node/src/effect/requests.rs b/node/src/effect/requests.rs index bd5fd8dc7e..7ca667f0df 100644 --- a/node/src/effect/requests.rs +++ b/node/src/effect/requests.rs @@ -4,65 +4,74 @@ //! top-level module documentation for details. 
use std::{ - borrow::Cow, - collections::{BTreeMap, HashMap, HashSet}, - fmt::{self, Debug, Display, Formatter}, - mem, + collections::{BTreeMap, BTreeSet, HashMap, HashSet}, + fmt::{self, Display, Formatter}, sync::Arc, }; use datasize::DataSize; use hex_fmt::HexFmt; use serde::Serialize; +use smallvec::SmallVec; use static_assertions::const_assert; -use casper_execution_engine::{ - core::engine_state::{ - self, - balance::{BalanceRequest, BalanceResult}, - era_validators::GetEraValidatorsError, - genesis::GenesisResult, - query::{GetBidsRequest, GetBidsResult, QueryRequest, QueryResult}, - step::{StepRequest, StepResult}, - upgrade::{UpgradeConfig, UpgradeResult}, - }, - shared::{newtypes::Blake2bHash, stored_value::StoredValue}, - storage::{protocol_data::ProtocolData, trie::Trie}, +use casper_binary_port::{ + ConsensusStatus, ConsensusValidatorChanges, LastProgress, NetworkName, RecordId, Uptime, +}; +use casper_storage::{ + block_store::types::ApprovalsHashes, + data_access_layer::{ + prefixed_values::{PrefixedValuesRequest, PrefixedValuesResult}, + tagged_values::{TaggedValuesRequest, TaggedValuesResult}, + AddressableEntityResult, BalanceRequest, BalanceResult, EntryPointExistsResult, + EraValidatorsRequest, EraValidatorsResult, ExecutionResultsChecksumResult, PutTrieRequest, + PutTrieResult, QueryRequest, QueryResult, SeigniorageRecipientsRequest, + SeigniorageRecipientsResult, TrieRequest, TrieResult, + }, + DbRawBytesSpec, }; use casper_types::{ - system::auction::{EraValidators, ValidatorWeights}, - EraId, ExecutionResult, Key, ProtocolVersion, PublicKey, Transfer, URef, + execution::ExecutionResult, Approval, AvailableBlockRange, Block, BlockHash, BlockHeader, + BlockSignatures, BlockSynchronizerStatus, BlockV2, ChainspecRawBytes, DeployHash, Digest, + DisplayIter, EntityAddr, EraId, ExecutionInfo, FinalitySignature, FinalitySignatureId, + HashAddr, NextUpgrade, ProtocolUpgradeConfig, PublicKey, TimeDiff, Timestamp, Transaction, + TransactionHash, 
TransactionId, Transfer, }; -use super::Responder; +use super::{AutoClosingResponder, GossipTarget, Responder}; use crate::{ components::{ - chainspec_loader::CurrentRunInfo, - contract_runtime::{EraValidatorsRequest, ValidatorWeightsByEraIdRequest}, - deploy_acceptor::Error, - fetcher::FetchResult, + block_synchronizer::{ + GlobalStateSynchronizerError, GlobalStateSynchronizerResponse, TrieAccumulatorError, + TrieAccumulatorResponse, + }, + consensus::{ClContext, ProposedBlock}, + contract_runtime::SpeculativeExecutionResult, + diagnostics_port::StopAtSpec, + fetcher::{FetchItem, FetchResult}, + gossiper::GossipItem, + network::NetworkInsights, + transaction_acceptor, }, - crypto::hash::Digest, - rpcs::chain::BlockIdentifier, + contract_runtime::ExecutionPreState, + reactor::main_reactor::ReactorState, types::{ - Block as LinearBlock, Block, BlockHash, BlockHeader, BlockSignatures, Chainspec, - ChainspecInfo, Deploy, DeployHash, DeployHeader, DeployMetadata, FinalizedBlock, Item, - NodeId, ProtoBlock, StatusFeed, TimeDiff, Timestamp, + appendable_block::AppendableBlock, BlockExecutionResultsOrChunk, + BlockExecutionResultsOrChunkId, BlockWithMetadata, ExecutableBlock, InvalidProposalError, + LegacyDeploy, MetaBlockState, NodeId, StatusFeed, TransactionHeader, }, - utils::DisplayIter, + utils::Source, }; -const _STORAGE_REQUEST_SIZE: usize = mem::size_of::(); -const _STATE_REQUEST_SIZE: usize = mem::size_of::(); -const_assert!(_STORAGE_REQUEST_SIZE < 89); -const_assert!(_STATE_REQUEST_SIZE < 89); +const _STORAGE_REQUEST_SIZE: usize = size_of::(); +const_assert!(_STORAGE_REQUEST_SIZE < 129); /// A metrics request. #[derive(Debug)] -pub enum MetricsRequest { +pub(crate) enum MetricsRequest { /// Render current node metrics as prometheus-formatted string. RenderNodeMetricsText { - /// Resopnder returning the rendered metrics or `None`, if an internal error occurred. + /// Responder returning the rendered metrics or `None`, if an internal error occurred. 
responder: Responder>, }, } @@ -75,53 +84,58 @@ impl Display for MetricsRequest { } } -const _NETWORK_EVENT_SIZE: usize = mem::size_of::>(); -const_assert!(_NETWORK_EVENT_SIZE < 89); +const _NETWORK_EVENT_SIZE: usize = size_of::>(); +const_assert!(_NETWORK_EVENT_SIZE < 105); /// A networking request. #[derive(Debug, Serialize)] #[must_use] -pub enum NetworkRequest { +pub(crate) enum NetworkRequest

{ /// Send a message on the network to a specific peer. SendMessage { /// Message destination. - dest: Box, + dest: Box, /// Message payload. payload: Box

, - /// Responder to be called when the message is queued. + /// If `true`, the responder will be called early after the message has been queued, not + /// waiting until it has passed to the kernel. + respond_after_queueing: bool, + /// Responder to be called when the message has been *buffered for sending*. #[serde(skip_serializing)] - responder: Responder<()>, + auto_closing_responder: AutoClosingResponder<()>, }, - /// Send a message on the network to all peers. - /// Note: This request is deprecated and should be phased out, as not every network - /// implementation is likely to implement broadcast support. - Broadcast { + /// Send a message on the network to validator peers in the given era. + ValidatorBroadcast { /// Message payload. payload: Box

, + /// Era whose validators are recipients. + era_id: EraId, /// Responder to be called when all messages are queued. #[serde(skip_serializing)] - responder: Responder<()>, + auto_closing_responder: AutoClosingResponder<()>, }, /// Gossip a message to a random subset of peers. Gossip { /// Payload to gossip. payload: Box

, + /// Type of peers that should receive the gossip message. + gossip_target: GossipTarget, /// Number of peers to gossip to. This is an upper bound, otherwise best-effort. count: usize, /// Node IDs of nodes to exclude from gossiping to. #[serde(skip_serializing)] - exclude: HashSet, + exclude: HashSet, /// Responder to be called when all messages are queued. #[serde(skip_serializing)] - responder: Responder>, + auto_closing_responder: AutoClosingResponder>, }, } -impl NetworkRequest { +impl

NetworkRequest

{ /// Transform a network request by mapping the contained payload. /// /// This is a replacement for a `From` conversion that is not possible without specialization. - pub(crate) fn map_payload(self, wrap_payload: F) -> NetworkRequest + pub(crate) fn map_payload(self, wrap_payload: F) -> NetworkRequest where F: FnOnce(P) -> P2, { @@ -129,34 +143,42 @@ impl NetworkRequest { NetworkRequest::SendMessage { dest, payload, - responder, + respond_after_queueing, + auto_closing_responder, } => NetworkRequest::SendMessage { dest, payload: Box::new(wrap_payload(*payload)), - responder, + respond_after_queueing, + auto_closing_responder, }, - NetworkRequest::Broadcast { payload, responder } => NetworkRequest::Broadcast { + NetworkRequest::ValidatorBroadcast { + payload, + era_id, + auto_closing_responder, + } => NetworkRequest::ValidatorBroadcast { payload: Box::new(wrap_payload(*payload)), - responder, + era_id, + auto_closing_responder, }, NetworkRequest::Gossip { payload, + gossip_target, count, exclude, - responder, + auto_closing_responder, } => NetworkRequest::Gossip { payload: Box::new(wrap_payload(*payload)), + gossip_target, count, exclude, - responder, + auto_closing_responder, }, } } } -impl Display for NetworkRequest +impl

Display for NetworkRequest

where - I: Display, P: Display, { fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { @@ -164,7 +186,7 @@ where NetworkRequest::SendMessage { dest, payload, .. } => { write!(formatter, "send to {}: {}", dest, payload) } - NetworkRequest::Broadcast { payload, .. } => { + NetworkRequest::ValidatorBroadcast { payload, .. } => { write!(formatter, "broadcast: {}", payload) } NetworkRequest::Gossip { payload, .. } => write!(formatter, "gossip: {}", payload), @@ -173,180 +195,267 @@ where } /// A networking info request. -#[derive(Debug)] -#[must_use] -pub enum NetworkInfoRequest { +#[derive(Debug, Serialize)] +pub(crate) enum NetworkInfoRequest { /// Get incoming and outgoing peers. - GetPeers { + Peers { /// Responder to be called with all connected peers. - // TODO - change the `String` field to a `libp2p::Multiaddr` once small_network is removed. - responder: Responder>, + /// Responds with a map from [NodeId]s to a socket address, represented as a string. + responder: Responder>, + }, + /// Get up to `count` fully-connected peers in random order. + FullyConnectedPeers { + count: usize, + /// Responder to be called with the peers. + responder: Responder>, + }, + /// Get detailed insights into the nodes networking. + Insight { + responder: Responder, }, } -impl Display for NetworkInfoRequest -where - I: Display, -{ +impl Display for NetworkInfoRequest { fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { match self { - NetworkInfoRequest::GetPeers { responder: _ } => write!(formatter, "get peers"), + NetworkInfoRequest::Peers { responder: _ } => { + formatter.write_str("get peers-to-socket-address map") + } + NetworkInfoRequest::FullyConnectedPeers { + count, + responder: _, + } => { + write!(formatter, "get up to {} fully connected peers", count) + } + NetworkInfoRequest::Insight { responder: _ } => { + formatter.write_str("get networking insights") + } } } } +/// A gossip request. 
+/// +/// This request usually initiates gossiping process of the specified item. Note that the gossiper +/// will fetch the item itself, so only the ID is needed. +/// +/// The responder will be called as soon as the gossiper has initiated the process. +// Note: This request should eventually entirely replace `ItemReceived`. #[derive(Debug, Serialize)] -/// A storage request. #[must_use] -#[repr(u8)] -pub enum StorageRequest { +pub(crate) struct BeginGossipRequest +where + T: GossipItem, +{ + pub(crate) item_id: T::Id, + pub(crate) source: Source, + pub(crate) target: GossipTarget, + pub(crate) responder: Responder<()>, +} + +impl Display for BeginGossipRequest +where + T: GossipItem, +{ + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!(f, "begin gossip of {} from {}", self.item_id, self.source) + } +} + +#[derive(Debug, Serialize)] +/// A storage request. +pub(crate) enum StorageRequest { /// Store given block. PutBlock { /// Block to be stored. - block: Box, + block: Arc, /// Responder to call with the result. Returns true if the block was stored on this /// attempt or false if it was previously stored. responder: Responder, }, + /// Store the approvals hashes. + PutApprovalsHashes { + /// Approvals hashes to store. + approvals_hashes: Box, + responder: Responder, + }, + /// Store the block and approvals hashes. + PutExecutedBlock { + /// Block to be stored. + block: Arc, + /// Approvals hashes to store. + approvals_hashes: Box, + execution_results: HashMap, + responder: Responder, + }, /// Retrieve block with given hash. GetBlock { /// Hash of block to be retrieved. block_hash: BlockHash, - /// Responder to call with the result. Returns `None` is the block doesn't exist in local + /// Responder to call with the result. Returns `None` if the block doesn't exist in local /// storage. responder: Responder>, }, - /// Retrieve block header with given height. - GetBlockHeaderAtHeight { - /// Height of the block. - height: BlockHeight, - /// Responder. 
- responder: Responder>, + IsBlockStored { + block_hash: BlockHash, + responder: Responder, }, - /// Retrieve block with given height. - GetBlockAtHeight { - /// Height of the block. - height: BlockHeight, - /// Responder. - responder: Responder>, + /// Retrieve the approvals hashes. + GetApprovalsHashes { + /// Hash of the block for which to retrieve approvals hashes. + block_hash: BlockHash, + /// Responder to call with the result. Returns `None` if the approvals hashes don't exist + /// in local storage. + responder: Responder>, }, - /// Retrieve highest block. - GetHighestBlock { + /// Retrieve the highest complete block. + GetHighestCompleteBlock { /// Responder. responder: Responder>, }, - /// Retrieve switch block header with given era ID. - GetSwitchBlockHeaderAtEraId { - /// Era ID of the switch block. - era_id: EraId, + /// Retrieve the highest complete block header. + GetHighestCompleteBlockHeader { /// Responder. responder: Responder>, }, - /// Retrieve switch block with given era ID. - GetSwitchBlockAtEraId { - /// Era ID of the switch block. - era_id: EraId, - /// Responder. - responder: Responder>, - }, - /// Retrieve highest switch block. - GetHighestSwitchBlock { - /// Responder. - responder: Responder>, + /// Retrieve the era IDs of the blocks in which the given transactions were executed. + GetTransactionsEraIds { + transaction_hashes: HashSet, + responder: Responder>, }, /// Retrieve block header with given hash. GetBlockHeader { /// Hash of block to get header of. block_hash: BlockHash, - /// Responder to call with the result. Returns `None` is the block header doesn't exist in + /// If true, only return `Some` if the block is in the available block range, i.e. the + /// highest contiguous range of complete blocks. + only_from_available_block_range: bool, + /// Responder to call with the result. Returns `None` if the block header doesn't exist in + /// local storage. + responder: Responder>, + }, + /// Retrieve block header with given hash. 
+ GetRawData { + /// Which record to get. + record_id: RecordId, + /// bytesrepr serialized key. + key: Vec, + /// Responder to call with the result. Returns `None` if the data doesn't exist in + /// local storage. + responder: Responder>, + }, + GetBlockHeaderByHeight { + /// Height of block to get header of. + block_height: u64, + /// If true, only return `Some` if the block is in the available block range, i.e. the + /// highest contiguous range of complete blocks. + only_from_available_block_range: bool, + /// Responder to call with the result. Returns `None` if the block header doesn't exist in /// local storage. responder: Responder>, }, + GetLatestSwitchBlockHeader { + responder: Responder>, + }, + GetSwitchBlockHeaderByEra { + /// Era ID for which to get the block header. + era_id: EraId, + /// Responder to call with the result. + responder: Responder>, + }, /// Retrieve all transfers in a block with given hash. GetBlockTransfers { /// Hash of block to get transfers of. block_hash: BlockHash, - /// Responder to call with the result. Returns `None` is the transfers do not exist in + /// Responder to call with the result. Returns `None` if the transfers do not exist in /// local storage under the block_hash provided. responder: Responder>>, }, - /// Store given deploy. - PutDeploy { - /// Deploy to store. - deploy: Box, - /// Responder to call with the result. Returns true if the deploy was stored on this - /// attempt or false if it was previously stored. + PutTransaction { + transaction: Arc, + /// Returns `true` if the transaction was stored on this attempt or false if it was + /// previously stored. + responder: Responder, + }, + /// Retrieve transaction with given hashes. + GetTransactions { + transaction_hashes: Vec, + #[allow(clippy::type_complexity)] + responder: Responder>)>; 1]>>, + }, + /// Retrieve legacy deploy with given hash. 
+ GetLegacyDeploy { + deploy_hash: DeployHash, + responder: Responder>, + }, + GetTransaction { + transaction_id: TransactionId, + responder: Responder>, + }, + IsTransactionStored { + transaction_id: TransactionId, responder: Responder, }, - /// Retrieve deploys with given hashes. - GetDeploys { - /// Hashes of deploys to be retrieved. - deploy_hashes: Vec, - /// Responder to call with the results. - responder: Responder>>, - }, - /// Retrieve deploy headers with given hashes. - GetDeployHeaders { - /// Hashes of deploy headers to be retrieved. - deploy_hashes: Vec, - /// Responder to call with the results. - responder: Responder>>, - }, - /// Retrieve deploys that are finalized and whose TTL hasn't expired yet. - GetFinalizedDeploys { - /// Maximum TTL of block we're interested in. - /// I.e. we don't want deploys from blocks that are older than this. - ttl: TimeDiff, - /// Responder to call with the results. - responder: Responder>, - }, - /// Store execution results for a set of deploys of a single block. + GetTransactionAndExecutionInfo { + transaction_hash: TransactionHash, + with_finalized_approvals: bool, + responder: Responder)>>, + }, + /// Store execution results for a set of transactions of a single block. /// /// Will return a fatal error if there are already execution results known for a specific - /// deploy/block combination and a different result is inserted. + /// transaction/block combination and a different result is inserted. /// - /// Inserting the same block/deploy combination multiple times with the same execution results - /// is not an error and will silently be ignored. + /// Inserting the same transaction/block combination multiple times with the same execution + /// results is not an error and will silently be ignored. PutExecutionResults { /// Hash of block. block_hash: Box, - /// Mapping of deploys to execution results of the block. 
- execution_results: HashMap, + block_height: u64, + era_id: EraId, + /// Mapping of transactions to execution results of the block. + execution_results: HashMap, /// Responder to call when done storing. responder: Responder<()>, }, - /// Retrieve deploy and its metadata. - GetDeployAndMetadata { - /// Hash of deploy to be retrieved. - deploy_hash: DeployHash, - /// Responder to call with the results. - responder: Responder>, - }, - /// Retrieve block and its metadata by its hash. - GetBlockAndMetadataByHash { - /// The hash of the block. + GetExecutionResults { block_hash: BlockHash, - /// The responder to call with the results. - responder: Responder>, + responder: Responder>>, + }, + GetBlockExecutionResultsOrChunk { + /// Request ID. + id: BlockExecutionResultsOrChunkId, + /// Responder to call with the execution results. + /// None is returned when we don't have the block in the storage. + responder: Responder>, + }, + /// Retrieve a finality signature by block hash and public key. + GetFinalitySignature { + id: Box, + responder: Responder>, + }, + IsFinalitySignatureStored { + id: Box, + responder: Responder, }, /// Retrieve block and its metadata at a given height. GetBlockAndMetadataByHeight { /// The height of the block. block_height: BlockHeight, + /// Flag indicating whether storage should check the block availability before trying to + /// retrieve it. + only_from_available_block_range: bool, /// The responder to call with the results. - responder: Responder>, - }, - /// Get the highest block and its metadata. - GetHighestBlockWithMetadata { - /// The responder to call the results with. - responder: Responder>, + responder: Responder>, }, - /// Get finality signatures for a Block hash. - GetBlockSignatures { - /// The hash for the request + /// Get a single finality signature for a block hash. + GetBlockSignature { + /// The hash for the request. block_hash: BlockHash, + /// The public key of the signer. 
+ public_key: Box, /// Responder to call with the result. - responder: Responder>, + responder: Responder>, }, /// Store finality signatures. PutBlockSignatures { @@ -356,57 +465,144 @@ pub enum StorageRequest { /// stored. responder: Responder, }, + PutFinalitySignature { + signature: Box, + responder: Responder, + }, + /// Store a block header. + PutBlockHeader { + /// Block header that is to be stored. + block_header: Box, + /// Responder to call with the result, if true then the block header was successfully + /// stored. + responder: Responder, + }, + /// Retrieve the height range of fully available blocks (not just block headers). Returns + /// `[u64::MAX, u64::MAX]` when there are no sequences. + GetAvailableBlockRange { + /// Responder to call with the result. + responder: Responder, + }, + /// Store a set of finalized approvals for a specific transaction. + StoreFinalizedApprovals { + /// The transaction hash to store the finalized approvals for. + transaction_hash: TransactionHash, + /// The set of finalized approvals. + finalized_approvals: BTreeSet, + /// Responder, responded to once the approvals are written. If true, new approvals were + /// written. + responder: Responder, + }, + /// Retrieve the height of the final block of the previous protocol version, if known. + GetKeyBlockHeightForActivationPoint { responder: Responder> }, + /// Retrieve the era utilization score. + GetEraUtilizationScore { + /// The era id. + era_id: EraId, + /// The block height of the switch block + block_height: u64, + /// The utilization within the switch block. + switch_block_utilization: u64, + /// Responder, responded once the utilization for the era has been determined. + responder: Responder>, + }, } impl Display for StorageRequest { fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { match self { - StorageRequest::PutBlock { block, .. } => write!(formatter, "put {}", block), - StorageRequest::GetBlock { block_hash, .. 
} => write!(formatter, "get {}", block_hash), - StorageRequest::GetBlockHeaderAtHeight { height, .. } => { - write!(formatter, "get block header at height {}", height) + StorageRequest::PutBlock { block, .. } => { + write!(formatter, "put {}", block) + } + StorageRequest::PutApprovalsHashes { + approvals_hashes, .. + } => { + write!(formatter, "put {}", approvals_hashes) } - StorageRequest::GetBlockAtHeight { height, .. } => { - write!(formatter, "get block at height {}", height) + StorageRequest::GetBlock { block_hash, .. } => { + write!(formatter, "get block {}", block_hash) } - StorageRequest::GetHighestBlock { .. } => write!(formatter, "get highest block"), - StorageRequest::GetSwitchBlockHeaderAtEraId { era_id, .. } => { - write!(formatter, "get switch block header at era id {}", era_id) + StorageRequest::IsBlockStored { block_hash, .. } => { + write!(formatter, "is block {} stored", block_hash) } - StorageRequest::GetSwitchBlockAtEraId { era_id, .. } => { - write!(formatter, "get switch block at era id {}", era_id) + StorageRequest::GetApprovalsHashes { block_hash, .. } => { + write!(formatter, "get approvals hashes {}", block_hash) } - StorageRequest::GetHighestSwitchBlock { .. } => { - write!(formatter, "get highest switch block") + StorageRequest::GetHighestCompleteBlock { .. } => { + write!(formatter, "get highest complete block") + } + StorageRequest::GetHighestCompleteBlockHeader { .. } => { + write!(formatter, "get highest complete block header") + } + StorageRequest::GetTransactionsEraIds { + transaction_hashes, .. + } => { + write!( + formatter, + "get era ids for {} transactions", + transaction_hashes.len() + ) } StorageRequest::GetBlockHeader { block_hash, .. } => { write!(formatter, "get {}", block_hash) } + StorageRequest::GetBlockHeaderByHeight { block_height, .. } => { + write!(formatter, "get header for height {}", block_height) + } + StorageRequest::GetLatestSwitchBlockHeader { .. 
} => { + write!(formatter, "get latest switch block header") + } + StorageRequest::GetSwitchBlockHeaderByEra { era_id, .. } => { + write!(formatter, "get header for era {}", era_id) + } StorageRequest::GetBlockTransfers { block_hash, .. } => { write!(formatter, "get transfers for {}", block_hash) } - StorageRequest::PutDeploy { deploy, .. } => write!(formatter, "put {}", deploy), - StorageRequest::GetDeploys { deploy_hashes, .. } => { - write!(formatter, "get {}", DisplayIter::new(deploy_hashes.iter())) + StorageRequest::PutTransaction { transaction, .. } => { + write!(formatter, "put {}", transaction) } - StorageRequest::GetDeployHeaders { deploy_hashes, .. } => write!( - formatter, - "get headers {}", - DisplayIter::new(deploy_hashes.iter()) - ), - StorageRequest::PutExecutionResults { block_hash, .. } => { - write!(formatter, "put execution results for {}", block_hash) + StorageRequest::GetTransactions { + transaction_hashes, .. + } => { + write!( + formatter, + "get {}", + DisplayIter::new(transaction_hashes.iter()) + ) + } + StorageRequest::GetLegacyDeploy { deploy_hash, .. } => { + write!(formatter, "get legacy deploy {}", deploy_hash) } - StorageRequest::GetDeployAndMetadata { deploy_hash, .. } => { - write!(formatter, "get deploy and metadata for {}", deploy_hash) + StorageRequest::GetTransaction { transaction_id, .. } => { + write!(formatter, "get transaction {}", transaction_id) } - StorageRequest::GetBlockAndMetadataByHash { block_hash, .. } => { + StorageRequest::GetTransactionAndExecutionInfo { + transaction_hash, .. + } => { write!( formatter, - "get block and metadata for block with hash: {}", - block_hash + "get transaction and exec info {}", + transaction_hash ) } + StorageRequest::IsTransactionStored { transaction_id, .. } => { + write!(formatter, "is transaction {} stored", transaction_id) + } + StorageRequest::PutExecutionResults { block_hash, .. 
} => { + write!(formatter, "put execution results for {}", block_hash) + } + StorageRequest::GetExecutionResults { block_hash, .. } => { + write!(formatter, "get execution results for {}", block_hash) + } + StorageRequest::GetBlockExecutionResultsOrChunk { id, .. } => { + write!(formatter, "get block execution results or chunk for {}", id) + } + StorageRequest::GetFinalitySignature { id, .. } => { + write!(formatter, "get finality signature {}", id) + } + StorageRequest::IsFinalitySignatureStored { id, .. } => { + write!(formatter, "is finality signature {} stored", id) + } StorageRequest::GetBlockAndMetadataByHeight { block_height, .. } => { write!( formatter, @@ -414,254 +610,120 @@ impl Display for StorageRequest { block_height ) } - StorageRequest::GetHighestBlockWithMetadata { .. } => { - write!(formatter, "get highest block with metadata") - } - StorageRequest::GetBlockSignatures { block_hash, .. } => { + StorageRequest::GetBlockSignature { + block_hash, + public_key, + .. + } => { write!( formatter, - "get finality signatures for block hash {}", - block_hash + "get finality signature for block hash {} from {}", + block_hash, public_key ) } StorageRequest::PutBlockSignatures { .. } => { write!(formatter, "put finality signatures") } - StorageRequest::GetFinalizedDeploys { ttl, .. } => { - write!(formatter, "get finalized deploys, ttl: {:?}", ttl) + StorageRequest::PutFinalitySignature { .. } => { + write!(formatter, "put finality signature") + } + StorageRequest::PutBlockHeader { block_header, .. } => { + write!(formatter, "put block header: {}", block_header) + } + StorageRequest::GetAvailableBlockRange { .. } => { + write!(formatter, "get available block range",) + } + StorageRequest::StoreFinalizedApprovals { + transaction_hash, .. + } => { + write!( + formatter, + "finalized approvals for transaction {}", + transaction_hash + ) + } + StorageRequest::PutExecutedBlock { block, .. 
} => { + write!(formatter, "put executed block {}", block.hash(),) + } + StorageRequest::GetKeyBlockHeightForActivationPoint { .. } => { + write!( + formatter, + "get key block height for current activation point" + ) + } + StorageRequest::GetRawData { + key, + responder: _responder, + record_id, + } => { + write!(formatter, "get raw data {}::{:?}", record_id, key) + } + StorageRequest::GetEraUtilizationScore { era_id, .. } => { + write!(formatter, "get utilization score for era {}", era_id) } } } } -/// State store request. -#[derive(DataSize, Debug, Serialize)] -#[repr(u8)] -pub enum StateStoreRequest { - /// Stores a piece of state to storage. - Save { - /// Key to store under. - key: Cow<'static, [u8]>, - /// Value to store, already serialized. - #[serde(skip_serializing)] - data: Vec, - /// Notification when storing is complete. - responder: Responder<()>, - }, - /// Loads a piece of state from storage. - Load { - /// Key to load from. - key: Cow<'static, [u8]>, - /// Responder for value, if found, returning the previously passed in serialization form. - responder: Responder>>, - }, +#[derive(Debug, Serialize)] +pub(crate) struct MakeBlockExecutableRequest { + /// Hash of the block to be made executable. + pub block_hash: BlockHash, + /// Responder with the executable block and it's transactions + pub responder: Responder>, } -impl Display for StateStoreRequest { +impl Display for MakeBlockExecutableRequest { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - match self { - StateStoreRequest::Save { key, data, .. } => { - write!(f, "save data under {} ({} bytes)", HexFmt(key), data.len()) - } - StateStoreRequest::Load { key, .. } => { - write!(f, "load data from key {}", HexFmt(key)) - } - } + write!(f, "block made executable: {}", self.block_hash) } } -/// Details of a request for a list of deploys to propose in a new block. -#[derive(DataSize, Debug)] -pub struct ProtoBlockRequest { - /// The instant for which the deploy is requested. 
- pub(crate) current_instant: Timestamp, - /// Set of deploy hashes of deploys that should be excluded in addition to the finalized ones. - pub(crate) past_deploys: HashSet, - /// The height of the next block to be finalized at the point the request was made. - /// This is _only_ a way of expressing how many blocks have been finalized at the moment the - /// request was made. Block Proposer uses this in order to determine if there might be any - /// deploys that are neither in `past_deploys`, nor among the finalized deploys it knows of. - pub(crate) next_finalized: u64, - /// Random bit with which to construct the `ProtoBlock` requested. - pub(crate) random_bit: bool, - /// Responder to call with the result. - pub(crate) responder: Responder, -} - -/// A `BlockProposer` request. -#[derive(DataSize, Debug)] -#[must_use] -pub enum BlockProposerRequest { - /// Request a list of deploys to propose in a new block. - RequestProtoBlock(ProtoBlockRequest), +/// A request to mark a block at a specific height completed. +/// +/// A block is considered complete if +/// +/// * the block header and the actual block are persisted in storage, +/// * all of its transactions are persisted in storage, and +/// * the global state root the block refers to has no missing dependencies locally. +#[derive(Debug, Serialize)] +pub(crate) struct MarkBlockCompletedRequest { + pub block_height: u64, + /// Responds `true` if the block was not previously marked complete. 
+ pub responder: Responder, } -impl Display for BlockProposerRequest { - fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { - match self { - BlockProposerRequest::RequestProtoBlock(ProtoBlockRequest { - current_instant, - past_deploys, - next_finalized, - responder: _, - random_bit: _, - }) => write!( - formatter, - "list for inclusion: instant {} past {} next_finalized {}", - current_instant, - past_deploys.len(), - next_finalized - ), - } +impl Display for MarkBlockCompletedRequest { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!(f, "block completed: height {}", self.block_height) } } -/// Abstract RPC request. -/// -/// An RPC request is an abstract request that does not concern itself with serialization or -/// transport. -#[derive(Debug)] -#[must_use] -pub enum RpcRequest { - /// Submit a deploy to be announced. - SubmitDeploy { - /// The deploy to be announced. - deploy: Box, - /// Responder to call. - responder: Responder>, - }, - /// If `maybe_identifier` is `Some`, return the specified block if it exists, else `None`. If - /// `maybe_identifier` is `None`, return the latest block. - GetBlock { - /// The identifier (can either be a hash or the height) of the block to be retrieved. - maybe_id: Option, - /// Responder to call with the result. - responder: Responder>, - }, - /// Return transfers for block by hash (if any). - GetBlockTransfers { - /// The hash of the block to retrieve transfers for. - block_hash: BlockHash, - /// Responder to call with the result. - responder: Responder>>, - }, - /// Query the global state at the given root hash. - QueryGlobalState { - /// The state root hash. - state_root_hash: Digest, - /// Hex-encoded `casper_types::Key`. - base_key: Key, - /// The path components starting from the key as base. - path: Vec, - /// Responder to call with the result. - responder: Responder>, - }, - /// Query the global state at the given root hash. - QueryEraValidators { - /// The global state hash. 
- state_root_hash: Digest, - /// The protocol version. - protocol_version: ProtocolVersion, - /// Responder to call with the result. - responder: Responder>, - }, - /// Get the bids at the given root hash. - GetBids { - /// The global state hash. - state_root_hash: Digest, - /// Responder to call with the result. - responder: Responder>, - }, - /// Query the contract runtime for protocol version data. - QueryProtocolData { - /// The protocol version. - protocol_version: ProtocolVersion, - /// Responder to call with the result. - responder: Responder>, engine_state::Error>>, - }, - /// Query the global state at the given root hash. - GetBalance { - /// The state root hash. - state_root_hash: Digest, - /// The purse URef. - purse_uref: URef, - /// Responder to call with the result. - responder: Responder>, - }, - /// Return the specified deploy and metadata if it exists, else `None`. - GetDeploy { - /// The hash of the deploy to be retrieved. - hash: DeployHash, - /// Responder to call with the result. - responder: Responder>, - }, - /// Return the connected peers. - GetPeers { - /// Responder to call with the result. - responder: Responder>, - }, - /// Return string formatted status or `None` if an error occurred. - GetStatus { - /// Responder to call with the result. - responder: Responder>, - }, - /// Return string formatted, prometheus compatible metrics or `None` if an error occurred. - GetMetrics { - /// Responder to call with the result. - responder: Responder>, +#[derive(DataSize, Debug, Serialize)] +pub(crate) enum TransactionBufferRequest { + GetAppendableBlock { + timestamp: Timestamp, + era_id: EraId, + request_expiry: Timestamp, + responder: Responder, }, } -impl Display for RpcRequest { +impl Display for TransactionBufferRequest { fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { match self { - RpcRequest::SubmitDeploy { deploy, .. 
} => write!(formatter, "submit {}", *deploy), - RpcRequest::GetBlock { - maybe_id: Some(BlockIdentifier::Hash(hash)), - .. - } => write!(formatter, "get {}", hash), - RpcRequest::GetBlock { - maybe_id: Some(BlockIdentifier::Height(height)), - .. - } => write!(formatter, "get {}", height), - RpcRequest::GetBlock { maybe_id: None, .. } => write!(formatter, "get latest block"), - RpcRequest::GetBlockTransfers { block_hash, .. } => { - write!(formatter, "get transfers {}", block_hash) - } - RpcRequest::QueryProtocolData { - protocol_version, .. - } => write!(formatter, "protocol_version {}", protocol_version), - RpcRequest::QueryGlobalState { - state_root_hash, - base_key, - path, + TransactionBufferRequest::GetAppendableBlock { + timestamp, + era_id, + request_expiry, .. - } => write!( - formatter, - "query {}, base_key: {}, path: {:?}", - state_root_hash, base_key, path - ), - RpcRequest::QueryEraValidators { - state_root_hash, .. - } => write!(formatter, "auction {}", state_root_hash), - RpcRequest::GetBids { - state_root_hash, .. } => { - write!(formatter, "bids {}", state_root_hash) + write!( + formatter, + "request for appendable block at instant {} for era {} (expires at {})", + timestamp, era_id, request_expiry + ) } - RpcRequest::GetBalance { - state_root_hash, - purse_uref, - .. - } => write!( - formatter, - "balance {}, purse_uref: {}", - state_root_hash, purse_uref - ), - RpcRequest::GetDeploy { hash, .. } => write!(formatter, "get {}", hash), - RpcRequest::GetPeers { .. } => write!(formatter, "get peers"), - RpcRequest::GetStatus { .. } => write!(formatter, "get status"), - RpcRequest::GetMetrics { .. } => write!(formatter, "get metrics"), } } } @@ -672,24 +734,24 @@ impl Display for RpcRequest { /// transport. #[derive(Debug)] #[must_use] -pub enum RestRequest { +pub(crate) enum RestRequest { /// Return string formatted status or `None` if an error occurred. - GetStatus { + Status { /// Responder to call with the result. 
- responder: Responder>, + responder: Responder, }, /// Return string formatted, prometheus compatible metrics or `None` if an error occurred. - GetMetrics { + Metrics { /// Responder to call with the result. responder: Responder>, }, } -impl Display for RestRequest { +impl Display for RestRequest { fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { match self { - RestRequest::GetStatus { .. } => write!(formatter, "get status"), - RestRequest::GetMetrics { .. } => write!(formatter, "get metrics"), + RestRequest::Status { .. } => write!(formatter, "get status"), + RestRequest::Metrics { .. } => write!(formatter, "get metrics"), } } } @@ -697,47 +759,38 @@ impl Display for RestRequest { /// A contract runtime request. #[derive(Debug, Serialize)] #[must_use] -pub enum ContractRuntimeRequest { - /// A request to execute a block. - ExecuteBlock(FinalizedBlock), - - /// Get `ProtocolData` by `ProtocolVersion`. - GetProtocolData { - /// The protocol version. - protocol_version: ProtocolVersion, - /// Responder to call with the result. - responder: Responder>, engine_state::Error>>, - }, - /// Commit genesis chainspec. - CommitGenesis { - /// The chainspec. - chainspec: Arc, - /// Responder to call with the result. - responder: Responder>, - }, - /// A request to run upgrade. - Upgrade { - /// Upgrade config. - #[serde(skip_serializing)] - upgrade_config: Box, - /// Responder to call with the upgrade result. - responder: Responder>, +pub(crate) enum ContractRuntimeRequest { + /// A request to enqueue a `ExecutableBlock` for execution. + EnqueueBlockForExecution { + /// A `ExecutableBlock` to enqueue. + executable_block: ExecutableBlock, + /// The key block height for the current protocol version's activation point. + key_block_height_for_activation_point: u64, + meta_block_state: MetaBlockState, }, /// A query request. Query { /// Query request. 
#[serde(skip_serializing)] - query_request: QueryRequest, + request: QueryRequest, /// Responder to call with the query result. - responder: Responder>, + responder: Responder, + }, + /// A query by prefix request. + QueryByPrefix { + /// Query by prefix request. + #[serde(skip_serializing)] + request: PrefixedValuesRequest, + /// Responder to call with the query result. + responder: Responder, }, /// A balance request. GetBalance { /// Balance request. #[serde(skip_serializing)] - balance_request: BalanceRequest, + request: BalanceRequest, /// Responder to call with the balance result. - responder: Responder>, + responder: Responder, }, /// Returns validator weights. GetEraValidators { @@ -745,133 +798,195 @@ pub enum ContractRuntimeRequest { #[serde(skip_serializing)] request: EraValidatorsRequest, /// Responder to call with the result. - responder: Responder>, + responder: Responder, }, - /// Returns validator weights for given era. - GetValidatorWeightsByEraId { - /// Get validators weights request. + /// Returns the seigniorage recipients snapshot at the given state root hash. + GetSeigniorageRecipients { + /// Get seigniorage recipients request. #[serde(skip_serializing)] - request: ValidatorWeightsByEraIdRequest, + request: SeigniorageRecipientsRequest, /// Responder to call with the result. - responder: Responder, GetEraValidatorsError>>, + responder: Responder, }, - /// Return bids at a given state root hash - GetBids { - /// Get bids request. + /// Return all values at a given state root hash and given key tag. + GetTaggedValues { + /// Get tagged values request. #[serde(skip_serializing)] - get_bids_request: GetBidsRequest, + request: TaggedValuesRequest, /// Responder to call with the result. - responder: Responder>, + responder: Responder, }, - /// Performs a step consisting of calculating rewards, slashing and running the auction at the - /// end of an era. - Step { - /// The step request. 
- #[serde(skip_serializing)] - step_request: StepRequest, - /// Responder to call with the result. - responder: Responder>, + /// Returns the value of the execution results checksum stored in the ChecksumRegistry for the + /// given state root hash. + GetExecutionResultsChecksum { + state_root_hash: Digest, + responder: Responder, }, - /// Check if validator is bonded in the future era (identified by `era_id`). - IsBonded { - /// State root hash of the LFB. + /// Returns an `AddressableEntity` if found under the given entity_addr. If a legacy `Account` + /// or contract exists under the given key, it will be migrated to an `AddressableEntity` + /// and returned. However, global state is not altered and the migrated record does not + /// actually exist. + GetAddressableEntity { state_root_hash: Digest, - /// Validator public key. - public_key: PublicKey, - /// Era ID in which validator should be bonded in. - era_id: EraId, - /// Protocol version at the `state_root_hash`. - protocol_version: ProtocolVersion, - /// Responder, - responder: Responder>, - }, - /// Read a trie by its hash key - ReadTrie { - /// The hash of the value to get from the `TrieStore` - trie_key: Blake2bHash, + entity_addr: EntityAddr, + responder: Responder, + }, + /// Returns information if an entry point exists under the given state root hash and entry + /// point key. + GetEntryPointExists { + state_root_hash: Digest, + contract_hash: HashAddr, + entry_point_name: String, + responder: Responder, + }, + /// Get a trie or chunk by its ID. + GetTrie { + /// A request for a trie element. + #[serde(skip_serializing)] + request: TrieRequest, /// Responder to call with the result. - responder: Responder>>, + responder: Responder, }, /// Insert a trie into global storage PutTrie { - /// The hash of the value to get from the `TrieStore` - trie: Box>, - /// Responder to call with the result. - responder: Responder, engine_state::Error>>, + /// A request to persist a trie element. 
+ #[serde(skip_serializing)] + request: PutTrieRequest, + /// Responder to call with the result. Contains the hash of the persisted trie. + responder: Responder, }, - /// Get the missing keys under a given trie key in global storage - MissingTrieKeys { - /// The ancestral hash to use when finding hashes that are missing from the `TrieStore` - trie_key: Blake2bHash, - /// Responder to call with the result. - responder: Responder, engine_state::Error>>, + /// Execute transaction without committing results + SpeculativelyExecute { + /// Pre-state. + block_header: Box, + /// Transaction to execute. + transaction: Box, + /// Results + responder: Responder, + }, + UpdateRuntimePrice(EraId, u8), + GetEraGasPrice { + era_id: EraId, + responder: Responder>, + }, + DoProtocolUpgrade { + protocol_upgrade_config: ProtocolUpgradeConfig, + next_block_height: u64, + parent_hash: BlockHash, + parent_seed: Digest, + }, + UpdatePreState { + new_pre_state: ExecutionPreState, }, } impl Display for ContractRuntimeRequest { fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { match self { - ContractRuntimeRequest::ExecuteBlock(finalized_block) => { - write!(formatter, "finalized_block {}", finalized_block) - } - ContractRuntimeRequest::CommitGenesis { chainspec, .. } => { - write!( - formatter, - "commit genesis {}", - chainspec.protocol_config.version - ) - } - - ContractRuntimeRequest::Upgrade { upgrade_config, .. } => { - write!(formatter, "upgrade request: {:?}", upgrade_config) + ContractRuntimeRequest::EnqueueBlockForExecution { + executable_block, .. + } => { + write!(formatter, "executable_block: {}", executable_block) } - - ContractRuntimeRequest::Query { query_request, .. } => { + ContractRuntimeRequest::Query { + request: query_request, + .. + } => { write!(formatter, "query request: {:?}", query_request) } - + ContractRuntimeRequest::QueryByPrefix { request, .. 
} => { + write!(formatter, "query by prefix request: {:?}", request) + } ContractRuntimeRequest::GetBalance { - balance_request, .. + request: balance_request, + .. } => write!(formatter, "balance request: {:?}", balance_request), - ContractRuntimeRequest::GetEraValidators { request, .. } => { write!(formatter, "get era validators: {:?}", request) } - - ContractRuntimeRequest::GetValidatorWeightsByEraId { request, .. } => { - write!(formatter, "get validator weights: {:?}", request) + ContractRuntimeRequest::GetSeigniorageRecipients { request, .. } => { + write!(formatter, "get seigniorage recipients for {:?}", request) } - - ContractRuntimeRequest::GetBids { - get_bids_request, .. + ContractRuntimeRequest::GetTaggedValues { + request: get_all_values_request, + .. } => { - write!(formatter, "get bids request: {:?}", get_bids_request) + write!( + formatter, + "get all values request: {:?}", + get_all_values_request + ) } - - ContractRuntimeRequest::Step { step_request, .. } => { - write!(formatter, "step: {:?}", step_request) + ContractRuntimeRequest::GetExecutionResultsChecksum { + state_root_hash, .. + } => write!( + formatter, + "get execution results checksum under {}", + state_root_hash + ), + ContractRuntimeRequest::GetAddressableEntity { + state_root_hash, + entity_addr, + .. + } => { + write!( + formatter, + "get addressable_entity {} under {}", + entity_addr, state_root_hash + ) } - - ContractRuntimeRequest::GetProtocolData { - protocol_version, .. - } => write!(formatter, "protocol_version: {}", protocol_version), - - ContractRuntimeRequest::IsBonded { - public_key, era_id, .. + ContractRuntimeRequest::GetTrie { request, .. } => { + write!(formatter, "get trie: {:?}", request) + } + ContractRuntimeRequest::PutTrie { request, .. } => { + write!(formatter, "trie: {:?}", request) + } + ContractRuntimeRequest::SpeculativelyExecute { + transaction, + block_header, + .. 
} => { - write!(formatter, "is {} bonded in era {}", public_key, era_id) + write!( + formatter, + "Execute {} on {}", + transaction.hash(), + block_header.state_root_hash() + ) } - ContractRuntimeRequest::ReadTrie { trie_key, .. } => { - write!(formatter, "get trie_key: {}", trie_key) + ContractRuntimeRequest::UpdateRuntimePrice(_, era_gas_price) => { + write!(formatter, "updating price to {}", era_gas_price) } - ContractRuntimeRequest::PutTrie { trie, .. } => { - write!(formatter, "trie: {:?}", trie) + ContractRuntimeRequest::GetEraGasPrice { era_id, .. } => { + write!(formatter, "Get gas price for era {}", era_id) } - ContractRuntimeRequest::MissingTrieKeys { trie_key, .. } => { + ContractRuntimeRequest::GetEntryPointExists { + state_root_hash, + contract_hash, + entry_point_name, + .. + } => { + let formatted_contract_hash = HexFmt(contract_hash); + write!( + formatter, + "get entry point {}-{} under {}", + formatted_contract_hash, entry_point_name, state_root_hash + ) + } + ContractRuntimeRequest::DoProtocolUpgrade { + protocol_upgrade_config, + .. + } => { + write!( + formatter, + "execute protocol upgrade against config: {:?}", + protocol_upgrade_config + ) + } + ContractRuntimeRequest::UpdatePreState { new_pre_state } => { write!( formatter, - "find missing descendants of trie_key: {}", - trie_key + "Updating contract runtimes execution prestate: {:?}", + new_pre_state ) } } @@ -881,44 +996,77 @@ impl Display for ContractRuntimeRequest { /// Fetcher related requests. #[derive(Debug, Serialize)] #[must_use] -pub enum FetcherRequest { - /// Return the specified item if it exists, else `None`. - Fetch { - /// The ID of the item to be retrieved. - id: T::Id, - /// The peer id of the peer to be asked if the item is not held locally - peer: I, - /// Responder to call with the result. - responder: Responder>>, - }, +pub(crate) struct FetcherRequest { + /// The ID of the item to be retrieved. 
+ pub(crate) id: T::Id, + /// The peer id of the peer to be asked if the item is not held locally + pub(crate) peer: NodeId, + /// Metadata used during validation of the fetched item. + pub(crate) validation_metadata: Box, + /// Responder to call with the result. + pub(crate) responder: Responder>, } -impl Display for FetcherRequest { +impl Display for FetcherRequest { fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { - match self { - FetcherRequest::Fetch { id, .. } => write!(formatter, "request item by id {}", id), - } + write!(formatter, "request item by id {}", self.id) + } +} + +/// TrieAccumulator related requests. +#[derive(Debug, Serialize, DataSize)] +#[must_use] +pub(crate) struct TrieAccumulatorRequest { + /// The hash of the trie node. + pub(crate) hash: Digest, + /// The peers to try to fetch from. + pub(crate) peers: Vec, + /// Responder to call with the result. + pub(crate) responder: Responder>, +} + +impl Display for TrieAccumulatorRequest { + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + write!(formatter, "request trie by hash {}", self.hash) + } +} + +#[derive(Debug, Serialize)] +pub(crate) struct SyncGlobalStateRequest { + pub(crate) block_hash: BlockHash, + pub(crate) state_root_hash: Digest, + #[serde(skip)] + pub(crate) responder: + Responder>, +} + +impl Display for SyncGlobalStateRequest { + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + write!( + formatter, + "request to sync global state at {}", + self.block_hash + ) } } /// A block validator request. -#[derive(Debug)] +#[derive(Debug, DataSize)] #[must_use] -pub struct BlockValidationRequest { +pub(crate) struct BlockValidationRequest { + /// The height of the proposed block in the chain. + pub(crate) proposed_block_height: u64, /// The block to be validated. - pub(crate) block: T, - /// The sender of the block, which will be asked to provide all missing deploys. 
- pub(crate) sender: I, + pub(crate) block: ProposedBlock, + /// The sender of the block, which will be asked to provide all missing transactions. + pub(crate) sender: NodeId, /// Responder to call with the result. /// - /// Indicates whether or not validation was successful and returns `block` unchanged. - pub(crate) responder: Responder<(bool, T)>, - /// A check will be performed against the deploys to ensure their timestamp is - /// older than or equal to the block itself. - pub(crate) block_timestamp: Timestamp, + /// Indicates whether validation was successful. + pub(crate) responder: Responder>>, } -impl Display for BlockValidationRequest { +impl Display for BlockValidationRequest { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { let BlockValidationRequest { block, sender, .. } = self; write!(f, "validate block {} from {}", block, sender) @@ -927,56 +1075,152 @@ impl Display for BlockValidationRequest { type BlockHeight = u64; +#[derive(DataSize, Debug)] +#[must_use] +/// Consensus component requests. +pub(crate) enum ConsensusRequest { + /// Request for our public key, and if we're a validator, the next round length. + Status(Responder>), + /// Request for a list of validator status changes, by public key. + ValidatorChanges(Responder), +} + +/// ChainspecLoader component requests. #[derive(Debug, Serialize)] -/// Requests issued to the Linear Chain component. -pub enum LinearChainRequest { - /// Request whole block from the linear chain, by hash. - BlockRequest(BlockHash, I), - /// Request for a linear chain block at height. - BlockAtHeight(BlockHeight, I), - /// Local request for a linear chain block at height. - // TODO: Unify `BlockAtHeight` and `BlockAtHeightLocal`. 
- BlockAtHeightLocal(BlockHeight, Responder>), -} - -impl Display for LinearChainRequest { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { +pub(crate) enum ChainspecRawBytesRequest { + /// Request for the chainspec file bytes with the genesis_accounts and global_state bytes, if + /// they are present. + GetChainspecRawBytes(Responder>), +} + +impl Display for ChainspecRawBytesRequest { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { match self { - LinearChainRequest::BlockRequest(bh, peer) => { - write!(f, "block request for hash {} from {}", bh, peer) - } - LinearChainRequest::BlockAtHeight(height, sender) => { - write!(f, "block request for {} from {}", height, sender) - } - LinearChainRequest::BlockAtHeightLocal(height, _) => { - write!(f, "local request for block at height {}", height) + ChainspecRawBytesRequest::GetChainspecRawBytes(_) => { + write!(f, "get chainspec raw bytes") } } } } -#[derive(DataSize, Debug)] -#[must_use] -/// Consensus component requests. -pub enum ConsensusRequest { - /// Request for our public key, and if we're a validator, the next round length. - Status(Responder)>>), +/// UpgradeWatcher component request to get the next scheduled upgrade, if any. +#[derive(Debug, Serialize)] +pub(crate) struct UpgradeWatcherRequest(pub(crate) Responder>); + +impl Display for UpgradeWatcherRequest { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!(f, "get next upgrade") + } +} + +#[derive(Debug, Serialize)] +pub(crate) enum ReactorInfoRequest { + ReactorState { responder: Responder }, + LastProgress { responder: Responder }, + Uptime { responder: Responder }, + NetworkName { responder: Responder }, + BalanceHoldsInterval { responder: Responder }, +} + +impl Display for ReactorInfoRequest { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!( + f, + "get reactor status: {}", + match self { + ReactorInfoRequest::ReactorState { .. } => "ReactorState", + ReactorInfoRequest::LastProgress { .. 
} => "LastProgress", + ReactorInfoRequest::Uptime { .. } => "Uptime", + ReactorInfoRequest::NetworkName { .. } => "NetworkName", + ReactorInfoRequest::BalanceHoldsInterval { .. } => "BalanceHoldsInterval", + } + ) + } } -/// ChainspecLoader component requests. #[derive(Debug, Serialize)] -pub enum ChainspecLoaderRequest { - /// Chainspec info request. - GetChainspecInfo(Responder), - /// Request for information about the current run. - GetCurrentRunInfo(Responder), +#[allow(clippy::enum_variant_names)] +pub(crate) enum BlockAccumulatorRequest { + GetPeersForBlock { + block_hash: BlockHash, + responder: Responder>>, + }, } -impl Display for ChainspecLoaderRequest { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { +impl Display for BlockAccumulatorRequest { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { match self { - ChainspecLoaderRequest::GetChainspecInfo(_) => write!(f, "get chainspec info"), - ChainspecLoaderRequest::GetCurrentRunInfo(_) => write!(f, "get current run info"), + BlockAccumulatorRequest::GetPeersForBlock { block_hash, .. } => { + write!(f, "get peers for {}", block_hash) + } } } } + +#[derive(Debug, Serialize)] +pub(crate) enum BlockSynchronizerRequest { + NeedNext, + DishonestPeers, + SyncGlobalStates(Vec<(BlockHash, Digest)>), + Status { + responder: Responder, + }, +} + +impl Display for BlockSynchronizerRequest { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self { + BlockSynchronizerRequest::NeedNext => { + write!(f, "block synchronizer request: need next") + } + BlockSynchronizerRequest::DishonestPeers => { + write!(f, "block synchronizer request: dishonest peers") + } + BlockSynchronizerRequest::Status { .. } => { + write!(f, "block synchronizer request: status") + } + BlockSynchronizerRequest::SyncGlobalStates(_) => { + write!(f, "request to sync global states") + } + } + } +} + +/// A request to set the current shutdown trigger. 
+#[derive(DataSize, Debug, Serialize)] +pub(crate) struct SetNodeStopRequest { + /// The specific stop-at spec. + /// + /// If `None`, clears the current stop at setting. + pub(crate) stop_at: Option, + /// Responder to send the previously set stop-at spec to, if any. + pub(crate) responder: Responder>, +} + +impl Display for SetNodeStopRequest { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self.stop_at { + None => f.write_str("clear node stop"), + Some(stop_at) => write!(f, "set node stop to: {}", stop_at), + } + } +} + +/// A request to accept a new transaction. +#[derive(DataSize, Debug, Serialize)] +pub(crate) struct AcceptTransactionRequest { + pub(crate) transaction: Transaction, + pub(crate) is_speculative: bool, + pub(crate) responder: Responder>, +} + +impl Display for AcceptTransactionRequest { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!( + f, + "accept transaction {} is_speculative: {}", + self.transaction.hash(), + self.is_speculative + ) + } +} diff --git a/node/src/failpoints.rs b/node/src/failpoints.rs new file mode 100644 index 0000000000..a1c00f293b --- /dev/null +++ b/node/src/failpoints.rs @@ -0,0 +1,566 @@ +//! Failpoint support. +//! +//! Failpoints can enabled on the node to inject faulty behavior at runtime, for testing and +//! benchmarking purposes. +//! +//! # General usage +//! +//! Failpoints are created in code using `Failpoint`, and activated using a `FailpointActivation`. +//! See the `failpoints::test::various_usecases` test for an example. + +use std::{ + fmt::{self, Debug, Display}, + num::ParseFloatError, + str::FromStr, +}; + +use datasize::DataSize; +use rand::{distributions::Uniform, prelude::Distribution, Rng}; +use serde::{de::DeserializeOwned, Serialize}; +use serde_json::Value; +use thiserror::Error; +use tracing::{info, instrument, trace, warn}; + +use crate::utils::opt_display::OptDisplay; + +/// A specific failpoint. 
+#[derive(DataSize, Debug)] +pub(crate) struct Failpoint +where + T: DataSize, +{ + /// Key that activates the given failpoint. + #[data_size(skip)] + key: &'static str, + /// Subkey that potentially activates the given failpoint. + subkey: Option, + /// The value of the failpoint, if any. + value: Option, + /// Activation probability. + probability: Option, + /// Whether to trigger the failpoint only once. + once: bool, + /// Whether the failpoint has already fired. + fired: bool, +} + +impl Failpoint +where + T: Debug + DeserializeOwned + DataSize, +{ + /// Creates a new failpoint with a given key. + #[inline(always)] + pub(crate) fn new(key: &'static str) -> Self { + Failpoint { + key, + subkey: None, + value: None, + probability: None, + once: false, + fired: false, + } + } + + /// Creates a new failpoint with a given key and optional subkey. + #[inline] + #[allow(dead_code)] + pub(crate) fn new_with_subkey(key: &'static str, subkey: S) -> Self { + Failpoint { + key, + subkey: Some(subkey.to_string()), + value: None, + probability: None, + once: false, + fired: false, + } + } + + /// Update a failpoint from a given `FailpointActivation`. + /// + /// The failpoint will be changed if the given activation matches `key` and `subkey` only. + #[instrument(level = "error", + fields(fp_key=self.key, + fp_subkey=%OptDisplay::new(self.subkey.as_ref(), "") + ) + )] + pub(crate) fn update_from(&mut self, activation: &FailpointActivation) { + // Check if the failpoint matches. + if activation.key != self.key || activation.subkey != self.subkey { + trace!("not updating failpoint"); + return; + } + + // Values can fail, so update these first. 
+ if let Some(value) = activation.value.as_ref() { + match serde_json::from_value::(value.clone()) { + Ok(value) => self.value = Some(value), + Err(err) => warn!(%err, "failed to deserialize failpoint value"), + } + } else { + self.value = None; + } + + self.probability = activation.probability; + self.once = activation.once; + self.fired = false; + + if self.value.is_some() { + info!("activated failpoint"); + } else { + info!("cleared failpoint"); + } + } + + /// Fire the failpoint, if active. + /// + /// Returns the value of the failpoint, if it fired. + #[inline(always)] + pub(crate) fn fire(&mut self, rng: &mut R) -> Option<&T> { + if self.value.is_some() { + self.do_fire(rng) + } else { + None + } + } + + /// Inner `fire` implementation. + /// + /// `fire` is kept small for facilitate inlining and fast processing of disabled failpoints. + #[inline] + fn do_fire(&mut self, rng: &mut R) -> Option<&T> { + if let Some(p) = self.probability { + let p_range = Uniform::new_inclusive(0.0, 1.0); + if p_range.sample(rng) > p as f64 { + return None; + } + } + + if self.once && self.fired { + return None; + } + + self.fired = true; + self.value() + } + + /// Returns the value of the failpoint, if it is set. + #[inline] + fn value(&self) -> Option<&T> { + self.value.as_ref() + } +} + +/// A parsed failpoint activation. +#[derive(Clone, DataSize, Debug, PartialEq, Serialize)] +pub(crate) struct FailpointActivation { + key: String, + subkey: Option, + #[data_size(skip)] // TODO: Add a `DataSize` implementation for JSON `Value`s. 
+ value: Option, + probability: Option, + once: bool, +} + +impl Display for FailpointActivation { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str(&self.key)?; + + if let Some(subkey) = self.subkey.as_ref() { + write!(f, ",sub:{}", subkey)?; + } + + if let Some(p) = self.probability { + write!(f, ",p:{}", p)?; + } + + if self.once { + f.write_str(",once")?; + } + + if let Some(value) = self.value.as_ref() { + // Note on the unwrap: Serializing a `Value` should never fail. + write!(f, "={}", serde_json::to_string(value).unwrap_or_default())?; + } + + Ok(()) + } +} + +impl FailpointActivation { + /// Creates a new [`FailpointActivation`] with the given `key`. + #[inline(always)] + pub(crate) fn new(key: S) -> FailpointActivation { + FailpointActivation { + key: key.to_string(), + subkey: None, + value: None, + probability: None, + once: false, + } + } + + /// Gets the key of this [`FailpointActivation`]. + #[inline(always)] + pub(crate) fn key(&self) -> &str { + &self.key + } + + /// Sets the subkey. + #[inline(always)] + pub(crate) fn subkey(mut self, subkey: S) -> Self { + self.subkey = Some(subkey.to_string()); + self + } + + /// Sets the failpoint's value from JSON. + /// + /// # Panics + /// + /// Will panic if `value` does not cleanly serialize to a [`serde_json::Value`]. + #[inline(always)] + #[allow(unused)] + pub(crate) fn value(self, value: T) -> Self + where + T: Serialize, + { + let value_json: Value = + serde_json::to_value(value).expect("passed in value does not serialize to JSON"); + + self.value_json(value_json) + } + + /// Sets the failpoint's value from JSON. + #[inline(always)] + pub(crate) fn value_json(mut self, value: Value) -> Self { + self.value = Some(value); + self + } + + /// Sets the probability of the failpoint firing. + /// + /// The value will be clamped to `[0.0, 1.0]`. A value of `NaN` will be converted to `0.0`. 
+ #[inline(always)] + pub(crate) fn probability(mut self, probability: f32) -> Self { + // Note: We do not use `clamp`, since it does not remove `NaN`s. + self.probability = Some(probability.clamp(0.0, 1.0)); + self + } + + /// Sets the failpoint to fire only once. + #[inline(always)] + pub(crate) fn once(mut self) -> Self { + self.once = true; + self + } +} + +/// Error parsing a failpoint activation. +#[derive(Debug, Error)] +pub(crate) enum ParseError { + /// The provided value for the failpoint was not valid JSON. + #[error("invalid json value")] + InvalidJson(#[source] serde_json::Error), + /// Left hand side contained no segments. + #[error("no key given")] + MissingKey, + /// Invalid floating literal for probability + #[error("invvalid probability value")] + InvalidProbability(#[source] ParseFloatError), + /// The given meta key is not valid. + #[error("not a known meta key: \"{0}\"")] + InvalidMeta(String), +} + +impl FromStr for FailpointActivation { + type Err = ParseError; + + fn from_str(raw: &str) -> Result { + let (raw_meta, value) = if let Some((left, right)) = raw.split_once('=') { + ( + left, + Some(serde_json::from_str::(right).map_err(ParseError::InvalidJson)?), + ) + } else { + (raw, None) + }; + + let mut fragments = raw_meta.split(','); + let key = fragments.next().ok_or(ParseError::MissingKey)?; + let mut fps = FailpointActivation::new(key); + + for fragment in fragments { + let (meta, meta_value) = if let Some((left, right)) = fragment.split_once(':') { + (left, Some(right)) + } else { + (fragment, None) + }; + + match (meta, meta_value) { + ("sub", Some(v)) => { + fps = fps.subkey(v); + } + ("p", Some(raw_p)) => { + fps = fps.probability(raw_p.parse().map_err(ParseError::InvalidProbability)?); + } + ("once", None) => { + fps = fps.once(); + } + (invalid, _) => return Err(ParseError::InvalidMeta(invalid.to_string())), + } + } + + if let Some(value) = value { + fps = fps.value_json(value); + } + + Ok(fps) + } +} + +#[cfg(test)] +mod tests { + 
use std::str::FromStr; + + use casper_types::{testing::TestRng, TimeDiff}; + use serde_json::json; + + use crate::testing::init_logging; + + use super::{Failpoint, FailpointActivation}; + + #[test] + fn parse_failpoints() { + assert_eq!( + FailpointActivation::from_str("foobar").expect("should parse"), + FailpointActivation { + key: "foobar".to_owned(), + subkey: None, + value: None, + probability: None, + once: false + } + ); + + assert_eq!( + FailpointActivation::from_str("foobar,once").expect("should parse"), + FailpointActivation { + key: "foobar".to_owned(), + subkey: None, + value: None, + probability: None, + once: true + } + ); + + assert_eq!( + FailpointActivation::from_str("foobar,sub:xyz").expect("should parse"), + FailpointActivation { + key: "foobar".to_owned(), + subkey: Some("xyz".to_owned()), + value: None, + probability: None, + once: false + } + ); + + assert_eq!( + FailpointActivation::from_str("foobar,p:0.5,sub:xyz,once").expect("should parse"), + FailpointActivation { + key: "foobar".to_owned(), + subkey: Some("xyz".to_owned()), + value: None, + probability: Some(0.5), + once: true + } + ); + + assert_eq!( + FailpointActivation::from_str("foobar,p:0.5,sub:xyz,once=true").expect("should parse"), + FailpointActivation { + key: "foobar".to_owned(), + subkey: Some("xyz".to_owned()), + value: Some(serde_json::json!(true)), + probability: Some(0.5), + once: true + } + ); + + assert_eq!( + FailpointActivation::from_str("foobar={\"hello\": \"world\", \"count\": 1}") + .expect("should parse"), + FailpointActivation { + key: "foobar".to_owned(), + subkey: None, + value: Some(serde_json::json!({"hello": "world", "count": 1})), + probability: None, + once: false + } + ); + } + + #[test] + fn clamping_works() { + assert_eq!( + FailpointActivation::new("test") + .probability(-0.1) + .probability, + Some(0.0) + ); + assert_eq!( + FailpointActivation::new("test") + .probability(0.0) + .probability, + Some(0.0) + ); + assert_eq!( + 
FailpointActivation::new("test") + .probability(0.1) + .probability, + Some(0.1) + ); + assert_eq!( + FailpointActivation::new("test") + .probability(0.5) + .probability, + Some(0.5) + ); + assert_eq!( + FailpointActivation::new("test") + .probability(0.9) + .probability, + Some(0.9) + ); + assert_eq!( + FailpointActivation::new("test") + .probability(1.0) + .probability, + Some(1.0) + ); + assert_eq!( + FailpointActivation::new("test") + .probability(1.1) + .probability, + Some(1.0) + ); + } + + #[test] + fn display_works() { + assert_eq!( + FailpointActivation::from_str("foobar={\"hello\": \"world\", \"count\": 1}") + .expect("should parse") + .to_string(), + "foobar={\"hello\":\"world\",\"count\":1}" + ); + + assert_eq!( + FailpointActivation::from_str("foobar,p:0.5,sub:xyz,once=true") + .expect("should parse") + .to_string(), + "foobar,sub:xyz,p:0.5,once=true" + ); + + assert_eq!( + FailpointActivation::from_str("abc_123") + .expect("should parse") + .to_string(), + "abc_123" + ); + } + + #[test] + fn various_usecases() { + // Note: This function deliberately exerts different APIs of `FailpointActivation`. When + using `FailpointActivation` in tests, it is recommended to construct it using the + builder pattern as opposed to parsing it from strings. 
+ + init_logging(); + + let mut rng = TestRng::new(); + let mut delay_send_fp = Failpoint::<TimeDiff>::new("example.delay_send"); + + assert!( + delay_send_fp.fire(&mut rng).is_none(), + "failpoint should be disabled" + ); + + let unrelated_activation = + FailpointActivation::from_str("example.unrelated=\"1s\"").unwrap(); + delay_send_fp.update_from(&unrelated_activation); + + assert!( + delay_send_fp.fire(&mut rng).is_none(), + "failpoint should be disabled after unrelated activation" + ); + + let activation = + FailpointActivation::new("example.delay_send").value(TimeDiff::from_seconds(1)); + + delay_send_fp.update_from(&activation); + + let diff = delay_send_fp + .fire(&mut rng) + .expect("should trigger failpoint"); + assert_eq!(*diff, TimeDiff::from_str("1s").unwrap()); + + // Repeat, since `once` is not enabled. + let diff = delay_send_fp + .fire(&mut rng) + .expect("should trigger failpoint a second time"); + assert_eq!(*diff, TimeDiff::from_str("1s").unwrap()); + let diff = delay_send_fp + .fire(&mut rng) + .expect("should trigger failpoint a third time"); + assert_eq!(*diff, TimeDiff::from_str("1s").unwrap()); + + let deactivation = FailpointActivation::from_str("example.delay_send").unwrap(); + + delay_send_fp.update_from(&deactivation); + + assert!( + delay_send_fp.fire(&mut rng).is_none(), + "failpoint should be disabled" + ); + + let once_activation = FailpointActivation::new("example.delay_send") + .once() + .value_json(json!("2s")); + delay_send_fp.update_from(&once_activation); + + let diff = delay_send_fp + .fire(&mut rng) + .expect("should trigger failpoint"); + assert_eq!(*diff, TimeDiff::from_str("2s").unwrap()); + + // A failpoint activated with `once` must not fire a second time. 
+ assert!(delay_send_fp.fire(&mut rng).is_none()); + } + + #[test] + fn activation_primes_properly() { + let mut fp = Failpoint::<()>::new("some_failpoint"); + + fp.update_from(&FailpointActivation::from_str("some_failpoint,p:0.5,once=null").unwrap()); + + assert_eq!(fp.probability, Some(0.5)); + assert!(fp.once); + } + + #[test] + fn failpoint_probability_affects_failpoint() { + let mut rng = TestRng::new(); + let mut fp = Failpoint::<()>::new("some_failpoint"); + + // Full activation. + fp.update_from(&FailpointActivation::from_str("some_failpoint=null").unwrap()); + assert!(fp.fire(&mut rng).is_some()); + + // p:1.0 should be the same + fp.update_from(&FailpointActivation::from_str("some_failpoint,p:1.0=null").unwrap()); + assert!(fp.fire(&mut rng).is_some()); + + // p:0.0 essentially disables it + fp.update_from(&FailpointActivation::from_str("some_failpoint,p:0.0=null").unwrap()); + assert!(fp.fire(&mut rng).is_none()); + } +} diff --git a/node/src/failpoints_disabled.rs b/node/src/failpoints_disabled.rs new file mode 100644 index 0000000000..e7810dd92e --- /dev/null +++ b/node/src/failpoints_disabled.rs @@ -0,0 +1,84 @@ +//! Failpoint stubs. +//! +//! This module stubs out enough of the failpoint API to work if the feature is disabled, but never +//! activates them. + +use std::{ + fmt::{self, Display, Formatter}, + marker::PhantomData, + str::FromStr, +}; + +use datasize::DataSize; +use serde::Serialize; +use thiserror::Error; + +/// A dummy failpoint. +#[derive(DataSize, Debug)] +pub(crate) struct Failpoint { + _phantom: PhantomData, +} + +impl Failpoint { + /// Creates a new failpoint with a given key. + #[inline(always)] + pub(crate) fn new(_key: &'static str) -> Self { + Failpoint { + _phantom: PhantomData, + } + } + + /// Creates a new failpoint with a given key and optional subkey. 
+ #[inline] + #[allow(dead_code)] + pub(crate) fn new_with_subkey(_key: &'static str, _subkey: S) -> Self { + Failpoint { + _phantom: PhantomData, + } + } + + /// Ignores the failpoint activation. + #[inline(always)] + pub(crate) fn update_from(&mut self, _activation: &FailpointActivation) {} + + /// Returns `None`. + #[inline(always)] + pub(crate) fn fire(&mut self, _rng: &mut R) -> Option<&T> { + None + } +} + +/// A parsed failpoint activation. +#[derive(Clone, DataSize, Debug, PartialEq, Serialize)] +pub(crate) struct FailpointActivation; + +impl FailpointActivation { + #[allow(dead_code)] + pub(crate) fn new(_key: S) -> FailpointActivation { + FailpointActivation + } + + pub(crate) fn key(&self) -> &str { + "" + } +} + +impl Display for FailpointActivation { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.write_str("(no failpoint support)") + } +} + +/// Error parsing a failpoint activation. +#[derive(Debug, Error)] +#[error("no failpoint support enabled")] +pub(crate) struct ParseError; + +impl FromStr for FailpointActivation { + type Err = ParseError; + + #[inline(always)] + fn from_str(_raw: &str) -> Result { + Err(ParseError) + } +} diff --git a/node/src/lib.rs b/node/src/lib.rs index 8cb3d8eb8a..984e991e03 100644 --- a/node/src/lib.rs +++ b/node/src/lib.rs @@ -8,11 +8,11 @@ //! While the [`main`](fn.main.html) function is the central entrypoint for the node application, //! its core event loop is found inside the [reactor](reactor/index.html). 
-#![doc(html_root_url = "https://docs.rs/casper-node/1.0.0")] +#![doc(html_root_url = "https://docs.rs/casper-node/2.0.4")] #![doc( - html_favicon_url = "https://raw.githubusercontent.com/CasperLabs/casper-node/master/images/CasperLabs_Logo_Favicon_RGB_50px.png", - html_logo_url = "https://raw.githubusercontent.com/CasperLabs/casper-node/master/images/CasperLabs_Logo_Symbol_RGB.png", - test(attr(forbid(warnings))) + html_favicon_url = "https://raw.githubusercontent.com/casper-network/casper-node/dev/images/Casper_Logo_Favicon_48.png", + html_logo_url = "https://raw.githubusercontent.com/casper-network/casper-node/dev/images/Casper_Logo_Favicon.png", + test(attr(deny(warnings))) )] #![warn( missing_docs, @@ -20,61 +20,70 @@ trivial_numeric_casts, unused_qualifications )] -#![feature(test)] +#![allow(clippy::bool_comparison)] -extern crate test; - -pub mod components; +pub mod cli; +pub(crate) mod components; mod config_migration; -pub mod crypto; mod data_migration; -pub mod effect; +pub(crate) mod effect; +#[cfg_attr(not(feature = "failpoints"), path = "failpoints_disabled.rs")] +pub(crate) mod failpoints; + pub mod logging; -pub mod protocol; -pub mod reactor; +pub(crate) mod protocol; +pub(crate) mod reactor; #[cfg(test)] -pub mod testing; -pub mod tls; +pub(crate) mod testing; +pub(crate) mod tls; pub mod types; pub mod utils; -use std::sync::{ - atomic::{AtomicBool, AtomicUsize}, - Arc, +use std::{ + env, + sync::{atomic::AtomicUsize, Arc}, }; use ansi_term::Color::Red; use once_cell::sync::Lazy; #[cfg(not(test))] use rand::SeedableRng; -use signal_hook::{ - consts::{signal::SIGUSR1, TERM_SIGNALS}, - flag, -}; +use signal_hook::{consts::TERM_SIGNALS, flag}; +use tracing::warn; -pub use components::{ - block_proposer::Config as BlockProposerConfig, - consensus::Config as ConsensusConfig, +pub(crate) use components::{ + binary_port::Config as BinaryPortConfig, block_accumulator::Config as BlockAccumulatorConfig, + block_synchronizer::Config as 
BlockSynchronizerConfig, + block_validator::Config as BlockValidatorConfig, consensus::Config as ConsensusConfig, contract_runtime::Config as ContractRuntimeConfig, - deploy_acceptor::Config as DeployAcceptorConfig, - event_stream_server::Config as EventStreamServerConfig, - fetcher::Config as FetcherConfig, - gossiper::{Config as GossipConfig, Error as GossipError}, + diagnostics_port::Config as DiagnosticsPortConfig, + event_stream_server::Config as EventStreamServerConfig, fetcher::Config as FetcherConfig, + gossiper::Config as GossipConfig, network::Config as NetworkConfig, rest_server::Config as RestServerConfig, - rpc_server::{rpcs, Config as RpcServerConfig}, - small_network::{Config as SmallNetworkConfig, Error as SmallNetworkError}, - storage::{Config as StorageConfig, Error as StorageError}, + transaction_acceptor::Config as TransactionAcceptorConfig, + transaction_buffer::Config as TransactionBufferConfig, + upgrade_watcher::Config as UpgradeWatcherConfig, }; -pub use config_migration::{migrate_config, Error as ConfigMigrationError}; -pub use data_migration::{migrate_data, Error as DataMigrationError}; -pub use types::NodeRng; -pub use utils::OS_PAGE_SIZE; +pub use components::{ + consensus, contract_runtime, + storage::{self, Config as StorageConfig}, +}; +pub use reactor::main_reactor::Config as MainReactorConfig; +pub(crate) use types::NodeRng; +pub use utils::WithDir; /// The maximum thread count which should be spawned by the tokio runtime. 
pub const MAX_THREAD_COUNT: usize = 512; fn version_string(color: bool) -> String { - let mut version = format!("{}-{}", env!("CARGO_PKG_VERSION"), env!("VERGEN_SHA_SHORT")); + let mut version = env!("CARGO_PKG_VERSION").to_string(); + if let Some(git_sha) = option_env!("NODE_GIT_SHA") { + version = format!("{}-{}", version, git_sha); + } else { + warn!( + "git sha env var unavailable, casper-node build version will not include git short hash" + ); + } // Add a `@DEBUG` (or similar) tag to release string on non-release builds. if env!("NODE_BUILD_PROFILE") != "release" { @@ -92,21 +101,17 @@ fn version_string(color: bool) -> String { /// Color version string for the compiled node. Filled in at build time, output allocated at /// runtime. -pub static VERSION_STRING_COLOR: Lazy = Lazy::new(|| version_string(true)); +pub(crate) static VERSION_STRING_COLOR: Lazy = Lazy::new(|| version_string(true)); /// Version string for the compiled node. Filled in at build time, output allocated at runtime. -pub static VERSION_STRING: Lazy = Lazy::new(|| version_string(false)); +pub(crate) static VERSION_STRING: Lazy = Lazy::new(|| version_string(false)); /// Global value that indicates the currently running reactor should exit if it is non-zero. -pub static TERMINATION_REQUESTED: Lazy> = +pub(crate) static TERMINATION_REQUESTED: Lazy> = Lazy::new(|| Arc::new(AtomicUsize::new(0))); -/// Global flag that indicates the currently running reactor should dump its event queue. -pub static QUEUE_DUMP_REQUESTED: Lazy> = - Lazy::new(|| Arc::new(AtomicBool::new(false))); - /// Setup UNIX signal hooks for current application. -pub fn setup_signal_hooks() { +pub(crate) fn setup_signal_hooks() { for signal in TERM_SIGNALS { flag::register_usize( *signal, @@ -115,17 +120,55 @@ pub fn setup_signal_hooks() { ) .unwrap_or_else(|error| panic!("failed to register signal {}: {}", signal, error)); } - let _ = flag::register(SIGUSR1, Arc::clone(&*QUEUE_DUMP_REQUESTED)); } /// Constructs a new `NodeRng`. 
#[cfg(not(test))] -pub fn new_rng() -> NodeRng { +pub(crate) fn new_rng() -> NodeRng { NodeRng::from_entropy() } /// Constructs a new `NodeRng`. #[cfg(test)] -pub fn new_rng() -> NodeRng { +pub(crate) fn new_rng() -> NodeRng { NodeRng::new() } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn version_string_format() { + let string = version_string(false); + let (prefix, profile) = string.split_once('@').unwrap_or((string.as_str(), "")); + let (version, sha) = prefix.split_once('-').unwrap_or((prefix, "")); + + assert_eq!(version, env!("CARGO_PKG_VERSION")); + assert_eq!(sha, env::var("NODE_GIT_SHA").unwrap_or_default().as_str()); + if env!("NODE_BUILD_PROFILE") == "release" { + assert_eq!(profile, ""); + } else { + assert_eq!(profile, env!("NODE_BUILD_PROFILE").to_uppercase()) + } + } + + #[test] + fn version_string_color_format() { + let string = version_string(true); + let (prefix, profile) = string.split_once('@').unwrap_or((string.as_str(), "")); + let (version, sha) = prefix.split_once('-').unwrap_or((prefix, "")); + + assert_eq!(version, env!("CARGO_PKG_VERSION")); + assert_eq!(sha, env::var("NODE_GIT_SHA").unwrap_or_default().as_str()); + if env!("NODE_BUILD_PROFILE") == "release" { + assert_eq!(profile, ""); + } else { + assert_eq!( + profile, + Red.paint(env!("NODE_BUILD_PROFILE").to_uppercase()) + .to_string() + ); + } + } +} diff --git a/node/src/logging.rs b/node/src/logging.rs index 3d6d28785e..2084bdeee5 100644 --- a/node/src/logging.rs +++ b/node/src/logging.rs @@ -1,10 +1,11 @@ //! Logging via the tracing crate. 
-use std::{env, fmt, io}; +use std::{env, fmt, io, string::ToString}; use ansi_term::{Color, Style}; use anyhow::anyhow; use datasize::DataSize; +use once_cell::sync::OnceCell; use serde::{Deserialize, Serialize}; use smallvec::SmallVec; use tracing::{ @@ -13,15 +14,17 @@ use tracing::{ }; use tracing_subscriber::{ fmt::{ - format, + format::{self, FieldFn, Format, Json, JsonFields, Writer}, time::{FormatTime, SystemTime}, - FmtContext, FormatEvent, FormatFields, FormattedFields, + FmtContext, FormatEvent, FormatFields, FormattedFields, Layer, }, + layer::Layered, registry::LookupSpan, - EnvFilter, + reload::{self, Handle}, + EnvFilter, Registry, }; -const LOG_CONFIGURATION_ENVVAR: &str = "RUST_LOG"; +const LOG_VERBOSITY_LEVEL_ENVVAR: &str = "RUST_LOG"; const LOG_FIELD_MESSAGE: &str = "message"; const LOG_FIELD_TARGET: &str = "log.target"; @@ -29,28 +32,34 @@ const LOG_FIELD_MODULE: &str = "log.module_path"; const LOG_FIELD_FILE: &str = "log.file"; const LOG_FIELD_LINE: &str = "log.line"; +/// Global reload handle. +/// +/// We use a static variable for the reload handle since our logger instance is also global. +static RELOAD_HANDLE: OnceCell = OnceCell::new(); + /// Logging configuration. -#[derive(DataSize, Debug, Default, Deserialize, Serialize)] +#[derive(Clone, DataSize, Debug, Default, Deserialize, Serialize)] #[serde(deny_unknown_fields)] pub struct LoggingConfig { /// Output format for log. - format: LoggingFormat, + pub format: LoggingFormat, /// Colored output (has no effect if JSON format is enabled). /// /// If set, the logger will inject ANSI color codes into log messages. This is useful if /// writing out to stdout or stderr on an ANSI terminal, but not so if writing to a logfile. - color: bool, + pub color: bool, /// Abbreviate module names (has no effect if JSON format is enabled). /// /// If set, human-readable formats will abbreviate module names, `foo::bar::baz::bizz` will /// turn into `f:b:b:bizz`. 
- abbreviate_modules: bool, + pub abbreviate_modules: bool, } impl LoggingConfig { /// Creates a new instance of LoggingConfig. + #[cfg(test)] pub fn new(format: LoggingFormat, color: bool, abbreviate_modules: bool) -> Self { LoggingConfig { format, @@ -63,24 +72,19 @@ impl LoggingConfig { /// Logging output format. /// /// Defaults to "text"". -#[derive(DataSize, Debug, Deserialize, Serialize)] +#[derive(Clone, DataSize, Debug, Deserialize, Serialize, Default)] #[serde(rename_all = "lowercase")] pub enum LoggingFormat { /// Text format. + #[default] Text, /// JSON format. Json, } -impl Default for LoggingFormat { - fn default() -> Self { - LoggingFormat::Text - } -} - /// This is used to implement tracing's `FormatEvent` so that we can customize the way tracing /// events are formatted. -struct FmtEvent { +pub struct FmtEvent { /// Whether to use ANSI color formatting or not. ansi_color: bool, /// Whether module segments should be shortened to first letter only. @@ -147,13 +151,13 @@ where fn format_event( &self, ctx: &FmtContext<'_, S, N>, - writer: &mut dyn fmt::Write, + mut writer: Writer<'_>, event: &Event<'_>, ) -> fmt::Result { // print the date/time with dimmed style if `ansi_color` is true - self.enable_dimmed_if_ansi(writer)?; - SystemTime.format_time(writer)?; - self.disable_dimmed_if_ansi(writer)?; + self.enable_dimmed_if_ansi(&mut writer)?; + SystemTime.format_time(&mut writer)?; + self.disable_dimmed_if_ansi(&mut writer)?; // print the log level let meta = event.metadata(); @@ -170,7 +174,7 @@ where writer, " {}{:<6}{}", color.prefix(), - meta.level().to_string(), + meta.level(), color.suffix() )?; } else { @@ -205,7 +209,7 @@ where let module = { let full_module_path = meta .module_path() - .or_else(|| field_visitor.module.as_deref()) + .or(field_visitor.module.as_deref()) .unwrap_or_default(); if self.abbreviate_modules { // Use a smallvec for going up to six levels deep. 
@@ -228,10 +232,10 @@ where let file = if !self.abbreviate_modules { meta.file() - .or_else(|| field_visitor.file.as_deref()) + .or(field_visitor.file.as_deref()) .unwrap_or_default() - .rsplitn(2, '/') - .next() + .rsplit_once('/') + .map(|parts| parts.1) .unwrap_or_default() } else { "" @@ -240,13 +244,13 @@ where let line = meta.line().or(field_visitor.line).unwrap_or_default(); if !module.is_empty() && (!file.is_empty() || self.abbreviate_modules) { - self.enable_dimmed_if_ansi(writer)?; + self.enable_dimmed_if_ansi(&mut writer)?; write!(writer, "[{} {}:{}] ", module, file, line,)?; - self.disable_dimmed_if_ansi(writer)?; + self.disable_dimmed_if_ansi(&mut writer)?; } // print the log message and other fields - ctx.format_fields(writer, event)?; + ctx.format_fields(writer.by_ref(), event)?; writeln!(writer) } } @@ -254,43 +258,116 @@ where /// Initializes the logging system with the default parameters. /// /// See `init_params` for details. +#[cfg(test)] pub fn init() -> anyhow::Result<()> { init_with_config(&Default::default()) } +/// A handle for reloading the logger. +#[allow(clippy::type_complexity)] // Cannot be helped, unfortunately. +pub enum ReloadHandle { + /// Text-logger reload handle. + Text(Handle, FmtEvent>, Registry>>), + /// JSON-logger reload handle. + Json(Handle>, Registry>>), +} + +impl ReloadHandle { + /// Swaps out the [`EnvFilter`] used to filter log events. + fn reload_env_filter(&self, new_filter: EnvFilter) -> Result<(), reload::Error> { + match self { + ReloadHandle::Text(handle) => handle.reload(new_filter), + ReloadHandle::Json(handle) => handle.reload(new_filter), + } + } + + /// Returns a string representation of the current [`EnvFilter`], if set. + fn display_log_filter(&self) -> Result { + match self { + ReloadHandle::Text(handle) => handle.with_current(ToString::to_string), + ReloadHandle::Json(handle) => handle.with_current(ToString::to_string), + } + } +} + +/// Swaps out the global [`EnvFilter`]. 
+pub fn reload_global_env_filter(new_filter: EnvFilter) -> anyhow::Result<()> { + let handle = RELOAD_HANDLE + .get() + .ok_or_else(|| anyhow!("could not fetch reload handle - logger not initialized?"))?; + handle.reload_env_filter(new_filter)?; + + Ok(()) +} + +/// Returns a string representation of the current global [`EnvFilter`], if set. +pub fn display_global_env_filter() -> anyhow::Result { + let handle = RELOAD_HANDLE + .get() + .ok_or_else(|| anyhow!("could not fetch reload handle - logger not initialized?"))?; + let formatted = handle.display_log_filter()?; + + Ok(formatted) +} + +/// Type alias for the formatting function used. +pub type FormatDebugFn = fn(&mut Writer, &Field, &dyn fmt::Debug) -> fmt::Result; + +fn format_into_debug_writer( + writer: &mut Writer, + field: &Field, + value: &dyn fmt::Debug, +) -> fmt::Result { + match field.name() { + LOG_FIELD_MESSAGE => write!(writer, "{:?}", value), + LOG_FIELD_TARGET | LOG_FIELD_MODULE | LOG_FIELD_FILE | LOG_FIELD_LINE => Ok(()), + _ => write!(writer, "; {}={:?}", field, value), + } +} + /// Initializes the logging system. /// /// This function should only be called once during the lifetime of the application. Do not call /// this outside of the application or testing code, the installed logger is global. /// /// See the `README.md` for hints on how to configure logging at runtime. +// The `io::stdout as fn()...` casts are necessary, as is the `FormatDebugFn` cast. 
+#[allow(trivial_casts)] pub fn init_with_config(config: &LoggingConfig) -> anyhow::Result<()> { - let formatter = format::debug_fn(|writer, field, value| match field.name() { - LOG_FIELD_MESSAGE => write!(writer, "{:?}", value), - LOG_FIELD_TARGET | LOG_FIELD_MODULE | LOG_FIELD_FILE | LOG_FIELD_LINE => Ok(()), - _ => write!(writer, "; {}={:?}", field, value), - }); + let formatter = format::debug_fn(format_into_debug_writer as FormatDebugFn); let filter = EnvFilter::new( - env::var(LOG_CONFIGURATION_ENVVAR) + env::var(LOG_VERBOSITY_LEVEL_ENVVAR) .as_deref() .unwrap_or("warn,casper_node=info"), ); match config.format { // Setup a new tracing-subscriber writing to `stdout` for logging. - LoggingFormat::Text => tracing_subscriber::fmt() - .with_writer(io::stdout) - .with_env_filter(filter) - .fmt_fields(formatter) - .event_format(FmtEvent::new(config.color, config.abbreviate_modules)) - .try_init(), + LoggingFormat::Text => { + let builder = tracing_subscriber::fmt() + .with_writer(io::stdout as fn() -> io::Stdout) + .with_env_filter(filter) + .fmt_fields(formatter) + .event_format(FmtEvent::new(config.color, config.abbreviate_modules)) + .with_filter_reloading(); + let handle = ReloadHandle::Text(builder.reload_handle()); + builder.try_init().map_err(|error| anyhow!(error))?; + drop(RELOAD_HANDLE.set(handle)); + Ok(()) + } + // JSON logging writes to `stdout` as well but uses the JSON format. 
- LoggingFormat::Json => tracing_subscriber::fmt() - .with_writer(io::stdout) - .with_env_filter(filter) - .json() - .try_init(), + LoggingFormat::Json => { + let builder = tracing_subscriber::fmt() + .with_writer(io::stdout as fn() -> io::Stdout) + .with_env_filter(filter) + .json() + .with_filter_reloading(); + let handle = ReloadHandle::Json(builder.reload_handle()); + builder.try_init().map_err(|error| anyhow!(error))?; + drop(RELOAD_HANDLE.set(handle)); + Ok(()) + } } - .map_err(|error| anyhow!(error)) } diff --git a/node/src/protocol.rs b/node/src/protocol.rs index 67ac849cb0..5e50a82613 100644 --- a/node/src/protocol.rs +++ b/node/src/protocol.rs @@ -1,26 +1,55 @@ //! A network message type used for communication between nodes -use std::fmt::{self, Display, Formatter}; +use std::{ + fmt::{self, Display, Formatter}, + sync::Arc, +}; use derive_more::From; use fmt::Debug; +use futures::{future::BoxFuture, FutureExt}; use hex_fmt::HexFmt; use serde::{Deserialize, Serialize}; +use strum::EnumDiscriminants; + +use casper_types::{BlockV2, FinalitySignatureV2, Transaction}; use crate::{ - components::{consensus, gossiper, small_network::GossipedAddress}, - types::{Deploy, FinalitySignature, Item, Tag}, + components::{ + consensus, + fetcher::{FetchItem, FetchResponse, Tag}, + gossiper, + network::{EstimatorWeights, FromIncoming, GossipedAddress, MessageKind, Payload}, + }, + effect::{ + incoming::{ + ConsensusDemand, ConsensusMessageIncoming, FinalitySignatureIncoming, GossiperIncoming, + NetRequest, NetRequestIncoming, NetResponse, NetResponseIncoming, TrieDemand, + TrieRequest, TrieRequestIncoming, TrieResponse, TrieResponseIncoming, + }, + AutoClosingResponder, EffectBuilder, + }, + types::NodeId, }; /// Reactor message. -#[derive(Clone, From, Serialize, Deserialize)] -pub enum Message { +#[derive(Clone, From, Serialize, Deserialize, EnumDiscriminants)] +#[strum_discriminants(derive(strum::EnumIter))] +pub(crate) enum Message { /// Consensus component message. 
#[from] Consensus(consensus::ConsensusMessage), + /// Consensus component demand. + #[from] + ConsensusRequest(consensus::ConsensusRequestMessage), + /// Block gossiper component message. + #[from] + BlockGossiper(gossiper::Message), /// Deploy gossiper component message. #[from] - DeployGossiper(gossiper::Message), + TransactionGossiper(gossiper::Message), + #[from] + FinalitySignatureGossiper(gossiper::Message), /// Address gossiper component message. #[from] AddressGossiper(gossiper::Message), @@ -36,39 +65,148 @@ pub enum Message { /// The type tag of the contained item. tag: Tag, /// The serialized item. - serialized_item: Vec, + serialized_item: Arc<[u8]>, }, /// Finality signature. #[from] - FinalitySignature(Box), + FinalitySignature(Box), +} + +impl Payload for Message { + #[inline] + fn message_kind(&self) -> MessageKind { + match self { + Message::Consensus(_) => MessageKind::Consensus, + Message::ConsensusRequest(_) => MessageKind::Consensus, + Message::BlockGossiper(_) => MessageKind::BlockGossip, + Message::TransactionGossiper(_) => MessageKind::TransactionGossip, + Message::AddressGossiper(_) => MessageKind::AddressGossip, + Message::GetRequest { tag, .. } | Message::GetResponse { tag, .. } => match tag { + Tag::Transaction | Tag::LegacyDeploy => MessageKind::TransactionTransfer, + Tag::Block => MessageKind::BlockTransfer, + Tag::BlockHeader => MessageKind::BlockTransfer, + Tag::TrieOrChunk => MessageKind::TrieTransfer, + Tag::FinalitySignature => MessageKind::Other, + Tag::SyncLeap => MessageKind::BlockTransfer, + Tag::ApprovalsHashes => MessageKind::BlockTransfer, + Tag::BlockExecutionResults => MessageKind::BlockTransfer, + }, + Message::FinalitySignature(_) => MessageKind::Consensus, + Message::FinalitySignatureGossiper(_) => MessageKind::FinalitySignatureGossip, + } + } + + fn is_low_priority(&self) -> bool { + // We only deprioritize requested trie nodes, as they are the most commonly requested item + // during fast sync. 
+ match self { + Message::Consensus(_) => false, + Message::ConsensusRequest(_) => false, + Message::TransactionGossiper(_) => false, + Message::BlockGossiper(_) => false, + Message::FinalitySignatureGossiper(_) => false, + Message::AddressGossiper(_) => false, + Message::GetRequest { tag, .. } if *tag == Tag::TrieOrChunk => true, + Message::GetRequest { .. } => false, + Message::GetResponse { .. } => false, + Message::FinalitySignature(_) => false, + } + } + + #[inline] + fn incoming_resource_estimate(&self, weights: &EstimatorWeights) -> u32 { + match self { + Message::Consensus(_) => weights.consensus, + Message::ConsensusRequest(_) => weights.consensus, + Message::BlockGossiper(_) => weights.block_gossip, + Message::TransactionGossiper(_) => weights.transaction_gossip, + Message::FinalitySignatureGossiper(_) => weights.finality_signature_gossip, + Message::AddressGossiper(_) => weights.address_gossip, + Message::GetRequest { tag, .. } => match tag { + Tag::Transaction => weights.transaction_requests, + Tag::LegacyDeploy => weights.legacy_deploy_requests, + Tag::Block => weights.block_requests, + Tag::BlockHeader => weights.block_header_requests, + Tag::TrieOrChunk => weights.trie_requests, + Tag::FinalitySignature => weights.finality_signature_requests, + Tag::SyncLeap => weights.sync_leap_requests, + Tag::ApprovalsHashes => weights.approvals_hashes_requests, + Tag::BlockExecutionResults => weights.execution_results_requests, + }, + Message::GetResponse { tag, .. 
} => match tag { + Tag::Transaction => weights.transaction_responses, + Tag::LegacyDeploy => weights.legacy_deploy_responses, + Tag::Block => weights.block_responses, + Tag::BlockHeader => weights.block_header_responses, + Tag::TrieOrChunk => weights.trie_responses, + Tag::FinalitySignature => weights.finality_signature_responses, + Tag::SyncLeap => weights.sync_leap_responses, + Tag::ApprovalsHashes => weights.approvals_hashes_responses, + Tag::BlockExecutionResults => weights.execution_results_responses, + }, + Message::FinalitySignature(_) => weights.finality_signature_broadcasts, + } + } + + fn is_unsafe_for_syncing_peers(&self) -> bool { + match self { + Message::Consensus(_) => false, + Message::ConsensusRequest(_) => false, + Message::BlockGossiper(_) => false, + Message::TransactionGossiper(_) => false, + Message::FinalitySignatureGossiper(_) => false, + Message::AddressGossiper(_) => false, + // Trie requests can deadlock between syncing nodes. + Message::GetRequest { tag, .. } if *tag == Tag::TrieOrChunk => true, + Message::GetRequest { .. } => false, + Message::GetResponse { .. } => false, + Message::FinalitySignature(_) => false, + } + } } impl Message { - pub(crate) fn new_get_request(id: &T::Id) -> Result { + pub(crate) fn new_get_request(id: &T::Id) -> Result { Ok(Message::GetRequest { tag: T::TAG, serialized_id: bincode::serialize(id)?, }) } - pub(crate) fn new_get_response(item: &T) -> Result { + pub(crate) fn new_get_response( + item: &FetchResponse, + ) -> Result { Ok(Message::GetResponse { tag: T::TAG, - serialized_item: bincode::serialize(item)?, + serialized_item: item.to_serialized()?.into(), }) } + + /// Creates a new get response from already serialized data. 
+ pub(crate) fn new_get_response_from_serialized(tag: Tag, serialized_item: Arc<[u8]>) -> Self { + Message::GetResponse { + tag, + serialized_item, + } + } } impl Debug for Message { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { match self { Message::Consensus(c) => f.debug_tuple("Consensus").field(&c).finish(), - Message::DeployGossiper(dg) => f.debug_tuple("DeployGossiper").field(&dg).finish(), + Message::ConsensusRequest(c) => f.debug_tuple("ConsensusRequest").field(&c).finish(), + Message::BlockGossiper(dg) => f.debug_tuple("BlockGossiper").field(&dg).finish(), + Message::TransactionGossiper(dg) => f.debug_tuple("DeployGossiper").field(&dg).finish(), + Message::FinalitySignatureGossiper(sig) => f + .debug_tuple("FinalitySignatureGossiper") + .field(&sig) + .finish(), Message::AddressGossiper(ga) => f.debug_tuple("AddressGossiper").field(&ga).finish(), Message::GetRequest { tag, serialized_id } => f .debug_struct("GetRequest") .field("tag", tag) - .field("serialized_item", &HexFmt(serialized_id)) + .field("serialized_id", &HexFmt(serialized_id)) .finish(), Message::GetResponse { tag, @@ -76,7 +214,10 @@ impl Debug for Message { } => f .debug_struct("GetResponse") .field("tag", tag) - .field("serialized_item", &HexFmt(serialized_item)) + .field( + "serialized_item", + &format!("{} bytes", serialized_item.len()), + ) .finish(), Message::FinalitySignature(fs) => { f.debug_tuple("FinalitySignature").field(&fs).finish() @@ -84,12 +225,60 @@ impl Debug for Message { } } } +mod specimen_support { + use crate::utils::specimen::{ + largest_get_request, largest_get_response, largest_variant, Cache, LargestSpecimen, + SizeEstimator, + }; + + use super::{Message, MessageDiscriminants}; + + impl LargestSpecimen for Message { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + largest_variant::( + estimator, + |variant| match variant { + MessageDiscriminants::Consensus => { + Message::Consensus(LargestSpecimen::largest_specimen(estimator, cache)) + } + 
MessageDiscriminants::ConsensusRequest => Message::ConsensusRequest( + LargestSpecimen::largest_specimen(estimator, cache), + ), + MessageDiscriminants::BlockGossiper => { + Message::BlockGossiper(LargestSpecimen::largest_specimen(estimator, cache)) + } + MessageDiscriminants::TransactionGossiper => Message::TransactionGossiper( + LargestSpecimen::largest_specimen(estimator, cache), + ), + MessageDiscriminants::FinalitySignatureGossiper => { + Message::FinalitySignatureGossiper(LargestSpecimen::largest_specimen( + estimator, cache, + )) + } + MessageDiscriminants::AddressGossiper => Message::AddressGossiper( + LargestSpecimen::largest_specimen(estimator, cache), + ), + MessageDiscriminants::GetRequest => largest_get_request(estimator, cache), + MessageDiscriminants::GetResponse => largest_get_response(estimator, cache), + MessageDiscriminants::FinalitySignature => Message::FinalitySignature( + LargestSpecimen::largest_specimen(estimator, cache), + ), + }, + ) + } + } +} impl Display for Message { fn fmt(&self, f: &mut Formatter) -> fmt::Result { match self { Message::Consensus(consensus) => write!(f, "Consensus::{}", consensus), - Message::DeployGossiper(deploy) => write!(f, "DeployGossiper::{}", deploy), + Message::ConsensusRequest(consensus) => write!(f, "ConsensusRequest({})", consensus), + Message::BlockGossiper(deploy) => write!(f, "BlockGossiper::{}", deploy), + Message::TransactionGossiper(txn) => write!(f, "TransactionGossiper::{}", txn), + Message::FinalitySignatureGossiper(sig) => { + write!(f, "FinalitySignatureGossiper::{}", sig) + } Message::AddressGossiper(gossiped_address) => { write!(f, "AddressGossiper::({})", gossiped_address) } @@ -106,3 +295,188 @@ impl Display for Message { } } } + +impl FromIncoming for REv +where + REv: From + + From + + From> + + From> + + From> + + From> + + From + + From + + From + + From + + From + + From, +{ + fn from_incoming(sender: NodeId, payload: Message) -> Self { + match payload { + Message::Consensus(message) => 
ConsensusMessageIncoming { + sender, + message: Box::new(message), + } + .into(), + Message::ConsensusRequest(_message) => { + // TODO: Remove this once from_incoming and try_demand_from_incoming are unified. + unreachable!("called from_incoming with a consensus request") + } + Message::BlockGossiper(message) => GossiperIncoming { + sender, + message: Box::new(message), + } + .into(), + Message::TransactionGossiper(message) => GossiperIncoming { + sender, + message: Box::new(message), + } + .into(), + Message::FinalitySignatureGossiper(message) => GossiperIncoming { + sender, + message: Box::new(message), + } + .into(), + Message::AddressGossiper(message) => GossiperIncoming { + sender, + message: Box::new(message), + } + .into(), + Message::GetRequest { tag, serialized_id } => match tag { + Tag::Transaction => NetRequestIncoming { + sender, + message: Box::new(NetRequest::Transaction(serialized_id)), + } + .into(), + Tag::LegacyDeploy => NetRequestIncoming { + sender, + message: Box::new(NetRequest::LegacyDeploy(serialized_id)), + } + .into(), + Tag::Block => NetRequestIncoming { + sender, + message: Box::new(NetRequest::Block(serialized_id)), + } + .into(), + Tag::BlockHeader => NetRequestIncoming { + sender, + message: Box::new(NetRequest::BlockHeader(serialized_id)), + } + .into(), + Tag::TrieOrChunk => TrieRequestIncoming { + sender, + message: Box::new(TrieRequest(serialized_id)), + } + .into(), + Tag::FinalitySignature => NetRequestIncoming { + sender, + message: Box::new(NetRequest::FinalitySignature(serialized_id)), + } + .into(), + Tag::SyncLeap => NetRequestIncoming { + sender, + message: Box::new(NetRequest::SyncLeap(serialized_id)), + } + .into(), + Tag::ApprovalsHashes => NetRequestIncoming { + sender, + message: Box::new(NetRequest::ApprovalsHashes(serialized_id)), + } + .into(), + Tag::BlockExecutionResults => NetRequestIncoming { + sender, + message: Box::new(NetRequest::BlockExecutionResults(serialized_id)), + } + .into(), + }, + 
Message::GetResponse { + tag, + serialized_item, + } => match tag { + Tag::Transaction => NetResponseIncoming { + sender, + message: Box::new(NetResponse::Transaction(serialized_item)), + } + .into(), + Tag::LegacyDeploy => NetResponseIncoming { + sender, + message: Box::new(NetResponse::LegacyDeploy(serialized_item)), + } + .into(), + Tag::Block => NetResponseIncoming { + sender, + message: Box::new(NetResponse::Block(serialized_item)), + } + .into(), + Tag::BlockHeader => NetResponseIncoming { + sender, + message: Box::new(NetResponse::BlockHeader(serialized_item)), + } + .into(), + Tag::TrieOrChunk => TrieResponseIncoming { + sender, + message: Box::new(TrieResponse(serialized_item.to_vec())), + } + .into(), + Tag::FinalitySignature => NetResponseIncoming { + sender, + message: Box::new(NetResponse::FinalitySignature(serialized_item)), + } + .into(), + Tag::SyncLeap => NetResponseIncoming { + sender, + message: Box::new(NetResponse::SyncLeap(serialized_item)), + } + .into(), + Tag::ApprovalsHashes => NetResponseIncoming { + sender, + message: Box::new(NetResponse::ApprovalsHashes(serialized_item)), + } + .into(), + Tag::BlockExecutionResults => NetResponseIncoming { + sender, + message: Box::new(NetResponse::BlockExecutionResults(serialized_item)), + } + .into(), + }, + Message::FinalitySignature(message) => { + FinalitySignatureIncoming { sender, message }.into() + } + } + } + + fn try_demand_from_incoming( + effect_builder: EffectBuilder, + sender: NodeId, + payload: Message, + ) -> Result<(Self, BoxFuture<'static, Option>), Message> + where + Self: Sized + Send, + { + match payload { + Message::GetRequest { + tag: Tag::TrieOrChunk, + serialized_id, + } => { + let (ev, fut) = effect_builder.create_request_parts(move |responder| TrieDemand { + sender, + request_msg: Box::new(TrieRequest(serialized_id)), + auto_closing_responder: AutoClosingResponder::from_opt_responder(responder), + }); + + Ok((ev, fut.boxed())) + } + Message::ConsensusRequest(request_msg) => { 
+ let (ev, fut) = + effect_builder.create_request_parts(move |responder| ConsensusDemand { + sender, + request_msg: Box::new(request_msg), + auto_closing_responder: AutoClosingResponder::from_opt_responder(responder), + }); + + Ok((ev, fut.boxed())) + } + _ => Err(payload), + } + } +} diff --git a/node/src/reactor.rs b/node/src/reactor.rs index f3a390dd50..caf82ad680 100644 --- a/node/src/reactor.rs +++ b/node/src/reactor.rs @@ -1,93 +1,105 @@ +#![allow(clippy::boxed_local)] // We use boxed locals to pass on event data unchanged. + //! Reactor core. //! //! Any long running instance of the node application uses an event-dispatch pattern: Events are //! generated and stored on an event queue, then processed one-by-one. This process happens inside -//! the reactor*, which also exclusively holds the state of the application besides pending events: +//! the reactor, which also exclusively holds the state of the application besides pending events: +//! +//! 1. The reactor pops a reactor event off the event queue (called a +//! [`Scheduler`](type.Scheduler.html)). +//! 2. The event is dispatched by the reactor via [`Reactor::dispatch_event`]. Since the reactor +//! holds mutable state, it can grant any component that processes an event mutable, exclusive +//! access to its state. +//! 3. Once the [(synchronous)](`crate::components::Component::handle_event`) event processing has +//! completed, the component returns an [`effect`](crate::effect). +//! 4. The reactor spawns a task that executes these effects and possibly schedules more events. +//! 5. go to 1. //! -//! 1. The reactor pops an event off the event queue (called a [`Scheduler`](type.Scheduler.html)). -//! 2. The event is dispatched by the reactor. Since the reactor holds mutable state, it can grant -//! any component that processes an event mutable, exclusive access to its state. -//! 3. Once the (synchronous) event processing has completed, the component returns an effect. -//! 4. 
The reactor spawns a task that executes these effects and eventually schedules another event. -//! 5. meanwhile go to 1. +//! For descriptions of events and instructions on how to create effects, see the +//! [`effect`](super::effect) module. //! //! # Reactors //! //! There is no single reactor, but rather a reactor for each application type, since it defines //! which components are used and how they are wired up. The reactor defines the state by being a -//! `struct` of components, their initialization through the -//! [`Reactor::new()`](trait.Reactor.html#tymethod.new) and a method -//! [`Reactor::dispatch_event()`](trait.Reactor.html#tymethod.dispatch_event) to dispatch events to -//! components. +//! `struct` of components, their initialization through [`Reactor::new`] and event dispatching to +//! components via [`Reactor::dispatch_event`]. //! -//! With all these set up, a reactor can be executed using a [`Runner`](struct.Runner.html), either -//! in a step-wise manner using [`crank`](struct.Runner.html#method.crank) or indefinitely using -//! [`run`](struct.Runner.html#method.crank). +//! With all these set up, a reactor can be executed using a [`Runner`], either in a step-wise +//! manner using [`Runner::crank`] or indefinitely using [`Runner::run`]. 
mod event_queue_metrics; -pub mod initializer; -pub mod joiner; +pub(crate) mod main_reactor; mod queue_kind; -pub mod validator; -#[cfg(test)] -use std::sync::Arc; use std::{ any, collections::HashMap, env, fmt::{Debug, Display}, - fs::File, - mem, + io::Write, + num::NonZeroU64, str::FromStr, - sync::atomic::Ordering, + sync::{atomic::Ordering, Arc}, }; use datasize::DataSize; -use futures::{future::BoxFuture, FutureExt}; -use jemalloc_ctl::{epoch as jemalloc_epoch, stats::allocated as jemalloc_allocated}; +use erased_serde::Serialize as ErasedSerialize; +#[cfg(test)] +use fake_instant::FakeClock; +#[cfg(test)] +use futures::future::BoxFuture; +use futures::FutureExt; use once_cell::sync::Lazy; use prometheus::{self, Histogram, HistogramOpts, IntCounter, IntGauge, Registry}; use quanta::{Clock, IntoNanoseconds}; use serde::Serialize; use signal_hook::consts::signal::{SIGINT, SIGQUIT, SIGTERM}; +use stats_alloc::{Stats, INSTRUMENTED_SYSTEM}; use tokio::time::{Duration, Instant}; -use tracing::{debug, debug_span, error, info, instrument, trace, warn}; -use tracing_futures::Instrument; +use tracing::{debug_span, error, info, instrument, trace, warn, Instrument, Span}; + +#[cfg(test)] +use crate::components::ComponentState; +#[cfg(test)] +use casper_types::testing::TestRng; +use casper_types::{ + Block, BlockHeader, Chainspec, ChainspecRawBytes, FinalitySignature, Transaction, +}; #[cfg(target_os = "linux")] use utils::rlimit::{Limit, OpenFiles, ResourceLimit}; +#[cfg(test)] +use crate::testing::{network::NetworkedReactor, ConditionCheckReactor}; use crate::{ - effect::{announcements::ControlAnnouncement, Effect, EffectBuilder, Effects}, - types::{ExitCode, Timestamp}, + components::{ + block_accumulator, + fetcher::{self, FetchItem}, + network::{blocklist::BlocklistJustification, Identity as NetworkIdentity}, + transaction_acceptor, + }, + effect::{ + announcements::{ControlAnnouncement, PeerBehaviorAnnouncement, QueueDumpFormat}, + incoming::NetResponse, + Effect, 
EffectBuilder, EffectExt, Effects, + }, + failpoints::FailpointActivation, + types::{BlockExecutionResultsOrChunk, ExitCode, LegacyDeploy, NodeId, SyncLeap, TrieOrChunk}, unregister_metric, - utils::{self, WeightedRoundRobin}, - NodeRng, QUEUE_DUMP_REQUESTED, TERMINATION_REQUESTED, + utils::{self, SharedFlag, WeightedRoundRobin}, + NodeRng, TERMINATION_REQUESTED, }; -#[cfg(test)] -use crate::{reactor::initializer::Reactor as InitializerReactor, types::Chainspec}; -pub use queue_kind::QueueKind; - -/// Optional upper threshold for total RAM allocated in mB before dumping queues to disk. -const MEM_DUMP_THRESHOLD_MB_ENV_VAR: &str = "CL_MEM_DUMP_THRESHOLD_MB"; -static MEM_DUMP_THRESHOLD_MB: Lazy> = Lazy::new(|| { - env::var(MEM_DUMP_THRESHOLD_MB_ENV_VAR) - .map(|threshold_str| { - u64::from_str(&threshold_str).unwrap_or_else(|error| { - panic!( - "can't parse env var {}={} as a u64: {}", - MEM_DUMP_THRESHOLD_MB_ENV_VAR, threshold_str, error - ) - }) - }) - .ok() -}); +use casper_storage::block_store::types::ApprovalsHashes; +pub(crate) use queue_kind::QueueKind; /// Default threshold for when an event is considered slow. Can be overridden by setting the env /// var `CL_EVENT_MAX_MICROSECS=`. 
const DEFAULT_DISPATCH_EVENT_THRESHOLD: Duration = Duration::from_secs(1); const DISPATCH_EVENT_THRESHOLD_ENV_VAR: &str = "CL_EVENT_MAX_MICROSECS"; +#[cfg(test)] +const POLL_INTERVAL: Duration = Duration::from_millis(10); static DISPATCH_EVENT_THRESHOLD: Lazy = Lazy::new(|| { env::var(DISPATCH_EVENT_THRESHOLD_ENV_VAR) @@ -133,10 +145,10 @@ fn adjust_open_files_limit() { if let Err(err) = new_limit.set() { warn!(%err, current=current_limit.current(), target=best_possible, "did not succeed in raising open files limit") } else { - debug!(?new_limit, "successfully increased open files limit"); + tracing::debug!(?new_limit, "successfully increased open files limit"); } } else { - debug!( + tracing::debug!( ?current_limit, "not changing open files limit, already sufficient" ); @@ -151,23 +163,16 @@ fn adjust_open_files_limit() { info!("not on linux, not adjusting open files limit"); } -/// The value returned by a reactor on completion of the `run()` loop. -#[derive(Clone, Copy, PartialEq, Eq, Debug, DataSize)] -pub enum ReactorExit { - /// The process should continue running, moving to the next reactor. - ProcessShouldContinue, - /// The process should exit with the given exit code to allow the launcher to react - /// accordingly. - ProcessShouldExit(ExitCode), -} - /// Event scheduler /// /// The scheduler is a combination of multiple event queues that are polled in a specific order. It /// is the central hook for any part of the program that schedules events directly. /// /// Components rarely use this, but use a bound `EventQueueHandle` instead. -pub type Scheduler = WeightedRoundRobin; +/// +/// Schedule tuples contain an optional ancestor ID and the actual event. The ancestor ID indicates +/// which potential previous event resulted in the event being created. +pub(crate) type Scheduler = WeightedRoundRobin<(Option, Ev), QueueKind>; /// Event queue handle /// @@ -175,43 +180,80 @@ pub type Scheduler = WeightedRoundRobin; /// outside of the normal event loop. 
It gives different parts a chance to schedule messages that /// stem from things like external IO. #[derive(DataSize, Debug)] -pub struct EventQueueHandle(&'static Scheduler) +pub(crate) struct EventQueueHandle where - REv: 'static; + REv: 'static, +{ + /// A reference to the scheduler of the event queue. + scheduler: &'static Scheduler, + /// Flag indicating whether or not the reactor processing this event queue is shutting down. + is_shutting_down: SharedFlag, +} // Implement `Clone` and `Copy` manually, as `derive` will make it depend on `R` and `Ev` otherwise. impl Clone for EventQueueHandle { fn clone(&self) -> Self { - EventQueueHandle(self.0) + *self } } impl Copy for EventQueueHandle {} impl EventQueueHandle { - pub(crate) fn new(scheduler: &'static Scheduler) -> Self { - EventQueueHandle(scheduler) + /// Creates a new event queue handle. + pub(crate) fn new(scheduler: &'static Scheduler, is_shutting_down: SharedFlag) -> Self { + EventQueueHandle { + scheduler, + is_shutting_down, + } + } + + /// Creates a new event queue handle that is not connected to a shutdown flag. + /// + /// This method is used in tests, where we are never disabling shutdown warnings anyway. + #[cfg(test)] + pub(crate) fn without_shutdown(scheduler: &'static Scheduler) -> Self { + EventQueueHandle::new(scheduler, SharedFlag::global_shared()) } /// Schedule an event on a specific queue. - #[inline] + /// + /// The scheduled event will not have an ancestor. pub(crate) async fn schedule(self, event: Ev, queue_kind: QueueKind) where REv: From, { - self.0.push(event.into(), queue_kind).await + self.schedule_with_ancestor(None, event, queue_kind).await; + } + + /// Schedule an event on a specific queue. + pub(crate) async fn schedule_with_ancestor( + self, + ancestor: Option, + event: Ev, + queue_kind: QueueKind, + ) where + REv: From, + { + self.scheduler + .push((ancestor, event.into()), queue_kind) + .await; } /// Returns number of events in each of the scheduler's queues. 
- #[inline] pub(crate) fn event_queues_counts(&self) -> HashMap { - self.0.event_queues_counts() + self.scheduler.event_queues_counts() + } + + /// Returns whether the associated reactor is currently shutting down. + pub(crate) fn shutdown_flag(&self) -> SharedFlag { + self.is_shutting_down } } /// Reactor core. /// /// Any reactor should implement this trait and be executed by the `reactor::run` function. -pub trait Reactor: Sized { +pub(crate) trait Reactor: Sized { // Note: We've gone for the `Sized` bound here, since we return an instance in `new`. As an // alternative, `new` could return a boxed instance instead, removing this requirement. @@ -246,33 +288,55 @@ pub trait Reactor: Sized { /// If any instantiation fails, an error is returned. fn new( cfg: Self::Config, + chainspec: Arc, + chainspec_raw_bytes: Arc, + network_identity: NetworkIdentity, registry: &Registry, event_queue: EventQueueHandle, rng: &mut NodeRng, ) -> Result<(Self, Effects), Self::Error>; - /// If `Some`, indicates that the reactor has completed all its work and should no longer - /// dispatch events. The running process may stop or may keep running with a new reactor. - fn maybe_exit(&self) -> Option; - /// Instructs the reactor to update performance metrics, if any. fn update_metrics(&mut self, _event_queue_handle: EventQueueHandle) {} + + /// Activate/deactivate a failpoint. + fn activate_failpoint(&mut self, _activation: &FailpointActivation) { + // Default is to ignore the failpoint. If failpoint support is enabled for a reactor, route + // the activation to the respective components here. + } + + /// Returns the state of a named components. + /// + /// May return `None` if the component cannot be found, or if the reactor does not support + /// querying component states. + #[allow(dead_code)] + #[cfg(test)] + fn get_component_state(&self, _name: &str) -> Option<&ComponentState> { + None + } } /// A reactor event type. 
-pub trait ReactorEvent: Send + Debug + From + 'static { - /// Returns the event as a control announcement, if possible. +pub(crate) trait ReactorEvent: Send + Debug + From + 'static { + /// Returns `true` if the event is a control announcement variant. + fn is_control(&self) -> bool; + + /// Converts the event into a control announcement without copying. /// - /// Returns a reference to a wrapped - /// [`ControlAnnouncement`](`crate::effect::announcements::ControlAnnouncement`) if the event - /// is indeed a control announcement variant. - fn as_control(&self) -> Option<&ControlAnnouncement>; + /// Note that this function must return `Some` if and only `is_control` returns `true`. + fn try_into_control(self) -> Option; + + /// Returns a cheap but human-readable description of the event. + fn description(&self) -> &'static str { + "anonymous event" + } } /// A drop-like trait for `async` compatible drop-and-wait. /// /// Shuts down a type by explicitly freeing resources, but allowing to wait on cleanup to complete. -pub trait Finalize: Sized { +#[cfg(test)] +pub(crate) trait Finalize: Sized { /// Runs cleanup code and waits for a shutdown to complete. /// /// This function must always be optional and a way to wait for all resources to be freed, not @@ -294,10 +358,10 @@ struct AllocatedMem { /// A runner for a reactor. /// -/// The runner manages a reactors event queue and reactor itself and can run it either continuously +/// The runner manages a reactor's event queue and reactor itself and can run it either continuously /// or in a step-by-step manner. #[derive(Debug)] -pub struct Runner +pub(crate) struct Runner where R: Reactor, { @@ -308,7 +372,7 @@ where reactor: R, /// Counter for events, to aid tracing. - event_count: usize, + current_event_id: u64, /// Timestamp of last reactor metrics update. last_metrics: Instant, @@ -317,7 +381,7 @@ where metrics: RunnerMetrics, /// Check if we need to update reactor metrics every this many events. 
- event_metrics_threshold: usize, + event_metrics_threshold: u64, /// Only update reactor metrics if at least this much time has passed. event_metrics_min_delay: Duration, @@ -325,8 +389,8 @@ where /// An accurate, possible TSC-supporting clock. clock: Clock, - /// Last queue dump timestamp - last_queue_dump: Option, + /// Flag indicating the reactor is being shut down. + is_shutting_down: SharedFlag, } /// Metric data for the Runner @@ -336,7 +400,7 @@ struct RunnerMetrics { events: IntCounter, /// Histogram of how long it took to dispatch an event. event_dispatch_duration: Histogram, - /// Total allocated RAM in bytes, as reported by jemalloc. + /// Total allocated RAM in bytes, as reported by stats_alloc. allocated_ram_bytes: IntGauge, /// Total consumed RAM in bytes, as reported by sys-info. consumed_ram_bytes: IntGauge, @@ -349,13 +413,16 @@ struct RunnerMetrics { impl RunnerMetrics { /// Create and register new runner metrics. fn new(registry: &Registry) -> Result { - let events = IntCounter::new("runner_events", "total event count")?; + let events = IntCounter::new( + "runner_events", + "running total count of events handled by this reactor", + )?; // Create an event dispatch histogram, putting extra emphasis on the area between 1-10 us. let event_dispatch_duration = Histogram::with_opts( HistogramOpts::new( "event_dispatch_duration", - "duration of complete dispatch of a single event in nanoseconds", + "time in nanoseconds to dispatch an event", ) .buckets(vec![ 100.0, @@ -419,45 +486,60 @@ where R::Event: Serialize, R::Error: From, { - /// Creates a new runner from a given configuration. - /// - /// Creates a metrics registry that is only going to be used in this runner. - #[inline] - pub async fn new(cfg: R::Config, rng: &mut NodeRng) -> Result { - // Instantiate a new registry for metrics for this reactor. 
- let registry = Registry::new(); - Self::with_metrics(cfg, rng, ®istry).await - } - /// Creates a new runner from a given configuration, using existing metrics. - #[inline] - #[instrument("runner creation", level = "debug", skip(cfg, rng, registry))] - pub async fn with_metrics( + #[instrument( + "init", + level = "debug", + skip_all, + fields(node_id = %NodeId::from(&network_identity)) + )] + pub(crate) async fn with_metrics( cfg: R::Config, + chainspec: Arc, + chainspec_raw_bytes: Arc, + network_identity: NetworkIdentity, rng: &mut NodeRng, registry: &Registry, ) -> Result { adjust_open_files_limit(); - let event_size = mem::size_of::(); + let event_size = size_of::(); // Check if the event is of a reasonable size. This only emits a runtime warning at startup // right now, since storage size of events is not an issue per se, but copying might be // expensive if events get too large. - if event_size > 16 * mem::size_of::() { + if event_size > 16 * size_of::() { warn!( %event_size, type_name = ?any::type_name::(), "large event size, consider reducing it or boxing" ); } - let scheduler = utils::leak(Scheduler::new(QueueKind::weights())); - - let event_queue = EventQueueHandle::new(scheduler); - let (reactor, initial_effects) = R::new(cfg, registry, event_queue, rng)?; + let event_queue_dump_threshold = + env::var("CL_EVENT_QUEUE_DUMP_THRESHOLD").map_or(None, |s| s.parse::().ok()); + + let scheduler = utils::leak(Scheduler::new( + QueueKind::weights(), + event_queue_dump_threshold, + )); + let is_shutting_down = SharedFlag::new(); + let event_queue = EventQueueHandle::new(scheduler, is_shutting_down); + let (reactor, initial_effects) = R::new( + cfg, + chainspec, + chainspec_raw_bytes, + network_identity, + registry, + event_queue, + rng, + )?; + info!( + "Reactor: with_metrics has: {} initial_effects", + initial_effects.len() + ); // Run all effects from component instantiation. 
- process_effects(scheduler, initial_effects) + process_effects(None, scheduler, initial_effects, QueueKind::Regular) .instrument(debug_span!("process initial effects")) .await; @@ -466,49 +548,28 @@ where Ok(Runner { scheduler, reactor, - event_count: 0, + current_event_id: 1, metrics: RunnerMetrics::new(registry)?, last_metrics: Instant::now(), event_metrics_min_delay: Duration::from_secs(30), event_metrics_threshold: 1000, clock: Clock::new(), - last_queue_dump: None, + is_shutting_down, }) } - /// Inject (schedule then process) effects created via a call to `create_effects` which is - /// itself passed an instance of an `EffectBuilder`. - #[cfg(test)] - pub(crate) async fn process_injected_effects(&mut self, create_effects: F) - where - F: FnOnce(EffectBuilder) -> Effects, - { - let event_queue = EventQueueHandle::new(self.scheduler); - let effect_builder = EffectBuilder::new(event_queue); - - let effects = create_effects(effect_builder); - - process_effects(self.scheduler, effects) - .instrument(debug_span!( - "process injected effects", - ev = self.event_count - )) - .await; - } - /// Processes a single event on the event queue. /// - /// Returns `false` if processing should stop. - #[inline] - #[instrument("crank", level = "debug", fields(ev = self.event_count), skip(self, rng))] - pub async fn crank(&mut self, rng: &mut NodeRng) -> bool { + /// Returns `Some(exit_code)` if processing should stop. + #[instrument("dispatch", level = "debug", fields(a, ev = self.current_event_id), skip(self, rng))] + pub(crate) async fn crank(&mut self, rng: &mut NodeRng) -> Option { self.metrics.events.inc(); - let event_queue = EventQueueHandle::new(self.scheduler); + let event_queue = EventQueueHandle::new(self.scheduler, self.is_shutting_down); let effect_builder = EffectBuilder::new(event_queue); // Update metrics like memory usage and event queue sizes. 
- if self.event_count % self.event_metrics_threshold == 0 { + if self.current_event_id % self.event_metrics_threshold == 0 { // We update metrics on the first very event as well to get a good baseline. if self.last_metrics.elapsed() >= self.event_metrics_min_delay { self.reactor.update_metrics(event_queue); @@ -525,86 +586,144 @@ where total, }) = Self::get_allocated_memory() { - debug!(%allocated, %total, "memory allocated"); + trace!(%allocated, %total, "memory allocated"); self.metrics.allocated_ram_bytes.set(allocated as i64); self.metrics.consumed_ram_bytes.set(consumed as i64); self.metrics.total_ram_bytes.set(total as i64); - if let Some(threshold_mb) = *MEM_DUMP_THRESHOLD_MB { - let threshold_bytes = threshold_mb * 1024 * 1024; - if allocated >= threshold_bytes && self.last_queue_dump.is_none() { - info!( - %allocated, - %total, - %threshold_bytes, - "node has allocated enough memory to trigger queue dump" - ); - self.dump_queues().await; - } - } } } - // Dump event queue if requested, stopping the world. - if QUEUE_DUMP_REQUESTED.load(Ordering::SeqCst) { - debug!("dumping event queue as requested"); - self.dump_queues().await; - // Indicate we are done with the dump. - QUEUE_DUMP_REQUESTED.store(false, Ordering::SeqCst); + let ((ancestor, event), queue_kind) = self.scheduler.pop().await; + trace!(%event, %queue_kind, "current"); + let event_desc = event.description(); + + // Create another span for tracing the processing of one event. + Span::current().record("ev", self.current_event_id); + + // If we know the ancestor of an event, record it. + if let Some(ancestor) = ancestor { + Span::current().record("a", ancestor.get()); } - let (event, q) = self.scheduler.pop().await; + // Dispatch the event, then execute the resulting effect. + let start = self.clock.start(); + + let (effects, maybe_exit_code, queue_kind) = if event.is_control() { + // We've received a control event, which will _not_ be handled by the reactor. 
+ match event.try_into_control() { + None => { + // If `as_control().is_some()` is true, but `try_into_control` fails, the trait + // is implemented incorrectly. + error!( + "event::as_control succeeded, but try_into_control failed. this is a bug" + ); - // Create another span for tracing the processing of one event. - let event_span = debug_span!("dispatch events", ev = self.event_count); - let (effects, keep_going) = event_span.in_scope(|| { - // We log events twice, once in display and once in debug mode. - let event_as_string = format!("{}", event); - debug!(event=%event_as_string, ?q); - trace!(?event, ?q); - - // Dispatch the event, then execute the resulting effect. - let start = self.clock.start(); - - let (effects, keep_going) = if let Some(ctrl_ann) = event.as_control() { - // We've received a control event, which will _not_ be handled by the reactor. - match ctrl_ann { - ControlAnnouncement::FatalError { file, line, msg } => { - error!(%file, %line, %msg, "fatal error via control announcement"); - (Default::default(), false) - } + // We ignore the event. 
+ (Effects::new(), None, QueueKind::Control) } - } else { - ( - self.reactor.dispatch_event(effect_builder, rng, event), - true, - ) - }; - - let end = self.clock.end(); + Some(ControlAnnouncement::ShutdownDueToUserRequest) => ( + Effects::new(), + Some(ExitCode::CleanExitDontRestart), + QueueKind::Control, + ), + Some(ControlAnnouncement::ShutdownForUpgrade) => { + (Effects::new(), Some(ExitCode::Success), QueueKind::Control) + } + Some(ControlAnnouncement::ShutdownAfterCatchingUp) => ( + Effects::new(), + Some(ExitCode::CleanExitDontRestart), + QueueKind::Control, + ), + Some(ControlAnnouncement::FatalError { file, line, msg }) => { + error!(%file, %line, %msg, "fatal error via control announcement"); + (Effects::new(), Some(ExitCode::Abort), QueueKind::Control) + } + Some(ControlAnnouncement::QueueDumpRequest { + dump_format, + finished, + }) => { + match dump_format { + QueueDumpFormat::Serde(mut ser) => { + self.scheduler + .dump(move |queue_dump| { + if let Err(err) = + queue_dump.erased_serialize(&mut ser.as_serializer()) + { + warn!(%err, "queue dump failed to serialize"); + } + }) + .await; + } + QueueDumpFormat::Debug(ref file) => { + match file.try_clone() { + Ok(mut local_file) => { + self.scheduler + .dump(move |queue_dump| { + write!(&mut local_file, "{:?}", queue_dump) + .and_then(|_| local_file.flush()) + .map_err(|err| { + warn!( + ?err, + "failed to write/flush queue dump using debug format" + ); + }) + .ok(); + }) + .await; + } + Err(err) => warn!( + %err, + "could not create clone of temporary file for queue debug dump" + ), + }; + } + } - // Warn if processing took a long time, record to histogram. - let delta = self.clock.delta(start, end); - if delta > *DISPATCH_EVENT_THRESHOLD { - warn!( - ns = delta.into_nanos(), - event = %event_as_string, - "event took very long to dispatch" - ); - } - self.metrics - .event_dispatch_duration - .observe(delta.into_nanos() as f64); + // Notify requester that we finished writing the queue dump. 
+ finished.respond(()).await; - (effects, keep_going) - }); + // Do nothing on queue dump otherwise. + (Default::default(), None, QueueKind::Control) + } + Some(ControlAnnouncement::ActivateFailpoint { activation }) => { + self.reactor.activate_failpoint(&activation); - process_effects(self.scheduler, effects) - .instrument(debug_span!("process effects", ev = self.event_count)) - .await; + // No other effects, calling the method is all we had to do. + (Effects::new(), None, QueueKind::Control) + } + } + } else { + ( + self.reactor.dispatch_event(effect_builder, rng, event), + None, + queue_kind, + ) + }; - self.event_count += 1; + let end = self.clock.end(); - keep_going + // Warn if processing took a long time, record to histogram. + let delta = self.clock.delta(start, end); + if delta > *DISPATCH_EVENT_THRESHOLD { + warn!(%event_desc, ns = delta.into_nanos(), "event took very long to dispatch"); + } + self.metrics + .event_dispatch_duration + .observe(delta.into_nanos() as f64); + + // Run effects, with the current event ID as the ancestor for resulting set of events. 
+ process_effects( + NonZeroU64::new(self.current_event_id), + self.scheduler, + effects, + queue_kind, + ) + .in_current_span() + .await; + + self.current_event_id += 1; + + maybe_exit_code } /// Gets both the allocated and total memory from sys-info + jemalloc @@ -617,194 +736,241 @@ where } }; - // mem_info gives us kB + // mem_info gives us kilobytes let total = mem_info.total * 1024; - let consumed = total - (mem_info.free * 1024); - - // whereas jemalloc_ctl gives us the numbers in bytes - match jemalloc_epoch::mib() { - Ok(mib) => { - // jemalloc_ctl requires you to advance the epoch to update its stats - if let Err(advance_error) = mib.advance() { - warn!(%advance_error, "unable to advance jemalloc epoch"); - } - } - Err(error) => { - warn!(%error, "unable to get epoch::mib from jemalloc"); - return None; - } - } - let allocated = match jemalloc_allocated::mib() { - Ok(allocated_mib) => match allocated_mib.read() { - Ok(value) => value as u64, - Err(error) => { - warn!(%error, "unable to read allocated mib using jemalloc"); - return None; - } - }, - Err(error) => { - warn!(%error, "unable to get allocated mib using jemalloc"); - return None; - } - }; + let consumed = total - (mem_info.avail * 1024); + + let Stats { + allocations: _, + deallocations: _, + reallocations: _, + bytes_allocated, + bytes_deallocated, + bytes_reallocated: _, + } = INSTRUMENTED_SYSTEM.stats(); Some(AllocatedMem { - allocated, + allocated: bytes_allocated.saturating_sub(bytes_deallocated) as u64, consumed, total, }) } - /// Handles dumping queue contents to files in /tmp. 
- async fn dump_queues(&mut self) { - let timestamp = Timestamp::now(); - self.last_queue_dump = Some(timestamp); - let output_fn = format!("/tmp/queue_dump-{}.json", timestamp); - let mut serializer = serde_json::Serializer::pretty(match File::create(&output_fn) { - Ok(file) => file, - Err(error) => { - warn!(%error, "could not create output file ({}) for queue snapshot", output_fn); - return; + /// Runs the reactor until `self.crank` returns `Some` or we get interrupted by a termination + /// signal. + pub(crate) async fn run(&mut self, rng: &mut NodeRng) -> ExitCode { + loop { + match TERMINATION_REQUESTED.load(Ordering::SeqCst) as i32 { + 0 => { + if let Some(exit_code) = self.crank(rng).await { + self.is_shutting_down.set(); + break exit_code; + } + } + SIGINT => { + self.is_shutting_down.set(); + break ExitCode::SigInt; + } + SIGQUIT => { + self.is_shutting_down.set(); + break ExitCode::SigQuit; + } + SIGTERM => { + self.is_shutting_down.set(); + break ExitCode::SigTerm; + } + _ => error!("should be unreachable - bug in signal handler"), } - }); - - if let Err(error) = self.scheduler.snapshot(&mut serializer).await { - warn!(%error, "could not serialize snapshot to {}", output_fn); - return; } + } +} - let debug_dump_filename = format!("/tmp/queue_dump_debug-{}.txt", timestamp); - let mut file = match File::create(&debug_dump_filename) { - Ok(file) => file, - Err(error) => { - warn!(%error, "could not create debug output file ({}) for queue snapshot", debug_dump_filename); - return; - } - }; - if let Err(error) = self.scheduler.debug_dump(&mut file).await { - warn!(%error, "could not serialize debug snapshot to {}", debug_dump_filename); - return; - } +#[cfg(test)] +#[derive(Eq, PartialEq, Debug)] +pub(crate) enum TryCrankOutcome { + NoEventsToProcess, + ProcessedAnEvent, + ShouldExit(ExitCode), + Exited, +} + +#[cfg(test)] +impl Runner +where + R: Reactor, + R::Event: Serialize, + R::Error: From, +{ + /// Creates a new runner from a given configuration. 
+ /// + /// Creates a metrics registry that is only going to be used in this runner. + pub(crate) async fn new( + cfg: R::Config, + chainspec: Arc, + chainspec_raw_bytes: Arc, + rng: &mut NodeRng, + ) -> Result { + // Instantiate a new registry for metrics for this reactor. + let registry = Registry::new(); + let network_identity = NetworkIdentity::with_generated_certs().unwrap(); + Self::with_metrics( + cfg, + chainspec, + chainspec_raw_bytes, + network_identity, + rng, + ®istry, + ) + .await } - /// Processes a single event if there is one, returns `None` otherwise. - #[inline] + /// Create an instance of an `EffectBuilder`. #[cfg(test)] - pub async fn try_crank(&mut self, rng: &mut NodeRng) -> Option { - if self.scheduler.item_count() == 0 { - None - } else { - Some(self.crank(rng).await) - } + pub(crate) fn effect_builder(&self) -> EffectBuilder { + let event_queue = EventQueueHandle::new(self.scheduler, self.is_shutting_down); + EffectBuilder::new(event_queue) } - /// Runs the reactor until `maybe_exit()` returns `Some` or we get interrupted by a termination - /// signal. - #[inline] - pub async fn run(&mut self, rng: &mut NodeRng) -> ReactorExit { - loop { - match TERMINATION_REQUESTED.load(Ordering::SeqCst) as i32 { - 0 => { - if let Some(reactor_exit) = self.reactor.maybe_exit() { - break reactor_exit; - } - if !self.crank(rng).await { - break ReactorExit::ProcessShouldExit(ExitCode::Abort); - } + /// Inject (schedule then process) effects created via a call to `create_effects` which is + /// itself passed an instance of an `EffectBuilder`. 
+ #[cfg(test)] + pub(crate) async fn process_injected_effects(&mut self, create_effects: F) + where + F: FnOnce(EffectBuilder) -> Effects, + { + use tracing::{debug_span, Instrument}; + + let event_queue = EventQueueHandle::new(self.scheduler, self.is_shutting_down); + let effect_builder = EffectBuilder::new(event_queue); + + let effects = create_effects(effect_builder); + + process_effects(None, self.scheduler, effects, QueueKind::Regular) + .instrument(debug_span!( + "process injected effects", + ev = self.current_event_id + )) + .await + } + + /// Processes a single event if there is one and we haven't previously handled an exit code. + pub(crate) async fn try_crank(&mut self, rng: &mut NodeRng) -> TryCrankOutcome { + if self.is_shutting_down.is_set() { + TryCrankOutcome::Exited + } else if self.scheduler.item_count() == 0 { + TryCrankOutcome::NoEventsToProcess + } else { + match self.crank(rng).await { + Some(exit_code) => { + self.is_shutting_down.set(); + TryCrankOutcome::ShouldExit(exit_code) } - SIGINT => break ReactorExit::ProcessShouldExit(ExitCode::SigInt), - SIGQUIT => break ReactorExit::ProcessShouldExit(ExitCode::SigQuit), - SIGTERM => break ReactorExit::ProcessShouldExit(ExitCode::SigTerm), - _ => error!("should be unreachable - bug in signal handler"), + None => TryCrankOutcome::ProcessedAnEvent, } } } /// Returns a reference to the reactor. - #[inline] - pub fn reactor(&self) -> &R { + pub(crate) fn reactor(&self) -> &R { &self.reactor } /// Returns a mutable reference to the reactor. - #[inline] - pub fn reactor_mut(&mut self) -> &mut R { + pub(crate) fn reactor_mut(&mut self) -> &mut R { &mut self.reactor } - /// Deconstructs the runner to return the reactor. - #[inline] - pub fn into_inner(self) -> R { + /// Shuts down a reactor, sealing and draining the entire queue before returning it. 
+ pub(crate) async fn drain_into_inner(self) -> R { + self.is_shutting_down.set(); + self.scheduler.seal(); + for (ancestor, event) in self.scheduler.drain_queues().await { + tracing::debug!(?ancestor, %event, "drained event"); + } self.reactor } } #[cfg(test)] -impl Runner { - pub(crate) async fn new_with_chainspec( - cfg: ::Config, - chainspec: Arc, - ) -> Result::Error> { - let registry = Registry::new(); - let scheduler = utils::leak(Scheduler::new(QueueKind::weights())); - - let event_queue = EventQueueHandle::new(scheduler); - let (reactor, initial_effects) = - InitializerReactor::new_with_chainspec(cfg, ®istry, event_queue, chainspec)?; +impl Runner> +where + R: Reactor + NetworkedReactor, + R::Event: Serialize, + R::Error: From, +{ + /// Cranks the runner until `condition` is true or until `within` has elapsed. + /// + /// Returns `true` if `condition` has been met within the specified timeout. + /// + /// Panics if cranking causes the node to return an exit code. + pub(crate) async fn crank_until(&mut self, rng: &mut TestRng, condition: F, within: Duration) + where + F: Fn(&R::Event) -> bool + Send + 'static, + { + self.reactor.set_condition_checker(Box::new(condition)); - // Run all effects from component instantiation. 
- let span = debug_span!("process initial effects"); - process_effects(scheduler, initial_effects) - .instrument(span) - .await; + tokio::time::timeout(within, self.crank_and_check_indefinitely(rng)) + .await + .unwrap_or_else(|_| { + panic!( + "Runner::crank_until() timed out after {}s on node {}", + within.as_secs_f64(), + self.reactor.inner().node_id() + ) + }) + } - info!("reactor main loop is ready"); + async fn crank_and_check_indefinitely(&mut self, rng: &mut TestRng) { + loop { + match self.try_crank(rng).await { + TryCrankOutcome::NoEventsToProcess => { + FakeClock::advance_time(POLL_INTERVAL.as_millis() as u64); + tokio::time::sleep(POLL_INTERVAL).await; + continue; + } + TryCrankOutcome::ProcessedAnEvent => {} + TryCrankOutcome::ShouldExit(exit_code) => { + panic!("should not exit: {:?}", exit_code) + } + TryCrankOutcome::Exited => unreachable!(), + } - let event_metrics_min_delay = Duration::from_secs(30); - let now = Instant::now(); - Ok(Runner { - scheduler, - reactor, - event_count: 0, - metrics: RunnerMetrics::new(®istry)?, - // Calculate the `last_metrics` timestamp to be exactly one delay in the past. This will - // cause the runner to collect metrics at the first opportunity. - last_metrics: now.checked_sub(event_metrics_min_delay).unwrap_or(now), - event_metrics_min_delay, - event_metrics_threshold: 1000, - clock: Clock::new(), - last_queue_dump: None, - }) + if self.reactor.condition_result() { + info!("{} met condition", self.reactor.inner().node_id()); + return; + } + } } } /// Spawns tasks that will process the given effects. -#[inline] -async fn process_effects(scheduler: &'static Scheduler, effects: Effects) -where +/// +/// Result events from processing the events will be scheduled with the given ancestor. +async fn process_effects( + ancestor: Option, + scheduler: &'static Scheduler, + effects: Effects, + queue_kind: QueueKind, +) where Ev: Send + 'static, { - // TODO: Properly carry around priorities. 
- let queue_kind = QueueKind::default(); - for effect in effects { tokio::spawn(async move { for event in effect.await { - scheduler.push(event, queue_kind).await + scheduler.push((ancestor, event), queue_kind).await; } }); } } /// Converts a single effect into another by wrapping it. -#[inline] fn wrap_effect(wrap: F, effect: Effect) -> Effect where F: Fn(Ev) -> REv + Send + 'static, Ev: Send + 'static, REv: Send + 'static, { - // TODO: The double-boxing here is very unfortunate =(. + // The double-boxing here is very unfortunate =(. (async move { let events = effect.await; events.into_iter().map(wrap).collect() @@ -813,8 +979,7 @@ where } /// Converts multiple effects into another by wrapping. -#[inline] -pub fn wrap_effects(wrap: F, effects: Effects) -> Effects +pub(crate) fn wrap_effects(wrap: F, effects: Effects) -> Effects where F: Fn(Ev) -> REv + Send + 'static + Clone, Ev: Send + 'static, @@ -825,3 +990,112 @@ where .map(move |effect| wrap_effect(wrap.clone(), effect)) .collect() } + +fn handle_fetch_response( + reactor: &mut R, + effect_builder: EffectBuilder<::Event>, + rng: &mut NodeRng, + sender: NodeId, + serialized_item: &[u8], +) -> Effects<::Event> +where + I: FetchItem, + R: Reactor, + ::Event: From> + From, +{ + match fetcher::Event::::from_get_response_serialized_item(sender, serialized_item) { + Some(fetcher_event) => { + Reactor::dispatch_event(reactor, effect_builder, rng, fetcher_event.into()) + } + None => effect_builder + .announce_block_peer_with_justification( + sender, + BlocklistJustification::SentBadItem { tag: I::TAG }, + ) + .ignore(), + } +} + +fn handle_get_response( + reactor: &mut R, + effect_builder: EffectBuilder<::Event>, + rng: &mut NodeRng, + sender: NodeId, + message: Box, +) -> Effects<::Event> +where + R: Reactor, + ::Event: From + + From> + + From> + + From> + + From> + + From> + + From> + + From> + + From> + + From> + + From + + From, +{ + match *message { + NetResponse::Transaction(ref serialized_item) => 
handle_fetch_response::( + reactor, + effect_builder, + rng, + sender, + serialized_item, + ), + NetResponse::LegacyDeploy(ref serialized_item) => handle_fetch_response::( + reactor, + effect_builder, + rng, + sender, + serialized_item, + ), + NetResponse::Block(ref serialized_item) => { + handle_fetch_response::(reactor, effect_builder, rng, sender, serialized_item) + } + NetResponse::BlockHeader(ref serialized_item) => handle_fetch_response::( + reactor, + effect_builder, + rng, + sender, + serialized_item, + ), + NetResponse::FinalitySignature(ref serialized_item) => { + handle_fetch_response::( + reactor, + effect_builder, + rng, + sender, + serialized_item, + ) + } + NetResponse::SyncLeap(ref serialized_item) => handle_fetch_response::( + reactor, + effect_builder, + rng, + sender, + serialized_item, + ), + NetResponse::ApprovalsHashes(ref serialized_item) => { + handle_fetch_response::( + reactor, + effect_builder, + rng, + sender, + serialized_item, + ) + } + NetResponse::BlockExecutionResults(ref serialized_item) => { + handle_fetch_response::( + reactor, + effect_builder, + rng, + sender, + serialized_item, + ) + } + } +} diff --git a/node/src/reactor/event_queue_metrics.rs b/node/src/reactor/event_queue_metrics.rs index 4dd81c14c0..1b19d46839 100644 --- a/node/src/reactor/event_queue_metrics.rs +++ b/node/src/reactor/event_queue_metrics.rs @@ -29,7 +29,13 @@ impl EventQueueMetrics { let mut event_queue_gauges: HashMap = HashMap::new(); for queue_kind in event_queue_handle.event_queues_counts().keys() { let key = format!("scheduler_queue_{}_count", queue_kind.metrics_name()); - let queue_event_counter = IntGauge::new(key, "Event in the queue.".to_string())?; + let queue_event_counter = IntGauge::new( + key, + format!( + "current number of events in the reactor {} queue", + queue_kind.metrics_name() + ), + )?; registry.register(Box::new(queue_event_counter.clone()))?; let result = event_queue_gauges.insert(*queue_kind, queue_event_counter); 
assert!(result.is_none(), "Map keys should not be overwritten."); @@ -37,7 +43,7 @@ impl EventQueueMetrics { let event_total = IntGauge::new( "scheduler_queue_total_count", - "total count of events in queues.", + "current total number of events in all reactor queues", )?; registry.register(Box::new(event_total.clone()))?; @@ -64,8 +70,7 @@ impl EventQueueMetrics { .iter() .sorted_by_key(|k| k.0) .map(|(queue, event_count)| { - let _ = self - .event_queue_gauges + self.event_queue_gauges .get(queue) .map(|gauge| gauge.set(*event_count as i64)) .expect("queue exists."); @@ -85,7 +90,7 @@ impl Drop for EventQueueMetrics { .for_each(|(key, queue_gauge)| { self.registry .unregister(Box::new(queue_gauge.clone())) - .unwrap_or_else(|_| error!("unregistering {} failed: was not registered", key)) + .unwrap_or_else(|_| error!("unregistering {} failed: was not registered", key)); }); } } diff --git a/node/src/reactor/initializer.rs b/node/src/reactor/initializer.rs deleted file mode 100644 index 95c6a0f6dc..0000000000 --- a/node/src/reactor/initializer.rs +++ /dev/null @@ -1,341 +0,0 @@ -//! Reactor used to initialize a node. 
- -use std::fmt::{self, Display, Formatter}; - -use datasize::DataSize; -use derive_more::From; -use prometheus::Registry; -use reactor::ReactorEvent; -use serde::Serialize; -use thiserror::Error; -use tracing::info; - -use crate::{ - components::{ - chainspec_loader::{self, ChainspecLoader}, - contract_runtime::{self, ContractRuntime}, - gossiper, - network::NetworkIdentity, - small_network::{GossipedAddress, SmallNetworkIdentity, SmallNetworkIdentityError}, - storage::{self, Storage}, - Component, - }, - effect::{ - announcements::{ - ChainspecLoaderAnnouncement, ContractRuntimeAnnouncement, ControlAnnouncement, - }, - requests::{ - ConsensusRequest, ContractRuntimeRequest, LinearChainRequest, NetworkRequest, - RestRequest, StateStoreRequest, StorageRequest, - }, - EffectBuilder, Effects, - }, - protocol::Message, - reactor::{self, validator, EventQueueHandle, ReactorExit}, - types::{chainspec, NodeId}, - utils::WithDir, - NodeRng, -}; - -/// Top-level event for the reactor. -#[derive(Debug, From, Serialize)] -#[must_use] -pub enum Event { - /// Chainspec handler event. - #[from] - Chainspec(chainspec_loader::Event), - - /// Storage event. - - #[from] - Storage(#[serde(skip_serializing)] storage::Event), - - /// Contract runtime event. - #[from] - ContractRuntime(#[serde(skip_serializing)] contract_runtime::Event), - - /// Request for state storage. 
- #[from] - StateStoreRequest(StateStoreRequest), - - /// Control announcement - #[from] - ControlAnnouncement(ControlAnnouncement), -} - -impl ReactorEvent for Event { - fn as_control(&self) -> Option<&ControlAnnouncement> { - if let Self::ControlAnnouncement(ref ctrl_ann) = self { - Some(ctrl_ann) - } else { - None - } - } -} - -impl From for Event { - fn from(request: StorageRequest) -> Self { - Event::Storage(storage::Event::StorageRequest(request)) - } -} - -impl From for Event { - fn from(request: ContractRuntimeRequest) -> Self { - Event::ContractRuntime(contract_runtime::Event::Request(Box::new(request))) - } -} - -impl From> for Event { - fn from(_request: NetworkRequest) -> Self { - unreachable!("no network traffic happens during initialization") - } -} - -impl From for Event { - fn from(_announcement: ChainspecLoaderAnnouncement) -> Self { - unreachable!("no chainspec announcements happen during initialization") - } -} - -impl From> for Event { - fn from(_req: LinearChainRequest) -> Self { - unreachable!("no linear chain events happen during initialization") - } -} - -impl From>> for Event { - fn from(_request: NetworkRequest>) -> Self { - unreachable!("no gossiper events happen during initialization") - } -} - -impl From for Event { - fn from(_request: ConsensusRequest) -> Self { - unreachable!("no chainspec announcements happen during initialization") - } -} - -impl From> for Event { - fn from(_request: RestRequest) -> Self { - unreachable!("no rest requests happen during initialization") - } -} - -impl From for Event { - fn from(_request: ContractRuntimeAnnouncement) -> Self { - unreachable!("no block executor requests happen during initialization") - } -} - -impl Display for Event { - fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { - match self { - Event::Chainspec(event) => write!(formatter, "chainspec: {}", event), - Event::Storage(event) => write!(formatter, "storage: {}", event), - Event::ContractRuntime(event) => write!(formatter, 
"contract runtime: {:?}", event), - Event::StateStoreRequest(request) => { - write!(formatter, "state store request: {}", request) - } - Event::ControlAnnouncement(ctrl_ann) => write!(formatter, "control: {}", ctrl_ann), - } - } -} - -/// Error type returned by the initializer reactor. -#[derive(Debug, Error)] -pub enum Error { - /// `Config` error. - #[error("config error: {0}")] - ConfigError(String), - - /// Metrics-related error - #[error("prometheus (metrics) error: {0}")] - Metrics(#[from] prometheus::Error), - - /// `ChainspecHandler` component error. - #[error("chainspec error: {0}")] - Chainspec(#[from] chainspec::Error), - - /// `Storage` component error. - #[error("storage error: {0}")] - Storage(#[from] storage::Error), - - /// `ContractRuntime` component error. - #[error("contract runtime config error: {0}")] - ContractRuntime(#[from] contract_runtime::ConfigError), - - /// An error that occurred when creating a `SmallNetworkIdentity`. - #[error(transparent)] - SmallNetworkIdentityError(#[from] SmallNetworkIdentityError), -} - -/// Initializer node reactor. 
-#[derive(DataSize, Debug)] -pub struct Reactor { - pub(super) config: WithDir, - pub(super) chainspec_loader: ChainspecLoader, - pub(super) storage: Storage, - pub(super) contract_runtime: ContractRuntime, - pub(super) small_network_identity: SmallNetworkIdentity, - #[data_size(skip)] - pub(super) network_identity: NetworkIdentity, -} - -impl Reactor { - fn new_with_chainspec_loader( - (crashed, config): ::Config, - registry: &Registry, - chainspec_loader: ChainspecLoader, - chainspec_effects: Effects, - ) -> Result<(Self, Effects), Error> { - let hard_reset_to_start_of_era = chainspec_loader.hard_reset_to_start_of_era(); - - let storage_config = config.map_ref(|cfg| cfg.storage.clone()); - let storage = Storage::new( - &storage_config, - hard_reset_to_start_of_era, - chainspec_loader.chainspec().protocol_config.version, - )?; - - let contract_runtime = ContractRuntime::new( - chainspec_loader.initial_state_root_hash(), - chainspec_loader.initial_block_header(), - chainspec_loader.chainspec().protocol_config.version, - storage_config, - &config.value().contract_runtime, - registry, - )?; - - // TODO: This integrity check is misplaced, it should be part of the components - // `handle_event` function. Ideally it would be in the constructor, but since a query to - // storage needs to be made, this is not possible. - // - // Refactoring this has been postponed for now, since it is unclear whether time-consuming - // integrity checks are even a good idea, as they can block the node for one or more hours - // on restarts (online checks are an alternative). - if crashed { - info!("running trie-store integrity check, this may take a while"); - if let Some(state_roots) = storage.get_state_root_hashes_for_trie_check() { - let missing_trie_keys = contract_runtime.trie_store_check(state_roots.clone()); - if !missing_trie_keys.is_empty() { - panic!( - "Fatal error! 
Trie-Key store is not empty.\n {:?}\n \ - Wipe the DB to ensure operations.\n Present state_roots: {:?}", - missing_trie_keys, state_roots - ) - } - } - } - - let effects = reactor::wrap_effects(Event::Chainspec, chainspec_effects); - - let small_network_identity = SmallNetworkIdentity::new()?; - - let network_identity = NetworkIdentity::new(); - - let reactor = Reactor { - config, - chainspec_loader, - storage, - contract_runtime, - small_network_identity, - network_identity, - }; - Ok((reactor, effects)) - } -} - -#[cfg(test)] -impl Reactor { - /// Inspect storage. - pub fn storage(&self) -> &Storage { - &self.storage - } -} - -impl reactor::Reactor for Reactor { - type Event = Event; - type Config = (bool, WithDir); - type Error = Error; - - fn new( - config: Self::Config, - registry: &Registry, - event_queue: EventQueueHandle, - _rng: &mut NodeRng, - ) -> Result<(Self, Effects), Error> { - let effect_builder = EffectBuilder::new(event_queue); - - // Construct the `ChainspecLoader` first so we fail fast if the chainspec is invalid. 
- let (chainspec_loader, chainspec_effects) = - ChainspecLoader::new(config.1.dir(), effect_builder)?; - Self::new_with_chainspec_loader(config, registry, chainspec_loader, chainspec_effects) - } - - fn dispatch_event( - &mut self, - effect_builder: EffectBuilder, - rng: &mut NodeRng, - event: Event, - ) -> Effects { - match event { - Event::Chainspec(event) => reactor::wrap_effects( - Event::Chainspec, - self.chainspec_loader - .handle_event(effect_builder, rng, event), - ), - Event::Storage(event) => reactor::wrap_effects( - Event::Storage, - self.storage.handle_event(effect_builder, rng, event), - ), - Event::ContractRuntime(event) => reactor::wrap_effects( - Event::ContractRuntime, - self.contract_runtime - .handle_event(effect_builder, rng, event), - ), - Event::StateStoreRequest(request) => { - self.dispatch_event(effect_builder, rng, Event::Storage(request.into())) - } - Event::ControlAnnouncement(_) => unreachable!("unhandled control announcement"), - } - } - - fn maybe_exit(&self) -> Option { - self.chainspec_loader.reactor_exit() - } -} - -#[cfg(test)] -pub mod test { - use super::*; - use crate::{ - components::network::ENABLE_LIBP2P_NET_ENV_VAR, testing::network::NetworkedReactor, - types::Chainspec, - }; - use std::{env, sync::Arc}; - - impl Reactor { - pub(crate) fn new_with_chainspec( - config: ::Config, - registry: &Registry, - event_queue: EventQueueHandle, - chainspec: Arc, - ) -> Result<(Self, Effects), Error> { - let effect_builder = EffectBuilder::new(event_queue); - let (chainspec_loader, chainspec_effects) = - ChainspecLoader::new_with_chainspec(chainspec, effect_builder); - Self::new_with_chainspec_loader(config, registry, chainspec_loader, chainspec_effects) - } - } - - impl NetworkedReactor for Reactor { - type NodeId = NodeId; - fn node_id(&self) -> Self::NodeId { - if env::var(ENABLE_LIBP2P_NET_ENV_VAR).is_err() { - NodeId::from(&self.small_network_identity) - } else { - NodeId::from(&self.network_identity) - } - } - } -} diff --git 
a/node/src/reactor/joiner.rs b/node/src/reactor/joiner.rs deleted file mode 100644 index 16a7d91988..0000000000 --- a/node/src/reactor/joiner.rs +++ /dev/null @@ -1,953 +0,0 @@ -//! Reactor used to join the network. - -mod memory_metrics; - -use std::{ - collections::BTreeMap, - env, - fmt::{self, Display, Formatter}, - path::PathBuf, - sync::Arc, -}; - -use datasize::DataSize; -use derive_more::From; -use memory_metrics::MemoryMetrics; -use prometheus::Registry; -use reactor::ReactorEvent; -use serde::Serialize; -use tracing::{debug, error, info, warn}; - -#[cfg(not(feature = "fast-sync"))] -use crate::components::linear_chain_sync::{self, LinearChainSync}; -#[cfg(feature = "fast-sync")] -use crate::components::{ - linear_chain_fast_sync as linear_chain_sync, - linear_chain_fast_sync::LinearChainFastSync as LinearChainSync, -}; - -#[cfg(test)] -use crate::testing::network::NetworkedReactor; -use crate::{ - components::{ - block_validator::{self, BlockValidator}, - chainspec_loader::{self, ChainspecLoader}, - contract_runtime::{self, ContractRuntime}, - deploy_acceptor::{self, DeployAcceptor}, - event_stream_server, - event_stream_server::EventStreamServer, - fetcher::{self, Fetcher}, - gossiper::{self, Gossiper}, - linear_chain, - metrics::Metrics, - network::{self, Network, NetworkIdentity, ENABLE_LIBP2P_NET_ENV_VAR}, - rest_server::{self, RestServer}, - small_network::{self, GossipedAddress, SmallNetwork, SmallNetworkIdentity}, - storage::{self, Storage}, - Component, - }, - effect::{ - announcements::{ - ChainspecLoaderAnnouncement, ContractRuntimeAnnouncement, ControlAnnouncement, - DeployAcceptorAnnouncement, GossiperAnnouncement, LinearChainAnnouncement, - LinearChainBlock, NetworkAnnouncement, - }, - requests::{ - BlockProposerRequest, BlockValidationRequest, ChainspecLoaderRequest, ConsensusRequest, - ContractRuntimeRequest, FetcherRequest, LinearChainRequest, MetricsRequest, - NetworkInfoRequest, NetworkRequest, RestRequest, StateStoreRequest, 
StorageRequest, - }, - EffectBuilder, EffectExt, Effects, - }, - protocol::Message, - reactor::{ - self, - event_queue_metrics::EventQueueMetrics, - initializer, - validator::{self, Error, ValidatorInitConfig}, - EventQueueHandle, Finalize, ReactorExit, - }, - types::{ - Block, BlockByHeight, BlockHeader, BlockHeaderWithMetadata, Deploy, ExitCode, NodeId, - ProtoBlock, Tag, Timestamp, - }, - utils::{Source, WithDir}, - NodeRng, -}; -use casper_types::{PublicKey, U512}; - -/// Top-level event for the reactor. -#[allow(clippy::large_enum_variant)] -#[derive(Debug, From, Serialize)] -#[must_use] -pub enum Event { - /// Network event. - #[from] - Network(network::Event), - - /// Small Network event. - #[from] - SmallNetwork(small_network::Event), - - /// Storage event. - #[from] - Storage(#[serde(skip_serializing)] storage::Event), - - #[from] - /// REST server event. - RestServer(#[serde(skip_serializing)] rest_server::Event), - - #[from] - /// Event stream server event. - EventStreamServer(#[serde(skip_serializing)] event_stream_server::Event), - - /// Metrics request. - #[from] - MetricsRequest(#[serde(skip_serializing)] MetricsRequest), - - #[from] - /// Chainspec Loader event. - ChainspecLoader(#[serde(skip_serializing)] chainspec_loader::Event), - - /// Chainspec info request - #[from] - ChainspecLoaderRequest(#[serde(skip_serializing)] ChainspecLoaderRequest), - - /// Network info request. - #[from] - NetworkInfoRequest(#[serde(skip_serializing)] NetworkInfoRequest), - - /// Linear chain fetcher event. - #[from] - BlockFetcher(#[serde(skip_serializing)] fetcher::Event), - - /// Linear chain (by height) fetcher event. - #[from] - BlockByHeightFetcher(#[serde(skip_serializing)] fetcher::Event), - - /// Deploy fetcher event. - #[from] - DeployFetcher(#[serde(skip_serializing)] fetcher::Event), - - /// Deploy acceptor event. - #[from] - DeployAcceptor(#[serde(skip_serializing)] deploy_acceptor::Event), - - /// Block validator event. 
- #[from] - BlockValidator(#[serde(skip_serializing)] block_validator::Event), - - /// Linear chain event. - #[from] - LinearChainSync(#[serde(skip_serializing)] linear_chain_sync::Event), - - /// Contract Runtime event. - #[from] - ContractRuntime(#[serde(skip_serializing)] contract_runtime::Event), - - /// Linear chain event. - #[from] - LinearChain(#[serde(skip_serializing)] linear_chain::Event), - - /// Address gossiper event. - #[from] - AddressGossiper(gossiper::Event), - - /// Requests. - /// Linear chain block by hash fetcher request. - #[from] - BlockFetcherRequest(#[serde(skip_serializing)] FetcherRequest), - - /// Linear chain block by height fetcher request. - #[from] - BlockByHeightFetcherRequest(#[serde(skip_serializing)] FetcherRequest), - - /// Deploy fetcher request. - #[from] - DeployFetcherRequest(#[serde(skip_serializing)] FetcherRequest), - - /// Block validation request. - #[from] - BlockValidatorRequest(#[serde(skip_serializing)] BlockValidationRequest), - - /// Block proposer request. - #[from] - BlockProposerRequest(#[serde(skip_serializing)] BlockProposerRequest), - - /// Proto block validator request. - #[from] - ProtoBlockValidatorRequest( - #[serde(skip_serializing)] BlockValidationRequest, - ), - - /// Request for state storage. - #[from] - StateStoreRequest(#[serde(skip_serializing)] StateStoreRequest), - - // Announcements - /// A control announcement. - #[from] - ControlAnnouncement(ControlAnnouncement), - - /// Network announcement. - #[from] - NetworkAnnouncement(#[serde(skip_serializing)] NetworkAnnouncement), - - /// Block executor announcement. - #[from] - ContractRuntimeAnnouncement(#[serde(skip_serializing)] ContractRuntimeAnnouncement), - - /// Address Gossiper announcement. - #[from] - AddressGossiperAnnouncement(#[serde(skip_serializing)] GossiperAnnouncement), - - /// DeployAcceptor announcement. 
- #[from] - DeployAcceptorAnnouncement(#[serde(skip_serializing)] DeployAcceptorAnnouncement), - - /// Linear chain announcement. - #[from] - LinearChainAnnouncement(#[serde(skip_serializing)] LinearChainAnnouncement), - - /// Chainspec loader announcement. - #[from] - ChainspecLoaderAnnouncement(#[serde(skip_serializing)] ChainspecLoaderAnnouncement), - - /// Consensus request. - #[from] - ConsensusRequest(#[serde(skip_serializing)] ConsensusRequest), -} - -impl ReactorEvent for Event { - fn as_control(&self) -> Option<&ControlAnnouncement> { - if let Self::ControlAnnouncement(ref ctrl_ann) = self { - Some(ctrl_ann) - } else { - None - } - } -} - -impl From> for Event { - fn from(req: LinearChainRequest) -> Self { - Event::LinearChain(linear_chain::Event::Request(req)) - } -} - -impl From for Event { - fn from(request: StorageRequest) -> Self { - Event::Storage(request.into()) - } -} - -impl From> for Event { - fn from(request: NetworkRequest) -> Self { - if env::var(ENABLE_LIBP2P_NET_ENV_VAR).is_ok() { - Event::Network(network::Event::from(request)) - } else { - Event::SmallNetwork(small_network::Event::from(request)) - } - } -} - -impl From>> for Event { - fn from(request: NetworkRequest>) -> Self { - Event::SmallNetwork(small_network::Event::from( - request.map_payload(Message::from), - )) - } -} - -impl From for Event { - fn from(request: ContractRuntimeRequest) -> Event { - Event::ContractRuntime(contract_runtime::Event::Request(Box::new(request))) - } -} - -impl From> for Event { - fn from(request: RestRequest) -> Self { - Event::RestServer(rest_server::Event::RestRequest(request)) - } -} - -impl Display for Event { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - match self { - Event::Network(event) => write!(f, "network: {}", event), - Event::SmallNetwork(event) => write!(f, "small network: {}", event), - Event::NetworkAnnouncement(event) => write!(f, "network announcement: {}", event), - Event::Storage(request) => write!(f, "storage: {}", 
request), - Event::RestServer(event) => write!(f, "rest server: {}", event), - Event::EventStreamServer(event) => write!(f, "event stream server: {}", event), - Event::MetricsRequest(req) => write!(f, "metrics request: {}", req), - Event::ChainspecLoader(event) => write!(f, "chainspec loader: {}", event), - Event::ChainspecLoaderRequest(req) => write!(f, "chainspec loader request: {}", req), - Event::NetworkInfoRequest(req) => write!(f, "network info request: {}", req), - Event::BlockFetcherRequest(request) => write!(f, "block fetcher request: {}", request), - Event::BlockValidatorRequest(request) => { - write!(f, "block validator request: {}", request) - } - Event::DeployFetcherRequest(request) => { - write!(f, "deploy fetcher request: {}", request) - } - Event::LinearChainSync(event) => write!(f, "linear chain: {}", event), - Event::BlockFetcher(event) => write!(f, "block fetcher: {}", event), - Event::BlockByHeightFetcherRequest(request) => { - write!(f, "block by height fetcher request: {}", request) - } - Event::BlockValidator(event) => write!(f, "block validator event: {}", event), - Event::DeployFetcher(event) => write!(f, "deploy fetcher event: {}", event), - Event::BlockProposerRequest(req) => write!(f, "block proposer request: {}", req), - Event::ContractRuntime(event) => write!(f, "contract runtime event: {:?}", event), - Event::LinearChain(event) => write!(f, "linear chain event: {}", event), - Event::ContractRuntimeAnnouncement(announcement) => { - write!(f, "block executor announcement: {}", announcement) - } - Event::ProtoBlockValidatorRequest(req) => write!(f, "block validator request: {}", req), - Event::AddressGossiper(event) => write!(f, "address gossiper: {}", event), - Event::AddressGossiperAnnouncement(ann) => { - write!(f, "address gossiper announcement: {}", ann) - } - Event::BlockByHeightFetcher(event) => { - write!(f, "block by height fetcher event: {}", event) - } - Event::DeployAcceptorAnnouncement(ann) => { - write!(f, "deploy acceptor 
announcement: {}", ann) - } - Event::DeployAcceptor(event) => write!(f, "deploy acceptor: {}", event), - Event::ControlAnnouncement(ctrl_ann) => write!(f, "control: {}", ctrl_ann), - Event::LinearChainAnnouncement(ann) => write!(f, "linear chain announcement: {}", ann), - Event::ChainspecLoaderAnnouncement(ann) => { - write!(f, "chainspec loader announcement: {}", ann) - } - Event::StateStoreRequest(req) => write!(f, "state store request: {}", req), - Event::ConsensusRequest(req) => write!(f, "consensus request: {:?}", req), - } - } -} - -/// Joining node reactor. -#[derive(DataSize)] -pub struct Reactor { - root: PathBuf, - metrics: Metrics, - network: Network, - small_network: SmallNetwork, - address_gossiper: Gossiper, - config: validator::Config, - chainspec_loader: ChainspecLoader, - storage: Storage, - contract_runtime: ContractRuntime, - linear_chain_fetcher: Fetcher, - linear_chain_sync: LinearChainSync, - block_validator: BlockValidator, - deploy_fetcher: Fetcher, - linear_chain: linear_chain::LinearChainComponent, - // Handles request for linear chain block by height. - block_by_height_fetcher: Fetcher, - pub(super) block_header_by_hash_fetcher: Fetcher, - pub(super) block_header_with_metadata_fetcher: Fetcher, - #[data_size(skip)] - deploy_acceptor: DeployAcceptor, - #[data_size(skip)] - event_queue_metrics: EventQueueMetrics, - #[data_size(skip)] - rest_server: RestServer, - #[data_size(skip)] - event_stream_server: EventStreamServer, - // Attach memory metrics for the joiner. - #[data_size(skip)] // Never allocates data on the heap. - memory_metrics: MemoryMetrics, -} - -impl reactor::Reactor for Reactor { - type Event = Event; - - // The "configuration" is in fact the whole state of the initializer reactor, which we - // deconstruct and reuse. 
- type Config = WithDir; - type Error = Error; - - fn new( - initializer: Self::Config, - registry: &Registry, - event_queue: EventQueueHandle, - _rng: &mut NodeRng, - ) -> Result<(Self, Effects), Self::Error> { - let (root, initializer) = initializer.into_parts(); - - let initializer::Reactor { - config, - chainspec_loader, - storage, - mut contract_runtime, - small_network_identity, - network_identity, - } = initializer; - - // TODO: Remove wrapper around Reactor::Config instead. - let (_, config) = config.into_parts(); - - let memory_metrics = MemoryMetrics::new(registry.clone())?; - - let event_queue_metrics = EventQueueMetrics::new(registry.clone(), event_queue)?; - - let metrics = Metrics::new(registry.clone()); - - let network_config = network::Config::from(&config.network); - let (network, network_effects) = Network::new( - event_queue, - network_config, - registry, - network_identity, - chainspec_loader.chainspec(), - false, - )?; - let (small_network, small_network_effects) = SmallNetwork::new( - event_queue, - config.network.clone(), - registry, - small_network_identity, - chainspec_loader.chainspec().as_ref(), - false, - )?; - - let linear_chain_fetcher = Fetcher::new("linear_chain", config.fetcher, ®istry)?; - - let mut effects = reactor::wrap_effects(Event::Network, network_effects); - effects.extend(reactor::wrap_effects( - Event::SmallNetwork, - small_network_effects, - )); - - let address_gossiper = - Gossiper::new_for_complete_items("address_gossiper", config.gossip, registry)?; - - let effect_builder = EffectBuilder::new(event_queue); - - let init_hash = config - .node - .trusted_hash - .or_else(|| chainspec_loader.initial_block_hash()); - - match init_hash { - None => { - let chainspec = chainspec_loader.chainspec(); - let era_duration = chainspec.core_config.era_duration; - if let Some(start_time) = chainspec - .protocol_config - .activation_point - .genesis_timestamp() - { - if Timestamp::now() > start_time + era_duration { - error!( - "Node 
started with no trusted hash after the expected end of \ - the genesis era! Please specify a trusted hash and restart. \ - Time: {}, End of genesis era: {}", - Timestamp::now(), - start_time + era_duration - ); - panic!("should have trusted hash after genesis era") - } - } - info!("No synchronization of the linear chain will be done.") - } - Some(hash) => info!("Synchronizing linear chain from: {:?}", hash), - } - - let protocol_version = &chainspec_loader.chainspec().protocol_config.version; - let rest_server = RestServer::new( - config.rest_server.clone(), - effect_builder, - *protocol_version, - )?; - - let event_stream_server = - EventStreamServer::new(config.event_stream_server.clone(), *protocol_version)?; - - let block_validator = BlockValidator::new(Arc::clone(&chainspec_loader.chainspec())); - - let deploy_fetcher = Fetcher::new("deploy", config.fetcher, ®istry)?; - - let block_by_height_fetcher = Fetcher::new("block_by_height", config.fetcher, ®istry)?; - - let block_header_and_finality_signatures_by_height_fetcher: Fetcher< - BlockHeaderWithMetadata, - > = Fetcher::new( - "block_header_and_finality_signatures_by_height", - config.fetcher, - ®istry, - )?; - - let block_header_by_hash_fetcher: Fetcher = - Fetcher::new("block_header_by_hash", config.fetcher, ®istry)?; - - let deploy_acceptor = - DeployAcceptor::new(config.deploy_acceptor, &*chainspec_loader.chainspec()); - - contract_runtime.set_initial_state( - chainspec_loader.initial_state_root_hash(), - chainspec_loader.initial_block_header(), - ); - - let linear_chain = linear_chain::LinearChainComponent::new( - ®istry, - *protocol_version, - chainspec_loader.chainspec().core_config.auction_delay, - chainspec_loader.chainspec().core_config.unbonding_delay, - )?; - - let validator_weights: BTreeMap = chainspec_loader - .chainspec() - .network_config - .chainspec_validator_stakes() - .into_iter() - .map(|(pk, motes)| (pk, motes.value())) - .collect(); - let maybe_next_activation_point = chainspec_loader 
- .next_upgrade() - .map(|next_upgrade| next_upgrade.activation_point()); - let (linear_chain_sync, init_sync_effects) = LinearChainSync::new::( - registry, - effect_builder, - chainspec_loader.chainspec(), - &storage, - init_hash, - chainspec_loader.initial_block().cloned(), - validator_weights, - maybe_next_activation_point, - )?; - - effects.extend(reactor::wrap_effects( - Event::LinearChainSync, - init_sync_effects, - )); - effects.extend(reactor::wrap_effects( - Event::ChainspecLoader, - chainspec_loader.start_checking_for_upgrades(effect_builder), - )); - - Ok(( - Self { - root, - metrics, - network, - small_network, - address_gossiper, - config, - chainspec_loader, - storage, - contract_runtime, - linear_chain_sync, - linear_chain_fetcher, - block_validator, - deploy_fetcher, - linear_chain, - block_by_height_fetcher, - block_header_by_hash_fetcher, - block_header_with_metadata_fetcher: - block_header_and_finality_signatures_by_height_fetcher, - deploy_acceptor, - event_queue_metrics, - rest_server, - event_stream_server, - memory_metrics, - }, - effects, - )) - } - - fn dispatch_event( - &mut self, - effect_builder: EffectBuilder, - rng: &mut NodeRng, - event: Self::Event, - ) -> Effects { - match event { - Event::Network(event) => reactor::wrap_effects( - Event::Network, - self.network.handle_event(effect_builder, rng, event), - ), - Event::SmallNetwork(event) => reactor::wrap_effects( - Event::SmallNetwork, - self.small_network.handle_event(effect_builder, rng, event), - ), - Event::ControlAnnouncement(ctrl_ann) => { - unreachable!("unhandled control announcement: {}", ctrl_ann) - } - Event::NetworkAnnouncement(NetworkAnnouncement::NewPeer(id)) => reactor::wrap_effects( - Event::LinearChainSync, - self.linear_chain_sync.handle_event( - effect_builder, - rng, - linear_chain_sync::Event::NewPeerConnected(id), - ), - ), - Event::NetworkAnnouncement(NetworkAnnouncement::GossipOurAddress(gossiped_address)) => { - let event = gossiper::Event::ItemReceived { - 
item_id: gossiped_address, - source: Source::::Ourself, - }; - self.dispatch_event(effect_builder, rng, Event::AddressGossiper(event)) - } - Event::NetworkAnnouncement(NetworkAnnouncement::MessageReceived { - sender, - payload, - }) => match payload { - Message::GetResponse { - tag: Tag::Block, - serialized_item, - } => { - let block = match bincode::deserialize(&serialized_item) { - Ok(block) => Box::new(block), - Err(err) => { - error!("failed to decode block from {}: {}", sender, err); - return Effects::new(); - } - }; - let event = fetcher::Event::GotRemotely { - item: block, - source: Source::Peer(sender), - }; - self.dispatch_event(effect_builder, rng, Event::BlockFetcher(event)) - } - Message::GetResponse { - tag: Tag::BlockByHeight, - serialized_item, - } => { - let block_at_height: BlockByHeight = - match bincode::deserialize(&serialized_item) { - Ok(maybe_block) => maybe_block, - Err(err) => { - error!("failed to decode block from {}: {}", sender, err); - return Effects::new(); - } - }; - - let event = match block_at_height { - BlockByHeight::Absent(block_height) => fetcher::Event::AbsentRemotely { - id: block_height, - peer: sender, - }, - BlockByHeight::Block(block) => fetcher::Event::GotRemotely { - item: Box::new(BlockByHeight::Block(block)), - source: Source::Peer(sender), - }, - }; - self.dispatch_event(effect_builder, rng, Event::BlockByHeightFetcher(event)) - } - Message::GetResponse { - tag: Tag::Deploy, - serialized_item, - } => { - let deploy = match bincode::deserialize(&serialized_item) { - Ok(deploy) => Box::new(deploy), - Err(err) => { - error!("failed to decode deploy from {}: {}", sender, err); - return Effects::new(); - } - }; - let event = Event::DeployAcceptor(deploy_acceptor::Event::Accept { - deploy, - source: Source::Peer(sender), - responder: None, - }); - self.dispatch_event(effect_builder, rng, event) - } - Message::AddressGossiper(message) => { - let event = Event::AddressGossiper(gossiper::Event::MessageReceived { - sender, - 
message, - }); - self.dispatch_event(effect_builder, rng, event) - } - Message::FinalitySignature(_) => { - debug!("finality signatures not handled in joiner reactor"); - Effects::new() - } - other => { - debug!(?other, "network announcement ignored."); - Effects::new() - } - }, - Event::DeployAcceptorAnnouncement(DeployAcceptorAnnouncement::AcceptedNewDeploy { - deploy, - source, - }) => { - let event = fetcher::Event::GotRemotely { - item: deploy, - source, - }; - self.dispatch_event(effect_builder, rng, Event::DeployFetcher(event)) - } - Event::DeployAcceptorAnnouncement(DeployAcceptorAnnouncement::InvalidDeploy { - deploy, - source, - }) => { - let deploy_hash = *deploy.id(); - let peer = source; - warn!(?deploy_hash, ?peer, "Invalid deploy received from a peer."); - Effects::new() - } - Event::Storage(event) => reactor::wrap_effects( - Event::Storage, - self.storage.handle_event(effect_builder, rng, event), - ), - Event::BlockFetcherRequest(request) => { - self.dispatch_event(effect_builder, rng, Event::BlockFetcher(request.into())) - } - Event::BlockValidatorRequest(request) => { - self.dispatch_event(effect_builder, rng, Event::BlockValidator(request.into())) - } - Event::DeployAcceptor(event) => reactor::wrap_effects( - Event::DeployAcceptor, - self.deploy_acceptor - .handle_event(effect_builder, rng, event), - ), - Event::LinearChainSync(event) => reactor::wrap_effects( - Event::LinearChainSync, - self.linear_chain_sync - .handle_event(effect_builder, rng, event), - ), - Event::BlockFetcher(event) => reactor::wrap_effects( - Event::BlockFetcher, - self.linear_chain_fetcher - .handle_event(effect_builder, rng, event), - ), - Event::BlockValidator(event) => reactor::wrap_effects( - Event::BlockValidator, - self.block_validator - .handle_event(effect_builder, rng, event), - ), - Event::DeployFetcher(event) => reactor::wrap_effects( - Event::DeployFetcher, - self.deploy_fetcher.handle_event(effect_builder, rng, event), - ), - Event::BlockByHeightFetcher(event) 
=> reactor::wrap_effects( - Event::BlockByHeightFetcher, - self.block_by_height_fetcher - .handle_event(effect_builder, rng, event), - ), - Event::DeployFetcherRequest(request) => { - self.dispatch_event(effect_builder, rng, Event::DeployFetcher(request.into())) - } - Event::BlockByHeightFetcherRequest(request) => self.dispatch_event( - effect_builder, - rng, - Event::BlockByHeightFetcher(request.into()), - ), - Event::ContractRuntime(event) => reactor::wrap_effects( - Event::ContractRuntime, - self.contract_runtime - .handle_event(effect_builder, rng, event), - ), - Event::ContractRuntimeAnnouncement(ContractRuntimeAnnouncement::LinearChainBlock( - linear_chain_block, - )) => { - let LinearChainBlock { - block, - execution_results, - } = *linear_chain_block; - let mut effects = Effects::new(); - let block_hash = *block.hash(); - - // send to linear chain - let reactor_event = Event::LinearChain(linear_chain::Event::NewLinearChainBlock { - block: Box::new(block), - execution_results: execution_results - .iter() - .map(|(hash, (_header, results))| (*hash, results.clone())) - .collect(), - }); - effects.extend(self.dispatch_event(effect_builder, rng, reactor_event)); - - // send to event stream - for (deploy_hash, (deploy_header, execution_result)) in execution_results { - let reactor_event = - Event::EventStreamServer(event_stream_server::Event::DeployProcessed { - deploy_hash, - deploy_header: Box::new(deploy_header), - block_hash, - execution_result: Box::new(execution_result), - }); - effects.extend(self.dispatch_event(effect_builder, rng, reactor_event)); - } - - effects - } - Event::ContractRuntimeAnnouncement( - ContractRuntimeAnnouncement::BlockAlreadyExecuted(block), - ) => self.dispatch_event( - effect_builder, - rng, - Event::LinearChainSync(linear_chain_sync::Event::BlockHandled(Box::new(*block))), - ), - Event::LinearChain(event) => reactor::wrap_effects( - Event::LinearChain, - self.linear_chain.handle_event(effect_builder, rng, event), - ), - 
Event::BlockProposerRequest(request) => { - // Consensus component should not be trying to create new blocks during joining - // phase. - error!("ignoring block proposer request {}", request); - Effects::new() - } - Event::ProtoBlockValidatorRequest(request) => { - // During joining phase, consensus component should not be requesting - // validation of the proto block. - error!("ignoring proto block validation request {}", request); - Effects::new() - } - Event::AddressGossiper(event) => reactor::wrap_effects( - Event::AddressGossiper, - self.address_gossiper - .handle_event(effect_builder, rng, event), - ), - Event::AddressGossiperAnnouncement(ann) => { - let GossiperAnnouncement::NewCompleteItem(gossiped_address) = ann; - let reactor_event = Event::SmallNetwork(small_network::Event::PeerAddressReceived( - gossiped_address, - )); - self.dispatch_event(effect_builder, rng, reactor_event) - } - - Event::LinearChainAnnouncement(LinearChainAnnouncement::BlockAdded(block)) => { - let mut effects = reactor::wrap_effects( - Event::EventStreamServer, - self.event_stream_server.handle_event( - effect_builder, - rng, - event_stream_server::Event::BlockAdded(block.clone()), - ), - ); - let reactor_event = - Event::LinearChainSync(linear_chain_sync::Event::BlockHandled(block)); - effects.extend(self.dispatch_event(effect_builder, rng, reactor_event)); - effects - } - Event::LinearChainAnnouncement(LinearChainAnnouncement::NewFinalitySignature(fs)) => { - let reactor_event = - Event::EventStreamServer(event_stream_server::Event::FinalitySignature(fs)); - self.dispatch_event(effect_builder, rng, reactor_event) - } - Event::RestServer(event) => reactor::wrap_effects( - Event::RestServer, - self.rest_server.handle_event(effect_builder, rng, event), - ), - Event::EventStreamServer(event) => reactor::wrap_effects( - Event::EventStreamServer, - self.event_stream_server - .handle_event(effect_builder, rng, event), - ), - Event::MetricsRequest(req) => reactor::wrap_effects( - 
Event::MetricsRequest, - self.metrics.handle_event(effect_builder, rng, req), - ), - Event::ChainspecLoader(event) => reactor::wrap_effects( - Event::ChainspecLoader, - self.chainspec_loader - .handle_event(effect_builder, rng, event), - ), - Event::ChainspecLoaderRequest(req) => { - self.dispatch_event(effect_builder, rng, Event::ChainspecLoader(req.into())) - } - Event::StateStoreRequest(req) => { - self.dispatch_event(effect_builder, rng, Event::Storage(req.into())) - } - Event::NetworkInfoRequest(req) => { - let event = if env::var(ENABLE_LIBP2P_NET_ENV_VAR).is_ok() { - Event::Network(network::Event::from(req)) - } else { - Event::SmallNetwork(small_network::Event::from(req)) - }; - self.dispatch_event(effect_builder, rng, event) - } - Event::ChainspecLoaderAnnouncement( - ChainspecLoaderAnnouncement::UpgradeActivationPointRead(next_upgrade), - ) => { - let reactor_event = Event::ChainspecLoader( - chainspec_loader::Event::GotNextUpgrade(next_upgrade.clone()), - ); - let mut effects = self.dispatch_event(effect_builder, rng, reactor_event); - - let reactor_event = - Event::LinearChainSync(linear_chain_sync::Event::GotUpgradeActivationPoint( - next_upgrade.activation_point(), - )); - effects.extend(self.dispatch_event(effect_builder, rng, reactor_event)); - effects - } - // This is done to handle status requests from the RestServer - Event::ConsensusRequest(ConsensusRequest::Status(responder)) => { - // no consensus, respond with None - responder.respond(None).ignore() - } - } - } - - fn maybe_exit(&self) -> Option { - if self.linear_chain_sync.stopped_for_upgrade() { - Some(ReactorExit::ProcessShouldExit(ExitCode::Success)) - } else if self.linear_chain_sync.is_synced() { - Some(ReactorExit::ProcessShouldContinue) - } else { - None - } - } - - fn update_metrics(&mut self, event_queue_handle: EventQueueHandle) { - self.memory_metrics.estimate(&self); - self.event_queue_metrics - .record_event_queue_counts(&event_queue_handle); - } -} - -impl Reactor { - /// 
Deconstructs the reactor into config useful for creating a Validator reactor. Shuts down - /// the network, closing all incoming and outgoing connections, and frees up the listening - /// socket. - pub async fn into_validator_config(self) -> Result { - let latest_block = self.linear_chain_sync.latest_block().cloned(); - // Clean the state of the linear_chain_sync before shutting it down. - #[cfg(not(feature = "fast-sync"))] - linear_chain_sync::clean_linear_chain_state( - &self.storage, - self.chainspec_loader.chainspec(), - )?; - let config = ValidatorInitConfig { - root: self.root, - chainspec_loader: self.chainspec_loader, - config: self.config, - contract_runtime: self.contract_runtime, - storage: self.storage, - latest_block, - event_stream_server: self.event_stream_server, - small_network_identity: SmallNetworkIdentity::from(&self.small_network), - network_identity: NetworkIdentity::from(&self.network), - }; - self.network.finalize().await; - self.small_network.finalize().await; - self.rest_server.finalize().await; - Ok(config) - } -} - -#[cfg(test)] -impl NetworkedReactor for Reactor { - type NodeId = NodeId; - fn node_id(&self) -> Self::NodeId { - if env::var(ENABLE_LIBP2P_NET_ENV_VAR).is_err() { - self.small_network.node_id() - } else { - self.network.node_id() - } - } -} - -#[cfg(test)] -impl Reactor { - /// Inspect storage. - pub(crate) fn storage(&self) -> &Storage { - &self.storage - } -} diff --git a/node/src/reactor/joiner/memory_metrics.rs b/node/src/reactor/joiner/memory_metrics.rs deleted file mode 100644 index 948d9b5c4e..0000000000 --- a/node/src/reactor/joiner/memory_metrics.rs +++ /dev/null @@ -1,225 +0,0 @@ -use datasize::DataSize; -use prometheus::{self, Histogram, HistogramOpts, IntGauge, Registry}; -use tracing::debug; - -use super::Reactor; -use crate::unregister_metric; - -///Metrics for memory usage for the joiner -#[derive(Debug)] -pub(super) struct MemoryMetrics { - /// Total estimated heap memory usage. 
- mem_total: IntGauge, - - /// Estimated heap memory usage of metrics component. - mem_metrics: IntGauge, - /// Estimated heap memory usage of network component. - mem_network: IntGauge, - /// Estimated heap memory usage of small_network component. - mem_small_network: IntGauge, - /// Estimated heap memory usage of address_gossiper component. - mem_address_gossiper: IntGauge, - /// Estimated heap memory usage of the configuration for the validator node. - mem_config: IntGauge, - /// Estimated heap memory usage for the chainspec loader component. - mem_chainspec_loader: IntGauge, - /// Estimated heap memory usage of storage component. - mem_storage: IntGauge, - /// Estimated heap memory usage of the contract runtime component. - mem_contract_runtime: IntGauge, - /// Estimated heap memory usage of the linear chain fetcher component. - mem_linear_chain_fetcher: IntGauge, - /// Estimated heap memory usage of linear chain sync. - mem_linear_chain_sync: IntGauge, - /// Estimated heap memory usage of block validator component. - mem_block_validator: IntGauge, - /// Estimated heap memory usage of deploy fetcher component. - mem_deploy_fetcher: IntGauge, - /// Estimated heap memory usage of linear chain component. - mem_linear_chain: IntGauge, - - /// Histogram detailing how long it took to estimate memory usage. - mem_estimator_runtime_s: Histogram, - - /// Instance of registry component to unregister from when being dropped. - registry: Registry, -} - -impl MemoryMetrics { - /// Initialize a new set of memory metrics for the joiner. 
- pub(super) fn new(registry: Registry) -> Result { - let mem_total = IntGauge::new("joiner_mem_total", "total memory usage in bytes")?; - let mem_metrics = IntGauge::new("joiner_mem_metrics", "metrics memory usage in bytes")?; - let mem_network = IntGauge::new("joiner_mem_network", "network memory usage in bytes")?; - let mem_small_network = IntGauge::new( - "joiner_mem_small_network", - "small network memory usage in bytes", - )?; - let mem_address_gossiper = IntGauge::new( - "joiner_mem_address_gossiper", - "address_gossiper memory usage in bytes", - )?; - let mem_config = IntGauge::new("joiner_mem_config", "config memory usage in bytes")?; - let mem_chainspec_loader = IntGauge::new( - "joiner_mem_chainspec_loader", - "chainspec_loader memory usage in bytes", - )?; - let mem_storage = IntGauge::new("joiner_mem_storage", "storage memory usage in bytes")?; - let mem_contract_runtime = IntGauge::new( - "joiner_mem_contract_runtime", - "contract_runtime memory usage in bytes", - )?; - let mem_linear_chain_fetcher = IntGauge::new( - "joiner_mem_linear_chain_fetcher", - "linear_chain_fetcher memory usage in bytes", - )?; - let mem_linear_chain_sync = IntGauge::new( - "joiner_mem_linear_chain_sync", - "linear_chain_sync memory usage in bytes", - )?; - let mem_block_validator = IntGauge::new( - "joiner_mem_block_validator", - "block_validator memory usage in bytes", - )?; - let mem_deploy_fetcher = IntGauge::new( - "joiner_mem_deploy_fetcher", - "deploy_fetcher memory usage in bytes", - )?; - let mem_linear_chain = IntGauge::new( - "joiner_mem_linear_chain", - "linear_chain memory usage in bytes", - )?; - let mem_estimator_runtime_s = Histogram::with_opts( - HistogramOpts::new( - "joiner_mem_estimator_runtime_s", - "time taken to estimate memory usage, in seconds", - ) - // Create buckets from four nano second to eight seconds - .buckets(prometheus::exponential_buckets(0.000_000_004, 2.0, 32)?), - )?; - - registry.register(Box::new(mem_total.clone()))?; - 
registry.register(Box::new(mem_metrics.clone()))?; - registry.register(Box::new(mem_network.clone()))?; - registry.register(Box::new(mem_small_network.clone()))?; - registry.register(Box::new(mem_address_gossiper.clone()))?; - registry.register(Box::new(mem_config.clone()))?; - registry.register(Box::new(mem_chainspec_loader.clone()))?; - registry.register(Box::new(mem_storage.clone()))?; - registry.register(Box::new(mem_contract_runtime.clone()))?; - registry.register(Box::new(mem_linear_chain_fetcher.clone()))?; - registry.register(Box::new(mem_linear_chain_sync.clone()))?; - registry.register(Box::new(mem_block_validator.clone()))?; - registry.register(Box::new(mem_deploy_fetcher.clone()))?; - registry.register(Box::new(mem_linear_chain.clone()))?; - registry.register(Box::new(mem_estimator_runtime_s.clone()))?; - - Ok(MemoryMetrics { - mem_total, - mem_metrics, - mem_network, - mem_small_network, - mem_address_gossiper, - mem_config, - mem_chainspec_loader, - mem_storage, - mem_contract_runtime, - mem_linear_chain_fetcher, - mem_linear_chain_sync, - mem_block_validator, - mem_deploy_fetcher, - mem_linear_chain, - mem_estimator_runtime_s, - registry, - }) - } - - /// Estimates the memory usage and updates metrics. 
- pub(super) fn estimate(&self, reactor: &Reactor) { - let timer = self.mem_estimator_runtime_s.start_timer(); - - let metrics = reactor.metrics.estimate_heap_size() as i64; - let network = reactor.network.estimate_heap_size() as i64; - let small_network = reactor.small_network.estimate_heap_size() as i64; - let address_gossiper = reactor.address_gossiper.estimate_heap_size() as i64; - let config = reactor.config.estimate_heap_size() as i64; - let chainspec_loader = reactor.chainspec_loader.estimate_heap_size() as i64; - let storage = reactor.storage.estimate_heap_size() as i64; - let contract_runtime = reactor.contract_runtime.estimate_heap_size() as i64; - let linear_chain_fetcher = reactor.linear_chain_fetcher.estimate_heap_size() as i64; - let linear_chain_sync = reactor.linear_chain_sync.estimate_heap_size() as i64; - let block_validator = reactor.block_validator.estimate_heap_size() as i64; - let deploy_fetcher = reactor.deploy_fetcher.estimate_heap_size() as i64; - let linear_chain = reactor.linear_chain.estimate_heap_size() as i64; - - let total = metrics - + network - + small_network - + address_gossiper - + config - + chainspec_loader - + storage - + contract_runtime - + linear_chain_fetcher - + linear_chain_sync - + block_validator - + deploy_fetcher - + linear_chain; - - self.mem_total.set(total); - self.mem_metrics.set(metrics); - self.mem_network.set(network); - self.mem_small_network.set(small_network); - self.mem_address_gossiper.set(address_gossiper); - self.mem_config.set(config); - self.mem_chainspec_loader.set(chainspec_loader); - self.mem_storage.set(storage); - self.mem_contract_runtime.set(contract_runtime); - self.mem_linear_chain_fetcher.set(linear_chain_fetcher); - self.mem_linear_chain_sync.set(linear_chain_sync); - self.mem_block_validator.set(block_validator); - self.mem_deploy_fetcher.set(deploy_fetcher); - self.mem_linear_chain.set(linear_chain); - - // Stop the timer explicitly, don't count logging. 
- let duration_s = timer.stop_and_record(); - - debug!( - %total, - %duration_s, - %metrics, - %network, - %small_network, - %address_gossiper, - %config , - %chainspec_loader, - %storage , - %contract_runtime, - %linear_chain_fetcher, - %linear_chain_sync, - %block_validator, - %deploy_fetcher, - %linear_chain, - "Collected new set of memory metrics for the joiner"); - } -} - -impl Drop for MemoryMetrics { - fn drop(&mut self) { - unregister_metric!(self.registry, self.mem_total); - unregister_metric!(self.registry, self.mem_metrics); - unregister_metric!(self.registry, self.mem_network); - unregister_metric!(self.registry, self.mem_small_network); - unregister_metric!(self.registry, self.mem_address_gossiper); - unregister_metric!(self.registry, self.mem_config); - unregister_metric!(self.registry, self.mem_chainspec_loader); - unregister_metric!(self.registry, self.mem_storage); - unregister_metric!(self.registry, self.mem_contract_runtime); - unregister_metric!(self.registry, self.mem_linear_chain_fetcher); - unregister_metric!(self.registry, self.mem_linear_chain_sync); - unregister_metric!(self.registry, self.mem_block_validator); - unregister_metric!(self.registry, self.mem_deploy_fetcher); - unregister_metric!(self.registry, self.mem_linear_chain); - unregister_metric!(self.registry, self.mem_estimator_runtime_s); - } -} diff --git a/node/src/reactor/main_reactor.rs b/node/src/reactor/main_reactor.rs new file mode 100644 index 0000000000..60b3c27e4d --- /dev/null +++ b/node/src/reactor/main_reactor.rs @@ -0,0 +1,1807 @@ +//! Main reactor for nodes. 
+ +mod config; +mod control; +mod error; +mod event; +mod fetchers; +mod memory_metrics; +mod utils; + +mod catch_up; +mod genesis_instruction; +mod keep_up; +mod reactor_state; +#[cfg(test)] +mod tests; +mod upgrade_shutdown; +mod upgrading_instruction; +mod validate; + +use std::{collections::BTreeMap, convert::TryInto, sync::Arc, time::Instant}; + +use datasize::DataSize; +use memory_metrics::MemoryMetrics; +use prometheus::Registry; +use tracing::{debug, error, info, warn}; + +use casper_binary_port::{LastProgress, NetworkName, Uptime}; +use casper_types::{ + Block, BlockHash, BlockV2, Chainspec, ChainspecRawBytes, EraId, FinalitySignature, + FinalitySignatureV2, PublicKey, TimeDiff, Timestamp, Transaction, U512, +}; + +#[cfg(test)] +use crate::testing::network::NetworkedReactor; +use crate::{ + components::{ + binary_port::{BinaryPort, BinaryPortInitializationError, Metrics as BinaryPortMetrics}, + block_accumulator::{self, BlockAccumulator}, + block_synchronizer::{self, BlockSynchronizer}, + block_validator::{self, BlockValidator}, + consensus::{self, EraSupervisor}, + contract_runtime::ContractRuntime, + diagnostics_port::DiagnosticsPort, + event_stream_server::{self, EventStreamServer}, + gossiper::{self, GossipItem, Gossiper}, + metrics::Metrics, + network::{self, GossipedAddress, Identity as NetworkIdentity, Network}, + rest_server::RestServer, + shutdown_trigger::{self, CompletedBlockInfo, ShutdownTrigger}, + storage::Storage, + sync_leaper::SyncLeaper, + transaction_acceptor::{self, TransactionAcceptor}, + transaction_buffer, + transaction_buffer::TransactionBuffer, + upgrade_watcher::{self, UpgradeWatcher}, + Component, ValidatorBoundComponent, + }, + effect::{ + announcements::{ + BlockAccumulatorAnnouncement, ConsensusAnnouncement, ContractRuntimeAnnouncement, + ControlAnnouncement, FetchedNewBlockAnnouncement, + FetchedNewFinalitySignatureAnnouncement, GossiperAnnouncement, MetaBlockAnnouncement, + PeerBehaviorAnnouncement, 
TransactionAcceptorAnnouncement, + TransactionBufferAnnouncement, UnexecutedBlockAnnouncement, UpgradeWatcherAnnouncement, + }, + incoming::{NetResponseIncoming, TrieResponseIncoming}, + requests::{ + AcceptTransactionRequest, ChainspecRawBytesRequest, ContractRuntimeRequest, + ReactorInfoRequest, + }, + EffectBuilder, EffectExt, Effects, GossipTarget, + }, + failpoints::FailpointActivation, + fatal, + protocol::Message, + reactor::{ + self, + event_queue_metrics::EventQueueMetrics, + main_reactor::{fetchers::Fetchers, upgrade_shutdown::SignatureGossipTracker}, + EventQueueHandle, QueueKind, + }, + types::{ + ForwardMetaBlock, MetaBlock, MetaBlockState, SyncHandling, TrieOrChunk, ValidatorMatrix, + }, + utils::{Source, WithDir}, + NodeRng, +}; +pub use config::Config; +pub(crate) use error::Error; +pub(crate) use event::MainEvent; +pub(crate) use reactor_state::ReactorState; + +/// Main node reactor. +/// +/// This following diagram represents how the components involved in the **sync process** interact +/// with each other. +#[cfg_attr(doc, aquamarine::aquamarine)] +/// ```mermaid +/// flowchart TD +/// G((Network)) +/// E((BlockAccumulator)) +/// H[(Storage)] +/// I((SyncLeaper)) +/// A(("Reactor
(control logic)")) +/// B((ContractRuntime)) +/// C((BlockSynchronizer)) +/// D((Consensus)) +/// K((Gossiper)) +/// J((Fetcher)) +/// F((TransactionBuffer)) +/// +/// I -->|"❌
Never get
SyncLeap
from storage"| H +/// linkStyle 0 fill:none,stroke:red,color:red +/// +/// A -->|"Execute block
(genesis or upgrade)"| B +/// +/// G -->|Peers| C +/// G -->|Peers| D +/// +/// C -->|Block data| E +/// +/// J -->|Block data| C +/// +/// D -->|Execute block| B +/// +/// A -->|SyncLeap| I +/// +/// B -->|Put block| H +/// C -->|Mark block complete| H +/// E -->|Mark block complete| H +/// C -->|Execute block| B +/// +/// C -->|Complete block
with Transactions| F +/// +/// K -->|Transaction| F +/// K -->|Block data| E +/// ``` +#[derive(DataSize, Debug)] +pub(crate) struct MainReactor { + // components + // i/o bound components + storage: Storage, + contract_runtime: ContractRuntime, + upgrade_watcher: UpgradeWatcher, + rest_server: RestServer, + binary_port: BinaryPort, + event_stream_server: EventStreamServer, + diagnostics_port: DiagnosticsPort, + shutdown_trigger: ShutdownTrigger, + net: Network, + consensus: EraSupervisor, + + // block handling + block_validator: BlockValidator, + block_accumulator: BlockAccumulator, + block_synchronizer: BlockSynchronizer, + + // transaction handling + transaction_acceptor: TransactionAcceptor, + transaction_buffer: TransactionBuffer, + + // gossiping components + address_gossiper: Gossiper<{ GossipedAddress::ID_IS_COMPLETE_ITEM }, GossipedAddress>, + transaction_gossiper: Gossiper<{ Transaction::ID_IS_COMPLETE_ITEM }, Transaction>, + block_gossiper: Gossiper<{ BlockV2::ID_IS_COMPLETE_ITEM }, BlockV2>, + finality_signature_gossiper: + Gossiper<{ FinalitySignatureV2::ID_IS_COMPLETE_ITEM }, FinalitySignatureV2>, + + // record retrieval + sync_leaper: SyncLeaper, + fetchers: Fetchers, // <-- this contains all fetchers to reduce top-level clutter + + // Non-components. + // metrics + metrics: Metrics, + #[data_size(skip)] // Never allocates heap data. 
+ memory_metrics: MemoryMetrics, + #[data_size(skip)] + event_queue_metrics: EventQueueMetrics, + + // ambient settings / data / load-bearing config + validator_matrix: ValidatorMatrix, + trusted_hash: Option, + chainspec: Arc, + chainspec_raw_bytes: Arc, + + // control logic + state: ReactorState, + max_attempts: usize, + + last_progress: Timestamp, + attempts: usize, + idle_tolerance: TimeDiff, + control_logic_default_delay: TimeDiff, + shutdown_for_upgrade_timeout: TimeDiff, + switched_to_shutdown_for_upgrade: Timestamp, + upgrade_timeout: TimeDiff, + sync_handling: SyncHandling, + signature_gossip_tracker: SignatureGossipTracker, + /// The instant at which the node has started. + node_startup_instant: Instant, + + finality_signature_creation: bool, + prevent_validator_shutdown: bool, +} + +impl reactor::Reactor for MainReactor { + type Event = MainEvent; + type Config = WithDir; + type Error = Error; + + fn dispatch_event( + &mut self, + effect_builder: EffectBuilder, + rng: &mut NodeRng, + event: MainEvent, + ) -> Effects { + match event { + MainEvent::ControlAnnouncement(ctrl_ann) => { + error!("unhandled control announcement: {}", ctrl_ann); + Effects::new() + } + MainEvent::SetNodeStopRequest(req) => reactor::wrap_effects( + MainEvent::ShutdownTrigger, + self.shutdown_trigger + .handle_event(effect_builder, rng, req.into()), + ), + + MainEvent::FatalAnnouncement(fatal_ann) => { + if self.consensus.is_active_validator() && self.prevent_validator_shutdown { + warn!(%fatal_ann, "consensus is active, not shutting down"); + Effects::new() + } else { + let ctrl_ann = + MainEvent::ControlAnnouncement(ControlAnnouncement::FatalError { + file: fatal_ann.file, + line: fatal_ann.line, + msg: fatal_ann.msg, + }); + effect_builder + .into_inner() + .schedule(ctrl_ann, QueueKind::Control) + .ignore() + } + } + + // PRIMARY REACTOR STATE CONTROL LOGIC + MainEvent::ReactorCrank => self.crank(effect_builder, rng), + + MainEvent::MainReactorRequest(req) => match req { + 
ReactorInfoRequest::ReactorState { responder } => { + responder.respond(self.state).ignore() + } + ReactorInfoRequest::LastProgress { responder } => responder + .respond(LastProgress::new(self.last_progress)) + .ignore(), + ReactorInfoRequest::Uptime { responder } => responder + .respond(Uptime::new(self.node_startup_instant.elapsed().as_secs())) + .ignore(), + ReactorInfoRequest::NetworkName { responder } => responder + .respond(NetworkName::new(self.chainspec.network_config.name.clone())) + .ignore(), + ReactorInfoRequest::BalanceHoldsInterval { responder } => responder + .respond(self.chainspec.core_config.gas_hold_interval) + .ignore(), + }, + MainEvent::MetaBlockAnnouncement(MetaBlockAnnouncement(meta_block)) => self + .handle_meta_block( + effect_builder, + rng, + self.finality_signature_creation, + meta_block, + ), + MainEvent::UnexecutedBlockAnnouncement(UnexecutedBlockAnnouncement(block_height)) => { + let only_from_available_block_range = true; + if let Ok(Some(block_header)) = self + .storage + .read_block_header_by_height(block_height, only_from_available_block_range) + { + let block_hash = block_header.block_hash(); + reactor::wrap_effects( + MainEvent::Consensus, + self.consensus.handle_event( + effect_builder, + rng, + consensus::Event::BlockAdded { + header: Box::new(block_header), + header_hash: block_hash, + }, + ), + ) + } else { + // Warn logging here because this codepath of handling an + // `UnexecutedBlockAnnouncement` is coming from the + // contract runtime when a block with a lower height than + // the next expected executable height is enqueued. This + // happens after restarts when consensus is creating the + // required eras and attempts to retrace its steps in the + // era by enqueuing all finalized blocks starting from the + // first one in that era, blocks which should have already + // been executed and marked complete in storage. 
+ warn!( + block_height, + "Finalized block enqueued for execution, but a complete \ + block header with the same height is not present in storage." + ); + Effects::new() + } + } + + // LOCAL I/O BOUND COMPONENTS + MainEvent::UpgradeWatcher(event) => reactor::wrap_effects( + MainEvent::UpgradeWatcher, + self.upgrade_watcher + .handle_event(effect_builder, rng, event), + ), + MainEvent::UpgradeWatcherRequest(req) => reactor::wrap_effects( + MainEvent::UpgradeWatcher, + self.upgrade_watcher + .handle_event(effect_builder, rng, req.into()), + ), + MainEvent::UpgradeWatcherAnnouncement(UpgradeWatcherAnnouncement( + maybe_next_upgrade, + )) => { + // register activation point of upgrade w/ block accumulator + self.block_accumulator.register_activation_point( + maybe_next_upgrade + .as_ref() + .map(|next_upgrade| next_upgrade.activation_point()), + ); + reactor::wrap_effects( + MainEvent::UpgradeWatcher, + self.upgrade_watcher.handle_event( + effect_builder, + rng, + upgrade_watcher::Event::GotNextUpgrade(maybe_next_upgrade), + ), + ) + } + MainEvent::RestServer(event) => reactor::wrap_effects( + MainEvent::RestServer, + self.rest_server.handle_event(effect_builder, rng, event), + ), + MainEvent::MetricsRequest(req) => reactor::wrap_effects( + MainEvent::MetricsRequest, + self.metrics.handle_event(effect_builder, rng, req), + ), + MainEvent::ChainspecRawBytesRequest( + ChainspecRawBytesRequest::GetChainspecRawBytes(responder), + ) => responder.respond(self.chainspec_raw_bytes.clone()).ignore(), + MainEvent::EventStreamServer(event) => reactor::wrap_effects( + MainEvent::EventStreamServer, + self.event_stream_server + .handle_event(effect_builder, rng, event), + ), + MainEvent::ShutdownTrigger(event) => reactor::wrap_effects( + MainEvent::ShutdownTrigger, + self.shutdown_trigger + .handle_event(effect_builder, rng, event), + ), + MainEvent::DiagnosticsPort(event) => reactor::wrap_effects( + MainEvent::DiagnosticsPort, + self.diagnostics_port + 
.handle_event(effect_builder, rng, event), + ), + MainEvent::DumpConsensusStateRequest(req) => reactor::wrap_effects( + MainEvent::Consensus, + self.consensus.handle_event(effect_builder, rng, req.into()), + ), + + // NETWORK CONNECTION AND ORIENTATION + MainEvent::Network(event) => reactor::wrap_effects( + MainEvent::Network, + self.net.handle_event(effect_builder, rng, event), + ), + MainEvent::NetworkRequest(req) => { + let event = MainEvent::Network(network::Event::from(req)); + self.dispatch_event(effect_builder, rng, event) + } + MainEvent::NetworkInfoRequest(req) => { + let event = MainEvent::Network(network::Event::from(req)); + self.dispatch_event(effect_builder, rng, event) + } + MainEvent::NetworkPeerBehaviorAnnouncement(ann) => { + let mut effects = Effects::new(); + match &ann { + PeerBehaviorAnnouncement::OffenseCommitted { + offender, + justification: _, + } => { + let event = MainEvent::BlockSynchronizer( + block_synchronizer::Event::DisconnectFromPeer(**offender), + ); + effects.extend(self.dispatch_event(effect_builder, rng, event)); + } + } + effects.extend(self.dispatch_event( + effect_builder, + rng, + MainEvent::Network(ann.into()), + )); + effects + } + MainEvent::NetworkPeerRequestingData(incoming) => reactor::wrap_effects( + MainEvent::Storage, + self.storage + .handle_event(effect_builder, rng, incoming.into()), + ), + MainEvent::NetworkPeerProvidingData(NetResponseIncoming { sender, message }) => { + reactor::handle_get_response(self, effect_builder, rng, sender, message) + } + MainEvent::AddressGossiper(event) => reactor::wrap_effects( + MainEvent::AddressGossiper, + self.address_gossiper + .handle_event(effect_builder, rng, event), + ), + MainEvent::AddressGossiperIncoming(incoming) => reactor::wrap_effects( + MainEvent::AddressGossiper, + self.address_gossiper + .handle_event(effect_builder, rng, incoming.into()), + ), + MainEvent::AddressGossiperCrank(req) => reactor::wrap_effects( + MainEvent::AddressGossiper, + self.address_gossiper 
+ .handle_event(effect_builder, rng, req.into()), + ), + MainEvent::AddressGossiperAnnouncement(gossiper_ann) => match gossiper_ann { + GossiperAnnouncement::GossipReceived { .. } + | GossiperAnnouncement::NewItemBody { .. } + | GossiperAnnouncement::FinishedGossiping(_) => Effects::new(), + GossiperAnnouncement::NewCompleteItem(gossiped_address) => { + let reactor_event = + MainEvent::Network(network::Event::PeerAddressReceived(gossiped_address)); + self.dispatch_event(effect_builder, rng, reactor_event) + } + }, + MainEvent::SyncLeaper(event) => reactor::wrap_effects( + MainEvent::SyncLeaper, + self.sync_leaper.handle_event(effect_builder, rng, event), + ), + MainEvent::Consensus(event) => reactor::wrap_effects( + MainEvent::Consensus, + self.consensus.handle_event(effect_builder, rng, event), + ), + MainEvent::ConsensusMessageIncoming(incoming) => reactor::wrap_effects( + MainEvent::Consensus, + self.consensus + .handle_event(effect_builder, rng, incoming.into()), + ), + MainEvent::ConsensusDemand(demand) => reactor::wrap_effects( + MainEvent::Consensus, + self.consensus + .handle_event(effect_builder, rng, demand.into()), + ), + MainEvent::ConsensusAnnouncement(consensus_announcement) => { + match consensus_announcement { + ConsensusAnnouncement::Proposed(block) => { + let reactor_event = MainEvent::TransactionBuffer( + transaction_buffer::Event::BlockProposed(block), + ); + self.dispatch_event(effect_builder, rng, reactor_event) + } + ConsensusAnnouncement::Finalized(block) => { + let reactor_event = MainEvent::TransactionBuffer( + transaction_buffer::Event::BlockFinalized(block), + ); + self.dispatch_event(effect_builder, rng, reactor_event) + } + ConsensusAnnouncement::Fault { + era_id, + public_key, + timestamp, + } => { + let reactor_event = + MainEvent::EventStreamServer(event_stream_server::Event::Fault { + era_id, + public_key, + timestamp, + }); + self.dispatch_event(effect_builder, rng, reactor_event) + } + } + } + + // BLOCKS + 
MainEvent::BlockValidator(event) => reactor::wrap_effects( + MainEvent::BlockValidator, + self.block_validator + .handle_event(effect_builder, rng, event), + ), + MainEvent::BlockValidatorRequest(req) => self.dispatch_event( + effect_builder, + rng, + MainEvent::BlockValidator(block_validator::Event::from(req)), + ), + MainEvent::BlockAccumulator(event) => reactor::wrap_effects( + MainEvent::BlockAccumulator, + self.block_accumulator + .handle_event(effect_builder, rng, event), + ), + MainEvent::BlockAccumulatorRequest(request) => reactor::wrap_effects( + MainEvent::BlockAccumulator, + self.block_accumulator + .handle_event(effect_builder, rng, request.into()), + ), + MainEvent::BlockSynchronizer(event) => reactor::wrap_effects( + MainEvent::BlockSynchronizer, + self.block_synchronizer + .handle_event(effect_builder, rng, event), + ), + MainEvent::BlockSynchronizerRequest(req) => reactor::wrap_effects( + MainEvent::BlockSynchronizer, + self.block_synchronizer + .handle_event(effect_builder, rng, req.into()), + ), + MainEvent::BlockAccumulatorAnnouncement( + BlockAccumulatorAnnouncement::AcceptedNewFinalitySignature { finality_signature }, + ) => { + debug!( + "notifying finality signature gossiper to start gossiping for: {} , {}", + finality_signature.block_hash(), + finality_signature.public_key(), + ); + let mut effects = reactor::wrap_effects( + MainEvent::FinalitySignatureGossiper, + self.finality_signature_gossiper.handle_event( + effect_builder, + rng, + gossiper::Event::ItemReceived { + item_id: finality_signature.gossip_id(), + source: Source::Ourself, + target: finality_signature.gossip_target(), + }, + ), + ); + + effects.extend(reactor::wrap_effects( + MainEvent::EventStreamServer, + self.event_stream_server.handle_event( + effect_builder, + rng, + event_stream_server::Event::FinalitySignature(Box::new( + (*finality_signature).into(), + )), + ), + )); + + effects + } + MainEvent::BlockGossiper(event) => reactor::wrap_effects( + MainEvent::BlockGossiper, 
+ self.block_gossiper.handle_event(effect_builder, rng, event), + ), + MainEvent::BlockGossiperIncoming(incoming) => reactor::wrap_effects( + MainEvent::BlockGossiper, + self.block_gossiper + .handle_event(effect_builder, rng, incoming.into()), + ), + MainEvent::BlockGossiperAnnouncement(GossiperAnnouncement::GossipReceived { + item_id: gossiped_block_id, + sender, + }) => reactor::wrap_effects( + MainEvent::BlockAccumulator, + self.block_accumulator.handle_event( + effect_builder, + rng, + block_accumulator::Event::RegisterPeer { + block_hash: gossiped_block_id, + era_id: None, + sender, + }, + ), + ), + MainEvent::BlockGossiperAnnouncement(GossiperAnnouncement::NewCompleteItem( + gossiped_block_id, + )) => { + error!(%gossiped_block_id, "gossiper should not announce new block"); + Effects::new() + } + MainEvent::BlockGossiperAnnouncement(GossiperAnnouncement::NewItemBody { + item, + sender, + }) => reactor::wrap_effects( + MainEvent::BlockAccumulator, + self.block_accumulator.handle_event( + effect_builder, + rng, + block_accumulator::Event::ReceivedBlock { + block: Arc::new(*item), + sender, + }, + ), + ), + MainEvent::BlockGossiperAnnouncement(GossiperAnnouncement::FinishedGossiping( + _gossiped_block_id, + )) => Effects::new(), + MainEvent::BlockFetcherAnnouncement(FetchedNewBlockAnnouncement { block, peer }) => { + // The block accumulator shouldn't concern itself with historical blocks that are + // being fetched. If the block is not convertible to the current version it means + // that it is surely a historical block. + if let Ok(block) = (*block).clone().try_into() { + reactor::wrap_effects( + MainEvent::BlockAccumulator, + self.block_accumulator.handle_event( + effect_builder, + rng, + block_accumulator::Event::ReceivedBlock { + block: Arc::new(block), + sender: peer, + }, + ), + ) + } else { + Effects::new() + } + } + + MainEvent::FinalitySignatureIncoming(incoming) => { + // Finality signature received via broadcast. 
+ let sender = incoming.sender; + let finality_signature = incoming.message; + debug!( + "FinalitySignatureIncoming({},{},{},{})", + finality_signature.era_id(), + finality_signature.block_hash(), + finality_signature.public_key(), + sender + ); + let block_accumulator_event = block_accumulator::Event::ReceivedFinalitySignature { + finality_signature, + sender, + }; + reactor::wrap_effects( + MainEvent::BlockAccumulator, + self.block_accumulator.handle_event( + effect_builder, + rng, + block_accumulator_event, + ), + ) + } + MainEvent::FinalitySignatureGossiper(event) => reactor::wrap_effects( + MainEvent::FinalitySignatureGossiper, + self.finality_signature_gossiper + .handle_event(effect_builder, rng, event), + ), + MainEvent::FinalitySignatureGossiperIncoming(incoming) => reactor::wrap_effects( + MainEvent::FinalitySignatureGossiper, + self.finality_signature_gossiper + .handle_event(effect_builder, rng, incoming.into()), + ), + MainEvent::FinalitySignatureGossiperAnnouncement( + GossiperAnnouncement::GossipReceived { + item_id: gossiped_finality_signature_id, + sender, + }, + ) => reactor::wrap_effects( + MainEvent::BlockAccumulator, + self.block_accumulator.handle_event( + effect_builder, + rng, + block_accumulator::Event::RegisterPeer { + block_hash: *gossiped_finality_signature_id.block_hash(), + era_id: Some(gossiped_finality_signature_id.era_id()), + sender, + }, + ), + ), + MainEvent::FinalitySignatureGossiperAnnouncement( + GossiperAnnouncement::NewCompleteItem(gossiped_finality_signature_id), + ) => { + error!(%gossiped_finality_signature_id, "gossiper should not announce new finality signature"); + Effects::new() + } + MainEvent::FinalitySignatureGossiperAnnouncement( + GossiperAnnouncement::NewItemBody { item, sender }, + ) => reactor::wrap_effects( + MainEvent::BlockAccumulator, + self.block_accumulator.handle_event( + effect_builder, + rng, + block_accumulator::Event::ReceivedFinalitySignature { + finality_signature: item, + sender, + }, + ), + ), + 
MainEvent::FinalitySignatureGossiperAnnouncement( + GossiperAnnouncement::FinishedGossiping(gossiped_finality_signature_id), + ) => { + self.signature_gossip_tracker + .register_signature(gossiped_finality_signature_id); + Effects::new() + } + MainEvent::FinalitySignatureFetcherAnnouncement( + FetchedNewFinalitySignatureAnnouncement { + finality_signature, + peer, + }, + ) => { + // If the signature is not convertible to the current version it means + // that it is historical. + if let FinalitySignature::V2(sig) = *finality_signature { + reactor::wrap_effects( + MainEvent::BlockAccumulator, + self.block_accumulator.handle_event( + effect_builder, + rng, + block_accumulator::Event::ReceivedFinalitySignature { + finality_signature: Box::new(sig), + sender: peer, + }, + ), + ) + } else { + Effects::new() + } + } + + // TRANSACTIONS + MainEvent::TransactionAcceptor(event) => reactor::wrap_effects( + MainEvent::TransactionAcceptor, + self.transaction_acceptor + .handle_event(effect_builder, rng, event), + ), + MainEvent::AcceptTransactionRequest(AcceptTransactionRequest { + transaction, + is_speculative, + responder, + }) => { + let source = if is_speculative { + Source::SpeculativeExec + } else { + Source::Client + }; + let event = transaction_acceptor::Event::Accept { + transaction, + source, + maybe_responder: Some(responder), + }; + reactor::wrap_effects( + MainEvent::TransactionAcceptor, + self.transaction_acceptor + .handle_event(effect_builder, rng, event), + ) + } + MainEvent::TransactionAcceptorAnnouncement( + TransactionAcceptorAnnouncement::AcceptedNewTransaction { + transaction, + source, + }, + ) => { + let mut effects = Effects::new(); + + match source { + Source::Ourself => (), // internal activity does not require further action + Source::Peer(_) => { + // this is a response to a transaction fetch request, dispatch to fetcher + effects.extend(self.fetchers.dispatch_fetcher_event( + effect_builder, + rng, + MainEvent::TransactionAcceptorAnnouncement( + 
TransactionAcceptorAnnouncement::AcceptedNewTransaction { + transaction, + source, + }, + ), + )); + } + Source::Client | Source::PeerGossiped(_) => { + // we must attempt to gossip onwards + effects.extend(self.dispatch_event( + effect_builder, + rng, + MainEvent::TransactionGossiper(gossiper::Event::ItemReceived { + item_id: transaction.gossip_id(), + source, + target: transaction.gossip_target(), + }), + )); + // notify event stream + effects.extend(self.dispatch_event( + effect_builder, + rng, + MainEvent::EventStreamServer( + event_stream_server::Event::TransactionAccepted(Arc::clone( + &transaction, + )), + ), + )); + } + Source::SpeculativeExec => { + error!( + %transaction, + "transaction acceptor should not announce speculative exec transactions" + ); + } + } + + effects + } + MainEvent::TransactionAcceptorAnnouncement( + TransactionAcceptorAnnouncement::InvalidTransaction { + transaction: _, + source: _, + }, + ) => Effects::new(), + MainEvent::TransactionGossiper(event) => reactor::wrap_effects( + MainEvent::TransactionGossiper, + self.transaction_gossiper + .handle_event(effect_builder, rng, event), + ), + MainEvent::TransactionGossiperIncoming(incoming) => reactor::wrap_effects( + MainEvent::TransactionGossiper, + self.transaction_gossiper + .handle_event(effect_builder, rng, incoming.into()), + ), + MainEvent::TransactionGossiperAnnouncement(GossiperAnnouncement::GossipReceived { + .. + }) => { + // Ignore the announcement. 
+ Effects::new() + } + MainEvent::TransactionGossiperAnnouncement(GossiperAnnouncement::NewCompleteItem( + gossiped_transaction_id, + )) => { + error!(%gossiped_transaction_id, "gossiper should not announce new transaction"); + Effects::new() + } + MainEvent::TransactionGossiperAnnouncement(GossiperAnnouncement::NewItemBody { + item, + sender, + }) => reactor::wrap_effects( + MainEvent::TransactionAcceptor, + self.transaction_acceptor.handle_event( + effect_builder, + rng, + transaction_acceptor::Event::Accept { + transaction: *item, + source: Source::PeerGossiped(sender), + maybe_responder: None, + }, + ), + ), + MainEvent::TransactionGossiperAnnouncement( + GossiperAnnouncement::FinishedGossiping(gossiped_txn_id), + ) => { + let reactor_event = MainEvent::TransactionBuffer( + transaction_buffer::Event::ReceiveTransactionGossiped(gossiped_txn_id), + ); + self.dispatch_event(effect_builder, rng, reactor_event) + } + MainEvent::TransactionBuffer(event) => reactor::wrap_effects( + MainEvent::TransactionBuffer, + self.transaction_buffer + .handle_event(effect_builder, rng, event), + ), + MainEvent::TransactionBufferRequest(req) => self.dispatch_event( + effect_builder, + rng, + MainEvent::TransactionBuffer(req.into()), + ), + MainEvent::TransactionBufferAnnouncement( + TransactionBufferAnnouncement::TransactionsExpired(hashes), + ) => { + let reactor_event = MainEvent::EventStreamServer( + event_stream_server::Event::TransactionsExpired(hashes), + ); + self.dispatch_event(effect_builder, rng, reactor_event) + } + + // CONTRACT RUNTIME & GLOBAL STATE + MainEvent::ContractRuntime(event) => reactor::wrap_effects( + MainEvent::ContractRuntime, + self.contract_runtime + .handle_event(effect_builder, rng, event), + ), + MainEvent::ContractRuntimeRequest(req) => reactor::wrap_effects( + MainEvent::ContractRuntime, + self.contract_runtime + .handle_event(effect_builder, rng, req.into()), + ), + MainEvent::ContractRuntimeAnnouncement( + 
ContractRuntimeAnnouncement::CommitStepSuccess { era_id, effects }, + ) => { + let reactor_event = + MainEvent::EventStreamServer(event_stream_server::Event::Step { + era_id, + execution_effects: effects, + }); + self.dispatch_event(effect_builder, rng, reactor_event) + } + MainEvent::ContractRuntimeAnnouncement( + ContractRuntimeAnnouncement::UpcomingEraValidators { + era_that_is_ending, + upcoming_era_validators, + }, + ) => { + info!( + "UpcomingEraValidators era_that_is_ending: {}", + era_that_is_ending + ); + self.validator_matrix.register_eras(upcoming_era_validators); + Effects::new() + } + MainEvent::ContractRuntimeAnnouncement( + ContractRuntimeAnnouncement::NextEraGasPrice { + era_id, + next_era_gas_price, + }, + ) => { + info!( + "New era gas price {} for era {}", + next_era_gas_price, era_id + ); + let event = MainEvent::ContractRuntimeRequest( + ContractRuntimeRequest::UpdateRuntimePrice(era_id, next_era_gas_price), + ); + let mut effects = self.dispatch_event(effect_builder, rng, event); + let reactor_event = MainEvent::TransactionBuffer( + transaction_buffer::Event::UpdateEraGasPrice(era_id, next_era_gas_price), + ); + effects.extend(self.dispatch_event(effect_builder, rng, reactor_event)); + let reactor_event = MainEvent::BlockValidator( + block_validator::Event::UpdateEraGasPrice(era_id, next_era_gas_price), + ); + effects.extend(self.dispatch_event(effect_builder, rng, reactor_event)); + effects + } + + MainEvent::TrieRequestIncoming(req) => reactor::wrap_effects( + MainEvent::ContractRuntime, + self.contract_runtime + .handle_event(effect_builder, rng, req.into()), + ), + MainEvent::TrieDemand(demand) => reactor::wrap_effects( + MainEvent::ContractRuntime, + self.contract_runtime + .handle_event(effect_builder, rng, demand.into()), + ), + MainEvent::TrieResponseIncoming(TrieResponseIncoming { sender, message }) => { + reactor::handle_fetch_response::( + self, + effect_builder, + rng, + sender, + &message.0, + ) + } + + // STORAGE + 
MainEvent::Storage(event) => reactor::wrap_effects( + MainEvent::Storage, + self.storage.handle_event(effect_builder, rng, event), + ), + MainEvent::StorageRequest(req) => reactor::wrap_effects( + MainEvent::Storage, + self.storage.handle_event(effect_builder, rng, req.into()), + ), + MainEvent::MarkBlockCompletedRequest(req) => reactor::wrap_effects( + MainEvent::Storage, + self.storage.handle_event(effect_builder, rng, req.into()), + ), + MainEvent::MakeBlockExecutableRequest(req) => reactor::wrap_effects( + MainEvent::Storage, + self.storage.handle_event(effect_builder, rng, req.into()), + ), + MainEvent::BinaryPort(req) => reactor::wrap_effects( + MainEvent::BinaryPort, + self.binary_port.handle_event(effect_builder, rng, req), + ), + + // This event gets emitted when we manage to read the era validators from the global + // states of a block after an upgrade and its parent. Once that happens, we can check + // for the signs of any changes happening during the upgrade and register the correct + // set of validators in the validators matrix. + MainEvent::GotBlockAfterUpgradeEraValidators( + era_id, + parent_era_validators, + block_era_validators, + ) => { + // `era_id`, being the era of the block after the upgrade, will be absent in the + // validators stored in the block after the upgrade - therefore we will use its + // successor for the comparison. + let era_to_check = era_id.successor(); + // We read the validators for era_id+1 from the parent of the block after the + // upgrade. + let validators_in_parent = match parent_era_validators.get(&era_to_check) { + Some(validators) => validators, + None => { + return fatal!( + effect_builder, + "couldn't find validators for era {} in parent_era_validators", + era_to_check + ) + .ignore(); + } + }; + // We also read the validators from the block after the upgrade itself. 
+ let validators_in_block = match block_era_validators.get(&era_to_check) { + Some(validators) => validators, + None => { + return fatal!( + effect_builder, + "couldn't find validators for era {} in block_era_validators", + era_to_check + ) + .ignore(); + } + }; + // Decide which validators to use for `era_id` in the validators matrix. + let validators_to_register = if validators_in_parent == validators_in_block { + // Nothing interesting happened - register the regular validators, ie. the + // ones stored for `era_id` in the parent of the block after the upgrade. + match parent_era_validators.get(&era_id) { + Some(validators) => validators, + None => { + return fatal!( + effect_builder, + "couldn't find validators for era {} in parent_era_validators", + era_id + ) + .ignore(); + } + } + } else { + // We had an upgrade changing the validators! We use the same validators that + // will be used for the era after the upgrade, as we can't trust the ones we + // would use normally. + validators_in_block + }; + let mut effects = self.update_validator_weights( + effect_builder, + rng, + era_id, + validators_to_register.clone(), + ); + // Crank the reactor so that any synchronizing tasks blocked by the lack of + // validators for `era_id` can resume. + effects.extend( + effect_builder + .immediately() + .event(|_| MainEvent::ReactorCrank), + ); + effects + } + + // DELEGATE ALL FETCHER RELEVANT EVENTS to self.fetchers.dispatch_fetcher_event(..) + MainEvent::LegacyDeployFetcher(..) + | MainEvent::LegacyDeployFetcherRequest(..) + | MainEvent::BlockFetcher(..) + | MainEvent::BlockFetcherRequest(..) + | MainEvent::TransactionFetcher(..) + | MainEvent::TransactionFetcherRequest(..) + | MainEvent::BlockHeaderFetcher(..) + | MainEvent::BlockHeaderFetcherRequest(..) + | MainEvent::TrieOrChunkFetcher(..) + | MainEvent::TrieOrChunkFetcherRequest(..) + | MainEvent::SyncLeapFetcher(..) + | MainEvent::SyncLeapFetcherRequest(..) + | MainEvent::ApprovalsHashesFetcher(..) 
+ | MainEvent::ApprovalsHashesFetcherRequest(..) + | MainEvent::FinalitySignatureFetcher(..) + | MainEvent::FinalitySignatureFetcherRequest(..) + | MainEvent::BlockExecutionResultsOrChunkFetcher(..) + | MainEvent::BlockExecutionResultsOrChunkFetcherRequest(..) => self + .fetchers + .dispatch_fetcher_event(effect_builder, rng, event), + } + } + + fn new( + config: Self::Config, + chainspec: Arc, + chainspec_raw_bytes: Arc, + network_identity: NetworkIdentity, + registry: &Registry, + event_queue: EventQueueHandle, + _rng: &mut NodeRng, + ) -> Result<(Self, Effects), Error> { + let node_startup_instant = Instant::now(); + + let effect_builder = EffectBuilder::new(event_queue); + + let metrics = Metrics::new(registry.clone()); + let memory_metrics = MemoryMetrics::new(registry.clone())?; + let event_queue_metrics = EventQueueMetrics::new(registry.clone(), event_queue)?; + + let protocol_version = chainspec.protocol_config.version; + let prevent_validator_shutdown = config.value().node.prevent_validator_shutdown; + + let trusted_hash = config.value().node.trusted_hash; + let (root_dir, config) = config.into_parts(); + let (our_secret_key, our_public_key) = config.consensus.load_keys(&root_dir)?; + let validator_matrix = ValidatorMatrix::new( + chainspec.core_config.finality_threshold_fraction, + chainspec.name_hash(), + chainspec + .protocol_config + .global_state_update + .as_ref() + .and_then(|global_state_update| global_state_update.validators.clone()), + chainspec.protocol_config.activation_point.era_id(), + our_secret_key.clone(), + our_public_key.clone(), + chainspec.core_config.auction_delay, + chainspec.core_config.signature_rewards_max_delay, + ); + + let storage_config = WithDir::new(&root_dir, config.storage.clone()); + + let hard_reset_to_start_of_era = chainspec.hard_reset_to_start_of_era(); + let storage = Storage::new( + &storage_config, + hard_reset_to_start_of_era, + protocol_version, + chainspec.protocol_config.activation_point.era_id(), + 
&chainspec.network_config.name, + chainspec.transaction_config.max_ttl.into(), + chainspec.core_config.recent_era_count(), + Some(registry), + config.node.force_resync, + chainspec.transaction_config.clone(), + )?; + + let contract_runtime = ContractRuntime::new( + storage.root_path(), + &config.contract_runtime, + chainspec.clone(), + registry, + )?; + + let allow_handshake = config.node.sync_handling != SyncHandling::Isolated; + + let network = Network::new( + config.network.clone(), + network_identity, + Some((our_secret_key, our_public_key)), + registry, + chainspec.as_ref(), + validator_matrix.clone(), + allow_handshake, + )?; + + let address_gossiper = Gossiper::<{ GossipedAddress::ID_IS_COMPLETE_ITEM }, _>::new( + "address_gossiper", + config.gossip, + registry, + )?; + + let rest_server = RestServer::new( + config.rest_server.clone(), + protocol_version, + chainspec.network_config.name.clone(), + ); + let binary_port_metrics = + BinaryPortMetrics::new(registry).map_err(BinaryPortInitializationError::from)?; + let binary_port = BinaryPort::new( + config.binary_port_server.clone(), + chainspec.clone(), + binary_port_metrics, + ); + let event_stream_server = EventStreamServer::new( + config.event_stream_server.clone(), + storage.root_path().to_path_buf(), + protocol_version, + ); + let diagnostics_port = + DiagnosticsPort::new(WithDir::new(&root_dir, config.diagnostics_port)); + let shutdown_trigger = ShutdownTrigger::new(); + + // local / remote data management + let sync_leaper = SyncLeaper::new(chainspec.clone(), registry)?; + let fetchers = Fetchers::new(&config.fetcher, registry)?; + + // gossipers + let block_gossiper = Gossiper::<{ BlockV2::ID_IS_COMPLETE_ITEM }, _>::new( + "block_gossiper", + config.gossip, + registry, + )?; + let transaction_gossiper = Gossiper::<{ Transaction::ID_IS_COMPLETE_ITEM }, _>::new( + "transaction_gossiper", + config.gossip, + registry, + )?; + let finality_signature_gossiper = Gossiper::< + { 
FinalitySignatureV2::ID_IS_COMPLETE_ITEM }, + _, + >::new( + "finality_signature_gossiper", config.gossip, registry + )?; + + // consensus + let consensus = EraSupervisor::new( + storage.root_path(), + validator_matrix.clone(), + config.consensus, + chainspec.clone(), + registry, + )?; + + // chain / transaction management + + let block_accumulator = BlockAccumulator::new( + config.block_accumulator, + validator_matrix.clone(), + chainspec.core_config.unbonding_delay, + chainspec.core_config.minimum_block_time, + chainspec.core_config.validator_slots, + registry, + )?; + let block_synchronizer = BlockSynchronizer::new( + config.block_synchronizer, + chainspec.clone(), + chainspec.core_config.simultaneous_peer_requests, + validator_matrix.clone(), + registry, + )?; + let block_validator = BlockValidator::new( + Arc::clone(&chainspec), + validator_matrix.clone(), + config.block_validator, + chainspec.vacancy_config.min_gas_price, + ); + let upgrade_watcher = + UpgradeWatcher::new(chainspec.as_ref(), config.upgrade_watcher, &root_dir)?; + let transaction_acceptor = TransactionAcceptor::new( + config.transaction_acceptor, + Arc::clone(&chainspec), + registry, + )?; + let transaction_buffer = + TransactionBuffer::new(Arc::clone(&chainspec), config.transaction_buffer, registry)?; + + let reactor = MainReactor { + chainspec, + chainspec_raw_bytes, + storage, + contract_runtime, + upgrade_watcher, + net: network, + address_gossiper, + + rest_server, + binary_port, + event_stream_server, + transaction_acceptor, + fetchers, + + block_gossiper, + transaction_gossiper, + finality_signature_gossiper, + sync_leaper, + transaction_buffer, + consensus, + block_validator, + block_accumulator, + block_synchronizer, + diagnostics_port, + shutdown_trigger, + + metrics, + memory_metrics, + event_queue_metrics, + + state: ReactorState::Initialize {}, + attempts: 0, + last_progress: Timestamp::now(), + max_attempts: config.node.max_attempts, + idle_tolerance: config.node.idle_tolerance, 
+ control_logic_default_delay: config.node.control_logic_default_delay, + trusted_hash, + validator_matrix, + sync_handling: config.node.sync_handling, + signature_gossip_tracker: SignatureGossipTracker::new(), + shutdown_for_upgrade_timeout: config.node.shutdown_for_upgrade_timeout, + switched_to_shutdown_for_upgrade: Timestamp::from(0), + upgrade_timeout: config.node.upgrade_timeout, + node_startup_instant, + finality_signature_creation: true, + prevent_validator_shutdown, + }; + info!("MainReactor: instantiated"); + + // If there's an upgrade staged with the same activation point as the current one, we must + // shut down immediately for upgrade. + let should_upgrade_immediately = reactor.upgrade_watcher.next_upgrade_activation_point() + == Some(reactor.chainspec.protocol_config.activation_point.era_id()); + let effects = if should_upgrade_immediately { + info!("MainReactor: immediate shutdown for upgrade"); + effect_builder + .immediately() + .event(|()| MainEvent::ControlAnnouncement(ControlAnnouncement::ShutdownForUpgrade)) + } else { + effect_builder + .immediately() + .event(|()| MainEvent::ReactorCrank) + }; + Ok((reactor, effects)) + } + + fn update_metrics(&mut self, event_queue_handle: EventQueueHandle) { + self.memory_metrics.estimate(self); + self.event_queue_metrics + .record_event_queue_counts(&event_queue_handle) + } + + fn activate_failpoint(&mut self, activation: &FailpointActivation) { + if activation.key().starts_with("consensus") { + >::activate_failpoint( + &mut self.consensus, + activation, + ); + } + if activation.key().starts_with("finality_signature_creation") { + self.finality_signature_creation = false; + } + } +} + +impl MainReactor { + fn update_validator_weights( + &mut self, + effect_builder: EffectBuilder, + rng: &mut NodeRng, + era_id: EraId, + validator_weights: BTreeMap, + ) -> Effects { + self.validator_matrix + .register_validator_weights(era_id, validator_weights); + info!(%era_id, "validator_matrix updated"); + // notify 
validator bound components + let mut effects = reactor::wrap_effects( + MainEvent::BlockAccumulator, + self.block_accumulator + .handle_validators(effect_builder, rng), + ); + effects.extend(reactor::wrap_effects( + MainEvent::BlockSynchronizer, + self.block_synchronizer + .handle_validators(effect_builder, rng), + )); + effects + } + + fn handle_meta_block( + &mut self, + effect_builder: EffectBuilder, + rng: &mut NodeRng, + create_finality_signatures: bool, + mut meta_block: MetaBlock, + ) -> Effects { + debug!( + "MetaBlock: handling meta block {} {} {:?}", + meta_block.height(), + meta_block.hash(), + meta_block.state() + ); + if !meta_block.state().is_stored() { + return fatal!( + effect_builder, + "MetaBlock: block should be stored after execution or accumulation" + ) + .ignore(); + } + + let mut effects = Effects::new(); + + if meta_block + .mut_state() + .register_as_sent_to_transaction_buffer() + .was_updated() + { + debug!( + "MetaBlock: notifying transaction buffer: {} {}", + meta_block.height(), + meta_block.hash(), + ); + + match &meta_block { + MetaBlock::Forward(fwd_meta_block) => { + effects.extend(reactor::wrap_effects( + MainEvent::TransactionBuffer, + self.transaction_buffer.handle_event( + effect_builder, + rng, + transaction_buffer::Event::Block(Arc::clone(&fwd_meta_block.block)), + ), + )); + } + MetaBlock::Historical(historical_meta_block) => { + effects.extend(reactor::wrap_effects( + MainEvent::TransactionBuffer, + self.transaction_buffer.handle_event( + effect_builder, + rng, + transaction_buffer::Event::VersionedBlock(Arc::clone( + &historical_meta_block.block, + )), + ), + )); + } + } + } + + if let MetaBlock::Forward(forward_meta_block) = &meta_block { + let block = forward_meta_block.block.clone(); + if meta_block + .mut_state() + .register_updated_validator_matrix() + .was_updated() + { + if let Some(validator_weights) = block.header().next_era_validator_weights() { + let era_id = block.era_id(); + let next_era_id = 
era_id.successor(); + debug!( + "MetaBlock: updating validator matrix: {} {} {} {}", + block.height(), + block.hash(), + era_id, + next_era_id + ); + effects.extend(self.update_validator_weights( + effect_builder, + rng, + next_era_id, + validator_weights.clone(), + )); + } + } + + // Validators gossip the block as soon as they deem it valid, but non-validators + // only gossip once the block is marked complete. + if let Some(true) = self + .validator_matrix + .is_self_validator_in_era(block.era_id()) + { + debug!( + "MetaBlock: updating validator gossip state: {} {}", + block.height(), + block.hash(), + ); + self.update_meta_block_gossip_state( + effect_builder, + rng, + block.hash(), + block.gossip_target(), + meta_block.mut_state(), + &mut effects, + ); + } + + if !meta_block.state().is_executed() { + debug!( + "MetaBlock: unexecuted block: {} {}", + block.height(), + block.hash(), + ); + // We've done as much as we can on a valid but un-executed block. + return effects; + } + + if meta_block + .mut_state() + .register_we_have_tried_to_sign() + .was_updated() + && create_finality_signatures + { + // When this node is a validator in this era, sign and announce. 
+ if let Some(finality_signature) = self + .validator_matrix + .create_finality_signature(block.header()) + { + debug!( + %finality_signature, + "MetaBlock: registering finality signature: {} {}", + block.height(), + block.hash(), + ); + + effects.extend(reactor::wrap_effects( + MainEvent::Storage, + effect_builder + .put_finality_signature_to_storage(finality_signature.clone().into()) + .ignore(), + )); + + effects.extend(reactor::wrap_effects( + MainEvent::BlockAccumulator, + self.block_accumulator.handle_event( + effect_builder, + rng, + block_accumulator::Event::CreatedFinalitySignature { + finality_signature: Box::new(finality_signature.clone()), + }, + ), + )); + + let era_id = finality_signature.era_id(); + let payload = Message::FinalitySignature(Box::new(finality_signature)); + effects.extend(reactor::wrap_effects( + MainEvent::Network, + effect_builder + .broadcast_message_to_validators(payload, era_id) + .ignore(), + )); + } + } + } + + if meta_block + .mut_state() + .register_as_validator_notified() + .was_updated() + { + debug!( + "MetaBlock: notifying block validator: {} {}", + meta_block.height(), + meta_block.hash(), + ); + effects.extend(reactor::wrap_effects( + MainEvent::BlockValidator, + self.block_validator.handle_event( + effect_builder, + rng, + block_validator::Event::BlockStored(meta_block.height()), + ), + )); + } + + if meta_block + .mut_state() + .register_as_consensus_notified() + .was_updated() + { + debug!( + "MetaBlock: notifying consensus: {} {}", + meta_block.height(), + meta_block.hash(), + ); + + match &meta_block { + MetaBlock::Forward(fwd_meta_block) => { + effects.extend(reactor::wrap_effects( + MainEvent::Consensus, + self.consensus.handle_event( + effect_builder, + rng, + consensus::Event::BlockAdded { + header: Box::new(fwd_meta_block.block.header().clone().into()), + header_hash: *fwd_meta_block.block.hash(), + }, + ), + )); + } + MetaBlock::Historical(_historical_meta_block) => { + // Historical meta blocks aren't of 
interest to consensus - consensus only + // cares about new blocks. Hence, we can just do nothing here. + } + } + } + + if let MetaBlock::Forward(forward_meta_block) = &meta_block { + let block = forward_meta_block.block.clone(); + let execution_results = forward_meta_block.execution_results.clone(); + + if meta_block + .mut_state() + .register_as_accumulator_notified() + .was_updated() + { + debug!( + "MetaBlock: notifying accumulator: {} {}", + block.height(), + block.hash(), + ); + let meta_block = ForwardMetaBlock { + block, + execution_results, + state: *meta_block.state(), + }; + + effects.extend(reactor::wrap_effects( + MainEvent::BlockAccumulator, + self.block_accumulator.handle_event( + effect_builder, + rng, + block_accumulator::Event::ExecutedBlock { meta_block }, + ), + )); + // We've done as much as we can for now, we need to wait for the block + // accumulator to mark the block complete before proceeding further. + return effects; + } + } + + // We *always* want to initialize the contract runtime with the highest complete block. + // In case of an upgrade, we want the reactor to hold off in the `Upgrading` state until + // the immediate switch block is stored and *also* marked complete. + // This will allow the contract runtime to initialize properly (see + // [`refresh_contract_runtime`]) when the reactor is transitioning from `CatchUp` to + // `KeepUp`. 
+ if !meta_block.state().is_marked_complete() { + error!( + block_hash = ?meta_block.hash(), + state = ?meta_block.state(), + "should be a complete block after passing to accumulator" + ); + } else { + debug!( + "MetaBlock: block is marked complete: {} {}", + meta_block.height(), + meta_block.hash(), + ); + } + + if let MetaBlock::Forward(forward_meta_block) = &meta_block { + let block = forward_meta_block.block.clone(); + + debug!( + "MetaBlock: update gossip state: {} {}", + block.height(), + block.hash(), + ); + self.update_meta_block_gossip_state( + effect_builder, + rng, + block.hash(), + block.gossip_target(), + meta_block.mut_state(), + &mut effects, + ); + + if meta_block + .mut_state() + .register_as_synchronizer_notified() + .was_updated() + { + debug!( + "MetaBlock: notifying block synchronizer: {} {}", + block.height(), + block.hash(), + ); + + effects.extend(reactor::wrap_effects( + MainEvent::BlockSynchronizer, + self.block_synchronizer.handle_event( + effect_builder, + rng, + block_synchronizer::Event::MarkBlockExecuted(*block.hash()), + ), + )); + } + } + + debug_assert!( + meta_block.state().verify_complete(), + "meta block {} at height {} has invalid state: {:?}", + meta_block.hash(), + meta_block.height(), + meta_block.state() + ); + + if meta_block + .mut_state() + .register_all_actions_done() + .was_already_registered() + { + error!( + block_hash = ?meta_block.hash(), + state = ?meta_block.state(), + "duplicate meta block announcement emitted" + ); + return effects; + } + + debug!( + "MetaBlock: notifying event stream: {} {}", + meta_block.height(), + meta_block.hash(), + ); + let versioned_block: Arc = match &meta_block { + MetaBlock::Forward(fwd_meta_block) => Arc::new((*fwd_meta_block.block).clone().into()), + MetaBlock::Historical(historical_meta_block) => historical_meta_block.block.clone(), + }; + effects.extend(reactor::wrap_effects( + MainEvent::EventStreamServer, + self.event_stream_server.handle_event( + effect_builder, + rng, + 
event_stream_server::Event::BlockAdded(Arc::clone(&versioned_block)), + ), + )); + + match &meta_block { + MetaBlock::Forward(fwd_meta_block) => { + for exec_artifact in fwd_meta_block.execution_results.iter() { + let event = event_stream_server::Event::TransactionProcessed { + transaction_hash: exec_artifact.transaction_hash, + transaction_header: Box::new(exec_artifact.transaction_header.clone()), + block_hash: *fwd_meta_block.block.hash(), + execution_result: Box::new(exec_artifact.execution_result.clone()), + messages: exec_artifact.messages.clone(), + }; + + effects.extend(reactor::wrap_effects( + MainEvent::EventStreamServer, + self.event_stream_server + .handle_event(effect_builder, rng, event), + )); + } + } + MetaBlock::Historical(historical_meta_block) => { + for (transaction_hash, transaction_header, execution_result) in + historical_meta_block.execution_results.iter() + { + let event = event_stream_server::Event::TransactionProcessed { + transaction_hash: *transaction_hash, + transaction_header: Box::new(transaction_header.clone()), + block_hash: *historical_meta_block.block.hash(), + execution_result: Box::new(execution_result.clone()), + messages: Vec::new(), + }; + effects.extend(reactor::wrap_effects( + MainEvent::EventStreamServer, + self.event_stream_server + .handle_event(effect_builder, rng, event), + )); + } + } + } + + debug!( + "MetaBlock: notifying shutdown watcher: {} {}", + meta_block.height(), + meta_block.hash(), + ); + effects.extend(reactor::wrap_effects( + MainEvent::ShutdownTrigger, + self.shutdown_trigger.handle_event( + effect_builder, + rng, + shutdown_trigger::Event::CompletedBlock(CompletedBlockInfo::new( + meta_block.height(), + meta_block.era_id(), + meta_block.is_switch_block(), + )), + ), + )); + + effects + } + + fn update_meta_block_gossip_state( + &mut self, + effect_builder: EffectBuilder, + rng: &mut NodeRng, + block_hash: &BlockHash, + gossip_target: GossipTarget, + state: &mut MetaBlockState, + effects: &mut Effects, 
+ ) { + if state.register_as_gossiped().was_updated() { + debug!( + "notifying block gossiper to start gossiping for: {}", + block_hash + ); + effects.extend(reactor::wrap_effects( + MainEvent::BlockGossiper, + self.block_gossiper.handle_event( + effect_builder, + rng, + gossiper::Event::ItemReceived { + item_id: *block_hash, + source: Source::Ourself, + target: gossip_target, + }, + ), + )); + } + } +} + +// TEST ENABLEMENT -- used by integration tests elsewhere +#[cfg(test)] +impl MainReactor { + pub(crate) fn consensus(&self) -> &EraSupervisor { + &self.consensus + } + + pub(crate) fn storage(&self) -> &Storage { + &self.storage + } + + pub(crate) fn contract_runtime(&self) -> &ContractRuntime { + &self.contract_runtime + } +} + +#[cfg(test)] +impl NetworkedReactor for MainReactor { + fn node_id(&self) -> crate::types::NodeId { + self.net.node_id() + } +} diff --git a/node/src/reactor/main_reactor/catch_up.rs b/node/src/reactor/main_reactor/catch_up.rs new file mode 100644 index 0000000000..f98134a566 --- /dev/null +++ b/node/src/reactor/main_reactor/catch_up.rs @@ -0,0 +1,429 @@ +use std::time::Duration; + +use either::Either; +use tracing::{debug, info, warn}; + +use casper_types::{ActivationPoint, BlockHash, TimeDiff, Timestamp}; + +use crate::{ + components::{ + block_accumulator::{SyncIdentifier, SyncInstruction}, + block_synchronizer::BlockSynchronizerProgress, + sync_leaper, + sync_leaper::{LeapActivityError, LeapState}, + ValidatorBoundComponent, + }, + effect::{requests::BlockSynchronizerRequest, EffectBuilder, EffectExt, Effects}, + reactor::{ + main_reactor::{MainEvent, MainReactor}, + wrap_effects, + }, + types::{NodeId, SyncLeap, SyncLeapIdentifier}, + NodeRng, +}; + +pub(super) enum CatchUpInstruction { + Do(Duration, Effects), + CheckLater(String, Duration), + Fatal(String), + ShutdownForUpgrade, + CaughtUp, + CommitGenesis, + CommitUpgrade, +} + +impl MainReactor { + pub(super) fn catch_up_instruction( + &mut self, + effect_builder: 
EffectBuilder, + rng: &mut NodeRng, + ) -> CatchUpInstruction { + // if there is instruction, return to start working on it + // else fall thru with the current best available id for block syncing + let sync_identifier = match self.catch_up_process() { + Either::Right(catch_up_instruction) => return catch_up_instruction, + Either::Left(sync_identifier) => sync_identifier, + }; + debug!( + ?sync_identifier, + block_hash = %sync_identifier.block_hash(), + "CatchUp: sync identifier" + ); + // we check with the block accumulator before doing sync work as it may be aware of one or + // more blocks that are higher than our current highest block + let sync_instruction = self.block_accumulator.sync_instruction(sync_identifier); + debug!( + ?sync_instruction, + block_hash = %sync_instruction.block_hash(), + "CatchUp: sync_instruction" + ); + if let Some(catch_up_instruction) = + self.catch_up_sync_instruction(effect_builder, rng, sync_instruction) + { + // do necessary work to catch up + return catch_up_instruction; + } + // there are no catch up or shutdown instructions, so we must be caught up + CatchUpInstruction::CaughtUp + } + + fn catch_up_process(&mut self) -> Either { + let catch_up_progress = self.block_synchronizer.historical_progress(); + self.update_last_progress(&catch_up_progress, false); + match catch_up_progress { + BlockSynchronizerProgress::Idle => { + // not working on syncing a block (ready to start a new one) + match self.trusted_hash { + Some(trusted_hash) => self.catch_up_trusted_hash(trusted_hash), + None => self.catch_up_no_trusted_hash(), + } + } + BlockSynchronizerProgress::Syncing(block_hash, maybe_block_height, last_progress) => { + // working on syncing a block + self.catch_up_syncing(block_hash, maybe_block_height, last_progress) + } + BlockSynchronizerProgress::Executing(block_hash, _, _) => { + // this code path should be unreachable because we're not + // supposed to enqueue historical blocks for execution. 
+ Either::Right(CatchUpInstruction::Fatal(format!( + "CatchUp: block synchronizer attempted to execute block: {}", + block_hash + ))) + } + BlockSynchronizerProgress::Synced(block_hash, block_height, era_id) => Either::Left( + // for a synced CatchUp block -> we have header, body, global state, any execution + // effects, any referenced deploys, & sufficient finality (by weight) of signatures + SyncIdentifier::SyncedBlockIdentifier(block_hash, block_height, era_id), + ), + } + } + + fn catch_up_no_trusted_hash(&mut self) -> Either { + // no trusted hash provided, we will attempt to use local tip if available + match self.storage.get_highest_complete_block() { + Ok(Some(block)) => { + // this is typically a restart scenario; if a node stops and restarts + // quickly enough they can rejoin the network from their highest local block + // if too much time has passed, the node will shutdown and require a + // trusted block hash to be provided via the config file + info!("CatchUp: local tip detected, no trusted hash"); + Either::Left(SyncIdentifier::LocalTip( + *block.hash(), + block.height(), + block.era_id(), + )) + } + Ok(None) => { + match self + .storage + .read_highest_switch_block_headers(1) + .map(|headers| headers.first().cloned()) + { + Ok(Some(_)) => { + // no trusted hash, no local block, no error, must be waiting for genesis + info!("CatchUp: waiting to store genesis immediate switch block"); + Either::Right(CatchUpInstruction::CheckLater( + "waiting for genesis immediate switch block to be stored".to_string(), + self.control_logic_default_delay.into(), + )) + } + Ok(None) => { + // no trusted hash, no local block, might be genesis + self.catch_up_check_genesis() + } + Err(storage_err) => Either::Right(CatchUpInstruction::Fatal(format!( + "CatchUp: Could not read storage to find highest switch block header: {}", + storage_err + ))), + } + } + Err(err) => Either::Right(CatchUpInstruction::Fatal(format!( + "CatchUp: fatal block store error when attempting to 
read \ + highest complete block: {}", + err + ))), + } + } + + fn catch_up_check_genesis(&mut self) -> Either { + match self.chainspec.protocol_config.activation_point { + ActivationPoint::Genesis(timestamp) => { + // this bootstraps a network; it only occurs once ever on a given network but is + // very load-bearing as errors in this logic can prevent the network from coming + // into existence or surviving its initial existence. + + let now = Timestamp::now(); + let grace_period = timestamp.saturating_add(TimeDiff::from_seconds(180)); + if now > grace_period { + return Either::Right(CatchUpInstruction::Fatal( + "CatchUp: late for genesis; cannot proceed without trusted hash" + .to_string(), + )); + } + let time_remaining = timestamp.saturating_diff(now); + if time_remaining > TimeDiff::default() { + return Either::Right(CatchUpInstruction::CheckLater( + format!("waiting for genesis activation at {}", timestamp), + Duration::from(time_remaining), + )); + } + Either::Right(CatchUpInstruction::CommitGenesis) + } + ActivationPoint::EraId(_) => { + // no trusted hash, no local block, not genesis + Either::Right(CatchUpInstruction::Fatal( + "CatchUp: cannot proceed without trusted hash".to_string(), + )) + } + } + } + + fn catch_up_trusted_hash( + &mut self, + trusted_hash: BlockHash, + ) -> Either { + // if we have a configured trusted hash and we have the header for that block, + // use the higher block height of the local tip and the trusted header + match self.storage.read_block_header_by_hash(&trusted_hash) { + Ok(Some(trusted_header)) => { + match self.storage.get_highest_complete_block() { + Ok(Some(block)) => { + // leap w/ the higher of local tip or trusted hash + let trusted_height = trusted_header.height(); + if trusted_height > block.height() { + Either::Left(SyncIdentifier::BlockIdentifier( + trusted_hash, + trusted_height, + )) + } else { + Either::Left(SyncIdentifier::LocalTip( + *block.hash(), + block.height(), + block.era_id(), + )) + } + } + Ok(None) 
=> Either::Left(SyncIdentifier::BlockHash(trusted_hash)), + Err(_) => Either::Right(CatchUpInstruction::Fatal( + "CatchUp: fatal block store error when attempting to \ + read highest complete block" + .to_string(), + )), + } + } + Ok(None) => { + // we do not have the header for the trusted hash. we may have local tip, + // but we start with the configured trusted hash in this scenario as it is + // necessary to allow a node to re-join if their local state is stale + Either::Left(SyncIdentifier::BlockHash(trusted_hash)) + } + Err(err) => Either::Right(CatchUpInstruction::Fatal(format!( + "CatchUp: fatal block store error when attempting to read \ + highest complete block: {}", + err + ))), + } + } + + fn catch_up_syncing( + &mut self, + block_hash: BlockHash, + maybe_block_height: Option, + last_progress: Timestamp, + ) -> Either { + // if we have not made progress on our attempt to catch up with the network, increment + // attempts counter and try again; the crank logic will shut the node down on the next + // crank if we've exceeded our reattempts + let idleness = Timestamp::now().saturating_diff(last_progress); + if idleness > self.idle_tolerance { + self.attempts += 1; + warn!( + %last_progress, + remaining_attempts = self.max_attempts.saturating_sub(self.attempts), + "CatchUp: idleness detected" + ); + } + match maybe_block_height { + None => Either::Left(SyncIdentifier::BlockHash(block_hash)), + Some(block_height) => { + Either::Left(SyncIdentifier::BlockIdentifier(block_hash, block_height)) + } + } + } + + fn catch_up_sync_instruction( + &mut self, + effect_builder: EffectBuilder, + rng: &mut NodeRng, + sync_instruction: SyncInstruction, + ) -> Option { + match sync_instruction { + SyncInstruction::Leap { block_hash } + | SyncInstruction::LeapIntervalElapsed { block_hash } => { + Some(self.catch_up_leap(effect_builder, rng, block_hash)) + } + SyncInstruction::BlockSync { block_hash } => { + Some(self.catch_up_block_sync(effect_builder, block_hash)) + } + 
SyncInstruction::CaughtUp { .. } => self.catch_up_check_transition(), + } + } + + fn catch_up_leap( + &mut self, + effect_builder: EffectBuilder, + rng: &mut NodeRng, + block_hash: BlockHash, + ) -> CatchUpInstruction { + // register block builder so that control logic can tell that block is Syncing, + // otherwise block_synchronizer detects as Idle which can cause unnecessary churn + // on subsequent cranks while leaper is awaiting responses. + self.block_synchronizer + .register_block_by_hash(block_hash, true); + let leap_status = self.sync_leaper.leap_status(); + info!(%block_hash, %leap_status, "CatchUp: status"); + match leap_status { + LeapState::Idle => self.catch_up_leaper_idle(effect_builder, rng, block_hash), + LeapState::Awaiting { .. } => CatchUpInstruction::CheckLater( + "sync leaper is awaiting response".to_string(), + self.control_logic_default_delay.into(), + ), + LeapState::Received { + best_available, + from_peers, + .. + } => self.catch_up_leap_received(effect_builder, rng, *best_available, from_peers), + LeapState::Failed { error, .. } => { + self.catch_up_leap_failed(effect_builder, rng, block_hash, error) + } + } + } + + fn catch_up_leap_failed( + &mut self, + effect_builder: EffectBuilder, + rng: &mut NodeRng, + block_hash: BlockHash, + error: LeapActivityError, + ) -> CatchUpInstruction { + self.attempts += 1; + warn!( + %error, + remaining_attempts = %self.max_attempts.saturating_sub(self.attempts), + "CatchUp: failed leap", + ); + self.catch_up_leaper_idle(effect_builder, rng, block_hash) + } + + fn catch_up_leaper_idle( + &mut self, + effect_builder: EffectBuilder, + rng: &mut NodeRng, + block_hash: BlockHash, + ) -> CatchUpInstruction { + // we get a random sampling of peers to ask. 
+ let peers_to_ask = self.net.fully_connected_peers_random( + rng, + self.chainspec.core_config.simultaneous_peer_requests as usize, + ); + if peers_to_ask.is_empty() { + return CatchUpInstruction::CheckLater( + "no peers".to_string(), + self.chainspec.core_config.minimum_block_time.into(), + ); + } + + // latch accumulator progress to allow sync-leap time to do work + self.block_accumulator.reset_last_progress(); + + let sync_leap_identifier = SyncLeapIdentifier::sync_to_tip(block_hash); + let effects = effect_builder.immediately().event(move |_| { + MainEvent::SyncLeaper(sync_leaper::Event::AttemptLeap { + sync_leap_identifier, + peers_to_ask, + }) + }); + CatchUpInstruction::Do(self.control_logic_default_delay.into(), effects) + } + + fn catch_up_leap_received( + &mut self, + effect_builder: EffectBuilder, + rng: &mut NodeRng, + sync_leap: SyncLeap, + from_peers: Vec, + ) -> CatchUpInstruction { + let block_hash = sync_leap.highest_block_hash(); + let block_height = sync_leap.highest_block_height(); + info!( + %sync_leap, + %block_height, + %block_hash, + "CatchUp: leap received" + ); + + for validator_weights in sync_leap.era_validator_weights( + self.validator_matrix.fault_tolerance_threshold(), + &self.chainspec.protocol_config, + ) { + self.validator_matrix + .register_era_validator_weights(validator_weights); + } + + let mut effects = Effects::new(); + + effects.extend(wrap_effects( + MainEvent::BlockAccumulator, + self.block_accumulator + .handle_validators(effect_builder, rng), + )); + + effects.extend(wrap_effects( + MainEvent::BlockSynchronizer, + self.block_synchronizer + .handle_validators(effect_builder, rng), + )); + + self.block_synchronizer + .register_sync_leap(&sync_leap, from_peers, true); + + CatchUpInstruction::Do(self.control_logic_default_delay.into(), effects) + } + + fn catch_up_block_sync( + &mut self, + effect_builder: EffectBuilder, + block_hash: BlockHash, + ) -> CatchUpInstruction { + if self + .block_synchronizer + 
.register_block_by_hash(block_hash, true) + { + // NeedNext will self perpetuate until nothing is needed for this block + let mut effects = Effects::new(); + effects.extend(effect_builder.immediately().event(|_| { + MainEvent::BlockSynchronizerRequest(BlockSynchronizerRequest::NeedNext) + })); + CatchUpInstruction::Do(Duration::ZERO, effects) + } else { + CatchUpInstruction::CheckLater( + format!("block_synchronizer is currently working on {}", block_hash), + self.control_logic_default_delay.into(), + ) + } + } + + fn catch_up_check_transition(&mut self) -> Option { + // we may be starting back up after a shutdown for upgrade; if so we need to + // commit upgrade now before proceeding further + if self.should_commit_upgrade() { + return Some(CatchUpInstruction::CommitUpgrade); + } + // we may need to shutdown to go thru an upgrade + if self.should_shutdown_for_upgrade() { + Some(CatchUpInstruction::ShutdownForUpgrade) + } else { + None + } + } +} diff --git a/node/src/reactor/main_reactor/config.rs b/node/src/reactor/main_reactor/config.rs new file mode 100644 index 0000000000..510eddb48c --- /dev/null +++ b/node/src/reactor/main_reactor/config.rs @@ -0,0 +1,82 @@ +use datasize::DataSize; +use serde::{Deserialize, Serialize}; +use tracing::error; + +use casper_types::Chainspec; + +use crate::{ + logging::LoggingConfig, types::NodeConfig, BinaryPortConfig, BlockAccumulatorConfig, + BlockSynchronizerConfig, BlockValidatorConfig, ConsensusConfig, ContractRuntimeConfig, + DiagnosticsPortConfig, EventStreamServerConfig, FetcherConfig, GossipConfig, NetworkConfig, + RestServerConfig, StorageConfig, TransactionAcceptorConfig, TransactionBufferConfig, + UpgradeWatcherConfig, +}; + +/// Root configuration. +#[derive(Clone, DataSize, Debug, Default, Serialize, Deserialize)] +// Disallow unknown fields to ensure config files and command-line overrides contain valid keys. +#[serde(deny_unknown_fields)] +pub struct Config { + /// Config values for the node. 
+ pub node: NodeConfig, + /// Config values for logging. + pub logging: LoggingConfig, + /// Config values for consensus. + pub consensus: ConsensusConfig, + /// Config values for network. + pub network: NetworkConfig, + /// Config values for the event stream server. + pub event_stream_server: EventStreamServerConfig, + /// Config values for the REST server. + pub rest_server: RestServerConfig, + /// Config values for storage. + pub storage: StorageConfig, + /// Config values for gossip. + pub gossip: GossipConfig, + /// Config values for fetchers. + pub fetcher: FetcherConfig, + /// Config values for the contract runtime. + pub contract_runtime: ContractRuntimeConfig, + /// Config values for the transaction acceptor. + pub transaction_acceptor: TransactionAcceptorConfig, + /// Config values for the transaction buffer. + pub transaction_buffer: TransactionBufferConfig, + /// Config values for the diagnostics port. + pub diagnostics_port: DiagnosticsPortConfig, + /// Config values for the block accumulator. + pub block_accumulator: BlockAccumulatorConfig, + /// Config values for the block synchronizer. + pub block_synchronizer: BlockSynchronizerConfig, + /// Config values for the block validator. + pub block_validator: BlockValidatorConfig, + /// Config values for the upgrade watcher. + pub upgrade_watcher: UpgradeWatcherConfig, + /// Config values for the BinaryPort server. + pub binary_port_server: BinaryPortConfig, +} + +impl Config { + /// This modifies `self` so that all configured options are within the bounds set in the + /// provided chainspec. 
+ pub(crate) fn ensure_valid(&mut self, chainspec: &Chainspec) { + if self.transaction_acceptor.timestamp_leeway + > chainspec.transaction_config.max_timestamp_leeway + { + error!( + configured_timestamp_leeway = %self.transaction_acceptor.timestamp_leeway, + max_timestamp_leeway = %chainspec.transaction_config.max_timestamp_leeway, + "setting value for 'transaction_acceptor.timestamp_leeway' to maximum permitted by \ + chainspec 'transaction_config.max_timestamp_leeway'", + ); + self.transaction_acceptor.timestamp_leeway = + chainspec.transaction_config.max_timestamp_leeway; + } + } + + /// Set network config. + #[cfg(test)] + pub(crate) fn with_network_config(mut self, network_config: NetworkConfig) -> Self { + self.network = network_config; + self + } +} diff --git a/node/src/reactor/main_reactor/control.rs b/node/src/reactor/main_reactor/control.rs new file mode 100644 index 0000000000..8de6a3394a --- /dev/null +++ b/node/src/reactor/main_reactor/control.rs @@ -0,0 +1,608 @@ +use std::time::Duration; +use tracing::{debug, error, info, trace}; + +use casper_storage::data_access_layer::GenesisResult; +use casper_types::{BlockHash, BlockHeader, Digest, EraId, PublicKey, Timestamp}; + +use crate::{ + components::{ + binary_port, + block_synchronizer::{self, BlockSynchronizerProgress}, + contract_runtime::ExecutionPreState, + diagnostics_port, event_stream_server, network, rest_server, upgrade_watcher, + }, + effect::{announcements::ControlAnnouncement, EffectBuilder, EffectExt, Effects}, + fatal, + reactor::main_reactor::{ + catch_up::CatchUpInstruction, genesis_instruction::GenesisInstruction, + keep_up::KeepUpInstruction, upgrade_shutdown::UpgradeShutdownInstruction, + upgrading_instruction::UpgradingInstruction, utils, validate::ValidateInstruction, + MainEvent, MainReactor, ReactorState, + }, + types::{BlockPayload, ExecutableBlock, FinalizedBlock, InternalEraReport, MetaBlockState}, + NodeRng, +}; + +impl MainReactor { + pub(super) fn crank( + &mut self, + 
effect_builder: EffectBuilder, + rng: &mut NodeRng, + ) -> Effects { + if self.attempts > self.max_attempts { + return fatal!(effect_builder, "exceeded reattempt tolerance").ignore(); + } + let (delay, mut effects) = self.do_crank(effect_builder, rng); + effects.extend( + async move { + if !delay.is_zero() { + tokio::time::sleep(delay).await; + } + } + .event(|_| MainEvent::ReactorCrank), + ); + effects + } + + fn do_crank( + &mut self, + effect_builder: EffectBuilder, + rng: &mut NodeRng, + ) -> (Duration, Effects) { + const INITIALIZATION_DELAY_SPEED_UP_FACTOR: u64 = 4; + + match self.state { + ReactorState::Initialize => { + // We can be more greedy when cranking through the initialization process as the + // progress is expected to happen quickly. + let initialization_logic_default_delay = + self.control_logic_default_delay / INITIALIZATION_DELAY_SPEED_UP_FACTOR; + + match self.initialize_next_component(effect_builder) { + Some(effects) => (initialization_logic_default_delay.into(), effects), + None => { + if self.sync_handling.is_isolated() { + // If node is "isolated" it doesn't care about peers + if let Err(msg) = self.refresh_contract_runtime() { + return ( + Duration::ZERO, + fatal!(effect_builder, "{}", msg).ignore(), + ); + } + self.state = ReactorState::KeepUp; + return (Duration::ZERO, Effects::new()); + } + if false == self.net.has_sufficient_fully_connected_peers() { + info!("Initialize: awaiting sufficient fully-connected peers"); + return (initialization_logic_default_delay.into(), Effects::new()); + } + if let Err(msg) = self.refresh_contract_runtime() { + return (Duration::ZERO, fatal!(effect_builder, "{}", msg).ignore()); + } + info!("Initialize: switch to CatchUp"); + self.state = ReactorState::CatchUp; + (Duration::ZERO, Effects::new()) + } + } + } + ReactorState::Upgrading => match self.upgrading_instruction() { + UpgradingInstruction::CheckLater(msg, wait) => { + debug!("Upgrading: {}", msg); + (wait, Effects::new()) + } + 
UpgradingInstruction::CatchUp => { + info!("Upgrading: switch to CatchUp"); + self.state = ReactorState::CatchUp; + (Duration::ZERO, Effects::new()) + } + }, + ReactorState::CatchUp => match self.catch_up_instruction(effect_builder, rng) { + CatchUpInstruction::Fatal(msg) => { + (Duration::ZERO, fatal!(effect_builder, "{}", msg).ignore()) + } + CatchUpInstruction::ShutdownForUpgrade => { + info!("CatchUp: shutting down for upgrade"); + self.switch_to_shutdown_for_upgrade(); + (Duration::ZERO, Effects::new()) + } + CatchUpInstruction::CommitGenesis => match self.commit_genesis(effect_builder) { + GenesisInstruction::Validator(duration, effects) => { + info!("CatchUp: switch to Validate at genesis"); + self.block_synchronizer.purge(); + self.state = ReactorState::Validate; + (duration, effects) + } + GenesisInstruction::NonValidator(duration, effects) => { + info!("CatchUp: non-validator committed genesis"); + self.state = ReactorState::CatchUp; + (duration, effects) + } + GenesisInstruction::Fatal(msg) => ( + Duration::ZERO, + fatal!(effect_builder, "failed to commit genesis: {}", msg).ignore(), + ), + }, + CatchUpInstruction::CommitUpgrade => match self.commit_upgrade(effect_builder) { + Ok(effects) => { + info!("CatchUp: switch to Upgrading"); + self.block_synchronizer.purge(); + self.state = ReactorState::Upgrading; + self.last_progress = Timestamp::now(); + self.attempts = 0; + (Duration::ZERO, effects) + } + Err(msg) => ( + Duration::ZERO, + fatal!(effect_builder, "failed to commit upgrade: {}", msg).ignore(), + ), + }, + CatchUpInstruction::CheckLater(msg, wait) => { + debug!("CatchUp: {}", msg); + (wait, Effects::new()) + } + CatchUpInstruction::Do(wait, effects) => { + debug!("CatchUp: node is processing effects"); + (wait, effects) + } + CatchUpInstruction::CaughtUp => { + if let Err(msg) = self.refresh_contract_runtime() { + return (Duration::ZERO, fatal!(effect_builder, "{}", msg).ignore()); + } + // shut down instead of switching to KeepUp if catch up 
and shutdown mode is + // enabled + if self.sync_handling.is_complete_block() { + info!("CatchUp: immediate shutdown after catching up"); + self.state = ReactorState::ShutdownAfterCatchingUp; + (Duration::ZERO, Effects::new()) + } else { + // purge to avoid polluting the status endpoints w/ stale state + info!("CatchUp: switch to KeepUp"); + self.block_synchronizer.purge(); + self.state = ReactorState::KeepUp; + (Duration::ZERO, Effects::new()) + } + } + }, + ReactorState::KeepUp => match self.keep_up_instruction(effect_builder, rng) { + KeepUpInstruction::Fatal(msg) => { + (Duration::ZERO, fatal!(effect_builder, "{}", msg).ignore()) + } + KeepUpInstruction::ShutdownForUpgrade => { + info!("KeepUp: switch to ShutdownForUpgrade"); + self.switch_to_shutdown_for_upgrade(); + (Duration::ZERO, Effects::new()) + } + KeepUpInstruction::CheckLater(msg, wait) => { + debug!("KeepUp: {}", msg); + (wait, Effects::new()) + } + KeepUpInstruction::Do(wait, effects) => { + debug!("KeepUp: node is processing effects"); + (wait, effects) + } + KeepUpInstruction::CatchUp => { + self.block_synchronizer.purge(); + self.sync_leaper.purge(); + info!("KeepUp: switch to CatchUp"); + self.state = ReactorState::CatchUp; + (Duration::ZERO, Effects::new()) + } + KeepUpInstruction::Validate(effects) => { + info!("KeepUp: switch to Validate"); + // purge to avoid polluting the status endpoints w/ stale state + self.block_synchronizer.purge(); + self.state = ReactorState::Validate; + (Duration::ZERO, effects) + } + }, + ReactorState::Validate => match self.validate_instruction(effect_builder, rng) { + ValidateInstruction::Fatal(msg) => { + (Duration::ZERO, fatal!(effect_builder, "{}", msg).ignore()) + } + ValidateInstruction::ShutdownForUpgrade => { + info!("Validate: switch to ShutdownForUpgrade"); + self.switch_to_shutdown_for_upgrade(); + (Duration::ZERO, Effects::new()) + } + ValidateInstruction::CheckLater(msg, wait) => { + debug!("Validate: {}", msg); + (wait, Effects::new()) + } + 
ValidateInstruction::Do(wait, effects) => { + trace!("Validate: node is processing effects"); + (wait, effects) + } + ValidateInstruction::CatchUp => match self.deactivate_consensus_voting() { + Ok(_) => { + info!("Validate: switch to CatchUp"); + self.state = ReactorState::CatchUp; + (Duration::ZERO, Effects::new()) + } + Err(msg) => (Duration::ZERO, fatal!(effect_builder, "{}", msg).ignore()), + }, + ValidateInstruction::KeepUp => match self.deactivate_consensus_voting() { + Ok(_) => { + info!("Validate: switch to KeepUp"); + self.state = ReactorState::KeepUp; + (Duration::ZERO, Effects::new()) + } + Err(msg) => (Duration::ZERO, fatal!(effect_builder, "{}", msg).ignore()), + }, + }, + ReactorState::ShutdownForUpgrade => { + match self.upgrade_shutdown_instruction(effect_builder) { + UpgradeShutdownInstruction::Fatal(msg) => ( + Duration::ZERO, + fatal!(effect_builder, "ShutdownForUpgrade: {}", msg).ignore(), + ), + UpgradeShutdownInstruction::CheckLater(msg, wait) => { + debug!("ShutdownForUpgrade: {}", msg); + (wait, Effects::new()) + } + UpgradeShutdownInstruction::Do(wait, effects) => { + trace!("ShutdownForUpgrade: node is processing effects"); + (wait, effects) + } + } + } + ReactorState::ShutdownAfterCatchingUp => { + let effects = effect_builder.immediately().event(|()| { + MainEvent::ControlAnnouncement(ControlAnnouncement::ShutdownAfterCatchingUp) + }); + (Duration::ZERO, effects) + } + } + } + + // NOTE: the order in which components are initialized is purposeful, + // so don't alter the order without understanding the semantics + fn initialize_next_component( + &mut self, + effect_builder: EffectBuilder, + ) -> Option> { + // open the diagnostic port first to make sure it can bind & to be responsive during init. 
+ if let Some(effects) = utils::initialize_component( + effect_builder, + &mut self.diagnostics_port, + MainEvent::DiagnosticsPort(diagnostics_port::Event::Initialize), + ) { + return Some(effects); + } + // init event stream to make sure it can bind & allow early client connection + if let Some(effects) = utils::initialize_component( + effect_builder, + &mut self.event_stream_server, + MainEvent::EventStreamServer(event_stream_server::Event::Initialize), + ) { + return Some(effects); + } + // init upgrade watcher to make sure we have file access & to observe possible upgrade + // this should be init'd before the rest & rpc servers as the status endpoints include + // detected upgrade info. + if let Some(effects) = utils::initialize_component( + effect_builder, + &mut self.upgrade_watcher, + MainEvent::UpgradeWatcher(upgrade_watcher::Event::Initialize), + ) { + return Some(effects); + } + + // initialize transaction buffer from local storage; on a new node this is nearly a noop + // but on a restarting node it can be relatively time consuming (depending upon TTL and + // how many transactions there have been within the TTL) + if let Some(effects) = self + .transaction_buffer + .initialize_component(effect_builder, &self.storage) + { + return Some(effects); + } + + // bring up networking near-to-last to avoid unnecessary premature connectivity + if let Some(effects) = utils::initialize_component( + effect_builder, + &mut self.net, + MainEvent::Network(network::Event::Initialize), + ) { + return Some(effects); + } + + // bring up the BlockSynchronizer after Network to start it's self-perpetuating + // dishonest peer announcing behavior + if let Some(effects) = utils::initialize_component( + effect_builder, + &mut self.block_synchronizer, + MainEvent::BlockSynchronizer(block_synchronizer::Event::Initialize), + ) { + return Some(effects); + } + + // bring up rpc and rest server last to defer complications (such as put_transaction) and + // for it to be able to answer 
to /status, which requires various other components to be + // initialized + if let Some(effects) = utils::initialize_component( + effect_builder, + &mut self.rest_server, + MainEvent::RestServer(rest_server::Event::Initialize), + ) { + return Some(effects); + } + + // bring up binary port + if let Some(effects) = utils::initialize_component( + effect_builder, + &mut self.binary_port, + MainEvent::BinaryPort(binary_port::Event::Initialize), + ) { + return Some(effects); + } + + None + } + + fn commit_genesis(&mut self, effect_builder: EffectBuilder) -> GenesisInstruction { + let genesis_timestamp = match self + .chainspec + .protocol_config + .activation_point + .genesis_timestamp() + { + None => { + return GenesisInstruction::Fatal( + "CommitGenesis: invalid chainspec activation point".to_string(), + ); + } + Some(timestamp) => timestamp, + }; + + // global state starts empty and gets populated based upon chainspec artifacts + let post_state_hash = match self.contract_runtime.commit_genesis( + self.chainspec.clone().as_ref(), + self.chainspec_raw_bytes.clone().as_ref(), + ) { + GenesisResult::Fatal(msg) => { + return GenesisInstruction::Fatal(msg); + } + GenesisResult::Failure(err) => { + return GenesisInstruction::Fatal(format!("genesis error: {}", err)); + } + GenesisResult::Success { + post_state_hash, .. 
+ } => post_state_hash, + }; + + info!( + %post_state_hash, + %genesis_timestamp, + network_name = %self.chainspec.network_config.name, + "CommitGenesis: successful commit; initializing contract runtime" + ); + + let genesis_block_height = 0; + self.initialize_contract_runtime( + genesis_block_height, + post_state_hash, + BlockHash::default(), + Digest::default(), + ); + + let era_id = EraId::default(); + + // as this is a genesis validator, there is no historical syncing necessary + // thus, the retrograde latch is immediately set + self.validator_matrix + .register_retrograde_latch(Some(era_id)); + + // new networks will create a switch block at genesis to + // surface the genesis validators. older networks did not + // have this behavior. + let genesis_switch_block = FinalizedBlock::new( + BlockPayload::default(), + Some(InternalEraReport::default()), + genesis_timestamp, + era_id, + genesis_block_height, + PublicKey::System, + ); + + // this genesis block has no transactions, and will get + // handed off to be stored & marked complete after + // sufficient finality signatures have been collected. 
+ let effects = effect_builder + .enqueue_block_for_execution( + ExecutableBlock::from_finalized_block_and_transactions( + genesis_switch_block, + vec![], + ), + MetaBlockState::new_not_to_be_gossiped(), + ) + .ignore(); + + if self + .chainspec + .network_config + .accounts_config + .is_genesis_validator(self.validator_matrix.public_signing_key()) + { + // validators should switch over and start making blocks + GenesisInstruction::Validator(Duration::ZERO, effects) + } else { + // non-validators should start receiving gossip about the block at height 1 soon + GenesisInstruction::NonValidator(self.control_logic_default_delay.into(), effects) + } + } + + fn upgrading_instruction(&self) -> UpgradingInstruction { + UpgradingInstruction::should_commit_upgrade( + self.should_commit_upgrade(), + self.control_logic_default_delay.into(), + self.last_progress, + self.upgrade_timeout, + ) + } + + fn commit_upgrade( + &mut self, + effect_builder: EffectBuilder, + ) -> Result, String> { + let header = match self.get_local_tip_header()? 
{ + Some(header) if header.is_switch_block() => header, + Some(_) => { + return Err("Latest complete block is not a switch block".to_string()); + } + None => { + return Err("No complete block found in storage".to_string()); + } + }; + + match self.chainspec.upgrade_config_from_parts( + *header.state_root_hash(), + header.protocol_version(), + self.chainspec.protocol_config.activation_point.era_id(), + self.chainspec_raw_bytes.clone(), + ) { + Ok(cfg) => { + let mut effects = Effects::new(); + let next_block_height = header.height() + 1; + effects.extend( + effect_builder + .enqueue_protocol_upgrade( + cfg, + next_block_height, + header.block_hash(), + *header.accumulated_seed(), + ) + .ignore(), + ); + Ok(effects) + } + Err(msg) => Err(msg), + } + } + + pub(super) fn should_shutdown_for_upgrade(&self) -> bool { + let recent_switch_block_headers = match self.storage.read_highest_switch_block_headers(1) { + Ok(headers) => headers, + Err(error) => { + error!( + "{:?}: error getting recent switch block headers: {}", + self.state, error + ); + return false; + } + }; + + if let Some(block_header) = recent_switch_block_headers.last() { + let highest_block_complete = + self.storage.highest_complete_block_height() == Some(block_header.height()); + return highest_block_complete + && self + .upgrade_watcher + .should_upgrade_after(block_header.era_id()); + } + false + } + + pub(super) fn should_commit_upgrade(&self) -> bool { + match self.get_local_tip_header() { + Ok(Some(block_header)) if block_header.is_switch_block() => { + block_header.is_last_block_before_activation(&self.chainspec.protocol_config) + } + Ok(Some(_) | None) => false, + Err(msg) => { + error!("{:?}: {}", self.state, msg); + false + } + } + } + + fn refresh_contract_runtime(&mut self) -> Result<(), String> { + if let Some(block_header) = self.get_local_tip_header()? 
{ + let block_height = block_header.height(); + let state_root_hash = block_header.state_root_hash(); + let block_hash = block_header.block_hash(); + let accumulated_seed = *block_header.accumulated_seed(); + self.initialize_contract_runtime( + block_height + 1, + *state_root_hash, + block_hash, + accumulated_seed, + ); + } + Ok(()) + } + + fn initialize_contract_runtime( + &mut self, + next_block_height: u64, + pre_state_root_hash: Digest, + parent_hash: BlockHash, + parent_seed: Digest, + ) { + // a better approach might be to have an announcement for immediate switch block + // creation, which the contract runtime handles and sets itself into + // the proper state to handle the unexpected block. + // in the meantime, this is expedient. + let initial_pre_state = ExecutionPreState::new( + next_block_height, + pre_state_root_hash, + parent_hash, + parent_seed, + ); + self.contract_runtime.set_initial_state(initial_pre_state); + } + + pub(super) fn update_last_progress( + &mut self, + block_synchronizer_progress: &BlockSynchronizerProgress, + is_sync_back: bool, + ) { + if let BlockSynchronizerProgress::Syncing(_, _, last_progress) = block_synchronizer_progress + { + // do idleness / reattempt checking + let sync_progress = *last_progress; + if sync_progress > self.last_progress { + self.last_progress = sync_progress; + // if any progress has been made, reset attempts + self.attempts = 0; + let state = if is_sync_back { + "Historical".to_string() + } else { + format!("{}", self.state) + }; + debug!( + "{}: last_progress: {} {}", + state, self.last_progress, block_synchronizer_progress + ); + } + if self.last_progress.elapsed() > self.idle_tolerance { + self.attempts += 1; + } + } + } + + fn deactivate_consensus_voting(&mut self) -> Result<(), String> { + let deactivated_era_id = self.consensus.deactivate_current_era()?; + info!( + era_id = %deactivated_era_id, + "{:?}: consensus deactivated", + self.state + ); + Ok(()) + } + + fn switch_to_shutdown_for_upgrade(&mut 
self) { + self.state = ReactorState::ShutdownForUpgrade; + self.switched_to_shutdown_for_upgrade = Timestamp::now(); + } + + fn get_local_tip_header(&self) -> Result, String> { + match self + .storage + .get_highest_complete_block() + .map_err(|err| format!("Could not read highest complete block: {}", err))? + { + Some(local_tip) => Ok(Some(local_tip.take_header())), + None => Ok(None), + } + } +} diff --git a/node/src/reactor/main_reactor/error.rs b/node/src/reactor/main_reactor/error.rs new file mode 100644 index 0000000000..86a58a1a5b --- /dev/null +++ b/node/src/reactor/main_reactor/error.rs @@ -0,0 +1,75 @@ +use thiserror::Error; + +use casper_execution_engine::engine_state; +use casper_types::{bytesrepr, crypto::ErrorExt as CryptoError}; + +use crate::{ + components::{ + binary_port::BinaryPortInitializationError, + contract_runtime::{self, BlockExecutionError}, + diagnostics_port, network, storage, upgrade_watcher, + }, + utils::{ListeningError, LoadError}, +}; + +/// Error type returned by the validator reactor. +#[derive(Debug, Error)] +pub(crate) enum Error { + /// `UpgradeWatcher` component error. + #[error("upgrade watcher error: {0}")] + UpgradeWatcher(#[from] upgrade_watcher::Error), + + /// Metrics-related error + #[error("prometheus (metrics) error: {0}")] + Metrics(#[from] prometheus::Error), + + /// `Network` component error. + #[error("network error: {0}")] + Network(#[from] network::Error), + + /// An error starting one of the HTTP servers. + #[error("http server listening error: {0}")] + HttpServerListening(#[from] ListeningError), + + /// `Storage` component error. + #[error("storage error: {0}")] + Storage(#[from] storage::FatalStorageError), + + /// `Consensus` component error. + #[error("consensus error: {0}")] + Consensus(#[from] anyhow::Error), + + /// `ContractRuntime` component error. + #[error("contract runtime config error: {0}")] + ContractRuntime(#[from] contract_runtime::ConfigError), + + /// Block execution error. 
+ #[error(transparent)] + BlockExecution(#[from] BlockExecutionError), + + /// Engine state error. + #[error(transparent)] + EngineState(#[from] engine_state::Error), + + /// [`bytesrepr`] error. + #[error("bytesrepr error: {0}")] + BytesRepr(bytesrepr::Error), + + /// `DiagnosticsPort` component error. + #[error("diagnostics port: {0}")] + DiagnosticsPort(#[from] diagnostics_port::Error), + + /// Error while loading the signing key pair. + #[error("signing key pair load error: {0}")] + LoadSigningKeyPair(#[from] LoadError), + + /// `BinaryPort` component error. + #[error("binary port: {0}")] + BinaryPort(#[from] BinaryPortInitializationError), +} + +impl From for Error { + fn from(err: bytesrepr::Error) -> Self { + Self::BytesRepr(err) + } +} diff --git a/node/src/reactor/main_reactor/event.rs b/node/src/reactor/main_reactor/event.rs new file mode 100644 index 0000000000..52ec2cd6b8 --- /dev/null +++ b/node/src/reactor/main_reactor/event.rs @@ -0,0 +1,621 @@ +use std::fmt::{self, Debug, Display, Formatter}; + +use derive_more::From; +use serde::Serialize; + +use casper_types::{ + system::auction::EraValidators, Block, BlockHeader, BlockV2, EraId, FinalitySignature, + FinalitySignatureV2, Transaction, +}; + +use crate::{ + components::{ + binary_port, block_accumulator, + block_synchronizer::{self, GlobalStateSynchronizerEvent, TrieAccumulatorEvent}, + block_validator, consensus, contract_runtime, diagnostics_port, event_stream_server, + fetcher, gossiper, + network::{self, GossipedAddress}, + rest_server, shutdown_trigger, storage, sync_leaper, transaction_acceptor, + transaction_buffer, upgrade_watcher, + }, + effect::{ + announcements::{ + BlockAccumulatorAnnouncement, ConsensusAnnouncement, ContractRuntimeAnnouncement, + ControlAnnouncement, FatalAnnouncement, FetchedNewBlockAnnouncement, + FetchedNewFinalitySignatureAnnouncement, GossiperAnnouncement, MetaBlockAnnouncement, + PeerBehaviorAnnouncement, TransactionAcceptorAnnouncement, + 
TransactionBufferAnnouncement, UnexecutedBlockAnnouncement, UpgradeWatcherAnnouncement, + }, + diagnostics_port::DumpConsensusStateRequest, + incoming::{ + ConsensusDemand, ConsensusMessageIncoming, FinalitySignatureIncoming, GossiperIncoming, + NetRequestIncoming, NetResponseIncoming, TrieDemand, TrieRequestIncoming, + TrieResponseIncoming, + }, + requests::{ + AcceptTransactionRequest, BeginGossipRequest, BlockAccumulatorRequest, + BlockSynchronizerRequest, BlockValidationRequest, ChainspecRawBytesRequest, + ConsensusRequest, ContractRuntimeRequest, FetcherRequest, MakeBlockExecutableRequest, + MarkBlockCompletedRequest, MetricsRequest, NetworkInfoRequest, NetworkRequest, + ReactorInfoRequest, RestRequest, SetNodeStopRequest, StorageRequest, + SyncGlobalStateRequest, TransactionBufferRequest, TrieAccumulatorRequest, + UpgradeWatcherRequest, + }, + }, + protocol::Message, + reactor::ReactorEvent, + types::{BlockExecutionResultsOrChunk, LegacyDeploy, SyncLeap, TrieOrChunk}, +}; +use casper_storage::block_store::types::ApprovalsHashes; + +// Enforce an upper bound for the `MainEvent` size, which is already quite hefty. +// 192 is six 256 bit copies, ideally we'd be below, but for now we enforce this as an upper limit. +// 200 is where the `large_enum_variant` clippy lint draws the line as well. +const _MAIN_EVENT_SIZE: usize = size_of::(); +//const_assert!(_MAIN_EVENT_SIZE <= 192); + +/// Top-level event for the reactor. 
+#[derive(Debug, From, Serialize)] +#[must_use] +pub(crate) enum MainEvent { + #[from] + ControlAnnouncement(ControlAnnouncement), + #[from] + FatalAnnouncement(FatalAnnouncement), + + /// Check the status of the reactor, should only be raised by the reactor itself + ReactorCrank, + + #[from] + UpgradeWatcher(#[serde(skip_serializing)] upgrade_watcher::Event), + #[from] + UpgradeWatcherRequest(#[serde(skip_serializing)] UpgradeWatcherRequest), + #[from] + UpgradeWatcherAnnouncement(#[serde(skip_serializing)] UpgradeWatcherAnnouncement), + #[from] + BinaryPort(#[serde(skip_serializing)] binary_port::Event), + #[from] + RestServer(#[serde(skip_serializing)] rest_server::Event), + #[from] + MetricsRequest(#[serde(skip_serializing)] MetricsRequest), + #[from] + ChainspecRawBytesRequest(#[serde(skip_serializing)] ChainspecRawBytesRequest), + #[from] + EventStreamServer(#[serde(skip_serializing)] event_stream_server::Event), + #[from] + ShutdownTrigger(shutdown_trigger::Event), + #[from] + DiagnosticsPort(diagnostics_port::Event), + #[from] + DumpConsensusStateRequest(DumpConsensusStateRequest), + #[from] + Network(network::Event), + #[from] + NetworkRequest(#[serde(skip_serializing)] NetworkRequest), + #[from] + NetworkInfoRequest(#[serde(skip_serializing)] NetworkInfoRequest), + #[from] + NetworkPeerBehaviorAnnouncement(PeerBehaviorAnnouncement), + #[from] + NetworkPeerRequestingData(NetRequestIncoming), + #[from] + NetworkPeerProvidingData(NetResponseIncoming), + #[from] + AddressGossiper(gossiper::Event), + #[from] + AddressGossiperCrank(BeginGossipRequest), + #[from] + AddressGossiperIncoming(GossiperIncoming), + #[from] + AddressGossiperAnnouncement(#[serde(skip_serializing)] GossiperAnnouncement), + #[from] + SyncLeaper(sync_leaper::Event), + #[from] + SyncLeapFetcher(#[serde(skip_serializing)] fetcher::Event), + #[from] + SyncLeapFetcherRequest(#[serde(skip_serializing)] FetcherRequest), + #[from] + Consensus(#[serde(skip_serializing)] consensus::Event), + 
#[from] + ConsensusMessageIncoming(ConsensusMessageIncoming), + #[from] + ConsensusDemand(ConsensusDemand), + #[from] + ConsensusAnnouncement(#[serde(skip_serializing)] ConsensusAnnouncement), + #[from] + BlockHeaderFetcher(#[serde(skip_serializing)] fetcher::Event), + #[from] + BlockHeaderFetcherRequest(#[serde(skip_serializing)] FetcherRequest), + #[from] + BlockValidator(#[serde(skip_serializing)] block_validator::Event), + #[from] + BlockValidatorRequest(#[serde(skip_serializing)] BlockValidationRequest), + #[from] + BlockAccumulator(#[serde(skip_serializing)] block_accumulator::Event), + #[from] + BlockAccumulatorRequest(#[serde(skip_serializing)] BlockAccumulatorRequest), + #[from] + BlockAccumulatorAnnouncement(#[serde(skip_serializing)] BlockAccumulatorAnnouncement), + #[from] + BlockSynchronizer(#[serde(skip_serializing)] block_synchronizer::Event), + #[from] + BlockSynchronizerRequest(#[serde(skip_serializing)] BlockSynchronizerRequest), + + #[from] + ApprovalsHashesFetcher(#[serde(skip_serializing)] fetcher::Event), + #[from] + ApprovalsHashesFetcherRequest(#[serde(skip_serializing)] FetcherRequest), + + #[from] + BlockGossiper(#[serde(skip_serializing)] gossiper::Event), + #[from] + BlockGossiperIncoming(GossiperIncoming), + #[from] + BlockGossiperAnnouncement(#[serde(skip_serializing)] GossiperAnnouncement), + #[from] + BlockFetcher(#[serde(skip_serializing)] fetcher::Event), + #[from] + BlockFetcherRequest(#[serde(skip_serializing)] FetcherRequest), + #[from] + BlockFetcherAnnouncement(#[serde(skip_serializing)] FetchedNewBlockAnnouncement), + #[from] + MakeBlockExecutableRequest(MakeBlockExecutableRequest), + #[from] + MarkBlockCompletedRequest(MarkBlockCompletedRequest), + #[from] + FinalitySignatureIncoming(FinalitySignatureIncoming), + #[from] + FinalitySignatureGossiper(#[serde(skip_serializing)] gossiper::Event), + #[from] + FinalitySignatureGossiperIncoming(GossiperIncoming), + #[from] + FinalitySignatureGossiperAnnouncement( + 
#[serde(skip_serializing)] GossiperAnnouncement, + ), + #[from] + FinalitySignatureFetcher(#[serde(skip_serializing)] fetcher::Event), + #[from] + FinalitySignatureFetcherRequest(#[serde(skip_serializing)] FetcherRequest), + #[from] + FinalitySignatureFetcherAnnouncement( + #[serde(skip_serializing)] FetchedNewFinalitySignatureAnnouncement, + ), + #[from] + TransactionAcceptor(#[serde(skip_serializing)] transaction_acceptor::Event), + #[from] + AcceptTransactionRequest(AcceptTransactionRequest), + #[from] + TransactionAcceptorAnnouncement(#[serde(skip_serializing)] TransactionAcceptorAnnouncement), + #[from] + TransactionGossiper(#[serde(skip_serializing)] gossiper::Event), + #[from] + TransactionGossiperIncoming(GossiperIncoming), + #[from] + TransactionGossiperAnnouncement(#[serde(skip_serializing)] GossiperAnnouncement), + #[from] + TransactionBuffer(#[serde(skip_serializing)] transaction_buffer::Event), + #[from] + TransactionBufferAnnouncement(#[serde(skip_serializing)] TransactionBufferAnnouncement), + #[from] + LegacyDeployFetcher(#[serde(skip_serializing)] fetcher::Event), + #[from] + LegacyDeployFetcherRequest(#[serde(skip_serializing)] FetcherRequest), + #[from] + TransactionFetcher(#[serde(skip_serializing)] fetcher::Event), + #[from] + TransactionFetcherRequest(#[serde(skip_serializing)] FetcherRequest), + #[from] + TransactionBufferRequest(TransactionBufferRequest), + #[from] + ContractRuntime(contract_runtime::Event), + #[from] + ContractRuntimeRequest(ContractRuntimeRequest), + #[from] + ContractRuntimeAnnouncement(#[serde(skip_serializing)] ContractRuntimeAnnouncement), + #[from] + TrieOrChunkFetcher(#[serde(skip_serializing)] fetcher::Event), + #[from] + TrieOrChunkFetcherRequest(#[serde(skip_serializing)] FetcherRequest), + #[from] + BlockExecutionResultsOrChunkFetcher( + #[serde(skip_serializing)] fetcher::Event, + ), + #[from] + BlockExecutionResultsOrChunkFetcherRequest( + #[serde(skip_serializing)] FetcherRequest, + ), + #[from] + 
TrieRequestIncoming(TrieRequestIncoming), + #[from] + TrieDemand(TrieDemand), + #[from] + TrieResponseIncoming(TrieResponseIncoming), + #[from] + Storage(storage::Event), + #[from] + StorageRequest(StorageRequest), + #[from] + SetNodeStopRequest(SetNodeStopRequest), + #[from] + MainReactorRequest(ReactorInfoRequest), + #[from] + MetaBlockAnnouncement(MetaBlockAnnouncement), + #[from] + UnexecutedBlockAnnouncement(UnexecutedBlockAnnouncement), + + // Event related to figuring out validators for blocks after upgrades. + GotBlockAfterUpgradeEraValidators(EraId, EraValidators, EraValidators), +} + +impl ReactorEvent for MainEvent { + fn is_control(&self) -> bool { + matches!(self, MainEvent::ControlAnnouncement(_)) + } + + fn try_into_control(self) -> Option { + if let Self::ControlAnnouncement(ctrl_ann) = self { + Some(ctrl_ann) + } else { + None + } + } + + #[inline] + fn description(&self) -> &'static str { + match self { + MainEvent::ReactorCrank => "ReactorCrank", + MainEvent::Network(_) => "Network", + MainEvent::SyncLeaper(_) => "SyncLeaper", + MainEvent::TransactionBuffer(_) => "TransactionBuffer", + MainEvent::Storage(_) => "Storage", + MainEvent::RestServer(_) => "RestServer", + MainEvent::EventStreamServer(_) => "EventStreamServer", + MainEvent::UpgradeWatcher(_) => "UpgradeWatcher", + MainEvent::Consensus(_) => "Consensus", + MainEvent::TransactionAcceptor(_) => "TransactionAcceptor", + MainEvent::AcceptTransactionRequest(_) => "AcceptTransactionRequest", + MainEvent::LegacyDeployFetcher(_) => "LegacyDeployFetcher", + MainEvent::TransactionFetcher(_) => "TransactionFetcher", + MainEvent::TransactionGossiper(_) => "TransactionGossiper", + MainEvent::FinalitySignatureGossiper(_) => "FinalitySignatureGossiper", + MainEvent::AddressGossiper(_) => "AddressGossiper", + MainEvent::BlockValidator(_) => "BlockValidator", + MainEvent::ContractRuntimeRequest(_) => "ContractRuntimeRequest", + MainEvent::BlockHeaderFetcher(_) => "BlockHeaderFetcher", + 
MainEvent::TrieOrChunkFetcher(_) => "TrieOrChunkFetcher", + MainEvent::BlockExecutionResultsOrChunkFetcher(_) => { + "BlockExecutionResultsOrChunkFetcher" + } + MainEvent::FinalitySignatureFetcher(_) => "FinalitySignatureFetcher", + MainEvent::SyncLeapFetcher(_) => "SyncLeapFetcher", + MainEvent::ApprovalsHashesFetcher(_) => "ApprovalsHashesFetcher", + MainEvent::ShutdownTrigger(_) => "ShutdownTrigger", + MainEvent::DiagnosticsPort(_) => "DiagnosticsPort", + MainEvent::NetworkRequest(_) => "NetworkRequest", + MainEvent::NetworkInfoRequest(_) => "NetworkInfoRequest", + MainEvent::BlockHeaderFetcherRequest(_) => "BlockHeaderFetcherRequest", + MainEvent::TrieOrChunkFetcherRequest(_) => "TrieOrChunkFetcherRequest", + MainEvent::BlockExecutionResultsOrChunkFetcherRequest(_) => { + "BlockExecutionResultsOrChunkFetcherRequest" + } + MainEvent::LegacyDeployFetcherRequest(_) => "LegacyDeployFetcherRequest", + MainEvent::TransactionFetcherRequest(_) => "TransactionFetcherRequest", + MainEvent::FinalitySignatureFetcherRequest(_) => "FinalitySignatureFetcherRequest", + MainEvent::SyncLeapFetcherRequest(_) => "SyncLeapFetcherRequest", + MainEvent::ApprovalsHashesFetcherRequest(_) => "ApprovalsHashesFetcherRequest", + MainEvent::TransactionBufferRequest(_) => "TransactionBufferRequest", + MainEvent::BlockValidatorRequest(_) => "BlockValidatorRequest", + MainEvent::MetricsRequest(_) => "MetricsRequest", + MainEvent::ChainspecRawBytesRequest(_) => "ChainspecRawBytesRequest", + MainEvent::UpgradeWatcherRequest(_) => "UpgradeWatcherRequest", + MainEvent::StorageRequest(_) => "StorageRequest", + MainEvent::MarkBlockCompletedRequest(_) => "MarkBlockCompletedRequest", + MainEvent::DumpConsensusStateRequest(_) => "DumpConsensusStateRequest", + MainEvent::ControlAnnouncement(_) => "ControlAnnouncement", + MainEvent::FatalAnnouncement(_) => "FatalAnnouncement", + MainEvent::TransactionAcceptorAnnouncement(_) => "TransactionAcceptorAnnouncement", + MainEvent::ConsensusAnnouncement(_) => 
"ConsensusAnnouncement", + MainEvent::ContractRuntimeAnnouncement(_) => "ContractRuntimeAnnouncement", + MainEvent::TransactionGossiperAnnouncement(_) => "TransactionGossiperAnnouncement", + MainEvent::AddressGossiperAnnouncement(_) => "AddressGossiperAnnouncement", + MainEvent::UpgradeWatcherAnnouncement(_) => "UpgradeWatcherAnnouncement", + MainEvent::NetworkPeerBehaviorAnnouncement(_) => "BlocklistAnnouncement", + MainEvent::TransactionBufferAnnouncement(_) => "TransactionBufferAnnouncement", + MainEvent::FinalitySignatureFetcherAnnouncement(_) => { + "FinalitySignatureFetcherAnnouncement" + } + MainEvent::AddressGossiperCrank(_) => "BeginAddressGossipRequest", + MainEvent::ConsensusMessageIncoming(_) => "ConsensusMessageIncoming", + MainEvent::ConsensusDemand(_) => "ConsensusDemand", + MainEvent::TransactionGossiperIncoming(_) => "TransactionGossiperIncoming", + MainEvent::FinalitySignatureGossiperIncoming(_) => "FinalitySignatureGossiperIncoming", + MainEvent::AddressGossiperIncoming(_) => "AddressGossiperIncoming", + MainEvent::NetworkPeerRequestingData(_) => "NetRequestIncoming", + MainEvent::NetworkPeerProvidingData(_) => "NetResponseIncoming", + MainEvent::TrieRequestIncoming(_) => "TrieRequestIncoming", + MainEvent::TrieDemand(_) => "TrieDemand", + MainEvent::TrieResponseIncoming(_) => "TrieResponseIncoming", + MainEvent::FinalitySignatureIncoming(_) => "FinalitySignatureIncoming", + MainEvent::ContractRuntime(_) => "ContractRuntime", + MainEvent::FinalitySignatureGossiperAnnouncement(_) => { + "FinalitySignatureGossiperAnnouncement" + } + MainEvent::BlockAccumulator(_) => "BlockAccumulator", + MainEvent::BlockAccumulatorRequest(_) => "BlockAccumulatorRequest", + MainEvent::BlockAccumulatorAnnouncement(_) => "BlockAccumulatorAnnouncement", + MainEvent::BlockSynchronizer(_) => "BlockSynchronizer", + MainEvent::BlockSynchronizerRequest(_) => "BlockSynchronizerRequest", + MainEvent::BlockGossiper(_) => "BlockGossiper", + MainEvent::BlockGossiperIncoming(_) 
=> "BlockGossiperIncoming", + MainEvent::BlockGossiperAnnouncement(_) => "BlockGossiperAnnouncement", + MainEvent::BlockFetcher(_) => "BlockFetcher", + MainEvent::BlockFetcherRequest(_) => "BlockFetcherRequest", + MainEvent::BlockFetcherAnnouncement(_) => "BlockFetcherAnnouncement", + MainEvent::SetNodeStopRequest(_) => "SetNodeStopRequest", + MainEvent::MainReactorRequest(_) => "MainReactorRequest", + MainEvent::MakeBlockExecutableRequest(_) => "MakeBlockExecutableRequest", + MainEvent::MetaBlockAnnouncement(_) => "MetaBlockAnnouncement", + MainEvent::UnexecutedBlockAnnouncement(_) => "UnexecutedBlockAnnouncement", + MainEvent::GotBlockAfterUpgradeEraValidators(_, _, _) => { + "GotImmediateSwitchBlockEraValidators" + } + MainEvent::BinaryPort(_) => "BinaryPort", + } + } +} + +impl Display for MainEvent { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self { + MainEvent::ReactorCrank => write!(f, "reactor crank"), + MainEvent::Storage(event) => write!(f, "storage: {}", event), + MainEvent::Network(event) => write!(f, "network: {}", event), + MainEvent::SyncLeaper(event) => write!(f, "sync leaper: {}", event), + MainEvent::TransactionBuffer(event) => write!(f, "transaction buffer: {}", event), + MainEvent::RestServer(event) => write!(f, "rest server: {}", event), + MainEvent::EventStreamServer(event) => { + write!(f, "event stream server: {}", event) + } + MainEvent::UpgradeWatcher(event) => write!(f, "upgrade watcher: {}", event), + MainEvent::Consensus(event) => write!(f, "consensus: {}", event), + MainEvent::TransactionAcceptor(event) => write!(f, "transaction acceptor: {}", event), + MainEvent::AcceptTransactionRequest(req) => write!(f, "{}", req), + MainEvent::LegacyDeployFetcher(event) => write!(f, "legacy deploy fetcher: {}", event), + MainEvent::TransactionFetcher(event) => write!(f, "transaction fetcher: {}", event), + MainEvent::TransactionGossiper(event) => write!(f, "transaction gossiper: {}", event), + 
MainEvent::FinalitySignatureGossiper(event) => { + write!(f, "block signature gossiper: {}", event) + } + MainEvent::AddressGossiper(event) => write!(f, "address gossiper: {}", event), + MainEvent::ContractRuntimeRequest(event) => { + write!(f, "contract runtime request: {:?}", event) + } + MainEvent::BlockValidator(event) => write!(f, "block validator: {}", event), + MainEvent::BlockHeaderFetcher(event) => { + write!(f, "block header fetcher: {}", event) + } + MainEvent::TrieOrChunkFetcher(event) => { + write!(f, "trie or chunk fetcher: {}", event) + } + MainEvent::BlockExecutionResultsOrChunkFetcher(event) => { + write!(f, "block execution results or chunk fetcher: {}", event) + } + MainEvent::FinalitySignatureFetcher(event) => { + write!(f, "finality signature fetcher: {}", event) + } + MainEvent::SyncLeapFetcher(event) => { + write!(f, "sync leap fetcher: {}", event) + } + MainEvent::ApprovalsHashesFetcher(event) => { + write!(f, "approvals hashes fetcher: {}", event) + } + MainEvent::BlockAccumulator(event) => { + write!(f, "block accumulator: {}", event) + } + MainEvent::BlockAccumulatorRequest(req) => { + write!(f, "block accumulator request: {}", req) + } + MainEvent::BlockAccumulatorAnnouncement(ann) => { + write!(f, "block accumulator announcement: {}", ann) + } + MainEvent::BlockSynchronizer(event) => { + write!(f, "block synchronizer: {}", event) + } + MainEvent::BlockSynchronizerRequest(req) => { + write!(f, "block synchronizer request: {}", req) + } + MainEvent::ShutdownTrigger(event) => write!(f, "shutdown trigger: {}", event), + MainEvent::DiagnosticsPort(event) => write!(f, "diagnostics port: {}", event), + MainEvent::NetworkRequest(req) => write!(f, "network request: {}", req), + MainEvent::NetworkInfoRequest(req) => { + write!(f, "network info request: {}", req) + } + MainEvent::ChainspecRawBytesRequest(req) => { + write!(f, "chainspec loader request: {}", req) + } + MainEvent::UpgradeWatcherRequest(req) => { + write!(f, "upgrade watcher request: 
{}", req) + } + MainEvent::StorageRequest(req) => write!(f, "storage request: {}", req), + MainEvent::MarkBlockCompletedRequest(req) => { + write!(f, "mark block completed request: {}", req) + } + MainEvent::BlockHeaderFetcherRequest(request) => { + write!(f, "block header fetcher request: {}", request) + } + MainEvent::TrieOrChunkFetcherRequest(request) => { + write!(f, "trie or chunk fetcher request: {}", request) + } + MainEvent::BlockExecutionResultsOrChunkFetcherRequest(request) => { + write!( + f, + "block execution results or chunk fetcher request: {}", + request + ) + } + MainEvent::LegacyDeployFetcherRequest(request) => { + write!(f, "legacy deploy fetcher request: {}", request) + } + MainEvent::TransactionFetcherRequest(request) => { + write!(f, "transaction fetcher request: {}", request) + } + MainEvent::FinalitySignatureFetcherRequest(request) => { + write!(f, "finality signature fetcher request: {}", request) + } + MainEvent::SyncLeapFetcherRequest(request) => { + write!(f, "sync leap fetcher request: {}", request) + } + MainEvent::ApprovalsHashesFetcherRequest(request) => { + write!(f, "approvals hashes fetcher request: {}", request) + } + MainEvent::AddressGossiperCrank(request) => { + write!(f, "begin address gossip request: {}", request) + } + MainEvent::TransactionBufferRequest(req) => { + write!(f, "transaction buffer request: {}", req) + } + MainEvent::BlockValidatorRequest(req) => { + write!(f, "block validator request: {}", req) + } + MainEvent::MetricsRequest(req) => write!(f, "metrics request: {}", req), + MainEvent::ControlAnnouncement(ctrl_ann) => write!(f, "control: {}", ctrl_ann), + MainEvent::FatalAnnouncement(fatal_ann) => write!(f, "fatal: {}", fatal_ann), + MainEvent::DumpConsensusStateRequest(req) => { + write!(f, "dump consensus state: {}", req) + } + MainEvent::TransactionAcceptorAnnouncement(ann) => { + write!(f, "transaction acceptor announcement: {}", ann) + } + MainEvent::ConsensusAnnouncement(ann) => { + write!(f, "consensus 
announcement: {}", ann) + } + MainEvent::ContractRuntimeAnnouncement(ann) => { + write!(f, "block-executor announcement: {}", ann) + } + MainEvent::TransactionGossiperAnnouncement(ann) => { + write!(f, "transaction gossiper announcement: {}", ann) + } + MainEvent::FinalitySignatureGossiperAnnouncement(ann) => { + write!(f, "block signature gossiper announcement: {}", ann) + } + MainEvent::AddressGossiperAnnouncement(ann) => { + write!(f, "address gossiper announcement: {}", ann) + } + MainEvent::TransactionBufferAnnouncement(ann) => { + write!(f, "transaction buffer announcement: {}", ann) + } + MainEvent::UpgradeWatcherAnnouncement(ann) => { + write!(f, "chainspec loader announcement: {}", ann) + } + MainEvent::NetworkPeerBehaviorAnnouncement(ann) => { + write!(f, "blocklist announcement: {}", ann) + } + MainEvent::FinalitySignatureFetcherAnnouncement(ann) => { + write!(f, "finality signature fetcher announcement: {}", ann) + } + MainEvent::ConsensusMessageIncoming(inner) => Display::fmt(inner, f), + MainEvent::ConsensusDemand(inner) => Display::fmt(inner, f), + MainEvent::TransactionGossiperIncoming(inner) => Display::fmt(inner, f), + MainEvent::FinalitySignatureGossiperIncoming(inner) => Display::fmt(inner, f), + MainEvent::AddressGossiperIncoming(inner) => Display::fmt(inner, f), + MainEvent::NetworkPeerRequestingData(inner) => Display::fmt(inner, f), + MainEvent::NetworkPeerProvidingData(inner) => Display::fmt(inner, f), + MainEvent::TrieRequestIncoming(inner) => Display::fmt(inner, f), + MainEvent::TrieDemand(inner) => Display::fmt(inner, f), + MainEvent::TrieResponseIncoming(inner) => Display::fmt(inner, f), + MainEvent::FinalitySignatureIncoming(inner) => Display::fmt(inner, f), + MainEvent::ContractRuntime(inner) => Display::fmt(inner, f), + MainEvent::BlockGossiper(inner) => Display::fmt(inner, f), + MainEvent::BlockGossiperIncoming(inner) => Display::fmt(inner, f), + MainEvent::BlockGossiperAnnouncement(inner) => Display::fmt(inner, f), + 
MainEvent::BlockFetcher(inner) => Display::fmt(inner, f), + MainEvent::BlockFetcherRequest(inner) => Display::fmt(inner, f), + MainEvent::BlockFetcherAnnouncement(inner) => Display::fmt(inner, f), + MainEvent::SetNodeStopRequest(inner) => Display::fmt(inner, f), + MainEvent::MainReactorRequest(inner) => Display::fmt(inner, f), + MainEvent::MakeBlockExecutableRequest(inner) => Display::fmt(inner, f), + MainEvent::MetaBlockAnnouncement(inner) => Display::fmt(inner, f), + MainEvent::UnexecutedBlockAnnouncement(inner) => Display::fmt(inner, f), + MainEvent::GotBlockAfterUpgradeEraValidators(era_id, _, _) => { + write!( + f, + "got era validators for block after an upgrade in era {}", + era_id + ) + } + MainEvent::BinaryPort(inner) => Display::fmt(inner, f), + } + } +} + +impl From for MainEvent { + fn from(request: SyncGlobalStateRequest) -> Self { + MainEvent::BlockSynchronizer(block_synchronizer::Event::GlobalStateSynchronizer( + request.into(), + )) + } +} + +impl From for MainEvent { + fn from(request: TrieAccumulatorRequest) -> Self { + MainEvent::BlockSynchronizer(block_synchronizer::Event::GlobalStateSynchronizer( + GlobalStateSynchronizerEvent::TrieAccumulator(request.into()), + )) + } +} + +impl From for MainEvent { + fn from(event: GlobalStateSynchronizerEvent) -> Self { + MainEvent::BlockSynchronizer(event.into()) + } +} + +impl From for MainEvent { + fn from(event: TrieAccumulatorEvent) -> Self { + MainEvent::BlockSynchronizer(block_synchronizer::Event::GlobalStateSynchronizer( + event.into(), + )) + } +} + +impl From for MainEvent { + fn from(request: RestRequest) -> Self { + MainEvent::RestServer(rest_server::Event::RestRequest(request)) + } +} + +impl From> for MainEvent { + fn from(request: NetworkRequest) -> Self { + MainEvent::NetworkRequest(request.map_payload(Message::from)) + } +} + +impl From>> for MainEvent { + fn from(request: NetworkRequest>) -> Self { + MainEvent::NetworkRequest(request.map_payload(Message::from)) + } +} + +impl From>> for 
MainEvent { + fn from(request: NetworkRequest>) -> Self { + MainEvent::NetworkRequest(request.map_payload(Message::from)) + } +} + +impl From>> for MainEvent { + fn from(request: NetworkRequest>) -> Self { + MainEvent::NetworkRequest(request.map_payload(Message::from)) + } +} + +impl From>> for MainEvent { + fn from(request: NetworkRequest>) -> Self { + MainEvent::NetworkRequest(request.map_payload(Message::from)) + } +} + +impl From for MainEvent { + fn from(request: ConsensusRequest) -> Self { + MainEvent::Consensus(consensus::Event::ConsensusRequest(request)) + } +} diff --git a/node/src/reactor/main_reactor/fetchers.rs b/node/src/reactor/main_reactor/fetchers.rs new file mode 100644 index 0000000000..057447294c --- /dev/null +++ b/node/src/reactor/main_reactor/fetchers.rs @@ -0,0 +1,182 @@ +use datasize::DataSize; +use prometheus::Registry; + +use casper_types::{Block, BlockHeader, FinalitySignature, Transaction}; + +use crate::{ + components::{fetcher, fetcher::Fetcher, Component}, + effect::{announcements::TransactionAcceptorAnnouncement, EffectBuilder, Effects}, + reactor, + reactor::main_reactor::MainEvent, + types::{BlockExecutionResultsOrChunk, LegacyDeploy, SyncLeap, TrieOrChunk}, + utils::Source, + FetcherConfig, NodeRng, +}; +use casper_storage::block_store::types::ApprovalsHashes; + +#[derive(DataSize, Debug)] +pub(super) struct Fetchers { + sync_leap_fetcher: Fetcher, + block_fetcher: Fetcher, + block_header_by_hash_fetcher: Fetcher, + approvals_hashes_fetcher: Fetcher, + finality_signature_fetcher: Fetcher, + legacy_deploy_fetcher: Fetcher, + transaction_fetcher: Fetcher, + trie_or_chunk_fetcher: Fetcher, + block_execution_results_or_chunk_fetcher: Fetcher, +} + +impl Fetchers { + pub(super) fn new( + config: &FetcherConfig, + metrics_registry: &Registry, + ) -> Result { + Ok(Fetchers { + sync_leap_fetcher: Fetcher::new("sync_leap_fetcher", config, metrics_registry)?, + block_header_by_hash_fetcher: Fetcher::new("block_header", config, 
metrics_registry)?, + approvals_hashes_fetcher: Fetcher::new("approvals_hashes", config, metrics_registry)?, + finality_signature_fetcher: Fetcher::new( + "finality_signature_fetcher", + config, + metrics_registry, + )?, + legacy_deploy_fetcher: Fetcher::new("legacy_deploy", config, metrics_registry)?, + block_fetcher: Fetcher::new("block", config, metrics_registry)?, + transaction_fetcher: Fetcher::new("transaction", config, metrics_registry)?, + trie_or_chunk_fetcher: Fetcher::new("trie_or_chunk", config, metrics_registry)?, + block_execution_results_or_chunk_fetcher: Fetcher::new( + "block_execution_results_or_chunk_fetcher", + config, + metrics_registry, + )?, + }) + } + + pub(super) fn dispatch_fetcher_event( + &mut self, + effect_builder: EffectBuilder, + rng: &mut NodeRng, + event: MainEvent, + ) -> Effects { + match event { + MainEvent::BlockFetcher(event) => reactor::wrap_effects( + MainEvent::BlockFetcher, + self.block_fetcher.handle_event(effect_builder, rng, event), + ), + MainEvent::BlockFetcherRequest(request) => reactor::wrap_effects( + MainEvent::BlockFetcher, + self.block_fetcher + .handle_event(effect_builder, rng, request.into()), + ), + MainEvent::SyncLeapFetcher(event) => reactor::wrap_effects( + MainEvent::SyncLeapFetcher, + self.sync_leap_fetcher + .handle_event(effect_builder, rng, event), + ), + MainEvent::SyncLeapFetcherRequest(request) => reactor::wrap_effects( + MainEvent::SyncLeapFetcher, + self.sync_leap_fetcher + .handle_event(effect_builder, rng, request.into()), + ), + MainEvent::BlockHeaderFetcher(event) => reactor::wrap_effects( + MainEvent::BlockHeaderFetcher, + self.block_header_by_hash_fetcher + .handle_event(effect_builder, rng, event), + ), + MainEvent::BlockHeaderFetcherRequest(request) => reactor::wrap_effects( + MainEvent::BlockHeaderFetcher, + self.block_header_by_hash_fetcher + .handle_event(effect_builder, rng, request.into()), + ), + MainEvent::ApprovalsHashesFetcher(event) => reactor::wrap_effects( + 
MainEvent::ApprovalsHashesFetcher, + self.approvals_hashes_fetcher + .handle_event(effect_builder, rng, event), + ), + MainEvent::ApprovalsHashesFetcherRequest(request) => reactor::wrap_effects( + MainEvent::ApprovalsHashesFetcher, + self.approvals_hashes_fetcher + .handle_event(effect_builder, rng, request.into()), + ), + MainEvent::FinalitySignatureFetcher(event) => reactor::wrap_effects( + MainEvent::FinalitySignatureFetcher, + self.finality_signature_fetcher + .handle_event(effect_builder, rng, event), + ), + MainEvent::FinalitySignatureFetcherRequest(request) => reactor::wrap_effects( + MainEvent::FinalitySignatureFetcher, + self.finality_signature_fetcher + .handle_event(effect_builder, rng, request.into()), + ), + MainEvent::LegacyDeployFetcher(event) => reactor::wrap_effects( + MainEvent::LegacyDeployFetcher, + self.legacy_deploy_fetcher + .handle_event(effect_builder, rng, event), + ), + MainEvent::LegacyDeployFetcherRequest(request) => reactor::wrap_effects( + MainEvent::LegacyDeployFetcher, + self.legacy_deploy_fetcher + .handle_event(effect_builder, rng, request.into()), + ), + MainEvent::TransactionFetcher(event) => reactor::wrap_effects( + MainEvent::TransactionFetcher, + self.transaction_fetcher + .handle_event(effect_builder, rng, event), + ), + MainEvent::TransactionFetcherRequest(request) => reactor::wrap_effects( + MainEvent::TransactionFetcher, + self.transaction_fetcher + .handle_event(effect_builder, rng, request.into()), + ), + MainEvent::TrieOrChunkFetcher(event) => reactor::wrap_effects( + MainEvent::TrieOrChunkFetcher, + self.trie_or_chunk_fetcher + .handle_event(effect_builder, rng, event), + ), + MainEvent::TrieOrChunkFetcherRequest(request) => reactor::wrap_effects( + MainEvent::TrieOrChunkFetcher, + self.trie_or_chunk_fetcher + .handle_event(effect_builder, rng, request.into()), + ), + MainEvent::BlockExecutionResultsOrChunkFetcher(event) => reactor::wrap_effects( + MainEvent::BlockExecutionResultsOrChunkFetcher, + 
self.block_execution_results_or_chunk_fetcher.handle_event( + effect_builder, + rng, + event, + ), + ), + MainEvent::BlockExecutionResultsOrChunkFetcherRequest(request) => { + reactor::wrap_effects( + MainEvent::BlockExecutionResultsOrChunkFetcher, + self.block_execution_results_or_chunk_fetcher.handle_event( + effect_builder, + rng, + request.into(), + ), + ) + } + + // MISC DISPATCHING + MainEvent::TransactionAcceptorAnnouncement( + TransactionAcceptorAnnouncement::AcceptedNewTransaction { + transaction, + source, + }, + ) if matches!(source, Source::Peer(..)) => reactor::wrap_effects( + MainEvent::TransactionFetcher, + self.transaction_fetcher.handle_event( + effect_builder, + rng, + fetcher::Event::GotRemotely { + item: Box::new((*transaction).clone()), + source, + }, + ), + ), + // allow non-fetcher events to fall thru + _ => Effects::new(), + } + } +} diff --git a/node/src/reactor/main_reactor/genesis_instruction.rs b/node/src/reactor/main_reactor/genesis_instruction.rs new file mode 100644 index 0000000000..e764d04c48 --- /dev/null +++ b/node/src/reactor/main_reactor/genesis_instruction.rs @@ -0,0 +1,9 @@ +use std::time::Duration; + +use crate::{effect::Effects, reactor::main_reactor::MainEvent}; + +pub(super) enum GenesisInstruction { + Validator(Duration, Effects), + NonValidator(Duration, Effects), + Fatal(String), +} diff --git a/node/src/reactor/main_reactor/keep_up.rs b/node/src/reactor/main_reactor/keep_up.rs new file mode 100644 index 0000000000..659bdf2c44 --- /dev/null +++ b/node/src/reactor/main_reactor/keep_up.rs @@ -0,0 +1,857 @@ +use std::{ + fmt::{Display, Formatter}, + time::Duration, +}; + +use either::Either; +use tracing::{debug, error, info, warn}; + +use casper_storage::data_access_layer::EraValidatorsRequest; +use casper_types::{ActivationPoint, BlockHash, BlockHeader, EraId, Timestamp}; + +use crate::{ + components::{ + block_accumulator::{SyncIdentifier, SyncInstruction}, + block_synchronizer::BlockSynchronizerProgress, + 
storage::HighestOrphanedBlockResult, + sync_leaper, + sync_leaper::{LeapActivityError, LeapState}, + }, + effect::{ + requests::BlockSynchronizerRequest, EffectBuilder, EffectExt, EffectResultExt, Effects, + }, + reactor::main_reactor::{MainEvent, MainReactor}, + types::{GlobalStatesMetadata, MaxTtl, SyncLeap, SyncLeapIdentifier}, + NodeRng, +}; + +pub(super) enum KeepUpInstruction { + Validate(Effects), + Do(Duration, Effects), + CheckLater(String, Duration), + CatchUp, + ShutdownForUpgrade, + Fatal(String), +} + +#[derive(Debug, Clone, Copy)] +enum SyncBackInstruction { + Sync { + sync_hash: BlockHash, + sync_era: EraId, + }, + Syncing, + TtlSynced, + GenesisSynced, + NoSync, +} + +impl Display for SyncBackInstruction { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self { + SyncBackInstruction::Sync { sync_hash, .. } => { + write!(f, "attempt to sync {}", sync_hash) + } + SyncBackInstruction::Syncing => write!(f, "syncing"), + SyncBackInstruction::TtlSynced => write!(f, "ttl reached"), + SyncBackInstruction::GenesisSynced => write!(f, "genesis reached"), + SyncBackInstruction::NoSync => write!(f, "configured to not sync"), + } + } +} + +impl MainReactor { + pub(super) fn keep_up_instruction( + &mut self, + effect_builder: EffectBuilder, + rng: &mut NodeRng, + ) -> KeepUpInstruction { + if self.should_shutdown_for_upgrade() { + // controlled shutdown for protocol upgrade. 
+ return KeepUpInstruction::ShutdownForUpgrade; + } + + // if there is instruction, return to start working on it + // else fall thru with the current best available id for block syncing + let sync_identifier = match self.keep_up_process() { + Either::Right(keep_up_instruction) => return keep_up_instruction, + Either::Left(sync_identifier) => sync_identifier, + }; + debug!( + ?sync_identifier, + "KeepUp: sync identifier {}", + sync_identifier.block_hash() + ); + // we check with the block accumulator before doing sync work as it may be aware of one or + // more blocks that are higher than our current highest block + let sync_instruction = self.block_accumulator.sync_instruction(sync_identifier); + debug!( + ?sync_instruction, + "KeepUp: sync_instruction {}", + sync_instruction.block_hash() + ); + if let Some(keep_up_instruction) = + self.keep_up_sync_instruction(effect_builder, sync_instruction) + { + return keep_up_instruction; + } + + // we appear to be keeping up with the network and have some cycles to get other work done + // check to see if we should attempt to sync a missing historical block (if any) + debug!("KeepUp: keeping up with the network; try to sync an historical block"); + if let Some(keep_up_instruction) = self.sync_back_keep_up_instruction(effect_builder, rng) { + return keep_up_instruction; + } + + // we are keeping up, and don't need to sync an historical block; check to see if this + // node should be participating in consensus this era (necessary for re-start scenarios) + self.keep_up_should_validate(effect_builder, rng) + .unwrap_or_else(|| { + KeepUpInstruction::CheckLater( + "node is keeping up".to_string(), + self.control_logic_default_delay.into(), + ) + }) + } + + fn keep_up_should_validate( + &mut self, + effect_builder: EffectBuilder, + rng: &mut NodeRng, + ) -> Option { + if let ActivationPoint::Genesis(genesis_timestamp) = + self.chainspec.protocol_config.activation_point + { + // this is a non-validator node in KeepUp prior to 
genesis; there is no reason to + // check consensus in this state, and it log spams if we do, so exiting early + if genesis_timestamp > Timestamp::now() { + return None; + } + } + + if self.sync_handling.is_no_sync() { + // node is not permitted to be a validator with no_sync behavior. + return None; + } + + if self.block_synchronizer.forward_progress().is_active() { + debug!("KeepUp: still syncing a block"); + return None; + } + + let queue_depth = self.contract_runtime.queue_depth(); + if queue_depth > 0 { + debug!("KeepUp: should_validate queue_depth {}", queue_depth); + return None; + } + match self.create_required_eras(effect_builder, rng) { + Ok(Some(effects)) => Some(KeepUpInstruction::Validate(effects)), + Ok(None) => None, + Err(msg) => Some(KeepUpInstruction::Fatal(msg)), + } + } + + fn keep_up_process(&mut self) -> Either { + let forward_progress = self.block_synchronizer.forward_progress(); + self.update_last_progress(&forward_progress, false); + match forward_progress { + BlockSynchronizerProgress::Idle => { + // not working on syncing a block (ready to start a new one) + self.keep_up_idle() + } + BlockSynchronizerProgress::Syncing(block_hash, block_height, _) => { + // working on syncing a block + Either::Left(self.keep_up_syncing(block_hash, block_height)) + } + // waiting for execution - forward only + BlockSynchronizerProgress::Executing(block_hash, block_height, era_id) => { + Either::Left(self.keep_up_executing(block_hash, block_height, era_id)) + } + BlockSynchronizerProgress::Synced(block_hash, block_height, era_id) => { + // for a synced forward block -> we have header, body, any referenced deploys, + // sufficient finality (by weight) of signatures, associated global state and + // execution effects. 
+ Either::Left(self.keep_up_synced(block_hash, block_height, era_id)) + } + } + } + + fn keep_up_idle(&mut self) -> Either { + match self.storage.get_highest_complete_block() { + Ok(Some(block)) => Either::Left(SyncIdentifier::LocalTip( + *block.hash(), + block.height(), + block.era_id(), + )), + Ok(None) => { + // something out of the ordinary occurred; it isn't legit to be in keep up mode + // with no complete local blocks. go back to catch up which will either correct + // or handle retry / shutdown behavior. + error!("KeepUp: block synchronizer idle, local storage has no complete blocks"); + Either::Right(KeepUpInstruction::CatchUp) + } + Err(error) => Either::Right(KeepUpInstruction::Fatal(format!( + "failed to read highest complete block: {}", + error + ))), + } + } + + fn keep_up_syncing( + &mut self, + block_hash: BlockHash, + block_height: Option, + ) -> SyncIdentifier { + match block_height { + None => SyncIdentifier::BlockHash(block_hash), + Some(height) => SyncIdentifier::BlockIdentifier(block_hash, height), + } + } + + fn keep_up_executing( + &mut self, + block_hash: BlockHash, + block_height: u64, + era_id: EraId, + ) -> SyncIdentifier { + SyncIdentifier::ExecutingBlockIdentifier(block_hash, block_height, era_id) + } + + fn keep_up_synced( + &mut self, + block_hash: BlockHash, + block_height: u64, + era_id: EraId, + ) -> SyncIdentifier { + debug!("KeepUp: synced block: {}", block_hash); + // important: scrape forward synchronizer here to return it to idle status + self.block_synchronizer.purge_forward(); + SyncIdentifier::SyncedBlockIdentifier(block_hash, block_height, era_id) + } + + fn keep_up_sync_instruction( + &mut self, + effect_builder: EffectBuilder, + sync_instruction: SyncInstruction, + ) -> Option { + match sync_instruction { + SyncInstruction::Leap { .. } | SyncInstruction::LeapIntervalElapsed { .. 
} => { + if !self.sync_handling.is_isolated() { + // the block accumulator is unsure what our block position is relative to the + // network and wants to check peers for their notion of current tip. + // to do this, we switch back to CatchUp which will engage the necessary + // machinery to poll the network via the SyncLeap mechanic. if it turns out + // we are actually at or near tip after all, we simply switch back to KeepUp + // and continue onward. the accumulator is designed to periodically do this + // if we've received no gossip about new blocks from peers within an interval. + // this is to protect against partitioning and is not problematic behavior + // when / if it occurs. + Some(KeepUpInstruction::CatchUp) + } else { + // If the node operates in isolated mode the assumption is that it might not + // have any peers. So going back to CatchUp to query their + // notion of tip might effectively disable nodes components to respond. + // That's why - for isolated mode - we bypass this mechanism. + None + } + } + SyncInstruction::BlockSync { block_hash } => { + info!("KeepUp: BlockSync: {:?}", block_hash); + if self + .block_synchronizer + .register_block_by_hash(block_hash, false) + { + info!(%block_hash, "KeepUp: BlockSync: registered block by hash"); + Some(KeepUpInstruction::Do( + Duration::ZERO, + effect_builder.immediately().event(|_| { + MainEvent::BlockSynchronizerRequest(BlockSynchronizerRequest::NeedNext) + }), + )) + } else { + // this block has already been registered and is being worked on + None + } + } + SyncInstruction::CaughtUp { .. } => { + // the accumulator thinks we are at the tip of the network and we don't need + // to do anything for the next one yet. 
+ None + } + } + } + + fn sync_back_keep_up_instruction( + &mut self, + effect_builder: EffectBuilder, + rng: &mut NodeRng, + ) -> Option { + let sync_back_progress = self.block_synchronizer.historical_progress(); + debug!(?sync_back_progress, "KeepUp: historical sync back progress"); + self.update_last_progress(&sync_back_progress, true); + match self.sync_back_instruction(&sync_back_progress) { + Ok(Some(sbi @ sync_back_instruction)) => match sync_back_instruction { + SyncBackInstruction::NoSync + | SyncBackInstruction::GenesisSynced + | SyncBackInstruction::TtlSynced => { + // we don't need to sync any historical blocks currently, so we clear both the + // historical synchronizer and the sync back leap activity since they will not + // be required anymore + debug!("KeepUp: {}", sbi); + self.block_synchronizer.purge_historical(); + self.sync_leaper.purge(); + None + } + SyncBackInstruction::Syncing => { + debug!("KeepUp: syncing historical; checking later"); + Some(KeepUpInstruction::CheckLater( + format!("historical {}", SyncBackInstruction::Syncing), + self.control_logic_default_delay.into(), + )) + } + SyncBackInstruction::Sync { + sync_hash, + sync_era, + } => { + debug!(%sync_hash, ?sync_era, validator_matrix_eras=?self.validator_matrix.eras(), "KeepUp: historical sync back instruction"); + if self.validator_matrix.has_era(&sync_era) { + Some(self.sync_back_register(effect_builder, rng, sync_hash)) + } else { + Some(self.sync_back_leap(effect_builder, rng, sync_hash)) + } + } + }, + Ok(None) => None, + Err(msg) => Some(KeepUpInstruction::Fatal(msg)), + } + } + + // Attempts to read the validators from the global states of the block after the upgrade and its + // parent; initiates fetching of the missing global states, if any. 
+ fn try_read_validators_for_block_after_upgrade( + &mut self, + effect_builder: EffectBuilder, + global_states_metadata: GlobalStatesMetadata, + ) -> KeepUpInstruction { + // We try to read the validator sets from global states of two blocks - if either returns + // `RootNotFound`, we'll initiate fetching of the corresponding global state. + let effects = async move { + // Send the requests to contract runtime. + let before_era_validators_request = + EraValidatorsRequest::new(global_states_metadata.before_state_hash); + let before_era_validators_result = effect_builder + .get_era_validators_from_contract_runtime(before_era_validators_request) + .await; + + let after_era_validators_request = + EraValidatorsRequest::new(global_states_metadata.after_state_hash); + let after_era_validators_result = effect_builder + .get_era_validators_from_contract_runtime(after_era_validators_request) + .await; + + let lhs = before_era_validators_result.take_era_validators(); + let rhs = after_era_validators_result.take_era_validators(); + + match (lhs, rhs) { + // ++ -> return era validator weights for before & after + (Some(before_era_validators), Some(after_era_validators)) => { + Ok((before_era_validators, after_era_validators)) + } + // -- => Both were absent - fetch global states for both blocks. + (None, None) => Err(vec![ + ( + global_states_metadata.before_hash, + global_states_metadata.before_state_hash, + ), + ( + global_states_metadata.after_hash, + global_states_metadata.after_state_hash, + ), + ]), + // +- => The after-block's global state was missing - return the hashes. + (Some(_), None) => Err(vec![( + global_states_metadata.after_hash, + global_states_metadata.after_state_hash, + )]), + // -+ => The before-block's global state was missing - return the hashes. 
+ (None, Some(_)) => Err(vec![( + global_states_metadata.before_hash, + global_states_metadata.before_state_hash, + )]), + } + } + .result( + // We got the era validators - just emit the event that will cause them to be compared, + // validators matrix to be updated and reactor to be cranked. + move |(before_era_validators, after_era_validators)| { + MainEvent::GotBlockAfterUpgradeEraValidators( + global_states_metadata.after_era_id, + before_era_validators, + after_era_validators, + ) + }, + // A global state was missing - we ask the BlockSynchronizer to fetch what is needed. + |global_states_to_sync| { + MainEvent::BlockSynchronizerRequest(BlockSynchronizerRequest::SyncGlobalStates( + global_states_to_sync, + )) + }, + ); + // In either case, there are effects to be processed by the reactor. + KeepUpInstruction::Do(Duration::ZERO, effects) + } + + fn sync_back_leap( + &mut self, + effect_builder: EffectBuilder, + rng: &mut NodeRng, + parent_hash: BlockHash, + ) -> KeepUpInstruction { + // in this flow, we are leveraging the SyncLeap behavior to go backwards + // rather than forwards. as we walk backwards from tip we know the block hash + // of the parent of the earliest contiguous block we have locally (aka a + // "parent_hash") but we don't know what era that parent block is in and we + // may or may not know the validator set for that era to validate finality + // signatures against. we use the leaper to gain awareness of the necessary + // trusted ancestors to our earliest contiguous block to do necessary validation. + let sync_back_status = self.sync_leaper.leap_status(); + info!( + "KeepUp: historical sync back status {} {}", + parent_hash, sync_back_status + ); + debug!( + ?parent_hash, + ?sync_back_status, + "KeepUp: historical sync back status" + ); + match sync_back_status { + LeapState::Idle => { + debug!("KeepUp: historical sync back idle"); + self.sync_back_leaper_idle(effect_builder, rng, parent_hash, Duration::ZERO) + } + LeapState::Awaiting { .. 
} => KeepUpInstruction::CheckLater( + "KeepUp: historical sync back is awaiting response".to_string(), + self.control_logic_default_delay.into(), + ), + LeapState::Received { + best_available, + from_peers: _, + .. + } => self.sync_back_leap_received(effect_builder, *best_available), + LeapState::Failed { error, .. } => { + self.sync_back_leap_failed(effect_builder, rng, parent_hash, error) + } + } + } + + fn sync_back_leap_failed( + &mut self, + effect_builder: EffectBuilder, + rng: &mut NodeRng, + parent_hash: BlockHash, + error: LeapActivityError, + ) -> KeepUpInstruction { + warn!( + %error, + "KeepUp: failed historical sync back", + ); + self.sync_back_leaper_idle( + effect_builder, + rng, + parent_hash, + self.control_logic_default_delay.into(), + ) + } + + fn sync_back_leaper_idle( + &mut self, + effect_builder: EffectBuilder, + rng: &mut NodeRng, + parent_hash: BlockHash, + offset: Duration, + ) -> KeepUpInstruction { + // we get a random sampling of peers to ask. + let peers_to_ask = self.net.fully_connected_peers_random( + rng, + self.chainspec.core_config.simultaneous_peer_requests as usize, + ); + if peers_to_ask.is_empty() { + return KeepUpInstruction::CheckLater( + "no peers".to_string(), + self.control_logic_default_delay.into(), + ); + } + + // latch accumulator progress to allow sync-leap time to do work + self.block_accumulator.reset_last_progress(); + + let sync_leap_identifier = SyncLeapIdentifier::sync_to_historical(parent_hash); + + let effects = effect_builder.immediately().event(move |_| { + MainEvent::SyncLeaper(sync_leaper::Event::AttemptLeap { + sync_leap_identifier, + peers_to_ask, + }) + }); + KeepUpInstruction::Do(offset, effects) + } + + fn sync_back_leap_received( + &mut self, + effect_builder: EffectBuilder, + sync_leap: SyncLeap, + ) -> KeepUpInstruction { + // use the leap response to update our recent switch block data (if relevant) and + // era validator weights. 
if there are other processes which are holding on discovery + // of relevant newly-seen era validator weights, they should naturally progress + // themselves via notification on the event loop. + let block_hash = sync_leap.highest_block_hash(); + let block_height = sync_leap.highest_block_height(); + info!(%sync_leap, %block_height, %block_hash, "KeepUp: historical sync_back received"); + + let era_validator_weights = sync_leap.era_validator_weights( + self.validator_matrix.fault_tolerance_threshold(), + &self.chainspec.protocol_config, + ); + for evw in era_validator_weights { + let era_id = evw.era_id(); + debug!(%era_id, "KeepUp: attempt to register historical validators for era"); + if self.validator_matrix.register_era_validator_weights(evw) { + info!("KeepUp: got historical era {}", era_id); + } else { + debug!(%era_id, "KeepUp: historical era already present or is not relevant"); + } + } + + if let Some(global_states_metadata) = sync_leap.global_states_for_sync_across_upgrade() { + self.try_read_validators_for_block_after_upgrade(effect_builder, global_states_metadata) + } else { + KeepUpInstruction::CheckLater( + "historical sync back received".to_string(), + Duration::ZERO, + ) + } + } + + fn sync_back_register( + &mut self, + effect_builder: EffectBuilder, + rng: &mut NodeRng, + parent_hash: BlockHash, + ) -> KeepUpInstruction { + if self + .block_synchronizer + .register_block_by_hash(parent_hash, true) + { + // sync the parent_hash block; we get a random sampling of peers to ask. + // it is possible that we may get a random sampling that do not have the data + // we need, but the synchronizer should (eventually) detect that and ask for + // more peers via the NeedNext behavior. 
+ let peers_to_ask = self.net.fully_connected_peers_random( + rng, + self.chainspec.core_config.simultaneous_peer_requests as usize, + ); + debug!( + "KeepUp: historical register_block_by_hash: {} peers count: {:?}", + parent_hash, + peers_to_ask.len() + ); + self.block_synchronizer + .register_peers(parent_hash, peers_to_ask); + KeepUpInstruction::Do( + Duration::ZERO, + effect_builder.immediately().event(|_| { + MainEvent::BlockSynchronizerRequest(BlockSynchronizerRequest::NeedNext) + }), + ) + } else { + KeepUpInstruction::CheckLater( + format!("historical syncing {}", parent_hash), + self.control_logic_default_delay.into(), + ) + } + } + + fn sync_back_instruction( + &mut self, + block_synchronizer_progress: &BlockSynchronizerProgress, + ) -> Result, String> { + match block_synchronizer_progress { + BlockSynchronizerProgress::Syncing(_, _, _) => { + debug!("KeepUp: still syncing historical block"); + return Ok(Some(SyncBackInstruction::Syncing)); + } + BlockSynchronizerProgress::Executing(block_hash, height, _) => { + warn!( + %block_hash, + %height, + "Historical block synchronizer should not be waiting for the block to be executed" + ); + } + BlockSynchronizerProgress::Idle | BlockSynchronizerProgress::Synced(_, _, _) => {} + } + // in this flow there is no significant difference between Idle & Synced, as unlike in + // catchup and keepup flows there is no special activity necessary upon getting to Synced + // on an old block. in either case we will attempt to get the next needed block (if any). + // note: for a synced historical block we have header, body, global state, any execution + // effects, any referenced deploys, & sufficient finality (by weight) of signatures. + match self.storage.get_highest_orphaned_block_header() { + HighestOrphanedBlockResult::Orphan(highest_orphaned_block_header) => { + if let Some(synched) = self.synched(&highest_orphaned_block_header)? 
{ + debug!(?synched, "synched result"); + return Ok(Some(synched)); + } + let (sync_hash, sync_era) = + self.sync_hash_and_era(&highest_orphaned_block_header)?; + debug!(?sync_era, %sync_hash, "KeepUp: historical sync target era and block hash"); + + self.validator_matrix + .register_retrograde_latch(Some(sync_era)); + Ok(Some(SyncBackInstruction::Sync { + sync_hash, + sync_era, + })) + } + HighestOrphanedBlockResult::MissingHeader(height) => Err(format!( + "KeepUp: storage is missing historical block header for height {}", + height + )), + HighestOrphanedBlockResult::MissingHighestSequence => { + Err("KeepUp: storage is missing historical highest block sequence".to_string()) + } + } + } + + fn synched( + &self, + highest_orphaned_block_header: &BlockHeader, + ) -> Result, String> { + // if we're configured to not sync, don't sync. + if self.sync_handling.is_no_sync() { + return Ok(Some(SyncBackInstruction::NoSync)); + } + + // if we've reached genesis, there's nothing left to sync. + if highest_orphaned_block_header.is_genesis() { + return Ok(Some(SyncBackInstruction::GenesisSynced)); + } + + if self.sync_handling.is_sync_to_genesis() { + return Ok(None); + } + + // if sync to genesis is false, we require sync to ttl; i.e. if the TTL is 18 + // hours we require sync back to see a contiguous / unbroken + // range of at least 18 hours worth of blocks. note however + // that we measure from the start of the active era (for consensus reasons), + // so this can be up to TTL + era length in practice + + if let Some(highest_switch_block_header) = self + .storage + .read_highest_switch_block_headers(1) + .map_err(|err| err.to_string())? 
+ .last() + { + debug!( + highest_switch_timestamp=?highest_switch_block_header.timestamp(), + highest_orphaned_timestamp=?highest_orphaned_block_header.timestamp(), + "checking max ttl"); + let max_ttl: MaxTtl = self.chainspec.transaction_config.max_ttl.into(); + if max_ttl.synced_to_ttl( + highest_switch_block_header.timestamp(), + highest_orphaned_block_header, + ) { + debug!("is synced to ttl"); + return Ok(Some(SyncBackInstruction::TtlSynced)); + } + } + + Ok(None) + } + + fn sync_hash_and_era( + &self, + highest_orphaned_block_header: &BlockHeader, + ) -> Result<(BlockHash, EraId), String> { + let parent_hash = highest_orphaned_block_header.parent_hash(); + debug!(?highest_orphaned_block_header, %parent_hash, "KeepUp: highest orphaned historical block"); + + // if we are in genesis era but do not have validators loaded for genesis era, + // attempt to skip to switch block of era 1 and leap from there; other validators + // must cite era 0 to prove trusted ancestors for era 1, which will resolve the issue + // when received by this node. 
+ if highest_orphaned_block_header.era_id().is_genesis() + && !self + .validator_matrix + .has_era(&highest_orphaned_block_header.era_id()) + { + match self + .storage + .get_switch_block_by_era_id(&highest_orphaned_block_header.era_id().successor()) + { + Ok(Some(switch)) => { + debug!( + ?highest_orphaned_block_header, + "KeepUp: historical sync in genesis era attempting correction for unmatrixed genesis validators" + ); + return Ok((*switch.hash(), switch.era_id())); + } + Ok(None) => return Err( + "In genesis era with no genesis validators and missing next era switch block" + .to_string(), + ), + Err(err) => return Err(err.to_string()), + } + } + + match self.storage.read_block_header_by_hash(parent_hash) { + Ok(Some(parent_block_header)) => { + // even if we don't have a complete block (all parts and dependencies) + // we may have the parent's block header; if we do we also + // know its era which allows us to know if we have the validator + // set for that era or not + debug!( + ?parent_block_header, + "KeepUp: historical sync found parent block header in storage" + ); + Ok(( + parent_block_header.block_hash(), + parent_block_header.era_id(), + )) + } + Ok(None) => { + debug!(%parent_hash, "KeepUp: historical sync did not find block header in storage"); + let era_id = match highest_orphaned_block_header.era_id().predecessor() { + None => EraId::from(0), + Some(predecessor) => { + // we do not have the parent header and thus don't know what era + // the parent block is in (it could be the same era or the previous + // era). 
we assume the worst case and ask for the earlier era's + // proof; subtracting 1 here is safe + // since the case where era id is 0 is + // handled above + predecessor + } + }; + Ok((*parent_hash, era_id)) + } + Err(err) => Err(err.to_string()), + } + } +} + +#[cfg(test)] +pub(crate) fn synced_to_ttl( + latest_switch_block_header: &BlockHeader, + highest_orphaned_block_header: &BlockHeader, + max_ttl: casper_types::TimeDiff, +) -> Result { + Ok(highest_orphaned_block_header.height() == 0 + || is_timestamp_at_ttl( + latest_switch_block_header.timestamp(), + highest_orphaned_block_header.timestamp(), + max_ttl, + )) +} + +#[cfg(test)] +fn is_timestamp_at_ttl( + latest_switch_block_timestamp: Timestamp, + lowest_block_timestamp: Timestamp, + max_ttl: casper_types::TimeDiff, +) -> bool { + lowest_block_timestamp < latest_switch_block_timestamp.saturating_sub(max_ttl) +} + +#[cfg(test)] +mod tests { + use std::str::FromStr; + + use casper_types::{testing::TestRng, TestBlockBuilder, TimeDiff, Timestamp}; + + use crate::reactor::main_reactor::keep_up::{is_timestamp_at_ttl, synced_to_ttl}; + + const TWO_DAYS_SECS: u32 = 60 * 60 * 24 * 2; + const MAX_TTL: TimeDiff = TimeDiff::from_seconds(86400); + + #[test] + fn should_be_at_ttl() { + let latest_switch_block_timestamp = Timestamp::from_str("2010-06-15 00:00:00.000").unwrap(); + let lowest_block_timestamp = Timestamp::from_str("2010-06-10 00:00:00.000").unwrap(); + let max_ttl = TimeDiff::from_seconds(TWO_DAYS_SECS); + assert!(is_timestamp_at_ttl( + latest_switch_block_timestamp, + lowest_block_timestamp, + max_ttl + )); + } + + #[test] + fn should_not_be_at_ttl() { + let latest_switch_block_timestamp = Timestamp::from_str("2010-06-15 00:00:00.000").unwrap(); + let lowest_block_timestamp = Timestamp::from_str("2010-06-14 00:00:00.000").unwrap(); + let max_ttl = TimeDiff::from_seconds(TWO_DAYS_SECS); + assert!(!is_timestamp_at_ttl( + latest_switch_block_timestamp, + lowest_block_timestamp, + max_ttl + )); + } + + #[test] + 
fn should_detect_ttl_at_the_boundary() { + let latest_switch_block_timestamp = Timestamp::from_str("2010-06-15 00:00:00.000").unwrap(); + let lowest_block_timestamp = Timestamp::from_str("2010-06-12 23:59:59.999").unwrap(); + let max_ttl = TimeDiff::from_seconds(TWO_DAYS_SECS); + assert!(is_timestamp_at_ttl( + latest_switch_block_timestamp, + lowest_block_timestamp, + max_ttl + )); + + let latest_switch_block_timestamp = Timestamp::from_str("2010-06-15 00:00:00.000").unwrap(); + let lowest_block_timestamp = Timestamp::from_str("2010-06-13 00:00:00.000").unwrap(); + let max_ttl = TimeDiff::from_seconds(TWO_DAYS_SECS); + assert!(!is_timestamp_at_ttl( + latest_switch_block_timestamp, + lowest_block_timestamp, + max_ttl + )); + + let latest_switch_block_timestamp = Timestamp::from_str("2010-06-15 00:00:00.000").unwrap(); + let lowest_block_timestamp = Timestamp::from_str("2010-06-13 00:00:00.001").unwrap(); + let max_ttl = TimeDiff::from_seconds(TWO_DAYS_SECS); + assert!(!is_timestamp_at_ttl( + latest_switch_block_timestamp, + lowest_block_timestamp, + max_ttl + )); + } + + #[test] + fn should_detect_ttl_at_genesis() { + let rng = &mut TestRng::new(); + + let latest_switch_block = TestBlockBuilder::new() + .era(100) + .height(1000) + .switch_block(true) + .build_versioned(rng); + + let latest_orphaned_block = TestBlockBuilder::new() + .era(0) + .height(0) + .switch_block(true) + .build_versioned(rng); + + assert_eq!(latest_orphaned_block.height(), 0); + assert_eq!( + synced_to_ttl( + &latest_switch_block.clone_header(), + &latest_orphaned_block.clone_header(), + MAX_TTL + ), + Ok(true) + ); + } +} diff --git a/node/src/reactor/main_reactor/memory_metrics.rs b/node/src/reactor/main_reactor/memory_metrics.rs new file mode 100644 index 0000000000..d87f540fa2 --- /dev/null +++ b/node/src/reactor/main_reactor/memory_metrics.rs @@ -0,0 +1,300 @@ +use datasize::DataSize; +use prometheus::{self, Histogram, HistogramOpts, IntGauge, Registry}; +use tracing::debug; + +use 
super::MainReactor;
use crate::unregister_metric;

/// Metrics for estimated heap memory usage for the main reactor.
#[derive(Debug)]
pub(super) struct MemoryMetrics {
    // Sum of all per-component estimates below.
    mem_total: IntGauge,
    // Heap estimate of the metrics component itself.
    mem_metrics: IntGauge,
    // Heap estimate of the network component.
    mem_net: IntGauge,
    mem_address_gossiper: IntGauge,
    mem_storage: IntGauge,
    mem_contract_runtime: IntGauge,
    // NOTE(review): registered but never updated by `estimate` - confirm
    // whether this gauge is vestigial.
    mem_rpc_server: IntGauge,
    mem_rest_server: IntGauge,
    mem_event_stream_server: IntGauge,
    mem_consensus: IntGauge,
    mem_transaction_gossiper: IntGauge,
    mem_finality_signature_gossiper: IntGauge,
    mem_block_gossiper: IntGauge,
    mem_transaction_buffer: IntGauge,
    mem_block_validator: IntGauge,
    mem_sync_leaper: IntGauge,
    mem_transaction_acceptor: IntGauge,
    mem_block_synchronizer: IntGauge,
    mem_block_accumulator: IntGauge,
    // Combined estimate for all fetcher components.
    mem_fetchers: IntGauge,
    mem_diagnostics_port: IntGauge,
    mem_upgrade_watcher: IntGauge,
    mem_binary_port: IntGauge,
    /// Histogram detailing how long it took to measure memory usage.
    mem_estimator_runtime_s: Histogram,
    // Registry the metrics were registered with; used to unregister on drop.
    registry: Registry,
}

impl MemoryMetrics {
    /// Initializes a new set of memory metrics.
    /// Constructs every gauge plus the runtime histogram and registers them all
    /// with `registry`; fails if construction or registration errors (e.g. a
    /// duplicate metric name).
    pub(super) fn new(registry: Registry) -> Result<Self, prometheus::Error> {
        let mem_total = IntGauge::new("mem_total", "total memory usage in bytes")?;
        let mem_metrics = IntGauge::new("mem_metrics", "metrics memory usage in bytes")?;
        let mem_net = IntGauge::new("mem_net", "network memory usage in bytes")?;
        let mem_address_gossiper = IntGauge::new(
            "mem_address_gossiper",
            "address_gossiper memory usage in bytes",
        )?;
        let mem_storage = IntGauge::new("mem_storage", "storage memory usage in bytes")?;
        let mem_contract_runtime = IntGauge::new(
            "mem_contract_runtime",
            "contract runtime memory usage in bytes",
        )?;
        let mem_rpc_server = IntGauge::new("mem_rpc_server", "rpc server memory usage in bytes")?;
        let mem_rest_server =
            IntGauge::new("mem_rest_server", "rest server memory usage in bytes")?;
        let mem_event_stream_server = IntGauge::new(
            "mem_event_stream_server",
            "event stream server memory usage in bytes",
        )?;
        let mem_consensus = IntGauge::new("mem_consensus", "consensus memory usage in bytes")?;
        let mem_fetchers = IntGauge::new("mem_fetchers", "combined fetcher memory usage in bytes")?;
        let mem_transaction_gossiper = IntGauge::new(
            "mem_transaction_gossiper",
            "transaction gossiper memory usage in bytes",
        )?;
        let mem_finality_signature_gossiper = IntGauge::new(
            "mem_finality_signature_gossiper",
            "finality signature gossiper memory usage in bytes",
        )?;
        let mem_block_gossiper =
            IntGauge::new("mem_block_gossiper", "block gossiper memory usage in bytes")?;
        let mem_transaction_buffer = IntGauge::new(
            "mem_transaction_buffer",
            "transaction buffer memory usage in bytes",
        )?;
        let mem_block_validator = IntGauge::new(
            "mem_block_validator",
            "block validator memory usage in bytes",
        )?;
        let mem_sync_leaper =
            IntGauge::new("mem_sync_leaper", "sync leaper memory usage in bytes")?;
        let mem_transaction_acceptor = IntGauge::new(
            "mem_transaction_acceptor",
            "transaction acceptor memory usage in bytes",
        )?;
        let mem_block_synchronizer = IntGauge::new(
            "mem_block_synchronizer",
            "block synchronizer memory usage in bytes",
        )?;
        let mem_block_accumulator = IntGauge::new(
            "mem_block_accumulator",
            "block accumulator memory usage in bytes",
        )?;
        let mem_diagnostics_port = IntGauge::new(
            "mem_diagnostics_port",
            "diagnostics port memory usage in bytes",
        )?;
        let mem_upgrade_watcher = IntGauge::new(
            "mem_upgrade_watcher",
            "upgrade watcher memory usage in bytes",
        )?;
        let mem_binary_port =
            IntGauge::new("mem_binary_port", "binary port memory usage in bytes")?;
        let mem_estimator_runtime_s = Histogram::with_opts(
            HistogramOpts::new(
                "mem_estimator_runtime_s",
                "time in seconds to estimate memory usage",
            )
            // 13 exponential buckets, doubling from 0.4 ms up to ~1.6 s.
            .buckets(prometheus::exponential_buckets(0.000_4, 2.0, 13)?),
        )?;

        registry.register(Box::new(mem_total.clone()))?;
        registry.register(Box::new(mem_metrics.clone()))?;
        registry.register(Box::new(mem_net.clone()))?;
        registry.register(Box::new(mem_address_gossiper.clone()))?;
        registry.register(Box::new(mem_storage.clone()))?;
        registry.register(Box::new(mem_contract_runtime.clone()))?;
        registry.register(Box::new(mem_rpc_server.clone()))?;
        registry.register(Box::new(mem_rest_server.clone()))?;
        registry.register(Box::new(mem_event_stream_server.clone()))?;
        registry.register(Box::new(mem_consensus.clone()))?;
        registry.register(Box::new(mem_fetchers.clone()))?;
        registry.register(Box::new(mem_transaction_gossiper.clone()))?;
        registry.register(Box::new(mem_finality_signature_gossiper.clone()))?;
        registry.register(Box::new(mem_block_gossiper.clone()))?;
        registry.register(Box::new(mem_transaction_buffer.clone()))?;
        registry.register(Box::new(mem_block_validator.clone()))?;
        registry.register(Box::new(mem_sync_leaper.clone()))?;
        registry.register(Box::new(mem_transaction_acceptor.clone()))?;
        registry.register(Box::new(mem_block_synchronizer.clone()))?;
        registry.register(Box::new(mem_block_accumulator.clone()))?;
        registry.register(Box::new(mem_diagnostics_port.clone()))?;
        registry.register(Box::new(mem_upgrade_watcher.clone()))?;
        registry.register(Box::new(mem_binary_port.clone()))?;
        registry.register(Box::new(mem_estimator_runtime_s.clone()))?;

        Ok(MemoryMetrics {
            mem_total,
            mem_metrics,
            mem_net,
            mem_address_gossiper,
            mem_storage,
            mem_contract_runtime,
            mem_rpc_server,
            mem_rest_server,
            mem_event_stream_server,
            mem_consensus,
            mem_fetchers,
            mem_transaction_gossiper,
            mem_finality_signature_gossiper,
            mem_block_gossiper,
            mem_transaction_buffer,
            mem_block_validator,
            mem_sync_leaper,
            mem_transaction_acceptor,
            mem_block_synchronizer,
            mem_block_accumulator,
            mem_diagnostics_port,
            mem_upgrade_watcher,
            mem_binary_port,
            mem_estimator_runtime_s,
            registry,
        })
    }

    /// Estimates memory usage of every measured component and updates the
    /// gauges; also records how long the estimation itself took.
    pub(super) fn estimate(&self, reactor: &MainReactor) {
        let timer = self.mem_estimator_runtime_s.start_timer();

        let metrics = reactor.metrics.estimate_heap_size() as i64;
        let network = reactor.net.estimate_heap_size() as i64;
        let address_gossiper = reactor.address_gossiper.estimate_heap_size() as i64;
        let storage = reactor.storage.estimate_heap_size() as i64;
        let contract_runtime = reactor.contract_runtime.estimate_heap_size() as i64;
        // NOTE(review): no rpc_server component is measured here, so
        // `mem_rpc_server` is never set - confirm whether it should be removed.
        let rest_server = reactor.rest_server.estimate_heap_size() as i64;
        let event_stream_server = reactor.event_stream_server.estimate_heap_size() as i64;
        let consensus = reactor.consensus.estimate_heap_size() as i64;
        let fetchers = reactor.fetchers.estimate_heap_size() as i64;
        let transaction_gossiper = reactor.transaction_gossiper.estimate_heap_size() as i64;
        let finality_signature_gossiper =
            reactor.finality_signature_gossiper.estimate_heap_size() as i64;
        let block_gossiper = reactor.block_gossiper.estimate_heap_size() as i64;
        let transaction_buffer = reactor.transaction_buffer.estimate_heap_size() as i64;
        let block_validator = reactor.block_validator.estimate_heap_size() as i64;
        let sync_leaper = reactor.sync_leaper.estimate_heap_size() as i64;
        let transaction_acceptor = reactor.transaction_acceptor.estimate_heap_size() as i64;
        let block_synchronizer = reactor.block_synchronizer.estimate_heap_size() as i64;
        let block_accumulator = reactor.block_accumulator.estimate_heap_size() as i64;
        let diagnostics_port = reactor.diagnostics_port.estimate_heap_size() as i64;
        let upgrade_watcher = reactor.upgrade_watcher.estimate_heap_size() as i64;
        let binary_port = reactor.binary_port.estimate_heap_size() as i64;

        let total = metrics
            + network
            + address_gossiper
            + storage
            + contract_runtime
            + rest_server
            + event_stream_server
            + consensus
            + fetchers
            + transaction_gossiper
            + finality_signature_gossiper
            + block_gossiper
            + transaction_buffer
            + block_validator
            + sync_leaper
            + transaction_acceptor
            + block_synchronizer
            + block_accumulator
            + diagnostics_port
            + upgrade_watcher
            + binary_port;

        self.mem_net.set(network);
        self.mem_address_gossiper.set(address_gossiper);
        self.mem_storage.set(storage);
        self.mem_contract_runtime.set(contract_runtime);
        self.mem_rest_server.set(rest_server);
        self.mem_event_stream_server.set(event_stream_server);
        self.mem_consensus.set(consensus);
        self.mem_fetchers.set(fetchers);
        self.mem_transaction_gossiper.set(transaction_gossiper);
        self.mem_finality_signature_gossiper
            .set(finality_signature_gossiper);
        self.mem_block_gossiper.set(block_gossiper);
        self.mem_transaction_buffer.set(transaction_buffer);
        self.mem_block_validator.set(block_validator);
        self.mem_sync_leaper.set(sync_leaper);
        self.mem_transaction_acceptor.set(transaction_acceptor);
        self.mem_block_synchronizer.set(block_synchronizer);
        self.mem_block_accumulator.set(block_accumulator);
        self.mem_diagnostics_port.set(diagnostics_port);
        self.mem_upgrade_watcher.set(upgrade_watcher);
        self.mem_binary_port.set(binary_port);

        self.mem_total.set(total);
        self.mem_metrics.set(metrics);

        // Stop the timer explicitly, don't count logging.
        let duration_s = timer.stop_and_record();

        debug!(%total,
               %duration_s,
               %metrics,
               %network,
               %address_gossiper,
               %storage,
               %contract_runtime,
               %rest_server,
               %event_stream_server,
               %consensus,
               %fetchers,
               %transaction_gossiper,
               %finality_signature_gossiper,
               %block_gossiper,
               %transaction_buffer,
               %block_validator,
               %sync_leaper,
               %transaction_acceptor,
               %block_synchronizer,
               %block_accumulator,
               %diagnostics_port,
               %upgrade_watcher,
               %binary_port,
               "Collected new set of memory metrics.");
    }
}

// Unregister everything on drop so a fresh reactor can re-register under the
// same names in the same registry.
impl Drop for MemoryMetrics {
    fn drop(&mut self) {
        unregister_metric!(self.registry, self.mem_total);
        unregister_metric!(self.registry, self.mem_metrics);
        unregister_metric!(self.registry, self.mem_estimator_runtime_s);

        unregister_metric!(self.registry, self.mem_net);
        unregister_metric!(self.registry, self.mem_address_gossiper);
        unregister_metric!(self.registry, self.mem_storage);
        unregister_metric!(self.registry, self.mem_contract_runtime);
        unregister_metric!(self.registry, self.mem_rpc_server);
        unregister_metric!(self.registry, self.mem_rest_server);
        unregister_metric!(self.registry, self.mem_event_stream_server);
        unregister_metric!(self.registry, self.mem_consensus);
        unregister_metric!(self.registry, self.mem_fetchers);
        unregister_metric!(self.registry, self.mem_transaction_gossiper);
        unregister_metric!(self.registry, self.mem_finality_signature_gossiper);
        unregister_metric!(self.registry, self.mem_block_gossiper);
        unregister_metric!(self.registry, self.mem_transaction_buffer);
        unregister_metric!(self.registry, self.mem_block_validator);
        unregister_metric!(self.registry, self.mem_sync_leaper);
        unregister_metric!(self.registry, self.mem_transaction_acceptor);
        unregister_metric!(self.registry, self.mem_block_synchronizer);
        unregister_metric!(self.registry, self.mem_block_accumulator);
        unregister_metric!(self.registry, self.mem_diagnostics_port);
        unregister_metric!(self.registry, self.mem_upgrade_watcher);
        unregister_metric!(self.registry, self.mem_binary_port);
    }
}
diff --git a/node/src/reactor/main_reactor/reactor_state.rs b/node/src/reactor/main_reactor/reactor_state.rs
new file mode 100644
index 0000000000..5cbdff2011
--- /dev/null
+++ b/node/src/reactor/main_reactor/reactor_state.rs
@@ -0,0 +1,76 @@
use datasize::DataSize;
use derive_more::Display;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};

/// The state of the reactor.
#[cfg_attr(doc, aquamarine::aquamarine)]
/// ```mermaid
/// flowchart TD
/// %%{init: { 'flowchart': {'diagramPadding':100} }}%%
/// style Start fill:#66ccff,stroke:#333,stroke-width:4px
/// style End fill:#66ccff,stroke:#333,stroke-width:4px
///
/// Start --> Initialize
/// Initialize --> CatchUp
/// CatchUp --> KeepUp
/// KeepUp --> CatchUp
/// KeepUp --> Validate
/// Validate --> KeepUp
/// CatchUp --> ShutdownForUpgrade
/// CatchUp --> ShutdownAfterCatchingUp
/// KeepUp --> ShutdownForUpgrade
/// Validate --> ShutdownForUpgrade
/// CatchUp --> Upgrading
/// CatchUp -->|at genesis| Validate
/// Upgrading --> CatchUp
/// ShutdownForUpgrade --> End
/// ```
/// ```mermaid
/// flowchart TD
/// style Start fill:#66ccff,stroke:#333,stroke-width:4px
/// style End fill:#66ccff,stroke:#333,stroke-width:4px
/// style F fill:#ffcc66,stroke:#333,stroke-width:4px
/// style G fill:#ffcc66,stroke:#333,stroke-width:4px
/// title[CatchUp process]
/// title---Start
/// style title fill:#FFF,stroke:#FFF
/// linkStyle 0 stroke-width:0;
///
/// Start --> A["get sync identifier (sync starting point)"]
/// A --> BlockHash
/// A --> BlockIdentifier
/// A --> SyncedBlockIdentifier
/// A --> LocalTip
/// BlockHash --> E[process identifier in
block accumulator]
/// BlockIdentifier --> E
/// SyncedBlockIdentifier --> E
/// LocalTip --> E
/// CaughtUp --> H[handle upgrade<br>if needed]
/// H --> End
/// E -->|more data needed<br>from network<br>to let us sync near tip| Leap
/// E -->|block represented by<br>identifier is not stored<br>locally, sync it| BlockSync
/// E -->|we think we're close<br>enough to the tip|CaughtUp
/// Leap --> F[initiate SyncLeap<br>and retry later]
/// BlockSync --> G[initiate BlockSync<br>and retry later]
/// ```
// NOTE(review): variant doc comments below are left untouched on purpose -
// schemars appears to surface them as schema descriptions, so edits here would
// presumably change the generated JSON schema; confirm before rewording.
#[derive(
    Copy, Clone, PartialEq, Eq, Serialize, Deserialize, DataSize, Debug, Display, JsonSchema,
)]
#[schemars(description = "The state of the reactor.")]
pub enum ReactorState {
    /// Get all components and reactor state set up on start.
    Initialize,
    /// Orient to the network and attempt to catch up to tip.
    CatchUp,
    /// Running commit upgrade and creating immediate switch block.
    Upgrading,
    /// Stay caught up with tip.
    KeepUp,
    /// Node is currently caught up and is an active validator.
    Validate,
    /// Node should be shut down for upgrade.
    ShutdownForUpgrade,
    /// Node should shut down after catching up.
    ShutdownAfterCatchingUp,
}
diff --git a/node/src/reactor/main_reactor/tests.rs b/node/src/reactor/main_reactor/tests.rs
new file mode 100644
index 0000000000..43db016a59
--- /dev/null
+++ b/node/src/reactor/main_reactor/tests.rs
@@ -0,0 +1,95 @@
mod auction;
mod binary_port;
mod configs_override;
mod consensus_rules;
mod fixture;
mod gas_price;
mod initial_stakes;
mod network_general;
mod rewards;
mod switch_blocks;
mod transaction_scenario;
mod transactions;

use std::{collections::BTreeSet, sync::Arc, time::Duration};

use num_rational::Ratio;
use tracing::info;

use casper_storage::{
    data_access_layer::{
        balance::{BalanceHandling, BalanceResult},
        BalanceRequest, BidsRequest, TotalSupplyRequest, TotalSupplyResult,
    },
    global_state::state::StateProvider,
};
use casper_types::{
    execution::ExecutionResult, system::auction::BidKind, testing::TestRng, Chainspec, Deploy,
    EraId, FeeHandling, Gas, HoldBalanceHandling, Key, PricingHandling, PricingMode, PublicKey,
    RefundHandling, SecretKey, StoredValue, TimeDiff, Timestamp, Transaction, TransactionHash,
    U512,
};

use crate::{
    components::consensus::{ClContext, ConsensusMessage, HighwayMessage, HighwayVertex},
    effect::incoming::ConsensusMessageIncoming,
    reactor::{
        main_reactor::{MainEvent, MainReactor},
        Runner,
    },
+ testing::{self, filter_reactor::FilterReactor, ConditionCheckReactor}, + types::{transaction::transaction_v1_builder::TransactionV1Builder, NodeId}, + utils::RESOURCES_PATH, +}; + +const ERA_ZERO: EraId = EraId::new(0); +const ERA_ONE: EraId = EraId::new(1); +const ERA_TWO: EraId = EraId::new(2); +const ERA_THREE: EraId = EraId::new(3); +const TEN_SECS: Duration = Duration::from_secs(10); +const THIRTY_SECS: Duration = Duration::from_secs(30); +const ONE_MIN: Duration = Duration::from_secs(60); + +type Nodes = testing::network::Nodes>; + +impl Runner>> { + fn main_reactor(&self) -> &MainReactor { + self.reactor().inner().inner() + } + + fn main_reactor_as_mut(&mut self) -> &mut MainReactor { + self.reactor.inner_mut().inner_mut() + } +} + +/// Given a block height and a node id, returns a predicate to check if the lowest available block +/// for the specified node is at or below the specified height. +fn node_has_lowest_available_block_at_or_below_height( + height: u64, + node_id: NodeId, +) -> impl Fn(&Nodes) -> bool { + move |nodes: &Nodes| { + nodes.get(&node_id).is_none_or(|runner| { + let available_block_range = runner.main_reactor().storage().get_available_block_range(); + if available_block_range.low() == 0 && available_block_range.high() == 0 { + false + } else { + available_block_range.low() <= height + } + }) + } +} + +fn is_ping(event: &MainEvent) -> bool { + if let MainEvent::ConsensusMessageIncoming(ConsensusMessageIncoming { message, .. }) = event { + if let ConsensusMessage::Protocol { ref payload, .. 
} = **message { + return matches!( + payload.deserialize_incoming::>(), + Ok(HighwayMessage::::NewVertex(HighwayVertex::Ping( + _ + ))) + ); + } + } + false +} diff --git a/node/src/reactor/main_reactor/tests/auction.rs b/node/src/reactor/main_reactor/tests/auction.rs new file mode 100644 index 0000000000..c7bdd7ff28 --- /dev/null +++ b/node/src/reactor/main_reactor/tests/auction.rs @@ -0,0 +1,336 @@ +use std::sync::Arc; + +use casper_types::{ + execution::TransformKindV2, + system::{auction::BidAddr, AUCTION}, + Deploy, Key, PublicKey, StoredValue, TimeDiff, Timestamp, Transaction, U512, +}; + +use crate::reactor::main_reactor::tests::{ + configs_override::ConfigsOverride, fixture::TestFixture, initial_stakes::InitialStakes, + ERA_ONE, ERA_TWO, ONE_MIN, TEN_SECS, +}; + +#[tokio::test] +async fn run_withdraw_bid_network() { + let alice_stake = 200_000_000_000_u64; + let initial_stakes = InitialStakes::FromVec(vec![alice_stake.into(), 10_000_000_000]); + + let unbonding_delay = 2; + + let mut fixture = TestFixture::new( + initial_stakes, + Some(ConfigsOverride { + unbonding_delay, + ..Default::default() + }), + ) + .await; + let alice_secret_key = Arc::clone(&fixture.node_contexts[0].secret_key); + let alice_public_key = PublicKey::from(&*alice_secret_key); + + // Wait for all nodes to complete block 0. + fixture.run_until_block_height(0, ONE_MIN).await; + + // Ensure our post genesis assumption that Alice has a bid is correct. + fixture.check_bid_existence_at_tip(&alice_public_key, None, true); + + // Create & sign deploy to withdraw Alice's full stake. + let mut deploy = Deploy::withdraw_bid( + fixture.chainspec.network_config.name.clone(), + fixture.system_contract_hash(AUCTION), + alice_public_key.clone(), + alice_stake.into(), + Timestamp::now(), + TimeDiff::from_seconds(60), + ); + deploy.sign(&alice_secret_key); + let txn = Transaction::Deploy(deploy); + let txn_hash = txn.hash(); + + // Inject the transaction and run the network until executed. 
+ fixture.inject_transaction(txn).await; + fixture + .run_until_executed_transaction(&txn_hash, TEN_SECS) + .await; + + // Ensure execution succeeded and that there is a Prune transform for the bid's key. + let bid_key = Key::BidAddr(BidAddr::from(alice_public_key.clone())); + fixture + .successful_execution_transforms(&txn_hash) + .iter() + .find(|transform| match transform.kind() { + TransformKindV2::Prune(prune_key) => prune_key == &bid_key, + _ => false, + }) + .expect("should have a prune record for bid"); + + // Crank the network forward until the era ends. + fixture + .run_until_stored_switch_block_header(ERA_ONE, ONE_MIN) + .await; + + // The bid record should have been pruned once unbonding ran. + fixture.check_bid_existence_at_tip(&alice_public_key, None, false); + + // Crank the network forward until the unbonding queue is processed. + fixture + .run_until_stored_switch_block_header( + ERA_ONE.saturating_add(unbonding_delay + 1), + ONE_MIN * 2, + ) + .await; +} + +#[tokio::test] +async fn run_undelegate_bid_network() { + let alice_stake = 200_000_000_000_u64; + let bob_stake = 300_000_000_000_u64; + let initial_stakes = InitialStakes::FromVec(vec![alice_stake.into(), bob_stake.into()]); + + let unbonding_delay = 2; + + let mut fixture = TestFixture::new( + initial_stakes, + Some(ConfigsOverride { + unbonding_delay, + ..Default::default() + }), + ) + .await; + let alice_secret_key = Arc::clone(&fixture.node_contexts[0].secret_key); + let alice_public_key = PublicKey::from(&*alice_secret_key); + let bob_public_key = PublicKey::from(&*fixture.node_contexts[1].secret_key); + + // Wait for all nodes to complete block 0. + fixture.run_until_block_height(0, ONE_MIN).await; + + // Ensure our post genesis assumption that Alice and Bob have bids is correct. + fixture.check_bid_existence_at_tip(&alice_public_key, None, true); + fixture.check_bid_existence_at_tip(&bob_public_key, None, true); + // Alice should not have a delegation bid record for Bob (yet). 
+ fixture.check_bid_existence_at_tip(&bob_public_key, Some(&alice_public_key), false); + + // Have Alice delegate to Bob. + // + // Note, in the real world validators usually don't also delegate to other validators, but in + // this test fixture the only accounts in the system are those created for genesis validators. + let alice_delegation_amount = + U512::from(fixture.chainspec.core_config.minimum_delegation_amount); + let mut deploy = Deploy::delegate( + fixture.chainspec.network_config.name.clone(), + fixture.system_contract_hash(AUCTION), + bob_public_key.clone(), + alice_public_key.clone(), + alice_delegation_amount, + Timestamp::now(), + TimeDiff::from_seconds(60), + ); + deploy.sign(&alice_secret_key); + let txn = Transaction::Deploy(deploy); + let txn_hash = txn.hash(); + + // Inject the transaction and run the network until executed. + fixture.inject_transaction(txn).await; + fixture + .run_until_executed_transaction(&txn_hash, TEN_SECS) + .await; + + // Ensure execution succeeded and that there is a Write transform for the bid's key. + let bid_key = Key::BidAddr(BidAddr::new_from_public_keys( + &bob_public_key, + Some(&alice_public_key), + )); + fixture + .successful_execution_transforms(&txn_hash) + .iter() + .find(|transform| match transform.kind() { + TransformKindV2::Write(StoredValue::BidKind(bid_kind)) => { + Key::from(bid_kind.bid_addr()) == bid_key + } + _ => false, + }) + .expect("should have a write record for delegate bid"); + + // Alice should now have a delegation bid record for Bob. + fixture.check_bid_existence_at_tip(&bob_public_key, Some(&alice_public_key), true); + + // Create & sign transaction to undelegate from Alice to Bob. 
+ let mut deploy = Deploy::undelegate( + fixture.chainspec.network_config.name.clone(), + fixture.system_contract_hash(AUCTION), + bob_public_key.clone(), + alice_public_key.clone(), + alice_delegation_amount, + Timestamp::now(), + TimeDiff::from_seconds(60), + ); + deploy.sign(&alice_secret_key); + let txn = Transaction::Deploy(deploy); + let txn_hash = txn.hash(); + + // Inject the transaction and run the network until executed. + fixture.inject_transaction(txn).await; + fixture + .run_until_executed_transaction(&txn_hash, TEN_SECS) + .await; + + // Ensure execution succeeded and that there is a Prune transform for the bid's key. + fixture + .successful_execution_transforms(&txn_hash) + .iter() + .find(|transform| match transform.kind() { + TransformKindV2::Prune(prune_key) => prune_key == &bid_key, + _ => false, + }) + .expect("should have a prune record for undelegated bid"); + + // Crank the network forward until the era ends. + fixture + .run_until_stored_switch_block_header(ERA_ONE, ONE_MIN) + .await; + + // Ensure the validator records are still present but the undelegated bid is gone. + fixture.check_bid_existence_at_tip(&alice_public_key, None, true); + fixture.check_bid_existence_at_tip(&bob_public_key, None, true); + fixture.check_bid_existence_at_tip(&bob_public_key, Some(&alice_public_key), false); + + // Crank the network forward until the unbonding queue is processed. 
+ fixture + .run_until_stored_switch_block_header( + ERA_ONE.saturating_add(unbonding_delay + 1), + ONE_MIN * 2, + ) + .await; +} + +#[tokio::test] +async fn run_redelegate_bid_network() { + let alice_stake = 200_000_000_000_u64; + let bob_stake = 300_000_000_000_u64; + let charlie_stake = 300_000_000_000_u64; + let initial_stakes = InitialStakes::FromVec(vec![ + alice_stake.into(), + bob_stake.into(), + charlie_stake.into(), + ]); + + let spec_override = ConfigsOverride { + unbonding_delay: 1, + minimum_era_height: 5, + ..Default::default() + }; + let mut fixture = TestFixture::new(initial_stakes, Some(spec_override)).await; + let alice_secret_key = Arc::clone(&fixture.node_contexts[0].secret_key); + let alice_public_key = PublicKey::from(&*alice_secret_key); + let bob_public_key = PublicKey::from(&*fixture.node_contexts[1].secret_key); + let charlie_public_key = PublicKey::from(&*fixture.node_contexts[2].secret_key); + + // Wait for all nodes to complete block 0. + fixture.run_until_block_height(0, ONE_MIN).await; + + // Ensure our post genesis assumption that Alice, Bob and Charlie have bids is correct. + fixture.check_bid_existence_at_tip(&alice_public_key, None, true); + fixture.check_bid_existence_at_tip(&bob_public_key, None, true); + fixture.check_bid_existence_at_tip(&charlie_public_key, None, true); + // Alice should not have a delegation bid record for Bob or Charlie (yet). + fixture.check_bid_existence_at_tip(&bob_public_key, Some(&alice_public_key), false); + fixture.check_bid_existence_at_tip(&charlie_public_key, Some(&alice_public_key), false); + + // Have Alice delegate to Bob. 
+ let alice_delegation_amount = + U512::from(fixture.chainspec.core_config.minimum_delegation_amount); + let mut deploy = Deploy::delegate( + fixture.chainspec.network_config.name.clone(), + fixture.system_contract_hash(AUCTION), + bob_public_key.clone(), + alice_public_key.clone(), + alice_delegation_amount, + Timestamp::now(), + TimeDiff::from_seconds(60), + ); + deploy.sign(&alice_secret_key); + let txn = Transaction::Deploy(deploy); + let txn_hash = txn.hash(); + + // Inject the transaction and run the network until executed. + fixture.inject_transaction(txn).await; + fixture + .run_until_executed_transaction(&txn_hash, ONE_MIN) + .await; + + // Ensure execution succeeded and that there is a Write transform for the bid's key. + let bid_key = Key::BidAddr(BidAddr::new_from_public_keys( + &bob_public_key, + Some(&alice_public_key), + )); + + fixture + .successful_execution_transforms(&txn_hash) + .iter() + .find(|transform| match transform.kind() { + TransformKindV2::Write(StoredValue::BidKind(bid_kind)) => { + Key::from(bid_kind.bid_addr()) == bid_key + } + _ => false, + }) + .expect("should have a write record for delegate bid"); + + // Alice should now have a delegation bid record for Bob. + fixture.check_bid_existence_at_tip(&bob_public_key, Some(&alice_public_key), true); + + // Create & sign transaction to undelegate Alice from Bob and delegate to Charlie. + let mut deploy = Deploy::redelegate( + fixture.chainspec.network_config.name.clone(), + fixture.system_contract_hash(AUCTION), + bob_public_key.clone(), + alice_public_key.clone(), + charlie_public_key.clone(), + alice_delegation_amount, + Timestamp::now(), + TimeDiff::from_seconds(60), + ); + + deploy.sign(&alice_secret_key); + let transaction = Transaction::Deploy(deploy); + let transaction_hash = transaction.hash(); + + // Inject the transaction and run the network until executed. 
+ fixture.inject_transaction(transaction).await; + fixture + .run_until_executed_transaction(&transaction_hash, TEN_SECS) + .await; + + // Ensure execution succeeded and that there is a Prune transform for the bid's key. + fixture + .successful_execution_transforms(&transaction_hash) + .iter() + .find(|transform| match transform.kind() { + TransformKindV2::Prune(prune_key) => prune_key == &bid_key, + _ => false, + }) + .expect("should have a prune record for undelegated bid"); + + // Original delegation bid should be removed. + fixture.check_bid_existence_at_tip(&bob_public_key, Some(&alice_public_key), false); + // Redelegate doesn't occur until after unbonding delay elapses. + fixture.check_bid_existence_at_tip(&charlie_public_key, Some(&alice_public_key), false); + + // Crank the network forward to run out the unbonding delay. + // First, close out the era the redelegate was processed in. + fixture + .run_until_stored_switch_block_header(ERA_ONE, ONE_MIN) + .await; + // The undelegate is in the unbonding queue. + fixture.check_bid_existence_at_tip(&charlie_public_key, Some(&alice_public_key), false); + // Unbonding delay is 1 on this test network, so step 1 more era. + fixture + .run_until_stored_switch_block_header(ERA_TWO, ONE_MIN) + .await; + + // Ensure the validator records are still present. + fixture.check_bid_existence_at_tip(&alice_public_key, None, true); + fixture.check_bid_existence_at_tip(&bob_public_key, None, true); + // Ensure redelegated bid exists. 
+ fixture.check_bid_existence_at_tip(&charlie_public_key, Some(&alice_public_key), true); +} diff --git a/node/src/reactor/main_reactor/tests/binary_port.rs b/node/src/reactor/main_reactor/tests/binary_port.rs new file mode 100644 index 0000000000..92d18f917e --- /dev/null +++ b/node/src/reactor/main_reactor/tests/binary_port.rs @@ -0,0 +1,1405 @@ +use std::{ + collections::{BTreeMap, HashMap}, + convert::{TryFrom, TryInto}, + iter, + sync::Arc, + time::Duration, +}; + +use casper_binary_port::{ + AccountInformation, AddressableEntityInformation, BalanceResponse, BinaryMessage, + BinaryMessageCodec, BinaryResponse, BinaryResponseAndRequest, Command, CommandHeader, + ConsensusStatus, ConsensusValidatorChanges, ContractInformation, DictionaryItemIdentifier, + DictionaryQueryResult, EntityIdentifier, EraIdentifier, ErrorCode, GetRequest, + GetTrieFullResult, GlobalStateEntityQualifier, GlobalStateQueryResult, GlobalStateRequest, + InformationRequest, InformationRequestTag, KeyPrefix, LastProgress, NetworkName, NodeStatus, + PackageIdentifier, PurseIdentifier, ReactorStateName, RecordId, ResponseType, RewardResponse, + Uptime, ValueWithProof, +}; +use casper_storage::global_state::state::CommitProvider; +use casper_types::{ + account::AccountHash, + addressable_entity::{ActionThresholds, AssociatedKeys, NamedKeyAddr, NamedKeyValue}, + bytesrepr::{Bytes, FromBytes, ToBytes}, + contracts::{ContractHash, ContractPackage, ContractPackageHash}, + execution::{Effects, TransformKindV2, TransformV2}, + system::auction::DelegatorKind, + testing::TestRng, + Account, AddressableEntity, AvailableBlockRange, Block, BlockHash, BlockHeader, + BlockIdentifier, BlockSynchronizerStatus, BlockWithSignatures, ByteCode, ByteCodeAddr, + ByteCodeHash, ByteCodeKind, CLValue, CLValueDictionary, ChainspecRawBytes, Contract, + ContractRuntimeTag, ContractWasm, ContractWasmHash, DictionaryAddr, Digest, EntityAddr, + EntityKind, EntityVersions, GlobalStateIdentifier, Key, KeyTag, NextUpgrade, 
Package, + PackageAddr, PackageHash, Peers, ProtocolVersion, PublicKey, Rewards, SecretKey, StoredValue, + Transaction, Transfer, URef, U512, +}; +use futures::{SinkExt, StreamExt}; +use rand::Rng; +use tokio::{net::TcpStream, time::timeout}; +use tokio_util::codec::Framed; + +use crate::{ + reactor::{main_reactor::MainReactor, Runner}, + testing::{ + self, filter_reactor::FilterReactor, network::TestingNetwork, ConditionCheckReactor, + }, + types::{transaction::transaction_v1_builder::TransactionV1Builder, NodeId}, +}; + +use crate::reactor::main_reactor::tests::{ + fixture::TestFixture, initial_stakes::InitialStakes, ERA_ONE, +}; + +const GUARANTEED_BLOCK_HEIGHT: u64 = 4; + +const TEST_DICT_NAME: &str = "test_dict"; +const TEST_DICT_ITEM_KEY: &str = "test_key"; +const MESSAGE_SIZE: u32 = 1024 * 1024 * 10; + +struct TestData { + rng: TestRng, + protocol_version: ProtocolVersion, + chainspec_raw_bytes: ChainspecRawBytes, + highest_block: Block, + secret_signing_key: Arc, + state_root_hash: Digest, + effects: TestEffects, + era_one_validator: PublicKey, +} + +fn network_produced_blocks( + nodes: &HashMap>>>, + block_count: u64, +) -> bool { + nodes.values().all(|node| { + node.reactor() + .inner() + .inner() + .storage() + .get_available_block_range() + .high() + >= block_count + }) +} + +async fn setup() -> ( + Framed, + ( + impl futures::Future>, TestRng)>, + TestData, + ), +) { + let mut fixture = TestFixture::new( + InitialStakes::AllEqual { + count: 4, + stake: 100, + }, + None, + ) + .await; + let chainspec_raw_bytes = ChainspecRawBytes::clone(&fixture.chainspec_raw_bytes); + let mut rng = fixture.rng_mut().create_child(); + let net = fixture.network_mut(); + net.settle_on( + &mut rng, + |nodes| network_produced_blocks(nodes, GUARANTEED_BLOCK_HEIGHT), + Duration::from_secs(59), + ) + .await; + let (_, first_node) = net + .nodes() + .iter() + .next() + .expect("should have at least one node"); + let secret_signing_key = first_node + .main_reactor() + 
.validator_matrix + .secret_signing_key() + .clone(); + let highest_block = net + .nodes() + .iter() + .find_map(|(_, runner)| { + runner + .reactor() + .inner() + .inner() + .storage() + .read_highest_block() + }) + .expect("should have highest block"); + let era_end = first_node + .main_reactor() + .storage() + .get_switch_block_by_era_id(&ERA_ONE) + .expect("should not fail retrieving switch block") + .expect("should have switch block") + .clone_era_end() + .expect("should have era end"); + let Rewards::V2(rewards) = era_end.rewards() else { + panic!("should have rewards V2"); + }; + + let effects = test_effects(&mut rng); + + let state_root_hash = first_node + .main_reactor() + .contract_runtime() + .data_access_layer() + .commit_effects(*highest_block.state_root_hash(), effects.effects.clone()) + .expect("should commit effects"); + + // Get the binary port address. + let binary_port_addr = first_node + .main_reactor() + .binary_port + .bind_address() + .expect("should be bound"); + + let protocol_version = first_node.main_reactor().chainspec.protocol_version(); + // We let the entire network run in the background, until our request completes. + let finish_cranking = fixture.run_until_stopped(rng.create_child()); + + // Set-up client. 
+ let address = format!("localhost:{}", binary_port_addr.port()); + let stream = TcpStream::connect(address.clone()) + .await + .expect("should create stream"); + + ( + Framed::new(stream, BinaryMessageCodec::new(MESSAGE_SIZE)), + ( + finish_cranking, + TestData { + rng, + protocol_version, + chainspec_raw_bytes, + highest_block, + secret_signing_key, + state_root_hash, + effects, + era_one_validator: rewards + .last_key_value() + .expect("should have at least one reward") + .0 + .clone(), + }, + ), + ) +} + +fn test_effects(rng: &mut TestRng) -> TestEffects { + // we set up some basic data for global state tests, including an account and a dictionary + let pre_migration_account_hash = AccountHash::new(rng.gen()); + let post_migration_account_hash = AccountHash::new(rng.gen()); + let main_purse: URef = rng.gen(); + + let pre_migration_contract_package_hash = ContractPackageHash::new(rng.gen()); + let pre_migration_contract_hash = ContractHash::new(rng.gen()); + let post_migration_contract_package_hash = ContractPackageHash::new(rng.gen()); + let post_migration_contract_hash = ContractHash::new(rng.gen()); + let wasm_hash = ContractWasmHash::new(rng.gen()); + + let package_addr: PackageAddr = rng.gen(); + let package_access_key: URef = rng.gen(); + let entity_addr: EntityAddr = rng.gen(); + let entity_bytecode_hash: ByteCodeHash = ByteCodeHash::new(rng.gen()); + + let dict_seed_uref: URef = rng.gen(); + let dict_key = Key::dictionary(dict_seed_uref, TEST_DICT_ITEM_KEY.as_bytes()); + let dict_value = CLValueDictionary::new( + CLValue::from_t(rng.gen::()).unwrap(), + dict_seed_uref.addr().to_vec(), + TEST_DICT_ITEM_KEY.as_bytes().to_vec(), + ); + + let mut effects = Effects::new(); + + effects.push(TransformV2::new( + Key::Account(pre_migration_account_hash), + TransformKindV2::Write(StoredValue::Account(Account::new( + pre_migration_account_hash, + iter::once((TEST_DICT_NAME.to_owned(), Key::URef(dict_seed_uref))) + .collect::>() + .into(), + main_purse, + 
Default::default(), + Default::default(), + ))), + )); + effects.push(TransformV2::new( + Key::Account(post_migration_account_hash), + TransformKindV2::Write(StoredValue::CLValue( + CLValue::from_t(Key::AddressableEntity(entity_addr)).expect("should create CLValue"), + )), + )); + effects.push(TransformV2::new( + dict_key, + TransformKindV2::Write(StoredValue::CLValue(CLValue::from_t(dict_value).unwrap())), + )); + effects.push(TransformV2::new( + Key::NamedKey( + NamedKeyAddr::new_from_string(entity_addr, TEST_DICT_NAME.to_owned()) + .expect("should create named key addr"), + ), + TransformKindV2::Write(StoredValue::NamedKey( + NamedKeyValue::from_concrete_values( + Key::URef(dict_seed_uref), + TEST_DICT_NAME.to_owned(), + ) + .expect("should create named key value"), + )), + )); + effects.push(TransformV2::new( + Key::Balance(main_purse.addr()), + TransformKindV2::Write(StoredValue::CLValue( + CLValue::from_t(U512::one()).expect("should create CLValue"), + )), + )); + + effects.push(TransformV2::new( + Key::Hash(pre_migration_contract_package_hash.value()), + TransformKindV2::Write(StoredValue::ContractPackage(ContractPackage::new( + package_access_key, + Default::default(), + Default::default(), + Default::default(), + Default::default(), + ))), + )); + effects.push(TransformV2::new( + Key::Hash(post_migration_contract_package_hash.value()), + TransformKindV2::Write(StoredValue::CLValue( + CLValue::from_t((Key::SmartContract(package_addr), package_access_key)) + .expect("should create CLValue"), + )), + )); + + effects.push(TransformV2::new( + Key::Hash(pre_migration_contract_hash.value()), + TransformKindV2::Write(StoredValue::Contract(Contract::new( + pre_migration_contract_package_hash, + wasm_hash, + Default::default(), + Default::default(), + ProtocolVersion::V2_0_0, + ))), + )); + effects.push(TransformV2::new( + Key::Hash(post_migration_contract_hash.value()), + TransformKindV2::Write(StoredValue::CLValue( + 
CLValue::from_t(Key::AddressableEntity(entity_addr)).expect("should create CLValue"), + )), + )); + + effects.push(TransformV2::new( + Key::Hash(wasm_hash.value()), + TransformKindV2::Write(StoredValue::ContractWasm(ContractWasm::new( + rng.random_vec(10..100), + ))), + )); + + effects.push(TransformV2::new( + Key::SmartContract(package_addr), + TransformKindV2::Write(StoredValue::SmartContract(Package::new( + EntityVersions::default(), + Default::default(), + Default::default(), + Default::default(), + ))), + )); + effects.push(TransformV2::new( + Key::AddressableEntity(entity_addr), + TransformKindV2::Write(StoredValue::AddressableEntity(AddressableEntity::new( + PackageHash::new(package_addr), + entity_bytecode_hash, + ProtocolVersion::V2_0_0, + main_purse, + AssociatedKeys::default(), + ActionThresholds::default(), + EntityKind::SmartContract(ContractRuntimeTag::VmCasperV1), + ))), + )); + effects.push(TransformV2::new( + Key::ByteCode(ByteCodeAddr::new_wasm_addr(entity_bytecode_hash.value())), + TransformKindV2::Write(StoredValue::ByteCode(ByteCode::new( + ByteCodeKind::V1CasperWasm, + rng.random_vec(10..100), + ))), + )); + + TestEffects { + effects, + pre_migration_account_hash, + post_migration_account_hash, + pre_migration_contract_package_hash, + post_migration_contract_package_hash, + pre_migration_contract_hash, + post_migration_contract_hash, + package_addr, + entity_addr, + dict_seed_uref, + } +} + +struct TestEffects { + effects: Effects, + pre_migration_account_hash: AccountHash, + post_migration_account_hash: AccountHash, + pre_migration_contract_package_hash: ContractPackageHash, + post_migration_contract_package_hash: ContractPackageHash, + pre_migration_contract_hash: ContractHash, + post_migration_contract_hash: ContractHash, + package_addr: PackageAddr, + entity_addr: EntityAddr, + dict_seed_uref: URef, +} + +struct TestCase { + name: &'static str, + request: Command, + asserter: Box bool>, +} + +fn validate_metadata( + response: 
&BinaryResponse, + expected_payload_type: Option, +) -> bool { + response.is_success() + && response.returned_data_type_tag() + == expected_payload_type.map(|payload_type| payload_type as u8) + && expected_payload_type.is_none_or(|_| !response.payload().is_empty()) +} + +fn validate_deserialization(response: &BinaryResponse) -> Option +where + T: FromBytes, +{ + FromBytes::from_bytes(response.payload()) + .ok() + .map(|(data, remainder)| { + assert!(remainder.is_empty()); + data + }) +} + +fn assert_response( + response: &BinaryResponse, + payload_type: Option, + validator: F, +) -> bool +where + T: FromBytes, + F: FnOnce(T) -> bool, +{ + validate_metadata(response, payload_type) + && payload_type + .is_none_or(|_| validate_deserialization::(response).is_some_and(validator)) +} + +#[tokio::test] +async fn binary_port_component_handles_all_requests() { + testing::init_logging(); + + let ( + mut client, + ( + finish_cranking, + TestData { + mut rng, + protocol_version, + chainspec_raw_bytes: network_chainspec_raw_bytes, + highest_block, + secret_signing_key, + state_root_hash, + effects, + era_one_validator, + }, + ), + ) = setup().await; + + let test_cases = &[ + block_header_info(*highest_block.hash()), + block_with_signatures_info(*highest_block.hash()), + peers(), + uptime(), + last_progress(), + reactor_state(), + network_name(), + consensus_validator_changes(), + block_synchronizer_status(), + available_block_range(highest_block.height()), + next_upgrade(), + consensus_status(), + chainspec_raw_bytes(network_chainspec_raw_bytes), + latest_switch_block_header(), + node_status(protocol_version), + get_block_header(highest_block.clone_header()), + get_block_transfers(highest_block.clone_header()), + get_era_summary(state_root_hash), + get_all_bids(state_root_hash), + get_trie(state_root_hash), + get_dictionary_item_by_addr( + state_root_hash, + *Key::dictionary(effects.dict_seed_uref, TEST_DICT_ITEM_KEY.as_bytes()) + .as_dictionary() + .unwrap(), + ), + 
get_dictionary_item_by_seed_uref( + state_root_hash, + effects.dict_seed_uref, + TEST_DICT_ITEM_KEY.to_owned(), + ), + get_dictionary_item_by_legacy_named_key( + state_root_hash, + effects.pre_migration_account_hash, + TEST_DICT_NAME.to_owned(), + TEST_DICT_ITEM_KEY.to_owned(), + ), + get_dictionary_item_by_named_key( + state_root_hash, + effects.entity_addr, + TEST_DICT_NAME.to_owned(), + TEST_DICT_ITEM_KEY.to_owned(), + ), + try_spec_exec_invalid(&mut rng), + try_accept_transaction_invalid(&mut rng), + try_accept_transaction(&secret_signing_key), + get_balance(state_root_hash, effects.pre_migration_account_hash), + get_balance_account_not_found(state_root_hash), + get_balance_purse_uref_not_found(state_root_hash), + get_named_keys_by_prefix(state_root_hash, effects.entity_addr), + get_reward( + Some(EraIdentifier::Era(ERA_ONE)), + era_one_validator.clone(), + None, + ), + get_reward( + Some(EraIdentifier::Block(BlockIdentifier::Height(1))), + era_one_validator, + None, + ), + get_protocol_version(protocol_version), + get_entity(state_root_hash, effects.entity_addr), + get_entity_without_bytecode(state_root_hash, effects.entity_addr), + get_entity_pre_migration_account(state_root_hash, effects.pre_migration_account_hash), + get_entity_post_migration_account(state_root_hash, effects.post_migration_account_hash), + get_entity_pre_migration_contract(state_root_hash, effects.pre_migration_contract_hash), + get_entity_post_migration_contract(state_root_hash, effects.post_migration_contract_hash), + get_package(state_root_hash, effects.package_addr), + get_package_pre_migration(state_root_hash, effects.pre_migration_contract_package_hash), + get_package_post_migration( + state_root_hash, + effects.post_migration_contract_package_hash, + ), + ]; + + for ( + index, + TestCase { + name, + request, + asserter, + }, + ) in test_cases.iter().enumerate() + { + let original_request_bytes = { + let header = CommandHeader::new(request.tag(), index as u16); + let header_bytes = 
ToBytes::to_bytes(&header).expect("should serialize"); + let request_bytes = ToBytes::to_bytes(&request).expect("should serialize"); + + [header_bytes, request_bytes].concat() + }; + + client + .send(BinaryMessage::new(original_request_bytes.clone())) + .await + .expect("should send message"); + + let response = timeout(Duration::from_secs(10), client.next()) + .await + .unwrap_or_else(|err| panic!("{}: should complete without timeout: {}", name, err)) + .unwrap_or_else(|| panic!("{}: should have bytes", name)) + .unwrap_or_else(|err| panic!("{}: should have ok response: {}", name, err)); + let (binary_response_and_request, _): (BinaryResponseAndRequest, _) = + FromBytes::from_bytes(response.payload()).expect("should deserialize response"); + + let bytes_sent_via_tcp = Bytes::from(original_request_bytes).to_bytes().unwrap(); + let mirrored_request_bytes = binary_response_and_request.request(); + assert_eq!(mirrored_request_bytes, bytes_sent_via_tcp, "{}", name); + + binary_response_and_request.request(); + + assert!(asserter(binary_response_and_request.response()), "{}", name); + } + + let (_net, _rng) = timeout(Duration::from_secs(10), finish_cranking) + .await + .unwrap_or_else(|_| panic!("should finish cranking without timeout")); +} + +fn block_header_info(hash: BlockHash) -> TestCase { + TestCase { + name: "block_header_info", + request: Command::Get( + InformationRequest::BlockHeader(Some(BlockIdentifier::Hash(hash))) + .try_into() + .expect("should convert"), + ), + asserter: Box::new(move |response| { + assert_response::(response, Some(ResponseType::BlockHeader), |header| { + header.block_hash() == hash + }) + }), + } +} + +fn block_with_signatures_info(hash: BlockHash) -> TestCase { + TestCase { + name: "block_with_signatures_info", + request: Command::Get( + InformationRequest::BlockWithSignatures(Some(BlockIdentifier::Hash(hash))) + .try_into() + .expect("should convert"), + ), + asserter: Box::new(move |response| { + assert_response::( + response, + 
Some(ResponseType::BlockWithSignatures), + |header| *header.block().hash() == hash, + ) + }), + } +} + +fn peers() -> TestCase { + TestCase { + name: "peers", + request: Command::Get(GetRequest::Information { + info_type_tag: InformationRequestTag::Peers.into(), + key: vec![], + }), + asserter: Box::new(|response| { + assert_response::(response, Some(ResponseType::Peers), |peers| { + !peers.into_inner().is_empty() + }) + }), + } +} + +fn uptime() -> TestCase { + TestCase { + name: "uptime", + request: Command::Get(GetRequest::Information { + info_type_tag: InformationRequestTag::Uptime.into(), + key: vec![], + }), + asserter: Box::new(|response| { + assert_response::(response, Some(ResponseType::Uptime), |uptime| { + uptime.into_inner() > 0 + }) + }), + } +} + +fn last_progress() -> TestCase { + TestCase { + name: "last_progress", + request: Command::Get(GetRequest::Information { + info_type_tag: InformationRequestTag::LastProgress.into(), + key: vec![], + }), + asserter: Box::new(|response| { + assert_response::( + response, + Some(ResponseType::LastProgress), + |last_progress| last_progress.into_inner().millis() > 0, + ) + }), + } +} + +fn reactor_state() -> TestCase { + TestCase { + name: "reactor_state", + request: Command::Get(GetRequest::Information { + info_type_tag: InformationRequestTag::ReactorState.into(), + key: vec![], + }), + asserter: Box::new(|response| { + assert_response::( + response, + Some(ResponseType::ReactorState), + |reactor_state| matches!(reactor_state.into_inner().as_str(), "Validate"), + ) + }), + } +} + +fn network_name() -> TestCase { + TestCase { + name: "network_name", + request: Command::Get(GetRequest::Information { + info_type_tag: InformationRequestTag::NetworkName.into(), + key: vec![], + }), + asserter: Box::new(|response| { + assert_response::( + response, + Some(ResponseType::NetworkName), + |network_name| &network_name.into_inner() == "casper-example", + ) + }), + } +} + +fn consensus_validator_changes() -> TestCase { + 
TestCase { + name: "consensus_validator_changes", + request: Command::Get(GetRequest::Information { + info_type_tag: InformationRequestTag::ConsensusValidatorChanges.into(), + key: vec![], + }), + asserter: Box::new(|response| { + assert_response::( + response, + Some(ResponseType::ConsensusValidatorChanges), + |cvc| cvc.into_inner().is_empty(), + ) + }), + } +} + +fn block_synchronizer_status() -> TestCase { + TestCase { + name: "block_synchronizer_status", + request: Command::Get(GetRequest::Information { + info_type_tag: InformationRequestTag::BlockSynchronizerStatus.into(), + key: vec![], + }), + asserter: Box::new(|response| { + assert_response::( + response, + Some(ResponseType::BlockSynchronizerStatus), + |bss| bss.historical().is_none() && bss.forward().is_none(), + ) + }), + } +} + +fn available_block_range(expected_height: u64) -> TestCase { + TestCase { + name: "available_block_range", + request: Command::Get(GetRequest::Information { + info_type_tag: InformationRequestTag::AvailableBlockRange.into(), + key: vec![], + }), + asserter: Box::new(move |response| { + assert_response::( + response, + Some(ResponseType::AvailableBlockRange), + |abr| abr.low() == 0 && abr.high() >= expected_height, + ) + }), + } +} + +fn next_upgrade() -> TestCase { + TestCase { + name: "next_upgrade", + request: Command::Get(GetRequest::Information { + info_type_tag: InformationRequestTag::NextUpgrade.into(), + key: vec![], + }), + asserter: Box::new(|response| assert_response::(response, None, |_| true)), + } +} + +fn consensus_status() -> TestCase { + TestCase { + name: "consensus_status", + request: Command::Get(GetRequest::Information { + info_type_tag: InformationRequestTag::ConsensusStatus.into(), + key: vec![], + }), + asserter: Box::new(|response| { + assert_response::( + response, + Some(ResponseType::ConsensusStatus), + |_| true, + ) + }), + } +} + +fn chainspec_raw_bytes(network_chainspec_raw_bytes: ChainspecRawBytes) -> TestCase { + TestCase { + name: 
"chainspec_raw_bytes", + request: Command::Get(GetRequest::Information { + info_type_tag: InformationRequestTag::ChainspecRawBytes.into(), + key: vec![], + }), + asserter: Box::new(move |response| { + assert_response::( + response, + Some(ResponseType::ChainspecRawBytes), + |crb| crb == network_chainspec_raw_bytes, + ) + }), + } +} + +fn latest_switch_block_header() -> TestCase { + TestCase { + name: "latest_switch_block_header", + request: Command::Get(GetRequest::Information { + info_type_tag: InformationRequestTag::LatestSwitchBlockHeader.into(), + key: vec![], + }), + asserter: Box::new(move |response| { + assert_response::(response, Some(ResponseType::BlockHeader), |header| { + header.is_switch_block() + }) + }), + } +} + +fn node_status(expected_version: ProtocolVersion) -> TestCase { + TestCase { + name: "node_status", + request: Command::Get(GetRequest::Information { + info_type_tag: InformationRequestTag::NodeStatus.into(), + key: vec![], + }), + asserter: Box::new(move |response| { + assert_response::( + response, + Some(ResponseType::NodeStatus), + |node_status| { + node_status.protocol_version == expected_version + && !node_status.peers.into_inner().is_empty() + && node_status.chainspec_name == "casper-example" + && node_status.last_added_block_info.is_some() + && node_status.our_public_signing_key.is_some() + && node_status.block_sync.historical().is_none() + && node_status.block_sync.forward().is_none() + && matches!(node_status.reactor_state.into_inner().as_str(), "Validate") + && node_status.latest_switch_block_hash.is_some() + }, + ) + }), + } +} + +fn get_block_header(expected: BlockHeader) -> TestCase { + TestCase { + name: "get_block_header", + request: Command::Get(GetRequest::Record { + record_type_tag: RecordId::BlockHeader.into(), + key: expected.block_hash().to_bytes().unwrap(), + }), + asserter: Box::new(move |response| { + assert_response::(response, Some(ResponseType::BlockHeader), |header| { + header == expected + }) + }), + } +} + +fn 
get_block_transfers(expected: BlockHeader) -> TestCase { + TestCase { + name: "get_block_transfers", + request: Command::Get(GetRequest::Record { + record_type_tag: RecordId::Transfer.into(), + key: expected.block_hash().to_bytes().unwrap(), + }), + asserter: Box::new(move |response| { + validate_metadata(response, Some(ResponseType::Transfers)) + && bincode::deserialize::>(response.payload()).is_ok() + }), + } +} + +fn get_era_summary(state_root_hash: Digest) -> TestCase { + TestCase { + name: "get_era_summary", + request: Command::Get(GetRequest::State(Box::new(GlobalStateRequest::new( + Some(GlobalStateIdentifier::StateRootHash(state_root_hash)), + GlobalStateEntityQualifier::Item { + base_key: Key::EraSummary, + path: vec![], + }, + )))), + asserter: Box::new(|response| { + assert_response::( + response, + Some(ResponseType::GlobalStateQueryResult), + |res| { + let (value, _) = res.into_inner(); + matches!(value, StoredValue::EraInfo(_)) + }, + ) + }), + } +} + +fn get_all_bids(state_root_hash: Digest) -> TestCase { + TestCase { + name: "get_all_bids", + request: Command::Get(GetRequest::State(Box::new(GlobalStateRequest::new( + Some(GlobalStateIdentifier::StateRootHash(state_root_hash)), + GlobalStateEntityQualifier::AllItems { + key_tag: KeyTag::Bid, + }, + )))), + asserter: Box::new(|response| { + assert_response::, _>( + response, + Some(ResponseType::StoredValues), + |res| res.iter().all(|v| matches!(v, StoredValue::BidKind(_))), + ) + }), + } +} + +fn get_trie(digest: Digest) -> TestCase { + TestCase { + name: "get_trie", + request: Command::Get(GetRequest::Trie { trie_key: digest }), + asserter: Box::new(|response| { + assert_response::( + response, + Some(ResponseType::GetTrieFullResult), + |res| res.into_inner().is_some(), + ) + }), + } +} + +fn get_dictionary_item_by_addr(state_root_hash: Digest, addr: DictionaryAddr) -> TestCase { + TestCase { + name: "get_dictionary_item_by_addr", + request: 
Command::Get(GetRequest::State(Box::new(GlobalStateRequest::new( + Some(GlobalStateIdentifier::StateRootHash(state_root_hash)), + GlobalStateEntityQualifier::DictionaryItem { + identifier: DictionaryItemIdentifier::DictionaryItem(addr), + }, + )))), + asserter: Box::new(move |response| { + assert_response::( + response, + Some(ResponseType::DictionaryQueryResult), + |res| { + matches!( + res.into_inner(), + (key, res) if key == Key::Dictionary(addr) && res.value().as_cl_value().is_some() + ) + }, + ) + }), + } +} + +fn get_dictionary_item_by_seed_uref( + state_root_hash: Digest, + seed_uref: URef, + dictionary_item_key: String, +) -> TestCase { + TestCase { + name: "get_dictionary_item_by_seed_uref", + request: Command::Get(GetRequest::State(Box::new(GlobalStateRequest::new( + Some(GlobalStateIdentifier::StateRootHash(state_root_hash)), + GlobalStateEntityQualifier::DictionaryItem { + identifier: DictionaryItemIdentifier::URef { + seed_uref, + dictionary_item_key: dictionary_item_key.clone(), + }, + }, + )))), + asserter: Box::new(move |response| { + assert_response::( + response, + Some(ResponseType::DictionaryQueryResult), + |res| { + let expected_key = Key::dictionary(seed_uref, dictionary_item_key.as_bytes()); + matches!( + res.into_inner(), + (key, res) if key == expected_key && res.value().as_cl_value().is_some() + ) + }, + ) + }), + } +} + +fn get_dictionary_item_by_legacy_named_key( + state_root_hash: Digest, + hash: AccountHash, + dictionary_name: String, + dictionary_item_key: String, +) -> TestCase { + TestCase { + name: "get_dictionary_item_by_legacy_named_key", + request: Command::Get(GetRequest::State(Box::new(GlobalStateRequest::new( + Some(GlobalStateIdentifier::StateRootHash(state_root_hash)), + GlobalStateEntityQualifier::DictionaryItem { + identifier: DictionaryItemIdentifier::AccountNamedKey { + hash, + dictionary_name, + dictionary_item_key, + }, + }, + )))), + asserter: Box::new(|response| { + assert_response::( + response, + 
Some(ResponseType::DictionaryQueryResult), + |res| matches!(res.into_inner(),(_, res) if res.value().as_cl_value().is_some()), + ) + }), + } +} + +fn get_dictionary_item_by_named_key( + state_root_hash: Digest, + addr: EntityAddr, + dictionary_name: String, + dictionary_item_key: String, +) -> TestCase { + TestCase { + name: "get_dictionary_item_by_named_key", + request: Command::Get(GetRequest::State(Box::new(GlobalStateRequest::new( + Some(GlobalStateIdentifier::StateRootHash(state_root_hash)), + GlobalStateEntityQualifier::DictionaryItem { + identifier: DictionaryItemIdentifier::EntityNamedKey { + addr, + dictionary_name, + dictionary_item_key, + }, + }, + )))), + asserter: Box::new(|response| { + assert_response::( + response, + Some(ResponseType::DictionaryQueryResult), + |res| matches!(res.into_inner(),(_, res) if res.value().as_cl_value().is_some()), + ) + }), + } +} + +fn get_balance(state_root_hash: Digest, account_hash: AccountHash) -> TestCase { + TestCase { + name: "get_balance", + request: Command::Get(GetRequest::State(Box::new(GlobalStateRequest::new( + Some(GlobalStateIdentifier::StateRootHash(state_root_hash)), + GlobalStateEntityQualifier::Balance { + purse_identifier: PurseIdentifier::Account(account_hash), + }, + )))), + asserter: Box::new(|response| { + assert_response::( + response, + Some(ResponseType::BalanceResponse), + |res| res.available_balance == U512::one(), + ) + }), + } +} + +fn get_balance_account_not_found(state_root_hash: Digest) -> TestCase { + TestCase { + name: "get_balance_account_not_found", + request: Command::Get(GetRequest::State(Box::new(GlobalStateRequest::new( + Some(GlobalStateIdentifier::StateRootHash(state_root_hash)), + GlobalStateEntityQualifier::Balance { + purse_identifier: PurseIdentifier::Account(AccountHash([9; 32])), + }, + )))), + asserter: Box::new(|response| response.error_code() == ErrorCode::PurseNotFound as u16), + } +} + +fn get_balance_purse_uref_not_found(state_root_hash: Digest) -> TestCase { + 
TestCase { + name: "get_balance_purse_uref_not_found", + request: Command::Get(GetRequest::State(Box::new(GlobalStateRequest::new( + Some(GlobalStateIdentifier::StateRootHash(state_root_hash)), + GlobalStateEntityQualifier::Balance { + purse_identifier: PurseIdentifier::Purse(URef::new([9; 32], Default::default())), + }, + )))), + asserter: Box::new(|response| response.error_code() == ErrorCode::PurseNotFound as u16), + } +} + +fn get_named_keys_by_prefix(state_root_hash: Digest, entity_addr: EntityAddr) -> TestCase { + TestCase { + name: "get_named_keys_by_prefix", + request: Command::Get(GetRequest::State(Box::new(GlobalStateRequest::new( + Some(GlobalStateIdentifier::StateRootHash(state_root_hash)), + GlobalStateEntityQualifier::ItemsByPrefix { + key_prefix: KeyPrefix::NamedKeysByEntity(entity_addr), + }, + )))), + asserter: Box::new(|response| { + assert_response::, _>( + response, + Some(ResponseType::StoredValues), + |res| res.iter().all(|v| matches!(v, StoredValue::NamedKey(_))), + ) + }), + } +} + +fn get_reward( + era_identifier: Option, + validator: PublicKey, + delegator: Option, +) -> TestCase { + let key = InformationRequest::Reward { + era_identifier, + validator: validator.into(), + delegator: delegator.map(Box::new), + }; + + TestCase { + name: "get_reward", + request: Command::Get(GetRequest::Information { + info_type_tag: key.tag().into(), + key: key.to_bytes().expect("should serialize key"), + }), + asserter: Box::new(move |response| { + assert_response::(response, Some(ResponseType::Reward), |reward| { + // test fixture sets delegation rate to 0 + reward.amount() > U512::zero() && reward.delegation_rate() == 0 + }) + }), + } +} + +fn get_protocol_version(expected: ProtocolVersion) -> TestCase { + let key = InformationRequest::ProtocolVersion; + + TestCase { + name: "get_protocol_version", + request: Command::Get(GetRequest::Information { + info_type_tag: key.tag().into(), + key: vec![], + }), + asserter: Box::new(move |response| { + 
assert_response::( + response, + Some(ResponseType::ProtocolVersion), + |version| expected == version, + ) + }), + } +} + +fn get_entity(state_root_hash: Digest, entity_addr: EntityAddr) -> TestCase { + let key = InformationRequest::Entity { + state_identifier: Some(GlobalStateIdentifier::StateRootHash(state_root_hash)), + identifier: EntityIdentifier::EntityAddr(entity_addr), + include_bytecode: true, + }; + + TestCase { + name: "get_entity", + request: Command::Get(GetRequest::Information { + info_type_tag: key.tag().into(), + key: key.to_bytes().expect("should serialize key"), + }), + asserter: Box::new(|response| { + assert_response::( + response, + Some(ResponseType::AddressableEntityInformation), + |res| res.bytecode().is_some(), + ) + }), + } +} + +fn get_entity_without_bytecode(state_root_hash: Digest, entity_addr: EntityAddr) -> TestCase { + let key = InformationRequest::Entity { + state_identifier: Some(GlobalStateIdentifier::StateRootHash(state_root_hash)), + identifier: EntityIdentifier::EntityAddr(entity_addr), + include_bytecode: false, + }; + + TestCase { + name: "get_entity_without_bytecode", + request: Command::Get(GetRequest::Information { + info_type_tag: key.tag().into(), + key: key.to_bytes().expect("should serialize key"), + }), + asserter: Box::new(|response| { + assert_response::( + response, + Some(ResponseType::AddressableEntityInformation), + |res| res.bytecode().is_none(), + ) + }), + } +} + +fn get_entity_pre_migration_account( + state_root_hash: Digest, + account_hash: AccountHash, +) -> TestCase { + let key = InformationRequest::Entity { + state_identifier: Some(GlobalStateIdentifier::StateRootHash(state_root_hash)), + identifier: EntityIdentifier::AccountHash(account_hash), + include_bytecode: false, + }; + + TestCase { + name: "get_entity_pre_migration_account", + request: Command::Get(GetRequest::Information { + info_type_tag: key.tag().into(), + key: key.to_bytes().expect("should serialize key"), + }), + asserter: Box::new(move 
|response| { + assert_response::( + response, + Some(ResponseType::AccountInformation), + |res| res.account().account_hash() == account_hash, + ) + }), + } +} + +fn get_entity_post_migration_account( + state_root_hash: Digest, + account_hash: AccountHash, +) -> TestCase { + let key = InformationRequest::Entity { + state_identifier: Some(GlobalStateIdentifier::StateRootHash(state_root_hash)), + identifier: EntityIdentifier::AccountHash(account_hash), + include_bytecode: false, + }; + + TestCase { + name: "get_entity_post_migration_account", + request: Command::Get(GetRequest::Information { + info_type_tag: key.tag().into(), + key: key.to_bytes().expect("should serialize key"), + }), + asserter: Box::new(move |response| { + assert_response::( + response, + Some(ResponseType::AddressableEntityInformation), + |_| true, + ) + }), + } +} + +fn get_entity_pre_migration_contract( + state_root_hash: Digest, + contract_hash: ContractHash, +) -> TestCase { + let key = InformationRequest::Entity { + state_identifier: Some(GlobalStateIdentifier::StateRootHash(state_root_hash)), + identifier: EntityIdentifier::ContractHash(contract_hash), + include_bytecode: true, + }; + + TestCase { + name: "get_entity_pre_migration_contract", + request: Command::Get(GetRequest::Information { + info_type_tag: key.tag().into(), + key: key.to_bytes().expect("should serialize key"), + }), + asserter: Box::new(move |response| { + assert_response::( + response, + Some(ResponseType::ContractInformation), + |res| res.wasm().is_some(), + ) + }), + } +} + +fn get_entity_post_migration_contract( + state_root_hash: Digest, + contract_hash: ContractHash, +) -> TestCase { + let key = InformationRequest::Entity { + state_identifier: Some(GlobalStateIdentifier::StateRootHash(state_root_hash)), + identifier: EntityIdentifier::ContractHash(contract_hash), + include_bytecode: true, + }; + + TestCase { + name: "get_entity_post_migration_contract", + request: Command::Get(GetRequest::Information { + info_type_tag: 
key.tag().into(), + key: key.to_bytes().expect("should serialize key"), + }), + asserter: Box::new(move |response| { + assert_response::( + response, + Some(ResponseType::AddressableEntityInformation), + |res| res.bytecode().is_some(), + ) + }), + } +} + +fn get_package(state_root_hash: Digest, package_addr: PackageAddr) -> TestCase { + let key = InformationRequest::Package { + state_identifier: Some(GlobalStateIdentifier::StateRootHash(state_root_hash)), + identifier: PackageIdentifier::PackageAddr(package_addr), + }; + + TestCase { + name: "get_package", + request: Command::Get(GetRequest::Information { + info_type_tag: key.tag().into(), + key: key.to_bytes().expect("should serialize key"), + }), + asserter: Box::new(move |response| { + assert_response::, _>( + response, + Some(ResponseType::PackageWithProof), + |_| true, + ) + }), + } +} + +fn get_package_pre_migration( + state_root_hash: Digest, + contract_package_hash: ContractPackageHash, +) -> TestCase { + let key = InformationRequest::Package { + state_identifier: Some(GlobalStateIdentifier::StateRootHash(state_root_hash)), + identifier: PackageIdentifier::ContractPackageHash(contract_package_hash), + }; + + TestCase { + name: "get_package_pre_migration", + request: Command::Get(GetRequest::Information { + info_type_tag: key.tag().into(), + key: key.to_bytes().expect("should serialize key"), + }), + asserter: Box::new(move |response| { + assert_response::, _>( + response, + Some(ResponseType::ContractPackageWithProof), + |_| true, + ) + }), + } +} + +fn get_package_post_migration( + state_root_hash: Digest, + contract_package_hash: ContractPackageHash, +) -> TestCase { + let key = InformationRequest::Package { + state_identifier: Some(GlobalStateIdentifier::StateRootHash(state_root_hash)), + identifier: PackageIdentifier::ContractPackageHash(contract_package_hash), + }; + + TestCase { + name: "get_package_post_migration", + request: Command::Get(GetRequest::Information { + info_type_tag: key.tag().into(), + 
key: key.to_bytes().expect("should serialize key"), + }), + asserter: Box::new(move |response| { + assert_response::, _>( + response, + Some(ResponseType::PackageWithProof), + |_| true, + ) + }), + } +} + +fn try_accept_transaction(key: &SecretKey) -> TestCase { + let transaction = Transaction::V1( + TransactionV1Builder::new_targeting_invocable_entity_via_alias( + "Test", + "call", + casper_types::TransactionRuntimeParams::VmCasperV1, + ) + .with_secret_key(key) + .with_chain_name("casper-example") + .build() + .unwrap(), + ); + TestCase { + name: "try_accept_transaction", + request: Command::TryAcceptTransaction { transaction }, + asserter: Box::new(|response| response.error_code() == ErrorCode::NoError as u16), + } +} + +fn try_accept_transaction_invalid(rng: &mut TestRng) -> TestCase { + let transaction = Transaction::V1(TransactionV1Builder::new_random(rng).build().unwrap()); + TestCase { + name: "try_accept_transaction_invalid", + request: Command::TryAcceptTransaction { transaction }, + asserter: Box::new(|response| ErrorCode::try_from(response.error_code()).is_ok()), + } +} + +fn try_spec_exec_invalid(rng: &mut TestRng) -> TestCase { + let transaction = Transaction::V1(TransactionV1Builder::new_random(rng).build().unwrap()); + TestCase { + name: "try_spec_exec_invalid", + request: Command::TrySpeculativeExec { transaction }, + asserter: Box::new(|response| ErrorCode::try_from(response.error_code()).is_ok()), + } +} + +#[tokio::test] +async fn binary_port_component_rejects_requests_with_invalid_header_version() { + testing::init_logging(); + + let (mut client, (finish_cranking, _)) = setup().await; + + let request = Command::Get(GetRequest::Information { + info_type_tag: InformationRequestTag::Uptime.into(), + key: vec![], + }); + + let mut header = CommandHeader::new(request.tag(), 0); + + // Make the binary protocol version incompatible. 
+ header.set_binary_request_version(header.version() + 1); + + let header_bytes = ToBytes::to_bytes(&header).expect("should serialize"); + let original_request_bytes = header_bytes + .iter() + .chain( + ToBytes::to_bytes(&request) + .expect("should serialize") + .iter(), + ) + .cloned() + .collect::>(); + client + .send(BinaryMessage::new(original_request_bytes.clone())) + .await + .expect("should send message"); + let response = timeout(Duration::from_secs(10), client.next()) + .await + .unwrap_or_else(|_| panic!("should complete without timeout")) + .unwrap_or_else(|| panic!("should have bytes")) + .unwrap_or_else(|_| panic!("should have ok response")); + let (binary_response_and_request, _): (BinaryResponseAndRequest, _) = + FromBytes::from_bytes(response.payload()).expect("should deserialize response"); + + assert_eq!( + binary_response_and_request.response().error_code(), + ErrorCode::CommandHeaderVersionMismatch as u16 + ); + + let (_net, _rng) = timeout(Duration::from_secs(10), finish_cranking) + .await + .unwrap_or_else(|_| panic!("should finish cranking without timeout")); +} diff --git a/node/src/reactor/main_reactor/tests/configs_override.rs b/node/src/reactor/main_reactor/tests/configs_override.rs new file mode 100644 index 0000000000..03427773d0 --- /dev/null +++ b/node/src/reactor/main_reactor/tests/configs_override.rs @@ -0,0 +1,177 @@ +use std::collections::BTreeSet; + +use num_rational::Ratio; + +use casper_types::{ + ConsensusProtocolName, FeeHandling, HoldBalanceHandling, PricingHandling, PublicKey, + RefundHandling, TimeDiff, TransactionV1Config, +}; + +use crate::types::SyncHandling; + +/// Options to allow overriding default chainspec and config settings. 
+pub(crate) struct ConfigsOverride { + pub era_duration: TimeDiff, + pub minimum_block_time: TimeDiff, + pub minimum_era_height: u64, + pub unbonding_delay: u64, + pub round_seigniorage_rate: Ratio, + pub consensus_protocol: ConsensusProtocolName, + pub finders_fee: Ratio, + pub finality_signature_proportion: Ratio, + pub signature_rewards_max_delay: u64, + pub storage_multiplier: u8, + pub max_gas_price: u8, + pub min_gas_price: u8, + pub upper_threshold: u64, + pub lower_threshold: u64, + pub max_block_size: u32, + pub block_gas_limit: u64, + pub refund_handling_override: Option, + pub fee_handling_override: Option, + pub pricing_handling_override: Option, + pub allow_prepaid_override: Option, + pub balance_hold_interval_override: Option, + pub administrators: Option>, + pub chain_name: Option, + pub gas_hold_balance_handling: Option, + pub transaction_v1_override: Option, + pub node_config_override: NodeConfigOverride, +} + +impl ConfigsOverride { + pub(crate) fn with_refund_handling(mut self, refund_handling: RefundHandling) -> Self { + self.refund_handling_override = Some(refund_handling); + self + } + + pub(crate) fn with_fee_handling(mut self, fee_handling: FeeHandling) -> Self { + self.fee_handling_override = Some(fee_handling); + self + } + + pub(crate) fn with_pricing_handling(mut self, pricing_handling: PricingHandling) -> Self { + self.pricing_handling_override = Some(pricing_handling); + self + } + + #[allow(unused)] + pub(crate) fn with_allow_prepaid(mut self, allow_prepaid: bool) -> Self { + self.allow_prepaid_override = Some(allow_prepaid); + self + } + + pub(crate) fn with_balance_hold_interval(mut self, balance_hold_interval: TimeDiff) -> Self { + self.balance_hold_interval_override = Some(balance_hold_interval); + self + } + + pub(crate) fn with_min_gas_price(mut self, min_gas_price: u8) -> Self { + self.min_gas_price = min_gas_price; + self + } + + pub(crate) fn with_max_gas_price(mut self, max_gas_price: u8) -> Self { + self.max_gas_price = 
max_gas_price; + self + } + + pub(crate) fn with_lower_threshold(mut self, lower_threshold: u64) -> Self { + self.lower_threshold = lower_threshold; + self + } + + pub(crate) fn with_upper_threshold(mut self, upper_threshold: u64) -> Self { + self.upper_threshold = upper_threshold; + self + } + + pub(crate) fn with_block_size(mut self, max_block_size: u32) -> Self { + self.max_block_size = max_block_size; + self + } + + pub(crate) fn with_block_gas_limit(mut self, block_gas_limit: u64) -> Self { + self.block_gas_limit = block_gas_limit; + self + } + + pub(crate) fn with_minimum_era_height(mut self, minimum_era_height: u64) -> Self { + self.minimum_era_height = minimum_era_height; + self + } + + pub(crate) fn with_administrators(mut self, administrators: BTreeSet) -> Self { + self.administrators = Some(administrators); + self + } + + pub(crate) fn with_chain_name(mut self, chain_name: String) -> Self { + self.chain_name = Some(chain_name); + self + } + + pub(crate) fn with_gas_hold_balance_handling( + mut self, + gas_hold_balance_handling: HoldBalanceHandling, + ) -> Self { + self.gas_hold_balance_handling = Some(gas_hold_balance_handling); + self + } + + pub(crate) fn with_transaction_v1_config( + mut self, + transaction_v1config: TransactionV1Config, + ) -> Self { + self.transaction_v1_override = Some(transaction_v1config); + self + } + + pub(crate) fn with_idle_tolerance(mut self, idle_tolernace: TimeDiff) -> Self { + let config = NodeConfigOverride { + idle_tolerance: Some(idle_tolernace), + ..Default::default() + }; + self.node_config_override = config; + self + } +} + +impl Default for ConfigsOverride { + fn default() -> Self { + ConfigsOverride { + era_duration: TimeDiff::from_millis(0), // zero means use the default value + minimum_block_time: "1second".parse().unwrap(), + minimum_era_height: 2, + unbonding_delay: 3, + round_seigniorage_rate: Ratio::new(1, 100), + consensus_protocol: ConsensusProtocolName::Zug, + finders_fee: Ratio::new(1, 4), + 
finality_signature_proportion: Ratio::new(1, 3), + signature_rewards_max_delay: 5, + storage_multiplier: 1, + max_gas_price: 3, + min_gas_price: 1, + upper_threshold: 90, + lower_threshold: 50, + max_block_size: 10_485_760u32, + block_gas_limit: 10_000_000_000_000u64, + refund_handling_override: None, + fee_handling_override: None, + pricing_handling_override: None, + allow_prepaid_override: None, + balance_hold_interval_override: None, + administrators: None, + chain_name: None, + gas_hold_balance_handling: None, + transaction_v1_override: None, + node_config_override: NodeConfigOverride::default(), + } + } +} + +#[derive(Clone, Default)] +pub(crate) struct NodeConfigOverride { + pub sync_handling_override: Option, + pub idle_tolerance: Option, +} diff --git a/node/src/reactor/main_reactor/tests/consensus_rules.rs b/node/src/reactor/main_reactor/tests/consensus_rules.rs new file mode 100644 index 0000000000..2052d72076 --- /dev/null +++ b/node/src/reactor/main_reactor/tests/consensus_rules.rs @@ -0,0 +1,295 @@ +use std::{collections::BTreeMap, sync::Arc}; + +use either::Either; +use tokio::time::{self}; +use tracing::{error, info}; + +use casper_types::{ + system::auction::BidsExt, ConsensusProtocolName, EraId, PublicKey, SecretKey, Timestamp, U512, +}; + +use crate::{ + components::consensus::{self, NewBlockPayload}, + effect::{requests::NetworkRequest, EffectExt}, + protocol::Message, + reactor::main_reactor::{ + tests::{ + configs_override::ConfigsOverride, fixture::TestFixture, initial_stakes::InitialStakes, + switch_blocks::SwitchBlocks, ERA_TWO, ONE_MIN, + }, + MainEvent, + }, + types::BlockPayload, +}; + +#[tokio::test] +async fn run_equivocator_network() { + let mut rng = crate::new_rng(); + + let alice_secret_key = Arc::new(SecretKey::random(&mut rng)); + let alice_public_key = PublicKey::from(&*alice_secret_key); + let bob_secret_key = Arc::new(SecretKey::random(&mut rng)); + let bob_public_key = PublicKey::from(&*bob_secret_key); + let 
charlie_secret_key = Arc::new(SecretKey::random(&mut rng)); + let charlie_public_key = PublicKey::from(&*charlie_secret_key); + + let mut stakes = BTreeMap::new(); + stakes.insert(alice_public_key.clone(), U512::from(1)); + stakes.insert(bob_public_key.clone(), U512::from(1)); + stakes.insert(charlie_public_key, U512::from(u64::MAX)); + + // Here's where things go wrong: Bob doesn't run a node at all, and Alice runs two! + let secret_keys = vec![ + alice_secret_key.clone(), + alice_secret_key, + charlie_secret_key, + ]; + + // We configure the era to take 15 rounds. That should guarantee that the two nodes equivocate. + let spec_override = ConfigsOverride { + minimum_era_height: 10, + consensus_protocol: ConsensusProtocolName::Highway, + storage_multiplier: 2, + ..Default::default() + }; + + let mut fixture = + TestFixture::new_with_keys(rng, secret_keys, stakes.clone(), Some(spec_override)).await; + + let min_round_len = fixture.chainspec.core_config.minimum_block_time; + let mut maybe_first_message_time = None; + + let mut alice_reactors = fixture + .network + .reactors_mut() + .filter(|reactor| *reactor.inner().consensus().public_key() == alice_public_key); + + // Delay all messages to and from the first of Alice's nodes until three rounds after the first + // message. Further, significantly delay any incoming pings to avoid the node detecting the + // doppelganger and deactivating itself. + alice_reactors.next().unwrap().set_filter(move |event| { + if crate::reactor::main_reactor::tests::is_ping(&event) { + return Either::Left(time::sleep((min_round_len * 30).into()).event(move |_| event)); + } + let now = Timestamp::now(); + match &event { + MainEvent::ConsensusMessageIncoming(_) => {} + MainEvent::NetworkRequest( + NetworkRequest::SendMessage { payload, .. } + | NetworkRequest::ValidatorBroadcast { payload, .. } + | NetworkRequest::Gossip { payload, .. 
}, + ) if matches!(**payload, Message::Consensus(_)) => {} + _ => return Either::Right(event), + }; + let first_message_time = *maybe_first_message_time.get_or_insert(now); + if now < first_message_time + min_round_len * 3 { + return Either::Left(time::sleep(min_round_len.into()).event(move |_| event)); + } + Either::Right(event) + }); + + // Significantly delay all incoming pings to the second of Alice's nodes. + alice_reactors.next().unwrap().set_filter(move |event| { + if crate::reactor::main_reactor::tests::is_ping(&event) { + return Either::Left(time::sleep((min_round_len * 30).into()).event(move |_| event)); + } + Either::Right(event) + }); + + drop(alice_reactors); + + let era_count = 4; + + let timeout = ONE_MIN * (era_count + 1) as u32; + info!("Waiting for {} eras to end.", era_count); + fixture + .run_until_stored_switch_block_header(EraId::new(era_count - 1), timeout) + .await; + + // network settled; select data to analyze + let switch_blocks = SwitchBlocks::collect(fixture.network.nodes(), era_count); + let mut era_bids = BTreeMap::new(); + for era in 0..era_count { + era_bids.insert(era, switch_blocks.bids(fixture.network.nodes(), era)); + } + + // Since this setup sometimes produces no equivocation or an equivocation in era 2 rather than + // era 1, we set an offset here. If neither era has an equivocation, exit early. + // TODO: Remove this once https://github.com/casper-network/casper-node/issues/1859 is fixed. + for switch_block in &switch_blocks.headers { + let era_id = switch_block.era_id(); + let count = switch_blocks.equivocators(era_id.value()).len(); + info!("equivocators in {}: {}", era_id, count); + } + let offset = if !switch_blocks.equivocators(1).is_empty() { + 0 + } else if !switch_blocks.equivocators(2).is_empty() { + error!("failed to equivocate in era 1 - asserting equivocation detected in era 2"); + 1 + } else { + error!("failed to equivocate in era 1 or 2"); + return; + }; + + // Era 0 consists only of the genesis block. 
+ // In era 1, Alice equivocates. Since eviction takes place with a delay of one + // (`auction_delay`) era, she is still included in the next era's validator set. + let next_era_id = 1 + offset; + + assert_eq!( + switch_blocks.equivocators(next_era_id), + [alice_public_key.clone()] + ); + let next_era_bids = era_bids.get(&next_era_id).expect("should have offset era"); + + let next_era_alice = next_era_bids + .validator_bid(&alice_public_key) + .expect("should have Alice's offset bid"); + assert!( + next_era_alice.inactive(), + "Alice's bid should be inactive in offset era." + ); + assert!(switch_blocks + .next_era_validators(next_era_id) + .contains_key(&alice_public_key)); + + // In era 2 Alice is banned. Banned validators count neither as faulty nor inactive, even + // though they cannot participate. In the next era, she will be evicted. + let future_era_id = 2 + offset; + assert_eq!(switch_blocks.equivocators(future_era_id), []); + let future_era_bids = era_bids + .get(&future_era_id) + .expect("should have future era"); + let future_era_alice = future_era_bids + .validator_bid(&alice_public_key) + .expect("should have Alice's future bid"); + assert!( + future_era_alice.inactive(), + "Alice's bid should be inactive in future era." + ); + assert!(!switch_blocks + .next_era_validators(future_era_id) + .contains_key(&alice_public_key)); + + // In era 3 Alice is not a validator anymore and her bid remains deactivated. + let era_3 = 3; + if offset == 0 { + assert_eq!(switch_blocks.equivocators(era_3), []); + let era_3_bids = era_bids.get(&era_3).expect("should have era 3 bids"); + let era_3_alice = era_3_bids + .validator_bid(&alice_public_key) + .expect("should have Alice's era 3 bid"); + assert!( + era_3_alice.inactive(), + "Alice's bid should be inactive in era 3." + ); + assert!(!switch_blocks + .next_era_validators(era_3) + .contains_key(&alice_public_key)); + } + + // Bob is inactive. 
+ assert_eq!( + switch_blocks.inactive_validators(1), + [bob_public_key.clone()] + ); + assert_eq!( + switch_blocks.inactive_validators(2), + [bob_public_key.clone()] + ); + + for (era, bids) in era_bids { + for (public_key, stake) in &stakes { + let bid = bids + .validator_bid(public_key) + .expect("should have bid for public key {public_key} in era {era}"); + let staked_amount = bid.staked_amount(); + assert!( + staked_amount >= *stake, + "expected stake {} for public key {} in era {}, found {}", + staked_amount, + public_key, + era, + stake + ); + } + } +} + +// This test exercises a scenario in which a proposed block contains invalid accusations. +// Blocks containing no transactions or transfers used to be incorrectly marked as not needing +// validation even if they contained accusations, which opened up a security hole through which a +// malicious validator could accuse whomever they wanted of equivocating and have these +// accusations accepted by the other validators. This has been patched and the test asserts that +// such a scenario is no longer possible. 
+#[tokio::test] +async fn empty_proposed_block_validation_regression() { + let initial_stakes = InitialStakes::AllEqual { + count: 4, + stake: 100, + }; + let spec_override = ConfigsOverride { + minimum_era_height: 15, + ..Default::default() + }; + let mut fixture = TestFixture::new(initial_stakes, Some(spec_override)).await; + + let malicious_validator = + PublicKey::from(fixture.node_contexts.first().unwrap().secret_key.as_ref()); + info!("Malicious validator: {}", malicious_validator); + let everyone_else: Vec<_> = fixture + .node_contexts + .iter() + .skip(1) + .map(|node_context| PublicKey::from(node_context.secret_key.as_ref())) + .collect(); + let malicious_id = fixture.node_contexts.first().unwrap().id; + let malicious_runner = fixture.network.nodes_mut().get_mut(&malicious_id).unwrap(); + malicious_runner + .reactor_mut() + .inner_mut() + .set_filter(move |event| match event { + MainEvent::Consensus(consensus::Event::NewBlockPayload(NewBlockPayload { + era_id, + block_payload: _, + block_context, + })) => { + info!("Accusing everyone else!"); + // We hook into the NewBlockPayload event to replace the block being proposed with + // an empty one that accuses all the validators, except the malicious validator. + Either::Right(MainEvent::Consensus(consensus::Event::NewBlockPayload( + NewBlockPayload { + era_id, + block_payload: Arc::new(BlockPayload::new( + BTreeMap::new(), + everyone_else.clone(), + Default::default(), + false, + 1u8, + )), + block_context, + }, + ))) + } + event => Either::Right(event), + }); + + info!("Waiting for the first era after genesis to end."); + fixture.run_until_consensus_in_era(ERA_TWO, ONE_MIN).await; + let switch_blocks = SwitchBlocks::collect(fixture.network.nodes(), 2); + + // Nobody actually double-signed. The accusations should have had no effect. 
+ assert_eq!( + switch_blocks.equivocators(0), + [], + "expected no equivocators" + ); + // If the malicious validator was the first proposer, all their Highway units might be invalid, + // because they all refer to the invalid proposal, so they might get flagged as inactive. No + // other validators should be considered inactive. + match switch_blocks.inactive_validators(0) { + [] => {} + [inactive_validator] if malicious_validator == *inactive_validator => {} + inactive => panic!("unexpected inactive validators: {:?}", inactive), + } +} diff --git a/node/src/reactor/main_reactor/tests/fixture.rs b/node/src/reactor/main_reactor/tests/fixture.rs new file mode 100644 index 0000000000..31bd85533b --- /dev/null +++ b/node/src/reactor/main_reactor/tests/fixture.rs @@ -0,0 +1,1012 @@ +use itertools::Itertools; +use std::{ + collections::BTreeMap, convert::TryFrom, iter, net::SocketAddr, str::FromStr, sync::Arc, + time::Duration, +}; + +use num_rational::Ratio; +use num_traits::Zero; +use rand::Rng; +use tempfile::TempDir; +use tokio::time::error::Elapsed; +use tracing::info; + +use casper_storage::{ + data_access_layer::{ + balance::{BalanceHandling, BalanceResult}, + BalanceRequest, BidsRequest, BidsResult, ProofHandling, + }, + global_state::state::{StateProvider, StateReader}, +}; +use casper_types::{ + execution::{ExecutionResult, TransformV2}, + system::auction::{DelegationRate, DelegatorKind}, + testing::TestRng, + AccountConfig, AccountsConfig, ActivationPoint, AddressableEntityHash, Block, BlockBody, + BlockHash, BlockV2, CLValue, Chainspec, ChainspecRawBytes, EraEnd, EraId, Key, Motes, + NextUpgrade, ProtocolVersion, PublicKey, SecretKey, StoredValue, SystemHashRegistry, TimeDiff, + Timestamp, Transaction, TransactionHash, ValidatorConfig, U512, +}; + +use crate::{ + components::{gossiper, network, storage}, + effect::EffectExt, + reactor::main_reactor::{ + tests::{ + configs_override::{ConfigsOverride, NodeConfigOverride}, + initial_stakes::InitialStakes, + 
Nodes, ERA_TWO, + }, + Config, MainReactor, ReactorState, + }, + testing::{self, filter_reactor::FilterReactor, network::TestingNetwork}, + types::NodeId, + utils::{External, Loadable, Source, RESOURCES_PATH}, + WithDir, +}; + +pub(crate) struct NodeContext { + pub id: NodeId, + pub secret_key: Arc, + pub config: Config, + pub storage_dir: TempDir, +} + +pub(crate) struct TestFixture { + pub rng: TestRng, + pub node_contexts: Vec, + pub network: TestingNetwork>, + pub chainspec: Arc, + pub chainspec_raw_bytes: Arc, +} + +impl TestFixture { + /// Sets up a new fixture with the number of nodes indicated by `initial_stakes`. + /// + /// Runs the network until all nodes are initialized (i.e. none of their reactor states are + /// still `ReactorState::Initialize`). + pub(crate) async fn new( + initial_stakes: InitialStakes, + spec_override: Option, + ) -> Self { + let rng = TestRng::new(); + Self::new_with_rng(initial_stakes, spec_override, rng).await + } + + pub(crate) async fn new_with_rng( + initial_stakes: InitialStakes, + spec_override: Option, + mut rng: TestRng, + ) -> Self { + let stake_values = match initial_stakes { + InitialStakes::FromVec(stakes) => { + stakes.into_iter().map(|stake| stake.into()).collect() + } + InitialStakes::Random { count } => { + // By default, we use very large stakes so we would catch overflow issues. 
+ iter::from_fn(|| Some(U512::from(rng.gen_range(100..999)) * U512::from(u128::MAX))) + .take(count) + .collect() + } + InitialStakes::AllEqual { count, stake } => { + vec![stake.into(); count] + } + }; + + let secret_keys: Vec> = (0..stake_values.len()) + .map(|_| Arc::new(SecretKey::random(&mut rng))) + .collect(); + + let stakes = secret_keys + .iter() + .zip(stake_values) + .map(|(secret_key, stake)| (PublicKey::from(secret_key.as_ref()), stake)) + .collect(); + Self::new_with_keys(rng, secret_keys, stakes, spec_override).await + } + + pub(crate) async fn new_with_keys( + mut rng: TestRng, + secret_keys: Vec>, + stakes: BTreeMap, + spec_override: Option, + ) -> Self { + testing::init_logging(); + + // Load the `local` chainspec. + let (mut chainspec, chainspec_raw_bytes) = + <(Chainspec, ChainspecRawBytes)>::from_resources("local"); + + let min_motes = 100_000_000_000_000_000u64; + let max_motes = min_motes * 100; + let balance = U512::from(rng.gen_range(min_motes..max_motes)); + + // Override accounts with those generated from the keys. + let accounts = stakes + .into_iter() + .map(|(public_key, bonded_amount)| { + let validator_config = + ValidatorConfig::new(Motes::new(bonded_amount), DelegationRate::zero()); + AccountConfig::new(public_key, Motes::new(balance), Some(validator_config)) + }) + .collect(); + let delegators = vec![]; + let administrators = vec![]; + chainspec.network_config.accounts_config = + AccountsConfig::new(accounts, delegators, administrators); + + // Allow 2 seconds startup time per validator. 
+ let genesis_time = Timestamp::now() + TimeDiff::from_seconds(secret_keys.len() as u32 * 2); + info!( + "creating test chain configuration, genesis: {}", + genesis_time + ); + chainspec.protocol_config.activation_point = ActivationPoint::Genesis(genesis_time); + chainspec.core_config.finality_threshold_fraction = Ratio::new(34, 100); + chainspec.core_config.era_duration = TimeDiff::from_millis(0); + chainspec.core_config.auction_delay = 1; + chainspec.core_config.validator_slots = 100; + let ConfigsOverride { + era_duration, + minimum_block_time, + minimum_era_height, + unbonding_delay, + round_seigniorage_rate, + consensus_protocol, + finders_fee, + finality_signature_proportion, + signature_rewards_max_delay, + storage_multiplier, + max_gas_price, + min_gas_price, + upper_threshold, + lower_threshold, + max_block_size, + block_gas_limit, + refund_handling_override, + fee_handling_override, + pricing_handling_override, + allow_prepaid_override, + balance_hold_interval_override, + administrators, + chain_name, + gas_hold_balance_handling, + transaction_v1_override, + node_config_override, + } = spec_override.unwrap_or_default(); + if era_duration != TimeDiff::from_millis(0) { + chainspec.core_config.era_duration = era_duration; + } + info!(?block_gas_limit); + chainspec.core_config.minimum_block_time = minimum_block_time; + chainspec.core_config.minimum_era_height = minimum_era_height; + chainspec.core_config.unbonding_delay = unbonding_delay; + chainspec.core_config.round_seigniorage_rate = round_seigniorage_rate; + chainspec.core_config.consensus_protocol = consensus_protocol; + chainspec.core_config.finders_fee = finders_fee; + chainspec.core_config.finality_signature_proportion = finality_signature_proportion; + chainspec.core_config.minimum_block_time = minimum_block_time; + chainspec.core_config.minimum_era_height = minimum_era_height; + chainspec.vacancy_config.min_gas_price = min_gas_price; + chainspec.vacancy_config.max_gas_price = max_gas_price; + 
chainspec.vacancy_config.upper_threshold = upper_threshold; + chainspec.vacancy_config.lower_threshold = lower_threshold; + chainspec.transaction_config.block_gas_limit = block_gas_limit; + chainspec.transaction_config.max_block_size = max_block_size; + chainspec.highway_config.maximum_round_length = + chainspec.core_config.minimum_block_time * 2; + chainspec.core_config.signature_rewards_max_delay = signature_rewards_max_delay; + + if let Some(refund_handling) = refund_handling_override { + chainspec.core_config.refund_handling = refund_handling; + } + if let Some(fee_handling) = fee_handling_override { + chainspec.core_config.fee_handling = fee_handling; + } + if let Some(pricing_handling) = pricing_handling_override { + chainspec.core_config.pricing_handling = pricing_handling; + } + if let Some(allow_prepaid) = allow_prepaid_override { + chainspec.core_config.allow_prepaid = allow_prepaid; + } + if let Some(balance_hold_interval) = balance_hold_interval_override { + chainspec.core_config.gas_hold_interval = balance_hold_interval; + } + if let Some(administrators) = administrators { + chainspec.core_config.administrators = administrators; + } + if let Some(chain_name) = chain_name { + chainspec.network_config.name = chain_name; + } + if let Some(gas_hold_balance_handling) = gas_hold_balance_handling { + chainspec.core_config.gas_hold_balance_handling = gas_hold_balance_handling; + } + if let Some(transaction_v1_config) = transaction_v1_override { + chainspec.transaction_config.transaction_v1_config = transaction_v1_config + } + + let applied_block_gas_limit = chainspec.transaction_config.block_gas_limit; + + info!(?applied_block_gas_limit); + + let mut fixture = TestFixture { + rng, + node_contexts: vec![], + network: TestingNetwork::new(), + chainspec: Arc::new(chainspec), + chainspec_raw_bytes: Arc::new(chainspec_raw_bytes), + }; + + for secret_key in secret_keys { + let (config, storage_dir) = fixture.create_node_config( + secret_key.as_ref(), + None, + 
storage_multiplier, + node_config_override.clone(), + ); + fixture.add_node(secret_key, config, storage_dir).await; + } + + fixture + .run_until( + move |nodes: &Nodes| { + nodes.values().all(|runner| { + !matches!(runner.main_reactor().state, ReactorState::Initialize) + }) + }, + Duration::from_secs(20), + ) + .await; + + fixture + } + + /// Access the environments RNG. + #[inline(always)] + pub(crate) fn rng_mut(&mut self) -> &mut TestRng { + &mut self.rng + } + + /// Returns the highest complete block from node 0. + /// + /// Panics if there is no such block. + #[track_caller] + pub(crate) fn highest_complete_block(&self) -> Block { + let node_0 = self + .node_contexts + .first() + .expect("should have at least one node") + .id; + self.network + .nodes() + .get(&node_0) + .expect("should have node 0") + .main_reactor() + .storage() + .get_highest_complete_block() + .expect("should not error reading db") + .expect("node 0 should have a complete block") + } + + /// Get block by height + pub(crate) fn get_block_by_height(&self, block_height: u64) -> Block { + let node_0 = self + .node_contexts + .first() + .expect("should have at least one node") + .id; + + self.network + .nodes() + .get(&node_0) + .expect("should have node 0") + .main_reactor() + .storage() + .read_block_by_height(block_height) + .expect("failure to read block at height") + } + + #[track_caller] + pub(crate) fn get_block_gas_price_by_public_key( + &self, + maybe_public_key: Option<&PublicKey>, + ) -> u8 { + let node_id = match maybe_public_key { + None => { + &self + .node_contexts + .first() + .expect("should have at least one node") + .id + } + Some(public_key) => { + let (node_id, _) = self + .network + .nodes() + .iter() + .find(|(_, runner)| runner.main_reactor().consensus.public_key() == public_key) + .expect("should have runner"); + + node_id + } + }; + + self.network + .nodes() + .get(node_id) + .expect("should have node 0") + .main_reactor() + .storage() + .get_highest_complete_block() + 
.expect("should not error reading db") + .expect("node 0 should have a complete block") + .maybe_current_gas_price() + .expect("must have gas price") + } + + #[track_caller] + pub(crate) fn switch_block(&self, era: EraId) -> BlockV2 { + let node_0 = self + .node_contexts + .first() + .expect("should have at least one node") + .id; + self.network + .nodes() + .get(&node_0) + .expect("should have node 0") + .main_reactor() + .storage() + .read_switch_block_by_era_id(era) + .and_then(|block| BlockV2::try_from(block).ok()) + .unwrap_or_else(|| panic!("node 0 should have a switch block V2 for {}", era)) + } + + #[track_caller] + pub(crate) fn create_node_config( + &mut self, + secret_key: &SecretKey, + maybe_trusted_hash: Option, + storage_multiplier: u8, + node_config_override: NodeConfigOverride, + ) -> (Config, TempDir) { + // Set the network configuration. + let network_cfg = match self.node_contexts.first() { + Some(first_node) => { + let known_address = + SocketAddr::from_str(&first_node.config.network.bind_address).unwrap(); + network::Config::default_local_net(known_address.port()) + } + None => { + let port = testing::unused_port_on_localhost(); + network::Config::default_local_net_first_node(port) + } + }; + let mut cfg = Config { + network: network_cfg, + gossip: gossiper::Config::new_with_small_timeouts(), + binary_port_server: crate::BinaryPortConfig { + allow_request_get_all_values: true, + allow_request_get_trie: true, + allow_request_speculative_exec: true, + ..Default::default() + }, + ..Default::default() + }; + let NodeConfigOverride { + sync_handling_override, + idle_tolerance, + } = node_config_override; + if let Some(sync_handling) = sync_handling_override { + cfg.node.sync_handling = sync_handling; + } + if let Some(idle) = idle_tolerance { + cfg.node.idle_tolerance = idle + } + + // Additionally set up storage in a temporary directory. 
+ let (storage_cfg, temp_dir) = storage::Config::new_for_tests(storage_multiplier); + // ...and the secret key for our validator. + { + let secret_key_path = temp_dir.path().join("secret_key"); + secret_key + .to_file(secret_key_path.clone()) + .expect("could not write secret key"); + cfg.consensus.secret_key_path = External::Path(secret_key_path); + } + cfg.storage = storage_cfg; + cfg.node.trusted_hash = maybe_trusted_hash; + cfg.contract_runtime.max_global_state_size = + Some(1024 * 1024 * storage_multiplier as usize); + + (cfg, temp_dir) + } + + /// Adds a node to the network. + /// + /// If a previously-removed node is to be re-added, then the `secret_key`, `config` and + /// `storage_dir` returned in the `NodeContext` during removal should be used here in order to + /// ensure the same storage dir is used across both executions. + pub(crate) async fn add_node( + &mut self, + secret_key: Arc, + config: Config, + storage_dir: TempDir, + ) -> NodeId { + let (id, _) = self + .network + .add_node_with_config_and_chainspec( + WithDir::new(RESOURCES_PATH.join("local"), config.clone()), + Arc::clone(&self.chainspec), + Arc::clone(&self.chainspec_raw_bytes), + &mut self.rng, + ) + .await + .expect("could not add node to reactor"); + let node_context = NodeContext { + id, + secret_key, + config, + storage_dir, + }; + self.node_contexts.push(node_context); + info!("added node {} with id {}", self.node_contexts.len() - 1, id); + id + } + + #[track_caller] + pub(crate) fn remove_and_stop_node(&mut self, index: usize) -> NodeContext { + let node_context = self.node_contexts.remove(index); + let runner = self.network.remove_node(&node_context.id).unwrap(); + runner.is_shutting_down.set(); + info!("removed node {} with id {}", index, node_context.id); + node_context + } + + /// Runs the network until `condition` is true. + /// + /// Returns an error if the condition isn't met in time. 
+ pub(crate) async fn try_run_until( + &mut self, + condition: F, + within: Duration, + ) -> Result<(), Elapsed> + where + F: Fn(&Nodes) -> bool, + { + self.network + .try_settle_on(&mut self.rng, condition, within) + .await + } + + /// Runs the network until `condition` is true. + /// + /// Panics if the condition isn't met in time. + pub(crate) async fn run_until(&mut self, condition: F, within: Duration) + where + F: Fn(&Nodes) -> bool, + { + self.network + .settle_on(&mut self.rng, condition, within) + .await + } + + /// Runs the network until all nodes reach the given completed block height. + /// + /// Returns an error if the condition isn't met in time. + pub(crate) async fn try_run_until_block_height( + &mut self, + block_height: u64, + within: Duration, + ) -> Result<(), Elapsed> { + self.try_run_until( + move |nodes: &Nodes| { + nodes.values().all(|runner| { + runner + .main_reactor() + .storage() + .get_highest_complete_block() + .expect("should not error reading db") + .map(|block| block.height()) + == Some(block_height) + }) + }, + within, + ) + .await + } + + /// Runs the network until all nodes reach the given completed block height. + /// + /// Panics if the condition isn't met in time. + pub(crate) async fn run_until_block_height(&mut self, block_height: u64, within: Duration) { + self.try_run_until_block_height(block_height, within) + .await + .unwrap_or_else(|_| { + panic!( + "should reach block {} within {} seconds", + block_height, + within.as_secs_f64(), + ) + }) + } + + /// Runs the network until all nodes' consensus components reach the given era. + /// + /// Panics if the condition isn't met in time. 
+ pub(crate) async fn run_until_consensus_in_era(&mut self, era_id: EraId, within: Duration) { + self.try_until_consensus_in_era(era_id, within) + .await + .unwrap_or_else(|_| { + panic!( + "should reach {} within {} seconds", + era_id, + within.as_secs_f64(), + ) + }) + } + + /// Runs the network until all nodes' consensus components reach the given era. + pub(crate) async fn try_until_consensus_in_era( + &mut self, + era_id: EraId, + within: Duration, + ) -> Result<(), Elapsed> { + self.try_run_until( + move |nodes: &Nodes| { + nodes + .values() + .all(|runner| runner.main_reactor().consensus().current_era() == Some(era_id)) + }, + within, + ) + .await + } + + /// Runs the network until all nodes' storage components have stored the switch block header for + /// the given era. + /// + /// Panics if the condition isn't met in time. + pub(crate) async fn run_until_stored_switch_block_header( + &mut self, + era_id: EraId, + within: Duration, + ) { + self.try_until_stored_switch_block_header(era_id, within) + .await + .unwrap_or_else(|_| { + panic!( + "should have stored switch block header for {} within {} seconds", + era_id, + within.as_secs_f64(), + ) + }) + } + + /// Runs the network until all nodes' storage components have stored the switch block header for + /// the given era. + pub(crate) async fn try_until_stored_switch_block_header( + &mut self, + era_id: EraId, + within: Duration, + ) -> Result<(), Elapsed> { + self.try_run_until( + move |nodes: &Nodes| { + nodes.values().all(|runner| { + let available_block_range = + runner.main_reactor().storage().get_available_block_range(); + runner + .main_reactor() + .storage() + .read_highest_switch_block_headers(1) + .unwrap() + .last() + .is_some_and(|header| { + header.era_id() == era_id + && available_block_range.contains(header.height()) + }) + }) + }, + within, + ) + .await + } + + /// Runs the network until all nodes have executed the given transaction and stored the + /// execution result. 
+ /// + /// Panics if the condition isn't met in time. + pub(crate) async fn run_until_executed_transaction( + &mut self, + txn_hash: &TransactionHash, + within: Duration, + ) { + self.try_run_until( + move |nodes: &Nodes| { + nodes.values().all(|runner| { + if runner + .main_reactor() + .storage() + .read_execution_result(txn_hash) + .is_some() + { + let exec_info = runner + .main_reactor() + .storage() + .read_execution_info(*txn_hash); + + if let Some(exec_info) = exec_info { + runner + .main_reactor() + .storage() + .read_block_header_by_height(exec_info.block_height, true) + .unwrap() + .is_some() + } else { + false + } + } else { + false + } + }) + }, + within, + ) + .await + .unwrap_or_else(|_| { + panic!( + "should have stored execution result for {} within {} seconds", + txn_hash, + within.as_secs_f64(), + ) + }) + } + + pub(crate) async fn schedule_upgrade_for_era_two(&mut self) { + for runner in self.network.runners_mut() { + runner + .process_injected_effects(|effect_builder| { + let upgrade = NextUpgrade::new( + ActivationPoint::EraId(ERA_TWO), + ProtocolVersion::from_parts(999, 0, 0), + ); + effect_builder + .upgrade_watcher_announcement(Some(upgrade)) + .ignore() + }) + .await; + } + } + + #[track_caller] + pub(crate) fn check_bid_existence_at_tip( + &self, + validator_public_key: &PublicKey, + delegator_public_key: Option<&PublicKey>, + should_exist: bool, + ) { + let (_, runner) = self + .network + .nodes() + .iter() + .find(|(_, runner)| { + runner.main_reactor().consensus.public_key() == validator_public_key + }) + .expect("should have runner"); + + let highest_block = runner + .main_reactor() + .storage + .read_highest_block_with_signatures(true) + .expect("should have block") + .into_inner() + .0; + let bids_request = BidsRequest::new(*highest_block.state_root_hash()); + let bids_result = runner + .main_reactor() + .contract_runtime + .data_access_layer() + .bids(bids_request); + + let delegator_kind = delegator_public_key.map(|pk| 
DelegatorKind::PublicKey(pk.clone())); + + if let BidsResult::Success { bids } = bids_result { + match bids.iter().find(|bid_kind| { + &bid_kind.validator_public_key() == validator_public_key + && bid_kind.delegator_kind() == delegator_kind + }) { + None => { + if should_exist { + panic!("should have bid in {}", highest_block.era_id()); + } + } + Some(bid) => { + if !should_exist && !bid.is_unbond() { + info!("unexpected bid record existence: {:?}", bid); + panic!("expected to not have bid"); + } + } + } + } else { + panic!("network should have bids: {:?}", bids_result); + } + } + + /// Returns the hash of the given system contract. + #[track_caller] + pub(crate) fn system_contract_hash(&self, system_contract_name: &str) -> AddressableEntityHash { + let node_0 = self + .node_contexts + .first() + .expect("should have at least one node") + .id; + let reactor = self + .network + .nodes() + .get(&node_0) + .expect("should have node 0") + .main_reactor(); + + let highest_block = reactor + .storage + .read_highest_block() + .expect("should have block"); + + // we need the native auction addr so we can directly call it w/o wasm + // we can get it out of the system entity registry which is just a + // value in global state under a stable key. 
+ let maybe_registry = reactor + .contract_runtime + .data_access_layer() + .checkout(*highest_block.state_root_hash()) + .expect("should checkout") + .expect("should have view") + .read(&Key::SystemEntityRegistry) + .expect("should not have gs storage error") + .expect("should have stored value"); + + let system_entity_registry: SystemHashRegistry = match maybe_registry { + StoredValue::CLValue(cl_value) => CLValue::into_t(cl_value).unwrap(), + _ => { + panic!("expected CLValue") + } + }; + + (*system_entity_registry.get(system_contract_name).unwrap()).into() + } + + #[track_caller] + pub(crate) fn get_current_era_price(&self) -> u8 { + let (_, runner) = self + .network + .nodes() + .iter() + .next() + .expect("must have runner"); + + let price = runner.main_reactor().contract_runtime.current_era_price(); + + price.gas_price() + } + + #[track_caller] + pub(crate) fn check_account_balance_hold_at_tip(&self, account_public_key: PublicKey) -> U512 { + let (_, runner) = self + .network + .nodes() + .iter() + .find(|(_, runner)| runner.main_reactor().consensus.public_key() == &account_public_key) + .expect("must have runner"); + + let highest_block = runner + .main_reactor() + .storage + .read_highest_block() + .expect("should have block"); + + let balance_request = BalanceRequest::from_public_key( + *highest_block.state_root_hash(), + highest_block.protocol_version(), + account_public_key, + BalanceHandling::Available, + ProofHandling::NoProofs, + ); + + let balance_result = runner + .main_reactor() + .contract_runtime + .data_access_layer() + .balance(balance_request); + + match balance_result { + BalanceResult::RootNotFound => { + panic!("Root not found during balance query") + } + BalanceResult::Success { proofs_result, .. 
} => proofs_result.total_held_amount(), + BalanceResult::Failure(tce) => { + panic!("tracking copy error: {:?}", tce) + } + } + } + + pub(crate) async fn inject_transaction(&mut self, txn: Transaction) { + // saturate the network with the transactions via just making them all store and accept it + // they're all validators so one of them should propose it + for runner in self.network.runners_mut() { + runner + .process_injected_effects(|effect_builder| { + effect_builder + .put_transaction_to_storage(txn.clone()) + .ignore() + }) + .await; + runner + .process_injected_effects(|effect_builder| { + effect_builder + .announce_new_transaction_accepted(Arc::new(txn.clone()), Source::Client) + .ignore() + }) + .await; + } + } + + /// Returns the transforms from the stored, successful execution result for the given + /// transaction from node 0. + /// + /// Panics if there is no such execution result, or if it is not a `Success` variant. + #[track_caller] + pub(crate) fn successful_execution_transforms( + &self, + txn_hash: &TransactionHash, + ) -> Vec { + let node_0 = self + .node_contexts + .first() + .expect("should have at least one node") + .id; + match self + .network + .nodes() + .get(&node_0) + .expect("should have node 0") + .main_reactor() + .storage() + .read_execution_result(txn_hash) + .expect("node 0 should have given execution result") + { + ExecutionResult::V1(_) => unreachable!(), + ExecutionResult::V2(execution_result_v2) => { + if execution_result_v2.error_message.is_none() { + execution_result_v2.effects.transforms().to_vec() + } else { + panic!( + "transaction execution failed: {:?} gas: {}", + execution_result_v2.error_message, execution_result_v2.consumed + ); + } + } + } + } + + pub(crate) fn delete_block_utilization_score_by_block_hash_in_node( + &mut self, + node_public_key: &PublicKey, + block_hash: BlockHash, + ) { + let (_, runner) = self + .network + .nodes_mut() + .iter_mut() + .find(|(_, runner)| runner.main_reactor().consensus.public_key() 
== node_public_key) + .expect("should have runner"); + + runner + .main_reactor_as_mut() + .storage + .delete_block_utilization_score_by_block_hash(block_hash) + } + + pub(crate) async fn check_gas_price_for_nodes( + &mut self, + expected_gas_price: u8, + within: Duration, + ) { + self.try_run_until( + move |nodes| { + nodes.values().all(|runner| { + let era_end = runner + .main_reactor() + .storage() + .read_highest_switch_block_headers(1) + .unwrap() + .last() + .expect("must have block header") + .clone_era_end() + .expect("must have era end for switch block"); + + if let EraEnd::V2(era_end) = era_end { + era_end.next_era_gas_price() == expected_gas_price + } else { + false + } + }) + }, + within, + ) + .await + .unwrap_or_else(|_| { + panic!( + "should have same gas price across all nodes within {} seconds", + within.as_secs_f64(), + ) + }) + } + + #[inline(always)] + pub(crate) fn network_mut(&mut self) -> &mut TestingNetwork> { + &mut self.network + } + + pub(crate) fn run_until_stopped( + self, + rng: TestRng, + ) -> impl futures::Future>, TestRng)> { + self.network.crank_until_stopped(rng) + } + + /// Runs the network until all nodes have executed the given transaction and stored the + /// execution result. + /// + /// Panics if the condition isn't met in time. 
+ pub(crate) async fn assert_execution_in_lane( + &mut self, + txn_hash: &TransactionHash, + lane_id: u8, + within: Duration, + ) { + self.try_run_until( + move |nodes: &Nodes| { + nodes.values().all(|runner| { + if runner + .main_reactor() + .storage() + .read_execution_result(txn_hash) + .is_some() + { + let exec_info = runner + .main_reactor() + .storage() + .read_execution_info(*txn_hash); + + if let Some(exec_info) = exec_info { + if let BlockBody::V2(v2_body) = runner + .main_reactor() + .storage() + .read_block_by_height(exec_info.block_height) + .unwrap() + .take_body() + { + v2_body.transactions_by_lane_id(lane_id).contains(txn_hash) + } else { + false + } + } else { + false + } + } else { + false + } + }) + }, + within, + ) + .await + .unwrap_or_else(|_| { + panic!( + "should have stored execution result for {} within {} seconds", + txn_hash, + within.as_secs_f64(), + ) + }) + } +} diff --git a/node/src/reactor/main_reactor/tests/gas_price.rs b/node/src/reactor/main_reactor/tests/gas_price.rs new file mode 100644 index 0000000000..2d40e57a85 --- /dev/null +++ b/node/src/reactor/main_reactor/tests/gas_price.rs @@ -0,0 +1,313 @@ +use std::{sync::Arc, time::Duration}; + +use casper_types::{ + testing::TestRng, Chainspec, EraId, PricingHandling, PricingMode, PublicKey, SecretKey, + TimeDiff, Transaction, TransactionV1Config, U512, +}; + +use crate::{ + reactor::main_reactor::tests::{ + configs_override::ConfigsOverride, fixture::TestFixture, initial_stakes::InitialStakes, + ERA_ONE, ERA_TWO, ERA_ZERO, ONE_MIN, TEN_SECS, THIRTY_SECS, + }, + types::transaction::transaction_v1_builder::TransactionV1Builder, +}; + +#[allow(clippy::enum_variant_names)] +enum GasPriceScenario { + SlotUtilization, + SizeUtilization(u32), + GasConsumptionUtilization(u64), +} + +async fn run_gas_price_scenario(gas_price_scenario: GasPriceScenario) { + let mut rng = TestRng::new(); + let alice_stake = 200_000_000_000_u64; + let bob_stake = 300_000_000_000_u64; + let charlie_stake = 
300_000_000_000_u64; + let initial_stakes: Vec = + vec![alice_stake.into(), bob_stake.into(), charlie_stake.into()]; + + let mut secret_keys: Vec> = (0..3) + .map(|_| Arc::new(SecretKey::random(&mut rng))) + .collect(); + + let stakes = secret_keys + .iter() + .zip(initial_stakes) + .map(|(secret_key, stake)| (PublicKey::from(secret_key.as_ref()), stake)) + .collect(); + + let non_validating_secret_key = SecretKey::random(&mut rng); + let non_validating_public_key = PublicKey::from(&non_validating_secret_key); + secret_keys.push(Arc::new(non_validating_secret_key)); + + let min_gas_price: u8 = 1; + let max_gas_price: u8 = 3; + + let spec_override = match gas_price_scenario { + GasPriceScenario::SlotUtilization => { + let mut transaction_config = TransactionV1Config::default(); + transaction_config.native_mint_lane.max_transaction_count = 1; + ConfigsOverride::default().with_transaction_v1_config(transaction_config) + } + GasPriceScenario::SizeUtilization(block_size) => { + ConfigsOverride::default().with_block_size(block_size) + } + GasPriceScenario::GasConsumptionUtilization(gas_limit) => { + ConfigsOverride::default().with_block_gas_limit(gas_limit) + } + } + .with_lower_threshold(5u64) + .with_upper_threshold(10u64) + .with_minimum_era_height(5) + .with_min_gas_price(min_gas_price) + .with_max_gas_price(max_gas_price); + + let mut fixture = + TestFixture::new_with_keys(rng, secret_keys, stakes, Some(spec_override)).await; + + assert_eq!( + min_gas_price, + fixture.chainspec.vacancy_config.min_gas_price + ); + assert_eq!( + max_gas_price, + fixture.chainspec.vacancy_config.max_gas_price + ); + + let alice_secret_key = Arc::clone(&fixture.node_contexts[0].secret_key); + let alice_public_key = PublicKey::from(&*alice_secret_key); + + fixture + .run_until_stored_switch_block_header(ERA_ZERO, ONE_MIN) + .await; + + let mut switch_block = fixture.switch_block(ERA_ZERO); + let mut next_gas_price = switch_block + .era_end() + .expect("this is a switch block") + 
.next_era_gas_price(); + assert_eq!(next_gas_price, min_gas_price, "price should start at min"); + + let mut current_era = switch_block.era_id(); + let chain_name = fixture.chainspec.network_config.name.clone(); + + assert_eq!(current_era, EraId::new(0), "current era should be genesis"); + // Run the network at load for at least 5 eras. + for idx in 1..=max_gas_price { + let rng = fixture.rng_mut(); + let target_public_key = PublicKey::random(rng); + let fixed_native_mint_transaction = + TransactionV1Builder::new_transfer(10_000_000_000u64, None, target_public_key, None) + .expect("must get builder") + .with_chain_name(chain_name.clone()) + .with_secret_key(&alice_secret_key) + .with_ttl(TimeDiff::from_seconds(120 * 10)) + .with_pricing_mode(PricingMode::Fixed { + gas_price_tolerance: max_gas_price, + additional_computation_factor: 0, + }) + .build() + .expect("must get transaction"); + + let txn = Transaction::V1(fixed_native_mint_transaction); + fixture.inject_transaction(txn).await; + let next_era = current_era.successor(); + fixture + .run_until_stored_switch_block_header(next_era, ONE_MIN) + .await; + switch_block = fixture.switch_block(EraId::new(idx as u64)); + next_gas_price = switch_block + .era_end() + .expect("this is a switch block") + .next_era_gas_price(); + let expected = { + let mut val = min_gas_price + idx; + if val > max_gas_price { + val = max_gas_price; + } + val + }; + assert_eq!( + next_gas_price, expected, + "price goes up by 1 each era (with current settings), up to the max" + ); + current_era = next_era; + } + + assert_eq!( + next_gas_price, max_gas_price, + "calculated gas price should match the max gas price" + ); + assert_eq!( + current_era, + EraId::new(max_gas_price as u64), + "we cranked a number of eras to walk up to the max price" + ); + + let gas_price_for_non_validating_node = + fixture.get_block_gas_price_by_public_key(Some(&non_validating_public_key)); + assert_eq!(max_gas_price, gas_price_for_non_validating_node); + let rng = 
fixture.rng_mut(); + let target_public_key = PublicKey::random(rng); + + let holds_before = fixture.check_account_balance_hold_at_tip(alice_public_key.clone()); + let amount = 10_000_000_000u64; + + let fixed_native_mint_transaction = + TransactionV1Builder::new_transfer(amount, None, target_public_key, None) + .expect("must get builder") + .with_chain_name(chain_name) + .with_secret_key(&alice_secret_key) + .with_pricing_mode(PricingMode::Fixed { + gas_price_tolerance: max_gas_price, + additional_computation_factor: 0, + }) + .build() + .expect("must get transaction"); + + let txn = Transaction::V1(fixed_native_mint_transaction); + let txn_hash = txn.hash(); + + fixture.inject_transaction(txn).await; + fixture + .run_until_executed_transaction(&txn_hash, Duration::from_secs(20)) + .await; + + let holds_after = fixture.check_account_balance_hold_at_tip(alice_public_key.clone()); + + let current_gas_price = fixture + .highest_complete_block() + .maybe_current_gas_price() + .expect("must have gas price"); + + let cost = match fixture.chainspec.core_config.pricing_handling { + PricingHandling::PaymentLimited => 0, + PricingHandling::Fixed => { + fixture.chainspec.system_costs_config.mint_costs().transfer * (current_gas_price as u32) + } + }; + + assert_eq!(holds_after, holds_before + U512::from(cost)); + + // Run the network at zero load and ensure the value falls back to the floor. 
+ for _ in 0..5 { + let next_era = current_era.successor(); + fixture + .run_until_stored_switch_block_header(next_era, ONE_MIN) + .await; + current_era = next_era; + } + + let expected_gas_price = fixture.chainspec.vacancy_config.min_gas_price; + let actual_gas_price = fixture.get_current_era_price(); + assert_eq!(actual_gas_price, expected_gas_price); +} + +#[tokio::test] +async fn should_raise_gas_price_to_ceiling_and_reduce_to_floor_based_on_slot_utilization() { + let scenario = GasPriceScenario::SlotUtilization; + run_gas_price_scenario(scenario).await +} + +#[tokio::test] +async fn should_raise_gas_price_to_ceiling_and_reduce_to_floor_based_on_gas_consumption() { + let gas_limit = Chainspec::default() + .system_costs_config + .mint_costs() + .transfer as u64; + let scenario = GasPriceScenario::GasConsumptionUtilization(gas_limit); + run_gas_price_scenario(scenario).await +} + +#[tokio::test] +async fn should_raise_gas_price_to_ceiling_and_reduce_to_floor_based_on_size_consumption() { + // The size of a native transfer is roughly 300 ~ 400 bytes + let size_limit = 600u32; + let scenario = GasPriceScenario::SizeUtilization(size_limit); + run_gas_price_scenario(scenario).await +} + +#[tokio::test] +async fn gas_price_calc_should_not_stall_network() { + let initial_stakes = InitialStakes::AllEqual { + count: 5, + stake: 1_000_000_000, + }; + + let min_gas_price: u8 = 1; + let max_gas_price: u8 = 3; + let minimum_era_height = 5; + + let mut transaction_config = TransactionV1Config::default(); + transaction_config.native_mint_lane.max_transaction_count = 1; + + let spec_override = ConfigsOverride::default() + .with_transaction_v1_config(transaction_config) + .with_lower_threshold(5u64) + .with_upper_threshold(10u64) + .with_minimum_era_height(minimum_era_height) + .with_idle_tolerance(TimeDiff::from_seconds(1)) + .with_min_gas_price(min_gas_price) + .with_max_gas_price(max_gas_price); + + let mut fixture = TestFixture::new(initial_stakes, 
Some(spec_override)).await; + + // Run through the first era. + fixture + .run_until_stored_switch_block_header(ERA_ONE, ONE_MIN) + .await; + + let chain_name = fixture.chainspec.network_config.name.clone(); + let secret_key = Arc::clone(&fixture.node_contexts[0].secret_key); + + let rng = fixture.rng_mut(); + let target_public_key = PublicKey::random(rng); + let node_secret_key = Arc::clone(&fixture.node_contexts[0].secret_key); + let node_public_key = PublicKey::from(&*node_secret_key); + + let fixed_native_mint_transaction = TransactionV1Builder::new_transfer( + 10_000_000_000u64, + None, + target_public_key.clone(), + None, + ) + .expect("must get builder") + .with_chain_name(chain_name.clone()) + .with_secret_key(&secret_key) + .with_ttl(TimeDiff::from_seconds(120 * 10)) + .with_pricing_mode(PricingMode::Fixed { + gas_price_tolerance: max_gas_price, + additional_computation_factor: 0, + }) + .build() + .expect("must get transaction"); + + let txn = Transaction::V1(fixed_native_mint_transaction); + let txn_hash = txn.hash(); + fixture.inject_transaction(txn).await; + + fixture + .run_until_executed_transaction(&txn_hash, TEN_SECS) + .await; + + let block_hash = *fixture.highest_complete_block().hash(); + + fixture.delete_block_utilization_score_by_block_hash_in_node(&node_public_key, block_hash); + + fixture + .run_until_stored_switch_block_header(ERA_TWO, ONE_MIN) + .await; + + let gas_price = fixture + .switch_block(ERA_TWO) + .header() + .era_end() + .unwrap() + .next_era_gas_price(); + + fixture + .check_gas_price_for_nodes(gas_price, THIRTY_SECS) + .await; +} diff --git a/node/src/reactor/main_reactor/tests/initial_stakes.rs b/node/src/reactor/main_reactor/tests/initial_stakes.rs new file mode 100644 index 0000000000..1257f58c8a --- /dev/null +++ b/node/src/reactor/main_reactor/tests/initial_stakes.rs @@ -0,0 +1,5 @@ +pub(crate) enum InitialStakes { + FromVec(Vec), + Random { count: usize }, + AllEqual { count: usize, stake: u128 }, +} diff --git 
a/node/src/reactor/main_reactor/tests/network_general.rs b/node/src/reactor/main_reactor/tests/network_general.rs new file mode 100644 index 0000000000..5d3e2fc5a8 --- /dev/null +++ b/node/src/reactor/main_reactor/tests/network_general.rs @@ -0,0 +1,813 @@ +use std::{collections::HashMap, sync::Arc, time::Duration}; + +use casper_binary_port::{ + BinaryMessage, BinaryMessageCodec, BinaryResponseAndRequest, Command, CommandHeader, + InformationRequest, Uptime, +}; +use either::Either; +use futures::{SinkExt, StreamExt}; +use num_rational::Ratio; +use tokio::{ + net::TcpStream, + time::{self, timeout}, +}; +use tokio_util::codec::Framed; +use tracing::info; + +use casper_types::{ + bytesrepr::{FromBytes, ToBytes}, + execution::TransformKindV2, + system::{auction::BidAddr, AUCTION}, + testing::TestRng, + AvailableBlockRange, Deploy, Key, Peers, PublicKey, SecretKey, StoredValue, TimeDiff, + Timestamp, Transaction, +}; + +use crate::{ + effect::{requests::ContractRuntimeRequest, EffectExt}, + reactor::{ + main_reactor::{ + tests::{ + configs_override::{ConfigsOverride, NodeConfigOverride}, + fixture::TestFixture, + initial_stakes::InitialStakes, + node_has_lowest_available_block_at_or_below_height, Nodes, ERA_ONE, ERA_THREE, + ERA_TWO, ERA_ZERO, ONE_MIN, TEN_SECS, THIRTY_SECS, + }, + MainEvent, MainReactor, ReactorState, + }, + Runner, + }, + testing::{filter_reactor::FilterReactor, network::TestingNetwork, ConditionCheckReactor}, + types::{ExitCode, NodeId, SyncHandling}, + utils::Source, +}; + +#[tokio::test] +async fn run_network() { + // Set up a network with five nodes and run until in era 2. 
+ let initial_stakes = InitialStakes::Random { count: 5 }; + let mut fixture = TestFixture::new(initial_stakes, None).await; + fixture.run_until_consensus_in_era(ERA_TWO, ONE_MIN).await; +} + +#[tokio::test] +async fn historical_sync_with_era_height_1() { + let initial_stakes = InitialStakes::Random { count: 5 }; + let spec_override = ConfigsOverride { + minimum_block_time: "4seconds".parse().unwrap(), + ..Default::default() + }; + let mut fixture = TestFixture::new(initial_stakes, Some(spec_override)).await; + + // Wait for all nodes to reach era 3. + fixture.run_until_consensus_in_era(ERA_THREE, ONE_MIN).await; + + // Create a joiner node. + let secret_key = SecretKey::random(&mut fixture.rng); + let trusted_hash = *fixture.highest_complete_block().hash(); + let (mut config, storage_dir) = fixture.create_node_config( + &secret_key, + Some(trusted_hash), + 1, + NodeConfigOverride::default(), + ); + config.node.sync_handling = SyncHandling::Genesis; + let joiner_id = fixture + .add_node(Arc::new(secret_key), config, storage_dir) + .await; + + // Wait for joiner node to sync back to the block from era 1 + fixture + .run_until( + node_has_lowest_available_block_at_or_below_height(1, joiner_id), + ONE_MIN, + ) + .await; + + // Remove the weights for era 0 and era 1 from the validator matrix + let runner = fixture + .network + .nodes_mut() + .get_mut(&joiner_id) + .expect("Could not find runner for node {joiner_id}"); + let reactor = runner.reactor_mut().inner_mut().inner_mut(); + reactor.validator_matrix.purge_era_validators(&ERA_ZERO); + reactor.validator_matrix.purge_era_validators(&ERA_ONE); + + // Continue syncing and check if the joiner node reaches era 0 + fixture + .run_until( + node_has_lowest_available_block_at_or_below_height(0, joiner_id), + ONE_MIN, + ) + .await; +} + +#[tokio::test] +async fn should_not_historical_sync_no_sync_node() { + let initial_stakes = InitialStakes::Random { count: 5 }; + let spec_override = ConfigsOverride { + minimum_block_time: 
"4seconds".parse().unwrap(), + minimum_era_height: 2, + ..Default::default() + }; + let mut fixture = TestFixture::new(initial_stakes, Some(spec_override)).await; + + // Wait for all nodes to complete block 1. + fixture.run_until_block_height(1, ONE_MIN).await; + + // Create a joiner node. + let highest_block = fixture.highest_complete_block(); + let trusted_hash = *highest_block.hash(); + let trusted_height = highest_block.height(); + assert!( + trusted_height > 0, + "trusted height must be non-zero to allow for checking that the joiner doesn't do \ + historical syncing" + ); + info!("joining node using block {trusted_height} {trusted_hash}"); + let secret_key = SecretKey::random(&mut fixture.rng); + let (mut config, storage_dir) = fixture.create_node_config( + &secret_key, + Some(trusted_hash), + 1, + NodeConfigOverride::default(), + ); + config.node.sync_handling = SyncHandling::NoSync; + let joiner_id = fixture + .add_node(Arc::new(secret_key), config, storage_dir) + .await; + + let joiner_avail_range = |nodes: &Nodes| { + nodes + .get(&joiner_id) + .expect("should have joiner") + .main_reactor() + .storage() + .get_available_block_range() + }; + + // Run until the joiner doesn't have the default available block range, i.e. it has completed + // syncing the initial block. + fixture + .try_run_until( + |nodes: &Nodes| joiner_avail_range(nodes) != AvailableBlockRange::RANGE_0_0, + ONE_MIN, + ) + .await + .expect("timed out waiting for joiner to sync first block"); + + let available_block_range_pre = joiner_avail_range(fixture.network.nodes()); + + let pre = available_block_range_pre.low(); + assert!( + pre >= trusted_height, + "should not have acquired a block earlier than trusted hash block {} {}", + pre, + trusted_height + ); + + // Ensure the joiner's chain is advancing. 
+ fixture + .try_run_until( + |nodes: &Nodes| joiner_avail_range(nodes).high() > available_block_range_pre.high(), + ONE_MIN, + ) + .await + .unwrap_or_else(|_| { + panic!( + "timed out waiting for joiner's highest complete block to exceed {}", + available_block_range_pre.high() + ) + }); + + // Ensure the joiner is not doing historical sync. + fixture + .try_run_until( + |nodes: &Nodes| joiner_avail_range(nodes).low() < available_block_range_pre.low(), + TEN_SECS, + ) + .await + .unwrap_err(); +} + +#[tokio::test] +async fn should_catch_up_and_shutdown() { + let initial_stakes = InitialStakes::Random { count: 5 }; + let spec_override = ConfigsOverride { + minimum_block_time: "4seconds".parse().unwrap(), + minimum_era_height: 2, + ..Default::default() + }; + let mut fixture = TestFixture::new(initial_stakes, Some(spec_override)).await; + + // Wait for all nodes to complete block 1. + fixture.run_until_block_height(1, ONE_MIN).await; + + // Create a joiner node. + let highest_block = fixture.highest_complete_block(); + let trusted_hash = *highest_block.hash(); + let trusted_height = highest_block.height(); + assert!( + trusted_height > 0, + "trusted height must be non-zero to allow for checking that the joiner doesn't do \ + historical syncing" + ); + + info!("joining node using block {trusted_height} {trusted_hash}"); + let secret_key = SecretKey::random(&mut fixture.rng); + let (mut config, storage_dir) = fixture.create_node_config( + &secret_key, + Some(trusted_hash), + 1, + NodeConfigOverride::default(), + ); + config.node.sync_handling = SyncHandling::CompleteBlock; + let joiner_id = fixture + .add_node(Arc::new(secret_key), config, storage_dir) + .await; + + let joiner_avail_range = |nodes: &Nodes| { + nodes + .get(&joiner_id) + .expect("should have joiner") + .main_reactor() + .storage() + .get_available_block_range() + }; + + // Run until the joiner shuts down after catching up + fixture + .network + .settle_on_node_exit( + &mut fixture.rng, + &joiner_id, + 
ExitCode::CleanExitDontRestart, + ONE_MIN, + ) + .await; + + let available_block_range = joiner_avail_range(fixture.network.nodes()); + + let low = available_block_range.low(); + assert!( + low >= trusted_height, + "should not have acquired a block earlier than trusted hash block {low} {trusted_hash}", + ); + + let highest_block_height = fixture.highest_complete_block().height(); + let high = available_block_range.high(); + assert!( + low < high && high <= highest_block_height, + "should have acquired more recent blocks before shutting down {low} {high} {highest_block_height}", + ); +} + +fn network_is_in_keepup( + nodes: &HashMap>>>, +) -> bool { + nodes + .values() + .all(|node| node.reactor().inner().inner().state == ReactorState::KeepUp) +} + +const MESSAGE_SIZE: u32 = 1024 * 1024 * 10; + +async fn setup_network_and_get_binary_port_handle( + initial_stakes: InitialStakes, + spec_override: ConfigsOverride, +) -> ( + Framed, + impl futures::Future>, TestRng)>, +) { + let mut fixture = timeout( + Duration::from_secs(10), + TestFixture::new(initial_stakes, Some(spec_override)), + ) + .await + .unwrap(); + let mut rng = fixture.rng_mut().create_child(); + let net = fixture.network_mut(); + net.settle_on(&mut rng, network_is_in_keepup, Duration::from_secs(59)) + .await; + let (_, first_node) = net + .nodes() + .iter() + .next() + .expect("should have at least one node"); + let binary_port_addr = first_node + .main_reactor() + .binary_port + .bind_address() + .unwrap(); + let finish_cranking = fixture.run_until_stopped(rng.create_child()); + let address = format!("localhost:{}", binary_port_addr.port()); + let stream = TcpStream::connect(address.clone()) + .await + .expect("should create stream"); + let client = Framed::new(stream, BinaryMessageCodec::new(MESSAGE_SIZE)); + (client, finish_cranking) +} + +#[tokio::test] +async fn should_start_in_isolation() { + let initial_stakes = InitialStakes::Random { count: 1 }; + let spec_override = ConfigsOverride { + 
node_config_override: NodeConfigOverride { + sync_handling_override: Some(SyncHandling::Isolated), + idle_tolerance: None, + }, + ..Default::default() + }; + let (mut client, finish_cranking) = + setup_network_and_get_binary_port_handle(initial_stakes, spec_override).await; + + let uptime_request_bytes = { + let request = Command::Get( + InformationRequest::Uptime + .try_into() + .expect("should convert"), + ); + let header = CommandHeader::new(request.tag(), 1_u16); + let header_bytes = ToBytes::to_bytes(&header).expect("should serialize"); + header_bytes + .iter() + .chain( + ToBytes::to_bytes(&request) + .expect("should serialize") + .iter(), + ) + .cloned() + .collect::>() + }; + client + .send(BinaryMessage::new(uptime_request_bytes)) + .await + .expect("should send message"); + let response = timeout(Duration::from_secs(20), client.next()) + .await + .unwrap_or_else(|err| panic!("should complete uptime request without timeout: {}", err)) + .unwrap_or_else(|| panic!("should have bytes")) + .unwrap_or_else(|err| panic!("should have ok response: {}", err)); + let (binary_response_and_request, _): (BinaryResponseAndRequest, _) = + FromBytes::from_bytes(response.payload()).expect("should deserialize response"); + let response = binary_response_and_request.response().payload(); + let (uptime, remainder): (Uptime, _) = + FromBytes::from_bytes(response).expect("Peers should be deserializable"); + assert!(remainder.is_empty()); + assert!(uptime.into_inner() > 0); + let (_net, _rng) = timeout(Duration::from_secs(20), finish_cranking) + .await + .unwrap_or_else(|_| panic!("should finish cranking without timeout")); +} + +#[tokio::test] +async fn should_be_peerless_in_isolation() { + let initial_stakes = InitialStakes::Random { count: 1 }; + let spec_override = ConfigsOverride { + node_config_override: NodeConfigOverride { + sync_handling_override: Some(SyncHandling::Isolated), + idle_tolerance: None, + }, + ..Default::default() + }; + let (mut client, finish_cranking) = 
+ setup_network_and_get_binary_port_handle(initial_stakes, spec_override).await; + + let peers_request_bytes = { + let request = Command::Get( + InformationRequest::Peers + .try_into() + .expect("should convert"), + ); + let header = CommandHeader::new(request.tag(), 1_u16); + let header_bytes = ToBytes::to_bytes(&header).expect("should serialize"); + header_bytes + .iter() + .chain( + ToBytes::to_bytes(&request) + .expect("should serialize") + .iter(), + ) + .cloned() + .collect::>() + }; + client + .send(BinaryMessage::new(peers_request_bytes)) + .await + .expect("should send message"); + let response = timeout(Duration::from_secs(20), client.next()) + .await + .unwrap_or_else(|err| panic!("should complete peers request without timeout: {}", err)) + .unwrap_or_else(|| panic!("should have bytes")) + .unwrap_or_else(|err| panic!("should have ok response: {}", err)); + let (binary_response_and_request, _): (BinaryResponseAndRequest, _) = + FromBytes::from_bytes(response.payload()).expect("should deserialize response"); + let response = binary_response_and_request.response().payload(); + + let (peers, remainder): (Peers, _) = + FromBytes::from_bytes(response).expect("Peers should be deserializable"); + assert!(remainder.is_empty()); + assert!( + peers.into_inner().is_empty(), + "should not have peers in isolated mode" + ); + + let (_net, _rng) = timeout(Duration::from_secs(20), finish_cranking) + .await + .unwrap_or_else(|_| panic!("should finish cranking without timeout")); +} + +#[tokio::test] +async fn network_should_recover_from_stall() { + // Set up a network with three nodes. + let initial_stakes = InitialStakes::AllEqual { + count: 3, + stake: 100, + }; + let mut fixture = TestFixture::new(initial_stakes, None).await; + + // Let all nodes progress until block 2 is marked complete. + fixture.run_until_block_height(2, ONE_MIN).await; + + // Kill all nodes except for node 0. 
+ let mut stopped_nodes = vec![]; + for _ in 1..fixture.node_contexts.len() { + let node_context = fixture.remove_and_stop_node(1); + stopped_nodes.push(node_context); + } + + // Expect node 0 can't produce more blocks, i.e. the network has stalled. + fixture + .try_run_until_block_height(3, ONE_MIN) + .await + .expect_err("should time out"); + + // Restart the stopped nodes. + for node_context in stopped_nodes { + fixture + .add_node( + node_context.secret_key, + node_context.config, + node_context.storage_dir, + ) + .await; + } + + // Ensure all nodes progress until block 3 is marked complete. + fixture.run_until_block_height(3, TEN_SECS).await; +} + +#[tokio::test] +async fn node_should_rejoin_after_ejection() { + let initial_stakes = InitialStakes::AllEqual { + count: 5, + stake: 1_000_000_000, + }; + let minimum_era_height = 4; + let configs_override = ConfigsOverride { + minimum_era_height, + minimum_block_time: "4096 ms".parse().unwrap(), + round_seigniorage_rate: Ratio::new(1, 1_000_000_000_000), + ..Default::default() + }; + let mut fixture = TestFixture::new(initial_stakes, Some(configs_override)).await; + + // Run through the first era. + fixture + .run_until_block_height(minimum_era_height, ONE_MIN) + .await; + + let stopped_node = fixture.remove_and_stop_node(1); + let stopped_secret_key = Arc::clone(&stopped_node.secret_key); + let stopped_public_key = PublicKey::from(&*stopped_secret_key); + + // Wait until the stopped node is ejected and removed from the validators set. + fixture + .run_until_consensus_in_era( + (fixture.chainspec.core_config.auction_delay + 3).into(), + ONE_MIN, + ) + .await; + + // Restart the node. + // Use the hash of the current highest complete block as the trusted hash. 
+ let mut config = stopped_node.config; + config.node.trusted_hash = Some(*fixture.highest_complete_block().hash()); + fixture + .add_node(stopped_node.secret_key, config, stopped_node.storage_dir) + .await; + + // Create & sign deploy to reactivate the stopped node's bid. + // The bid amount will make sure that the rejoining validator proposes soon after it rejoins. + let mut deploy = Deploy::add_bid( + fixture.chainspec.network_config.name.clone(), + fixture.system_contract_hash(AUCTION), + stopped_public_key.clone(), + 100_000_000_000_000_000_u64.into(), + 10, + Timestamp::now(), + TimeDiff::from_seconds(60), + ); + deploy.sign(&stopped_secret_key); + let txn = Transaction::Deploy(deploy); + let txn_hash = txn.hash(); + + // Inject the transaction and run the network until executed. + fixture.inject_transaction(txn).await; + fixture + .run_until_executed_transaction(&txn_hash, THIRTY_SECS) + .await; + + // Ensure execution succeeded and that there is a Write transform for the bid's key. + let bid_key = Key::BidAddr(BidAddr::from(stopped_public_key.clone())); + fixture + .successful_execution_transforms(&txn_hash) + .iter() + .find(|transform| match transform.kind() { + TransformKindV2::Write(StoredValue::BidKind(bid_kind)) => { + Key::from(bid_kind.bid_addr()) == bid_key + } + _ => false, + }) + .expect("should have a write record for bid"); + + // Wait until the auction delay passes, plus one era for a margin of error. + fixture + .run_until_consensus_in_era( + (2 * fixture.chainspec.core_config.auction_delay + 6).into(), + ONE_MIN, + ) + .await; +} + +async fn assert_network_shutdown_for_upgrade_with_stakes(initial_stakes: InitialStakes) { + let mut fixture = TestFixture::new(initial_stakes, None).await; + + // An upgrade is scheduled for era 2, after the switch block in era 1 (height 2). + fixture.schedule_upgrade_for_era_two().await; + + // Run until the nodes shut down for the upgrade. 
+ fixture + .network + .settle_on_exit(&mut fixture.rng, ExitCode::Success, ONE_MIN) + .await; +} + +#[tokio::test] +async fn nodes_should_have_enough_signatures_before_upgrade_with_equal_stake() { + // Equal stake ensures that one node was able to learn about signatures created by the other, by + // whatever means necessary (gossiping, broadcasting, fetching, etc.). + let initial_stakes = InitialStakes::AllEqual { + count: 2, + stake: u128::MAX, + }; + assert_network_shutdown_for_upgrade_with_stakes(initial_stakes).await; +} + +#[tokio::test] +async fn nodes_should_have_enough_signatures_before_upgrade_with_one_dominant_stake() { + let initial_stakes = InitialStakes::FromVec(vec![u128::MAX, 255]); + assert_network_shutdown_for_upgrade_with_stakes(initial_stakes).await; +} + +#[tokio::test] +async fn dont_upgrade_without_switch_block() { + let initial_stakes = InitialStakes::Random { count: 2 }; + let mut fixture = TestFixture::new(initial_stakes, None).await; + fixture.run_until_consensus_in_era(ERA_ONE, ONE_MIN).await; + + eprintln!( + "Running 'dont_upgrade_without_switch_block' test with rng={}", + fixture.rng + ); + + // An upgrade is scheduled for era 2, after the switch block in era 1 (height 2). + // We artificially delay the execution of that block. + fixture.schedule_upgrade_for_era_two().await; + for runner in fixture.network.runners_mut() { + let mut exec_request_received = false; + runner.reactor_mut().inner_mut().set_filter(move |event| { + if let MainEvent::ContractRuntimeRequest( + ContractRuntimeRequest::EnqueueBlockForExecution { + executable_block, .. 
+ }, + ) = &event + { + if executable_block.era_report.is_some() + && executable_block.era_id == ERA_ONE + && !exec_request_received + { + info!("delaying {}", executable_block); + exec_request_received = true; + return Either::Left( + time::sleep(Duration::from_secs(10)).event(move |_| event), + ); + } + info!("not delaying {}", executable_block); + } + Either::Right(event) + }); + } + + // Run until the nodes shut down for the upgrade. + fixture + .network + .settle_on_exit(&mut fixture.rng, ExitCode::Success, ONE_MIN) + .await; + + // Verify that the switch block has been stored: Even though it was delayed the node didn't + // restart before executing and storing it. + for runner in fixture.network.nodes().values() { + let header = runner + .main_reactor() + .storage() + .read_block_header_by_height(2, false) + .expect("failed to read from storage") + .expect("missing switch block"); + assert_eq!(ERA_ONE, header.era_id(), "era should be 1"); + assert!(header.is_switch_block(), "header should be switch block"); + } +} + +#[tokio::test] +async fn should_store_finalized_approvals() { + // Set up a network with two nodes where node 0 (Alice) is effectively guaranteed to be the + // proposer. + let initial_stakes = InitialStakes::FromVec(vec![u128::MAX, 1]); + let mut fixture = TestFixture::new(initial_stakes, None).await; + + let alice_secret_key = Arc::clone(&fixture.node_contexts[0].secret_key); + let alice_public_key = PublicKey::from(&*alice_secret_key); + let bob_secret_key = Arc::clone(&fixture.node_contexts[1].secret_key); + let charlie_secret_key = Arc::new(SecretKey::random(&mut fixture.rng)); // just for ordering testing purposes + + // Wait for all nodes to complete era 0. + fixture.run_until_consensus_in_era(ERA_ONE, ONE_MIN).await; + + // Submit a transaction. 
+ let mut transaction_alice_bob = Transaction::from( + Deploy::random_valid_native_transfer_without_deps(&mut fixture.rng), + ); + let mut transaction_alice_bob_charlie = transaction_alice_bob.clone(); + let mut transaction_bob_alice = transaction_alice_bob.clone(); + + transaction_alice_bob.sign(&alice_secret_key); + transaction_alice_bob.sign(&bob_secret_key); + + transaction_alice_bob_charlie.sign(&alice_secret_key); + transaction_alice_bob_charlie.sign(&bob_secret_key); + transaction_alice_bob_charlie.sign(&charlie_secret_key); + + transaction_bob_alice.sign(&bob_secret_key); + transaction_bob_alice.sign(&alice_secret_key); + + // We will be testing the correct sequence of approvals against the transaction signed by Bob + // and Alice. + // The transaction signed by Alice and Bob should give the same ordering of approvals. + let expected_approvals: Vec<_> = transaction_bob_alice.approvals().iter().cloned().collect(); + + // We'll give the transaction signed by Alice, Bob and Charlie to Bob, so these will be his + // original approvals. Save these for checks later. + let bobs_original_approvals: Vec<_> = transaction_alice_bob_charlie + .approvals() + .iter() + .cloned() + .collect(); + assert_ne!(bobs_original_approvals, expected_approvals); + + let transaction_hash = transaction_alice_bob.hash(); + + for runner in fixture.network.runners_mut() { + let transaction = if runner.main_reactor().consensus().public_key() == &alice_public_key { + // Alice will propose the transaction signed by Alice and Bob. + transaction_alice_bob.clone() + } else { + // Bob will receive the transaction signed by Alice, Bob and Charlie. 
+ transaction_alice_bob_charlie.clone() + }; + runner + .process_injected_effects(|effect_builder| { + effect_builder + .put_transaction_to_storage(transaction.clone()) + .ignore() + }) + .await; + runner + .process_injected_effects(|effect_builder| { + effect_builder + .announce_new_transaction_accepted(Arc::new(transaction), Source::Client) + .ignore() + }) + .await; + } + + // Run until the transaction gets executed. + let has_stored_exec_results = |nodes: &Nodes| { + nodes.values().all(|runner| { + let read = runner + .main_reactor() + .storage() + .read_execution_result(&transaction_hash); + read.is_some() + }) + }; + fixture.run_until(has_stored_exec_results, ONE_MIN).await; + + // Check if the approvals agree. + for runner in fixture.network.nodes().values() { + let maybe_dwa = runner + .main_reactor() + .storage() + .get_transaction_with_finalized_approvals_by_hash(&transaction_hash); + let maybe_finalized_approvals = maybe_dwa + .as_ref() + .and_then(|dwa| dwa.1.clone()) + .map(|fa| fa.iter().cloned().collect()); + let maybe_original_approvals = maybe_dwa + .as_ref() + .map(|(transaction, _approvals)| transaction.approvals().iter().cloned().collect()); + if runner.main_reactor().consensus().public_key() != &alice_public_key { + // Bob should have finalized approvals, and his original approvals should be different. + assert_eq!( + maybe_finalized_approvals.as_ref(), + Some(&expected_approvals) + ); + assert_eq!( + maybe_original_approvals.as_ref(), + Some(&bobs_original_approvals) + ); + } else { + // Alice should only have the correct approvals as the original ones, and no finalized + // approvals (as they wouldn't be stored, because they would be the same as the + // original ones). + assert_eq!(maybe_finalized_approvals.as_ref(), None); + assert_eq!(maybe_original_approvals.as_ref(), Some(&expected_approvals)); + } + } +} + +#[tokio::test] +async fn should_update_last_progress_after_block_execution() { + // Set up a network with two nodes. 
+ let initial_stakes = InitialStakes::FromVec(vec![u128::MAX, 1]); + let mut fixture = TestFixture::new(initial_stakes, None).await; + + // Let all nodes reach consensus in era 0. + fixture.run_until_consensus_in_era(ERA_ONE, ONE_MIN).await; + + // Prepare and submit a transaction. + let transaction = Transaction::from(Deploy::random_valid_native_transfer_without_deps( + &mut fixture.rng, + )); + let transaction_hash = transaction.hash(); + + for runner in fixture.network.runners_mut() { + let transaction = transaction.clone(); + runner + .process_injected_effects(|eff| { + eff.put_transaction_to_storage(transaction.clone()).ignore() + }) + .await; + + runner + .process_injected_effects(|eff| { + eff.announce_new_transaction_accepted(Arc::new(transaction), Source::Client) + .ignore() + }) + .await; + } + + // For each node, capture its last_progress before execution. + let stored_last_progresses: Vec<_> = fixture + .network + .nodes() + .values() + .map(|node| { + let reactor = node.main_reactor(); + assert_eq!(reactor.state, ReactorState::Validate); + reactor.last_progress + }) + .collect(); + + // Run until the transaction gets executed. + let has_stored_exec_results = |nodes: &Nodes| { + nodes.values().all(|runner| { + let read = runner + .main_reactor() + .storage() + .read_execution_result(&transaction_hash); + read.is_some() + }) + }; + fixture.run_until(has_stored_exec_results, ONE_MIN).await; + + // For each node, verify its last_progress has been updated. 
+ for (stored_last_progress, node) in stored_last_progresses + .into_iter() + .zip(fixture.network.nodes().values()) + { + assert!(node.main_reactor().last_progress > stored_last_progress); + } +} diff --git a/node/src/reactor/main_reactor/tests/rewards.rs b/node/src/reactor/main_reactor/tests/rewards.rs new file mode 100644 index 0000000000..310caae564 --- /dev/null +++ b/node/src/reactor/main_reactor/tests/rewards.rs @@ -0,0 +1,717 @@ +use std::{ + collections::{BTreeMap, BTreeSet}, + time::Duration, +}; + +use num::Zero; +use num_rational::Ratio; +use num_traits::One; + +use casper_storage::{ + data_access_layer::{TotalSupplyRequest, TotalSupplyResult}, + global_state::state::StateProvider, +}; +use casper_types::{ + Block, ConsensusProtocolName, EraId, ProtocolVersion, PublicKey, Rewards, TimeDiff, U512, +}; + +use crate::{ + failpoints::FailpointActivation, + reactor::{ + main_reactor::tests::{ + configs_override::ConfigsOverride, fixture::TestFixture, initial_stakes::InitialStakes, + switch_blocks::SwitchBlocks, ERA_THREE, ERA_TWO, + }, + Reactor, + }, +}; + +// Fundamental network parameters that are not critical for assessing reward calculation correctness +const STAKE: u128 = 1000000000; +const PRIME_STAKES: [u128; 5] = [106907, 106921, 106937, 106949, 106957]; +const ERA_COUNT: u64 = 3; +const ERA_DURATION: u64 = 20000; +//milliseconds +const MIN_HEIGHT: u64 = 6; +const BLOCK_TIME: u64 = 1750; +//milliseconds +const TIME_OUT: u64 = 600; +//seconds +const SEIGNIORAGE: (u64, u64) = (1u64, 100u64); +const REPRESENTATIVE_NODE_INDEX: usize = 0; +// Parameters we generally want to vary +const CONSENSUS_ZUG: ConsensusProtocolName = ConsensusProtocolName::Zug; +const CONSENSUS_HIGHWAY: ConsensusProtocolName = ConsensusProtocolName::Highway; +const FINDERS_FEE_ZERO: (u64, u64) = (0u64, 1u64); +const FINDERS_FEE_HALF: (u64, u64) = (1u64, 2u64); +//const FINDERS_FEE_ONE: (u64, u64) = (1u64, 1u64); +const FINALITY_SIG_PROP_ZERO: (u64, u64) = (0u64, 1u64); +const 
FINALITY_SIG_PROP_HALF: (u64, u64) = (1u64, 2u64); +const FINALITY_SIG_PROP_ONE: (u64, u64) = (1u64, 1u64); +const FILTERED_NODES_INDICES: &[usize] = &[3, 4]; +const FINALITY_SIG_LOOKBACK: u64 = 3; + +#[tokio::test] +#[cfg_attr(not(feature = "failpoints"), ignore)] +async fn run_reward_network_zug_all_finality_small_prime_five_eras() { + run_rewards_network_scenario( + PRIME_STAKES, + 5, + TIME_OUT, + REPRESENTATIVE_NODE_INDEX, + &[], + ConfigsOverride { + consensus_protocol: CONSENSUS_ZUG, + era_duration: TimeDiff::from_millis(ERA_DURATION), + minimum_era_height: MIN_HEIGHT, + minimum_block_time: TimeDiff::from_millis(BLOCK_TIME), + round_seigniorage_rate: SEIGNIORAGE.into(), + finders_fee: FINDERS_FEE_ZERO.into(), + finality_signature_proportion: FINALITY_SIG_PROP_ONE.into(), + signature_rewards_max_delay: FINALITY_SIG_LOOKBACK, + ..Default::default() + }, + ) + .await; +} + +#[tokio::test] +#[cfg_attr(not(feature = "failpoints"), ignore)] +async fn run_reward_network_zug_all_finality_small_prime_five_eras_no_lookback() { + run_rewards_network_scenario( + PRIME_STAKES, + 5, + TIME_OUT, + REPRESENTATIVE_NODE_INDEX, + &[], + ConfigsOverride { + consensus_protocol: CONSENSUS_ZUG, + era_duration: TimeDiff::from_millis(ERA_DURATION), + minimum_era_height: MIN_HEIGHT, + minimum_block_time: TimeDiff::from_millis(BLOCK_TIME), + round_seigniorage_rate: SEIGNIORAGE.into(), + finders_fee: FINDERS_FEE_ZERO.into(), + finality_signature_proportion: FINALITY_SIG_PROP_ONE.into(), + signature_rewards_max_delay: 0, + ..Default::default() + }, + ) + .await; +} + +#[tokio::test] +#[cfg_attr(not(feature = "failpoints"), ignore)] +async fn run_reward_network_zug_no_finality_small_nominal_five_eras() { + run_rewards_network_scenario( + [STAKE, STAKE, STAKE, STAKE, STAKE], + 5, + TIME_OUT, + REPRESENTATIVE_NODE_INDEX, + &[], + ConfigsOverride { + consensus_protocol: CONSENSUS_ZUG, + era_duration: TimeDiff::from_millis(ERA_DURATION), + minimum_era_height: MIN_HEIGHT, + 
minimum_block_time: TimeDiff::from_millis(BLOCK_TIME), + round_seigniorage_rate: SEIGNIORAGE.into(), + finders_fee: FINDERS_FEE_ZERO.into(), + finality_signature_proportion: FINALITY_SIG_PROP_ZERO.into(), + signature_rewards_max_delay: FINALITY_SIG_LOOKBACK, + ..Default::default() + }, + ) + .await; +} + +#[tokio::test] +#[cfg_attr(not(feature = "failpoints"), ignore)] +async fn run_reward_network_zug_half_finality_half_finders_small_nominal_five_eras() { + run_rewards_network_scenario( + [STAKE, STAKE, STAKE, STAKE, STAKE], + 5, + TIME_OUT, + REPRESENTATIVE_NODE_INDEX, + &[], + ConfigsOverride { + consensus_protocol: CONSENSUS_ZUG, + era_duration: TimeDiff::from_millis(ERA_DURATION), + minimum_era_height: MIN_HEIGHT, + minimum_block_time: TimeDiff::from_millis(BLOCK_TIME), + round_seigniorage_rate: SEIGNIORAGE.into(), + finders_fee: FINDERS_FEE_HALF.into(), + finality_signature_proportion: FINALITY_SIG_PROP_HALF.into(), + signature_rewards_max_delay: FINALITY_SIG_LOOKBACK, + ..Default::default() + }, + ) + .await; +} + +#[tokio::test] +#[cfg_attr(not(feature = "failpoints"), ignore)] +async fn run_reward_network_zug_half_finality_half_finders_small_nominal_five_eras_no_lookback() { + run_rewards_network_scenario( + [STAKE, STAKE, STAKE, STAKE, STAKE], + 5, + TIME_OUT, + REPRESENTATIVE_NODE_INDEX, + &[], + ConfigsOverride { + consensus_protocol: CONSENSUS_ZUG, + era_duration: TimeDiff::from_millis(ERA_DURATION), + minimum_era_height: MIN_HEIGHT, + minimum_block_time: TimeDiff::from_millis(BLOCK_TIME), + round_seigniorage_rate: SEIGNIORAGE.into(), + finders_fee: FINDERS_FEE_HALF.into(), + finality_signature_proportion: FINALITY_SIG_PROP_HALF.into(), + signature_rewards_max_delay: 0, + ..Default::default() + }, + ) + .await; +} + +#[tokio::test] +#[cfg_attr(not(feature = "failpoints"), ignore)] +async fn run_reward_network_zug_all_finality_half_finders_small_nominal_five_eras_no_lookback() { + run_rewards_network_scenario( + [STAKE, STAKE, STAKE, STAKE, STAKE], + 5, 
+ TIME_OUT, + REPRESENTATIVE_NODE_INDEX, + &[], + ConfigsOverride { + consensus_protocol: CONSENSUS_ZUG, + era_duration: TimeDiff::from_millis(ERA_DURATION), + minimum_era_height: MIN_HEIGHT, + minimum_block_time: TimeDiff::from_millis(BLOCK_TIME), + round_seigniorage_rate: SEIGNIORAGE.into(), + finders_fee: FINDERS_FEE_HALF.into(), + finality_signature_proportion: FINALITY_SIG_PROP_ONE.into(), + signature_rewards_max_delay: 0, + ..Default::default() + }, + ) + .await; +} + +#[tokio::test] +#[cfg_attr(not(feature = "failpoints"), ignore)] +async fn run_reward_network_zug_all_finality_half_finders() { + run_rewards_network_scenario( + [ + STAKE, STAKE, STAKE, STAKE, STAKE, STAKE, STAKE, STAKE, STAKE, STAKE, + ], + ERA_COUNT, + TIME_OUT, + REPRESENTATIVE_NODE_INDEX, + FILTERED_NODES_INDICES, + ConfigsOverride { + consensus_protocol: CONSENSUS_ZUG, + era_duration: TimeDiff::from_millis(ERA_DURATION), + minimum_era_height: MIN_HEIGHT, + minimum_block_time: TimeDiff::from_millis(BLOCK_TIME), + round_seigniorage_rate: SEIGNIORAGE.into(), + finders_fee: FINDERS_FEE_HALF.into(), + finality_signature_proportion: FINALITY_SIG_PROP_ONE.into(), + signature_rewards_max_delay: FINALITY_SIG_LOOKBACK, + ..Default::default() + }, + ) + .await; +} + +#[tokio::test] +#[cfg_attr(not(feature = "failpoints"), ignore)] +async fn run_reward_network_zug_all_finality_half_finders_five_eras() { + run_rewards_network_scenario( + [ + STAKE, STAKE, STAKE, STAKE, STAKE, STAKE, STAKE, STAKE, STAKE, STAKE, + ], + 5, + TIME_OUT, + REPRESENTATIVE_NODE_INDEX, + FILTERED_NODES_INDICES, + ConfigsOverride { + consensus_protocol: CONSENSUS_ZUG, + era_duration: TimeDiff::from_millis(ERA_DURATION), + minimum_era_height: MIN_HEIGHT, + minimum_block_time: TimeDiff::from_millis(BLOCK_TIME), + round_seigniorage_rate: SEIGNIORAGE.into(), + finders_fee: FINDERS_FEE_HALF.into(), + finality_signature_proportion: FINALITY_SIG_PROP_ONE.into(), + signature_rewards_max_delay: FINALITY_SIG_LOOKBACK, + 
..Default::default() + }, + ) + .await; +} + +#[tokio::test] +#[cfg_attr(not(feature = "failpoints"), ignore)] +async fn run_reward_network_zug_all_finality_zero_finders() { + run_rewards_network_scenario( + [ + STAKE, STAKE, STAKE, STAKE, STAKE, STAKE, STAKE, STAKE, STAKE, STAKE, + ], + ERA_COUNT, + TIME_OUT, + REPRESENTATIVE_NODE_INDEX, + FILTERED_NODES_INDICES, + ConfigsOverride { + consensus_protocol: CONSENSUS_ZUG, + era_duration: TimeDiff::from_millis(ERA_DURATION), + minimum_era_height: MIN_HEIGHT, + minimum_block_time: TimeDiff::from_millis(BLOCK_TIME), + round_seigniorage_rate: SEIGNIORAGE.into(), + finders_fee: FINDERS_FEE_ZERO.into(), + finality_signature_proportion: FINALITY_SIG_PROP_ONE.into(), + signature_rewards_max_delay: FINALITY_SIG_LOOKBACK, + ..Default::default() + }, + ) + .await; +} + +#[tokio::test] +#[cfg_attr(not(feature = "failpoints"), ignore)] +async fn run_reward_network_highway_all_finality_zero_finders() { + run_rewards_network_scenario( + [ + STAKE, STAKE, STAKE, STAKE, STAKE, STAKE, STAKE, STAKE, STAKE, STAKE, + ], + ERA_COUNT, + TIME_OUT, + REPRESENTATIVE_NODE_INDEX, + FILTERED_NODES_INDICES, + ConfigsOverride { + consensus_protocol: CONSENSUS_HIGHWAY, + era_duration: TimeDiff::from_millis(ERA_DURATION), + minimum_era_height: MIN_HEIGHT, + minimum_block_time: TimeDiff::from_millis(BLOCK_TIME), + round_seigniorage_rate: SEIGNIORAGE.into(), + finders_fee: FINDERS_FEE_ZERO.into(), + finality_signature_proportion: FINALITY_SIG_PROP_ONE.into(), + signature_rewards_max_delay: FINALITY_SIG_LOOKBACK, + ..Default::default() + }, + ) + .await; +} + +#[tokio::test] +#[cfg_attr(not(feature = "failpoints"), ignore)] +async fn run_reward_network_highway_no_finality() { + run_rewards_network_scenario( + [ + STAKE, STAKE, STAKE, STAKE, STAKE, STAKE, STAKE, STAKE, STAKE, STAKE, + ], + ERA_COUNT, + TIME_OUT, + REPRESENTATIVE_NODE_INDEX, + FILTERED_NODES_INDICES, + ConfigsOverride { + consensus_protocol: CONSENSUS_HIGHWAY, + era_duration: 
TimeDiff::from_millis(ERA_DURATION), + minimum_era_height: MIN_HEIGHT, + minimum_block_time: TimeDiff::from_millis(BLOCK_TIME), + round_seigniorage_rate: SEIGNIORAGE.into(), + finders_fee: FINDERS_FEE_ZERO.into(), + finality_signature_proportion: FINALITY_SIG_PROP_ZERO.into(), + signature_rewards_max_delay: FINALITY_SIG_LOOKBACK, + ..Default::default() + }, + ) + .await; +} + +#[tokio::test] +async fn rewards_are_calculated() { + let initial_stakes = InitialStakes::Random { count: 5 }; + let spec_override = ConfigsOverride { + minimum_era_height: 3, + ..Default::default() + }; + let mut fixture = TestFixture::new(initial_stakes, Some(spec_override)).await; + fixture + .run_until_consensus_in_era(ERA_THREE, Duration::from_secs(150)) + .await; + + let switch_block = fixture.switch_block(ERA_TWO); + + for reward in switch_block + .era_end() + .unwrap() + .rewards() + .values() + .map(|amounts| { + amounts + .iter() + .fold(U512::zero(), |acc, amount| *amount + acc) + }) + { + assert_ne!(reward, U512::zero()); + } +} + +async fn run_rewards_network_scenario( + initial_stakes: impl Into>, + era_count: u64, + time_out: u64, //seconds + representative_node_index: usize, + filtered_nodes_indices: &[usize], + spec_override: ConfigsOverride, +) { + trait AsU512Ext { + fn into_u512(self) -> Ratio; + } + impl AsU512Ext for Ratio { + fn into_u512(self) -> Ratio { + Ratio::new(U512::from(*self.numer()), U512::from(*self.denom())) + } + } + + let initial_stakes = initial_stakes.into(); + + // Instantiate the chain + let mut fixture = + TestFixture::new(InitialStakes::FromVec(initial_stakes), Some(spec_override)).await; + + for i in filtered_nodes_indices { + let filtered_node = fixture.network.runners_mut().nth(*i).unwrap(); + filtered_node + .reactor_mut() + .inner_mut() + .activate_failpoint(&FailpointActivation::new("finality_signature_creation")); + } + + // Run the network for a specified number of eras + let timeout = Duration::from_secs(time_out); + fixture + 
.run_until_stored_switch_block_header(EraId::new(era_count - 1), timeout) + .await; + + // DATA COLLECTION + // Get the switch blocks and bid structs first + let switch_blocks = SwitchBlocks::collect(fixture.network.nodes(), era_count); + + // Representative node + // (this test should normally run a network at nominal performance with identical nodes) + let representative_node = fixture + .network + .nodes() + .values() + .nth(representative_node_index) + .unwrap(); + let representative_storage = &representative_node.main_reactor().storage; + let representative_runtime = &representative_node.main_reactor().contract_runtime; + + // Recover highest completed block height + let highest_completed_height = representative_storage + .highest_complete_block_height() + .expect("missing highest completed block"); + + // Get all the blocks + let blocks: Vec = (0..highest_completed_height + 1) + .map(|i| { + representative_storage + .read_block_by_height(i) + .expect("block not found") + }) + .collect(); + + let protocol_version = ProtocolVersion::from_parts(2, 0, 0); + + // Get total supply history + let total_supply: Vec = (0..highest_completed_height + 1) + .map(|height: u64| { + let state_hash = *representative_storage + .read_block_header_by_height(height, true) + .expect("failure to read block header") + .unwrap() + .state_root_hash(); + let total_supply_req = TotalSupplyRequest::new(state_hash, protocol_version); + let result = representative_runtime + .data_access_layer() + .total_supply(total_supply_req); + + if let TotalSupplyResult::Success { total_supply } = result { + total_supply + } else { + panic!("expected success, not: {:?}", result); + } + }) + .collect(); + + // Tiny helper function + #[inline] + fn add_to_rewards( + recipient: PublicKey, + era: EraId, + reward: Ratio, + rewards: &mut BTreeMap>>, + ) { + match rewards.get_mut(&recipient) { + Some(map) => { + *map.entry(era).or_insert(Ratio::zero()) += reward; + } + None => { + let mut map = 
BTreeMap::new(); + map.insert(era, reward); + rewards.insert(recipient, map); + } + } + } + + let mut recomputed_total_supply = BTreeMap::new(); + recomputed_total_supply.insert(0, Ratio::from(total_supply[0])); + let recomputed_rewards: BTreeMap<_, _> = switch_blocks + .headers + .iter() + .enumerate() + .map(|(i, switch_block)| { + if switch_block.is_genesis() || switch_block.height() > highest_completed_height { + return (i, BTreeMap::new()); + } + let mut recomputed_era_rewards = BTreeMap::new(); + if !switch_block.is_genesis() { + let supply_carryover = recomputed_total_supply + .get(&(i - 1)) + .copied() + .expect("expected prior recomputed supply value"); + recomputed_total_supply.insert(i, supply_carryover); + } + + // It's not a genesis block, so we know there's something with a lower era id + let previous_switch_block_height = switch_blocks.headers[i - 1].height(); + let current_era_slated_weights = match switch_blocks.headers[i - 1].clone_era_end() { + Some(era_report) => era_report.next_era_validator_weights().clone(), + _ => panic!("unexpectedly absent era report"), + }; + let total_current_era_weights = current_era_slated_weights + .iter() + .fold(U512::zero(), move |acc, s| acc + s.1); + let weights_block_idx = if switch_blocks.headers[i - 1].is_genesis() { + i - 1 + } else { + i - 2 + }; + let (previous_era_slated_weights, total_previous_era_weights) = + match switch_blocks.headers[weights_block_idx].clone_era_end() { + Some(era_report) => { + let next_weights = era_report.next_era_validator_weights().clone(); + let total_next_weights = next_weights + .iter() + .fold(U512::zero(), move |acc, s| acc + s.1); + (next_weights, total_next_weights) + } + _ => panic!("unexpectedly absent era report"), + }; + + let rewarded_range = + previous_switch_block_height as usize + 1..switch_block.height() as usize + 1; + let rewarded_blocks = &blocks[rewarded_range]; + let block_reward = (Ratio::::one() + - fixture + .chainspec + .core_config + 
.finality_signature_proportion + .into_u512()) + * recomputed_total_supply[&(i - 1)] + * fixture + .chainspec + .core_config + .round_seigniorage_rate + .into_u512(); + let signatures_reward = fixture + .chainspec + .core_config + .finality_signature_proportion + .into_u512() + * recomputed_total_supply[&(i - 1)] + * fixture + .chainspec + .core_config + .round_seigniorage_rate + .into_u512(); + let previous_signatures_reward_idx = if switch_blocks.headers[i - 1].is_genesis() { + i - 1 + } else { + i - 2 + }; + let previous_signatures_reward = fixture + .chainspec + .core_config + .finality_signature_proportion + .into_u512() + * recomputed_total_supply[&previous_signatures_reward_idx] + * fixture + .chainspec + .core_config + .round_seigniorage_rate + .into_u512(); + + rewarded_blocks.iter().for_each(|block: &Block| { + // Block production rewards + let proposer = block.proposer().clone(); + add_to_rewards( + proposer.clone(), + block.era_id(), + block_reward, + &mut recomputed_era_rewards, + ); + + // Recover relevant finality signatures + block.rewarded_signatures().iter().enumerate().for_each( + |(offset, signatures_packed)| { + if block.height() as usize - offset - 1 + <= previous_switch_block_height as usize + { + let rewarded_contributors = signatures_packed.to_validator_set( + previous_era_slated_weights + .keys() + .cloned() + .collect::>(), + ); + rewarded_contributors.iter().for_each(|contributor| { + let contributor_proportion = Ratio::new( + previous_era_slated_weights + .get(contributor) + .copied() + .expect("expected current era validator"), + total_previous_era_weights, + ); + // collection always goes to the era in which the block citing the + // reward was created + add_to_rewards( + proposer.clone(), + block.era_id(), + fixture.chainspec.core_config.finders_fee.into_u512() + * contributor_proportion + * previous_signatures_reward, + &mut recomputed_era_rewards, + ); + add_to_rewards( + contributor.clone(), + switch_blocks.headers[i - 
1].era_id(), + (Ratio::::one() + - fixture.chainspec.core_config.finders_fee.into_u512()) + * contributor_proportion + * previous_signatures_reward, + &mut recomputed_era_rewards, + ) + }); + } else { + let rewarded_contributors = signatures_packed.to_validator_set( + current_era_slated_weights + .keys() + .cloned() + .collect::>(), + ); + rewarded_contributors.iter().for_each(|contributor| { + let contributor_proportion = Ratio::new( + *current_era_slated_weights + .get(contributor) + .expect("expected current era validator"), + total_current_era_weights, + ); + add_to_rewards( + proposer.clone(), + block.era_id(), + fixture.chainspec.core_config.finders_fee.into_u512() + * contributor_proportion + * signatures_reward, + &mut recomputed_era_rewards, + ); + add_to_rewards( + contributor.clone(), + block.era_id(), + (Ratio::::one() + - fixture.chainspec.core_config.finders_fee.into_u512()) + * contributor_proportion + * signatures_reward, + &mut recomputed_era_rewards, + ); + }); + } + }, + ); + }); + + // Make sure we round just as we do in the real code, at the end of an era's + // calculation, right before minting and transferring + recomputed_era_rewards.iter_mut().for_each(|(_, rewards)| { + rewards.values_mut().for_each(|amount| { + *amount = amount.trunc(); + }); + let truncated_reward = rewards.values().sum::>(); + let era_end_supply = recomputed_total_supply + .get_mut(&i) + .expect("expected supply at end of era"); + *era_end_supply += truncated_reward; + }); + + (i, recomputed_era_rewards) + }) + .collect(); + + // Recalculated total supply is equal to observed total supply + switch_blocks.headers.iter().for_each(|header| { + if header.height() <= highest_completed_height { + assert_eq!( + Ratio::from(total_supply[header.height() as usize]), + *(recomputed_total_supply + .get(&(header.era_id().value() as usize)) + .expect("expected recalculated supply")), + "total supply does not match at height {}", + header.height() + ); + } + }); + + // Recalculated 
rewards are equal to observed rewards; total supply increase is equal to total + // rewards; + recomputed_rewards.iter().for_each(|(era, rewards)| { + if era > &0 && switch_blocks.headers[*era].height() <= highest_completed_height { + let observed_total_rewards = match switch_blocks.headers[*era] + .clone_era_end() + .expect("expected EraEnd") + .rewards() + { + Rewards::V1(v1_rewards) => v1_rewards + .iter() + .fold(U512::zero(), |acc, reward| U512::from(*reward.1) + acc), + Rewards::V2(v2_rewards) => v2_rewards + .iter() + .flat_map(|(_key, amounts)| amounts) + .fold(U512::zero(), |acc, reward| *reward + acc), + }; + let recomputed_total_rewards: U512 = rewards + .values() + .flat_map(|amounts| amounts.values().map(|reward| reward.to_integer())) + .sum(); + assert_eq!( + Ratio::from(recomputed_total_rewards), + Ratio::from(observed_total_rewards), + "total rewards do not match at era {}\nobserved = {:#?}\nrecomputed = {:#?}", + era, + switch_blocks.headers[*era] + .clone_era_end() + .expect("") + .rewards(), + rewards, + ); + assert_eq!( + Ratio::from(recomputed_total_rewards), + recomputed_total_supply + .get(era) + .expect("expected recalculated supply") + - recomputed_total_supply + .get(&(era - 1)) + .expect("expected recalculated supply"), + "supply growth does not match rewards at era {}", + era + ) + } + }) +} diff --git a/node/src/reactor/main_reactor/tests/switch_blocks.rs b/node/src/reactor/main_reactor/tests/switch_blocks.rs new file mode 100644 index 0000000000..ed5890f406 --- /dev/null +++ b/node/src/reactor/main_reactor/tests/switch_blocks.rs @@ -0,0 +1,70 @@ +use std::collections::BTreeMap; + +use casper_storage::{ + data_access_layer::{BidsRequest, BidsResult}, + global_state::state::StateProvider, +}; +use casper_types::{system::auction::BidKind, BlockHeader, EraId, PublicKey, U512}; + +use crate::reactor::main_reactor::tests::Nodes; + +/// A set of consecutive switch blocks. 
+pub(crate) struct SwitchBlocks { + pub headers: Vec, +} + +impl SwitchBlocks { + /// Collects all switch blocks of the first `era_count` eras, and asserts that they are equal + /// in all nodes. + pub(crate) fn collect(nodes: &Nodes, era_count: u64) -> SwitchBlocks { + let mut headers = Vec::new(); + for era_number in 0..era_count { + let mut header_iter = nodes.values().map(|runner| { + let storage = runner.main_reactor().storage(); + let maybe_block = storage.read_switch_block_by_era_id(EraId::from(era_number)); + maybe_block.expect("missing switch block").take_header() + }); + let header = header_iter.next().unwrap(); + assert_eq!(era_number, header.era_id().value()); + for other_header in header_iter { + assert_eq!(header, other_header); + } + headers.push(header); + } + SwitchBlocks { headers } + } + + /// Returns the list of equivocators in the given era. + pub(crate) fn equivocators(&self, era_number: u64) -> &[PublicKey] { + self.headers[era_number as usize] + .maybe_equivocators() + .expect("era end") + } + + /// Returns the list of inactive validators in the given era. + pub(crate) fn inactive_validators(&self, era_number: u64) -> &[PublicKey] { + self.headers[era_number as usize] + .maybe_inactive_validators() + .expect("era end") + } + + /// Returns the list of validators in the successor era. + pub(crate) fn next_era_validators(&self, era_number: u64) -> &BTreeMap { + self.headers[era_number as usize] + .next_era_validator_weights() + .expect("validators") + } + + /// Returns the set of bids in the auction contract at the end of the given era. 
+ pub(crate) fn bids(&self, nodes: &Nodes, era_number: u64) -> Vec { + let state_root_hash = *self.headers[era_number as usize].state_root_hash(); + for runner in nodes.values() { + let request = BidsRequest::new(state_root_hash); + let data_provider = runner.main_reactor().contract_runtime().data_access_layer(); + if let BidsResult::Success { bids } = data_provider.bids(request) { + return bids; + } + } + unreachable!("at least one node should have bids for era {}", era_number); + } +} diff --git a/node/src/reactor/main_reactor/tests/transaction_scenario.rs b/node/src/reactor/main_reactor/tests/transaction_scenario.rs new file mode 100644 index 0000000000..1998c82523 --- /dev/null +++ b/node/src/reactor/main_reactor/tests/transaction_scenario.rs @@ -0,0 +1,309 @@ +mod asertions; +mod utils; +use asertions::{ + ExecResultCost, PublicKeyBalanceChange, PublicKeyTotalMeetsAvailable, TotalSupplyChange, + TransactionFailure, TransactionSuccessful, +}; +use casper_types::{ + testing::TestRng, FeeHandling, Gas, PricingMode, PublicKey, RefundHandling, TimeDiff, + Transaction, U512, +}; +use num_rational::Ratio; +use utils::{build_wasm_transction, RunUntilCondition, TestScenarioBuilder}; + +use crate::{ + reactor::main_reactor::tests::{ + transactions::{ + invalid_wasm_txn, ALICE_PUBLIC_KEY, ALICE_SECRET_KEY, BOB_PUBLIC_KEY, BOB_SECRET_KEY, + CHARLIE_PUBLIC_KEY, MIN_GAS_PRICE, + }, + ONE_MIN, + }, + testing::LARGE_WASM_LANE_ID, + types::transaction::transaction_v1_builder::TransactionV1Builder, +}; + +#[tokio::test] +async fn should_accept_transfer_without_id() { + let mut rng = TestRng::new(); + let builder = TestScenarioBuilder::new(); + let mut test_scenario = builder.build(&mut rng).await; + + let transfer_amount = 2_500_000_001_u64; //This should be + //1 mote more than the native_transfer_minimum_motes in local + // chainspec that we use for tests + let chain_name = test_scenario.chain_name(); + test_scenario.setup().await.unwrap(); + + let mut txn: Transaction = 
Transaction::from( + TransactionV1Builder::new_transfer(transfer_amount, None, CHARLIE_PUBLIC_KEY.clone(), None) + .unwrap() + .with_initiator_addr(ALICE_PUBLIC_KEY.clone()) + .with_pricing_mode(PricingMode::Fixed { + gas_price_tolerance: 1, + additional_computation_factor: 0, + }) + .with_chain_name(chain_name) + .build() + .unwrap(), + ); + txn.sign(&ALICE_SECRET_KEY); + let hash = txn.hash(); + test_scenario.run(vec![txn]).await.unwrap(); + + test_scenario.assert(TransactionSuccessful::new(hash)).await; +} + +#[tokio::test] +async fn should_native_transfer_nofee_norefund_fixed() { + const TRANSFER_AMOUNT: u64 = 30_000_000_000; + let mut rng = TestRng::new(); + let builder = TestScenarioBuilder::new() + .with_refund_handling(RefundHandling::NoRefund) + .with_fee_handling(FeeHandling::NoFee) + .with_balance_hold_interval(TimeDiff::from_seconds(5)); + let mut test_scenario = builder.build(&mut rng).await; + + let chain_name = test_scenario.chain_name(); + test_scenario.setup().await.unwrap(); + + let mut txn: Transaction = Transaction::from( + TransactionV1Builder::new_transfer( + TRANSFER_AMOUNT, + None, + CHARLIE_PUBLIC_KEY.clone(), + Some(0xDEADBEEF), + ) + .unwrap() + .with_initiator_addr(ALICE_PUBLIC_KEY.clone()) + .with_pricing_mode(PricingMode::Fixed { + gas_price_tolerance: 1, + additional_computation_factor: 0, + }) + .with_chain_name(chain_name) + .build() + .unwrap(), + ); + txn.sign(&ALICE_SECRET_KEY); + let hash = txn.hash(); + test_scenario.run(vec![txn]).await.unwrap(); + + let expected_transfer_gas: U512 = test_scenario.mint_const_transfer_cost().into(); + test_scenario.assert(TransactionSuccessful::new(hash)).await; + + test_scenario + .assert(ExecResultCost::new( + hash, + expected_transfer_gas, + Gas::new(expected_transfer_gas), + )) + .await; + test_scenario + .assert(PublicKeyBalanceChange::new( + ALICE_PUBLIC_KEY.clone(), + -(TRANSFER_AMOUNT as i64), + -((TRANSFER_AMOUNT + expected_transfer_gas.as_u64()) as i64), + )) + .await; + //Charlie 
should have the transfer amount at his disposal + test_scenario + .assert(PublicKeyBalanceChange::new( + CHARLIE_PUBLIC_KEY.clone(), + TRANSFER_AMOUNT as i64, + TRANSFER_AMOUNT as i64, + )) + .await; + // Check if the hold is released. + let hold_release_block_height = test_scenario.get_block_height() + 9; // Block time is 1s. + test_scenario + .run_until(RunUntilCondition::BlockHeight { + block_height: hold_release_block_height, + within: ONE_MIN, + }) + .await + .unwrap(); + test_scenario + .assert(PublicKeyTotalMeetsAvailable::new(ALICE_PUBLIC_KEY.clone())) + .await; +} + +#[tokio::test] +async fn erroneous_native_transfer_nofee_norefund_fixed() { + let mut rng = TestRng::new(); + let builder = TestScenarioBuilder::new() + .with_refund_handling(RefundHandling::NoRefund) + .with_fee_handling(FeeHandling::NoFee) + .with_balance_hold_interval(TimeDiff::from_seconds(5)); + let mut test_scenario = builder.build(&mut rng).await; + let chain_name = test_scenario.chain_name(); + test_scenario.setup().await.unwrap(); + + let transfer_amount = test_scenario.native_transfer_minimum_motes() + 100; + + let mut txn: Transaction = Transaction::from( + TransactionV1Builder::new_transfer(transfer_amount, None, CHARLIE_PUBLIC_KEY.clone(), None) + .unwrap() + .with_initiator_addr(PublicKey::from(ALICE_SECRET_KEY.as_ref())) + .with_pricing_mode(PricingMode::Fixed { + gas_price_tolerance: 1, + additional_computation_factor: 0, + }) + .with_chain_name(chain_name.clone()) + .build() + .unwrap(), + ); + txn.sign(&ALICE_SECRET_KEY); + let hash = txn.hash(); + test_scenario.run(vec![txn]).await.unwrap(); + + test_scenario.assert(TransactionSuccessful::new(hash)).await; + + let mut txn: Transaction = Transaction::from( + TransactionV1Builder::new_transfer( + transfer_amount + 100, + None, + BOB_PUBLIC_KEY.clone(), + None, + ) + .unwrap() + .with_initiator_addr(CHARLIE_PUBLIC_KEY.clone()) + .with_pricing_mode(PricingMode::Fixed { + gas_price_tolerance: 1, + additional_computation_factor: 
0, + }) + .with_chain_name(chain_name) + .build() + .unwrap(), + ); + txn.sign(&ALICE_SECRET_KEY); + let hash = txn.hash(); + test_scenario.run(vec![txn]).await.unwrap(); + test_scenario.assert(TransactionFailure::new(hash)).await; // transaction should have failed. + let expected_transfer_cost = test_scenario.mint_const_transfer_cost() as u64; + let expected_transfer_gas: U512 = expected_transfer_cost.into(); + test_scenario + .assert(ExecResultCost::new( + hash, + expected_transfer_gas, + Gas::new(expected_transfer_gas), + )) + .await; + // Even though the transaction failed, a hold must still be in place for the transfer cost. + // The hold will show up in "available" being smaller than "total" + test_scenario + .assert(PublicKeyBalanceChange::new( + CHARLIE_PUBLIC_KEY.clone(), + transfer_amount as i64, + (transfer_amount - expected_transfer_cost) as i64, + )) + .await; +} + +#[tokio::test] +async fn should_cancel_refund_for_erroneous_wasm() { + // as a punitive measure, refunds are not issued for erroneous wasms even + // if refunds are turned on. + + let mut rng = TestRng::new(); + let refund_ratio = Ratio::new(1, 2); + let builder = TestScenarioBuilder::new() + .with_refund_handling(RefundHandling::Refund { refund_ratio }) + .with_fee_handling(FeeHandling::PayToProposer); + let mut test_scenario = builder.build(&mut rng).await; + let chain_name = test_scenario.chain_name(); + test_scenario.setup().await.unwrap(); + let mut txn = build_wasm_transction( + chain_name, + &BOB_SECRET_KEY, + PricingMode::Fixed { + gas_price_tolerance: 1, + additional_computation_factor: 0, + }, + ); + txn.sign(&BOB_SECRET_KEY); + let hash = txn.hash(); + test_scenario.run(vec![txn]).await.unwrap(); + test_scenario.assert(TransactionFailure::new(hash)).await; // transaction should have failed. 
+    let expected_transaction_cost = 1_000_000_000_000_u64; // transaction gas limit for large wasms lane
+    test_scenario
+        .assert(ExecResultCost::new(
+            hash,
+            expected_transaction_cost.into(),
+            Gas::new(0),
+        ))
+        .await;
+
+    test_scenario.assert(TransactionFailure::new(hash)).await; // transaction should have failed.
+    // Bob gets no refund because the wasm errored
+    test_scenario
+        .assert(PublicKeyBalanceChange::new(
+            BOB_PUBLIC_KEY.clone(),
+            -(expected_transaction_cost as i64),
+            -(expected_transaction_cost as i64),
+        ))
+        .await;
+
+    // Alice should get all the fee since it's set to pay to proposer
+    // AND Bob didn't get a refund
+    test_scenario
+        .assert(PublicKeyBalanceChange::new(
+            ALICE_PUBLIC_KEY.clone(),
+            expected_transaction_cost as i64,
+            expected_transaction_cost as i64,
+        ))
+        .await;
+}
+
+#[tokio::test]
+async fn should_not_refund_erroneous_wasm_burn_fixed() {
+    let mut rng = TestRng::new();
+    let refund_ratio = Ratio::new(1, 2);
+    let builder = TestScenarioBuilder::new()
+        .with_refund_handling(RefundHandling::Burn { refund_ratio })
+        .with_fee_handling(FeeHandling::PayToProposer)
+        .with_minimum_era_height(5) // make the era longer so that the transaction doesn't land in the switch block.
+        .with_balance_hold_interval(TimeDiff::from_seconds(5));
+    let mut test_scenario = builder.build(&mut rng).await;
+    test_scenario.setup().await.unwrap();
+    let gas_limit = test_scenario
+        .get_gas_limit_for_lane(LARGE_WASM_LANE_ID) // The wasm should fall in this lane
+        .unwrap();
+    let txn = invalid_wasm_txn(
+        BOB_SECRET_KEY.clone(),
+        PricingMode::Fixed {
+            gas_price_tolerance: MIN_GAS_PRICE,
+            additional_computation_factor: 0,
+        },
+    );
+    let hash = txn.hash();
+
+    let exec_infos = test_scenario.run(vec![txn]).await.unwrap();
+
+    test_scenario.assert(TransactionFailure::new(hash)).await; // transaction should have failed.
+    test_scenario
+        .assert(ExecResultCost::new(hash, gas_limit.into(), Gas::new(0)))
+        .await;
+    // Supply shouldn't change (refund handling is burn, but the wasm was erroneous so we don't
+    // calculate refund)
+    test_scenario
+        .assert(TotalSupplyChange::new(0, exec_infos[0].block_height))
+        .await;
+    // Bob's transaction was invalid. He should get NO refund. But also -
+    // since no refund is calculated nothing will be burned (despite
+    // RefundHandling::Burn - we don't calculate refunds for erroneous wasms)
+    test_scenario
+        .assert(PublicKeyBalanceChange::new(
+            BOB_PUBLIC_KEY.clone(),
+            -(gas_limit as i64),
+            -(gas_limit as i64),
+        ))
+        .await;
+    // Alice gets paid for executing the transaction since it's set to pay to proposer
+    test_scenario
+        .assert(PublicKeyBalanceChange::new(
+            ALICE_PUBLIC_KEY.clone(),
+            gas_limit as i64,
+            gas_limit as i64,
+        ))
+        .await;
+}
diff --git a/node/src/reactor/main_reactor/tests/transaction_scenario/asertions.rs b/node/src/reactor/main_reactor/tests/transaction_scenario/asertions.rs
new file mode 100644
index 0000000000..363b93dbec
--- /dev/null
+++ b/node/src/reactor/main_reactor/tests/transaction_scenario/asertions.rs
@@ -0,0 +1,206 @@
+use super::utils::{Assertion, TestStateSnapshot};
+use crate::reactor::main_reactor::tests::transactions::{
+    assert_exec_result_cost, exec_result_is_success, BalanceAmount,
+};
+use async_trait::async_trait;
+use casper_types::{Gas, PublicKey, TransactionHash, U512};
+use once_cell::sync::Lazy;
+use std::collections::BTreeMap;
+
+pub(crate) struct TransactionSuccessful {
+    hash: TransactionHash,
+}
+
+impl TransactionSuccessful {
+    pub(crate) fn new(hash: TransactionHash) -> Self {
+        Self { hash }
+    }
+}
+
+pub static ZERO_BALANCE_AMOUNT: Lazy = Lazy::new(BalanceAmount::zero);
+
+#[async_trait]
+impl Assertion for TransactionSuccessful {
+    async fn assert(&self, snapshots_at_heights: BTreeMap) {
+        let current_state = snapshots_at_heights.last_key_value().unwrap().1;
+
assert!(current_state.exec_infos.contains_key(&self.hash));
+        let exec_info = current_state.exec_infos.get(&self.hash).unwrap();
+        assert!(exec_info.execution_result.is_some());
+        let result = exec_info.execution_result.as_ref().unwrap();
+        assert!(exec_result_is_success(result));
+    }
+}
+
+pub(crate) struct TransactionFailure {
+    hash: TransactionHash,
+}
+
+impl TransactionFailure {
+    pub(crate) fn new(hash: TransactionHash) -> Self {
+        Self { hash }
+    }
+}
+
+#[async_trait]
+impl Assertion for TransactionFailure {
+    async fn assert(&self, snapshots_at_heights: BTreeMap) {
+        let current_state = snapshots_at_heights.last_key_value().unwrap().1;
+        assert!(current_state.exec_infos.contains_key(&self.hash));
+        let exec_info = current_state.exec_infos.get(&self.hash).unwrap();
+        assert!(exec_info.execution_result.is_some());
+        let result = exec_info.execution_result.as_ref().unwrap();
+        assert!(!exec_result_is_success(result));
+    }
+}
+
+pub(crate) struct ExecResultCost {
+    hash: TransactionHash,
+    expected_cost: U512,
+    expected_consumed_gas: Gas,
+}
+
+impl ExecResultCost {
+    pub(crate) fn new(
+        hash: TransactionHash,
+        expected_cost: U512,
+        expected_consumed_gas: Gas,
+    ) -> Self {
+        Self {
+            hash,
+            expected_cost,
+            expected_consumed_gas,
+        }
+    }
+}
+
+#[async_trait]
+impl Assertion for ExecResultCost {
+    async fn assert(&self, snapshots_at_heights: BTreeMap) {
+        let current_state = snapshots_at_heights.last_key_value().unwrap().1;
+        assert!(current_state.exec_infos.contains_key(&self.hash));
+        let exec_info = current_state.exec_infos.get(&self.hash).unwrap();
+        assert!(exec_info.execution_result.is_some());
+        let result = exec_info.execution_result.as_ref().unwrap();
+        assert_exec_result_cost(
+            result.clone(),
+            self.expected_cost,
+            self.expected_consumed_gas,
+            "transfer_cost_fixed_price_no_fee_no_refund",
+        );
+    }
+}
+
+pub(crate) struct TotalSupplyChange {
+    //It's a signed integer since we can expect either an increase or decrease.
+    total_supply_change: i64,
+    at_block_height: u64,
+}
+
+impl TotalSupplyChange {
+    pub(crate) fn new(total_supply_change: i64, at_block_height: u64) -> Self {
+        Self {
+            total_supply_change,
+            at_block_height,
+        }
+    }
+}
+
+#[async_trait]
+impl Assertion for TotalSupplyChange {
+    async fn assert(&self, snapshots_at_heights: BTreeMap) {
+        let before = snapshots_at_heights.get(&1).unwrap();
+        let after = snapshots_at_heights.get(&self.at_block_height).unwrap();
+        let before_total_supply = before.total_supply;
+        let got = after.total_supply;
+        let total_supply = self.total_supply_change;
+        let expected = if total_supply > 0 {
+            before_total_supply
+                .checked_add((total_supply.unsigned_abs()).into())
+                .unwrap()
+        } else {
+            before_total_supply
+                .checked_sub((total_supply.unsigned_abs()).into())
+                .unwrap()
+        };
+        assert_eq!(expected, got);
+    }
+}
+
+/// Assert that the account associated with the given public key has observed a change in balance.
+/// Can assert on total and available balance.
+pub(crate) struct PublicKeyBalanceChange {
+    /// public key of the account which needs to be queried
+    public_key: PublicKey,
+    //It's a signed integer since we can expect either an increase or decrease.
+    total_balance_change: i64,
+    //It's a signed integer since we can expect either an increase or decrease.
+ available_balance_change: i64, +} + +impl PublicKeyBalanceChange { + pub(crate) fn new( + public_key: PublicKey, + total_balance_change: i64, + available_balance_change: i64, + ) -> Self { + Self { + public_key, + total_balance_change, + available_balance_change, + } + } +} + +#[async_trait] +impl Assertion for PublicKeyBalanceChange { + async fn assert(&self, snapshots_at_heights: BTreeMap) { + let account_hash = self.public_key.to_account_hash(); + let before = snapshots_at_heights.get(&0).unwrap(); + let after = snapshots_at_heights.last_key_value().unwrap().1; + let before_balance = before + .balances + .get(&account_hash) + //There is a chance that the key we're asking for was not an account in + // genesis, if that's true we don't expect it to be at height 0. + .unwrap_or(&ZERO_BALANCE_AMOUNT); + let before_total = before_balance.total.as_u64(); + let before_available = before_balance.available.as_u64(); + let after_total = after.balances.get(&account_hash).unwrap().total.as_u64(); + let after_available = after + .balances + .get(&account_hash) + .unwrap() + .available + .as_u64(); + assert_eq!( + after_total as i64, + before_total as i64 + self.total_balance_change + ); + assert_eq!( + after_available as i64, + before_available as i64 + self.available_balance_change + ); + } +} + +pub(crate) struct PublicKeyTotalMeetsAvailable { + public_key: PublicKey, +} + +impl PublicKeyTotalMeetsAvailable { + pub(crate) fn new(public_key: PublicKey) -> Self { + Self { public_key } + } +} + +#[async_trait] +impl Assertion for PublicKeyTotalMeetsAvailable { + async fn assert(&self, snapshots_at_heights: BTreeMap) { + let account_hash = self.public_key.to_account_hash(); + let after = snapshots_at_heights.last_key_value().unwrap().1; + let balance = after.balances.get(&account_hash).unwrap(); + let after_total = balance.total; + let after_available = balance.available; + assert_eq!(after_total, after_available); + } +} diff --git 
a/node/src/reactor/main_reactor/tests/transaction_scenario/utils.rs b/node/src/reactor/main_reactor/tests/transaction_scenario/utils.rs new file mode 100644 index 0000000000..90450fd097 --- /dev/null +++ b/node/src/reactor/main_reactor/tests/transaction_scenario/utils.rs @@ -0,0 +1,490 @@ +use async_trait::async_trait; +use casper_storage::{ + data_access_layer::{ + balance::BalanceHandling, + tagged_values::{TaggedValuesRequest, TaggedValuesResult, TaggedValuesSelection}, + BalanceRequest, BalanceResult, ProofHandling, TotalSupplyRequest, TotalSupplyResult, + }, + global_state::state::StateProvider, +}; +use casper_types::{ + account::AccountHash, bytesrepr::Bytes, testing::TestRng, EraId, ExecutionInfo, FeeHandling, + KeyTag, PricingHandling, PricingMode, PublicKey, RefundHandling, SecretKey, TimeDiff, + Transaction, TransactionHash, TransactionRuntimeParams, U512, +}; +use once_cell::sync::OnceCell; +use std::{collections::BTreeMap, sync::Arc, time::Duration}; + +use crate::{ + reactor::main_reactor::tests::{ + configs_override::ConfigsOverride, + fixture::TestFixture, + transactions::{ + BalanceAmount, ALICE_PUBLIC_KEY, ALICE_SECRET_KEY, BOB_PUBLIC_KEY, BOB_SECRET_KEY, + }, + ERA_ONE, ONE_MIN, TEN_SECS, + }, + types::transaction::transaction_v1_builder::TransactionV1Builder, +}; + +pub(crate) struct TestStateSnapshot { + pub(crate) exec_infos: BTreeMap, + pub(crate) balances: BTreeMap, + pub(crate) total_supply: U512, +} + +/// This defines the condition +/// a network should achieve after setup and start +/// before we can proceed with transaction injection +#[derive(Clone, Debug)] +pub(crate) enum RunUntilCondition { + /// Runs the network until all nodes reach the given completed block height. + BlockHeight { block_height: u64, within: Duration }, + /// Runs the network until all nodes' consensus components reach the given era. 
+ ConsensusInEra { era_id: EraId, within: Duration }, +} + +impl RunUntilCondition { + async fn run_until(&self, fixture: &mut TestFixture) -> Result<(), TestScenarioError> { + match self { + RunUntilCondition::BlockHeight { + block_height, + within, + } => { + fixture + .try_run_until_block_height(*block_height, *within) + .await + } + RunUntilCondition::ConsensusInEra { era_id, within } => { + fixture.try_until_consensus_in_era(*era_id, *within).await + } + } + .map_err(|_| TestScenarioError::NetworkDidNotStabilize) + } +} + +#[derive(Debug)] +pub(crate) enum TestScenarioError { + UnexpectedState, + NetworkDidNotStabilize, + CannotSetBeforeState, +} + +struct ScenarioDataInstance { + fixture: TestFixture, + block_height: u64, +} + +impl ScenarioDataInstance { + pub(crate) async fn inject_transaction(&mut self, txn: Transaction) { + self.fixture.inject_transaction(txn).await + } + + pub(crate) async fn run_until_executed_transaction( + &mut self, + txn_hash: &TransactionHash, + within: Duration, + ) { + self.fixture + .run_until_executed_transaction(txn_hash, within) + .await + } +} + +#[async_trait] +pub(crate) trait Assertion: Send + Sync { + async fn assert(&self, snapshots_at_heights: BTreeMap); +} + +#[derive(Debug, Clone, Eq, PartialEq)] +enum TestScenarioState { + PreSetup, + PreRun, + Running, +} + +pub(crate) struct TestScenario { + state: TestScenarioState, + data: ScenarioDataInstance, + initial_run_until: RunUntilCondition, + exec_infos: BTreeMap, + state_before_test: OnceCell, +} + +impl TestScenario { + pub(crate) async fn setup(&mut self) -> Result<(), TestScenarioError> { + if self.state != TestScenarioState::PreSetup { + return Err(TestScenarioError::UnexpectedState); + } + self.run_until(self.initial_run_until.clone()).await?; + self.state_before_test + .set(self.get_current_state().await) + .map_err(|_| TestScenarioError::CannotSetBeforeState)?; + self.state = TestScenarioState::PreRun; + Ok(()) + } + + pub(crate) async fn run( + &mut self, + 
to_inject: Vec, + ) -> Result, TestScenarioError> { + if self.state == TestScenarioState::PreSetup { + return Err(TestScenarioError::UnexpectedState); + } + let mut to_ret = vec![]; + for transaction in &to_inject { + let hash = transaction.hash(); + self.data.inject_transaction(transaction.clone()).await; + self.data + .run_until_executed_transaction(&hash, TEN_SECS) + .await; + let (_node_id, runner) = self.data.fixture.network.nodes().iter().next().unwrap(); + let exec_info = runner + .main_reactor() + .storage() + .read_execution_info(hash) + .expect("Expected transaction to be included in a block."); + let transaction_block_height = exec_info.block_height; + if transaction_block_height > self.data.block_height { + self.data.block_height = transaction_block_height; + } + to_ret.push(exec_info.clone()); + self.exec_infos.insert(hash, exec_info); + } + self.state = TestScenarioState::Running; + Ok(to_ret) + } + + pub(crate) async fn run_until( + &mut self, + run_until: RunUntilCondition, + ) -> Result<(), TestScenarioError> { + run_until.run_until(&mut self.data.fixture).await + } + + pub(crate) fn chain_name(&self) -> String { + self.data.fixture.chainspec.network_config.name.clone() + } + + pub(crate) async fn assert(&mut self, assertion: T) { + if self.state_before_test.get().is_none() { + panic!("TestScenario not in state eligible to do assertions"); + } + let max_block_height = self.data.fixture.highest_complete_block().height(); + let mut snapshots = BTreeMap::new(); + for i in 0..=max_block_height { + snapshots.insert(i, self.get_state_at_height(i).await); + } + assertion.assert(snapshots).await + } + + async fn get_state_at_height(&self, block_height: u64) -> TestStateSnapshot { + let all_accounts = self.get_all_accounts(block_height).await; + let mut balances = BTreeMap::new(); + for account_hash in all_accounts { + let balance_amount = self.get_balance_amount(account_hash, block_height).await; + balances.insert(account_hash, balance_amount); + } + + let 
total_supply = self.get_total_supply(block_height).await; + let exec_infos: BTreeMap = self + .exec_infos + .iter() + .filter_map(|(k, v)| { + if v.block_height <= block_height { + Some((*k, v.clone())) + } else { + None + } + }) + .collect(); + + TestStateSnapshot { + exec_infos, + balances, + total_supply, + } + } + + async fn get_current_state(&self) -> TestStateSnapshot { + let block = self.data.fixture.highest_complete_block(); + let block_height = block.height(); + self.get_state_at_height(block_height).await + } + + async fn get_all_accounts(&self, block_height: u64) -> Vec { + let fixture = &self.data.fixture; + let (_node_id, runner) = fixture.network.nodes().iter().next().unwrap(); + let block_header = runner + .main_reactor() + .storage() + .read_block_header_by_height(block_height, true) + .expect("failure to read block header") + .unwrap(); + let state_hash = *block_header.state_root_hash(); + let request = + TaggedValuesRequest::new(state_hash, TaggedValuesSelection::All(KeyTag::Account)); + match runner + .main_reactor() + .contract_runtime() + .data_access_layer() + .tagged_values(request) + { + TaggedValuesResult::Success { values, .. 
} => values + .iter() + .filter_map(|el| el.as_account().map(|el| el.account_hash())) + .collect(), + _ => panic!("Couldn't get all account hashes"), + } + } + + pub(crate) fn get_balance( + &self, + account_hash: AccountHash, + block_height: Option, + get_total: bool, + ) -> BalanceResult { + let fixture = &self.data.fixture; + let (_node_id, runner) = fixture.network.nodes().iter().next().unwrap(); + let protocol_version = fixture.chainspec.protocol_version(); + let block_height = block_height.unwrap_or( + runner + .main_reactor() + .storage() + .highest_complete_block_height() + .expect("missing highest completed block"), + ); + let block_header = runner + .main_reactor() + .storage() + .read_block_header_by_height(block_height, true) + .expect("failure to read block header") + .unwrap(); + let state_hash = *block_header.state_root_hash(); + let balance_handling = if get_total { + BalanceHandling::Total + } else { + BalanceHandling::Available + }; + runner + .main_reactor() + .contract_runtime() + .data_access_layer() + .balance(BalanceRequest::from_account_hash( + state_hash, + protocol_version, + account_hash, + balance_handling, + ProofHandling::NoProofs, + )) + } + + async fn get_balance_amount( + &self, + account_hash: AccountHash, + block_height: u64, + ) -> BalanceAmount { + let block_height = Some(block_height); + + let total = self + .get_balance(account_hash, block_height, true) + .total_balance() + .copied() + .unwrap_or(U512::zero()); + let available = self + .get_balance(account_hash, block_height, false) + .available_balance() + .copied() + .unwrap_or(U512::zero()); + BalanceAmount { available, total } + } + + async fn get_total_supply(&self, block_height: u64) -> U512 { + let fixture = &self.data.fixture; + let (_node_id, runner) = fixture.network.nodes().iter().next().unwrap(); + let protocol_version = fixture.chainspec.protocol_version(); + let state_hash = *runner + .main_reactor() + .storage() + .read_block_header_by_height(block_height, true) 
+ .expect("failure to read block header") + .unwrap() + .state_root_hash(); + + let total_supply_req = TotalSupplyRequest::new(state_hash, protocol_version); + let result = runner + .main_reactor() + .contract_runtime() + .data_access_layer() + .total_supply(total_supply_req); + + if let TotalSupplyResult::Success { total_supply } = result { + total_supply + } else { + panic!("Can't get total supply") + } + } + + pub(crate) fn mint_const_transfer_cost(&self) -> u32 { + self.data + .fixture + .chainspec + .system_costs_config + .mint_costs() + .transfer + } + + pub(crate) fn native_transfer_minimum_motes(&self) -> u64 { + self.data + .fixture + .chainspec + .transaction_config + .native_transfer_minimum_motes + } + + pub(crate) fn get_gas_limit_for_lane(&self, lane_id: u8) -> Option { + self.data + .fixture + .chainspec + .transaction_config + .transaction_v1_config + .get_lane_by_id(lane_id) + .map(|el| el.max_transaction_gas_limit) + } + + pub(crate) fn get_block_height(&self) -> u64 { + self.data.block_height + } +} + +type StakesType = Option<(Vec>, BTreeMap)>; + +#[derive(Default)] +pub(crate) struct TestScenarioBuilder { + maybe_stakes_setup: StakesType, + maybe_pricing_handling: Option, + maybe_initial_run_until: Option, + maybe_refund_handling: Option, + maybe_fee_handling: Option, + maybe_balance_hold_interval_override: Option, + maybe_minimum_era_height: Option, +} + +impl TestScenarioBuilder { + pub fn new() -> Self { + Self::default() + } + + pub async fn build(self, rng: &mut TestRng) -> TestScenario { + let TestScenarioBuilder { + maybe_stakes_setup, + maybe_pricing_handling, + maybe_initial_run_until, + maybe_refund_handling, + maybe_fee_handling, + maybe_balance_hold_interval_override, + maybe_minimum_era_height, + } = self; + let (secret_keys, stakes) = maybe_stakes_setup.unwrap_or({ + let stakes: BTreeMap = vec![ + (ALICE_PUBLIC_KEY.clone(), U512::from(u128::MAX)), /* Node 0 is effectively + * guaranteed to be the + * proposer. 
*/ + (BOB_PUBLIC_KEY.clone(), U512::from(1)), + ] + .into_iter() + .collect(); + let secret_keys = vec![ALICE_SECRET_KEY.clone(), BOB_SECRET_KEY.clone()]; + (secret_keys, stakes) + }); + + let pricing_handling = maybe_pricing_handling.unwrap_or(PricingHandling::Fixed); + let initial_run_until = + maybe_initial_run_until.unwrap_or(RunUntilCondition::ConsensusInEra { + era_id: ERA_ONE, + within: ONE_MIN, + }); + let config = ConfigsOverride::default().with_pricing_handling(pricing_handling); + let config = if let Some(refund_handling) = maybe_refund_handling { + config.with_refund_handling(refund_handling) + } else { + config + }; + let config = if let Some(fee_handling) = maybe_fee_handling { + config.with_fee_handling(fee_handling) + } else { + config + }; + let config = + if let Some(balance_hold_interval_override) = maybe_balance_hold_interval_override { + config.with_balance_hold_interval(balance_hold_interval_override) + } else { + config + }; + let config = if let Some(minimum_era_height) = maybe_minimum_era_height { + config.with_minimum_era_height(minimum_era_height) + } else { + config + }; + let child_rng = rng.create_child(); + let fixture = + TestFixture::new_with_keys(child_rng, secret_keys, stakes, Some(config)).await; + let data = ScenarioDataInstance { + fixture, + block_height: 0_u64, + }; + + TestScenario { + state: TestScenarioState::PreSetup, + data, + initial_run_until, + exec_infos: BTreeMap::new(), + state_before_test: OnceCell::new(), + } + } + + /// Sets refund handling config option. 
+ pub fn with_refund_handling(mut self, refund_handling: RefundHandling) -> Self { + self.maybe_refund_handling = Some(refund_handling); + self + } + + pub(crate) fn with_fee_handling(mut self, fee_handling: FeeHandling) -> Self { + self.maybe_fee_handling = Some(fee_handling); + self + } + + pub(crate) fn with_balance_hold_interval(mut self, balance_hold_interval: TimeDiff) -> Self { + self.maybe_balance_hold_interval_override = Some(balance_hold_interval); + self + } + + pub(crate) fn with_minimum_era_height(mut self, minimum_era_height: u64) -> Self { + self.maybe_minimum_era_height = Some(minimum_era_height); + self + } +} + +pub(super) fn build_wasm_transction( + chain_name: String, + from: &SecretKey, + pricing: PricingMode, +) -> Transaction { + //These bytes are intentionally so large - this way they fall into "WASM_LARGE" category in the + // local chainspec Alternatively we could change the chainspec to have a different limits + // for the wasm categories, but that would require aligning all tests that use local + // chainspec + let module_bytes = Bytes::from(vec![1; 172_033]); + Transaction::from( + TransactionV1Builder::new_session( + false, + module_bytes, + TransactionRuntimeParams::VmCasperV1, + ) + .with_chain_name(chain_name) + .with_pricing_mode(pricing) + .with_initiator_addr(PublicKey::from(from)) + .build() + .unwrap(), + ) +} diff --git a/node/src/reactor/main_reactor/tests/transactions.rs b/node/src/reactor/main_reactor/tests/transactions.rs new file mode 100644 index 0000000000..a958836db5 --- /dev/null +++ b/node/src/reactor/main_reactor/tests/transactions.rs @@ -0,0 +1,5447 @@ +use super::{fixture::TestFixture, *}; +use crate::{ + testing::LARGE_WASM_LANE_ID, + types::{transaction::calculate_transaction_lane_for_transaction, MetaTransaction}, +}; +use casper_storage::data_access_layer::{ + AddressableEntityRequest, BalanceIdentifier, BalanceIdentifierPurseRequest, + BalanceIdentifierPurseResult, ProofHandling, QueryRequest, QueryResult, 
+}; +use casper_types::{ + account::AccountHash, + addressable_entity::NamedKeyAddr, + runtime_args, + system::mint::{ARG_AMOUNT, ARG_TARGET}, + AccessRights, AddressableEntity, Digest, EntityAddr, ExecutableDeployItem, ExecutionInfo, + TransactionRuntimeParams, URef, URefAddr, +}; +use once_cell::sync::Lazy; + +use crate::reactor::main_reactor::tests::{ + configs_override::ConfigsOverride, initial_stakes::InitialStakes, +}; +use casper_types::{ + bytesrepr::{Bytes, ToBytes}, + execution::ExecutionResultV1, +}; + +pub(crate) static ALICE_SECRET_KEY: Lazy> = Lazy::new(|| { + Arc::new(SecretKey::ed25519_from_bytes([0xAA; SecretKey::ED25519_LENGTH]).unwrap()) +}); +pub(crate) static ALICE_PUBLIC_KEY: Lazy = + Lazy::new(|| PublicKey::from(&*ALICE_SECRET_KEY.clone())); + +pub(crate) static BOB_SECRET_KEY: Lazy> = Lazy::new(|| { + Arc::new(SecretKey::ed25519_from_bytes([0xBB; SecretKey::ED25519_LENGTH]).unwrap()) +}); +pub(crate) static BOB_PUBLIC_KEY: Lazy = + Lazy::new(|| PublicKey::from(&*BOB_SECRET_KEY.clone())); + +pub(crate) static CHARLIE_SECRET_KEY: Lazy> = Lazy::new(|| { + Arc::new(SecretKey::ed25519_from_bytes([0xCC; SecretKey::ED25519_LENGTH]).unwrap()) +}); +pub(crate) static CHARLIE_PUBLIC_KEY: Lazy = + Lazy::new(|| PublicKey::from(&*CHARLIE_SECRET_KEY.clone())); + +// The amount of gas it takes to execute the generated do_nothing.wasm. +// Passing this around as a constant is brittle and should be replaced +// with a more sustainable solution in the future. 
+const DO_NOTHING_WASM_EXECUTION_GAS: u64 = 117720_u64; +pub(crate) const MIN_GAS_PRICE: u8 = 1; +const CHAIN_NAME: &str = "single-transaction-test-net"; + +struct SingleTransactionTestCase { + fixture: TestFixture, + alice_public_key: PublicKey, + bob_public_key: PublicKey, + charlie_public_key: PublicKey, +} + +#[derive(Debug, PartialEq)] +pub(crate) struct BalanceAmount { + pub(crate) available: U512, + pub(crate) total: U512, +} + +impl BalanceAmount { + pub(crate) fn zero() -> Self { + Self { + available: U512::zero(), + total: U512::zero(), + } + } +} + +impl SingleTransactionTestCase { + fn default_test_config() -> ConfigsOverride { + ConfigsOverride::default() + .with_minimum_era_height(5) // make the era longer so that the transaction doesn't land in the switch block. + .with_balance_hold_interval(TimeDiff::from_seconds(5)) + .with_chain_name("single-transaction-test-net".to_string()) + } + + async fn new( + alice_secret_key: Arc, + bob_secret_key: Arc, + charlie_secret_key: Arc, + network_config: Option, + ) -> Self { + let rng = TestRng::new(); + + let alice_public_key = PublicKey::from(&*alice_secret_key); + let bob_public_key = PublicKey::from(&*bob_secret_key); + let charlie_public_key = PublicKey::from(&*charlie_secret_key); + + let stakes = vec![ + (alice_public_key.clone(), U512::from(u128::MAX)), /* Node 0 is effectively + * guaranteed to be the + * proposer. 
*/ + (bob_public_key.clone(), U512::from(1)), + ] + .into_iter() + .collect(); + + let fixture = TestFixture::new_with_keys( + rng, + vec![alice_secret_key.clone(), bob_secret_key.clone()], + stakes, + network_config, + ) + .await; + Self { + fixture, + alice_public_key, + bob_public_key, + charlie_public_key, + } + } + + fn chainspec(&self) -> &Chainspec { + &self.fixture.chainspec + } + + fn get_balances( + &mut self, + block_height: Option, + ) -> (BalanceAmount, BalanceAmount, Option) { + let alice_total_balance = + *get_balance(&self.fixture, &self.alice_public_key, block_height, true) + .total_balance() + .expect("Expected Alice to have a balance."); + let bob_total_balance = + *get_balance(&self.fixture, &self.bob_public_key, block_height, true) + .total_balance() + .expect("Expected Bob to have a balance."); + + let alice_available_balance = + *get_balance(&self.fixture, &self.alice_public_key, block_height, false) + .available_balance() + .expect("Expected Alice to have a balance."); + let bob_available_balance = + *get_balance(&self.fixture, &self.bob_public_key, block_height, false) + .available_balance() + .expect("Expected Bob to have a balance."); + + let charlie_available_balance = + get_balance(&self.fixture, &self.charlie_public_key, block_height, false) + .available_balance() + .copied(); + + let charlie_total_balance = + get_balance(&self.fixture, &self.charlie_public_key, block_height, true) + .available_balance() + .copied(); + + let charlie_amount = charlie_available_balance.map(|avail_balance| BalanceAmount { + available: avail_balance, + total: charlie_total_balance.unwrap(), + }); + + ( + BalanceAmount { + available: alice_available_balance, + total: alice_total_balance, + }, + BalanceAmount { + available: bob_available_balance, + total: bob_total_balance, + }, + charlie_amount, + ) + } + + async fn send_transaction( + &mut self, + txn: Transaction, + ) -> (TransactionHash, u64, ExecutionResult) { + let txn_hash = txn.hash(); + + 
self.fixture.inject_transaction(txn).await; + self.fixture + .run_until_executed_transaction(&txn_hash, Duration::from_secs(30)) + .await; + + let (_node_id, runner) = self.fixture.network.nodes().iter().next().unwrap(); + let exec_info = runner + .main_reactor() + .storage() + .read_execution_info(txn_hash) + .expect("Expected transaction to be included in a block."); + + ( + txn_hash, + exec_info.block_height, + exec_info + .execution_result + .expect("Exec result should have been stored."), + ) + } + + fn get_total_supply(&mut self, block_height: Option) -> U512 { + let (_node_id, runner) = self.fixture.network.nodes().iter().next().unwrap(); + let protocol_version = self.fixture.chainspec.protocol_version(); + let height = block_height.unwrap_or( + runner + .main_reactor() + .storage() + .highest_complete_block_height() + .expect("missing highest completed block"), + ); + let state_hash = *runner + .main_reactor() + .storage() + .read_block_header_by_height(height, true) + .expect("failure to read block header") + .unwrap() + .state_root_hash(); + + let total_supply_req = TotalSupplyRequest::new(state_hash, protocol_version); + let result = runner + .main_reactor() + .contract_runtime() + .data_access_layer() + .total_supply(total_supply_req); + + if let TotalSupplyResult::Success { total_supply } = result { + total_supply + } else { + panic!("Can't get total supply") + } + } + + fn get_accumulate_purse_balance( + &mut self, + block_height: Option, + get_total: bool, + ) -> BalanceResult { + let (_node_id, runner) = self.fixture.network.nodes().iter().next().unwrap(); + let protocol_version = self.fixture.chainspec.protocol_version(); + let block_height = block_height.unwrap_or( + runner + .main_reactor() + .storage() + .highest_complete_block_height() + .expect("missing highest completed block"), + ); + let block_header = runner + .main_reactor() + .storage() + .read_block_header_by_height(block_height, true) + .expect("failure to read block header") + 
.unwrap(); + let state_hash = *block_header.state_root_hash(); + let balance_handling = if get_total { + BalanceHandling::Total + } else { + BalanceHandling::Available + }; + runner + .main_reactor() + .contract_runtime() + .data_access_layer() + .balance(BalanceRequest::new( + state_hash, + protocol_version, + BalanceIdentifier::Accumulate, + balance_handling, + ProofHandling::NoProofs, + )) + } +} + +async fn transfer_to_account>( + fixture: &mut TestFixture, + amount: A, + from: &SecretKey, + to: PublicKey, + pricing: PricingMode, + transfer_id: Option, +) -> (TransactionHash, u64, ExecutionResult) { + let chain_name = fixture.chainspec.network_config.name.clone(); + + let mut txn = Transaction::from( + TransactionV1Builder::new_transfer(amount, None, to, transfer_id) + .unwrap() + .with_initiator_addr(PublicKey::from(from)) + .with_pricing_mode(pricing) + .with_chain_name(chain_name) + .build() + .unwrap(), + ); + + txn.sign(from); + let txn_hash = txn.hash(); + + fixture.inject_transaction(txn).await; + + info!("transfer_to_account starting run_until_executed_transaction"); + fixture + .run_until_executed_transaction(&txn_hash, TEN_SECS) + .await; + + info!("transfer_to_account finished run_until_executed_transaction"); + let (_node_id, runner) = fixture.network.nodes().iter().next().unwrap(); + let exec_info = runner + .main_reactor() + .storage() + .read_execution_info(txn_hash) + .expect("Expected transaction to be included in a block."); + + ( + txn_hash, + exec_info.block_height, + exec_info + .execution_result + .expect("Exec result should have been stored."), + ) +} + +async fn send_add_bid>( + fixture: &mut TestFixture, + amount: A, + signing_key: &SecretKey, + pricing: PricingMode, +) -> (TransactionHash, u64, ExecutionResult) { + let chain_name = fixture.chainspec.network_config.name.clone(); + let public_key = PublicKey::from(signing_key); + + let mut txn = Transaction::from( + TransactionV1Builder::new_add_bid(public_key.clone(), 10, amount, None, 
None, None) + .unwrap() + .with_initiator_addr(public_key) + .with_pricing_mode(pricing) + .with_chain_name(chain_name) + .build() + .unwrap(), + ); + + txn.sign(signing_key); + let txn_hash = txn.hash(); + + fixture.inject_transaction(txn).await; + + info!("transfer_to_account starting run_until_executed_transaction"); + fixture + .run_until_executed_transaction(&txn_hash, TEN_SECS) + .await; + + info!("transfer_to_account finished run_until_executed_transaction"); + let (_node_id, runner) = fixture.network.nodes().iter().next().unwrap(); + let exec_info = runner + .main_reactor() + .storage() + .read_execution_info(txn_hash) + .expect("Expected transaction to be included in a block."); + + ( + txn_hash, + exec_info.block_height, + exec_info + .execution_result + .expect("Exec result should have been stored."), + ) +} + +async fn send_wasm_transaction( + fixture: &mut TestFixture, + from: &SecretKey, + pricing: PricingMode, +) -> (TransactionHash, u64, ExecutionResult) { + let chain_name = fixture.chainspec.network_config.name.clone(); + + //These bytes are intentionally so large - this way they fall into "WASM_LARGE" category in the + // local chainspec Alternatively we could change the chainspec to have a different limits + // for the wasm categories, but that would require aligning all tests that use local + // chainspec + let module_bytes = Bytes::from(vec![1; 172_033]); + let mut txn = Transaction::from( + TransactionV1Builder::new_session( + false, + module_bytes, + TransactionRuntimeParams::VmCasperV1, + ) + .with_chain_name(chain_name) + .with_pricing_mode(pricing) + .with_initiator_addr(PublicKey::from(from)) + .build() + .unwrap(), + ); + + txn.sign(from); + let txn_hash = txn.hash(); + + fixture.inject_transaction(txn).await; + fixture + .run_until_executed_transaction(&txn_hash, TEN_SECS) + .await; + + let (_node_id, runner) = fixture.network.nodes().iter().next().unwrap(); + let exec_info = runner + .main_reactor() + .storage() + 
.read_execution_info(txn_hash) + .expect("Expected transaction to be included in a block."); + + ( + txn_hash, + exec_info.block_height, + exec_info + .execution_result + .expect("Exec result should have been stored."), + ) +} + +fn get_main_purse(fixture: &mut TestFixture, account_key: &PublicKey) -> Result { + let (_node_id, runner) = fixture.network.nodes().iter().next().unwrap(); + let block_height = runner + .main_reactor() + .storage() + .highest_complete_block_height() + .expect("missing highest completed block"); + let block_header = runner + .main_reactor() + .storage() + .read_block_header_by_height(block_height, true) + .expect("failure to read block header") + .unwrap(); + let state_hash = *block_header.state_root_hash(); + let protocol_version = fixture.chainspec.protocol_version(); + let identifier = BalanceIdentifier::Account(account_key.to_account_hash()); + let request = BalanceIdentifierPurseRequest::new(state_hash, protocol_version, identifier); + match runner + .main_reactor() + .contract_runtime() + .data_access_layer() + .balance_purse(request) + { + BalanceIdentifierPurseResult::Success { purse_addr } => Ok(purse_addr), + BalanceIdentifierPurseResult::RootNotFound | BalanceIdentifierPurseResult::Failure(_) => { + Err(()) + } + } +} + +pub(crate) fn get_balance( + fixture: &TestFixture, + account_key: &PublicKey, + block_height: Option, + get_total: bool, +) -> BalanceResult { + let (_node_id, runner) = fixture.network.nodes().iter().next().unwrap(); + let protocol_version = fixture.chainspec.protocol_version(); + let block_height = block_height.unwrap_or( + runner + .main_reactor() + .storage() + .highest_complete_block_height() + .expect("missing highest completed block"), + ); + let block_header = runner + .main_reactor() + .storage() + .read_block_header_by_height(block_height, true) + .expect("failure to read block header") + .unwrap(); + let state_hash = *block_header.state_root_hash(); + let balance_handling = if get_total { + 
BalanceHandling::Total + } else { + BalanceHandling::Available + }; + runner + .main_reactor() + .contract_runtime() + .data_access_layer() + .balance(BalanceRequest::from_public_key( + state_hash, + protocol_version, + account_key.clone(), + balance_handling, + ProofHandling::NoProofs, + )) +} + +fn get_bids(fixture: &mut TestFixture, block_height: Option) -> Option> { + let (_node_id, runner) = fixture.network.nodes().iter().next().unwrap(); + let block_height = block_height.unwrap_or( + runner + .main_reactor() + .storage() + .highest_complete_block_height() + .expect("missing highest completed block"), + ); + let block_header = runner + .main_reactor() + .storage() + .read_block_header_by_height(block_height, true) + .expect("failure to read block header") + .unwrap(); + let state_hash = *block_header.state_root_hash(); + + runner + .main_reactor() + .contract_runtime() + .data_access_layer() + .bids(BidsRequest::new(state_hash)) + .into_option() +} + +fn get_payment_purse_balance( + fixture: &mut TestFixture, + block_height: Option, +) -> BalanceResult { + let (_node_id, runner) = fixture.network.nodes().iter().next().unwrap(); + let protocol_version = fixture.chainspec.protocol_version(); + let block_height = block_height.unwrap_or( + runner + .main_reactor() + .storage() + .highest_complete_block_height() + .expect("missing highest completed block"), + ); + let block_header = runner + .main_reactor() + .storage() + .read_block_header_by_height(block_height, true) + .expect("failure to read block header") + .unwrap(); + let state_hash = *block_header.state_root_hash(); + runner + .main_reactor() + .contract_runtime() + .data_access_layer() + .balance(BalanceRequest::new( + state_hash, + protocol_version, + BalanceIdentifier::Payment, + BalanceHandling::Available, + ProofHandling::NoProofs, + )) +} + +fn get_entity_addr_from_account_hash( + fixture: &mut TestFixture, + state_root_hash: Digest, + account_hash: AccountHash, +) -> EntityAddr { + let (_node_id, 
runner) = fixture.network.nodes().iter().next().unwrap(); + let result = match runner + .main_reactor() + .contract_runtime() + .data_access_layer() + .query(QueryRequest::new( + state_root_hash, + Key::Account(account_hash), + vec![], + )) { + QueryResult::Success { value, .. } => value, + err => panic!("Expected QueryResult::Success but got {:?}", err), + }; + + let key = if fixture.chainspec.core_config.enable_addressable_entity { + result + .as_cl_value() + .expect("should have a CLValue") + .to_t::() + .expect("should have a Key") + } else { + result.as_account().expect("must have account"); + Key::Account(account_hash) + }; + + match key { + Key::Account(account_has) => EntityAddr::Account(account_has.value()), + Key::Hash(hash) => EntityAddr::SmartContract(hash), + Key::AddressableEntity(addr) => addr, + _ => panic!("unexpected key"), + } +} + +fn get_entity( + fixture: &mut TestFixture, + state_root_hash: Digest, + entity_addr: EntityAddr, +) -> AddressableEntity { + let (_node_id, runner) = fixture.network.nodes().iter().next().unwrap(); + let (key, is_contract) = if fixture.chainspec.core_config.enable_addressable_entity { + (Key::AddressableEntity(entity_addr), false) + } else { + match entity_addr { + EntityAddr::System(hash) | EntityAddr::SmartContract(hash) => (Key::Hash(hash), true), + EntityAddr::Account(hash) => (Key::Account(AccountHash::new(hash)), false), + } + }; + + let result = match runner + .main_reactor() + .contract_runtime() + .data_access_layer() + .query(QueryRequest::new(state_root_hash, key, vec![])) + { + QueryResult::Success { value, .. 
} => value, + err => panic!("Expected QueryResult::Success but got {:?}", err), + }; + + if fixture.chainspec.core_config.enable_addressable_entity { + result + .into_addressable_entity() + .expect("should have an AddressableEntity") + } else if is_contract { + AddressableEntity::from(result.as_contract().expect("must have contract").clone()) + } else { + AddressableEntity::from(result.as_account().expect("must have account").clone()) + } +} + +fn get_entity_named_key( + fixture: &mut TestFixture, + state_root_hash: Digest, + entity_addr: EntityAddr, + named_key: &str, +) -> Option { + if fixture.chainspec.core_config.enable_addressable_entity { + let key = Key::NamedKey( + NamedKeyAddr::new_from_string(entity_addr, named_key.to_owned()) + .expect("should be valid NamedKeyAddr"), + ); + + match query_global_state(fixture, state_root_hash, key) { + Some(val) => match &*val { + StoredValue::NamedKey(named_key) => { + Some(named_key.get_key().expect("should have a Key")) + } + value => panic!("Expected NamedKey but got {:?}", value), + }, + None => None, + } + } else { + match entity_addr { + EntityAddr::System(hash) | EntityAddr::SmartContract(hash) => { + match query_global_state(fixture, state_root_hash, Key::Hash(hash)) { + Some(val) => match &*val { + StoredValue::Contract(contract) => { + contract.named_keys().get(named_key).copied() + } + value => panic!("Expected Contract but got {:?}", value), + }, + None => None, + } + } + EntityAddr::Account(hash) => { + match query_global_state( + fixture, + state_root_hash, + Key::Account(AccountHash::new(hash)), + ) { + Some(val) => match &*val { + StoredValue::Account(account) => { + account.named_keys().get(named_key).copied() + } + value => panic!("Expected Account but got {:?}", value), + }, + None => None, + } + } + } + } +} + +fn query_global_state( + fixture: &mut TestFixture, + state_root_hash: Digest, + key: Key, +) -> Option> { + let (_node_id, runner) = fixture.network.nodes().iter().next().unwrap(); + match 
runner + .main_reactor() + .contract_runtime() + .data_access_layer() + .query(QueryRequest::new(state_root_hash, key, vec![])) + { + QueryResult::Success { value, .. } => Some(value), + _err => None, + } +} + +fn get_entity_by_account_hash( + fixture: &mut TestFixture, + state_root_hash: Digest, + account_hash: AccountHash, +) -> AddressableEntity { + let (_node_id, runner) = fixture.network.nodes().iter().next().unwrap(); + let key = if fixture.chainspec.core_config.enable_addressable_entity { + Key::AddressableEntity(EntityAddr::Account(account_hash.value())) + } else { + Key::Account(account_hash) + }; + runner + .main_reactor() + .contract_runtime() + .data_access_layer() + .addressable_entity(AddressableEntityRequest::new(state_root_hash, key)) + .into_option() + .unwrap_or_else(|| { + panic!( + "Expected to find an entity: root_hash {:?}, account hash {:?}", + state_root_hash, account_hash + ) + }) +} + +pub(crate) fn assert_exec_result_cost( + exec_result: ExecutionResult, + expected_cost: U512, + expected_consumed_gas: Gas, + msg: &str, +) { + match exec_result { + ExecutionResult::V2(exec_result_v2) => { + assert_eq!(exec_result_v2.cost, expected_cost, "{} cost", msg); + assert_eq!( + exec_result_v2.consumed, expected_consumed_gas, + "{} consumed", + msg + ); + } + _ => { + panic!("Unexpected exec result version.") + } + } +} + +// Returns `true` is the execution result is a success. +pub fn exec_result_is_success(exec_result: &ExecutionResult) -> bool { + match exec_result { + ExecutionResult::V2(execution_result_v2) => execution_result_v2.error_message.is_none(), + ExecutionResult::V1(ExecutionResultV1::Success { .. }) => true, + ExecutionResult::V1(ExecutionResultV1::Failure { .. 
}) => false, + } +} + +#[tokio::test] +async fn should_accept_transfer_without_id() { + let initial_stakes = InitialStakes::FromVec(vec![u128::MAX, 1]); + + let config = ConfigsOverride::default().with_pricing_handling(PricingHandling::Fixed); + let mut fixture = TestFixture::new(initial_stakes, Some(config)).await; + let transfer_amount = fixture + .chainspec + .transaction_config + .native_transfer_minimum_motes + + 100; + + let alice_secret_key = Arc::clone(&fixture.node_contexts[0].secret_key); + let charlie_secret_key = Arc::new(SecretKey::random(&mut fixture.rng)); + + fixture.run_until_consensus_in_era(ERA_ONE, ONE_MIN).await; + + let (_, _, result) = transfer_to_account( + &mut fixture, + transfer_amount, + &alice_secret_key, + PublicKey::from(&*charlie_secret_key), + PricingMode::Fixed { + gas_price_tolerance: 1, + additional_computation_factor: 0, + }, + None, + ) + .await; + + assert!(exec_result_is_success(&result)) +} + +#[tokio::test] +async fn should_native_transfer_nofee_norefund_fixed() { + const TRANSFER_AMOUNT: u64 = 30_000_000_000; + + let initial_stakes = InitialStakes::FromVec(vec![u128::MAX, 1]); + + let config = ConfigsOverride::default() + .with_pricing_handling(PricingHandling::Fixed) + .with_refund_handling(RefundHandling::NoRefund) + .with_fee_handling(FeeHandling::NoFee) + .with_balance_hold_interval(TimeDiff::from_seconds(5)); + + let mut fixture = TestFixture::new(initial_stakes, Some(config)).await; + + let alice_secret_key = Arc::clone(&fixture.node_contexts[0].secret_key); + let alice_public_key = PublicKey::from(&*alice_secret_key); + let charlie_secret_key = Arc::new(SecretKey::random(&mut fixture.rng)); + let charlie_public_key = PublicKey::from(&*charlie_secret_key); + + fixture.run_until_consensus_in_era(ERA_ONE, ONE_MIN).await; + + let alice_initial_balance = *get_balance(&fixture, &alice_public_key, None, true) + .available_balance() + .expect("Expected Alice to have a balance."); + + let (_txn_hash, block_height, 
exec_result) = transfer_to_account( + &mut fixture, + TRANSFER_AMOUNT, + &alice_secret_key, + PublicKey::from(&*charlie_secret_key), + PricingMode::Fixed { + gas_price_tolerance: 1, + additional_computation_factor: 0, + }, + Some(0xDEADBEEF), + ) + .await; + + let expected_transfer_gas = fixture + .chainspec + .system_costs_config + .mint_costs() + .transfer + .into(); + let expected_transfer_cost = expected_transfer_gas; // since we set gas_price_tolerance to 1. + + assert_exec_result_cost( + exec_result, + expected_transfer_cost, + Gas::new(expected_transfer_gas), + "transfer_cost_fixed_price_no_fee_no_refund", + ); + + let alice_available_balance = + get_balance(&fixture, &alice_public_key, Some(block_height), false); + let alice_total_balance = get_balance(&fixture, &alice_public_key, Some(block_height), true); + + // since FeeHandling is set to NoFee, we expect that there's a hold on Alice's balance for the + // cost of the transfer. The total balance of Alice now should be the initial balance - the + // amount transferred to Charlie. + let alice_expected_total_balance = alice_initial_balance - TRANSFER_AMOUNT; + // The available balance is the initial balance - the amount transferred to Charlie - the hold + // for the transfer cost. + let alice_expected_available_balance = alice_expected_total_balance - expected_transfer_cost; + + assert_eq!( + alice_total_balance + .available_balance() + .expect("Expected Alice to have a balance") + .clone(), + alice_expected_total_balance + ); + assert_eq!( + alice_available_balance + .available_balance() + .expect("Expected Alice to have a balance") + .clone(), + alice_expected_available_balance + ); + + let charlie_balance = get_balance(&fixture, &charlie_public_key, Some(block_height), false); + assert_eq!( + charlie_balance + .available_balance() + .expect("Expected Charlie to have a balance") + .clone(), + TRANSFER_AMOUNT.into() + ); + + // Check if the hold is released. 
+ let hold_release_block_height = block_height + 8; // Block time is 1s. + fixture + .run_until_block_height(hold_release_block_height, ONE_MIN) + .await; + + let alice_available_balance = get_balance( + &fixture, + &alice_public_key, + Some(hold_release_block_height), + false, + ); + let alice_total_balance = get_balance( + &fixture, + &alice_public_key, + Some(hold_release_block_height), + true, + ); + + assert_eq!( + alice_available_balance.available_balance(), + alice_total_balance.available_balance() + ); +} + +#[tokio::test] +async fn erroneous_native_transfer_nofee_norefund_fixed() { + let initial_stakes = InitialStakes::FromVec(vec![u128::MAX, 1]); + + let config = ConfigsOverride::default() + .with_pricing_handling(PricingHandling::Fixed) + .with_refund_handling(RefundHandling::NoRefund) + .with_fee_handling(FeeHandling::NoFee) + .with_balance_hold_interval(TimeDiff::from_seconds(5)); + + let mut fixture = TestFixture::new(initial_stakes, Some(config)).await; + + let alice_secret_key = Arc::clone(&fixture.node_contexts[0].secret_key); + let charlie_secret_key = Arc::new(SecretKey::random(&mut fixture.rng)); + let charlie_public_key = PublicKey::from(&*charlie_secret_key); + + fixture.run_until_consensus_in_era(ERA_ONE, ONE_MIN).await; + + let transfer_amount = fixture + .chainspec + .transaction_config + .native_transfer_minimum_motes + + 100; + + // Transfer some token to Charlie. + let (_txn_hash, _block, exec_result) = transfer_to_account( + &mut fixture, + transfer_amount, + &alice_secret_key, + PublicKey::from(&*charlie_secret_key), + PricingMode::Fixed { + gas_price_tolerance: 1, + additional_computation_factor: 0, + }, + None, + ) + .await; + assert!(exec_result_is_success(&exec_result)); + + // Attempt to transfer more than Charlie has to Bob. 
+ let bob_secret_key = Arc::new(SecretKey::random(&mut fixture.rng)); + let (_txn_hash, block_height, exec_result) = transfer_to_account( + &mut fixture, + transfer_amount + 100, + &charlie_secret_key, + PublicKey::from(&*bob_secret_key), + PricingMode::Fixed { + gas_price_tolerance: 1, + additional_computation_factor: 0, + }, + None, + ) + .await; + assert!(!exec_result_is_success(&exec_result)); // transaction should have failed. + + let expected_transfer_gas = fixture + .chainspec + .system_costs_config + .mint_costs() + .transfer + .into(); + let expected_transfer_cost = expected_transfer_gas; // since we set gas_price_tolerance to 1. + + assert_exec_result_cost( + exec_result, + expected_transfer_cost, + Gas::new(expected_transfer_gas), + "failed_transfer_cost_fixed_price_no_fee_no_refund", + ); + + // Even though the transaction failed, a hold must still be in place for the transfer cost. + let charlie_available_balance = + get_balance(&fixture, &charlie_public_key, Some(block_height), false); + assert_eq!( + charlie_available_balance + .available_balance() + .expect("Expected Charlie to have a balance") + .clone(), + U512::from(transfer_amount) - expected_transfer_cost + ); +} + +#[tokio::test] +async fn should_native_transfer_nofee_norefund_payment_limited() { + const TRANSFER_AMOUNT: u64 = 30_000_000_000; + + let initial_stakes = InitialStakes::FromVec(vec![u128::MAX, 1]); + + let config = SingleTransactionTestCase::default_test_config() + .with_pricing_handling(PricingHandling::PaymentLimited) + .with_refund_handling(RefundHandling::NoRefund) + .with_fee_handling(FeeHandling::NoFee); + + let mut fixture = TestFixture::new(initial_stakes, Some(config)).await; + + let alice_secret_key = Arc::clone(&fixture.node_contexts[0].secret_key); + let alice_public_key = PublicKey::from(&*alice_secret_key); + let charlie_secret_key = Arc::new(SecretKey::random(&mut fixture.rng)); + let charlie_public_key = PublicKey::from(&*charlie_secret_key); + + 
fixture.run_until_consensus_in_era(ERA_ONE, ONE_MIN).await; + + let alice_initial_balance = *get_balance(&fixture, &alice_public_key, None, true) + .available_balance() + .expect("Expected Alice to have a balance."); + + const TRANSFER_PAYMENT: u64 = 100_000_000; + + // This transaction should be included since the tolerance is above the min gas price. + let (_txn_hash, block_height, exec_result) = transfer_to_account( + &mut fixture, + TRANSFER_AMOUNT, + &alice_secret_key, + PublicKey::from(&*charlie_secret_key), + PricingMode::PaymentLimited { + payment_amount: TRANSFER_PAYMENT, + gas_price_tolerance: MIN_GAS_PRICE + 1, + standard_payment: true, + }, + None, + ) + .await; + + let expected_transfer_cost = TRANSFER_PAYMENT * MIN_GAS_PRICE as u64; + + assert!(exec_result_is_success(&exec_result)); // transaction should have succeeded. + assert_exec_result_cost( + exec_result, + expected_transfer_cost.into(), + Gas::new(TRANSFER_PAYMENT), + "transfer_cost_payment_limited_price_no_fee_no_refund", + ); + + let alice_available_balance = + get_balance(&fixture, &alice_public_key, Some(block_height), false); + let alice_total_balance = get_balance(&fixture, &alice_public_key, Some(block_height), true); + + // since FeeHandling is set to NoFee, we expect that there's a hold on Alice's balance for the + // cost of the transfer. The total balance of Alice now should be the initial balance - the + // amount transferred to Charlie. + let alice_expected_total_balance = alice_initial_balance - TRANSFER_AMOUNT; + // The available balance is the initial balance - the amount transferred to Charlie - the hold + // for the transfer cost. 
+ let alice_expected_available_balance = alice_expected_total_balance - expected_transfer_cost; + + assert_eq!( + alice_total_balance + .available_balance() + .expect("Expected Alice to have a balance") + .clone(), + alice_expected_total_balance + ); + assert_eq!( + alice_available_balance + .available_balance() + .expect("Expected Alice to have a balance") + .clone(), + alice_expected_available_balance + ); + + let charlie_balance = get_balance(&fixture, &charlie_public_key, Some(block_height), false); + assert_eq!( + charlie_balance + .available_balance() + .expect("Expected Charlie to have a balance") + .clone(), + TRANSFER_AMOUNT.into() + ); + + // Check if the hold is released. + let hold_release_block_height = block_height + 8; // Block time is 1s. + fixture + .run_until_block_height(hold_release_block_height, ONE_MIN) + .await; + + let alice_available_balance = get_balance( + &fixture, + &alice_public_key, + Some(hold_release_block_height), + false, + ); + let alice_total_balance = get_balance( + &fixture, + &alice_public_key, + Some(hold_release_block_height), + true, + ); + + assert_eq!( + alice_available_balance.available_balance(), + alice_total_balance.available_balance() + ); +} + +#[tokio::test] +async fn should_native_auction_with_nofee_norefund_payment_limited() { + let initial_stakes = InitialStakes::FromVec(vec![u128::MAX, 1]); + + let config = SingleTransactionTestCase::default_test_config() + .with_pricing_handling(PricingHandling::PaymentLimited) + .with_refund_handling(RefundHandling::NoRefund) + .with_fee_handling(FeeHandling::NoFee); + + let mut fixture = TestFixture::new(initial_stakes, Some(config)).await; + + let alice_secret_key = Arc::clone(&fixture.node_contexts[0].secret_key); + let alice_public_key = PublicKey::from(&*alice_secret_key); + + fixture.run_until_consensus_in_era(ERA_ONE, ONE_MIN).await; + + let alice_initial_balance = *get_balance(&fixture, &alice_public_key, None, true) + .available_balance() + .expect("Expected Alice 
to have a balance."); + + const BID_PAYMENT_AMOUNT: u64 = 2_500_000_000; + + let bid_amount = fixture.chainspec.core_config.minimum_bid_amount + 1; + // This transaction should be included since the tolerance is above the min gas price. + let (_txn_hash, block_height, exec_result) = send_add_bid( + &mut fixture, + bid_amount, + &alice_secret_key, + PricingMode::PaymentLimited { + payment_amount: BID_PAYMENT_AMOUNT, + gas_price_tolerance: MIN_GAS_PRICE + 1, + standard_payment: true, + }, + ) + .await; + + let expected_add_bid_consumed = fixture + .chainspec + .system_costs_config + .auction_costs() + .add_bid; + let expected_add_bid_cost = expected_add_bid_consumed * MIN_GAS_PRICE as u64; + + assert!(exec_result_is_success(&exec_result)); // transaction should have succeeded. + + let transfers = exec_result.transfers(); + assert!(!transfers.is_empty(), "transfers should not be empty"); + assert_eq!(transfers.len(), 1, "transfers should have 1 entry"); + let transfer = transfers.first().expect("transfer entry should exist"); + let transfer_amount = transfer.amount(); + assert_eq!( + transfer_amount, + U512::from(bid_amount), + "transfer amount should match the bid amount" + ); + + assert_exec_result_cost( + exec_result, + expected_add_bid_cost.into(), + expected_add_bid_consumed.into(), + "add_bid_with_classic_pricing_no_fee_no_refund", + ); + + let alice_available_balance = + get_balance(&fixture, &alice_public_key, Some(block_height), false); + let alice_total_balance = get_balance(&fixture, &alice_public_key, Some(block_height), true); + + // since FeeHandling is set to NoFee, we expect that there's a hold on Alice's balance for the + // cost of the transfer. The total balance of Alice now should be the initial balance - the + // amount transferred to Charlie. + let alice_expected_total_balance = alice_initial_balance - bid_amount; + // The available balance is the initial balance - the amount transferred to Charlie - the hold + // for the transfer cost. 
+ let alice_expected_available_balance = alice_expected_total_balance - expected_add_bid_cost; + + assert_eq!( + alice_total_balance + .available_balance() + .expect("Expected Alice to have a balance") + .clone(), + alice_expected_total_balance + ); + assert_eq!( + alice_available_balance + .available_balance() + .expect("Expected Alice to have a balance") + .clone(), + alice_expected_available_balance + ); +} + +#[tokio::test] +#[should_panic = "within 10 seconds"] +async fn should_reject_threshold_below_min_gas_price() { + const TRANSFER_AMOUNT: u64 = 30_000_000_000; + + let initial_stakes = InitialStakes::FromVec(vec![u128::MAX, 1]); + + let config = SingleTransactionTestCase::default_test_config() + .with_pricing_handling(PricingHandling::PaymentLimited) + .with_refund_handling(RefundHandling::NoRefund) + .with_fee_handling(FeeHandling::NoFee); + + let mut fixture = TestFixture::new(initial_stakes, Some(config)).await; + + let alice_secret_key = Arc::clone(&fixture.node_contexts[0].secret_key); + let charlie_secret_key = Arc::new(SecretKey::random(&mut fixture.rng)); + + fixture.run_until_consensus_in_era(ERA_ONE, ONE_MIN).await; + + // This transaction should NOT be included since the tolerance is below the min gas price. + let (_, _, _) = transfer_to_account( + &mut fixture, + TRANSFER_AMOUNT, + &alice_secret_key, + PublicKey::from(&*charlie_secret_key), + PricingMode::PaymentLimited { + payment_amount: 1000, + gas_price_tolerance: MIN_GAS_PRICE - 1, + standard_payment: true, + }, + None, + ) + .await; +} + +#[tokio::test] +async fn should_not_overcharge_native_operations_fixed() { + let initial_stakes = InitialStakes::FromVec(vec![u128::MAX, 1]); // Node 0 is effectively guaranteed to be the proposer. 
+ + let config = SingleTransactionTestCase::default_test_config() + .with_pricing_handling(PricingHandling::Fixed) + .with_refund_handling(RefundHandling::Refund { + refund_ratio: Ratio::new(1, 2), + }) + .with_fee_handling(FeeHandling::PayToProposer); + + let mut fixture = TestFixture::new(initial_stakes, Some(config)).await; + + let alice_secret_key = Arc::clone(&fixture.node_contexts[0].secret_key); + let alice_public_key = PublicKey::from(&*alice_secret_key); + let bob_secret_key = Arc::clone(&fixture.node_contexts[1].secret_key); + let bob_public_key = PublicKey::from(&*bob_secret_key); + let charlie_secret_key = Arc::new(SecretKey::random(&mut fixture.rng)); + let charlie_public_key = PublicKey::from(&*charlie_secret_key); + + fixture.run_until_consensus_in_era(ERA_ONE, ONE_MIN).await; + + let bob_initial_balance = *get_balance(&fixture, &bob_public_key, None, true) + .total_balance() + .expect("Expected Bob to have a balance."); + let alice_initial_balance = *get_balance(&fixture, &alice_public_key, None, true) + .total_balance() + .expect("Expected Alice to have a balance."); + + let transfer_amount = fixture + .chainspec + .transaction_config + .native_transfer_minimum_motes + + 100; + + let (_txn_hash, block_height, exec_result) = transfer_to_account( + &mut fixture, + transfer_amount, + &bob_secret_key, + PublicKey::from(&*charlie_secret_key), + PricingMode::Fixed { + gas_price_tolerance: MIN_GAS_PRICE, + additional_computation_factor: 0, + }, + None, + ) + .await; + + assert!(exec_result_is_success(&exec_result)); // transaction should have succeeded. 
+ + let expected_transfer_gas: u64 = fixture + .chainspec + .system_costs_config + .mint_costs() + .transfer + .into(); + let expected_transfer_cost = expected_transfer_gas * MIN_GAS_PRICE as u64; + assert_exec_result_cost( + exec_result, + expected_transfer_cost.into(), + expected_transfer_gas.into(), + "cost should equal consumed", + ); + + let bob_available_balance = *get_balance(&fixture, &bob_public_key, Some(block_height), false) + .available_balance() + .expect("Expected Bob to have a balance"); + let bob_total_balance = *get_balance(&fixture, &bob_public_key, Some(block_height), true) + .total_balance() + .expect("Expected Bob to have a balance"); + + let alice_available_balance = + *get_balance(&fixture, &alice_public_key, Some(block_height), false) + .available_balance() + .expect("Expected Alice to have a balance"); + let alice_total_balance = *get_balance(&fixture, &alice_public_key, Some(block_height), true) + .total_balance() + .expect("Expected Alice to have a balance"); + + // Bob shouldn't get a refund since there is no refund for native transfers. + let bob_expected_total_balance = bob_initial_balance - transfer_amount - expected_transfer_cost; + let bob_expected_available_balance = bob_expected_total_balance; + + // Alice should get the full fee since there is no refund for native transfers. 
+ let alice_expected_total_balance = alice_initial_balance + expected_transfer_cost; + let alice_expected_available_balance = alice_expected_total_balance; + + let charlie_balance = *get_balance(&fixture, &charlie_public_key, Some(block_height), false) + .available_balance() + .expect("Expected Charlie to have a balance"); + assert_eq!(charlie_balance.clone(), transfer_amount.into()); + + assert_eq!( + bob_available_balance.clone(), + bob_expected_available_balance + ); + + assert_eq!(bob_total_balance.clone(), bob_expected_total_balance); + + assert_eq!( + alice_available_balance.clone(), + alice_expected_available_balance + ); + + assert_eq!(alice_total_balance.clone(), alice_expected_total_balance); +} + +#[tokio::test] +async fn should_cancel_refund_for_erroneous_wasm() { + // as a punitive measure, refunds are not issued for erroneous wasms even + // if refunds are turned on. + + let initial_stakes = InitialStakes::FromVec(vec![u128::MAX, 1]); // Node 0 is effectively guaranteed to be the proposer. 
+ + let refund_ratio = Ratio::new(1, 2); + let config = SingleTransactionTestCase::default_test_config() + .with_pricing_handling(PricingHandling::Fixed) + .with_refund_handling(RefundHandling::Refund { refund_ratio }) + .with_fee_handling(FeeHandling::PayToProposer); + + let mut fixture = TestFixture::new(initial_stakes, Some(config)).await; + + let alice_secret_key = Arc::clone(&fixture.node_contexts[0].secret_key); + let alice_public_key = PublicKey::from(&*alice_secret_key); + let bob_secret_key = Arc::clone(&fixture.node_contexts[1].secret_key); + let bob_public_key = PublicKey::from(&*bob_secret_key); + + fixture.run_until_consensus_in_era(ERA_ONE, ONE_MIN).await; + + let bob_initial_balance = *get_balance(&fixture, &bob_public_key, None, true) + .total_balance() + .expect("Expected Bob to have a balance."); + let alice_initial_balance = *get_balance(&fixture, &alice_public_key, None, true) + .total_balance() + .expect("Expected Alice to have a balance."); + + let (_txn_hash, block_height, exec_result) = send_wasm_transaction( + &mut fixture, + &bob_secret_key, + PricingMode::Fixed { + gas_price_tolerance: MIN_GAS_PRICE, + additional_computation_factor: 0, + }, + ) + .await; + + assert!(!exec_result_is_success(&exec_result)); // transaction should not succeed because the wasm bytes are invalid. 
+ + let expected_transaction_gas: u64 = fixture + .chainspec + .get_max_gas_limit_by_category(LARGE_WASM_LANE_ID); + let expected_transaction_cost = expected_transaction_gas * MIN_GAS_PRICE as u64; + assert_exec_result_cost( + exec_result, + expected_transaction_cost.into(), + Gas::new(0), + "wasm_transaction_fees_are_refunded", + ); + + let bob_available_balance = *get_balance(&fixture, &bob_public_key, Some(block_height), false) + .available_balance() + .expect("Expected Bob to have a balance"); + let bob_total_balance = *get_balance(&fixture, &bob_public_key, Some(block_height), true) + .total_balance() + .expect("Expected Bob to have a balance"); + + let alice_available_balance = + *get_balance(&fixture, &alice_public_key, Some(block_height), false) + .available_balance() + .expect("Expected Alice to have a balance"); + let alice_total_balance = *get_balance(&fixture, &alice_public_key, Some(block_height), true) + .total_balance() + .expect("Expected Alice to have a balance"); + + // Bob gets no refund because the wasm errored + let bob_expected_total_balance = bob_initial_balance - expected_transaction_cost; + let bob_expected_available_balance = bob_expected_total_balance; + + // Alice should get the all the fee since it's set to pay to proposer + // AND Bob didn't get a refund + let alice_expected_total_balance = alice_initial_balance + expected_transaction_cost; + let alice_expected_available_balance = alice_expected_total_balance; + + assert_eq!( + bob_available_balance.clone(), + bob_expected_available_balance + ); + + assert_eq!(bob_total_balance.clone(), bob_expected_total_balance); + + assert_eq!( + alice_available_balance.clone(), + alice_expected_available_balance + ); + + assert_eq!(alice_total_balance.clone(), alice_expected_total_balance); +} + +#[tokio::test] +async fn should_refund_ratio_of_unconsumed_gas_fixed() { + let refund_ratio = Ratio::new(1, 3); + let config = SingleTransactionTestCase::default_test_config() + 
.with_pricing_handling(PricingHandling::Fixed) + .with_refund_handling(RefundHandling::Refund { refund_ratio }) + .with_fee_handling(FeeHandling::PayToProposer); + + let mut test = SingleTransactionTestCase::new( + ALICE_SECRET_KEY.clone(), + BOB_SECRET_KEY.clone(), + CHARLIE_SECRET_KEY.clone(), + Some(config), + ) + .await; + let txn = valid_wasm_txn( + BOB_SECRET_KEY.clone(), + PricingMode::Fixed { + gas_price_tolerance: MIN_GAS_PRICE, + additional_computation_factor: 0, + }, + ); + let lane_id = calculate_transaction_lane_for_transaction(&txn, test.chainspec()).unwrap(); + let gas_limit = txn + .gas_limit(test.chainspec(), lane_id) + .unwrap() + .value() + .as_u64(); + + test.fixture + .run_until_consensus_in_era(ERA_ONE, ONE_MIN) + .await; + + let (alice_initial_balance, bob_initial_balance, _) = test.get_balances(None); + let (_txn_hash, block_height, exec_result) = test.send_transaction(txn).await; + + assert!(exec_result_is_success(&exec_result)); + + let expected_transaction_cost = gas_limit * MIN_GAS_PRICE as u64; + assert_exec_result_cost( + exec_result, + expected_transaction_cost.into(), + Gas::new(DO_NOTHING_WASM_EXECUTION_GAS), /* Magic value, this is the amount of gas + * consumed by do_nothing.wasm */ + "wasm_transaction_fees_are_refunded", + ); + + let (alice_current_balance, bob_current_balance, _) = test.get_balances(Some(block_height)); + + // Bob should get 1/3 of the cost for the unspent gas. Since this transaction consumed 0 + // gas, the unspent gas is equal to the limit. 
+ let refund_amount: u64 = (refund_ratio + * Ratio::from( + expected_transaction_cost - DO_NOTHING_WASM_EXECUTION_GAS * MIN_GAS_PRICE as u64, + )) + .to_integer(); + + let bob_expected_total_balance = + bob_initial_balance.total.as_u64() - expected_transaction_cost + refund_amount; + let bob_expected_available_balance = bob_expected_total_balance; + + // Alice should get the non-refunded part of the fee since it's set to pay to proposer + let alice_expected_total_balance = + alice_initial_balance.total.as_u64() + expected_transaction_cost - refund_amount; + let alice_expected_available_balance = alice_expected_total_balance; + + assert_eq!( + bob_current_balance.available.as_u64(), + bob_expected_available_balance + ); + + assert_eq!( + bob_current_balance.total.as_u64(), + bob_expected_total_balance + ); + + assert_eq!( + alice_current_balance.available.as_u64(), + alice_expected_available_balance + ); + + assert_eq!( + alice_current_balance.total.as_u64(), + alice_expected_total_balance + ); +} + +async fn should_not_refund_erroneous_wasm_burn(txn_pricing_mode: PricingMode) { + // if refund handling is set to burn, and an erroneous wasm is processed + // ALL of the spent token is treated as the fee, thus there is no refund, and thus + // nothing is burned. 
+ let (price_handling, min_gas_price, gas_limit) = match_pricing_mode(&txn_pricing_mode); + + let refund_ratio = Ratio::new(1, 2); + let config = SingleTransactionTestCase::default_test_config() + .with_pricing_handling(price_handling) + .with_refund_handling(RefundHandling::Burn { refund_ratio }) + .with_fee_handling(FeeHandling::PayToProposer); + + let mut test = SingleTransactionTestCase::new( + ALICE_SECRET_KEY.clone(), + BOB_SECRET_KEY.clone(), + CHARLIE_SECRET_KEY.clone(), + Some(config), + ) + .await; + + let txn = invalid_wasm_txn(BOB_SECRET_KEY.clone(), txn_pricing_mode); + + test.fixture + .run_until_consensus_in_era(ERA_ONE, ONE_MIN) + .await; + + let (alice_initial_balance, bob_initial_balance, _charlie_initial_balance) = + test.get_balances(None); + let initial_total_supply = test.get_total_supply(None); + + let (_txn_hash, block_height, exec_result) = test.send_transaction(txn).await; + + let expected_transaction_gas: u64 = gas_limit.unwrap_or( + test.chainspec() + .get_max_gas_limit_by_category(LARGE_WASM_LANE_ID), + ); + let expected_transaction_cost = expected_transaction_gas * min_gas_price as u64; + + assert!(!exec_result_is_success(&exec_result)); // transaction should not succeed because the wasm bytes are invalid. + assert_exec_result_cost( + exec_result, + expected_transaction_cost.into(), + Gas::new(0), + "wasm_transaction_refunds_are_burnt", + ); + + // Bobs transaction was invalid. He should get NO refund. + // Since there is no refund - there will also be nothing burned. + assert_eq!( + test.get_total_supply(Some(block_height)), + initial_total_supply + ); + + let (alice_current_balance, bob_current_balance, _) = test.get_balances(Some(block_height)); + // Bob doesn't get a refund. The refund is burnt. 
+ let bob_expected_total_balance = bob_initial_balance.total - expected_transaction_cost; + let bob_expected_available_balance = bob_expected_total_balance; + + // Alice should get the non-refunded part of the fee since it's set to pay to proposer + let alice_expected_total_balance = alice_initial_balance.total + expected_transaction_cost; + let alice_expected_available_balance = alice_expected_total_balance; + + assert_eq!( + bob_current_balance.available.clone(), + bob_expected_available_balance + ); + assert_eq!( + bob_current_balance.total.clone(), + bob_expected_total_balance + ); + assert_eq!( + alice_current_balance.available.clone(), + alice_expected_available_balance + ); + assert_eq!( + alice_current_balance.total.clone(), + alice_expected_total_balance + ); +} + +async fn should_burn_refunds(txn_pricing_mode: PricingMode) { + let (price_handling, min_gas_price, _gas_limit) = match_pricing_mode(&txn_pricing_mode); + + let refund_ratio = Ratio::new(1, 3); + let config = SingleTransactionTestCase::default_test_config() + .with_pricing_handling(price_handling) + .with_refund_handling(RefundHandling::Burn { refund_ratio }) + .with_fee_handling(FeeHandling::PayToProposer); + + let mut test = SingleTransactionTestCase::new( + ALICE_SECRET_KEY.clone(), + BOB_SECRET_KEY.clone(), + CHARLIE_SECRET_KEY.clone(), + Some(config), + ) + .await; + + let txn = valid_wasm_txn(BOB_SECRET_KEY.clone(), txn_pricing_mode); + + test.fixture + .run_until_consensus_in_era(ERA_ONE, ONE_MIN) + .await; + + let (alice_initial_balance, bob_initial_balance, _charlie_initial_balance) = + test.get_balances(None); + let initial_total_supply = test.get_total_supply(None); + + let lane_id = calculate_transaction_lane_for_transaction(&txn, test.chainspec()).unwrap(); + let expected_transaction_gas = txn + .gas_limit(test.chainspec(), lane_id) + .unwrap() + .value() + .as_u64(); + let gas_cost = txn + .gas_cost(test.chainspec(), lane_id, min_gas_price) + .unwrap(); + let (_txn_hash, 
block_height, exec_result) = test.send_transaction(txn).await;
+
+    assert!(exec_result_is_success(&exec_result));
+    assert_exec_result_cost(
+        exec_result,
+        gas_cost.value(),
+        Gas::new(DO_NOTHING_WASM_EXECUTION_GAS),
+        "wasm_transaction_refunds_are_burnt",
+    );
+
+    let expected_transaction_cost = expected_transaction_gas * min_gas_price as u64;
+
+    let refund_amount: U512 = (refund_ratio
+        * Ratio::from(
+            expected_transaction_cost - DO_NOTHING_WASM_EXECUTION_GAS * min_gas_price as u64,
+        ))
+    .to_integer()
+    .into();
+
+    // Bob's transaction was valid, so a refund amount is computed; but since refund
+    // handling is set to Burn, 1/3 of the unspent gas is burned (removed from the
+    // total supply) instead of being returned to Bob.
+    assert_eq!(
+        test.get_total_supply(Some(block_height)),
+        initial_total_supply - refund_amount
+    );
+
+    let (alice_current_balance, bob_current_balance, _) = test.get_balances(Some(block_height));
+    // Bob doesn't get a refund. The refund is burnt.
+    let bob_expected_total_balance = bob_initial_balance.total - expected_transaction_cost;
+    let bob_expected_available_balance = bob_expected_total_balance;
+
+    // Alice should get the non-refunded part of the fee since it's set to pay to proposer
+    let alice_expected_total_balance =
+        alice_initial_balance.total + expected_transaction_cost - refund_amount;
+    let alice_expected_available_balance = alice_expected_total_balance;
+
+    assert_eq!(
+        bob_current_balance.available.clone(),
+        bob_expected_available_balance
+    );
+    assert_eq!(
+        bob_current_balance.total.clone(),
+        bob_expected_total_balance
+    );
+    assert_eq!(
+        alice_current_balance.available.clone(),
+        alice_expected_available_balance
+    );
+    assert_eq!(
+        alice_current_balance.total.clone(),
+        alice_expected_total_balance
+    );
+}
+
+#[tokio::test]
+async fn should_burn_refunds_fixed() {
+    should_burn_refunds(PricingMode::Fixed {
+        gas_price_tolerance: MIN_GAS_PRICE,
+        additional_computation_factor: 0,
+    })
+    .await;
+}
+
+#[tokio::test]
+async fn should_burn_refunds_payment_limited() {
+    should_burn_refunds(PricingMode::PaymentLimited {
payment_amount: 2_500_000_001, + gas_price_tolerance: MIN_GAS_PRICE, + standard_payment: true, + }) + .await; +} + +#[tokio::test] +async fn should_not_refund_erroneous_wasm_burn_fixed() { + should_not_refund_erroneous_wasm_burn(PricingMode::Fixed { + gas_price_tolerance: MIN_GAS_PRICE, + additional_computation_factor: 0, + }) + .await; +} + +#[tokio::test] +async fn should_not_refund_erroneous_wasm_burn_payment_limited() { + should_not_refund_erroneous_wasm_burn(PricingMode::PaymentLimited { + payment_amount: 2_500_000_000, + gas_price_tolerance: MIN_GAS_PRICE, + standard_payment: true, + }) + .await; +} + +async fn should_burn_refund_nofee(txn_pricing_mode: PricingMode) { + let (price_handling, min_gas_price, _gas_limit) = match_pricing_mode(&txn_pricing_mode); + + let refund_ratio = Ratio::new(1, 2); + let config = SingleTransactionTestCase::default_test_config() + .with_pricing_handling(price_handling) + .with_refund_handling(RefundHandling::Burn { refund_ratio }) + .with_fee_handling(FeeHandling::NoFee); + + let mut test = SingleTransactionTestCase::new( + ALICE_SECRET_KEY.clone(), + BOB_SECRET_KEY.clone(), + CHARLIE_SECRET_KEY.clone(), + Some(config), + ) + .await; + + let txn = valid_wasm_txn(BOB_SECRET_KEY.clone(), txn_pricing_mode); + + test.fixture + .run_until_consensus_in_era(ERA_ONE, ONE_MIN) + .await; + + let (alice_initial_balance, bob_initial_balance, _) = test.get_balances(None); + let initial_total_supply = test.get_total_supply(None); + + let lane_id = calculate_transaction_lane_for_transaction(&txn, test.chainspec()).unwrap(); + let gas_limit = txn.gas_limit(test.chainspec(), lane_id).unwrap(); + let gas_cost = txn + .gas_cost(test.chainspec(), lane_id, min_gas_price) + .unwrap(); + let (_txn_hash, block_height, exec_result) = test.send_transaction(txn).await; + let consumed = exec_result.consumed().as_u64(); + let consumed_price = consumed * min_gas_price as u64; + let expected_transaction_cost = gas_cost.value().as_u64(); + 
assert!(exec_result_is_success(&exec_result)); + assert_exec_result_cost( + exec_result, + expected_transaction_cost.into(), + Gas::new(DO_NOTHING_WASM_EXECUTION_GAS), /* Magic value, this is the amount of gas + * consumed by do_nothing.wasm */ + "only_refunds_are_burnt_no_fee", + ); + + //TODO shouldn't this be (refund_ratio* Ratio::from((expected_transaction_cost - + // consumed_price))? + let refund_amount: U512 = (refund_ratio + * Ratio::from((gas_limit.value().as_u64() * min_gas_price as u64) - consumed_price)) + .to_integer() + .into(); + + // We set it up so that the refunds are burnt so check this. + let total_supply = test.get_total_supply(Some(block_height)); + assert_eq!(total_supply, initial_total_supply - refund_amount); + + let (alice_current_balance, bob_current_balance, _) = test.get_balances(Some(block_height)); + // Bob doesn't get a refund. The refund is burnt. A hold is put in place for the + // transaction cost. + let bob_balance_hold = U512::from(expected_transaction_cost) - refund_amount; + let bob_expected_total_balance = bob_initial_balance.total - refund_amount; + let bob_expected_available_balance = bob_current_balance.total - bob_balance_hold; + + // Alice shouldn't get anything since we are operating with no fees + let alice_expected_total_balance = alice_initial_balance.total; + let alice_expected_available_balance = alice_expected_total_balance; + + assert_eq!( + bob_current_balance.available.clone(), + bob_expected_available_balance + ); + assert_eq!( + bob_current_balance.total.clone(), + bob_expected_total_balance + ); + assert_eq!( + alice_current_balance.available.clone(), + alice_expected_available_balance + ); + assert_eq!( + alice_current_balance.total.clone(), + alice_expected_total_balance + ); +} + +#[tokio::test] +async fn should_burn_refund_nofee_fixed() { + should_burn_refund_nofee(PricingMode::Fixed { + gas_price_tolerance: MIN_GAS_PRICE, + additional_computation_factor: 0, + }) + .await; +} + +#[tokio::test] +async fn 
should_burn_refund_nofee_payment_limited() { + should_burn_refund_nofee(PricingMode::PaymentLimited { + payment_amount: 4_000_000_000, + gas_price_tolerance: MIN_GAS_PRICE, + standard_payment: true, + }) + .await; +} + +async fn should_burn_fee_and_burn_refund(txn_pricing_mode: PricingMode) { + let (price_handling, min_gas_price, gas_limit) = match_pricing_mode(&txn_pricing_mode); + + let refund_ratio = Ratio::new(1, 2); + let config = SingleTransactionTestCase::default_test_config() + .with_pricing_handling(price_handling) + .with_refund_handling(RefundHandling::Burn { refund_ratio }) + .with_fee_handling(FeeHandling::Burn); + + let mut test = SingleTransactionTestCase::new( + ALICE_SECRET_KEY.clone(), + BOB_SECRET_KEY.clone(), + CHARLIE_SECRET_KEY.clone(), + Some(config), + ) + .await; + let txn = invalid_wasm_txn(BOB_SECRET_KEY.clone(), txn_pricing_mode); + + // Fixed transaction pricing. + let lane_id = calculate_transaction_lane_for_transaction(&txn, test.chainspec()).unwrap(); + let expected_transaction_gas = gas_limit.unwrap_or( + txn.gas_limit(test.chainspec(), lane_id) + .unwrap() + .value() + .as_u64(), + ); + let expected_transaction_cost = expected_transaction_gas * min_gas_price as u64; + + test.fixture + .run_until_consensus_in_era(ERA_ONE, ONE_MIN) + .await; + + let (alice_initial_balance, bob_initial_balance, _) = test.get_balances(None); + let initial_total_supply = test.get_total_supply(None); + + let (_txn_hash, block_height, exec_result) = test.send_transaction(txn).await; + + assert!(!exec_result_is_success(&exec_result)); // transaction should not succeed because the wasm bytes are invalid. + assert_exec_result_cost( + exec_result, + expected_transaction_cost.into(), + Gas::new(0), + "fees_and_refunds_are_burnt_separately", + ); + + // Both refunds and fees should be burnt (even though they are burnt separately). Refund + fee + // amounts to the txn cost so expect that the total supply is reduced by that amount. 
+
+    assert_eq!(
+        test.get_total_supply(Some(block_height)),
+        initial_total_supply - expected_transaction_cost
+    );
+
+    let (alice_current_balance, bob_current_balance, _) = test.get_balances(Some(block_height));
+    // The refund and the fees are burnt. No holds should be in place.
+    let bob_expected_total_balance = bob_initial_balance.total - expected_transaction_cost;
+    let bob_expected_available_balance = bob_current_balance.total;
+
+    // Alice (the proposer) shouldn't get anything since both the fee and the refund are burnt.
+    let alice_expected_total_balance = alice_initial_balance.total;
+    let alice_expected_available_balance = alice_expected_total_balance;
+
+    assert_eq!(
+        bob_current_balance.available.clone(),
+        bob_expected_available_balance
+    );
+    assert_eq!(
+        bob_current_balance.total.clone(),
+        bob_expected_total_balance
+    );
+    assert_eq!(
+        alice_current_balance.available.clone(),
+        alice_expected_available_balance
+    );
+    assert_eq!(
+        alice_current_balance.total.clone(),
+        alice_expected_total_balance
+    );
+}
+
+#[tokio::test]
+async fn should_burn_fee_and_burn_refund_fixed() {
+    should_burn_fee_and_burn_refund(PricingMode::Fixed {
+        gas_price_tolerance: MIN_GAS_PRICE,
+        additional_computation_factor: 0,
+    })
+    .await;
+}
+
+#[tokio::test]
+async fn should_burn_fee_and_burn_refund_payment_limited() {
+    should_burn_fee_and_burn_refund(PricingMode::PaymentLimited {
+        payment_amount: 2_500_000_000,
+        gas_price_tolerance: MIN_GAS_PRICE,
+        standard_payment: true,
+    })
+    .await;
+}
+
+async fn should_burn_fee_erroneous_wasm(txn_pricing_mode: PricingMode) {
+    // If erroneous wasm is processed, all the unconsumed amount goes to the fee
+    // and thus all of it is burned if FeeHandling == Burn.
+    let (price_handling, min_gas_price, gas_limit) = match_pricing_mode(&txn_pricing_mode);
+
+    let refund_ratio = Ratio::new(1, 2);
+    let config = SingleTransactionTestCase::default_test_config()
+        .with_pricing_handling(price_handling)
+        .with_refund_handling(RefundHandling::Refund {
refund_ratio }) + .with_fee_handling(FeeHandling::Burn); + + let mut test = SingleTransactionTestCase::new( + ALICE_SECRET_KEY.clone(), + BOB_SECRET_KEY.clone(), + CHARLIE_SECRET_KEY.clone(), + Some(config), + ) + .await; + + let txn = invalid_wasm_txn(BOB_SECRET_KEY.clone(), txn_pricing_mode); + + // Fixed transaction pricing. + let lane_id = calculate_transaction_lane_for_transaction(&txn, test.chainspec()).unwrap(); + let expected_transaction_gas = gas_limit.unwrap_or( + txn.gas_limit(test.chainspec(), lane_id) + .unwrap() + .value() + .as_u64(), + ); + + let expected_transaction_cost = expected_transaction_gas * min_gas_price as u64; + + test.fixture + .run_until_consensus_in_era(ERA_ONE, ONE_MIN) + .await; + + let (alice_initial_balance, bob_initial_balance, _) = test.get_balances(None); + let initial_total_supply = test.get_total_supply(None); + + let (_txn_hash, block_height, exec_result) = test.send_transaction(txn).await; + + assert!(!exec_result_is_success(&exec_result)); // transaction should not succeed because the wasm bytes are invalid. + assert_exec_result_cost( + exec_result, + expected_transaction_cost.into(), + Gas::new(0), + "refunds_are_payed_and_fees_are_burnt", + ); + + // This transaction was erroneous, there should be no refund + let refund_amount: U512 = U512::zero(); + + // Only fees are burnt, so the refund_amount should still be in the total supply. + assert_eq!( + test.get_total_supply(Some(block_height)), + initial_total_supply - expected_transaction_cost + refund_amount + ); + + let (alice_current_balance, bob_current_balance, _) = test.get_balances(Some(block_height)); + // Bob should get back the refund. The fees are burnt and no holds should be in place. 
+ let bob_expected_total_balance = + bob_initial_balance.total - expected_transaction_cost + refund_amount; + let bob_expected_available_balance = bob_current_balance.total; + + // Alice shouldn't get anything since we are operating with no fees + let alice_expected_total_balance = alice_initial_balance.total; + let alice_expected_available_balance = alice_expected_total_balance; + + assert_eq!( + bob_current_balance.available.clone(), + bob_expected_available_balance + ); + assert_eq!( + bob_current_balance.total.clone(), + bob_expected_total_balance + ); + assert_eq!( + alice_current_balance.available.clone(), + alice_expected_available_balance + ); + assert_eq!( + alice_current_balance.total.clone(), + alice_expected_total_balance + ); +} + +#[tokio::test] +async fn should_burn_fee_erroneous_wasm_fixed() { + should_burn_fee_erroneous_wasm(PricingMode::Fixed { + gas_price_tolerance: MIN_GAS_PRICE, + additional_computation_factor: 0, + }) + .await; +} + +#[tokio::test] +async fn should_burn_fee_erroneous_wasm_payment_limited() { + should_burn_fee_erroneous_wasm(PricingMode::PaymentLimited { + payment_amount: 2_500_000_000, + gas_price_tolerance: MIN_GAS_PRICE, + standard_payment: true, + }) + .await; +} + +async fn should_refund_unconsumed_and_gas_hold_fee(txn_pricing_mode: PricingMode) { + let (price_handling, min_gas_price, _gas_limit) = match_pricing_mode(&txn_pricing_mode); + let refund_ratio = Ratio::new(1, 3); + let config = SingleTransactionTestCase::default_test_config() + .with_pricing_handling(price_handling) + .with_refund_handling(RefundHandling::Refund { refund_ratio }) + .with_fee_handling(FeeHandling::NoFee); + + let mut test = SingleTransactionTestCase::new( + ALICE_SECRET_KEY.clone(), + BOB_SECRET_KEY.clone(), + CHARLIE_SECRET_KEY.clone(), + Some(config), + ) + .await; + let txn = valid_wasm_txn(BOB_SECRET_KEY.clone(), txn_pricing_mode); + let lane_id = calculate_transaction_lane_for_transaction(&txn, test.chainspec()).unwrap(); + let gas_limit = 
txn + .gas_limit(test.chainspec(), lane_id) + .unwrap() + .value() + .as_u64(); + + test.fixture + .run_until_consensus_in_era(ERA_ONE, ONE_MIN) + .await; + + let (alice_initial_balance, bob_initial_balance, _) = test.get_balances(None); + let (_txn_hash, block_height, exec_result) = test.send_transaction(txn).await; + + assert!(exec_result_is_success(&exec_result)); + + let expected_transaction_cost = gas_limit * min_gas_price as u64; + assert_exec_result_cost( + exec_result, + expected_transaction_cost.into(), + Gas::new(DO_NOTHING_WASM_EXECUTION_GAS), + "wasm_transaction_fees_are_refunded", + ); + + let (alice_current_balance, bob_current_balance, _) = test.get_balances(Some(block_height)); + + // Bob should get 1/3 of the cost for the unspent gas. Since this transaction consumed 0 + // gas, the unspent gas is equal to the limit. + let refund_amount: u64 = (refund_ratio + * Ratio::from( + expected_transaction_cost - DO_NOTHING_WASM_EXECUTION_GAS * min_gas_price as u64, + )) + .to_integer(); + + // Bob should get back the refund. The fees should be on hold, so Bob's total should be the + // same as initial. 
+ let bob_expected_total_balance = bob_initial_balance.total; + let bob_expected_available_balance = + bob_current_balance.total - expected_transaction_cost + refund_amount; + // Alice shouldn't get anything since we are operating with no fees + let alice_expected_total_balance = alice_initial_balance.total; + let alice_expected_available_balance = alice_expected_total_balance; + + assert_eq!( + bob_current_balance.available.clone(), + bob_expected_available_balance + ); + assert_eq!( + bob_current_balance.total.clone(), + bob_expected_total_balance + ); + assert_eq!( + alice_current_balance.available.clone(), + alice_expected_available_balance + ); + assert_eq!( + alice_current_balance.total.clone(), + alice_expected_total_balance + ); +} + +#[tokio::test] +async fn should_refund_unconsumed_and_gas_hold_fee_fixed() { + should_refund_unconsumed_and_gas_hold_fee(PricingMode::Fixed { + gas_price_tolerance: MIN_GAS_PRICE, + additional_computation_factor: 0, + }) + .await; +} + +#[tokio::test] +async fn should_refund_unconsumed_and_gas_hold_fee_payment_limited() { + should_refund_unconsumed_and_gas_hold_fee(PricingMode::PaymentLimited { + payment_amount: 2_500_000_000, + gas_price_tolerance: MIN_GAS_PRICE, + standard_payment: true, + }) + .await; +} + +async fn should_gas_hold_fee_erroneous_wasm(txn_pricing_mode: PricingMode) { + let (price_handling, min_gas_price, gas_limit) = match_pricing_mode(&txn_pricing_mode); + + let refund_ratio = Ratio::new(1, 2); + let config = SingleTransactionTestCase::default_test_config() + .with_pricing_handling(price_handling) + .with_refund_handling(RefundHandling::Refund { refund_ratio }) + .with_fee_handling(FeeHandling::NoFee); + + let mut test = SingleTransactionTestCase::new( + ALICE_SECRET_KEY.clone(), + BOB_SECRET_KEY.clone(), + CHARLIE_SECRET_KEY.clone(), + Some(config), + ) + .await; + + let txn = invalid_wasm_txn(BOB_SECRET_KEY.clone(), txn_pricing_mode); + let meta_transaction = MetaTransaction::from_transaction( + &txn, + 
test.chainspec().core_config.pricing_handling, + &test.chainspec().transaction_config, + ) + .unwrap(); + // Fixed transaction pricing. + let expected_consumed_gas = Gas::new(0); // expect that this transaction doesn't consume any gas since it has invalid wasm. + let expected_transaction_gas = gas_limit.unwrap_or( + meta_transaction + .gas_limit(test.chainspec()) + .unwrap() + .value() + .as_u64(), + ); + let expected_transaction_cost = expected_transaction_gas * min_gas_price as u64; + + test.fixture + .run_until_consensus_in_era(ERA_ONE, ONE_MIN) + .await; + + let (alice_initial_balance, bob_initial_balance, _charlie_initial_balance) = + test.get_balances(None); + let initial_total_supply = test.get_total_supply(None); + let (_txn_hash, block_height, exec_result) = test.send_transaction(txn).await; + + assert!(!exec_result_is_success(&exec_result)); // transaction should not succeed because the wasm bytes are invalid. + assert_exec_result_cost( + exec_result, + expected_transaction_cost.into(), + expected_consumed_gas, + "refunds_are_payed_and_fees_are_on_hold", + ); + + // Nothing is burnt so total supply should be the same. + assert_eq!( + test.get_total_supply(Some(block_height)), + initial_total_supply + ); + + let (alice_current_balance, bob_current_balance, _) = test.get_balances(Some(block_height)); + // Bob should get back the refund. The fees should be on hold, so Bob's total should be the + // same as initial. 
+ let bob_expected_total_balance = bob_initial_balance.total; + // There is no refund for bob because we don't pay refunds for transactions that errored during + // execution + let bob_expected_available_balance = bob_current_balance.total - expected_transaction_cost; + + // Alice shouldn't get anything since we are operating with no fees + let alice_expected_total_balance = alice_initial_balance.total; + let alice_expected_available_balance = alice_expected_total_balance; + + assert_eq!( + bob_current_balance.available.clone(), + bob_expected_available_balance + ); + assert_eq!( + bob_current_balance.total.clone(), + bob_expected_total_balance + ); + assert_eq!( + alice_current_balance.available.clone(), + alice_expected_available_balance + ); + assert_eq!( + alice_current_balance.total.clone(), + alice_expected_total_balance + ); +} + +#[tokio::test] +async fn should_gas_hold_fee_erroneous_wasm_fixed() { + should_gas_hold_fee_erroneous_wasm(PricingMode::Fixed { + gas_price_tolerance: MIN_GAS_PRICE, + additional_computation_factor: 0, + }) + .await; +} + +#[tokio::test] +async fn should_gas_hold_fee_erroneous_wasm_payment_limited() { + should_gas_hold_fee_erroneous_wasm(PricingMode::PaymentLimited { + payment_amount: 2_500_000_000, + gas_price_tolerance: MIN_GAS_PRICE, + standard_payment: true, + }) + .await; +} + +#[tokio::test] +async fn should_burn_fee_refund_unconsumed_custom_payment() { + let refund_ratio = Ratio::new(1, 2); + let config = SingleTransactionTestCase::default_test_config() + .with_pricing_handling(PricingHandling::PaymentLimited) + .with_refund_handling(RefundHandling::Refund { refund_ratio }) + .with_fee_handling(FeeHandling::Burn); + + let mut test = SingleTransactionTestCase::new( + ALICE_SECRET_KEY.clone(), + BOB_SECRET_KEY.clone(), + CHARLIE_SECRET_KEY.clone(), + Some(config), + ) + .await; + + // This contract uses custom payment. 
+ let contract_file = RESOURCES_PATH + .join("..") + .join("target") + .join("wasm32-unknown-unknown") + .join("release") + .join("ee_601_regression.wasm"); + let module_bytes = Bytes::from(std::fs::read(contract_file).expect("cannot read module bytes")); + + let expected_transaction_gas = 2_500_000_000u64; + let expected_transaction_cost = expected_transaction_gas * MIN_GAS_PRICE as u64; + + let mut txn = Transaction::from( + TransactionV1Builder::new_session( + false, + module_bytes, + TransactionRuntimeParams::VmCasperV1, + ) + .with_chain_name(CHAIN_NAME) + .with_pricing_mode(PricingMode::PaymentLimited { + payment_amount: expected_transaction_gas, + gas_price_tolerance: MIN_GAS_PRICE, + standard_payment: false, + }) + .with_initiator_addr(BOB_PUBLIC_KEY.clone()) + .build() + .unwrap(), + ); + txn.sign(&BOB_SECRET_KEY); + + test.fixture + .run_until_consensus_in_era(ERA_ONE, ONE_MIN) + .await; + + let (alice_initial_balance, bob_initial_balance, _charlie_initial_balance) = + test.get_balances(None); + let initial_total_supply = test.get_total_supply(None); + let (_txn_hash, block_height, exec_result) = test.send_transaction(txn).await; + + match &exec_result { + ExecutionResult::V2(exec_result_v2) => { + assert_eq!(exec_result_v2.cost, expected_transaction_cost.into()); + } + _ => { + panic!("Unexpected exec result version.") + } + } + + let refund_amount = exec_result.refund().expect("should have refund"); + + // Expect that the fees are burnt. + assert_eq!( + test.get_total_supply(Some(block_height)), + initial_total_supply - expected_transaction_cost + refund_amount + ); + + let (alice_current_balance, bob_current_balance, _) = test.get_balances(Some(block_height)); + // Bob should get a refund. Since the contract doesn't set a custom purse for the refund, it + // should get the refund in the main purse. 
+ let bob_expected_total_balance = + bob_initial_balance.total - expected_transaction_cost + refund_amount; + let bob_expected_available_balance = bob_expected_total_balance; // No holds expected. + + // Alice shouldn't get anything since we are operating with no fees + let alice_expected_total_balance = alice_initial_balance.total; + let alice_expected_available_balance = alice_expected_total_balance; + + assert_eq!( + bob_current_balance.available.clone(), + bob_expected_available_balance + ); + assert_eq!( + bob_current_balance.total.clone(), + bob_expected_total_balance + ); + assert_eq!( + alice_current_balance.available.clone(), + alice_expected_available_balance + ); + assert_eq!( + alice_current_balance.total.clone(), + alice_expected_total_balance + ); +} + +#[tokio::test] +async fn should_allow_norefund_nofee_custom_payment() { + let config = SingleTransactionTestCase::default_test_config() + .with_pricing_handling(PricingHandling::PaymentLimited) + .with_refund_handling(RefundHandling::NoRefund) + .with_fee_handling(FeeHandling::NoFee); + + let mut test = SingleTransactionTestCase::new( + ALICE_SECRET_KEY.clone(), + BOB_SECRET_KEY.clone(), + CHARLIE_SECRET_KEY.clone(), + Some(config), + ) + .await; + + // This contract uses custom payment. 
+ let contract_file = RESOURCES_PATH + .join("..") + .join("target") + .join("wasm32-unknown-unknown") + .join("release") + .join("ee_601_regression.wasm"); + let module_bytes = Bytes::from(std::fs::read(contract_file).expect("cannot read module bytes")); + + let expected_transaction_gas = 1_000_000_000_000u64; + let expected_transaction_cost = expected_transaction_gas * MIN_GAS_PRICE as u64; + + let mut txn = Transaction::from( + TransactionV1Builder::new_session( + false, + module_bytes, + TransactionRuntimeParams::VmCasperV1, + ) + .with_chain_name(CHAIN_NAME) + .with_pricing_mode(PricingMode::PaymentLimited { + payment_amount: expected_transaction_gas, + gas_price_tolerance: MIN_GAS_PRICE, + standard_payment: false, + }) + .with_initiator_addr(BOB_PUBLIC_KEY.clone()) + .build() + .unwrap(), + ); + txn.sign(&BOB_SECRET_KEY); + + test.fixture + .run_until_consensus_in_era(ERA_ONE, ONE_MIN) + .await; + + let (alice_initial_balance, bob_initial_balance, _charlie_initial_balance) = + test.get_balances(None); + let initial_total_supply = test.get_total_supply(None); + let (_txn_hash, block_height, exec_result) = test.send_transaction(txn).await; + + match exec_result { + ExecutionResult::V2(exec_result_v2) => { + assert_eq!(exec_result_v2.cost, expected_transaction_cost.into()); + } + _ => { + panic!("Unexpected exec result version.") + } + } + + let payment_purse_balance = get_payment_purse_balance(&mut test.fixture, Some(block_height)); + assert_eq!( + *payment_purse_balance + .total_balance() + .expect("should have total balance"), + U512::zero(), + "payment purse should have a 0 balance" + ); + + // we're not burning anything, so total supply should be the same + assert_eq!( + test.get_total_supply(Some(block_height)), + initial_total_supply, + "total supply should be the same before and after" + ); + + // updated balances + let (alice_current_balance, bob_current_balance, _) = test.get_balances(Some(block_height)); + + // the proposer's balance should be the 
same because we are in no fee mode + assert_eq!( + alice_initial_balance, alice_current_balance, + "the proposers balance should be unchanged as we are in no fee mode" + ); + + // the initiator should have a hold equal to the cost + assert_eq!( + bob_current_balance.total.clone(), + bob_initial_balance.total, + "bob's total balance should be unchanged as we are in no fee mode" + ); + + assert_ne!( + bob_current_balance.available.clone(), + bob_initial_balance.total, + "bob's available balance and total balance should not be the same" + ); + + let bob_expected_available_balance = bob_initial_balance.total - expected_transaction_cost; + assert_eq!( + bob_current_balance.available.clone(), + bob_expected_available_balance, + "bob's available balance should reflect a hold for the cost" + ); +} + +async fn transfer_fee_is_burnt_no_refund(txn_pricing_mode: PricingMode) { + let (price_handling, min_gas_price, gas_limit) = match_pricing_mode(&txn_pricing_mode); + + let config = SingleTransactionTestCase::default_test_config() + .with_pricing_handling(price_handling) + .with_refund_handling(RefundHandling::NoRefund) + .with_fee_handling(FeeHandling::Burn); + + let mut test = SingleTransactionTestCase::new( + ALICE_SECRET_KEY.clone(), + BOB_SECRET_KEY.clone(), + CHARLIE_SECRET_KEY.clone(), + Some(config), + ) + .await; + + let transfer_amount = test + .chainspec() + .transaction_config + .native_transfer_minimum_motes + + 100; + + let txn = transfer_txn( + ALICE_SECRET_KEY.clone(), + &CHARLIE_PUBLIC_KEY, + txn_pricing_mode, + transfer_amount, + ); + + test.fixture + .run_until_consensus_in_era(ERA_ONE, ONE_MIN) + .await; + + let (alice_initial_balance, _, _) = test.get_balances(None); + let initial_total_supply = test.get_total_supply(None); + + let (_txn_hash, block_height, exec_result) = test.send_transaction(txn).await; + + let expected_transfer_gas: u64 = gas_limit.unwrap_or( + test.chainspec() + .system_costs_config + .mint_costs() + .transfer + .into(), + ); + let 
expected_transfer_cost = expected_transfer_gas * min_gas_price as u64; + + assert!(exec_result_is_success(&exec_result), "{:?}", exec_result); + assert_eq!(exec_result.transfers().len(), 1, "{:?}", exec_result); + assert_exec_result_cost( + exec_result, + expected_transfer_cost.into(), + expected_transfer_gas.into(), + "transfer_fee_is_burnt_no_refund", + ); + + // The fees should have been burnt so expect the total supply to have been + // reduced by the fee that was burnt. + let total_supply_after_txn = test.get_total_supply(Some(block_height)); + assert_ne!( + total_supply_after_txn, initial_total_supply, + "total supply should be lowered" + ); + let diff = initial_total_supply - total_supply_after_txn; + assert_eq!( + diff, + U512::from(expected_transfer_cost), + "total supply should be lowered by expected transfer cost" + ); + + // Get the current balances after the transaction and check them. + let (alice_current_balance, _, charlie_balance) = test.get_balances(Some(block_height)); + let alice_expected_total_balance = + alice_initial_balance.total - transfer_amount - expected_transfer_cost; + let alice_expected_available_balance = alice_expected_total_balance; + assert_eq!( + charlie_balance + .expect("Charlie should have a balance.") + .total, + transfer_amount.into(), + ); + assert_eq!( + alice_current_balance.available, alice_expected_available_balance, + "alice available balance should match" + ); + assert_eq!(alice_current_balance.total, alice_expected_total_balance); +} + +#[tokio::test] +async fn transfer_fee_is_burnt_no_refund_fixed_pricing() { + transfer_fee_is_burnt_no_refund(PricingMode::Fixed { + gas_price_tolerance: MIN_GAS_PRICE, + additional_computation_factor: 0, + }) + .await; +} + +#[tokio::test] +async fn transfer_fee_is_burnt_no_refund_payment_limited_pricing() { + transfer_fee_is_burnt_no_refund(PricingMode::PaymentLimited { + payment_amount: 100_000_000, + gas_price_tolerance: MIN_GAS_PRICE, + standard_payment: true, + }) + .await; +} + 
+// PTP == fee pay to proposer +async fn fee_ptp_no_refund(txn_pricing_mode: PricingMode) { + let (price_handling, min_gas_price, gas_limit) = match_pricing_mode(&txn_pricing_mode); + + let config = SingleTransactionTestCase::default_test_config() + .with_pricing_handling(price_handling) + .with_refund_handling(RefundHandling::NoRefund) + .with_fee_handling(FeeHandling::PayToProposer); + + let mut test = SingleTransactionTestCase::new( + ALICE_SECRET_KEY.clone(), + BOB_SECRET_KEY.clone(), + CHARLIE_SECRET_KEY.clone(), + Some(config), + ) + .await; + + let transfer_amount = test + .chainspec() + .transaction_config + .native_transfer_minimum_motes + + 100; + + let txn = transfer_txn( + BOB_SECRET_KEY.clone(), + &CHARLIE_PUBLIC_KEY, + txn_pricing_mode, + transfer_amount, + ); + + test.fixture + .run_until_consensus_in_era(ERA_ONE, ONE_MIN) + .await; + + let (alice_initial_balance, bob_initial_balance, _charlie_initial_balance) = + test.get_balances(None); + let initial_total_supply = test.get_total_supply(None); + + let (_txn_hash, block_height, exec_result) = test.send_transaction(txn).await; + + let expected_transfer_gas: u64 = gas_limit.unwrap_or( + test.chainspec() + .system_costs_config + .mint_costs() + .transfer + .into(), + ); + let expected_transfer_cost = expected_transfer_gas * min_gas_price as u64; + + assert!(exec_result_is_success(&exec_result)); + assert_exec_result_cost( + exec_result, + expected_transfer_cost.into(), + expected_transfer_gas.into(), + "fee_is_payed_to_proposer_no_refund", + ); + + // Nothing should be burnt. + assert_eq!( + initial_total_supply, + test.get_total_supply(Some(block_height)), + "total supply should unchanged" + ); + + let (alice_current_balance, bob_current_balance, charlie_balance) = + test.get_balances(Some(block_height)); + + // since Alice was the proposer of the block, it should get back the transfer fee since + // FeeHandling is set to PayToProposer. 
+ let bob_expected_total_balance = + bob_initial_balance.total - transfer_amount - expected_transfer_cost; + let bob_expected_available_balance = bob_expected_total_balance; + + let alice_expected_total_balance = alice_initial_balance.total + expected_transfer_cost; + let alice_expected_available_balance = alice_expected_total_balance; + + assert_eq!( + charlie_balance + .expect("Expected Charlie to have a balance") + .total, + transfer_amount.into() + ); + assert_eq!( + bob_current_balance.available, + bob_expected_available_balance + ); + assert_eq!(bob_current_balance.total, bob_expected_total_balance); + assert_eq!( + alice_current_balance.available, + alice_expected_available_balance + ); + assert_eq!(alice_current_balance.total, alice_expected_total_balance); +} + +#[tokio::test] +async fn fee_ptp_norefund_fixed_pricing() { + fee_ptp_no_refund(PricingMode::Fixed { + gas_price_tolerance: MIN_GAS_PRICE, + additional_computation_factor: 0, + }) + .await; +} + +#[tokio::test] +async fn fee_ptp_norefund_payment_limited() { + fee_ptp_no_refund(PricingMode::PaymentLimited { + payment_amount: 100_000_000, + gas_price_tolerance: MIN_GAS_PRICE, + standard_payment: true, + }) + .await; +} + +async fn erroneous_wasm_transaction_no_refund(txn_pricing_mode: PricingMode) { + let (price_handling, min_gas_price, gas_limit) = match_pricing_mode(&txn_pricing_mode); + + let refund_ratio = Ratio::new(1, 2); + let config = SingleTransactionTestCase::default_test_config() + .with_pricing_handling(price_handling) + .with_refund_handling(RefundHandling::Refund { refund_ratio }) + .with_fee_handling(FeeHandling::PayToProposer); + + let mut test = SingleTransactionTestCase::new( + ALICE_SECRET_KEY.clone(), + BOB_SECRET_KEY.clone(), + CHARLIE_SECRET_KEY.clone(), + Some(config), + ) + .await; + + let txn = invalid_wasm_txn(BOB_SECRET_KEY.clone(), txn_pricing_mode.clone()); + + test.fixture + .run_until_consensus_in_era(ERA_ONE, ONE_MIN) + .await; + + let (alice_initial_balance, 
bob_initial_balance, _charlie_initial_balance) = + test.get_balances(None); + let initial_total_supply = test.get_total_supply(None); + + let (_txn_hash, block_height, exec_result) = test.send_transaction(txn).await; + + let expected_transaction_gas: u64 = gas_limit.unwrap_or( + test.chainspec() + .get_max_gas_limit_by_category(LARGE_WASM_LANE_ID), + ); + let expected_transaction_cost = expected_transaction_gas * min_gas_price as u64; + + assert!(!exec_result_is_success(&exec_result)); // transaction should not succeed because the wasm bytes are invalid. + assert_exec_result_cost( + exec_result, + expected_transaction_cost.into(), + Gas::new(0), + format!( + "erroneous_wasm_transaction_no_refund {:?}", + txn_pricing_mode + ) + .as_str(), + ); + + // Nothing is burnt so total supply should be the same. + assert_eq!( + initial_total_supply, + test.get_total_supply(Some(block_height)) + ); + + let (alice_current_balance, bob_current_balance, _) = test.get_balances(Some(block_height)); + // Bob gets no refund, we don't pay refunds on erroneous wasm + let bob_expected_total_balance = bob_initial_balance.total - expected_transaction_cost; + let bob_expected_available_balance = bob_expected_total_balance; + + // Alice should get all the fee since it's set to pay to proposer and Bob got no refund + let alice_expected_total_balance = alice_initial_balance.total + expected_transaction_cost; + let alice_expected_available_balance = alice_expected_total_balance; + + assert_eq!( + bob_current_balance.available.clone(), + bob_expected_available_balance + ); + assert_eq!( + bob_current_balance.total.clone(), + bob_expected_total_balance + ); + assert_eq!( + alice_current_balance.available.clone(), + alice_expected_available_balance + ); + assert_eq!( + alice_current_balance.total.clone(), + alice_expected_total_balance + ); +} + +async fn wasm_transaction_ptp_fee_and_refund(txn_pricing_mode: PricingMode) { + let (price_handling, min_gas_price, gas_limit) = 
match_pricing_mode(&txn_pricing_mode); + + let refund_ratio = Ratio::new(1, 3); + let config = SingleTransactionTestCase::default_test_config() + .with_pricing_handling(price_handling) + .with_refund_handling(RefundHandling::Refund { refund_ratio }) + .with_fee_handling(FeeHandling::PayToProposer); + + let mut test = SingleTransactionTestCase::new( + ALICE_SECRET_KEY.clone(), + BOB_SECRET_KEY.clone(), + CHARLIE_SECRET_KEY.clone(), + Some(config), + ) + .await; + + let txn = valid_wasm_txn(BOB_SECRET_KEY.clone(), txn_pricing_mode.clone()); + let lane_id = calculate_transaction_lane_for_transaction(&txn, test.chainspec()).unwrap(); + let expected_transaction_gas = gas_limit.unwrap_or( + txn.gas_limit(test.chainspec(), lane_id) + .unwrap() + .value() + .as_u64(), + ); + test.fixture + .run_until_consensus_in_era(ERA_ONE, ONE_MIN) + .await; + let (alice_initial_balance, bob_initial_balance, _charlie_initial_balance) = + test.get_balances(None); + let initial_total_supply = test.get_total_supply(None); + + let (_txn_hash, block_height, exec_result) = test.send_transaction(txn).await; + + assert!(exec_result_is_success(&exec_result)); + + let expected_transaction_cost = expected_transaction_gas * MIN_GAS_PRICE as u64; + assert_exec_result_cost( + exec_result, + expected_transaction_cost.into(), + Gas::new(DO_NOTHING_WASM_EXECUTION_GAS), + format!("wasm_transaction_ptp_fee_and_refund {:?}", txn_pricing_mode).as_str(), + ); + + // Nothing is burnt so total supply should be the same. + assert_eq!( + initial_total_supply, + test.get_total_supply(Some(block_height)) + ); + + // Bob should get back half of the cost for the unspent gas. Since this transaction consumed 0 + // gas, the unspent gas is equal to the limit. 
+ let refund_amount: U512 = (refund_ratio + * Ratio::from( + expected_transaction_cost - DO_NOTHING_WASM_EXECUTION_GAS * min_gas_price as u64, + )) + .to_integer() + .into(); + + let (alice_current_balance, bob_current_balance, _) = test.get_balances(Some(block_height)); + let bob_expected_total_balance = + bob_initial_balance.total - expected_transaction_cost + refund_amount; + let bob_expected_available_balance = bob_expected_total_balance; + + // Alice should get the non-refunded part of the fee since it's set to pay to proposer + let alice_expected_total_balance = + alice_initial_balance.total + expected_transaction_cost - refund_amount; + let alice_expected_available_balance = alice_expected_total_balance; + + assert_eq!( + bob_current_balance.available.clone(), + bob_expected_available_balance + ); + assert_eq!( + bob_current_balance.total.clone(), + bob_expected_total_balance + ); + assert_eq!( + alice_current_balance.available.clone(), + alice_expected_available_balance + ); + assert_eq!( + alice_current_balance.total.clone(), + alice_expected_total_balance + ); +} + +#[tokio::test] +async fn erroneous_wasm_transaction_norefund_fixed_pricing() { + erroneous_wasm_transaction_no_refund(PricingMode::Fixed { + gas_price_tolerance: MIN_GAS_PRICE, + additional_computation_factor: 0, + }) + .await; +} + +#[tokio::test] +async fn wasm_transaction_refund_fixed_pricing() { + wasm_transaction_ptp_fee_and_refund(PricingMode::Fixed { + gas_price_tolerance: MIN_GAS_PRICE, + additional_computation_factor: 0, + }) + .await; +} + +#[tokio::test] +async fn wasm_transaction_payment_limited_refund() { + erroneous_wasm_transaction_no_refund(PricingMode::PaymentLimited { + payment_amount: 2500000000, + gas_price_tolerance: MIN_GAS_PRICE, + standard_payment: true, + }) + .await; +} + +async fn fee_is_accumulated_and_distributed_no_refund(txn_pricing_mode: PricingMode) { + let (price_handling, min_gas_price, gas_limit) = match_pricing_mode(&txn_pricing_mode); + + let admins: 
BTreeSet = vec![ALICE_PUBLIC_KEY.clone(), BOB_PUBLIC_KEY.clone()] + .into_iter() + .collect(); + + let config = SingleTransactionTestCase::default_test_config() + .with_pricing_handling(price_handling) + .with_refund_handling(RefundHandling::NoRefund) + .with_fee_handling(FeeHandling::Accumulate) + .with_administrators(admins); + + let mut test = SingleTransactionTestCase::new( + ALICE_SECRET_KEY.clone(), + BOB_SECRET_KEY.clone(), + CHARLIE_SECRET_KEY.clone(), + Some(config), + ) + .await; + + let transfer_amount = test + .chainspec() + .transaction_config + .native_transfer_minimum_motes + + 100; + + let txn = transfer_txn( + BOB_SECRET_KEY.clone(), + &CHARLIE_PUBLIC_KEY, + txn_pricing_mode, + transfer_amount, + ); + + let expected_transfer_gas: u64 = gas_limit.unwrap_or( + test.chainspec() + .system_costs_config + .mint_costs() + .transfer + .into(), + ); + let expected_transfer_cost = expected_transfer_gas * min_gas_price as u64; + + test.fixture + .run_until_consensus_in_era(ERA_ONE, ONE_MIN) + .await; + let (alice_initial_balance, bob_initial_balance, _charlie_initial_balance) = + test.get_balances(None); + let initial_total_supply = test.get_total_supply(None); + let acc_purse_initial_balance = *test + .get_accumulate_purse_balance(None, false) + .available_balance() + .expect("Accumulate purse should have a balance."); + + let (_txn_hash, block_height, exec_result) = test.send_transaction(txn).await; + + assert!(exec_result_is_success(&exec_result)); + assert_exec_result_cost( + exec_result, + expected_transfer_cost.into(), + expected_transfer_gas.into(), + "fee_is_accumulated_and_distributed_no_refund", + ); + + assert_eq!( + initial_total_supply, + test.get_total_supply(Some(block_height)), + "total supply should remain unchanged" + ); + + let (alice_current_balance, bob_current_balance, charlie_balance) = + test.get_balances(Some(block_height)); + + let bob_expected_total_balance = + bob_initial_balance.total - transfer_amount - expected_transfer_cost; + 
let bob_expected_available_balance = bob_expected_total_balance; + + let alice_expected_total_balance = alice_initial_balance.total; + let alice_expected_available_balance = alice_expected_total_balance; + + assert_eq!( + charlie_balance + .expect("Expected Charlie to have a balance") + .total, + transfer_amount.into() + ); + + assert_eq!( + bob_current_balance.available, + bob_expected_available_balance + ); + assert_eq!(bob_current_balance.total, bob_expected_total_balance); + assert_eq!( + alice_current_balance.available, + alice_expected_available_balance + ); + assert_eq!(alice_current_balance.total, alice_expected_total_balance); + + let acc_purse_balance = *test + .get_accumulate_purse_balance(Some(block_height), false) + .available_balance() + .expect("Accumulate purse should have a balance."); + + // The fees should be sent to the accumulation purse. + assert_eq!( + acc_purse_balance - acc_purse_initial_balance, + expected_transfer_cost.into() + ); + + test.fixture + .run_until_block_height(block_height + 10, ONE_MIN) + .await; + + let accumulate_purse_balance = *test + .get_accumulate_purse_balance(Some(block_height + 10), false) + .available_balance() + .expect("Accumulate purse should have a balance."); + + assert_eq!(accumulate_purse_balance, U512::from(0)); +} + +#[tokio::test] +async fn fee_is_accumulated_and_distributed_no_refund_fixed_pricing() { + fee_is_accumulated_and_distributed_no_refund(PricingMode::Fixed { + gas_price_tolerance: MIN_GAS_PRICE, + additional_computation_factor: 0, + }) + .await; +} + +#[tokio::test] +async fn fee_is_accumulated_and_distributed_no_refund_payment_limited_pricing() { + fee_is_accumulated_and_distributed_no_refund(PricingMode::PaymentLimited { + payment_amount: 100_000_000, + gas_price_tolerance: MIN_GAS_PRICE, + standard_payment: true, + }) + .await; +} + +fn transfer_txn>( + from: Arc, + to: &PublicKey, + pricing_mode: PricingMode, + amount: A, +) -> Transaction { + let mut txn = Transaction::from( + 
TransactionV1Builder::new_transfer(amount, None, to.clone(), None) + .unwrap() + .with_initiator_addr(PublicKey::from(&*from)) + .with_pricing_mode(pricing_mode) + .with_chain_name(CHAIN_NAME) + .build() + .unwrap(), + ); + txn.sign(&from); + txn +} + +pub(crate) fn invalid_wasm_txn( + initiator: Arc, + pricing_mode: PricingMode, +) -> Transaction { + //These bytes are intentionally so large - this way they fall into "WASM_LARGE" category in the + // local chainspec Alternatively we could change the chainspec to have a different limits + // for the wasm categories, but that would require aligning all tests that use local + // chainspec + let module_bytes = Bytes::from(vec![1; 172_033]); + let mut txn = Transaction::from( + TransactionV1Builder::new_session( + false, + module_bytes, + TransactionRuntimeParams::VmCasperV1, + ) + .with_chain_name(CHAIN_NAME) + .with_pricing_mode(pricing_mode) + .with_initiator_addr(PublicKey::from(&*initiator)) + .build() + .unwrap(), + ); + txn.sign(&initiator); + txn +} + +fn valid_wasm_txn(initiator: Arc, pricing_mode: PricingMode) -> Transaction { + let contract_file = RESOURCES_PATH + .join("..") + .join("target") + .join("wasm32-unknown-unknown") + .join("release") + .join("do_nothing.wasm"); + let module_bytes = Bytes::from(std::fs::read(contract_file).expect("cannot read module bytes")); + let mut txn = Transaction::from( + TransactionV1Builder::new_session( + false, + module_bytes, + TransactionRuntimeParams::VmCasperV1, + ) + .with_chain_name(CHAIN_NAME) + .with_pricing_mode(pricing_mode) + .with_initiator_addr(PublicKey::from(&*initiator)) + .build() + .unwrap(), + ); + txn.sign(&initiator); + txn +} + +fn match_pricing_mode(txn_pricing_mode: &PricingMode) -> (PricingHandling, u8, Option) { + match txn_pricing_mode { + PricingMode::PaymentLimited { + gas_price_tolerance, + payment_amount, + .. 
+ } => ( + PricingHandling::PaymentLimited, + *gas_price_tolerance, + Some(*payment_amount), + ), + PricingMode::Fixed { + gas_price_tolerance, + .. + } => (PricingHandling::Fixed, *gas_price_tolerance, None), + PricingMode::Prepaid { .. } => unimplemented!(), + } +} + +#[tokio::test] +async fn holds_should_be_added_and_cleared_fixed_pricing() { + holds_should_be_added_and_cleared(PricingMode::Fixed { + gas_price_tolerance: MIN_GAS_PRICE, + additional_computation_factor: 0, + }) + .await; +} + +#[tokio::test] +async fn holds_should_be_added_and_cleared_payment_limited_pricing() { + holds_should_be_added_and_cleared(PricingMode::PaymentLimited { + payment_amount: 100_000_000, + gas_price_tolerance: MIN_GAS_PRICE, + standard_payment: true, + }) + .await; +} + +async fn holds_should_be_added_and_cleared(txn_pricing_mode: PricingMode) { + let (price_handling, min_gas_price, gas_limit) = match_pricing_mode(&txn_pricing_mode); + + let config = SingleTransactionTestCase::default_test_config() + .with_pricing_handling(price_handling) + .with_refund_handling(RefundHandling::NoRefund) + .with_fee_handling(FeeHandling::NoFee); + + let mut test = SingleTransactionTestCase::new( + ALICE_SECRET_KEY.clone(), + BOB_SECRET_KEY.clone(), + CHARLIE_SECRET_KEY.clone(), + Some(config), + ) + .await; + + let transfer_amount = U512::from( + test.chainspec() + .transaction_config + .native_transfer_minimum_motes, + ); + + // transfer from bob to charlie + let txn = transfer_txn( + BOB_SECRET_KEY.clone(), + &CHARLIE_PUBLIC_KEY, + txn_pricing_mode, + transfer_amount, + ); + + let expected_transfer_gas: u64 = gas_limit.unwrap_or( + test.chainspec() + .system_costs_config + .mint_costs() + .transfer + .into(), + ); + let expected_transfer_cost = expected_transfer_gas * min_gas_price as u64; + + test.fixture + .run_until_consensus_in_era(ERA_ONE, ONE_MIN) + .await; + + let (_, bob_initial_balance, _) = test.get_balances(None); + let initial_total_supply = test.get_total_supply(None); + + let 
(_txn_hash, block_height, exec_result) = test.send_transaction(txn).await; + assert!(exec_result_is_success(&exec_result), "{:?}", exec_result); // transaction should have succeeded. + assert_exec_result_cost( + exec_result, + expected_transfer_cost.into(), + expected_transfer_gas.into(), + "holds_should_be_added_and_cleared", + ); + + assert_eq!( + initial_total_supply, + test.get_total_supply(Some(block_height)), + "total supply should remain unchanged" + ); + + // Get the current balances after the transaction and check them. + let (_, bob_current_balance, charlie_balance) = test.get_balances(Some(block_height)); + assert_eq!( + charlie_balance + .expect("Expected Charlie to have a balance") + .total, + transfer_amount, + "charlie's balance should equal transfer amount" + ); + assert_ne!( + bob_current_balance.available, bob_current_balance.total, + "total and available should NOT be equal at this point" + ); + assert_eq!( + bob_initial_balance.total, + bob_current_balance.total + transfer_amount, + "total balance should be original total balance - transferred amount" + ); + assert_eq!( + bob_initial_balance.total, + bob_current_balance.available + expected_transfer_cost + transfer_amount, + "diff from initial balance should equal available + cost + transfer_amount" + ); + + test.fixture + .run_until_block_height(block_height + 5, ONE_MIN) + .await; + let (_, bob_balance, _) = test.get_balances(Some(block_height + 5)); + assert_eq!( + bob_balance.available, bob_balance.total, + "total and available should be equal at this point" + ); +} + +#[tokio::test] +async fn fee_holds_are_amortized() { + let refund_ratio = Ratio::new(1, 2); + let config = SingleTransactionTestCase::default_test_config() + .with_pricing_handling(PricingHandling::Fixed) + .with_refund_handling(RefundHandling::Burn { refund_ratio }) + .with_fee_handling(FeeHandling::NoFee) + .with_gas_hold_balance_handling(HoldBalanceHandling::Amortized) + 
.with_balance_hold_interval(TimeDiff::from_seconds(10)); + + let mut test = SingleTransactionTestCase::new( + ALICE_SECRET_KEY.clone(), + BOB_SECRET_KEY.clone(), + CHARLIE_SECRET_KEY.clone(), + Some(config), + ) + .await; + let txn = invalid_wasm_txn( + BOB_SECRET_KEY.clone(), + PricingMode::Fixed { + gas_price_tolerance: MIN_GAS_PRICE, + additional_computation_factor: 0, + }, + ); + + test.fixture + .run_until_consensus_in_era(ERA_ONE, ONE_MIN) + .await; + + let (alice_initial_balance, bob_initial_balance, _) = test.get_balances(None); + let initial_total_supply = test.get_total_supply(None); + + let (_txn_hash, block_height, exec_result) = test.send_transaction(txn).await; + + // Fixed transaction pricing. + let expected_transaction_gas: u64 = test + .chainspec() + .get_max_gas_limit_by_category(LARGE_WASM_LANE_ID); + + let expected_transaction_cost = expected_transaction_gas * MIN_GAS_PRICE as u64; + // transaction should not succeed because the wasm bytes are invalid. + // this transaction has invalid wasm, so the baseline will be used as consumed + assert!(!exec_result_is_success(&exec_result)); + + let expected_consumed = Gas::new(0); + assert_exec_result_cost( + exec_result, + expected_transaction_cost.into(), + expected_consumed, + "fee_holds_are_amortized", + ); + + // This transaction consumed 0 gas, the unspent gas is equal to the limit, so we apply the + // refund ratio to the full transaction cost. + // error transactions no longer refund + let refund_amount = U512::zero(); + + // We set it up so that the refunds are burnt so check this. + assert_eq!( + test.get_total_supply(Some(block_height)), + initial_total_supply - refund_amount + ); + + let (alice_current_balance, bob_current_balance, _) = test.get_balances(Some(block_height)); + // Bob doesn't get a refund. The refund is burnt. A hold is put in place for the + // transaction cost. 
+ let bob_balance_hold = U512::from(expected_transaction_cost) - refund_amount; + let bob_expected_total_balance = bob_initial_balance.total - refund_amount; + let bob_expected_available_balance = bob_current_balance.total - bob_balance_hold; + + // Alice shouldn't get anything since we are operating with no fees + let alice_expected_total_balance = alice_initial_balance.total; + let alice_expected_available_balance = alice_expected_total_balance; + + assert_eq!( + bob_current_balance.available.clone(), + bob_expected_available_balance + ); + assert_eq!( + bob_current_balance.total.clone(), + bob_expected_total_balance + ); + assert_eq!( + alice_current_balance.available.clone(), + alice_expected_available_balance + ); + assert_eq!( + alice_current_balance.total.clone(), + alice_expected_total_balance + ); + + let bob_prev_available_balance = bob_current_balance.available; + test.fixture + .run_until_block_height(block_height + 1, ONE_MIN) + .await; + let (_, bob_balance, _) = test.get_balances(Some(block_height + 1)); + assert!( + bob_prev_available_balance < bob_balance.available, + "available should have increased since some part of the hold should have been amortized" + ); + + // Check to see if more holds have amortized. 
+ let bob_prev_available_balance = bob_current_balance.available; + test.fixture + .run_until_block_height(block_height + 3, ONE_MIN) + .await; + let (_, bob_balance, _) = test.get_balances(Some(block_height + 3)); + assert!( + bob_prev_available_balance < bob_balance.available, + "available should have increased since some part of the hold should have been amortized" + ); + + // After 10s (10 blocks in this case) the holds should have been completely amortized + test.fixture + .run_until_block_height(block_height + 10, ONE_MIN) + .await; + let (_, bob_balance, _) = test.get_balances(Some(block_height + 10)); + assert_eq!( + bob_balance.total, bob_balance.available, + "available should have increased since some part of the hold should have been amortized" + ); +} + +#[tokio::test] +async fn sufficient_balance_is_available_after_amortization() { + let config = SingleTransactionTestCase::default_test_config() + .with_pricing_handling(PricingHandling::Fixed) + .with_refund_handling(RefundHandling::NoRefund) + .with_fee_handling(FeeHandling::NoFee) + .with_gas_hold_balance_handling(HoldBalanceHandling::Amortized) + .with_balance_hold_interval(TimeDiff::from_seconds(10)); + + let mut test = SingleTransactionTestCase::new( + ALICE_SECRET_KEY.clone(), + BOB_SECRET_KEY.clone(), + CHARLIE_SECRET_KEY.clone(), + Some(config), + ) + .await; + + let transfer_cost: U512 = + U512::from(test.chainspec().system_costs_config.mint_costs().transfer) * MIN_GAS_PRICE; + let min_transfer_amount = U512::from( + test.chainspec() + .transaction_config + .native_transfer_minimum_motes, + ); + let half_transfer_cost = + (Ratio::new(U512::from(1), U512::from(2)) * transfer_cost).to_integer(); + + // Fund Charlie with some token. 
+ let transfer_amount = min_transfer_amount * 2 + transfer_cost + half_transfer_cost; + let txn = transfer_txn( + BOB_SECRET_KEY.clone(), + &CHARLIE_PUBLIC_KEY, + PricingMode::Fixed { + gas_price_tolerance: MIN_GAS_PRICE, + additional_computation_factor: 0, + }, + transfer_amount, + ); + + test.fixture + .run_until_consensus_in_era(ERA_ONE, ONE_MIN) + .await; + let (_txn_hash, block_height, exec_result) = test.send_transaction(txn).await; + assert!(exec_result_is_success(&exec_result)); + + let charlie_balance = test.get_balances(Some(block_height)).2.unwrap(); + assert_eq!( + charlie_balance.available.clone(), + charlie_balance.total.clone() + ); + assert_eq!(charlie_balance.available.clone(), transfer_amount); + + // Now Charlie has balance to do 2 transfers of the minimum amount but can't pay for both as the + // same time. Let's say the min transfer amount is 2_500_000_000 and the cost of a transfer + // is 50_000. Charlie now has 5_000_075_000 as set up above. He can transfer 2_500_000_000 + // which will put a hold of 50_000. His available balance would be 2_500_025_000. + // He can't issue a new transfer of 2_500_000_000 right away because he doesn't have enough + // balance to pay for the transfer. He'll need to wait until at least half of the holds + // amortize. In this case he needs to wait half of the amortization time for 25_000 to + // become available to him. After this period, he will have 2_500_050_000 available which + // will allow him to do another transfer. 
+ let txn = transfer_txn( + CHARLIE_SECRET_KEY.clone(), + &BOB_PUBLIC_KEY, + PricingMode::Fixed { + gas_price_tolerance: MIN_GAS_PRICE, + additional_computation_factor: 0, + }, + min_transfer_amount, + ); + let (_txn_hash, block_height, exec_result) = test.send_transaction(txn).await; + assert!(exec_result_is_success(&exec_result)); + + let charlie_balance = test.get_balances(Some(block_height)).2.unwrap(); + assert_eq!( + charlie_balance.total.clone(), + min_transfer_amount + transfer_cost + half_transfer_cost, /* one `min_transfer_amount` + * should have gone to Bob. */ + ); + assert_eq!( + charlie_balance.available.clone(), + min_transfer_amount + half_transfer_cost, // transfer cost should be held. + ); + + // Let's wait for about 5 sec (5 blocks in this case) which should provide enough time for at + // half of the holds to get amortized. + test.fixture + .run_until_block_height(block_height + 5, ONE_MIN) + .await; + let charlie_balance = test.get_balances(Some(block_height + 5)).2.unwrap(); + assert!( + charlie_balance.available >= min_transfer_amount + transfer_cost, /* right now he should + * have enough to make + * a transfer. */ + ); + assert!( + charlie_balance.available < charlie_balance.total, /* some of the holds + * should still be in + * place. */ + ); + + // Send another transfer to Bob for `min_transfer_amount`. + let txn = transfer_txn( + CHARLIE_SECRET_KEY.clone(), + &BOB_PUBLIC_KEY, + PricingMode::Fixed { + gas_price_tolerance: MIN_GAS_PRICE, + additional_computation_factor: 0, + }, + min_transfer_amount, + ); + let (_txn_hash, block_height, exec_result) = test.send_transaction(txn).await; + assert!(exec_result_is_success(&exec_result)); // We expect this transfer to succeed since Charlie has enough balance. + let charlie_balance = test.get_balances(Some(block_height)).2.unwrap(); + assert_eq!( + charlie_balance.total.clone(), + transfer_cost + half_transfer_cost, // two `min_transfer_amount` should have gone to Bob. 
+ ); +} + +#[tokio::test] +async fn validator_credit_is_written_and_cleared_after_auction() { + let config = SingleTransactionTestCase::default_test_config() + .with_pricing_handling(PricingHandling::Fixed) + .with_refund_handling(RefundHandling::NoRefund) + .with_fee_handling(FeeHandling::NoFee) + .with_gas_hold_balance_handling(HoldBalanceHandling::Accrued); + + let mut test = SingleTransactionTestCase::new( + ALICE_SECRET_KEY.clone(), + BOB_SECRET_KEY.clone(), + CHARLIE_SECRET_KEY.clone(), + Some(config), + ) + .await; + + let transfer_cost: U512 = + U512::from(test.chainspec().system_costs_config.mint_costs().transfer) * MIN_GAS_PRICE; + let min_transfer_amount = U512::from( + test.chainspec() + .transaction_config + .native_transfer_minimum_motes, + ); + let half_transfer_cost = + (Ratio::new(U512::from(1), U512::from(2)) * transfer_cost).to_integer(); + + // Fund Charlie with some token. + let transfer_amount = min_transfer_amount * 2 + transfer_cost + half_transfer_cost; + let txn = transfer_txn( + BOB_SECRET_KEY.clone(), + &CHARLIE_PUBLIC_KEY, + PricingMode::Fixed { + gas_price_tolerance: MIN_GAS_PRICE, + additional_computation_factor: 0, + }, + transfer_amount, + ); + + test.fixture + .run_until_consensus_in_era(ERA_ONE, ONE_MIN) + .await; + let (_txn_hash, block_height, exec_result) = test.send_transaction(txn).await; + assert!(exec_result_is_success(&exec_result)); + + let charlie_balance = test.get_balances(Some(block_height)).2.unwrap(); + assert_eq!( + charlie_balance.available.clone(), + charlie_balance.total.clone() + ); + assert_eq!(charlie_balance.available.clone(), transfer_amount); + + let bids = + get_bids(&mut test.fixture, Some(block_height)).expect("Expected to get some bid records."); + + let _ = bids + .into_iter() + .find(|bid_kind| match bid_kind { + BidKind::Credit(credit) => { + credit.amount() == transfer_cost + && credit.validator_public_key() == &*ALICE_PUBLIC_KEY // Alice is the proposer. 
+ } + _ => false, + }) + .expect("Expected to find the credit for the consumed transfer cost in the bid records."); + + test.fixture + .run_until_consensus_in_era( + ERA_ONE.saturating_add(test.chainspec().core_config.auction_delay), + ONE_MIN, + ) + .await; + + // Check that the credits were cleared after the auction. + let bids = get_bids(&mut test.fixture, None).expect("Expected to get some bid records."); + assert!(!bids + .into_iter() + .any(|bid| matches!(bid, BidKind::Credit(_)))); +} + +#[tokio::test] +async fn add_and_withdraw_bid_transaction() { + let config = SingleTransactionTestCase::default_test_config() + .with_pricing_handling(PricingHandling::Fixed) + .with_refund_handling(RefundHandling::NoRefund) + .with_fee_handling(FeeHandling::NoFee) + .with_gas_hold_balance_handling(HoldBalanceHandling::Accrued); + + let mut test = SingleTransactionTestCase::new( + ALICE_SECRET_KEY.clone(), + BOB_SECRET_KEY.clone(), + CHARLIE_SECRET_KEY.clone(), + Some(config), + ) + .await; + + let bid_amount = test.chainspec().core_config.minimum_bid_amount + 10; + + let mut txn = Transaction::from( + TransactionV1Builder::new_add_bid( + PublicKey::from(&**BOB_SECRET_KEY), + 0, + bid_amount, + None, + None, + None, + ) + .unwrap() + .with_chain_name(CHAIN_NAME) + .with_initiator_addr(PublicKey::from(&**BOB_SECRET_KEY)) + .build() + .unwrap(), + ); + txn.sign(&BOB_SECRET_KEY); + + test.fixture + .run_until_consensus_in_era(ERA_ONE, ONE_MIN) + .await; + + let (_, _bob_initial_balance, _) = test.get_balances(None); + let (_txn_hash, _block_height, exec_result) = test.send_transaction(txn).await; + assert!(exec_result_is_success(&exec_result)); + + test.fixture + .run_until_consensus_in_era(ERA_TWO, ONE_MIN) + .await; + + let mut txn = Transaction::from( + TransactionV1Builder::new_withdraw_bid(PublicKey::from(&**BOB_SECRET_KEY), bid_amount) + .unwrap() + .with_chain_name(CHAIN_NAME) + .with_initiator_addr(PublicKey::from(&**BOB_SECRET_KEY)) + .build() + .unwrap(), + ); + 
txn.sign(&BOB_SECRET_KEY); + + let (_txn_hash, _block_height, exec_result) = test.send_transaction(txn).await; + assert!(exec_result_is_success(&exec_result)); +} + +#[tokio::test] +async fn delegate_and_undelegate_bid_transaction() { + let config = SingleTransactionTestCase::default_test_config() + .with_pricing_handling(PricingHandling::Fixed) + .with_refund_handling(RefundHandling::NoRefund) + .with_fee_handling(FeeHandling::NoFee) + .with_gas_hold_balance_handling(HoldBalanceHandling::Accrued); + + let mut test = SingleTransactionTestCase::new( + ALICE_SECRET_KEY.clone(), + BOB_SECRET_KEY.clone(), + CHARLIE_SECRET_KEY.clone(), + Some(config), + ) + .await; + + let delegate_amount = U512::from(500_000_000_000u64); + + let mut txn = Transaction::from( + TransactionV1Builder::new_delegate( + PublicKey::from(&**BOB_SECRET_KEY), + PublicKey::from(&**ALICE_SECRET_KEY), + delegate_amount, + ) + .unwrap() + .with_chain_name(CHAIN_NAME) + .with_initiator_addr(PublicKey::from(&**BOB_SECRET_KEY)) + .build() + .unwrap(), + ); + txn.sign(&BOB_SECRET_KEY); + + let (_txn_hash, _block_height, exec_result) = test.send_transaction(txn).await; + assert!(exec_result_is_success(&exec_result)); + + test.fixture + .run_until_consensus_in_era(ERA_ONE, ONE_MIN) + .await; + + let mut txn = Transaction::from( + TransactionV1Builder::new_undelegate( + PublicKey::from(&**BOB_SECRET_KEY), + PublicKey::from(&**ALICE_SECRET_KEY), + delegate_amount, + ) + .unwrap() + .with_chain_name(CHAIN_NAME) + .with_initiator_addr(PublicKey::from(&**BOB_SECRET_KEY)) + .build() + .unwrap(), + ); + txn.sign(&BOB_SECRET_KEY); + + let (_txn_hash, _block_height, exec_result) = test.send_transaction(txn).await; + assert!(exec_result_is_success(&exec_result)); +} + +#[tokio::test] +async fn insufficient_funds_transfer_from_account() { + let config = SingleTransactionTestCase::default_test_config() + .with_pricing_handling(PricingHandling::Fixed) + .with_refund_handling(RefundHandling::NoRefund) + 
.with_fee_handling(FeeHandling::NoFee) + .with_gas_hold_balance_handling(HoldBalanceHandling::Accrued); + + let mut test = SingleTransactionTestCase::new( + ALICE_SECRET_KEY.clone(), + BOB_SECRET_KEY.clone(), + CHARLIE_SECRET_KEY.clone(), + Some(config), + ) + .await; + + let transfer_amount = U512::max_value(); + + let txn_v1 = + TransactionV1Builder::new_transfer(transfer_amount, None, ALICE_PUBLIC_KEY.clone(), None) + .unwrap() + .with_chain_name(CHAIN_NAME) + .with_initiator_addr(PublicKey::from(&**BOB_SECRET_KEY)) + .build() + .unwrap(); + let price = txn_v1 + .payment_amount() + .expect("must have payment amount as txns are using payment_limited"); + let mut txn = Transaction::from(txn_v1); + txn.sign(&BOB_SECRET_KEY); + + let (_txn_hash, _block_height, exec_result) = test.send_transaction(txn).await; + let ExecutionResult::V2(result) = exec_result else { + panic!("Expected ExecutionResult::V2 but got {:?}", exec_result); + }; + let expected_cost: U512 = U512::from(price) * MIN_GAS_PRICE; + + assert_eq!(result.error_message.as_deref(), Some("Insufficient funds")); + assert_eq!(result.cost, expected_cost); +} + +#[tokio::test] +async fn insufficient_funds_add_bid() { + let config = SingleTransactionTestCase::default_test_config() + .with_pricing_handling(PricingHandling::Fixed) + .with_refund_handling(RefundHandling::NoRefund) + .with_fee_handling(FeeHandling::NoFee) + .with_gas_hold_balance_handling(HoldBalanceHandling::Accrued); + + let mut test = SingleTransactionTestCase::new( + ALICE_SECRET_KEY.clone(), + BOB_SECRET_KEY.clone(), + CHARLIE_SECRET_KEY.clone(), + Some(config), + ) + .await; + + test.fixture + .run_until_consensus_in_era(ERA_ONE, ONE_MIN) + .await; + + let (_, bob_initial_balance, _) = test.get_balances(None); + let bid_amount = bob_initial_balance.total; + + let txn = + TransactionV1Builder::new_add_bid(BOB_PUBLIC_KEY.clone(), 0, bid_amount, None, None, None) + .unwrap() + .with_chain_name(CHAIN_NAME) + 
.with_initiator_addr(PublicKey::from(&**BOB_SECRET_KEY)) + .build() + .unwrap(); + let price = txn.payment_amount().expect("must get payment amount"); + let mut txn = Transaction::from(txn); + txn.sign(&BOB_SECRET_KEY); + + let (_txn_hash, _block_height, exec_result) = test.send_transaction(txn).await; + let ExecutionResult::V2(result) = exec_result else { + panic!("Expected ExecutionResult::V2 but got {:?}", exec_result); + }; + let bid_cost: U512 = U512::from(price) * MIN_GAS_PRICE; + + assert_eq!( + result.error_message.as_deref(), + Some("ApiError::AuctionError(TransferToBidPurse) [64516]") + ); + assert_eq!(result.cost, bid_cost); +} + +#[tokio::test] +async fn insufficient_funds_transfer_from_purse() { + let config = SingleTransactionTestCase::default_test_config() + .with_pricing_handling(PricingHandling::Fixed) + .with_refund_handling(RefundHandling::NoRefund) + .with_fee_handling(FeeHandling::NoFee) + .with_gas_hold_balance_handling(HoldBalanceHandling::Accrued); + + let mut test = SingleTransactionTestCase::new( + ALICE_SECRET_KEY.clone(), + BOB_SECRET_KEY.clone(), + CHARLIE_SECRET_KEY.clone(), + Some(config), + ) + .await; + + let purse_name = "test_purse"; + + test.fixture + .run_until_consensus_in_era(ERA_ONE, ONE_MIN) + .await; + + // first we set up a purse for Bob + let purse_create_contract = RESOURCES_PATH + .join("..") + .join("target") + .join("wasm32-unknown-unknown") + .join("release") + .join("transfer_main_purse_to_new_purse.wasm"); + let module_bytes = + Bytes::from(std::fs::read(purse_create_contract).expect("cannot read module bytes")); + + let mut txn = Transaction::from( + TransactionV1Builder::new_session( + false, + module_bytes, + TransactionRuntimeParams::VmCasperV1, + ) + .with_runtime_args(runtime_args! 
{ "destination" => purse_name, "amount" => U512::zero() }) + .with_chain_name(CHAIN_NAME) + .with_initiator_addr(BOB_PUBLIC_KEY.clone()) + .build() + .unwrap(), + ); + txn.sign(&BOB_SECRET_KEY); + let (_txn_hash, _block_height, exec_result) = test.send_transaction(txn).await; + assert!(exec_result_is_success(&exec_result), "{:?}", exec_result); + + let state_root_hash = *test.fixture.highest_complete_block().state_root_hash(); + let entity_addr = get_entity_addr_from_account_hash( + &mut test.fixture, + state_root_hash, + BOB_PUBLIC_KEY.to_account_hash(), + ); + let key = get_entity_named_key(&mut test.fixture, state_root_hash, entity_addr, purse_name) + .expect("expected a key"); + let uref = *key.as_uref().expect("Expected a URef"); + + // now we try to transfer from the purse we just created + let transfer_amount = U512::max_value(); + let txn = TransactionV1Builder::new_transfer( + transfer_amount, + Some(uref), + ALICE_PUBLIC_KEY.clone(), + None, + ) + .unwrap() + .with_chain_name(CHAIN_NAME) + .with_initiator_addr(PublicKey::from(&**BOB_SECRET_KEY)) + .build() + .unwrap(); + let price = txn.payment_amount().expect("must get payment amount"); + let mut txn = Transaction::from(txn); + txn.sign(&BOB_SECRET_KEY); + + let (_txn_hash, _block_height, exec_result) = test.send_transaction(txn).await; + let ExecutionResult::V2(result) = exec_result else { + panic!("Expected ExecutionResult::V2 but got {:?}", exec_result); + }; + let transfer_cost: U512 = U512::from(price) * MIN_GAS_PRICE; + + assert_eq!(result.error_message.as_deref(), Some("Insufficient funds")); + assert_eq!(result.cost, transfer_cost); +} + +#[tokio::test] +async fn insufficient_funds_when_caller_lacks_minimum_balance() { + let config = SingleTransactionTestCase::default_test_config() + .with_pricing_handling(PricingHandling::Fixed) + .with_refund_handling(RefundHandling::NoRefund) + .with_fee_handling(FeeHandling::NoFee) + .with_gas_hold_balance_handling(HoldBalanceHandling::Accrued); + + let mut 
test = SingleTransactionTestCase::new( + ALICE_SECRET_KEY.clone(), + BOB_SECRET_KEY.clone(), + CHARLIE_SECRET_KEY.clone(), + Some(config), + ) + .await; + + test.fixture + .run_until_consensus_in_era(ERA_ONE, ONE_MIN) + .await; + + let (_, bob_initial_balance, _) = test.get_balances(None); + let transfer_amount = bob_initial_balance.total - U512::one(); + let txn = + TransactionV1Builder::new_transfer(transfer_amount, None, ALICE_PUBLIC_KEY.clone(), None) + .unwrap() + .with_chain_name(CHAIN_NAME) + .with_initiator_addr(PublicKey::from(&**BOB_SECRET_KEY)) + .build() + .unwrap(); + let price = txn.payment_amount().expect("must get payment amount"); + let mut txn = Transaction::from(txn); + txn.sign(&BOB_SECRET_KEY); + + let (_txn_hash, _block_height, exec_result) = test.send_transaction(txn).await; + let ExecutionResult::V2(result) = exec_result else { + panic!("Expected ExecutionResult::V2 but got {:?}", exec_result); + }; + let transfer_cost: U512 = U512::from(price) * MIN_GAS_PRICE; + + assert_eq!(result.error_message.as_deref(), Some("Insufficient funds")); + assert_eq!(result.cost, transfer_cost); +} + +#[tokio::test] +async fn charge_when_session_code_succeeds() { + let config = SingleTransactionTestCase::default_test_config() + .with_pricing_handling(PricingHandling::Fixed) + .with_refund_handling(RefundHandling::NoRefund) + .with_fee_handling(FeeHandling::PayToProposer); + + let mut test = SingleTransactionTestCase::new( + ALICE_SECRET_KEY.clone(), + BOB_SECRET_KEY.clone(), + CHARLIE_SECRET_KEY.clone(), + Some(config), + ) + .await; + + test.fixture + .run_until_consensus_in_era(ERA_ONE, ONE_MIN) + .await; + + let contract = RESOURCES_PATH + .join("..") + .join("target") + .join("wasm32-unknown-unknown") + .join("release") + .join("transfer_purse_to_account.wasm"); + let module_bytes = Bytes::from(std::fs::read(contract).expect("cannot read module bytes")); + + let (alice_initial_balance, bob_initial_balance, _) = test.get_balances(None); + + let 
transferred_amount = 1; + let mut txn = Transaction::from( + TransactionV1Builder::new_session( + false, + module_bytes, + TransactionRuntimeParams::VmCasperV1, + ) + .with_runtime_args(runtime_args! { + ARG_TARGET => CHARLIE_PUBLIC_KEY.to_account_hash(), + ARG_AMOUNT => U512::from(transferred_amount) + }) + .with_chain_name(CHAIN_NAME) + .with_initiator_addr(BOB_PUBLIC_KEY.clone()) + .with_pricing_mode(PricingMode::Fixed { + gas_price_tolerance: 5, + additional_computation_factor: 2, /*Makes the transaction + * "Large" despite the fact that the actual + * WASM bytes categorize it as "Small" */ + }) + .build() + .unwrap(), + ); + txn.sign(&BOB_SECRET_KEY); + let (_txn_hash, block_height, exec_result) = test.send_transaction(txn).await; + assert!(exec_result_is_success(&exec_result), "{:?}", exec_result); + + let (alice_current_balance, bob_current_balance, _) = test.get_balances(Some(block_height)); + // alice should get the fee since she is the proposer. + let fee = alice_current_balance.total - alice_initial_balance.total; + + assert!( + fee > U512::zero(), + "fee is {}, expected to be greater than 0", + fee + ); + assert_eq!( + bob_current_balance.total, + bob_initial_balance.total - transferred_amount - fee, + "bob should pay the fee" + ); +} + +#[tokio::test] +async fn charge_when_session_code_fails_with_user_error() { + let config = SingleTransactionTestCase::default_test_config() + .with_pricing_handling(PricingHandling::Fixed) + .with_refund_handling(RefundHandling::NoRefund) + .with_fee_handling(FeeHandling::PayToProposer); + + let mut test = SingleTransactionTestCase::new( + ALICE_SECRET_KEY.clone(), + BOB_SECRET_KEY.clone(), + CHARLIE_SECRET_KEY.clone(), + Some(config), + ) + .await; + + test.fixture + .run_until_consensus_in_era(ERA_ONE, ONE_MIN) + .await; + + let revert_contract = RESOURCES_PATH + .join("..") + .join("target") + .join("wasm32-unknown-unknown") + .join("release") + .join("revert.wasm"); + let module_bytes = + 
Bytes::from(std::fs::read(revert_contract).expect("cannot read module bytes")); + + let (alice_initial_balance, bob_initial_balance, _) = test.get_balances(None); + + let mut txn = Transaction::from( + TransactionV1Builder::new_session( + false, + module_bytes, + TransactionRuntimeParams::VmCasperV1, + ) + .with_chain_name(CHAIN_NAME) + .with_initiator_addr(BOB_PUBLIC_KEY.clone()) + .build() + .unwrap(), + ); + txn.sign(&BOB_SECRET_KEY); + let (_txn_hash, block_height, exec_result) = test.send_transaction(txn).await; + assert!( + matches!( + &exec_result, + ExecutionResult::V2(res) if res.error_message.as_deref() == Some("User error: 100") + ), + "{:?}", + exec_result.error_message() + ); + + let (alice_current_balance, bob_current_balance, _) = test.get_balances(Some(block_height)); + // alice should get the fee since she is the proposer. + let fee = alice_current_balance.total - alice_initial_balance.total; + + assert!( + fee > U512::zero(), + "fee is {}, expected to be greater than 0", + fee + ); + let init = bob_initial_balance.total; + let curr = bob_current_balance.total; + let actual = curr; + let expected = init - fee; + assert_eq!(actual, expected, "init {} curr {} fee {}", init, curr, fee,); +} + +#[tokio::test] +async fn charge_when_session_code_runs_out_of_gas() { + let config = SingleTransactionTestCase::default_test_config() + .with_pricing_handling(PricingHandling::Fixed) + .with_refund_handling(RefundHandling::NoRefund) + .with_fee_handling(FeeHandling::PayToProposer); + + let mut test = SingleTransactionTestCase::new( + ALICE_SECRET_KEY.clone(), + BOB_SECRET_KEY.clone(), + CHARLIE_SECRET_KEY.clone(), + Some(config), + ) + .await; + + test.fixture + .run_until_consensus_in_era(ERA_ONE, ONE_MIN) + .await; + + let revert_contract = RESOURCES_PATH + .join("..") + .join("target") + .join("wasm32-unknown-unknown") + .join("release") + .join("endless_loop.wasm"); + let module_bytes = + Bytes::from(std::fs::read(revert_contract).expect("cannot read module 
bytes")); + + let (alice_initial_balance, bob_initial_balance, _) = test.get_balances(None); + + let mut txn = Transaction::from( + TransactionV1Builder::new_session( + false, + module_bytes, + TransactionRuntimeParams::VmCasperV1, + ) + .with_chain_name(CHAIN_NAME) + .with_initiator_addr(BOB_PUBLIC_KEY.clone()) + .build() + .unwrap(), + ); + txn.sign(&BOB_SECRET_KEY); + let (_txn_hash, block_height, exec_result) = test.send_transaction(txn).await; + assert!( + matches!( + &exec_result, + ExecutionResult::V2(res) if res.error_message.as_deref() == Some("Out of gas error") + ), + "{:?}", + exec_result + ); + + let (alice_current_balance, bob_current_balance, _) = test.get_balances(Some(block_height)); + // alice should get the fee since she is the proposer. + let fee = alice_current_balance.total - alice_initial_balance.total; + + assert!( + fee > U512::zero(), + "fee is {}, expected to be greater than 0", + fee + ); + assert_eq!( + bob_current_balance.total, + bob_initial_balance.total - fee, + "bob should pay the fee" + ); +} + +#[tokio::test] +async fn successful_purse_to_purse_transfer() { + let config = SingleTransactionTestCase::default_test_config() + .with_pricing_handling(PricingHandling::Fixed) + .with_refund_handling(RefundHandling::NoRefund) + .with_fee_handling(FeeHandling::NoFee) + .with_gas_hold_balance_handling(HoldBalanceHandling::Accrued); + + let mut test = SingleTransactionTestCase::new( + ALICE_SECRET_KEY.clone(), + BOB_SECRET_KEY.clone(), + CHARLIE_SECRET_KEY.clone(), + Some(config), + ) + .await; + + let purse_name = "test_purse"; + + test.fixture + .run_until_consensus_in_era(ERA_ONE, ONE_MIN) + .await; + + let (alice_initial_balance, _, _) = test.get_balances(None); + + // first we set up a purse for Bob + let purse_create_contract = RESOURCES_PATH + .join("..") + .join("target") + .join("wasm32-unknown-unknown") + .join("release") + .join("transfer_main_purse_to_new_purse.wasm"); + let module_bytes = + 
Bytes::from(std::fs::read(purse_create_contract).expect("cannot read module bytes")); + + let baseline_motes = test + .fixture + .chainspec + .core_config + .baseline_motes_amount_u512(); + + let mut txn = Transaction::from( + TransactionV1Builder::new_session( + false, + module_bytes, + TransactionRuntimeParams::VmCasperV1, + ) + .with_runtime_args( + runtime_args! { "destination" => purse_name, "amount" => baseline_motes + U512::one() }, + ) + .with_chain_name(CHAIN_NAME) + .with_initiator_addr(BOB_PUBLIC_KEY.clone()) + .build() + .unwrap(), + ); + txn.sign(&BOB_SECRET_KEY); + let (_txn_hash, _block_height, exec_result) = test.send_transaction(txn).await; + assert!(exec_result_is_success(&exec_result), "{:?}", exec_result); + + let state_root_hash = *test.fixture.highest_complete_block().state_root_hash(); + let bob_addr = get_entity_addr_from_account_hash( + &mut test.fixture, + state_root_hash, + BOB_PUBLIC_KEY.to_account_hash(), + ); + let bob_purse_key = + get_entity_named_key(&mut test.fixture, state_root_hash, bob_addr, purse_name) + .expect("expected a key"); + let bob_purse = *bob_purse_key.as_uref().expect("Expected a URef"); + + let alice_addr = get_entity_addr_from_account_hash( + &mut test.fixture, + state_root_hash, + ALICE_PUBLIC_KEY.to_account_hash(), + ); + let alice = get_entity(&mut test.fixture, state_root_hash, alice_addr); + + // now we try to transfer from the purse we just created + let transfer_amount = 1; + let mut txn = Transaction::from( + TransactionV1Builder::new_transfer( + transfer_amount, + Some(bob_purse), + alice.main_purse(), + None, + ) + .unwrap() + .with_chain_name(CHAIN_NAME) + .with_initiator_addr(PublicKey::from(&**BOB_SECRET_KEY)) + .build() + .unwrap(), + ); + txn.sign(&BOB_SECRET_KEY); + + let (_txn_hash, block_height, exec_result) = test.send_transaction(txn).await; + assert!(exec_result_is_success(&exec_result), "{:?}", exec_result); + + let (alice_current_balance, _, _) = test.get_balances(Some(block_height)); + 
assert_eq!( + alice_current_balance.total, + alice_initial_balance.total + transfer_amount, + ); +} + +#[tokio::test] +async fn successful_purse_to_account_transfer() { + let config = SingleTransactionTestCase::default_test_config() + .with_pricing_handling(PricingHandling::Fixed) + .with_refund_handling(RefundHandling::NoRefund) + .with_fee_handling(FeeHandling::NoFee) + .with_gas_hold_balance_handling(HoldBalanceHandling::Accrued); + + let mut test = SingleTransactionTestCase::new( + ALICE_SECRET_KEY.clone(), + BOB_SECRET_KEY.clone(), + CHARLIE_SECRET_KEY.clone(), + Some(config), + ) + .await; + + let purse_name = "test_purse"; + + test.fixture + .run_until_consensus_in_era(ERA_ONE, ONE_MIN) + .await; + + let (alice_initial_balance, _, _) = test.get_balances(None); + + // first we set up a purse for Bob + let purse_create_contract = RESOURCES_PATH + .join("..") + .join("target") + .join("wasm32-unknown-unknown") + .join("release") + .join("transfer_main_purse_to_new_purse.wasm"); + let module_bytes = + Bytes::from(std::fs::read(purse_create_contract).expect("cannot read module bytes")); + + let baseline_motes = test + .fixture + .chainspec + .core_config + .baseline_motes_amount_u512(); + let mut txn = Transaction::from( + TransactionV1Builder::new_session( + false, + module_bytes, + TransactionRuntimeParams::VmCasperV1, + ) + .with_runtime_args( + runtime_args! 
{ "destination" => purse_name, "amount" => baseline_motes + U512::one() }, + ) + .with_chain_name(CHAIN_NAME) + .with_initiator_addr(BOB_PUBLIC_KEY.clone()) + .build() + .unwrap(), + ); + txn.sign(&BOB_SECRET_KEY); + let (_txn_hash, _block_height, exec_result) = test.send_transaction(txn).await; + assert!(exec_result_is_success(&exec_result), "{:?}", exec_result); + + let state_root_hash = *test.fixture.highest_complete_block().state_root_hash(); + let bob_addr = get_entity_addr_from_account_hash( + &mut test.fixture, + state_root_hash, + BOB_PUBLIC_KEY.to_account_hash(), + ); + let bob_purse_key = + get_entity_named_key(&mut test.fixture, state_root_hash, bob_addr, purse_name) + .expect("expected a key"); + let bob_purse = *bob_purse_key.as_uref().expect("Expected a URef"); + + // now we try to transfer from the purse we just created + let transfer_amount = 1; + let mut txn = Transaction::from( + TransactionV1Builder::new_transfer( + transfer_amount, + Some(bob_purse), + ALICE_PUBLIC_KEY.clone(), + None, + ) + .unwrap() + .with_chain_name(CHAIN_NAME) + .with_initiator_addr(PublicKey::from(&**BOB_SECRET_KEY)) + .build() + .unwrap(), + ); + txn.sign(&BOB_SECRET_KEY); + + let (_txn_hash, block_height, exec_result) = test.send_transaction(txn).await; + assert!(exec_result_is_success(&exec_result), "{:?}", exec_result); + + let (alice_current_balance, _, _) = test.get_balances(Some(block_height)); + assert_eq!( + alice_current_balance.total, + alice_initial_balance.total + transfer_amount, + ); +} + +async fn bob_transfers_to_charlie_via_native_transfer_deploy( + configs_override: ConfigsOverride, + with_source: bool, +) -> ExecutionResult { + let mut test = SingleTransactionTestCase::new( + ALICE_SECRET_KEY.clone(), + BOB_SECRET_KEY.clone(), + CHARLIE_SECRET_KEY.clone(), + Some(configs_override), + ) + .await; + + test.fixture + .run_until_consensus_in_era(ERA_ONE, ONE_MIN) + .await; + + let state_root_hash = *test.fixture.highest_complete_block().state_root_hash(); + 
let entity = get_entity_by_account_hash( + &mut test.fixture, + state_root_hash, + BOB_PUBLIC_KEY.to_account_hash(), + ); + + let source = if with_source { + Some(entity.main_purse()) + } else { + None + }; + + let mut txn: Transaction = Deploy::native_transfer( + CHAIN_NAME.to_string(), + source, + BOB_PUBLIC_KEY.clone(), + CHARLIE_PUBLIC_KEY.clone(), + None, + Timestamp::now(), + TimeDiff::from_seconds(600), + 10, + ) + .into(); + txn.sign(&BOB_SECRET_KEY); + let (_txn_hash, _block_height, exec_result) = test.send_transaction(txn).await; + exec_result +} + +#[tokio::test] +async fn should_transfer_with_source_purse_deploy_fixed_norefund_nofee() { + let config = SingleTransactionTestCase::default_test_config() + .with_pricing_handling(PricingHandling::Fixed) + .with_refund_handling(RefundHandling::NoRefund) + .with_fee_handling(FeeHandling::NoFee) + .with_gas_hold_balance_handling(HoldBalanceHandling::Accrued); + let exec_result = bob_transfers_to_charlie_via_native_transfer_deploy(config, true).await; + + assert!(exec_result_is_success(&exec_result), "{:?}", exec_result); + assert_eq!( + exec_result.transfers().len(), + 1, + "native transfer should have exactly 1 transfer" + ); +} + +#[tokio::test] +async fn should_transfer_with_source_purse_deploy_payment_limited_refund_fee() { + let config = SingleTransactionTestCase::default_test_config() + .with_pricing_handling(PricingHandling::PaymentLimited) + .with_refund_handling(RefundHandling::Refund { + refund_ratio: Ratio::new(99, 100), + }) + .with_fee_handling(FeeHandling::PayToProposer) + .with_gas_hold_balance_handling(HoldBalanceHandling::Accrued); + let exec_result = bob_transfers_to_charlie_via_native_transfer_deploy(config, true).await; + assert!(exec_result_is_success(&exec_result), "{:?}", exec_result); + assert_eq!( + exec_result.transfers().len(), + 1, + "native transfer should have exactly 1 transfer" + ); + assert_eq!( + exec_result.refund(), + Some(U512::zero()), + "cost should equal consumed thus no 
refund" + ); +} + +#[tokio::test] +async fn should_transfer_with_main_purse_deploy_fixed_norefund_nofee() { + let config = SingleTransactionTestCase::default_test_config() + .with_pricing_handling(PricingHandling::Fixed) + .with_refund_handling(RefundHandling::NoRefund) + .with_fee_handling(FeeHandling::NoFee) + .with_gas_hold_balance_handling(HoldBalanceHandling::Accrued); + let exec_result = bob_transfers_to_charlie_via_native_transfer_deploy(config, false).await; + + assert!(exec_result_is_success(&exec_result), "{:?}", exec_result); + assert_eq!( + exec_result.transfers().len(), + 1, + "native transfer should have exactly 1 transfer" + ); +} + +#[tokio::test] +async fn should_transfer_with_main_purse_deploy_payment_limited_refund_fee() { + let config = SingleTransactionTestCase::default_test_config() + .with_pricing_handling(PricingHandling::PaymentLimited) + .with_refund_handling(RefundHandling::Refund { + refund_ratio: Ratio::new(99, 100), + }) + .with_fee_handling(FeeHandling::PayToProposer) + .with_gas_hold_balance_handling(HoldBalanceHandling::Accrued); + let exec_result = bob_transfers_to_charlie_via_native_transfer_deploy(config, false).await; + assert!(exec_result_is_success(&exec_result), "{:?}", exec_result); + assert_eq!( + exec_result.transfers().len(), + 1, + "native transfer should have exactly 1 transfer" + ); + assert_eq!( + exec_result.refund(), + Some(U512::zero()), + "cost should equal consumed thus no refund" + ); +} + +#[tokio::test] +async fn out_of_gas_txn_does_not_produce_effects() { + let config = SingleTransactionTestCase::default_test_config() + .with_pricing_handling(PricingHandling::Fixed) + .with_refund_handling(RefundHandling::NoRefund) + .with_fee_handling(FeeHandling::PayToProposer); + + let mut test = SingleTransactionTestCase::new( + ALICE_SECRET_KEY.clone(), + BOB_SECRET_KEY.clone(), + CHARLIE_SECRET_KEY.clone(), + Some(config), + ) + .await; + + test.fixture + .run_until_consensus_in_era(ERA_ONE, ONE_MIN) + .await; + + // 
This WASM creates named key called "new_key". Then it would loop endlessly trying to write a + // value to storage. Eventually it will run out of gas and it should exit causing a revert. + let revert_contract = RESOURCES_PATH + .join("..") + .join("target") + .join("wasm32-unknown-unknown") + .join("release") + .join("endless_loop_with_effects.wasm"); + let module_bytes = + Bytes::from(std::fs::read(revert_contract).expect("cannot read module bytes")); + + let mut txn = Transaction::from( + TransactionV1Builder::new_session( + false, + module_bytes, + TransactionRuntimeParams::VmCasperV1, + ) + .with_chain_name(CHAIN_NAME) + .with_initiator_addr(BOB_PUBLIC_KEY.clone()) + .build() + .unwrap(), + ); + txn.sign(&BOB_SECRET_KEY); + let (_txn_hash, block_height, exec_result) = test.send_transaction(txn).await; + assert!( + matches!( + &exec_result, + ExecutionResult::V2(res) if res.error_message.as_deref() == Some("Out of gas error") + ), + "{:?}", + exec_result + ); + + let state_root_hash = *test + .fixture + .get_block_by_height(block_height) + .state_root_hash(); + let bob_addr = get_entity_addr_from_account_hash( + &mut test.fixture, + state_root_hash, + BOB_PUBLIC_KEY.to_account_hash(), + ); + + // Named key should not exist since the execution was reverted because it was out of gas. 
+ assert!( + get_entity_named_key(&mut test.fixture, state_root_hash, bob_addr, "new_key").is_none() + ); +} + +#[tokio::test] +async fn gas_holds_accumulate_for_multiple_transactions_in_the_same_block() { + let config = SingleTransactionTestCase::default_test_config() + .with_min_gas_price(MIN_GAS_PRICE) + .with_max_gas_price(MIN_GAS_PRICE) + .with_pricing_handling(PricingHandling::Fixed) + .with_refund_handling(RefundHandling::NoRefund) + .with_fee_handling(FeeHandling::NoFee) + .with_balance_hold_interval(TimeDiff::from_seconds(5)); + + let mut test = SingleTransactionTestCase::new( + ALICE_SECRET_KEY.clone(), + BOB_SECRET_KEY.clone(), + CHARLIE_SECRET_KEY.clone(), + Some(config), + ) + .await; + + test.fixture + .run_until_consensus_in_era(ERA_ONE, ONE_MIN) + .await; + + const TRANSFER_AMOUNT: u64 = 30_000_000_000; + + let chain_name = test.fixture.chainspec.network_config.name.clone(); + let txn_pricing_mode = PricingMode::Fixed { + gas_price_tolerance: MIN_GAS_PRICE, + additional_computation_factor: 0, + }; + let expected_transfer_gas = test.chainspec().system_costs_config.mint_costs().transfer; + let expected_transfer_cost: U512 = U512::from(expected_transfer_gas) * MIN_GAS_PRICE; + + let mut txn_1 = Transaction::from( + TransactionV1Builder::new_transfer(TRANSFER_AMOUNT, None, CHARLIE_PUBLIC_KEY.clone(), None) + .unwrap() + .with_initiator_addr(ALICE_PUBLIC_KEY.clone()) + .with_pricing_mode(txn_pricing_mode.clone()) + .with_chain_name(chain_name.clone()) + .build() + .unwrap(), + ); + txn_1.sign(&ALICE_SECRET_KEY); + let txn_1_hash = txn_1.hash(); + + let mut txn_2 = Transaction::from( + TransactionV1Builder::new_transfer( + 2 * TRANSFER_AMOUNT, + None, + CHARLIE_PUBLIC_KEY.clone(), + None, + ) + .unwrap() + .with_initiator_addr(ALICE_PUBLIC_KEY.clone()) + .with_pricing_mode(txn_pricing_mode.clone()) + .with_chain_name(chain_name.clone()) + .build() + .unwrap(), + ); + txn_2.sign(&ALICE_SECRET_KEY); + let txn_2_hash = txn_2.hash(); + + let mut txn_3 = 
Transaction::from( + TransactionV1Builder::new_transfer( + 3 * TRANSFER_AMOUNT, + None, + CHARLIE_PUBLIC_KEY.clone(), + None, + ) + .unwrap() + .with_initiator_addr(ALICE_PUBLIC_KEY.clone()) + .with_pricing_mode(txn_pricing_mode) + .with_chain_name(chain_name) + .build() + .unwrap(), + ); + txn_3.sign(&ALICE_SECRET_KEY); + let txn_3_hash = txn_3.hash(); + + test.fixture.inject_transaction(txn_1).await; + test.fixture.inject_transaction(txn_2).await; + test.fixture.inject_transaction(txn_3).await; + + test.fixture + .run_until_executed_transaction(&txn_1_hash, TEN_SECS) + .await; + test.fixture + .run_until_executed_transaction(&txn_2_hash, TEN_SECS) + .await; + test.fixture + .run_until_executed_transaction(&txn_3_hash, TEN_SECS) + .await; + + let (_node_id, runner) = test.fixture.network.nodes().iter().next().unwrap(); + let ExecutionInfo { + block_height: txn_1_block_height, + execution_result: txn_1_exec_result, + .. + } = runner + .main_reactor() + .storage() + .read_execution_info(txn_1_hash) + .expect("Expected transaction to be included in a block."); + let ExecutionInfo { + block_height: txn_2_block_height, + execution_result: txn_2_exec_result, + .. + } = runner + .main_reactor() + .storage() + .read_execution_info(txn_2_hash) + .expect("Expected transaction to be included in a block."); + let ExecutionInfo { + block_height: txn_3_block_height, + execution_result: txn_3_exec_result, + .. 
+ } = runner + .main_reactor() + .storage() + .read_execution_info(txn_3_hash) + .expect("Expected transaction to be included in a block."); + + let txn_1_exec_result = txn_1_exec_result.expect("Expected result for txn 1"); + let txn_2_exec_result = txn_2_exec_result.expect("Expected result for txn 2"); + let txn_3_exec_result = txn_3_exec_result.expect("Expected result for txn 3"); + + assert!(exec_result_is_success(&txn_1_exec_result)); + assert!(exec_result_is_success(&txn_2_exec_result)); + assert!(exec_result_is_success(&txn_3_exec_result)); + + assert_exec_result_cost( + txn_1_exec_result, + expected_transfer_cost, + expected_transfer_gas.into(), + "gas_holds_accumulate_for_multiple_transactions_in_the_same_block txn1", + ); + assert_exec_result_cost( + txn_2_exec_result, + expected_transfer_cost, + expected_transfer_gas.into(), + "gas_holds_accumulate_for_multiple_transactions_in_the_same_block txn2", + ); + assert_exec_result_cost( + txn_3_exec_result, + expected_transfer_cost, + expected_transfer_gas.into(), + "gas_holds_accumulate_for_multiple_transactions_in_the_same_block txn3", + ); + + let max_block_height = std::cmp::max( + std::cmp::max(txn_1_block_height, txn_2_block_height), + txn_3_block_height, + ); + let alice_total_holds: U512 = get_balance( + &test.fixture, + &ALICE_PUBLIC_KEY, + Some(max_block_height), + false, + ) + .proofs_result() + .expect("Expected Alice to proof results.") + .balance_holds() + .expect("Expected Alice to have holds.") + .values() + .map(|block_holds| block_holds.values().copied().sum()) + .sum(); + assert_eq!( + alice_total_holds, + expected_transfer_cost * 3, + "Total holds amount should be equal to the cost of the 3 transactions." 
+ ); + + test.fixture + .run_until_block_height(max_block_height + 5, ONE_MIN) + .await; + let alice_total_holds: U512 = get_balance(&test.fixture, &ALICE_PUBLIC_KEY, None, false) + .proofs_result() + .expect("Expected Alice to proof results.") + .balance_holds() + .expect("Expected Alice to have holds.") + .values() + .map(|block_holds| block_holds.values().copied().sum()) + .sum(); + assert_eq!( + alice_total_holds, + U512::from(0), + "Holds should have expired." + ); +} + +#[tokio::test] +async fn gh_5058_regression_custom_payment_with_deploy_variant_works() { + let config = SingleTransactionTestCase::default_test_config() + .with_pricing_handling(PricingHandling::PaymentLimited) + .with_refund_handling(RefundHandling::NoRefund) + .with_fee_handling(FeeHandling::NoFee); + + let mut test = SingleTransactionTestCase::new( + ALICE_SECRET_KEY.clone(), + BOB_SECRET_KEY.clone(), + CHARLIE_SECRET_KEY.clone(), + Some(config), + ) + .await; + + test.fixture + .run_until_consensus_in_era(ERA_ONE, ONE_MIN) + .await; + + // This WASM creates named key called "new_key". Then it would loop endlessly trying to write a + // value to storage. Eventually it will run out of gas and it should exit causing a revert. + let base_path = RESOURCES_PATH + .parent() + .unwrap() + .join("target") + .join("wasm32-unknown-unknown") + .join("release"); + + let payment_amount = U512::from(1_000_000u64); + + let txn = { + let timestamp = Timestamp::now(); + let ttl = TimeDiff::from_seconds(100); + let gas_price = 1; + let chain_name = test.chainspec().network_config.name.clone(); + + let payment = ExecutableDeployItem::ModuleBytes { + module_bytes: std::fs::read(base_path.join("gh_5058_regression.wasm")) + .unwrap() + .into(), + args: runtime_args! { + "amount" => payment_amount, + }, + }; + + let session = ExecutableDeployItem::ModuleBytes { + module_bytes: std::fs::read(base_path.join("do_nothing.wasm")) + .unwrap() + .into(), + args: runtime_args! 
{}, + }; + + Transaction::Deploy(Deploy::new_signed( + timestamp, + ttl, + gas_price, + vec![], + chain_name.clone(), + payment, + session, + &ALICE_SECRET_KEY, + Some(ALICE_PUBLIC_KEY.clone()), + )) + }; + + let acct = get_balance(&test.fixture, &ALICE_PUBLIC_KEY, None, true); + assert!(acct.total_balance().cloned().unwrap() >= payment_amount); + + let (_txn_hash, _block_height, exec_result) = test.send_transaction(txn).await; + + assert_eq!(exec_result.error_message(), None); +} + +#[tokio::test] +async fn should_penalize_failed_custom_payment() { + let config = SingleTransactionTestCase::default_test_config() + .with_pricing_handling(PricingHandling::PaymentLimited) + .with_refund_handling(RefundHandling::NoRefund) + .with_fee_handling(FeeHandling::NoFee); + + let mut test = SingleTransactionTestCase::new( + ALICE_SECRET_KEY.clone(), + BOB_SECRET_KEY.clone(), + CHARLIE_SECRET_KEY.clone(), + Some(config), + ) + .await; + + test.fixture + .run_until_consensus_in_era(ERA_ONE, ONE_MIN) + .await; + + // This WASM creates named key called "new_key". Then it would loop endlessly trying to write a + // value to storage. Eventually it will run out of gas and it should exit causing a revert. + let base_path = RESOURCES_PATH + .parent() + .unwrap() + .join("target") + .join("wasm32-unknown-unknown") + .join("release"); + + let payment_amount = U512::from(1_000_000u64); + + let txn = { + let timestamp = Timestamp::now(); + let ttl = TimeDiff::from_seconds(100); + let gas_price = 1; + let chain_name = test.chainspec().network_config.name.clone(); + + let payment = ExecutableDeployItem::ModuleBytes { + module_bytes: std::fs::read(base_path.join("do_nothing.wasm")) + .unwrap() + .into(), + args: runtime_args! { + "amount" => payment_amount, + }, + }; + + let session = ExecutableDeployItem::ModuleBytes { + module_bytes: std::fs::read(base_path.join("do_nothing.wasm")) + .unwrap() + .into(), + args: runtime_args! 
{ + "this_is_session" => true, + }, + }; + + Transaction::Deploy(Deploy::new_signed( + timestamp, + ttl, + gas_price, + vec![], + chain_name.clone(), + payment, + session, + &ALICE_SECRET_KEY, + Some(ALICE_PUBLIC_KEY.clone()), + )) + }; + + let acct = get_balance(&test.fixture, &ALICE_PUBLIC_KEY, None, true); + assert!(acct.total_balance().cloned().unwrap() >= payment_amount); + + let (_txn_hash, _block_height, exec_result) = test.send_transaction(txn).await; + + assert_ne!(exec_result.error_message(), None); + + assert!(exec_result + .error_message() + .expect("should have err message") + .starts_with("Insufficient custom payment")) +} + +#[tokio::test] +async fn gh_5082_install_upgrade_should_allow_adding_new_version() { + let config = SingleTransactionTestCase::default_test_config() + .with_pricing_handling(PricingHandling::PaymentLimited) + .with_refund_handling(RefundHandling::NoRefund) + .with_fee_handling(FeeHandling::NoFee); + + let mut test = SingleTransactionTestCase::new( + ALICE_SECRET_KEY.clone(), + BOB_SECRET_KEY.clone(), + CHARLIE_SECRET_KEY.clone(), + Some(config), + ) + .await; + + test.fixture + .run_until_consensus_in_era(ERA_ONE, ONE_MIN) + .await; + + // This WASM creates named key called "new_key". Then it would loop endlessly trying to write a + // value to storage. Eventually it will run out of gas and it should exit causing a revert. 
+ let base_path = RESOURCES_PATH + .parent() + .unwrap() + .join("target") + .join("wasm32-unknown-unknown") + .join("release"); + + let txn_1 = { + let chain_name = test.chainspec().network_config.name.clone(); + + let module_bytes = std::fs::read(base_path.join("do_nothing_stored.wasm")).unwrap(); + let mut txn = Transaction::from( + TransactionV1Builder::new_session( + true, + module_bytes.into(), + TransactionRuntimeParams::VmCasperV1, + ) + .with_initiator_addr(ALICE_PUBLIC_KEY.clone()) + .with_pricing_mode(PricingMode::PaymentLimited { + payment_amount: 100_000_000_000u64, + gas_price_tolerance: 1, + standard_payment: true, + }) + .with_chain_name(chain_name) + .build() + .unwrap(), + ); + txn.sign(&ALICE_SECRET_KEY); + txn + }; + + let (_txn_hash, _block_height, exec_result_1) = test.send_transaction(txn_1).await; + + assert_eq!(exec_result_1.error_message(), None); // should succeed + + let txn_2 = { + let chain_name = test.chainspec().network_config.name.clone(); + + let module_bytes = std::fs::read(base_path.join("do_nothing_stored.wasm")).unwrap(); + let mut txn = Transaction::from( + TransactionV1Builder::new_session( + true, + module_bytes.into(), + TransactionRuntimeParams::VmCasperV1, + ) + .with_initiator_addr(BOB_PUBLIC_KEY.clone()) + .with_pricing_mode(PricingMode::PaymentLimited { + payment_amount: 100_000_000_000u64, + gas_price_tolerance: 1, + // This is the key part of the test: we are using `standard_payment == false` to use + // session code as payment code. This should fail to add new + // contract version. + standard_payment: false, + }) + .with_chain_name(chain_name) + .build() + .unwrap(), + ); + txn.sign(&BOB_SECRET_KEY); + txn + }; + + let (_txn_hash, _block_height, exec_result_2) = test.send_transaction(txn_2).await; + + assert_eq!( + exec_result_2.error_message(), + Some("ApiError::NotAllowedToAddContractVersion [48]".to_string()) + ); // should not succeed, adding new contract version during payment is not allowed. 
+} + +#[tokio::test] +async fn should_allow_custom_payment() { + let config = SingleTransactionTestCase::default_test_config() + .with_pricing_handling(PricingHandling::PaymentLimited) + .with_refund_handling(RefundHandling::NoRefund) + .with_fee_handling(FeeHandling::NoFee); + + let mut test = SingleTransactionTestCase::new( + ALICE_SECRET_KEY.clone(), + BOB_SECRET_KEY.clone(), + CHARLIE_SECRET_KEY.clone(), + Some(config), + ) + .await; + + test.fixture + .run_until_consensus_in_era(ERA_ONE, ONE_MIN) + .await; + + // This WASM creates named key called "new_key". Then it would loop endlessly trying to write a + // value to storage. Eventually it will run out of gas and it should exit causing a revert. + let base_path = RESOURCES_PATH + .parent() + .unwrap() + .join("target") + .join("wasm32-unknown-unknown") + .join("release"); + + let payment_amount = U512::from(1_000_000u64); + + let txn = { + let timestamp = Timestamp::now(); + let ttl = TimeDiff::from_seconds(100); + let gas_price = 1; + let chain_name = test.chainspec().network_config.name.clone(); + + let payment = ExecutableDeployItem::ModuleBytes { + module_bytes: std::fs::read(base_path.join("non_standard_payment.wasm")) + .unwrap() + .into(), + args: runtime_args! { + "amount" => payment_amount, + }, + }; + + let session = ExecutableDeployItem::ModuleBytes { + module_bytes: std::fs::read(base_path.join("do_nothing.wasm")) + .unwrap() + .into(), + args: runtime_args! 
{ + "this_is_session" => true, + }, + }; + + Transaction::Deploy(Deploy::new_signed( + timestamp, + ttl, + gas_price, + vec![], + chain_name.clone(), + payment, + session, + &ALICE_SECRET_KEY, + Some(ALICE_PUBLIC_KEY.clone()), + )) + }; + + let acct = get_balance(&test.fixture, &ALICE_PUBLIC_KEY, None, true); + assert!(acct.total_balance().cloned().unwrap() >= payment_amount); + + let (_txn_hash, _block_height, exec_result) = test.send_transaction(txn).await; + + assert_eq!(exec_result.error_message(), None); + assert!( + exec_result.consumed() > U512::zero(), + "should have consumed gas" + ); +} + +#[tokio::test] +async fn should_allow_native_transfer_v1() { + let config = SingleTransactionTestCase::default_test_config() + .with_pricing_handling(PricingHandling::PaymentLimited) + .with_refund_handling(RefundHandling::Refund { + refund_ratio: Ratio::new(99, 100), + }) + .with_fee_handling(FeeHandling::PayToProposer) + .with_gas_hold_balance_handling(HoldBalanceHandling::Accrued); + + let mut test = SingleTransactionTestCase::new( + ALICE_SECRET_KEY.clone(), + BOB_SECRET_KEY.clone(), + CHARLIE_SECRET_KEY.clone(), + Some(config), + ) + .await; + + let transfer_amount = U512::from(100); + + let txn_v1 = + TransactionV1Builder::new_transfer(transfer_amount, None, CHARLIE_PUBLIC_KEY.clone(), None) + .unwrap() + .with_chain_name(CHAIN_NAME) + .with_initiator_addr(PublicKey::from(&**BOB_SECRET_KEY)) + .build() + .unwrap(); + let payment = txn_v1 + .payment_amount() + .expect("must have payment amount as txns are using payment_limited"); + let mut txn = Transaction::from(txn_v1); + txn.sign(&BOB_SECRET_KEY); + + let (_txn_hash, _block_height, exec_result) = test.send_transaction(txn).await; + let ExecutionResult::V2(result) = exec_result else { + panic!("Expected ExecutionResult::V2 but got {:?}", exec_result); + }; + let expected_cost: U512 = U512::from(payment) * MIN_GAS_PRICE; + assert_eq!(result.error_message.as_deref(), None); + assert_eq!(result.cost, expected_cost); 
+ assert_eq!(result.transfers.len(), 1, "should have exactly 1 transfer"); +} + +#[tokio::test] +async fn should_allow_native_burn() { + let config = SingleTransactionTestCase::default_test_config() + .with_pricing_handling(PricingHandling::PaymentLimited) + .with_refund_handling(RefundHandling::Refund { + refund_ratio: Ratio::new(99, 100), + }) + .with_fee_handling(FeeHandling::PayToProposer) + .with_gas_hold_balance_handling(HoldBalanceHandling::Accrued); + + let mut test = SingleTransactionTestCase::new( + ALICE_SECRET_KEY.clone(), + BOB_SECRET_KEY.clone(), + CHARLIE_SECRET_KEY.clone(), + Some(config), + ) + .await; + + let burn_amount = U512::from(100); + + let txn_v1 = TransactionV1Builder::new_burn(burn_amount, None) + .unwrap() + .with_chain_name(CHAIN_NAME) + .with_initiator_addr(PublicKey::from(&**BOB_SECRET_KEY)) + .build() + .unwrap(); + let payment = txn_v1 + .payment_amount() + .expect("must have payment amount as txns are using payment_limited"); + let mut txn = Transaction::from(txn_v1); + txn.sign(&BOB_SECRET_KEY); + + let (_txn_hash, _block_height, exec_result) = test.send_transaction(txn).await; + let ExecutionResult::V2(result) = exec_result else { + panic!("Expected ExecutionResult::V2 but got {:?}", exec_result); + }; + let expected_cost: U512 = U512::from(payment) * MIN_GAS_PRICE; + assert_eq!(result.error_message.as_deref(), None); + assert_eq!(result.cost, expected_cost); +} + +#[tokio::test] +async fn should_not_allow_unverified_native_burn() { + let config = SingleTransactionTestCase::default_test_config() + .with_pricing_handling(PricingHandling::PaymentLimited) + .with_refund_handling(RefundHandling::Refund { + refund_ratio: Ratio::new(99, 100), + }) + .with_fee_handling(FeeHandling::PayToProposer) + .with_gas_hold_balance_handling(HoldBalanceHandling::Accrued); + + let mut test = SingleTransactionTestCase::new( + ALICE_SECRET_KEY.clone(), + BOB_SECRET_KEY.clone(), + CHARLIE_SECRET_KEY.clone(), + Some(config), + ) + .await; + + 
test.fixture + .run_until_consensus_in_era(ERA_ONE, ONE_MIN) + .await; + + let burn_amount = U512::from(100); + + let alice_uref_addr = + get_main_purse(&mut test.fixture, &ALICE_PUBLIC_KEY).expect("should have main purse"); + let alice_purse = URef::new(alice_uref_addr, AccessRights::all()); + + let txn_v1 = TransactionV1Builder::new_burn(burn_amount, Some(alice_purse)) + .unwrap() + .with_chain_name(CHAIN_NAME) + .with_initiator_addr(PublicKey::from(&**BOB_SECRET_KEY)) + .build() + .unwrap(); + let price = txn_v1 + .payment_amount() + .expect("must have payment amount as txns are using payment_limited"); + let mut txn = Transaction::from(txn_v1); + txn.sign(&BOB_SECRET_KEY); + + let (_txn_hash, _block_height, exec_result) = test.send_transaction(txn).await; + let ExecutionResult::V2(result) = exec_result else { + panic!("Expected ExecutionResult::V2 but got {:?}", exec_result); + }; + let expected_cost: U512 = U512::from(price) * MIN_GAS_PRICE; + let expected_error = format!("Forged reference: {}", alice_purse); + assert_eq!(result.error_message, Some(expected_error)); + assert_eq!(result.cost, expected_cost); +} + +enum SizingScenario { + Gas, + SerializedLength, +} + +async fn run_sizing_scenario(sizing_scenario: SizingScenario) { + let mut rng = TestRng::new(); + let alice_stake = 200_000_000_000_u64; + let bob_stake = 300_000_000_000_u64; + let charlie_stake = 300_000_000_000_u64; + let initial_stakes: Vec = + vec![alice_stake.into(), bob_stake.into(), charlie_stake.into()]; + + let secret_keys: Vec> = (0..3) + .map(|_| Arc::new(SecretKey::random(&mut rng))) + .collect(); + + let stakes = secret_keys + .iter() + .zip(initial_stakes) + .map(|(secret_key, stake)| (PublicKey::from(secret_key.as_ref()), stake)) + .collect(); + + let mut fixture = TestFixture::new_with_keys(rng, secret_keys, stakes, None).await; + + fixture + .run_until_stored_switch_block_header(ERA_ONE, ONE_MIN) + .await; + + fixture.run_until_consensus_in_era(ERA_ONE, ONE_MIN).await; + + let 
base_path = RESOURCES_PATH + .parent() + .unwrap() + .join("target") + .join("wasm32-unknown-unknown") + .join("release"); + + let (payment_1, session_1) = match sizing_scenario { + SizingScenario::Gas => { + // We create two equally sized deploys, and ensure that they are both + // executed in the non largest lane by gas limit. + let gas_limit_for_lane_4 = fixture + .chainspec + .transaction_config + .transaction_v1_config + .get_max_transaction_gas_limit(4u8); + + let payment = ExecutableDeployItem::ModuleBytes { + module_bytes: Bytes::new(), + args: runtime_args! { + "amount" => U512::from(gas_limit_for_lane_4), + }, + }; + + let session = ExecutableDeployItem::ModuleBytes { + module_bytes: std::fs::read(base_path.join("do_nothing.wasm")) + .unwrap() + .into(), + args: runtime_args! {}, + }; + + (payment, session) + } + SizingScenario::SerializedLength => { + let gas_limit_for_lane_3 = fixture + .chainspec + .transaction_config + .transaction_v1_config + .get_max_transaction_gas_limit(3u8); + + let payment = ExecutableDeployItem::ModuleBytes { + module_bytes: Bytes::new(), + args: runtime_args! { + "amount" => U512::from(gas_limit_for_lane_3) + }, + }; + + let session = ExecutableDeployItem::ModuleBytes { + module_bytes: std::fs::read(base_path.join("do_nothing.wasm")) + .unwrap() + .into(), + args: runtime_args! 
{}, + }; + + (payment, session) + } + }; + + let timestamp = Timestamp::now(); + let ttl = TimeDiff::from_seconds(100); + let gas_price = 1; + let chain_name = fixture.chainspec.network_config.name.clone(); + + let transaction_1 = Transaction::Deploy(Deploy::new_signed( + timestamp, + ttl, + gas_price, + vec![], + chain_name.clone(), + payment_1, + session_1, + &ALICE_SECRET_KEY, + Some(ALICE_PUBLIC_KEY.clone()), + )); + + let wasm_lanes = fixture + .chainspec + .transaction_config + .transaction_v1_config + .wasm_lanes(); + + let largest_lane = wasm_lanes + .iter() + .max_by(|left, right| { + left.max_transaction_length + .cmp(&right.max_transaction_length) + }) + .map(|definition| definition.id) + .expect("must have lane id for largest lane"); + + let (payment_2, session_2) = match sizing_scenario { + SizingScenario::Gas => { + // We create two equally sized deploys, and ensure that they are both + // executed in the non largest lane by gas limit. + let gas_limit_for_lane_3 = fixture + .chainspec + .transaction_config + .transaction_v1_config + .get_max_transaction_gas_limit(3u8); + + let payment = ExecutableDeployItem::ModuleBytes { + module_bytes: Bytes::new(), + args: runtime_args! { + "amount" => U512::from(gas_limit_for_lane_3), + }, + }; + + let session = ExecutableDeployItem::ModuleBytes { + module_bytes: std::fs::read(base_path.join("do_nothing.wasm")) + .unwrap() + .into(), + args: runtime_args! {}, + }; + + (payment, session) + } + SizingScenario::SerializedLength => { + let largest_lane_gas_limit = fixture + .chainspec + .transaction_config + .transaction_v1_config + .get_max_transaction_gas_limit(largest_lane); + + let payment = ExecutableDeployItem::ModuleBytes { + module_bytes: Bytes::new(), + args: runtime_args! 
{ + "amount" => U512::from(largest_lane_gas_limit) + }, + }; + + let faucet_fund_amount = U512::from(400_000_000_000_000u64); + + let session = ExecutableDeployItem::ModuleBytes { + module_bytes: std::fs::read(base_path.join("faucet_stored.wasm")) + .unwrap() + .into(), + args: runtime_args! {"id" => 1u64, ARG_AMOUNT => faucet_fund_amount }, + }; + + (payment, session) + } + }; + + let transaction_2 = Transaction::Deploy(Deploy::new_signed( + timestamp, + ttl, + gas_price, + vec![], + chain_name.clone(), + payment_2, + session_2, + &ALICE_SECRET_KEY, + Some(ALICE_PUBLIC_KEY.clone()), + )); + + // Both deploys are of roughly equal length but should be sized differently based on + // their payment amount. + + let txn_1 = transaction_1.hash(); + let txn_2 = transaction_2.hash(); + + fixture.inject_transaction(transaction_1).await; + fixture.inject_transaction(transaction_2).await; + + match sizing_scenario { + SizingScenario::Gas => { + fixture + .assert_execution_in_lane(&txn_1, 4u8, TEN_SECS) + .await; + fixture + .assert_execution_in_lane(&txn_2, 3u8, TEN_SECS) + .await; + } + SizingScenario::SerializedLength => { + fixture + .assert_execution_in_lane(&txn_1, 3u8, TEN_SECS) + .await; + fixture + .assert_execution_in_lane(&txn_2, largest_lane, TEN_SECS) + .await; + } + } +} + +#[tokio::test] +async fn should_correctly_assign_wasm_deploys_in_lanes_for_payment_limited_by_gas_limit() { + run_sizing_scenario(SizingScenario::Gas).await +} + +#[tokio::test] +async fn should_correctly_assign_wasm_deploys_in_lanes_for_payment_limited_by_serialized_length() { + run_sizing_scenario(SizingScenario::SerializedLength).await +} + +#[tokio::test] +async fn should_assign_deploy_to_largest_lane_by_payment_amount_only_in_payment_limited() { + let mut rng = TestRng::new(); + let alice_stake = 200_000_000_000_u64; + let bob_stake = 300_000_000_000_u64; + let charlie_stake = 300_000_000_000_u64; + let initial_stakes: Vec = + vec![alice_stake.into(), bob_stake.into(), 
charlie_stake.into()]; + + let secret_keys: Vec> = (0..3) + .map(|_| Arc::new(SecretKey::random(&mut rng))) + .collect(); + + let stakes = secret_keys + .iter() + .zip(initial_stakes) + .map(|(secret_key, stake)| (PublicKey::from(secret_key.as_ref()), stake)) + .collect(); + + let mut fixture = TestFixture::new_with_keys(rng, secret_keys, stakes, None).await; + + fixture + .run_until_stored_switch_block_header(ERA_ONE, ONE_MIN) + .await; + + fixture.run_until_consensus_in_era(ERA_ONE, ONE_MIN).await; + + let base_path = RESOURCES_PATH + .parent() + .unwrap() + .join("target") + .join("wasm32-unknown-unknown") + .join("release"); + + let mut wasm_lanes = fixture + .chainspec + .transaction_config + .transaction_v1_config + .wasm_lanes() + .clone(); + + wasm_lanes.sort_by(|a, b| { + a.max_transaction_gas_limit + .cmp(&b.max_transaction_gas_limit) + }); + + let (smallest_lane_id, smallest_gas_limt, smallest_size_limit_for_deploy) = wasm_lanes + .first() + .map(|lane_def| { + ( + lane_def.id, + lane_def.max_transaction_gas_limit, + lane_def.max_transaction_length, + ) + }) + .expect("must have at least one lane"); + + let payment = ExecutableDeployItem::ModuleBytes { + module_bytes: Bytes::new(), + args: runtime_args! { + "amount" => U512::from(smallest_gas_limt), + }, + }; + + let session = ExecutableDeployItem::ModuleBytes { + module_bytes: std::fs::read(base_path.join("do_nothing.wasm")) + .unwrap() + .into(), + args: runtime_args! 
{}, + }; + + let timestamp = Timestamp::now(); + let ttl = TimeDiff::from_seconds(100); + let gas_price = 1; + let chain_name = fixture.chainspec.network_config.name.clone(); + + let transaction = Transaction::Deploy(Deploy::new_signed( + timestamp, + ttl, + gas_price, + vec![], + chain_name.clone(), + payment, + session, + &ALICE_SECRET_KEY, + Some(ALICE_PUBLIC_KEY.clone()), + )); + + let small_txn_hash = transaction.hash(); + let small_txn_size = transaction.serialized_length() as u64; + assert!(small_txn_size < smallest_size_limit_for_deploy); + + fixture.inject_transaction(transaction).await; + + fixture + .assert_execution_in_lane(&small_txn_hash, smallest_lane_id, TEN_SECS) + .await; + + let (largest_lane_id, largest_gas_limt) = wasm_lanes + .last() + .map(|lane_def| (lane_def.id, lane_def.max_transaction_gas_limit)) + .expect("must have at least one lane"); + + assert_ne!(largest_lane_id, smallest_lane_id); + assert!(largest_gas_limt > smallest_gas_limt); + + let payment = ExecutableDeployItem::ModuleBytes { + module_bytes: Bytes::new(), + args: runtime_args! { + "amount" => U512::from(largest_gas_limt), + }, + }; + + let session = ExecutableDeployItem::ModuleBytes { + module_bytes: std::fs::read(base_path.join("do_nothing.wasm")) + .unwrap() + .into(), + args: runtime_args! {}, + }; + + let chain_name = fixture.chainspec.network_config.name.clone(); + + let transaction = Transaction::Deploy(Deploy::new_signed( + timestamp, + ttl, + gas_price, + vec![], + chain_name.clone(), + payment, + session, + &ALICE_SECRET_KEY, + Some(ALICE_PUBLIC_KEY.clone()), + )); + + let largest_txn_hash = transaction.hash(); + + let largest_txn_size = transaction.serialized_length() as u64; + // This is misnomer, its the size of the deploy meant to be in the + // largest lane. 
+ assert!(largest_txn_size < smallest_size_limit_for_deploy); + + fixture.inject_transaction(transaction).await; + + fixture + .assert_execution_in_lane(&largest_txn_hash, largest_lane_id, TEN_SECS) + .await; +} diff --git a/node/src/reactor/main_reactor/upgrade_shutdown.rs b/node/src/reactor/main_reactor/upgrade_shutdown.rs new file mode 100644 index 0000000000..60e082e1e3 --- /dev/null +++ b/node/src/reactor/main_reactor/upgrade_shutdown.rs @@ -0,0 +1,142 @@ +use std::{collections::HashMap, time::Duration}; + +use datasize::DataSize; +use tracing::debug; + +use casper_types::{BlockHash, FinalitySignatureId}; + +use crate::{ + effect::{announcements::ControlAnnouncement, EffectBuilder, EffectExt, Effects}, + reactor::main_reactor::{MainEvent, MainReactor}, + types::EraValidatorWeights, +}; + +use casper_types::EraId; + +const DELAY_BEFORE_SHUTDOWN: Duration = Duration::from_secs(2); + +#[derive(Debug, DataSize)] +pub(super) struct SignatureGossipTracker { + era_id: EraId, + finished_gossiping: HashMap>, +} + +impl SignatureGossipTracker { + pub(super) fn new() -> Self { + Self { + era_id: EraId::from(0), + finished_gossiping: Default::default(), + } + } + + pub(super) fn register_signature(&mut self, signature_id: Box) { + // ignore the signature if it's from an older era + if signature_id.era_id() < self.era_id { + return; + } + // if we registered a signature in a higher era, reset the cache + if signature_id.era_id() > self.era_id { + self.era_id = signature_id.era_id(); + self.finished_gossiping = Default::default(); + } + // record that the signature has finished gossiping + self.finished_gossiping + .entry(*signature_id.block_hash()) + .or_default() + .push(*signature_id); + } + + fn finished_gossiping_enough(&self, validator_weights: &EraValidatorWeights) -> bool { + if validator_weights.era_id() != self.era_id { + debug!( + relevant_era=%validator_weights.era_id(), + our_era_id=%self.era_id, + "SignatureGossipTracker has no record of the relevant era!" 
+ ); + return false; + } + self.finished_gossiping + .iter() + .all(|(block_hash, signatures)| { + let gossiped_weight_sufficient = validator_weights + .signature_weight(signatures.iter().map(|sig_id| sig_id.public_key())) + .is_sufficient(true); + debug!( + %gossiped_weight_sufficient, + %block_hash, + "SignatureGossipTracker: gossiped finality signatures check" + ); + gossiped_weight_sufficient + }) + } +} + +pub(super) enum UpgradeShutdownInstruction { + Do(Duration, Effects), + CheckLater(String, Duration), + Fatal(String), +} + +impl MainReactor { + pub(super) fn upgrade_shutdown_instruction( + &self, + effect_builder: EffectBuilder, + ) -> UpgradeShutdownInstruction { + if self.switched_to_shutdown_for_upgrade.elapsed() > self.shutdown_for_upgrade_timeout { + return self.schedule_shutdown_for_upgrade(effect_builder); + } + let recent_switch_block_headers = match self.storage.read_highest_switch_block_headers(1) { + Ok(headers) => headers, + Err(error) => { + return UpgradeShutdownInstruction::Fatal(format!( + "error getting recent switch block headers: {}", + error + )) + } + }; + if let Some(block_header) = recent_switch_block_headers.last() { + let highest_switch_block_era = block_header.era_id(); + return match self + .validator_matrix + .validator_weights(highest_switch_block_era) + { + Some(validator_weights) => self + .upgrade_shutdown_has_sufficient_finality(effect_builder, &validator_weights), + None => UpgradeShutdownInstruction::Fatal( + "validator_weights cannot be missing".to_string(), + ), + }; + } + UpgradeShutdownInstruction::Fatal("recent_switch_block_headers cannot be empty".to_string()) + } + + fn upgrade_shutdown_has_sufficient_finality( + &self, + effect_builder: EffectBuilder, + validator_weights: &EraValidatorWeights, + ) -> UpgradeShutdownInstruction { + let finished_gossiping_enough = self + .signature_gossip_tracker + .finished_gossiping_enough(validator_weights); + if finished_gossiping_enough { + 
self.schedule_shutdown_for_upgrade(effect_builder) + } else { + UpgradeShutdownInstruction::CheckLater( + "waiting for completion of gossiping signatures".to_string(), + DELAY_BEFORE_SHUTDOWN, + ) + } + } + + fn schedule_shutdown_for_upgrade( + &self, + effect_builder: EffectBuilder, + ) -> UpgradeShutdownInstruction { + // Allow a delay to acquire more finality signatures + let effects = effect_builder + .set_timeout(DELAY_BEFORE_SHUTDOWN) + .event(|_| MainEvent::ControlAnnouncement(ControlAnnouncement::ShutdownForUpgrade)); + // should not need to crank the control logic again as the reactor will shutdown + UpgradeShutdownInstruction::Do(DELAY_BEFORE_SHUTDOWN, effects) + } +} diff --git a/node/src/reactor/main_reactor/upgrading_instruction.rs b/node/src/reactor/main_reactor/upgrading_instruction.rs new file mode 100644 index 0000000000..63e44af633 --- /dev/null +++ b/node/src/reactor/main_reactor/upgrading_instruction.rs @@ -0,0 +1,27 @@ +use std::time::Duration; + +use casper_types::{TimeDiff, Timestamp}; + +pub(super) enum UpgradingInstruction { + CheckLater(String, Duration), + CatchUp, +} + +impl UpgradingInstruction { + pub(super) fn should_commit_upgrade( + should_commit_upgrade: bool, + wait: Duration, + last_progress: Timestamp, + upgrade_timeout: TimeDiff, + ) -> UpgradingInstruction { + if should_commit_upgrade { + if last_progress.elapsed() > upgrade_timeout { + UpgradingInstruction::CatchUp + } else { + UpgradingInstruction::CheckLater("awaiting upgrade".to_string(), wait) + } + } else { + UpgradingInstruction::CatchUp + } + } +} diff --git a/node/src/reactor/main_reactor/utils.rs b/node/src/reactor/main_reactor/utils.rs new file mode 100644 index 0000000000..7c375a24ab --- /dev/null +++ b/node/src/reactor/main_reactor/utils.rs @@ -0,0 +1,26 @@ +use futures::FutureExt; +use smallvec::smallvec; +use tracing::info; + +use crate::{ + components::InitializedComponent, + effect::{EffectBuilder, EffectExt, Effects}, + fatal, + 
reactor::main_reactor::MainEvent, +}; + +pub(super) fn initialize_component( + effect_builder: EffectBuilder, + component: &mut impl InitializedComponent, + initiating_event: MainEvent, +) -> Option> { + if component.is_uninitialized() { + component.start_initialization(); + info!("pending initialization of {}", component.name()); + return Some(smallvec![async { smallvec![initiating_event] }.boxed()]); + } + if component.is_fatal() { + return Some(fatal!(effect_builder, "{} failed to initialize", component.name()).ignore()); + } + None +} diff --git a/node/src/reactor/main_reactor/validate.rs b/node/src/reactor/main_reactor/validate.rs new file mode 100644 index 0000000000..962b4fb4c6 --- /dev/null +++ b/node/src/reactor/main_reactor/validate.rs @@ -0,0 +1,208 @@ +use casper_types::Timestamp; +use std::time::Duration; +use tracing::{debug, info, warn}; + +use crate::{ + components::{ + block_accumulator::{SyncIdentifier, SyncInstruction}, + consensus::ChainspecConsensusExt, + }, + effect::{EffectBuilder, Effects}, + reactor::{ + self, + main_reactor::{MainEvent, MainReactor}, + }, + storage::HighestOrphanedBlockResult, + types::MaxTtl, + NodeRng, +}; + +/// Cranking delay when encountered a non-switch block when checking the validator status. 
+const VALIDATION_STATUS_DELAY_FOR_NON_SWITCH_BLOCK: Duration = Duration::from_secs(2); + +pub(super) enum ValidateInstruction { + Do(Duration, Effects), + CheckLater(String, Duration), + CatchUp, + KeepUp, + ShutdownForUpgrade, + Fatal(String), +} + +impl MainReactor { + pub(super) fn validate_instruction( + &mut self, + effect_builder: EffectBuilder, + rng: &mut NodeRng, + ) -> ValidateInstruction { + let last_progress = self.consensus.last_progress(); + if last_progress > self.last_progress { + self.last_progress = last_progress; + } + + let queue_depth = self.contract_runtime.queue_depth(); + if queue_depth > 0 { + let idleness = Timestamp::now().saturating_diff(last_progress); + if idleness > self.idle_tolerance { + warn!("Validate: idleness tolerance reached with backed up queue, switching to catch up"); + return ValidateInstruction::CatchUp; + } + + warn!("Validate: should_validate queue_depth {}", queue_depth); + return ValidateInstruction::CheckLater( + "allow time for contract runtime execution to occur".to_string(), + self.control_logic_default_delay.into(), + ); + } + + match self.storage.get_highest_complete_block() { + Ok(Some(highest_complete_block)) => { + // If we're lagging behind the rest of the network, fall back out of Validate mode. + let sync_identifier = SyncIdentifier::LocalTip( + *highest_complete_block.hash(), + highest_complete_block.height(), + highest_complete_block.era_id(), + ); + + if let SyncInstruction::Leap { .. 
} = + self.block_accumulator.sync_instruction(sync_identifier) + { + return ValidateInstruction::CatchUp; + } + + if !highest_complete_block.is_switch_block() { + return ValidateInstruction::CheckLater( + "tip is not a switch block, don't change from validate state".to_string(), + VALIDATION_STATUS_DELAY_FOR_NON_SWITCH_BLOCK, + ); + } + } + Ok(None) => { + return ValidateInstruction::CheckLater( + "no complete block found in storage".to_string(), + self.control_logic_default_delay.into(), + ); + } + Err(error) => { + return ValidateInstruction::Fatal(format!( + "Could not read highest complete block from storage due to storage error: {}", + error + )); + } + } + + if self.should_shutdown_for_upgrade() { + return ValidateInstruction::ShutdownForUpgrade; + } + + match self.create_required_eras(effect_builder, rng) { + Ok(Some(effects)) => { + if effects.is_empty() { + ValidateInstruction::CheckLater( + "consensus state is up to date".to_string(), + self.control_logic_default_delay.into(), + ) + } else { + ValidateInstruction::Do(Duration::ZERO, effects) + } + } + Ok(None) => ValidateInstruction::KeepUp, + Err(msg) => ValidateInstruction::Fatal(msg), + } + } + + pub(super) fn create_required_eras( + &mut self, + effect_builder: EffectBuilder, + rng: &mut NodeRng, + ) -> Result>, String> { + let recent_switch_block_headers = self + .storage + .read_highest_switch_block_headers(self.chainspec.number_of_past_switch_blocks_needed()) + .map_err(|err| err.to_string())?; + + let highest_switch_block_header = match recent_switch_block_headers.last() { + None => { + debug!( + "{}: create_required_eras: recent_switch_block_headers is empty", + self.state + ); + return Ok(None); + } + Some(header) => header, + }; + debug!( + era = highest_switch_block_header.era_id().value(), + block_hash = %highest_switch_block_header.block_hash(), + height = highest_switch_block_header.height(), + "{}: highest_switch_block_header", self.state + ); + + let highest_era_weights = match 
highest_switch_block_header.next_era_validator_weights() { + None => { + return Err(format!( + "{}: highest switch block has no era end: {}", + self.state, highest_switch_block_header, + )); + } + Some(weights) => weights, + }; + if !highest_era_weights.contains_key(self.consensus.public_key()) { + debug!( + era = highest_switch_block_header.era_id().successor().value(), + "{}: this is not a validating node in this era", self.state + ); + return Ok(None); + } + + if let HighestOrphanedBlockResult::Orphan(highest_orphaned_block_header) = + self.storage.get_highest_orphaned_block_header() + { + let max_ttl: MaxTtl = self.chainspec.transaction_config.max_ttl.into(); + if max_ttl.synced_to_ttl( + highest_switch_block_header.timestamp(), + &highest_orphaned_block_header, + ) { + debug!(%self.state,"{}: sufficient TTL awareness to safely participate in consensus", self.state); + } else { + info!( + "{}: insufficient TTL awareness to safely participate in consensus", + self.state + ); + return Ok(None); + } + } else { + return Err("get_highest_orphaned_block_header failed to produce record".to_string()); + } + + let era_id = highest_switch_block_header.era_id(); + if self.upgrade_watcher.should_upgrade_after(era_id) { + info!( + "{}: upgrade required after era {}", + self.state, + era_id.value() + ); + return Ok(None); + } + + let create_required_eras = + self.consensus + .create_required_eras(effect_builder, rng, &recent_switch_block_headers); + match &create_required_eras { + Some(effects) => { + if effects.is_empty() { + info!(state = %self.state,"create_required_eras is empty"); + } else { + info!(state = %self.state,"will attempt to create required eras for consensus"); + } + } + None => { + info!(state = %self.state,"create_required_eras is none"); + } + } + Ok( + create_required_eras + .map(|effects| reactor::wrap_effects(MainEvent::Consensus, effects)), + ) + } +} diff --git a/node/src/reactor/queue_kind.rs b/node/src/reactor/queue_kind.rs index 
bd8755bddf..628ccc0ee6 100644 --- a/node/src/reactor/queue_kind.rs +++ b/node/src/reactor/queue_kind.rs @@ -12,7 +12,9 @@ use serde::Serialize; /// Scheduling priority. /// /// Priorities are ordered from lowest to highest. -#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash, IntoEnumIterator, PartialOrd, Ord, Serialize)] +#[derive( + Copy, Clone, Debug, Eq, PartialEq, Hash, IntoEnumIterator, PartialOrd, Ord, Serialize, Default, +)] pub enum QueueKind { /// Control messages for the runtime itself. Control, @@ -20,12 +22,37 @@ pub enum QueueKind { /// /// Their load may vary and grouping them together in one queue aides DoS protection. NetworkIncoming, + /// Network events that are low priority. + NetworkLowPriority, + /// Network events demand a resource directly. + NetworkDemand, /// Network events that were initiated by the local node, such as outgoing messages. Network, + /// NetworkInfo events. + NetworkInfo, + /// Fetch events. + Fetch, + /// SyncGlobalState events. + SyncGlobalState, + /// FinalitySignature events. + FinalitySignature, /// Events of unspecified priority. /// /// This is the default queue. + #[default] Regular, + /// Gossiper events. + Gossip, + /// Get from storage events. + FromStorage, + /// Put to storage events. + ToStorage, + /// Contract runtime events. + ContractRuntime, + /// Consensus events. + Consensus, + /// Validation events. + Validation, /// Reporting events on the local node. 
/// /// Metric events take precedence over most other events since missing a request for metrics @@ -38,20 +65,26 @@ impl Display for QueueKind { let str_value = match self { QueueKind::Control => "Control", QueueKind::NetworkIncoming => "NetworkIncoming", + QueueKind::NetworkLowPriority => "NetworkLowPriority", + QueueKind::NetworkDemand => "NetworkDemand", QueueKind::Network => "Network", + QueueKind::NetworkInfo => "NetworkInfo", + QueueKind::Fetch => "Fetch", QueueKind::Regular => "Regular", + QueueKind::Gossip => "Gossip", + QueueKind::FromStorage => "FromStorage", + QueueKind::ToStorage => "ToStorage", + QueueKind::ContractRuntime => "ContractRuntime", + QueueKind::SyncGlobalState => "SyncGlobalState", + QueueKind::FinalitySignature => "FinalitySignature", + QueueKind::Consensus => "Consensus", + QueueKind::Validation => "Validation", QueueKind::Api => "Api", }; write!(f, "{}", str_value) } } -impl Default for QueueKind { - fn default() -> Self { - QueueKind::Regular - } -} - impl QueueKind { /// Returns the weight of a specific queue. /// @@ -59,12 +92,24 @@ impl QueueKind { /// each event processing round. fn weight(self) -> NonZeroUsize { NonZeroUsize::new(match self { - // Note: Control events should be very rare, but we do want to process them right away. - QueueKind::Control => 32, - QueueKind::NetworkIncoming => 4, + QueueKind::NetworkLowPriority => 1, + QueueKind::NetworkInfo => 2, + QueueKind::NetworkDemand => 2, + QueueKind::NetworkIncoming => 8, QueueKind::Network => 4, - QueueKind::Regular => 8, - QueueKind::Api => 16, + QueueKind::Regular => 4, + QueueKind::Fetch => 4, + QueueKind::Gossip => 4, + QueueKind::FromStorage => 4, + QueueKind::ToStorage => 4, + QueueKind::ContractRuntime => 4, + QueueKind::SyncGlobalState => 4, + QueueKind::Consensus => 4, + QueueKind::FinalitySignature => 4, + QueueKind::Validation => 8, + QueueKind::Api => 8, + // Note: Control events should be very rare, but we do want to process them right away. 
+ QueueKind::Control => 16, }) .expect("weight must be positive") } @@ -80,9 +125,21 @@ impl QueueKind { match self { QueueKind::Control => "control", QueueKind::NetworkIncoming => "network_incoming", + QueueKind::NetworkDemand => "network_demands", + QueueKind::NetworkLowPriority => "network_low_priority", QueueKind::Network => "network", - QueueKind::Regular => "regular", + QueueKind::NetworkInfo => "network_info", + QueueKind::SyncGlobalState => "sync_global_state", + QueueKind::Fetch => "fetch", + QueueKind::Gossip => "gossip", + QueueKind::FromStorage => "from_storage", + QueueKind::ToStorage => "to_storage", + QueueKind::ContractRuntime => "contract_runtime", + QueueKind::Consensus => "consensus", + QueueKind::Validation => "validation", + QueueKind::FinalitySignature => "finality_signature", QueueKind::Api => "api", + QueueKind::Regular => "regular", } } } diff --git a/node/src/reactor/validator.rs b/node/src/reactor/validator.rs deleted file mode 100644 index 9ece1ddbdc..0000000000 --- a/node/src/reactor/validator.rs +++ /dev/null @@ -1,1083 +0,0 @@ -//! Reactor for validator nodes. -//! -//! Validator nodes join the validator-only network upon startup. 
- -mod config; -mod error; -mod memory_metrics; -#[cfg(test)] -mod tests; - -use std::{ - env, - fmt::{self, Debug, Display, Formatter}, - path::PathBuf, - sync::Arc, -}; - -use datasize::DataSize; -use derive_more::From; -use prometheus::Registry; -use reactor::ReactorEvent; -use serde::Serialize; -use tracing::{debug, error, trace, warn}; - -#[cfg(test)] -use crate::testing::network::NetworkedReactor; - -use crate::{ - components::{ - block_proposer::{self, BlockProposer}, - block_validator::{self, BlockValidator}, - chainspec_loader::{self, ChainspecLoader}, - consensus::{self, EraSupervisor, HighwayProtocol}, - contract_runtime::{self, ContractRuntime}, - deploy_acceptor::{self, DeployAcceptor}, - event_stream_server::{self, EventStreamServer}, - fetcher::{self, Fetcher}, - gossiper::{self, Gossiper}, - linear_chain, - metrics::Metrics, - network::{self, Network, NetworkIdentity, ENABLE_LIBP2P_NET_ENV_VAR}, - rest_server::{self, RestServer}, - rpc_server::{self, RpcServer}, - small_network::{self, GossipedAddress, SmallNetwork, SmallNetworkIdentity}, - storage::{self, Storage}, - Component, - }, - effect::{ - announcements::{ - BlocklistAnnouncement, ChainspecLoaderAnnouncement, ConsensusAnnouncement, - ContractRuntimeAnnouncement, ControlAnnouncement, DeployAcceptorAnnouncement, - GossiperAnnouncement, LinearChainAnnouncement, LinearChainBlock, NetworkAnnouncement, - RpcServerAnnouncement, - }, - requests::{ - BlockProposerRequest, BlockValidationRequest, ChainspecLoaderRequest, ConsensusRequest, - ContractRuntimeRequest, FetcherRequest, LinearChainRequest, MetricsRequest, - NetworkInfoRequest, NetworkRequest, RestRequest, RpcRequest, StateStoreRequest, - StorageRequest, - }, - EffectBuilder, EffectExt, Effects, - }, - protocol::Message, - reactor::{self, event_queue_metrics::EventQueueMetrics, EventQueueHandle, ReactorExit}, - types::{Block, BlockHash, Deploy, ExitCode, NodeId, ProtoBlock, Tag}, - utils::{Source, WithDir}, - NodeRng, -}; -pub use 
config::Config; -pub use error::Error; -use linear_chain::LinearChainComponent; -use memory_metrics::MemoryMetrics; - -/// Top-level event for the reactor. -#[derive(Debug, From, Serialize)] -#[must_use] -pub enum Event { - /// Network event. - #[from] - Network(network::Event), - /// Small network event. - #[from] - SmallNetwork(small_network::Event), - /// Block proposer event. - #[from] - BlockProposer(#[serde(skip_serializing)] block_proposer::Event), - #[from] - /// Storage event. - Storage(#[serde(skip_serializing)] storage::Event), - #[from] - /// RPC server event. - RpcServer(#[serde(skip_serializing)] rpc_server::Event), - #[from] - /// REST server event. - RestServer(#[serde(skip_serializing)] rest_server::Event), - #[from] - /// Event stream server event. - EventStreamServer(#[serde(skip_serializing)] event_stream_server::Event), - #[from] - /// Chainspec Loader event. - ChainspecLoader(#[serde(skip_serializing)] chainspec_loader::Event), - #[from] - /// Consensus event. - Consensus(#[serde(skip_serializing)] consensus::Event), - /// Deploy acceptor event. - #[from] - DeployAcceptor(#[serde(skip_serializing)] deploy_acceptor::Event), - /// Deploy fetcher event. - #[from] - DeployFetcher(#[serde(skip_serializing)] fetcher::Event), - /// Deploy gossiper event. - #[from] - DeployGossiper(#[serde(skip_serializing)] gossiper::Event), - /// Address gossiper event. - #[from] - AddressGossiper(gossiper::Event), - /// Contract runtime event. - #[from] - ContractRuntime(#[serde(skip_serializing)] contract_runtime::Event), - /// Block validator event. - #[from] - ProtoBlockValidator(#[serde(skip_serializing)] block_validator::Event), - /// Linear chain event. - #[from] - LinearChain(#[serde(skip_serializing)] linear_chain::Event), - - // Requests - /// Network request. - #[from] - NetworkRequest(#[serde(skip_serializing)] NetworkRequest), - /// Network info request. 
- #[from] - NetworkInfoRequest(#[serde(skip_serializing)] NetworkInfoRequest), - /// Deploy fetcher request. - #[from] - DeployFetcherRequest(#[serde(skip_serializing)] FetcherRequest), - /// Block proposer request. - #[from] - BlockProposerRequest(#[serde(skip_serializing)] BlockProposerRequest), - /// Block validator request. - #[from] - ProtoBlockValidatorRequest( - #[serde(skip_serializing)] BlockValidationRequest, - ), - /// Metrics request. - #[from] - MetricsRequest(#[serde(skip_serializing)] MetricsRequest), - /// Chainspec info request - #[from] - ChainspecLoaderRequest(#[serde(skip_serializing)] ChainspecLoaderRequest), - /// Storage request. - #[from] - StorageRequest(#[serde(skip_serializing)] StorageRequest), - /// Request for state storage. - #[from] - StateStoreRequest(StateStoreRequest), - - // Announcements - /// Control announcement. - #[from] - ControlAnnouncement(ControlAnnouncement), - /// Network announcement. - #[from] - NetworkAnnouncement(#[serde(skip_serializing)] NetworkAnnouncement), - /// API server announcement. - #[from] - RpcServerAnnouncement(#[serde(skip_serializing)] RpcServerAnnouncement), - /// DeployAcceptor announcement. - #[from] - DeployAcceptorAnnouncement(#[serde(skip_serializing)] DeployAcceptorAnnouncement), - /// Consensus announcement. - #[from] - ConsensusAnnouncement(#[serde(skip_serializing)] ConsensusAnnouncement), - /// ContractRuntime announcement. - #[from] - ContractRuntimeAnnouncement(#[serde(skip_serializing)] ContractRuntimeAnnouncement), - /// Deploy Gossiper announcement. - #[from] - DeployGossiperAnnouncement(#[serde(skip_serializing)] GossiperAnnouncement), - /// Address Gossiper announcement. - #[from] - AddressGossiperAnnouncement(#[serde(skip_serializing)] GossiperAnnouncement), - /// Linear chain announcement. - #[from] - LinearChainAnnouncement(#[serde(skip_serializing)] LinearChainAnnouncement), - /// Chainspec loader announcement. 
- #[from] - ChainspecLoaderAnnouncement(#[serde(skip_serializing)] ChainspecLoaderAnnouncement), - /// Blocklist announcement. - #[from] - BlocklistAnnouncement(BlocklistAnnouncement), -} - -impl ReactorEvent for Event { - fn as_control(&self) -> Option<&ControlAnnouncement> { - if let Self::ControlAnnouncement(ref ctrl_ann) = self { - Some(ctrl_ann) - } else { - None - } - } -} - -impl From> for Event { - fn from(request: RpcRequest) -> Self { - Event::RpcServer(rpc_server::Event::RpcRequest(request)) - } -} - -impl From> for Event { - fn from(request: RestRequest) -> Self { - Event::RestServer(rest_server::Event::RestRequest(request)) - } -} - -impl From> for Event { - fn from(request: NetworkRequest) -> Self { - Event::NetworkRequest(request.map_payload(Message::from)) - } -} - -impl From>> for Event { - fn from(request: NetworkRequest>) -> Self { - Event::NetworkRequest(request.map_payload(Message::from)) - } -} - -impl From>> for Event { - fn from(request: NetworkRequest>) -> Self { - Event::NetworkRequest(request.map_payload(Message::from)) - } -} - -impl From for Event { - fn from(request: ContractRuntimeRequest) -> Event { - Event::ContractRuntime(contract_runtime::Event::Request(Box::new(request))) - } -} - -impl From for Event { - fn from(request: ConsensusRequest) -> Self { - Event::Consensus(consensus::Event::ConsensusRequest(request)) - } -} - -impl From> for Event { - fn from(request: LinearChainRequest) -> Self { - Event::LinearChain(linear_chain::Event::Request(request)) - } -} - -impl Display for Event { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - match self { - Event::Network(event) => write!(f, "network: {}", event), - Event::SmallNetwork(event) => write!(f, "small network: {}", event), - Event::BlockProposer(event) => write!(f, "block proposer: {}", event), - Event::Storage(event) => write!(f, "storage: {}", event), - Event::RpcServer(event) => write!(f, "rpc server: {}", event), - Event::RestServer(event) => write!(f, "rest 
server: {}", event), - Event::EventStreamServer(event) => write!(f, "event stream server: {}", event), - Event::ChainspecLoader(event) => write!(f, "chainspec loader: {}", event), - Event::Consensus(event) => write!(f, "consensus: {}", event), - Event::DeployAcceptor(event) => write!(f, "deploy acceptor: {}", event), - Event::DeployFetcher(event) => write!(f, "deploy fetcher: {}", event), - Event::DeployGossiper(event) => write!(f, "deploy gossiper: {}", event), - Event::AddressGossiper(event) => write!(f, "address gossiper: {}", event), - Event::ContractRuntime(event) => write!(f, "contract runtime: {:?}", event), - Event::LinearChain(event) => write!(f, "linear-chain event {}", event), - Event::ProtoBlockValidator(event) => write!(f, "block validator: {}", event), - Event::NetworkRequest(req) => write!(f, "network request: {}", req), - Event::NetworkInfoRequest(req) => write!(f, "network info request: {}", req), - Event::ChainspecLoaderRequest(req) => write!(f, "chainspec loader request: {}", req), - Event::StorageRequest(req) => write!(f, "storage request: {}", req), - Event::StateStoreRequest(req) => write!(f, "state store request: {}", req), - Event::DeployFetcherRequest(req) => write!(f, "deploy fetcher request: {}", req), - Event::BlockProposerRequest(req) => write!(f, "block proposer request: {}", req), - Event::ProtoBlockValidatorRequest(req) => write!(f, "block validator request: {}", req), - Event::MetricsRequest(req) => write!(f, "metrics request: {}", req), - Event::ControlAnnouncement(ctrl_ann) => write!(f, "control: {}", ctrl_ann), - Event::NetworkAnnouncement(ann) => write!(f, "network announcement: {}", ann), - Event::RpcServerAnnouncement(ann) => write!(f, "api server announcement: {}", ann), - Event::DeployAcceptorAnnouncement(ann) => { - write!(f, "deploy acceptor announcement: {}", ann) - } - Event::ConsensusAnnouncement(ann) => write!(f, "consensus announcement: {}", ann), - Event::ContractRuntimeAnnouncement(ann) => { - write!(f, 
"block-executor announcement: {}", ann) - } - Event::DeployGossiperAnnouncement(ann) => { - write!(f, "deploy gossiper announcement: {}", ann) - } - Event::AddressGossiperAnnouncement(ann) => { - write!(f, "address gossiper announcement: {}", ann) - } - Event::LinearChainAnnouncement(ann) => write!(f, "linear chain announcement: {}", ann), - Event::ChainspecLoaderAnnouncement(ann) => { - write!(f, "chainspec loader announcement: {}", ann) - } - Event::BlocklistAnnouncement(ann) => { - write!(f, "blocklist announcement: {}", ann) - } - } - } -} - -/// The configuration needed to initialize a Validator reactor -pub struct ValidatorInitConfig { - pub(super) root: PathBuf, - pub(super) config: Config, - pub(super) chainspec_loader: ChainspecLoader, - pub(super) storage: Storage, - pub(super) contract_runtime: ContractRuntime, - pub(super) latest_block: Option, - pub(super) event_stream_server: EventStreamServer, - pub(super) small_network_identity: SmallNetworkIdentity, - pub(super) network_identity: NetworkIdentity, -} - -#[cfg(test)] -impl ValidatorInitConfig { - /// Inspect storage. - pub(crate) fn storage(&self) -> &Storage { - &self.storage - } -} - -impl Debug for ValidatorInitConfig { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - write!(f, "ValidatorInitConfig {{ .. }}") - } -} - -/// Validator node reactor. -#[derive(DataSize, Debug)] -pub struct Reactor { - metrics: Metrics, - small_network: SmallNetwork, - network: Network, - address_gossiper: Gossiper, - storage: Storage, - contract_runtime: ContractRuntime, - rpc_server: RpcServer, - rest_server: RestServer, - event_stream_server: EventStreamServer, - chainspec_loader: ChainspecLoader, - consensus: EraSupervisor, - #[data_size(skip)] - deploy_acceptor: DeployAcceptor, - deploy_fetcher: Fetcher, - deploy_gossiper: Gossiper, - block_proposer: BlockProposer, - proto_block_validator: BlockValidator, - linear_chain: LinearChainComponent, - - // Non-components. 
- #[data_size(skip)] // Never allocates heap data. - memory_metrics: MemoryMetrics, - - #[data_size(skip)] - event_queue_metrics: EventQueueMetrics, -} - -#[cfg(test)] -impl Reactor { - /// Inspect consensus. - pub(crate) fn consensus(&self) -> &EraSupervisor { - &self.consensus - } - /// Inspect storage. - pub(crate) fn storage(&self) -> &Storage { - &self.storage - } -} - -impl reactor::Reactor for Reactor { - type Event = Event; - - // The "configuration" is in fact the whole state of the joiner reactor, which we - // deconstruct and reuse. - type Config = ValidatorInitConfig; - type Error = Error; - - fn new( - config: Self::Config, - registry: &Registry, - event_queue: EventQueueHandle, - _rng: &mut NodeRng, - ) -> Result<(Self, Effects), Error> { - let ValidatorInitConfig { - root, - config, - chainspec_loader, - storage, - mut contract_runtime, - latest_block, - event_stream_server, - small_network_identity, - network_identity, - } = config; - - let memory_metrics = MemoryMetrics::new(registry.clone())?; - - let event_queue_metrics = EventQueueMetrics::new(registry.clone(), event_queue)?; - - let metrics = Metrics::new(registry.clone()); - - let effect_builder = EffectBuilder::new(event_queue); - let network_config = network::Config::from(&config.network); - let (network, network_effects) = Network::new( - event_queue, - network_config, - registry, - network_identity, - chainspec_loader.chainspec(), - true, - )?; - let (small_network, small_network_effects) = SmallNetwork::new( - event_queue, - config.network, - registry, - small_network_identity, - chainspec_loader.chainspec().as_ref(), - true, - )?; - - let address_gossiper = - Gossiper::new_for_complete_items("address_gossiper", config.gossip, registry)?; - - let protocol_version = &chainspec_loader.chainspec().protocol_config.version; - let rpc_server = - RpcServer::new(config.rpc_server.clone(), effect_builder, *protocol_version)?; - let rest_server = RestServer::new( - config.rest_server.clone(), - 
effect_builder, - *protocol_version, - )?; - - let deploy_acceptor = - DeployAcceptor::new(config.deploy_acceptor, &*chainspec_loader.chainspec()); - let deploy_fetcher = Fetcher::new("deploy", config.fetcher, ®istry)?; - let deploy_gossiper = Gossiper::new_for_partial_items( - "deploy_gossiper", - config.gossip, - gossiper::get_deploy_from_storage::, - registry, - )?; - let (block_proposer, block_proposer_effects) = BlockProposer::new( - registry.clone(), - effect_builder, - latest_block - .as_ref() - .map(|block| block.height() + 1) - .unwrap_or(0), - chainspec_loader.chainspec().as_ref(), - config.block_proposer, - )?; - - let initial_era = latest_block.as_ref().map_or_else( - || chainspec_loader.initial_era(), - |block| block.header().next_block_era_id(), - ); - let mut effects = reactor::wrap_effects(Event::BlockProposer, block_proposer_effects); - - let maybe_next_activation_point = chainspec_loader - .next_upgrade() - .map(|next_upgrade| next_upgrade.activation_point()); - let (consensus, init_consensus_effects) = EraSupervisor::new( - initial_era, - WithDir::new(root, config.consensus), - effect_builder, - chainspec_loader.chainspec().as_ref().into(), - latest_block.as_ref().map(Block::header), - maybe_next_activation_point, - registry, - Box::new(HighwayProtocol::new_boxed), - )?; - effects.extend(reactor::wrap_effects( - Event::Consensus, - init_consensus_effects, - )); - contract_runtime.set_initial_state( - chainspec_loader.initial_state_root_hash(), - chainspec_loader.initial_block_header(), - ); - contract_runtime.set_parent_map_from_block(latest_block); - - let proto_block_validator = BlockValidator::new(Arc::clone(&chainspec_loader.chainspec())); - let linear_chain = linear_chain::LinearChainComponent::new( - ®istry, - *protocol_version, - chainspec_loader.chainspec().core_config.auction_delay, - chainspec_loader.chainspec().core_config.unbonding_delay, - )?; - - effects.extend(reactor::wrap_effects(Event::Network, network_effects)); - 
effects.extend(reactor::wrap_effects( - Event::SmallNetwork, - small_network_effects, - )); - effects.extend(reactor::wrap_effects( - Event::ChainspecLoader, - chainspec_loader.start_checking_for_upgrades(effect_builder), - )); - - Ok(( - Reactor { - metrics, - network, - small_network, - address_gossiper, - storage, - contract_runtime, - rpc_server, - rest_server, - event_stream_server, - chainspec_loader, - consensus, - deploy_acceptor, - deploy_fetcher, - deploy_gossiper, - block_proposer, - proto_block_validator, - linear_chain, - memory_metrics, - event_queue_metrics, - }, - effects, - )) - } - - fn dispatch_event( - &mut self, - effect_builder: EffectBuilder, - rng: &mut NodeRng, - event: Event, - ) -> Effects { - match event { - Event::Network(event) => reactor::wrap_effects( - Event::Network, - self.network.handle_event(effect_builder, rng, event), - ), - Event::SmallNetwork(event) => reactor::wrap_effects( - Event::SmallNetwork, - self.small_network.handle_event(effect_builder, rng, event), - ), - Event::BlockProposer(event) => reactor::wrap_effects( - Event::BlockProposer, - self.block_proposer.handle_event(effect_builder, rng, event), - ), - Event::Storage(event) => reactor::wrap_effects( - Event::Storage, - self.storage.handle_event(effect_builder, rng, event), - ), - Event::RpcServer(event) => reactor::wrap_effects( - Event::RpcServer, - self.rpc_server.handle_event(effect_builder, rng, event), - ), - Event::RestServer(event) => reactor::wrap_effects( - Event::RestServer, - self.rest_server.handle_event(effect_builder, rng, event), - ), - Event::EventStreamServer(event) => reactor::wrap_effects( - Event::EventStreamServer, - self.event_stream_server - .handle_event(effect_builder, rng, event), - ), - Event::ChainspecLoader(event) => reactor::wrap_effects( - Event::ChainspecLoader, - self.chainspec_loader - .handle_event(effect_builder, rng, event), - ), - Event::Consensus(event) => reactor::wrap_effects( - Event::Consensus, - 
self.consensus.handle_event(effect_builder, rng, event), - ), - Event::DeployAcceptor(event) => reactor::wrap_effects( - Event::DeployAcceptor, - self.deploy_acceptor - .handle_event(effect_builder, rng, event), - ), - Event::DeployFetcher(event) => reactor::wrap_effects( - Event::DeployFetcher, - self.deploy_fetcher.handle_event(effect_builder, rng, event), - ), - Event::DeployGossiper(event) => reactor::wrap_effects( - Event::DeployGossiper, - self.deploy_gossiper - .handle_event(effect_builder, rng, event), - ), - Event::AddressGossiper(event) => reactor::wrap_effects( - Event::AddressGossiper, - self.address_gossiper - .handle_event(effect_builder, rng, event), - ), - Event::ContractRuntime(event) => reactor::wrap_effects( - Event::ContractRuntime, - self.contract_runtime - .handle_event(effect_builder, rng, event), - ), - Event::ProtoBlockValidator(event) => reactor::wrap_effects( - Event::ProtoBlockValidator, - self.proto_block_validator - .handle_event(effect_builder, rng, event), - ), - Event::LinearChain(event) => reactor::wrap_effects( - Event::LinearChain, - self.linear_chain.handle_event(effect_builder, rng, event), - ), - - // Requests: - Event::NetworkRequest(req) => { - let event = if env::var(ENABLE_LIBP2P_NET_ENV_VAR).is_ok() { - Event::Network(network::Event::from(req)) - } else { - Event::SmallNetwork(small_network::Event::from(req)) - }; - self.dispatch_event(effect_builder, rng, event) - } - Event::NetworkInfoRequest(req) => { - let event = if env::var(ENABLE_LIBP2P_NET_ENV_VAR).is_ok() { - Event::Network(network::Event::from(req)) - } else { - Event::SmallNetwork(small_network::Event::from(req)) - }; - self.dispatch_event(effect_builder, rng, event) - } - Event::DeployFetcherRequest(req) => { - self.dispatch_event(effect_builder, rng, Event::DeployFetcher(req.into())) - } - Event::BlockProposerRequest(req) => { - self.dispatch_event(effect_builder, rng, Event::BlockProposer(req.into())) - } - Event::ProtoBlockValidatorRequest(req) => 
self.dispatch_event( - effect_builder, - rng, - Event::ProtoBlockValidator(block_validator::Event::from(req)), - ), - Event::MetricsRequest(req) => reactor::wrap_effects( - Event::MetricsRequest, - self.metrics.handle_event(effect_builder, rng, req), - ), - Event::ChainspecLoaderRequest(req) => { - self.dispatch_event(effect_builder, rng, Event::ChainspecLoader(req.into())) - } - Event::StorageRequest(req) => { - self.dispatch_event(effect_builder, rng, Event::Storage(req.into())) - } - Event::StateStoreRequest(req) => { - self.dispatch_event(effect_builder, rng, Event::Storage(req.into())) - } - - // Announcements: - Event::ControlAnnouncement(ctrl_ann) => { - unreachable!("unhandled control announcement: {}", ctrl_ann) - } - Event::NetworkAnnouncement(NetworkAnnouncement::MessageReceived { - sender, - payload, - }) => { - let reactor_event = match payload { - Message::Consensus(msg) => { - Event::Consensus(consensus::Event::MessageReceived { sender, msg }) - } - Message::DeployGossiper(message) => { - Event::DeployGossiper(gossiper::Event::MessageReceived { sender, message }) - } - Message::AddressGossiper(message) => { - Event::AddressGossiper(gossiper::Event::MessageReceived { sender, message }) - } - Message::GetRequest { tag, serialized_id } => match tag { - Tag::Deploy => { - let deploy_hash = match bincode::deserialize(&serialized_id) { - Ok(hash) => hash, - Err(error) => { - error!( - "failed to decode {:?} from {}: {}", - serialized_id, sender, error - ); - return Effects::new(); - } - }; - - match self - .storage - .handle_legacy_direct_deploy_request(deploy_hash) - { - // This functionality was moved out of the storage component and - // should be refactored ASAP. 
- Some(deploy) => { - match Message::new_get_response(&deploy) { - Ok(message) => { - return effect_builder - .send_message(sender, message) - .ignore(); - } - Err(error) => { - error!("failed to create get-response: {}", error); - return Effects::new(); - } - }; - } - None => { - debug!("failed to get {} for {}", deploy_hash, sender); - return Effects::new(); - } - } - } - Tag::Block => { - let block_hash = match bincode::deserialize(&serialized_id) { - Ok(hash) => hash, - Err(error) => { - error!( - "failed to decode {:?} from {}: {}", - serialized_id, sender, error - ); - return Effects::new(); - } - }; - Event::LinearChain(linear_chain::Event::Request( - LinearChainRequest::BlockRequest(block_hash, sender), - )) - } - Tag::BlockByHeight => { - let height = match bincode::deserialize(&serialized_id) { - Ok(block_by_height) => block_by_height, - Err(error) => { - error!( - "failed to decode {:?} from {}: {}", - serialized_id, sender, error - ); - return Effects::new(); - } - }; - Event::LinearChain(linear_chain::Event::Request( - LinearChainRequest::BlockAtHeight(height, sender), - )) - } - Tag::GossipedAddress => { - warn!("received get request for gossiped-address from {}", sender); - return Effects::new(); - } - Tag::BlockHeaderByHash => { - let block_hash: BlockHash = match bincode::deserialize(&serialized_id) { - Ok(block_hash) => block_hash, - Err(error) => { - error!( - "failed to decode {:?} from {}: {}", - serialized_id, sender, error - ); - return Effects::new(); - } - }; - - match self.storage.read_block_header_by_hash(&block_hash) { - Ok(Some(block_header)) => { - match Message::new_get_response(&block_header) { - Err(error) => { - error!("failed to create get-response: {}", error); - return Effects::new(); - } - Ok(message) => { - return effect_builder - .send_message(sender, message) - .ignore(); - } - }; - } - Ok(None) => { - debug!("failed to get {} for {}", block_hash, sender); - return Effects::new(); - } - Err(error) => { - error!( - "failed to 
get {} for {}: {}", - block_hash, sender, error - ); - return Effects::new(); - } - } - } - Tag::BlockHeaderAndFinalitySignaturesByHeight => { - let block_height = match bincode::deserialize(&serialized_id) { - Ok(block_height) => block_height, - Err(error) => { - error!( - "failed to decode {:?} from {}: {}", - serialized_id, sender, error - ); - return Effects::new(); - } - }; - match self - .storage - .read_block_header_and_finality_signatures_by_height(block_height) - { - Ok(Some(block_header)) => { - match Message::new_get_response(&block_header) { - Ok(message) => { - return effect_builder - .send_message(sender, message) - .ignore(); - } - Err(error) => { - error!("failed to create get-response: {}", error); - return Effects::new(); - } - }; - } - Ok(None) => { - debug!("failed to get {} for {}", block_height, sender); - return Effects::new(); - } - Err(error) => { - error!( - "failed to get {} for {}: {}", - block_height, sender, error - ); - return Effects::new(); - } - } - } - }, - Message::GetResponse { - tag, - serialized_item, - } => match tag { - Tag::Deploy => { - let deploy = match bincode::deserialize(&serialized_item) { - Ok(deploy) => Box::new(deploy), - Err(error) => { - error!("failed to decode deploy from {}: {}", sender, error); - return Effects::new(); - } - }; - Event::DeployAcceptor(deploy_acceptor::Event::Accept { - deploy, - source: Source::Peer(sender), - responder: None, - }) - } - Tag::Block => { - error!( - "cannot handle get response for block-by-hash from {}", - sender - ); - return Effects::new(); - } - Tag::BlockByHeight => { - error!( - "cannot handle get response for block-by-height from {}", - sender - ); - return Effects::new(); - } - Tag::GossipedAddress => { - error!( - "cannot handle get response for gossiped-address from {}", - sender - ); - return Effects::new(); - } - Tag::BlockHeaderByHash => { - error!( - "cannot handle get response for block-header-by-hash from {}", - sender - ); - return Effects::new(); - } - 
Tag::BlockHeaderAndFinalitySignaturesByHeight => { - error!( - "cannot handle get response for \ - block-header-and-finality-signatures-by-height from {}", - sender - ); - return Effects::new(); - } - }, - Message::FinalitySignature(fs) => { - Event::LinearChain(linear_chain::Event::FinalitySignatureReceived(fs, true)) - } - }; - self.dispatch_event(effect_builder, rng, reactor_event) - } - Event::NetworkAnnouncement(NetworkAnnouncement::GossipOurAddress(gossiped_address)) => { - let event = gossiper::Event::ItemReceived { - item_id: gossiped_address, - source: Source::::Ourself, - }; - self.dispatch_event(effect_builder, rng, Event::AddressGossiper(event)) - } - Event::NetworkAnnouncement(NetworkAnnouncement::NewPeer(_peer_id)) => { - trace!("new peer announcement not handled in the validator reactor"); - Effects::new() - } - Event::RpcServerAnnouncement(RpcServerAnnouncement::DeployReceived { - deploy, - responder, - }) => { - let event = deploy_acceptor::Event::Accept { - deploy, - source: Source::::Client, - responder, - }; - self.dispatch_event(effect_builder, rng, Event::DeployAcceptor(event)) - } - Event::DeployAcceptorAnnouncement(DeployAcceptorAnnouncement::AcceptedNewDeploy { - deploy, - source, - }) => { - let deploy_type = match deploy.deploy_type() { - Ok(deploy_type) => deploy_type, - Err(error) => { - tracing::error!("Invalid deploy: {:?}", error); - return Effects::new(); - } - }; - - let event = block_proposer::Event::BufferDeploy { - hash: *deploy.id(), - deploy_type: Box::new(deploy_type), - }; - let mut effects = - self.dispatch_event(effect_builder, rng, Event::BlockProposer(event)); - - let event = gossiper::Event::ItemReceived { - item_id: *deploy.id(), - source: source.clone(), - }; - effects.extend(self.dispatch_event( - effect_builder, - rng, - Event::DeployGossiper(event), - )); - - let event = fetcher::Event::GotRemotely { - item: deploy, - source, - }; - effects.extend(self.dispatch_event( - effect_builder, - rng, - 
Event::DeployFetcher(event), - )); - - effects - } - Event::DeployAcceptorAnnouncement(DeployAcceptorAnnouncement::InvalidDeploy { - deploy: _, - source: _, - }) => Effects::new(), - Event::ConsensusAnnouncement(consensus_announcement) => match consensus_announcement { - ConsensusAnnouncement::Finalized(block) => { - let reactor_event = - Event::BlockProposer(block_proposer::Event::FinalizedBlock(block)); - self.dispatch_event(effect_builder, rng, reactor_event) - } - ConsensusAnnouncement::CreatedFinalitySignature(fs) => self.dispatch_event( - effect_builder, - rng, - Event::LinearChain(linear_chain::Event::FinalitySignatureReceived(fs, false)), - ), - ConsensusAnnouncement::Fault { - era_id, - public_key, - timestamp, - } => { - let reactor_event = - Event::EventStreamServer(event_stream_server::Event::Fault { - era_id, - public_key: *public_key, - timestamp, - }); - self.dispatch_event(effect_builder, rng, reactor_event) - } - }, - Event::ContractRuntimeAnnouncement(ContractRuntimeAnnouncement::LinearChainBlock( - linear_chain_block, - )) => { - let LinearChainBlock { - block, - execution_results, - } = *linear_chain_block; - let mut effects = Effects::new(); - let block_hash = *block.hash(); - - // send to linear chain - let reactor_event = Event::LinearChain(linear_chain::Event::NewLinearChainBlock { - block: Box::new(block), - execution_results: execution_results - .iter() - .map(|(hash, (_header, results))| (*hash, results.clone())) - .collect(), - }); - effects.extend(self.dispatch_event(effect_builder, rng, reactor_event)); - - // send to event stream - for (deploy_hash, (deploy_header, execution_result)) in execution_results { - let reactor_event = - Event::EventStreamServer(event_stream_server::Event::DeployProcessed { - deploy_hash, - deploy_header: Box::new(deploy_header), - block_hash, - execution_result: Box::new(execution_result), - }); - effects.extend(self.dispatch_event(effect_builder, rng, reactor_event)); - } - - effects - } - 
Event::ContractRuntimeAnnouncement( - ContractRuntimeAnnouncement::BlockAlreadyExecuted(_), - ) => { - debug!("Ignoring `BlockAlreadyExecuted` announcement in `validator` reactor."); - Effects::new() - } - Event::DeployGossiperAnnouncement(_ann) => { - unreachable!("the deploy gossiper should never make an announcement") - } - Event::AddressGossiperAnnouncement(ann) => { - let GossiperAnnouncement::NewCompleteItem(gossiped_address) = ann; - let reactor_event = Event::SmallNetwork(small_network::Event::PeerAddressReceived( - gossiped_address, - )); - self.dispatch_event(effect_builder, rng, reactor_event) - } - Event::LinearChainAnnouncement(LinearChainAnnouncement::BlockAdded(block)) => { - let reactor_event = - Event::EventStreamServer(event_stream_server::Event::BlockAdded(block.clone())); - let mut effects = self.dispatch_event(effect_builder, rng, reactor_event); - let reactor_event = Event::Consensus(consensus::Event::BlockAdded(block)); - effects.extend(self.dispatch_event(effect_builder, rng, reactor_event)); - effects - } - Event::LinearChainAnnouncement(LinearChainAnnouncement::NewFinalitySignature(fs)) => { - let reactor_event = - Event::EventStreamServer(event_stream_server::Event::FinalitySignature(fs)); - self.dispatch_event(effect_builder, rng, reactor_event) - } - Event::ChainspecLoaderAnnouncement( - ChainspecLoaderAnnouncement::UpgradeActivationPointRead(next_upgrade), - ) => { - let reactor_event = Event::ChainspecLoader( - chainspec_loader::Event::GotNextUpgrade(next_upgrade.clone()), - ); - let mut effects = self.dispatch_event(effect_builder, rng, reactor_event); - - let reactor_event = Event::Consensus(consensus::Event::GotUpgradeActivationPoint( - next_upgrade.activation_point(), - )); - effects.extend(self.dispatch_event(effect_builder, rng, reactor_event)); - effects - } - Event::BlocklistAnnouncement(ann) => { - self.dispatch_event(effect_builder, rng, Event::SmallNetwork(ann.into())) - } - } - } - - fn update_metrics(&mut self, 
event_queue_handle: EventQueueHandle) { - self.memory_metrics.estimate(&self); - self.event_queue_metrics - .record_event_queue_counts(&event_queue_handle) - } - - fn maybe_exit(&self) -> Option { - self.consensus - .stop_for_upgrade() - .then(|| ReactorExit::ProcessShouldExit(ExitCode::Success)) - } -} - -#[cfg(test)] -impl NetworkedReactor for Reactor { - type NodeId = NodeId; - fn node_id(&self) -> Self::NodeId { - if env::var(ENABLE_LIBP2P_NET_ENV_VAR).is_ok() { - self.network.node_id() - } else { - self.small_network.node_id() - } - } -} diff --git a/node/src/reactor/validator/config.rs b/node/src/reactor/validator/config.rs deleted file mode 100644 index 610b3bdd09..0000000000 --- a/node/src/reactor/validator/config.rs +++ /dev/null @@ -1,42 +0,0 @@ -use datasize::DataSize; -use serde::{Deserialize, Serialize}; - -use crate::{ - logging::LoggingConfig, types::NodeConfig, BlockProposerConfig, ConsensusConfig, - ContractRuntimeConfig, DeployAcceptorConfig, EventStreamServerConfig, FetcherConfig, - GossipConfig, RestServerConfig, RpcServerConfig, SmallNetworkConfig, StorageConfig, -}; - -/// Root configuration. -#[derive(DataSize, Debug, Default, Deserialize, Serialize)] -// Disallow unknown fields to ensure config files and command-line overrides contain valid keys. -#[serde(deny_unknown_fields)] -pub struct Config { - /// Node configuration. - pub node: NodeConfig, - /// Logging configuration. - pub logging: LoggingConfig, - /// Consensus configuration. - pub consensus: ConsensusConfig, - /// Network configuration. - pub network: SmallNetworkConfig, - /// Event stream API server configuration. - pub event_stream_server: EventStreamServerConfig, - /// REST API server configuration. - pub rest_server: RestServerConfig, - /// RPC API server configuration. - pub rpc_server: RpcServerConfig, - /// On-disk storage configuration. - pub storage: StorageConfig, - /// Gossip protocol configuration. - pub gossip: GossipConfig, - /// Fetcher configuration. 
- pub fetcher: FetcherConfig, - /// Contract runtime configuration. - pub contract_runtime: ContractRuntimeConfig, - /// Deploy acceptor configuration. - pub deploy_acceptor: DeployAcceptorConfig, - /// Block proposer configuration. - #[serde(default)] - pub block_proposer: BlockProposerConfig, -} diff --git a/node/src/reactor/validator/error.rs b/node/src/reactor/validator/error.rs deleted file mode 100644 index eebf0999f5..0000000000 --- a/node/src/reactor/validator/error.rs +++ /dev/null @@ -1,42 +0,0 @@ -use thiserror::Error; - -use crate::{ - components::{contract_runtime, network, small_network, storage}, - utils::ListeningError, -}; - -/// Error type returned by the validator reactor. -#[derive(Debug, Error)] -pub enum Error { - /// Metrics-related error - #[error("prometheus (metrics) error: {0}")] - Metrics(#[from] prometheus::Error), - - /// `Network` component error. - #[error("network error: {0}")] - Network(#[from] network::Error), - - /// `SmallNetwork` component error. - #[error("small network error: {0}")] - SmallNetwork(#[from] small_network::Error), - - /// An error starting one of the HTTP servers. - #[error("http server listening error: {0}")] - ListeningError(#[from] ListeningError), - - /// `Storage` component error. - #[error("storage error: {0}")] - Storage(#[from] storage::Error), - - /// `Consensus` component error. - #[error("consensus error: {0}")] - Consensus(#[from] anyhow::Error), - - /// `ContractRuntime` component error. - #[error("contract runtime config error: {0}")] - ContractRuntime(#[from] contract_runtime::ConfigError), - - /// Failed to serialize data. 
- #[error("serialization: {0}")] - Serialization(#[source] bincode::ErrorKind), -} diff --git a/node/src/reactor/validator/memory_metrics.rs b/node/src/reactor/validator/memory_metrics.rs deleted file mode 100644 index f90328a9ef..0000000000 --- a/node/src/reactor/validator/memory_metrics.rs +++ /dev/null @@ -1,247 +0,0 @@ -use std::env; - -use datasize::DataSize; -use prometheus::{self, Histogram, HistogramOpts, IntGauge, Registry}; -use tracing::debug; - -use super::Reactor; -use crate::{components::network::ENABLE_LIBP2P_NET_ENV_VAR, unregister_metric}; - -/// Metrics for memory usage. -#[derive(Debug)] -pub(super) struct MemoryMetrics { - /// Total estimated heap memory usage. - mem_total: IntGauge, - - /// Estimated heap memory usage of metrics component. - mem_metrics: IntGauge, - /// Estimated heap memory usage of network component. - mem_net: IntGauge, - /// Estimated heap memory usage of address gossiper component. - mem_address_gossiper: IntGauge, - /// Estimated heap memory usage of storage component. - mem_storage: IntGauge, - /// Estimated heap memory usage of contract runtime component. - mem_contract_runtime: IntGauge, - /// Estimated heap memory usage of rpc server component. - mem_rpc_server: IntGauge, - /// Estimated heap memory usage of rest server component. - mem_rest_server: IntGauge, - /// Estimated heap memory usage of event stream server component. - mem_event_stream_server: IntGauge, - /// Estimated heap memory usage of chainspec loader component. - mem_chainspec_loader: IntGauge, - /// Estimated heap memory usage of consensus component. - mem_consensus: IntGauge, - /// Estimated heap memory usage of deploy fetcher component. - mem_deploy_fetcher: IntGauge, - /// Estimated heap memory usage of deploy gossiper component. - mem_deploy_gossiper: IntGauge, - /// Estimated heap memory usage of block_proposer component. - mem_block_proposer: IntGauge, - /// Estimated heap memory usage of block validator component. 
- mem_proto_block_validator: IntGauge, - /// Estimated heap memory usage of linear chain component. - mem_linear_chain: IntGauge, - - /// Histogram detailing how long it took to measure memory usage. - mem_estimator_runtime_s: Histogram, - - /// Instance of registry to unregister from when being dropped. - registry: Registry, -} - -impl MemoryMetrics { - /// Initializes a new set of memory metrics. - pub(super) fn new(registry: Registry) -> Result { - let mem_total = IntGauge::new("mem_total", "total memory usage in bytes")?; - let mem_metrics = IntGauge::new("mem_metrics", "metrics memory usage in bytes")?; - let mem_net = IntGauge::new("mem_net", "net memory usage in bytes")?; - let mem_address_gossiper = IntGauge::new( - "mem_address_gossiper", - "address_gossiper memory usage in bytes", - )?; - let mem_storage = IntGauge::new("mem_storage", "storage memory usage in bytes")?; - let mem_contract_runtime = IntGauge::new( - "mem_contract_runtime", - "contract_runtime memory usage in bytes", - )?; - let mem_rpc_server = IntGauge::new("mem_rpc_server", "rpc_server memory usage in bytes")?; - let mem_rest_server = - IntGauge::new("mem_rest_server", "mem_rest_server memory usage in bytes")?; - let mem_event_stream_server = IntGauge::new( - "mem_event_stream_server", - "mem_event_stream_server memory usage in bytes", - )?; - let mem_chainspec_loader = IntGauge::new( - "mem_chainspec_loader", - "chainspec_loader memory usage in bytes", - )?; - let mem_consensus = IntGauge::new("mem_consensus", "consensus memory usage in bytes")?; - let mem_deploy_fetcher = - IntGauge::new("mem_deploy_fetcher", "deploy_fetcher memory usage in bytes")?; - let mem_deploy_gossiper = IntGauge::new( - "mem_deploy_gossiper", - "deploy_gossiper memory usage in bytes", - )?; - let mem_block_proposer = - IntGauge::new("mem_block_proposer", "block_proposer memory usage in bytes")?; - let mem_proto_block_validator = IntGauge::new( - "mem_proto_block_validator", - "proto_block_validator memory usage 
in bytes", - )?; - let mem_linear_chain = - IntGauge::new("mem_linear_chain", "linear_chain memory usage in bytes")?; - - let mem_estimator_runtime_s = Histogram::with_opts( - HistogramOpts::new( - "mem_estimator_runtime_s", - "time taken to estimate memory usage, in seconds", - ) - // Create buckets from one nanosecond to eight seconds. - .buckets(prometheus::exponential_buckets(0.000_000_004, 2.0, 32)?), - )?; - - registry.register(Box::new(mem_total.clone()))?; - registry.register(Box::new(mem_metrics.clone()))?; - registry.register(Box::new(mem_net.clone()))?; - registry.register(Box::new(mem_address_gossiper.clone()))?; - registry.register(Box::new(mem_storage.clone()))?; - registry.register(Box::new(mem_contract_runtime.clone()))?; - registry.register(Box::new(mem_rpc_server.clone()))?; - registry.register(Box::new(mem_rest_server.clone()))?; - registry.register(Box::new(mem_event_stream_server.clone()))?; - registry.register(Box::new(mem_chainspec_loader.clone()))?; - registry.register(Box::new(mem_consensus.clone()))?; - registry.register(Box::new(mem_deploy_fetcher.clone()))?; - registry.register(Box::new(mem_deploy_gossiper.clone()))?; - registry.register(Box::new(mem_block_proposer.clone()))?; - registry.register(Box::new(mem_proto_block_validator.clone()))?; - registry.register(Box::new(mem_linear_chain.clone()))?; - registry.register(Box::new(mem_estimator_runtime_s.clone()))?; - - Ok(MemoryMetrics { - mem_total, - mem_metrics, - mem_net, - mem_address_gossiper, - mem_storage, - mem_contract_runtime, - mem_rpc_server, - mem_rest_server, - mem_event_stream_server, - mem_chainspec_loader, - mem_consensus, - mem_deploy_fetcher, - mem_deploy_gossiper, - mem_block_proposer, - mem_proto_block_validator, - mem_linear_chain, - mem_estimator_runtime_s, - registry, - }) - } - - /// Estimates memory usage and updates metrics. 
- pub(super) fn estimate(&self, reactor: &Reactor) { - let timer = self.mem_estimator_runtime_s.start_timer(); - - let metrics = reactor.metrics.estimate_heap_size() as i64; - let net = if env::var(ENABLE_LIBP2P_NET_ENV_VAR).is_ok() { - reactor.network.estimate_heap_size() as i64 - } else { - reactor.small_network.estimate_heap_size() as i64 - }; - let address_gossiper = reactor.address_gossiper.estimate_heap_size() as i64; - let storage = reactor.storage.estimate_heap_size() as i64; - let contract_runtime = reactor.contract_runtime.estimate_heap_size() as i64; - let rpc_server = reactor.rpc_server.estimate_heap_size() as i64; - let rest_server = reactor.rest_server.estimate_heap_size() as i64; - let event_stream_server = reactor.event_stream_server.estimate_heap_size() as i64; - let chainspec_loader = reactor.chainspec_loader.estimate_heap_size() as i64; - let consensus = reactor.consensus.estimate_heap_size() as i64; - let deploy_fetcher = reactor.deploy_fetcher.estimate_heap_size() as i64; - let deploy_gossiper = reactor.deploy_gossiper.estimate_heap_size() as i64; - let block_proposer = reactor.block_proposer.estimate_heap_size() as i64; - let proto_block_validator = reactor.proto_block_validator.estimate_heap_size() as i64; - - let linear_chain = reactor.linear_chain.estimate_heap_size() as i64; - - let total = metrics - + net - + address_gossiper - + storage - + contract_runtime - + rpc_server - + rest_server - + event_stream_server - + chainspec_loader - + consensus - + deploy_fetcher - + deploy_gossiper - + block_proposer - + proto_block_validator - + linear_chain; - - self.mem_total.set(total); - self.mem_metrics.set(metrics); - self.mem_net.set(net); - self.mem_address_gossiper.set(address_gossiper); - self.mem_storage.set(storage); - self.mem_contract_runtime.set(contract_runtime); - self.mem_rpc_server.set(rpc_server); - self.mem_rest_server.set(rest_server); - self.mem_event_stream_server.set(event_stream_server); - 
self.mem_chainspec_loader.set(chainspec_loader); - self.mem_consensus.set(consensus); - self.mem_deploy_fetcher.set(deploy_fetcher); - self.mem_deploy_gossiper.set(deploy_gossiper); - self.mem_block_proposer.set(block_proposer); - self.mem_proto_block_validator.set(proto_block_validator); - self.mem_linear_chain.set(linear_chain); - - // Stop the timer explicitly, don't count logging. - let duration_s = timer.stop_and_record(); - - debug!(%total, - %duration_s, - %metrics, - %net, - %address_gossiper, - %storage, - %contract_runtime, - %rpc_server, - %rest_server, - %event_stream_server, - %chainspec_loader, - %consensus, - %deploy_fetcher, - %deploy_gossiper, - %block_proposer, - %proto_block_validator, - %linear_chain, - "Collected new set of memory metrics."); - } -} - -impl Drop for MemoryMetrics { - fn drop(&mut self) { - unregister_metric!(self.registry, self.mem_total); - unregister_metric!(self.registry, self.mem_metrics); - unregister_metric!(self.registry, self.mem_net); - unregister_metric!(self.registry, self.mem_address_gossiper); - unregister_metric!(self.registry, self.mem_storage); - unregister_metric!(self.registry, self.mem_contract_runtime); - unregister_metric!(self.registry, self.mem_rpc_server); - unregister_metric!(self.registry, self.mem_rest_server); - unregister_metric!(self.registry, self.mem_event_stream_server); - unregister_metric!(self.registry, self.mem_chainspec_loader); - unregister_metric!(self.registry, self.mem_consensus); - unregister_metric!(self.registry, self.mem_deploy_fetcher); - unregister_metric!(self.registry, self.mem_deploy_gossiper); - unregister_metric!(self.registry, self.mem_block_proposer); - unregister_metric!(self.registry, self.mem_proto_block_validator); - unregister_metric!(self.registry, self.mem_linear_chain); - unregister_metric!(self.registry, self.mem_estimator_runtime_s); - } -} diff --git a/node/src/reactor/validator/tests.rs b/node/src/reactor/validator/tests.rs deleted file mode 100644 index 
41e40c1bee..0000000000 --- a/node/src/reactor/validator/tests.rs +++ /dev/null @@ -1,244 +0,0 @@ -use std::{collections::BTreeMap, sync::Arc, time::Duration}; - -use anyhow::bail; -use log::info; -use num::Zero; -use num_rational::Ratio; -use rand::Rng; -use tempfile::TempDir; - -use casper_execution_engine::shared::motes::Motes; -use casper_types::{system::auction::DelegationRate, EraId, PublicKey, SecretKey, U512}; - -use crate::{ - components::{consensus, gossiper, small_network, storage}, - crypto::AsymmetricKeyExt, - reactor::{initializer, joiner, validator, ReactorExit, Runner}, - testing::{self, network::Network, TestRng}, - types::{ - chainspec::{AccountConfig, AccountsConfig, ValidatorConfig}, - ActivationPoint, Chainspec, Timestamp, - }, - utils::{External, Loadable, WithDir, RESOURCES_PATH}, - NodeRng, -}; - -struct TestChain { - // Keys that validator instances will use, can include duplicates - keys: Vec, - storages: Vec, - chainspec: Arc, -} - -type Nodes = crate::testing::network::Nodes; - -impl TestChain { - /// Instantiates a new test chain configuration. - /// - /// Generates secret keys for `size` validators and creates a matching chainspec. - fn new(rng: &mut TestRng, size: usize) -> Self { - let keys: Vec = (0..size).map(|_| SecretKey::random(rng)).collect(); - let stakes = keys - .iter() - .map(|secret_key| { - // We use very large stakes so we would catch overflow issues. - let stake = U512::from(rng.gen_range(100..999)) * U512::from(u128::MAX); - (PublicKey::from(secret_key), stake) - }) - .collect(); - Self::new_with_keys(rng, keys, stakes) - } - - /// Instantiates a new test chain configuration. - /// - /// Takes a vector of bonded keys with specified bond amounts. - fn new_with_keys( - rng: &mut TestRng, - keys: Vec, - stakes: BTreeMap, - ) -> Self { - // Load the `local` chainspec. - let mut chainspec = Chainspec::from_resources("local"); - - // Override accounts with those generated from the keys. 
- let accounts = stakes - .into_iter() - .map(|(public_key, bonded_amount)| { - let validator_config = - ValidatorConfig::new(Motes::new(bonded_amount), DelegationRate::zero()); - AccountConfig::new( - public_key, - Motes::new(U512::from(rng.gen_range(10000..99999999))), - Some(validator_config), - ) - }) - .collect(); - let delegators = vec![]; - chainspec.network_config.accounts_config = AccountsConfig::new(accounts, delegators); - - // Make the genesis timestamp 45 seconds from now, to allow for all validators to start up. - chainspec.protocol_config.activation_point = - ActivationPoint::Genesis(Timestamp::now() + 45000.into()); - - chainspec.core_config.minimum_era_height = 1; - chainspec.highway_config.finality_threshold_fraction = Ratio::new(34, 100); - chainspec.core_config.era_duration = 10.into(); - chainspec.core_config.auction_delay = 1; - chainspec.core_config.unbonding_delay = 3; - - TestChain { - keys, - chainspec: Arc::new(chainspec), - storages: Vec::new(), - } - } - - /// Creates an initializer/validator configuration for the `idx`th validator. - fn create_node_config(&mut self, idx: usize, first_node_port: u16) -> validator::Config { - // Set the network configuration. - let mut cfg = validator::Config { - network: if idx == 0 { - small_network::Config::default_local_net_first_node(first_node_port) - } else { - small_network::Config::default_local_net(first_node_port) - }, - gossip: gossiper::Config::new_with_small_timeouts(), - ..Default::default() - }; - - // ...and the secret key for our validator. - cfg.consensus.secret_key_path = External::from_value(self.keys[idx].duplicate()); - - // Additionally set up storage in a temporary directory. 
- let (storage_cfg, temp_dir) = storage::Config::default_for_tests(); - cfg.consensus.highway.unit_hashes_folder = temp_dir.path().to_path_buf(); - self.storages.push(temp_dir); - cfg.storage = storage_cfg; - - cfg - } - - async fn create_initialized_network( - &mut self, - rng: &mut NodeRng, - ) -> anyhow::Result> { - let root = RESOURCES_PATH.join("local"); - - let mut network: Network = Network::new(); - let first_node_port = testing::unused_port_on_localhost(); - - for idx in 0..self.keys.len() { - let cfg = self.create_node_config(idx, first_node_port); - - // We create an initializer reactor here and run it to completion. - let mut initializer_runner = Runner::::new_with_chainspec( - (false, WithDir::new(root.clone(), cfg)), - Arc::clone(&self.chainspec), - ) - .await?; - let reactor_exit = initializer_runner.run(rng).await; - if reactor_exit != ReactorExit::ProcessShouldContinue { - bail!("failed to initialize successfully"); - } - - // Now we can construct the actual node. - let initializer = initializer_runner.into_inner(); - let mut joiner_runner = - Runner::::new(WithDir::new(root.clone(), initializer), rng) - .await?; - let _ = joiner_runner.run(rng).await; - - let config = joiner_runner.into_inner().into_validator_config().await?; - - network - .add_node_with_config(config, rng) - .await - .expect("could not add node to reactor"); - } - - Ok(network) - } -} - -/// Given an era number, returns a predicate to check if all of the nodes are in the specified era. -fn is_in_era(era_id: EraId) -> impl Fn(&Nodes) -> bool { - move |nodes: &Nodes| { - nodes - .values() - .all(|runner| runner.reactor().inner().consensus().current_era() == era_id) - } -} - -#[tokio::test] -async fn run_validator_network() { - testing::init_logging(); - - let mut rng = crate::new_rng(); - - // Instantiate a new chain with a fixed size. 
- const NETWORK_SIZE: usize = 5; - let mut chain = TestChain::new(&mut rng, NETWORK_SIZE); - - let mut net = chain - .create_initialized_network(&mut rng) - .await - .expect("network initialization failed"); - - // Wait for all nodes to agree on one era. - net.settle_on(&mut rng, is_in_era(EraId::from(1)), Duration::from_secs(90)) - .await; - - net.settle_on(&mut rng, is_in_era(EraId::from(2)), Duration::from_secs(60)) - .await; -} - -// TODO: fix this test -#[tokio::test] -async fn run_equivocator_network() { - testing::init_logging(); - - let mut rng = crate::new_rng(); - - let alice_sk = SecretKey::random(&mut rng); - let size: usize = 2; - let mut keys: Vec = (1..size).map(|_| SecretKey::random(&mut rng)).collect(); - let mut stakes: BTreeMap = keys - .iter() - .map(|secret_key| (PublicKey::from(secret_key), U512::from(100))) - .collect(); - stakes.insert(PublicKey::from(&alice_sk), U512::from(1)); - keys.push(alice_sk.clone()); - keys.push(alice_sk); - - let mut chain = TestChain::new_with_keys(&mut rng, keys, stakes); - let protocol_config = (&*chain.chainspec).into(); - - let mut net = chain - .create_initialized_network(&mut rng) - .await - .expect("network initialization failed"); - - info!("Waiting for Era 0 to end"); - net.settle_on( - &mut rng, - is_in_era(EraId::from(1)), - Duration::from_secs(600), - ) - .await; - - let last_era_number = 5; - let timeout = Duration::from_secs(90); - - for era_number in 2..last_era_number { - info!("Waiting for Era {} to end", era_number); - net.settle_on(&mut rng, is_in_era(EraId::from(era_number)), timeout) - .await; - } - - // Make sure we waited long enough for this test to include unbonding and dropping eras. 
- let oldest_bonded_era_id = - consensus::oldest_bonded_era(&protocol_config, EraId::from(last_era_number)); - let oldest_evidence_era_id = - consensus::oldest_bonded_era(&protocol_config, oldest_bonded_era_id); - assert!(!oldest_evidence_era_id.is_genesis()); -} diff --git a/node/src/testing.rs b/node/src/testing.rs index 26fe87b4e5..4079dace44 100644 --- a/node/src/testing.rs +++ b/node/src/testing.rs @@ -4,80 +4,129 @@ //! `casper-node` library. mod condition_check_reactor; -mod multi_stage_test_reactor; -pub mod network; -mod test_rng; +mod fake_transaction_acceptor; +pub(crate) mod filter_reactor; +pub(crate) mod network; +pub(crate) mod test_clock; use std::{ any::type_name, fmt::Debug, + fs, + io::Write, marker::PhantomData, - net::{Ipv4Addr, TcpListener}, + ops::Range, + sync::atomic::{AtomicU16, Ordering}, time, }; use anyhow::Context; +use assert_json_diff::{assert_json_eq, assert_json_matches_no_panic, CompareMode, Config}; use derive_more::From; use futures::channel::oneshot; -use serde::{de::DeserializeOwned, Serialize}; +use once_cell::sync::Lazy; +use rand::Rng; +use serde_json::Value; use tempfile::TempDir; use tokio::runtime::{self, Runtime}; -use tracing::{debug, info, warn}; +use tracing::{debug, warn}; + +use casper_types::testing::TestRng; use crate::{ components::Component, - effect::{announcements::ControlAnnouncement, EffectBuilder, Effects, Responder}, + effect::{ + announcements::{ControlAnnouncement, FatalAnnouncement}, + requests::NetworkRequest, + EffectBuilder, Effects, Responder, + }, logging, + protocol::Message, reactor::{EventQueueHandle, QueueKind, ReactorEvent, Scheduler}, }; pub(crate) use condition_check_reactor::ConditionCheckReactor; -pub(crate) use multi_stage_test_reactor::MultiStageTestReactor; -pub(crate) use test_rng::TestRng; +pub(crate) use fake_transaction_acceptor::FakeTransactionAcceptor; /// Time to wait (at most) for a `fatal` to resolve before considering the dropping of a responder a /// problem. 
const FATAL_GRACE_TIME: time::Duration = time::Duration::from_secs(3); -pub fn bincode_roundtrip(value: &T) { - let serialized = bincode::serialize(value).unwrap(); - let deserialized = bincode::deserialize(serialized.as_slice()).unwrap(); - assert_eq!(*value, deserialized); +/// The range of ports used to allocate ports for network ports. +/// +/// The IANA ephemeral port range is 49152–65535, while Linux uses 32768–60999 by default. Windows +/// on the other hand uses 1025–60000. Mac OS X seems to use 49152-65535. For this reason this +/// constant uses different values on different systems. +#[cfg(not(target_os = "windows"))] +const TEST_PORT_RANGE: Range = { + // Note: Ensure the range is prime, so that any chosen `TEST_PORT_STRIDE` wraps around without + // conflicting. + + // All reasonable non-Windows systems seem to have a "hole" just below port 30000. + // + // This also does not conflict with nctl ports. + 29000..29997 +}; + +// On windows, we sneak into the upper end instead. +#[cfg(target_os = "windows")] +const TEST_PORT_RANGE: Range = 60001..60998; + +/// Random offset + stride for port generation. +const TEST_PORT_STRIDE: u16 = 29; + +pub(crate) const LARGE_WASM_LANE_ID: u8 = 3; + +macro_rules! map { + () => { std::collections::BTreeMap::new() }; + ( $first_key:expr => $first_value:expr $( , $key:expr => $value:expr )* $(,)? ) => {{ + let mut map = std::collections::BTreeMap::new(); + // There is no reason to add twice the same key. + // Since it's used for testing, we can panic in such a case: + assert!(map.insert($first_key, $first_value).is_none()); + $( + assert!(map.insert($key, $value).is_none()); + )* + map + }}; } +macro_rules! set { + () => { std::collections::BTreeSet::new() }; + ( $first_value:expr $( , $value:expr )* $(,)? ) => {{ + let mut set = std::collections::BTreeSet::new(); + // There is no reason to add twice the same key. 
+ // Since it's used for testing, we can panic in such a case: + assert!(set.insert($first_value)); + $( + assert!(set.insert($value)); + )* + set + }} +} +pub(crate) use map; +pub(crate) use set; /// Create an unused port on localhost. +/// +/// Returns a random port on localhost, provided that no other applications are binding ports inside +/// `TEST_PORT_RANGE` and no other testing process is run in parallel. Should the latter happen, +/// some randomization is used to avoid conflicts, without guarantee of success. pub(crate) fn unused_port_on_localhost() -> u16 { - // Unfortunately a randomly generated port by a random number generator still has a chance to - // hit the occasional duplicate or an already bound port once in a while, due to the small port - // space. For this reason, we ask the OS for an unused port instead and hope that no one binds - // to it in the meantime. - - // For a collision to occur, it is now required that after running this function, but before - // rebinding the port, an unrelated program or a parallel running test must manage to bind to - // precisely this port, hitting the same port randomly. - - // This is slightly better than a strictly random port, since it takes already bound ports - // across the entire interface into account, but it does rely on the OS providing random ports - // when asked for a _unused_ one. - - // An alternative approach is to create a bound port with `SO_REUSEPORT`, which would close the - // gap between calling this function and binding again, never calling listening on the instance - // created by this function, but still blocking it from being reassigned by accident. This - // approach requires the networking component to either accept arbitrary incoming sockets to be - // passed in or bind with `SO_REUSEPORT` as well, both are undesirable options. See - // https://stackoverflow.com/questions/14388706/how-do-so-reuseaddr-and-so-reuseport-differ for - // a detailed description on port reuse flags. 
- - let listener = TcpListener::bind((Ipv4Addr::new(127, 0, 0, 1), 0)) - .expect("could not bind new random port on localhost"); - let local_addr = listener - .local_addr() - .expect("local listener has no address?"); - - let port = local_addr.port(); - info!(%port, "OS generated random localhost port"); - - // Once we drop the listener, the port should be closed. - port + // Previous iterations of this implementation tried other approaches such as binding an + // ephemeral port and using that. This ran into race condition issues when the port was reused + // in the timespan where it was released and rebound. + + // The simpler approach is to select a random port from the non-ephemeral range and hope that no + // daemons are already bound/listening on it, which should not be the case on a CI system. + + // We use a random offset and stride to stretch this a little bit, should two processes run at + // the same time. + static NEXT_PORT: Lazy = Lazy::new(|| { + rand::thread_rng() + .gen_range(TEST_PORT_RANGE.start..(TEST_PORT_RANGE.start + TEST_PORT_STRIDE)) + .into() + }); + + NEXT_PORT.fetch_add(TEST_PORT_STRIDE, Ordering::SeqCst) } /// Sets up logging for testing. @@ -104,9 +153,6 @@ pub(crate) struct ComponentHarness { pub(crate) rng: TestRng, /// Scheduler for events. Only explicitly polled by the harness. pub(crate) scheduler: &'static Scheduler, - /// An event queue handle to the scheduler. - #[allow(unused)] // TODO: Remove once in use. - pub(crate) event_queue_handle: EventQueueHandle, /// Effect builder pointing at the scheduler. pub(crate) effect_builder: EffectBuilder, /// A temporary directory that can be used to store various data. @@ -122,7 +168,7 @@ pub(crate) struct ComponentHarnessBuilder { _phantom: PhantomData, } -impl ComponentHarnessBuilder { +impl ComponentHarnessBuilder { /// Builds a component harness instance. 
/// /// # Panics @@ -156,10 +202,10 @@ impl ComponentHarnessBuilder { } }; - let rng = self.rng.unwrap_or_else(TestRng::new); + let rng = self.rng.unwrap_or_default(); - let scheduler = Box::leak(Box::new(Scheduler::new(QueueKind::weights()))); - let event_queue_handle = EventQueueHandle::new(scheduler); + let scheduler = Box::leak(Box::new(Scheduler::new(QueueKind::weights(), None))); + let event_queue_handle = EventQueueHandle::without_shutdown(scheduler); let effect_builder = EffectBuilder::new(event_queue_handle); let runtime = runtime::Builder::new_multi_thread() .enable_all() @@ -169,7 +215,6 @@ impl ComponentHarnessBuilder { Ok(ComponentHarness { rng, scheduler, - event_queue_handle, effect_builder, tmp, runtime, @@ -213,7 +258,7 @@ impl ComponentHarness { let (sender, receiver) = oneshot::channel(); // Create response function. - let responder = Responder::create(sender); + let responder = Responder::without_shutdown(sender); // Create the event for the component. let request_event = f(responder); @@ -255,19 +300,35 @@ impl ComponentHarness { // Iterate over all events that currently are inside the queue and fish out any fatal. for _ in 0..(self.scheduler.item_count()) { - let (ev, _queue_kind) = self.runtime.block_on(self.scheduler.pop()); - - if let Some(ctrl_ann) = ev.as_control() { - match ctrl_ann { - fatal @ ControlAnnouncement::FatalError { .. } => { - panic!( - "a control announcement requesting a fatal error was received: {}", - fatal - ) - } + let ((_ancestor, ev), _queue_kind) = self.runtime.block_on(self.scheduler.pop()); + + if !ev.is_control() { + debug!(?ev, "ignoring event while looking for a fatal"); + continue; + } + match ev.try_into_control().unwrap() { + ControlAnnouncement::ShutdownDueToUserRequest { .. } => { + panic!("a control announcement requesting a shutdown due to user request was received") } - } else { - debug!(?ev, "ignoring event while looking for a fatal") + ControlAnnouncement::ShutdownForUpgrade { .. 
} => { + panic!("a control announcement requesting a shutdown for upgrade was received") + } + ControlAnnouncement::ShutdownAfterCatchingUp { .. } => { + panic!("a control announcement requesting a shutdown after catching up was received") + } + fatal @ ControlAnnouncement::FatalError { .. } => { + panic!( + "a control announcement requesting a fatal error was received: {}", + fatal + ) + } + ControlAnnouncement::QueueDumpRequest { .. } => { + panic!("queue dumps are not supported in the test harness") + } + ControlAnnouncement::ActivateFailpoint { .. } => { + panic!("currently no failpoint activations implemented in test harness") + // TODO: forward to component instead + }, } } @@ -290,7 +351,7 @@ impl ComponentHarness { } } -impl Default for ComponentHarness { +impl Default for ComponentHarness { fn default() -> Self { Self::builder().build() } @@ -298,27 +359,91 @@ impl Default for ComponentHarness { /// A special event for unit tests. /// -/// Essentially discards all event (they are not even processed by the unit testing hardness), +/// Essentially discards most events (they are not even processed by the unit testing harness), /// except for control announcements, which are preserved. #[derive(Debug, From)] -pub enum UnitTestEvent { +pub(crate) enum UnitTestEvent { /// A preserved control announcement. #[from] ControlAnnouncement(ControlAnnouncement), - /// A different event. - Other, + #[from] + FatalAnnouncement(FatalAnnouncement), + /// A network request made by the component under test. 
+ #[from] + NetworkRequest(#[allow(dead_code)] NetworkRequest), } impl ReactorEvent for UnitTestEvent { - fn as_control(&self) -> Option<&ControlAnnouncement> { + fn is_control(&self) -> bool { + match self { + UnitTestEvent::ControlAnnouncement(_) | UnitTestEvent::FatalAnnouncement(_) => true, + UnitTestEvent::NetworkRequest(_) => false, + } + } + + fn try_into_control(self) -> Option { match self { UnitTestEvent::ControlAnnouncement(ctrl_ann) => Some(ctrl_ann), - UnitTestEvent::Other => None, + UnitTestEvent::FatalAnnouncement(FatalAnnouncement { file, line, msg }) => { + Some(ControlAnnouncement::FatalError { file, line, msg }) + } + UnitTestEvent::NetworkRequest(_) => None, } } } -#[test] -fn default_works_without_panicking_for_component_harness() { - let _harness = ComponentHarness::<()>::default(); +/// Helper function to simulate the passage of time. +pub(crate) async fn advance_time(duration: time::Duration) { + tokio::time::pause(); + tokio::time::advance(duration).await; + tokio::time::resume(); + debug!("advanced time by {} secs", duration.as_secs()); +} + +/// Assert that the file at `schema_path` matches the provided `actual_schema`, which can be derived +/// from `schemars::schema_for!` or `schemars::schema_for_value!`, for example. This method will +/// create a temporary file with the actual schema and print the location if it fails. 
+pub fn assert_schema(schema_path: String, actual_schema: String) { + let expected_schema = fs::read_to_string(&schema_path).unwrap(); + let expected_schema: Value = serde_json::from_str(&expected_schema).unwrap(); + let mut temp_file = tempfile::Builder::new() + .suffix(".json") + .tempfile_in(env!("OUT_DIR")) + .unwrap(); + temp_file.write_all(actual_schema.as_bytes()).unwrap(); + let actual_schema: Value = serde_json::from_str(&actual_schema).unwrap(); + let (_file, temp_file_path) = temp_file.keep().unwrap(); + + let result = assert_json_matches_no_panic( + &actual_schema, + &expected_schema, + Config::new(CompareMode::Strict), + ); + assert_eq!( + result, + Ok(()), + "schema does not match:\nexpected:\n{}\nactual:\n{}\n", + schema_path, + temp_file_path.display() + ); + assert_json_eq!(actual_schema, expected_schema); +} + +#[cfg(test)] +mod tests { + use std::collections::HashSet; + + use super::{unused_port_on_localhost, ComponentHarness}; + + #[test] + fn default_works_without_panicking_for_component_harness() { + let _harness = ComponentHarness::<()>::default(); + } + + #[test] + fn can_generate_at_least_100_unused_ports() { + let ports: HashSet = (0..100).map(|_| unused_port_on_localhost()).collect(); + + assert_eq!(ports.len(), 100); + } } diff --git a/node/src/testing/condition_check_reactor.rs b/node/src/testing/condition_check_reactor.rs index c045df191c..8a52f8641c 100644 --- a/node/src/testing/condition_check_reactor.rs +++ b/node/src/testing/condition_check_reactor.rs @@ -1,15 +1,24 @@ -use std::fmt::{self, Debug, Formatter}; +use std::{ + fmt::{self, Debug, Formatter}, + sync::Arc, +}; use futures::future::BoxFuture; use prometheus::Registry; +use casper_types::{Chainspec, ChainspecRawBytes}; + use super::network::NetworkedReactor; use crate::{ + components::{network::Identity as NetworkIdentity, ComponentState}, effect::{EffectBuilder, Effects}, - reactor::{EventQueueHandle, Finalize, Reactor, ReactorExit}, + reactor::{EventQueueHandle, Finalize, 
Reactor}, + types::NodeId, NodeRng, }; +type ConditionChecker = Box::Event) -> bool + Send>; + /// A reactor wrapping an inner reactor, and which has an optional hook into /// `Reactor::dispatch_event()`. /// @@ -18,33 +27,31 @@ use crate::{ /// been met. /// /// Once the condition is met, the hook is reset to `None`. -pub struct ConditionCheckReactor { +pub(crate) struct ConditionCheckReactor { reactor: R, - condition_checker: Option bool + Send>>, + condition_checker: Option>, condition_result: bool, } impl ConditionCheckReactor { /// Sets the condition checker hook. - pub fn set_condition_checker( - &mut self, - condition_checker: Box bool + Send>, - ) { + pub(crate) fn set_condition_checker(&mut self, condition_checker: ConditionChecker) { self.condition_checker = Some(condition_checker); + self.condition_result = false; } /// Returns the result of the last execution of the condition checker hook. - pub fn condition_result(&self) -> bool { + pub(crate) fn condition_result(&self) -> bool { self.condition_result } /// Returns a reference to the wrapped reactor. - pub fn inner(&self) -> &R { + pub(crate) fn inner(&self) -> &R { &self.reactor } /// Returns a mutable reference to the wrapped reactor. 
- pub fn inner_mut(&mut self) -> &mut R { + pub(crate) fn inner_mut(&mut self) -> &mut R { &mut self.reactor } } @@ -56,11 +63,22 @@ impl Reactor for ConditionCheckReactor { fn new( config: Self::Config, + chainspec: Arc, + chainspec_raw_bytes: Arc, + network_identity: NetworkIdentity, registry: &Registry, event_queue: EventQueueHandle, rng: &mut NodeRng, ) -> Result<(Self, Effects), Self::Error> { - let (reactor, effects) = R::new(config, registry, event_queue, rng)?; + let (reactor, effects) = R::new( + config, + chainspec, + chainspec_raw_bytes, + network_identity, + registry, + event_queue, + rng, + )?; Ok(( Self { reactor, @@ -88,8 +106,8 @@ impl Reactor for ConditionCheckReactor { self.reactor.dispatch_event(effect_builder, rng, event) } - fn maybe_exit(&self) -> Option { - self.reactor.maybe_exit() + fn get_component_state(&self, name: &str) -> Option<&ComponentState> { + self.inner().get_component_state(name) } } @@ -100,9 +118,7 @@ impl Finalize for ConditionCheckReactor { } impl NetworkedReactor for ConditionCheckReactor { - type NodeId = R::NodeId; - - fn node_id(&self) -> Self::NodeId { + fn node_id(&self) -> NodeId { self.reactor.node_id() } } diff --git a/node/src/testing/fake_transaction_acceptor.rs b/node/src/testing/fake_transaction_acceptor.rs new file mode 100644 index 0000000000..a6fa6a86a4 --- /dev/null +++ b/node/src/testing/fake_transaction_acceptor.rs @@ -0,0 +1,150 @@ +#![allow(clippy::boxed_local)] // We use boxed locals to pass on event data unchanged. + +//! The `FakeTransactionAcceptor` behaves as per the real `TransactionAcceptor` but without any +//! transaction verification being performed. +//! +//! When a new transaction is passed in, it is unconditionally accepted. This means that the +//! `FakeTransactionAcceptor` puts the transaction to storage, and once that has completed, +//! announces the transaction if the storage result indicates it's a new transaction. 
+ +use std::sync::Arc; + +use tracing::debug; + +use casper_types::{Chainspec, Timestamp, Transaction}; + +pub(crate) use crate::components::transaction_acceptor::{Error, Event}; +use crate::{ + components::{transaction_acceptor::EventMetadata, Component}, + effect::{ + announcements::TransactionAcceptorAnnouncement, requests::StorageRequest, EffectBuilder, + EffectExt, Effects, Responder, + }, + types::MetaTransaction, + utils::Source, + NodeRng, +}; + +const COMPONENT_NAME: &str = "fake_transaction_acceptor"; + +pub(crate) trait ReactorEventT: + From + From + From + Send +{ +} + +impl ReactorEventT for REv where + REv: From + From + From + Send +{ +} + +#[derive(Debug)] +pub struct FakeTransactionAcceptor { + is_active: bool, + chainspec: Chainspec, +} + +impl FakeTransactionAcceptor { + pub(crate) fn new() -> Self { + FakeTransactionAcceptor { + is_active: true, + chainspec: Chainspec::default(), + } + } + + pub(crate) fn set_active(&mut self, new_setting: bool) { + self.is_active = new_setting; + } + + fn accept( + &mut self, + effect_builder: EffectBuilder, + transaction: Transaction, + source: Source, + maybe_responder: Option>>, + ) -> Effects { + let meta_transaction = MetaTransaction::from_transaction( + &transaction, + self.chainspec.core_config.pricing_handling, + &self.chainspec.transaction_config, + ) + .unwrap(); + let event_metadata = Box::new(EventMetadata::new( + transaction.clone(), + meta_transaction, + source, + maybe_responder, + Timestamp::now(), + )); + effect_builder + .put_transaction_to_storage(transaction) + .event(move |is_new| Event::PutToStorageResult { + event_metadata, + is_new, + }) + } + + fn handle_put_to_storage( + &self, + effect_builder: EffectBuilder, + event_metadata: Box, + is_new: bool, + ) -> Effects { + let EventMetadata { + meta_transaction: _, + transaction, + source, + maybe_responder, + verification_start_timestamp: _, + } = *event_metadata; + let mut effects = Effects::new(); + if is_new { + effects.extend( + 
effect_builder + .announce_new_transaction_accepted(Arc::new(transaction), source) + .ignore(), + ); + } + + if let Some(responder) = maybe_responder { + effects.extend(responder.respond(Ok(())).ignore()); + } + effects + } +} + +impl Component for FakeTransactionAcceptor { + type Event = Event; + + fn handle_event( + &mut self, + effect_builder: EffectBuilder, + _rng: &mut NodeRng, + event: Self::Event, + ) -> Effects { + if !self.is_active { + debug!( + ?event, + "FakeTransactionAcceptor: not active - ignoring event" + ); + return Effects::new(); + } + debug!(?event, "FakeTransactionAcceptor: handling event"); + match event { + Event::Accept { + transaction, + source, + maybe_responder, + } => self.accept(effect_builder, transaction, source, maybe_responder), + Event::PutToStorageResult { + event_metadata, + is_new, + .. + } => self.handle_put_to_storage(effect_builder, event_metadata, is_new), + _ => unimplemented!("unexpected {:?}", event), + } + } + + fn name(&self) -> &str { + COMPONENT_NAME + } +} diff --git a/node/src/testing/filter_reactor.rs b/node/src/testing/filter_reactor.rs new file mode 100644 index 0000000000..90a51a9c50 --- /dev/null +++ b/node/src/testing/filter_reactor.rs @@ -0,0 +1,115 @@ +use std::{ + fmt::{self, Debug, Formatter}, + sync::Arc, +}; + +use either::Either; +use futures::future::BoxFuture; +use prometheus::Registry; + +use casper_types::{Chainspec, ChainspecRawBytes}; + +use super::network::NetworkedReactor; +use crate::{ + components::network::Identity as NetworkIdentity, + effect::{EffectBuilder, Effects}, + failpoints::FailpointActivation, + reactor::{EventQueueHandle, Finalize, Reactor}, + types::NodeId, + NodeRng, +}; + +pub(crate) trait EventFilter: + FnMut(Ev) -> Either, Ev> + Send + 'static +{ +} +impl EventFilter for T where T: FnMut(Ev) -> Either, Ev> + Send + 'static {} + +/// A reactor wrapping an inner reactor, which has a hook into `Reactor::dispatch_event()` that +/// allows overriding or modifying event handling. 
+pub(crate) struct FilterReactor { + reactor: R, + filter: Box>, +} + +/// A filter that doesn't modify the behavior. +impl FilterReactor { + /// Sets the event filter. + pub(crate) fn set_filter(&mut self, filter: impl EventFilter) { + self.filter = Box::new(filter); + } + + /// Returns a reference to the wrapped reactor. + pub(crate) fn inner(&self) -> &R { + &self.reactor + } + + pub(crate) fn inner_mut(&mut self) -> &mut R { + &mut self.reactor + } +} + +impl Reactor for FilterReactor { + type Event = R::Event; + type Config = R::Config; + type Error = R::Error; + + fn new( + config: Self::Config, + chainspec: Arc, + chainspec_raw_bytes: Arc, + network_identity: NetworkIdentity, + registry: &Registry, + event_queue: EventQueueHandle, + rng: &mut NodeRng, + ) -> Result<(Self, Effects), Self::Error> { + let (reactor, effects) = R::new( + config, + chainspec, + chainspec_raw_bytes, + network_identity, + registry, + event_queue, + rng, + )?; + let filter = Box::new(Either::Right); + Ok((Self { reactor, filter }, effects)) + } + + fn dispatch_event( + &mut self, + effect_builder: EffectBuilder, + rng: &mut NodeRng, + event: Self::Event, + ) -> Effects { + match (self.filter)(event) { + Either::Left(effects) => effects, + Either::Right(event) => self.reactor.dispatch_event(effect_builder, rng, event), + } + } + + fn activate_failpoint(&mut self, activation: &FailpointActivation) { + self.reactor.activate_failpoint(activation); + } +} + +impl Finalize for FilterReactor { + fn finalize(self) -> BoxFuture<'static, ()> { + self.reactor.finalize() + } +} + +impl NetworkedReactor for FilterReactor { + fn node_id(&self) -> NodeId { + self.reactor.node_id() + } +} + +impl Debug for FilterReactor { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + formatter + .debug_struct("FilterReactor") + .field("reactor", &self.reactor) + .finish() + } +} diff --git a/node/src/testing/multi_stage_test_reactor.rs b/node/src/testing/multi_stage_test_reactor.rs deleted file mode 
100644 index cee32f457a..0000000000 --- a/node/src/testing/multi_stage_test_reactor.rs +++ /dev/null @@ -1,517 +0,0 @@ -pub mod test_chain; - -use std::{ - fmt::{self, Display, Formatter}, - mem, - path::PathBuf, - sync::Arc, -}; - -use derive_more::From; -use futures::FutureExt; -use once_cell::sync::Lazy; -use prometheus::Registry; -use serde::Serialize; -use thiserror::Error; -use tracing::warn; - -use crate::{ - components::{consensus::EraSupervisor, storage::Storage}, - effect::{announcements::ControlAnnouncement, EffectBuilder, EffectExt, Effects}, - reactor::{ - initializer::Reactor as InitializerReactor, - joiner::Reactor as JoinerReactor, - validator::{Reactor as ValidatorReactor, ValidatorInitConfig}, - wrap_effects, EventQueueHandle, QueueKind, Reactor, ReactorEvent, ReactorExit, Scheduler, - }, - testing::network::NetworkedReactor, - types::{Chainspec, NodeId}, - utils::{self, WithDir, RESOURCES_PATH}, - NodeRng, -}; - -pub static CONFIG_DIR: Lazy = Lazy::new(|| RESOURCES_PATH.join("local")); - -#[derive(Debug, Error)] -pub enum MultiStageTestReactorError { - #[error("Could not make initializer reactor: {0}")] - CouldNotMakeInitializerReactor(::Error), - - #[error(transparent)] - PrometheusError(#[from] prometheus::Error), -} - -#[derive(Debug, From, Serialize)] -#[allow(clippy::large_enum_variant)] -pub enum MultiStageTestEvent { - // Events wrapping internal reactor events. - #[from] - InitializerEvent(::Event), - #[from] - JoinerEvent(::Event), - #[from] - ValidatorEvent(::Event), - - // Events related to stage transitions. - JoinerFinalized(#[serde(skip_serializing)] Box), - - // Control announcement. - // These would only be used for fatal errors emitted by the multi-stage reactor itself, all - // "real" control announcements will be inside `InitializerEvent`, `JoinerEvent` or - // `ValidatorEvent`. 
- #[from] - ControlAnnouncement(ControlAnnouncement), -} - -impl ReactorEvent for MultiStageTestEvent { - fn as_control(&self) -> Option<&ControlAnnouncement> { - if let Self::ControlAnnouncement(ref ctrl_ann) = self { - Some(ctrl_ann) - } else { - None - } - } -} - -impl Display for MultiStageTestEvent { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - match self { - MultiStageTestEvent::InitializerEvent(ev) => { - write!(f, "initializer event: {}", ev) - } - MultiStageTestEvent::JoinerEvent(ev) => { - write!(f, "joiner event: {}", ev) - } - MultiStageTestEvent::ValidatorEvent(ev) => { - write!(f, "validator event: {}", ev) - } - MultiStageTestEvent::JoinerFinalized(_) => { - write!(f, "joiner finalization complete") - } - MultiStageTestEvent::ControlAnnouncement(ctrl_ann) => { - write!(f, "control: {}", ctrl_ann) - } - } - } -} - -pub(crate) enum MultiStageTestReactor { - Deactivated, - Initializer { - initializer_reactor: Box, - initializer_event_queue_handle: EventQueueHandle<::Event>, - registry: Box, - }, - Joiner { - joiner_reactor: Box, - joiner_event_queue_handle: EventQueueHandle<::Event>, - registry: Box, - }, - JoinerFinalizing { - maybe_validator_init_config: Option>, - node_id: NodeId, - registry: Box, - }, - Validator { - validator_reactor: Box, - validator_event_queue_handle: EventQueueHandle<::Event>, - }, -} - -impl MultiStageTestReactor { - pub fn consensus(&self) -> Option<&EraSupervisor> { - match self { - MultiStageTestReactor::Deactivated => unreachable!(), - MultiStageTestReactor::Initializer { .. } - | MultiStageTestReactor::Joiner { .. } - | MultiStageTestReactor::JoinerFinalizing { .. } => None, - MultiStageTestReactor::Validator { - validator_reactor, .. - } => Some(validator_reactor.consensus()), - } - } - - pub fn storage(&self) -> Option<&Storage> { - match self { - MultiStageTestReactor::Deactivated => unreachable!(), - MultiStageTestReactor::Initializer { - initializer_reactor, - .. 
- } => Some(initializer_reactor.storage()), - MultiStageTestReactor::Joiner { joiner_reactor, .. } => Some(joiner_reactor.storage()), - MultiStageTestReactor::JoinerFinalizing { - maybe_validator_init_config: None, - .. - } => None, - MultiStageTestReactor::JoinerFinalizing { - maybe_validator_init_config: Some(validator_init_config), - .. - } => Some(validator_init_config.storage()), - MultiStageTestReactor::Validator { - validator_reactor, .. - } => Some(validator_reactor.storage()), - } - } -} - -/// Long-running task that forwards events arriving on one scheduler to another. -async fn forward_to_queue(source: &Scheduler, target_queue: EventQueueHandle) -where - O: From, -{ - // Note: This will keep waiting forever if the sending end disappears, which is fine for tests. - loop { - let (event, queue_kind) = source.pop().await; - target_queue.schedule(event, queue_kind).await; - } -} - -pub(crate) struct InitializerReactorConfigWithChainspec { - config: ::Config, - chainspec: Arc, -} - -impl Reactor for MultiStageTestReactor { - type Event = MultiStageTestEvent; - - type Config = InitializerReactorConfigWithChainspec; - - type Error = MultiStageTestReactorError; - - fn new( - initializer_reactor_config_with_chainspec: Self::Config, - registry: &Registry, - event_queue: EventQueueHandle, - _rng: &mut NodeRng, - ) -> Result<(Self, Effects), Self::Error> { - let initializer_scheduler = utils::leak(Scheduler::new(QueueKind::weights())); - let initializer_event_queue_handle: EventQueueHandle< - ::Event, - > = EventQueueHandle::new(initializer_scheduler); - - tokio::spawn(forward_to_queue(initializer_scheduler, event_queue)); - - let (initializer_reactor, initializer_effects) = InitializerReactor::new_with_chainspec( - initializer_reactor_config_with_chainspec.config, - ®istry, - initializer_event_queue_handle, - initializer_reactor_config_with_chainspec.chainspec, - ) - .map_err(MultiStageTestReactorError::CouldNotMakeInitializerReactor)?; - - Ok(( - 
MultiStageTestReactor::Initializer { - initializer_reactor: Box::new(initializer_reactor), - initializer_event_queue_handle, - registry: Box::new(registry.clone()), - }, - wrap_effects(MultiStageTestEvent::InitializerEvent, initializer_effects), - )) - } - - fn dispatch_event( - &mut self, - effect_builder: EffectBuilder, - rng: &mut NodeRng, - event: MultiStageTestEvent, - ) -> Effects { - // We need to enforce node ids stay constant through state transitions - let old_node_id = self.node_id(); - - // Take ownership of self - let mut multi_stage_test_reactor = mem::replace(self, MultiStageTestReactor::Deactivated); - - // Keep track of whether the event signalled we should do a state transition - let mut should_transition = false; - - // Process the event - let mut effects = match (event, &mut multi_stage_test_reactor) { - (event, MultiStageTestReactor::Deactivated) => { - unreachable!( - "Event sent to deactivated three stage test reactor: {}", - event - ) - } - ( - MultiStageTestEvent::InitializerEvent(initializer_event), - MultiStageTestReactor::Initializer { - ref mut initializer_reactor, - initializer_event_queue_handle, - .. - }, - ) => { - let effect_builder = EffectBuilder::new(*initializer_event_queue_handle); - - let effects = wrap_effects( - MultiStageTestEvent::InitializerEvent, - initializer_reactor.dispatch_event(effect_builder, rng, initializer_event), - ); - - match initializer_reactor.maybe_exit() { - Some(ReactorExit::ProcessShouldContinue) => { - should_transition = true; - } - Some(_) => panic!("failed to transition from initializer to joiner"), - None => (), - } - - effects - } - ( - MultiStageTestEvent::JoinerEvent(joiner_event), - MultiStageTestReactor::Joiner { - ref mut joiner_reactor, - joiner_event_queue_handle, - .. 
- }, - ) => { - let effect_builder = EffectBuilder::new(*joiner_event_queue_handle); - - let effects = wrap_effects( - MultiStageTestEvent::JoinerEvent, - joiner_reactor.dispatch_event(effect_builder, rng, joiner_event), - ); - - match joiner_reactor.maybe_exit() { - Some(ReactorExit::ProcessShouldContinue) => { - should_transition = true; - } - Some(_) => panic!("failed to transition from initializer to joiner"), - None => (), - } - - effects - } - ( - MultiStageTestEvent::JoinerFinalized(validator_config), - MultiStageTestReactor::JoinerFinalizing { - ref mut maybe_validator_init_config, - .. - }, - ) => { - should_transition = true; - - *maybe_validator_init_config = Some(validator_config); - - // No effects, just transitioning. - Effects::new() - } - ( - MultiStageTestEvent::ValidatorEvent(validator_event), - MultiStageTestReactor::Validator { - ref mut validator_reactor, - validator_event_queue_handle, - .. - }, - ) => { - let effect_builder = EffectBuilder::new(*validator_event_queue_handle); - - let effects = wrap_effects( - MultiStageTestEvent::ValidatorEvent, - validator_reactor.dispatch_event(effect_builder, rng, validator_event), - ); - - if validator_reactor.maybe_exit().is_some() { - panic!("validator reactor should never stop"); - } - - effects - } - (event, three_stage_test_reactor) => { - let stage = match three_stage_test_reactor { - MultiStageTestReactor::Deactivated => "Deactivated", - MultiStageTestReactor::Initializer { .. } => "Initializing", - MultiStageTestReactor::Joiner { .. } => "Joining", - MultiStageTestReactor::JoinerFinalizing { .. } => "Finalizing joiner", - MultiStageTestReactor::Validator { .. 
} => "Validating", - }; - - warn!( - ?event, - ?stage, - "discarded event due to not being in the right stage" - ); - - // TODO: Fix code to stop discarding events and change below to unreachable!() - Effects::new() - } - }; - - if should_transition { - match multi_stage_test_reactor { - MultiStageTestReactor::Deactivated => { - // We will never transition when `Deactivated` - unreachable!() - } - MultiStageTestReactor::Initializer { - initializer_reactor, - initializer_event_queue_handle, - registry, - } => { - let dropped_events_count = effects.len(); - if dropped_events_count != 0 { - warn!("when transitioning from initializer to joiner, left {} effects unhandled", dropped_events_count) - } - - assert_eq!( - initializer_event_queue_handle - .event_queues_counts() - .values() - .sum::(), - 0, - "before transitioning from initializer to joiner, there should be no \ - unprocessed events" - ); - - let joiner_scheduler = utils::leak(Scheduler::new(QueueKind::weights())); - let joiner_event_queue_handle = EventQueueHandle::new(joiner_scheduler); - - tokio::spawn(forward_to_queue( - joiner_scheduler, - effect_builder.into_inner(), - )); - - let (joiner_reactor, joiner_effects) = JoinerReactor::new( - WithDir::new(&*CONFIG_DIR, *initializer_reactor), - ®istry, - joiner_event_queue_handle, - rng, - ) - .expect("joiner initialization failed"); - - *self = MultiStageTestReactor::Joiner { - joiner_reactor: Box::new(joiner_reactor), - joiner_event_queue_handle, - registry, - }; - - effects.extend( - wrap_effects(MultiStageTestEvent::JoinerEvent, joiner_effects).into_iter(), - ) - } - MultiStageTestReactor::Joiner { - joiner_reactor, - joiner_event_queue_handle, - registry, - } => { - let dropped_events_count = effects.len(); - if dropped_events_count != 0 { - warn!("when transitioning from joiner to validator, left {} effects unhandled", dropped_events_count) - } - - assert_eq!( - joiner_event_queue_handle - .event_queues_counts() - .values() - .sum::(), - 0, - "before 
transitioning from joiner to validator, \ - there should be no unprocessed events" - ); - - // `into_validator_config` is just waiting for networking sockets to shut down - // and will not stall on disabled event processing, so it is safe to block here. - // Since shutting down the joiner is an `async` function, we offload it into an - // effect and let runner do it. - - let node_id = joiner_reactor.node_id(); - effects.extend(joiner_reactor.into_validator_config().boxed().event( - |res_validator_init_config| { - let validator_init_config = res_validator_init_config.unwrap(); - MultiStageTestEvent::JoinerFinalized(Box::new(validator_init_config)) - }, - )); - - *self = MultiStageTestReactor::JoinerFinalizing { - maybe_validator_init_config: None, - node_id, - registry, - }; - } - MultiStageTestReactor::JoinerFinalizing { - maybe_validator_init_config: opt_validator_config, - node_id: _, - registry, - } => { - let validator_config = opt_validator_config.expect("trying to transition from joiner finalizing into validator, but there is no validator config?"); - - // JoinerFinalizing transitions into a validator. - let validator_scheduler = utils::leak(Scheduler::new(QueueKind::weights())); - let validator_event_queue_handle = EventQueueHandle::new(validator_scheduler); - - tokio::spawn(forward_to_queue( - validator_scheduler, - effect_builder.into_inner(), - )); - - let (validator_reactor, validator_effects) = ValidatorReactor::new( - *validator_config, - ®istry, - validator_event_queue_handle, - rng, - ) - .expect("validator intialization failed"); - - *self = MultiStageTestReactor::Validator { - validator_reactor: Box::new(validator_reactor), - validator_event_queue_handle, - }; - - effects.extend( - wrap_effects(MultiStageTestEvent::ValidatorEvent, validator_effects) - .into_iter(), - ) - } - MultiStageTestReactor::Validator { .. 
} => { - // Validator reactors don't transition to anything - unreachable!() - } - } - } else { - // The reactor's state didn't transition, so put back the reactor we seized ownership of - *self = multi_stage_test_reactor; - } - - let new_node_id = self.node_id(); - assert_eq!(old_node_id, new_node_id); - - if let MultiStageTestReactor::Deactivated = self { - panic!("Reactor should no longer be Deactivated!") - } - - effects - } - - fn maybe_exit(&self) -> Option { - match self { - MultiStageTestReactor::Deactivated => unreachable!(), - MultiStageTestReactor::Initializer { - initializer_reactor, - .. - } => initializer_reactor.maybe_exit(), - MultiStageTestReactor::Joiner { joiner_reactor, .. } => joiner_reactor.maybe_exit(), - MultiStageTestReactor::JoinerFinalizing { .. } => None, - MultiStageTestReactor::Validator { - validator_reactor, .. - } => validator_reactor.maybe_exit(), - } - } -} - -impl NetworkedReactor for MultiStageTestReactor { - type NodeId = NodeId; - fn node_id(&self) -> Self::NodeId { - match self { - MultiStageTestReactor::Deactivated => unreachable!(), - MultiStageTestReactor::Initializer { - initializer_reactor, - .. - } => initializer_reactor.node_id(), - MultiStageTestReactor::Joiner { joiner_reactor, .. } => joiner_reactor.node_id(), - MultiStageTestReactor::JoinerFinalizing { node_id, .. } => *node_id, - MultiStageTestReactor::Validator { - validator_reactor, .. 
- } => validator_reactor.node_id(), - } - } -} diff --git a/node/src/testing/multi_stage_test_reactor/test_chain.rs b/node/src/testing/multi_stage_test_reactor/test_chain.rs deleted file mode 100644 index ef87d8045f..0000000000 --- a/node/src/testing/multi_stage_test_reactor/test_chain.rs +++ /dev/null @@ -1,401 +0,0 @@ -use std::{sync::Arc, time::Duration}; - -use log::info; -use num::Zero; -use num_rational::Ratio; -use rand::Rng; -use tempfile::TempDir; - -use casper_execution_engine::shared::motes::Motes; -use casper_types::{system::auction::DelegationRate, EraId, PublicKey, SecretKey, U512}; - -use crate::{ - components::{gossiper, small_network, storage, storage::Storage}, - crypto::AsymmetricKeyExt, - reactor::validator, - testing::{ - self, - multi_stage_test_reactor::{InitializerReactorConfigWithChainspec, CONFIG_DIR}, - network::{Network, Nodes}, - MultiStageTestReactor, - }, - types::{ - chainspec::{AccountConfig, AccountsConfig, ValidatorConfig}, - ActivationPoint, BlockHash, Chainspec, NodeId, Timestamp, - }, - utils::{External, Loadable, WithDir}, - NodeRng, -}; - -#[derive(Clone)] -struct SecretKeyWithStake { - secret_key: SecretKey, - stake: u64, -} - -impl PartialEq for SecretKeyWithStake { - fn eq(&self, other: &Self) -> bool { - self.stake == other.stake - && PublicKey::from(&self.secret_key) == PublicKey::from(&other.secret_key) - } -} - -impl Eq for SecretKeyWithStake {} - -struct TestChain { - // Keys that validator instances will use, can include duplicates - storages: Vec, - chainspec: Arc, - first_node_port: u16, - network: Network, -} - -impl TestChain { - /// Instantiates a new test chain configuration. - /// - /// Generates secret keys for `size` validators and creates a matching chainspec. 
- async fn new(size: usize, rng: &mut NodeRng) -> Self { - assert!( - size >= 1, - "Network size must have at least one node (size: {})", - size - ); - let first_node_secret_key_with_stake = SecretKeyWithStake { - secret_key: SecretKey::random(rng), - stake: rng.gen_range(100..999), - }; - let other_secret_keys_with_stakes = { - let mut other_secret_keys_with_stakes = Vec::new(); - for _ in 1..size { - let staked_secret_key = SecretKeyWithStake { - secret_key: SecretKey::random(rng), - stake: rng.gen_range(100..999), - }; - other_secret_keys_with_stakes.push(staked_secret_key) - } - other_secret_keys_with_stakes - }; - - Self::new_with_keys( - first_node_secret_key_with_stake, - other_secret_keys_with_stakes, - rng, - ) - .await - } - - /// Instantiates a new test chain configuration. - /// - /// Takes a vector of bonded keys with specified bond amounts. - async fn new_with_keys( - first_node_secret_key_with_stake: SecretKeyWithStake, - other_secret_keys_with_stakes: Vec, - rng: &mut NodeRng, - ) -> Self { - // Load the `local` chainspec. - let mut chainspec: Chainspec = Chainspec::from_resources("local"); - - // Override accounts with those generated from the keys. - let genesis_accounts = std::iter::once(&first_node_secret_key_with_stake) - .chain(other_secret_keys_with_stakes.iter()) - .map(|staked_secret_key| { - let public_key = PublicKey::from(&staked_secret_key.secret_key); - let validator_config = ValidatorConfig::new( - Motes::new(U512::from(staked_secret_key.stake)), - DelegationRate::zero(), - ); - AccountConfig::new( - public_key, - Motes::new(U512::from(rng.gen_range(10000..99999999))), - Some(validator_config), - ) - }) - .collect(); - let delegators = vec![]; - chainspec.network_config.accounts_config = - AccountsConfig::new(genesis_accounts, delegators); - - // Make the genesis timestamp 45 seconds from now, to allow for all validators to start up. 
- chainspec.protocol_config.activation_point = - ActivationPoint::Genesis(Timestamp::now() + 45000.into()); - - chainspec.core_config.minimum_era_height = 4; - chainspec.highway_config.finality_threshold_fraction = Ratio::new(34, 100); - chainspec.core_config.era_duration = 10.into(); - chainspec.core_config.auction_delay = 1; - chainspec.core_config.unbonding_delay = 3; - - // Assign a port for the first node (TODO: this has a race condition) - let first_node_port = testing::unused_port_on_localhost(); - - // Create the test network - let network: Network = Network::new(); - - let mut test_chain = TestChain { - chainspec: Arc::new(chainspec), - storages: Vec::new(), - first_node_port, - network, - }; - - // Add the nodes to the chain - test_chain - .add_node(true, first_node_secret_key_with_stake.secret_key, None, rng) - .await; - - for secret_key_with_stake in other_secret_keys_with_stakes { - test_chain - .add_node(false, secret_key_with_stake.secret_key, None, rng) - .await; - } - - test_chain - } - - /// Creates an initializer/validator configuration for the `idx`th validator. - async fn add_node( - &mut self, - first_node: bool, - secret_key: SecretKey, - trusted_hash: Option, - rng: &mut NodeRng, - ) -> NodeId { - // Set the network configuration. - let network = if first_node { - small_network::Config::default_local_net_first_node(self.first_node_port) - } else { - small_network::Config::default_local_net(self.first_node_port) - }; - - let mut validator_config = validator::Config { - network, - gossip: gossiper::Config::new_with_small_timeouts(), - ..Default::default() - }; - - // ...and the secret key for our validator. - validator_config.consensus.secret_key_path = External::from_value(secret_key); - - // Set a trust hash if one has been provided. - validator_config.node.trusted_hash = trusted_hash; - - // Additionally set up storage in a temporary directory. 
- let (storage_config, temp_dir) = storage::Config::default_for_tests(); - validator_config.consensus.highway.unit_hashes_folder = temp_dir.path().to_path_buf(); - self.storages.push(temp_dir); - validator_config.storage = storage_config; - - // Bundle our config with a chainspec for creating a multi-stage reactor - let config = InitializerReactorConfigWithChainspec { - config: (false, WithDir::new(&*CONFIG_DIR, validator_config)), - chainspec: Arc::clone(&self.chainspec), - }; - - // Add the node (a multi-stage reactor) with the specified config to the network - self.network - .add_node_with_config(config, rng) - .await - .expect("could not add node to reactor") - .0 - } -} - -/// Given an era number, returns a predicate to check if all of the nodes are in the specified era. -fn is_in_era(era_num: u64) -> impl Fn(&Nodes) -> bool { - move |nodes: &Nodes| { - let era_id = EraId::from(era_num); - nodes.values().all(|runner| { - runner - .reactor() - .inner() - .consensus() - .map_or(false, |consensus| consensus.current_era() == era_id) - }) - } -} - -#[tokio::test] -async fn run_validator_network() { - testing::init_logging(); - - let mut rng = crate::new_rng(); - - // Instantiate a new chain with a fixed size. - const NETWORK_SIZE: usize = 5; - let mut chain = TestChain::new(NETWORK_SIZE, &mut rng).await; - - // Wait for all nodes to agree on one era. 
- for era_num in 1..=2 { - info!("Waiting for Era {} to end", era_num); - chain - .network - .settle_on(&mut rng, is_in_era(era_num), Duration::from_secs(600)) - .await; - } -} - -#[tokio::test] -async fn run_equivocator_network() { - // Test that we won't panic if a node equivocates - // Creates an equivocating node by launching two reactors with the same private key (alice_sk) - // The two nodes will create signatures of distinct units causing an equivocation - testing::init_logging(); - - let mut rng: NodeRng = crate::new_rng(); - - let first_node_secret_key_with_stake = SecretKeyWithStake { - secret_key: SecretKey::random(&mut rng), - stake: 100, - }; - let alice_sk = SecretKeyWithStake { - secret_key: SecretKey::random(&mut rng), - stake: 1, - }; - let other_secret_keys_with_stakes = vec![alice_sk.clone(), alice_sk]; - - let mut chain = TestChain::new_with_keys( - first_node_secret_key_with_stake, - other_secret_keys_with_stakes, - &mut rng, - ) - .await; - - for era_num in 1..=5 { - info!("Waiting for Era {} to end", era_num); - chain - .network - .settle_on(&mut rng, is_in_era(era_num), Duration::from_secs(600)) - .await; - } -} - -async fn get_switch_block_hash( - switch_block_era_num: u64, - net: &mut Network, - rng: &mut NodeRng, -) -> BlockHash { - let era_after_switch_block_era_num = switch_block_era_num + 1; - info!("Waiting for Era {} to end", era_after_switch_block_era_num); - net.settle_on( - rng, - is_in_era(era_after_switch_block_era_num), - Duration::from_secs(600), - ) - .await; - - info!( - "Querying storage for switch block for Era {}", - switch_block_era_num - ); - - // Get the storage for the first reactor - let storage: &Storage = net - .nodes() - .values() - .next() - .expect("need at least one node") - .reactor() - .inner() - .storage() - .expect("Can not access storage of first node"); - let switch_block = storage - .transactional_get_switch_block_by_era_id(switch_block_era_num) - .expect("Could not find block for era num"); - let 
switch_block_hash = switch_block.hash(); - info!( - "Found block hash for Era {}: {}", - switch_block_era_num, switch_block_hash - ); - *switch_block_hash -} - -/// Test a node joining to a single node network -#[ignore] -#[tokio::test] -async fn test_joiner() { - testing::init_logging(); - - const INITIAL_NETWORK_SIZE: usize = 1; - - let mut rng = crate::new_rng(); - - // Create a chain with just one node - let mut chain = TestChain::new(INITIAL_NETWORK_SIZE, &mut rng).await; - - assert_eq!( - chain.network.nodes().len(), - INITIAL_NETWORK_SIZE, - "There should be just one bonded validator in the network" - ); - - // Get the first switch block hash - let first_switch_block_hash = get_switch_block_hash(1, &mut chain.network, &mut rng).await; - - // Have a node join the network with that hash - info!("Joining with trusted hash {}", first_switch_block_hash); - let joiner_node_secret_key = SecretKey::random(&mut rng); - chain - .add_node( - false, - joiner_node_secret_key, - Some(first_switch_block_hash), - &mut rng, - ) - .await; - - assert_eq!( - chain.network.nodes().len(), - 2, - "There should be two validators in the network (one bonded and one read only)" - ); - - let era_num = 3; - info!("Waiting for Era {} to end", era_num); - chain - .network - .settle_on(&mut rng, is_in_era(era_num), Duration::from_secs(600)) - .await; -} - -/// Test a node joining to a network with five nodes -#[ignore] -#[tokio::test] -async fn test_joiner_network() { - testing::init_logging(); - - const INITIAL_NETWORK_SIZE: usize = 5; - - let mut rng = crate::new_rng(); - - let mut chain = TestChain::new(INITIAL_NETWORK_SIZE, &mut rng).await; - - assert_eq!( - chain.network.nodes().len(), - INITIAL_NETWORK_SIZE, - "Wrong number of bonded validators in the network" - ); - - // Get the first switch block hash - let first_switch_block_hash = get_switch_block_hash(1, &mut chain.network, &mut rng).await; - - // Have a node join the network with that hash - info!("Joining with trusted hash 
{}", first_switch_block_hash); - let joiner_node_secret_key = SecretKey::random(&mut rng); - chain - .add_node( - false, - joiner_node_secret_key, - Some(first_switch_block_hash), - &mut rng, - ) - .await; - - assert_eq!(chain.network.nodes().len(), INITIAL_NETWORK_SIZE + 1,); - - let era_num = 3; - info!("Waiting for Era {} to end", era_num); - chain - .network - .settle_on(&mut rng, is_in_era(era_num), Duration::from_secs(600)) - .await; -} diff --git a/node/src/testing/network.rs b/node/src/testing/network.rs index 93e0721a01..c542bcc253 100644 --- a/node/src/testing/network.rs +++ b/node/src/testing/network.rs @@ -2,38 +2,56 @@ use std::{ collections::{hash_map::Entry, HashMap}, - fmt::{Debug, Display}, - hash::Hash, + fmt::Debug, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, time::Duration, }; use fake_instant::FakeClock as Instant; use futures::future::{BoxFuture, FutureExt}; use serde::Serialize; -use tokio::time; +use tokio::time::{self, error::Elapsed}; use tracing::{debug, error_span}; use tracing_futures::Instrument; +use casper_types::testing::TestRng; + +use casper_types::{Chainspec, ChainspecRawBytes}; + use super::ConditionCheckReactor; use crate::{ + components::ComponentState, effect::{EffectBuilder, Effects}, - reactor::{Finalize, Reactor, Runner}, - testing::TestRng, + reactor::{Finalize, Reactor, Runner, TryCrankOutcome}, + tls::KeyFingerprint, + types::{ExitCode, NodeId}, + utils::Loadable, NodeRng, }; /// Type alias for set of nodes inside a network. /// /// Provided as a convenience for writing condition functions for `settle_on` and friends. -pub type Nodes = HashMap<::NodeId, Runner>>; +pub(crate) type Nodes = HashMap>>; /// A reactor with networking functionality. -pub trait NetworkedReactor: Sized { - /// The node ID on the networking level. 
- type NodeId: Eq + Hash + Clone + Display + Debug; - +/// +/// Test reactors implementing this SHOULD implement at least the `node_id` function if they have +/// proper networking functionality. +pub(crate) trait NetworkedReactor: Sized { /// Returns the node ID assigned to this specific reactor instance. - fn node_id(&self) -> Self::NodeId; + /// + /// The default implementation generates a pseudo-id base on its memory address. + fn node_id(&self) -> NodeId { + #[allow(trivial_casts)] + let addr = self as *const _ as usize; + let mut raw: [u8; KeyFingerprint::LENGTH] = [0; KeyFingerprint::LENGTH]; + raw[0..(size_of::())].copy_from_slice(&addr.to_be_bytes()); + NodeId::from(KeyFingerprint::from(raw)) + } } /// Time interval for which to poll an observed testing network when no events have occurred. @@ -45,12 +63,12 @@ const POLL_INTERVAL: Duration = Duration::from_millis(10); /// `crank_all`. As an alternative, the `settle` and `settle_all` functions can be used to continue /// cranking until a condition has been reached. #[derive(Debug, Default)] -pub struct Network { +pub(crate) struct TestingNetwork { /// Current network. - nodes: HashMap<::NodeId, Runner>>, + nodes: HashMap>>, } -impl Network +impl TestingNetwork where R: Reactor + NetworkedReactor, R::Config: Default, @@ -64,15 +82,15 @@ where /// /// Panics if a duplicate node ID is being inserted. This should only happen in case a randomly /// generated ID collides. - pub async fn add_node( - &mut self, - rng: &mut TestRng, - ) -> Result<(R::NodeId, &mut Runner>), R::Error> { + pub(crate) async fn add_node<'a, 'b: 'a>( + &'a mut self, + rng: &'b mut TestRng, + ) -> Result<(NodeId, &'a mut Runner>), R::Error> { self.add_node_with_config(Default::default(), rng).await } /// Adds `count` new nodes to the network, and returns their IDs. 
- pub async fn add_nodes(&mut self, rng: &mut TestRng, count: usize) -> Vec { + pub(crate) async fn add_nodes(&mut self, rng: &mut TestRng, count: usize) -> Vec { let mut node_ids = vec![]; for _ in 0..count { let (node_id, _runner) = self.add_node(rng).await.unwrap(); @@ -82,15 +100,15 @@ where } } -impl Network +impl TestingNetwork where R: Reactor + NetworkedReactor, R::Event: Serialize, R::Error: From + From, { /// Creates a new network. - pub fn new() -> Self { - Network { + pub(crate) fn new() -> Self { + TestingNetwork { nodes: HashMap::new(), } } @@ -100,16 +118,40 @@ where /// # Panics /// /// Panics if a duplicate node ID is being inserted. - pub async fn add_node_with_config( - &mut self, + pub(crate) async fn add_node_with_config<'a, 'b: 'a>( + &'a mut self, + cfg: R::Config, + rng: &'b mut NodeRng, + ) -> Result<(NodeId, &'a mut Runner>), R::Error> { + let (chainspec, chainspec_raw_bytes) = + <(Chainspec, ChainspecRawBytes)>::from_resources("local"); + self.add_node_with_config_and_chainspec( + cfg, + Arc::new(chainspec), + Arc::new(chainspec_raw_bytes), + rng, + ) + .await + } + + /// Creates a new networking node on the network. + /// + /// # Panics + /// + /// Panics if a duplicate node ID is being inserted. + pub(crate) async fn add_node_with_config_and_chainspec<'a, 'b: 'a>( + &'a mut self, cfg: R::Config, - rng: &mut NodeRng, - ) -> Result<(R::NodeId, &mut Runner>), R::Error> { - let runner: Runner> = Runner::new(cfg, rng).await?; + chainspec: Arc, + chainspec_raw_bytes: Arc, + rng: &'b mut NodeRng, + ) -> Result<(NodeId, &'a mut Runner>), R::Error> { + let runner: Runner> = + Runner::new(cfg, chainspec, chainspec_raw_bytes, rng).await?; let node_id = runner.reactor().node_id(); - let node_ref = match self.nodes.entry(node_id.clone()) { + let node_ref = match self.nodes.entry(node_id) { Entry::Occupied(_) => { // This happens in the event of the extremely unlikely hash collision, or if the // node ID was set manually. 
@@ -122,33 +164,31 @@ where } /// Removes a node from the network. - pub fn remove_node(&mut self, node_id: &R::NodeId) -> Option>> { + pub(crate) fn remove_node( + &mut self, + node_id: &NodeId, + ) -> Option>> { self.nodes.remove(node_id) } - /// Crank the specified runner once, returning the number of events processed. - pub async fn crank(&mut self, node_id: &R::NodeId, rng: &mut TestRng) -> usize { + /// Crank the specified runner once. + pub(crate) async fn crank(&mut self, node_id: &NodeId, rng: &mut TestRng) -> TryCrankOutcome { let runner = self.nodes.get_mut(node_id).expect("should find node"); - let node_id = runner.reactor().node_id(); - if runner + runner .try_crank(rng) .instrument(error_span!("crank", node_id = %node_id)) .await - .is_some() - { - 1 - } else { - 0 - } } /// Crank only the specified runner until `condition` is true or until `within` has elapsed. /// /// Returns `true` if `condition` has been met within the specified timeout. - pub async fn crank_until( + /// + /// Panics if cranking causes the node to return an exit code. + pub(crate) async fn crank_until( &mut self, - node_id: &R::NodeId, + node_id: &NodeId, rng: &mut TestRng, condition: F, within: Duration, @@ -158,68 +198,114 @@ where self.nodes .get_mut(node_id) .unwrap() - .reactor_mut() - .set_condition_checker(Box::new(condition)); - - time::timeout(within, self.crank_and_check_indefinitely(node_id, rng)) + .crank_until(rng, condition, within) .await - .unwrap() - } - - async fn crank_and_check_indefinitely(&mut self, node_id: &R::NodeId, rng: &mut TestRng) { - loop { - if self.crank(node_id, rng).await == 0 { - Instant::advance_time(POLL_INTERVAL.as_millis() as u64); - time::sleep(POLL_INTERVAL).await; - continue; - } - - if self - .nodes - .get(node_id) - .unwrap() - .reactor() - .condition_result() - { - debug!("{} met condition", node_id); - return; - } - } } /// Crank all runners once, returning the number of events processed. 
- pub async fn crank_all(&mut self, rng: &mut TestRng) -> usize { + /// + /// Panics if any node returns an exit code. + async fn crank_all(&mut self, rng: &mut TestRng) -> usize { let mut event_count = 0; for node in self.nodes.values_mut() { let node_id = node.reactor().node_id(); - event_count += if node + match node .try_crank(rng) .instrument(error_span!("crank", node_id = %node_id)) .await - .is_some() { - 1 - } else { - 0 + TryCrankOutcome::NoEventsToProcess => (), + TryCrankOutcome::ProcessedAnEvent => event_count += 1, + TryCrankOutcome::ShouldExit(exit_code) => { + panic!("should not exit: {:?}", exit_code) + } + TryCrankOutcome::Exited => unreachable!(), } } event_count } - /// Process events on all nodes until all event queues are empty for at least `quiet_for`. + /// Crank all runners until `condition` is true on the specified runner or until `within` has + /// elapsed. /// - /// # Panics + /// Returns `true` if `condition` has been met within the specified timeout. /// - /// Panics if after `within` the event queues are still not idle. - pub async fn settle(&mut self, rng: &mut TestRng, quiet_for: Duration, within: Duration) { + /// Panics if cranking causes the node to return an exit code. 
+ pub(crate) async fn crank_all_until( + &mut self, + node_id: &NodeId, + rng: &mut TestRng, + condition: F, + within: Duration, + ) where + F: Fn(&R::Event) -> bool + Send + 'static, + { + self.nodes + .get_mut(node_id) + .unwrap() + .reactor_mut() + .set_condition_checker(Box::new(condition)); + + time::timeout(within, self.crank_and_check_all_indefinitely(node_id, rng)) + .await + .unwrap() + } + + async fn crank_and_check_all_indefinitely( + &mut self, + node_to_check: &NodeId, + rng: &mut TestRng, + ) { + loop { + let mut no_events = true; + for node in self.nodes.values_mut() { + let node_id = node.reactor().node_id(); + match node + .try_crank(rng) + .instrument(error_span!("crank", node_id = %node_id)) + .await + { + TryCrankOutcome::NoEventsToProcess => (), + TryCrankOutcome::ProcessedAnEvent => { + no_events = false; + } + TryCrankOutcome::ShouldExit(exit_code) => { + panic!("should not exit: {:?}", exit_code) + } + TryCrankOutcome::Exited => unreachable!(), + } + if node_id == *node_to_check && node.reactor().condition_result() { + debug!("{} met condition", node_to_check); + return; + } + } + + if no_events { + Instant::advance_time(POLL_INTERVAL.as_millis() as u64); + time::sleep(POLL_INTERVAL).await; + continue; + } + } + } + + /// Process events on all nodes until all event queues are empty for at least `quiet_for`. + /// + /// Panics if after `within` the event queues are still not idle, or if any node returns an exit + /// code. + pub(crate) async fn settle( + &mut self, + rng: &mut TestRng, + quiet_for: Duration, + within: Duration, + ) { time::timeout(within, self.settle_indefinitely(rng, quiet_for)) .await .unwrap_or_else(|_| { - panic!(format!( + panic!( "network did not settle for {:?} within {:?}", quiet_for, within - )) + ) }) } @@ -244,20 +330,39 @@ where /// Runs the main loop of every reactor until `condition` is true. /// - /// # Panics + /// Returns an error if the `condition` is not reached inside of `within`. 
+ /// + /// Panics if any node returns an exit code. To settle on an exit code, use `settle_on_exit` + /// instead. + pub(crate) async fn try_settle_on( + &mut self, + rng: &mut TestRng, + condition: F, + within: Duration, + ) -> Result<(), Elapsed> + where + F: Fn(&Nodes) -> bool, + { + time::timeout(within, self.settle_on_indefinitely(rng, condition)).await + } + + /// Runs the main loop of every reactor until `condition` is true. /// - /// If the `condition` is not reached inside of `within`, panics. - pub async fn settle_on(&mut self, rng: &mut TestRng, condition: F, within: Duration) + /// Panics if the `condition` is not reached inside of `within`, or if any node returns an exit + /// code. + /// + /// To settle on an exit code, use `settle_on_exit` instead. + pub(crate) async fn settle_on(&mut self, rng: &mut TestRng, condition: F, within: Duration) where F: Fn(&Nodes) -> bool, { - time::timeout(within, self.settle_on_indefinitely(rng, condition)) + self.try_settle_on(rng, condition, within) .await .unwrap_or_else(|_| { - panic!(format!( - "network did not settle on condition within {:?}", - within - )) + panic!( + "network did not settle on condition within {} seconds", + within.as_secs_f64() + ) }) } @@ -279,25 +384,202 @@ where } } + /// Runs the main loop of every reactor until the nodes return the expected exit code. + /// + /// Panics if the nodes do not exit inside of `within`, or if any node returns an unexpected + /// exit code. + pub(crate) async fn settle_on_exit( + &mut self, + rng: &mut TestRng, + expected: ExitCode, + within: Duration, + ) { + time::timeout(within, self.settle_on_exit_indefinitely(rng, expected)) + .await + .unwrap_or_else(|_| panic!("network did not settle on condition within {:?}", within)) + } + + /// Runs the main loop of every reactor until a specified node returns the expected exit code. + /// + /// Panics if the node does not exit inside of `within`, or if any node returns an unexpected + /// exit code. 
+ pub(crate) async fn settle_on_node_exit( + &mut self, + rng: &mut TestRng, + node_id: &NodeId, + expected: ExitCode, + within: Duration, + ) { + time::timeout( + within, + self.settle_on_node_exit_indefinitely(rng, node_id, expected), + ) + .await + .unwrap_or_else(|elapsed| { + panic!( + "network did not settle on condition within {within:?}, time elapsed: {elapsed:?}", + ) + }) + } + + /// Keeps cranking the network until every reactor's specified component is in the given state. + /// + /// # Panics + /// + /// Panics if any reactor returns `None` on its [`Reactor::get_component_state()`] call. + pub(crate) async fn _settle_on_component_state( + &mut self, + rng: &mut TestRng, + name: &str, + state: &ComponentState, + timeout: Duration, + ) { + self.settle_on( + rng, + |net| { + net.values() + .all(|runner| match runner.reactor().get_component_state(name) { + Some(actual_state) => actual_state == state, + None => panic!("unknown or unsupported component: {}", name), + }) + }, + timeout, + ) + .await; + } + + /// Starts a background process that will crank all nodes until stopped. + /// + /// Returns a future that will, once polled, stop all cranking and return the network and the + /// the random number generator. Note that the stop command will be sent as soon as the returned + /// future is polled (awaited), but no sooner. + pub(crate) fn crank_until_stopped( + mut self, + mut rng: TestRng, + ) -> impl futures::Future + where + R: Send + 'static, + { + let stop = Arc::new(AtomicBool::new(false)); + let handle = tokio::spawn({ + let stop = stop.clone(); + async move { + while !stop.load(Ordering::Relaxed) { + if self.crank_all(&mut rng).await == 0 { + time::sleep(POLL_INTERVAL).await; + }; + } + (self, rng) + } + }); + + async move { + // Trigger the background process stop. 
+ stop.store(true, Ordering::Relaxed); + handle.await.expect("failed to join background crank") + } + } + + async fn settle_on_exit_indefinitely(&mut self, rng: &mut TestRng, expected: ExitCode) { + let mut exited_as_expected = 0; + loop { + if exited_as_expected == self.nodes.len() { + debug!(?expected, "all nodes exited with expected code"); + break; + } + + let mut event_count = 0; + for node in self.nodes.values_mut() { + let node_id = node.reactor().node_id(); + match node + .try_crank(rng) + .instrument(error_span!("crank", node_id = %node_id)) + .await + { + TryCrankOutcome::NoEventsToProcess => (), + TryCrankOutcome::ProcessedAnEvent => event_count += 1, + TryCrankOutcome::ShouldExit(exit_code) if exit_code == expected => { + exited_as_expected += 1; + event_count += 1; + } + TryCrankOutcome::ShouldExit(exit_code) => { + panic!( + "unexpected exit: expected {:?}, got {:?}", + expected, exit_code + ) + } + TryCrankOutcome::Exited => (), + } + } + + if event_count == 0 { + // No events processed, wait for a bit to avoid 100% cpu usage. 
+ Instant::advance_time(POLL_INTERVAL.as_millis() as u64); + time::sleep(POLL_INTERVAL).await; + } + } + } + + async fn settle_on_node_exit_indefinitely( + &mut self, + rng: &mut TestRng, + node_id: &NodeId, + expected: ExitCode, + ) { + 'outer: loop { + let mut event_count = 0; + for node in self.nodes.values_mut() { + let current_node_id = node.reactor().node_id(); + match node + .try_crank(rng) + .instrument(error_span!("crank", node_id = %node_id)) + .await + { + TryCrankOutcome::NoEventsToProcess => (), + TryCrankOutcome::ProcessedAnEvent => event_count += 1, + TryCrankOutcome::ShouldExit(exit_code) + if (exit_code == expected && current_node_id == *node_id) => + { + debug!(?expected, ?node_id, "node exited with expected code"); + break 'outer; + } + TryCrankOutcome::ShouldExit(exit_code) => { + panic!( + "unexpected exit: expected {expected:?} for node {node_id:?}, got {exit_code:?} for node {current_node_id:?}", + ) + } + TryCrankOutcome::Exited => (), + } + } + + if event_count == 0 { + // No events processed, wait for a bit to avoid 100% cpu usage. + Instant::advance_time(POLL_INTERVAL.as_millis() as u64); + time::sleep(POLL_INTERVAL).await; + } + } + } + /// Returns the internal map of nodes. - pub fn nodes(&self) -> &HashMap>> { + pub(crate) fn nodes(&self) -> &HashMap>> { &self.nodes } - /// Returns the internal map of nodes. - pub fn nodes_mut(&mut self) -> &mut HashMap>> { + /// Returns the internal map of nodes, mutable. + pub(crate) fn nodes_mut(&mut self) -> &mut HashMap>> { &mut self.nodes } - /// Returns an iterator over all reactors. - pub fn reactors(&self) -> impl Iterator { - self.nodes.values().map(|runner| runner.reactor().inner()) + /// Returns an iterator over all runners, mutable. + pub(crate) fn runners_mut( + &mut self, + ) -> impl Iterator>> { + self.nodes.values_mut() } /// Returns an iterator over all reactors, mutable. 
- pub fn reactors_mut(&mut self) -> impl Iterator { - self.nodes - .values_mut() + pub(crate) fn reactors_mut(&mut self) -> impl Iterator { + self.runners_mut() .map(|runner| runner.reactor_mut().inner_mut()) } @@ -305,8 +587,11 @@ where /// /// The effects are created via a call to `create_effects` which is itself passed an instance of /// an `EffectBuilder`. - pub async fn process_injected_effect_on(&mut self, node_id: &R::NodeId, create_effects: F) - where + pub(crate) async fn process_injected_effect_on( + &mut self, + node_id: &NodeId, + create_effects: F, + ) where F: FnOnce(EffectBuilder) -> Effects, { let runner = self.nodes.get_mut(node_id).unwrap(); @@ -318,11 +603,10 @@ where } } -impl Finalize for Network +impl Finalize for TestingNetwork where R: Finalize + NetworkedReactor + Reactor + Send + 'static, - R::Event: Serialize, - R::NodeId: Send, + R::Event: Serialize + Send + Sync, R::Error: From, { fn finalize(self) -> BoxFuture<'static, ()> { @@ -331,7 +615,7 @@ where async move { // Shutdown the sender of every reactor node to ensure the port is open again. for (_, node) in self.nodes.into_iter() { - node.into_inner().finalize().await; + node.drain_into_inner().await.finalize().await; } debug!("network finalized"); diff --git a/node/src/testing/test_clock.rs b/node/src/testing/test_clock.rs new file mode 100644 index 0000000000..746a440933 --- /dev/null +++ b/node/src/testing/test_clock.rs @@ -0,0 +1,86 @@ +//! Testing clock +//! +//! A controllable clock for testing. +//! +//! # When to use `FakeClock` instead +//! +//! The [`TestClock`] is suitable for code written with "external" time passed in through its +//! regular interfaces already in mind. Code that does not conform to this should use `FakeClock` +//! and conditional compilation (`#[cfg(test)] ...`) instead. + +use std::time::{Duration, Instant}; + +/// How far back the test clock can go (roughly 10 years). 
+const TEST_CLOCK_LEEWAY: Duration = Duration::from_secs(315_569_520); + +/// A rewindable and forwardable clock for testing that does not tick on its own. +#[derive(Debug)] +pub struct TestClock { + /// The current time set on the clock. + now: Instant, +} + +impl Default for TestClock { + fn default() -> Self { + TestClock::new() + } +} + +impl TestClock { + /// Creates a new testing clock. + /// + /// Testing clocks will not advance unless prompted to do so. + pub fn new() -> Self { + Self { + now: Instant::now() + TEST_CLOCK_LEEWAY, + } + } + + /// Returns the "current" time. + pub fn now(&self) -> Instant { + self.now + } + + /// Advances the clock by duration. + pub fn advance(&mut self, duration: Duration) { + self.now += duration; + } + + /// Turns the clock by duration. + pub fn rewind(&mut self, duration: Duration) { + self.now -= duration; + } + + /// `FakeClock` compatible interface. + pub fn advance_time(&mut self, ms: u64) { + self.advance(Duration::from_millis(ms)) + } +} + +#[cfg(test)] +mod tests { + use std::{thread, time::Duration}; + + use super::TestClock; + + #[test] + fn test_clock_operation() { + let mut clock = TestClock::new(); + + let initial = clock.now(); + + // Ensure the clock does not advance on its own. + thread::sleep(Duration::from_millis(10)); + + assert_eq!(initial, clock.now()); + + // Ensure the clock can go forwards and backwards. 
+ clock.advance(Duration::from_secs(1)); + clock.advance_time(1_000); + + assert_eq!(clock.now() - initial, Duration::from_secs(2)); + + clock.rewind(Duration::from_secs(3)); + assert_eq!(initial - clock.now(), Duration::from_secs(1)); + } +} diff --git a/node/src/testing/test_rng.rs b/node/src/testing/test_rng.rs deleted file mode 100644 index fb6c48ff9c..0000000000 --- a/node/src/testing/test_rng.rs +++ /dev/null @@ -1,162 +0,0 @@ -#![cfg(test)] - -use std::{ - cell::RefCell, - cmp, env, - fmt::{self, Debug, Display, Formatter}, - thread, -}; - -use hex_fmt::HexFmt; -use rand::{self, CryptoRng, Error, Rng, RngCore, SeedableRng}; -use rand_pcg::Pcg64Mcg; - -thread_local! { - static THIS_THREAD_HAS_RNG: RefCell = RefCell::new(false); -} - -const CL_TEST_SEED: &str = "CL_TEST_SEED"; - -type Seed = ::Seed; // [u8; 16] - -/// A fast, seedable pseudorandom number generator for use in tests which prints the seed if the -/// thread in which it is created panics. -/// -/// Only one `TestRng` is permitted per thread. -pub struct TestRng { - seed: Seed, - rng: Pcg64Mcg, -} - -impl TestRng { - /// Constructs a new `TestRng` using a seed generated from the env var `CL_TEST_SEED` if set or - /// from cryptographically secure random data if not. - /// - /// Note that `new()` or `default()` should only be called once per test. If a test needs to - /// spawn multiple threads each with their own `TestRng`, then use `new()` to create a single, - /// master `TestRng`, then use it to create a seed per child thread. The child `TestRng`s can - /// then be constructed in their own threads via `from_seed()`. - /// - /// # Panics - /// - /// Panics if a `TestRng` has already been created on this thread. 
- pub fn new() -> Self { - Self::set_flag_or_panic(); - - let mut seed = Seed::default(); - match env::var(CL_TEST_SEED) { - Ok(seed_as_hex) => { - hex::decode_to_slice(&seed_as_hex, &mut seed).unwrap_or_else(|error| { - THIS_THREAD_HAS_RNG.with(|flag| { - *flag.borrow_mut() = false; - }); - panic!("can't parse '{}' as a TestRng seed: {}", seed_as_hex, error) - }); - } - Err(_) => { - rand::thread_rng().fill(&mut seed); - } - }; - - let rng = Pcg64Mcg::from_seed(seed); - - TestRng { seed, rng } - } - - /// Constructs a new `TestRng` using `seed`. This should be used in cases where a test needs to - /// spawn multiple threads each with their own `TestRng`. A single, master `TestRng` should be - /// constructed before any child threads are spawned, and that one should be used to create - /// seeds for the child threads' `TestRng`s. - /// - /// # Panics - /// - /// Panics if a `TestRng` has already been created on this thread. - pub fn from_seed(seed: Seed) -> Self { - Self::set_flag_or_panic(); - let rng = Pcg64Mcg::from_seed(seed); - TestRng { seed, rng } - } - - fn set_flag_or_panic() { - THIS_THREAD_HAS_RNG.with(|flag| { - if *flag.borrow() { - panic!("cannot create multiple TestRngs on the same thread"); - } - *flag.borrow_mut() = true; - }); - } -} - -impl Default for TestRng { - fn default() -> Self { - TestRng::new() - } -} - -impl Display for TestRng { - fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { - write!(formatter, "TestRng seed: {}", HexFmt(&self.seed)) - } -} - -impl Debug for TestRng { - fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { - Display::fmt(self, formatter) - } -} - -impl Drop for TestRng { - fn drop(&mut self) { - if thread::panicking() { - let line_1 = format!("Thread: {}", thread::current().name().unwrap_or("unnamed")); - let line_2 = "To reproduce failure, try running with env var:"; - let line_3 = format!("{}={}", CL_TEST_SEED, HexFmt(&self.seed)); - let max_length = cmp::max(line_1.len(), line_2.len()); - let border 
= "=".repeat(max_length); - println!( - "\n{}\n{}\n{}\n{}\n{}\n", - border, line_1, line_2, line_3, border - ); - } - } -} - -impl SeedableRng for TestRng { - type Seed = ::Seed; - - fn from_seed(seed: Self::Seed) -> Self { - Self::from_seed(seed) - } -} - -impl RngCore for TestRng { - fn next_u32(&mut self) -> u32 { - self.rng.next_u32() - } - - fn next_u64(&mut self) -> u64 { - self.rng.next_u64() - } - - fn fill_bytes(&mut self, dest: &mut [u8]) { - self.rng.fill_bytes(dest) - } - - fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> { - self.rng.try_fill_bytes(dest) - } -} - -impl CryptoRng for TestRng {} - -mod tests { - use super::*; - - #[test] - #[should_panic(expected = "cannot create multiple TestRngs on the same thread")] - fn second_test_rng_in_thread_should_panic() { - let _test_rng1 = TestRng::new(); - let seed = [1; 16]; - let _test_rng2 = TestRng::from_seed(seed); - } -} diff --git a/node/src/tls.rs b/node/src/tls.rs index c17eae6510..b5bcaf0003 100644 --- a/node/src/tls.rs +++ b/node/src/tls.rs @@ -31,18 +31,18 @@ use std::{ time::{SystemTime, UNIX_EPOCH}, }; -use anyhow::Context; +use casper_types::file_utils::{read_file, ReadFileError}; use datasize::DataSize; use hex_fmt::HexFmt; use nid::Nid; use openssl::{ asn1::{Asn1Integer, Asn1IntegerRef, Asn1Time}, bn::{BigNum, BigNumContext}, - ec, + ec::{self, EcKey}, error::ErrorStack, hash::{DigestBytes, MessageDigest}, nid, - pkey::{PKey, PKeyRef, Private}, + pkey::{PKey, PKeyRef, Private, Public}, sha, ssl::{SslAcceptor, SslConnector, SslContextBuilder, SslMethod, SslVerifyMode, SslVersion}, x509::{X509Builder, X509Name, X509NameBuilder, X509NameRef, X509Ref, X509}, @@ -52,11 +52,9 @@ use rand::{ distributions::{Distribution, Standard}, Rng, }; -use serde::{Deserialize, Deserializer, Serialize, Serializer}; +use serde::{Deserialize, Serialize}; use thiserror::Error; -use crate::utils::{read_file, write_file}; - // This is inside a private module so that the generated `BigArray` does 
not form part of this // crate's public API, and hence also doesn't appear in the rustdocs. mod big_array { @@ -65,7 +63,7 @@ mod big_array { big_array! { BigArray; } } -/// The chosen signature algorithm (**ECDSA with SHA512**). +/// The chosen signature algorithm (**ECDSA with SHA512**). const SIGNATURE_ALGORITHM: Nid = Nid::ECDSA_WITH_SHA512; /// The underlying elliptic curve (**P-521**). @@ -81,7 +79,7 @@ type SslResult = Result; /// SHA512 hash. #[derive(Copy, Clone, DataSize, Deserialize, Serialize)] -struct Sha512(#[serde(with = "big_array::BigArray")] [u8; Sha512::SIZE]); +pub struct Sha512(#[serde(with = "big_array::BigArray")] [u8; Sha512::SIZE]); impl Sha512 { /// Size of digest in bytes. @@ -91,7 +89,7 @@ impl Sha512 { const NID: Nid = Nid::SHA512; /// Create a new Sha512 by hashing a slice. - fn new>(data: B) -> Self { + pub fn new>(data: B) -> Self { let mut openssl_sha = sha::Sha512::new(); openssl_sha.update(data.as_ref()); Sha512(openssl_sha.finish()) @@ -166,6 +164,12 @@ impl From<[u8; KeyFingerprint::LENGTH]> for KeyFingerprint { } } +impl From for KeyFingerprint { + fn from(hash: Sha512) -> Self { + Self(hash) + } +} + #[cfg(test)] impl Distribution for Standard { fn sample(&self, rng: &mut R) -> KeyFingerprint { @@ -241,23 +245,49 @@ impl PartialEq for TlsCert { impl Eq for TlsCert {} -// Serialization and deserialization happens only via x509, which is checked upon deserialization. -impl<'de> Deserialize<'de> for TlsCert { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - validate_cert(x509_serde::deserialize(deserializer)?).map_err(serde::de::Error::custom) - } +/// Error during loading a x509 certificate. 
+#[derive(Debug, Error, Serialize)] +pub enum LoadCertError { + #[error("could not load certificate file: {0}")] + ReadFile( + #[serde(skip_serializing)] + #[source] + ReadFileError, + ), + #[error("unable to load x509 certificate {0:?}")] + X509CertFromPem( + #[serde(skip_serializing)] + #[source] + ErrorStack, + ), } -impl Serialize for TlsCert { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - x509_serde::serialize(&self.x509, serializer) - } +/// Load a certificate from a file. +pub(crate) fn load_cert>(src: P) -> Result { + let pem = read_file(src.as_ref()).map_err(LoadCertError::ReadFile)?; + X509::from_pem(&pem).map_err(LoadCertError::X509CertFromPem) +} + +/// Error during loading a secret key. +#[derive(Debug, Error, Serialize)] +pub(crate) enum LoadSecretKeyError { + #[error("could not load secret key file: {0}")] + ReadFile( + #[serde(skip_serializing)] + #[source] + ReadFileError, + ), + #[error("unable to load private key from pem {0:?}")] + PrivateKeyFromPem( + #[serde(skip_serializing)] + #[source] + ErrorStack, + ), +} + +pub(crate) fn load_secret_key>(src: P) -> Result, LoadSecretKeyError> { + let pem = read_file(src.as_ref()).map_err(LoadSecretKeyError::ReadFile)?; + PKey::private_key_from_pem(&pem).map_err(LoadSecretKeyError::PrivateKeyFromPem) } /// A signed value. @@ -428,13 +458,46 @@ pub enum ValidationError { #[source] ErrorStack, ), + /// Wrong certificate authority. + #[error("the certificate is not signed by provided certificate authority")] + WrongCertificateAuthority, + /// Failed to read public key from certificate. + #[error("error reading public key from ca certificate: {0:?}")] + CannotReadCAPublicKey( + #[serde(skip_serializing)] + #[source] + ErrorStack, + ), +} + +/// Checks that the certificate is signed by a provided certificate authority and returns the +/// fingerprint of the public key. 
+pub(crate) fn validate_cert_with_authority( + cert: X509, + ca: &X509, +) -> Result { + let authority_key = ca + .public_key() + .map_err(ValidationError::CannotReadCAPublicKey)?; + + validate_cert_expiration_date(&cert)?; + + if !cert + .verify(authority_key.as_ref()) + .map_err(ValidationError::FailedToValidateSignature)? + { + return Err(ValidationError::WrongCertificateAuthority); + } + + // Ensure that the key is using the correct curve parameters. + tls_cert_from_x509(cert) } /// Checks that the cryptographic parameters on a certificate are correct and returns the /// fingerprint of the public key. /// /// At the very least this ensures that no weaker ciphers have been used to forge a certificate. -pub(crate) fn validate_cert(cert: X509) -> Result { +pub(crate) fn validate_self_signed_cert(cert: X509) -> Result { if cert.signature_algorithm().object().nid() != SIGNATURE_ALGORITHM { // The signature algorithm is not of the exact kind we are using to generate our // certificates, an attacker could have used a weaker one to generate colliding keys. @@ -459,33 +522,10 @@ pub(crate) fn validate_cert(cert: X509) -> Result { } // Check expiration times against current time. - let asn1_now = Asn1Time::from_unix(now()).map_err(ValidationError::TimeIssue)?; - if asn1_now - .compare(cert.not_before()) - .map_err(ValidationError::TimeIssue)? - != Ordering::Greater - { - return Err(ValidationError::NotYetValid); - } - - if asn1_now - .compare(cert.not_after()) - .map_err(ValidationError::TimeIssue)? - != Ordering::Less - { - return Err(ValidationError::Expired); - } + validate_cert_expiration_date(&cert)?; // Ensure that the key is using the correct curve parameters. 
- let public_key = cert - .public_key() - .map_err(ValidationError::CannotReadPublicKey)?; - - let ec_key = public_key - .ec_key() - .map_err(ValidationError::CouldNotExtractEcKey)?; - - ec_key.check_key().map_err(ValidationError::KeyFailsCheck)?; + let (public_key, ec_key) = validate_cert_ec_key(&cert)?; if ec_key.group().curve_name() != Some(SIGNATURE_CURVE) { // The underlying curve is not the one we chose. return Err(ValidationError::WrongCurve); @@ -499,17 +539,45 @@ pub(crate) fn validate_cert(cert: X509) -> Result { return Err(ValidationError::InvalidSignature); } - // We now have a valid certificate and can extract the fingerprint. + tls_cert_from_x509_and_key(cert, ec_key) +} + +/// Creates a [`TlsCert`] instance from [`X509`] cert instance. +/// +/// This function only ensures that the cert contains EC public key, and is suitable for quickly +/// validating certs signed by CA. +pub(crate) fn tls_cert_from_x509(cert: X509) -> Result { + let (_public_key, ec_key) = validate_cert_ec_key(&cert)?; + tls_cert_from_x509_and_key(cert, ec_key) +} + +fn tls_cert_from_x509_and_key( + cert: X509, + ec_key: EcKey, +) -> Result { + let cert_fingerprint = cert_fingerprint(&cert)?; + let key_fingerprint = key_fingerprint(&ec_key)?; + Ok(TlsCert { + x509: cert, + cert_fingerprint, + key_fingerprint, + }) +} + +/// Calculate a fingerprint for the X509 certificate. +pub(crate) fn cert_fingerprint(cert: &X509) -> Result { assert_eq!(Sha512::NID, SIGNATURE_DIGEST); let digest = &cert .digest(Sha512::create_message_digest()) .map_err(ValidationError::InvalidFingerprint)?; let cert_fingerprint = CertFingerprint(Sha512::from_openssl_digest(digest)); + Ok(cert_fingerprint) +} - // Additionally we can calculate a fingerprint for the public key: +/// Calculate a fingerprint for the public EC key. 
+pub(crate) fn key_fingerprint(ec_key: &EcKey) -> Result { let mut big_num_context = BigNumContext::new().map_err(ValidationError::BigNumContextNotAvailable)?; - let buf = ec_key .public_key() .to_bytes( @@ -520,46 +588,41 @@ pub(crate) fn validate_cert(cert: X509) -> Result { &mut big_num_context, ) .map_err(ValidationError::PublicKeyEncodingFailed)?; - - let key_fingerprint = KeyFingerprint(Sha512::new(&buf)); - - Ok(TlsCert { - x509: cert, - cert_fingerprint, - key_fingerprint, - }) + let key_fingerprint = KeyFingerprint(Sha512::new(buf)); + Ok(key_fingerprint) } -/// Loads a certificate from a file. -pub(crate) fn load_cert>(src: P) -> anyhow::Result { - let pem = read_file(src.as_ref()).with_context(|| "failed to load certificate")?; - - Ok(X509::from_pem(&pem).context("parsing certificate")?) -} - -/// Loads a private key from a file. -pub(crate) fn load_private_key>(src: P) -> anyhow::Result> { - let pem = read_file(src.as_ref()).with_context(|| "failed to load private key")?; - - // TODO: It might be that we need to call `PKey::private_key_from_pkcs8` instead. - Ok(PKey::private_key_from_pem(&pem).context("parsing private key")?) +/// Validate cert's public key, and it's EC key parameters. +fn validate_cert_ec_key(cert: &X509) -> Result<(PKey, EcKey), ValidationError> { + let public_key = cert + .public_key() + .map_err(ValidationError::CannotReadPublicKey)?; + let ec_key = public_key + .ec_key() + .map_err(ValidationError::CouldNotExtractEcKey)?; + ec_key.check_key().map_err(ValidationError::KeyFailsCheck)?; + Ok((public_key, ec_key)) } -/// Saves a certificate to a file. -pub fn save_cert>(cert: &X509Ref, dest: P) -> anyhow::Result<()> { - let pem = cert.to_pem().context("converting certificate to PEM")?; - - write_file(dest, pem).with_context(|| "failed to write certificate")?; - Ok(()) -} +/// Check cert's expiration times against current time. 
+fn validate_cert_expiration_date(cert: &X509) -> Result<(), ValidationError> { + let asn1_now = Asn1Time::from_unix(now()).map_err(ValidationError::TimeIssue)?; + if asn1_now + .compare(cert.not_before()) + .map_err(ValidationError::TimeIssue)? + != Ordering::Greater + { + return Err(ValidationError::NotYetValid); + } -/// Saves a private key to a file. -pub fn save_private_key>(key: &PKeyRef, dest: P) -> anyhow::Result<()> { - let pem = key - .private_key_to_pem_pkcs8() - .context("converting private key to PEM")?; + if asn1_now + .compare(cert.not_after()) + .map_err(ValidationError::TimeIssue)? + != Ordering::Less + { + return Err(ValidationError::Expired); + } - write_file(dest, pem).with_context(|| "failed to write private key")?; Ok(()) } @@ -626,7 +689,7 @@ fn num_eq(num: &Asn1IntegerRef, other: u32) -> SslResult { let r = BigNum::from_u32(other)?; // The `BigNum` API seems to be really lacking here. - Ok(l.is_negative() == r.is_negative() && l.ucmp(&r.as_ref()) == Ordering::Equal) + Ok(l.is_negative() == r.is_negative() && l.ucmp(r.as_ref()) == Ordering::Equal) } /// Generates a secret key suitable for TLS encryption. @@ -647,7 +710,7 @@ fn generate_private_key() -> SslResult> { // TODO: Please verify this for accuracy! let ec_group = ec::EcGroup::from_curve_name(SIGNATURE_CURVE)?; - let ec_key = ec::EcKey::generate(ec_group.as_ref())?; + let ec_key = EcKey::generate(ec_group.as_ref())?; PKey::from_ec_key(ec_key) } @@ -684,51 +747,13 @@ fn generate_cert(private_key: &PKey, cn: &str) -> SslResult { // Cheap sanity check. assert!( - validate_cert(cert.clone()).is_ok(), + validate_self_signed_cert(cert.clone()).is_ok(), "newly generated cert does not pass our own validity check" ); Ok(cert) } -/// Serde support for `openx509::X509` certificates. -/// -/// Will also check if certificates are valid according to `validate_cert` when deserializing. 
-mod x509_serde { - use std::str; - - use openssl::x509::X509; - use serde::{Deserialize, Deserializer, Serializer}; - - use super::validate_cert; - - /// Serde-compatible serialization for X509 certificates. - pub(super) fn serialize(value: &X509, serializer: S) -> Result - where - S: Serializer, - { - let encoded = value.to_pem().map_err(serde::ser::Error::custom)?; - - // We don't expect encoding to fail, since PEMs are ASCII, but pass the error just in case. - serializer.serialize_str(str::from_utf8(&encoded).map_err(serde::ser::Error::custom)?) - } - - /// Serde-compatible deserialization for X509 certificates. - pub(super) fn deserialize<'de, D>(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - // Create an extra copy for simplicity here. If this becomes a bottleneck, feel free to try - // to leverage Cow here, or implement a custom visitor that handles both cases. - let s: String = Deserialize::deserialize(deserializer)?; - let x509 = X509::from_pem(s.as_bytes()).map_err(serde::de::Error::custom)?; - - validate_cert(x509) - .map_err(serde::de::Error::custom) - .map(|tc| tc.x509) - } -} - // Below are trait implementations for signatures and fingerprints. Both implement the full set of // traits that are required to stick into either a `HashMap` or `BTreeMap`. impl PartialEq for Sha512 { @@ -756,7 +781,7 @@ impl PartialOrd for Sha512 { impl Debug for Sha512 { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - write!(f, "{}", HexFmt(&self.0[..])) + write!(f, "{}", base16::encode_lower(&self.0[..])) } } @@ -811,13 +836,13 @@ impl Hash for Sha512 { // TODO: Benchmark if this is really worthwhile over the automatic derivation. 
chunk.copy_from_slice(&self.bytes()[0..8]); - state.write_u64(u64::from_le_bytes(chunk)) + state.write_u64(u64::from_le_bytes(chunk)); } } #[cfg(test)] -mod test { - use super::{generate_node_cert, mkname, name_to_string, validate_cert, TlsCert}; +mod tests { + use super::*; #[test] fn simple_name_to_string() { @@ -830,17 +855,79 @@ mod test { } #[test] - fn test_tls_cert_serde_roundtrip() { - let (cert, _private_key) = generate_node_cert().expect("failed to generate key, cert pair"); + fn test_validate_self_signed_cert() { + let (cert, private_key) = generate_node_cert().expect("failed to generate key, cert pair"); + + // Validates self signed cert + let _tls_cert = + validate_self_signed_cert(cert).expect("generated self signed cert is not valid"); + + // Cert signed by a CA does not validate as self signed + let ca_private_key = generate_private_key().expect("failed to generate private key"); + let ca_signed_cert = make_ca_signed_cert(private_key, ca_private_key); + + let error = validate_self_signed_cert(ca_signed_cert) + .expect_err("should not validate ca signed cert as self signed"); + assert!( + matches!(error, ValidationError::InvalidSignature), + "{:?}", + error + ); + } - let tls_cert = validate_cert(cert).expect("generated cert is not valid"); + #[test] + fn test_validate_cert_with_authority() { + let (ca_cert, ca_private_key) = + generate_node_cert().expect("failed to generate key, cert pair"); - // There is no `PartialEq` impl for `TlsCert`, so we simply serialize it twice. 
- let serialized = bincode::serialize(&tls_cert).expect("could not serialize"); - let deserialized: TlsCert = - bincode::deserialize(serialized.as_slice()).expect("could not deserialize"); - let serialized_again = bincode::serialize(&deserialized).expect("could not serialize"); + let (different_ca_cert, _ca_private_key) = + generate_node_cert().expect("failed to generate key, cert pair"); + + let node_private_key = generate_private_key().expect("failed to generate private key"); + + let node_cert = make_ca_signed_cert(node_private_key, ca_private_key); + + validate_self_signed_cert(node_cert.clone()) + .expect_err("should not validate CA signed cert as self signed"); + + let _node_tls_cert = validate_cert_with_authority(node_cert.clone(), &ca_cert) + .expect("should validate with ca cert"); + + let validation_error = validate_cert_with_authority(node_cert, &different_ca_cert) + .expect_err("should not validate cert against different CA"); + + assert!( + matches!(validation_error, ValidationError::WrongCertificateAuthority), + "{:?}", + validation_error + ); + } - assert_eq!(serialized, serialized_again); + fn make_ca_signed_cert(private_key: PKey, ca_private_key: PKey) -> X509 { + let mut builder = X509Builder::new().unwrap(); + builder.set_version(2).unwrap(); + builder + .set_serial_number(mknum(1).unwrap().as_ref()) + .unwrap(); + let issuer = mkname("US", "Casper Blockchain", "Casper Network").unwrap(); + builder.set_issuer_name(issuer.as_ref()).unwrap(); + builder.set_subject_name(issuer.as_ref()).unwrap(); + let ts = now(); + builder + .set_not_before(Asn1Time::from_unix(ts - 60).unwrap().as_ref()) + .unwrap(); + builder + .set_not_after( + Asn1Time::from_unix(ts + 10 * 365 * 24 * 60 * 60) + .unwrap() + .as_ref(), + ) + .unwrap(); + builder.set_pubkey(private_key.as_ref()).unwrap(); + assert_eq!(Sha512::NID, SIGNATURE_DIGEST); + builder + .sign(ca_private_key.as_ref(), Sha512::create_message_digest()) + .unwrap(); + builder.build() } } diff --git 
a/node/src/types.rs b/node/src/types.rs index 8bb8a7becb..4fd6a8e7aa 100644 --- a/node/src/types.rs +++ b/node/src/types.rs @@ -2,39 +2,47 @@ pub(crate) mod appendable_block; mod block; -pub mod chainspec; -mod deploy; +mod chunkable; mod exit_code; -mod item; -pub mod json_compatibility; +mod max_ttl; mod node_config; mod node_id; -mod peers_map; +/// Peers map. mod status_feed; -mod timestamp; +mod sync_leap; +pub(crate) mod sync_leap_validation_metadata; +pub(crate) mod transaction; +mod validator_matrix; +mod value_or_chunk; + +use std::fmt::Debug; use rand::{CryptoRng, RngCore}; #[cfg(not(test))] use rand_chacha::ChaCha20Rng; +use thiserror::Error; -pub use block::{ - json_compatibility::JsonBlock, Block, BlockBody, BlockHash, BlockHeader, BlockSignatures, - BlockValidationError, FinalitySignature, -}; -pub(crate) use block::{BlockByHeight, BlockHeaderWithMetadata, FinalizedBlock, ProtoBlock}; -pub(crate) use chainspec::ActivationPoint; -pub use chainspec::Chainspec; -pub use deploy::{ - Approval, Deploy, DeployHash, DeployHeader, DeployMetadata, DeployValidationFailure, - Error as DeployError, +pub(crate) use block::{ + compute_approvals_checksum, create_single_block_rewarded_signatures, + BlockExecutionResultsOrChunkId, BlockPayload, BlockWithMetadata, ForwardMetaBlock, + InvalidProposalError, MetaBlock, MetaBlockMergeError, MetaBlockState, }; +pub use block::{BlockExecutionResultsOrChunk, ExecutableBlock, FinalizedBlock, InternalEraReport}; +pub use chunkable::Chunkable; +pub use datasize::DataSize; pub use exit_code::ExitCode; -pub use item::{Item, Tag}; -pub use node_config::NodeConfig; +pub(crate) use max_ttl::MaxTtl; +pub use node_config::{NodeConfig, SyncHandling}; pub(crate) use node_id::NodeId; -pub use peers_map::PeersMap; pub use status_feed::{ChainspecInfo, GetStatusResult, StatusFeed}; -pub use timestamp::{TimeDiff, Timestamp}; +pub(crate) use sync_leap::{GlobalStatesMetadata, SyncLeap, SyncLeapIdentifier}; +pub(crate) use transaction::{ + 
LegacyDeploy, MetaTransaction, TransactionFootprint, TransactionHeader, +}; +pub(crate) use validator_matrix::{EraValidatorWeights, SignatureWeight, ValidatorMatrix}; +pub use value_or_chunk::{ + ChunkingError, TrieOrChunk, TrieOrChunkId, TrieOrChunkIdDisplay, ValueOrChunk, +}; /// An object-safe RNG trait that requires a cryptographically strong random number generator. pub trait CryptoRngCore: CryptoRng + RngCore {} @@ -47,4 +55,9 @@ pub type NodeRng = ChaCha20Rng; /// The RNG used throughout the node for testing. #[cfg(test)] -pub type NodeRng = crate::testing::TestRng; +pub type NodeRng = casper_types::testing::TestRng; + +/// The variants in the given types are expected to all be the same. +#[derive(Debug, Error)] +#[error("mismatch in variants: {0:?}")] +pub struct VariantMismatch(pub(super) Box); diff --git a/node/src/types/appendable_block.rs b/node/src/types/appendable_block.rs index 79a0176e2d..88acc59e3e 100644 --- a/node/src/types/appendable_block.rs +++ b/node/src/types/appendable_block.rs @@ -1,133 +1,304 @@ -use std::collections::HashSet; +use std::{ + collections::{BTreeMap, BTreeSet}, + fmt::{self, Display, Formatter}, +}; -use casper_execution_engine::shared::gas::Gas; use datasize::DataSize; -use num_traits::Zero; +use itertools::Itertools; use thiserror::Error; +use tracing::error; -use crate::{ - components::block_proposer::DeployType, - types::{chainspec::DeployConfig, DeployHash, ProtoBlock, Timestamp}, +use casper_types::{ + Approval, Gas, PublicKey, RewardedSignatures, Timestamp, TransactionConfig, TransactionHash, + AUCTION_LANE_ID, INSTALL_UPGRADE_LANE_ID, MINT_LANE_ID, U512, }; +use super::{BlockPayload, TransactionFootprint, VariantMismatch}; + #[derive(Debug, Error)] pub(crate) enum AddError { - #[error("would exceed maximum transfer count per block")] - TransferCount, - #[error("would exceed maximum deploy count per block")] - DeployCount, + #[error("would exceed maximum count for the category per block")] + Count(u8), + 
#[error("would exceed maximum approval count per block")] + ApprovalCount, #[error("would exceed maximum gas per block")] GasLimit, #[error("would exceed maximum block size")] BlockSize, - #[error("duplicate deploy")] + #[error("duplicate deploy or transaction")] Duplicate, - #[error("payment amount could not be converted to gas")] - InvalidGasAmount, - #[error("deploy is not valid in this context")] - InvalidDeploy, + #[error("deploy or transaction has expired")] + Expired, + #[error(transparent)] + VariantMismatch(#[from] VariantMismatch), + #[error("transaction has excessive ttl")] + ExcessiveTtl, + #[error("transaction is future dated")] + FutureDatedDeploy, } /// A block that is still being added to. It keeps track of and enforces block limits. -#[derive(Clone, DataSize, Debug)] -pub struct AppendableBlock { - deploy_config: DeployConfig, - deploy_hashes: Vec, - transfer_hashes: Vec, - deploy_and_transfer_set: HashSet, +#[derive(Clone, Eq, PartialEq, DataSize, Debug)] +pub(crate) struct AppendableBlock { + transaction_config: TransactionConfig, + current_gas_price: u8, + transactions: BTreeMap, timestamp: Timestamp, - #[data_size(skip)] - total_gas: Gas, - total_size: usize, } impl AppendableBlock { /// Creates an empty `AppendableBlock`. - pub(crate) fn new(deploy_config: DeployConfig, timestamp: Timestamp) -> Self { + pub(crate) fn new( + transaction_config: TransactionConfig, + current_gas_price: u8, + timestamp: Timestamp, + ) -> Self { AppendableBlock { - deploy_config, - deploy_hashes: Vec::new(), - transfer_hashes: Vec::new(), + transaction_config, + current_gas_price, + transactions: BTreeMap::new(), timestamp, - deploy_and_transfer_set: HashSet::new(), - total_gas: Gas::zero(), - total_size: 0, } } - /// Returns the total size of all deploys so far. - pub(crate) fn total_size(&self) -> usize { - self.total_size - } - - /// Attempts to add a deploy to the block; returns an error if that would violate a validity - /// condition. 
- pub(crate) fn add( + /// Attempt to append transaction to block. + pub(crate) fn add_transaction( &mut self, - hash: DeployHash, - deploy_type: &DeployType, + footprint: &TransactionFootprint, ) -> Result<(), AddError> { - if self.deploy_and_transfer_set.contains(&hash) { + if self + .transactions + .keys() + .contains(&footprint.transaction_hash) + { return Err(AddError::Duplicate); } - if !deploy_type - .header() - .is_valid(&self.deploy_config, self.timestamp) + if footprint.ttl > self.transaction_config.max_ttl { + return Err(AddError::ExcessiveTtl); + } + if footprint.timestamp > self.timestamp { + return Err(AddError::FutureDatedDeploy); + } + let expires = footprint.timestamp.saturating_add(footprint.ttl); + if expires < self.timestamp { + return Err(AddError::Expired); + } + let lane_id = footprint.lane_id; + let limit = self + .transaction_config + .transaction_v1_config + .get_max_transaction_count(lane_id); + // check total count by category + let count = self + .transactions + .iter() + .filter(|(_, item)| item.lane_id == lane_id) + .count(); + if count.checked_add(1).ok_or(AddError::Count(lane_id))? > limit as usize { + return Err(AddError::Count(lane_id)); + } + // check total gas + let gas_limit: U512 = self + .transactions + .values() + .map(|item| item.gas_limit.value()) + .sum(); + if gas_limit + .checked_add(footprint.gas_limit.value()) + .ok_or(AddError::GasLimit)? + > U512::from(self.transaction_config.block_gas_limit) { - return Err(AddError::InvalidDeploy); + return Err(AddError::GasLimit); } - if deploy_type.is_transfer() { - if self.has_max_transfer_count() { - return Err(AddError::TransferCount); - } - self.transfer_hashes.push(hash); - } else { - if self.has_max_deploy_count() { - return Err(AddError::DeployCount); - } - // Only deploys count towards the size and gas limits. 
- let new_total_size = self - .total_size - .checked_add(deploy_type.size()) - .filter(|size| *size <= self.deploy_config.max_block_size as usize) - .ok_or(AddError::BlockSize)?; - let payment_amount = deploy_type.payment_amount(); - let gas_price = deploy_type.header().gas_price(); - let gas = - Gas::from_motes(payment_amount, gas_price).ok_or(AddError::InvalidGasAmount)?; - let new_total_gas = self.total_gas.checked_add(gas).ok_or(AddError::GasLimit)?; - if new_total_gas > Gas::from(self.deploy_config.block_gas_limit) { - return Err(AddError::GasLimit); - } - self.deploy_hashes.push(hash); - self.total_gas = new_total_gas; - self.total_size = new_total_size; + // check total byte size + let size: usize = self + .transactions + .values() + .map(|item| item.size_estimate) + .sum(); + if size + .checked_add(footprint.size_estimate) + .ok_or(AddError::BlockSize)? + > self.transaction_config.max_block_size as usize + { + return Err(AddError::BlockSize); + } + // check total approvals + let count: usize = self + .transactions + .values() + .map(|item| item.approvals_count()) + .sum(); + if count + .checked_add(footprint.approvals_count()) + .ok_or(AddError::ApprovalCount)? + > self.transaction_config.block_max_approval_count as usize + { + return Err(AddError::ApprovalCount); } - self.deploy_and_transfer_set.insert(hash); + self.transactions + .insert(footprint.transaction_hash, footprint.clone()); Ok(()) } - /// Creates a `ProtoBlock` with the `AppendableBlock`s deploys and transfers, and the given - /// random bit. - pub(crate) fn into_proto_block(self, random_bit: bool) -> ProtoBlock { + /// Creates a `BlockPayload` with the `AppendableBlock`s transactions and transfers, and the + /// given random bit and accusations. 
+ pub(crate) fn into_block_payload( + self, + accusations: Vec, + rewarded_signatures: RewardedSignatures, + random_bit: bool, + ) -> BlockPayload { let AppendableBlock { - deploy_hashes, - transfer_hashes, - timestamp, + transactions: footprints, + current_gas_price: price, .. } = self; - ProtoBlock::new(deploy_hashes, transfer_hashes, timestamp, random_bit) + + fn collate( + lane: u8, + collater: &mut BTreeMap)>>, + items: &BTreeMap, + ) { + let mut ret = vec![]; + for (x, y) in items.iter().filter(|(_, y)| y.lane_id == lane) { + ret.push((*x, y.approvals.clone())); + } + if !ret.is_empty() { + collater.insert(lane, ret); + } + } + + let mut transactions = BTreeMap::new(); + collate(MINT_LANE_ID, &mut transactions, &footprints); + collate(AUCTION_LANE_ID, &mut transactions, &footprints); + collate(INSTALL_UPGRADE_LANE_ID, &mut transactions, &footprints); + for lane_id in self + .transaction_config + .transaction_v1_config + .wasm_lanes() + .iter() + .map(|lane| lane.id()) + { + collate(lane_id, &mut transactions, &footprints); + } + + BlockPayload::new( + transactions, + accusations, + rewarded_signatures, + random_bit, + price, + ) + } + + pub(crate) fn timestamp(&self) -> Timestamp { + self.timestamp + } + + fn category_lane(&self, lane: u8) -> usize { + self.transactions + .iter() + .filter(|(_, f)| f.lane_id == lane) + .count() } - /// Returns `true` if the number of transfers is already the maximum allowed count, i.e. no - /// more transfers can be added to this block. 
- fn has_max_transfer_count(&self) -> bool { - self.transfer_hashes.len() == self.deploy_config.block_max_transfer_count as usize + #[cfg(test)] + pub fn transaction_count(&self) -> usize { + self.transactions.len() + } +} + +impl Display for AppendableBlock { + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + let total_count = self.transactions.len(); + let mint_count = self.category_lane(MINT_LANE_ID); + let auction_count = self.category_lane(AUCTION_LANE_ID); + let install_upgrade_count = self.category_lane(INSTALL_UPGRADE_LANE_ID); + let wasm_count = total_count - mint_count - auction_count - install_upgrade_count; + let total_gas_limit: Gas = self + .transactions + .values() + .map(|f| f.gas_limit) + .try_fold(Gas::new(0), |acc, gas| acc.checked_add(gas)) + .unwrap_or(Gas::MAX); + let total_approvals_count: usize = self + .transactions + .values() + .map(|f| f.approvals_count()) + .sum(); + let total_size_estimate: usize = self.transactions.values().map(|f| f.size_estimate).sum(); + + write!( + formatter, + "AppendableBlock(timestamp-{}: + mint: {mint_count}, \ + auction: {auction_count}, \ + install_upgrade: {install_upgrade_count}, \ + wasm: {wasm_count}, \ + total count: {total_count}, \ + approvals: {total_approvals_count}, \ + gas: {total_gas_limit}, \ + size: {total_size_estimate})", + self.timestamp, + ) + } +} + +#[cfg(test)] +mod tests { + use casper_types::{testing::TestRng, SingleBlockRewardedSignatures, TimeDiff}; + + use crate::testing::LARGE_WASM_LANE_ID; + + use super::*; + use std::collections::HashSet; + + impl AppendableBlock { + pub(crate) fn transaction_hashes(&self) -> HashSet { + self.transactions.keys().copied().collect() + } } - /// Returns `true` if the number of deploys is already the maximum allowed count, i.e. no more - /// deploys can be added to this block. 
- fn has_max_deploy_count(&self) -> bool { - self.deploy_hashes.len() == self.deploy_config.block_max_deploy_count as usize + #[test] + pub fn should_build_block_payload_from_all_transactions() { + let mut test_rng = TestRng::new(); + let mut appendable_block = AppendableBlock::new( + TransactionConfig::default(), + 0, + Timestamp::now() + TimeDiff::from_millis(15000), + ); + let transfer_footprint = TransactionFootprint::random_of_lane(MINT_LANE_ID, &mut test_rng); + let auction_footprint = + TransactionFootprint::random_of_lane(AUCTION_LANE_ID, &mut test_rng); + let install_upgrade_footprint = + TransactionFootprint::random_of_lane(INSTALL_UPGRADE_LANE_ID, &mut test_rng); + let large_wasm_footprint = + TransactionFootprint::random_of_lane(LARGE_WASM_LANE_ID, &mut test_rng); + let signatures = RewardedSignatures::new(vec![SingleBlockRewardedSignatures::random( + &mut test_rng, + 2, + )]); + appendable_block + .add_transaction(&transfer_footprint) + .unwrap(); + appendable_block + .add_transaction(&auction_footprint) + .unwrap(); + appendable_block + .add_transaction(&install_upgrade_footprint) + .unwrap(); + appendable_block + .add_transaction(&large_wasm_footprint) + .unwrap(); + let block_payload = appendable_block.into_block_payload(vec![], signatures.clone(), false); + let transaction_hashes: BTreeSet = + block_payload.all_transaction_hashes().collect(); + assert!(transaction_hashes.contains(&transfer_footprint.transaction_hash)); + assert!(transaction_hashes.contains(&auction_footprint.transaction_hash)); + assert!(transaction_hashes.contains(&install_upgrade_footprint.transaction_hash)); + assert!(transaction_hashes.contains(&large_wasm_footprint.transaction_hash)); + assert_eq!(transaction_hashes.len(), 4); + assert_eq!(*block_payload.rewarded_signatures(), signatures); } } diff --git a/node/src/types/block.rs b/node/src/types/block.rs index cd26b7033d..a878fedb6c 100644 --- a/node/src/types/block.rs +++ b/node/src/types/block.rs @@ -1,1792 +1,98 @@ -// 
TODO - remove once schemars stops causing warning. -#![allow(clippy::field_reassign_with_default)] +mod approvals_hashes; +mod block_execution_results_or_chunk; +mod block_execution_results_or_chunk_id; +mod block_payload; +mod block_with_metadata; +mod executable_block; +mod finalized_block; +mod invalid_proposal_error; +mod meta_block; -#[cfg(test)] -use std::iter; -use std::{ - array::TryFromSliceError, - collections::BTreeMap, - error::Error as StdError, - fmt::{self, Debug, Display, Formatter}, - hash::Hash, -}; - -use blake2::{ - digest::{Update, VariableOutput}, - VarBlake2b, -}; - -use datasize::DataSize; -use hex::FromHexError; -use hex_fmt::HexList; -use once_cell::sync::Lazy; -#[cfg(test)] -use rand::Rng; -use schemars::JsonSchema; -use serde::{Deserialize, Serialize}; -use thiserror::Error; - -#[cfg(test)] -use casper_types::system::auction::BLOCK_REWARD; use casper_types::{ - bytesrepr::{self, FromBytes, ToBytes}, - EraId, ProtocolVersion, PublicKey, SecretKey, Signature, U512, + bytesrepr::{self, ToBytes}, + Digest, FinalitySignature, SingleBlockRewardedSignatures, TransactionId, }; -use super::{Item, Tag, Timestamp}; -#[cfg(test)] -use crate::crypto::generate_ed25519_keypair; -#[cfg(test)] -use crate::testing::TestRng; -use crate::{ - components::consensus, - crypto::{ - self, - hash::{self, Digest}, - AsymmetricKeyExt, - }, - rpcs::docs::DocExample, - types::{Deploy, DeployHash, JsonBlock}, - utils::DisplayIter, +pub use block_execution_results_or_chunk::BlockExecutionResultsOrChunk; +pub(crate) use block_execution_results_or_chunk_id::BlockExecutionResultsOrChunkId; +pub use block_payload::BlockPayload; +pub(crate) use block_with_metadata::BlockWithMetadata; +pub use executable_block::ExecutableBlock; +pub use finalized_block::{FinalizedBlock, InternalEraReport}; +pub(crate) use invalid_proposal_error::InvalidProposalError; +pub(crate) use meta_block::{ + ForwardMetaBlock, MergeMismatchError as MetaBlockMergeError, MetaBlock, State as 
MetaBlockState, }; -static ERA_REPORT: Lazy = Lazy::new(|| { - let secret_key_1 = SecretKey::ed25519_from_bytes([0; 32]).unwrap(); - let public_key_1 = PublicKey::from(&secret_key_1); - let equivocators = vec![public_key_1]; - - let secret_key_2 = SecretKey::ed25519_from_bytes([1; 32]).unwrap(); - let public_key_2 = PublicKey::from(&secret_key_2); - let mut rewards = BTreeMap::new(); - rewards.insert(public_key_2, 1000); - - let secret_key_3 = SecretKey::ed25519_from_bytes([2; 32]).unwrap(); - let public_key_3 = PublicKey::from(&secret_key_3); - let inactive_validators = vec![public_key_3]; - - EraReport { - equivocators, - rewards, - inactive_validators, - } -}); -static ERA_END: Lazy = Lazy::new(|| { - let secret_key_1 = SecretKey::ed25519_from_bytes([0; 32]).unwrap(); - let public_key_1 = PublicKey::from(&secret_key_1); - let next_era_validator_weights = { - let mut next_era_validator_weights: BTreeMap = BTreeMap::new(); - next_era_validator_weights.insert(public_key_1, U512::from(123)); - next_era_validator_weights.insert( - PublicKey::from( - &SecretKey::ed25519_from_bytes([5u8; SecretKey::ED25519_LENGTH]).unwrap(), - ), - U512::from(456), - ); - next_era_validator_weights.insert( - PublicKey::from( - &SecretKey::ed25519_from_bytes([6u8; SecretKey::ED25519_LENGTH]).unwrap(), - ), - U512::from(789), - ); - next_era_validator_weights - }; - - let era_report = EraReport::doc_example().clone(); - EraEnd::new(era_report, next_era_validator_weights) -}); -static FINALIZED_BLOCK: Lazy = Lazy::new(|| { - let deploy_hashes = vec![*Deploy::doc_example().id()]; - let random_bit = true; - let timestamp = *Timestamp::doc_example(); - let proto_block = ProtoBlock::new(deploy_hashes, vec![], timestamp, random_bit); - let era_report = Some(EraReport::doc_example().clone()); - let era_id = EraId::from(1); - let height = 10; - let secret_key = SecretKey::doc_example(); - let public_key = PublicKey::from(secret_key); - FinalizedBlock::new(proto_block, era_report, era_id, height, 
public_key) -}); -static BLOCK: Lazy = Lazy::new(|| { - let parent_hash = BlockHash::new(Digest::from([7u8; Digest::LENGTH])); - let state_root_hash = Digest::from([8u8; Digest::LENGTH]); - let finalized_block = FinalizedBlock::doc_example().clone(); - let parent_seed = Digest::from([9u8; Digest::LENGTH]); - let protocol_version = ProtocolVersion::V1_0_0; - - let secret_key = SecretKey::doc_example(); - let public_key = PublicKey::from(secret_key); - - let next_era_validator_weights = { - let mut next_era_validator_weights: BTreeMap = BTreeMap::new(); - next_era_validator_weights.insert(public_key, U512::from(123)); - next_era_validator_weights.insert( - PublicKey::from( - &SecretKey::ed25519_from_bytes([5u8; SecretKey::ED25519_LENGTH]).unwrap(), - ), - U512::from(456), - ); - next_era_validator_weights.insert( - PublicKey::from( - &SecretKey::ed25519_from_bytes([6u8; SecretKey::ED25519_LENGTH]).unwrap(), - ), - U512::from(789), - ); - Some(next_era_validator_weights) - }; - - Block::new( - parent_hash, - parent_seed, - state_root_hash, - finalized_block, - next_era_validator_weights, - protocol_version, - ) -}); -static JSON_BLOCK: Lazy = Lazy::new(|| { - let block = Block::doc_example().clone(); - let mut block_signature = BlockSignatures::new(*block.hash(), block.header().era_id); - - let secret_key = SecretKey::doc_example(); - let public_key = PublicKey::from(secret_key); - - let signature = crypto::sign(block.hash.inner(), &secret_key, &public_key); - block_signature.insert_proof(public_key, signature); - - JsonBlock::new(block, block_signature) -}); - -/// Error returned from constructing or validating a `Block`. -#[derive(Debug, Error)] -pub enum Error { - /// Error while encoding to JSON. - #[error("encoding to JSON: {0}")] - EncodeToJson(#[from] serde_json::Error), - - /// Error while decoding from JSON. 
- #[error("decoding from JSON: {0}")] - DecodeFromJson(Box), -} - -impl From for Error { - fn from(error: FromHexError) -> Self { - Error::DecodeFromJson(Box::new(error)) - } -} - -impl From for Error { - fn from(error: TryFromSliceError) -> Self { - Error::DecodeFromJson(Box::new(error)) - } -} - -/// A cryptographic hash identifying a `ProtoBlock`. -#[derive( - Copy, - Clone, - DataSize, - Ord, - PartialOrd, - Eq, - PartialEq, - Hash, - Serialize, - Deserialize, - Debug, - Default, -)] -pub struct ProtoBlockHash(Digest); - -impl ProtoBlockHash { - /// Constructs a new `ProtoBlockHash`. - pub fn new(hash: Digest) -> Self { - ProtoBlockHash(hash) - } - - /// Returns the wrapped inner hash. - pub fn inner(&self) -> &Digest { - &self.0 - } -} - -impl Display for ProtoBlockHash { - fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { - write!(formatter, "proto-block-hash({})", self.0) - } -} - -/// The piece of information that will become the content of a future block (isn't finalized or -/// executed yet) -/// -/// From the view of the consensus protocol this is the "consensus value": The protocol deals with -/// finalizing an order of `ProtoBlock`s. Only after consensus has been reached, the block's -/// deploys actually get executed, and the executed block gets signed. -/// -/// The word "proto" does _not_ refer to "protocol" or "protobuf"! It is just a prefix to highlight -/// that this comes before a block in the linear, executed, finalized blockchain is produced. 
-#[derive(Clone, DataSize, Debug, PartialOrd, Ord, PartialEq, Eq, Hash, Serialize, Deserialize)] -pub struct ProtoBlock { - hash: ProtoBlockHash, - deploy_hashes: Vec, - transfer_hashes: Vec, - timestamp: Timestamp, - random_bit: bool, -} - -impl ProtoBlock { - pub(crate) fn new( - deploy_hashes: Vec, - transfer_hashes: Vec, - timestamp: Timestamp, - random_bit: bool, - ) -> Self { - let hash = ProtoBlockHash::new(hash::hash( - &bincode::serialize(&(&deploy_hashes, &transfer_hashes, timestamp, random_bit)) - .expect("serialize ProtoBlock"), - )); - - ProtoBlock { - hash, - deploy_hashes, - transfer_hashes, - timestamp, - random_bit, - } - } - - pub(crate) fn hash(&self) -> &ProtoBlockHash { - &self.hash - } - - /// Returns the time when this proto block was proposed. - pub(crate) fn timestamp(&self) -> Timestamp { - self.timestamp - } - - /// The list of deploy hashes included in the block. - pub(crate) fn deploy_hashes(&self) -> &Vec { - &self.deploy_hashes - } - - /// The list of deploy hashes included in the block. - pub(crate) fn transfer_hashes(&self) -> &Vec { - &self.transfer_hashes - } - - pub(crate) fn deploys_and_transfers_iter(&self) -> impl Iterator { - self.deploy_hashes() - .iter() - .chain(self.transfer_hashes().iter()) - } - - /// A random bit needed for initializing a future era. - pub(crate) fn random_bit(&self) -> bool { - self.random_bit - } -} - -impl Display for ProtoBlock { - fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { - write!( - formatter, - "proto block {}, deploys {}, transfers {}, random bit {}, timestamp {}", - self.hash.inner(), - HexList(&self.deploy_hashes), - HexList(&self.transfer_hashes), - self.random_bit(), - self.timestamp, - ) - } -} - -/// Equivocation and reward information to be included in the terminal finalized block. 
-pub type EraReport = consensus::EraReport; - -impl Display for EraReport { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - let slashings = DisplayIter::new(&self.equivocators); - let rewards = DisplayIter::new( - self.rewards - .iter() - .map(|(public_key, amount)| format!("{}: {}", public_key, amount)), - ); - write!(f, "era end: slash {}, reward {}", slashings, rewards) - } -} - -impl ToBytes for EraReport { - fn to_bytes(&self) -> Result, bytesrepr::Error> { - let mut buffer = bytesrepr::allocate_buffer(self)?; - buffer.extend(self.equivocators.to_bytes()?); - buffer.extend(self.rewards.to_bytes()?); - buffer.extend(self.inactive_validators.to_bytes()?); - Ok(buffer) - } - - fn serialized_length(&self) -> usize { - self.equivocators.serialized_length() - + self.rewards.serialized_length() - + self.inactive_validators.serialized_length() - } -} - -impl FromBytes for EraReport { - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { - let (equivocators, remainder) = Vec::::from_bytes(bytes)?; - let (rewards, remainder) = BTreeMap::::from_bytes(remainder)?; - let (inactive_validators, remainder) = Vec::::from_bytes(remainder)?; - - let era_report = EraReport { - equivocators, - rewards, - inactive_validators, - }; - Ok((era_report, remainder)) - } -} - -impl DocExample for EraReport { - fn doc_example() -> &'static Self { - &*ERA_REPORT - } -} - -/// The piece of information that will become the content of a future block after it was finalized -/// and before execution happened yet. 
-#[derive(Clone, DataSize, Debug, PartialOrd, Ord, PartialEq, Eq, Hash, Serialize, Deserialize)] -pub struct FinalizedBlock { - deploy_hashes: Vec, - transfer_hashes: Vec, - timestamp: Timestamp, - random_bit: bool, - era_report: Option, - era_id: EraId, - height: u64, - proposer: PublicKey, -} - -impl FinalizedBlock { - pub(crate) fn new( - proto_block: ProtoBlock, - era_report: Option, - era_id: EraId, - height: u64, - proposer: PublicKey, - ) -> Self { - FinalizedBlock { - deploy_hashes: proto_block.deploy_hashes, - transfer_hashes: proto_block.transfer_hashes, - timestamp: proto_block.timestamp, - random_bit: proto_block.random_bit, - era_report, - era_id, - height, - proposer, - } - } - - /// The timestamp from when the proto block was proposed. - pub(crate) fn timestamp(&self) -> Timestamp { - self.timestamp - } - - /// Returns slashing and reward information if this is a switch block, i.e. the last block of - /// its era. - pub(crate) fn era_report(&self) -> Option<&EraReport> { - self.era_report.as_ref() - } - - /// Returns the ID of the era this block belongs to. - pub(crate) fn era_id(&self) -> EraId { - self.era_id - } - - /// Returns the height of this block. - pub(crate) fn height(&self) -> u64 { - self.height - } - - pub(crate) fn proposer(&self) -> PublicKey { - self.proposer.clone() - } - - /// Returns an iterator over all deploy and transfer hashes. - pub(crate) fn deploys_and_transfers_iter(&self) -> impl Iterator { - self.deploy_hashes.iter().chain(&self.transfer_hashes) - } - - /// Generates a random instance using a `TestRng`. - #[cfg(test)] - pub fn random(rng: &mut TestRng) -> Self { - let era = rng.gen_range(0..5); - let height = era * 10 + rng.gen_range(0..10); - let is_switch = rng.gen_bool(0.1); - - FinalizedBlock::random_with_specifics(rng, EraId::from(era), height, is_switch) - } - - /// Generates a random instance using a `TestRng`, but using the specified era ID and height. 
- #[cfg(test)] - pub fn random_with_specifics( - rng: &mut TestRng, - era_id: EraId, - height: u64, - is_switch: bool, - ) -> Self { - let deploy_count = rng.gen_range(0..11); - let deploy_hashes = iter::repeat_with(|| DeployHash::new(Digest::random(rng))) - .take(deploy_count) - .collect(); - let random_bit = rng.gen(); - // TODO - make Timestamp deterministic. - let timestamp = Timestamp::now(); - let proto_block = ProtoBlock::new(deploy_hashes, vec![], timestamp, random_bit); - - let era_report = if is_switch { - let equivocators_count = rng.gen_range(0..5); - let rewards_count = rng.gen_range(0..5); - let inactive_count = rng.gen_range(0..5); - Some(EraReport { - equivocators: iter::repeat_with(|| { - PublicKey::from(&SecretKey::ed25519_from_bytes(rng.gen::<[u8; 32]>()).unwrap()) - }) - .take(equivocators_count) - .collect(), - rewards: iter::repeat_with(|| { - let pub_key = PublicKey::from( - &SecretKey::ed25519_from_bytes(rng.gen::<[u8; 32]>()).unwrap(), - ); - let reward = rng.gen_range(1..(BLOCK_REWARD + 1)); - (pub_key, reward) - }) - .take(rewards_count) - .collect(), - inactive_validators: iter::repeat_with(|| { - PublicKey::from(&SecretKey::ed25519_from_bytes(rng.gen::<[u8; 32]>()).unwrap()) - }) - .take(inactive_count) - .collect(), - }) - } else { - None - }; - let secret_key: SecretKey = SecretKey::ed25519_from_bytes(rng.gen::<[u8; 32]>()).unwrap(); - let public_key = PublicKey::from(&secret_key); - - FinalizedBlock::new(proto_block, era_report, era_id, height, public_key) - } -} - -impl DocExample for FinalizedBlock { - fn doc_example() -> &'static Self { - &*FINALIZED_BLOCK - } -} - -impl From for FinalizedBlock { - fn from(block: Block) -> Self { - FinalizedBlock { - deploy_hashes: block.body.deploy_hashes, - transfer_hashes: block.body.transfer_hashes, - timestamp: block.header.timestamp, - random_bit: block.header.random_bit, - era_report: block.header.era_end.map(|era_end| era_end.era_report), - era_id: block.header.era_id, - height: 
block.header.height, - proposer: block.body.proposer, - } - } -} - -impl Display for FinalizedBlock { - fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { - write!( - formatter, - "finalized block in era {:?}, height {}, deploys {:10}, transfers {:10}, \ - random bit {}, timestamp {}", - self.era_id, - self.height, - HexList(&self.deploy_hashes), - HexList(&self.transfer_hashes), - self.random_bit, - self.timestamp, - )?; - if let Some(ee) = &self.era_report { - write!(formatter, ", era_end: {}", ee)?; - } - Ok(()) - } -} - -/// A cryptographic hash identifying a [`Block`](struct.Block.html). -#[derive( - Copy, - Clone, - DataSize, - Default, - Ord, - PartialOrd, - Eq, - PartialEq, - Hash, - Serialize, - Deserialize, - Debug, - JsonSchema, -)] -#[serde(deny_unknown_fields)] -pub struct BlockHash(Digest); - -impl BlockHash { - /// Constructs a new `BlockHash`. - pub fn new(hash: Digest) -> Self { - BlockHash(hash) - } - - /// Returns the wrapped inner hash. - pub fn inner(&self) -> &Digest { - &self.0 - } - - /// Creates a random block hash. 
- #[cfg(test)] - pub fn random(rng: &mut TestRng) -> Self { - let hash = Digest::random(rng); - BlockHash(hash) - } -} - -impl Display for BlockHash { - fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { - write!(formatter, "block-hash({})", self.0,) - } -} - -impl From for BlockHash { - fn from(digest: Digest) -> Self { - Self(digest) - } -} - -impl AsRef<[u8]> for BlockHash { - fn as_ref(&self) -> &[u8] { - self.0.as_ref() - } -} - -impl ToBytes for BlockHash { - fn to_bytes(&self) -> Result, bytesrepr::Error> { - self.0.to_bytes() - } - - fn serialized_length(&self) -> usize { - self.0.serialized_length() - } -} - -impl FromBytes for BlockHash { - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { - let (hash, remainder) = Digest::from_bytes(bytes)?; - let block_hash = BlockHash(hash); - Ok((block_hash, remainder)) - } -} - -#[derive(Clone, DataSize, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug)] -/// A struct to contain information related to the end of an era and validator weights for the -/// following era. -pub struct EraEnd { - /// The era end information. - era_report: EraReport, - /// The validator weights for the next era. 
- next_era_validator_weights: BTreeMap, -} - -impl EraEnd { - pub fn new( - era_report: EraReport, - next_era_validator_weights: BTreeMap, - ) -> Self { - EraEnd { - era_report, - next_era_validator_weights, - } - } - - pub fn era_report(&self) -> &EraReport { - &self.era_report - } -} - -impl ToBytes for EraEnd { - fn to_bytes(&self) -> Result, bytesrepr::Error> { - let mut buffer = bytesrepr::allocate_buffer(self)?; - buffer.extend(self.era_report.to_bytes()?); - buffer.extend(self.next_era_validator_weights.to_bytes()?); - Ok(buffer) - } - - fn serialized_length(&self) -> usize { - self.era_report.serialized_length() + self.next_era_validator_weights.serialized_length() - } -} - -impl FromBytes for EraEnd { - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { - let (era_report, bytes) = EraReport::from_bytes(bytes)?; - let (next_era_validator_weights, bytes) = BTreeMap::::from_bytes(bytes)?; - let era_end = EraEnd { - era_report, - next_era_validator_weights, - }; - Ok((era_end, bytes)) - } -} - -impl Display for EraEnd { - fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { - write!(formatter, "era_report: {} ", self.era_report) - } -} - -impl DocExample for EraEnd { - fn doc_example() -> &'static Self { - &*ERA_END - } -} - -/// The header portion of a [`Block`](struct.Block.html). -#[derive(Clone, DataSize, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug)] -pub struct BlockHeader { - parent_hash: BlockHash, - state_root_hash: Digest, - body_hash: Digest, - random_bit: bool, - accumulated_seed: Digest, - era_end: Option, - timestamp: Timestamp, - era_id: EraId, - height: u64, - protocol_version: ProtocolVersion, -} - -impl BlockHeader { - /// The parent block's hash. - pub fn parent_hash(&self) -> &BlockHash { - &self.parent_hash - } - - /// The root hash of the resulting global state. - pub fn state_root_hash(&self) -> &Digest { - &self.state_root_hash - } - - /// The hash of the block's body. 
- pub fn body_hash(&self) -> &Digest { - &self.body_hash - } - - /// A random bit needed for initializing a future era. - pub fn random_bit(&self) -> bool { - self.random_bit - } - - /// A seed needed for initializing a future era. - pub fn accumulated_seed(&self) -> Digest { - self.accumulated_seed - } - - /// Returns reward and slashing information if this is the era's last block. - pub fn era_end(&self) -> Option<&EraReport> { - match &self.era_end { - Some(data) => Some(data.era_report()), - None => None, - } - } - - /// The timestamp from when the proto block was proposed. - pub fn timestamp(&self) -> Timestamp { - self.timestamp - } - - /// Era ID in which this block was created. - pub fn era_id(&self) -> EraId { - self.era_id - } - - /// Returns the era ID in which the next block would be created (that is, this block's era ID, - /// or its successor if this is a switch block). - pub fn next_block_era_id(&self) -> EraId { - if self.era_end.is_some() { - self.era_id.successor() - } else { - self.era_id - } - } - - /// Returns the height of this block, i.e. the number of ancestors. - pub fn height(&self) -> u64 { - self.height - } - - /// Returns the protocol version of the network from when this block was created. - pub fn protocol_version(&self) -> ProtocolVersion { - self.protocol_version - } - - /// Returns `true` if this block is the last one in the current era. - pub fn is_switch_block(&self) -> bool { - self.era_end.is_some() - } - - /// The validators for the upcoming era and their respective weights. - pub fn next_era_validator_weights(&self) -> Option<&BTreeMap> { - match &self.era_end { - Some(era_end) => { - let validator_weights = &era_end.next_era_validator_weights; - Some(validator_weights) - } - None => None, - } - } - - /// Hash of the block header. 
- pub fn hash(&self) -> BlockHash { - let serialized_header = Self::serialize(&self) - .unwrap_or_else(|error| panic!("should serialize block header: {}", error)); - BlockHash::new(hash::hash(&serialized_header)) - } - - /// Returns true if block is Genesis' child. - /// Genesis child block is from era 0 and height 0. - pub(crate) fn is_genesis_child(&self) -> bool { - self.era_id().is_genesis() && self.height() == 0 - } - - // Serialize the block header. - fn serialize(&self) -> Result, bytesrepr::Error> { - self.to_bytes() - } -} - -impl Display for BlockHeader { - fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { - write!( - formatter, - "block header parent hash {}, post-state hash {}, body hash {}, \ - random bit {}, accumulated seed {}, timestamp {}", - self.parent_hash.inner(), - self.state_root_hash, - self.body_hash, - self.random_bit, - self.accumulated_seed, - self.timestamp, - )?; - if let Some(ee) = &self.era_end { - write!(formatter, ", era_end: {}", ee)?; - } - Ok(()) - } -} - -impl ToBytes for BlockHeader { - fn to_bytes(&self) -> Result, bytesrepr::Error> { - let mut buffer = bytesrepr::allocate_buffer(self)?; - buffer.extend(self.parent_hash.to_bytes()?); - buffer.extend(self.state_root_hash.to_bytes()?); - buffer.extend(self.body_hash.to_bytes()?); - buffer.extend(self.random_bit.to_bytes()?); - buffer.extend(self.accumulated_seed.to_bytes()?); - buffer.extend(self.era_end.to_bytes()?); - buffer.extend(self.timestamp.to_bytes()?); - buffer.extend(self.era_id.to_bytes()?); - buffer.extend(self.height.to_bytes()?); - buffer.extend(self.protocol_version.to_bytes()?); - Ok(buffer) - } - - fn serialized_length(&self) -> usize { - self.parent_hash.serialized_length() - + self.state_root_hash.serialized_length() - + self.body_hash.serialized_length() - + self.random_bit.serialized_length() - + self.accumulated_seed.serialized_length() - + self.era_end.serialized_length() - + self.timestamp.serialized_length() - + self.era_id.serialized_length() 
- + self.height.serialized_length() - + self.protocol_version.serialized_length() - } -} - -impl FromBytes for BlockHeader { - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { - let (parent_hash, remainder) = BlockHash::from_bytes(bytes)?; - let (state_root_hash, remainder) = Digest::from_bytes(remainder)?; - let (body_hash, remainder) = Digest::from_bytes(remainder)?; - let (random_bit, remainder) = bool::from_bytes(remainder)?; - let (accumulated_seed, remainder) = Digest::from_bytes(remainder)?; - let (era_end, remainder) = Option::::from_bytes(remainder)?; - let (timestamp, remainder) = Timestamp::from_bytes(remainder)?; - let (era_id, remainder) = EraId::from_bytes(remainder)?; - let (height, remainder) = u64::from_bytes(remainder)?; - let (protocol_version, remainder) = ProtocolVersion::from_bytes(remainder)?; - let block_header = BlockHeader { - parent_hash, - state_root_hash, - body_hash, - random_bit, - accumulated_seed, - era_end, - timestamp, - era_id, - height, - protocol_version, - }; - Ok((block_header, remainder)) - } -} - -#[derive(Debug, Serialize, Deserialize, Clone)] -pub struct BlockHeaderWithMetadata { - pub block_header: BlockHeader, - pub block_signatures: BlockSignatures, -} - -impl Display for BlockHeaderWithMetadata { - fn fmt(&self, f: &mut Formatter) -> fmt::Result { - write!(f, "{} and {}", self.block_header, self.block_signatures) - } -} - -/// The body portion of a block. -#[derive(Clone, DataSize, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug)] -pub struct BlockBody { - proposer: PublicKey, - deploy_hashes: Vec, - transfer_hashes: Vec, -} - -impl BlockBody { - /// Creates a new body from deploy and transfer hashes. - pub(crate) fn new( - proposer: PublicKey, - deploy_hashes: Vec, - transfer_hashes: Vec, - ) -> Self { - BlockBody { - proposer, - deploy_hashes, - transfer_hashes, - } - } - - /// Block proposer. 
- pub fn proposer(&self) -> &PublicKey { - &self.proposer - } - - /// Retrieves the deploy hashes within the block. - pub(crate) fn deploy_hashes(&self) -> &Vec { - &self.deploy_hashes - } - - /// Retrieves the transfer hashes within the block. - pub(crate) fn transfer_hashes(&self) -> &Vec { - &self.transfer_hashes - } - - /// Computes the body hash - pub(crate) fn hash(&self) -> Digest { - let serialized_body = self - .to_bytes() - .unwrap_or_else(|error| panic!("should serialize block body: {}", error)); - hash::hash(&serialized_body) - } -} - -impl Display for BlockBody { - fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { - write!(formatter, "{:?}", self)?; - Ok(()) - } -} - -impl ToBytes for BlockBody { - fn to_bytes(&self) -> Result, bytesrepr::Error> { - let mut buffer = bytesrepr::allocate_buffer(self)?; - buffer.extend(self.proposer.to_bytes()?); - buffer.extend(self.deploy_hashes.to_bytes()?); - buffer.extend(self.transfer_hashes.to_bytes()?); - Ok(buffer) - } - - fn serialized_length(&self) -> usize { - self.proposer.serialized_length() - + self.deploy_hashes.serialized_length() - + self.transfer_hashes.serialized_length() - } -} - -impl FromBytes for BlockBody { - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { - let (proposer, bytes) = PublicKey::from_bytes(bytes)?; - let (deploy_hashes, bytes) = Vec::::from_bytes(bytes)?; - let (transfer_hashes, bytes) = Vec::::from_bytes(bytes)?; - let body = BlockBody { - proposer, - deploy_hashes, - transfer_hashes, - }; - Ok((body, bytes)) - } -} - -/// An error that can arise when validating a block's cryptographic integrity using its hashes -#[derive(Debug)] -pub enum BlockValidationError { - /// Problem serializing some of a block's data into bytes - SerializationError(bytesrepr::Error), - - /// The body hash in the header is not the same as the hash of the body of the block - UnexpectedBodyHash { - /// The block body hash specified in the header that is apparently incorrect - 
expected_by_block_header: Digest, - /// The actual hash of the block's body - actual: Digest, - }, - - /// The block's hash is not the same as the header's hash - UnexpectedBlockHash { - /// The hash specified by the block - expected_by_block: BlockHash, - /// The actual hash of the block - actual: BlockHash, - }, -} - -impl Display for BlockValidationError { - fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { - write!(formatter, "{:?}", self) - } -} - -impl From for BlockValidationError { - fn from(err: bytesrepr::Error) -> Self { - BlockValidationError::SerializationError(err) - } -} - -/// A storage representation of finality signatures with the associated block hash. -#[derive(Debug, Serialize, Deserialize, Clone, DataSize, PartialEq)] -pub struct BlockSignatures { - /// The block hash for a given block. - pub(crate) block_hash: BlockHash, - /// The era id for the given set of finality signatures. - pub(crate) era_id: EraId, - /// The signatures associated with the block hash. - pub(crate) proofs: BTreeMap, -} - -impl BlockSignatures { - pub(crate) fn new(block_hash: BlockHash, era_id: EraId) -> Self { - BlockSignatures { - block_hash, - era_id, - proofs: BTreeMap::new(), - } - } - - pub(crate) fn insert_proof( - &mut self, - public_key: PublicKey, - signature: Signature, - ) -> Option { - self.proofs.insert(public_key, signature) - } - - pub(crate) fn has_proof(&self, public_key: &PublicKey) -> bool { - self.proofs.contains_key(public_key) - } - - /// Verify the signatures contained within. 
- pub(crate) fn verify(&self) -> crypto::Result<()> { - for (public_key, signature) in self.proofs.iter() { - let signature = FinalitySignature { - block_hash: self.block_hash, - era_id: self.era_id, - signature: *signature, - public_key: public_key.clone(), - }; - signature.verify()?; - } - Ok(()) - } -} - -impl Display for BlockSignatures { - fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { - write!( - formatter, - "block signatures for hash: {} in era_id: {} with {} proofs", - self.block_hash, - self.era_id, - self.proofs.len() - ) - } -} - -/// A proto-block after execution, with the resulting post-state-hash. This is the core component -/// of the Casper linear blockchain. -#[derive(DataSize, Clone, Debug, PartialOrd, Ord, PartialEq, Eq, Hash, Serialize, Deserialize)] -pub struct Block { - hash: BlockHash, - header: BlockHeader, - body: BlockBody, -} - -impl Block { - pub(crate) fn new( - parent_hash: BlockHash, - parent_seed: Digest, - state_root_hash: Digest, - finalized_block: FinalizedBlock, - next_era_validator_weights: Option>, - protocol_version: ProtocolVersion, - ) -> Self { - let body = BlockBody::new( - finalized_block.proposer.clone(), - finalized_block.deploy_hashes, - finalized_block.transfer_hashes, - ); - let body_hash = body.hash(); - - let era_end = match finalized_block.era_report { - Some(era_report) => Some(EraEnd::new(era_report, next_era_validator_weights.unwrap())), - None => None, - }; - - let mut accumulated_seed = [0; Digest::LENGTH]; - - let mut hasher = VarBlake2b::new(Digest::LENGTH).expect("should create hasher"); - hasher.update(parent_seed); - hasher.update([finalized_block.random_bit as u8]); - hasher.finalize_variable(|slice| { - accumulated_seed.copy_from_slice(slice); - }); - - let header = BlockHeader { - parent_hash, - state_root_hash, - body_hash, - random_bit: finalized_block.random_bit, - accumulated_seed: accumulated_seed.into(), - era_end, - timestamp: finalized_block.timestamp, - era_id: 
finalized_block.era_id, - height: finalized_block.height, - protocol_version, - }; - - Self::new_from_header_and_body(header, body) - } - - pub(crate) fn new_from_header_and_body(header: BlockHeader, body: BlockBody) -> Self { - let hash = header.hash(); - Block { hash, header, body } - } - - pub(crate) fn header(&self) -> &BlockHeader { - &self.header - } - - pub(crate) fn body(&self) -> &BlockBody { - &self.body - } - - pub(crate) fn take_header(self) -> BlockHeader { - self.header - } - - /// The hash of this block's header. - pub fn hash(&self) -> &BlockHash { - &self.hash - } - - pub(crate) fn state_root_hash(&self) -> &Digest { - self.header.state_root_hash() - } - - /// The deploy hashes included in this block. - pub fn deploy_hashes(&self) -> &Vec { - self.body.deploy_hashes() - } - - /// The list of transfer hashes included in the block. - pub fn transfer_hashes(&self) -> &Vec { - self.body.transfer_hashes() - } - - /// The height of a block. - pub fn height(&self) -> u64 { - self.header.height() - } - - /// The protocol version of the block. - pub fn protocol_version(&self) -> ProtocolVersion { - self.header.protocol_version - } - - /// Returns the hash of the parent block. - /// If the block is the first block in the linear chain returns `None`. - pub fn parent(&self) -> Option<&BlockHash> { - if self.header.is_genesis_child() { - None - } else { - Some(self.header.parent_hash()) - } - } - - /// Returns the timestamp of the block. 
- pub fn timestamp(&self) -> Timestamp { - self.header.timestamp() - } - - /// Check the integrity of a block by hashing its body and header - pub fn verify(&self) -> Result<(), BlockValidationError> { - let actual_body_hash = self.body.hash(); - if self.header.body_hash != actual_body_hash { - return Err(BlockValidationError::UnexpectedBodyHash { - expected_by_block_header: self.header.body_hash, - actual: actual_body_hash, - }); - } - let actual_header_hash = self.header.hash(); - if self.hash != actual_header_hash { - return Err(BlockValidationError::UnexpectedBlockHash { - expected_by_block: self.hash, - actual: actual_header_hash, - }); - } - Ok(()) - } - - /// Overrides the height of a block. - #[cfg(test)] - pub fn set_height(&mut self, height: u64) -> &mut Self { - self.header.height = height; - self.hash = self.header.hash(); - self - } - - /// Generates a random instance using a `TestRng`. - #[cfg(test)] - pub fn random(rng: &mut TestRng) -> Self { - let era = rng.gen_range(0..5); - let height = era * 10 + rng.gen_range(0..10); - let is_switch = rng.gen_bool(0.1); - - Block::random_with_specifics(rng, EraId::from(era), height, is_switch) - } - - /// Generates a random instance using a `TestRng`, but using the specified era ID and height. 
- #[cfg(test)] - pub fn random_with_specifics( - rng: &mut TestRng, - era_id: EraId, - height: u64, - is_switch: bool, - ) -> Self { - let parent_hash = BlockHash::new(Digest::random(rng)); - let state_root_hash = Digest::random(rng); - let finalized_block = FinalizedBlock::random_with_specifics(rng, era_id, height, is_switch); - let parent_seed = Digest::random(rng); - let protocol_version = ProtocolVersion::V1_0_0; - let next_era_validator_weights = match finalized_block.era_report { - Some(_) => Some(BTreeMap::::default()), - None => None, - }; - - Block::new( - parent_hash, - parent_seed, - state_root_hash, - finalized_block, - next_era_validator_weights, - protocol_version, - ) - } -} - -impl DocExample for Block { - fn doc_example() -> &'static Self { - &*BLOCK - } -} - -impl Display for Block { - fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { - write!( - formatter, - "executed block {}, parent hash {}, post-state hash {}, body hash {}, \ - random bit {}, timestamp {}, era_id {}, height {}", - self.hash.inner(), - self.header.parent_hash.inner(), - self.header.state_root_hash, - self.header.body_hash, - self.header.random_bit, - self.header.timestamp, - self.header.era_id.value(), - self.header.height, - )?; - if let Some(ee) = &self.header.era_end { - write!(formatter, ", era_end: {}", ee)?; - } - Ok(()) - } -} - -impl ToBytes for Block { - fn to_bytes(&self) -> Result, bytesrepr::Error> { - let mut buffer = bytesrepr::allocate_buffer(self)?; - buffer.extend(self.hash.to_bytes()?); - buffer.extend(self.header.to_bytes()?); - buffer.extend(self.body.to_bytes()?); - Ok(buffer) - } - - fn serialized_length(&self) -> usize { - self.hash.serialized_length() - + self.header.serialized_length() - + self.body.serialized_length() - } -} - -impl FromBytes for Block { - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { - let (hash, remainder) = BlockHash::from_bytes(bytes)?; - let (header, remainder) = 
BlockHeader::from_bytes(remainder)?; - let (body, remainder) = BlockBody::from_bytes(remainder)?; - let block = Block { hash, header, body }; - Ok((block, remainder)) - } -} - -impl Item for Block { - type Id = BlockHash; - - const TAG: Tag = Tag::Block; - const ID_IS_COMPLETE_ITEM: bool = false; - - fn id(&self) -> Self::Id { - *self.hash() - } -} - -/// A wrapper around `Block` for the purposes of fetching blocks by height in linear chain. -#[derive(Clone, Debug, PartialOrd, Ord, PartialEq, Eq, Hash, Serialize, Deserialize)] -pub enum BlockByHeight { - Absent(u64), - Block(Box), -} - -impl From for BlockByHeight { - fn from(block: Block) -> Self { - BlockByHeight::new(block) - } -} - -impl BlockByHeight { - /// Creates a new `BlockByHeight` - pub fn new(block: Block) -> Self { - BlockByHeight::Block(Box::new(block)) - } - - pub fn height(&self) -> u64 { - match self { - BlockByHeight::Absent(height) => *height, - BlockByHeight::Block(block) => block.height(), - } - } -} - -impl Display for BlockByHeight { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - match self { - BlockByHeight::Absent(height) => write!(f, "Block at height {} was absent.", height), - BlockByHeight::Block(block) => { - let hash: BlockHash = block.header().hash(); - write!(f, "Block at {} with hash {} found.", block.height(), hash) - } - } - } -} - -impl Item for BlockByHeight { - type Id = u64; - - const TAG: Tag = Tag::BlockByHeight; - const ID_IS_COMPLETE_ITEM: bool = false; - - fn id(&self) -> Self::Id { - self.height() - } -} - -pub(crate) mod json_compatibility { - use super::*; - - #[derive(Serialize, Deserialize, Debug, JsonSchema, Clone)] - #[serde(deny_unknown_fields)] - struct Reward { - validator: PublicKey, - amount: u64, - } - - #[derive(Serialize, Deserialize, Debug, JsonSchema, Clone)] - #[serde(deny_unknown_fields)] - struct ValidatorWeight { - validator: PublicKey, - weight: U512, - } - - /// Equivocation and reward information to be included in the terminal block. 
- #[derive(Serialize, Deserialize, Debug, JsonSchema, Clone)] - #[serde(deny_unknown_fields)] - struct JsonEraReport { - equivocators: Vec, - rewards: Vec, - inactive_validators: Vec, - } - - impl From for JsonEraReport { - fn from(era_report: EraReport) -> Self { - JsonEraReport { - equivocators: era_report.equivocators, - rewards: era_report - .rewards - .into_iter() - .map(|(validator, amount)| Reward { validator, amount }) +#[cfg_attr(doc, aquamarine::aquamarine)] +/// ```mermaid +/// flowchart TD +/// style Start fill:#66ccff,stroke:#333,stroke-width:4px +/// style End fill:#66ccff,stroke:#333,stroke-width:4px +/// style A fill:#ffcc66,stroke:#333,stroke-width:4px +/// style B fill:#ffcc66,stroke:#333,stroke-width:4px +/// style Q fill:#ADD8E6,stroke:#333,stroke-width:4px +/// style S fill:#ADD8E6,stroke:#333,stroke-width:4px +/// title[FinalitySignature lifecycle] +/// title---Start +/// style title fill:#FFF,stroke:#FFF +/// linkStyle 0 stroke-width:0; +/// Start --> A["Validators"] +/// Start --> B["Non-validators"] +/// A --> C["Validator creates FS"] +/// A --> D["Received
broadcasted FS"] +/// A --> E["Received
gossiped FS"] +/// D --> I +/// E --> I +/// H --> End +/// C --> G["Put FS to storage"] +/// G --> H["Broadcast FS to Validators"] +/// G --> I["Register FS
in BlockAccumulator"] +/// I --> J{"Has sufficient
finality
and block?"} +/// J --> |Yes| K["Put all FS
to storage"] +/// J --> |No| L["Keep waiting
for more
signatures"] +/// B --> F["Keeping up with
the network"] +/// F --> M["Received
gossiped FS"] +/// M --> N["Register FS
in BlockAccumulator"] +/// N --> O{"Has sufficient
finality
and block?"} +/// O --> |No| L +/// O --> |Yes| P["Put all FS
to storage"] +/// P --> Q["Initiate forward
sync process
(click)"] +/// Q --> R["If forward or historical sync
process fetched and
stored additional FS
register them in
BlockAccumulator"] +/// B --> S["Initiate historical
sync process
(click)"] +/// S --> R +/// click Q "../components/block_synchronizer/block_acquisition/enum.BlockAcquisitionState.html" +/// click S "../components/block_synchronizer/block_acquisition/enum.BlockAcquisitionState.html" +/// R --> End +/// K --> End +/// ``` +#[allow(dead_code)] +type ValidatorFinalitySignature = FinalitySignature; + +/// Returns the hash of the bytesrepr-encoded deploy_ids. +pub(crate) fn compute_approvals_checksum( + txn_ids: Vec, +) -> Result { + let bytes = txn_ids.into_bytes()?; + Ok(Digest::hash(bytes)) +} + +/// Creates a new recorded finality signatures, from a validator matrix, and a block +/// with metadata. +pub(crate) fn create_single_block_rewarded_signatures( + validator_matrix: &super::ValidatorMatrix, + past_block_with_metadata: &BlockWithMetadata, +) -> Option { + validator_matrix + .validator_weights(past_block_with_metadata.block.era_id()) + .map(|weights| { + SingleBlockRewardedSignatures::from_validator_set( + &past_block_with_metadata + .block_signatures + .signers() + .cloned() .collect(), - inactive_validators: era_report.inactive_validators, - } - } - } - - impl From for EraReport { - fn from(era_report: JsonEraReport) -> Self { - let equivocators = era_report.equivocators; - let rewards = era_report - .rewards - .into_iter() - .map(|reward| (reward.validator, reward.amount)) - .collect(); - let inactive_validators = era_report.inactive_validators; - EraReport { - equivocators, - rewards, - inactive_validators, - } - } - } - - #[derive(Serialize, Deserialize, Debug, JsonSchema, Clone)] - #[serde(deny_unknown_fields)] - struct JsonEraEnd { - era_report: JsonEraReport, - next_era_validator_weights: Vec, - } - - impl From for JsonEraEnd { - fn from(data: EraEnd) -> Self { - let json_era_end = JsonEraReport::from(data.era_report); - let json_validator_weights = data - .next_era_validator_weights - .iter() - .map(|(validator, weight)| ValidatorWeight { - validator: validator.clone(), - weight: *weight, - }) - .collect(); - 
JsonEraEnd { - era_report: json_era_end, - next_era_validator_weights: json_validator_weights, - } - } - } - - impl From for EraEnd { - fn from(json_data: JsonEraEnd) -> Self { - let era_report = EraReport::from(json_data.era_report); - let validator_weights = json_data - .next_era_validator_weights - .iter() - .map(|validator_weight| { - (validator_weight.validator.clone(), validator_weight.weight) - }) - .collect(); - EraEnd::new(era_report, validator_weights) - } - } - - #[derive(Serialize, Deserialize, Debug, JsonSchema, Clone)] - #[serde(deny_unknown_fields)] - struct JsonBlockHeader { - parent_hash: BlockHash, - state_root_hash: Digest, - body_hash: Digest, - random_bit: bool, - accumulated_seed: Digest, - era_end: Option, - timestamp: Timestamp, - era_id: EraId, - height: u64, - protocol_version: ProtocolVersion, - } - - impl From for JsonBlockHeader { - fn from(block_header: BlockHeader) -> Self { - JsonBlockHeader { - parent_hash: block_header.parent_hash, - state_root_hash: block_header.state_root_hash, - body_hash: block_header.body_hash, - random_bit: block_header.random_bit, - accumulated_seed: block_header.accumulated_seed, - era_end: block_header.era_end.map(JsonEraEnd::from), - timestamp: block_header.timestamp, - era_id: block_header.era_id, - height: block_header.height, - protocol_version: block_header.protocol_version, - } - } - } - - impl From for BlockHeader { - fn from(block_header: JsonBlockHeader) -> Self { - BlockHeader { - parent_hash: block_header.parent_hash, - state_root_hash: block_header.state_root_hash, - body_hash: block_header.body_hash, - random_bit: block_header.random_bit, - accumulated_seed: block_header.accumulated_seed, - era_end: block_header.era_end.map(EraEnd::from), - timestamp: block_header.timestamp, - era_id: block_header.era_id, - height: block_header.height, - protocol_version: block_header.protocol_version, - } - } - } - - /// A JSON-friendly representation of `Body` - #[derive(Serialize, Deserialize, Debug, 
JsonSchema, Clone)] - #[serde(deny_unknown_fields)] - pub struct JsonBlockBody { - proposer: PublicKey, - deploy_hashes: Vec, - transfer_hashes: Vec, - } - - impl From for JsonBlockBody { - fn from(body: BlockBody) -> Self { - JsonBlockBody { - proposer: body.proposer().clone(), - deploy_hashes: body.deploy_hashes().clone(), - transfer_hashes: body.transfer_hashes().clone(), - } - } - } - - impl From for BlockBody { - fn from(json_body: JsonBlockBody) -> Self { - BlockBody { - proposer: json_body.proposer, - deploy_hashes: json_body.deploy_hashes, - transfer_hashes: json_body.transfer_hashes, - } - } - } - - /// A JSON-friendly representation of `Block`. - #[derive(Serialize, Deserialize, Debug, JsonSchema, Clone)] - #[serde(deny_unknown_fields)] - pub struct JsonBlock { - hash: BlockHash, - header: JsonBlockHeader, - body: JsonBlockBody, - proofs: Vec, - } - - impl JsonBlock { - /// Create a new JSON Block with a Linear chain block and its associated signatures. - pub fn new(block: Block, signatures: BlockSignatures) -> Self { - let hash = *block.hash(); - let header = JsonBlockHeader::from(block.header.clone()); - let body = JsonBlockBody::from(block.body); - let proofs = signatures.proofs.into_iter().map(JsonProof::from).collect(); - - JsonBlock { - hash, - header, - body, - proofs, - } - } - - /// Returns the hashes of the `Deploy`s included in the `Block`. - pub fn deploy_hashes(&self) -> &Vec { - &self.body.deploy_hashes - } - - /// Returns the hashes of the transfer `Deploy`s included in the `Block`. - pub fn transfer_hashes(&self) -> &Vec { - &self.body.transfer_hashes - } - } - - impl DocExample for JsonBlock { - fn doc_example() -> &'static Self { - &*JSON_BLOCK - } - } - - impl From for Block { - fn from(block: JsonBlock) -> Self { - Block { - hash: block.hash, - header: BlockHeader::from(block.header), - body: BlockBody::from(block.body), - } - } - } - - /// A JSON-friendly representation of a proof, i.e. a block's finality signature. 
- #[derive(Debug, Serialize, Deserialize, JsonSchema, Clone)] - #[serde(deny_unknown_fields)] - pub struct JsonProof { - public_key: PublicKey, - signature: Signature, - } - - impl From<(PublicKey, Signature)> for JsonProof { - fn from((public_key, signature): (PublicKey, Signature)) -> JsonProof { - JsonProof { - public_key, - signature, - } - } - } - - impl From for (PublicKey, Signature) { - fn from(proof: JsonProof) -> (PublicKey, Signature) { - (proof.public_key, proof.signature) - } - } - - #[test] - fn block_json_roundtrip() { - let mut rng = TestRng::new(); - let block: Block = Block::random(&mut rng); - let empty_signatures = BlockSignatures::new(*block.hash(), block.header().era_id); - let json_block = JsonBlock::new(block.clone(), empty_signatures); - let block_deserialized = Block::from(json_block); - assert_eq!(block, block_deserialized); - } -} - -/// A validator's signature of a block, to confirm it is finalized. Clients and joining nodes should -/// wait until the signers' combined weight exceeds their fault tolerance threshold before accepting -/// the block as finalized. -#[derive(Debug, Clone, Serialize, Deserialize, DataSize, PartialEq, Eq)] -pub struct FinalitySignature { - /// Hash of a block this signature is for. - pub block_hash: BlockHash, - /// Era in which the block was created in. - pub era_id: EraId, - /// Signature over the block hash. - pub signature: Signature, - /// Public key of the signing validator. - pub public_key: PublicKey, -} - -impl FinalitySignature { - /// Create an instance of `FinalitySignature`. - pub fn new( - block_hash: BlockHash, - era_id: EraId, - secret_key: &SecretKey, - public_key: PublicKey, - ) -> Self { - let mut bytes = block_hash.inner().to_vec(); - bytes.extend_from_slice(&era_id.to_le_bytes()); - let signature = crypto::sign(bytes, &secret_key, &public_key); - FinalitySignature { - block_hash, - era_id, - signature, - public_key, - } - } - - /// Verifies whether the signature is correct. 
- pub fn verify(&self) -> crypto::Result<()> { - // NOTE: This needs to be in sync with the `new` constructor. - let mut bytes = self.block_hash.inner().to_vec(); - bytes.extend_from_slice(&self.era_id.to_le_bytes()); - crypto::verify(bytes, &self.signature, &self.public_key) - } - - #[cfg(test)] - pub fn random_for_block(block_hash: BlockHash, era_id: u64) -> Self { - let (sec_key, pub_key) = generate_ed25519_keypair(); - FinalitySignature::new(block_hash, EraId::new(era_id), &sec_key, pub_key) - } -} - -impl Display for FinalitySignature { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - write!( - f, - "finality signature for block hash {}, from {}", - &self.block_hash, &self.public_key - ) - } -} - -#[cfg(test)] -mod tests { - use casper_types::bytesrepr; - - use super::*; - use crate::testing::TestRng; - use std::rc::Rc; - - #[test] - fn json_block_roundtrip() { - let mut rng = crate::new_rng(); - let block = Block::random(&mut rng); - let json_string = serde_json::to_string_pretty(&block).unwrap(); - let decoded = serde_json::from_str(&json_string).unwrap(); - assert_eq!(block, decoded); - } - - #[test] - fn json_finalized_block_roundtrip() { - let mut rng = crate::new_rng(); - let finalized_block = FinalizedBlock::random(&mut rng); - let json_string = serde_json::to_string_pretty(&finalized_block).unwrap(); - let decoded = serde_json::from_str(&json_string).unwrap(); - assert_eq!(finalized_block, decoded); - } - - #[test] - fn block_bytesrepr_roundtrip() { - let mut rng = TestRng::new(); - let block = Block::random(&mut rng); - bytesrepr::test_serialization_roundtrip(&block); - } - - #[test] - fn block_header_bytesrepr_roundtrip() { - let mut rng = TestRng::new(); - let block_header: BlockHeader = Block::random(&mut rng).header; - bytesrepr::test_serialization_roundtrip(&block_header); - } - - #[test] - fn bytesrepr_roundtrip_era_report() { - let mut rng = TestRng::new(); - let loop_iterations = 50; - for _ in 0..loop_iterations { - let 
finalized_block = FinalizedBlock::random(&mut rng); - if let Some(era_report) = finalized_block.era_report() { - bytesrepr::test_serialization_roundtrip(era_report); - } - } - } - - #[test] - fn bytesrepr_roundtrip_era_end() { - let mut rng = TestRng::new(); - let loop_iterations = 50; - for _ in 0..loop_iterations { - let block = Block::random(&mut rng); - if let Some(data) = block.header.era_end { - bytesrepr::test_serialization_roundtrip(&data) - } - } - } - - #[test] - fn random_block_check() { - let mut rng = TestRng::from_seed([1u8; 16]); - let loop_iterations = 50; - for _ in 0..loop_iterations { - Block::random(&mut rng) - .verify() - .expect("block hash should check"); - } - } - - #[test] - fn block_check_bad_body_hash_sad_path() { - let mut rng = TestRng::from_seed([2u8; 16]); - let mut block = Block::random(&mut rng); - - let bogus_block_hash = hash::hash(&[0xde, 0xad, 0xbe, 0xef]); - block.header.body_hash = bogus_block_hash; - - let actual_body_hash = block.body.hash(); - - // No Eq trait for BlockValidationError, so pattern match - match block.verify() { - Err(BlockValidationError::UnexpectedBodyHash { - expected_by_block_header, - actual, - }) if expected_by_block_header == bogus_block_hash && actual == actual_body_hash => {} - unexpected => panic!("Bad check response: {:?}", unexpected), - } - } - - #[test] - fn block_check_bad_block_hash_sad_path() { - let mut rng = TestRng::from_seed([3u8; 16]); - let mut block = Block::random(&mut rng); - - let bogus_block_hash: BlockHash = hash::hash(&[0xde, 0xad, 0xbe, 0xef]).into(); - block.hash = bogus_block_hash; - - let actual_block_hash = block.header.hash(); - - // No Eq trait for BlockValidationError, so pattern match - match block.verify() { - Err(BlockValidationError::UnexpectedBlockHash { - expected_by_block, - actual, - }) if expected_by_block == bogus_block_hash && actual == actual_block_hash => {} - unexpected => panic!("Bad check response: {:?}", unexpected), - } - } - - #[test] - fn 
finality_signature() { - let mut rng = TestRng::new(); - let block = Block::random(&mut rng); - // Signature should be over both block hash and era id. - let (secret_key, public_key) = generate_ed25519_keypair(); - let secret_rc = Rc::new(secret_key); - let era_id = EraId::from(1); - let fs = FinalitySignature::new(*block.hash(), era_id, &secret_rc, public_key.clone()); - assert!(fs.verify().is_ok()); - let signature = fs.signature; - // Verify that signature includes era id. - let fs_manufactured = FinalitySignature { - block_hash: *block.hash(), - era_id: EraId::from(2), - signature, - public_key, - }; - // Test should fail b/c `signature` is over `era_id=1` and here we're using `era_id=2`. - assert!(fs_manufactured.verify().is_err()); - } + weights.validator_public_keys(), + ) + }) } diff --git a/node/src/types/block/approvals_hashes.rs b/node/src/types/block/approvals_hashes.rs new file mode 100644 index 0000000000..58ed9f523d --- /dev/null +++ b/node/src/types/block/approvals_hashes.rs @@ -0,0 +1,303 @@ +use std::{ + collections::BTreeMap, + fmt::{self, Display, Formatter}, +}; + +use datasize::DataSize; +use serde::{Deserialize, Serialize}; +use thiserror::Error; +use tracing::error; + +use casper_types::{ + bytesrepr::{self, FromBytes, ToBytes}, + global_state::TrieMerkleProof, + ApprovalsHash, Block, BlockHash, BlockV1, BlockV2, DeployId, Digest, Key, StoredValue, + TransactionId, +}; + +use crate::{ + components::{ + contract_runtime::APPROVALS_CHECKSUM_NAME, + fetcher::{FetchItem, Tag}, + }, + types::{self, VariantMismatch}, +}; + +use casper_storage::global_state::trie_store::operations::compute_state_hash; + +/// The data which is gossiped by validators to non-validators upon creation of a new block. +#[derive(DataSize, Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub(crate) struct ApprovalsHashes { + /// Hash of the block that contains transactions that are relevant to the approvals. 
+ block_hash: BlockHash, + /// The set of all transactions' finalized approvals' hashes. + approvals_hashes: Vec, + /// The Merkle proof of the checksum registry containing the checksum of the finalized + /// approvals. + #[data_size(skip)] + merkle_proof_approvals: TrieMerkleProof, +} + +impl ApprovalsHashes { + #[allow(dead_code)] + pub(crate) fn new( + block_hash: BlockHash, + approvals_hashes: Vec, + merkle_proof_approvals: TrieMerkleProof, + ) -> Self { + Self { + block_hash, + approvals_hashes, + merkle_proof_approvals, + } + } + + fn verify(&self, block: &Block) -> Result<(), ApprovalsHashesValidationError> { + let merkle_proof_approvals = &self.merkle_proof_approvals; + if *merkle_proof_approvals.key() != Key::ChecksumRegistry { + return Err(ApprovalsHashesValidationError::InvalidKeyType); + } + + let proof_state_root_hash = compute_state_hash(merkle_proof_approvals) + .map_err(ApprovalsHashesValidationError::TrieMerkleProof)?; + + if proof_state_root_hash != *block.state_root_hash() { + return Err(ApprovalsHashesValidationError::StateRootHashMismatch { + proof_state_root_hash, + block_state_root_hash: *block.state_root_hash(), + }); + } + + let value_in_proof = merkle_proof_approvals + .value() + .as_cl_value() + .and_then(|cl_value| cl_value.clone().into_t().ok()) + .and_then(|registry: BTreeMap| { + registry.get(APPROVALS_CHECKSUM_NAME).copied() + }) + .ok_or(ApprovalsHashesValidationError::InvalidChecksumRegistry)?; + + let computed_approvals_checksum = match block { + Block::V1(v1_block) => compute_legacy_approvals_checksum(self.deploy_ids(v1_block))?, + Block::V2(v2_block) => { + types::compute_approvals_checksum(self.transaction_ids(v2_block)) + .map_err(ApprovalsHashesValidationError::ApprovalsChecksum)? 
+ } + }; + + if value_in_proof != computed_approvals_checksum { + return Err(ApprovalsHashesValidationError::ApprovalsChecksumMismatch { + computed_approvals_checksum, + value_in_proof, + }); + } + + Ok(()) + } + + pub(crate) fn deploy_ids(&self, v1_block: &BlockV1) -> Vec { + let approval_hashes = &self.approvals_hashes; + v1_block + .deploy_and_transfer_hashes() + .zip(approval_hashes) + .map(|(x, y)| DeployId::new(*x, *y)) + .collect() + } + + pub fn transaction_ids(&self, v2_block: &BlockV2) -> Vec { + let approval_hashes = &self.approvals_hashes; + v2_block + .all_transactions() + .zip(approval_hashes) + .map(|(x, y)| TransactionId::new(*x, *y)) + .collect() + } + + pub(crate) fn block_hash(&self) -> &BlockHash { + &self.block_hash + } +} + +impl FetchItem for ApprovalsHashes { + type Id = BlockHash; + type ValidationError = ApprovalsHashesValidationError; + type ValidationMetadata = Block; + + const TAG: Tag = Tag::ApprovalsHashes; + + fn fetch_id(&self) -> Self::Id { + *self.block_hash() + } + + fn validate(&self, block: &Block) -> Result<(), Self::ValidationError> { + self.verify(block) + } +} + +impl Display for ApprovalsHashes { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!(f, "approvals hashes for {}", self.block_hash()) + } +} + +impl ToBytes for ApprovalsHashes { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.block_hash.write_bytes(writer)?; + self.approvals_hashes.write_bytes(writer)?; + self.merkle_proof_approvals.write_bytes(writer) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.block_hash.serialized_length() + + self.approvals_hashes.serialized_length() + + self.merkle_proof_approvals.serialized_length() + } +} + +impl FromBytes for ApprovalsHashes { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let 
(block_hash, remainder) = BlockHash::from_bytes(bytes)?; + let (approvals_hashes, remainder) = Vec::::from_bytes(remainder)?; + let (merkle_proof_approvals, remainder) = + TrieMerkleProof::::from_bytes(remainder)?; + Ok(( + ApprovalsHashes { + block_hash, + approvals_hashes, + merkle_proof_approvals, + }, + remainder, + )) + } +} + +/// Returns the hash of the bytesrepr-encoded deploy_ids, as used until the `Block` enum became +/// available. +pub(crate) fn compute_legacy_approvals_checksum( + deploy_ids: Vec, +) -> Result { + let bytes = deploy_ids + .into_bytes() + .map_err(ApprovalsHashesValidationError::ApprovalsChecksum)?; + Ok(Digest::hash(bytes)) +} + +/// An error that can arise when validating `ApprovalsHashes`. +#[derive(Error, Debug, DataSize)] +#[non_exhaustive] +pub(crate) enum ApprovalsHashesValidationError { + /// The key provided in the proof is not a `Key::ChecksumRegistry`. + #[error("key provided in proof is not a Key::ChecksumRegistry")] + InvalidKeyType, + + /// An error while computing the state root hash implied by the Merkle proof. + #[error("failed to compute state root hash implied by proof")] + TrieMerkleProof(bytesrepr::Error), + + /// The state root hash implied by the Merkle proof doesn't match that in the block. + #[error("state root hash implied by the Merkle proof doesn't match that in the block")] + StateRootHashMismatch { + proof_state_root_hash: Digest, + block_state_root_hash: Digest, + }, + + /// The value provided in the proof cannot be parsed to the checksum registry type. + #[error("value provided in the proof cannot be parsed to the checksum registry type")] + InvalidChecksumRegistry, + + /// An error while computing the checksum of the approvals. + #[error("failed to compute checksum of the approvals")] + ApprovalsChecksum(bytesrepr::Error), + + /// The approvals checksum provided doesn't match one calculated from the approvals. 
+ #[error("provided approvals checksum doesn't match one calculated from the approvals")] + ApprovalsChecksumMismatch { + computed_approvals_checksum: Digest, + value_in_proof: Digest, + }, + + #[error(transparent)] + #[data_size(skip)] + VariantMismatch(#[from] VariantMismatch), +} + +mod specimen_support { + use std::collections::BTreeMap; + + use casper_types::{ + bytesrepr::Bytes, + global_state::{Pointer, TrieMerkleProof, TrieMerkleProofStep}, + CLValue, Digest, Key, StoredValue, + }; + + use crate::{ + contract_runtime::{APPROVALS_CHECKSUM_NAME, EXECUTION_RESULTS_CHECKSUM_NAME}, + utils::specimen::{ + largest_variant, vec_of_largest_specimen, vec_prop_specimen, Cache, LargestSpecimen, + SizeEstimator, + }, + }; + use casper_storage::block_store::types::ApprovalsHashes; + + impl LargestSpecimen for ApprovalsHashes { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + let data = { + let mut map = BTreeMap::new(); + map.insert( + APPROVALS_CHECKSUM_NAME, + Digest::largest_specimen(estimator, cache), + ); + map.insert( + EXECUTION_RESULTS_CHECKSUM_NAME, + Digest::largest_specimen(estimator, cache), + ); + map + }; + let merkle_proof_approvals = TrieMerkleProof::new( + Key::ChecksumRegistry, + StoredValue::CLValue(CLValue::from_t(data).expect("a correct cl value")), + // 2^64/2^13 = 2^51, so 51 items: + vec_of_largest_specimen(estimator, 51, cache).into(), + ); + ApprovalsHashes::new( + LargestSpecimen::largest_specimen(estimator, cache), + vec_prop_specimen(estimator, "approvals_hashes", cache), + merkle_proof_approvals, + ) + } + } + + impl LargestSpecimen for TrieMerkleProofStep { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + #[derive(strum::EnumIter)] + enum TrieMerkleProofStepDiscriminants { + Node, + Extension, + } + + largest_variant(estimator, |variant| match variant { + TrieMerkleProofStepDiscriminants::Node => TrieMerkleProofStep::Node { + hole_index: u8::MAX, + indexed_pointers_with_hole: vec![ + ( + u8::MAX, + 
Pointer::LeafPointer(LargestSpecimen::largest_specimen( + estimator, cache + )) + ); + estimator.parameter("max_pointer_per_node") + ], + }, + TrieMerkleProofStepDiscriminants::Extension => TrieMerkleProofStep::Extension { + affix: Bytes::from(vec![u8::MAX; Key::max_serialized_length()]), + }, + }) + } + } +} diff --git a/node/src/types/block/block_execution_results_or_chunk.rs b/node/src/types/block/block_execution_results_or_chunk.rs new file mode 100644 index 0000000000..ea31b4b922 --- /dev/null +++ b/node/src/types/block/block_execution_results_or_chunk.rs @@ -0,0 +1,481 @@ +use std::fmt::{self, Debug, Display, Formatter}; + +use datasize::DataSize; +use once_cell::sync::OnceCell; +use serde::{Deserialize, Serialize}; +use tracing::{debug, error}; + +#[cfg(test)] +use casper_types::execution::ExecutionResultV2; +#[cfg(test)] +use casper_types::testing::TestRng; +use casper_types::{ + bytesrepr::{self, ToBytes}, + execution::ExecutionResult, + BlockHash, ChunkWithProof, ChunkWithProofVerificationError, Digest, +}; + +use super::BlockExecutionResultsOrChunkId; +use crate::{ + components::{ + block_synchronizer::ExecutionResultsChecksum, + fetcher::{FetchItem, Tag}, + }, + types::{Chunkable, ValueOrChunk}, + utils::ds, +}; + +/// Represents execution results for all deploys in a single block or a chunk of this complete +/// value. +#[derive(Clone, Serialize, Deserialize, Debug, Eq, DataSize)] +pub struct BlockExecutionResultsOrChunk { + /// Block to which this value or chunk refers to. + pub(super) block_hash: BlockHash, + /// Complete execution results for the block or a chunk of the complete data. 
+ pub(super) value: ValueOrChunk>, + #[serde(skip)] + #[data_size(with = ds::once_cell)] + pub(super) is_valid: OnceCell>, +} + +impl BlockExecutionResultsOrChunk { + pub(crate) fn new( + block_hash: BlockHash, + chunk_index: u64, + execution_results: Vec, + ) -> Option { + fn make_value_or_chunk( + data: T, + block_hash: &BlockHash, + chunk_index: u64, + ) -> Option> { + match ValueOrChunk::new(data, chunk_index) { + Ok(value_or_chunk) => Some(value_or_chunk), + Err(error) => { + error!( + %block_hash, %chunk_index, %error, + "failed to construct `BlockExecutionResultsOrChunk`" + ); + None + } + } + } + + let is_v1 = matches!(execution_results.first(), Some(ExecutionResult::V1(_))); + + // If it's not V1, just construct the `ValueOrChunk` from `Vec`. + if !is_v1 { + let value = make_value_or_chunk(execution_results, &block_hash, chunk_index)?; + return Some(BlockExecutionResultsOrChunk { + block_hash, + value, + is_valid: OnceCell::new(), + }); + } + + // If it is V1, we need to construct the `ValueOrChunk` from a `Vec` if + // it's big enough to need chunking, otherwise we need to use the `Vec` as + // the `ValueOrChunk::Value`. + let mut v1_results = Vec::with_capacity(execution_results.len()); + for result in &execution_results { + if let ExecutionResult::V1(v1_result) = result { + v1_results.push(v1_result); + } else { + error!( + ?execution_results, + "all execution results should be version 1" + ); + return None; + } + } + if v1_results.serialized_length() <= ChunkWithProof::CHUNK_SIZE_BYTES { + // Avoid using `make_value_or_chunk(execution_results, ..)` as that will chunk if + // `v1_results.serialized_length() == ChunkWithProof::CHUNK_SIZE_BYTES`, since + // `execution_results.serialized_length()` will definitely be greater than + // `ChunkWithProof::CHUNK_SIZE_BYTES` due to the extra tag byte specifying V1 in the + // enum `ExecutionResult`. 
+ let value = ValueOrChunk::Value(execution_results); + return Some(BlockExecutionResultsOrChunk { + block_hash, + value, + is_valid: OnceCell::new(), + }); + } + + let v1_value = make_value_or_chunk(v1_results, &block_hash, chunk_index)?; + let value = match v1_value { + ValueOrChunk::Value(_) => { + error!( + ?execution_results, + "v1 execution results of this size should be chunked" + ); + return None; + } + ValueOrChunk::ChunkWithProof(chunk) => ValueOrChunk::ChunkWithProof(chunk), + }; + + Some(BlockExecutionResultsOrChunk { + block_hash, + value, + is_valid: OnceCell::new(), + }) + } + + /// Verifies equivalence of the execution results (or chunks) Merkle root hash with the + /// expected value. + pub fn validate(&self, expected: &Digest) -> Result { + *self.is_valid.get_or_init(|| match &self.value { + ValueOrChunk::Value(block_execution_results) => { + // If results is not empty and all are V1, convert and verify. + let is_v1 = matches!( + block_execution_results.first(), + Some(ExecutionResult::V1(_)) + ); + let actual = if is_v1 { + let mut v1_results = Vec::with_capacity(block_execution_results.len()); + for result in block_execution_results { + if let ExecutionResult::V1(v1_result) = result { + v1_results.push(v1_result); + } else { + debug!( + ?block_execution_results, + "all execution results should be version 1" + ); + return Ok(false); + } + } + Chunkable::hash(&v1_results)? + } else { + Chunkable::hash(&block_execution_results)? + }; + Ok(&actual == expected) + } + ValueOrChunk::ChunkWithProof(chunk_with_proof) => { + Ok(&chunk_with_proof.proof().root_hash() == expected) + } + }) + } + + /// Consumes `self` and returns inner `ValueOrChunk` field. + pub fn into_value(self) -> ValueOrChunk> { + self.value + } + + /// Returns the hash of the block this execution result belongs to. 
+ pub fn block_hash(&self) -> &BlockHash { + &self.block_hash + } + + #[cfg(test)] + pub(crate) fn new_mock_value(rng: &mut TestRng, block_hash: BlockHash) -> Self { + Self::new_mock_value_with_multiple_random_results(rng, block_hash, 1) + } + + #[cfg(test)] + pub(crate) fn new_mock_value_with_multiple_random_results( + rng: &mut TestRng, + block_hash: BlockHash, + num_results: usize, + ) -> Self { + let execution_results: Vec = (0..num_results) + .map(|_| ExecutionResultV2::random(rng).into()) + .collect(); + + Self { + block_hash, + value: ValueOrChunk::new(execution_results, 0).unwrap(), + is_valid: OnceCell::with_value(Ok(true)), + } + } + + #[cfg(test)] + pub(crate) fn value(&self) -> &ValueOrChunk> { + &self.value + } + + #[cfg(test)] + pub(crate) fn new_from_value( + block_hash: BlockHash, + value: ValueOrChunk>, + ) -> Self { + Self { + block_hash, + value, + is_valid: OnceCell::new(), + } + } +} + +impl PartialEq for BlockExecutionResultsOrChunk { + fn eq(&self, other: &BlockExecutionResultsOrChunk) -> bool { + // Destructure to make sure we don't accidentally omit fields. 
+ let BlockExecutionResultsOrChunk { + block_hash, + value, + is_valid: _, + } = self; + *block_hash == other.block_hash && *value == other.value + } +} + +impl FetchItem for BlockExecutionResultsOrChunk { + type Id = BlockExecutionResultsOrChunkId; + type ValidationError = ChunkWithProofVerificationError; + type ValidationMetadata = ExecutionResultsChecksum; + + const TAG: Tag = Tag::BlockExecutionResults; + + fn fetch_id(&self) -> Self::Id { + let chunk_index = match &self.value { + ValueOrChunk::Value(_) => 0, + ValueOrChunk::ChunkWithProof(chunks) => chunks.proof().index(), + }; + BlockExecutionResultsOrChunkId { + chunk_index, + block_hash: self.block_hash, + } + } + + fn validate(&self, metadata: &ExecutionResultsChecksum) -> Result<(), Self::ValidationError> { + if let ValueOrChunk::ChunkWithProof(chunk_with_proof) = &self.value { + chunk_with_proof.verify()?; + } + if let ExecutionResultsChecksum::Checkable(expected) = *metadata { + if !self + .validate(&expected) + .map_err(ChunkWithProofVerificationError::Bytesrepr)? 
+ { + return Err(ChunkWithProofVerificationError::UnexpectedRootHash); + } + } + Ok(()) + } +} + +impl Display for BlockExecutionResultsOrChunk { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + let size = match &self.value { + ValueOrChunk::Value(exec_results) => exec_results.serialized_length(), + ValueOrChunk::ChunkWithProof(chunk) => chunk.serialized_length(), + }; + write!( + f, + "block execution results or chunk ({size} bytes) for block {}", + self.block_hash.inner() + ) + } +} + +mod specimen_support { + use crate::utils::specimen::{Cache, LargestSpecimen, SizeEstimator}; + + use super::BlockExecutionResultsOrChunk; + use once_cell::sync::OnceCell; + + impl LargestSpecimen for BlockExecutionResultsOrChunk { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + BlockExecutionResultsOrChunk { + block_hash: LargestSpecimen::largest_specimen(estimator, cache), + value: LargestSpecimen::largest_specimen(estimator, cache), + is_valid: OnceCell::with_value(Ok(true)), + } + } + } +} + +#[cfg(test)] +mod tests { + use rand::Rng; + + use casper_types::{ + execution::{execution_result_v1::ExecutionEffect, ExecutionResultV1}, + testing::TestRng, + ChunkWithProof, TransferAddr, + }; + + use super::*; + use crate::contract_runtime::compute_execution_results_checksum; + + fn compute_execution_results_v1_checksum( + v1_execution_results: Vec<&ExecutionResultV1>, + ) -> ExecutionResultsChecksum { + ExecutionResultsChecksum::Checkable(v1_execution_results.hash().unwrap()) + } + + /// Checks that a Vec of `ExecutionResultV1`s which are right at the limit to avoid being + /// chunked are still not chunked when constructing a BlockExecutionResultsOrChunk from them + /// when they are held as a Vec of `ExecutionResult`s. 
+ #[test] + fn should_not_chunk_for_v1_at_upper_bound() { + let rng = &mut TestRng::new(); + + // The serialized_length() of this should be equal to `ChunkWithProof::CHUNK_SIZE_BYTES` + let execution_results_v1 = vec![ExecutionResultV1::Failure { + effect: ExecutionEffect::default(), + transfers: vec![TransferAddr::new([1; 32]); 262143], + cost: 2_u64.into(), + error_message: "ninebytes".to_string(), + }]; + assert!( + execution_results_v1.serialized_length() == ChunkWithProof::CHUNK_SIZE_BYTES, + "need execution_results_v1.serialized_length() [{}] to be <= \ + ChunkWithProof::CHUNK_SIZE_BYTES [{}]", + execution_results_v1.serialized_length(), + ChunkWithProof::CHUNK_SIZE_BYTES + ); + // The serialized_length() of this should be greater than `ChunkWithProof::CHUNK_SIZE_BYTES` + // meaning it would be chunked unless we explicitly avoid chunking it in the + // `BlockExecutionResultsOrChunk` constructor. + let execution_results = execution_results_v1 + .iter() + .map(|res| ExecutionResult::V1(res.clone())) + .collect::>(); + assert!( + execution_results.serialized_length() > ChunkWithProof::CHUNK_SIZE_BYTES, + "need execution_results.serialized_length() [{}] to be > \ + ChunkWithProof::CHUNK_SIZE_BYTES [{}]", + execution_results_v1.serialized_length(), + ChunkWithProof::CHUNK_SIZE_BYTES + ); + assert!(execution_results.serialized_length() > ChunkWithProof::CHUNK_SIZE_BYTES); + + let block_hash = BlockHash::random(rng); + let value_or_chunk = + BlockExecutionResultsOrChunk::new(block_hash, 0, execution_results).unwrap(); + assert!(matches!(value_or_chunk.value, ValueOrChunk::Value(_))); + } + + #[test] + fn should_validate_v1_unchunked_checksum() { + let rng = &mut TestRng::new(); + let execution_results = vec![ + ExecutionResult::V1(rng.gen()), + ExecutionResult::V1(rng.gen()), + ]; + let checksum = compute_execution_results_v1_checksum( + execution_results + .iter() + .map(|exec_result| match exec_result { + ExecutionResult::V1(exec_result) => exec_result, + _ => 
unreachable!(), + }) + .collect(), + ); + + let block_hash = BlockHash::random(rng); + let block_results = + BlockExecutionResultsOrChunk::new(block_hash, 0, execution_results).unwrap(); + // Ensure the results weren't chunked. + assert!(matches!(block_results.value, ValueOrChunk::Value(_))); + + FetchItem::validate(&block_results, &checksum).unwrap(); + } + + #[test] + fn should_validate_v1_chunked_checksum() { + let rng = &mut TestRng::new(); + + let v1_result: ExecutionResultV1 = rng.gen(); + // Ensure we fill with enough copies to cause three chunks. + let count = (2 * ChunkWithProof::CHUNK_SIZE_BYTES / v1_result.serialized_length()) + 1; + let execution_results = vec![ExecutionResult::V1(v1_result); count]; + let checksum = compute_execution_results_v1_checksum( + execution_results + .iter() + .map(|exec_result| match exec_result { + ExecutionResult::V1(exec_result) => exec_result, + _ => unreachable!(), + }) + .collect(), + ); + + let block_hash = BlockHash::random(rng); + for chunk_index in 0..3 { + let block_results = BlockExecutionResultsOrChunk::new( + block_hash, + chunk_index, + execution_results.clone(), + ) + .unwrap(); + // Ensure the results were chunked. 
+ assert!(matches!( + block_results.value, + ValueOrChunk::ChunkWithProof(_) + )); + + FetchItem::validate(&block_results, &checksum).unwrap(); + } + } + + #[test] + fn should_validate_v1_empty_checksum() { + let rng = &mut TestRng::new(); + let checksum = compute_execution_results_v1_checksum(vec![]); + + let block_results = + BlockExecutionResultsOrChunk::new(BlockHash::random(rng), 0, vec![]).unwrap(); + FetchItem::validate(&block_results, &checksum).unwrap(); + } + + #[test] + fn should_validate_versioned_unchunked_checksum() { + let rng = &mut TestRng::new(); + let execution_results = vec![ + ExecutionResult::from(ExecutionResultV2::random(rng)), + ExecutionResult::from(ExecutionResultV2::random(rng)), + ]; + let checksum = ExecutionResultsChecksum::Checkable( + compute_execution_results_checksum(execution_results.iter()).unwrap(), + ); + + let block_hash = BlockHash::random(rng); + let block_results = + BlockExecutionResultsOrChunk::new(block_hash, 0, execution_results).unwrap(); + // Ensure the results weren't chunked. + assert!(matches!(block_results.value, ValueOrChunk::Value(_))); + + FetchItem::validate(&block_results, &checksum).unwrap(); + } + + #[test] + fn should_validate_versioned_chunked_checksum() { + let rng = &mut TestRng::new(); + + let v2_result = ExecutionResultV2::random(rng); + // Ensure we fill with enough copies to cause three chunks. + let count = (2 * ChunkWithProof::CHUNK_SIZE_BYTES / v2_result.serialized_length()) + 1; + let execution_results = vec![ExecutionResult::V2(Box::new(v2_result)); count]; + let checksum = ExecutionResultsChecksum::Checkable( + compute_execution_results_checksum(execution_results.iter()).unwrap(), + ); + + let block_hash = BlockHash::random(rng); + for chunk_index in 0..3 { + let block_results = BlockExecutionResultsOrChunk::new( + block_hash, + chunk_index, + execution_results.clone(), + ) + .unwrap(); + // Ensure the results were chunked. 
+ assert!(matches!( + block_results.value, + ValueOrChunk::ChunkWithProof(_) + )); + + FetchItem::validate(&block_results, &checksum).unwrap(); + } + } + + #[test] + fn should_validate_versioned_empty_checksum() { + let rng = &mut TestRng::new(); + let checksum = ExecutionResultsChecksum::Checkable( + compute_execution_results_checksum(None.into_iter()).unwrap(), + ); + + let block_results = + BlockExecutionResultsOrChunk::new(BlockHash::random(rng), 0, vec![]).unwrap(); + FetchItem::validate(&block_results, &checksum).unwrap(); + } +} diff --git a/node/src/types/block/block_execution_results_or_chunk_id.rs b/node/src/types/block/block_execution_results_or_chunk_id.rs new file mode 100644 index 0000000000..b0bf6ceb85 --- /dev/null +++ b/node/src/types/block/block_execution_results_or_chunk_id.rs @@ -0,0 +1,71 @@ +use std::{ + fmt::{self, Debug, Display, Formatter}, + hash::Hash, +}; + +use datasize::DataSize; +use serde::{Deserialize, Serialize}; + +use casper_types::BlockHash; + +/// ID of the request for block execution results or chunk. +#[derive(DataSize, Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq, Hash)] +pub(crate) struct BlockExecutionResultsOrChunkId { + /// Index of the chunk being requested. + pub(super) chunk_index: u64, + /// Hash of the block. + pub(super) block_hash: BlockHash, +} + +impl BlockExecutionResultsOrChunkId { + /// Returns an instance of post-1.5 request for block execution results. + /// The `chunk_index` is set to 0 as the starting point of the fetch cycle. + /// If the effects are stored without chunking the index will be 0 as well. + pub fn new(block_hash: BlockHash) -> Self { + BlockExecutionResultsOrChunkId { + chunk_index: 0, + block_hash, + } + } + + /// Returns the request for the `next_chunk` retaining the original request's block hash. 
+ pub fn next_chunk(&self, next_chunk: u64) -> Self { + BlockExecutionResultsOrChunkId { + chunk_index: next_chunk, + block_hash: self.block_hash, + } + } + + pub(crate) fn block_hash(&self) -> &BlockHash { + &self.block_hash + } + + pub(crate) fn chunk_index(&self) -> u64 { + self.chunk_index + } +} + +impl Display for BlockExecutionResultsOrChunkId { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!( + f, + "execution results for {} or chunk #{}", + self.block_hash, self.chunk_index + ) + } +} + +mod specimen_support { + use crate::utils::specimen::{Cache, LargestSpecimen, SizeEstimator}; + + use super::BlockExecutionResultsOrChunkId; + + impl LargestSpecimen for BlockExecutionResultsOrChunkId { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + BlockExecutionResultsOrChunkId { + chunk_index: u64::MAX, + block_hash: LargestSpecimen::largest_specimen(estimator, cache), + } + } + } +} diff --git a/node/src/types/block/block_payload.rs b/node/src/types/block/block_payload.rs new file mode 100644 index 0000000000..9ed3e4d2cc --- /dev/null +++ b/node/src/types/block/block_payload.rs @@ -0,0 +1,180 @@ +use std::{ + cmp::{Ord, PartialOrd}, + collections::{BTreeMap, BTreeSet}, + fmt::{self, Display, Formatter}, + hash::Hash, +}; + +use datasize::DataSize; +use serde::{Deserialize, Serialize}; + +use casper_types::{ + Approval, PublicKey, RewardedSignatures, TransactionHash, AUCTION_LANE_ID, + INSTALL_UPGRADE_LANE_ID, MINT_LANE_ID, +}; + +/// The piece of information that will become the content of a future block (isn't finalized or +/// executed yet) +/// +/// From the view of the consensus protocol this is the "consensus value": The protocol deals with +/// finalizing an order of `BlockPayload`s. Only after consensus has been reached, the block's +/// transactions actually get executed, and the executed block gets signed. 
+#[derive(Clone, DataSize, Debug, PartialOrd, Ord, PartialEq, Eq, Hash, Serialize, Deserialize)] +pub struct BlockPayload { + transactions: BTreeMap)>>, + accusations: Vec, + rewarded_signatures: RewardedSignatures, + random_bit: bool, + current_gas_price: u8, +} + +impl Default for BlockPayload { + fn default() -> Self { + Self { + transactions: Default::default(), + accusations: vec![], + rewarded_signatures: Default::default(), + random_bit: false, + current_gas_price: 1u8, + } + } +} + +impl BlockPayload { + pub(crate) fn new( + transactions: BTreeMap)>>, + accusations: Vec, + rewarded_signatures: RewardedSignatures, + random_bit: bool, + current_gas_price: u8, + ) -> Self { + BlockPayload { + transactions, + accusations, + rewarded_signatures, + random_bit, + current_gas_price, + } + } + + /// Returns the hashes and approvals of the mint transactions within the block. + pub fn mint(&self) -> impl Iterator)> { + let mut ret = vec![]; + if let Some(transactions) = self.transactions.get(&MINT_LANE_ID) { + for transaction in transactions { + ret.push(transaction); + } + } + ret.into_iter() + } + + /// Returns the hashes and approvals of the auction transactions within the block. + pub fn auction(&self) -> impl Iterator)> { + let mut ret = vec![]; + if let Some(transactions) = self.transactions.get(&AUCTION_LANE_ID) { + for transaction in transactions { + ret.push(transaction); + } + } + ret.into_iter() + } + + /// Returns the hashes and approvals of the installer / upgrader transactions within the block. + pub fn install_upgrade(&self) -> impl Iterator)> { + let mut ret = vec![]; + if let Some(transactions) = self.transactions.get(&INSTALL_UPGRADE_LANE_ID) { + for transaction in transactions { + ret.push(transaction); + } + } + ret.into_iter() + } + + /// Returns all the transaction hashes and approvals within the block by lane. 
+ pub fn transactions_by_lane( + &self, + lane: u8, + ) -> impl Iterator)> { + let mut ret = vec![]; + if let Some(transactions) = self.transactions.get(&lane) { + for transaction in transactions { + ret.push(transaction); + } + } + ret.into_iter() + } + + pub(crate) fn finalized_payload(&self) -> BTreeMap> { + let mut ret = BTreeMap::new(); + for (category, transactions) in &self.transactions { + let transactions = transactions.iter().map(|(tx, _)| *tx).collect(); + ret.insert(*category, transactions); + } + + ret + } + + /// Returns true if even 1 transaction is in a lane other than supported. + pub fn has_transaction_in_unsupported_lane(&self, supported_lanes: &[u8]) -> bool { + // for all transaction lanes, if any of them are not in supported_lanes, true + self.transactions + .keys() + .any(|lane_id| !supported_lanes.contains(lane_id)) + } + + /// Returns count of transactions by category. + pub fn count(&self, lane: Option) -> usize { + match lane { + None => self.transactions.values().map(Vec::len).sum(), + Some(lane) => match self.transactions.get(&lane) { + Some(values) => values.len(), + None => 0, + }, + } + } + + /// Returns all the transaction hashes and approvals within the block. + pub fn all_transactions(&self) -> impl Iterator)> { + self.transactions.values().flatten() + } + + /// Returns the set of validators that are reported as faulty in this block. + pub(crate) fn accusations(&self) -> &Vec { + &self.accusations + } + + pub(crate) fn random_bit(&self) -> bool { + self.random_bit + } + + /// The finality signatures for the past blocks that will be rewarded in this block. + pub(crate) fn rewarded_signatures(&self) -> &RewardedSignatures { + &self.rewarded_signatures + } + + /// The current gas price to execute the payload against. 
+ pub(crate) fn current_gas_price(&self) -> u8 { + self.current_gas_price + } + + pub(crate) fn all_transaction_hashes(&self) -> impl Iterator { + let mut ret: Vec = vec![]; + for values in self.transactions.values() { + for (transaction_hash, _) in values { + ret.push(*transaction_hash); + } + } + ret.into_iter() + } +} + +impl Display for BlockPayload { + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + let count = self.count(None); + write!(formatter, "payload: {} txns", count)?; + if !self.accusations.is_empty() { + write!(formatter, ", {} accusations", self.accusations.len())?; + } + Ok(()) + } +} diff --git a/node/src/types/block/block_with_metadata.rs b/node/src/types/block/block_with_metadata.rs new file mode 100644 index 0000000000..1199897818 --- /dev/null +++ b/node/src/types/block/block_with_metadata.rs @@ -0,0 +1,22 @@ +use casper_types::{Block, BlockSignatures}; +use serde::{Deserialize, Serialize}; +use std::fmt; + +/// A wrapper around `Block` for the purposes of fetching blocks by height in linear chain. 
+#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct BlockWithMetadata { + pub block: Block, + pub block_signatures: BlockSignatures, +} + +impl fmt::Display for BlockWithMetadata { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "block #{}, {}, with {} block signatures", + self.block.height(), + self.block.hash(), + self.block_signatures.len() + ) + } +} diff --git a/node/src/types/block/executable_block.rs b/node/src/types/block/executable_block.rs new file mode 100644 index 0000000000..7225cba3cd --- /dev/null +++ b/node/src/types/block/executable_block.rs @@ -0,0 +1,204 @@ +use super::{FinalizedBlock, InternalEraReport}; +use casper_types::{ + BlockV2, Chainspec, EraId, PublicKey, RewardedSignatures, Timestamp, Transaction, + TransactionHash, AUCTION_LANE_ID, INSTALL_UPGRADE_LANE_ID, MINT_LANE_ID, U512, +}; +use datasize::DataSize; +use num_rational::Ratio; +use serde::Serialize; +use std::{ + collections::{BTreeMap, HashMap}, + fmt, +}; +use tracing::warn; + +/// Data necessary for a block to be executed. +#[derive(DataSize, Debug, Clone, PartialEq, Serialize)] +pub struct ExecutableBlock { + pub(crate) rewarded_signatures: RewardedSignatures, + pub(crate) timestamp: Timestamp, + pub(crate) random_bit: bool, + pub(crate) era_report: Option, + pub(crate) era_id: EraId, + pub(crate) height: u64, + pub(crate) proposer: Box, + pub(crate) current_gas_price: u8, + /// The transactions for the `FinalizedBlock`. + pub(crate) transactions: Vec, + pub(crate) transaction_map: BTreeMap>, + /// `None` may indicate that the rewards have not been computed yet, + /// or that the block is not a switch one. + pub(crate) rewards: Option>>, + /// `None` may indicate that the next era gas has not been computed yet, + /// or that the block is not a switch one. 
+ pub(crate) next_era_gas_price: Option, +} + +impl ExecutableBlock { + pub(crate) fn mint(&self) -> Vec { + self.transaction_map + .get(&MINT_LANE_ID) + .cloned() + .unwrap_or(vec![]) + } + + pub(crate) fn auction(&self) -> Vec { + self.transaction_map + .get(&AUCTION_LANE_ID) + .cloned() + .unwrap_or(vec![]) + } + + pub(crate) fn install_upgrade(&self) -> Vec { + self.transaction_map + .get(&INSTALL_UPGRADE_LANE_ID) + .cloned() + .unwrap_or(vec![]) + } + + /// Creates a new `ExecutedBlock` from a `FinalizedBlock` and its transactions. + pub fn from_finalized_block_and_transactions( + finalized_block: FinalizedBlock, + transactions: Vec, + ) -> Self { + Self { + rewarded_signatures: finalized_block.rewarded_signatures, + timestamp: finalized_block.timestamp, + random_bit: finalized_block.random_bit, + era_report: finalized_block.era_report, + era_id: finalized_block.era_id, + height: finalized_block.height, + proposer: finalized_block.proposer, + transactions, + transaction_map: finalized_block.transactions, + rewards: None, + next_era_gas_price: None, + current_gas_price: finalized_block.current_gas_price, + } + } + + /// Creates a new `ExecutedBlock` from a `BlockV2` and its deploys. 
+ pub fn from_block_and_transactions(block: BlockV2, transactions: Vec) -> Self { + let era_report = block.era_end().map(|ee| InternalEraReport { + equivocators: ee.equivocators().into(), + inactive_validators: ee.inactive_validators().into(), + }); + + Self { + rewarded_signatures: block.rewarded_signatures().clone(), + timestamp: block.timestamp(), + random_bit: block.random_bit(), + era_report, + era_id: block.era_id(), + height: block.height(), + proposer: Box::new(block.proposer().clone()), + transactions, + transaction_map: block.transactions().clone(), + rewards: block.era_end().map(|era_end| era_end.rewards().clone()), + next_era_gas_price: block.era_end().map(|era_end| era_end.next_era_gas_price()), + current_gas_price: block.header().current_gas_price(), + } + } + + pub(crate) fn calc_utilization_score(&self, chainspec: &Chainspec) -> Option { + let cfg = &chainspec.transaction_config.transaction_v1_config; + let per_block_capacity = cfg.get_max_block_count(); + let max_block_size = chainspec.transaction_config.max_block_size as u64; + let block_gas_limit = chainspec.transaction_config.block_gas_limit; + + let mut has_hit_slot_limit = false; + let mut transaction_hash_to_lane_id = HashMap::new(); + + for (lane_id, transactions) in self.transaction_map.iter() { + transaction_hash_to_lane_id.extend( + transactions + .iter() + .map(|transaction| (transaction, *lane_id)), + ); + let max_count = cfg.get_max_transaction_count(*lane_id); + if max_count == transactions.len() as u64 { + has_hit_slot_limit = true; + } + } + + if has_hit_slot_limit { + Some(100u64) + } else if self.transactions.is_empty() { + Some(0u64) + } else { + let size_utilization: u64 = { + let total_size_of_transactions: u64 = self + .transactions + .iter() + .map(|transaction| transaction.size_estimate() as u64) + .sum(); + + Ratio::new(total_size_of_transactions * 100, max_block_size).to_integer() + }; + let gas_utilization: u64 = { + let total_gas_limit: u64 = self + .transactions + 
.iter() + .map( + |transaction| match transaction_hash_to_lane_id.get(&transaction.hash()) { + Some(lane_id) => match &transaction.gas_limit(chainspec, *lane_id) { + Ok(gas_limit) => gas_limit.value().as_u64(), + Err(_) => { + warn!("Unable to determine gas limit"); + 0u64 + } + }, + None => { + warn!("Unable to determine gas limit"); + 0u64 + } + }, + ) + .sum(); + + Ratio::new(total_gas_limit * 100, block_gas_limit).to_integer() + }; + + let slot_utilization = + Ratio::new(self.transactions.len() as u64 * 100, per_block_capacity).to_integer(); + + let utilization_scores = [slot_utilization, gas_utilization, size_utilization]; + + utilization_scores.iter().max().copied() + } + } +} + +impl fmt::Display for ExecutableBlock { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + formatter, + "executable block #{} in {}, timestamp {}, {} transfers, {} staking txns, {} \ + install/upgrade txns", + self.height, + self.era_id, + self.timestamp, + self.mint().len(), + self.auction().len(), + self.install_upgrade().len(), + )?; + for (lane, wasm_transaction) in self.transaction_map.iter() { + if *lane < 3 { + continue; + } + write!( + formatter, + ", lane: {} with {} transactions", + *lane, + wasm_transaction.len() + )?; + } + if let Some(ref ee) = self.era_report { + write!(formatter, ", era_end: {:?}", ee)?; + } + if let Some(ref next_era_gas_price) = self.next_era_gas_price { + write!(formatter, ", next_era_gas_price: {}", next_era_gas_price)?; + } + Ok(()) + } +} diff --git a/node/src/types/block/finalized_block.rs b/node/src/types/block/finalized_block.rs new file mode 100644 index 0000000000..9dcdb4c83e --- /dev/null +++ b/node/src/types/block/finalized_block.rs @@ -0,0 +1,265 @@ +use std::{ + cmp::{Ord, PartialOrd}, + collections::BTreeMap, + fmt::{self, Display, Formatter}, + hash::Hash, +}; + +#[cfg(test)] +use std::collections::BTreeSet; + +use datasize::DataSize; +use serde::{Deserialize, Serialize}; + +#[cfg(test)] +use 
casper_types::{SecretKey, Transaction}; +#[cfg(test)] +use {casper_types::testing::TestRng, rand::Rng}; + +use casper_types::{ + BlockV2, EraId, PublicKey, RewardedSignatures, Timestamp, TransactionHash, AUCTION_LANE_ID, + INSTALL_UPGRADE_LANE_ID, MINT_LANE_ID, +}; + +use super::BlockPayload; + +/// The piece of information that will become the content of a future block after it was finalized +/// and before execution happened yet. +#[derive(Clone, DataSize, Debug, PartialOrd, Ord, PartialEq, Eq, Hash, Serialize, Deserialize)] +pub struct FinalizedBlock { + pub(crate) transactions: BTreeMap>, + pub(crate) rewarded_signatures: RewardedSignatures, + pub(crate) timestamp: Timestamp, + pub(crate) random_bit: bool, + pub(crate) era_report: Option, + pub(crate) era_id: EraId, + pub(crate) height: u64, + pub(crate) proposer: Box, + pub(crate) current_gas_price: u8, +} + +/// `EraReport` used only internally. The one in types is a part of `EraEndV1`. +#[derive( + Clone, DataSize, Debug, PartialOrd, Ord, PartialEq, Eq, Hash, Serialize, Deserialize, Default, +)] +pub struct InternalEraReport { + /// The set of equivocators. + pub equivocators: Vec, + /// Validators that haven't produced any unit during the era. 
+ pub inactive_validators: Vec, +} + +impl FinalizedBlock { + pub(crate) fn new( + block_payload: BlockPayload, + era_report: Option, + timestamp: Timestamp, + era_id: EraId, + height: u64, + proposer: PublicKey, + ) -> Self { + let current_gas_price = block_payload.current_gas_price(); + let transactions = block_payload.finalized_payload(); + + FinalizedBlock { + transactions, + rewarded_signatures: block_payload.rewarded_signatures().clone(), + timestamp, + random_bit: block_payload.random_bit(), + era_report, + era_id, + height, + proposer: Box::new(proposer), + current_gas_price, + } + } + + pub(crate) fn mint(&self) -> Vec { + self.transactions + .get(&MINT_LANE_ID) + .map(|transactions| transactions.to_vec()) + .unwrap_or_default() + } + + pub(crate) fn auction(&self) -> Vec { + self.transactions + .get(&AUCTION_LANE_ID) + .map(|transactions| transactions.to_vec()) + .unwrap_or_default() + } + pub(crate) fn install_upgrade(&self) -> Vec { + self.transactions + .get(&INSTALL_UPGRADE_LANE_ID) + .map(|transactions| transactions.to_vec()) + .unwrap_or_default() + } + + /// The list of deploy hashes chained with the list of transfer hashes. + pub(crate) fn all_transactions(&self) -> impl Iterator { + self.transactions.values().flatten() + } + + /// Generates a random instance using a `TestRng` and includes specified deploys. + #[cfg(test)] + pub(crate) fn random<'a, I: IntoIterator>( + rng: &mut TestRng, + txns_iter: I, + ) -> Self { + let era = rng.gen_range(0..5); + let height = era * 10 + rng.gen_range(0..10); + let is_switch = rng.gen_bool(0.1); + + FinalizedBlock::random_with_specifics( + rng, + EraId::from(era), + height, + is_switch, + Timestamp::now(), + txns_iter, + ) + } + + /// Generates a random instance using a `TestRng`, but using the specified values. + /// If `deploy` is `None`, random deploys will be generated, otherwise, the provided `deploy` + /// will be used. 
+ #[cfg(test)] + pub(crate) fn random_with_specifics<'a, I: IntoIterator>( + rng: &mut TestRng, + era_id: EraId, + height: u64, + is_switch: bool, + timestamp: Timestamp, + txns_iter: I, + ) -> Self { + let mut transactions = BTreeMap::new(); + let mut standard = vec![]; + for transaction in txns_iter { + standard.push((transaction.hash(), BTreeSet::new())); + } + transactions.insert(3, standard); + let rewarded_signatures = Default::default(); + let random_bit = rng.gen(); + let block_payload = + BlockPayload::new(transactions, vec![], rewarded_signatures, random_bit, 1u8); + + let era_report = if is_switch { + Some(InternalEraReport::random(rng)) + } else { + None + }; + let secret_key: SecretKey = SecretKey::ed25519_from_bytes(rng.gen::<[u8; 32]>()).unwrap(); + let public_key = PublicKey::from(&secret_key); + + FinalizedBlock::new( + block_payload, + era_report, + timestamp, + era_id, + height, + public_key, + ) + } +} + +impl From for FinalizedBlock { + fn from(block: BlockV2) -> Self { + FinalizedBlock { + transactions: block.transactions().clone(), + timestamp: block.timestamp(), + random_bit: block.random_bit(), + era_report: block.era_end().map(|era_end| InternalEraReport { + equivocators: Vec::from(era_end.equivocators()), + inactive_validators: Vec::from(era_end.inactive_validators()), + }), + era_id: block.era_id(), + height: block.height(), + proposer: Box::new(block.proposer().clone()), + rewarded_signatures: block.rewarded_signatures().clone(), + current_gas_price: block.header().current_gas_price(), + } + } +} + +impl Display for FinalizedBlock { + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + write!( + formatter, + "finalized block #{} in {}, timestamp {}, {} transfers, {} staking txns, {} \ + install/upgrade txns,", + self.height, + self.era_id, + self.timestamp, + self.mint().len(), + self.auction().len(), + self.install_upgrade().len(), + )?; + for (category, transactions) in self.transactions.iter() { + write!( + formatter, + 
"lane: {} has {} transactions", + category, + transactions.len() + )?; + } + if let Some(ref ee) = self.era_report { + write!(formatter, ", era_end: {:?}", ee)?; + } + Ok(()) + } +} + +impl InternalEraReport { + /// Returns a random `InternalEraReport`. + #[cfg(test)] + pub fn random(rng: &mut TestRng) -> Self { + let equivocators_count = rng.gen_range(0..5); + let inactive_count = rng.gen_range(0..5); + let equivocators = core::iter::repeat_with(|| PublicKey::random(rng)) + .take(equivocators_count) + .collect(); + let inactive_validators = core::iter::repeat_with(|| PublicKey::random(rng)) + .take(inactive_count) + .collect(); + + InternalEraReport { + equivocators, + inactive_validators, + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use casper_types::Deploy; + + #[test] + fn should_convert_from_proposable_to_finalized_without_dropping_hashes() { + let mut rng = TestRng::new(); + + let large_lane_id = 3; + let standard = Transaction::Deploy(Deploy::random(&mut rng)); + let hash = standard.hash(); + let transactions = { + let mut ret = BTreeMap::new(); + ret.insert(large_lane_id, vec![(hash, BTreeSet::new())]); + ret.insert(MINT_LANE_ID, vec![]); + ret.insert(INSTALL_UPGRADE_LANE_ID, vec![]); + ret.insert(AUCTION_LANE_ID, vec![]); + ret + }; + let block_payload = BlockPayload::new(transactions, vec![], Default::default(), false, 1u8); + + let fb = FinalizedBlock::new( + block_payload, + None, + Timestamp::now(), + EraId::random(&mut rng), + 90, + PublicKey::random(&mut rng), + ); + + let transactions = fb.transactions.get(&large_lane_id).unwrap(); + assert!(!transactions.is_empty()) + } +} diff --git a/node/src/types/block/invalid_proposal_error.rs b/node/src/types/block/invalid_proposal_error.rs new file mode 100644 index 0000000000..83d71e259c --- /dev/null +++ b/node/src/types/block/invalid_proposal_error.rs @@ -0,0 +1,65 @@ +use crate::types::DataSize; +use casper_types::{FinalitySignatureId, InvalidTransaction, TransactionHash}; + 
+#[derive(DataSize, Debug, Clone)] +pub(crate) enum InvalidProposalError { + Appendable(String), + InvalidTransaction(String), + AncestorTransactionReplay { + replayed_transaction_hash: TransactionHash, + }, + UnfetchedTransaction { + transaction_hash: TransactionHash, + }, + RewardSignaturesMissingCitedBlock { + cited_block_height: u64, + }, + RewardSignatureReplay { + cited_block_height: u64, + }, + InvalidFinalitySignature(FinalitySignatureId), + ExceedsLaneLimit { + lane_id: u8, + }, + UnsupportedLane, + InvalidGasPrice { + proposed_gas_price: u8, + current_gas_price: u8, + }, + InvalidApprovalsHash(String), + CompetingApprovals { + transaction_hash: TransactionHash, + }, + UnableToFetch, + FailedFetcherValidation, + UnexpectedFetchStatus, + FetchedIncorrectTransactionById { + expected_transaction_hash: TransactionHash, + actual_transaction_hash: TransactionHash, + }, + TransactionFetchingAborted, + FetcherError(String), + FinalitySignatureFetchingAborted, + TransactionReplayPreviousEra { + transaction_era_id: u64, + proposed_block_era_id: u64, + }, +} + +impl From for Box { + fn from(appendable_block_error: crate::types::appendable_block::AddError) -> Self { + Box::new(InvalidProposalError::Appendable(format!( + "{}", + appendable_block_error + ))) + } +} + +impl From for Box { + fn from(invalid_transaction: InvalidTransaction) -> Self { + Box::new(InvalidProposalError::InvalidTransaction(format!( + "{}", + invalid_transaction + ))) + } +} diff --git a/node/src/types/block/meta_block.rs b/node/src/types/block/meta_block.rs new file mode 100644 index 0000000000..e555d1266e --- /dev/null +++ b/node/src/types/block/meta_block.rs @@ -0,0 +1,345 @@ +mod merge_mismatch_error; +mod state; + +use std::{convert::TryFrom, sync::Arc}; + +use crate::types::TransactionHeader; +use datasize::DataSize; +use serde::Serialize; + +use casper_types::{ + execution::ExecutionResult, ActivationPoint, Block, BlockHash, BlockV2, EraId, TransactionHash, +}; + +pub(crate) use 
merge_mismatch_error::MergeMismatchError; +pub(crate) use state::State; + +use crate::contract_runtime::ExecutionArtifact; + +/// A block along with its execution results and state recording which actions have been taken +/// related to the block. +/// +/// Some or all of these actions should be taken after a block is formed on a node via: +/// * execution (ContractRuntime executing a FinalizedBlock) +/// * accumulation (BlockAccumulator receiving a gossiped block and its finality signatures) +/// * historical sync (BlockSynchronizer fetching all data relating to a block) +#[derive(Clone, Eq, PartialEq, Serialize, Debug, DataSize)] +pub(crate) enum MetaBlock { + Forward(ForwardMetaBlock), + Historical(HistoricalMetaBlock), +} + +impl MetaBlock { + pub(crate) fn new_forward( + block: Arc, + execution_results: Vec, + state: State, + ) -> Self { + Self::Forward(ForwardMetaBlock { + block, + execution_results, + state, + }) + } + + pub(crate) fn new_historical( + block: Arc, + execution_results: Vec<(TransactionHash, TransactionHeader, ExecutionResult)>, + state: State, + ) -> Self { + Self::Historical(HistoricalMetaBlock { + block, + execution_results, + state, + }) + } + + pub(crate) fn height(&self) -> u64 { + match &self { + MetaBlock::Forward(meta_block) => meta_block.block.height(), + MetaBlock::Historical(meta_block) => meta_block.block.height(), + } + } + + pub(crate) fn era_id(&self) -> EraId { + match &self { + MetaBlock::Forward(meta_block) => meta_block.block.era_id(), + MetaBlock::Historical(meta_block) => meta_block.block.era_id(), + } + } + + pub(crate) fn is_switch_block(&self) -> bool { + match &self { + MetaBlock::Forward(meta_block) => meta_block.block.is_switch_block(), + MetaBlock::Historical(meta_block) => meta_block.block.is_switch_block(), + } + } + + pub(crate) fn hash(&self) -> BlockHash { + match &self { + MetaBlock::Forward(meta_block) => *meta_block.block.hash(), + MetaBlock::Historical(meta_block) => *meta_block.block.hash(), + } + } + + 
pub(crate) fn mut_state(&mut self) -> &mut State { + match self { + MetaBlock::Forward(meta_block) => &mut meta_block.state, + MetaBlock::Historical(meta_block) => &mut meta_block.state, + } + } + + pub(crate) fn state(&self) -> &State { + match &self { + MetaBlock::Forward(meta_block) => &meta_block.state, + MetaBlock::Historical(meta_block) => &meta_block.state, + } + } +} + +#[derive(Clone, Eq, PartialEq, Serialize, Debug, DataSize)] +pub(crate) struct ForwardMetaBlock { + pub(crate) block: Arc, + pub(crate) execution_results: Vec, + pub(crate) state: State, +} + +#[derive(Clone, Eq, PartialEq, Serialize, Debug, DataSize)] +pub(crate) struct HistoricalMetaBlock { + pub(crate) block: Arc, + pub(crate) execution_results: Vec<(TransactionHash, TransactionHeader, ExecutionResult)>, + pub(crate) state: State, +} + +impl ForwardMetaBlock { + pub(crate) fn merge(mut self, other: ForwardMetaBlock) -> Result { + if self.block != other.block { + return Err(MergeMismatchError::Block); + } + + if self.execution_results.is_empty() { + if !other.execution_results.is_empty() { + self.execution_results = other.execution_results; + } + } else if !other.execution_results.is_empty() + && self.execution_results != other.execution_results + { + return Err(MergeMismatchError::ExecutionResults); + } + + self.state = self.state.merge(other.state)?; + + Ok(self) + } + + /// Is this a switch block? + pub(crate) fn is_switch_block(&self) -> bool { + self.block.is_switch_block() + } + + /// Is this the last block before a protocol version upgrade? 
+ pub(crate) fn is_upgrade_boundary(&self, activation_point: ActivationPoint) -> bool { + match activation_point { + ActivationPoint::EraId(era_id) => { + self.is_switch_block() && self.block.era_id().successor() == era_id + } + ActivationPoint::Genesis(_) => false, + } + } +} + +impl TryFrom for ForwardMetaBlock { + type Error = String; + + fn try_from(value: MetaBlock) -> Result { + match value { + MetaBlock::Forward(meta_block) => Ok(meta_block), + MetaBlock::Historical(_) => { + Err("Could not convert Historical Meta Block to Forward Meta Block".to_string()) + } + } + } +} + +impl From for MetaBlock { + fn from(value: ForwardMetaBlock) -> Self { + Self::Forward(value) + } +} + +#[cfg(test)] +mod tests { + use std::convert::TryInto; + + use casper_types::{ + execution::ExecutionResultV2, testing::TestRng, TestBlockBuilder, TransactionV1, + }; + + use super::*; + + #[test] + fn should_merge_when_same_non_empty_execution_results() { + let rng = &mut TestRng::new(); + + let block = Arc::new(TestBlockBuilder::new().build(rng)); + let txn = TransactionV1::random(rng); + let execution_results = vec![ExecutionArtifact::new( + TransactionHash::V1(*txn.hash()), + (&txn).into(), + ExecutionResult::from(ExecutionResultV2::random(rng)), + Vec::new(), + )]; + let state = State::new_already_stored(); + + let meta_block1: ForwardMetaBlock = + MetaBlock::new_forward(Arc::clone(&block), execution_results.clone(), state) + .try_into() + .unwrap(); + let meta_block2: ForwardMetaBlock = + MetaBlock::new_forward(Arc::clone(&block), execution_results.clone(), state) + .try_into() + .unwrap(); + + let merged = meta_block1.clone().merge(meta_block2.clone()).unwrap(); + + assert_eq!(merged.block, block); + assert_eq!(merged.execution_results, execution_results); + assert_eq!(merged.state, State::new_already_stored()); + assert_eq!(meta_block2.merge(meta_block1).unwrap(), merged) + } + + #[test] + fn should_merge_when_both_empty_execution_results() { + let rng = &mut TestRng::new(); + + 
let block = Arc::new(TestBlockBuilder::new().build(rng)); + let state = State::new(); + + let meta_block1: ForwardMetaBlock = + MetaBlock::new_forward(Arc::clone(&block), vec![], state) + .try_into() + .unwrap(); + let meta_block2: ForwardMetaBlock = + MetaBlock::new_forward(Arc::clone(&block), vec![], state) + .try_into() + .unwrap(); + + let merged = meta_block1.clone().merge(meta_block2.clone()).unwrap(); + + assert_eq!(merged.block, block); + assert!(merged.execution_results.is_empty()); + assert_eq!(merged.state, state); + assert_eq!(meta_block2.merge(meta_block1).unwrap(), merged) + } + + #[test] + fn should_merge_when_one_empty_execution_results() { + let rng = &mut TestRng::new(); + + let block = Arc::new(TestBlockBuilder::new().build(rng)); + let txn = TransactionV1::random(rng); + let execution_results = vec![ExecutionArtifact::new( + TransactionHash::V1(*txn.hash()), + (&txn).into(), + ExecutionResult::from(ExecutionResultV2::random(rng)), + Vec::new(), + )]; + let state = State::new_not_to_be_gossiped(); + + let meta_block1: ForwardMetaBlock = + MetaBlock::new_forward(Arc::clone(&block), execution_results.clone(), state) + .try_into() + .unwrap(); + let meta_block2: ForwardMetaBlock = + MetaBlock::new_forward(Arc::clone(&block), vec![], state) + .try_into() + .unwrap(); + + let merged = meta_block1.clone().merge(meta_block2.clone()).unwrap(); + + assert_eq!(merged.block, block); + assert_eq!(merged.execution_results, execution_results); + assert_eq!(merged.state, state); + assert_eq!(meta_block2.merge(meta_block1).unwrap(), merged) + } + + #[test] + fn should_fail_to_merge_different_blocks() { + let rng = &mut TestRng::new(); + + let block1 = Arc::new(TestBlockBuilder::new().build(rng)); + let block2 = Arc::new( + TestBlockBuilder::new() + .era(block1.era_id().successor()) + .height(block1.height() + 1) + .switch_block(true) + .build(rng), + ); + let txn = TransactionV1::random(rng); + let execution_results = vec![ExecutionArtifact::new( + 
TransactionHash::V1(*txn.hash()), + (&txn).into(), + ExecutionResult::from(ExecutionResultV2::random(rng)), + Vec::new(), + )]; + let state = State::new(); + + let meta_block1: ForwardMetaBlock = + MetaBlock::new_forward(block1, execution_results.clone(), state) + .try_into() + .unwrap(); + let meta_block2: ForwardMetaBlock = + MetaBlock::new_forward(block2, execution_results, state) + .try_into() + .unwrap(); + + assert!(matches!( + meta_block1.clone().merge(meta_block2.clone()), + Err(MergeMismatchError::Block) + )); + assert!(matches!( + meta_block2.merge(meta_block1), + Err(MergeMismatchError::Block) + )); + } + + #[test] + fn should_fail_to_merge_different_execution_results() { + let rng = &mut TestRng::new(); + + let block = Arc::new(TestBlockBuilder::new().build(rng)); + let txn1 = TransactionV1::random(rng); + let execution_results1 = vec![ExecutionArtifact::new( + TransactionHash::V1(*txn1.hash()), + (&txn1).into(), + ExecutionResult::from(ExecutionResultV2::random(rng)), + Vec::new(), + )]; + let txn2 = TransactionV1::random(rng); + let execution_results2 = vec![ExecutionArtifact::new( + TransactionHash::V1(*txn2.hash()), + (&txn2).into(), + ExecutionResult::from(ExecutionResultV2::random(rng)), + Vec::new(), + )]; + let state = State::new(); + + let meta_block1: ForwardMetaBlock = + MetaBlock::new_forward(Arc::clone(&block), execution_results1, state) + .try_into() + .unwrap(); + let meta_block2: ForwardMetaBlock = + MetaBlock::new_forward(Arc::clone(&block), execution_results2, state) + .try_into() + .unwrap(); + + assert!(matches!( + meta_block1.clone().merge(meta_block2.clone()), + Err(MergeMismatchError::ExecutionResults) + )); + assert!(matches!( + meta_block2.merge(meta_block1), + Err(MergeMismatchError::ExecutionResults) + )); + } +} diff --git a/node/src/types/block/meta_block/merge_mismatch_error.rs b/node/src/types/block/meta_block/merge_mismatch_error.rs new file mode 100644 index 0000000000..a2de312222 --- /dev/null +++ 
b/node/src/types/block/meta_block/merge_mismatch_error.rs @@ -0,0 +1,10 @@ +use thiserror::Error; +use tracing::error; + +#[derive(Error, Debug)] +pub(crate) enum MergeMismatchError { + #[error("block mismatch when merging meta blocks")] + Block, + #[error("execution results mismatch when merging meta blocks")] + ExecutionResults, +} diff --git a/node/src/types/block/meta_block/state.rs b/node/src/types/block/meta_block/state.rs new file mode 100644 index 0000000000..7490420014 --- /dev/null +++ b/node/src/types/block/meta_block/state.rs @@ -0,0 +1,267 @@ +use datasize::DataSize; +use serde::Serialize; + +use super::MergeMismatchError; + +#[derive(Clone, Copy, Debug, DataSize)] +pub(crate) enum StateChange { + Updated, + AlreadyRegistered, +} + +impl StateChange { + pub(crate) fn was_updated(self) -> bool { + matches!(self, StateChange::Updated) + } + + pub(crate) fn was_already_registered(self) -> bool { + matches!(self, StateChange::AlreadyRegistered) + } +} + +impl From for StateChange { + fn from(current_state: bool) -> Self { + if current_state { + StateChange::AlreadyRegistered + } else { + StateChange::Updated + } + } +} + +#[derive(Clone, Copy, Eq, PartialEq, Default, Serialize, Debug, DataSize)] +pub(crate) struct State { + pub(super) stored: bool, + pub(super) sent_to_transaction_buffer: bool, + pub(super) updated_validator_matrix: bool, + pub(super) gossiped: bool, + pub(super) executed: bool, + pub(super) tried_to_sign: bool, + pub(super) consensus_notified: bool, + pub(super) accumulator_notified: bool, + pub(super) synchronizer_notified: bool, + pub(super) validator_notified: bool, + pub(super) sufficient_finality: bool, + pub(super) marked_complete: bool, + pub(super) all_actions_done: bool, +} + +impl State { + /// Returns a new `State` with all fields set to `false`. + pub(crate) fn new() -> Self { + Self::default() + } + + /// Returns a new `State` with all fields set to `false` except for `gossiped`. 
+ pub(crate) fn new_not_to_be_gossiped() -> Self { + State { + gossiped: true, + ..Self::default() + } + } + + /// Returns a new `State` with all fields set to `false` except for `stored`. + pub(crate) fn new_already_stored() -> Self { + State { + stored: true, + ..Self::default() + } + } + + /// Returns a new `State` which a historical block is expected to have after it has been synced. + pub(crate) fn new_after_historical_sync() -> Self { + State { + stored: true, + sent_to_transaction_buffer: false, + updated_validator_matrix: true, + gossiped: true, + executed: true, + tried_to_sign: true, + consensus_notified: false, + accumulator_notified: true, + synchronizer_notified: true, + validator_notified: false, + sufficient_finality: true, + marked_complete: true, + all_actions_done: false, + } + } + + pub(crate) fn is_stored(&self) -> bool { + self.stored + } + + pub(crate) fn is_executed(&self) -> bool { + self.executed + } + + pub(crate) fn has_sufficient_finality(&self) -> bool { + self.sufficient_finality + } + + pub(crate) fn is_marked_complete(&self) -> bool { + self.marked_complete + } + + pub(crate) fn register_as_stored(&mut self) -> StateChange { + let outcome = StateChange::from(self.stored); + self.stored = true; + outcome + } + + pub(crate) fn register_as_sent_to_transaction_buffer(&mut self) -> StateChange { + let outcome = StateChange::from(self.sent_to_transaction_buffer); + self.sent_to_transaction_buffer = true; + outcome + } + + pub(crate) fn register_updated_validator_matrix(&mut self) -> StateChange { + let outcome = StateChange::from(self.updated_validator_matrix); + self.updated_validator_matrix = true; + outcome + } + + pub(crate) fn register_as_gossiped(&mut self) -> StateChange { + let outcome = StateChange::from(self.gossiped); + self.gossiped = true; + outcome + } + + pub(crate) fn register_as_executed(&mut self) -> StateChange { + let outcome = StateChange::from(self.executed); + self.executed = true; + outcome + } + + pub(crate) fn 
register_we_have_tried_to_sign(&mut self) -> StateChange { + let outcome = StateChange::from(self.tried_to_sign); + self.tried_to_sign = true; + outcome + } + + pub(crate) fn register_as_consensus_notified(&mut self) -> StateChange { + let outcome = StateChange::from(self.consensus_notified); + self.consensus_notified = true; + outcome + } + + pub(crate) fn register_as_accumulator_notified(&mut self) -> StateChange { + let outcome = StateChange::from(self.accumulator_notified); + self.accumulator_notified = true; + outcome + } + + pub(crate) fn register_as_synchronizer_notified(&mut self) -> StateChange { + let outcome = StateChange::from(self.synchronizer_notified); + self.synchronizer_notified = true; + outcome + } + + pub(crate) fn register_as_validator_notified(&mut self) -> StateChange { + let outcome = StateChange::from(self.validator_notified); + self.validator_notified = true; + outcome + } + + pub(crate) fn register_has_sufficient_finality(&mut self) -> StateChange { + let outcome = StateChange::from(self.sufficient_finality); + self.sufficient_finality = true; + outcome + } + + pub(crate) fn register_as_marked_complete(&mut self) -> StateChange { + let outcome = StateChange::from(self.marked_complete); + self.marked_complete = true; + outcome + } + + pub(crate) fn register_all_actions_done(&mut self) -> StateChange { + let outcome = StateChange::from(self.all_actions_done); + self.all_actions_done = true; + outcome + } + + pub(super) fn merge(mut self, other: State) -> Result { + let State { + ref mut stored, + ref mut sent_to_transaction_buffer, + ref mut updated_validator_matrix, + ref mut gossiped, + ref mut executed, + ref mut tried_to_sign, + ref mut consensus_notified, + ref mut accumulator_notified, + ref mut synchronizer_notified, + ref mut validator_notified, + ref mut sufficient_finality, + ref mut marked_complete, + ref mut all_actions_done, + } = self; + + *stored |= other.stored; + *sent_to_transaction_buffer |= 
other.sent_to_transaction_buffer; + *updated_validator_matrix |= other.updated_validator_matrix; + *gossiped |= other.gossiped; + *executed |= other.executed; + *tried_to_sign |= other.tried_to_sign; + *consensus_notified |= other.consensus_notified; + *accumulator_notified |= other.accumulator_notified; + *synchronizer_notified |= other.synchronizer_notified; + *validator_notified |= other.validator_notified; + *sufficient_finality |= other.sufficient_finality; + *marked_complete |= other.marked_complete; + *all_actions_done |= other.all_actions_done; + + Ok(self) + } + + pub(crate) fn verify_complete(&self) -> bool { + self.stored + && self.sent_to_transaction_buffer + && self.updated_validator_matrix + && self.gossiped + && self.executed + && self.tried_to_sign + && self.consensus_notified + && self.accumulator_notified + && self.synchronizer_notified + && self.validator_notified + && self.sufficient_finality + && self.marked_complete + } + + #[cfg(test)] + pub(crate) fn set_sufficient_finality(&mut self, has_sufficient_finality: bool) { + self.sufficient_finality = has_sufficient_finality; + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn should_merge() { + let all_true = State { + stored: true, + sent_to_transaction_buffer: true, + updated_validator_matrix: true, + gossiped: true, + executed: true, + tried_to_sign: true, + consensus_notified: true, + accumulator_notified: true, + synchronizer_notified: true, + validator_notified: true, + sufficient_finality: true, + marked_complete: true, + all_actions_done: true, + }; + let all_false = State::default(); + + assert_eq!(all_true.merge(all_false).unwrap(), all_true); + assert_eq!(all_false.merge(all_true).unwrap(), all_true); + assert_eq!(all_true.merge(all_true).unwrap(), all_true); + assert_eq!(all_false.merge(all_false).unwrap(), all_false); + } +} diff --git a/node/src/types/chainspec.rs b/node/src/types/chainspec.rs deleted file mode 100644 index 3302c3e68e..0000000000 --- 
a/node/src/types/chainspec.rs +++ /dev/null @@ -1,420 +0,0 @@ -//! The chainspec is a set of configuration options for the network. All validators must apply the -//! same set of options in order to join and act as a peer in a given network. - -mod accounts_config; -mod activation_point; -mod core_config; -mod deploy_config; -mod error; -mod global_state_update; -mod highway_config; -mod network_config; -mod parse_toml; -mod protocol_config; - -use std::{fmt::Debug, path::Path}; - -use datasize::DataSize; -#[cfg(test)] -use rand::Rng; -use serde::Serialize; -use tracing::{error, warn}; - -use casper_execution_engine::{ - core::engine_state::genesis::ExecConfig, - shared::{system_config::SystemConfig, wasm_config::WasmConfig}, -}; -use casper_types::{ - bytesrepr::{self, FromBytes, ToBytes}, - ProtocolVersion, -}; - -#[cfg(test)] -pub(crate) use self::accounts_config::{AccountConfig, ValidatorConfig}; -pub use self::error::Error; -pub(crate) use self::{ - accounts_config::AccountsConfig, activation_point::ActivationPoint, core_config::CoreConfig, - deploy_config::DeployConfig, global_state_update::GlobalStateUpdate, - highway_config::HighwayConfig, network_config::NetworkConfig, protocol_config::ProtocolConfig, -}; -#[cfg(test)] -use crate::testing::TestRng; -use crate::{ - crypto::hash::{self, Digest}, - utils::Loadable, -}; - -/// The name of the chainspec file on disk. -pub const CHAINSPEC_NAME: &str = "chainspec.toml"; - -/// A collection of configuration settings describing the state of the system at genesis and after -/// upgrades to basic system functionality occurring after genesis. 
-#[derive(DataSize, PartialEq, Eq, Serialize, Debug)] -pub struct Chainspec { - #[serde(rename = "protocol")] - pub(crate) protocol_config: ProtocolConfig, - #[serde(rename = "network")] - pub(crate) network_config: NetworkConfig, - #[serde(rename = "core")] - pub(crate) core_config: CoreConfig, - #[serde(rename = "highway")] - pub(crate) highway_config: HighwayConfig, - #[serde(rename = "deploys")] - pub(crate) deploy_config: DeployConfig, - #[serde(rename = "wasm")] - pub(crate) wasm_config: WasmConfig, - #[serde(rename = "system_costs")] - pub(crate) system_costs_config: SystemConfig, -} - -impl Chainspec { - /// Checks whether the values set in the config make sense and prints warnings or panics if - /// they don't. - pub(crate) fn validate_config(&self) { - let min_era_ms = 1u64 << self.highway_config.minimum_round_exponent; - // If the era duration is set to zero, we will treat it as explicitly stating that eras - // should be defined by height only. - if self.core_config.era_duration.millis() > 0 - && self.core_config.era_duration.millis() - < self.core_config.minimum_era_height * min_era_ms - { - warn!("era duration is less than minimum era height * round length!"); - } - - self.highway_config.validate_config(); - } - - /// Serializes `self` and hashes the resulting bytes. - pub(crate) fn hash(&self) -> Digest { - let serialized_chainspec = self.to_bytes().unwrap_or_else(|error| { - error!(%error, "failed to serialize chainspec"); - vec![] - }); - hash::hash(&serialized_chainspec) - } - - /// Returns true if this chainspec has an activation_point specifying era ID 0. - pub(crate) fn is_genesis(&self) -> bool { - self.protocol_config.activation_point.is_genesis() - } - - /// Returns the protocol version of the chainspec. - pub(crate) fn protocol_version(&self) -> ProtocolVersion { - self.protocol_config.version - } -} - -#[cfg(test)] -impl Chainspec { - /// Generates a random instance using a `TestRng`. 
- pub fn random(rng: &mut TestRng) -> Self { - let protocol_config = ProtocolConfig::random(rng); - let network_config = NetworkConfig::random(rng); - let core_config = CoreConfig::random(rng); - let highway_config = HighwayConfig::random(rng); - let deploy_config = DeployConfig::random(rng); - let wasm_costs_config = rng.gen(); - let system_costs_config = rng.gen(); - - Chainspec { - protocol_config, - network_config, - core_config, - highway_config, - deploy_config, - wasm_config: wasm_costs_config, - system_costs_config, - } - } -} - -impl ToBytes for Chainspec { - fn to_bytes(&self) -> Result, bytesrepr::Error> { - let mut buffer = bytesrepr::allocate_buffer(self)?; - buffer.extend(self.protocol_config.to_bytes()?); - buffer.extend(self.network_config.to_bytes()?); - buffer.extend(self.core_config.to_bytes()?); - buffer.extend(self.highway_config.to_bytes()?); - buffer.extend(self.deploy_config.to_bytes()?); - buffer.extend(self.wasm_config.to_bytes()?); - buffer.extend(self.system_costs_config.to_bytes()?); - Ok(buffer) - } - - fn serialized_length(&self) -> usize { - self.protocol_config.serialized_length() - + self.network_config.serialized_length() - + self.core_config.serialized_length() - + self.highway_config.serialized_length() - + self.deploy_config.serialized_length() - + self.wasm_config.serialized_length() - + self.system_costs_config.serialized_length() - } -} - -impl FromBytes for Chainspec { - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { - let (protocol_config, remainder) = ProtocolConfig::from_bytes(bytes)?; - let (network_config, remainder) = NetworkConfig::from_bytes(remainder)?; - let (core_config, remainder) = CoreConfig::from_bytes(remainder)?; - let (highway_config, remainder) = HighwayConfig::from_bytes(remainder)?; - let (deploy_config, remainder) = DeployConfig::from_bytes(remainder)?; - let (wasm_config, remainder) = WasmConfig::from_bytes(remainder)?; - let (system_costs_config, remainder) = 
SystemConfig::from_bytes(remainder)?; - let chainspec = Chainspec { - protocol_config, - network_config, - core_config, - highway_config, - deploy_config, - wasm_config, - system_costs_config, - }; - Ok((chainspec, remainder)) - } -} - -impl Loadable for Chainspec { - type Error = Error; - - fn from_path>(path: P) -> Result { - parse_toml::parse_toml(path.as_ref().join(CHAINSPEC_NAME)) - } -} - -impl From<&Chainspec> for ExecConfig { - fn from(chainspec: &Chainspec) -> Self { - ExecConfig::new( - chainspec.network_config.accounts_config.clone().into(), - chainspec.wasm_config, - chainspec.system_costs_config, - chainspec.core_config.validator_slots, - chainspec.core_config.auction_delay, - chainspec.core_config.locked_funds_period.millis(), - chainspec.core_config.round_seigniorage_rate, - chainspec.core_config.unbonding_delay, - chainspec - .protocol_config - .activation_point - .genesis_timestamp() - .map_or(0, |timestamp| timestamp.millis()), - ) - } -} - -#[cfg(test)] -mod tests { - use std::fs; - - use num_rational::Ratio; - use once_cell::sync::Lazy; - - use casper_execution_engine::shared::{ - host_function_costs::{HostFunction, HostFunctionCosts}, - motes::Motes, - opcode_costs::OpcodeCosts, - storage_costs::StorageCosts, - stored_value::StoredValue, - wasm_config::WasmConfig, - }; - use casper_types::{EraId, ProtocolVersion, U512}; - - use super::*; - use crate::{ - types::{TimeDiff, Timestamp}, - utils::RESOURCES_PATH, - }; - - static EXPECTED_GENESIS_HOST_FUNCTION_COSTS: Lazy = - Lazy::new(|| HostFunctionCosts { - read_value: HostFunction::new(127, [0, 1, 0]), - read_value_local: HostFunction::new(128, [0, 1, 0]), - write: HostFunction::new(140, [0, 1, 0, 2]), - write_local: HostFunction::new(141, [0, 1, 2, 3]), - add: HostFunction::new(100, [0, 1, 2, 3]), - new_uref: HostFunction::new(122, [0, 1, 2]), - load_named_keys: HostFunction::new(121, [0, 1]), - ret: HostFunction::new(133, [0, 1]), - get_key: HostFunction::new(113, [0, 1, 2, 3, 4]), - has_key: 
HostFunction::new(119, [0, 1]), - put_key: HostFunction::new(125, [0, 1, 2, 3]), - remove_key: HostFunction::new(132, [0, 1]), - revert: HostFunction::new(134, [0]), - is_valid_uref: HostFunction::new(120, [0, 1]), - add_associated_key: HostFunction::new(101, [0, 1, 2]), - remove_associated_key: HostFunction::new(129, [0, 1]), - update_associated_key: HostFunction::new(139, [0, 1, 2]), - set_action_threshold: HostFunction::new(135, [0, 1]), - get_caller: HostFunction::new(112, [0]), - get_blocktime: HostFunction::new(111, [0]), - create_purse: HostFunction::new(108, [0, 1]), - transfer_to_account: HostFunction::new(138, [0, 1, 2, 3, 4, 5, 6]), - transfer_from_purse_to_account: HostFunction::new(136, [0, 1, 2, 3, 4, 5, 6, 7, 8]), - transfer_from_purse_to_purse: HostFunction::new(137, [0, 1, 2, 3, 4, 5, 6, 7]), - get_balance: HostFunction::new(110, [0, 1, 2]), - get_phase: HostFunction::new(117, [0]), - get_system_contract: HostFunction::new(118, [0, 1, 2]), - get_main_purse: HostFunction::new(114, [0]), - read_host_buffer: HostFunction::new(126, [0, 1, 2]), - create_contract_package_at_hash: HostFunction::new(106, [0, 1]), - create_contract_user_group: HostFunction::new(107, [0, 1, 2, 3, 4, 5, 6, 7]), - add_contract_version: HostFunction::new(102, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), - disable_contract_version: HostFunction::new(109, [0, 1, 2, 3]), - call_contract: HostFunction::new(104, [0, 1, 2, 3, 4, 5, 6]), - call_versioned_contract: HostFunction::new(105, [0, 1, 2, 3, 4, 5, 6, 7, 8]), - get_named_arg_size: HostFunction::new(116, [0, 1, 2]), - get_named_arg: HostFunction::new(115, [0, 1, 2, 3]), - remove_contract_user_group: HostFunction::new(130, [0, 1, 2, 3]), - provision_contract_user_group_uref: HostFunction::new(124, [0, 1, 2, 3, 4]), - remove_contract_user_group_urefs: HostFunction::new(131, [0, 1, 2, 3, 4, 5]), - print: HostFunction::new(123, [0, 1]), - blake2b: HostFunction::new(133, [0, 1, 2, 3]), - }); - static EXPECTED_GENESIS_WASM_COSTS: Lazy = 
Lazy::new(|| { - WasmConfig::new( - 17, // initial_memory - 19, // max_stack_height - EXPECTED_GENESIS_COSTS, - EXPECTED_GENESIS_STORAGE_COSTS, - *EXPECTED_GENESIS_HOST_FUNCTION_COSTS, - ) - }); - - const EXPECTED_GENESIS_STORAGE_COSTS: StorageCosts = StorageCosts::new(101); - - const EXPECTED_GENESIS_COSTS: OpcodeCosts = OpcodeCosts { - bit: 13, - add: 14, - mul: 15, - div: 16, - load: 17, - store: 18, - op_const: 19, - local: 20, - global: 21, - control_flow: 22, - integer_comparison: 23, - conversion: 24, - unreachable: 25, - nop: 26, - current_memory: 27, - grow_memory: 28, - regular: 29, - }; - - fn check_spec(spec: Chainspec, is_first_version: bool) { - if is_first_version { - assert_eq!( - spec.protocol_config.version, - ProtocolVersion::from_parts(0, 9, 0) - ); - assert_eq!( - spec.protocol_config.activation_point.genesis_timestamp(), - Some(Timestamp::from(1600454700000)) - ); - assert_eq!(spec.network_config.accounts_config.accounts().len(), 4); - - let accounts: Vec<_> = { - let mut accounts = spec.network_config.accounts_config.accounts().to_vec(); - accounts.sort_by_key(|account_config| { - (account_config.balance(), account_config.bonded_amount()) - }); - accounts - }; - - for (index, account_config) in accounts.into_iter().enumerate() { - assert_eq!(account_config.balance(), Motes::new(U512::from(index + 1)),); - assert_eq!( - account_config.bonded_amount(), - Motes::new(U512::from((index as u64 + 1) * 10)) - ); - } - } else { - assert_eq!( - spec.protocol_config.version, - ProtocolVersion::from_parts(1, 0, 0) - ); - assert_eq!( - spec.protocol_config.activation_point.era_id(), - EraId::from(1) - ); - assert!(spec.network_config.accounts_config.accounts().is_empty()); - assert!(spec.protocol_config.global_state_update.is_some()); - for value in spec.protocol_config.global_state_update.unwrap().0.values() { - assert!(StoredValue::from_bytes(value).is_ok()); - } - } - - assert_eq!(spec.network_config.name, "test-chain"); - - 
assert_eq!(spec.core_config.era_duration, TimeDiff::from(180000)); - assert_eq!(spec.core_config.minimum_era_height, 9); - assert_eq!( - spec.highway_config.finality_threshold_fraction, - Ratio::new(2, 25) - ); - assert_eq!(spec.highway_config.minimum_round_exponent, 14); - assert_eq!(spec.highway_config.maximum_round_exponent, 19); - assert_eq!( - spec.highway_config.reduced_reward_multiplier, - Ratio::new(1, 5) - ); - - assert_eq!( - spec.deploy_config.max_payment_cost, - Motes::new(U512::from(9)) - ); - assert_eq!(spec.deploy_config.max_ttl, TimeDiff::from(26300160000)); - assert_eq!(spec.deploy_config.max_dependencies, 11); - assert_eq!(spec.deploy_config.max_block_size, 12); - assert_eq!(spec.deploy_config.block_max_deploy_count, 125); - assert_eq!(spec.deploy_config.block_gas_limit, 13); - - assert_eq!(spec.wasm_config, *EXPECTED_GENESIS_WASM_COSTS); - } - - #[ignore = "We probably need to reconsider our approach here"] - #[test] - fn check_bundled_spec() { - let chainspec = Chainspec::from_resources("test/valid/0_9_0"); - check_spec(chainspec, true); - let chainspec = Chainspec::from_resources("test/valid/1_0_0"); - check_spec(chainspec, false); - } - - #[test] - fn bytesrepr_roundtrip() { - let mut rng = crate::new_rng(); - let chainspec = Chainspec::random(&mut rng); - bytesrepr::test_serialization_roundtrip(&chainspec); - } - - #[ignore = "We probably need to reconsider our approach here"] - #[test] - fn should_have_deterministic_chainspec_hash() { - const PATH: &str = "test/valid/0_9_0"; - const PATH_UNORDERED: &str = "test/valid/0_9_0_unordered"; - - let accounts: Vec = { - let path = RESOURCES_PATH.join(PATH).join("accounts.toml"); - fs::read(path).expect("should read file") - }; - - let accounts_unordered: Vec = { - let path = RESOURCES_PATH.join(PATH_UNORDERED).join("accounts.toml"); - fs::read(path).expect("should read file") - }; - - // Different accounts.toml file content - assert_ne!(accounts, accounts_unordered); - - let chainspec = 
Chainspec::from_resources(PATH); - let chainspec_unordered = Chainspec::from_resources(PATH_UNORDERED); - - // Deserializes into equal objects - assert_eq!(chainspec, chainspec_unordered); - - // With equal hashes - assert_eq!(chainspec.hash(), chainspec_unordered.hash()); - } -} diff --git a/node/src/types/chainspec/accounts_config.rs b/node/src/types/chainspec/accounts_config.rs deleted file mode 100644 index ef5364b769..0000000000 --- a/node/src/types/chainspec/accounts_config.rs +++ /dev/null @@ -1,145 +0,0 @@ -//! The accounts config is a set of configuration options that is used to create accounts at -//! genesis, and set up auction contract with validators and delegators. -mod account_config; -mod delegator_config; -mod validator_config; - -use std::path::Path; - -use datasize::DataSize; -use serde::{Deserialize, Deserializer, Serialize}; - -use casper_execution_engine::core::engine_state::GenesisAccount; -use casper_types::bytesrepr::{self, FromBytes, ToBytes}; - -#[cfg(test)] -use crate::testing::TestRng; -use crate::utils::{self, Loadable}; - -use super::error::ChainspecAccountsLoadError; -pub use account_config::AccountConfig; -pub use delegator_config::DelegatorConfig; -pub use validator_config::ValidatorConfig; - -const CHAINSPEC_ACCOUNTS_FILENAME: &str = "accounts.toml"; - -fn sorted_vec_deserializer<'de, T, D>(deserializer: D) -> Result, D::Error> -where - T: Deserialize<'de> + Ord, - D: Deserializer<'de>, -{ - let mut vec = Vec::::deserialize(deserializer)?; - vec.sort_unstable(); - Ok(vec) -} - -#[derive(PartialEq, Eq, Serialize, Deserialize, DataSize, Debug, Clone)] -pub struct AccountsConfig { - #[serde(deserialize_with = "sorted_vec_deserializer")] - accounts: Vec, - #[serde(default, deserialize_with = "sorted_vec_deserializer")] - delegators: Vec, -} - -impl AccountsConfig { - pub fn new(accounts: Vec, delegators: Vec) -> Self { - Self { - accounts, - delegators, - } - } - - pub fn accounts(&self) -> &[AccountConfig] { - &self.accounts - } - - 
pub fn delegators(&self) -> &[DelegatorConfig] { - &self.delegators - } - - #[cfg(test)] - /// Generates a random instance using a `TestRng`. - pub fn random(rng: &mut TestRng) -> Self { - let alpha = AccountConfig::random(rng); - let accounts = vec![ - alpha.clone(), - AccountConfig::random(rng), - AccountConfig::random(rng), - AccountConfig::random(rng), - ]; - - let mut delegator = DelegatorConfig::random(rng); - delegator.validator_public_key = alpha.public_key; - - let delegators = vec![delegator]; - - AccountsConfig { - accounts, - delegators, - } - } -} - -impl ToBytes for AccountsConfig { - fn to_bytes(&self) -> Result, bytesrepr::Error> { - let mut buffer = bytesrepr::allocate_buffer(self)?; - buffer.extend(self.accounts.to_bytes()?); - buffer.extend(self.delegators.to_bytes()?); - Ok(buffer) - } - - fn serialized_length(&self) -> usize { - self.accounts.serialized_length() + self.delegators.serialized_length() - } -} - -impl FromBytes for AccountsConfig { - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { - let (accounts, remainder) = FromBytes::from_bytes(bytes)?; - let (delegators, remainder) = FromBytes::from_bytes(remainder)?; - let accounts_config = AccountsConfig::new(accounts, delegators); - Ok((accounts_config, remainder)) - } -} - -impl Loadable for AccountsConfig { - type Error = ChainspecAccountsLoadError; - - fn from_path>(path: P) -> Result { - let accounts_path = path.as_ref().join(CHAINSPEC_ACCOUNTS_FILENAME); - if !accounts_path.is_file() { - return Ok(AccountsConfig::new(vec![], vec![])); - } - let bytes = utils::read_file(accounts_path)?; - let toml_chainspec: AccountsConfig = toml::from_slice(&bytes)?; - Ok(toml_chainspec) - } -} - -impl From for Vec { - fn from(accounts_config: AccountsConfig) -> Self { - let mut genesis_accounts = Vec::with_capacity(accounts_config.accounts.len()); - for account_config in accounts_config.accounts { - let genesis_account = account_config.into(); - 
genesis_accounts.push(genesis_account); - } - for delegator_config in accounts_config.delegators { - let genesis_account = delegator_config.into(); - genesis_accounts.push(genesis_account); - } - - genesis_accounts - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn serialization_roundtrip() { - let mut rng = TestRng::new(); - let accounts_config = AccountsConfig::random(&mut rng); - bytesrepr::test_serialization_roundtrip(&accounts_config); - } -} diff --git a/node/src/types/chainspec/accounts_config/delegator_config.rs b/node/src/types/chainspec/accounts_config/delegator_config.rs deleted file mode 100644 index 00a4927eca..0000000000 --- a/node/src/types/chainspec/accounts_config/delegator_config.rs +++ /dev/null @@ -1,144 +0,0 @@ -use datasize::DataSize; -#[cfg(test)] -use rand::{distributions::Standard, prelude::*}; -use serde::{Deserialize, Serialize}; - -use casper_execution_engine::{core::engine_state::GenesisAccount, shared::motes::Motes}; -use casper_types::{ - bytesrepr::{self, FromBytes, ToBytes}, - PublicKey, -}; -#[cfg(test)] -use casper_types::{SecretKey, U512}; - -#[cfg(test)] -use crate::testing::TestRng; - -#[derive(PartialEq, Ord, PartialOrd, Eq, Serialize, Deserialize, DataSize, Debug, Clone)] -pub struct DelegatorConfig { - pub(super) validator_public_key: PublicKey, - delegator_public_key: PublicKey, - balance: Motes, - delegated_amount: Motes, -} - -impl DelegatorConfig { - pub fn new( - validator_public_key: PublicKey, - delegator_public_key: PublicKey, - balance: Motes, - delegated_amount: Motes, - ) -> Self { - Self { - validator_public_key, - delegator_public_key, - balance, - delegated_amount, - } - } - - pub fn validator_public_key(&self) -> &PublicKey { - &self.validator_public_key - } - - pub fn delegator_public_key(&self) -> &PublicKey { - &self.delegator_public_key - } - - pub fn balance(&self) -> Motes { - self.balance - } - - pub fn delegated_amount(&self) -> Motes { - self.delegated_amount - } - - #[cfg(test)] - 
/// Generates a random instance using a `TestRng`. - pub fn random(rng: &mut TestRng) -> Self { - let validator_public_key = - PublicKey::from(&SecretKey::ed25519_from_bytes(rng.gen::<[u8; 32]>()).unwrap()); - let delegator_public_key = - PublicKey::from(&SecretKey::ed25519_from_bytes(rng.gen::<[u8; 32]>()).unwrap()); - let balance = Motes::new(U512::from(rng.gen::())); - let delegated_amount = Motes::new(U512::from(rng.gen::())); - - DelegatorConfig { - validator_public_key, - delegator_public_key, - balance, - delegated_amount, - } - } -} - -#[cfg(test)] -impl Distribution for Standard { - fn sample(&self, rng: &mut R) -> DelegatorConfig { - let validator_public_key = SecretKey::ed25519_from_bytes(rng.gen::<[u8; 32]>()) - .unwrap() - .into(); - let delegator_public_key = SecretKey::ed25519_from_bytes(rng.gen::<[u8; 32]>()) - .unwrap() - .into(); - - let mut u512_array = [0u8; 64]; - rng.fill_bytes(u512_array.as_mut()); - let balance = Motes::new(U512::from(u512_array)); - - rng.fill_bytes(u512_array.as_mut()); - let delegated_amount = Motes::new(U512::from(u512_array)); - - DelegatorConfig::new( - validator_public_key, - delegator_public_key, - balance, - delegated_amount, - ) - } -} - -impl ToBytes for DelegatorConfig { - fn to_bytes(&self) -> Result, bytesrepr::Error> { - let mut buffer = bytesrepr::allocate_buffer(self)?; - buffer.extend(self.validator_public_key.to_bytes()?); - buffer.extend(self.delegator_public_key.to_bytes()?); - buffer.extend(self.balance.to_bytes()?); - buffer.extend(self.delegated_amount.to_bytes()?); - Ok(buffer) - } - - fn serialized_length(&self) -> usize { - self.validator_public_key.serialized_length() - + self.delegator_public_key.serialized_length() - + self.balance.serialized_length() - + self.delegated_amount.serialized_length() - } -} - -impl FromBytes for DelegatorConfig { - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { - let (validator_public_key, remainder) = FromBytes::from_bytes(bytes)?; - let 
(delegator_public_key, remainder) = FromBytes::from_bytes(remainder)?; - let (balance, remainder) = FromBytes::from_bytes(remainder)?; - let (delegated_amount, remainder) = FromBytes::from_bytes(remainder)?; - let delegator_config = DelegatorConfig { - validator_public_key, - delegator_public_key, - balance, - delegated_amount, - }; - Ok((delegator_config, remainder)) - } -} - -impl From for GenesisAccount { - fn from(delegator_config: DelegatorConfig) -> Self { - GenesisAccount::delegator( - delegator_config.validator_public_key, - delegator_config.delegator_public_key, - delegator_config.balance, - delegator_config.delegated_amount, - ) - } -} diff --git a/node/src/types/chainspec/core_config.rs b/node/src/types/chainspec/core_config.rs deleted file mode 100644 index 4d10758a33..0000000000 --- a/node/src/types/chainspec/core_config.rs +++ /dev/null @@ -1,125 +0,0 @@ -use datasize::DataSize; -use num::rational::Ratio; -#[cfg(test)] -use rand::Rng; -use serde::{Deserialize, Serialize}; - -use casper_types::bytesrepr::{self, FromBytes, ToBytes}; - -#[cfg(test)] -use crate::testing::TestRng; -use crate::types::TimeDiff; - -#[derive(Copy, Clone, DataSize, PartialEq, Eq, Serialize, Deserialize, Debug)] -// Disallow unknown fields to ensure config files and command-line overrides contain valid keys. -#[serde(deny_unknown_fields)] -pub struct CoreConfig { - pub(crate) era_duration: TimeDiff, - pub(crate) minimum_era_height: u64, - pub(crate) validator_slots: u32, - /// Number of eras before an auction actually defines the set of validators. - /// If you bond with a sufficient bid in era N, you will be a validator in era N + - /// auction_delay + 1 - pub(crate) auction_delay: u64, - /// The period after genesis during which a genesis validator's bid is locked. - pub(crate) locked_funds_period: TimeDiff, - /// The delay in number of eras for paying out the the unbonding amount. 
- pub(crate) unbonding_delay: u64, - /// Round seigniorage rate represented as a fractional number. - #[data_size(skip)] - pub(crate) round_seigniorage_rate: Ratio, -} - -#[cfg(test)] -impl CoreConfig { - /// Generates a random instance using a `TestRng`. - pub fn random(rng: &mut TestRng) -> Self { - let era_duration = TimeDiff::from(rng.gen_range(600_000..604_800_000)); - let minimum_era_height = rng.gen_range(5..100); - let validator_slots = rng.gen(); - let auction_delay = rng.gen::() as u64; - let locked_funds_period = TimeDiff::from(rng.gen_range(600_000..604_800_000)); - let unbonding_delay = rng.gen_range(1..1_000_000_000); - let round_seigniorage_rate = Ratio::new( - rng.gen_range(1..1_000_000_000), - rng.gen_range(1..1_000_000_000), - ); - - CoreConfig { - era_duration, - minimum_era_height, - validator_slots, - auction_delay, - locked_funds_period, - unbonding_delay, - round_seigniorage_rate, - } - } -} - -impl ToBytes for CoreConfig { - fn to_bytes(&self) -> Result, bytesrepr::Error> { - let mut buffer = bytesrepr::allocate_buffer(self)?; - buffer.extend(self.era_duration.to_bytes()?); - buffer.extend(self.minimum_era_height.to_bytes()?); - buffer.extend(self.validator_slots.to_bytes()?); - buffer.extend(self.auction_delay.to_bytes()?); - buffer.extend(self.locked_funds_period.to_bytes()?); - buffer.extend(self.unbonding_delay.to_bytes()?); - buffer.extend(self.round_seigniorage_rate.to_bytes()?); - Ok(buffer) - } - - fn serialized_length(&self) -> usize { - self.era_duration.serialized_length() - + self.minimum_era_height.serialized_length() - + self.validator_slots.serialized_length() - + self.auction_delay.serialized_length() - + self.locked_funds_period.serialized_length() - + self.unbonding_delay.serialized_length() - + self.round_seigniorage_rate.serialized_length() - } -} - -impl FromBytes for CoreConfig { - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { - let (era_duration, remainder) = TimeDiff::from_bytes(bytes)?; - 
let (minimum_era_height, remainder) = u64::from_bytes(remainder)?; - let (validator_slots, remainder) = u32::from_bytes(remainder)?; - let (auction_delay, remainder) = u64::from_bytes(remainder)?; - let (locked_funds_period, remainder) = TimeDiff::from_bytes(remainder)?; - let (unbonding_delay, remainder) = u64::from_bytes(remainder)?; - let (round_seigniorage_rate, remainder) = Ratio::::from_bytes(remainder)?; - let config = CoreConfig { - era_duration, - minimum_era_height, - validator_slots, - auction_delay, - locked_funds_period, - unbonding_delay, - round_seigniorage_rate, - }; - Ok((config, remainder)) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn bytesrepr_roundtrip() { - let mut rng = crate::new_rng(); - let config = CoreConfig::random(&mut rng); - bytesrepr::test_serialization_roundtrip(&config); - } - - #[test] - fn toml_roundtrip() { - let mut rng = crate::new_rng(); - let config = CoreConfig::random(&mut rng); - let encoded = toml::to_string_pretty(&config).unwrap(); - let decoded = toml::from_str(&encoded).unwrap(); - assert_eq!(config, decoded); - } -} diff --git a/node/src/types/chainspec/deploy_config.rs b/node/src/types/chainspec/deploy_config.rs deleted file mode 100644 index 89e19e9c7e..0000000000 --- a/node/src/types/chainspec/deploy_config.rs +++ /dev/null @@ -1,166 +0,0 @@ -#[cfg(test)] -use std::str::FromStr; - -use datasize::DataSize; -#[cfg(test)] -use num_traits::Zero; -#[cfg(test)] -use rand::Rng; -use serde::{Deserialize, Serialize}; - -#[cfg(test)] -use casper_execution_engine::core::engine_state::MAX_PAYMENT_AMOUNT; -use casper_execution_engine::shared::motes::Motes; -use casper_types::{ - bytesrepr::{self, FromBytes, ToBytes}, - U512, -}; - -#[cfg(test)] -use crate::testing::TestRng; -use crate::types::TimeDiff; - -#[derive(Copy, Clone, DataSize, PartialEq, Eq, Serialize, Deserialize, Debug)] -// Disallow unknown fields to ensure config files and command-line overrides contain valid keys. 
-#[serde(deny_unknown_fields)] -pub struct DeployConfig { - pub(crate) max_payment_cost: Motes, - pub(crate) max_ttl: TimeDiff, - pub(crate) max_dependencies: u8, - pub(crate) max_block_size: u32, - pub(crate) block_max_deploy_count: u32, - pub(crate) block_max_transfer_count: u32, - pub(crate) block_gas_limit: u64, - pub(crate) payment_args_max_length: u32, - pub(crate) session_args_max_length: u32, - pub(crate) native_transfer_minimum_motes: u64, -} - -#[cfg(test)] -impl DeployConfig { - /// Generates a random instance using a `TestRng`. - pub fn random(rng: &mut TestRng) -> Self { - let max_payment_cost = Motes::new(U512::from(rng.gen_range(1_000_000..1_000_000_000))); - let max_ttl = TimeDiff::from(rng.gen_range(60_000..3_600_000)); - let max_dependencies = rng.gen(); - let max_block_size = rng.gen_range(1_000_000..1_000_000_000); - let block_max_deploy_count = rng.gen(); - let block_max_transfer_count = rng.gen(); - let block_gas_limit = rng.gen_range(100_000_000_000..1_000_000_000_000_000); - let payment_args_max_length = rng.gen(); - let session_args_max_length = rng.gen(); - let native_transfer_minimum_motes = - rng.gen_range(MAX_PAYMENT_AMOUNT..1_000_000_000_000_000); - - DeployConfig { - max_payment_cost, - max_ttl, - max_dependencies, - max_block_size, - block_max_deploy_count, - block_max_transfer_count, - block_gas_limit, - payment_args_max_length, - session_args_max_length, - native_transfer_minimum_motes, - } - } -} - -#[cfg(test)] -impl Default for DeployConfig { - fn default() -> Self { - DeployConfig { - max_payment_cost: Motes::zero(), - max_ttl: TimeDiff::from_str("1day").unwrap(), - max_dependencies: 10, - max_block_size: 10_485_760, - block_max_deploy_count: 10, - block_max_transfer_count: 1000, - block_gas_limit: 10_000_000_000_000, - payment_args_max_length: 1024, - session_args_max_length: 1024, - native_transfer_minimum_motes: MAX_PAYMENT_AMOUNT, - } - } -} - -impl ToBytes for DeployConfig { - fn to_bytes(&self) -> Result, 
bytesrepr::Error> { - let mut buffer = bytesrepr::allocate_buffer(self)?; - buffer.extend(self.max_payment_cost.value().to_bytes()?); - buffer.extend(self.max_ttl.to_bytes()?); - buffer.extend(self.max_dependencies.to_bytes()?); - buffer.extend(self.max_block_size.to_bytes()?); - buffer.extend(self.block_max_deploy_count.to_bytes()?); - buffer.extend(self.block_max_transfer_count.to_bytes()?); - buffer.extend(self.block_gas_limit.to_bytes()?); - buffer.extend(self.payment_args_max_length.to_bytes()?); - buffer.extend(self.session_args_max_length.to_bytes()?); - buffer.extend(self.native_transfer_minimum_motes.to_bytes()?); - Ok(buffer) - } - - fn serialized_length(&self) -> usize { - self.max_payment_cost.value().serialized_length() - + self.max_ttl.serialized_length() - + self.max_dependencies.serialized_length() - + self.max_block_size.serialized_length() - + self.block_max_deploy_count.serialized_length() - + self.block_max_transfer_count.serialized_length() - + self.block_gas_limit.serialized_length() - + self.payment_args_max_length.serialized_length() - + self.session_args_max_length.serialized_length() - + self.native_transfer_minimum_motes.serialized_length() - } -} - -impl FromBytes for DeployConfig { - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { - let (max_payment_cost, remainder) = U512::from_bytes(bytes)?; - let max_payment_cost = Motes::new(max_payment_cost); - let (max_ttl, remainder) = TimeDiff::from_bytes(remainder)?; - let (max_dependencies, remainder) = u8::from_bytes(remainder)?; - let (max_block_size, remainder) = u32::from_bytes(remainder)?; - let (block_max_deploy_count, remainder) = u32::from_bytes(remainder)?; - let (block_max_transfer_count, remainder) = u32::from_bytes(remainder)?; - let (block_gas_limit, remainder) = u64::from_bytes(remainder)?; - let (payment_args_max_length, remainder) = u32::from_bytes(remainder)?; - let (session_args_max_length, remainder) = u32::from_bytes(remainder)?; - let 
(native_transfer_minimum_motes, remainder) = u64::from_bytes(remainder)?; - let config = DeployConfig { - max_payment_cost, - max_ttl, - max_dependencies, - max_block_size, - block_max_deploy_count, - block_max_transfer_count, - block_gas_limit, - payment_args_max_length, - session_args_max_length, - native_transfer_minimum_motes, - }; - Ok((config, remainder)) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn bytesrepr_roundtrip() { - let mut rng = crate::new_rng(); - let config = DeployConfig::random(&mut rng); - bytesrepr::test_serialization_roundtrip(&config); - } - - #[test] - fn toml_roundtrip() { - let mut rng = crate::new_rng(); - let config = DeployConfig::random(&mut rng); - let encoded = toml::to_string_pretty(&config).unwrap(); - let decoded = toml::from_str(&encoded).unwrap(); - assert_eq!(config, decoded); - } -} diff --git a/node/src/types/chainspec/error.rs b/node/src/types/chainspec/error.rs deleted file mode 100644 index 8e8eb8e019..0000000000 --- a/node/src/types/chainspec/error.rs +++ /dev/null @@ -1,100 +0,0 @@ -use std::{io, path::PathBuf}; - -use thiserror::Error; -use uint::FromDecStrErr; - -use casper_types::account::ACCOUNT_HASH_LENGTH; - -use crate::utils::ReadFileError; - -/// Error returned by the ChainspecLoader. -#[derive(Debug, Error)] -pub enum Error { - /// Error while decoding the chainspec from TOML format. - #[error("decoding from TOML error: {0}")] - DecodingFromToml(#[from] toml::de::Error), - - /// Error while decoding Motes from a decimal format. - #[error("decoding motes from base-10 error: {0}")] - DecodingMotes(#[from] FromDecStrErr), - - /// Error loading the chainspec. - #[error("could not load chainspec: {0}")] - LoadChainspec(ReadFileError), - - /// Error loading the upgrade point. - #[error("could not load upgrade point: {0}")] - LoadUpgradePoint(ReadFileError), - - /// Error loading the chainspec accounts. 
- #[error("could not load chainspec accounts: {0}")] - LoadChainspecAccounts(#[from] ChainspecAccountsLoadError), - - /// Error loading the global state update. - #[error("could not load the global state update: {0}")] - LoadGlobalStateUpgrade(#[from] GlobalStateUpdateLoadError), - - /// Failed to read the given directory. - #[error("failed to read dir {}: {error}", dir.display())] - ReadDir { - /// The directory which could not be read. - dir: PathBuf, - /// The underlying error. - error: io::Error, - }, - - /// No subdirectory representing a semver version was found in the given directory. - #[error("failed to get a valid version from subdirs in {}", dir.display())] - NoVersionSubdirFound { - /// The searched directory. - dir: PathBuf, - }, -} - -/// Error loading chainspec accounts file. -#[derive(Debug, Error)] -pub enum ChainspecAccountsLoadError { - /// Error loading the accounts file. - #[error("could not load accounts: {0}")] - LoadAccounts(#[from] ReadFileError), - - /// Error while decoding the chainspec accounts from TOML format. - #[error("decoding from TOML error: {0}")] - DecodingFromToml(#[from] toml::de::Error), - - /// Error while decoding a chainspec account's key hash from hex format. - #[error("decoding from hex error: {0}")] - DecodingFromHex(#[from] hex::FromHexError), - - /// Error while decoding Motes from a decimal format. - #[error("decoding motes from base-10 error: {0}")] - DecodingMotes(#[from] FromDecStrErr), - - /// Decoding a chainspec account's key hash yielded an invalid length byte array. - #[error("expected hash length of {}, got {0}", ACCOUNT_HASH_LENGTH)] - InvalidHashLength(usize), - - /// Error while decoding a chainspec account's key hash from base-64 format. - #[error("crypto module error: {0}")] - Crypto(#[from] crate::crypto::Error), -} - -/// Error loading global state update file. -#[derive(Debug, Error)] -pub enum GlobalStateUpdateLoadError { - /// Error loading the accounts file. 
- #[error("could not load the file: {0}")] - LoadFile(#[from] ReadFileError), - - /// Error while decoding the chainspec accounts from TOML format. - #[error("decoding from TOML error: {0}")] - DecodingFromToml(#[from] toml::de::Error), - - /// Error while decoding a serialized value from a base64 encoded string. - #[error("decoding from base64 error: {0}")] - DecodingFromBase64(#[from] base64::DecodeError), - - /// Error while decoding a key from formatted string. - #[error("decoding from formatted string error: {0}")] - DecodingKeyFromStr(String), -} diff --git a/node/src/types/chainspec/global_state_update.rs b/node/src/types/chainspec/global_state_update.rs deleted file mode 100644 index 52251d2de4..0000000000 --- a/node/src/types/chainspec/global_state_update.rs +++ /dev/null @@ -1,107 +0,0 @@ -use std::{collections::BTreeMap, convert::TryFrom, path::Path}; - -use datasize::DataSize; -#[cfg(test)] -use rand::Rng; -use serde::{Deserialize, Serialize}; - -use casper_types::{ - bytesrepr::{self, Bytes, FromBytes, ToBytes}, - Key, -}; - -use super::error::GlobalStateUpdateLoadError; - -#[cfg(test)] -use crate::testing::TestRng; -use crate::utils::{self, Loadable}; - -const GLOBAL_STATE_UPDATE_FILENAME: &str = "global_state.toml"; - -#[derive(PartialEq, Eq, Serialize, Deserialize, DataSize, Debug, Clone)] -pub struct GlobalStateUpdateEntry { - key: String, - value: String, -} - -#[derive(PartialEq, Eq, Serialize, Deserialize, DataSize, Debug, Clone)] -pub struct GlobalStateUpdateConfig { - entries: Vec, -} - -impl Loadable for Option { - type Error = GlobalStateUpdateLoadError; - - fn from_path>(path: P) -> Result { - let update_path = path.as_ref().join(GLOBAL_STATE_UPDATE_FILENAME); - if !update_path.is_file() { - return Ok(None); - } - let bytes = utils::read_file(update_path)?; - let toml_update: GlobalStateUpdateConfig = toml::from_slice(&bytes)?; - Ok(Some(toml_update)) - } -} - -/// Type storing the information about modifications to be applied to the global 
state. -/// It stores the serialized `StoredValue`s corresponding to keys to be modified. -#[derive(Clone, PartialEq, Eq, Serialize, Deserialize, DataSize, Debug)] -pub struct GlobalStateUpdate(pub(crate) BTreeMap); - -impl ToBytes for GlobalStateUpdate { - fn to_bytes(&self) -> Result, bytesrepr::Error> { - self.0.to_bytes() - } - - fn serialized_length(&self) -> usize { - self.0.serialized_length() - } -} - -#[cfg(test)] -impl GlobalStateUpdate { - fn random(rng: &mut TestRng) -> Self { - let entries = rng.gen_range(0..10); - let mut map = BTreeMap::new(); - for _ in 0..entries { - map.insert(rng.gen(), rng.gen()); - } - Self(map) - } -} - -impl FromBytes for GlobalStateUpdate { - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { - let (update, remainder) = BTreeMap::::from_bytes(bytes)?; - let global_state_update = GlobalStateUpdate(update); - Ok((global_state_update, remainder)) - } -} - -impl TryFrom for GlobalStateUpdate { - type Error = GlobalStateUpdateLoadError; - - fn try_from(config: GlobalStateUpdateConfig) -> Result { - let mut map = BTreeMap::new(); - for entry in config.entries.into_iter() { - let key = Key::from_formatted_str(&entry.key).map_err(|err| { - GlobalStateUpdateLoadError::DecodingKeyFromStr(format!("{}", err)) - })?; - let value = base64::decode(&entry.value)?.into(); - let _ = map.insert(key, value); - } - Ok(GlobalStateUpdate(map)) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn global_state_update_bytesrepr_roundtrip() { - let mut rng = crate::new_rng(); - let update = GlobalStateUpdate::random(&mut rng); - bytesrepr::test_serialization_roundtrip(&update); - } -} diff --git a/node/src/types/chainspec/highway_config.rs b/node/src/types/chainspec/highway_config.rs deleted file mode 100644 index dabb6f5068..0000000000 --- a/node/src/types/chainspec/highway_config.rs +++ /dev/null @@ -1,142 +0,0 @@ -use datasize::DataSize; -use num::rational::Ratio; -#[cfg(test)] -use rand::Rng; -use 
serde::{Deserialize, Serialize}; - -use casper_types::bytesrepr::{self, FromBytes, ToBytes}; - -#[cfg(test)] -use crate::testing::TestRng; -#[cfg(not(feature = "fast-sync"))] -use crate::types::TimeDiff; - -#[derive(Copy, Clone, DataSize, PartialEq, Eq, Serialize, Deserialize, Debug)] -// Disallow unknown fields to ensure config files and command-line overrides contain valid keys. -#[serde(deny_unknown_fields)] -pub(crate) struct HighwayConfig { - #[data_size(skip)] - pub(crate) finality_threshold_fraction: Ratio, - pub(crate) minimum_round_exponent: u8, - pub(crate) maximum_round_exponent: u8, - /// The factor by which rewards for a round are multiplied if the greatest summit has ≤50% - /// quorum, i.e. no finality. - #[data_size(skip)] - pub(crate) reduced_reward_multiplier: Ratio, -} - -impl HighwayConfig { - /// Checks whether the values set in the config make sense panics if they don't. - pub fn validate_config(&self) { - if self.minimum_round_exponent > self.maximum_round_exponent { - panic!( - "Minimum round exponent is greater than the maximum round exponent.\n\ - Minimum round exponent: {min},\n\ - Maximum round exponent: {max}", - min = self.minimum_round_exponent, - max = self.maximum_round_exponent - ); - } - - if self.finality_threshold_fraction <= Ratio::new(0, 1) - || self.finality_threshold_fraction >= Ratio::new(1, 1) - { - panic!( - "Finality threshold fraction is not in the range (0, 1)! Finality threshold: {ftt}", - ftt = self.finality_threshold_fraction - ); - } - - if self.reduced_reward_multiplier > Ratio::new(1, 1) { - panic!( - "Reduced reward multiplier is not in the range [0, 1]! Multiplier: {rrm}", - rrm = self.reduced_reward_multiplier - ); - } - } - - /// Returns the length of the longest allowed round. - #[cfg(not(feature = "fast-sync"))] - pub fn max_round_length(&self) -> TimeDiff { - TimeDiff::from(1 << self.maximum_round_exponent) - } - - /// Returns the length of the shortest allowed round. 
- #[cfg(not(feature = "fast-sync"))] - pub fn min_round_length(&self) -> TimeDiff { - TimeDiff::from(1 << self.minimum_round_exponent) - } -} - -#[cfg(test)] -impl HighwayConfig { - /// Generates a random instance using a `TestRng`. - pub fn random(rng: &mut TestRng) -> Self { - let finality_threshold_fraction = Ratio::new(rng.gen_range(1..100), 100); - let minimum_round_exponent = rng.gen_range(0..16); - let maximum_round_exponent = rng.gen_range(16..22); - let reduced_reward_multiplier = Ratio::new(rng.gen_range(0..10), 10); - - HighwayConfig { - finality_threshold_fraction, - minimum_round_exponent, - maximum_round_exponent, - reduced_reward_multiplier, - } - } -} - -impl ToBytes for HighwayConfig { - fn to_bytes(&self) -> Result, bytesrepr::Error> { - let mut buffer = bytesrepr::allocate_buffer(self)?; - buffer.extend(self.finality_threshold_fraction.to_bytes()?); - buffer.extend(self.minimum_round_exponent.to_bytes()?); - buffer.extend(self.maximum_round_exponent.to_bytes()?); - buffer.extend(self.reduced_reward_multiplier.to_bytes()?); - Ok(buffer) - } - - fn serialized_length(&self) -> usize { - self.finality_threshold_fraction.serialized_length() - + self.minimum_round_exponent.serialized_length() - + self.maximum_round_exponent.serialized_length() - + self.reduced_reward_multiplier.serialized_length() - } -} - -impl FromBytes for HighwayConfig { - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { - let (finality_threshold_fraction, remainder) = Ratio::::from_bytes(bytes)?; - let (minimum_round_exponent, remainder) = u8::from_bytes(remainder)?; - let (maximum_round_exponent, remainder) = u8::from_bytes(remainder)?; - let (reduced_reward_multiplier, remainder) = Ratio::::from_bytes(remainder)?; - let config = HighwayConfig { - finality_threshold_fraction, - minimum_round_exponent, - maximum_round_exponent, - reduced_reward_multiplier, - }; - Ok((config, remainder)) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn 
bytesrepr_roundtrip() { - let mut rng = crate::new_rng(); - let config = HighwayConfig::random(&mut rng); - bytesrepr::test_serialization_roundtrip(&config); - } - - #[test] - fn toml_roundtrip() { - let mut rng = crate::new_rng(); - let config = HighwayConfig::random(&mut rng); - let encoded = toml::to_string_pretty(&config).unwrap(); - let decoded = toml::from_str(&encoded).unwrap(); - assert_eq!(config, decoded); - } -} diff --git a/node/src/types/chainspec/network_config.rs b/node/src/types/chainspec/network_config.rs deleted file mode 100644 index b35bdbee84..0000000000 --- a/node/src/types/chainspec/network_config.rs +++ /dev/null @@ -1,103 +0,0 @@ -use datasize::DataSize; -#[cfg(test)] -use rand::Rng; -use serde::Serialize; - -use casper_execution_engine::shared::motes::Motes; -use casper_types::{ - bytesrepr::{self, FromBytes, ToBytes}, - PublicKey, -}; - -use super::AccountsConfig; -#[cfg(test)] -use crate::testing::TestRng; - -#[derive(Clone, DataSize, PartialEq, Eq, Serialize, Debug)] -pub struct NetworkConfig { - /// The network name. - pub(crate) name: String, - /// The maximum size of an accepted network message, in bytes. - pub(crate) maximum_net_message_size: u32, - /// Validator accounts specified in the chainspec. - // Note: `accounts_config` must be the last field on this struct due to issues in the TOML - // crate - see . - pub(crate) accounts_config: AccountsConfig, -} - -impl NetworkConfig { - /// Returns a vector of chainspec validators' public key and their stake. - pub fn chainspec_validator_stakes(&self) -> Vec<(PublicKey, Motes)> { - self.accounts_config - .accounts() - .iter() - .filter_map(|account_config| { - if account_config.is_genesis_validator() { - Some((account_config.public_key(), account_config.bonded_amount())) - } else { - None - } - }) - .collect() - } -} - -#[cfg(test)] -impl NetworkConfig { - /// Generates a random instance using a `TestRng`. 
- pub fn random(rng: &mut TestRng) -> Self { - let name = rng.gen::().to_string(); - let accounts = vec![rng.gen(), rng.gen(), rng.gen(), rng.gen(), rng.gen()]; - let delegators = vec![rng.gen(), rng.gen(), rng.gen(), rng.gen(), rng.gen()]; - let accounts_config = AccountsConfig::new(accounts, delegators); - let maximum_net_message_size = 4 + rng.gen_range(0..4); - - NetworkConfig { - name, - accounts_config, - maximum_net_message_size, - } - } -} - -impl ToBytes for NetworkConfig { - fn to_bytes(&self) -> Result, bytesrepr::Error> { - let mut buffer = bytesrepr::allocate_buffer(self)?; - buffer.extend(self.name.to_bytes()?); - buffer.extend(self.accounts_config.to_bytes()?); - buffer.extend(self.maximum_net_message_size.to_bytes()?); - Ok(buffer) - } - - fn serialized_length(&self) -> usize { - self.name.serialized_length() - + self.accounts_config.serialized_length() - + self.maximum_net_message_size.serialized_length() - } -} - -impl FromBytes for NetworkConfig { - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { - let (name, remainder) = String::from_bytes(bytes)?; - let (accounts_config, remainder) = FromBytes::from_bytes(remainder)?; - let (maximum_net_message_size, remainder) = FromBytes::from_bytes(remainder)?; - let config = NetworkConfig { - name, - accounts_config, - maximum_net_message_size, - }; - Ok((config, remainder)) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn bytesrepr_roundtrip() { - let mut rng = crate::new_rng(); - let config = NetworkConfig::random(&mut rng); - bytesrepr::test_serialization_roundtrip(&config); - } -} diff --git a/node/src/types/chainspec/parse_toml.rs b/node/src/types/chainspec/parse_toml.rs deleted file mode 100644 index e6680667ed..0000000000 --- a/node/src/types/chainspec/parse_toml.rs +++ /dev/null @@ -1,121 +0,0 @@ -//! Helper struct and function for parsing a chainspec configuration file into its respective domain -//! object. -//! -//! 
This is necessary because the `network_config` field of the `Chainspec` struct contains a `Vec` -//! of chainspec accounts, but the chainspec toml file contains a path to a further file which -//! contains the accounts' details, rather than the chainspec file containing the accounts' details -//! itself. - -use std::{convert::TryFrom, path::Path}; - -use serde::{Deserialize, Serialize}; - -use casper_execution_engine::shared::{system_config::SystemConfig, wasm_config::WasmConfig}; -use casper_types::ProtocolVersion; - -use super::{ - accounts_config::AccountsConfig, global_state_update::GlobalStateUpdateConfig, ActivationPoint, - Chainspec, CoreConfig, DeployConfig, Error, GlobalStateUpdate, HighwayConfig, NetworkConfig, - ProtocolConfig, -}; -use crate::utils::{self, Loadable}; - -#[derive(PartialEq, Eq, Serialize, Deserialize, Debug)] -// Disallow unknown fields to ensure config files and command-line overrides contain valid keys. -#[serde(deny_unknown_fields)] -struct TomlNetwork { - name: String, - maximum_net_message_size: u32, -} - -#[derive(PartialEq, Eq, Serialize, Deserialize, Debug)] -// Disallow unknown fields to ensure config files and command-line overrides contain valid keys. -#[serde(deny_unknown_fields)] -struct TomlProtocol { - version: ProtocolVersion, - hard_reset: bool, - activation_point: ActivationPoint, -} - -/// A chainspec configuration as laid out in the TOML-encoded configuration file. -#[derive(PartialEq, Eq, Serialize, Deserialize, Debug)] -// Disallow unknown fields to ensure config files and command-line overrides contain valid keys. 
-#[serde(deny_unknown_fields)] -pub(super) struct TomlChainspec { - protocol: TomlProtocol, - network: TomlNetwork, - core: CoreConfig, - deploys: DeployConfig, - highway: HighwayConfig, - wasm: WasmConfig, - system_costs: SystemConfig, -} - -impl From<&Chainspec> for TomlChainspec { - fn from(chainspec: &Chainspec) -> Self { - let protocol = TomlProtocol { - version: chainspec.protocol_config.version, - hard_reset: chainspec.protocol_config.hard_reset, - activation_point: chainspec.protocol_config.activation_point, - }; - let network = TomlNetwork { - name: chainspec.network_config.name.clone(), - maximum_net_message_size: chainspec.network_config.maximum_net_message_size, - }; - let core = chainspec.core_config; - let deploys = chainspec.deploy_config; - let highway = chainspec.highway_config; - let wasm = chainspec.wasm_config; - let system_costs = chainspec.system_costs_config; - - TomlChainspec { - protocol, - network, - core, - deploys, - highway, - wasm, - system_costs, - } - } -} - -pub(super) fn parse_toml>(chainspec_path: P) -> Result { - let bytes = utils::read_file(chainspec_path.as_ref()).map_err(Error::LoadChainspec)?; - let toml_chainspec: TomlChainspec = toml::from_slice(&bytes)?; - - let root = chainspec_path - .as_ref() - .parent() - .unwrap_or_else(|| Path::new("")); - - // accounts.toml must live in the same directory as chainspec.toml. - let accounts_config = AccountsConfig::from_path(root)?; - - let network_config = NetworkConfig { - name: toml_chainspec.network.name, - accounts_config, - maximum_net_message_size: toml_chainspec.network.maximum_net_message_size, - }; - - let global_state_update = Option::::from_path(root)? 
- .map(GlobalStateUpdate::try_from) - .transpose()?; - - let protocol_config = ProtocolConfig { - version: toml_chainspec.protocol.version, - hard_reset: toml_chainspec.protocol.hard_reset, - activation_point: toml_chainspec.protocol.activation_point, - global_state_update, - }; - - Ok(Chainspec { - protocol_config, - network_config, - core_config: toml_chainspec.core, - deploy_config: toml_chainspec.deploys, - highway_config: toml_chainspec.highway, - wasm_config: toml_chainspec.wasm, - system_costs_config: toml_chainspec.system_costs, - }) -} diff --git a/node/src/types/chainspec/protocol_config.rs b/node/src/types/chainspec/protocol_config.rs deleted file mode 100644 index 1ec8058d8a..0000000000 --- a/node/src/types/chainspec/protocol_config.rs +++ /dev/null @@ -1,116 +0,0 @@ -// TODO - remove once schemars stops causing warning. -#![allow(clippy::field_reassign_with_default)] - -use std::str::FromStr; - -use datasize::DataSize; -#[cfg(test)] -use rand::Rng; -use serde::{Deserialize, Serialize}; - -use casper_types::{ - bytesrepr::{self, FromBytes, ToBytes}, - ProtocolVersion, -}; - -use super::{ActivationPoint, GlobalStateUpdate}; -#[cfg(test)] -use crate::testing::TestRng; - -#[derive(Clone, PartialEq, Eq, Serialize, Deserialize, DataSize, Debug)] -pub struct ProtocolConfig { - #[data_size(skip)] - pub(crate) version: ProtocolVersion, - /// Whether we need to clear latest blocks back to the switch block just before the activation - /// point or not. - pub(crate) hard_reset: bool, - /// This protocol config applies starting at the era specified in the activation point. - pub(crate) activation_point: ActivationPoint, - /// Any arbitrary updates we might want to make to the global state at the start of the era - /// specified in the activation point. - pub(crate) global_state_update: Option, -} - -#[cfg(test)] -impl ProtocolConfig { - /// Generates a random instance using a `TestRng`. 
- pub fn random(rng: &mut TestRng) -> Self { - let protocol_version = ProtocolVersion::from_parts( - rng.gen_range(0..10), - rng.gen::() as u32, - rng.gen::() as u32, - ); - let activation_point = ActivationPoint::random(rng); - - ProtocolConfig { - version: protocol_version, - hard_reset: rng.gen(), - activation_point, - global_state_update: None, - } - } -} - -impl ToBytes for ProtocolConfig { - fn to_bytes(&self) -> Result, bytesrepr::Error> { - let mut buffer = bytesrepr::allocate_buffer(self)?; - buffer.extend(self.version.to_string().to_bytes()?); - buffer.extend(self.hard_reset.to_bytes()?); - buffer.extend(self.activation_point.to_bytes()?); - buffer.extend(self.global_state_update.to_bytes()?); - Ok(buffer) - } - - fn serialized_length(&self) -> usize { - self.version.to_string().serialized_length() - + self.hard_reset.serialized_length() - + self.activation_point.serialized_length() - + self.global_state_update.serialized_length() - } -} - -impl FromBytes for ProtocolConfig { - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { - let (protocol_version_string, remainder) = String::from_bytes(bytes)?; - let protocol_version = ProtocolVersion::from_str(&protocol_version_string) - .map_err(|_| bytesrepr::Error::Formatting)?; - let (hard_reset, remainder) = bool::from_bytes(remainder)?; - let (activation_point, remainder) = ActivationPoint::from_bytes(remainder)?; - let (global_state_update, remainder) = Option::::from_bytes(remainder)?; - let protocol_config = ProtocolConfig { - version: protocol_version, - activation_point, - global_state_update, - hard_reset, - }; - Ok((protocol_config, remainder)) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn activation_point_bytesrepr_roundtrip() { - let mut rng = crate::new_rng(); - let activation_point = ActivationPoint::random(&mut rng); - bytesrepr::test_serialization_roundtrip(&activation_point); - } - - #[test] - fn protocol_config_bytesrepr_roundtrip() { - let mut rng = 
crate::new_rng(); - let config = ProtocolConfig::random(&mut rng); - bytesrepr::test_serialization_roundtrip(&config); - } - - #[test] - fn toml_roundtrip() { - let mut rng = crate::new_rng(); - let config = ProtocolConfig::random(&mut rng); - let encoded = toml::to_string_pretty(&config).unwrap(); - let decoded = toml::from_str(&encoded).unwrap(); - assert_eq!(config, decoded); - } -} diff --git a/node/src/types/chunkable.rs b/node/src/types/chunkable.rs new file mode 100644 index 0000000000..5211fda128 --- /dev/null +++ b/node/src/types/chunkable.rs @@ -0,0 +1,87 @@ +use std::{borrow::Cow, convert::Infallible}; + +use casper_types::{ + bytesrepr::{self, Bytes, ToBytes}, + execution::{ExecutionResult, ExecutionResultV1, ExecutionResultV2}, + Digest, +}; + +use super::value_or_chunk::HashingTrieRaw; + +/// Implemented for types that are chunked when sending over the wire and/or before storing the +/// trie store. +pub trait Chunkable { + /// Error returned when mapping `Self` into bytes. + type Error: std::fmt::Debug; + /// Maps `Self` into bytes. + /// + /// Returns a [`Cow`] instance in case the resulting bytes are the same as input and we don't + /// want to reinitialize. This also helps with a case where returning a vector of bytes + /// would require instantiating a `Vec` locally (see [`casper_types::bytesrepr::ToBytes`]) + /// but can't be returned as reference. Alternative encoding would be to consume `Self` and + /// return `Vec` but that may do it unnecessarily if `Self` would be to used again. + fn as_bytes(&self) -> Result>, Self::Error>; + + /// Serializes the `self` using the [`Chunkable`] implementation for that type + /// and returns a [`Digest`] of the serialized bytes. 
+ fn hash(&self) -> Result { + let bytes = self.as_bytes()?; + Ok(Digest::hash_into_chunks_if_necessary(&bytes)) + } +} + +impl Chunkable for Vec { + type Error = Infallible; + + fn as_bytes(&self) -> Result>, Self::Error> { + Ok(Cow::Borrowed(self)) + } +} + +impl Chunkable for Bytes { + type Error = Infallible; + + fn as_bytes(&self) -> Result>, Self::Error> { + Ok(Cow::Borrowed(self.inner_bytes())) + } +} + +impl Chunkable for HashingTrieRaw { + type Error = Infallible; + + fn as_bytes(&self) -> Result>, Self::Error> { + Ok(Cow::Borrowed(self.inner().inner().inner_bytes())) + } +} + +impl Chunkable for &Vec { + type Error = bytesrepr::Error; + + fn as_bytes(&self) -> Result>, Self::Error> { + Ok(Cow::Owned((*self).to_bytes()?)) + } +} + +impl Chunkable for Vec { + type Error = bytesrepr::Error; + + fn as_bytes(&self) -> Result>, Self::Error> { + Ok(Cow::Owned(self.to_bytes()?)) + } +} + +impl Chunkable for Vec<&ExecutionResultV1> { + type Error = bytesrepr::Error; + + fn as_bytes(&self) -> Result>, Self::Error> { + Ok(Cow::Owned(self.to_bytes()?)) + } +} + +impl Chunkable for Vec<&ExecutionResultV2> { + type Error = bytesrepr::Error; + + fn as_bytes(&self) -> Result>, Self::Error> { + Ok(Cow::Owned(self.to_bytes()?)) + } +} diff --git a/node/src/types/deploy.rs b/node/src/types/deploy.rs deleted file mode 100644 index fa2701dac2..0000000000 --- a/node/src/types/deploy.rs +++ /dev/null @@ -1,1179 +0,0 @@ -// TODO - remove once schemars stops causing warning. 
-#![allow(clippy::field_reassign_with_default)] - -use std::{ - array::TryFromSliceError, - collections::HashMap, - error::Error as StdError, - fmt::{self, Debug, Display, Formatter}, -}; - -use datasize::DataSize; -use hex::FromHexError; -use itertools::Itertools; -use num_traits::Zero; -use once_cell::sync::Lazy; -#[cfg(test)] -use rand::{Rng, RngCore}; -use schemars::JsonSchema; -use serde::{Deserialize, Serialize}; -use thiserror::Error; -use tracing::{info, warn}; - -use casper_execution_engine::{ - core::engine_state::{executable_deploy_item::ExecutableDeployItem, DeployItem}, - shared::motes::Motes, -}; -use casper_types::{ - bytesrepr::{self, FromBytes, ToBytes}, - runtime_args, - system::standard_payment::ARG_AMOUNT, - AsymmetricType, ExecutionResult, PublicKey, RuntimeArgs, SecretKey, Signature, U512, -}; - -use super::{BlockHash, Item, Tag, TimeDiff, Timestamp}; -#[cfg(test)] -use crate::testing::TestRng; -use crate::{ - components::block_proposer::DeployType, - crypto, - crypto::{ - hash::{self, Digest}, - AsymmetricKeyExt, - }, - rpcs::docs::DocExample, - types::chainspec::DeployConfig, - utils::DisplayIter, -}; - -static DEPLOY: Lazy = Lazy::new(|| { - let payment_args = runtime_args! { - "quantity" => 1000 - }; - let payment = ExecutableDeployItem::StoredContractByName { - name: String::from("casper-example"), - entry_point: String::from("example-entry-point"), - args: payment_args, - }; - let session_args = runtime_args! 
{ - "amount" => 1000 - }; - let session = ExecutableDeployItem::Transfer { args: session_args }; - let serialized_body = serialize_body(&payment, &session); - let body_hash = hash::hash(&serialized_body); - - let secret_key = SecretKey::doc_example(); - let header = DeployHeader { - account: PublicKey::from(secret_key), - timestamp: *Timestamp::doc_example(), - ttl: TimeDiff::from(3_600_000), - gas_price: 1, - body_hash, - dependencies: vec![DeployHash::new(Digest::from([1u8; Digest::LENGTH]))], - chain_name: String::from("casper-example"), - }; - let serialized_header = serialize_header(&header); - let hash = DeployHash::new(hash::hash(&serialized_header)); - - let signature = Signature::from_hex( - "012dbf03817a51794a8e19e0724884075e6d1fbec326b766ecfa6658b41f81290da85e23b24e88b1c8d976\ - 1185c961daee1adab0649912a6477bcd2e69bd91bd08" - .as_bytes(), - ) - .unwrap(); - let approval = Approval { - signer: PublicKey::from(secret_key), - signature, - }; - - Deploy { - hash, - header, - payment, - session, - approvals: vec![approval], - is_valid: None, - } -}); - -/// A representation of the way in which a deploy failed validation checks. -#[derive(Clone, DataSize, Ord, PartialOrd, Eq, PartialEq, Hash, Debug, Error)] -pub enum DeployValidationFailure { - /// Invalid chain name. - #[error("invalid chain name: expected {expected}, got {got}")] - InvalidChainName { - /// The expected chain name. - expected: String, - /// The received chain name. - got: String, - }, - - /// Too many dependencies. - #[error("{got} dependencies exceeds limit of {max_dependencies}")] - ExcessiveDependencies { - /// The dependencies limit. - max_dependencies: u8, - /// The actual number of dependencies provided. - got: usize, - }, - - /// Deploy is too large. - #[error("{deploy_size} deploy size exceeds block size limit of {max_block_size}")] - ExcessiveSize { - /// The block size limit. - max_block_size: u32, - /// The size of the deploy provided. 
- deploy_size: usize, - }, - - /// Excessive time-to-live. - #[error("time-to-live of {got} exceeds limit of {max_ttl}")] - ExcessiveTimeToLive { - /// The time-to-live limit. - max_ttl: TimeDiff, - /// The received time-to-live. - got: TimeDiff, - }, - - /// The provided body hash does not match the actual hash of the body. - #[error("the provided body hash does not match the actual hash of the body")] - InvalidBodyHash, - - /// The provided deploy hash does not match the actual hash of the deploy. - #[error("the provided hash does not match the actual hash of the deploy")] - InvalidDeployHash, - - /// Invalid approval. - #[error("the approval at index {index} is invalid: {error_msg}")] - InvalidApproval { - /// The index of the approval at fault. - index: usize, - /// The approval validation error. - error_msg: String, - }, - - /// Excessive length of deploy's session args. - #[error("serialized session code runtime args of {got} exceeds limit of {max_length}")] - ExcessiveSessionArgsLength { - /// The byte size limit of session arguments. - max_length: usize, - /// The received length of session arguments. - got: usize, - }, - - /// Excessive length of deploy's payment args. - #[error("serialized payment code runtime args of {got} exceeds limit of {max_length}")] - ExcessivePaymentArgsLength { - /// The byte size limit of payment arguments. - max_length: usize, - /// The received length of payment arguments. - got: usize, - }, - - /// Missing transfer amount. - #[error("missing transfer amount")] - MissingTransferAmount, - - /// Invalid transfer amount. - #[error("invalid transfer amount")] - InvalidTransferAmount, - - /// Insufficient transfer amount. - #[error("insufficient transfer amount; minimum: {minimum} attempted: {attempted}")] - InsufficientTransferAmount { - /// The minimum transfer amount. - minimum: U512, - /// The attempted transfer amount. - attempted: U512, - }, -} - -/// Errors other than validation failures relating to `Deploy`s. 
-#[derive(Debug, Error)] -pub enum Error { - /// Error while encoding to JSON. - #[error("encoding to JSON: {0}")] - EncodeToJson(#[from] serde_json::Error), - - /// Error while decoding from JSON. - #[error("decoding from JSON: {0}")] - DecodeFromJson(Box), - - /// Failed to get "amount" from `payment()`'s runtime args. - #[error("invalid payment: missing \"amount\" arg")] - InvalidPayment, -} - -impl From for Error { - fn from(error: FromHexError) -> Self { - Error::DecodeFromJson(Box::new(error)) - } -} - -impl From for Error { - fn from(error: TryFromSliceError) -> Self { - Error::DecodeFromJson(Box::new(error)) - } -} - -/// The cryptographic hash of a [`Deploy`](struct.Deploy.html). -#[derive( - Copy, - Clone, - DataSize, - Ord, - PartialOrd, - Eq, - PartialEq, - Hash, - Serialize, - Deserialize, - Debug, - Default, - JsonSchema, -)] -#[serde(deny_unknown_fields)] -#[schemars(with = "String", description = "Hex-encoded deploy hash.")] -pub struct DeployHash(#[schemars(skip)] Digest); - -impl DeployHash { - /// Constructs a new `DeployHash`. - pub fn new(hash: Digest) -> Self { - DeployHash(hash) - } - - /// Returns the wrapped inner hash. - pub fn inner(&self) -> &Digest { - &self.0 - } - - /// Creates a random deploy hash. 
- #[cfg(test)] - pub fn random(rng: &mut TestRng) -> Self { - let hash = Digest::random(rng); - DeployHash(hash) - } -} - -impl Display for DeployHash { - fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { - write!(formatter, "deploy-hash({})", self.0,) - } -} - -impl From for DeployHash { - fn from(digest: Digest) -> Self { - Self(digest) - } -} - -impl AsRef<[u8]> for DeployHash { - fn as_ref(&self) -> &[u8] { - self.0.as_ref() - } -} - -impl ToBytes for DeployHash { - fn to_bytes(&self) -> Result, bytesrepr::Error> { - self.0.to_bytes() - } - - fn serialized_length(&self) -> usize { - self.0.serialized_length() - } -} - -impl FromBytes for DeployHash { - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { - Digest::from_bytes(bytes).map(|(inner, remainder)| (DeployHash(inner), remainder)) - } -} - -/// The header portion of a [`Deploy`](struct.Deploy.html). -#[derive( - Clone, DataSize, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug, JsonSchema, -)] -#[serde(deny_unknown_fields)] -pub struct DeployHeader { - account: PublicKey, - timestamp: Timestamp, - ttl: TimeDiff, - gas_price: u64, - body_hash: Digest, - dependencies: Vec, - chain_name: String, -} - -impl DeployHeader { - /// The account within which the deploy will be run. - pub fn account(&self) -> &PublicKey { - &self.account - } - - /// When the deploy was created. - pub fn timestamp(&self) -> Timestamp { - self.timestamp - } - - /// How long the deploy will stay valid. - pub fn ttl(&self) -> TimeDiff { - self.ttl - } - - /// Has this deploy expired? - pub fn expired(&self, current_instant: Timestamp) -> bool { - let lifespan = self.timestamp + self.ttl; - lifespan < current_instant - } - - /// Price per gas unit for this deploy. - pub fn gas_price(&self) -> u64 { - self.gas_price - } - - /// Hash of the Wasm code. - pub fn body_hash(&self) -> &Digest { - &self.body_hash - } - - /// Other deploys that have to be run before this one. 
- pub fn dependencies(&self) -> &Vec { - &self.dependencies - } - - /// Which chain the deploy is supposed to be run on. - pub fn chain_name(&self) -> &str { - &self.chain_name - } - - /// Determine if this deploy header has valid values based on a `DeployConfig` and timestamp. - pub fn is_valid(&self, deploy_config: &DeployConfig, current_timestamp: Timestamp) -> bool { - let ttl_valid = self.ttl() <= deploy_config.max_ttl; - let timestamp_valid = self.timestamp() <= current_timestamp; - let not_expired = !self.expired(current_timestamp); - let num_deps_valid = self.dependencies().len() <= deploy_config.max_dependencies as usize; - ttl_valid && timestamp_valid && not_expired && num_deps_valid - } -} - -impl DeployHeader { - /// Returns the timestamp of when the deploy expires, i.e. `self.timestamp + self.ttl`. - pub fn expires(&self) -> Timestamp { - self.timestamp + self.ttl - } -} - -impl ToBytes for DeployHeader { - fn to_bytes(&self) -> Result, bytesrepr::Error> { - let mut buffer = bytesrepr::allocate_buffer(self)?; - buffer.extend(self.account.to_bytes()?); - buffer.extend(self.timestamp.to_bytes()?); - buffer.extend(self.ttl.to_bytes()?); - buffer.extend(self.gas_price.to_bytes()?); - buffer.extend(self.body_hash.to_bytes()?); - buffer.extend(self.dependencies.to_bytes()?); - buffer.extend(self.chain_name.to_bytes()?); - Ok(buffer) - } - - fn serialized_length(&self) -> usize { - self.account.serialized_length() - + self.timestamp.serialized_length() - + self.ttl.serialized_length() - + self.gas_price.serialized_length() - + self.body_hash.serialized_length() - + self.dependencies.serialized_length() - + self.chain_name.serialized_length() - } -} - -impl FromBytes for DeployHeader { - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { - let (account, remainder) = PublicKey::from_bytes(bytes)?; - let (timestamp, remainder) = Timestamp::from_bytes(remainder)?; - let (ttl, remainder) = TimeDiff::from_bytes(remainder)?; - let (gas_price, 
remainder) = u64::from_bytes(remainder)?; - let (body_hash, remainder) = Digest::from_bytes(remainder)?; - let (dependencies, remainder) = Vec::::from_bytes(remainder)?; - let (chain_name, remainder) = String::from_bytes(remainder)?; - let deploy_header = DeployHeader { - account, - timestamp, - ttl, - gas_price, - body_hash, - dependencies, - chain_name, - }; - Ok((deploy_header, remainder)) - } -} - -impl Display for DeployHeader { - fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { - write!( - formatter, - "deploy-header[account: {}, timestamp: {}, ttl: {}, gas_price: {}, body_hash: {}, dependencies: [{}], chain_name: {}]", - self.account, - self.timestamp, - self.ttl, - self.gas_price, - self.body_hash, - DisplayIter::new(self.dependencies.iter()), - self.chain_name, - ) - } -} - -/// A struct containing a signature and the public key of the signer. -#[derive( - Clone, DataSize, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug, JsonSchema, -)] -#[serde(deny_unknown_fields)] -pub struct Approval { - signer: PublicKey, - signature: Signature, -} - -impl Approval { - /// Returns the public key of the approval's signer. - pub fn signer(&self) -> &PublicKey { - &self.signer - } - - /// Returns the approval signature. 
- pub fn signature(&self) -> &Signature { - &self.signature - } -} - -impl Display for Approval { - fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { - write!(formatter, "approval({})", self.signer) - } -} - -impl ToBytes for Approval { - fn to_bytes(&self) -> Result, bytesrepr::Error> { - let mut buffer = bytesrepr::allocate_buffer(self)?; - buffer.extend(self.signer.to_bytes()?); - buffer.extend(self.signature.to_bytes()?); - Ok(buffer) - } - - fn serialized_length(&self) -> usize { - self.signer.serialized_length() + self.signature.serialized_length() - } -} - -impl FromBytes for Approval { - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { - let (signer, remainder) = PublicKey::from_bytes(bytes)?; - let (signature, remainder) = Signature::from_bytes(remainder)?; - let approval = Approval { signer, signature }; - Ok((approval, remainder)) - } -} - -/// A deploy; an item containing a smart contract along with the requester's signature(s). -#[derive( - Clone, DataSize, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug, JsonSchema, -)] -#[serde(deny_unknown_fields)] -pub struct Deploy { - hash: DeployHash, - header: DeployHeader, - payment: ExecutableDeployItem, - session: ExecutableDeployItem, - approvals: Vec, - #[serde(skip)] - is_valid: Option>, -} - -impl Deploy { - /// Constructs a new signed `Deploy`. - #[allow(clippy::too_many_arguments)] - pub fn new( - timestamp: Timestamp, - ttl: TimeDiff, - gas_price: u64, - dependencies: Vec, - chain_name: String, - payment: ExecutableDeployItem, - session: ExecutableDeployItem, - secret_key: &SecretKey, - ) -> Deploy { - let serialized_body = serialize_body(&payment, &session); - let body_hash = hash::hash(&serialized_body); - - let account = PublicKey::from(secret_key); - // Remove duplicates. 
- let dependencies = dependencies.into_iter().unique().collect(); - let header = DeployHeader { - account, - timestamp, - ttl, - gas_price, - body_hash, - dependencies, - chain_name, - }; - let serialized_header = serialize_header(&header); - let hash = DeployHash::new(hash::hash(&serialized_header)); - - let mut deploy = Deploy { - hash, - header, - payment, - session, - approvals: vec![], - is_valid: None, - }; - - deploy.sign(secret_key); - deploy - } - - /// Adds a signature of this deploy's hash to its approvals. - pub fn sign(&mut self, secret_key: &SecretKey) { - let signer = PublicKey::from(secret_key); - let signature = crypto::sign(&self.hash, secret_key, &signer); - let approval = Approval { signer, signature }; - self.approvals.push(approval); - } - - /// Returns the `DeployHash` identifying this `Deploy`. - pub fn id(&self) -> &DeployHash { - &self.hash - } - - /// Returns a reference to the `DeployHeader` of this `Deploy`. - pub fn header(&self) -> &DeployHeader { - &self.header - } - - /// Returns the `DeployHeader` of this `Deploy`. - pub fn take_header(self) -> DeployHeader { - self.header - } - - /// Returns the `ExecutableDeployItem` for payment code. - pub fn payment(&self) -> &ExecutableDeployItem { - &self.payment - } - - /// Returns the `ExecutableDeployItem` for session code. - pub fn session(&self) -> &ExecutableDeployItem { - &self.session - } - - /// Returns the `Approval`s for this deploy. - pub fn approvals(&self) -> &[Approval] { - &self.approvals - } - - /// Returns the `DeployType`. - pub fn deploy_type(&self) -> Result { - let header = self.header().clone(); - let size = self.serialized_length(); - if self.session().is_transfer() { - // TODO: we need a non-zero value constant for wasm-less transfer cost. 
- let payment_amount = Motes::zero(); - Ok(DeployType::Transfer { - header, - payment_amount, - size, - }) - } else { - let payment_item = self.payment().clone(); - let payment_amount = { - // In the happy path for a payment we expect: - // - args to exist - // - contain "amount" - // - be a valid U512 value. - let value = payment_item - .args() - .get(ARG_AMOUNT) - .ok_or(Error::InvalidPayment)?; - let value = value - .clone() - .into_t::() - .map_err(|_| Error::InvalidPayment)?; - Motes::new(value) - }; - Ok(DeployType::Other { - header, - payment_amount, - size, - }) - } - } - - /// Returns true if and only if: - /// * the deploy hash is correct (should be the hash of the header), and - /// * the body hash is correct (should be the hash of the body), and - /// * all approvals are valid signatures of the deploy hash - pub fn is_valid(&mut self) -> Result<(), DeployValidationFailure> { - match self.is_valid.as_ref() { - None => { - let validity = validate_deploy(self); - self.is_valid = Some(validity.clone()); - validity - } - Some(validity) => validity.clone(), - } - } - - /// Returns true if and only if: - /// * the chain_name is correct, - /// * the configured parameters are complied with, - /// * the deploy is valid - /// - /// Note: if everything else checks out, calls the computationally expensive `is_valid` method. 
- pub fn is_acceptable( - &mut self, - chain_name: &str, - config: &DeployConfig, - ) -> Result<(), DeployValidationFailure> { - let deploy_size = self.serialized_length(); - if deploy_size > config.max_block_size as usize { - return Err(DeployValidationFailure::ExcessiveSize { - max_block_size: config.max_block_size, - deploy_size, - }); - } - - let header = self.header(); - if header.chain_name() != chain_name { - info!( - deploy_hash = %self.id(), - deploy_header = %header, - chain_name = %header.chain_name(), - "invalid chain identifier" - ); - return Err(DeployValidationFailure::InvalidChainName { - expected: chain_name.to_string(), - got: header.chain_name().to_string(), - }); - } - - if header.dependencies().len() > config.max_dependencies as usize { - info!( - deploy_hash = %self.id(), - deploy_header = %header, - max_dependencies = %config.max_dependencies, - "deploy dependency ceiling exceeded" - ); - return Err(DeployValidationFailure::ExcessiveDependencies { - max_dependencies: config.max_dependencies, - got: header.dependencies().len(), - }); - } - - if header.ttl() > config.max_ttl { - info!( - deploy_hash = %self.id(), - deploy_header = %header, - max_ttl = %config.max_ttl, - "deploy ttl excessive" - ); - return Err(DeployValidationFailure::ExcessiveTimeToLive { - max_ttl: config.max_ttl, - got: header.ttl(), - }); - } - - let payment_args_length = self.payment().args().serialized_length(); - if payment_args_length > config.payment_args_max_length as usize { - info!( - payment_args_length, - payment_args_max_length = config.payment_args_max_length, - "payment args excessive" - ); - return Err(DeployValidationFailure::ExcessivePaymentArgsLength { - max_length: config.payment_args_max_length as usize, - got: payment_args_length, - }); - } - - let session_args_length = self.session().args().serialized_length(); - if session_args_length > config.session_args_max_length as usize { - info!( - session_args_length, - session_args_max_length = 
config.session_args_max_length, - "session args excessive" - ); - return Err(DeployValidationFailure::ExcessiveSessionArgsLength { - max_length: config.session_args_max_length as usize, - got: session_args_length, - }); - } - - if self.session().is_transfer() { - let item = self.session().clone(); - let attempted = item - .args() - .get(ARG_AMOUNT) - .ok_or(DeployValidationFailure::MissingTransferAmount)? - .clone() - .into_t::() - .map_err(|_| DeployValidationFailure::InvalidTransferAmount)?; - let minimum = U512::from(config.native_transfer_minimum_motes); - if attempted < minimum { - return Err(DeployValidationFailure::InsufficientTransferAmount { - minimum, - attempted, - }); - } - } - - self.is_valid() - } - - /// Generates a random instance using a `TestRng`. - #[cfg(test)] - pub fn random(rng: &mut TestRng) -> Self { - let timestamp = Timestamp::random(rng); - let ttl = TimeDiff::from(rng.gen_range(60_000..3_600_000)); - let gas_price = rng.gen_range(1..100); - - let dependencies = vec![ - DeployHash::new(hash::hash(rng.next_u64().to_le_bytes())), - DeployHash::new(hash::hash(rng.next_u64().to_le_bytes())), - DeployHash::new(hash::hash(rng.next_u64().to_le_bytes())), - ]; - let chain_name = String::from("casper-example"); - - let payment = rng.gen(); - let session = rng.gen(); - - let secret_key = SecretKey::random(rng); - - Deploy::new( - timestamp, - ttl, - gas_price, - dependencies, - chain_name, - payment, - session, - &secret_key, - ) - } -} - -impl DocExample for Deploy { - fn doc_example() -> &'static Self { - &*DEPLOY - } -} - -fn serialize_header(header: &DeployHeader) -> Vec { - header - .to_bytes() - .unwrap_or_else(|error| panic!("should serialize deploy header: {}", error)) -} - -fn serialize_body(payment: &ExecutableDeployItem, session: &ExecutableDeployItem) -> Vec { - let mut buffer = payment - .to_bytes() - .unwrap_or_else(|error| panic!("should serialize payment code: {}", error)); - buffer.extend( - session - .to_bytes() - 
.unwrap_or_else(|error| panic!("should serialize session code: {}", error)), - ); - buffer -} - -// Computationally expensive validity check for a given deploy instance, including -// asymmetric_key signing verification. -fn validate_deploy(deploy: &Deploy) -> Result<(), DeployValidationFailure> { - let serialized_body = serialize_body(&deploy.payment, &deploy.session); - let body_hash = hash::hash(&serialized_body); - if body_hash != deploy.header.body_hash { - warn!(?deploy, ?body_hash, "invalid deploy body hash"); - return Err(DeployValidationFailure::InvalidBodyHash); - } - - let serialized_header = serialize_header(&deploy.header); - let hash = DeployHash::new(hash::hash(&serialized_header)); - if hash != deploy.hash { - warn!(?deploy, ?hash, "invalid deploy hash"); - return Err(DeployValidationFailure::InvalidDeployHash); - } - - // We don't need to check for an empty set here. EE checks that the correct number and weight of - // signatures are provided when executing the deploy, so all we need to do here is check that - // any provided signatures are valid. 
- for (index, approval) in deploy.approvals.iter().enumerate() { - if let Err(error) = crypto::verify(&deploy.hash, &approval.signature, &approval.signer) { - warn!(?deploy, "failed to verify approval {}: {}", index, error); - return Err(DeployValidationFailure::InvalidApproval { - index, - error_msg: error.to_string(), - }); - } - } - - Ok(()) -} - -impl Item for Deploy { - type Id = DeployHash; - - const TAG: Tag = Tag::Deploy; - const ID_IS_COMPLETE_ITEM: bool = false; - - fn id(&self) -> Self::Id { - *self.id() - } -} - -impl Display for Deploy { - fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { - write!( - formatter, - "deploy[{}, {}, payment_code: {}, session_code: {}, approvals: {}]", - self.hash, - self.header, - self.payment, - self.session, - DisplayIter::new(self.approvals.iter()) - ) - } -} - -impl From for DeployItem { - fn from(deploy: Deploy) -> Self { - let address = deploy.header().account().to_account_hash(); - let authorization_keys = deploy - .approvals() - .iter() - .map(|approval| approval.signer().to_account_hash()) - .collect(); - - DeployItem::new( - address, - deploy.session().clone(), - deploy.payment().clone(), - deploy.header().gas_price(), - authorization_keys, - casper_types::DeployHash::new(deploy.id().inner().to_array()), - ) - } -} - -/// The deploy mutable metadata. -/// -/// Currently a stop-gap measure to associate an immutable deploy with additional metadata. Holds -/// execution results. -#[derive(Clone, Default, Serialize, Deserialize, Debug, PartialEq)] -pub struct DeployMetadata { - /// The block hashes of blocks containing the related deploy, along with the results of - /// executing the related deploy in the context of one or more blocks. 
- pub execution_results: HashMap, -} - -impl ToBytes for Deploy { - fn to_bytes(&self) -> Result, bytesrepr::Error> { - let mut buffer = bytesrepr::allocate_buffer(self)?; - buffer.extend(self.header.to_bytes()?); - buffer.extend(self.hash.to_bytes()?); - buffer.extend(self.payment.to_bytes()?); - buffer.extend(self.session.to_bytes()?); - buffer.extend(self.approvals.to_bytes()?); - Ok(buffer) - } - - fn serialized_length(&self) -> usize { - self.header.serialized_length() - + self.hash.serialized_length() - + self.payment.serialized_length() - + self.session.serialized_length() - + self.approvals.serialized_length() - } -} - -impl FromBytes for Deploy { - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { - let (header, remainder) = DeployHeader::from_bytes(bytes)?; - let (hash, remainder) = DeployHash::from_bytes(remainder)?; - let (payment, remainder) = ExecutableDeployItem::from_bytes(remainder)?; - let (session, remainder) = ExecutableDeployItem::from_bytes(remainder)?; - let (approvals, remainder) = Vec::::from_bytes(remainder)?; - let maybe_valid_deploy = Deploy { - header, - hash, - payment, - session, - approvals, - is_valid: None, - }; - Ok((maybe_valid_deploy, remainder)) - } -} - -#[cfg(test)] -mod tests { - use std::{iter, time::Duration}; - - use casper_execution_engine::core::engine_state::MAX_PAYMENT_AMOUNT; - use casper_types::{bytesrepr::Bytes, CLValue}; - - use super::*; - use crate::crypto::AsymmetricKeyExt; - - #[test] - fn json_roundtrip() { - let mut rng = crate::new_rng(); - let deploy = Deploy::random(&mut rng); - let json_string = serde_json::to_string_pretty(&deploy).unwrap(); - let decoded = serde_json::from_str(&json_string).unwrap(); - assert_eq!(deploy, decoded); - } - - #[test] - fn bincode_roundtrip() { - let mut rng = crate::new_rng(); - let deploy = Deploy::random(&mut rng); - let serialized = bincode::serialize(&deploy).unwrap(); - let deserialized = bincode::deserialize(&serialized).unwrap(); - 
assert_eq!(deploy, deserialized); - } - - #[test] - fn bytesrepr_roundtrip() { - let mut rng = crate::new_rng(); - let hash = DeployHash(Digest::random(&mut rng)); - bytesrepr::test_serialization_roundtrip(&hash); - - let deploy = Deploy::random(&mut rng); - bytesrepr::test_serialization_roundtrip(deploy.header()); - bytesrepr::test_serialization_roundtrip(&deploy); - } - - fn create_deploy( - rng: &mut TestRng, - ttl: TimeDiff, - dependency_count: usize, - chain_name: &str, - ) -> Deploy { - let secret_key = SecretKey::random(rng); - let dependencies = iter::repeat_with(|| DeployHash::random(rng)) - .take(dependency_count) - .collect(); - let transfer_args = { - let mut transfer_args = RuntimeArgs::new(); - let value = - CLValue::from_t(U512::from(MAX_PAYMENT_AMOUNT)).expect("should create CLValue"); - transfer_args.insert_cl_value(ARG_AMOUNT, value); - transfer_args - }; - Deploy::new( - Timestamp::now(), - ttl, - 1, - dependencies, - chain_name.to_string(), - ExecutableDeployItem::ModuleBytes { - module_bytes: Bytes::new(), - args: RuntimeArgs::new(), - }, - ExecutableDeployItem::Transfer { - args: transfer_args, - }, - &secret_key, - ) - } - - #[test] - fn is_valid() { - let mut rng = crate::new_rng(); - let mut deploy = create_deploy(&mut rng, DeployConfig::default().max_ttl, 0, "net-1"); - assert_eq!(deploy.is_valid, None, "is valid should initially be None"); - deploy.is_valid().expect("should be valid"); - assert_eq!(deploy.is_valid, Some(Ok(())), "is valid should be true"); - } - - fn check_is_not_valid(mut invalid_deploy: Deploy, expected_error: DeployValidationFailure) { - assert!( - invalid_deploy.is_valid.is_none(), - "is valid should initially be None" - ); - let actual_error = invalid_deploy.is_valid().unwrap_err(); - - // Ignore the `error_msg` field of `InvalidApproval` when comparing to expected error, as - // this makes the test too fragile. Otherwise expect the actual error should exactly match - // the expected error. 
- match expected_error { - DeployValidationFailure::InvalidApproval { - index: expected_index, - .. - } => match actual_error { - DeployValidationFailure::InvalidApproval { - index: actual_index, - .. - } => { - assert_eq!(actual_index, expected_index); - } - _ => panic!("expected {}, got: {}", expected_error, actual_error), - }, - _ => { - assert_eq!(actual_error, expected_error,); - } - } - - // The actual error should have been lazily initialized correctly. - assert_eq!( - invalid_deploy.is_valid, - Some(Err(actual_error)), - "is valid should now be Some" - ); - } - - #[test] - fn not_valid_due_to_invalid_body_hash() { - let mut rng = crate::new_rng(); - let mut deploy = create_deploy(&mut rng, DeployConfig::default().max_ttl, 0, "net-1"); - - deploy.session = ExecutableDeployItem::Transfer { - args: runtime_args! { - "amount" => 1 - }, - }; - check_is_not_valid(deploy, DeployValidationFailure::InvalidBodyHash); - } - - #[test] - fn not_valid_due_to_invalid_deploy_hash() { - let mut rng = crate::new_rng(); - let mut deploy = create_deploy(&mut rng, DeployConfig::default().max_ttl, 0, "net-1"); - - deploy.header.gas_price = 2; - check_is_not_valid(deploy, DeployValidationFailure::InvalidDeployHash); - } - - #[test] - fn not_valid_due_to_invalid_approval() { - let mut rng = crate::new_rng(); - let mut deploy = create_deploy(&mut rng, DeployConfig::default().max_ttl, 0, "net-1"); - - let deploy2 = Deploy::random(&mut rng); - - deploy.approvals.extend(deploy2.approvals); - check_is_not_valid( - deploy, - DeployValidationFailure::InvalidApproval { - index: 1, - error_msg: String::new(), // This field is ignored in the check. 
- }, - ); - } - - #[test] - fn is_acceptable() { - let mut rng = crate::new_rng(); - let chain_name = "net-1"; - let deploy_config = DeployConfig::default(); - - let mut deploy = create_deploy( - &mut rng, - deploy_config.max_ttl, - deploy_config.max_dependencies.into(), - &chain_name, - ); - deploy - .is_acceptable(chain_name, &deploy_config) - .expect("should be acceptable"); - } - - #[test] - fn not_acceptable_due_to_invalid_chain_name() { - let mut rng = crate::new_rng(); - let expected_chain_name = "net-1"; - let wrong_chain_name = "net-2".to_string(); - let deploy_config = DeployConfig::default(); - - let mut deploy = create_deploy( - &mut rng, - deploy_config.max_ttl, - deploy_config.max_dependencies.into(), - &wrong_chain_name, - ); - - let expected_error = DeployValidationFailure::InvalidChainName { - expected: expected_chain_name.to_string(), - got: wrong_chain_name, - }; - - assert_eq!( - deploy.is_acceptable(expected_chain_name, &deploy_config), - Err(expected_error) - ); - assert!( - deploy.is_valid.is_none(), - "deploy should not have run expensive `is_valid` call" - ); - } - - #[test] - fn not_acceptable_due_to_excessive_dependencies() { - let mut rng = crate::new_rng(); - let chain_name = "net-1"; - let deploy_config = DeployConfig::default(); - - let dependency_count = usize::from(deploy_config.max_dependencies + 1); - - let mut deploy = create_deploy( - &mut rng, - deploy_config.max_ttl, - dependency_count, - &chain_name, - ); - - let expected_error = DeployValidationFailure::ExcessiveDependencies { - max_dependencies: deploy_config.max_dependencies, - got: dependency_count, - }; - - assert_eq!( - deploy.is_acceptable(chain_name, &deploy_config), - Err(expected_error) - ); - assert!( - deploy.is_valid.is_none(), - "deploy should not have run expensive `is_valid` call" - ); - } - - #[test] - fn not_acceptable_due_to_excessive_ttl() { - let mut rng = crate::new_rng(); - let chain_name = "net-1"; - let deploy_config = DeployConfig::default(); - - let 
ttl = deploy_config.max_ttl + TimeDiff::from(Duration::from_secs(1)); - - let mut deploy = create_deploy( - &mut rng, - ttl, - deploy_config.max_dependencies.into(), - &chain_name, - ); - - let expected_error = DeployValidationFailure::ExcessiveTimeToLive { - max_ttl: deploy_config.max_ttl, - got: ttl, - }; - - assert_eq!( - deploy.is_acceptable(chain_name, &deploy_config), - Err(expected_error) - ); - assert!( - deploy.is_valid.is_none(), - "deploy should not have run expensive `is_valid` call" - ); - } -} diff --git a/node/src/types/exit_code.rs b/node/src/types/exit_code.rs index 6b5399abf0..f9c46281fc 100644 --- a/node/src/types/exit_code.rs +++ b/node/src/types/exit_code.rs @@ -5,12 +5,13 @@ use signal_hook::consts::signal::{SIGINT, SIGQUIT, SIGTERM}; /// termination signal. const SIGNAL_OFFSET: u8 = 128; -/// Exit codes which should be used by the casper-node binary, and provided by the initializer -/// reactor to the binary. +/// Exit codes which should be used by the casper-node binary, and provided by the reactor to the +/// binary. /// /// Note that a panic will result in the Rust process producing an exit code of 101. #[derive(Clone, Copy, PartialEq, Eq, Debug, DataSize)] #[repr(u8)] +#[non_exhaustive] pub enum ExitCode { /// The process should exit with success. The launcher should proceed to run the next /// installed version of `casper-node`. @@ -18,9 +19,13 @@ pub enum ExitCode { /// The process should exit with `101`, equivalent to panicking. The launcher should not /// restart the node. Abort = 101, - /// The process should exit with `102`. The launcher should proceed to run the previous - /// installed version of `casper-node`. + /// The process should exit with `102`. It used to be an indication to the launcher + /// that it should proceed to run the previous installed version of `casper-node`. + /// It is no longer used, but we keep it here to avoid it being reassigned to other features. 
+ #[doc(hidden)] DowngradeVersion = 102, + /// The process should exit with `103`. The user requested a node shut down without restart. + CleanExitDontRestart = 103, /// The exit code Rust uses by default when interrupted via an `INT` signal. SigInt = SIGNAL_OFFSET + SIGINT as u8, /// The exit code Rust uses by default when interrupted via a `QUIT` signal. diff --git a/node/src/types/item.rs b/node/src/types/item.rs deleted file mode 100644 index 00cffb0496..0000000000 --- a/node/src/types/item.rs +++ /dev/null @@ -1,92 +0,0 @@ -use std::{ - fmt::{Debug, Display}, - hash::Hash, -}; - -use derive_more::Display; -use serde::{de::DeserializeOwned, Serialize}; -use serde_repr::{Deserialize_repr, Serialize_repr}; - -use crate::types::{BlockHash, BlockHeader, BlockHeaderWithMetadata}; -use casper_execution_engine::{ - shared::{newtypes::Blake2bHash, stored_value::StoredValue}, - storage::trie::Trie, -}; -use casper_types::{bytesrepr::ToBytes, Key}; - -/// An identifier for a specific type implementing the `Item` trait. Each different implementing -/// type should have a unique `Tag` variant. -#[derive( - Clone, - Copy, - PartialEq, - Eq, - PartialOrd, - Ord, - Hash, - Serialize_repr, - Deserialize_repr, - Debug, - Display, -)] -#[repr(u8)] -pub enum Tag { - /// A deploy. - Deploy, - /// A block. - Block, - /// A gossiped public listening address. - GossipedAddress, - /// A block requested by its height in the linear chain. - BlockByHeight, - /// A block header requested by its hash. - BlockHeaderByHash, - /// A block header and its finality signatures requested by its height in the linear chain. - BlockHeaderAndFinalitySignaturesByHeight, -} - -/// A trait which allows an implementing type to be used by the gossiper and fetcher components, and -/// furthermore allows generic network messages to include this type due to the provision of the -/// type-identifying `TAG`. 
-pub trait Item: Clone + Serialize + DeserializeOwned + Send + Sync + Debug + Display { - /// The type of ID of the item. - type Id: Copy + Eq + Hash + Serialize + DeserializeOwned + Send + Sync + Debug + Display; - /// The tag representing the type of the item. - const TAG: Tag; - /// Whether the item's ID _is_ the complete item or not. - const ID_IS_COMPLETE_ITEM: bool; - - /// The ID of the specific item. - fn id(&self) -> Self::Id; -} - -impl Item for Trie { - type Id = Blake2bHash; - const TAG: Tag = Tag::Deploy; - const ID_IS_COMPLETE_ITEM: bool = false; - - fn id(&self) -> Self::Id { - let node_bytes = self.to_bytes().expect("Could not serialize trie to bytes"); - Blake2bHash::new(&node_bytes) - } -} - -impl Item for BlockHeader { - type Id = BlockHash; - const TAG: Tag = Tag::BlockHeaderByHash; - const ID_IS_COMPLETE_ITEM: bool = false; - - fn id(&self) -> Self::Id { - self.hash() - } -} - -impl Item for BlockHeaderWithMetadata { - type Id = u64; - const TAG: Tag = Tag::BlockHeaderAndFinalitySignaturesByHeight; - const ID_IS_COMPLETE_ITEM: bool = false; - - fn id(&self) -> Self::Id { - self.block_header.height() - } -} diff --git a/node/src/types/json_compatibility.rs b/node/src/types/json_compatibility.rs deleted file mode 100644 index df9a9692b7..0000000000 --- a/node/src/types/json_compatibility.rs +++ /dev/null @@ -1,24 +0,0 @@ -//! Types which are serializable to JSON, which map to types defined outside this module. 
- -mod account; -mod auction_state; -mod contracts; -mod stored_value; - -pub use account::Account; -pub use auction_state::AuctionState; -use casper_types::{contracts::NamedKeys, NamedKey}; -pub use contracts::{Contract, ContractPackage}; -pub use stored_value::StoredValue; - -/// A helper function to change NamedKeys into a Vec -pub fn vectorize(keys: &NamedKeys) -> Vec { - let named_keys = keys - .iter() - .map(|(name, key)| NamedKey { - name: name.clone(), - key: key.to_formatted_string(), - }) - .collect(); - named_keys -} diff --git a/node/src/types/json_compatibility/account.rs b/node/src/types/json_compatibility/account.rs deleted file mode 100644 index 770a41e793..0000000000 --- a/node/src/types/json_compatibility/account.rs +++ /dev/null @@ -1,59 +0,0 @@ -// TODO - remove once schemars stops causing warning. -#![allow(clippy::field_reassign_with_default)] - -use datasize::DataSize; -use schemars::JsonSchema; -use serde::{Deserialize, Serialize}; - -use crate::types::json_compatibility::vectorize; -use casper_execution_engine::shared::account::Account as ExecutionEngineAccount; -use casper_types::{account::AccountHash, NamedKey, URef}; - -#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize, DataSize, JsonSchema)] -#[serde(deny_unknown_fields)] -struct AssociatedKey { - account_hash: AccountHash, - weight: u8, -} - -/// Thresholds that have to be met when executing an action of a certain type. -#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize, DataSize, JsonSchema)] -#[serde(deny_unknown_fields)] -struct ActionThresholds { - deployment: u8, - key_management: u8, -} - -/// Structure representing a user's account, stored in global state. 
-#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize, DataSize, JsonSchema)] -#[serde(deny_unknown_fields)] -pub struct Account { - account_hash: AccountHash, - #[data_size(skip)] - named_keys: Vec, - #[data_size(skip)] - main_purse: URef, - associated_keys: Vec, - action_thresholds: ActionThresholds, -} - -impl From<&ExecutionEngineAccount> for Account { - fn from(ee_account: &ExecutionEngineAccount) -> Self { - Account { - account_hash: ee_account.account_hash(), - named_keys: vectorize(ee_account.named_keys()), - main_purse: ee_account.main_purse(), - associated_keys: ee_account - .associated_keys() - .map(|(account_hash, weight)| AssociatedKey { - account_hash: *account_hash, - weight: weight.value(), - }) - .collect(), - action_thresholds: ActionThresholds { - deployment: ee_account.action_thresholds().deployment().value(), - key_management: ee_account.action_thresholds().key_management().value(), - }, - } - } -} diff --git a/node/src/types/json_compatibility/auction_state.rs b/node/src/types/json_compatibility/auction_state.rs deleted file mode 100644 index 8c25ff06bb..0000000000 --- a/node/src/types/json_compatibility/auction_state.rs +++ /dev/null @@ -1,216 +0,0 @@ -// TODO - remove once schemars stops causing warning. 
-#![allow(clippy::field_reassign_with_default)] - -use std::collections::BTreeMap; - -use num_traits::Zero; -use once_cell::sync::Lazy; -use schemars::JsonSchema; -use serde::{Deserialize, Serialize}; - -use casper_types::{ - system::auction::{Bid, Bids, DelegationRate, Delegator, EraValidators}, - AccessRights, EraId, PublicKey, SecretKey, URef, U512, -}; - -use crate::{crypto::hash::Digest, rpcs::docs::DocExample}; - -static ERA_VALIDATORS: Lazy = Lazy::new(|| { - let public_key_1 = SecretKey::ed25519_from_bytes([42; SecretKey::ED25519_LENGTH]) - .unwrap() - .into(); - - let mut validator_weights = BTreeMap::new(); - validator_weights.insert(public_key_1, U512::from(10)); - - let mut era_validators = BTreeMap::new(); - era_validators.insert(EraId::from(10u64), validator_weights); - - era_validators -}); -static BIDS: Lazy = Lazy::new(|| { - let bonding_purse = URef::new([250; 32], AccessRights::READ_ADD_WRITE); - let staked_amount = U512::from(10); - let release_era: u64 = 42; - - let validator_public_key: PublicKey = - SecretKey::ed25519_from_bytes([42; SecretKey::ED25519_LENGTH]) - .unwrap() - .into(); - let delegator_public_key: PublicKey = - SecretKey::ed25519_from_bytes([43; SecretKey::ED25519_LENGTH]) - .unwrap() - .into(); - - let delegator = Delegator::unlocked( - delegator_public_key.clone(), - U512::from(10), - bonding_purse, - validator_public_key.clone(), - ); - let mut delegators = BTreeMap::new(); - delegators.insert(delegator_public_key, delegator); - - let bid = Bid::locked( - validator_public_key.clone(), - bonding_purse, - staked_amount, - DelegationRate::zero(), - release_era, - ); - let mut bids = BTreeMap::new(); - bids.insert(validator_public_key, bid); - - bids -}); -static AUCTION_INFO: Lazy = Lazy::new(|| { - let state_root_hash = Digest::from([11; Digest::LENGTH]); - let height: u64 = 10; - let era_validators = Some(EraValidators::doc_example().clone()); - let bids = Some(Bids::doc_example().clone()); - AuctionState::new(state_root_hash, 
height, era_validators, bids) -}); - -/// A validator's weight. -#[derive(Debug, Serialize, Deserialize, PartialEq, Clone, JsonSchema)] -#[serde(deny_unknown_fields)] -pub struct JsonValidatorWeights { - public_key: PublicKey, - weight: U512, -} - -/// The validators for the given era. -#[derive(Debug, Serialize, Deserialize, PartialEq, Clone, JsonSchema)] -#[serde(deny_unknown_fields)] -pub struct JsonEraValidators { - era_id: EraId, - validator_weights: Vec, -} - -/// A delegator associated with the given validator. -#[derive(Debug, Serialize, Deserialize, PartialEq, Clone, JsonSchema)] -#[serde(deny_unknown_fields)] -pub struct JsonDelegator { - public_key: PublicKey, - staked_amount: U512, - bonding_purse: URef, - delegatee: PublicKey, -} - -/// An entry in a founding validator map representing a bid. -#[derive(Debug, Serialize, Deserialize, PartialEq, Clone, JsonSchema)] -#[serde(deny_unknown_fields)] -pub struct JsonBid { - /// The purse that was used for bonding. - bonding_purse: URef, - /// The amount of tokens staked by a validator (not including delegators). - staked_amount: U512, - /// The delegation rate. - delegation_rate: DelegationRate, - /// The delegators. - delegators: Vec, - /// Is this an inactive validator. - inactive: bool, -} - -impl From for JsonBid { - fn from(bid: Bid) -> Self { - let mut json_delegators: Vec = Vec::with_capacity(bid.delegators().len()); - for (public_key, delegator) in bid.delegators().iter() { - json_delegators.push(JsonDelegator { - public_key: public_key.clone(), - staked_amount: *delegator.staked_amount(), - bonding_purse: *delegator.bonding_purse(), - delegatee: delegator.validator_public_key().clone(), - }); - } - JsonBid { - bonding_purse: *bid.bonding_purse(), - staked_amount: *bid.staked_amount(), - delegation_rate: *bid.delegation_rate(), - delegators: json_delegators, - inactive: bid.inactive(), - } - } -} - -/// A Json representation of a single bid. 
-#[derive(Debug, Serialize, Deserialize, PartialEq, Clone, JsonSchema)] -#[serde(deny_unknown_fields)] -pub struct JsonBids { - public_key: PublicKey, - bid: JsonBid, -} - -/// Data structure summarizing auction contract data. -#[derive(Debug, Serialize, Deserialize, PartialEq, Clone, JsonSchema)] -#[serde(deny_unknown_fields)] -pub struct AuctionState { - /// Global state hash. - pub state_root_hash: Digest, - /// Block height. - pub block_height: u64, - /// Era validators. - pub era_validators: Vec, - /// All bids contained within a vector. - bids: Vec, -} - -impl AuctionState { - /// Create new instance of `AuctionState` - pub fn new( - state_root_hash: Digest, - block_height: u64, - era_validators: Option, - bids: Option, - ) -> Self { - let mut json_era_validators: Vec = Vec::new(); - for (era_id, validator_weights) in era_validators.unwrap().iter() { - let mut json_validator_weights: Vec = Vec::new(); - for (public_key, weight) in validator_weights.iter() { - json_validator_weights.push(JsonValidatorWeights { - public_key: public_key.clone(), - weight: *weight, - }); - } - json_era_validators.push(JsonEraValidators { - era_id: *era_id, - validator_weights: json_validator_weights, - }); - } - - let mut json_bids: Vec = Vec::new(); - for (public_key, bid) in bids.unwrap().iter() { - let json_bid = JsonBid::from(bid.clone()); - json_bids.push(JsonBids { - public_key: public_key.clone(), - bid: json_bid, - }); - } - - AuctionState { - state_root_hash, - block_height, - era_validators: json_era_validators, - bids: json_bids, - } - } -} - -impl DocExample for AuctionState { - fn doc_example() -> &'static Self { - &*AUCTION_INFO - } -} - -impl DocExample for EraValidators { - fn doc_example() -> &'static Self { - &*ERA_VALIDATORS - } -} - -impl DocExample for Bids { - fn doc_example() -> &'static Self { - &*BIDS - } -} diff --git a/node/src/types/json_compatibility/contracts.rs b/node/src/types/json_compatibility/contracts.rs deleted file mode 100644 index 
1c116d6bad..0000000000 --- a/node/src/types/json_compatibility/contracts.rs +++ /dev/null @@ -1,113 +0,0 @@ -// TODO - remove once schemars stops causing warning. -#![allow(clippy::field_reassign_with_default)] - -use datasize::DataSize; -use schemars::JsonSchema; -use serde::{Deserialize, Serialize}; - -use crate::types::json_compatibility::vectorize; -use casper_types::{ - Contract as DomainContract, ContractHash, ContractPackage as DomainContractPackage, - ContractPackageHash, ContractWasmHash, EntryPoint, NamedKey, ProtocolVersion, URef, -}; - -#[derive( - Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize, DataSize, JsonSchema, -)] -pub struct ContractVersion { - protocol_version_major: u32, - contract_version: u32, - contract_hash: ContractHash, -} - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, DataSize, JsonSchema)] -pub struct DisabledVersion { - protocol_version_major: u32, - contract_version: u32, -} - -#[derive(Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize, DataSize, JsonSchema)] -pub struct Groups { - group: String, - #[data_size(skip)] - keys: Vec, -} - -/// A contract struct that can be serialized as JSON object. 
-#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize, DataSize, JsonSchema)] -#[serde(deny_unknown_fields)] -pub struct Contract { - contract_package_hash: ContractPackageHash, - contract_wasm_hash: ContractWasmHash, - #[data_size(skip)] - named_keys: Vec, - #[data_size(skip)] - entry_points: Vec, - #[data_size(skip)] - #[schemars(with = "String")] - protocol_version: ProtocolVersion, -} - -impl From<&DomainContract> for Contract { - fn from(contract: &DomainContract) -> Self { - let entry_points = contract.entry_points().clone().take_entry_points(); - let named_keys = vectorize(contract.named_keys()); - Contract { - contract_package_hash: contract.contract_package_hash(), - contract_wasm_hash: contract.contract_wasm_hash(), - named_keys, - entry_points, - protocol_version: contract.protocol_version(), - } - } -} - -/// Contract definition, metadata, and security container. -#[derive(Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize, DataSize, JsonSchema)] -#[serde(deny_unknown_fields)] -pub struct ContractPackage { - #[data_size(skip)] - access_key: URef, - versions: Vec, - disabled_versions: Vec, - groups: Vec, -} - -impl From<&DomainContractPackage> for ContractPackage { - fn from(contract_package: &DomainContractPackage) -> Self { - let versions = contract_package - .versions() - .iter() - .map(|(version_key, hash)| ContractVersion { - protocol_version_major: version_key.protocol_version_major(), - contract_version: version_key.contract_version(), - contract_hash: *hash, - }) - .collect(); - - let disabled_versions = contract_package - .disabled_versions() - .iter() - .map(|version| DisabledVersion { - protocol_version_major: version.protocol_version_major(), - contract_version: version.contract_version(), - }) - .collect(); - - let groups = contract_package - .groups() - .iter() - .map(|(group, keys)| Groups { - group: group.clone().value().to_string(), - keys: keys.iter().cloned().collect(), - }) - .collect(); - - ContractPackage { - 
access_key: contract_package.access_key(), - versions, - disabled_versions, - groups, - } - } -} diff --git a/node/src/types/json_compatibility/stored_value.rs b/node/src/types/json_compatibility/stored_value.rs deleted file mode 100644 index a2e474d99b..0000000000 --- a/node/src/types/json_compatibility/stored_value.rs +++ /dev/null @@ -1,84 +0,0 @@ -//! This file provides types to allow conversion from an EE `StoredValue` into a similar type -//! which can be serialized to a valid JSON representation. - -// TODO - remove once schemars stops causing warning. -#![allow(clippy::field_reassign_with_default)] - -use std::convert::TryFrom; - -use schemars::JsonSchema; -use serde::{Deserialize, Serialize}; - -use casper_execution_engine::shared::stored_value::StoredValue as ExecutionEngineStoredValue; -use casper_types::{ - bytesrepr::{self, ToBytes}, - system::auction::{Bid, EraInfo, SeigniorageRecipients, UnbondingPurse}, - CLValue, DeployInfo, Transfer, -}; - -use super::{Account, Contract, ContractPackage}; - -/// Representation of a value stored in global state. -/// -/// `Account`, `Contract` and `ContractPackage` have their own `json_compatibility` representations -/// (see their docs for further info). -#[derive(Clone, Eq, PartialEq, Serialize, Deserialize, Debug, JsonSchema)] -#[serde(deny_unknown_fields)] -pub enum StoredValue { - /// A CasperLabs value. - CLValue(CLValue), - /// An account. - Account(Account), - /// A contract's Wasm - ContractWasm(String), - /// Methods and type signatures supported by a contract. - Contract(Contract), - /// A contract definition, metadata, and security container. - ContractPackage(ContractPackage), - /// A record of a transfer - Transfer(Transfer), - /// A record of a deploy - DeployInfo(DeployInfo), - /// Auction metadata - EraInfo(EraInfo), - /// A bid - Bid(Box), - /// A withdraw - Withdraw(Vec), - /// The seignorage recipients. 
- EraValidators(SeigniorageRecipients), -} - -impl TryFrom<&ExecutionEngineStoredValue> for StoredValue { - type Error = bytesrepr::Error; - - fn try_from(ee_stored_value: &ExecutionEngineStoredValue) -> Result { - let stored_value = match ee_stored_value { - ExecutionEngineStoredValue::CLValue(cl_value) => StoredValue::CLValue(cl_value.clone()), - ExecutionEngineStoredValue::Account(account) => StoredValue::Account(account.into()), - ExecutionEngineStoredValue::ContractWasm(contract_wasm) => { - StoredValue::ContractWasm(hex::encode(&contract_wasm.to_bytes()?)) - } - ExecutionEngineStoredValue::Contract(contract) => { - StoredValue::Contract(contract.into()) - } - ExecutionEngineStoredValue::ContractPackage(contract_package) => { - StoredValue::ContractPackage(contract_package.into()) - } - ExecutionEngineStoredValue::Transfer(transfer) => StoredValue::Transfer(*transfer), - ExecutionEngineStoredValue::DeployInfo(deploy_info) => { - StoredValue::DeployInfo(deploy_info.clone()) - } - ExecutionEngineStoredValue::EraInfo(era_info) => StoredValue::EraInfo(era_info.clone()), - ExecutionEngineStoredValue::Bid(bid) => StoredValue::Bid(bid.clone()), - ExecutionEngineStoredValue::Withdraw(unbonding_purses) => { - StoredValue::Withdraw(unbonding_purses.clone()) - } - ExecutionEngineStoredValue::EraValidators(recipients) => { - StoredValue::EraValidators(recipients.clone()) - } - }; - - Ok(stored_value) - } -} diff --git a/node/src/types/max_ttl.rs b/node/src/types/max_ttl.rs new file mode 100644 index 0000000000..698fdf50fa --- /dev/null +++ b/node/src/types/max_ttl.rs @@ -0,0 +1,180 @@ +use datasize::DataSize; + +use casper_types::{BlockHeader, TimeDiff, Timestamp}; + +#[derive(DataSize, Debug)] +pub struct MaxTtl(TimeDiff); + +impl MaxTtl { + /// Create instance. + pub fn new(max_ttl: TimeDiff) -> Self { + MaxTtl(max_ttl) + } + + /// Get inner value. 
+ pub fn value(&self) -> TimeDiff { + self.0 + } + + /// If rearview is earlier than (vantage - ttl duration), ttl has elapsed. + pub fn ttl_elapsed(&self, vantage: Timestamp, rearview: Timestamp) -> bool { + rearview < vantage.saturating_sub(self.0) + } + + /// Determine if orphaned block header is older than ttl requires. + pub fn synced_to_ttl( + &self, + latest_switch_block_timestamp: Timestamp, + highest_orphaned_block_header: &BlockHeader, + ) -> bool { + if highest_orphaned_block_header.is_genesis() { + true + } else { + self.ttl_elapsed( + latest_switch_block_timestamp, + highest_orphaned_block_header.timestamp(), + ) + } + } +} + +/// Wrap a TimeDiff as a MaxTtl. +impl From for MaxTtl { + fn from(value: TimeDiff) -> Self { + MaxTtl::new(value) + } +} + +#[cfg(test)] +mod tests { + use casper_types::{testing::TestRng, TestBlockBuilder, TimeDiff, Timestamp}; + + use crate::types::MaxTtl; + + const SUB_MAX_TTL: TimeDiff = TimeDiff::from_millis(1); + const MAX_TTL: TimeDiff = TimeDiff::from_millis(2); + + fn assert_ttl( + higher: Timestamp, + lower: Timestamp, + max_ttl: TimeDiff, + elapsed_expected: bool, + msg: &str, + ) { + let max_ttl: MaxTtl = max_ttl.into(); + let elapsed = max_ttl.ttl_elapsed(higher, lower); + assert_eq!(elapsed, elapsed_expected, "{}", msg); + } + + #[test] + fn should_elapse() { + let higher = Timestamp::now(); + let lower = higher + .saturating_sub(MAX_TTL) + .saturating_sub(TimeDiff::from_millis(1)); + assert_ttl( + higher, + lower, + MAX_TTL, + true, + "1 milli over ttl should have elapsed", + ); + } + + #[test] + fn should_not_elapse() { + let higher = Timestamp::now(); + let lower = higher.saturating_sub(SUB_MAX_TTL); + assert_ttl(higher, lower, MAX_TTL, false, "should not have elapsed"); + } + + #[test] + fn should_not_elapse_with_equal_timestamps() { + let timestamp = Timestamp::now(); + assert_ttl( + timestamp, + timestamp, + MAX_TTL, + false, + "equal timestamps should not be elapsed", + ); + } + + #[test] + fn 
should_not_elapse_on_cusp() { + let higher = Timestamp::now(); + let lower = higher.saturating_sub(MAX_TTL); + assert_ttl( + higher, + lower, + MAX_TTL, + false, + "should not have elapsed exactly on cusp of ttl", + ); + } + + #[test] + fn should_not_err() { + let higher = Timestamp::now(); + let lower = higher.saturating_sub(SUB_MAX_TTL); + let max_ttl: MaxTtl = MAX_TTL.into(); + let elapsed = max_ttl.ttl_elapsed(lower, higher); + assert!( + !elapsed, + "can't have elapsed because timestamps are chronologically reversed (programmer error)" + ); + } + + fn assert_sync_to_ttl(is_genesis: bool, ttl_synced_expected: bool, msg: &str) { + let max_ttl: MaxTtl = MAX_TTL.into(); + let rng = &mut TestRng::new(); + let (latest_switch_block_timestamp, highest_orphaned_block_header) = if is_genesis { + let block = TestBlockBuilder::new() + .era(0) + .height(0) + .switch_block(true) + .build(rng); + // it does not matter what this value is; if genesis has been reached + // while walking backwards, there are no earlier blocks to get + // thus all sync scenarios have succeeded / are satisfied + let timestamp = Timestamp::random(rng); + (timestamp, block.header().clone()) + } else { + let block = TestBlockBuilder::new() + .era(1) + .height(1) + .switch_block(false) + .build(rng); + // project a sufficiently advanced future timestamp for the test. 
+ let mut timestamp = block.timestamp().saturating_add(max_ttl.value()); + if ttl_synced_expected { + timestamp = timestamp.saturating_add(TimeDiff::from_millis(1)) + } + (timestamp, block.header().clone()) + }; + let synced = max_ttl.synced_to_ttl( + latest_switch_block_timestamp, + &highest_orphaned_block_header.into(), + ); + assert_eq!(synced, ttl_synced_expected, "{}", msg); + } + + #[test] + fn should_handle_genesis_special_case() { + assert_sync_to_ttl( + true, + true, + "genesis should always satisfy sync to ttl requirement", + ); + } + + #[test] + fn should_be_synced_to_ttl() { + assert_sync_to_ttl(false, true, "should be sync'd to ttl"); + } + + #[test] + fn should_not_be_synced_to_ttl() { + assert_sync_to_ttl(false, false, "should not be sync'd to ttl"); + } +} diff --git a/node/src/types/node_config.rs b/node/src/types/node_config.rs index 57342f286f..f0f2081a3d 100644 --- a/node/src/types/node_config.rs +++ b/node/src/types/node_config.rs @@ -1,13 +1,110 @@ use datasize::DataSize; use serde::{Deserialize, Serialize}; -use crate::types::BlockHash; +use casper_types::{BlockHash, TimeDiff}; -/// Node configuration. -#[derive(Default, DataSize, Debug, Deserialize, Serialize)] +const DEFAULT_IDLE_TOLERANCE: &str = "20min"; +const DEFAULT_MAX_ATTEMPTS: usize = 3; +const DEFAULT_CONTROL_LOGIC_DEFAULT_DELAY: &str = "1sec"; +const DEFAULT_SHUTDOWN_FOR_UPGRADE_TIMEOUT: &str = "2min"; +const DEFAULT_UPGRADE_TIMEOUT: &str = "30sec"; + +/// Node sync configuration. +#[derive(DataSize, Debug, Deserialize, Serialize, Clone, Default, Eq, PartialEq)] +#[serde(rename_all = "lowercase")] +pub enum SyncHandling { + /// Attempt to acquire all historical state back to genesis. + Genesis, + /// Only attempt to acquire necessary blocks to satisfy Time to Live requirements. + #[default] + Ttl, + /// Don't attempt to sync historical blocks. 
+ NoSync, + /// Don't attempt to sync historical blocks and shut down node instead of switching to KeepUp + /// after acquiring the first complete block + CompleteBlock, + /// The node operates in isolation - no peers are needed, the node won't wait for peers to + /// switch to KeepUp. + Isolated, +} + +impl SyncHandling { + /// Sync to Genesis? + pub fn is_sync_to_genesis(&self) -> bool { + matches!(self, SyncHandling::Genesis) + } + + /// Sync to Ttl? + pub fn is_sync_to_ttl(&self) -> bool { + matches!(self, SyncHandling::Ttl) + } + + /// Don't Sync? + pub fn is_no_sync(&self) -> bool { + matches!(self, SyncHandling::NoSync) + } + + /// Don't Sync and shut down? + pub fn is_complete_block(&self) -> bool { + matches!(self, SyncHandling::CompleteBlock) + } + + /// Isolated? + pub fn is_isolated(&self) -> bool { + matches!(self, SyncHandling::Isolated) + } +} + +/// Node fast-sync configuration. +#[derive(DataSize, Debug, Deserialize, Serialize, Clone)] // Disallow unknown fields to ensure config files and command-line overrides contain valid keys. #[serde(deny_unknown_fields)] pub struct NodeConfig { /// Hash used as a trust anchor when joining, if any. pub trusted_hash: Option, + + /// Which historical sync option? + /// Genesis: sync all the way back to genesis + /// Ttl: sync the necessary number of historical blocks to satisfy TTL requirement. + /// NoSync: don't attempt to get any historical records; i.e. go forward only. + pub sync_handling: SyncHandling, + + /// Idle time after which the syncing process is considered stalled. + pub idle_tolerance: TimeDiff, + + /// When the syncing process is considered stalled, it'll be retried up to `max_attempts` + /// times. + pub max_attempts: usize, + + /// Default delay for the control events that have no dedicated delay requirements. + pub control_logic_default_delay: TimeDiff, + + /// Flag which forces the node to resync all of the blocks. 
+ pub force_resync: bool, + + /// Shutdown for upgrade state timeout, after which the node will upgrade regardless whether + /// all the conditions are satisfied. + pub shutdown_for_upgrade_timeout: TimeDiff, + + /// Maximum time a node will wait for an upgrade to commit. + pub upgrade_timeout: TimeDiff, + + /// If true, prevents a node from shutting down if it is supposed to be a validator in the era. + pub prevent_validator_shutdown: bool, +} + +impl Default for NodeConfig { + fn default() -> NodeConfig { + NodeConfig { + trusted_hash: None, + sync_handling: SyncHandling::default(), + idle_tolerance: DEFAULT_IDLE_TOLERANCE.parse().unwrap(), + max_attempts: DEFAULT_MAX_ATTEMPTS, + control_logic_default_delay: DEFAULT_CONTROL_LOGIC_DEFAULT_DELAY.parse().unwrap(), + force_resync: false, + shutdown_for_upgrade_timeout: DEFAULT_SHUTDOWN_FOR_UPGRADE_TIMEOUT.parse().unwrap(), + upgrade_timeout: DEFAULT_UPGRADE_TIMEOUT.parse().unwrap(), + prevent_validator_shutdown: false, + } + } } diff --git a/node/src/types/node_id.rs b/node/src/types/node_id.rs index 282b6e47b6..c4b9410078 100644 --- a/node/src/types/node_id.rs +++ b/node/src/types/node_id.rs @@ -1,55 +1,34 @@ -use std::{ - fmt::{self, Debug, Display, Formatter}, - str::FromStr, -}; +use std::fmt::{self, Debug, Display, Formatter}; use datasize::DataSize; use hex_fmt::HexFmt; -use libp2p::PeerId; -#[cfg(test)] -use multihash::Multihash; use once_cell::sync::Lazy; #[cfg(test)] -use rand::{Rng, RngCore}; +use rand::Rng; use serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer}; #[cfg(test)] -use crate::testing::TestRng; -use crate::{rpcs::docs::DocExample, tls::KeyFingerprint}; +use casper_types::testing::TestRng; + +use crate::{components::rest_server::DocExample, tls::KeyFingerprint}; /// The network identifier for a node. +/// +/// A node's ID is derived from the fingerprint of its TLS certificate. 
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, DataSize)] -pub enum NodeId { - Tls(KeyFingerprint), - #[data_size(skip)] - P2p(PeerId), -} +pub struct NodeId(KeyFingerprint); impl NodeId { /// Generates a random instance using a `TestRng`. #[cfg(test)] pub(crate) fn random(rng: &mut TestRng) -> Self { - if rng.gen() { - Self::random_tls(rng) - } else { - Self::random_p2p(rng) - } + Self(rng.gen()) } - /// Generates a random Tls instance using a `TestRng`. - #[cfg(test)] - pub(crate) fn random_tls(rng: &mut TestRng) -> Self { - NodeId::Tls(rng.gen()) - } - - /// Generates a random P2p instance using a `TestRng`. - #[cfg(test)] - pub(crate) fn random_p2p(rng: &mut TestRng) -> Self { - let mut bytes = [0u8; 32]; - rng.fill_bytes(&mut bytes[..]); - let multihash = Multihash::wrap(multihash::Code::Identity.into(), &bytes).unwrap(); - let peer_id = PeerId::from_multihash(multihash).expect("should construct from multihash"); - NodeId::P2p(peer_id) + /// Returns the raw bytes of the underlying hash of the ID. + #[inline] + pub fn hash_bytes(&self) -> &[u8] { + self.0.as_ref() } } @@ -57,166 +36,148 @@ impl NodeId { #[derive(Serialize, Deserialize)] enum NodeIdAsBytes { Tls(KeyFingerprint), - P2p(Vec), } /// Used to serialize and deserialize `NodeID` where the (de)serializer is a human-readable type. 
#[derive(Serialize, Deserialize)] enum NodeIdAsString { Tls(String), - P2p(String), } impl Serialize for NodeId { fn serialize(&self, serializer: S) -> Result { if serializer.is_human_readable() { - let helper = match self { - NodeId::Tls(key_fingerprint) => { - NodeIdAsString::Tls(hex::encode(key_fingerprint.as_ref())) - } - NodeId::P2p(peer_id) => NodeIdAsString::P2p(peer_id.to_base58()), - }; - return helper.serialize(serializer); + NodeIdAsString::Tls(base16::encode_lower(&self.0)).serialize(serializer) + } else { + NodeIdAsBytes::Tls(self.0).serialize(serializer) } - - let helper = match self { - NodeId::Tls(key_fingerprint) => NodeIdAsBytes::Tls(*key_fingerprint), - NodeId::P2p(peer_id) => NodeIdAsBytes::P2p(peer_id.to_bytes()), - }; - helper.serialize(serializer) } } impl<'de> Deserialize<'de> for NodeId { fn deserialize>(deserializer: D) -> Result { if deserializer.is_human_readable() { - let helper = NodeIdAsString::deserialize(deserializer)?; - match helper { - NodeIdAsString::Tls(hex_value) => { - let bytes = hex::decode(hex_value).map_err(D::Error::custom)?; - if bytes.len() != KeyFingerprint::LENGTH { - return Err(SerdeError::custom("wrong length")); - } - let mut array = [0_u8; KeyFingerprint::LENGTH]; - array.copy_from_slice(bytes.as_slice()); - return Ok(NodeId::Tls(KeyFingerprint::from(array))); - } - NodeIdAsString::P2p(b58_value) => { - let peer_id = PeerId::from_str(&b58_value).map_err(D::Error::custom)?; - return Ok(NodeId::P2p(peer_id)); - } - } - } + let NodeIdAsString::Tls(hex_value) = NodeIdAsString::deserialize(deserializer)?; - let helper = NodeIdAsBytes::deserialize(deserializer)?; - match helper { - NodeIdAsBytes::Tls(key_fingerprint) => Ok(NodeId::Tls(key_fingerprint)), - NodeIdAsBytes::P2p(bytes) => { - let peer_id = - PeerId::from_bytes(&bytes).map_err(|_| D::Error::custom("invalid PeerId"))?; - Ok(NodeId::P2p(peer_id)) + let bytes = base16::decode(hex_value.as_bytes()).map_err(D::Error::custom)?; + if bytes.len() != 
KeyFingerprint::LENGTH { + return Err(SerdeError::custom("wrong length")); } + let mut array = [0_u8; KeyFingerprint::LENGTH]; + array.copy_from_slice(bytes.as_slice()); + + Ok(NodeId(KeyFingerprint::from(array))) + } else { + let NodeIdAsBytes::Tls(key_fingerprint) = NodeIdAsBytes::deserialize(deserializer)?; + Ok(NodeId(key_fingerprint)) } } } static NODE_ID: Lazy = - Lazy::new(|| NodeId::Tls(KeyFingerprint::from([1u8; KeyFingerprint::LENGTH]))); + Lazy::new(|| NodeId(KeyFingerprint::from([1u8; KeyFingerprint::LENGTH]))); impl DocExample for NodeId { fn doc_example() -> &'static Self { - &*NODE_ID + &NODE_ID } } impl Debug for NodeId { fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { - match self { - NodeId::Tls(key_fingerprint) => write!( - formatter, - "NodeId::Tls({})", - HexFmt(key_fingerprint.as_ref()) - ), - NodeId::P2p(peer_id) => write!(formatter, "PeerId::P2p({})", peer_id.to_base58()), - } + write!(formatter, "NodeId({})", base16::encode_lower(&self.0)) } } impl Display for NodeId { fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { - match self { - NodeId::Tls(key_fingerprint) => write!( - formatter, - "NodeId::Tls({:10})", - HexFmt(key_fingerprint.as_ref()) - ), - NodeId::P2p(peer_id) => { - let base58_peer_id = peer_id.to_base58(); - write!( - formatter, - "NodeId::P2p({}..{})", - &base58_peer_id[8..12], - &base58_peer_id[(base58_peer_id.len() - 4)..] 
- ) - } - } + write!(formatter, "tls:{:10}", HexFmt(&self.0)) } } impl From for NodeId { fn from(id: KeyFingerprint) -> Self { - NodeId::Tls(id) - } -} - -impl From for NodeId { - fn from(id: PeerId) -> Self { - NodeId::P2p(id) + NodeId(id) } } #[cfg(test)] impl From<[u8; KeyFingerprint::LENGTH]> for NodeId { fn from(raw_bytes: [u8; KeyFingerprint::LENGTH]) -> Self { - NodeId::Tls(KeyFingerprint::from(raw_bytes)) + NodeId(KeyFingerprint::from(raw_bytes)) } } #[cfg(test)] -mod test { +mod tests { use super::*; + const EXAMPLE_HASH_RAW: [u8; 64] = [ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, + 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, + 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, + 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, + 0x3c, 0x3d, 0x3e, 0x3f, + ]; + #[test] fn serde_roundtrip_tls() { let mut rng = crate::new_rng(); - let node_id = NodeId::random_tls(&mut rng); + let node_id = NodeId::random(&mut rng); let serialized = bincode::serialize(&node_id).unwrap(); let decoded = bincode::deserialize(&serialized).unwrap(); assert_eq!(node_id, decoded); } #[test] - fn serde_roundtrip_p2p() { - let mut rng = crate::new_rng(); - let node_id = NodeId::random_p2p(&mut rng); + fn bincode_known_specimen() { + let node_id = NodeId::from(EXAMPLE_HASH_RAW); let serialized = bincode::serialize(&node_id).unwrap(); - let decoded = bincode::deserialize(&serialized).unwrap(); - assert_eq!(node_id, decoded); + + // The bincode representation is a 4 byte tag of all zeros, followed by the hash bytes. 
+ let expected: [u8; 68] = [ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, + 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, + 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, + 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, + ]; + + assert_eq!(&expected[..], serialized.as_slice()); } #[test] - fn json_roundtrip_tls() { - let mut rng = crate::new_rng(); - let node_id = NodeId::random_tls(&mut rng); + fn json_known_specimen() { + let node_id = NodeId::from(EXAMPLE_HASH_RAW); let json_string = serde_json::to_string_pretty(&node_id).unwrap(); - let decoded = serde_json::from_str(&json_string).unwrap(); - assert_eq!(node_id, decoded); + + let expected = "{\n \"Tls\": \"000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f\"\n}"; + assert_eq!(expected, json_string.as_str()); } #[test] - fn json_roundtrip_p2p() { + fn msgpack_default_settings_known_specimen() { + let node_id = NodeId::from(EXAMPLE_HASH_RAW); + + let serialized = rmp_serde::to_vec(&node_id).unwrap(); + + let expected: [u8; 132] = [ + 129, 0, 217, 128, 48, 48, 48, 49, 48, 50, 48, 51, 48, 52, 48, 53, 48, 54, 48, 55, 48, + 56, 48, 57, 48, 97, 48, 98, 48, 99, 48, 100, 48, 101, 48, 102, 49, 48, 49, 49, 49, 50, + 49, 51, 49, 52, 49, 53, 49, 54, 49, 55, 49, 56, 49, 57, 49, 97, 49, 98, 49, 99, 49, + 100, 49, 101, 49, 102, 50, 48, 50, 49, 50, 50, 50, 51, 50, 52, 50, 53, 50, 54, 50, 55, + 50, 56, 50, 57, 50, 97, 50, 98, 50, 99, 50, 100, 50, 101, 50, 102, 51, 48, 51, 49, 51, + 50, 51, 51, 51, 52, 51, 53, 51, 54, 51, 55, 51, 56, 51, 57, 51, 97, 51, 98, 51, 99, 51, + 100, 51, 101, 51, 102, + ]; + + assert_eq!(serialized, expected); + } + + #[test] + fn json_roundtrip_tls() { let mut rng = crate::new_rng(); - let node_id = 
NodeId::random_p2p(&mut rng); + let node_id = NodeId::random(&mut rng); let json_string = serde_json::to_string_pretty(&node_id).unwrap(); let decoded = serde_json::from_str(&json_string).unwrap(); assert_eq!(node_id, decoded); diff --git a/node/src/types/peers_map.rs b/node/src/types/peers_map.rs deleted file mode 100644 index 89375adeb4..0000000000 --- a/node/src/types/peers_map.rs +++ /dev/null @@ -1,34 +0,0 @@ -// TODO - remove once schemars stops causing warning. -#![allow(clippy::field_reassign_with_default)] - -use std::collections::BTreeMap; - -use schemars::JsonSchema; -use serde::{Deserialize, Serialize}; - -use crate::types::NodeId; - -#[derive(Clone, Serialize, Deserialize, Debug, PartialEq, JsonSchema)] -#[serde(deny_unknown_fields)] -struct PeerEntry { - node_id: String, - address: String, -} - -/// Map of peer IDs to network addresses. -#[derive(Clone, Serialize, Deserialize, Debug, PartialEq, JsonSchema)] -#[serde(deny_unknown_fields)] -pub struct PeersMap(Vec); - -impl From> for PeersMap { - fn from(input: BTreeMap) -> Self { - let ret = input - .into_iter() - .map(|(node_id, address)| PeerEntry { - node_id: node_id.to_string(), - address, - }) - .collect(); - PeersMap(ret) - } -} diff --git a/node/src/types/status_feed.rs b/node/src/types/status_feed.rs index c1b04634da..6c6f154321 100644 --- a/node/src/types/status_feed.rs +++ b/node/src/types/status_feed.rs @@ -1,25 +1,23 @@ -// TODO - remove once schemars stops causing warning. 
-#![allow(clippy::field_reassign_with_default)] - use std::{ collections::BTreeMap, - hash::Hash, net::{IpAddr, Ipv4Addr, SocketAddr}, + time::Duration, }; use once_cell::sync::Lazy; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; -use casper_types::{EraId, ProtocolVersion, PublicKey}; +use casper_binary_port::ConsensusStatus; +use casper_types::{ + ActivationPoint, AvailableBlockRange, Block, BlockHash, BlockSynchronizerStatus, Digest, EraId, + NextUpgrade, Peers, ProtocolVersion, PublicKey, TimeDiff, Timestamp, +}; use crate::{ - components::{ - chainspec_loader::NextUpgrade, - rpc_server::rpcs::docs::{DocExample, DOCS_EXAMPLE_PROTOCOL_VERSION}, - }, - crypto::{hash::Digest, AsymmetricKeyExt}, - types::{ActivationPoint, Block, BlockHash, NodeId, PeersMap, TimeDiff, Timestamp}, + components::rest_server::{DocExample, DOCS_EXAMPLE_PROTOCOL_VERSION}, + reactor::main_reactor::ReactorState, + types::NodeId, }; static CHAINSPEC_INFO: Lazy = Lazy::new(|| { @@ -29,7 +27,6 @@ static CHAINSPEC_INFO: Lazy = Lazy::new(|| { ); ChainspecInfo { name: String::from("casper-example"), - starting_state_root_hash: Digest::from([2u8; Digest::LENGTH]), next_upgrade: Some(next_upgrade), } }); @@ -39,13 +36,20 @@ static GET_STATUS_RESULT: Lazy = Lazy::new(|| { let socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 54321); let mut peers = BTreeMap::new(); peers.insert(*node_id, socket_addr.to_string()); - let status_feed = StatusFeed:: { - last_added_block: Some(Block::doc_example().clone()), + let status_feed = StatusFeed { + last_added_block: Some(Block::example().clone()), peers, chainspec_info: ChainspecInfo::doc_example().clone(), - our_public_signing_key: Some(PublicKey::doc_example().clone()), - round_length: Some(TimeDiff::from(1 << 16)), + our_public_signing_key: Some(PublicKey::example().clone()), + round_length: Some(TimeDiff::from_millis(1 << 16)), version: crate::VERSION_STRING.as_str(), + node_uptime: Duration::from_secs(13), + 
reactor_state: ReactorState::Initialize, + last_progress: Timestamp::from(0), + available_block_range: AvailableBlockRange::RANGE_0_0, + block_sync: BlockSynchronizerStatus::example().clone(), + starting_state_root_hash: Digest::default(), + latest_switch_block_hash: Some(BlockHash::default()), }; GetStatusResult::new(status_feed, DOCS_EXAMPLE_PROTOCOL_VERSION) }); @@ -55,28 +59,19 @@ static GET_STATUS_RESULT: Lazy = Lazy::new(|| { pub struct ChainspecInfo { /// Name of the network. name: String, - /// The state root hash with which this session is starting. It will be the result of running - /// `ContractRuntime::commit_genesis()` or `ContractRuntime::upgrade()` or else the state root - /// hash specified in the highest block on startup. - starting_state_root_hash: Digest, next_upgrade: Option, } impl DocExample for ChainspecInfo { fn doc_example() -> &'static Self { - &*CHAINSPEC_INFO + &CHAINSPEC_INFO } } impl ChainspecInfo { - pub(crate) fn new( - chainspec_network_name: String, - starting_state_root_hash: Digest, - next_upgrade: Option, - ) -> Self { + pub(crate) fn new(chainspec_network_name: String, next_upgrade: Option) -> Self { ChainspecInfo { name: chainspec_network_name, - starting_state_root_hash, next_upgrade, } } @@ -84,12 +79,11 @@ impl ChainspecInfo { /// Data feed for client "info_get_status" endpoint. #[derive(Debug, Serialize)] -#[serde(bound = "I: Eq + Hash + Ord + Serialize")] -pub struct StatusFeed { +pub struct StatusFeed { /// The last block added to the chain. pub last_added_block: Option, /// The peer nodes which are connected to this node. - pub peers: BTreeMap, + pub peers: BTreeMap, /// The chainspec info for this node. pub chainspec_info: ChainspecInfo, /// Our public signing key. @@ -98,19 +92,44 @@ pub struct StatusFeed { pub round_length: Option, /// The compiled node version. pub version: &'static str, + /// Time that passed since the node has started. + pub node_uptime: Duration, + /// The current state of node reactor. 
+ pub reactor_state: ReactorState, + /// Timestamp of the last recorded progress in the reactor. + pub last_progress: Timestamp, + /// The available block range in storage. + pub available_block_range: AvailableBlockRange, + /// The status of the block synchronizer builders. + pub block_sync: BlockSynchronizerStatus, + /// The state root hash of the lowest block in the available block range. + pub starting_state_root_hash: Digest, + /// The hash of the latest switch block. + pub latest_switch_block_hash: Option, } -impl StatusFeed { +impl StatusFeed { + #[allow(clippy::too_many_arguments)] pub(crate) fn new( last_added_block: Option, - peers: BTreeMap, + peers: BTreeMap, chainspec_info: ChainspecInfo, - consensus_status: Option<(PublicKey, Option)>, + consensus_status: Option, + node_uptime: Duration, + reactor_state: ReactorState, + last_progress: Timestamp, + available_block_range: AvailableBlockRange, + block_sync: BlockSynchronizerStatus, + starting_state_root_hash: Digest, + latest_switch_block_hash: Option, ) -> Self { - let (our_public_signing_key, round_length) = match consensus_status { - Some((public_key, round_length)) => (Some(public_key), round_length), - None => (None, None), - }; + let (our_public_signing_key, round_length) = + consensus_status.map_or((None, None), |consensus_status| { + ( + Some(consensus_status.validator_public_key().clone()), + consensus_status.round_length(), + ) + }); StatusFeed { last_added_block, peers, @@ -118,12 +137,19 @@ impl StatusFeed { our_public_signing_key, round_length, version: crate::VERSION_STRING.as_str(), + node_uptime, + reactor_state, + last_progress, + available_block_range, + block_sync, + starting_state_root_hash, + latest_switch_block_hash, } } } /// Minimal info of a `Block`. 
-#[derive(Serialize, Deserialize, Debug, JsonSchema)] +#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)] #[serde(deny_unknown_fields)] pub struct MinimalBlockInfo { hash: BlockHash, @@ -136,30 +162,37 @@ pub struct MinimalBlockInfo { impl From for MinimalBlockInfo { fn from(block: Block) -> Self { + let proposer = match &block { + Block::V1(v1) => v1.proposer().clone(), + Block::V2(v2) => v2.proposer().clone(), + }; + MinimalBlockInfo { hash: *block.hash(), - timestamp: block.header().timestamp(), - era_id: block.header().era_id(), - height: block.header().height(), - state_root_hash: *block.header().state_root_hash(), - creator: block.body().proposer().clone(), + timestamp: block.timestamp(), + era_id: block.era_id(), + height: block.height(), + state_root_hash: *block.state_root_hash(), + creator: proposer, } } } /// Result for "info_get_status" RPC response. -#[derive(Serialize, Deserialize, Debug, JsonSchema)] +#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)] #[serde(deny_unknown_fields)] pub struct GetStatusResult { + /// The node ID and network address of each connected peer. + pub peers: Peers, /// The RPC API version. #[schemars(with = "String")] pub api_version: ProtocolVersion, + /// The compiled node version. + pub build_version: String, /// The chainspec name. pub chainspec_name: String, - /// The state root hash used at the start of the current session. - pub starting_state_root_hash: String, - /// The node ID and network address of each connected peer. - pub peers: PeersMap, + /// The state root hash of the lowest block in the available block range. + pub starting_state_root_hash: Digest, /// The minimal info of the last block from the linear chain. pub last_added_block_info: Option, /// Our public signing key. @@ -168,31 +201,50 @@ pub struct GetStatusResult { pub round_length: Option, /// Information about the next scheduled upgrade. pub next_upgrade: Option, - /// The compiled node version. 
- pub build_version: String, + /// Time that passed since the node has started. + pub uptime: TimeDiff, + /// The current state of node reactor. + pub reactor_state: ReactorState, + /// Timestamp of the last recorded progress in the reactor. + pub last_progress: Timestamp, + /// The available block range in storage. + pub available_block_range: AvailableBlockRange, + /// The status of the block synchronizer builders. + pub block_sync: BlockSynchronizerStatus, + /// The hash of the latest switch block. + pub latest_switch_block_hash: Option, } impl GetStatusResult { - pub(crate) fn new(status_feed: StatusFeed, api_version: ProtocolVersion) -> Self { + #[allow(deprecated)] + pub(crate) fn new(status_feed: StatusFeed, api_version: ProtocolVersion) -> Self { GetStatusResult { + peers: Peers::from(status_feed.peers), api_version, chainspec_name: status_feed.chainspec_info.name, - starting_state_root_hash: status_feed - .chainspec_info - .starting_state_root_hash - .to_string(), - peers: PeersMap::from(status_feed.peers), + starting_state_root_hash: status_feed.starting_state_root_hash, last_added_block_info: status_feed.last_added_block.map(Into::into), our_public_signing_key: status_feed.our_public_signing_key, round_length: status_feed.round_length, next_upgrade: status_feed.chainspec_info.next_upgrade, + uptime: status_feed.node_uptime.into(), + reactor_state: status_feed.reactor_state, + last_progress: status_feed.last_progress, + available_block_range: status_feed.available_block_range, + block_sync: status_feed.block_sync, + latest_switch_block_hash: status_feed.latest_switch_block_hash, + #[cfg(not(test))] build_version: crate::VERSION_STRING.clone(), + + // Prevent these values from changing between test sessions + #[cfg(test)] + build_version: String::from("1.0.0-xxxxxxxxx@DEBUG"), } } } impl DocExample for GetStatusResult { fn doc_example() -> &'static Self { - &*GET_STATUS_RESULT + &GET_STATUS_RESULT } } diff --git a/node/src/types/sync_leap.rs 
b/node/src/types/sync_leap.rs new file mode 100644 index 0000000000..2834b641cf --- /dev/null +++ b/node/src/types/sync_leap.rs @@ -0,0 +1,2451 @@ +use std::{ + collections::{BTreeMap, HashMap}, + fmt::{self, Display, Formatter}, + iter, +}; + +use datasize::DataSize; +use itertools::Itertools; +use num_rational::Ratio; +use serde::{Deserialize, Serialize}; +use thiserror::Error; +use tracing::error; + +use casper_types::{ + crypto, BlockHash, BlockHeader, BlockHeaderWithSignatures, + BlockHeaderWithSignaturesValidationError, BlockSignatures, Digest, EraId, ProtocolConfig, +}; + +use crate::{ + components::fetcher::{FetchItem, Tag}, + types::EraValidatorWeights, + utils::{self, BlockSignatureError}, +}; + +use super::sync_leap_validation_metadata::SyncLeapValidationMetaData; + +#[derive(Error, Debug)] +pub(crate) enum SyncLeapValidationError { + #[error("No ancestors of the trusted block provided.")] + MissingTrustedAncestors, + #[error("The SyncLeap does not contain proof that all its headers are on the right chain.")] + IncompleteProof, + #[error(transparent)] + HeadersNotSufficientlySigned(BlockSignatureError), + #[error("The block signatures are not cryptographically valid: {0}")] + Crypto(crypto::Error), + #[error(transparent)] + BlockHeaderWithSignatures(BlockHeaderWithSignaturesValidationError), + #[error("Too many switch blocks: leaping across that many eras is not allowed.")] + TooManySwitchBlocks, + #[error("Trusted ancestor headers must be in reverse chronological order.")] + TrustedAncestorsNotSorted, + #[error("Last trusted ancestor is not a switch block.")] + MissingAncestorSwitchBlock, + #[error( + "Only the last trusted ancestor is allowed to be a switch block or the genesis block." + )] + UnexpectedAncestorSwitchBlock, + #[error("Signed block headers present despite trusted_ancestor_only flag.")] + UnexpectedBlockHeadersWithSignatures, +} + +/// Identifier for a SyncLeap. 
+#[derive(Debug, Serialize, Deserialize, Copy, Clone, Hash, PartialEq, Eq, DataSize)] +pub(crate) struct SyncLeapIdentifier { + /// The block hash of the initial trusted block. + block_hash: BlockHash, + /// If true, block_header_with_signaturess are not required. + trusted_ancestor_only: bool, +} + +impl SyncLeapIdentifier { + pub(crate) fn sync_to_tip(block_hash: BlockHash) -> Self { + SyncLeapIdentifier { + block_hash, + trusted_ancestor_only: false, + } + } + + pub(crate) fn sync_to_historical(block_hash: BlockHash) -> Self { + SyncLeapIdentifier { + block_hash, + trusted_ancestor_only: true, + } + } + + pub(crate) fn block_hash(&self) -> BlockHash { + self.block_hash + } + + pub(crate) fn trusted_ancestor_only(&self) -> bool { + self.trusted_ancestor_only + } +} + +impl Display for SyncLeapIdentifier { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!( + f, + "{} trusted_ancestor_only: {}", + self.block_hash, self.trusted_ancestor_only + ) + } +} + +// Additional data for syncing blocks immediately after upgrades +#[derive(Debug, Clone, Copy)] +pub(crate) struct GlobalStatesMetadata { + // Hash, era ID, global state and protocol version of the block after upgrade + pub(crate) after_hash: BlockHash, + pub(crate) after_era_id: EraId, + pub(crate) after_state_hash: Digest, + // Hash, global state and protocol version of the block before upgrade + pub(crate) before_hash: BlockHash, + pub(crate) before_state_hash: Digest, +} + +/// Headers and signatures required to prove that if a given trusted block hash is on the correct +/// chain, then so is a later header, which should be the most recent one according to the sender. +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, DataSize)] +pub(crate) struct SyncLeap { + /// Requester indicates if they want only the header and ancestor headers, + /// of if they want everything. + pub trusted_ancestor_only: bool, + /// The header of the trusted block specified by hash by the requester. 
+ pub trusted_block_header: BlockHeader, + /// The block headers of the trusted block's ancestors, back to the most recent switch block. + pub trusted_ancestor_headers: Vec, + /// The headers of all switch blocks known to the sender, after the trusted block but before + /// their highest block, with signatures, plus the signed highest block. + pub block_headers_with_signatures: Vec, +} + +impl SyncLeap { + pub(crate) fn era_validator_weights( + &self, + fault_tolerance_fraction: Ratio, + protocol_config: &ProtocolConfig, + ) -> impl Iterator + '_ { + // determine if the validator set has been updated in the + // current protocol version through an emergency upgrade + let validators_changed_in_current_protocol = protocol_config + .global_state_update + .as_ref() + .is_some_and(|global_state_update| global_state_update.validators.is_some()); + let current_protocol_version = protocol_config.version; + + let block_protocol_versions: HashMap<_, _> = self + .headers() + .map(|hdr| (hdr.height(), hdr.protocol_version())) + .collect(); + self.switch_blocks_headers() + .find(|block_header| block_header.is_genesis()) + .into_iter() + .flat_map(move |block_header| { + Some(EraValidatorWeights::new( + EraId::default(), + block_header.next_era_validator_weights().cloned()?, + fault_tolerance_fraction, + )) + }) + .chain( + self.switch_blocks_headers() + // filter out switch blocks preceding upgrades - we don't want to read the era + // validators directly from them, as they might have been altered by the + // upgrade, we'll get them from the blocks' global states instead + // + // we can reliably determine if the validator set was changed by an upgrade to + // the current protocol version by looking at the chainspec. 
If validators have + // not been altered in any way, then we can use the set reported in the sync + // leap by the previous switch block and not read the global states + .filter(move |block_header| { + block_protocol_versions + .get(&(block_header.height() + 1)) + .is_none_or(|other_protocol_version| { + if block_header.protocol_version() == *other_protocol_version { + true + } else if *other_protocol_version == current_protocol_version { + !validators_changed_in_current_protocol + } else { + false + } + }) + }) + .flat_map(move |block_header| { + Some(EraValidatorWeights::new( + block_header.next_block_era_id(), + block_header.next_era_validator_weights().cloned()?, + fault_tolerance_fraction, + )) + }), + ) + } + + pub(crate) fn global_states_for_sync_across_upgrade(&self) -> Option { + let headers_by_height: HashMap<_, _> = + self.headers().map(|hdr| (hdr.height(), hdr)).collect(); + + let maybe_header_before_upgrade = self.switch_blocks_headers().find(|header| { + headers_by_height + .get(&(header.height() + 1)) + .is_some_and(|other_header| { + other_header.protocol_version() != header.protocol_version() + }) + }); + + maybe_header_before_upgrade.map(|before_header| { + let after_header = headers_by_height + .get(&(before_header.height() + 1)) + .unwrap(); // safe, because it had to be Some when we checked it above + GlobalStatesMetadata { + after_hash: after_header.block_hash(), + after_era_id: after_header.era_id(), + after_state_hash: *after_header.state_root_hash(), + before_hash: before_header.block_hash(), + before_state_hash: *before_header.state_root_hash(), + } + }) + } + + pub(crate) fn highest_block_height(&self) -> u64 { + self.headers() + .map(BlockHeader::height) + .max() + .unwrap_or_else(|| self.trusted_block_header.height()) + } + + pub(crate) fn highest_block_header_and_signatures( + &self, + ) -> (&BlockHeader, Option<&BlockSignatures>) { + let header = self + .headers() + .max_by_key(|header| header.height()) + 
.unwrap_or(&self.trusted_block_header); + let signatures = self + .block_headers_with_signatures + .iter() + .find(|block_header_with_signatures| { + block_header_with_signatures.block_header().height() == header.height() + }) + .map(|block_header_with_signatures| block_header_with_signatures.block_signatures()); + (header, signatures) + } + + pub(crate) fn highest_block_hash(&self) -> BlockHash { + self.highest_block_header_and_signatures().0.block_hash() + } + + pub(crate) fn headers(&self) -> impl Iterator { + iter::once(&self.trusted_block_header) + .chain(&self.trusted_ancestor_headers) + .chain( + self.block_headers_with_signatures + .iter() + .map(|sh| sh.block_header()), + ) + } + + pub(crate) fn switch_blocks_headers(&self) -> impl Iterator { + self.headers().filter(|header| header.is_switch_block()) + } +} + +impl Display for SyncLeap { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!( + f, + "sync leap message for trusted {}", + self.trusted_block_header.block_hash() + ) + } +} + +impl FetchItem for SyncLeap { + type Id = SyncLeapIdentifier; + type ValidationError = SyncLeapValidationError; + type ValidationMetadata = SyncLeapValidationMetaData; + + const TAG: Tag = Tag::SyncLeap; + + fn fetch_id(&self) -> Self::Id { + SyncLeapIdentifier { + block_hash: self.trusted_block_header.block_hash(), + trusted_ancestor_only: self.trusted_ancestor_only, + } + } + + fn validate( + &self, + validation_metadata: &SyncLeapValidationMetaData, + ) -> Result<(), Self::ValidationError> { + if self.trusted_ancestor_headers.is_empty() && self.trusted_block_header.height() > 0 { + return Err(SyncLeapValidationError::MissingTrustedAncestors); + } + if self.block_headers_with_signatures.len() as u64 + > validation_metadata.recent_era_count.saturating_add(1) + { + return Err(SyncLeapValidationError::TooManySwitchBlocks); + } + if self + .trusted_ancestor_headers + .iter() + .tuple_windows() + .any(|(child, parent)| *child.parent_hash() != parent.block_hash()) + 
{ + return Err(SyncLeapValidationError::TrustedAncestorsNotSorted); + } + let mut trusted_ancestor_iter = self.trusted_ancestor_headers.iter().rev(); + if let Some(last_ancestor) = trusted_ancestor_iter.next() { + if !last_ancestor.is_switch_block() && !last_ancestor.is_genesis() { + return Err(SyncLeapValidationError::MissingAncestorSwitchBlock); + } + } + if trusted_ancestor_iter.any(BlockHeader::is_switch_block) { + return Err(SyncLeapValidationError::UnexpectedAncestorSwitchBlock); + } + if self.trusted_ancestor_only && !self.block_headers_with_signatures.is_empty() { + return Err(SyncLeapValidationError::UnexpectedBlockHeadersWithSignatures); + } + + let mut headers: BTreeMap = self + .headers() + .map(|header| (header.block_hash(), header)) + .collect(); + let mut signatures: BTreeMap> = BTreeMap::new(); + for block_header in &self.block_headers_with_signatures { + signatures + .entry(block_header.block_signatures().era_id()) + .or_default() + .push(block_header.block_signatures()); + } + + let mut headers_with_sufficient_finality: Vec = + vec![self.trusted_block_header.block_hash()]; + + while let Some(hash) = headers_with_sufficient_finality.pop() { + if let Some(header) = headers.remove(&hash) { + headers_with_sufficient_finality.push(*header.parent_hash()); + if let Some(mut validator_weights) = header.next_era_validator_weights() { + // If this is a switch block right before the upgrade to the current protocol + // version, and if this upgrade changes the validator set, use the validator + // weights from the chainspec. 
+ if header.next_block_era_id() == validation_metadata.activation_point.era_id() { + if let Some(updated_weights) = validation_metadata + .global_state_update + .as_ref() + .and_then(|update| update.validators.as_ref()) + { + validator_weights = updated_weights + } + } + + if let Some(era_sigs) = signatures.remove(&header.next_block_era_id()) { + for sigs in era_sigs { + if let Err(err) = utils::check_sufficient_block_signatures( + validator_weights, + validation_metadata.finality_threshold_fraction, + Some(sigs), + ) { + return Err(SyncLeapValidationError::HeadersNotSufficientlySigned( + err, + )); + } + headers_with_sufficient_finality.push(*sigs.block_hash()); + } + } + } + } + } + + // any orphaned headers == incomplete proof + let incomplete_headers_proof = !headers.is_empty(); + // any orphaned signatures == incomplete proof + let incomplete_signatures_proof = !signatures.is_empty(); + + if incomplete_headers_proof || incomplete_signatures_proof { + return Err(SyncLeapValidationError::IncompleteProof); + } + + for block_header in &self.block_headers_with_signatures { + block_header + .is_valid() + .map_err(SyncLeapValidationError::BlockHeaderWithSignatures)?; + } + + // defer cryptographic verification until last to avoid unnecessary computation + for block_header in &self.block_headers_with_signatures { + block_header + .block_signatures() + .is_verified() + .map_err(SyncLeapValidationError::Crypto)?; + } + + Ok(()) + } +} + +mod specimen_support { + use crate::utils::specimen::{ + estimator_max_rounds_per_era, vec_of_largest_specimen, vec_prop_specimen, + BlockHeaderWithoutEraEnd, Cache, LargestSpecimen, SizeEstimator, + }; + + use super::{SyncLeap, SyncLeapIdentifier}; + + impl LargestSpecimen for SyncLeap { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + // Will at most contain as many blocks as a single era. 
And how many blocks can + // there be in an era is determined by the chainspec: it's the + // maximum of minimum_era_height and era_duration / minimum_block_time + let count = estimator_max_rounds_per_era(estimator).saturating_sub(1); + + let non_switch_block_ancestors: Vec = + vec_of_largest_specimen(estimator, count, cache); + + let mut trusted_ancestor_headers = + vec![LargestSpecimen::largest_specimen(estimator, cache)]; + trusted_ancestor_headers.extend( + non_switch_block_ancestors + .into_iter() + .map(BlockHeaderWithoutEraEnd::into_block_header), + ); + + let block_headers_with_signatures = + vec_prop_specimen(estimator, "recent_era_count", cache); + SyncLeap { + trusted_ancestor_only: LargestSpecimen::largest_specimen(estimator, cache), + trusted_block_header: LargestSpecimen::largest_specimen(estimator, cache), + trusted_ancestor_headers, + block_headers_with_signatures, + } + } + } + + impl LargestSpecimen for SyncLeapIdentifier { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + SyncLeapIdentifier { + block_hash: LargestSpecimen::largest_specimen(estimator, cache), + trusted_ancestor_only: true, + } + } + } +} + +#[cfg(test)] +mod tests { + // The `FetchItem::::validate()` function can potentially return the + // `SyncLeapValidationError::BlockWithMetadata` error as a result of calling + // `BlockHeaderWithMetadata::validate()`, but in practice this will always be detected earlier + // as an `SyncLeapValidationError::IncompleteProof` error. Hence, there is no explicit test for + // `SyncLeapValidationError::BlockWithMetadata`. 
+ + use std::{ + collections::{BTreeMap, BTreeSet}, + iter, + }; + + use num_rational::Ratio; + use rand::Rng; + + use casper_types::{ + crypto, testing::TestRng, ActivationPoint, Block, BlockHash, BlockHeader, + BlockHeaderWithSignatures, BlockSignaturesV2, BlockV2, ChainNameDigest, EraEndV2, EraId, + FinalitySignatureV2, GlobalStateUpdate, ProtocolConfig, ProtocolVersion, PublicKey, + SecretKey, TestBlockBuilder, Timestamp, TransactionHash, TransactionV1Hash, + AUCTION_LANE_ID, INSTALL_UPGRADE_LANE_ID, MINT_LANE_ID, U512, + }; + + use super::SyncLeap; + use crate::{ + components::fetcher::FetchItem, + types::{ + sync_leap::SyncLeapValidationError, + sync_leap_validation_metadata::SyncLeapValidationMetaData, EraValidatorWeights, + SyncLeapIdentifier, + }, + utils::BlockSignatureError, + }; + + fn make_block_header_with_signatures_from_height( + height: usize, + test_chain: &[BlockV2], + validators: &[ValidatorSpec], + chain_name_hash: ChainNameDigest, + add_proofs: bool, + ) -> BlockHeaderWithSignatures { + let header = Block::from(test_chain.get(height).unwrap()).clone_header(); + make_block_header_with_signatures_from_header( + &header, + validators, + chain_name_hash, + add_proofs, + ) + } + + fn make_block_header_with_signatures_from_header( + block_header: &BlockHeader, + validators: &[ValidatorSpec], + chain_name_hash: ChainNameDigest, + add_proofs: bool, + ) -> BlockHeaderWithSignatures { + let hash = block_header.block_hash(); + let height = block_header.height(); + let era_id = block_header.era_id(); + let mut block_signatures = BlockSignaturesV2::new(hash, height, era_id, chain_name_hash); + validators.iter().for_each( + |ValidatorSpec { + secret_key, + public_key: _, + weight: _, + }| { + let fin_sig = + FinalitySignatureV2::create(hash, height, era_id, chain_name_hash, secret_key); + if add_proofs { + block_signatures + .insert_signature(fin_sig.public_key().clone(), *fin_sig.signature()); + } + }, + ); + + 
BlockHeaderWithSignatures::new(block_header.clone(), block_signatures.into()) + } + + fn make_test_sync_leap_with_chain( + validators: &[ValidatorSpec], + test_chain: &[BlockV2], + query: usize, + trusted_ancestor_headers: &[usize], + blok_headers_with_signatures: &[usize], + chain_name_hash: ChainNameDigest, + add_proofs: bool, + ) -> SyncLeap { + let trusted_block_header = Block::from(test_chain.get(query).unwrap()).clone_header(); + + let trusted_ancestor_headers: Vec<_> = trusted_ancestor_headers + .iter() + .map(|height| Block::from(test_chain.get(*height).unwrap()).clone_header()) + .collect(); + + let block_headers_with_signatures: Vec<_> = blok_headers_with_signatures + .iter() + .map(|height| { + make_block_header_with_signatures_from_height( + *height, + test_chain, + validators, + chain_name_hash, + add_proofs, + ) + }) + .collect(); + + SyncLeap { + trusted_ancestor_only: false, + trusted_block_header, + trusted_ancestor_headers, + block_headers_with_signatures, + } + } + + // Each generated era gets two validators pulled from the provided `validators` set. 
+ fn make_test_sync_leap_with_validators( + rng: &mut TestRng, + validators: &[ValidatorSpec], + switch_blocks: &[u64], + query: usize, + trusted_ancestor_headers: &[usize], + block_headers_with_signatures: &[usize], + add_proofs: bool, + ) -> SyncLeap { + let mut test_chain_spec = + TestChainSpec::new(rng, Some(switch_blocks.to_vec()), None, validators); + let test_chain: Vec<_> = test_chain_spec.iter().take(12).collect(); + let chain_name_hash = ChainNameDigest::random(rng); + + make_test_sync_leap_with_chain( + validators, + &test_chain, + query, + trusted_ancestor_headers, + block_headers_with_signatures, + chain_name_hash, + add_proofs, + ) + } + + fn make_test_sync_leap( + rng: &mut TestRng, + switch_blocks: &[u64], + query: usize, + trusted_ancestor_headers: &[usize], + block_headers_with_signatures: &[usize], + add_proofs: bool, + ) -> SyncLeap { + const DEFAULT_VALIDATOR_WEIGHT: u32 = 100; + + let validators: Vec<_> = iter::repeat_with(crypto::generate_ed25519_keypair) + .take(2) + .map(|(secret_key, public_key)| ValidatorSpec { + secret_key, + public_key, + weight: Some(DEFAULT_VALIDATOR_WEIGHT.into()), + }) + .collect(); + make_test_sync_leap_with_validators( + rng, + &validators, + switch_blocks, + query, + trusted_ancestor_headers, + block_headers_with_signatures, + add_proofs, + ) + } + + fn test_sync_leap_validation_metadata() -> SyncLeapValidationMetaData { + let unbonding_delay = 7; + let auction_delay = 1; + let activation_point = ActivationPoint::EraId(3000.into()); + let finality_threshold_fraction = Ratio::new(1, 3); + + SyncLeapValidationMetaData::new( + unbonding_delay - auction_delay, // As per `CoreConfig::recent_era_count()`. 
+ activation_point, + None, + finality_threshold_fraction, + ) + } + + #[test] + fn should_validate_correct_sync_leap() { + // Chain + // 0 1 2 3 4 5 6 7 8 9 10 11 + // S S S S + let switch_blocks = [0, 3, 6, 9]; + let validation_metadata = test_sync_leap_validation_metadata(); + + let mut rng = TestRng::new(); + + // Querying for a non-switch block. + let query = 5; + let trusted_ancestor_headers = [4, 3]; + let block_headers_with_signatures = [6, 9, 11]; + let add_proofs = true; + let sync_leap = make_test_sync_leap( + &mut rng, + &switch_blocks, + query, + &trusted_ancestor_headers, + &block_headers_with_signatures, + add_proofs, + ); + + let result = sync_leap.validate(&validation_metadata); + assert!(result.is_ok()); + + // Querying for a switch block. + let query = 6; + let trusted_ancestor_headers = [5, 4, 3]; + let block_headers_with_signatures = [9, 11]; + let add_proofs = true; + let sync_leap = make_test_sync_leap( + &mut rng, + &switch_blocks, + query, + &trusted_ancestor_headers, + &block_headers_with_signatures, + add_proofs, + ); + + let result = sync_leap.validate(&validation_metadata); + assert!(result.is_ok()); + } + + #[test] + fn should_check_trusted_ancestors() { + let mut rng = TestRng::new(); + let validation_metadata = test_sync_leap_validation_metadata(); + + // Trusted ancestors can't be empty when trusted block height is greater than 0. + let block = TestBlockBuilder::new().height(1).build(&mut rng); + + let sync_leap = SyncLeap { + trusted_ancestor_only: false, + trusted_block_header: block.take_header().into(), + trusted_ancestor_headers: Default::default(), + block_headers_with_signatures: Default::default(), + }; + let result = sync_leap.validate(&validation_metadata); + assert!(matches!( + result, + Err(SyncLeapValidationError::MissingTrustedAncestors) + )); + + // When trusted block height is 0, validation should not fail due trusted ancestors being + // empty. 
+ let block = TestBlockBuilder::new().height(0).build(&mut rng); + + let sync_leap = SyncLeap { + trusted_ancestor_only: false, + trusted_block_header: block.take_header().into(), + trusted_ancestor_headers: Default::default(), + block_headers_with_signatures: Default::default(), + }; + let result = sync_leap.validate(&validation_metadata); + assert!(!matches!( + result, + Err(SyncLeapValidationError::MissingTrustedAncestors) + )); + assert!(result.is_ok()); + } + + #[test] + fn should_check_block_headers_with_signatures_size() { + let mut rng = TestRng::new(); + let validation_metadata = test_sync_leap_validation_metadata(); + + let max_allowed_size = validation_metadata.recent_era_count + 1; + + // Max allowed size should NOT trigger the `TooManySwitchBlocks` error. + let generated_block_count = max_allowed_size; + + let block = TestBlockBuilder::new().height(0).build_versioned(&mut rng); + let chain_name_hash = ChainNameDigest::random(&mut rng); + let sync_leap = SyncLeap { + trusted_ancestor_only: false, + trusted_block_header: block.clone_header(), + trusted_ancestor_headers: Default::default(), + block_headers_with_signatures: iter::repeat_with(|| { + let block = TestBlockBuilder::new().build_versioned(&mut rng); + let hash = block.hash(); + let height = block.height(); + BlockHeaderWithSignatures::new( + block.clone_header(), + BlockSignaturesV2::new(*hash, height, 0.into(), chain_name_hash).into(), + ) + }) + .take(generated_block_count as usize) + .collect(), + }; + let result = sync_leap.validate(&validation_metadata); + assert!(!matches!( + result, + Err(SyncLeapValidationError::TooManySwitchBlocks) + )); + + // Generating one more block should trigger the `TooManySwitchBlocks` error. 
+ let generated_block_count = max_allowed_size + 1; + + let block = TestBlockBuilder::new().height(0).build_versioned(&mut rng); + let sync_leap = SyncLeap { + trusted_ancestor_only: false, + trusted_block_header: block.take_header(), + trusted_ancestor_headers: Default::default(), + block_headers_with_signatures: iter::repeat_with(|| { + let block = TestBlockBuilder::new().build_versioned(&mut rng); + let hash = block.hash(); + let height = block.height(); + BlockHeaderWithSignatures::new( + block.clone_header(), + BlockSignaturesV2::new(*hash, height, 0.into(), chain_name_hash).into(), + ) + }) + .take(generated_block_count as usize) + .collect(), + }; + let result = sync_leap.validate(&validation_metadata); + assert!(matches!( + result, + Err(SyncLeapValidationError::TooManySwitchBlocks) + )); + } + + #[test] + fn should_detect_unsorted_trusted_ancestors() { + let mut rng = TestRng::new(); + let validation_metadata = test_sync_leap_validation_metadata(); + + // Test block iterator produces blocks in order, however, the `trusted_ancestor_headers` is + // expected to be sorted backwards (from the most recent ancestor back to the switch block). + // Therefore, the generated blocks should cause the `TrustedAncestorsNotSorted` error to be + // triggered. 
+ let block = TestBlockBuilder::new().height(0).build(&mut rng); + let block_iterator = + TestBlockIterator::new(block.clone(), &mut rng, None, None, Default::default()); + let block = Block::from(block); + + let trusted_ancestor_headers = block_iterator + .take(3) + .map(|block| block.take_header().into()) + .collect(); + + let sync_leap = SyncLeap { + trusted_ancestor_only: false, + trusted_block_header: block.take_header(), + trusted_ancestor_headers, + block_headers_with_signatures: Default::default(), + }; + let result = sync_leap.validate(&validation_metadata); + assert!(matches!( + result, + Err(SyncLeapValidationError::TrustedAncestorsNotSorted) + )); + + // Single trusted ancestor header it should never trigger the `TrustedAncestorsNotSorted` + // error. + let block = TestBlockBuilder::new().height(0).build(&mut rng); + let block_iterator = + TestBlockIterator::new(block.clone(), &mut rng, None, None, Default::default()); + + let trusted_ancestor_headers = block_iterator + .take(1) + .map(|block| block.take_header().into()) + .collect(); + + let block = Block::from(block); + let sync_leap = SyncLeap { + trusted_ancestor_only: false, + trusted_block_header: block.take_header(), + trusted_ancestor_headers, + block_headers_with_signatures: Default::default(), + }; + let result = sync_leap.validate(&validation_metadata); + assert!(!matches!( + result, + Err(SyncLeapValidationError::TrustedAncestorsNotSorted) + )); + } + + #[test] + fn should_detect_missing_ancestor_switch_block() { + let mut rng = TestRng::new(); + let validation_metadata = test_sync_leap_validation_metadata(); + + // Make sure `TestBlockIterator` creates no switch blocks. 
+ let switch_blocks = None; + + let block = TestBlockBuilder::new().height(0).build(&mut rng); + let block_iterator = TestBlockIterator::new( + block.clone(), + &mut rng, + switch_blocks, + None, + Default::default(), + ); + + let trusted_ancestor_headers: Vec<_> = block_iterator + .take(3) + .map(|block| block.take_header().into()) + .collect::>() + .into_iter() + .rev() + .collect(); + + let block = Block::from(block); + let sync_leap = SyncLeap { + trusted_ancestor_only: false, + trusted_block_header: block.take_header(), + trusted_ancestor_headers, + block_headers_with_signatures: Default::default(), + }; + let result = sync_leap.validate(&validation_metadata); + assert!(matches!( + result, + Err(SyncLeapValidationError::MissingAncestorSwitchBlock) + )); + } + + #[test] + fn should_detect_unexpected_ancestor_switch_block() { + // Chain + // 0 1 2 3 4 5 6 7 8 9 10 11 + // S S S S S + let switch_blocks = [0, 2, 3, 6, 9]; + let validation_metadata = test_sync_leap_validation_metadata(); + + let mut rng = TestRng::new(); + + // Intentionally include two consecutive switch blocks (3, 2) in the + // `trusted_ancestor_headers`, which should trigger the error. 
+ let trusted_ancestor_headers = [4, 3, 2]; + + let query = 5; + let block_headers_with_signatures = [6, 9, 11]; + let add_proofs = true; + let sync_leap = make_test_sync_leap( + &mut rng, + &switch_blocks, + query, + &trusted_ancestor_headers, + &block_headers_with_signatures, + add_proofs, + ); + + let result = sync_leap.validate(&validation_metadata); + assert!(matches!( + result, + Err(SyncLeapValidationError::UnexpectedAncestorSwitchBlock) + )); + } + + #[test] + fn should_detect_unexpected_block_header_with_signatures() { + // Chain + // 0 1 2 3 4 5 6 7 8 9 10 11 + // S S S S + let switch_blocks = [0, 3, 6, 9]; + let validation_metadata = test_sync_leap_validation_metadata(); + + let mut rng = TestRng::new(); + + let query = 5; + let trusted_ancestor_headers = [4, 3]; + let block_headers_with_signatures = [6, 9, 11]; + let add_proofs = true; + let mut sync_leap = make_test_sync_leap( + &mut rng, + &switch_blocks, + query, + &trusted_ancestor_headers, + &block_headers_with_signatures, + add_proofs, + ); + + // When `trusted_ancestor_only` we expect an error when `block_headers_with_signatures` is + // not empty. 
+ sync_leap.trusted_ancestor_only = true; + + let result = sync_leap.validate(&validation_metadata); + assert!(matches!( + result, + Err(SyncLeapValidationError::UnexpectedBlockHeadersWithSignatures) + )); + } + + #[test] + fn should_detect_not_sufficiently_signed_headers() { + // Chain + // 0 1 2 3 4 5 6 7 8 9 10 11 + // S S S S + let switch_blocks = [0, 3, 6, 9]; + let validation_metadata = test_sync_leap_validation_metadata(); + + let mut rng = TestRng::new(); + + let query = 5; + let trusted_ancestor_headers = [4, 3]; + let block_headers_with_signatures = [6, 9, 11]; + let add_proofs = false; + let sync_leap = make_test_sync_leap( + &mut rng, + &switch_blocks, + query, + &trusted_ancestor_headers, + &block_headers_with_signatures, + add_proofs, + ); + + let result = sync_leap.validate(&validation_metadata); + assert!( + matches!(result, Err(SyncLeapValidationError::HeadersNotSufficientlySigned(inner)) + if matches!(&inner, BlockSignatureError::InsufficientWeightForFinality{ + trusted_validator_weights: _, + block_signatures: _, + signature_weight, + total_validator_weight:_, + fault_tolerance_fraction:_ } if signature_weight == &Some(Box::new(0.into())))) + ); + } + + #[test] + fn should_detect_orphaned_headers() { + // Chain + // 0 1 2 3 4 5 6 7 8 9 10 11 + // S S S S + let switch_blocks = [0, 3, 6, 9]; + let validation_metadata = test_sync_leap_validation_metadata(); + + let mut rng = TestRng::new(); + + let query = 5; + let trusted_ancestor_headers = [4, 3]; + let block_headers_with_signatures = [6, 9, 11]; + let add_proofs = true; + let mut sync_leap = make_test_sync_leap( + &mut rng, + &switch_blocks, + query, + &trusted_ancestor_headers, + &block_headers_with_signatures, + add_proofs, + ); + + // Add single orphaned block. Signatures are cloned from a legit block to avoid bailing on + // the signature validation check. 
+ let orphaned_block = TestBlockBuilder::new().build_versioned(&mut rng); + let orphaned_block_header_with_signatures = BlockHeaderWithSignatures::new( + orphaned_block.clone_header(), + sync_leap + .block_headers_with_signatures + .first() + .unwrap() + .block_signatures() + .clone(), + ); + sync_leap + .block_headers_with_signatures + .push(orphaned_block_header_with_signatures); + + let result = sync_leap.validate(&validation_metadata); + assert!(matches!( + result, + Err(SyncLeapValidationError::IncompleteProof) + )); + } + + #[test] + fn should_detect_orphaned_signatures() { + // Chain + // 0 1 2 3 4 5 6 7 8 9 10 11 + // S S S S + let switch_blocks = [0, 3, 6, 9]; + let validation_metadata = test_sync_leap_validation_metadata(); + + let mut rng = TestRng::new(); + + let query = 5; + let trusted_ancestor_headers = [4, 3]; + let block_headers_with_signatures = [6, 9, 11]; + let add_proofs = true; + let mut sync_leap = make_test_sync_leap( + &mut rng, + &switch_blocks, + query, + &trusted_ancestor_headers, + &block_headers_with_signatures, + add_proofs, + ); + + // Insert signature from an era nowhere near the sync leap data. Base it on one of the + // existing signatures to avoid bailing on the signature validation check. 
+ let mut invalid_block_header_with_signatures = sync_leap + .block_headers_with_signatures + .first_mut() + .unwrap() + .clone(); + invalid_block_header_with_signatures.invalidate_era(); + sync_leap + .block_headers_with_signatures + .push(invalid_block_header_with_signatures); + + let result = sync_leap.validate(&validation_metadata); + assert!(matches!( + result, + Err(SyncLeapValidationError::IncompleteProof) + )); + } + + #[test] + fn should_fail_when_signature_fails_crypto_verification() { + // Chain + // 0 1 2 3 4 5 6 7 8 9 10 11 + // S S S S + let switch_blocks = [0, 3, 6, 9]; + let validation_metadata = test_sync_leap_validation_metadata(); + + let mut rng = TestRng::new(); + + let query = 5; + let trusted_ancestor_headers = [4, 3]; + let block_headers_with_signatures = [6, 9, 11]; + let add_proofs = true; + let mut sync_leap = make_test_sync_leap( + &mut rng, + &switch_blocks, + query, + &trusted_ancestor_headers, + &block_headers_with_signatures, + add_proofs, + ); + + let mut invalid_block_header_with_signatures = + sync_leap.block_headers_with_signatures.pop().unwrap(); + invalid_block_header_with_signatures.invalidate_last_signature(); + sync_leap + .block_headers_with_signatures + .push(invalid_block_header_with_signatures); + + let result = sync_leap.validate(&validation_metadata); + assert!(matches!(result, Err(SyncLeapValidationError::Crypto(_)))); + } + + #[test] + fn should_use_correct_validator_weights_on_upgrade() { + // Chain + // 0 1 2 3 4 5 6 7 8 9 10 11 + // S S S S + let switch_blocks = [0, 3, 6, 9]; + + let mut rng = TestRng::new(); + + let query = 5; + let trusted_ancestor_headers = [4, 3]; + + const INDEX_OF_THE_LAST_SWITCH_BLOCK: usize = 1; + let block_headers_with_signatures = [6, 9, 11]; + + let add_proofs = true; + let sync_leap = make_test_sync_leap( + &mut rng, + &switch_blocks, + query, + &trusted_ancestor_headers, + &block_headers_with_signatures, + add_proofs, + ); + + // Setup upgrade after the last switch block. 
+ let upgrade_block = sync_leap + .block_headers_with_signatures + .get(INDEX_OF_THE_LAST_SWITCH_BLOCK) + .unwrap(); + let upgrade_era = upgrade_block.block_header().era_id().successor(); + let activation_point = ActivationPoint::EraId(upgrade_era); + + // Set up validator change. + const DEFAULT_VALIDATOR_WEIGHT: u64 = 100; + let new_validators: BTreeMap<_, _> = iter::repeat_with(crypto::generate_ed25519_keypair) + .take(2) + .map(|(_, public_key)| (public_key, DEFAULT_VALIDATOR_WEIGHT.into())) + .collect(); + let global_state_update = GlobalStateUpdate { + validators: Some(new_validators), + entries: Default::default(), + }; + + let unbonding_delay = 7; + let auction_delay = 1; + let finality_threshold_fraction = Ratio::new(1, 3); + let validation_metadata = SyncLeapValidationMetaData::new( + unbonding_delay - auction_delay, // As per `CoreConfig::recent_era_count()`. + activation_point, + Some(global_state_update), + finality_threshold_fraction, + ); + + let result = sync_leap.validate(&validation_metadata); + + // By asserting on the `HeadersNotSufficientlySigned` error (with bogus validators set to + // the original validators from the chain) we can prove that the validators smuggled in the + // validation metadata were actually used in the verification process. 
+ let expected_bogus_validators: Vec<_> = sync_leap + .block_headers_with_signatures + .last() + .unwrap() + .block_signatures() + .signers() + .cloned() + .collect(); + assert!( + matches!(result, Err(SyncLeapValidationError::HeadersNotSufficientlySigned(inner)) + if matches!(&inner, BlockSignatureError::BogusValidators{ + trusted_validator_weights: _, + block_signatures: _, + bogus_validators + } if bogus_validators == &expected_bogus_validators)) + ); + } + + #[test] + fn should_return_headers() { + let mut rng = TestRng::new(); + let chain_name_hash = ChainNameDigest::random(&mut rng); + + let trusted_block = TestBlockBuilder::new() + .switch_block(false) + .build_versioned(&mut rng); + + let trusted_ancestor_1 = TestBlockBuilder::new() + .switch_block(true) + .build_versioned(&mut rng); + let trusted_ancestor_2 = TestBlockBuilder::new() + .switch_block(false) + .build_versioned(&mut rng); + let trusted_ancestor_3 = TestBlockBuilder::new() + .switch_block(false) + .build_versioned(&mut rng); + + let signed_block_1 = TestBlockBuilder::new() + .switch_block(true) + .build_versioned(&mut rng); + let signed_block_2 = TestBlockBuilder::new() + .switch_block(true) + .build_versioned(&mut rng); + let signed_block_3 = TestBlockBuilder::new() + .switch_block(false) + .build_versioned(&mut rng); + let block_header_with_signatures_1 = make_block_header_with_signatures_from_header( + &signed_block_1.clone_header(), + &[], + chain_name_hash, + false, + ); + let block_header_with_signatures_2 = make_block_header_with_signatures_from_header( + &signed_block_2.clone_header(), + &[], + chain_name_hash, + false, + ); + let block_header_with_signatures_3 = make_block_header_with_signatures_from_header( + &signed_block_3.clone_header(), + &[], + chain_name_hash, + false, + ); + + let sync_leap = SyncLeap { + trusted_ancestor_only: false, + trusted_block_header: trusted_block.clone_header(), + trusted_ancestor_headers: vec![ + trusted_ancestor_1.clone_header(), + 
trusted_ancestor_2.clone_header(), + trusted_ancestor_3.clone_header(), + ], + block_headers_with_signatures: vec![ + block_header_with_signatures_1, + block_header_with_signatures_2, + block_header_with_signatures_3, + ], + }; + + let actual_headers: BTreeSet<_> = sync_leap + .headers() + .map(|header| header.block_hash()) + .collect(); + let expected_headers: BTreeSet<_> = [ + trusted_block, + trusted_ancestor_1, + trusted_ancestor_2, + trusted_ancestor_3, + signed_block_1, + signed_block_2, + signed_block_3, + ] + .iter() + .map(|block| *block.hash()) + .collect(); + assert_eq!(expected_headers, actual_headers); + } + + #[test] + fn should_return_switch_block_headers() { + let mut rng = TestRng::new(); + + let chain_name_hash = ChainNameDigest::random(&mut rng); + + let trusted_block = TestBlockBuilder::new() + .switch_block(false) + .build_versioned(&mut rng); + + let trusted_ancestor_1 = TestBlockBuilder::new() + .switch_block(true) + .build_versioned(&mut rng); + let trusted_ancestor_2 = TestBlockBuilder::new() + .switch_block(false) + .build_versioned(&mut rng); + let trusted_ancestor_3 = TestBlockBuilder::new() + .switch_block(false) + .build_versioned(&mut rng); + + let signed_block_1 = TestBlockBuilder::new() + .switch_block(true) + .build_versioned(&mut rng); + let signed_block_2 = TestBlockBuilder::new() + .switch_block(true) + .build_versioned(&mut rng); + let signed_block_3 = TestBlockBuilder::new() + .switch_block(false) + .build_versioned(&mut rng); + let block_header_with_signatures_1 = make_block_header_with_signatures_from_header( + &signed_block_1.clone_header(), + &[], + chain_name_hash, + false, + ); + let block_header_with_signatures_2 = make_block_header_with_signatures_from_header( + &signed_block_2.clone_header(), + &[], + chain_name_hash, + false, + ); + let block_header_with_signatures_3 = make_block_header_with_signatures_from_header( + &signed_block_3.clone_header(), + &[], + chain_name_hash, + false, + ); + + let sync_leap = SyncLeap 
{ + trusted_ancestor_only: false, + trusted_block_header: trusted_block.clone_header(), + trusted_ancestor_headers: vec![ + trusted_ancestor_1.clone_header(), + trusted_ancestor_2.clone_header(), + trusted_ancestor_3.clone_header(), + ], + block_headers_with_signatures: vec![ + block_header_with_signatures_1.clone(), + block_header_with_signatures_2.clone(), + block_header_with_signatures_3.clone(), + ], + }; + + let actual_headers: BTreeSet<_> = sync_leap + .switch_blocks_headers() + .map(|header| header.block_hash()) + .collect(); + let expected_headers: BTreeSet<_> = [ + trusted_ancestor_1.clone(), + signed_block_1.clone(), + signed_block_2.clone(), + ] + .iter() + .map(|block| *block.hash()) + .collect(); + assert_eq!(expected_headers, actual_headers); + + // Also test when the trusted block is a switch block. + let trusted_block = TestBlockBuilder::new() + .switch_block(true) + .build_versioned(&mut rng); + let sync_leap = SyncLeap { + trusted_ancestor_only: false, + trusted_block_header: trusted_block.clone_header(), + trusted_ancestor_headers: vec![ + trusted_ancestor_1.clone_header(), + trusted_ancestor_2.clone_header(), + trusted_ancestor_3.clone_header(), + ], + block_headers_with_signatures: vec![ + block_header_with_signatures_1, + block_header_with_signatures_2, + block_header_with_signatures_3, + ], + }; + let actual_headers: BTreeSet<_> = sync_leap + .switch_blocks_headers() + .map(|header| header.block_hash()) + .collect(); + let expected_headers: BTreeSet<_> = [ + trusted_block, + trusted_ancestor_1, + signed_block_1, + signed_block_2, + ] + .iter() + .map(|block| *block.hash()) + .collect(); + assert_eq!(expected_headers, actual_headers); + } + + #[test] + fn should_return_highest_block_header_from_trusted_block() { + // Chain + // 0 1 2 3 4 5 6 7 8 9 10 11 + // S S S S + let switch_blocks = [0, 3, 6, 9]; + + let mut rng = TestRng::new(); + + let query = 5; + let trusted_ancestor_headers = [4, 3]; + let block_headers_with_signatures = [6, 9, 11]; 
+ let add_proofs = true; + let valid_sync_leap = make_test_sync_leap( + &mut rng, + &switch_blocks, + query, + &trusted_ancestor_headers, + &block_headers_with_signatures, + add_proofs, + ); + + // `valid_sync_leap` created above is a well formed SyncLeap structure for the test chain. + // We can use the blocks it contains to generate SyncLeap structures as required for + // the test, because we know the heights of the blocks in the test chain as well as + // their sigs. + let highest_block = valid_sync_leap + .block_headers_with_signatures + .last() + .unwrap() + .block_header() + .clone(); + let lowest_blocks: Vec<_> = valid_sync_leap + .trusted_ancestor_headers + .iter() + .take(2) + .cloned() + .collect(); + let middle_blocks: Vec<_> = valid_sync_leap + .block_headers_with_signatures + .iter() + .take(2) + .cloned() + .collect(); + + let highest_block_height = highest_block.height(); + let highest_block_hash = highest_block.block_hash(); + + let sync_leap = SyncLeap { + trusted_ancestor_only: false, + trusted_block_header: highest_block.clone(), + trusted_ancestor_headers: lowest_blocks, + block_headers_with_signatures: middle_blocks, + }; + assert_eq!( + sync_leap + .highest_block_header_and_signatures() + .0 + .block_hash(), + highest_block.block_hash() + ); + assert_eq!(sync_leap.highest_block_hash(), highest_block_hash); + assert_eq!(sync_leap.highest_block_height(), highest_block_height); + } + + #[test] + fn should_return_highest_block_header_from_trusted_ancestors() { + // Chain + // 0 1 2 3 4 5 6 7 8 9 10 11 + // S S S S + let switch_blocks = [0, 3, 6, 9]; + + let mut rng = TestRng::new(); + + let query = 5; + let trusted_ancestor_headers = [4, 3]; + let block_headers_with_signatures = [6, 9, 11]; + let add_proofs = true; + let valid_sync_leap = make_test_sync_leap( + &mut rng, + &switch_blocks, + query, + &trusted_ancestor_headers, + &block_headers_with_signatures, + add_proofs, + ); + + // `valid_sync_leap` created above is a well formed SyncLeap 
structure for the test chain. + // We can use the blocks it contains to generate SyncLeap structures as required for + // the test, because we know the heights of the blocks in the test chain as well as + // their sigs. + let highest_block = valid_sync_leap + .block_headers_with_signatures + .last() + .unwrap() + .block_header() + .clone(); + let lowest_blocks: Vec<_> = valid_sync_leap + .trusted_ancestor_headers + .iter() + .take(2) + .cloned() + .collect(); + let middle_blocks: Vec<_> = valid_sync_leap + .block_headers_with_signatures + .iter() + .take(2) + .cloned() + .collect(); + + let highest_block_height = highest_block.height(); + let highest_block_hash = highest_block.block_hash(); + + let sync_leap = SyncLeap { + trusted_ancestor_only: false, + trusted_block_header: lowest_blocks.first().unwrap().clone(), + trusted_ancestor_headers: vec![highest_block], + block_headers_with_signatures: middle_blocks, + }; + assert_eq!( + sync_leap + .highest_block_header_and_signatures() + .0 + .block_hash(), + highest_block_hash + ); + assert_eq!(sync_leap.highest_block_hash(), highest_block_hash); + assert_eq!(sync_leap.highest_block_height(), highest_block_height); + } + + #[test] + fn should_return_highest_block_header_from_block_headers_with_signatures() { + // Chain + // 0 1 2 3 4 5 6 7 8 9 10 11 + // S S S S + let switch_blocks = [0, 3, 6, 9]; + + let mut rng = TestRng::new(); + + let query = 5; + let trusted_ancestor_headers = [4, 3]; + let block_headers_with_signatures = [6, 9, 11]; + let add_proofs = true; + let valid_sync_leap = make_test_sync_leap( + &mut rng, + &switch_blocks, + query, + &trusted_ancestor_headers, + &block_headers_with_signatures, + add_proofs, + ); + + // `valid_sync_leap` created above is a well formed SyncLeap structure for the test chain. + // We can use the blocks it contains to generate SyncLeap structures as required for + // the test, because we know the heights of the blocks in the test chain as well as + // their sigs. 
+ let highest_block = valid_sync_leap + .block_headers_with_signatures + .last() + .unwrap() + .clone(); + let lowest_blocks: Vec<_> = valid_sync_leap + .trusted_ancestor_headers + .iter() + .take(2) + .cloned() + .collect(); + let middle_blocks: Vec<_> = valid_sync_leap + .block_headers_with_signatures + .iter() + .take(2) + .cloned() + .map(|block_header_with_signatures| block_header_with_signatures.block_header().clone()) + .collect(); + + let highest_block_height = highest_block.block_header().height(); + let highest_block_hash = highest_block.block_header().block_hash(); + + let sync_leap = SyncLeap { + trusted_ancestor_only: false, + trusted_block_header: lowest_blocks.first().unwrap().clone(), + trusted_ancestor_headers: middle_blocks, + block_headers_with_signatures: vec![highest_block.clone()], + }; + assert_eq!( + sync_leap + .highest_block_header_and_signatures() + .0 + .block_hash(), + highest_block.block_header().block_hash() + ); + assert_eq!(sync_leap.highest_block_hash(), highest_block_hash); + assert_eq!(sync_leap.highest_block_height(), highest_block_height); + } + + #[test] + fn should_return_sigs_when_highest_block_is_signed() { + // Chain + // 0 1 2 3 4 5 6 7 8 9 10 11 + // S S S S + let switch_blocks = [0, 3, 6, 9]; + + let mut rng = TestRng::new(); + + let query = 5; + let trusted_ancestor_headers = [4, 3]; + let block_headers_with_signatures = [6, 9, 11]; + let add_proofs = true; + let sync_leap = make_test_sync_leap( + &mut rng, + &switch_blocks, + query, + &trusted_ancestor_headers, + &block_headers_with_signatures, + add_proofs, + ); + + assert!(sync_leap.highest_block_header_and_signatures().1.is_some()); + } + + #[test] + fn should_not_return_sigs_when_highest_block_is_not_signed() { + // Chain + // 0 1 2 3 4 5 6 7 8 9 10 11 + // S S S S + let switch_blocks = [0, 3, 6, 9]; + + let mut rng = TestRng::new(); + + let query = 5; + let trusted_ancestor_headers = [4, 3]; + let block_headers_with_signatures = [6, 9, 11]; + let add_proofs = 
true; + let sync_leap = make_test_sync_leap( + &mut rng, + &switch_blocks, + query, + &trusted_ancestor_headers, + &block_headers_with_signatures, + add_proofs, + ); + + // `sync_leap` is a well formed SyncLeap structure for the test chain. We can use the blocks + // it contains to generate SyncLeap structures as required for the test, because we know the + // heights of the blocks in the test chain as well as their sigs. + let highest_block = sync_leap + .block_headers_with_signatures + .last() + .unwrap() + .clone(); + let lowest_blocks: Vec<_> = sync_leap + .trusted_ancestor_headers + .iter() + .take(2) + .cloned() + .collect(); + let middle_blocks: Vec<_> = sync_leap + .block_headers_with_signatures + .iter() + .take(2) + .cloned() + .collect(); + let sync_leap = SyncLeap { + trusted_ancestor_only: false, + trusted_block_header: highest_block.block_header().clone(), + trusted_ancestor_headers: lowest_blocks, + block_headers_with_signatures: middle_blocks, + }; + assert!(sync_leap.highest_block_header_and_signatures().1.is_none()); + } + + #[test] + fn should_return_era_validator_weights_for_correct_sync_leap() { + // Chain + // 0 1 2 3 4 5 6 7 8 9 10 11 + // S S S S + let switch_blocks = [0, 3, 6, 9]; + + let mut rng = TestRng::new(); + + // Test block iterator will pull 2 validators for each created block. Indices 0 and 1 are + // used for validators for the trusted ancestor headers. 
+ const FIRST_BLOCK_HEADER_WITH_SIGNATURES_VALIDATOR_OFFSET: usize = 2; + + let validators: Vec<_> = (1..100) + .map(|weight| { + let (secret_key, public_key) = crypto::generate_ed25519_keypair(); + ValidatorSpec { + secret_key, + public_key, + weight: Some(U512::from(weight)), + } + }) + .collect(); + + let query = 5; + let trusted_ancestor_headers = [4, 3]; + let block_headers_with_signatures = [6, 9, 11]; + let add_proofs = true; + let sync_leap = make_test_sync_leap_with_validators( + &mut rng, + &validators, + &switch_blocks, + query, + &trusted_ancestor_headers, + &block_headers_with_signatures, + add_proofs, + ); + + let fault_tolerance_fraction = Ratio::new_raw(1, 3); + + let mut block_iter = sync_leap.block_headers_with_signatures.iter(); + let first_switch_block = block_iter.next().unwrap().clone(); + let protocol_version = first_switch_block.block_header().protocol_version(); + let validator_1 = validators + .get(FIRST_BLOCK_HEADER_WITH_SIGNATURES_VALIDATOR_OFFSET) + .unwrap(); + let validator_2 = validators + .get(FIRST_BLOCK_HEADER_WITH_SIGNATURES_VALIDATOR_OFFSET + 1) + .unwrap(); + let first_era_validator_weights = EraValidatorWeights::new( + first_switch_block.block_header().era_id(), + [validator_1, validator_2] + .iter() + .map( + |ValidatorSpec { + secret_key: _, + public_key, + weight, + }| (public_key.clone(), weight.unwrap()), + ) + .collect(), + fault_tolerance_fraction, + ); + + let second_switch_block = block_iter.next().unwrap().clone(); + let validator_1 = validators + .get(FIRST_BLOCK_HEADER_WITH_SIGNATURES_VALIDATOR_OFFSET + 2) + .unwrap(); + let validator_2 = validators + .get(FIRST_BLOCK_HEADER_WITH_SIGNATURES_VALIDATOR_OFFSET + 3) + .unwrap(); + let second_era_validator_weights = EraValidatorWeights::new( + second_switch_block.block_header().era_id(), + [validator_1, validator_2] + .iter() + .map( + |ValidatorSpec { + secret_key: _, + public_key, + weight, + }| (public_key.clone(), weight.unwrap()), + ) + .collect(), + 
fault_tolerance_fraction, + ); + + let third_block = block_iter.next().unwrap().clone(); + let validator_1 = validators + .get(FIRST_BLOCK_HEADER_WITH_SIGNATURES_VALIDATOR_OFFSET + 4) + .unwrap(); + let validator_2 = validators + .get(FIRST_BLOCK_HEADER_WITH_SIGNATURES_VALIDATOR_OFFSET + 5) + .unwrap(); + let third_era_validator_weights = EraValidatorWeights::new( + third_block.block_header().era_id(), + [validator_1, validator_2] + .iter() + .map( + |ValidatorSpec { + secret_key: _, + public_key, + weight, + }| (public_key.clone(), weight.unwrap()), + ) + .collect(), + fault_tolerance_fraction, + ); + + let protocol_config = ProtocolConfig { + version: protocol_version, + global_state_update: None, + activation_point: ActivationPoint::EraId(EraId::random(&mut rng)), + hard_reset: rng.gen(), + }; + + let result: Vec<_> = sync_leap + .era_validator_weights(fault_tolerance_fraction, &protocol_config) + .collect(); + assert_eq!( + result, + vec![ + first_era_validator_weights, + second_era_validator_weights, + third_era_validator_weights, + ] + ) + } + + #[test] + fn should_not_return_global_states_when_no_upgrade() { + let mut rng = TestRng::new(); + + const DEFAULT_VALIDATOR_WEIGHT: u32 = 100; + + let chain_name_hash = ChainNameDigest::random(&mut rng); + + let validators: Vec<_> = iter::repeat_with(crypto::generate_ed25519_keypair) + .take(2) + .map(|(secret_key, public_key)| ValidatorSpec { + secret_key, + public_key, + weight: Some(DEFAULT_VALIDATOR_WEIGHT.into()), + }) + .collect(); + + let mut test_chain_spec = TestChainSpec::new(&mut rng, Some(vec![4, 8]), None, &validators); + let chain: Vec<_> = test_chain_spec.iter().take(12).collect(); + + let sync_leap = make_test_sync_leap_with_chain( + &validators, + &chain, + 11, + &[10, 9, 8], + &[], + chain_name_hash, + false, + ); + + let global_states_metadata = sync_leap.global_states_for_sync_across_upgrade(); + assert!(global_states_metadata.is_none()); + } + + #[test] + fn 
should_return_global_states_when_upgrade() { + let mut rng = TestRng::new(); + + const DEFAULT_VALIDATOR_WEIGHT: u32 = 100; + + let chain_name_hash = ChainNameDigest::random(&mut rng); + + let validators: Vec<_> = iter::repeat_with(crypto::generate_ed25519_keypair) + .take(2) + .map(|(secret_key, public_key)| ValidatorSpec { + secret_key, + public_key, + weight: Some(DEFAULT_VALIDATOR_WEIGHT.into()), + }) + .collect(); + + let mut test_chain_spec = + TestChainSpec::new(&mut rng, Some(vec![4, 8]), Some(vec![8]), &validators); + let chain: Vec<_> = test_chain_spec.iter().take(12).collect(); + + let sync_leap = make_test_sync_leap_with_chain( + &validators, + &chain, + 11, + &[10, 9, 8], + &[], + chain_name_hash, + false, + ); + + let global_states_metadata = sync_leap + .global_states_for_sync_across_upgrade() + .expect("should be Some"); + + assert_eq!(global_states_metadata.after_hash, *chain[9].hash()); + assert_eq!(global_states_metadata.after_era_id, chain[9].era_id()); + assert_eq!( + global_states_metadata.after_state_hash, + *chain[9].state_root_hash() + ); + + assert_eq!(global_states_metadata.before_hash, *chain[8].hash()); + assert_eq!( + global_states_metadata.before_state_hash, + *chain[8].state_root_hash() + ); + } + + #[test] + fn should_return_global_states_when_immediate_switch_block() { + let mut rng = TestRng::new(); + + const DEFAULT_VALIDATOR_WEIGHT: u32 = 100; + + let chain_name_hash = ChainNameDigest::random(&mut rng); + + let validators: Vec<_> = iter::repeat_with(crypto::generate_ed25519_keypair) + .take(2) + .map(|(secret_key, public_key)| ValidatorSpec { + secret_key, + public_key, + weight: Some(DEFAULT_VALIDATOR_WEIGHT.into()), + }) + .collect(); + + let mut test_chain_spec = + TestChainSpec::new(&mut rng, Some(vec![4, 8, 9]), Some(vec![8]), &validators); + let chain: Vec<_> = test_chain_spec.iter().take(12).collect(); + + let sync_leap = make_test_sync_leap_with_chain( + &validators, + &chain, + 9, + &[8], + &[], + chain_name_hash, + 
false, + ); + + let global_states_metadata = sync_leap + .global_states_for_sync_across_upgrade() + .expect("should be Some"); + + assert_eq!(global_states_metadata.after_hash, *chain[9].hash()); + assert_eq!(global_states_metadata.after_era_id, chain[9].era_id()); + assert_eq!( + global_states_metadata.after_state_hash, + *chain[9].state_root_hash() + ); + + assert_eq!(global_states_metadata.before_hash, *chain[8].hash()); + assert_eq!( + global_states_metadata.before_state_hash, + *chain[8].state_root_hash() + ); + } + + #[test] + fn era_validator_weights_without_genesis_without_upgrade() { + let mut rng = TestRng::new(); + + let trusted_block = TestBlockBuilder::new() + .switch_block(false) + .build_versioned(&mut rng); + + let version = ProtocolVersion::from_parts(1, 5, 0); + + let ( + block_header_with_signatures_1, + block_header_with_signatures_2, + block_header_with_signatures_3, + ) = make_three_switch_blocks_at_era_and_height_and_version( + &mut rng, + (1, 10, version), + (2, 20, version), + (3, 30, version), + ); + + let sync_leap = SyncLeap { + trusted_ancestor_only: false, + trusted_block_header: trusted_block.clone_header(), + trusted_ancestor_headers: vec![], + block_headers_with_signatures: vec![ + block_header_with_signatures_1, + block_header_with_signatures_2, + block_header_with_signatures_3, + ], + }; + + let fault_tolerance_fraction = Ratio::new_raw(1, 3); + + // Assert only if correct eras are selected, since the + // `should_return_era_validator_weights_for_correct_sync_leap` test already covers the + // actual weight validation. 
+ + let protocol_config = ProtocolConfig { + version, + global_state_update: None, + hard_reset: false, + activation_point: ActivationPoint::EraId(EraId::random(&mut rng)), + }; + + let actual_eras: BTreeSet = sync_leap + .era_validator_weights(fault_tolerance_fraction, &protocol_config) + .map(|era_validator_weights| era_validator_weights.era_id().into()) + .collect(); + let mut expected_eras: BTreeSet = BTreeSet::new(); + // Expect successors of the eras of switch blocks. + expected_eras.extend([2, 3, 4]); + assert_eq!(expected_eras, actual_eras); + } + + #[test] + fn era_validator_weights_without_genesis_with_switch_block_preceding_immediate_switch_block() { + let mut rng = TestRng::new(); + + let trusted_block = TestBlockBuilder::new() + .switch_block(false) + .build_versioned(&mut rng); + + let version_1 = ProtocolVersion::from_parts(1, 4, 0); + let version_2 = ProtocolVersion::from_parts(1, 5, 0); + + let ( + block_header_with_signatures_1, + block_header_with_signatures_2, + block_header_with_signatures_3, + ) = make_three_switch_blocks_at_era_and_height_and_version( + &mut rng, + (1, 10, version_1), + (2, 20, version_1), + (3, 21, version_2), + ); + + let sync_leap = SyncLeap { + trusted_ancestor_only: false, + trusted_block_header: trusted_block.clone_header(), + trusted_ancestor_headers: vec![], + block_headers_with_signatures: vec![ + block_header_with_signatures_1, + block_header_with_signatures_2, + block_header_with_signatures_3, + ], + }; + + let fault_tolerance_fraction = Ratio::new_raw(1, 3); + + // Assert only if correct eras are selected, since the + // `should_return_era_validator_weights_for_correct_sync_leap` test already covers the + // actual weight validation. 
+ + let protocol_config = ProtocolConfig { + version: version_2, + global_state_update: Some(GlobalStateUpdate { + validators: Some(BTreeMap::new()), + entries: BTreeMap::new(), + }), + hard_reset: false, + activation_point: ActivationPoint::EraId(EraId::random(&mut rng)), + }; + + let actual_eras: BTreeSet = sync_leap + .era_validator_weights(fault_tolerance_fraction, &protocol_config) + .map(|era_validator_weights| era_validator_weights.era_id().into()) + .collect(); + let mut expected_eras: BTreeSet = BTreeSet::new(); + + // Block #1 (era=1, height=10) + // Block #2 (era=2, height=20) - block preceding immediate switch block + // Block #3 (era=3, height=21) - immediate switch block. + // Expect the successor of block #2 to be not present. + expected_eras.extend([2, 4]); + assert_eq!(expected_eras, actual_eras); + + let protocol_config = ProtocolConfig { + version: version_2, + global_state_update: None, + hard_reset: rng.gen(), + activation_point: ActivationPoint::EraId(EraId::random(&mut rng)), + }; + + let actual_eras: BTreeSet = sync_leap + .era_validator_weights(fault_tolerance_fraction, &protocol_config) + .map(|era_validator_weights| era_validator_weights.era_id().into()) + .collect(); + let mut expected_eras: BTreeSet = BTreeSet::new(); + + // Block #1 (era=1, height=10) + // Block #2 (era=2, height=20) - block preceding immediate switch block + // Block #3 (era=3, height=21) - immediate switch block. + // Expect era 3 to be present since the upgrade did not change the validators in any way. 
+ expected_eras.extend([2, 3, 4]); + assert_eq!(expected_eras, actual_eras); + } + + #[test] + fn era_validator_weights_with_genesis_without_upgrade() { + let mut rng = TestRng::new(); + + let trusted_block = TestBlockBuilder::new() + .switch_block(false) + .build_versioned(&mut rng); + + let version = ProtocolVersion::from_parts(1, 5, 0); + + let ( + block_header_with_signatures_1, + block_header_with_signatures_2, + block_header_with_signatures_3, + ) = make_three_switch_blocks_at_era_and_height_and_version( + &mut rng, + (0, 0, version), + (1, 10, version), + (2, 20, version), + ); + + let sync_leap = SyncLeap { + trusted_ancestor_only: false, + trusted_block_header: trusted_block.clone_header(), + trusted_ancestor_headers: vec![], + block_headers_with_signatures: vec![ + block_header_with_signatures_1, + block_header_with_signatures_2, + block_header_with_signatures_3, + ], + }; + + let fault_tolerance_fraction = Ratio::new_raw(1, 3); + + // Assert only if correct eras are selected, since the + // `should_return_era_validator_weights_for_correct_sync_leap` test already covers the + // actual weight validation. + let protocol_config = ProtocolConfig { + version, + global_state_update: None, + hard_reset: false, + activation_point: ActivationPoint::EraId(EraId::random(&mut rng)), + }; + + let actual_eras: BTreeSet = sync_leap + .era_validator_weights(fault_tolerance_fraction, &protocol_config) + .map(|era_validator_weights| era_validator_weights.era_id().into()) + .collect(); + let mut expected_eras: BTreeSet = BTreeSet::new(); + // Expect genesis era id and its successor as well as the successors of the eras of + // non-genesis switch blocks. 
+ expected_eras.extend([0, 1, 2, 3]); + assert_eq!(expected_eras, actual_eras); + } + + fn make_three_switch_blocks_at_era_and_height_and_version( + rng: &mut TestRng, + (era_1, height_1, version_1): (u64, u64, ProtocolVersion), + (era_2, height_2, version_2): (u64, u64, ProtocolVersion), + (era_3, height_3, version_3): (u64, u64, ProtocolVersion), + ) -> ( + BlockHeaderWithSignatures, + BlockHeaderWithSignatures, + BlockHeaderWithSignatures, + ) { + let chain_name_hash = ChainNameDigest::random(rng); + let signed_block_1 = TestBlockBuilder::new() + .height(height_1) + .era(era_1) + .protocol_version(version_1) + .switch_block(true) + .build_versioned(rng); + let signed_block_2 = TestBlockBuilder::new() + .height(height_2) + .era(era_2) + .protocol_version(version_2) + .switch_block(true) + .build_versioned(rng); + let signed_block_3 = TestBlockBuilder::new() + .height(height_3) + .era(era_3) + .protocol_version(version_3) + .switch_block(true) + .build_versioned(rng); + + let block_header_with_signatures_1 = make_block_header_with_signatures_from_header( + &signed_block_1.clone_header(), + &[], + chain_name_hash, + false, + ); + let block_header_with_signatures_2 = make_block_header_with_signatures_from_header( + &signed_block_2.clone_header(), + &[], + chain_name_hash, + false, + ); + let block_header_with_signatures_3 = make_block_header_with_signatures_from_header( + &signed_block_3.clone_header(), + &[], + chain_name_hash, + false, + ); + ( + block_header_with_signatures_1, + block_header_with_signatures_2, + block_header_with_signatures_3, + ) + } + + #[test] + fn should_construct_proper_sync_leap_identifier() { + let mut rng = TestRng::new(); + + let sync_leap_identifier = SyncLeapIdentifier::sync_to_tip(BlockHash::random(&mut rng)); + assert!(!sync_leap_identifier.trusted_ancestor_only()); + + let sync_leap_identifier = + SyncLeapIdentifier::sync_to_historical(BlockHash::random(&mut rng)); + assert!(sync_leap_identifier.trusted_ancestor_only()); + } + + // 
Describes a single item from the set of validators that will be used for switch blocks + // created by TestChainSpec. + pub(crate) struct ValidatorSpec { + pub(crate) secret_key: SecretKey, + pub(crate) public_key: PublicKey, + // If `None`, weight will be chosen randomly. + pub(crate) weight: Option, + } + + // Utility struct that can be turned into an iterator that generates + // continuous and descending blocks (i.e. blocks that have consecutive height + // and parent hashes are correctly set). The height of the first block + // in a series is chosen randomly. + // + // Additionally, this struct allows to generate switch blocks at a specific location in the + // chain, for example: Setting `switch_block_indices` to [1, 3] and generating 5 blocks will + // cause the 2nd and 4th blocks to be switch blocks. Validators for all eras are filled from + // the `validators` parameter. + pub(crate) struct TestChainSpec<'a> { + block: BlockV2, + rng: &'a mut TestRng, + switch_block_indices: Option>, + upgrades_indices: Option>, + validators: &'a [ValidatorSpec], + } + + impl<'a> TestChainSpec<'a> { + pub(crate) fn new( + test_rng: &'a mut TestRng, + switch_block_indices: Option>, + upgrades_indices: Option>, + validators: &'a [ValidatorSpec], + ) -> Self { + let block = TestBlockBuilder::new().build(test_rng); + Self { + block, + rng: test_rng, + switch_block_indices, + upgrades_indices, + validators, + } + } + + pub(crate) fn iter(&mut self) -> TestBlockIterator { + let block_height = self.block.height(); + + const DEFAULT_VALIDATOR_WEIGHT: u64 = 100; + + TestBlockIterator::new( + self.block.clone(), + self.rng, + self.switch_block_indices + .clone() + .map(|switch_block_indices| { + switch_block_indices + .iter() + .map(|index| index + block_height) + .collect() + }), + self.upgrades_indices.clone().map(|upgrades_indices| { + upgrades_indices + .iter() + .map(|index| index + block_height) + .collect() + }), + self.validators + .iter() + .map( + |ValidatorSpec { + 
secret_key: _, + public_key, + weight, + }| { + ( + public_key.clone(), + weight.unwrap_or(DEFAULT_VALIDATOR_WEIGHT.into()), + ) + }, + ) + .collect(), + ) + } + } + + pub(crate) struct TestBlockIterator<'a> { + block: BlockV2, + protocol_version: ProtocolVersion, + rng: &'a mut TestRng, + switch_block_indices: Option>, + upgrades_indices: Option>, + validators: Vec<(PublicKey, U512)>, + next_validator_index: usize, + } + + impl<'a> TestBlockIterator<'a> { + pub fn new( + block: BlockV2, + rng: &'a mut TestRng, + switch_block_indices: Option>, + upgrades_indices: Option>, + validators: Vec<(PublicKey, U512)>, + ) -> Self { + let protocol_version = block.protocol_version(); + Self { + block, + protocol_version, + rng, + switch_block_indices, + upgrades_indices, + validators, + next_validator_index: 0, + } + } + } + + impl Iterator for TestBlockIterator<'_> { + type Item = BlockV2; + + fn next(&mut self) -> Option { + let (is_successor_of_switch_block, is_upgrade, maybe_validators) = match &self + .switch_block_indices + { + Some(switch_block_heights) + if switch_block_heights.contains(&self.block.height()) => + { + let prev_height = self.block.height().saturating_sub(1); + let is_successor_of_switch_block = switch_block_heights.contains(&prev_height); + let is_upgrade = is_successor_of_switch_block + && self + .upgrades_indices + .as_ref() + .is_some_and(|upgrades_indices| { + upgrades_indices.contains(&prev_height) + }); + ( + is_successor_of_switch_block, + is_upgrade, + Some(self.validators.clone()), + ) + } + Some(switch_block_heights) => { + let prev_height = self.block.height().saturating_sub(1); + let is_successor_of_switch_block = switch_block_heights.contains(&prev_height); + let is_upgrade = is_successor_of_switch_block + && self + .upgrades_indices + .as_ref() + .is_some_and(|upgrades_indices| { + upgrades_indices.contains(&prev_height) + }); + (is_successor_of_switch_block, is_upgrade, None) + } + None => (false, false, None), + }; + + let 
maybe_validators = if let Some(validators) = maybe_validators { + let first_validator = validators.get(self.next_validator_index).unwrap(); + let second_validator = validators.get(self.next_validator_index + 1).unwrap(); + + // Put two validators in each switch block. + let mut validators_for_block = BTreeMap::new(); + validators_for_block.insert(first_validator.0.clone(), first_validator.1); + validators_for_block.insert(second_validator.0.clone(), second_validator.1); + self.next_validator_index += 2; + + // If we're out of validators, do round robin on the provided list. + if self.next_validator_index >= self.validators.len() { + self.next_validator_index = 0; + } + Some(validators_for_block) + } else { + None + }; + + if is_upgrade { + self.protocol_version = ProtocolVersion::from_parts( + self.protocol_version.value().major, + self.protocol_version.value().minor + 1, + self.protocol_version.value().patch, + ); + } + + let gas_price: u8 = 1u8; + + let era_end = maybe_validators.map(|validators| { + let rnd = EraEndV2::random(self.rng); + EraEndV2::new( + Vec::from(rnd.equivocators()), + Vec::from(rnd.inactive_validators()), + validators, + rnd.rewards().clone(), + gas_price, + ) + }); + let next_block_era_id = if is_successor_of_switch_block { + self.block.era_id().successor() + } else { + self.block.era_id() + }; + let count = self.rng.gen_range(0..6); + let mint_hashes = + iter::repeat_with(|| TransactionHash::V1(TransactionV1Hash::random(self.rng))) + .take(count) + .collect(); + let count = self.rng.gen_range(0..6); + let auction_hashes = + iter::repeat_with(|| TransactionHash::V1(TransactionV1Hash::random(self.rng))) + .take(count) + .collect(); + let count = self.rng.gen_range(0..6); + let install_upgrade_hashes = + iter::repeat_with(|| TransactionHash::V1(TransactionV1Hash::random(self.rng))) + .take(count) + .collect(); + let count = self.rng.gen_range(0..6); + let standard_hashes = + iter::repeat_with(|| 
TransactionHash::V1(TransactionV1Hash::random(self.rng))) + .take(count) + .collect(); + + let transactions = { + let mut ret = BTreeMap::new(); + ret.insert(MINT_LANE_ID, mint_hashes); + ret.insert(AUCTION_LANE_ID, auction_hashes); + ret.insert(INSTALL_UPGRADE_LANE_ID, install_upgrade_hashes); + ret.insert(3, standard_hashes); + ret + }; + + let next = BlockV2::new( + *self.block.hash(), + *self.block.accumulated_seed(), + *self.block.state_root_hash(), + self.rng.gen(), + era_end, + Timestamp::now(), + next_block_era_id, + self.block.height() + 1, + self.protocol_version, + PublicKey::random(self.rng), + transactions, + Default::default(), + gas_price, + Default::default(), + ); + + self.block = next.clone(); + Some(next) + } + } + + #[test] + fn should_create_valid_chain() { + let mut rng = TestRng::new(); + let mut test_block = TestChainSpec::new(&mut rng, None, None, &[]); + let mut block_batch = test_block.iter().take(100); + let mut parent_block: BlockV2 = block_batch.next().unwrap(); + for current_block in block_batch { + assert_eq!( + current_block.height(), + parent_block.height() + 1, + "height should grow monotonically" + ); + assert_eq!( + current_block.parent_hash(), + parent_block.hash(), + "block's parent should point at previous block" + ); + parent_block = current_block; + } + } + + #[test] + fn should_create_switch_blocks() { + let switch_block_indices = vec![0, 10, 76]; + + let validators: Vec<_> = iter::repeat_with(crypto::generate_ed25519_keypair) + .take(2) + .map(|(secret_key, public_key)| ValidatorSpec { + secret_key, + public_key, + weight: None, + }) + .collect(); + + let mut rng = TestRng::new(); + let mut test_block = TestChainSpec::new( + &mut rng, + Some(switch_block_indices.clone()), + None, + &validators, + ); + let block_batch: Vec<_> = test_block.iter().take(100).collect(); + + let base_height = block_batch.first().expect("should have block").height(); + + for block in block_batch { + if switch_block_indices + .iter() + 
.map(|index| index + base_height) + .any(|index| index == block.height()) + { + assert!(block.is_switch_block()) + } else { + assert!(!block.is_switch_block()) + } + } + } +} diff --git a/node/src/types/sync_leap_validation_metadata.rs b/node/src/types/sync_leap_validation_metadata.rs new file mode 100644 index 0000000000..4dc8a97c1f --- /dev/null +++ b/node/src/types/sync_leap_validation_metadata.rs @@ -0,0 +1,39 @@ +use casper_types::{ActivationPoint, Chainspec, GlobalStateUpdate}; +use datasize::DataSize; +use num_rational::Ratio; +use serde::Serialize; + +#[derive(Clone, DataSize, Debug, Eq, PartialEq, Serialize)] +pub(crate) struct SyncLeapValidationMetaData { + pub(crate) recent_era_count: u64, + pub(crate) activation_point: ActivationPoint, + pub(crate) global_state_update: Option, + #[data_size(skip)] + pub(crate) finality_threshold_fraction: Ratio, +} + +impl SyncLeapValidationMetaData { + #[cfg(test)] + pub fn new( + recent_era_count: u64, + activation_point: ActivationPoint, + global_state_update: Option, + finality_threshold_fraction: Ratio, + ) -> Self { + Self { + recent_era_count, + activation_point, + global_state_update, + finality_threshold_fraction, + } + } + + pub(crate) fn from_chainspec(chainspec: &Chainspec) -> Self { + Self { + recent_era_count: chainspec.core_config.recent_era_count(), + activation_point: chainspec.protocol_config.activation_point, + global_state_update: chainspec.protocol_config.global_state_update.clone(), + finality_threshold_fraction: chainspec.core_config.finality_threshold_fraction, + } + } +} diff --git a/node/src/types/timestamp.rs b/node/src/types/timestamp.rs deleted file mode 100644 index 33969ce7c5..0000000000 --- a/node/src/types/timestamp.rs +++ /dev/null @@ -1,365 +0,0 @@ -// TODO - remove once schemars stops causing warning. 
-#![allow(clippy::field_reassign_with_default)] - -use std::{ - fmt::{self, Display, Formatter}, - ops::{Add, AddAssign, Div, Mul, Rem, Sub}, - str::FromStr, - time::{Duration, SystemTime}, -}; - -use datasize::DataSize; -use derive_more::{Add, AddAssign, From, Shl, Shr, Sub, SubAssign}; -use humantime::{DurationError, TimestampError}; -use once_cell::sync::Lazy; -#[cfg(test)] -use rand::Rng; -use schemars::JsonSchema; -use serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer}; - -use casper_types::bytesrepr::{self, FromBytes, ToBytes}; - -use crate::rpcs::docs::DocExample; - -#[cfg(test)] -use crate::testing::TestRng; - -static TIMESTAMP_EXAMPLE: Lazy = Lazy::new(|| { - let example_str: &str = "2020-11-17T00:39:24.072Z"; - Timestamp::from_str(example_str).unwrap() -}); - -/// A timestamp type, representing a concrete moment in time. -#[derive( - DataSize, Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Shr, Shl, JsonSchema, -)] -#[serde(deny_unknown_fields)] -#[schemars(with = "String", description = "Timestamp formatted as per RFC 3339")] -pub struct Timestamp(u64); - -impl Timestamp { - /// Returns the timestamp of the current moment. - pub fn now() -> Self { - let millis = SystemTime::UNIX_EPOCH.elapsed().unwrap().as_millis() as u64; - Timestamp(millis) - } - - /// Returns the time that has elapsed since this timestamp. - pub fn elapsed(&self) -> TimeDiff { - TimeDiff(Timestamp::now().0.saturating_sub(self.0)) - } - - /// Returns a zero timestamp. - pub fn zero() -> Self { - Timestamp(0) - } - - /// Returns the timestamp as the number of milliseconds since the Unix epoch - pub fn millis(&self) -> u64 { - self.0 - } - - /// Returns the difference between `self` and `other`, or `0` if `self` is earlier than `other`. - pub fn saturating_diff(self, other: Timestamp) -> TimeDiff { - TimeDiff(self.0.saturating_sub(other.0)) - } - - /// Returns the difference between `self` and `other`, or `0` if that would be before the epoch. 
- pub fn saturating_sub(self, other: TimeDiff) -> Timestamp { - Timestamp(self.0.saturating_sub(other.0)) - } - - /// Returns the number of trailing zeros in the number of milliseconds since the epoch. - pub fn trailing_zeros(&self) -> u8 { - self.0.trailing_zeros() as u8 - } - - /// Generates a random instance using a `TestRng`. - #[cfg(test)] - pub fn random(rng: &mut TestRng) -> Self { - Timestamp(1_596_763_000_000 + rng.gen_range(200_000..1_000_000)) - } -} - -impl Display for Timestamp { - fn fmt(&self, f: &mut Formatter) -> fmt::Result { - let system_time = SystemTime::UNIX_EPOCH - .checked_add(Duration::from_millis(self.0)) - .expect("should be within system time limits"); - write!(f, "{}", humantime::format_rfc3339_millis(system_time)) - } -} - -impl DocExample for Timestamp { - fn doc_example() -> &'static Self { - &*TIMESTAMP_EXAMPLE - } -} - -impl FromStr for Timestamp { - type Err = TimestampError; - - fn from_str(value: &str) -> Result { - let system_time = humantime::parse_rfc3339_weak(value)?; - let inner = system_time - .duration_since(SystemTime::UNIX_EPOCH) - .map_err(|_| TimestampError::OutOfRange)? 
- .as_millis() as u64; - Ok(Timestamp(inner)) - } -} - -impl Add for Timestamp { - type Output = Timestamp; - - fn add(self, diff: TimeDiff) -> Timestamp { - Timestamp(self.0 + diff.0) - } -} - -impl AddAssign for Timestamp { - fn add_assign(&mut self, rhs: TimeDiff) { - self.0 += rhs.0; - } -} - -impl Sub for Timestamp { - type Output = Timestamp; - - fn sub(self, diff: TimeDiff) -> Timestamp { - Timestamp(self.0 - diff.0) - } -} - -impl Div for Timestamp { - type Output = u64; - - fn div(self, rhs: TimeDiff) -> u64 { - self.0 / rhs.0 - } -} - -impl Rem for Timestamp { - type Output = TimeDiff; - - fn rem(self, diff: TimeDiff) -> TimeDiff { - TimeDiff(self.0 % diff.0) - } -} - -impl Serialize for Timestamp { - fn serialize(&self, serializer: S) -> Result { - if serializer.is_human_readable() { - self.to_string().serialize(serializer) - } else { - self.0.serialize(serializer) - } - } -} - -impl<'de> Deserialize<'de> for Timestamp { - fn deserialize>(deserializer: D) -> Result { - if deserializer.is_human_readable() { - let value_as_string = String::deserialize(deserializer)?; - Timestamp::from_str(&value_as_string).map_err(SerdeError::custom) - } else { - let inner = u64::deserialize(deserializer)?; - Ok(Timestamp(inner)) - } - } -} - -impl ToBytes for Timestamp { - fn to_bytes(&self) -> Result, bytesrepr::Error> { - self.0.to_bytes() - } - - fn serialized_length(&self) -> usize { - self.0.serialized_length() - } -} - -impl FromBytes for Timestamp { - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { - u64::from_bytes(bytes).map(|(inner, remainder)| (Timestamp(inner), remainder)) - } -} - -impl From for Timestamp { - fn from(milliseconds_since_epoch: u64) -> Timestamp { - Timestamp(milliseconds_since_epoch) - } -} - -/// A time difference between two timestamps. 
-#[derive( - Debug, - Default, - Clone, - Copy, - DataSize, - PartialEq, - Eq, - PartialOrd, - Ord, - Hash, - Add, - AddAssign, - Sub, - SubAssign, - From, - JsonSchema, -)] -#[serde(deny_unknown_fields)] -#[schemars(with = "String", description = "Human-readable duration.")] -pub struct TimeDiff(u64); - -impl Display for TimeDiff { - fn fmt(&self, f: &mut Formatter) -> fmt::Result { - write!(f, "{}", humantime::format_duration(Duration::from(*self))) - } -} - -impl FromStr for TimeDiff { - type Err = DurationError; - - fn from_str(value: &str) -> Result { - let inner = humantime::parse_duration(value)?.as_millis() as u64; - Ok(TimeDiff(inner)) - } -} - -impl TimeDiff { - /// Returns the time difference as the number of milliseconds since the Unix epoch - pub fn millis(&self) -> u64 { - self.0 - } - - /// Creates a new time difference from seconds. - pub const fn from_seconds(seconds: u32) -> Self { - TimeDiff(seconds as u64 * 1_000) - } - - /// Returns the product, or `TimeDiff(u64::MAX)` if it would overflow. 
- pub fn saturating_mul(self, rhs: u64) -> Self { - TimeDiff(self.0.saturating_mul(rhs)) - } -} - -impl Mul for TimeDiff { - type Output = TimeDiff; - - fn mul(self, rhs: u64) -> TimeDiff { - TimeDiff(self.0 * rhs) - } -} - -impl Div for TimeDiff { - type Output = TimeDiff; - - fn div(self, rhs: u64) -> TimeDiff { - TimeDiff(self.0 / rhs) - } -} - -impl Div for TimeDiff { - type Output = u64; - - fn div(self, rhs: TimeDiff) -> u64 { - self.0 / rhs.0 - } -} - -impl From for Duration { - fn from(diff: TimeDiff) -> Duration { - Duration::from_millis(diff.0) - } -} - -impl Serialize for TimeDiff { - fn serialize(&self, serializer: S) -> Result { - if serializer.is_human_readable() { - self.to_string().serialize(serializer) - } else { - self.0.serialize(serializer) - } - } -} - -impl<'de> Deserialize<'de> for TimeDiff { - fn deserialize>(deserializer: D) -> Result { - if deserializer.is_human_readable() { - let value_as_string = String::deserialize(deserializer)?; - TimeDiff::from_str(&value_as_string).map_err(SerdeError::custom) - } else { - let inner = u64::deserialize(deserializer)?; - Ok(TimeDiff(inner)) - } - } -} - -impl ToBytes for TimeDiff { - fn to_bytes(&self) -> Result, bytesrepr::Error> { - self.0.to_bytes() - } - - fn serialized_length(&self) -> usize { - self.0.serialized_length() - } -} - -impl FromBytes for TimeDiff { - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { - u64::from_bytes(bytes).map(|(inner, remainder)| (TimeDiff(inner), remainder)) - } -} - -impl From for TimeDiff { - fn from(duration: Duration) -> TimeDiff { - TimeDiff(duration.as_millis() as u64) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn timestamp_serialization_roundtrip() { - let timestamp = Timestamp::now(); - - let timestamp_as_string = timestamp.to_string(); - assert_eq!( - timestamp, - Timestamp::from_str(×tamp_as_string).unwrap() - ); - - let serialized_json = serde_json::to_string(×tamp).unwrap(); - assert_eq!(timestamp, 
serde_json::from_str(&serialized_json).unwrap()); - - let serialized_bincode = bincode::serialize(×tamp).unwrap(); - assert_eq!( - timestamp, - bincode::deserialize(&serialized_bincode).unwrap() - ); - - bytesrepr::test_serialization_roundtrip(×tamp); - } - - #[test] - fn timediff_serialization_roundtrip() { - let mut rng = crate::new_rng(); - let timediff = TimeDiff(rng.gen()); - - let timediff_as_string = timediff.to_string(); - assert_eq!(timediff, TimeDiff::from_str(&timediff_as_string).unwrap()); - - let serialized_json = serde_json::to_string(&timediff).unwrap(); - assert_eq!(timediff, serde_json::from_str(&serialized_json).unwrap()); - - let serialized_bincode = bincode::serialize(&timediff).unwrap(); - assert_eq!(timediff, bincode::deserialize(&serialized_bincode).unwrap()); - - bytesrepr::test_serialization_roundtrip(&timediff); - } -} diff --git a/node/src/types/transaction.rs b/node/src/types/transaction.rs new file mode 100644 index 0000000000..803e6a1677 --- /dev/null +++ b/node/src/types/transaction.rs @@ -0,0 +1,12 @@ +pub(crate) mod arg_handling; +mod deploy; +mod meta_transaction; +mod transaction_footprint; +pub(crate) use deploy::LegacyDeploy; +#[cfg(test)] +pub(crate) use meta_transaction::calculate_transaction_lane_for_transaction; +pub(crate) use meta_transaction::{MetaTransaction, TransactionHeader}; +pub(crate) use transaction_footprint::TransactionFootprint; +pub(crate) mod fields_container; +pub(crate) mod initiator_addr_and_secret_key; +pub(crate) mod transaction_v1_builder; diff --git a/node/src/types/transaction/arg_handling.rs b/node/src/types/transaction/arg_handling.rs new file mode 100644 index 0000000000..6b347c243b --- /dev/null +++ b/node/src/types/transaction/arg_handling.rs @@ -0,0 +1,1459 @@ +//! Collection of helper functions and structures to reason about amorphic RuntimeArgs. 
+use core::marker::PhantomData; + +use casper_types::{ + account::AccountHash, + bytesrepr::FromBytes, + system::auction::{DelegatorKind, Reservation, ARG_VALIDATOR}, + CLType, CLTyped, CLValue, CLValueError, Chainspec, InvalidTransactionV1, PublicKey, + RuntimeArgs, TransactionArgs, URef, U512, +}; +#[cfg(test)] +use casper_types::{bytesrepr::ToBytes, TransferTarget}; +use tracing::debug; + +const TRANSFER_ARG_AMOUNT: RequiredArg = RequiredArg::new("amount"); +const TRANSFER_ARG_SOURCE: OptionalArg = OptionalArg::new("source"); +const TRANSFER_ARG_TARGET: &str = "target"; +// "id" for legacy reasons, if the argument is passed it is [Option] +const TRANSFER_ARG_ID: OptionalArg> = OptionalArg::new("id"); + +const BURN_ARG_AMOUNT: RequiredArg = RequiredArg::new("amount"); +const BURN_ARG_SOURCE: OptionalArg = OptionalArg::new("source"); + +const ADD_BID_ARG_PUBLIC_KEY: RequiredArg = RequiredArg::new("public_key"); +const ADD_BID_ARG_DELEGATION_RATE: RequiredArg = RequiredArg::new("delegation_rate"); +const ADD_BID_ARG_AMOUNT: RequiredArg = RequiredArg::new("amount"); +const ADD_BID_ARG_MINIMUM_DELEGATION_AMOUNT: OptionalArg = + OptionalArg::new("minimum_delegation_amount"); +const ADD_BID_ARG_MAXIMUM_DELEGATION_AMOUNT: OptionalArg = + OptionalArg::new("maximum_delegation_amount"); +const ADD_BID_ARG_RESERVED_SLOTS: OptionalArg = OptionalArg::new("reserved_slots"); + +const WITHDRAW_BID_ARG_PUBLIC_KEY: RequiredArg = RequiredArg::new("public_key"); +const WITHDRAW_BID_ARG_AMOUNT: RequiredArg = RequiredArg::new("amount"); + +const DELEGATE_ARG_DELEGATOR: RequiredArg = RequiredArg::new("delegator"); +const DELEGATE_ARG_VALIDATOR: RequiredArg = RequiredArg::new("validator"); +const DELEGATE_ARG_AMOUNT: RequiredArg = RequiredArg::new("amount"); + +const UNDELEGATE_ARG_DELEGATOR: RequiredArg = RequiredArg::new("delegator"); +const UNDELEGATE_ARG_VALIDATOR: RequiredArg = RequiredArg::new("validator"); +const UNDELEGATE_ARG_AMOUNT: RequiredArg = RequiredArg::new("amount"); + 
+const REDELEGATE_ARG_DELEGATOR: RequiredArg = RequiredArg::new("delegator"); +const REDELEGATE_ARG_VALIDATOR: RequiredArg = RequiredArg::new("validator"); +const REDELEGATE_ARG_AMOUNT: RequiredArg = RequiredArg::new("amount"); +const REDELEGATE_ARG_NEW_VALIDATOR: RequiredArg = RequiredArg::new("new_validator"); + +const ACTIVATE_BID_ARG_VALIDATOR: RequiredArg = RequiredArg::new(ARG_VALIDATOR); + +const CHANGE_BID_PUBLIC_KEY_ARG_PUBLIC_KEY: RequiredArg = RequiredArg::new("public_key"); +const CHANGE_BID_PUBLIC_KEY_ARG_NEW_PUBLIC_KEY: RequiredArg = + RequiredArg::new("new_public_key"); + +const ADD_RESERVATIONS_ARG_RESERVATIONS: RequiredArg> = + RequiredArg::new("reservations"); + +const CANCEL_RESERVATIONS_ARG_VALIDATOR: RequiredArg = RequiredArg::new("validator"); +const CANCEL_RESERVATIONS_ARG_DELEGATORS: RequiredArg> = + RequiredArg::new("delegators"); + +struct RequiredArg { + name: &'static str, + _phantom: PhantomData, +} + +impl RequiredArg { + const fn new(name: &'static str) -> Self { + Self { + name, + _phantom: PhantomData, + } + } + + fn get(&self, args: &RuntimeArgs) -> Result + where + T: CLTyped + FromBytes, + { + let cl_value = args.get(self.name).ok_or_else(|| { + debug!("missing required runtime argument '{}'", self.name); + InvalidTransactionV1::MissingArg { + arg_name: self.name.to_string(), + } + })?; + parse_cl_value(cl_value, self.name) + } + + #[cfg(test)] + fn insert(&self, args: &mut RuntimeArgs, value: T) -> Result<(), CLValueError> + where + T: CLTyped + ToBytes, + { + args.insert(self.name, value) + } +} + +struct OptionalArg { + name: &'static str, + _phantom: PhantomData, +} + +impl OptionalArg { + const fn new(name: &'static str) -> Self { + Self { + name, + _phantom: PhantomData, + } + } + + fn get(&self, args: &RuntimeArgs) -> Result, InvalidTransactionV1> + where + T: CLTyped + FromBytes, + { + let cl_value = match args.get(self.name) { + Some(value) => value, + None => return Ok(None), + }; + let value = 
parse_cl_value::(cl_value, self.name)?; + Ok(Some(value)) + } + + #[cfg(test)] + fn insert(&self, args: &mut RuntimeArgs, value: T) -> Result<(), CLValueError> + where + T: CLTyped + ToBytes, + { + args.insert(self.name, value) + } +} + +fn parse_cl_value( + cl_value: &CLValue, + arg_name: &str, +) -> Result { + cl_value.to_t::().map_err(|error| { + let error = match error { + CLValueError::Serialization(error) => InvalidTransactionV1::InvalidArg { + arg_name: arg_name.to_string(), + error, + }, + CLValueError::Type(_) => InvalidTransactionV1::unexpected_arg_type( + arg_name.to_string(), + vec![T::cl_type()], + cl_value.cl_type().clone(), + ), + }; + debug!("{error}"); + error + }) +} + +/// Creates a `RuntimeArgs` suitable for use in a transfer transaction. +#[cfg(test)] +pub fn new_transfer_args, T: Into>( + amount: A, + maybe_source: Option, + target: T, + maybe_id: Option, +) -> Result { + let mut args = RuntimeArgs::new(); + if let Some(source) = maybe_source { + TRANSFER_ARG_SOURCE.insert(&mut args, source)?; + } + match target.into() { + TransferTarget::PublicKey(public_key) => args.insert(TRANSFER_ARG_TARGET, public_key)?, + TransferTarget::AccountHash(account_hash) => { + args.insert(TRANSFER_ARG_TARGET, account_hash)? + } + TransferTarget::URef(uref) => args.insert(TRANSFER_ARG_TARGET, uref)?, + } + TRANSFER_ARG_AMOUNT.insert(&mut args, amount.into())?; + if maybe_id.is_some() { + TRANSFER_ARG_ID.insert(&mut args, maybe_id)?; + } + Ok(args) +} + +/// Checks the given `RuntimeArgs` are suitable for use in a transfer transaction. 
+pub fn has_valid_transfer_args( + args: &TransactionArgs, + native_transfer_minimum_motes: u64, +) -> Result<(), InvalidTransactionV1> { + let args = args + .as_named() + .ok_or(InvalidTransactionV1::ExpectedNamedArguments)?; + + let amount = TRANSFER_ARG_AMOUNT.get(args)?; + if amount < U512::from(native_transfer_minimum_motes) { + debug!( + minimum = %native_transfer_minimum_motes, + %amount, + "insufficient transfer amount" + ); + return Err(InvalidTransactionV1::InsufficientTransferAmount { + minimum: native_transfer_minimum_motes, + attempted: amount, + }); + } + let _source = TRANSFER_ARG_SOURCE.get(args)?; + + let target_cl_value = args.get(TRANSFER_ARG_TARGET).ok_or_else(|| { + debug!("missing required runtime argument '{TRANSFER_ARG_TARGET}'"); + InvalidTransactionV1::MissingArg { + arg_name: TRANSFER_ARG_TARGET.to_string(), + } + })?; + match target_cl_value.cl_type() { + CLType::PublicKey => { + let _ = parse_cl_value::(target_cl_value, TRANSFER_ARG_TARGET); + } + CLType::ByteArray(32) => { + let _ = parse_cl_value::(target_cl_value, TRANSFER_ARG_TARGET); + } + CLType::URef => { + let _ = parse_cl_value::(target_cl_value, TRANSFER_ARG_TARGET); + } + _ => { + debug!( + "expected runtime argument '{TRANSFER_ARG_TARGET}' to be of type {}, {} or {}, + but is {}", + CLType::PublicKey, + CLType::ByteArray(32), + CLType::URef, + target_cl_value.cl_type() + ); + return Err(InvalidTransactionV1::unexpected_arg_type( + TRANSFER_ARG_TARGET.to_string(), + vec![CLType::PublicKey, CLType::ByteArray(32), CLType::URef], + target_cl_value.cl_type().clone(), + )); + } + } + + let _maybe_id = TRANSFER_ARG_ID.get(args)?; + Ok(()) +} + +/// Creates a `RuntimeArgs` suitable for use in a burn transaction. 
+#[cfg(test)] +pub fn new_burn_args>( + amount: A, + maybe_source: Option, +) -> Result { + let mut args = RuntimeArgs::new(); + if let Some(source) = maybe_source { + BURN_ARG_SOURCE.insert(&mut args, source)?; + } + BURN_ARG_AMOUNT.insert(&mut args, amount.into())?; + Ok(args) +} + +/// Checks the given `RuntimeArgs` are suitable for use in a burn transaction. +pub fn has_valid_burn_args(args: &TransactionArgs) -> Result<(), InvalidTransactionV1> { + let native_burn_minimum_motes = 1; + let args = args + .as_named() + .ok_or(InvalidTransactionV1::ExpectedNamedArguments)?; + + let amount = BURN_ARG_AMOUNT.get(args)?; + if amount < U512::from(native_burn_minimum_motes) { + debug!( + minimum = %native_burn_minimum_motes, + %amount, + "insufficient burn amount" + ); + return Err(InvalidTransactionV1::InsufficientBurnAmount { + minimum: native_burn_minimum_motes, + attempted: amount, + }); + } + let _source = BURN_ARG_SOURCE.get(args)?; + Ok(()) +} + +/// Creates a `RuntimeArgs` suitable for use in an add_bid transaction. 
+#[cfg(test)] +pub fn new_add_bid_args>( + public_key: PublicKey, + delegation_rate: u8, + amount: A, + maybe_minimum_delegation_amount: Option, + maybe_maximum_delegation_amount: Option, + maybe_reserved_slots: Option, +) -> Result { + let mut args = RuntimeArgs::new(); + ADD_BID_ARG_PUBLIC_KEY.insert(&mut args, public_key)?; + ADD_BID_ARG_DELEGATION_RATE.insert(&mut args, delegation_rate)?; + ADD_BID_ARG_AMOUNT.insert(&mut args, amount.into())?; + if let Some(minimum_delegation_amount) = maybe_minimum_delegation_amount { + ADD_BID_ARG_MINIMUM_DELEGATION_AMOUNT.insert(&mut args, minimum_delegation_amount)?; + }; + if let Some(maximum_delegation_amount) = maybe_maximum_delegation_amount { + ADD_BID_ARG_MAXIMUM_DELEGATION_AMOUNT.insert(&mut args, maximum_delegation_amount)?; + }; + if let Some(reserved_slots) = maybe_reserved_slots { + ADD_BID_ARG_RESERVED_SLOTS.insert(&mut args, reserved_slots)?; + }; + Ok(args) +} + +/// Checks the given `RuntimeArgs` are suitable for use in an add_bid transaction. 
+pub fn has_valid_add_bid_args( + chainspec: &Chainspec, + args: &TransactionArgs, +) -> Result<(), InvalidTransactionV1> { + let args = args + .as_named() + .ok_or(InvalidTransactionV1::ExpectedNamedArguments)?; + let _public_key = ADD_BID_ARG_PUBLIC_KEY.get(args)?; + let _delegation_rate = ADD_BID_ARG_DELEGATION_RATE.get(args)?; + let amount = ADD_BID_ARG_AMOUNT.get(args)?; + if amount.is_zero() { + return Err(InvalidTransactionV1::InsufficientAmount { attempted: amount }); + } + let minimum_delegation_amount = ADD_BID_ARG_MINIMUM_DELEGATION_AMOUNT.get(args)?; + if let Some(attempted) = minimum_delegation_amount { + let floor = chainspec.core_config.minimum_delegation_amount; + if attempted < floor { + return Err(InvalidTransactionV1::InvalidMinimumDelegationAmount { floor, attempted }); + } + } + let maximum_delegation_amount = ADD_BID_ARG_MAXIMUM_DELEGATION_AMOUNT.get(args)?; + if let Some(attempted) = maximum_delegation_amount { + let ceiling = chainspec.core_config.maximum_delegation_amount; + if attempted > ceiling { + return Err(InvalidTransactionV1::InvalidMaximumDelegationAmount { + ceiling, + attempted, + }); + } + } + let reserved_slots = ADD_BID_ARG_RESERVED_SLOTS.get(args)?; + if let Some(attempted) = reserved_slots { + let ceiling = chainspec.core_config.max_delegators_per_validator; + if attempted > ceiling { + return Err(InvalidTransactionV1::InvalidReservedSlots { + ceiling, + attempted: attempted as u64, + }); + } + } + Ok(()) +} + +/// Creates a `RuntimeArgs` suitable for use in a withdraw_bid transaction. +#[cfg(test)] +pub fn new_withdraw_bid_args>( + public_key: PublicKey, + amount: A, +) -> Result { + let mut args = RuntimeArgs::new(); + WITHDRAW_BID_ARG_PUBLIC_KEY.insert(&mut args, public_key)?; + WITHDRAW_BID_ARG_AMOUNT.insert(&mut args, amount.into())?; + Ok(args) +} + +/// Checks the given `RuntimeArgs` are suitable for use in a withdraw_bid transaction. 
+pub fn has_valid_withdraw_bid_args(args: &TransactionArgs) -> Result<(), InvalidTransactionV1> { + let args = args + .as_named() + .ok_or(InvalidTransactionV1::ExpectedNamedArguments)?; + let _public_key = WITHDRAW_BID_ARG_PUBLIC_KEY.get(args)?; + let _amount = WITHDRAW_BID_ARG_AMOUNT.get(args)?; + Ok(()) +} + +/// Creates a `RuntimeArgs` suitable for use in a delegate transaction. +#[cfg(test)] +pub fn new_delegate_args>( + delegator: PublicKey, + validator: PublicKey, + amount: A, +) -> Result { + let mut args = RuntimeArgs::new(); + DELEGATE_ARG_DELEGATOR.insert(&mut args, delegator)?; + DELEGATE_ARG_VALIDATOR.insert(&mut args, validator)?; + DELEGATE_ARG_AMOUNT.insert(&mut args, amount.into())?; + Ok(args) +} + +/// Checks the given `RuntimeArgs` are suitable for use in a delegate transaction. +pub fn has_valid_delegate_args( + chainspec: &Chainspec, + args: &TransactionArgs, +) -> Result<(), InvalidTransactionV1> { + let args = args + .as_named() + .ok_or(InvalidTransactionV1::ExpectedNamedArguments)?; + let _delegator = DELEGATE_ARG_DELEGATOR.get(args)?; + let _validator = DELEGATE_ARG_VALIDATOR.get(args)?; + let amount = DELEGATE_ARG_AMOUNT.get(args)?; + // We don't check for minimum since this could be a second delegation + let maximum_delegation_amount = chainspec.core_config.maximum_delegation_amount; + if amount > maximum_delegation_amount.into() { + return Err(InvalidTransactionV1::InvalidDelegationAmount { + ceiling: maximum_delegation_amount, + attempted: amount, + }); + } + Ok(()) +} + +/// Creates a `RuntimeArgs` suitable for use in an undelegate transaction. 
+#[cfg(test)] +pub fn new_undelegate_args>( + delegator: PublicKey, + validator: PublicKey, + amount: A, +) -> Result { + let mut args = RuntimeArgs::new(); + UNDELEGATE_ARG_DELEGATOR.insert(&mut args, delegator)?; + UNDELEGATE_ARG_VALIDATOR.insert(&mut args, validator)?; + UNDELEGATE_ARG_AMOUNT.insert(&mut args, amount.into())?; + Ok(args) +} + +/// Checks the given `RuntimeArgs` are suitable for use in an undelegate transaction. +pub fn has_valid_undelegate_args(args: &TransactionArgs) -> Result<(), InvalidTransactionV1> { + let args = args + .as_named() + .ok_or(InvalidTransactionV1::ExpectedNamedArguments)?; + let _delegator = UNDELEGATE_ARG_DELEGATOR.get(args)?; + let _validator = UNDELEGATE_ARG_VALIDATOR.get(args)?; + let _amount = UNDELEGATE_ARG_AMOUNT.get(args)?; + Ok(()) +} + +/// Creates a `RuntimeArgs` suitable for use in a redelegate transaction. +#[cfg(test)] +pub fn new_redelegate_args>( + delegator: PublicKey, + validator: PublicKey, + amount: A, + new_validator: PublicKey, +) -> Result { + let mut args = RuntimeArgs::new(); + REDELEGATE_ARG_DELEGATOR.insert(&mut args, delegator)?; + REDELEGATE_ARG_VALIDATOR.insert(&mut args, validator)?; + REDELEGATE_ARG_AMOUNT.insert(&mut args, amount.into())?; + REDELEGATE_ARG_NEW_VALIDATOR.insert(&mut args, new_validator)?; + Ok(args) +} + +/// Checks the given `RuntimeArgs` are suitable for use in a redelegate transaction. 
+pub fn has_valid_redelegate_args( + chainspec: &Chainspec, + args: &TransactionArgs, +) -> Result<(), InvalidTransactionV1> { + let args = args + .as_named() + .ok_or(InvalidTransactionV1::ExpectedNamedArguments)?; + let _delegator = REDELEGATE_ARG_DELEGATOR.get(args)?; + let _validator = REDELEGATE_ARG_VALIDATOR.get(args)?; + let _new_validator = REDELEGATE_ARG_NEW_VALIDATOR.get(args)?; + let amount = REDELEGATE_ARG_AMOUNT.get(args)?; + // We don't check for minimum since this could be a second delegation + let maximum_delegation_amount = chainspec.core_config.maximum_delegation_amount; + if amount > maximum_delegation_amount.into() { + return Err(InvalidTransactionV1::InvalidDelegationAmount { + attempted: amount, + ceiling: maximum_delegation_amount, + }); + } + Ok(()) +} + +/// Creates a `RuntimeArgs` suitable for use in a delegate transaction. +#[cfg(test)] +pub fn new_activate_bid_args(validator: PublicKey) -> Result { + let mut args = RuntimeArgs::new(); + ACTIVATE_BID_ARG_VALIDATOR.insert(&mut args, validator)?; + Ok(args) +} + +/// Checks the given `RuntimeArgs` are suitable for use in an activate bid transaction. +pub fn has_valid_activate_bid_args(args: &TransactionArgs) -> Result<(), InvalidTransactionV1> { + let args = args + .as_named() + .ok_or(InvalidTransactionV1::ExpectedNamedArguments)?; + let _validator = ACTIVATE_BID_ARG_VALIDATOR.get(args)?; + Ok(()) +} + +/// Creates a `RuntimeArgs` suitable for use in a change bid public key transaction. +#[cfg(test)] +pub fn new_change_bid_public_key_args( + public_key: PublicKey, + new_public_key: PublicKey, +) -> Result { + let mut args = RuntimeArgs::new(); + CHANGE_BID_PUBLIC_KEY_ARG_PUBLIC_KEY.insert(&mut args, public_key)?; + CHANGE_BID_PUBLIC_KEY_ARG_NEW_PUBLIC_KEY.insert(&mut args, new_public_key)?; + Ok(args) +} + +/// Checks the given `RuntimeArgs` are suitable for use in a change bid public key transaction. 
+pub fn has_valid_change_bid_public_key_args( + args: &TransactionArgs, +) -> Result<(), InvalidTransactionV1> { + let args = args + .as_named() + .ok_or(InvalidTransactionV1::ExpectedNamedArguments)?; + let _public_key = CHANGE_BID_PUBLIC_KEY_ARG_PUBLIC_KEY.get(args)?; + let _new_public_key = CHANGE_BID_PUBLIC_KEY_ARG_NEW_PUBLIC_KEY.get(args)?; + Ok(()) +} + +/// Creates a `RuntimeArgs` suitable for use in an add reservations transaction. +#[cfg(test)] +pub fn new_add_reservations_args( + reservations: Vec, +) -> Result { + let mut args = RuntimeArgs::new(); + ADD_RESERVATIONS_ARG_RESERVATIONS.insert(&mut args, reservations)?; + Ok(args) +} + +/// Checks the given `TransactionArgs` are suitable for use in an add reservations transaction. +pub fn has_valid_add_reservations_args( + chainspec: &Chainspec, + args: &TransactionArgs, +) -> Result<(), InvalidTransactionV1> { + let args = args + .as_named() + .ok_or(InvalidTransactionV1::ExpectedNamedArguments)?; + let reservations = ADD_RESERVATIONS_ARG_RESERVATIONS.get(args)?; + let ceiling = chainspec.core_config.max_delegators_per_validator; + let attempted: u32 = reservations.len().try_into().map_err(|_| { + //This will only happen if reservations.len is bigger than u32 + InvalidTransactionV1::InvalidReservedSlots { + ceiling, + attempted: reservations.len() as u64, + } + })?; + if attempted > ceiling { + return Err(InvalidTransactionV1::InvalidReservedSlots { + ceiling, + attempted: attempted as u64, + }); + } + Ok(()) +} + +/// Creates a `RuntimeArgs` suitable for use in a cancel reservations transaction. +#[cfg(test)] +pub fn new_cancel_reservations_args( + validator: PublicKey, + delegators: Vec, +) -> Result { + let mut args = RuntimeArgs::new(); + CANCEL_RESERVATIONS_ARG_VALIDATOR.insert(&mut args, validator)?; + CANCEL_RESERVATIONS_ARG_DELEGATORS.insert(&mut args, delegators)?; + Ok(args) +} + +/// Checks the given `TransactionArgs` are suitable for use in an add reservations transaction. 
+pub fn has_valid_cancel_reservations_args( + args: &TransactionArgs, +) -> Result<(), InvalidTransactionV1> { + let args = args + .as_named() + .ok_or(InvalidTransactionV1::ExpectedNamedArguments)?; + let _validator = CANCEL_RESERVATIONS_ARG_VALIDATOR.get(args)?; + let _delegators = CANCEL_RESERVATIONS_ARG_DELEGATORS.get(args)?; + Ok(()) +} + +#[cfg(test)] +mod tests { + use core::ops::Range; + + use super::*; + use casper_execution_engine::engine_state::engine_config::{ + DEFAULT_MAXIMUM_DELEGATION_AMOUNT, DEFAULT_MINIMUM_DELEGATION_AMOUNT, + }; + use casper_types::{runtime_args, testing::TestRng, CLType, TransactionArgs}; + use rand::Rng; + + #[test] + fn should_validate_transfer_args() { + let rng = &mut TestRng::new(); + let min_motes = 10_u64; + // Check random args, PublicKey target, within motes limit. + let args = new_transfer_args( + U512::from(rng.gen_range(min_motes..=u64::MAX)), + rng.gen::().then(|| rng.gen()), + PublicKey::random(rng), + rng.gen::().then(|| rng.gen()), + ) + .unwrap(); + has_valid_transfer_args(&TransactionArgs::Named(args), min_motes).unwrap(); + + // Check random args, AccountHash target, within motes limit. + let args = new_transfer_args( + U512::from(rng.gen_range(min_motes..=u64::MAX)), + rng.gen::().then(|| rng.gen()), + rng.gen::(), + rng.gen::().then(|| rng.gen()), + ) + .unwrap(); + has_valid_transfer_args(&TransactionArgs::Named(args), min_motes).unwrap(); + + // Check random args, URef target, within motes limit. + let args = new_transfer_args( + U512::from(rng.gen_range(min_motes..=u64::MAX)), + rng.gen::().then(|| rng.gen()), + rng.gen::(), + rng.gen::().then(|| rng.gen()), + ) + .unwrap(); + has_valid_transfer_args(&TransactionArgs::Named(args), min_motes).unwrap(); + + // Check at minimum motes limit. 
+ let args = new_transfer_args( + U512::from(min_motes), + rng.gen::().then(|| rng.gen()), + PublicKey::random(rng), + rng.gen::().then(|| rng.gen()), + ) + .unwrap(); + has_valid_transfer_args(&TransactionArgs::Named(args), min_motes).unwrap(); + + // Check with extra arg. + let mut args = new_transfer_args( + U512::from(min_motes), + rng.gen::().then(|| rng.gen()), + PublicKey::random(rng), + rng.gen::().then(|| rng.gen()), + ) + .unwrap(); + args.insert("a", 1).unwrap(); + has_valid_transfer_args(&TransactionArgs::Named(args), min_motes).unwrap(); + } + + #[test] + fn transfer_args_with_low_amount_should_be_invalid() { + let rng = &mut TestRng::new(); + let min_motes = 10_u64; + + let args = runtime_args! { + TRANSFER_ARG_AMOUNT.name => U512::from(min_motes - 1), + TRANSFER_ARG_TARGET => PublicKey::random(rng) + }; + + let expected_error = InvalidTransactionV1::InsufficientTransferAmount { + minimum: min_motes, + attempted: U512::from(min_motes - 1), + }; + + assert_eq!( + has_valid_transfer_args(&TransactionArgs::Named(args), min_motes), + Err(expected_error) + ); + } + + #[test] + fn transfer_args_with_missing_required_should_be_invalid() { + let rng = &mut TestRng::new(); + let min_motes = 10_u64; + + // Missing "target". + let args = runtime_args! { + TRANSFER_ARG_AMOUNT.name => U512::from(min_motes), + }; + let expected_error = InvalidTransactionV1::MissingArg { + arg_name: TRANSFER_ARG_TARGET.to_string(), + }; + assert_eq!( + has_valid_transfer_args(&TransactionArgs::Named(args), min_motes), + Err(expected_error) + ); + + // Missing "amount". + let args = runtime_args! 
{ + TRANSFER_ARG_TARGET => PublicKey::random(rng) + }; + let expected_error = InvalidTransactionV1::MissingArg { + arg_name: TRANSFER_ARG_AMOUNT.name.to_string(), + }; + assert_eq!( + has_valid_transfer_args(&TransactionArgs::Named(args), min_motes), + Err(expected_error) + ); + } + + #[test] + fn transfer_args_with_wrong_type_should_be_invalid() { + let rng = &mut TestRng::new(); + let min_motes = 10_u64; + + // Wrong "target" type (a required arg). + let args = runtime_args! { + TRANSFER_ARG_AMOUNT.name => U512::from(min_motes), + TRANSFER_ARG_TARGET => "wrong" + }; + let expected_error = InvalidTransactionV1::unexpected_arg_type( + TRANSFER_ARG_TARGET.to_string(), + vec![CLType::PublicKey, CLType::ByteArray(32), CLType::URef], + CLType::String, + ); + assert_eq!( + has_valid_transfer_args(&TransactionArgs::Named(args), min_motes), + Err(expected_error) + ); + + // Wrong "source" type (an optional arg). + let args = runtime_args! { + TRANSFER_ARG_AMOUNT.name => U512::from(min_motes), + TRANSFER_ARG_SOURCE.name => 1_u8, + TRANSFER_ARG_TARGET => PublicKey::random(rng) + }; + let expected_error = InvalidTransactionV1::unexpected_arg_type( + TRANSFER_ARG_SOURCE.name.to_string(), + vec![URef::cl_type()], + CLType::U8, + ); + assert_eq!( + has_valid_transfer_args(&TransactionArgs::Named(args), min_motes), + Err(expected_error) + ); + } + #[cfg(test)] + fn check_add_bid_args(args: &TransactionArgs) -> Result<(), InvalidTransactionV1> { + has_valid_add_bid_args(&Chainspec::default(), args) + } + + #[test] + fn should_validate_add_bid_args() { + let rng = &mut TestRng::new(); + let floor = DEFAULT_MINIMUM_DELEGATION_AMOUNT; + let ceiling = DEFAULT_MAXIMUM_DELEGATION_AMOUNT; + let reserved_max = 1200; // there doesn't seem to be a const for this? 
+ let minimum_delegation_amount = rng.gen::().then(|| rng.gen_range(floor..floor * 2)); + let maximum_delegation_amount = rng.gen::().then(|| rng.gen_range(floor..ceiling)); + let reserved_slots = rng.gen::().then(|| rng.gen_range(0..reserved_max)); + + // Check random args. + let mut args = new_add_bid_args( + PublicKey::random(rng), + rng.gen(), + rng.gen::(), + minimum_delegation_amount, + maximum_delegation_amount, + reserved_slots, + ) + .unwrap(); + check_add_bid_args(&TransactionArgs::Named(args.clone())).unwrap(); + + // Check with extra arg. + args.insert("a", 1).unwrap(); + check_add_bid_args(&TransactionArgs::Named(args)).unwrap(); + } + + #[test] + fn add_bid_args_with_missing_required_should_be_invalid() { + let rng = &mut TestRng::new(); + + // Missing "public_key". + let args = runtime_args! { + ADD_BID_ARG_DELEGATION_RATE.name => rng.gen::(), + ADD_BID_ARG_AMOUNT.name => U512::from(rng.gen::()) + }; + let expected_error = InvalidTransactionV1::MissingArg { + arg_name: ADD_BID_ARG_PUBLIC_KEY.name.to_string(), + }; + assert_eq!( + check_add_bid_args(&TransactionArgs::Named(args)), + Err(expected_error) + ); + + // Missing "delegation_rate". + let args = runtime_args! { + ADD_BID_ARG_PUBLIC_KEY.name => PublicKey::random(rng), + ADD_BID_ARG_AMOUNT.name => U512::from(rng.gen::()) + }; + let expected_error = InvalidTransactionV1::MissingArg { + arg_name: ADD_BID_ARG_DELEGATION_RATE.name.to_string(), + }; + assert_eq!( + check_add_bid_args(&TransactionArgs::Named(args)), + Err(expected_error) + ); + + // Missing "amount". + let args = runtime_args! 
{ + ADD_BID_ARG_PUBLIC_KEY.name => PublicKey::random(rng), + ADD_BID_ARG_DELEGATION_RATE.name => rng.gen::() + }; + let expected_error = InvalidTransactionV1::MissingArg { + arg_name: ADD_BID_ARG_AMOUNT.name.to_string(), + }; + assert_eq!( + check_add_bid_args(&TransactionArgs::Named(args)), + Err(expected_error) + ); + } + + #[test] + fn add_bid_args_with_wrong_type_should_be_invalid() { + let rng = &mut TestRng::new(); + + // Wrong "amount" type. + let args = runtime_args! { + ADD_BID_ARG_PUBLIC_KEY.name => PublicKey::random(rng), + ADD_BID_ARG_DELEGATION_RATE.name => rng.gen::(), + ADD_BID_ARG_AMOUNT.name => rng.gen::() + }; + let expected_error = InvalidTransactionV1::unexpected_arg_type( + ADD_BID_ARG_AMOUNT.name.to_string(), + vec![CLType::U512], + CLType::U64, + ); + assert_eq!( + check_add_bid_args(&TransactionArgs::Named(args)), + Err(expected_error) + ); + } + + #[test] + fn should_validate_withdraw_bid_args() { + let rng = &mut TestRng::new(); + + // Check random args. + let mut args = new_withdraw_bid_args(PublicKey::random(rng), rng.gen::()).unwrap(); + has_valid_withdraw_bid_args(&TransactionArgs::Named(args.clone())).unwrap(); + + // Check with extra arg. + args.insert("a", 1).unwrap(); + has_valid_withdraw_bid_args(&TransactionArgs::Named(args)).unwrap(); + } + + #[test] + fn withdraw_bid_args_with_missing_required_should_be_invalid() { + let rng = &mut TestRng::new(); + + // Missing "public_key". + let args = runtime_args! { + WITHDRAW_BID_ARG_AMOUNT.name => U512::from(rng.gen::()) + }; + let expected_error = InvalidTransactionV1::MissingArg { + arg_name: WITHDRAW_BID_ARG_PUBLIC_KEY.name.to_string(), + }; + assert_eq!( + has_valid_withdraw_bid_args(&TransactionArgs::Named(args)), + Err(expected_error) + ); + + // Missing "amount". + let args = runtime_args! 
{ + WITHDRAW_BID_ARG_PUBLIC_KEY.name => PublicKey::random(rng), + }; + let expected_error = InvalidTransactionV1::MissingArg { + arg_name: WITHDRAW_BID_ARG_AMOUNT.name.to_string(), + }; + assert_eq!( + has_valid_withdraw_bid_args(&TransactionArgs::Named(args)), + Err(expected_error) + ); + } + + #[test] + fn withdraw_bid_args_with_wrong_type_should_be_invalid() { + let rng = &mut TestRng::new(); + + // Wrong "amount" type. + let args = runtime_args! { + WITHDRAW_BID_ARG_PUBLIC_KEY.name => PublicKey::random(rng), + WITHDRAW_BID_ARG_AMOUNT.name => rng.gen::() + }; + let expected_error = InvalidTransactionV1::unexpected_arg_type( + WITHDRAW_BID_ARG_AMOUNT.name.to_string(), + vec![CLType::U512], + CLType::U64, + ); + assert_eq!( + has_valid_withdraw_bid_args(&TransactionArgs::Named(args)), + Err(expected_error) + ); + } + + #[test] + fn should_validate_delegate_args() { + let chainspec = Chainspec::default(); + let rng = &mut TestRng::new(); + + // Check random args. + let mut args = new_delegate_args( + PublicKey::random(rng), + PublicKey::random(rng), + rng.gen_range(0_u64..1_000_000_000_000_000_000_u64), + ) + .unwrap(); + has_valid_delegate_args(&chainspec, &TransactionArgs::Named(args.clone())).unwrap(); + + // Check with extra arg. + args.insert("a", 1).unwrap(); + has_valid_delegate_args(&chainspec, &TransactionArgs::Named(args)).unwrap(); + } + + #[test] + fn delegate_args_with_too_big_amount_should_fail() { + let chainspec = Chainspec::default(); + let rng = &mut TestRng::new(); + + // Check random args. 
+ let args = new_delegate_args( + PublicKey::random(rng), + PublicKey::random(rng), + 1_000_000_000_000_000_001_u64, + ) + .unwrap(); + let expected_error = InvalidTransactionV1::InvalidDelegationAmount { + ceiling: 1_000_000_000_000_000_000_u64, + attempted: 1_000_000_000_000_000_001_u64.into(), + }; + assert_eq!( + has_valid_delegate_args(&chainspec, &TransactionArgs::Named(args)), + Err(expected_error) + ); + } + + #[test] + fn delegate_args_with_missing_required_should_be_invalid() { + let chainspec = Chainspec::default(); + let rng = &mut TestRng::new(); + + // Missing "delegator". + let args = runtime_args! { + DELEGATE_ARG_VALIDATOR.name => PublicKey::random(rng), + DELEGATE_ARG_AMOUNT.name => U512::from(rng.gen::()) + }; + let expected_error = InvalidTransactionV1::MissingArg { + arg_name: DELEGATE_ARG_DELEGATOR.name.to_string(), + }; + assert_eq!( + has_valid_delegate_args(&chainspec, &TransactionArgs::Named(args)), + Err(expected_error) + ); + + // Missing "validator". + let args = runtime_args! { + DELEGATE_ARG_DELEGATOR.name => PublicKey::random(rng), + DELEGATE_ARG_AMOUNT.name => U512::from(rng.gen::()) + }; + let expected_error = InvalidTransactionV1::MissingArg { + arg_name: DELEGATE_ARG_VALIDATOR.name.to_string(), + }; + assert_eq!( + has_valid_delegate_args(&chainspec, &TransactionArgs::Named(args)), + Err(expected_error) + ); + + // Missing "amount". + let args = runtime_args! { + DELEGATE_ARG_DELEGATOR.name => PublicKey::random(rng), + DELEGATE_ARG_VALIDATOR.name => PublicKey::random(rng), + }; + let expected_error = InvalidTransactionV1::MissingArg { + arg_name: DELEGATE_ARG_AMOUNT.name.to_string(), + }; + assert_eq!( + has_valid_delegate_args(&chainspec, &TransactionArgs::Named(args)), + Err(expected_error) + ); + } + + #[test] + fn delegate_args_with_wrong_type_should_be_invalid() { + let chainspec = Chainspec::default(); + let rng = &mut TestRng::new(); + + // Wrong "amount" type. + let args = runtime_args! 
{ + DELEGATE_ARG_DELEGATOR.name => PublicKey::random(rng), + DELEGATE_ARG_VALIDATOR.name => PublicKey::random(rng), + DELEGATE_ARG_AMOUNT.name => rng.gen::() + }; + let expected_error = InvalidTransactionV1::unexpected_arg_type( + DELEGATE_ARG_AMOUNT.name.to_string(), + vec![CLType::U512], + CLType::U64, + ); + assert_eq!( + has_valid_delegate_args(&chainspec, &TransactionArgs::Named(args)), + Err(expected_error) + ); + } + + #[test] + fn should_validate_undelegate_args() { + let rng = &mut TestRng::new(); + + // Check random args. + let mut args = new_undelegate_args( + PublicKey::random(rng), + PublicKey::random(rng), + rng.gen::(), + ) + .unwrap(); + has_valid_undelegate_args(&TransactionArgs::Named(args.clone())).unwrap(); + + // Check with extra arg. + args.insert("a", 1).unwrap(); + has_valid_undelegate_args(&TransactionArgs::Named(args)).unwrap(); + } + + #[test] + fn undelegate_args_with_missing_required_should_be_invalid() { + let rng = &mut TestRng::new(); + + // Missing "delegator". + let args = runtime_args! { + UNDELEGATE_ARG_VALIDATOR.name => PublicKey::random(rng), + UNDELEGATE_ARG_AMOUNT.name => U512::from(rng.gen::()) + }; + let expected_error = InvalidTransactionV1::MissingArg { + arg_name: UNDELEGATE_ARG_DELEGATOR.name.to_string(), + }; + assert_eq!( + has_valid_undelegate_args(&TransactionArgs::Named(args)), + Err(expected_error) + ); + + // Missing "validator". + let args = runtime_args! { + UNDELEGATE_ARG_DELEGATOR.name => PublicKey::random(rng), + UNDELEGATE_ARG_AMOUNT.name => U512::from(rng.gen::()) + }; + let expected_error = InvalidTransactionV1::MissingArg { + arg_name: UNDELEGATE_ARG_VALIDATOR.name.to_string(), + }; + assert_eq!( + has_valid_undelegate_args(&TransactionArgs::Named(args)), + Err(expected_error) + ); + + // Missing "amount". + let args = runtime_args! 
{ + UNDELEGATE_ARG_DELEGATOR.name => PublicKey::random(rng), + UNDELEGATE_ARG_VALIDATOR.name => PublicKey::random(rng), + }; + let expected_error = InvalidTransactionV1::MissingArg { + arg_name: UNDELEGATE_ARG_AMOUNT.name.to_string(), + }; + assert_eq!( + has_valid_undelegate_args(&TransactionArgs::Named(args)), + Err(expected_error) + ); + } + + #[test] + fn undelegate_args_with_wrong_type_should_be_invalid() { + let rng = &mut TestRng::new(); + + // Wrong "amount" type. + let args = runtime_args! { + UNDELEGATE_ARG_DELEGATOR.name => PublicKey::random(rng), + UNDELEGATE_ARG_VALIDATOR.name => PublicKey::random(rng), + UNDELEGATE_ARG_AMOUNT.name => rng.gen::() + }; + let expected_error = InvalidTransactionV1::unexpected_arg_type( + UNDELEGATE_ARG_AMOUNT.name.to_string(), + vec![CLType::U512], + CLType::U64, + ); + assert_eq!( + has_valid_undelegate_args(&TransactionArgs::Named(args)), + Err(expected_error) + ); + } + + #[test] + fn should_validate_redelegate_args() { + let chainspec = Chainspec::default(); + let rng = &mut TestRng::new(); + + // Check random args. + let mut args = new_redelegate_args( + PublicKey::random(rng), + PublicKey::random(rng), + rng.gen_range(0_u64..1_000_000_000_000_000_000_u64), + PublicKey::random(rng), + ) + .unwrap(); + has_valid_redelegate_args(&chainspec, &TransactionArgs::Named(args.clone())).unwrap(); + + // Check with extra arg. 
+ args.insert("a", 1).unwrap(); + has_valid_redelegate_args(&chainspec, &TransactionArgs::Named(args)).unwrap(); + } + + #[test] + fn redelegate_args_with_too_much_amount_should_be_invalid() { + let chainspec = Chainspec::default(); + let rng = &mut TestRng::new(); + let args = new_redelegate_args( + PublicKey::random(rng), + PublicKey::random(rng), + 1_000_000_000_000_000_001_u64, + PublicKey::random(rng), + ) + .unwrap(); + let expected_error = InvalidTransactionV1::InvalidDelegationAmount { + ceiling: 1_000_000_000_000_000_000_u64, + attempted: 1_000_000_000_000_000_001_u64.into(), + }; + assert_eq!( + has_valid_redelegate_args(&chainspec, &TransactionArgs::Named(args)), + Err(expected_error) + ); + } + + #[test] + fn redelegate_args_with_missing_required_should_be_invalid() { + let chainspec = Chainspec::default(); + let rng = &mut TestRng::new(); + + // Missing "delegator". + let args = runtime_args! { + REDELEGATE_ARG_VALIDATOR.name => PublicKey::random(rng), + REDELEGATE_ARG_AMOUNT.name => U512::from(rng.gen_range(0_u64..1_000_000_000_000_000_000_u64)), + REDELEGATE_ARG_NEW_VALIDATOR.name => PublicKey::random(rng), + }; + let expected_error = InvalidTransactionV1::MissingArg { + arg_name: REDELEGATE_ARG_DELEGATOR.name.to_string(), + }; + assert_eq!( + has_valid_redelegate_args(&chainspec, &TransactionArgs::Named(args)), + Err(expected_error) + ); + + // Missing "validator". + let args = runtime_args! { + REDELEGATE_ARG_DELEGATOR.name => PublicKey::random(rng), + REDELEGATE_ARG_AMOUNT.name => U512::from(rng.gen_range(0_u64..1_000_000_000_000_000_000_u64),), + REDELEGATE_ARG_NEW_VALIDATOR.name => PublicKey::random(rng), + }; + let expected_error = InvalidTransactionV1::MissingArg { + arg_name: REDELEGATE_ARG_VALIDATOR.name.to_string(), + }; + assert_eq!( + has_valid_redelegate_args(&chainspec, &TransactionArgs::Named(args)), + Err(expected_error) + ); + + // Missing "amount". + let args = runtime_args! 
{ + REDELEGATE_ARG_DELEGATOR.name => PublicKey::random(rng), + REDELEGATE_ARG_VALIDATOR.name => PublicKey::random(rng), + REDELEGATE_ARG_NEW_VALIDATOR.name => PublicKey::random(rng), + }; + let expected_error = InvalidTransactionV1::MissingArg { + arg_name: REDELEGATE_ARG_AMOUNT.name.to_string(), + }; + assert_eq!( + has_valid_redelegate_args(&chainspec, &TransactionArgs::Named(args)), + Err(expected_error) + ); + + // Missing "new_validator". + let args = runtime_args! { + REDELEGATE_ARG_DELEGATOR.name => PublicKey::random(rng), + REDELEGATE_ARG_VALIDATOR.name => PublicKey::random(rng), + REDELEGATE_ARG_AMOUNT.name => U512::from(rng.gen::()), + }; + let expected_error = InvalidTransactionV1::MissingArg { + arg_name: REDELEGATE_ARG_NEW_VALIDATOR.name.to_string(), + }; + assert_eq!( + has_valid_redelegate_args(&chainspec, &TransactionArgs::Named(args)), + Err(expected_error) + ); + } + + #[test] + fn redelegate_args_with_wrong_type_should_be_invalid() { + let chainspec = Chainspec::default(); + let rng = &mut TestRng::new(); + + // Wrong "amount" type. + let args = runtime_args! { + REDELEGATE_ARG_DELEGATOR.name => PublicKey::random(rng), + REDELEGATE_ARG_VALIDATOR.name => PublicKey::random(rng), + REDELEGATE_ARG_AMOUNT.name => rng.gen_range(0_u64..1_000_000_000_000_000_000_u64), + REDELEGATE_ARG_NEW_VALIDATOR.name => PublicKey::random(rng), + }; + let expected_error = InvalidTransactionV1::unexpected_arg_type( + REDELEGATE_ARG_AMOUNT.name.to_string(), + vec![CLType::U512], + CLType::U64, + ); + assert_eq!( + has_valid_redelegate_args(&chainspec, &TransactionArgs::Named(args)), + Err(expected_error) + ); + } + + #[test] + fn should_validate_change_bid_public_key_args() { + let rng = &mut TestRng::new(); + + // Check random args. + let mut args = + new_change_bid_public_key_args(PublicKey::random(rng), PublicKey::random(rng)).unwrap(); + has_valid_change_bid_public_key_args(&TransactionArgs::Named(args.clone())).unwrap(); + + // Check with extra arg. 
+ args.insert("a", 1).unwrap(); + has_valid_change_bid_public_key_args(&TransactionArgs::Named(args)).unwrap(); + } + + #[test] + fn change_bid_public_key_args_with_missing_required_should_be_invalid() { + let rng = &mut TestRng::new(); + + // Missing "public_key". + let args = runtime_args! { + CHANGE_BID_PUBLIC_KEY_ARG_NEW_PUBLIC_KEY.name => PublicKey::random(rng), + }; + let expected_error = InvalidTransactionV1::MissingArg { + arg_name: CHANGE_BID_PUBLIC_KEY_ARG_PUBLIC_KEY.name.to_string(), + }; + assert_eq!( + has_valid_change_bid_public_key_args(&TransactionArgs::Named(args)), + Err(expected_error) + ); + + // Missing "new_public_key". + let args = runtime_args! { + CHANGE_BID_PUBLIC_KEY_ARG_PUBLIC_KEY.name => PublicKey::random(rng), + }; + let expected_error = InvalidTransactionV1::MissingArg { + arg_name: CHANGE_BID_PUBLIC_KEY_ARG_NEW_PUBLIC_KEY.name.to_string(), + }; + assert_eq!( + has_valid_change_bid_public_key_args(&TransactionArgs::Named(args)), + Err(expected_error) + ); + } + + #[test] + fn change_bid_public_key_args_with_wrong_type_should_be_invalid() { + let rng = &mut TestRng::new(); + + // Wrong "public_key" type. + let args = runtime_args! { + CHANGE_BID_PUBLIC_KEY_ARG_PUBLIC_KEY.name => rng.gen::(), + CHANGE_BID_PUBLIC_KEY_ARG_NEW_PUBLIC_KEY.name => PublicKey::random(rng), + }; + let expected_error = InvalidTransactionV1::unexpected_arg_type( + CHANGE_BID_PUBLIC_KEY_ARG_PUBLIC_KEY.name.to_string(), + vec![CLType::PublicKey], + CLType::U8, + ); + assert_eq!( + has_valid_change_bid_public_key_args(&TransactionArgs::Named(args)), + Err(expected_error) + ); + + // Wrong "new_public_key" type. + let args = runtime_args! 
{ + CHANGE_BID_PUBLIC_KEY_ARG_PUBLIC_KEY.name => PublicKey::random(rng), + CHANGE_BID_PUBLIC_KEY_ARG_NEW_PUBLIC_KEY.name => rng.gen::(), + }; + let expected_error = InvalidTransactionV1::unexpected_arg_type( + CHANGE_BID_PUBLIC_KEY_ARG_NEW_PUBLIC_KEY.name.to_string(), + vec![CLType::PublicKey], + CLType::U8, + ); + assert_eq!( + has_valid_change_bid_public_key_args(&TransactionArgs::Named(args)), + Err(expected_error) + ); + } + + #[test] + fn should_validate_add_reservations_args() { + let chainspec = Chainspec::default(); + let rng = &mut TestRng::new(); + + let reservations = rng.random_vec(1..100); + + // Check random args. + let mut args = new_add_reservations_args(reservations).unwrap(); + has_valid_add_reservations_args(&chainspec, &TransactionArgs::Named(args.clone())).unwrap(); + + // Check with extra arg. + args.insert("a", 1).unwrap(); + has_valid_add_reservations_args(&chainspec, &TransactionArgs::Named(args)).unwrap(); + } + + #[test] + fn add_reservations_args_with_too_many_reservations_should_be_invalid() { + let chainspec = Chainspec::default(); + let rng = &mut TestRng::new(); + // local chainspec allows 1200 delegators to a validator + let reservations = rng.random_vec(1201..=1201); + let args = new_add_reservations_args(reservations).unwrap(); + + let expected_error = InvalidTransactionV1::InvalidReservedSlots { + ceiling: 1200, + attempted: 1201, + }; + assert_eq!( + has_valid_add_reservations_args(&chainspec, &TransactionArgs::Named(args)), + Err(expected_error) + ); + } + + #[test] + fn add_reservations_args_with_missing_required_should_be_invalid() { + let chainspec = Chainspec::default(); + // Missing "reservations". + let args = runtime_args! 
{}; + let expected_error = InvalidTransactionV1::MissingArg { + arg_name: ADD_RESERVATIONS_ARG_RESERVATIONS.name.to_string(), + }; + assert_eq!( + has_valid_add_reservations_args(&chainspec, &TransactionArgs::Named(args)), + Err(expected_error) + ); + } + + #[test] + fn add_reservations_args_with_wrong_type_should_be_invalid() { + let chainspec = Chainspec::default(); + let rng = &mut TestRng::new(); + + // Wrong "reservations" type. + let args = runtime_args! { + ADD_RESERVATIONS_ARG_RESERVATIONS.name => PublicKey::random(rng), + }; + let expected_error = InvalidTransactionV1::unexpected_arg_type( + ADD_RESERVATIONS_ARG_RESERVATIONS.name.to_string(), + vec![CLType::List(Box::new(CLType::Any))], + CLType::PublicKey, + ); + assert_eq!( + has_valid_add_reservations_args(&chainspec, &TransactionArgs::Named(args)), + Err(expected_error) + ); + } + + #[test] + fn should_validate_cancel_reservations_args() { + let rng = &mut TestRng::new(); + + let validator = PublicKey::random(rng); + let delegators = rng.random_vec(0..100); + + // Check random args. + let mut args = new_cancel_reservations_args(validator, delegators).unwrap(); + has_valid_cancel_reservations_args(&TransactionArgs::Named(args.clone())).unwrap(); + + // Check with extra arg. + args.insert("a", 1).unwrap(); + has_valid_cancel_reservations_args(&TransactionArgs::Named(args)).unwrap(); + } + + #[test] + fn cancel_reservations_args_with_missing_required_should_be_invalid() { + let rng = &mut TestRng::new(); + + // Missing "validator". + let args = runtime_args! { + CANCEL_RESERVATIONS_ARG_DELEGATORS.name => rng.random_vec::, DelegatorKind>(0..100), + }; + let expected_error = InvalidTransactionV1::MissingArg { + arg_name: CANCEL_RESERVATIONS_ARG_VALIDATOR.name.to_string(), + }; + assert_eq!( + has_valid_cancel_reservations_args(&TransactionArgs::Named(args)), + Err(expected_error) + ); + + // Missing "delegators". + let args = runtime_args! 
{ + CANCEL_RESERVATIONS_ARG_VALIDATOR.name => PublicKey::random(rng), + }; + let expected_error = InvalidTransactionV1::MissingArg { + arg_name: CANCEL_RESERVATIONS_ARG_DELEGATORS.name.to_string(), + }; + assert_eq!( + has_valid_cancel_reservations_args(&TransactionArgs::Named(args)), + Err(expected_error) + ); + } + + #[test] + fn cancel_reservations_args_with_wrong_type_should_be_invalid() { + let rng = &mut TestRng::new(); + + // Wrong "validator" type. + let args = runtime_args! { + CANCEL_RESERVATIONS_ARG_VALIDATOR.name => rng.random_vec::, PublicKey>(0..100), + CANCEL_RESERVATIONS_ARG_DELEGATORS.name => rng.random_vec::, DelegatorKind>(0..100), + }; + let expected_error = InvalidTransactionV1::unexpected_arg_type( + CANCEL_RESERVATIONS_ARG_VALIDATOR.name.to_string(), + vec![CLType::PublicKey], + CLType::List(Box::new(CLType::PublicKey)), + ); + assert_eq!( + has_valid_cancel_reservations_args(&TransactionArgs::Named(args)), + Err(expected_error) + ); + + // Wrong "delegators" type. + let args = runtime_args! 
{ + CANCEL_RESERVATIONS_ARG_VALIDATOR.name => PublicKey::random(rng), + CANCEL_RESERVATIONS_ARG_DELEGATORS.name => rng.gen::(), + }; + let expected_error = InvalidTransactionV1::unexpected_arg_type( + CANCEL_RESERVATIONS_ARG_DELEGATORS.name.to_string(), + vec![CLType::List(Box::new(CLType::Any))], + CLType::U8, + ); + assert_eq!( + has_valid_cancel_reservations_args(&TransactionArgs::Named(args)), + Err(expected_error) + ); + } + + #[test] + fn native_calls_require_named_args() { + let chainspec = Chainspec::default(); + let args = TransactionArgs::Bytesrepr(vec![b'a'; 100].into()); + let expected_error = InvalidTransactionV1::ExpectedNamedArguments; + assert_eq!( + has_valid_transfer_args(&args, 0).as_ref(), + Err(&expected_error) + ); + assert_eq!(check_add_bid_args(&args).as_ref(), Err(&expected_error)); + assert_eq!( + has_valid_withdraw_bid_args(&args).as_ref(), + Err(&expected_error) + ); + assert_eq!( + has_valid_delegate_args(&chainspec, &args).as_ref(), + Err(&expected_error) + ); + assert_eq!( + has_valid_undelegate_args(&args).as_ref(), + Err(&expected_error) + ); + assert_eq!( + has_valid_redelegate_args(&chainspec, &args).as_ref(), + Err(&expected_error) + ); + assert_eq!( + has_valid_add_reservations_args(&chainspec, &args).as_ref(), + Err(&expected_error) + ); + assert_eq!( + has_valid_cancel_reservations_args(&args).as_ref(), + Err(&expected_error) + ); + } +} diff --git a/node/src/types/transaction/deploy.rs b/node/src/types/transaction/deploy.rs new file mode 100644 index 0000000000..161a7d0be3 --- /dev/null +++ b/node/src/types/transaction/deploy.rs @@ -0,0 +1,3 @@ +mod legacy_deploy; + +pub(crate) use legacy_deploy::LegacyDeploy; diff --git a/node/src/types/transaction/deploy/legacy_deploy.rs b/node/src/types/transaction/deploy/legacy_deploy.rs new file mode 100644 index 0000000000..90adc1086c --- /dev/null +++ b/node/src/types/transaction/deploy/legacy_deploy.rs @@ -0,0 +1,98 @@ +use std::fmt::{self, Display, Formatter}; + +use 
datasize::DataSize; +use serde::{Deserialize, Serialize}; + +use casper_types::{ + bytesrepr::{self, FromBytes, ToBytes}, + Deploy, DeployHash, InvalidDeploy, Transaction, +}; + +use crate::components::fetcher::{EmptyValidationMetadata, FetchItem, Tag}; + +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, DataSize, Debug)] +pub(crate) struct LegacyDeploy(Deploy); + +impl FetchItem for LegacyDeploy { + type Id = DeployHash; + type ValidationError = InvalidDeploy; + type ValidationMetadata = EmptyValidationMetadata; + + const TAG: Tag = Tag::LegacyDeploy; + + fn fetch_id(&self) -> Self::Id { + *self.0.hash() + } + + fn validate(&self, _metadata: &EmptyValidationMetadata) -> Result<(), Self::ValidationError> { + self.0.has_valid_hash() + } +} + +impl ToBytes for LegacyDeploy { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.0.write_bytes(writer) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.0.to_bytes() + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } +} + +impl FromBytes for LegacyDeploy { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + Deploy::from_bytes(bytes).map(|(inner, remainder)| (LegacyDeploy(inner), remainder)) + } +} + +impl From for Deploy { + fn from(legacy_deploy: LegacyDeploy) -> Self { + legacy_deploy.0 + } +} + +impl From for Transaction { + fn from(legacy_deploy: LegacyDeploy) -> Self { + Self::Deploy(legacy_deploy.0) + } +} + +impl From for LegacyDeploy { + fn from(deploy: Deploy) -> Self { + Self(deploy) + } +} + +impl Display for LegacyDeploy { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + write!(formatter, "legacy-{}", self.0) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn bytesrepr_roundtrip() { + let mut rng = crate::new_rng(); + let legacy_deploy = LegacyDeploy::from(Deploy::random(&mut rng)); + bytesrepr::test_serialization_roundtrip(&legacy_deploy); + } +} + +mod 
specimen_support { + use crate::utils::specimen::{Cache, LargestSpecimen, SizeEstimator}; + + use super::LegacyDeploy; + + impl LargestSpecimen for LegacyDeploy { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + LegacyDeploy(LargestSpecimen::largest_specimen(estimator, cache)) + } + } +} diff --git a/node/src/types/transaction/fields_container.rs b/node/src/types/transaction/fields_container.rs new file mode 100644 index 0000000000..2d2610343b --- /dev/null +++ b/node/src/types/transaction/fields_container.rs @@ -0,0 +1,346 @@ +#[cfg(test)] +use super::arg_handling; +use casper_types::{ + bytesrepr::{Bytes, ToBytes}, + TransactionArgs, TransactionEntryPoint, TransactionScheduling, TransactionTarget, +}; +#[cfg(test)] +use casper_types::{ + testing::TestRng, PublicKey, RuntimeArgs, TransactionInvocationTarget, + TransactionRuntimeParams, TransferTarget, AUCTION_LANE_ID, INSTALL_UPGRADE_LANE_ID, + MINT_LANE_ID, +}; +#[cfg(test)] +use rand::{Rng, RngCore}; +use std::collections::BTreeMap; + +pub(crate) const ARGS_MAP_KEY: u16 = 0; +pub(crate) const TARGET_MAP_KEY: u16 = 1; +pub(crate) const ENTRY_POINT_MAP_KEY: u16 = 2; +pub(crate) const SCHEDULING_MAP_KEY: u16 = 3; + +#[derive(Clone, Eq, PartialEq, Debug)] +pub(crate) enum FieldsContainerError { + CouldNotSerializeField { field_index: u16 }, +} + +pub(crate) struct FieldsContainer { + pub(super) args: TransactionArgs, + pub(super) target: TransactionTarget, + pub(super) entry_point: TransactionEntryPoint, + pub(super) scheduling: TransactionScheduling, +} + +impl FieldsContainer { + pub(crate) fn new( + args: TransactionArgs, + target: TransactionTarget, + entry_point: TransactionEntryPoint, + scheduling: TransactionScheduling, + ) -> Self { + FieldsContainer { + args, + target, + entry_point, + scheduling, + } + } + + pub(crate) fn to_map(&self) -> Result, FieldsContainerError> { + let mut map: BTreeMap = BTreeMap::new(); + map.insert( + ARGS_MAP_KEY, + 
self.args.to_bytes().map(Into::into).map_err(|_| { + FieldsContainerError::CouldNotSerializeField { + field_index: ARGS_MAP_KEY, + } + })?, + ); + map.insert( + TARGET_MAP_KEY, + self.target.to_bytes().map(Into::into).map_err(|_| { + FieldsContainerError::CouldNotSerializeField { + field_index: TARGET_MAP_KEY, + } + })?, + ); + map.insert( + ENTRY_POINT_MAP_KEY, + self.entry_point.to_bytes().map(Into::into).map_err(|_| { + FieldsContainerError::CouldNotSerializeField { + field_index: ENTRY_POINT_MAP_KEY, + } + })?, + ); + map.insert( + SCHEDULING_MAP_KEY, + self.scheduling.to_bytes().map(Into::into).map_err(|_| { + FieldsContainerError::CouldNotSerializeField { + field_index: SCHEDULING_MAP_KEY, + } + })?, + ); + Ok(map) + } + + /// Returns a random `FieldsContainer`. + #[cfg(test)] + pub(crate) fn random(rng: &mut TestRng) -> Self { + use casper_types::URef; + + match rng.gen_range(0..=12) { + 0 => { + let amount = rng.gen_range(2_500_000_000..=u64::MAX); + let maybe_source: Option = rng.gen(); + let target = TransferTarget::random(rng); + let maybe_id = rng.gen::().then(|| rng.gen()); + let args = arg_handling::new_transfer_args(amount, maybe_source, target, maybe_id) + .unwrap(); + FieldsContainer::new( + TransactionArgs::Named(args), + TransactionTarget::Native, + TransactionEntryPoint::Transfer, + TransactionScheduling::random(rng), + ) + } + 1 => { + let public_key = PublicKey::random(rng); + let delegation_rate = rng.gen(); + let amount = rng.gen::(); + let minimum_delegation_amount = rng.gen::().then(|| rng.gen()); + let maximum_delegation_amount = + minimum_delegation_amount.map(|minimum_delegation_amount| { + minimum_delegation_amount + rng.gen::() as u64 + }); + let reserved_slots = rng.gen::().then(|| rng.gen::()); + let args = arg_handling::new_add_bid_args( + public_key, + delegation_rate, + amount, + minimum_delegation_amount, + maximum_delegation_amount, + reserved_slots, + ) + .unwrap(); + FieldsContainer::new( + TransactionArgs::Named(args), + 
TransactionTarget::Native, + TransactionEntryPoint::AddBid, + TransactionScheduling::random(rng), + ) + } + 2 => { + let public_key = PublicKey::random(rng); + let amount = rng.gen::(); + let args = arg_handling::new_withdraw_bid_args(public_key, amount).unwrap(); + FieldsContainer::new( + TransactionArgs::Named(args), + TransactionTarget::Native, + TransactionEntryPoint::WithdrawBid, + TransactionScheduling::random(rng), + ) + } + 3 => { + let delegator = PublicKey::random(rng); + let validator = PublicKey::random(rng); + let amount = rng.gen::(); + let args = arg_handling::new_delegate_args(delegator, validator, amount).unwrap(); + FieldsContainer::new( + TransactionArgs::Named(args), + TransactionTarget::Native, + TransactionEntryPoint::Delegate, + TransactionScheduling::random(rng), + ) + } + 4 => { + let delegator = PublicKey::random(rng); + let validator = PublicKey::random(rng); + let amount = rng.gen::(); + let args = arg_handling::new_undelegate_args(delegator, validator, amount).unwrap(); + FieldsContainer::new( + TransactionArgs::Named(args), + TransactionTarget::Native, + TransactionEntryPoint::Undelegate, + TransactionScheduling::random(rng), + ) + } + 5 => { + let delegator = PublicKey::random(rng); + let validator = PublicKey::random(rng); + let amount = rng.gen::(); + let new_validator = PublicKey::random(rng); + let args = + arg_handling::new_redelegate_args(delegator, validator, amount, new_validator) + .unwrap(); + FieldsContainer::new( + TransactionArgs::Named(args), + TransactionTarget::Native, + TransactionEntryPoint::Redelegate, + TransactionScheduling::random(rng), + ) + } + 6 => Self::random_standard(rng), + 7 => { + let mut buffer = vec![0u8; rng.gen_range(1..100)]; + rng.fill_bytes(buffer.as_mut()); + let is_install_upgrade = rng.gen(); + let target = TransactionTarget::Session { + is_install_upgrade, + module_bytes: Bytes::from(buffer), + runtime: TransactionRuntimeParams::VmCasperV1, + }; + FieldsContainer::new( + 
TransactionArgs::Named(RuntimeArgs::random(rng)), + target, + TransactionEntryPoint::Call, + TransactionScheduling::random(rng), + ) + } + 8 => { + let amount = rng.gen::(); + let maybe_source: Option = rng.gen(); + let args = arg_handling::new_burn_args(amount, maybe_source).unwrap(); + FieldsContainer::new( + TransactionArgs::Named(args), + TransactionTarget::Native, + TransactionEntryPoint::Burn, + TransactionScheduling::random(rng), + ) + } + 9 => { + let validator = PublicKey::random(rng); + let args = arg_handling::new_activate_bid_args(validator).unwrap(); + FieldsContainer::new( + TransactionArgs::Named(args), + TransactionTarget::Native, + TransactionEntryPoint::ActivateBid, + TransactionScheduling::random(rng), + ) + } + 10 => { + let public_key = PublicKey::random(rng); + let new_public_key = PublicKey::random(rng); + let args = arg_handling::new_change_bid_public_key_args(public_key, new_public_key) + .unwrap(); + FieldsContainer::new( + TransactionArgs::Named(args), + TransactionTarget::Native, + TransactionEntryPoint::ChangeBidPublicKey, + TransactionScheduling::random(rng), + ) + } + 11 => { + let number = rng.gen_range(0..500); + let mut reservations = vec![]; + for _ in 0..number { + reservations.push(rng.gen()); + } + let args = arg_handling::new_add_reservations_args(reservations).unwrap(); + FieldsContainer::new( + TransactionArgs::Named(args), + TransactionTarget::Native, + TransactionEntryPoint::AddReservations, + TransactionScheduling::random(rng), + ) + } + 12 => { + let validator = PublicKey::random(rng); + let number = rng.gen_range(0..500); + let mut delegators = vec![]; + for _ in 0..number { + delegators.push(rng.gen()); + } + let args = + arg_handling::new_cancel_reservations_args(validator, delegators).unwrap(); + FieldsContainer::new( + TransactionArgs::Named(args), + TransactionTarget::Native, + TransactionEntryPoint::CancelReservations, + TransactionScheduling::random(rng), + ) + } + _ => unreachable!(), + } + } + + /// Returns a 
random `FieldsContainer`. + #[cfg(test)] + pub fn random_of_lane(rng: &mut TestRng, lane_id: u8) -> Self { + match lane_id { + MINT_LANE_ID => Self::random_transfer(rng), + AUCTION_LANE_ID => Self::random_staking(rng), + INSTALL_UPGRADE_LANE_ID => Self::random_install_upgrade(rng), + _ => Self::random_standard(rng), + } + } + + #[cfg(test)] + fn random_install_upgrade(rng: &mut TestRng) -> Self { + let target = TransactionTarget::Session { + module_bytes: Bytes::from(rng.random_vec(0..100)), + runtime: TransactionRuntimeParams::VmCasperV1, + is_install_upgrade: true, + }; + FieldsContainer::new( + TransactionArgs::Named(RuntimeArgs::random(rng)), + target, + TransactionEntryPoint::Call, + TransactionScheduling::random(rng), + ) + } + + #[cfg(test)] + fn random_staking(rng: &mut TestRng) -> Self { + let public_key = PublicKey::random(rng); + let delegation_rate = rng.gen(); + let amount = rng.gen::(); + let minimum_delegation_amount = rng.gen::().then(|| rng.gen()); + let maximum_delegation_amount = minimum_delegation_amount + .map(|minimum_delegation_amount| minimum_delegation_amount + rng.gen::() as u64); + let reserved_slots = rng.gen::().then(|| rng.gen::()); + let args = arg_handling::new_add_bid_args( + public_key, + delegation_rate, + amount, + minimum_delegation_amount, + maximum_delegation_amount, + reserved_slots, + ) + .unwrap(); + FieldsContainer::new( + TransactionArgs::Named(args), + TransactionTarget::Native, + TransactionEntryPoint::AddBid, + TransactionScheduling::random(rng), + ) + } + + #[cfg(test)] + fn random_transfer(rng: &mut TestRng) -> Self { + let amount = rng.gen_range(2_500_000_000..=u64::MAX); + let maybe_source = if rng.gen() { Some(rng.gen()) } else { None }; + let target = TransferTarget::random(rng); + let maybe_id = rng.gen::().then(|| rng.gen()); + let args = arg_handling::new_transfer_args(amount, maybe_source, target, maybe_id).unwrap(); + FieldsContainer::new( + TransactionArgs::Named(args), + TransactionTarget::Native, + 
TransactionEntryPoint::Transfer, + TransactionScheduling::random(rng), + ) + } + + #[cfg(test)] + fn random_standard(rng: &mut TestRng) -> Self { + let target = TransactionTarget::Stored { + id: TransactionInvocationTarget::random(rng), + runtime: TransactionRuntimeParams::VmCasperV1, + }; + FieldsContainer::new( + TransactionArgs::Named(RuntimeArgs::random(rng)), + target, + TransactionEntryPoint::Custom(rng.random_string(1..11)), + TransactionScheduling::random(rng), + ) + } +} diff --git a/node/src/types/transaction/initiator_addr_and_secret_key.rs b/node/src/types/transaction/initiator_addr_and_secret_key.rs new file mode 100644 index 0000000000..99cdd6e643 --- /dev/null +++ b/node/src/types/transaction/initiator_addr_and_secret_key.rs @@ -0,0 +1,44 @@ +use casper_types::{InitiatorAddr, PublicKey, SecretKey}; + +/// Used when constructing a deploy or transaction. +#[derive(Debug)] +pub(crate) enum InitiatorAddrAndSecretKey<'a> { + /// Provides both the initiator address and the secret key (not necessarily for the same + /// initiator address) used to sign the deploy or transaction. + Both { + /// The initiator address of the account. + initiator_addr: InitiatorAddr, + /// The secret key used to sign the deploy or transaction. + secret_key: &'a SecretKey, + }, + /// The initiator address only (no secret key). The deploy or transaction will be created + /// unsigned. + #[allow(unused)] + InitiatorAddr(InitiatorAddr), + /// The initiator address will be derived from the provided secret key, and the deploy or + /// transaction will be signed by the same secret key. + #[allow(unused)] + SecretKey(&'a SecretKey), +} + +impl InitiatorAddrAndSecretKey<'_> { + /// The address of the initiator of a `TransactionV1`. + pub fn initiator_addr(&self) -> InitiatorAddr { + match self { + InitiatorAddrAndSecretKey::Both { initiator_addr, .. 
} + | InitiatorAddrAndSecretKey::InitiatorAddr(initiator_addr) => initiator_addr.clone(), + InitiatorAddrAndSecretKey::SecretKey(secret_key) => { + InitiatorAddr::PublicKey(PublicKey::from(*secret_key)) + } + } + } + + /// The secret key of the initiator of a `TransactionV1`. + pub fn secret_key(&self) -> Option<&SecretKey> { + match self { + InitiatorAddrAndSecretKey::Both { secret_key, .. } + | InitiatorAddrAndSecretKey::SecretKey(secret_key) => Some(secret_key), + InitiatorAddrAndSecretKey::InitiatorAddr(_) => None, + } + } +} diff --git a/node/src/types/transaction/meta_transaction.rs b/node/src/types/transaction/meta_transaction.rs new file mode 100644 index 0000000000..8fa8e85f9d --- /dev/null +++ b/node/src/types/transaction/meta_transaction.rs @@ -0,0 +1,497 @@ +mod meta_deploy; +mod meta_transaction_v1; +mod transaction_header; +use casper_execution_engine::engine_state::{SessionDataDeploy, SessionDataV1, SessionInputData}; +#[cfg(test)] +use casper_types::InvalidTransactionV1; +use casper_types::{ + account::AccountHash, bytesrepr::ToBytes, Approval, Chainspec, Digest, ExecutableDeployItem, + Gas, GasLimited, HashAddr, InitiatorAddr, InvalidTransaction, Phase, PricingHandling, + PricingMode, TimeDiff, Timestamp, Transaction, TransactionArgs, TransactionConfig, + TransactionEntryPoint, TransactionHash, TransactionTarget, INSTALL_UPGRADE_LANE_ID, +}; +use core::fmt::{self, Debug, Display, Formatter}; +use meta_deploy::MetaDeploy; +pub(crate) use meta_transaction_v1::MetaTransactionV1; +use serde::Serialize; +use std::{borrow::Cow, collections::BTreeSet}; +pub(crate) use transaction_header::*; + +#[cfg(test)] +use super::fields_container::{ARGS_MAP_KEY, ENTRY_POINT_MAP_KEY, TARGET_MAP_KEY}; + +#[derive(Clone, Debug, Serialize)] +pub(crate) enum MetaTransaction { + Deploy(MetaDeploy), + V1(MetaTransactionV1), +} + +impl MetaTransaction { + /// Returns the `TransactionHash` identifying this transaction. 
+ pub(crate) fn hash(&self) -> TransactionHash { + match self { + MetaTransaction::Deploy(meta_deploy) => { + TransactionHash::from(*meta_deploy.deploy().hash()) + } + MetaTransaction::V1(txn) => TransactionHash::from(*txn.hash()), + } + } + + /// Timestamp. + pub(crate) fn timestamp(&self) -> Timestamp { + match self { + MetaTransaction::Deploy(meta_deploy) => meta_deploy.deploy().header().timestamp(), + MetaTransaction::V1(v1) => v1.timestamp(), + } + } + + /// Time to live. + pub(crate) fn ttl(&self) -> TimeDiff { + match self { + MetaTransaction::Deploy(meta_deploy) => meta_deploy.deploy().header().ttl(), + MetaTransaction::V1(v1) => v1.ttl(), + } + } + + /// Returns the `Approval`s for this transaction. + pub(crate) fn approvals(&self) -> BTreeSet { + match self { + MetaTransaction::Deploy(meta_deploy) => meta_deploy.deploy().approvals().clone(), + MetaTransaction::V1(v1) => v1.approvals().clone(), + } + } + + /// Returns the address of the initiator of the transaction. + pub(crate) fn initiator_addr(&self) -> &InitiatorAddr { + match self { + MetaTransaction::Deploy(meta_deploy) => meta_deploy.initiator_addr(), + MetaTransaction::V1(txn) => txn.initiator_addr(), + } + } + + /// Returns the set of account hashes corresponding to the public keys of the approvals. + pub(crate) fn signers(&self) -> BTreeSet { + match self { + MetaTransaction::Deploy(meta_deploy) => meta_deploy + .deploy() + .approvals() + .iter() + .map(|approval| approval.signer().to_account_hash()) + .collect(), + MetaTransaction::V1(txn) => txn + .approvals() + .iter() + .map(|approval| approval.signer().to_account_hash()) + .collect(), + } + } + + /// Returns `true` if `self` represents a native transfer deploy or a native V1 transaction. 
+ pub(crate) fn is_native(&self) -> bool { + match self { + MetaTransaction::Deploy(meta_deploy) => meta_deploy.deploy().is_transfer(), + MetaTransaction::V1(v1_txn) => *v1_txn.target() == TransactionTarget::Native, + } + } + + /// Should this transaction use standard payment processing? + pub(crate) fn is_standard_payment(&self) -> bool { + match self { + MetaTransaction::Deploy(meta_deploy) => meta_deploy + .deploy() + .payment() + .is_standard_payment(Phase::Payment), + MetaTransaction::V1(v1) => { + if let PricingMode::PaymentLimited { + standard_payment, .. + } = v1.pricing_mode() + { + *standard_payment + } else { + true + } + } + } + } + + /// Should this transaction use custom payment processing? + pub(crate) fn is_custom_payment(&self) -> bool { + match self { + MetaTransaction::Deploy(meta_deploy) => !meta_deploy + .deploy() + .payment() + .is_standard_payment(Phase::Payment), + MetaTransaction::V1(v1) => { + if let PricingMode::PaymentLimited { + standard_payment, .. + } = v1.pricing_mode() + { + !*standard_payment + } else { + false + } + } + } + } + + /// Authorization keys. + pub(crate) fn authorization_keys(&self) -> BTreeSet { + match self { + MetaTransaction::Deploy(meta_deploy) => meta_deploy + .deploy() + .approvals() + .iter() + .map(|approval| approval.signer().to_account_hash()) + .collect(), + MetaTransaction::V1(transaction_v1) => transaction_v1 + .approvals() + .iter() + .map(|approval| approval.signer().to_account_hash()) + .collect(), + } + } + + /// The session args. + pub(crate) fn session_args(&self) -> Cow { + match self { + MetaTransaction::Deploy(meta_deploy) => Cow::Owned(TransactionArgs::Named( + meta_deploy.deploy().session().args().clone(), + )), + MetaTransaction::V1(transaction_v1) => Cow::Borrowed(transaction_v1.args()), + } + } + + /// The entry point. 
+ pub(crate) fn entry_point(&self) -> TransactionEntryPoint { + match self { + MetaTransaction::Deploy(meta_deploy) => { + meta_deploy.deploy().session().entry_point_name().into() + } + MetaTransaction::V1(transaction_v1) => transaction_v1.entry_point().clone(), + } + } + + /// The transaction lane. + pub(crate) fn transaction_lane(&self) -> u8 { + match self { + MetaTransaction::Deploy(meta_deploy) => meta_deploy.lane_id(), + MetaTransaction::V1(v1) => v1.lane_id(), + } + } + + /// Returns the gas price tolerance. + pub(crate) fn gas_price_tolerance(&self) -> Result { + match self { + MetaTransaction::Deploy(meta_deploy) => meta_deploy + .deploy() + .gas_price_tolerance() + .map_err(InvalidTransaction::from), + MetaTransaction::V1(v1) => Ok(v1.gas_price_tolerance()), + } + } + + pub(crate) fn gas_limit(&self, chainspec: &Chainspec) -> Result { + match self { + MetaTransaction::Deploy(meta_deploy) => meta_deploy + .deploy() + .gas_limit(chainspec) + .map_err(InvalidTransaction::from), + MetaTransaction::V1(v1) => v1.gas_limit(chainspec), + } + } + + /// Is the transaction the original transaction variant. + pub(crate) fn is_deploy_transaction(&self) -> bool { + match self { + MetaTransaction::Deploy(_) => true, + MetaTransaction::V1(_) => false, + } + } + + /// Does this transaction provide the hash addr for a specific contract to invoke directly? + pub(crate) fn is_contract_by_hash_invocation(&self) -> bool { + self.contract_direct_address().is_some() + } + + /// Returns a `hash_addr` for a targeted contract, if known. + pub(crate) fn contract_direct_address(&self) -> Option<(HashAddr, String)> { + match self { + MetaTransaction::Deploy(meta_deploy) => { + if let ExecutableDeployItem::StoredContractByHash { + hash, entry_point, .. 
+ } = meta_deploy.session() + { + return Some((hash.value(), entry_point.clone())); + } + } + MetaTransaction::V1(v1) => { + return v1.contract_direct_address(); + } + } + None + } + + /// Create a new `MetaTransaction` from a `Transaction`. + pub(crate) fn from_transaction( + transaction: &Transaction, + pricing_handling: PricingHandling, + transaction_config: &TransactionConfig, + ) -> Result { + match transaction { + Transaction::Deploy(deploy) => MetaDeploy::from_deploy( + deploy.clone(), + pricing_handling, + &transaction_config.transaction_v1_config, + ) + .map(MetaTransaction::Deploy), + Transaction::V1(v1) => MetaTransactionV1::from_transaction_v1( + v1, + &transaction_config.transaction_v1_config, + ) + .map(MetaTransaction::V1), + } + } + + pub(crate) fn is_config_compliant( + &self, + chainspec: &Chainspec, + timestamp_leeway: TimeDiff, + at: Timestamp, + ) -> Result<(), InvalidTransaction> { + match self { + MetaTransaction::Deploy(meta_deploy) => meta_deploy + .deploy() + .is_config_compliant(chainspec, timestamp_leeway, at) + .map_err(InvalidTransaction::from), + MetaTransaction::V1(v1) => v1 + .is_config_compliant(chainspec, timestamp_leeway, at) + .map_err(InvalidTransaction::from), + } + } + + pub(crate) fn payload_hash(&self) -> Digest { + match self { + MetaTransaction::Deploy(meta_deploy) => *meta_deploy.deploy().body_hash(), + MetaTransaction::V1(v1) => *v1.payload_hash(), + } + } + + pub(crate) fn to_session_input_data(&self) -> SessionInputData { + let initiator_addr = self.initiator_addr(); + let is_standard_payment = self.is_standard_payment(); + match self { + MetaTransaction::Deploy(meta_deploy) => { + let deploy = meta_deploy.deploy(); + let data = SessionDataDeploy::new( + deploy.hash(), + deploy.session(), + initiator_addr, + self.signers().clone(), + is_standard_payment, + ); + SessionInputData::DeploySessionData { data } + } + MetaTransaction::V1(v1) => { + let data = SessionDataV1::new( + v1.args().as_named().expect("V1 wasm args 
should be named and validated at the transaction acceptor level"), + v1.target(), + v1.entry_point(), + v1.lane_id() == INSTALL_UPGRADE_LANE_ID, + v1.hash(), + v1.pricing_mode(), + initiator_addr, + self.signers().clone(), + is_standard_payment, + ); + SessionInputData::SessionDataV1 { data } + } + } + } + + /// Returns the `SessionInputData` for a payment code if present. + pub(crate) fn to_payment_input_data(&self) -> SessionInputData { + match self { + MetaTransaction::Deploy(meta_deploy) => { + let initiator_addr = meta_deploy.initiator_addr(); + let is_standard_payment = matches!(meta_deploy.deploy().payment(), ExecutableDeployItem::ModuleBytes { module_bytes, .. } if module_bytes.is_empty()); + let deploy = meta_deploy.deploy(); + let data = SessionDataDeploy::new( + deploy.hash(), + deploy.payment(), + initiator_addr, + self.signers().clone(), + is_standard_payment, + ); + SessionInputData::DeploySessionData { data } + } + MetaTransaction::V1(v1) => { + let initiator_addr = v1.initiator_addr(); + + let is_standard_payment = if let PricingMode::PaymentLimited { + standard_payment, + .. + } = v1.pricing_mode() + { + *standard_payment + } else { + true + }; + + // Under V1 transaction we don't have a separate payment code, and custom payment is + // executed as session code with a phase set to Payment. + let data = SessionDataV1::new( + v1.args().as_named().expect("V1 wasm args should be named and validated at the transaction acceptor level"), + v1.target(), + v1.entry_point(), + v1.lane_id() == INSTALL_UPGRADE_LANE_ID, + v1.hash(), + v1.pricing_mode(), + initiator_addr, + self.signers().clone(), + is_standard_payment, + ); + SessionInputData::SessionDataV1 { data } + } + } + } + + /// Size estimate. 
+ pub(crate) fn size_estimate(&self) -> usize { + match self { + MetaTransaction::Deploy(meta_deploy) => meta_deploy.deploy().serialized_length(), + MetaTransaction::V1(v1) => v1.serialized_length(), + } + } + + pub(crate) fn is_v1_wasm(&self) -> bool { + match self { + MetaTransaction::Deploy(_) => true, + MetaTransaction::V1(v1) => v1.is_v1_wasm(), + } + } + + pub(crate) fn is_v2_wasm(&self) -> bool { + match self { + MetaTransaction::Deploy(_) => false, + MetaTransaction::V1(v1) => v1.is_v2_wasm(), + } + } + + pub(crate) fn seed(&self) -> Option<[u8; 32]> { + match self { + MetaTransaction::Deploy(_) => None, + MetaTransaction::V1(v1) => v1.seed(), + } + } + + pub(crate) fn is_install_or_upgrade(&self) -> bool { + match self { + MetaTransaction::Deploy(_) => false, + MetaTransaction::V1(meta_transaction_v1) => { + meta_transaction_v1.lane_id() == INSTALL_UPGRADE_LANE_ID + } + } + } + + pub(crate) fn transferred_value(&self) -> Option { + match self { + MetaTransaction::Deploy(_) => None, + MetaTransaction::V1(v1) => Some(v1.transferred_value()), + } + } + + pub(crate) fn target(&self) -> Option { + match self { + MetaTransaction::Deploy(_) => None, + MetaTransaction::V1(v1) => Some(v1.target().clone()), + } + } +} + +impl Display for MetaTransaction { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + MetaTransaction::Deploy(meta_deploy) => Display::fmt(meta_deploy.deploy(), formatter), + MetaTransaction::V1(txn) => Display::fmt(txn, formatter), + } + } +} + +#[cfg(test)] +/// Calculates the lane based on properties of the transaction +pub(crate) fn calculate_transaction_lane_for_transaction( + transaction: &Transaction, + chainspec: &Chainspec, +) -> Result { + use casper_types::calculate_transaction_lane; + + match transaction { + Transaction::Deploy(_) => { + let meta = MetaTransaction::from_transaction( + transaction, + chainspec.core_config.pricing_handling, + &chainspec.transaction_config, + )?; + Ok(meta.transaction_lane()) + } 
+ Transaction::V1(v1) => { + let args_binary_len = v1 + .payload() + .fields() + .get(&ARGS_MAP_KEY) + .map(|field| field.len()) + .unwrap_or(0); + let target: TransactionTarget = + v1.deserialize_field(TARGET_MAP_KEY).map_err(|error| { + InvalidTransaction::V1(InvalidTransactionV1::CouldNotDeserializeField { error }) + })?; + let entry_point: TransactionEntryPoint = + v1.deserialize_field(ENTRY_POINT_MAP_KEY).map_err(|error| { + InvalidTransaction::V1(InvalidTransactionV1::CouldNotDeserializeField { error }) + })?; + let serialized_length = v1.serialized_length(); + let pricing_mode = v1.payload().pricing_mode(); + calculate_transaction_lane( + &entry_point, + &target, + pricing_mode, + &chainspec.transaction_config.transaction_v1_config, + serialized_length as u64, + args_binary_len as u64, + ) + .map_err(InvalidTransaction::V1) + } + } +} + +#[cfg(test)] +mod proptests { + use super::*; + use casper_types::{gens::legal_transaction_arb, TransactionLaneDefinition}; + use proptest::prelude::*; + + proptest! 
{ + #[test] + fn construction_roundtrip(transaction in legal_transaction_arb()) { + let mut transaction_config = TransactionConfig::default(); + transaction_config.transaction_v1_config.set_wasm_lanes(vec![ + TransactionLaneDefinition { + id: 3, + max_transaction_length: u64::MAX/2, + max_transaction_args_length: 10000, + max_transaction_gas_limit: u64::MAX/2, + max_transaction_count: 10, + }, + TransactionLaneDefinition { + id: 4, + max_transaction_length: u64::MAX, + max_transaction_args_length: 10000, + max_transaction_gas_limit: u64::MAX, + max_transaction_count: 10, + }, + ]); + let maybe_transaction = MetaTransaction::from_transaction(&transaction, PricingHandling::PaymentLimited, &transaction_config); + prop_assert!(maybe_transaction.is_ok(), "{:?}", maybe_transaction); + } + } +} diff --git a/node/src/types/transaction/meta_transaction/meta_deploy.rs b/node/src/types/transaction/meta_transaction/meta_deploy.rs new file mode 100644 index 0000000000..55d1b25250 --- /dev/null +++ b/node/src/types/transaction/meta_transaction/meta_deploy.rs @@ -0,0 +1,146 @@ +use datasize::DataSize; +use once_cell::sync::OnceCell; +use serde::Serialize; + +#[cfg(test)] +use casper_types::TransactionLaneDefinition; +use casper_types::{ + calculate_lane_id_for_deploy, Deploy, ExecutableDeployItem, InitiatorAddr, InvalidTransaction, + PricingHandling, TransactionV1Config, +}; +#[derive(Clone, Debug, Serialize, DataSize)] +pub(crate) struct MetaDeploy { + deploy: Deploy, + //We need to keep this id here since we can fetch it only from chainspec. 
+ lane_id: u8, + #[data_size(skip)] + #[serde(skip)] + initiator_addr: OnceCell, +} + +impl MetaDeploy { + pub(crate) fn from_deploy( + deploy: Deploy, + pricing_handling: PricingHandling, + config: &TransactionV1Config, + ) -> Result { + let lane_id = calculate_lane_id_for_deploy(&deploy, pricing_handling, config) + .map_err(InvalidTransaction::Deploy)?; + let initiator_addr = OnceCell::new(); + Ok(MetaDeploy { + deploy, + lane_id, + initiator_addr, + }) + } + + pub(crate) fn initiator_addr(&self) -> &InitiatorAddr { + self.initiator_addr + .get_or_init(|| InitiatorAddr::PublicKey(self.deploy.account().clone())) + } + + pub(crate) fn lane_id(&self) -> u8 { + self.lane_id + } + + pub(crate) fn session(&self) -> &ExecutableDeployItem { + self.deploy.session() + } + + pub(crate) fn deploy(&self) -> &Deploy { + &self.deploy + } +} + +#[cfg(test)] +pub(crate) fn calculate_lane_id_of_biggest_wasm( + wasm_lanes: &[TransactionLaneDefinition], +) -> Option { + wasm_lanes + .iter() + .max_by(|left, right| { + left.max_transaction_length + .cmp(&right.max_transaction_length) + }) + .map(|definition| definition.id) +} +#[cfg(test)] +mod tests { + use super::calculate_lane_id_of_biggest_wasm; + use casper_types::TransactionLaneDefinition; + #[test] + fn calculate_lane_id_of_biggest_wasm_should_return_none_on_empty() { + let wasms = vec![]; + assert!(calculate_lane_id_of_biggest_wasm(&wasms).is_none()); + } + + #[test] + fn calculate_lane_id_of_biggest_wasm_should_return_biggest() { + let wasms = vec![ + TransactionLaneDefinition { + id: 0, + max_transaction_length: 1, + max_transaction_args_length: 2, + max_transaction_gas_limit: 3, + max_transaction_count: 4, + }, + TransactionLaneDefinition { + id: 1, + max_transaction_length: 10, + max_transaction_args_length: 2, + max_transaction_gas_limit: 3, + max_transaction_count: 4, + }, + ]; + assert_eq!(calculate_lane_id_of_biggest_wasm(&wasms), Some(1)); + let wasms = vec![ + TransactionLaneDefinition { + id: 0, + 
max_transaction_length: 1, + max_transaction_args_length: 2, + max_transaction_gas_limit: 3, + max_transaction_count: 4, + }, + TransactionLaneDefinition { + id: 1, + max_transaction_length: 10, + max_transaction_args_length: 2, + max_transaction_gas_limit: 3, + max_transaction_count: 4, + }, + TransactionLaneDefinition { + id: 2, + max_transaction_length: 7, + max_transaction_args_length: 2, + max_transaction_gas_limit: 3, + max_transaction_count: 4, + }, + ]; + assert_eq!(calculate_lane_id_of_biggest_wasm(&wasms), Some(1)); + + let wasms = vec![ + TransactionLaneDefinition { + id: 0, + max_transaction_length: 1, + max_transaction_args_length: 2, + max_transaction_gas_limit: 3, + max_transaction_count: 4, + }, + TransactionLaneDefinition { + id: 1, + max_transaction_length: 10, + max_transaction_args_length: 2, + max_transaction_gas_limit: 3, + max_transaction_count: 4, + }, + TransactionLaneDefinition { + id: 2, + max_transaction_length: 70, + max_transaction_args_length: 2, + max_transaction_gas_limit: 3, + max_transaction_count: 4, + }, + ]; + assert_eq!(calculate_lane_id_of_biggest_wasm(&wasms), Some(2)); + } +} diff --git a/node/src/types/transaction/meta_transaction/meta_transaction_v1.rs b/node/src/types/transaction/meta_transaction/meta_transaction_v1.rs new file mode 100644 index 0000000000..1e1638a457 --- /dev/null +++ b/node/src/types/transaction/meta_transaction/meta_transaction_v1.rs @@ -0,0 +1,1009 @@ +use crate::types::transaction::arg_handling; +use casper_types::{ + bytesrepr::ToBytes, calculate_transaction_lane, crypto, Approval, Chainspec, + ContractRuntimeTag, Digest, DisplayIter, Gas, HashAddr, InitiatorAddr, InvalidTransaction, + InvalidTransactionV1, PricingHandling, PricingMode, TimeDiff, Timestamp, TransactionArgs, + TransactionConfig, TransactionEntryPoint, TransactionInvocationTarget, + TransactionRuntimeParams, TransactionScheduling, TransactionTarget, TransactionV1, + TransactionV1Config, TransactionV1ExcessiveSizeError, 
TransactionV1Hash, AUCTION_LANE_ID, + MINT_LANE_ID, U512, +}; +use core::fmt::{self, Debug, Display, Formatter}; +use datasize::DataSize; +use once_cell::sync::OnceCell; +use serde::Serialize; +use std::collections::BTreeSet; +use tracing::debug; + +const ARGS_MAP_KEY: u16 = 0; +const TARGET_MAP_KEY: u16 = 1; +const ENTRY_POINT_MAP_KEY: u16 = 2; +const SCHEDULING_MAP_KEY: u16 = 3; +const EXPECTED_NUMBER_OF_FIELDS: usize = 4; + +#[derive(Clone, Debug, Serialize, DataSize)] +pub(crate) struct MetaTransactionV1 { + hash: TransactionV1Hash, + chain_name: String, + timestamp: Timestamp, + ttl: TimeDiff, + pricing_mode: PricingMode, + initiator_addr: InitiatorAddr, + args: TransactionArgs, + target: TransactionTarget, + entry_point: TransactionEntryPoint, + lane_id: u8, + scheduling: TransactionScheduling, + approvals: BTreeSet, + serialized_length: usize, + payload_hash: Digest, + has_valid_hash: Result<(), InvalidTransactionV1>, + #[serde(skip)] + #[data_size(skip)] + is_verified: OnceCell>, +} + +impl MetaTransactionV1 { + pub(crate) fn from_transaction_v1( + v1: &TransactionV1, + transaction_v1_config: &TransactionV1Config, + ) -> Result { + let args_binary_len = v1 + .payload() + .fields() + .get(&ARGS_MAP_KEY) + .map(|field| field.len()) + .unwrap_or(0); + let args: TransactionArgs = v1.deserialize_field(ARGS_MAP_KEY).map_err(|error| { + InvalidTransaction::V1(InvalidTransactionV1::CouldNotDeserializeField { error }) + })?; + let target: TransactionTarget = v1.deserialize_field(TARGET_MAP_KEY).map_err(|error| { + InvalidTransaction::V1(InvalidTransactionV1::CouldNotDeserializeField { error }) + })?; + let entry_point: TransactionEntryPoint = + v1.deserialize_field(ENTRY_POINT_MAP_KEY).map_err(|error| { + InvalidTransaction::V1(InvalidTransactionV1::CouldNotDeserializeField { error }) + })?; + let scheduling: TransactionScheduling = + v1.deserialize_field(SCHEDULING_MAP_KEY).map_err(|error| { + InvalidTransaction::V1(InvalidTransactionV1::CouldNotDeserializeField { 
error }) + })?; + + if v1.number_of_fields() != EXPECTED_NUMBER_OF_FIELDS { + return Err(InvalidTransaction::V1( + InvalidTransactionV1::UnexpectedTransactionFieldEntries, + )); + } + + let payload_hash = v1.payload_hash()?; + let serialized_length = v1.serialized_length(); + let pricing_mode = v1.payload().pricing_mode(); + let lane_id = calculate_transaction_lane( + &entry_point, + &target, + pricing_mode, + transaction_v1_config, + serialized_length as u64, + args_binary_len as u64, + )?; + let has_valid_hash = v1.has_valid_hash(); + let approvals = v1.approvals().clone(); + Ok(MetaTransactionV1::new( + *v1.hash(), + v1.chain_name().to_string(), + v1.timestamp(), + v1.ttl(), + v1.pricing_mode().clone(), + v1.initiator_addr().clone(), + args, + target, + entry_point, + lane_id, + scheduling, + serialized_length, + payload_hash, + approvals, + has_valid_hash, + )) + } + + fn is_native_mint(&self) -> bool { + self.lane_id == MINT_LANE_ID + } + + fn is_native_auction(&self) -> bool { + self.lane_id == AUCTION_LANE_ID + } + + pub(crate) fn is_v1_wasm(&self) -> bool { + match &self.target { + TransactionTarget::Native => false, + TransactionTarget::Stored { + runtime: stored_runtime, + .. + } => { + matches!(stored_runtime, TransactionRuntimeParams::VmCasperV1 { .. }) + && (!self.is_native_mint() && !self.is_native_auction()) + } + TransactionTarget::Session { + runtime: session_runtime, + .. + } => { + matches!(session_runtime, TransactionRuntimeParams::VmCasperV1 { .. }) + && (!self.is_native_mint() && !self.is_native_auction()) + } + } + } + + pub(crate) fn is_v2_wasm(&self) -> bool { + match &self.target { + TransactionTarget::Native => false, + TransactionTarget::Stored { + runtime: stored_runtime, + .. + } => { + matches!(stored_runtime, TransactionRuntimeParams::VmCasperV2 { .. }) + && (!self.is_native_mint() && !self.is_native_auction()) + } + TransactionTarget::Session { + runtime: session_runtime, + .. 
+ } => { + matches!(session_runtime, TransactionRuntimeParams::VmCasperV2 { .. }) + && (!self.is_native_mint() && !self.is_native_auction()) + } + } + } + + #[allow(clippy::too_many_arguments)] + pub(crate) fn new( + hash: TransactionV1Hash, + chain_name: String, + timestamp: Timestamp, + ttl: TimeDiff, + pricing_mode: PricingMode, + initiator_addr: InitiatorAddr, + args: TransactionArgs, + target: TransactionTarget, + entry_point: TransactionEntryPoint, + lane_id: u8, + scheduling: TransactionScheduling, + serialized_length: usize, + payload_hash: Digest, + approvals: BTreeSet, + has_valid_hash: Result<(), InvalidTransactionV1>, + ) -> Self { + Self { + hash, + chain_name, + timestamp, + ttl, + pricing_mode, + initiator_addr, + args, + target, + entry_point, + lane_id, + scheduling, + approvals, + serialized_length, + payload_hash, + has_valid_hash, + is_verified: OnceCell::new(), + } + } + + /// Returns the runtime args of the transaction. + pub(crate) fn args(&self) -> &TransactionArgs { + &self.args + } + + /// Returns the `DeployHash` identifying this `Deploy`. + pub(crate) fn hash(&self) -> &TransactionV1Hash { + &self.hash + } + + /// Returns the `Approvals`. + pub(crate) fn approvals(&self) -> &BTreeSet { + &self.approvals + } + + /// Returns `Ok` if and only if: + /// * the transaction hash is correct (see [`TransactionV1::has_valid_hash`] for details) + /// * approvals are non-empty, and + /// * all approvals are valid signatures of the signed hash + pub(crate) fn verify(&self) -> Result<(), InvalidTransactionV1> { + self.is_verified.get_or_init(|| self.do_verify()).clone() + } + + /// Returns `Ok` if and only if this transaction's body hashes to the value of `body_hash()`, + /// and if this transaction's header hashes to the value claimed as the transaction hash. 
+ pub(crate) fn has_valid_hash(&self) -> &Result<(), InvalidTransactionV1> { + &self.has_valid_hash + } + + fn do_verify(&self) -> Result<(), InvalidTransactionV1> { + if self.approvals.is_empty() { + debug!(?self, "transaction has no approvals"); + return Err(InvalidTransactionV1::EmptyApprovals); + } + + self.has_valid_hash().clone()?; + + for (index, approval) in self.approvals.iter().enumerate() { + if let Err(error) = crypto::verify(self.hash, approval.signature(), approval.signer()) { + debug!( + ?self, + "failed to verify transaction approval {}: {}", index, error + ); + return Err(InvalidTransactionV1::InvalidApproval { index, error }); + } + } + + Ok(()) + } + + /// Returns the entry point of the transaction. + pub(crate) fn entry_point(&self) -> &TransactionEntryPoint { + &self.entry_point + } + + /// Returns the hash_addr and entry point name of a smart contract, if applicable. + pub(crate) fn contract_direct_address(&self) -> Option<(HashAddr, String)> { + let hash_addr = self.target().contract_hash_addr()?; + let entry_point = self.entry_point.custom_entry_point()?; + Some((hash_addr, entry_point)) + } + + /// Returns the transaction lane. + pub(crate) fn lane_id(&self) -> u8 { + self.lane_id + } + + /// Returns payload hash of the transaction. + pub(crate) fn payload_hash(&self) -> &Digest { + &self.payload_hash + } + + /// Returns the pricing mode for the transaction. + pub(crate) fn pricing_mode(&self) -> &PricingMode { + &self.pricing_mode + } + + /// Returns the initiator_addr of the transaction. + pub(crate) fn initiator_addr(&self) -> &InitiatorAddr { + &self.initiator_addr + } + + /// Returns the target of the transaction. + pub(crate) fn target(&self) -> &TransactionTarget { + &self.target + } + + /// Returns `true` if the serialized size of the transaction is not greater than + /// `max_transaction_size`. 
+ fn is_valid_size( + &self, + max_transaction_size: u32, + ) -> Result<(), TransactionV1ExcessiveSizeError> { + let actual_transaction_size = self.serialized_length; + if actual_transaction_size > max_transaction_size as usize { + return Err(TransactionV1ExcessiveSizeError { + max_transaction_size, + actual_transaction_size, + }); + } + Ok(()) + } + + /// Returns the creation timestamp of the transaction. + pub(crate) fn timestamp(&self) -> Timestamp { + self.timestamp + } + + /// Returns the duration after the creation timestamp for which the transaction will stay valid. + /// + /// After this duration has ended, the transaction will be considered expired. + pub(crate) fn ttl(&self) -> TimeDiff { + self.ttl + } + /// Returns the contract runtime tag of the transaction's target, or `None` for native targets. + pub(crate) fn contract_runtime_tag(&self) -> Option { + match &self.target { + TransactionTarget::Native => None, + TransactionTarget::Stored { runtime, .. } => Some(runtime.contract_runtime_tag()), + TransactionTarget::Session { runtime, .. } => Some(runtime.contract_runtime_tag()), + } + } + + /// Returns `Ok` if and only if: + /// * the chain_name is correct, + /// * the configured parameters are complied with at the given timestamp + pub(crate) fn is_config_compliant( + &self, + chainspec: &Chainspec, + timestamp_leeway: TimeDiff, + at: Timestamp, + ) -> Result<(), InvalidTransactionV1> { + let transaction_config = chainspec.transaction_config.clone(); + + match self.contract_runtime_tag() { + Some(expected_runtime @ ContractRuntimeTag::VmCasperV1) => { + if !transaction_config.runtime_config.vm_casper_v1 { + // NOTE: In current implementation native transactions should be executed on + // both VmCasperV1 and VmCasperV2. This may change once we + // have a more stable VmCasperV2 that can also process calls + // to system contracts in VM2 chunked args style. 
+ + return Err(InvalidTransactionV1::InvalidTransactionRuntime { + expected: expected_runtime, + }); + } + + if !self.args.is_named() { + // VmCasperV1 runtime expected named arguments and does not support bytes + // variant. + return Err(InvalidTransactionV1::ExpectedNamedArguments); + } + } + Some(expected_runtime @ ContractRuntimeTag::VmCasperV2) => { + if !transaction_config.runtime_config.vm_casper_v2 { + // NOTE: In current implementation native transactions should be executed on + // both VmCasperV1 and VmCasperV2. This may change once we + // have a more stable VmCasperV2 that can also process calls + // to system contracts in VM2 chunked args style. + + return Err(InvalidTransactionV1::InvalidTransactionRuntime { + expected: expected_runtime, + }); + } + + if !self.args.is_bytesrepr() { + // VmCasperV2 runtime expected bytes arguments and does not support named + // variant. + return Err(InvalidTransactionV1::ExpectedBytesArguments); + } + + match self.pricing_mode { + PricingMode::PaymentLimited { + standard_payment, + payment_amount, + .. + } => { + if payment_amount == 0u64 { + return Err(InvalidTransactionV1::InvalidPaymentAmount); + } + if !standard_payment { + // V2 runtime expects standard payment in the payment limited mode. + return Err(InvalidTransactionV1::InvalidPricingMode { + price_mode: self.pricing_mode.clone(), + }); + } + } + PricingMode::Fixed { .. } => {} + PricingMode::Prepaid { .. } => {} + } + + if let TransactionTarget::Stored { + id: + id @ TransactionInvocationTarget::ByPackageHash { .. } + | id @ TransactionInvocationTarget::ByPackageName { .. 
}, + runtime: _, + } = self.target.clone() + { + return Err(InvalidTransactionV1::UnsupportedInvocationTarget { id: Some(id) }); + } + } + None => { + // noop + } + } + + self.is_valid_size( + transaction_config + .transaction_v1_config + .get_max_serialized_length(self.lane_id) as u32, + )?; + + let chain_name = chainspec.network_config.name.clone(); + + if self.chain_name != chain_name { + debug!( + transaction_hash = %self.hash(), + chain_name = %self.chain_name, + timestamp= %self.timestamp, + ttl= %self.ttl, + pricing_mode= %self.pricing_mode, + initiator_addr= %self.initiator_addr, + target= %self.target, + entry_point= %self.entry_point, + lane_id= %self.lane_id, + scheduling= %self.scheduling, + "invalid chain identifier" + ); + return Err(InvalidTransactionV1::InvalidChainName { + expected: chain_name, + got: self.chain_name.to_string(), + }); + } + + let price_handling = chainspec.core_config.pricing_handling; + let pricing_mode = &self.pricing_mode; + + match pricing_mode { + PricingMode::PaymentLimited { payment_amount, .. 
} => { + if *payment_amount == 0u64 { + return Err(InvalidTransactionV1::InvalidPaymentAmount); + } + if let PricingHandling::PaymentLimited = price_handling { + if self.is_native_mint() { + let entry_point = &self.entry_point; + let expected_payment = match &entry_point { + TransactionEntryPoint::Transfer => { + chainspec.system_costs_config.mint_costs().transfer + } + TransactionEntryPoint::Burn => { + chainspec.system_costs_config.mint_costs().burn + } + _ => { + return Err(InvalidTransactionV1::UnexpectedEntryPoint { + entry_point: entry_point.clone(), + lane_id: self.lane_id, + }) + } + }; + if *payment_amount < expected_payment.into() { + return Err(InvalidTransactionV1::InvalidPaymentAmount); + } + } else if self.is_native_auction() { + let entry_point = &self.entry_point; + let expected_payment = match &entry_point { + TransactionEntryPoint::AddBid | TransactionEntryPoint::ActivateBid => { + chainspec.system_costs_config.auction_costs().add_bid + } + TransactionEntryPoint::WithdrawBid => { + chainspec.system_costs_config.auction_costs().withdraw_bid + } + TransactionEntryPoint::Delegate => { + chainspec.system_costs_config.auction_costs().delegate + } + TransactionEntryPoint::Undelegate => { + chainspec.system_costs_config.auction_costs().undelegate + } + TransactionEntryPoint::Redelegate => { + chainspec.system_costs_config.auction_costs().redelegate + } + TransactionEntryPoint::ChangeBidPublicKey => { + chainspec + .system_costs_config + .auction_costs() + .change_bid_public_key + } + TransactionEntryPoint::AddReservations => { + chainspec + .system_costs_config + .auction_costs() + .add_reservations + } + TransactionEntryPoint::CancelReservations => { + chainspec + .system_costs_config + .auction_costs() + .cancel_reservations + } + _ => { + return Err(InvalidTransactionV1::UnexpectedEntryPoint { + entry_point: entry_point.clone(), + lane_id: self.lane_id, + }) + } + }; + if *payment_amount < expected_payment { + return 
Err(InvalidTransactionV1::InvalidPaymentAmount); + } + } else if *payment_amount < chainspec.core_config.baseline_motes_amount { + return Err(InvalidTransactionV1::InvalidPaymentAmount); + } + } else { + return Err(InvalidTransactionV1::InvalidPricingMode { + price_mode: pricing_mode.clone(), + }); + } + } + PricingMode::Fixed { .. } => { + if let PricingHandling::Fixed = price_handling { + } else { + return Err(InvalidTransactionV1::InvalidPricingMode { + price_mode: pricing_mode.clone(), + }); + } + } + PricingMode::Prepaid { .. } => { + if !chainspec.core_config.allow_prepaid { + // Currently Prepaid isn't implemented, and we should + // not be accepting transactions with this mode. + return Err(InvalidTransactionV1::InvalidPricingMode { + price_mode: pricing_mode.clone(), + }); + } + } + } + + let min_gas_price = chainspec.vacancy_config.min_gas_price; + let gas_price_tolerance = self.gas_price_tolerance(); + if gas_price_tolerance < min_gas_price { + return Err(InvalidTransactionV1::GasPriceToleranceTooLow { + min_gas_price_tolerance: min_gas_price, + provided_gas_price_tolerance: gas_price_tolerance, + }); + } + + self.is_header_metadata_valid(&transaction_config, timestamp_leeway, at, &self.hash)?; + + let max_associated_keys = chainspec.core_config.max_associated_keys; + + if self.approvals.len() > max_associated_keys as usize { + debug!( + transaction_hash = %self.hash(), + number_of_approvals = %self.approvals.len(), + max_associated_keys = %max_associated_keys, + "number of transaction approvals exceeds the limit" + ); + return Err(InvalidTransactionV1::ExcessiveApprovals { + got: self.approvals.len() as u32, + max_associated_keys, + }); + } + + let gas_limit = self + .pricing_mode + .gas_limit(chainspec, self.lane_id) + .map_err(Into::::into)?; + let block_gas_limit = Gas::new(U512::from(transaction_config.block_gas_limit)); + if gas_limit > block_gas_limit { + debug!( + amount = %gas_limit, + %block_gas_limit, + "transaction gas limit exceeds block gas 
limit" + ); + return Err(InvalidTransactionV1::ExceedsBlockGasLimit { + block_gas_limit: transaction_config.block_gas_limit, + got: Box::new(gas_limit.value()), + }); + } + + self.is_body_metadata_valid(chainspec, &transaction_config) + } + + fn is_body_metadata_valid( + &self, + chainspec: &Chainspec, + config: &TransactionConfig, + ) -> Result<(), InvalidTransactionV1> { + let lane_id = self.lane_id; + if !config.transaction_v1_config.is_supported(lane_id) { + return Err(InvalidTransactionV1::InvalidTransactionLane(lane_id)); + } + + let max_serialized_length = config + .transaction_v1_config + .get_max_serialized_length(lane_id); + let actual_length = self.serialized_length; + if actual_length > max_serialized_length as usize { + return Err(InvalidTransactionV1::ExcessiveSize( + TransactionV1ExcessiveSizeError { + max_transaction_size: max_serialized_length as u32, + actual_transaction_size: actual_length, + }, + )); + } + + let max_args_length = config.transaction_v1_config.get_max_args_length(lane_id); + + let args_length = self.args.serialized_length(); + if args_length > max_args_length as usize { + debug!( + args_length, + max_args_length = max_args_length, + "transaction runtime args excessive size" + ); + return Err(InvalidTransactionV1::ExcessiveArgsLength { + max_length: max_args_length as usize, + got: args_length, + }); + } + + match &self.target { + TransactionTarget::Native => match self.entry_point { + TransactionEntryPoint::Call => { + debug!( + entry_point = %self.entry_point, + "native transaction cannot have call entry point" + ); + Err(InvalidTransactionV1::EntryPointCannotBeCall) + } + TransactionEntryPoint::Custom(_) => { + debug!( + entry_point = %self.entry_point, + "native transaction cannot have custom entry point" + ); + Err(InvalidTransactionV1::EntryPointCannotBeCustom { + entry_point: self.entry_point.clone(), + }) + } + TransactionEntryPoint::Transfer => arg_handling::has_valid_transfer_args( + &self.args, + 
config.native_transfer_minimum_motes, + ), + TransactionEntryPoint::Burn => arg_handling::has_valid_burn_args(&self.args), + TransactionEntryPoint::AddBid => { + arg_handling::has_valid_add_bid_args(chainspec, &self.args) + } + TransactionEntryPoint::WithdrawBid => { + arg_handling::has_valid_withdraw_bid_args(&self.args) + } + TransactionEntryPoint::Delegate => { + arg_handling::has_valid_delegate_args(chainspec, &self.args) + } + TransactionEntryPoint::Undelegate => { + arg_handling::has_valid_undelegate_args(&self.args) + } + TransactionEntryPoint::Redelegate => { + arg_handling::has_valid_redelegate_args(chainspec, &self.args) + } + TransactionEntryPoint::ActivateBid => { + arg_handling::has_valid_activate_bid_args(&self.args) + } + TransactionEntryPoint::ChangeBidPublicKey => { + arg_handling::has_valid_change_bid_public_key_args(&self.args) + } + TransactionEntryPoint::AddReservations => { + arg_handling::has_valid_add_reservations_args(chainspec, &self.args) + } + TransactionEntryPoint::CancelReservations => { + arg_handling::has_valid_cancel_reservations_args(&self.args) + } + }, + TransactionTarget::Stored { .. } => match &self.entry_point { + TransactionEntryPoint::Custom(_) => Ok(()), + TransactionEntryPoint::Call + | TransactionEntryPoint::Transfer + | TransactionEntryPoint::Burn + | TransactionEntryPoint::AddBid + | TransactionEntryPoint::WithdrawBid + | TransactionEntryPoint::Delegate + | TransactionEntryPoint::Undelegate + | TransactionEntryPoint::Redelegate + | TransactionEntryPoint::ActivateBid + | TransactionEntryPoint::ChangeBidPublicKey + | TransactionEntryPoint::AddReservations + | TransactionEntryPoint::CancelReservations => { + debug!( + entry_point = %self.entry_point, + "transaction targeting stored entity/package must have custom entry point" + ); + Err(InvalidTransactionV1::EntryPointMustBeCustom { + entry_point: self.entry_point.clone(), + }) + } + }, + TransactionTarget::Session { module_bytes, .. 
} => match &self.entry_point { + TransactionEntryPoint::Call | TransactionEntryPoint::Custom(_) => { + if module_bytes.is_empty() { + debug!("transaction with session code must not have empty module bytes"); + return Err(InvalidTransactionV1::EmptyModuleBytes); + } + Ok(()) + } + TransactionEntryPoint::Transfer + | TransactionEntryPoint::Burn + | TransactionEntryPoint::AddBid + | TransactionEntryPoint::WithdrawBid + | TransactionEntryPoint::Delegate + | TransactionEntryPoint::Undelegate + | TransactionEntryPoint::Redelegate + | TransactionEntryPoint::ActivateBid + | TransactionEntryPoint::ChangeBidPublicKey + | TransactionEntryPoint::AddReservations + | TransactionEntryPoint::CancelReservations => { + debug!( + entry_point = %self.entry_point, + "transaction with session code must use custom or default 'call' entry point" + ); + Err(InvalidTransactionV1::EntryPointMustBeCustom { + entry_point: self.entry_point.clone(), + }) + } + }, + } + } + + fn is_header_metadata_valid( + &self, + config: &TransactionConfig, + timestamp_leeway: TimeDiff, + at: Timestamp, + transaction_hash: &TransactionV1Hash, + ) -> Result<(), InvalidTransactionV1> { + if self.ttl() > config.max_ttl { + debug!( + %transaction_hash, + transaction_header = %self, + max_ttl = %config.max_ttl, + "transaction ttl excessive" + ); + return Err(InvalidTransactionV1::ExcessiveTimeToLive { + max_ttl: config.max_ttl, + got: self.ttl(), + }); + } + + if self.timestamp() > at + timestamp_leeway { + debug!( + %transaction_hash, transaction_header = %self, %at, + "transaction timestamp in the future" + ); + return Err(InvalidTransactionV1::TimestampInFuture { + validation_timestamp: at, + timestamp_leeway, + got: self.timestamp(), + }); + } + + Ok(()) + } + + /// Returns the gas price tolerance for the given transaction. + pub(crate) fn gas_price_tolerance(&self) -> u8 { + match self.pricing_mode { + PricingMode::PaymentLimited { + gas_price_tolerance, + .. 
+ } => gas_price_tolerance, + PricingMode::Fixed { + gas_price_tolerance, + .. + } => gas_price_tolerance, + PricingMode::Prepaid { .. } => { + // TODO: Change this when prepaid gets implemented. + 0u8 + } + } + } + + /// Returns the serialized length of the transaction. + pub(crate) fn serialized_length(&self) -> usize { + self.serialized_length + } + + /// Returns the gas limit for the transaction. + pub(crate) fn gas_limit(&self, chainspec: &Chainspec) -> Result { + self.pricing_mode() + .gas_limit(chainspec, self.lane_id) + .map_err(Into::into) + } + + /// Returns the seed of the transaction. + pub(crate) fn seed(&self) -> Option<[u8; 32]> { + match &self.target { + TransactionTarget::Native => None, + TransactionTarget::Stored { id: _, runtime: _ } => None, + TransactionTarget::Session { + is_install_upgrade: _, + runtime, + module_bytes: _, + } => runtime.seed(), + } + } + + /// Returns the transferred value of the transaction. + pub(crate) fn transferred_value(&self) -> u64 { + match &self.target { + TransactionTarget::Native => 0, + TransactionTarget::Stored { id: _, runtime } => match runtime { + TransactionRuntimeParams::VmCasperV1 => 0, + TransactionRuntimeParams::VmCasperV2 { + transferred_value, .. 
+ } => *transferred_value, + }, + TransactionTarget::Session { + is_install_upgrade: _, + runtime, + module_bytes: _, + } => match runtime { + TransactionRuntimeParams::VmCasperV1 => 0, + TransactionRuntimeParams::VmCasperV2 { + transferred_value, + seed: _, + } => *transferred_value, + }, + } + } +} + +impl Display for MetaTransactionV1 { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + write!( + formatter, + "meta-transaction-v1[hash: {}, chain_name: {}, timestamp: {}, ttl: {}, pricing_mode: {}, initiator_addr: {}, target: {}, entry_point: {}, lane_id: {}, scheduling: {}, approvals: {}]", + self.hash, + self.chain_name, + self.timestamp, + self.ttl, + self.pricing_mode, + self.initiator_addr, + self.target, + self.entry_point, + self.lane_id, + self.scheduling, + DisplayIter::new(self.approvals.iter()) + ) + } +} + +#[cfg(test)] +mod tests { + use super::MetaTransactionV1; + use crate::types::transaction::transaction_v1_builder::TransactionV1Builder; + use casper_types::{ + testing::TestRng, InvalidTransaction, InvalidTransactionV1, PricingMode, SecretKey, + TransactionInvocationTarget, TransactionLaneDefinition, TransactionRuntimeParams, + TransactionV1Config, + }; + + #[test] + fn limited_amount_should_determine_transaction_lane_for_session() { + let rng = &mut TestRng::new(); + let secret_key = SecretKey::random(rng); + let pricing_mode = PricingMode::PaymentLimited { + payment_amount: 1001, + gas_price_tolerance: 1, + standard_payment: true, + }; + + let transaction_v1 = TransactionV1Builder::new_session( + false, + vec![1; 30].into(), + TransactionRuntimeParams::VmCasperV1, + ) + .with_chain_name("x".to_string()) + .with_pricing_mode(pricing_mode) + .with_secret_key(&secret_key) + .build() + .unwrap(); + let config = build_v1_config(); + + let meta_transaction = MetaTransactionV1::from_transaction_v1(&transaction_v1, &config) + .expect("meta transaction should be valid"); + assert_eq!(meta_transaction.lane_id(), 4); + } + + #[test] + fn 
limited_amount_should_fail_if_does_not_fit_in_any_lane() { + let rng = &mut TestRng::new(); + let secret_key = SecretKey::random(rng); + let pricing_mode = PricingMode::PaymentLimited { + payment_amount: 1000000, + gas_price_tolerance: 1, + standard_payment: true, + }; + + let transaction_v1 = TransactionV1Builder::new_session( + false, + vec![1; 30].into(), + TransactionRuntimeParams::VmCasperV1, + ) + .with_chain_name("x".to_string()) + .with_pricing_mode(pricing_mode) + .with_secret_key(&secret_key) + .build() + .unwrap(); + let config = build_v1_config(); + + let res = MetaTransactionV1::from_transaction_v1(&transaction_v1, &config); + assert!(matches!( + res, + Err(InvalidTransaction::V1(InvalidTransactionV1::NoLaneMatch)) + )) + } + + #[test] + fn limited_amount_should_fail_if_transaction_size_does_not_fit_in_any_lane() { + let rng = &mut TestRng::new(); + let secret_key = SecretKey::random(rng); + let pricing_mode = PricingMode::PaymentLimited { + payment_amount: 100, + gas_price_tolerance: 1, + standard_payment: true, + }; + + let transaction_v1 = TransactionV1Builder::new_session( + false, + vec![1; 3000].into(), + TransactionRuntimeParams::VmCasperV1, + ) + .with_chain_name("x".to_string()) + .with_pricing_mode(pricing_mode) + .with_secret_key(&secret_key) + .build() + .unwrap(); + let mut config = TransactionV1Config::default(); + config.set_wasm_lanes(vec![ + TransactionLaneDefinition { + id: 3, + max_transaction_length: 200, + max_transaction_args_length: 100, + max_transaction_gas_limit: 100, + max_transaction_count: 10, + }, + TransactionLaneDefinition { + id: 4, + max_transaction_length: 500, + max_transaction_args_length: 100, + max_transaction_gas_limit: 10000, + max_transaction_count: 10, + }, + ]); + + let res = MetaTransactionV1::from_transaction_v1(&transaction_v1, &config); + assert!(matches!( + res, + Err(InvalidTransaction::V1(InvalidTransactionV1::NoLaneMatch)) + )) + } + + #[test] + fn 
limited_amount_should_determine_transaction_lane_for_stored() { + let rng = &mut TestRng::new(); + let secret_key = SecretKey::random(rng); + let pricing_mode = PricingMode::PaymentLimited { + payment_amount: 1001, + gas_price_tolerance: 1, + standard_payment: true, + }; + + let transaction_v1 = TransactionV1Builder::new_targeting_stored( + TransactionInvocationTarget::ByName("xyz".to_string()), + "abc", + TransactionRuntimeParams::VmCasperV1, + ) + .with_chain_name("x".to_string()) + .with_secret_key(&secret_key) + .with_pricing_mode(pricing_mode) + .build() + .unwrap(); + let config = build_v1_config(); + + let meta_transaction = MetaTransactionV1::from_transaction_v1(&transaction_v1, &config) + .expect("meta transaction should be valid"); + assert_eq!(meta_transaction.lane_id(), 4); + } + + fn build_v1_config() -> TransactionV1Config { + let mut config = TransactionV1Config::default(); + config.set_wasm_lanes(vec![ + TransactionLaneDefinition { + id: 3, + max_transaction_length: 10000, + max_transaction_args_length: 100, + max_transaction_gas_limit: 100, + max_transaction_count: 10, + }, + TransactionLaneDefinition { + id: 4, + max_transaction_length: 10001, + max_transaction_args_length: 100, + max_transaction_gas_limit: 10000, + max_transaction_count: 10, + }, + TransactionLaneDefinition { + id: 5, + max_transaction_length: 10002, + max_transaction_args_length: 100, + max_transaction_gas_limit: 1000, + max_transaction_count: 10, + }, + ]); + config + } +} diff --git a/node/src/types/transaction/meta_transaction/transaction_header.rs b/node/src/types/transaction/meta_transaction/transaction_header.rs new file mode 100644 index 0000000000..fa0c6b0108 --- /dev/null +++ b/node/src/types/transaction/meta_transaction/transaction_header.rs @@ -0,0 +1,77 @@ +use casper_types::{DeployHeader, InitiatorAddr, TimeDiff, Timestamp, Transaction, TransactionV1}; +use core::fmt::{self, Display, Formatter}; +use datasize::DataSize; +use serde::Serialize; + +#[derive(Debug, 
Clone, DataSize, PartialEq, Eq, Serialize)] +pub(crate) struct TransactionV1Metadata { + initiator_addr: InitiatorAddr, + timestamp: Timestamp, + ttl: TimeDiff, +} + +impl TransactionV1Metadata { + pub(crate) fn initiator_addr(&self) -> &InitiatorAddr { + &self.initiator_addr + } + + pub(crate) fn timestamp(&self) -> Timestamp { + self.timestamp + } + + pub(crate) fn ttl(&self) -> TimeDiff { + self.ttl + } +} + +impl Display for TransactionV1Metadata { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + write!( + formatter, + "transaction-v1-metadata[initiator_addr: {}]", + self.initiator_addr, + ) + } +} + +#[derive(Debug, Clone, DataSize, Serialize, PartialEq, Eq)] +/// A versioned wrapper for a transaction header or deploy header. +pub(crate) enum TransactionHeader { + Deploy(DeployHeader), + V1(TransactionV1Metadata), +} + +impl From for TransactionHeader { + fn from(header: DeployHeader) -> Self { + Self::Deploy(header) + } +} + +impl From<&TransactionV1> for TransactionHeader { + fn from(transaction_v1: &TransactionV1) -> Self { + let meta = TransactionV1Metadata { + initiator_addr: transaction_v1.initiator_addr().clone(), + timestamp: transaction_v1.timestamp(), + ttl: transaction_v1.ttl(), + }; + Self::V1(meta) + } +} + +impl From<&Transaction> for TransactionHeader { + fn from(transaction: &Transaction) -> Self { + match transaction { + Transaction::Deploy(deploy) => deploy.header().clone().into(), + Transaction::V1(v1) => v1.into(), + } + } +} + +impl Display for TransactionHeader { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + TransactionHeader::Deploy(header) => Display::fmt(header, formatter), + TransactionHeader::V1(meta) => Display::fmt(meta, formatter), + } + } +} diff --git a/node/src/types/transaction/transaction_footprint.rs b/node/src/types/transaction/transaction_footprint.rs new file mode 100644 index 0000000000..9b28ea04c1 --- /dev/null +++ b/node/src/types/transaction/transaction_footprint.rs @@ -0,0 
+1,159 @@ +use crate::types::MetaTransaction; +#[cfg(test)] +use casper_types::{testing::TestRng, U512}; +use casper_types::{ + Approval, Chainspec, Digest, Gas, InvalidTransaction, InvalidTransactionV1, TimeDiff, + Timestamp, Transaction, TransactionHash, AUCTION_LANE_ID, INSTALL_UPGRADE_LANE_ID, + MINT_LANE_ID, +}; +use datasize::DataSize; +#[cfg(test)] +use rand::Rng; +use serde::{Deserialize, Serialize}; +use std::collections::BTreeSet; + +#[derive(Clone, Debug, DataSize, Eq, PartialEq, Serialize, Deserialize)] +#[serde(deny_unknown_fields)] +/// The block footprint of a transaction. +pub(crate) struct TransactionFootprint { + /// The identifying hash. + pub(crate) transaction_hash: TransactionHash, + /// Transaction payload hash. + pub(crate) payload_hash: Digest, + /// The estimated gas consumption. + pub(crate) gas_limit: Gas, + /// The gas tolerance. + pub(crate) gas_price_tolerance: u8, + /// The bytesrepr serialized length. + pub(crate) size_estimate: usize, + /// The transaction lane_id. + pub(crate) lane_id: u8, + /// Timestamp of the transaction. + pub(crate) timestamp: Timestamp, + /// Time to live for the transaction. + pub(crate) ttl: TimeDiff, + /// The approvals. 
+ pub(crate) approvals: BTreeSet, +} + +impl TransactionFootprint { + pub(crate) fn new( + chainspec: &Chainspec, + transaction: &Transaction, + ) -> Result { + let transaction = MetaTransaction::from_transaction( + transaction, + chainspec.core_config.pricing_handling, + &chainspec.transaction_config, + )?; + Self::new_from_meta_transaction(chainspec, &transaction) + } + + fn new_from_meta_transaction( + chainspec: &Chainspec, + transaction: &MetaTransaction, + ) -> Result { + let gas_price_tolerance = transaction.gas_price_tolerance()?; + let gas_limit = transaction.gas_limit(chainspec)?; + let lane_id = transaction.transaction_lane(); + if !chainspec + .transaction_config + .transaction_v1_config + .is_supported(lane_id) + { + return Err(InvalidTransaction::V1( + InvalidTransactionV1::InvalidTransactionLane(lane_id), + )); + } + let transaction_hash = transaction.hash(); + let size_estimate = transaction.size_estimate(); + let payload_hash = transaction.payload_hash(); + let timestamp = transaction.timestamp(); + let ttl = transaction.ttl(); + let approvals = transaction.approvals(); + Ok(TransactionFootprint { + transaction_hash, + payload_hash, + gas_limit, + gas_price_tolerance, + size_estimate, + lane_id, + timestamp, + ttl, + approvals, + }) + } + + /// Sets approvals. + pub(crate) fn with_approvals(mut self, approvals: BTreeSet) -> Self { + self.approvals = approvals; + self + } + + /// The approval count, if known. + pub(crate) fn approvals_count(&self) -> usize { + self.approvals.len() + } + + /// Is mint interaction. + pub(crate) fn is_mint(&self) -> bool { + if self.lane_id == MINT_LANE_ID { + return true; + } + + false + } + + /// Is auction interaction. 
+ pub(crate) fn is_auction(&self) -> bool { + if self.lane_id == AUCTION_LANE_ID { + return true; + } + + false + } + + pub(crate) fn is_install_upgrade(&self) -> bool { + if self.lane_id == INSTALL_UPGRADE_LANE_ID { + return true; + } + + false + } + + pub(crate) fn is_wasm_based(&self) -> bool { + if !self.is_mint() && !self.is_auction() && !self.is_install_upgrade() { + return true; + } + + false + } + + pub(crate) fn gas_price_tolerance(&self) -> u8 { + self.gas_price_tolerance + } + + #[cfg(test)] + pub fn random_of_lane(lane_id: u8, rng: &mut TestRng) -> Self { + let transaction_hash = TransactionHash::random(rng); + let payload_hash = Digest::random(rng); + let gas_limit = Gas::new(U512::from(1)); + let gas_price_tolerance = rng.gen(); + let size_estimate = rng.gen_range(1000..2000); + let timestamp = Timestamp::now(); + let ttl = TimeDiff::from_millis(15000); + let mut approvals = BTreeSet::new(); + approvals.insert(Approval::random(rng)); + TransactionFootprint { + transaction_hash, + payload_hash, + gas_limit, + gas_price_tolerance, + size_estimate, + lane_id, + timestamp, + ttl, + approvals, + } + } +} diff --git a/node/src/types/transaction/transaction_v1_builder.rs b/node/src/types/transaction/transaction_v1_builder.rs new file mode 100644 index 0000000000..62412e1ce7 --- /dev/null +++ b/node/src/types/transaction/transaction_v1_builder.rs @@ -0,0 +1,777 @@ +#[cfg(test)] +use super::arg_handling; +use super::fields_container::{FieldsContainer, FieldsContainerError}; +use crate::types::transaction::initiator_addr_and_secret_key::InitiatorAddrAndSecretKey; +use casper_types::{ + bytesrepr::{Bytes, ToBytes}, + Digest, InitiatorAddr, PricingMode, RuntimeArgs, SecretKey, TimeDiff, Timestamp, + TransactionArgs, TransactionEntryPoint, TransactionRuntimeParams, TransactionScheduling, + TransactionTarget, TransactionV1, TransactionV1Payload, +}; +#[cfg(test)] +use casper_types::{ + contracts::ProtocolVersionMajor, testing::TestRng, AddressableEntityHash, 
Approval, + CLValueError, EntityVersion, PackageHash, PublicKey, TransactionConfig, + TransactionInvocationTarget, TransferTarget, URef, U512, +}; +use core::marker::PhantomData; +#[cfg(test)] +use rand::Rng; +use std::collections::{BTreeMap, BTreeSet}; + +/// A builder for constructing `TransactionV1` instances with various configuration options. +/// +/// The `TransactionV1Builder` provides a flexible API for specifying different transaction +/// parameters like the target, scheduling, entry point, and signing options. Once all the required +/// fields are set, the transaction can be built by calling [`build`](Self::build). +/// +/// # Fields +/// +/// - `args`: Arguments passed to the transaction's runtime, initialized to +/// [`RuntimeArgs::new`](RuntimeArgs::new). +/// - `target`: Specifies the target of the transaction, which can be native or other custom +/// targets. Defaults to [`TransactionTarget::Native`](TransactionTarget::Native). +/// - `scheduling`: Determines the scheduling mechanism of the transaction, e.g., standard or +/// immediate, and is initialized to +/// [`TransactionScheduling::Standard`](TransactionScheduling::Standard). +/// - `entry_point`: Defines the transaction's entry point, such as transfer or another defined +/// action. Defaults to [`TransactionEntryPoint::Transfer`](TransactionEntryPoint::Transfer). +/// - `chain_name`: The name of the blockchain where the transaction will be executed. Initially set +/// to `None` and must be provided before building the transaction. +/// +/// ## Time-Related Fields +/// - `timestamp`: The timestamp at which the transaction is created. It is either set to the +/// current time using [`Timestamp::now`](Timestamp::now) or [`Timestamp::zero`](Timestamp::zero) +/// without the `std-fs-io` feature. +/// - `ttl`: Time-to-live for the transaction, specified as a [`TimeDiff`], representing how long +/// the transaction is valid for execution. Defaults to [`Self::DEFAULT_TTL`]. 
+/// +/// ## Pricing and Initiator Fields +/// - `pricing_mode`: Specifies the pricing mode to use for transaction execution (e.g., fixed or +/// dynamic). Defaults to [`Self::DEFAULT_PRICING_MODE`]. +/// - `initiator_addr`: The address of the initiator who creates and signs the transaction. +/// Initially set to `None` and must be set before building. +/// +/// ## Signing Fields +/// - `secret_key`: The secret key used to sign the transaction. This field is conditional based on +/// the compilation environment: +/// - In normal mode, it holds a reference to the secret key (`Option<&'a SecretKey>`). +/// - In testing mode or with the `std` feature enabled, it holds an owned secret key +/// (`Option`). +/// +/// ## Invalid Approvals +/// - `invalid_approvals`: A collection of invalid approvals used for testing purposes. This field +/// is available only when the `std` or `testing` features are enabled, or in a test environment. +/// +/// ## Phantom Data +/// - `_phantom_data`: Ensures the correct lifetime `'a` is respected for the builder, helping with +/// proper borrowing and memory safety. +#[derive(Debug)] +pub(crate) struct TransactionV1Builder<'a> { + /// Arguments passed to the transaction's runtime. + args: TransactionArgs, + /// The target of the transaction (e.g., native). + target: TransactionTarget, + /// Defines how the transaction is scheduled (e.g., standard, immediate). + scheduling: TransactionScheduling, + /// Specifies the entry point of the transaction (e.g., transfer). + entry_point: TransactionEntryPoint, + /// The name of the blockchain where the transaction will be executed. + chain_name: Option, + /// The timestamp of the transaction. + timestamp: Timestamp, + /// The time-to-live for the transaction, representing how long it's valid for execution. + ttl: TimeDiff, + /// The pricing mode used for the transaction's execution cost. + pricing_mode: PricingMode, + /// The address of the transaction initiator. 
+ initiator_addr: Option, + /// The secret key used for signing the transaction (in normal mode). + #[cfg(not(test))] + secret_key: Option<&'a SecretKey>, + /// The secret key used for signing the transaction (in testing or with `std` feature). + #[cfg(test)] + secret_key: Option, + /// A list of invalid approvals for testing purposes. + #[cfg(test)] + invalid_approvals: Vec, + /// Additional fields + #[cfg(test)] + additional_fields: BTreeMap, + /// Phantom data to ensure the correct lifetime for references. + _phantom_data: PhantomData<&'a ()>, +} + +impl<'a> TransactionV1Builder<'a> { + /// The default time-to-live for transactions, i.e. 30 minutes. + pub const DEFAULT_TTL: TimeDiff = TimeDiff::from_millis(30 * 60 * 1_000); + /// The default pricing mode for v1 transactions, ie FIXED cost. + pub const DEFAULT_PRICING_MODE: PricingMode = PricingMode::PaymentLimited { + payment_amount: 10_000_000_000, + gas_price_tolerance: 3, + standard_payment: true, + }; + /// The default scheduling for transactions, i.e. `Standard`. + pub const DEFAULT_SCHEDULING: TransactionScheduling = TransactionScheduling::Standard; + + /// Creates a new `TransactionV1Builder` instance with default settings. + /// + /// # Important + /// + /// Before calling [`build`](Self::build), you must ensure that either: + /// - A chain name is provided by calling [`with_chain_name`](Self::with_chain_name), + /// - An initiator address is set by calling [`with_initiator_addr`](Self::with_initiator_addr), + /// - or a secret key is set by calling [`with_secret_key`](Self::with_secret_key). + /// + /// # Default Values + /// This function sets the following default values upon creation: + /// + /// - `chain_name`: Initialized to `None`. + /// - `timestamp`: Set to the current time using [`Timestamp::now`](Timestamp::now), or + /// [`Timestamp::zero`](Timestamp::zero) if the `std-fs-io` feature is disabled. + /// - `ttl`: Defaults to [`Self::DEFAULT_TTL`]. 
+ /// - `pricing_mode`: Defaults to [`Self::DEFAULT_PRICING_MODE`]. + /// - `initiator_addr`: Initialized to `None`. + /// - `secret_key`: Initialized to `None`. + /// + /// Additionally, the following internal fields are configured: + /// + /// - `args`: Initialized to an empty [`RuntimeArgs::new`](RuntimeArgs::new). + /// - `entry_point`: Set to + /// [`TransactionEntryPoint::Transfer`](TransactionEntryPoint::Transfer). + /// - `target`: Defaults to [`TransactionTarget::Native`](TransactionTarget::Native). + /// - `scheduling`: Defaults to + /// [`TransactionScheduling::Standard`](TransactionScheduling::Standard). + /// + /// # Testing and Additional Configuration + /// + /// - If the `std` or `testing` feature is enabled, or in test configurations, the + /// `invalid_approvals` field is initialized as an empty vector. + /// + /// # Returns + /// + /// A new `TransactionV1Builder` instance. + pub(crate) fn new() -> Self { + let timestamp = Timestamp::now(); + + TransactionV1Builder { + args: TransactionArgs::Named(RuntimeArgs::new()), + entry_point: TransactionEntryPoint::Transfer, + target: TransactionTarget::Native, + scheduling: TransactionScheduling::Standard, + chain_name: None, + timestamp, + ttl: Self::DEFAULT_TTL, + pricing_mode: Self::DEFAULT_PRICING_MODE, + initiator_addr: None, + secret_key: None, + _phantom_data: PhantomData, + #[cfg(test)] + invalid_approvals: vec![], + #[cfg(test)] + additional_fields: BTreeMap::new(), + } + } + + /// Returns a new `TransactionV1Builder` suitable for building a native transfer transaction. 
+ #[cfg(test)] + pub(crate) fn new_transfer, T: Into>( + amount: A, + maybe_source: Option, + target: T, + maybe_id: Option, + ) -> Result { + let args = arg_handling::new_transfer_args(amount, maybe_source, target, maybe_id)?; + let mut builder = TransactionV1Builder::new(); + builder.args = TransactionArgs::Named(args); + builder.target = TransactionTarget::Native; + builder.entry_point = TransactionEntryPoint::Transfer; + builder.scheduling = Self::DEFAULT_SCHEDULING; + Ok(builder) + } + + /// Returns a new `TransactionV1Builder` suitable for building a native burn transaction. + #[cfg(test)] + pub(crate) fn new_burn>( + amount: A, + maybe_source: Option, + ) -> Result { + let args = arg_handling::new_burn_args(amount, maybe_source)?; + let mut builder = TransactionV1Builder::new(); + builder.args = TransactionArgs::Named(args); + builder.entry_point = TransactionEntryPoint::Burn; + builder.scheduling = Self::DEFAULT_SCHEDULING; + Ok(builder) + } + + /// Returns a new `TransactionV1Builder` suitable for building a native add_bid transaction. + #[cfg(test)] + pub(crate) fn new_add_bid>( + public_key: PublicKey, + delegation_rate: u8, + amount: A, + minimum_delegation_amount: Option, + maximum_delegation_amount: Option, + reserved_slots: Option, + ) -> Result { + let args = arg_handling::new_add_bid_args( + public_key, + delegation_rate, + amount, + minimum_delegation_amount, + maximum_delegation_amount, + reserved_slots, + )?; + let mut builder = TransactionV1Builder::new(); + builder.args = TransactionArgs::Named(args); + builder.target = TransactionTarget::Native; + builder.entry_point = TransactionEntryPoint::AddBid; + builder.scheduling = Self::DEFAULT_SCHEDULING; + Ok(builder) + } + + /// Returns a new `TransactionV1Builder` suitable for building a native withdraw_bid + /// transaction. 
+ #[cfg(test)] + pub(crate) fn new_withdraw_bid>( + public_key: PublicKey, + amount: A, + ) -> Result { + let args = arg_handling::new_withdraw_bid_args(public_key, amount)?; + let mut builder = TransactionV1Builder::new(); + builder.args = TransactionArgs::Named(args); + builder.target = TransactionTarget::Native; + builder.entry_point = TransactionEntryPoint::WithdrawBid; + builder.scheduling = Self::DEFAULT_SCHEDULING; + Ok(builder) + } + + /// Returns a new `TransactionV1Builder` suitable for building a native delegate transaction. + #[cfg(test)] + pub(crate) fn new_delegate>( + delegator: PublicKey, + validator: PublicKey, + amount: A, + ) -> Result { + let args = arg_handling::new_delegate_args(delegator, validator, amount)?; + let mut builder = TransactionV1Builder::new(); + builder.args = TransactionArgs::Named(args); + builder.target = TransactionTarget::Native; + builder.entry_point = TransactionEntryPoint::Delegate; + builder.scheduling = Self::DEFAULT_SCHEDULING; + Ok(builder) + } + + /// Returns a new `TransactionV1Builder` suitable for building a native undelegate transaction. + #[cfg(test)] + pub(crate) fn new_undelegate>( + delegator: PublicKey, + validator: PublicKey, + amount: A, + ) -> Result { + let args = arg_handling::new_undelegate_args(delegator, validator, amount)?; + let mut builder = TransactionV1Builder::new(); + builder.args = TransactionArgs::Named(args); + builder.target = TransactionTarget::Native; + builder.entry_point = TransactionEntryPoint::Undelegate; + builder.scheduling = Self::DEFAULT_SCHEDULING; + Ok(builder) + } + + /// Returns a new `TransactionV1Builder` suitable for building a native redelegate transaction. 
+ #[cfg(test)] + pub(crate) fn new_redelegate>( + delegator: PublicKey, + validator: PublicKey, + amount: A, + new_validator: PublicKey, + ) -> Result { + let args = arg_handling::new_redelegate_args(delegator, validator, amount, new_validator)?; + let mut builder = TransactionV1Builder::new(); + builder.args = TransactionArgs::Named(args); + builder.target = TransactionTarget::Native; + builder.entry_point = TransactionEntryPoint::Redelegate; + builder.scheduling = Self::DEFAULT_SCHEDULING; + Ok(builder) + } + + #[cfg(test)] + pub(crate) fn new_targeting_stored>( + id: TransactionInvocationTarget, + entry_point: E, + runtime: TransactionRuntimeParams, + ) -> Self { + let target = TransactionTarget::Stored { id, runtime }; + let mut builder = TransactionV1Builder::new(); + builder.args = TransactionArgs::Named(RuntimeArgs::new()); + builder.target = target; + builder.entry_point = TransactionEntryPoint::Custom(entry_point.into()); + builder.scheduling = Self::DEFAULT_SCHEDULING; + builder + } + + #[cfg(test)] + pub(crate) fn new_targeting_stored_with_runtime_args>( + id: TransactionInvocationTarget, + entry_point: E, + runtime: TransactionRuntimeParams, + runtime_args: RuntimeArgs, + ) -> Self { + let target = TransactionTarget::Stored { id, runtime }; + let mut builder = TransactionV1Builder::new(); + builder.args = TransactionArgs::Named(runtime_args); + builder.target = target; + builder.entry_point = TransactionEntryPoint::Custom(entry_point.into()); + builder.scheduling = Self::DEFAULT_SCHEDULING; + builder + } + + /// Returns a new `TransactionV1Builder` suitable for building a transaction targeting a stored + /// entity. 
+ #[cfg(test)] + pub(crate) fn new_targeting_invocable_entity>( + hash: AddressableEntityHash, + entry_point: E, + runtime: TransactionRuntimeParams, + ) -> Self { + let id = TransactionInvocationTarget::new_invocable_entity(hash); + Self::new_targeting_stored(id, entry_point, runtime) + } + + /// Returns a new `TransactionV1Builder` suitable for building a transaction targeting a stored + /// entity via its alias. + #[cfg(test)] + pub(crate) fn new_targeting_invocable_entity_via_alias, E: Into>( + alias: A, + entry_point: E, + runtime: TransactionRuntimeParams, + ) -> Self { + let id = TransactionInvocationTarget::new_invocable_entity_alias(alias.into()); + Self::new_targeting_stored(id, entry_point, runtime) + } + + /// Returns a new `TransactionV1Builder` suitable for building a transaction targeting a + /// package. + #[cfg(test)] + pub(crate) fn new_targeting_package>( + hash: PackageHash, + version: Option, + protocol_version_major: Option, + entry_point: E, + runtime: TransactionRuntimeParams, + ) -> Self { + let id = TransactionInvocationTarget::new_package_with_major( + hash, + version, + protocol_version_major, + ); + Self::new_targeting_stored(id, entry_point, runtime) + } + + /// Returns a new `TransactionV1Builder` suitable for building a transaction targeting a + /// package. + #[cfg(test)] + pub(crate) fn new_targeting_package_with_runtime_args>( + hash: PackageHash, + version: Option, + protocol_version_major: Option, + entry_point: E, + runtime: TransactionRuntimeParams, + runtime_args: RuntimeArgs, + ) -> Self { + let id = TransactionInvocationTarget::ByPackageHash { + addr: hash.value(), + version, + protocol_version_major, + }; + Self::new_targeting_stored_with_runtime_args(id, entry_point, runtime, runtime_args) + } + + /// Returns a new `TransactionV1Builder` suitable for building a transaction targeting a + /// package via its alias. 
+ #[cfg(test)] + pub(crate) fn new_targeting_package_via_alias, E: Into>( + alias: A, + version: Option, + protocol_version_major: Option, + entry_point: E, + runtime: TransactionRuntimeParams, + ) -> Self { + let id = TransactionInvocationTarget::new_package_alias_with_major( + alias.into(), + version, + protocol_version_major, + ); + Self::new_targeting_stored(id, entry_point, runtime) + } + + /// Returns a new `TransactionV1Builder` suitable for building a transaction for running session + /// logic, i.e. compiled Wasm. + pub(crate) fn new_session( + is_install_upgrade: bool, + module_bytes: Bytes, + runtime: TransactionRuntimeParams, + ) -> Self { + let target = TransactionTarget::Session { + is_install_upgrade, + module_bytes, + runtime, + }; + let mut builder = TransactionV1Builder::new(); + builder.args = TransactionArgs::Named(RuntimeArgs::new()); + builder.target = target; + builder.entry_point = TransactionEntryPoint::Call; + builder.scheduling = Self::DEFAULT_SCHEDULING; + builder + } + + /// Returns a new `TransactionV1Builder` suitable for building a transaction for running session + /// logic, i.e. compiled Wasm. + #[cfg(test)] + pub(crate) fn new_session_with_runtime_args( + is_install_upgrade: bool, + module_bytes: Bytes, + runtime: TransactionRuntimeParams, + runtime_args: RuntimeArgs, + ) -> Self { + let target = TransactionTarget::Session { + is_install_upgrade, + module_bytes, + runtime, + }; + let mut builder = TransactionV1Builder::new(); + builder.args = TransactionArgs::Named(runtime_args); + builder.target = target; + builder.entry_point = TransactionEntryPoint::Call; + builder.scheduling = Self::DEFAULT_SCHEDULING; + builder + } + + /// Returns a new `TransactionV1Builder` which will build a random, valid but possibly expired + /// transaction. 
+ /// + /// The transaction can be made invalid in the following ways: + /// * unsigned by calling `with_no_secret_key` + /// * given an invalid approval by calling `with_invalid_approval` + #[cfg(test)] + pub(crate) fn new_random(rng: &mut TestRng) -> Self { + let secret_key = SecretKey::random(rng); + let ttl_millis = rng.gen_range(60_000..TransactionConfig::default().max_ttl.millis()); + let fields = FieldsContainer::random(rng); + TransactionV1Builder { + chain_name: Some(rng.random_string(5..10)), + timestamp: Timestamp::random(rng), + ttl: TimeDiff::from_millis(ttl_millis), + args: TransactionArgs::Named(RuntimeArgs::random(rng)), + target: fields.target, + entry_point: fields.entry_point, + scheduling: fields.scheduling, + pricing_mode: PricingMode::PaymentLimited { + payment_amount: 2_500_000_000, + gas_price_tolerance: 3, + standard_payment: true, + }, + initiator_addr: Some(InitiatorAddr::PublicKey(PublicKey::from(&secret_key))), + secret_key: Some(secret_key), + _phantom_data: PhantomData, + invalid_approvals: vec![], + #[cfg(test)] + additional_fields: BTreeMap::new(), + } + } + + #[cfg(test)] + pub(crate) fn new_random_with_category_and_timestamp_and_ttl( + rng: &mut TestRng, + lane: u8, + timestamp: Option, + ttl: Option, + ) -> Self { + let secret_key = SecretKey::random(rng); + let ttl_millis = ttl.map_or( + rng.gen_range(60_000..TransactionConfig::default().max_ttl.millis()), + |ttl| ttl.millis(), + ); + let FieldsContainer { + args, + target, + entry_point, + scheduling, + } = FieldsContainer::random_of_lane(rng, lane); + TransactionV1Builder { + chain_name: Some(rng.random_string(5..10)), + timestamp: timestamp.unwrap_or(Timestamp::now()), + ttl: TimeDiff::from_millis(ttl_millis), + args, + target, + entry_point, + scheduling, + pricing_mode: PricingMode::PaymentLimited { + payment_amount: 2_500_000_000, + gas_price_tolerance: 3, + standard_payment: true, + }, + initiator_addr: Some(InitiatorAddr::PublicKey(PublicKey::from(&secret_key))), + 
secret_key: Some(secret_key), + _phantom_data: PhantomData, + invalid_approvals: vec![], + #[cfg(test)] + additional_fields: BTreeMap::new(), + } + } + + /// Sets the `chain_name` in the transaction. + /// + /// Must be provided or building will fail. + pub(crate) fn with_chain_name>(mut self, chain_name: C) -> Self { + self.chain_name = Some(chain_name.into()); + self + } + + /// Sets the `timestamp` in the transaction. + /// + /// If not provided, the timestamp will be set to the time when the builder was constructed. + pub(crate) fn with_timestamp(mut self, timestamp: Timestamp) -> Self { + self.timestamp = timestamp; + self + } + + /// Sets the `ttl` (time-to-live) in the transaction. + /// + /// If not provided, the ttl will be set to [`Self::DEFAULT_TTL`]. + pub(crate) fn with_ttl(mut self, ttl: TimeDiff) -> Self { + self.ttl = ttl; + self + } + + /// Sets the `pricing_mode` in the transaction. + /// + /// If not provided, the pricing mode will be set to [`Self::DEFAULT_PRICING_MODE`]. + #[cfg(test)] + pub(crate) fn with_pricing_mode(mut self, pricing_mode: PricingMode) -> Self { + self.pricing_mode = pricing_mode; + self + } + + /// Sets the `initiator_addr` in the transaction. + /// + /// If not provided, the public key derived from the secret key used in the builder will be + /// used as the `InitiatorAddr::PublicKey` in the transaction. + #[cfg(test)] + pub(crate) fn with_initiator_addr>(mut self, initiator_addr: I) -> Self { + self.initiator_addr = Some(initiator_addr.into()); + self + } + + /// Sets the secret key used to sign the transaction on calling [`build`](Self::build). + /// + /// If not provided, the transaction can still be built, but will be unsigned and will be + /// invalid until subsequently signed. 
+ pub(crate) fn with_secret_key(mut self, secret_key: &'a SecretKey) -> Self { + #[cfg(not(test))] + { + self.secret_key = Some(secret_key); + } + #[cfg(test)] + { + self.secret_key = Some( + SecretKey::from_der(secret_key.to_der().expect("should der-encode")) + .expect("should der-decode"), + ); + } + self + } + + /// Manually sets additional fields + #[cfg(test)] + pub(crate) fn with_additional_fields( + mut self, + additional_fields: BTreeMap, + ) -> Self { + self.additional_fields = additional_fields; + self + } + + /// Sets the runtime args in the transaction. + /// + /// NOTE: this overwrites any existing runtime args. To append to existing args, use + /// [`TransactionV1Builder::with_runtime_arg`]. + #[cfg(test)] + pub(crate) fn with_runtime_args(mut self, args: RuntimeArgs) -> Self { + self.args = TransactionArgs::Named(args); + self + } + + /// Sets the transaction args in the transaction. + /// + /// NOTE: this overwrites any existing transaction_args args. + #[cfg(test)] + pub fn with_transaction_args(mut self, args: TransactionArgs) -> Self { + self.args = args; + self + } + + /// Returns the new transaction, or an error if non-defaulted fields were not set. + /// + /// For more info, see [the `TransactionBuilder` documentation](TransactionV1Builder). 
+ pub(crate) fn build(self) -> Result { + self.do_build() + } + + #[cfg(not(test))] + fn do_build(self) -> Result { + let initiator_addr_and_secret_key = match (self.initiator_addr, self.secret_key) { + (Some(initiator_addr), Some(secret_key)) => InitiatorAddrAndSecretKey::Both { + initiator_addr, + secret_key, + }, + (Some(initiator_addr), None) => { + InitiatorAddrAndSecretKey::InitiatorAddr(initiator_addr) + } + (None, Some(secret_key)) => InitiatorAddrAndSecretKey::SecretKey(secret_key), + (None, None) => return Err(TransactionV1BuilderError::MissingInitiatorAddr), + }; + + let chain_name = self + .chain_name + .ok_or(TransactionV1BuilderError::MissingChainName)?; + + let container = + FieldsContainer::new(self.args, self.target, self.entry_point, self.scheduling) + .to_map() + .map_err(|err| match err { + FieldsContainerError::CouldNotSerializeField { field_index } => { + TransactionV1BuilderError::CouldNotSerializeField { field_index } + } + })?; + + let transaction = build_transaction( + chain_name, + self.timestamp, + self.ttl, + self.pricing_mode, + container, + initiator_addr_and_secret_key, + ); + + Ok(transaction) + } + + #[cfg(test)] + fn do_build(self) -> Result { + let initiator_addr_and_secret_key = match (self.initiator_addr, &self.secret_key) { + (Some(initiator_addr), Some(secret_key)) => InitiatorAddrAndSecretKey::Both { + initiator_addr, + secret_key, + }, + (Some(initiator_addr), None) => { + InitiatorAddrAndSecretKey::InitiatorAddr(initiator_addr) + } + (None, Some(secret_key)) => InitiatorAddrAndSecretKey::SecretKey(secret_key), + (None, None) => return Err(TransactionV1BuilderError::MissingInitiatorAddr), + }; + + let chain_name = self + .chain_name + .ok_or(TransactionV1BuilderError::MissingChainName)?; + let mut container = + FieldsContainer::new(self.args, self.target, self.entry_point, self.scheduling) + .to_map() + .map_err(|err| match err { + FieldsContainerError::CouldNotSerializeField { field_index } => { + 
TransactionV1BuilderError::CouldNotSerializeField { field_index } + } + })?; + let mut additional_fields = self.additional_fields.clone(); + container.append(&mut additional_fields); + + let mut transaction = build_transaction( + chain_name, + self.timestamp, + self.ttl, + self.pricing_mode, + container, + initiator_addr_and_secret_key, + ); + + transaction.apply_approvals(self.invalid_approvals); + + Ok(transaction) + } +} + +fn build_transaction( + chain_name: String, + timestamp: Timestamp, + ttl: TimeDiff, + pricing_mode: PricingMode, + fields: BTreeMap, + initiator_addr_and_secret_key: InitiatorAddrAndSecretKey, +) -> TransactionV1 { + let initiator_addr = initiator_addr_and_secret_key.initiator_addr(); + let transaction_v1_payload = TransactionV1Payload::new( + chain_name, + timestamp, + ttl, + pricing_mode, + initiator_addr, + fields, + ); + let hash = Digest::hash( + transaction_v1_payload + .to_bytes() + .unwrap_or_else(|error| panic!("should serialize body: {}", error)), + ); + let mut transaction = TransactionV1::new(hash.into(), transaction_v1_payload, BTreeSet::new()); + + if let Some(secret_key) = initiator_addr_and_secret_key.secret_key() { + transaction.sign(secret_key); + } + transaction +} + +use core::fmt::{self, Display, Formatter}; + +/// Errors returned while building a [`TransactionV1`] using a [`TransactionV1Builder`]. +#[derive(Clone, Eq, PartialEq, Debug)] +#[non_exhaustive] +pub(crate) enum TransactionV1BuilderError { + /// Failed to build transaction due to missing initiator_addr. + /// + /// Call [`TransactionV1Builder::with_initiator_addr`] or + /// [`TransactionV1Builder::with_secret_key`] before calling [`TransactionV1Builder::build`]. + MissingInitiatorAddr, + /// Failed to build transaction due to missing chain name. + /// + /// Call [`TransactionV1Builder::with_chain_name`] before calling + /// [`TransactionV1Builder::build`]. 
+ MissingChainName, + /// Failed to build transaction due to an error when calling `to_bytes` on one of the payload + /// `field`. + CouldNotSerializeField { + /// The field index that failed to serialize. + field_index: u16, + }, +} + +impl Display for TransactionV1BuilderError { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + TransactionV1BuilderError::MissingInitiatorAddr => { + write!( + formatter, + "transaction requires account - use `with_account` or `with_secret_key`" + ) + } + TransactionV1BuilderError::MissingChainName => { + write!( + formatter, + "transaction requires chain name - use `with_chain_name`" + ) + } + TransactionV1BuilderError::CouldNotSerializeField { field_index } => { + write!(formatter, "Cannot serialize field at index {}", field_index) + } + } + } +} diff --git a/node/src/types/validator_matrix.rs b/node/src/types/validator_matrix.rs new file mode 100644 index 0000000000..d0681fde4d --- /dev/null +++ b/node/src/types/validator_matrix.rs @@ -0,0 +1,806 @@ +#[cfg(test)] +use std::iter; +use std::{ + collections::{BTreeMap, HashSet}, + fmt::{self, Debug, Formatter}, + sync::{Arc, RwLock, RwLockReadGuard}, +}; + +use datasize::DataSize; +use itertools::Itertools; +use num_rational::Ratio; +use serde::Serialize; +use tracing::info; + +use casper_types::{ + BlockHeaderV2, ChainNameDigest, EraId, FinalitySignatureV2, PublicKey, SecretKey, U512, +}; + +const MINIMUM_CUSP_ERA_COUNT: u64 = 2; +const PROPOSED_BLOCK_ERA_TOLERANCE: u64 = 1; + +#[derive(Eq, PartialEq, Debug, Copy, Clone, DataSize)] +pub(crate) enum SignatureWeight { + /// Too few signatures to make any guarantees about the block's finality. + Insufficient, + /// At least one honest validator has signed the block. + Weak, + /// There can be no blocks on other forks that also have this many signatures. 
+ Strict, +} + +impl SignatureWeight { + pub(crate) fn is_sufficient(&self, requires_strict_finality: bool) -> bool { + match self { + SignatureWeight::Insufficient => false, + SignatureWeight::Weak => false == requires_strict_finality, + SignatureWeight::Strict => true, + } + } +} + +#[derive(Clone, DataSize)] +pub(crate) struct ValidatorMatrix { + inner: Arc>>, + chainspec_name_hash: ChainNameDigest, + chainspec_validators: Option>>, + chainspec_activation_era: EraId, + #[data_size(skip)] + finality_threshold_fraction: Ratio, + secret_signing_key: Arc, + public_signing_key: PublicKey, + auction_delay: u64, + signature_rewards_max_delay: u64, + retrograde_latch: Option, +} + +impl ValidatorMatrix { + #[allow(clippy::too_many_arguments)] + pub(crate) fn new( + finality_threshold_fraction: Ratio, + chainspec_name_hash: ChainNameDigest, + chainspec_validators: Option>, + chainspec_activation_era: EraId, + secret_signing_key: Arc, + public_signing_key: PublicKey, + auction_delay: u64, + signature_rewards_max_delay: u64, + ) -> Self { + let inner = Arc::new(RwLock::new(BTreeMap::new())); + ValidatorMatrix { + inner, + finality_threshold_fraction, + chainspec_name_hash, + chainspec_validators: chainspec_validators.map(Arc::new), + chainspec_activation_era, + secret_signing_key, + public_signing_key, + auction_delay, + signature_rewards_max_delay, + retrograde_latch: None, + } + } + + /// Creates a new validator matrix with just a single validator. 
+ #[cfg(test)] + pub(crate) fn new_with_validator(secret_signing_key: Arc) -> Self { + let public_signing_key = PublicKey::from(&*secret_signing_key); + let finality_threshold_fraction = Ratio::new(1, 3); + let era_id = EraId::new(0); + let weights = EraValidatorWeights::new( + era_id, + iter::once((public_signing_key.clone(), 100.into())).collect(), + finality_threshold_fraction, + ); + ValidatorMatrix { + inner: Arc::new(RwLock::new(iter::once((era_id, weights)).collect())), + chainspec_name_hash: ChainNameDigest::from_chain_name("casper-example"), + chainspec_validators: None, + chainspec_activation_era: EraId::from(0), + finality_threshold_fraction, + public_signing_key, + secret_signing_key, + auction_delay: 1, + signature_rewards_max_delay: 3, + retrograde_latch: None, + } + } + + /// Creates a new validator matrix with multiple validators. + #[cfg(test)] + pub(crate) fn new_with_validators>( + secret_signing_key: Arc, + public_keys: I, + ) -> Self { + let public_signing_key = PublicKey::from(&*secret_signing_key); + let finality_threshold_fraction = Ratio::new(1, 3); + let era_id = EraId::new(0); + let weights = EraValidatorWeights::new( + era_id, + public_keys + .into_iter() + .map(|pub_key| (pub_key, 100.into())) + .collect(), + finality_threshold_fraction, + ); + ValidatorMatrix { + inner: Arc::new(RwLock::new(iter::once((era_id, weights)).collect())), + chainspec_name_hash: ChainNameDigest::from_chain_name("casper-example"), + chainspec_validators: None, + chainspec_activation_era: EraId::from(0), + finality_threshold_fraction, + public_signing_key, + secret_signing_key, + auction_delay: 1, + signature_rewards_max_delay: 3, + retrograde_latch: None, + } + } + + /// Sets signature_rewards_max_delay to imputed value. 
+ #[cfg(test)] + pub(crate) fn with_signature_rewards_max_delay( + mut self, + signature_rewards_max_delay: u64, + ) -> Self { + self.signature_rewards_max_delay = signature_rewards_max_delay; + self + } + + #[cfg(test)] + pub(crate) fn public_keys(&self, era_id: &EraId) -> Vec { + let mut ret = vec![]; + if let Some(evw) = self.read_inner().get(era_id) { + for validator_public_key in evw.validator_public_keys() { + ret.push(validator_public_key.clone()); + } + } + ret + } + + // Register the era of the highest orphaned block. + pub(crate) fn register_retrograde_latch(&mut self, latch_era: Option) { + self.retrograde_latch = latch_era; + } + + // When the chain starts, the validator weights will be the same until the unbonding delay is + // elapsed. This allows us to possibly infer the weights of other eras if the era registered is + // within the unbonding delay. + // Currently, we only infer the validator weights for era 0 from the set registered for era 1. + // This is needed for the case where we want to sync leap to a block in era 0 of a pre 1.5.0 + // network for which we cant get the validator weights from a switch block. 
+ pub(crate) fn register_era_validator_weights( + &mut self, + validators: EraValidatorWeights, + ) -> bool { + let was_present = self.register_era_validator_weights_bounded(validators.clone()); + if validators.era_id() == EraId::from(1) { + self.register_era_validator_weights_bounded(EraValidatorWeights::new( + EraId::from(0), + validators.validator_weights, + validators.finality_threshold_fraction, + )); + info!("ValidatorMatrix: Inferred validator weights for Era 0 from weights in Era 1"); + } + was_present + } + + fn register_era_validator_weights_bounded(&mut self, validators: EraValidatorWeights) -> bool { + let era_id = validators.era_id; + let mut guard = self + .inner + .write() + .expect("poisoned lock on validator matrix"); + let is_new = guard.insert(era_id, validators).is_none(); + + let latch_era = if let Some(era) = self.retrograde_latch.as_ref() { + *era + } else { + return is_new; + }; + + let nth = self.cache_tail_max_len(); + // avoid multiplication + let excess_entry_count = guard.len().saturating_sub(nth).saturating_sub(nth); + let mut removed = false; + for _ in 0..excess_entry_count { + let median_era = guard.keys().rev().nth(nth).copied().unwrap(); + if median_era <= latch_era { + break; + } else { + guard.remove(&median_era); + if median_era == era_id { + removed = true; + } + } + } + is_new && !removed + } + + pub(crate) fn register_validator_weights( + &mut self, + era_id: EraId, + validator_weights: BTreeMap, + ) { + if self.read_inner().contains_key(&era_id) == false { + self.register_era_validator_weights(EraValidatorWeights::new( + era_id, + validator_weights, + self.finality_threshold_fraction, + )); + } + } + + pub(crate) fn register_eras( + &mut self, + era_weights: BTreeMap>, + ) { + for (era_id, weights) in era_weights { + self.register_validator_weights(era_id, weights); + } + } + + pub(crate) fn has_era(&self, era_id: &EraId) -> bool { + self.read_inner().contains_key(era_id) + } + + pub(crate) fn validator_weights(&self, 
era_id: EraId) -> Option { + if let (true, Some(chainspec_validators)) = ( + era_id == self.chainspec_activation_era, + self.chainspec_validators.as_ref(), + ) { + Some(EraValidatorWeights::new( + era_id, + (**chainspec_validators).clone(), + self.finality_threshold_fraction, + )) + } else { + self.read_inner().get(&era_id).cloned() + } + } + + pub(crate) fn fault_tolerance_threshold(&self) -> Ratio { + self.finality_threshold_fraction + } + + pub(crate) fn is_empty(&self) -> bool { + self.read_inner().is_empty() + } + + /// Returns whether `pub_key` is the ID of a validator in this era, or `None` if the validator + /// information for that era is missing. + pub(crate) fn is_validator_in_era( + &self, + era_id: EraId, + public_key: &PublicKey, + ) -> Option { + if let (true, Some(chainspec_validators)) = ( + era_id == self.chainspec_activation_era, + self.chainspec_validators.as_ref(), + ) { + Some(chainspec_validators.contains_key(public_key)) + } else { + self.read_inner() + .get(&era_id) + .map(|validator_weights| validator_weights.is_validator(public_key)) + } + } + + pub(crate) fn public_signing_key(&self) -> &PublicKey { + &self.public_signing_key + } + + pub(crate) fn secret_signing_key(&self) -> &Arc { + &self.secret_signing_key + } + + /// Returns whether `pub_key` is the ID of a validator in this era, or `None` if the validator + /// information for that era is missing. + pub(crate) fn is_self_validator_in_era(&self, era_id: EraId) -> Option { + self.is_validator_in_era(era_id, &self.public_signing_key) + } + + /// Determine if the active validator is in a current or upcoming set of active validators. + #[inline] + pub(crate) fn is_active_or_upcoming_validator(&self, public_key: &PublicKey) -> bool { + // This function is potentially expensive and could be memoized, with the cache being + // invalidated when the max value of the `BTreeMap` changes. 
+ self.read_inner() + .values() + .rev() + .take(self.auction_delay as usize + 1) + .any(|validator_weights| validator_weights.is_validator(public_key)) + } + + pub(crate) fn create_finality_signature( + &self, + block_header: &BlockHeaderV2, + ) -> Option { + if self + .is_self_validator_in_era(block_header.era_id()) + .unwrap_or(false) + { + return Some(FinalitySignatureV2::create( + block_header.block_hash(), + block_header.height(), + block_header.era_id(), + self.chainspec_name_hash, + &self.secret_signing_key, + )); + } + None + } + + fn read_inner(&self) -> RwLockReadGuard> { + self.inner.read().unwrap() + } + + pub(crate) fn eras(&self) -> Vec { + self.read_inner().keys().copied().collect_vec() + } + + pub fn chain_name_hash(&self) -> ChainNameDigest { + self.chainspec_name_hash + } + + #[cfg(test)] + pub(crate) fn purge_era_validators(&mut self, era_id: &EraId) { + self.inner.write().unwrap().remove(era_id); + } + + fn cache_head_max_len(&self) -> usize { + MINIMUM_CUSP_ERA_COUNT.saturating_add(self.auction_delay) as usize + } + + fn cache_tail_max_len(&self) -> usize { + let min_plus_auction_delay = self.cache_head_max_len(); + let signature_rewards_max_delay = + self.signature_rewards_max_delay + .saturating_add(PROPOSED_BLOCK_ERA_TOLERANCE) as usize; + min_plus_auction_delay.max(signature_rewards_max_delay) + } + + #[cfg(test)] + pub(crate) fn entries_max(&self) -> usize { + self.cache_tail_max_len() * 2 + } +} + +impl Debug for ValidatorMatrix { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.debug_struct("ValidatorMatrix") + .field("weights", &*self.read_inner()) + .field( + "finality_threshold_fraction", + &self.finality_threshold_fraction, + ) + .finish() + } +} + +#[derive(DataSize, Debug, Eq, PartialEq, Serialize, Default, Clone)] +pub(crate) struct EraValidatorWeights { + era_id: EraId, + validator_weights: BTreeMap, + #[data_size(skip)] + finality_threshold_fraction: Ratio, +} + +impl EraValidatorWeights { + pub(crate) fn new( + 
era_id: EraId, + validator_weights: BTreeMap, + finality_threshold_fraction: Ratio, + ) -> Self { + EraValidatorWeights { + era_id, + validator_weights, + finality_threshold_fraction, + } + } + + pub(crate) fn era_id(&self) -> EraId { + self.era_id + } + + pub(crate) fn is_empty(&self) -> bool { + self.validator_weights.is_empty() + } + + pub(crate) fn get_total_weight(&self) -> U512 { + self.validator_weights.values().copied().sum() + } + + pub(crate) fn validator_public_keys(&self) -> impl Iterator { + self.validator_weights.keys() + } + + pub(crate) fn into_validator_public_keys(self) -> impl Iterator { + self.validator_weights.into_keys() + } + + pub(crate) fn missing_validators<'a>( + &self, + validator_keys: impl Iterator, + ) -> impl Iterator { + let provided_keys: HashSet<_> = validator_keys.cloned().collect(); + self.validator_weights + .keys() + .filter(move |&validator| !provided_keys.contains(validator)) + } + + pub(crate) fn bogus_validators<'a>( + &self, + validator_keys: impl Iterator, + ) -> Vec { + validator_keys + .filter(move |validator_key| !self.validator_weights.keys().contains(validator_key)) + .cloned() + .collect() + } + + pub(crate) fn get_weight(&self, public_key: &PublicKey) -> U512 { + match self.validator_weights.get(public_key) { + None => U512::zero(), + Some(w) => *w, + } + } + + pub(crate) fn is_validator(&self, public_key: &PublicKey) -> bool { + self.validator_weights.contains_key(public_key) + } + + pub(crate) fn signed_weight<'a>( + &self, + validator_keys: impl Iterator, + ) -> U512 { + validator_keys + .map(|validator_key| self.get_weight(validator_key)) + .sum() + } + + pub(crate) fn signature_weight<'a>( + &self, + validator_keys: impl Iterator, + ) -> SignatureWeight { + // sufficient is ~33.4%, strict is ~66.7% by default in highway + // in some cases, we may already have strict weight or better before even starting. 
+ // this is optimal, but in the cases where we do not we are willing to start work + // on acquiring block data on a block for which we have at least sufficient weight. + // nevertheless, we will try to attain strict weight before fully accepting such + // a block. + let finality_threshold_fraction = self.finality_threshold_fraction; + let strict = Ratio::new(1, 2) * (Ratio::from_integer(1) + finality_threshold_fraction); + let total_era_weight = self.get_total_weight(); + + let signature_weight = self.signed_weight(validator_keys); + if signature_weight * U512::from(*strict.denom()) + > total_era_weight * U512::from(*strict.numer()) + { + return SignatureWeight::Strict; + } + if signature_weight * U512::from(*finality_threshold_fraction.denom()) + > total_era_weight * U512::from(*finality_threshold_fraction.numer()) + { + return SignatureWeight::Weak; + } + SignatureWeight::Insufficient + } +} + +#[cfg(test)] +mod tests { + use std::iter; + + use casper_types::EraId; + use num_rational::Ratio; + + use crate::{ + components::consensus::tests::utils::{ + ALICE_PUBLIC_KEY, ALICE_SECRET_KEY, BOB_PUBLIC_KEY, CAROL_PUBLIC_KEY, + }, + types::SignatureWeight, + }; + + use super::{EraValidatorWeights, ValidatorMatrix}; + + fn empty_era_validator_weights(era_id: EraId) -> EraValidatorWeights { + EraValidatorWeights::new( + era_id, + iter::once((ALICE_PUBLIC_KEY.clone(), 100.into())).collect(), + Ratio::new(1, 3), + ) + } + + #[test] + fn signature_weight_at_boundary_equal_weights() { + let weights = EraValidatorWeights::new( + EraId::default(), + [ + (ALICE_PUBLIC_KEY.clone(), 100.into()), + (BOB_PUBLIC_KEY.clone(), 100.into()), + (CAROL_PUBLIC_KEY.clone(), 100.into()), + ] + .into(), + Ratio::new(1, 3), + ); + + assert_eq!( + weights.signature_weight([ALICE_PUBLIC_KEY.clone()].iter()), + SignatureWeight::Insufficient + ); + assert_eq!( + weights.signature_weight([BOB_PUBLIC_KEY.clone()].iter()), + SignatureWeight::Insufficient + ); + assert_eq!( + 
weights.signature_weight([CAROL_PUBLIC_KEY.clone()].iter()), + SignatureWeight::Insufficient + ); + assert_eq!( + weights.signature_weight([ALICE_PUBLIC_KEY.clone(), BOB_PUBLIC_KEY.clone()].iter()), + SignatureWeight::Weak + ); + assert_eq!( + weights.signature_weight([ALICE_PUBLIC_KEY.clone(), CAROL_PUBLIC_KEY.clone()].iter()), + SignatureWeight::Weak + ); + assert_eq!( + weights.signature_weight([BOB_PUBLIC_KEY.clone(), CAROL_PUBLIC_KEY.clone()].iter()), + SignatureWeight::Weak + ); + assert_eq!( + weights.signature_weight( + [ + ALICE_PUBLIC_KEY.clone(), + BOB_PUBLIC_KEY.clone(), + CAROL_PUBLIC_KEY.clone() + ] + .iter() + ), + SignatureWeight::Strict + ); + } + + #[test] + fn signature_weight_at_boundary_unequal_weights() { + let weights = EraValidatorWeights::new( + EraId::default(), + [ + (ALICE_PUBLIC_KEY.clone(), 101.into()), + (BOB_PUBLIC_KEY.clone(), 100.into()), + (CAROL_PUBLIC_KEY.clone(), 100.into()), + ] + .into(), + Ratio::new(1, 3), + ); + + assert_eq!( + weights.signature_weight([ALICE_PUBLIC_KEY.clone()].iter()), + SignatureWeight::Weak + ); + assert_eq!( + weights.signature_weight([BOB_PUBLIC_KEY.clone()].iter()), + SignatureWeight::Insufficient + ); + assert_eq!( + weights.signature_weight([CAROL_PUBLIC_KEY.clone()].iter()), + SignatureWeight::Insufficient + ); + assert_eq!( + weights.signature_weight([ALICE_PUBLIC_KEY.clone(), BOB_PUBLIC_KEY.clone()].iter()), + SignatureWeight::Strict + ); + assert_eq!( + weights.signature_weight([ALICE_PUBLIC_KEY.clone(), CAROL_PUBLIC_KEY.clone()].iter()), + SignatureWeight::Strict + ); + assert_eq!( + weights.signature_weight([BOB_PUBLIC_KEY.clone(), CAROL_PUBLIC_KEY.clone()].iter()), + SignatureWeight::Weak + ); + assert_eq!( + weights.signature_weight( + [ + ALICE_PUBLIC_KEY.clone(), + BOB_PUBLIC_KEY.clone(), + CAROL_PUBLIC_KEY.clone() + ] + .iter() + ), + SignatureWeight::Strict + ); + } + + #[test] + fn register_validator_weights_pruning() { + // Create a validator matrix and saturate it with entries. 
+ let mut validator_matrix = ValidatorMatrix::new_with_validator(ALICE_SECRET_KEY.clone()); + let mut era_validator_weights = vec![validator_matrix.validator_weights(0.into()).unwrap()]; + + let entries_max = validator_matrix.entries_max(); + era_validator_weights.extend( + (1..entries_max as u64) + .map(EraId::from) + .map(empty_era_validator_weights), + ); + for evw in era_validator_weights + .iter() + .take(entries_max) + .skip(1) + .cloned() + { + assert!( + validator_matrix.register_era_validator_weights(evw), + "register_era_validator_weights" + ); + } + let actual = validator_matrix + .read_inner() + .keys() + .copied() + .map(EraId::value) + .collect::>(); + // For a `entries_max` value of 8, the validator + // matrix should contain eras 0 through 7 inclusive. + assert_eq!(vec![0u64, 1, 2, 3, 4, 5, 6, 7], actual); + + // Now that we have 6 entries in the validator matrix, try adding more. + // We should have an entry for era 3 (we have eras 0 through 5 + // inclusive). + let median = entries_max as u64 / 2; + assert!( + validator_matrix.has_era(&median.into()), + "should have median era {}", + median + ); + // Add era 7, which would be the 7th entry in the matrix. Skipping era + // 6 should have no effect on the pruning. + era_validator_weights.push(empty_era_validator_weights((entries_max as u64 + 1).into())); + + // set retrograde latch to simulate a fully synced node + validator_matrix.register_retrograde_latch(Some(EraId::new(0))); + + // Now the entry for era 3 should be dropped, and we should be left with + // the 4 lowest eras [0, 1, 2, 3] and 4 highest eras [5, 6, 7, 9]. 
+ assert!(validator_matrix + .register_era_validator_weights(era_validator_weights.last().cloned().unwrap())); + assert!( + !validator_matrix.has_era(&median.into()), + "should not have median era {}", + median + ); + let len = validator_matrix.read_inner().len(); + assert_eq!( + len, entries_max, + "expected entries {} actual entries: {}", + entries_max, len + ); + let expected = vec![0u64, 1, 2, 3, 5, 6, 7, 9]; + let actual = validator_matrix + .read_inner() + .keys() + .copied() + .map(EraId::value) + .collect::>(); + assert_eq!(expected, actual, "{:?} {:?}", expected, actual); + + // Adding existing eras shouldn't change the state. + let old_state: Vec = validator_matrix.read_inner().keys().copied().collect(); + let repeat = era_validator_weights + .last() + .cloned() + .expect("should have last entry"); + assert!( + !validator_matrix.register_era_validator_weights(repeat), + "should not re-register already registered era" + ); + let new_state: Vec = validator_matrix.read_inner().keys().copied().collect(); + assert_eq!(old_state, new_state, "state should be unchanged"); + } + + #[test] + fn register_validator_weights_latched_pruning() { + // TODO: write a version of this test that is not hardcoded with so many assumptions about + // the internal state of the matrix. The replacement test should dynamically + // determine the range and misc idx and count variables rather than hard coding + // them. + + // Create a validator matrix and saturate it with entries. + let mut validator_matrix = ValidatorMatrix::new_with_validator(ALICE_SECRET_KEY.clone()) + .with_signature_rewards_max_delay(2); + // Set the retrograde latch to 10 so we can register all eras lower or + // equal to 10. 
+ let entries_max = validator_matrix.entries_max(); + validator_matrix.register_retrograde_latch(Some(EraId::from(10))); + let mut era_validator_weights = vec![validator_matrix.validator_weights(0.into()).unwrap()]; + era_validator_weights.extend( + (1..=entries_max as u64) + .map(EraId::from) + .map(empty_era_validator_weights), + ); + for evw in era_validator_weights + .iter() + .take(entries_max + 1) + .skip(1) + .cloned() + { + assert!( + validator_matrix.register_era_validator_weights(evw), + "register_era_validator_weights" + ); + } + + // Register eras [7, 8, 9]. + era_validator_weights.extend((7..=9).map(EraId::from).map(empty_era_validator_weights)); + for evw in era_validator_weights.iter().rev().take(3).cloned() { + assert!( + validator_matrix.register_era_validator_weights(evw), + "register_era_validator_weights" + ); + } + + // Set the retrograde latch to era 5. + validator_matrix.register_retrograde_latch(Some(EraId::from(5))); + // Add era 10 to the weights. + era_validator_weights.push(empty_era_validator_weights(EraId::from(10))); + assert_eq!(era_validator_weights.len(), 11); + // As the current weights in the matrix are [0, ..., 9], register era + // 10. This should succeed anyway since it's the highest weight. + assert!( + validator_matrix.register_era_validator_weights(era_validator_weights[10].clone()), + "register_era_validator_weights" + ); + // The latch was previously set to 5, so now all weights which are + // neither the lowest 3, highest 3, or higher than the latched era + // should have been purged. + // Given we had weights [0, ..., 10] and the latch is 5, we should + // be left with [0, 1, 2, 3, 4, 5, 8, 9, 10]. 
+ for era in 0..=5 { + assert!(validator_matrix.has_era(&EraId::from(era))); + } + for era in 6..=7 { + assert!(!validator_matrix.has_era(&EraId::from(era))); + } + for era in 8..=10 { + assert!(validator_matrix.has_era(&EraId::from(era))); + } + + // Make sure era 6, which was previously purged, is not registered as + // it is greater than the latch, which is 5. + assert!( + !validator_matrix.register_era_validator_weights(era_validator_weights[6].clone()), + "register_era_validator_weights" + ); + + // Set the retrograde latch to era 6. + validator_matrix.register_retrograde_latch(Some(EraId::from(6))); + // Make sure era 6 is now registered. + assert!( + validator_matrix.register_era_validator_weights(era_validator_weights[6].clone()), + "register_era_validator_weights" + ); + + // Set the retrograde latch to era 1. + validator_matrix.register_retrograde_latch(Some(EraId::from(1))); + // Register era 10 again to drive the purging mechanism. + assert!( + !validator_matrix.register_era_validator_weights(era_validator_weights[10].clone()), + "register_era_validator_weights" + ); + // The latch was previously set to 1, so now all weights which are + // neither the lowest 3, highest 3, or higher than the latched era + // should have been purged. + // Given we had weights [0, 1, 2, 3, 4, 5, 6, 8, 9, 10] and the latch + // is 1, we should be left with [0, 1, 2, 8, 9, 10]. 
+ for era in 0..=2 { + assert!(validator_matrix.has_era(&EraId::from(era))); + } + for era in 3..=7 { + assert!(!validator_matrix.has_era(&EraId::from(era))); + } + for era in 8..=10 { + assert!(validator_matrix.has_era(&EraId::from(era))); + } + } +} diff --git a/node/src/types/value_or_chunk.rs b/node/src/types/value_or_chunk.rs new file mode 100644 index 0000000000..835321cfd8 --- /dev/null +++ b/node/src/types/value_or_chunk.rs @@ -0,0 +1,284 @@ +use std::fmt::{self, Debug, Display, Formatter}; + +use datasize::DataSize; +use hex_fmt::HexFmt; +use once_cell::sync::OnceCell; +use serde::{Deserialize, Serialize}; +use thiserror::Error; + +use casper_storage::global_state::trie::TrieRaw; +use casper_types::{ + execution::ExecutionResult, ChunkWithProof, ChunkWithProofVerificationError, Digest, + MerkleConstructionError, +}; + +use super::Chunkable; +use crate::{ + components::fetcher::{EmptyValidationMetadata, FetchItem, Tag}, + utils::ds, +}; + +/// Represents a value or a chunk of data with attached proof. +/// +/// Chunk with attached proof is used when the requested +/// value is larger than [ChunkWithProof::CHUNK_SIZE_BYTES]. +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, DataSize)] +pub enum ValueOrChunk { + /// Represents a value. + Value(V), + /// Represents a chunk of data with attached proof. + ChunkWithProof(ChunkWithProof), +} + +/// Error returned when constructing an instance of [`ValueOrChunk`]. +#[derive(Debug, Error)] +pub enum ChunkingError { + /// Merkle proof construction error. + #[error("error constructing Merkle proof for chunk")] + MerkleConstruction( + #[from] + #[source] + MerkleConstructionError, + ), + /// Serialization error. + #[error("error serializing data into chunks: {0}")] + SerializationError(String), +} + +impl ValueOrChunk { + /// Creates an instance of [`ValueOrChunk::Value`] if data size is less than or equal to + /// [`ChunkWithProof::CHUNK_SIZE_BYTES`] or a [`ValueOrChunk::ChunkWithProof`] if it is greater. 
+ /// In the latter case it will return only the `chunk_index`-th chunk of the value's byte + /// representation. + /// + /// NOTE: The [`Chunkable`] instance used here needs to match the one used when calling + /// [`Digest::hash_into_chunks_if_necessary`]. This is to ensure that type is turned into + /// bytes consistently before chunking and hashing. If not then the Merkle proofs for chunks + /// won't match. + pub fn new(data: V, chunk_index: u64) -> Result + where + V: Chunkable, + { + let bytes = Chunkable::as_bytes(&data).map_err(|error| { + ChunkingError::SerializationError(format!( + "failed to chunk {:?}: {:?}", + std::any::type_name::(), + error + )) + })?; + // NOTE: Cannot accept the chunk size bytes as an argument without changing the + // IndexedMerkleProof. The chunk size there is hardcoded and will be used when + // determining the chunk. + if bytes.len() <= ChunkWithProof::CHUNK_SIZE_BYTES { + Ok(ValueOrChunk::Value(data)) + } else { + let chunk_with_proof = ChunkWithProof::new(&bytes, chunk_index) + .map_err(ChunkingError::MerkleConstruction)?; + Ok(ValueOrChunk::ChunkWithProof(chunk_with_proof)) + } + } +} + +impl Display for ValueOrChunk { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + match self { + ValueOrChunk::Value(data) => write!(f, "value {}", data), + ValueOrChunk::ChunkWithProof(chunk) => write!( + f, + "chunk #{} with proof, root hash {}", + chunk.proof().index(), + chunk.proof().root_hash() + ), + } + } +} + +impl Display for ValueOrChunk> { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + match self { + ValueOrChunk::Value(data) => write!(f, "value: {} execution results", data.len()), + ValueOrChunk::ChunkWithProof(chunk) => write!( + f, + "chunk #{} with proof, root hash {}", + chunk.proof().index(), + chunk.proof().root_hash() + ), + } + } +} + +#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize, DataSize)] +pub struct HashingTrieRaw { + inner: TrieRaw, + #[serde(skip)] + #[data_size(with = ds::once_cell)] + 
hash: OnceCell, +} + +impl From for HashingTrieRaw { + fn from(inner: TrieRaw) -> HashingTrieRaw { + HashingTrieRaw { + inner, + hash: OnceCell::new(), + } + } +} + +impl Display for HashingTrieRaw { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + write!(f, "{:10}", HexFmt(self.inner.inner())) + } +} + +impl HashingTrieRaw { + fn hash(&self) -> Digest { + *self.hash.get_or_init(|| Digest::hash(self.inner.inner())) + } + + pub fn inner(&self) -> &TrieRaw { + &self.inner + } + + pub fn into_inner(self) -> TrieRaw { + self.inner + } +} + +/// Represents an enum that can contain either a whole trie or a chunk of it. +pub type TrieOrChunk = ValueOrChunk; + +impl FetchItem for TrieOrChunk { + type Id = TrieOrChunkId; + type ValidationError = ChunkWithProofVerificationError; + type ValidationMetadata = EmptyValidationMetadata; + + const TAG: Tag = Tag::TrieOrChunk; + + fn fetch_id(&self) -> Self::Id { + match self { + TrieOrChunk::Value(trie_raw) => TrieOrChunkId(0, trie_raw.hash()), + TrieOrChunk::ChunkWithProof(chunked_data) => TrieOrChunkId( + chunked_data.proof().index(), + chunked_data.proof().root_hash(), + ), + } + } + + fn validate(&self, _metadata: &EmptyValidationMetadata) -> Result<(), Self::ValidationError> { + match self { + TrieOrChunk::Value(_) => Ok(()), + TrieOrChunk::ChunkWithProof(chunk_with_proof) => chunk_with_proof.verify(), + } + } +} + +/// Represents the ID of a `TrieOrChunk` - containing the index and the root hash. +/// The root hash is the hash of the trie node as a whole. +/// The index is the index of a chunk if the node's size is too large and requires chunking. For +/// small nodes, it's always 0. +#[derive(DataSize, Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq, Hash)] +pub struct TrieOrChunkId(pub u64, pub Digest); + +impl TrieOrChunkId { + /// Returns the trie key part of the ID. + pub fn digest(&self) -> &Digest { + &self.1 + } + + /// Given a serialized ID, deserializes it for display purposes. 
+ fn fmt_serialized(f: &mut Formatter, serialized_id: &[u8]) -> fmt::Result { + match bincode::deserialize::(serialized_id) { + Ok(ref trie_or_chunk_id) => Display::fmt(trie_or_chunk_id, f), + Err(_) => f.write_str(""), + } + } +} + +/// Helper struct to on-demand deserialize a trie or chunk ID for display purposes. +pub struct TrieOrChunkIdDisplay<'a>(pub &'a [u8]); + +impl Display for TrieOrChunkIdDisplay<'_> { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + TrieOrChunkId::fmt_serialized(f, self.0) + } +} + +impl Display for TrieOrChunkId { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + write!(f, "({}, {})", self.0, self.1) + } +} + +#[cfg(test)] +mod tests { + use casper_types::{bytesrepr::Bytes, ChunkWithProof}; + + use super::ValueOrChunk; + + #[test] + fn returns_value_or_chunk() { + let input: Bytes = vec![1u8; 1].into(); + let value = ValueOrChunk::new(input, 0).unwrap(); + assert!(matches!(value, ValueOrChunk::Value { .. })); + + let input: Bytes = vec![1u8; ChunkWithProof::CHUNK_SIZE_BYTES + 1].into(); + let value_or_chunk = ValueOrChunk::new(input.clone(), 0).unwrap(); + let first_chunk = match value_or_chunk { + ValueOrChunk::Value(_) => panic!("expected chunk"), + ValueOrChunk::ChunkWithProof(chunk) => chunk, + }; + + // try to read all the chunks + let chunk_count = first_chunk.proof().count(); + let mut chunks = vec![first_chunk]; + + for i in 1..chunk_count { + match ValueOrChunk::new(input.clone(), i).unwrap() { + ValueOrChunk::Value(_) => panic!("expected chunk"), + ValueOrChunk::ChunkWithProof(chunk) => chunks.push(chunk), + } + } + + // there should be no chunk with index `chunk_count` + assert!(matches!( + ValueOrChunk::new(input.clone(), chunk_count), + Err(super::ChunkingError::MerkleConstruction(_)) + )); + + // all chunks should be valid + assert!(chunks.iter().all(|chunk| chunk.verify().is_ok())); + + // reassemble the data + let data: Vec = chunks + .into_iter() + .flat_map(|chunk| chunk.into_chunk()) + .collect(); + + // 
Since `Bytes` are chunked "as-is", there's no deserialization of the bytes required. + let retrieved_bytes: Bytes = data.into(); + + assert_eq!(input, retrieved_bytes); + } +} + +mod specimen_support { + use crate::utils::specimen::{Cache, LargestSpecimen, SizeEstimator}; + + use super::{TrieOrChunkId, ValueOrChunk}; + + impl LargestSpecimen for TrieOrChunkId { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + TrieOrChunkId( + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + ) + } + } + + impl LargestSpecimen for ValueOrChunk { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + // By definition, the chunk is always the largest (8MiB): + ValueOrChunk::ChunkWithProof(LargestSpecimen::largest_specimen(estimator, cache)) + } + } +} diff --git a/node/src/utils.rs b/node/src/utils.rs index 66ef598c31..9a6d060f3a 100644 --- a/node/src/utils.rs +++ b/node/src/utils.rs @@ -1,56 +1,51 @@ //! Various functions that are not limited to a particular module, but are too small to warrant //! being factored out into standalone crates. 
-mod counting_channel; -pub mod ds; +mod block_signatures; +pub(crate) mod chain_specification; +pub(crate) mod config_specification; +mod display_error; +pub(crate) mod ds; mod external; -pub mod milliseconds; -pub mod pid_file; +pub(crate) mod fmt_limit; +pub(crate) mod opt_display; #[cfg(target_os = "linux")] pub(crate) mod rlimit; -mod round_robin; +pub(crate) mod round_robin; +pub(crate) mod specimen; +pub(crate) mod umask; +pub mod work_queue; use std::{ - cell::RefCell, fmt::{self, Debug, Display, Formatter}, - fs, - io::{self, Write}, + io, net::{SocketAddr, ToSocketAddrs}, - ops::{Add, Div}, - os::unix::fs::OpenOptionsExt, + ops::{Add, BitXorAssign, Div}, path::{Path, PathBuf}, + sync::atomic::{AtomicBool, Ordering}, + time::{Instant, SystemTime}, }; + #[cfg(test)] -use std::{env, str::FromStr}; +use std::{any, sync::Arc, time::Duration}; use datasize::DataSize; use hyper::server::{conn::AddrIncoming, Builder, Server}; -use libc::{c_long, sysconf, _SC_PAGESIZE}; +#[cfg(test)] use once_cell::sync::Lazy; +use prometheus::{self, Histogram, HistogramOpts, Registry}; use serde::Serialize; use thiserror::Error; -use tracing::warn; +use tracing::{error, warn}; -pub(crate) use counting_channel::{counting_unbounded_channel, CountingReceiver, CountingSender}; +use crate::types::NodeId; +pub(crate) use block_signatures::{check_sufficient_block_signatures, BlockSignatureError}; +pub(crate) use display_error::display_error; #[cfg(test)] -pub use external::RESOURCES_PATH; +pub(crate) use external::RESOURCES_PATH; pub use external::{External, LoadError, Loadable}; pub(crate) use round_robin::WeightedRoundRobin; -/// Sensible default for many if not all systems. -const DEFAULT_PAGE_SIZE: usize = 4096; - -/// OS page size. 
-pub static OS_PAGE_SIZE: Lazy = Lazy::new(|| { - // https://www.gnu.org/software/libc/manual/html_node/Sysconf.html - let value: c_long = unsafe { sysconf(_SC_PAGESIZE) }; - if value <= 0 { - DEFAULT_PAGE_SIZE - } else { - value as usize - } -}); - /// DNS resolution error. #[derive(Debug, Error)] #[error("could not resolve `{address}`: {kind}")] @@ -83,6 +78,31 @@ impl Display for ResolveAddressErrorKind { } } +/// Backport of `Result::flatten`, see . +pub trait FlattenResult { + /// The output of the flattening operation. + type Output; + + /// Flattens one level. + /// + /// This function is named `flatten_result` instead of `flatten` to avoid name collisions once + /// `Result::flatten` stabilizes. + fn flatten_result(self) -> Self::Output; +} + +impl FlattenResult for Result, E> { + type Output = Result; + + #[inline] + fn flatten_result(self) -> Self::Output { + match self { + Ok(Ok(v)) => Ok(v), + Ok(Err(e)) => Err(e), + Err(e) => Err(e), + } + } +} + /// Parses a network address from a string, with DNS resolution. pub(crate) fn resolve_address(address: &str) -> Result { address @@ -100,7 +120,7 @@ pub(crate) fn resolve_address(address: &str) -> Result(value: T) -> &'static T { Box::leak(Box::new(value)) } -/// A display-helper that shows iterators display joined by ",". -#[derive(Debug)] -pub(crate) struct DisplayIter(RefCell>); +/// A flag shared across multiple subsystem. +#[derive(Copy, Clone, DataSize, Debug)] +pub(crate) struct SharedFlag(&'static AtomicBool); -impl DisplayIter { - pub(crate) fn new(item: T) -> Self { - DisplayIter(RefCell::new(Some(item))) +impl SharedFlag { + /// Creates a new shared flag. + /// + /// The flag is initially not set. 
+ pub(crate) fn new() -> Self { + SharedFlag(leak(AtomicBool::new(false))) } -} - -impl Display for DisplayIter -where - I: IntoIterator, - T: Display, -{ - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - if let Some(src) = self.0.borrow_mut().take() { - let mut first = true; - for item in src.into_iter().take(f.width().unwrap_or(usize::MAX)) { - if first { - first = false; - write!(f, "{}", item)?; - } else { - write!(f, ", {}", item)?; - } - } - Ok(()) - } else { - write!(f, "DisplayIter:GONE") - } + /// Checks whether the flag is set. + pub(crate) fn is_set(self) -> bool { + self.0.load(Ordering::SeqCst) } -} -/// Error reading a file. -#[derive(Debug, Error)] -#[error("could not read '{0}': {error}", .path.display())] -pub struct ReadFileError { - /// Path that failed to be read. - path: PathBuf, - /// The underlying OS error. - #[source] - error: io::Error, -} - -/// Error writing a file -#[derive(Debug, Error)] -#[error("could not write to '{0}': {error}", .path.display())] -pub struct WriteFileError { - /// Path that failed to be written to. - path: PathBuf, - /// The underlying OS error. - #[source] - error: io::Error, -} + /// Set the flag. + pub(crate) fn set(self) { + self.0.store(true, Ordering::SeqCst); + } -/// Read complete at `path` into memory. -/// -/// Wraps `fs::read`, but preserves the filename for better error printing. -pub fn read_file>(filename: P) -> Result, ReadFileError> { - let path = filename.as_ref(); - fs::read(path).map_err(|error| ReadFileError { - path: path.to_owned(), - error, - }) -} + /// Returns a shared instance of the flag for testing. + /// + /// The returned flag should **never** have `set` be called upon it. + #[cfg(test)] + pub(crate) fn global_shared() -> Self { + static SHARED_FLAG: Lazy = Lazy::new(SharedFlag::new); -/// Write data to `path`. -/// -/// Wraps `fs::write`, but preserves the filename for better error printing. 
-pub(crate) fn write_file, B: AsRef<[u8]>>( - filename: P, - data: B, -) -> Result<(), WriteFileError> { - let path = filename.as_ref(); - fs::write(path, data.as_ref()).map_err(|error| WriteFileError { - path: path.to_owned(), - error, - }) + *SHARED_FLAG + } } -/// Writes data to `path`, ensuring only the owner can read or write it. -/// -/// Otherwise functions like [`write_file`]. -pub(crate) fn write_private_file, B: AsRef<[u8]>>( - filename: P, - data: B, -) -> Result<(), WriteFileError> { - let path = filename.as_ref(); - fs::OpenOptions::new() - .write(true) - .create(true) - .mode(0o600) - .open(path) - .and_then(|mut file| file.write_all(data.as_ref())) - .map_err(|error| WriteFileError { - path: path.to_owned(), - error, - }) +impl Default for SharedFlag { + fn default() -> Self { + Self::new() + } } /// With-directory context. @@ -278,6 +236,11 @@ impl WithDir { &self.value } + /// Get a mutable reference to the inner value. + pub fn value_mut(&mut self) -> &mut T { + &mut self.value + } + /// Adds `self.dir` as a parent if `path` is relative, otherwise returns `path` unchanged. pub fn with_dir(&self, path: PathBuf) -> PathBuf { if path.is_relative() { @@ -290,36 +253,45 @@ impl WithDir { /// The source of a piece of data. #[derive(Clone, Debug, Serialize)] -pub enum Source { +pub(crate) enum Source { /// A peer with the wrapped ID. - Peer(I), + PeerGossiped(NodeId), + /// A peer with the wrapped ID. + Peer(NodeId), /// A client. Client, + /// A client via the speculative_exec server. + SpeculativeExec, /// This node. Ourself, } -impl Source { - pub(crate) fn from_client(&self) -> bool { - matches!(self, Source::Client) +impl Source { + #[allow(clippy::wrong_self_convention)] + pub(crate) fn is_client(&self) -> bool { + match self { + Source::Client | Source::SpeculativeExec => true, + Source::PeerGossiped(_) | Source::Peer(_) | Source::Ourself => false, + } } -} -impl Source { /// If `self` represents a peer, returns its ID, otherwise returns `None`. 
- pub(crate) fn node_id(&self) -> Option { + pub(crate) fn node_id(&self) -> Option { match self { - Source::Peer(node_id) => Some(node_id.clone()), - Source::Client | Source::Ourself => None, + Source::Peer(node_id) | Source::PeerGossiped(node_id) => Some(*node_id), + Source::Client | Source::SpeculativeExec | Source::Ourself => None, } } } -impl Display for Source { +impl Display for Source { fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { match self { - Source::Peer(node_id) => Display::fmt(node_id, formatter), + Source::PeerGossiped(node_id) | Source::Peer(node_id) => { + Display::fmt(node_id, formatter) + } Source::Client => write!(formatter, "client"), + Source::SpeculativeExec => write!(formatter, "client (speculative exec)"), Source::Ourself => write!(formatter, "ourself"), } } @@ -335,7 +307,20 @@ where (numerator + denominator / T::from(2)) / denominator } -/// Used to unregister a metric from the Prometheus registry. +/// Creates a prometheus Histogram and registers it. +pub(crate) fn register_histogram_metric( + registry: &Registry, + metric_name: &str, + metric_help: &str, + buckets: Vec, +) -> Result { + let histogram_opts = HistogramOpts::new(metric_name, metric_help).buckets(buckets); + let histogram = Histogram::with_opts(histogram_opts)?; + registry.register(Box::new(histogram.clone()))?; + Ok(histogram) +} + +/// Unregisters a metric from the Prometheus registry. #[macro_export] macro_rules! unregister_metric { ($registry:expr, $metric:expr) => { @@ -350,26 +335,169 @@ macro_rules! unregister_metric { }; } -/// Reads an envvar from the environment and, if present, parses it. -/// -/// Only absent envvars are returned as `None`. +/// XORs two byte sequences. /// /// # Panics /// -/// Panics on any parse error. +/// Panics if `lhs` and `rhs` are not of equal length. +#[inline] +pub(crate) fn xor(lhs: &mut [u8], rhs: &[u8]) { + // Implementing SIMD support is left as an exercise for the reader. 
+ assert_eq!(lhs.len(), rhs.len(), "xor inputs should have equal length"); + lhs.iter_mut() + .zip(rhs.iter()) + .for_each(|(sb, &cb)| sb.bitxor_assign(cb)); +} + +/// Wait until all strong references for a particular arc have been dropped. +/// +/// Downgrades and immediately drops the `Arc`, keeping only a weak reference. The reference will +/// then be polled `attempts` times, unless it has a strong reference count of 0. +/// +/// Returns whether or not `arc` has zero strong references left. +/// +/// # Note +/// +/// Using this function is usually a potential architectural issue and it should be used very +/// sparingly. Consider introducing a different access pattern for the value under `Arc`. #[cfg(test)] -pub fn read_env(name: &str) -> Option -where - ::Err: Debug, -{ - match env::var(name) { - Ok(raw) => Some( - raw.parse() - .unwrap_or_else(|_| panic!("cannot parse envvar `{}`", name)), - ), - Err(env::VarError::NotPresent) => None, - Err(err) => { - panic!(err) +pub(crate) async fn wait_for_arc_drop( + arc: Arc, + attempts: usize, + retry_delay: Duration, +) -> bool { + // Ensure that if we do hold the last reference, we are now going to 0. + let weak = Arc::downgrade(&arc); + drop(arc); + + for _ in 0..attempts { + let strong_count = weak.strong_count(); + + if strong_count == 0 { + // Everything has been dropped, we are done. + return true; + } + + tokio::time::sleep(retry_delay).await; + } + + error!( + attempts, ?retry_delay, ty=%any::type_name::(), + "failed to clean up shared reference" + ); + + false +} + +/// An anchor for converting an `Instant` into a wall-clock (`SystemTime`) time. +#[derive(Copy, Clone, Debug)] +pub(crate) struct TimeAnchor { + /// The reference instant used for conversion. + now: Instant, + /// The reference wall-clock timestamp used for conversion. + wall_clock_now: SystemTime, +} + +impl TimeAnchor { + /// Creates a new time anchor. 
+ /// + /// Will take a sample of the monotonic clock and the current time and store it in the anchor. + pub(crate) fn now() -> Self { + TimeAnchor { + now: Instant::now(), + wall_clock_now: SystemTime::now(), + } + } + + /// Converts a point in time from the monotonic clock to wall clock time, using this anchor. + #[inline] + pub(crate) fn convert(&self, then: Instant) -> SystemTime { + if then > self.now { + self.wall_clock_now + then.duration_since(self.now) + } else { + self.wall_clock_now - self.now.duration_since(then) } } } + +#[cfg(test)] +mod tests { + use std::{sync::Arc, time::Duration}; + + use crate::utils::SharedFlag; + + use super::{wait_for_arc_drop, xor}; + + #[test] + fn xor_works() { + let mut lhs = [0x43, 0x53, 0xf2, 0x2f, 0xa9, 0x70, 0xfb, 0xf4]; + let rhs = [0x04, 0x0b, 0x5c, 0xa1, 0xef, 0x11, 0x12, 0x23]; + let xor_result = [0x47, 0x58, 0xae, 0x8e, 0x46, 0x61, 0xe9, 0xd7]; + + xor(&mut lhs, &rhs); + + assert_eq!(lhs, xor_result); + } + + #[test] + #[should_panic(expected = "equal length")] + fn xor_panics_on_uneven_inputs() { + let mut lhs = [0x43, 0x53, 0xf2, 0x2f, 0xa9, 0x70, 0xfb, 0xf4]; + let rhs = [0x04, 0x0b, 0x5c, 0xa1, 0xef, 0x11]; + + xor(&mut lhs, &rhs); + } + + #[tokio::test] + async fn arc_drop_waits_for_drop() { + let retry_delay = Duration::from_millis(25); + let attempts = 15; + + let arc = Arc::new(()); + + let arc_in_background = arc.clone(); + let _weak_in_background = Arc::downgrade(&arc); + + // At this point, the Arc has the following refernces: + // + // * main test task (`arc`, strong) + // * background strong reference (`arc_in_background`) + // * background weak reference (`weak_in_background`) + + // Phase 1: waiting for the arc should fail, because there still is the background + // reference. + assert!(!wait_for_arc_drop(arc, attempts, retry_delay).await); + + // We "restore" the arc from the background arc. + let arc = arc_in_background.clone(); + + // Add another "foreground" weak reference. 
+ let weak = Arc::downgrade(&arc); + + // Phase 2: Our background tasks drops its reference, now we should succeed. + drop(arc_in_background); + assert!(wait_for_arc_drop(arc, attempts, retry_delay).await); + + // Immedetialy after, we should not be able to obtain a strong reference anymore. + // This test fails only if we have a race condition, so false positive tests are possible. + assert!(weak.upgrade().is_none()); + } + + #[test] + fn shared_flag_sanity_check() { + let flag = SharedFlag::new(); + let copied = flag; + + assert!(!flag.is_set()); + assert!(!copied.is_set()); + assert!(!flag.is_set()); + assert!(!copied.is_set()); + + flag.set(); + + assert!(flag.is_set()); + assert!(copied.is_set()); + assert!(flag.is_set()); + assert!(copied.is_set()); + } +} diff --git a/node/src/utils/block_signatures.rs b/node/src/utils/block_signatures.rs new file mode 100644 index 0000000000..fdf0e9649b --- /dev/null +++ b/node/src/utils/block_signatures.rs @@ -0,0 +1,389 @@ +use std::collections::BTreeMap; + +use num::rational::Ratio; +use thiserror::Error; + +use casper_types::{BlockSignatures, PublicKey, U512}; + +/// Computes the quorum for the fraction of weight of signatures that will be considered +/// sufficient. This is the lowest weight so that any two sets of validators with that weight have +/// at least one honest validator in common. +fn quorum_fraction(fault_tolerance_fraction: Ratio) -> Ratio { + (fault_tolerance_fraction + 1) / 2 +} + +/// Returns `Ok(())` if the block signatures' total weight exceeds the threshold which is +/// calculated using the provided quorum formula. Returns an error if it doesn't, or if one of the +/// signatures does not belong to a validator. +/// +/// This does _not_ cryptographically verify the signatures. 
+pub(crate) fn check_sufficient_block_signatures_with_quorum_formula( + trusted_validator_weights: &BTreeMap, + fault_tolerance_fraction: Ratio, + maybe_block_signatures: Option<&BlockSignatures>, + quorum_formula: F, +) -> Result<(), BlockSignatureError> +where + F: Fn(Ratio) -> Ratio, +{ + // Calculate the weight of the signatures + let mut signature_weight: U512 = U512::zero(); + let mut minimum_weight: Option = None; + + let total_weight: U512 = trusted_validator_weights + .iter() + .map(|(_, weight)| *weight) + .sum(); + + match maybe_block_signatures { + Some(block_signatures) => { + let mut bogus_validators = vec![]; + for public_key in block_signatures.signers() { + match trusted_validator_weights.get(public_key) { + None => { + bogus_validators.push(public_key.clone()); + continue; + } + Some(validator_weight) => { + if minimum_weight.is_none_or(|min_w| *validator_weight < min_w) { + minimum_weight = Some(*validator_weight); + } + signature_weight += *validator_weight; + } + } + } + if !bogus_validators.is_empty() { + return Err(BlockSignatureError::BogusValidators { + trusted_validator_weights: trusted_validator_weights.clone(), + block_signatures: Box::new(block_signatures.clone()), + bogus_validators, + }); + } + + let quorum_fraction = (quorum_formula)(fault_tolerance_fraction); + // Verify: signature_weight / total_weight >= lower_bound + // Equivalent to the following + if signature_weight * U512::from(*quorum_fraction.denom()) + <= total_weight * U512::from(*quorum_fraction.numer()) + { + return Err(BlockSignatureError::InsufficientWeightForFinality { + trusted_validator_weights: trusted_validator_weights.clone(), + block_signatures: maybe_block_signatures + .map(|signatures| Box::new(signatures.clone())), + signature_weight: Some(Box::new(signature_weight)), + total_validator_weight: Box::new(total_weight), + fault_tolerance_fraction, + }); + } + + Ok(()) + } + None => { + // No signatures provided, return early. 
+ Err(BlockSignatureError::InsufficientWeightForFinality { + trusted_validator_weights: trusted_validator_weights.clone(), + block_signatures: None, + signature_weight: None, + total_validator_weight: Box::new(total_weight), + fault_tolerance_fraction, + }) + } + } +} + +/// Returns `Ok(())` if the block signatures' total weight exceeds the threshold calculated by +/// the [quorum_fraction] function. Returns an error if it doesn't, or if one of the signatures does +/// not belong to a validator. +/// +/// This does _not_ cryptographically verify the signatures. +pub(crate) fn check_sufficient_block_signatures( + trusted_validator_weights: &BTreeMap, + fault_tolerance_fraction: Ratio, + block_signatures: Option<&BlockSignatures>, +) -> Result<(), BlockSignatureError> { + check_sufficient_block_signatures_with_quorum_formula( + trusted_validator_weights, + fault_tolerance_fraction, + block_signatures, + quorum_fraction, + ) +} + +#[derive(Error, Debug)] +pub(crate) enum BlockSignatureError { + #[error( + "Block signatures contain bogus validator. \ + trusted validator weights: {trusted_validator_weights:?}, \ + block signatures: {block_signatures:?}, \ + bogus validator public keys: {bogus_validators:?}" + )] + BogusValidators { + trusted_validator_weights: BTreeMap, + block_signatures: Box, + bogus_validators: Vec, + }, + + #[error( + "Insufficient weight for finality. 
\ + trusted validator weights: {trusted_validator_weights:?}, \ + block signatures: {block_signatures:?}, \ + signature weight: {signature_weight:?}, \ + total validator weight: {total_validator_weight}, \ + fault tolerance fraction: {fault_tolerance_fraction}" + )] + InsufficientWeightForFinality { + trusted_validator_weights: BTreeMap, + block_signatures: Option>, + signature_weight: Option>, + total_validator_weight: Box, + fault_tolerance_fraction: Ratio, + }, +} + +#[cfg(test)] +mod tests { + use rand::Rng; + + use casper_types::{ + crypto, testing::TestRng, BlockHash, BlockSignaturesV2, ChainNameDigest, EraId, + FinalitySignature, SecretKey, + }; + + use super::*; + + const TEST_VALIDATOR_WEIGHT: usize = 1; + + fn generate_validators( + n_validators: usize, + ) -> (BTreeMap, BTreeMap) { + let mut keys = BTreeMap::new(); + let mut weights = BTreeMap::new(); + + for _ in 0..n_validators { + let (secret_key, pub_key) = crypto::generate_ed25519_keypair(); + keys.insert(pub_key.clone(), secret_key); + weights.insert(pub_key, U512::from(TEST_VALIDATOR_WEIGHT)); + } + + (keys, weights) + } + + fn create_signatures( + rng: &mut TestRng, + validators: &BTreeMap, + n_sigs: usize, + ) -> BlockSignaturesV2 { + let era_id = EraId::new(rng.gen_range(10..100)); + + let block_hash = BlockHash::random(rng); + let block_height = rng.gen(); + let chain_name_hash = ChainNameDigest::random(rng); + + let mut sigs = BlockSignaturesV2::new(block_hash, block_height, era_id, chain_name_hash); + + for (pub_key, secret_key) in validators.iter().take(n_sigs) { + let sig = crypto::sign(block_hash, secret_key, pub_key); + sigs.insert_signature(pub_key.clone(), sig); + } + + sigs + } + + #[test] + fn block_signatures_sufficiency() { + const TOTAL_VALIDATORS: usize = 20; + const TOTAL_VALIDATORS_WEIGHT: usize = TOTAL_VALIDATORS * TEST_VALIDATOR_WEIGHT; + const INSUFFICIENT_BLOCK_SIGNATURES: usize = 13; + const JUST_ENOUGH_BLOCK_SIGNATURES: usize = 14; + + let mut rng = TestRng::new(); + + // 
Total validator weights is 20 (1 for each validator). + let (validators, validator_weights) = generate_validators(TOTAL_VALIDATORS); + + let fault_tolerance_fraction = Ratio::new_raw(1, 3); + + // for 20 validators with 20 total validator weight, + // and `fault_tolerance_fraction` = 1/3 (~= 6.666) + // and the `quorum fraction` = 2/3 (~= 13.333) + // + // we need signatures of weight: + // - 13 or less for `InsufficientWeightForFinality` + // - 14 for Ok + + let insufficient = create_signatures(&mut rng, &validators, INSUFFICIENT_BLOCK_SIGNATURES); + let just_enough_weight = + create_signatures(&mut rng, &validators, JUST_ENOUGH_BLOCK_SIGNATURES); + + let result = check_sufficient_block_signatures( + &validator_weights, + fault_tolerance_fraction, + Some(&BlockSignatures::from(insufficient)), + ); + assert!(matches!( + result, + Err(BlockSignatureError::InsufficientWeightForFinality { + trusted_validator_weights: _, + block_signatures: _, + signature_weight, + total_validator_weight, + fault_tolerance_fraction: _ + }) if *total_validator_weight == TOTAL_VALIDATORS_WEIGHT.into() && **signature_weight.as_ref().unwrap() == INSUFFICIENT_BLOCK_SIGNATURES.into() + )); + + let result = check_sufficient_block_signatures( + &validator_weights, + fault_tolerance_fraction, + Some(&BlockSignatures::from(just_enough_weight)), + ); + assert!(result.is_ok()); + } + + #[test] + fn block_signatures_sufficiency_with_quorum_formula() { + const TOTAL_VALIDATORS: usize = 20; + const TOTAL_VALIDATORS_WEIGHT: usize = TOTAL_VALIDATORS * TEST_VALIDATOR_WEIGHT; + const INSUFFICIENT_BLOCK_SIGNATURES: usize = 6; + const JUST_ENOUGH_BLOCK_SIGNATURES: usize = 7; + + let mut rng = TestRng::new(); + + // Total validator weights is 20 (1 for each validator). 
+ let (validators, validator_weights) = generate_validators(TOTAL_VALIDATORS_WEIGHT); + + let fault_tolerance_fraction = Ratio::new_raw(1, 3); + + // `identity` function is transparent, so the calculated quorum fraction will be equal to + // the `fault_tolerance_fraction`. + let custom_quorum_formula = std::convert::identity; + + // for 20 validators with 20 total validator weight, + // and `fault_tolerance_fraction` = 1/3 (~= 6.666) + // and the `quorum fraction` = 1/3 (~= 6.666) + // + // we need signatures of weight: + // - 6 or less for `InsufficientWeightForFinality` + // - 7 for Ok + + let insufficient = create_signatures(&mut rng, &validators, INSUFFICIENT_BLOCK_SIGNATURES); + let just_enough_weight = + create_signatures(&mut rng, &validators, JUST_ENOUGH_BLOCK_SIGNATURES); + + let result = check_sufficient_block_signatures_with_quorum_formula( + &validator_weights, + fault_tolerance_fraction, + Some(&BlockSignatures::from(insufficient)), + custom_quorum_formula, + ); + assert!(matches!( + result, + Err(BlockSignatureError::InsufficientWeightForFinality { + trusted_validator_weights: _, + block_signatures: _, + signature_weight, + total_validator_weight, + fault_tolerance_fraction: _ + }) if *total_validator_weight == TOTAL_VALIDATORS_WEIGHT.into() && **signature_weight.as_ref().unwrap() == INSUFFICIENT_BLOCK_SIGNATURES.into() + )); + + let result = check_sufficient_block_signatures_with_quorum_formula( + &validator_weights, + fault_tolerance_fraction, + Some(&BlockSignatures::from(just_enough_weight)), + custom_quorum_formula, + ); + assert!(result.is_ok()); + } + + #[test] + fn block_signatures_sufficiency_with_quorum_formula_without_signatures() { + const TOTAL_VALIDATORS: usize = 20; + const TOTAL_VALIDATORS_WEIGHT: usize = TOTAL_VALIDATORS * TEST_VALIDATOR_WEIGHT; + let (_, validator_weights) = generate_validators(TOTAL_VALIDATORS_WEIGHT); + let fault_tolerance_fraction = Ratio::new_raw(1, 3); + let custom_quorum_formula = std::convert::identity; + + 
let result = check_sufficient_block_signatures_with_quorum_formula( + &validator_weights, + fault_tolerance_fraction, + None, + custom_quorum_formula, + ); + assert!(matches!( + result, + Err(BlockSignatureError::InsufficientWeightForFinality { + trusted_validator_weights: _, + block_signatures, + signature_weight, + total_validator_weight, + fault_tolerance_fraction: _ + }) if *total_validator_weight == TOTAL_VALIDATORS_WEIGHT.into() && signature_weight.is_none() && block_signatures.is_none() + )); + } + + #[test] + fn detects_bogus_validator() { + const TOTAL_VALIDATORS: usize = 20; + const JUST_ENOUGH_BLOCK_SIGNATURES: usize = 14; + + let mut rng = TestRng::new(); + + let (validators, validator_weights) = generate_validators(TOTAL_VALIDATORS); + let fault_tolerance_fraction = Ratio::new_raw(1, 3); + + // Generate correct signatures. + let mut signatures = create_signatures(&mut rng, &validators, JUST_ENOUGH_BLOCK_SIGNATURES); + let result = check_sufficient_block_signatures( + &validator_weights, + fault_tolerance_fraction, + Some(&BlockSignatures::from(signatures.clone())), + ); + assert!(result.is_ok()); + + // Smuggle bogus proofs in. 
+ let block_hash = *signatures.block_hash(); + let block_height = signatures.block_height(); + let era_id = signatures.era_id(); + let chain_name_hash = signatures.chain_name_hash(); + let finality_sig_1 = FinalitySignature::random_for_block( + block_hash, + block_height, + era_id, + chain_name_hash, + &mut rng, + ); + signatures.insert_signature( + finality_sig_1.public_key().clone(), + *finality_sig_1.signature(), + ); + let finality_sig_2 = FinalitySignature::random_for_block( + block_hash, + block_height, + era_id, + chain_name_hash, + &mut rng, + ); + signatures.insert_signature( + finality_sig_2.public_key().clone(), + *finality_sig_2.signature(), + ); + let result = check_sufficient_block_signatures( + &validator_weights, + fault_tolerance_fraction, + Some(&BlockSignatures::from(signatures)), + ); + let error = result.unwrap_err(); + if let BlockSignatureError::BogusValidators { + trusted_validator_weights: _, + block_signatures: _, + bogus_validators, + } = error + { + assert!(bogus_validators.contains(finality_sig_1.public_key())); + assert!(bogus_validators.contains(finality_sig_2.public_key())); + assert_eq!(bogus_validators.len(), 2); + } else { + panic!("unexpected err: {}", error); + } + } +} diff --git a/node/src/utils/chain_specification.rs b/node/src/utils/chain_specification.rs new file mode 100644 index 0000000000..307ce17054 --- /dev/null +++ b/node/src/utils/chain_specification.rs @@ -0,0 +1,856 @@ +pub(crate) mod error; +pub(crate) mod parse_toml; + +use std::collections::HashSet; + +use num_rational::Ratio; +use once_cell::sync::Lazy; +use tracing::{error, info, warn}; + +use casper_types::{ + system::auction::VESTING_SCHEDULE_LENGTH_MILLIS, Chainspec, ConsensusProtocolName, CoreConfig, + ProtocolConfig, TimeDiff, TransactionConfig, AUCTION_LANE_ID, INSTALL_UPGRADE_LANE_ID, + MINT_LANE_ID, +}; + +use crate::components::network; + +static RESERVED_LANE_IDS: Lazy> = + Lazy::new(|| vec![MINT_LANE_ID, AUCTION_LANE_ID, INSTALL_UPGRADE_LANE_ID]); + 
+/// Returns `false` and logs errors if the values set in the config don't make sense. +#[tracing::instrument(ret, level = "info", skip(chainspec), fields(hash = % chainspec.hash()))] +pub fn validate_chainspec(chainspec: &Chainspec) -> bool { + info!("begin chainspec validation"); + + if chainspec.core_config.unbonding_delay <= chainspec.core_config.auction_delay { + warn!( + "unbonding delay is set to {} but it should be greater than the auction delay (currently set to {})", + chainspec.core_config.unbonding_delay, chainspec.core_config.auction_delay); + return false; + } + + // If the era duration is set to zero, we will treat it as explicitly stating that eras + // should be defined by height only. + if chainspec.core_config.era_duration.millis() > 0 + && chainspec.core_config.era_duration + < chainspec.core_config.minimum_block_time * chainspec.core_config.minimum_era_height + { + warn!("era duration is less than minimum era height * block time!"); + } + + if chainspec.core_config.consensus_protocol == ConsensusProtocolName::Highway { + if chainspec.core_config.minimum_block_time > chainspec.highway_config.maximum_round_length + { + error!( + minimum_block_time = %chainspec.core_config.minimum_block_time, + maximum_round_length = %chainspec.highway_config.maximum_round_length, + "minimum_block_time must be less or equal than maximum_round_length", + ); + return false; + } + match chainspec.highway_config.is_valid() { + Ok(_) => return true, + Err(msg) => { + error!(msg); + return false; + } + } + } + + // We don't support lookback by more than one era in the rewards scheme. 
+ if chainspec.core_config.minimum_era_height < chainspec.core_config.signature_rewards_max_delay + { + error!( + minimum_era_height = %chainspec.core_config.minimum_era_height, + signature_rewards_max_delay = %chainspec.core_config.signature_rewards_max_delay, + "signature_rewards_max_delay must be less than minimum_era_height" + ); + return false; + } + + network::within_message_size_limit_tolerance(chainspec) + && validate_protocol_config(&chainspec.protocol_config) + && validate_core_config(&chainspec.core_config) + && validate_transaction_config(&chainspec.transaction_config) +} + +/// Checks whether the values set in the config make sense and returns `false` if they don't. +pub(crate) fn validate_protocol_config(_protocol_config: &ProtocolConfig) -> bool { + true +} + +/// Returns `false` if unbonding delay is not greater than auction delay to ensure +/// that `recent_era_count()` yields a value of at least 1. +pub(crate) fn validate_core_config(core_config: &CoreConfig) -> bool { + if core_config.unbonding_delay <= core_config.auction_delay { + warn!( + unbonding_delay = core_config.unbonding_delay, + auction_delay = core_config.auction_delay, + "unbonding delay should be greater than auction delay", + ); + return false; + } + + // If the era duration is set to zero, we will treat it as explicitly stating that eras + // should be defined by height only. Warn only. 
+ if core_config.era_duration.millis() > 0 + && core_config.era_duration.millis() + < core_config.minimum_era_height * core_config.minimum_block_time.millis() + { + warn!("era duration is less than minimum era height * round length!"); + } + + if core_config.finality_threshold_fraction <= Ratio::new(0, 1) + || core_config.finality_threshold_fraction >= Ratio::new(1, 1) + { + error!( + ftf = %core_config.finality_threshold_fraction, + "finality threshold fraction is not in the range (0, 1)", + ); + return false; + } + + if core_config.finality_signature_proportion <= Ratio::new(0, 1) + || core_config.finality_signature_proportion >= Ratio::new(1, 1) + { + error!( + fsp = %core_config.finality_signature_proportion, + "finality signature proportion is not in the range (0, 1)", + ); + return false; + } + if core_config.finders_fee <= Ratio::new(0, 1) || core_config.finders_fee >= Ratio::new(1, 1) { + error!( + fsp = %core_config.finders_fee, + "finder's fee proportion is not in the range (0, 1)", + ); + return false; + } + + if core_config.vesting_schedule_period > TimeDiff::from_millis(VESTING_SCHEDULE_LENGTH_MILLIS) { + error!( + vesting_schedule_millis = core_config.vesting_schedule_period.millis(), + max_millis = VESTING_SCHEDULE_LENGTH_MILLIS, + "vesting schedule period too long", + ); + return false; + } + + true +} + +/// Validates `TransactionConfig` parameters +pub(crate) fn validate_transaction_config(transaction_config: &TransactionConfig) -> bool { + // The total number of transactions should not exceed the number of approvals because each + // transaction needs at least one approval to be valid. 
+ let total_txn_slots = transaction_config + .transaction_v1_config + .get_max_block_count(); + if transaction_config.block_max_approval_count < total_txn_slots as u32 { + return false; + } + let mut seen_max_transaction_size = HashSet::new(); + if transaction_config + .transaction_v1_config + .wasm_lanes() + .is_empty() + { + error!("Wasm lanes chainspec config is empty."); + return false; + } + for wasm_lane_config in transaction_config.transaction_v1_config.wasm_lanes().iter() { + if RESERVED_LANE_IDS.contains(&wasm_lane_config.id) { + error!("One of the defined wasm lanes has declared an id that is reserved for system lanes. Offending lane id: {}", wasm_lane_config.id); + return false; + } + let max_transaction_length = wasm_lane_config.max_transaction_length; + if seen_max_transaction_size.contains(&max_transaction_length) { + error!("Found wasm lane configuration that has non-unique max_transaction_length. Duplicate value: {}", max_transaction_length); + return false; + } + seen_max_transaction_size.insert(max_transaction_length); + } + + let mut seen_max_gas_prices = HashSet::new(); + for wasm_lane_config in transaction_config.transaction_v1_config.wasm_lanes().iter() { + //No need to check reserved lanes, we just did that + let max_transaction_gas_limit = wasm_lane_config.max_transaction_gas_limit; + if seen_max_gas_prices.contains(&max_transaction_gas_limit) { + error!("Found wasm lane configuration that has non-unique max_transaction_gas_limit. 
Duplicate value: {}", max_transaction_gas_limit); + return false; + } + seen_max_gas_prices.insert(max_transaction_gas_limit); + } + true +} + +#[cfg(test)] +mod tests { + use std::fs; + + use num_rational::Ratio; + use once_cell::sync::Lazy; + + use casper_types::{ + bytesrepr::FromBytes, ActivationPoint, BrTableCost, ChainspecRawBytes, ControlFlowCosts, + CoreConfig, EraId, GlobalStateUpdate, HighwayConfig, HostFunction, HostFunctionCostsV1, + HostFunctionCostsV2, HostFunctionV2, MessageLimits, Motes, OpcodeCosts, ProtocolConfig, + ProtocolVersion, StoredValue, TestBlockBuilder, TimeDiff, Timestamp, TransactionConfig, + TransactionLaneDefinition, TransactionV1Config, WasmConfig, WasmV1Config, WasmV2Config, + MINT_LANE_ID, + }; + + use super::*; + use crate::{ + testing::init_logging, + utils::{Loadable, RESOURCES_PATH}, + }; + + const EXPECTED_GENESIS_COSTS: OpcodeCosts = OpcodeCosts { + bit: 13, + add: 14, + mul: 15, + div: 16, + load: 17, + store: 18, + op_const: 19, + local: 20, + global: 21, + control_flow: ControlFlowCosts { + block: 1, + op_loop: 2, + op_if: 3, + op_else: 4, + end: 5, + br: 6, + br_if: 7, + br_table: BrTableCost { + cost: 0, + size_multiplier: 1, + }, + op_return: 8, + call: 9, + call_indirect: 10, + drop: 11, + select: 12, + }, + integer_comparison: 22, + conversion: 23, + unreachable: 24, + nop: 25, + current_memory: 26, + grow_memory: 27, + sign: 28, + }; + static EXPECTED_GENESIS_HOST_FUNCTION_COSTS: Lazy = + Lazy::new(|| HostFunctionCostsV1 { + read_value: HostFunction::new(127, [0, 1, 0]), + dictionary_get: HostFunction::new(128, [0, 1, 0]), + write: HostFunction::new(140, [0, 1, 0, 2]), + dictionary_put: HostFunction::new(141, [0, 1, 2, 3]), + add: HostFunction::new(100, [0, 1, 2, 3]), + new_uref: HostFunction::new(122, [0, 1, 2]), + load_named_keys: HostFunction::new(121, [0, 1]), + ret: HostFunction::new(133, [0, 1]), + get_key: HostFunction::new(113, [0, 1, 2, 3, 4]), + has_key: HostFunction::new(119, [0, 1]), + put_key: 
HostFunction::new(125, [0, 1, 2, 3]), + remove_key: HostFunction::new(132, [0, 1]), + revert: HostFunction::new(134, [0]), + is_valid_uref: HostFunction::new(120, [0, 1]), + add_associated_key: HostFunction::new(101, [0, 1, 2]), + remove_associated_key: HostFunction::new(129, [0, 1]), + update_associated_key: HostFunction::new(139, [0, 1, 2]), + set_action_threshold: HostFunction::new(135, [0, 1]), + get_caller: HostFunction::new(112, [0]), + get_blocktime: HostFunction::new(111, [0]), + create_purse: HostFunction::new(108, [0, 1]), + transfer_to_account: HostFunction::new(138, [0, 1, 2, 3, 4, 5, 6]), + transfer_from_purse_to_account: HostFunction::new(136, [0, 1, 2, 3, 4, 5, 6, 7, 8]), + transfer_from_purse_to_purse: HostFunction::new(137, [0, 1, 2, 3, 4, 5, 6, 7]), + get_balance: HostFunction::new(110, [0, 1, 2]), + get_phase: HostFunction::new(117, [0]), + get_system_contract: HostFunction::new(118, [0, 1, 2]), + get_main_purse: HostFunction::new(114, [0]), + read_host_buffer: HostFunction::new(126, [0, 1, 2]), + create_contract_package_at_hash: HostFunction::new(106, [0, 1]), + create_contract_user_group: HostFunction::new(107, [0, 1, 2, 3, 4, 5, 6, 7]), + add_contract_version: HostFunction::new(102, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), + add_contract_version_with_message_topics: HostFunction::new( + 102, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], + ), + add_package_version_with_message_topics: HostFunction::new( + 102, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], + ), + disable_contract_version: HostFunction::new(109, [0, 1, 2, 3]), + call_contract: HostFunction::new(104, [0, 1, 2, 3, 4, 5, 6]), + call_versioned_contract: HostFunction::new(105, [0, 1, 2, 3, 4, 5, 6, 7, 8]), + get_named_arg_size: HostFunction::new(116, [0, 1, 2]), + get_named_arg: HostFunction::new(115, [0, 1, 2, 3]), + remove_contract_user_group: HostFunction::new(130, [0, 1, 2, 3]), + provision_contract_user_group_uref: HostFunction::new(124, [0, 1, 2, 3, 4]), + remove_contract_user_group_urefs: 
HostFunction::new(131, [0, 1, 2, 3, 4, 5]), + print: HostFunction::new(123, [0, 1]), + blake2b: HostFunction::new(133, [0, 1, 2, 3]), + random_bytes: HostFunction::new(123, [0, 1]), + enable_contract_version: HostFunction::new(142, [0, 1, 2, 3]), + generic_hash: HostFunction::new(152, [0, 1, 2, 3, 4]), + manage_message_topic: HostFunction::new(100, [0, 1, 2, 4]), + emit_message: HostFunction::new(100, [0, 1, 2, 3]), + cost_increase_per_message: 50, + get_block_info: HostFunction::new(330, [0, 0]), + recover_secp256k1: HostFunction::new(331, [0, 1, 2, 3, 4, 5]), + verify_signature: HostFunction::new(332, [0, 1, 2, 3, 4, 5]), + call_package_version: HostFunction::new(105, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), + }); + static EXPECTED_GENESIS_HOST_FUNCTION_COSTS_V2: Lazy = + Lazy::new(|| HostFunctionCostsV2 { + read: HostFunctionV2::new(100, [0, 1, 2, 3, 4, 5]), + write: HostFunctionV2::new(101, [0, 1, 2, 3, 4]), + remove: HostFunctionV2::new(114, [0, 1, 2]), + copy_input: HostFunctionV2::new(102, [0, 1]), + ret: HostFunctionV2::new(103, [0, 1]), + create: HostFunctionV2::new(104, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), + transfer: HostFunctionV2::new(108, [0, 1, 2]), + env_balance: HostFunctionV2::new(109, [0, 1, 2, 3]), + upgrade: HostFunctionV2::new(110, [0, 1, 2, 3, 4, 5]), + call: HostFunctionV2::new(111, [0, 1, 2, 3, 4, 5, 6, 7, 8]), + print: HostFunctionV2::new(112, [0, 1]), + emit: HostFunctionV2::new(113, [0, 1, 2, 3]), + env_info: HostFunctionV2::new(114, [0, 1]), + }); + static EXPECTED_GENESIS_WASM_COSTS: Lazy = Lazy::new(|| { + let wasm_v1_config = WasmV1Config::new( + 17, // initial_memory + 19, // max_stack_height + EXPECTED_GENESIS_COSTS, + *EXPECTED_GENESIS_HOST_FUNCTION_COSTS, + ); + let wasm_v2_config = WasmV2Config::new( + 17, // initial_memory + EXPECTED_GENESIS_COSTS, + *EXPECTED_GENESIS_HOST_FUNCTION_COSTS_V2, + ); + WasmConfig::new(MessageLimits::default(), wasm_v1_config, wasm_v2_config) + }); + + #[test] + fn core_config_toml_roundtrip() { + let mut 
rng = crate::new_rng(); + let config = CoreConfig::random(&mut rng); + let encoded = toml::to_string_pretty(&config).unwrap(); + let decoded = toml::from_str(&encoded).unwrap(); + assert_eq!(config, decoded); + } + + #[test] + fn transaction_config_toml_roundtrip() { + let mut rng = crate::new_rng(); + let config = TransactionConfig::random(&mut rng); + let encoded = toml::to_string_pretty(&config).unwrap(); + let decoded = toml::from_str(&encoded).unwrap(); + assert_eq!(config, decoded); + } + + #[test] + fn protocol_config_toml_roundtrip() { + let mut rng = crate::new_rng(); + let config = ProtocolConfig::random(&mut rng); + let encoded = toml::to_string_pretty(&config).unwrap(); + let decoded = toml::from_str(&encoded).unwrap(); + assert_eq!(config, decoded); + } + + #[test] + fn highway_config_toml_roundtrip() { + let mut rng = crate::new_rng(); + let config = HighwayConfig::random(&mut rng); + let encoded = toml::to_string_pretty(&config).unwrap(); + let decoded = toml::from_str(&encoded).unwrap(); + assert_eq!(config, decoded); + } + + #[test] + fn should_validate_round_length() { + let (mut chainspec, _) = <(Chainspec, ChainspecRawBytes)>::from_resources("local"); + + // Minimum block time greater than maximum round length. 
+ chainspec.core_config.consensus_protocol = ConsensusProtocolName::Highway; + chainspec.core_config.minimum_block_time = TimeDiff::from_millis(8); + chainspec.highway_config.maximum_round_length = TimeDiff::from_millis(7); + assert!( + !validate_chainspec(&chainspec), + "chainspec should not be valid" + ); + + chainspec.core_config.minimum_block_time = TimeDiff::from_millis(7); + chainspec.highway_config.maximum_round_length = TimeDiff::from_millis(7); + assert!(validate_chainspec(&chainspec), "chainspec should be valid"); + } + + #[ignore = "We probably need to reconsider our approach here"] + #[test] + fn should_have_deterministic_chainspec_hash() { + const PATH: &str = "test/valid/0_9_0"; + const PATH_UNORDERED: &str = "test/valid/0_9_0_unordered"; + + let accounts: Vec = { + let path = RESOURCES_PATH.join(PATH).join("accounts.toml"); + fs::read(path).expect("should read file") + }; + + let accounts_unordered: Vec = { + let path = RESOURCES_PATH.join(PATH_UNORDERED).join("accounts.toml"); + fs::read(path).expect("should read file") + }; + + // Different accounts.toml file content + assert_ne!(accounts, accounts_unordered); + + let (chainspec, _) = <(Chainspec, ChainspecRawBytes)>::from_resources(PATH); + let (chainspec_unordered, _) = + <(Chainspec, ChainspecRawBytes)>::from_resources(PATH_UNORDERED); + + // Deserializes into equal objects + assert_eq!(chainspec, chainspec_unordered); + + // With equal hashes + assert_eq!(chainspec.hash(), chainspec_unordered.hash()); + } + + #[test] + fn should_have_valid_finality_threshold() { + let mut rng = crate::new_rng(); + let mut core_config = CoreConfig::random(&mut rng); + // Should be valid for FTT > 0 and < 1. 
+ core_config.finality_threshold_fraction = Ratio::new(1, u64::MAX); + assert!( + validate_core_config(&core_config), + "1 over max should be valid ftt" + ); + core_config.finality_threshold_fraction = Ratio::new(u64::MAX - 1, u64::MAX); + assert!( + validate_core_config(&core_config), + "less than max over max should be valid ftt" + ); + core_config.finality_threshold_fraction = Ratio::new(0, 1); + assert!( + !validate_core_config(&core_config), + "FTT == 0 or >= 1 should be invalid ftt" + ); + core_config.finality_threshold_fraction = Ratio::new(1, 1); + assert!( + !validate_core_config(&core_config), + "1 over 1 should be invalid ftt" + ); + core_config.finality_threshold_fraction = Ratio::new(u64::MAX, u64::MAX); + assert!( + !validate_core_config(&core_config), + "max over max should be invalid ftt" + ); + core_config.finality_threshold_fraction = Ratio::new(u64::MAX, u64::MAX - 1); + assert!( + !validate_core_config(&core_config), + "max over less than max should be invalid ftt" + ); + } + + #[test] + fn should_have_valid_transaction_counts() { + let transaction_v1_config = TransactionV1Config::default(); + + let transaction_v1_config = + transaction_v1_config.with_count_limits(Some(100), Some(1), None, None); + + let transaction_config = TransactionConfig { + block_max_approval_count: 100, + transaction_v1_config, + ..Default::default() + }; + assert!( + !validate_transaction_config(&transaction_config), + "max approval count that is not at least equal to sum of `block_max_[txn type]_count`s \ + should be invalid" + ); + + let transaction_v1_config = TransactionV1Config::default(); + + let transaction_v1_config = + transaction_v1_config.with_count_limits(Some(100), Some(50), Some(25), Some(25)); + + let transaction_config = TransactionConfig { + block_max_approval_count: 200, + transaction_v1_config, + ..Default::default() + }; + assert!( + validate_transaction_config(&transaction_config), + "max approval count equal to sum of `block_max_[txn type]_count`s 
should be valid" + ); + + let transaction_v1_config = TransactionV1Config::default(); + let transaction_v1_config = + transaction_v1_config.with_count_limits(Some(100), Some(50), Some(25), Some(24)); + + let transaction_config = TransactionConfig { + block_max_approval_count: 200, + transaction_v1_config, + ..Default::default() + }; + assert!( + validate_transaction_config(&transaction_config), + "max approval count greater than sum of `block_max_[txn type]_count`s should be valid" + ); + } + + #[test] + fn should_perform_checks_with_global_state_update() { + let mut rng = crate::new_rng(); + let mut protocol_config = ProtocolConfig::random(&mut rng); + + // We force `global_state_update` to be `Some`. + protocol_config.global_state_update = Some(GlobalStateUpdate::random(&mut rng)); + + // TODO: seems like either protocol config validity should be implemented, or this sham of + // a test should be removed. + assert!(validate_protocol_config(&protocol_config), "currently there are no validation rules for this config, so minimal type correctness should be valid"); + } + + #[test] + fn should_perform_checks_without_global_state_update() { + let mut rng = crate::new_rng(); + let mut protocol_config = ProtocolConfig::random(&mut rng); + + // We force `global_state_update` to be `None`. + protocol_config.global_state_update = None; + + // TODO: seems like either protocol config validity should be implemented, or this sham of + // a test should be removed. 
+ assert!(validate_protocol_config(&protocol_config), "currently there are no validation rules for this config, so minimal type correctness should be valid"); + } + + #[test] + fn should_recognize_blocks_before_activation_point() { + let past_version = ProtocolVersion::from_parts(1, 0, 0); + let current_version = ProtocolVersion::from_parts(2, 0, 0); + let future_version = ProtocolVersion::from_parts(3, 0, 0); + + let upgrade_era = EraId::from(5); + let previous_era = upgrade_era.saturating_sub(1); + + let rng = &mut crate::new_rng(); + let protocol_config = ProtocolConfig { + version: current_version, + hard_reset: false, + activation_point: ActivationPoint::EraId(upgrade_era), + global_state_update: None, + }; + + let block = TestBlockBuilder::new() + .era(previous_era) + .height(100) + .protocol_version(past_version) + .switch_block(true) + .build(rng); + assert!( + block + .header() + .is_last_block_before_activation(&protocol_config), + "The block before this protocol version: a switch block with previous era and version." + ); + + // + let block = TestBlockBuilder::new() + .era(upgrade_era) + .height(100) + .protocol_version(past_version) + .switch_block(true) + .build(rng); + assert!( + !block + .header() + .is_last_block_before_activation(&protocol_config), + "Not the activation point: wrong era." + ); + let block = TestBlockBuilder::new() + .era(previous_era) + .height(100) + .protocol_version(current_version) + .switch_block(true) + .build(rng); + assert!( + !block + .header() + .is_last_block_before_activation(&protocol_config), + "Not the activation point: wrong version." 
+ ); + + let block = TestBlockBuilder::new() + .era(previous_era) + .height(100) + .protocol_version(future_version) + .switch_block(true) + .build(rng); + assert!( + !block + .header() + .is_last_block_before_activation(&protocol_config), + "Alleged upgrade is in the past" + ); + + let block = TestBlockBuilder::new() + .era(previous_era) + .height(100) + .protocol_version(past_version) + .switch_block(false) + .build(rng); + assert!( + !block + .header() + .is_last_block_before_activation(&protocol_config), + "Not the activation point: not a switch block." + ); + } + + #[test] + fn should_have_valid_production_chainspec() { + init_logging(); + + let (chainspec, _raw_bytes): (Chainspec, ChainspecRawBytes) = + Loadable::from_resources("production"); + + assert!(validate_chainspec(&chainspec)); + } + + fn check_spec(spec: Chainspec, is_first_version: bool) { + if is_first_version { + assert_eq!( + spec.protocol_config.version, + ProtocolVersion::from_parts(0, 9, 0) + ); + assert_eq!( + spec.protocol_config.activation_point.genesis_timestamp(), + Some(Timestamp::from(1600454700000)) + ); + assert_eq!(spec.network_config.accounts_config.accounts().len(), 4); + + let accounts: Vec<_> = { + let mut accounts = spec.network_config.accounts_config.accounts().to_vec(); + accounts.sort_by_key(|account_config| { + (account_config.balance(), account_config.bonded_amount()) + }); + accounts + }; + + for (index, account_config) in accounts.into_iter().enumerate() { + assert_eq!(account_config.balance(), Motes::new(index + 1),); + assert_eq!( + account_config.bonded_amount(), + Motes::new((index as u64 + 1) * 10) + ); + } + } else { + assert_eq!( + spec.protocol_config.version, + ProtocolVersion::from_parts(1, 0, 0) + ); + assert_eq!( + spec.protocol_config.activation_point.era_id(), + EraId::from(1) + ); + assert!(spec.network_config.accounts_config.accounts().is_empty()); + assert!(spec.protocol_config.global_state_update.is_some()); + assert!(spec + .protocol_config + 
.global_state_update + .as_ref() + .unwrap() + .validators + .is_some()); + for value in spec + .protocol_config + .global_state_update + .unwrap() + .entries + .values() + { + assert!(StoredValue::from_bytes(value).is_ok()); + } + } + + assert_eq!(spec.network_config.name, "test-chain"); + + assert_eq!(spec.core_config.era_duration, TimeDiff::from_seconds(180)); + assert_eq!(spec.core_config.minimum_era_height, 9); + assert_eq!( + spec.core_config.finality_threshold_fraction, + Ratio::new(2, 25) + ); + assert_eq!( + spec.highway_config.maximum_round_length, + TimeDiff::from_seconds(525) + ); + + assert_eq!( + spec.transaction_config.deploy_config.max_payment_cost, + Motes::new(9) + ); + assert_eq!( + spec.transaction_config.max_ttl, + TimeDiff::from_seconds(26_300_160) + ); + assert_eq!(spec.transaction_config.max_block_size, 12); + assert_eq!( + spec.transaction_config + .transaction_v1_config + .get_max_transaction_count(MINT_LANE_ID), + 125 + ); + assert_eq!(spec.transaction_config.block_gas_limit, 13); + + assert_eq!(spec.wasm_config, *EXPECTED_GENESIS_WASM_COSTS); + } + + #[ignore = "We probably need to reconsider our approach here"] + #[test] + fn check_bundled_spec() { + let (chainspec, _) = <(Chainspec, ChainspecRawBytes)>::from_resources("test/valid/0_9_0"); + check_spec(chainspec, true); + let (chainspec, _) = <(Chainspec, ChainspecRawBytes)>::from_resources("test/valid/1_0_0"); + check_spec(chainspec, false); + } + + #[test] + fn should_fail_when_wasm_lanes_have_duplicate_max_transaction_length() { + let mut v1_config = TransactionV1Config::default(); + let definition_1 = TransactionLaneDefinition { + id: 3, + max_transaction_length: 100, + max_transaction_args_length: 100, + max_transaction_gas_limit: 100, + max_transaction_count: 10, + }; + let definition_2 = TransactionLaneDefinition { + id: 4, + max_transaction_length: 10000, + max_transaction_args_length: 100, + max_transaction_gas_limit: 101, + max_transaction_count: 10, + }; + let definition_3 = 
TransactionLaneDefinition { + id: 5, + max_transaction_length: 1000, + max_transaction_args_length: 100, + max_transaction_gas_limit: 102, + max_transaction_count: 10, + }; + v1_config.set_wasm_lanes(vec![ + definition_1.clone(), + definition_2.clone(), + definition_3.clone(), + ]); + let transaction_config = TransactionConfig { + transaction_v1_config: v1_config.clone(), + ..Default::default() + }; + assert!(validate_transaction_config(&transaction_config)); + let mut definition_2 = definition_2.clone(); + definition_2.max_transaction_length = definition_1.max_transaction_length; + v1_config.set_wasm_lanes(vec![ + definition_1.clone(), + definition_2.clone(), + definition_3.clone(), + ]); + let transaction_config = TransactionConfig { + transaction_v1_config: v1_config, + ..Default::default() + }; + assert!(!validate_transaction_config(&transaction_config)); + } + + #[test] + fn should_fail_when_wasm_lanes_have_duplicate_max_gas_price() { + let mut v1_config = TransactionV1Config::default(); + let definition_1 = TransactionLaneDefinition { + id: 3, + max_transaction_length: 100, + max_transaction_args_length: 100, + max_transaction_gas_limit: 100, + max_transaction_count: 10, + }; + let definition_2 = TransactionLaneDefinition { + id: 4, + max_transaction_length: 10000, + max_transaction_args_length: 100, + max_transaction_gas_limit: 101, + max_transaction_count: 10, + }; + let definition_3 = TransactionLaneDefinition { + id: 5, + max_transaction_length: 1000, + max_transaction_args_length: 100, + max_transaction_gas_limit: 102, + max_transaction_count: 10, + }; + v1_config.set_wasm_lanes(vec![ + definition_1.clone(), + definition_2.clone(), + definition_3.clone(), + ]); + let transaction_config = TransactionConfig { + transaction_v1_config: v1_config.clone(), + ..Default::default() + }; + assert!(validate_transaction_config(&transaction_config)); + let mut definition_2 = definition_2.clone(); + definition_2.max_transaction_gas_limit = 
definition_1.max_transaction_gas_limit; + v1_config.set_wasm_lanes(vec![ + definition_1.clone(), + definition_2.clone(), + definition_3.clone(), + ]); + let transaction_config = TransactionConfig { + transaction_v1_config: v1_config, + ..Default::default() + }; + assert!(!validate_transaction_config(&transaction_config)); + } + + #[test] + fn should_fail_when_wasm_lanes_have_reseved_ids() { + fail_validation_with_lane_id(MINT_LANE_ID); + fail_validation_with_lane_id(AUCTION_LANE_ID); + fail_validation_with_lane_id(INSTALL_UPGRADE_LANE_ID); + } + + fn fail_validation_with_lane_id(lane_id: u8) { + let mut v1_config = TransactionV1Config::default(); + let definition_1 = TransactionLaneDefinition { + id: lane_id, + max_transaction_length: 100, + max_transaction_args_length: 100, + max_transaction_gas_limit: 100, + max_transaction_count: 10, + }; + v1_config.set_wasm_lanes(vec![definition_1.clone()]); + let transaction_config = TransactionConfig { + transaction_v1_config: v1_config.clone(), + ..Default::default() + }; + assert!(!validate_transaction_config(&transaction_config)); + } + + #[test] + fn should_valid_no_wasm_lanes() { + let mut v1_config = TransactionV1Config::default(); + v1_config.set_wasm_lanes(vec![]); + let transaction_config = TransactionConfig { + transaction_v1_config: v1_config.clone(), + ..Default::default() + }; + assert!(!validate_transaction_config(&transaction_config)); + } +} diff --git a/node/src/utils/chain_specification/error.rs b/node/src/utils/chain_specification/error.rs new file mode 100644 index 0000000000..76a6917bef --- /dev/null +++ b/node/src/utils/chain_specification/error.rs @@ -0,0 +1,68 @@ +use thiserror::Error; +use uint::FromDecStrErr; + +use casper_types::{file_utils::ReadFileError, GlobalStateUpdateError}; + +/// Error returned when loading the chainspec. +#[derive(Debug, Error)] +pub enum Error { + /// Error while decoding the chainspec from TOML format. 
+ #[error("decoding from TOML error: {0}")] + DecodingFromToml(#[from] toml::de::Error), + + /// Error while decoding Motes from a decimal format. + #[error("decoding motes from base-10 error: {0}")] + DecodingMotes(#[from] FromDecStrErr), + + /// Error loading the chainspec. + #[error("could not load chainspec: {0}")] + LoadChainspec(ReadFileError), + + /// Error loading the chainspec accounts. + #[error("could not load chainspec accounts: {0}")] + LoadChainspecAccounts(#[from] ChainspecAccountsLoadError), + + /// Error loading the global state update. + #[error("could not load the global state update: {0}")] + LoadGlobalStateUpgrade(#[from] GlobalStateUpdateLoadError), +} + +/// Error loading chainspec accounts file. +#[derive(Debug, Error)] +pub enum ChainspecAccountsLoadError { + /// Error loading the accounts file. + #[error("could not load accounts: {0}")] + LoadAccounts(#[from] ReadFileError), + + /// Error while decoding the chainspec accounts from TOML format. + #[error("decoding from TOML error: {0}")] + DecodingFromToml(#[from] toml::de::Error), + + /// Error while decoding a chainspec account's key hash from hex format. + #[error("decoding from hex error: {0}")] + DecodingFromHex(#[from] base16::DecodeError), + + /// Error while decoding Motes from a decimal format. + #[error("decoding motes from base-10 error: {0}")] + DecodingMotes(#[from] FromDecStrErr), + + /// Error while decoding a chainspec account's key hash from base-64 format. + #[error("crypto module error: {0}")] + Crypto(#[from] casper_types::crypto::ErrorExt), +} + +/// Error loading global state update file. +#[derive(Debug, Error)] +pub enum GlobalStateUpdateLoadError { + /// Error loading the accounts file. + #[error("could not load the file: {0}")] + LoadFile(#[from] ReadFileError), + + /// Error while decoding the chainspec accounts from TOML format. + #[error("decoding from TOML error: {0}")] + DecodingFromToml(#[from] toml::de::Error), + + /// Error decoding kvp items. 
+ #[error("decoding key value entries error: {0}")] + DecodingKeyValuePairs(#[from] GlobalStateUpdateError), +} diff --git a/node/src/utils/chain_specification/parse_toml.rs b/node/src/utils/chain_specification/parse_toml.rs new file mode 100644 index 0000000000..1a3ce947a3 --- /dev/null +++ b/node/src/utils/chain_specification/parse_toml.rs @@ -0,0 +1,216 @@ +//! Helper struct and function for parsing a chainspec configuration file into its respective domain +//! object. +//! +//! The runtime representation defined by the chainspec object graph is all-inclusive. +//! However, as an implementation detail, the reference implementation splits the data up into +//! multiple topical files. +//! +//! In addition to the mandatory base chainspec file, there is a file containing genesis account +//! definitions for a given network (produced at genesis). This file contains all accounts that will +//! be (or were, historically) created at genesis, their initial balances, initial staking (both +//! validators and delegators). The total initial supply of a new network is a consequence of the +//! sum of the token issued to these accounts. For a test network or small sidechain, the contents +//! of this file might be small but for a full sized network there is quite a lot of data. +//! +//! +//! Further, when protocol version upgrades are put forth they are allowed to have a file containing +//! proposed changes to global state that if accepted will be applied as of the upgrade's block +//! height and onward. This file is optional (more clearly, on an as needed basis only), a given +//! network might not ever have such a file over its lifetime, and the contents of the file can +//! be arbitrarily large as it contains encoded bytes of data. Each such file is directly associated +//! to the specific chainspec file the changes are proposed with; each one is essentially a one off. +//! +//! 
This capability can and has been used to allow the introduction of new capabilities to the +//! system which require some introduction of value(s) to global state to enable; this is a purely +//! additive / extension type upgrade. However, this capability can also be leveraged as part of a +//! social consensus to make changes to the validator set and / or to assert new values for existing +//! global state entries. In either case, the contents of the file are parseable and verifiable in +//! advance of their acceptance and application to a given network. + +use std::{convert::TryFrom, path::Path}; + +use serde::{Deserialize, Serialize}; + +use casper_types::{ + bytesrepr::Bytes, file_utils, AccountsConfig, ActivationPoint, Chainspec, ChainspecRawBytes, + CoreConfig, GlobalStateUpdate, GlobalStateUpdateConfig, HighwayConfig, NetworkConfig, + ProtocolConfig, ProtocolVersion, StorageCosts, SystemConfig, TransactionConfig, VacancyConfig, + WasmConfig, +}; + +use crate::utils::{ + chain_specification::error::{ChainspecAccountsLoadError, Error, GlobalStateUpdateLoadError}, + Loadable, +}; + +// The names of chainspec related files on disk. +/// The chainspec file name. +pub const CHAINSPEC_FILENAME: &str = "chainspec.toml"; +/// The genesis accounts file name. +pub const CHAINSPEC_ACCOUNTS_FILENAME: &str = "accounts.toml"; +/// The global state update file name. +pub const CHAINSPEC_GLOBAL_STATE_FILENAME: &str = "global_state.toml"; + +#[derive(PartialEq, Eq, Serialize, Deserialize, Debug)] +// Disallow unknown fields to ensure config files and command-line overrides contain valid keys. +#[serde(deny_unknown_fields)] +struct TomlNetwork { + name: String, + maximum_net_message_size: u32, +} + +#[derive(PartialEq, Eq, Serialize, Deserialize, Debug)] +// Disallow unknown fields to ensure config files and command-line overrides contain valid keys. 
+#[serde(deny_unknown_fields)] +struct TomlProtocol { + version: ProtocolVersion, + hard_reset: bool, + activation_point: ActivationPoint, +} + +/// A chainspec configuration as laid out in the TOML-encoded configuration file. +#[derive(PartialEq, Eq, Serialize, Deserialize, Debug)] +// Disallow unknown fields to ensure config files and command-line overrides contain valid keys. +#[serde(deny_unknown_fields)] +pub(super) struct TomlChainspec { + protocol: TomlProtocol, + network: TomlNetwork, + core: CoreConfig, + transactions: TransactionConfig, + highway: HighwayConfig, + wasm: WasmConfig, + system_costs: SystemConfig, + vacancy: VacancyConfig, + storage_costs: StorageCosts, +} + +impl From<&Chainspec> for TomlChainspec { + fn from(chainspec: &Chainspec) -> Self { + let protocol = TomlProtocol { + version: chainspec.protocol_config.version, + hard_reset: chainspec.protocol_config.hard_reset, + activation_point: chainspec.protocol_config.activation_point, + }; + let network = TomlNetwork { + name: chainspec.network_config.name.clone(), + maximum_net_message_size: chainspec.network_config.maximum_net_message_size, + }; + let core = chainspec.core_config.clone(); + let transactions = chainspec.transaction_config.clone(); + let highway = chainspec.highway_config; + let wasm = chainspec.wasm_config; + let system_costs = chainspec.system_costs_config; + let vacancy = chainspec.vacancy_config; + let storage_costs = chainspec.storage_costs; + + TomlChainspec { + protocol, + network, + core, + transactions, + highway, + wasm, + system_costs, + vacancy, + storage_costs, + } + } +} + +pub(super) fn parse_toml>( + chainspec_path: P, +) -> Result<(Chainspec, ChainspecRawBytes), Error> { + let chainspec_bytes = + file_utils::read_file(chainspec_path.as_ref()).map_err(Error::LoadChainspec)?; + let toml_chainspec: TomlChainspec = + toml::from_str(std::str::from_utf8(&chainspec_bytes).unwrap())?; + + let root = chainspec_path + .as_ref() + .parent() + .unwrap_or_else(|| 
Path::new("")); + + // accounts.toml must live in the same directory as chainspec.toml. + let (accounts_config, maybe_genesis_accounts_bytes) = parse_toml_accounts(root)?; + + let network_config = NetworkConfig { + name: toml_chainspec.network.name, + accounts_config, + maximum_net_message_size: toml_chainspec.network.maximum_net_message_size, + }; + + // global_state_update.toml must live in the same directory as chainspec.toml. + let (global_state_update, maybe_global_state_bytes) = match parse_toml_global_state(root)? { + Some((config, bytes)) => ( + Some( + GlobalStateUpdate::try_from(config) + .map_err(GlobalStateUpdateLoadError::DecodingKeyValuePairs)?, + ), + Some(bytes), + ), + None => (None, None), + }; + + let protocol_config = ProtocolConfig { + version: toml_chainspec.protocol.version, + hard_reset: toml_chainspec.protocol.hard_reset, + activation_point: toml_chainspec.protocol.activation_point, + global_state_update, + }; + + let chainspec = Chainspec { + protocol_config, + network_config, + core_config: toml_chainspec.core, + transaction_config: toml_chainspec.transactions, + highway_config: toml_chainspec.highway, + wasm_config: toml_chainspec.wasm, + system_costs_config: toml_chainspec.system_costs, + vacancy_config: toml_chainspec.vacancy, + storage_costs: toml_chainspec.storage_costs, + }; + let chainspec_raw_bytes = ChainspecRawBytes::new( + Bytes::from(chainspec_bytes), + maybe_genesis_accounts_bytes, + maybe_global_state_bytes, + ); + + Ok((chainspec, chainspec_raw_bytes)) +} + +impl Loadable for (Chainspec, ChainspecRawBytes) { + type Error = Error; + + fn from_path>(path: P) -> Result { + parse_toml(path.as_ref().join(CHAINSPEC_FILENAME)) + } +} + +/// Returns `Self` and the raw bytes of the file. +/// +/// If the file doesn't exist, returns `Ok` with an empty `AccountsConfig` and `None` bytes. 
+pub(super) fn parse_toml_accounts>( + dir_path: P, +) -> Result<(AccountsConfig, Option), ChainspecAccountsLoadError> { + let accounts_path = dir_path.as_ref().join(CHAINSPEC_ACCOUNTS_FILENAME); + if !accounts_path.is_file() { + let config = AccountsConfig::new(vec![], vec![], vec![]); + let maybe_bytes = None; + return Ok((config, maybe_bytes)); + } + let bytes = file_utils::read_file(accounts_path)?; + let config: AccountsConfig = toml::from_str(std::str::from_utf8(&bytes).unwrap())?; + Ok((config, Some(Bytes::from(bytes)))) +} + +pub(super) fn parse_toml_global_state>( + path: P, +) -> Result, GlobalStateUpdateLoadError> { + let update_path = path.as_ref().join(CHAINSPEC_GLOBAL_STATE_FILENAME); + if !update_path.is_file() { + return Ok(None); + } + let bytes = file_utils::read_file(update_path)?; + let config = toml::from_str(std::str::from_utf8(&bytes).unwrap())?; + Ok(Some((config, Bytes::from(bytes)))) +} diff --git a/node/src/utils/config_specification.rs b/node/src/utils/config_specification.rs new file mode 100644 index 0000000000..9888e3a108 --- /dev/null +++ b/node/src/utils/config_specification.rs @@ -0,0 +1,34 @@ +use crate::MainReactorConfig as Config; + +pub fn validate_config(config: &Config) -> bool { + if config.network.blocklist_retain_max_duration < config.network.blocklist_retain_min_duration { + return false; + } + true +} + +#[cfg(test)] +mod tests { + use super::validate_config; + use crate::MainReactorConfig as Config; + use casper_types::TimeDiff; + + #[test] + fn validate_config_should_fail_malformed_blocklist_definition() { + let mut config = Config::default(); + config.network.blocklist_retain_max_duration = TimeDiff::from_seconds(10); + config.network.blocklist_retain_min_duration = TimeDiff::from_seconds(11); + assert!(!validate_config(&config)); + } + + #[test] + fn validate_config_should_not_fail_when_blocklist_definitions_are_ok() { + let mut config = Config::default(); + config.network.blocklist_retain_max_duration = 
TimeDiff::from_seconds(11); + config.network.blocklist_retain_min_duration = TimeDiff::from_seconds(10); + assert!(validate_config(&config)); + config.network.blocklist_retain_max_duration = TimeDiff::from_seconds(10); + config.network.blocklist_retain_min_duration = TimeDiff::from_seconds(10); + assert!(validate_config(&config)); + } +} diff --git a/node/src/utils/counting_channel.rs b/node/src/utils/counting_channel.rs deleted file mode 100644 index 0cd42c9d41..0000000000 --- a/node/src/utils/counting_channel.rs +++ /dev/null @@ -1,189 +0,0 @@ -//! Support for counting channels. -//! -//! Regular tokio channels do not make the number of pending items available. Counting channels wrap -//! regular tokio channels but keep a counter of the number of items up-to-date with every `send` -//! and `recv`. The `len` method can be used to retrieve the number of items. -//! -//! The channel also counts the heap memory used by items on the stack if `DataSize` is implemented -//! for `T` and `send_datasized` is used instead of send. Internally it stores the size of each item -//! instead of recalculating on `recv` to avoid underflows due to interior mutability. - -use std::{ - mem, - sync::{ - atomic::{AtomicUsize, Ordering}, - Arc, - }, -}; - -use datasize::DataSize; -use tokio::sync::mpsc::{error::SendError, unbounded_channel, UnboundedReceiver, UnboundedSender}; - -/// Create an unbounded tokio channel, wrapped in counting sender/receiver. -pub fn counting_unbounded_channel() -> (CountingSender, CountingReceiver) { - let (tx, rx) = unbounded_channel(); - let counter = Arc::new(AtomicUsize::new(0)); - let memory_used = Arc::new(AtomicUsize::new(0)); - - ( - CountingSender { - sender: tx, - counter: counter.clone(), - memory_used: memory_used.clone(), - }, - CountingReceiver { - receiver: rx, - counter, - memory_used, - }, - ) -} - -/// Counting sender. 
-#[derive(Debug)] -pub struct CountingSender { - sender: UnboundedSender<(usize, T)>, - counter: Arc, - memory_used: Arc, -} - -impl CountingSender { - /// Internal sending function. - // This function allows implementing a non-counting `send` function if needed. - #[inline] - fn do_send(&self, size: usize, message: T) -> Result> { - // We increase the counters before attempting to add values, to avoid a race that would - // occur if a receiver fetches the item in between, which would cause a usize underflow. - self.memory_used.fetch_add(size, Ordering::SeqCst); - let count = self.counter.fetch_add(1, Ordering::SeqCst); - - self.sender - .send((size, message)) - .map_err(|err| { - // Item was rejected, correct counts. - self.memory_used.fetch_sub(size, Ordering::SeqCst); - self.counter.fetch_sub(1, Ordering::SeqCst); - SendError(err.0 .1) - }) - .map(|_| count) - } - - /// Returns the count, i.e. messages currently inside the channel. - #[inline] - #[allow(dead_code)] // TODO: Remove once this function is used. - pub fn len(&self) -> usize { - self.counter.load(Ordering::SeqCst) - } -} - -impl CountingSender -where - T: DataSize, -{ - /// Sends a message down the channel, recording heap memory usage and count. - #[inline] - pub fn send_datasized(&self, message: T) -> Result> { - self.do_send( - message.estimate_heap_size() + mem::size_of::<(usize, T)>(), - message, - ) - } -} - -impl DataSize for CountingSender -where - T: DataSize, -{ - const IS_DYNAMIC: bool = T::IS_DYNAMIC; - const STATIC_HEAP_SIZE: usize = 0; - - fn estimate_heap_size(&self) -> usize { - self.memory_used.load(Ordering::SeqCst) - } -} - -pub struct CountingReceiver { - receiver: UnboundedReceiver<(usize, T)>, - counter: Arc, - memory_used: Arc, -} - -impl CountingReceiver { - /// Receives a message from the channel, decreasing the count on success. 
- #[inline] - pub async fn recv(&mut self) -> Option { - self.receiver.recv().await.map(|(size, value)| { - self.memory_used.fetch_sub(size, Ordering::SeqCst); - self.counter.fetch_sub(1, Ordering::SeqCst); - value - }) - } - - /// Returns the count, i.e. messages currently inside the channel. - #[inline] - #[allow(dead_code)] // TODO: Remove once this function is used. - pub fn len(&self) -> usize { - self.counter.load(Ordering::SeqCst) - } -} - -impl DataSize for CountingReceiver -where - T: DataSize, -{ - const IS_DYNAMIC: bool = T::IS_DYNAMIC; - const STATIC_HEAP_SIZE: usize = 0; - - fn estimate_heap_size(&self) -> usize { - self.memory_used.load(Ordering::SeqCst) - } -} - -#[cfg(test)] -mod tests { - use std::mem::size_of; - - use datasize::DataSize; - - use super::counting_unbounded_channel; - - #[tokio::test] - async fn test_counting_channel() { - let item_in_queue_size = size_of::<(usize, u32)>(); - let (tx, mut rc) = counting_unbounded_channel::(); - - assert_eq!(tx.len(), 0); - assert_eq!(rc.len(), 0); - assert_eq!(tx.estimate_heap_size(), 0); - assert_eq!(rc.estimate_heap_size(), 0); - - tx.send_datasized(99).unwrap(); - tx.send_datasized(100).unwrap(); - tx.send_datasized(101).unwrap(); - tx.send_datasized(102).unwrap(); - tx.send_datasized(103).unwrap(); - - assert_eq!(tx.len(), 5); - assert_eq!(rc.len(), 5); - - assert_eq!(tx.estimate_heap_size(), 5 * item_in_queue_size); - assert_eq!(rc.estimate_heap_size(), 5 * item_in_queue_size); - - rc.recv().await.unwrap(); - rc.recv().await.unwrap(); - - assert_eq!(tx.len(), 3); - assert_eq!(rc.len(), 3); - - assert_eq!(tx.estimate_heap_size(), 3 * item_in_queue_size); - assert_eq!(rc.estimate_heap_size(), 3 * item_in_queue_size); - - tx.send_datasized(104).unwrap(); - - assert_eq!(tx.len(), 4); - assert_eq!(rc.len(), 4); - - assert_eq!(tx.estimate_heap_size(), 4 * item_in_queue_size); - assert_eq!(rc.estimate_heap_size(), 4 * item_in_queue_size); - } -} diff --git a/node/src/utils/display_error.rs 
b/node/src/utils/display_error.rs new file mode 100644 index 0000000000..a99cb712b5 --- /dev/null +++ b/node/src/utils/display_error.rs @@ -0,0 +1,88 @@ +//! Error formatting workaround. +//! +//! This module can be removed once/if the tracing issue +//! has been resolved, which adds a special syntax +//! for this case and the known issue has been +//! fixed, which cuts traces short after the first cause. +//! +//! In the meantime, the `display_error` function should be used to format errors in log messages. + +use std::{ + error, + fmt::{self, Display, Formatter}, +}; + +use tracing::field; + +/// Wraps an error to ensure it gets properly captured by tracing. +pub(crate) fn display_error<'a, T>(err: &'a T) -> field::DisplayValue> +where + T: error::Error + 'a, +{ + field::display(ErrFormatter(err)) +} + +/// An error formatter. +#[derive(Clone, Copy, Debug)] +pub(crate) struct ErrFormatter<'a, T>(pub &'a T); + +impl Display for ErrFormatter<'_, T> +where + T: error::Error, +{ + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + let mut opt_source: Option<&(dyn error::Error)> = Some(self.0); + + while let Some(source) = opt_source { + write!(f, "{}", source)?; + opt_source = source.source(); + + if opt_source.is_some() { + f.write_str(": ")?; + } + } + + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use thiserror::Error; + + use super::ErrFormatter; + + #[derive(Debug, Error)] + #[error("this is baz")] + struct Baz; + + #[derive(Debug, Error)] + #[error("this is bar")] + struct Bar(#[source] Baz); + + #[derive(Debug, Error)] + enum MyError { + #[error("this is foo")] + Foo { + #[source] + bar: Bar, + }, + } + + #[test] + fn test_formatter_formats_single() { + let single = Baz; + + assert_eq!(ErrFormatter(&single).to_string().as_str(), "this is baz"); + } + + #[test] + fn test_formatter_formats_nested() { + let nested = MyError::Foo { bar: Bar(Baz) }; + + assert_eq!( + ErrFormatter(&nested).to_string().as_str(), + "this is foo: this is bar: this is baz" + ); + } 
+} diff --git a/node/src/utils/ds.rs b/node/src/utils/ds.rs index 06c9653582..146e772e6b 100644 --- a/node/src/utils/ds.rs +++ b/node/src/utils/ds.rs @@ -1,11 +1,10 @@ //! Datasize helper functions. -use std::{ - collections::{HashMap, HashSet}, - mem, -}; +use std::collections::HashMap; use datasize::DataSize; +use either::Either; +use once_cell::sync::OnceCell; use rand::{ rngs::StdRng, seq::{IteratorRandom, SliceRandom}, @@ -15,21 +14,6 @@ use rand::{ /// Number of items to sample when sampling a large collection. const SAMPLE_SIZE: usize = 50; -/// Estimate memory usage of a hashmap of keys and values each with no heap allocations. -pub fn hash_map_fixed_size(hashmap: &HashMap) -> usize { - hashmap.capacity() * (mem::size_of::() + mem::size_of::() + mem::size_of::()) -} - -/// Estimate memory usage of a hashset of items with no heap allocations. -pub fn hash_set_fixed_size(hashset: &HashSet) -> usize { - hashset.capacity() * (mem::size_of::() + mem::size_of::()) -} - -/// Estimate memory usage of a vec of items with no heap allocations. -pub fn vec_fixed_size(vec: &Vec) -> usize { - vec.capacity() * mem::size_of::() -} - /// Creates an RNG for sampling based on the length of a collection. 
fn sampling_rng(len: usize) -> StdRng { // We use a fixed seed RNG here and hope the size will provide enough entropy to avoid gross @@ -63,13 +47,13 @@ where if vec.len() < SAMPLE_SIZE { vec.estimate_heap_size() } else { - let base_size = vec.capacity() * mem::size_of::(); + let base_size = vec.capacity() * size_of::(); let mut rng = sampling_rng(vec.len()); let sampled = vec .as_slice() .choose_multiple(&mut rng, SAMPLE_SIZE) - .map(|v| v.estimate_heap_size()) + .map(DataSize::estimate_heap_size) .sum(); base_size + scale_sample(vec.len(), sampled) } @@ -81,11 +65,27 @@ where K: DataSize, V: DataSize, { + // Copied from + // https://github.com/CasperLabs/datasize-rs/blob/e04c3251eb5473651a0abf55c18869acaef635c1/datasize/src/std.rs#L201-L220 + fn estimate_hashbrown_rawtable(capacity: usize) -> usize { + let buckets = if capacity < 8 { + if capacity < 4 { + 4 + } else { + 8 + } + } else { + (capacity * 8 / 7).next_power_of_two() + }; + let size = size_of::(); + let ctrl_offset = size * buckets; + ctrl_offset + buckets + } + if map.len() < SAMPLE_SIZE { map.estimate_heap_size() } else { - let base_size = - map.capacity() * (mem::size_of::() + mem::size_of::() + mem::size_of::()); + let base_size = estimate_hashbrown_rawtable::<(K, V)>(map.capacity()); let mut rng = sampling_rng(map.len()); @@ -100,6 +100,25 @@ where } } +pub(crate) fn once_cell(cell: &OnceCell) -> usize +where + T: DataSize, +{ + cell.get().map_or(0, |value| value.estimate_heap_size()) +} + +pub(crate) fn maybe_either(either: &Option>) -> usize +where + T: DataSize, + U: DataSize, +{ + match either { + None => 0, + Some(Either::Left(left)) => left.estimate_heap_size(), + Some(Either::Right(right)) => right.estimate_heap_size(), + } +} + #[cfg(test)] #[allow(clippy::assertions_on_constants)] // used by sanity checks around `SAMPLE_SIZE` mod tests { diff --git a/node/src/utils/external.rs b/node/src/utils/external.rs index 5884db0a73..83e0798574 100644 --- a/node/src/utils/external.rs +++ 
b/node/src/utils/external.rs @@ -6,8 +6,13 @@ use std::{ fmt::{Debug, Display}, path::{Path, PathBuf}, + sync::Arc, }; +use casper_types::{ + crypto, + file_utils::{read_file, ReadFileError}, +}; use datasize::DataSize; #[cfg(test)] use once_cell::sync::Lazy; @@ -20,8 +25,7 @@ use thiserror::Error; use casper_types::SecretKey; -use super::{read_file, ReadFileError}; -use crate::{crypto, crypto::AsymmetricKeyExt, tls}; +use crate::tls::{self, LoadCertError, LoadSecretKeyError}; /// Path to bundled resources. #[cfg(test)] @@ -39,38 +43,25 @@ pub static RESOURCES_PATH: Lazy = /// An `External` also always provides a default, which will always result in an error when `load` /// is called. Should the underlying type `T` implement `Default`, the `with_default` can be /// used instead. -#[derive(Clone, DataSize, Eq, Debug, Deserialize, PartialEq, Serialize)] +#[derive(Clone, DataSize, Eq, Debug, Deserialize, PartialEq, Serialize, Default)] #[serde(untagged)] -pub enum External { +pub enum External { /// Value that should be loaded from an external path. Path(PathBuf), - /// Loaded or immediate value. - #[serde(skip)] - Loaded(T), /// The value has not been specified, but a default has been requested. #[serde(skip)] + #[default] Missing, } -impl External { - /// Creates an external from a value. - pub fn from_value(value: T) -> Self { - External::Loaded(value) - } - - /// Creates an external referencing a path. - pub fn from_path>(path: P) -> Self { - External::Path(path.as_ref().to_owned()) - } -} - -impl External -where - T: Loadable, -{ +impl External { /// Loads the value if not loaded already, resolving relative paths from `root` or returns /// available value. If the value is `Missing`, returns an error. 
- pub fn load>(self, root: P) -> Result> { + pub fn load(self, root: P) -> Result> + where + T: Loadable, + P: AsRef, + { match self { External::Path(path) => { let full_path = if path.is_relative() { @@ -87,35 +78,9 @@ where path: full_path.canonicalize().unwrap_or(full_path), }) } - External::Loaded(value) => Ok(value), External::Missing => Err(LoadError::Missing), } } - - /// Returns the full path to the external item, or `None` if the type is `Loaded` or `Missing`. - pub fn full_path>(&self, root: P) -> Option { - match self { - External::Path(path) => Some(if path.is_relative() { - root.as_ref().join(&path) - } else { - path.clone() - }), - _ => None, - } - } -} - -impl External -where - T: Loadable + Default, -{ - /// Insert a default value if missing. - pub fn with_default(self) -> Self { - match self { - External::Missing => External::Loaded(Default::default()), - _ => self, - } - } } /// A value that can be loaded from a file. @@ -139,12 +104,6 @@ pub trait Loadable: Sized { } } -impl Default for External { - fn default() -> Self { - External::Missing - } -} - fn display_res_path(result: &Result) -> String { result .as_ref() @@ -173,7 +132,16 @@ impl Loadable for X509 { type Error = anyhow::Error; fn from_path>(path: P) -> Result { - tls::load_cert(path) + let error = match tls::load_cert(path) { + Ok(cert) => return Ok(cert), + Err(LoadCertError::ReadFile(error)) => { + anyhow::Error::new(error).context("failed to load certificate") + } + Err(LoadCertError::X509CertFromPem(error)) => { + anyhow::Error::new(error).context("parsing certificate") + } + }; + Err(error) } } @@ -181,15 +149,25 @@ impl Loadable for PKey { type Error = anyhow::Error; fn from_path>(path: P) -> Result { - tls::load_private_key(path) + let error = match tls::load_secret_key(path) { + Ok(secret_key) => return Ok(secret_key), + Err(LoadSecretKeyError::ReadFile(error)) => { + anyhow::Error::new(error).context("failed to load private key") + } + 
Err(LoadSecretKeyError::PrivateKeyFromPem(error)) => { + anyhow::Error::new(error).context("parsing private key") + } + }; + + Err(error) } } -impl Loadable for SecretKey { - type Error = crypto::Error; +impl Loadable for Arc { + type Error = crypto::ErrorExt; fn from_path>(path: P) -> Result { - AsymmetricKeyExt::from_file(path) + Ok(Arc::new(SecretKey::from_file(path)?)) } } @@ -207,7 +185,7 @@ mod tests { #[test] fn test_to_string() { - let val: External<()> = External::Path("foo/bar.toml".into()); + let val: External = External::Path("foo/bar.toml".into()); assert_eq!( "\"foo/bar.toml\"", serde_json::to_string(&val).expect("serialization error") @@ -218,7 +196,7 @@ mod tests { fn test_load_from_string() { let input = "\"foo/bar.toml\""; - let val: External<()> = serde_json::from_str(input).expect("deserialization failed"); + let val: External = serde_json::from_str(input).expect("deserialization failed"); assert_eq!(External::Path("foo/bar.toml".into()), val); } diff --git a/node/src/utils/fmt_limit.rs b/node/src/utils/fmt_limit.rs new file mode 100644 index 0000000000..26286a2b6f --- /dev/null +++ b/node/src/utils/fmt_limit.rs @@ -0,0 +1,121 @@ +//! Wrappers to display a limited amount of data from collections using `fmt`. + +use std::fmt::{self, Debug, Formatter, Write}; + +/// A display wrapper showing a limited amount of a formatted rendering. +/// +/// Any characters exceeding the given length will be omitted and replaced by `...`. +pub(crate) struct FmtLimit<'a, T> { + limit: usize, + item: &'a T, +} + +impl<'a, T> FmtLimit<'a, T> { + /// Creates a new limited formatter. + #[inline] + pub(crate) fn new(limit: usize, item: &'a T) -> Self { + FmtLimit { limit, item } + } +} + +/// Helper that limits writing to a given `fmt::Writer`. +struct LimitWriter<'a, W> { + /// The wrapper writer. + inner: &'a mut W, + /// How many characters are left. + left: usize, + /// Whether or not the writer is "closed". 
+ /// + /// Closing happens when an additional character is written after `left` has reached 0 and will + /// trigger the ellipses to be written out. + closed: bool, +} + +impl<'a, W> LimitWriter<'a, W> { + /// Constructs a new `LimitWriter`. + #[inline] + fn new(inner: &'a mut W, limit: usize) -> Self { + LimitWriter { + inner, + left: limit, + closed: false, + } + } +} + +impl Write for LimitWriter<'_, W> +where + W: Write, +{ + #[inline] + fn write_str(&mut self, s: &str) -> fmt::Result { + if self.closed { + return Ok(()); + } + + if self.left == 0 { + self.closed = true; + self.inner.write_str("...")?; + return Ok(()); + } + + // A tad bit slow, but required for correct unicode output. + for c in s.chars().take(self.left) { + self.write_char(c)?; + } + + Ok(()) + } + + #[inline] + fn write_char(&mut self, c: char) -> fmt::Result { + if self.closed { + return Ok(()); + } + + if self.left == 0 { + self.closed = true; + self.inner.write_str("...")?; + return Ok(()); + } + + self.left -= 1; + self.inner.write_char(c) + } +} + +impl Debug for FmtLimit<'_, T> +where + T: Debug, +{ + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + let mut limit_writer = LimitWriter::new(f, self.limit); + write!(&mut limit_writer, "{:?}", self.item) + } +} + +// Note: If required, a `Display` implementation can be added easily for `FmtLimit`. + +#[cfg(test)] +mod tests { + use crate::utils::fmt_limit::FmtLimit; + + #[test] + fn limit_debug_works() { + let collection: Vec<_> = (0..5).collect(); + + // Sanity check. 
+ assert_eq!(format!("{:?}", collection), "[0, 1, 2, 3, 4]"); + + assert_eq!(format!("{:?}", FmtLimit::new(3, &collection)), "[0,..."); + assert_eq!(format!("{:?}", FmtLimit::new(0, &collection)), "..."); + assert_eq!( + format!("{:?}", FmtLimit::new(1000, &collection)), + "[0, 1, 2, 3, 4]" + ); + assert_eq!( + format!("{:?}", FmtLimit::new(15, &collection)), + "[0, 1, 2, 3, 4]" + ); + } +} diff --git a/node/src/utils/milliseconds.rs b/node/src/utils/milliseconds.rs deleted file mode 100644 index 9d8d8f95df..0000000000 --- a/node/src/utils/milliseconds.rs +++ /dev/null @@ -1,50 +0,0 @@ -//! Load and store milliseconds directly as `Duration` using serde. - -use serde::{ser::Error, Deserialize, Deserializer, Serializer}; -use std::{convert::TryFrom, time::Duration}; - -/// Serializes a `Duration` as milliseconds. -/// -/// Limited to 64 bit. -pub fn serialize(value: &Duration, serializer: S) -> Result -where - S: Serializer, -{ - let ms = u64::try_from(value.as_millis()).map_err(|_err| { - S::Error::custom(format!( - "duration {:?} is too large to be convert down to 64-bit milliseconds", - value - )) - })?; - serializer.serialize_u64(ms) -} - -/// Deserializes a `Duration` as 64-bit milliseconds. 
-pub fn deserialize<'de, D>(deserializer: D) -> Result -where - D: Deserializer<'de>, -{ - let ms = u64::deserialize(deserializer)?; - - Ok(Duration::from_millis(ms)) -} - -#[cfg(test)] -mod tests { - use serde::{Deserialize, Serialize}; - use std::time::Duration; - - #[test] - fn round_trip() { - #[derive(Debug, Deserialize, PartialEq, Serialize)] - struct Example(#[serde(with = "super")] Duration); - - let value = Example(Duration::from_millis(12345)); - - let json = serde_json::to_string(&value).expect("serialization failed"); - assert_eq!(json, "12345"); - let deserialized = serde_json::from_str(&json).expect("deserialization failed"); - - assert_eq!(value, deserialized); - } -} diff --git a/node/src/utils/opt_display.rs b/node/src/utils/opt_display.rs new file mode 100644 index 0000000000..906a1031a9 --- /dev/null +++ b/node/src/utils/opt_display.rs @@ -0,0 +1,72 @@ +//! `Display` wrapper for optional values. +//! +//! Allows displaying an `Option`, where `T` already implements `Display`. + +use std::fmt::{Display, Formatter, Result}; + +use serde::Serialize; + +/// Wrapper around `Option` that implements `Display`. +/// +/// For convenience, it also includes a `Serialize` implementation that works identical to the +/// underlying `Option` serialization. +pub struct OptDisplay<'a, T> { + /// The actual `Option` being displayed. + inner: Option, + /// Value to substitute if `inner` is `None`. + empty_display: &'a str, +} + +impl Serialize for OptDisplay<'_, T> +where + T: Serialize, +{ + fn serialize(&self, serializer: S) -> core::result::Result + where + S: serde::Serializer, + { + self.inner.serialize(serializer) + } +} + +impl<'a, T: Display> OptDisplay<'a, T> { + /// Creates a new `OptDisplay`. 
+ #[inline] + pub fn new(maybe_display: Option, empty_display: &'a str) -> Self { + Self { + inner: maybe_display, + empty_display, + } + } +} + +impl Display for OptDisplay<'_, T> { + #[inline] + fn fmt(&self, f: &mut Formatter<'_>) -> Result { + match self.inner { + None => f.write_str(self.empty_display), + Some(ref val) => val.fmt(f), + } + } +} + +#[cfg(test)] +mod tests { + use super::OptDisplay; + + #[test] + fn opt_display_works() { + let some_value: Option = Some(12345); + + assert_eq!( + OptDisplay::new(some_value.as_ref(), "does not matter").to_string(), + "12345" + ); + + let none_value: Option = None; + assert_eq!( + OptDisplay::new(none_value.as_ref(), "should be none").to_string(), + "should be none" + ); + } +} diff --git a/node/src/utils/pid_file.rs b/node/src/utils/pid_file.rs deleted file mode 100644 index 11d09de920..0000000000 --- a/node/src/utils/pid_file.rs +++ /dev/null @@ -1,245 +0,0 @@ -//! PidFile utility type. -//! -//! PidFiles are used to gate access to a resource, as well as detect unclean shutdowns. - -use std::{ - fs::{self, File}, - io::{self, Read, Seek, SeekFrom, Write}, - num::ParseIntError, - path::{Path, PathBuf}, - process, -}; - -use fs2::FileExt; -use thiserror::Error; -use tracing::warn; - -/// A PID (process ID) file. -/// -/// Records the current process' PID, removes it on exit. Can be used to determine whether or not -/// an application was shut down cleanly. -/// -/// The pidfile is held open with an exclusive but advisory lock. -#[derive(Debug)] -pub struct PidFile { - /// The pidfile. - /// - /// The file will be locked for the lifetime of `PidFile`. - pidfile: File, - /// The pidfile location. - path: PathBuf, - /// Previous pidfile contents. - previous: Option, -} - -/// An error acquiring a pidfile. -#[derive(Debug, Error)] -pub enum PidFileError { - /// The pidfile could not be opened at all. - #[error("could not open pidfile: {0}")] - CouldNotOpen(#[source] io::Error), - /// The pidfile could not be locked. 
- #[error("could not lock pidfile: {0}")] - LockFailed(#[source] io::Error), - /// Error reading pidfile contents. - #[error("reading existing pidfile failed: {0}")] - ReadFailed(#[source] io::Error), - /// Error writing pidfile contents. - #[error("updating pidfile failed: {0}")] - WriteFailed(#[source] io::Error), - /// The pidfile was corrupted, its contents could not be read. - #[error("corrupt pidfile")] - Corrupted(ParseIntError), -} - -/// PidFile outcome. -/// -/// High-level description of the outcome of opening and locking the PIDfile. -#[must_use] -#[derive(Debug)] -pub enum PidFileOutcome { - /// Another instance of the node is likely running, or an attempt was made to reuse a pidfile. - /// - /// **Recommendation**: Exit to avoid resource conflicts. - AnotherNodeRunning(PidFileError), - /// The node crashed previously and could potentially have been corrupted. - /// - /// **Recommendation**: Run an integrity check, then potentially continue with initialization. - /// **Store the `PidFile`**. - Crashed(PidFile), - /// Clean start, pidfile lock acquired. - /// - /// **Recommendation**: Continue with initialization, but **store the `PidFile`**. - Clean(PidFile), - /// There was an error managing the pidfile, not sure if we have crashed or not. - /// - /// **Recommendation**: Exit, as it will not be possible to determine a crash at the next - /// start. - PidFileError(PidFileError), -} - -impl PidFile { - /// Acquire a `PidFile` and give an actionable outcome. - /// - /// **Important**: This function should be called **before** opening whatever resources it is - /// protecting. 
- pub fn acquire>(path: P) -> PidFileOutcome { - match PidFile::new(path) { - Ok(pidfile) => { - if pidfile.unclean_shutdown() { - PidFileOutcome::Crashed(pidfile) - } else { - PidFileOutcome::Clean(pidfile) - } - } - Err(err @ PidFileError::LockFailed(_)) => PidFileOutcome::AnotherNodeRunning(err), - Err(err) => PidFileOutcome::PidFileError(err), - } - } - - /// Creates a new pidfile. - /// - /// The error-behavior of this function is important and can be used to distinguish between - /// different conditions described in [`PidFileError`]. If the `PidFile` is instantiated before - /// the resource it is supposed to protect, the following actions are recommended: - fn new>(path: P) -> Result { - // First we try to open the pidfile, without disturbing it. - let mut pidfile = fs::OpenOptions::new() - .truncate(false) - .create(true) - .read(true) - .write(true) - .open(path.as_ref()) - .map_err(PidFileError::CouldNotOpen)?; - - // Now try to acquire an exclusive lock. This will fail if another process or another - // instance of `PidFile` is holding a lock onto the same pidfile. - pidfile - .try_lock_exclusive() - .map_err(PidFileError::LockFailed)?; - - // At this point, we're the exclusive users of the file and can read its contents. - let mut raw_contents = String::new(); - pidfile - .read_to_string(&mut raw_contents) - .map_err(PidFileError::ReadFailed)?; - - // Note: We cannot distinguish an empty file from a non-existing file, unfortunately. - let previous = if raw_contents.is_empty() { - None - } else { - Some(raw_contents.parse().map_err(PidFileError::Corrupted)?) - }; - - let pid = process::id(); - - // Truncate and rewind. - pidfile.set_len(0).map_err(PidFileError::WriteFailed)?; - pidfile - .seek(SeekFrom::Start(0)) - .map_err(PidFileError::WriteFailed)?; - - // Do our best to ensure that we always have some contents in the file immediately. 
- pidfile - .write_all(pid.to_string().as_bytes()) - .map_err(PidFileError::WriteFailed)?; - - pidfile.flush().map_err(PidFileError::WriteFailed)?; - - Ok(PidFile { - pidfile, - path: path.as_ref().to_owned(), - previous, - }) - } - - /// Whether or not the PidFile indicated a previously unclean shutdown. - fn unclean_shutdown(&self) -> bool { - // If there are any previous contents, we crashed. We check for our own PID already before. - self.previous.is_some() - } -} - -impl Drop for PidFile { - fn drop(&mut self) { - // When dropping the pidfile, we delete its file. We are still keeping the logs and the - // opened file handle, which will get cleaned up naturally. - if let Err(err) = fs::remove_file(&self.path) { - warn!(path=%self.path.display(), %err, "could not delete pidfile"); - } - } -} - -#[cfg(test)] -mod tests { - use std::fs; - - use tempfile::TempDir; - - use super::{PidFile, PidFileOutcome}; - - #[test] - fn pidfile_creates_file_and_cleans_it_up() { - let tmp_dir = TempDir::new().expect("could not create tmp_dir"); - let pidfile_path = tmp_dir.path().join("create_and_cleanup.pid"); - - let outcome = PidFile::acquire(&pidfile_path); - - match outcome { - PidFileOutcome::Clean(pidfile) => { - // Check the pidfile exists, then verify it gets removed after dropping the pidfile. - assert!(pidfile_path.exists()); - drop(pidfile); - assert!(!pidfile_path.exists()); - } - other => panic!("pidfile outcome not clean, but {:?}", other), - } - } - - #[test] - fn detects_unclean_shutdown() { - let tmp_dir = TempDir::new().expect("could not create tmp_dir"); - let pidfile_path = tmp_dir.path().join("create_and_cleanup.pid"); - - // We create a garbage pidfile to simulate an unclean shutdown. - fs::write(&pidfile_path, b"12345").expect("could not write garbage pid file"); - - let outcome = PidFile::acquire(&pidfile_path); - - match outcome { - PidFileOutcome::Crashed(pidfile) => { - // Now check if the written pid matches our PID. 
- assert_eq!(pidfile.previous, Some(12345)); - - // After we've crashed, we still expect cleanup. - assert!(pidfile_path.exists()); - drop(pidfile); - assert!(!pidfile_path.exists()); - } - other => panic!("pidfile outcome did not detect crash, is {:?}", other), - } - } - - #[test] - fn blocks_second_instance() { - let tmp_dir = TempDir::new().expect("could not create tmp_dir"); - let pidfile_path = tmp_dir.path().join("create_and_cleanup.pid"); - - let outcome = PidFile::acquire(&pidfile_path); - - match outcome { - PidFileOutcome::Clean(_pidfile) => { - match PidFile::acquire(&pidfile_path) { - PidFileOutcome::AnotherNodeRunning(_) => { - // All good, this is what we expected. - } - other => panic!( - "expected detection of duplicate pidfile access, instead got: {:?}", - other - ), - } - } - other => panic!("pidfile outcome not clean, but {:?}", other), - } - } -} diff --git a/node/src/utils/rlimit.rs b/node/src/utils/rlimit.rs index 12fde3ed0b..fe76f2ddcb 100644 --- a/node/src/utils/rlimit.rs +++ b/node/src/utils/rlimit.rs @@ -17,10 +17,16 @@ use fmt::Formatter; /// A scalar limit. pub type Limit = libc::rlim_t; +#[cfg(target_os = "linux")] +pub type LimitResourceId = libc::__rlimit_resource_t; + +#[cfg(target_os = "macos")] +pub type LimitResourceId = libc::c_int; + /// A kind of limit that can be set/retrieved. pub trait LimitKind { /// The `resource` id use for libc calls. - const LIBC_RESOURCE: libc::__rlimit_resource_t; + const LIBC_RESOURCE: LimitResourceId; } /// Maximum number of open files (`ulimit -n`). @@ -28,13 +34,9 @@ pub trait LimitKind { pub struct OpenFiles; impl LimitKind for OpenFiles { - const LIBC_RESOURCE: libc::__rlimit_resource_t = libc::RLIMIT_NOFILE; + const LIBC_RESOURCE: LimitResourceId = libc::RLIMIT_NOFILE; } -/// Infinite resource, i.e. no limit. -#[allow(dead_code)] -const INFINITE: Limit = libc::RLIM_INFINITY; - /// Wrapper around libc resource limit type. 
#[derive(Copy, Clone)] pub struct ResourceLimit { diff --git a/node/src/utils/round_robin.rs b/node/src/utils/round_robin.rs index dedf0b00cb..8b85b924c1 100644 --- a/node/src/utils/round_robin.rs +++ b/node/src/utils/round_robin.rs @@ -5,18 +5,17 @@ //! synchronization primitives under the hood. use std::{ - collections::{HashMap, VecDeque}, - fmt::Debug, - fs::File, + collections::{BTreeMap, HashMap, VecDeque}, + fmt::{Debug, Display}, hash::Hash, - io::{self, BufWriter, Write}, num::NonZeroUsize, - sync::atomic::{AtomicUsize, Ordering}, + sync::atomic::{AtomicBool, AtomicUsize, Ordering}, }; use enum_iterator::IntoEnumIterator; -use serde::{ser::SerializeMap, Serialize, Serializer}; -use tokio::sync::{Mutex, Semaphore}; +use serde::Serialize; +use tokio::sync::{Mutex, MutexGuard, Semaphore}; +use tracing::{debug, warn}; /// Weighted round-robin scheduler. /// @@ -41,11 +40,21 @@ pub struct WeightedRoundRobin { /// Number of items in all queues combined. total: Semaphore, + + /// Whether or not the queue is sealed (not accepting any more items). + sealed: AtomicBool, + + /// Dump count of events only when there is a 10%+ increase of events compared to the previous + /// report. Setting to `None` disables the dump function. + recent_event_count_peak: Option, } /// State that wraps queue and its event count. #[derive(Debug)] struct QueueState { + /// A queue's event counter. + /// + /// Do not modify this unless you are holding the `queue` lock. event_count: AtomicUsize, queue: Mutex>, } @@ -58,6 +67,15 @@ impl QueueState { } } + /// Remove all events from a queue. 
+ #[cfg(test)] + async fn drain(&self) -> Vec { + let mut guard = self.queue.lock().await; + let events: Vec = guard.drain(..).collect(); + self.event_count.fetch_sub(events.len(), Ordering::SeqCst); + events + } + #[inline] async fn push_back(&self, element: I) { self.queue.lock().await.push_back(element); @@ -100,93 +118,32 @@ struct Slot { tickets: usize, } -impl WeightedRoundRobin +#[derive(Debug, Serialize)] +/// A dump of the internal queues. +pub struct QueueDump<'a, K, I> where - I: Serialize, - K: Copy + Clone + Eq + Hash + IntoEnumIterator + Serialize, + K: Ord + Eq, { - /// Create a snapshot of the queue by locking it and serializing it. + /// Queues being dumped. /// - /// The serialized events are streamed directly into `serializer`. - /// - /// # Warning - /// - /// This function locks all queues in the order defined by the order defined by - /// `IntoEnumIterator`. Calling it multiple times in parallel is safe, but other code that locks - /// more than one queue at the same time needs to be aware of this. - pub async fn snapshot(&self, serializer: S) -> Result<(), S::Error> { - // Lock all queues in order get a snapshot, but release eagerly. This way we are guaranteed - // to have a consistent result, but we also allow for queues to be used again earlier. - let mut locks = Vec::new(); - - for kind in K::into_enum_iter() { - let queue_guard = self - .queues - .get(&kind) - .expect("missing queue while snapshotting") - .queue - .lock() - .await; - - locks.push((kind, queue_guard)); - } - - let mut map = serializer.serialize_map(Some(locks.len()))?; - - // By iterating over the guards, they are dropped in order. - for (kind, guard) in locks { - let vd = &*guard; - map.serialize_key(&kind)?; - map.serialize_value(vd)?; - } - map.end()?; - - Ok(()) - } + /// A `BTreeMap` is used to make the ordering constant, it will be in the natural order defined + /// by `Ord` on `K`. 
+ queues: BTreeMap>, } impl WeightedRoundRobin where I: Debug, K: Copy + Clone + Eq + Hash + IntoEnumIterator + Debug, -{ - /// Dump the contents of the queues (`Debug` representation) to a given file. - pub async fn debug_dump(&self, file: &mut File) -> Result<(), io::Error> { - let mut locks = Vec::new(); - for kind in K::into_enum_iter() { - let queue_guard = self - .queues - .get(&kind) - .expect("missing queue while dumping") - .queue - .lock() - .await; - - locks.push((kind, queue_guard)); - } - - let mut writer = BufWriter::new(file); - for (kind, guard) in locks { - let queue = &*guard; - writer.write_all(format!("Queue: {:?} ({}) [\n", kind, queue.len()).as_bytes())?; - for event in queue.iter() { - writer.write_all(format!("\t{:?}\n", event).as_bytes())?; - } - writer.write_all(b"]\n")?; - } - writer.flush() - } -} - -impl WeightedRoundRobin -where - K: Copy + Clone + Eq + Hash, { /// Creates a new weighted round-robin scheduler. /// /// Creates a queue for each pair given in `weights`. The second component of each `weight` is /// the number of times to return items from one queue before moving on to the next one. - pub(crate) fn new(weights: Vec<(K, NonZeroUsize)>) -> Self { + pub(crate) fn new( + weights: Vec<(K, NonZeroUsize)>, + initial_event_count_threshold: Option, + ) -> Self { assert!(!weights.is_empty(), "must provide at least one slot"); let queues = weights @@ -210,21 +167,88 @@ where slots, queues, total: Semaphore::new(0), + sealed: AtomicBool::new(false), + recent_event_count_peak: initial_event_count_threshold.map(AtomicUsize::new), + } + } + + /// Dump the queue contents to the given dumper function. 
+ pub async fn dump)>(&self, dumper: F) + where + K: Ord, + { + let locks = self.lock_queues().await; + let mut queues = BTreeMap::new(); + for (kind, guard) in &locks { + let queue = &**guard; + queues.insert(*kind, queue); + } + + let queue_dump = QueueDump { queues }; + dumper(&queue_dump); + } + + /// Lock all queues in a well-defined order to avoid deadlocks conditions. + async fn lock_queues(&self) -> Vec<(K, MutexGuard<'_, VecDeque>)> { + let mut locks = Vec::new(); + for kind in K::into_enum_iter() { + let queue_guard = self + .queues + .get(&kind) + .expect("missing queue while locking") + .queue + .lock() + .await; + + locks.push((kind, queue_guard)); } + + locks } +} +fn should_dump_queues(total: usize, recent_threshold: usize) -> bool { + total > ((recent_threshold * 11) / 10) +} + +impl WeightedRoundRobin +where + K: Copy + Clone + Eq + Hash + Display, +{ /// Pushes an item to a queue identified by key. /// /// ## Panics /// /// Panics if the queue identified by key `queue` does not exist. pub(crate) async fn push(&self, item: I, queue: K) { + if self.sealed.load(Ordering::SeqCst) { + debug!("queue sealed, dropping item"); + return; + } + self.queues .get(&queue) .expect("tried to push to non-existent queue") .push_back(item) .await; + // NOTE: Count may be off by one b/c of the way locking works when elements are popped. + // It's fine for its purposes. 
+ if let Some(recent_event_count_peak) = &self.recent_event_count_peak { + let total = self.queues.iter().map(|q| q.1.event_count()).sum::(); + let recent_threshold = recent_event_count_peak.load(Ordering::SeqCst); + if should_dump_queues(total, recent_threshold) { + recent_event_count_peak.store(total, Ordering::SeqCst); + let info: Vec<_> = self + .queues + .iter() + .map(|q| (q.0.to_string(), q.1.event_count())) + .filter(|(_, count)| count > &0) + .collect(); + warn!("Current event queue size ({total}) is above the threshold ({recent_threshold}): details {info:?}"); + } + } + // We increase the item count after we've put the item into the queue. self.total.add_permits(1); } @@ -268,6 +292,46 @@ where } } + /// Drains all events from a specific queue. + #[cfg(test)] + pub(crate) async fn drain_queue(&self, queue: K) -> Vec { + let events = self + .queues + .get(&queue) + .expect("queue to be drained disappeared") + .drain() + .await; + + // TODO: This is racy if someone is calling `pop` at the same time. + self.total + .acquire_many(events.len() as u32) + .await + .expect("could not acquire tickets during drain") + .forget(); + + events + } + + /// Drains all events from all queues. + #[cfg(test)] + pub async fn drain_queues(&self) -> Vec { + let mut events = Vec::new(); + let keys: Vec = self.queues.keys().cloned().collect(); + + for kind in keys { + events.extend(self.drain_queue(kind).await); + } + events + } + + /// Seals the queue, preventing it from accepting any more items. + /// + /// Items pushed into the queue via `push` will be dropped immediately. + #[cfg(test)] + pub fn seal(&self) { + self.sealed.store(true, Ordering::SeqCst); + } + /// Returns the number of events currently in the queue. 
#[cfg(test)] pub(crate) fn item_count(&self) -> usize { @@ -292,7 +356,7 @@ mod tests { use super::*; #[repr(usize)] - #[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)] + #[derive(Copy, Clone, Eq, PartialEq, Hash, Debug, IntoEnumIterator)] enum QueueKind { One = 1, Two, @@ -307,9 +371,18 @@ mod tests { } } + impl Display for QueueKind { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + QueueKind::One => write!(f, "One"), + QueueKind::Two => write!(f, "Two"), + } + } + } + #[tokio::test] async fn should_respect_weighting() { - let scheduler = WeightedRoundRobin::::new(weights()); + let scheduler = WeightedRoundRobin::::new(weights(), None); // Push three items on to each queue let future1 = scheduler .push('a', QueueKind::One) @@ -329,4 +402,60 @@ mod tests { assert_eq!(('f', QueueKind::Two), scheduler.pop().await); assert_eq!(('c', QueueKind::One), scheduler.pop().await); } + + #[tokio::test] + async fn can_seal_queue() { + let scheduler = WeightedRoundRobin::::new(weights(), None); + + assert_eq!(scheduler.item_count(), 0); + scheduler.push('a', QueueKind::One).await; + assert_eq!(scheduler.item_count(), 1); + scheduler.push('b', QueueKind::Two).await; + assert_eq!(scheduler.item_count(), 2); + + scheduler.seal(); + assert_eq!(scheduler.item_count(), 2); + scheduler.push('c', QueueKind::One).await; + assert_eq!(scheduler.item_count(), 2); + scheduler.push('d', QueueKind::One).await; + assert_eq!(scheduler.item_count(), 2); + + assert_eq!(('a', QueueKind::One), scheduler.pop().await); + assert_eq!(scheduler.item_count(), 1); + assert_eq!(('b', QueueKind::Two), scheduler.pop().await); + assert_eq!(scheduler.item_count(), 0); + assert!(scheduler.drain_queues().await.is_empty()); + } + + #[test] + fn should_calculate_dump_threshold() { + let total = 0; + let recent_threshold = 100; + assert!(!should_dump_queues(total, recent_threshold)); + + let total = 100; + let recent_threshold = 100; + assert!(!should_dump_queues(total, 
recent_threshold)); + + let total = 109; + let recent_threshold = 100; + assert!(!should_dump_queues(total, recent_threshold)); + + let total = 110; + let recent_threshold = 100; + assert!(!should_dump_queues(total, recent_threshold)); + + // Dump only if there is 10%+ increase in event count + let total = 111; + let recent_threshold = 100; + assert!(should_dump_queues(total, recent_threshold)); + + let total = 112; + let recent_threshold = 100; + assert!(should_dump_queues(total, recent_threshold)); + + let total = 1_000_000; + let recent_threshold = 100; + assert!(should_dump_queues(total, recent_threshold)); + } } diff --git a/node/src/utils/specimen.rs b/node/src/utils/specimen.rs new file mode 100644 index 0000000000..43e5cea633 --- /dev/null +++ b/node/src/utils/specimen.rs @@ -0,0 +1,1271 @@ +//! Specimen support. +//! +//! Structs implementing the specimen trait allow for specific sample instances being created, such +//! as the biggest possible. + +use std::{ + any::{Any, TypeId}, + collections::{BTreeMap, BTreeSet, HashMap}, + convert::{TryFrom, TryInto}, + iter::FromIterator, + net::{Ipv6Addr, SocketAddr, SocketAddrV6}, + sync::Arc, +}; + +use either::Either; +use once_cell::sync::OnceCell; +use serde::Serialize; +use strum::{EnumIter, IntoEnumIterator}; + +use casper_types::{ + account::AccountHash, + bytesrepr::Bytes, + crypto::{sign, PublicKey, Signature}, + AccessRights, Approval, ApprovalsHash, AsymmetricType, Block, BlockHash, BlockHeader, + BlockHeaderV1, BlockHeaderV2, BlockHeaderWithSignatures, BlockSignatures, BlockSignaturesV2, + BlockV2, ChainNameDigest, ChunkWithProof, Deploy, DeployHash, DeployId, Digest, EraEndV1, + EraEndV2, EraId, EraReport, ExecutableDeployItem, FinalitySignature, FinalitySignatureId, + FinalitySignatureV2, PackageHash, ProtocolVersion, RewardedSignatures, RuntimeArgs, SecretKey, + SemVer, SingleBlockRewardedSignatures, TimeDiff, Timestamp, Transaction, TransactionHash, + TransactionId, TransactionRuntimeParams, 
TransactionV1, TransactionV1Hash, URef, + AUCTION_LANE_ID, INSTALL_UPGRADE_LANE_ID, KEY_HASH_LENGTH, MINT_LANE_ID, U512, +}; + +use crate::{ + components::{ + consensus::{max_rounds_per_era, utils::ValidatorMap}, + fetcher::Tag, + }, + protocol::Message, + types::{ + transaction::transaction_v1_builder::TransactionV1Builder, BlockExecutionResultsOrChunk, + BlockPayload, FinalizedBlock, InternalEraReport, LegacyDeploy, SyncLeap, TrieOrChunk, + }, +}; +use casper_storage::block_store::types::ApprovalsHashes; + +/// The largest valid unicode codepoint that can be encoded to UTF-8. +pub(crate) const HIGHEST_UNICODE_CODEPOINT: char = '\u{10FFFF}'; +const LARGE_WASM_LANE_ID: u8 = 3; + +/// A cache used for memoization, typically on a single estimator. +#[derive(Debug, Default)] +pub(crate) struct Cache { + /// A map of items that have been hashed. Indexed by type. + items: HashMap>>, +} + +impl Cache { + /// Retrieves a potentially memoized instance. + pub(crate) fn get(&mut self) -> Option<&T> { + self.get_all::() + .first() + .map(|box_any| box_any.downcast_ref::().expect("cache corrupted")) + } + + /// Sets the memoized instance if not already set. + /// + /// Returns a reference to the memoized instance. Note that this may be an instance other than + /// the passed in `item`, if the cache entry was not empty before/ + pub(crate) fn set(&mut self, item: T) -> &T { + let items = self.get_all::(); + if items.is_empty() { + let boxed_item: Box = Box::new(item); + items.push(boxed_item); + } + self.get::().expect("should not be empty") + } + + /// Get or insert the vector storing item instances. + fn get_all(&mut self) -> &mut Vec> { + self.items.entry(TypeId::of::()).or_default() + } +} + +/// Given a specific type instance, estimates its serialized size. +pub(crate) trait SizeEstimator { + /// Estimate the serialized size of a value. + fn estimate(&self, val: &T) -> usize; + + /// Requires a parameter. 
+ /// + /// Parameters indicate potential specimens which values to expect, e.g. a maximum number of + /// items configured for a specific collection. + /// + /// ## Panics + /// + /// - If the named parameter is not set, panics. + /// - If `T` is of an invalid type. + fn parameter>(&self, name: &'static str) -> T; + + /// Require a parameter, cast into a boolean. + /// + /// See [`parameter`] for details. Will return `false` if the stored value is `0`, + /// otherwise `true`. + /// + /// This method exists because `bool` does not implement `TryFrom`. + /// + /// ## Panics + /// + /// Same as [`parameter`]. + fn parameter_bool(&self, name: &'static str) -> bool { + self.parameter::(name) != 0 + } +} + +/// Supports returning a maximum size specimen. +/// +/// "Maximum size" refers to the instance that uses the highest amount of memory and is also most +/// likely to have the largest representation when serialized. +pub(crate) trait LargestSpecimen: Sized { + /// Returns the largest possible specimen for this type. + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self; +} + +/// Supports generating a unique sequence of specimen that are as large as possible. +pub(crate) trait LargeUniqueSequence +where + Self: Sized + Ord, + E: SizeEstimator, +{ + /// Create a new sequence of the largest possible unique specimens. + /// + /// Note that multiple calls to this function will return overlapping sequences. + // Note: This functions returns a materialized sequence instead of a generator to avoid + // complications with borrowing `E`. + fn large_unique_sequence(estimator: &E, count: usize, cache: &mut Cache) -> BTreeSet; +} + +/// Produces the largest variant of a specific `enum` using an estimator and a generation function. 
+pub(crate) fn largest_variant(estimator: &E, generator: F) -> T +where + T: Serialize, + D: IntoEnumIterator, + E: SizeEstimator, + F: FnMut(D) -> T, +{ + D::iter() + .map(generator) + .max_by_key(|candidate| estimator.estimate(candidate)) + .expect("should have at least one candidate") +} + +/// Generates a vec of a given size filled with the largest specimen. +pub(crate) fn vec_of_largest_specimen( + estimator: &E, + count: usize, + cache: &mut Cache, +) -> Vec { + let mut vec = Vec::new(); + for _ in 0..count { + vec.push(LargestSpecimen::largest_specimen(estimator, cache)); + } + vec +} + +/// Generates a vec of the largest specimen, with a size from a property. +pub(crate) fn vec_prop_specimen( + estimator: &E, + parameter_name: &'static str, + cache: &mut Cache, +) -> Vec { + let mut count = estimator.parameter(parameter_name); + if count < 0 { + count = 0; + } + + vec_of_largest_specimen(estimator, count as usize, cache) +} + +/// Generates a `BTreeMap` with the size taken from a property. +/// +/// Keys are generated uniquely using `LargeUniqueSequence`, while values will be largest specimen. +pub(crate) fn btree_map_distinct_from_prop( + estimator: &E, + parameter_name: &'static str, + cache: &mut Cache, +) -> BTreeMap +where + V: LargestSpecimen, + K: Ord + LargeUniqueSequence + Sized, + E: SizeEstimator, +{ + let mut count = estimator.parameter(parameter_name); + if count < 0 { + count = 0; + } + + K::large_unique_sequence(estimator, count as usize, cache) + .into_iter() + .map(|key| (key, LargestSpecimen::largest_specimen(estimator, cache))) + .collect() +} + +/// Generates a `BTreeSet` with the size taken from a property. +/// +/// Value are generated uniquely using `LargeUniqueSequence`. 
+pub(crate) fn btree_set_distinct_from_prop( + estimator: &E, + parameter_name: &'static str, + cache: &mut Cache, +) -> BTreeSet +where + T: Ord + LargeUniqueSequence + Sized, + E: SizeEstimator, +{ + let mut count = estimator.parameter(parameter_name); + if count < 0 { + count = 0; + } + + T::large_unique_sequence(estimator, count as usize, cache) +} + +/// Generates a `BTreeSet` with a given amount of items. +/// +/// Value are generated uniquely using `LargeUniqueSequence`. +pub(crate) fn btree_set_distinct( + estimator: &E, + count: usize, + cache: &mut Cache, +) -> BTreeSet +where + T: Ord + LargeUniqueSequence + Sized, + E: SizeEstimator, +{ + T::large_unique_sequence(estimator, count, cache) +} + +impl LargestSpecimen for SocketAddr { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + SocketAddr::V6(SocketAddrV6::largest_specimen(estimator, cache)) + } +} + +impl LargestSpecimen for SocketAddrV6 { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + SocketAddrV6::new( + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + ) + } +} + +impl LargestSpecimen for Ipv6Addr { + fn largest_specimen(_estimator: &E, _cache: &mut Cache) -> Self { + // Leading zeros get shorted, ensure there are none in the address. 
+ Ipv6Addr::new( + 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, + ) + } +} + +impl LargestSpecimen for bool { + fn largest_specimen(_estimator: &E, _cache: &mut Cache) -> Self { + true + } +} + +impl LargestSpecimen for u8 { + fn largest_specimen(_estimator: &E, _cache: &mut Cache) -> Self { + u8::MAX + } +} + +impl LargestSpecimen for u16 { + fn largest_specimen(_estimator: &E, _cache: &mut Cache) -> Self { + u16::MAX + } +} + +impl LargestSpecimen for u32 { + fn largest_specimen(_estimator: &E, _cache: &mut Cache) -> Self { + u32::MAX + } +} + +impl LargestSpecimen for u64 { + fn largest_specimen(_estimator: &E, _cache: &mut Cache) -> Self { + u64::MAX + } +} + +impl LargestSpecimen for u128 { + fn largest_specimen(_estimator: &E, _cache: &mut Cache) -> Self { + u128::MAX + } +} + +impl LargestSpecimen for [T; N] { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + [LargestSpecimen::largest_specimen(estimator, cache); N] + } +} + +impl LargestSpecimen for Option +where + T: LargestSpecimen, +{ + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + Some(LargestSpecimen::largest_specimen(estimator, cache)) + } +} + +impl LargestSpecimen for Box +where + T: LargestSpecimen, +{ + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + Box::new(LargestSpecimen::largest_specimen(estimator, cache)) + } +} + +impl LargestSpecimen for Arc +where + T: LargestSpecimen, +{ + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + Arc::new(LargestSpecimen::largest_specimen(estimator, cache)) + } +} + +impl LargestSpecimen for (T1, T2) +where + T1: LargestSpecimen, + T2: LargestSpecimen, +{ + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + ( + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + ) + } +} + +impl LargestSpecimen for (T1, T2, T3) +where + T1: LargestSpecimen, + T2: LargestSpecimen, + T3: LargestSpecimen, +{ + fn 
largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + ( + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + ) + } +} + +// Various third party crates. + +impl LargestSpecimen for Either +where + L: LargestSpecimen + Serialize, + R: LargestSpecimen + Serialize, +{ + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + let l = L::largest_specimen(estimator, cache); + let r = R::largest_specimen(estimator, cache); + + if estimator.estimate(&l) >= estimator.estimate(&r) { + Either::Left(l) + } else { + Either::Right(r) + } + } +} + +// impls for `casper_types`, which is technically a foreign crate -- so we put them here. +impl LargestSpecimen for ProtocolVersion { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + ProtocolVersion::new(LargestSpecimen::largest_specimen(estimator, cache)) + } +} + +impl LargestSpecimen for URef { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + URef::new( + [LargestSpecimen::largest_specimen(estimator, cache); 32], + AccessRights::READ_ADD_WRITE, + ) + } +} + +impl LargestSpecimen for AccountHash { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + AccountHash::new([LargestSpecimen::largest_specimen(estimator, cache); 32]) + } +} + +impl LargestSpecimen for SemVer { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + SemVer { + major: LargestSpecimen::largest_specimen(estimator, cache), + minor: LargestSpecimen::largest_specimen(estimator, cache), + patch: LargestSpecimen::largest_specimen(estimator, cache), + } + } +} + +impl LargestSpecimen for PublicKey { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + PublicKey::large_unique_sequence(estimator, 1, cache) + .into_iter() + .next() + .unwrap() + } +} + +// Dummy implementation to replace the buggy real one below: +impl LargeUniqueSequence for PublicKey +where + E: 
SizeEstimator, +{ + fn large_unique_sequence(estimator: &E, count: usize, cache: &mut Cache) -> BTreeSet { + let data_vec = cache.get_all::(); + + /// Generates a secret key from a fixed, numbered seed. + fn generate_key(estimator: &E, seed: usize) -> PublicKey { + // Like `Signature`, we do not wish to pollute the types crate here. + #[derive(Copy, Clone, Debug, EnumIter)] + enum PublicKeyDiscriminants { + System, + Ed25519, + Secp256k1, + } + largest_variant::(estimator, |variant| { + // We take advantage of two things here: + // + // 1. The required seed bytes for Ed25519 and Secp256k1 are both the same length of + // 32 bytes. + // 2. While Secp256k1 does not allow the most trivial seed bytes of 0x00..0001, a a + // hash function output seems to satisfy it, and our current hashing scheme also + // output 32 bytes. + let seed_bytes = Digest::hash(seed.to_be_bytes()).value(); + + match variant { + PublicKeyDiscriminants::System => PublicKey::system(), + PublicKeyDiscriminants::Ed25519 => { + let ed25519_sec = SecretKey::ed25519_from_bytes(seed_bytes) + .expect("unable to create ed25519 key from seed bytes"); + PublicKey::from(&ed25519_sec) + } + PublicKeyDiscriminants::Secp256k1 => { + let secp256k1_sec = SecretKey::secp256k1_from_bytes(seed_bytes) + .expect("unable to create secp256k1 key from seed bytes"); + PublicKey::from(&secp256k1_sec) + } + } + }) + } + + while data_vec.len() < count { + let seed = data_vec.len(); + let key = generate_key(estimator, seed); + data_vec.push(Box::new(key)); + } + + debug_assert!(data_vec.len() >= count); + let output_set: BTreeSet = data_vec[..count] + .iter() + .map(|item| item.downcast_ref::().expect("cache corrupted")) + .cloned() + .collect(); + debug_assert_eq!(output_set.len(), count); + + output_set + } +} + +impl LargeUniqueSequence for Digest +where + E: SizeEstimator, +{ + fn large_unique_sequence(_estimator: &E, count: usize, _cache: &mut Cache) -> BTreeSet { + (0..count).map(|n| 
Digest::hash(n.to_ne_bytes())).collect() + } +} + +impl LargestSpecimen for Signature { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + if let Some(item) = cache.get::() { + return *item; + } + + // Note: We do not use strum generated discriminator enums for the signature, as we do not + // want to make `strum` a direct dependency of `casper-types`, to keep its size down. + #[derive(Debug, Copy, Clone, EnumIter)] + enum SignatureDiscriminants { + System, + Ed25519, + Secp256k1, + } + + *cache.set(largest_variant::( + estimator, + |variant| match variant { + SignatureDiscriminants::System => Signature::system(), + SignatureDiscriminants::Ed25519 => { + let ed25519_sec = &SecretKey::generate_ed25519().expect("a correct secret"); + + sign([0_u8], ed25519_sec, &ed25519_sec.into()) + } + SignatureDiscriminants::Secp256k1 => { + let secp256k1_sec = &SecretKey::generate_secp256k1().expect("a correct secret"); + + sign([0_u8], secp256k1_sec, &secp256k1_sec.into()) + } + }, + )) + } +} + +impl LargestSpecimen for EraId { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + EraId::new(LargestSpecimen::largest_specimen(estimator, cache)) + } +} + +impl LargestSpecimen for Timestamp { + fn largest_specimen(_estimator: &E, _cache: &mut Cache) -> Self { + const MAX_TIMESTAMP_HUMAN_READABLE: u64 = 253_402_300_799; + Timestamp::from(MAX_TIMESTAMP_HUMAN_READABLE) + } +} + +impl LargestSpecimen for TimeDiff { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + TimeDiff::from_millis(LargestSpecimen::largest_specimen(estimator, cache)) + } +} + +impl LargestSpecimen for BlockHeaderV1 { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + BlockHeaderV1::new( + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, 
cache), + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + OnceCell::with_value(LargestSpecimen::largest_specimen(estimator, cache)), + ) + } +} + +impl LargestSpecimen for BlockHeaderV2 { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + BlockHeaderV2::new( + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + OnceCell::with_value(LargestSpecimen::largest_specimen(estimator, cache)), + ) + } +} + +impl LargestSpecimen for BlockHeader { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + let v1 = BlockHeaderV1::largest_specimen(estimator, cache); + let v2 = BlockHeaderV2::largest_specimen(estimator, cache); + + if estimator.estimate(&v1) > estimator.estimate(&v2) { + BlockHeader::V1(v1) + } else { + BlockHeader::V2(v2) + } + } +} + +/// A wrapper around `BlockHeader` that implements `LargestSpecimen` without including the era +/// end. 
+pub(crate) struct BlockHeaderWithoutEraEnd(BlockHeaderV2); + +impl BlockHeaderWithoutEraEnd { + pub(crate) fn into_block_header(self) -> BlockHeader { + BlockHeader::V2(self.0) + } +} + +impl LargestSpecimen for BlockHeaderWithoutEraEnd { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + BlockHeaderWithoutEraEnd(BlockHeaderV2::new( + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + None, + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + OnceCell::with_value(LargestSpecimen::largest_specimen(estimator, cache)), + )) + } +} + +impl LargestSpecimen for EraEndV1 { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + EraEndV1::new( + LargestSpecimen::largest_specimen(estimator, cache), + btree_map_distinct_from_prop(estimator, "validator_count", cache), + ) + } +} + +impl LargestSpecimen for EraEndV2 { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + let rewards = { + let count = estimator.parameter("validator_count"); + + PublicKey::large_unique_sequence(estimator, count, cache) + .into_iter() + // at most two reward amounts per validator + .map(|key| (key, vec_of_largest_specimen(estimator, 2, cache))) + .collect() + }; + EraEndV2::new( + vec_prop_specimen(estimator, "validator_count", cache), + vec_prop_specimen(estimator, "validator_count", cache), + btree_map_distinct_from_prop(estimator, "validator_count", cache), + rewards, + 1u8, + ) + } +} + +impl 
LargestSpecimen for InternalEraReport { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + InternalEraReport { + equivocators: vec_prop_specimen(estimator, "validator_count", cache), + inactive_validators: vec_prop_specimen(estimator, "validator_count", cache), + } + } +} + +impl LargestSpecimen for BlockHeaderWithSignatures { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + BlockHeaderWithSignatures::new( + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + ) + } +} + +impl LargestSpecimen for BlockSignatures { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + let mut block_signatures = BlockSignaturesV2::new( + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + ); + let sigs = btree_map_distinct_from_prop(estimator, "validator_count", cache); + sigs.into_iter().for_each(|(public_key, sig)| { + block_signatures.insert_signature(public_key, sig); + }); + BlockSignatures::V2(block_signatures) + } +} + +impl LargestSpecimen for BlockV2 { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + let mint_hashes = vec![ + TransactionHash::largest_specimen(estimator, cache); + estimator.parameter::("max_mint_per_block") + ]; + let auction_hashes = vec![ + TransactionHash::largest_specimen(estimator, cache); + estimator.parameter::("max_auctions_per_block") + ]; + let install_upgrade_hashes = + vec![ + TransactionHash::largest_specimen(estimator, cache); + estimator.parameter::("max_install_upgrade_transactions_per_block") + ]; + let standard_hashes = vec![ + TransactionHash::largest_specimen(estimator, cache); + estimator + .parameter::("max_standard_transactions_per_block") + ]; + + let transactions = { + let mut ret = BTreeMap::new(); + ret.insert(MINT_LANE_ID, mint_hashes); + 
ret.insert(AUCTION_LANE_ID, auction_hashes); + ret.insert(INSTALL_UPGRADE_LANE_ID, install_upgrade_hashes); + ret.insert(3, standard_hashes); + ret + }; + + BlockV2::new( + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + transactions, + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + ) + } +} + +impl LargestSpecimen for Block { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + Block::V2(LargestSpecimen::largest_specimen(estimator, cache)) + } +} + +impl LargestSpecimen for FinalizedBlock { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + FinalizedBlock::new( + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + ) + } +} + +impl LargestSpecimen for FinalitySignature { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + FinalitySignature::V2(LargestSpecimen::largest_specimen(estimator, cache)) + } +} + +impl LargestSpecimen for FinalitySignatureV2 { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + FinalitySignatureV2::new( + LargestSpecimen::largest_specimen(estimator, cache), + 
LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + ) + } +} + +impl LargestSpecimen for FinalitySignatureId { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + FinalitySignatureId::new( + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + ) + } +} + +impl LargestSpecimen for EraReport { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + EraReport::new( + vec_prop_specimen(estimator, "validator_count", cache), + btree_map_distinct_from_prop(estimator, "validator_count", cache), + vec_prop_specimen(estimator, "validator_count", cache), + ) + } +} + +impl LargestSpecimen for BlockHash { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + BlockHash::new(LargestSpecimen::largest_specimen(estimator, cache)) + } +} + +impl LargestSpecimen for ChainNameDigest { + fn largest_specimen(_estimator: &E, _cache: &mut Cache) -> Self { + // ChainNameDigest is fixed size by definition, so any value will do. + ChainNameDigest::from_chain_name("") + } +} + +// impls for `casper_hashing`, which is technically a foreign crate -- so we put them here. +impl LargestSpecimen for Digest { + fn largest_specimen(_estimator: &E, _cache: &mut Cache) -> Self { + // Hashes are fixed size by definition, so any value will do. + Digest::hash("") + } +} + +impl LargestSpecimen for BlockPayload { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + // We cannot just use the standard largest specimen for `TransactionHashWithApprovals`, as + // this would cause a quadratic increase in transactions. 
Instead, we generate one + // large transaction that contains the number of approvals if they are spread out + // across the block. + + let large_txn = match Transaction::largest_specimen(estimator, cache) { + Transaction::Deploy(deploy) => { + Transaction::Deploy(deploy.with_approvals(btree_set_distinct_from_prop( + estimator, + "average_approvals_per_transaction_in_block", + cache, + ))) + } + Transaction::V1(v1) => { + Transaction::V1(v1.with_approvals(btree_set_distinct_from_prop( + estimator, + "average_approvals_per_transaction_in_block", + cache, + ))) + } + }; + + let large_txn_hash_with_approvals = (large_txn.hash(), large_txn.approvals()); + + let mut transactions = BTreeMap::new(); + transactions.insert( + MINT_LANE_ID, + vec![ + large_txn_hash_with_approvals.clone(); + estimator.parameter::("max_mint_per_block") + ], + ); + transactions.insert( + AUCTION_LANE_ID, + vec![ + large_txn_hash_with_approvals.clone(); + estimator.parameter::("max_auctions_per_block") + ], + ); + transactions.insert( + LARGE_WASM_LANE_ID, + vec![ + large_txn_hash_with_approvals.clone(); + estimator.parameter::("max_standard_transactions_per_block") + ], + ); + transactions.insert( + INSTALL_UPGRADE_LANE_ID, + vec![ + large_txn_hash_with_approvals; + estimator.parameter::("max_install_upgrade_transactions_per_block") + ], + ); + + BlockPayload::new( + transactions, + vec_prop_specimen(estimator, "max_accusations_per_block", cache), + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + ) + } +} + +impl LargestSpecimen for RewardedSignatures { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + RewardedSignatures::new( + std::iter::repeat(LargestSpecimen::largest_specimen(estimator, cache)) + .take(estimator.parameter("signature_rewards_max_delay")), + ) + } +} + +impl LargestSpecimen for SingleBlockRewardedSignatures { + fn largest_specimen(estimator: 
&E, _cache: &mut Cache) -> Self { + SingleBlockRewardedSignatures::pack( + std::iter::repeat(1).take(estimator.parameter("validator_count")), + ) + } +} + +impl LargestSpecimen for DeployHash { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + DeployHash::new(LargestSpecimen::largest_specimen(estimator, cache)) + } +} + +impl LargestSpecimen for Approval { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + Approval::new( + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + ) + } +} + +impl LargeUniqueSequence for Approval +where + Self: Sized + Ord, + E: SizeEstimator, +{ + fn large_unique_sequence(estimator: &E, count: usize, cache: &mut Cache) -> BTreeSet { + PublicKey::large_unique_sequence(estimator, count, cache) + .into_iter() + .map(|public_key| { + Approval::new( + public_key, + LargestSpecimen::largest_specimen(estimator, cache), + ) + }) + .collect() + } +} + +impl LargestSpecimen for (TransactionHash, Option>) { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + // Note: This is an upper bound, the actual value is lower. We are keeping the order of + // magnitude intact though. + let max_items = estimator.parameter::("max_transfers_per_block") + + estimator.parameter::("max_standard_per_block"); + + let transaction = ( + TransactionHash::largest_specimen(estimator, cache), + Some(btree_set_distinct(estimator, max_items, cache)), + ); + let v1 = ( + TransactionHash::largest_specimen(estimator, cache), + Some(btree_set_distinct(estimator, max_items, cache)), + ); + + if estimator.estimate(&transaction) > estimator.estimate(&v1) { + transaction + } else { + v1 + } + } +} + +impl LargestSpecimen for Deploy { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + // Note: Deploys have a maximum size enforced on their serialized representation. 
A deploy + // generated here is guaranteed to exceed this maximum size due to the session code + // being this maximum size already (see the [`LargestSpecimen`] implementation of + // [`ExecutableDeployItem`]). For this reason, we leave `dependencies` and `payment` + // small. + Deploy::new_signed( + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + Default::default(), // See note. + largest_chain_name(estimator), + LargestSpecimen::largest_specimen(estimator, cache), + ExecutableDeployItem::Transfer { + args: Default::default(), // See note. + }, + &LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + ) + } +} + +impl LargestSpecimen for DeployId { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + DeployId::new( + LargestSpecimen::largest_specimen(estimator, cache), + LargestSpecimen::largest_specimen(estimator, cache), + ) + } +} + +impl LargestSpecimen for ApprovalsHash { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + let deploy_ah = ApprovalsHash(LargestSpecimen::largest_specimen(estimator, cache)); + let txn_v1_ah = ApprovalsHash(LargestSpecimen::largest_specimen(estimator, cache)); + + if estimator.estimate(&deploy_ah) >= estimator.estimate(&txn_v1_ah) { + deploy_ah + } else { + txn_v1_ah + } + } +} + +impl LargestSpecimen for TransactionV1Hash { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + TransactionV1Hash::new(LargestSpecimen::largest_specimen(estimator, cache)) + } +} + +impl LargestSpecimen for TransactionV1 { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + // See comment in `impl LargestSpecimen for ExecutableDeployItem` below for rationale here. 
+ let max_size_with_margin = + estimator.parameter::("max_transaction_size").max(0) as usize + 10 * 4; + TransactionV1Builder::new_session( + true, + Bytes::from(vec_of_largest_specimen( + estimator, + max_size_with_margin, + cache, + )), + TransactionRuntimeParams::VmCasperV1, + ) + .with_secret_key(&LargestSpecimen::largest_specimen(estimator, cache)) + .with_timestamp(LargestSpecimen::largest_specimen(estimator, cache)) + .with_ttl(LargestSpecimen::largest_specimen(estimator, cache)) + .with_chain_name(largest_chain_name(estimator)) + .build() + .unwrap() + } +} + +impl LargestSpecimen for TransactionId { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + let deploy_hash = + TransactionHash::Deploy(LargestSpecimen::largest_specimen(estimator, cache)); + let v1_hash = TransactionHash::V1(LargestSpecimen::largest_specimen(estimator, cache)); + + let deploy = TransactionId::new( + deploy_hash, + LargestSpecimen::largest_specimen(estimator, cache), + ); + let v1 = TransactionId::new(v1_hash, LargestSpecimen::largest_specimen(estimator, cache)); + + if estimator.estimate(&deploy) >= estimator.estimate(&v1) { + deploy + } else { + v1 + } + } +} + +impl LargestSpecimen for Transaction { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + let deploy = Transaction::Deploy(LargestSpecimen::largest_specimen(estimator, cache)); + let v1 = Transaction::V1(LargestSpecimen::largest_specimen(estimator, cache)); + + if estimator.estimate(&deploy) >= estimator.estimate(&v1) { + deploy + } else { + v1 + } + } +} + +impl LargestSpecimen for TransactionHash { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + let deploy_hash = + TransactionHash::Deploy(LargestSpecimen::largest_specimen(estimator, cache)); + let v1_hash = TransactionHash::V1(LargestSpecimen::largest_specimen(estimator, cache)); + + if estimator.estimate(&deploy_hash) >= estimator.estimate(&v1_hash) { + deploy_hash + } else { + v1_hash + } + } +} + +// EE impls +impl 
LargestSpecimen for ExecutableDeployItem { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + // `module_bytes` already blows this up to the maximum deploy size, so we use this variant + // as the largest always and don't need to fill in any args. + // + // However, this does not hold true for all encoding schemes: An inefficient encoding can + // easily, via `RuntimeArgs`, result in a much larger encoded size, e.g. when encoding an + // array of 1-byte elements in a format that uses string quoting and a delimiter to seperate + // elements. + // + // We compromise by not supporting encodings this inefficient and add 10 * a 32-bit integer + // as a safety margin for tags and length prefixes. + let max_size_with_margin = + estimator.parameter::("max_transaction_size").max(0) as usize + 10 * 4; + + ExecutableDeployItem::ModuleBytes { + module_bytes: Bytes::from(vec_of_largest_specimen( + estimator, + max_size_with_margin, + cache, + )), + args: RuntimeArgs::new(), + } + } +} + +impl LargestSpecimen for U512 { + fn largest_specimen(_estimator: &E, _cache: &mut Cache) -> Self { + U512::max_value() + } +} + +impl LargestSpecimen for PackageHash { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + PackageHash::new([LargestSpecimen::largest_specimen(estimator, cache); KEY_HASH_LENGTH]) + } +} + +impl LargestSpecimen for ChunkWithProof { + fn largest_specimen(_estimator: &E, _cache: &mut Cache) -> Self { + ChunkWithProof::new(&[0xFF; Self::CHUNK_SIZE_BYTES], 0) + .expect("the chunk to be correctly created") + } +} + +impl LargestSpecimen for SecretKey { + fn largest_specimen(_estimator: &E, _cache: &mut Cache) -> Self { + SecretKey::ed25519_from_bytes([u8::MAX; 32]).expect("valid secret key bytes") + } +} + +impl LargestSpecimen for ValidatorMap { + fn largest_specimen(estimator: &E, cache: &mut Cache) -> Self { + let max_validators = estimator.parameter("validator_count"); + + ValidatorMap::from_iter( + std::iter::repeat_with(|| 
LargestSpecimen::largest_specimen(estimator, cache)) + .take(max_validators), + ) + } +} + +/// Returns the largest `Message::GetRequest`. +pub(crate) fn largest_get_request(estimator: &E, cache: &mut Cache) -> Message { + largest_variant::(estimator, |variant| { + match variant { + Tag::Transaction => Message::new_get_request::( + &LargestSpecimen::largest_specimen(estimator, cache), + ), + Tag::LegacyDeploy => Message::new_get_request::( + &LargestSpecimen::largest_specimen(estimator, cache), + ), + Tag::Block => Message::new_get_request::(&LargestSpecimen::largest_specimen( + estimator, cache, + )), + Tag::BlockHeader => Message::new_get_request::( + &LargestSpecimen::largest_specimen(estimator, cache), + ), + Tag::TrieOrChunk => Message::new_get_request::( + &LargestSpecimen::largest_specimen(estimator, cache), + ), + Tag::FinalitySignature => Message::new_get_request::( + &LargestSpecimen::largest_specimen(estimator, cache), + ), + Tag::SyncLeap => Message::new_get_request::( + &LargestSpecimen::largest_specimen(estimator, cache), + ), + Tag::ApprovalsHashes => Message::new_get_request::( + &LargestSpecimen::largest_specimen(estimator, cache), + ), + Tag::BlockExecutionResults => Message::new_get_request::( + &LargestSpecimen::largest_specimen(estimator, cache), + ), + } + .expect("did not expect new_get_request from largest deploy to fail") + }) +} + +/// Returns the largest `Message::GetResponse`. 
+pub(crate) fn largest_get_response(estimator: &E, cache: &mut Cache) -> Message { + largest_variant::(estimator, |variant| { + match variant { + Tag::Transaction => Message::new_get_response::( + &LargestSpecimen::largest_specimen(estimator, cache), + ), + Tag::LegacyDeploy => Message::new_get_response::( + &LargestSpecimen::largest_specimen(estimator, cache), + ), + Tag::Block => Message::new_get_response::(&LargestSpecimen::largest_specimen( + estimator, cache, + )), + Tag::BlockHeader => Message::new_get_response::( + &LargestSpecimen::largest_specimen(estimator, cache), + ), + Tag::TrieOrChunk => Message::new_get_response::( + &LargestSpecimen::largest_specimen(estimator, cache), + ), + Tag::FinalitySignature => Message::new_get_response::( + &LargestSpecimen::largest_specimen(estimator, cache), + ), + Tag::SyncLeap => Message::new_get_response::( + &LargestSpecimen::largest_specimen(estimator, cache), + ), + Tag::ApprovalsHashes => Message::new_get_response::( + &LargestSpecimen::largest_specimen(estimator, cache), + ), + Tag::BlockExecutionResults => { + Message::new_get_response::( + &LargestSpecimen::largest_specimen(estimator, cache), + ) + } + } + .expect("did not expect new_get_response from largest deploy to fail") + }) +} + +/// Returns the largest string allowed for a chain name. +fn largest_chain_name(estimator: &E) -> String { + string_max_characters(estimator.parameter("network_name_limit")) +} + +/// Returns a string with `len`s characters of the largest possible size. +fn string_max_characters(max_char: usize) -> String { + std::iter::repeat(HIGHEST_UNICODE_CODEPOINT) + .take(max_char) + .collect() +} + +/// Returns the max rounds per era with the specimen parameters. +/// +/// See the [`max_rounds_per_era`] function. 
+pub(crate) fn estimator_max_rounds_per_era(estimator: &impl SizeEstimator) -> usize { + let minimum_era_height = estimator.parameter("minimum_era_height"); + let era_duration_ms = TimeDiff::from_millis(estimator.parameter("era_duration_ms")); + let minimum_round_length_ms = + TimeDiff::from_millis(estimator.parameter("minimum_round_length_ms")); + + max_rounds_per_era(minimum_era_height, era_duration_ms, minimum_round_length_ms) + .try_into() + .expect("to be a valid `usize`") +} + +#[cfg(test)] +mod tests { + use super::Cache; + + #[test] + fn memoization_cache_simple() { + let mut cache = Cache::default(); + + assert!(cache.get::().is_none()); + assert!(cache.get::().is_none()); + + cache.set::(1234); + assert_eq!(cache.get::(), Some(&1234)); + + cache.set::("a string is not copy".to_owned()); + assert_eq!( + cache.get::().map(String::as_str), + Some("a string is not copy") + ); + assert_eq!(cache.get::(), Some(&1234)); + + cache.set::("this should not overwrite".to_owned()); + assert_eq!( + cache.get::().map(String::as_str), + Some("a string is not copy") + ); + } +} diff --git a/node/src/utils/umask.rs b/node/src/utils/umask.rs new file mode 100644 index 0000000000..ceebd661da --- /dev/null +++ b/node/src/utils/umask.rs @@ -0,0 +1,30 @@ +//! Umask setting functions. + +/// File mode. +pub(crate) type Mode = libc::mode_t; + +/// Set the umask to `new_mode`, returning the current mode. +fn umask(new_mode: Mode) -> Mode { + // `umask` is always successful (according to the manpage), so there is no error condition to + // check. + unsafe { libc::umask(new_mode) } +} + +/// Sets a new umask, returning a guard that will restore the current umask when dropped. +pub(crate) fn temp_umask(new_mode: Mode) -> UmaskGuard { + let prev = umask(new_mode); + UmaskGuard { prev } +} + +/// Guard for umask, will restore the contained umask on drop. +#[derive(Clone, Debug)] +pub(crate) struct UmaskGuard { + /// The mode stored in the guard. 
+ prev: Mode, +} + +impl Drop for UmaskGuard { + fn drop(&mut self) { + umask(self.prev); + } +} diff --git a/node/src/utils/work_queue.rs b/node/src/utils/work_queue.rs new file mode 100644 index 0000000000..a04bc9b7c7 --- /dev/null +++ b/node/src/utils/work_queue.rs @@ -0,0 +1,361 @@ +//! Work queue for finite work. +//! +//! A queue that allows for processing a variable amount of work that may spawn more jobs, but is +//! expected to finish eventually. + +use std::{ + collections::VecDeque, + sync::{Arc, Mutex}, +}; + +use futures::{stream, Stream}; +use tokio::sync::Notify; + +/// Multi-producer, multi-consumer async job queue with end conditions. +/// +/// Keeps track of in-progress jobs and can indicate to workers that all work has been finished. +/// Intended to be used for jobs that will spawn other jobs during processing, but stop once all +/// jobs have finished. +/// +/// # Example use +/// +/// ```rust +/// #![allow(non_snake_case)] +/// # use std::{sync::Arc, time::Duration}; +/// # +/// # use futures::stream::{futures_unordered::FuturesUnordered, StreamExt}; +/// # +/// # use casper_node::utils::work_queue::WorkQueue; +/// # +/// type DemoJob = (&'static str, usize); +/// +/// /// Job processing function. +/// /// +/// /// For a given job `(name, n)`, returns two jobs with `n = n - 1`, unless `n == 0`. +/// async fn process_job(job: DemoJob) -> Vec { +/// tokio::time::sleep(Duration::from_millis(25)).await; +/// +/// let (tag, n) = job; +/// +/// if n == 0 { +/// Vec::new() +/// } else { +/// vec![(tag, n - 1), (tag, n - 1)] +/// } +/// } +/// +/// /// Job-processing worker. +/// /// +/// /// `id` is the worker ID for logging. 
+/// async fn worker(id: usize, q: Arc>) { +/// println!("worker {}: init", id); +/// +/// while let Some(job) = q.next_job().await { +/// println!("worker {}: start job {:?}", id, job.inner()); +/// for new_job in process_job(job.inner().clone()).await { +/// q.push_job(new_job); +/// } +/// println!("worker {}: finish job {:?}", id, job.inner()); +/// } +/// +/// println!("worker {}: shutting down", id); +/// } +/// +/// const WORKER_COUNT: usize = 3; +/// # +/// # async fn test_func() { +/// let q = Arc::new(WorkQueue::default()); +/// q.push_job(("A", 3)); +/// +/// let workers: FuturesUnordered<_> = (0..WORKER_COUNT).map(|id| worker(id, q.clone())).collect(); +/// +/// // Wait for all workers to finish. +/// workers.for_each(|_| async move {}).await; +/// # } +/// # let rt = tokio::runtime::Runtime::new().unwrap(); +/// # let handle = rt.handle(); +/// # handle.block_on(test_func()); +/// ``` +#[derive(Debug)] +pub struct WorkQueue { + /// Inner workings of the queue. + inner: Mutex>, + /// Notifier for waiting tasks. + notify: Notify, +} + +/// Queue inner state. +#[derive(Debug)] +struct QueueInner { + /// Jobs currently in the queue. + jobs: VecDeque, + /// Number of jobs that have been popped from the queue using `next_job` but not finished. + in_progress: usize, +} + +// Manual default implementation, since the derivation would require a `T: Default` trait bound. +impl Default for WorkQueue { + fn default() -> Self { + Self { + inner: Default::default(), + notify: Default::default(), + } + } +} + +impl Default for QueueInner { + fn default() -> Self { + Self { + jobs: Default::default(), + in_progress: Default::default(), + } + } +} + +impl WorkQueue { + /// Pop a job from the queue. + /// + /// If there is a job in the queue, returns the job and increases the internal in progress + /// counter by one. + /// + /// If there are still jobs in progress, but none queued, waits until either of these conditions + /// changes, then retries. 
+ /// + /// If there are no jobs available and no jobs in progress, returns `None`. + pub async fn next_job(self: &Arc) -> Option> { + loop { + let waiting; + { + let mut inner = self.inner.lock().expect("lock poisoned"); + match inner.jobs.pop_front() { + Some(job) => { + // We got a job, increase the `in_progress` count and return. + inner.in_progress += 1; + return Some(JobHandle { + job, + queue: self.clone(), + }); + } + None => { + // No job found. Check if we are completely done. + if inner.in_progress == 0 { + // No more jobs, no jobs in progress. We are done! + return None; + } + + // Otherwise, we have to wait. + waiting = self.notify.notified(); + } + } + } + + // Note: Any notification sent while executing this segment (after the guard has been + // dropped, but before `waiting.await` has been entered) will still be picked up by + // `waiting.await`, as the call to `notified()` marks the beginning of the waiting + // period, not `waiting.await`. See `tests::notification_assumption_holds`. + + // After freeing the lock, wait for a new job to arrive or be finished. + waiting.await; + } + } + + /// Pushes a job onto the queue. + /// + /// If there are any worker waiting on `next_job`, one of them will receive the job. + pub fn push_job(&self, job: T) { + let mut inner = self.inner.lock().expect("lock poisoned"); + + inner.jobs.push_back(job); + self.notify.notify_waiters(); + } + + /// Returns the number of jobs in the queue. + pub fn num_jobs(&self) -> usize { + self.inner.lock().expect("lock poisoned").jobs.len() + } + + /// Creates a streaming consumer of the work queue. + #[inline] + pub fn to_stream(self: Arc) -> impl Stream> { + stream::unfold(self, |work_queue| async move { + let next = work_queue.next_job().await; + next.map(|handle| (handle, work_queue)) + }) + } + + /// Mark job completion. + /// + /// This is an internal function to be used by `JobHandle`, which locks the internal queue and + /// decreases the in-progress count by one. 
+ fn complete_job(&self) { + let mut inner = self.inner.lock().expect("lock poisoned"); + + inner.in_progress -= 1; + self.notify.notify_waiters(); + } +} + +/// Handle containing a job. +/// +/// Holds a job popped from the job queue. +/// +/// The job will be considered completed once `JobHandle` has been dropped. +#[derive(Debug)] +pub struct JobHandle { + /// The protected job. + job: T, + /// Queue job was removed from. + queue: Arc>, +} + +impl JobHandle { + /// Returns a reference to the inner job. + pub fn inner(&self) -> &T { + &self.job + } +} + +impl Drop for JobHandle { + fn drop(&mut self) { + self.queue.complete_job(); + } +} + +#[cfg(test)] +mod tests { + use std::{ + sync::{ + atomic::{AtomicU32, Ordering}, + Arc, + }, + time::Duration, + }; + + use futures::{FutureExt, StreamExt}; + use tokio::sync::Notify; + + use super::WorkQueue; + + #[derive(Debug)] + struct TestJob(u32); + + // Verify that the assumption made about `Notification` -- namely that a call to `notified()` is + // enough to "register" the waiter -- holds. + #[test] + fn notification_assumption_holds() { + let not = Notify::new(); + + // First attempt to await a notification, should return pending. + assert!(not.notified().now_or_never().is_none()); + + // Second, we notify, then try notification again. Should also return pending, as we were + // "not around" when the notification happened. + not.notify_waiters(); + assert!(not.notified().now_or_never().is_none()); + + // Finally, we "register" for notification beforehand. + let waiter = not.notified(); + not.notify_waiters(); + assert!(waiter.now_or_never().is_some()); + } + + /// Process a job, sleeping a short amout of time on every 5th job. 
+ async fn job_worker_simple(queue: Arc>, sum: Arc) { + while let Some(job) = queue.next_job().await { + if job.inner().0 % 5 == 0 { + tokio::time::sleep(Duration::from_millis(50)).await; + } + + sum.fetch_add(job.inner().0, Ordering::SeqCst); + } + } + + /// Process a job, sleeping a short amount of time on every job. + /// + /// Spawns two additional jobs for every job processed, decreasing the job number until reaching + /// zero. + async fn job_worker_binary(queue: Arc>, sum: Arc) { + while let Some(job) = queue.next_job().await { + tokio::time::sleep(Duration::from_millis(10)).await; + + sum.fetch_add(job.inner().0, Ordering::SeqCst); + + if job.inner().0 > 0 { + queue.push_job(TestJob(job.inner().0 - 1)); + queue.push_job(TestJob(job.inner().0 - 1)); + } + } + } + + #[tokio::test] + async fn empty_queue_exits_immediately() { + let q: Arc> = Arc::new(Default::default()); + assert!(q.next_job().await.is_none()); + } + + #[tokio::test] + async fn large_front_loaded_queue_terminates() { + let num_jobs = 1_000; + let q: Arc> = Arc::new(Default::default()); + for job in (0..num_jobs).map(TestJob) { + q.push_job(job); + } + + let mut workers = Vec::new(); + let output = Arc::new(AtomicU32::new(0)); + for _ in 0..3 { + workers.push(tokio::spawn(job_worker_simple(q.clone(), output.clone()))); + } + + // We use a different pattern for waiting here, see the doctest for a solution that does not + // spawn. 
+ for worker in workers { + worker.await.expect("task panicked"); + } + + let expected_total = (num_jobs * (num_jobs - 1)) / 2; + assert_eq!(output.load(Ordering::SeqCst), expected_total); + } + + #[tokio::test] + async fn stream_interface_works() { + let num_jobs = 1_000; + let q: Arc> = Arc::new(Default::default()); + for job in (0..num_jobs).map(TestJob) { + q.push_job(job); + } + + let mut current = 0; + let mut stream = Box::pin(q.to_stream()); + while let Some(job) = stream.next().await { + assert_eq!(job.inner().0, current); + current += 1; + } + } + + #[tokio::test] + async fn complex_queue_terminates() { + let num_jobs = 5; + let q: Arc> = Arc::new(Default::default()); + for _ in 0..num_jobs { + q.push_job(TestJob(num_jobs)); + } + + let mut workers = Vec::new(); + let output = Arc::new(AtomicU32::new(0)); + for _ in 0..3 { + workers.push(tokio::spawn(job_worker_binary(q.clone(), output.clone()))); + } + + // We use a different pattern for waiting here, see the doctest for a solution that does not + // spawn. + for worker in workers { + worker.await.expect("task panicked"); + } + + // A single job starting at `k` will add `SUM_{n=0}^{k} (k-n) * 2^n`, which is + // 57 for `k=5`. We start 5 jobs, so we expect `5 * 57 = 285` to be the result. + let expected_total = 285; + assert_eq!(output.load(Ordering::SeqCst), expected_total); + } +} diff --git a/node_macros/.gitignore b/node_macros/.gitignore deleted file mode 100644 index 96ef6c0b94..0000000000 --- a/node_macros/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -/target -Cargo.lock diff --git a/node_macros/Cargo.toml b/node_macros/Cargo.toml deleted file mode 100644 index 0ae7cacd87..0000000000 --- a/node_macros/Cargo.toml +++ /dev/null @@ -1,21 +0,0 @@ -[package] -name = "casper-node-macros" -version = "1.0.0" -authors = ["Marc Brinkmann "] -edition = "2018" -description = "A macro to create reactor implementations for the casper-node." 
-readme = "README.md" -documentation = "https://docs.rs/casper-node-macros" -homepage = "https://casperlabs.io" -repository = "https://github.com/CasperLabs/casper-node/tree/master/node_macros" -license-file = "../LICENSE" - -[dependencies] -indexmap = "1.6.0" -Inflector = "0.11.4" -proc-macro2 = "1.0.21" -quote = "1.0.8" -syn = { version = "1.0.40", features = ["full", "extra-traits"] } - -[lib] -proc-macro = true diff --git a/node_macros/README.md b/node_macros/README.md deleted file mode 100644 index 7a32dabc99..0000000000 --- a/node_macros/README.md +++ /dev/null @@ -1,149 +0,0 @@ -# `casper-node-macros` - -[![LOGO](https://raw.githubusercontent.com/CasperLabs/casper-node/master/images/CasperLabs_Logo_Horizontal_RGB.png)](https://casperlabs.io/) - -[![Build Status](https://drone-auto.casperlabs.io/api/badges/CasperLabs/casper-node/status.svg?branch=master)](http://drone-auto.casperlabs.io/CasperLabs/casper-node) -[![Crates.io](https://img.shields.io/crates/v/casper-node-macros)](https://crates.io/crates/casper-node-macros) -[![Documentation](https://docs.rs/casper-node-macros/badge.svg)](https://docs.rs/casper-node-macros) -[![License](https://img.shields.io/badge/license-COSL-blue.svg)](https://github.com/CasperLabs/casper-node/blob/master/LICENSE) - -## License - -Licensed under the [CasperLabs Open Source License (COSL)](https://github.com/CasperLabs/casper-node/blob/master/LICENSE). - ---- - -The `casper-node-macros` crate offers an easy-to-use macro to create reactor implementations for the component system. It enforces a set of convention and allows generating a large amount of otherwise boilerplate heavy setup code comfortably. - -The macro is invoked by calling the `cosy_macro::reactor` macro as follows: - -```rust -reactor!(NameOfReactor { - type Config = ConfigurationType; - - components: { - component_a = CompA(constructor_arg_1, constructor_arg_2, ...); - component_b = has_effects CompB(constructor_arg1, ..); - // ... 
- } - - events: { - component_a = Event; - } - - requests: { - StorageRequest -> component_a; - NetworkRequest -> component_b; - ThirdRequest -> component_a; - AnotherRequest -> !; - } - - announcements: { - NetworkAnnouncement -> [component_a, component_b]; - StorageAnnouncement -> []; - } -}); -``` - -The sections in detail: - -## Outer definition - -The definition of - -```rust -reactor!(NameOfReactor { - type Config = ConfigurationType; - // ... -}); -``` - -indicates that - -* the newly created reactor will be named `NameOfReactor` and -* its configuration type will be `ConfigurationType`. - -The types `NameOfReactorEvent` and `NameOfReactorError` will be automatically generated as well. - -## Component definition - -Components are defined in the first section: - -```rust - components: { - component_a = CompA(constructor_arg_1, constructor_arg_2, ...); - component_b = has_effects CompB(constructor_arg1, ..); - // ... - } -``` - -Here - -* two components will be defined as fields on the reactor struct, the fields being named `component_a` and `component_b` respectively, -* their type will be `crate::components::comp_a::CompA` (automatically deduced from the name), -* they will be constructed passing in `constructor_arg_1`, `constructor_arg_2` to the first and `constructor_arg1` to the second component, -* `CompA::new` will return just the component, while `CompB::new` will return a tuple of `(component, effects)`, indicated by the `has_effects` keyword, -* two variants `NameOfReactorEvent::ComponentA` and `NameOfReactorEvent::ComponentB` will be added to the reactors event type, -* these events will wrap `crate::components::comp_a::Event` (see caveat below) and `crate::components::comp_b::Event` respectively, -* a `From` impl will be generated for `NameOfReactorEvent` (similarly for `comp_b`), -* the appropriate variants will similarly be added to the `NameOfReactorError` enum, -* and all variants of `NameOfReactorEvent` that wrap a component event will be 
forwarded to that component's `handle_event` function. - -Note that during construction, the parameters `cfg`, `registry`, `event_queue` and `rng` are available, as well as the local variable `effect_builder`. - -## Event overrides - -Ideally all `NameOfReactorEvent` newtype variants would be written as `NameOfReactorEvent::SomeComponent(::Event>` in the generated code, which unfortunately is not possible due to a current shortcoming in the Rust trait system that will likely only be fixed with [chalk](https://github.com/rust-lang/chalk). - -As a workaround, `NameOfReactorEvent::SomeComponent(crate::components::some_component::Event` will be used instead. This solution only works for event types that do not have their own type parameters. If they have, the `Event` portion can be replaced using the event override section of the macro invocation: - -```rust - events: { - component_a = Event; - } -``` - -This will result in `crate::components::comp_a::Event` to be used to set the newtypes inner value instead. - -## Request routing - -The third section defines how requests are routed: - -```rust - requests: { - StorageRequest -> component_a; - NetworkRequest -> component_b; - ThirdRequest -> component_a; - AnotherRequest -> #; - } -``` - -In the example, - -* `StorageRequest`s are routed to `component_a`, -* `NetworkRequest`s are routed to `component_b`, -* `ThirdRequest`s are routed to `component_a` (note that multiple requests can be routed to a single component instance), and -* `AnotherRequest` is discarded quietly. - -Instead of `#`, a request can be routed to `!`, which will panic once it receives a request. 
- -Routing a request `ExampleRequest` to an `example_target` means that - -* a `NameOfReactorEvent::ExampleRequest(ExampleRequest)` variant is generated, -* a `From` is generated for `NameOfReactorEvent` and -* when dispatching a `NameOfReactorEvent::ExampleRequest` it will be turned into `example_target`'s event type using `From`, then dispatched to `example_target`'s `handle_event`. - -Not routing a request means that the reactor does not support components that require it. - -## Announcement routing - -Announcements are routed almost exactly like requests - -```rust - announcements: { - NetworkAnnouncement -> [component_a, component_b]; - StorageAnnouncement -> []; - } -``` - -with the key difference being that instead of a single target, an announcement is routed to zero or more instead. `!` and `#` can be used as targets the same way they are used with requests as well. diff --git a/node_macros/src/gen.rs b/node_macros/src/gen.rs deleted file mode 100644 index 0ea76cf482..0000000000 --- a/node_macros/src/gen.rs +++ /dev/null @@ -1,403 +0,0 @@ -use crate::{ - parse::{ReactorDefinition, Target}, - util::suffix_ident, -}; -use proc_macro2::TokenStream; -use quote::quote; - -/// Generates the top level reactor `struct`. -/// -/// Will generate a field for each component to be used. -pub(crate) fn generate_reactor(def: &ReactorDefinition) -> TokenStream { - let reactor_ident = def.reactor_ident(); - - let mut reactor_fields = Vec::new(); - - for component in def.components() { - let field_name = component.field_ident(); - let full_type = component.full_component_type(); - - reactor_fields.push(quote!(#field_name: #full_type)); - } - - quote!( - /// Top-level reactor. - #[derive(Debug)] - pub struct #reactor_ident { - #(#reactor_fields,)* - } - ) -} - -/// Generates types for the reactor implementation. 
-pub(crate) fn generate_reactor_types(def: &ReactorDefinition) -> TokenStream { - let reactor_ident = def.reactor_ident(); - let event_ident = suffix_ident(&reactor_ident, "Event"); - let error_ident = suffix_ident(&reactor_ident, "Error"); - - let mut event_variants = Vec::new(); - let mut error_variants = Vec::new(); - let mut display_variants = Vec::new(); - let mut error_display_variants = Vec::new(); - let mut error_source_variants = Vec::new(); - let mut from_impls = Vec::new(); - - for component in def.components() { - let variant_ident = component.variant_ident(); - let full_event_type = def.component_event(component); - let full_error_type = component.full_error_type(quote!(#event_ident)); - let field_name = component.field_ident().to_string(); - - let event_variant_doc = format!("Event from `{}` component", field_name); - event_variants.push(quote!( - #[doc = #event_variant_doc] - #variant_ident(#full_event_type))); - - let error_variant_doc = format!("Error constructing `{}` component", field_name); - error_variants.push(quote!( - #[doc = #error_variant_doc] - #variant_ident(#full_error_type))); - - display_variants.push(quote!( - #event_ident::#variant_ident(inner) => write!(f, "{}: {}", #field_name, inner) - )); - - error_display_variants.push(quote!( - #error_ident::#variant_ident(inner) => write!(f, "{}: {}", #field_name, inner) - )); - - error_source_variants.push(quote!( - #error_ident::#variant_ident(inner) => Some(inner) - )); - - from_impls.push(quote!( - impl From<#full_event_type> for #event_ident { - fn from(event: #full_event_type) -> Self { - #event_ident::#variant_ident(event) - } - } - )); - } - - // NOTE: Cannot use `From::from` to directly construct next component's event because doing so - // prevents us from implementing discards. - - // Add a variant for each request and a `From` implementation. 
- for request in def.requests() { - let variant_ident = request.variant_ident(); - let full_request_type = request.full_request_type(); - - let event_variant_doc = format!("Incoming `{}`", variant_ident); - event_variants.push(quote!( - #[doc = #event_variant_doc] - #variant_ident(#full_request_type))); - - display_variants.push(quote!( - #event_ident::#variant_ident(inner) => ::std::fmt::Display::fmt(inner, f) - )); - - from_impls.push(quote!( - impl From<#full_request_type> for #event_ident { - fn from(request: #full_request_type) -> Self { - #event_ident::#variant_ident(request) - } - } - )); - } - - for announcement in def.announcements() { - let variant_ident = announcement.variant_ident(); - let full_announcement_type = announcement.full_announcement_type(); - - let event_variant_doc = format!("Incoming `{}`", variant_ident); - event_variants.push(quote!( - #[doc = #event_variant_doc] - #variant_ident(#full_announcement_type))); - - display_variants.push(quote!( - #event_ident::#variant_ident(inner) => ::std::fmt::Display::fmt(inner, f) - )); - - from_impls.push(quote!( - impl From<#full_announcement_type> for #event_ident { - fn from(announcement: #full_announcement_type) -> Self { - #event_ident::#variant_ident(announcement) - } - } - )); - } - - let event_docs = format!("Events of `{}` reactor.", reactor_ident); - let error_docs = format!("Construction errors of `{}` reactor.", reactor_ident); - - quote!( - #[doc = #event_docs] - #[allow(clippy::large_enum_variant)] - #[derive(Debug, serde::Serialize)] - pub enum #event_ident { - #(#event_variants,)* - } - - impl crate::reactor::ReactorEvent for #event_ident { - fn as_control(&self) -> Option<&crate::effect::announcements::ControlAnnouncement> { - if let #event_ident::ControlAnnouncement(ref ctrl_ann) = self { - Some(ctrl_ann) - } else { - None - } - } - } - - #[doc = #error_docs] - #[derive(Debug)] - pub enum #error_ident { - #(#error_variants,)* - /// Failure to initialize metrics. 
- MetricsInitialization(prometheus::Error), - } - - impl std::fmt::Display for #event_ident { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - #(#display_variants,)* - } - } - } - - impl std::fmt::Display for #error_ident { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - #(#error_display_variants,)* - #error_ident::MetricsInitialization(err) => write!(f, "metrics_initialization: {}", err), - } - } - } - - #(#from_impls)* - - impl From for #error_ident { - fn from(err: prometheus::Error) -> Self { - #error_ident::MetricsInitialization(err) - } - } - - impl std::error::Error for #error_ident { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match self { - #(#error_source_variants,)* - #error_ident::MetricsInitialization(inner) => Some(inner), - } - } - } - ) -} - -/// Generates the reactor implementation itself. -pub(crate) fn generate_reactor_impl(def: &ReactorDefinition) -> TokenStream { - let reactor_ident = def.reactor_ident(); - let event_ident = def.event_ident(); - let error_ident = def.error_ident(); - let config = def.config_type().as_given(); - - let mut dispatches = Vec::new(); - - // Generate dispatches for component events. - for component in def.components() { - let variant_ident = component.variant_ident(); - let full_component_type = component.full_component_type(); - let field_ident = component.field_ident(); - - dispatches.push(quote!( - #event_ident::#variant_ident(event) => { - crate::reactor::wrap_effects( - #event_ident::#variant_ident, - <#full_component_type as crate::components::Component<#event_ident>>::handle_event(&mut self.#field_ident, effect_builder, rng, event) - ) - }, - )); - } - - // Dispatch requests as well. 
- for request in def.requests() { - let request_variant_ident = request.variant_ident(); - - match request.target() { - Target::Discard => { - dispatches.push(quote!( - #event_ident::#request_variant_ident(request) => { - // Request is discarded. - // TODO: Add `trace!` call here? Consider the log spam though. - Default::default() - }, - )); - } - Target::Panic => { - dispatches.push(quote!( - #event_ident::#request_variant_ident(request) => { - // Request is discarded. - panic!("received event that was explicitly routed to a panic: {:?}", request) - }, - )); - } - Target::Dest(ref dest) => { - let dest_component_type = def.component(dest).full_component_type(); - let dest_variant_ident = def.component(dest).variant_ident(); - let dest_field_ident = dest; - - dispatches.push(quote!( - #event_ident::#request_variant_ident(request) => { - // Turn request into event for target component. - let dest_event = <#dest_component_type as crate::components::Component>::Event::from(request); - - // Route the newly created event to the component. - crate::reactor::wrap_effects( - #event_ident::#dest_variant_ident, - <#dest_component_type as crate::components::Component>::handle_event(&mut self.#dest_field_ident, effect_builder, rng, dest_event) - ) - }, - )); - } - Target::Dispatch(ref fname) => { - dispatches.push(quote!( - #event_ident::#request_variant_ident(request) => { - self.#fname(effect_builder, rng, request) - }, - )); - } - } - } - - // Announcements dispatched also. - for announcement in def.announcements() { - let announcement_variant_ident = announcement.variant_ident(); - - let mut announcement_dispatches = Vec::new(); - for target in announcement.targets() { - match target { - Target::Discard => { - // Don't do anything. - // TODO: Add `trace!` call here? Consider the log spam though. 
- } - Target::Panic => { - announcement_dispatches.push(quote!( - panic!("announcement received that was expressively declard as panic: {:?}", - announcement); - )); - } - Target::Dest(ref dest) => { - let dest_component_type = def.component(dest).full_component_type(); - let dest_variant_ident = def.component(dest).variant_ident(); - let dest_field_ident = dest; - - announcement_dispatches.push(quote!( - // Dispatch announcement to target: - let dest_event = <#dest_component_type as crate::components::Component>::Event::from(announcement); - - let effects = crate::reactor::wrap_effects( - #event_ident::#dest_variant_ident, - <#dest_component_type as crate::components::Component>::handle_event(&mut self.#dest_field_ident, effect_builder, rng, dest_event) - ); - - announcement_effects.extend(effects.into_iter()); - )); - } - Target::Dispatch(ref fname) => { - announcement_dispatches.push(quote!( - let effects = self.#fname(effect_builder, rng, announcement); - - announcement_effects.extend(effects.into_iter()); - )); - } - } - } - - dispatches.push(quote!( - #event_ident::#announcement_variant_ident(announcement) => { - let mut announcement_effects = crate::effect::Multiple::new(); - - #(#announcement_dispatches)* - - announcement_effects - } - )) - } - - let mut component_instantiations = Vec::new(); - let mut component_fields = Vec::new(); - - for cdef in def.components() { - let field_ident = cdef.field_ident(); - let component_type = cdef.full_component_type(); - let variant_ident = cdef.variant_ident(); - - let constructor_args = cdef.component_arguments(); - - let suffix = if cdef.is_infallible() { - quote!() - } else { - quote!(.map_err(#error_ident::#variant_ident)?) 
- }; - - if cdef.has_effects() { - component_instantiations.push(quote!( - let (#field_ident, effects) = #component_type::new(#(#constructor_args),*) - #suffix; - let wrapped_effects: crate::effect::Effects<#event_ident> = crate::reactor::wrap_effects(#event_ident::#variant_ident, effects); - - all_effects.extend(wrapped_effects.into_iter()); - )); - } else { - component_instantiations.push(quote!( - let #field_ident = #component_type::new(#(#constructor_args),*) - #suffix; - )); - } - - component_fields.push(quote!(#field_ident)); - } - - quote!( - impl crate::reactor::Reactor for #reactor_ident { - type Event = #event_ident; - type Error = #error_ident; - type Config = #config; - - fn dispatch_event( - &mut self, - effect_builder: crate::effect::EffectBuilder, - rng: &mut crate::NodeRng, - event: Self::Event, - ) -> crate::effect::Effects { - match event { - #(#dispatches)* - } - } - - fn new( - cfg: Self::Config, - registry: &prometheus::Registry, - event_queue: crate::reactor::EventQueueHandle, - rng: &mut crate::NodeRng, - ) -> Result<(Self, crate::effect::Effects), Self::Error> { - let mut all_effects = crate::effect::Effects::new(); - - let effect_builder = crate::effect::EffectBuilder::new(event_queue); - - // Instantiate each component. - #(#component_instantiations)* - - // Assign component fields during reactor construction. - let reactor = #reactor_ident { - #(#component_fields,)* - }; - - // To avoid unused warnings. - let _ = effect_builder; - - Ok((reactor, all_effects)) - } - - fn maybe_exit(&self) -> Option { None } - } - ) -} diff --git a/node_macros/src/lib.rs b/node_macros/src/lib.rs deleted file mode 100644 index 40d999cee9..0000000000 --- a/node_macros/src/lib.rs +++ /dev/null @@ -1,36 +0,0 @@ -//! Generates reactors with routing from concise definitions. See `README.md` for details. 
- -#![doc(html_root_url = "https://docs.rs/casper-node-macros/1.0.0")] -#![doc( - html_favicon_url = "https://raw.githubusercontent.com/CasperLabs/casper-node/master/images/CasperLabs_Logo_Favicon_RGB_50px.png", - html_logo_url = "https://raw.githubusercontent.com/CasperLabs/casper-node/master/images/CasperLabs_Logo_Symbol_RGB.png", - test(attr(forbid(warnings))) -)] -#![warn(missing_docs, trivial_casts, trivial_numeric_casts)] - -mod gen; -mod parse; -mod rust_type; -mod util; - -use proc_macro::TokenStream; -use syn::parse_macro_input; - -use parse::ReactorDefinition; - -/// Generates a new reactor implementation, along with types. -#[proc_macro] -pub fn reactor(input: TokenStream) -> TokenStream { - let mut def = parse_macro_input!(input as ReactorDefinition); - - // Insert the control announcements. - def.inject_control_announcements(); - - let mut output: proc_macro2::TokenStream = Default::default(); - - output.extend(gen::generate_reactor(&def)); - output.extend(gen::generate_reactor_types(&def)); - output.extend(gen::generate_reactor_impl(&def)); - - output.into() -} diff --git a/node_macros/src/parse.rs b/node_macros/src/parse.rs deleted file mode 100644 index d4699be079..0000000000 --- a/node_macros/src/parse.rs +++ /dev/null @@ -1,542 +0,0 @@ -//! Parser for reactor macro. -//! -//! Contains the `Parse` implementations for the intermediate representation of the macro, which is -//! `ReactorDefinition`. Many functions required by the code generator are also included here as -//! methods in this representation. 
- -use std::{ - convert::{TryFrom, TryInto}, - fmt::{self, Debug, Formatter}, -}; - -use indexmap::{IndexMap, IndexSet}; -use inflector::cases::pascalcase::to_pascal_case; -use quote::quote; -use syn::{ - braced, bracketed, parenthesized, - parse::{Parse, ParseStream, Result}, - punctuated::Punctuated, - Expr, Ident, ItemType, Path, Token, Type, -}; - -use crate::{rust_type::RustType, util::to_ident}; -use proc_macro2::TokenStream; - -#[derive(Debug)] -pub(crate) struct ReactorDefinition { - /// Identifier of the reactor type. - /// - /// Example: `ExampleReactor`. - reactor_type_ident: Ident, - - /// Reactor's associated configuration type. - /// - /// A full type that will later be the `Reactor::Config` associated type. - config_type: RustType, - - /// Mapping of component attribute names to their types. - /// - /// Example: "net" maps to `crate::components::small_net::SmallNet`. - components: IndexMap, - - /// Overrides for events of components. - /// - /// Example: "net" may have an event type that differs from - /// `crate::components::small_net::Event`. - events: IndexMap, - - /// List of request routing directives. - requests: Vec, - - /// List of announcement routing directives. - announcements: Vec, -} - -impl ReactorDefinition { - /// Returns the reactor's type's identifier (e.g. `ExampleReactor`). - pub fn reactor_ident(&self) -> Ident { - self.reactor_type_ident.clone() - } - - /// Returns the reactor's associated event type's identifier (e.g. `ExampleReactorEvent`). - pub fn event_ident(&self) -> Ident { - let mut event_str = self.reactor_ident().to_string(); - event_str.push_str("Event"); - to_ident(&event_str) - } - - /// Returns the reactor's associated error type's identifier (e.g. `ExampleReactorError`). - pub fn error_ident(&self) -> Ident { - let mut event_str = self.reactor_ident().to_string(); - event_str.push_str("Error"); - to_ident(&event_str) - } - - /// Returns an iterator over all announcement mappings. 
- pub fn announcements(&self) -> impl Iterator { - self.announcements.iter() - } - - /// Returns an iterator over all component definitions. - pub fn components(&self) -> impl Iterator { - self.components.values() - } - - /// Returns the configuration type. - pub fn config_type(&self) -> &RustType { - &self.config_type - } - - /// Returns an iterator over all request mappings. - pub fn requests(&self) -> impl Iterator { - self.requests.iter() - } - - /// Returns the a full component by ident. - pub fn component(&self, ident: &Ident) -> &ComponentDefinition { - &self.components[ident] - } - - /// Returns the type for the event associated with a specific component. - pub fn component_event(&self, component: &ComponentDefinition) -> TokenStream { - let component_type = component.component_type(); - let module_ident = component_type.module_ident(); - - let event_ident = if let Some(event_def) = self.events.get(component.field_ident()) { - let path = event_def.event_type.as_given(); - quote!(#path) - } else { - let ident = to_ident("Event"); - quote!(#ident) - }; - - quote!(crate::components::#module_ident::#event_ident) - } - - /// Update a parsed reactor to include control announcements. - pub fn inject_control_announcements(&mut self) { - // For now, we allow no manual control announcements implementation. - let ty: Type = syn::parse_str("crate::effect::announcements::ControlAnnouncement") - .expect("Hardcoded ControlAnnouncement type could not be parsed"); - self.announcements.push(AnnouncementDefinition { - announcement_type: ty - .try_into() - .expect("could not convert hardcoded `ControlAnnouncement` to `RustType`"), - targets: vec![Target::Panic], - }) - } -} - -impl Parse for ReactorDefinition { - fn parse(input: ParseStream) -> Result { - let content; - // formerly `name` - let reactor_type_ident: Ident = input.parse()?; - - // Outer and config type. - braced!(content in input); - let config: ItemType = content.parse()?; - - // Components. 
- let component_content; - let _: kw::components = content.parse()?; - let _: Token!(:) = content.parse()?; - braced!(component_content in content); - - let mut components = IndexMap::new(); - for cdef in component_content - .parse_terminated::(ComponentDefinition::parse)? - { - components.insert(cdef.name.clone(), cdef); - } - - // Event (-overrides) - let event_content; - let _: kw::events = content.parse()?; - let _: Token!(:) = content.parse()?; - braced!(event_content in content); - - let mut events = IndexMap::new(); - for edef in - event_content.parse_terminated::(EventDefinition::parse)? - { - events.insert(edef.name.clone(), edef); - } - - // Requests. - let requests_content; - let _: kw::requests = content.parse()?; - let _: Token!(:) = content.parse()?; - braced!(requests_content in content); - - let requests: Vec<_> = requests_content - .parse_terminated::(RequestDefinition::parse)? - .into_iter() - .collect(); - - // Announcements. - let announcements_content; - let _: kw::announcements = content.parse()?; - let _: Token!(:) = content.parse()?; - braced!(announcements_content in content); - let announcements: Vec<_> = announcements_content - .parse_terminated::(AnnouncementDefinition::parse)? - .into_iter() - .collect(); - - // We can now perform some rudimentary checks. Component keys are converted to strings, so - // rid them of their span information. - let component_keys: IndexSet<_> = - components.keys().map(|ident| ident.to_string()).collect(); - - // Ensure that the `events` section does not point to non-existing components. - let events_keys: IndexSet<_> = events.keys().collect(); - - // We cannot use the `difference` function, because equal idents compare different based on - // their span. 
- for key in &events_keys { - if !component_keys.contains(&key.to_string()) { - return Err(syn::Error::new_spanned( - key, - format!("An event entry points to a non-existing component: {}", key), - )); - } - } - - // Ensure that requests are not routed to non-existing events. - let request_target_keys: IndexSet<_> = requests - .iter() - .filter_map(|req| req.target.as_dest()) - .collect(); - - for key in &request_target_keys { - if !component_keys.contains(&key.to_string()) { - return Err(syn::Error::new_spanned( - key, - format!("An request route to a non-existing component: {}", key), - )); - } - } - - // Ensure that requests are not routed to non-existing events. - let announce_target_keys: IndexSet<_> = announcements - .iter() - .map(|ann| ann.targets.iter()) - .flatten() - .filter_map(Target::as_dest) - .collect(); - - for key in &announce_target_keys { - if !component_keys.contains(&key.to_string()) { - return Err(syn::Error::new_spanned( - key, - format!("An announcement route to a non-existing component: {}", key), - )); - } - } - - Ok(ReactorDefinition { - reactor_type_ident, - config_type: RustType::try_from(config.ty.as_ref().clone()) - .map_err(|err| syn::parse::Error::new_spanned(config.ty, err))?, - components, - events, - requests, - announcements, - }) - } -} - -/// A definition of a component. -pub(crate) struct ComponentDefinition { - /// The attribute-style name of the component, e.g. `net`. - name: Ident, - /// The components type. - component_type: RustType, - /// Arguments passed to the components `new` constructor when constructing. - component_arguments: Vec, - /// Whether or not the component has actual effects when constructed. - has_effects: bool, - /// Whether or not the component's `new` function returns a component instead of a `Result`. - is_infallible: bool, -} - -impl ComponentDefinition { - /// Returns the component construction arguments. 
- pub(crate) fn component_arguments(&self) -> &[Expr] { - self.component_arguments.as_slice() - } - - /// Returns an ident identifying the component that is suitable for a struct field, e.g. `net`. - pub(crate) fn field_ident(&self) -> &Ident { - &self.name - } - - /// Returns an ident identifying the component that is suitable for a variant, e.g. `Net`. - pub fn variant_ident(&self) -> Ident { - to_ident(&to_pascal_case(&self.field_ident().to_string())) - } - - /// Returns the type of the component. - pub(crate) fn component_type(&self) -> &RustType { - &self.component_type - } - - /// Returns the full path for a component by prefixing it with `crate::components::`, e.g. - /// `crate::components::small_net::SmallNet` - pub fn full_component_type(&self) -> TokenStream { - let component_type = self.component_type(); - let module_ident = component_type.module_ident(); - let ty = component_type.ty(); - quote!(crate::components::#module_ident::#ty) - } - - /// Returns the full path for a component's event e.g. `crate::components::small_net::Error` - pub fn full_error_type(&self, reactor_event_type: TokenStream) -> TokenStream { - let comp_type = self.full_component_type(); - quote!(<#comp_type as crate::components::Component<#reactor_event_type>>::ConstructionError) - } - - /// Returns whether or not the component returns effects upon instantiation. - pub fn has_effects(&self) -> bool { - self.has_effects - } - - /// Returns whether the component always returns a component directly instead of a `Result`. 
- pub fn is_infallible(&self) -> bool { - self.is_infallible - } -} - -impl Debug for ComponentDefinition { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - f.debug_struct("ComponentDefinition") - .field("name", &self.name.to_string()) - .field("component_type", &self.component_type) - .field("component_arguments", &"TODO: fmtargs") - .finish() - } -} - -impl Parse for ComponentDefinition { - fn parse(input: ParseStream) -> Result { - // Parse left hand side and type def. - let name: Ident = input.parse()?; - let _: Token!(=) = input.parse()?; - - let has_effects = if input.peek(kw::has_effects) { - let _: kw::has_effects = input.parse()?; - true - } else { - false - }; - - let is_infallible = if input.peek(kw::infallible) { - let _: kw::infallible = input.parse()?; - true - } else { - false - }; - - let ty: Path = input.parse()?; - - // Parse arguments - let content; - parenthesized!(content in input); - - let args: Punctuated = content.parse_terminated(Expr::parse)?; - Ok(ComponentDefinition { - name, - component_type: RustType::new(ty), - component_arguments: args.into_iter().collect(), - has_effects, - is_infallible, - }) - } -} - -/// An event-definition -/// -/// Typically only used to override tricky event definitions. -#[derive(Debug)] -pub(crate) struct EventDefinition { - /// Identifier of the components. - pub name: Ident, - /// Event type to use. - pub event_type: RustType, -} - -impl Parse for EventDefinition { - fn parse(input: ParseStream) -> Result { - // Parse left hand side and type def. - let name: Ident = input.parse()?; - let _: Token!(=) = input.parse()?; - let ty: Path = input.parse()?; - - Ok(EventDefinition { - name, - event_type: RustType::new(ty), - }) - } -} - -#[derive(Debug)] -/// A definition of a request routing. -pub(crate) struct RequestDefinition { - pub request_type: RustType, - pub target: Target, -} - -impl RequestDefinition { - /// Returns an ident identifying the request that is suitable for a variant, e.g. 
- /// `NetworkRequest`. - pub fn variant_ident(&self) -> Ident { - self.request_type.ident() - } - - /// Returns the type of the request. - pub(crate) fn request_type(&self) -> &RustType { - &self.request_type - } - - /// Returns the target of the request. - pub(crate) fn target(&self) -> &Target { - &self.target - } - - /// Returns the full path for a request. - pub fn full_request_type(&self) -> TokenStream { - let request_type = self.request_type(); - let ty = request_type.ty(); - quote!(crate::effect::requests::#ty) - } -} - -impl Parse for RequestDefinition { - fn parse(input: ParseStream) -> Result { - let request_type = RustType::new(input.parse()?); - let _: Token!(->) = input.parse()?; - - let target = input.parse()?; - - Ok(RequestDefinition { - request_type, - target, - }) - } -} - -#[derive(Debug)] -/// A definition of an announcement. -pub(crate) struct AnnouncementDefinition { - pub announcement_type: RustType, - pub targets: Vec, -} - -impl AnnouncementDefinition { - /// Returns the type of the announcement. - pub(crate) fn announcement_type(&self) -> &RustType { - &self.announcement_type - } - - /// Returns the full path for an announcement. - pub(crate) fn full_announcement_type(&self) -> TokenStream { - let announcement_type = self.announcement_type(); - let ty = announcement_type.ty(); - quote!(crate::effect::announcements::#ty) - } - - /// Returns an iterator over the targets of the announcement. - pub(crate) fn targets(&self) -> impl Iterator { - self.targets.iter() - } - - /// Returns an ident identifying the announcement that is suitable for a variant, e.g. - /// `NetworkAnnouncement`. 
- pub fn variant_ident(&self) -> Ident { - self.announcement_type.ident() - } -} - -impl Parse for AnnouncementDefinition { - fn parse(input: ParseStream) -> Result { - let announcement_type = RustType::new(input.parse()?); - let _: Token!(->) = input.parse()?; - - let content; - bracketed!(content in input); - let targets = content - .parse_terminated::(Target::parse)? - .into_iter() - .collect(); - - Ok(AnnouncementDefinition { - announcement_type, - targets, - }) - } -} - -/// A routing target. -pub(crate) enum Target { - /// Discard whatever is being routed. - Discard, - /// When anything is routed to this target, panic. - Panic, - /// Forward to destination. - Dest(Ident), - /// Dispatch using a method. - Dispatch(Ident), -} - -impl Target { - /// Returns a reference to the destination identifier if the target is a destination, or `None`. - fn as_dest(&self) -> Option<&Ident> { - match self { - Target::Discard | Target::Panic | Target::Dispatch(_) => None, - Target::Dest(ident) => Some(ident), - } - } -} - -impl Debug for Target { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - match self { - Target::Discard => write!(f, "#"), - Target::Panic => write!(f, "!"), - Target::Dest(id) => write!(f, "{}", id.to_string()), - Target::Dispatch(id) => write!(f, "{}()", id.to_string()), - } - } -} - -impl Parse for Target { - fn parse(input: ParseStream) -> Result { - if input.peek(Token!(!)) { - let _: Token!(!) = input.parse()?; - Ok(Target::Panic) - } else if input.peek(Token!(#)) { - let _: Token!(#) = input.parse()?; - Ok(Target::Discard) - } else if input.peek(Token!(fn)) { - let _: Token!(fn) = input.parse()?; - let dispatch = input.parse()?; - - Ok(Target::Dispatch(dispatch)) - } else { - input.parse().map(Target::Dest) - } - } -} - -/// Custom keywords. -/// -/// This module groups custom keywords used by the parser. 
-mod kw { - syn::custom_keyword!(components); - syn::custom_keyword!(events); - syn::custom_keyword!(requests); - syn::custom_keyword!(announcements); - syn::custom_keyword!(infallible); - syn::custom_keyword!(has_effects); -} diff --git a/node_macros/src/rust_type.rs b/node_macros/src/rust_type.rs deleted file mode 100644 index 0897e2ca5d..0000000000 --- a/node_macros/src/rust_type.rs +++ /dev/null @@ -1,76 +0,0 @@ -//! A full (but unchecked) Rust type, along with convenience methods. - -use fmt::Debug; -use std::{ - convert::TryFrom, - fmt::{self, Formatter}, -}; - -use inflector::cases::snakecase::to_snake_case; -use proc_macro2::TokenStream; -use quote::quote; -use syn::{Ident, Path, Type}; - -use crate::util::to_ident; - -/// A fully pathed Rust type with type arguments, e.g. `crate::components::SmallNet`. -pub(crate) struct RustType(Path); - -impl Debug for RustType { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - let path = &self.0; - write!(f, "{}", quote!(#path).to_string()) - } -} - -impl RustType { - /// Creates a new `RustType` from a path. - pub fn new(path: Path) -> Self { - RustType(path) - } - - /// Returns the types identifier without type arguments, e.g. `SmallNet`. - pub fn ident(&self) -> Ident { - self.0 - .segments - .last() - .expect("type has no last part?") - .ident - .clone() - } - - /// Returns the type without the path, but with type arguments, e.g. `SmallNet`. - pub fn ty(&self) -> TokenStream { - let ident = self.ident(); - let args = &self - .0 - .segments - .last() - .expect("type has no last part?") - .arguments; - quote!(#ident #args) - } - - /// Returns the full type as it was given in the macro call. - pub fn as_given(&self) -> &Path { - &self.0 - } - - /// Returns the module name that canonically would contain the type, e.g. `small_net`. - /// - /// Based on the identifier only, i.e. will discard any actualy path. 
- pub fn module_ident(&self) -> Ident { - to_ident(&to_snake_case(&self.ident().to_string())) - } -} - -impl TryFrom for RustType { - type Error = String; - - fn try_from(value: Type) -> core::result::Result { - match value { - Type::Path(type_path) => Ok(RustType(type_path.path)), - broken => Err(format!("cannot convert input {:?} to RustType", broken)), - } - } -} diff --git a/node_macros/src/util.rs b/node_macros/src/util.rs deleted file mode 100644 index 9b855e03dc..0000000000 --- a/node_macros/src/util.rs +++ /dev/null @@ -1,11 +0,0 @@ -use proc_macro2::Span; -use syn::Ident; - -pub(crate) fn to_ident(s: &str) -> Ident { - Ident::new(s, Span::call_site()) -} - -/// Returns an ident with a suffix. -pub(crate) fn suffix_ident(ident: &Ident, suffix: &str) -> Ident { - to_ident(&format!("{}{}", ident.to_string(), suffix)) -} diff --git a/resources/dev-net/chainspec.toml b/resources/dev-net/chainspec.toml new file mode 100644 index 0000000000..b7e547fa11 --- /dev/null +++ b/resources/dev-net/chainspec.toml @@ -0,0 +1,505 @@ +[protocol] +# Protocol version. +version = '2.0.4' +# Whether we need to clear latest blocks back to the switch block just before the activation point or not. +hard_reset = true +# This protocol version becomes active at this point. +# +# If it is a timestamp string, it represents the timestamp for the genesis block. This is the beginning of era 0. By +# this time, a sufficient majority (> 50% + F/2 — see finality_threshold_fraction below) of validator nodes must be up +# and running to start the blockchain. This timestamp is also used in seeding the pseudo-random number generator used +# in contract-runtime for computing genesis post-state hash. +# +# If it is an integer, it represents an era ID, meaning the protocol version becomes active at the start of this era. +activation_point = 16444 + +[network] +# Human readable name for convenience; the genesis_hash is the true identifier. 
The name influences the genesis hash by +# contributing to the seeding of the pseudo-random number generator used in contract-runtime for computing genesis +# post-state hash. +name = 'dev-net' +# The maximum size of an acceptable networking message in bytes. Any message larger than this will +# be rejected at the networking level. +maximum_net_message_size = 25_165_824 + +[core] +# Era duration. +era_duration = '120 minutes' +# Minimum number of blocks per era. An era will take longer than `era_duration` if that is necessary to reach the +# minimum height. +minimum_era_height = 20 +# Minimum difference between a block's and its child's timestamp. +minimum_block_time = '16384 ms' +# Number of slots available in validator auction. +validator_slots = 100 +# A number between 0 and 1 representing the fault tolerance threshold as a fraction, used by the internal finalizer. +# It is the fraction of validators that would need to equivocate to make two honest nodes see two conflicting blocks as +# finalized: A higher value F makes it safer to rely on finalized blocks. It also makes it more difficult to finalize +# blocks, however, and requires strictly more than (F + 1)/2 validators to be working correctly. +finality_threshold_fraction = [1, 3] +# Protocol version from which nodes are required to hold strict finality signatures. +start_protocol_version_with_strict_finality_signatures_required = '1.5.0' +# Which finality is required for legacy blocks. Options are 'Strict', 'Weak' and 'Any'. +# Used to determine finality sufficiency for new joiners syncing blocks created +# in a protocol version before +# `start_protocol_version_with_strict_finality_signatures_required`. +legacy_required_finality = 'Strict' +# Number of eras before an auction actually defines the set of validators. If you bond with a sufficient bid in era N, +# you will be a validator in era N + auction_delay + 1. 
+auction_delay = 1 +# The period after genesis during which a genesis validator's bid is locked. +locked_funds_period = '0 days' +# The period in which genesis validator's bid is released over time after it's unlocked. +vesting_schedule_period = '0 weeks' +# Default number of eras that need to pass to be able to withdraw unbonded funds. +unbonding_delay = 7 +# Round seigniorage rate represented as a fraction of the total supply. +# +# Annual issuance: 8% +# Minimum block time: 2^14 milliseconds +# Ticks per year: 31536000000 +# +# (1+0.08)^((2^14)/31536000000)-1 is expressed as a fractional number below +# Python: +# from fractions import Fraction +# Fraction((1 + 0.08)**((2**14)/31536000000) - 1).limit_denominator(1000000000) +round_seigniorage_rate = [7, 175070816] +# Maximum number of associated keys for a single account. +max_associated_keys = 100 +# Maximum height of contract runtime call stack. +max_runtime_call_stack_height = 12 +# Minimum allowed delegation amount in motes +minimum_delegation_amount = 500_000_000_000 +# Maximum allowed delegation amount in motes +maximum_delegation_amount = 1_000_000_000_000_000_000 +# Minimum bid amount allowed in motes. Withdrawing one's bid to an amount strictly less than +# the value specified will be treated as a full unbond of a validator and their associated delegators +minimum_bid_amount = 10_000_000_000_000 +# Global state prune batch size (0 = this feature is off) +prune_batch_size = 0 +# Enables strict arguments checking when calling a contract; i.e. that all non-optional args are provided and of the correct `CLType`. +strict_argument_checking = false +# Number of simultaneous peer requests. +simultaneous_peer_requests = 5 +# The consensus protocol to use. Options are "Zug" and "Highway". +consensus_protocol = 'Zug' +# The maximum amount of delegators per validator. +max_delegators_per_validator = 1200 +# The split in finality signature rewards between block producer and participating signers. 
+finders_fee = [1, 5] +# The proportion of baseline rewards going to reward finality signatures specifically. +finality_signature_proportion = [95, 100] +# Lookback interval indicating which past block we are looking at to reward. +signature_rewards_max_delay = 3 +# Allows transfers between accounts in the blockchain network. +# +# Setting this to false restricts normal accounts from sending tokens to other accounts, allowing transfers only to administrators. +# Changing this option makes sense only on private chains. +allow_unrestricted_transfers = true +# Enables the auction entry points 'delegate' and 'add_bid'. +# +# Setting this to false makes sense only for private chains which don't need to auction new validator slots. These +# auction entry points will return an error if called when this option is set to false. +allow_auction_bids = true +# If set to false, then consensus doesn't compute rewards and always uses 0. +compute_rewards = true +# Defines how refunds of the unused portion of payment amounts are calculated and handled. +# +# Valid options are: +# 'refund': a ratio of the unspent token is returned to the spender. +# 'burn': a ratio of the unspent token is burned. +# 'no_refund': no refunds are paid out; this is functionally equivalent to refund with 0% ratio. +# This causes excess payment amounts to be sent to either a +# pre-defined purse, or back to the sender. The refunded amount is calculated as the given ratio of the payment amount +# minus the execution costs. +refund_handling = { type = 'refund', refund_ratio = [75, 100] } +# Defines how fees are handled. +# +# Valid options are: +# 'no_fee': fees are eliminated. 
+# 'pay_to_proposer': fees are paid to the block proposer +# 'accumulate': fees are accumulated in a special purse and distributed at the end of each era evenly among all +# administrator accounts +# 'burn': fees are burned +fee_handling = { type = 'pay_to_proposer' } +# If a validator would recieve a validator credit, it cannot exceed this percentage of their total stake. +validator_credit_cap = [1, 5] +# Defines how pricing is handled. +# +# Valid options are: +# 'payment_limited': senders of transaction self-specify how much they pay. +# 'fixed': costs are fixed, per the cost table +# 'prepaid': prepaid transaction (currently not supported) +pricing_handling = { type = 'payment_limited' } +# Does the network allow pre-payment for future +# execution? Currently not supported. +# +allow_prepaid = false +# Defines how gas holds affect available balance calculations. +# +# Valid options are: +# 'accrued': sum of full value of all non-expired holds. +# 'amortized': sum of each hold is amortized over the time remaining until expiry. +# +# For instance, if 12 hours remained on a gas hold with a 24-hour `gas_hold_interval`, +# with accrued, the full hold amount would be applied +# with amortized, half the hold amount would be applied +gas_hold_balance_handling = { type = 'accrued' } +# Defines how long gas holds last. +# +# If fee_handling is set to 'no_fee', the system places a balance hold on the payer +# equal to the value the fee would have been. Such balance holds expire after a time +# interval has elapsed. This setting controls how long that interval is. The available +# balance of a purse equals its total balance minus the held amount(s) of non-expired +# holds (see gas_hold_balance_handling setting for details of how that is calculated). 
+# +# For instance, if gas_hold_interval is 24 hours and 100 gas is used from a purse, +# a hold for 100 is placed on that purse and is considered when calculating total balance +# for 24 hours starting from the block_time when the hold was placed. +gas_hold_interval = '24 hours' +# List of public keys of administrator accounts. Setting this option makes only on private chains which require +# administrator accounts for regulatory reasons. +administrators = [] +# Flag that triggers a migration of all userland accounts and contracts present in global state to the addressable +# entity in lazy manner. +# If the flag is set to false then no accounts and contracts are migrated during a protocol upgrade; +# i.e. all Account records will be present under Key::Account and Contracts and their associated ContractPackage +# will be written underneath Key::Hash. +# If the flag is set to true then accounts and contracts are migrated lazily; i.e on first use of the Account +# and/or Contract as part of the execution of a Transaction. This means the Accounts/Contracts will be migrated +# to their corresponding AddressableEntity and the NamedKeys for previous record and sepeareted and wrriten +# as discrete top level records. For Contracts specifically the entrypoints are also written as discrete top +# level records +# Note: Enabling of the AddressableEntity feature is one-way; i.e once enabled as part of a protocol upgrade +# the flag cannot be disabled in a future protocol upgrade. +enable_addressable_entity = false +# This value is used as the penalty payment amount, the lowest cost, and the minimum balance amount. +baseline_motes_amount = 2_500_000_000 +# Flag on whether ambiguous entity versions returns an execution error. +trap_on_ambiguous_entity_version = false + +[highway] +# Highway dynamically chooses its round length, between minimum_block_time and maximum_round_length. 
+maximum_round_length = '66 seconds' + +[transactions] +# The duration after the transaction timestamp that it can be included in a block. +max_ttl = '2 hours' +# The maximum number of approvals permitted in a single block. +block_max_approval_count = 2600 +# Maximum block size in bytes including transactions contained by the block. 0 means unlimited. +max_block_size = 5_242_880 +# The upper limit of total gas of all transactions in a block. +block_gas_limit = 1_625_000_000_000 +# The minimum amount in motes for a valid native transfer. +native_transfer_minimum_motes = 2_500_000_000 +# The maximum value to which `transaction_acceptor.timestamp_leeway` can be set in the config.toml file. +max_timestamp_leeway = '5 seconds' + +# Configuration of the transaction runtime. +[transactions.enabled_runtime] +vm_casper_v1 = true +vm_casper_v2 = true + +[transactions.v1] +# The configuration settings for the lanes of transactions including both native and Wasm based interactions. +# Currently the node supports two native interactions the mint and auction and have the reserved identifiers of 0 and 1 +# respectively +# The remaining wasm based lanes specify the range of configuration settings for a given Wasm based transaction +# within a given lane. +# The maximum length in bytes of runtime args per V1 transaction. +# [0] -> Transaction lane label (apart from the reserved native identifiers these are simply labels) +# Note: For the given mainnet implementation we specially reserve the label 2 for install and upgrades and +# the lane must be present and defined. +# Different casper networks may not impose such a restriction. 
+# [1] -> Max serialized length of the entire transaction in bytes for a given transaction in a certain lane +# [2] -> Max args length size in bytes for a given transaction in a certain lane +# [3] -> Transaction gas limit for a given transaction in a certain lane +# [4] -> The maximum number of transactions the lane can contain +native_mint_lane = [0, 2048, 1024, 100_000_000, 650] +native_auction_lane = [1, 3096, 2048, 2_500_000_000, 650] +install_upgrade_lane = [2, 750_000, 2048, 1_000_000_000_000, 1] +wasm_lanes = [ + [3, 750_000, 2048, 1_000_000_000_000, 1], + [4, 131_072, 1024, 100_000_000_000, 2], + [5, 65_536, 512, 5_000_000_000, 80] +] + +[transactions.deploy] +# The maximum number of Motes allowed to be spent during payment. 0 means unlimited. +max_payment_cost = '0' +# The limit of length of serialized payment code arguments. +payment_args_max_length = 1024 +# The limit of length of serialized session code arguments. +session_args_max_length = 1024 + +[wasm.v1] +# Amount of free memory (in 64kB pages) each contract can use for stack. +max_memory = 64 +# Max stack height (native WebAssembly stack limiter). +max_stack_height = 500 + +[storage_costs] +# Gas charged per byte stored in the global state. +gas_per_byte = 1_117_587 + +# For each opcode cost below there exists a static cost and a dynamic cost. +# The static cost is a fixed cost for each opcode that is hardcoded and validated by benchmarks. +[wasm.v1.opcode_costs] +# Bit operations multiplier. +bit = 105 +# Arithmetic add operations multiplier. +add = 105 +# Mul operations multiplier. +mul = 105 +# Div operations multiplier. +div = 105 +# Memory load operation multiplier. +load = 105 +# Memory store operation multiplier. +store = 105 +# Const store operation multiplier. +const = 105 +# Local operations multiplier. +local = 105 +# Global operations multiplier. +global = 105 +# Integer operations multiplier. +integer_comparison = 105 +# Conversion operations multiplier. 
+conversion = 105 +# Unreachable operation multiplier. +unreachable = 105 +# Nop operation multiplier. +nop = 105 +# Get current memory operation multiplier. +current_memory = 105 +# Grow memory cost, per page (64kb). +grow_memory = 900 +# Sign extension operations cost +sign = 105 + +# Control flow operations multiplier. +[wasm.v1.opcode_costs.control_flow] +block = 255 +loop = 255 +if = 105 +else = 105 +end = 105 +br = 1665 +br_if = 510 +return = 105 +select = 105 +call = 225 +call_indirect = 270 +drop = 105 + +[wasm.v1.opcode_costs.control_flow.br_table] +# Fixed cost per `br_table` opcode +cost = 150 +# Size of target labels in the `br_table` opcode will be multiplied by `size_multiplier` +size_multiplier = 100 + +# Host function declarations are located in smart_contracts/contract/src/ext_ffi.rs +[wasm.v1.host_function_costs] +add = { cost = 5_800, arguments = [0, 0, 0, 0] } +add_associated_key = { cost = 1_200_000, arguments = [0, 0, 0] } +add_contract_version = { cost = 200, arguments = [0, 0, 0, 0, 120_000, 0, 0, 0, 0, 0] } +add_contract_version_with_message_topics = { cost = 200, arguments = [0, 0, 0, 0, 120_000, 0, 0, 0, 30_000, 0, 0] } +add_package_version_with_message_topics = { cost = 200, arguments = [0, 0, 0, 0, 120_000, 0, 0, 0, 30_000, 0, 0] } +blake2b = { cost = 1_200_000, arguments = [0, 120_000, 0, 0] } +call_contract = { cost = 300_000_000, arguments = [0, 0, 0, 120_000, 0, 120_000, 0] } +call_versioned_contract = { cost = 300_000_000, arguments = [0, 0, 0, 0, 0, 120_000, 0, 120_000, 0] } +create_contract_package_at_hash = { cost = 200, arguments = [0, 0] } +create_contract_user_group = { cost = 200, arguments = [0, 0, 0, 0, 0, 0, 0, 0] } +create_purse = { cost = 2_500_000_000, arguments = [0, 0] } +disable_contract_version = { cost = 200, arguments = [0, 0, 0, 0] } +get_balance = { cost = 3_000_000, arguments = [0, 0, 0] } +get_blocktime = { cost = 330, arguments = [0] } +get_caller = { cost = 380, arguments = [0] } +get_key = { cost = 2_000, 
arguments = [0, 440, 0, 0, 0] } +get_main_purse = { cost = 1_300, arguments = [0] } +get_named_arg = { cost = 200, arguments = [0, 120_000, 0, 120_000] } +get_named_arg_size = { cost = 200, arguments = [0, 0, 0] } +get_phase = { cost = 710, arguments = [0] } +get_system_contract = { cost = 1_100, arguments = [0, 0, 0] } +has_key = { cost = 1_500, arguments = [0, 840] } +is_valid_uref = { cost = 760, arguments = [0, 0] } +load_named_keys = { cost = 42_000, arguments = [0, 0] } +new_uref = { cost = 17_000, arguments = [0, 0, 590] } +random_bytes = { cost = 200, arguments = [0, 0] } +print = { cost = 20_000, arguments = [0, 4_600] } +provision_contract_user_group_uref = { cost = 200, arguments = [0, 0, 0, 0, 0] } +put_key = { cost = 100_000_000, arguments = [0, 120_000, 0, 120_000] } +read_host_buffer = { cost = 3_500, arguments = [0, 310, 0] } +read_value = { cost = 60_000, arguments = [0, 120_000, 0] } +dictionary_get = { cost = 5_500, arguments = [0, 590, 0] } +remove_associated_key = { cost = 4_200, arguments = [0, 0] } +remove_contract_user_group = { cost = 200, arguments = [0, 0, 0, 0] } +remove_contract_user_group_urefs = { cost = 200, arguments = [0, 0, 0, 0, 0, 120_000] } +remove_key = { cost = 61_000, arguments = [0, 3_200] } +ret = { cost = 23_000, arguments = [0, 420_000] } +revert = { cost = 500, arguments = [0] } +set_action_threshold = { cost = 74_000, arguments = [0, 0] } +transfer_from_purse_to_account = { cost = 2_500_000_000, arguments = [0, 0, 0, 0, 0, 0, 0, 0, 0] } +transfer_from_purse_to_purse = { cost = 82_000_000, arguments = [0, 0, 0, 0, 0, 0, 0, 0] } +transfer_to_account = { cost = 2_500_000_000, arguments = [0, 0, 0, 0, 0, 0, 0] } +update_associated_key = { cost = 4_200, arguments = [0, 0, 0] } +write = { cost = 14_000, arguments = [0, 0, 0, 980] } +dictionary_put = { cost = 9_500, arguments = [0, 1_800, 0, 520] } +enable_contract_version = { cost = 200, arguments = [0, 0, 0, 0] } +manage_message_topic = { cost = 200, arguments = [0, 30_000, 
0, 0] } +emit_message = { cost = 200, arguments = [0, 30_000, 0, 120_000] } +generic_hash = { cost = 1_200_000, arguments = [0, 120_000, 0, 0, 0] } +cost_increase_per_message = 50 +get_block_info = { cost = 330, arguments = [0, 0] } +recover_secp256k1 = { cost = 1_300_000, arguments = [0, 120_000, 0, 0, 0, 0] } +verify_signature = { cost = 1_300_000, arguments = [0, 120_000, 0, 0, 0, 0] } +call_package_version = { cost = 300_000_000, arguments = [0, 0, 0, 0, 0, 0, 0, 120_000, 0, 120_000, 0] } + +[wasm.v2] +# Amount of free memory each contract can use for stack. +max_memory = 17 + +[wasm.v2.opcode_costs] +# Bit operations multiplier. +bit = 105 +# Arithmetic add operations multiplier. +add = 105 +# Mul operations multiplier. +mul = 105 +# Div operations multiplier. +div = 105 +# Memory load operation multiplier. +load = 105 +# Memory store operation multiplier. +store = 105 +# Const store operation multiplier. +const = 105 +# Local operations multiplier. +local = 105 +# Global operations multiplier. +global = 105 +# Integer operations multiplier. +integer_comparison = 105 +# Conversion operations multiplier. +conversion = 105 +# Unreachable operation multiplier. +unreachable = 105 +# Nop operation multiplier. +nop = 105 +# Get current memory operation multiplier. +current_memory = 105 +# Grow memory cost, per page (64kb). +grow_memory = 900 +# Sign extension operations cost +sign = 105 + +# Control flow operations multiplier. 
+[wasm.v2.opcode_costs.control_flow] +block = 255 +loop = 255 +if = 105 +else = 105 +end = 105 +br = 1665 +br_if = 510 +return = 105 +select = 105 +call = 225 +call_indirect = 270 +drop = 105 + +[wasm.v2.opcode_costs.control_flow.br_table] +# Fixed cost per `br_table` opcode +cost = 150 +# Size of target labels in the `br_table` opcode will be multiplied by `size_multiplier` +size_multiplier = 100 + +[wasm.v2.host_function_costs] +read = { cost = 0, arguments = [0, 0, 0, 0, 0, 0] } +write = { cost = 0, arguments = [0, 0, 0, 0, 0] } +remove = { cost = 0, arguments = [0, 0, 0] } +copy_input = { cost = 0, arguments = [0, 0] } +ret = { cost = 0, arguments = [0, 0] } +create = { cost = 0, arguments = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] } +transfer = { cost = 0, arguments = [0, 0, 0] } +env_balance = { cost = 0, arguments = [0, 0, 0, 0] } +upgrade = { cost = 0, arguments = [0, 0, 0, 0, 0, 0] } +call = { cost = 0, arguments = [0, 0, 0, 0, 0, 0, 0, 0, 0] } +print = { cost = 0, arguments = [0, 0] } +emit = { cost = 0, arguments = [0, 0, 0, 0] } +env_info = { cost = 0, arguments = [0, 0] } + +[wasm.messages_limits] +max_topic_name_size = 256 +max_topics_per_contract = 128 +max_message_size = 1_024 + +[system_costs] +# Penalty charge for calling invalid entry point in a system contract. 
+no_such_entrypoint = 2_500_000_000
+
+[system_costs.auction_costs]
+get_era_validators = 2_500_000_000
+read_seigniorage_recipients = 5_000_000_000
+add_bid = 2_500_000_000
+withdraw_bid = 2_500_000_000
+delegate = 2_500_000_000
+undelegate = 2_500_000_000
+run_auction = 2_500_000_000
+slash = 2_500_000_000
+distribute = 2_500_000_000
+withdraw_delegator_reward = 5_000_000_000
+withdraw_validator_reward = 5_000_000_000
+read_era_id = 2_500_000_000
+activate_bid = 2_500_000_000
+redelegate = 2_500_000_000
+change_bid_public_key = 5_000_000_000
+add_reservations = 2_500_000_000
+cancel_reservations = 2_500_000_000
+
+[system_costs.mint_costs]
+mint = 2_500_000_000
+reduce_total_supply = 2_500_000_000
+create = 2_500_000_000
+balance = 100_000_000
+burn = 100_000_000
+transfer = 100_000_000
+read_base_round_reward = 2_500_000_000
+mint_into_existing_purse = 2_500_000_000
+
+[system_costs.handle_payment_costs]
+get_payment_purse = 10_000
+set_refund_purse = 10_000
+get_refund_purse = 10_000
+finalize_payment = 2_500_000_000
+
+[system_costs.standard_payment_costs]
+pay = 10_000
+
+[vacancy]
+# The cost of a transaction is based on a multiplier. This allows for economic disincentives for misuse of the network.
+#
+# The network starts with a current_gas_price of min_gas_price.
+#
+# Each block has multiple limits (bytes, transactions, transfers, gas, etc.)
+# The utilization for a block is determined by the highest percentage utilization of each of these limits.
+#
+# Ex: transfers limit is 650 and transactions limit is 20 (assume other limits are not a factor here)
+# 19 transactions -> 19/20 or 95%
+# 600 transfers -> 600/650 or 92.3%
+# resulting block utilization is 95
+#
+# The utilization for an era is the average of all block utilizations. At the switch block, the dynamic gas_price is
+# adjusted with the following:
+#
+# If utilization was below the lower_threshold, current_gas_price is decremented by one if higher than min_gas_price. 
+# If utilization falls between the thresholds, current_gas_price is not changed. +# If utilization was above the upper_threshold, current_gas_price is incremented by one if lower than max_gas_price. +# +# The cost charged for the transaction is simply the gas_used * current_gas_price. +upper_threshold = 90 +lower_threshold = 50 +max_gas_price = 1 +min_gas_price = 1 diff --git a/resources/dev-net/config-example.toml b/resources/dev-net/config-example.toml new file mode 100644 index 0000000000..6b8cb67a36 --- /dev/null +++ b/resources/dev-net/config-example.toml @@ -0,0 +1,651 @@ +# ================================ +# Configuration options for a node +# ================================ +[node] + +# If set, use this hash as a trust anchor when joining an existing network. +#trusted_hash = 'HEX-FORMATTED BLOCK HASH' + +# Historical sync behavior for this node. Options are: +# 'ttl' (node will attempt to acquire all block data to comply with time to live enforcement) +# 'genesis' (node will attempt to acquire all block data back to genesis) +# 'nosync' (node will only acquire blocks moving forward) +# 'isolated' (node will initialize without peers and will not accept peers) +# 'completeblock' (node will acquire complete block and shutdown) +# note: the only two states allowed to switch to Validate reactor state are `genesis` and `ttl`. +# it is recommended for dedicated validator nodes to be in ttl mode to increase +# their ability to maintain maximal uptime...if a long-running genesis validator +# goes offline and comes back up while in genesis mode, it must backfill +# any gaps in its block awareness before resuming validation. +# +# it is recommended for reporting non-validator nodes to be in genesis mode to +# enable support for queries at any block height. +# +# it is recommended for non-validator working nodes (for dapp support, etc) to run in +# ttl or nosync mode (depending upon their specific data requirements). 
+#
+# thus for instance a node backing a block explorer would prefer genesis mode,
+# while a node backing a dapp interested in very recent activity would prefer to run in nosync mode,
+# and a node backing a dapp interested in auction activity or tracking trends would prefer to run in ttl mode.
+# note: as time goes on, the time to sync back to genesis takes progressively longer.
+# note: ttl is a chainspec configured behavior on a given network; consult the `max_ttl` chainspec setting
+# (it is currently ~18 hours by default on production and production-like networks but subject to change).
+# note: `nosync` is incompatible with validator behavior; a nosync node is prevented from participating
+# in consensus / switching to validate mode. it is primarily for lightweight nodes that are
+# only interested in recent activity.
+# note: an isolated node will not connect to, sync with, or keep up with the network, but will respond to
+# binary port, rest server, event server, and diagnostic port connections.
+sync_handling = 'genesis'
+
+# Idle time after which the syncing process is considered stalled.
+idle_tolerance = '20 minutes'
+
+# When the syncing process is considered stalled, it'll be retried up to `max_attempts` times.
+max_attempts = 3
+
+# Default delay for the control events that have no dedicated delay requirements.
+control_logic_default_delay = '1 second'
+
+# Flag which forces the node to resync all of the blocks.
+force_resync = false
+
+# A timeout for the ShutdownForUpgrade state, after which the node will upgrade even if not all
+# conditions are satisfied.
+shutdown_for_upgrade_timeout = '2 minutes'
+
+# Maximum time a node will wait for an upgrade to commit.
+upgrade_timeout = '3 hours'
+
+# The node detects when it should do a controlled shutdown when it is in a detectably bad state
+# in order to avoid potentially catastrophic uncontrolled crashes. 
Generally, a node should be +# allowed to shutdown, and if restarted that node will generally recover gracefully and resume +# normal operation. However, actively validating nodes have subjective state in memory that is +# lost on shutdown / restart and must be reacquired from other validating nodes on restart. +# If all validating nodes shutdown in the middle of an era, social consensus is required to restart +# the network. As a mitigation for that, the following config can be set to true on some validator +# nodes to cause nodes that are supposed to be validators in the current era to ignore controlled +# shutdown events and stay up. This allows them to act as sentinels for the consensus data for +# other restarting nodes. This config is inert on non-validating nodes. +prevent_validator_shutdown = false + +# ================================= +# Configuration options for logging +# ================================= +[logging] + +# Output format. Possible values are 'text' or 'json'. +format = 'json' + +# Colored output. Has no effect if format = 'json'. +color = false + +# Abbreviate module names in text output. Has no effect if format = 'json'. +abbreviate_modules = false + + +# =================================== +# Configuration options for consensus +# =================================== +[consensus] + +# Path (absolute, or relative to this config.toml) to validator's secret key file used to sign +# consensus messages. +secret_key_path = '/etc/casper/validator_keys/secret_key.pem' + +# The maximum number of blocks by which execution is allowed to lag behind finalization. +# If it is more than that, consensus will pause, and resume once the executor has caught up. +max_execution_delay = 3 + + +# ======================================= +# Configuration options for Zug consensus +# ======================================= +[consensus.zug] + +# Request the latest protocol state from a random peer periodically, with this interval. 
+# '0 seconds' means it is disabled and we never request the protocol state from a peer. +sync_state_interval = '1 second' + +# Log inactive or faulty validators periodically, with this interval. +# '0 seconds' means it is disabled and we never print the log message. +log_participation_interval = '1 minute' + +# The minimal proposal timeout. Validators wait this long for a proposal to receive a quorum of +# echo messages, before they vote to make the round skippable and move on to the next proposer. +proposal_timeout = '10 seconds' + +# The additional proposal delay that is still considered fast enough, in percent. This should +# take into account variables like empty vs. full blocks, network traffic etc. +# E.g. if proposing a full block while under heavy load takes 50% longer than an empty one +# while idle this should be at least 50, meaning that the timeout is 50% longer than +# necessary for a quorum of recent proposals, approximately. +proposal_grace_period = 200 + +# The average number of rounds after which the proposal timeout adapts by a factor of 2. +# Note: It goes up faster than it goes down: it takes fewer rounds to double than to halve. +proposal_timeout_inertia = 10 + +# The maximum difference between validators' clocks we expect. Incoming proposals whose timestamp +# lies in the future by more than that are rejected. +clock_tolerance = '1 second' + + +# =========================================== +# Configuration options for Highway consensus +# =========================================== +[consensus.highway] + +# The duration for which incoming vertices with missing dependencies should be kept in a queue. +pending_vertex_timeout = '30 minutes' + +# Request the latest protocol state from a random peer periodically, with this interval. +# '0 seconds' means it is disabled and we never request the protocol state from a peer. +request_state_interval = '20 seconds' + +# Log inactive or faulty validators periodically, with this interval. 
+# '0 seconds' means it is disabled and we never print the log message. +log_participation_interval = '1 minute' + +# Log the synchronizer state periodically, with this interval. +# '0 seconds' means it is disabled and we never print the log message. +log_synchronizer_interval = '5 seconds' + +# Log the size of every incoming and outgoing serialized unit. +log_unit_sizes = false + +# The maximum number of peers we request the same vertex from in parallel. +max_requests_for_vertex = 5 + +# The maximum number of dependencies we request per validator in a batch. +# Limits requests per validator in panorama - in order to get a total number of +# requests, multiply by # of validators. +max_request_batch_size = 20 + +[consensus.highway.round_success_meter] +# The number of most recent rounds we will be keeping track of. +num_rounds_to_consider = 40 + +# The number of successful rounds that triggers us to slow down: With this many or fewer +# successes per `num_rounds_to_consider`, we increase our round length. +num_rounds_slowdown = 10 + +# The number of successful rounds that triggers us to speed up: With this many or more successes +# per `num_rounds_to_consider`, we decrease our round length. +num_rounds_speedup = 32 + +# We will try to accelerate (decrease our round length) every `acceleration_parameter` rounds if +# we have few enough failures. +acceleration_parameter = 40 + +# The FTT, as a percentage (i.e. `acceleration_ftt = [1, 100]` means 1% of the validators' total weight), which +# we will use for looking for a summit in order to determine a proposal's finality. +# The required quorum in a summit we will look for to check if a round was successful is +# determined by this FTT. +acceleration_ftt = [1, 100] + + +# ==================================== +# Configuration options for networking +# ==================================== +[network] + +# The public address of the node. +# +# It must be publicly available in order to allow peers to connect to this node. 
+# If the port is set to 0, the actual bound port will be substituted. +public_address = ':0' + +# Address to bind to for listening. +# If port is set to 0, a random port will be used. +bind_address = '0.0.0.0:35000' + +# Addresses to connect to in order to join the network. +# +# If not set, this node will not be able to attempt to connect to the network. Instead it will +# depend upon peers connecting to it. This is normally only useful for the first node of the +# network. +# +# Multiple addresses can be given and the node will attempt to connect to each, requiring at least +# one connection. +known_addresses = ['52.90.123.125:35000','44.197.182.12:35000','3.84.211.31:35000','54.174.173.4:35000'] + +# Minimum number of fully-connected peers to consider network component initialized. +min_peers_for_initialization = 2 + +# The interval between each fresh round of gossiping the node's public address. +gossip_interval = '120 seconds' + +# Initial delay for starting address gossipping after the network starts. This should be slightly +# more than the expected time required for initial connections to complete. +initial_gossip_delay = '5 seconds' + +# How long a connection is allowed to be stuck as pending before it is abandoned. +max_addr_pending_time = '1 minute' + +# Maximum time allowed for a connection handshake between two nodes to be completed. Connections +# exceeding this threshold are considered unlikely to be healthy or even malicious and thus +# terminated. +handshake_timeout = '20 seconds' + +# Maximum number of incoming connections per unique peer allowed. If the limit is hit, additional +# connections will be rejected. A value of `0` means unlimited. +max_incoming_peer_connections = 3 + +# The maximum total of upstream bandwidth in bytes per second allocated to non-validating peers. +# A value of `0` means unlimited. 
+max_outgoing_byte_rate_non_validators = 6553600
+
+# The maximum allowed total impact of requests from non-validating peers per second answered.
+# A value of `0` means unlimited.
+max_incoming_message_rate_non_validators = 3000
+
+# Maximum number of requests for data from a single peer that are allowed to be buffered. A value of
+# `0` means unlimited.
+max_in_flight_demands = 50
+
+# Version threshold to enable tarpit for.
+#
+# When set to a version (the value may be `null` to disable the feature), any peer that reports a
+# protocol version equal or below the threshold will be rejected only after holding open the
+# connection for a specific (`tarpit_duration`) amount of time.
+#
+# This option makes most sense to enable on known nodes with addresses where legacy nodes that are
+# still in operation are connecting to, as these older versions will only attempt to reconnect to
+# other nodes once they have exhausted their set of known nodes.
+tarpit_version_threshold = '1.2.1'
+
+# How long to hold connections to trapped legacy nodes.
+tarpit_duration = '10 minutes'
+
+# The probability [0.0, 1.0] of this node trapping a legacy node.
+#
+# Since older nodes will only reconnect if all their options are exhausted, it is sufficient for a
+# single known node to hold open a connection to prevent the node from reconnecting. This should be
+# set to `1/n` or higher, with `n` being the number of known nodes expected in the configuration of
+# legacy nodes running this software.
+tarpit_chance = 0.2
+
+# Minimum time a peer is kept on block list before being redeemed. The actual
+# timeout duration is calculated by selecting a random value between
+# `blocklist_retain_min_duration` and `blocklist_retain_max_duration`.
+blocklist_retain_min_duration = '2 minutes'
+
+# Maximum time a peer is kept on block list before being redeemed. The actual
+# timeout duration is calculated by selecting a random value between
+# `blocklist_retain_min_duration` and `blocklist_retain_max_duration`. 
+blocklist_retain_max_duration = '10 minutes'
+
+# Identity of a node
+#
+# When this section is not specified, an identity will be generated when the node process starts with a self-signed certificate.
+# This option makes sense for some private chains where for security reasons joining new nodes is restricted.
+# [network.identity]
+# tls_certificate = "node_cert.pem"
+# secret_key = "node.pem"
+# ca_certificate = "ca_cert.pem"
+
+# Weights for impact estimation of incoming messages, used in combination with
+# `max_incoming_message_rate_non_validators`.
+#
+# Any weight set to 0 means that the category of traffic is exempt from throttling.
+[network.estimator_weights]
+consensus = 0
+block_gossip = 1
+transaction_gossip = 0
+finality_signature_gossip = 1
+address_gossip = 0
+finality_signature_broadcasts = 0
+transaction_requests = 1
+transaction_responses = 0
+legacy_deploy_requests = 1
+legacy_deploy_responses = 0
+block_requests = 1
+block_responses = 0
+block_header_requests = 1
+block_header_responses = 0
+trie_requests = 1
+trie_responses = 0
+finality_signature_requests = 1
+finality_signature_responses = 0
+sync_leap_requests = 1
+sync_leap_responses = 0
+approvals_hashes_requests = 1
+approvals_hashes_responses = 0
+execution_results_requests = 1
+execution_results_responses = 0
+
+# ==================================================
+# Configuration options for the BinaryPort server
+# ==================================================
+[binary_port_server]
+
+# Flag which enables the BinaryPort server.
+enable_server = true
+
+# Listening address for BinaryPort server.
+address = '0.0.0.0:7779'
+
+# Flag that enables the `AllValues` get request. Disabled by default, because it can potentially be abused to retrieve huge amounts of data and clog the node.
+allow_request_get_all_values = false
+
+# Flag that enables the `Trie` get request. Disabled by default, because it can potentially be abused to retrieve huge amounts of data and clog the node. 
+allow_request_get_trie = false
+
+# Flag that enables the `TrySpeculativeExec` request. Disabled by default.
+allow_request_speculative_exec = false
+
+# Maximum size of a message in bytes.
+max_message_size_bytes = 134_217_728
+
+# Maximum number of connections to the server.
+max_connections = 5
+
+# The global max rate of requests (per second) before they are limited.
+# The implementation uses a sliding window algorithm.
+qps_limit = 110
+
+# Initial time given to a connection before it expires
+initial_connection_lifetime = '10 seconds'
+
+#The amount of time which is given to a connection to extend its lifetime when a valid
+# [`Command::Get(GetRequest::Record)`] is sent to the node
+get_record_request_termination_delay = '0 seconds'
+
+#The amount of time which is given to a connection to extend its lifetime when a valid
+#[`Command::Get(GetRequest::Information)`] is sent to the node
+get_information_request_termination_delay = '5 seconds'
+
+#The amount of time which is given to a connection to extend its lifetime when a valid
+#[`Command::Get(GetRequest::State)`] is sent to the node
+get_state_request_termination_delay = '0 seconds'
+
+#The amount of time which is given to a connection to extend its lifetime when a valid
+#[`Command::Get(GetRequest::Trie)`] is sent to the node
+get_trie_request_termination_delay = '0 seconds'
+
+#The amount of time which is given to a connection to extend its lifetime when a valid
+#[`Command::TryAcceptTransaction`] is sent to the node
+accept_transaction_request_termination_delay = '24 seconds'
+
+#The amount of time which is given to a connection to extend its lifetime when a valid
+#[`Command::TrySpeculativeExec`] is sent to the node
+speculative_exec_request_termination_delay = '0 seconds'
+
+
+# ==============================================
+# Configuration options for the REST HTTP server
+# ==============================================
+[rest_server]
+
+# Flag which enables the REST HTTP server. 
+enable_server = true + +# Listening address for REST HTTP server. If the port is set to 0, a random port will be used. +# +# If the specified port cannot be bound to, a random port will be tried instead. If binding fails, +# the REST HTTP server will not run, but the node will be otherwise unaffected. +# +# The actual bound address will be reported via a log line if logging is enabled. +address = '0.0.0.0:8888' + +# The global max rate of requests (per second) before they are limited. +# Request will be delayed to the next 1 second bucket once limited. +qps_limit = 100 + +# Specifies which origin will be reported as allowed by REST server. +# +# If left empty, CORS will be disabled. +# If set to '*', any origin is allowed. +# Otherwise, only a specified origin is allowed. The given string must conform to the [origin scheme](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin). +cors_origin = '' + + +# ========================================================== +# Configuration options for the SSE HTTP event stream server +# ========================================================== +[event_stream_server] + +# Flag which enables the SSE HTTP event stream server. +enable_server = true + +# Listening address for SSE HTTP event stream server. If the port is set to 0, a random port will be used. +# +# If the specified port cannot be bound to, a random port will be tried instead. If binding fails, +# the SSE HTTP event stream server will not run, but the node will be otherwise unaffected. +# +# The actual bound address will be reported via a log line if logging is enabled. +address = '0.0.0.0:9999' + +# The number of event stream events to buffer. +event_stream_buffer_length = 5000 + +# The maximum number of subscribers across all event streams the server will permit at any one time. +max_concurrent_subscribers = 100 + +# Specifies which origin will be reported as allowed by event stream server. +# +# If left empty, CORS will be disabled. 
+# If set to '*', any origin is allowed. +# Otherwise, only a specified origin is allowed. The given string must conform to the [origin scheme](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin). +cors_origin = '' + +# =============================================== +# Configuration options for the storage component +# =============================================== +[storage] + +# Path (absolute, or relative to this config.toml) to the folder where any files created +# or read by the storage component will exist. A subfolder named with the network name will be +# automatically created and used for the storage component files. +# +# If the folder doesn't exist, it and any required parents will be created. +# +# If unset, the path must be supplied as an argument via the CLI. +path = '/var/lib/casper/casper-node' + +# Maximum size of the database to use for the block store. +# +# The size should be a multiple of the OS page size. +# +# 483_183_820_800 == 450 GiB. +max_block_store_size = 483_183_820_800 + +# Maximum size of the database to use for the deploy store. +# +# The size should be a multiple of the OS page size. +# +# 322_122_547_200 == 300 GiB. +max_deploy_store_size = 322_122_547_200 + +# Maximum size of the database to use for the deploy metadata. +# +# The size should be a multiple of the OS page size. +# +# 322_122_547_200 == 300 GiB. +max_deploy_metadata_store_size = 322_122_547_200 + +# Maximum size of the database to use for the state snapshots. +# +# The size should be a multiple of the OS page size. +# +# 10_737_418_240 == 10 GiB. +max_state_store_size = 10_737_418_240 + +# Memory deduplication. +# +# If enabled, nodes will attempt to share loaded objects if possible. +enable_mem_deduplication = true + +# Memory duplication garbage collection. +# +# Sets the frequency how often the memory pool cache is swept for free references. 
+# For example, setting this value to 5 means that every 5th time something is put in the pool the cache is swept. +mem_pool_prune_interval = 4096 + + +# =================================== +# Configuration options for gossiping +# =================================== +[gossip] + +# Target number of peers to infect with a given piece of data. +infection_target = 3 + +# The saturation limit as a percentage, with a maximum value of 99. Used as a termination +# condition. +# +# Example: assume the `infection_target` is 3, the `saturation_limit_percent` is 80, and we don't +# manage to newly infect 3 peers. We will stop gossiping once we know of more than 15 holders +# excluding us since 80% saturation would imply 3 new infections in 15 peers. +saturation_limit_percent = 80 + +# The maximum duration for which to keep finished entries. +# +# The longer they are retained, the lower the likelihood of re-gossiping a piece of data. However, +# the longer they are retained, the larger the list of finished entries can grow. +finished_entry_duration = '1 minute' + +# The timeout duration for a single gossip request, i.e. for a single gossip message +# sent from this node, it will be considered timed out if the expected response from that peer is +# not received within this specified duration. +gossip_request_timeout = '30 seconds' + +# The timeout duration for retrieving the remaining part(s) of newly-discovered data +# from a peer which gossiped information about that data to this node. +get_remainder_timeout = '5 seconds' + +# The timeout duration for a newly-received, gossiped item to be validated and stored by another +# component before the gossiper abandons waiting to gossip the item onwards. 
+validate_and_store_timeout = '1 minute' + + +# =============================================== +# Configuration options for the block accumulator +# =============================================== +[block_accumulator] + +# Block height difference threshold for starting to execute the blocks. +attempt_execution_threshold = 3 + +# Accepted time interval for inactivity in block accumulator. +dead_air_interval = '3 minutes' + +# Time after which the block acceptors are considered old and can be purged. +purge_interval = '1 minute' + + +# ================================================ +# Configuration options for the block synchronizer +# ================================================ +[block_synchronizer] + +# Maximum number of fetch-trie tasks to run in parallel during block synchronization. +max_parallel_trie_fetches = 5000 + +# Time interval for the node to ask for refreshed peers. +peer_refresh_interval = '90 seconds' + +# Time interval for the node to check what the block synchronizer needs to acquire next. +need_next_interval = '1 second' + +# Time interval for recurring disconnection of dishonest peers. +disconnect_dishonest_peers_interval = '10 seconds' + +# Time interval for resetting the latch in block builders. +latch_reset_interval = '5 seconds' + + +# ============================================= +# Configuration options for the block validator +# ============================================= +[block_validator] + +# Maximum number of completed entries to retain. +# +# A higher value can avoid creating needless validation work on an already-validated proposed +# block, but comes at the cost of increased memory consumption. +max_completed_entries = 3 + + +# ================================== +# Configuration options for fetchers +# ================================== +[fetcher] + +# The timeout duration for a single fetcher request, i.e. 
for a single fetcher message +# sent from this node to another node, it will be considered timed out if the expected response from that peer is +# not received within this specified duration. +get_from_peer_timeout = '10 seconds' + + +# ======================================================== +# Configuration options for the contract runtime component +# ======================================================== +[contract_runtime] + +# Optional maximum size of the database to use for the global state store. +# +# If unset, defaults to 805,306,368,000 == 750 GiB. +# +# The size should be a multiple of the OS page size. +max_global_state_size = 2_089_072_132_096 + +# Optional depth limit to use for global state queries. +# +# If unset, defaults to 5. +#max_query_depth = 5 + +# Enable manual synchronizing to disk. +# +# If unset, defaults to true. +#enable_manual_sync = true + + +# ================================================== +# Configuration options for the transaction acceptor +# ================================================== +[transaction_acceptor] + +# The leeway allowed when considering whether a transaction is future-dated or not. +# +# To accommodate minor clock drift, transactions whose timestamps are within `timestamp_leeway` in the +# future are still acceptable. +# +# The maximum value to which `timestamp_leeway` can be set is defined by the chainspec setting +# `transaction.max_timestamp_leeway`. +timestamp_leeway = '2 seconds' + + +# =========================================== +# Configuration options for the transaction buffer +# =========================================== +[transaction_buffer] + +# The interval of checking for expired transactions. +expiry_check_interval = '1 minute' + + +# ============================================== +# Configuration options for the diagnostics port +# ============================================== +[diagnostics_port] + +# If set, the diagnostics port will be available on a UNIX socket. 
+enabled = false + +# Filename for the UNIX domain socket the diagnostics port listens on. +socket_path = "debug.socket" + +# The umask to set before creating the socket. A restrictive mask like `0o077` will cause the +# socket to be only accessible by the user the node runs as. A more relaxed variant is `0o007`, +# which allows for group access as well. +socket_umask = 0o077 + + +# ============================================= +# Configuration options for the upgrade watcher +# ============================================= +[upgrade_watcher] + +# How often to scan file system for available upgrades. +upgrade_check_interval = '30 seconds' diff --git a/resources/integration-test/chainspec.toml b/resources/integration-test/chainspec.toml new file mode 100644 index 0000000000..84686ce061 --- /dev/null +++ b/resources/integration-test/chainspec.toml @@ -0,0 +1,505 @@ +[protocol] +# Protocol version. +version = '2.0.4' +# Whether we need to clear latest blocks back to the switch block just before the activation point or not. +hard_reset = true +# This protocol version becomes active at this point. +# +# If it is a timestamp string, it represents the timestamp for the genesis block. This is the beginning of era 0. By +# this time, a sufficient majority (> 50% + F/2 — see finality_threshold_fraction below) of validator nodes must be up +# and running to start the blockchain. This timestamp is also used in seeding the pseudo-random number generator used +# in contract-runtime for computing genesis post-state hash. +# +# If it is an integer, it represents an era ID, meaning the protocol version becomes active at the start of this era. +activation_point = 17386 + +[network] +# Human readable name for convenience; the genesis_hash is the true identifier. The name influences the genesis hash by +# contributing to the seeding of the pseudo-random number generator used in contract-runtime for computing genesis +# post-state hash. 
+name = 'integration-test' +# The maximum size of an acceptable networking message in bytes. Any message larger than this will +# be rejected at the networking level. +maximum_net_message_size = 25_165_824 + +[core] +# Era duration. +era_duration = '120 minutes' +# Minimum number of blocks per era. An era will take longer than `era_duration` if that is necessary to reach the +# minimum height. +minimum_era_height = 20 +# Minimum difference between a block's and its child's timestamp. +minimum_block_time = '16384 ms' +# Number of slots available in validator auction. +validator_slots = 100 +# A number between 0 and 1 representing the fault tolerance threshold as a fraction, used by the internal finalizer. +# It is the fraction of validators that would need to equivocate to make two honest nodes see two conflicting blocks as +# finalized: A higher value F makes it safer to rely on finalized blocks. It also makes it more difficult to finalize +# blocks, however, and requires strictly more than (F + 1)/2 validators to be working correctly. +finality_threshold_fraction = [1, 3] +# Protocol version from which nodes are required to hold strict finality signatures. +start_protocol_version_with_strict_finality_signatures_required = '1.5.0' +# Which finality is required for legacy blocks. Options are 'Strict', 'Weak' and 'Any'. +# Used to determine finality sufficiency for new joiners syncing blocks created +# in a protocol version before +# `start_protocol_version_with_strict_finality_signatures_required`. +legacy_required_finality = 'Strict' +# Number of eras before an auction actually defines the set of validators. If you bond with a sufficient bid in era N, +# you will be a validator in era N + auction_delay + 1. +auction_delay = 1 +# The period after genesis during which a genesis validator's bid is locked. +locked_funds_period = '0 days' +# The period in which genesis validator's bid is released over time after it's unlocked. 
+vesting_schedule_period = '0 weeks' +# Default number of eras that need to pass to be able to withdraw unbonded funds. +unbonding_delay = 7 +# Round seigniorage rate represented as a fraction of the total supply. +# +# Annual issuance: 8% +# Minimum block time: 2^14 milliseconds +# Ticks per year: 31536000000 +# +# (1+0.08)^((2^14)/31536000000)-1 is expressed as a fractional number below +# Python: +# from fractions import Fraction +# Fraction((1 + 0.08)**((2**14)/31536000000) - 1).limit_denominator(1000000000) +round_seigniorage_rate = [7, 175070816] +# Maximum number of associated keys for a single account. +max_associated_keys = 100 +# Maximum height of contract runtime call stack. +max_runtime_call_stack_height = 12 +# Minimum allowed delegation amount in motes +minimum_delegation_amount = 500_000_000_000 +# Maximum allowed delegation amount in motes +maximum_delegation_amount = 1_000_000_000_000_000_000 +# Minimum bid amount allowed in motes. Withdrawing one's bid to an amount strictly less than +# the value specified will be treated as a full unbond of a validator and their associated delegators +minimum_bid_amount = 10_000_000_000_000 +# Global state prune batch size (0 = this feature is off) +prune_batch_size = 0 +# Enables strict arguments checking when calling a contract; i.e. that all non-optional args are provided and of the correct `CLType`. +strict_argument_checking = false +# Number of simultaneous peer requests. +simultaneous_peer_requests = 5 +# The consensus protocol to use. Options are "Zug" and "Highway". +consensus_protocol = 'Zug' +# The maximum amount of delegators per validator. +max_delegators_per_validator = 1200 +# The split in finality signature rewards between block producer and participating signers. +finders_fee = [1, 5] +# The proportion of baseline rewards going to reward finality signatures specifically. +finality_signature_proportion = [95, 100] +# Lookback interval indicating which past block we are looking at to reward. 
+signature_rewards_max_delay = 3 +# Allows transfers between accounts in the blockchain network. +# +# Setting this to false restricts normal accounts from sending tokens to other accounts, allowing transfers only to administrators. +# Changing this option makes sense only on private chains. +allow_unrestricted_transfers = true +# Enables the auction entry points 'delegate' and 'add_bid'. +# +# Setting this to false makes sense only for private chains which don't need to auction new validator slots. These +# auction entry points will return an error if called when this option is set to false. +allow_auction_bids = true +# If set to false, then consensus doesn't compute rewards and always uses 0. +compute_rewards = true +# Defines how refunds of the unused portion of payment amounts are calculated and handled. +# +# Valid options are: +# 'refund': a ratio of the unspent token is returned to the spender. +# 'burn': a ratio of the unspent token is burned. +# 'no_refund': no refunds are paid out; this is functionally equivalent to refund with 0% ratio. +# This causes excess payment amounts to be sent to either a +# pre-defined purse, or back to the sender. The refunded amount is calculated as the given ratio of the payment amount +# minus the execution costs. +refund_handling = { type = 'refund', refund_ratio = [75, 100] } +# Defines how fees are handled. +# +# Valid options are: +# 'no_fee': fees are eliminated. +# 'pay_to_proposer': fees are paid to the block proposer +# 'accumulate': fees are accumulated in a special purse and distributed at the end of each era evenly among all +# administrator accounts +# 'burn': fees are burned +fee_handling = { type = 'pay_to_proposer' } +# If a validator would receive a validator credit, it cannot exceed this percentage of their total stake. +validator_credit_cap = [1, 5] +# Defines how pricing is handled. +# +# Valid options are: +# 'payment_limited': senders of transaction self-specify how much they pay. 
+# 'fixed': costs are fixed, per the cost table +# 'prepaid': prepaid transaction (currently not supported) +pricing_handling = { type = 'payment_limited' } +# Does the network allow pre-payment for future +# execution? Currently not supported. +# +allow_prepaid = false +# Defines how gas holds affect available balance calculations. +# +# Valid options are: +# 'accrued': sum of full value of all non-expired holds. +# 'amortized': sum of each hold is amortized over the time remaining until expiry. +# +# For instance, if 12 hours remained on a gas hold with a 24-hour `gas_hold_interval`, +# with accrued, the full hold amount would be applied +# with amortized, half the hold amount would be applied +gas_hold_balance_handling = { type = 'accrued' } +# Defines how long gas holds last. +# +# If fee_handling is set to 'no_fee', the system places a balance hold on the payer +# equal to the value the fee would have been. Such balance holds expire after a time +# interval has elapsed. This setting controls how long that interval is. The available +# balance of a purse equals its total balance minus the held amount(s) of non-expired +# holds (see gas_hold_balance_handling setting for details of how that is calculated). +# +# For instance, if gas_hold_interval is 24 hours and 100 gas is used from a purse, +# a hold for 100 is placed on that purse and is considered when calculating total balance +# for 24 hours starting from the block_time when the hold was placed. +gas_hold_interval = '24 hours' +# List of public keys of administrator accounts. Setting this option makes sense only on private chains which require +# administrator accounts for regulatory reasons. +administrators = [] +# Flag that triggers a migration of all userland accounts and contracts present in global state to the addressable +# entity in lazy manner. +# If the flag is set to false then no accounts and contracts are migrated during a protocol upgrade; +# i.e. 
all Account records will be present under Key::Account and Contracts and their associated ContractPackage +will be written underneath Key::Hash. +# If the flag is set to true then accounts and contracts are migrated lazily; i.e. on first use of the Account +# and/or Contract as part of the execution of a Transaction. This means the Accounts/Contracts will be migrated +# to their corresponding AddressableEntity, and the NamedKeys for the previous record are separated and written +# as discrete top level records. For Contracts specifically the entrypoints are also written as discrete top +# level records +# Note: Enabling of the AddressableEntity feature is one-way; i.e. once enabled as part of a protocol upgrade +# the flag cannot be disabled in a future protocol upgrade. +enable_addressable_entity = false +# This value is used as the penalty payment amount, the lowest cost, and the minimum balance amount. +baseline_motes_amount = 2_500_000_000 +# Flag on whether ambiguous entity versions return an execution error. +trap_on_ambiguous_entity_version = false + +[highway] +# Highway dynamically chooses its round length, between minimum_block_time and maximum_round_length. +maximum_round_length = '66 seconds' + +[transactions] +# The duration after the transaction timestamp that it can be included in a block. +max_ttl = '2 hours' +# The maximum number of approvals permitted in a single block. +block_max_approval_count = 2600 +# Maximum block size in bytes including transactions contained by the block. 0 means unlimited. +max_block_size = 5_242_880 +# The upper limit of total gas of all transactions in a block. +block_gas_limit = 1_625_000_000_000 +# The minimum amount in motes for a valid native transfer. +native_transfer_minimum_motes = 2_500_000_000 +# The maximum value to which `transaction_acceptor.timestamp_leeway` can be set in the config.toml file. +max_timestamp_leeway = '5 seconds' + +# Configuration of the transaction runtime. 
+[transactions.enabled_runtime] +vm_casper_v1 = true +vm_casper_v2 = false + +[transactions.v1] +# The configuration settings for the lanes of transactions including both native and Wasm based interactions. +# Currently the node supports two native interactions the mint and auction and have the reserved identifiers of 0 and 1 +# respectively +# The remaining wasm based lanes specify the range of configuration settings for a given Wasm based transaction +# within a given lane. +# The maximum length in bytes of runtime args per V1 transaction. +# [0] -> Transaction lane label (apart from the reserved native identifiers these are simply labels) +# Note: For the given mainnet implementation we specially reserve the label 2 for install and upgrades and +# the lane must be present and defined. +# Different casper networks may not impose such a restriction. +# [1] -> Max serialized length of the entire transaction in bytes for a given transaction in a certain lane +# [2] -> Max args length size in bytes for a given transaction in a certain lane +# [3] -> Transaction gas limit for a given transaction in a certain lane +# [4] -> The maximum number of transactions the lane can contain +native_mint_lane = [0, 2048, 1024, 100_000_000, 650] +native_auction_lane = [1, 3096, 2048, 2_500_000_000, 650] +install_upgrade_lane = [2, 750_000, 2048, 1_000_000_000_000, 1] +wasm_lanes = [ + [3, 750_000, 2048, 1_000_000_000_000, 1], + [4, 131_072, 1024, 100_000_000_000, 2], + [5, 65_536, 512, 5_000_000_000, 80] +] + +[transactions.deploy] +# The maximum number of Motes allowed to be spent during payment. 0 means unlimited. +max_payment_cost = '0' +# The limit of length of serialized payment code arguments. +payment_args_max_length = 1024 +# The limit of length of serialized session code arguments. +session_args_max_length = 1024 + +[wasm.v1] +# Amount of free memory (in 64kB pages) each contract can use for stack. +max_memory = 64 +# Max stack height (native WebAssembly stack limiter). 
+max_stack_height = 500 + +[storage_costs] +# Gas charged per byte stored in the global state. +gas_per_byte = 1_117_587 + +# For each opcode cost below there exists a static cost and a dynamic cost. +# The static cost is a fixed cost for each opcode that is hardcoded and validated by benchmarks. +[wasm.v1.opcode_costs] +# Bit operations multiplier. +bit = 105 +# Arithmetic add operations multiplier. +add = 105 +# Mul operations multiplier. +mul = 105 +# Div operations multiplier. +div = 105 +# Memory load operation multiplier. +load = 105 +# Memory store operation multiplier. +store = 105 +# Const store operation multiplier. +const = 105 +# Local operations multiplier. +local = 105 +# Global operations multiplier. +global = 105 +# Integer operations multiplier. +integer_comparison = 105 +# Conversion operations multiplier. +conversion = 105 +# Unreachable operation multiplier. +unreachable = 105 +# Nop operation multiplier. +nop = 105 +# Get current memory operation multiplier. +current_memory = 105 +# Grow memory cost, per page (64kb). +grow_memory = 900 +# Sign extension operations cost +sign = 105 + +# Control flow operations multiplier. 
+[wasm.v1.opcode_costs.control_flow] +block = 255 +loop = 255 +if = 105 +else = 105 +end = 105 +br = 1665 +br_if = 510 +return = 105 +select = 105 +call = 225 +call_indirect = 270 +drop = 105 + +[wasm.v1.opcode_costs.control_flow.br_table] +# Fixed cost per `br_table` opcode +cost = 150 +# Size of target labels in the `br_table` opcode will be multiplied by `size_multiplier` +size_multiplier = 100 + +# Host function declarations are located in smart_contracts/contract/src/ext_ffi.rs +[wasm.v1.host_function_costs] +add = { cost = 5_800, arguments = [0, 0, 0, 0] } +add_associated_key = { cost = 1_200_000, arguments = [0, 0, 0] } +add_contract_version = { cost = 200, arguments = [0, 0, 0, 0, 120_000, 0, 0, 0, 0, 0] } +add_contract_version_with_message_topics = { cost = 200, arguments = [0, 0, 0, 0, 120_000, 0, 0, 0, 30_000, 0, 0] } +add_package_version_with_message_topics = { cost = 200, arguments = [0, 0, 0, 0, 120_000, 0, 0, 0, 30_000, 0, 0] } +blake2b = { cost = 1_200_000, arguments = [0, 120_000, 0, 0] } +call_contract = { cost = 300_000_000, arguments = [0, 0, 0, 120_000, 0, 120_000, 0] } +call_versioned_contract = { cost = 300_000_000, arguments = [0, 0, 0, 0, 0, 120_000, 0, 120_000, 0] } +create_contract_package_at_hash = { cost = 200, arguments = [0, 0] } +create_contract_user_group = { cost = 200, arguments = [0, 0, 0, 0, 0, 0, 0, 0] } +create_purse = { cost = 2_500_000_000, arguments = [0, 0] } +disable_contract_version = { cost = 200, arguments = [0, 0, 0, 0] } +get_balance = { cost = 3_000_000, arguments = [0, 0, 0] } +get_blocktime = { cost = 330, arguments = [0] } +get_caller = { cost = 380, arguments = [0] } +get_key = { cost = 2_000, arguments = [0, 440, 0, 0, 0] } +get_main_purse = { cost = 1_300, arguments = [0] } +get_named_arg = { cost = 200, arguments = [0, 120_000, 0, 120_000] } +get_named_arg_size = { cost = 200, arguments = [0, 0, 0] } +get_phase = { cost = 710, arguments = [0] } +get_system_contract = { cost = 1_100, arguments = [0, 0, 0] } 
+has_key = { cost = 1_500, arguments = [0, 840] } +is_valid_uref = { cost = 760, arguments = [0, 0] } +load_named_keys = { cost = 42_000, arguments = [0, 0] } +new_uref = { cost = 17_000, arguments = [0, 0, 590] } +random_bytes = { cost = 200, arguments = [0, 0] } +print = { cost = 20_000, arguments = [0, 4_600] } +provision_contract_user_group_uref = { cost = 200, arguments = [0, 0, 0, 0, 0] } +put_key = { cost = 100_000_000, arguments = [0, 120_000, 0, 120_000] } +read_host_buffer = { cost = 3_500, arguments = [0, 310, 0] } +read_value = { cost = 60_000, arguments = [0, 120_000, 0] } +dictionary_get = { cost = 5_500, arguments = [0, 590, 0] } +remove_associated_key = { cost = 4_200, arguments = [0, 0] } +remove_contract_user_group = { cost = 200, arguments = [0, 0, 0, 0] } +remove_contract_user_group_urefs = { cost = 200, arguments = [0, 0, 0, 0, 0, 120_000] } +remove_key = { cost = 61_000, arguments = [0, 3_200] } +ret = { cost = 23_000, arguments = [0, 420_000] } +revert = { cost = 500, arguments = [0] } +set_action_threshold = { cost = 74_000, arguments = [0, 0] } +transfer_from_purse_to_account = { cost = 2_500_000_000, arguments = [0, 0, 0, 0, 0, 0, 0, 0, 0] } +transfer_from_purse_to_purse = { cost = 82_000_000, arguments = [0, 0, 0, 0, 0, 0, 0, 0] } +transfer_to_account = { cost = 2_500_000_000, arguments = [0, 0, 0, 0, 0, 0, 0] } +update_associated_key = { cost = 4_200, arguments = [0, 0, 0] } +write = { cost = 14_000, arguments = [0, 0, 0, 980] } +dictionary_put = { cost = 9_500, arguments = [0, 1_800, 0, 520] } +enable_contract_version = { cost = 200, arguments = [0, 0, 0, 0] } +manage_message_topic = { cost = 200, arguments = [0, 30_000, 0, 0] } +emit_message = { cost = 200, arguments = [0, 30_000, 0, 120_000] } +generic_hash = { cost = 1_200_000, arguments = [0, 120_000, 0, 0, 0] } +cost_increase_per_message = 50 +get_block_info = { cost = 330, arguments = [0, 0] } +recover_secp256k1 = { cost = 1_300_000, arguments = [0, 120_000, 0, 0, 0, 0] } 
+verify_signature = { cost = 1_300_000, arguments = [0, 120_000, 0, 0, 0, 0] } +call_package_version = { cost = 300_000_000, arguments = [0, 0, 0, 0, 0, 0, 0, 120_000, 0, 120_000, 0] } + +[wasm.v2] +# Amount of free memory each contract can use for stack. +max_memory = 17 + +[wasm.v2.opcode_costs] +# Bit operations multiplier. +bit = 105 +# Arithmetic add operations multiplier. +add = 105 +# Mul operations multiplier. +mul = 105 +# Div operations multiplier. +div = 105 +# Memory load operation multiplier. +load = 105 +# Memory store operation multiplier. +store = 105 +# Const store operation multiplier. +const = 105 +# Local operations multiplier. +local = 105 +# Global operations multiplier. +global = 105 +# Integer operations multiplier. +integer_comparison = 105 +# Conversion operations multiplier. +conversion = 105 +# Unreachable operation multiplier. +unreachable = 105 +# Nop operation multiplier. +nop = 105 +# Get current memory operation multiplier. +current_memory = 105 +# Grow memory cost, per page (64kb). +grow_memory = 900 +# Sign extension operations cost +sign = 105 + +# Control flow operations multiplier. 
+[wasm.v2.opcode_costs.control_flow] +block = 255 +loop = 255 +if = 105 +else = 105 +end = 105 +br = 1665 +br_if = 510 +return = 105 +select = 105 +call = 225 +call_indirect = 270 +drop = 105 + +[wasm.v2.opcode_costs.control_flow.br_table] +# Fixed cost per `br_table` opcode +cost = 150 +# Size of target labels in the `br_table` opcode will be multiplied by `size_multiplier` +size_multiplier = 100 + +[wasm.v2.host_function_costs] +read = { cost = 0, arguments = [0, 0, 0, 0, 0, 0] } +write = { cost = 0, arguments = [0, 0, 0, 0, 0] } +remove = { cost = 0, arguments = [0, 0, 0] } +copy_input = { cost = 0, arguments = [0, 0] } +ret = { cost = 0, arguments = [0, 0] } +create = { cost = 0, arguments = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] } +transfer = { cost = 0, arguments = [0, 0, 0] } +env_balance = { cost = 0, arguments = [0, 0, 0, 0] } +upgrade = { cost = 0, arguments = [0, 0, 0, 0, 0, 0] } +call = { cost = 0, arguments = [0, 0, 0, 0, 0, 0, 0, 0, 0] } +print = { cost = 0, arguments = [0, 0] } +emit = { cost = 0, arguments = [0, 0, 0, 0] } +env_info = { cost = 0, arguments = [0, 0] } + +[wasm.messages_limits] +max_topic_name_size = 256 +max_topics_per_contract = 128 +max_message_size = 1_024 + +[system_costs] +# Penalty charge for calling invalid entry point in a system contract. 
+no_such_entrypoint = 2_500_000_000 + +[system_costs.auction_costs] +get_era_validators = 2_500_000_000 +read_seigniorage_recipients = 5_000_000_000 +add_bid = 2_500_000_000 +withdraw_bid = 2_500_000_000 +delegate = 2_500_000_000 +undelegate = 2_500_000_000 +run_auction = 2_500_000_000 +slash = 2_500_000_000 +distribute = 2_500_000_000 +withdraw_delegator_reward = 5_000_000_000 +withdraw_validator_reward = 5_000_000_000 +read_era_id = 2_500_000_000 +activate_bid = 2_500_000_000 +redelegate = 2_500_000_000 +change_bid_public_key = 5_000_000_000 +add_reservations = 2_500_000_000 +cancel_reservations = 2_500_000_000 + +[system_costs.mint_costs] +mint = 2_500_000_000 +reduce_total_supply = 2_500_000_000 +create = 2_500_000_000 +balance = 100_000_000 +burn = 100_000_000 +transfer = 100_000_000 +read_base_round_reward = 2_500_000_000 +mint_into_existing_purse = 2_500_000_000 + +[system_costs.handle_payment_costs] +get_payment_purse = 10_000 +set_refund_purse = 10_000 +get_refund_purse = 10_000 +finalize_payment = 2_500_000_000 + +[system_costs.standard_payment_costs] +pay = 10_000 + +[vacancy] +# The cost of a transaction is based on a multiplier. This allows for economic disincentives for misuse of the network. +# +# The network starts with a current_gas_price of min_gas_price. +# +# Each block has multiple limits (bytes, transactions, transfers, gas, etc.) +# The utilization for a block is determined by the highest percentage utilization of each of these limits. +# +# Ex: transfers limit is 650 and transactions limit is 20 (assume other limits are not a factor here) +# 19 transactions -> 19/20 or 95% +# 600 transfers -> 600/650 or 92.3% +# resulting block utilization is 95 +# +# The utilization for an era is the average of all block utilizations. At the switch block, the dynamic gas_price is +# adjusted with the following: +# +# If utilization was below the lower_threshold, current_gas_price is decremented by one if higher than min_gas_price. 
+# If utilization falls between the thresholds, current_gas_price is not changed. +# If utilization was above the upper_threshold, current_gas_price is incremented by one if lower than max_gas_price. +# +# The cost charged for the transaction is simply the gas_used * current_gas_price. +upper_threshold = 90 +lower_threshold = 50 +max_gas_price = 1 +min_gas_price = 1 diff --git a/resources/integration-test/config-example.toml b/resources/integration-test/config-example.toml new file mode 100644 index 0000000000..e764041036 --- /dev/null +++ b/resources/integration-test/config-example.toml @@ -0,0 +1,651 @@ +# ================================ +# Configuration options for a node +# ================================ +[node] + +# If set, use this hash as a trust anchor when joining an existing network. +#trusted_hash = 'HEX-FORMATTED BLOCK HASH' + +# Historical sync behavior for this node. Options are: +# 'ttl' (node will attempt to acquire all block data to comply with time to live enforcement) +# 'genesis' (node will attempt to acquire all block data back to genesis) +# 'nosync' (node will only acquire blocks moving forward) +# 'isolated' (node will initialize without peers and will not accept peers) +# 'completeblock' (node will acquire complete block and shutdown) +# note: the only two states allowed to switch to Validate reactor state are `genesis` and `ttl`. +# it is recommended for dedicated validator nodes to be in ttl mode to increase +# their ability to maintain maximal uptime...if a long-running genesis validator +# goes offline and comes back up while in genesis mode, it must backfill +# any gaps in its block awareness before resuming validation. +# +# it is recommended for reporting non-validator nodes to be in genesis mode to +# enable support for queries at any block height. +# +# it is recommended for non-validator working nodes (for dapp support, etc) to run in +# ttl or nosync mode (depending upon their specific data requirements). 
+# +# thus for instance a node backing a block explorer would prefer genesis mode, +# while a node backing a dapp interested in very recent activity would prefer to run in nosync mode, +# and a node backing a dapp interested in auction activity or tracking trends would prefer to run in ttl mode. +# note: as time goes on, the time to sync back to genesis takes progressively longer. +# note: ttl is a chainspec configured behavior on a given network; consult the `max_ttl` chainspec setting +# (it is currently ~18 hours by default on production and production-like networks but subject to change). +# note: `nosync` is incompatible with validator behavior; a nosync node is prevented from participating +# in consensus / switching to validate mode. it is primarily for lightweight nodes that are +# only interested in recent activity. +# note: an isolated node will not connect to, sync with, or keep up with the network, but will respond to +# binary port, rest server, event server, and diagnostic port connections. +sync_handling = 'genesis' + +# Idle time after which the syncing process is considered stalled. +idle_tolerance = '20 minutes' + +# When the syncing process is considered stalled, it'll be retried up to `max_attempts` times. +max_attempts = 3 + +# Default delay for the control events that have no dedicated delay requirements. +control_logic_default_delay = '1 second' + +# Flag which forces the node to resync all of the blocks. +force_resync = false + +# A timeout for the ShutdownForUpgrade state, after which the node will upgrade even if not all +# conditions are satisfied. +shutdown_for_upgrade_timeout = '2 minutes' + +# Maximum time a node will wait for an upgrade to commit. +upgrade_timeout = '3 hours' + +# The node detects when it should do a controlled shutdown when it is in a detectably bad state +# in order to avoid potentially catastrophic uncontrolled crashes. 
Generally, a node should be +# allowed to shutdown, and if restarted that node will generally recover gracefully and resume +# normal operation. However, actively validating nodes have subjective state in memory that is +# lost on shutdown / restart and must be reacquired from other validating nodes on restart. +# If all validating nodes shutdown in the middle of an era, social consensus is required to restart +# the network. As a mitigation for that, the following config can be set to true on some validator +# nodes to cause nodes that are supposed to be validators in the current era to ignore controlled +# shutdown events and stay up. This allows them to act as sentinels for the consensus data for +# other restarting nodes. This config is inert on non-validating nodes. +prevent_validator_shutdown = false + +# ================================= +# Configuration options for logging +# ================================= +[logging] + +# Output format. Possible values are 'text' or 'json'. +format = 'json' + +# Colored output. Has no effect if format = 'json'. +color = false + +# Abbreviate module names in text output. Has no effect if format = 'json'. +abbreviate_modules = false + + +# =================================== +# Configuration options for consensus +# =================================== +[consensus] + +# Path (absolute, or relative to this config.toml) to validator's secret key file used to sign +# consensus messages. +secret_key_path = '/etc/casper/validator_keys/secret_key.pem' + +# The maximum number of blocks by which execution is allowed to lag behind finalization. +# If it is more than that, consensus will pause, and resume once the executor has caught up. +max_execution_delay = 3 + + +# ======================================= +# Configuration options for Zug consensus +# ======================================= +[consensus.zug] + +# Request the latest protocol state from a random peer periodically, with this interval. 
+# '0 seconds' means it is disabled and we never request the protocol state from a peer. +sync_state_interval = '1 second' + +# Log inactive or faulty validators periodically, with this interval. +# '0 seconds' means it is disabled and we never print the log message. +log_participation_interval = '1 minute' + +# The minimal proposal timeout. Validators wait this long for a proposal to receive a quorum of +# echo messages, before they vote to make the round skippable and move on to the next proposer. +proposal_timeout = '10 seconds' + +# The additional proposal delay that is still considered fast enough, in percent. This should +# take into account variables like empty vs. full blocks, network traffic etc. +# E.g. if proposing a full block while under heavy load takes 50% longer than an empty one +# while idle this should be at least 50, meaning that the timeout is 50% longer than +# necessary for a quorum of recent proposals, approximately. +proposal_grace_period = 200 + +# The average number of rounds after which the proposal timeout adapts by a factor of 2. +# Note: It goes up faster than it goes down: it takes fewer rounds to double than to halve. +proposal_timeout_inertia = 10 + +# The maximum difference between validators' clocks we expect. Incoming proposals whose timestamp +# lies in the future by more than that are rejected. +clock_tolerance = '1 second' + + +# =========================================== +# Configuration options for Highway consensus +# =========================================== +[consensus.highway] + +# The duration for which incoming vertices with missing dependencies should be kept in a queue. +pending_vertex_timeout = '30 minutes' + +# Request the latest protocol state from a random peer periodically, with this interval. +# '0 seconds' means it is disabled and we never request the protocol state from a peer. +request_state_interval = '20 seconds' + +# Log inactive or faulty validators periodically, with this interval. 
+# '0 seconds' means it is disabled and we never print the log message. +log_participation_interval = '1 minute' + +# Log the synchronizer state periodically, with this interval. +# '0 seconds' means it is disabled and we never print the log message. +log_synchronizer_interval = '5 seconds' + +# Log the size of every incoming and outgoing serialized unit. +log_unit_sizes = false + +# The maximum number of peers we request the same vertex from in parallel. +max_requests_for_vertex = 5 + +# The maximum number of dependencies we request per validator in a batch. +# Limits requests per validator in panorama - in order to get a total number of +# requests, multiply by # of validators. +max_request_batch_size = 20 + +[consensus.highway.round_success_meter] +# The number of most recent rounds we will be keeping track of. +num_rounds_to_consider = 40 + +# The number of successful rounds that triggers us to slow down: With this many or fewer +# successes per `num_rounds_to_consider`, we increase our round length. +num_rounds_slowdown = 10 + +# The number of successful rounds that triggers us to speed up: With this many or more successes +# per `num_rounds_to_consider`, we decrease our round length. +num_rounds_speedup = 32 + +# We will try to accelerate (decrease our round length) every `acceleration_parameter` rounds if +# we have few enough failures. +acceleration_parameter = 40 + +# The FTT, as a percentage (i.e. `acceleration_ftt = [1, 100]` means 1% of the validators' total weight), which +# we will use for looking for a summit in order to determine a proposal's finality. +# The required quorum in a summit we will look for to check if a round was successful is +# determined by this FTT. +acceleration_ftt = [1, 100] + + +# ==================================== +# Configuration options for networking +# ==================================== +[network] + +# The public address of the node. +# +# It must be publicly available in order to allow peers to connect to this node. 
+# If the port is set to 0, the actual bound port will be substituted. +public_address = ':0' + +# Address to bind to for listening. +# If port is set to 0, a random port will be used. +bind_address = '0.0.0.0:35000' + +# Addresses to connect to in order to join the network. +# +# If not set, this node will not be able to attempt to connect to the network. Instead it will +# depend upon peers connecting to it. This is normally only useful for the first node of the +# network. +# +# Multiple addresses can be given and the node will attempt to connect to each, requiring at least +# one connection. +known_addresses = ['3.81.135.135:35000','34.207.240.74:35000','18.208.195.207:35000','3.90.67.160:35000'] + +# Minimum number of fully-connected peers to consider network component initialized. +min_peers_for_initialization = 2 + +# The interval between each fresh round of gossiping the node's public address. +gossip_interval = '120 seconds' + +# Initial delay for starting address gossipping after the network starts. This should be slightly +# more than the expected time required for initial connections to complete. +initial_gossip_delay = '5 seconds' + +# How long a connection is allowed to be stuck as pending before it is abandoned. +max_addr_pending_time = '1 minute' + +# Maximum time allowed for a connection handshake between two nodes to be completed. Connections +# exceeding this threshold are considered unlikely to be healthy or even malicious and thus +# terminated. +handshake_timeout = '20 seconds' + +# Maximum number of incoming connections per unique peer allowed. If the limit is hit, additional +# connections will be rejected. A value of `0` means unlimited. +max_incoming_peer_connections = 3 + +# The maximum total of upstream bandwidth in bytes per second allocated to non-validating peers. +# A value of `0` means unlimited. 
+max_outgoing_byte_rate_non_validators = 6553600
+
+# The maximum allowed total impact of requests from non-validating peers per second answered.
+# A value of `0` means unlimited.
+max_incoming_message_rate_non_validators = 3000
+
+# Maximum number of requests for data from a single peer that are allowed to be buffered. A value of
+# `0` means unlimited.
+max_in_flight_demands = 50
+
+# Version threshold to enable tarpit for.
+#
+# When set to a version (the value may be `null` to disable the feature), any peer that reports a
+# protocol version equal or below the threshold will be rejected only after holding open the
+# connection for a specific (`tarpit_duration`) amount of time.
+#
+# This option makes most sense to enable on known nodes with addresses where legacy nodes that are
+# still in operation are connecting to, as these older versions will only attempt to reconnect to
+# other nodes once they have exhausted their set of known nodes.
+tarpit_version_threshold = '1.2.1'
+
+# How long to hold connections to trapped legacy nodes.
+tarpit_duration = '10 minutes'
+
+# The probability [0.0, 1.0] of this node trapping a legacy node.
+#
+# Since older nodes will only reconnect if all their options are exhausted, it is sufficient for a
+# single known node to hold open a connection to prevent the node from reconnecting. This should be
+# set to `1/n` or higher, with `n` being the number of known nodes expected in the configuration of
+# legacy nodes running this software.
+tarpit_chance = 0.2
+
+# Minimum time a peer is kept on block list before being redeemed. The actual
+# timeout duration is calculated by selecting a random value between
+# `blocklist_retain_min_duration` and `blocklist_retain_max_duration`.
+blocklist_retain_min_duration = '2 minutes'
+
+# Maximum time a peer is kept on block list before being redeemed. The actual
+# timeout duration is calculated by selecting a random value between
+# `blocklist_retain_min_duration` and `blocklist_retain_max_duration`.
+blocklist_retain_max_duration = '10 minutes'
+
+# Identity of a node
+#
+# When this section is not specified, an identity will be generated when the node process starts with a self-signed certificate.
+# This option makes sense for some private chains where for security reasons joining new nodes is restricted.
+# [network.identity]
+# tls_certificate = "node_cert.pem"
+# secret_key = "node.pem"
+# ca_certificate = "ca_cert.pem"
+
+# Weights for impact estimation of incoming messages, used in combination with
+# `max_incoming_message_rate_non_validators`.
+#
+# Any weight set to 0 means that the category of traffic is exempt from throttling.
+[network.estimator_weights]
+consensus = 0
+block_gossip = 1
+transaction_gossip = 0
+finality_signature_gossip = 1
+address_gossip = 0
+finality_signature_broadcasts = 0
+transaction_requests = 1
+transaction_responses = 0
+legacy_deploy_requests = 1
+legacy_deploy_responses = 0
+block_requests = 1
+block_responses = 0
+block_header_requests = 1
+block_header_responses = 0
+trie_requests = 1
+trie_responses = 0
+finality_signature_requests = 1
+finality_signature_responses = 0
+sync_leap_requests = 1
+sync_leap_responses = 0
+approvals_hashes_requests = 1
+approvals_hashes_responses = 0
+execution_results_requests = 1
+execution_results_responses = 0
+
+# ==================================================
+# Configuration options for the BinaryPort server
+# ==================================================
+[binary_port_server]
+
+# Flag which enables the BinaryPort server.
+enable_server = true
+
+# Listening address for BinaryPort server.
+address = '0.0.0.0:7779'
+
+# Flag that enables the `AllValues` get request. Disabled by default, because it can potentially be abused to retrieve huge amounts of data and clog the node.
+allow_request_get_all_values = true
+
+# Flag that enables the `Trie` get request. Disabled by default, because it can potentially be abused to retrieve huge amounts of data and clog the node.
+allow_request_get_trie = false
+
+# Flag that enables the `TrySpeculativeExec` request. Disabled by default.
+allow_request_speculative_exec = false
+
+# Maximum size of a message in bytes.
+max_message_size_bytes = 134_217_728
+
+# Maximum number of connections to the server.
+max_connections = 5
+
+# The global max rate of requests (per second) before they are limited.
+# The implementation uses a sliding window algorithm.
+qps_limit = 1000
+
+# Initial time given to a connection before it expires
+initial_connection_lifetime = '10 seconds'
+
+# The amount of time which is given to a connection to extend its lifetime when a valid
+# [`Command::Get(GetRequest::Record)`] is sent to the node
+get_record_request_termination_delay = '0 seconds'
+
+# The amount of time which is given to a connection to extend its lifetime when a valid
+# [`Command::Get(GetRequest::Information)`] is sent to the node
+get_information_request_termination_delay = '5 seconds'
+
+# The amount of time which is given to a connection to extend its lifetime when a valid
+# [`Command::Get(GetRequest::State)`] is sent to the node
+get_state_request_termination_delay = '0 seconds'
+
+# The amount of time which is given to a connection to extend its lifetime when a valid
+# [`Command::Get(GetRequest::Trie)`] is sent to the node
+get_trie_request_termination_delay = '0 seconds'
+
+# The amount of time which is given to a connection to extend its lifetime when a valid
+# [`Command::TryAcceptTransaction`] is sent to the node
+accept_transaction_request_termination_delay = '24 seconds'
+
+# The amount of time which is given to a connection to extend its lifetime when a valid
+# [`Command::TrySpeculativeExec`] is sent to the node
+speculative_exec_request_termination_delay = '0 seconds'
+
+
+# ==============================================
+# Configuration options for the REST HTTP server
+# ==============================================
+[rest_server]
+
+# Flag which enables the REST HTTP server.
+enable_server = true + +# Listening address for REST HTTP server. If the port is set to 0, a random port will be used. +# +# If the specified port cannot be bound to, a random port will be tried instead. If binding fails, +# the REST HTTP server will not run, but the node will be otherwise unaffected. +# +# The actual bound address will be reported via a log line if logging is enabled. +address = '0.0.0.0:8888' + +# The global max rate of requests (per second) before they are limited. +# Request will be delayed to the next 1 second bucket once limited. +qps_limit = 100 + +# Specifies which origin will be reported as allowed by REST server. +# +# If left empty, CORS will be disabled. +# If set to '*', any origin is allowed. +# Otherwise, only a specified origin is allowed. The given string must conform to the [origin scheme](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin). +cors_origin = '' + + +# ========================================================== +# Configuration options for the SSE HTTP event stream server +# ========================================================== +[event_stream_server] + +# Flag which enables the SSE HTTP event stream server. +enable_server = true + +# Listening address for SSE HTTP event stream server. If the port is set to 0, a random port will be used. +# +# If the specified port cannot be bound to, a random port will be tried instead. If binding fails, +# the SSE HTTP event stream server will not run, but the node will be otherwise unaffected. +# +# The actual bound address will be reported via a log line if logging is enabled. +address = '0.0.0.0:9999' + +# The number of event stream events to buffer. +event_stream_buffer_length = 5000 + +# The maximum number of subscribers across all event streams the server will permit at any one time. +max_concurrent_subscribers = 100 + +# Specifies which origin will be reported as allowed by event stream server. +# +# If left empty, CORS will be disabled. 
+# If set to '*', any origin is allowed. +# Otherwise, only a specified origin is allowed. The given string must conform to the [origin scheme](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin). +cors_origin = '' + +# =============================================== +# Configuration options for the storage component +# =============================================== +[storage] + +# Path (absolute, or relative to this config.toml) to the folder where any files created +# or read by the storage component will exist. A subfolder named with the network name will be +# automatically created and used for the storage component files. +# +# If the folder doesn't exist, it and any required parents will be created. +# +# If unset, the path must be supplied as an argument via the CLI. +path = '/var/lib/casper/casper-node' + +# Maximum size of the database to use for the block store. +# +# The size should be a multiple of the OS page size. +# +# 483_183_820_800 == 450 GiB. +max_block_store_size = 483_183_820_800 + +# Maximum size of the database to use for the deploy store. +# +# The size should be a multiple of the OS page size. +# +# 322_122_547_200 == 300 GiB. +max_deploy_store_size = 322_122_547_200 + +# Maximum size of the database to use for the deploy metadata. +# +# The size should be a multiple of the OS page size. +# +# 322_122_547_200 == 300 GiB. +max_deploy_metadata_store_size = 322_122_547_200 + +# Maximum size of the database to use for the state snapshots. +# +# The size should be a multiple of the OS page size. +# +# 10_737_418_240 == 10 GiB. +max_state_store_size = 10_737_418_240 + +# Memory deduplication. +# +# If enabled, nodes will attempt to share loaded objects if possible. +enable_mem_deduplication = true + +# Memory duplication garbage collection. +# +# Sets the frequency how often the memory pool cache is swept for free references. 
+# For example, setting this value to 5 means that every 5th time something is put in the pool the cache is swept. +mem_pool_prune_interval = 4096 + + +# =================================== +# Configuration options for gossiping +# =================================== +[gossip] + +# Target number of peers to infect with a given piece of data. +infection_target = 3 + +# The saturation limit as a percentage, with a maximum value of 99. Used as a termination +# condition. +# +# Example: assume the `infection_target` is 3, the `saturation_limit_percent` is 80, and we don't +# manage to newly infect 3 peers. We will stop gossiping once we know of more than 15 holders +# excluding us since 80% saturation would imply 3 new infections in 15 peers. +saturation_limit_percent = 80 + +# The maximum duration for which to keep finished entries. +# +# The longer they are retained, the lower the likelihood of re-gossiping a piece of data. However, +# the longer they are retained, the larger the list of finished entries can grow. +finished_entry_duration = '1 minute' + +# The timeout duration for a single gossip request, i.e. for a single gossip message +# sent from this node, it will be considered timed out if the expected response from that peer is +# not received within this specified duration. +gossip_request_timeout = '30 seconds' + +# The timeout duration for retrieving the remaining part(s) of newly-discovered data +# from a peer which gossiped information about that data to this node. +get_remainder_timeout = '5 seconds' + +# The timeout duration for a newly-received, gossiped item to be validated and stored by another +# component before the gossiper abandons waiting to gossip the item onwards. 
+validate_and_store_timeout = '1 minute' + + +# =============================================== +# Configuration options for the block accumulator +# =============================================== +[block_accumulator] + +# Block height difference threshold for starting to execute the blocks. +attempt_execution_threshold = 3 + +# Accepted time interval for inactivity in block accumulator. +dead_air_interval = '3 minutes' + +# Time after which the block acceptors are considered old and can be purged. +purge_interval = '1 minute' + + +# ================================================ +# Configuration options for the block synchronizer +# ================================================ +[block_synchronizer] + +# Maximum number of fetch-trie tasks to run in parallel during block synchronization. +max_parallel_trie_fetches = 5000 + +# Time interval for the node to ask for refreshed peers. +peer_refresh_interval = '90 seconds' + +# Time interval for the node to check what the block synchronizer needs to acquire next. +need_next_interval = '1 second' + +# Time interval for recurring disconnection of dishonest peers. +disconnect_dishonest_peers_interval = '10 seconds' + +# Time interval for resetting the latch in block builders. +latch_reset_interval = '5 seconds' + + +# ============================================= +# Configuration options for the block validator +# ============================================= +[block_validator] + +# Maximum number of completed entries to retain. +# +# A higher value can avoid creating needless validation work on an already-validated proposed +# block, but comes at the cost of increased memory consumption. +max_completed_entries = 3 + + +# ================================== +# Configuration options for fetchers +# ================================== +[fetcher] + +# The timeout duration for a single fetcher request, i.e. 
for a single fetcher message +# sent from this node to another node, it will be considered timed out if the expected response from that peer is +# not received within this specified duration. +get_from_peer_timeout = '10 seconds' + + +# ======================================================== +# Configuration options for the contract runtime component +# ======================================================== +[contract_runtime] + +# Optional maximum size of the database to use for the global state store. +# +# If unset, defaults to 805,306,368,000 == 750 GiB. +# +# The size should be a multiple of the OS page size. +max_global_state_size = 2_089_072_132_096 + +# Optional depth limit to use for global state queries. +# +# If unset, defaults to 5. +#max_query_depth = 5 + +# Enable manual synchronizing to disk. +# +# If unset, defaults to true. +#enable_manual_sync = true + + +# ================================================== +# Configuration options for the transaction acceptor +# ================================================== +[transaction_acceptor] + +# The leeway allowed when considering whether a transaction is future-dated or not. +# +# To accommodate minor clock drift, transactions whose timestamps are within `timestamp_leeway` in the +# future are still acceptable. +# +# The maximum value to which `timestamp_leeway` can be set is defined by the chainspec setting +# `transaction.max_timestamp_leeway`. +timestamp_leeway = '2 seconds' + + +# =========================================== +# Configuration options for the transaction buffer +# =========================================== +[transaction_buffer] + +# The interval of checking for expired transactions. +expiry_check_interval = '1 minute' + + +# ============================================== +# Configuration options for the diagnostics port +# ============================================== +[diagnostics_port] + +# If set, the diagnostics port will be available on a UNIX socket. 
+enabled = false + +# Filename for the UNIX domain socket the diagnostics port listens on. +socket_path = "debug.socket" + +# The umask to set before creating the socket. A restrictive mask like `0o077` will cause the +# socket to be only accessible by the user the node runs as. A more relaxed variant is `0o007`, +# which allows for group access as well. +socket_umask = 0o077 + + +# ============================================= +# Configuration options for the upgrade watcher +# ============================================= +[upgrade_watcher] + +# How often to scan file system for available upgrades. +upgrade_check_interval = '30 seconds' diff --git a/resources/joiner/.gitignore b/resources/joiner/.gitignore deleted file mode 100644 index ba24fd9619..0000000000 --- a/resources/joiner/.gitignore +++ /dev/null @@ -1 +0,0 @@ -chainspec.toml diff --git a/resources/joiner/accounts.toml b/resources/joiner/accounts.toml deleted file mode 100644 index 038c27254a..0000000000 --- a/resources/joiner/accounts.toml +++ /dev/null @@ -1,10 +0,0 @@ -[[accounts]] -public_key = "01522ef6c89038019cb7af05c340623804392dd2bb1f4dab5e4a9c3ab752fc0179" -balance = "1000000000000000000000000000" - -[[accounts]] -public_key = "01f60bce2bb1059c41910eac1e7ee6c3ef4c8fcc63a901eb9603c1524cadfb0c18" -balance = "1000000000000000000" - -[accounts.validator] -bonded_amount = "500000000000000" diff --git a/resources/joiner/chainspec.toml.in b/resources/joiner/chainspec.toml.in deleted file mode 120000 index 919941edbd..0000000000 --- a/resources/joiner/chainspec.toml.in +++ /dev/null @@ -1 +0,0 @@ -../local/chainspec.toml.in \ No newline at end of file diff --git a/resources/joiner/config.toml b/resources/joiner/config.toml deleted file mode 100644 index 7315ad3142..0000000000 --- a/resources/joiner/config.toml +++ /dev/null @@ -1 +0,0 @@ -../local/config.toml \ No newline at end of file diff --git a/resources/joiner/secret_keys b/resources/joiner/secret_keys deleted file mode 120000 index 
0c8cd555ee..0000000000 --- a/resources/joiner/secret_keys +++ /dev/null @@ -1 +0,0 @@ -../local/secret_keys/ \ No newline at end of file diff --git a/resources/local/chainspec.toml.in b/resources/local/chainspec.toml.in index 6379020b4d..ce6a2377bf 100644 --- a/resources/local/chainspec.toml.in +++ b/resources/local/chainspec.toml.in @@ -1,6 +1,6 @@ [protocol] # Protocol version. -version = '1.0.0' +version = '2.0.4' # Whether we need to clear latest blocks back to the switch block just before the activation point or not. hard_reset = false # This protocol version becomes active at this point. @@ -20,135 +20,300 @@ activation_point = '${TIMESTAMP}' name = 'casper-example' # The maximum size of an acceptable networking message in bytes. Any message larger than this will # be rejected at the networking level. -maximum_net_message_size = 23_068_672 +maximum_net_message_size = 25_165_824 [core] # Era duration. -era_duration = '41seconds' +era_duration = '41 seconds' # Minimum number of blocks per era. An era will take longer than `era_duration` if that is necessary to reach the # minimum height. -minimum_era_height = 10 +minimum_era_height = 5 +# Minimum difference between a block's and its child's timestamp. +minimum_block_time = '4096 ms' # Number of slots available in validator auction. -validator_slots = 5 +validator_slots = 7 +# A number between 0 and 1 representing the fault tolerance threshold as a fraction, used by the internal finalizer. +# It is the fraction of validators that would need to equivocate to make two honest nodes see two conflicting blocks as +# finalized: A higher value F makes it safer to rely on finalized blocks. It also makes it more difficult to finalize +# blocks, however, and requires strictly more than (F + 1)/2 validators to be working correctly. +finality_threshold_fraction = [1, 3] +# Protocol version from which nodes are required to hold strict finality signatures. 
+start_protocol_version_with_strict_finality_signatures_required = '1.5.0' +# Which finality is required for legacy blocks. Options are 'Strict', 'Weak' and 'Any'. +# Used to determine finality sufficiency for new joiners syncing blocks created +# in a protocol version before +# `start_protocol_version_with_strict_finality_signatures_required`. +legacy_required_finality = 'Strict' + # Number of eras before an auction actually defines the set of validators. If you bond with a sufficient bid in era N, # you will be a validator in era N + auction_delay + 1. -auction_delay = 3 +auction_delay = 1 # The period after genesis during which a genesis validator's bid is locked. -locked_funds_period = '90days' +locked_funds_period = '0 days' +# The period in which genesis validator's bid is released over time after it's unlocked. +vesting_schedule_period = '0 weeks' # Default number of eras that need to pass to be able to withdraw unbonded funds. -unbonding_delay = 14 +unbonding_delay = 7 # Round seigniorage rate represented as a fraction of the total supply. # -# Annual issuance: 2% -# Minimum round exponent: 12 -# Ticks per year: 31536000000 +# A rate that makes the rewards roughly 0.05% of the initial stake per block under default NCTL settings. +round_seigniorage_rate = [1, 4_200_000_000_000_000_000] +# Maximum number of associated keys for a single account. +max_associated_keys = 100 +# Maximum height of contract runtime call stack. +max_runtime_call_stack_height = 12 +# Minimum allowed delegation amount in motes +minimum_delegation_amount = 500_000_000_000 +# Maximum allowed delegation amount in motes +maximum_delegation_amount = 1_000_000_000_000_000_000 +# Minimum bid amount allowed in motes. 
Withdrawing one's bid to an amount strictly less than +# the value specified will be treated as a full unbond of a validator and their associated delegators +minimum_bid_amount = 100_000_000_000_000 +# Global state prune batch size (0 = this feature is off) +prune_batch_size = 0 +# Enables strict arguments checking when calling a contract; i.e. that all non-optional args are provided and of the correct `CLType`. +strict_argument_checking = false +# Number of simultaneous peer requests. +simultaneous_peer_requests = 5 +# The consensus protocol to use. Options are "Zug" and "Highway". +consensus_protocol = 'Zug' +# The maximum amount of delegators per validator. +max_delegators_per_validator = 1200 +# The split in finality signature rewards between block producer and participating signers. +finders_fee = [1, 5] +# The proportion of baseline rewards going to reward finality signatures specifically. +finality_signature_proportion = [1, 2] +# Lookback interval indicating which past block we are looking at to reward. +signature_rewards_max_delay = 3 +# Allows transfers between accounts in the blockchain network. +# +# Setting this to false restricts normal accounts from sending tokens to other accounts, allowing transfers only to administrators. +# Changing this option makes sense only on private chains. +allow_unrestricted_transfers = true +# Enables the auction entry points 'delegate' and 'add_bid'. +# +# Setting this to false makes sense only for private chains which don't need to auction new validator slots. These +# auction entry points will return an error if called when this option is set to false. +allow_auction_bids = true +# If set to false, then consensus doesn't compute rewards and always uses 0. +compute_rewards = true +# Defines how refunds of the unused portion of payment amounts are calculated and handled. # -# (1+0.02)^((2^12)/31536000000)-1 is expressed as a fractional number below. 
-round_seigniorage_rate = [15_959, 6_204_824_582_392]
+#
+# Valid options are:
+# 'refund': a ratio of the unspent token is returned to the spender.
+# 'burn': a ratio of the unspent token is burned.
+# 'no_refund': no refunds are paid out; this is functionally equivalent to refund with 0% ratio.
+# This causes excess payment amounts to be sent to either a
+# pre-defined purse, or back to the sender. The refunded amount is calculated as the given ratio of the payment amount
+# minus the execution costs.
+refund_handling = { type = 'refund', refund_ratio = [75, 100] }
+# Defines how fees are handled.
+#
+# Valid options are:
+# 'no_fee': fees are eliminated.
+# 'pay_to_proposer': fees are paid to the block proposer
+# 'accumulate': fees are accumulated in a special purse and distributed at the end of each era evenly among all
+# administrator accounts
+# 'burn': fees are burned
+fee_handling = { type = 'pay_to_proposer' }
+# If a validator would receive a validator credit, it cannot exceed this percentage of their total stake.
+validator_credit_cap = [1, 5]
+# Defines how pricing is handled.
+#
+# Valid options are:
+# 'payment_limited': senders of transaction self-specify how much they pay.
+# 'fixed': costs are fixed, per the cost table
+# 'prepaid': prepaid transaction (currently not supported)
+pricing_handling = { type = 'payment_limited' }
+# Does the network allow pre-payment for future
+# execution? Currently not supported.
+#
+allow_prepaid = false
+# Defines how gas holds affect available balance calculations.
+#
+# Valid options are:
+# 'accrued': sum of full value of all non-expired holds.
+# 'amortized': sum of each hold is amortized over the time remaining until expiry.
+#
+# For instance, if 12 hours remained on a gas hold with a 24-hour `gas_hold_interval`,
+# with accrued, the full hold amount would be applied
+# with amortized, half the hold amount would be applied
+gas_hold_balance_handling = { type = 'accrued' }
+# Defines how long gas holds last.
+#
+# If fee_handling is set to 'no_fee', the system places a balance hold on the payer
+# equal to the value the fee would have been. Such balance holds expire after a time
+# interval has elapsed. This setting controls how long that interval is. The available
+# balance of a purse equals its total balance minus the held amount(s) of non-expired
+# holds (see gas_hold_balance_handling setting for details of how that is calculated).
+#
+# For instance, if gas_hold_interval is 24 hours and 100 gas is used from a purse,
+# a hold for 100 is placed on that purse and is considered when calculating total balance
+# for 24 hours starting from the block_time when the hold was placed.
+gas_hold_interval = '24 hours'
+# List of public keys of administrator accounts. Setting this option makes sense only on private chains which require
+# administrator accounts for regulatory reasons.
+administrators = []
+# Flag that triggers a migration of all userland accounts and contracts present in global state to the addressable
+# entity in a lazy manner.
+# If the flag is set to false then no accounts and contracts are migrated during a protocol upgrade;
+# i.e. all Account records will be present under Key::Account and Contracts and their associated ContractPackage
+# will be written underneath Key::Hash.
+# If the flag is set to true then accounts and contracts are migrated lazily; i.e. on first use of the Account
+# and/or Contract as part of the execution of a Transaction. This means the Accounts/Contracts will be migrated
+# to their corresponding AddressableEntity and the NamedKeys for the previous record are separated and written
+# as discrete top level records. For Contracts specifically the entrypoints are also written as discrete top
+# level records.
+# Note: Enabling of the AddressableEntity feature is one-way; i.e. once enabled as part of a protocol upgrade
+# the flag cannot be disabled in a future protocol upgrade.
+enable_addressable_entity = false + +# This value is used as the penalty payment amount, the lowest cost, and the minimum balance amount. +baseline_motes_amount = 2_500_000_000 +# Flag on whether ambiguous entity versions returns an execution error. +trap_on_ambiguous_entity_version = false [highway] -# A number between 0 and 1 representing the fault tolerance threshold as a fraction, used by the internal finalizer. -# It is the fraction of validators that would need to equivocate to make two honest nodes see two conflicting blocks as -# finalized: A higher value F makes it safer to rely on finalized blocks. It also makes it more difficult to finalize -# blocks, however, and requires strictly more than (F + 1)/2 validators to be working correctly. -finality_threshold_fraction = [1, 3] -# Integer between 0 and 255. The power of two that is the number of milliseconds in the minimum round length, and -# therefore the minimum delay between a block and its child. E.g. 14 means 2^14 milliseconds, i.e. about 16 seconds. -minimum_round_exponent = 12 -# Integer between 0 and 255. Must be greater than `minimum_round_exponent`. The power of two that is the number of -# milliseconds in the maximum round length, and therefore the maximum delay between a block and its child. E.g. 19 -# means 2^19 milliseconds, i.e. about 8.7 minutes. -maximum_round_exponent = 19 -# The factor by which rewards for a round are multiplied if the greatest summit has ≤50% quorum, i.e. no finality. -# Expressed as a fraction (1/5 by default). -reduced_reward_multiplier = [1, 5] - -[deploys] +# Highway dynamically chooses its round length, between minimum_block_time and maximum_round_length. +maximum_round_length = '17 seconds' + +[transactions] +# The duration after the transaction timestamp that it can be included in a block. +max_ttl = '2 hours' +# The maximum number of approvals permitted in a single block. 
+block_max_approval_count = 2600
+# Maximum block size in bytes including transactions contained by the block. 0 means unlimited.
+max_block_size = 5_242_880
+# The upper limit of total gas of all transactions in a block.
+block_gas_limit = 1_625_000_000_000
+# The minimum amount in motes for a valid native transfer.
+native_transfer_minimum_motes = 2_500_000_000
+# The maximum value to which `transaction_acceptor.timestamp_leeway` can be set in the config.toml file.
+max_timestamp_leeway = '5 seconds'
+# Configuration of the transaction runtime.
+[transactions.enabled_runtime]
+vm_casper_v1 = true
+vm_casper_v2 = false
+
+[transactions.v1]
+# The configuration settings for the lanes of transactions including both native and Wasm based interactions.
+# Currently the node supports two native interactions, the mint and auction, which have the reserved identifiers of 0 and 1
+# respectively.
+# The remaining wasm based lanes specify the range of configuration settings for a given Wasm based transaction
+# within a given lane.
+# The maximum length in bytes of runtime args per V1 transaction.
+# [0] -> Transaction lane label (apart from the reserved native identifiers these are simply labels)
+# Note: For the given mainnet implementation we specially reserve the label 2 for install and upgrades and
+# the lane must be present and defined.
+# Different casper networks may not impose such a restriction.
+# [1] -> Max serialized length of the entire transaction in bytes for a given transaction in a certain lane +# [2] -> Max args length size in bytes for a given transaction in a certain lane +# [3] -> Transaction gas limit for a given transaction in a certain lane +# [4] -> The maximum number of transactions the lane can contain +native_mint_lane = [0, 2048, 1024, 100_000_000, 650] +native_auction_lane = [1, 3096, 2048, 2_500_000_000, 650] +install_upgrade_lane = [2, 750_000, 2048, 1_000_000_000_000, 1] +wasm_lanes = [ + [3, 262_144, 1024, 1_000_000_000_000, 1], + [4, 131_072, 1024, 100_000_000_000, 2], + [5, 65_536, 512, 5_000_000_000, 80] +] + +[transactions.deploy] # The maximum number of Motes allowed to be spent during payment. 0 means unlimited. max_payment_cost = '0' -# The duration after the deploy timestamp that it can be included in a block. -max_ttl = '1day' -# The maximum number of other deploys a deploy can depend on (require to have been executed before it can execute). -max_dependencies = 10 -# Maximum block size in bytes including deploys contained by the block. 0 means unlimited. -max_block_size = 10_485_760 -# The maximum number of non-transfer deploys permitted in a single block. -block_max_deploy_count = 100 -# The maximum number of wasm-less transfer deploys permitted in a single block. -block_max_transfer_count = 1000 -# The upper limit of total gas of all deploys in a block. -block_gas_limit = 10_000_000_000_000 # The limit of length of serialized payment code arguments. payment_args_max_length = 1024 # The limit of length of serialized session code arguments. session_args_max_length = 1024 -# The minimum amount in motes for a valid native transfer. -native_transfer_minimum_motes = 2_500_000_000 -[wasm] +[wasm.v1] # Amount of free memory (in 64kB pages) each contract can use for stack. max_memory = 64 # Max stack height (native WebAssembly stack limiter). 
-max_stack_height = 65_536 +max_stack_height = 500 -[wasm.storage_costs] +[storage_costs] # Gas charged per byte stored in the global state. -gas_per_byte = 630_000 +gas_per_byte = 1_117_587 -[wasm.opcode_costs] +# For each opcode cost below there exists a static cost and a dynamic cost. +# The static cost is a fixed cost for each opcode that is hardcoded and validated by benchmarks. +[wasm.v1.opcode_costs] # Bit operations multiplier. -bit = 300 +bit = 105 # Arithmetic add operations multiplier. -add = 210 +add = 105 # Mul operations multiplier. -mul = 240 +mul = 105 # Div operations multiplier. -div = 320 +div = 105 # Memory load operation multiplier. -load = 2_500 +load = 105 # Memory store operation multiplier. -store = 4_700 +store = 105 # Const store operation multiplier. -const = 110 +const = 105 # Local operations multiplier. -local = 390 +local = 105 # Global operations multiplier. -global = 390 -# Control flow operations multiplier. -control_flow = 440 +global = 105 # Integer operations multiplier. -integer_comparison = 250 +integer_comparison = 105 # Conversion operations multiplier. -conversion = 420 +conversion = 105 # Unreachable operation multiplier. -unreachable = 270 +unreachable = 105 # Nop operation multiplier. -nop = 200 +nop = 105 # Get current memory operation multiplier. -current_memory = 290 +current_memory = 105 # Grow memory cost, per page (64kb). -grow_memory = 240_000 -# Regular opcode cost. -regular = 210 +grow_memory = 900 +# Sign extension operations cost +sign = 105 + +# Control flow operations multiplier. 
+[wasm.v1.opcode_costs.control_flow] +block = 255 +loop = 255 +if = 105 +else = 105 +end = 105 +br = 1665 +br_if = 510 +return = 105 +select = 105 +call = 225 +call_indirect = 270 +drop = 105 + +[wasm.v1.opcode_costs.control_flow.br_table] +# Fixed cost per `br_table` opcode +cost = 150 +# Size of target labels in the `br_table` opcode will be multiplied by `size_multiplier` +size_multiplier = 100 # Host function declarations are located in smart_contracts/contract/src/ext_ffi.rs -[wasm.host_function_costs] +[wasm.v1.host_function_costs] add = { cost = 5_800, arguments = [0, 0, 0, 0] } -add_associated_key = { cost = 9_000, arguments = [0, 0, 0] } -add_contract_version = { cost = 200, arguments = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] } -blake2b = { cost = 200, arguments = [0, 0, 0, 0] } -call_contract = { cost = 4_500, arguments = [0, 0, 0, 0, 0, 420, 0] } -call_versioned_contract = { cost = 200, arguments = [0, 0, 0, 0, 0, 0, 0, 0, 0] } +add_associated_key = { cost = 1_200_000, arguments = [0, 0, 0] } +add_contract_version = { cost = 200, arguments = [0, 0, 0, 0, 120_000, 0, 0, 0, 0, 0] } +add_contract_version_with_message_topics = { cost = 200, arguments = [0, 0, 0, 0, 120_000, 0, 0, 0, 30_000, 0, 0] } +add_package_version_with_message_topics = { cost = 200, arguments = [0, 0, 0, 0, 120_000, 0, 0, 0, 30_000, 0, 0] } +blake2b = { cost = 1_200_000, arguments = [0, 120_000, 0, 0] } +call_contract = { cost = 300_000_000, arguments = [0, 0, 0, 120_000, 0, 120_000, 0] } +call_versioned_contract = { cost = 300_000_000, arguments = [0, 0, 0, 0, 0, 120_000, 0, 120_000, 0] } create_contract_package_at_hash = { cost = 200, arguments = [0, 0] } create_contract_user_group = { cost = 200, arguments = [0, 0, 0, 0, 0, 0, 0, 0] } -create_purse = { cost = 170_000, arguments = [0, 0] } +create_purse = { cost = 2_500_000_000, arguments = [0, 0] } disable_contract_version = { cost = 200, arguments = [0, 0, 0, 0] } -get_balance = { cost = 3_800, arguments = [0, 0, 0] } +get_balance = { cost = 
3_000_000, arguments = [0, 0, 0] } get_blocktime = { cost = 330, arguments = [0] } get_caller = { cost = 380, arguments = [0] } get_key = { cost = 2_000, arguments = [0, 440, 0, 0, 0] } get_main_purse = { cost = 1_300, arguments = [0] } -get_named_arg = { cost = 200, arguments = [0, 0, 0, 0] } +get_named_arg = { cost = 200, arguments = [0, 120_000, 0, 120_000] } get_named_arg_size = { cost = 200, arguments = [0, 0, 0] } get_phase = { cost = 710, arguments = [0] } get_system_contract = { cost = 1_100, arguments = [0, 0, 0] } @@ -156,57 +321,179 @@ has_key = { cost = 1_500, arguments = [0, 840] } is_valid_uref = { cost = 760, arguments = [0, 0] } load_named_keys = { cost = 42_000, arguments = [0, 0] } new_uref = { cost = 17_000, arguments = [0, 0, 590] } +random_bytes = { cost = 200, arguments = [0, 0] } print = { cost = 20_000, arguments = [0, 4_600] } provision_contract_user_group_uref = { cost = 200, arguments = [0, 0, 0, 0, 0] } -put_key = { cost = 38_000, arguments = [0, 1_100, 0, 0] } +put_key = { cost = 100_000_000, arguments = [0, 120_000, 0, 120_000] } read_host_buffer = { cost = 3_500, arguments = [0, 310, 0] } -read_value = { cost = 6_000, arguments = [0, 0, 0] } -read_value_local = { cost = 5_500, arguments = [0, 590, 0] } +read_value = { cost = 60_000, arguments = [0, 120_000, 0] } +dictionary_get = { cost = 5_500, arguments = [0, 590, 0] } remove_associated_key = { cost = 4_200, arguments = [0, 0] } remove_contract_user_group = { cost = 200, arguments = [0, 0, 0, 0] } -remove_contract_user_group_urefs = { cost = 200, arguments = [0, 0, 0, 0, 0, 0] } +remove_contract_user_group_urefs = { cost = 200, arguments = [0, 0, 0, 0, 0, 120_000] } remove_key = { cost = 61_000, arguments = [0, 3_200] } ret = { cost = 23_000, arguments = [0, 420_000] } revert = { cost = 500, arguments = [0] } set_action_threshold = { cost = 74_000, arguments = [0, 0] } -transfer_from_purse_to_account = { cost = 160_000, arguments = [0, 0, 0, 0, 0, 0, 0, 0, 0] } 
-transfer_from_purse_to_purse = { cost = 82_000, arguments = [0, 0, 0, 0, 0, 0, 0, 0] } -transfer_to_account = { cost = 24_000, arguments = [0, 0, 0, 0, 0, 0, 0] } +transfer_from_purse_to_account = { cost = 2_500_000_000, arguments = [0, 0, 0, 0, 0, 0, 0, 0, 0] } +transfer_from_purse_to_purse = { cost = 82_000_000, arguments = [0, 0, 0, 0, 0, 0, 0, 0] } +transfer_to_account = { cost = 2_500_000_000, arguments = [0, 0, 0, 0, 0, 0, 0] } update_associated_key = { cost = 4_200, arguments = [0, 0, 0] } write = { cost = 14_000, arguments = [0, 0, 0, 980] } -write_local = { cost = 9_500, arguments = [0, 1_800, 0, 520] } +dictionary_put = { cost = 9_500, arguments = [0, 1_800, 0, 520] } +enable_contract_version = { cost = 200, arguments = [0, 0, 0, 0] } +manage_message_topic = { cost = 200, arguments = [0, 30_000, 0, 0] } +emit_message = { cost = 200, arguments = [0, 30_000, 0, 120_000] } +generic_hash = { cost = 1_200_000, arguments = [0, 120_000, 0, 0, 0] } +cost_increase_per_message = 50 +get_block_info = { cost = 330, arguments = [0, 0] } +recover_secp256k1 = { cost = 1_300_000, arguments = [0, 120_000, 0, 0, 0, 0] } +verify_signature = { cost = 1_300_000, arguments = [0, 120_000, 0, 0, 0, 0] } +call_package_version = { cost = 300_000_000, arguments = [0, 0, 0, 0, 0, 0, 0, 120_000, 0, 120_000, 0] } + +[wasm.v2] +# Amount of free memory each contract can use for stack. +max_memory = 17 + +[wasm.v2.opcode_costs] +# Bit operations multiplier. +bit = 105 +# Arithmetic add operations multiplier. +add = 105 +# Mul operations multiplier. +mul = 105 +# Div operations multiplier. +div = 105 +# Memory load operation multiplier. +load = 105 +# Memory store operation multiplier. +store = 105 +# Const store operation multiplier. +const = 105 +# Local operations multiplier. +local = 105 +# Global operations multiplier. +global = 105 +# Integer operations multiplier. +integer_comparison = 105 +# Conversion operations multiplier. +conversion = 105 +# Unreachable operation multiplier. 
+unreachable = 105 +# Nop operation multiplier. +nop = 105 +# Get current memory operation multiplier. +current_memory = 105 +# Grow memory cost, per page (64kb). +grow_memory = 900 +# Sign extension operations cost +sign = 105 + +# Control flow operations multiplier. +[wasm.v2.opcode_costs.control_flow] +block = 255 +loop = 255 +if = 105 +else = 105 +end = 105 +br = 1665 +br_if = 510 +return = 105 +select = 105 +call = 225 +call_indirect = 270 +drop = 105 + +[wasm.v2.opcode_costs.control_flow.br_table] +# Fixed cost per `br_table` opcode +cost = 150 +# Size of target labels in the `br_table` opcode will be multiplied by `size_multiplier` +size_multiplier = 100 + +[wasm.v2.host_function_costs] +read = { cost = 0, arguments = [0, 0, 0, 0, 0, 0] } +write = { cost = 0, arguments = [0, 0, 0, 0, 0] } +remove = { cost = 0, arguments = [0, 0, 0] } +copy_input = { cost = 0, arguments = [0, 0] } +ret = { cost = 0, arguments = [0, 0] } +create = { cost = 0, arguments = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] } +transfer = { cost = 0, arguments = [0, 0, 0] } +env_balance = { cost = 0, arguments = [0, 0, 0, 0] } +upgrade = { cost = 0, arguments = [0, 0, 0, 0, 0, 0] } +call = { cost = 0, arguments = [0, 0, 0, 0, 0, 0, 0, 0, 0] } +print = { cost = 0, arguments = [0, 0] } +emit = { cost = 0, arguments = [0, 0, 0, 0] } +env_info = { cost = 0, arguments = [0, 0] } + +[wasm.messages_limits] +max_topic_name_size = 256 +max_topics_per_contract = 128 +max_message_size = 1_024 [system_costs] -wasmless_transfer_cost = 10_000 +# Penalty charge for calling invalid entry point in a system contract. 
+no_such_entrypoint = 2_500_000_000

 [system_costs.auction_costs]
-get_era_validators = 10_000
-read_seigniorage_recipients = 10_000
-add_bid = 10_000
-withdraw_bid = 10_000
-delegate = 10_000
-undelegate = 10_000
-run_auction = 10_000
-slash = 10_000
-distribute = 10_000
-withdraw_delegator_reward = 10_000
-withdraw_validator_reward = 10_000
-read_era_id = 10_000
-activate_bid = 10_000
+get_era_validators = 2_500_000_000
+read_seigniorage_recipients = 5_000_000_000
+add_bid = 2_500_000_000
+withdraw_bid = 2_500_000_000
+delegate = 2_500_000_000
+undelegate = 2_500_000_000
+run_auction = 2_500_000_000
+slash = 2_500_000_000
+distribute = 2_500_000_000
+withdraw_delegator_reward = 5_000_000_000
+withdraw_validator_reward = 5_000_000_000
+read_era_id = 2_500_000_000
+activate_bid = 2_500_000_000
+redelegate = 2_500_000_000
+change_bid_public_key = 5_000_000_000
+add_reservations = 2_500_000_000
+cancel_reservations = 2_500_000_000

 [system_costs.mint_costs]
-mint = 10_000
-reduce_total_supply = 10_000
-create = 10_000
-balance = 10_000
-transfer = 10_000
-read_base_round_reward = 10_000
+mint = 2_500_000_000
+reduce_total_supply = 2_500_000_000
+create = 2_500_000_000
+balance = 100_000_000
+burn = 100_000_000
+transfer = 100_000_000
+read_base_round_reward = 2_500_000_000
+mint_into_existing_purse = 2_500_000_000

 [system_costs.handle_payment_costs]
 get_payment_purse = 10_000
 set_refund_purse = 10_000
 get_refund_purse = 10_000
-finalize_payment = 10_000
+finalize_payment = 2_500_000_000

 [system_costs.standard_payment_costs]
 pay = 10_000
+
+[vacancy]
+# The cost of a transaction is based on a multiplier. This allows for economic disincentives for misuse of the network.
+#
+# The network starts with a current_gas_price of min_gas_price.
+#
+# Each block has multiple limits (bytes, transactions, transfers, gas, etc.)
+# The utilization for a block is determined by the highest percentage utilization of each of these limits.
+#
+# Ex: transfers limit is 650 and transactions limit is 20 (assume other limits are not a factor here)
+# 19 transactions -> 19/20 or 95%
+# 600 transfers -> 600/650 or 92.3%
+# resulting block utilization is 95
+#
+# The utilization for an era is the average of all block utilizations. At the switch block, the dynamic gas_price is
+# adjusted with the following:
+#
+# If utilization was below the lower_threshold, current_gas_price is decremented by one if higher than min_gas_price.
+# If utilization falls between the thresholds, current_gas_price is not changed.
+# If utilization was above the upper_threshold, current_gas_price is incremented by one if lower than max_gas_price.
+#
+# The cost charged for the transaction is simply the gas_used * current_gas_price.
+upper_threshold = 90
+lower_threshold = 50
+max_gas_price = 3
+min_gas_price = 1
diff --git a/resources/local/config.toml b/resources/local/config.toml
index 80026056a1..7d8335f199 100644
--- a/resources/local/config.toml
+++ b/resources/local/config.toml
@@ -6,6 +6,67 @@
 # If set, use this hash as a trust anchor when joining an existing network.
 #trusted_hash = 'HEX-FORMATTED BLOCK HASH'

+# Historical sync behavior for this node. Options are:
+# 'ttl' (node will attempt to acquire all block data to comply with time to live enforcement)
+# 'genesis' (node will attempt to acquire all block data back to genesis)
+# 'nosync' (node will only acquire blocks moving forward)
+# 'isolated' (node will initialize without peers and will not accept peers)
+# 'completeblock' (node will acquire complete block and shutdown)
+# note: the only two states allowed to switch to Validate reactor state are `genesis` and `ttl`.
+# it is recommended for dedicated validator nodes to be in ttl mode to increase
+# their ability to maintain maximal uptime...if a long-running genesis validator
+# goes offline and comes back up while in genesis mode, it must backfill
+# any gaps in its block awareness before resuming validation.
+# +# it is recommended for reporting non-validator nodes to be in genesis mode to +# enable support for queries at any block height. +# +# it is recommended for non-validator working nodes (for dapp support, etc) to run in +# ttl or nosync mode (depending upon their specific data requirements). +# +# thus for instance a node backing a block explorer would prefer genesis mode, +# while a node backing a dapp interested in very recent activity would prefer to run in nosync mode, +# and a node backing a dapp interested in auction activity or tracking trends would prefer to run in ttl mode. +# note: as time goes on, the time to sync back to genesis takes progressively longer. +# note: ttl is a chainsepc configured behavior on a given network; consult the `max_ttl` chainspec setting +# (it is currently ~18 hours by default on production and production-like networks but subject to change). +# note: `nosync` is incompatible with validator behavior; a nosync node is prevented from participating +# in consensus / switching to validate mode. it is primarily for lightweight nodes that are +# only interested in recent activity. +# note: an isolated node will not connect to, sync with, or keep up with the network, but will respond to +# binary port, rest server, event server, and diagnostic port connections. +sync_handling = 'genesis' + +# Idle time after which the syncing process is considered stalled. +idle_tolerance = '20 minutes' + +# When the syncing process is considered stalled, it'll be retried up to `max_attempts` times. +max_attempts = 3 + +# Default delay for the control events that have no dedicated delay requirements. +control_logic_default_delay = '1 second' + +# Flag which forces the node to resync all of the blocks. +force_resync = false + +# A timeout for the ShutdownForUpgrade state, after which the node will upgrade even if not all +# conditions are satisfied. 
+shutdown_for_upgrade_timeout = '2 minutes' + +# Maximum time a node will wait for an upgrade to commit. +upgrade_timeout = '3 hours' + +# The node detects when it should do a controlled shutdown when it is in a detectably bad state +# in order to avoid potentially catastrophic uncontrolled crashes. Generally, a node should be +# allowed to shutdown, and if restarted that node will generally recover gracefully and resume +# normal operation. However, actively validating nodes have subjective state in memory that is +# lost on shutdown / restart and must be reacquired from other validating nodes on restart. +# If all validating nodes shutdown in the middle of an era, social consensus is required to restart +# the network. As a mitigation for that, the following config can be set to true on some validator +# nodes to cause nodes that are supposed to be validators in the current era to ignore controlled +# shutdown events and stay up. This allows them to act as sentinels for the consensus data for +# other restarting nodes. This config is inert on non-validating nodes. +prevent_validator_shutdown = false # ================================= # Configuration options for logging @@ -31,41 +92,88 @@ abbreviate_modules = false # consensus messages. secret_key_path = 'secret_key.pem' +# The maximum number of blocks by which execution is allowed to lag behind finalization. +# If it is more than that, consensus will pause, and resume once the executor has caught up. +max_execution_delay = 3 + + +# ======================================= +# Configuration options for Zug consensus +# ======================================= +[consensus.zug] + +# Request the latest protocol state from a random peer periodically, with this interval. +# '0 seconds' means it is disabled and we never request the protocol state from a peer. +sync_state_interval = '50 ms' + +# Log inactive or faulty validators periodically, with this interval. 
+# '0 seconds' means it is disabled and we never print the log message. +log_participation_interval = '1 minute' + +# The minimal proposal timeout. Validators wait this long for a proposal to receive a quorum of +# echo messages, before they vote to make the round skippable and move on to the next proposer. +proposal_timeout = '10 seconds' + +# The additional proposal delay that is still considered fast enough, in percent. This should +# take into account variables like empty vs. full blocks, network traffic etc. +# E.g. if proposing a full block while under heavy load takes 50% longer than an empty one +# while idle this should be at least 50, meaning that the timeout is 50% longer than +# necessary for a quorum of recent proposals, approximately. +proposal_grace_period = 200 + +# The average number of rounds after which the proposal timeout adapts by a factor of 2. +# Note: It goes up faster than it goes down: it takes fewer rounds to double than to halve. +proposal_timeout_inertia = 10 + +# The maximum difference between validators' clocks we expect. Incoming proposals whose timestamp +# lies in the future by more than that are rejected. +clock_tolerance = '1 second' + + # =========================================== # Configuration options for Highway consensus # =========================================== [consensus.highway] -# The folder in which the files with per-era latest unit hashes will be stored. -unit_hashes_folder = "../node-storage" # The duration for which incoming vertices with missing dependencies should be kept in a queue. -pending_vertex_timeout = '1min' +pending_vertex_timeout = '1 minute' -request_latest_state_timeout = '30sec' - -# If the current era's protocol state has not progressed for this long, shut down. -standstill_timeout = '5min' +# Request the latest protocol state from a random peer periodically, with this interval. +# '0 seconds' means it is disabled and we never request the protocol state from a peer. 
+request_state_interval = '20 seconds' # Log inactive or faulty validators periodically, with this interval. -log_participation_interval = '1min' +# '0 seconds' means it is disabled and we never print the log message. +log_participation_interval = '15 seconds' -# The maximum number of blocks by which execution is allowed to lag behind finalization. -# If it is more than that, consensus will pause, and resume once the executor has caught up. -max_execution_delay = 3 +# Log the synchronizer state periodically, with this interval. +# '0 seconds' means it is disabled and we never print the log message. +log_synchronizer_interval = '5 seconds' + +# Log the size of every incoming and outgoing serialized unit. +log_unit_sizes = false + +# The maximum number of peers we request the same vertex from in parallel. +max_requests_for_vertex = 5 + +# The maximum number of dependencies we request per validator in a batch. +# Limits requests per validator in panorama - in order to get a total number of +# requests, multiply by # of validators. +max_request_batch_size = 20 [consensus.highway.round_success_meter] # The number of most recent rounds we will be keeping track of. num_rounds_to_consider = 40 # The number of successful rounds that triggers us to slow down: With this many or fewer -# successes per `num_rounds_to_consider`, we increase our round exponent. +# successes per `num_rounds_to_consider`, we increase our round length. num_rounds_slowdown = 10 # The number of successful rounds that triggers us to speed up: With this many or more successes -# per `num_rounds_to_consider`, we decrease our round exponent. +# per `num_rounds_to_consider`, we decrease our round length. num_rounds_speedup = 32 -# We will try to accelerate (decrease our round exponent) every `acceleration_parameter` rounds if +# We will try to accelerate (decrease our round length) every `acceleration_parameter` rounds if # we have few enough failures. 
acceleration_parameter = 40 @@ -75,6 +183,7 @@ acceleration_parameter = 40 # determined by this FTT. acceleration_ftt = [1, 100] + # ==================================== # Configuration options for networking # ==================================== @@ -100,49 +209,177 @@ bind_address = '0.0.0.0:34553' # one connection. known_addresses = ['127.0.0.1:34553'] -# The interval (in milliseconds) between each fresh round of gossiping the node's public address. -gossip_interval = 30000 +# Minimum number of fully-connected peers to consider network component initialized. +min_peers_for_initialization = 3 -# Enable systemd support. If enabled, the node will notify systemd once it has synced and its -# listening socket for incoming connections is open. -# -# It is usually better to leave this option off and enable it explicitly via command-line override -# only in the unit files themselves via `-C=network.systemd_support=true`. -systemd_support = false - -# Minimum amount of time that has to pass before attempting to reconnect after losing all -# connections to established nodes. -isolation_reconnect_delay = '2s' +# The interval between each fresh round of gossiping the node's public address. +gossip_interval = '30 seconds' # Initial delay for starting address gossipping after the network starts. This should be slightly # more than the expected time required for initial connections to complete. -initial_gossip_delay = '5s' +initial_gossip_delay = '5 seconds' # How long a connection is allowed to be stuck as pending before it is abandoned. -max_addr_pending_time = '1min' +max_addr_pending_time = '1 minute' -# ============================================= -# Configuration options for the JSON-RPC HTTP server -# ============================================= -[rpc_server] +# Maximum time allowed for a connection handshake between two nodes to be completed. Connections +# exceeding this threshold are considered unlikely to be healthy or even malicious and thus +# terminated. 
+handshake_timeout = '20 seconds' -# Listening address for JSON-RPC HTTP server. If the port is set to 0, a random port will be used. +# Maximum number of incoming connections per unique peer allowed. If the limit is hit, additional +# connections will be rejected. A value of `0` means unlimited. +max_incoming_peer_connections = 3 + +# The maximum total of upstream bandwidth in bytes per second allocated to non-validating peers. +# A value of `0` means unlimited. +max_outgoing_byte_rate_non_validators = 0 + +# The maximum allowed total impact of requests from non-validating peers per second answered. +# A value of `0` means unlimited. +max_incoming_message_rate_non_validators = 0 + +# Maximum number of requests for data from a single peer that are allowed be buffered. A value of +# `0` means unlimited. +max_in_flight_demands = 50 + +# Version threshold to enable tarpit for. # -# If the specified port cannot be bound to, a random port will be tried instead. If binding fails, -# the JSON-RPC HTTP server will not run, but the node will be otherwise unaffected. +# When set to a version (the value may be `null` to disable the feature), any peer that reports a +# protocol version equal or below the threshold will be rejected only after holding open the +# connection for a specific (`tarpit_duration`) amount of time. # -# The actual bound address will be reported via a log line if logging is enabled. -address = '0.0.0.0:7777' +# This option makes most sense to enable on known nodes with addresses where legacy nodes that are +# still in operation are connecting to, as these older versions will only attempt to reconnect to +# other nodes once they have exhausted their set of known nodes. +tarpit_version_threshold = '1.2.1' + +# How long to hold connections to trapped legacy nodes. +tarpit_duration = '10 minutes' + +# The probability [0.0, 1.0] of this node trapping a legacy node. 
+#
+# Since older nodes will only reconnect if all their options are exhausted, it is sufficient for a
+# single known node to hold open a connection to prevent the node from reconnecting. This should be
+# set to `1/n` or higher, with `n` being the number of known nodes expected in the configuration of
+# legacy nodes running this software.
+tarpit_chance = 0.2
+
+# Minimum time a peer is kept on block list before being redeemed. The actual
+# timeout duration is calculated by selecting a random value between
+# `blocklist_retain_min_duration` and `blocklist_retain_max_duration`.
+blocklist_retain_min_duration = '2 minutes'
+
+# Maximum time a peer is kept on block list before being redeemed. The actual
+# timeout duration is calculated by selecting a random value between
+# `blocklist_retain_min_duration` and `blocklist_retain_max_duration`.
+blocklist_retain_max_duration = '10 minutes'
+
+# Identity of a node
+#
+# When this section is not specified, an identity will be generated when the node process starts with a self-signed certificate.
+# This option makes sense for some private chains where for security reasons joining new nodes is restricted.
+# [network.identity]
+# tls_certificate = "local_node_cert.pem"
+# secret_key = "local_node.pem"
+# ca_certificate = "ca_cert.pem"
+
+# Weights for impact estimation of incoming messages, used in combination with
+# `max_incoming_message_rate_non_validators`.
+#
+# Any weight set to 0 means that the category of traffic is exempt from throttling.
+[network.estimator_weights] +consensus = 0 +block_gossip = 1 +transaction_gossip = 0 +finality_signature_gossip = 1 +address_gossip = 0 +finality_signature_broadcasts = 0 +transaction_requests = 1 +transaction_responses = 0 +legacy_deploy_requests = 1 +legacy_deploy_responses = 0 +block_requests = 1 +block_responses = 0 +block_header_requests = 1 +block_header_responses = 0 +trie_requests = 1 +trie_responses = 0 +finality_signature_requests = 1 +finality_signature_responses = 0 +sync_leap_requests = 1 +sync_leap_responses = 0 +approvals_hashes_requests = 1 +approvals_hashes_responses = 0 +execution_results_requests = 1 +execution_results_responses = 0 + +# ================================================== +# Configuration options for the BinaryPort server +# ================================================== +[binary_port_server] + +# Flag which enables the BinaryPort server. +enable_server = true + +# Listening address for BinaryPort server. +address = '0.0.0.0:7779' + +# Flag that enables the `AllValues` get request. Disabled by default, because it can potentially be abused to retrieve huge amounts of data and clog the node. +# We enable it for NCTL testing since we need deeper inspection for the network in tests. +allow_request_get_all_values = true + +# Flag that enables the `Trie` get request. Disabled by default, because it can potentially be abused to retrieve huge amounts of data and clog the node. +allow_request_get_trie = false + +# Flag that enables the `TrySpeculativeExec` request. Disabled by default. +allow_request_speculative_exec = false + +# Maximum size of a message in bytes. +max_message_size_bytes = 4_194_304 + +# Maximum number of connections to the server. +max_connections = 5 # The global max rate of requests (per second) before they are limited. -# Request will be delayed to the next 1 second bucket once limited. -qps_limit = 100 +# The implementation uses a sliding window algorithm. 
+qps_limit = 110 -# ============================================= +# Initial time given to a connection before it expires +initial_connection_lifetime = '10 seconds' + +#The amount of time which is given to a connection to extend it's lifetime when a valid +# [`Command::Get(GetRequest::Record)`] is sent to the node +get_record_request_termination_delay = '0 seconds' + +#The amount of time which is given to a connection to extend it's lifetime when a valid +#[`Command::Get(GetRequest::Information)`] is sent to the node +get_information_request_termination_delay = '5 seconds' + +#The amount of time which is given to a connection to extend it's lifetime when a valid +#[`Command::Get(GetRequest::State)`] is sent to the node +get_state_request_termination_delay = '0 seconds' + +#The amount of time which is given to a connection to extend it's lifetime when a valid +#[`Command::Get(GetRequest::Trie)`] is sent to the node +get_trie_request_termination_delay = '0 seconds' + +#The amount of time which is given to a connection to extend it's lifetime when a valid +#[`Command::TryAcceptTransaction`] is sent to the node +accept_transaction_request_termination_delay = '24 seconds' + +#The amount of time which is given to a connection to extend it's lifetime when a valid +#[`Command::TrySpeculativeExec`] is sent to the node +speculative_exec_request_termination_delay = '0 seconds' + +# ============================================== # Configuration options for the REST HTTP server -# ============================================= +# ============================================== [rest_server] +# Flag which enables the REST HTTP server. +enable_server = true + # Listening address for REST HTTP server. If the port is set to 0, a random port will be used. # # If the specified port cannot be bound to, a random port will be tried instead. If binding fails, @@ -155,11 +392,22 @@ address = '0.0.0.0:8888' # Request will be delayed to the next 1 second bucket once limited. 
qps_limit = 100 -# ============================================= +# Specifies which origin will be reported as allowed by REST server. +# +# If left empty, CORS will be disabled. +# If set to '*', any origin is allowed. +# Otherwise, only a specified origin is allowed. The given string must conform to the [origin scheme](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin). +cors_origin = '' + + +# ========================================================== # Configuration options for the SSE HTTP event stream server -# ============================================= +# ========================================================== [event_stream_server] +# Flag which enables the SSE HTTP event stream server. +enable_server = true + # Listening address for SSE HTTP event stream server. If the port is set to 0, a random port will be used. # # If the specified port cannot be bound to, a random port will be tried instead. If binding fails, @@ -169,14 +417,17 @@ qps_limit = 100 address = '0.0.0.0:9999' # The number of event stream events to buffer. -event_stream_buffer_length = 100 +event_stream_buffer_length = 5000 -# The capacity of the broadcast channel size. -broadcast_channel_size = 100 +# The maximum number of subscribers across all event streams the server will permit at any one time. +max_concurrent_subscribers = 100 -# The global max rate of requests (per second) before they are limited. -# Request will be delayed to the next 1 second bucket once limited. -qps_limit = 100 +# Specifies which origin will be reported as allowed by event stream server. +# +# If left empty, CORS will be disabled. +# If set to '*', any origin is allowed. +# Otherwise, only a specified origin is allowed. The given string must conform to the [origin scheme](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin). 
+cors_origin = '' # =============================================== # Configuration options for the storage component @@ -184,7 +435,8 @@ qps_limit = 100 [storage] # Path (absolute, or relative to this config.toml) to the folder where any files created -# or read by the storage component will exist. +# or read by the storage component will exist. A subfolder named with the network name will be +# automatically created and used for the storage component files. # # If the folder doesn't exist, it and any required parents will be created. # @@ -219,6 +471,18 @@ max_deploy_metadata_store_size = 12_884_901_888 # 10_737_418_240 == 10 GiB. max_state_store_size = 10_737_418_240 +# Memory deduplication. +# +# If enabled, nodes will attempt to share loaded objects if possible. +enable_mem_deduplication = true + +# Memory duplication garbage collection. +# +# Sets the frequency how often the memory pool cache is swept for free references. +# For example, setting this value to 5 means that every 5th time something is put in the pool the cache is swept. +mem_pool_prune_interval = 4096 + + # =================================== # Configuration options for gossiping # =================================== @@ -235,62 +499,153 @@ infection_target = 3 # excluding us since 80% saturation would imply 3 new infections in 15 peers. saturation_limit_percent = 80 -# The maximum duration in seconds for which to keep finished entries. +# The maximum duration for which to keep finished entries. # # The longer they are retained, the lower the likelihood of re-gossiping a piece of data. However, # the longer they are retained, the larger the list of finished entries can grow. -finished_entry_duration_secs = 60 +finished_entry_duration = '1 minute' -# The timeout duration in seconds for a single gossip request, i.e. for a single gossip message +# The timeout duration for a single gossip request, i.e. 
for a single gossip message # sent from this node, it will be considered timed out if the expected response from that peer is # not received within this specified duration. -gossip_request_timeout_secs = 10 +gossip_request_timeout = '10 seconds' -# The timeout duration in seconds for retrieving the remaining part(s) of newly-discovered data +# The timeout duration for retrieving the remaining part(s) of newly-discovered data # from a peer which gossiped information about that data to this node. -get_remainder_timeout_secs = 5 +get_remainder_timeout = '5 seconds' +# The timeout duration for a newly-received, gossiped item to be validated and stored by another +# component before the gossiper abandons waiting to gossip the item onwards. +validate_and_store_timeout = '1 minute' -# =================================== -# Configuration options for fetcher -# =================================== + +# =============================================== +# Configuration options for the block accumulator +# =============================================== +[block_accumulator] + +# Block height difference threshold for starting to execute the blocks. +attempt_execution_threshold = 3 + +# Accepted time interval for inactivity in block accumulator. +dead_air_interval = '3 minutes' + +# Time after which the block acceptors are considered old and can be purged. +purge_interval = '5 minutes' + + +# ================================================ +# Configuration options for the block synchronizer +# ================================================ +[block_synchronizer] + +# Maximum number of fetch-trie tasks to run in parallel during block synchronization. +max_parallel_trie_fetches = 5000 + +# Time interval for the node to ask for refreshed peers. +peer_refresh_interval = '90 seconds' + +# Time interval for the node to check what the block synchronizer needs to acquire next. +need_next_interval = '1 second' + +# Time interval for recurring disconnection of dishonest peers. 
+disconnect_dishonest_peers_interval = '10 seconds' + +# Time interval for resetting the latch in block builders. +latch_reset_interval = '5 seconds' + + +# ============================================= +# Configuration options for the block validator +# ============================================= +[block_validator] + +# Maximum number of completed entries to retain. +# +# A higher value can avoid creating needless validation work on an already-validated proposed +# block, but comes at the cost of increased memory consumption. +max_completed_entries = 3 + + +# ================================== +# Configuration options for fetchers +# ================================== [fetcher] -# The timeout duration in seconds for a single fetcher request, i.e. for a single fetcher message +# The timeout duration for a single fetcher request, i.e. for a single fetcher message # sent from this node to another node, it will be considered timed out if the expected response from that peer is # not received within this specified duration. -get_from_peer_timeout = 3 - -# =================================================== -# Configuration options for deploy acceptor component -# =================================================== -[deploy_acceptor] - -# If true, the deploy acceptor will verify the account associated with a received deploy prior to accepting it. -verify_accounts = true +get_from_peer_timeout = '10 seconds' # ======================================================== # Configuration options for the contract runtime component # ======================================================== [contract_runtime] -# Optional setting to enable bonding or not. If unset, defaults to false. -#enable_bonding = false # Optional maximum size of the database to use for the global state store. # -# If unset, defaults to 32,212,254,720 == 30 GiB. +# If unset, defaults to 805,306,368,000 == 750 GiB. # # The size should be a multiple of the OS page size. 
-#max_global_state_size = 32_212_254_720 +max_global_state_size = 32_212_254_720 + +# Optional depth limit to use for global state queries. +# +# If unset, defaults to 5. +max_query_depth = 5 + +# Enable manual synchronizing to disk. +# +# If unset, defaults to true. +enable_manual_sync = true + + +# ================================================== +# Configuration options for the transaction acceptor +# ================================================== +[transaction_acceptor] + +# The leeway allowed when considering whether a transaction is future-dated or not. +# +# To accommodate minor clock drift, transactions whose timestamps are within `timestamp_leeway` in the +# future are still acceptable. +# +# The maximum value to which `timestamp_leeway` can be set is defined by the chainspec setting +# `transaction.max_timestamp_leeway`. +timestamp_leeway = '2 seconds' + + +# =========================================== +# Configuration options for the transaction buffer +# =========================================== +[transaction_buffer] + +# The interval of checking for expired transactions. +expiry_check_interval = '1 minute' -# ==================================================================== -# Configuration options for selecting deploys to propose in new blocks -# ==================================================================== -[block_proposer] +# ============================================== +# Configuration options for the diagnostics port +# ============================================== +[diagnostics_port] + +# If set, the diagnostics port will be available on a UNIX socket. +enabled = true + +# Filename for the UNIX domain socket the diagnostics port listens on. +socket_path = "debug.socket" + +# The umask to set before creating the socket. A restrictive mask like `0o077` will cause the +# socket to be only accessible by the user the node runs as. A more relaxed variant is `0o007`, +# which allows for group access as well. 
+socket_umask = 0o077 + + +# ============================================= +# Configuration options for the upgrade watcher +# ============================================= +[upgrade_watcher] -# Deploys are only proposed in a new block if the have been received at least this long ago. -# A longer delay makes it more likely that many proposed deploys are already known by the -# other nodes, and don't have to be requested from the proposer afterwards. -#deploy_delay = '1min' +# How often to scan file system for available upgrades. +upgrade_check_interval = '30 seconds' diff --git a/resources/mainnet/chainspec.toml b/resources/mainnet/chainspec.toml new file mode 100644 index 0000000000..ff2de9e2bc --- /dev/null +++ b/resources/mainnet/chainspec.toml @@ -0,0 +1,505 @@ +[protocol] +# Protocol version. +version = '2.0.4' +# Whether we need to clear latest blocks back to the switch block just before the activation point or not. +hard_reset = true +# This protocol version becomes active at this point. +# +# If it is a timestamp string, it represents the timestamp for the genesis block. This is the beginning of era 0. By +# this time, a sufficient majority (> 50% + F/2 — see finality_threshold_fraction below) of validator nodes must be up +# and running to start the blockchain. This timestamp is also used in seeding the pseudo-random number generator used +# in contract-runtime for computing genesis post-state hash. +# +# If it is an integer, it represents an era ID, meaning the protocol version becomes active at the start of this era. +activation_point = 17889 + +[network] +# Human readable name for convenience; the genesis_hash is the true identifier. The name influences the genesis hash by +# contributing to the seeding of the pseudo-random number generator used in contract-runtime for computing genesis +# post-state hash. +name = 'casper' +# The maximum size of an acceptable networking message in bytes. 
Any message larger than this will +# be rejected at the networking level. +maximum_net_message_size = 25_165_824 + +[core] +# Era duration. +era_duration = '120 minutes' +# Minimum number of blocks per era. An era will take longer than `era_duration` if that is necessary to reach the +# minimum height. +minimum_era_height = 20 +# Minimum difference between a block's and its child's timestamp. +minimum_block_time = '16384 ms' +# Number of slots available in validator auction. +validator_slots = 100 +# A number between 0 and 1 representing the fault tolerance threshold as a fraction, used by the internal finalizer. +# It is the fraction of validators that would need to equivocate to make two honest nodes see two conflicting blocks as +# finalized: A higher value F makes it safer to rely on finalized blocks. It also makes it more difficult to finalize +# blocks, however, and requires strictly more than (F + 1)/2 validators to be working correctly. +finality_threshold_fraction = [1, 3] +# Protocol version from which nodes are required to hold strict finality signatures. +start_protocol_version_with_strict_finality_signatures_required = '1.5.0' +# Which finality is required for legacy blocks. Options are 'Strict', 'Weak' and 'Any'. +# Used to determine finality sufficiency for new joiners syncing blocks created +# in a protocol version before +# `start_protocol_version_with_strict_finality_signatures_required`. +legacy_required_finality = 'Any' +# Number of eras before an auction actually defines the set of validators. If you bond with a sufficient bid in era N, +# you will be a validator in era N + auction_delay + 1. +auction_delay = 1 +# The period after genesis during which a genesis validator's bid is locked. +locked_funds_period = '0 days' +# The period in which genesis validator's bid is released over time after it's unlocked. +vesting_schedule_period = '0 weeks' +# Default number of eras that need to pass to be able to withdraw unbonded funds. 
+unbonding_delay = 7 +# Round seigniorage rate represented as a fraction of the total supply. +# +# Annual issuance: 8% +# Minimum block time: 2^14 milliseconds +# Ticks per year: 31536000000 +# +# (1+0.08)^((2^14)/31536000000)-1 is expressed as a fractional number below +# Python: +# from fractions import Fraction +# Fraction((1 + 0.08)**((2**14)/31536000000) - 1).limit_denominator(1000000000) +round_seigniorage_rate = [7, 175070816] +# Maximum number of associated keys for a single account. +max_associated_keys = 100 +# Maximum height of contract runtime call stack. +max_runtime_call_stack_height = 12 +# Minimum allowed delegation amount in motes +minimum_delegation_amount = 500_000_000_000 +# Maximum allowed delegation amount in motes +maximum_delegation_amount = 1_000_000_000_000_000_000 +# Minimum bid amount allowed in motes. Withdrawing one's bid to an amount strictly less than +# the value specified will be treated as a full unbond of a validator and their associated delegators +minimum_bid_amount = 10_000_000_000_000 +# Global state prune batch size (0 = this feature is off) +prune_batch_size = 0 +# Enables strict arguments checking when calling a contract; i.e. that all non-optional args are provided and of the correct `CLType`. +strict_argument_checking = false +# Number of simultaneous peer requests. +simultaneous_peer_requests = 5 +# The consensus protocol to use. Options are "Zug" and "Highway". +consensus_protocol = 'Zug' +# The maximum amount of delegators per validator. +max_delegators_per_validator = 1200 +# The split in finality signature rewards between block producer and participating signers. +finders_fee = [1, 5] +# The proportion of baseline rewards going to reward finality signatures specifically. +finality_signature_proportion = [95, 100] +# Lookback interval indicating which past block we are looking at to reward. +signature_rewards_max_delay = 3 +# Allows transfers between accounts in the blockchain network. 
+# +# Setting this to false restricts normal accounts from sending tokens to other accounts, allowing transfers only to administrators. +# Changing this option makes sense only on private chains. +allow_unrestricted_transfers = true +# Enables the auction entry points 'delegate' and 'add_bid'. +# +# Setting this to false makes sense only for private chains which don't need to auction new validator slots. These +# auction entry points will return an error if called when this option is set to false. +allow_auction_bids = true +# If set to false, then consensus doesn't compute rewards and always uses 0. +compute_rewards = true +# Defines how refunds of the unused portion of payment amounts are calculated and handled. +# +# Valid options are: +# 'refund': a ratio of the unspent token is returned to the spender. +# 'burn': a ratio of the unspent token is burned. +# 'no_refund': no refunds are paid out; this is functionally equivalent to refund with 0% ratio. +# This causes excess payment amounts to be sent to either a +# pre-defined purse, or back to the sender. The refunded amount is calculated as the given ratio of the payment amount +# minus the execution costs. +refund_handling = { type = 'refund', refund_ratio = [75, 100] } +# Defines how fees are handled. +# +# Valid options are: +# 'no_fee': fees are eliminated. +# 'pay_to_proposer': fees are paid to the block proposer +# 'accumulate': fees are accumulated in a special purse and distributed at the end of each era evenly among all +# administrator accounts +# 'burn': fees are burned +fee_handling = { type = 'pay_to_proposer' } +# If a validator would recieve a validator credit, it cannot exceed this percentage of their total stake. +validator_credit_cap = [1, 5] +# Defines how pricing is handled. +# +# Valid options are: +# 'payment_limited': senders of transaction self-specify how much they pay. 
+# 'fixed': costs are fixed, per the cost table +# 'prepaid': prepaid transaction (currently not supported) +pricing_handling = { type = 'payment_limited' } +# Does the network allow pre-payment for future +# execution? Currently not supported. +# +allow_prepaid = false +# Defines how gas holds affect available balance calculations. +# +# Valid options are: +# 'accrued': sum of full value of all non-expired holds. +# 'amortized': sum of each hold is amortized over the time remaining until expiry. +# +# For instance, if 12 hours remained on a gas hold with a 24-hour `gas_hold_interval`, +# with accrued, the full hold amount would be applied +# with amortized, half the hold amount would be applied +gas_hold_balance_handling = { type = 'accrued' } +# Defines how long gas holds last. +# +# If fee_handling is set to 'no_fee', the system places a balance hold on the payer +# equal to the value the fee would have been. Such balance holds expire after a time +# interval has elapsed. This setting controls how long that interval is. The available +# balance of a purse equals its total balance minus the held amount(s) of non-expired +# holds (see gas_hold_balance_handling setting for details of how that is calculated). +# +# For instance, if gas_hold_interval is 24 hours and 100 gas is used from a purse, +# a hold for 100 is placed on that purse and is considered when calculating total balance +# for 24 hours starting from the block_time when the hold was placed. +gas_hold_interval = '24 hours' +# List of public keys of administrator accounts. Setting this option makes only on private chains which require +# administrator accounts for regulatory reasons. +administrators = [] +# Flag that triggers a migration of all userland accounts and contracts present in global state to the addressable +# entity in lazy manner. +# If the flag is set to false then no accounts and contracts are migrated during a protocol upgrade; +# i.e. 
all Account records will be present under Key::Account and Contracts and their associated ContractPackage +# will be written underneath Key::Hash. +# If the flag is set to true then accounts and contracts are migrated lazily; i.e on first use of the Account +# and/or Contract as part of the execution of a Transaction. This means the Accounts/Contracts will be migrated +# to their corresponding AddressableEntity and the NamedKeys for previous record and sepeareted and wrriten +# as discrete top level records. For Contracts specifically the entrypoints are also written as discrete top +# level records +# Note: Enabling of the AddressableEntity feature is one-way; i.e once enabled as part of a protocol upgrade +# the flag cannot be disabled in a future protocol upgrade. +enable_addressable_entity = false +# This value is used as the penalty payment amount, the lowest cost, and the minimum balance amount. +baseline_motes_amount = 2_500_000_000 +# Flag on whether ambiguous entity versions returns an execution error. +trap_on_ambiguous_entity_version = false + +[highway] +# Highway dynamically chooses its round length, between minimum_block_time and maximum_round_length. +maximum_round_length = '66 seconds' + +[transactions] +# The duration after the transaction timestamp that it can be included in a block. +max_ttl = '2 hours' +# The maximum number of approvals permitted in a single block. +block_max_approval_count = 2600 +# Maximum block size in bytes including transactions contained by the block. 0 means unlimited. +max_block_size = 5_242_880 +# The upper limit of total gas of all transactions in a block. +block_gas_limit = 1_625_000_000_000 +# The minimum amount in motes for a valid native transfer. +native_transfer_minimum_motes = 2_500_000_000 +# The maximum value to which `transaction_acceptor.timestamp_leeway` can be set in the config.toml file. +max_timestamp_leeway = '5 seconds' + +# Configuration of the transaction runtime. 
+[transactions.enabled_runtime] +vm_casper_v1 = true +vm_casper_v2 = false + +[transactions.v1] +# The configuration settings for the lanes of transactions including both native and Wasm based interactions. +# Currently the node supports two native interactions the mint and auction and have the reserved identifiers of 0 and 1 +# respectively +# The remaining wasm based lanes specify the range of configuration settings for a given Wasm based transaction +# within a given lane. +# The maximum length in bytes of runtime args per V1 transaction. +# [0] -> Transaction lane label (apart from the reserved native identifiers these are simply labels) +# Note: For the given mainnet implementation we specially reserve the label 2 for install and upgrades and +# the lane must be present and defined. +# Different casper networks may not impose such a restriction. +# [1] -> Max serialized length of the entire transaction in bytes for a given transaction in a certain lane +# [2] -> Max args length size in bytes for a given transaction in a certain lane +# [3] -> Transaction gas limit for a given transaction in a certain lane +# [4] -> The maximum number of transactions the lane can contain +native_mint_lane = [0, 2048, 1024, 100_000_000, 650] +native_auction_lane = [1, 3096, 2048, 2_500_000_000, 650] +install_upgrade_lane = [2, 750_000, 2048, 1_000_000_000_000, 1] +wasm_lanes = [ + [3, 750_000, 2048, 1_000_000_000_000, 1], + [4, 131_072, 1024, 100_000_000_000, 2], + [5, 65_536, 512, 5_000_000_000, 80] +] + +[transactions.deploy] +# The maximum number of Motes allowed to be spent during payment. 0 means unlimited. +max_payment_cost = '0' +# The limit of length of serialized payment code arguments. +payment_args_max_length = 1024 +# The limit of length of serialized session code arguments. +session_args_max_length = 1024 + +[wasm.v1] +# Amount of free memory (in 64kB pages) each contract can use for stack. +max_memory = 64 +# Max stack height (native WebAssembly stack limiter). 
+max_stack_height = 500 + +[storage_costs] +# Gas charged per byte stored in the global state. +gas_per_byte = 1_117_587 + +# For each opcode cost below there exists a static cost and a dynamic cost. +# The static cost is a fixed cost for each opcode that is hardcoded and validated by benchmarks. +[wasm.v1.opcode_costs] +# Bit operations multiplier. +bit = 105 +# Arithmetic add operations multiplier. +add = 105 +# Mul operations multiplier. +mul = 105 +# Div operations multiplier. +div = 105 +# Memory load operation multiplier. +load = 105 +# Memory store operation multiplier. +store = 105 +# Const store operation multiplier. +const = 105 +# Local operations multiplier. +local = 105 +# Global operations multiplier. +global = 105 +# Integer operations multiplier. +integer_comparison = 105 +# Conversion operations multiplier. +conversion = 105 +# Unreachable operation multiplier. +unreachable = 105 +# Nop operation multiplier. +nop = 105 +# Get current memory operation multiplier. +current_memory = 105 +# Grow memory cost, per page (64kb). +grow_memory = 900 +# Sign extension operations cost +sign = 105 + +# Control flow operations multiplier. 
+[wasm.v1.opcode_costs.control_flow] +block = 255 +loop = 255 +if = 105 +else = 105 +end = 105 +br = 1665 +br_if = 510 +return = 105 +select = 105 +call = 225 +call_indirect = 270 +drop = 105 + +[wasm.v1.opcode_costs.control_flow.br_table] +# Fixed cost per `br_table` opcode +cost = 150 +# Size of target labels in the `br_table` opcode will be multiplied by `size_multiplier` +size_multiplier = 100 + +# Host function declarations are located in smart_contracts/contract/src/ext_ffi.rs +[wasm.v1.host_function_costs] +add = { cost = 5_800, arguments = [0, 0, 0, 0] } +add_associated_key = { cost = 1_200_000, arguments = [0, 0, 0] } +add_contract_version = { cost = 200, arguments = [0, 0, 0, 0, 120_000, 0, 0, 0, 0, 0] } +add_contract_version_with_message_topics = { cost = 200, arguments = [0, 0, 0, 0, 120_000, 0, 0, 0, 30_000, 0, 0] } +add_package_version_with_message_topics = { cost = 200, arguments = [0, 0, 0, 0, 120_000, 0, 0, 0, 30_000, 0, 0] } +blake2b = { cost = 1_200_000, arguments = [0, 120_000, 0, 0] } +call_contract = { cost = 300_000_000, arguments = [0, 0, 0, 120_000, 0, 120_000, 0] } +call_versioned_contract = { cost = 300_000_000, arguments = [0, 0, 0, 0, 0, 120_000, 0, 120_000, 0] } +create_contract_package_at_hash = { cost = 200, arguments = [0, 0] } +create_contract_user_group = { cost = 200, arguments = [0, 0, 0, 0, 0, 0, 0, 0] } +create_purse = { cost = 2_500_000_000, arguments = [0, 0] } +disable_contract_version = { cost = 200, arguments = [0, 0, 0, 0] } +get_balance = { cost = 3_000_000, arguments = [0, 0, 0] } +get_blocktime = { cost = 330, arguments = [0] } +get_caller = { cost = 380, arguments = [0] } +get_key = { cost = 2_000, arguments = [0, 440, 0, 0, 0] } +get_main_purse = { cost = 1_300, arguments = [0] } +get_named_arg = { cost = 200, arguments = [0, 120_000, 0, 120_000] } +get_named_arg_size = { cost = 200, arguments = [0, 0, 0] } +get_phase = { cost = 710, arguments = [0] } +get_system_contract = { cost = 1_100, arguments = [0, 0, 0] } 
+has_key = { cost = 1_500, arguments = [0, 840] } +is_valid_uref = { cost = 760, arguments = [0, 0] } +load_named_keys = { cost = 42_000, arguments = [0, 0] } +new_uref = { cost = 17_000, arguments = [0, 0, 590] } +random_bytes = { cost = 200, arguments = [0, 0] } +print = { cost = 20_000, arguments = [0, 4_600] } +provision_contract_user_group_uref = { cost = 200, arguments = [0, 0, 0, 0, 0] } +put_key = { cost = 100_000_000, arguments = [0, 120_000, 0, 120_000] } +read_host_buffer = { cost = 3_500, arguments = [0, 310, 0] } +read_value = { cost = 60_000, arguments = [0, 120_000, 0] } +dictionary_get = { cost = 5_500, arguments = [0, 590, 0] } +remove_associated_key = { cost = 4_200, arguments = [0, 0] } +remove_contract_user_group = { cost = 200, arguments = [0, 0, 0, 0] } +remove_contract_user_group_urefs = { cost = 200, arguments = [0, 0, 0, 0, 0, 120_000] } +remove_key = { cost = 61_000, arguments = [0, 3_200] } +ret = { cost = 23_000, arguments = [0, 420_000] } +revert = { cost = 500, arguments = [0] } +set_action_threshold = { cost = 74_000, arguments = [0, 0] } +transfer_from_purse_to_account = { cost = 2_500_000_000, arguments = [0, 0, 0, 0, 0, 0, 0, 0, 0] } +transfer_from_purse_to_purse = { cost = 82_000_000, arguments = [0, 0, 0, 0, 0, 0, 0, 0] } +transfer_to_account = { cost = 2_500_000_000, arguments = [0, 0, 0, 0, 0, 0, 0] } +update_associated_key = { cost = 4_200, arguments = [0, 0, 0] } +write = { cost = 14_000, arguments = [0, 0, 0, 980] } +dictionary_put = { cost = 9_500, arguments = [0, 1_800, 0, 520] } +enable_contract_version = { cost = 200, arguments = [0, 0, 0, 0] } +manage_message_topic = { cost = 200, arguments = [0, 30_000, 0, 0] } +emit_message = { cost = 200, arguments = [0, 30_000, 0, 120_000] } +generic_hash = { cost = 1_200_000, arguments = [0, 120_000, 0, 0, 0] } +cost_increase_per_message = 50 +get_block_info = { cost = 330, arguments = [0, 0] } +recover_secp256k1 = { cost = 1_300_000, arguments = [0, 120_000, 0, 0, 0, 0] } 
+verify_signature = { cost = 1_300_000, arguments = [0, 120_000, 0, 0, 0, 0] } +call_package_version = { cost = 300_000_000, arguments = [0, 0, 0, 0, 0, 0, 0, 120_000, 0, 120_000, 0] } + +[wasm.v2] +# Amount of free memory each contract can use for stack. +max_memory = 17 + +[wasm.v2.opcode_costs] +# Bit operations multiplier. +bit = 105 +# Arithmetic add operations multiplier. +add = 105 +# Mul operations multiplier. +mul = 105 +# Div operations multiplier. +div = 105 +# Memory load operation multiplier. +load = 105 +# Memory store operation multiplier. +store = 105 +# Const store operation multiplier. +const = 105 +# Local operations multiplier. +local = 105 +# Global operations multiplier. +global = 105 +# Integer operations multiplier. +integer_comparison = 105 +# Conversion operations multiplier. +conversion = 105 +# Unreachable operation multiplier. +unreachable = 105 +# Nop operation multiplier. +nop = 105 +# Get current memory operation multiplier. +current_memory = 105 +# Grow memory cost, per page (64kb). +grow_memory = 900 +# Sign extension operations cost +sign = 105 + +# Control flow operations multiplier. 
+[wasm.v2.opcode_costs.control_flow] +block = 255 +loop = 255 +if = 105 +else = 105 +end = 105 +br = 1665 +br_if = 510 +return = 105 +select = 105 +call = 225 +call_indirect = 270 +drop = 105 + +[wasm.v2.opcode_costs.control_flow.br_table] +# Fixed cost per `br_table` opcode +cost = 150 +# Size of target labels in the `br_table` opcode will be multiplied by `size_multiplier` +size_multiplier = 100 + +[wasm.v2.host_function_costs] +read = { cost = 0, arguments = [0, 0, 0, 0, 0, 0] } +write = { cost = 0, arguments = [0, 0, 0, 0, 0] } +remove = { cost = 0, arguments = [0, 0, 0] } +copy_input = { cost = 0, arguments = [0, 0] } +ret = { cost = 0, arguments = [0, 0] } +create = { cost = 0, arguments = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] } +transfer = { cost = 0, arguments = [0, 0, 0] } +env_balance = { cost = 0, arguments = [0, 0, 0, 0] } +upgrade = { cost = 0, arguments = [0, 0, 0, 0, 0, 0] } +call = { cost = 0, arguments = [0, 0, 0, 0, 0, 0, 0, 0, 0] } +print = { cost = 0, arguments = [0, 0] } +emit = { cost = 0, arguments = [0, 0, 0, 0] } +env_info = { cost = 0, arguments = [0, 0] } + +[wasm.messages_limits] +max_topic_name_size = 256 +max_topics_per_contract = 128 +max_message_size = 1_024 + +[system_costs] +# Penalty charge for calling invalid entry point in a system contract. 
+no_such_entrypoint = 2_500_000_000
+
+[system_costs.auction_costs]
+get_era_validators = 2_500_000_000
+read_seigniorage_recipients = 5_000_000_000
+add_bid = 2_500_000_000
+withdraw_bid = 2_500_000_000
+delegate = 2_500_000_000
+undelegate = 2_500_000_000
+run_auction = 2_500_000_000
+slash = 2_500_000_000
+distribute = 2_500_000_000
+withdraw_delegator_reward = 5_000_000_000
+withdraw_validator_reward = 5_000_000_000
+read_era_id = 2_500_000_000
+activate_bid = 2_500_000_000
+redelegate = 2_500_000_000
+change_bid_public_key = 5_000_000_000
+add_reservations = 2_500_000_000
+cancel_reservations = 2_500_000_000
+
+[system_costs.mint_costs]
+mint = 2_500_000_000
+reduce_total_supply = 2_500_000_000
+create = 2_500_000_000
+balance = 100_000_000
+burn = 100_000_000
+transfer = 100_000_000
+read_base_round_reward = 2_500_000_000
+mint_into_existing_purse = 2_500_000_000
+
+[system_costs.handle_payment_costs]
+get_payment_purse = 10_000
+set_refund_purse = 10_000
+get_refund_purse = 10_000
+finalize_payment = 2_500_000_000
+
+[system_costs.standard_payment_costs]
+pay = 10_000
+
+[vacancy]
+# The cost of a transaction is based on a multiplier. This allows for economic disincentives for misuse of the network.
+#
+# The network starts with a current_gas_price of min_gas_price.
+#
+# Each block has multiple limits (bytes, transactions, transfers, gas, etc.)
+# The utilization for a block is determined by the highest percentage utilization of each of these limits.
+#
+# Ex: transfers limit is 650 and transactions limit is 20 (assume other limits are not a factor here)
+# 19 transactions -> 19/20 or 95%
+# 600 transfers -> 600/650 or 92.3%
+# resulting block utilization is 95%
+#
+# The utilization for an era is the average of all block utilizations. At the switch block, the dynamic gas_price is
+# adjusted with the following:
+#
+# If utilization was below the lower_threshold, current_gas_price is decremented by one if higher than min_gas_price.
+# If utilization falls between the thresholds, current_gas_price is not changed.
+# If utilization was above the upper_threshold, current_gas_price is incremented by one if lower than max_gas_price.
+#
+# The cost charged for the transaction is simply the gas_used * current_gas_price.
+upper_threshold = 90
+lower_threshold = 50
+max_gas_price = 1
+min_gas_price = 1
diff --git a/resources/mainnet/config-example.toml b/resources/mainnet/config-example.toml
new file mode 100644
index 0000000000..2ce69b1c9d
--- /dev/null
+++ b/resources/mainnet/config-example.toml
@@ -0,0 +1,645 @@
+# ================================
+# Configuration options for a node
+# ================================
+[node]
+
+# If set, use this hash as a trust anchor when joining an existing network.
+#trusted_hash = 'HEX-FORMATTED BLOCK HASH'
+
+# Historical sync behavior for this node. Options are:
+# 'genesis' (node will attempt to acquire all block data back to genesis)
+# note: as time goes on, the time to sync all the way back to genesis takes progressively longer.
+# 'ttl' (node will attempt to acquire all block data to comply with time to live enforcement)
+# note: ttl is a chainspec configured behavior on a given network; consult the `max_ttl` chainspec setting
+# (it is currently ~18 hours by default on production and production-like networks but subject to change).
+# 'nosync' (node will only acquire blocks moving forward)
+# note: `nosync` is incompatible with validator behavior; a nosync node is prevented from participating
+# in consensus / switching to validate mode. it is primarily for lightweight nodes that are
+# only interested in recent activity.
+# 'isolated' (node will initialize without peers and will not accept peers)
+# note: an isolated node will not connect to, sync with, or keep up with the network, but will respond to
+# binary port, rest server, event server, and diagnostic port connections.
+sync_handling = 'ttl' + +# Idle time after which the syncing process is considered stalled. +idle_tolerance = '20 minutes' + +# When the syncing process is considered stalled, it'll be retried up to `max_attempts` times. +max_attempts = 3 + +# Default delay for the control events that have no dedicated delay requirements. +control_logic_default_delay = '1 second' + +# Flag which forces the node to resync all of the blocks. +force_resync = false + +# A timeout for the ShutdownForUpgrade state, after which the node will upgrade even if not all +# conditions are satisfied. +shutdown_for_upgrade_timeout = '2 minutes' + +# Maximum time a node will wait for an upgrade to commit. +upgrade_timeout = '30 seconds' + +# The node detects when it should do a controlled shutdown when it is in a detectably bad state +# in order to avoid potentially catastrophic uncontrolled crashes. Generally, a node should be +# allowed to shutdown, and if restarted that node will generally recover gracefully and resume +# normal operation. However, actively validating nodes have subjective state in memory that is +# lost on shutdown / restart and must be reacquired from other validating nodes on restart. +# If all validating nodes shutdown in the middle of an era, social consensus is required to restart +# the network. As a mitigation for that, the following config can be set to true on some validator +# nodes to cause nodes that are supposed to be validators in the current era to ignore controlled +# shutdown events and stay up. This allows them to act as sentinels for the consensus data for +# other restarting nodes. This config is inert on non-validating nodes. +prevent_validator_shutdown = false + +# ================================= +# Configuration options for logging +# ================================= +[logging] + +# Output format. Possible values are 'text' or 'json'. +format = 'json' + +# Colored output. Has no effect if format = 'json'. 
+color = false + +# Abbreviate module names in text output. Has no effect if format = 'json'. +abbreviate_modules = false + + +# =================================== +# Configuration options for consensus +# =================================== +[consensus] + +# Path (absolute, or relative to this config.toml) to validator's secret key file used to sign +# consensus messages. +secret_key_path = '/etc/casper/validator_keys/secret_key.pem' + +# The maximum number of blocks by which execution is allowed to lag behind finalization. +# If it is more than that, consensus will pause, and resume once the executor has caught up. +max_execution_delay = 3 + + +# ======================================= +# Configuration options for Zug consensus +# ======================================= +[consensus.zug] + +# Request the latest protocol state from a random peer periodically, with this interval. +# '0 seconds' means it is disabled and we never request the protocol state from a peer. +sync_state_interval = '1 second' + +# Log inactive or faulty validators periodically, with this interval. +# '0 seconds' means it is disabled and we never print the log message. +log_participation_interval = '1 minute' + +# The minimal proposal timeout. Validators wait this long for a proposal to receive a quorum of +# echo messages, before they vote to make the round skippable and move on to the next proposer. +proposal_timeout = '10 seconds' + +# The additional proposal delay that is still considered fast enough, in percent. This should +# take into account variables like empty vs. full blocks, network traffic etc. +# E.g. if proposing a full block while under heavy load takes 50% longer than an empty one +# while idle this should be at least 50, meaning that the timeout is 50% longer than +# necessary for a quorum of recent proposals, approximately. +proposal_grace_period = 200 + +# The average number of rounds after which the proposal timeout adapts by a factor of 2. 
+# Note: It goes up faster than it goes down: it takes fewer rounds to double than to halve. +proposal_timeout_inertia = 10 + +# The maximum difference between validators' clocks we expect. Incoming proposals whose timestamp +# lies in the future by more than that are rejected. +clock_tolerance = '1 second' + + +# =========================================== +# Configuration options for Highway consensus +# =========================================== +[consensus.highway] + +# The duration for which incoming vertices with missing dependencies should be kept in a queue. +pending_vertex_timeout = '30 minutes' + +# Request the latest protocol state from a random peer periodically, with this interval. +# '0 seconds' means it is disabled and we never request the protocol state from a peer. +request_state_interval = '20 seconds' + +# Log inactive or faulty validators periodically, with this interval. +# '0 seconds' means it is disabled and we never print the log message. +log_participation_interval = '1 minute' + +# Log the synchronizer state periodically, with this interval. +# '0 seconds' means it is disabled and we never print the log message. +log_synchronizer_interval = '5 seconds' + +# Log the size of every incoming and outgoing serialized unit. +log_unit_sizes = false + +# The maximum number of peers we request the same vertex from in parallel. +max_requests_for_vertex = 5 + +# The maximum number of dependencies we request per validator in a batch. +# Limits requests per validator in panorama - in order to get a total number of +# requests, multiply by # of validators. +max_request_batch_size = 20 + +[consensus.highway.round_success_meter] +# The number of most recent rounds we will be keeping track of. +num_rounds_to_consider = 40 + +# The number of successful rounds that triggers us to slow down: With this many or fewer +# successes per `num_rounds_to_consider`, we increase our round length. 
+num_rounds_slowdown = 10 + +# The number of successful rounds that triggers us to speed up: With this many or more successes +# per `num_rounds_to_consider`, we decrease our round length. +num_rounds_speedup = 32 + +# We will try to accelerate (decrease our round length) every `acceleration_parameter` rounds if +# we have few enough failures. +acceleration_parameter = 40 + +# The FTT, as a percentage (i.e. `acceleration_ftt = [1, 100]` means 1% of the validators' total weight), which +# we will use for looking for a summit in order to determine a proposal's finality. +# The required quorum in a summit we will look for to check if a round was successful is +# determined by this FTT. +acceleration_ftt = [1, 100] + + +# ==================================== +# Configuration options for networking +# ==================================== +[network] + +# The public address of the node. +# +# It must be publicly available in order to allow peers to connect to this node. +# If the port is set to 0, the actual bound port will be substituted. +public_address = ':0' + +# Address to bind to for listening. +# If port is set to 0, a random port will be used. +bind_address = '0.0.0.0:35000' + +# Addresses to connect to in order to join the network. +# +# If not set, this node will not be able to attempt to connect to the network. Instead it will +# depend upon peers connecting to it. This is normally only useful for the first node of the +# network. +# +# Multiple addresses can be given and the node will attempt to connect to each, requiring at least +# one connection. +known_addresses = ['135.148.34.108:35000','135.148.169.178:35000','51.81.107.100:35000','135.148.34.20:35000'] + +# Minimum number of fully-connected peers to consider network component initialized. +min_peers_for_initialization = 3 + +# The interval between each fresh round of gossiping the node's public address. 
+gossip_interval = '120 seconds'
+
+# Initial delay for starting address gossiping after the network starts. This should be slightly
+# more than the expected time required for initial connections to complete.
+initial_gossip_delay = '5 seconds'
+
+# How long a connection is allowed to be stuck as pending before it is abandoned.
+max_addr_pending_time = '1 minute'
+
+# Maximum time allowed for a connection handshake between two nodes to be completed. Connections
+# exceeding this threshold are considered unlikely to be healthy or even malicious and thus
+# terminated.
+handshake_timeout = '20 seconds'
+
+# Maximum number of incoming connections per unique peer allowed. If the limit is hit, additional
+# connections will be rejected. A value of `0` means unlimited.
+max_incoming_peer_connections = 3
+
+# The maximum total of upstream bandwidth in bytes per second allocated to non-validating peers.
+# A value of `0` means unlimited.
+max_outgoing_byte_rate_non_validators = 6553600
+
+# The maximum allowed total impact of requests from non-validating peers per second answered.
+# A value of `0` means unlimited.
+max_incoming_message_rate_non_validators = 3000
+
+# Maximum number of requests for data from a single peer that are allowed to be buffered. A value of
+# `0` means unlimited.
+max_in_flight_demands = 50
+
+# Version threshold to enable tarpit for.
+#
+# When set to a version (the value may be `null` to disable the feature), any peer that reports a
+# protocol version equal to or below the threshold will be rejected only after holding open the
+# connection for a specific (`tarpit_duration`) amount of time.
+#
+# This option makes most sense to enable on known nodes with addresses where legacy nodes that are
+# still in operation are connecting to, as these older versions will only attempt to reconnect to
+# other nodes once they have exhausted their set of known nodes.
+tarpit_version_threshold = '1.2.1'
+
+# How long to hold connections to trapped legacy nodes.
+tarpit_duration = '10 minutes'
+
+# The probability [0.0, 1.0] of this node trapping a legacy node.
+#
+# Since older nodes will only reconnect if all their options are exhausted, it is sufficient for a
+# single known node to hold open a connection to prevent the node from reconnecting. This should be
+# set to `1/n` or higher, with `n` being the number of known nodes expected in the configuration of
+# legacy nodes running this software.
+tarpit_chance = 0.2
+
+# Minimum time a peer is kept on block list before being redeemed. The actual
+# timeout duration is calculated by selecting a random value between
+# `blocklist_retain_min_duration` and `blocklist_retain_max_duration`.
+blocklist_retain_min_duration = '2 minutes'
+
+# Maximum time a peer is kept on block list before being redeemed. The actual
+# timeout duration is calculated by selecting a random value between
+# `blocklist_retain_min_duration` and `blocklist_retain_max_duration`.
+blocklist_retain_max_duration = '10 minutes'
+
+# Identity of a node
+#
+# When this section is not specified, an identity will be generated when the node process starts with a self-signed certificate.
+# This option makes sense for some private chains where for security reasons joining new nodes is restricted.
+# [network.identity]
+# tls_certificate = "node_cert.pem"
+# secret_key = "node.pem"
+# ca_certificate = "ca_cert.pem"
+
+# Weights for impact estimation of incoming messages, used in combination with
+# `max_incoming_message_rate_non_validators`.
+#
+# Any weight set to 0 means that the category of traffic is exempt from throttling.
+[network.estimator_weights]
+consensus = 0
+block_gossip = 1
+transaction_gossip = 0
+finality_signature_gossip = 1
+address_gossip = 0
+finality_signature_broadcasts = 0
+transaction_requests = 1
+transaction_responses = 0
+legacy_deploy_requests = 1
+legacy_deploy_responses = 0
+block_requests = 1
+block_responses = 0
+block_header_requests = 1
+block_header_responses = 0
+trie_requests = 1
+trie_responses = 0
+finality_signature_requests = 1
+finality_signature_responses = 0
+sync_leap_requests = 1
+sync_leap_responses = 0
+approvals_hashes_requests = 1
+approvals_hashes_responses = 0
+execution_results_requests = 1
+execution_results_responses = 0
+
+# Identity of a node
+#
+# When this section is not specified, an identity will be generated when the node process starts with a self-signed certificate.
+# This option makes sense for some private chains where for security reasons joining new nodes is restricted.
+# [network.identity]
+# tls_certificate = "local_node_cert.pem"
+# secret_key = "local_node.pem"
+# ca_certificate = "ca_cert.pem"
+
+
+# ==================================================
+# Configuration options for the BinaryPort server
+# ==================================================
+[binary_port_server]
+
+# Flag which enables the BinaryPort server.
+enable_server = true
+
+# Listening address for BinaryPort server.
+address = '0.0.0.0:7779'
+
+# Flag that enables the `AllValues` get request. Disabled by default, because it can potentially be abused to retrieve huge amounts of data and clog the node.
+allow_request_get_all_values = false
+
+# Flag that enables the `Trie` get request. Disabled by default, because it can potentially be abused to retrieve huge amounts of data and clog the node.
+allow_request_get_trie = false
+
+# Flag that enables the `TrySpeculativeExec` request. Disabled by default.
+allow_request_speculative_exec = false
+
+# Maximum size of a message in bytes.
+max_message_size_bytes = 4_194_304
+
+# Maximum number of connections to the server.
+max_connections = 5
+
+# The global max rate of requests (per second) before they are limited.
+# The implementation uses a sliding window algorithm.
+qps_limit = 110
+
+# Initial time given to a connection before it expires.
+initial_connection_lifetime = '10 seconds'
+
+# The amount of time which is given to a connection to extend its lifetime when a valid
+# [`Command::Get(GetRequest::Record)`] is sent to the node
+get_record_request_termination_delay = '0 seconds'
+
+# The amount of time which is given to a connection to extend its lifetime when a valid
+# [`Command::Get(GetRequest::Information)`] is sent to the node
+get_information_request_termination_delay = '5 seconds'
+
+# The amount of time which is given to a connection to extend its lifetime when a valid
+# [`Command::Get(GetRequest::State)`] is sent to the node
+get_state_request_termination_delay = '0 seconds'
+
+# The amount of time which is given to a connection to extend its lifetime when a valid
+# [`Command::Get(GetRequest::Trie)`] is sent to the node
+get_trie_request_termination_delay = '0 seconds'
+
+# The amount of time which is given to a connection to extend its lifetime when a valid
+# [`Command::TryAcceptTransaction`] is sent to the node
+accept_transaction_request_termination_delay = '24 seconds'
+
+# The amount of time which is given to a connection to extend its lifetime when a valid
+# [`Command::TrySpeculativeExec`] is sent to the node
+speculative_exec_request_termination_delay = '0 seconds'
+
+
+# ==============================================
+# Configuration options for the REST HTTP server
+# ==============================================
+[rest_server]
+
+# Flag which enables the REST HTTP server.
+enable_server = true
+
+# Listening address for REST HTTP server. If the port is set to 0, a random port will be used.
+#
+# If the specified port cannot be bound to, a random port will be tried instead. 
If binding fails, +# the REST HTTP server will not run, but the node will be otherwise unaffected. +# +# The actual bound address will be reported via a log line if logging is enabled. +address = '0.0.0.0:8888' + +# The global max rate of requests (per second) before they are limited. +# Request will be delayed to the next 1 second bucket once limited. +qps_limit = 20 + +# Specifies which origin will be reported as allowed by REST server. +# +# If left empty, CORS will be disabled. +# If set to '*', any origin is allowed. +# Otherwise, only a specified origin is allowed. The given string must conform to the [origin scheme](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin). +cors_origin = '' + + +# ========================================================== +# Configuration options for the SSE HTTP event stream server +# ========================================================== +[event_stream_server] + +# Flag which enables the SSE HTTP event stream server. +enable_server = true + +# Listening address for SSE HTTP event stream server. If the port is set to 0, a random port will be used. +# +# If the specified port cannot be bound to, a random port will be tried instead. If binding fails, +# the SSE HTTP event stream server will not run, but the node will be otherwise unaffected. +# +# The actual bound address will be reported via a log line if logging is enabled. +address = '0.0.0.0:9999' + +# The number of event stream events to buffer. +event_stream_buffer_length = 5000 + +# The maximum number of subscribers across all event streams the server will permit at any one time. +max_concurrent_subscribers = 100 + +# Specifies which origin will be reported as allowed by event stream server. +# +# If left empty, CORS will be disabled. +# If set to '*', any origin is allowed. +# Otherwise, only a specified origin is allowed. The given string must conform to the [origin scheme](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin). 
+cors_origin = ''
+
+# ===============================================
+# Configuration options for the storage component
+# ===============================================
+[storage]
+
+# Path (absolute, or relative to this config.toml) to the folder where any files created
+# or read by the storage component will exist. A subfolder named with the network name will be
+# automatically created and used for the storage component files.
+#
+# If the folder doesn't exist, it and any required parents will be created.
+#
+# If unset, the path must be supplied as an argument via the CLI.
+path = '/var/lib/casper/casper-node'
+
+# Maximum size of the database to use for the block store.
+#
+# The size should be a multiple of the OS page size.
+#
+# 483_183_820_800 == 450 GiB.
+max_block_store_size = 483_183_820_800
+
+# Maximum size of the database to use for the deploy store.
+#
+# The size should be a multiple of the OS page size.
+#
+# 322_122_547_200 == 300 GiB.
+max_deploy_store_size = 322_122_547_200
+
+# Maximum size of the database to use for the deploy metadata.
+#
+# The size should be a multiple of the OS page size.
+#
+# 322_122_547_200 == 300 GiB.
+max_deploy_metadata_store_size = 322_122_547_200
+
+# Maximum size of the database to use for the state snapshots.
+#
+# The size should be a multiple of the OS page size.
+#
+# 10_737_418_240 == 10 GiB.
+max_state_store_size = 10_737_418_240
+
+# Memory deduplication.
+#
+# If enabled, nodes will attempt to share loaded objects if possible.
+enable_mem_deduplication = true
+
+# Memory deduplication garbage collection.
+#
+# Sets how often the memory pool cache is swept for free references.
+# For example, setting this value to 5 means that every 5th time something is put in the pool the cache is swept.
+mem_pool_prune_interval = 4096 + + +# =================================== +# Configuration options for gossiping +# =================================== +[gossip] + +# Target number of peers to infect with a given piece of data. +infection_target = 3 + +# The saturation limit as a percentage, with a maximum value of 99. Used as a termination +# condition. +# +# Example: assume the `infection_target` is 3, the `saturation_limit_percent` is 80, and we don't +# manage to newly infect 3 peers. We will stop gossiping once we know of more than 15 holders +# excluding us since 80% saturation would imply 3 new infections in 15 peers. +saturation_limit_percent = 80 + +# The maximum duration for which to keep finished entries. +# +# The longer they are retained, the lower the likelihood of re-gossiping a piece of data. However, +# the longer they are retained, the larger the list of finished entries can grow. +finished_entry_duration = '1 minute' + +# The timeout duration for a single gossip request, i.e. for a single gossip message +# sent from this node, it will be considered timed out if the expected response from that peer is +# not received within this specified duration. +gossip_request_timeout = '30 seconds' + +# The timeout duration for retrieving the remaining part(s) of newly-discovered data +# from a peer which gossiped information about that data to this node. +get_remainder_timeout = '5 seconds' + +# The timeout duration for a newly-received, gossiped item to be validated and stored by another +# component before the gossiper abandons waiting to gossip the item onwards. +validate_and_store_timeout = '1 minute' + + +# =============================================== +# Configuration options for the block accumulator +# =============================================== +[block_accumulator] + +# Block height difference threshold for starting to execute the blocks. +attempt_execution_threshold = 3 + +# Accepted time interval for inactivity in block accumulator. 
+dead_air_interval = '3 minutes' + +# Time after which the block acceptors are considered old and can be purged. +purge_interval = '1 minute' + + +# ================================================ +# Configuration options for the block synchronizer +# ================================================ +[block_synchronizer] + +# Maximum number of fetch-trie tasks to run in parallel during block synchronization. +max_parallel_trie_fetches = 5000 + +# Time interval for the node to ask for refreshed peers. +peer_refresh_interval = '90 seconds' + +# Time interval for the node to check what the block synchronizer needs to acquire next. +need_next_interval = '1 second' + +# Time interval for recurring disconnection of dishonest peers. +disconnect_dishonest_peers_interval = '10 seconds' + +# Time interval for resetting the latch in block builders. +latch_reset_interval = '5 seconds' + + +# ============================================= +# Configuration options for the block validator +# ============================================= +[block_validator] + +# Maximum number of completed entries to retain. +# +# A higher value can avoid creating needless validation work on an already-validated proposed +# block, but comes at the cost of increased memory consumption. +max_completed_entries = 3 + + +# ================================== +# Configuration options for fetchers +# ================================== +[fetcher] + +# The timeout duration for a single fetcher request, i.e. for a single fetcher message +# sent from this node to another node, it will be considered timed out if the expected response from that peer is +# not received within this specified duration. 
+get_from_peer_timeout = '10 seconds' + + +# ======================================================== +# Configuration options for the contract runtime component +# ======================================================== +[contract_runtime] + +# Optional maximum size of the database to use for the global state store. +# +# If unset, defaults to 805,306,368,000 == 750 GiB. +# +# The size should be a multiple of the OS page size. +max_global_state_size = 2_089_072_132_096 + +# Optional depth limit to use for global state queries. +# +# If unset, defaults to 5. +#max_query_depth = 5 + +# Enable manual synchronizing to disk. +# +# If unset, defaults to true. +#enable_manual_sync = true + + +# ================================================== +# Configuration options for the transaction acceptor +# ================================================== +[transaction_acceptor] + +# The leeway allowed when considering whether a transaction is future-dated or not. +# +# To accommodate minor clock drift, transactions whose timestamps are within `timestamp_leeway` in the +# future are still acceptable. +# +# The maximum value to which `timestamp_leeway` can be set is defined by the chainspec setting +# `transaction.max_timestamp_leeway`. +timestamp_leeway = '2 seconds' + + +# =========================================== +# Configuration options for the transaction buffer +# =========================================== +[transaction_buffer] + +# The interval of checking for expired transactions. +expiry_check_interval = '1 minute' + + +# ============================================== +# Configuration options for the diagnostics port +# ============================================== +[diagnostics_port] + +# If set, the diagnostics port will be available on a UNIX socket. +enabled = false + +# Filename for the UNIX domain socket the diagnostics port listens on. +socket_path = "debug.socket" + +# The umask to set before creating the socket. 
A restrictive mask like `0o077` will cause the +# socket to be only accessible by the user the node runs as. A more relaxed variant is `0o007`, +# which allows for group access as well. +socket_umask = 0o077 + + +# ============================================= +# Configuration options for the upgrade watcher +# ============================================= +[upgrade_watcher] + +# How often to scan file system for available upgrades. +upgrade_check_interval = '30 seconds' diff --git a/resources/production/chainspec.toml b/resources/production/chainspec.toml index c677652502..ff2de9e2bc 100644 --- a/resources/production/chainspec.toml +++ b/resources/production/chainspec.toml @@ -1,8 +1,8 @@ [protocol] # Protocol version. -version = '1.0.0' +version = '2.0.4' # Whether we need to clear latest blocks back to the switch block just before the activation point or not. -hard_reset = false +hard_reset = true # This protocol version becomes active at this point. # # If it is a timestamp string, it represents the timestamp for the genesis block. This is the beginning of era 0. By @@ -11,7 +11,7 @@ hard_reset = false # in contract-runtime for computing genesis post-state hash. # # If it is an integer, it represents an era ID, meaning the protocol version becomes active at the start of this era. -activation_point = '2021-03-31T15:00:00Z' +activation_point = 17889 [network] # Human readable name for convenience; the genesis_hash is the true identifier. The name influences the genesis hash by @@ -20,138 +20,306 @@ activation_point = '2021-03-31T15:00:00Z' name = 'casper' # The maximum size of an acceptable networking message in bytes. Any message larger than this will # be rejected at the networking level. -maximum_net_message_size = 23_068_672 +maximum_net_message_size = 25_165_824 [core] # Era duration. -era_duration = '120minutes' +era_duration = '120 minutes' # Minimum number of blocks per era. 
An era will take longer than `era_duration` if that is necessary to reach the # minimum height. minimum_era_height = 20 +# Minimum difference between a block's and its child's timestamp. +minimum_block_time = '16384 ms' # Number of slots available in validator auction. validator_slots = 100 +# A number between 0 and 1 representing the fault tolerance threshold as a fraction, used by the internal finalizer. +# It is the fraction of validators that would need to equivocate to make two honest nodes see two conflicting blocks as +# finalized: A higher value F makes it safer to rely on finalized blocks. It also makes it more difficult to finalize +# blocks, however, and requires strictly more than (F + 1)/2 validators to be working correctly. +finality_threshold_fraction = [1, 3] +# Protocol version from which nodes are required to hold strict finality signatures. +start_protocol_version_with_strict_finality_signatures_required = '1.5.0' +# Which finality is required for legacy blocks. Options are 'Strict', 'Weak' and 'Any'. +# Used to determine finality sufficiency for new joiners syncing blocks created +# in a protocol version before +# `start_protocol_version_with_strict_finality_signatures_required`. +legacy_required_finality = 'Any' # Number of eras before an auction actually defines the set of validators. If you bond with a sufficient bid in era N, # you will be a validator in era N + auction_delay + 1. auction_delay = 1 # The period after genesis during which a genesis validator's bid is locked. -locked_funds_period = '90days' +locked_funds_period = '0 days' +# The period in which genesis validator's bid is released over time after it's unlocked. +vesting_schedule_period = '0 weeks' # Default number of eras that need to pass to be able to withdraw unbonded funds. unbonding_delay = 7 # Round seigniorage rate represented as a fraction of the total supply. 
# # Annual issuance: 8% -# Minimum round exponent: 16 +# Minimum block time: 2^14 milliseconds # Ticks per year: 31536000000 # -# (1+0.08)^((2^16)/31536000000)-1 is expressed as a fractional number below +# (1+0.08)^((2^14)/31536000000)-1 is expressed as a fractional number below # Python: # from fractions import Fraction -# Fraction((1 + 0.08)**((2**16)/31536000000) - 1).limit_denominator(1000000000) -round_seigniorage_rate = [147, 919121747] +# Fraction((1 + 0.08)**((2**14)/31536000000) - 1).limit_denominator(1000000000) +round_seigniorage_rate = [7, 175070816] +# Maximum number of associated keys for a single account. +max_associated_keys = 100 +# Maximum height of contract runtime call stack. +max_runtime_call_stack_height = 12 +# Minimum allowed delegation amount in motes +minimum_delegation_amount = 500_000_000_000 +# Maximum allowed delegation amount in motes +maximum_delegation_amount = 1_000_000_000_000_000_000 +# Minimum bid amount allowed in motes. Withdrawing one's bid to an amount strictly less than +# the value specified will be treated as a full unbond of a validator and their associated delegators +minimum_bid_amount = 10_000_000_000_000 +# Global state prune batch size (0 = this feature is off) +prune_batch_size = 0 +# Enables strict arguments checking when calling a contract; i.e. that all non-optional args are provided and of the correct `CLType`. +strict_argument_checking = false +# Number of simultaneous peer requests. +simultaneous_peer_requests = 5 +# The consensus protocol to use. Options are "Zug" and "Highway". +consensus_protocol = 'Zug' +# The maximum amount of delegators per validator. +max_delegators_per_validator = 1200 +# The split in finality signature rewards between block producer and participating signers. +finders_fee = [1, 5] +# The proportion of baseline rewards going to reward finality signatures specifically. 
+finality_signature_proportion = [95, 100]
+# Lookback interval indicating which past block we are looking at to reward.
+signature_rewards_max_delay = 3
+# Allows transfers between accounts in the blockchain network.
+#
+# Setting this to false restricts normal accounts from sending tokens to other accounts, allowing transfers only to administrators.
+# Changing this option makes sense only on private chains.
+allow_unrestricted_transfers = true
+# Enables the auction entry points 'delegate' and 'add_bid'.
+#
+# Setting this to false makes sense only for private chains which don't need to auction new validator slots. These
+# auction entry points will return an error if called when this option is set to false.
+allow_auction_bids = true
+# If set to false, then consensus doesn't compute rewards and always uses 0.
+compute_rewards = true
+# Defines how refunds of the unused portion of payment amounts are calculated and handled.
+#
+# Valid options are:
+# 'refund': a ratio of the unspent token is returned to the spender.
+# 'burn': a ratio of the unspent token is burned.
+# 'no_refund': no refunds are paid out; this is functionally equivalent to refund with 0% ratio.
+# This causes excess payment amounts to be sent to either a
+# pre-defined purse, or back to the sender. The refunded amount is calculated as the given ratio of the payment amount
+# minus the execution costs.
+refund_handling = { type = 'refund', refund_ratio = [75, 100] }
+# Defines how fees are handled.
+#
+# Valid options are:
+# 'no_fee': fees are eliminated.
+# 'pay_to_proposer': fees are paid to the block proposer
+# 'accumulate': fees are accumulated in a special purse and distributed at the end of each era evenly among all
+# administrator accounts
+# 'burn': fees are burned
+fee_handling = { type = 'pay_to_proposer' }
+# If a validator would receive a validator credit, it cannot exceed this percentage of their total stake.
+validator_credit_cap = [1, 5]
+# Defines how pricing is handled.
+#
+# Valid options are:
+# 'payment_limited': senders of transaction self-specify how much they pay.
+# 'fixed': costs are fixed, per the cost table
+# 'prepaid': prepaid transaction (currently not supported)
+pricing_handling = { type = 'payment_limited' }
+# Does the network allow pre-payment for future
+# execution? Currently not supported.
+#
+allow_prepaid = false
+# Defines how gas holds affect available balance calculations.
+#
+# Valid options are:
+# 'accrued': sum of full value of all non-expired holds.
+# 'amortized': sum of each hold is amortized over the time remaining until expiry.
+#
+# For instance, if 12 hours remained on a gas hold with a 24-hour `gas_hold_interval`,
+# with accrued, the full hold amount would be applied
+# with amortized, half the hold amount would be applied
+gas_hold_balance_handling = { type = 'accrued' }
+# Defines how long gas holds last.
+#
+# If fee_handling is set to 'no_fee', the system places a balance hold on the payer
+# equal to the value the fee would have been. Such balance holds expire after a time
+# interval has elapsed. This setting controls how long that interval is. The available
+# balance of a purse equals its total balance minus the held amount(s) of non-expired
+# holds (see gas_hold_balance_handling setting for details of how that is calculated).
+#
+# For instance, if gas_hold_interval is 24 hours and 100 gas is used from a purse,
+# a hold for 100 is placed on that purse and is considered when calculating total balance
+# for 24 hours starting from the block_time when the hold was placed.
+gas_hold_interval = '24 hours'
+# List of public keys of administrator accounts. Setting this option makes sense only on private chains which require
+# administrator accounts for regulatory reasons.
+administrators = []
+# Flag that triggers a migration of all userland accounts and contracts present in global state to the addressable
+# entity in lazy manner.
+# If the flag is set to false then no accounts and contracts are migrated during a protocol upgrade;
+# i.e. all Account records will be present under Key::Account and Contracts and their associated ContractPackage
+# will be written underneath Key::Hash.
+# If the flag is set to true then accounts and contracts are migrated lazily; i.e. on first use of the Account
+# and/or Contract as part of the execution of a Transaction. This means the Accounts/Contracts will be migrated
+# to their corresponding AddressableEntity and the NamedKeys for the previous record are separated and written
+# as discrete top level records. For Contracts specifically the entrypoints are also written as discrete top
+# level records
+# Note: Enabling of the AddressableEntity feature is one-way; i.e. once enabled as part of a protocol upgrade
+# the flag cannot be disabled in a future protocol upgrade.
+enable_addressable_entity = false
+# This value is used as the penalty payment amount, the lowest cost, and the minimum balance amount.
+baseline_motes_amount = 2_500_000_000
+# Flag on whether ambiguous entity versions return an execution error.
+trap_on_ambiguous_entity_version = false
 
 [highway]
-# A number between 0 and 1 representing the fault tolerance threshold as a fraction, used by the internal finalizer.
-# It is the fraction of validators that would need to equivocate to make two honest nodes see two conflicting blocks as
-# finalized: A higher value F makes it safer to rely on finalized blocks. It also makes it more difficult to finalize
-# blocks, however, and requires strictly more than (F + 1)/2 validators to be working correctly.
-finality_threshold_fraction = [1, 3]
-# Integer between 0 and 255. The power of two that is the number of milliseconds in the minimum round length, and
-# therefore the minimum delay between a block and its child. E.g. 14 means 2^14 milliseconds, i.e. about 16 seconds.
-minimum_round_exponent = 16
-# Integer between 0 and 255.
Must be greater than `minimum_round_exponent`. The power of two that is the number of -# milliseconds in the maximum round length, and therefore the maximum delay between a block and its child. E.g. 19 -# means 2^19 milliseconds, i.e. about 8.7 minutes. -maximum_round_exponent = 18 -# The factor by which rewards for a round are multiplied if the greatest summit has ≤50% quorum, i.e. no finality. -# Expressed as a fraction (1/5 by default). -reduced_reward_multiplier = [1, 5] - -[deploys] +# Highway dynamically chooses its round length, between minimum_block_time and maximum_round_length. +maximum_round_length = '66 seconds' + +[transactions] +# The duration after the transaction timestamp that it can be included in a block. +max_ttl = '2 hours' +# The maximum number of approvals permitted in a single block. +block_max_approval_count = 2600 +# Maximum block size in bytes including transactions contained by the block. 0 means unlimited. +max_block_size = 5_242_880 +# The upper limit of total gas of all transactions in a block. +block_gas_limit = 1_625_000_000_000 +# The minimum amount in motes for a valid native transfer. +native_transfer_minimum_motes = 2_500_000_000 +# The maximum value to which `transaction_acceptor.timestamp_leeway` can be set in the config.toml file. +max_timestamp_leeway = '5 seconds' + +# Configuration of the transaction runtime. +[transactions.enabled_runtime] +vm_casper_v1 = true +vm_casper_v2 = false + +[transactions.v1] +# The configuration settings for the lanes of transactions including both native and Wasm based interactions. +# Currently the node supports two native interactions the mint and auction and have the reserved identifiers of 0 and 1 +# respectively +# The remaining wasm based lanes specify the range of configuration settings for a given Wasm based transaction +# within a given lane. +# The maximum length in bytes of runtime args per V1 transaction. 
+# [0] -> Transaction lane label (apart from the reserved native identifiers these are simply labels) +# Note: For the given mainnet implementation we specially reserve the label 2 for install and upgrades and +# the lane must be present and defined. +# Different casper networks may not impose such a restriction. +# [1] -> Max serialized length of the entire transaction in bytes for a given transaction in a certain lane +# [2] -> Max args length size in bytes for a given transaction in a certain lane +# [3] -> Transaction gas limit for a given transaction in a certain lane +# [4] -> The maximum number of transactions the lane can contain +native_mint_lane = [0, 2048, 1024, 100_000_000, 650] +native_auction_lane = [1, 3096, 2048, 2_500_000_000, 650] +install_upgrade_lane = [2, 750_000, 2048, 1_000_000_000_000, 1] +wasm_lanes = [ + [3, 750_000, 2048, 1_000_000_000_000, 1], + [4, 131_072, 1024, 100_000_000_000, 2], + [5, 65_536, 512, 5_000_000_000, 80] +] + +[transactions.deploy] # The maximum number of Motes allowed to be spent during payment. 0 means unlimited. max_payment_cost = '0' -# The duration after the deploy timestamp that it can be included in a block. -max_ttl = '1day' -# The maximum number of other deploys a deploy can depend on (require to have been executed before it can execute). -max_dependencies = 10 -# Maximum block size in bytes including deploys contained by the block. 0 means unlimited. -max_block_size = 10_485_760 -# The maximum number of non-transfer deploys permitted in a single block. -block_max_deploy_count = 100 -# The maximum number of wasm-less transfer deploys permitted in a single block. -block_max_transfer_count = 2500 -# The upper limit of total gas of all deploys in a block. -block_gas_limit = 10_000_000_000_000 # The limit of length of serialized payment code arguments. payment_args_max_length = 1024 # The limit of length of serialized session code arguments. 
session_args_max_length = 1024 -# The minimum amount in motes for a valid native transfer. -native_transfer_minimum_motes = 2_500_000_000 -[wasm] +[wasm.v1] # Amount of free memory (in 64kB pages) each contract can use for stack. max_memory = 64 # Max stack height (native WebAssembly stack limiter). -max_stack_height = 65_536 +max_stack_height = 500 -[wasm.storage_costs] +[storage_costs] # Gas charged per byte stored in the global state. -gas_per_byte = 630_000 +gas_per_byte = 1_117_587 -[wasm.opcode_costs] +# For each opcode cost below there exists a static cost and a dynamic cost. +# The static cost is a fixed cost for each opcode that is hardcoded and validated by benchmarks. +[wasm.v1.opcode_costs] # Bit operations multiplier. -bit = 300 +bit = 105 # Arithmetic add operations multiplier. -add = 210 +add = 105 # Mul operations multiplier. -mul = 240 +mul = 105 # Div operations multiplier. -div = 320 +div = 105 # Memory load operation multiplier. -load = 2_500 +load = 105 # Memory store operation multiplier. -store = 4_700 +store = 105 # Const store operation multiplier. -const = 110 +const = 105 # Local operations multiplier. -local = 390 +local = 105 # Global operations multiplier. -global = 390 -# Control flow operations multiplier. -control_flow = 440 +global = 105 # Integer operations multiplier. -integer_comparison = 250 +integer_comparison = 105 # Conversion operations multiplier. -conversion = 420 +conversion = 105 # Unreachable operation multiplier. -unreachable = 270 +unreachable = 105 # Nop operation multiplier. -nop = 200 +nop = 105 # Get current memory operation multiplier. -current_memory = 290 +current_memory = 105 # Grow memory cost, per page (64kb). -grow_memory = 240_000 -# Regular opcode cost. -regular = 210 +grow_memory = 900 +# Sign extension operations cost +sign = 105 + +# Control flow operations multiplier. 
+[wasm.v1.opcode_costs.control_flow] +block = 255 +loop = 255 +if = 105 +else = 105 +end = 105 +br = 1665 +br_if = 510 +return = 105 +select = 105 +call = 225 +call_indirect = 270 +drop = 105 + +[wasm.v1.opcode_costs.control_flow.br_table] +# Fixed cost per `br_table` opcode +cost = 150 +# Size of target labels in the `br_table` opcode will be multiplied by `size_multiplier` +size_multiplier = 100 # Host function declarations are located in smart_contracts/contract/src/ext_ffi.rs -[wasm.host_function_costs] +[wasm.v1.host_function_costs] add = { cost = 5_800, arguments = [0, 0, 0, 0] } -add_associated_key = { cost = 9_000, arguments = [0, 0, 0] } -add_contract_version = { cost = 200, arguments = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] } -blake2b = { cost = 200, arguments = [0, 0, 0, 0] } -call_contract = { cost = 4_500, arguments = [0, 0, 0, 0, 0, 420, 0] } -call_versioned_contract = { cost = 200, arguments = [0, 0, 0, 0, 0, 0, 0, 0, 0] } +add_associated_key = { cost = 1_200_000, arguments = [0, 0, 0] } +add_contract_version = { cost = 200, arguments = [0, 0, 0, 0, 120_000, 0, 0, 0, 0, 0] } +add_contract_version_with_message_topics = { cost = 200, arguments = [0, 0, 0, 0, 120_000, 0, 0, 0, 30_000, 0, 0] } +add_package_version_with_message_topics = { cost = 200, arguments = [0, 0, 0, 0, 120_000, 0, 0, 0, 30_000, 0, 0] } +blake2b = { cost = 1_200_000, arguments = [0, 120_000, 0, 0] } +call_contract = { cost = 300_000_000, arguments = [0, 0, 0, 120_000, 0, 120_000, 0] } +call_versioned_contract = { cost = 300_000_000, arguments = [0, 0, 0, 0, 0, 120_000, 0, 120_000, 0] } create_contract_package_at_hash = { cost = 200, arguments = [0, 0] } create_contract_user_group = { cost = 200, arguments = [0, 0, 0, 0, 0, 0, 0, 0] } -create_purse = { cost = 170_000, arguments = [0, 0] } +create_purse = { cost = 2_500_000_000, arguments = [0, 0] } disable_contract_version = { cost = 200, arguments = [0, 0, 0, 0] } -get_balance = { cost = 3_800, arguments = [0, 0, 0] } +get_balance = { cost = 
3_000_000, arguments = [0, 0, 0] } get_blocktime = { cost = 330, arguments = [0] } get_caller = { cost = 380, arguments = [0] } get_key = { cost = 2_000, arguments = [0, 440, 0, 0, 0] } get_main_purse = { cost = 1_300, arguments = [0] } -get_named_arg = { cost = 200, arguments = [0, 0, 0, 0] } +get_named_arg = { cost = 200, arguments = [0, 120_000, 0, 120_000] } get_named_arg_size = { cost = 200, arguments = [0, 0, 0] } get_phase = { cost = 710, arguments = [0] } get_system_contract = { cost = 1_100, arguments = [0, 0, 0] } @@ -159,57 +327,179 @@ has_key = { cost = 1_500, arguments = [0, 840] } is_valid_uref = { cost = 760, arguments = [0, 0] } load_named_keys = { cost = 42_000, arguments = [0, 0] } new_uref = { cost = 17_000, arguments = [0, 0, 590] } +random_bytes = { cost = 200, arguments = [0, 0] } print = { cost = 20_000, arguments = [0, 4_600] } provision_contract_user_group_uref = { cost = 200, arguments = [0, 0, 0, 0, 0] } -put_key = { cost = 38_000, arguments = [0, 1_100, 0, 0] } +put_key = { cost = 100_000_000, arguments = [0, 120_000, 0, 120_000] } read_host_buffer = { cost = 3_500, arguments = [0, 310, 0] } -read_value = { cost = 6_000, arguments = [0, 0, 0] } -read_value_local = { cost = 5_500, arguments = [0, 590, 0] } +read_value = { cost = 60_000, arguments = [0, 120_000, 0] } +dictionary_get = { cost = 5_500, arguments = [0, 590, 0] } remove_associated_key = { cost = 4_200, arguments = [0, 0] } remove_contract_user_group = { cost = 200, arguments = [0, 0, 0, 0] } -remove_contract_user_group_urefs = { cost = 200, arguments = [0, 0, 0, 0, 0, 0] } +remove_contract_user_group_urefs = { cost = 200, arguments = [0, 0, 0, 0, 0, 120_000] } remove_key = { cost = 61_000, arguments = [0, 3_200] } ret = { cost = 23_000, arguments = [0, 420_000] } revert = { cost = 500, arguments = [0] } set_action_threshold = { cost = 74_000, arguments = [0, 0] } -transfer_from_purse_to_account = { cost = 160_000, arguments = [0, 0, 0, 0, 0, 0, 0, 0, 0] } 
-transfer_from_purse_to_purse = { cost = 82_000, arguments = [0, 0, 0, 0, 0, 0, 0, 0] } -transfer_to_account = { cost = 24_000, arguments = [0, 0, 0, 0, 0, 0, 0] } +transfer_from_purse_to_account = { cost = 2_500_000_000, arguments = [0, 0, 0, 0, 0, 0, 0, 0, 0] } +transfer_from_purse_to_purse = { cost = 82_000_000, arguments = [0, 0, 0, 0, 0, 0, 0, 0] } +transfer_to_account = { cost = 2_500_000_000, arguments = [0, 0, 0, 0, 0, 0, 0] } update_associated_key = { cost = 4_200, arguments = [0, 0, 0] } write = { cost = 14_000, arguments = [0, 0, 0, 980] } -write_local = { cost = 9_500, arguments = [0, 1_800, 0, 520] } +dictionary_put = { cost = 9_500, arguments = [0, 1_800, 0, 520] } +enable_contract_version = { cost = 200, arguments = [0, 0, 0, 0] } +manage_message_topic = { cost = 200, arguments = [0, 30_000, 0, 0] } +emit_message = { cost = 200, arguments = [0, 30_000, 0, 120_000] } +generic_hash = { cost = 1_200_000, arguments = [0, 120_000, 0, 0, 0] } +cost_increase_per_message = 50 +get_block_info = { cost = 330, arguments = [0, 0] } +recover_secp256k1 = { cost = 1_300_000, arguments = [0, 120_000, 0, 0, 0, 0] } +verify_signature = { cost = 1_300_000, arguments = [0, 120_000, 0, 0, 0, 0] } +call_package_version = { cost = 300_000_000, arguments = [0, 0, 0, 0, 0, 0, 0, 120_000, 0, 120_000, 0] } + +[wasm.v2] +# Amount of free memory each contract can use for stack. +max_memory = 17 + +[wasm.v2.opcode_costs] +# Bit operations multiplier. +bit = 105 +# Arithmetic add operations multiplier. +add = 105 +# Mul operations multiplier. +mul = 105 +# Div operations multiplier. +div = 105 +# Memory load operation multiplier. +load = 105 +# Memory store operation multiplier. +store = 105 +# Const store operation multiplier. +const = 105 +# Local operations multiplier. +local = 105 +# Global operations multiplier. +global = 105 +# Integer operations multiplier. +integer_comparison = 105 +# Conversion operations multiplier. +conversion = 105 +# Unreachable operation multiplier. 
+unreachable = 105 +# Nop operation multiplier. +nop = 105 +# Get current memory operation multiplier. +current_memory = 105 +# Grow memory cost, per page (64kb). +grow_memory = 900 +# Sign extension operations cost +sign = 105 + +# Control flow operations multiplier. +[wasm.v2.opcode_costs.control_flow] +block = 255 +loop = 255 +if = 105 +else = 105 +end = 105 +br = 1665 +br_if = 510 +return = 105 +select = 105 +call = 225 +call_indirect = 270 +drop = 105 + +[wasm.v2.opcode_costs.control_flow.br_table] +# Fixed cost per `br_table` opcode +cost = 150 +# Size of target labels in the `br_table` opcode will be multiplied by `size_multiplier` +size_multiplier = 100 + +[wasm.v2.host_function_costs] +read = { cost = 0, arguments = [0, 0, 0, 0, 0, 0] } +write = { cost = 0, arguments = [0, 0, 0, 0, 0] } +remove = { cost = 0, arguments = [0, 0, 0] } +copy_input = { cost = 0, arguments = [0, 0] } +ret = { cost = 0, arguments = [0, 0] } +create = { cost = 0, arguments = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] } +transfer = { cost = 0, arguments = [0, 0, 0] } +env_balance = { cost = 0, arguments = [0, 0, 0, 0] } +upgrade = { cost = 0, arguments = [0, 0, 0, 0, 0, 0] } +call = { cost = 0, arguments = [0, 0, 0, 0, 0, 0, 0, 0, 0] } +print = { cost = 0, arguments = [0, 0] } +emit = { cost = 0, arguments = [0, 0, 0, 0] } +env_info = { cost = 0, arguments = [0, 0] } + +[wasm.messages_limits] +max_topic_name_size = 256 +max_topics_per_contract = 128 +max_message_size = 1_024 [system_costs] -wasmless_transfer_cost = 10_000 +# Penalty charge for calling invalid entry point in a system contract. 
+no_such_entrypoint = 2_500_000_000 [system_costs.auction_costs] -get_era_validators = 10_000 -read_seigniorage_recipients = 10_000 -add_bid = 10_000 -withdraw_bid = 10_000 -delegate = 10_000 -undelegate = 10_000 -run_auction = 10_000 -slash = 10_000 -distribute = 10_000 -withdraw_delegator_reward = 10_000 -withdraw_validator_reward = 10_000 -read_era_id = 10_000 -activate_bid = 10_000 +get_era_validators = 2_500_000_000 +read_seigniorage_recipients = 5_000_000_000 +add_bid = 2_500_000_000 +withdraw_bid = 2_500_000_000 +delegate = 2_500_000_000 +undelegate = 2_500_000_000 +run_auction = 2_500_000_000 +slash = 2_500_000_000 +distribute = 2_500_000_000 +withdraw_delegator_reward = 5_000_000_000 +withdraw_validator_reward = 5_000_000_000 +read_era_id = 2_500_000_000 +activate_bid = 2_500_000_000 +redelegate = 2_500_000_000 +change_bid_public_key = 5_000_000_000 +add_reservations = 2_500_000_000 +cancel_reservations = 2_500_000_000 [system_costs.mint_costs] -mint = 10_000 -reduce_total_supply = 10_000 -create = 10_000 -balance = 10_000 -transfer = 10_000 -read_base_round_reward = 10_000 +mint = 2_500_000_000 +reduce_total_supply = 2_500_000_000 +create = 2_500_000_000 +balance = 100_000_000 +burn = 100_000_000 +transfer = 100_000_000 +read_base_round_reward = 2_500_000_000 +mint_into_existing_purse = 2_500_000_000 [system_costs.handle_payment_costs] get_payment_purse = 10_000 set_refund_purse = 10_000 get_refund_purse = 10_000 -finalize_payment = 10_000 +finalize_payment = 2_500_000_000 [system_costs.standard_payment_costs] pay = 10_000 + +[vacancy] +# The cost of a transaction is based on a multiplier. This allows for economic disincentives for misuse of the network. +# +# The network starts with a current_gas_price of min_gas_price. +# +# Each block has multiple limits (bytes, transactions, transfers, gas, etc.) +# The utilization for a block is determined by the highest percentage utilization of each these limits. 
+#
+# Ex: transfers limit is 650 and transactions limit is 20 (assume other limits are not a factor here)
+# 19 transactions -> 19/20 or 95%
+# 600 transfers -> 600/650 or 92.3%
+# resulting block utilization is 95
+#
+# The utilization for an era is the average of all block utilizations. At the switch block, the dynamic gas_price is
+# adjusted with the following:
+#
+# If utilization was below the lower_threshold, current_gas_price is decremented by one if higher than min_gas_price.
+# If utilization falls between the thresholds, current_gas_price is not changed.
+# If utilization was above the upper_threshold, current_gas_price is incremented by one if lower than max_gas_price.
+#
+# The cost charged for the transaction is simply the gas_used * current_gas_price.
+upper_threshold = 90
+lower_threshold = 50
+max_gas_price = 1
+min_gas_price = 1
diff --git a/resources/production/config-example.toml b/resources/production/config-example.toml
index 2cb7e5f436..7680d0f979 100644
--- a/resources/production/config-example.toml
+++ b/resources/production/config-example.toml
@@ -6,6 +6,67 @@
 # If set, use this hash as a trust anchor when joining an existing network.
 #trusted_hash = 'HEX-FORMATTED BLOCK HASH'
 
+# Historical sync behavior for this node. Options are:
+# 'ttl' (node will attempt to acquire all block data to comply with time to live enforcement)
+# 'genesis' (node will attempt to acquire all block data back to genesis)
+# 'nosync' (node will only acquire blocks moving forward)
+# 'isolated' (node will initialize without peers and will not accept peers)
+# 'completeblock' (node will acquire complete block and shutdown)
+# note: the only two states allowed to switch to Validate reactor state are `genesis` and `ttl`.
+# it is recommended for dedicated validator nodes to be in ttl mode to increase
+# their ability to maintain maximal uptime...if a long-running genesis validator
+# goes offline and comes back up while in genesis mode, it must backfill
+# any gaps in its block awareness before resuming validation.
+#
+# it is recommended for reporting non-validator nodes to be in genesis mode to
+# enable support for queries at any block height.
+#
+# it is recommended for non-validator working nodes (for dapp support, etc) to run in
+# ttl or nosync mode (depending upon their specific data requirements).
+#
+# thus for instance a node backing a block explorer would prefer genesis mode,
+# while a node backing a dapp interested in very recent activity would prefer to run in nosync mode,
+# and a node backing a dapp interested in auction activity or tracking trends would prefer to run in ttl mode.
+# note: as time goes on, the time to sync back to genesis takes progressively longer.
+# note: ttl is a chainspec configured behavior on a given network; consult the `max_ttl` chainspec setting
+# (it is currently ~18 hours by default on production and production-like networks but subject to change).
+# note: `nosync` is incompatible with validator behavior; a nosync node is prevented from participating
+# in consensus / switching to validate mode. it is primarily for lightweight nodes that are
+# only interested in recent activity.
+# note: an isolated node will not connect to, sync with, or keep up with the network, but will respond to
+# binary port, rest server, event server, and diagnostic port connections.
+sync_handling = 'ttl'
+
+# Idle time after which the syncing process is considered stalled.
+idle_tolerance = '20 minutes'
+
+# When the syncing process is considered stalled, it'll be retried up to `max_attempts` times.
+max_attempts = 3
+
+# Default delay for the control events that have no dedicated delay requirements.
+control_logic_default_delay = '1 second' + +# Flag which forces the node to resync all of the blocks. +force_resync = false + +# A timeout for the ShutdownForUpgrade state, after which the node will upgrade even if not all +# conditions are satisfied. +shutdown_for_upgrade_timeout = '2 minutes' + +# Maximum time a node will wait for an upgrade to commit. +upgrade_timeout = '3 hours' + +# The node detects when it should do a controlled shutdown when it is in a detectably bad state +# in order to avoid potentially catastrophic uncontrolled crashes. Generally, a node should be +# allowed to shutdown, and if restarted that node will generally recover gracefully and resume +# normal operation. However, actively validating nodes have subjective state in memory that is +# lost on shutdown / restart and must be reacquired from other validating nodes on restart. +# If all validating nodes shutdown in the middle of an era, social consensus is required to restart +# the network. As a mitigation for that, the following config can be set to true on some validator +# nodes to cause nodes that are supposed to be validators in the current era to ignore controlled +# shutdown events and stay up. This allows them to act as sentinels for the consensus data for +# other restarting nodes. This config is inert on non-validating nodes. +prevent_validator_shutdown = false # ================================= # Configuration options for logging @@ -26,46 +87,93 @@ abbreviate_modules = false # Configuration options for consensus # =================================== [consensus] + # Path (absolute, or relative to this config.toml) to validator's secret key file used to sign # consensus messages. secret_key_path = '/etc/casper/validator_keys/secret_key.pem' +# The maximum number of blocks by which execution is allowed to lag behind finalization. +# If it is more than that, consensus will pause, and resume once the executor has caught up. 
+max_execution_delay = 3 + + +# ======================================= +# Configuration options for Zug consensus +# ======================================= +[consensus.zug] + +# Request the latest protocol state from a random peer periodically, with this interval. +# '0 seconds' means it is disabled and we never request the protocol state from a peer. +sync_state_interval = '1 second' + +# Log inactive or faulty validators periodically, with this interval. +# '0 seconds' means it is disabled and we never print the log message. +log_participation_interval = '1 minute' + +# The minimal proposal timeout. Validators wait this long for a proposal to receive a quorum of +# echo messages, before they vote to make the round skippable and move on to the next proposer. +proposal_timeout = '10 seconds' + +# The additional proposal delay that is still considered fast enough, in percent. This should +# take into account variables like empty vs. full blocks, network traffic etc. +# E.g. if proposing a full block while under heavy load takes 50% longer than an empty one +# while idle this should be at least 50, meaning that the timeout is 50% longer than +# necessary for a quorum of recent proposals, approximately. +proposal_grace_period = 200 + +# The average number of rounds after which the proposal timeout adapts by a factor of 2. +# Note: It goes up faster than it goes down: it takes fewer rounds to double than to halve. +proposal_timeout_inertia = 10 + +# The maximum difference between validators' clocks we expect. Incoming proposals whose timestamp +# lies in the future by more than that are rejected. +clock_tolerance = '1 second' + # =========================================== # Configuration options for Highway consensus # =========================================== [consensus.highway] -# The folder in which the files with per-era latest unit hashes will be stored. 
-unit_hashes_folder = "/var/lib/casper/casper-node" # The duration for which incoming vertices with missing dependencies should be kept in a queue. -pending_vertex_timeout = '1min' +pending_vertex_timeout = '30 minutes' -request_latest_state_timeout = '30sec' - -# If the current era's protocol state has not progressed for this long, shut down. -standstill_timeout = '5min' +# Request the latest protocol state from a random peer periodically, with this interval. +# '0 seconds' means it is disabled and we never request the protocol state from a peer. +request_state_interval = '20 seconds' # Log inactive or faulty validators periodically, with this interval. -log_participation_interval = '1min' +# '0 seconds' means it is disabled and we never print the log message. +log_participation_interval = '1 minute' -# The maximum number of blocks by which execution is allowed to lag behind finalization. -# If it is more than that, consensus will pause, and resume once the executor has caught up. -max_execution_delay = 3 +# Log the synchronizer state periodically, with this interval. +# '0 seconds' means it is disabled and we never print the log message. +log_synchronizer_interval = '5 seconds' + +# Log the size of every incoming and outgoing serialized unit. +log_unit_sizes = false + +# The maximum number of peers we request the same vertex from in parallel. +max_requests_for_vertex = 5 + +# The maximum number of dependencies we request per validator in a batch. +# Limits requests per validator in panorama - in order to get a total number of +# requests, multiply by # of validators. +max_request_batch_size = 20 [consensus.highway.round_success_meter] # The number of most recent rounds we will be keeping track of. num_rounds_to_consider = 40 # The number of successful rounds that triggers us to slow down: With this many or fewer -# successes per `num_rounds_to_consider`, we increase our round exponent. +# successes per `num_rounds_to_consider`, we increase our round length. 
num_rounds_slowdown = 10 # The number of successful rounds that triggers us to speed up: With this many or more successes -# per `num_rounds_to_consider`, we decrease our round exponent. +# per `num_rounds_to_consider`, we decrease our round length. num_rounds_speedup = 32 -# We will try to accelerate (decrease our round exponent) every `acceleration_parameter` rounds if +# We will try to accelerate (decrease our round length) every `acceleration_parameter` rounds if # we have few enough failures. acceleration_parameter = 40 @@ -99,48 +207,169 @@ bind_address = '0.0.0.0:35000' # # Multiple addresses can be given and the node will attempt to connect to each, requiring at least # one connection. -known_addresses = ['139.162.132.144:35000','3.225.191.9:35000','31.7.207.16:35000','178.238.235.196:35000','209.145.60.74:35000','157.90.131.49:35000','18.188.152.102:35000','94.130.107.198:35000','135.181.134.57:35000','47.88.87.63:35000','139.59.247.32:35000','188.40.83.254:35000','135.181.165.110:35000','54.180.220.20:35000','148.251.190.103:35000','54.39.129.79:35000','54.39.129.78:35000','88.99.95.7:35000','101.36.120.117:35000','52.207.122.179:35000','18.144.20.51:35000','168.119.209.31:35000','134.209.16.172:35000','18.219.70.138:35000','3.221.194.62:35000','168.119.69.6:35000','62.171.135.101:35000','46.101.61.107:35000','13.58.71.180:35000','52.51.46.127:35000','157.90.131.121:35000','148.251.135.60:35000','54.215.53.35:35000','18.184.78.232:35000','18.188.103.230:35000','168.119.137.143:35000','54.179.8.192:35000','1.15.171.36:35000','47.57.239.181:35000','47.242.53.164:35000','139.59.247.32:35000','99.81.225.72:35000','82.95.0.200:35000','54.252.66.23:35000','134.209.243.124:35000','3.141.97.53:35000','98.149.220.243:35000','46.4.91.24:35000'] - +known_addresses = ['168.119.137.143:35000', '47.251.14.254:35000', '47.242.53.164:35000', '46.101.61.107:35000', '47.88.87.63:35000', '35.152.42.229:35000', '206.189.47.102:35000', '134.209.243.124:35000', 
'148.251.190.103:35000', '167.172.32.44:35000', '165.22.252.48:35000', '18.219.70.138:35000', '3.225.191.9:35000', '3.221.194.62:35000', '101.36.120.117:35000', '54.151.24.120:35000', '148.251.135.60:35000', '18.188.103.230:35000', '54.215.53.35:35000', '88.99.95.7:35000', '99.81.225.72:35000', '52.207.122.179:35000', '3.135.134.105:35000', '62.171.135.101:35000', '139.162.132.144:35000', '63.33.251.206:35000', '135.181.165.110:35000', '135.181.134.57:35000', '94.130.107.198:35000', '54.180.220.20:35000', '188.40.83.254:35000', '157.90.131.121:35000', '134.209.110.11:35000', '168.119.69.6:35000', '45.76.251.225:35000', '168.119.209.31:35000', '31.7.207.16:35000', '209.145.60.74:35000', '54.252.66.23:35000', '134.209.16.172:35000', '178.238.235.196:35000', '18.217.20.213:35000', '3.14.161.135:35000', '3.12.207.193:35000', '3.12.207.193:35000'] +# Minimum number of fully-connected peers to consider network component initialized. +min_peers_for_initialization = 3 -# The interval (in milliseconds) between each fresh round of gossiping the node's public address. -gossip_interval = 120_000 - -# Enable systemd support. If enabled, the node will notify systemd once it has synced and its -# listening socket for incoming connections is open. -# -# It is usually better to leave this option off and enable it explicitly via command-line override -# only in the unit files themselves via `-C=network.systemd_support=true`. -systemd_support = false - -# Minimum amount of time that has to pass before attempting to reconnect after losing all -# connections to established nodes. -isolation_reconnect_delay = '2s' +# The interval between each fresh round of gossiping the node's public address. +gossip_interval = '120 seconds' # Initial delay for starting address gossipping after the network starts. This should be slightly # more than the expected time required for initial connections to complete. 
-initial_gossip_delay = '5s' +initial_gossip_delay = '5 seconds' # How long a connection is allowed to be stuck as pending before it is abandoned. -max_addr_pending_time = '1min' +max_addr_pending_time = '1 minute' +# Maximum time allowed for a connection handshake between two nodes to be completed. Connections +# exceeding this threshold are considered unlikely to be healthy or even malicious and thus +# terminated. +handshake_timeout = '20 seconds' -# ================================================== -# Configuration options for the JSON-RPC HTTP server -# ================================================== -[rpc_server] +# Maximum number of incoming connections per unique peer allowed. If the limit is hit, additional +# connections will be rejected. A value of `0` means unlimited. +max_incoming_peer_connections = 3 -# Listening address for JSON-RPC HTTP server. If the port is set to 0, a random port will be used. +# The maximum total of upstream bandwidth in bytes per second allocated to non-validating peers. +# A value of `0` means unlimited. +max_outgoing_byte_rate_non_validators = 6553600 + +# The maximum allowed total impact of requests from non-validating peers per second answered. +# A value of `0` means unlimited. +max_incoming_message_rate_non_validators = 3000 + +# Maximum number of requests for data from a single peer that are allowed be buffered. A value of +# `0` means unlimited. +max_in_flight_demands = 50 + +# Version threshold to enable tarpit for. # -# If the specified port cannot be bound to, a random port will be tried instead. If binding fails, -# the JSON-RPC HTTP server will not run, but the node will be otherwise unaffected. +# When set to a version (the value may be `null` to disable the feature), any peer that reports a +# protocol version equal or below the threshold will be rejected only after holding open the +# connection for a specific (`tarpit_duration`) amount of time. 
 #
-# The actual bound address will be reported via a log line if logging is enabled.
-address = '0.0.0.0:7777'
+# This option makes most sense to enable on known nodes with addresses where legacy nodes that are
+# still in operation are connecting to, as these older versions will only attempt to reconnect to
+# other nodes once they have exhausted their set of known nodes.
+tarpit_version_threshold = '1.2.1'
+
+# How long to hold connections to trapped legacy nodes.
+tarpit_duration = '10 minutes'
+
+# The probability [0.0, 1.0] of this node trapping a legacy node.
+#
+# Since older nodes will only reconnect if all their options are exhausted, it is sufficient for a
+# single known node to hold open a connection to prevent the node from reconnecting. This should be
+# set to `1/n` or higher, with `n` being the number of known nodes expected in the configuration of
+# legacy nodes running this software.
+tarpit_chance = 0.2
+
+# Minimum time a peer is kept on block list before being redeemed. The actual
+# timeout duration is calculated by selecting a random value between
+# `blocklist_retain_min_duration` and `blocklist_retain_max_duration`.
+blocklist_retain_min_duration = '2 minutes'
+
+# Maximum time a peer is kept on block list before being redeemed. The actual
+# timeout duration is calculated by selecting a random value between
+# `blocklist_retain_min_duration` and `blocklist_retain_max_duration`.
+blocklist_retain_max_duration = '10 minutes'
+
+# Identity of a node
+#
+# When this section is not specified, an identity will be generated when the node process starts with a self-signed certificate.
+# This option makes sense for some private chains where for security reasons joining new nodes is restricted.
+# [network.identity]
+# tls_certificate = "node_cert.pem"
+# secret_key = "node.pem"
+# ca_certificate = "ca_cert.pem"
+
+# Weights for impact estimation of incoming messages, used in combination with
+# `max_incoming_message_rate_non_validators`.
+#
+# Any weight set to 0 means that the category of traffic is exempt from throttling.
+[network.estimator_weights] +consensus = 0 +block_gossip = 1 +transaction_gossip = 0 +finality_signature_gossip = 1 +address_gossip = 0 +finality_signature_broadcasts = 0 +transaction_requests = 1 +transaction_responses = 0 +legacy_deploy_requests = 1 +legacy_deploy_responses = 0 +block_requests = 1 +block_responses = 0 +block_header_requests = 1 +block_header_responses = 0 +trie_requests = 1 +trie_responses = 0 +finality_signature_requests = 1 +finality_signature_responses = 0 +sync_leap_requests = 1 +sync_leap_responses = 0 +approvals_hashes_requests = 1 +approvals_hashes_responses = 0 +execution_results_requests = 1 +execution_results_responses = 0 + +# ================================================== +# Configuration options for the BinaryPort server +# ================================================== +[binary_port_server] + +# Flag which enables the BinaryPort server. +enable_server = true + +# Listening address for BinaryPort server. +address = '0.0.0.0:7779' + +# Flag that enables the `AllValues` get request. Disabled by default, because it can potentially be abused to retrieve huge amounts of data and clog the node. +allow_request_get_all_values = false + +# Flag that enables the `Trie` get request. Disabled by default, because it can potentially be abused to retrieve huge amounts of data and clog the node. +allow_request_get_trie = false + +# Flag that enables the `TrySpeculativeExec` request. Disabled by default. +allow_request_speculative_exec = false + +# Maximum size of a message in bytes. +max_message_size_bytes = 134_217_728 + +# Maximum number of connections to the server. +max_connections = 5 # The global max rate of requests (per second) before they are limited. -# Request will be delayed to the next 1 second bucket once limited. -qps_limit = 50 +# The implementation uses a sliding window algorithm. 
+qps_limit = 110
+
+# Initial time given to a connection before it expires.
+initial_connection_lifetime = '10 seconds'
+
+# The amount of time which is given to a connection to extend its lifetime when a valid
+# [`Command::Get(GetRequest::Record)`] is sent to the node
+get_record_request_termination_delay = '0 seconds'
+
+# The amount of time which is given to a connection to extend its lifetime when a valid
+# [`Command::Get(GetRequest::Information)`] is sent to the node
+get_information_request_termination_delay = '5 seconds'
+
+# The amount of time which is given to a connection to extend its lifetime when a valid
+# [`Command::Get(GetRequest::State)`] is sent to the node
+get_state_request_termination_delay = '0 seconds'
+
+# The amount of time which is given to a connection to extend its lifetime when a valid
+# [`Command::Get(GetRequest::Trie)`] is sent to the node
+get_trie_request_termination_delay = '0 seconds'
+
+# The amount of time which is given to a connection to extend its lifetime when a valid
+# [`Command::TryAcceptTransaction`] is sent to the node
+accept_transaction_request_termination_delay = '24 seconds'
+
+# The amount of time which is given to a connection to extend its lifetime when a valid
+# [`Command::TrySpeculativeExec`] is sent to the node
+speculative_exec_request_termination_delay = '0 seconds'
 
 
 # ==============================================
@@ -148,6 +377,9 @@ qps_limit = 50
 # ==============================================
 [rest_server]
 
+# Flag which enables the REST HTTP server.
+enable_server = true
+
 # Listening address for REST HTTP server. If the port is set to 0, a random port will be used.
 #
 # If the specified port cannot be bound to, a random port will be tried instead. If binding fails,
@@ -158,7 +390,14 @@ address = '0.0.0.0:8888'
 
 # The global max rate of requests (per second) before they are limited.
 # Request will be delayed to the next 1 second bucket once limited.
-qps_limit = 10 +qps_limit = 100 + +# Specifies which origin will be reported as allowed by REST server. +# +# If left empty, CORS will be disabled. +# If set to '*', any origin is allowed. +# Otherwise, only a specified origin is allowed. The given string must conform to the [origin scheme](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin). +cors_origin = '' # ========================================================== @@ -166,6 +405,9 @@ qps_limit = 10 # ========================================================== [event_stream_server] +# Flag which enables the SSE HTTP event stream server. +enable_server = true + # Listening address for SSE HTTP event stream server. If the port is set to 0, a random port will be used. # # If the specified port cannot be bound to, a random port will be tried instead. If binding fails, @@ -177,13 +419,15 @@ address = '0.0.0.0:9999' # The number of event stream events to buffer. event_stream_buffer_length = 5000 -# The capacity of the broadcast channel size. -broadcast_channel_size = 6500 - -# The global max rate of requests (per second) before they are limited. -# Request will be delayed to the next 1 second bucket once limited. -qps_limit = 10 +# The maximum number of subscribers across all event streams the server will permit at any one time. +max_concurrent_subscribers = 100 +# Specifies which origin will be reported as allowed by event stream server. +# +# If left empty, CORS will be disabled. +# If set to '*', any origin is allowed. +# Otherwise, only a specified origin is allowed. The given string must conform to the [origin scheme](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin). +cors_origin = '' # =============================================== # Configuration options for the storage component @@ -191,7 +435,8 @@ qps_limit = 10 [storage] # Path (absolute, or relative to this config.toml) to the folder where any files created -# or read by the storage component will exist. 
+# or read by the storage component will exist. A subfolder named with the network name will be +# automatically created and used for the storage component files. # # If the folder doesn't exist, it and any required parents will be created. # @@ -226,6 +471,17 @@ max_deploy_metadata_store_size = 322_122_547_200 # 10_737_418_240 == 10 GiB. max_state_store_size = 10_737_418_240 +# Memory deduplication. +# +# If enabled, nodes will attempt to share loaded objects if possible. +enable_mem_deduplication = true + +# Memory duplication garbage collection. +# +# Sets the frequency how often the memory pool cache is swept for free references. +# For example, setting this value to 5 means that every 5th time something is put in the pool the cache is swept. +mem_pool_prune_interval = 4096 + # =================================== # Configuration options for gossiping @@ -243,63 +499,153 @@ infection_target = 3 # excluding us since 80% saturation would imply 3 new infections in 15 peers. saturation_limit_percent = 80 -# The maximum duration in seconds for which to keep finished entries. +# The maximum duration for which to keep finished entries. # # The longer they are retained, the lower the likelihood of re-gossiping a piece of data. However, # the longer they are retained, the larger the list of finished entries can grow. -finished_entry_duration_secs = 60 +finished_entry_duration = '1 minute' -# The timeout duration in seconds for a single gossip request, i.e. for a single gossip message +# The timeout duration for a single gossip request, i.e. for a single gossip message # sent from this node, it will be considered timed out if the expected response from that peer is # not received within this specified duration. 
-gossip_request_timeout_secs = 30 +gossip_request_timeout = '30 seconds' -# The timeout duration in seconds for retrieving the remaining part(s) of newly-discovered data +# The timeout duration for retrieving the remaining part(s) of newly-discovered data # from a peer which gossiped information about that data to this node. -get_remainder_timeout_secs = 5 +get_remainder_timeout = '5 seconds' +# The timeout duration for a newly-received, gossiped item to be validated and stored by another +# component before the gossiper abandons waiting to gossip the item onwards. +validate_and_store_timeout = '1 minute' -# ================================= -# Configuration options for fetcher -# ================================= -[fetcher] -# The timeout duration in seconds for a single fetcher request, i.e. for a single fetcher message -# sent from this node to another node, it will be considered timed out if the expected response from that peer is -# not received within this specified duration. -get_from_peer_timeout = 3 +# =============================================== +# Configuration options for the block accumulator +# =============================================== +[block_accumulator] + +# Block height difference threshold for starting to execute the blocks. +attempt_execution_threshold = 3 + +# Accepted time interval for inactivity in block accumulator. +dead_air_interval = '3 minutes' + +# Time after which the block acceptors are considered old and can be purged. +purge_interval = '1 minute' + + +# ================================================ +# Configuration options for the block synchronizer +# ================================================ +[block_synchronizer] + +# Maximum number of fetch-trie tasks to run in parallel during block synchronization. +max_parallel_trie_fetches = 5000 + +# Time interval for the node to ask for refreshed peers. 
+peer_refresh_interval = '90 seconds' + +# Time interval for the node to check what the block synchronizer needs to acquire next. +need_next_interval = '1 second' + +# Time interval for recurring disconnection of dishonest peers. +disconnect_dishonest_peers_interval = '10 seconds' + +# Time interval for resetting the latch in block builders. +latch_reset_interval = '5 seconds' + +# ============================================= +# Configuration options for the block validator +# ============================================= +[block_validator] -# =================================================== -# Configuration options for deploy acceptor component -# =================================================== -[deploy_acceptor] +# Maximum number of completed entries to retain. +# +# A higher value can avoid creating needless validation work on an already-validated proposed +# block, but comes at the cost of increased memory consumption. +max_completed_entries = 3 + + +# ================================== +# Configuration options for fetchers +# ================================== +[fetcher] -# If true, the deploy acceptor will verify the account associated with a received deploy prior to accepting it. -verify_accounts = true +# The timeout duration for a single fetcher request, i.e. for a single fetcher message +# sent from this node to another node, it will be considered timed out if the expected response from that peer is +# not received within this specified duration. +get_from_peer_timeout = '10 seconds' # ======================================================== # Configuration options for the contract runtime component # ======================================================== [contract_runtime] -# Optional setting to enable bonding or not. If unset, defaults to false. -#enable_bonding = false # Optional maximum size of the database to use for the global state store. # # If unset, defaults to 805,306,368,000 == 750 GiB. 
# # The size should be a multiple of the OS page size. -#max_global_state_size = 805306368000 +max_global_state_size = 2_089_072_132_096 + +# Optional depth limit to use for global state queries. +# +# If unset, defaults to 5. +#max_query_depth = 5 + +# Enable manual synchronizing to disk. +# +# If unset, defaults to true. +#enable_manual_sync = true + + +# ================================================== +# Configuration options for the transaction acceptor +# ================================================== +[transaction_acceptor] + +# The leeway allowed when considering whether a transaction is future-dated or not. +# +# To accommodate minor clock drift, transactions whose timestamps are within `timestamp_leeway` in the +# future are still acceptable. +# +# The maximum value to which `timestamp_leeway` can be set is defined by the chainspec setting +# `transaction.max_timestamp_leeway`. +timestamp_leeway = '2 seconds' + + +# =========================================== +# Configuration options for the transaction buffer +# =========================================== +[transaction_buffer] + +# The interval of checking for expired transactions. +expiry_check_interval = '1 minute' + + +# ============================================== +# Configuration options for the diagnostics port +# ============================================== +[diagnostics_port] + +# If set, the diagnostics port will be available on a UNIX socket. +enabled = false + +# Filename for the UNIX domain socket the diagnostics port listens on. +socket_path = "debug.socket" + +# The umask to set before creating the socket. A restrictive mask like `0o077` will cause the +# socket to be only accessible by the user the node runs as. A more relaxed variant is `0o007`, +# which allows for group access as well. 
+socket_umask = 0o077 -# ==================================================================== -# Configuration options for selecting deploys to propose in new blocks -# ==================================================================== -[block_proposer] +# ============================================= +# Configuration options for the upgrade watcher +# ============================================= +[upgrade_watcher] -# Deploys are only proposed in a new block if the have been received at least this long ago. -# A longer delay makes it more likely that many proposed deploys are already known by the -# other nodes, and don't have to be requested from the proposer afterwards. -#deploy_delay = '1min' +# How often to scan file system for available upgrades. +upgrade_check_interval = '30 seconds' diff --git a/resources/test/rest_schema_chainspec_bytes.json b/resources/test/rest_schema_chainspec_bytes.json new file mode 100644 index 0000000000..4ce0a7acc1 --- /dev/null +++ b/resources/test/rest_schema_chainspec_bytes.json @@ -0,0 +1,69 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "GetChainspecResult", + "description": "Result for the \"info_get_chainspec\" RPC.", + "type": "object", + "required": [ + "api_version", + "chainspec_bytes" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "chainspec_bytes": { + "description": "The chainspec file bytes.", + "allOf": [ + { + "$ref": "#/definitions/ChainspecRawBytes" + } + ] + } + }, + "definitions": { + "ChainspecRawBytes": { + "description": "The raw bytes of the chainspec.toml, genesis accounts.toml, and global_state.toml files.", + "type": "object", + "required": [ + "chainspec_bytes" + ], + "properties": { + "chainspec_bytes": { + "description": "Raw bytes of the current chainspec.toml file.", + "allOf": [ + { + "$ref": "#/definitions/Bytes" + } + ] + }, + "maybe_genesis_accounts_bytes": { + "description": "Raw bytes of the current 
genesis accounts.toml file.", + "anyOf": [ + { + "$ref": "#/definitions/Bytes" + }, + { + "type": "null" + } + ] + }, + "maybe_global_state_bytes": { + "description": "Raw bytes of the current global_state.toml file.", + "anyOf": [ + { + "$ref": "#/definitions/Bytes" + }, + { + "type": "null" + } + ] + } + } + }, + "Bytes": { + "description": "Hex-encoded bytes.", + "type": "string" + } + } +} \ No newline at end of file diff --git a/resources/test/rest_schema_status.json b/resources/test/rest_schema_status.json new file mode 100644 index 0000000000..f1a156d42d --- /dev/null +++ b/resources/test/rest_schema_status.json @@ -0,0 +1,433 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "GetStatusResult", + "description": "Result for \"info_get_status\" RPC response.", + "type": "object", + "required": [ + "api_version", + "available_block_range", + "block_sync", + "build_version", + "chainspec_name", + "last_progress", + "peers", + "reactor_state", + "starting_state_root_hash", + "uptime" + ], + "properties": { + "peers": { + "description": "The node ID and network address of each connected peer.", + "allOf": [ + { + "$ref": "#/definitions/Peers" + } + ] + }, + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "build_version": { + "description": "The compiled node version.", + "type": "string" + }, + "chainspec_name": { + "description": "The chainspec name.", + "type": "string" + }, + "starting_state_root_hash": { + "description": "The state root hash of the lowest block in the available block range.", + "allOf": [ + { + "$ref": "#/definitions/Digest" + } + ] + }, + "last_added_block_info": { + "description": "The minimal info of the last block from the linear chain.", + "anyOf": [ + { + "$ref": "#/definitions/MinimalBlockInfo" + }, + { + "type": "null" + } + ] + }, + "our_public_signing_key": { + "description": "Our public signing key.", + "anyOf": [ + { + "$ref": "#/definitions/PublicKey" + }, + { + "type": 
"null" + } + ] + }, + "round_length": { + "description": "The next round length if this node is a validator.", + "anyOf": [ + { + "$ref": "#/definitions/TimeDiff" + }, + { + "type": "null" + } + ] + }, + "next_upgrade": { + "description": "Information about the next scheduled upgrade.", + "anyOf": [ + { + "$ref": "#/definitions/NextUpgrade" + }, + { + "type": "null" + } + ] + }, + "uptime": { + "description": "Time that passed since the node has started.", + "allOf": [ + { + "$ref": "#/definitions/TimeDiff" + } + ] + }, + "reactor_state": { + "description": "The current state of node reactor.", + "allOf": [ + { + "$ref": "#/definitions/ReactorState" + } + ] + }, + "last_progress": { + "description": "Timestamp of the last recorded progress in the reactor.", + "allOf": [ + { + "$ref": "#/definitions/Timestamp" + } + ] + }, + "available_block_range": { + "description": "The available block range in storage.", + "allOf": [ + { + "$ref": "#/definitions/AvailableBlockRange" + } + ] + }, + "block_sync": { + "description": "The status of the block synchronizer builders.", + "allOf": [ + { + "$ref": "#/definitions/BlockSynchronizerStatus" + } + ] + }, + "latest_switch_block_hash": { + "description": "The hash of the latest switch block.", + "anyOf": [ + { + "$ref": "#/definitions/BlockHash" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false, + "definitions": { + "Peers": { + "description": "Map of peer IDs to network addresses.", + "type": "array", + "items": { + "$ref": "#/definitions/PeerEntry" + } + }, + "PeerEntry": { + "description": "Node peer entry.", + "type": "object", + "required": [ + "address", + "node_id" + ], + "properties": { + "node_id": { + "description": "Node id.", + "type": "string" + }, + "address": { + "description": "Node address.", + "type": "string" + } + }, + "additionalProperties": false + }, + "Digest": { + "description": "Hex-encoded hash digest.", + "type": "string" + }, + "MinimalBlockInfo": { + "description": "Minimal 
info of a `Block`.", + "type": "object", + "required": [ + "creator", + "era_id", + "hash", + "height", + "state_root_hash", + "timestamp" + ], + "properties": { + "hash": { + "$ref": "#/definitions/BlockHash" + }, + "timestamp": { + "$ref": "#/definitions/Timestamp" + }, + "era_id": { + "$ref": "#/definitions/EraId" + }, + "height": { + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "state_root_hash": { + "$ref": "#/definitions/Digest" + }, + "creator": { + "$ref": "#/definitions/PublicKey" + } + }, + "additionalProperties": false + }, + "BlockHash": { + "description": "Hex-encoded cryptographic hash of a block.", + "allOf": [ + { + "$ref": "#/definitions/Digest" + } + ] + }, + "Timestamp": { + "description": "Timestamp formatted as per RFC 3339", + "type": "string" + }, + "EraId": { + "description": "Era ID newtype.", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "PublicKey": { + "description": "Hex-encoded cryptographic public key, including the algorithm tag prefix.", + "examples": [ + { + "name": "SystemPublicKey", + "description": "A pseudo public key, used for example when the system proposes an immediate switch block after a network upgrade rather than a specific validator. Its hex-encoded value is always '00', as is the corresponding pseudo signature's", + "value": "00" + }, + { + "name": "Ed25519PublicKey", + "description": "An Ed25519 public key. Its hex-encoded value begins '01' and is followed by 64 characters", + "value": "018a88e3dd7409f195fd52db2d3cba5d72ca6709bf1d94121bf3748801b40f6f5c" + }, + { + "name": "Secp256k1PublicKey", + "description": "A secp256k1 public key. 
Its hex-encoded value begins '02' and is followed by 66 characters", + "value": "0203408e9526316fd1f8def480dd45b2cc72ffd732771c9ceb5d92ffa4051e6ee084" + } + ], + "type": "string" + }, + "TimeDiff": { + "description": "Human-readable duration.", + "type": "string" + }, + "NextUpgrade": { + "description": "Information about the next protocol upgrade.", + "type": "object", + "required": [ + "activation_point", + "protocol_version" + ], + "properties": { + "activation_point": { + "$ref": "#/definitions/ActivationPoint" + }, + "protocol_version": { + "$ref": "#/definitions/ProtocolVersion" + } + } + }, + "ActivationPoint": { + "description": "The first era to which the associated protocol version applies.", + "anyOf": [ + { + "description": "Era id.", + "allOf": [ + { + "$ref": "#/definitions/EraId" + } + ] + }, + { + "description": "Genesis timestamp.", + "allOf": [ + { + "$ref": "#/definitions/Timestamp" + } + ] + } + ] + }, + "ProtocolVersion": { + "description": "Casper Platform protocol version", + "type": "string" + }, + "ReactorState": { + "description": "The state of the reactor.", + "oneOf": [ + { + "description": "Get all components and reactor state set up on start.", + "type": "string", + "enum": [ + "Initialize" + ] + }, + { + "description": "Orient to the network and attempt to catch up to tip.", + "type": "string", + "enum": [ + "CatchUp" + ] + }, + { + "description": "Running commit upgrade and creating immediate switch block.", + "type": "string", + "enum": [ + "Upgrading" + ] + }, + { + "description": "Stay caught up with tip.", + "type": "string", + "enum": [ + "KeepUp" + ] + }, + { + "description": "Node is currently caught up and is an active validator.", + "type": "string", + "enum": [ + "Validate" + ] + }, + { + "description": "Node should be shut down for upgrade.", + "type": "string", + "enum": [ + "ShutdownForUpgrade" + ] + }, + { + "description": "Node should shut down after catching up.", + "type": "string", + "enum": [ + 
"ShutdownAfterCatchingUp" + ] + } + ] + }, + "AvailableBlockRange": { + "description": "An unbroken, inclusive range of blocks.", + "type": "object", + "required": [ + "high", + "low" + ], + "properties": { + "low": { + "description": "The inclusive lower bound of the range.", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "high": { + "description": "The inclusive upper bound of the range.", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + } + }, + "additionalProperties": false + }, + "BlockSynchronizerStatus": { + "description": "The status of the block synchronizer.", + "type": "object", + "properties": { + "historical": { + "description": "The status of syncing a historical block, if any.", + "anyOf": [ + { + "$ref": "#/definitions/BlockSyncStatus" + }, + { + "type": "null" + } + ] + }, + "forward": { + "description": "The status of syncing a forward block, if any.", + "anyOf": [ + { + "$ref": "#/definitions/BlockSyncStatus" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false + }, + "BlockSyncStatus": { + "description": "The status of syncing an individual block.", + "type": "object", + "required": [ + "acquisition_state", + "block_hash" + ], + "properties": { + "block_hash": { + "description": "The block hash.", + "allOf": [ + { + "$ref": "#/definitions/BlockHash" + } + ] + }, + "block_height": { + "description": "The height of the block, if known.", + "type": [ + "integer", + "null" + ], + "format": "uint64", + "minimum": 0.0 + }, + "acquisition_state": { + "description": "The state of acquisition of the data associated with the block.", + "type": "string" + } + }, + "additionalProperties": false + } + } +} \ No newline at end of file diff --git a/resources/test/rest_schema_validator_changes.json b/resources/test/rest_schema_validator_changes.json new file mode 100644 index 0000000000..c7a7340d4e --- /dev/null +++ b/resources/test/rest_schema_validator_changes.json @@ -0,0 +1,146 @@ +{ + "$schema": 
"http://json-schema.org/draft-07/schema#", + "title": "GetValidatorChangesResult", + "description": "Result for the \"info_get_validator_changes\" RPC.", + "type": "object", + "required": [ + "api_version", + "changes" + ], + "properties": { + "api_version": { + "description": "The RPC API version.", + "type": "string" + }, + "changes": { + "description": "The validators' status changes.", + "type": "array", + "items": { + "$ref": "#/definitions/JsonValidatorChanges" + } + } + }, + "additionalProperties": false, + "definitions": { + "JsonValidatorChanges": { + "description": "The changes in a validator's status.", + "type": "object", + "required": [ + "public_key", + "status_changes" + ], + "properties": { + "public_key": { + "description": "The public key of the validator.", + "allOf": [ + { + "$ref": "#/definitions/PublicKey" + } + ] + }, + "status_changes": { + "description": "The set of changes to the validator's status.", + "type": "array", + "items": { + "$ref": "#/definitions/JsonValidatorStatusChange" + } + } + }, + "additionalProperties": false + }, + "PublicKey": { + "description": "Hex-encoded cryptographic public key, including the algorithm tag prefix.", + "examples": [ + { + "name": "SystemPublicKey", + "description": "A pseudo public key, used for example when the system proposes an immediate switch block after a network upgrade rather than a specific validator. Its hex-encoded value is always '00', as is the corresponding pseudo signature's", + "value": "00" + }, + { + "name": "Ed25519PublicKey", + "description": "An Ed25519 public key. Its hex-encoded value begins '01' and is followed by 64 characters", + "value": "018a88e3dd7409f195fd52db2d3cba5d72ca6709bf1d94121bf3748801b40f6f5c" + }, + { + "name": "Secp256k1PublicKey", + "description": "A secp256k1 public key. 
Its hex-encoded value begins '02' and is followed by 66 characters", + "value": "0203408e9526316fd1f8def480dd45b2cc72ffd732771c9ceb5d92ffa4051e6ee084" + } + ], + "type": "string" + }, + "JsonValidatorStatusChange": { + "description": "A single change to a validator's status in the given era.", + "type": "object", + "required": [ + "era_id", + "validator_change" + ], + "properties": { + "era_id": { + "description": "The era in which the change occurred.", + "allOf": [ + { + "$ref": "#/definitions/EraId" + } + ] + }, + "validator_change": { + "description": "The change in validator status.", + "allOf": [ + { + "$ref": "#/definitions/ValidatorChange" + } + ] + } + }, + "additionalProperties": false + }, + "EraId": { + "description": "Era ID newtype.", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "ValidatorChange": { + "description": "A change to a validator's status between two eras.", + "oneOf": [ + { + "description": "The validator got newly added to the validator set.", + "type": "string", + "enum": [ + "Added" + ] + }, + { + "description": "The validator was removed from the validator set.", + "type": "string", + "enum": [ + "Removed" + ] + }, + { + "description": "The validator was banned from this era.", + "type": "string", + "enum": [ + "Banned" + ] + }, + { + "description": "The validator was excluded from proposing new blocks in this era.", + "type": "string", + "enum": [ + "CannotPropose" + ] + }, + { + "description": "We saw the validator misbehave in this era.", + "type": "string", + "enum": [ + "SeenAsFaulty" + ] + } + ] + } + } +} \ No newline at end of file diff --git a/resources/test/sse_data_schema.json b/resources/test/sse_data_schema.json new file mode 100644 index 0000000000..e11e4f4032 --- /dev/null +++ b/resources/test/sse_data_schema.json @@ -0,0 +1,5255 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "SseData", + "description": "The \"data\" field of the events sent on the event stream to 
clients.", + "oneOf": [ + { + "description": "The version of this node's API server. This event will always be the first sent to a new client, and will have no associated event ID provided.", + "type": "object", + "required": [ + "ApiVersion" + ], + "properties": { + "ApiVersion": { + "$ref": "#/definitions/ProtocolVersion" + } + }, + "additionalProperties": false + }, + { + "description": "The given block has been added to the linear chain and stored locally.", + "type": "object", + "required": [ + "BlockAdded" + ], + "properties": { + "BlockAdded": { + "type": "object", + "required": [ + "block", + "block_hash" + ], + "properties": { + "block_hash": { + "$ref": "#/definitions/BlockHash" + }, + "block": { + "$ref": "#/definitions/Block" + } + } + } + }, + "additionalProperties": false + }, + { + "description": "The given transaction has been newly-accepted by this node.", + "type": "object", + "required": [ + "TransactionAccepted" + ], + "properties": { + "TransactionAccepted": { + "type": "object", + "required": [ + "transaction" + ], + "properties": { + "transaction": { + "description": "a transaction", + "allOf": [ + { + "$ref": "#/definitions/Transaction" + } + ] + } + } + } + }, + "additionalProperties": false + }, + { + "description": "The given transaction has been executed, committed and forms part of the given block.", + "type": "object", + "required": [ + "TransactionProcessed" + ], + "properties": { + "TransactionProcessed": { + "type": "object", + "required": [ + "block_hash", + "execution_result", + "initiator_addr", + "messages", + "timestamp", + "transaction_hash", + "ttl" + ], + "properties": { + "transaction_hash": { + "$ref": "#/definitions/TransactionHash" + }, + "initiator_addr": { + "$ref": "#/definitions/InitiatorAddr" + }, + "timestamp": { + "$ref": "#/definitions/Timestamp" + }, + "ttl": { + "$ref": "#/definitions/TimeDiff" + }, + "block_hash": { + "$ref": "#/definitions/BlockHash" + }, + "execution_result": { + "$ref": 
"#/definitions/ExecutionResult" + }, + "messages": { + "type": "array", + "items": { + "$ref": "#/definitions/Message" + } + } + } + } + }, + "additionalProperties": false + }, + { + "description": "The given transaction has expired.", + "type": "object", + "required": [ + "TransactionExpired" + ], + "properties": { + "TransactionExpired": { + "type": "object", + "required": [ + "transaction_hash" + ], + "properties": { + "transaction_hash": { + "$ref": "#/definitions/TransactionHash" + } + } + } + }, + "additionalProperties": false + }, + { + "description": "Generic representation of validator's fault in an era.", + "type": "object", + "required": [ + "Fault" + ], + "properties": { + "Fault": { + "type": "object", + "required": [ + "era_id", + "public_key", + "timestamp" + ], + "properties": { + "era_id": { + "$ref": "#/definitions/EraId" + }, + "public_key": { + "$ref": "#/definitions/PublicKey" + }, + "timestamp": { + "$ref": "#/definitions/Timestamp" + } + } + } + }, + "additionalProperties": false + }, + { + "description": "New finality signature received.", + "type": "object", + "required": [ + "FinalitySignature" + ], + "properties": { + "FinalitySignature": { + "$ref": "#/definitions/FinalitySignature" + } + }, + "additionalProperties": false + }, + { + "description": "The execution effects produced by a `StepRequest`.", + "type": "object", + "required": [ + "Step" + ], + "properties": { + "Step": { + "type": "object", + "required": [ + "era_id", + "execution_effects" + ], + "properties": { + "era_id": { + "$ref": "#/definitions/EraId" + }, + "execution_effects": { + "$ref": "#/definitions/Effects" + } + } + } + }, + "additionalProperties": false + }, + { + "description": "The node is about to shut down.", + "type": "string", + "enum": [ + "Shutdown" + ] + } + ], + "definitions": { + "ProtocolVersion": { + "description": "Casper Platform protocol version", + "type": "string" + }, + "BlockHash": { + "description": "Hex-encoded cryptographic hash of a 
block.", + "allOf": [ + { + "$ref": "#/definitions/Digest" + } + ] + }, + "Digest": { + "description": "Hex-encoded hash digest.", + "type": "string" + }, + "Block": { + "description": "A block after execution.", + "oneOf": [ + { + "description": "The legacy, initial version of the block.", + "type": "object", + "required": [ + "Version1" + ], + "properties": { + "Version1": { + "$ref": "#/definitions/BlockV1" + } + }, + "additionalProperties": false + }, + { + "description": "The version 2 of the block.", + "type": "object", + "required": [ + "Version2" + ], + "properties": { + "Version2": { + "$ref": "#/definitions/BlockV2" + } + }, + "additionalProperties": false + } + ] + }, + "BlockV1": { + "description": "A block after execution, with the resulting global state root hash. This is the core component of the Casper linear blockchain. Version 1.", + "type": "object", + "required": [ + "body", + "hash", + "header" + ], + "properties": { + "hash": { + "description": "The block hash identifying this block.", + "allOf": [ + { + "$ref": "#/definitions/BlockHash" + } + ] + }, + "header": { + "description": "The header portion of the block.", + "allOf": [ + { + "$ref": "#/definitions/BlockHeaderV1" + } + ] + }, + "body": { + "description": "The body portion of the block.", + "allOf": [ + { + "$ref": "#/definitions/BlockBodyV1" + } + ] + } + } + }, + "BlockHeaderV1": { + "description": "The header portion of a block.", + "type": "object", + "required": [ + "accumulated_seed", + "body_hash", + "era_id", + "height", + "parent_hash", + "protocol_version", + "random_bit", + "state_root_hash", + "timestamp" + ], + "properties": { + "parent_hash": { + "description": "The parent block's hash.", + "allOf": [ + { + "$ref": "#/definitions/BlockHash" + } + ] + }, + "state_root_hash": { + "description": "The root hash of global state after the deploys in this block have been executed.", + "allOf": [ + { + "$ref": "#/definitions/Digest" + } + ] + }, + "body_hash": { + "description": 
"The hash of the block's body.", + "allOf": [ + { + "$ref": "#/definitions/Digest" + } + ] + }, + "random_bit": { + "description": "A random bit needed for initializing a future era.", + "type": "boolean" + }, + "accumulated_seed": { + "description": "A seed needed for initializing a future era.", + "allOf": [ + { + "$ref": "#/definitions/Digest" + } + ] + }, + "era_end": { + "description": "The `EraEnd` of a block if it is a switch block.", + "anyOf": [ + { + "$ref": "#/definitions/EraEndV1" + }, + { + "type": "null" + } + ] + }, + "timestamp": { + "description": "The timestamp from when the block was proposed.", + "allOf": [ + { + "$ref": "#/definitions/Timestamp" + } + ] + }, + "era_id": { + "description": "The era ID in which this block was created.", + "allOf": [ + { + "$ref": "#/definitions/EraId" + } + ] + }, + "height": { + "description": "The height of this block, i.e. the number of ancestors.", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "protocol_version": { + "description": "The protocol version of the network from when this block was created.", + "allOf": [ + { + "$ref": "#/definitions/ProtocolVersion" + } + ] + } + } + }, + "EraEndV1": { + "description": "Information related to the end of an era, and validator weights for the following era.", + "type": "object", + "required": [ + "era_report", + "next_era_validator_weights" + ], + "properties": { + "era_report": { + "description": "Equivocation, reward and validator inactivity information.", + "allOf": [ + { + "$ref": "#/definitions/EraReport_for_PublicKey" + } + ] + }, + "next_era_validator_weights": { + "description": "The validators for the upcoming era and their respective weights.", + "allOf": [ + { + "$ref": "#/definitions/Array_of_ValidatorWeight" + } + ] + } + } + }, + "EraReport_for_PublicKey": { + "description": "Equivocation, reward and validator inactivity information.", + "type": "object", + "required": [ + "equivocators", + "inactive_validators", + "rewards" + ], + 
"properties": { + "equivocators": { + "description": "The set of equivocators.", + "type": "array", + "items": { + "$ref": "#/definitions/PublicKey" + } + }, + "rewards": { + "description": "Rewards for finalization of earlier blocks.", + "allOf": [ + { + "$ref": "#/definitions/Array_of_EraReward" + } + ] + }, + "inactive_validators": { + "description": "Validators that haven't produced any unit during the era.", + "type": "array", + "items": { + "$ref": "#/definitions/PublicKey" + } + } + } + }, + "PublicKey": { + "description": "Hex-encoded cryptographic public key, including the algorithm tag prefix.", + "examples": [ + { + "name": "SystemPublicKey", + "description": "A pseudo public key, used for example when the system proposes an immediate switch block after a network upgrade rather than a specific validator. Its hex-encoded value is always '00', as is the corresponding pseudo signature's", + "value": "00" + }, + { + "name": "Ed25519PublicKey", + "description": "An Ed25519 public key. Its hex-encoded value begins '01' and is followed by 64 characters", + "value": "018a88e3dd7409f195fd52db2d3cba5d72ca6709bf1d94121bf3748801b40f6f5c" + }, + { + "name": "Secp256k1PublicKey", + "description": "A secp256k1 public key. 
Its hex-encoded value begins '02' and is followed by 66 characters", + "value": "0203408e9526316fd1f8def480dd45b2cc72ffd732771c9ceb5d92ffa4051e6ee084" + } + ], + "type": "string" + }, + "Array_of_EraReward": { + "type": "array", + "items": { + "$ref": "#/definitions/EraReward" + } + }, + "EraReward": { + "description": "A validator's public key paired with a measure of the value of its contribution to consensus, as a fraction of the configured maximum block reward.", + "type": "object", + "required": [ + "amount", + "validator" + ], + "properties": { + "validator": { + "description": "The validator's public key.", + "allOf": [ + { + "$ref": "#/definitions/PublicKey" + } + ] + }, + "amount": { + "description": "The reward amount.", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + } + } + }, + "Array_of_ValidatorWeight": { + "type": "array", + "items": { + "$ref": "#/definitions/ValidatorWeight" + } + }, + "ValidatorWeight": { + "description": "A validator's public key paired with its weight, i.e. the total number of motes staked by it and its delegators.", + "type": "object", + "required": [ + "validator", + "weight" + ], + "properties": { + "validator": { + "description": "The validator's public key.", + "allOf": [ + { + "$ref": "#/definitions/PublicKey" + } + ] + }, + "weight": { + "description": "The validator's weight.", + "allOf": [ + { + "$ref": "#/definitions/U512" + } + ] + } + } + }, + "U512": { + "description": "Decimal representation of a 512-bit integer.", + "type": "string" + }, + "Timestamp": { + "description": "Timestamp formatted as per RFC 3339", + "type": "string" + }, + "EraId": { + "description": "Era ID newtype.", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "BlockBodyV1": { + "description": "The body portion of a block. 
Version 1.", + "type": "object", + "required": [ + "deploy_hashes", + "proposer", + "transfer_hashes" + ], + "properties": { + "proposer": { + "description": "The public key of the validator which proposed the block.", + "allOf": [ + { + "$ref": "#/definitions/PublicKey" + } + ] + }, + "deploy_hashes": { + "description": "The deploy hashes of the non-transfer deploys within the block.", + "type": "array", + "items": { + "$ref": "#/definitions/DeployHash" + } + }, + "transfer_hashes": { + "description": "The deploy hashes of the transfers within the block.", + "type": "array", + "items": { + "$ref": "#/definitions/DeployHash" + } + } + } + }, + "DeployHash": { + "description": "Hex-encoded deploy hash.", + "allOf": [ + { + "$ref": "#/definitions/Digest" + } + ] + }, + "BlockV2": { + "description": "A block after execution, with the resulting global state root hash. This is the core component of the Casper linear blockchain. Version 2.", + "type": "object", + "required": [ + "body", + "hash", + "header" + ], + "properties": { + "hash": { + "description": "The block hash identifying this block.", + "allOf": [ + { + "$ref": "#/definitions/BlockHash" + } + ] + }, + "header": { + "description": "The header portion of the block.", + "allOf": [ + { + "$ref": "#/definitions/BlockHeaderV2" + } + ] + }, + "body": { + "description": "The body portion of the block.", + "allOf": [ + { + "$ref": "#/definitions/BlockBodyV2" + } + ] + } + } + }, + "BlockHeaderV2": { + "description": "The header portion of a block.", + "type": "object", + "required": [ + "accumulated_seed", + "body_hash", + "current_gas_price", + "era_id", + "height", + "parent_hash", + "proposer", + "protocol_version", + "random_bit", + "state_root_hash", + "timestamp" + ], + "properties": { + "parent_hash": { + "description": "The parent block's hash.", + "allOf": [ + { + "$ref": "#/definitions/BlockHash" + } + ] + }, + "state_root_hash": { + "description": "The root hash of global state after the deploys in this 
block have been executed.", + "allOf": [ + { + "$ref": "#/definitions/Digest" + } + ] + }, + "body_hash": { + "description": "The hash of the block's body.", + "allOf": [ + { + "$ref": "#/definitions/Digest" + } + ] + }, + "random_bit": { + "description": "A random bit needed for initializing a future era.", + "type": "boolean" + }, + "accumulated_seed": { + "description": "A seed needed for initializing a future era.", + "allOf": [ + { + "$ref": "#/definitions/Digest" + } + ] + }, + "era_end": { + "description": "The `EraEnd` of a block if it is a switch block.", + "anyOf": [ + { + "$ref": "#/definitions/EraEndV2" + }, + { + "type": "null" + } + ] + }, + "timestamp": { + "description": "The timestamp from when the block was proposed.", + "allOf": [ + { + "$ref": "#/definitions/Timestamp" + } + ] + }, + "era_id": { + "description": "The era ID in which this block was created.", + "allOf": [ + { + "$ref": "#/definitions/EraId" + } + ] + }, + "height": { + "description": "The height of this block, i.e. 
the number of ancestors.", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "protocol_version": { + "description": "The protocol version of the network from when this block was created.", + "allOf": [ + { + "$ref": "#/definitions/ProtocolVersion" + } + ] + }, + "proposer": { + "description": "The public key of the validator which proposed the block.", + "allOf": [ + { + "$ref": "#/definitions/PublicKey" + } + ] + }, + "current_gas_price": { + "description": "The gas price of the era", + "type": "integer", + "format": "uint8", + "minimum": 0.0 + }, + "last_switch_block_hash": { + "description": "The most recent switch block hash.", + "anyOf": [ + { + "$ref": "#/definitions/BlockHash" + }, + { + "type": "null" + } + ] + } + } + }, + "EraEndV2": { + "description": "Information related to the end of an era, and validator weights for the following era.", + "type": "object", + "required": [ + "equivocators", + "inactive_validators", + "next_era_gas_price", + "next_era_validator_weights", + "rewards" + ], + "properties": { + "equivocators": { + "description": "The set of equivocators.", + "type": "array", + "items": { + "$ref": "#/definitions/PublicKey" + } + }, + "inactive_validators": { + "description": "Validators that haven't produced any unit during the era.", + "type": "array", + "items": { + "$ref": "#/definitions/PublicKey" + } + }, + "next_era_validator_weights": { + "description": "The validators for the upcoming era and their respective weights.", + "allOf": [ + { + "$ref": "#/definitions/Array_of_ValidatorWeight" + } + ] + }, + "rewards": { + "description": "The rewards distributed to the validators.", + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "$ref": "#/definitions/U512" + } + } + }, + "next_era_gas_price": { + "type": "integer", + "format": "uint8", + "minimum": 0.0 + } + } + }, + "BlockBodyV2": { + "description": "The body portion of a block. 
Version 2.", + "type": "object", + "required": [ + "rewarded_signatures", + "transactions" + ], + "properties": { + "transactions": { + "description": "Map of transactions mapping categories to a list of transaction hashes.", + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "$ref": "#/definitions/TransactionHash" + } + } + }, + "rewarded_signatures": { + "description": "List of identifiers for finality signatures for a particular past block.", + "allOf": [ + { + "$ref": "#/definitions/RewardedSignatures" + } + ] + } + } + }, + "TransactionHash": { + "description": "A versioned wrapper for a transaction hash or deploy hash.", + "oneOf": [ + { + "description": "A deploy hash.", + "type": "object", + "required": [ + "Deploy" + ], + "properties": { + "Deploy": { + "$ref": "#/definitions/DeployHash" + } + }, + "additionalProperties": false + }, + { + "description": "A version 1 transaction hash.", + "type": "object", + "required": [ + "Version1" + ], + "properties": { + "Version1": { + "$ref": "#/definitions/TransactionV1Hash" + } + }, + "additionalProperties": false + } + ] + }, + "TransactionV1Hash": { + "description": "Hex-encoded TransactionV1 hash.", + "allOf": [ + { + "$ref": "#/definitions/Digest" + } + ] + }, + "RewardedSignatures": { + "description": "Describes finality signatures that will be rewarded in a block. Consists of a vector of `SingleBlockRewardedSignatures`, each of which describes signatures for a single ancestor block. 
The first entry represents the signatures for the parent block, the second for the parent of the parent, and so on.", + "type": "array", + "items": { + "$ref": "#/definitions/SingleBlockRewardedSignatures" + } + }, + "SingleBlockRewardedSignatures": { + "description": "List of identifiers for finality signatures for a particular past block.\n\nThat past block height is current_height - signature_rewards_max_delay, the latter being defined in the chainspec.\n\nWe need to wait for a few blocks to pass (`signature_rewards_max_delay`) to store the finality signers because we need a bit of time to get the block finality.", + "type": "array", + "items": { + "type": "integer", + "format": "uint8", + "minimum": 0.0 + } + }, + "Transaction": { + "description": "A versioned wrapper for a transaction or deploy.", + "oneOf": [ + { + "description": "A deploy.", + "type": "object", + "required": [ + "Deploy" + ], + "properties": { + "Deploy": { + "$ref": "#/definitions/Deploy" + } + }, + "additionalProperties": false + }, + { + "description": "A version 1 transaction.", + "type": "object", + "required": [ + "Version1" + ], + "properties": { + "Version1": { + "$ref": "#/definitions/TransactionV1" + } + }, + "additionalProperties": false + } + ] + }, + "Deploy": { + "description": "A signed smart contract.", + "type": "object", + "required": [ + "approvals", + "hash", + "header", + "payment", + "session" + ], + "properties": { + "hash": { + "$ref": "#/definitions/DeployHash" + }, + "header": { + "$ref": "#/definitions/DeployHeader" + }, + "payment": { + "$ref": "#/definitions/ExecutableDeployItem" + }, + "session": { + "$ref": "#/definitions/ExecutableDeployItem" + }, + "approvals": { + "type": "array", + "items": { + "$ref": "#/definitions/Approval" + }, + "uniqueItems": true + } + }, + "additionalProperties": false + }, + "DeployHeader": { + "description": "The header portion of a [`Deploy`].", + "type": "object", + "required": [ + "account", + "body_hash", + "chain_name", + 
"dependencies", + "gas_price", + "timestamp", + "ttl" + ], + "properties": { + "account": { + "$ref": "#/definitions/PublicKey" + }, + "timestamp": { + "$ref": "#/definitions/Timestamp" + }, + "ttl": { + "$ref": "#/definitions/TimeDiff" + }, + "gas_price": { + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "body_hash": { + "$ref": "#/definitions/Digest" + }, + "dependencies": { + "type": "array", + "items": { + "$ref": "#/definitions/DeployHash" + } + }, + "chain_name": { + "type": "string" + } + }, + "additionalProperties": false + }, + "TimeDiff": { + "description": "Human-readable duration.", + "type": "string" + }, + "ExecutableDeployItem": { + "description": "The executable component of a [`Deploy`].", + "oneOf": [ + { + "description": "Executable specified as raw bytes that represent Wasm code and an instance of [`RuntimeArgs`].", + "type": "object", + "required": [ + "ModuleBytes" + ], + "properties": { + "ModuleBytes": { + "type": "object", + "required": [ + "args", + "module_bytes" + ], + "properties": { + "module_bytes": { + "description": "Hex-encoded raw Wasm bytes.", + "allOf": [ + { + "$ref": "#/definitions/Bytes" + } + ] + }, + "args": { + "description": "Runtime arguments.", + "allOf": [ + { + "$ref": "#/definitions/RuntimeArgs" + } + ] + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + { + "description": "Stored contract referenced by its [`AddressableEntityHash`], entry point and an instance of [`RuntimeArgs`].", + "type": "object", + "required": [ + "StoredContractByHash" + ], + "properties": { + "StoredContractByHash": { + "type": "object", + "required": [ + "args", + "entry_point", + "hash" + ], + "properties": { + "hash": { + "description": "Hex-encoded contract hash.", + "allOf": [ + { + "$ref": "#/definitions/ContractHash" + } + ] + }, + "entry_point": { + "description": "Name of an entry point.", + "type": "string" + }, + "args": { + "description": "Runtime arguments.", + "allOf": [ + 
{ + "$ref": "#/definitions/RuntimeArgs" + } + ] + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + { + "description": "Stored contract referenced by a named key existing in the signer's account context, entry point and an instance of [`RuntimeArgs`].", + "type": "object", + "required": [ + "StoredContractByName" + ], + "properties": { + "StoredContractByName": { + "type": "object", + "required": [ + "args", + "entry_point", + "name" + ], + "properties": { + "name": { + "description": "Named key.", + "type": "string" + }, + "entry_point": { + "description": "Name of an entry point.", + "type": "string" + }, + "args": { + "description": "Runtime arguments.", + "allOf": [ + { + "$ref": "#/definitions/RuntimeArgs" + } + ] + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + { + "description": "Stored versioned contract referenced by its [`PackageHash`], entry point and an instance of [`RuntimeArgs`].", + "type": "object", + "required": [ + "StoredVersionedContractByHash" + ], + "properties": { + "StoredVersionedContractByHash": { + "type": "object", + "required": [ + "args", + "entry_point", + "hash" + ], + "properties": { + "hash": { + "description": "Hex-encoded contract package hash.", + "allOf": [ + { + "$ref": "#/definitions/ContractPackageHash" + } + ] + }, + "version": { + "description": "An optional version of the contract to call. 
It will default to the highest enabled version if no value is specified.", + "type": [ + "integer", + "null" + ], + "format": "uint32", + "minimum": 0.0 + }, + "entry_point": { + "description": "Entry point name.", + "type": "string" + }, + "args": { + "description": "Runtime arguments.", + "allOf": [ + { + "$ref": "#/definitions/RuntimeArgs" + } + ] + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + { + "description": "Stored versioned contract referenced by a named key existing in the signer's account context, entry point and an instance of [`RuntimeArgs`].", + "type": "object", + "required": [ + "StoredVersionedContractByName" + ], + "properties": { + "StoredVersionedContractByName": { + "type": "object", + "required": [ + "args", + "entry_point", + "name" + ], + "properties": { + "name": { + "description": "Named key.", + "type": "string" + }, + "version": { + "description": "An optional version of the contract to call. It will default to the highest enabled version if no value is specified.", + "type": [ + "integer", + "null" + ], + "format": "uint32", + "minimum": 0.0 + }, + "entry_point": { + "description": "Entry point name.", + "type": "string" + }, + "args": { + "description": "Runtime arguments.", + "allOf": [ + { + "$ref": "#/definitions/RuntimeArgs" + } + ] + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + { + "description": "A native transfer which does not contain or reference a Wasm code.", + "type": "object", + "required": [ + "Transfer" + ], + "properties": { + "Transfer": { + "type": "object", + "required": [ + "args" + ], + "properties": { + "args": { + "description": "Runtime arguments.", + "allOf": [ + { + "$ref": "#/definitions/RuntimeArgs" + } + ] + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + } + ] + }, + "Bytes": { + "description": "Hex-encoded bytes.", + "type": "string" + }, + "RuntimeArgs": { + "description": "Represents a 
collection of arguments passed to a smart contract.", + "type": "array", + "items": { + "$ref": "#/definitions/NamedArg" + } + }, + "NamedArg": { + "description": "Named arguments to a contract.", + "type": "array", + "items": [ + { + "type": "string" + }, + { + "$ref": "#/definitions/CLValue" + } + ], + "maxItems": 2, + "minItems": 2 + }, + "CLValue": { + "description": "A Casper value, i.e. a value which can be stored and manipulated by smart contracts.\n\nIt holds the underlying data as a type-erased, serialized `Vec` and also holds the CLType of the underlying data as a separate member.\n\nThe `parsed` field, representing the original value, is a convenience only available when a CLValue is encoded to JSON, and can always be set to null if preferred.", + "type": "object", + "required": [ + "bytes", + "cl_type" + ], + "properties": { + "cl_type": { + "$ref": "#/definitions/CLType" + }, + "bytes": { + "type": "string" + }, + "parsed": true + }, + "additionalProperties": false + }, + "CLType": { + "description": "Casper types, i.e. 
types which can be stored and manipulated by smart contracts.\n\nProvides a description of the underlying data type of a [`CLValue`](crate::CLValue).", + "oneOf": [ + { + "description": "`bool` primitive.", + "type": "string", + "enum": [ + "Bool" + ] + }, + { + "description": "`i32` primitive.", + "type": "string", + "enum": [ + "I32" + ] + }, + { + "description": "`i64` primitive.", + "type": "string", + "enum": [ + "I64" + ] + }, + { + "description": "`u8` primitive.", + "type": "string", + "enum": [ + "U8" + ] + }, + { + "description": "`u32` primitive.", + "type": "string", + "enum": [ + "U32" + ] + }, + { + "description": "`u64` primitive.", + "type": "string", + "enum": [ + "U64" + ] + }, + { + "description": "[`U128`] large unsigned integer type.", + "type": "string", + "enum": [ + "U128" + ] + }, + { + "description": "[`U256`] large unsigned integer type.", + "type": "string", + "enum": [ + "U256" + ] + }, + { + "description": "[`U512`] large unsigned integer type.", + "type": "string", + "enum": [ + "U512" + ] + }, + { + "description": "`()` primitive.", + "type": "string", + "enum": [ + "Unit" + ] + }, + { + "description": "`String` primitive.", + "type": "string", + "enum": [ + "String" + ] + }, + { + "description": "[`Key`] system type.", + "type": "string", + "enum": [ + "Key" + ] + }, + { + "description": "[`URef`] system type.", + "type": "string", + "enum": [ + "URef" + ] + }, + { + "description": "[`PublicKey`](crate::PublicKey) system type.", + "type": "string", + "enum": [ + "PublicKey" + ] + }, + { + "description": "`Option` of a `CLType`.", + "type": "object", + "required": [ + "Option" + ], + "properties": { + "Option": { + "$ref": "#/definitions/CLType" + } + }, + "additionalProperties": false + }, + { + "description": "Variable-length list of a single `CLType` (comparable to a `Vec`).", + "type": "object", + "required": [ + "List" + ], + "properties": { + "List": { + "$ref": "#/definitions/CLType" + } + }, + "additionalProperties": false + 
}, + { + "description": "Fixed-length list of a single `CLType` (comparable to a Rust array).", + "type": "object", + "required": [ + "ByteArray" + ], + "properties": { + "ByteArray": { + "type": "integer", + "format": "uint32", + "minimum": 0.0 + } + }, + "additionalProperties": false + }, + { + "description": "`Result` with `Ok` and `Err` variants of `CLType`s.", + "type": "object", + "required": [ + "Result" + ], + "properties": { + "Result": { + "type": "object", + "required": [ + "err", + "ok" + ], + "properties": { + "ok": { + "$ref": "#/definitions/CLType" + }, + "err": { + "$ref": "#/definitions/CLType" + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + { + "description": "Map with keys of a single `CLType` and values of a single `CLType`.", + "type": "object", + "required": [ + "Map" + ], + "properties": { + "Map": { + "type": "object", + "required": [ + "key", + "value" + ], + "properties": { + "key": { + "$ref": "#/definitions/CLType" + }, + "value": { + "$ref": "#/definitions/CLType" + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + { + "description": "1-ary tuple of a `CLType`.", + "type": "object", + "required": [ + "Tuple1" + ], + "properties": { + "Tuple1": { + "type": "array", + "items": { + "$ref": "#/definitions/CLType" + }, + "maxItems": 1, + "minItems": 1 + } + }, + "additionalProperties": false + }, + { + "description": "2-ary tuple of `CLType`s.", + "type": "object", + "required": [ + "Tuple2" + ], + "properties": { + "Tuple2": { + "type": "array", + "items": { + "$ref": "#/definitions/CLType" + }, + "maxItems": 2, + "minItems": 2 + } + }, + "additionalProperties": false + }, + { + "description": "3-ary tuple of `CLType`s.", + "type": "object", + "required": [ + "Tuple3" + ], + "properties": { + "Tuple3": { + "type": "array", + "items": { + "$ref": "#/definitions/CLType" + }, + "maxItems": 3, + "minItems": 3 + } + }, + "additionalProperties": false + }, + { + 
"description": "Unspecified type.", + "type": "string", + "enum": [ + "Any" + ] + } + ] + }, + "ContractHash": { + "description": "The hash address of the contract", + "type": "string" + }, + "ContractPackageHash": { + "description": "The hash address of the contract package", + "type": "string" + }, + "Approval": { + "description": "A struct containing a signature of a transaction hash and the public key of the signer.", + "type": "object", + "required": [ + "signature", + "signer" + ], + "properties": { + "signer": { + "$ref": "#/definitions/PublicKey" + }, + "signature": { + "$ref": "#/definitions/Signature" + } + }, + "additionalProperties": false + }, + "Signature": { + "description": "Hex-encoded cryptographic signature, including the algorithm tag prefix.", + "type": "string" + }, + "TransactionV1": { + "description": "A unit of work sent by a client to the network, which when executed can cause global state to be altered.", + "type": "object", + "required": [ + "approvals", + "hash", + "payload" + ], + "properties": { + "hash": { + "$ref": "#/definitions/TransactionV1Hash" + }, + "payload": { + "$ref": "#/definitions/TransactionV1Payload" + }, + "approvals": { + "type": "array", + "items": { + "$ref": "#/definitions/Approval" + }, + "uniqueItems": true + } + } + }, + "TransactionV1Payload": { + "description": "Internal payload of the transaction. 
The actual data over which the signing is done.", + "type": "object", + "required": [ + "chain_name", + "fields", + "initiator_addr", + "pricing_mode", + "timestamp", + "ttl" + ], + "properties": { + "initiator_addr": { + "$ref": "#/definitions/InitiatorAddr" + }, + "timestamp": { + "$ref": "#/definitions/Timestamp" + }, + "ttl": { + "$ref": "#/definitions/TimeDiff" + }, + "chain_name": { + "type": "string" + }, + "pricing_mode": { + "$ref": "#/definitions/PricingMode" + }, + "fields": { + "type": "object", + "additionalProperties": true + } + }, + "additionalProperties": false + }, + "InitiatorAddr": { + "description": "The address of the initiator of a TransactionV1.", + "oneOf": [ + { + "description": "The public key of the initiator.", + "type": "object", + "required": [ + "PublicKey" + ], + "properties": { + "PublicKey": { + "$ref": "#/definitions/PublicKey" + } + }, + "additionalProperties": false + }, + { + "description": "The account hash derived from the public key of the initiator.", + "type": "object", + "required": [ + "AccountHash" + ], + "properties": { + "AccountHash": { + "$ref": "#/definitions/AccountHash" + } + }, + "additionalProperties": false + } + ] + }, + "AccountHash": { + "description": "Account hash as a formatted string.", + "type": "string" + }, + "PricingMode": { + "description": "Pricing mode of a Transaction.", + "oneOf": [ + { + "description": "The original payment model, where the creator of the transaction specifies how much they will pay, at what gas price.", + "type": "object", + "required": [ + "PaymentLimited" + ], + "properties": { + "PaymentLimited": { + "type": "object", + "required": [ + "gas_price_tolerance", + "payment_amount", + "standard_payment" + ], + "properties": { + "payment_amount": { + "description": "User-specified payment amount.", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "gas_price_tolerance": { + "description": "User-specified gas_price tolerance (minimum 1). 
This is interpreted to mean \"do not include this transaction in a block if the current gas price is greater than this number\"", + "type": "integer", + "format": "uint8", + "minimum": 0.0 + }, + "standard_payment": { + "description": "Standard payment.", + "type": "boolean" + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + { + "description": "The cost of the transaction is determined by the cost table, per the transaction category.", + "type": "object", + "required": [ + "Fixed" + ], + "properties": { + "Fixed": { + "type": "object", + "required": [ + "additional_computation_factor", + "gas_price_tolerance" + ], + "properties": { + "additional_computation_factor": { + "description": "User-specified additional computation factor (minimum 0). If \"0\" is provided, no additional logic is applied to the computation limit. Each value above \"0\" tells the node that it needs to treat the transaction as if it uses more gas than its serialized size indicates. Each \"1\" will increase the \"wasm lane\" size bucket for this transaction by 1. So if the size of the transaction indicates bucket \"0\" and \"additional_computation_factor = 2\", the transaction will be treated as a \"2\".", + "type": "integer", + "format": "uint8", + "minimum": 0.0 + }, + "gas_price_tolerance": { + "description": "User-specified gas_price tolerance (minimum 1). 
This is interpreted to mean \"do not include this transaction in a block if the current gas price is greater than this number\"", + "type": "integer", + "format": "uint8", + "minimum": 0.0 + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + { + "description": "The payment for this transaction was previously paid, as proven by the receipt hash (this is for future use, not currently implemented).", + "type": "object", + "required": [ + "Prepaid" + ], + "properties": { + "Prepaid": { + "type": "object", + "required": [ + "receipt" + ], + "properties": { + "receipt": { + "description": "Pre-paid receipt.", + "allOf": [ + { + "$ref": "#/definitions/Digest" + } + ] + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + } + ] + }, + "ExecutionResult": { + "description": "The versioned result of executing a single deploy.", + "oneOf": [ + { + "description": "Version 1 of execution result type.", + "type": "object", + "required": [ + "Version1" + ], + "properties": { + "Version1": { + "$ref": "#/definitions/ExecutionResultV1" + } + }, + "additionalProperties": false + }, + { + "description": "Version 2 of execution result type.", + "type": "object", + "required": [ + "Version2" + ], + "properties": { + "Version2": { + "$ref": "#/definitions/ExecutionResultV2" + } + }, + "additionalProperties": false + } + ] + }, + "ExecutionResultV1": { + "description": "The result of executing a single deploy.", + "oneOf": [ + { + "description": "The result of a failed execution.", + "type": "object", + "required": [ + "Failure" + ], + "properties": { + "Failure": { + "type": "object", + "required": [ + "cost", + "effect", + "error_message", + "transfers" + ], + "properties": { + "effect": { + "description": "The effect of executing the deploy.", + "allOf": [ + { + "$ref": "#/definitions/ExecutionEffect" + } + ] + }, + "transfers": { + "description": "A record of version 1 Transfers performed while executing the deploy.", + 
"type": "array", + "items": { + "$ref": "#/definitions/TransferAddr" + } + }, + "cost": { + "description": "The cost of executing the deploy.", + "allOf": [ + { + "$ref": "#/definitions/U512" + } + ] + }, + "error_message": { + "description": "The error message associated with executing the deploy.", + "type": "string" + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + { + "description": "The result of a successful execution.", + "type": "object", + "required": [ + "Success" + ], + "properties": { + "Success": { + "type": "object", + "required": [ + "cost", + "effect", + "transfers" + ], + "properties": { + "effect": { + "description": "The effect of executing the deploy.", + "allOf": [ + { + "$ref": "#/definitions/ExecutionEffect" + } + ] + }, + "transfers": { + "description": "A record of Transfers performed while executing the deploy.", + "type": "array", + "items": { + "$ref": "#/definitions/TransferAddr" + } + }, + "cost": { + "description": "The cost of executing the deploy.", + "allOf": [ + { + "$ref": "#/definitions/U512" + } + ] + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + } + ] + }, + "ExecutionEffect": { + "description": "The sequence of execution transforms from a single deploy.", + "type": "object", + "required": [ + "operations", + "transforms" + ], + "properties": { + "operations": { + "description": "The resulting operations.", + "type": "array", + "items": { + "$ref": "#/definitions/Operation" + } + }, + "transforms": { + "description": "The sequence of execution transforms.", + "type": "array", + "items": { + "$ref": "#/definitions/TransformV1" + } + } + }, + "additionalProperties": false + }, + "Operation": { + "description": "An operation performed while executing a deploy.", + "type": "object", + "required": [ + "key", + "kind" + ], + "properties": { + "key": { + "description": "The formatted string of the `Key`.", + "type": "string" + }, + "kind": { + "description": 
"The type of operation.", + "allOf": [ + { + "$ref": "#/definitions/OpKind" + } + ] + } + }, + "additionalProperties": false + }, + "OpKind": { + "description": "The type of operation performed while executing a deploy.", + "oneOf": [ + { + "description": "A read operation.", + "type": "string", + "enum": [ + "Read" + ] + }, + { + "description": "A write operation.", + "type": "string", + "enum": [ + "Write" + ] + }, + { + "description": "An addition.", + "type": "string", + "enum": [ + "Add" + ] + }, + { + "description": "An operation which has no effect.", + "type": "string", + "enum": [ + "NoOp" + ] + }, + { + "description": "A prune operation.", + "type": "string", + "enum": [ + "Prune" + ] + } + ] + }, + "TransformV1": { + "description": "A transformation performed while executing a deploy.", + "type": "object", + "required": [ + "key", + "transform" + ], + "properties": { + "key": { + "description": "The formatted string of the `Key`.", + "type": "string" + }, + "transform": { + "description": "The transformation.", + "allOf": [ + { + "$ref": "#/definitions/TransformKindV1" + } + ] + } + }, + "additionalProperties": false + }, + "TransformKindV1": { + "description": "The actual transformation performed while executing a deploy.", + "oneOf": [ + { + "description": "A transform having no effect.", + "type": "string", + "enum": [ + "Identity" + ] + }, + { + "description": "Writes the given CLValue to global state.", + "type": "object", + "required": [ + "WriteCLValue" + ], + "properties": { + "WriteCLValue": { + "$ref": "#/definitions/CLValue" + } + }, + "additionalProperties": false + }, + { + "description": "Writes the given Account to global state.", + "type": "object", + "required": [ + "WriteAccount" + ], + "properties": { + "WriteAccount": { + "$ref": "#/definitions/AccountHash" + } + }, + "additionalProperties": false + }, + { + "description": "Writes a smart contract as Wasm to global state.", + "type": "string", + "enum": [ + "WriteContractWasm" + ] + 
}, + { + "description": "Writes a smart contract to global state.", + "type": "string", + "enum": [ + "WriteContract" + ] + }, + { + "description": "Writes a smart contract package to global state.", + "type": "string", + "enum": [ + "WriteContractPackage" + ] + }, + { + "description": "Writes the given DeployInfo to global state.", + "type": "object", + "required": [ + "WriteDeployInfo" + ], + "properties": { + "WriteDeployInfo": { + "$ref": "#/definitions/DeployInfo" + } + }, + "additionalProperties": false + }, + { + "description": "Writes the given EraInfo to global state.", + "type": "object", + "required": [ + "WriteEraInfo" + ], + "properties": { + "WriteEraInfo": { + "$ref": "#/definitions/EraInfo" + } + }, + "additionalProperties": false + }, + { + "description": "Writes the given version 1 Transfer to global state.", + "type": "object", + "required": [ + "WriteTransfer" + ], + "properties": { + "WriteTransfer": { + "$ref": "#/definitions/TransferV1" + } + }, + "additionalProperties": false + }, + { + "description": "Writes the given Bid to global state.", + "type": "object", + "required": [ + "WriteBid" + ], + "properties": { + "WriteBid": { + "$ref": "#/definitions/Bid" + } + }, + "additionalProperties": false + }, + { + "description": "Writes the given Withdraw to global state.", + "type": "object", + "required": [ + "WriteWithdraw" + ], + "properties": { + "WriteWithdraw": { + "type": "array", + "items": { + "$ref": "#/definitions/WithdrawPurse" + } + } + }, + "additionalProperties": false + }, + { + "description": "Adds the given `i32`.", + "type": "object", + "required": [ + "AddInt32" + ], + "properties": { + "AddInt32": { + "type": "integer", + "format": "int32" + } + }, + "additionalProperties": false + }, + { + "description": "Adds the given `u64`.", + "type": "object", + "required": [ + "AddUInt64" + ], + "properties": { + "AddUInt64": { + "type": "integer", + "format": "uint64", + "minimum": 0.0 + } + }, + "additionalProperties": false + }, + { 
+ "description": "Adds the given `U128`.", + "type": "object", + "required": [ + "AddUInt128" + ], + "properties": { + "AddUInt128": { + "$ref": "#/definitions/U128" + } + }, + "additionalProperties": false + }, + { + "description": "Adds the given `U256`.", + "type": "object", + "required": [ + "AddUInt256" + ], + "properties": { + "AddUInt256": { + "$ref": "#/definitions/U256" + } + }, + "additionalProperties": false + }, + { + "description": "Adds the given `U512`.", + "type": "object", + "required": [ + "AddUInt512" + ], + "properties": { + "AddUInt512": { + "$ref": "#/definitions/U512" + } + }, + "additionalProperties": false + }, + { + "description": "Adds the given collection of named keys.", + "type": "object", + "required": [ + "AddKeys" + ], + "properties": { + "AddKeys": { + "type": "array", + "items": { + "$ref": "#/definitions/NamedKey" + } + } + }, + "additionalProperties": false + }, + { + "description": "A failed transformation, containing an error message.", + "type": "object", + "required": [ + "Failure" + ], + "properties": { + "Failure": { + "type": "string" + } + }, + "additionalProperties": false + }, + { + "description": "Writes the given Unbonding to global state.", + "type": "object", + "required": [ + "WriteUnbonding" + ], + "properties": { + "WriteUnbonding": { + "type": "array", + "items": { + "$ref": "#/definitions/UnbondingPurse" + } + } + }, + "additionalProperties": false + }, + { + "description": "Writes the addressable entity to global state.", + "type": "string", + "enum": [ + "WriteAddressableEntity" + ] + }, + { + "description": "Removes pathing to keyed value within global state. 
This is a form of soft delete; the underlying value remains in global state and is reachable from older global state root hashes where it was included in the hash up.", + "type": "object", + "required": [ + "Prune" + ], + "properties": { + "Prune": { + "$ref": "#/definitions/Key" + } + }, + "additionalProperties": false + }, + { + "description": "Writes the given BidKind to global state.", + "type": "object", + "required": [ + "WriteBidKind" + ], + "properties": { + "WriteBidKind": { + "$ref": "#/definitions/BidKind" + } + }, + "additionalProperties": false + } + ] + }, + "DeployInfo": { + "description": "Information relating to the given Deploy.", + "type": "object", + "required": [ + "deploy_hash", + "from", + "gas", + "source", + "transfers" + ], + "properties": { + "deploy_hash": { + "description": "Hex-encoded Deploy hash.", + "allOf": [ + { + "$ref": "#/definitions/DeployHash" + } + ] + }, + "transfers": { + "description": "Version 1 transfers performed by the Deploy.", + "type": "array", + "items": { + "$ref": "#/definitions/TransferAddr" + } + }, + "from": { + "description": "Account identifier of the creator of the Deploy.", + "allOf": [ + { + "$ref": "#/definitions/AccountHash" + } + ] + }, + "source": { + "description": "Source purse used for payment of the Deploy.", + "allOf": [ + { + "$ref": "#/definitions/URef" + } + ] + }, + "gas": { + "description": "Gas cost of executing the Deploy.", + "allOf": [ + { + "$ref": "#/definitions/U512" + } + ] + } + }, + "additionalProperties": false + }, + "TransferAddr": { + "description": "Hex-encoded version 1 transfer address.", + "type": "string" + }, + "URef": { + "description": "Hex-encoded, formatted URef.", + "type": "string" + }, + "EraInfo": { + "description": "Auction metadata. 
Intended to be recorded at each era.", + "type": "object", + "required": [ + "seigniorage_allocations" + ], + "properties": { + "seigniorage_allocations": { + "type": "array", + "items": { + "$ref": "#/definitions/SeigniorageAllocation" + } + } + }, + "additionalProperties": false + }, + "SeigniorageAllocation": { + "description": "Information about a seigniorage allocation", + "oneOf": [ + { + "description": "Info about a seigniorage allocation for a validator", + "type": "object", + "required": [ + "Validator" + ], + "properties": { + "Validator": { + "type": "object", + "required": [ + "amount", + "validator_public_key" + ], + "properties": { + "validator_public_key": { + "description": "Validator's public key", + "allOf": [ + { + "$ref": "#/definitions/PublicKey" + } + ] + }, + "amount": { + "description": "Allocated amount", + "allOf": [ + { + "$ref": "#/definitions/U512" + } + ] + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + { + "description": "Info about a seigniorage allocation for a delegator", + "type": "object", + "required": [ + "Delegator" + ], + "properties": { + "Delegator": { + "type": "object", + "required": [ + "amount", + "delegator_public_key", + "validator_public_key" + ], + "properties": { + "delegator_public_key": { + "description": "Delegator's public key", + "allOf": [ + { + "$ref": "#/definitions/PublicKey" + } + ] + }, + "validator_public_key": { + "description": "Validator's public key", + "allOf": [ + { + "$ref": "#/definitions/PublicKey" + } + ] + }, + "amount": { + "description": "Allocated amount", + "allOf": [ + { + "$ref": "#/definitions/U512" + } + ] + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, + { + "description": "Info about a seigniorage allocation for a delegator", + "type": "object", + "required": [ + "DelegatorKind" + ], + "properties": { + "DelegatorKind": { + "type": "object", + "required": [ + "amount", + "delegator_kind", + 
"validator_public_key" + ], + "properties": { + "delegator_kind": { + "description": "Delegator kind", + "allOf": [ + { + "$ref": "#/definitions/DelegatorKind" + } + ] + }, + "validator_public_key": { + "description": "Validator's public key", + "allOf": [ + { + "$ref": "#/definitions/PublicKey" + } + ] + }, + "amount": { + "description": "Allocated amount", + "allOf": [ + { + "$ref": "#/definitions/U512" + } + ] + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + } + ] + }, + "DelegatorKind": { + "description": "Auction bid variants. Kinds of delegation bids.", + "oneOf": [ + { + "description": "Delegation from public key.", + "type": "object", + "required": [ + "PublicKey" + ], + "properties": { + "PublicKey": { + "$ref": "#/definitions/PublicKey" + } + }, + "additionalProperties": false + }, + { + "description": "Delegation from purse.", + "type": "object", + "required": [ + "Purse" + ], + "properties": { + "Purse": { + "type": "string" + } + }, + "additionalProperties": false + } + ] + }, + "TransferV1": { + "description": "Represents a version 1 transfer from one purse to another.", + "type": "object", + "required": [ + "amount", + "deploy_hash", + "from", + "gas", + "source", + "target" + ], + "properties": { + "deploy_hash": { + "description": "Hex-encoded Deploy hash of Deploy that created the transfer.", + "allOf": [ + { + "$ref": "#/definitions/DeployHash" + } + ] + }, + "from": { + "description": "Account from which transfer was executed", + "allOf": [ + { + "$ref": "#/definitions/AccountHash" + } + ] + }, + "to": { + "description": "Account to which funds are transferred", + "anyOf": [ + { + "$ref": "#/definitions/AccountHash" + }, + { + "type": "null" + } + ] + }, + "source": { + "description": "Source purse", + "allOf": [ + { + "$ref": "#/definitions/URef" + } + ] + }, + "target": { + "description": "Target purse", + "allOf": [ + { + "$ref": "#/definitions/URef" + } + ] + }, + "amount": { + "description": "Transfer 
amount", + "allOf": [ + { + "$ref": "#/definitions/U512" + } + ] + }, + "gas": { + "description": "Gas", + "allOf": [ + { + "$ref": "#/definitions/U512" + } + ] + }, + "id": { + "description": "User-defined id", + "type": [ + "integer", + "null" + ], + "format": "uint64", + "minimum": 0.0 + } + }, + "additionalProperties": false + }, + "Bid": { + "description": "An entry in the validator map.", + "type": "object", + "required": [ + "bonding_purse", + "delegation_rate", + "delegators", + "inactive", + "staked_amount", + "validator_public_key" + ], + "properties": { + "validator_public_key": { + "description": "Validator public key.", + "allOf": [ + { + "$ref": "#/definitions/PublicKey" + } + ] + }, + "bonding_purse": { + "description": "The purse that was used for bonding.", + "allOf": [ + { + "$ref": "#/definitions/URef" + } + ] + }, + "staked_amount": { + "description": "The amount of tokens staked by a validator (not including delegators).", + "allOf": [ + { + "$ref": "#/definitions/U512" + } + ] + }, + "delegation_rate": { + "description": "Delegation rate.", + "type": "integer", + "format": "uint8", + "minimum": 0.0 + }, + "vesting_schedule": { + "description": "Vesting schedule for a genesis validator. 
`None` if non-genesis validator.", + "anyOf": [ + { + "$ref": "#/definitions/VestingSchedule" + }, + { + "type": "null" + } + ] + }, + "delegators": { + "description": "This validator's delegators, indexed by their public keys.", + "allOf": [ + { + "$ref": "#/definitions/Array_of_PublicKeyAndDelegator" + } + ] + }, + "inactive": { + "description": "`true` if validator has been \"evicted\".", + "type": "boolean" + } + }, + "additionalProperties": false + }, + "VestingSchedule": { + "type": "object", + "required": [ + "initial_release_timestamp_millis" + ], + "properties": { + "initial_release_timestamp_millis": { + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "locked_amounts": { + "type": [ + "array", + "null" + ], + "items": { + "$ref": "#/definitions/U512" + }, + "maxItems": 14, + "minItems": 14 + } + }, + "additionalProperties": false + }, + "Array_of_PublicKeyAndDelegator": { + "type": "array", + "items": { + "$ref": "#/definitions/PublicKeyAndDelegator" + } + }, + "PublicKeyAndDelegator": { + "description": "A delegator associated with the given validator.", + "type": "object", + "required": [ + "delegator", + "delegator_public_key" + ], + "properties": { + "delegator_public_key": { + "description": "The public key of the delegator.", + "allOf": [ + { + "$ref": "#/definitions/PublicKey" + } + ] + }, + "delegator": { + "description": "The delegator details.", + "allOf": [ + { + "$ref": "#/definitions/Delegator" + } + ] + } + } + }, + "Delegator": { + "description": "Represents a party delegating their stake to a validator (or \"delegatee\")", + "type": "object", + "required": [ + "bonding_purse", + "delegator_public_key", + "staked_amount", + "validator_public_key" + ], + "properties": { + "delegator_public_key": { + "$ref": "#/definitions/PublicKey" + }, + "staked_amount": { + "$ref": "#/definitions/U512" + }, + "bonding_purse": { + "$ref": "#/definitions/URef" + }, + "validator_public_key": { + "$ref": "#/definitions/PublicKey" + }, + 
"vesting_schedule": { + "anyOf": [ + { + "$ref": "#/definitions/VestingSchedule" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false + }, + "WithdrawPurse": { + "description": "A withdraw purse, a legacy structure.", + "type": "object", + "required": [ + "amount", + "bonding_purse", + "era_of_creation", + "unbonder_public_key", + "validator_public_key" + ], + "properties": { + "bonding_purse": { + "description": "Bonding Purse", + "allOf": [ + { + "$ref": "#/definitions/URef" + } + ] + }, + "validator_public_key": { + "description": "Validators public key.", + "allOf": [ + { + "$ref": "#/definitions/PublicKey" + } + ] + }, + "unbonder_public_key": { + "description": "Unbonders public key.", + "allOf": [ + { + "$ref": "#/definitions/PublicKey" + } + ] + }, + "era_of_creation": { + "description": "Era in which this unbonding request was created.", + "allOf": [ + { + "$ref": "#/definitions/EraId" + } + ] + }, + "amount": { + "description": "Unbonding Amount.", + "allOf": [ + { + "$ref": "#/definitions/U512" + } + ] + } + }, + "additionalProperties": false + }, + "U128": { + "description": "Decimal representation of a 128-bit integer.", + "type": "string" + }, + "U256": { + "description": "Decimal representation of a 256-bit integer.", + "type": "string" + }, + "NamedKey": { + "description": "A key with a name.", + "type": "object", + "required": [ + "key", + "name" + ], + "properties": { + "name": { + "description": "The name of the entry.", + "type": "string" + }, + "key": { + "description": "The value of the entry: a casper `Key` type.", + "allOf": [ + { + "$ref": "#/definitions/Key" + } + ] + } + }, + "additionalProperties": false + }, + "Key": { + "description": "The key as a formatted string, under which data (e.g. 
`CLValue`s, smart contracts, user accounts) are stored in global state.", + "type": "string" + }, + "UnbondingPurse": { + "description": "Unbonding purse.", + "type": "object", + "required": [ + "amount", + "bonding_purse", + "era_of_creation", + "unbonder_public_key", + "validator_public_key" + ], + "properties": { + "bonding_purse": { + "description": "Bonding Purse", + "allOf": [ + { + "$ref": "#/definitions/URef" + } + ] + }, + "validator_public_key": { + "description": "Validators public key.", + "allOf": [ + { + "$ref": "#/definitions/PublicKey" + } + ] + }, + "unbonder_public_key": { + "description": "Unbonders public key.", + "allOf": [ + { + "$ref": "#/definitions/PublicKey" + } + ] + }, + "era_of_creation": { + "description": "Era in which this unbonding request was created.", + "allOf": [ + { + "$ref": "#/definitions/EraId" + } + ] + }, + "amount": { + "description": "Unbonding Amount.", + "allOf": [ + { + "$ref": "#/definitions/U512" + } + ] + }, + "new_validator": { + "description": "The validator public key to re-delegate to.", + "anyOf": [ + { + "$ref": "#/definitions/PublicKey" + }, + { + "type": "null" + } + ] + } + }, + "additionalProperties": false + }, + "BidKind": { + "description": "Auction bid variants.", + "oneOf": [ + { + "description": "A unified record indexed on validator data, with an embedded collection of all delegator bids assigned to that validator. 
The Unified variant is for legacy retrograde support, new instances will not be created going forward.", + "type": "object", + "required": [ + "Unified" + ], + "properties": { + "Unified": { + "$ref": "#/definitions/Bid" + } + }, + "additionalProperties": false + }, + { + "description": "A bid record containing only validator data.", + "type": "object", + "required": [ + "Validator" + ], + "properties": { + "Validator": { + "$ref": "#/definitions/ValidatorBid" + } + }, + "additionalProperties": false + }, + { + "description": "A bid record containing only delegator data.", + "type": "object", + "required": [ + "Delegator" + ], + "properties": { + "Delegator": { + "$ref": "#/definitions/DelegatorBid" + } + }, + "additionalProperties": false + }, + { + "description": "A bridge record pointing to a new `ValidatorBid` after the public key was changed.", + "type": "object", + "required": [ + "Bridge" + ], + "properties": { + "Bridge": { + "$ref": "#/definitions/Bridge" + } + }, + "additionalProperties": false + }, + { + "description": "Credited amount.", + "type": "object", + "required": [ + "Credit" + ], + "properties": { + "Credit": { + "$ref": "#/definitions/ValidatorCredit" + } + }, + "additionalProperties": false + }, + { + "description": "Reservation", + "type": "object", + "required": [ + "Reservation" + ], + "properties": { + "Reservation": { + "$ref": "#/definitions/Reservation" + } + }, + "additionalProperties": false + }, + { + "description": "Unbond", + "type": "object", + "required": [ + "Unbond" + ], + "properties": { + "Unbond": { + "$ref": "#/definitions/Unbond" + } + }, + "additionalProperties": false + } + ] + }, + "ValidatorBid": { + "description": "An entry in the validator map.", + "type": "object", + "required": [ + "bonding_purse", + "delegation_rate", + "inactive", + "maximum_delegation_amount", + "minimum_delegation_amount", + "reserved_slots", + "staked_amount", + "validator_public_key" + ], + "properties": { + "validator_public_key": { + 
"description": "Validator public key", + "allOf": [ + { + "$ref": "#/definitions/PublicKey" + } + ] + }, + "bonding_purse": { + "description": "The purse that was used for bonding.", + "allOf": [ + { + "$ref": "#/definitions/URef" + } + ] + }, + "staked_amount": { + "description": "The amount of tokens staked by a validator (not including delegators).", + "allOf": [ + { + "$ref": "#/definitions/U512" + } + ] + }, + "delegation_rate": { + "description": "Delegation rate", + "type": "integer", + "format": "uint8", + "minimum": 0.0 + }, + "vesting_schedule": { + "description": "Vesting schedule for a genesis validator. `None` if non-genesis validator.", + "anyOf": [ + { + "$ref": "#/definitions/VestingSchedule" + }, + { + "type": "null" + } + ] + }, + "inactive": { + "description": "`true` if validator has been \"evicted\"", + "type": "boolean" + }, + "minimum_delegation_amount": { + "description": "Minimum allowed delegation amount in motes", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "maximum_delegation_amount": { + "description": "Maximum allowed delegation amount in motes", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "reserved_slots": { + "description": "Slots reserved for specific delegators", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + } + }, + "additionalProperties": false + }, + "DelegatorBid": { + "description": "Represents a party delegating their stake to a validator (or \"delegatee\")", + "type": "object", + "required": [ + "bonding_purse", + "delegator_kind", + "staked_amount", + "validator_public_key" + ], + "properties": { + "delegator_kind": { + "$ref": "#/definitions/DelegatorKind" + }, + "staked_amount": { + "$ref": "#/definitions/U512" + }, + "bonding_purse": { + "$ref": "#/definitions/URef" + }, + "validator_public_key": { + "$ref": "#/definitions/PublicKey" + }, + "vesting_schedule": { + "anyOf": [ + { + "$ref": "#/definitions/VestingSchedule" + }, + { + "type": "null" + } + ] + } + 
}, + "additionalProperties": false + }, + "Bridge": { + "description": "A bridge record pointing to a new `ValidatorBid` after the public key was changed.", + "type": "object", + "required": [ + "era_id", + "new_validator_public_key", + "old_validator_public_key" + ], + "properties": { + "old_validator_public_key": { + "description": "Previous validator public key associated with the bid.", + "allOf": [ + { + "$ref": "#/definitions/PublicKey" + } + ] + }, + "new_validator_public_key": { + "description": "New validator public key associated with the bid.", + "allOf": [ + { + "$ref": "#/definitions/PublicKey" + } + ] + }, + "era_id": { + "description": "Era when bridge record was created.", + "allOf": [ + { + "$ref": "#/definitions/EraId" + } + ] + } + }, + "additionalProperties": false + }, + "ValidatorCredit": { + "description": "Validator credit record.", + "type": "object", + "required": [ + "amount", + "era_id", + "validator_public_key" + ], + "properties": { + "validator_public_key": { + "description": "Validator public key", + "allOf": [ + { + "$ref": "#/definitions/PublicKey" + } + ] + }, + "era_id": { + "description": "The era id the credit was created.", + "allOf": [ + { + "$ref": "#/definitions/EraId" + } + ] + }, + "amount": { + "description": "The credit amount.", + "allOf": [ + { + "$ref": "#/definitions/U512" + } + ] + } + }, + "additionalProperties": false + }, + "Reservation": { + "description": "Represents a validator reserving a slot for specific delegator", + "type": "object", + "required": [ + "delegation_rate", + "delegator_kind", + "validator_public_key" + ], + "properties": { + "delegator_kind": { + "description": "Delegator kind.", + "allOf": [ + { + "$ref": "#/definitions/DelegatorKind" + } + ] + }, + "validator_public_key": { + "description": "Validator public key.", + "allOf": [ + { + "$ref": "#/definitions/PublicKey" + } + ] + }, + "delegation_rate": { + "description": "Individual delegation rate.", + "type": "integer", + "format": 
"uint8", + "minimum": 0.0 + } + }, + "additionalProperties": false + }, + "Unbond": { + "type": "object", + "required": [ + "eras", + "unbond_kind", + "validator_public_key" + ], + "properties": { + "validator_public_key": { + "description": "Validators public key.", + "allOf": [ + { + "$ref": "#/definitions/PublicKey" + } + ] + }, + "unbond_kind": { + "description": "Unbond kind.", + "allOf": [ + { + "$ref": "#/definitions/UnbondKind" + } + ] + }, + "eras": { + "description": "Unbond amounts per era.", + "type": "array", + "items": { + "$ref": "#/definitions/UnbondEra" + } + } + }, + "additionalProperties": false + }, + "UnbondKind": { + "description": "Unbond variants.", + "oneOf": [ + { + "type": "object", + "required": [ + "Validator" + ], + "properties": { + "Validator": { + "$ref": "#/definitions/PublicKey" + } + }, + "additionalProperties": false + }, + { + "type": "object", + "required": [ + "DelegatedPublicKey" + ], + "properties": { + "DelegatedPublicKey": { + "$ref": "#/definitions/PublicKey" + } + }, + "additionalProperties": false + }, + { + "type": "object", + "required": [ + "DelegatedPurse" + ], + "properties": { + "DelegatedPurse": { + "type": "string" + } + }, + "additionalProperties": false + } + ] + }, + "UnbondEra": { + "description": "Unbond amounts per era.", + "type": "object", + "required": [ + "amount", + "bonding_purse", + "era_of_creation" + ], + "properties": { + "bonding_purse": { + "description": "Bonding Purse", + "allOf": [ + { + "$ref": "#/definitions/URef" + } + ] + }, + "era_of_creation": { + "description": "Era in which this unbonding request was created.", + "allOf": [ + { + "$ref": "#/definitions/EraId" + } + ] + }, + "amount": { + "description": "Unbonding Amount.", + "allOf": [ + { + "$ref": "#/definitions/U512" + } + ] + }, + "new_validator": { + "description": "The validator public key to re-delegate to.", + "anyOf": [ + { + "$ref": "#/definitions/PublicKey" + }, + { + "type": "null" + } + ] + } + }, + 
"additionalProperties": false + }, + "ExecutionResultV2": { + "description": "The result of executing a single transaction.", + "type": "object", + "required": [ + "consumed", + "cost", + "current_price", + "effects", + "initiator", + "limit", + "refund", + "size_estimate", + "transfers" + ], + "properties": { + "initiator": { + "description": "Who initiated this transaction.", + "allOf": [ + { + "$ref": "#/definitions/InitiatorAddr" + } + ] + }, + "error_message": { + "description": "If there is no error message, this execution was processed successfully. If there is an error message, this execution failed to fully process for the stated reason.", + "type": [ + "string", + "null" + ] + }, + "current_price": { + "description": "The current gas price. I.e. how many motes are charged for each unit of computation.", + "type": "integer", + "format": "uint8", + "minimum": 0.0 + }, + "limit": { + "description": "The maximum allowed gas limit for this transaction", + "allOf": [ + { + "$ref": "#/definitions/Gas" + } + ] + }, + "consumed": { + "description": "How much gas was consumed executing this transaction.", + "allOf": [ + { + "$ref": "#/definitions/Gas" + } + ] + }, + "cost": { + "description": "How much was paid for this transaction.", + "allOf": [ + { + "$ref": "#/definitions/U512" + } + ] + }, + "refund": { + "description": "How much unconsumed gas was refunded (if any)?", + "allOf": [ + { + "$ref": "#/definitions/U512" + } + ] + }, + "transfers": { + "description": "A record of transfers performed while executing this transaction.", + "type": "array", + "items": { + "$ref": "#/definitions/Transfer" + } + }, + "size_estimate": { + "description": "The size estimate of the transaction", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "effects": { + "description": "The effects of executing this transaction.", + "allOf": [ + { + "$ref": "#/definitions/Effects" + } + ] + } + }, + "additionalProperties": false + }, + "Gas": { + "description": "The 
`Gas` struct represents a `U512` amount of gas.", + "allOf": [ + { + "$ref": "#/definitions/U512" + } + ] + }, + "Transfer": { + "description": "A versioned wrapper for a transfer.", + "oneOf": [ + { + "description": "A version 1 transfer.", + "type": "object", + "required": [ + "Version1" + ], + "properties": { + "Version1": { + "$ref": "#/definitions/TransferV1" + } + }, + "additionalProperties": false + }, + { + "description": "A version 2 transfer.", + "type": "object", + "required": [ + "Version2" + ], + "properties": { + "Version2": { + "$ref": "#/definitions/TransferV2" + } + }, + "additionalProperties": false + } + ] + }, + "TransferV2": { + "description": "Represents a version 2 transfer from one purse to another.", + "type": "object", + "required": [ + "amount", + "from", + "gas", + "source", + "target", + "transaction_hash" + ], + "properties": { + "transaction_hash": { + "description": "Transaction that created the transfer.", + "allOf": [ + { + "$ref": "#/definitions/TransactionHash" + } + ] + }, + "from": { + "description": "Entity from which transfer was executed.", + "allOf": [ + { + "$ref": "#/definitions/InitiatorAddr" + } + ] + }, + "to": { + "description": "Account to which funds are transferred.", + "anyOf": [ + { + "$ref": "#/definitions/AccountHash" + }, + { + "type": "null" + } + ] + }, + "source": { + "description": "Source purse.", + "allOf": [ + { + "$ref": "#/definitions/URef" + } + ] + }, + "target": { + "description": "Target purse.", + "allOf": [ + { + "$ref": "#/definitions/URef" + } + ] + }, + "amount": { + "description": "Transfer amount.", + "allOf": [ + { + "$ref": "#/definitions/U512" + } + ] + }, + "gas": { + "description": "Gas.", + "allOf": [ + { + "$ref": "#/definitions/Gas" + } + ] + }, + "id": { + "description": "User-defined ID.", + "type": [ + "integer", + "null" + ], + "format": "uint64", + "minimum": 0.0 + } + }, + "additionalProperties": false + }, + "Effects": { + "description": "A log of all transforms produced 
during execution.", + "type": "array", + "items": { + "$ref": "#/definitions/TransformV2" + } + }, + "TransformV2": { + "description": "A transformation performed while executing a deploy.", + "type": "object", + "required": [ + "key", + "kind" + ], + "properties": { + "key": { + "$ref": "#/definitions/Key" + }, + "kind": { + "$ref": "#/definitions/TransformKindV2" + } + }, + "additionalProperties": false + }, + "TransformKindV2": { + "description": "Representation of a single transformation occurring during execution.\n\nNote that all arithmetic variants of `TransformKindV2` are commutative which means that a given collection of them can be executed in any order to produce the same end result.", + "oneOf": [ + { + "description": "An identity transformation that does not modify a value in the global state.\n\nCreated as a result of reading from the global state.", + "type": "string", + "enum": [ + "Identity" + ] + }, + { + "description": "Writes a new value in the global state.", + "type": "object", + "required": [ + "Write" + ], + "properties": { + "Write": { + "$ref": "#/definitions/StoredValue" + } + }, + "additionalProperties": false + }, + { + "description": "A wrapping addition of an `i32` to an existing numeric value (not necessarily an `i32`) in the global state.", + "type": "object", + "required": [ + "AddInt32" + ], + "properties": { + "AddInt32": { + "type": "integer", + "format": "int32" + } + }, + "additionalProperties": false + }, + { + "description": "A wrapping addition of a `u64` to an existing numeric value (not necessarily an `u64`) in the global state.", + "type": "object", + "required": [ + "AddUInt64" + ], + "properties": { + "AddUInt64": { + "type": "integer", + "format": "uint64", + "minimum": 0.0 + } + }, + "additionalProperties": false + }, + { + "description": "A wrapping addition of a `U128` to an existing numeric value (not necessarily an `U128`) in the global state.", + "type": "object", + "required": [ + "AddUInt128" + ], + 
"properties": { + "AddUInt128": { + "$ref": "#/definitions/U128" + } + }, + "additionalProperties": false + }, + { + "description": "A wrapping addition of a `U256` to an existing numeric value (not necessarily an `U256`) in the global state.", + "type": "object", + "required": [ + "AddUInt256" + ], + "properties": { + "AddUInt256": { + "$ref": "#/definitions/U256" + } + }, + "additionalProperties": false + }, + { + "description": "A wrapping addition of a `U512` to an existing numeric value (not necessarily an `U512`) in the global state.", + "type": "object", + "required": [ + "AddUInt512" + ], + "properties": { + "AddUInt512": { + "$ref": "#/definitions/U512" + } + }, + "additionalProperties": false + }, + { + "description": "Adds new named keys to an existing entry in the global state.\n\nThis transform assumes that the existing stored value is either an Account or a Contract.", + "type": "object", + "required": [ + "AddKeys" + ], + "properties": { + "AddKeys": { + "$ref": "#/definitions/NamedKeys" + } + }, + "additionalProperties": false + }, + { + "description": "Removes the pathing to the global state entry of the specified key. 
The pruned element remains reachable from previously generated global state root hashes, but will not be included in the next generated global state root hash and subsequent state accumulated from it.", + "type": "object", + "required": [ + "Prune" + ], + "properties": { + "Prune": { + "$ref": "#/definitions/Key" + } + }, + "additionalProperties": false + }, + { + "description": "Represents the case where applying a transform would cause an error.", + "type": "object", + "required": [ + "Failure" + ], + "properties": { + "Failure": { + "$ref": "#/definitions/TransformError" + } + }, + "additionalProperties": false + } + ] + }, + "StoredValue": { + "description": "A value stored in Global State.", + "oneOf": [ + { + "description": "A CLValue.", + "type": "object", + "required": [ + "CLValue" + ], + "properties": { + "CLValue": { + "$ref": "#/definitions/CLValue" + } + }, + "additionalProperties": false + }, + { + "description": "An account.", + "type": "object", + "required": [ + "Account" + ], + "properties": { + "Account": { + "$ref": "#/definitions/Account" + } + }, + "additionalProperties": false + }, + { + "description": "Contract wasm.", + "type": "object", + "required": [ + "ContractWasm" + ], + "properties": { + "ContractWasm": { + "$ref": "#/definitions/ContractWasm" + } + }, + "additionalProperties": false + }, + { + "description": "A contract.", + "type": "object", + "required": [ + "Contract" + ], + "properties": { + "Contract": { + "$ref": "#/definitions/Contract" + } + }, + "additionalProperties": false + }, + { + "description": "A contract package.", + "type": "object", + "required": [ + "ContractPackage" + ], + "properties": { + "ContractPackage": { + "$ref": "#/definitions/ContractPackage" + } + }, + "additionalProperties": false + }, + { + "description": "A version 1 transfer.", + "type": "object", + "required": [ + "Transfer" + ], + "properties": { + "Transfer": { + "$ref": "#/definitions/TransferV1" + } + }, + "additionalProperties": false + }, + 
{ + "description": "Info about a deploy.", + "type": "object", + "required": [ + "DeployInfo" + ], + "properties": { + "DeployInfo": { + "$ref": "#/definitions/DeployInfo" + } + }, + "additionalProperties": false + }, + { + "description": "Info about an era.", + "type": "object", + "required": [ + "EraInfo" + ], + "properties": { + "EraInfo": { + "$ref": "#/definitions/EraInfo" + } + }, + "additionalProperties": false + }, + { + "description": "Variant that stores [`Bid`].", + "type": "object", + "required": [ + "Bid" + ], + "properties": { + "Bid": { + "$ref": "#/definitions/Bid" + } + }, + "additionalProperties": false + }, + { + "description": "Variant that stores withdraw information.", + "type": "object", + "required": [ + "Withdraw" + ], + "properties": { + "Withdraw": { + "type": "array", + "items": { + "$ref": "#/definitions/WithdrawPurse" + } + } + }, + "additionalProperties": false + }, + { + "description": "Unbonding information.", + "type": "object", + "required": [ + "Unbonding" + ], + "properties": { + "Unbonding": { + "type": "array", + "items": { + "$ref": "#/definitions/UnbondingPurse" + } + } + }, + "additionalProperties": false + }, + { + "description": "An `AddressableEntity`.", + "type": "object", + "required": [ + "AddressableEntity" + ], + "properties": { + "AddressableEntity": { + "$ref": "#/definitions/AddressableEntity" + } + }, + "additionalProperties": false + }, + { + "description": "Variant that stores [`BidKind`].", + "type": "object", + "required": [ + "BidKind" + ], + "properties": { + "BidKind": { + "$ref": "#/definitions/BidKind" + } + }, + "additionalProperties": false + }, + { + "description": "A smart contract `Package`.", + "type": "object", + "required": [ + "SmartContract" + ], + "properties": { + "SmartContract": { + "$ref": "#/definitions/Package" + } + }, + "additionalProperties": false + }, + { + "description": "A record of byte code.", + "type": "object", + "required": [ + "ByteCode" + ], + "properties": { + "ByteCode": 
{ + "$ref": "#/definitions/ByteCode" + } + }, + "additionalProperties": false + }, + { + "description": "Variant that stores a message topic.", + "type": "object", + "required": [ + "MessageTopic" + ], + "properties": { + "MessageTopic": { + "$ref": "#/definitions/MessageTopicSummary" + } + }, + "additionalProperties": false + }, + { + "description": "Variant that stores a message digest.", + "type": "object", + "required": [ + "Message" + ], + "properties": { + "Message": { + "$ref": "#/definitions/MessageChecksum" + } + }, + "additionalProperties": false + }, + { + "description": "A NamedKey record.", + "type": "object", + "required": [ + "NamedKey" + ], + "properties": { + "NamedKey": { + "$ref": "#/definitions/NamedKeyValue" + } + }, + "additionalProperties": false + }, + { + "description": "A prepayment record.", + "type": "object", + "required": [ + "Prepayment" + ], + "properties": { + "Prepayment": { + "$ref": "#/definitions/PrepaymentKind" + } + }, + "additionalProperties": false + }, + { + "description": "An entrypoint record.", + "type": "object", + "required": [ + "EntryPoint" + ], + "properties": { + "EntryPoint": { + "$ref": "#/definitions/EntryPointValue" + } + }, + "additionalProperties": false + }, + { + "description": "Raw bytes. 
Similar to a [`crate::StoredValue::CLValue`] but does not incur overhead of a [`crate::CLValue`] and [`crate::CLType`].", + "type": "object", + "required": [ + "RawBytes" + ], + "properties": { + "RawBytes": { + "type": "string" + } + }, + "additionalProperties": false + } + ] + }, + "Account": { + "description": "Represents an Account in the global state.", + "type": "object", + "required": [ + "account_hash", + "action_thresholds", + "associated_keys", + "main_purse", + "named_keys" + ], + "properties": { + "account_hash": { + "$ref": "#/definitions/AccountHash" + }, + "named_keys": { + "$ref": "#/definitions/NamedKeys" + }, + "main_purse": { + "$ref": "#/definitions/URef" + }, + "associated_keys": { + "$ref": "#/definitions/AccountAssociatedKeys" + }, + "action_thresholds": { + "$ref": "#/definitions/AccountActionThresholds" + } + }, + "additionalProperties": false + }, + "NamedKeys": { + "description": "A collection of named keys.", + "type": "array", + "items": { + "$ref": "#/definitions/NamedKey" + } + }, + "AccountAssociatedKeys": { + "description": "A collection of weighted public keys (represented as account hashes) associated with an account.", + "allOf": [ + { + "$ref": "#/definitions/Array_of_AssociatedKey" + } + ] + }, + "Array_of_AssociatedKey": { + "type": "array", + "items": { + "$ref": "#/definitions/AssociatedKey" + } + }, + "AssociatedKey": { + "description": "A weighted public key.", + "type": "object", + "required": [ + "account_hash", + "weight" + ], + "properties": { + "account_hash": { + "description": "The account hash of the public key.", + "allOf": [ + { + "$ref": "#/definitions/AccountHash" + } + ] + }, + "weight": { + "description": "The weight assigned to the public key.", + "allOf": [ + { + "$ref": "#/definitions/AccountAssociatedKeyWeight" + } + ] + } + } + }, + "AccountAssociatedKeyWeight": { + "description": "The weight associated with public keys in an account's associated keys.", + "type": "integer", + "format": "uint8", + 
"minimum": 0.0 + }, + "AccountActionThresholds": { + "description": "Thresholds that have to be met when executing an action of a certain type.", + "type": "object", + "required": [ + "deployment", + "key_management" + ], + "properties": { + "deployment": { + "description": "Threshold for deploy execution.", + "allOf": [ + { + "$ref": "#/definitions/AccountAssociatedKeyWeight" + } + ] + }, + "key_management": { + "description": "Threshold for managing action threshold.", + "allOf": [ + { + "$ref": "#/definitions/AccountAssociatedKeyWeight" + } + ] + } + } + }, + "ContractWasm": { + "description": "A container for contract's WASM bytes.", + "type": "object", + "required": [ + "bytes" + ], + "properties": { + "bytes": { + "$ref": "#/definitions/Bytes" + } + } + }, + "Contract": { + "description": "Methods and type signatures supported by a contract.", + "type": "object", + "required": [ + "contract_package_hash", + "contract_wasm_hash", + "entry_points", + "named_keys", + "protocol_version" + ], + "properties": { + "contract_package_hash": { + "$ref": "#/definitions/ContractPackageHash" + }, + "contract_wasm_hash": { + "$ref": "#/definitions/ContractWasmHash" + }, + "named_keys": { + "$ref": "#/definitions/NamedKeys" + }, + "entry_points": { + "type": "array", + "items": { + "$ref": "#/definitions/EntryPoint" + } + }, + "protocol_version": { + "$ref": "#/definitions/ProtocolVersion" + } + } + }, + "ContractWasmHash": { + "description": "The hash address of the contract wasm", + "type": "string" + }, + "EntryPoint": { + "description": "Type signature of a method. 
Order of arguments matter since can be referenced by index as well as name.", + "type": "object", + "required": [ + "access", + "args", + "entry_point_type", + "name", + "ret" + ], + "properties": { + "name": { + "type": "string" + }, + "args": { + "type": "array", + "items": { + "$ref": "#/definitions/Parameter" + } + }, + "ret": { + "$ref": "#/definitions/CLType" + }, + "access": { + "$ref": "#/definitions/EntryPointAccess" + }, + "entry_point_type": { + "$ref": "#/definitions/EntryPointType" + } + } + }, + "Parameter": { + "description": "Parameter to a method", + "type": "object", + "required": [ + "cl_type", + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "cl_type": { + "$ref": "#/definitions/CLType" + } + } + }, + "EntryPointAccess": { + "description": "Enum describing the possible access control options for a contract entry point (method).", + "oneOf": [ + { + "description": "Anyone can call this method (no access controls).", + "type": "string", + "enum": [ + "Public" + ] + }, + { + "description": "Only users from the listed groups may call this method. Note: if the list is empty then this method is not callable from outside the contract.", + "type": "object", + "required": [ + "Groups" + ], + "properties": { + "Groups": { + "type": "array", + "items": { + "$ref": "#/definitions/Group" + } + } + }, + "additionalProperties": false + }, + { + "description": "Can't be accessed directly but are kept in the derived wasm bytes.", + "type": "string", + "enum": [ + "Template" + ] + } + ] + }, + "Group": { + "description": "A (labelled) \"user group\". Each method of a versioned contract may be associated with one or more user groups which are allowed to call it.", + "type": "string" + }, + "EntryPointType": { + "description": "Context of method execution\n\nMost significant bit represents version i.e. 
- 0b0 -> 0.x/1.x (session & contracts) - 0b1 -> 2.x and later (introduced installer, utility entry points)", + "oneOf": [ + { + "description": "Runs using the calling entity's context. In v1.x this was used for both \"session\" code run using the originating Account's context, and also for \"StoredSession\" code that ran in the caller's context. While this made systemic sense due to the way the runtime context nesting works, this dual usage was very confusing to most human beings.\n\nIn v2.x the renamed Caller variant is exclusively used for wasm run using the initiating account entity's context. Previously installed 1.x stored session code should continue to work as the binary value matches but we no longer allow such logic to be upgraded, nor do we allow new stored session to be installed.", + "type": "string", + "enum": [ + "Caller" + ] + }, + { + "description": "Runs using the called entity's context.", + "type": "string", + "enum": [ + "Called" + ] + }, + { + "description": "Extract a subset of bytecode and installs it as a new smart contract. Runs using the called entity's context.", + "type": "string", + "enum": [ + "Factory" + ] + } + ] + }, + "ContractPackage": { + "description": "Contract definition, metadata, and security container.", + "type": "object", + "required": [ + "access_key", + "disabled_versions", + "groups", + "lock_status", + "versions" + ], + "properties": { + "access_key": { + "description": "Key used to add or disable versions", + "allOf": [ + { + "$ref": "#/definitions/URef" + } + ] + }, + "versions": { + "description": "All versions (enabled & disabled)", + "type": "array", + "items": { + "$ref": "#/definitions/ContractVersion" + } + }, + "disabled_versions": { + "description": "Disabled versions", + "type": "array", + "items": { + "$ref": "#/definitions/ContractVersionKey" + }, + "uniqueItems": true + }, + "groups": { + "description": "Mapping maintaining the set of URefs associated with each \"user group\". 
This can be used to control access to methods in a particular version of the contract. A method is callable by any context which \"knows\" any of the URefs associated with the method's user group.", + "allOf": [ + { + "$ref": "#/definitions/Array_of_NamedUserGroup" + } + ] + }, + "lock_status": { + "description": "A flag that determines whether a contract is locked", + "allOf": [ + { + "$ref": "#/definitions/ContractPackageStatus" + } + ] + } + } + }, + "ContractVersion": { + "type": "object", + "required": [ + "contract_hash", + "contract_version", + "protocol_version_major" + ], + "properties": { + "protocol_version_major": { + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "contract_version": { + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "contract_hash": { + "$ref": "#/definitions/ContractHash" + } + } + }, + "ContractVersionKey": { + "description": "Major element of `ProtocolVersion` combined with `ContractVersion`.", + "type": "array", + "items": [ + { + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + { + "type": "integer", + "format": "uint32", + "minimum": 0.0 + } + ], + "maxItems": 2, + "minItems": 2 + }, + "Array_of_NamedUserGroup": { + "type": "array", + "items": { + "$ref": "#/definitions/NamedUserGroup" + } + }, + "NamedUserGroup": { + "type": "object", + "required": [ + "group_name", + "group_users" + ], + "properties": { + "group_name": { + "allOf": [ + { + "$ref": "#/definitions/Group" + } + ] + }, + "group_users": { + "type": "array", + "items": { + "$ref": "#/definitions/URef" + }, + "uniqueItems": true + } + } + }, + "ContractPackageStatus": { + "description": "A enum to determine the lock status of the contract package.", + "oneOf": [ + { + "description": "The package is locked and cannot be versioned.", + "type": "string", + "enum": [ + "Locked" + ] + }, + { + "description": "The package is unlocked and can be versioned.", + "type": "string", + "enum": [ + "Unlocked" + ] + } + ] + }, + 
"AddressableEntity": { + "description": "Methods and type signatures supported by a contract.", + "type": "object", + "required": [ + "action_thresholds", + "associated_keys", + "byte_code_hash", + "entity_kind", + "main_purse", + "package_hash", + "protocol_version" + ], + "properties": { + "protocol_version": { + "$ref": "#/definitions/ProtocolVersion" + }, + "entity_kind": { + "$ref": "#/definitions/EntityKind" + }, + "package_hash": { + "$ref": "#/definitions/PackageHash" + }, + "byte_code_hash": { + "$ref": "#/definitions/ByteCodeHash" + }, + "main_purse": { + "$ref": "#/definitions/URef" + }, + "associated_keys": { + "$ref": "#/definitions/EntityAssociatedKeys" + }, + "action_thresholds": { + "$ref": "#/definitions/EntityActionThresholds" + } + } + }, + "EntityKind": { + "description": "The type of Package.", + "oneOf": [ + { + "description": "Package associated with a native contract implementation.", + "type": "object", + "required": [ + "System" + ], + "properties": { + "System": { + "$ref": "#/definitions/SystemEntityType" + } + }, + "additionalProperties": false + }, + { + "description": "Package associated with an Account hash.", + "type": "object", + "required": [ + "Account" + ], + "properties": { + "Account": { + "$ref": "#/definitions/AccountHash" + } + }, + "additionalProperties": false + }, + { + "description": "Packages associated with Wasm stored on chain.", + "type": "object", + "required": [ + "SmartContract" + ], + "properties": { + "SmartContract": { + "$ref": "#/definitions/ContractRuntimeTag" + } + }, + "additionalProperties": false + } + ] + }, + "SystemEntityType": { + "description": "System contract types.\n\nUsed by converting to a `u32` and passing as the `system_contract_index` argument of `ext_ffi::casper_get_system_contract()`.", + "oneOf": [ + { + "description": "Mint contract.", + "type": "string", + "enum": [ + "Mint" + ] + }, + { + "description": "Handle Payment contract.", + "type": "string", + "enum": [ + "HandlePayment" + ] 
+ }, + { + "description": "Standard Payment contract.", + "type": "string", + "enum": [ + "StandardPayment" + ] + }, + { + "description": "Auction contract.", + "type": "string", + "enum": [ + "Auction" + ] + } + ] + }, + "ContractRuntimeTag": { + "description": "Runtime used to execute a Transaction.", + "type": "string", + "enum": [ + "VmCasperV1", + "VmCasperV2" + ] + }, + "PackageHash": { + "description": "The hex-encoded address of the Package.", + "type": "string" + }, + "ByteCodeHash": { + "description": "The hash address of the contract wasm", + "type": "string" + }, + "EntityAssociatedKeys": { + "description": "A collection of weighted public keys (represented as account hashes) associated with an account.", + "allOf": [ + { + "$ref": "#/definitions/Array_of_AssociatedKey" + } + ] + }, + "EntityActionThresholds": { + "description": "Thresholds that have to be met when executing an action of a certain type.", + "type": "object", + "required": [ + "deployment", + "key_management", + "upgrade_management" + ], + "properties": { + "deployment": { + "description": "Threshold for deploy execution.", + "allOf": [ + { + "$ref": "#/definitions/EntityAssociatedKeyWeight" + } + ] + }, + "upgrade_management": { + "description": "Threshold for upgrading contracts.", + "allOf": [ + { + "$ref": "#/definitions/EntityAssociatedKeyWeight" + } + ] + }, + "key_management": { + "description": "Threshold for managing action threshold.", + "allOf": [ + { + "$ref": "#/definitions/EntityAssociatedKeyWeight" + } + ] + } + } + }, + "EntityAssociatedKeyWeight": { + "description": "The weight associated with public keys in an account's associated keys.", + "type": "integer", + "format": "uint8", + "minimum": 0.0 + }, + "Package": { + "description": "Entity definition, metadata, and security container.", + "type": "object", + "required": [ + "disabled_versions", + "groups", + "lock_status", + "versions" + ], + "properties": { + "versions": { + "description": "All versions (enabled & 
disabled).", + "allOf": [ + { + "$ref": "#/definitions/Array_of_EntityVersionAndEntityAddr" + } + ] + }, + "disabled_versions": { + "description": "Collection of disabled entity versions. The runtime will not permit disabled entity versions to be executed.", + "type": "array", + "items": { + "$ref": "#/definitions/EntityVersionKey" + }, + "uniqueItems": true + }, + "groups": { + "description": "Mapping maintaining the set of URefs associated with each \"user group\". This can be used to control access to methods in a particular version of the entity. A method is callable by any context which \"knows\" any of the URefs associated with the method's user group.", + "allOf": [ + { + "$ref": "#/definitions/Array_of_NamedUserGroup" + } + ] + }, + "lock_status": { + "description": "A flag that determines whether a entity is locked", + "allOf": [ + { + "$ref": "#/definitions/PackageStatus" + } + ] + } + } + }, + "Array_of_EntityVersionAndEntityAddr": { + "type": "array", + "items": { + "$ref": "#/definitions/EntityVersionAndEntityAddr" + } + }, + "EntityVersionAndEntityAddr": { + "type": "object", + "required": [ + "entity_addr", + "entity_version_key" + ], + "properties": { + "entity_version_key": { + "allOf": [ + { + "$ref": "#/definitions/EntityVersionKey" + } + ] + }, + "entity_addr": { + "allOf": [ + { + "$ref": "#/definitions/EntityAddr" + } + ] + } + } + }, + "EntityVersionKey": { + "description": "Major element of `ProtocolVersion` combined with `EntityVersion`.", + "type": "object", + "required": [ + "entity_version", + "protocol_version_major" + ], + "properties": { + "protocol_version_major": { + "description": "Major element of `ProtocolVersion` a `ContractVersion` is compatible with.", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "entity_version": { + "description": "Automatically incremented value for a contract version within a major `ProtocolVersion`.", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + } + } + }, + 
"EntityAddr": { + "description": "The address for an AddressableEntity which contains the 32 bytes and tagging information.", + "anyOf": [ + { + "description": "The address for a system entity account or contract.", + "type": "string" + }, + { + "description": "The address of an entity that corresponds to an Account.", + "type": "string" + }, + { + "description": "The address of an entity that corresponds to a Userland smart contract.", + "type": "string" + } + ] + }, + "PackageStatus": { + "description": "A enum to determine the lock status of the package.", + "oneOf": [ + { + "description": "The package is locked and cannot be versioned.", + "type": "string", + "enum": [ + "Locked" + ] + }, + { + "description": "The package is unlocked and can be versioned.", + "type": "string", + "enum": [ + "Unlocked" + ] + } + ] + }, + "ByteCode": { + "description": "A container for contract's Wasm bytes.", + "type": "object", + "required": [ + "bytes", + "kind" + ], + "properties": { + "kind": { + "$ref": "#/definitions/ByteCodeKind" + }, + "bytes": { + "$ref": "#/definitions/Bytes" + } + } + }, + "ByteCodeKind": { + "description": "The type of Byte code.", + "oneOf": [ + { + "description": "Empty byte code.", + "type": "string", + "enum": [ + "Empty" + ] + }, + { + "description": "Byte code to be executed with the version 1 Casper execution engine.", + "type": "string", + "enum": [ + "V1CasperWasm" + ] + }, + { + "description": "Byte code to be executed with the version 2 Casper execution engine.", + "type": "string", + "enum": [ + "V2CasperWasm" + ] + } + ] + }, + "MessageTopicSummary": { + "description": "Summary of a message topic that will be stored in global state.", + "type": "object", + "required": [ + "blocktime", + "message_count", + "topic_name" + ], + "properties": { + "message_count": { + "description": "Number of messages in this topic.", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "blocktime": { + "description": "Block timestamp in which 
these messages were emitted.", + "allOf": [ + { + "$ref": "#/definitions/BlockTime" + } + ] + }, + "topic_name": { + "description": "Name of the topic.", + "type": "string" + } + } + }, + "BlockTime": { + "description": "A newtype wrapping a [`u64`] which represents the block time.", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "MessageChecksum": { + "description": "Message checksum as a formatted string.", + "type": "string" + }, + "NamedKeyValue": { + "description": "A NamedKey value.", + "type": "object", + "required": [ + "name", + "named_key" + ], + "properties": { + "named_key": { + "description": "The actual `Key` encoded as a CLValue.", + "allOf": [ + { + "$ref": "#/definitions/CLValue" + } + ] + }, + "name": { + "description": "The name of the `Key` encoded as a CLValue.", + "allOf": [ + { + "$ref": "#/definitions/CLValue" + } + ] + } + } + }, + "PrepaymentKind": { + "description": "Container for bytes recording location, type and data for a gas pre payment", + "type": "object", + "required": [ + "prepayment_data", + "prepayment_kind", + "receipt" + ], + "properties": { + "receipt": { + "$ref": "#/definitions/Digest" + }, + "prepayment_kind": { + "type": "integer", + "format": "uint8", + "minimum": 0.0 + }, + "prepayment_data": { + "$ref": "#/definitions/Bytes" + } + } + }, + "EntryPointValue": { + "description": "The encaspulated representation of entrypoints.", + "oneOf": [ + { + "description": "Entrypoints to be executed against the V1 Casper VM.", + "type": "object", + "required": [ + "V1CasperVm" + ], + "properties": { + "V1CasperVm": { + "$ref": "#/definitions/EntityEntryPoint" + } + }, + "additionalProperties": false + } + ] + }, + "EntityEntryPoint": { + "description": "Type signature of a method. 
Order of arguments matter since can be referenced by index as well as name.", + "type": "object", + "required": [ + "access", + "args", + "entry_point_payment", + "entry_point_type", + "name", + "ret" + ], + "properties": { + "name": { + "type": "string" + }, + "args": { + "type": "array", + "items": { + "$ref": "#/definitions/Parameter" + } + }, + "ret": { + "$ref": "#/definitions/CLType" + }, + "access": { + "$ref": "#/definitions/EntryPointAccess" + }, + "entry_point_type": { + "$ref": "#/definitions/EntryPointType" + }, + "entry_point_payment": { + "$ref": "#/definitions/EntryPointPayment" + } + } + }, + "EntryPointPayment": { + "description": "An enum specifying who pays for the invocation and execution of the entrypoint.", + "oneOf": [ + { + "description": "The caller must cover costs", + "type": "string", + "enum": [ + "Caller" + ] + }, + { + "description": "Will cover costs if directly invoked.", + "type": "string", + "enum": [ + "DirectInvocationOnly" + ] + }, + { + "description": "will cover costs to execute self including any subsequent invoked contracts", + "type": "string", + "enum": [ + "SelfOnward" + ] + } + ] + }, + "TransformError": { + "description": "Error type for applying and combining transforms.\n\nA `TypeMismatch` occurs when a transform cannot be applied because the types are not compatible (e.g. 
trying to add a number to a string).", + "oneOf": [ + { + "description": "Error while (de)serializing data.", + "type": "object", + "required": [ + "Serialization" + ], + "properties": { + "Serialization": { + "$ref": "#/definitions/BytesreprError" + } + }, + "additionalProperties": false + }, + { + "description": "Type mismatch error.", + "type": "object", + "required": [ + "TypeMismatch" + ], + "properties": { + "TypeMismatch": { + "$ref": "#/definitions/TypeMismatch" + } + }, + "additionalProperties": false + }, + { + "description": "Type no longer supported.", + "type": "string", + "enum": [ + "Deprecated" + ] + } + ] + }, + "BytesreprError": { + "description": "Serialization and deserialization errors.", + "oneOf": [ + { + "description": "Early end of stream while deserializing.", + "type": "string", + "enum": [ + "EarlyEndOfStream" + ] + }, + { + "description": "Formatting error while deserializing.", + "type": "string", + "enum": [ + "Formatting" + ] + }, + { + "description": "Not all input bytes were consumed in [`deserialize`].", + "type": "string", + "enum": [ + "LeftOverBytes" + ] + }, + { + "description": "Out of memory error.", + "type": "string", + "enum": [ + "OutOfMemory" + ] + }, + { + "description": "No serialized representation is available for a value.", + "type": "string", + "enum": [ + "NotRepresentable" + ] + }, + { + "description": "Exceeded a recursion depth limit.", + "type": "string", + "enum": [ + "ExceededRecursionDepth" + ] + } + ] + }, + "TypeMismatch": { + "description": "An error struct representing a type mismatch in [`StoredValue`](crate::StoredValue) operations.", + "type": "object", + "required": [ + "expected", + "found" + ], + "properties": { + "expected": { + "description": "The name of the expected type.", + "type": "string" + }, + "found": { + "description": "The actual type found.", + "type": "string" + } + } + }, + "Message": { + "description": "Message that was emitted by an addressable entity during execution.", + 
"type": "object", + "required": [ + "block_index", + "entity_addr", + "message", + "topic_index", + "topic_name", + "topic_name_hash" + ], + "properties": { + "entity_addr": { + "description": "The identity of the entity that produced the message.", + "allOf": [ + { + "$ref": "#/definitions/EntityAddr" + } + ] + }, + "message": { + "description": "The payload of the message.", + "allOf": [ + { + "$ref": "#/definitions/MessagePayload" + } + ] + }, + "topic_name": { + "description": "The name of the topic on which the message was emitted on.", + "type": "string" + }, + "topic_name_hash": { + "description": "The hash of the name of the topic.", + "allOf": [ + { + "$ref": "#/definitions/TopicNameHash" + } + ] + }, + "topic_index": { + "description": "Message index in the topic.", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "block_index": { + "description": "Message index in the block.", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + } + } + }, + "MessagePayload": { + "description": "The payload of the message emitted by an addressable entity during execution.", + "oneOf": [ + { + "description": "Human readable string message.", + "type": "object", + "required": [ + "String" + ], + "properties": { + "String": { + "type": "string" + } + }, + "additionalProperties": false + }, + { + "description": "Message represented as raw bytes.", + "type": "object", + "required": [ + "Bytes" + ], + "properties": { + "Bytes": { + "$ref": "#/definitions/Bytes" + } + }, + "additionalProperties": false + } + ] + }, + "TopicNameHash": { + "description": "The hash of the name of the message topic.", + "type": "string" + }, + "FinalitySignature": { + "description": "A validator's signature of a block, confirming it is finalized.", + "oneOf": [ + { + "description": "Version 1 of the finality signature.", + "type": "object", + "required": [ + "V1" + ], + "properties": { + "V1": { + "$ref": "#/definitions/FinalitySignatureV1" + } + }, + 
"additionalProperties": false + }, + { + "description": "Version 2 of the finality signature.", + "type": "object", + "required": [ + "V2" + ], + "properties": { + "V2": { + "$ref": "#/definitions/FinalitySignatureV2" + } + }, + "additionalProperties": false + } + ] + }, + "FinalitySignatureV1": { + "description": "A validator's signature of a block, confirming it is finalized.", + "type": "object", + "required": [ + "block_hash", + "era_id", + "public_key", + "signature" + ], + "properties": { + "block_hash": { + "description": "The block hash of the associated block.", + "allOf": [ + { + "$ref": "#/definitions/BlockHash" + } + ] + }, + "era_id": { + "description": "The era in which the associated block was created.", + "allOf": [ + { + "$ref": "#/definitions/EraId" + } + ] + }, + "signature": { + "description": "The signature over the block hash of the associated block.", + "allOf": [ + { + "$ref": "#/definitions/Signature" + } + ] + }, + "public_key": { + "description": "The public key of the signing validator.", + "allOf": [ + { + "$ref": "#/definitions/PublicKey" + } + ] + } + } + }, + "FinalitySignatureV2": { + "description": "A validator's signature of a block, confirming it is finalized.", + "type": "object", + "required": [ + "block_hash", + "block_height", + "chain_name_hash", + "era_id", + "public_key", + "signature" + ], + "properties": { + "block_hash": { + "description": "The block hash of the associated block.", + "allOf": [ + { + "$ref": "#/definitions/BlockHash" + } + ] + }, + "block_height": { + "description": "The height of the associated block.", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "era_id": { + "description": "The era in which the associated block was created.", + "allOf": [ + { + "$ref": "#/definitions/EraId" + } + ] + }, + "chain_name_hash": { + "description": "The hash of the chain name of the associated block.", + "allOf": [ + { + "$ref": "#/definitions/ChainNameDigest" + } + ] + }, + "signature": { + 
"description": "The signature over the block hash of the associated block.", + "allOf": [ + { + "$ref": "#/definitions/Signature" + } + ] + }, + "public_key": { + "description": "The public key of the signing validator.", + "allOf": [ + { + "$ref": "#/definitions/PublicKey" + } + ] + } + } + }, + "ChainNameDigest": { + "description": "Hex-encoded cryptographic hash of a chain name.", + "allOf": [ + { + "$ref": "#/definitions/Digest" + } + ] + } + } +} \ No newline at end of file diff --git a/resources/test/storage/1.5.2/storage-1/storage/casper-net-1/data.lmdb b/resources/test/storage/1.5.2/storage-1/storage/casper-net-1/data.lmdb new file mode 100644 index 0000000000..1da2720f23 Binary files /dev/null and b/resources/test/storage/1.5.2/storage-1/storage/casper-net-1/data.lmdb differ diff --git a/resources/test/storage/1.5.2/storage-1/storage/casper-net-1/data.lmdb-lock b/resources/test/storage/1.5.2/storage-1/storage/casper-net-1/data.lmdb-lock new file mode 100644 index 0000000000..a082482ce5 Binary files /dev/null and b/resources/test/storage/1.5.2/storage-1/storage/casper-net-1/data.lmdb-lock differ diff --git a/resources/test/storage/1.5.2/storage-1/storage/casper-net-1/sse_index b/resources/test/storage/1.5.2/storage-1/storage/casper-net-1/sse_index new file mode 100644 index 0000000000..0b1371d351 Binary files /dev/null and b/resources/test/storage/1.5.2/storage-1/storage/casper-net-1/sse_index differ diff --git a/resources/test/storage/1.5.2/storage-1/storage/casper-net-1/storage.lmdb b/resources/test/storage/1.5.2/storage-1/storage/casper-net-1/storage.lmdb new file mode 100644 index 0000000000..6589c798b8 Binary files /dev/null and b/resources/test/storage/1.5.2/storage-1/storage/casper-net-1/storage.lmdb differ diff --git a/resources/test/storage/1.5.2/storage-1/storage/casper-net-1/storage.lmdb-lock b/resources/test/storage/1.5.2/storage-1/storage/casper-net-1/storage.lmdb-lock new file mode 100644 index 0000000000..5f33a9e9a3 Binary files /dev/null and 
b/resources/test/storage/1.5.2/storage-1/storage/casper-net-1/storage.lmdb-lock differ diff --git a/resources/test/storage/1.5.2/storage-1/storage_info.json b/resources/test/storage/1.5.2/storage-1/storage_info.json new file mode 100644 index 0000000000..f7065c5419 --- /dev/null +++ b/resources/test/storage/1.5.2/storage-1/storage_info.json @@ -0,0 +1,1302 @@ +{ + "net_name": "casper-net-1", + "protocol_version": "1.5.2", + "block_range": [ + 35, + 94 + ], + "blocks": { + "a0d2b616b99812e49553852118012173bf190dcac8299be9d319773a7405e7ac": { + "height": 86, + "era": 9, + "approvals_hashes": [], + "signatures": { + "V1": { + "block_hash": "a0d2b616b99812e49553852118012173bf190dcac8299be9d319773a7405e7ac", + "era_id": 9, + "proofs": { + "0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92": "017bbb6f24b706b7a16c44dbe75c8fad2cd7701db361225ad296b06aa66853ece94804e8cecee76f61828ebad3422706b278da46845150265664091aaec62a890a", + "016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c": "01fc09409ba0e990d22eebb79bf1c85d27b2d145ee110541420a059497b9114fa17b8ab486cddbd7a927d638d61d8edff424284525638e14c0f76d3fcde225d10b", + "019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523": "0120f0ca080c2237c1e04470057959ff3b165e2547d8821f857fafbb238073d5327d46b587136ccf8a4733d53f0996baa11153ad25cfb97ae3a6ffb50433254a0c", + "01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4": "016c5d7bfdb97e82ed4e30c39c434cbd3458e33c35c5a31800820afee42efc35fb4f37d044adf049fc82db00773ad517fe411e58cac579b7a12143b39e4d30ec09", + "01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca": "0154d4c6d16afb98ff1720369d741aa884a5ba785ff862a34050902e67a28d564e64b0ab26b47ab268c17a7c91853184d0b4184035faa99b4e5cab038e2f3dea0c" + } + } + }, + "deploy_hashes": [] + }, + "cb8148f82ff933b3653fbd63faf7c7c212bd34e1f7654364a56b129f73cdefaf": { + "height": 46, + "era": 5, + "approvals_hashes": null, + "signatures": { + "V1": { + "block_hash": 
"cb8148f82ff933b3653fbd63faf7c7c212bd34e1f7654364a56b129f73cdefaf", + "era_id": 5, + "proofs": { + "0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92": "01cb46f48eafdde8d963bd1c19e4a81935eb95865bb09613eb1994ff4fc92ab3de29acdfea42a7d3f03a7131164751512a6704fa7e9a05d4d0b3531943abfee20d", + "016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c": "01953d4b92fd808c0821865b29b20a52b04c1dd3857d2b12e9a33ad3baf2e12b7a9245a93b82043f96bf11f7b3dacf2176b992c74cd893bfd97a199647e50bc105", + "019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523": "01325850bdcc15a8b59d629d04c909f2864c1b24969ea1b6dc8c0c6d5e57b6fe1d7f92a58fd145cf076c291440807e89299a7687e8eb0c53aa0de846d7a6fc9505", + "01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4": "01f6affd41cedaa733f855f249804563a1b7fc061a163bf1d1b9aba04a31409f2a8b50ad947f52597f046f5a2135c978608b838f871898ae0db0f221e4faee4307", + "01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca": "015fcd0f05be54b0df41151bd91b8e0eabb525b4808e9c96a166298dda9233cb229b85c5f5fd5e7f91af1d525c9bb6bf7114f36223893220770c31d32f9b206302" + } + } + }, + "deploy_hashes": [] + }, + "1021135eeec9fa9b32bf3e06b2689f44bb9ffdb34665542c2d117b0b0a9731fb": { + "height": 78, + "era": 8, + "approvals_hashes": null, + "signatures": { + "V1": { + "block_hash": "1021135eeec9fa9b32bf3e06b2689f44bb9ffdb34665542c2d117b0b0a9731fb", + "era_id": 8, + "proofs": { + "0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92": "01c1cb9ed59ea1eae441cdefc7307f451da29bde85d4f49c258edef8e4dac09209b1902aed3fb1a5321c83c883515337b010226f8682d88e61dcb7a55a1391e205", + "016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c": "01d404d4ab96399e749e6b4c5d3b334aa8084d7b48a7e5c80705cfb7aa8b47f4c2742200425f214ed7777c0fbd4d273c7a8656b79c2812731a206010e93fe17207", + "019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523": 
"0189356a822c83edfe5e417047a67b48ffe5c045d49fa1881b4b182a5c2e046f9d68eb3907c982215f7cceca592fe39a282250c17b689caddebb5d37e857a5db08", + "01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4": "01568a9c0f3a5f170bb9885a25920e894103bf178812696af7194e33d1a0c50d80e56ab2497019e32dbfb81b6cdb152e4eceac304667045010e375ec053a71f00a", + "01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca": "01754e08049770f2952fed60192b3b6a2a65829255585be04590ef460142a4d60ca677ffd14a1dc9257675e5a841965e08fef7f35c8f294d4ea3878f55128b3309" + } + } + }, + "deploy_hashes": [] + }, + "faf8913c012d445d4dc7af178d15aa8b041a3b45e63980995bd7a62da5d8673f": { + "height": 80, + "era": 8, + "approvals_hashes": null, + "signatures": { + "V1": { + "block_hash": "faf8913c012d445d4dc7af178d15aa8b041a3b45e63980995bd7a62da5d8673f", + "era_id": 8, + "proofs": { + "0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92": "0178f5aca8508cea6c6f9b22b99e5ed66c183c3c9909ffa77d29e658b0310ed58a854a11fad7789c8a4e7ddc89a342443a411f3e152bf3cbce715604db636a2205", + "016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c": "013ce1a805d847b5b949176c60a68dba2360536caf0583c5c20bedb7226c56c97cb5462c516b852ea801284f06005c77a4fbe3391712bfb6ab935bef6bd740610f", + "019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523": "016e767114f25421c8856718447372737dfb8b967041fba6a263df12af41ec80d643c3897493757d2b7033768b02334eee786141ac128e82dae77b10d196f26407", + "01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4": "013b74f945aab7f55c14d5bd5d1b72c6a901b909bb02ae96cf598ef56669d03adca45a0e841851988a2dcbe4d2425a24fc3746a5f8468245f31beaad032188ff0d", + "01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca": "01fc10350304daba32973ad769db4f25c19236032df95b725ee8c024aaa811ec5d296a2a34043d769562bece9a193d40cceda9f5ccfbf0a707de1dd67b779bc106" + } + } + }, + "deploy_hashes": [] + }, + "2d8b530265d358bd58fcf0c9b61f21d2eb308113816839a86d28a6ab11831337": { + 
"height": 58, + "era": 6, + "approvals_hashes": [ + "0064f9e5242f911736c124c436eb7ae425eb37d750cfc15612f5b3df2075a76d", + "9be78f45253429d11bce1db6c044d29b3e0b0fc6e8e0e98015bf9b8ed326c0d0", + "e6ecd44c2983fe9ac8f164589549d210bf67b7b74cddff4fbdf60fe42e41d7eb", + "f9e2354c4262da873c910bccb1230e35c29b822974d7e28ccd2362d2136e86f5", + "47a41934a6dc1003650c7277659747319ce39ba508c4f13ccdfd58d7ec48ebd7", + "96b0ae48e4fea3e2866e3c9a1475913ef54033fd46bdef328bce02874116bd5d", + "5da97f90027452baa240b38388a3d59c85f75bc6bac746c51b4b5631d74859c8", + "c046041fb4f3b5a56da32a5a2a753d99fa2b422d63c5d8bdaae1cfb72e18a071", + "660bcc38f3b43ceb9c25a0a17e0170c6e2755c3bcfa74104a5bf99b939738323", + "78985db9f0d30a91a6f3ca643888f33a09e2d5d58a45a84e5d09012535b962f3" + ], + "signatures": { + "V1": { + "block_hash": "2d8b530265d358bd58fcf0c9b61f21d2eb308113816839a86d28a6ab11831337", + "era_id": 6, + "proofs": { + "0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92": "01986f42e383aa51382402f99edfdf4c43751229d4c29e8cd0cd3f47bad40baa2e1e4c5980f8408c8eda7cdd71ad41f2ae65cfc27dcbbf7697ad87499638da4709", + "016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c": "0177d63b478b1bfef60caba68703cf28b925a87f5b541af969e641fdc198659f5a4c75dc452ee77a86d05851facdc121fd43dccc20508b741b9a04c95e0e9ebe06", + "019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523": "019ec6d148d34914a34ec0159e49d5e48f7a64f3677b9d2895d9831ce83b9d412d81debfbd683e903fc16c365684398e2fcad892d862a524fac8e93e9f0d970a09", + "01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4": "01d80f7bcc382e4e777256d70d2f5bf3a57872c959f70c9724ed565472171e51e08bc0a27a561f4d3f99a8b1e54f8a4c5fc7ba82891471f69b6715000ee3bc870b", + "01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca": "019b4a46d9af2f49c70e6969bf02c2a63f7d0e257255e09c7444fe281efa00351376d4962ae412dcac2b8532d372603075e6e38e51f834420b5255aa0224f0a101" + } + } + }, + "deploy_hashes": [ + 
"163025024c68c001b1a940c2e63ae4e4654adea462078841b2f66ff6c9e04074", + "1b9d7591e1e800a19a99647849decdb074925f270ea16540ec9b356de9721e4c", + "448bbee6f586414eccb05fb97d28f26e59a4bc542497b323bcad51d71af92d6e", + "a9af14844140bab502565a0e6cacbff0227b19e4209d3495c97e275e5b6a6dac", + "706050b93a7e9bc5a19fd5fab792eb5ef5a9854ec3181fbb054f5dc7d4ee170f", + "a97dc9782e2dce920821d08ee85f2853b5dbf97c57c9349772ed36c723eb3542", + "ef35f210444ef48eb4bea3ea58867037cbd4106af59cba36a533e20625c01523", + "660eeb2cb9fdd88ad3296de498e38b9b97291cccf19cfb1b5c477c2c94f87b65", + "82c80204bbf55182a6785e92d4ff59f500d9cf561aca305b25d7f95ef258f7bf", + "e55f6646b64c204ca7f55d244a8879e5ac1e982a049b1d577f80418274490249" + ] + }, + "a7e8a96a2608a249a831fc57607507f7fe6873afcf0c8af8f61ff05f5e2c8394": { + "height": 51, + "era": 5, + "approvals_hashes": null, + "signatures": { + "V1": { + "block_hash": "a7e8a96a2608a249a831fc57607507f7fe6873afcf0c8af8f61ff05f5e2c8394", + "era_id": 5, + "proofs": { + "0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92": "01735a315b9a6ce63741e84a34b9e27e2245235eb9184573b016b5b6eb7a0038a0badbc19e709eeecd9f7ece2b26b89f00655dc5be57032ca7fc4fb33e5f29b106", + "016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c": "0141977992a0317989ad9b774c49a1cbf3751b402b6b32d2de48add0f8a863e61a2c1a711636a7bb609f79a830ac31cb770d0f3a2b83b186d98c1d8ac6c455ec07", + "019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523": "019c8898f2368e6ce797ddf4d400526f43de91b66b6717fad9c305e3c5931e10dfad523bd154e7d6e6a3861847f4fa956746a3578f15b193ea621938acf8e4a509", + "01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4": "01ac6142980c8e91e26ac54634ba97390faa4501961d7a120b099cbf4184ae055ae2dc72352d371628fb84b02d1df20d1f191cacb84350941cbdf94873471a9a03", + "01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca": 
"0195bd1a586ccc900162d525ddcf6649002979b15eff3972b7ba1d3ae1369b78d9c8226910dc911f2076a636cc3502514d3dc57ba21b12020a3867755a009bf10b" + } + } + }, + "deploy_hashes": [] + }, + "938b44fa8b62572a4820cf285f8aa3c695b03e2ebc1e6cba754523504f01831c": { + "height": 84, + "era": 9, + "approvals_hashes": [], + "signatures": { + "V1": { + "block_hash": "938b44fa8b62572a4820cf285f8aa3c695b03e2ebc1e6cba754523504f01831c", + "era_id": 9, + "proofs": { + "0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92": "01f5e877256c046b04c8f52d4346c89801b29ab02e639259f9a0fed5a8e2dc3b3ef543914fa3592356ce9b2246982615708306592409ddc70808237fc926b09f0a", + "016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c": "018b01cf26c7f550ce26f1ef5611d0df09da556d2611c3dcc1c4ddf5762da0c30e6374349c3206d9083fff4377537f2f79e97a7e266ddfdfe06c87ba7d326df70f", + "019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523": "01ef1752923dedaf0a815ead66d49f2cf889fafb52542014d396a97e415768122b05cdff8f8ceb21680b350ada7004564b20a4d351fd26241c121e54f0a6c3b109", + "01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4": "015068e11c1963cf5c06fc37589a348873b823228b9185146d04f9cda469578b41fe617a5ab442654437bbca398ccebf43290c1497d6f77e1b05bf166a64cb0c0c", + "01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca": "01eace705660fd13b1a10e6a0f03e14763acd1d4109397c07febd4a6f481ae73a26c30422cbf9418229820a9e2d5c35caac9cf2a05665e65de4fcf37a9caffef08" + } + } + }, + "deploy_hashes": [] + }, + "c01371740d9b2883c92b5c519469cae84f82f6057586f33735c58f6e0db40e79": { + "height": 60, + "era": 6, + "approvals_hashes": null, + "signatures": { + "V1": { + "block_hash": "c01371740d9b2883c92b5c519469cae84f82f6057586f33735c58f6e0db40e79", + "era_id": 6, + "proofs": { + "0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92": "017230f32818eef81ef2596462de3d2a5f5cfc9d73e30c8ceb757750383e78a3da1bd5f887b71b81a862ec5a01b8e219b03df294817c873f608f3ae61b6ab93e09", + 
"016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c": "01ab097d5ca951b9fc1abc56956c98456b0c704a72fe8f94775b6e4a263db67a644547d12f28b66d3eb7b36582aaee9c8ddfbf9633885fb703aeb0a2e72be8280c", + "019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523": "018e374c4989aee1e44574d7d22cf07cb776f7110c3f7c9b1bdae00051f2b8fb8dc7502011048979a55a0514d9be5e800f6cb24df250aa0a297a122b69d33d6f0f", + "01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4": "0161529228a54d9decee38997411c5a8a2a027c2230e264a8ac58a01d6165d69a5a8ee36daa60ec380659a3d7655dc3d6846e621ad12a50a3959439130cad4150a", + "01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca": "01ac20ebeb1438ab507eeee56d9475a8c9fd9254fc76c3852191f0c877be362e4a86fb832c35502feb570c02642deec0ae664d2308f9bb0c2be9166d3e3e42cb0a" + } + } + }, + "deploy_hashes": [] + }, + "021aaf989b2159ff7c551ccab2108dec6d2b846f0c9207e9cf0667b357be5659": { + "height": 76, + "era": 8, + "approvals_hashes": null, + "signatures": { + "V1": { + "block_hash": "021aaf989b2159ff7c551ccab2108dec6d2b846f0c9207e9cf0667b357be5659", + "era_id": 8, + "proofs": { + "0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92": "01b82b9d6a43f06068d4aa0a0d7e2e1f255766c5a6cb88508a2c7460acb652b98ee8a5f04418d39c412ed29be6cf4edfe503bc71557ee62bffa7184999c652440e", + "016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c": "01ac8e62376c5f50d2ea8bbb8af8eb482cf9e6e113d337ba9153416be5c4495e5d6c9a80be220ece312e2aec26cbfba6a3263ea3903f6b5f881d6d04d48ccf3b07", + "019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523": "0119db4e2beb2724955ce91715cd71e949313c0f171bdb726eae621d60f9e27df2588a0cddc4ba368bd6264b31e50ef1bdcb8ec88a84ae6af6ab6ac00a10a0a800", + "01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4": "011719bb72ac094fe584d06049b4392f2ed846365153ba656d808408b6c78234cb269e63b33f64fdab1554a3069cf3e86f394a24b805cc1c91db118e9d74cf9903", + 
"01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca": "01759a3353032a34b28413e46d7267119d6e9d7f82e8a4bf90823dc7750d84d47070b18adcede5f3bb799df1560b5b47d946265c0b703f3bc9f8e6322412101c04" + } + } + }, + "deploy_hashes": [] + }, + "4d050f8a53cb8e171c63a865bea6dfb25608c14c561c675fc09a863544e19415": { + "height": 70, + "era": 7, + "approvals_hashes": null, + "signatures": { + "V1": { + "block_hash": "4d050f8a53cb8e171c63a865bea6dfb25608c14c561c675fc09a863544e19415", + "era_id": 7, + "proofs": { + "0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92": "015ab8f04b55a396f6e409dfaf6d228ff912a7eead71425961ea3c9bfd891056187a6ca0081947e87d58fa4cea6f99c4a1751c7638536752a12397a983049d320c", + "016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c": "013a71c907808ff9c27f28af23703ff3add6f8a97b5c350c711a9c7a5d3d359ceb9afcce84a48ecd065fc8ac26b9afa4f92462472e3d9afb4a8666bcb924e83103", + "019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523": "017666860f8f4f90d4381edb09fcce00e029a2cd3bafecbdd7525a485d3a864404b7a8c167fafc02d88d0050b7e78ae3793d33860adae5584906d3fad15155480f", + "01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4": "01acb4b4cda08418052eb283ec2cb16b950e6658fe0bda574575448005e6238f46cc266219e0bfa8d4590db4f2283438c596e20817e03145e5e7da6373a2f70e0f", + "01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca": "01fd7f47f38b47cb2731d85a2b11147a9520c69663fcec65d6303a485317d8ccc3e1a79402048cf1210956a02facc27bf1af18b6ce2aeeeb98e0d14c5f7900fc0e" + } + } + }, + "deploy_hashes": [] + }, + "bb75eabb9db28f69281325959e32149faeaeebbd122df7c5157c7f2b06f80ebe": { + "height": 45, + "era": 5, + "approvals_hashes": null, + "signatures": { + "V1": { + "block_hash": "bb75eabb9db28f69281325959e32149faeaeebbd122df7c5157c7f2b06f80ebe", + "era_id": 5, + "proofs": { + "0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92": 
"01ab2546144a84918f9020045a663b643c248e21fb5d15fac4e242f7c159938e6b7b3b604403ebe7a85f4dee4cb6c263c6c1900e29b3451a3c3d55df017c40d70e", + "016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c": "01bc20e0dc59e54176b7da3351939e8dd8f4056dc1708505bc2a86be5dbf20459afcc16629aa3460ab8dbfd259a4fbd0d0a6e7b1bc72083cef12c790d119d3070a", + "019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523": "01e1e64bfc0685927602d2e1b46a6112a00c38316b82190518bb8302c5aeb3daf6c592e49c595e8f9f54d5d1269a201181edd34dfe400eaef3fc47d32e2b86a80c", + "01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4": "014c63ade982bb75e47057ac48fd6b710f02fa174d9671e6cfb0fbfdd90e85d2e6e302e74ad3cba017a3e4fbfb9fcb17327548341b5cebde32ac1a80d5d6b3b001", + "01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca": "01baa09af7100c9b383668543ef9789a4989bdb0cb91a943cb605cfcbe33da7381e2f6441f503ec926fce6536ea4d8fa3469320133f73e2845c4fd0e7bc5cc5a09" + } + } + }, + "deploy_hashes": [] + }, + "d1d65bdbdc1fe4e7301ab4d921bc504772c3dbab47310be453bd606edf286efd": { + "height": 54, + "era": 6, + "approvals_hashes": null, + "signatures": { + "V1": { + "block_hash": "d1d65bdbdc1fe4e7301ab4d921bc504772c3dbab47310be453bd606edf286efd", + "era_id": 6, + "proofs": { + "0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92": "01ec296f2a7a748b81b4773fcb253e422cbfcfca3fd1cabe95104df7f942683e2c4ad86a9fc63c6f6c59c7b04ef95276b8574dc8c71a475f60109af51342492e0f", + "016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c": "014e5cc193ce3eac79bfd2f2369bdaf7a6eb748a507a4e7c5fde0b1a6b6f05267a3df285f40d44c3c4753af2ab7c7f88cada326fb85c413f5ad506d29f8053a40d", + "019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523": "0132b18719d4d9b64ca4e92af2e0fa47562af672e7c017e9256dfb784e31cc8195ef410fb31bf1a05adcf5f029e3b1bee6634c5c4f80d383afad09512156f7560b", + "01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4": 
"0157d8d02e9f4f619bc0a8a95f3bc61cc6521b744263b31889f6da4fdb3ffadb033e57ced766e231d0bbbdc7e3ba9ef662ea1f14cbd85dea9f6f0f058e1d943607", + "01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca": "013ae0332623ebd4c86ba3c087ef2c68e9ae08e851afd56557398d33cee5e39391ff8b879cdf815de544d6df7e703522e1006040880a9893a1c7f53e873e3c0f00" + } + } + }, + "deploy_hashes": [] + }, + "20855b7a61e36df78070a1b697b8b187b20efc49ecf450554283e384ff0e5902": { + "height": 67, + "era": 7, + "approvals_hashes": null, + "signatures": { + "V1": { + "block_hash": "20855b7a61e36df78070a1b697b8b187b20efc49ecf450554283e384ff0e5902", + "era_id": 7, + "proofs": { + "0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92": "01603fde1ea29deb3a5e35bc20c1544ffc5fbd8f58e6a0b4ad9a738ccae97274b023d6ea40cdc177f1ddfa8d10e0ec6c6645e8d854c36408e51b8229df48bfa50e", + "016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c": "01122abfa228c75a8b1bd71099678eacb2f8cc62cf95121ff058076bd0da5ccced23332e7426dad453a6305e44c1ca4c294a5086a1f0da267a21534bba51a8650d", + "019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523": "01bab63348e6be0f65c9af7d6b9ab92cca90779b9960818c908667e79508a065d87cf13ced784a43fc716bc8973e28a40d65a98c23cbe3ab3caedafd970bdf290d", + "01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4": "01768af65f974975528b23eae96a5f7739b806d32e56e6509057e9b0d96f089ad80d6e719d4367cafb6aeb5c8e05b5e1d211b52fde31092e04e363ac8242ba010e", + "01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca": "012eae04c947c8c3335d5542c33da1f93a2202fdb03aeae04ce30d28393ad7f94f80a5d164f1370126b80b7a3d26cf213b5c0940a50cc6e2f0f58198231b02fa07" + } + } + }, + "deploy_hashes": [] + }, + "ddb95006ea8d3293555af637f0fde53e5d14f6b8b9c61d1c433155af734d6498": { + "height": 43, + "era": 5, + "approvals_hashes": null, + "signatures": { + "V1": { + "block_hash": "ddb95006ea8d3293555af637f0fde53e5d14f6b8b9c61d1c433155af734d6498", + "era_id": 5, + "proofs": { + 
"0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92": "014d867980ef65c933c3e4e6b50dfd2e83223530b5d6eac8650daf2079e00eda0fce2dc222c7d3fb50bac6098dd20b208bcf424ab88c2c3aa2d73d20ab228a3909", + "016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c": "01a2b3aaba8ed701ef46ec55e0b8c5e7b35a8f980497025238687b2129b80cb3198c3195c7d21913f72b234728a5e9698b073c0f0b26062df2d62161c8f41e9606", + "019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523": "015dc3a199f4998b3edd8b32d8c432bd2efd149425781b5a6892e9a5d51ed9e9f23b4538c2685d8c304c3ff3e953b7b038b68ef950309bbc9e99391fee626bee0e", + "01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4": "019885590c89a861bf6f05fd00b2821f940b8442a9a50eb808bd8d1149791ef76200680c8afa1e68889fcf47d180949949309a1ba3b15cc1c4d6a865f937e22a0b", + "01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca": "018a70d0003eba0f4a99bc1d22113e0d25877e9ae0f090617458e9b069c93d3a27f003265517b04f984139d3d207c4e641e32289359c8bf1e5f0c256f65c29c005" + } + } + }, + "deploy_hashes": [] + }, + "42a656756c7b5bbfcdf32db19cf177361e9c0a6705a16a0d494e320175f09224": { + "height": 53, + "era": 6, + "approvals_hashes": null, + "signatures": { + "V1": { + "block_hash": "42a656756c7b5bbfcdf32db19cf177361e9c0a6705a16a0d494e320175f09224", + "era_id": 6, + "proofs": { + "0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92": "0173857a991249ccb028f8cb1f52324b4bb5a67ef3a3a60f6e7df61086c5ea8956519a288133d024cd76ab9b0bcd3958f85b26001322f9714d3159e1288fea7d0b", + "016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c": "01a0f04275b62ae0a12982c9b86785f71d4567c6f44f6757b7dbbb9b932cf00c8ea6e4c3131b81380d29b5fd91e25695c77f198ce8c397c103695aca73faf97f0c", + "019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523": "01e2170db2b4270bbd43861eeaad31bc76c1b4099f3d6ec827b15575d65f6f933d6bcd2a303396ae0bb617632dc00a89698c63dc280f689943aaefdc5782111009", + 
"01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4": "01725c203217ba1e454dbbcca334604a1df68f416169fd566d1d0e8821a066e39098236b43cb1a77d8f3a1b447287400dcaf3d5beddcba4c8f2e2fe222c63feb0a", + "01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca": "01375999d63755c55b80bcd79c3647fa9e52f945b2824ce848e5094d439455765876c66b88b641c26622b649009ee41722746b26dd024d55bfcffc497ccc312304" + } + } + }, + "deploy_hashes": [] + }, + "4013cc057d7774fd5aaef3e8062d3cf3786cb094d7109ebfce2d5f86903ec736": { + "height": 88, + "era": 9, + "approvals_hashes": [], + "signatures": { + "V1": { + "block_hash": "4013cc057d7774fd5aaef3e8062d3cf3786cb094d7109ebfce2d5f86903ec736", + "era_id": 9, + "proofs": { + "0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92": "0160c30d9f4d76b1fa89f3f130afbd930953921931daae3dea5053adba02a9203b9542a586061c798be54d017bae4c51dcd675389a14ccdb00694236d599cdb507", + "016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c": "01bb81d1b641fc92acce550af000f764563fa553c71079aa66be8511d42da68892506c64257a3147427908bfc6398243c92856c8ee965c92e63924b7503f7a000f", + "019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523": "01500a006a8fcf834465929d0f60f4f38ee2212f5f6b98c92afe2ff39329050e973ded4afff955f853301b12cb4e7c6c3cf174d904cefc8b1d704a11dc16ad3e0a", + "01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4": "016c66db6b6b65e87bef11e4b8284b7f30a62f6080e0303fd79669f1e4a0be83a0133248062ec7fab1c166d0c7de33a5eec9093681fac72253d4422517bde88a0f", + "01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca": "0164d80fe439397439efac06962f3233bdffd4bd2620af6eb42c34c7388d52d8e4b1f8499ff185a4fa58ebdc9b54d57a255d8bd903ed5c0d7f7bbaccb04dcbdd0b" + } + } + }, + "deploy_hashes": [] + }, + "1bbba85fdaa53289c5a5ef5a10e264aecfc23b2cf385883c38f0afeb3430bfc2": { + "height": 52, + "era": 6, + "approvals_hashes": null, + "signatures": { + "V1": { + "block_hash": 
"1bbba85fdaa53289c5a5ef5a10e264aecfc23b2cf385883c38f0afeb3430bfc2", + "era_id": 6, + "proofs": { + "0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92": "0137c87dfc36d74d40e3454751acc46d95cbb8501cf892024ceee50455681a4368815d99a7d5a924ce8cf551be8c57d980e69d101a9559f305621aa733294ce200", + "016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c": "01d7ba75b022046120ca777a9bb07d6b12d5be56738f1e63a83f8d529c1e352adc3bd66ec2611e83895cfccbfe0526b088de6fc1f44dcb70c9f111126f89335903", + "019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523": "01f317117ff091da876d47f9e836e3e716725fd41a5ace99a91903807fe0424056aa0ba43aea4bfdd1ffb39b18fc3ed262b88a0b4c099d867d26060c956c03c404", + "01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4": "0105c51cfa823470fe3527c6836295b04e97499a4ca9789b77c9cc8131136245311794d085f7226c362876f8c68a731c96fc9feaaebe511072ad5119c7e3293e0a", + "01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca": "01ad671b04cd47e3a57dcd42d0be5a630b6e67f6f796998f6b72cb19f948911cf61ae4b0cc7283b683cd9f22edf5bbe6f3f84022cebb8d6a8c688ef23f9001690e" + } + } + }, + "deploy_hashes": [] + }, + "5d27c40448a47e4dea45466443471a7782c0a562124bf7a153ec54e90271f20c": { + "height": 92, + "era": 9, + "approvals_hashes": [], + "signatures": { + "V1": { + "block_hash": "5d27c40448a47e4dea45466443471a7782c0a562124bf7a153ec54e90271f20c", + "era_id": 9, + "proofs": { + "0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92": "01fe941f1c097daf47b2713344f452bcfe348a6efde20bf12235f0da754973c1dcf1107151990b14597c9bcce46e2058b0867bf06e5b1a6d12e9855f645c99170f", + "016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c": "013760f3af9afe14eaef1327b6d7d1c021e29fb9a6d17f9592fced143a1bda881d7a273c76174ad83105ea0e314cf46351e19dd9e8fe4f8e4b5e7815be38c0c90d", + "019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523": 
"01b10ae47b777786bb666a137ba4fd027ac31b8ca005eaa0ec5518346f24f1b5148e35f899593ad6a24132cff3a2fd1e1983eac6bb25e747067464dc2d93062205", + "01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4": "01894ec9959620928f5799d796c38ad1d2000f6e9a6f6dbcfd38351d6b6464ec21f76442f8774f1bf368914be0e967c2be4b991443eae3de3d5df60807dc826600", + "01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca": "015027a55452e3ac79c60cf9a4baf581631351f7dd9761a52256fdec17c15c1a42901d40814fdc2379b79220c3995337d887048d3035549dc9ad14c631949cdb06" + } + } + }, + "deploy_hashes": [] + }, + "63a250af208974b970f7c2328bfab4aa3740023b43d29ca34012ea6afe585ca7": { + "height": 47, + "era": 5, + "approvals_hashes": null, + "signatures": { + "V1": { + "block_hash": "63a250af208974b970f7c2328bfab4aa3740023b43d29ca34012ea6afe585ca7", + "era_id": 5, + "proofs": { + "0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92": "01850a1fba8ea4a524cad7239db5a7de0360f899d54f5dea1cec06d8a3071eb95f49a9758deb9649118576405c323b0c95d8b736adf9102baed075a0d5bf623408", + "016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c": "010055bd55638a89aa31b7e67cb0966bd6b13dd38677f9ba9cb4d5ff74061d4c9f792dc044df008d7dab9b6ef83f706d1bb6c70481e9b00ac16827c11a334e3a05", + "019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523": "011a3864d9e55d38771119cb8bae3f645528bb5fe8231bf67c7b6bc6b7915c18227041a8307190e7250ebc95b20a2a645294484a4032c65380525f7f7258f38b0f", + "01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4": "01d141af946b2d80b91f0a3f42a599023713934329e15573b0df41e2fe7691049214780b23c72a75f54a9e04d4f6e11654c50475b195de1efd02c3ec62c875ea06", + "01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca": "01ceb94402ce8a411b40e7ed0be769cc2c56261965fdfea963a1c06ca6c6499e49fd706313c534740ad46add89f4b89b6bce3119733ab58b7ad7c7c6c56643b103" + } + } + }, + "deploy_hashes": [] + }, + "774bb86933d8150611299b63e4e8b6a3d38c074dd3867f4b66acfa91f903c9fc": { + 
"height": 48, + "era": 5, + "approvals_hashes": null, + "signatures": { + "V1": { + "block_hash": "774bb86933d8150611299b63e4e8b6a3d38c074dd3867f4b66acfa91f903c9fc", + "era_id": 5, + "proofs": { + "0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92": "011f7e0806092aea798b88e1ba3d37064c95cbdc3859376f4fd547b13789af4f7f3dd9c986493fd54a79fdacadab767779e6d209111b54c73bfa7e3f6923bee102", + "016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c": "01450fc312565737a0dfc1ac94ab7a38d778a7255233642448f3165b299e0b3710a855cb45156638de7c95c09b3c7d629813414fba8c944bec5a0cfcfa5753d90e", + "019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523": "01ccd464f69ef99cf714011427c62ceeb2e5ec46b03ca26b13298117377a9c5b17c79c498d44572cf4f911ee196a6560ec7d4652808268a92bc7fab706939f5d07", + "01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4": "0178ad515b357cc1475759362c718fda98c774c962eeefb41147518193575f852cd62b48e3e4b5186db55b0a5db52f2502bd84f9f9007ba6f02e520ab67fcc130c", + "01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca": "01353b0a9e972ea1c5c644966a30d07c89ad11395ddd0a2dbe008c1fee8d6a8803a1cb66e50e90eb6fb774aebff644f49d9b28eb3c7958b66fc522ba5fec172e04" + } + } + }, + "deploy_hashes": [] + }, + "2d890cf5edce165cf1362bc54bb8e4b14e9a3a42f412980d9cf5f688ab7ed431": { + "height": 87, + "era": 9, + "approvals_hashes": [], + "signatures": { + "V1": { + "block_hash": "2d890cf5edce165cf1362bc54bb8e4b14e9a3a42f412980d9cf5f688ab7ed431", + "era_id": 9, + "proofs": { + "0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92": "018acfc82599e1947af1f45403226cfc3e27d11243bf372ef4ed99290bab7de2e2719f0df7427c73cc76c67f96279503f7a2cf28643d5907dff3c55369e96e1103", + "016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c": "01fbde4d5d3d57828919d518de6cab2650860a4aee8c651ea841e280233014c56a9bd01b43e2b7b04e8a6daf6b9994ca24b699a0feb62365263a51d7b6a3c6570b", + 
"019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523": "01177c37af6d7db73303f6b4bcb0f7cabb801e6e83520eba88511fc82dc92d28fb2e99af7f5787bf4131603ef92ef09806e96c710e322572566e33aae5e4706502", + "01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4": "012c9cf741bca2447c8b31bae1f23c12d354c46bcde06114d7b5dfb138dfe497bf32aefe6547835c3bf5ab34203828433d3c49ed822ea8cab64f738fe298d51d0f", + "01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca": "01e788ff68add1ec4ba30ad2473a1e1914f49145069baf70b727ab3d85f1870196fc61347d4e763d41dbc4e84f958f7343149704b9c022893d4e043a51e536a30e" + } + } + }, + "deploy_hashes": [] + }, + "a7099f4d3a5735ec54e833b0dab9d2f38e02bfbbfd8bbf00d3d4f59bd62e1ba3": { + "height": 50, + "era": 5, + "approvals_hashes": null, + "signatures": { + "V1": { + "block_hash": "a7099f4d3a5735ec54e833b0dab9d2f38e02bfbbfd8bbf00d3d4f59bd62e1ba3", + "era_id": 5, + "proofs": { + "0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92": "01600ce47d211a19cd35d543e960351f55c36cea88d414500da0aebeefec27a829049cac0c52aae955b9d44dc8d94c3f984b379c06ff4365a062c96bf269238104", + "016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c": "01ba1784f771ef81b9d5240291e4ac119e080d779b1c1a6298405b26a30bac98330c08ebded884d38abf969fd9c31d4a85d9b0fce19a244bf68b3d9d8693cf3d0f", + "019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523": "01b18d572138b3617b3109d4a0023e8cd9f8f21b565daefcdc8a4b4f628a528af78543f46030e617510f8d922d5031d9b93574c0bacaec8929b1b0c8d3c5006a0d", + "01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4": "01100026f933d94a640238e4fd855ff863c3a38a7e2641deac6684aad30a5c3152236cd8a8111c310ac412a22dbcb62e02e24343b7567da67aadaf74a360d62f02", + "01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca": "0118037852b59d1ec371b72d76cd8c5cdad48eb80c129da9b5a318a6ab8016d10db39d4d5348280bc2edab04562c5c900f316ddd60cf9a8a5863087e6054fc5c05" + } + } + }, + "deploy_hashes": [] + }, + 
"cac38102cd15c1e21fea483838849d63d8b3510045f759a6b8a020e1286b1416": { + "height": 79, + "era": 8, + "approvals_hashes": null, + "signatures": { + "V1": { + "block_hash": "cac38102cd15c1e21fea483838849d63d8b3510045f759a6b8a020e1286b1416", + "era_id": 8, + "proofs": { + "0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92": "01853926a0fb42bcb00a01e4df9e7584e1db64363a4ef6d859e3a5d073e9164f819667201f920e9ecb39daf9b05ccb41ce6a829b12d50d800a042e0d1dc718f402", + "016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c": "01d881f0de6381836da827f110f01b7a17c34a62c588abb0c066fbfc687a5704418d778fb37527fd9bfa73ba76dca33f5f5ba966b087d340a9a792ed88182fe005", + "019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523": "01ca0ca0c7f0dade288f21449ab4bf242e227cd8165f60c21949198f8142d98284240d6134f26cdba9dde60aee3c3a017f71470fd0632f8283e25de82cbd3fa108", + "01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4": "011a92c7a2c0daf8b15a71cfc8adc0401a5b071df843fd725a5bf66051cf02e3c8df4b60ae56dc091a2049e1a8f9e8cd51116f851264109efcc081c9d9b06bc400", + "01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca": "01940c4473f9845daff31c33ac9c3165110cecff7d58911816faa3643f2965f9e31f79995b475c501d69cae782bdfd1f1ed66b51ff64188de83a4fd3213a877e00" + } + } + }, + "deploy_hashes": [] + }, + "8813bc48aab46912eb829f5d8c4d0b1803c3be64e2fe6feab5d7b5a7c10b03fa": { + "height": 42, + "era": 5, + "approvals_hashes": null, + "signatures": { + "V1": { + "block_hash": "8813bc48aab46912eb829f5d8c4d0b1803c3be64e2fe6feab5d7b5a7c10b03fa", + "era_id": 5, + "proofs": { + "0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92": "01c168ebc69fa7514b8c206ac889ba92877fe4223f1665360815dcca4ae958bb488bcc43d2401829555a0eee623732add264bdd36ce0c6356b492c024c3d64b905", + "016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c": 
"01ce66ce24e35a47e834f780f8cd1223bc1929e48bac474efc4a6c12adbbaa401656f7e8ed2df1806220a464979ac5c9b7fc4e4e2494d595b666941a2a40003b04", + "019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523": "01bb83f0fb82cda0243c0bad4e517dbaf97502e482eeb38bbd5655e71508983dc55078a7bd1738c342c20fe41932ee0e352a95de4923b87a5648ab823c5cd0a106", + "01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4": "01ea32550e0f59532718629577bac54e50e23124dc128dd30b93ce18cb13add5ba55c483b2bb56bb34aeded9c8f777451cc563d56d71370ef728180cad11cc5705", + "01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca": "019d1e480814b691626f18759e9db54b66e594d2b6ad1d3870b6b1e487877e5d9426b87f68bb4d86e258f6decf30c32fb8896051c716726120a59a4e45656b0906" + } + } + }, + "deploy_hashes": [] + }, + "945da24b9d3ea9dcc6272a6b4bb5902f3d420950a24bab1cb8bfd83a2bb04431": { + "height": 38, + "era": 4, + "approvals_hashes": null, + "signatures": { + "V1": { + "block_hash": "945da24b9d3ea9dcc6272a6b4bb5902f3d420950a24bab1cb8bfd83a2bb04431", + "era_id": 4, + "proofs": { + "0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92": "01fc1b2e5e932457f0442aa46a1ab1217ae6f8a9d67526276cfd22515960b94e696a9be191fe41aca5f4e939c7c7ab9201ce09457d077da8bfd75bbd9b0d01e102", + "016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c": "01cee51ed263f70fb33d95490cd51838bc049c215c83067fa95bb345f0ffc1a0b2774ba4ed7c9f68e69010702f4007766b12a7fcb556acbf2fa2dcad165da6f608", + "019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523": "01f4d6df04b1c154cc18213ad42e0e10990b0ddbecc100673f07ed0b09a27ae84e88d5d1b2b223bc6bd3ccb779d1c6096bc837f8ac40522c4773bbe916bc4b870f", + "01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4": "01482bd61421bd6a6728a3197c70de900ce34915796f515ac2782a41c6026bd9ade2d0ee9b8e3a48cc395236f48f043260dd2541ad1079be63aa557d639af0490b", + "01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca": 
"012b1cdbea5b8c9325218264e82c6883aa18724de24b4c87fcbce8a34a240520db70e3d388b39cf01de6bd1511680a5f19ddf47bb2cad440b0cdd9adbda98ca907" + } + } + }, + "deploy_hashes": [] + }, + "f620ad616351b9dcb82ca5fb430db0d11aed4328e6be83382b27ff5157500b78": { + "height": 64, + "era": 7, + "approvals_hashes": null, + "signatures": { + "V1": { + "block_hash": "f620ad616351b9dcb82ca5fb430db0d11aed4328e6be83382b27ff5157500b78", + "era_id": 7, + "proofs": { + "0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92": "01138febb7d549d35448dfa542d7aec5da50ca025b9f4f503e875198ce097b67d3c1e43877fde83e74a50c6097c3e7e6eba4dcc6a60104cc9f998f8b2c2a557f07", + "016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c": "01d4982bc2d7ab129dd6192d6ea54292b1c783f08f46fae7c3c4a75344f220113693a44395e6d1ac76c2c41a5ff4f78084e94d5419413f8494eae22402afa6f503", + "019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523": "01ec9ee13ff765d09d6c490c2e52959a94f5b02398303610851e60205b3d07dee8c61e4f907ed5e7fe091fc4c0b557c2484e77dbce68d4d9a164d6ac239068ec08", + "01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4": "01f7961cef7d6c84414783e36e80e11c1944667b9b9eb68962203db212ecca702367e3a1554d450cbfe40b9fec97dfb8698d658f6cd20b6e792576ff2cd197b80d", + "01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca": "014219be42f2b95b5110c6aa038a70b885fec984f5ad6ffc2a2611571c6685b46fcea00c3f2868492aa73b2e6b6129ddf84ea1f8dab889d69e433f888c7faeb10f" + } + } + }, + "deploy_hashes": [] + }, + "0665324472c593720b2a5cb9c81f2ec6a40401a7c22396addb1ddca4a8d56292": { + "height": 37, + "era": 4, + "approvals_hashes": null, + "signatures": { + "V1": { + "block_hash": "0665324472c593720b2a5cb9c81f2ec6a40401a7c22396addb1ddca4a8d56292", + "era_id": 4, + "proofs": { + "0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92": "01eff1532f2df9481408adf43453e5bb88823eb8f297a6b35001099225cd95035a18fcbe5e0508d6829245d036137edf8944465ae7803b772a741ae91876033c03", + 
"016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c": "01d94cb2df59468ded1a195e71ef8f024c0d783fa16e81e595f7e3c2476e29920159f549774ced5fec64cc1d015f832fc5d6a5cc963a5fc4d2cb5012a8313af204", + "019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523": "0166d01aa11620ec909866f23f326c87104253c4e63a639ae174ae1aab3d1957bb3bff80dabd28f0e5259cd683fd4572bc71f7075fb49d621af04002bf6bf10d01", + "01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4": "01145e7bc9d3338f4cf8ee44043e0f3882fa5ef7dc9314d020ac032011a5dd578eec1414f0e1651c5cf752f1e04360bff3db8bf237c51a99622800941bafc4f60f", + "01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca": "011d699445ef39cba40f288b4a90beea58b45d9320123abaf275f844393eb6c1a0123f75b55fa2a769d9faf39f99cb245cb96402221aa743f9d538195d8b78160f" + } + } + }, + "deploy_hashes": [] + }, + "b9cf69f0fa345fcc557db5d8db68268f2f6d35101c61f1098318fb26286d6c7c": { + "height": 90, + "era": 9, + "approvals_hashes": [], + "signatures": { + "V1": { + "block_hash": "b9cf69f0fa345fcc557db5d8db68268f2f6d35101c61f1098318fb26286d6c7c", + "era_id": 9, + "proofs": { + "0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92": "01e2c28c8dcbb67c069370425d841593721d366f12193eef17b99bc4d34f151cb19b00079da647e040735bb08877636b2c960357c012a7806ac587ac79688fbd08", + "016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c": "01fb5b99658b031151fac010eeedabb53a1c1420832e536ce91d8b61ab34a237f764b90e6e51944de94c3c4a98591268997a53bbc85968bc14289ad5d9994e2e0b", + "019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523": "01057a3cd15979ebb7bb7d634e5ccacde8ddb4981ea2681754caf37ff045d97035220b5f3770db73b3a9fbf1f2e295a84ec5dac308f331bae6bbaed0ecfbff6407", + "01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4": "01dff3ef0b5d35b93efb64cdc90389be98731b23bbfb35b31b95c9d2f97f729599731bbe1cfb34c1bbc16dc3acd9cc0a97c2eb9b0936ffa77bbaaa77fe4bb16008", + 
"01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca": "01c3037327b46b9468a95494ad1a4b6d18f35021e4c908751a37cfb5fcea6b93048e875affc68338abb75f9ae83767d719505cc26bf7e385c8f430101f03214f01" + } + } + }, + "deploy_hashes": [] + }, + "7408c61539684ee7746e96dbb9f16345034152e8c651a73b04f26b9f2d360d15": { + "height": 72, + "era": 8, + "approvals_hashes": [ + "033d6b795ef871489d1a18d3ca2b74c41e7db65f6940bb9dd65a9257f6a0b2b2", + "1b6c898189cb2b53ee452bd6d3e92bc744b4580ac9ea0eb5b4be28c7a9c3468c", + "f7e95f9c1f0736ec473d4cb0a274bf95f711f380465709ccf756371f17a05f86", + "7d55ce986588c8b189853b25db367464a02e1b79e32c67a92fdbeacbfcf7ec6e", + "80b04ad30cf4fdf9ac2a3e90bb10ca02330623f1d43a5738a77251b78d6b7e8f", + "97b64e2b22a94cabb6cd2da9d18e960b465bb2aeef5cceee6732297fc835fc35", + "57e568c46020c01e47d75d35fd5be5bf4679017e390354f462ec6fb4f38df535", + "45e94f63fa6d799022387d572ec1111e8634e795a795c15e21b4f86386f5d4de", + "2292a74ef35c7047ba9dd5b8ff330636015625dcf9463e4ce13e7e7de87504c2", + "b256b2aca6af557b5738a8f52955667533ab6e3052b764604c45d223f933bde8", + "e3bb473e8848ee0ea8bced89e4e0856c9b0d39394c93868550c6035fb2e65092", + "93ea21227094da28071acc87909e99a5207cc9813b50e501f0d7768f492b8d36", + "c422832b16c800b9675ad5e5d50d78b2a53cc10b83cd22420583fada372a363e", + "2bb82ec1f0e804cfe500b1636592101a037cae9b041fe4ff21027bb7db0fb581", + "e56caeaf7e9ef61863344697c846a27f65adefb78e06ceaf9e173573fde12469", + "3ab38de4ec925f81a74028ff4772017138cbab0184f8d85b7a932be5c174d182", + "1a9003c6c109f152b878ed6ff6ad6c6d6c0f1ced6d28d2e77711ee8d9b244f2b", + "ca0ada18b435d0be2aaec004a049d09b3f7c51b18a840f75dcf50a5cab007c47", + "35624839bfa79935fdf31135bda492c149950d692c5fd04b247e0870b121e569", + "fbe260ce449087a7d19074be566722bd980504bb810b59c9b1edb7292b9bf8d6" + ], + "signatures": { + "V1": { + "block_hash": "7408c61539684ee7746e96dbb9f16345034152e8c651a73b04f26b9f2d360d15", + "era_id": 8, + "proofs": { + "0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92": 
"01c4ad89a993e09c8e673098d63ac8ab280c71ee5a47247fa422c7696788ce26bdb781f3f9034978ff97845363efbe80ece80d37d19a4615b431015715d4088307", + "016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c": "01de890ce9f6248fa04bbc6ff75ed76fd75cfa33da54a423a985d8fb437dc7c1513cc0b07c1832c81dc08538a1bcef31be87d0590d1267b7866a0f087aa9c9fa0c", + "019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523": "01847e68664fa01e189075e8753d76c96fd998ae9c2bca94bdde83ac24c400ba75d7d75e3e9eecd14443bacafaae57de873f17cd19cc63cf1c2519942b6893d80d", + "01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4": "01813c754eb967477728d33535f423516f88ea2bd5e4b63b215ba35a9e1111b38013616dce39acb9377c2b6d739c92067b70ce28dff54ce9245cd90b058364fa00", + "01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca": "019d74e99365f3888cdf80ae1c8123afae2c8623b412eed9d6f7127c20a4cbc7d1d918efc8f32c65e5ed93133468b410baee70d72d00a33dd070fbd1d44c16bc01" + } + } + }, + "deploy_hashes": [ + "9ab82d1a1c70f7ae053c6c45157d40c6ac0d93d087d4f574d35edbe39dd483b0", + "b87c10c4ba0846b615f77ee42f50a10279a9ed4d810671623952aa8d81988234", + "a397110248532ae6749617a4bf1f26b3a0130ff03c97d3489d1d66163c169857", + "3306c637dcb597811449816c39a4d8293ce21edc60310b56cdabeaa6ba252bf8", + "159ffefef5d88b2cb99fcceb471a3d2f0c2910c7c9de2db2c1e9845b60766224", + "58223e9b91d3ec7b73453217adbff91ccc2d34d514105bf9889599d1e9122b21", + "ed4ce8a502ef432f8d66be72afd80fe1415f15e710e39d2b63971a6f737c2c4c", + "abf88aa99476275ca657eaec26264c8b441fc5e6a8682f234e73d63163ed2415", + "35216eb0d783e53accf6e74a118c85e9674c856c04d22330cbd8497913741507", + "888c0d7a5988cb47eb83e1e7c4fa58a8d229b58b61cf932cc8f0622c864e707c", + "2651cb3fb060926d9f5edadfec7a41c7ff71f29f9fb9548f1c3624f5351e9a6b", + "4821b3e2a53f31ccbe3645a8fc17aab6e6ec9e7dc38fbfbbc2e96194da443181", + "1c98f5f335f0cf980192d56548a80ffb7b8d8c30e60f82765bc1a7ed9b41542a", + "5eb79399b62d08320e321d73ceb50523327d26149c6728a8d8a1ff33dabe722a", + 
"d6926f2291b62c998cfd73ffc154d1e8259dbc3daa101fdaa24752e65d85027d", + "ab8bc98911a606aade80c3e24b854d971d52d9e20a12744e0f97dcf95fea9ac9", + "607559135a1b9c446b53446b3cff100b3ff0f6efb474019a09562b83b833bd1c", + "ebb11e7fb132d580752254a1166bcf9fe2569f4952c97227de8847eb456c094c", + "ece0da05da8a349d5fcf89552c24dea8f17f876697e272b2175790a11b0d8521", + "e5f4a0ec09d7918c9871de663a677c2ad6bb88141e3825a5a8e57b4956a62d3a" + ] + }, + "36f0f62d44b2471a6e2cfd45c0aa1d7eba737857a09735cee3252c65cc83217a": { + "height": 57, + "era": 6, + "approvals_hashes": [ + "8311310da1eeed47708ec3b218eefe82627dca8ca04255d13d99c494805c9b96", + "10d4be0f65ac1ae7682c43ab9f873e6f6f9199c240746e7c61c7451b633dd678", + "a2a6a958006556addd9fd4bf15d6c04bb0de083bfa2acecf9aeb3e700a767d0b", + "0cdd390c6c003ea0e9e0266a08adfc789c91ebd948324500b60399c3efd255b0", + "9423d0418543b7942d9e27a3696c775363edd18ace6f65dbce465e869f1edc3a", + "2a2da83f3fb70d8f332e707602bad7e761629b9a1c4f88b5b3aed5a41b55c3ee", + "8d2fb469089235c04cc11987aec5cc9a6c60e53d81a91b7ecfd9ee6e6b257869" + ], + "signatures": { + "V1": { + "block_hash": "36f0f62d44b2471a6e2cfd45c0aa1d7eba737857a09735cee3252c65cc83217a", + "era_id": 6, + "proofs": { + "0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92": "01a79f9284083f38c817015c8b6be19c621a424ae128a84d4ca795a94f019db74a7c1c656cf05881a5751f23a8bf98284b82c21d099608e2337b8529d0bee3e30b", + "016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c": "01c9bc50cd5bbbf9827a859ca1726af5f417dd03243971a6e2747f789a5149552eaefdcd8fc31ee9539bdea8df6a11a035af5e4a92b08cb5063db17f7c96fb2906", + "019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523": "0134b3d37a02fd2f5d8071be3c7f96c4d9ed701c5b36b1f1bbd3c1ddcac5470c3d4eb53b49695ddc5e05e9e7f7177a183167b7795f565e5b2b69f29e71edecb006", + "01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4": 
"0129b3ae0d98a771adba2d71b5289683cd07b032d5ea63962598eb5b46924cb9c40687553edf024f9cb0da7b7ce1c97a70224fc404d814523e1b8191f6001da10f", + "01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca": "0177c18ff2a970d84e16d26b1310f4331fec9ce0adfeeb9151851bfde2a78e08f8e7f520031daab513ecdd57f53203c0a0e393190e01999dac20171b0c48ba1f0b" + } + } + }, + "deploy_hashes": [ + "2af0f120ccd04bead484b6053d87cb41930e650cfb8f2b7c2ba37c5f3f5de701", + "aa2ae8d13b0087c762dfc34cc5b3d0a30d04382a2fc707f259225915b342c09f", + "2a6c3ec008d34e9fc614bc6c264af5e3834a847d5690211a3393ffdfa4206a1b", + "975cd4ddf71c0fe4c81439e9b4fc09ace17fd644efd455b995c3445403b18cba", + "faaff0d5bb9e1b68616a7225b37840f87bba48b2257607033a96fccaa4e0401c", + "f1c728bb6a8217bcf4c41d0d415d08ffd043f2b15de2c949ddad4e072cf56c57", + "6942cff9e560c7d0864b812f30bea56e59e26be5136bc188197e1916fb3fab9d" + ] + }, + "29936b2e3e9920b624bb1539840949fcfb21e27bf347f0433d73861da6f9d608": { + "height": 81, + "era": 8, + "approvals_hashes": null, + "signatures": { + "V1": { + "block_hash": "29936b2e3e9920b624bb1539840949fcfb21e27bf347f0433d73861da6f9d608", + "era_id": 8, + "proofs": { + "0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92": "018ef77cb741d610411cf2e1d54ec63c63adc87449557c3adc200812e8d263c5b550a928dacbbad43738b0847d1acffcbafa75bc2ba352d2fee3a431d6e947eb00", + "016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c": "018382f4d0d690c2efd8a6178cd35c1c60beec2a9a8e9acd1b296e90fc9c15f8cc26e6e9864bd5c1bb74533b6969876274dab437183a7640ec2eb0ac7776abc108", + "019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523": "015c86bf7799ca7ed974fabcaba50a0a210fb393d09a0e259a82458adbb1c50e1450abf4483255e55b1850c2a5734263d7db68228903e8560a634483e4641aed01", + "01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4": "010a1709e850f9878452e17427780a884ff4203a217d91c8e2b1d4dac5467eda84048433bf61c8094f747a62a219572308cc1cafd93ec13ef462e6711ae99ee600", + 
"01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca": "0137567bf30d37f7cfa272df1441867a714e1a1eb63e6a360a7db59e4e95b7ca37cd3343f20006364279f2942559bdb4448c161752b05c07e6806730dc9b2d3c06" + } + } + }, + "deploy_hashes": [] + }, + "c06cad6478773686dd1fb7d9658f9e91fadfa222464d0403ddea9f6430f93146": { + "height": 56, + "era": 6, + "approvals_hashes": null, + "signatures": { + "V1": { + "block_hash": "c06cad6478773686dd1fb7d9658f9e91fadfa222464d0403ddea9f6430f93146", + "era_id": 6, + "proofs": { + "0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92": "0107c592e78c968908a5cd06395b0c2f4d866b72221f3aa301695d215c66e9a9970cfe4b6bda6b918d9771ebaee0dc57577dd6b6e58af77fab78bd718636b2bd0a", + "016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c": "01a849ed4d48ae86b377c419117d2db755be4f84fde5a0659cd925049f504b5180bb67a29eac411b2406a7ed95016bf46da619719dad187d2d0894593687bdab0e", + "019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523": "018381440af0fcc446d7bf2edd6f059b9ae107c344b0c4e2f4c8df509a353f0363c91ec8607ed813a9cd4d03c5b14e16bd4788d008f04546713f70578898ef8904", + "01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4": "013e79ba6d2de29b15a021ddc4f7eedea643d12a088cefb60e8b6bcad8fa7fc7da39264799efd94764aeb5d039f62d015a2c702a973c91d48c96da605c21a1900f", + "01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca": "01dd0c290c1c8a973856a80360e4913a027daca1bee0f6f58fd2049c9b4fa9ffd323f81a891701a1c5961f38a789ef049310e99593de0d5a6954a065d1e072ad00" + } + } + }, + "deploy_hashes": [] + }, + "81230f3a7a613e48bcbc2b0c4edfb4681e03eee92b096498452a13c0bdc8abfd": { + "height": 63, + "era": 7, + "approvals_hashes": null, + "signatures": { + "V1": { + "block_hash": "81230f3a7a613e48bcbc2b0c4edfb4681e03eee92b096498452a13c0bdc8abfd", + "era_id": 7, + "proofs": { + "0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92": 
"01effc516754793d6a5c005a8174b127d7efdf49c9d3eb20996a5271a29284e7769096a8343e4e4d3004540c75b18332910c51e8a7b0c82ca8862175144ad98307", + "016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c": "013035aa4e7638186e8bead9684406b51bbb85964c1666a11de3a6bdc72cde5494b4c10a94a06d94a5b3e4f8ebafd5c9363deee7909e5101e47f32cddde295b500", + "019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523": "01f34639e11f292ab50f4aa54760771dee89868c6a52cee726d5dc2f936ce77375276647dceefa45ad01dfd747541dab17bcf04114f1c03047443e8e167f66e005", + "01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4": "01b24a0876a2397467050e3d94ad046c40513237ec1dd73680dd8628773fcb07b2b1f5162b0451a6a5bae93dd9b068e4733098efad3c68fa6099d85e1e1f37e50a", + "01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca": "0178d98ec1b7133e4a330744368f46864e14362f48abe874982017113df6254e0252fb12216820e4b895f200546e25d545d5361fae2081d29ab5a33a445ed53101" + } + } + }, + "deploy_hashes": [] + }, + "f3643655bbbcd415f479d538dd91b44cc22f2bdfa5ad46dac3b73f825fe80891": { + "height": 55, + "era": 6, + "approvals_hashes": null, + "signatures": { + "V1": { + "block_hash": "f3643655bbbcd415f479d538dd91b44cc22f2bdfa5ad46dac3b73f825fe80891", + "era_id": 6, + "proofs": { + "0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92": "018fed3db8814f9995c64246d0ce76d48c588c13e7278f08ce3a389c954aa039b751f4680f822c2bb33b3ebc1629c417f77a24183c290e378ec0dc983c619a060f", + "016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c": "01d8e641443bc14227e9113f8cb70406e42375f1a43acb324252742a9acd04aa08f825f4da9df9c2dc3d0a4ca81c9bdbb632cfe7211e147666e16542df824a3006", + "019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523": "011a8608ace69f02910d465277af8567620b7041f72a1e7f9549493b2e7cfa3b78cbafe8fddd01d210021b3a992c07a1050b6f970db7d1a9ef56915b52cb3f8c0a", + "01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4": 
"01b3a798b231ee2ca7088e73b09dfdb8373b50fc82299d5cf84c56a3ff8838ee5a35e97c874c5b717f5cf6c264f89f45c2a28e75d19788e235dda343536f3ea509", + "01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca": "01badf5a3f14073def89b4b9bbc7f84082eaf40cac5cd004321962485e6050d3241a87eb85650de8cdaa9d02cbfc8a126728738638a10c8c640209c0590102d706" + } + } + }, + "deploy_hashes": [] + }, + "dc27efe32e93fa37926307465eaaecf460ab5d286f5f891995055b661619fa7f": { + "height": 82, + "era": 9, + "approvals_hashes": null, + "signatures": { + "V1": { + "block_hash": "dc27efe32e93fa37926307465eaaecf460ab5d286f5f891995055b661619fa7f", + "era_id": 9, + "proofs": { + "0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92": "01c2f572ada6b53c50a439ee9ddb5ae17100af1eb676e7a35f940bd2f25b0dd8b00e715c8db34ff573d067d6f446e552faa15895be3117337b9047fd4e0b4d2901", + "016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c": "0163481871dc8147578d9c2ebafc2cfd69cd908b8c820dc9a1d2a318e55df7917cac5c855eaced0c72f1bca45aa45faca557395494c404d399fc1ac84303ff420a", + "019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523": "013afb0d0731d39e793433e949478dcfd88eddf64410183cb5703895cec51f59015c4103e8f4ca8da1a8771e0e5bf62c8dcdb82d12a50ab016c14c5e0c3670f80b", + "01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4": "0113f14644afc884a7b9149c7b83b3f69efb4c557bbc34140bcac7396768a81caa4b53fb3d483a8692e7d230ee1505a2d7984510d1b1eba3322e83ae4bbbc8cc0f", + "01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca": "01752cf4c4f4c2d1688b10727229575bba62ca400b6c56994d06c77c8b3a66190c1bf6356a2c0c3609b85ff419b839c3997287b7077e86215fe09fa158a5bf3501" + } + } + }, + "deploy_hashes": [] + }, + "50e9cffa88931fb44296172cbe2d2989a94ec2d5b2cbb80ab7364d4ce6ffd0a6": { + "height": 94, + "era": 10, + "approvals_hashes": [], + "signatures": { + "V1": { + "block_hash": "50e9cffa88931fb44296172cbe2d2989a94ec2d5b2cbb80ab7364d4ce6ffd0a6", + "era_id": 10, + "proofs": { + 
"0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92": "01b1af9625ea93e953a48a2223dac3c9f5c014037c22d8de356a0f4337c325afa838614f54b575210602f01a08a4e87b52117f64fe9cb540fecb839dda05764101", + "016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c": "018c5c0b7d36458ec0e4b6b7677104c91ddb6e29efb3d9c2df0ce945d835269f9635e6cbf8661fdd0332a5e085fcd56b092ed81ea03bc66ba761397d3fb14e0a08", + "019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523": "01ab069545602645cab96be41e6bf0d6925a209caee37cab1025f7cf9befe19cd2f676c14671fd2a230895cc23eb712acfbe04d6c94f27431915a0c8528f2a4402", + "01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4": "01a7082345c330f2a6b19b5f13f9e3ba18a00de5363c589ab3cd17512b9881cb160c6396d8eacb4fb8c8d000f2d8c7cea802cea79cbe2a057dcb76e0f60ec9940c", + "01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca": "0143ff53b152977e00cc68ca3f1710d112c691295e28dd02fb025019a1a50bc4d0fc0723617085544d67d58153933dae9d02eb85e0f453065ca926c8d91f2edb0e" + } + } + }, + "deploy_hashes": [] + }, + "18a51771a0a694e7602c53b4510c4b63b63086c926256a635f84e4ef017f800e": { + "height": 85, + "era": 9, + "approvals_hashes": [], + "signatures": { + "V1": { + "block_hash": "18a51771a0a694e7602c53b4510c4b63b63086c926256a635f84e4ef017f800e", + "era_id": 9, + "proofs": { + "0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92": "015038be26fff75bb8725e496f7de8d2c1f04e697f3e9fa9427df75f631574340c36ef605dc9cf1d27aafcdf42e9b5a9e03719a6ae86b3a6151d905988b1e61b04", + "016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c": "01c554f533d5ef7e9ba1432ef3350783f9e709a4c4414aac6a3f23d9ccf51f8893e4373147e54cf27619792d8dbc1403aafe12f3d8c06b821bfa7d9a94a12a8706", + "019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523": "017db5d39ae7841c10f510cffb2945f9850287c30f3515f81fb49cb1488f3378cceb127af16c8f29d1e115521aa942f1eee0522dbd884c34ea9c95fba63d40520e", + 
"01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4": "018a27a18d0020ee7ee8479d2964886dc4509cd4f3f7704b847d403c9c63d1fae4c78851f26b1e89e2f6c2222dd6c5c2fe62236d8063ddd065899e63dd5b93a10b", + "01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca": "0116b448374a553ad4223c18a2cdb7f65905e7917e28c3c90cd53c8dacd330f6da62d0f9b8d9a86f15e1a1c0fa819ee849e6c5de3beb33ceea7f973b505c56480e" + } + } + }, + "deploy_hashes": [] + }, + "329b31a78a7df60a673962a768736c58f32df95a8d15c3ed64cfde1fd4d0c2d9": { + "height": 77, + "era": 8, + "approvals_hashes": null, + "signatures": { + "V1": { + "block_hash": "329b31a78a7df60a673962a768736c58f32df95a8d15c3ed64cfde1fd4d0c2d9", + "era_id": 8, + "proofs": { + "0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92": "01268e33fc72093ef389abe40d03173f535fb2dfd7c2421f276e6ed79ef7fbc76b9f305f789175ca049c39ce2e8a27e0972858120e84ef9b3633df6681d329b70c", + "016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c": "01a01bb0ac13f5134863c1fd170dee15f712f5289e3f16ea33baba305a246aace391cc6cb3980282a09a464724302d8973ec00efd53bd623377ce793be21bb340c", + "019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523": "012a931e60a1d7753eabe8ec2f60ee83606aff2892dd52dc1b756e093a54631a99484e6d9a45c72d0669528b1b84af123d2040b6ce79988433dfd74b14c033f103", + "01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4": "01f7645dbd75afccff1583fad3f534105d62ad2766d87ff3fc574994b5f16f4726ac851566545bde8db635fd5b4a86bb01a7f0e19123ef961e8082350ea3954b05", + "01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca": "01c6cc33a7555c4aa29fbd5a110a3ac238f2beee5f047a91a19c484a48ecd2fdb6b70370ab863c39be8474578d7a747bde4ace519f8a0f4dc824b2dd7c6dc8fd05" + } + } + }, + "deploy_hashes": [] + }, + "8846f414cfecd61e3a405cbf1aece1fe1c98fb55dbaa33480d2222158254bbdd": { + "height": 59, + "era": 6, + "approvals_hashes": [ + "01b558bc601689ca51ebcfd32613794e08df5e7b15697c27aac2ba22ee9344ce", + 
"ea07919ed117b48a625f6b2139432eb08602f98755e7e9a2c5b98a0373674da0", + "3bb65a1cd0eefd75ee0ec864ad65d2bb6844391b645812887779f680a2097694" + ], + "signatures": { + "V1": { + "block_hash": "8846f414cfecd61e3a405cbf1aece1fe1c98fb55dbaa33480d2222158254bbdd", + "era_id": 6, + "proofs": { + "0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92": "014ded5a73f5a4ffc0ba7d074fd40a768ff3b6cad345b5c588b2811536d5cc653ea3964cbe8f281c4d99dedfcf1dcab08de4ebb98046b4011b941cdd4aa70a7702", + "016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c": "01638a6ca05e18e467e070a652d8220835be3a3f61c1f06e40d316ebd97c3b59ca612264b26617ece9f0f00aa73ae2e31e0cc4e09ebc732cace053ce7cc359490b", + "019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523": "018bcca0710e6298a14eee2b4d997d55d46c8cd8c048662073f3d10dda3ca5123b41c622205bccf42601c47fcd7594ee35d39170d5499b6ce129d953529fed9903", + "01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4": "0143a0c296853f95679687b58a6d6e84e7dec9864428c5f689b0941f0965fd05fc196521d3e20879d156d80cd849d202f8d62c772272e989765290d28036751303", + "01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca": "01678054f97cb18785d59a7b4f5adafa916449e42f30759d90aa70dbb51bd5843d3395baf8cf9796cc4b0423d115d772d1e87b78e4e41340ec3f24fdff0b763607" + } + } + }, + "deploy_hashes": [ + "2533d10d8eaaae38d9b3b8870e17ed535434e4708a84e444c669299e84abde3a", + "4a743f9843488b64a3940da46870977917f6e58e32bf1dbdeec84f8c24a33116", + "2fb450c21452b99bb8a21bc3a71a9e3266f540cde8577f671b857c0edcda2827" + ] + }, + "a62654b2f9a65e0591e38015b617c6fe37e3a50dba474223030d75129d4adaf6": { + "height": 41, + "era": 4, + "approvals_hashes": null, + "signatures": { + "V1": { + "block_hash": "a62654b2f9a65e0591e38015b617c6fe37e3a50dba474223030d75129d4adaf6", + "era_id": 4, + "proofs": { + "0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92": 
"01efe56d094104ede2a186932c26d733d512313a1e57866732ca4cdc554ef1b5c7b09a869f81ae1ab100f22d5facd4255642dadc0a08da4454b0034f3bcde6b308", + "016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c": "018067e568f86370ec5cac5b2d229300d5b4629ef3ee3e3347a7cb5f33770ebbeec9a0ce415db388572b99f1f53fc5c256149561e088646b99140358d56e808f0b", + "019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523": "015054092a62bed9e1ea285f35bc310fabf9d3f96c79ce5976a3e3735b83c05bd4fb41e02b80d15c7ff5f50f5cae79c177d067131698cae07310e69b4c9c553f09", + "01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4": "01a1145c92db74488261224c0db0d9427df46206c16e0b1fff899aac7b5125b4dce0e716e8aa4a6c6042feb3674ccf4f081318ca7488e2cda456c32d03ae11410d", + "01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca": "011821e258815fe27b80d20c86ad666f9d1b0b6aedd5ccef1d94048b7a530b552e30e0b7497ec18ae0f1738602392be9d2f6f1128c2b0006309ce2f340eff2ad0b" + } + } + }, + "deploy_hashes": [] + }, + "18c1d3267d5cd2f311a501760704f262a12996a589623f7f73903ea28bc3b21f": { + "height": 35, + "era": 4, + "approvals_hashes": null, + "signatures": { + "V1": { + "block_hash": "18c1d3267d5cd2f311a501760704f262a12996a589623f7f73903ea28bc3b21f", + "era_id": 4, + "proofs": { + "0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92": "01b9eb76556d4919712a797c40dbb42446622cd6f10ee55af8d2f06e41fa72176bb60d32489dce6512586382d6ea39aa3d4fcef0570e63eb29db57d60e95acf309", + "016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c": "01aeb94eb2d7c54e7e533c89de372f97016dbdf6bae5d6baa3c19ce065f2ef353bedced4c0e5263e0c636b7aa2ac8a06a5402676830aa9683bf9200793c420bd03", + "019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523": "013f8b80fa31c7a3db73852912ac31188c5660a327d6a370e2db6ee70fd569b56a3511178ac54b597b2cca47e5b268f449c7b84d19f186e7bc13909f0e305e4f06", + "01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4": 
"01f375e17bdda7fb9125c3a0d768f88846debf1af07e9ef2466ee94cf9db5e0af088d41a08cb6970e55d1a7c8167fa63ee12edea54c59fa8be9ad04e9b11c8ca07", + "01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca": "01b97099e25cb2df1caf813236a7ae4d1dc64f1487ffbfa696fe7c1bf663001bda78e127df1f3b1838c0e73715b72f64f9152021a98d645bc297fb6442e45df101" + } + } + }, + "deploy_hashes": [] + }, + "f517a95ec6050156a8537bc4ab6b2b2a115dbf8583d92bff986a94ae6cb2ff09": { + "height": 83, + "era": 9, + "approvals_hashes": [], + "signatures": { + "V1": { + "block_hash": "f517a95ec6050156a8537bc4ab6b2b2a115dbf8583d92bff986a94ae6cb2ff09", + "era_id": 9, + "proofs": { + "0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92": "0156cca848f711896442a5e1a46526b0b0015f4edf74d3e6c332b3df999b98db476dfde8e3fc0dc8e0d66eb8aa22855c07b76b05c37980c743071c7c711445fd09", + "016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c": "01bda830c252d5ecf66653a5dd9a7f2a5fcf6e6778adabf66d106d302a109682898649dd9bb64c54fc6669392bfbbdc641921b2fe48c38f483c84de22892f78d0c", + "019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523": "0149bbc7f26f0e5a76b0cd21131d3945c81e3fe8332b6279a0fa4d7e6d3f53ba2390f4a509dbf31b65a1876da9e0ac29a4e73cba637b24353934f026eae9d7fa04", + "01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4": "015b962326367032127bd44a645fc0ca36307399c5fc433e7b386782170aaff64680133312cd870146a626643fed8a29ee3a6f814b9938ccf67ddf499adc51ca07", + "01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca": "01d3c8f430aacda4738b6d785ca0e83a8dfaa9ad7c940bbc8bd52938f0caf2629316581cf7c72e5499f160f9a33883d7d43258893a2b43abd0882a272e04ef160a" + } + } + }, + "deploy_hashes": [] + }, + "1a9bf2c6bcb799136040ee0dafd44b5eb126220e098161f82641aeb6ae85461e": { + "height": 66, + "era": 7, + "approvals_hashes": null, + "signatures": { + "V1": { + "block_hash": "1a9bf2c6bcb799136040ee0dafd44b5eb126220e098161f82641aeb6ae85461e", + "era_id": 7, + "proofs": { + 
"0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92": "01cf50f3a4e5ec2b55ff33c893669b243c7127208c30396cbefa7303b7eb46f466d9b282f3bd4d55c2a65acd5242eeadb35cea9c23adfb750a162631cfe293c50d", + "016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c": "01b3d4d4e7c7e52d8869d0ff74952621b665beebf7ad9e7783dfbcf0c24f586de55ede57086afd2551f60b0baba2b7d8b8cd06134bf12f464d592eda6c6b69bc03", + "019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523": "01e85321fdb3f9f38ce4b60e1f57d737ebaa608b7055d109a6556bcd0543f8ed828e68231d2fa178ce01b4e6ba5b72704ab486e0c4f3ef70f87eae337549db6c03", + "01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4": "01e4613eb3e6923f89c4e405145ce388320303acfa708813038f1a68c2ceeeba20138a3c3c2014243970b8ec88d265c30d459a8677fd14c338e7bb365ec7acf70d", + "01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca": "01cfb8a99e19d240745dcf9f27e8e0e2c35df90508b50d827165cf188e78b21a49694db6216eb438334686f803c2f4fd45ca64fd0edcd10e8a8f410bdfd11dcf01" + } + } + }, + "deploy_hashes": [] + }, + "b77cbbd252fa04e3685e3b22331670fcbe2215bd1dba06e2e49d9d9e4f055557": { + "height": 40, + "era": 4, + "approvals_hashes": null, + "signatures": { + "V1": { + "block_hash": "b77cbbd252fa04e3685e3b22331670fcbe2215bd1dba06e2e49d9d9e4f055557", + "era_id": 4, + "proofs": { + "0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92": "01c3ae7049e70e00ad85842d10b4b38e5da8abac966bf429cd8f8bed8a567b6f2a54668db968a4d0aa734e57d660341a68b14ecb482f4628d8c2121c432c41e909", + "016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c": "018d6291632d14c7b0a1c55b9f5a41c99125d2437828bf36e1805b5247dad90a312e3aab3045393d1d5861a028461853a90fd96279eb7a09e8e77262a830e5ca0d", + "019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523": "01f195903943eb942244027e30431d523c886aad058edf781294f4860378aea569e7ba3c76df3b46db2439e21c05a7f1eb3e0fe21a54dff09789fea7cc16f2aa09", + 
"01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4": "019e56d1429f3151d1eea1f113184e6a254880c14bd28dece3a9c5130dc7e53b867462fc8c948cb1c65749f5fb99d0eade89e37961a4ffd6f48570b872eb3eb80c", + "01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca": "01364daea728bc656324c9251559af5629667645c7b45c0189d653819a837f86ed1e1745622f581c589fadb138032b49fa4365a8c443b0f99695f99c8af552b106" + } + } + }, + "deploy_hashes": [] + }, + "89f42a9616f94d71f1c31589ab89ac38a600ec58fad8713fd3c28b8c1f4e7ec8": { + "height": 74, + "era": 8, + "approvals_hashes": null, + "signatures": { + "V1": { + "block_hash": "89f42a9616f94d71f1c31589ab89ac38a600ec58fad8713fd3c28b8c1f4e7ec8", + "era_id": 8, + "proofs": { + "0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92": "011e94d090a77c80e296a516664a577558c58242504b77002e049cebacfe0bd9801f54efcee0521d4a8088ec23eb1c55ecc165d9438840bf0ac0d1d711544be603", + "016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c": "01423220f2b56273e6abb6fe5460bde496a7ebf12e998e0bf049d31324d899a5260195db7740a8f7c724ea65cf1114471dc393f380ce7231327e013db74fbb5102", + "019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523": "01050b83336eff034207c4f40164132059755162ed52d3959f573c8c456b66ed70e249a914b98099954034a3ad7373c8688e29f77164341037ece4cbd44fa5cc0b", + "01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4": "015e98ea39d87ed3e310ad12282b2f59dd61d2668d9a22bdf97b9d58f1a2f15b8764ef0593642b4121602699ae87011398551f0db115e872e8a4c0b02a0c362b03", + "01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca": "01e5fbcd676f616e7b4b7ded48d651346361dd0e7eddad6ea78705d056068d089e5fec73439a075f5ddb26326012163159bb8aa5ae7bc12f89ff05875604b25f0a" + } + } + }, + "deploy_hashes": [] + }, + "96607840ea4a72f4cc942fec7a10a902e19ae1bf2513acced2787e972eb209f4": { + "height": 93, + "era": 10, + "approvals_hashes": [], + "signatures": { + "V1": { + "block_hash": 
"96607840ea4a72f4cc942fec7a10a902e19ae1bf2513acced2787e972eb209f4", + "era_id": 10, + "proofs": { + "0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92": "0172af335870196156ad1bfff98728b613bdb1c09a37a06ebacdf5fcce47bd763298bb0449141641793184418263c0d5c186e9f060bf897939edbf7e8860c3fe06", + "016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c": "01e0bdaa0d179740660606aa662eeabfbc5024ff5cac6b3a8df1bb66133fc6ab722ee61bfd70b50437de820103f7b0b0b0c0625658de2cd05c7c35d57d4c79bb02", + "019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523": "01fb60bd2cd3c17c2d1f2439b88571024d3ce70dfdd0aaad41ba7a6e5e29601ae555aa9720b865c3269638c009de6ab6b8814bed9a079a9f9b2b762c9c3becc507", + "01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4": "015a909ae323a4ee11c86946bb760fd3f391a430861a9ed2f4baf97bbce48980f073d51bcd36fbeba42e1df0c12b21c0be0b98dfae00d7241ad1991205df474f06", + "01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca": "01d7ad431bee6078c51a13c278ffeff0118007c7c65d15a5db2d501d563227cac967b4b2ce5b707b16954a0187a5c15a06f975abc5afe78f30100e145fcb77610b" + } + } + }, + "deploy_hashes": [] + }, + "846bda8fbf66660999eebe45678129278147be95efe5d919640ebe0eab6efabc": { + "height": 36, + "era": 4, + "approvals_hashes": null, + "signatures": { + "V1": { + "block_hash": "846bda8fbf66660999eebe45678129278147be95efe5d919640ebe0eab6efabc", + "era_id": 4, + "proofs": { + "0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92": "01fb136aa412c3b925ac255aa80b06d8bb3800bc89c7c1a349cb4b92f2153a9d1bec2d5b2c67be7f4c7fe767be9978801042ca7e0e0f474b700898d9209c83ad00", + "016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c": "0124438488b90c154e048fc88d983ea130b04f56ca9e7153d9eebca7c0745c6f110e1a77cc5198754c35230adb02051caf7692083c8a7390830023d6abc9603201", + "019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523": 
"0110dbfcf752496964d5b48518e5a135740e53f09aa6bad6ee42ba67771a314ed5ce502ec5275a5aa908cdb68e0a49428928f2bcff517e4c76a4d34df69ba34a05", + "01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4": "016c4fcda8a74bc271ad1125a409b2e8b285334f1fc201c0a289878043b55ef36871099f9490493e34de35eb501aa377787fff971ba9c393e1c422becfad7b1a01", + "01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca": "011fc6a840c70999ceb9a0ff937f5b0cd5dbfea15129c086b19fd77b01c28b702a810d6bbaed3b2780c25a3c00bafe6f14ecca250a3947112013438cd68805d800" + } + } + }, + "deploy_hashes": [] + }, + "aa88cfaee3c7993f8d30a0dbb6039d2d009b34725d747b7c639a29de810ab274": { + "height": 44, + "era": 5, + "approvals_hashes": null, + "signatures": { + "V1": { + "block_hash": "aa88cfaee3c7993f8d30a0dbb6039d2d009b34725d747b7c639a29de810ab274", + "era_id": 5, + "proofs": { + "0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92": "018ac0475e0c87be0df6dcd945ad05ec8bbc57dc2dae6c464e66c760e2bdcc817315b98f14dc254b2890a7ca42dcbd31de1bae0d00187b55807fc9e894b64b860a", + "016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c": "01e72790907e1802bb1640bd4630a97d9c92dc8b478cff92e2b9d567b6fd83802e779c4c4d27a340079dc86aa3681e51b95b6428dc39534136fe419e1d8ebea303", + "019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523": "018a4ddc2606619dc1be1cbf0b00fd30b06ac95e844e6822d1d970160aa0f83eab5b8a751d4ea6c5c4b67460246cf6dab88d878d102a0be81cf905aa0e4bd6b10b", + "01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4": "017f349109f4e27147947ed81825d88cfd17ca95e9ea98b4eee836edf8502a5c4eb92ac6b29096c3664a31b3089090d0a1e6d2a4fd4f21ef64186fd7496d5b5f0c", + "01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca": "016d5e2c01638072e25bfa793347fab8595b74f35bb8c8730c26b1ffaa39b1d684b6d0ed42267217b6a4d7b634d0f14db84476b8ad1572eb2fc110ecea6b71710d" + } + } + }, + "deploy_hashes": [] + }, + "6f386926f07ad93d0a9330d20ddea32f22596cb5c6ac96e81fcc32dc164c26a9": { + 
"height": 73, + "era": 8, + "approvals_hashes": [ + "4997980abd95d62dcfb204e1bdbe897052417bf2608fa4b9558e5979d970aadf", + "c686ca14f7b5e3f161c1e62722adf980983bcef63364bfd8f2990ed510f0ebce", + "5c9387c7727ef45134fbf420c99bdb17dd0020b995589f781101b56e71a6a071" + ], + "signatures": { + "V1": { + "block_hash": "6f386926f07ad93d0a9330d20ddea32f22596cb5c6ac96e81fcc32dc164c26a9", + "era_id": 8, + "proofs": { + "0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92": "018ff5d691f249abbbecbb932e09f5b6875ddab8ffac2784ddf2c268e2a2ea1ceab1e35def2d81293227d91da1ffc2504fc2101711dccc47242c5aabab811dfc04", + "016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c": "01edee50b7f072fc34dd6b337cbc25f31b38ad4b76609ca05d3e0ed464bfcce43eaf9ff5d5685bd20f6be191177fb3ef48ce892c89c16540e67da8face85702f00", + "019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523": "01f37a1537b47a5c258984acf00724d0166b09a7c282ecb5facb355aaab659d1c1d66aee63fb1dab6eb39667462aefbbcff2c037a9069eeee95d710fe603c96c0c", + "01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4": "01dd313aa8cf069742b71a7607052aaaed0eb0c5a1b853caf9d4cb4620eaf6922859c33745f27e206a032cc9b4dd8e58cb767c7fdda1533ead09f22a0e42560f01", + "01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca": "012f6a17c7ce971c07ce80b3a9f145e90ff25162e1b87b75b9e3f7469dd353cef6ca7b4e7551d1a63453042bbd054306773a67ce3e406204cfec42338d5d7f4a05" + } + } + }, + "deploy_hashes": [ + "3f69510d52aaa21601d82c4d415856c15af50e9b250a04f6f038c427670fdcbf", + "5ae77f177af25fb12cee62c75261249c108151a02d91f4febcf5afee9c4c8140", + "00c5500f389bc2d1cba67c45d9f8132f2bc7fccdc0402ea42b73d76f71961a3e" + ] + }, + "5bcd384899f8d5a0c05a73b8e77e7276084b06a0d18e8b8196bbc79886d36408": { + "height": 69, + "era": 7, + "approvals_hashes": null, + "signatures": { + "V1": { + "block_hash": "5bcd384899f8d5a0c05a73b8e77e7276084b06a0d18e8b8196bbc79886d36408", + "era_id": 7, + "proofs": { + 
"0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92": "01e5706817fe115d655ce3bdbcc39d3205ab630cb7de6022596156971d9e114990415a957060d43963df17147473ace84263eea39a4b24858967dc0a4bfaa4e105", + "016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c": "01dc75709ba51fb806fc2b5e58af0be5e3c7f324a4dae46a70145eac0e3fe6fda1420ba7f4c8de2d5e9782870e157fd0a663f3cbd465edeb66ce324dd030a9b60e", + "019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523": "01f38c71752cece41707687655c25baa20da9312bbfc975f9006cfba58a1c7133228291347e7e1bc2188eac4332bca8dca7bbe3877bac4efee1d3a485bc59d5c05", + "01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4": "019aae6cd225ef80c5bda0459cc7537b856b814b7eac74d9751072474534fc9b4179ef740d65b067e2329da274ea1adbb4f29964c83914600a51e6fe0ed5c5f004", + "01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca": "01c88ef5480217a1a3877a1b173867993134174ade09d47689b70903a8f7b373df9e9ccb79867f96db7b6312148fa2a975617cd3249c9da25bbaa8709803d3ea09" + } + } + }, + "deploy_hashes": [] + }, + "7438b956bfcde6a2d7ba7dfc16877f6a317350e6dfec7242c6213988770b1b36": { + "height": 39, + "era": 4, + "approvals_hashes": null, + "signatures": { + "V1": { + "block_hash": "7438b956bfcde6a2d7ba7dfc16877f6a317350e6dfec7242c6213988770b1b36", + "era_id": 4, + "proofs": { + "0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92": "0109106bc12a79112245151aa5aa8d0f2c9c988cb2dc6d620288c58686e6a523372579b3dac27401e59765e0dc491fc0dedb0e3aa989bb0a2a8f50cb312fbfe507", + "016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c": "013aac1eb740575433812580497dd9ce79a9d7c88233a75f8bfda086973539a91f47eba7f473c175af5b53945b5c7f717e52604fcd45d9fda02a4f3e381ce7ad07", + "019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523": "01d5644ed7210c70c4f43f48b309928e90142b460baca1cc1c37228120d97ccc5c63b057b75ff58c4bc97ed25358aefa563ac27aa4f390b28f172bce1be66c560a", + 
"01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4": "010ea241ab144f62c7d527fc61bd95e32a2c3ab0a63a2356d1422c5ad99b49b83a76b7b771ebfedd3324436bd877b7f9753b5eee5ed8e1e667017d3c56484f400a", + "01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca": "012bbb276ec8b5badbf2bc05517c12d7807db0aa5d01f133ebc33c4f1d783a5cbcbf51b6ca6355ae26d06e0166f02f42584ec46ba8f4368a519f492d2124e6d30d" + } + } + }, + "deploy_hashes": [] + }, + "327321d3af7a7c7ab1a71ba11bc72f144be0c87a09d92cce6adc4be37f7877e0": { + "height": 68, + "era": 7, + "approvals_hashes": null, + "signatures": { + "V1": { + "block_hash": "327321d3af7a7c7ab1a71ba11bc72f144be0c87a09d92cce6adc4be37f7877e0", + "era_id": 7, + "proofs": { + "0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92": "0133a8c2e506191a0851cdbee9ba3723a5437786c313c1e2b29f7f6218babd36aa003dfc3ec8a9e579720f96bc6fe55e30c5d01e9fe2f6b3ed74d671437a41060a", + "016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c": "0138afc27d809fd593127b76fc806750183f0ecf975b2cf8035357c129cbe4992afc333eeda56ee80a950df8537a581658e3a79e139dafcdee4514ce7014620801", + "019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523": "019f96974fd00d6f42de44ec0fbae92a6b994a9ccb76e2dd6704faa99abfaadf2017ea44ee5e7da6f83d920828319052e99a0d8351223b6da084da425930e6af0e", + "01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4": "01f02c2a88fddda27a7f71248a2bd3660e38670079b51ef93df19a78dca7f1e0dfacb2dbd3b372393463673f57189185d9b36140f0695b3e1fffff0f7b195d4c07", + "01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca": "016805140ea5322148483659d822673c4740c56d8af711049fc6fa5e612286847cef550dea8853b46e2cc73d3994f051ed86c18f65251fbbec2eb78e7a5d74660a" + } + } + }, + "deploy_hashes": [] + }, + "f50447de37b23c28f4133fc2cde2c772bce0af1e49d76690532b614af1d0e33c": { + "height": 75, + "era": 8, + "approvals_hashes": null, + "signatures": { + "V1": { + "block_hash": 
"f50447de37b23c28f4133fc2cde2c772bce0af1e49d76690532b614af1d0e33c", + "era_id": 8, + "proofs": { + "0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92": "01ff02b2ddda99b82fdf9be6215156af4eb61f8e43d0a4039969499352ac1eaee59ef9e435cb6a0128af12b11408e19f1377416c2851daab569f93ea09ec68ab03", + "016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c": "01a9baa6a882daa9f3b71ba60a833a67509f1f29234b2645db1fa611907690677d840691692c263cec187bf017755cdc3193fe85e8f4c2209633d02459c3b1e10b", + "019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523": "01be743da4a9a8185972a40e46b624aa9e979f50d8153bb82a19db559abfc25823cae65d9e1beb0d5399c9ba5c6f3902025d524e46bccb6799cd5fec4c83687f06", + "01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4": "01c08b376e860b67b7f1fd81e5f631d8d9b2fa12652153b4d4f7495d12875cd6ef5db500a4c899113b0fc3ba2c0c0f5d0044064b7d94c9c411abea58e72129450a", + "01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca": "0199ec9093a7114f833fad696893d9ef4d2d98b11491b0fb7add9ae5b3801bc0d9caf471a3e4baf08e78563810f32da0615118c579261a057126abcf8ea447d10a" + } + } + }, + "deploy_hashes": [] + }, + "d08662b263ca4651b9bd64fbcd3f70f47b5c6ce9ee64115cc52b8b03852bd41f": { + "height": 71, + "era": 7, + "approvals_hashes": [ + "2ef51ff9566e7eb1e1ea28274d96057d01036318ea5f2a6e02d859f7ef252b48", + "9e529633309b294838f942be44093904e042bcf7d0fb908ca2e2dc926370c654", + "04379f664120d7d140880706c6b3bac23211376e792e02343339b6e89c8fc524" + ], + "signatures": { + "V1": { + "block_hash": "d08662b263ca4651b9bd64fbcd3f70f47b5c6ce9ee64115cc52b8b03852bd41f", + "era_id": 7, + "proofs": { + "0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92": "016f901233dc957cb9cacd2d90007997afc8a7bd63d937e7690d69eefeb9d5eeb1505a201ff67aa4f4990f71cb2f8b0668b9b9b37d56e5141071c76e0a6bc55a0f", + "016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c": 
"0152826f8a87e6209ccb6f19710e932c21fd1bfd1ce35a23eb5122c5128ddbc22987df7808e9ba183f0443fbbd7d492fb1416b07ec6ff464dea68f1c4c377f5903", + "019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523": "012260ca9f54fc49cf678608f7b7182ed837054ee7845f981c3c246ca4bc309452727c727eef5cd8361002f8e83b1e527b947eacb78721259ed3760e4cb858c902", + "01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4": "0140301844864cac3b64377783c7eeb72f5fcf25390d14aabc8395fc78eb42d5d392ffc7d782bdbaf630dac750cc41520a6ca7476e61bc00dc69e4e35967915c0f", + "01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca": "0168af3aef7194e69365d59426af9febec01748061139f9e871e49f07ff8bb556c48bdd6b964b52a3e7c15f79e736cf26d2a112465c6f55b14282afc501e0d930e" + } + } + }, + "deploy_hashes": [ + "36916afec0f03d9c623d5bea8a75fcd524200171f86bffad870b76c4bbcb34a7", + "6a7ed752ef92764a64ebf88df0c44ed7d58211f89dd7bd6add801540b78e8785", + "d5825b9890a8ff70091f8aae7ef7cc50455c5689b7b0b1c9b37a4e04785e5805" + ] + }, + "d760a41123785884162e45edfb1401603c60714c6cc79cad915387640b3c7c80": { + "height": 89, + "era": 9, + "approvals_hashes": [], + "signatures": { + "V1": { + "block_hash": "d760a41123785884162e45edfb1401603c60714c6cc79cad915387640b3c7c80", + "era_id": 9, + "proofs": { + "0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92": "014a69d3c75ca2cff5cc25aed853d355adbae83609646c26b19c6837a4905db46e15064751a4a2253ddb231ca1ae18c8f5b88dcd48dd7ea9385b0d9befd6f4e40b", + "016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c": "01162d57c9602c3bc3387e1d01cfd9f112dc592ad7d763505962c57753e5f2bceeb8fe3766e17b0dcb9e059e127263c12415b08a75a7cafb03a1736c521c7b2e0d", + "019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523": "013e252c82dd7f7deee6afa749a24cbd0792364d79354b1a1cc7b364a42d518d681ea15ca18a61dd90ae3cd2c3107996b1c59218c341ab400ad09ce54f12dae608", + "01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4": 
"013c5fe5a70327570dc4f6a835ca353b95939a3e96678151d7e7d49bc7401feac63916c6d4a86dff1f97e5cc83e70fdafaef455cc0666bc73505b6e8a192c2f70d", + "01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca": "01e4cc1ec44a703b1a0b8a7b93f75ef0023f38c33e545b1263637470f8c2804c5d09bac20ae122dafca70a439353cc69f5e8518b9f8065bbd95371dcf97f892d02" + } + } + }, + "deploy_hashes": [] + }, + "8af440e30a8a0f3959f6504b19fe387a62d2b021c28862afbfd97707f6e1ef6c": { + "height": 61, + "era": 6, + "approvals_hashes": null, + "signatures": { + "V1": { + "block_hash": "8af440e30a8a0f3959f6504b19fe387a62d2b021c28862afbfd97707f6e1ef6c", + "era_id": 6, + "proofs": { + "0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92": "01a93fa7ed0fa5008c81ddca9dc760875a9bd84eba5c4ca561bb8e7434bb1a89b6d22fe129dd5bac3c68fa941e6d4dd0fc8f29bec15a0d78c9862db91b0a0b2e0f", + "016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c": "01407ba0fe0b7e70211bef8b39fd27f5edf4d2e3aff02194dbefcf9f734821255f3fb9cd0662f1192197375ca22c56499935e0fc5816df9df1bdefa67d62cdc00c", + "019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523": "0140f7c24cd4ee18805778359f799451d5a4da6b70026a92144e60bacdc6b603b884d486fc2072e13616049f56974823fc5d1e07553d08fbda583e1118eccc2404", + "01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4": "01ec88cba12e12a6de4e49e81cb5bbcafaae69b5a9e25991cc848700586920aa1aebe78a1545ef59929e37575d6211fdd9f660dbea6a692a669d82457b419f9802", + "01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca": "0135ab7477aed57f31f7211ef9302e963a4b071346fe6956ad5518241f472b3fb3d469df0aae58b3c27e396775a908ea9a60ddb94c7bc3c0689f04fb3d65f92200" + } + } + }, + "deploy_hashes": [] + }, + "8952435b382f2e66462b4e04cc17b67b80a978bfef4a57b2fdc465839890f240": { + "height": 65, + "era": 7, + "approvals_hashes": null, + "signatures": { + "V1": { + "block_hash": "8952435b382f2e66462b4e04cc17b67b80a978bfef4a57b2fdc465839890f240", + "era_id": 7, + "proofs": { + 
"0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92": "012a5ae2665f218cbe7e70e0a5a3da0e612b93eba6abb900a77279b98121ea26ebbf973e44e6dacfcf3c028d04c061ff31023f63af0b50705dd6b2d845a438e80a", + "016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c": "018241745d90073634de1705af754d7227a4d37056077c4d2b136309feccd7faa0c76174a185b9a2b69cc1d03fff38debcb34bfeeb2088f6a2204eaac480748608", + "019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523": "01bfdce3909d4474e2cb2feea13de1d4597d1ad0cdee56713fe48aac8e2550267eff2032ff1a34b9454398f0c9685e10ee9bddff9ff2e6edb27dbb0dfd91df610e", + "01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4": "01cfbaba578f82a6f6f5bb3336b6d2c7288d6e704a5b1d92348a2ece3cbe1b52258424dfbf414905f415a9a10469b1d117e582c4bb97c19bdc4c5b4a6063468808", + "01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca": "01844e70fea67d5a715dc65d099fcd25e7bc60bf98f884bc416f735514596dba83d1fbceaebb29768ef1075d941cb66806f19634ccde8f777aa7408d319410fe0d" + } + } + }, + "deploy_hashes": [] + }, + "ee09fa098a05f4fb6db77d700805389d4c099e09a7fe44cda0ca3baacf78c150": { + "height": 62, + "era": 7, + "approvals_hashes": null, + "signatures": { + "V1": { + "block_hash": "ee09fa098a05f4fb6db77d700805389d4c099e09a7fe44cda0ca3baacf78c150", + "era_id": 7, + "proofs": { + "0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92": "01bcab36324f166cfe835ac4e7e3b87798eba274c89aa27f89f694382f6aed169ec15b9c2326b9ac57ec2a020a6c75ebb258000a6a16f7fcb2a659f293c51b480a", + "016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c": "01ad6e3cdbcba3ba61cb69d7fe0849765554cde2b6ab03ca1bd5efc4a87ad9347d131b55b15e312e768702625d9a7784ba3091e062d038a2fc7ece027b5d96740b", + "019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523": "013520068954881e81331a299ad2143b6dfb3ce929e01f8f1ac2927a571fbcd28867cbaf9cb029571e59206b9f508644b9ef200ce0cd26eadfd62cd2767f213209", + 
"01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4": "014fe6a703b2dd02d13b33fe43b3d8684ad71c98f543778aec5365c61a1ae0a0646bc29a52c268d2a3dcde9d7f61cd82a4ab4e3f8df94309a277b1b77536ba610e", + "01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca": "0153e3f03cd865894151085dcbb843837e909bc9145196ee4334b1b40fcfb91c12a950fdb0fd56d56344d03eb921750da89bce51e15b3314103a2b6e7a17364e04" + } + } + }, + "deploy_hashes": [] + }, + "89af4615b4254df1d8af49becad7e998e4b296d1cb0cfea07524a0fd1589223b": { + "height": 49, + "era": 5, + "approvals_hashes": null, + "signatures": { + "V1": { + "block_hash": "89af4615b4254df1d8af49becad7e998e4b296d1cb0cfea07524a0fd1589223b", + "era_id": 5, + "proofs": { + "0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92": "015e07a579305638638cfe266b6e2e1f11d1bc8b07a615173f688cf9e7df0584b8265a689d722b52ed10bc7f0d75fa0e7c1036f35392fd3ea6439c40e241157203", + "016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c": "0187f05e9224380f0f81fa77d36f74c36cd39eaa711b46bb4b4b8978ec5690adc1208052ba43b5755368817290615481a09d044da2b9872cad2fcc024392138a03", + "019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523": "01115c591b8f77c142304e682b0517dac6128eb27bbcf83f849f9f77a67480410cf7948bd48f956fe18628224cf2bc319b50fb1916d979068990fc8892774dde09", + "01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4": "01816d3e9cf8836ece45c7a54dd2372deffc416ef0af03f508f5fe210b78debb7196f01d28e5a92cc0c9b5919eecdb48d1ed87698ae1b210ca06a3596f2f6b320e", + "01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca": "019cc260438112197ff50d777a2d4bf83920ecc317a293affe5cafbf6794bb09d9b32d6515cab233668d76d8efea5c53c018c75a82a73fc86f9c7add1e4ba82602" + } + } + }, + "deploy_hashes": [] + }, + "3de15c392cd63c17e53fcbdbb6ff544d86016bcee9b79e79f63f33ac14c489f9": { + "height": 91, + "era": 9, + "approvals_hashes": [], + "signatures": { + "V1": { + "block_hash": 
"3de15c392cd63c17e53fcbdbb6ff544d86016bcee9b79e79f63f33ac14c489f9", + "era_id": 9, + "proofs": { + "0141a6deb6cb64ff1cfd2c1e1c360253f916f6f26f0e1e056571ab27dfac988f92": "01f9c5bd4be0acf631e5bf9cfb1e9588aeef2fe0d6965d79b5496257dc06d4261e85e9ff0960a95d13313c268d2adb49b57bd77b34185d3636f22e89cb7fed3508", + "016fd7ba314c3ecf97dd518a94a6e087eafe8456bf1760e50b6e3e8e633149987c": "011561b7700db49c4d2c4915c7af51c8c018b29d5180b954db947b51c095915e322698dbdd035cf82aac82e3f415256eb728ccd5e1e9fce91517320f38bf4e8b05", + "019714c55306f3870ae7f781885eef9de8239da028205c6d18b90598e6637b7523": "017498f7a7c23d2205f47fe8cb1492bf7975ed80ad81e9b9e4e6de97de685cdeb7b0a422114a84b10e025d90f61327010b0c9644ec12a689dbb07fc1eccb340a0e", + "01ac4faea6049265220b355cf81cc3dff9ffdfe8a050525815e14d5cfe82a14cc4": "0156a5c0ef6d96b450d16248a65c68668852fc32429a859b8af29d81c9c28839f6ad2f2b872434cb6d6a6a447b21364f9b79d5026c44113d131152725a1dc6f10f", + "01e048d866c65abc1d83b632dfd1760d43581e1109d52837e6d4679cc298387fca": "01fa32312fadb626abc4ff34692e05016d71b2cc5586332060c94cdacd1bd2060af1694f9cb42514b4816aea1ea6e644160bcf5b8bce67b505107e9220565ba901" + } + } + }, + "deploy_hashes": [] + } + }, + "deploys": [ + "2af0f120ccd04bead484b6053d87cb41930e650cfb8f2b7c2ba37c5f3f5de701", + "aa2ae8d13b0087c762dfc34cc5b3d0a30d04382a2fc707f259225915b342c09f", + "2a6c3ec008d34e9fc614bc6c264af5e3834a847d5690211a3393ffdfa4206a1b", + "975cd4ddf71c0fe4c81439e9b4fc09ace17fd644efd455b995c3445403b18cba", + "faaff0d5bb9e1b68616a7225b37840f87bba48b2257607033a96fccaa4e0401c", + "f1c728bb6a8217bcf4c41d0d415d08ffd043f2b15de2c949ddad4e072cf56c57", + "6942cff9e560c7d0864b812f30bea56e59e26be5136bc188197e1916fb3fab9d", + "163025024c68c001b1a940c2e63ae4e4654adea462078841b2f66ff6c9e04074", + "1b9d7591e1e800a19a99647849decdb074925f270ea16540ec9b356de9721e4c", + "448bbee6f586414eccb05fb97d28f26e59a4bc542497b323bcad51d71af92d6e", + "a9af14844140bab502565a0e6cacbff0227b19e4209d3495c97e275e5b6a6dac", + 
"706050b93a7e9bc5a19fd5fab792eb5ef5a9854ec3181fbb054f5dc7d4ee170f", + "a97dc9782e2dce920821d08ee85f2853b5dbf97c57c9349772ed36c723eb3542", + "ef35f210444ef48eb4bea3ea58867037cbd4106af59cba36a533e20625c01523", + "660eeb2cb9fdd88ad3296de498e38b9b97291cccf19cfb1b5c477c2c94f87b65", + "82c80204bbf55182a6785e92d4ff59f500d9cf561aca305b25d7f95ef258f7bf", + "e55f6646b64c204ca7f55d244a8879e5ac1e982a049b1d577f80418274490249", + "2533d10d8eaaae38d9b3b8870e17ed535434e4708a84e444c669299e84abde3a", + "4a743f9843488b64a3940da46870977917f6e58e32bf1dbdeec84f8c24a33116", + "2fb450c21452b99bb8a21bc3a71a9e3266f540cde8577f671b857c0edcda2827", + "36916afec0f03d9c623d5bea8a75fcd524200171f86bffad870b76c4bbcb34a7", + "6a7ed752ef92764a64ebf88df0c44ed7d58211f89dd7bd6add801540b78e8785", + "d5825b9890a8ff70091f8aae7ef7cc50455c5689b7b0b1c9b37a4e04785e5805", + "9ab82d1a1c70f7ae053c6c45157d40c6ac0d93d087d4f574d35edbe39dd483b0", + "b87c10c4ba0846b615f77ee42f50a10279a9ed4d810671623952aa8d81988234", + "a397110248532ae6749617a4bf1f26b3a0130ff03c97d3489d1d66163c169857", + "3306c637dcb597811449816c39a4d8293ce21edc60310b56cdabeaa6ba252bf8", + "159ffefef5d88b2cb99fcceb471a3d2f0c2910c7c9de2db2c1e9845b60766224", + "58223e9b91d3ec7b73453217adbff91ccc2d34d514105bf9889599d1e9122b21", + "ed4ce8a502ef432f8d66be72afd80fe1415f15e710e39d2b63971a6f737c2c4c", + "abf88aa99476275ca657eaec26264c8b441fc5e6a8682f234e73d63163ed2415", + "35216eb0d783e53accf6e74a118c85e9674c856c04d22330cbd8497913741507", + "888c0d7a5988cb47eb83e1e7c4fa58a8d229b58b61cf932cc8f0622c864e707c", + "2651cb3fb060926d9f5edadfec7a41c7ff71f29f9fb9548f1c3624f5351e9a6b", + "4821b3e2a53f31ccbe3645a8fc17aab6e6ec9e7dc38fbfbbc2e96194da443181", + "1c98f5f335f0cf980192d56548a80ffb7b8d8c30e60f82765bc1a7ed9b41542a", + "5eb79399b62d08320e321d73ceb50523327d26149c6728a8d8a1ff33dabe722a", + "d6926f2291b62c998cfd73ffc154d1e8259dbc3daa101fdaa24752e65d85027d", + "ab8bc98911a606aade80c3e24b854d971d52d9e20a12744e0f97dcf95fea9ac9", + 
"607559135a1b9c446b53446b3cff100b3ff0f6efb474019a09562b83b833bd1c", + "ebb11e7fb132d580752254a1166bcf9fe2569f4952c97227de8847eb456c094c", + "ece0da05da8a349d5fcf89552c24dea8f17f876697e272b2175790a11b0d8521", + "e5f4a0ec09d7918c9871de663a677c2ad6bb88141e3825a5a8e57b4956a62d3a", + "3f69510d52aaa21601d82c4d415856c15af50e9b250a04f6f038c427670fdcbf", + "5ae77f177af25fb12cee62c75261249c108151a02d91f4febcf5afee9c4c8140", + "00c5500f389bc2d1cba67c45d9f8132f2bc7fccdc0402ea42b73d76f71961a3e" + ] +} \ No newline at end of file diff --git a/resources/test/valid/0_9_0/accounts.toml b/resources/test/valid/0_9_0/accounts.toml deleted file mode 100644 index 2e948ac592..0000000000 --- a/resources/test/valid/0_9_0/accounts.toml +++ /dev/null @@ -1,39 +0,0 @@ -[[accounts]] -public_key = "0148bc7fdb0375d480fbd03e77f74ffedc30b9f3954455fe04da15843a0a6af0c7" -balance = "1" - -[accounts.validator] -bonded_amount = "10" - -[[accounts]] -public_key = "011f66ea6321a48a935f66e97d4f7e60ee2d7fc9ccc62dfbe310f33b4839fc62eb" -balance = "2" - -[accounts.validator] -bonded_amount = "20" - -[[accounts]] -public_key = "0189e744783c2d70902a5f2ef78e82e1f44102b5eb08ca6234241d95e50f615a6b" -balance = "3" - -[accounts.validator] -bonded_amount = "30" - -[[accounts]] -public_key = "01569b41d574c46390212d698660b5326269ddb0a761d1294258897ac717b4958b" -balance = "4" - -[accounts.validator] -bonded_amount = "40" - -[[delegators]] -validator_public_key = "01569b41d574c46390212d698660b5326269ddb0a761d1294258897ac717b4958b" -delegator_public_key = "020248509e67db3127f82d5224c5c18eac00f96d1edeadbadc8eb2c8606227b56873" -balance = "0" -delegated_amount = "10" - -[[delegators]] -validator_public_key = "011f66ea6321a48a935f66e97d4f7e60ee2d7fc9ccc62dfbe310f33b4839fc62eb" -delegator_public_key = "020249c3a6d5006cd7634b54647f889265b85d36d837c066179548758b4eedab8186" -balance = "0" -delegated_amount = "20" diff --git a/resources/test/valid/0_9_0/chainspec.toml b/resources/test/valid/0_9_0/chainspec.toml deleted file mode 
100644 index 5992f417dd..0000000000 --- a/resources/test/valid/0_9_0/chainspec.toml +++ /dev/null @@ -1,140 +0,0 @@ -[protocol] -version = '0.9.0' -hard_reset = false -activation_point = '2020-09-18T18:45:00Z' - -[network] -name = 'test-chain' -maximum_net_message_size = 23_068_672 - -[core] -era_duration = '3minutes' -minimum_era_height = 9 -validator_slots = 5 -auction_delay = 3 -locked_funds_period = '90days' -round_seigniorage_rate = [6_414, 623_437_335_209] -unbonding_delay = 14 - -[highway] -finality_threshold_fraction = [2, 25] -minimum_round_exponent = 14 -maximum_round_exponent = 19 -reduced_reward_multiplier = [1, 5] - -[deploys] -max_payment_cost = '9' -max_ttl = '10months' -max_dependencies = 11 -max_block_size = 12 -block_max_deploy_count = 125 -block_max_transfer_count = 1000 -block_gas_limit = 13 -payment_args_max_length = 1024 -session_args_max_length = 1024 -native_transfer_minimum_motes = 2_500_000_000 - -[wasm] -max_memory = 17 -max_stack_height = 19 - -[wasm.opcode_costs] -bit = 13 -add = 14 -mul = 15 -div = 16 -load = 17 -store = 18 -const = 19 -local = 20 -global = 21 -control_flow = 22 -integer_comparison = 23 -conversion = 24 -unreachable = 25 -nop = 26 -current_memory = 27 -grow_memory = 28 -regular = 29 - -[wasm.storage_costs] -gas_per_byte = 101 - -[wasm.host_function_costs] -add = { cost = 100, arguments = [0, 1, 2, 3] } -add_associated_key = { cost = 101, arguments = [0, 1, 2] } -add_contract_version = { cost = 102, arguments = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] } -blake2b = { cost = 133, arguments = [0, 1, 2, 3] } -call_contract = { cost = 104, arguments = [0, 1, 2, 3, 4, 5, 6] } -call_versioned_contract = { cost = 105, arguments = [0, 1, 2, 3, 4, 5, 6, 7, 8] } -create_contract_package_at_hash = { cost = 106, arguments = [0, 1] } -create_contract_user_group = { cost = 107, arguments = [0, 1, 2, 3, 4, 5, 6, 7] } -create_purse = { cost = 108, arguments = [0, 1] } -disable_contract_version = { cost = 109, arguments = [0, 1, 2, 3] } 
-get_balance = { cost = 110, arguments = [0, 1, 2] } -get_blocktime = { cost = 111, arguments = [0] } -get_caller = { cost = 112, arguments = [0] } -get_key = { cost = 113, arguments = [0, 1, 2, 3, 4] } -get_main_purse = { cost = 114, arguments = [0] } -get_named_arg = { cost = 115, arguments = [0, 1, 2, 3] } -get_named_arg_size = { cost = 116, arguments = [0, 1, 2] } -get_phase = { cost = 117, arguments = [0] } -get_system_contract = { cost = 118, arguments = [0, 1, 2] } -has_key = { cost = 119, arguments = [0, 1] } -is_valid_uref = { cost = 120, arguments = [0, 1] } -load_named_keys = { cost = 121, arguments = [0, 1] } -new_uref = { cost = 122, arguments = [0, 1, 2] } -print = { cost = 123, arguments = [0, 1] } -provision_contract_user_group_uref = { cost = 124, arguments = [0,1,2,3,4] } -put_key = { cost = 125, arguments = [0, 1, 2, 3] } -read_host_buffer = { cost = 126, arguments = [0, 1, 2] } -read_value = { cost = 127, arguments = [0, 1, 0] } -read_value_local = { cost = 128, arguments = [0, 1, 0] } -remove_associated_key = { cost = 129, arguments = [0, 1] } -remove_contract_user_group = { cost = 130, arguments = [0, 1, 2, 3] } -remove_contract_user_group_urefs = { cost = 131, arguments = [0,1,2,3,4,5] } -remove_key = { cost = 132, arguments = [0, 1] } -ret = { cost = 133, arguments = [0, 1] } -revert = { cost = 134, arguments = [0] } -set_action_threshold = { cost = 135, arguments = [0, 1] } -transfer_from_purse_to_account = { cost = 136, arguments = [0, 1, 2, 3, 4, 5, 6, 7, 8] } -transfer_from_purse_to_purse = { cost = 137, arguments = [0, 1, 2, 3, 4, 5, 6, 7] } -transfer_to_account = { cost = 138, arguments = [0, 1, 2, 3, 4, 5, 6] } -update_associated_key = { cost = 139, arguments = [0, 1, 2] } -write = { cost = 140, arguments = [0, 1, 0, 2] } -write_local = { cost = 141, arguments = [0, 1, 2, 3] } - -[system_costs] -wasmless_transfer_cost = 10_000 - -[system_costs.auction_costs] -get_era_validators = 10_000 -read_seigniorage_recipients = 10_000 -add_bid = 
10_000 -withdraw_bid = 10_000 -delegate = 10_000 -undelegate = 10_000 -run_auction = 10_000 -slash = 10_000 -distribute = 10_000 -withdraw_delegator_reward = 10_000 -withdraw_validator_reward = 10_000 -read_era_id = 10_000 -activate_bid = 10_000 - -[system_costs.mint_costs] -mint = 10_000 -reduce_total_supply = 10_000 -create = 10_000 -balance = 10_000 -transfer = 10_000 -read_base_round_reward = 10_000 - -[system_costs.handle_payment_costs] -get_payment_purse = 10_000 -set_refund_purse = 10_000 -get_refund_purse = 10_000 -finalize_payment = 10_000 - -[system_costs.standard_payment_costs] -pay = 10_000 diff --git a/resources/test/valid/0_9_0_unordered/accounts.toml b/resources/test/valid/0_9_0_unordered/accounts.toml deleted file mode 100644 index 9da6540e2c..0000000000 --- a/resources/test/valid/0_9_0_unordered/accounts.toml +++ /dev/null @@ -1,40 +0,0 @@ -[[delegators]] -validator_public_key = "011f66ea6321a48a935f66e97d4f7e60ee2d7fc9ccc62dfbe310f33b4839fc62eb" -delegator_public_key = "020249c3a6d5006cd7634b54647f889265b85d36d837c066179548758b4eedab8186" -balance = "0" -delegated_amount = "20" - - -[[accounts]] -public_key = "0189e744783c2d70902a5f2ef78e82e1f44102b5eb08ca6234241d95e50f615a6b" -balance = "3" - -[accounts.validator] -bonded_amount = "30" - -[[accounts]] -public_key = "01569b41d574c46390212d698660b5326269ddb0a761d1294258897ac717b4958b" -balance = "4" - -[accounts.validator] -bonded_amount = "40" - -[[delegators]] -validator_public_key = "01569b41d574c46390212d698660b5326269ddb0a761d1294258897ac717b4958b" -delegator_public_key = "020248509e67db3127f82d5224c5c18eac00f96d1edeadbadc8eb2c8606227b56873" -balance = "0" -delegated_amount = "10" - -[[accounts]] -public_key = "011f66ea6321a48a935f66e97d4f7e60ee2d7fc9ccc62dfbe310f33b4839fc62eb" -balance = "2" - -[accounts.validator] -bonded_amount = "20" - -[[accounts]] -public_key = "0148bc7fdb0375d480fbd03e77f74ffedc30b9f3954455fe04da15843a0a6af0c7" -balance = "1" - -[accounts.validator] -bonded_amount = 
"10" diff --git a/resources/test/valid/0_9_0_unordered/chainspec.toml b/resources/test/valid/0_9_0_unordered/chainspec.toml deleted file mode 100644 index 5992f417dd..0000000000 --- a/resources/test/valid/0_9_0_unordered/chainspec.toml +++ /dev/null @@ -1,140 +0,0 @@ -[protocol] -version = '0.9.0' -hard_reset = false -activation_point = '2020-09-18T18:45:00Z' - -[network] -name = 'test-chain' -maximum_net_message_size = 23_068_672 - -[core] -era_duration = '3minutes' -minimum_era_height = 9 -validator_slots = 5 -auction_delay = 3 -locked_funds_period = '90days' -round_seigniorage_rate = [6_414, 623_437_335_209] -unbonding_delay = 14 - -[highway] -finality_threshold_fraction = [2, 25] -minimum_round_exponent = 14 -maximum_round_exponent = 19 -reduced_reward_multiplier = [1, 5] - -[deploys] -max_payment_cost = '9' -max_ttl = '10months' -max_dependencies = 11 -max_block_size = 12 -block_max_deploy_count = 125 -block_max_transfer_count = 1000 -block_gas_limit = 13 -payment_args_max_length = 1024 -session_args_max_length = 1024 -native_transfer_minimum_motes = 2_500_000_000 - -[wasm] -max_memory = 17 -max_stack_height = 19 - -[wasm.opcode_costs] -bit = 13 -add = 14 -mul = 15 -div = 16 -load = 17 -store = 18 -const = 19 -local = 20 -global = 21 -control_flow = 22 -integer_comparison = 23 -conversion = 24 -unreachable = 25 -nop = 26 -current_memory = 27 -grow_memory = 28 -regular = 29 - -[wasm.storage_costs] -gas_per_byte = 101 - -[wasm.host_function_costs] -add = { cost = 100, arguments = [0, 1, 2, 3] } -add_associated_key = { cost = 101, arguments = [0, 1, 2] } -add_contract_version = { cost = 102, arguments = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] } -blake2b = { cost = 133, arguments = [0, 1, 2, 3] } -call_contract = { cost = 104, arguments = [0, 1, 2, 3, 4, 5, 6] } -call_versioned_contract = { cost = 105, arguments = [0, 1, 2, 3, 4, 5, 6, 7, 8] } -create_contract_package_at_hash = { cost = 106, arguments = [0, 1] } -create_contract_user_group = { cost = 107, arguments = [0, 
1, 2, 3, 4, 5, 6, 7] } -create_purse = { cost = 108, arguments = [0, 1] } -disable_contract_version = { cost = 109, arguments = [0, 1, 2, 3] } -get_balance = { cost = 110, arguments = [0, 1, 2] } -get_blocktime = { cost = 111, arguments = [0] } -get_caller = { cost = 112, arguments = [0] } -get_key = { cost = 113, arguments = [0, 1, 2, 3, 4] } -get_main_purse = { cost = 114, arguments = [0] } -get_named_arg = { cost = 115, arguments = [0, 1, 2, 3] } -get_named_arg_size = { cost = 116, arguments = [0, 1, 2] } -get_phase = { cost = 117, arguments = [0] } -get_system_contract = { cost = 118, arguments = [0, 1, 2] } -has_key = { cost = 119, arguments = [0, 1] } -is_valid_uref = { cost = 120, arguments = [0, 1] } -load_named_keys = { cost = 121, arguments = [0, 1] } -new_uref = { cost = 122, arguments = [0, 1, 2] } -print = { cost = 123, arguments = [0, 1] } -provision_contract_user_group_uref = { cost = 124, arguments = [0,1,2,3,4] } -put_key = { cost = 125, arguments = [0, 1, 2, 3] } -read_host_buffer = { cost = 126, arguments = [0, 1, 2] } -read_value = { cost = 127, arguments = [0, 1, 0] } -read_value_local = { cost = 128, arguments = [0, 1, 0] } -remove_associated_key = { cost = 129, arguments = [0, 1] } -remove_contract_user_group = { cost = 130, arguments = [0, 1, 2, 3] } -remove_contract_user_group_urefs = { cost = 131, arguments = [0,1,2,3,4,5] } -remove_key = { cost = 132, arguments = [0, 1] } -ret = { cost = 133, arguments = [0, 1] } -revert = { cost = 134, arguments = [0] } -set_action_threshold = { cost = 135, arguments = [0, 1] } -transfer_from_purse_to_account = { cost = 136, arguments = [0, 1, 2, 3, 4, 5, 6, 7, 8] } -transfer_from_purse_to_purse = { cost = 137, arguments = [0, 1, 2, 3, 4, 5, 6, 7] } -transfer_to_account = { cost = 138, arguments = [0, 1, 2, 3, 4, 5, 6] } -update_associated_key = { cost = 139, arguments = [0, 1, 2] } -write = { cost = 140, arguments = [0, 1, 0, 2] } -write_local = { cost = 141, arguments = [0, 1, 2, 3] } - -[system_costs] 
-wasmless_transfer_cost = 10_000 - -[system_costs.auction_costs] -get_era_validators = 10_000 -read_seigniorage_recipients = 10_000 -add_bid = 10_000 -withdraw_bid = 10_000 -delegate = 10_000 -undelegate = 10_000 -run_auction = 10_000 -slash = 10_000 -distribute = 10_000 -withdraw_delegator_reward = 10_000 -withdraw_validator_reward = 10_000 -read_era_id = 10_000 -activate_bid = 10_000 - -[system_costs.mint_costs] -mint = 10_000 -reduce_total_supply = 10_000 -create = 10_000 -balance = 10_000 -transfer = 10_000 -read_base_round_reward = 10_000 - -[system_costs.handle_payment_costs] -get_payment_purse = 10_000 -set_refund_purse = 10_000 -get_refund_purse = 10_000 -finalize_payment = 10_000 - -[system_costs.standard_payment_costs] -pay = 10_000 diff --git a/resources/test/valid/1_0_0/chainspec.toml b/resources/test/valid/1_0_0/chainspec.toml deleted file mode 100644 index 01a44630cf..0000000000 --- a/resources/test/valid/1_0_0/chainspec.toml +++ /dev/null @@ -1,140 +0,0 @@ -[protocol] -version = '1.0.0' -hard_reset = false -activation_point = 1 - -[network] -name = 'test-chain' -maximum_net_message_size = 23_068_672 - -[core] -era_duration = '3minutes' -minimum_era_height = 9 -validator_slots = 5 -auction_delay = 3 -locked_funds_period = '90days' -round_seigniorage_rate = [6_414, 623_437_335_209] -unbonding_delay = 14 - -[highway] -finality_threshold_fraction = [2, 25] -minimum_round_exponent = 14 -maximum_round_exponent = 19 -reduced_reward_multiplier = [1, 5] - -[deploys] -max_payment_cost = '9' -max_ttl = '10months' -max_dependencies = 11 -max_block_size = 12 -block_max_deploy_count = 125 -block_max_transfer_count = 1000 -block_gas_limit = 13 -payment_args_max_length = 1024 -session_args_max_length = 1024 -native_transfer_minimum_motes = 2_500_000_000 - -[wasm] -max_memory = 17 -max_stack_height = 19 - -[wasm.opcode_costs] -bit = 13 -add = 14 -mul = 15 -div = 16 -load = 17 -store = 18 -const = 19 -local = 20 -global = 21 -control_flow = 22 -integer_comparison = 23 
-conversion = 24 -unreachable = 25 -nop = 26 -current_memory = 27 -grow_memory = 28 -regular = 29 - -[wasm.storage_costs] -gas_per_byte = 101 - -[wasm.host_function_costs] -add = { cost = 100, arguments = [0, 1, 2, 3] } -add_associated_key = { cost = 101, arguments = [0, 1, 2] } -add_contract_version = { cost = 102, arguments = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] } -blake2b = { cost = 133, arguments = [0, 1, 2, 3] } -call_contract = { cost = 104, arguments = [0, 1, 2, 3, 4, 5, 6] } -call_versioned_contract = { cost = 105, arguments = [0, 1, 2, 3, 4, 5, 6, 7, 8] } -create_contract_package_at_hash = { cost = 106, arguments = [0, 1] } -create_contract_user_group = { cost = 107, arguments = [0, 1, 2, 3, 4, 5, 6, 7] } -create_purse = { cost = 108, arguments = [0, 1] } -disable_contract_version = { cost = 109, arguments = [0, 1, 2, 3] } -get_balance = { cost = 110, arguments = [0, 1, 2] } -get_blocktime = { cost = 111, arguments = [0] } -get_caller = { cost = 112, arguments = [0] } -get_key = { cost = 113, arguments = [0, 1, 2, 3, 4] } -get_main_purse = { cost = 114, arguments = [0] } -get_named_arg = { cost = 115, arguments = [0, 1, 2, 3] } -get_named_arg_size = { cost = 116, arguments = [0, 1, 2] } -get_phase = { cost = 117, arguments = [0] } -get_system_contract = { cost = 118, arguments = [0, 1, 2] } -has_key = { cost = 119, arguments = [0, 1] } -is_valid_uref = { cost = 120, arguments = [0, 1] } -load_named_keys = { cost = 121, arguments = [0, 1] } -new_uref = { cost = 122, arguments = [0, 1, 2] } -print = { cost = 123, arguments = [0, 1] } -provision_contract_user_group_uref = { cost = 124, arguments = [0,1,2,3,4] } -put_key = { cost = 125, arguments = [0, 1, 2, 3] } -read_host_buffer = { cost = 126, arguments = [0, 1, 2] } -read_value = { cost = 127, arguments = [0, 1, 0] } -read_value_local = { cost = 128, arguments = [0, 1, 0] } -remove_associated_key = { cost = 129, arguments = [0, 1] } -remove_contract_user_group = { cost = 130, arguments = [0, 1, 2, 3] } 
-remove_contract_user_group_urefs = { cost = 131, arguments = [0,1,2,3,4,5] } -remove_key = { cost = 132, arguments = [0, 1] } -ret = { cost = 133, arguments = [0, 1] } -revert = { cost = 134, arguments = [0] } -set_action_threshold = { cost = 135, arguments = [0, 1] } -transfer_from_purse_to_account = { cost = 136, arguments = [0, 1, 2, 3, 4, 5, 6, 7, 8] } -transfer_from_purse_to_purse = { cost = 137, arguments = [0, 1, 2, 3, 4, 5, 6, 7] } -transfer_to_account = { cost = 138, arguments = [0, 1, 2, 3, 4, 5, 6] } -update_associated_key = { cost = 139, arguments = [0, 1, 2] } -write = { cost = 140, arguments = [0, 1, 0, 2] } -write_local = { cost = 141, arguments = [0, 1, 2, 3] } - -[system_costs] -wasmless_transfer_cost = 10_000 - -[system_costs.auction_costs] -get_era_validators = 10_000 -read_seigniorage_recipients = 10_000 -add_bid = 10_000 -withdraw_bid = 10_000 -delegate = 10_000 -undelegate = 10_000 -run_auction = 10_000 -slash = 10_000 -distribute = 10_000 -withdraw_delegator_reward = 10_000 -withdraw_validator_reward = 10_000 -read_era_id = 10_000 -activate_bid = 10_000 - -[system_costs.mint_costs] -mint = 10_000 -reduce_total_supply = 10_000 -create = 10_000 -balance = 10_000 -transfer = 10_000 -read_base_round_reward = 10_000 - -[system_costs.handle_payment_costs] -get_payment_purse = 10_000 -set_refund_purse = 10_000 -get_refund_purse = 10_000 -finalize_payment = 10_000 - -[system_costs.standard_payment_costs] -pay = 10_000 diff --git a/resources/test/valid/1_0_0/global_state.toml b/resources/test/valid/1_0_0/global_state.toml deleted file mode 100644 index 853529cb80..0000000000 --- a/resources/test/valid/1_0_0/global_state.toml +++ /dev/null @@ -1,3 +0,0 @@ -[[entries]] -key = "hash-4242424242424242424242424242424242424242424242424242424242424242" -value = "AAQAAAAqAAAABA==" diff --git a/resources/testnet/chainspec.toml b/resources/testnet/chainspec.toml new file mode 100644 index 0000000000..57df6de82e --- /dev/null +++ 
b/resources/testnet/chainspec.toml @@ -0,0 +1,505 @@ +[protocol] +# Protocol version. +version = '2.0.4' +# Whether we need to clear latest blocks back to the switch block just before the activation point or not. +hard_reset = true +# This protocol version becomes active at this point. +# +# If it is a timestamp string, it represents the timestamp for the genesis block. This is the beginning of era 0. By +# this time, a sufficient majority (> 50% + F/2 — see finality_threshold_fraction below) of validator nodes must be up +# and running to start the blockchain. This timestamp is also used in seeding the pseudo-random number generator used +# in contract-runtime for computing genesis post-state hash. +# +# If it is an integer, it represents an era ID, meaning the protocol version becomes active at the start of this era. +activation_point = 19267 + +[network] +# Human readable name for convenience; the genesis_hash is the true identifier. The name influences the genesis hash by +# contributing to the seeding of the pseudo-random number generator used in contract-runtime for computing genesis +# post-state hash. +name = 'casper-test' +# The maximum size of an acceptable networking message in bytes. Any message larger than this will +# be rejected at the networking level. +maximum_net_message_size = 25_165_824 + +[core] +# Era duration. +era_duration = '120 minutes' +# Minimum number of blocks per era. An era will take longer than `era_duration` if that is necessary to reach the +# minimum height. +minimum_era_height = 20 +# Minimum difference between a block's and its child's timestamp. +minimum_block_time = '16384 ms' +# Number of slots available in validator auction. +validator_slots = 100 +# A number between 0 and 1 representing the fault tolerance threshold as a fraction, used by the internal finalizer. 
+# It is the fraction of validators that would need to equivocate to make two honest nodes see two conflicting blocks as +# finalized: A higher value F makes it safer to rely on finalized blocks. It also makes it more difficult to finalize +# blocks, however, and requires strictly more than (F + 1)/2 validators to be working correctly. +finality_threshold_fraction = [1, 3] +# Protocol version from which nodes are required to hold strict finality signatures. +start_protocol_version_with_strict_finality_signatures_required = '1.5.0' +# Which finality is required for legacy blocks. Options are 'Strict', 'Weak' and 'Any'. +# Used to determine finality sufficiency for new joiners syncing blocks created +# in a protocol version before +# `start_protocol_version_with_strict_finality_signatures_required`. +legacy_required_finality = 'Any' +# Number of eras before an auction actually defines the set of validators. If you bond with a sufficient bid in era N, +# you will be a validator in era N + auction_delay + 1. +auction_delay = 1 +# The period after genesis during which a genesis validator's bid is locked. +locked_funds_period = '0 days' +# The period in which genesis validator's bid is released over time after it's unlocked. +vesting_schedule_period = '0 weeks' +# Default number of eras that need to pass to be able to withdraw unbonded funds. +unbonding_delay = 7 +# Round seigniorage rate represented as a fraction of the total supply. +# +# Annual issuance: 0.25% +# Minimum round exponent: 14 +# Ticks per year: 31536000000 +# +# (1+0.0025)^((2^14)/31536000000)-1 is expressed as a fractional number below +# Python: +# from fractions import Fraction +# Fraction((1 + 0.0025)**((2**14)/31536000000) - 1).limit_denominator(1000000000) +round_seigniorage_rate = [1, 770883818] +# Maximum number of associated keys for a single account. +max_associated_keys = 100 +# Maximum height of contract runtime call stack. 
+max_runtime_call_stack_height = 12 +# Minimum allowed delegation amount in motes +minimum_delegation_amount = 500_000_000_000 +# Maximum allowed delegation amount in motes +maximum_delegation_amount = 1_000_000_000_000_000_000 +# Minimum bid amount allowed in motes. Withdrawing one's bid to an amount strictly less than +# the value specified will be treated as a full unbond of a validator and their associated delegators +minimum_bid_amount = 10_000_000_000_000 +# Global state prune batch size (0 = this feature is off) +prune_batch_size = 0 +# Enables strict arguments checking when calling a contract; i.e. that all non-optional args are provided and of the correct `CLType`. +strict_argument_checking = false +# Number of simultaneous peer requests. +simultaneous_peer_requests = 5 +# The consensus protocol to use. Options are "Zug" and "Highway". +consensus_protocol = 'Zug' +# The maximum amount of delegators per validator. +max_delegators_per_validator = 1200 +# The split in finality signature rewards between block producer and participating signers. +finders_fee = [1, 5] +# The proportion of baseline rewards going to reward finality signatures specifically. +finality_signature_proportion = [95, 100] +# Lookback interval indicating which past block we are looking at to reward. +signature_rewards_max_delay = 3 +# Allows transfers between accounts in the blockchain network. +# +# Setting this to false restricts normal accounts from sending tokens to other accounts, allowing transfers only to administrators. +# Changing this option makes sense only on private chains. +allow_unrestricted_transfers = true +# Enables the auction entry points 'delegate' and 'add_bid'. +# +# Setting this to false makes sense only for private chains which don't need to auction new validator slots. These +# auction entry points will return an error if called when this option is set to false. 
+allow_auction_bids = true +# If set to false, then consensus doesn't compute rewards and always uses 0. +compute_rewards = true +# Defines how refunds of the unused portion of payment amounts are calculated and handled. +# +# Valid options are: +# 'refund': a ratio of the unspent token is returned to the spender. +# 'burn': a ratio of the unspent token is burned. +# 'no_refund': no refunds are paid out; this is functionally equivalent to refund with 0% ratio. +# This causes excess payment amounts to be sent to either a +# pre-defined purse, or back to the sender. The refunded amount is calculated as the given ratio of the payment amount +# minus the execution costs. +refund_handling = { type = 'refund', refund_ratio = [75, 100] } +# Defines how fees are handled. +# +# Valid options are: +# 'no_fee': fees are eliminated. +# 'pay_to_proposer': fees are paid to the block proposer +# 'accumulate': fees are accumulated in a special purse and distributed at the end of each era evenly among all +# administrator accounts +# 'burn': fees are burned +fee_handling = { type = 'pay_to_proposer' } +# If a validator would recieve a validator credit, it cannot exceed this percentage of their total stake. +validator_credit_cap = [1, 5] +# Defines how pricing is handled. +# +# Valid options are: +# 'payment_limited': senders of transaction self-specify how much they pay. +# 'fixed': costs are fixed, per the cost table +# 'prepaid': prepaid transaction (currently not supported) +pricing_handling = { type = 'payment_limited' } +# Does the network allow pre-payment for future +# execution? Currently not supported. +# +allow_prepaid = false +# Defines how gas holds affect available balance calculations. +# +# Valid options are: +# 'accrued': sum of full value of all non-expired holds. +# 'amortized': sum of each hold is amortized over the time remaining until expiry. 
+# +# For instance, if 12 hours remained on a gas hold with a 24-hour `gas_hold_interval`, +# with accrued, the full hold amount would be applied +# with amortized, half the hold amount would be applied +gas_hold_balance_handling = { type = 'accrued' } +# Defines how long gas holds last. +# +# If fee_handling is set to 'no_fee', the system places a balance hold on the payer +# equal to the value the fee would have been. Such balance holds expire after a time +# interval has elapsed. This setting controls how long that interval is. The available +# balance of a purse equals its total balance minus the held amount(s) of non-expired +# holds (see gas_hold_balance_handling setting for details of how that is calculated). +# +# For instance, if gas_hold_interval is 24 hours and 100 gas is used from a purse, +# a hold for 100 is placed on that purse and is considered when calculating total balance +# for 24 hours starting from the block_time when the hold was placed. +gas_hold_interval = '24 hours' +# List of public keys of administrator accounts. Setting this option makes only on private chains which require +# administrator accounts for regulatory reasons. +administrators = [] +# Flag that triggers a migration of all userland accounts and contracts present in global state to the addressable +# entity in lazy manner. +# If the flag is set to false then no accounts and contracts are migrated during a protocol upgrade; +# i.e. all Account records will be present under Key::Account and Contracts and their associated ContractPackage +# will be written underneath Key::Hash. +# If the flag is set to true then accounts and contracts are migrated lazily; i.e on first use of the Account +# and/or Contract as part of the execution of a Transaction. This means the Accounts/Contracts will be migrated +# to their corresponding AddressableEntity and the NamedKeys for previous record and sepeareted and wrriten +# as discrete top level records. 
For Contracts specifically the entrypoints are also written as discrete top +# level records +# Note: Enabling of the AddressableEntity feature is one-way; i.e once enabled as part of a protocol upgrade +# the flag cannot be disabled in a future protocol upgrade. +enable_addressable_entity = false +# This value is used as the penalty payment amount, the lowest cost, and the minimum balance amount. +baseline_motes_amount = 2_500_000_000 +# Flag on whether ambiguous entity versions returns an execution error. +trap_on_ambiguous_entity_version = false + +[highway] +# Highway dynamically chooses its round length, between minimum_block_time and maximum_round_length. +maximum_round_length = '66 seconds' + +[transactions] +# The duration after the transaction timestamp that it can be included in a block. +max_ttl = '2 hours' +# The maximum number of approvals permitted in a single block. +block_max_approval_count = 2600 +# Maximum block size in bytes including transactions contained by the block. 0 means unlimited. +max_block_size = 5_242_880 +# The upper limit of total gas of all transactions in a block. +block_gas_limit = 1_625_000_000_000 +# The minimum amount in motes for a valid native transfer. +native_transfer_minimum_motes = 2_500_000_000 +# The maximum value to which `transaction_acceptor.timestamp_leeway` can be set in the config.toml file. +max_timestamp_leeway = '5 seconds' + +# Configuration of the transaction runtime. +[transactions.enabled_runtime] +vm_casper_v1 = true +vm_casper_v2 = false + +[transactions.v1] +# The configuration settings for the lanes of transactions including both native and Wasm based interactions. +# Currently the node supports two native interactions the mint and auction and have the reserved identifiers of 0 and 1 +# respectively +# The remaining wasm based lanes specify the range of configuration settings for a given Wasm based transaction +# within a given lane. +# The maximum length in bytes of runtime args per V1 transaction. 
+# [0] -> Transaction lane label (apart from the reserved native identifiers these are simply labels) +# Note: For the given mainnet implementation we specially reserve the label 2 for install and upgrades and +# the lane must be present and defined. +# Different casper networks may not impose such a restriction. +# [1] -> Max serialized length of the entire transaction in bytes for a given transaction in a certain lane +# [2] -> Max args length size in bytes for a given transaction in a certain lane +# [3] -> Transaction gas limit for a given transaction in a certain lane +# [4] -> The maximum number of transactions the lane can contain +native_mint_lane = [0, 2048, 1024, 100_000_000, 650] +native_auction_lane = [1, 3096, 2048, 2_500_000_000, 650] +install_upgrade_lane = [2, 750_000, 2048, 1_000_000_000_000, 1] +wasm_lanes = [ + [3, 750_000, 2048, 1_000_000_000_000, 1], + [4, 131_072, 1024, 100_000_000_000, 2], + [5, 65_536, 512, 5_000_000_000, 80] +] + +[transactions.deploy] +# The maximum number of Motes allowed to be spent during payment. 0 means unlimited. +max_payment_cost = '0' +# The limit of length of serialized payment code arguments. +payment_args_max_length = 1024 +# The limit of length of serialized session code arguments. +session_args_max_length = 1024 + +[wasm.v1] +# Amount of free memory (in 64kB pages) each contract can use for stack. +max_memory = 64 +# Max stack height (native WebAssembly stack limiter). +max_stack_height = 500 + +[storage_costs] +# Gas charged per byte stored in the global state. +gas_per_byte = 1_117_587 + +# For each opcode cost below there exists a static cost and a dynamic cost. +# The static cost is a fixed cost for each opcode that is hardcoded and validated by benchmarks. +[wasm.v1.opcode_costs] +# Bit operations multiplier. +bit = 105 +# Arithmetic add operations multiplier. +add = 105 +# Mul operations multiplier. +mul = 105 +# Div operations multiplier. +div = 105 +# Memory load operation multiplier. 
+load = 105 +# Memory store operation multiplier. +store = 105 +# Const store operation multiplier. +const = 105 +# Local operations multiplier. +local = 105 +# Global operations multiplier. +global = 105 +# Integer operations multiplier. +integer_comparison = 105 +# Conversion operations multiplier. +conversion = 105 +# Unreachable operation multiplier. +unreachable = 105 +# Nop operation multiplier. +nop = 105 +# Get current memory operation multiplier. +current_memory = 105 +# Grow memory cost, per page (64kb). +grow_memory = 900 +# Sign extension operations cost +sign = 105 + +# Control flow operations multiplier. +[wasm.v1.opcode_costs.control_flow] +block = 255 +loop = 255 +if = 105 +else = 105 +end = 105 +br = 1665 +br_if = 510 +return = 105 +select = 105 +call = 225 +call_indirect = 270 +drop = 105 + +[wasm.v1.opcode_costs.control_flow.br_table] +# Fixed cost per `br_table` opcode +cost = 150 +# Size of target labels in the `br_table` opcode will be multiplied by `size_multiplier` +size_multiplier = 100 + +# Host function declarations are located in smart_contracts/contract/src/ext_ffi.rs +[wasm.v1.host_function_costs] +add = { cost = 5_800, arguments = [0, 0, 0, 0] } +add_associated_key = { cost = 1_200_000, arguments = [0, 0, 0] } +add_contract_version = { cost = 200, arguments = [0, 0, 0, 0, 120_000, 0, 0, 0, 0, 0] } +add_contract_version_with_message_topics = { cost = 200, arguments = [0, 0, 0, 0, 120_000, 0, 0, 0, 30_000, 0, 0] } +add_package_version_with_message_topics = { cost = 200, arguments = [0, 0, 0, 0, 120_000, 0, 0, 0, 30_000, 0, 0] } +blake2b = { cost = 1_200_000, arguments = [0, 120_000, 0, 0] } +call_contract = { cost = 300_000_000, arguments = [0, 0, 0, 120_000, 0, 120_000, 0] } +call_versioned_contract = { cost = 300_000_000, arguments = [0, 0, 0, 0, 0, 120_000, 0, 120_000, 0] } +create_contract_package_at_hash = { cost = 200, arguments = [0, 0] } +create_contract_user_group = { cost = 200, arguments = [0, 0, 0, 0, 0, 0, 0, 0] } 
+create_purse = { cost = 2_500_000_000, arguments = [0, 0] } +disable_contract_version = { cost = 200, arguments = [0, 0, 0, 0] } +get_balance = { cost = 3_000_000, arguments = [0, 0, 0] } +get_blocktime = { cost = 330, arguments = [0] } +get_caller = { cost = 380, arguments = [0] } +get_key = { cost = 2_000, arguments = [0, 440, 0, 0, 0] } +get_main_purse = { cost = 1_300, arguments = [0] } +get_named_arg = { cost = 200, arguments = [0, 120_000, 0, 120_000] } +get_named_arg_size = { cost = 200, arguments = [0, 0, 0] } +get_phase = { cost = 710, arguments = [0] } +get_system_contract = { cost = 1_100, arguments = [0, 0, 0] } +has_key = { cost = 1_500, arguments = [0, 840] } +is_valid_uref = { cost = 760, arguments = [0, 0] } +load_named_keys = { cost = 42_000, arguments = [0, 0] } +new_uref = { cost = 17_000, arguments = [0, 0, 590] } +random_bytes = { cost = 200, arguments = [0, 0] } +print = { cost = 20_000, arguments = [0, 4_600] } +provision_contract_user_group_uref = { cost = 200, arguments = [0, 0, 0, 0, 0] } +put_key = { cost = 100_000_000, arguments = [0, 120_000, 0, 120_000] } +read_host_buffer = { cost = 3_500, arguments = [0, 310, 0] } +read_value = { cost = 60_000, arguments = [0, 120_000, 0] } +dictionary_get = { cost = 5_500, arguments = [0, 590, 0] } +remove_associated_key = { cost = 4_200, arguments = [0, 0] } +remove_contract_user_group = { cost = 200, arguments = [0, 0, 0, 0] } +remove_contract_user_group_urefs = { cost = 200, arguments = [0, 0, 0, 0, 0, 120_000] } +remove_key = { cost = 61_000, arguments = [0, 3_200] } +ret = { cost = 23_000, arguments = [0, 420_000] } +revert = { cost = 500, arguments = [0] } +set_action_threshold = { cost = 74_000, arguments = [0, 0] } +transfer_from_purse_to_account = { cost = 2_500_000_000, arguments = [0, 0, 0, 0, 0, 0, 0, 0, 0] } +transfer_from_purse_to_purse = { cost = 82_000_000, arguments = [0, 0, 0, 0, 0, 0, 0, 0] } +transfer_to_account = { cost = 2_500_000_000, arguments = [0, 0, 0, 0, 0, 0, 0] } 
+update_associated_key = { cost = 4_200, arguments = [0, 0, 0] } +write = { cost = 14_000, arguments = [0, 0, 0, 980] } +dictionary_put = { cost = 9_500, arguments = [0, 1_800, 0, 520] } +enable_contract_version = { cost = 200, arguments = [0, 0, 0, 0] } +manage_message_topic = { cost = 200, arguments = [0, 30_000, 0, 0] } +emit_message = { cost = 200, arguments = [0, 30_000, 0, 120_000] } +generic_hash = { cost = 1_200_000, arguments = [0, 120_000, 0, 0, 0] } +cost_increase_per_message = 50 +get_block_info = { cost = 330, arguments = [0, 0] } +recover_secp256k1 = { cost = 1_300_000, arguments = [0, 120_000, 0, 0, 0, 0] } +verify_signature = { cost = 1_300_000, arguments = [0, 120_000, 0, 0, 0, 0] } +call_package_version = { cost = 300_000_000, arguments = [0, 0, 0, 0, 0, 0, 0, 120_000, 0, 120_000, 0] } + +[wasm.v2] +# Amount of free memory each contract can use for stack. +max_memory = 17 + +[wasm.v2.opcode_costs] +# Bit operations multiplier. +bit = 105 +# Arithmetic add operations multiplier. +add = 105 +# Mul operations multiplier. +mul = 105 +# Div operations multiplier. +div = 105 +# Memory load operation multiplier. +load = 105 +# Memory store operation multiplier. +store = 105 +# Const store operation multiplier. +const = 105 +# Local operations multiplier. +local = 105 +# Global operations multiplier. +global = 105 +# Integer operations multiplier. +integer_comparison = 105 +# Conversion operations multiplier. +conversion = 105 +# Unreachable operation multiplier. +unreachable = 105 +# Nop operation multiplier. +nop = 105 +# Get current memory operation multiplier. +current_memory = 105 +# Grow memory cost, per page (64kb). +grow_memory = 900 +# Sign extension operations cost +sign = 105 + +# Control flow operations multiplier. 
+[wasm.v2.opcode_costs.control_flow] +block = 255 +loop = 255 +if = 105 +else = 105 +end = 105 +br = 1665 +br_if = 510 +return = 105 +select = 105 +call = 225 +call_indirect = 270 +drop = 105 + +[wasm.v2.opcode_costs.control_flow.br_table] +# Fixed cost per `br_table` opcode +cost = 150 +# Size of target labels in the `br_table` opcode will be multiplied by `size_multiplier` +size_multiplier = 100 + +[wasm.v2.host_function_costs] +read = { cost = 0, arguments = [0, 0, 0, 0, 0, 0] } +write = { cost = 0, arguments = [0, 0, 0, 0, 0] } +remove = { cost = 0, arguments = [0, 0, 0] } +copy_input = { cost = 0, arguments = [0, 0] } +ret = { cost = 0, arguments = [0, 0] } +create = { cost = 0, arguments = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] } +transfer = { cost = 0, arguments = [0, 0, 0] } +env_balance = { cost = 0, arguments = [0, 0, 0, 0] } +upgrade = { cost = 0, arguments = [0, 0, 0, 0, 0, 0] } +call = { cost = 0, arguments = [0, 0, 0, 0, 0, 0, 0, 0, 0] } +print = { cost = 0, arguments = [0, 0] } +emit = { cost = 0, arguments = [0, 0, 0, 0] } +env_info = { cost = 0, arguments = [0, 0] } + +[wasm.messages_limits] +max_topic_name_size = 256 +max_topics_per_contract = 128 +max_message_size = 1_024 + +[system_costs] +# Penalty charge for calling invalid entry point in a system contract. 
+no_such_entrypoint = 2_500_000_000
+
+[system_costs.auction_costs]
+get_era_validators = 2_500_000_000
+read_seigniorage_recipients = 5_000_000_000
+add_bid = 2_500_000_000
+withdraw_bid = 2_500_000_000
+delegate = 2_500_000_000
+undelegate = 2_500_000_000
+run_auction = 2_500_000_000
+slash = 2_500_000_000
+distribute = 2_500_000_000
+withdraw_delegator_reward = 5_000_000_000
+withdraw_validator_reward = 5_000_000_000
+read_era_id = 2_500_000_000
+activate_bid = 2_500_000_000
+redelegate = 2_500_000_000
+change_bid_public_key = 5_000_000_000
+add_reservations = 2_500_000_000
+cancel_reservations = 2_500_000_000
+
+[system_costs.mint_costs]
+mint = 2_500_000_000
+reduce_total_supply = 2_500_000_000
+create = 2_500_000_000
+balance = 100_000_000
+burn = 100_000_000
+transfer = 100_000_000
+read_base_round_reward = 2_500_000_000
+mint_into_existing_purse = 2_500_000_000
+
+[system_costs.handle_payment_costs]
+get_payment_purse = 10_000
+set_refund_purse = 10_000
+get_refund_purse = 10_000
+finalize_payment = 2_500_000_000
+
+[system_costs.standard_payment_costs]
+pay = 10_000
+
+[vacancy]
+# The cost of a transaction is based on a multiplier. This allows for economic disincentives for misuse of the network.
+#
+# The network starts with a current_gas_price of min_gas_price.
+#
+# Each block has multiple limits (bytes, transactions, transfers, gas, etc.)
+# The utilization for a block is determined by the highest percentage utilization of each of these limits.
+#
+# Ex: transfers limit is 650 and transactions limit is 20 (assume other limits are not a factor here)
+# 19 transactions -> 19/20 or 95%
+# 600 transfers -> 600/650 or 92.3%
+# resulting block utilization is 95
+#
+# The utilization for an era is the average of all block utilizations. At the switch block, the dynamic gas_price is
+# adjusted with the following:
+#
+# If utilization was below the lower_threshold, current_gas_price is decremented by one if higher than min_gas_price.
+# If utilization falls between the thresholds, current_gas_price is not changed. +# If utilization was above the upper_threshold, current_gas_price is incremented by one if lower than max_gas_price. +# +# The cost charged for the transaction is simply the gas_used * current_gas_price. +upper_threshold = 90 +lower_threshold = 50 +max_gas_price = 1 +min_gas_price = 1 diff --git a/resources/testnet/config-example.toml b/resources/testnet/config-example.toml new file mode 100644 index 0000000000..d8538acf06 --- /dev/null +++ b/resources/testnet/config-example.toml @@ -0,0 +1,651 @@ +# ================================ +# Configuration options for a node +# ================================ +[node] + +# If set, use this hash as a trust anchor when joining an existing network. +#trusted_hash = 'HEX-FORMATTED BLOCK HASH' + +# Historical sync behavior for this node. Options are: +# 'ttl' (node will attempt to acquire all block data to comply with time to live enforcement) +# 'genesis' (node will attempt to acquire all block data back to genesis) +# 'nosync' (node will only acquire blocks moving forward) +# 'isolated' (node will initialize without peers and will not accept peers) +# 'completeblock' (node will acquire complete block and shutdown) +# note: the only two states allowed to switch to Validate reactor state are `genesis` and `ttl`. +# it is recommended for dedicated validator nodes to be in ttl mode to increase +# their ability to maintain maximal uptime...if a long-running genesis validator +# goes offline and comes back up while in genesis mode, it must backfill +# any gaps in its block awareness before resuming validation. +# +# it is recommended for reporting non-validator nodes to be in genesis mode to +# enable support for queries at any block height. +# +# it is recommended for non-validator working nodes (for dapp support, etc) to run in +# ttl or nosync mode (depending upon their specific data requirements). 
+#
+# thus for instance a node backing a block explorer would prefer genesis mode,
+# while a node backing a dapp interested in very recent activity would prefer to run in nosync mode,
+# and a node backing a dapp interested in auction activity or tracking trends would prefer to run in ttl mode.
+# note: as time goes on, the time to sync back to genesis takes progressively longer.
+# note: ttl is a chainspec configured behavior on a given network; consult the `max_ttl` chainspec setting
+# (it is currently ~18 hours by default on production and production-like networks but subject to change).
+# note: `nosync` is incompatible with validator behavior; a nosync node is prevented from participating
+# in consensus / switching to validate mode. it is primarily for lightweight nodes that are
+# only interested in recent activity.
+# note: an isolated node will not connect to, sync with, or keep up with the network, but will respond to
+# binary port, rest server, event server, and diagnostic port connections.
+sync_handling = 'ttl'
+
+# Idle time after which the syncing process is considered stalled.
+idle_tolerance = '20 minutes'
+
+# When the syncing process is considered stalled, it'll be retried up to `max_attempts` times.
+max_attempts = 3
+
+# Default delay for the control events that have no dedicated delay requirements.
+control_logic_default_delay = '1 second'
+
+# Flag which forces the node to resync all of the blocks.
+force_resync = false
+
+# A timeout for the ShutdownForUpgrade state, after which the node will upgrade even if not all
+# conditions are satisfied.
+shutdown_for_upgrade_timeout = '2 minutes'
+
+# Maximum time a node will wait for an upgrade to commit.
+upgrade_timeout = '3 hours'
+
+# The node detects when it should do a controlled shutdown when it is in a detectably bad state
+# in order to avoid potentially catastrophic uncontrolled crashes.
Generally, a node should be +# allowed to shutdown, and if restarted that node will generally recover gracefully and resume +# normal operation. However, actively validating nodes have subjective state in memory that is +# lost on shutdown / restart and must be reacquired from other validating nodes on restart. +# If all validating nodes shutdown in the middle of an era, social consensus is required to restart +# the network. As a mitigation for that, the following config can be set to true on some validator +# nodes to cause nodes that are supposed to be validators in the current era to ignore controlled +# shutdown events and stay up. This allows them to act as sentinels for the consensus data for +# other restarting nodes. This config is inert on non-validating nodes. +prevent_validator_shutdown = false + +# ================================= +# Configuration options for logging +# ================================= +[logging] + +# Output format. Possible values are 'text' or 'json'. +format = 'json' + +# Colored output. Has no effect if format = 'json'. +color = false + +# Abbreviate module names in text output. Has no effect if format = 'json'. +abbreviate_modules = false + + +# =================================== +# Configuration options for consensus +# =================================== +[consensus] + +# Path (absolute, or relative to this config.toml) to validator's secret key file used to sign +# consensus messages. +secret_key_path = '/etc/casper/validator_keys/secret_key.pem' + +# The maximum number of blocks by which execution is allowed to lag behind finalization. +# If it is more than that, consensus will pause, and resume once the executor has caught up. +max_execution_delay = 3 + + +# ======================================= +# Configuration options for Zug consensus +# ======================================= +[consensus.zug] + +# Request the latest protocol state from a random peer periodically, with this interval. 
+# '0 seconds' means it is disabled and we never request the protocol state from a peer. +sync_state_interval = '1 second' + +# Log inactive or faulty validators periodically, with this interval. +# '0 seconds' means it is disabled and we never print the log message. +log_participation_interval = '1 minute' + +# The minimal proposal timeout. Validators wait this long for a proposal to receive a quorum of +# echo messages, before they vote to make the round skippable and move on to the next proposer. +proposal_timeout = '10 seconds' + +# The additional proposal delay that is still considered fast enough, in percent. This should +# take into account variables like empty vs. full blocks, network traffic etc. +# E.g. if proposing a full block while under heavy load takes 50% longer than an empty one +# while idle this should be at least 50, meaning that the timeout is 50% longer than +# necessary for a quorum of recent proposals, approximately. +proposal_grace_period = 200 + +# The average number of rounds after which the proposal timeout adapts by a factor of 2. +# Note: It goes up faster than it goes down: it takes fewer rounds to double than to halve. +proposal_timeout_inertia = 10 + +# The maximum difference between validators' clocks we expect. Incoming proposals whose timestamp +# lies in the future by more than that are rejected. +clock_tolerance = '1 second' + + +# =========================================== +# Configuration options for Highway consensus +# =========================================== +[consensus.highway] + +# The duration for which incoming vertices with missing dependencies should be kept in a queue. +pending_vertex_timeout = '30 minutes' + +# Request the latest protocol state from a random peer periodically, with this interval. +# '0 seconds' means it is disabled and we never request the protocol state from a peer. +request_state_interval = '20 seconds' + +# Log inactive or faulty validators periodically, with this interval. 
+# '0 seconds' means it is disabled and we never print the log message. +log_participation_interval = '1 minute' + +# Log the synchronizer state periodically, with this interval. +# '0 seconds' means it is disabled and we never print the log message. +log_synchronizer_interval = '5 seconds' + +# Log the size of every incoming and outgoing serialized unit. +log_unit_sizes = false + +# The maximum number of peers we request the same vertex from in parallel. +max_requests_for_vertex = 5 + +# The maximum number of dependencies we request per validator in a batch. +# Limits requests per validator in panorama - in order to get a total number of +# requests, multiply by # of validators. +max_request_batch_size = 20 + +[consensus.highway.round_success_meter] +# The number of most recent rounds we will be keeping track of. +num_rounds_to_consider = 40 + +# The number of successful rounds that triggers us to slow down: With this many or fewer +# successes per `num_rounds_to_consider`, we increase our round length. +num_rounds_slowdown = 10 + +# The number of successful rounds that triggers us to speed up: With this many or more successes +# per `num_rounds_to_consider`, we decrease our round length. +num_rounds_speedup = 32 + +# We will try to accelerate (decrease our round length) every `acceleration_parameter` rounds if +# we have few enough failures. +acceleration_parameter = 40 + +# The FTT, as a percentage (i.e. `acceleration_ftt = [1, 100]` means 1% of the validators' total weight), which +# we will use for looking for a summit in order to determine a proposal's finality. +# The required quorum in a summit we will look for to check if a round was successful is +# determined by this FTT. +acceleration_ftt = [1, 100] + + +# ==================================== +# Configuration options for networking +# ==================================== +[network] + +# The public address of the node. +# +# It must be publicly available in order to allow peers to connect to this node. 
+# If the port is set to 0, the actual bound port will be substituted. +public_address = ':0' + +# Address to bind to for listening. +# If port is set to 0, a random port will be used. +bind_address = '0.0.0.0:35000' + +# Addresses to connect to in order to join the network. +# +# If not set, this node will not be able to attempt to connect to the network. Instead it will +# depend upon peers connecting to it. This is normally only useful for the first node of the +# network. +# +# Multiple addresses can be given and the node will attempt to connect to each, requiring at least +# one connection. +known_addresses = ['135.148.34.2:35000','135.148.34.29:35000','51.91.83.147:35000','51.75.241.109:35000'] + +# Minimum number of fully-connected peers to consider network component initialized. +min_peers_for_initialization = 3 + +# The interval between each fresh round of gossiping the node's public address. +gossip_interval = '120 seconds' + +# Initial delay for starting address gossipping after the network starts. This should be slightly +# more than the expected time required for initial connections to complete. +initial_gossip_delay = '5 seconds' + +# How long a connection is allowed to be stuck as pending before it is abandoned. +max_addr_pending_time = '1 minute' + +# Maximum time allowed for a connection handshake between two nodes to be completed. Connections +# exceeding this threshold are considered unlikely to be healthy or even malicious and thus +# terminated. +handshake_timeout = '20 seconds' + +# Maximum number of incoming connections per unique peer allowed. If the limit is hit, additional +# connections will be rejected. A value of `0` means unlimited. +max_incoming_peer_connections = 3 + +# The maximum total of upstream bandwidth in bytes per second allocated to non-validating peers. +# A value of `0` means unlimited. 
+max_outgoing_byte_rate_non_validators = 6553600
+
+# The maximum allowed total impact of requests from non-validating peers per second answered.
+# A value of `0` means unlimited.
+max_incoming_message_rate_non_validators = 3000
+
+# Maximum number of requests for data from a single peer that are allowed to be buffered. A value of
+# `0` means unlimited.
+max_in_flight_demands = 50
+
+# Version threshold to enable tarpit for.
+#
+# When set to a version (the value may be `null` to disable the feature), any peer that reports a
+# protocol version equal or below the threshold will be rejected only after holding open the
+# connection for a specific (`tarpit_duration`) amount of time.
+#
+# This option makes most sense to enable on known nodes with addresses where legacy nodes that are
+# still in operation are connecting to, as these older versions will only attempt to reconnect to
+# other nodes once they have exhausted their set of known nodes.
+tarpit_version_threshold = '1.2.1'
+
+# How long to hold connections to trapped legacy nodes.
+tarpit_duration = '10 minutes'
+
+# The probability [0.0, 1.0] of this node trapping a legacy node.
+#
+# Since older nodes will only reconnect if all their options are exhausted, it is sufficient for a
+# single known node to hold open a connection to prevent the node from reconnecting. This should be
+# set to `1/n` or higher, with `n` being the number of known nodes expected in the configuration of
+# legacy nodes running this software.
+tarpit_chance = 0.2
+
+# Minimum time a peer is kept on block list before being redeemed. The actual
+# timeout duration is calculated by selecting a random value between
+# `blocklist_retain_min_duration` and `blocklist_retain_max_duration`.
+blocklist_retain_min_duration = '2 minutes'
+
+# Maximum time a peer is kept on block list before being redeemed. The actual
+# timeout duration is calculated by selecting a random value between
+# `blocklist_retain_min_duration` and `blocklist_retain_max_duration`.
+blocklist_retain_max_duration = '10 minutes'
+
+# Identity of a node
+#
+# When this section is not specified, an identity will be generated when the node process starts with a self-signed certificate.
+# This option makes sense for some private chains where for security reasons joining new nodes is restricted.
+# [network.identity]
+# tls_certificate = "node_cert.pem"
+# secret_key = "node.pem"
+# ca_certificate = "ca_cert.pem"
+
+# Weights for impact estimation of incoming messages, used in combination with
+# `max_incoming_message_rate_non_validators`.
+#
+# Any weight set to 0 means that the category of traffic is exempt from throttling.
+[network.estimator_weights]
+consensus = 0
+block_gossip = 1
+transaction_gossip = 0
+finality_signature_gossip = 1
+address_gossip = 0
+finality_signature_broadcasts = 0
+transaction_requests = 1
+transaction_responses = 0
+legacy_deploy_requests = 1
+legacy_deploy_responses = 0
+block_requests = 1
+block_responses = 0
+block_header_requests = 1
+block_header_responses = 0
+trie_requests = 1
+trie_responses = 0
+finality_signature_requests = 1
+finality_signature_responses = 0
+sync_leap_requests = 1
+sync_leap_responses = 0
+approvals_hashes_requests = 1
+approvals_hashes_responses = 0
+execution_results_requests = 1
+execution_results_responses = 0
+
+# ==================================================
+# Configuration options for the BinaryPort server
+# ==================================================
+[binary_port_server]
+
+# Flag which enables the BinaryPort server.
+enable_server = true
+
+# Listening address for BinaryPort server.
+address = '0.0.0.0:7779'
+
+# Flag that enables the `AllValues` get request. Disabled by default, because it can potentially be abused to retrieve huge amounts of data and clog the node.
+allow_request_get_all_values = true
+
+# Flag that enables the `Trie` get request. Disabled by default, because it can potentially be abused to retrieve huge amounts of data and clog the node.
+allow_request_get_trie = false
+
+# Flag that enables the `TrySpeculativeExec` request. Disabled by default.
+allow_request_speculative_exec = false
+
+# Maximum size of a message in bytes.
+max_message_size_bytes = 134_217_728
+
+# Maximum number of connections to the server.
+max_connections = 5
+
+# The global max rate of requests (per second) before they are limited.
+# The implementation uses a sliding window algorithm.
+qps_limit = 110
+
+# Initial time given to a connection before it expires
+initial_connection_lifetime = '10 seconds'
+
+#The amount of time which is given to a connection to extend its lifetime when a valid
+# [`Command::Get(GetRequest::Record)`] is sent to the node
+get_record_request_termination_delay = '0 seconds'
+
+#The amount of time which is given to a connection to extend its lifetime when a valid
+#[`Command::Get(GetRequest::Information)`] is sent to the node
+get_information_request_termination_delay = '5 seconds'
+
+#The amount of time which is given to a connection to extend its lifetime when a valid
+#[`Command::Get(GetRequest::State)`] is sent to the node
+get_state_request_termination_delay = '0 seconds'
+
+#The amount of time which is given to a connection to extend its lifetime when a valid
+#[`Command::Get(GetRequest::Trie)`] is sent to the node
+get_trie_request_termination_delay = '0 seconds'
+
+#The amount of time which is given to a connection to extend its lifetime when a valid
+#[`Command::TryAcceptTransaction`] is sent to the node
+accept_transaction_request_termination_delay = '24 seconds'
+
+#The amount of time which is given to a connection to extend its lifetime when a valid
+#[`Command::TrySpeculativeExec`] is sent to the node
+speculative_exec_request_termination_delay = '0 seconds'
+
+
+# ==============================================
+# Configuration options for the REST HTTP server
+# ==============================================
+[rest_server]
+
+# Flag which enables the REST HTTP server.
+enable_server = true + +# Listening address for REST HTTP server. If the port is set to 0, a random port will be used. +# +# If the specified port cannot be bound to, a random port will be tried instead. If binding fails, +# the REST HTTP server will not run, but the node will be otherwise unaffected. +# +# The actual bound address will be reported via a log line if logging is enabled. +address = '0.0.0.0:8888' + +# The global max rate of requests (per second) before they are limited. +# Request will be delayed to the next 1 second bucket once limited. +qps_limit = 100 + +# Specifies which origin will be reported as allowed by REST server. +# +# If left empty, CORS will be disabled. +# If set to '*', any origin is allowed. +# Otherwise, only a specified origin is allowed. The given string must conform to the [origin scheme](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin). +cors_origin = '' + + +# ========================================================== +# Configuration options for the SSE HTTP event stream server +# ========================================================== +[event_stream_server] + +# Flag which enables the SSE HTTP event stream server. +enable_server = true + +# Listening address for SSE HTTP event stream server. If the port is set to 0, a random port will be used. +# +# If the specified port cannot be bound to, a random port will be tried instead. If binding fails, +# the SSE HTTP event stream server will not run, but the node will be otherwise unaffected. +# +# The actual bound address will be reported via a log line if logging is enabled. +address = '0.0.0.0:9999' + +# The number of event stream events to buffer. +event_stream_buffer_length = 5000 + +# The maximum number of subscribers across all event streams the server will permit at any one time. +max_concurrent_subscribers = 100 + +# Specifies which origin will be reported as allowed by event stream server. +# +# If left empty, CORS will be disabled. 
+# If set to '*', any origin is allowed. +# Otherwise, only a specified origin is allowed. The given string must conform to the [origin scheme](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin). +cors_origin = '' + +# =============================================== +# Configuration options for the storage component +# =============================================== +[storage] + +# Path (absolute, or relative to this config.toml) to the folder where any files created +# or read by the storage component will exist. A subfolder named with the network name will be +# automatically created and used for the storage component files. +# +# If the folder doesn't exist, it and any required parents will be created. +# +# If unset, the path must be supplied as an argument via the CLI. +path = '/var/lib/casper/casper-node' + +# Maximum size of the database to use for the block store. +# +# The size should be a multiple of the OS page size. +# +# 483_183_820_800 == 450 GiB. +max_block_store_size = 483_183_820_800 + +# Maximum size of the database to use for the deploy store. +# +# The size should be a multiple of the OS page size. +# +# 322_122_547_200 == 300 GiB. +max_deploy_store_size = 322_122_547_200 + +# Maximum size of the database to use for the deploy metadata. +# +# The size should be a multiple of the OS page size. +# +# 322_122_547_200 == 300 GiB. +max_deploy_metadata_store_size = 322_122_547_200 + +# Maximum size of the database to use for the state snapshots. +# +# The size should be a multiple of the OS page size. +# +# 10_737_418_240 == 10 GiB. +max_state_store_size = 10_737_418_240 + +# Memory deduplication. +# +# If enabled, nodes will attempt to share loaded objects if possible. +enable_mem_deduplication = true + +# Memory duplication garbage collection. +# +# Sets the frequency how often the memory pool cache is swept for free references. 
+# For example, setting this value to 5 means that every 5th time something is put in the pool the cache is swept. +mem_pool_prune_interval = 4096 + + +# =================================== +# Configuration options for gossiping +# =================================== +[gossip] + +# Target number of peers to infect with a given piece of data. +infection_target = 3 + +# The saturation limit as a percentage, with a maximum value of 99. Used as a termination +# condition. +# +# Example: assume the `infection_target` is 3, the `saturation_limit_percent` is 80, and we don't +# manage to newly infect 3 peers. We will stop gossiping once we know of more than 15 holders +# excluding us since 80% saturation would imply 3 new infections in 15 peers. +saturation_limit_percent = 80 + +# The maximum duration for which to keep finished entries. +# +# The longer they are retained, the lower the likelihood of re-gossiping a piece of data. However, +# the longer they are retained, the larger the list of finished entries can grow. +finished_entry_duration = '1 minute' + +# The timeout duration for a single gossip request, i.e. for a single gossip message +# sent from this node, it will be considered timed out if the expected response from that peer is +# not received within this specified duration. +gossip_request_timeout = '30 seconds' + +# The timeout duration for retrieving the remaining part(s) of newly-discovered data +# from a peer which gossiped information about that data to this node. +get_remainder_timeout = '5 seconds' + +# The timeout duration for a newly-received, gossiped item to be validated and stored by another +# component before the gossiper abandons waiting to gossip the item onwards. 
+validate_and_store_timeout = '1 minute' + + +# =============================================== +# Configuration options for the block accumulator +# =============================================== +[block_accumulator] + +# Block height difference threshold for starting to execute the blocks. +attempt_execution_threshold = 3 + +# Accepted time interval for inactivity in block accumulator. +dead_air_interval = '3 minutes' + +# Time after which the block acceptors are considered old and can be purged. +purge_interval = '1 minute' + + +# ================================================ +# Configuration options for the block synchronizer +# ================================================ +[block_synchronizer] + +# Maximum number of fetch-trie tasks to run in parallel during block synchronization. +max_parallel_trie_fetches = 5000 + +# Time interval for the node to ask for refreshed peers. +peer_refresh_interval = '90 seconds' + +# Time interval for the node to check what the block synchronizer needs to acquire next. +need_next_interval = '1 second' + +# Time interval for recurring disconnection of dishonest peers. +disconnect_dishonest_peers_interval = '10 seconds' + +# Time interval for resetting the latch in block builders. +latch_reset_interval = '5 seconds' + + +# ============================================= +# Configuration options for the block validator +# ============================================= +[block_validator] + +# Maximum number of completed entries to retain. +# +# A higher value can avoid creating needless validation work on an already-validated proposed +# block, but comes at the cost of increased memory consumption. +max_completed_entries = 3 + + +# ================================== +# Configuration options for fetchers +# ================================== +[fetcher] + +# The timeout duration for a single fetcher request, i.e. 
for a single fetcher message +# sent from this node to another node, it will be considered timed out if the expected response from that peer is +# not received within this specified duration. +get_from_peer_timeout = '10 seconds' + + +# ======================================================== +# Configuration options for the contract runtime component +# ======================================================== +[contract_runtime] + +# Optional maximum size of the database to use for the global state store. +# +# If unset, defaults to 805,306,368,000 == 750 GiB. +# +# The size should be a multiple of the OS page size. +max_global_state_size = 2_089_072_132_096 + +# Optional depth limit to use for global state queries. +# +# If unset, defaults to 5. +#max_query_depth = 5 + +# Enable manual synchronizing to disk. +# +# If unset, defaults to true. +#enable_manual_sync = true + + +# ================================================== +# Configuration options for the transaction acceptor +# ================================================== +[transaction_acceptor] + +# The leeway allowed when considering whether a transaction is future-dated or not. +# +# To accommodate minor clock drift, transactions whose timestamps are within `timestamp_leeway` in the +# future are still acceptable. +# +# The maximum value to which `timestamp_leeway` can be set is defined by the chainspec setting +# `transaction.max_timestamp_leeway`. +timestamp_leeway = '2 seconds' + + +# =========================================== +# Configuration options for the transaction buffer +# =========================================== +[transaction_buffer] + +# The interval of checking for expired transactions. +expiry_check_interval = '1 minute' + + +# ============================================== +# Configuration options for the diagnostics port +# ============================================== +[diagnostics_port] + +# If set, the diagnostics port will be available on a UNIX socket. 
+enabled = false + +# Filename for the UNIX domain socket the diagnostics port listens on. +socket_path = "debug.socket" + +# The umask to set before creating the socket. A restrictive mask like `0o077` will cause the +# socket to be only accessible by the user the node runs as. A more relaxed variant is `0o007`, +# which allows for group access as well. +socket_umask = 0o077 + + +# ============================================= +# Configuration options for the upgrade watcher +# ============================================= +[upgrade_watcher] + +# How often to scan file system for available upgrades. +upgrade_check_interval = '30 seconds' diff --git a/run-dev-tmux.sh b/run-dev-tmux.sh deleted file mode 100755 index 345174e6e2..0000000000 --- a/run-dev-tmux.sh +++ /dev/null @@ -1,147 +0,0 @@ -#!/usr/bin/env bash - -set -o errexit -set -o nounset -set -o pipefail - -tmux_new_window() { - local SESSION=${1} - local ID=${2} - local CMD=${3} - local NAME="${SESSION}-${ID}" - - tmux new-window -t "${SESSION}:${ID}" -n "${NAME}" - tmux send-keys -t "${NAME}" "${CMD}" C-m -} - -build_system_contracts() { - local CMD=( - "make -s" - "build-contracts-rs" - "CARGO_FLAGS=--quiet" - ) - - echo "Building system contracts..." - ${CMD[*]} -} - -build_node() { - local CMD=( - "cargo build" - "--quiet" - "--manifest-path=node/Cargo.toml" - ) - - echo "Building node..." - ${CMD[*]} -} - -generate_timestamp() { - local DELAY=${1} - - local SCRIPT=( - "from datetime import datetime, timedelta;" - "print((datetime.utcnow() + timedelta(seconds=${DELAY})).isoformat('T') + 'Z')" - ) - - python3 -c "${SCRIPT[*]}" -} - -generate_chainspec() { - local BASEDIR=${1} - local TIMESTAMP=${2} - local SOURCE="${BASEDIR}/resources/local/chainspec.toml.in" - local TARGET="${BASEDIR}/resources/local/chainspec.toml" - - export BASEDIR - export TIMESTAMP - - echo "Generating chainspec..." 
- envsubst < ${SOURCE} > ${TARGET} -} - -run_node() { - local EXECUTABLE=${1} - local SESSION=${2} - local ID=${3} - local CONFIG_DIR=${4} - local DATA_DIR=${5} - local CONFIG_TOML_PATH="${CONFIG_DIR}/config.toml" - local SECRET_KEY_PATH="${CONFIG_DIR}/secret_keys/node-${ID}.pem" - local STORAGE_DIR="${DATA_DIR}/node-${ID}-storage" - - local CMD=( - "${EXECUTABLE}" - "validator" - "${CONFIG_TOML_PATH}" - "-C consensus.secret_key_path=${SECRET_KEY_PATH}" - "-C storage.path=${STORAGE_DIR}" - "-C rpc_server.address='0.0.0.0:50101'" - ) - - if [[ ${ID} != 1 ]]; then - CMD+=("-C network.bind_address='0.0.0.0:0'") - CMD+=("-C rpc_server.address='0.0.0.0:0'") - CMD+=("-C rest_server.address='0.0.0.0:0'") - CMD+=("-C event_stream_server.address='0.0.0.0:0'") - fi - - CMD+=("1> >(tee ${DATA_DIR}/node-${ID}.log) 2> >(tee ${DATA_DIR}/node-${ID}.log.stderr)") - - mkdir -p "${STORAGE_DIR}" - tmux_new_window "${SESSION}" "${ID}" "${CMD[*]}" - echo "Booting node ${ID}..." -} - -check_for_bootstrap () { - local BOOTSTRAP_PORT=34553 - - while ! 
(: /dev/null; do - sleep 1 - done -} - -main() { - local DELAY=${1:-40} - local SESSION="${SESSION:-local}" - local TMPDIR="${TMPDIR:-$(mktemp -d)}" - local BASEDIR="$(readlink -f $(dirname ${0}))" - local EXECUTABLE="${BASEDIR}/target/debug/casper-node" - local CONFIG_DIR="${BASEDIR}/resources/local" - local TIMESTAMP="$(generate_timestamp ${DELAY})" - local RUST_LOG="${RUST_LOG:-debug}" - - export TMPDIR - export RUST_LOG - - build_system_contracts - - build_node - - generate_chainspec ${BASEDIR} ${TIMESTAMP} - - tmux new-session -d -s ${SESSION} - - local ID=1 - run_node ${EXECUTABLE} ${SESSION} ${ID} ${CONFIG_DIR} ${TMPDIR} - - for ID in {2..5}; do - check_for_bootstrap - run_node ${EXECUTABLE} ${SESSION} ${ID} ${CONFIG_DIR} ${TMPDIR} - done - - echo - echo "DELAY : ${DELAY}" - echo "TMPDIR : ${TMPDIR}" - echo "TIMESTAMP : ${TIMESTAMP}" - echo "RUST_LOG : ${RUST_LOG}" - echo - echo "To view: " - echo " tmux attach -t ${SESSION}" - echo - echo "To kill: " - echo " tmux kill-session -t ${SESSION}" - echo -} - -main ${@} diff --git a/rust-toolchain b/rust-toolchain deleted file mode 100644 index 714051aa53..0000000000 --- a/rust-toolchain +++ /dev/null @@ -1 +0,0 @@ -nightly-2020-12-16 diff --git a/rust-toolchain.toml b/rust-toolchain.toml new file mode 100644 index 0000000000..00822fdf58 --- /dev/null +++ b/rust-toolchain.toml @@ -0,0 +1,2 @@ +[toolchain] +channel = "1.85.1" diff --git a/rustfmt.toml b/rustfmt.toml index f2120d3c0e..b16fb7d514 100644 --- a/rustfmt.toml +++ b/rustfmt.toml @@ -1,4 +1,4 @@ wrap_comments = true comment_width = 100 -merge_imports = true -edition = "2018" +imports_granularity = "Crate" +edition = "2021" diff --git a/shell.nix b/shell.nix deleted file mode 100644 index b37b55f5ae..0000000000 --- a/shell.nix +++ /dev/null @@ -1,86 +0,0 @@ -# This is an unsupported development environment, instantly enabling `make build -# build-contracts-rs`. There is no official support for this nix derivation. 
-# -# Do not use this, but follow instructions in the `README.md` instead. -# -# `ops`: enables support for running `casper-tool` and various experimental cluster-based -# testing utilities. -# `dev`: enables tooling useful for development, but not strictly necessary to run/build - -{ pkgs ? (import ) { }, ops ? true, dev ? true }: -with pkgs.lib; -let - # `volatile` is not packaged in nix. - volatile = pkgs.python38Packages.buildPythonPackage rec { - pname = "volatile"; - version = "2.1.0"; - src = pkgs.python38Packages.fetchPypi { - inherit pname version; - sha256 = "1lri7a6pmlx9ghbrsgd702c3n862glwy0p8idh0lwdg313anmqwv"; - }; - doCheck = false; - }; - python = pkgs.python3.withPackages (python-packages: - with python-packages; - [ click ] ++ lists.optionals ops [ kubernetes volatile ] - ++ lists.optionals dev [ prometheus_client psutil supervisor toml ]); - run-nctl = pkgs.writeScriptBin "nctl" '' - #!${pkgs.bash}/bin/bash - COMMAND_LINE="nctl-$@" - - shopt -s expand_aliases - source ''${CASPER_ROOT}/utils/nctl/activate - - eval ''${COMMAND_LINE} - ''; -in pkgs.stdenv.mkDerivation { - name = "rustenv"; - nativeBuildInputs = with pkgs; [ pkg-config perl which protobuf ]; - buildInputs = with pkgs; - [ cmake pkg-config openssl.dev zlib.dev rustup envsubst ] - ++ lists.optionals ops [ kubectl python skopeo git nix ] - ++ lists.optionals dev [ black docker coreutils run-nctl ]; - - # Enable SSL support in pure shells - SSL_CERT_FILE = "${pkgs.cacert}/etc/ssl/certs/ca-bundle.crt"; - NIX_SSL_CERT_FILE = "${pkgs.cacert}/etc/ssl/certs/ca-bundle.crt"; - - # `protoc` is required but not found by the `prost` crate, unless this envvar is set - PROTOC = "${pkgs.protobuf}/bin/protoc"; - - # The shell hook provides a predefined environment with kubectl and nctl setup, if `ops` and `dev` - # respectively are enabled. 
- shellHook = let - devS = boolToString dev; - opsS = boolToString ops; - in '' - NCTL_ACTIVATE="utils/nctl/activate" - - if [ ${opsS} = "true" ] && [ -e nix/k3s.yaml ]; then - echo "Found k3s.yaml in nix folder, setting KUBECONFIG envvar."; - export KUBECONFIG=$(pwd)/k3s.yaml - fi; - - if [ ${devS} = "true" ]; then - if [ -f "''${NCTL_ACTIVATE}" ]; then - echo "Sourcing ''${NCTL_ACTIVATE}." - source ''${NCTL_ACTIVATE} - else - echo "Warning: ''${NCTL_ACTIVATE} not found." - fi; - fi; - - export PS1="\n\[\033[1;32m\][casper-sh:\w]\$\[\033[0m\] "; - - if [ $(pwd | wc -c) -gt 50 ]; then - echo "" - echo "WARNING" - echo "The current path $(pwd) is very long. This will cause issues with UNIX sockets" - echo "(see https://stackoverflow.com/questions/34829600/why-is-the-maximal-path-length-allowed-for-unix-sockets-on-linux-108)." - echo - echo "Consider moving or symlinking this directory." - fi - - export CASPER_ROOT=$(pwd) - ''; -} diff --git a/smart_contracts/contract/CHANGELOG.md b/smart_contracts/contract/CHANGELOG.md new file mode 100644 index 0000000000..366e14313b --- /dev/null +++ b/smart_contracts/contract/CHANGELOG.md @@ -0,0 +1,109 @@ +# Changelog + +All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog]. + +[comment]: <> (Added: new features) +[comment]: <> (Changed: changes in existing functionality) +[comment]: <> (Deprecated: soon-to-be removed features) +[comment]: <> (Removed: now removed features) +[comment]: <> (Fixed: any bug fixes) +[comment]: <> (Security: in case of vulnerabilities) + + + +## 4.0.0 + +### Added +* Add `storage::enable_contract_version` for enabling a specific version of a contract. + + + +## 3.0.0 + +### Added +* Support fetching the calling account's authorization keys via the new function `runtime::list_authorization_keys` which calls the new `ext_ffi::casper_load_authorization_keys`. 
+* Support providing 32 random bytes via the new function `runtime::random_bytes` which calls the new `ext_ffi::casper_random_bytes`. +* Add `storage::read_from_key` for reading a value under a given `Key`. +* Add `storage::dictionary_read` for reading a value from a dictionary under a given `Key`, calling the new `ext_ffi::casper_dictionary_read`. +* Add `storage::named_dictionary_put` for writing a named value to a named dictionary. +* Add `storage::named_dictionary_get` for reading a named value from a named dictionary. + +### Changed +* Update pinned version of Rust to `nightly-2022-08-03`. +* Increase `DICTIONARY_ITEM_KEY_MAX_LENGTH` to 128. + + + +## 2.0.0 + +### Changed +* Update `casper-types` to v2.0.0 due to additional `Key` variant, requiring a major version bump here. + + + +## 1.4.4 + +### Changed +* Minor refactor of `system::create_purse()`. + + + +## [1.4.0] - 2021-10-04 + +### Added +* Add `no-std-helpers` feature, enabled by default, which provides no-std panic/oom handlers and a global allocator as a convenience. +* Add new APIs for transferring tokens to the main purse associated with a public key: `transfer_to_public_key` and `transfer_from_purse_to_public_key`. + +### Deprecated +* Feature `std` is deprecated as it is now a no-op, since there is no benefit to linking the std lib via this crate. + + + +## [1.3.0] - 2021-07-19 + +### Changed +* Update pinned version of Rust to `nightly-2021-06-17`. + + + +## [1.2.0] - 2021-05-28 + +### Changed +* Change to Apache 2.0 license. + + + +## [1.1.1] - 2021-04-19 + +No changes. + + + +## [1.1.0] - 2021-04-13 [YANKED] + +No changes. + + + +## [1.0.1] - 2021-04-08 + +No changes. + + + +## [1.0.0] - 2021-03-30 + +### Added +* Initial release of smart contract API compatible with Casper mainnet. 
+ + + +[Keep a Changelog]: https://keepachangelog.com/en/1.0.0 +[unreleased]: https://github.com/casper-network/casper-node/compare/v1.4.0...dev +[1.4.0]: https://github.com/casper-network/casper-node/compare/v1.3.0...v1.4.0 +[1.3.0]: https://github.com/casper-network/casper-node/compare/v1.2.0...v1.3.0 +[1.2.0]: https://github.com/casper-network/casper-node/compare/v1.1.1...v1.2.0 +[1.1.1]: https://github.com/casper-network/casper-node/compare/v1.0.1...v1.1.1 +[1.1.0]: https://github.com/casper-network/casper-node/compare/v1.0.1...v1.1.1 +[1.0.1]: https://github.com/casper-network/casper-node/compare/v1.0.0...v1.0.1 +[1.0.0]: https://github.com/casper-network/casper-node/releases/tag/v1.0.0 diff --git a/smart_contracts/contract/Cargo.toml b/smart_contracts/contract/Cargo.toml index a771506dc9..c2913e70ca 100644 --- a/smart_contracts/contract/Cargo.toml +++ b/smart_contracts/contract/Cargo.toml @@ -1,23 +1,28 @@ [package] name = "casper-contract" -version = "1.0.0" # when updating, also update 'html_root_url' in lib.rs -authors = ["Michael Birch ", "Mateusz Górski "] -edition = "2018" -description = "Library for developing Casper smart contracts." +version = "5.1.1" # when updating, also update 'html_root_url' in lib.rs +authors = ["Michał Papierski ", "Mateusz Górski "] +edition = "2021" +description = "A library for developing Casper network smart contracts." 
readme = "README.md" documentation = "https://docs.rs/casper-contract" -homepage = "https://casperlabs.io" -repository = "https://github.com/CasperLabs/casper-node/tree/master/smart_contracts/contract" -license-file = "../../LICENSE" +homepage = "https://casper.network" +repository = "https://github.com/casper-network/casper-node/tree/master/smart_contracts/contract" +license = "Apache-2.0" [dependencies] -casper-types = { version = "1.0.0", path = "../../types" } +casper-types = { version = "6.0.1", path = "../../types" } hex_fmt = "0.3.0" -thiserror = "1.0.18" version-sync = { version = "0.9", optional = true } -wee_alloc = "0.4.5" +wee_alloc = { version = "0.4.5", optional = true } [features] -default = [] -std = ["casper-types/std", "version-sync"] +default = ["no-std-helpers"] +no-std-helpers = ["wee_alloc"] test-support = [] +# DEPRECATED - enabling `std` has no effect. +std = [] + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] diff --git a/smart_contracts/contract/README.md b/smart_contracts/contract/README.md index dddc35dbc9..5542dcd661 100644 --- a/smart_contracts/contract/README.md +++ b/smart_contracts/contract/README.md @@ -1,14 +1,51 @@ # `casper-contract` -[![LOGO](../../images/CasperLabs_Logo_Horizontal_RGB.png)](https://casperlabs.io/) +[![LOGO](https://raw.githubusercontent.com/casper-network/casper-node/master/images/casper-association-logo-primary.svg)](https://casper.network/) -[![Build Status](https://drone-auto.casperlabs.io/api/badges/CasperLabs/casper-node/status.svg?branch=master)](http://drone-auto.casperlabs.io/CasperLabs/casper-node) [![Crates.io](https://img.shields.io/crates/v/casper-contract)](https://crates.io/crates/casper-contract) [![Documentation](https://docs.rs/casper-contract/badge.svg)](https://docs.rs/casper-contract) -[![License](https://img.shields.io/badge/license-COSL-blue.svg)](../../LICENSE) 
+[![License](https://img.shields.io/badge/license-Apache-blue)](https://github.com/casper-network/casper-node/blob/master/LICENSE) -A library for developing CasperLabs smart contracts. +A library for developing Casper network smart contracts. + +## no_std + +The crate is `no_std`, but uses the `core` and `alloc` crates. It is recommended to build Wasm smart contracts in a +`no_std` environment as this generally yields smaller, and hence cheaper, binaries. + +## Compile-time features + +### `no-std-helpers` + +Enabled by default. + +Given that the library is intended to be consumed by smart-contract binaries, and that in a `no_std` environment these +will all require to provide an [alloc error handler](https://github.com/rust-lang/rust/issues/51540) and an +[eh_personality](https://doc.rust-lang.org/unstable-book/language-features/lang-items.html#more-about-the-language-items), +then this crate provides these when `no-std-helpers` is enabled. This unfortunately requires the use of nightly Rust. + +For further convenience, enabling this feature also provides a global allocator suitable for use in a `no_std` +environment. + +If you wish to use a different global allocator, or provide different panic/out-of-memory handlers, then add the +following to your Cargo.toml: + +```toml +casper-contract = { version = "1", default-features = false } +``` + +### `test-support` + +Disabled by default. + +To help support smart contract debugging, enabling the `test-support` feature makes the function +`contract_api::runtime::print(text: &str)` available. If the contract is being tested offchain using the +`casper-engine-test-support` crate, then the contract can output text to the console for debugging. + +```toml +casper-contract = { version = "1", features = ["test-support"] } +``` ## License -Licensed under the [CasperLabs Open Source License (COSL)](../../LICENSE). 
+Licensed under the [Apache License Version 2.0](https://github.com/casper-network/casper-node/blob/master/LICENSE). diff --git a/smart_contracts/contract/src/contract_api/account.rs b/smart_contracts/contract/src/contract_api/account.rs index 20da9a0c07..e4e740782e 100644 --- a/smart_contracts/contract/src/contract_api/account.rs +++ b/smart_contracts/contract/src/contract_api/account.rs @@ -15,7 +15,6 @@ use super::to_ptr; use crate::{contract_api, ext_ffi, unwrap_or_revert::UnwrapOrRevert}; /// Retrieves the ID of the account's main purse. -#[doc(hidden)] pub fn get_main_purse() -> URef { let dest_non_null_ptr = contract_api::alloc_bytes(UREF_SERIALIZED_LENGTH); let bytes = unsafe { diff --git a/smart_contracts/contract/src/contract_api/cryptography.rs b/smart_contracts/contract/src/contract_api/cryptography.rs new file mode 100644 index 0000000000..a05fd2e699 --- /dev/null +++ b/smart_contracts/contract/src/contract_api/cryptography.rs @@ -0,0 +1,74 @@ +//! Functions with cryptographic utils. + +use casper_types::{ + api_error, + bytesrepr::{FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + ApiError, HashAlgorithm, PublicKey, Signature, BLAKE2B_DIGEST_LENGTH, +}; + +use crate::{ext_ffi, unwrap_or_revert::UnwrapOrRevert}; + +/// Computes digest hash, using provided algorithm type. +pub fn generic_hash>(input: T, algo: HashAlgorithm) -> [u8; 32] { + let mut ret = [0; 32]; + + let result = unsafe { + ext_ffi::casper_generic_hash( + input.as_ref().as_ptr(), + input.as_ref().len(), + algo as u8, + ret.as_mut_ptr(), + BLAKE2B_DIGEST_LENGTH, + ) + }; + api_error::result_from(result).unwrap_or_revert(); + ret +} + +/// Attempts to recover a Secp256k1 [`PublicKey`] from a message and a signature over it. 
+pub fn recover_secp256k1>( + data: T, + signature: &Signature, + recovery_id: u8, +) -> Result { + let mut buffer = [0; U8_SERIALIZED_LENGTH + PublicKey::SECP256K1_LENGTH]; + let signature_bytes = signature.to_bytes().unwrap_or_revert(); + + let result = unsafe { + ext_ffi::casper_recover_secp256k1( + data.as_ref().as_ptr(), + data.as_ref().len(), + signature_bytes.as_ptr(), + signature_bytes.len(), + buffer.as_mut_ptr(), + recovery_id, + ) + }; + + PublicKey::from_bytes(&buffer) + .map(|(key, _)| key) + .map_err(|_| ApiError::from(result as u32)) +} + +/// Verifies the signature of the given message against the given public key. +pub fn verify_signature>( + data: T, + signature: &Signature, + public_key: &PublicKey, +) -> Result<(), ApiError> { + let signature_bytes = signature.to_bytes().unwrap_or_revert(); + let public_key_bytes = public_key.to_bytes().unwrap_or_revert(); + + let result = unsafe { + ext_ffi::casper_verify_signature( + data.as_ref().as_ptr(), + data.as_ref().len(), + signature_bytes.as_ptr(), + signature_bytes.len(), + public_key_bytes.as_ptr(), + public_key_bytes.len(), + ) + }; + + api_error::result_from(result) +} diff --git a/smart_contracts/contract/src/contract_api/entity.rs b/smart_contracts/contract/src/contract_api/entity.rs new file mode 100644 index 0000000000..255bf829ee --- /dev/null +++ b/smart_contracts/contract/src/contract_api/entity.rs @@ -0,0 +1,95 @@ +//! Functions for managing accounts. + +use alloc::vec::Vec; +use core::convert::TryFrom; + +use casper_types::{ + account::{ + AccountHash, AddKeyFailure, RemoveKeyFailure, SetThresholdFailure, UpdateKeyFailure, + }, + addressable_entity::{ActionType, Weight}, + bytesrepr, URef, UREF_SERIALIZED_LENGTH, +}; + +use super::to_ptr; +use crate::{contract_api, ext_ffi, unwrap_or_revert::UnwrapOrRevert}; + +/// Retrieves the ID of the account's main purse. 
+pub fn get_main_purse() -> URef { + let dest_non_null_ptr = contract_api::alloc_bytes(UREF_SERIALIZED_LENGTH); + let bytes = unsafe { + ext_ffi::casper_get_main_purse(dest_non_null_ptr.as_ptr()); + Vec::from_raw_parts( + dest_non_null_ptr.as_ptr(), + UREF_SERIALIZED_LENGTH, + UREF_SERIALIZED_LENGTH, + ) + }; + bytesrepr::deserialize(bytes).unwrap_or_revert() +} + +/// Sets the given [`ActionType`]'s threshold to the provided value. +pub fn set_action_threshold( + action_type: ActionType, + threshold: Weight, +) -> Result<(), SetThresholdFailure> { + let action_type = action_type as u32; + let threshold = threshold.value().into(); + let result = unsafe { ext_ffi::casper_set_action_threshold(action_type, threshold) }; + if result == 0 { + Ok(()) + } else { + Err(SetThresholdFailure::try_from(result).unwrap_or_revert()) + } +} + +/// Adds the given [`AccountHash`] with associated [`Weight`] to the account's associated keys. +pub fn add_associated_key(account_hash: AccountHash, weight: Weight) -> Result<(), AddKeyFailure> { + let (account_hash_ptr, account_hash_size, _bytes) = to_ptr(account_hash); + // Cast of u8 (weight) into i32 is assumed to be always safe + let result = unsafe { + ext_ffi::casper_add_associated_key( + account_hash_ptr, + account_hash_size, + weight.value().into(), + ) + }; + if result == 0 { + Ok(()) + } else { + Err(AddKeyFailure::try_from(result).unwrap_or_revert()) + } +} + +/// Removes the given [`AccountHash`] from the account's associated keys. +pub fn remove_associated_key(account_hash: AccountHash) -> Result<(), RemoveKeyFailure> { + let (account_hash_ptr, account_hash_size, _bytes) = to_ptr(account_hash); + let result = + unsafe { ext_ffi::casper_remove_associated_key(account_hash_ptr, account_hash_size) }; + if result == 0 { + Ok(()) + } else { + Err(RemoveKeyFailure::try_from(result).unwrap_or_revert()) + } +} + +/// Updates the [`Weight`] of the given [`AccountHash`] in the account's associated keys. 
+pub fn update_associated_key( + account_hash: AccountHash, + weight: Weight, +) -> Result<(), UpdateKeyFailure> { + let (account_hash_ptr, account_hash_size, _bytes) = to_ptr(account_hash); + // Cast of u8 (weight) into i32 is assumed to be always safe + let result = unsafe { + ext_ffi::casper_update_associated_key( + account_hash_ptr, + account_hash_size, + weight.value().into(), + ) + }; + if result == 0 { + Ok(()) + } else { + Err(UpdateKeyFailure::try_from(result).unwrap_or_revert()) + } +} diff --git a/smart_contracts/contract/src/contract_api/mod.rs b/smart_contracts/contract/src/contract_api/mod.rs index 7213db0e2b..26e3a3aec0 100644 --- a/smart_contracts/contract/src/contract_api/mod.rs +++ b/smart_contracts/contract/src/contract_api/mod.rs @@ -1,6 +1,8 @@ //! Contains support for writing smart contracts. pub mod account; +pub mod cryptography; +pub mod entity; pub mod runtime; pub mod storage; pub mod system; @@ -17,7 +19,7 @@ use crate::unwrap_or_revert::UnwrapOrRevert; /// Calculates size and alignment for an array of T. const fn size_align_for_array(n: usize) -> (usize, usize) { - (n * mem::size_of::(), mem::align_of::()) + (n * size_of::(), mem::align_of::()) } /// Allocates bytes @@ -40,3 +42,10 @@ fn to_ptr(t: T) -> (*const u8, usize, Vec) { let size = bytes.len(); (ptr, size, bytes) } + +fn dictionary_item_key_to_ptr(dictionary_item_key: &str) -> (*const u8, usize) { + let bytes = dictionary_item_key.as_bytes(); + let ptr = bytes.as_ptr(); + let size = bytes.len(); + (ptr, size) +} diff --git a/smart_contracts/contract/src/contract_api/runtime.rs b/smart_contracts/contract/src/contract_api/runtime.rs index 4338814375..9e10562580 100644 --- a/smart_contracts/contract/src/contract_api/runtime.rs +++ b/smart_contracts/contract/src/contract_api/runtime.rs @@ -1,22 +1,34 @@ //! Functions for interacting with the current runtime. -// Can be removed once https://github.com/rust-lang/rustfmt/issues/3362 is resolved. 
-#[rustfmt::skip] -use alloc::vec; -use alloc::vec::Vec; +use alloc::{collections::BTreeSet, vec, vec::Vec}; use core::mem::MaybeUninit; use casper_types::{ account::AccountHash, api_error, - bytesrepr::{self, FromBytes}, - contracts::{ContractVersion, NamedKeys}, - ApiError, BlockTime, CLTyped, CLValue, ContractHash, ContractPackageHash, Key, Phase, - RuntimeArgs, URef, BLAKE2B_DIGEST_LENGTH, BLOCKTIME_SERIALIZED_LENGTH, PHASE_SERIALIZED_LENGTH, + bytesrepr::{self, FromBytes, U64_SERIALIZED_LENGTH}, + contract_messages::{MessagePayload, MessageTopicOperation}, + contracts::{ContractHash, ContractPackageHash, ContractVersion, NamedKeys}, + system::CallerInfo, + ApiError, BlockTime, CLTyped, CLValue, Digest, EntityVersion, HashAlgorithm, Key, Phase, + ProtocolVersion, RuntimeArgs, URef, BLAKE2B_DIGEST_LENGTH, BLOCKTIME_SERIALIZED_LENGTH, + PHASE_SERIALIZED_LENGTH, }; use crate::{contract_api, ext_ffi, unwrap_or_revert::UnwrapOrRevert}; +/// Number of random bytes returned from the `random_bytes()` function. +const RANDOM_BYTES_COUNT: usize = 32; + +const ACCOUNT: u8 = 0; + +#[repr(u8)] +enum CallerIndex { + Initiator = 0, + Immediate = 1, + FullStack = 2, +} + /// Returns the given [`CLValue`] to the host, terminating the currently running module. 
/// /// Note this function is only relevant to contracts stored on chain which are invoked via @@ -52,7 +64,7 @@ pub fn call_contract( let (contract_hash_ptr, contract_hash_size, _bytes1) = contract_api::to_ptr(contract_hash); let (entry_point_name_ptr, entry_point_name_size, _bytes2) = contract_api::to_ptr(entry_point_name); - let (runtime_args_ptr, runtime_args_size, _bytes2) = contract_api::to_ptr(runtime_args); + let (runtime_args_ptr, runtime_args_size, _bytes3) = contract_api::to_ptr(runtime_args); let bytes_written = { let mut bytes_written = MaybeUninit::uninit(); @@ -86,13 +98,13 @@ pub fn call_versioned_contract( entry_point_name: &str, runtime_args: RuntimeArgs, ) -> T { - let (contract_package_hash_ptr, contract_package_hash_size, _bytes) = + let (contract_package_hash_ptr, contract_package_hash_size, _bytes1) = contract_api::to_ptr(contract_package_hash); - let (contract_version_ptr, contract_version_size, _bytes) = + let (contract_version_ptr, contract_version_size, _bytes2) = contract_api::to_ptr(contract_version); - let (entry_point_name_ptr, entry_point_name_size, _bytes) = + let (entry_point_name_ptr, entry_point_name_size, _bytes3) = contract_api::to_ptr(entry_point_name); - let (runtime_args_ptr, runtime_args_size, _bytes) = contract_api::to_ptr(runtime_args); + let (runtime_args_ptr, runtime_args_size, _bytes4) = contract_api::to_ptr(runtime_args); let bytes_written = { let mut bytes_written = MaybeUninit::uninit(); @@ -115,6 +127,52 @@ pub fn call_versioned_contract( deserialize_contract_result(bytes_written) } +/// Invokes the specified `entry_point_name` of stored logic at a specific `contract_package_hash` +/// address, for a specific pair of `major_version` and `contract_version` +/// and passing the provided `runtime_args` to it +/// +/// If the stored contract calls [`ret`], then that value is returned from +/// `call_package_version`. 
If the stored contract calls [`revert`], then execution stops and +/// `call_package_version` doesn't return. Otherwise `call_package_version` returns `()`. +pub fn call_package_version( + contract_package_hash: ContractPackageHash, + major_version: Option, + contract_version: Option, + entry_point_name: &str, + runtime_args: RuntimeArgs, +) -> T { + let (contract_package_hash_ptr, contract_package_hash_size, _bytes1) = + contract_api::to_ptr(contract_package_hash); + let (major_version_ptr, major_version_size, _bytes_5) = contract_api::to_ptr(major_version); + let (contract_version_ptr, contract_version_size, _bytes2) = + contract_api::to_ptr(contract_version); + let (entry_point_name_ptr, entry_point_name_size, _bytes3) = + contract_api::to_ptr(entry_point_name); + let (runtime_args_ptr, runtime_args_size, _bytes4) = contract_api::to_ptr(runtime_args); + + let bytes_written = { + let mut bytes_written = MaybeUninit::uninit(); + let ret = unsafe { + ext_ffi::casper_call_package_version( + contract_package_hash_ptr, + contract_package_hash_size, + major_version_ptr, + major_version_size, + contract_version_ptr, + contract_version_size, + entry_point_name_ptr, + entry_point_name_size, + runtime_args_ptr, + runtime_args_size, + bytes_written.as_mut_ptr(), + ) + }; + api_error::result_from(ret).unwrap_or_revert(); + unsafe { bytes_written.assume_init() } + }; + deserialize_contract_result(bytes_written) +} + fn deserialize_contract_result(bytes_written: usize) -> T { let serialized_result = if bytes_written == 0 { // If no bytes were written, the host buffer hasn't been set and hence shouldn't be read. @@ -133,6 +191,11 @@ fn deserialize_contract_result(bytes_written: usize) -> bytesrepr::deserialize(serialized_result).unwrap_or_revert() } +/// Returns size in bytes of a given named argument passed to the host for the current module +/// invocation. 
+/// +/// This will return either Some with the size of argument if present, or None if given argument is +/// not passed. fn get_named_arg_size(name: &str) -> Option { let mut arg_size: usize = 0; let ret = unsafe { @@ -179,6 +242,37 @@ pub fn get_named_arg(name: &str) -> T { bytesrepr::deserialize(arg_bytes).unwrap_or_revert_with(ApiError::InvalidArgument) } +/// Returns given named argument passed to the host for the current module invocation. +/// If the argument is not found, returns `None`. +/// +/// Note that this is only relevant to contracts stored on-chain since a contract deployed directly +/// is not invoked with any arguments. +pub fn try_get_named_arg(name: &str) -> Option { + let arg_size = get_named_arg_size(name)?; + let arg_bytes = if arg_size > 0 { + let res = { + let data_non_null_ptr = contract_api::alloc_bytes(arg_size); + let ret = unsafe { + ext_ffi::casper_get_named_arg( + name.as_bytes().as_ptr(), + name.len(), + data_non_null_ptr.as_ptr(), + arg_size, + ) + }; + let data = + unsafe { Vec::from_raw_parts(data_non_null_ptr.as_ptr(), arg_size, arg_size) }; + api_error::result_from(ret).map(|_| data) + }; + // Assumed to be safe as `get_named_arg_size` checks the argument already + res.unwrap_or_revert() + } else { + // Avoids allocation with 0 bytes and a call to get_named_arg + Vec::new() + }; + bytesrepr::deserialize(arg_bytes).ok() +} + /// Returns the caller of the current context, i.e. the [`AccountHash`] of the account which made /// the deploy request. pub fn get_caller() -> AccountHash { @@ -206,6 +300,95 @@ pub fn get_blocktime() -> BlockTime { bytesrepr::deserialize(bytes).unwrap_or_revert() } +/// The default length of hashes such as account hash, state hash, hash addresses, etc. +pub const DEFAULT_HASH_LENGTH: u8 = 32; +/// The default size of ProtocolVersion. It's 3×u32 (major, minor, patch), so 12 bytes. +pub const PROTOCOL_VERSION_LENGTH: u8 = 12; +///The default size of the addressable entity flag. 
+pub const ADDRESSABLE_ENTITY_LENGTH: u8 = 1; +/// Index for the block time field of block info. +pub const BLOCK_TIME_FIELD_IDX: u8 = 0; +/// Index for the block height field of block info. +pub const BLOCK_HEIGHT_FIELD_IDX: u8 = 1; +/// Index for the parent block hash field of block info. +pub const PARENT_BLOCK_HASH_FIELD_IDX: u8 = 2; +/// Index for the state hash field of block info. +pub const STATE_HASH_FIELD_IDX: u8 = 3; +/// Index for the protocol version field of block info. +pub const PROTOCOL_VERSION_FIELD_IDX: u8 = 4; +/// Index for the addressable entity field of block info. +pub const ADDRESSABLE_ENTITY_FIELD_IDX: u8 = 5; + +/// Returns the block height. +pub fn get_block_height() -> u64 { + let dest_non_null_ptr = contract_api::alloc_bytes(U64_SERIALIZED_LENGTH); + let bytes = unsafe { + ext_ffi::casper_get_block_info(BLOCK_HEIGHT_FIELD_IDX, dest_non_null_ptr.as_ptr()); + Vec::from_raw_parts( + dest_non_null_ptr.as_ptr(), + U64_SERIALIZED_LENGTH, + U64_SERIALIZED_LENGTH, + ) + }; + bytesrepr::deserialize(bytes).unwrap_or_revert() +} + +/// Returns the parent block hash. +pub fn get_parent_block_hash() -> Digest { + let dest_non_null_ptr = contract_api::alloc_bytes(DEFAULT_HASH_LENGTH as usize); + let bytes = unsafe { + ext_ffi::casper_get_block_info(PARENT_BLOCK_HASH_FIELD_IDX, dest_non_null_ptr.as_ptr()); + Vec::from_raw_parts( + dest_non_null_ptr.as_ptr(), + DEFAULT_HASH_LENGTH as usize, + DEFAULT_HASH_LENGTH as usize, + ) + }; + bytesrepr::deserialize(bytes).unwrap_or_revert() +} + +/// Returns the state root hash. 
+pub fn get_state_hash() -> Digest { + let dest_non_null_ptr = contract_api::alloc_bytes(DEFAULT_HASH_LENGTH as usize); + let bytes = unsafe { + ext_ffi::casper_get_block_info(STATE_HASH_FIELD_IDX, dest_non_null_ptr.as_ptr()); + Vec::from_raw_parts( + dest_non_null_ptr.as_ptr(), + DEFAULT_HASH_LENGTH as usize, + DEFAULT_HASH_LENGTH as usize, + ) + }; + bytesrepr::deserialize(bytes).unwrap_or_revert() +} + +/// Returns the protocol version. +pub fn get_protocol_version() -> ProtocolVersion { + let dest_non_null_ptr = contract_api::alloc_bytes(PROTOCOL_VERSION_LENGTH as usize); + let bytes = unsafe { + ext_ffi::casper_get_block_info(PROTOCOL_VERSION_FIELD_IDX, dest_non_null_ptr.as_ptr()); + Vec::from_raw_parts( + dest_non_null_ptr.as_ptr(), + PROTOCOL_VERSION_LENGTH as usize, + PROTOCOL_VERSION_LENGTH as usize, + ) + }; + bytesrepr::deserialize(bytes).unwrap_or_revert() +} + +/// Returns whether or not the addressable entity is turned on. +pub fn get_addressable_entity() -> bool { + let dest_non_null_ptr = contract_api::alloc_bytes(ADDRESSABLE_ENTITY_LENGTH as usize); + let bytes = unsafe { + ext_ffi::casper_get_block_info(ADDRESSABLE_ENTITY_FIELD_IDX, dest_non_null_ptr.as_ptr()); + Vec::from_raw_parts( + dest_non_null_ptr.as_ptr(), + ADDRESSABLE_ENTITY_LENGTH as usize, + ADDRESSABLE_ENTITY_LENGTH as usize, + ) + }; + bytesrepr::deserialize(bytes).unwrap_or_revert() +} + /// Returns the current [`Phase`]. pub fn get_phase() -> Phase { let dest_non_null_ptr = contract_api::alloc_bytes(PHASE_SERIALIZED_LENGTH); @@ -276,6 +459,31 @@ pub fn remove_key(name: &str) { unsafe { ext_ffi::casper_remove_key(name_ptr, name_size) } } +/// Returns the set of [`AccountHash`] from the calling account's context `authorization_keys`. 
+pub fn list_authorization_keys() -> BTreeSet { + let (total_authorization_keys, result_size) = { + let mut authorization_keys = MaybeUninit::uninit(); + let mut result_size = MaybeUninit::uninit(); + let ret = unsafe { + ext_ffi::casper_load_authorization_keys( + authorization_keys.as_mut_ptr(), + result_size.as_mut_ptr(), + ) + }; + api_error::result_from(ret).unwrap_or_revert(); + let total_authorization_keys = unsafe { authorization_keys.assume_init() }; + let result_size = unsafe { result_size.assume_init() }; + (total_authorization_keys, result_size) + }; + + if total_authorization_keys == 0 { + return BTreeSet::new(); + } + + let bytes = read_host_buffer(result_size).unwrap_or_revert(); + bytesrepr::deserialize(bytes).unwrap_or_revert() +} + /// Returns the named keys of the current context. /// /// The current context is either the caller's account or a stored contract depending on whether the @@ -309,9 +517,10 @@ pub fn is_valid_uref(uref: URef) -> bool { pub fn blake2b>(input: T) -> [u8; BLAKE2B_DIGEST_LENGTH] { let mut ret = [0; BLAKE2B_DIGEST_LENGTH]; let result = unsafe { - ext_ffi::casper_blake2b( + ext_ffi::casper_generic_hash( input.as_ref().as_ptr(), input.as_ref().len(), + HashAlgorithm::Blake2b as u8, ret.as_mut_ptr(), BLAKE2B_DIGEST_LENGTH, ) @@ -320,6 +529,14 @@ pub fn blake2b>(input: T) -> [u8; BLAKE2B_DIGEST_LENGTH] { ret } +/// Returns 32 pseudo random bytes. +pub fn random_bytes() -> [u8; RANDOM_BYTES_COUNT] { + let mut ret = [0; RANDOM_BYTES_COUNT]; + let result = unsafe { ext_ffi::casper_random_bytes(ret.as_mut_ptr(), RANDOM_BYTES_COUNT) }; + api_error::result_from(result).unwrap_or_revert(); + ret +} + fn read_host_buffer_into(dest: &mut [u8]) -> Result { let mut bytes_written = MaybeUninit::uninit(); let ret = unsafe { @@ -343,6 +560,120 @@ pub(crate) fn read_host_buffer(size: usize) -> Result, ApiError> { Ok(dest) } +/// Returns the call stack. 
+pub fn get_call_stack() -> Vec { + let (call_stack_len, result_size) = { + let mut call_stack_len: usize = 0; + let mut result_size: usize = 0; + let ret = unsafe { + ext_ffi::casper_load_caller_information( + CallerIndex::FullStack as u8, + &mut call_stack_len as *mut usize, + &mut result_size as *mut usize, + ) + }; + api_error::result_from(ret).unwrap_or_revert(); + (call_stack_len, result_size) + }; + if call_stack_len == 0 { + return Vec::new(); + } + let bytes = read_host_buffer(result_size).unwrap_or_revert(); + bytesrepr::deserialize(bytes).unwrap_or_revert() +} + +fn get_initiator_or_immediate(action: u8) -> Result { + let (call_stack_len, result_size) = { + let mut call_stack_len: usize = 0; + let mut result_size: usize = 0; + let ret = unsafe { + ext_ffi::casper_load_caller_information( + action, + &mut call_stack_len as *mut usize, + &mut result_size as *mut usize, + ) + }; + api_error::result_from(ret).unwrap_or_revert(); + (call_stack_len, result_size) + }; + if call_stack_len == 0 { + return Err(ApiError::InvalidCallerInfoRequest); + } + let bytes = read_host_buffer(result_size).unwrap_or_revert(); + let caller: Vec = bytesrepr::deserialize(bytes).unwrap_or_revert(); + + if caller.len() != 1 { + return Err(ApiError::Unhandled); + }; + let first = caller.first().unwrap_or_revert().clone(); + Ok(first) +} + +/// Returns the call stack initiator +pub fn get_call_initiator() -> Result { + let caller = get_initiator_or_immediate(CallerIndex::Initiator as u8)?; + if caller.kind() != ACCOUNT { + return Err(ApiError::Unhandled); + }; + if let Some(cl_value) = caller.get_field_by_index(ACCOUNT) { + let maybe_account_hash = cl_value + .to_t::>() + .map_err(|_| ApiError::CLTypeMismatch)?; + match maybe_account_hash { + Some(hash) => Ok(hash), + None => Err(ApiError::None), + } + } else { + Err(ApiError::PurseNotCreated) + } +} + +/// Returns the immidiate caller within the call stack. 
+pub fn get_immediate_caller() -> Result { + get_initiator_or_immediate(CallerIndex::Immediate as u8) +} + +/// Manages a message topic. +pub fn manage_message_topic( + topic_name: &str, + operation: MessageTopicOperation, +) -> Result<(), ApiError> { + if topic_name.is_empty() { + return Err(ApiError::InvalidArgument); + } + + let (operation_ptr, operation_size, _bytes) = contract_api::to_ptr(operation); + let result = unsafe { + ext_ffi::casper_manage_message_topic( + topic_name.as_ptr(), + topic_name.len(), + operation_ptr, + operation_size, + ) + }; + api_error::result_from(result) +} + +/// Emits a message on a topic. +pub fn emit_message(topic_name: &str, message: &MessagePayload) -> Result<(), ApiError> { + if topic_name.is_empty() { + return Err(ApiError::InvalidArgument); + } + + let (message_ptr, message_size, _bytes) = contract_api::to_ptr(message); + + let result = unsafe { + ext_ffi::casper_emit_message( + topic_name.as_ptr(), + topic_name.len(), + message_ptr, + message_size, + ) + }; + + api_error::result_from(result) +} + #[cfg(feature = "test-support")] /// Prints a debug message pub fn print(text: &str) { diff --git a/smart_contracts/contract/src/contract_api/storage.rs b/smart_contracts/contract/src/contract_api/storage.rs index 66dd3175b4..b63f2a8a0c 100644 --- a/smart_contracts/contract/src/contract_api/storage.rs +++ b/smart_contracts/contract/src/contract_api/storage.rs @@ -1,14 +1,22 @@ //! Functions for accessing and mutating local and global state. 
-use alloc::{collections::BTreeSet, string::String, vec, vec::Vec}; +use alloc::{ + collections::{BTreeMap, BTreeSet}, + format, + string::String, + vec, + vec::Vec, +}; use core::{convert::From, mem::MaybeUninit}; use casper_types::{ + addressable_entity::EntryPoints, api_error, bytesrepr::{self, FromBytes, ToBytes}, - contracts::{ContractVersion, EntryPoints, NamedKeys}, - AccessRights, ApiError, CLTyped, CLValue, ContractHash, ContractPackageHash, HashAddr, Key, - URef, UREF_SERIALIZED_LENGTH, + contract_messages::MessageTopicOperation, + contracts::{ContractHash, ContractPackageHash, ContractVersion, NamedKeys}, + AccessRights, ApiError, CLTyped, CLValue, EntityVersion, HashAddr, Key, URef, + DICTIONARY_ITEM_KEY_MAX_LENGTH, UREF_SERIALIZED_LENGTH, }; use crate::{ @@ -20,6 +28,11 @@ use crate::{ /// Reads value under `uref` in the global state. pub fn read(uref: URef) -> Result, bytesrepr::Error> { let key: Key = uref.into(); + read_from_key(key) +} + +/// Reads value under `key` in the global state. +pub fn read_from_key(key: Key) -> Result, bytesrepr::Error> { let (key_ptr, key_size, _bytes) = contract_api::to_ptr(key); let value_size = { @@ -89,30 +102,52 @@ pub fn new_uref(init: T) -> URef { /// Create a new contract stored under a Key::Hash at version 1. You may upgrade this contract in /// the future; if you want a contract that is locked (i.e. cannot be upgraded) call /// `new_locked_contract` instead. -/// if `named_keys` are provided, will apply them -/// if `hash_name` is provided, puts contract hash in current context's named keys under `hash_name` -/// if `uref_name` is provided, puts access_uref in current context's named keys under `uref_name` +/// if `named_keys` is provided, puts all of the included named keys into the newly created +/// contract version's named keys. +/// if `hash_name` is provided, puts Key::Hash(contract_package_hash) into the +/// installing account's named keys under `hash_name`. 
+/// if `uref_name` is provided, puts Key::URef(access_uref) into the installing account's named +/// keys under `uref_name` pub fn new_contract( entry_points: EntryPoints, named_keys: Option, hash_name: Option, uref_name: Option, -) -> (ContractHash, ContractVersion) { - create_contract(entry_points, named_keys, hash_name, uref_name, false) + message_topics: Option>, +) -> (ContractHash, EntityVersion) { + create_contract( + entry_points, + named_keys, + hash_name, + uref_name, + message_topics, + false, + ) } /// Create a locked contract stored under a Key::Hash, which can never be upgraded. This is an /// irreversible decision; for a contract that can be upgraded use `new_contract` instead. -/// if `named_keys` are provided, will apply them -/// if `hash_name` is provided, puts contract hash in current context's named keys under `hash_name` -/// if `uref_name` is provided, puts access_uref in current context's named keys under `uref_name` +/// if `named_keys` is provided, puts all of the included named keys into the newly created +/// contract version's named keys. +/// if `hash_name` is provided, puts Key::Hash(contract_package_hash) into the +/// installing account's named keys under `hash_name`. 
+/// if `uref_name` is provided, puts Key::URef(access_uref) into the installing account's named +/// keys under `uref_name` pub fn new_locked_contract( entry_points: EntryPoints, named_keys: Option, hash_name: Option, uref_name: Option, -) -> (ContractHash, ContractVersion) { - create_contract(entry_points, named_keys, hash_name, uref_name, true) + message_topics: Option>, +) -> (ContractHash, EntityVersion) { + create_contract( + entry_points, + named_keys, + hash_name, + uref_name, + message_topics, + true, + ) } fn create_contract( @@ -120,24 +155,29 @@ fn create_contract( named_keys: Option, hash_name: Option, uref_name: Option, + message_topics: Option>, is_locked: bool, -) -> (ContractHash, ContractVersion) { +) -> (ContractHash, EntityVersion) { let (contract_package_hash, access_uref) = create_contract_package(is_locked); if let Some(hash_name) = hash_name { - runtime::put_key(&hash_name, contract_package_hash.into()); + runtime::put_key(&hash_name, Key::Hash(contract_package_hash.value())); }; if let Some(uref_name) = uref_name { runtime::put_key(&uref_name, access_uref.into()); }; - let named_keys = match named_keys { - Some(named_keys) => named_keys, - None => NamedKeys::new(), - }; + let named_keys = named_keys.unwrap_or_default(); + + let message_topics = message_topics.unwrap_or_default(); - add_contract_version(contract_package_hash, entry_points, named_keys) + add_contract_version( + contract_package_hash, + entry_points, + named_keys, + message_topics, + ) } /// Create a new (versioned) contract stored under a Key::Hash. Initially there @@ -271,51 +311,69 @@ pub fn remove_contract_user_group( api_error::result_from(ret) } -/// Add a new version of a contract to the contract stored at the given -/// `Key`. Note that this contract must have been created by -/// `create_contract` or `create_contract_package_at_hash` first. +/// Add version to existing Package. 
pub fn add_contract_version( - contract_package_hash: ContractPackageHash, + package_hash: ContractPackageHash, entry_points: EntryPoints, named_keys: NamedKeys, -) -> (ContractHash, ContractVersion) { - let (contract_package_hash_ptr, contract_package_hash_size, _bytes1) = - contract_api::to_ptr(contract_package_hash); - let (entry_points_ptr, entry_points_size, _bytes4) = contract_api::to_ptr(entry_points); - let (named_keys_ptr, named_keys_size, _bytes5) = contract_api::to_ptr(named_keys); + message_topics: BTreeMap, +) -> (ContractHash, EntityVersion) { + // Retain the underscore as Wasm transpiliation requires it. + let (package_hash_ptr, package_hash_size, _package_hash_bytes) = + contract_api::to_ptr(package_hash); + let (entry_points_ptr, entry_points_size, _entry_point_bytes) = + contract_api::to_ptr(entry_points); + let (named_keys_ptr, named_keys_size, _named_keys_bytes) = contract_api::to_ptr(named_keys); + let (message_topics_ptr, message_topics_size, _message_topics) = + contract_api::to_ptr(message_topics); - let mut output_ptr = vec![0u8; Key::max_serialized_length()]; - let mut total_bytes: usize = 0; + let mut output_ptr = vec![0u8; 32]; + // let mut total_bytes: usize = 0; - let mut contract_version: ContractVersion = 0; + let mut entity_version: ContractVersion = 0; let ret = unsafe { - ext_ffi::casper_add_contract_version( - contract_package_hash_ptr, - contract_package_hash_size, - &mut contract_version as *mut ContractVersion, + ext_ffi::casper_add_contract_version_with_message_topics( + package_hash_ptr, + package_hash_size, + &mut entity_version as *mut ContractVersion, // Fixed width entry_points_ptr, entry_points_size, named_keys_ptr, named_keys_size, + message_topics_ptr, + message_topics_size, output_ptr.as_mut_ptr(), output_ptr.len(), - &mut total_bytes as *mut usize, + // &mut total_bytes as *mut usize, ) }; match api_error::result_from(ret) { Ok(_) => {} Err(e) => revert(e), } - output_ptr.truncate(total_bytes); - let contract_hash = 
bytesrepr::deserialize(output_ptr).unwrap_or_revert(); - (contract_hash, contract_version) + // output_ptr.truncate(32usize); + let entity_hash: ContractHash = match bytesrepr::deserialize(output_ptr) { + Ok(hash) => hash, + Err(err) => panic!("{}", format!("{:?}", err)), + }; + (entity_hash, entity_version) } -/// Disable a version of a contract from the contract stored at the given -/// `Key`. That version of the contract will no longer be callable by -/// `call_versioned_contract`. Note that this contract must have been created by -/// `create_contract` or `create_contract_package_at_hash` first. +/// Disables a specific version of a contract within the contract package identified by +/// `contract_package_hash`. Once disabled, the specified version will no longer be +/// callable by `call_versioned_contract`. Please note that the contract must have been +/// previously created using `create_contract` or `create_contract_package_at_hash`. +/// +/// # Arguments +/// +/// * `contract_package_hash` - The hash of the contract package containing the version to be +/// disabled. +/// * `contract_hash` - The hash of the specific contract version to be disabled. +/// +/// # Errors +/// +/// Returns a `Result` indicating success or an `ApiError` if the operation fails. pub fn disable_contract_version( contract_package_hash: ContractPackageHash, contract_hash: ContractHash, @@ -335,3 +393,168 @@ pub fn disable_contract_version( api_error::result_from(result) } + +/// Enables a specific version of a contract from the contract package stored at the given hash. +/// Once enabled, that version of the contract becomes callable again by `call_versioned_contract`. +/// +/// # Arguments +/// +/// * `contract_package_hash` - The hash of the contract package containing the desired version. +/// * `contract_hash` - The hash of the specific contract version to be enabled. +/// +/// # Errors +/// +/// Returns a `Result` indicating success or an `ApiError` if the operation fails. 
+pub fn enable_contract_version( + contract_package_hash: ContractPackageHash, + contract_hash: ContractHash, +) -> Result<(), ApiError> { + let (contract_package_hash_ptr, contract_package_hash_size, _bytes1) = + contract_api::to_ptr(contract_package_hash); + let (contract_hash_ptr, contract_hash_size, _bytes2) = contract_api::to_ptr(contract_hash); + + let result = unsafe { + ext_ffi::casper_enable_contract_version( + contract_package_hash_ptr, + contract_package_hash_size, + contract_hash_ptr, + contract_hash_size, + ) + }; + + api_error::result_from(result) +} + +/// Creates new [`URef`] that represents a seed for a dictionary partition of the global state and +/// puts it under named keys. +pub fn new_dictionary(dictionary_name: &str) -> Result { + if dictionary_name.is_empty() || runtime::has_key(dictionary_name) { + return Err(ApiError::InvalidArgument); + } + + let value_size = { + let mut value_size = MaybeUninit::uninit(); + let ret = unsafe { ext_ffi::casper_new_dictionary(value_size.as_mut_ptr()) }; + api_error::result_from(ret)?; + unsafe { value_size.assume_init() } + }; + let value_bytes = runtime::read_host_buffer(value_size).unwrap_or_revert(); + let uref: URef = bytesrepr::deserialize(value_bytes).unwrap_or_revert(); + runtime::put_key(dictionary_name, Key::from(uref)); + Ok(uref) +} + +/// Retrieve `value` stored under `dictionary_item_key` in the dictionary accessed by +/// `dictionary_seed_uref`. 
+pub fn dictionary_get( + dictionary_seed_uref: URef, + dictionary_item_key: &str, +) -> Result, bytesrepr::Error> { + let (uref_ptr, uref_size, _bytes1) = contract_api::to_ptr(dictionary_seed_uref); + let (dictionary_item_key_ptr, dictionary_item_key_size) = + contract_api::dictionary_item_key_to_ptr(dictionary_item_key); + + if dictionary_item_key_size > DICTIONARY_ITEM_KEY_MAX_LENGTH { + revert(ApiError::DictionaryItemKeyExceedsLength) + } + + let value_size = { + let mut value_size = MaybeUninit::uninit(); + let ret = unsafe { + ext_ffi::casper_dictionary_get( + uref_ptr, + uref_size, + dictionary_item_key_ptr, + dictionary_item_key_size, + value_size.as_mut_ptr(), + ) + }; + match api_error::result_from(ret) { + Ok(_) => unsafe { value_size.assume_init() }, + Err(ApiError::ValueNotFound) => return Ok(None), + Err(e) => runtime::revert(e), + } + }; + + let value_bytes = runtime::read_host_buffer(value_size).unwrap_or_revert(); + Ok(Some(bytesrepr::deserialize(value_bytes)?)) +} + +/// Writes `value` under `dictionary_item_key` in the dictionary accessed by `dictionary_seed_uref`. +pub fn dictionary_put( + dictionary_seed_uref: URef, + dictionary_item_key: &str, + value: V, +) { + let (uref_ptr, uref_size, _bytes1) = contract_api::to_ptr(dictionary_seed_uref); + let (dictionary_item_key_ptr, dictionary_item_key_size) = + contract_api::dictionary_item_key_to_ptr(dictionary_item_key); + + if dictionary_item_key_size > DICTIONARY_ITEM_KEY_MAX_LENGTH { + revert(ApiError::DictionaryItemKeyExceedsLength) + } + + let cl_value = CLValue::from_t(value).unwrap_or_revert(); + let (cl_value_ptr, cl_value_size, _bytes) = contract_api::to_ptr(cl_value); + + let result = unsafe { + let ret = ext_ffi::casper_dictionary_put( + uref_ptr, + uref_size, + dictionary_item_key_ptr, + dictionary_item_key_size, + cl_value_ptr, + cl_value_size, + ); + api_error::result_from(ret) + }; + + result.unwrap_or_revert() +} + +/// Reads value under `dictionary_key` in the global state. 
+pub fn dictionary_read(dictionary_key: Key) -> Result, ApiError> { + if !dictionary_key.is_dictionary_key() { + return Err(ApiError::UnexpectedKeyVariant); + } + + let (key_ptr, key_size, _bytes) = contract_api::to_ptr(dictionary_key); + + let value_size = { + let mut value_size = MaybeUninit::uninit(); + let ret = + unsafe { ext_ffi::casper_dictionary_read(key_ptr, key_size, value_size.as_mut_ptr()) }; + match api_error::result_from(ret) { + Ok(_) => unsafe { value_size.assume_init() }, + Err(ApiError::ValueNotFound) => return Ok(None), + Err(e) => runtime::revert(e), + } + }; + + let value_bytes = runtime::read_host_buffer(value_size).unwrap_or_revert(); + Ok(Some(bytesrepr::deserialize(value_bytes)?)) +} + +fn get_named_uref(name: &str) -> URef { + match runtime::get_key(name).unwrap_or_revert_with(ApiError::GetKey) { + Key::URef(uref) => uref, + _ => revert(ApiError::UnexpectedKeyVariant), + } +} + +/// Gets a value out of a named dictionary. +pub fn named_dictionary_get( + dictionary_name: &str, + dictionary_item_key: &str, +) -> Result, bytesrepr::Error> { + dictionary_get(get_named_uref(dictionary_name), dictionary_item_key) +} + +/// Writes a value in a named dictionary. 
+pub fn named_dictionary_put( + dictionary_name: &str, + dictionary_item_key: &str, + value: V, +) { + dictionary_put(get_named_uref(dictionary_name), dictionary_item_key, value) +} diff --git a/smart_contracts/contract/src/contract_api/system.rs b/smart_contracts/contract/src/contract_api/system.rs index c3a6320a10..b8beb2e679 100644 --- a/smart_contracts/contract/src/contract_api/system.rs +++ b/smart_contracts/contract/src/contract_api/system.rs @@ -4,13 +4,8 @@ use alloc::vec::Vec; use core::mem::MaybeUninit; use casper_types::{ - account::AccountHash, - api_error, bytesrepr, - system::{ - auction::{self, EraInfo}, - SystemContractType, - }, - ApiError, ContractHash, EraId, HashAddr, TransferResult, TransferredTo, URef, U512, + account::AccountHash, api_error, bytesrepr, contracts::ContractHash, system::SystemEntityType, + ApiError, HashAddr, PublicKey, TransferResult, TransferredTo, URef, U512, UREF_SERIALIZED_LENGTH, }; @@ -20,7 +15,7 @@ use crate::{ unwrap_or_revert::UnwrapOrRevert, }; -fn get_system_contract(system_contract: SystemContractType) -> ContractHash { +fn get_system_contract(system_contract: SystemEntityType) -> ContractHash { let system_contract_index = system_contract.into(); let contract_hash: ContractHash = { let result = { @@ -35,6 +30,7 @@ fn get_system_contract(system_contract: SystemContractType) -> ContractHash { api_error::result_from(value).map(|_| hash_data_raw) }; // Revert for any possible error that happened on host side + #[allow(clippy::redundant_closure)] // false positive let contract_hash_bytes = result.unwrap_or_else(|e| runtime::revert(e)); // Deserializes a valid URef passed from the host side bytesrepr::deserialize(contract_hash_bytes.to_vec()).unwrap_or_revert() @@ -46,51 +42,48 @@ fn get_system_contract(system_contract: SystemContractType) -> ContractHash { /// /// Any failure will trigger [`revert`](runtime::revert) with an appropriate [`ApiError`]. 
pub fn get_mint() -> ContractHash { - get_system_contract(SystemContractType::Mint) + get_system_contract(SystemEntityType::Mint) } /// Returns a read-only pointer to the Handle Payment contract. /// /// Any failure will trigger [`revert`](runtime::revert) with an appropriate [`ApiError`]. pub fn get_handle_payment() -> ContractHash { - get_system_contract(SystemContractType::HandlePayment) + get_system_contract(SystemEntityType::HandlePayment) } /// Returns a read-only pointer to the Standard Payment contract. /// /// Any failure will trigger [`revert`](runtime::revert) with an appropriate [`ApiError`]. pub fn get_standard_payment() -> ContractHash { - get_system_contract(SystemContractType::StandardPayment) + get_system_contract(SystemEntityType::StandardPayment) } /// Returns a read-only pointer to the Auction contract. /// /// Any failure will trigger [`revert`](runtime::revert) with appropriate [`ApiError`]. pub fn get_auction() -> ContractHash { - get_system_contract(SystemContractType::Auction) + get_system_contract(SystemEntityType::Auction) } /// Creates a new empty purse and returns its [`URef`]. 
-#[doc(hidden)] pub fn create_purse() -> URef { let purse_non_null_ptr = contract_api::alloc_bytes(UREF_SERIALIZED_LENGTH); - unsafe { - let ret = ext_ffi::casper_create_purse(purse_non_null_ptr.as_ptr(), UREF_SERIALIZED_LENGTH); - if ret == 0 { - let bytes = Vec::from_raw_parts( - purse_non_null_ptr.as_ptr(), - UREF_SERIALIZED_LENGTH, - UREF_SERIALIZED_LENGTH, - ); - bytesrepr::deserialize(bytes).unwrap_or_revert() - } else { - runtime::revert(ApiError::PurseNotCreated) - } - } + let ret = unsafe { + ext_ffi::casper_create_purse(purse_non_null_ptr.as_ptr(), UREF_SERIALIZED_LENGTH) + }; + api_error::result_from(ret).unwrap_or_revert(); + let bytes = unsafe { + Vec::from_raw_parts( + purse_non_null_ptr.as_ptr(), + UREF_SERIALIZED_LENGTH, + UREF_SERIALIZED_LENGTH, + ) + }; + bytesrepr::deserialize(bytes).unwrap_or_revert() } /// Returns the balance in motes of the given purse. -#[doc(hidden)] pub fn get_purse_balance(purse: URef) -> Option { let (purse_ptr, purse_size, _bytes) = contract_api::to_ptr(purse); @@ -109,7 +102,7 @@ pub fn get_purse_balance(purse: URef) -> Option { Some(value) } -/// Returns the balance in motes of a purse. +/// Returns the balance in motes of the account's main purse. pub fn get_balance() -> Option { get_purse_balance(account::get_main_purse()) } @@ -142,9 +135,15 @@ pub fn transfer_to_account(target: AccountHash, amount: U512, id: Option) - TransferredTo::result_from(transferred_to_value) } +/// Transfers `amount` of motes from the main purse of the caller's account to the main purse of +/// `target`. If the account referenced by `target` does not exist, it will be created. +pub fn transfer_to_public_key(target: PublicKey, amount: U512, id: Option) -> TransferResult { + let target = AccountHash::from(&target); + transfer_to_account(target, amount, id) +} + /// Transfers `amount` of motes from `source` purse to `target` account. If `target` does not exist /// it will be created. 
-#[doc(hidden)] pub fn transfer_from_purse_to_account( source: URef, target: AccountHash, @@ -179,9 +178,20 @@ pub fn transfer_from_purse_to_account( TransferredTo::result_from(transferred_to_value) } +/// Transfers `amount` of motes from `source` to the main purse of `target`. If the account +/// referenced by `target` does not exist, it will be created. +pub fn transfer_from_purse_to_public_key( + source: URef, + target: PublicKey, + amount: U512, + id: Option, +) -> TransferResult { + let target = AccountHash::from(&target); + transfer_from_purse_to_account(source, target, amount, id) +} + /// Transfers `amount` of motes from `source` purse to `target` purse. If `target` does not exist /// the transfer fails. -#[doc(hidden)] pub fn transfer_from_purse_to_purse( source: URef, target: URef, @@ -206,55 +216,3 @@ pub fn transfer_from_purse_to_purse( }; api_error::result_from(result) } - -/// Records a transfer. Can only be called from within the mint contract. -/// Needed to support system contract-based execution. -#[doc(hidden)] -pub fn record_transfer( - maybe_to: Option, - source: URef, - target: URef, - amount: U512, - id: Option, -) -> Result<(), ApiError> { - let (maybe_to_ptr, maybe_to_size, _bytes1) = contract_api::to_ptr(maybe_to); - let (source_ptr, source_size, _bytes2) = contract_api::to_ptr(source); - let (target_ptr, target_size, _bytes3) = contract_api::to_ptr(target); - let (amount_ptr, amount_size, _bytes4) = contract_api::to_ptr(amount); - let (id_ptr, id_size, _bytes5) = contract_api::to_ptr(id); - let result = unsafe { - ext_ffi::casper_record_transfer( - maybe_to_ptr, - maybe_to_size, - source_ptr, - source_size, - target_ptr, - target_size, - amount_ptr, - amount_size, - id_ptr, - id_size, - ) - }; - if result == 0 { - Ok(()) - } else { - Err(ApiError::Transfer) - } -} - -/// Records era info. Can only be called from within the auction contract. -/// Needed to support system contract-based execution. 
-#[doc(hidden)] -pub fn record_era_info(era_id: EraId, era_info: EraInfo) -> Result<(), ApiError> { - let (era_id_ptr, era_id_size, _bytes1) = contract_api::to_ptr(era_id); - let (era_info_ptr, era_info_size, _bytes2) = contract_api::to_ptr(era_info); - let result = unsafe { - ext_ffi::casper_record_era_info(era_id_ptr, era_id_size, era_info_ptr, era_info_size) - }; - if result == 0 { - Ok(()) - } else { - Err(auction::Error::RecordEraInfo.into()) - } -} diff --git a/smart_contracts/contract/src/ext_ffi.rs b/smart_contracts/contract/src/ext_ffi.rs index 1263bb667e..a7f380f2a0 100644 --- a/smart_contracts/contract/src/ext_ffi.rs +++ b/smart_contracts/contract/src/ext_ffi.rs @@ -2,6 +2,10 @@ //! //! Generally should not be used directly. See the [`contract_api`](crate::contract_api) for //! high-level bindings suitable for writing smart contracts. + +#[cfg(doc)] +use alloc::collections::BTreeMap; + extern "C" { /// The bytes in the span of wasm memory from `key_ptr` to `key_ptr + key_size` must correspond /// to a valid global state key, otherwise the function will fail. If the key is de-serialized @@ -63,7 +67,17 @@ extern "C" { /// * `value_ptr` - pointer to bytes representing the value to write under the new `URef` /// * `value_size` - size of the value (in bytes) pub fn casper_new_uref(uref_ptr: *mut u8, value_ptr: *const u8, value_size: usize); + /// This function loads a set of authorized keys used to sign this deploy from the host. + /// The data will be available through the host buffer and can be copied to Wasm memory through + /// [`casper_read_host_buffer`]. /// + /// # Arguments + /// + /// * `total_keys`: number of authorization keys used to sign this deploy + /// * `result_size`: size of the data loaded in the host + pub fn casper_load_authorization_keys(total_keys: *mut usize, result_size: *mut usize) -> i32; + /// This function loads a set of named keys from the host. 
The data will be available through + /// the host buffer and can be copied to Wasm memory through [`casper_read_host_buffer`]. pub fn casper_load_named_keys(total_keys: *mut usize, result_size: *mut usize) -> i32; /// This function causes a `Trap`, terminating the currently running module, /// but first copies the bytes from `value_ptr` to `value_ptr + value_size` to @@ -81,7 +95,7 @@ extern "C" { /// * `value_ptr`: pointer to bytes representing the value to return to the caller /// * `value_size`: size of the value (in bytes) pub fn casper_ret(value_ptr: *const u8, value_size: usize) -> !; - /// + /// Retrieves a key from the named keys by name and writes it to the output buffer. pub fn casper_get_key( name_ptr: *const u8, name_size: usize, @@ -89,16 +103,16 @@ extern "C" { output_size: usize, bytes_written_ptr: *mut usize, ) -> i32; - /// + /// This function checks if the key with the given name is present in the named keys. pub fn casper_has_key(name_ptr: *const u8, name_size: usize) -> i32; - /// + /// This function stores a key under the given name in the named keys. pub fn casper_put_key( name_ptr: *const u8, name_size: usize, key_ptr: *const u8, key_size: usize, ); - /// + /// This function removes a key with the given name from the named keys. pub fn casper_remove_key(name_ptr: *const u8, name_size: usize); /// This function causes a `Trap` which terminates the currently running /// module. Additionally, it signals that the current entire phase of @@ -185,8 +199,8 @@ extern "C" { /// /// # Arguments /// - /// * `public_key` - pointer to the bytes in wasm memory representing the - /// public key to update, presently only 32-byte public keys are supported + /// * `public_key` - pointer to the bytes in wasm memory representing the public key to update, + /// presently only 32-byte public keys are supported. 
/// * `weight` - the weight to assign to this public key pub fn casper_update_associated_key( account_hash_ptr: *const u8, @@ -211,16 +225,14 @@ extern "C" { /// * `action` - index representing the action threshold to set /// * `threshold` - new value of the threshold for performing this action pub fn casper_set_action_threshold(permission_level: u32, threshold: u32) -> i32; - /// This function returns the public key of the account for this deploy. The - /// result is always 36-bytes in length (4 bytes prefix on a 32-byte public - /// key); it is up to the caller to ensure the right amount of memory is - /// allocated at `dest_ptr`, data corruption in the wasm memory could occur - /// otherwise. + /// Returns the caller of the current context, i.e. the [`casper_types::account::AccountHash`] + /// of the account which made the transaction request. The value stored in the host + /// buffer is always 32-bytes in length. /// /// # Arguments /// - /// * `dest_ptr` - pointer to position in wasm memory where to write the result - pub fn casper_get_caller(output_size: *mut usize) -> i32; + /// * `output_size_ptr` - pointer to a value where the size of the account hash will be set. + pub fn casper_get_caller(output_size_ptr: *mut usize) -> i32; /// This function gets the timestamp which will be in the block this deploy is /// included in. The return value is always a 64-bit unsigned integer, /// representing the number of milliseconds since the Unix epoch. 
It is up to @@ -242,7 +254,6 @@ extern "C" { /// /// * `purse_ptr` - pointer to position in wasm memory where to write the created `URef` /// * `purse_size` - allocated size for the `URef` - #[doc(hidden)] pub fn casper_create_purse(purse_ptr: *const u8, purse_size: usize) -> i32; /// This function uses the mint contract’s transfer function to transfer /// tokens from the current account’s main purse to the main purse of the @@ -313,7 +324,6 @@ extern "C" { /// * `id_size` - size of the id (in bytes) /// * `result_ptr` - pointer in wasm memory to a value where `TransferredTo` value would be set /// on successful transfer. - #[doc(hidden)] pub fn casper_transfer_from_purse_to_account( source_ptr: *const u8, source_size: usize, @@ -351,7 +361,6 @@ extern "C" { /// * `amount_size` - size of the amount (in bytes) /// * `id_ptr` - pointer in wasm memory to bytes representing the user-defined transaction id /// * `id_size` - size of the id (in bytes) - #[doc(hidden)] pub fn casper_transfer_from_purse_to_purse( source_ptr: *const u8, source_size: usize, @@ -362,52 +371,6 @@ extern "C" { id_ptr: *const u8, id_size: usize, ) -> i32; - /// Records a transfer. Can only be called from within the mint contract. - /// Needed to support system contract-based execution. 
- /// - /// # Arguments - /// - /// * `maybe_to_ptr` - pointer in wasm memory to bytes representing the recipient - /// `Option` - /// * `maybe_to_size` - size of the source `Option` (in bytes) - /// * `source_ptr` - pointer in wasm memory to bytes representing the source `URef` to transfer - /// from - /// * `source_size` - size of the source `URef` (in bytes) - /// * `target_ptr` - pointer in wasm memory to bytes representing the target `URef` to transfer - /// to - /// * `target_size` - size of the target (in bytes) - /// * `amount_ptr` - pointer in wasm memory to bytes representing the amount to transfer to the - /// target account - /// * `amount_size` - size of the amount (in bytes) - /// * `id_ptr` - pointer in wasm memory to bytes representing the user-defined transaction id - /// * `id_size` - size of the id (in bytes) - pub fn casper_record_transfer( - maybe_to_ptr: *const u8, - maybe_to_size: usize, - source_ptr: *const u8, - source_size: usize, - target_ptr: *const u8, - target_size: usize, - amount_ptr: *const u8, - amount_size: usize, - id_ptr: *const u8, - id_size: usize, - ) -> i32; - /// Records era info. Can only be called from within the auction contract. - /// Needed to support system contract-based execution. - /// - /// # Arguments - /// - /// * `era_id_ptr` - pointer in wasm memory to bytes representing the `EraId` - /// * `era_id_size` - size of the `EraId` (in bytes) - /// * `era_info_ptr` - pointer in wasm memory to bytes representing the `EraInfo` - /// * `era_info_size` - size of the `EraInfo` (in bytes) - pub fn casper_record_era_info( - era_id_ptr: *const u8, - era_id_size: usize, - era_info_ptr: *const u8, - era_info_size: usize, - ) -> i32; /// This function uses the mint contract's balance function to get the balance /// of the specified purse. 
It causes a `Trap` if the bytes in wasm memory /// from `purse_ptr` to `purse_ptr + purse_size` cannot be @@ -440,14 +403,13 @@ extern "C" { /// /// * `dest_ptr` - pointer to position in wasm memory to write the result pub fn casper_get_phase(dest_ptr: *mut u8); - /// + /// Retrieves a system contract by index and writes it to the destination pointer. pub fn casper_get_system_contract( system_contract_index: u32, dest_ptr: *mut u8, dest_size: usize, ) -> i32; - /// - #[doc(hidden)] + /// Retrieves the main purse and writes it to the destination pointer. pub fn casper_get_main_purse(dest_ptr: *mut u8); /// This function copies the contents of the current runtime buffer into the /// wasm memory, beginning at the provided offset. It is intended that this @@ -471,7 +433,7 @@ extern "C" { bytes_written: *mut usize, ) -> i32; /// Creates new contract package at hash. Returns both newly generated - /// [`casper_types::ContractPackageHash`] and a [`casper_types::URef`] for further + /// [`casper_types::PackageHash`] and a [`casper_types::URef`] for further /// modifying access. pub fn casper_create_contract_package_at_hash( hash_addr_ptr: *mut u8, @@ -501,7 +463,7 @@ extern "C" { existing_urefs_size: usize, output_size_ptr: *mut usize, ) -> i32; - /// Adds new contract version to a contract package. + /// Adds new contract version to a contract package without message topics. 
/// /// # Arguments /// @@ -510,8 +472,8 @@ extern "C" { /// * `version_ptr` - output parameter where new version assigned by host is set /// * `entry_points_ptr` - pointer to serialized [`casper_types::EntryPoints`] /// * `entry_points_size` - size of serialized [`casper_types::EntryPoints`] - /// * `named_keys_ptr` - pointer to serialized [`casper_types::contracts::NamedKeys`] - /// * `named_keys_size` - size of serialized [`casper_types::contracts::NamedKeys`] + /// * `named_keys_ptr` - pointer to serialized [`casper_types::NamedKeys`] + /// * `named_keys_size` - size of serialized [`casper_types::NamedKeys`] /// * `output_ptr` - pointer to a memory where host assigned contract hash is set to /// * `output_size` - size of memory area that host can write to /// * `bytes_written_ptr` - pointer to a value where host will set a number of bytes written to @@ -528,6 +490,64 @@ extern "C" { output_size: usize, bytes_written_ptr: *mut usize, ) -> i32; + /// Adds a new version to a contract package with message topics. + /// + /// # Arguments + /// + /// * `contract_package_hash_ptr` - pointer to serialized package hash. + /// * `contract_package_hash_size` - size of package hash in serialized form. + /// * `version_ptr` - output parameter where new version assigned by host is set + /// * `entry_points_ptr` - pointer to serialized [`casper_types::EntryPoints`] + /// * `entry_points_size` - size of serialized [`casper_types::EntryPoints`] + /// * `named_keys_ptr` - pointer to serialized [`casper_types::NamedKeys`] + /// * `named_keys_size` - size of serialized [`casper_types::NamedKeys`] + /// * `message_topics_ptr` - pointer to serialized BTreeMap + /// containing message topic names and the operation to pe performed on each one. 
+ /// * `message_topics_size` - size of serialized BTreeMap + /// * `output_ptr` - pointer to a memory where host assigned contract hash is set to + /// * `output_size` - expected width of output (currently 32) + pub fn casper_add_contract_version_with_message_topics( + contract_package_hash_ptr: *const u8, + contract_package_hash_size: usize, + version_ptr: *const u32, + entry_points_ptr: *const u8, + entry_points_size: usize, + named_keys_ptr: *const u8, + named_keys_size: usize, + message_topics_ptr: *const u8, + message_topics_size: usize, + output_ptr: *mut u8, + output_size: usize, + ) -> i32; + /// Adds a new version to a package. + /// + /// # Arguments + /// + /// * `package_hash_ptr` - pointer to serialized package hash. + /// * `package_hash_size` - size of package hash in serialized form. + /// * `version_ptr` - output parameter where new version assigned by host is set + /// * `entry_points_ptr` - pointer to serialized [`casper_types::EntryPoints`] + /// * `entry_points_size` - size of serialized [`casper_types::EntryPoints`] + /// * `named_keys_ptr` - pointer to serialized [`casper_types::NamedKeys`] + /// * `named_keys_size` - size of serialized [`casper_types::NamedKeys`] + /// * `message_topics_ptr` - pointer to serialized BTreeMap + /// containing message topic names and the operation to pe performed on each one. + /// * `message_topics_size` - size of serialized BTreeMap + /// * `output_ptr` - pointer to a memory where host assigned contract hash is set to + /// * `output_size` - expected width of output (currently 32) + pub fn casper_add_package_version_with_message_topics( + package_hash_ptr: *const u8, + package_hash_size: usize, + version_ptr: *const u32, + entry_points_ptr: *const u8, + entry_points_size: usize, + named_keys_ptr: *const u8, + named_keys_size: usize, + message_topics_ptr: *const u8, + message_topics_size: usize, + output_ptr: *mut u8, + output_size: usize, + ) -> i32; /// Disables contract in a contract package. 
Returns non-zero standard error for a failure, /// otherwise a zero indicates success. /// @@ -694,13 +714,24 @@ extern "C" { /// * `in_size` - length of bytes /// * `out_ptr` - pointer to the location where argument bytes will be copied from the host side /// * `out_size` - size of output pointer + #[deprecated(note = "Superseded by ext_ffi::casper_generic_hash")] pub fn casper_blake2b( in_ptr: *const u8, in_size: usize, out_ptr: *mut u8, out_size: usize, ) -> i32; - /// Prints data directly to stanadard output on the host. + /// Returns the elements on the call stack tracked by the runtime + /// + /// # Arguments + /// * `call_stack_len_ptr` - pointer to the length of the caller information. + /// * `result_size_ptr` - pointer to the size of the serialized caller information. + #[deprecated] + pub fn casper_load_call_stack( + call_stack_len_ptr: *mut usize, + result_size_ptr: *mut usize, + ) -> i32; + /// Prints data directly to standard output on the host. /// /// # Arguments /// @@ -708,4 +739,244 @@ extern "C" { /// * `text_size` - size of serialized text to print #[cfg(feature = "test-support")] pub fn casper_print(text_ptr: *const u8, text_size: usize); + /// Creates new URef that points to a dictionary partition of global state. + /// + /// # Arguments + /// + /// * `output_size` - pointer to a value where host will write size of bytes of created URef. + pub fn casper_new_dictionary(output_size_ptr: *mut usize) -> i32; + /// The bytes in wasm memory from offset `key_ptr` to `key_ptr + key_size` + /// will be used together with the current context’s seed to form a dictionary. + /// The value at that dictionary is read from the global state, serialized and + /// buffered in the runtime. This result can be obtained via the [`casper_read_host_buffer`] + /// function. 
+ /// + /// # Arguments + /// + /// * `uref_ptr` - pointer to bytes representing the user-defined key + /// * `uref_size` - size of the key (in bytes) + /// * `key_bytes_ptr` - pointer to bytes representing the user-defined key + /// * `key_bytes_size` - size of the user-defined key + /// * `output_size` - pointer to a value where host will write size of bytes read from given key + pub fn casper_dictionary_get( + uref_ptr: *const u8, + uref_size: usize, + key_bytes_ptr: *const u8, + key_bytes_size: usize, + output_size: *mut usize, + ) -> i32; + /// The bytes in the span of wasm memory from `key_ptr` to `key_ptr + key_size` must correspond + /// to a valid global state dictionary key, otherwise the function will fail. + /// If the Key::Dictionary is de-serialized successfully, then the result of the read is + /// serialized and buffered in the runtime. This result can be obtained via the + /// [`casper_read_host_buffer`] function. Returns standard error code. + /// + /// # Arguments + /// + /// * `key_ptr` - pointer (offset in wasm linear memory) to serialized form of the + /// Key::Dictionary to read + /// * `key_size` - size of the serialized Key::Dictionary (in bytes) + /// * `output_size` - pointer to a value where host will write size of bytes read from given key + pub fn casper_dictionary_read( + key_ptr: *const u8, + key_size: usize, + output_size: *mut usize, + ) -> i32; + /// The bytes in wasm memory from offset `key_ptr` to `key_ptr + key_size` + /// will be used together with the passed URef's seed to form a dictionary. + /// This function writes the provided value (read via de-serializing the bytes + /// in wasm memory from offset `value_ptr` to `value_ptr + value_size`) under + /// that dictionary in the global state. This function will cause a `Trap` if + /// the value fails to de-serialize. 
+ /// + /// # Arguments + /// + /// * `uref_ptr` - pointer to bytes representing the user-defined key + /// * `uref_size` - size of the key (in bytes) + /// * `key_ptr` - pointer to bytes representing the user-defined key to write to + /// * `key_size` - size of the key (in bytes) + /// * `value_ptr` - pointer to bytes representing the value to write at the key + /// * `value_size` - size of the value (in bytes) + pub fn casper_dictionary_put( + uref_ptr: *const u8, + uref_size: usize, + key_ptr: *const u8, + key_size: usize, + value_ptr: *const u8, + value_size: usize, + ) -> i32; + /// Returns 32 pseudo random bytes. + /// + /// # Arguments + /// * `out_ptr` - pointer to the location where argument bytes will be copied from the host side + /// * `out_size` - size of output pointer + pub fn casper_random_bytes(out_ptr: *mut u8, out_size: usize) -> i32; + /// Enables contract in a contract package. Returns non-zero standard error for a failure, + /// otherwise a zero indicates success. + /// + /// # Arguments + /// + /// * `contract_package_hash_ptr` - pointer to serialized contract package hash. + /// * `contract_package_hash_size` - size of contract package hash in serialized form. + /// * `contract_hash_ptr` - pointer to serialized contract hash. + /// * `contract_hash_size` - size of contract hash in serialized form. + pub fn casper_enable_contract_version( + contract_package_hash_ptr: *const u8, + contract_package_hash_size: usize, + contract_hash_ptr: *const u8, + contract_hash_size: usize, + ) -> i32; + /// Manages a message topic. + /// + /// # Arguments + /// + /// * `topic_name_ptr` - pointer to the topic name UTF-8 string. + /// * `topic_name_size` - size of the serialized name string. + /// * `operation_ptr` - pointer to the management operation to be performed for the specified + /// topic. + /// * `operation_ptr_size` - size of the operation. 
+ pub fn casper_manage_message_topic( + topic_name_ptr: *const u8, + topic_name_size: usize, + operation_ptr: *const u8, + operation_size: usize, + ) -> i32; + /// Emits a new message on the specified topic. + /// + /// # Arguments + /// + /// * `topic_name_ptr` - pointer to the topic name UTF-8 string where the message will be + /// emitted. + /// * `topic_name_size` - size of the serialized name string. + /// * `message_ptr` - pointer to the serialized message payload to be emitted. + /// * `message_size` - size of the serialized message payload. + pub fn casper_emit_message( + topic_name_ptr: *const u8, + topic_name_size: usize, + message_ptr: *const u8, + message_size: usize, + ) -> i32; + + /// Returns information about the current call stack tracked by the runtime + /// based on an action + /// `0` => Initiator of the call chain + /// `1` => Immediate caller + /// `2` => The entire call stack + /// + /// # Arguments + /// `action`: u8 which encodes the information requested by the caller. + /// * `call_stack_len_ptr` - pointer to the length of the caller information. + /// * `result_size_ptr` - pointer to the size of the serialized caller information. + pub fn casper_load_caller_information( + action: u8, + call_stack_len_ptr: *mut usize, + result_size_ptr: *mut usize, + ) -> i32; + + /// This function gets the requested field at `field_idx`. It is up to + /// the caller to ensure that the correct number of bytes for the field data + /// are allocated at `dest_ptr`, otherwise data corruption in the wasm memory may occur. + /// + /// # Arguments + /// + /// * `field_idx` - what info field is requested? 
+ /// * 0 => block time (functionally equivalent to earlier get_blocktime ffi) + /// * 1 => block height + /// * 2 => parent block hash + /// * 3 => state hash + /// * 4 => current protocol version + /// * 5 => is addressable entity enabled + /// * `dest_ptr` => pointer in wasm memory where to write the result + pub fn casper_get_block_info(field_idx: u8, dest_ptr: *const u8); + + /// Computes digest hash, using provided algorithm type. + /// + /// # Arguments + /// + /// * `in_ptr` - pointer to the location where argument bytes will be copied from the host side + /// * `in_size` - size of output pointer + /// * `hash_algo_type` - integer representation of HashAlgorithm enum variant + /// * `out_ptr` - pointer to the location where argument bytes will be copied to the host side + /// * `out_size` - size of output pointer + pub fn casper_generic_hash( + in_ptr: *const u8, + in_size: usize, + hash_algo_type: u8, + out_ptr: *const u8, + out_size: usize, + ) -> i32; + + /// Recovers a Secp256k1 public key from a signed message + /// and a signature used in the process of signing. + /// + /// # Arguments + /// + /// * `message_ptr` - pointer to the signed data + /// * `message_size` - length of the signed data in bytes + /// * `signature_ptr` - pointer to byte-encoded signature + /// * `signature_size` - length of the byte-encoded signature + /// * `out_ptr` - pointer to a buffer of size PublicKey::SECP256K1_LENGTH which will be + /// populated with the recovered key's bytes representation + /// * `recovery_id` - an integer value 0, 1, 2, or 3 used to select the correct public key from + /// the signature: + /// - Low bit (0/1): was the y-coordinate of the affine point resulting from the fixed-base + /// multiplication 𝑘×𝑮 odd? + /// - Hi bit (3/4): did the affine x-coordinate of 𝑘×𝑮 overflow the order of the scalar field, + /// requiring a reduction when computing r? 
+ pub fn casper_recover_secp256k1( + message_ptr: *const u8, + message_size: usize, + signature_ptr: *const u8, + signature_size: usize, + out_ptr: *const u8, + recovery_id: u8, + ) -> i32; + + /// Verifies the signature of the given message against the given public key. + /// + /// # Arguments + /// + /// * `message_ptr` - pointer to the signed data + /// * `message_size` - length of the signed data in bytes + /// * `signature_ptr` - pointer to byte-encoded signature + /// * `signature_size` - length of the byte-encoded signature + /// * `public_key_ptr` - pointer to byte-encoded public key + /// * `public_key_size` - length of the byte-encoded public key + pub fn casper_verify_signature( + message_ptr: *const u8, + message_size: usize, + signature_ptr: *const u8, + signature_size: usize, + public_key_ptr: *const u8, + public_key_size: usize, + ) -> i32; + /// Calls a contract by its package hash. Requires both a major and contract version. Requires + /// an entry point name registered in a given version of contract. Returns a standard error + /// code in case of failure, otherwise a successful execution returns zero. Bytes returned + /// from contract execution are set to `result_size` pointer + /// + /// # Arguments + /// + /// * `contract_package_hash_ptr` - pointer to serialized contract package hash. + /// * `contract_package_hash_size` - size of contract package hash in serialized form. 
+ /// * `contract_version_ptr` - Contract package hash in a serialized form + /// * `contract_version_size` - + /// * `entry_point_name_ptr` - + /// * `entry_point_name_size` - + /// * `runtime_args_ptr` - + /// * `runtime_args_size` - + /// * `result_size` - + pub fn casper_call_package_version( + contract_package_hash_ptr: *const u8, + contract_package_hash_size: usize, + major_version_ptr: *const u8, + major_version_size: usize, + contract_version_ptr: *const u8, + contract_version_size: usize, + entry_point_name_ptr: *const u8, + entry_point_name_size: usize, + runtime_args_ptr: *const u8, + runtime_args_size: usize, + result_size: *mut usize, + ) -> i32; } diff --git a/smart_contracts/contract/src/handlers.rs b/smart_contracts/contract/src/handlers.rs deleted file mode 100644 index 1b09b7b0cb..0000000000 --- a/smart_contracts/contract/src/handlers.rs +++ /dev/null @@ -1,26 +0,0 @@ -//! Contains definitions for panic and allocation error handlers, along with other `no_std` support -//! code. -#[cfg(feature = "test-support")] -use crate::contract_api::runtime; -#[cfg(feature = "test-support")] -use alloc::format; - -/// A panic handler for use in a `no_std` environment which simply aborts the process. -#[panic_handler] -#[no_mangle] -pub fn panic(_info: &::core::panic::PanicInfo) -> ! { - #[cfg(feature = "test-support")] - runtime::print(&format!("Panic: {}", _info)); - ::core::intrinsics::abort(); -} - -/// An out-of-memory allocation error handler for use in a `no_std` environment which simply aborts -/// the process. -#[alloc_error_handler] -#[no_mangle] -pub fn oom(_: ::core::alloc::Layout) -> ! { - ::core::intrinsics::abort(); -} - -#[lang = "eh_personality"] -extern "C" fn eh_personality() {} diff --git a/smart_contracts/contract/src/lib.rs b/smart_contracts/contract/src/lib.rs index 74daa5b114..1ce525aa4c 100644 --- a/smart_contracts/contract/src/lib.rs +++ b/smart_contracts/contract/src/lib.rs @@ -1,22 +1,22 @@ //! 
A Rust library for writing smart contracts on the -//! [Casper Platform](https://techspec.casperlabs.io). +//! [Casper Platform](https://docs.casper.network/dapp-dev-guide). //! //! # `no_std` //! -//! By default, the library is `no_std`, however you can enable full `std` functionality by enabling -//! the crate's `std` feature. +//! The library is `no_std`, but uses the `core` and `alloc` crates. //! //! # Example //! //! The following example contains session code which persists an integer value under an unforgeable //! reference. It then stores the unforgeable reference under a name in context-local storage. //! -//! ```rust,no_run +//! # Writing Smart Contracts +//! +//! ```no_run //! #![no_std] +//! #![no_main] //! -//! use casper_contract::{ -//! contract_api::{runtime, storage}, -//! }; +//! use casper_contract::contract_api::{runtime, storage}; //! use casper_types::{Key, URef}; //! //! const KEY: &str = "special_value"; @@ -40,39 +40,34 @@ //! let value: i32 = runtime::get_named_arg(ARG_VALUE); //! store(value); //! } -//! # fn main() {} //! ``` //! -//! # Writing Smart Contracts -//! //! Support for writing smart contracts are contained in the [`contract_api`] module and its //! submodules. 
-#![cfg_attr(not(feature = "std"), no_std)] +#![cfg_attr(not(test), no_std)] +#![cfg_attr(all(not(test), feature = "no-std-helpers"), allow(internal_features))] #![cfg_attr( - not(feature = "std"), + all(not(test), feature = "no-std-helpers"), feature(alloc_error_handler, core_intrinsics, lang_items) )] -#![doc(html_root_url = "https://docs.rs/casper-contract/1.0.0")] +#![doc(html_root_url = "https://docs.rs/casper-contract/5.1.1")] #![doc( - html_favicon_url = "https://raw.githubusercontent.com/CasperLabs/casper-node/master/images/CasperLabs_Logo_Favicon_RGB_50px.png", - html_logo_url = "https://raw.githubusercontent.com/CasperLabs/casper-node/master/images/CasperLabs_Logo_Symbol_RGB.png", - test(attr(forbid(warnings))) + html_favicon_url = "https://raw.githubusercontent.com/casper-network/casper-node/blob/dev/images/Casper_Logo_Favicon_48.png", + html_logo_url = "https://raw.githubusercontent.com/casper-network/casper-node/blob/dev/images/Casper_Logo_Favicon.png" )] #![warn(missing_docs)] extern crate alloc; -#[cfg(any(feature = "std", test))] -extern crate std; + +pub mod contract_api; +pub mod ext_ffi; +#[cfg(all(not(test), feature = "no-std-helpers", not(feature = "std")))] +mod no_std_handlers; +pub mod unwrap_or_revert; /// An instance of [`WeeAlloc`](https://docs.rs/wee_alloc) which allows contracts built as `no_std` /// to avoid having to provide a global allocator themselves. -#[cfg(not(any(feature = "std", test)))] +#[cfg(all(not(test), feature = "no-std-helpers"))] #[global_allocator] pub static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT; - -pub mod contract_api; -pub mod ext_ffi; -#[cfg(not(any(feature = "std", test, doc)))] -pub mod handlers; -pub mod unwrap_or_revert; diff --git a/smart_contracts/contract/src/no_std_handlers.rs b/smart_contracts/contract/src/no_std_handlers.rs new file mode 100644 index 0000000000..e1298375b8 --- /dev/null +++ b/smart_contracts/contract/src/no_std_handlers.rs @@ -0,0 +1,21 @@ +//! 
Contains definitions for panic and allocation error handlers. + +/// A panic handler for use in a `no_std` environment which simply aborts the process. +#[panic_handler] +#[no_mangle] +pub fn panic(_info: &core::panic::PanicInfo) -> ! { + #[cfg(feature = "test-support")] + crate::contract_api::runtime::print(&alloc::format!("{_info}")); + core::intrinsics::abort(); +} + +/// An out-of-memory allocation error handler for use in a `no_std` environment which simply aborts +/// the process. +#[alloc_error_handler] +#[no_mangle] +pub fn oom(_: core::alloc::Layout) -> ! { + core::intrinsics::abort(); +} + +#[lang = "eh_personality"] +extern "C" fn eh_personality() {} diff --git a/smart_contracts/contract/tests/version_numbers.rs b/smart_contracts/contract/tests/version_numbers.rs index d23fe5e9e7..5787cf5077 100644 --- a/smart_contracts/contract/tests/version_numbers.rs +++ b/smart_contracts/contract/tests/version_numbers.rs @@ -1,4 +1,4 @@ -#[cfg(feature = "std")] +#[cfg(feature = "version-sync")] #[test] fn test_html_root_url() { version_sync::assert_html_root_url_updated!("src/lib.rs"); diff --git a/smart_contracts/contract_as/.gitignore b/smart_contracts/contract_as/.gitignore deleted file mode 100644 index 119f029a04..0000000000 --- a/smart_contracts/contract_as/.gitignore +++ /dev/null @@ -1,99 +0,0 @@ -temp-apidoc/ - -# Created by https://www.gitignore.io/api/node -# Edit at https://www.gitignore.io/?templates=node - -### Node ### -# Logs -logs -*.log -npm-debug.log* -yarn-debug.log* -yarn-error.log* -lerna-debug.log* - -# Diagnostic reports (https://nodejs.org/api/report.html) -report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json - -# Runtime data -pids -*.pid -*.seed -*.pid.lock - -# Directory for instrumented libs generated by jscoverage/JSCover -lib-cov - -# Coverage directory used by tools like istanbul -coverage -*.lcov - -# nyc test coverage -.nyc_output - -# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files) -.grunt - -# Bower 
dependency directory (https://bower.io/) -bower_components - -# node-waf configuration -.lock-wscript - -# Compiled binary addons (https://nodejs.org/api/addons.html) -build/Release - -# Dependency directories -node_modules/ -jspm_packages/ - -# TypeScript v1 declaration files -typings/ - -# TypeScript cache -*.tsbuildinfo - -# Optional npm cache directory -.npm - -# Optional eslint cache -.eslintcache - -# Optional REPL history -.node_repl_history - -# Output of 'npm pack' -*.tgz - -# Yarn Integrity file -.yarn-integrity - -# dotenv environment variables file -.env -.env.test - -# parcel-bundler cache (https://parceljs.org/) -.cache - -# next.js build output -.next - -# nuxt.js build output -.nuxt - -# react / gatsby -public/ - -# vuepress build output -.vuepress/dist - -# Serverless directories -.serverless/ - -# FuseBox cache -.fusebox/ - -# DynamoDB Local files -.dynamodb/ - -# End of https://www.gitignore.io/api/node diff --git a/smart_contracts/contract_as/.npmignore b/smart_contracts/contract_as/.npmignore deleted file mode 100644 index 59536de018..0000000000 --- a/smart_contracts/contract_as/.npmignore +++ /dev/null @@ -1 +0,0 @@ -temp-apidoc/ diff --git a/smart_contracts/contract_as/.npmrc b/smart_contracts/contract_as/.npmrc deleted file mode 100644 index 5af8673616..0000000000 --- a/smart_contracts/contract_as/.npmrc +++ /dev/null @@ -1 +0,0 @@ -unsafe-perm = true diff --git a/smart_contracts/contract_as/README.md b/smart_contracts/contract_as/README.md deleted file mode 100644 index 93ec608bab..0000000000 --- a/smart_contracts/contract_as/README.md +++ /dev/null @@ -1,91 +0,0 @@ -# casper-contract - -This package allows a distributed app developer to create smart contracts -for the open source [Casper](https://github.com/CasperLabs/casper-node) project using [AssemblyScript](https://www.npmjs.com/package/assemblyscript). - -## Installation -For each smart contract you create, make a project directory and initialize it. 
-``` -mkdir project -cd project -npm init -``` - -npm init will prompt you for various details about your project; -answer as you see fit but you may safely default everything except `name` which should follow the convention of -`your-contract-name`. - -Then install assembly script and this package in the project directory. - -``` -npm install --save-dev assemblyscript@0.9.1 -npm install --save casper-contract -``` - -## Usage -Add script entries for assembly script to your project's `package.json`; note that your contract name is used -for the name of the wasm file. -``` -{ - "name": "your-contract-name", - ... - "scripts": { - "asbuild:optimized": "asc assembly/index.ts -b dist/your-contract-name.wasm --validate --optimize --use abort=", - "asbuild": "npm run asbuild:optimized", - ... - }, - ... -} -``` -In your project root, create an `index.js` file with the following contents: -```js -const fs = require("fs"); -​ -const compiled = new WebAssembly.Module(fs.readFileSync(__dirname + "/dist/your-contract-name.wasm")); -​ -const imports = { - env: { - abort(_msg, _file, line, column) { - console.error("abort called at index.ts:" + line + ":" + column); - } - } -}; -​ -Object.defineProperty(module, "exports", { - get: () => new WebAssembly.Instance(compiled, imports).exports -}); -``` - -Create an `assembly/tsconfig.json` file in the following way: -```json -{ - "extends": "../node_modules/assemblyscript/std/assembly.json", - "include": [ - "./**/*.ts" - ] -} -``` - -### Sample smart contract -Create a `assembly/index.ts` file. This is where the code for your contract will go. - -You can use the following sample snippet which demonstrates a very simple smart contract that immediately returns an error, which will write a message to a block if executed on the Casper platform. 
- -```typescript -//@ts-nocheck -import {Error, ErrorCode} from "casper-contract/error"; - -// simplest possible feedback loop -export function call(): void { - Error.fromErrorCode(ErrorCode.None).revert(); // ErrorCode: 1 -} -``` -If you prefer a more complicated first contract, you can look at client contracts on the [casper-node](https://github.com/CasperLabs/casper-node/tree/master/smart_contracts/contracts_as/client) GitHub repository for inspiration. - -### Compile to wasm -To compile your contract to wasm, use npm to run the asbuild script from your project root. -``` -npm run asbuild -``` -If the build is successful, you should see a `dist` folder in your root folder and in it -should be `your-contract-name.wasm` diff --git a/smart_contracts/contract_as/assembly/account.ts b/smart_contracts/contract_as/assembly/account.ts deleted file mode 100644 index 04ac6b9385..0000000000 --- a/smart_contracts/contract_as/assembly/account.ts +++ /dev/null @@ -1,182 +0,0 @@ -import * as externals from "./externals"; -import {arrayToTyped} from "./utils"; -import {UREF_SERIALIZED_LENGTH} from "./constants"; -import {URef} from "./uref"; -import {AccountHash} from "./key"; - -/** - * Enum representing the possible results of adding an associated key to an account. - */ -export enum AddKeyFailure { - /** - * Success - */ - Ok = 0, - /** - * Unable to add new associated key because maximum amount of keys is reached - */ - MaxKeysLimit = 1, - /** - * Unable to add new associated key because given key already exists - */ - DuplicateKey = 2, - /** - * Unable to add new associated key due to insufficient permissions - */ - PermissionDenied = 3, -} - -/** - * Enum representing the possible results of updating an associated key of an account. - */ -export enum UpdateKeyFailure { - /** - * Success - */ - Ok = 0, - /** - * Key does not exist in the list of associated keys. 
- */ - MissingKey = 1, - /** - * Unable to update the associated key due to insufficient permissions - */ - PermissionDenied = 2, - /** - * Unable to update weight that would fall below any of action thresholds - */ - ThresholdViolation = 3, -} - -/** - * Enum representing the possible results of removing an associated key from an account. - */ -export enum RemoveKeyFailure { - /** - * Success - */ - Ok = 0, - /** - * Key does not exist in the list of associated keys. - */ - MissingKey = 1, - /** - * Unable to remove the associated key due to insufficient permissions - */ - PermissionDenied = 2, - /** - * Unable to remove a key which would violate action threshold constraints - */ - ThresholdViolation = 3, -} - -/** - * Enum representing the possible results of setting the threshold of an account. - */ -export enum SetThresholdFailure { - /** - * Success - */ - Ok = 0, - /** - * New threshold should be lower or equal than deployment threshold - */ - KeyManagementThreshold = 1, - /** - * New threshold should be lower or equal than key management threshold - */ - DeploymentThreshold = 2, - /** - * Unable to set action threshold due to insufficient permissions - */ - PermissionDeniedError = 3, - /** - * New threshold should be lower or equal than total weight of associated keys - */ - InsufficientTotalWeight = 4, -} - -/** - * Enum representing an action for which a threshold is being set. - */ -export enum ActionType { - /** - * Required by deploy execution. - */ - Deployment = 0, - /** - * Required when adding/removing associated keys, changing threshold levels. - */ - KeyManagement = 1, -} - -/** - * Adds an associated key to the account. Associated keys are the keys allowed to sign actions performed - * in the context of the account. - * - * @param AccountHash The public key to be added as the associated key. - * @param weight The weight that will be assigned to the new associated key. See [[setActionThreshold]] - * for more info about weights. 
- * @returns An instance of [[AddKeyFailure]] representing the result. - */ -export function addAssociatedKey(accountHash: AccountHash, weight: i32): AddKeyFailure { - const accountHashBytes = accountHash.toBytes(); - const ret = externals.add_associated_key(accountHashBytes.dataStart, accountHashBytes.length, weight); - return ret; -} - -/** - * Sets a threshold for the action performed in the context of the account. - * - * Each request has to be signed by one or more of the keys associated with the account. The action - * is only successful if the total weights of the signing associated keys is greater than the threshold. - * - * @param actionType The type of the action for which the threshold is being set. - * @param thresholdValue The minimum total weight of the keys of the action to be successful. - * @returns An instance of [[SetThresholdFailure]] representing the result. - */ -export function setActionThreshold(actionType: ActionType, thresholdValue: u8): SetThresholdFailure { - const ret = externals.set_action_threshold(actionType, thresholdValue); - return ret; -} - -/** - * Changes the weight of an existing associated key. See [[addAssociatedKey]] and [[setActionThreshold]] - * for info about associated keys and their weights. - * - * @param accountHash The associated key to be updated. - * @param weight The new desired weight of the associated key. - * @returns An instance of [[UpdateKeyFailure]] representing the result. - */ -export function updateAssociatedKey(accountHash: AccountHash, weight: i32): UpdateKeyFailure { - const accountHashBytes = accountHash.toBytes(); - const ret = externals.update_associated_key(accountHashBytes.dataStart, accountHashBytes.length, weight); - return ret; -} - -/** - * Removes the associated key from the account. See [[addAssociatedKey]] for more info about associated - * keys. - * - * @param accountHash The associated key to be removed. - * @returns An instance of [[RemoveKeyFailure]] representing the result. 
- */ -export function removeAssociatedKey(accountHash: AccountHash): RemoveKeyFailure { - const accountHashBytes = accountHash.toBytes(); - const ret = externals.remove_associated_key(accountHashBytes.dataStart, accountHashBytes.length); - return ret; -} - -/** - * Gets the [[URef]] representing the main purse of the account. - * - * @returns The [[URef]] that can be used to access the main purse. - * @hidden - */ -export function getMainPurse(): URef { - let data = new Uint8Array(UREF_SERIALIZED_LENGTH); - data.fill(0); - externals.get_main_purse(data.dataStart); - let urefResult = URef.fromBytes(data); - return urefResult.unwrap(); -} diff --git a/smart_contracts/contract_as/assembly/bignum.ts b/smart_contracts/contract_as/assembly/bignum.ts deleted file mode 100644 index 5a0164b384..0000000000 --- a/smart_contracts/contract_as/assembly/bignum.ts +++ /dev/null @@ -1,598 +0,0 @@ -import {Ref} from "./ref"; -import {Error, Result} from "./bytesrepr"; -import {Pair} from "./pair"; - -const HEX_LOWERCASE: string[] = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f']; - -/** - * Fast lookup of ascii character into it's numerical value in base16 - */ -const HEX_DIGITS: i32[] = -[ -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, - -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, - -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,-1,-1,-1,-1,-1,-1, - -1,0xa,0xb,0xc,0xd,0xe,0xf,-1,-1,-1,-1,-1,-1,-1,-1,-1, - -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, - -1,0xa,0xb,0xc,0xd,0xe,0xf,-1,-1,-1,-1,-1,-1,-1,-1,-1, - -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, - -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, - -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, - -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, - -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, - -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, - -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, - -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1, 
- -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1 ]; - -/** - * An implementation of 512-bit unsigned integers. - */ -export class U512 { - private pn: Uint32Array; - - /** - * Constructs a new instance of U512. - */ - constructor() { - this.pn = new Uint32Array(16); // 512 bits total - } - - /** - * @returns The maximum possible value of a U512. - */ - static get MAX_VALUE(): U512 { - let value = new U512(); - value.pn.fill(0xffffffff); - return value; - } - - /** - * @returns The minimum possible value of a U512 (which is 0). - */ - static get MIN_VALUE(): U512 { - return new U512(); - } - - /** - * Constructs a new U512 from a string of hex digits. - */ - static fromHex(hex: String): U512 { - let res = new U512(); - res.setHex(hex); - return res; - } - - /** - * Converts a 64-bit unsigned integer into a U512. - * - * @param value The value to be converted. - */ - static fromU64(value: u64): U512 { - let res = new U512(); - res.setU64(value); - return res; - } - - /** - * Gets the width of the number in bytes. - */ - get width(): i32 { - return this.pn.length * 4; - } - - /** - * Sets the value of this U512 to a given 64-bit value. - * - * @param value The desired new value of this U512. - */ - setU64(value: u64): void { - this.pn.fill(0); - assert(this.pn.length >= 2); - this.pn[0] = (value & 0xffffffff); - this.pn[1] = (value >> 32); - } - - /** - * Sets the value of this U512 to a value represented by the given string of hex digits. - * - * @param value The string of hex digits representing the desired value. 
- */ - setHex(value: String): void { - if (value.length >= 2 && value[0] == '0' && (value[1] == 'x' || value[1] == 'X')) - value = value.substr(2); - - // Find the length - let digits = 0; - while (digits < value.length && HEX_DIGITS[value.charCodeAt(digits)] != -1 ) { - digits++; - } - - // Decodes hex string into an array of bytes - let bytes = new Uint8Array(this.width); - - // Convert ascii codes into values - let i = 0; - while (digits > 0 && i < bytes.length) { - bytes[i] = HEX_DIGITS[value.charCodeAt(--digits)]; - - if (digits > 0) { - bytes[i] |= HEX_DIGITS[value.charCodeAt(--digits)] << 4; - i++; - } - } - - // Reinterpret individual bytes back to u32 array - this.setBytesLE(bytes); - } - - /** - * Checks whether this U512 is equal to 0. - * - * @returns True if this U512 is 0, false otherwise. - */ - isZero(): bool { - for (let i = 0; i < this.pn.length; i++) { - if (this.pn[i] != 0) { - return false; - } - } - return true; - } - - /** - * The addition operator - adds two U512 numbers together. - */ - @operator("+") - add(other: U512): U512 { - assert(this.pn.length == other.pn.length); - // We do store carry as u64, to easily detect the overflow. - let carry = 0; - for (let i = 0; i < this.pn.length; i++) { - let n = carry + this.pn[i] + other.pn[i]; - // The actual value after (possibly) overflowing addition - this.pn[i] = (n & 0xffffffff); - // Anything above 2^32-1 is the overflow and its what we carry over - carry = (n >> 32); - } - return this; - } - - /** - * The negation operator - returns the two's complement of the argument. - */ - @operator.prefix("-") - neg(): U512 { - let ret = new U512(); - for (let i = 0; i < this.pn.length; i++) { - ret.pn[i] = ~this.pn[i]; - } - ++ret; - return ret; - } - - /** - * The subtraction operator - subtracts the two U512s. - */ - @operator("-") - sub(other: U512): U512 { - return this.add(-other); - } - - /** - * The multiplication operator - calculates the product of the two arguments. 
- */ - @operator("*") - mul(other: U512): U512 { - assert(this.pn.length == other.pn.length); - let ret = new U512(); - // A naive implementation of multiplication - for (let j = 0; j < this.pn.length; j++) { - let carry: u64 = 0; - for (let i = 0; i + j < this.pn.length; i++) { - // In a similar fashion to addition, we do arithmetic on 64 bit integers to detect overflow - let n: u64 = carry + ret.pn[i + j] + this.pn[j] * other.pn[i]; - ret.pn[i + j] = (n & 0xffffffff); - carry = (n >> 32); - } - } - return ret; - } - - /** - * Increments this U512. - */ - private increment(): void { - let i = 0; - while (i < this.pn.length && ++this.pn[i] == 0) { - i++; - } - } - - /** - * Decrements this U512. - */ - private decrement(): void { - let i = 0; - while (i < this.pn.length && --this.pn[i] == u32.MAX_VALUE) { - i++; - } - } - - /** - * Prefix operator `++` - increments this U512. - */ - @operator.prefix("++") - prefixInc(): U512 { - this.increment(); - return this; - } - - /** - * Prefix operator `--` - decrements this U512. - */ - @operator.prefix("--") - prefixDec(): U512 { - this.decrement(); - return this; - } - - /** - * Postfix operator `++` - increments this U512. - */ - @operator.postfix("++") - postfixInc(): U512 { - let cloned = this.clone(); - cloned.increment(); - return cloned; - } - - /** - * Postfix operator `--` - decrements this U512. - */ - @operator.postfix("--") - postfixDec(): U512 { - let cloned = this.clone(); - cloned.decrement(); - return cloned; - } - - /** - * Sets the values of the internally kept 32-bit "digits" (or "limbs") of the U512. - * - * @param pn The array of unsigned 32-bit integers to be used as the "digits"/"limbs". - */ - setValues(pn: Uint32Array): void { - for (let i = 0; i < this.pn.length; i++) { - this.pn[i] = pn[i]; - } - } - - /** - * Clones the U512. 
- */ - clone(): U512 { - let U512val = new U512(); - U512val.setValues(this.pn); - return U512val; - } - - /** - * Returns length of the integer in bits (not counting the leading zero bits). - */ - bits(): u32 { - for (let i = this.pn.length - 1; i >= 0; i--) { - if (this.pn[i] > 0) { - // Counts leading zeros - return 32 * i + (32 - clz(this.pn[i])); - } - } - return 0; - } - - /** - * Performs the integer division of `this/other`. - * - * @param other The divisor. - * @returns A pair consisting of the quotient and the remainder, or null if the divisor was 0. - */ - divMod(other: U512): Pair | null { - assert(this.pn.length == other.pn.length); - - let div = other.clone(); // make a copy, so we can shift. - let num = this.clone(); // make a copy, so we can subtract the quotient. - - let res = new U512(); - - let numBits = num.bits(); - let divBits = div.bits(); - - if (divBits == 0) { - // division by zero - return null; - } - - if (divBits > numBits) { - // the result is certainly 0 and rem is the lhs of equation. - let zero = new U512(); - return new Pair(zero, num); - } - - let shift: i32 = numBits - divBits; - div <<= shift; // shift so that div and num align. - - while (shift >= 0) { - if (num >= div) { - num -= div; - res.pn[shift / 32] |= (1 << (shift & 31)); // set a bit of the result. - } - div >>= 1; // shift back. - shift--; - } - // num now contains the remainder of the division. - return new Pair(res, num); - } - - /** - * The division operator - divides the arguments. - */ - @operator("/") - div(other: U512): U512 { - let divModResult = this.divMod(other); - assert(divModResult !== null); - return (>divModResult).first; - } - - /** - * The 'modulo' operator - calculates the remainder from the division of the arguments. - */ - @operator("%") - rem(other: U512): U512 { - let divModResult = this.divMod(other); - assert(divModResult !== null); - return (>divModResult).second; - } - - /** - * The bitwise left-shift operator. 
- */ - @operator("<<") - shl(shift: u32): U512 { - let res = new U512(); - - let k: u32 = shift / 32; - shift = shift % 32; - - for (let i = 0; i < this.pn.length; i++) { - if (i + k + 1 < this.pn.length && shift != 0) { - res.pn[i + k + 1] |= (this.pn[i] >> (32 - shift)); - } - if (i + k < this.pn.length) { - res.pn[i + k] |= (this.pn[i] << shift); - } - } - - return res; - } - - /** - * The bitwise right-shift operator. - */ - @operator(">>") - shr(shift: u32): U512 { - let res = new U512(); - - let k = shift / 32; - shift = shift % 32; - - for (let i = 0; i < this.pn.length; i++) { - if (i - k - 1 >= 0 && shift != 0) { - res.pn[i - k - 1] |= (this.pn[i] << (32 - shift)); - } - if (i - k >= 0) { - res.pn[i - k] |= (this.pn[i] >> shift); - } - } - - return res; - } - - /** - * Compares `this` and `other`. - * - * @param other The number to compare `this` to. - * @returns -1 if `this` is less than `other`, 1 if `this` is greater than `other`, 0 if `this` - * and `other` are equal. - */ - cmp(other: U512): i32 { - assert(this.pn.length == other.pn.length); - for (let i = this.pn.length - 1; i >= 0; --i) { - if (this.pn[i] < other.pn[i]) { - return -1; - } - if (this.pn[i] > other.pn[i]) { - return 1; - } - } - return 0; - } - - /** - * The equality operator. - * - * @returns True if `this` and `other` are equal, false otherwise. - */ - @operator("==") - eq(other: U512): bool { - return this.cmp(other) == 0; - } - - /** - * The not-equal operator. - * - * @returns False if `this` and `other` are equal, true otherwise. - */ - @operator("!=") - neq(other: U512): bool { - return this.cmp(other) != 0; - } - - /** - * The greater-than operator. - * - * @returns True if `this` is greater than `other`, false otherwise. - */ - @operator(">") - gt(other: U512): bool { - return this.cmp(other) == 1; - } - - /** - * The less-than operator. - * - * @returns True if `this` is less than `other`, false otherwise. 
- */ - @operator("<") - lt(other: U512): bool { - return this.cmp(other) == -1; - } - - /** - * The greater-than-or-equal operator. - * - * @returns True if `this` is greater than or equal to `other`, false otherwise. - */ - @operator(">=") - gte(other: U512): bool { - return this.cmp(other) >= 0; - } - - /** - * The less-than-or-equal operator. - * - * @returns True if `this` is less than or equal to `other`, false otherwise. - */ - @operator("<=") - lte(other: U512): bool { - return this.cmp(other) <= 0; - } - - /** - * Returns a little-endian byte-array representation of this U512 (i.e. `result[0]` is the least - * significant byte. - */ - toBytesLE(): Uint8Array { - let bytes = new Uint8Array(this.width); - // Copy array of u32 into array of u8 - for (let i = 0; i < this.pn.length; i++) { - store(bytes.dataStart + (i * 4), this.pn[i]); - } - return bytes; - } - - /** - * Sets the value of this U512 to the value represented by `bytes` when treated as a - * little-endian representation of a number. - */ - setBytesLE(bytes: Uint8Array): void { - for (let i = 0; i < this.pn.length; i++) { - let num = load(bytes.dataStart + (i * 4)); - this.pn[i] = num; - } - } - - /** - * Returns a string of hex digits representing the value of this U512. 
- */ - private toHex(): String { - let bytes = this.toBytesLE(); - let result = ""; - - // Skips zeros in the back to make the numbers readable without tons of zeros in front - let backZeros = bytes.length - 1; - - while (backZeros >= 0 && bytes[backZeros--] == 0) {} - - // First digit could be still 0 so skip it - let firstByte = bytes[++backZeros]; - if ((firstByte & 0xF0) == 0) { - // Skips the hi byte if the first character of the output base16 would be `0` - // This way the hex string wouldn't be something like "01" - result += HEX_LOWERCASE[firstByte & 0x0F]; - } - else { - result += HEX_LOWERCASE[firstByte >> 4]; - result += HEX_LOWERCASE[firstByte & 0x0F]; - } - - // Convert the rest of bytes into base16 - for (let i = backZeros - 1; i >= 0; i--) { - let value = bytes[i]; - result += HEX_LOWERCASE[value >> 4]; - result += HEX_LOWERCASE[value & 0x0F]; - } - return result; - } - - /** - * An alias for [[toHex]]. - */ - toString(): String { - return this.toHex(); - } - - /** - * Deserializes a U512 from an array of bytes. The array should represent a correct U512 in the - * Casper serialization format. - * - * @returns A [[Result]] that contains the deserialized U512 if the deserialization was - * successful, or an error otherwise. - */ - static fromBytes(bytes: Uint8Array): Result { - if (bytes.length < 1) { - return new Result(null, Error.EarlyEndOfStream, 0); - } - - const lengthPrefix = bytes[0]; - if (lengthPrefix > bytes.length) { - return new Result(null, Error.EarlyEndOfStream, 0); - } - - - let res = new U512(); - - // Creates a buffer so individual bytes can be placed there - let buffer = new Uint8Array(res.width); - for (let i = 0; i < lengthPrefix; i++) { - buffer[i] = bytes[i + 1]; - } - - res.setBytesLE(buffer); - let ref = new Ref(res); - return new Result(ref, Error.Ok, 1 + lengthPrefix); - } - - /** - * Serializes the U512 into an array of bytes that represents it in the Casper serialization - * format. 
- */ - toBytes(): Array { - let bytes = this.toBytesLE(); - let skipZeros = bytes.length - 1; - - // Skip zeros at the end - while (skipZeros >= 0 && bytes[skipZeros] == 0) { - skipZeros--; - } - - // Continue - let lengthPrefix = skipZeros + 1; - - let result = new Array(1 + lengthPrefix); - result[0] = lengthPrefix; - for (let i = 0; i < lengthPrefix; i++) { - result[1 + i] = bytes[i]; - } - return result; - } -}; diff --git a/smart_contracts/contract_as/assembly/bytesrepr.ts b/smart_contracts/contract_as/assembly/bytesrepr.ts deleted file mode 100644 index 782ae13da9..0000000000 --- a/smart_contracts/contract_as/assembly/bytesrepr.ts +++ /dev/null @@ -1,392 +0,0 @@ -import { Pair } from "./pair"; -import { typedToArray, encodeUTF8 } from "./utils"; -import { ErrorCode, Error as StdError } from "./error"; -import { Ref } from "./ref"; - -/** - * Enum representing possible results of deserialization. - */ -export enum Error { - /** - * Last operation was a success - */ - Ok = 0, - /** - * Early end of stream - */ - EarlyEndOfStream = 1, - /** - * Unexpected data encountered while decoding byte stream - */ - FormattingError = 2, -} - -/** - * Converts bytesrepr's [[Error]] into a standard [[ErrorCode]]. - * @internal - * @returns An instance of [[Ref]] object for non-zero error code, otherwise a null. - */ -function toErrorCode(error: Error): Ref | null { - switch (error) { - case Error.EarlyEndOfStream: - return new Ref(ErrorCode.EarlyEndOfStream); - case Error.FormattingError: - return new Ref(ErrorCode.Formatting); - default: - return null; - } -} - - -/** - * Class representing a result of an operation that might have failed. Can contain either a value - * resulting from a successful completion of a calculation, or an error. Similar to `Result` in Rust - * or `Either` in Haskell. 
- */ -export class Result { - /** - * Creates new Result with wrapped value - * @param value Ref-wrapped value (success) or null (error) - * @param error Error value - * @param position Position of input stream - */ - constructor(public ref: Ref | null, public error: Error, public position: u32) {} - - /** - * Assumes that reference wrapper contains a value and then returns it - */ - get value(): T { - assert(this.hasValue()); - let ref = >this.ref; - return ref.value; - } - - /** - * Checks if given Result contains a value - */ - hasValue(): bool { - return this.ref !== null; - } - - /** - * Checks if error value is set. - * - * Truth also implies !hasValue(), false value implies hasValue() - */ - hasError(): bool { - return this.error != Error.Ok; - } - - /** - * For nullable types, this returns the value itself, or a null. - */ - ok(): T | null { - return this.hasValue() ? this.value : null; - } - - /** - * Returns success value, or reverts error value. - */ - unwrap(): T { - const errorCode = toErrorCode(this.error); - if (errorCode != null) { - const error = new StdError(errorCode.value); - error.revert(); - return unreachable(); - } - return this.value; - } -} - -/** - * Serializes an `u8` as an array of bytes. - * - * @returns An array containing a single byte: `num`. - */ -export function toBytesU8(num: u8): u8[] { - return [num]; -} - -/** - * Deserializes a [[T]] from an array of bytes. - * - * @returns A [[Result]] that contains the value of type `T`, or an error if deserialization failed. - */ -export function fromBytesLoad(bytes: Uint8Array): Result { - let expectedSize = changetype(sizeof()) - if (bytes.length < expectedSize) { - return new Result(null, Error.EarlyEndOfStream, 0); - } - const value = load(bytes.dataStart); - return new Result(new Ref(value), Error.Ok, expectedSize); -} - -/** - * Deserializes a `u8` from an array of bytes. 
- */ -export function fromBytesU8(bytes: Uint8Array): Result { - return fromBytesLoad(bytes); -} - -/** - * Converts `u32` to little endian. - */ -export function toBytesU32(num: u32): u8[] { - let bytes = new Uint8Array(4); - store(bytes.dataStart, num); - let result = new Array(4); - for (var i = 0; i < 4; i++) { - result[i] = bytes[i]; - } - return result; -} - -/** - * Deserializes a `u32` from an array of bytes. - */ -export function fromBytesU32(bytes: Uint8Array): Result { - return fromBytesLoad(bytes); -} - -/** - * Converts `i32` to little endian. - */ -export function toBytesI32(num: i32): u8[] { - let bytes = new Uint8Array(4); - store(bytes.dataStart, num); - let result = new Array(4); - for (var i = 0; i < 4; i++) { - result[i] = bytes[i]; - } - return result; -} - -/** - * Deserializes an `i32` from an array of bytes. - */ -export function fromBytesI32(bytes: Uint8Array): Result { - return fromBytesLoad(bytes); -} - -/** - * Converts `u64` to little endian. - */ -export function toBytesU64(num: u64): u8[] { - let bytes = new Uint8Array(8); - store(bytes.dataStart, num); - let result = new Array(8); - for (var i = 0; i < 8; i++) { - result[i] = bytes[i]; - } - return result; -} - -/** - * Deserializes a `u64` from an array of bytes. - */ -export function fromBytesU64(bytes: Uint8Array): Result { - return fromBytesLoad(bytes); -} - -/** - * Joins a pair of byte arrays into a single array. - */ -export function toBytesPair(key: u8[], value: u8[]): u8[] { - return key.concat(value); -} - -/** - * Serializes a map into an array of bytes. - * - * @param map A map container. - * @param serializeKey A function that will serialize given key. - * @param serializeValue A function that will serialize given value. 
- */ -export function toBytesMap(vecOfPairs: Array>, serializeKey: (key: K) => Array, serializeValue: (value: V) => Array): Array { - const len = vecOfPairs.length; - var bytes = toBytesU32(len); - for (var i = 0; i < len; i++) { - bytes = bytes.concat(serializeKey(vecOfPairs[i].first)); - bytes = bytes.concat(serializeValue(vecOfPairs[i].second)); - } - return bytes; -} - -/** - * Deserializes an array of bytes into a map. - * - * @param bytes The array of bytes to be deserialized. - * @param decodeKey A function deserializing the key type. - * @param decodeValue A function deserializing the value type. - * @returns An array of key-value pairs or an error in case of failure. - */ -export function fromBytesMap( - bytes: Uint8Array, - decodeKey: (bytes1: Uint8Array) => Result, - decodeValue: (bytes2: Uint8Array) => Result, -): Result>> { - const lengthResult = fromBytesU32(bytes); - if (lengthResult.error != Error.Ok) { - return new Result>>(null, Error.EarlyEndOfStream, 0); - } - const length = lengthResult.value; - - // Tracks how many bytes are parsed - let currentPos = lengthResult.position; - - let result = new Array>(); - - if (length == 0) { - let ref = new Ref>>(result); - return new Result>>(ref, Error.Ok, lengthResult.position); - } - - let bytes = bytes.subarray(currentPos); - - for (let i = 0; i < changetype(length); i++) { - const keyResult = decodeKey(bytes); - if (keyResult.error != Error.Ok) { - return new Result>>(null, keyResult.error, keyResult.position); - } - - currentPos += keyResult.position; - bytes = bytes.subarray(keyResult.position); - - let valueResult = decodeValue(bytes); - if (valueResult.error != Error.Ok) { - return new Result>>(null, valueResult.error, valueResult.position); - } - - currentPos += valueResult.position; - bytes = bytes.subarray(valueResult.position); - - let pair = new Pair(keyResult.value, valueResult.value); - result.push(pair); - } - - let ref = new Ref>>(result); - return new Result>>(ref, Error.Ok, currentPos); 
-} - -/** - * Serializes a string into an array of bytes. - */ -export function toBytesString(s: String): u8[] { - let bytes = toBytesU32(s.length); - return bytes.concat(typedToArray(encodeUTF8(s))); -} - -/** - * Deserializes a string from an array of bytes. - */ -export function fromBytesString(s: Uint8Array): Result { - var lenResult = fromBytesI32(s); - if (lenResult.error != Error.Ok) { - return new Result(null, Error.EarlyEndOfStream, 0); - } - - let currentPos = lenResult.position; - - const leni32 = lenResult.value; - if (s.length < leni32 + 4) { - return new Result(null, Error.EarlyEndOfStream, 0); - } - var result = ""; - for (var i = 0; i < leni32; i++) { - result += String.fromCharCode(s[4 + i]); - } - let ref = new Ref(result); - return new Result(ref, Error.Ok, currentPos + leni32); -} - -/** - * Serializes an array of bytes. - */ -export function toBytesArrayU8(arr: Array): u8[] { - let bytes = toBytesU32(arr.length); - return bytes.concat(arr); -} - -/** - * Deserializes an array of bytes. - */ -export function fromBytesArrayU8(bytes: Uint8Array): Result> { - var lenResult = fromBytesI32(bytes); - if (lenResult.error != Error.Ok) { - return new Result(null, Error.EarlyEndOfStream, 0); - } - - let currentPos = lenResult.position; - - const leni32 = lenResult.value; - if (s.length < leni32 + 4) { - return new Result(null, Error.EarlyEndOfStream, 0); - } - - let result = typedToArray(bytes.subarray(currentPos)); - let ref = new Ref(result); - return new Result(ref, Error.Ok, currentPos + leni32, currentPos + leni32); -} - -/** - * Serializes a vector of values of type `T` into an array of bytes. - */ -export function toBytesVecT(ts: Array, encodeItem: (item: T) => Array): Array { - var bytes = toBytesU32(ts.length); - for (let i = 0; i < ts.length; i++) { - var itemBytes = encodeItem(ts[i]); - bytes = bytes.concat(itemBytes); - } - return bytes; -} - -/** - * Deserializes an array of bytes into an array of type `T`. 
- * - * @param bytes The array of bytes to be deserialized. - * @param decodeItem A function deserializing a value of type `T`. - */ -export function fromBytesArray(bytes: Uint8Array, decodeItem: (bytes: Uint8Array) => Result): Result> { - var lenResult = fromBytesI32(bytes); - if (lenResult.error != Error.Ok) { - return new Result>(null, Error.EarlyEndOfStream, 0); - } - - let len = lenResult.value; - let currentPos = lenResult.position; - let head = bytes.subarray(currentPos); - - let result: Array = new Array(); - - for (let i = 0; i < len; ++i) { - let decodeResult = decodeItem(head); - if (decodeResult.error != Error.Ok) { - return new Result>(null, decodeResult.error, 0); - } - currentPos += decodeResult.position; - result.push(decodeResult.value); - head = head.subarray(decodeResult.position); - } - - let ref = new Ref>(result); - return new Result>(ref, Error.Ok, currentPos); -} - -/** - * Deserializes a list of strings from an array of bytes. - */ -export function fromBytesStringList(bytes: Uint8Array): Result> { - return fromBytesArray(bytes, fromBytesString); -} - -/** - * Serializes a list of strings into an array of bytes. - */ -export function toBytesStringList(arr: String[]): u8[] { - let data = toBytesU32(arr.length); - for (let i = 0; i < arr.length; i++) { - const strBytes = toBytesString(arr[i]); - data = data.concat(strBytes); - } - return data; -} diff --git a/smart_contracts/contract_as/assembly/clvalue.ts b/smart_contracts/contract_as/assembly/clvalue.ts deleted file mode 100644 index 883ee4b150..0000000000 --- a/smart_contracts/contract_as/assembly/clvalue.ts +++ /dev/null @@ -1,193 +0,0 @@ -import {toBytesArrayU8, toBytesString, toBytesI32, toBytesU32, toBytesU8, toBytesStringList, toBytesU64} from "./bytesrepr"; -import {U512} from "./bignum"; -import {URef} from "./uref"; -import {Key} from "./key"; -import {Option} from "./option"; -import {PublicKey} from "./public_key"; - -/** - * Casper types, i.e. 
types which can be stored and manipulated by smart contracts. - * - * Provides a description of the underlying data type of a [[CLValue]]. - */ -export enum CLTypeTag { - /** A boolean value */ - Bool = 0, - /** A 32-bit signed integer */ - I32 = 1, - /** A 64-bit signed integer */ - I64 = 2, - /** An 8-bit unsigned integer (a byte) */ - U8 = 3, - /** A 32-bit unsigned integer */ - U32 = 4, - /** A 64-bit unsigned integer */ - U64 = 5, - /** A 128-bit unsigned integer */ - U128 = 6, - /** A 256-bit unsigned integer */ - U256 = 7, - /** A 512-bit unsigned integer */ - U512 = 8, - /** A unit type, i.e. type with no values (analogous to `void` in C and `()` in Rust) */ - Unit = 9, - /** A string of characters */ - String = 10, - /** A key in the global state - URef/hash/etc. */ - Key = 11, - /** An Unforgeable Reference (URef) */ - Uref = 12, - /** An [[Option]], i.e. a type that can contain a value or nothing at all */ - Option = 13, - /** A list of values */ - List = 14, - /** A fixed-length array of bytes */ - ByteArray = 15, - /** - * A [[Result]], i.e. a type that can contain either a value representing success or one representing failure. - */ - Result = 16, - /** A key-value map. */ - Map = 17, - /** A 1-value tuple. */ - Tuple1 = 18, - /** A 2-value tuple, i.e. a pair of values. */ - Tuple2 = 19, - /** A 3-value tuple. */ - Tuple3 = 20, - /** A value of any type. */ - Any = 21, - /** A value of public key type. 
*/ - PublicKey = 22, -} - -export class CLType { - tag: CLTypeTag; - bytes: Array; - - constructor(tag: CLTypeTag, extra: Array | null = null) { - this.tag = tag; - this.bytes = [tag]; - if (extra !== null) { - this.bytes = this.bytes.concat(>extra); - } - } - - static byteArray(size: u32): CLType { - let extra = toBytesU32(size); - - let clType = new CLType(CLTypeTag.ByteArray, extra); - - return clType; - } - - static list(typeTag: CLType): CLType { - return new CLType(CLTypeTag.List, typeTag.bytes); - } - - static option(typeTag: CLType): CLType { - return new CLType(CLTypeTag.Option, typeTag.bytes); - } - - toBytes(): u8[] { - return this.bytes; - } -}; - -/** - * A Casper value, i.e. a value which can be stored and manipulated by smart contracts. - * - * It holds the underlying data as a type-erased, serialized array of bytes and also holds the - * [[CLType]] of the underlying data as a separate member. - */ -export class CLValue { - bytes: u8[]; - clType: CLType; - - /** - * Constructs a new `CLValue` with given underlying data and type. - */ - constructor(bytes: u8[], clType: CLType) { - this.bytes = bytes; - this.clType = clType; - } - - /** - * Creates a `CLValue` holding a string. - */ - static fromString(s: String): CLValue { - return new CLValue(toBytesString(s), new CLType(CLTypeTag.String)); - } - - /** - * Creates a `CLValue` holding an unsigned 512-bit integer. - */ - static fromU512(value: U512): CLValue { - return new CLValue(value.toBytes(), new CLType(CLTypeTag.U512)); - } - - /** - * Creates a `CLValue` holding an unsigned 64-bit integer. - */ - static fromU8(value: u8): CLValue { - return new CLValue(toBytesU8(value), new CLType(CLTypeTag.U8)); - } - - /** - * Creates a `CLValue` holding a signed 32-bit integer. - */ - static fromI32(value: i32): CLValue { - return new CLValue(toBytesI32(value), new CLType(CLTypeTag.I32)); - } - - /** - * Creates a `CLValue` holding an unsigned 64-bit integer. 
- */ - static fromU64(value: u64): CLValue { - return new CLValue(toBytesU64(value), new CLType(CLTypeTag.U64)); - } - - /** - * Creates a `CLValue` holding a [[Key]]. - */ - static fromKey(key: Key): CLValue{ - return new CLValue(key.toBytes(), new CLType(CLTypeTag.Key)); - } - - /** - * Creates a `CLValue` holding a [[URef]]. - */ - static fromURef(uref: URef): CLValue { - return new CLValue(uref.toBytes(), new CLType(CLTypeTag.Uref)); - } - - /** - * Creates a `CLValue` holding a list of strings. - */ - static fromStringList(values: String[]): CLValue { - return new CLValue(toBytesStringList(values), CLType.list(new CLType(CLTypeTag.String))); - } - - /** - * Creates a `CLValue` holding a public key. - */ - static fromPublicKey(publicKey: PublicKey): CLValue { - return new CLValue(publicKey.toBytes(), new CLType(CLTypeTag.PublicKey)); - } - - /** - * Creates a `CLValue` holding an [[Option]]. - */ - static fromOption(value: Option, nestedT: CLType): CLValue { - return new CLValue(value.toBytes(), CLType.option(nestedT)); - } - - /** - * Serializes a `CLValue` into an array of bytes. - */ - toBytes(): u8[] { - let data = toBytesArrayU8(this.bytes); - data = data.concat(this.clType.bytes); - return data; - } -} diff --git a/smart_contracts/contract_as/assembly/constants.ts b/smart_contracts/contract_as/assembly/constants.ts deleted file mode 100644 index 34fb309bab..0000000000 --- a/smart_contracts/contract_as/assembly/constants.ts +++ /dev/null @@ -1,34 +0,0 @@ -/** - * Length of [[URef]] address field. - * @internal - */ -export const UREF_ADDR_LENGTH = 32; - -/** - * Length of hash variant of a [[Key]]. - * @internal - */ -export const KEY_HASH_LENGTH = 32; - -/** - * Serialized length of [[AccessRights]] field. - * @internal - */ -export const ACCESS_RIGHTS_SERIALIZED_LENGTH = 1; - -/** - * Serialized length of [[URef]] object. 
- * @internal - */ -export const UREF_SERIALIZED_LENGTH = UREF_ADDR_LENGTH + ACCESS_RIGHTS_SERIALIZED_LENGTH; - -/** - * Serialized length of ID of key. - * @internal - */ -export const KEY_ID_SERIALIZED_LENGTH: i32 = 1; // u8 used to determine the ID - -/** - * Serialized length of [[Key]] object. - */ -export const KEY_UREF_SERIALIZED_LENGTH = KEY_ID_SERIALIZED_LENGTH + UREF_SERIALIZED_LENGTH; \ No newline at end of file diff --git a/smart_contracts/contract_as/assembly/error.ts b/smart_contracts/contract_as/assembly/error.ts deleted file mode 100644 index a0c034b159..0000000000 --- a/smart_contracts/contract_as/assembly/error.ts +++ /dev/null @@ -1,193 +0,0 @@ -import * as externals from "./externals"; - -/** - * Offset of a reserved range dedicated for system contract errors. - * @internal - */ -const SYSTEM_CONTRACT_ERROR_CODE_OFFSET: u32 = 65024; - -/** - * Offset of user errors - */ -const USER_ERROR_CODE_OFFSET: u32 = 65535; - -/** - * Standard error codes which can be encountered while running a smart contract. - * - * An [[ErrorCode]] can be passed to [[Error.fromErrorCode]] function to create an error object. - * This error object later can be used to stop execution by using [[Error.revert]] method. - */ -export const enum ErrorCode { - /** Optional data was unexpectedly `None`. */ - None = 1, - /** Specified argument not provided. */ - MissingArgument = 2, - /** Argument not of correct type. */ - InvalidArgument = 3, - /** Failed to deserialize a value. */ - Deserialize = 4, - /** `casper_contract::storage::read()` returned an error. */ - Read = 5, - /** The given key returned a `None` value. */ - ValueNotFound = 6, - /** Failed to find a specified contract. */ - ContractNotFound = 7, - /** A call to [[getKey]] returned a failure. */ - GetKey = 8, - /** The [[Key]] variant was not as expected. */ - UnexpectedKeyVariant = 9, - /** The `Contract` variant was not as expected. */ - UnexpectedContractRefVariant = 10, - /** Invalid purse name given. 
*/ - InvalidPurseName = 11, - /** Invalid purse retrieved. */ - InvalidPurse = 12, - /** Failed to upgrade contract at [[URef]]. */ - UpgradeContractAtURef = 13, - /** Failed to transfer motes. */ - Transfer = 14, - /** The given [[URef]] has no access rights. */ - NoAccessRights = 15, - /** A given type could not be constructed from a [[CLValue]]. */ - CLTypeMismatch = 16, - /** Early end of stream while deserializing. */ - EarlyEndOfStream = 17, - /** Formatting error while deserializing. */ - Formatting = 18, - /** Not all input bytes were consumed in deserializing operation */ - LeftOverBytes = 19, - /** Out of memory error. */ - OutOfMemory = 20, - /** There are already maximum public keys associated with the given account. */ - MaxKeysLimit = 21, - /** The given public key is already associated with the given account. */ - DuplicateKey = 22, - /** Caller doesn't have sufficient permissions to perform the given action. */ - PermissionDenied = 23, - /** The given public key is not associated with the given account. */ - MissingKey = 24, - /** Removing/updating the given associated public key would cause the total weight of all remaining `AccountHash`s to fall below one of the action thresholds for the given account. */ - ThresholdViolation = 25, - /** Setting the key-management threshold to a value lower than the deployment threshold is disallowed. */ - KeyManagementThreshold = 26, - /** Setting the deployment threshold to a value greater than any other threshold is disallowed. */ - DeploymentThreshold = 27, - /** Setting a threshold to a value greater than the total weight of associated keys is disallowed. */ - InsufficientTotalWeight = 28, - /** The given `u32` doesn't map to a [[SystemContractType]]. */ - InvalidSystemContract = 29, - /** Failed to create a new purse. */ - PurseNotCreated = 30, - /** An unhandled value, likely representing a bug in the code. */ - Unhandled = 31, - /** The provided buffer is too small to complete an operation. 
*/ - BufferTooSmall = 32, - /** No data available in the host buffer. */ - HostBufferEmpty = 33, - /** The host buffer has been set to a value and should be consumed first by a read operation. */ - HostBufferFull = 34, -} - - -/** - * This class represents error condition and is constructed by passing an error value. - * - * The variants are split into numeric ranges as follows: - * - * | Inclusive range | Variant(s) | - * | ----------------| ---------------------------------------------| - * | [1, 65023] | all except `Mint`, `HandlePayment` and `User`. Can be created with [[Error.fromErrorCode]] | - * | [65024, 65279] | `Mint` - instantiation currently unsupported | - * | [65280, 65535] | `HandlePayment` errors | - * | [65536, 131071] | User error codes created with [[Error.fromUserError]] | - * - * ## Example usage - * - * ```typescript - * // Creating using user error which adds 65536 to the error value. - * Error.fromUserError(1234).revert(); - * - * // Creating using standard error variant. - * Error.fromErrorCode(ErrorCode.InvalidArguent).revert(); - * ``` - */ -export class Error { - private errorCodeValue: u32; - - /** - * Creates an error object with given error value. - * - * Recommended way to use this class is through its static members: - * - * * [[Error.fromUserCode]] - * * [[Error.fromErrorCode]] - * @param value Error value - */ - constructor(value: u32) { - this.errorCodeValue = value; - } - - /** - * Creates an error object from a result value. - * - * Results in host interface contains 0 for a successful operation, - * or a non-zero standardized error otherwise. - * - * @param result A result value obtained from host interface functions. - * @returns Error object with an error [[ErrorCode]] variant. - */ - static fromResult(result: u32): Error | null { - if (result == 0) { - // Ok - return null; - } - return new Error(result); - } - - /** - * Creates new error from user value. 
- * - * Actual value held by returned [[Error]] object will be 65536 with added passed value. - * @param userErrorCodeValue - */ - static fromUserError(userErrorCodeValue: u16): Error { - return new Error(USER_ERROR_CODE_OFFSET + 1 + userErrorCodeValue); - } - - /** - * Creates new error object from an [[ErrorCode]] value. - * - * @param errorCode Variant of a standarized error. - */ - static fromErrorCode(errorCode: ErrorCode): Error { - return new Error(errorCode); - } - - /** - * Returns an error value. - */ - value(): u32{ - return this.errorCodeValue; - } - - /** - * Checks if error value is contained within user error range. - */ - isUserError(): bool{ - return this.errorCodeValue > USER_ERROR_CODE_OFFSET; - } - - /** - * Checks if error value is contained within system contract error range. - */ - isSystemContractError(): bool{ - return this.errorCodeValue >= SYSTEM_CONTRACT_ERROR_CODE_OFFSET && this.errorCodeValue <= USER_ERROR_CODE_OFFSET; - } - - /** - * Reverts execution of current contract with an error value contained within this error instance. 
- */ - revert(): void { - externals.revert(this.errorCodeValue); - } -} diff --git a/smart_contracts/contract_as/assembly/externals.ts b/smart_contracts/contract_as/assembly/externals.ts deleted file mode 100644 index 3682f4d11b..0000000000 --- a/smart_contracts/contract_as/assembly/externals.ts +++ /dev/null @@ -1,212 +0,0 @@ -/** @hidden */ -@external("env", "casper_read_value") -export declare function read_value(key_ptr: usize, key_size: usize, value_size: usize): i32; -/** @hidden */ -@external("env", "casper_read_value_local") -export declare function read_value_local(key_ptr: usize, key_size: usize, output_size: usize): i32; -/** @hidden */ -@external("env", "casper_write") -export declare function write(key_ptr: usize, key_size: usize, value_ptr: usize, value_size: usize): void; -/** @hidden */ -@external("env", "casper_write_local") -export declare function write_local(key_ptr: usize, key_size: usize, value_ptr: usize, value_size: usize): void; -/** @hidden */ -@external("env", "casper_add") -export declare function add(key_ptr: usize, key_size: usize, value_ptr: usize, value_size: usize): void; -/** @hidden */ -@external("env", "casper_new_uref") -export declare function new_uref(uref_ptr: usize, value_ptr: usize, value_size: usize): void; -@external("env", "casper_load_named_keys") -export declare function load_named_keys(total_keys: usize, result_size: usize): i32; -/** @hidden */ -@external("env", "casper_get_named_arg") -export declare function get_named_arg(name_ptr: usize, name_size: usize, dest_ptr: usize, dest_size: usize): i32; -/** @hidden */ -@external("env", "casper_get_named_arg_size") -export declare function get_named_arg_size(name_ptr: usize, name_size: usize, dest_size: usize): i32; -/** @hidden */ -@external("env", "casper_ret") -export declare function ret(value_ptr: usize, value_size: usize): void; -/** @hidden */ -@external("env", "casper_call_contract") -export declare function call_contract(contract_hash_ptr: usize, 
contract_hash_size: usize, entry_point_name_ptr: usize, entry_point_name_size: usize, runtime_args_ptr: usize, runtime_args_size: usize, result_size: usize): i32; -/** @hidden */ -@external("env", "casper_call_versioned_contract") -export declare function call_versioned_contract( - contract_package_hash_ptr: usize, - contract_package_hash_size: usize, - contract_version_ptr: usize, - contract_version_size: usize, - entry_point_name_ptr: usize, - entry_point_name_size: usize, - runtime_args_ptr: usize, - runtime_args_size: usize, - result_size: usize, -): i32; -/** @hidden */ -@external("env", "casper_get_key") -export declare function get_key( - name_ptr: usize, - name_size: usize, - output_ptr: usize, - output_size: usize, - bytes_written_ptr: usize, -): i32; -/** @hidden */ -@external("env", "casper_has_key") -export declare function has_key(name_ptr: usize, name_size: usize): i32; -/** @hidden */ -@external("env", "casper_put_key") -export declare function put_key(name_ptr: usize, name_size: usize, key_ptr: usize, key_size: usize): void; -/** @hidden */ -@external("env", "casper_remove_key") -export declare function remove_key(name_ptr: usize, name_size: u32): void; -/** @hidden */ -@external("env", "casper_revert") -export declare function revert(err_code: i32): void; -/** @hidden */ -@external("env", "casper_is_valid_uref") -export declare function is_valid_uref(target_ptr: usize, target_size: u32): i32; -/** @hidden */ -@external("env", "casper_add_associated_key") -export declare function add_associated_key(account_hash_ptr: usize, account_hash_size: usize, weight: i32): i32; -/** @hidden */ -@external("env", "casper_remove_associated_key") -export declare function remove_associated_key(account_hash_ptr: usize, account_hash_size: usize): i32; -/** @hidden */ -@external("env", "casper_update_associated_key") -export declare function update_associated_key(account_hash_ptr: usize, account_hash_size: usize, weight: i32): i32; -/** @hidden */ -@external("env", 
"casper_set_action_threshold") -export declare function set_action_threshold(permission_level: u32, threshold: i32): i32; -/** @hidden */ -@external("env", "casper_get_blocktime") -export declare function get_blocktime(dest_ptr: usize): void; -/** @hidden */ -@external("env", "casper_get_caller") -export declare function get_caller(output_size: usize): i32; -/** @hidden */ -@external("env", "casper_create_purse") -export declare function create_purse(purse_ptr: usize, purse_size: u32): i32; -/** @hidden */ -@external("env", "casper_transfer_to_account") -export declare function transfer_to_account( - target_ptr: usize, - target_size: u32, - amount_ptr: usize, - amount_size: u32, - id_ptr: usize, - id_size: u32, - result_ptr: usize, -): i32; -/** @hidden */ -@external("env", "casper_transfer_from_purse_to_account") -export declare function transfer_from_purse_to_account( - source_ptr: usize, - source_size: u32, - target_ptr: usize, - target_size: u32, - amount_ptr: usize, - amount_size: u32, - id_ptr: usize, - id_size: u32, - result_ptr: usize, -): i32; -/** @hidden */ -@external("env", "casper_transfer_from_purse_to_purse") -export declare function transfer_from_purse_to_purse( - source_ptr: usize, - source_size: u32, - target_ptr: usize, - target_size: u32, - amount_ptr: usize, - amount_size: u32, - id_ptr: usize, - id_size: u32, -): i32; -/** @hidden */ -@external("env", "casper_get_balance") -export declare function get_balance(purse_ptr: usize, purse_size: usize, result_size: usize): i32; -/** @hidden */ -@external("env", "casper_get_phase") -export declare function get_phase(dest_ptr: usize): void; -/** @hidden */ -@external("env", "casper_upgrade_contract_at_uref") -export declare function upgrade_contract_at_uref( - name_ptr: usize, - name_size: u32, - key_ptr: usize, - key_size: u32 -): i32; -/** @hidden */ -@external("env", "casper_get_system_contract") -export declare function get_system_contract(system_contract_index: u32, dest_ptr: usize, dest_size: 
u32): i32; -/** @hidden */ -@external("env", "casper_get_main_purse") -export declare function get_main_purse(dest_ptr: usize): void; -/** @hidden */ -@external("env", "casper_read_host_buffer") -export declare function read_host_buffer(dest_ptr: usize, dest_size: u32, bytes_written: usize): i32; -/** @hidden */ -@external("env", "casper_remove_contract_user_group") -export declare function remove_contract_user_group( - contract_package_hash_ptr: usize, - contract_package_hash_size: usize, - label_ptr: usize, - label_size: usize): i32; -/** @hidden */ -@external("env", "casper_provision_contract_user_group_uref") -export declare function provision_contract_user_group_uref( - contract_package_hash_ptr: usize, - contract_package_hash_size: usize, - label_ptr: usize, - label_size: usize, - value_size_ptr: usize, -): i32; -/** @hidden */ -@external("env", "casper_remove_contract_user_group_urefs") -export declare function remove_contract_user_group_urefs( - contract_package_hash_ptr: usize, - contract_package_hash_size: usize, - label_ptr: usize, - label_size: usize, - urefs_ptr: usize, - urefs_size: usize, -): i32; -/** @hidden */ -@external("env", "casper_create_contract_package_at_hash") -export declare function create_contract_package_at_hash(hash_addr_ptr: usize, access_addr_ptr: usize, is_locked: boolean): void; -/** @hidden */ -@external("env", "casper_add_contract_version") -export declare function add_contract_version( - contract_package_hash_ptr: usize, - contract_package_hash_size: usize, - version_ptr: usize, - entry_points_ptr: usize, - entry_points_size: usize, - named_keys_ptr: usize, - named_keys_size: usize, - output_ptr: usize, - output_size: usize, - bytes_written_ptr: usize, -): i32; -/** @hidden */ -@external("env", "casper_create_contract_user_group") -export declare function create_contract_user_group( - contract_package_hash_ptr: usize, - contract_package_hash_size: usize, - label_ptr: usize, - label_size: usize, - num_new_urefs: u8, - 
existing_urefs_ptr: usize, - existing_urefs_size: usize, - output_size_ptr: usize, -): i32; -/** @hidden */ -@external("env", "casper_disable_contract_version") -export declare function disable_contract_version( - contract_package_hash_ptr: usize, - contract_package_hash_size: usize, - contract_hash_ptr: usize, - contract_hash_size: usize, -): i32; \ No newline at end of file diff --git a/smart_contracts/contract_as/assembly/index.ts b/smart_contracts/contract_as/assembly/index.ts deleted file mode 100644 index 0aaa78445d..0000000000 --- a/smart_contracts/contract_as/assembly/index.ts +++ /dev/null @@ -1,666 +0,0 @@ -import * as externals from "./externals"; -import {URef, AccessRights} from "./uref"; -import {Error, ErrorCode} from "./error"; -import {CLValue, CLType, CLTypeTag} from "./clvalue"; -import {Key, AccountHash} from "./key"; -import {Pair} from "./pair"; -import {Ref} from "./ref"; -import {toBytesString, - toBytesVecT, - fromBytesMap, - fromBytesString, - toBytesStringList, - Result, - toBytesMap, - toBytesVecT, - fromBytesArray} from "./bytesrepr"; -import {KEY_UREF_SERIALIZED_LENGTH, UREF_ADDR_LENGTH, KEY_HASH_LENGTH} from "./constants"; -import {RuntimeArgs} from "./runtime_args"; -import {encodeUTF8} from "./utils"; -import {Option} from "./option"; - -// NOTE: interfaces aren't supported in AS yet: https://github.com/AssemblyScript/assemblyscript/issues/146#issuecomment-399130960 -// interface ToBytes { -// fromBytes(bytes: Uint8Array): ToBytes; -// } - -/** - * Length of address - */ -const ADDR_LENGTH = 32; - -/** - * System contract types. - */ -export const enum SystemContract { - /** - * Mint contract. - */ - Mint = 1, - /** - * Auction contract. - */ - Auction = 2, - /** - * Handle Payment contract. - */ - HandlePayment = 3, - /** - * Standard Payment contract. 
- */ - StandardPayment = 4, -} - -/** - * Returns size in bytes of I-th parameter - * - * @internal - * @param i I-th parameter - */ -export function getNamedArgSize(name: String): Ref | null { - let size = new Array(1); - size[0] = 0; - - const nameBuf = encodeUTF8(name); - let ret = externals.get_named_arg_size(nameBuf.dataStart, nameBuf.length, size.dataStart); - const error = Error.fromResult(ret); - if (error !== null) { - if (error.value() == ErrorCode.MissingArgument) { - return null; - } - error.revert(); - return >unreachable(); - } - const sizeU32 = changetype(size[0]); - return new Ref(sizeU32); -} - -/** - * Returns the i-th argument passed to the host for the current module - * invocation. - * - * Note that this is only relevant to contracts stored on-chain since a - * contract deployed directly is not invoked with any arguments. - * - * @param i I-th parameter - * @returns Array of bytes with ABI serialized argument. A null value if - * given parameter is not present. - */ -export function getNamedArg(name: String): Uint8Array { - let arg_size = getNamedArgSize(name); - if (arg_size == null) { - Error.fromErrorCode(ErrorCode.MissingArgument).revert(); - return unreachable(); - } - let nameBytes = encodeUTF8(name); - - let arg_size_u32 = changetype(arg_size.value); - let data = new Uint8Array(arg_size_u32); - let ret = externals.get_named_arg(nameBytes.dataStart, nameBytes.length, data.dataStart, arg_size_u32); - const error = Error.fromResult(ret); - if (error !== null) { - error.revert(); - return unreachable(); - } - return data; -} - -/** - * Reads a given amount of bytes from a host buffer - * - * @internal - * @param count Number of bytes - * @returns A byte array with bytes received, otherwise a null in case of - * errors. 
- */ -export function readHostBuffer(count: u32): Uint8Array { - let result = new Uint8Array(count); - let resultSize = new Uint32Array(1); - - let ret = externals.read_host_buffer(result.dataStart, result.length, resultSize.dataStart); - const error = Error.fromResult(ret); - if (error !== null) { - error.revert(); - return unreachable(); - } - return result; -} - -/** - * Returns an [[URef]] for a given system contract - * - * @param system_contract System contract variant - * @returns A valid [[URef]] that points at system contract, otherwise null. - */ -export function getSystemContract(systemContract: SystemContract): Uint8Array { - let data = new Uint8Array(32); - let ret = externals.get_system_contract(systemContract, data.dataStart, data.length); - const error = Error.fromResult(ret); - if (error !== null) { - error.revert(); - return unreachable(); - } - return data; -} - -/** - * Calls the given stored contract, passing the given arguments to it. - * - * If the stored contract calls [[ret]], then that value is returned from [[callContract]]. If the - * stored contract calls [[Error.revert]], then execution stops and [[callContract]] doesn't return. - * Otherwise [[callContract]] returns null. - * - * @param contractHash A key under which a contract is stored - * @param args A list of values - * @returns Bytes of the contract's return value. 
- */ -export function callContract(contractHash: Uint8Array, entryPointName: String, runtimeArgs: RuntimeArgs): Uint8Array { - let argBytes = runtimeArgs.toBytes(); - let entryPointNameBytes = toBytesString(entryPointName); - - let resultSize = new Uint32Array(1); - resultSize.fill(0); - - let ret = externals.call_contract( - contractHash.dataStart, - contractHash.length, - entryPointNameBytes.dataStart, - entryPointNameBytes.length, - argBytes.dataStart, - argBytes.length, - resultSize.dataStart, - ); - const error = Error.fromResult(ret); - if (error !== null) { - error.revert(); - return unreachable(); - } - let hostBufSize = resultSize[0]; - if (hostBufSize > 0) { - return readHostBuffer(hostBufSize); - } else { - return new Uint8Array(0); - } -} - -/** - * Stores the given [[Key]] under a given name in the current context's named keys. - * - * The current context is either the caller's account or a stored contract - * depending on whether the currently-executing module is a direct call or a - * sub-call respectively. - * - * @category Runtime - */ -export function putKey(name: String, key: Key): void { - var nameBytes = toBytesString(name); - var keyBytes = key.toBytes(); - externals.put_key( - nameBytes.dataStart, - nameBytes.length, - keyBytes.dataStart, - keyBytes.length - ); -} - -/** - * Removes the [[Key]] stored under `name` in the current context's named keys. - * - * The current context is either the caller's account or a stored contract depending on whether the - * currently-executing module is a direct call or a sub-call respectively. - * - * @param name Name of the key in current context's named keys - * @returns An instance of [[Key]] if it exists, or a `null` otherwise. 
- */ -export function getKey(name: String): Key | null { - var nameBytes = toBytesString(name); - let keyBytes = new Uint8Array(KEY_UREF_SERIALIZED_LENGTH); - let resultSize = new Uint32Array(1); - let ret = externals.get_key( - nameBytes.dataStart, - nameBytes.length, - keyBytes.dataStart, - keyBytes.length, - resultSize.dataStart, - ); - const error = Error.fromResult(ret); - if (error !== null) { - if (error.value() == ErrorCode.MissingKey) { - return null; - } - error.revert(); - return unreachable(); - } - let key = Key.fromBytes(keyBytes.slice(0, resultSize[0])); // total guess - return key.unwrap(); -} - -/** - * Returns the given [[CLValue]] to the host, terminating the currently - * running module. - * - * Note this function is only relevant to contracts stored on chain which are - * invoked via [[callContract]] and can thus return a value to their caller. - * The return value of a directly deployed contract is never used. - */ -export function ret(value: CLValue): void { - const valueBytes = value.toBytes(); - externals.ret( - valueBytes.dataStart, - valueBytes.length - ); - unreachable(); -} - -/** - * Returns `true` if `name` exists in the current context's named keys. - * - * The current context is either the caller's account or a stored contract depending on whether the - * currently-executing module is a direct call or a sub-call respectively. - * - * @param name Name of the key - */ -export function hasKey(name: String): bool { - const nameBytes = toBytesString(name); - let ret = externals.has_key(nameBytes.dataStart, nameBytes.length); - return ret == 0; -} - -/** - * Returns the current block time. - */ -export function getBlockTime(): u64 { - let bytes = new Uint64Array(1); - externals.get_blocktime(bytes.dataStart); - return bytes[0]; -} - -/** - * Returns the caller of the current context, i.e. the [[AccountHash]] of the - * account which made the deploy request. 
- */ -export function getCaller(): AccountHash { - let outputSize = new Uint32Array(1); - let ret = externals.get_caller(outputSize.dataStart); - const error = Error.fromResult(ret); - if (error !== null) { - error.revert(); - return unreachable(); - } - const accountHashBytes = readHostBuffer(outputSize[0]); - const accountHashResult = AccountHash.fromBytes(accountHashBytes); - if (accountHashResult.hasError()) { - Error.fromErrorCode(ErrorCode.Deserialize).revert(); - return unreachable(); - } - return accountHashResult.value; -} - -/** - * The phase in which a given contract is executing. - */ -export enum Phase { - /** - * Set while committing the genesis or upgrade configurations. - */ - System = 0, - /** - * Set while executing the payment code of a deploy. - */ - Payment = 1, - /** - * Set while executing the session code of a deploy. - */ - Session = 2, - /** - * Set while finalizing payment at the end of a deploy. - */ - FinalizePayment = 3, -} - -/** - * Returns the current [[Phase]]. - */ -export function getPhase(): Phase { - let bytes = new Uint8Array(1); - externals.get_phase(bytes.dataStart); - const phase = bytes[0]; - return phase; -} - -/** - * Removes the [[Key]] stored under `name` in the current context's named keys. - * - * The current context is either the caller's account or a stored contract depending on whether the - * currently-executing module is a direct call or a sub-call respectively. - */ -export function removeKey(name: String): void{ - var nameBytes = toBytesString(name); - externals.remove_key(nameBytes.dataStart, nameBytes.length); -} - -/** - * Returns the named keys of the current context. - * - * The current context is either the caller's account or a stored contract depending on whether the - * currently-executing module is a direct call or a sub-call respectively. 
- * - * @returns An array of String and [[Key]] pairs - */ -export function listNamedKeys(): Array> { - let totalKeys = new Uint32Array(1); - let resultSize = new Uint32Array(1); - - const res = externals.load_named_keys(totalKeys.dataStart, resultSize.dataStart); - const error = Error.fromResult(res); - if (error !== null) { - error.revert(); - return >>unreachable(); - } - - if (totalKeys[0] == 0) { - return new Array>(); - } - - let mapBytes = readHostBuffer(resultSize[0]); - let maybeMap = fromBytesMap( - mapBytes, - fromBytesString, - Key.fromBytes); - - if (maybeMap.hasError()) { - Error.fromErrorCode(ErrorCode.Deserialize).revert(); - return >>unreachable(); - } - return maybeMap.value; -} - -const ENTRYPOINTACCESS_PUBLIC_TAG: u8 = 1; -const ENTRYPOINTACCESS_GROUPS_TAG: u8 = 2; - -export class EntryPointAccess { - constructor(public cachedBytes: Array) {} - toBytes(): Array { - return this.cachedBytes; - } -} - -export class PublicAccess extends EntryPointAccess { - constructor() { - super([ENTRYPOINTACCESS_PUBLIC_TAG]); - } -}; - -export class GroupAccess extends EntryPointAccess { - constructor(groups: String[]) { - let bytes: Array = [ENTRYPOINTACCESS_GROUPS_TAG]; - bytes = bytes.concat(toBytesStringList(groups)); - super(bytes); - } -}; - - -export enum EntryPointType { - Session = 0, - Contract = 1, -} - -export class EntryPoint { - constructor(public name: String, - public args: Array>, - public ret: CLType, - public access: EntryPointAccess, - public entry_point_type: EntryPointType) {} - - toBytes(): Array { - let nameBytes = toBytesString(this.name); - let toBytesCLType = function(clType: CLType): Array { return clType.toBytes(); }; - let argsBytes = toBytesMap(this.args, toBytesString, toBytesCLType); - let retBytes = this.ret.toBytes(); - let accessBytes = this.access.toBytes(); - let entryPointTypeBytes: Array = [this.entry_point_type]; - return nameBytes.concat(argsBytes).concat(retBytes).concat(accessBytes).concat(entryPointTypeBytes); - } -}; 
- -export class EntryPoints { - entryPoints: Array> = new Array>(); - addEntryPoint(entryPoint: EntryPoint): void { - this.entryPoints.push(new Pair(entryPoint.name, entryPoint)); - } - toBytes(): Array { - let toBytesEntryPoint = function(entryPoint: EntryPoint): Array { return entryPoint.toBytes(); }; - return toBytesMap(this.entryPoints, toBytesString, toBytesEntryPoint); - } -} - -/** - * A two-value structure that holds the result of [[createContractPackageAtHash]]. - */ -export class CreateContractPackageResult { - constructor(public packageHash: Uint8Array, public accessURef: URef) {} -} - -export function createContractPackageAtHash(): CreateContractPackageResult { - let hashAddr = new Uint8Array(KEY_HASH_LENGTH); - let urefAddr = new Uint8Array(UREF_ADDR_LENGTH); - externals.create_contract_package_at_hash(hashAddr.dataStart, urefAddr.dataStart, false); - return new CreateContractPackageResult( - hashAddr, - new URef(urefAddr, AccessRights.READ_ADD_WRITE), - ); -} - -export function createLockedContractPackageAtHash(): CreateContractPackageResult { - let hashAddr = new Uint8Array(KEY_HASH_LENGTH); - let urefAddr = new Uint8Array(UREF_ADDR_LENGTH); - externals.create_contract_package_at_hash(hashAddr.dataStart, urefAddr.dataStart, true); - return new CreateContractPackageResult( - hashAddr, - new URef(urefAddr, AccessRights.READ_ADD_WRITE), - ); -} - -export function newContract(entryPoints: EntryPoints, namedKeys: Array> | null = null ,hashName: String | null = null, urefName: String | null = null): AddContractVersionResult { - let result = createContractPackageAtHash(); - if (hashName !== null) { - putKey(hashName, Key.fromHash(result.packageHash)); - } - if (urefName !== null) { - putKey(urefName, Key.fromURef(result.accessURef)); - } - - if (namedKeys === null) { - namedKeys = new Array>(); - } - - return addContractVersion( - result.packageHash, - entryPoints, - namedKeys, - ); -} - -export function newLockedContract(entryPoints: EntryPoints, 
namedKeys: Array> | null = null ,hashName: String | null = null, urefName: String | null = null): AddContractVersionResult { - let result = createLockedContractPackageAtHash(); - if (hashName !== null) { - putKey(hashName, Key.fromHash(result.packageHash)); - } - if (urefName !== null) { - putKey(urefName, Key.fromURef(result.accessURef)); - } - - if (namedKeys === null) { - namedKeys = new Array>(); - } - - return addContractVersion( - result.packageHash, - entryPoints, - namedKeys, - ); -} - - -export function callVersionedContract(packageHash: Uint8Array, contract_version: Option, entryPointName: String, runtimeArgs: RuntimeArgs): Uint8Array { - let entryPointBytes = toBytesString(entryPointName); - let argBytes = runtimeArgs.toBytes(); - let bytesWritten = new Uint32Array(1); - let bytesContractVersion = contract_version.toBytes(); - - let ret = externals.call_versioned_contract( - packageHash.dataStart, - packageHash.length, - bytesContractVersion.dataStart, - bytesContractVersion.length, - entryPointBytes.dataStart, - entryPointBytes.length, - argBytes.dataStart, - argBytes.length, - bytesWritten.dataStart, - ); - let err = Error.fromResult(ret); - if (err !== null) { - err.revert(); - } - if (bytesWritten[0] == 0) { - return new Uint8Array(0); - } - else { - return readHostBuffer(bytesWritten[0]); - } -} - -// Container for a result of contract version. -// Used as a replacement of non-existing tuples. -export class AddContractVersionResult { - constructor(public contractHash: Uint8Array, public contractVersion: u32) {} -} - -// Add new contract version. Requires a package hash, entry points and named keys. 
-// Result -export function addContractVersion(packageHash: Uint8Array, entryPoints: EntryPoints, namedKeys: Array>): AddContractVersionResult { - var versionPtr = new Uint32Array(1); - let entryPointsBytes = entryPoints.toBytes(); - let keyToBytes = function(key: Key): Array { return key.toBytes(); }; - let namedKeysBytes = toBytesMap(namedKeys, toBytesString, keyToBytes); - let keyBytes = new Uint8Array(32); - let totalBytes = new Uint32Array(1); - - let ret = externals.add_contract_version( - packageHash.dataStart, - packageHash.length, - versionPtr.dataStart, // output - entryPointsBytes.dataStart, - entryPointsBytes.length, - namedKeysBytes.dataStart, - namedKeysBytes.length, - keyBytes.dataStart, - keyBytes.length, - totalBytes.dataStart, - ); - const error = Error.fromResult(ret); - if (error !== null) { - error.revert(); - return unreachable(); - } - - const contractHash = keyBytes.slice(0, totalBytes[0]); - const contractVersion = versionPtr[0]; - return new AddContractVersionResult(contractHash, contractVersion); -} - -export function createContractUserGroup(packageHash: Uint8Array, label: String, newURefs: u8, existingURefs: Array): Array { - let labelBytes = toBytesString(label); - - // NOTE: AssemblyScript sometimes is fine with closures, and sometimes - // it generates unreachable code. Anonymous functions seems to be working - // consistently. 
- let toBytesURef = function(item: URef): Array { return item.toBytes(); } - let fromBytesURef = function(bytes: Uint8Array): Result { return URef.fromBytes(bytes); } - - let existingUrefBytes: Array = toBytesVecT(existingURefs, toBytesURef); - - let outputSize = new Uint32Array(1); - - let ret = externals.create_contract_user_group( - packageHash.dataStart, - packageHash.length, - labelBytes.dataStart, - labelBytes.length, - newURefs, - existingUrefBytes.dataStart, - existingUrefBytes.length, - outputSize.dataStart, - ); - - let err = Error.fromResult(ret); - if (err !== null) { - err.revert(); - return >unreachable(); - } - let bytes = readHostBuffer(outputSize[0]); - return fromBytesArray(bytes, fromBytesURef).unwrap(); -} - -export function removeContractUserGroup( - packageHash: Uint8Array, - label: String, -): void { - let label_bytes = toBytesString(label); - let ret = externals.remove_contract_user_group( - packageHash.dataStart, - packageHash.length, - label_bytes.dataStart, - label_bytes.length, - ); - let err = Error.fromResult(ret); - if (err !== null) { - err.revert(); - } -} - -export function extendContractUserGroupURefs( - packageHash: Uint8Array, - label: String, -): URef { - let label_bytes = toBytesString(label); - let size = new Uint32Array(1); - let ret = externals.provision_contract_user_group_uref( - packageHash.dataStart, - packageHash.length, - label_bytes.dataStart, - label_bytes.length, - size.dataStart, - ); - let err = Error.fromResult(ret); - if (err !== null) { - err.revert(); - } - let bytes = readHostBuffer(size[0]); - return URef.fromBytes(bytes).unwrap(); -} - -export function removeContractUserGroupURefs( - packageHash: Uint8Array, - label: String, - urefs: Array): void { - - let label_bytes = toBytesString(label); - - let encode = function(item: URef): Array { return item.toBytes(); }; - let urefsData = toBytesVecT(urefs, encode); - - let ret = externals.remove_contract_user_group_urefs( - packageHash.dataStart, - 
packageHash.length, - label_bytes.dataStart, - label_bytes.length, - urefsData.dataStart, - urefsData.length, - ); - let err = Error.fromResult(ret); - if (err !== null) { - err.revert(); - } -} \ No newline at end of file diff --git a/smart_contracts/contract_as/assembly/key.ts b/smart_contracts/contract_as/assembly/key.ts deleted file mode 100644 index faf8cbd63a..0000000000 --- a/smart_contracts/contract_as/assembly/key.ts +++ /dev/null @@ -1,276 +0,0 @@ -import * as externals from "./externals"; -import {readHostBuffer} from "."; -import {UREF_SERIALIZED_LENGTH} from "./constants"; -import {URef} from "./uref"; -import {CLValue} from "./clvalue"; -import {Error, ErrorCode} from "./error"; -import {checkTypedArrayEqual, typedToArray} from "./utils"; -import {Ref} from "./ref"; -import {Result, Error as BytesreprError} from "./bytesrepr"; - -/** - * Enum representing a variant of a [[Key]] - Account, Hash or URef. - */ -export enum KeyVariant { - /** The Account variant */ - ACCOUNT_ID = 0, - /** The Hash variant */ - HASH_ID = 1, - /** The URef variant */ - UREF_ID = 2, -} - -/** A cryptographic public key. */ -export class AccountHash { - /** - * Constructs a new `AccountHash`. - * - * @param bytes The bytes constituting the public key. - */ - constructor(public bytes: Uint8Array) {} - - /** Checks whether two `AccountHash`s are equal. */ - @operator("==") - equalsTo(other: AccountHash): bool { - return checkTypedArrayEqual(this.bytes, other.bytes); - } - - /** Checks whether two `AccountHash`s are not equal. */ - @operator("!=") - notEqualsTo(other: AccountHash): bool { - return !this.equalsTo(other); - } - - /** Deserializes a `AccountHash` from an array of bytes. 
*/ - static fromBytes(bytes: Uint8Array): Result { - if (bytes.length < 32) { - return new Result(null, BytesreprError.EarlyEndOfStream, 0); - } - - let accountHashBytes = bytes.subarray(0, 32); - let accountHash = new AccountHash(accountHashBytes); - let ref = new Ref(accountHash); - return new Result(ref, BytesreprError.Ok, 32); - } - - /** Serializes a `AccountHash` into an array of bytes. */ - toBytes(): Array { - return typedToArray(this.bytes); - } -} - -/** - * The type under which data (e.g. [[CLValue]]s, smart contracts, user accounts) - * are indexed on the network. - */ -export class Key { - variant: KeyVariant; - hash: Uint8Array | null; - uref: URef | null; - account: AccountHash | null; - - /** Creates a `Key` from a given [[URef]]. */ - static fromURef(uref: URef): Key { - let key = new Key(); - key.variant = KeyVariant.UREF_ID; - key.uref = uref; - return key; - } - - /** Creates a `Key` from a given hash. */ - static fromHash(hash: Uint8Array): Key { - let key = new Key(); - key.variant = KeyVariant.HASH_ID; - key.hash = hash; - return key; - } - - /** Creates a `Key` from a [[]] representing an account. */ - static fromAccount(account: AccountHash): Key { - let key = new Key(); - key.variant = KeyVariant.ACCOUNT_ID; - key.account = account; - return key; - } - - /** - * Attempts to write `value` under a new Key::URef - * - * If a key is returned it is always of [[KeyVariant]].UREF_ID - */ - static create(value: CLValue): Key | null { - const valueBytes = value.toBytes(); - let urefBytes = new Uint8Array(UREF_SERIALIZED_LENGTH); - externals.new_uref( - urefBytes.dataStart, - valueBytes.dataStart, - valueBytes.length - ); - const urefResult = URef.fromBytes(urefBytes); - if (urefResult.hasError()) { - return null; - } - return Key.fromURef(urefResult.value); - } - - /** Deserializes a `Key` from an array of bytes. 
*/ - static fromBytes(bytes: Uint8Array): Result { - if (bytes.length < 1) { - return new Result(null, BytesreprError.EarlyEndOfStream, 0); - } - const tag = bytes[0]; - let currentPos = 1; - - if (tag == KeyVariant.HASH_ID) { - var hashBytes = bytes.subarray(1, 32 + 1); - currentPos += 32; - - let key = Key.fromHash(hashBytes); - let ref = new Ref(key); - return new Result(ref, BytesreprError.Ok, currentPos); - } - else if (tag == KeyVariant.UREF_ID) { - var urefBytes = bytes.subarray(1); - var urefResult = URef.fromBytes(urefBytes); - if (urefResult.error != BytesreprError.Ok) { - return new Result(null, urefResult.error, 0); - } - let key = Key.fromURef(urefResult.value); - let ref = new Ref(key); - return new Result(ref, BytesreprError.Ok, currentPos + urefResult.position); - } - else if (tag == KeyVariant.ACCOUNT_ID) { - let accountHashBytes = bytes.subarray(1); - let accountHashResult = AccountHash.fromBytes(accountHashBytes); - if (accountHashResult.hasError()) { - return new Result(null, accountHashResult.error, currentPos); - } - currentPos += accountHashResult.position; - let key = Key.fromAccount(accountHashResult.value); - let ref = new Ref(key); - return new Result(ref, BytesreprError.Ok, currentPos); - } - else { - return new Result(null, BytesreprError.FormattingError, currentPos); - } - } - - /** Serializes a `Key` into an array of bytes. 
*/ - toBytes(): Array { - if(this.variant == KeyVariant.UREF_ID){ - let bytes = new Array(); - bytes.push(this.variant) - bytes = bytes.concat((this.uref).toBytes()); - return bytes; - } - else if (this.variant == KeyVariant.HASH_ID) { - var hashBytes = this.hash; - let bytes = new Array(1 + hashBytes.length); - bytes[0] = this.variant; - for (let i = 0; i < hashBytes.length; i++) { - bytes[i + 1] = hashBytes[i]; - } - return bytes; - } - else if (this.variant == KeyVariant.ACCOUNT_ID) { - let bytes = new Array(); - bytes.push(this.variant); - bytes = bytes.concat((this.account).toBytes()); - return bytes; - } - else { - return >unreachable(); - } - } - - /** Checks whether the `Key` is of [[KeyVariant]].UREF_ID. */ - isURef(): bool { - return this.variant == KeyVariant.UREF_ID; - } - - /** Converts the `Key` into `URef`. */ - toURef(): URef { - return this.uref; - } - - /** Reads the data stored under this `Key`. */ - read(): Uint8Array | null { - const keyBytes = this.toBytes(); - let valueSize = new Uint8Array(1); - const ret = externals.read_value(keyBytes.dataStart, keyBytes.length, valueSize.dataStart); - const error = Error.fromResult(ret); - if (error != null) { - if (error.value() == ErrorCode.ValueNotFound) { - return null; - } - error.revert(); - return unreachable(); - } - // TODO: How can we have `read` that would deserialize host bytes into T? - return readHostBuffer(valueSize[0]); - } - - /** Stores a [[CLValue]] under this `Key`. */ - write(value: CLValue): void { - const keyBytes = this.toBytes(); - const valueBytes = value.toBytes(); - externals.write( - keyBytes.dataStart, - keyBytes.length, - valueBytes.dataStart, - valueBytes.length - ); - } - - /** Adds the given `CLValue` to a value already stored under this `Key`. 
*/ - add(value: CLValue): void { - const keyBytes = this.toBytes(); - const valueBytes = value.toBytes(); - - externals.add( - keyBytes.dataStart, - keyBytes.length, - valueBytes.dataStart, - valueBytes.length - ); - } - - /** Checks whether two `Key`s are equal. */ - @operator("==") - equalsTo(other: Key): bool { - if (this.variant == KeyVariant.UREF_ID) { - if (other.variant == KeyVariant.UREF_ID) { - return this.uref == other.uref; - } - else { - return false; - } - } - else if (this.variant == KeyVariant.HASH_ID) { - if (other.variant == KeyVariant.HASH_ID) { - return checkTypedArrayEqual(this.hash, other.hash); - - } - else { - return false; - } - } - else if (this.variant == KeyVariant.ACCOUNT_ID) { - if (other.variant == KeyVariant.ACCOUNT_ID) { - return this.account == other.account; - } - else { - return false; - } - } - else { - return false; - } - } - - /** Checks whether two keys are not equal. */ - @operator("!=") - notEqualsTo(other: Key): bool { - return !this.equalsTo(other); - } -} diff --git a/smart_contracts/contract_as/assembly/local.ts b/smart_contracts/contract_as/assembly/local.ts deleted file mode 100644 index af2fce125f..0000000000 --- a/smart_contracts/contract_as/assembly/local.ts +++ /dev/null @@ -1,39 +0,0 @@ -//@ts-nocheck -import * as externals from "./externals"; -import {Error, ErrorCode} from "./error"; -import {CLValue} from "./clvalue"; -import {readHostBuffer} from "./index"; - -/** - * Reads the value under `key` in the context-local partition of global state. - * - * @category Storage - * @returns Returns bytes of serialized value, otherwise a null if given local key does not exists. 
- */ -export function readLocal(local: Uint8Array): Uint8Array | null { - let valueSize = new Uint8Array(1); - const ret = externals.read_value_local(local.dataStart, local.length, valueSize.dataStart); - if (ret == ErrorCode.ValueNotFound){ - return null; - } - const error = Error.fromResult(ret); - if (error != null) { - error.revert(); - return unreachable(); - } - return readHostBuffer(valueSize[0]); -} - -/** - * Writes `value` under `key` in the context-local partition of global state. - * @category Storage - */ -export function writeLocal(local: Uint8Array, value: CLValue): void { - const valueBytes = value.toBytes(); - externals.write_local( - local.dataStart, - local.length, - valueBytes.dataStart, - valueBytes.length - ); -} diff --git a/smart_contracts/contract_as/assembly/option.ts b/smart_contracts/contract_as/assembly/option.ts deleted file mode 100644 index cf2764aa91..0000000000 --- a/smart_contracts/contract_as/assembly/option.ts +++ /dev/null @@ -1,79 +0,0 @@ -const OPTION_TAG_NONE: u8 = 0; -const OPTION_TAG_SOME: u8 = 1; - -// TODO: explore Option (without interfaces to constrain T with, is it practical?) -/** - * A class representing an optional value, i.e. it might contain either a value of some type or - * no value at all. Similar to Rust's `Option` or Haskell's `Maybe`. - */ -export class Option{ - private bytes: Uint8Array | null; - - /** - * Constructs a new option containing the value of `bytes`. `bytes` can be `null`, which - * indicates no value. - */ - constructor(bytes: Uint8Array | null) { - this.bytes = bytes; - } - - /** - * Checks whether the `Option` contains no value. - * - * @returns True if the `Option` has no value. - */ - isNone(): bool{ - return this.bytes === null; - } - - /** - * Checks whether the `Option` contains a value. - * - * @returns True if the `Option` has some value. - */ - isSome() : bool{ - return this.bytes != null; - } - - /** - * Unwraps the `Option`, returning the inner value (or `null` if there was none). 
- * - * @returns The inner value, or `null` if there was none. - */ - unwrap(): Uint8Array{ - assert(this.isSome()); - return this.bytes; - } - - /** - * Serializes the `Option` into an array of bytes. - */ - toBytes(): Array{ - if (this.bytes === null){ - let result = new Array(1); - result[0] = OPTION_TAG_NONE; - return result; - } - const bytes = this.bytes; - - let result = new Array(bytes.length + 1); - result[0] = OPTION_TAG_SOME; - for (let i = 0; i < bytes.length; i++) { - result[i+1] = bytes[i]; - } - - return result; - } - - /** - * Deserializes an array of bytes into an `Option`. - */ - static fromBytes(bytes: Uint8Array): Option{ - // check SOME / NONE flag at head - // TODO: what if length is exactly 1? - if (bytes.length >= 1 && bytes[0] == 1) - return new Option(bytes.subarray(1)); - - return new Option(null); - } -} diff --git a/smart_contracts/contract_as/assembly/pair.ts b/smart_contracts/contract_as/assembly/pair.ts deleted file mode 100644 index 3ab74009ea..0000000000 --- a/smart_contracts/contract_as/assembly/pair.ts +++ /dev/null @@ -1,41 +0,0 @@ -/** - * A pair of values. - * - * @typeParam T1 The type of the first value. - * @typeParam T2 The type of the second value. - */ -export class Pair { - /** - * The first value in the pair. - */ - public first: T1; - /** - * The second value in the pair. - */ - public second: T2; - - /** - * Constructs the pair out of the two given values. - */ - constructor(first: T1, second: T2) { - this.first = first; - this.second = second; - } - - /** - * Checks whether two pairs are equal. The pairs are considered equal when both their first - * and second values are equal. - */ - @operator("==") - equalsTo(other: Pair): bool { - return this.first == other.first && this.second == other.second; - } - - /** - * Checks whether two pairs are not equal (the opposite of [[equalsTo]]). 
- */ - @operator("!=") - notEqualsTo(other: Pair): bool { - return !this.equalsTo(other); - } -} diff --git a/smart_contracts/contract_as/assembly/public_key.ts b/smart_contracts/contract_as/assembly/public_key.ts deleted file mode 100644 index 895d5302a3..0000000000 --- a/smart_contracts/contract_as/assembly/public_key.ts +++ /dev/null @@ -1,51 +0,0 @@ -import {typedToArray} from "./utils"; -import {Ref} from "./ref"; -import {Result, Error as BytesreprError} from "./bytesrepr"; - -const ED25519_PUBLIC_KEY_LENGTH = 32; -const SECP256K1_PUBLIC_KEY_LENGTH = 33; - -export enum PublicKeyVariant { - /** A public key of Ed25519 type */ - Ed25519 = 1, - /** A public key of Secp256k1 type */ - Secp256k1 = 2, -} - -export class PublicKey { - constructor(private variant: PublicKeyVariant, private bytes: Uint8Array) { - } - - toBytes(): Array { - let variantBytes: Array = [this.variant]; - return variantBytes.concat(typedToArray(this.bytes)); - } - - /** Deserializes a `PublicKey` from an array of bytes. 
*/ - static fromBytes(bytes: Uint8Array): Result { - if (bytes.length < 1) { - return new Result(null, BytesreprError.EarlyEndOfStream, 0); - } - const variant = bytes[0]; - let currentPos = 1; - - let expectedPublicKeySize: i32; - - switch (variant) { - case PublicKeyVariant.Ed25519: - expectedPublicKeySize = ED25519_PUBLIC_KEY_LENGTH; - break; - case PublicKeyVariant.Secp256k1: - expectedPublicKeySize = SECP256K1_PUBLIC_KEY_LENGTH; - break; - default: - return new Result(null, BytesreprError.FormattingError, 0); - } - let publicKeyBytes = bytes.subarray(currentPos, currentPos + expectedPublicKeySize); - currentPos += expectedPublicKeySize; - - let publicKey = new PublicKey(variant, publicKeyBytes); - let ref = new Ref(publicKey); - return new Result(ref, BytesreprError.Ok, currentPos); - } -} diff --git a/smart_contracts/contract_as/assembly/purse.ts b/smart_contracts/contract_as/assembly/purse.ts deleted file mode 100644 index 454dbdab43..0000000000 --- a/smart_contracts/contract_as/assembly/purse.ts +++ /dev/null @@ -1,270 +0,0 @@ -import * as externals from "./externals"; -import {readHostBuffer} from "./index"; -import {U512} from "./bignum"; -import {Error, ErrorCode} from "./error"; -import {UREF_SERIALIZED_LENGTH} from "./constants"; -import {URef} from "./uref"; -import {toBytesU64} from "./bytesrepr"; -import {Option} from "./option"; -import {Ref} from "./ref"; -import {getMainPurse} from "./account"; -import {arrayToTyped} from "./utils"; - -/** - * The result of a successful transfer between purses. - */ -export enum TransferredTo { - /** - * The destination account already existed. - */ - ExistingAccount = 0, - /** - * The destination account was created. - */ - NewAccount = 1, -} - -/** - * The result of a transfer between purse and account. 
- */ -export class TransferResult { - public errValue: Error | null = null; - public okValue: Ref | null = null; - - static makeErr(err: Error): TransferResult { - let transferResult = new TransferResult(); - transferResult.errValue = err; - return transferResult; - } - - static makeOk(ok: Ref): TransferResult { - let transferResult = new TransferResult(); - transferResult.okValue = ok; - return transferResult; - } - - get isErr(): bool { - return this.errValue !== null; - } - - get isOk(): bool { - return this.okValue !== null; - } - - get ok(): TransferredTo { - assert(this.okValue !== null); - const ok = >this.okValue; - return ok.value; - } - - get err(): Error { - assert(this.errValue !== null); - return this.errValue; - } -} - -function makeTransferredTo(value: u32): Ref | null { - if (value == TransferredTo.ExistingAccount) - return new Ref(TransferredTo.ExistingAccount); - if (value == TransferredTo.NewAccount) - return new Ref(TransferredTo.NewAccount); - return null; -} - -/** - * Creates a new empty purse and returns its [[URef]], or a null in case a - * purse couldn't be created. - * @hidden - */ -export function createPurse(): URef { - let bytes = new Uint8Array(UREF_SERIALIZED_LENGTH); - let ret = externals.create_purse( - bytes.dataStart, - bytes.length - ); - let error = Error.fromResult(ret); - if (error !== null){ - error.revert(); - return unreachable(); - } - - let urefResult = URef.fromBytes(bytes); - if (urefResult.hasError()) { - Error.fromErrorCode(ErrorCode.PurseNotCreated).revert(); - return unreachable(); - } - - return urefResult.value; -} - -/** - * Returns the balance in motes of the given purse or a null if given purse - * is invalid. 
- * @hidden - */ -export function getPurseBalance(purse: URef): U512 | null { - let purseBytes = purse.toBytes(); - let balanceSize = new Array(1); - balanceSize[0] = 0; - - let retBalance = externals.get_balance( - purseBytes.dataStart, - purseBytes.length, - balanceSize.dataStart, - ); - - const error = Error.fromResult(retBalance); - if (error != null) { - if (error.value() == ErrorCode.InvalidPurse) { - return null; - } - error.revert(); - return unreachable(); - } - - let balanceBytes = readHostBuffer(balanceSize[0]); - let balanceResult = U512.fromBytes(balanceBytes); - return balanceResult.unwrap(); -} - -export function getBalance(): U512 | null { - getPurseBalance(getMainPurse()) -} - -/** - * Transfers `amount` of motes from `source` purse to `target` account. - * If `target` does not exist it will be created. - * - * @param amount Amount is denominated in motes - * @returns This function will return a [[TransferredTo.TransferError]] in - * case of transfer error, in case of any other variant the transfer itself - * can be considered successful. 
- * @hidden - */ -export function transferFromPurseToAccount(sourcePurse: URef, targetAccount: Uint8Array, amount: U512, id: Ref | null = null): TransferResult { - let purseBytes = sourcePurse.toBytes(); - let targetBytes = new Array(targetAccount.length); - for (let i = 0; i < targetAccount.length; i++) { - targetBytes[i] = targetAccount[i]; - } - let amountBytes = amount.toBytes(); - - let optId: Option; - if (id !== null) { - optId = new Option(arrayToTyped(toBytesU64(id.value))); - } - else { - optId = new Option(null); - } - const idBytes = optId.toBytes(); - - let resultPtr = new Uint32Array(1); - - let ret = externals.transfer_from_purse_to_account( - purseBytes.dataStart, - purseBytes.length, - targetBytes.dataStart, - targetBytes.length, - amountBytes.dataStart, - amountBytes.length, - idBytes.dataStart, - idBytes.length, - resultPtr.dataStart, - ); - - const error = Error.fromResult(ret); - if (error !== null) { - return TransferResult.makeErr(error); - } - - const transferredTo = makeTransferredTo(resultPtr[0]); - if (transferredTo !== null) { - return TransferResult.makeOk(transferredTo); - } - return TransferResult.makeErr(Error.fromErrorCode(ErrorCode.Transfer)); -} - -/** - * Transfers `amount` of motes from `source` purse to `target` purse. If `target` does not exist - * the transfer fails. - * - * @returns This function returns non-zero value on error. 
- * @hidden - */ -export function transferFromPurseToPurse(sourcePurse: URef, targetPurse: URef, amount: U512, id: Ref | null = null): Error | null { - let sourceBytes = sourcePurse.toBytes(); - let targetBytes = targetPurse.toBytes(); - let amountBytes = amount.toBytes(); - - let optId: Option; - if (id !== null) { - const idValue = (>id).value; - optId = new Option(arrayToTyped(toBytesU64(idValue))); - } - else { - optId = new Option(null); - } - const idBytes = optId.toBytes(); - - let ret = externals.transfer_from_purse_to_purse( - sourceBytes.dataStart, - sourceBytes.length, - targetBytes.dataStart, - targetBytes.length, - amountBytes.dataStart, - amountBytes.length, - idBytes.dataStart, - idBytes.length, - ); - - return Error.fromResult(ret); -} - -/** - * Transfers `amount` of motes from main purse purse to `target` account. - * If `target` does not exist it will be created. - * - * @param amount Amount is denominated in motes - * @returns This function will return a [[TransferredTo.TransferError]] in - * case of transfer error, in case of any other variant the transfer itself - * can be considered successful. 
- */ -export function transferToAccount(targetAccount: Uint8Array, amount: U512, id: Ref | null = null): TransferResult { - let targetBytes = new Array(targetAccount.length); - for (let i = 0; i < targetAccount.length; i++) { - targetBytes[i] = targetAccount[i]; - } - let amountBytes = amount.toBytes(); - - let optId: Option; - if (id !== null) { - optId = new Option(arrayToTyped(toBytesU64(id.value))); - } - else { - optId = new Option(null); - } - const idBytes = optId.toBytes(); - - let resultPtr = new Uint32Array(1); - - let ret = externals.transfer_to_account( - targetBytes.dataStart, - targetBytes.length, - amountBytes.dataStart, - amountBytes.length, - idBytes.dataStart, - idBytes.length, - resultPtr.dataStart, - ); - - const error = Error.fromResult(ret); - if (error !== null) { - return TransferResult.makeErr(error); - } - - const transferredTo = makeTransferredTo(resultPtr[0]); - if (transferredTo !== null) { - return TransferResult.makeOk(transferredTo); - } - return TransferResult.makeErr(Error.fromErrorCode(ErrorCode.Transfer)); -} diff --git a/smart_contracts/contract_as/assembly/ref.ts b/smart_contracts/contract_as/assembly/ref.ts deleted file mode 100644 index cc27b228e8..0000000000 --- a/smart_contracts/contract_as/assembly/ref.ts +++ /dev/null @@ -1,6 +0,0 @@ -/** - * Boxes a value which could then be nullable in any context. - */ -export class Ref { - constructor(public value: T) {} -} diff --git a/smart_contracts/contract_as/assembly/runtime_args.ts b/smart_contracts/contract_as/assembly/runtime_args.ts deleted file mode 100644 index 4850e2ed8b..0000000000 --- a/smart_contracts/contract_as/assembly/runtime_args.ts +++ /dev/null @@ -1,27 +0,0 @@ -import {CLValue} from "./clvalue"; -import { Pair } from "./pair"; -import { toBytesU32, toBytesString } from "./bytesrepr"; - -/** - * Implements a collection of runtime arguments. 
- */ -export class RuntimeArgs { - constructor(public arguments: Pair[] = []) {} - - static fromArray(pairs: Pair[]): RuntimeArgs { - return new RuntimeArgs(pairs); - } - - toBytes(): Array { - let bytes : u8[] = toBytesU32(this.arguments.length); - let args = this.arguments; - for (var i = 0; i < args.length; i++) { - let pair = args[i]; - const argNameBytes = toBytesString(pair.first); - bytes = bytes.concat(argNameBytes); - const argValueBytes = pair.second.toBytes(); - bytes = bytes.concat(argValueBytes); - } - return bytes; - } -} diff --git a/smart_contracts/contract_as/assembly/tsconfig.json b/smart_contracts/contract_as/assembly/tsconfig.json deleted file mode 100644 index 05dd1cb45d..0000000000 --- a/smart_contracts/contract_as/assembly/tsconfig.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "extends": "../node_modules/assemblyscript/std/assembly.json", - "include": [ - "./**/*.ts" - ], - "typedocOptions": { - "mode": "file", - "out": "docs" - } -} diff --git a/smart_contracts/contract_as/assembly/unit.ts b/smart_contracts/contract_as/assembly/unit.ts deleted file mode 100644 index 21fa24931e..0000000000 --- a/smart_contracts/contract_as/assembly/unit.ts +++ /dev/null @@ -1,19 +0,0 @@ -/** - * A class representing the unit type, i.e. a type that has no values (equivalent to eg. `void` in - * C or `()` in Rust). - */ -export class Unit{ - /** - * Serializes a [[Unit]] - returns an empty array. - */ - toBytes(): Array { - return new Array(0); - } - - /** - * Deserializes a [[Unit]] - returns a new [[Unit]]. 
- */ - static fromBytes(bytes: Uint8Array): Unit{ - return new Unit(); - } -} diff --git a/smart_contracts/contract_as/assembly/uref.ts b/smart_contracts/contract_as/assembly/uref.ts deleted file mode 100644 index ab95d83aa9..0000000000 --- a/smart_contracts/contract_as/assembly/uref.ts +++ /dev/null @@ -1,143 +0,0 @@ -import {Ref} from "./ref"; -import {Error, Result} from "./bytesrepr"; -import {UREF_ADDR_LENGTH} from "./constants"; -import {checkTypedArrayEqual, typedToArray} from "./utils"; -import {is_valid_uref, revert} from "./externals"; - -/** - * A set of bitflags that defines access rights associated with a [[URef]]. - */ -export enum AccessRights{ - /** - * No permissions - */ - NONE = 0x0, - /** - * Permission to read the value under the associated [[URef]]. - */ - READ = 0x1, - /** - * Permission to write a value under the associated [[URef]]. - */ - WRITE = 0x2, - /** - * Permission to read or write the value under the associated [[URef]]. - */ - READ_WRITE = 0x3, - /** - * Permission to add to the value under the associated [[URef]]. - */ - ADD = 0x4, - /** - * Permission to read or add to the value under the associated [[URef]]. - */ - READ_ADD = 0x5, - /** - * Permission to add to, or write the value under the associated [[URef]]. - */ - ADD_WRITE = 0x6, - /** - * Permission to read, add to, or write the value under the associated [[URef]]. - */ - READ_ADD_WRITE = 0x07, -} - -/** - * Represents an unforgeable reference, containing an address in the network's global storage and - * the [[AccessRights]] of the reference. - * - * A [[URef]] can be used to index entities such as [[CLValue]]s, or smart contracts. - */ -export class URef { - /** - * Representation of URef address. - */ - private bytes: Uint8Array; - private accessRights: AccessRights - - /** - * Constructs new instance of URef. - * @param bytes Bytes representing address of the URef. - * @param accessRights Access rights flag. Use [[AccessRights.NONE]] to indicate no permissions. 
- */ - constructor(bytes: Uint8Array, accessRights: AccessRights) { - this.bytes = bytes; - this.accessRights = accessRights; - } - - /** - * Returns the address of this URef as an array of bytes. - * - * @returns A byte array with a length of 32. - */ - public getBytes(): Uint8Array { - return this.bytes; - } - - /** - * Returns the access rights of this [[URef]]. - */ - public getAccessRights(): AccessRights { - return this.accessRights; - } - - /** - * Validates uref against named keys. - */ - public isValid(): boolean{ - const urefBytes = this.toBytes(); - let ret = is_valid_uref( - urefBytes.dataStart, - urefBytes.length - ); - return ret !== 0; - } - - /** - * Deserializes a new [[URef]] from bytes. - * @param bytes Input bytes. Requires at least 33 bytes to properly deserialize an [[URef]]. - */ - static fromBytes(bytes: Uint8Array): Result { - if (bytes.length < 33) { - return new Result(null, Error.EarlyEndOfStream, 0); - } - - let urefBytes = bytes.subarray(0, UREF_ADDR_LENGTH); - let currentPos = 33; - - let accessRights = bytes[UREF_ADDR_LENGTH]; - let uref = new URef(urefBytes, accessRights); - let ref = new Ref(uref); - return new Result(ref, Error.Ok, currentPos); - } - - /** - * Serializes the URef into an array of bytes that represents it in the Casper serialization - * format. - */ - toBytes(): Array { - let result = typedToArray(this.bytes); - result.push(this.accessRights); - return result; - } - - /** - * The equality operator. - * - * @returns True if `this` and `other` are equal, false otherwise. - */ - @operator("==") - equalsTo(other: URef): bool { - return checkTypedArrayEqual(this.bytes, other.bytes) && this.accessRights == other.accessRights; - } - - /** - * The not-equal operator. - * - * @returns False if `this` and `other` are equal, true otherwise. 
- */ - @operator("!=") - notEqualsTo(other: URef): bool { - return !this.equalsTo(other); - } -} diff --git a/smart_contracts/contract_as/assembly/utils.ts b/smart_contracts/contract_as/assembly/utils.ts deleted file mode 100644 index 44d69e6e39..0000000000 --- a/smart_contracts/contract_as/assembly/utils.ts +++ /dev/null @@ -1,69 +0,0 @@ -/** - * Encodes an UTF8 string into bytes. - * @param str Input string. - */ -export function encodeUTF8(str: String): Uint8Array { - let utf8Bytes = String.UTF8.encode(str); - return Uint8Array.wrap(utf8Bytes); -} - -/** Converts typed array to array */ -export function typedToArray(arr: Uint8Array): Array { - let result = new Array(arr.length); - for (let i = 0; i < arr.length; i++) { - result[i] = arr[i]; - } - return result; -} - -/** Converts array to typed array */ -export function arrayToTyped(arr: Array): Uint8Array { - let result = new Uint8Array(arr.length); - for (let i = 0; i < arr.length; i++) { - result[i] = arr[i]; - } - return result; -} - -/** Checks if items in two unordered arrays are equal */ -export function checkItemsEqual(a: Array, b: Array): bool { - for (let i = 0; i < a.length; i++) { - const idx = b.indexOf(a[i]); - if (idx == -1) { - return false; - } - b.splice(idx, 1); - } - return b.length === 0; -} - -/** Checks if two ordered arrays are equal */ -export function checkArraysEqual(a: Array, b: Array, len: i32 = 0): bool { - if (!len) { - len = a.length; - if (len != b.length) return false; - if (a === b) return true; - } - for (let i = 0; i < len; i++) { - if (isFloat()) { - if (isNaN(a[i]) && isNaN(b[i])) continue; - } - if (a[i] != b[i]) return false; - } - return true; -} - - -/** Checks if two ordered arrays are equal */ -export function checkTypedArrayEqual(a: Uint8Array, b: Uint8Array, len: i32 = 0): bool { - if (!len) { - len = a.length; - if (len != b.length) return false; - if (a === b) return true; - } - for (let i = 0; i < len; i++) { - if (a[i] != b[i]) return false; - } - return true; 
-} - diff --git a/smart_contracts/contract_as/build/.gitignore b/smart_contracts/contract_as/build/.gitignore deleted file mode 100644 index 870a503c72..0000000000 --- a/smart_contracts/contract_as/build/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -*.wasm -*.wasm.map -*.asm.js -*.wat diff --git a/smart_contracts/contract_as/index.js b/smart_contracts/contract_as/index.js deleted file mode 100644 index 7201a972a0..0000000000 --- a/smart_contracts/contract_as/index.js +++ /dev/null @@ -1,12 +0,0 @@ -const fs = require("fs"); -const compiled = new WebAssembly.Module(fs.readFileSync(__dirname + "/build/optimized.wasm")); -const imports = { - env: { - abort(_msg, _file, line, column) { - console.error("abort called at index.ts:" + line + ":" + column); - } - } -}; -Object.defineProperty(module, "exports", { - get: () => new WebAssembly.Instance(compiled, imports).exports -}); diff --git a/smart_contracts/contract_as/package-lock.json b/smart_contracts/contract_as/package-lock.json deleted file mode 100644 index f895d22031..0000000000 --- a/smart_contracts/contract_as/package-lock.json +++ /dev/null @@ -1,7281 +0,0 @@ -{ - "name": "casper-contract", - "version": "1.0.0", - "lockfileVersion": 1, - "requires": true, - "dependencies": { - "@assemblyscript/loader": { - "version": "0.9.4", - "resolved": "https://registry.npmjs.org/@assemblyscript/loader/-/loader-0.9.4.tgz", - "integrity": "sha512-HazVq9zwTVwGmqdwYzu7WyQ6FQVZ7SwET0KKQuKm55jD0IfUpZgN0OPIiZG3zV1iSrVYcN0bdwLRXI/VNCYsUA==", - "dev": true - }, - "@babel/code-frame": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.10.4.tgz", - "integrity": "sha512-vG6SvB6oYEhvgisZNFRmRCUkLz11c7rp+tbNTynGqc6mS1d5ATd/sGyV6W0KZZnXRKMTzZDRgQT3Ou9jhpAfUg==", - "dev": true, - "requires": { - "@babel/highlight": "^7.10.4" - } - }, - "@babel/helper-validator-identifier": { - "version": "7.10.4", - "resolved": 
"https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.10.4.tgz", - "integrity": "sha512-3U9y+43hz7ZM+rzG24Qe2mufW5KhvFg/NhnNph+i9mgCtdTCtMJuI1TMkrIUiK7Ix4PYlRF9I5dhqaLYA/ADXw==", - "dev": true - }, - "@babel/highlight": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.10.4.tgz", - "integrity": "sha512-i6rgnR/YgPEQzZZnbTHHuZdlE8qyoBNalD6F+q4vAFlcMEcqmkoG+mPqJYJCo63qPf74+Y1UZsl3l6f7/RIkmA==", - "dev": true, - "requires": { - "@babel/helper-validator-identifier": "^7.10.4", - "chalk": "^2.0.0", - "js-tokens": "^4.0.0" - } - }, - "@concordance/react": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/@concordance/react/-/react-2.0.0.tgz", - "integrity": "sha512-huLSkUuM2/P+U0uy2WwlKuixMsTODD8p4JVQBI4VKeopkiN0C7M3N9XYVawb4M+4spN5RrO/eLhk7KoQX6nsfA==", - "dev": true, - "requires": { - "arrify": "^1.0.1" - }, - "dependencies": { - "arrify": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/arrify/-/arrify-1.0.1.tgz", - "integrity": "sha1-iYUI2iIm84DfkEcoRWhJwVAaSw0=", - "dev": true - } - } - }, - "@nodelib/fs.scandir": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.3.tgz", - "integrity": "sha512-eGmwYQn3gxo4r7jdQnkrrN6bY478C3P+a/y72IJukF8LjB6ZHeB3c+Ehacj3sYeSmUXGlnA67/PmbM9CVwL7Dw==", - "dev": true, - "requires": { - "@nodelib/fs.stat": "2.0.3", - "run-parallel": "^1.1.9" - } - }, - "@nodelib/fs.stat": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.3.tgz", - "integrity": "sha512-bQBFruR2TAwoevBEd/NWMoAAtNGzTRgdrqnYCc7dhzfoNvqPzLyqlEQnzZ3kVnNrSp25iyxE00/3h2fqGAGArA==", - "dev": true - }, - "@nodelib/fs.walk": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.4.tgz", - "integrity": "sha512-1V9XOY4rDW0rehzbrcqAmHnz8e7SKvX27gh8Gt2WgB0+pdzdiLV83p72kZPU+jvMbS1qU5mauP2iOvO8rhmurQ==", - "dev": true, - 
"requires": { - "@nodelib/fs.scandir": "2.1.3", - "fastq": "^1.6.0" - } - }, - "@sindresorhus/is": { - "version": "0.14.0", - "resolved": "https://registry.npmjs.org/@sindresorhus/is/-/is-0.14.0.tgz", - "integrity": "sha512-9NET910DNaIPngYnLLPeg+Ogzqsi9uM4mSboU5y6p8S5DzMTVEsJZrawi+BoDNUVBa2DhJqQYUFvMDfgU062LQ==", - "dev": true - }, - "@szmarczak/http-timer": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@szmarczak/http-timer/-/http-timer-1.1.2.tgz", - "integrity": "sha512-XIB2XbzHTN6ieIjfIMV9hlVcfPU26s2vafYWQcZHWXHOxiaRZYEDKEwdl129Zyg50+foYV2jCgtrqSA6qNuNSA==", - "dev": true, - "requires": { - "defer-to-connect": "^1.0.1" - } - }, - "@textlint/ast-node-types": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/@textlint/ast-node-types/-/ast-node-types-4.3.4.tgz", - "integrity": "sha512-Grq+vJuNH7HCa278eFeiqJvowrD+onMCoG2ctLyoN+fXYIQGIr1/8fo8AcIg+VM16Kga+N6Y1UWNOWPd8j1nFg==", - "dev": true - }, - "@textlint/markdown-to-ast": { - "version": "6.0.9", - "resolved": "https://registry.npmjs.org/@textlint/markdown-to-ast/-/markdown-to-ast-6.0.9.tgz", - "integrity": "sha512-hfAWBvTeUGh5t5kTn2U3uP3qOSM1BSrxzl1jF3nn0ywfZXpRBZr5yRjXnl4DzIYawCtZOshmRi/tI3/x4TE1jQ==", - "dev": true, - "requires": { - "@textlint/ast-node-types": "^4.0.3", - "debug": "^2.1.3", - "remark-frontmatter": "^1.2.0", - "remark-parse": "^5.0.0", - "structured-source": "^3.0.2", - "traverse": "^0.6.6", - "unified": "^6.1.6" - }, - "dependencies": { - "debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dev": true, - "requires": { - "ms": "2.0.0" - } - }, - "ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=", - "dev": true - } - } - }, - "@types/color-name": { - "version": "1.1.1", - "resolved": 
"https://registry.npmjs.org/@types/color-name/-/color-name-1.1.1.tgz", - "integrity": "sha512-rr+OQyAjxze7GgWrSaJwydHStIhHq2lvY3BOC2Mj7KnzI7XK0Uw1TOOdI9lDoajEbSWLiYgoo4f1R51erQfhPQ==", - "dev": true - }, - "@types/events": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/@types/events/-/events-3.0.0.tgz", - "integrity": "sha512-EaObqwIvayI5a8dCzhFrjKzVwKLxjoG9T6Ppd5CEo07LRKfQ8Yokw54r5+Wq7FaBQ+yXRvQAYPrHwya1/UFt9g==", - "dev": true - }, - "@types/glob": { - "version": "7.1.1", - "resolved": "https://registry.npmjs.org/@types/glob/-/glob-7.1.1.tgz", - "integrity": "sha512-1Bh06cbWJUHMC97acuD6UMG29nMt0Aqz1vF3guLfG+kHHJhy3AyohZFFxYk2f7Q1SQIrNwvncxAE0N/9s70F2w==", - "dev": true, - "requires": { - "@types/events": "*", - "@types/minimatch": "*", - "@types/node": "*" - } - }, - "@types/minimatch": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/@types/minimatch/-/minimatch-3.0.3.tgz", - "integrity": "sha512-tHq6qdbT9U1IRSGf14CL0pUlULksvY9OZ+5eEgl1N7t+OA3tGvNpxJCzuKQlsNgCVwbAs670L1vcVQi8j9HjnA==", - "dev": true - }, - "@types/minimist": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/@types/minimist/-/minimist-1.2.0.tgz", - "integrity": "sha1-aaI6OtKcrwCX8G7aWbNh7i8GOfY=", - "dev": true - }, - "@types/node": { - "version": "12.12.19", - "resolved": "https://registry.npmjs.org/@types/node/-/node-12.12.19.tgz", - "integrity": "sha512-OXw80IpKyLeuZ5a8r2XCxVNnRAtS3lRDHBleSUQmbgu3C6eKqRsz7/5XNBU0EvK0RTVfotvYFgvRwwe2jeoiKw==", - "dev": true - }, - "@types/normalize-package-data": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/@types/normalize-package-data/-/normalize-package-data-2.4.0.tgz", - "integrity": "sha512-f5j5b/Gf71L+dbqxIpQ4Z2WlmI/mPJ0fOkGGmFgtb6sAu97EPczzbS3/tJKxmcYDj55OX6ssqwDAWOHIYDRDGA==", - "dev": true - }, - "acorn": { - "version": "8.0.1", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.0.1.tgz", - "integrity": 
"sha512-dmKn4pqZ29iQl2Pvze1zTrps2luvls2PBY//neO2WJ0s10B3AxJXshN+Ph7B4GrhfGhHXrl4dnUwyNNXQcnWGQ==", - "dev": true - }, - "acorn-walk": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.0.0.tgz", - "integrity": "sha512-oZRad/3SMOI/pxbbmqyurIx7jHw1wZDcR9G44L8pUVFEomX/0dH89SrM1KaDXuv1NpzAXz6Op/Xu/Qd5XXzdEA==", - "dev": true - }, - "aggregate-error": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/aggregate-error/-/aggregate-error-3.1.0.tgz", - "integrity": "sha512-4I7Td01quW/RpocfNayFdFVk1qSuoh0E7JrbRJ16nH01HhKFQ88INq9Sd+nd72zqRySlr9BmDA8xlEJ6vJMrYA==", - "dev": true, - "requires": { - "clean-stack": "^2.0.0", - "indent-string": "^4.0.0" - } - }, - "anchor-markdown-header": { - "version": "0.5.7", - "resolved": "https://registry.npmjs.org/anchor-markdown-header/-/anchor-markdown-header-0.5.7.tgz", - "integrity": "sha1-BFBj125qH5zTJ6V6ASaqD97Dcac=", - "dev": true, - "requires": { - "emoji-regex": "~6.1.0" - }, - "dependencies": { - "emoji-regex": { - "version": "6.1.3", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-6.1.3.tgz", - "integrity": "sha1-7HmjlpsC0uzytyJUJ5v5m8eoOTI=", - "dev": true - } - } - }, - "ansi-align": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/ansi-align/-/ansi-align-3.0.0.tgz", - "integrity": "sha512-ZpClVKqXN3RGBmKibdfWzqCY4lnjEuoNzU5T0oEFpfd/z5qJHVarukridD4juLO2FXMiwUQxr9WqQtaYa8XRYw==", - "dev": true, - "requires": { - "string-width": "^3.0.0" - }, - "dependencies": { - "ansi-regex": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz", - "integrity": "sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==", - "dev": true - }, - "emoji-regex": { - "version": "7.0.3", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-7.0.3.tgz", - "integrity": "sha512-CwBLREIQ7LvYFB0WyRvwhq5N5qPhc6PMjD6bYggFlI5YyDgl+0vxq5VHbMOFqLg7hfWzmu8T5Z1QofhmTIhItA==", 
- "dev": true - }, - "is-fullwidth-code-point": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz", - "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=", - "dev": true - }, - "string-width": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-3.1.0.tgz", - "integrity": "sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w==", - "dev": true, - "requires": { - "emoji-regex": "^7.0.1", - "is-fullwidth-code-point": "^2.0.0", - "strip-ansi": "^5.1.0" - } - }, - "strip-ansi": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz", - "integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==", - "dev": true, - "requires": { - "ansi-regex": "^4.1.0" - } - } - } - }, - "ansi-regex": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.0.tgz", - "integrity": "sha512-bY6fj56OUQ0hU1KjFNDQuJFezqKdrAyFdIevADiqrWHwSlbmBNMHp5ak2f40Pm8JTFyM2mqxkG6ngkHO11f/lg==", - "dev": true - }, - "ansi-styles": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", - "dev": true, - "requires": { - "color-convert": "^1.9.0" - } - }, - "anymatch": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.1.tgz", - "integrity": "sha512-mM8522psRCqzV+6LhomX5wgp25YVibjh8Wj23I5RPkPppSVSjyKD2A2mBJmWGa+KN7f2D6LNh9jkBCeyLktzjg==", - "dev": true, - "requires": { - "normalize-path": "^3.0.0", - "picomatch": "^2.0.4" - } - }, - "arg": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/arg/-/arg-4.1.2.tgz", - "integrity": 
"sha512-+ytCkGcBtHZ3V2r2Z06AncYO8jz46UEamcspGoU8lHcEbpn6J77QK0vdWvChsclg/tM5XIJC5tnjmPp7Eq6Obg==", - "dev": true - }, - "argparse": { - "version": "1.0.10", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", - "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", - "dev": true, - "requires": { - "sprintf-js": "~1.0.2" - } - }, - "array-find-index": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/array-find-index/-/array-find-index-1.0.2.tgz", - "integrity": "sha1-3wEKoSh+Fku9pvlyOwqWoexBh6E=", - "dev": true - }, - "array-union": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz", - "integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==", - "dev": true - }, - "arrgv": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/arrgv/-/arrgv-1.0.2.tgz", - "integrity": "sha512-a4eg4yhp7mmruZDQFqVMlxNRFGi/i1r87pt8SDHy0/I8PqSXoUTlWZRdAZo0VXgvEARcujbtTk8kiZRi1uDGRw==", - "dev": true - }, - "arrify": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/arrify/-/arrify-2.0.1.tgz", - "integrity": "sha512-3duEwti880xqi4eAMN8AyR4a0ByT90zoYdLlevfrvU43vb0YZwZVfxOgxWrLXXXpyugL0hNZc9G6BiB5B3nUug==", - "dev": true - }, - "assemblyscript": { - "version": "0.10.0", - "resolved": "https://registry.npmjs.org/assemblyscript/-/assemblyscript-0.10.0.tgz", - "integrity": "sha512-ErUNhHboD+zsB4oG6X1YICDAIo27Gq7LeNX6jVe+Q0W5cI51/fHwC8yJ68IukqvupmZgYPdp1JqqRXlS+BrUfA==", - "dev": true, - "requires": { - "binaryen": "93.0.0-nightly.20200514", - "long": "^4.0.0" - } - }, - "astral-regex": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/astral-regex/-/astral-regex-2.0.0.tgz", - "integrity": "sha512-Z7tMw1ytTXt5jqMcOP+OQteU1VuNK9Y02uuJtKQ1Sv69jXQKKg5cibLwGJow8yzZP+eAc18EmLGPal0bp36rvQ==", - "dev": true - }, - "at-least-node": { - "version": "1.0.0", - 
"resolved": "https://registry.npmjs.org/at-least-node/-/at-least-node-1.0.0.tgz", - "integrity": "sha512-+q/t7Ekv1EDY2l6Gda6LLiX14rU9TV20Wa3ofeQmwPFZbOMo9DXrLbOjFaaclkXKWidIaopwAObQDqwWtGUjqg==", - "dev": true - }, - "ava": { - "version": "3.12.1", - "resolved": "https://registry.npmjs.org/ava/-/ava-3.12.1.tgz", - "integrity": "sha512-cS41+X+UfrcPed+CIgne/YV/6eWxaUjHEPH+W8WvNSqWTWku5YitjZGE5cMHFuJxwHELdR541xTBRn8Uwi4PSw==", - "dev": true, - "requires": { - "@concordance/react": "^2.0.0", - "acorn": "^8.0.1", - "acorn-walk": "^8.0.0", - "ansi-styles": "^4.2.1", - "arrgv": "^1.0.2", - "arrify": "^2.0.1", - "callsites": "^3.1.0", - "chalk": "^4.1.0", - "chokidar": "^3.4.2", - "chunkd": "^2.0.1", - "ci-info": "^2.0.0", - "ci-parallel-vars": "^1.0.1", - "clean-yaml-object": "^0.1.0", - "cli-cursor": "^3.1.0", - "cli-truncate": "^2.1.0", - "code-excerpt": "^3.0.0", - "common-path-prefix": "^3.0.0", - "concordance": "^5.0.1", - "convert-source-map": "^1.7.0", - "currently-unhandled": "^0.4.1", - "debug": "^4.1.1", - "del": "^5.1.0", - "emittery": "^0.7.1", - "equal-length": "^1.0.0", - "figures": "^3.2.0", - "globby": "^11.0.1", - "ignore-by-default": "^2.0.0", - "import-local": "^3.0.2", - "indent-string": "^4.0.0", - "is-error": "^2.2.2", - "is-plain-object": "^4.1.1", - "is-promise": "^4.0.0", - "lodash": "^4.17.20", - "matcher": "^3.0.0", - "md5-hex": "^3.0.1", - "mem": "^6.1.0", - "ms": "^2.1.2", - "ora": "^5.0.0", - "p-map": "^4.0.0", - "picomatch": "^2.2.2", - "pkg-conf": "^3.1.0", - "plur": "^4.0.0", - "pretty-ms": "^7.0.0", - "read-pkg": "^5.2.0", - "resolve-cwd": "^3.0.0", - "slash": "^3.0.0", - "source-map-support": "^0.5.19", - "stack-utils": "^2.0.2", - "strip-ansi": "^6.0.0", - "supertap": "^1.0.0", - "temp-dir": "^2.0.0", - "trim-off-newlines": "^1.0.1", - "update-notifier": "^4.1.1", - "write-file-atomic": "^3.0.3", - "yargs": "^15.4.1" - }, - "dependencies": { - "ansi-styles": { - "version": "4.2.1", - "resolved": 
"https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.2.1.tgz", - "integrity": "sha512-9VGjrMsG1vePxcSweQsN20KY/c4zN0h9fLjqAbwbPfahM3t+NL+M9HC8xeXG2I8pX5NoamTGNuomEUFI7fcUjA==", - "dev": true, - "requires": { - "@types/color-name": "^1.1.1", - "color-convert": "^2.0.1" - } - }, - "chalk": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.0.tgz", - "integrity": "sha512-qwx12AxXe2Q5xQ43Ac//I6v5aXTipYrSESdOgzrN+9XjgEpyjpKuvSGaN4qE93f7TQTlerQQ8S+EQ0EyDoVL1A==", - "dev": true, - "requires": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - } - }, - "color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, - "requires": { - "color-name": "~1.1.4" - } - }, - "color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true - }, - "globby": { - "version": "11.0.1", - "resolved": "https://registry.npmjs.org/globby/-/globby-11.0.1.tgz", - "integrity": "sha512-iH9RmgwCmUJHi2z5o2l3eTtGBtXek1OYlHrbcxOYugyHLmAsZrPj43OtHThd62Buh/Vv6VyCBD2bdyWcGNQqoQ==", - "dev": true, - "requires": { - "array-union": "^2.1.0", - "dir-glob": "^3.0.1", - "fast-glob": "^3.1.1", - "ignore": "^5.1.4", - "merge2": "^1.3.0", - "slash": "^3.0.0" - } - }, - "has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "dev": true - }, - "lodash": { - "version": "4.17.20", - "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.20.tgz", - "integrity": 
"sha512-PlhdFcillOINfeV7Ni6oF1TAEayyZBoZ8bcshTHqOYJYlrqzRK5hagpagky5o4HfCzzd1TRkXPMFq6cKk9rGmA==", - "dev": true - }, - "parse-json": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.1.0.tgz", - "integrity": "sha512-+mi/lmVVNKFNVyLXV31ERiy2CY5E1/F6QtJFEzoChPRwwngMNXRDQ9GJ5WdE2Z2P4AujsOi0/+2qHID68KwfIQ==", - "dev": true, - "requires": { - "@babel/code-frame": "^7.0.0", - "error-ex": "^1.3.1", - "json-parse-even-better-errors": "^2.3.0", - "lines-and-columns": "^1.1.6" - } - }, - "picomatch": { - "version": "2.2.2", - "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.2.2.tgz", - "integrity": "sha512-q0M/9eZHzmr0AulXyPwNfZjtwZ/RBZlbN3K3CErVrk50T2ASYI7Bye0EvekFY3IP1Nt2DHu0re+V2ZHIpMkuWg==", - "dev": true - }, - "read-pkg": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/read-pkg/-/read-pkg-5.2.0.tgz", - "integrity": "sha512-Ug69mNOpfvKDAc2Q8DRpMjjzdtrnv9HcSMX+4VsZxD1aZ6ZzrIE7rlzXBtWTyhULSMKg076AW6WR5iZpD0JiOg==", - "dev": true, - "requires": { - "@types/normalize-package-data": "^2.4.0", - "normalize-package-data": "^2.5.0", - "parse-json": "^5.0.0", - "type-fest": "^0.6.0" - } - }, - "source-map-support": { - "version": "0.5.19", - "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.19.tgz", - "integrity": "sha512-Wonm7zOCIJzBGQdB+thsPar0kYuCIzYvxZwlBa87yi/Mdjv7Tip2cyVbLj5o0cFPN4EVkuTwb3GDDyUx2DGnGw==", - "dev": true, - "requires": { - "buffer-from": "^1.0.0", - "source-map": "^0.6.0" - } - }, - "supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "dev": true, - "requires": { - "has-flag": "^4.0.0" - } - }, - "type-fest": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.6.0.tgz", - "integrity": 
"sha512-q+MB8nYR1KDLrgr4G5yemftpMC7/QLqVndBmEEdqzmNj5dcFOO4Oo8qlwZE3ULT3+Zim1F8Kq4cBnikNhlCMlg==", - "dev": true - } - } - }, - "bail": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/bail/-/bail-1.0.5.tgz", - "integrity": "sha512-xFbRxM1tahm08yHBP16MMjVUAvDaBMD38zsM9EMAUN61omwLmKlOpB/Zku5QkjZ8TZ4vn53pj+t518cH0S03RQ==", - "dev": true - }, - "balanced-match": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.0.tgz", - "integrity": "sha1-ibTRmasr7kneFk6gK4nORi1xt2c=", - "dev": true - }, - "big.js": { - "version": "5.2.2", - "resolved": "https://registry.npmjs.org/big.js/-/big.js-5.2.2.tgz", - "integrity": "sha512-vyL2OymJxmarO8gxMr0mhChsO9QGwhynfuu4+MHTAW6czfq9humCB7rKpUjDd9YUiDPU4mzpyupFSvOClAwbmQ==", - "dev": true - }, - "binary-extensions": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.1.0.tgz", - "integrity": "sha512-1Yj8h9Q+QDF5FzhMs/c9+6UntbD5MkRfRwac8DoEm9ZfUBZ7tZ55YcGVAzEe4bXsdQHEk+s9S5wsOKVdZrw0tQ==", - "dev": true - }, - "binaryen": { - "version": "93.0.0-nightly.20200514", - "resolved": "https://registry.npmjs.org/binaryen/-/binaryen-93.0.0-nightly.20200514.tgz", - "integrity": "sha512-SRRItmNvhRVfoWWbRloO4i8IqkKH8rZ7/0QWRgLpM3umupK8gBpo9MY7Zp3pDysRSp+rVoqxvM5x4tFyCSa9zw==", - "dev": true - }, - "blueimp-md5": { - "version": "2.18.0", - "resolved": "https://registry.npmjs.org/blueimp-md5/-/blueimp-md5-2.18.0.tgz", - "integrity": "sha512-vE52okJvzsVWhcgUHOv+69OG3Mdg151xyn41aVQN/5W5S+S43qZhxECtYLAEHMSFWX6Mv5IZrzj3T5+JqXfj5Q==", - "dev": true - }, - "boundary": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/boundary/-/boundary-1.0.1.tgz", - "integrity": "sha1-TWfcJgLAzBbdm85+v4fpSCkPWBI=", - "dev": true - }, - "boxen": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/boxen/-/boxen-4.2.0.tgz", - "integrity": 
"sha512-eB4uT9RGzg2odpER62bBwSLvUeGC+WbRjjyyFhGsKnc8wp/m0+hQsMUvUe3H2V0D5vw0nBdO1hCJoZo5mKeuIQ==", - "dev": true, - "requires": { - "ansi-align": "^3.0.0", - "camelcase": "^5.3.1", - "chalk": "^3.0.0", - "cli-boxes": "^2.2.0", - "string-width": "^4.1.0", - "term-size": "^2.1.0", - "type-fest": "^0.8.1", - "widest-line": "^3.1.0" - }, - "dependencies": { - "ansi-styles": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.2.1.tgz", - "integrity": "sha512-9VGjrMsG1vePxcSweQsN20KY/c4zN0h9fLjqAbwbPfahM3t+NL+M9HC8xeXG2I8pX5NoamTGNuomEUFI7fcUjA==", - "dev": true, - "requires": { - "@types/color-name": "^1.1.1", - "color-convert": "^2.0.1" - } - }, - "camelcase": { - "version": "5.3.1", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", - "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", - "dev": true - }, - "chalk": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-3.0.0.tgz", - "integrity": "sha512-4D3B6Wf41KOYRFdszmDqMCGq5VV/uMAB273JILmO+3jAlh8X4qDtdtgCR3fxtbLEMzSx22QdhnDcJvu2u1fVwg==", - "dev": true, - "requires": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - } - }, - "color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, - "requires": { - "color-name": "~1.1.4" - } - }, - "color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true - }, - "has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": 
"sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "dev": true - }, - "supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "dev": true, - "requires": { - "has-flag": "^4.0.0" - } - }, - "type-fest": { - "version": "0.8.1", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.8.1.tgz", - "integrity": "sha512-4dbzIzqvjtgiM5rw1k5rEHtBANKmdudhGyBEajN01fEyhaAIhsoKNy6y7+IN93IfpFtwY9iqi7kD+xwKhQsNJA==", - "dev": true - } - } - }, - "brace-expansion": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", - "dev": true, - "requires": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, - "braces": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", - "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", - "dev": true, - "requires": { - "fill-range": "^7.0.1" - } - }, - "buffer-from": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.1.tgz", - "integrity": "sha512-MQcXEUbCKtEo7bhqEs6560Hyd4XaovZlO/k9V3hjVUF/zwW7KBVdSK4gIt/bzwS9MbR5qob+F5jusZsb0YQK2A==", - "dev": true - }, - "cacheable-request": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/cacheable-request/-/cacheable-request-6.1.0.tgz", - "integrity": "sha512-Oj3cAGPCqOZX7Rz64Uny2GYAZNliQSqfbePrgAQ1wKAihYmCUnraBtJtKcGR4xz7wF+LoJC+ssFZvv5BgF9Igg==", - "dev": true, - "requires": { - "clone-response": "^1.0.2", - "get-stream": "^5.1.0", - "http-cache-semantics": "^4.0.0", - "keyv": "^3.0.0", - "lowercase-keys": "^2.0.0", - 
"normalize-url": "^4.1.0", - "responselike": "^1.0.2" - }, - "dependencies": { - "get-stream": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-5.2.0.tgz", - "integrity": "sha512-nBF+F1rAZVCu/p7rjzgA+Yb4lfYXrpl7a6VmJrU8wF9I1CKvP/QwPNZHnOlwbTkY6dvtFIzFMSyQXbLoTQPRpA==", - "dev": true, - "requires": { - "pump": "^3.0.0" - } - }, - "lowercase-keys": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-2.0.0.tgz", - "integrity": "sha512-tqNXrS78oMOE73NMxK4EMLQsQowWf8jKooH9g7xPavRT706R6bkQJ6DY2Te7QukaZsulxa30wQ7bk0pm4XiHmA==", - "dev": true - } - } - }, - "callsites": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", - "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", - "dev": true - }, - "camelcase": { - "version": "5.3.1", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", - "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", - "dev": true - }, - "camelcase-keys": { - "version": "6.2.2", - "resolved": "https://registry.npmjs.org/camelcase-keys/-/camelcase-keys-6.2.2.tgz", - "integrity": "sha512-YrwaA0vEKazPBkn0ipTiMpSajYDSe+KjQfrjhcBMxJt/znbvlHd8Pw/Vamaz5EB4Wfhs3SUR3Z9mwRu/P3s3Yg==", - "dev": true, - "requires": { - "camelcase": "^5.3.1", - "map-obj": "^4.0.0", - "quick-lru": "^4.0.1" - } - }, - "chalk": { - "version": "2.4.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", - "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", - "dev": true, - "requires": { - "ansi-styles": "^3.2.1", - "escape-string-regexp": "^1.0.5", - "supports-color": "^5.3.0" - } - }, - "character-entities": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/character-entities/-/character-entities-1.2.4.tgz", - "integrity": 
"sha512-iBMyeEHxfVnIakwOuDXpVkc54HijNgCyQB2w0VfGQThle6NXn50zU6V/u+LDhxHcDUPojn6Kpga3PTAD8W1bQw==", - "dev": true - }, - "character-entities-legacy": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/character-entities-legacy/-/character-entities-legacy-1.1.4.tgz", - "integrity": "sha512-3Xnr+7ZFS1uxeiUDvV02wQ+QDbc55o97tIV5zHScSPJpcLm/r0DFPcoY3tYRp+VZukxuMeKgXYmsXQHO05zQeA==", - "dev": true - }, - "character-reference-invalid": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/character-reference-invalid/-/character-reference-invalid-1.1.4.tgz", - "integrity": "sha512-mKKUkUbhPpQlCOfIuZkvSEgktjPFIsZKRRbC6KWVEMvlzblj3i3asQv5ODsrwt0N3pHAEvjP8KTQPHkp0+6jOg==", - "dev": true - }, - "chokidar": { - "version": "3.4.2", - "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.4.2.tgz", - "integrity": "sha512-IZHaDeBeI+sZJRX7lGcXsdzgvZqKv6sECqsbErJA4mHWfpRrD8B97kSFN4cQz6nGBGiuFia1MKR4d6c1o8Cv7A==", - "dev": true, - "requires": { - "anymatch": "~3.1.1", - "braces": "~3.0.2", - "fsevents": "~2.1.2", - "glob-parent": "~5.1.0", - "is-binary-path": "~2.1.0", - "is-glob": "~4.0.1", - "normalize-path": "~3.0.0", - "readdirp": "~3.4.0" - } - }, - "chunkd": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/chunkd/-/chunkd-2.0.1.tgz", - "integrity": "sha512-7d58XsFmOq0j6el67Ug9mHf9ELUXsQXYJBkyxhH/k+6Ke0qXRnv0kbemx+Twc6fRJ07C49lcbdgm9FL1Ei/6SQ==", - "dev": true - }, - "ci-info": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-2.0.0.tgz", - "integrity": "sha512-5tK7EtrZ0N+OLFMthtqOj4fI2Jeb88C4CAZPu25LDVUgXJ0A3Js4PMGqrn0JU1W0Mh1/Z8wZzYPxqUrXeBboCQ==", - "dev": true - }, - "ci-parallel-vars": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/ci-parallel-vars/-/ci-parallel-vars-1.0.1.tgz", - "integrity": "sha512-uvzpYrpmidaoxvIQHM+rKSrigjOe9feHYbw4uOI2gdfe1C3xIlxO+kVXq83WQWNniTf8bAxVpy+cQeFQsMERKg==", - "dev": true - }, - "clean-stack": { - "version": "2.2.0", - "resolved": 
"https://registry.npmjs.org/clean-stack/-/clean-stack-2.2.0.tgz", - "integrity": "sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A==", - "dev": true - }, - "clean-yaml-object": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/clean-yaml-object/-/clean-yaml-object-0.1.0.tgz", - "integrity": "sha1-Y/sRDcLOGoTcIfbZM0h20BCui2g=", - "dev": true - }, - "cli-boxes": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/cli-boxes/-/cli-boxes-2.2.1.tgz", - "integrity": "sha512-y4coMcylgSCdVinjiDBuR8PCC2bLjyGTwEmPb9NHR/QaNU6EUOXcTY/s6VjGMD6ENSEaeQYHCY0GNGS5jfMwPw==", - "dev": true - }, - "cli-cursor": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-3.1.0.tgz", - "integrity": "sha512-I/zHAwsKf9FqGoXM4WWRACob9+SNukZTd94DWF57E4toouRulbCxcUh6RKUEOQlYTHJnzkPMySvPNaaSLNfLZw==", - "dev": true, - "requires": { - "restore-cursor": "^3.1.0" - } - }, - "cli-spinners": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/cli-spinners/-/cli-spinners-2.4.0.tgz", - "integrity": "sha512-sJAofoarcm76ZGpuooaO0eDy8saEy+YoZBLjC4h8srt4jeBnkYeOgqxgsJQTpyt2LjI5PTfLJHSL+41Yu4fEJA==", - "dev": true - }, - "cli-truncate": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/cli-truncate/-/cli-truncate-2.1.0.tgz", - "integrity": "sha512-n8fOixwDD6b/ObinzTrp1ZKFzbgvKZvuz/TvejnLn1aQfC6r52XEx85FmuC+3HI+JM7coBRXUvNqEU2PHVrHpg==", - "dev": true, - "requires": { - "slice-ansi": "^3.0.0", - "string-width": "^4.2.0" - } - }, - "cliui": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/cliui/-/cliui-6.0.0.tgz", - "integrity": "sha512-t6wbgtoCXvAzst7QgXxJYqPt0usEfbgQdftEPbLL/cvv6HPE5VgvqCuAIDR0NgU52ds6rFwqrgakNLrHEjCbrQ==", - "dev": true, - "requires": { - "string-width": "^4.2.0", - "strip-ansi": "^6.0.0", - "wrap-ansi": "^6.2.0" - } - }, - "clone": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/clone/-/clone-1.0.4.tgz", - "integrity": 
"sha1-2jCcwmPfFZlMaIypAheco8fNfH4=", - "dev": true - }, - "clone-response": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/clone-response/-/clone-response-1.0.2.tgz", - "integrity": "sha1-0dyXOSAxTfZ/vrlCI7TuNQI56Ws=", - "dev": true, - "requires": { - "mimic-response": "^1.0.0" - } - }, - "code-excerpt": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/code-excerpt/-/code-excerpt-3.0.0.tgz", - "integrity": "sha512-VHNTVhd7KsLGOqfX3SyeO8RyYPMp1GJOg194VITk04WMYCv4plV68YWe6TJZxd9MhobjtpMRnVky01gqZsalaw==", - "dev": true, - "requires": { - "convert-to-spaces": "^1.0.1" - } - }, - "collapse-white-space": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/collapse-white-space/-/collapse-white-space-1.0.6.tgz", - "integrity": "sha512-jEovNnrhMuqyCcjfEJA56v0Xq8SkIoPKDyaHahwo3POf4qcSXqMYuwNcOTzp74vTsR9Tn08z4MxWqAhcekogkQ==", - "dev": true - }, - "color-convert": { - "version": "1.9.3", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", - "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", - "dev": true, - "requires": { - "color-name": "1.1.3" - } - }, - "color-name": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", - "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=", - "dev": true - }, - "commander": { - "version": "2.20.3", - "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz", - "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==", - "dev": true, - "optional": true - }, - "common-path-prefix": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/common-path-prefix/-/common-path-prefix-3.0.0.tgz", - "integrity": "sha512-QE33hToZseCH3jS0qN96O/bSh3kaw/h+Tq7ngyY9eWDUnTlTNUyqfqvCXioLe5Na5jFsL78ra/wuBU4iuEgd4w==", - "dev": true - }, - "concat-map": { - "version": "0.0.1", - "resolved": 
"https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", - "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=", - "dev": true - }, - "concat-md": { - "version": "0.3.5", - "resolved": "https://registry.npmjs.org/concat-md/-/concat-md-0.3.5.tgz", - "integrity": "sha512-JVb5rp3JKFqpc6aapqsjgE8k6fWpDJ9YNBNn1Vyi009B1lWc35/cYmT/Rjec7gI4+giC1azkC5RhdHyI2cD89w==", - "dev": true, - "requires": { - "doctoc": "^1.4.0", - "front-matter": "^3.1.0", - "globby": "^11", - "install": "^0.13.0", - "lodash.startcase": "^4.4.0", - "meow": "^7.0.0", - "npm": "^6.14.5", - "transform-markdown-links": "^2.0.0" - }, - "dependencies": { - "globby": { - "version": "11.0.1", - "resolved": "https://registry.npmjs.org/globby/-/globby-11.0.1.tgz", - "integrity": "sha512-iH9RmgwCmUJHi2z5o2l3eTtGBtXek1OYlHrbcxOYugyHLmAsZrPj43OtHThd62Buh/Vv6VyCBD2bdyWcGNQqoQ==", - "dev": true, - "requires": { - "array-union": "^2.1.0", - "dir-glob": "^3.0.1", - "fast-glob": "^3.1.1", - "ignore": "^5.1.4", - "merge2": "^1.3.0", - "slash": "^3.0.0" - } - } - } - }, - "concordance": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/concordance/-/concordance-5.0.1.tgz", - "integrity": "sha512-TbNtInKVElgEBnJ1v2Xg+MFX2lvFLbmlv3EuSC5wTfCwpB8kC3w3mffF6cKuUhkn475Ym1f1I4qmuXzx2+uXpw==", - "dev": true, - "requires": { - "date-time": "^3.1.0", - "esutils": "^2.0.3", - "fast-diff": "^1.2.0", - "js-string-escape": "^1.0.1", - "lodash": "^4.17.15", - "md5-hex": "^3.0.1", - "semver": "^7.3.2", - "well-known-symbols": "^2.0.0" - }, - "dependencies": { - "semver": { - "version": "7.3.2", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.3.2.tgz", - "integrity": "sha512-OrOb32TeeambH6UrhtShmF7CRDqhL6/5XpPNp2DuRH6+9QLw/orhp72j87v8Qa1ScDkvrrBNpZcDejAirJmfXQ==", - "dev": true - } - } - }, - "configstore": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/configstore/-/configstore-5.0.1.tgz", - "integrity": 
"sha512-aMKprgk5YhBNyH25hj8wGt2+D52Sw1DRRIzqBwLp2Ya9mFmY8KPvvtvmna8SxVR9JMZ4kzMD68N22vlaRpkeFA==", - "dev": true, - "requires": { - "dot-prop": "^5.2.0", - "graceful-fs": "^4.1.2", - "make-dir": "^3.0.0", - "unique-string": "^2.0.0", - "write-file-atomic": "^3.0.0", - "xdg-basedir": "^4.0.0" - } - }, - "convert-source-map": { - "version": "1.7.0", - "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-1.7.0.tgz", - "integrity": "sha512-4FJkXzKXEDB1snCFZlLP4gpC3JILicCpGbzG9f9G7tGqGCzETQ2hWPrcinA9oU4wtf2biUaEH5065UnMeR33oA==", - "dev": true, - "requires": { - "safe-buffer": "~5.1.1" - } - }, - "convert-to-spaces": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/convert-to-spaces/-/convert-to-spaces-1.0.2.tgz", - "integrity": "sha1-fj5Iu+bZl7FBfdyihoIEtNPYVxU=", - "dev": true - }, - "core-util-is": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz", - "integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac=", - "dev": true - }, - "crypto-random-string": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/crypto-random-string/-/crypto-random-string-2.0.0.tgz", - "integrity": "sha512-v1plID3y9r/lPhviJ1wrXpLeyUIGAZ2SHNYTEapm7/8A9nLPoyvVp3RK/EPFqn5kEznyWgYZNsRtYYIWbuG8KA==", - "dev": true - }, - "currently-unhandled": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/currently-unhandled/-/currently-unhandled-0.4.1.tgz", - "integrity": "sha1-mI3zP+qxke95mmE2nddsF635V+o=", - "dev": true, - "requires": { - "array-find-index": "^1.0.1" - } - }, - "date-time": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/date-time/-/date-time-3.1.0.tgz", - "integrity": "sha512-uqCUKXE5q1PNBXjPqvwhwJf9SwMoAHBgWJ6DcrnS5o+W2JOiIILl0JEdVD8SGujrNS02GGxgwAg2PN2zONgtjg==", - "dev": true, - "requires": { - "time-zone": "^1.0.0" - } - }, - "debug": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.2.0.tgz", - "integrity": 
"sha512-IX2ncY78vDTjZMFUdmsvIRFY2Cf4FnD0wRs+nQwJU8Lu99/tPFdb0VybiiMTPe3I6rQmwsqQqRBvxU+bZ/I8sg==", - "dev": true, - "requires": { - "ms": "2.1.2" - } - }, - "decamelize": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-1.2.0.tgz", - "integrity": "sha1-9lNNFRSCabIDUue+4m9QH5oZEpA=", - "dev": true - }, - "decamelize-keys": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/decamelize-keys/-/decamelize-keys-1.1.0.tgz", - "integrity": "sha1-0XGoeTMlKAfrPLYdwcFEXQeN8tk=", - "dev": true, - "requires": { - "decamelize": "^1.1.0", - "map-obj": "^1.0.0" - }, - "dependencies": { - "map-obj": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/map-obj/-/map-obj-1.0.1.tgz", - "integrity": "sha1-2TPOuSBdgr3PSIb2dCvcK03qFG0=", - "dev": true - } - } - }, - "decompress-response": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-3.3.0.tgz", - "integrity": "sha1-gKTdMjdIOEv6JICDYirt7Jgq3/M=", - "dev": true, - "requires": { - "mimic-response": "^1.0.0" - } - }, - "deep-extend": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz", - "integrity": "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==", - "dev": true - }, - "defaults": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/defaults/-/defaults-1.0.3.tgz", - "integrity": "sha1-xlYFHpgX2f8I7YgUd/P+QBnz730=", - "dev": true, - "requires": { - "clone": "^1.0.2" - } - }, - "defer-to-connect": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/defer-to-connect/-/defer-to-connect-1.1.3.tgz", - "integrity": "sha512-0ISdNousHvZT2EiFlZeZAHBUvSxmKswVCEf8hW7KWgG4a8MVEu/3Vb6uWYozkjylyCxe0JBIiRB1jV45S70WVQ==", - "dev": true - }, - "del": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/del/-/del-5.1.0.tgz", - "integrity": 
"sha512-wH9xOVHnczo9jN2IW68BabcecVPxacIA3g/7z6vhSU/4stOKQzeCRK0yD0A24WiAAUJmmVpWqrERcTxnLo3AnA==", - "dev": true, - "requires": { - "globby": "^10.0.1", - "graceful-fs": "^4.2.2", - "is-glob": "^4.0.1", - "is-path-cwd": "^2.2.0", - "is-path-inside": "^3.0.1", - "p-map": "^3.0.0", - "rimraf": "^3.0.0", - "slash": "^3.0.0" - }, - "dependencies": { - "p-map": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/p-map/-/p-map-3.0.0.tgz", - "integrity": "sha512-d3qXVTF/s+W+CdJ5A29wywV2n8CQQYahlgz2bFiA+4eVNJbHJodPZ+/gXwPGh0bOqA+j8S+6+ckmvLGPk1QpxQ==", - "dev": true, - "requires": { - "aggregate-error": "^3.0.0" - } - } - } - }, - "diff": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/diff/-/diff-4.0.1.tgz", - "integrity": "sha512-s2+XdvhPCOF01LRQBC8hf4vhbVmI2CGS5aZnxLJlT5FtdhPCDFq80q++zK2KlrVorVDdL5BOGZ/VfLrVtYNF+Q==", - "dev": true - }, - "dir-glob": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", - "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", - "dev": true, - "requires": { - "path-type": "^4.0.0" - } - }, - "doctoc": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/doctoc/-/doctoc-1.4.0.tgz", - "integrity": "sha512-8IAq3KdMkxhXCUF+xdZxdJxwuz8N2j25sMgqiu4U4JWluN9tRKMlAalxGASszQjlZaBprdD2YfXpL3VPWUD4eg==", - "dev": true, - "requires": { - "@textlint/markdown-to-ast": "~6.0.9", - "anchor-markdown-header": "^0.5.5", - "htmlparser2": "~3.9.2", - "minimist": "~1.2.0", - "underscore": "~1.8.3", - "update-section": "^0.3.0" - }, - "dependencies": { - "underscore": { - "version": "1.8.3", - "resolved": "https://registry.npmjs.org/underscore/-/underscore-1.8.3.tgz", - "integrity": "sha1-Tz+1OxBuYJf8+ctBCfKl6b36UCI=", - "dev": true - } - } - }, - "dom-serializer": { - "version": "0.2.2", - "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-0.2.2.tgz", - "integrity": 
"sha512-2/xPb3ORsQ42nHYiSunXkDjPLBaEj/xTwUO4B7XCZQTRk7EBtTOPaygh10YAAh2OI1Qrp6NWfpAhzswj0ydt9g==", - "dev": true, - "requires": { - "domelementtype": "^2.0.1", - "entities": "^2.0.0" - }, - "dependencies": { - "domelementtype": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-2.0.2.tgz", - "integrity": "sha512-wFwTwCVebUrMgGeAwRL/NhZtHAUyT9n9yg4IMDwf10+6iCMxSkVq9MGCVEH+QZWo1nNidy8kNvwmv4zWHDTqvA==", - "dev": true - }, - "entities": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/entities/-/entities-2.0.3.tgz", - "integrity": "sha512-MyoZ0jgnLvB2X3Lg5HqpFmn1kybDiIfEQmKzTb5apr51Rb+T3KdmMiqa70T+bhGnyv7bQ6WMj2QMHpGMmlrUYQ==", - "dev": true - } - } - }, - "domelementtype": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-1.3.1.tgz", - "integrity": "sha512-BSKB+TSpMpFI/HOxCNr1O8aMOTZ8hT3pM3GQ0w/mWRmkhEDSFJkkyzz4XQsBV44BChwGkrDfMyjVD0eA2aFV3w==", - "dev": true - }, - "domhandler": { - "version": "2.4.2", - "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-2.4.2.tgz", - "integrity": "sha512-JiK04h0Ht5u/80fdLMCEmV4zkNh2BcoMFBmZ/91WtYZ8qVXSKjiw7fXMgFPnHcSZgOo3XdinHvmnDUeMf5R4wA==", - "dev": true, - "requires": { - "domelementtype": "1" - } - }, - "domutils": { - "version": "1.7.0", - "resolved": "https://registry.npmjs.org/domutils/-/domutils-1.7.0.tgz", - "integrity": "sha512-Lgd2XcJ/NjEw+7tFvfKxOzCYKZsdct5lczQ2ZaQY8Djz7pfAD3Gbp8ySJWtreII/vDlMVmxwa6pHmdxIYgttDg==", - "dev": true, - "requires": { - "dom-serializer": "0", - "domelementtype": "1" - } - }, - "dot-prop": { - "version": "5.3.0", - "resolved": "https://registry.npmjs.org/dot-prop/-/dot-prop-5.3.0.tgz", - "integrity": "sha512-QM8q3zDe58hqUqjraQOmzZ1LIH9SWQJTlEKCH4kJ2oQvLZk7RbQXvtDM2XEq3fwkV9CCvvH4LA0AV+ogFsBM2Q==", - "dev": true, - "requires": { - "is-obj": "^2.0.0" - } - }, - "duplexer3": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/duplexer3/-/duplexer3-0.1.4.tgz", 
- "integrity": "sha1-7gHdHKwO08vH/b6jfcCo8c4ALOI=", - "dev": true - }, - "emittery": { - "version": "0.7.1", - "resolved": "https://registry.npmjs.org/emittery/-/emittery-0.7.1.tgz", - "integrity": "sha512-d34LN4L6h18Bzz9xpoku2nPwKxCPlPMr3EEKTkoEBi+1/+b0lcRkRJ1UVyyZaKNeqGR3swcGl6s390DNO4YVgQ==", - "dev": true - }, - "emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", - "dev": true - }, - "emojis-list": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/emojis-list/-/emojis-list-2.1.0.tgz", - "integrity": "sha1-TapNnbAPmBmIDHn6RXrlsJof04k=", - "dev": true - }, - "end-of-stream": { - "version": "1.4.4", - "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz", - "integrity": "sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==", - "dev": true, - "requires": { - "once": "^1.4.0" - } - }, - "enhanced-resolve": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-4.1.1.tgz", - "integrity": "sha512-98p2zE+rL7/g/DzMHMTF4zZlCgeVdJ7yr6xzEpJRYwFYrGi9ANdn5DnJURg6RpBkyk60XYDnWIv51VfIhfNGuA==", - "dev": true, - "requires": { - "graceful-fs": "^4.1.2", - "memory-fs": "^0.5.0", - "tapable": "^1.0.0" - } - }, - "entities": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/entities/-/entities-1.1.2.tgz", - "integrity": "sha512-f2LZMYl1Fzu7YSBKg+RoROelpOaNrcGmE9AZubeDfrCEia483oW4MI4VyFd5VNHIgQ/7qm1I0wUHK1eJnn2y2w==", - "dev": true - }, - "equal-length": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/equal-length/-/equal-length-1.0.1.tgz", - "integrity": "sha1-IcoRLUirJLTh5//A5TOdMf38J0w=", - "dev": true - }, - "errno": { - "version": "0.1.7", - "resolved": "https://registry.npmjs.org/errno/-/errno-0.1.7.tgz", - "integrity": 
"sha512-MfrRBDWzIWifgq6tJj60gkAwtLNb6sQPlcFrSOflcP1aFmmruKQ2wRnze/8V6kgyz7H3FF8Npzv78mZ7XLLflg==", - "dev": true, - "requires": { - "prr": "~1.0.1" - } - }, - "error-ex": { - "version": "1.3.2", - "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz", - "integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==", - "dev": true, - "requires": { - "is-arrayish": "^0.2.1" - } - }, - "escape-goat": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/escape-goat/-/escape-goat-2.1.1.tgz", - "integrity": "sha512-8/uIhbG12Csjy2JEW7D9pHbreaVaS/OpN3ycnyvElTdwM5n6GY6W6e2IPemfvGZeUMqZ9A/3GqIZMgKnBhAw/Q==", - "dev": true - }, - "escape-string-regexp": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", - "integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=", - "dev": true - }, - "esprima": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", - "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", - "dev": true - }, - "esutils": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", - "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", - "dev": true - }, - "extend": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", - "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==", - "dev": true - }, - "fast-diff": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/fast-diff/-/fast-diff-1.2.0.tgz", - "integrity": "sha512-xJuoT5+L99XlZ8twedaRf6Ax2TgQVxvgZOYoPKqZufmJib0tL2tegPBOZb1pVNgIhlqDlA0eO0c3wBvQcmzx4w==", - "dev": true - }, - "fast-glob": { - "version": "3.1.1", - "resolved": 
"https://registry.npmjs.org/fast-glob/-/fast-glob-3.1.1.tgz", - "integrity": "sha512-nTCREpBY8w8r+boyFYAx21iL6faSsQynliPHM4Uf56SbkyohCNxpVPEH9xrF5TXKy+IsjkPUHDKiUkzBVRXn9g==", - "dev": true, - "requires": { - "@nodelib/fs.stat": "^2.0.2", - "@nodelib/fs.walk": "^1.2.3", - "glob-parent": "^5.1.0", - "merge2": "^1.3.0", - "micromatch": "^4.0.2" - } - }, - "fastq": { - "version": "1.6.0", - "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.6.0.tgz", - "integrity": "sha512-jmxqQ3Z/nXoeyDmWAzF9kH1aGZSis6e/SbfPmJpUnyZ0ogr6iscHQaml4wsEepEWSdtmpy+eVXmCRIMpxaXqOA==", - "dev": true, - "requires": { - "reusify": "^1.0.0" - } - }, - "fault": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/fault/-/fault-1.0.4.tgz", - "integrity": "sha512-CJ0HCB5tL5fYTEA7ToAq5+kTwd++Borf1/bifxd9iT70QcXr4MRrO3Llf8Ifs70q+SJcGHFtnIE/Nw6giCtECA==", - "dev": true, - "requires": { - "format": "^0.2.0" - } - }, - "figures": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/figures/-/figures-3.2.0.tgz", - "integrity": "sha512-yaduQFRKLXYOGgEn6AZau90j3ggSOyiqXU0F9JZfeXYhNa+Jk4X+s45A2zg5jns87GAFa34BBm2kXw4XpNcbdg==", - "dev": true, - "requires": { - "escape-string-regexp": "^1.0.5" - } - }, - "fill-range": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", - "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", - "dev": true, - "requires": { - "to-regex-range": "^5.0.1" - } - }, - "find-up": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", - "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", - "dev": true, - "requires": { - "locate-path": "^5.0.0", - "path-exists": "^4.0.0" - } - }, - "format": { - "version": "0.2.2", - "resolved": "https://registry.npmjs.org/format/-/format-0.2.2.tgz", - "integrity": "sha1-1hcBB+nv3E7TDJ3DkBbflCtctYs=", - "dev": true - 
}, - "front-matter": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/front-matter/-/front-matter-3.2.1.tgz", - "integrity": "sha512-YUhgEhbL6tG+Ok3vTGIoSDKqcr47aSDvyhEqIv8B+YuBJFsPnOiArNXTPp2yO07NL+a0L4+2jXlKlKqyVcsRRA==", - "dev": true, - "requires": { - "js-yaml": "^3.13.1" - } - }, - "fs-extra": { - "version": "8.1.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-8.1.0.tgz", - "integrity": "sha512-yhlQgA6mnOJUKOsRUFsgJdQCvkKhcz8tlZG5HBQfReYZy46OwLcY+Zia0mtdHsOo9y/hP+CxMN0TU9QxoOtG4g==", - "dev": true, - "requires": { - "graceful-fs": "^4.2.0", - "jsonfile": "^4.0.0", - "universalify": "^0.1.0" - } - }, - "fs.realpath": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", - "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=", - "dev": true - }, - "fsevents": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.1.3.tgz", - "integrity": "sha512-Auw9a4AxqWpa9GUfj370BMPzzyncfBABW8Mab7BGWBYDj4Isgq+cDKtx0i6u9jcX9pQDnswsaaOTgTmA5pEjuQ==", - "dev": true, - "optional": true - }, - "get-caller-file": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", - "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", - "dev": true - }, - "get-stream": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-4.1.0.tgz", - "integrity": "sha512-GMat4EJ5161kIy2HevLlr4luNjBgvmj413KaQA7jt4V8B4RDsfpHk7WQ9GVqfYyyx8OS/L66Kox+rJRNklLK7w==", - "dev": true, - "requires": { - "pump": "^3.0.0" - } - }, - "glob": { - "version": "7.1.6", - "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.6.tgz", - "integrity": "sha512-LwaxwyZ72Lk7vZINtNNrywX0ZuLyStrdDtabefZKAY5ZGJhVtgdznluResxNmPitE0SAO+O26sWTHeKSI2wMBA==", - "dev": true, - "requires": { - "fs.realpath": "^1.0.0", - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": 
"^3.0.4", - "once": "^1.3.0", - "path-is-absolute": "^1.0.0" - } - }, - "glob-parent": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.0.tgz", - "integrity": "sha512-qjtRgnIVmOfnKUE3NJAQEdk+lKrxfw8t5ke7SXtfMTHcjsBfOfWXCQfdb30zfDoZQ2IRSIiidmjtbHZPZ++Ihw==", - "dev": true, - "requires": { - "is-glob": "^4.0.1" - } - }, - "global-dirs": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/global-dirs/-/global-dirs-2.0.1.tgz", - "integrity": "sha512-5HqUqdhkEovj2Of/ms3IeS/EekcO54ytHRLV4PEY2rhRwrHXLQjeVEES0Lhka0xwNDtGYn58wyC4s5+MHsOO6A==", - "dev": true, - "requires": { - "ini": "^1.3.5" - } - }, - "globby": { - "version": "10.0.1", - "resolved": "https://registry.npmjs.org/globby/-/globby-10.0.1.tgz", - "integrity": "sha512-sSs4inE1FB2YQiymcmTv6NWENryABjUNPeWhOvmn4SjtKybglsyPZxFB3U1/+L1bYi0rNZDqCLlHyLYDl1Pq5A==", - "dev": true, - "requires": { - "@types/glob": "^7.1.1", - "array-union": "^2.1.0", - "dir-glob": "^3.0.1", - "fast-glob": "^3.0.3", - "glob": "^7.1.3", - "ignore": "^5.1.1", - "merge2": "^1.2.3", - "slash": "^3.0.0" - } - }, - "got": { - "version": "9.6.0", - "resolved": "https://registry.npmjs.org/got/-/got-9.6.0.tgz", - "integrity": "sha512-R7eWptXuGYxwijs0eV+v3o6+XH1IqVK8dJOEecQfTmkncw9AV4dcw/Dhxi8MdlqPthxxpZyizMzyg8RTmEsG+Q==", - "dev": true, - "requires": { - "@sindresorhus/is": "^0.14.0", - "@szmarczak/http-timer": "^1.1.2", - "cacheable-request": "^6.0.0", - "decompress-response": "^3.3.0", - "duplexer3": "^0.1.4", - "get-stream": "^4.1.0", - "lowercase-keys": "^1.0.1", - "mimic-response": "^1.0.1", - "p-cancelable": "^1.0.0", - "to-readable-stream": "^1.0.0", - "url-parse-lax": "^3.0.0" - } - }, - "graceful-fs": { - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.3.tgz", - "integrity": "sha512-a30VEBm4PEdx1dRB7MFK7BejejvCvBronbLjht+sHuGYj8PHs7M/5Z+rt5lw551vZ7yfTCj4Vuyy3mSJytDWRQ==", - "dev": true - }, - "handlebars": { - "version": "4.7.6", - 
"resolved": "https://registry.npmjs.org/handlebars/-/handlebars-4.7.6.tgz", - "integrity": "sha512-1f2BACcBfiwAfStCKZNrUCgqNZkGsAT7UM3kkYtXuLo0KnaVfjKOyf7PRzB6++aK9STyT1Pd2ZCPe3EGOXleXA==", - "dev": true, - "requires": { - "minimist": "^1.2.5", - "neo-async": "^2.6.0", - "source-map": "^0.6.1", - "uglify-js": "^3.1.4", - "wordwrap": "^1.0.0" - } - }, - "hard-rejection": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/hard-rejection/-/hard-rejection-2.1.0.tgz", - "integrity": "sha512-VIZB+ibDhx7ObhAe7OVtoEbuP4h/MuOTHJ+J8h/eBXotJYl0fBgR72xDFCKgIh22OJZIOVNxBMWuhAr10r8HdA==", - "dev": true - }, - "has-flag": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", - "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0=", - "dev": true - }, - "has-yarn": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/has-yarn/-/has-yarn-2.1.0.tgz", - "integrity": "sha512-UqBRqi4ju7T+TqGNdqAO0PaSVGsDGJUBQvk9eUWNGRY1CFGDzYhLWoM7JQEemnlvVcv/YEmc2wNW8BC24EnUsw==", - "dev": true - }, - "highlight.js": { - "version": "10.6.0", - "resolved": "https://registry.npmjs.org/highlight.js/-/highlight.js-10.6.0.tgz", - "integrity": "sha512-8mlRcn5vk/r4+QcqerapwBYTe+iPL5ih6xrNylxrnBdHQiijDETfXX7VIxC3UiCRiINBJfANBAsPzAvRQj8RpQ==", - "dev": true - }, - "hosted-git-info": { - "version": "2.8.5", - "resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-2.8.5.tgz", - "integrity": "sha512-kssjab8CvdXfcXMXVcvsXum4Hwdq9XGtRD3TteMEvEbq0LXyiNQr6AprqKqfeaDXze7SxWvRxdpwE6ku7ikLkg==", - "dev": true - }, - "htmlparser2": { - "version": "3.9.2", - "resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-3.9.2.tgz", - "integrity": "sha1-G9+HrMoPP55T+k/M6w9LTLsAszg=", - "dev": true, - "requires": { - "domelementtype": "^1.3.0", - "domhandler": "^2.3.0", - "domutils": "^1.5.1", - "entities": "^1.1.1", - "inherits": "^2.0.1", - "readable-stream": "^2.0.2" - } - }, - "http-cache-semantics": { - "version": "4.1.0", - 
"resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-4.1.0.tgz", - "integrity": "sha512-carPklcUh7ROWRK7Cv27RPtdhYhUsela/ue5/jKzjegVvXDqM2ILE9Q2BGn9JZJh1g87cp56su/FgQSzcWS8cQ==", - "dev": true - }, - "ignore": { - "version": "5.1.4", - "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.1.4.tgz", - "integrity": "sha512-MzbUSahkTW1u7JpKKjY7LCARd1fU5W2rLdxlM4kdkayuCwZImjkpluF9CM1aLewYJguPDqewLam18Y6AU69A8A==", - "dev": true - }, - "ignore-by-default": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ignore-by-default/-/ignore-by-default-2.0.0.tgz", - "integrity": "sha512-+mQSgMRiFD3L3AOxLYOCxjIq4OnAmo5CIuC+lj5ehCJcPtV++QacEV7FdpzvYxH6DaOySWzQU6RR0lPLy37ckA==", - "dev": true - }, - "import-lazy": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/import-lazy/-/import-lazy-2.1.0.tgz", - "integrity": "sha1-BWmOPUXIjo1+nZLLBYTnfwlvPkM=", - "dev": true - }, - "import-local": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/import-local/-/import-local-3.0.2.tgz", - "integrity": "sha512-vjL3+w0oulAVZ0hBHnxa/Nm5TAurf9YLQJDhqRZyqb+VKGOB6LU8t9H1Nr5CIo16vh9XfJTOoHwU0B71S557gA==", - "dev": true, - "requires": { - "pkg-dir": "^4.2.0", - "resolve-cwd": "^3.0.0" - } - }, - "imurmurhash": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", - "integrity": "sha1-khi5srkoojixPcT7a21XbyMUU+o=", - "dev": true - }, - "indent-string": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz", - "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==", - "dev": true - }, - "inflight": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", - "integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=", - "dev": true, - "requires": { - "once": "^1.3.0", - "wrappy": "1" - } - }, - "inherits": { - "version": "2.0.4", - 
"resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", - "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", - "dev": true - }, - "ini": { - "version": "1.3.7", - "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.7.tgz", - "integrity": "sha512-iKpRpXP+CrP2jyrxvg1kMUpXDyRUFDWurxbnVT1vQPx+Wz9uCYsMIqYuSBLV+PAaZG/d7kRLKRFc9oDMsH+mFQ==", - "dev": true - }, - "install": { - "version": "0.13.0", - "resolved": "https://registry.npmjs.org/install/-/install-0.13.0.tgz", - "integrity": "sha512-zDml/jzr2PKU9I8J/xyZBQn8rPCAY//UOYNmR01XwNwyfhEWObo2SWfSl1+0tm1u6PhxLwDnfsT/6jB7OUxqFA==", - "dev": true - }, - "interpret": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/interpret/-/interpret-1.4.0.tgz", - "integrity": "sha512-agE4QfB2Lkp9uICn7BAqoscw4SZP9kTE2hxiFI3jBPmXJfdqiahTbUuKGsMoN2GtqL9AxhYioAcVvgsb1HvRbA==", - "dev": true - }, - "irregular-plurals": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/irregular-plurals/-/irregular-plurals-3.2.0.tgz", - "integrity": "sha512-YqTdPLfwP7YFN0SsD3QUVCkm9ZG2VzOXv3DOrw5G5mkMbVwptTwVcFv7/C0vOpBmgTxAeTG19XpUs1E522LW9Q==", - "dev": true - }, - "is-alphabetical": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/is-alphabetical/-/is-alphabetical-1.0.4.tgz", - "integrity": "sha512-DwzsA04LQ10FHTZuL0/grVDk4rFoVH1pjAToYwBrHSxcrBIGQuXrQMtD5U1b0U2XVgKZCTLLP8u2Qxqhy3l2Vg==", - "dev": true - }, - "is-alphanumerical": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/is-alphanumerical/-/is-alphanumerical-1.0.4.tgz", - "integrity": "sha512-UzoZUr+XfVz3t3v4KyGEniVL9BDRoQtY7tOyrRybkVNjDFWyo1yhXNGrrBTQxp3ib9BLAWs7k2YKBQsFRkZG9A==", - "dev": true, - "requires": { - "is-alphabetical": "^1.0.0", - "is-decimal": "^1.0.0" - } - }, - "is-arrayish": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", - "integrity": "sha1-d8mYQFJ6qOyxqLppe4BkWnqSap0=", - "dev": 
true - }, - "is-binary-path": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", - "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", - "dev": true, - "requires": { - "binary-extensions": "^2.0.0" - } - }, - "is-buffer": { - "version": "1.1.6", - "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz", - "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==", - "dev": true - }, - "is-ci": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-ci/-/is-ci-2.0.0.tgz", - "integrity": "sha512-YfJT7rkpQB0updsdHLGWrvhBJfcfzNNawYDNIyQXJz0IViGf75O8EBPKSdvw2rF+LGCsX4FZ8tcr3b19LcZq4w==", - "dev": true, - "requires": { - "ci-info": "^2.0.0" - } - }, - "is-decimal": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/is-decimal/-/is-decimal-1.0.4.tgz", - "integrity": "sha512-RGdriMmQQvZ2aqaQq3awNA6dCGtKpiDFcOzrTWrDAT2MiWrKQVPmxLGHl7Y2nNu6led0kEyoX0enY0qXYsv9zw==", - "dev": true - }, - "is-error": { - "version": "2.2.2", - "resolved": "https://registry.npmjs.org/is-error/-/is-error-2.2.2.tgz", - "integrity": "sha512-IOQqts/aHWbiisY5DuPJQ0gcbvaLFCa7fBa9xoLfxBZvQ+ZI/Zh9xoI7Gk+G64N0FdK4AbibytHht2tWgpJWLg==", - "dev": true - }, - "is-extglob": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", - "integrity": "sha1-qIwCU1eR8C7TfHahueqXc8gz+MI=", - "dev": true - }, - "is-fullwidth-code-point": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", - "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", - "dev": true - }, - "is-glob": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.1.tgz", - "integrity": 
"sha512-5G0tKtBTFImOqDnLB2hG6Bp2qcKEFduo4tZu9MT/H6NQv/ghhy30o55ufafxJ/LdH79LLs2Kfrn85TLKyA7BUg==", - "dev": true, - "requires": { - "is-extglob": "^2.1.1" - } - }, - "is-hexadecimal": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/is-hexadecimal/-/is-hexadecimal-1.0.4.tgz", - "integrity": "sha512-gyPJuv83bHMpocVYoqof5VDiZveEoGoFL8m3BXNb2VW8Xs+rz9kqO8LOQ5DH6EsuvilT1ApazU0pyl+ytbPtlw==", - "dev": true - }, - "is-installed-globally": { - "version": "0.3.2", - "resolved": "https://registry.npmjs.org/is-installed-globally/-/is-installed-globally-0.3.2.tgz", - "integrity": "sha512-wZ8x1js7Ia0kecP/CHM/3ABkAmujX7WPvQk6uu3Fly/Mk44pySulQpnHG46OMjHGXApINnV4QhY3SWnECO2z5g==", - "dev": true, - "requires": { - "global-dirs": "^2.0.1", - "is-path-inside": "^3.0.1" - } - }, - "is-interactive": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-interactive/-/is-interactive-1.0.0.tgz", - "integrity": "sha512-2HvIEKRoqS62guEC+qBjpvRubdX910WCMuJTZ+I9yvqKU2/12eSL549HMwtabb4oupdj2sMP50k+XJfB/8JE6w==", - "dev": true - }, - "is-npm": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/is-npm/-/is-npm-4.0.0.tgz", - "integrity": "sha512-96ECIfh9xtDDlPylNPXhzjsykHsMJZ18ASpaWzQyBr4YRTcVjUvzaHayDAES2oU/3KpljhHUjtSRNiDwi0F0ig==", - "dev": true - }, - "is-number": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", - "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", - "dev": true - }, - "is-obj": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-obj/-/is-obj-2.0.0.tgz", - "integrity": "sha512-drqDG3cbczxxEJRoOXcOjtdp1J/lyp1mNn0xaznRs8+muBhgQcrnbspox5X5fOw0HnMnbfDzvnEMEtqDEJEo8w==", - "dev": true - }, - "is-path-cwd": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/is-path-cwd/-/is-path-cwd-2.2.0.tgz", - "integrity": 
"sha512-w942bTcih8fdJPJmQHFzkS76NEP8Kzzvmw92cXsazb8intwLqPibPPdXf4ANdKV3rYMuuQYGIWtvz9JilB3NFQ==", - "dev": true - }, - "is-path-inside": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.2.tgz", - "integrity": "sha512-/2UGPSgmtqwo1ktx8NDHjuPwZWmHhO+gj0f93EkhLB5RgW9RZevWYYlIkS6zePc6U2WpOdQYIwHe9YC4DWEBVg==", - "dev": true - }, - "is-plain-obj": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-1.1.0.tgz", - "integrity": "sha1-caUMhCnfync8kqOQpKA7OfzVHT4=", - "dev": true - }, - "is-plain-object": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-4.1.1.tgz", - "integrity": "sha512-5Aw8LLVsDlZsETVMhoMXzqsXwQqr/0vlnBYzIXJbYo2F4yYlhLHs+Ez7Bod7IIQKWkJbJfxrWD7pA1Dw1TKrwA==", - "dev": true - }, - "is-promise": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/is-promise/-/is-promise-4.0.0.tgz", - "integrity": "sha512-hvpoI6korhJMnej285dSg6nu1+e6uxs7zG3BYAm5byqDsgJNWwxzM6z6iZiAgQR4TJ30JmBTOwqZUw3WlyH3AQ==", - "dev": true - }, - "is-typedarray": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-typedarray/-/is-typedarray-1.0.0.tgz", - "integrity": "sha1-5HnICFjfDBsR3dppQPlgEfzaSpo=", - "dev": true - }, - "is-whitespace-character": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/is-whitespace-character/-/is-whitespace-character-1.0.4.tgz", - "integrity": "sha512-SDweEzfIZM0SJV0EUga669UTKlmL0Pq8Lno0QDQsPnvECB3IM2aP0gdx5TrU0A01MAPfViaZiI2V1QMZLaKK5w==", - "dev": true - }, - "is-word-character": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/is-word-character/-/is-word-character-1.0.4.tgz", - "integrity": "sha512-5SMO8RVennx3nZrqtKwCGyyetPE9VDba5ugvKLaD4KopPG5kR4mQ7tNt/r7feL5yt5h3lpuBbIUmCOG2eSzXHA==", - "dev": true - }, - "is-yarn-global": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/is-yarn-global/-/is-yarn-global-0.3.0.tgz", - "integrity": 
"sha512-VjSeb/lHmkoyd8ryPVIKvOCn4D1koMqY+vqyjjUfc3xyKtP4dYOxM44sZrnqQSzSds3xyOrUTLTC9LVCVgLngw==", - "dev": true - }, - "isarray": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", - "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE=", - "dev": true - }, - "js-string-escape": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/js-string-escape/-/js-string-escape-1.0.1.tgz", - "integrity": "sha1-4mJbrbwNZ8dTPp7cEGjFh65BN+8=", - "dev": true - }, - "js-tokens": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", - "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", - "dev": true - }, - "js-yaml": { - "version": "3.13.1", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.13.1.tgz", - "integrity": "sha512-YfbcO7jXDdyj0DGxYVSlSeQNHbD7XPWvrVWeVUujrQEoZzWJIRrCPoyk6kL6IAjAG2IolMK4T0hNUe0HOUs5Jw==", - "dev": true, - "requires": { - "argparse": "^1.0.7", - "esprima": "^4.0.0" - } - }, - "json-buffer": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.0.tgz", - "integrity": "sha1-Wx85evx11ne96Lz8Dkfh+aPZqJg=", - "dev": true - }, - "json-parse-better-errors": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/json-parse-better-errors/-/json-parse-better-errors-1.0.2.tgz", - "integrity": "sha512-mrqyZKfX5EhL7hvqcV6WG1yYjnjeuYDzDhhcAAUrq8Po85NBQBJP+ZDUT75qZQ98IkUoBqdkExkukOU7Ts2wrw==", - "dev": true - }, - "json-parse-even-better-errors": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz", - "integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==", - "dev": true - }, - "json5": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/json5/-/json5-1.0.1.tgz", - "integrity": 
"sha512-aKS4WQjPenRxiQsC93MNfjx+nbF4PAdYzmd/1JIj8HYzqfbu86beTuNgXDzPknWk0n0uARlyewZo4s++ES36Ow==", - "dev": true, - "requires": { - "minimist": "^1.2.0" - } - }, - "jsonfile": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-4.0.0.tgz", - "integrity": "sha1-h3Gq4HmbZAdrdmQPygWPnBDjPss=", - "dev": true, - "requires": { - "graceful-fs": "^4.1.6" - } - }, - "keyv": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/keyv/-/keyv-3.1.0.tgz", - "integrity": "sha512-9ykJ/46SN/9KPM/sichzQ7OvXyGDYKGTaDlKMGCAlg2UK8KRy4jb0d8sFc+0Tt0YYnThq8X2RZgCg74RPxgcVA==", - "dev": true, - "requires": { - "json-buffer": "3.0.0" - } - }, - "kind-of": { - "version": "6.0.3", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz", - "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==", - "dev": true - }, - "latest-version": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/latest-version/-/latest-version-5.1.0.tgz", - "integrity": "sha512-weT+r0kTkRQdCdYCNtkMwWXQTMEswKrFBkm4ckQOMVhhqhIMI1UT2hMj+1iigIhgSZm5gTmrRXBNoGUgaTY1xA==", - "dev": true, - "requires": { - "package-json": "^6.3.0" - } - }, - "lines-and-columns": { - "version": "1.1.6", - "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.1.6.tgz", - "integrity": "sha1-HADHQ7QzzQpOgHWPe2SldEDZ/wA=", - "dev": true - }, - "loader-utils": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-1.2.3.tgz", - "integrity": "sha512-fkpz8ejdnEMG3s37wGL07iSBDg99O9D5yflE9RGNH3hRdx9SOwYfnGYdZOUIZitN8E+E2vkq3MUMYMvPYl5ZZA==", - "dev": true, - "requires": { - "big.js": "^5.2.2", - "emojis-list": "^2.0.0", - "json5": "^1.0.1" - } - }, - "locate-path": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", - "integrity": 
"sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", - "dev": true, - "requires": { - "p-locate": "^4.1.0" - } - }, - "lodash": { - "version": "4.17.19", - "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.19.tgz", - "integrity": "sha512-JNvd8XER9GQX0v2qJgsaN/mzFCNA5BRe/j8JN9d+tWyGLSodKQHKFicdwNYzWwI3wjRnaKPsGj1XkBjx/F96DQ==", - "dev": true - }, - "lodash.startcase": { - "version": "4.4.0", - "resolved": "https://registry.npmjs.org/lodash.startcase/-/lodash.startcase-4.4.0.tgz", - "integrity": "sha1-lDbjTtJgk+1/+uGTYUQ1CRXZrdg=", - "dev": true - }, - "log-symbols": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/log-symbols/-/log-symbols-4.0.0.tgz", - "integrity": "sha512-FN8JBzLx6CzeMrB0tg6pqlGU1wCrXW+ZXGH481kfsBqer0hToTIiHdjH4Mq8xJUbvATujKCvaREGWpGUionraA==", - "dev": true, - "requires": { - "chalk": "^4.0.0" - }, - "dependencies": { - "ansi-styles": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.2.1.tgz", - "integrity": "sha512-9VGjrMsG1vePxcSweQsN20KY/c4zN0h9fLjqAbwbPfahM3t+NL+M9HC8xeXG2I8pX5NoamTGNuomEUFI7fcUjA==", - "dev": true, - "requires": { - "@types/color-name": "^1.1.1", - "color-convert": "^2.0.1" - } - }, - "chalk": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.0.tgz", - "integrity": "sha512-qwx12AxXe2Q5xQ43Ac//I6v5aXTipYrSESdOgzrN+9XjgEpyjpKuvSGaN4qE93f7TQTlerQQ8S+EQ0EyDoVL1A==", - "dev": true, - "requires": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - } - }, - "color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, - "requires": { - "color-name": "~1.1.4" - } - }, - "color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - 
"integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true - }, - "has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "dev": true - }, - "supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "dev": true, - "requires": { - "has-flag": "^4.0.0" - } - } - } - }, - "long": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/long/-/long-4.0.0.tgz", - "integrity": "sha512-XsP+KhQif4bjX1kbuSiySJFNAehNxgLb6hPRGJ9QsUr8ajHkuXGdrHmFUTUUXhDwVX2R5bY4JNZEwbUiMhV+MA==", - "dev": true - }, - "lowercase-keys": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-1.0.1.tgz", - "integrity": "sha512-G2Lj61tXDnVFFOi8VZds+SoQjtQC3dgokKdDG2mTm1tx4m50NUHBOZSBwQQHyy0V12A0JTG4icfZQH+xPyh8VA==", - "dev": true - }, - "lru-cache": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", - "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", - "dev": true, - "requires": { - "yallist": "^4.0.0" - } - }, - "lunr": { - "version": "2.3.9", - "resolved": "https://registry.npmjs.org/lunr/-/lunr-2.3.9.tgz", - "integrity": "sha512-zTU3DaZaF3Rt9rhN3uBMGQD3dD2/vFQqnvZCDv4dl5iOzq2IZQqTxu90r4E5J+nP70J3ilqVCrbho2eWaeW8Ow==", - "dev": true - }, - "make-dir": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-3.1.0.tgz", - "integrity": "sha512-g3FeP20LNwhALb/6Cz6Dd4F2ngze0jz7tbzrD2wAV+o9FeNHe4rL+yK2md0J/fiSf1sa1ADhXqi5+oVwOM/eGw==", - "dev": true, - "requires": { - "semver": "^6.0.0" - } - }, - 
"make-error": { - "version": "1.3.5", - "resolved": "https://registry.npmjs.org/make-error/-/make-error-1.3.5.tgz", - "integrity": "sha512-c3sIjNUow0+8swNwVpqoH4YCShKNFkMaw6oH1mNS2haDZQqkeZFlHS3dhoeEbKKmJB4vXpJucU6oH75aDYeE9g==", - "dev": true - }, - "map-age-cleaner": { - "version": "0.1.3", - "resolved": "https://registry.npmjs.org/map-age-cleaner/-/map-age-cleaner-0.1.3.tgz", - "integrity": "sha512-bJzx6nMoP6PDLPBFmg7+xRKeFZvFboMrGlxmNj9ClvX53KrmvM5bXFXEWjbz4cz1AFn+jWJ9z/DJSz7hrs0w3w==", - "dev": true, - "requires": { - "p-defer": "^1.0.0" - } - }, - "map-obj": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/map-obj/-/map-obj-4.1.0.tgz", - "integrity": "sha512-glc9y00wgtwcDmp7GaE/0b0OnxpNJsVf3ael/An6Fe2Q51LLwN1er6sdomLRzz5h0+yMpiYLhWYF5R7HeqVd4g==", - "dev": true - }, - "markdown-escapes": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/markdown-escapes/-/markdown-escapes-1.0.4.tgz", - "integrity": "sha512-8z4efJYk43E0upd0NbVXwgSTQs6cT3T06etieCMEg7dRbzCbxUCK/GHlX8mhHRDcp+OLlHkPKsvqQTCvsRl2cg==", - "dev": true - }, - "marked": { - "version": "1.2.9", - "resolved": "https://registry.npmjs.org/marked/-/marked-1.2.9.tgz", - "integrity": "sha512-H8lIX2SvyitGX+TRdtS06m1jHMijKN/XjfH6Ooii9fvxMlh8QdqBfBDkGUpMWH2kQNrtixjzYUa3SH8ROTgRRw==", - "dev": true - }, - "matcher": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/matcher/-/matcher-3.0.0.tgz", - "integrity": "sha512-OkeDaAZ/bQCxeFAozM55PKcKU0yJMPGifLwV4Qgjitu+5MoAfSQN4lsLJeXZ1b8w0x+/Emda6MZgXS1jvsapng==", - "dev": true, - "requires": { - "escape-string-regexp": "^4.0.0" - }, - "dependencies": { - "escape-string-regexp": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", - "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", - "dev": true - } - } - }, - "md5-hex": { - "version": "3.0.1", - "resolved": 
"https://registry.npmjs.org/md5-hex/-/md5-hex-3.0.1.tgz", - "integrity": "sha512-BUiRtTtV39LIJwinWBjqVsU9xhdnz7/i889V859IBFpuqGAj6LuOvHv5XLbgZ2R7ptJoJaEcxkv88/h25T7Ciw==", - "dev": true, - "requires": { - "blueimp-md5": "^2.10.0" - } - }, - "mem": { - "version": "6.1.1", - "resolved": "https://registry.npmjs.org/mem/-/mem-6.1.1.tgz", - "integrity": "sha512-Ci6bIfq/UgcxPTYa8dQQ5FY3BzKkT894bwXWXxC/zqs0XgMO2cT20CGkOqda7gZNkmK5VP4x89IGZ6K7hfbn3Q==", - "dev": true, - "requires": { - "map-age-cleaner": "^0.1.3", - "mimic-fn": "^3.0.0" - }, - "dependencies": { - "mimic-fn": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-3.1.0.tgz", - "integrity": "sha512-Ysbi9uYW9hFyfrThdDEQuykN4Ey6BuwPD2kpI5ES/nFTDn/98yxYNLZJcgUAKPT/mcrLLKaGzJR9YVxJrIdASQ==", - "dev": true - } - } - }, - "memory-fs": { - "version": "0.5.0", - "resolved": "https://registry.npmjs.org/memory-fs/-/memory-fs-0.5.0.tgz", - "integrity": "sha512-jA0rdU5KoQMC0e6ppoNRtpp6vjFq6+NY7r8hywnC7V+1Xj/MtHwGIbB1QaK/dunyjWteJzmkpd7ooeWg10T7GA==", - "dev": true, - "requires": { - "errno": "^0.1.3", - "readable-stream": "^2.0.1" - } - }, - "meow": { - "version": "7.1.1", - "resolved": "https://registry.npmjs.org/meow/-/meow-7.1.1.tgz", - "integrity": "sha512-GWHvA5QOcS412WCo8vwKDlTelGLsCGBVevQB5Kva961rmNfun0PCbv5+xta2kUMFJyR8/oWnn7ddeKdosbAPbA==", - "dev": true, - "requires": { - "@types/minimist": "^1.2.0", - "camelcase-keys": "^6.2.2", - "decamelize-keys": "^1.1.0", - "hard-rejection": "^2.1.0", - "minimist-options": "4.1.0", - "normalize-package-data": "^2.5.0", - "read-pkg-up": "^7.0.1", - "redent": "^3.0.0", - "trim-newlines": "^3.0.0", - "type-fest": "^0.13.1", - "yargs-parser": "^18.1.3" - }, - "dependencies": { - "type-fest": { - "version": "0.13.1", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.13.1.tgz", - "integrity": "sha512-34R7HTnG0XIJcBSn5XhDd7nNFPRcXYRZrBB2O2jdKqYODldSzBAqzsWoZYYvduky73toYS/ESqxPvkDf/F0XMg==", - "dev": true - } - } - }, - "merge2": { - 
"version": "1.3.0", - "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.3.0.tgz", - "integrity": "sha512-2j4DAdlBOkiSZIsaXk4mTE3sRS02yBHAtfy127xRV3bQUFqXkjHCHLW6Scv7DwNRbIWNHH8zpnz9zMaKXIdvYw==", - "dev": true - }, - "micromatch": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.2.tgz", - "integrity": "sha512-y7FpHSbMUMoyPbYUSzO6PaZ6FyRnQOpHuKwbo1G+Knck95XVU4QAiKdGEnj5wwoS7PlOgthX/09u5iFJ+aYf5Q==", - "dev": true, - "requires": { - "braces": "^3.0.1", - "picomatch": "^2.0.5" - } - }, - "mimic-fn": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", - "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", - "dev": true - }, - "mimic-response": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-1.0.1.tgz", - "integrity": "sha512-j5EctnkH7amfV/q5Hgmoal1g2QHFJRraOtmx0JpIqkxhBhI/lJSl1nMpQ45hVarwNETOoWEimndZ4QK0RHxuxQ==", - "dev": true - }, - "min-indent": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/min-indent/-/min-indent-1.0.1.tgz", - "integrity": "sha512-I9jwMn07Sy/IwOj3zVkVik2JTvgpaykDZEigL6Rx6N9LbMywwUSMtxET+7lVoDLLd3O3IXwJwvuuns8UB/HeAg==", - "dev": true - }, - "minimatch": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", - "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==", - "dev": true, - "requires": { - "brace-expansion": "^1.1.7" - } - }, - "minimist": { - "version": "1.2.5", - "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.5.tgz", - "integrity": "sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw==", - "dev": true - }, - "minimist-options": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/minimist-options/-/minimist-options-4.1.0.tgz", - "integrity": 
"sha512-Q4r8ghd80yhO/0j1O3B2BjweX3fiHg9cdOwjJd2J76Q135c+NDxGCqdYKQ1SKBuFfgWbAUzBfvYjPUEeNgqN1A==", - "dev": true, - "requires": { - "arrify": "^1.0.1", - "is-plain-obj": "^1.1.0", - "kind-of": "^6.0.3" - }, - "dependencies": { - "arrify": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/arrify/-/arrify-1.0.1.tgz", - "integrity": "sha1-iYUI2iIm84DfkEcoRWhJwVAaSw0=", - "dev": true - } - } - }, - "ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", - "dev": true - }, - "mute-stream": { - "version": "0.0.8", - "resolved": "https://registry.npmjs.org/mute-stream/-/mute-stream-0.0.8.tgz", - "integrity": "sha512-nnbWWOkoWyUsTjKrhgD0dcz22mdkSnpYqbEjIm2nhwhuxlSkpywJmBo8h0ZqJdkp73mb90SssHkN4rsRaBAfAA==", - "dev": true - }, - "neo-async": { - "version": "2.6.1", - "resolved": "https://registry.npmjs.org/neo-async/-/neo-async-2.6.1.tgz", - "integrity": "sha512-iyam8fBuCUpWeKPGpaNMetEocMt364qkCsfL9JuhjXX6dRnguRVOfk2GZaDpPjcOKiiXCPINZC1GczQ7iTq3Zw==", - "dev": true - }, - "normalize-package-data": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-2.5.0.tgz", - "integrity": "sha512-/5CMN3T0R4XTj4DcGaexo+roZSdSFW/0AOOTROrjxzCG1wrWXEsGbRKevjlIL+ZDE4sZlJr5ED4YW0yqmkK+eA==", - "dev": true, - "requires": { - "hosted-git-info": "^2.1.4", - "resolve": "^1.10.0", - "semver": "2 || 3 || 4 || 5", - "validate-npm-package-license": "^3.0.1" - }, - "dependencies": { - "semver": { - "version": "5.7.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", - "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", - "dev": true - } - } - }, - "normalize-path": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", - "integrity": 
"sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", - "dev": true - }, - "normalize-url": { - "version": "4.5.0", - "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-4.5.0.tgz", - "integrity": "sha512-2s47yzUxdexf1OhyRi4Em83iQk0aPvwTddtFz4hnSSw9dCEsLEGf6SwIO8ss/19S9iBb5sJaOuTvTGDeZI00BQ==", - "dev": true - }, - "npm": { - "version": "6.14.8", - "resolved": "https://registry.npmjs.org/npm/-/npm-6.14.8.tgz", - "integrity": "sha512-HBZVBMYs5blsj94GTeQZel7s9odVuuSUHy1+AlZh7rPVux1os2ashvEGLy/STNK7vUjbrCg5Kq9/GXisJgdf6A==", - "dev": true, - "requires": { - "JSONStream": "^1.3.5", - "abbrev": "~1.1.1", - "ansicolors": "~0.3.2", - "ansistyles": "~0.1.3", - "aproba": "^2.0.0", - "archy": "~1.0.0", - "bin-links": "^1.1.8", - "bluebird": "^3.5.5", - "byte-size": "^5.0.1", - "cacache": "^12.0.3", - "call-limit": "^1.1.1", - "chownr": "^1.1.4", - "ci-info": "^2.0.0", - "cli-columns": "^3.1.2", - "cli-table3": "^0.5.1", - "cmd-shim": "^3.0.3", - "columnify": "~1.5.4", - "config-chain": "^1.1.12", - "debuglog": "*", - "detect-indent": "~5.0.0", - "detect-newline": "^2.1.0", - "dezalgo": "~1.0.3", - "editor": "~1.0.0", - "figgy-pudding": "^3.5.1", - "find-npm-prefix": "^1.0.2", - "fs-vacuum": "~1.2.10", - "fs-write-stream-atomic": "~1.0.10", - "gentle-fs": "^2.3.1", - "glob": "^7.1.6", - "graceful-fs": "^4.2.4", - "has-unicode": "~2.0.1", - "hosted-git-info": "^2.8.8", - "iferr": "^1.0.2", - "imurmurhash": "*", - "infer-owner": "^1.0.4", - "inflight": "~1.0.6", - "inherits": "^2.0.4", - "ini": "^1.3.5", - "init-package-json": "^1.10.3", - "is-cidr": "^3.0.0", - "json-parse-better-errors": "^1.0.2", - "lazy-property": "~1.0.0", - "libcipm": "^4.0.8", - "libnpm": "^3.0.1", - "libnpmaccess": "^3.0.2", - "libnpmhook": "^5.0.3", - "libnpmorg": "^1.0.1", - "libnpmsearch": "^2.0.2", - "libnpmteam": "^1.0.2", - "libnpx": "^10.2.4", - "lock-verify": "^2.1.0", - "lockfile": "^1.0.4", - "lodash._baseindexof": "*", - 
"lodash._baseuniq": "~4.6.0", - "lodash._bindcallback": "*", - "lodash._cacheindexof": "*", - "lodash._createcache": "*", - "lodash._getnative": "*", - "lodash.clonedeep": "~4.5.0", - "lodash.restparam": "*", - "lodash.union": "~4.6.0", - "lodash.uniq": "~4.5.0", - "lodash.without": "~4.4.0", - "lru-cache": "^5.1.1", - "meant": "^1.0.2", - "mississippi": "^3.0.0", - "mkdirp": "^0.5.5", - "move-concurrently": "^1.0.1", - "node-gyp": "^5.1.0", - "nopt": "^4.0.3", - "normalize-package-data": "^2.5.0", - "npm-audit-report": "^1.3.3", - "npm-cache-filename": "~1.0.2", - "npm-install-checks": "^3.0.2", - "npm-lifecycle": "^3.1.5", - "npm-package-arg": "^6.1.1", - "npm-packlist": "^1.4.8", - "npm-pick-manifest": "^3.0.2", - "npm-profile": "^4.0.4", - "npm-registry-fetch": "^4.0.7", - "npm-user-validate": "~1.0.0", - "npmlog": "~4.1.2", - "once": "~1.4.0", - "opener": "^1.5.1", - "osenv": "^0.1.5", - "pacote": "^9.5.12", - "path-is-inside": "~1.0.2", - "promise-inflight": "~1.0.1", - "qrcode-terminal": "^0.12.0", - "query-string": "^6.8.2", - "qw": "~1.0.1", - "read": "~1.0.7", - "read-cmd-shim": "^1.0.5", - "read-installed": "~4.0.3", - "read-package-json": "^2.1.1", - "read-package-tree": "^5.3.1", - "readable-stream": "^3.6.0", - "readdir-scoped-modules": "^1.1.0", - "request": "^2.88.0", - "retry": "^0.12.0", - "rimraf": "^2.7.1", - "safe-buffer": "^5.1.2", - "semver": "^5.7.1", - "sha": "^3.0.0", - "slide": "~1.1.6", - "sorted-object": "~2.0.1", - "sorted-union-stream": "~2.1.3", - "ssri": "^6.0.1", - "stringify-package": "^1.0.1", - "tar": "^4.4.13", - "text-table": "~0.2.0", - "tiny-relative-date": "^1.3.0", - "uid-number": "0.0.6", - "umask": "~1.1.0", - "unique-filename": "^1.1.1", - "unpipe": "~1.0.0", - "update-notifier": "^2.5.0", - "uuid": "^3.3.3", - "validate-npm-package-license": "^3.0.4", - "validate-npm-package-name": "~3.0.0", - "which": "^1.3.1", - "worker-farm": "^1.7.0", - "write-file-atomic": "^2.4.3" - }, - "dependencies": { - "JSONStream": { - 
"version": "1.3.5", - "bundled": true, - "dev": true, - "requires": { - "jsonparse": "^1.2.0", - "through": ">=2.2.7 <3" - } - }, - "abbrev": { - "version": "1.1.1", - "bundled": true, - "dev": true - }, - "agent-base": { - "version": "4.3.0", - "bundled": true, - "dev": true, - "requires": { - "es6-promisify": "^5.0.0" - } - }, - "agentkeepalive": { - "version": "3.5.2", - "bundled": true, - "dev": true, - "requires": { - "humanize-ms": "^1.2.1" - } - }, - "ajv": { - "version": "5.5.2", - "bundled": true, - "dev": true, - "requires": { - "co": "^4.6.0", - "fast-deep-equal": "^1.0.0", - "fast-json-stable-stringify": "^2.0.0", - "json-schema-traverse": "^0.3.0" - } - }, - "ansi-align": { - "version": "2.0.0", - "bundled": true, - "dev": true, - "requires": { - "string-width": "^2.0.0" - } - }, - "ansi-regex": { - "version": "2.1.1", - "bundled": true, - "dev": true - }, - "ansi-styles": { - "version": "3.2.1", - "bundled": true, - "dev": true, - "requires": { - "color-convert": "^1.9.0" - } - }, - "ansicolors": { - "version": "0.3.2", - "bundled": true, - "dev": true - }, - "ansistyles": { - "version": "0.1.3", - "bundled": true, - "dev": true - }, - "aproba": { - "version": "2.0.0", - "bundled": true, - "dev": true - }, - "archy": { - "version": "1.0.0", - "bundled": true, - "dev": true - }, - "are-we-there-yet": { - "version": "1.1.4", - "bundled": true, - "dev": true, - "requires": { - "delegates": "^1.0.0", - "readable-stream": "^2.0.6" - }, - "dependencies": { - "readable-stream": { - "version": "2.3.6", - "bundled": true, - "dev": true, - "requires": { - "core-util-is": "~1.0.0", - "inherits": "~2.0.3", - "isarray": "~1.0.0", - "process-nextick-args": "~2.0.0", - "safe-buffer": "~5.1.1", - "string_decoder": "~1.1.1", - "util-deprecate": "~1.0.1" - } - }, - "string_decoder": { - "version": "1.1.1", - "bundled": true, - "dev": true, - "requires": { - "safe-buffer": "~5.1.0" - } - } - } - }, - "asap": { - "version": "2.0.6", - "bundled": true, - "dev": true - }, 
- "asn1": { - "version": "0.2.4", - "bundled": true, - "dev": true, - "requires": { - "safer-buffer": "~2.1.0" - } - }, - "assert-plus": { - "version": "1.0.0", - "bundled": true, - "dev": true - }, - "asynckit": { - "version": "0.4.0", - "bundled": true, - "dev": true - }, - "aws-sign2": { - "version": "0.7.0", - "bundled": true, - "dev": true - }, - "aws4": { - "version": "1.8.0", - "bundled": true, - "dev": true - }, - "balanced-match": { - "version": "1.0.0", - "bundled": true, - "dev": true - }, - "bcrypt-pbkdf": { - "version": "1.0.2", - "bundled": true, - "dev": true, - "optional": true, - "requires": { - "tweetnacl": "^0.14.3" - } - }, - "bin-links": { - "version": "1.1.8", - "bundled": true, - "dev": true, - "requires": { - "bluebird": "^3.5.3", - "cmd-shim": "^3.0.0", - "gentle-fs": "^2.3.0", - "graceful-fs": "^4.1.15", - "npm-normalize-package-bin": "^1.0.0", - "write-file-atomic": "^2.3.0" - } - }, - "bluebird": { - "version": "3.5.5", - "bundled": true, - "dev": true - }, - "boxen": { - "version": "1.3.0", - "bundled": true, - "dev": true, - "requires": { - "ansi-align": "^2.0.0", - "camelcase": "^4.0.0", - "chalk": "^2.0.1", - "cli-boxes": "^1.0.0", - "string-width": "^2.0.0", - "term-size": "^1.2.0", - "widest-line": "^2.0.0" - } - }, - "brace-expansion": { - "version": "1.1.11", - "bundled": true, - "dev": true, - "requires": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, - "buffer-from": { - "version": "1.0.0", - "bundled": true, - "dev": true - }, - "builtins": { - "version": "1.0.3", - "bundled": true, - "dev": true - }, - "byline": { - "version": "5.0.0", - "bundled": true, - "dev": true - }, - "byte-size": { - "version": "5.0.1", - "bundled": true, - "dev": true - }, - "cacache": { - "version": "12.0.3", - "bundled": true, - "dev": true, - "requires": { - "bluebird": "^3.5.5", - "chownr": "^1.1.1", - "figgy-pudding": "^3.5.1", - "glob": "^7.1.4", - "graceful-fs": "^4.1.15", - "infer-owner": "^1.0.3", - "lru-cache": "^5.1.1", - 
"mississippi": "^3.0.0", - "mkdirp": "^0.5.1", - "move-concurrently": "^1.0.1", - "promise-inflight": "^1.0.1", - "rimraf": "^2.6.3", - "ssri": "^6.0.1", - "unique-filename": "^1.1.1", - "y18n": "^4.0.0" - } - }, - "call-limit": { - "version": "1.1.1", - "bundled": true, - "dev": true - }, - "camelcase": { - "version": "4.1.0", - "bundled": true, - "dev": true - }, - "capture-stack-trace": { - "version": "1.0.0", - "bundled": true, - "dev": true - }, - "caseless": { - "version": "0.12.0", - "bundled": true, - "dev": true - }, - "chalk": { - "version": "2.4.1", - "bundled": true, - "dev": true, - "requires": { - "ansi-styles": "^3.2.1", - "escape-string-regexp": "^1.0.5", - "supports-color": "^5.3.0" - } - }, - "chownr": { - "version": "1.1.4", - "bundled": true, - "dev": true - }, - "ci-info": { - "version": "2.0.0", - "bundled": true, - "dev": true - }, - "cidr-regex": { - "version": "2.0.10", - "bundled": true, - "dev": true, - "requires": { - "ip-regex": "^2.1.0" - } - }, - "cli-boxes": { - "version": "1.0.0", - "bundled": true, - "dev": true - }, - "cli-columns": { - "version": "3.1.2", - "bundled": true, - "dev": true, - "requires": { - "string-width": "^2.0.0", - "strip-ansi": "^3.0.1" - } - }, - "cli-table3": { - "version": "0.5.1", - "bundled": true, - "dev": true, - "requires": { - "colors": "^1.1.2", - "object-assign": "^4.1.0", - "string-width": "^2.1.1" - } - }, - "cliui": { - "version": "5.0.0", - "bundled": true, - "dev": true, - "requires": { - "string-width": "^3.1.0", - "strip-ansi": "^5.2.0", - "wrap-ansi": "^5.1.0" - }, - "dependencies": { - "ansi-regex": { - "version": "4.1.0", - "bundled": true, - "dev": true - }, - "is-fullwidth-code-point": { - "version": "2.0.0", - "bundled": true, - "dev": true - }, - "string-width": { - "version": "3.1.0", - "bundled": true, - "dev": true, - "requires": { - "emoji-regex": "^7.0.1", - "is-fullwidth-code-point": "^2.0.0", - "strip-ansi": "^5.1.0" - } - }, - "strip-ansi": { - "version": "5.2.0", - "bundled": 
true, - "dev": true, - "requires": { - "ansi-regex": "^4.1.0" - } - } - } - }, - "clone": { - "version": "1.0.4", - "bundled": true, - "dev": true - }, - "cmd-shim": { - "version": "3.0.3", - "bundled": true, - "dev": true, - "requires": { - "graceful-fs": "^4.1.2", - "mkdirp": "~0.5.0" - } - }, - "co": { - "version": "4.6.0", - "bundled": true, - "dev": true - }, - "code-point-at": { - "version": "1.1.0", - "bundled": true, - "dev": true - }, - "color-convert": { - "version": "1.9.1", - "bundled": true, - "dev": true, - "requires": { - "color-name": "^1.1.1" - } - }, - "color-name": { - "version": "1.1.3", - "bundled": true, - "dev": true - }, - "colors": { - "version": "1.3.3", - "bundled": true, - "dev": true, - "optional": true - }, - "columnify": { - "version": "1.5.4", - "bundled": true, - "dev": true, - "requires": { - "strip-ansi": "^3.0.0", - "wcwidth": "^1.0.0" - } - }, - "combined-stream": { - "version": "1.0.6", - "bundled": true, - "dev": true, - "requires": { - "delayed-stream": "~1.0.0" - } - }, - "concat-map": { - "version": "0.0.1", - "bundled": true, - "dev": true - }, - "concat-stream": { - "version": "1.6.2", - "bundled": true, - "dev": true, - "requires": { - "buffer-from": "^1.0.0", - "inherits": "^2.0.3", - "readable-stream": "^2.2.2", - "typedarray": "^0.0.6" - }, - "dependencies": { - "readable-stream": { - "version": "2.3.6", - "bundled": true, - "dev": true, - "requires": { - "core-util-is": "~1.0.0", - "inherits": "~2.0.3", - "isarray": "~1.0.0", - "process-nextick-args": "~2.0.0", - "safe-buffer": "~5.1.1", - "string_decoder": "~1.1.1", - "util-deprecate": "~1.0.1" - } - }, - "string_decoder": { - "version": "1.1.1", - "bundled": true, - "dev": true, - "requires": { - "safe-buffer": "~5.1.0" - } - } - } - }, - "config-chain": { - "version": "1.1.12", - "bundled": true, - "dev": true, - "requires": { - "ini": "^1.3.4", - "proto-list": "~1.2.1" - } - }, - "configstore": { - "version": "3.1.5", - "bundled": true, - "dev": true, - 
"requires": { - "dot-prop": "^4.2.1", - "graceful-fs": "^4.1.2", - "make-dir": "^1.0.0", - "unique-string": "^1.0.0", - "write-file-atomic": "^2.0.0", - "xdg-basedir": "^3.0.0" - } - }, - "console-control-strings": { - "version": "1.1.0", - "bundled": true, - "dev": true - }, - "copy-concurrently": { - "version": "1.0.5", - "bundled": true, - "dev": true, - "requires": { - "aproba": "^1.1.1", - "fs-write-stream-atomic": "^1.0.8", - "iferr": "^0.1.5", - "mkdirp": "^0.5.1", - "rimraf": "^2.5.4", - "run-queue": "^1.0.0" - }, - "dependencies": { - "aproba": { - "version": "1.2.0", - "bundled": true, - "dev": true - }, - "iferr": { - "version": "0.1.5", - "bundled": true, - "dev": true - } - } - }, - "core-util-is": { - "version": "1.0.2", - "bundled": true, - "dev": true - }, - "create-error-class": { - "version": "3.0.2", - "bundled": true, - "dev": true, - "requires": { - "capture-stack-trace": "^1.0.0" - } - }, - "cross-spawn": { - "version": "5.1.0", - "bundled": true, - "dev": true, - "requires": { - "lru-cache": "^4.0.1", - "shebang-command": "^1.2.0", - "which": "^1.2.9" - }, - "dependencies": { - "lru-cache": { - "version": "4.1.5", - "bundled": true, - "dev": true, - "requires": { - "pseudomap": "^1.0.2", - "yallist": "^2.1.2" - } - }, - "yallist": { - "version": "2.1.2", - "bundled": true, - "dev": true - } - } - }, - "crypto-random-string": { - "version": "1.0.0", - "bundled": true, - "dev": true - }, - "cyclist": { - "version": "0.2.2", - "bundled": true, - "dev": true - }, - "dashdash": { - "version": "1.14.1", - "bundled": true, - "dev": true, - "requires": { - "assert-plus": "^1.0.0" - } - }, - "debug": { - "version": "3.1.0", - "bundled": true, - "dev": true, - "requires": { - "ms": "2.0.0" - }, - "dependencies": { - "ms": { - "version": "2.0.0", - "bundled": true, - "dev": true - } - } - }, - "debuglog": { - "version": "1.0.1", - "bundled": true, - "dev": true - }, - "decamelize": { - "version": "1.2.0", - "bundled": true, - "dev": true - }, - 
"decode-uri-component": { - "version": "0.2.0", - "bundled": true, - "dev": true - }, - "deep-extend": { - "version": "0.6.0", - "bundled": true, - "dev": true - }, - "defaults": { - "version": "1.0.3", - "bundled": true, - "dev": true, - "requires": { - "clone": "^1.0.2" - } - }, - "define-properties": { - "version": "1.1.3", - "bundled": true, - "dev": true, - "requires": { - "object-keys": "^1.0.12" - } - }, - "delayed-stream": { - "version": "1.0.0", - "bundled": true, - "dev": true - }, - "delegates": { - "version": "1.0.0", - "bundled": true, - "dev": true - }, - "detect-indent": { - "version": "5.0.0", - "bundled": true, - "dev": true - }, - "detect-newline": { - "version": "2.1.0", - "bundled": true, - "dev": true - }, - "dezalgo": { - "version": "1.0.3", - "bundled": true, - "dev": true, - "requires": { - "asap": "^2.0.0", - "wrappy": "1" - } - }, - "dot-prop": { - "version": "4.2.1", - "bundled": true, - "dev": true, - "requires": { - "is-obj": "^1.0.0" - } - }, - "dotenv": { - "version": "5.0.1", - "bundled": true, - "dev": true - }, - "duplexer3": { - "version": "0.1.4", - "bundled": true, - "dev": true - }, - "duplexify": { - "version": "3.6.0", - "bundled": true, - "dev": true, - "requires": { - "end-of-stream": "^1.0.0", - "inherits": "^2.0.1", - "readable-stream": "^2.0.0", - "stream-shift": "^1.0.0" - }, - "dependencies": { - "readable-stream": { - "version": "2.3.6", - "bundled": true, - "dev": true, - "requires": { - "core-util-is": "~1.0.0", - "inherits": "~2.0.3", - "isarray": "~1.0.0", - "process-nextick-args": "~2.0.0", - "safe-buffer": "~5.1.1", - "string_decoder": "~1.1.1", - "util-deprecate": "~1.0.1" - } - }, - "string_decoder": { - "version": "1.1.1", - "bundled": true, - "dev": true, - "requires": { - "safe-buffer": "~5.1.0" - } - } - } - }, - "ecc-jsbn": { - "version": "0.1.2", - "bundled": true, - "dev": true, - "optional": true, - "requires": { - "jsbn": "~0.1.0", - "safer-buffer": "^2.1.0" - } - }, - "editor": { - "version": 
"1.0.0", - "bundled": true, - "dev": true - }, - "emoji-regex": { - "version": "7.0.3", - "bundled": true, - "dev": true - }, - "encoding": { - "version": "0.1.12", - "bundled": true, - "dev": true, - "requires": { - "iconv-lite": "~0.4.13" - } - }, - "end-of-stream": { - "version": "1.4.1", - "bundled": true, - "dev": true, - "requires": { - "once": "^1.4.0" - } - }, - "env-paths": { - "version": "2.2.0", - "bundled": true, - "dev": true - }, - "err-code": { - "version": "1.1.2", - "bundled": true, - "dev": true - }, - "errno": { - "version": "0.1.7", - "bundled": true, - "dev": true, - "requires": { - "prr": "~1.0.1" - } - }, - "es-abstract": { - "version": "1.12.0", - "bundled": true, - "dev": true, - "requires": { - "es-to-primitive": "^1.1.1", - "function-bind": "^1.1.1", - "has": "^1.0.1", - "is-callable": "^1.1.3", - "is-regex": "^1.0.4" - } - }, - "es-to-primitive": { - "version": "1.2.0", - "bundled": true, - "dev": true, - "requires": { - "is-callable": "^1.1.4", - "is-date-object": "^1.0.1", - "is-symbol": "^1.0.2" - } - }, - "es6-promise": { - "version": "4.2.8", - "bundled": true, - "dev": true - }, - "es6-promisify": { - "version": "5.0.0", - "bundled": true, - "dev": true, - "requires": { - "es6-promise": "^4.0.3" - } - }, - "escape-string-regexp": { - "version": "1.0.5", - "bundled": true, - "dev": true - }, - "execa": { - "version": "0.7.0", - "bundled": true, - "dev": true, - "requires": { - "cross-spawn": "^5.0.1", - "get-stream": "^3.0.0", - "is-stream": "^1.1.0", - "npm-run-path": "^2.0.0", - "p-finally": "^1.0.0", - "signal-exit": "^3.0.0", - "strip-eof": "^1.0.0" - }, - "dependencies": { - "get-stream": { - "version": "3.0.0", - "bundled": true, - "dev": true - } - } - }, - "extend": { - "version": "3.0.2", - "bundled": true, - "dev": true - }, - "extsprintf": { - "version": "1.3.0", - "bundled": true, - "dev": true - }, - "fast-deep-equal": { - "version": "1.1.0", - "bundled": true, - "dev": true - }, - "fast-json-stable-stringify": { - 
"version": "2.0.0", - "bundled": true, - "dev": true - }, - "figgy-pudding": { - "version": "3.5.1", - "bundled": true, - "dev": true - }, - "find-npm-prefix": { - "version": "1.0.2", - "bundled": true, - "dev": true - }, - "flush-write-stream": { - "version": "1.0.3", - "bundled": true, - "dev": true, - "requires": { - "inherits": "^2.0.1", - "readable-stream": "^2.0.4" - }, - "dependencies": { - "readable-stream": { - "version": "2.3.6", - "bundled": true, - "dev": true, - "requires": { - "core-util-is": "~1.0.0", - "inherits": "~2.0.3", - "isarray": "~1.0.0", - "process-nextick-args": "~2.0.0", - "safe-buffer": "~5.1.1", - "string_decoder": "~1.1.1", - "util-deprecate": "~1.0.1" - } - }, - "string_decoder": { - "version": "1.1.1", - "bundled": true, - "dev": true, - "requires": { - "safe-buffer": "~5.1.0" - } - } - } - }, - "forever-agent": { - "version": "0.6.1", - "bundled": true, - "dev": true - }, - "form-data": { - "version": "2.3.2", - "bundled": true, - "dev": true, - "requires": { - "asynckit": "^0.4.0", - "combined-stream": "1.0.6", - "mime-types": "^2.1.12" - } - }, - "from2": { - "version": "2.3.0", - "bundled": true, - "dev": true, - "requires": { - "inherits": "^2.0.1", - "readable-stream": "^2.0.0" - }, - "dependencies": { - "readable-stream": { - "version": "2.3.6", - "bundled": true, - "dev": true, - "requires": { - "core-util-is": "~1.0.0", - "inherits": "~2.0.3", - "isarray": "~1.0.0", - "process-nextick-args": "~2.0.0", - "safe-buffer": "~5.1.1", - "string_decoder": "~1.1.1", - "util-deprecate": "~1.0.1" - } - }, - "string_decoder": { - "version": "1.1.1", - "bundled": true, - "dev": true, - "requires": { - "safe-buffer": "~5.1.0" - } - } - } - }, - "fs-minipass": { - "version": "1.2.7", - "bundled": true, - "dev": true, - "requires": { - "minipass": "^2.6.0" - }, - "dependencies": { - "minipass": { - "version": "2.9.0", - "bundled": true, - "dev": true, - "requires": { - "safe-buffer": "^5.1.2", - "yallist": "^3.0.0" - } - } - } - }, - 
"fs-vacuum": { - "version": "1.2.10", - "bundled": true, - "dev": true, - "requires": { - "graceful-fs": "^4.1.2", - "path-is-inside": "^1.0.1", - "rimraf": "^2.5.2" - } - }, - "fs-write-stream-atomic": { - "version": "1.0.10", - "bundled": true, - "dev": true, - "requires": { - "graceful-fs": "^4.1.2", - "iferr": "^0.1.5", - "imurmurhash": "^0.1.4", - "readable-stream": "1 || 2" - }, - "dependencies": { - "iferr": { - "version": "0.1.5", - "bundled": true, - "dev": true - }, - "readable-stream": { - "version": "2.3.6", - "bundled": true, - "dev": true, - "requires": { - "core-util-is": "~1.0.0", - "inherits": "~2.0.3", - "isarray": "~1.0.0", - "process-nextick-args": "~2.0.0", - "safe-buffer": "~5.1.1", - "string_decoder": "~1.1.1", - "util-deprecate": "~1.0.1" - } - }, - "string_decoder": { - "version": "1.1.1", - "bundled": true, - "dev": true, - "requires": { - "safe-buffer": "~5.1.0" - } - } - } - }, - "fs.realpath": { - "version": "1.0.0", - "bundled": true, - "dev": true - }, - "function-bind": { - "version": "1.1.1", - "bundled": true, - "dev": true - }, - "gauge": { - "version": "2.7.4", - "bundled": true, - "dev": true, - "requires": { - "aproba": "^1.0.3", - "console-control-strings": "^1.0.0", - "has-unicode": "^2.0.0", - "object-assign": "^4.1.0", - "signal-exit": "^3.0.0", - "string-width": "^1.0.1", - "strip-ansi": "^3.0.1", - "wide-align": "^1.1.0" - }, - "dependencies": { - "aproba": { - "version": "1.2.0", - "bundled": true, - "dev": true - }, - "string-width": { - "version": "1.0.2", - "bundled": true, - "dev": true, - "requires": { - "code-point-at": "^1.0.0", - "is-fullwidth-code-point": "^1.0.0", - "strip-ansi": "^3.0.0" - } - } - } - }, - "genfun": { - "version": "5.0.0", - "bundled": true, - "dev": true - }, - "gentle-fs": { - "version": "2.3.1", - "bundled": true, - "dev": true, - "requires": { - "aproba": "^1.1.2", - "chownr": "^1.1.2", - "cmd-shim": "^3.0.3", - "fs-vacuum": "^1.2.10", - "graceful-fs": "^4.1.11", - "iferr": "^0.1.5", - 
"infer-owner": "^1.0.4", - "mkdirp": "^0.5.1", - "path-is-inside": "^1.0.2", - "read-cmd-shim": "^1.0.1", - "slide": "^1.1.6" - }, - "dependencies": { - "aproba": { - "version": "1.2.0", - "bundled": true, - "dev": true - }, - "iferr": { - "version": "0.1.5", - "bundled": true, - "dev": true - } - } - }, - "get-caller-file": { - "version": "2.0.5", - "bundled": true, - "dev": true - }, - "get-stream": { - "version": "4.1.0", - "bundled": true, - "dev": true, - "requires": { - "pump": "^3.0.0" - } - }, - "getpass": { - "version": "0.1.7", - "bundled": true, - "dev": true, - "requires": { - "assert-plus": "^1.0.0" - } - }, - "glob": { - "version": "7.1.6", - "bundled": true, - "dev": true, - "requires": { - "fs.realpath": "^1.0.0", - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": "^3.0.4", - "once": "^1.3.0", - "path-is-absolute": "^1.0.0" - } - }, - "global-dirs": { - "version": "0.1.1", - "bundled": true, - "dev": true, - "requires": { - "ini": "^1.3.4" - } - }, - "got": { - "version": "6.7.1", - "bundled": true, - "dev": true, - "requires": { - "create-error-class": "^3.0.0", - "duplexer3": "^0.1.4", - "get-stream": "^3.0.0", - "is-redirect": "^1.0.0", - "is-retry-allowed": "^1.0.0", - "is-stream": "^1.0.0", - "lowercase-keys": "^1.0.0", - "safe-buffer": "^5.0.1", - "timed-out": "^4.0.0", - "unzip-response": "^2.0.1", - "url-parse-lax": "^1.0.0" - }, - "dependencies": { - "get-stream": { - "version": "3.0.0", - "bundled": true, - "dev": true - } - } - }, - "graceful-fs": { - "version": "4.2.4", - "bundled": true, - "dev": true - }, - "har-schema": { - "version": "2.0.0", - "bundled": true, - "dev": true - }, - "har-validator": { - "version": "5.1.0", - "bundled": true, - "dev": true, - "requires": { - "ajv": "^5.3.0", - "har-schema": "^2.0.0" - } - }, - "has": { - "version": "1.0.3", - "bundled": true, - "dev": true, - "requires": { - "function-bind": "^1.1.1" - } - }, - "has-flag": { - "version": "3.0.0", - "bundled": true, - "dev": true - }, - 
"has-symbols": { - "version": "1.0.0", - "bundled": true, - "dev": true - }, - "has-unicode": { - "version": "2.0.1", - "bundled": true, - "dev": true - }, - "hosted-git-info": { - "version": "2.8.8", - "bundled": true, - "dev": true - }, - "http-cache-semantics": { - "version": "3.8.1", - "bundled": true, - "dev": true - }, - "http-proxy-agent": { - "version": "2.1.0", - "bundled": true, - "dev": true, - "requires": { - "agent-base": "4", - "debug": "3.1.0" - } - }, - "http-signature": { - "version": "1.2.0", - "bundled": true, - "dev": true, - "requires": { - "assert-plus": "^1.0.0", - "jsprim": "^1.2.2", - "sshpk": "^1.7.0" - } - }, - "https-proxy-agent": { - "version": "2.2.4", - "bundled": true, - "dev": true, - "requires": { - "agent-base": "^4.3.0", - "debug": "^3.1.0" - } - }, - "humanize-ms": { - "version": "1.2.1", - "bundled": true, - "dev": true, - "requires": { - "ms": "^2.0.0" - } - }, - "iconv-lite": { - "version": "0.4.23", - "bundled": true, - "dev": true, - "requires": { - "safer-buffer": ">= 2.1.2 < 3" - } - }, - "iferr": { - "version": "1.0.2", - "bundled": true, - "dev": true - }, - "ignore-walk": { - "version": "3.0.3", - "bundled": true, - "dev": true, - "requires": { - "minimatch": "^3.0.4" - } - }, - "import-lazy": { - "version": "2.1.0", - "bundled": true, - "dev": true - }, - "imurmurhash": { - "version": "0.1.4", - "bundled": true, - "dev": true - }, - "infer-owner": { - "version": "1.0.4", - "bundled": true, - "dev": true - }, - "inflight": { - "version": "1.0.6", - "bundled": true, - "dev": true, - "requires": { - "once": "^1.3.0", - "wrappy": "1" - } - }, - "inherits": { - "version": "2.0.4", - "bundled": true, - "dev": true - }, - "init-package-json": { - "version": "1.10.3", - "bundled": true, - "dev": true, - "requires": { - "glob": "^7.1.1", - "npm-package-arg": "^4.0.0 || ^5.0.0 || ^6.0.0", - "promzard": "^0.3.0", - "read": "~1.0.1", - "read-package-json": "1 || 2", - "semver": "2.x || 3.x || 4 || 5", - 
"validate-npm-package-license": "^3.0.1", - "validate-npm-package-name": "^3.0.0" - } - }, - "ip": { - "version": "1.1.5", - "bundled": true, - "dev": true - }, - "ip-regex": { - "version": "2.1.0", - "bundled": true, - "dev": true - }, - "is-callable": { - "version": "1.1.4", - "bundled": true, - "dev": true - }, - "is-ci": { - "version": "1.2.1", - "bundled": true, - "dev": true, - "requires": { - "ci-info": "^1.5.0" - }, - "dependencies": { - "ci-info": { - "version": "1.6.0", - "bundled": true, - "dev": true - } - } - }, - "is-cidr": { - "version": "3.0.0", - "bundled": true, - "dev": true, - "requires": { - "cidr-regex": "^2.0.10" - } - }, - "is-date-object": { - "version": "1.0.1", - "bundled": true, - "dev": true - }, - "is-fullwidth-code-point": { - "version": "1.0.0", - "bundled": true, - "dev": true, - "requires": { - "number-is-nan": "^1.0.0" - } - }, - "is-installed-globally": { - "version": "0.1.0", - "bundled": true, - "dev": true, - "requires": { - "global-dirs": "^0.1.0", - "is-path-inside": "^1.0.0" - } - }, - "is-npm": { - "version": "1.0.0", - "bundled": true, - "dev": true - }, - "is-obj": { - "version": "1.0.1", - "bundled": true, - "dev": true - }, - "is-path-inside": { - "version": "1.0.1", - "bundled": true, - "dev": true, - "requires": { - "path-is-inside": "^1.0.1" - } - }, - "is-redirect": { - "version": "1.0.0", - "bundled": true, - "dev": true - }, - "is-regex": { - "version": "1.0.4", - "bundled": true, - "dev": true, - "requires": { - "has": "^1.0.1" - } - }, - "is-retry-allowed": { - "version": "1.2.0", - "bundled": true, - "dev": true - }, - "is-stream": { - "version": "1.1.0", - "bundled": true, - "dev": true - }, - "is-symbol": { - "version": "1.0.2", - "bundled": true, - "dev": true, - "requires": { - "has-symbols": "^1.0.0" - } - }, - "is-typedarray": { - "version": "1.0.0", - "bundled": true, - "dev": true - }, - "isarray": { - "version": "1.0.0", - "bundled": true, - "dev": true - }, - "isexe": { - "version": "2.0.0", - 
"bundled": true, - "dev": true - }, - "isstream": { - "version": "0.1.2", - "bundled": true, - "dev": true - }, - "jsbn": { - "version": "0.1.1", - "bundled": true, - "dev": true, - "optional": true - }, - "json-parse-better-errors": { - "version": "1.0.2", - "bundled": true, - "dev": true - }, - "json-schema": { - "version": "0.2.3", - "bundled": true, - "dev": true - }, - "json-schema-traverse": { - "version": "0.3.1", - "bundled": true, - "dev": true - }, - "json-stringify-safe": { - "version": "5.0.1", - "bundled": true, - "dev": true - }, - "jsonparse": { - "version": "1.3.1", - "bundled": true, - "dev": true - }, - "jsprim": { - "version": "1.4.1", - "bundled": true, - "dev": true, - "requires": { - "assert-plus": "1.0.0", - "extsprintf": "1.3.0", - "json-schema": "0.2.3", - "verror": "1.10.0" - } - }, - "latest-version": { - "version": "3.1.0", - "bundled": true, - "dev": true, - "requires": { - "package-json": "^4.0.0" - } - }, - "lazy-property": { - "version": "1.0.0", - "bundled": true, - "dev": true - }, - "libcipm": { - "version": "4.0.8", - "bundled": true, - "dev": true, - "requires": { - "bin-links": "^1.1.2", - "bluebird": "^3.5.1", - "figgy-pudding": "^3.5.1", - "find-npm-prefix": "^1.0.2", - "graceful-fs": "^4.1.11", - "ini": "^1.3.5", - "lock-verify": "^2.1.0", - "mkdirp": "^0.5.1", - "npm-lifecycle": "^3.0.0", - "npm-logical-tree": "^1.2.1", - "npm-package-arg": "^6.1.0", - "pacote": "^9.1.0", - "read-package-json": "^2.0.13", - "rimraf": "^2.6.2", - "worker-farm": "^1.6.0" - } - }, - "libnpm": { - "version": "3.0.1", - "bundled": true, - "dev": true, - "requires": { - "bin-links": "^1.1.2", - "bluebird": "^3.5.3", - "find-npm-prefix": "^1.0.2", - "libnpmaccess": "^3.0.2", - "libnpmconfig": "^1.2.1", - "libnpmhook": "^5.0.3", - "libnpmorg": "^1.0.1", - "libnpmpublish": "^1.1.2", - "libnpmsearch": "^2.0.2", - "libnpmteam": "^1.0.2", - "lock-verify": "^2.0.2", - "npm-lifecycle": "^3.0.0", - "npm-logical-tree": "^1.2.1", - "npm-package-arg": 
"^6.1.0", - "npm-profile": "^4.0.2", - "npm-registry-fetch": "^4.0.0", - "npmlog": "^4.1.2", - "pacote": "^9.5.3", - "read-package-json": "^2.0.13", - "stringify-package": "^1.0.0" - } - }, - "libnpmaccess": { - "version": "3.0.2", - "bundled": true, - "dev": true, - "requires": { - "aproba": "^2.0.0", - "get-stream": "^4.0.0", - "npm-package-arg": "^6.1.0", - "npm-registry-fetch": "^4.0.0" - } - }, - "libnpmconfig": { - "version": "1.2.1", - "bundled": true, - "dev": true, - "requires": { - "figgy-pudding": "^3.5.1", - "find-up": "^3.0.0", - "ini": "^1.3.5" - }, - "dependencies": { - "find-up": { - "version": "3.0.0", - "bundled": true, - "dev": true, - "requires": { - "locate-path": "^3.0.0" - } - }, - "locate-path": { - "version": "3.0.0", - "bundled": true, - "dev": true, - "requires": { - "p-locate": "^3.0.0", - "path-exists": "^3.0.0" - } - }, - "p-limit": { - "version": "2.2.0", - "bundled": true, - "dev": true, - "requires": { - "p-try": "^2.0.0" - } - }, - "p-locate": { - "version": "3.0.0", - "bundled": true, - "dev": true, - "requires": { - "p-limit": "^2.0.0" - } - }, - "p-try": { - "version": "2.2.0", - "bundled": true, - "dev": true - } - } - }, - "libnpmhook": { - "version": "5.0.3", - "bundled": true, - "dev": true, - "requires": { - "aproba": "^2.0.0", - "figgy-pudding": "^3.4.1", - "get-stream": "^4.0.0", - "npm-registry-fetch": "^4.0.0" - } - }, - "libnpmorg": { - "version": "1.0.1", - "bundled": true, - "dev": true, - "requires": { - "aproba": "^2.0.0", - "figgy-pudding": "^3.4.1", - "get-stream": "^4.0.0", - "npm-registry-fetch": "^4.0.0" - } - }, - "libnpmpublish": { - "version": "1.1.2", - "bundled": true, - "dev": true, - "requires": { - "aproba": "^2.0.0", - "figgy-pudding": "^3.5.1", - "get-stream": "^4.0.0", - "lodash.clonedeep": "^4.5.0", - "normalize-package-data": "^2.4.0", - "npm-package-arg": "^6.1.0", - "npm-registry-fetch": "^4.0.0", - "semver": "^5.5.1", - "ssri": "^6.0.1" - } - }, - "libnpmsearch": { - "version": "2.0.2", - 
"bundled": true, - "dev": true, - "requires": { - "figgy-pudding": "^3.5.1", - "get-stream": "^4.0.0", - "npm-registry-fetch": "^4.0.0" - } - }, - "libnpmteam": { - "version": "1.0.2", - "bundled": true, - "dev": true, - "requires": { - "aproba": "^2.0.0", - "figgy-pudding": "^3.4.1", - "get-stream": "^4.0.0", - "npm-registry-fetch": "^4.0.0" - } - }, - "libnpx": { - "version": "10.2.4", - "bundled": true, - "dev": true, - "requires": { - "dotenv": "^5.0.1", - "npm-package-arg": "^6.0.0", - "rimraf": "^2.6.2", - "safe-buffer": "^5.1.0", - "update-notifier": "^2.3.0", - "which": "^1.3.0", - "y18n": "^4.0.0", - "yargs": "^14.2.3" - } - }, - "lock-verify": { - "version": "2.1.0", - "bundled": true, - "dev": true, - "requires": { - "npm-package-arg": "^6.1.0", - "semver": "^5.4.1" - } - }, - "lockfile": { - "version": "1.0.4", - "bundled": true, - "dev": true, - "requires": { - "signal-exit": "^3.0.2" - } - }, - "lodash._baseindexof": { - "version": "3.1.0", - "bundled": true, - "dev": true - }, - "lodash._baseuniq": { - "version": "4.6.0", - "bundled": true, - "dev": true, - "requires": { - "lodash._createset": "~4.0.0", - "lodash._root": "~3.0.0" - } - }, - "lodash._bindcallback": { - "version": "3.0.1", - "bundled": true, - "dev": true - }, - "lodash._cacheindexof": { - "version": "3.0.2", - "bundled": true, - "dev": true - }, - "lodash._createcache": { - "version": "3.1.2", - "bundled": true, - "dev": true, - "requires": { - "lodash._getnative": "^3.0.0" - } - }, - "lodash._createset": { - "version": "4.0.3", - "bundled": true, - "dev": true - }, - "lodash._getnative": { - "version": "3.9.1", - "bundled": true, - "dev": true - }, - "lodash._root": { - "version": "3.0.1", - "bundled": true, - "dev": true - }, - "lodash.clonedeep": { - "version": "4.5.0", - "bundled": true, - "dev": true - }, - "lodash.restparam": { - "version": "3.6.1", - "bundled": true, - "dev": true - }, - "lodash.union": { - "version": "4.6.0", - "bundled": true, - "dev": true - }, - 
"lodash.uniq": { - "version": "4.5.0", - "bundled": true, - "dev": true - }, - "lodash.without": { - "version": "4.4.0", - "bundled": true, - "dev": true - }, - "lowercase-keys": { - "version": "1.0.1", - "bundled": true, - "dev": true - }, - "lru-cache": { - "version": "5.1.1", - "bundled": true, - "dev": true, - "requires": { - "yallist": "^3.0.2" - } - }, - "make-dir": { - "version": "1.3.0", - "bundled": true, - "dev": true, - "requires": { - "pify": "^3.0.0" - } - }, - "make-fetch-happen": { - "version": "5.0.2", - "bundled": true, - "dev": true, - "requires": { - "agentkeepalive": "^3.4.1", - "cacache": "^12.0.0", - "http-cache-semantics": "^3.8.1", - "http-proxy-agent": "^2.1.0", - "https-proxy-agent": "^2.2.3", - "lru-cache": "^5.1.1", - "mississippi": "^3.0.0", - "node-fetch-npm": "^2.0.2", - "promise-retry": "^1.1.1", - "socks-proxy-agent": "^4.0.0", - "ssri": "^6.0.0" - } - }, - "meant": { - "version": "1.0.2", - "bundled": true, - "dev": true - }, - "mime-db": { - "version": "1.35.0", - "bundled": true, - "dev": true - }, - "mime-types": { - "version": "2.1.19", - "bundled": true, - "dev": true, - "requires": { - "mime-db": "~1.35.0" - } - }, - "minimatch": { - "version": "3.0.4", - "bundled": true, - "dev": true, - "requires": { - "brace-expansion": "^1.1.7" - } - }, - "minimist": { - "version": "1.2.5", - "bundled": true, - "dev": true - }, - "minizlib": { - "version": "1.3.3", - "bundled": true, - "dev": true, - "requires": { - "minipass": "^2.9.0" - }, - "dependencies": { - "minipass": { - "version": "2.9.0", - "bundled": true, - "dev": true, - "requires": { - "safe-buffer": "^5.1.2", - "yallist": "^3.0.0" - } - } - } - }, - "mississippi": { - "version": "3.0.0", - "bundled": true, - "dev": true, - "requires": { - "concat-stream": "^1.5.0", - "duplexify": "^3.4.2", - "end-of-stream": "^1.1.0", - "flush-write-stream": "^1.0.0", - "from2": "^2.1.0", - "parallel-transform": "^1.1.0", - "pump": "^3.0.0", - "pumpify": "^1.3.3", - "stream-each": "^1.1.0", 
- "through2": "^2.0.0" - } - }, - "mkdirp": { - "version": "0.5.5", - "bundled": true, - "dev": true, - "requires": { - "minimist": "^1.2.5" - }, - "dependencies": { - "minimist": { - "version": "1.2.5", - "bundled": true, - "dev": true - } - } - }, - "move-concurrently": { - "version": "1.0.1", - "bundled": true, - "dev": true, - "requires": { - "aproba": "^1.1.1", - "copy-concurrently": "^1.0.0", - "fs-write-stream-atomic": "^1.0.8", - "mkdirp": "^0.5.1", - "rimraf": "^2.5.4", - "run-queue": "^1.0.3" - }, - "dependencies": { - "aproba": { - "version": "1.2.0", - "bundled": true, - "dev": true - } - } - }, - "ms": { - "version": "2.1.1", - "bundled": true, - "dev": true - }, - "mute-stream": { - "version": "0.0.7", - "bundled": true, - "dev": true - }, - "node-fetch-npm": { - "version": "2.0.2", - "bundled": true, - "dev": true, - "requires": { - "encoding": "^0.1.11", - "json-parse-better-errors": "^1.0.0", - "safe-buffer": "^5.1.1" - } - }, - "node-gyp": { - "version": "5.1.0", - "bundled": true, - "dev": true, - "requires": { - "env-paths": "^2.2.0", - "glob": "^7.1.4", - "graceful-fs": "^4.2.2", - "mkdirp": "^0.5.1", - "nopt": "^4.0.1", - "npmlog": "^4.1.2", - "request": "^2.88.0", - "rimraf": "^2.6.3", - "semver": "^5.7.1", - "tar": "^4.4.12", - "which": "^1.3.1" - } - }, - "nopt": { - "version": "4.0.3", - "bundled": true, - "dev": true, - "requires": { - "abbrev": "1", - "osenv": "^0.1.4" - } - }, - "normalize-package-data": { - "version": "2.5.0", - "bundled": true, - "dev": true, - "requires": { - "hosted-git-info": "^2.1.4", - "resolve": "^1.10.0", - "semver": "2 || 3 || 4 || 5", - "validate-npm-package-license": "^3.0.1" - }, - "dependencies": { - "resolve": { - "version": "1.10.0", - "bundled": true, - "dev": true, - "requires": { - "path-parse": "^1.0.6" - } - } - } - }, - "npm-audit-report": { - "version": "1.3.3", - "bundled": true, - "dev": true, - "requires": { - "cli-table3": "^0.5.0", - "console-control-strings": "^1.1.0" - } - }, - 
"npm-bundled": { - "version": "1.1.1", - "bundled": true, - "dev": true, - "requires": { - "npm-normalize-package-bin": "^1.0.1" - } - }, - "npm-cache-filename": { - "version": "1.0.2", - "bundled": true, - "dev": true - }, - "npm-install-checks": { - "version": "3.0.2", - "bundled": true, - "dev": true, - "requires": { - "semver": "^2.3.0 || 3.x || 4 || 5" - } - }, - "npm-lifecycle": { - "version": "3.1.5", - "bundled": true, - "dev": true, - "requires": { - "byline": "^5.0.0", - "graceful-fs": "^4.1.15", - "node-gyp": "^5.0.2", - "resolve-from": "^4.0.0", - "slide": "^1.1.6", - "uid-number": "0.0.6", - "umask": "^1.1.0", - "which": "^1.3.1" - } - }, - "npm-logical-tree": { - "version": "1.2.1", - "bundled": true, - "dev": true - }, - "npm-normalize-package-bin": { - "version": "1.0.1", - "bundled": true, - "dev": true - }, - "npm-package-arg": { - "version": "6.1.1", - "bundled": true, - "dev": true, - "requires": { - "hosted-git-info": "^2.7.1", - "osenv": "^0.1.5", - "semver": "^5.6.0", - "validate-npm-package-name": "^3.0.0" - } - }, - "npm-packlist": { - "version": "1.4.8", - "bundled": true, - "dev": true, - "requires": { - "ignore-walk": "^3.0.1", - "npm-bundled": "^1.0.1", - "npm-normalize-package-bin": "^1.0.1" - } - }, - "npm-pick-manifest": { - "version": "3.0.2", - "bundled": true, - "dev": true, - "requires": { - "figgy-pudding": "^3.5.1", - "npm-package-arg": "^6.0.0", - "semver": "^5.4.1" - } - }, - "npm-profile": { - "version": "4.0.4", - "bundled": true, - "dev": true, - "requires": { - "aproba": "^1.1.2 || 2", - "figgy-pudding": "^3.4.1", - "npm-registry-fetch": "^4.0.0" - } - }, - "npm-registry-fetch": { - "version": "4.0.7", - "bundled": true, - "dev": true, - "requires": { - "JSONStream": "^1.3.4", - "bluebird": "^3.5.1", - "figgy-pudding": "^3.4.1", - "lru-cache": "^5.1.1", - "make-fetch-happen": "^5.0.0", - "npm-package-arg": "^6.1.0", - "safe-buffer": "^5.2.0" - }, - "dependencies": { - "safe-buffer": { - "version": "5.2.1", - "bundled": 
true, - "dev": true - } - } - }, - "npm-run-path": { - "version": "2.0.2", - "bundled": true, - "dev": true, - "requires": { - "path-key": "^2.0.0" - } - }, - "npm-user-validate": { - "version": "1.0.0", - "bundled": true, - "dev": true - }, - "npmlog": { - "version": "4.1.2", - "bundled": true, - "dev": true, - "requires": { - "are-we-there-yet": "~1.1.2", - "console-control-strings": "~1.1.0", - "gauge": "~2.7.3", - "set-blocking": "~2.0.0" - } - }, - "number-is-nan": { - "version": "1.0.1", - "bundled": true, - "dev": true - }, - "oauth-sign": { - "version": "0.9.0", - "bundled": true, - "dev": true - }, - "object-assign": { - "version": "4.1.1", - "bundled": true, - "dev": true - }, - "object-keys": { - "version": "1.0.12", - "bundled": true, - "dev": true - }, - "object.getownpropertydescriptors": { - "version": "2.0.3", - "bundled": true, - "dev": true, - "requires": { - "define-properties": "^1.1.2", - "es-abstract": "^1.5.1" - } - }, - "once": { - "version": "1.4.0", - "bundled": true, - "dev": true, - "requires": { - "wrappy": "1" - } - }, - "opener": { - "version": "1.5.1", - "bundled": true, - "dev": true - }, - "os-homedir": { - "version": "1.0.2", - "bundled": true, - "dev": true - }, - "os-tmpdir": { - "version": "1.0.2", - "bundled": true, - "dev": true - }, - "osenv": { - "version": "0.1.5", - "bundled": true, - "dev": true, - "requires": { - "os-homedir": "^1.0.0", - "os-tmpdir": "^1.0.0" - } - }, - "p-finally": { - "version": "1.0.0", - "bundled": true, - "dev": true - }, - "package-json": { - "version": "4.0.1", - "bundled": true, - "dev": true, - "requires": { - "got": "^6.7.1", - "registry-auth-token": "^3.0.1", - "registry-url": "^3.0.3", - "semver": "^5.1.0" - } - }, - "pacote": { - "version": "9.5.12", - "bundled": true, - "dev": true, - "requires": { - "bluebird": "^3.5.3", - "cacache": "^12.0.2", - "chownr": "^1.1.2", - "figgy-pudding": "^3.5.1", - "get-stream": "^4.1.0", - "glob": "^7.1.3", - "infer-owner": "^1.0.4", - "lru-cache": 
"^5.1.1", - "make-fetch-happen": "^5.0.0", - "minimatch": "^3.0.4", - "minipass": "^2.3.5", - "mississippi": "^3.0.0", - "mkdirp": "^0.5.1", - "normalize-package-data": "^2.4.0", - "npm-normalize-package-bin": "^1.0.0", - "npm-package-arg": "^6.1.0", - "npm-packlist": "^1.1.12", - "npm-pick-manifest": "^3.0.0", - "npm-registry-fetch": "^4.0.0", - "osenv": "^0.1.5", - "promise-inflight": "^1.0.1", - "promise-retry": "^1.1.1", - "protoduck": "^5.0.1", - "rimraf": "^2.6.2", - "safe-buffer": "^5.1.2", - "semver": "^5.6.0", - "ssri": "^6.0.1", - "tar": "^4.4.10", - "unique-filename": "^1.1.1", - "which": "^1.3.1" - }, - "dependencies": { - "minipass": { - "version": "2.9.0", - "bundled": true, - "dev": true, - "requires": { - "safe-buffer": "^5.1.2", - "yallist": "^3.0.0" - } - } - } - }, - "parallel-transform": { - "version": "1.1.0", - "bundled": true, - "dev": true, - "requires": { - "cyclist": "~0.2.2", - "inherits": "^2.0.3", - "readable-stream": "^2.1.5" - }, - "dependencies": { - "readable-stream": { - "version": "2.3.6", - "bundled": true, - "dev": true, - "requires": { - "core-util-is": "~1.0.0", - "inherits": "~2.0.3", - "isarray": "~1.0.0", - "process-nextick-args": "~2.0.0", - "safe-buffer": "~5.1.1", - "string_decoder": "~1.1.1", - "util-deprecate": "~1.0.1" - } - }, - "string_decoder": { - "version": "1.1.1", - "bundled": true, - "dev": true, - "requires": { - "safe-buffer": "~5.1.0" - } - } - } - }, - "path-exists": { - "version": "3.0.0", - "bundled": true, - "dev": true - }, - "path-is-absolute": { - "version": "1.0.1", - "bundled": true, - "dev": true - }, - "path-is-inside": { - "version": "1.0.2", - "bundled": true, - "dev": true - }, - "path-key": { - "version": "2.0.1", - "bundled": true, - "dev": true - }, - "path-parse": { - "version": "1.0.6", - "bundled": true, - "dev": true - }, - "performance-now": { - "version": "2.1.0", - "bundled": true, - "dev": true - }, - "pify": { - "version": "3.0.0", - "bundled": true, - "dev": true - }, - 
"prepend-http": { - "version": "1.0.4", - "bundled": true, - "dev": true - }, - "process-nextick-args": { - "version": "2.0.0", - "bundled": true, - "dev": true - }, - "promise-inflight": { - "version": "1.0.1", - "bundled": true, - "dev": true - }, - "promise-retry": { - "version": "1.1.1", - "bundled": true, - "dev": true, - "requires": { - "err-code": "^1.0.0", - "retry": "^0.10.0" - }, - "dependencies": { - "retry": { - "version": "0.10.1", - "bundled": true, - "dev": true - } - } - }, - "promzard": { - "version": "0.3.0", - "bundled": true, - "dev": true, - "requires": { - "read": "1" - } - }, - "proto-list": { - "version": "1.2.4", - "bundled": true, - "dev": true - }, - "protoduck": { - "version": "5.0.1", - "bundled": true, - "dev": true, - "requires": { - "genfun": "^5.0.0" - } - }, - "prr": { - "version": "1.0.1", - "bundled": true, - "dev": true - }, - "pseudomap": { - "version": "1.0.2", - "bundled": true, - "dev": true - }, - "psl": { - "version": "1.1.29", - "bundled": true, - "dev": true - }, - "pump": { - "version": "3.0.0", - "bundled": true, - "dev": true, - "requires": { - "end-of-stream": "^1.1.0", - "once": "^1.3.1" - } - }, - "pumpify": { - "version": "1.5.1", - "bundled": true, - "dev": true, - "requires": { - "duplexify": "^3.6.0", - "inherits": "^2.0.3", - "pump": "^2.0.0" - }, - "dependencies": { - "pump": { - "version": "2.0.1", - "bundled": true, - "dev": true, - "requires": { - "end-of-stream": "^1.1.0", - "once": "^1.3.1" - } - } - } - }, - "punycode": { - "version": "1.4.1", - "bundled": true, - "dev": true - }, - "qrcode-terminal": { - "version": "0.12.0", - "bundled": true, - "dev": true - }, - "qs": { - "version": "6.5.2", - "bundled": true, - "dev": true - }, - "query-string": { - "version": "6.8.2", - "bundled": true, - "dev": true, - "requires": { - "decode-uri-component": "^0.2.0", - "split-on-first": "^1.0.0", - "strict-uri-encode": "^2.0.0" - } - }, - "qw": { - "version": "1.0.1", - "bundled": true, - "dev": true - }, - "rc": 
{ - "version": "1.2.8", - "bundled": true, - "dev": true, - "requires": { - "deep-extend": "^0.6.0", - "ini": "~1.3.0", - "minimist": "^1.2.0", - "strip-json-comments": "~2.0.1" - } - }, - "read": { - "version": "1.0.7", - "bundled": true, - "dev": true, - "requires": { - "mute-stream": "~0.0.4" - } - }, - "read-cmd-shim": { - "version": "1.0.5", - "bundled": true, - "dev": true, - "requires": { - "graceful-fs": "^4.1.2" - } - }, - "read-installed": { - "version": "4.0.3", - "bundled": true, - "dev": true, - "requires": { - "debuglog": "^1.0.1", - "graceful-fs": "^4.1.2", - "read-package-json": "^2.0.0", - "readdir-scoped-modules": "^1.0.0", - "semver": "2 || 3 || 4 || 5", - "slide": "~1.1.3", - "util-extend": "^1.0.1" - } - }, - "read-package-json": { - "version": "2.1.1", - "bundled": true, - "dev": true, - "requires": { - "glob": "^7.1.1", - "graceful-fs": "^4.1.2", - "json-parse-better-errors": "^1.0.1", - "normalize-package-data": "^2.0.0", - "npm-normalize-package-bin": "^1.0.0" - } - }, - "read-package-tree": { - "version": "5.3.1", - "bundled": true, - "dev": true, - "requires": { - "read-package-json": "^2.0.0", - "readdir-scoped-modules": "^1.0.0", - "util-promisify": "^2.1.0" - } - }, - "readable-stream": { - "version": "3.6.0", - "bundled": true, - "dev": true, - "requires": { - "inherits": "^2.0.3", - "string_decoder": "^1.1.1", - "util-deprecate": "^1.0.1" - } - }, - "readdir-scoped-modules": { - "version": "1.1.0", - "bundled": true, - "dev": true, - "requires": { - "debuglog": "^1.0.1", - "dezalgo": "^1.0.0", - "graceful-fs": "^4.1.2", - "once": "^1.3.0" - } - }, - "registry-auth-token": { - "version": "3.4.0", - "bundled": true, - "dev": true, - "requires": { - "rc": "^1.1.6", - "safe-buffer": "^5.0.1" - } - }, - "registry-url": { - "version": "3.1.0", - "bundled": true, - "dev": true, - "requires": { - "rc": "^1.0.1" - } - }, - "request": { - "version": "2.88.0", - "bundled": true, - "dev": true, - "requires": { - "aws-sign2": "~0.7.0", - "aws4": 
"^1.8.0", - "caseless": "~0.12.0", - "combined-stream": "~1.0.6", - "extend": "~3.0.2", - "forever-agent": "~0.6.1", - "form-data": "~2.3.2", - "har-validator": "~5.1.0", - "http-signature": "~1.2.0", - "is-typedarray": "~1.0.0", - "isstream": "~0.1.2", - "json-stringify-safe": "~5.0.1", - "mime-types": "~2.1.19", - "oauth-sign": "~0.9.0", - "performance-now": "^2.1.0", - "qs": "~6.5.2", - "safe-buffer": "^5.1.2", - "tough-cookie": "~2.4.3", - "tunnel-agent": "^0.6.0", - "uuid": "^3.3.2" - } - }, - "require-directory": { - "version": "2.1.1", - "bundled": true, - "dev": true - }, - "require-main-filename": { - "version": "2.0.0", - "bundled": true, - "dev": true - }, - "resolve-from": { - "version": "4.0.0", - "bundled": true, - "dev": true - }, - "retry": { - "version": "0.12.0", - "bundled": true, - "dev": true - }, - "rimraf": { - "version": "2.7.1", - "bundled": true, - "dev": true, - "requires": { - "glob": "^7.1.3" - } - }, - "run-queue": { - "version": "1.0.3", - "bundled": true, - "dev": true, - "requires": { - "aproba": "^1.1.1" - }, - "dependencies": { - "aproba": { - "version": "1.2.0", - "bundled": true, - "dev": true - } - } - }, - "safe-buffer": { - "version": "5.1.2", - "bundled": true, - "dev": true - }, - "safer-buffer": { - "version": "2.1.2", - "bundled": true, - "dev": true - }, - "semver": { - "version": "5.7.1", - "bundled": true, - "dev": true - }, - "semver-diff": { - "version": "2.1.0", - "bundled": true, - "dev": true, - "requires": { - "semver": "^5.0.3" - } - }, - "set-blocking": { - "version": "2.0.0", - "bundled": true, - "dev": true - }, - "sha": { - "version": "3.0.0", - "bundled": true, - "dev": true, - "requires": { - "graceful-fs": "^4.1.2" - } - }, - "shebang-command": { - "version": "1.2.0", - "bundled": true, - "dev": true, - "requires": { - "shebang-regex": "^1.0.0" - } - }, - "shebang-regex": { - "version": "1.0.0", - "bundled": true, - "dev": true - }, - "signal-exit": { - "version": "3.0.2", - "bundled": true, - "dev": true 
- }, - "slide": { - "version": "1.1.6", - "bundled": true, - "dev": true - }, - "smart-buffer": { - "version": "4.1.0", - "bundled": true, - "dev": true - }, - "socks": { - "version": "2.3.3", - "bundled": true, - "dev": true, - "requires": { - "ip": "1.1.5", - "smart-buffer": "^4.1.0" - } - }, - "socks-proxy-agent": { - "version": "4.0.2", - "bundled": true, - "dev": true, - "requires": { - "agent-base": "~4.2.1", - "socks": "~2.3.2" - }, - "dependencies": { - "agent-base": { - "version": "4.2.1", - "bundled": true, - "dev": true, - "requires": { - "es6-promisify": "^5.0.0" - } - } - } - }, - "sorted-object": { - "version": "2.0.1", - "bundled": true, - "dev": true - }, - "sorted-union-stream": { - "version": "2.1.3", - "bundled": true, - "dev": true, - "requires": { - "from2": "^1.3.0", - "stream-iterate": "^1.1.0" - }, - "dependencies": { - "from2": { - "version": "1.3.0", - "bundled": true, - "dev": true, - "requires": { - "inherits": "~2.0.1", - "readable-stream": "~1.1.10" - } - }, - "isarray": { - "version": "0.0.1", - "bundled": true, - "dev": true - }, - "readable-stream": { - "version": "1.1.14", - "bundled": true, - "dev": true, - "requires": { - "core-util-is": "~1.0.0", - "inherits": "~2.0.1", - "isarray": "0.0.1", - "string_decoder": "~0.10.x" - } - }, - "string_decoder": { - "version": "0.10.31", - "bundled": true, - "dev": true - } - } - }, - "spdx-correct": { - "version": "3.0.0", - "bundled": true, - "dev": true, - "requires": { - "spdx-expression-parse": "^3.0.0", - "spdx-license-ids": "^3.0.0" - } - }, - "spdx-exceptions": { - "version": "2.1.0", - "bundled": true, - "dev": true - }, - "spdx-expression-parse": { - "version": "3.0.0", - "bundled": true, - "dev": true, - "requires": { - "spdx-exceptions": "^2.1.0", - "spdx-license-ids": "^3.0.0" - } - }, - "spdx-license-ids": { - "version": "3.0.5", - "bundled": true, - "dev": true - }, - "split-on-first": { - "version": "1.1.0", - "bundled": true, - "dev": true - }, - "sshpk": { - "version": 
"1.14.2", - "bundled": true, - "dev": true, - "requires": { - "asn1": "~0.2.3", - "assert-plus": "^1.0.0", - "bcrypt-pbkdf": "^1.0.0", - "dashdash": "^1.12.0", - "ecc-jsbn": "~0.1.1", - "getpass": "^0.1.1", - "jsbn": "~0.1.0", - "safer-buffer": "^2.0.2", - "tweetnacl": "~0.14.0" - } - }, - "ssri": { - "version": "6.0.1", - "bundled": true, - "dev": true, - "requires": { - "figgy-pudding": "^3.5.1" - } - }, - "stream-each": { - "version": "1.2.2", - "bundled": true, - "dev": true, - "requires": { - "end-of-stream": "^1.1.0", - "stream-shift": "^1.0.0" - } - }, - "stream-iterate": { - "version": "1.2.0", - "bundled": true, - "dev": true, - "requires": { - "readable-stream": "^2.1.5", - "stream-shift": "^1.0.0" - }, - "dependencies": { - "readable-stream": { - "version": "2.3.6", - "bundled": true, - "dev": true, - "requires": { - "core-util-is": "~1.0.0", - "inherits": "~2.0.3", - "isarray": "~1.0.0", - "process-nextick-args": "~2.0.0", - "safe-buffer": "~5.1.1", - "string_decoder": "~1.1.1", - "util-deprecate": "~1.0.1" - } - }, - "string_decoder": { - "version": "1.1.1", - "bundled": true, - "dev": true, - "requires": { - "safe-buffer": "~5.1.0" - } - } - } - }, - "stream-shift": { - "version": "1.0.0", - "bundled": true, - "dev": true - }, - "strict-uri-encode": { - "version": "2.0.0", - "bundled": true, - "dev": true - }, - "string-width": { - "version": "2.1.1", - "bundled": true, - "dev": true, - "requires": { - "is-fullwidth-code-point": "^2.0.0", - "strip-ansi": "^4.0.0" - }, - "dependencies": { - "ansi-regex": { - "version": "3.0.0", - "bundled": true, - "dev": true - }, - "is-fullwidth-code-point": { - "version": "2.0.0", - "bundled": true, - "dev": true - }, - "strip-ansi": { - "version": "4.0.0", - "bundled": true, - "dev": true, - "requires": { - "ansi-regex": "^3.0.0" - } - } - } - }, - "string_decoder": { - "version": "1.3.0", - "bundled": true, - "dev": true, - "requires": { - "safe-buffer": "~5.2.0" - }, - "dependencies": { - "safe-buffer": { - 
"version": "5.2.0", - "bundled": true, - "dev": true - } - } - }, - "stringify-package": { - "version": "1.0.1", - "bundled": true, - "dev": true - }, - "strip-ansi": { - "version": "3.0.1", - "bundled": true, - "dev": true, - "requires": { - "ansi-regex": "^2.0.0" - } - }, - "strip-eof": { - "version": "1.0.0", - "bundled": true, - "dev": true - }, - "strip-json-comments": { - "version": "2.0.1", - "bundled": true, - "dev": true - }, - "supports-color": { - "version": "5.4.0", - "bundled": true, - "dev": true, - "requires": { - "has-flag": "^3.0.0" - } - }, - "tar": { - "version": "4.4.13", - "bundled": true, - "dev": true, - "requires": { - "chownr": "^1.1.1", - "fs-minipass": "^1.2.5", - "minipass": "^2.8.6", - "minizlib": "^1.2.1", - "mkdirp": "^0.5.0", - "safe-buffer": "^5.1.2", - "yallist": "^3.0.3" - }, - "dependencies": { - "minipass": { - "version": "2.9.0", - "bundled": true, - "dev": true, - "requires": { - "safe-buffer": "^5.1.2", - "yallist": "^3.0.0" - } - } - } - }, - "term-size": { - "version": "1.2.0", - "bundled": true, - "dev": true, - "requires": { - "execa": "^0.7.0" - } - }, - "text-table": { - "version": "0.2.0", - "bundled": true, - "dev": true - }, - "through": { - "version": "2.3.8", - "bundled": true, - "dev": true - }, - "through2": { - "version": "2.0.3", - "bundled": true, - "dev": true, - "requires": { - "readable-stream": "^2.1.5", - "xtend": "~4.0.1" - }, - "dependencies": { - "readable-stream": { - "version": "2.3.6", - "bundled": true, - "dev": true, - "requires": { - "core-util-is": "~1.0.0", - "inherits": "~2.0.3", - "isarray": "~1.0.0", - "process-nextick-args": "~2.0.0", - "safe-buffer": "~5.1.1", - "string_decoder": "~1.1.1", - "util-deprecate": "~1.0.1" - } - }, - "string_decoder": { - "version": "1.1.1", - "bundled": true, - "dev": true, - "requires": { - "safe-buffer": "~5.1.0" - } - } - } - }, - "timed-out": { - "version": "4.0.1", - "bundled": true, - "dev": true - }, - "tiny-relative-date": { - "version": "1.3.0", - 
"bundled": true, - "dev": true - }, - "tough-cookie": { - "version": "2.4.3", - "bundled": true, - "dev": true, - "requires": { - "psl": "^1.1.24", - "punycode": "^1.4.1" - } - }, - "tunnel-agent": { - "version": "0.6.0", - "bundled": true, - "dev": true, - "requires": { - "safe-buffer": "^5.0.1" - } - }, - "tweetnacl": { - "version": "0.14.5", - "bundled": true, - "dev": true, - "optional": true - }, - "typedarray": { - "version": "0.0.6", - "bundled": true, - "dev": true - }, - "uid-number": { - "version": "0.0.6", - "bundled": true, - "dev": true - }, - "umask": { - "version": "1.1.0", - "bundled": true, - "dev": true - }, - "unique-filename": { - "version": "1.1.1", - "bundled": true, - "dev": true, - "requires": { - "unique-slug": "^2.0.0" - } - }, - "unique-slug": { - "version": "2.0.0", - "bundled": true, - "dev": true, - "requires": { - "imurmurhash": "^0.1.4" - } - }, - "unique-string": { - "version": "1.0.0", - "bundled": true, - "dev": true, - "requires": { - "crypto-random-string": "^1.0.0" - } - }, - "unpipe": { - "version": "1.0.0", - "bundled": true, - "dev": true - }, - "unzip-response": { - "version": "2.0.1", - "bundled": true, - "dev": true - }, - "update-notifier": { - "version": "2.5.0", - "bundled": true, - "dev": true, - "requires": { - "boxen": "^1.2.1", - "chalk": "^2.0.1", - "configstore": "^3.0.0", - "import-lazy": "^2.1.0", - "is-ci": "^1.0.10", - "is-installed-globally": "^0.1.0", - "is-npm": "^1.0.0", - "latest-version": "^3.0.0", - "semver-diff": "^2.0.0", - "xdg-basedir": "^3.0.0" - } - }, - "url-parse-lax": { - "version": "1.0.0", - "bundled": true, - "dev": true, - "requires": { - "prepend-http": "^1.0.1" - } - }, - "util-deprecate": { - "version": "1.0.2", - "bundled": true, - "dev": true - }, - "util-extend": { - "version": "1.0.3", - "bundled": true, - "dev": true - }, - "util-promisify": { - "version": "2.1.0", - "bundled": true, - "dev": true, - "requires": { - "object.getownpropertydescriptors": "^2.0.3" - } - }, - "uuid": { 
- "version": "3.3.3", - "bundled": true, - "dev": true - }, - "validate-npm-package-license": { - "version": "3.0.4", - "bundled": true, - "dev": true, - "requires": { - "spdx-correct": "^3.0.0", - "spdx-expression-parse": "^3.0.0" - } - }, - "validate-npm-package-name": { - "version": "3.0.0", - "bundled": true, - "dev": true, - "requires": { - "builtins": "^1.0.3" - } - }, - "verror": { - "version": "1.10.0", - "bundled": true, - "dev": true, - "requires": { - "assert-plus": "^1.0.0", - "core-util-is": "1.0.2", - "extsprintf": "^1.2.0" - } - }, - "wcwidth": { - "version": "1.0.1", - "bundled": true, - "dev": true, - "requires": { - "defaults": "^1.0.3" - } - }, - "which": { - "version": "1.3.1", - "bundled": true, - "dev": true, - "requires": { - "isexe": "^2.0.0" - } - }, - "which-module": { - "version": "2.0.0", - "bundled": true, - "dev": true - }, - "wide-align": { - "version": "1.1.2", - "bundled": true, - "dev": true, - "requires": { - "string-width": "^1.0.2" - }, - "dependencies": { - "string-width": { - "version": "1.0.2", - "bundled": true, - "dev": true, - "requires": { - "code-point-at": "^1.0.0", - "is-fullwidth-code-point": "^1.0.0", - "strip-ansi": "^3.0.0" - } - } - } - }, - "widest-line": { - "version": "2.0.1", - "bundled": true, - "dev": true, - "requires": { - "string-width": "^2.1.1" - } - }, - "worker-farm": { - "version": "1.7.0", - "bundled": true, - "dev": true, - "requires": { - "errno": "~0.1.7" - } - }, - "wrap-ansi": { - "version": "5.1.0", - "bundled": true, - "dev": true, - "requires": { - "ansi-styles": "^3.2.0", - "string-width": "^3.0.0", - "strip-ansi": "^5.0.0" - }, - "dependencies": { - "ansi-regex": { - "version": "4.1.0", - "bundled": true, - "dev": true - }, - "is-fullwidth-code-point": { - "version": "2.0.0", - "bundled": true, - "dev": true - }, - "string-width": { - "version": "3.1.0", - "bundled": true, - "dev": true, - "requires": { - "emoji-regex": "^7.0.1", - "is-fullwidth-code-point": "^2.0.0", - "strip-ansi": 
"^5.1.0" - } - }, - "strip-ansi": { - "version": "5.2.0", - "bundled": true, - "dev": true, - "requires": { - "ansi-regex": "^4.1.0" - } - } - } - }, - "wrappy": { - "version": "1.0.2", - "bundled": true, - "dev": true - }, - "write-file-atomic": { - "version": "2.4.3", - "bundled": true, - "dev": true, - "requires": { - "graceful-fs": "^4.1.11", - "imurmurhash": "^0.1.4", - "signal-exit": "^3.0.2" - } - }, - "xdg-basedir": { - "version": "3.0.0", - "bundled": true, - "dev": true - }, - "xtend": { - "version": "4.0.1", - "bundled": true, - "dev": true - }, - "y18n": { - "version": "4.0.0", - "bundled": true, - "dev": true - }, - "yallist": { - "version": "3.0.3", - "bundled": true, - "dev": true - }, - "yargs": { - "version": "14.2.3", - "bundled": true, - "dev": true, - "requires": { - "cliui": "^5.0.0", - "decamelize": "^1.2.0", - "find-up": "^3.0.0", - "get-caller-file": "^2.0.1", - "require-directory": "^2.1.1", - "require-main-filename": "^2.0.0", - "set-blocking": "^2.0.0", - "string-width": "^3.0.0", - "which-module": "^2.0.0", - "y18n": "^4.0.0", - "yargs-parser": "^15.0.1" - }, - "dependencies": { - "ansi-regex": { - "version": "4.1.0", - "bundled": true, - "dev": true - }, - "find-up": { - "version": "3.0.0", - "bundled": true, - "dev": true, - "requires": { - "locate-path": "^3.0.0" - } - }, - "is-fullwidth-code-point": { - "version": "2.0.0", - "bundled": true, - "dev": true - }, - "locate-path": { - "version": "3.0.0", - "bundled": true, - "dev": true, - "requires": { - "p-locate": "^3.0.0", - "path-exists": "^3.0.0" - } - }, - "p-limit": { - "version": "2.3.0", - "bundled": true, - "dev": true, - "requires": { - "p-try": "^2.0.0" - } - }, - "p-locate": { - "version": "3.0.0", - "bundled": true, - "dev": true, - "requires": { - "p-limit": "^2.0.0" - } - }, - "p-try": { - "version": "2.2.0", - "bundled": true, - "dev": true - }, - "string-width": { - "version": "3.1.0", - "bundled": true, - "dev": true, - "requires": { - "emoji-regex": "^7.0.1", - 
"is-fullwidth-code-point": "^2.0.0", - "strip-ansi": "^5.1.0" - } - }, - "strip-ansi": { - "version": "5.2.0", - "bundled": true, - "dev": true, - "requires": { - "ansi-regex": "^4.1.0" - } - } - } - }, - "yargs-parser": { - "version": "15.0.1", - "bundled": true, - "dev": true, - "requires": { - "camelcase": "^5.0.0", - "decamelize": "^1.2.0" - }, - "dependencies": { - "camelcase": { - "version": "5.3.1", - "bundled": true, - "dev": true - } - } - } - } - }, - "once": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", - "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=", - "dev": true, - "requires": { - "wrappy": "1" - } - }, - "onetime": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", - "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", - "dev": true, - "requires": { - "mimic-fn": "^2.1.0" - } - }, - "ora": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/ora/-/ora-5.1.0.tgz", - "integrity": "sha512-9tXIMPvjZ7hPTbk8DFq1f7Kow/HU/pQYB60JbNq+QnGwcyhWVZaQ4hM9zQDEsPxw/muLpgiHSaumUZxCAmod/w==", - "dev": true, - "requires": { - "chalk": "^4.1.0", - "cli-cursor": "^3.1.0", - "cli-spinners": "^2.4.0", - "is-interactive": "^1.0.0", - "log-symbols": "^4.0.0", - "mute-stream": "0.0.8", - "strip-ansi": "^6.0.0", - "wcwidth": "^1.0.1" - }, - "dependencies": { - "ansi-styles": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.2.1.tgz", - "integrity": "sha512-9VGjrMsG1vePxcSweQsN20KY/c4zN0h9fLjqAbwbPfahM3t+NL+M9HC8xeXG2I8pX5NoamTGNuomEUFI7fcUjA==", - "dev": true, - "requires": { - "@types/color-name": "^1.1.1", - "color-convert": "^2.0.1" - } - }, - "chalk": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.0.tgz", - "integrity": "sha512-qwx12AxXe2Q5xQ43Ac//I6v5aXTipYrSESdOgzrN+9XjgEpyjpKuvSGaN4qE93f7TQTlerQQ8S+EQ0EyDoVL1A==", - "dev": true, 
- "requires": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - } - }, - "color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, - "requires": { - "color-name": "~1.1.4" - } - }, - "color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true - }, - "has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "dev": true - }, - "supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "dev": true, - "requires": { - "has-flag": "^4.0.0" - } - } - } - }, - "p-cancelable": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/p-cancelable/-/p-cancelable-1.1.0.tgz", - "integrity": "sha512-s73XxOZ4zpt1edZYZzvhqFa6uvQc1vwUa0K0BdtIZgQMAJj9IbebH+JkgKZc9h+B05PKHLOTl4ajG1BmNrVZlw==", - "dev": true - }, - "p-defer": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/p-defer/-/p-defer-1.0.0.tgz", - "integrity": "sha1-n26xgvbJqozXQwBKfU+WsZaw+ww=", - "dev": true - }, - "p-limit": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", - "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", - "dev": true, - "requires": { - "p-try": "^2.0.0" - } - }, - "p-locate": { - "version": "4.1.0", - "resolved": 
"https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", - "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", - "dev": true, - "requires": { - "p-limit": "^2.2.0" - } - }, - "p-map": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/p-map/-/p-map-4.0.0.tgz", - "integrity": "sha512-/bjOqmgETBYB5BoEeGVea8dmvHb2m9GLy1E9W43yeyfP6QQCZGFNa+XRceJEuDB6zqr+gKpIAmlLebMpykw/MQ==", - "dev": true, - "requires": { - "aggregate-error": "^3.0.0" - } - }, - "p-try": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", - "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", - "dev": true - }, - "package-json": { - "version": "6.5.0", - "resolved": "https://registry.npmjs.org/package-json/-/package-json-6.5.0.tgz", - "integrity": "sha512-k3bdm2n25tkyxcjSKzB5x8kfVxlMdgsbPr0GkZcwHsLpba6cBjqCt1KlcChKEvxHIcTB1FVMuwoijZ26xex5MQ==", - "dev": true, - "requires": { - "got": "^9.6.0", - "registry-auth-token": "^4.0.0", - "registry-url": "^5.0.0", - "semver": "^6.2.0" - } - }, - "parse-entities": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/parse-entities/-/parse-entities-1.2.2.tgz", - "integrity": "sha512-NzfpbxW/NPrzZ/yYSoQxyqUZMZXIdCfE0OIN4ESsnptHJECoUk3FZktxNuzQf4tjt5UEopnxpYJbvYuxIFDdsg==", - "dev": true, - "requires": { - "character-entities": "^1.0.0", - "character-entities-legacy": "^1.0.0", - "character-reference-invalid": "^1.0.0", - "is-alphanumerical": "^1.0.0", - "is-decimal": "^1.0.0", - "is-hexadecimal": "^1.0.0" - } - }, - "parse-json": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-4.0.0.tgz", - "integrity": "sha1-vjX1Qlvh9/bHRxhPmKeIy5lHfuA=", - "dev": true, - "requires": { - "error-ex": "^1.3.1", - "json-parse-better-errors": "^1.0.1" - } - }, - "parse-ms": { - "version": "2.1.0", - "resolved": 
"https://registry.npmjs.org/parse-ms/-/parse-ms-2.1.0.tgz", - "integrity": "sha512-kHt7kzLoS9VBZfUsiKjv43mr91ea+U05EyKkEtqp7vNbHxmaVuEqN7XxeEVnGrMtYOAxGrDElSi96K7EgO1zCA==", - "dev": true - }, - "path-exists": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", - "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", - "dev": true - }, - "path-is-absolute": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", - "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=", - "dev": true - }, - "path-parse": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.6.tgz", - "integrity": "sha512-GSmOT2EbHrINBf9SR7CDELwlJ8AENk3Qn7OikK4nFYAu3Ote2+JYNVvkpAEQm3/TLNEJFD/xZJjzyxg3KBWOzw==", - "dev": true - }, - "path-type": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", - "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", - "dev": true - }, - "picomatch": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.1.1.tgz", - "integrity": "sha512-OYMyqkKzK7blWO/+XZYP6w8hH0LDvkBvdvKukti+7kqYFCiEAk+gI3DWnryapc0Dau05ugGTy0foQ6mqn4AHYA==", - "dev": true - }, - "pify": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/pify/-/pify-4.0.1.tgz", - "integrity": "sha512-uB80kBFb/tfd68bVleG9T5GGsGPjJrLAUpR5PZIrhBnIaRTQRjqdJSsIKkOP6OAIFbj7GOrcudc5pNjZ+geV2g==", - "dev": true - }, - "pkg-conf": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/pkg-conf/-/pkg-conf-3.1.0.tgz", - "integrity": "sha512-m0OTbR/5VPNPqO1ph6Fqbj7Hv6QU7gR/tQW40ZqrL1rjgCU85W6C1bJn0BItuJqnR98PWzw7Z8hHeChD1WrgdQ==", - "dev": true, - "requires": { - "find-up": "^3.0.0", - "load-json-file": "^5.2.0" - }, - "dependencies": { - "find-up": { - "version": 
"3.0.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", - "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", - "dev": true, - "requires": { - "locate-path": "^3.0.0" - } - }, - "load-json-file": { - "version": "5.3.0", - "resolved": "https://registry.npmjs.org/load-json-file/-/load-json-file-5.3.0.tgz", - "integrity": "sha512-cJGP40Jc/VXUsp8/OrnyKyTZ1y6v/dphm3bioS+RrKXjK2BB6wHUd6JptZEFDGgGahMT+InnZO5i1Ei9mpC8Bw==", - "dev": true, - "requires": { - "graceful-fs": "^4.1.15", - "parse-json": "^4.0.0", - "pify": "^4.0.1", - "strip-bom": "^3.0.0", - "type-fest": "^0.3.0" - } - }, - "locate-path": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", - "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", - "dev": true, - "requires": { - "p-locate": "^3.0.0", - "path-exists": "^3.0.0" - } - }, - "p-locate": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", - "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", - "dev": true, - "requires": { - "p-limit": "^2.0.0" - } - }, - "path-exists": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", - "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=", - "dev": true - } - } - }, - "pkg-dir": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-4.2.0.tgz", - "integrity": "sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==", - "dev": true, - "requires": { - "find-up": "^4.0.0" - } - }, - "plur": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/plur/-/plur-4.0.0.tgz", - "integrity": "sha512-4UGewrYgqDFw9vV6zNV+ADmPAUAfJPKtGvb/VdpQAx25X5f3xXdGdyOEVFwkl8Hl/tl7+xbeHqSEM+D5/TirUg==", - "dev": true, - 
"requires": { - "irregular-plurals": "^3.2.0" - } - }, - "prepend-http": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/prepend-http/-/prepend-http-2.0.0.tgz", - "integrity": "sha1-6SQ0v6XqjBn0HN/UAddBo8gZ2Jc=", - "dev": true - }, - "pretty-ms": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/pretty-ms/-/pretty-ms-7.0.0.tgz", - "integrity": "sha512-J3aPWiC5e9ZeZFuSeBraGxSkGMOvulSWsxDByOcbD1Pr75YL3LSNIKIb52WXbCLE1sS5s4inBBbryjF4Y05Ceg==", - "dev": true, - "requires": { - "parse-ms": "^2.1.0" - } - }, - "process-nextick-args": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz", - "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==", - "dev": true - }, - "progress": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/progress/-/progress-2.0.3.tgz", - "integrity": "sha512-7PiHtLll5LdnKIMw100I+8xJXR5gW2QwWYkT6iJva0bXitZKa/XMrSbdmg3r2Xnaidz9Qumd0VPaMrZlF9V9sA==", - "dev": true - }, - "prr": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/prr/-/prr-1.0.1.tgz", - "integrity": "sha1-0/wRS6BplaRexok/SEzrHXj19HY=", - "dev": true - }, - "pump": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.0.tgz", - "integrity": "sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==", - "dev": true, - "requires": { - "end-of-stream": "^1.1.0", - "once": "^1.3.1" - } - }, - "pupa": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/pupa/-/pupa-2.0.1.tgz", - "integrity": "sha512-hEJH0s8PXLY/cdXh66tNEQGndDrIKNqNC5xmrysZy3i5C3oEoLna7YAOad+7u125+zH1HNXUmGEkrhb3c2VriA==", - "dev": true, - "requires": { - "escape-goat": "^2.0.0" - } - }, - "quick-lru": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/quick-lru/-/quick-lru-4.0.1.tgz", - "integrity": 
"sha512-ARhCpm70fzdcvNQfPoy49IaanKkTlRWF2JMzqhcJbhSFRZv7nPTvZJdcY7301IPmvW+/p0RgIWnQDLJxifsQ7g==", - "dev": true - }, - "rc": { - "version": "1.2.8", - "resolved": "https://registry.npmjs.org/rc/-/rc-1.2.8.tgz", - "integrity": "sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==", - "dev": true, - "requires": { - "deep-extend": "^0.6.0", - "ini": "~1.3.0", - "minimist": "^1.2.0", - "strip-json-comments": "~2.0.1" - } - }, - "read-pkg": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/read-pkg/-/read-pkg-5.2.0.tgz", - "integrity": "sha512-Ug69mNOpfvKDAc2Q8DRpMjjzdtrnv9HcSMX+4VsZxD1aZ6ZzrIE7rlzXBtWTyhULSMKg076AW6WR5iZpD0JiOg==", - "dev": true, - "requires": { - "@types/normalize-package-data": "^2.4.0", - "normalize-package-data": "^2.5.0", - "parse-json": "^5.0.0", - "type-fest": "^0.6.0" - }, - "dependencies": { - "parse-json": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.1.0.tgz", - "integrity": "sha512-+mi/lmVVNKFNVyLXV31ERiy2CY5E1/F6QtJFEzoChPRwwngMNXRDQ9GJ5WdE2Z2P4AujsOi0/+2qHID68KwfIQ==", - "dev": true, - "requires": { - "@babel/code-frame": "^7.0.0", - "error-ex": "^1.3.1", - "json-parse-even-better-errors": "^2.3.0", - "lines-and-columns": "^1.1.6" - } - }, - "type-fest": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.6.0.tgz", - "integrity": "sha512-q+MB8nYR1KDLrgr4G5yemftpMC7/QLqVndBmEEdqzmNj5dcFOO4Oo8qlwZE3ULT3+Zim1F8Kq4cBnikNhlCMlg==", - "dev": true - } - } - }, - "read-pkg-up": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/read-pkg-up/-/read-pkg-up-7.0.1.tgz", - "integrity": "sha512-zK0TB7Xd6JpCLmlLmufqykGE+/TlOePD6qKClNW7hHDKFh/J7/7gCWGR7joEQEW1bKq3a3yUZSObOoWLFQ4ohg==", - "dev": true, - "requires": { - "find-up": "^4.1.0", - "read-pkg": "^5.2.0", - "type-fest": "^0.8.1" - }, - "dependencies": { - "type-fest": { - "version": "0.8.1", - "resolved": 
"https://registry.npmjs.org/type-fest/-/type-fest-0.8.1.tgz", - "integrity": "sha512-4dbzIzqvjtgiM5rw1k5rEHtBANKmdudhGyBEajN01fEyhaAIhsoKNy6y7+IN93IfpFtwY9iqi7kD+xwKhQsNJA==", - "dev": true - } - } - }, - "readable-stream": { - "version": "2.3.6", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.6.tgz", - "integrity": "sha512-tQtKA9WIAhBF3+VLAseyMqZeBjW0AHJoxOtYqSUZNJxauErmLbVm2FW1y+J/YA9dUrAC39ITejlZWhVIwawkKw==", - "dev": true, - "requires": { - "core-util-is": "~1.0.0", - "inherits": "~2.0.3", - "isarray": "~1.0.0", - "process-nextick-args": "~2.0.0", - "safe-buffer": "~5.1.1", - "string_decoder": "~1.1.1", - "util-deprecate": "~1.0.1" - } - }, - "readdirp": { - "version": "3.4.0", - "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.4.0.tgz", - "integrity": "sha512-0xe001vZBnJEK+uKcj8qOhyAKPzIT+gStxWr3LCB0DwcXR5NZJ3IaC+yGnHCYzB/S7ov3m3EEbZI2zeNvX+hGQ==", - "dev": true, - "requires": { - "picomatch": "^2.2.1" - }, - "dependencies": { - "picomatch": { - "version": "2.2.2", - "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.2.2.tgz", - "integrity": "sha512-q0M/9eZHzmr0AulXyPwNfZjtwZ/RBZlbN3K3CErVrk50T2ASYI7Bye0EvekFY3IP1Nt2DHu0re+V2ZHIpMkuWg==", - "dev": true - } - } - }, - "rechoir": { - "version": "0.6.2", - "resolved": "https://registry.npmjs.org/rechoir/-/rechoir-0.6.2.tgz", - "integrity": "sha1-hSBLVNuoLVdC4oyWdW70OvUOM4Q=", - "dev": true, - "requires": { - "resolve": "^1.1.6" - } - }, - "redent": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/redent/-/redent-3.0.0.tgz", - "integrity": "sha512-6tDA8g98We0zd0GvVeMT9arEOnTw9qM03L9cJXaCjrip1OO764RDBLBfrB4cwzNGDj5OA5ioymC9GkizgWJDUg==", - "dev": true, - "requires": { - "indent-string": "^4.0.0", - "strip-indent": "^3.0.0" - } - }, - "registry-auth-token": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/registry-auth-token/-/registry-auth-token-4.2.0.tgz", - "integrity": 
"sha512-P+lWzPrsgfN+UEpDS3U8AQKg/UjZX6mQSJueZj3EK+vNESoqBSpBUD3gmu4sF9lOsjXWjF11dQKUqemf3veq1w==", - "dev": true, - "requires": { - "rc": "^1.2.8" - } - }, - "registry-url": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/registry-url/-/registry-url-5.1.0.tgz", - "integrity": "sha512-8acYXXTI0AkQv6RAOjE3vOaIXZkT9wo4LOFbBKYQEEnnMNBpKqdUrI6S4NT0KPIo/WVvJ5tE/X5LF/TQUf0ekw==", - "dev": true, - "requires": { - "rc": "^1.2.8" - } - }, - "remark-frontmatter": { - "version": "1.3.3", - "resolved": "https://registry.npmjs.org/remark-frontmatter/-/remark-frontmatter-1.3.3.tgz", - "integrity": "sha512-fM5eZPBvu2pVNoq3ZPW22q+5Ativ1oLozq2qYt9I2oNyxiUd/tDl0iLLntEVAegpZIslPWg1brhcP1VsaSVUag==", - "dev": true, - "requires": { - "fault": "^1.0.1", - "xtend": "^4.0.1" - } - }, - "remark-parse": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/remark-parse/-/remark-parse-5.0.0.tgz", - "integrity": "sha512-b3iXszZLH1TLoyUzrATcTQUZrwNl1rE70rVdSruJFlDaJ9z5aMkhrG43Pp68OgfHndL/ADz6V69Zow8cTQu+JA==", - "dev": true, - "requires": { - "collapse-white-space": "^1.0.2", - "is-alphabetical": "^1.0.0", - "is-decimal": "^1.0.0", - "is-whitespace-character": "^1.0.0", - "is-word-character": "^1.0.0", - "markdown-escapes": "^1.0.0", - "parse-entities": "^1.1.0", - "repeat-string": "^1.5.4", - "state-toggle": "^1.0.0", - "trim": "0.0.1", - "trim-trailing-lines": "^1.0.0", - "unherit": "^1.0.4", - "unist-util-remove-position": "^1.0.0", - "vfile-location": "^2.0.0", - "xtend": "^4.0.1" - } - }, - "repeat-string": { - "version": "1.6.1", - "resolved": "https://registry.npmjs.org/repeat-string/-/repeat-string-1.6.1.tgz", - "integrity": "sha1-jcrkcOHIirwtYA//Sndihtp15jc=", - "dev": true - }, - "replace-ext": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/replace-ext/-/replace-ext-1.0.0.tgz", - "integrity": "sha1-3mMSg3P8v3w8z6TeWkgMRaZ5WOs=", - "dev": true - }, - "require-directory": { - "version": "2.1.1", - "resolved": 
"https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", - "integrity": "sha1-jGStX9MNqxyXbiNE/+f3kqam30I=", - "dev": true - }, - "require-main-filename": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/require-main-filename/-/require-main-filename-2.0.0.tgz", - "integrity": "sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg==", - "dev": true - }, - "resolve": { - "version": "1.13.1", - "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.13.1.tgz", - "integrity": "sha512-CxqObCX8K8YtAhOBRg+lrcdn+LK+WYOS8tSjqSFbjtrI5PnS63QPhZl4+yKfrU9tdsbMu9Anr/amegT87M9Z6w==", - "dev": true, - "requires": { - "path-parse": "^1.0.6" - } - }, - "resolve-cwd": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/resolve-cwd/-/resolve-cwd-3.0.0.tgz", - "integrity": "sha512-OrZaX2Mb+rJCpH/6CpSqt9xFVpN++x01XnN2ie9g6P5/3xelLAkXWVADpdz1IHD/KFfEXyE6V0U01OQ3UO2rEg==", - "dev": true, - "requires": { - "resolve-from": "^5.0.0" - } - }, - "resolve-from": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz", - "integrity": "sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==", - "dev": true - }, - "responselike": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/responselike/-/responselike-1.0.2.tgz", - "integrity": "sha1-kYcg7ztjHFZCvgaPFa3lpG9Loec=", - "dev": true, - "requires": { - "lowercase-keys": "^1.0.0" - } - }, - "restore-cursor": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-3.1.0.tgz", - "integrity": "sha512-l+sSefzHpj5qimhFSE5a8nufZYAM3sBSVMAPtYkmC+4EH2anSGaEMXSD0izRQbu9nfyQ9y5JrVmp7E8oZrUjvA==", - "dev": true, - "requires": { - "onetime": "^5.1.0", - "signal-exit": "^3.0.2" - } - }, - "reusify": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz", - "integrity": 
"sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==", - "dev": true - }, - "rimraf": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", - "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", - "dev": true, - "requires": { - "glob": "^7.1.3" - } - }, - "run-parallel": { - "version": "1.1.9", - "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.1.9.tgz", - "integrity": "sha512-DEqnSRTDw/Tc3FXf49zedI638Z9onwUotBMiUFKmrO2sdFKIbXamXGQ3Axd4qgphxKB4kw/qP1w5kTxnfU1B9Q==", - "dev": true - }, - "safe-buffer": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", - "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", - "dev": true - }, - "semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", - "dev": true - }, - "semver-diff": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/semver-diff/-/semver-diff-3.1.1.tgz", - "integrity": "sha512-GX0Ix/CJcHyB8c4ykpHGIAvLyOwOobtM/8d+TQkAd81/bEjgPHrfba41Vpesr7jX/t8Uh+R3EX9eAS5be+jQYg==", - "dev": true, - "requires": { - "semver": "^6.3.0" - } - }, - "serialize-error": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/serialize-error/-/serialize-error-2.1.0.tgz", - "integrity": "sha1-ULZ51WNc34Rme9yOWa9OW4HV9go=", - "dev": true - }, - "set-blocking": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/set-blocking/-/set-blocking-2.0.0.tgz", - "integrity": "sha1-BF+XgtARrppoA93TgrJDkrPYkPc=", - "dev": true - }, - "shelljs": { - "version": "0.8.4", - "resolved": "https://registry.npmjs.org/shelljs/-/shelljs-0.8.4.tgz", - "integrity": 
"sha512-7gk3UZ9kOfPLIAbslLzyWeGiEqx9e3rxwZM0KE6EL8GlGwjym9Mrlx5/p33bWTu9YG6vcS4MBxYZDHYr5lr8BQ==", - "dev": true, - "requires": { - "glob": "^7.0.0", - "interpret": "^1.0.0", - "rechoir": "^0.6.2" - } - }, - "signal-exit": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.2.tgz", - "integrity": "sha1-tf3AjxKH6hF4Yo5BXiUTK3NkbG0=", - "dev": true - }, - "slash": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", - "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", - "dev": true - }, - "slice-ansi": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/slice-ansi/-/slice-ansi-3.0.0.tgz", - "integrity": "sha512-pSyv7bSTC7ig9Dcgbw9AuRNUb5k5V6oDudjZoMBSr13qpLBG7tB+zgCkARjq7xIUgdz5P1Qe8u+rSGdouOOIyQ==", - "dev": true, - "requires": { - "ansi-styles": "^4.0.0", - "astral-regex": "^2.0.0", - "is-fullwidth-code-point": "^3.0.0" - }, - "dependencies": { - "ansi-styles": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.2.1.tgz", - "integrity": "sha512-9VGjrMsG1vePxcSweQsN20KY/c4zN0h9fLjqAbwbPfahM3t+NL+M9HC8xeXG2I8pX5NoamTGNuomEUFI7fcUjA==", - "dev": true, - "requires": { - "@types/color-name": "^1.1.1", - "color-convert": "^2.0.1" - } - }, - "color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, - "requires": { - "color-name": "~1.1.4" - } - }, - "color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true - } - } - }, - "source-map": { - "version": "0.6.1", - "resolved": 
"https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "dev": true - }, - "source-map-support": { - "version": "0.5.16", - "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.16.tgz", - "integrity": "sha512-efyLRJDr68D9hBBNIPWFjhpFzURh+KJykQwvMyW5UiZzYwoF6l4YMMDIJJEyFWxWCqfyxLzz6tSfUFR+kXXsVQ==", - "dev": true, - "requires": { - "buffer-from": "^1.0.0", - "source-map": "^0.6.0" - } - }, - "spdx-correct": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/spdx-correct/-/spdx-correct-3.1.0.tgz", - "integrity": "sha512-lr2EZCctC2BNR7j7WzJ2FpDznxky1sjfxvvYEyzxNyb6lZXHODmEoJeFu4JupYlkfha1KZpJyoqiJ7pgA1qq8Q==", - "dev": true, - "requires": { - "spdx-expression-parse": "^3.0.0", - "spdx-license-ids": "^3.0.0" - } - }, - "spdx-exceptions": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/spdx-exceptions/-/spdx-exceptions-2.2.0.tgz", - "integrity": "sha512-2XQACfElKi9SlVb1CYadKDXvoajPgBVPn/gOQLrTvHdElaVhr7ZEbqJaRnJLVNeaI4cMEAgVCeBMKF6MWRDCRA==", - "dev": true - }, - "spdx-expression-parse": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/spdx-expression-parse/-/spdx-expression-parse-3.0.0.tgz", - "integrity": "sha512-Yg6D3XpRD4kkOmTpdgbUiEJFKghJH03fiC1OPll5h/0sO6neh2jqRDVHOQ4o/LMea0tgCkbMgea5ip/e+MkWyg==", - "dev": true, - "requires": { - "spdx-exceptions": "^2.1.0", - "spdx-license-ids": "^3.0.0" - } - }, - "spdx-license-ids": { - "version": "3.0.5", - "resolved": "https://registry.npmjs.org/spdx-license-ids/-/spdx-license-ids-3.0.5.tgz", - "integrity": "sha512-J+FWzZoynJEXGphVIS+XEh3kFSjZX/1i9gFBaWQcB+/tmpe2qUsSBABpcxqxnAxFdiUFEgAX1bjYGQvIZmoz9Q==", - "dev": true - }, - "sprintf-js": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", - "integrity": "sha1-BOaSb2YolTVPPdAVIDYzuFcpfiw=", - "dev": true - }, - 
"stack-utils": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/stack-utils/-/stack-utils-2.0.2.tgz", - "integrity": "sha512-0H7QK2ECz3fyZMzQ8rH0j2ykpfbnd20BFtfg/SqVC2+sCTtcw0aDTGB7dk+de4U4uUeuz6nOtJcrkFFLG1B0Rg==", - "dev": true, - "requires": { - "escape-string-regexp": "^2.0.0" - }, - "dependencies": { - "escape-string-regexp": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-2.0.0.tgz", - "integrity": "sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w==", - "dev": true - } - } - }, - "state-toggle": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/state-toggle/-/state-toggle-1.0.3.tgz", - "integrity": "sha512-d/5Z4/2iiCnHw6Xzghyhb+GcmF89bxwgXG60wjIiZaxnymbyOmI8Hk4VqHXiVVp6u2ysaskFfXg3ekCj4WNftQ==", - "dev": true - }, - "string-width": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.0.tgz", - "integrity": "sha512-zUz5JD+tgqtuDjMhwIg5uFVV3dtqZ9yQJlZVfq4I01/K5Paj5UHj7VyrQOJvzawSVlKpObApbfD0Ed6yJc+1eg==", - "dev": true, - "requires": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.0" - } - }, - "string_decoder": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", - "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", - "dev": true, - "requires": { - "safe-buffer": "~5.1.0" - } - }, - "strip-ansi": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz", - "integrity": "sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==", - "dev": true, - "requires": { - "ansi-regex": "^5.0.0" - } - }, - "strip-bom": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-3.0.0.tgz", - "integrity": 
"sha1-IzTBjpx1n3vdVv3vfprj1YjmjtM=", - "dev": true - }, - "strip-indent": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/strip-indent/-/strip-indent-3.0.0.tgz", - "integrity": "sha512-laJTa3Jb+VQpaC6DseHhF7dXVqHTfJPCRDaEbid/drOhgitgYku/letMUqOXFoWV0zIIUbjpdH2t+tYj4bQMRQ==", - "dev": true, - "requires": { - "min-indent": "^1.0.0" - } - }, - "strip-json-comments": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz", - "integrity": "sha1-PFMZQukIwml8DsNEhYwobHygpgo=", - "dev": true - }, - "structured-source": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/structured-source/-/structured-source-3.0.2.tgz", - "integrity": "sha1-3YAkJeD1PcSm56yjdSkBoczaevU=", - "dev": true, - "requires": { - "boundary": "^1.0.1" - } - }, - "supertap": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/supertap/-/supertap-1.0.0.tgz", - "integrity": "sha512-HZJ3geIMPgVwKk2VsmO5YHqnnJYl6bV5A9JW2uzqV43WmpgliNEYbuvukfor7URpaqpxuw3CfZ3ONdVbZjCgIA==", - "dev": true, - "requires": { - "arrify": "^1.0.1", - "indent-string": "^3.2.0", - "js-yaml": "^3.10.0", - "serialize-error": "^2.1.0", - "strip-ansi": "^4.0.0" - }, - "dependencies": { - "ansi-regex": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-3.0.0.tgz", - "integrity": "sha1-7QMXwyIGT3lGbAKWa922Bas32Zg=", - "dev": true - }, - "arrify": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/arrify/-/arrify-1.0.1.tgz", - "integrity": "sha1-iYUI2iIm84DfkEcoRWhJwVAaSw0=", - "dev": true - }, - "indent-string": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-3.2.0.tgz", - "integrity": "sha1-Sl/W0nzDMvN+VBmlBNu4NxBckok=", - "dev": true - }, - "strip-ansi": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-4.0.0.tgz", - "integrity": "sha1-qEeQIusaw2iocTibY1JixQXuNo8=", - "dev": true, - 
"requires": { - "ansi-regex": "^3.0.0" - } - } - } - }, - "supports-color": { - "version": "5.5.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", - "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", - "dev": true, - "requires": { - "has-flag": "^3.0.0" - } - }, - "tapable": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/tapable/-/tapable-1.1.3.tgz", - "integrity": "sha512-4WK/bYZmj8xLr+HUCODHGF1ZFzsYffasLUgEiMBY4fgtltdO6B4WJtlSbPaDTLpYTcGVwM2qLnFTICEcNxs3kA==", - "dev": true - }, - "temp-dir": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/temp-dir/-/temp-dir-2.0.0.tgz", - "integrity": "sha512-aoBAniQmmwtcKp/7BzsH8Cxzv8OL736p7v1ihGb5e9DJ9kTwGWHrQrVB5+lfVDzfGrdRzXch+ig7LHaY1JTOrg==", - "dev": true - }, - "term-size": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/term-size/-/term-size-2.2.0.tgz", - "integrity": "sha512-a6sumDlzyHVJWb8+YofY4TW112G6p2FCPEAFk+59gIYHv3XHRhm9ltVQ9kli4hNWeQBwSpe8cRN25x0ROunMOw==", - "dev": true - }, - "time-zone": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/time-zone/-/time-zone-1.0.0.tgz", - "integrity": "sha1-mcW/VZWJZq9tBtg73zgA3IL67F0=", - "dev": true - }, - "to-readable-stream": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/to-readable-stream/-/to-readable-stream-1.0.0.tgz", - "integrity": "sha512-Iq25XBt6zD5npPhlLVXGFN3/gyR2/qODcKNNyTMd4vbm39HUaOiAM4PMq0eMVC/Tkxz+Zjdsc55g9yyz+Yq00Q==", - "dev": true - }, - "to-regex-range": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", - "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", - "dev": true, - "requires": { - "is-number": "^7.0.0" - } - }, - "transform-markdown-links": { - "version": "2.0.0", - "resolved": 
"https://registry.npmjs.org/transform-markdown-links/-/transform-markdown-links-2.0.0.tgz", - "integrity": "sha1-t56Sg9RHTLmweYma4JFS1OxGMlo=", - "dev": true - }, - "traverse": { - "version": "0.6.6", - "resolved": "https://registry.npmjs.org/traverse/-/traverse-0.6.6.tgz", - "integrity": "sha1-y99WD9e5r2MlAv7UD5GMFX6pcTc=", - "dev": true - }, - "trim": { - "version": "0.0.1", - "resolved": "https://registry.npmjs.org/trim/-/trim-0.0.1.tgz", - "integrity": "sha1-WFhUf2spB1fulczMZm+1AITEYN0=", - "dev": true - }, - "trim-newlines": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/trim-newlines/-/trim-newlines-3.0.0.tgz", - "integrity": "sha512-C4+gOpvmxaSMKuEf9Qc134F1ZuOHVXKRbtEflf4NTtuuJDEIJ9p5PXsalL8SkeRw+qit1Mo+yuvMPAKwWg/1hA==", - "dev": true - }, - "trim-off-newlines": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/trim-off-newlines/-/trim-off-newlines-1.0.1.tgz", - "integrity": "sha1-n5up2e+odkw4dpi8v+sshI8RrbM=", - "dev": true - }, - "trim-trailing-lines": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/trim-trailing-lines/-/trim-trailing-lines-1.1.3.tgz", - "integrity": "sha512-4ku0mmjXifQcTVfYDfR5lpgV7zVqPg6zV9rdZmwOPqq0+Zq19xDqEgagqVbc4pOOShbncuAOIs59R3+3gcF3ZA==", - "dev": true - }, - "trough": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/trough/-/trough-1.0.5.tgz", - "integrity": "sha512-rvuRbTarPXmMb79SmzEp8aqXNKcK+y0XaB298IXueQ8I2PsrATcPBCSPyK/dDNa2iWOhKlfNnOjdAOTBU/nkFA==", - "dev": true - }, - "ts-loader": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/ts-loader/-/ts-loader-6.2.1.tgz", - "integrity": "sha512-Dd9FekWuABGgjE1g0TlQJ+4dFUfYGbYcs52/HQObE0ZmUNjQlmLAS7xXsSzy23AMaMwipsx5sNHvoEpT2CZq1g==", - "dev": true, - "requires": { - "chalk": "^2.3.0", - "enhanced-resolve": "^4.0.0", - "loader-utils": "^1.0.2", - "micromatch": "^4.0.0", - "semver": "^6.0.0" - } - }, - "ts-node": { - "version": "8.5.4", - "resolved": 
"https://registry.npmjs.org/ts-node/-/ts-node-8.5.4.tgz", - "integrity": "sha512-izbVCRV68EasEPQ8MSIGBNK9dc/4sYJJKYA+IarMQct1RtEot6Xp0bXuClsbUSnKpg50ho+aOAx8en5c+y4OFw==", - "dev": true, - "requires": { - "arg": "^4.1.0", - "diff": "^4.0.1", - "make-error": "^1.1.1", - "source-map-support": "^0.5.6", - "yn": "^3.0.0" - } - }, - "type-fest": { - "version": "0.3.1", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.3.1.tgz", - "integrity": "sha512-cUGJnCdr4STbePCgqNFbpVNCepa+kAVohJs1sLhxzdH+gnEoOd8VhbYa7pD3zZYGiURWM2xzEII3fQcRizDkYQ==", - "dev": true - }, - "typedarray-to-buffer": { - "version": "3.1.5", - "resolved": "https://registry.npmjs.org/typedarray-to-buffer/-/typedarray-to-buffer-3.1.5.tgz", - "integrity": "sha512-zdu8XMNEDepKKR+XYOXAVPtWui0ly0NtohUscw+UmaHiAWT8hrV1rr//H6V+0DvJ3OQ19S979M0laLfX8rm82Q==", - "dev": true, - "requires": { - "is-typedarray": "^1.0.0" - } - }, - "typedoc": { - "version": "0.19.2", - "resolved": "https://registry.npmjs.org/typedoc/-/typedoc-0.19.2.tgz", - "integrity": "sha512-oDEg1BLEzi1qvgdQXc658EYgJ5qJLVSeZ0hQ57Eq4JXy6Vj2VX4RVo18qYxRWz75ifAaYuYNBUCnbhjd37TfOg==", - "dev": true, - "requires": { - "fs-extra": "^9.0.1", - "handlebars": "^4.7.6", - "highlight.js": "^10.2.0", - "lodash": "^4.17.20", - "lunr": "^2.3.9", - "marked": "^1.1.1", - "minimatch": "^3.0.0", - "progress": "^2.0.3", - "semver": "^7.3.2", - "shelljs": "^0.8.4", - "typedoc-default-themes": "^0.11.4" - }, - "dependencies": { - "fs-extra": { - "version": "9.1.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-9.1.0.tgz", - "integrity": "sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ==", - "dev": true, - "requires": { - "at-least-node": "^1.0.0", - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - } - }, - "jsonfile": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", - "integrity": 
"sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", - "dev": true, - "requires": { - "graceful-fs": "^4.1.6", - "universalify": "^2.0.0" - } - }, - "lodash": { - "version": "4.17.21", - "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", - "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==", - "dev": true - }, - "semver": { - "version": "7.3.4", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.3.4.tgz", - "integrity": "sha512-tCfb2WLjqFAtXn4KEdxIhalnRtoKFN7nAwj0B3ZXCbQloV2tq5eDbcTmT68JJD3nRJq24/XgxtQKFIpQdtvmVw==", - "dev": true, - "requires": { - "lru-cache": "^6.0.0" - } - }, - "universalify": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz", - "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==", - "dev": true - } - } - }, - "typedoc-default-themes": { - "version": "0.11.4", - "resolved": "https://registry.npmjs.org/typedoc-default-themes/-/typedoc-default-themes-0.11.4.tgz", - "integrity": "sha512-Y4Lf+qIb9NTydrexlazAM46SSLrmrQRqWiD52593g53SsmUFioAsMWt8m834J6qsp+7wHRjxCXSZeiiW5cMUdw==", - "dev": true - }, - "typedoc-plugin-markdown": { - "version": "2.2.17", - "resolved": "https://registry.npmjs.org/typedoc-plugin-markdown/-/typedoc-plugin-markdown-2.2.17.tgz", - "integrity": "sha512-eE6cTeqsZIbjur6RG91Lhx1vTwjR49OHwVPRlmsxY3dthS4FNRL8sHxT5Y9pkosBwv1kSmNGQEPHjMYy1Ag6DQ==", - "dev": true, - "requires": { - "fs-extra": "^8.1.0", - "handlebars": "^4.7.3" - } - }, - "typescript": { - "version": "3.8.3", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-3.8.3.tgz", - "integrity": "sha512-MYlEfn5VrLNsgudQTVJeNaQFUAI7DkhnOjdpAp4T+ku1TfQClewlbSuTVHiA+8skNBgaf02TL/kLOvig4y3G8w==", - "dev": true - }, - "uglify-js": { - "version": "3.8.1", - "resolved": 
"https://registry.npmjs.org/uglify-js/-/uglify-js-3.8.1.tgz", - "integrity": "sha512-W7KxyzeaQmZvUFbGj4+YFshhVrMBGSg2IbcYAjGWGvx8DHvJMclbTDMpffdxFUGPBHjIytk7KJUR/KUXstUGDw==", - "dev": true, - "optional": true, - "requires": { - "commander": "~2.20.3", - "source-map": "~0.6.1" - } - }, - "unherit": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/unherit/-/unherit-1.1.3.tgz", - "integrity": "sha512-Ft16BJcnapDKp0+J/rqFC3Rrk6Y/Ng4nzsC028k2jdDII/rdZ7Wd3pPT/6+vIIxRagwRc9K0IUX0Ra4fKvw+WQ==", - "dev": true, - "requires": { - "inherits": "^2.0.0", - "xtend": "^4.0.0" - } - }, - "unified": { - "version": "6.2.0", - "resolved": "https://registry.npmjs.org/unified/-/unified-6.2.0.tgz", - "integrity": "sha512-1k+KPhlVtqmG99RaTbAv/usu85fcSRu3wY8X+vnsEhIxNP5VbVIDiXnLqyKIG+UMdyTg0ZX9EI6k2AfjJkHPtA==", - "dev": true, - "requires": { - "bail": "^1.0.0", - "extend": "^3.0.0", - "is-plain-obj": "^1.1.0", - "trough": "^1.0.0", - "vfile": "^2.0.0", - "x-is-string": "^0.1.0" - } - }, - "unique-string": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/unique-string/-/unique-string-2.0.0.tgz", - "integrity": "sha512-uNaeirEPvpZWSgzwsPGtU2zVSTrn/8L5q/IexZmH0eH6SA73CmAA5U4GwORTxQAZs95TAXLNqeLoPPNO5gZfWg==", - "dev": true, - "requires": { - "crypto-random-string": "^2.0.0" - } - }, - "unist-util-is": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-3.0.0.tgz", - "integrity": "sha512-sVZZX3+kspVNmLWBPAB6r+7D9ZgAFPNWm66f7YNb420RlQSbn+n8rG8dGZSkrER7ZIXGQYNm5pqC3v3HopH24A==", - "dev": true - }, - "unist-util-remove-position": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/unist-util-remove-position/-/unist-util-remove-position-1.1.4.tgz", - "integrity": "sha512-tLqd653ArxJIPnKII6LMZwH+mb5q+n/GtXQZo6S6csPRs5zB0u79Yw8ouR3wTw8wxvdJFhpP6Y7jorWdCgLO0A==", - "dev": true, - "requires": { - "unist-util-visit": "^1.1.0" - } - }, - "unist-util-stringify-position": { - "version": "1.1.2", - "resolved": 
"https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-1.1.2.tgz", - "integrity": "sha512-pNCVrk64LZv1kElr0N1wPiHEUoXNVFERp+mlTg/s9R5Lwg87f9bM/3sQB99w+N9D/qnM9ar3+AKDBwo/gm/iQQ==", - "dev": true - }, - "unist-util-visit": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-1.4.1.tgz", - "integrity": "sha512-AvGNk7Bb//EmJZyhtRUnNMEpId/AZ5Ph/KUpTI09WHQuDZHKovQ1oEv3mfmKpWKtoMzyMC4GLBm1Zy5k12fjIw==", - "dev": true, - "requires": { - "unist-util-visit-parents": "^2.0.0" - } - }, - "unist-util-visit-parents": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-2.1.2.tgz", - "integrity": "sha512-DyN5vD4NE3aSeB+PXYNKxzGsfocxp6asDc2XXE3b0ekO2BaRUpBicbbUygfSvYfUz1IkmjFR1YF7dPklraMZ2g==", - "dev": true, - "requires": { - "unist-util-is": "^3.0.0" - } - }, - "universalify": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.1.2.tgz", - "integrity": "sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg==", - "dev": true - }, - "update-notifier": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/update-notifier/-/update-notifier-4.1.1.tgz", - "integrity": "sha512-9y+Kds0+LoLG6yN802wVXoIfxYEwh3FlZwzMwpCZp62S2i1/Jzeqb9Eeeju3NSHccGGasfGlK5/vEHbAifYRDg==", - "dev": true, - "requires": { - "boxen": "^4.2.0", - "chalk": "^3.0.0", - "configstore": "^5.0.1", - "has-yarn": "^2.1.0", - "import-lazy": "^2.1.0", - "is-ci": "^2.0.0", - "is-installed-globally": "^0.3.1", - "is-npm": "^4.0.0", - "is-yarn-global": "^0.3.0", - "latest-version": "^5.0.0", - "pupa": "^2.0.1", - "semver-diff": "^3.1.1", - "xdg-basedir": "^4.0.0" - }, - "dependencies": { - "ansi-styles": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.2.1.tgz", - "integrity": 
"sha512-9VGjrMsG1vePxcSweQsN20KY/c4zN0h9fLjqAbwbPfahM3t+NL+M9HC8xeXG2I8pX5NoamTGNuomEUFI7fcUjA==", - "dev": true, - "requires": { - "@types/color-name": "^1.1.1", - "color-convert": "^2.0.1" - } - }, - "chalk": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-3.0.0.tgz", - "integrity": "sha512-4D3B6Wf41KOYRFdszmDqMCGq5VV/uMAB273JILmO+3jAlh8X4qDtdtgCR3fxtbLEMzSx22QdhnDcJvu2u1fVwg==", - "dev": true, - "requires": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - } - }, - "color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, - "requires": { - "color-name": "~1.1.4" - } - }, - "color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true - }, - "has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "dev": true - }, - "supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "dev": true, - "requires": { - "has-flag": "^4.0.0" - } - } - } - }, - "update-section": { - "version": "0.3.3", - "resolved": "https://registry.npmjs.org/update-section/-/update-section-0.3.3.tgz", - "integrity": "sha1-RY8Xgg03gg3GDiC4bZQ5GwASMVg=", - "dev": true - }, - "url-parse-lax": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/url-parse-lax/-/url-parse-lax-3.0.0.tgz", - "integrity": 
"sha1-FrXK/Afb42dsGxmZF3gj1lA6yww=", - "dev": true, - "requires": { - "prepend-http": "^2.0.0" - } - }, - "util-deprecate": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", - "integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8=", - "dev": true - }, - "validate-npm-package-license": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/validate-npm-package-license/-/validate-npm-package-license-3.0.4.tgz", - "integrity": "sha512-DpKm2Ui/xN7/HQKCtpZxoRWBhZ9Z0kqtygG8XCgNQ8ZlDnxuQmWhj566j8fN4Cu3/JmbhsDo7fcAJq4s9h27Ew==", - "dev": true, - "requires": { - "spdx-correct": "^3.0.0", - "spdx-expression-parse": "^3.0.0" - } - }, - "vfile": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/vfile/-/vfile-2.3.0.tgz", - "integrity": "sha512-ASt4mBUHcTpMKD/l5Q+WJXNtshlWxOogYyGYYrg4lt/vuRjC1EFQtlAofL5VmtVNIZJzWYFJjzGWZ0Gw8pzW1w==", - "dev": true, - "requires": { - "is-buffer": "^1.1.4", - "replace-ext": "1.0.0", - "unist-util-stringify-position": "^1.0.0", - "vfile-message": "^1.0.0" - } - }, - "vfile-location": { - "version": "2.0.6", - "resolved": "https://registry.npmjs.org/vfile-location/-/vfile-location-2.0.6.tgz", - "integrity": "sha512-sSFdyCP3G6Ka0CEmN83A2YCMKIieHx0EDaj5IDP4g1pa5ZJ4FJDvpO0WODLxo4LUX4oe52gmSCK7Jw4SBghqxA==", - "dev": true - }, - "vfile-message": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/vfile-message/-/vfile-message-1.1.1.tgz", - "integrity": "sha512-1WmsopSGhWt5laNir+633LszXvZ+Z/lxveBf6yhGsqnQIhlhzooZae7zV6YVM1Sdkw68dtAW3ow0pOdPANugvA==", - "dev": true, - "requires": { - "unist-util-stringify-position": "^1.1.1" - } - }, - "wcwidth": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/wcwidth/-/wcwidth-1.0.1.tgz", - "integrity": "sha1-8LDc+RW8X/FSivrbLA4XtTLaL+g=", - "dev": true, - "requires": { - "defaults": "^1.0.3" - } - }, - "well-known-symbols": { - "version": "2.0.0", - "resolved": 
"https://registry.npmjs.org/well-known-symbols/-/well-known-symbols-2.0.0.tgz", - "integrity": "sha512-ZMjC3ho+KXo0BfJb7JgtQ5IBuvnShdlACNkKkdsqBmYw3bPAaJfPeYUo6tLUaT5tG/Gkh7xkpBhKRQ9e7pyg9Q==", - "dev": true - }, - "which-module": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/which-module/-/which-module-2.0.0.tgz", - "integrity": "sha1-2e8H3Od7mQK4o6j6SzHD4/fm6Ho=", - "dev": true - }, - "widest-line": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/widest-line/-/widest-line-3.1.0.tgz", - "integrity": "sha512-NsmoXalsWVDMGupxZ5R08ka9flZjjiLvHVAWYOKtiKM8ujtZWr9cRffak+uSE48+Ob8ObalXpwyeUiyDD6QFgg==", - "dev": true, - "requires": { - "string-width": "^4.0.0" - } - }, - "wordwrap": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/wordwrap/-/wordwrap-1.0.0.tgz", - "integrity": "sha1-J1hIEIkUVqQXHI0CJkQa3pDLyus=", - "dev": true - }, - "wrap-ansi": { - "version": "6.2.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-6.2.0.tgz", - "integrity": "sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA==", - "dev": true, - "requires": { - "ansi-styles": "^4.0.0", - "string-width": "^4.1.0", - "strip-ansi": "^6.0.0" - }, - "dependencies": { - "ansi-styles": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.2.1.tgz", - "integrity": "sha512-9VGjrMsG1vePxcSweQsN20KY/c4zN0h9fLjqAbwbPfahM3t+NL+M9HC8xeXG2I8pX5NoamTGNuomEUFI7fcUjA==", - "dev": true, - "requires": { - "@types/color-name": "^1.1.1", - "color-convert": "^2.0.1" - } - }, - "color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, - "requires": { - "color-name": "~1.1.4" - } - }, - "color-name": { - "version": "1.1.4", - "resolved": 
"https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true - } - } - }, - "wrappy": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", - "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=", - "dev": true - }, - "write-file-atomic": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-3.0.3.tgz", - "integrity": "sha512-AvHcyZ5JnSfq3ioSyjrBkH9yW4m7Ayk8/9My/DD9onKeu/94fwrMocemO2QAJFAlnnDN+ZDS+ZjAR5ua1/PV/Q==", - "dev": true, - "requires": { - "imurmurhash": "^0.1.4", - "is-typedarray": "^1.0.0", - "signal-exit": "^3.0.2", - "typedarray-to-buffer": "^3.1.5" - } - }, - "x-is-string": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/x-is-string/-/x-is-string-0.1.0.tgz", - "integrity": "sha1-R0tQhlrzpJqcRlfwWs0UVFj3fYI=", - "dev": true - }, - "xdg-basedir": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/xdg-basedir/-/xdg-basedir-4.0.0.tgz", - "integrity": "sha512-PSNhEJDejZYV7h50BohL09Er9VaIefr2LMAf3OEmpCkjOi34eYyQYAXUTjEQtZJTKcF0E2UKTh+osDLsgNim9Q==", - "dev": true - }, - "xtend": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz", - "integrity": "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==", - "dev": true - }, - "y18n": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/y18n/-/y18n-4.0.0.tgz", - "integrity": "sha512-r9S/ZyXu/Xu9q1tYlpsLIsa3EeLXXk0VwlxqTcFRfg9EhMW+17kbt9G0NrgCmhGb5vT2hyhJZLfDGx+7+5Uj/w==", - "dev": true - }, - "yallist": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", - "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", - "dev": true - }, - "yargs": { - "version": "15.4.1", - "resolved": 
"https://registry.npmjs.org/yargs/-/yargs-15.4.1.tgz", - "integrity": "sha512-aePbxDmcYW++PaqBsJ+HYUFwCdv4LVvdnhBy78E57PIor8/OVvhMrADFFEDh8DHDFRv/O9i3lPhsENjO7QX0+A==", - "dev": true, - "requires": { - "cliui": "^6.0.0", - "decamelize": "^1.2.0", - "find-up": "^4.1.0", - "get-caller-file": "^2.0.1", - "require-directory": "^2.1.1", - "require-main-filename": "^2.0.0", - "set-blocking": "^2.0.0", - "string-width": "^4.2.0", - "which-module": "^2.0.0", - "y18n": "^4.0.0", - "yargs-parser": "^18.1.2" - }, - "dependencies": { - "camelcase": { - "version": "5.3.1", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", - "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", - "dev": true - }, - "yargs-parser": { - "version": "18.1.3", - "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-18.1.3.tgz", - "integrity": "sha512-o50j0JeToy/4K6OZcaQmW6lyXXKhq7csREXcDwk2omFPJEwUNOVtJKvmDr9EI1fAJZUyZcRF7kxGBWmRXudrCQ==", - "dev": true, - "requires": { - "camelcase": "^5.0.0", - "decamelize": "^1.2.0" - } - } - } - }, - "yargs-parser": { - "version": "18.1.3", - "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-18.1.3.tgz", - "integrity": "sha512-o50j0JeToy/4K6OZcaQmW6lyXXKhq7csREXcDwk2omFPJEwUNOVtJKvmDr9EI1fAJZUyZcRF7kxGBWmRXudrCQ==", - "dev": true, - "requires": { - "camelcase": "^5.0.0", - "decamelize": "^1.2.0" - } - }, - "yn": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/yn/-/yn-3.1.1.tgz", - "integrity": "sha512-Ux4ygGWsu2c7isFWe8Yu1YluJmqVhxqK2cLXNQA5AcC3QfbGNpM7fu0Y8b/z16pXLnFxZYvWhd3fhBY9DLmC6Q==", - "dev": true - } - } -} diff --git a/smart_contracts/contract_as/package.json b/smart_contracts/contract_as/package.json deleted file mode 100644 index afa9ff720b..0000000000 --- a/smart_contracts/contract_as/package.json +++ /dev/null @@ -1,47 +0,0 @@ -{ - "name": "casper-contract", - "version": "1.0.0", - "description": "Library for 
developing Casper smart contracts.", - "main": "index.js", - "ascMain": "assembly/index.ts", - "dependencies": {}, - "devDependencies": { - "@assemblyscript/loader": "^0.9.4", - "assemblyscript": "^0.10.0", - "ava": "^3.12.1", - "concat-md": "^0.3.5", - "ts-loader": "^6.2.1", - "ts-node": "^8.5.4", - "typedoc": "^0.19.1", - "typedoc-plugin-markdown": "^2.2.17", - "typescript": "^3.8.3" - }, - "scripts": { - "test": "npm run asbuild:test && npx ava -v --serial", - "asbuild:untouched": "asc assembly/index.ts -b build/untouched.wasm -t build/untouched.wat --sourceMap --debug --use abort=", - "asbuild:optimized": "asc assembly/index.ts -b build/optimized.wasm -t build/optimized.wat --sourceMap --optimize --use abort=", - "asbuild:test:bytesrepr": "asc tests/assembly/bytesrepr.spec.as.ts -b build/bytesrepr.spec.as.wasm -t build/bytesrepr.spec.as.wat --sourceMap --optimize", - "asbuild:test:bignum": "asc tests/assembly/bignum.spec.as.ts -b build/bignum.spec.as.wasm -t build/bignum.spec.as.wat --sourceMap --optimize", - "asbuild:test:utils": "asc tests/assembly/utils.spec.as.ts -b build/utils.spec.as.wasm -t build/utils.spec.as.wat --sourceMap --optimize", - "asbuild:test:runtime_args": "asc tests/assembly/runtime_args.spec.as.ts -b build/runtime_args.spec.as.wasm -t build/runtime_args.spec.as.wat --sourceMap --optimize", - "asbuild:test": "npm run asbuild:test:runtime_args && npm run asbuild:test:bytesrepr && npm run asbuild:test:bignum && npm run asbuild:test:utils", - "asbuild": "npm run asbuild:untouched && npm run asbuild:optimized", - "prepublish-docs": "rm -rf apidoc && mkdir apidoc && node_modules/.bin/typedoc assembly/*.ts assembly/collections/*.ts --theme markdown --readme none --ignoreCompilerErrors --hideBreadcrumbs --skipSidebar --excludePrivate --excludeNotExported --out temp-apidoc/ && concat-md --decrease-title-levels --dir-name-as-title temp-apidoc >> README.md", - "prepublishOnly": "cp README.md ._README.md && npm run prepublish-docs", - "postpublish": 
"rm -rf temp-apidoc && mv ._README.md README.md" - }, - "author": "Michał Papierski ", - "license": "Apache-2.0", - "ava": { - "extensions": [ - "ts" - ], - "require": [ - "ts-node/register", - "ts-node/register/transpile-only" - ], - "files": [ - "tests/**/*.spec.ts" - ] - } -} diff --git a/smart_contracts/contract_as/tests/assembly/bignum.spec.as.ts b/smart_contracts/contract_as/tests/assembly/bignum.spec.as.ts deleted file mode 100644 index d0cff226fb..0000000000 --- a/smart_contracts/contract_as/tests/assembly/bignum.spec.as.ts +++ /dev/null @@ -1,315 +0,0 @@ -import { hex2bin } from "../utils/helpers"; -import { U512 } from "../../assembly/bignum"; -import { Error } from "../../assembly/bytesrepr"; -import { Pair } from "../../assembly/pair"; -import { checkArraysEqual } from "../../assembly/utils"; -import { arrayToTyped, typedToArray } from "../../assembly/utils"; - -export function testBigNum512Arith(): bool { - let a = U512.fromU64(18446744073709551614); // 2^64-2 - assert(a.toString() == "fffffffffffffffe"); - - let b = U512.fromU64(1); - - assert(b.toString() == "1"); - - a += b; // a==2^64-1 - assert(a.toString() == "ffffffffffffffff"); - - a += b; // a==2^64 - assert(a.toString() == "10000000000000000"); - - a -= b; // a==2^64-1 - assert(a.toString() == "ffffffffffffffff"); - - a -= b; // a==2^64-2 - assert(a.toString() == "fffffffffffffffe"); - - return true; -} - -export function testBigNum512Mul(): bool { - let u64Max = U512.fromU64(18446744073709551615); // 2^64-1 - assert(u64Max.toString() == "ffffffffffffffff"); - - let a = U512.fromU64(18446744073709551615); - - assert(a.toString() == "ffffffffffffffff"); - - a *= u64Max; // (2^64-1) ^ 2 - assert(a.toString() == "fffffffffffffffe0000000000000001"); - - a *= u64Max; // (2^64-1) ^ 3 - assert(a.toString(), "fffffffffffffffd0000000000000002ffffffffffffffff"); - - a *= u64Max; // (2^64-1) ^ 4 - assert(a.toString() == "fffffffffffffffc0000000000000005fffffffffffffffc0000000000000001"); - - a *= 
u64Max; // (2^64-1) ^ 5 - assert(a.toString() == "fffffffffffffffb0000000000000009fffffffffffffff60000000000000004ffffffffffffffff"); - - a *= u64Max; // (2^64-1) ^ 6 - assert(a.toString() == "fffffffffffffffa000000000000000effffffffffffffec000000000000000efffffffffffffffa0000000000000001"); - a *= u64Max; // (2^64-1) ^ 7 - assert(a.toString() == "fffffffffffffff90000000000000014ffffffffffffffdd0000000000000022ffffffffffffffeb0000000000000006ffffffffffffffff"); - - a *= u64Max; // (2^64-1) ^ 8 - assert(a.toString() == "fffffffffffffff8000000000000001bffffffffffffffc80000000000000045ffffffffffffffc8000000000000001bfffffffffffffff80000000000000001"); - - return true; -} - -export function testBigNumZero(): bool { - let zero = new U512(); - assert(zero.toString() == "0"); - return zero.isZero(); -} - -export function testBigNonZero(): bool { - let nonzero = U512.fromU64(0xffffffff); - assert(nonzero.toString() == "ffffffff"); - return !nonzero.isZero(); -} - -export function testBigNumSetHex(): bool { - let large = U512.fromHex("fffffffffffffff8000000000000001bffffffffffffffc80000000000000045ffffffffffffffc8000000000000001bfffffffffffffff80000000000000001"); - assert(large.toString() == "fffffffffffffff8000000000000001bffffffffffffffc80000000000000045ffffffffffffffc8000000000000001bfffffffffffffff80000000000000001"); - return true; -} - -export function testNeg(): bool { - // big == -big - // in 2s compliment: big == ~(~big+1)+1 - let big = U512.fromHex("e7ed081ae96850db0c7d5b42094b5e09b0631e6b9f63efe4deb90d7dd677c82f8ce52eccda5b03f5190770a763729ae9ab85c76cd1dc9606ec9dcf2e2528fccb"); - assert(big == -(-big)); - return true; -} - -export function testComparison(): bool { - let zero = new U512(); - assert(zero.isZero()); - let one = U512.fromU64(1); - - let u32Max = U512.fromU64(4294967295); - - assert(zero != one); - assert(one == one); - assert(zero == zero); - - assert(zero < one); - assert(zero <= one); - assert(one <= one); - - assert(one > zero); - assert(one >= 
zero); - assert(one >= one); - - let large1 = U512.fromHex("a25bd58358ae4cd57ba0a4afcde6e9aa55c801d88854541dfc6ea5e3c1fada9ed9cb1e48b0a2d553faa26e5381743415ae1ec593dc67fc525d18e0b6fdf3f7ae"); - let large2 = U512.fromHex("f254bb1c7f6654f5ad104854709cb5c09009ccd2b78b5364fefd3a5fa99381a173c5498966e77d88d443bd1a650b4bcb8bb8a92013a85a7095330bc79a2e22dc"); - - assert(large1.cmp(large2) != 0); - assert(large1 != large2); - assert(large2 == large2); - assert(large1 == large1); - - assert(large1 < large2); - assert(large1 <= large2); - assert(large2 <= large2); - - assert(large2 > large1); - assert(large2 >= large1); - assert(large2 >= large2); - - assert(large1 > zero); - assert(large1 > one); - assert(large1 > u32Max); - assert(large2 > u32Max); - assert(large1 >= u32Max); - assert(u32Max >= one); - assert(one <= u32Max); - assert(one != u32Max); - return true; -} - -export function testBits(): bool { - let zero = new U512(); - assert(zero.bits() == 0); - let one = U512.fromU64(1); - assert(one.bits() == 1); - - let value = new U512(); - for (let i = 0; i < 63; i++) { - value.setU64(1 << i); - assert(value.bits() == i + 1); - } - - let shl512P1 = U512.fromHex("10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"); - assert(shl512P1.bits() == 509); - - let u512Max = U512.fromHex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"); - assert(u512Max.bits() == 512); - - let mix = U512.fromHex("55555555555"); - assert(mix.bits() == 43); - return true; -} - -export function testDivision(): bool { - let u512Max = U512.fromHex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"); - - let five = U512.fromU64(5); - - let rand = U512.fromHex("6fdf77a12c44899b8456d394e555ac9b62af0b0e70b79c8f8aa3837116c8c2a5"); - - assert(rand.bits() != 0); - let 
maybeRes1 = u512Max.divMod(rand); - assert(maybeRes1 !== null); - let res1 = >(maybeRes1); - - // result - - assert(res1.first.toString(), "249cebb32c9a2f0d1375ddc28138b727428ed6c66f4ca9f0abeb231cff6df7ec7"); - // remainder - assert(res1.second.toString(), "4d4edcc2e5e0a5416119b88b280018b1b79ffbd0891ae622ee7a6d895e687bbc"); - // recalculate back - assert((res1.first * rand) + res1.second == u512Max); - - // u512max is multiply of 5 - let divided = u512Max / five; - let multiplied = divided * five; - assert(multiplied == u512Max); - - let base10 = ""; - let zero = new U512(); - let ten = U512.fromU64(10); - - assert(five % ten == five); - assert(ten.divMod(zero) === null); - - while (u512Max > zero) { - let maybeRes = u512Max.divMod(ten); - assert(maybeRes !== null); - let res = >(maybeRes); - base10 = res.second.toString() + base10; - u512Max = res.first; - } - assert(base10 == "13407807929942597099574024998205846127479365820592393377723561443721764030073546976801874298166903427690031858186486050853753882811946569946433649006084095"); - return true; -} - -export function testSerializeU512Zero(): bool { - let truth = hex2bin("00"); - let result = U512.fromBytes(truth); - assert(result.error == Error.Ok); - assert(result.hasValue()); - let zero = result.value; - assert(zero.isZero()); - const bytes = zero.toBytes(); - return checkArraysEqual(bytes, typedToArray(truth)); -}; - -export function testSerializeU512_3BytesWide(): bool { - let truth = hex2bin("03807801"); - let result = U512.fromBytes(truth); - assert(result.error == Error.Ok); - assert(result.hasValue()); - let num = result.value; - assert(num.toString() == "17880"); // dec: 96384 - const bytes = num.toBytes(); - return checkArraysEqual(bytes, typedToArray(truth)); -}; - -export function testSerializeU512_2BytesWide(): bool { - let truth = hex2bin("020004"); - let result = U512.fromBytes(truth); - assert(result.error == Error.Ok); - assert(result.hasValue()); - let num = result.value; - 
assert(num.toString() == "400"); // dec: 1024 - const bytes = num.toBytes(); - return checkArraysEqual(bytes, typedToArray(truth)); -}; - -export function testSerializeU512_1BytesWide(): bool { - let truth = hex2bin("0101"); - let result = U512.fromBytes(truth); - assert(result.error == Error.Ok); - assert(result.hasValue()); - let num = result.value; - assert(num.toString() == "1"); - const bytes = num.toBytes(); - return checkArraysEqual(bytes, typedToArray(truth)); -}; - -export function testSerialize100mTimes10(): bool { - let truth = hex2bin("0400ca9a3b"); // bytesrepr truth - - let hex = "3b9aca00"; - - let valU512 = U512.fromHex(hex); - assert(valU512.toString() == hex); - - let bytes = valU512.toBytes(); - assert(bytes !== null) - assert(checkArraysEqual(bytes, typedToArray(truth))); - - let roundTrip = U512.fromBytes(arrayToTyped(bytes)); - assert(roundTrip.error == Error.Ok); - assert(roundTrip.value.toString() == hex); - - return true; -} - -export function testDeserLargeRandomU512(): bool { - // U512::from_dec_str("11047322357349959198658049652287831689404979606512518998046171549088754115972343255984024380249291159341787585633940860990180834807840096331186000119802997").expect("should create"); - let hex = "d2ee2fd630b02f2ffec88918ba0adaba87e76a294af2e5cc9a4710a23da14a6dde1c73780be2815e979547a949e085aa9279db4c3d1d0fde2361cd2e2d392c75"; - - // bytesrepr - let truth = hex2bin("40752c392d2ecd6123de0f1d3d4cdb7992aa85e049a94795975e81e20b78731cde6d4aa13da210479acce5f24a296ae787bada0aba1889c8fe2f2fb030d62feed2"); - - let deser = U512.fromBytes(truth); - assert(deser.error == Error.Ok); - assert(deser !== null); - assert(deser.value.toString() == hex); - - let ser = deser.value.toBytes(); - assert(checkArraysEqual(ser, typedToArray(truth))); - - return true; -} -export function testPrefixOps(): bool { - let a = U512.fromU64(18446744073709551615); // 2^64-2 - assert(a.toString() == "ffffffffffffffff"); - - let one = U512.fromU64(1); - assert(one.toString() == 
"1"); - - ++a; - assert(a.toString() == "10000000000000000"); - - ++a; - assert(a.toString() == "10000000000000001"); - - --a; - assert(a.toString() == "10000000000000000"); - - --a; - assert(a.toString() == "ffffffffffffffff"); - - let aCloned = a.clone(); - - let aPostInc = a++; - assert(aPostInc == aCloned); - assert(a == aCloned + one); - - let aPostDec = a--; - assert(aPostDec == aCloned); - assert(a == aCloned - one); - return true; -} - -export function testMinMaxValue(): bool { - assert(U512.MAX_VALUE.toString() == "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"); - assert(U512.MIN_VALUE.toString() == "0"); - return true; -} diff --git a/smart_contracts/contract_as/tests/assembly/bytesrepr.spec.as.ts b/smart_contracts/contract_as/tests/assembly/bytesrepr.spec.as.ts deleted file mode 100644 index 0ce54fbfe5..0000000000 --- a/smart_contracts/contract_as/tests/assembly/bytesrepr.spec.as.ts +++ /dev/null @@ -1,408 +0,0 @@ -import { fromBytesU64, toBytesU64, - fromBytesStringList, toBytesStringList, - fromBytesU32, toBytesU32, - fromBytesU8, toBytesU8, - toBytesMap, fromBytesMap, - toBytesPair, - toBytesString, fromBytesString, - toBytesVecT, - Error } from "../../assembly/bytesrepr"; -import { CLValue, CLType, CLTypeTag } from "../../assembly/clvalue"; -import { Key, KeyVariant, AccountHash } from "../../assembly/key"; -import { URef, AccessRights } from "../../assembly/uref"; -import { Option } from "../../assembly/option"; -import { hex2bin } from "../utils/helpers"; -import { checkArraysEqual, checkTypedArrayEqual, checkItemsEqual } from "../../assembly/utils"; -import { typedToArray, arrayToTyped } from "../../assembly/utils"; -import { Pair } from "../../assembly/pair"; -import { EntryPointAccess, PublicAccess, GroupAccess, EntryPoint, EntryPoints, EntryPointType } from "../../assembly"; - -// adding the prefix xtest to one of these functions will cause the test to -// be 
ignored via the defineTestsFromModule function in spec.tsgit - -export function testDeserializeInvalidU8(): bool { - const bytes: u8[] = []; - let deser = fromBytesU8(arrayToTyped(bytes)); - assert(deser.error == Error.EarlyEndOfStream); - assert(deser.position == 0); - return !deser.hasValue(); -} - -export function testDeSerU8(): bool { - const truth: u8[] = [222]; - let ser = toBytesU8(222); - assert(checkArraysEqual(ser, truth)); - let deser = fromBytesU8(arrayToTyped(ser)); - assert(deser.error == Error.Ok); - return deser.value == 222; -} - -export function xtestDeSerU8_Zero(): bool { - // Used for deserializing Weight (for example) - // NOTE: Currently probably unable to check if `foo(): U8 | null` result is null - const truth: u8[] = [0]; - let ser = toBytesU8(0); - assert(checkArraysEqual(ser, truth)); - let deser = fromBytesU8(arrayToTyped(ser)); - assert(deser.error == Error.Ok); - return deser.value == 0; -} - -export function testDeSerU32(): bool { - const truth: u8[] = [239, 190, 173, 222]; - let ser = toBytesU32(3735928559); - assert(checkArraysEqual(ser, truth)); - let deser = fromBytesU32(arrayToTyped(ser)); - assert(deser.error == Error.Ok); - assert(deser.position == 4); - return deser.value == 0xdeadbeef; -} - -export function testDeSerZeroU32(): bool { - const truth: u8[] = [0, 0, 0, 0]; - let ser = toBytesU32(0); - assert(checkArraysEqual(ser, truth)); - let deser = fromBytesU32(arrayToTyped(ser)); - assert(deser.error == Error.Ok); - assert(deser.hasValue()); - return deser.value == 0; -} - -export function testDeserializeU64_1024(): bool { - const truth = hex2bin("0004000000000000"); - var deser = fromBytesU64(truth); - assert(deser.error == Error.Ok); - assert(deser.position == 8); - return deser.value == 1024; -} - -export function testDeserializeU64_zero(): bool { - const truth = hex2bin("0000000000000000"); - var deser = fromBytesU64(truth); - assert(deser.error == Error.Ok); - assert(deser.position == 8); - assert(deser.hasValue()); - 
return deser.value == 0; -} - -export function testDeserializeU64_u32max(): bool { - const truth = hex2bin("ffffffff00000000"); - const deser = fromBytesU64(truth); - assert(deser.error == Error.Ok); - assert(deser.position == 8); - return deser.value == 0xffffffff; -} - -export function testDeserializeU64_u32max_plus1(): bool { - const truth = hex2bin("0000000001000000"); - const deser = fromBytesU64(truth); - assert(deser.hasValue()); - assert(deser.error == Error.Ok); - assert(deser.position == 8); - return deser.value == 4294967296; -} - -export function testDeserializeU64_EOF(): bool { - const truth = hex2bin("0000"); - const deser = fromBytesU64(truth); - assert(deser.error == Error.EarlyEndOfStream); - assert(deser.position == 0); - return !deser.hasValue(); -} - -export function testDeserializeU64_u64max(): bool { - const truth = hex2bin("feffffffffffffff"); - const deser = fromBytesU64(truth); - assert(deser.error == Error.Ok); - assert(deser.position == 8); - return deser.value == 18446744073709551614; -} - -export function testDeSerListOfStrings(): bool { - const truth = hex2bin("03000000030000006162630a0000003132333435363738393006000000717765727479"); - const result = fromBytesStringList(truth); - assert(result.error == Error.Ok); - assert(result.hasValue()); - const strList = result.value; - assert(result.position == truth.length); - - assert(checkArraysEqual(strList, [ - "abc", - "1234567890", - "qwerty", - ])); - - let lhs = toBytesStringList(strList); - let rhs = typedToArray(truth); - return checkArraysEqual(lhs, rhs); -}; - -export function testDeSerEmptyListOfStrings(): bool { - const truth = hex2bin("00000000"); - const result = fromBytesStringList(truth); - assert(result.error == Error.Ok); - assert(result.position == 4); - return checkArraysEqual(result.value, []); -}; - -export function testDeSerEmptyMap(): bool { - const truth = hex2bin("00000000"); - const result = fromBytesMap( - truth, - fromBytesString, - Key.fromBytes); - 
assert(result.error == Error.Ok); - assert(result.hasValue()); - assert(result.position == 4); - return checkArraysEqual(result.value, >>[]); -}; - -export function testSerializeMap(): bool { - // let mut m = BTreeMap::new(); - // m.insert("Key1".to_string(), "Value1".to_string()); - // m.insert("Key2".to_string(), "Value2".to_string()); - // let truth = m.to_bytes().unwrap(); - const truth = hex2bin( - "02000000040000004b6579310600000056616c756531040000004b6579320600000056616c756532" - ); - const pairs = new Array>(); - pairs.push(new Pair("Key1", "Value1")); - pairs.push(new Pair("Key2", "Value2")); - const serialized = toBytesMap(pairs, toBytesString, toBytesString); - assert(checkArraysEqual(serialized, typedToArray(truth))); - - const deser = fromBytesMap( - arrayToTyped(serialized), - fromBytesString, - fromBytesString); - - assert(deser.error == Error.Ok); - assert(deser.position == truth.length); - let listOfPairs = deser.value; - - let res1 = false; - let res2 = false; - for (let i = 0; i < listOfPairs.length; i++) { - if (listOfPairs[i].first == "Key1" && listOfPairs[i].second == "Value1") { - res1 = true; - } - if (listOfPairs[i].first == "Key2" && listOfPairs[i].second == "Value2") { - res2 = true; - } - } - assert(res1); - assert(res2); - return listOfPairs.length == 2; -} - -export function testToBytesVecT(): bool { - // let args = ("get_payment_purse",).parse().unwrap().to_bytes().unwrap(); - const truth = hex2bin("0100000015000000110000006765745f7061796d656e745f70757273650a"); - let serialize = function(item: CLValue): Array { return item.toBytes(); }; - let serialized = toBytesVecT([ - CLValue.fromString("get_payment_purse"), - ], serialize); - return checkArraysEqual(serialized, typedToArray(truth)); -} - -export function testKeyOfURefVariantSerializes(): bool { - // URef with access rights - const truth = hex2bin("022a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a07"); - const urefBytes = 
hex2bin("2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a"); - let uref = new URef(urefBytes, AccessRights.READ_ADD_WRITE); - let key = Key.fromURef(uref); - let serialized = key.toBytes(); - - return checkArraysEqual(serialized, typedToArray(truth)); -}; - -export function testDeSerString(): bool { - // Rust: let bytes = "hello_world".to_bytes().unwrap(); - const truth = hex2bin("0b00000068656c6c6f5f776f726c64"); - - const ser = toBytesString("hello_world"); - assert(checkArraysEqual(ser, typedToArray(truth))); - - const deser = fromBytesString(arrayToTyped(ser)); - assert(deser.error == Error.Ok); - return deser.value == "hello_world"; -} - -export function testDeSerIncompleteString(): bool { - // Rust: let bytes = "hello_world".to_bytes().unwrap(); - const truth = hex2bin("0b00000068656c6c6f5f776f726c"); - // last byte removed from the truth to signalize incomplete data - const deser = fromBytesString(truth); - assert(deser.error == Error.EarlyEndOfStream); - return !deser.hasValue(); -} - -export function testDecodeURefFromBytesWithoutAccessRights(): bool { - const truth = hex2bin("2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a00"); - let urefResult = URef.fromBytes(truth); - assert(urefResult.error == Error.Ok); - assert(urefResult.hasValue()); - let uref = urefResult.value; - - let urefBytes = new Array(32); - urefBytes.fill(42); - - - assert(checkArraysEqual(typedToArray(uref.getBytes()), urefBytes)); - assert(uref.getAccessRights() === AccessRights.NONE); - let serialized = uref.toBytes(); - return checkArraysEqual(serialized, typedToArray(truth)); -} - -export function testDecodeURefFromBytesWithAccessRights(): bool { - const truth = hex2bin("2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a07"); - const urefResult = URef.fromBytes(truth); - assert(urefResult.error == Error.Ok); - assert(urefResult.position == truth.length); - const uref = urefResult.value; - 
assert(checkArraysEqual(typedToArray(uref.getBytes()), [ - 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, - 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, - 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, - 42, 42, - ])); - return uref.getAccessRights() == 0x07; // NOTE: 0x07 is READ_ADD_WRITE -} - -export function testDecodedOptionalIsNone(): bool { - let optionalSome = new Uint8Array(10); - optionalSome[0] = 0; - let res = Option.fromBytes(optionalSome); - assert(res.isNone(), "option should be NONE"); - return !res.isSome(); -}; - -export function testDecodedOptionalIsSome(): bool { - let optionalSome = new Uint8Array(10); - for (let i = 0; i < 10; i++) { - optionalSome[i] = i + 1; - } - let res = Option.fromBytes(optionalSome); - assert(res !== null); - let unwrapped = res.unwrap(); - assert(unwrapped !== null, "unwrapped should not be null"); - let values = unwrapped; - let rhs: Array = [2, 3, 4, 5, 6, 7, 8, 9, 10]; - return checkArraysEqual(typedToArray(values), rhs); -}; - -export function testDeserMapOfNamedKeys(): bool { - - let extraBytes = "fffefd"; - let truthBytes = "0300000001000000410001010101010101010101010101010101010101010101010101010101010101010200000042420202020202020202020202020202020202020202020202020202020202020202020703000000434343010303030303030303030303030303030303030303030303030303030303030303"; - - let truth = hex2bin(truthBytes + extraBytes); - - const mapResult = fromBytesMap( - truth, - fromBytesString, - Key.fromBytes); - assert(mapResult.error == Error.Ok); - let deserializedBytes = mapResult.position; - assert(deserializedBytes == truth.length - hex2bin(extraBytes).length); - - let deser = mapResult.value; - assert(deser.length === 3); - - assert(deser[0].first == "A"); - assert(deser[0].second.variant == KeyVariant.ACCOUNT_ID); - - let accountBytes = new Array(32); - accountBytes.fill(1); - - assert(checkTypedArrayEqual((deser[0].second.account).bytes, arrayToTyped(accountBytes))); - assert(checkTypedArrayEqual((deser[0].second.account).bytes, 
arrayToTyped(accountBytes))); - - // - - assert(deser[1].first == "BB"); - assert(deser[1].second.variant == KeyVariant.UREF_ID); - - let urefBytes = new Array(32); - urefBytes.fill(2); - - assert(deser[1].second.uref !== null); - let deser1Uref = deser[1].second.uref; - assert(checkTypedArrayEqual(deser1Uref.bytes, arrayToTyped(urefBytes))); - assert(deser1Uref.accessRights == AccessRights.READ_ADD_WRITE); - - // - - assert(deser[2].first == "CCC"); - assert(deser[2].second.variant == KeyVariant.HASH_ID); - - let hashBytes = new Array(32); - hashBytes.fill(3); - - assert(checkTypedArrayEqual(deser[2].second.hash, arrayToTyped(hashBytes))); - - // Compares to truth - - let truthObj = new Array>(); - let keyA = Key.fromAccount(new AccountHash(arrayToTyped(accountBytes))); - truthObj.push(new Pair("A", keyA)); - - let urefB = new URef(arrayToTyped(urefBytes), AccessRights.READ_ADD_WRITE); - let keyB = Key.fromURef(urefB); - truthObj.push(new Pair("BB", keyB)); - - let keyC = Key.fromHash(arrayToTyped(hashBytes)); - truthObj.push(new Pair("CCC", keyC)); - - assert(truthObj.length === deser.length); - assert(truthObj[0] == deser[0]); - assert(truthObj[1] == deser[1]); - assert(truthObj[2] == deser[2]); - - assert(checkArraysEqual(truthObj, deser)); - assert(checkItemsEqual(truthObj, deser)); - - return true; -} - -function useEntryPointAccess(entryPointAccess: EntryPointAccess): Array { - return entryPointAccess.toBytes(); -} - -export function testPublicEntryPointAccess(): bool { - let publicTruth = hex2bin("01"); - let publicAccess = new PublicAccess(); - let bytes = useEntryPointAccess(publicAccess); - assert(bytes.length == 1); - assert(checkArraysEqual(typedToArray(publicTruth), bytes)); - return true; -} - -export function testGroupEntryPointAccess(): bool { - let publicTruth = hex2bin("02030000000700000047726f757020310700000047726f757020320700000047726f75702033"); - let publicAccess = new GroupAccess(["Group 1", "Group 2", "Group 3"]); - let bytes = 
useEntryPointAccess(publicAccess); - assert(checkArraysEqual(typedToArray(publicTruth), bytes)); - return true; -} - -export function testComplexCLType(): bool { - let type = CLType.byteArray(32); - let bytes = type.toBytes(); - let truth = hex2bin("0f20000000"); - assert(checkArraysEqual(typedToArray(truth), bytes)); - - return true; -} - -export function testToBytesEntryPoint(): bool { - let entryPoints = new EntryPoints(); - let args = new Array>(); - args.push(new Pair("param1", new CLType(CLTypeTag.U512))); - let entryPoint = new EntryPoint("delegate", args, new CLType(CLTypeTag.Unit), new PublicAccess(), EntryPointType.Contract); - entryPoints.addEntryPoint(entryPoint); - let bytes = entryPoints.toBytes(); - let truth = hex2bin("010000000800000064656c65676174650800000064656c65676174650100000006000000706172616d3108090101"); - assert(checkArraysEqual(typedToArray(truth), bytes)); - return true; -} \ No newline at end of file diff --git a/smart_contracts/contract_as/tests/assembly/runtime_args.spec.as.ts b/smart_contracts/contract_as/tests/assembly/runtime_args.spec.as.ts deleted file mode 100644 index 1dcffdb350..0000000000 --- a/smart_contracts/contract_as/tests/assembly/runtime_args.spec.as.ts +++ /dev/null @@ -1,46 +0,0 @@ -import { hex2bin } from "../utils/helpers"; -import { checkArraysEqual, checkItemsEqual } from "../../assembly/utils"; -import { typedToArray } from "../../assembly/utils"; -import { RuntimeArgs } from "../../assembly/runtime_args"; -import { Pair } from "../../assembly/pair"; - -import { CLValue } from "../../assembly/clvalue"; -import { U512 } from "../../assembly/bignum"; - - -export function testRuntimeArgs(): bool { - // Source: - // - // ``` - // let args = runtime_args! 
{ - // "arg1" => 42u64, - // "arg2" => "Hello, world!", - // "arg3" => U512::from(123456789), - // }; - // ``` - const truth = hex2bin("030000000400000061726731080000002a00000000000000050400000061726732110000000d00000048656c6c6f2c20776f726c64210a0400000061726733050000000415cd5b0708"); - let runtimeArgs = RuntimeArgs.fromArray([ - new Pair("arg1", CLValue.fromU64(42)), - new Pair("arg2", CLValue.fromString("Hello, world!")), - new Pair("arg3", CLValue.fromU512(U512.fromU64(123456789))), - ]); - let bytes = runtimeArgs.toBytes(); - return checkArraysEqual(typedToArray(truth), bytes); -} - -export function testRuntimeArgs_Empty(): bool { - // Source: - // - // ``` - // let args = runtime_args! { - // "arg1" => 42u64, - // "arg2" => "Hello, world!", - // "arg3" => U512::from(123456789), - // }; - // ``` - const truth = hex2bin("00000000"); - - let runtimeArgs = new RuntimeArgs(); - let bytes = runtimeArgs.toBytes(); - return checkArraysEqual(typedToArray(truth), bytes); -} diff --git a/smart_contracts/contract_as/tests/assembly/utils.spec.as.ts b/smart_contracts/contract_as/tests/assembly/utils.spec.as.ts deleted file mode 100644 index 45a066fa75..0000000000 --- a/smart_contracts/contract_as/tests/assembly/utils.spec.as.ts +++ /dev/null @@ -1,68 +0,0 @@ -import { hex2bin } from "../utils/helpers"; -import { checkArraysEqual, checkItemsEqual } from "../../assembly/utils"; -import { typedToArray } from "../../assembly/utils"; -import { Pair } from "../../assembly/pair"; - -export function testHex2Bin(): bool { - let truth = hex2bin("deadbeef"); - let lhs = typedToArray(truth); - let rhs: Array = [222, 173, 190, 239]; - return checkArraysEqual(lhs, rhs); -} - -export function testcheckArraysEqual(): bool { - assert(checkArraysEqual([], [])); - assert(!checkArraysEqual([1, 2, 3], [1])); - return checkArraysEqual([1, 2, 3], [1, 2, 3]); -} - -export function testItemsEqual(): bool { - let lhs: u32[] = [6,3,5,2,4,1]; - let rhs: u32[] = [1,2,3,4,5,6]; - return 
checkItemsEqual(lhs, rhs); -} - -export function testItemsNotEqual(): bool { - let lhs1: u32[] = [1,2,3,4,5]; - let rhs1: u32[] = [1,2,3,4,5,6]; - - let lhs2: u32[] = [1,2,3,4,5,6]; - let rhs2: u32[] = [1,2,3,4,5]; - assert(!checkItemsEqual(lhs1, rhs1)); - assert(!checkItemsEqual(rhs2, lhs2)); - return true; -} - -export function testItemsNotEqual2(): bool { - let lhs: u32[] = [1,2,3]; - let rhs: u32[] = [1,3,1]; - assert(!checkItemsEqual(lhs, rhs)); - assert(!checkItemsEqual(rhs, lhs)); - return true; -} - -function comp(lhs: Pair, rhs: Pair): bool { - return lhs.equalsTo(rhs); -} - -export function testPairItemsEqual(): bool { - let lhs: Pair[] = [ - new Pair("Key1", "Value1"), - new Pair("Key2", "Value2"), - new Pair("Key3", "Value3"), - ]; - let rhs: Pair[] = [ - new Pair("Key2", "Value2"), - new Pair("Key3", "Value3"), - new Pair("Key1", "Value1"), - ]; - return checkItemsEqual(lhs, rhs); -} - -export function testEmptyItemsEqual(): bool { - let lhs: Pair[] = [ - ]; - let rhs: Pair[] = [ - ]; - return checkItemsEqual(lhs, rhs); -} diff --git a/smart_contracts/contract_as/tests/bignum.spec.ts b/smart_contracts/contract_as/tests/bignum.spec.ts deleted file mode 100644 index d3a8e45b76..0000000000 --- a/smart_contracts/contract_as/tests/bignum.spec.ts +++ /dev/null @@ -1,3 +0,0 @@ -import {defineTestsFromModule} from "./utils/spec"; - -defineTestsFromModule("bignum"); diff --git a/smart_contracts/contract_as/tests/bytesrepr.spec.ts b/smart_contracts/contract_as/tests/bytesrepr.spec.ts deleted file mode 100644 index cb77da99e3..0000000000 --- a/smart_contracts/contract_as/tests/bytesrepr.spec.ts +++ /dev/null @@ -1,3 +0,0 @@ -import {defineTestsFromModule} from "./utils/spec"; - -defineTestsFromModule("bytesrepr"); diff --git a/smart_contracts/contract_as/tests/runtime_args.spec.ts b/smart_contracts/contract_as/tests/runtime_args.spec.ts deleted file mode 100644 index 86161ecb60..0000000000 --- a/smart_contracts/contract_as/tests/runtime_args.spec.ts +++ /dev/null 
@@ -1,3 +0,0 @@ -import {defineTestsFromModule} from "./utils/spec"; - -defineTestsFromModule("runtime_args"); diff --git a/smart_contracts/contract_as/tests/tsconfig.json b/smart_contracts/contract_as/tests/tsconfig.json deleted file mode 100644 index 72dd2f5d5b..0000000000 --- a/smart_contracts/contract_as/tests/tsconfig.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "extends": "../node_modules/assemblyscript/std/portable.json", - "include": [ - "./**/*.ts", "*.ts", - ] - } - diff --git a/smart_contracts/contract_as/tests/utils.spec.ts b/smart_contracts/contract_as/tests/utils.spec.ts deleted file mode 100644 index ca54c512aa..0000000000 --- a/smart_contracts/contract_as/tests/utils.spec.ts +++ /dev/null @@ -1,3 +0,0 @@ -import {defineTestsFromModule} from "./utils/spec"; - -defineTestsFromModule("utils"); diff --git a/smart_contracts/contract_as/tests/utils/helpers.ts b/smart_contracts/contract_as/tests/utils/helpers.ts deleted file mode 100644 index a3a0bf1ca5..0000000000 --- a/smart_contracts/contract_as/tests/utils/helpers.ts +++ /dev/null @@ -1,16 +0,0 @@ -const HEX_TABLE: String[] = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f']; - -export function hex2bin(hex: String): Uint8Array { - let bin = new Uint8Array(hex.length / 2); - - for (let i = 0; i < hex.length / 2; i++) { - // NOTE: hex.substr + parseInt gives weird results under AssemblyScript - const hi = HEX_TABLE.indexOf(hex[i * 2]); - assert(hi > -1); - const lo = HEX_TABLE.indexOf(hex[(i * 2) + 1]); - assert(lo > -1); - const number = (hi << 4) | lo; - bin[i] = number; - } - return bin; -} diff --git a/smart_contracts/contract_as/tests/utils/spec.ts b/smart_contracts/contract_as/tests/utils/spec.ts deleted file mode 100644 index 2486a1ef3d..0000000000 --- a/smart_contracts/contract_as/tests/utils/spec.ts +++ /dev/null @@ -1,41 +0,0 @@ -import test from 'ava'; - -const fs = require("fs") -const loader = require("@assemblyscript/loader") - -function loadWasmModule(fileName: 
String) { - const myImports = { - env: { - abort(msgPtr, filePtr, line, column) { - var msg = msgPtr > 0 ? myModule.__getString(msgPtr) : ""; - var file = myModule.__getString(filePtr); - console.error(`abort called at ${file}:${line}:${column}: ${msg}`); - }, - }, - } - - let myModule = loader.instantiateSync( - fs.readFileSync(__dirname + `/../../build/${fileName}.spec.as.wasm`), - myImports); - - return myModule; -} - -export function defineTestsFromModule(moduleName: string) { - const instance = loadWasmModule(moduleName); - for (const testName in instance) { - const testInstance = instance[testName]; - - const testId = testName.toLowerCase(); - if (testId.startsWith("test")) { - test(testName, t => { - t.truthy(testInstance()); - }); - } - else if (testId.startsWith("xtest")) { - test.skip(testName, t => { - t.truthy(testInstance()); - }); - } - } -} diff --git a/smart_contracts/contracts/SRE/create-test-node-01/Cargo.toml b/smart_contracts/contracts/SRE/create-test-node-01/Cargo.toml index 62fe19e68c..63239304e0 100644 --- a/smart_contracts/contracts/SRE/create-test-node-01/Cargo.toml +++ b/smart_contracts/contracts/SRE/create-test-node-01/Cargo.toml @@ -2,7 +2,7 @@ name = "create-test-node-01" version = "0.1.0" authors = ["Henry Till "] -edition = "2018" +edition = "2021" [[bin]] name = "create_test_node_01" diff --git a/smart_contracts/contracts/SRE/create-test-node-02/Cargo.toml b/smart_contracts/contracts/SRE/create-test-node-02/Cargo.toml index 81137a6de9..f05ce96db3 100644 --- a/smart_contracts/contracts/SRE/create-test-node-02/Cargo.toml +++ b/smart_contracts/contracts/SRE/create-test-node-02/Cargo.toml @@ -2,7 +2,7 @@ name = "create-test-node-02" version = "0.1.0" authors = ["Henry Till "] -edition = "2018" +edition = "2021" [[bin]] name = "create_test_node_02" diff --git a/smart_contracts/contracts/SRE/create-test-node-03/Cargo.toml b/smart_contracts/contracts/SRE/create-test-node-03/Cargo.toml index 324d589248..5ab2c6f65d 100644 --- 
a/smart_contracts/contracts/SRE/create-test-node-03/Cargo.toml +++ b/smart_contracts/contracts/SRE/create-test-node-03/Cargo.toml @@ -2,7 +2,7 @@ name = "create-test-node-03" version = "0.1.0" authors = ["Henry Till "] -edition = "2018" +edition = "2021" [[bin]] name = "create_test_node_03" diff --git a/smart_contracts/contracts/SRE/create-test-node-shared/Cargo.toml b/smart_contracts/contracts/SRE/create-test-node-shared/Cargo.toml index bf881c0d2f..3fb74f4393 100644 --- a/smart_contracts/contracts/SRE/create-test-node-shared/Cargo.toml +++ b/smart_contracts/contracts/SRE/create-test-node-shared/Cargo.toml @@ -2,10 +2,7 @@ name = "create-test-node-shared" version = "0.1.0" authors = ["Henry Till "] -edition = "2018" - -[features] -std = ["base16/std", "casper-contract/std"] +edition = "2021" [dependencies] base16 = { version = "0.2.1", default-features = false } diff --git a/smart_contracts/contracts/SRE/create-test-node-shared/src/lib.rs b/smart_contracts/contracts/SRE/create-test-node-shared/src/lib.rs index 21156692d9..8232c28922 100644 --- a/smart_contracts/contracts/SRE/create-test-node-shared/src/lib.rs +++ b/smart_contracts/contracts/SRE/create-test-node-shared/src/lib.rs @@ -13,9 +13,9 @@ enum Error { FailedToParseAccountHash = 12, } -impl Into for Error { - fn into(self) -> ApiError { - ApiError::User(self as u16) +impl From for ApiError { + fn from(error: Error) -> Self { + ApiError::User(error as u16) } } diff --git a/smart_contracts/contracts/admin/disable-contract/Cargo.toml b/smart_contracts/contracts/admin/disable-contract/Cargo.toml new file mode 100644 index 0000000000..c8d1ece1a7 --- /dev/null +++ b/smart_contracts/contracts/admin/disable-contract/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "disable-contract" +version = "0.1.0" +authors = ["Michał Papierski "] +edition = "2021" + +[[bin]] +name = "disable_contract" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = 
"../../../contract" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/admin/disable-contract/src/main.rs b/smart_contracts/contracts/admin/disable-contract/src/main.rs new file mode 100644 index 0000000000..2dc66e5729 --- /dev/null +++ b/smart_contracts/contracts/admin/disable-contract/src/main.rs @@ -0,0 +1,21 @@ +#![no_std] +#![no_main] + +use casper_contract::{ + contract_api::{runtime, storage}, + unwrap_or_revert::UnwrapOrRevert, +}; +use casper_types::{AddressableEntityHash, PackageHash}; + +const ARG_CONTRACT_PACKAGE_HASH: &str = "contract_package_hash"; +const ARG_CONTRACT_HASH: &str = "contract_hash"; + +#[no_mangle] +pub extern "C" fn call() { + // This contract can be run only by an administrator account. + let contract_package_hash: PackageHash = runtime::get_named_arg(ARG_CONTRACT_PACKAGE_HASH); + let contract_hash: AddressableEntityHash = runtime::get_named_arg(ARG_CONTRACT_HASH); + + storage::disable_contract_version(contract_package_hash.into(), contract_hash.into()) + .unwrap_or_revert(); +} diff --git a/smart_contracts/contracts/admin/enable-contract/Cargo.toml b/smart_contracts/contracts/admin/enable-contract/Cargo.toml new file mode 100644 index 0000000000..18264ec9dc --- /dev/null +++ b/smart_contracts/contracts/admin/enable-contract/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "enable-contract" +version = "0.1.0" +authors = ["Michał Papierski "] +edition = "2021" + +[[bin]] +name = "enable_contract" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/admin/enable-contract/src/main.rs b/smart_contracts/contracts/admin/enable-contract/src/main.rs new file mode 100644 index 0000000000..11450d6cf5 --- /dev/null +++ b/smart_contracts/contracts/admin/enable-contract/src/main.rs @@ -0,0 +1,21 @@ +#![no_std] +#![no_main] + +use 
casper_contract::{ + contract_api::{runtime, storage}, + unwrap_or_revert::UnwrapOrRevert, +}; +use casper_types::{AddressableEntityHash, PackageHash}; + +const ARG_CONTRACT_PACKAGE_HASH: &str = "contract_package_hash"; +const ARG_CONTRACT_HASH: &str = "contract_hash"; + +#[no_mangle] +pub extern "C" fn call() { + // This contract can be run only by an administrator account. + let contract_package_hash: PackageHash = runtime::get_named_arg(ARG_CONTRACT_PACKAGE_HASH); + let contract_hash: AddressableEntityHash = runtime::get_named_arg(ARG_CONTRACT_HASH); + + storage::enable_contract_version(contract_package_hash.into(), contract_hash.into()) + .unwrap_or_revert(); +} diff --git a/smart_contracts/contracts/bench/create-accounts/Cargo.toml b/smart_contracts/contracts/bench/create-accounts/Cargo.toml index 2a06e5fcf0..54f0d1ca1b 100644 --- a/smart_contracts/contracts/bench/create-accounts/Cargo.toml +++ b/smart_contracts/contracts/bench/create-accounts/Cargo.toml @@ -2,7 +2,7 @@ name = "create-accounts" version = "0.1.0" authors = ["Michał Papierski "] -edition = "2018" +edition = "2021" [[bin]] name = "create_accounts" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/bench/create-accounts/src/main.rs b/smart_contracts/contracts/bench/create-accounts/src/main.rs index dd72af8a07..b790ae0af0 100644 --- a/smart_contracts/contracts/bench/create-accounts/src/main.rs +++ b/smart_contracts/contracts/bench/create-accounts/src/main.rs @@ -12,12 +12,13 @@ use casper_contract::{ use casper_types::{account::AccountHash, ApiError, U512}; const ARG_ACCOUNTS: &str = "accounts"; -const ARG_SEED_AMOUNT: &str = "seed_amount"; +const ARG_AMOUNT: &str = "amount"; #[no_mangle] pub extern "C" fn call() { let accounts: Vec = runtime::get_named_arg(ARG_ACCOUNTS); - let 
seed_amount: U512 = runtime::get_named_arg(ARG_SEED_AMOUNT); + let amount: U512 = runtime::get_named_arg(ARG_AMOUNT); + let seed_amount = amount / accounts.len(); for account_hash in accounts { system::transfer_to_account(account_hash, seed_amount, None) .unwrap_or_revert_with(ApiError::Transfer); diff --git a/smart_contracts/contracts/bench/create-purses/Cargo.toml b/smart_contracts/contracts/bench/create-purses/Cargo.toml index ac97e64054..0253c5cdd4 100644 --- a/smart_contracts/contracts/bench/create-purses/Cargo.toml +++ b/smart_contracts/contracts/bench/create-purses/Cargo.toml @@ -2,7 +2,7 @@ name = "create-purses" version = "0.1.0" authors = ["Michał Papierski "] -edition = "2018" +edition = "2021" [[bin]] name = "create_purses" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/bench/create-purses/src/main.rs b/smart_contracts/contracts/bench/create-purses/src/main.rs index c716293640..3b7aff76ec 100644 --- a/smart_contracts/contracts/bench/create-purses/src/main.rs +++ b/smart_contracts/contracts/bench/create-purses/src/main.rs @@ -11,12 +11,13 @@ use casper_contract::{ use casper_types::U512; const ARG_TOTAL_PURSES: &str = "total_purses"; -const ARG_SEED_AMOUNT: &str = "seed_amount"; +const ARG_AMOUNT: &str = "amount"; #[no_mangle] pub extern "C" fn call() { let total_purses: u64 = runtime::get_named_arg(ARG_TOTAL_PURSES); - let seed_amount: U512 = runtime::get_named_arg(ARG_SEED_AMOUNT); + let amount: U512 = runtime::get_named_arg(ARG_AMOUNT); + let seed_amount = amount / total_purses; for i in 0..total_purses { let new_purse = system::create_purse(); diff --git a/smart_contracts/contracts/bench/transfer-to-existing-account/Cargo.toml b/smart_contracts/contracts/bench/transfer-to-existing-account/Cargo.toml index 
1f9ba2c07b..ca08478414 100644 --- a/smart_contracts/contracts/bench/transfer-to-existing-account/Cargo.toml +++ b/smart_contracts/contracts/bench/transfer-to-existing-account/Cargo.toml @@ -2,7 +2,7 @@ name = "transfer-to-existing-account" version = "0.1.0" authors = ["Michał Papierski "] -edition = "2018" +edition = "2021" [[bin]] name = "transfer_to_existing_account" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/bench/transfer-to-purse/Cargo.toml b/smart_contracts/contracts/bench/transfer-to-purse/Cargo.toml index 0508c1d022..929a770f33 100644 --- a/smart_contracts/contracts/bench/transfer-to-purse/Cargo.toml +++ b/smart_contracts/contracts/bench/transfer-to-purse/Cargo.toml @@ -2,7 +2,7 @@ name = "transfer-to-purse" version = "0.1.0" authors = ["Michał Papierski "] -edition = "2018" +edition = "2021" [[bin]] name = "transfer_to_purse" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/client/activate-bid/Cargo.toml b/smart_contracts/contracts/client/activate-bid/Cargo.toml index f580a83e07..9f92c35986 100644 --- a/smart_contracts/contracts/client/activate-bid/Cargo.toml +++ b/smart_contracts/contracts/client/activate-bid/Cargo.toml @@ -2,7 +2,7 @@ name = "activate-bid" version = "0.1.0" authors = ["Henry Till "] -edition = "2018" +edition = "2021" [[bin]] name = "activate_bid" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff 
--git a/smart_contracts/contracts/client/activate-bid/src/main.rs b/smart_contracts/contracts/client/activate-bid/src/main.rs index 54adc08ee4..2e040b2aa6 100644 --- a/smart_contracts/contracts/client/activate-bid/src/main.rs +++ b/smart_contracts/contracts/client/activate-bid/src/main.rs @@ -1,17 +1,15 @@ #![no_std] #![no_main] -extern crate alloc; - use casper_contract::contract_api::{runtime, system}; -use casper_types::{runtime_args, system::auction, PublicKey, RuntimeArgs}; +use casper_types::{runtime_args, system::auction, PublicKey}; -const ARG_VALIDATOR_PUBLIC_KEY: &str = "validator_public_key"; +const ARG_VALIDATOR: &str = "validator"; fn activate_bid(public_key: PublicKey) { let contract_hash = system::get_auction(); let args = runtime_args! { - auction::ARG_VALIDATOR_PUBLIC_KEY => public_key, + auction::ARG_VALIDATOR => public_key, }; runtime::call_contract::<()>(contract_hash, auction::METHOD_ACTIVATE_BID, args); } @@ -19,6 +17,6 @@ fn activate_bid(public_key: PublicKey) { // Accepts a public key. Issues an activate-bid bid to the auction contract. 
#[no_mangle] pub extern "C" fn call() { - let public_key: PublicKey = runtime::get_named_arg(ARG_VALIDATOR_PUBLIC_KEY); + let public_key: PublicKey = runtime::get_named_arg(ARG_VALIDATOR); activate_bid(public_key); } diff --git a/smart_contracts/contracts/client/add-bid/Cargo.toml b/smart_contracts/contracts/client/add-bid/Cargo.toml index fb3cdb0b86..cbd8f1e645 100644 --- a/smart_contracts/contracts/client/add-bid/Cargo.toml +++ b/smart_contracts/contracts/client/add-bid/Cargo.toml @@ -2,7 +2,7 @@ name = "add-bid" version = "0.1.0" authors = ["Michael Birch "] -edition = "2018" +edition = "2021" [[bin]] name = "add_bid" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/client/add-bid/src/main.rs b/smart_contracts/contracts/client/add-bid/src/main.rs index f9b5d1da83..bd0faaa490 100644 --- a/smart_contracts/contracts/client/add-bid/src/main.rs +++ b/smart_contracts/contracts/client/add-bid/src/main.rs @@ -3,36 +3,119 @@ extern crate alloc; -use casper_contract::contract_api::{runtime, system}; +use alloc::vec::Vec; + +use casper_contract::{ + contract_api, + contract_api::{runtime, system}, + ext_ffi, + unwrap_or_revert::UnwrapOrRevert, +}; use casper_types::{ - runtime_args, - system::auction::{self, DelegationRate}, - PublicKey, RuntimeArgs, U512, + api_error, bytesrepr, bytesrepr::FromBytes, runtime_args, system::auction, ApiError, PublicKey, + U512, }; -const ARG_AMOUNT: &str = "amount"; -const ARG_DELEGATION_RATE: &str = "delegation_rate"; -const ARG_PUBLIC_KEY: &str = "public_key"; +fn get_named_arg_size(name: &str) -> Option { + let mut arg_size: usize = 0; + let ret = unsafe { + ext_ffi::casper_get_named_arg_size( + name.as_bytes().as_ptr(), + name.len(), + &mut arg_size as *mut usize, + ) + }; + match api_error::result_from(ret) { + 
Ok(_) => Some(arg_size), + Err(ApiError::MissingArgument) => None, + Err(e) => runtime::revert(e), + } +} -fn add_bid(public_key: PublicKey, bond_amount: U512, delegation_rate: DelegationRate) { +// The optional here is literal and does not co-relate to an Option enum type. +// If the argument has been provided it is accepted, and is then turned into a Some. +// If the argument is not provided at all, then it is considered as None. +pub fn get_optional_named_args(name: &str) -> Option { + let arg_size = get_named_arg_size(name)?; + let arg_bytes = if arg_size > 0 { + let res = { + let data_non_null_ptr = contract_api::alloc_bytes(arg_size); + let ret = unsafe { + ext_ffi::casper_get_named_arg( + name.as_bytes().as_ptr(), + name.len(), + data_non_null_ptr.as_ptr(), + arg_size, + ) + }; + let data = + unsafe { Vec::from_raw_parts(data_non_null_ptr.as_ptr(), arg_size, arg_size) }; + api_error::result_from(ret).map(|_| data) + }; + // Assumed to be safe as `get_named_arg_size` checks the argument already + res.unwrap_or_revert() + } else { + // Avoids allocation with 0 bytes and a call to get_named_arg + Vec::new() + }; + + bytesrepr::deserialize(arg_bytes).ok() +} + +fn add_bid( + public_key: PublicKey, + bond_amount: U512, + delegation_rate: auction::DelegationRate, + minimum_delegation_amount: Option, + maximum_delegation_amount: Option, + reserved_slots: Option, +) { let contract_hash = system::get_auction(); - let args = runtime_args! { + let mut args = runtime_args! 
{ auction::ARG_PUBLIC_KEY => public_key, auction::ARG_AMOUNT => bond_amount, auction::ARG_DELEGATION_RATE => delegation_rate, }; + // Optional arguments + if let Some(minimum_delegation_amount) = minimum_delegation_amount { + let _ = args.insert( + auction::ARG_MINIMUM_DELEGATION_AMOUNT, + minimum_delegation_amount, + ); + } + if let Some(maximum_delegation_amount) = maximum_delegation_amount { + let _ = args.insert( + auction::ARG_MAXIMUM_DELEGATION_AMOUNT, + maximum_delegation_amount, + ); + } + if let Some(reserved_slots) = reserved_slots { + let _ = args.insert(auction::ARG_RESERVED_SLOTS, reserved_slots); + } runtime::call_contract::(contract_hash, auction::METHOD_ADD_BID, args); } // Bidding contract. // -// Accepts a public key, amount and a delgation rate. +// Accepts a public key, amount and a delegation rate. // Issues an add bid request to the auction contract. #[no_mangle] pub extern "C" fn call() { - let public_key = runtime::get_named_arg(ARG_PUBLIC_KEY); - let bond_amount = runtime::get_named_arg(ARG_AMOUNT); - let delegation_rate = runtime::get_named_arg(ARG_DELEGATION_RATE); + let public_key = runtime::get_named_arg(auction::ARG_PUBLIC_KEY); + let bond_amount = runtime::get_named_arg(auction::ARG_AMOUNT); + let delegation_rate = runtime::get_named_arg(auction::ARG_DELEGATION_RATE); + + // Optional arguments + let minimum_delegation_amount = get_optional_named_args(auction::ARG_MINIMUM_DELEGATION_AMOUNT); + let maximum_delegation_amount = get_optional_named_args(auction::ARG_MAXIMUM_DELEGATION_AMOUNT); + let reserved_slots = get_optional_named_args(auction::ARG_RESERVED_SLOTS); - add_bid(public_key, bond_amount, delegation_rate); + add_bid( + public_key, + bond_amount, + delegation_rate, + minimum_delegation_amount, + maximum_delegation_amount, + reserved_slots, + ); } diff --git a/smart_contracts/contracts/client/add-reservations/Cargo.toml b/smart_contracts/contracts/client/add-reservations/Cargo.toml new file mode 100644 index 
0000000000..1381a63864 --- /dev/null +++ b/smart_contracts/contracts/client/add-reservations/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "add-reservations" +version = "0.1.0" +authors = ["Jacek Chmielewski "] +edition = "2021" + +[[bin]] +name = "add_reservations" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/client/add-reservations/src/main.rs b/smart_contracts/contracts/client/add-reservations/src/main.rs new file mode 100644 index 0000000000..96177b127f --- /dev/null +++ b/smart_contracts/contracts/client/add-reservations/src/main.rs @@ -0,0 +1,31 @@ +#![no_std] +#![no_main] + +extern crate alloc; + +use alloc::vec::Vec; + +use casper_contract::contract_api::{runtime, system}; +use casper_types::{ + runtime_args, + system::auction::{self, Reservation}, +}; + +fn add_reservations(reservations: Vec) { + let contract_hash = system::get_auction(); + let args = runtime_args! { + auction::ARG_RESERVATIONS => reservations, + }; + runtime::call_contract::<()>(contract_hash, auction::METHOD_ADD_RESERVATIONS, args); +} + +// Add delegators to validator's reserved list. +// +// Accepts reservations. +// Issues an add_reservations request to the auction contract. 
+#[no_mangle] +pub extern "C" fn call() { + let reservations: Vec = runtime::get_named_arg(auction::ARG_RESERVATIONS); + + add_reservations(reservations); +} diff --git a/smart_contracts/contracts/client/burn/Cargo.toml b/smart_contracts/contracts/client/burn/Cargo.toml new file mode 100644 index 0000000000..f9949db688 --- /dev/null +++ b/smart_contracts/contracts/client/burn/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "burn" +version = "0.1.0" +authors = ["Igor Bunar ", "Jan Hoffmann "] +edition = "2021" + +[[bin]] +name = "burn" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/client/burn/src/main.rs b/smart_contracts/contracts/client/burn/src/main.rs new file mode 100644 index 0000000000..545fedafd3 --- /dev/null +++ b/smart_contracts/contracts/client/burn/src/main.rs @@ -0,0 +1,89 @@ +#![no_std] +#![no_main] + +extern crate alloc; +use alloc::{string::String, vec::Vec}; + +use casper_contract::{ + contract_api::{account, alloc_bytes, runtime, system}, + ext_ffi, + unwrap_or_revert::UnwrapOrRevert, +}; +use casper_types::{api_error, bytesrepr, runtime_args, system::mint, ApiError, Key, URef, U512}; + +const ARG_PURSE_NAME: &str = "purse_name"; + +fn burn(uref: URef, amount: U512) -> Result<(), mint::Error> { + let contract_hash = system::get_mint(); + let args = runtime_args! { + mint::ARG_PURSE => uref, + mint::ARG_AMOUNT => amount, + }; + runtime::call_contract(contract_hash, mint::METHOD_BURN, args) +} + +#[no_mangle] +pub extern "C" fn call() { + let purse_uref = match get_named_arg_option::(ARG_PURSE_NAME) { + Some(name) => { + // if a key was provided and there is no value under it we revert + // to prevent user from accidentaly burning tokens from the main purse + // eg. 
if they make a typo + let Some(Key::URef(purse_uref)) = runtime::get_key(&name) else { + runtime::revert(ApiError::InvalidPurseName) + }; + purse_uref + } + None => account::get_main_purse(), + }; + let amount: U512 = runtime::get_named_arg(mint::ARG_AMOUNT); + + burn(purse_uref, amount).unwrap_or_revert(); +} + +fn get_named_arg_size(name: &str) -> Option { + let mut arg_size: usize = 0; + let ret = unsafe { + ext_ffi::casper_get_named_arg_size( + name.as_bytes().as_ptr(), + name.len(), + &mut arg_size as *mut usize, + ) + }; + match api_error::result_from(ret) { + Ok(_) => Some(arg_size), + Err(ApiError::MissingArgument) => None, + Err(e) => runtime::revert(e), + } +} + +fn get_named_arg_option(name: &str) -> Option { + let arg_size = get_named_arg_size(name).unwrap_or_revert_with(ApiError::MissingArgument); + let arg_bytes = if arg_size > 0 { + let res = { + let data_non_null_ptr = alloc_bytes(arg_size); + let ret = unsafe { + ext_ffi::casper_get_named_arg( + name.as_bytes().as_ptr(), + name.len(), + data_non_null_ptr.as_ptr(), + arg_size, + ) + }; + let data = + unsafe { Vec::from_raw_parts(data_non_null_ptr.as_ptr(), arg_size, arg_size) }; + if ret != 0 { + return None; + } + data + }; + res + } else { + // Avoids allocation with 0 bytes and a call to get_named_arg + Vec::new() + }; + + let deserialized_data = + bytesrepr::deserialize(arg_bytes).unwrap_or_revert_with(ApiError::InvalidArgument); + Some(deserialized_data) +} diff --git a/smart_contracts/contracts/client/call-contract/Cargo.toml b/smart_contracts/contracts/client/call-contract/Cargo.toml new file mode 100644 index 0000000000..40ef99fc70 --- /dev/null +++ b/smart_contracts/contracts/client/call-contract/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "call-contract" +version = "0.1.0" +authors = ["Michał Papierski "] +edition = "2021" + +[[bin]] +name = "call_contract" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = 
"../../../contract" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/client/call-contract/src/main.rs b/smart_contracts/contracts/client/call-contract/src/main.rs new file mode 100644 index 0000000000..238ead3694 --- /dev/null +++ b/smart_contracts/contracts/client/call-contract/src/main.rs @@ -0,0 +1,86 @@ +#![no_std] +#![no_main] + +#[macro_use] +extern crate alloc; + +use core::mem::MaybeUninit; + +use alloc::string::String; +use casper_contract::{contract_api::runtime, ext_ffi, unwrap_or_revert::UnwrapOrRevert}; +use casper_types::{ + api_error, + bytesrepr::{self, Bytes, ToBytes}, + AddressableEntityHash, ApiError, RuntimeArgs, +}; + +const ARG_CONTRACT_HASH: &str = "contract_hash"; +const ARG_ENTRYPOINT: &str = "entrypoint"; +const ARG_ARGUMENTS: &str = "arguments"; + +// Generic call contract contract. +// +// Accepts entrypoint name, and saves possible return value into URef stored in named keys. +#[no_mangle] +pub extern "C" fn call() { + let contract_hash: AddressableEntityHash = runtime::get_named_arg(ARG_CONTRACT_HASH); + let entrypoint: String = runtime::get_named_arg(ARG_ENTRYPOINT); + let arguments: RuntimeArgs = runtime::get_named_arg(ARG_ARGUMENTS); + + let _result_bytes = call_contract_forward(entrypoint, contract_hash, arguments); +} + +fn deserialize_contract_result(bytes_written: usize) -> Option { + if bytes_written == 0 { + // If no bytes were written, the host buffer hasn't been set and hence shouldn't be read. + None + } else { + // NOTE: this is a copy of the contents of `read_host_buffer()`. Calling that directly from + // here causes several contracts to fail with a Wasmi `Unreachable` error. 
+ let mut dest = vec![0; bytes_written]; + let real_size = read_host_buffer_into(&mut dest).unwrap_or_revert(); + assert_eq!(dest.len(), real_size); + + let bytes: Bytes = bytesrepr::deserialize_from_slice(&dest[..real_size]).unwrap_or_revert(); + + Some(bytes) + } +} + +fn read_host_buffer_into(dest: &mut [u8]) -> Result { + let mut bytes_written = MaybeUninit::uninit(); + let ret = unsafe { + ext_ffi::casper_read_host_buffer(dest.as_mut_ptr(), dest.len(), bytes_written.as_mut_ptr()) + }; + api_error::result_from(ret)?; + Ok(unsafe { bytes_written.assume_init() }) +} + +/// Calls a contract and returns unwrapped [`CLValue`]. +fn call_contract_forward( + entrypoint: String, + contract_hash: AddressableEntityHash, + arguments: RuntimeArgs, +) -> Option { + let entry_point_name: &str = &entrypoint; + let contract_hash_ptr = contract_hash.to_bytes().unwrap_or_revert(); + let entry_point_name = entry_point_name.to_bytes().unwrap_or_revert(); + let runtime_args_ptr = arguments.to_bytes().unwrap_or_revert(); + let bytes_written = { + let mut bytes_written = MaybeUninit::uninit(); + let ret = unsafe { + ext_ffi::casper_call_contract( + contract_hash_ptr.as_ptr(), + contract_hash_ptr.len(), + entry_point_name.as_ptr(), + entry_point_name.len(), + runtime_args_ptr.as_ptr(), + runtime_args_ptr.len(), + bytes_written.as_mut_ptr(), + ) + }; + api_error::result_from(ret).unwrap_or_revert(); + unsafe { bytes_written.assume_init() } + }; + deserialize_contract_result(bytes_written) +} diff --git a/smart_contracts/contracts/client/call-package-version-by-hash/Cargo.toml b/smart_contracts/contracts/client/call-package-version-by-hash/Cargo.toml new file mode 100644 index 0000000000..1d44c471c0 --- /dev/null +++ b/smart_contracts/contracts/client/call-package-version-by-hash/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "call-package-version-by-hash" +version = "0.1.0" +edition = "2021" + +[[bin]] +name = "call_package_version_by_hash" +path = "src/main.rs" +bench = false +doctest 
= false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/client/call-package-version-by-hash/src/main.rs b/smart_contracts/contracts/client/call-package-version-by-hash/src/main.rs new file mode 100644 index 0000000000..3f09ac22a1 --- /dev/null +++ b/smart_contracts/contracts/client/call-package-version-by-hash/src/main.rs @@ -0,0 +1,29 @@ +#![no_std] +#![no_main] + +extern crate alloc; + +use alloc::string::String; +use casper_contract::contract_api::runtime; +use casper_types::{contracts::ContractPackageHash, runtime_args}; + +const ARG_PURSE_NAME: &str = "purse_name"; + +#[no_mangle] +pub extern "C" fn call() { + let package_hash: ContractPackageHash = runtime::get_named_arg("contract_package_hash"); + let entity_version: Option = runtime::get_named_arg("version"); + let major_version: Option = runtime::get_named_arg("major_version"); + let entry_point_name: String = runtime::get_named_arg("entry_point"); + let purse_name: String = runtime::get_named_arg(ARG_PURSE_NAME); + + runtime::call_package_version( + package_hash, + major_version, + entity_version, + &entry_point_name, + runtime_args! 
{ + ARG_PURSE_NAME => purse_name + }, + ) +} diff --git a/smart_contracts/contracts/client/cancel-reservations/Cargo.toml b/smart_contracts/contracts/client/cancel-reservations/Cargo.toml new file mode 100644 index 0000000000..4397a2dcb9 --- /dev/null +++ b/smart_contracts/contracts/client/cancel-reservations/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "cancel-reservations" +version = "0.1.0" +authors = ["Jacek Chmielewski "] +edition = "2021" + +[[bin]] +name = "cancel_reservations" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/client/cancel-reservations/src/main.rs b/smart_contracts/contracts/client/cancel-reservations/src/main.rs new file mode 100644 index 0000000000..3a79191a9b --- /dev/null +++ b/smart_contracts/contracts/client/cancel-reservations/src/main.rs @@ -0,0 +1,34 @@ +#![no_std] +#![no_main] + +extern crate alloc; + +use alloc::vec::Vec; + +use casper_contract::contract_api::{runtime, system}; +use casper_types::{ + runtime_args, + system::{auction, auction::DelegatorKind}, + PublicKey, +}; + +fn cancel_reservations(validator: PublicKey, delegators: Vec) { + let contract_hash = system::get_auction(); + let args = runtime_args! { + auction::ARG_VALIDATOR => validator, + auction::ARG_DELEGATORS => delegators, + }; + runtime::call_contract::<()>(contract_hash, auction::METHOD_CANCEL_RESERVATIONS, args); +} + +// Remove delegators from validator's reserved list. +// +// Accepts delegators' and validator's public keys. +// Issues a cancel_reservations request to the auction contract. 
+#[no_mangle] +pub extern "C" fn call() { + let delegators = runtime::get_named_arg(auction::ARG_DELEGATORS); + let validator = runtime::get_named_arg(auction::ARG_VALIDATOR); + + cancel_reservations(validator, delegators); +} diff --git a/smart_contracts/contracts/client/change_bid_public_key/Cargo.toml b/smart_contracts/contracts/client/change_bid_public_key/Cargo.toml new file mode 100644 index 0000000000..d6f5efaf26 --- /dev/null +++ b/smart_contracts/contracts/client/change_bid_public_key/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "change_bid_public_key" +version = "0.1.0" +authors = ["Maciej Wójcik "] +edition = "2018" + +[[bin]] +name = "change_bid_public_key" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/client/change_bid_public_key/src/main.rs b/smart_contracts/contracts/client/change_bid_public_key/src/main.rs new file mode 100644 index 0000000000..be0a8547f5 --- /dev/null +++ b/smart_contracts/contracts/client/change_bid_public_key/src/main.rs @@ -0,0 +1,32 @@ +#![no_std] +#![no_main] + +extern crate alloc; + +use casper_contract::contract_api::{runtime, system}; +use casper_types::{ + runtime_args, + system::auction::{ARG_NEW_PUBLIC_KEY, ARG_PUBLIC_KEY, METHOD_CHANGE_BID_PUBLIC_KEY}, + PublicKey, +}; + +fn change_bid_public_key(public_key: PublicKey, new_public_key: PublicKey) { + let contract_hash = system::get_auction(); + let args = runtime_args! { + ARG_PUBLIC_KEY => public_key, + ARG_NEW_PUBLIC_KEY => new_public_key + }; + runtime::call_contract::<()>(contract_hash, METHOD_CHANGE_BID_PUBLIC_KEY, args); +} + +// Change validator bid public key. +// +// Accepts current bid's public key and new public key. +// Updates existing validator bid and all related delegator bids with +// the new public key. 
+#[no_mangle] +pub extern "C" fn call() { + let public_key = runtime::get_named_arg(ARG_PUBLIC_KEY); + let new_public_key = runtime::get_named_arg(ARG_NEW_PUBLIC_KEY); + change_bid_public_key(public_key, new_public_key); +} diff --git a/smart_contracts/contracts/client/counter-define/Cargo.toml b/smart_contracts/contracts/client/counter-define/Cargo.toml deleted file mode 100644 index 37bc030327..0000000000 --- a/smart_contracts/contracts/client/counter-define/Cargo.toml +++ /dev/null @@ -1,19 +0,0 @@ -[package] -name = "counter-define" -version = "0.1.0" -authors = ["Ed Hastings "] -edition = "2018" - -[[bin]] -name = "counter_define" -path = "src/main.rs" -bench = false -doctest = false -test = false - -[features] -std = ["casper-contract/std", "casper-types/std"] - -[dependencies] -casper-contract = { path = "../../../contract" } -casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/client/counter-define/src/main.rs b/smart_contracts/contracts/client/counter-define/src/main.rs deleted file mode 100644 index ba110334bc..0000000000 --- a/smart_contracts/contracts/client/counter-define/src/main.rs +++ /dev/null @@ -1,168 +0,0 @@ -#![no_std] -#![no_main] - -extern crate alloc; - -use alloc::{string::String, vec, vec::Vec}; -use core::convert::TryInto; - -use casper_contract::{ - contract_api::{self, runtime, storage}, - ext_ffi, - unwrap_or_revert::UnwrapOrRevert, -}; -use casper_types::{ - api_error::{self}, - bytesrepr::{self}, - contracts::NamedKeys, - runtime_args, ApiError, CLType, CLValue, ContractPackageHash, EntryPoint, EntryPointAccess, - EntryPointType, EntryPoints, Key, Parameter, RuntimeArgs, URef, -}; - -const HASH_KEY_NAME: &str = "counter_package_hash"; -const ACCESS_KEY_NAME: &str = "counter_package_access"; -const CONTRACT_VERSION_KEY: &str = "contract_version"; -const ENTRYPOINT_SESSION: &str = "session"; -const ENTRYPOINT_COUNTER: &str = "counter"; -const ARG_COUNTER_METHOD: &str = "method"; -const 
ARG_CONTRACT_HASH_NAME: &str = "counter_contract_hash"; -const COUNTER_VALUE_UREF: &str = "counter"; -const METHOD_GET: &str = "get"; -const METHOD_INC: &str = "inc"; - -#[no_mangle] -pub extern "C" fn counter() { - let uref = runtime::get_key(COUNTER_VALUE_UREF) - .unwrap_or_revert() - .try_into() - .unwrap_or_revert(); - - let method_name: String = runtime::get_named_arg(ARG_COUNTER_METHOD); - - match method_name.as_str() { - METHOD_INC => storage::add(uref, 1), - METHOD_GET => { - let result: i32 = storage::read_or_revert(uref); - let return_value = CLValue::from_t(result).unwrap_or_revert(); - runtime::ret(return_value); - } - _ => runtime::revert(ApiError::InvalidArgument), - } -} - -#[no_mangle] -pub extern "C" fn session() { - let counter_key = get_counter_key(); - let contract_hash = counter_key - .into_hash() - .unwrap_or_revert_with(ApiError::UnexpectedKeyVariant) - .into(); - let entry_point_name = ENTRYPOINT_COUNTER; - let runtime_args = runtime_args! { ARG_COUNTER_METHOD => METHOD_INC }; - runtime::call_contract(contract_hash, entry_point_name, runtime_args) -} - -#[no_mangle] -pub extern "C" fn call() { - let (contract_package_hash, access_uref): (ContractPackageHash, URef) = - storage::create_contract_package_at_hash(); - runtime::put_key(HASH_KEY_NAME, contract_package_hash.into()); - runtime::put_key(ACCESS_KEY_NAME, access_uref.into()); - - let entry_points = get_entry_points(); - let count_value_uref = storage::new_uref(0); //initialize counter - let named_keys = { - let mut ret = NamedKeys::new(); - ret.insert(String::from(COUNTER_VALUE_UREF), count_value_uref.into()); - ret - }; - - let (contract_hash, contract_version) = - storage::add_contract_version(contract_package_hash, entry_points, named_keys); - let version_uref = storage::new_uref(contract_version); - runtime::put_key(CONTRACT_VERSION_KEY, version_uref.into()); - runtime::put_key(ARG_CONTRACT_HASH_NAME, contract_hash.into()); -} - -fn get_entry_points() -> EntryPoints { - let mut 
entry_points = EntryPoints::new(); - - // actual stored contract - // ARG_METHOD -> METHOD_GET or METHOD_INC - // ret -> counter value - let entry_point = EntryPoint::new( - ENTRYPOINT_COUNTER, - vec![Parameter::new(ARG_COUNTER_METHOD, CLType::String)], - CLType::I32, - EntryPointAccess::Public, - EntryPointType::Contract, - ); - entry_points.add_entry_point(entry_point); - - // stored session code that call a version of the stored contract - // ARG_CONTRACT_HASH -> ContractHash of METHOD_COUNTER - let entry_point = EntryPoint::new( - ENTRYPOINT_SESSION, - vec![Parameter::new( - ARG_CONTRACT_HASH_NAME, - CLType::ByteArray(32), - )], - CLType::Unit, - EntryPointAccess::Public, - EntryPointType::Session, - ); - entry_points.add_entry_point(entry_point); - - entry_points -} - -fn get_counter_key() -> Key { - let name = ARG_CONTRACT_HASH_NAME; - let arg = { - let mut arg_size: usize = 0; - let ret = unsafe { - ext_ffi::casper_get_named_arg_size( - name.as_bytes().as_ptr(), - name.len(), - &mut arg_size as *mut usize, - ) - }; - match api_error::result_from(ret) { - Ok(_) => { - if arg_size == 0 { - None - } else { - Some(arg_size) - } - } - Err(ApiError::MissingArgument) => None, - Err(e) => runtime::revert(e), - } - }; - - match arg { - Some(arg_size) => { - let arg_bytes = { - let res = { - let data_non_null_ptr = contract_api::alloc_bytes(arg_size); - let ret = unsafe { - ext_ffi::casper_get_named_arg( - name.as_bytes().as_ptr(), - name.len(), - data_non_null_ptr.as_ptr(), - arg_size, - ) - }; - let data = unsafe { - Vec::from_raw_parts(data_non_null_ptr.as_ptr(), arg_size, arg_size) - }; - api_error::result_from(ret).map(|_| data) - }; - res.unwrap_or_revert() - }; - - bytesrepr::deserialize(arg_bytes).unwrap_or_revert_with(ApiError::InvalidArgument) - } - None => runtime::get_key(ARG_CONTRACT_HASH_NAME).unwrap_or_revert_with(ApiError::GetKey), - } -} diff --git a/smart_contracts/contracts/client/delegate/Cargo.toml 
b/smart_contracts/contracts/client/delegate/Cargo.toml index 4448638961..85342de438 100644 --- a/smart_contracts/contracts/client/delegate/Cargo.toml +++ b/smart_contracts/contracts/client/delegate/Cargo.toml @@ -1,8 +1,8 @@ [package] name = "delegate" version = "0.1.0" -authors = ["Michał Papierski "] -edition = "2018" +authors = ["Michał Papierski "] +edition = "2021" [[bin]] name = "delegate" @@ -10,9 +10,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/client/delegate/src/main.rs b/smart_contracts/contracts/client/delegate/src/main.rs index 073d2fd5bc..50a5468111 100644 --- a/smart_contracts/contracts/client/delegate/src/main.rs +++ b/smart_contracts/contracts/client/delegate/src/main.rs @@ -4,7 +4,7 @@ extern crate alloc; use casper_contract::contract_api::{runtime, system}; -use casper_types::{runtime_args, system::auction, PublicKey, RuntimeArgs, U512}; +use casper_types::{runtime_args, system::auction, PublicKey, U512}; const ARG_AMOUNT: &str = "amount"; @@ -23,7 +23,7 @@ fn delegate(delegator: PublicKey, validator: PublicKey, amount: U512) { // Delegate contract. // -// Accepts a delegator's public key, validator's public key, amount and a delgation rate. +// Accepts a delegator's public key, validator's public key, amount and a delegation rate. // Issues an delegation request to the auction contract. 
#[no_mangle] pub extern "C" fn call() { diff --git a/smart_contracts/contracts/client/disable-contract-by-contract-hash/Cargo.toml b/smart_contracts/contracts/client/disable-contract-by-contract-hash/Cargo.toml new file mode 100644 index 0000000000..89d69c0a7b --- /dev/null +++ b/smart_contracts/contracts/client/disable-contract-by-contract-hash/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "disable-contract-by-contract-hash" +version = "0.1.0" +authors = ["Michał Papierski "] +edition = "2021" + +[[bin]] +name = "disable_contract_by_contract_hash" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } + diff --git a/smart_contracts/contracts/client/disable-contract-by-contract-hash/src/main.rs b/smart_contracts/contracts/client/disable-contract-by-contract-hash/src/main.rs new file mode 100644 index 0000000000..3d60c87d71 --- /dev/null +++ b/smart_contracts/contracts/client/disable-contract-by-contract-hash/src/main.rs @@ -0,0 +1,21 @@ +#![no_std] +#![no_main] + +use casper_contract::{ + contract_api::{runtime, storage}, + unwrap_or_revert::UnwrapOrRevert, +}; +use casper_types::contracts::{ContractHash, ContractPackageHash}; + +const ARG_CONTRACT_PACKAGE_HASH: &str = "contract_package_hash"; +const ARG_CONTRACT_HASH: &str = "contract_hash"; + +#[no_mangle] +pub extern "C" fn call() { + // This contract can be run only by an administrator account. 
+ let contract_package_hash: ContractPackageHash = + runtime::get_named_arg(ARG_CONTRACT_PACKAGE_HASH); + let contract_hash: ContractHash = runtime::get_named_arg(ARG_CONTRACT_HASH); + + storage::disable_contract_version(contract_package_hash, contract_hash).unwrap_or_revert(); +} diff --git a/smart_contracts/contracts/client/named-purse-payment/Cargo.toml b/smart_contracts/contracts/client/named-purse-payment/Cargo.toml index f6f37d14e4..fa2dc97676 100644 --- a/smart_contracts/contracts/client/named-purse-payment/Cargo.toml +++ b/smart_contracts/contracts/client/named-purse-payment/Cargo.toml @@ -2,7 +2,7 @@ name = "named-purse-payment" version = "0.1.0" authors = ["Ed Hastings "] -edition = "2018" +edition = "2021" [[bin]] name = "named_purse_payment" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/client/non-standard-payment/Cargo.toml b/smart_contracts/contracts/client/non-standard-payment/Cargo.toml new file mode 100644 index 0000000000..4afbda5261 --- /dev/null +++ b/smart_contracts/contracts/client/non-standard-payment/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "non-standard-payment" +version = "0.1.0" +authors = ["Ed Hastings ", "Michał Papierski "] +edition = "2021" + +[[bin]] +name = "non_standard_payment" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/client/non-standard-payment/src/main.rs b/smart_contracts/contracts/client/non-standard-payment/src/main.rs new file mode 100644 index 0000000000..7f93e4441f --- /dev/null +++ b/smart_contracts/contracts/client/non-standard-payment/src/main.rs @@ -0,0 +1,94 @@ +#![no_std] +#![no_main] + +extern crate alloc; + 
+use alloc::vec::Vec; + +use casper_contract::{ + contract_api::{self, account, runtime, system}, + ext_ffi, + unwrap_or_revert::UnwrapOrRevert, +}; +use casper_types::{ + api_error, + bytesrepr::{self, FromBytes}, + ApiError, RuntimeArgs, URef, U512, +}; + +const ARG_AMOUNT: &str = "amount"; +const ARG_SOURCE_UREF: &str = "source"; +const GET_PAYMENT_PURSE: &str = "get_payment_purse"; + +/// This logic is intended to be used as SESSION PAYMENT LOGIC +/// Alternate payment logic that allows payment from a purse other than the executing [Account]'s +/// main purse. A `Key::Uref` to the source purse must already exist in the executing context's +/// named keys under the name passed in as the `purse_name` argument. +#[no_mangle] +pub extern "C" fn call() { + // source purse uref by name (from current context's named keys) + let purse_uref = { + match get_named_arg_if_exists(ARG_SOURCE_UREF) { + Some(purse_uref) => purse_uref, + None => account::get_main_purse(), + } + }; + + // handle payment contract + let handle_payment_contract_hash = system::get_handle_payment(); + + // get payment purse for current execution + let payment_purse: URef = runtime::call_contract( + handle_payment_contract_hash, + GET_PAYMENT_PURSE, + RuntimeArgs::default(), + ); + + // amount to transfer from named purse to payment purse + let amount: U512 = runtime::get_named_arg(ARG_AMOUNT); + + // transfer amount from named purse to payment purse, which will be used to pay for execution + system::transfer_from_purse_to_purse(purse_uref, payment_purse, amount, None) + .unwrap_or_revert(); +} + +fn get_named_arg_if_exists<T: FromBytes>(name: &str) -> Option<T> { + let arg_size = { + let mut arg_size: usize = 0; + let ret = unsafe { + ext_ffi::casper_get_named_arg_size( + name.as_bytes().as_ptr(), + name.len(), + &mut arg_size as *mut usize, + ) + }; + match api_error::result_from(ret) { + Ok(_) => Some(arg_size), + Err(ApiError::MissingArgument) => None, + Err(e) => runtime::revert(e), + } + }?; + let arg_bytes = if 
arg_size > 0 { + let res = { + let data_non_null_ptr = contract_api::alloc_bytes(arg_size); + let ret = unsafe { + ext_ffi::casper_get_named_arg( + name.as_bytes().as_ptr(), + name.len(), + data_non_null_ptr.as_ptr(), + arg_size, + ) + }; + let data = + unsafe { Vec::from_raw_parts(data_non_null_ptr.as_ptr(), arg_size, arg_size) }; + api_error::result_from(ret).map(|_| data) + }; + // Assumed to be safe as `get_named_arg_size` checks the argument already + res.unwrap_or_revert() + } else { + // Avoids allocation with 0 bytes and a call to get_named_arg + Vec::new() + }; + let value = bytesrepr::deserialize(arg_bytes).unwrap_or_revert_with(ApiError::InvalidArgument); + Some(value) +} diff --git a/smart_contracts/contracts/client/redelegate/Cargo.toml b/smart_contracts/contracts/client/redelegate/Cargo.toml new file mode 100644 index 0000000000..8cc00ff9e1 --- /dev/null +++ b/smart_contracts/contracts/client/redelegate/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "redelegate" +version = "0.1.0" +authors = ["Karan Dhareshwar "] +edition = "2021" + +[[bin]] +name = "redelegate" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/client/redelegate/src/main.rs b/smart_contracts/contracts/client/redelegate/src/main.rs new file mode 100644 index 0000000000..be0d28dfb4 --- /dev/null +++ b/smart_contracts/contracts/client/redelegate/src/main.rs @@ -0,0 +1,37 @@ +#![no_std] +#![no_main] + +extern crate alloc; + +use casper_contract::contract_api::{runtime, system}; +use casper_types::{runtime_args, system::auction, PublicKey, U512}; + +const ARG_AMOUNT: &str = "amount"; +const ARG_DELEGATOR: &str = "delegator"; +const ARG_VALIDATOR: &str = "validator"; +const ARG_NEW_VALIDATOR: &str = "new_validator"; + +fn redelegate(delegator: PublicKey, validator: PublicKey, amount: U512, new_validator: PublicKey) { + let contract_hash = 
system::get_auction(); + let args = runtime_args! { + auction::ARG_DELEGATOR => delegator, + auction::ARG_VALIDATOR => validator, + auction::ARG_AMOUNT => amount, + auction::ARG_NEW_VALIDATOR => new_validator + }; + let _amount: U512 = runtime::call_contract(contract_hash, auction::METHOD_REDELEGATE, args); +} + +// Redelegate contract. +// +// Accepts a delegator's public key, validator's public key to be undelegated, +// a new_validator's public key to redelegate to and an amount +// to withdraw (of type `U512`). +#[no_mangle] +pub extern "C" fn call() { + let delegator = runtime::get_named_arg(ARG_DELEGATOR); + let validator = runtime::get_named_arg(ARG_VALIDATOR); + let amount = runtime::get_named_arg(ARG_AMOUNT); + let new_validator = runtime::get_named_arg(ARG_NEW_VALIDATOR); + redelegate(delegator, validator, amount, new_validator); +} diff --git a/smart_contracts/contracts/client/revert/Cargo.toml b/smart_contracts/contracts/client/revert/Cargo.toml index 184812b817..7b3dc9615f 100644 --- a/smart_contracts/contracts/client/revert/Cargo.toml +++ b/smart_contracts/contracts/client/revert/Cargo.toml @@ -2,7 +2,7 @@ name = "revert" version = "0.1.0" authors = ["Henry Till "] -edition = "2018" +edition = "2021" [[bin]] name = "revert" @@ -10,9 +10,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/client/transfer-to-account-stored/Cargo.toml b/smart_contracts/contracts/client/transfer-to-account-stored/Cargo.toml deleted file mode 100644 index 9b05d1933b..0000000000 --- a/smart_contracts/contracts/client/transfer-to-account-stored/Cargo.toml +++ /dev/null @@ -1,20 +0,0 @@ -[package] -name = "transfer-to-account-stored" -version = "0.1.0" -authors = ["Michael Birch "] -edition = "2018" - -[[bin]] -name = "transfer_to_account_stored" -path = "src/main.rs" 
-bench = false -doctest = false -test = false - -[features] -std = ["casper-contract/std", "casper-types/std"] - -[dependencies] -casper-contract = { path = "../../../contract" } -transfer-to-account = { path = "../transfer-to-account" } -casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/client/transfer-to-account-stored/src/main.rs b/smart_contracts/contracts/client/transfer-to-account-stored/src/main.rs deleted file mode 100644 index ceeabb452b..0000000000 --- a/smart_contracts/contracts/client/transfer-to-account-stored/src/main.rs +++ /dev/null @@ -1,53 +0,0 @@ -#![no_std] -#![no_main] - -#[macro_use] -extern crate alloc; - -use casper_contract::contract_api::{runtime, storage}; -use casper_types::{ - account::AccountHash, CLType, CLTyped, ContractHash, ContractVersion, EntryPoint, - EntryPointAccess, EntryPointType, EntryPoints, Parameter, -}; - -const CONTRACT_NAME: &str = "transfer_to_account"; -const CONTRACT_VERSION_KEY: &str = "contract_version"; -const FUNCTION_NAME: &str = "transfer"; - -const ARG_TARGET: &str = "target"; -const ARG_AMOUNT: &str = "amount"; - -#[no_mangle] -pub extern "C" fn transfer() { - transfer_to_account::delegate(); -} - -fn store() -> (ContractHash, ContractVersion) { - let entry_points = { - let mut entry_points = EntryPoints::new(); - - let entry_point = EntryPoint::new( - FUNCTION_NAME, - vec![ - Parameter::new(ARG_TARGET, AccountHash::cl_type()), - Parameter::new(ARG_AMOUNT, CLType::U512), - ], - CLType::URef, - EntryPointAccess::Public, - EntryPointType::Session, - ); - - entry_points.add_entry_point(entry_point); - - entry_points - }; - storage::new_contract(entry_points, None, None, None) -} - -#[no_mangle] -pub extern "C" fn call() { - let (contract_hash, contract_version) = store(); - let version_uref = storage::new_uref(contract_version); - runtime::put_key(CONTRACT_VERSION_KEY, version_uref.into()); - runtime::put_key(CONTRACT_NAME, contract_hash.into()); -} diff --git 
a/smart_contracts/contracts/client/transfer-to-account-u512-stored/Cargo.toml b/smart_contracts/contracts/client/transfer-to-account-u512-stored/Cargo.toml deleted file mode 100644 index 98d08d6695..0000000000 --- a/smart_contracts/contracts/client/transfer-to-account-u512-stored/Cargo.toml +++ /dev/null @@ -1,20 +0,0 @@ -[package] -name = "transfer-to-account-u512-stored" -version = "0.1.0" -authors = ["Michael Birch "] -edition = "2018" - -[[bin]] -name = "transfer_to_account_u512_stored" -path = "src/main.rs" -bench = false -doctest = false -test = false - -[features] -std = ["casper-contract/std", "casper-types/std"] - -[dependencies] -casper-contract = { path = "../../../contract" } -transfer-to-account-u512 = { path = "../transfer-to-account-u512" } -casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/client/transfer-to-account-u512-stored/src/main.rs b/smart_contracts/contracts/client/transfer-to-account-u512-stored/src/main.rs deleted file mode 100644 index 51416c5264..0000000000 --- a/smart_contracts/contracts/client/transfer-to-account-u512-stored/src/main.rs +++ /dev/null @@ -1,62 +0,0 @@ -#![no_std] -#![no_main] - -#[macro_use] -extern crate alloc; - -use alloc::string::ToString; - -use casper_contract::contract_api::{runtime, storage}; -use casper_types::{ - account::AccountHash, CLType, CLTyped, ContractHash, ContractVersion, EntryPoint, - EntryPointAccess, EntryPointType, EntryPoints, Parameter, -}; - -const CONTRACT_NAME: &str = "transfer_to_account"; -const CONTRACT_VERSION_KEY: &str = "contract_version"; -const ENTRY_POINT_NAME: &str = "transfer"; -const ARG_TARGET: &str = "target"; -const ARG_AMOUNT: &str = "amount"; - -const HASH_KEY_NAME: &str = "transfer_to_account_U512"; -const ACCESS_KEY_NAME: &str = "transfer_to_account_U512_access"; - -#[no_mangle] -pub extern "C" fn transfer() { - transfer_to_account_u512::delegate(); -} - -fn store() -> (ContractHash, ContractVersion) { - let entry_points = { - let mut 
entry_points = EntryPoints::new(); - - let entry_point = EntryPoint::new( - ENTRY_POINT_NAME, - vec![ - Parameter::new(ARG_TARGET, AccountHash::cl_type()), - Parameter::new(ARG_AMOUNT, CLType::U512), - ], - CLType::Unit, - EntryPointAccess::Public, - EntryPointType::Session, - ); - - entry_points.add_entry_point(entry_point); - - entry_points - }; - storage::new_contract( - entry_points, - None, - Some(HASH_KEY_NAME.to_string()), - Some(ACCESS_KEY_NAME.to_string()), - ) -} - -#[no_mangle] -pub extern "C" fn call() { - let (contract_hash, contract_version) = store(); - let version_uref = storage::new_uref(contract_version); - runtime::put_key(CONTRACT_VERSION_KEY, version_uref.into()); - runtime::put_key(CONTRACT_NAME, contract_hash.into()); -} diff --git a/smart_contracts/contracts/client/transfer-to-account-u512/Cargo.toml b/smart_contracts/contracts/client/transfer-to-account-u512/Cargo.toml index 040c9640c7..f108da40f3 100644 --- a/smart_contracts/contracts/client/transfer-to-account-u512/Cargo.toml +++ b/smart_contracts/contracts/client/transfer-to-account-u512/Cargo.toml @@ -2,7 +2,7 @@ name = "transfer-to-account-u512" version = "0.1.0" authors = ["Michael Birch "] -edition = "2018" +edition = "2021" [[bin]] name = "transfer_to_account_u512" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/client/transfer-to-account/Cargo.toml b/smart_contracts/contracts/client/transfer-to-account/Cargo.toml index 84a9e5beb6..922142fe9e 100644 --- a/smart_contracts/contracts/client/transfer-to-account/Cargo.toml +++ b/smart_contracts/contracts/client/transfer-to-account/Cargo.toml @@ -2,7 +2,7 @@ name = "transfer-to-account" version = "0.1.0" authors = ["Michael Birch "] -edition = "2018" +edition = "2021" [[bin]] name = "transfer_to_account" @@ -11,9 
+11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/client/transfer-to-named-purse/Cargo.toml b/smart_contracts/contracts/client/transfer-to-named-purse/Cargo.toml new file mode 100644 index 0000000000..137bd9e7a7 --- /dev/null +++ b/smart_contracts/contracts/client/transfer-to-named-purse/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "transfer-to-named-purse" +version = "0.1.0" +authors = ["Michał Papierski "] +edition = "2021" + +[[bin]] +name = "transfer_to_named_purse" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/client/transfer-to-named-purse/src/main.rs b/smart_contracts/contracts/client/transfer-to-named-purse/src/main.rs new file mode 100644 index 0000000000..209c878d47 --- /dev/null +++ b/smart_contracts/contracts/client/transfer-to-named-purse/src/main.rs @@ -0,0 +1,42 @@ +#![no_std] +#![no_main] + +extern crate alloc; + +use alloc::string::String; +use casper_contract::{ + contract_api::{account, runtime, system}, + unwrap_or_revert::UnwrapOrRevert, +}; +use casper_types::{ApiError, Key, U512}; + +const ARG_PURSE_NAME: &str = "purse_name"; +const ARG_AMOUNT: &str = "amount"; + +#[no_mangle] +pub extern "C" fn call() { + let purse_name: String = runtime::get_named_arg(ARG_PURSE_NAME); + + let purse_uref = match runtime::get_key(&purse_name) { + Some(Key::URef(uref)) => uref, + Some(_) => { + // Found a key but it is not a purse + runtime::revert(ApiError::UnexpectedKeyVariant); + } + None => { + // Creates new named purse + let new_purse = system::create_purse(); + runtime::put_key(&purse_name, new_purse.into()); + new_purse + } + }; + + let amount: U512 = 
runtime::get_named_arg(ARG_AMOUNT); + + let source_purse = account::get_main_purse(); + + if !amount.is_zero() { + system::transfer_from_purse_to_purse(source_purse, purse_uref, amount, None) + .unwrap_or_revert(); + } +} diff --git a/smart_contracts/contracts/client/transfer-to-public-key/Cargo.toml b/smart_contracts/contracts/client/transfer-to-public-key/Cargo.toml new file mode 100644 index 0000000000..f75b669a5e --- /dev/null +++ b/smart_contracts/contracts/client/transfer-to-public-key/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "transfer-to-public-key" +version = "0.1.0" +authors = ["Michał Papierski "] +edition = "2021" + +[[bin]] +name = "transfer_to_public_key" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/client/transfer-to-public-key/src/main.rs b/smart_contracts/contracts/client/transfer-to-public-key/src/main.rs new file mode 100644 index 0000000000..ac2d901bce --- /dev/null +++ b/smart_contracts/contracts/client/transfer-to-public-key/src/main.rs @@ -0,0 +1,20 @@ +#![no_std] +#![no_main] + +use casper_contract::{ + contract_api::{runtime, system}, + unwrap_or_revert::UnwrapOrRevert, +}; +use casper_types::{PublicKey, U512}; + +const ARG_TARGET: &str = "target"; +const ARG_AMOUNT: &str = "amount"; + +/// Executes mote transfer to supplied account hash. +/// Transfers the requested amount. 
+#[no_mangle] +pub extern "C" fn call() { + let account_hash: PublicKey = runtime::get_named_arg(ARG_TARGET); + let transfer_amount: U512 = runtime::get_named_arg(ARG_AMOUNT); + system::transfer_to_public_key(account_hash, transfer_amount, None).unwrap_or_revert(); +} diff --git a/smart_contracts/contracts/client/undelegate/Cargo.toml b/smart_contracts/contracts/client/undelegate/Cargo.toml index 1a9010d01f..1d16c8a3ce 100644 --- a/smart_contracts/contracts/client/undelegate/Cargo.toml +++ b/smart_contracts/contracts/client/undelegate/Cargo.toml @@ -1,8 +1,8 @@ [package] name = "undelegate" version = "0.1.0" -authors = ["Michał Papierski "] -edition = "2018" +authors = ["Michał Papierski "] +edition = "2021" [[bin]] name = "undelegate" @@ -10,9 +10,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/client/undelegate/src/main.rs b/smart_contracts/contracts/client/undelegate/src/main.rs index 6c95bcbc87..0f55d19ef0 100644 --- a/smart_contracts/contracts/client/undelegate/src/main.rs +++ b/smart_contracts/contracts/client/undelegate/src/main.rs @@ -4,7 +4,7 @@ extern crate alloc; use casper_contract::contract_api::{runtime, system}; -use casper_types::{runtime_args, system::auction, PublicKey, RuntimeArgs, U512}; +use casper_types::{runtime_args, system::auction, PublicKey, U512}; const ARG_AMOUNT: &str = "amount"; const ARG_DELEGATOR: &str = "delegator"; diff --git a/smart_contracts/contracts/client/withdraw-bid/Cargo.toml b/smart_contracts/contracts/client/withdraw-bid/Cargo.toml index f2325e79f1..1ce5af20c1 100644 --- a/smart_contracts/contracts/client/withdraw-bid/Cargo.toml +++ b/smart_contracts/contracts/client/withdraw-bid/Cargo.toml @@ -2,7 +2,7 @@ name = "withdraw-bid" version = "0.1.0" authors = ["Michael Birch "] -edition = "2018" +edition = "2021" 
[[bin]] name = "withdraw_bid" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/client/withdraw-bid/src/main.rs b/smart_contracts/contracts/client/withdraw-bid/src/main.rs index 23bad796a6..881eb117bf 100644 --- a/smart_contracts/contracts/client/withdraw-bid/src/main.rs +++ b/smart_contracts/contracts/client/withdraw-bid/src/main.rs @@ -4,7 +4,7 @@ extern crate alloc; use casper_contract::contract_api::{runtime, system}; -use casper_types::{runtime_args, system::auction, PublicKey, RuntimeArgs, U512}; +use casper_types::{runtime_args, system::auction, PublicKey, U512}; const ARG_PUBLIC_KEY: &str = "public_key"; const ARG_AMOUNT: &str = "amount"; diff --git a/smart_contracts/contracts/explorer/faucet-stored/Cargo.toml b/smart_contracts/contracts/explorer/faucet-stored/Cargo.toml index 73cace24da..b6dd8d5fb1 100644 --- a/smart_contracts/contracts/explorer/faucet-stored/Cargo.toml +++ b/smart_contracts/contracts/explorer/faucet-stored/Cargo.toml @@ -1,8 +1,8 @@ [package] name = "faucet-stored" version = "0.1.0" -authors = ["Mateusz Górski "] -edition = "2018" +authors = ["Mateusz Górski "] +edition = "2021" [[bin]] name = "faucet_stored" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } faucet = { path = "../faucet" } diff --git a/smart_contracts/contracts/explorer/faucet-stored/src/main.rs b/smart_contracts/contracts/explorer/faucet-stored/src/main.rs index 028e975175..fa1034b1ec 100644 --- a/smart_contracts/contracts/explorer/faucet-stored/src/main.rs +++ b/smart_contracts/contracts/explorer/faucet-stored/src/main.rs @@ -3,57 +3,250 @@ extern crate alloc; -use alloc::{string::ToString, vec}; +use 
alloc::{boxed::Box, format, string::ToString, vec}; -use casper_contract::contract_api::{runtime, storage}; +use casper_contract::{ + contract_api::{account, runtime, storage, system}, + unwrap_or_revert::UnwrapOrRevert, +}; use casper_types::{ - account::AccountHash, contracts::ContractHash, CLType, CLTyped, ContractVersion, EntryPoint, - EntryPointAccess, EntryPointType, EntryPoints, Parameter, + contracts::NamedKeys, AddressableEntityHash, ApiError, CLType, EntityEntryPoint, + EntryPointAccess, EntryPointPayment, EntryPointType, EntryPoints, Key, Parameter, PublicKey, + URef, U512, }; -const CONTRACT_NAME: &str = "faucet"; -const HASH_KEY_NAME: &str = "faucet_package"; -const ACCESS_KEY_NAME: &str = "faucet_package_access"; -const ENTRY_POINT_NAME: &str = "call_faucet"; -const ARG_TARGET: &str = "target"; -const ARG_AMOUNT: &str = "amount"; -const CONTRACT_VERSION: &str = "contract_version"; +#[repr(u16)] +enum InstallerSessionError { + FailedToTransfer = 101, +} #[no_mangle] pub extern "C" fn call_faucet() { faucet::delegate(); } -fn store() -> (ContractHash, ContractVersion) { +fn build_named_keys_and_purse() -> (NamedKeys, URef) { + let mut named_keys = NamedKeys::new(); + let purse = system::create_purse(); + + // This session constructs a NamedKeys struct and later passes it to the + // storage::new_contract() function. This is simpler and more efficient than creating a custom + // "init" entry point for the stored contract in this case but it is not the best approach for + // every case. If you need to use the values that are stored under a new contract's named keys, + // you may store them under the named keys of the account that was used to deploy the session. + // However, this is not the best solution for every case, and there is another option. + // + // A custom "init" entrypoint would be useful for setting each required + // named key for the contract, but can only be called after the contract has been created + // using storage::new_contract(). 
Other entry points in the stored contract may require + // extra logic to check for the presence of, load and validate data stored under named keys. + // This would be useful in cases where a stored contract is creating another contract using + // storage::new_contract(), especially if values computed for initializing the new contract are + // also needed by the contract doing the initializing. + named_keys.insert(faucet::FAUCET_PURSE.to_string(), purse.into()); + + named_keys.insert(faucet::INSTALLER.to_string(), runtime::get_caller().into()); + named_keys.insert( + faucet::TIME_INTERVAL.to_string(), + storage::new_uref(faucet::TWO_HOURS_AS_MILLIS).into(), + ); + named_keys.insert( + faucet::LAST_DISTRIBUTION_TIME.to_string(), + storage::new_uref(0u64).into(), + ); + named_keys.insert( + faucet::AVAILABLE_AMOUNT.to_string(), + storage::new_uref(U512::zero()).into(), + ); + named_keys.insert( + faucet::REMAINING_REQUESTS.to_string(), + storage::new_uref(U512::zero()).into(), + ); + named_keys.insert( + faucet::DISTRIBUTIONS_PER_INTERVAL.to_string(), + storage::new_uref(0u64).into(), + ); + + // The AUTHORIZED_ACCOUNT named key holds an optional public key. If the public key is set, + // the account referenced by this public key will be granted a special privilege as the only + // authorized caller of the faucet's ENTRY_POINT_FAUCET. + // Only the authorized account will be able to issue token distributions from the faucet. The + // authorized account should call the faucet in the same way that the installer would, + // passing faucet::ARG_AMOUNT, faucet::ARG_TARGET and faucet::ARG_ID runtime arguments. + // + // The AUTHORIZED_ACCOUNT and faucet installer account have different responsibilities. While + // both of them may issue token using the ENTRY_POINT_FAUCET, only the faucet installer may + // configure the contract through the ENTRY_POINT_SET_VARIABLES, and only the faucet installer + // may set an authorized account through the ENTRY_POINT_AUTHORIZE_TO. 
The AUTHORIZED_ACCOUNT's + // responsibility would be to determine to whom and what amount of token should be issued + // through the faucet contract. + // + // While the AUTHORIZED_ACCOUNT named key is set to None::, the ENTRY_POINT_FAUCET + // will be publicly accessible and users may call ENTRY_POINT_FAUCET without a + // faucet::ARG_TARGET or faucet::ARG_AMOUNT. The contract will automatically issue them a + // computed amount of token. + // + // This enables the faucet contract to support a wider range of use cases, where in some cases + // the faucet installer does not want the ENTRY_POINT_FAUCET to be called directly by users for + // security reasons. Another case would be where this contract is deployed to a private Casper + // Network where all users are trusted to use the faucet to issue themselves token + // distributions responsibly. + named_keys.insert( + faucet::AUTHORIZED_ACCOUNT.to_string(), + storage::new_uref(None::).into(), + ); + + (named_keys, purse) +} + +#[no_mangle] +pub extern "C" fn call() { + let id: u64 = runtime::get_named_arg(faucet::ARG_ID); + let entry_points = { let mut entry_points = EntryPoints::new(); - let entry_point = EntryPoint::new( - ENTRY_POINT_NAME, + let faucet = EntityEntryPoint::new( + faucet::ENTRY_POINT_FAUCET, vec![ - Parameter::new(ARG_TARGET, AccountHash::cl_type()), - Parameter::new(ARG_AMOUNT, CLType::U512), + Parameter::new(faucet::ARG_ID, CLType::Option(Box::new(CLType::U64))), + Parameter::new(faucet::ARG_TARGET, CLType::PublicKey), + Parameter::new(faucet::ARG_AMOUNT, CLType::U512), ], CLType::Unit, EntryPointAccess::Public, - EntryPointType::Session, + EntryPointType::Called, + EntryPointPayment::Caller, ); - entry_points.add_entry_point(entry_point); + let set_variables = EntityEntryPoint::new( + faucet::ENTRY_POINT_SET_VARIABLES, + vec![ + Parameter::new( + faucet::ARG_AVAILABLE_AMOUNT, + CLType::Option(Box::new(CLType::U512)), + ), + Parameter::new( + faucet::ARG_TIME_INTERVAL, + 
CLType::Option(Box::new(CLType::U64)), + ), + Parameter::new( + faucet::ARG_DISTRIBUTIONS_PER_INTERVAL, + CLType::Option(Box::new(CLType::U64)), + ), + ], + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + + let authorize_to = EntityEntryPoint::new( + faucet::ENTRY_POINT_AUTHORIZE_TO, + vec![Parameter::new( + faucet::ARG_TARGET, + CLType::Option(Box::new(CLType::PublicKey)), + )], + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + + entry_points.add_entry_point(faucet); + entry_points.add_entry_point(set_variables); + entry_points.add_entry_point(authorize_to); entry_points }; - storage::new_contract( + + // The installer will create the faucet purse and give it to the newly installed + // contract so that the installing account and the newly installed contract will + // have a handle on it via the shared purse URef. + // + // The faucet named keys include the faucet purse, these are the named keys that we pass to the + // faucet. + let (faucet_named_keys, faucet_purse) = build_named_keys_and_purse(); + + // This is where the contract package is created and the first version of the faucet contract is + // installed within it. The contract package hash for the created contract package will be + // stored in the installing account's named keys under the faucet::PACKAGE_HASH_KEY_NAME, this + // allows later usage via the installing account to easily refer to and access the contract + // package and thus all versions stored in it. + // + // The access URef for the contract package will also be stored in the installing account's + // named keys under faucet::ACCESS_KEY_NAME; this URef controls administrative access to the + // contract package which includes the ability to install new versions of the contract + // logic, administer group-based security (if any), and so on. 
+ // + // The installing account may decide to grant this access uref to another account (not + // demonstrated here), which would allow that account equivalent full administrative control + // over the contract. This should only be done intentionally because it is not revocable. + let (contract_hash, contract_version) = storage::new_contract( entry_points, + Some(faucet_named_keys), + Some(format!("{}_{}", faucet::HASH_KEY_NAME, id)), + Some(format!("{}_{}", faucet::ACCESS_KEY_NAME, id)), None, - Some(HASH_KEY_NAME.to_string()), - Some(ACCESS_KEY_NAME.to_string()), - ) -} + ); -#[no_mangle] -pub extern "C" fn call() { - let (contract_hash, contract_version) = store(); - runtime::put_key(CONTRACT_VERSION, storage::new_uref(contract_version).into()); - runtime::put_key(CONTRACT_NAME, contract_hash.into()); + // As a convenience, a specific contract version can be referred to either by its contract hash + // or by the combination of the contract package hash and a contract version key. This comes + // down to developer preference. The contract package hash is a stable hash, so this may be + // preferable if you don't want to worry about contract hashes changing. Existing contracts' + // hashes can be stored under a URef under a named key and later used for calling the contract + // by hash. If you wanted to change the hash stored under the named key, your contract would + // have to have an entrypoint that would allow you to do so. + // + // Another option is to store a contract package hash that your contract depends on. When + // calling a contract using the package hash alone, the execution engine will find the latest + // contract version for you automatically. To avoid breaking changes, you may want to use a + // contract package hash and a contract version. That way, whenever the contract package + // that the calling contract depends on changes, the version can be updated through an + // entrypoint that allows an authorized caller to set named keys. 
+ // + // In some cases it may be desirable to pass one or both of the contract package hash and + // version into contract or session code as a runtime argument. As an example, if a user + // regularly makes calls to a contract package via session code, they could have the session + // code take runtime arguments for one or both of the contract package hash and version. This + // way, if the contract package is updated, they could easily use the latest version without + // needing to edit their session code. The same technique could be applied to stored contracts + // that need to call other contracts by their contract package hash and version. + + // Here we are saving the newly created contract's hash, the contract package hash and contract + // version, and an access URef under the installer's named keys. It's important to note that + // you'll need the access URef if you ever want to modify the contract package in the future. + // It's also important to note that it will be impossible to reference any of these values again + // if they're not stored under named keys. + // + // These named keys all end with the "id" runtime argument that is passed into this session. + // This is to keep separate instances of this faucet contract namespaced in case the installer + // wants to install multiple instances of the contract using the same account. + runtime::put_key( + &format!("{}_{}", faucet::CONTRACT_VERSION, id), + storage::new_uref(contract_version).into(), + ); + runtime::put_key( + &format!("{}_{}", faucet::CONTRACT_NAME, id), + Key::contract_entity_key(AddressableEntityHash::new(contract_hash.value())), + ); + + // This is specifically for this installing account, which would allow one installing account + // to potentially have multiple faucet contract packages. + runtime::put_key( + &format!("{}_{}", faucet::FAUCET_PURSE, id), + faucet_purse.into(), + ); + + let main_purse = account::get_main_purse(); + + // Initial funding amount. 
In other words, when the faucet contract is set up, this is its + // starting tokens transferred from the installing account's main purse as a one-time + // initialization. + let amount = runtime::get_named_arg(faucet::ARG_AMOUNT); + + system::transfer_from_purse_to_purse(main_purse, faucet_purse, amount, Some(id)) + .unwrap_or_revert_with(ApiError::User( + InstallerSessionError::FailedToTransfer as u16, + )); } diff --git a/smart_contracts/contracts/explorer/faucet/Cargo.toml b/smart_contracts/contracts/explorer/faucet/Cargo.toml index de8aa73f4a..f086511410 100644 --- a/smart_contracts/contracts/explorer/faucet/Cargo.toml +++ b/smart_contracts/contracts/explorer/faucet/Cargo.toml @@ -1,8 +1,8 @@ [package] name = "faucet" version = "0.1.0" -authors = ["Mateusz Górski "] -edition = "2018" +authors = ["Mateusz Górski "] +edition = "2021" [[bin]] name = "faucet" @@ -11,9 +11,6 @@ doctest = false test = false bench = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/explorer/faucet/README.md b/smart_contracts/contracts/explorer/faucet/README.md new file mode 100644 index 0000000000..5b663a8e46 --- /dev/null +++ b/smart_contracts/contracts/explorer/faucet/README.md @@ -0,0 +1,41 @@ +# Faucet Contract + +The Faucet is a contract that is designed to allow users to create a new account or to allow existing users to fund their account easily. + +## Install Session +The install session is responsible for creating the faucet's stored contract package and setting up a few named keys in the account used to perform the install. The installer performs the following actions. + +1. Takes the `id` parameter and uses it to keep track of the faucet being set up. 
As an example, if the operator installing the faucet passes `1337` as the `id` named argument to the installer session, it will create a named key called `faucet_1337` for the account used to call the install session. +1. Calls the stored faucet's `init` entry point to initialize a new purse that will be used to fund the faucet. This purse is stored under the named keys of the account used to install the faucet. The `init` entrypoint also initializes the faucet's state and writes to its named keys. +1. Funds the faucet's purse with the amount of motes declared in the `amount` runtime argument. + + +## Set Variables + +> NOTE: Before the faucet can be called, the `set_variables` entrypoint must be called. +> +This is a list of the required runtime arguments for calling `set_variables` +* `available_amount: U512` - The total amount available for distribution each interval. +* `distributions_per_interval: u64` - The maximum number of distributions to be made each interval. +* `time_interval: u64` - The amount of time in milliseconds that must pass before the available amount is replenished. + + +You can adjust the faucet's distribution rate by modifying the variables. If the faucet has distributed a total of `available_amount` in one `time_interval`, then no more token will be available to distribute until `last_distribution_at + time_interval < blocktime`. However, the installer of this contract is not rate limited and may continue to distribute funds from the faucet freely. + +## Calling the Faucet + +The faucet will calculate a distribution amount as a ratio of the available amount per interval to the max distribution amount per interval. As an example, if the installer sets the available amount per interval to `100_000_000` and the max distributions per interval to `2`. When an existing user calls the faucet, `50_000_000` motes will be distributed to the caller. If a second user calls the faucet, they will also receive `50_000_000` motes. 
The remaining amount will now be `0` tokens. If a third user calls the faucet then they will not receive any token. +After an interval passes after the last user was funded, the available amount will be replenished. + +`distributions_per_interval`, `available_amount`, `time_interval` and `max_distributions_per_interval` +must be set and must be a number greater than `0` for the contract to run properly. +If you try to invoke the contract before these variables are set, then you'll get an error. + +### Costs by Entry Point + +| feature                  | cost              | +|--------------------------|-------------------| +| faucet install           | `1492_30_872_143` | +| faucet set variables     | `79_455_975`      | +| faucet call by installer | `265_26_265_33`   | +| faucet call by user      | `2_558_318_531`   | \ No newline at end of file diff --git a/smart_contracts/contracts/explorer/faucet/src/lib.rs b/smart_contracts/contracts/explorer/faucet/src/lib.rs index 5b3a3526f5..9edade2967 100644 --- a/smart_contracts/contracts/explorer/faucet/src/lib.rs +++ b/smart_contracts/contracts/explorer/faucet/src/lib.rs @@ -1,39 +1,532 @@ #![no_std] +extern crate alloc; + +use alloc::{vec, vec::Vec}; +use core::mem::MaybeUninit; + use casper_contract::{ - contract_api::{runtime, storage, system}, + contract_api::{self, runtime, storage, system}, + ext_ffi, unwrap_or_revert::UnwrapOrRevert, }; -use casper_types::{account::AccountHash, ApiError, Key, U512}; +use casper_types::{ + account::AccountHash, + api_error, + bytesrepr::{self, FromBytes, ToBytes}, + ApiError, BlockTime, CLTyped, Key, PublicKey, URef, U512, +}; + +pub const ARG_AMOUNT: &str = "amount"; +pub const ARG_TARGET: &str = "target"; +pub const ARG_ID: &str = "id"; +pub const ARG_TIME_INTERVAL: &str = "time_interval"; +pub const ARG_AVAILABLE_AMOUNT: &str = "available_amount"; +pub const ARG_DISTRIBUTIONS_PER_INTERVAL: &str = "distributions_per_interval"; +pub const REMAINING_REQUESTS: &str = "remaining_requests"; +pub const AVAILABLE_AMOUNT: &str = 
"available_amount"; +pub const TIME_INTERVAL: &str = "time_interval"; +pub const DISTRIBUTIONS_PER_INTERVAL: &str = "distributions_per_interval"; +pub const LAST_DISTRIBUTION_TIME: &str = "last_distribution_time"; +pub const FAUCET_PURSE: &str = "faucet_purse"; +pub const INSTALLER: &str = "installer"; +pub const TWO_HOURS_AS_MILLIS: u64 = 7_200_000; +pub const CONTRACT_NAME: &str = "faucet"; +pub const HASH_KEY_NAME: &str = "faucet_package"; +pub const ACCESS_KEY_NAME: &str = "faucet_package_access"; +pub const CONTRACT_VERSION: &str = "faucet_contract_version"; +pub const AUTHORIZED_ACCOUNT: &str = "authorized_account"; + +pub const ENTRY_POINT_FAUCET: &str = "call_faucet"; +pub const ENTRY_POINT_INIT: &str = "init"; +pub const ENTRY_POINT_SET_VARIABLES: &str = "set_variables"; +pub const ENTRY_POINT_AUTHORIZE_TO: &str = "authorize_to"; + +#[repr(u16)] +enum FaucetError { + InvalidAccount = 1, + MissingInstaller = 2, + InvalidInstaller = 3, + InstallerDoesNotFundItself = 4, + MissingDistributionTime = 5, + InvalidDistributionTime = 6, + MissingAvailableAmount = 7, + InvalidAvailableAmount = 8, + MissingTimeInterval = 9, + InvalidTimeInterval = 10, + MissingId = 11, + InvalidId = 12, + FailedToTransfer = 13, + FailedToGetArgBytes = 14, + MissingFaucetPurse = 15, + InvalidFaucetPurse = 16, + MissingRemainingRequests = 17, + InvalidRemainingRequests = 18, + MissingDistributionsPerInterval = 19, + InvalidDistributionsPerInterval = 20, + UnexpectedKeyVariant = 21, + MissingAuthorizedAccount = 22, + InvalidAuthorizedAccount = 23, + AuthorizedAccountDoesNotFundInstaller = 24, + FaucetCallByUserWithAuthorizedAccountSet = 25, +} + +impl From for ApiError { + fn from(e: FaucetError) -> Self { + ApiError::User(e as u16) + } +} + +#[no_mangle] +pub fn set_variables() { + let installer = get_account_hash_with_user_errors( + INSTALLER, + FaucetError::MissingInstaller, + FaucetError::InvalidInstaller, + ); + + if installer != runtime::get_caller() { + 
runtime::revert(FaucetError::InvalidAccount); + } + + if let Some(new_time_interval) = get_optional_named_arg_with_user_errors::( + ARG_TIME_INTERVAL, + FaucetError::MissingTimeInterval, + FaucetError::InvalidTimeInterval, + ) { + let time_interval_uref = get_uref_with_user_errors( + TIME_INTERVAL, + FaucetError::MissingTimeInterval, + FaucetError::InvalidTimeInterval, + ); + storage::write(time_interval_uref, new_time_interval); + } -const ARG_TARGET: &str = "target"; -const ARG_AMOUNT: &str = "amount"; + if let Some(new_available_amount) = get_optional_named_arg_with_user_errors::( + ARG_AVAILABLE_AMOUNT, + FaucetError::MissingAvailableAmount, + FaucetError::InvalidAvailableAmount, + ) { + let available_amount_uref = get_uref_with_user_errors( + AVAILABLE_AMOUNT, + FaucetError::MissingAvailableAmount, + FaucetError::InvalidAvailableAmount, + ); + storage::write(available_amount_uref, new_available_amount); + } -#[repr(u32)] -enum CustomError { - AlreadyFunded = 1, + if let Some(new_distributions_per_interval) = get_optional_named_arg_with_user_errors::( + ARG_DISTRIBUTIONS_PER_INTERVAL, + FaucetError::MissingDistributionsPerInterval, + FaucetError::InvalidDistributionsPerInterval, + ) { + let distributions_per_interval_uref = get_uref_with_user_errors( + DISTRIBUTIONS_PER_INTERVAL, + FaucetError::MissingDistributionsPerInterval, + FaucetError::InvalidDistributionsPerInterval, + ); + let remaining_requests_uref = get_uref_with_user_errors( + REMAINING_REQUESTS, + FaucetError::MissingRemainingRequests, + FaucetError::InvalidRemainingRequests, + ); + + storage::write( + distributions_per_interval_uref, + new_distributions_per_interval, + ); + // remaining requests == distributions per interval. 
+ storage::write( + remaining_requests_uref, + U512::from(new_distributions_per_interval), + ); + } +} + +#[no_mangle] +pub fn authorize_to() { + let installer = get_account_hash_with_user_errors( + INSTALLER, + FaucetError::MissingInstaller, + FaucetError::InvalidInstaller, + ); + + if runtime::get_caller() != installer { + runtime::revert(FaucetError::InvalidAccount); + } + + let authorized_account_public_key = get_optional_named_arg_with_user_errors::( + ARG_TARGET, + FaucetError::MissingAuthorizedAccount, + FaucetError::InvalidAuthorizedAccount, + ); + + let authorized_account_uref = get_uref_with_user_errors( + AUTHORIZED_ACCOUNT, + FaucetError::MissingAuthorizedAccount, + FaucetError::InvalidAuthorizedAccount, + ); + + storage::write(authorized_account_uref, authorized_account_public_key); } -/// Executes token transfer to supplied account hash. -/// Revert status codes: -/// 1 - requested transfer to already funded account hash. #[no_mangle] pub fn delegate() { - let account_hash: AccountHash = runtime::get_named_arg(ARG_TARGET); + let id = get_optional_named_arg_with_user_errors( + ARG_ID, + FaucetError::MissingId, + FaucetError::InvalidId, + ); + + let caller = runtime::get_caller(); + let installer = get_account_hash_with_user_errors( + INSTALLER, + FaucetError::MissingInstaller, + FaucetError::InvalidInstaller, + ); + + let authorized_account_uref = get_uref_with_user_errors( + AUTHORIZED_ACCOUNT, + FaucetError::MissingAuthorizedAccount, + FaucetError::InvalidAuthorizedAccount, + ); + + let maybe_authorized_account_public_key: Option = read_with_user_errors( + authorized_account_uref, + FaucetError::MissingAuthorizedAccount, + FaucetError::InvalidAuthorizedAccount, + ); + + let maybe_authorized_account = + maybe_authorized_account_public_key.map(|pk| pk.to_account_hash()); + + let last_distribution_time_uref = get_uref_with_user_errors( + LAST_DISTRIBUTION_TIME, + FaucetError::MissingDistributionTime, + FaucetError::InvalidDistributionTime, + ); + + let 
last_distribution_time: u64 = read_with_user_errors( + last_distribution_time_uref, + FaucetError::MissingDistributionTime, + FaucetError::InvalidDistributionTime, + ); + + let time_interval_uref = get_uref_with_user_errors( + TIME_INTERVAL, + FaucetError::MissingTimeInterval, + FaucetError::InvalidTimeInterval, + ); + + let time_interval: u64 = read_with_user_errors( + time_interval_uref, + FaucetError::MissingTimeInterval, + FaucetError::InvalidTimeInterval, + ); + + let blocktime = runtime::get_blocktime(); + + if blocktime > BlockTime::new(last_distribution_time + time_interval) { + reset_remaining_requests(); + set_last_distribution_time(blocktime); + } + + if caller == installer { + let target: AccountHash = runtime::get_named_arg(ARG_TARGET); + // the authorized caller or the installer may pass an explicit amount. + // if they do not, the faucet can calculate an amount for them. + let amount: U512 = runtime::get_named_arg(ARG_AMOUNT); + + if target == installer { + runtime::revert(FaucetError::InstallerDoesNotFundItself); + } + + transfer(target, amount, id); + } else if let Some(authorized_account) = maybe_authorized_account { + if caller == authorized_account { + let target: AccountHash = runtime::get_named_arg(ARG_TARGET); + let amount: U512 = runtime::get_named_arg(ARG_AMOUNT); + + if target == installer { + runtime::revert(FaucetError::AuthorizedAccountDoesNotFundInstaller); + } + + transfer(target, amount, id); + } else { + runtime::revert(FaucetError::FaucetCallByUserWithAuthorizedAccountSet); + } + } else { + let amount = get_distribution_amount_rate_limited(); + + transfer(caller, amount, id); + decrease_remaining_requests(); + } +} + +fn transfer(target: AccountHash, amount: U512, id: Option) { + let faucet_purse = get_uref_with_user_errors( + FAUCET_PURSE, + FaucetError::MissingFaucetPurse, + FaucetError::InvalidFaucetPurse, + ); + + system::transfer_from_purse_to_account(faucet_purse, target, amount, id) + 
.unwrap_or_revert_with(FaucetError::FailedToTransfer); +} + +fn get_distribution_amount_rate_limited() -> U512 { + let distributions_per_interval_uref = get_uref_with_user_errors( + DISTRIBUTIONS_PER_INTERVAL, + FaucetError::MissingDistributionsPerInterval, + FaucetError::InvalidDistributionsPerInterval, + ); - let amount: U512 = runtime::get_named_arg(ARG_AMOUNT); + let distributions_per_interval: u64 = read_with_user_errors( + distributions_per_interval_uref, + FaucetError::MissingDistributionsPerInterval, + FaucetError::InvalidDistributionsPerInterval, + ); - // Maybe we will decide to allow multiple funds up until some maximum value. - let already_funded = runtime::get_key(&account_hash.to_formatted_string()).is_some(); + if distributions_per_interval == 0 { + return U512::zero(); + } + + let available_amount_uref = get_uref_with_user_errors( + AVAILABLE_AMOUNT, + FaucetError::MissingAvailableAmount, + FaucetError::InvalidAvailableAmount, + ); + + let available_amount: U512 = read_with_user_errors( + available_amount_uref, + FaucetError::MissingAvailableAmount, + FaucetError::InvalidAvailableAmount, + ); + + if available_amount.is_zero() { + return available_amount; + } + + let remaining_requests_uref = get_uref_with_user_errors( + REMAINING_REQUESTS, + FaucetError::MissingRemainingRequests, + FaucetError::InvalidRemainingRequests, + ); + + let remaining_requests: U512 = read_with_user_errors( + remaining_requests_uref, + FaucetError::MissingRemainingRequests, + FaucetError::InvalidRemainingRequests, + ); + + if remaining_requests.is_zero() { + return remaining_requests; + } + + available_amount / U512::from(distributions_per_interval) +} + +fn reset_remaining_requests() { + let distributions_per_interval_uref = get_uref_with_user_errors( + DISTRIBUTIONS_PER_INTERVAL, + FaucetError::MissingDistributionsPerInterval, + FaucetError::InvalidDistributionsPerInterval, + ); + + let distributions_per_interval: u64 = read_with_user_errors( + 
distributions_per_interval_uref, + FaucetError::MissingDistributionsPerInterval, + FaucetError::InvalidDistributionsPerInterval, + ); - if already_funded { - runtime::revert(ApiError::User(CustomError::AlreadyFunded as u16)); + let remaining_requests_uref = get_uref_with_user_errors( + REMAINING_REQUESTS, + FaucetError::MissingRemainingRequests, + FaucetError::InvalidRemainingRequests, + ); + + storage::write( + remaining_requests_uref, + U512::from(distributions_per_interval), + ); +} + +fn decrease_remaining_requests() -> U512 { + let remaining_requests_uref = get_uref_with_user_errors( + REMAINING_REQUESTS, + FaucetError::MissingRemainingRequests, + FaucetError::InvalidRemainingRequests, + ); + + let remaining_requests: U512 = read_with_user_errors( + remaining_requests_uref, + FaucetError::MissingRemainingRequests, + FaucetError::InvalidRemainingRequests, + ); + + let new_remaining_requests = remaining_requests.saturating_sub(1.into()); + storage::write(remaining_requests_uref, new_remaining_requests); + + new_remaining_requests +} + +fn set_last_distribution_time(t: BlockTime) { + let last_distribution_time_uref = get_uref_with_user_errors( + LAST_DISTRIBUTION_TIME, + FaucetError::MissingDistributionTime, + FaucetError::InvalidDistributionTime, + ); + + storage::write::(last_distribution_time_uref, t.into()); +} + +fn get_named_arg_size(name: &str) -> Option { + let mut arg_size: usize = 0; + let ret = unsafe { + ext_ffi::casper_get_named_arg_size( + name.as_bytes().as_ptr(), + name.len(), + &mut arg_size as *mut usize, + ) + }; + match api_error::result_from(ret) { + Ok(_) => Some(arg_size), + Err(ApiError::MissingArgument) => None, + Err(e) => runtime::revert(e), + } +} + +fn get_optional_named_arg_with_user_errors( + name: &str, + missing: FaucetError, + invalid: FaucetError, +) -> Option { + match get_named_arg_with_user_errors(name, missing, invalid) { + Ok(val) => val, + Err(err @ FaucetError::InvalidId) => runtime::revert(err), + Err(_) => None, + } +} 
+ +fn get_named_arg_with_user_errors( + name: &str, + missing: FaucetError, + invalid: FaucetError, +) -> Result { + let arg_size = get_named_arg_size(name).ok_or(missing)?; + let arg_bytes = if arg_size > 0 { + let res = { + let data_non_null_ptr = contract_api::alloc_bytes(arg_size); + let ret = unsafe { + ext_ffi::casper_get_named_arg( + name.as_bytes().as_ptr(), + name.len(), + data_non_null_ptr.as_ptr(), + arg_size, + ) + }; + let data = + unsafe { Vec::from_raw_parts(data_non_null_ptr.as_ptr(), arg_size, arg_size) }; + api_error::result_from(ret).map(|_| data) + }; + // Assumed to be safe as `get_named_arg_size` checks the argument already + res.unwrap_or_revert_with(FaucetError::FailedToGetArgBytes) } else { - system::transfer_to_account(account_hash, amount, None).unwrap_or_revert(); - // Transfer successful; Store the fact of funding in the local state. - runtime::put_key( - &account_hash.to_formatted_string(), - Key::URef(storage::new_uref(())), + // Avoids allocation with 0 bytes and a call to get_named_arg + Vec::new() + }; + + bytesrepr::deserialize(arg_bytes).map_err(|_| invalid) +} + +fn get_account_hash_with_user_errors( + name: &str, + missing: FaucetError, + invalid: FaucetError, +) -> AccountHash { + let key = get_key_with_user_errors(name, missing, invalid); + key.into_account() + .unwrap_or_revert_with(FaucetError::UnexpectedKeyVariant) +} + +fn get_uref_with_user_errors(name: &str, missing: FaucetError, invalid: FaucetError) -> URef { + let key = get_key_with_user_errors(name, missing, invalid); + key.into_uref() + .unwrap_or_revert_with(FaucetError::UnexpectedKeyVariant) +} + +fn get_key_with_user_errors(name: &str, missing: FaucetError, invalid: FaucetError) -> Key { + let (name_ptr, name_size, _bytes) = to_ptr(name); + let mut key_bytes = vec![0u8; Key::max_serialized_length()]; + let mut total_bytes: usize = 0; + let ret = unsafe { + ext_ffi::casper_get_key( + name_ptr, + name_size, + key_bytes.as_mut_ptr(), + key_bytes.len(), + &mut 
total_bytes as *mut usize, ) + }; + match api_error::result_from(ret) { + Ok(_) => {} + Err(ApiError::MissingKey) => runtime::revert(missing), + Err(e) => runtime::revert(e), } + key_bytes.truncate(total_bytes); + + bytesrepr::deserialize(key_bytes).unwrap_or_revert_with(invalid) +} + +fn read_with_user_errors( + uref: URef, + missing: FaucetError, + invalid: FaucetError, +) -> T { + let key: Key = uref.into(); + let (key_ptr, key_size, _bytes) = to_ptr(key); + + let value_size = { + let mut value_size = MaybeUninit::uninit(); + let ret = unsafe { ext_ffi::casper_read_value(key_ptr, key_size, value_size.as_mut_ptr()) }; + match api_error::result_from(ret) { + Ok(_) => unsafe { value_size.assume_init() }, + Err(ApiError::ValueNotFound) => runtime::revert(missing), + Err(e) => runtime::revert(e), + } + }; + + let value_bytes = read_host_buffer(value_size).unwrap_or_revert(); + + bytesrepr::deserialize(value_bytes).unwrap_or_revert_with(invalid) +} + +fn read_host_buffer_into(dest: &mut [u8]) -> Result { + let mut bytes_written = MaybeUninit::uninit(); + let ret = unsafe { + ext_ffi::casper_read_host_buffer(dest.as_mut_ptr(), dest.len(), bytes_written.as_mut_ptr()) + }; + // NOTE: When rewriting below expression as `result_from(ret).map(|_| unsafe { ... })`, and the + // caller ignores the return value, execution of the contract becomes unstable and ultimately + // leads to `Unreachable` error. 
+ api_error::result_from(ret)?; + Ok(unsafe { bytes_written.assume_init() }) +} + +fn read_host_buffer(size: usize) -> Result, ApiError> { + let mut dest: Vec = if size == 0 { + Vec::new() + } else { + let bytes_non_null_ptr = contract_api::alloc_bytes(size); + unsafe { Vec::from_raw_parts(bytes_non_null_ptr.as_ptr(), size, size) } + }; + read_host_buffer_into(&mut dest)?; + Ok(dest) +} + +fn to_ptr(t: T) -> (*const u8, usize, Vec) { + let bytes = t.into_bytes().unwrap_or_revert(); + let ptr = bytes.as_ptr(); + let size = bytes.len(); + (ptr, size, bytes) } diff --git a/smart_contracts/contracts/nctl/nctl-dictionary/Cargo.toml b/smart_contracts/contracts/nctl/nctl-dictionary/Cargo.toml new file mode 100644 index 0000000000..11d7d3b596 --- /dev/null +++ b/smart_contracts/contracts/nctl/nctl-dictionary/Cargo.toml @@ -0,0 +1,8 @@ +[package] +name = "nctl-dictionary" +version = "0.1.0" +edition = "2018" + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/nctl/nctl-dictionary/src/main.rs b/smart_contracts/contracts/nctl/nctl-dictionary/src/main.rs new file mode 100644 index 0000000000..5ce10de485 --- /dev/null +++ b/smart_contracts/contracts/nctl/nctl-dictionary/src/main.rs @@ -0,0 +1,17 @@ +#![no_std] +#![no_main] + +#[cfg(not(target_arch = "wasm32"))] +compile_error!("target arch should be wasm32: compile with '--target wasm32-unknown-unknown'"); + +// We need to explicitly import the std alloc crate and `alloc::string::String` as we're in a +// `no_std` environment. 
+extern crate alloc; + +use casper_contract::{contract_api::storage, unwrap_or_revert::UnwrapOrRevert}; + +#[no_mangle] +pub extern "C" fn call() { + let seed_uref = storage::new_dictionary("nctl_dictionary").unwrap_or_revert(); + storage::dictionary_put(seed_uref, "foo", 1u64); +} diff --git a/smart_contracts/contracts/profiling/host-function-metrics/Cargo.toml b/smart_contracts/contracts/profiling/host-function-metrics/Cargo.toml index 3648479a10..86f9d39335 100644 --- a/smart_contracts/contracts/profiling/host-function-metrics/Cargo.toml +++ b/smart_contracts/contracts/profiling/host-function-metrics/Cargo.toml @@ -2,7 +2,7 @@ name = "host-function-metrics" version = "0.1.0" authors = ["Fraser Hutchison "] -edition = "2018" +edition = "2021" [lib] crate-type = ["cdylib"] @@ -12,7 +12,6 @@ test = false [features] default = ["casper-contract/test-support", "rand/small_rng"] -std = ["casper-contract/std", "casper-types/std"] [dependencies] casper-contract = { path = "../../../contract" } diff --git a/smart_contracts/contracts/profiling/host-function-metrics/src/lib.rs b/smart_contracts/contracts/profiling/host-function-metrics/src/lib.rs index 93979a9041..6190d6ff2d 100644 --- a/smart_contracts/contracts/profiling/host-function-metrics/src/lib.rs +++ b/smart_contracts/contracts/profiling/host-function-metrics/src/lib.rs @@ -2,8 +2,8 @@ extern crate alloc; -use alloc::{boxed::Box, string::String, vec, vec::Vec}; -use core::iter::{self, FromIterator}; +use alloc::{boxed::Box, collections::BTreeMap, string::String, vec, vec::Vec}; +use core::iter; use rand::{distributions::Alphanumeric, rngs::SmallRng, Rng, SeedableRng}; @@ -14,9 +14,9 @@ use casper_contract::{ use casper_types::{ account::{AccountHash, ActionType, Weight}, bytesrepr::Bytes, - contracts::NamedKeys, - runtime_args, ApiError, BlockTime, CLType, CLValue, ContractHash, ContractVersion, EntryPoint, - EntryPointAccess, EntryPointType, EntryPoints, Key, Parameter, Phase, RuntimeArgs, U512, + 
contracts::{ContractHash, ContractVersion, NamedKeys}, + runtime_args, ApiError, BlockTime, CLType, CLValue, EntityEntryPoint, EntryPointAccess, + EntryPointPayment, EntryPointType, EntryPoints, Key, Parameter, Phase, U512, }; const MIN_FUNCTION_NAME_LENGTH: usize = 1; @@ -24,8 +24,6 @@ const MAX_FUNCTION_NAME_LENGTH: usize = 100; const NAMED_KEY_COUNT: usize = 100; const MIN_NAMED_KEY_NAME_LENGTH: usize = 10; -// TODO - consider increasing to e.g. 1_000 once https://casperlabs.atlassian.net/browse/EE-966 is -// resolved. const MAX_NAMED_KEY_NAME_LENGTH: usize = 100; const VALUE_FOR_ADDITION_1: u64 = 1; const VALUE_FOR_ADDITION_2: u64 = 2; @@ -68,9 +66,9 @@ fn create_random_names(rng: &mut SmallRng) -> impl Iterator + '_ fn truncate_named_keys(named_keys: NamedKeys, rng: &mut SmallRng) -> NamedKeys { let truncated_len = rng.gen_range(1..=named_keys.len()); - let mut vec = named_keys.into_iter().collect::>(); + let mut vec = named_keys.into_inner().into_iter().collect::>(); vec.truncate(truncated_len); - vec.into_iter().collect() + NamedKeys::from(vec.into_iter().collect::>()) } // Executes the named key functions from the `runtime` module and most of the functions from the @@ -125,16 +123,16 @@ fn small_function() { #[no_mangle] pub extern "C" fn call() { let seed: u64 = runtime::get_named_arg(ARG_SEED); - let (random_bytes, source_account, destination_account): (Vec, AccountHash, AccountHash) = + let (random_bytes, source_account, destination_account): (Bytes, AccountHash, AccountHash) = runtime::get_named_arg(ARG_OTHERS); + let random_bytes: Vec = random_bytes.into(); // ========== storage, execution and upgrading of contracts ==================================== // Store large function with no named keys, then execute it to get named keys returned. 
let mut rng = SmallRng::seed_from_u64(seed); - let large_function_name = String::from_iter( - iter::repeat('l').take(rng.gen_range(MIN_FUNCTION_NAME_LENGTH..=MAX_FUNCTION_NAME_LENGTH)), - ); + let large_function_name: String = + "l".repeat(rng.gen_range(MIN_FUNCTION_NAME_LENGTH..=MAX_FUNCTION_NAME_LENGTH)); let entry_point_name = &large_function_name; let runtime_args = runtime_args! { @@ -149,15 +147,13 @@ pub extern "C" fn call() { let (contract_hash, _contract_version) = store_function(entry_point_name, Some(named_keys.clone())); // Store large function with 10 named keys, then execute it. - runtime::call_contract::(contract_hash, entry_point_name, runtime_args); + runtime::call_contract::(contract_hash, entry_point_name, runtime_args.clone()); // Small function - let small_function_name = String::from_iter( - iter::repeat('s').take(rng.gen_range(MIN_FUNCTION_NAME_LENGTH..=MAX_FUNCTION_NAME_LENGTH)), - ); + let small_function_name = + "s".repeat(rng.gen_range(MIN_FUNCTION_NAME_LENGTH..=MAX_FUNCTION_NAME_LENGTH)); let entry_point_name = &small_function_name; - let runtime_args = runtime_args! {}; // Store small function with no named keys, then execute it. 
let (contract_hash, _contract_version) = @@ -222,7 +218,7 @@ fn store_function( let entry_points = { let mut entry_points = EntryPoints::new(); - let entry_point = EntryPoint::new( + let entry_point = EntityEntryPoint::new( entry_point_name, vec![ Parameter::new(ARG_SEED, CLType::U64), @@ -230,438 +226,1079 @@ fn store_function( ], CLType::Unit, EntryPointAccess::Public, - EntryPointType::Contract, + EntryPointType::Called, + EntryPointPayment::Caller, ); entry_points.add_entry_point(entry_point); entry_points }; - storage::new_contract(entry_points, named_keys, None, None) -} - -#[rustfmt::skip] #[no_mangle] pub extern "C" fn s() { small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn ss() { small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn sss() { small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn ssss() { small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn sssss() { small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn ssssss() { small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn sssssss() { small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn ssssssss() { small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn sssssssss() { small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn ssssssssss() { small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn sssssssssss() { small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn ssssssssssss() { small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn sssssssssssss() { small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn ssssssssssssss() { small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn sssssssssssssss() { small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn ssssssssssssssss() { small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn sssssssssssssssss() { small_function() } -#[rustfmt::skip] 
#[no_mangle] pub extern "C" fn ssssssssssssssssss() { small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn sssssssssssssssssss() { small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn ssssssssssssssssssss() { small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn sssssssssssssssssssss() { small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn ssssssssssssssssssssss() { small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn sssssssssssssssssssssss() { small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn ssssssssssssssssssssssss() { small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn sssssssssssssssssssssssss() { small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn ssssssssssssssssssssssssss() { small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn sssssssssssssssssssssssssss() { small_function() -} -#[rustfmt::skip] #[no_mangle] pub extern "C" fn ssssssssssssssssssssssssssss() { -small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn + storage::new_contract(entry_points, named_keys, None, None, None) +} + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn s() { small_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn ss() { small_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn sss() { small_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn ssss() { small_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn sssss() { small_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn ssssss() { small_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn sssssss() { small_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn ssssssss() { small_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn sssssssss() { small_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn ssssssssss() { small_function() } + 
+#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn sssssssssss() { small_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn ssssssssssss() { small_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn sssssssssssss() { small_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn ssssssssssssss() { small_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn sssssssssssssss() { small_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn ssssssssssssssss() { small_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn sssssssssssssssss() { small_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn ssssssssssssssssss() { small_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn sssssssssssssssssss() { small_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn ssssssssssssssssssss() { small_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn sssssssssssssssssssss() { small_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn ssssssssssssssssssssss() { small_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn sssssssssssssssssssssss() { small_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn ssssssssssssssssssssssss() { small_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn sssssssssssssssssssssssss() { small_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn ssssssssssssssssssssssssss() { small_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn sssssssssssssssssssssssssss() { + small_function() +} + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn ssssssssssssssssssssssssssss() { + small_function() +} + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn sssssssssssssssssssssssssssss() { small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn ssssssssssssssssssssssssssssss() { small_function() } 
-#[rustfmt::skip] #[no_mangle] pub extern + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn sssssssssssssssssssssssssssssss() { small_function() } -#[rustfmt::skip] #[no_mangle] pub + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn ssssssssssssssssssssssssssssssss() { small_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn sssssssssssssssssssssssssssssssss() { small_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn ssssssssssssssssssssssssssssssssss() { + small_function() +} + #[rustfmt::skip] -#[no_mangle] pub extern "C" fn sssssssssssssssssssssssssssssssss() { small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn ssssssssssssssssssssssssssssssssss() { -small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn +#[no_mangle] +pub extern "C" fn sssssssssssssssssssssssssssssssssss() { small_function() } -#[rustfmt::skip] #[no_mangle] pub + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn ssssssssssssssssssssssssssssssssssss() { small_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn sssssssssssssssssssssssssssssssssssss() { small_function() } + #[rustfmt::skip] -#[no_mangle] pub extern "C" fn sssssssssssssssssssssssssssssssssssss() { small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn ssssssssssssssssssssssssssssssssssssss() { -small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn +#[no_mangle] +pub extern "C" fn ssssssssssssssssssssssssssssssssssssss() { + small_function() +} + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn sssssssssssssssssssssssssssssssssssssss() { small_function() } -#[rustfmt::skip] #[no_mangle] pub + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn ssssssssssssssssssssssssssssssssssssssss() { small_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn sssssssssssssssssssssssssssssssssssssssss() { small_function() } + #[rustfmt::skip] -#[no_mangle] pub extern "C" fn sssssssssssssssssssssssssssssssssssssssss() { 
small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn ssssssssssssssssssssssssssssssssssssssssss() { -small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn +#[no_mangle] +pub extern "C" fn ssssssssssssssssssssssssssssssssssssssssss() { + small_function() +} + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn sssssssssssssssssssssssssssssssssssssssssss() { small_function() } -#[rustfmt::skip] #[no_mangle] + +#[rustfmt::skip] +#[no_mangle] pub extern "C" fn ssssssssssssssssssssssssssssssssssssssssssss() { small_function() } + #[rustfmt:: -skip] #[no_mangle] pub extern "C" fn sssssssssssssssssssssssssssssssssssssssssssss() { -small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn +skip] +#[no_mangle] +pub extern "C" fn sssssssssssssssssssssssssssssssssssssssssssss() { + small_function() +} + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn ssssssssssssssssssssssssssssssssssssssssssssss() { small_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn sssssssssssssssssssssssssssssssssssssssssssssss() { + small_function() +} + #[rustfmt::skip] -#[no_mangle] pub extern "C" fn sssssssssssssssssssssssssssssssssssssssssssssss() { -small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn +#[no_mangle] +pub extern "C" fn ssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn sssssssssssssssssssssssssssssssssssssssssssssssss() { + small_function() +} + #[rustfmt::skip] -#[no_mangle] pub extern "C" fn sssssssssssssssssssssssssssssssssssssssssssssssss() { -small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn +#[no_mangle] +pub extern "C" fn ssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn sssssssssssssssssssssssssssssssssssssssssssssssssss() { + small_function() +} + #[rustfmt::skip] -#[no_mangle] pub extern "C" fn 
sssssssssssssssssssssssssssssssssssssssssssssssssss() { -small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn +#[no_mangle] +pub extern "C" fn ssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn sssssssssssssssssssssssssssssssssssssssssssssssssssss() { + small_function() +} + #[rustfmt::skip] -#[no_mangle] pub extern "C" fn sssssssssssssssssssssssssssssssssssssssssssssssssssss() { -small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn +#[no_mangle] +pub extern "C" fn ssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn sssssssssssssssssssssssssssssssssssssssssssssssssssssss() { + small_function() +} + #[rustfmt::skip] -#[no_mangle] pub extern "C" fn sssssssssssssssssssssssssssssssssssssssssssssssssssssss() { -small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn +#[no_mangle] +pub extern "C" fn ssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() } + #[rustfmt::skip] -#[no_mangle] pub extern "C" fn sssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { -small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn +#[no_mangle] +pub extern "C" fn sssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { + small_function() +} + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn ssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() } + #[rustfmt:: -skip] #[no_mangle] pub extern "C" fn +skip] +#[no_mangle] +pub extern "C" fn sssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() } + #[rustfmt:: -skip] #[no_mangle] pub extern "C" fn +skip] +#[no_mangle] +pub extern "C" fn ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() } + #[rustfmt:: -skip] #[no_mangle] pub extern "C" fn +skip] +#[no_mangle] +pub extern "C" fn 
sssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() } + #[rustfmt:: -skip] #[no_mangle] pub extern "C" fn +skip] +#[no_mangle] +pub extern "C" fn ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() } + #[rustfmt:: -skip] #[no_mangle] pub extern "C" fn +skip] +#[no_mangle] +pub extern "C" fn sssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn sssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn sssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn sssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn sssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() } 
-#[rustfmt::skip] #[no_mangle] pub extern "C" fn + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn sssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn -sssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn +sssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { + small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn -ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn +ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { + small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn sssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { -small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn + small_function() +} + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { -small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn + small_function() +} + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn sssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { -small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn + small_function() +} + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn 
ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { -small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn + small_function() +} + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn sssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { -small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn + small_function() +} + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { -small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn + small_function() +} + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn sssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { -small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn + small_function() +} + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { -small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn + small_function() +} + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn sssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { -small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn + small_function() +} + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { -small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn + small_function() +} + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn sssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { -small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn + small_function() +} + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { -small_function() } -#[rustfmt::skip] 
#[no_mangle] pub extern "C" fn + small_function() +} + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn sssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { -small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn + small_function() +} + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { -small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn + small_function() +} + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn sssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { -small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn + small_function() +} + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { -small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn + small_function() +} + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn sssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { -small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn + small_function() +} + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn sssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn 
sssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn sssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn l() { large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn ll() { large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn lll() { large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn llll() { large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn lllll() { large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn llllll() { large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn lllllll() { large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn llllllll() { large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn lllllllll() { large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn llllllllll() { large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn lllllllllll() { large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn llllllllllll() { large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn lllllllllllll() { large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn llllllllllllll() { large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn lllllllllllllll() { 
large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn llllllllllllllll() { large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn lllllllllllllllll() { large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn llllllllllllllllll() { large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn lllllllllllllllllll() { large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn llllllllllllllllllll() { large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn lllllllllllllllllllll() { large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn llllllllllllllllllllll() { large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn lllllllllllllllllllllll() { large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn llllllllllllllllllllllll() { large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn lllllllllllllllllllllllll() { large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn llllllllllllllllllllllllll() { large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn lllllllllllllllllllllllllll() { large_function() -} -#[rustfmt::skip] #[no_mangle] pub extern "C" fn llllllllllllllllllllllllllll() { -large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn l() { large_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn ll() { large_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn lll() { large_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn llll() { large_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn lllll() { large_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn llllll() { large_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn lllllll() { large_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn llllllll() { large_function() } + +#[rustfmt::skip] +#[no_mangle] +pub 
extern "C" fn lllllllll() { large_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn llllllllll() { large_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn lllllllllll() { large_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn llllllllllll() { large_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn lllllllllllll() { large_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn llllllllllllll() { large_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn lllllllllllllll() { large_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn llllllllllllllll() { large_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn lllllllllllllllll() { large_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn llllllllllllllllll() { large_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn lllllllllllllllllll() { large_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn llllllllllllllllllll() { large_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn lllllllllllllllllllll() { large_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn llllllllllllllllllllll() { large_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn lllllllllllllllllllllll() { large_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn llllllllllllllllllllllll() { large_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn lllllllllllllllllllllllll() { large_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn llllllllllllllllllllllllll() { large_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn lllllllllllllllllllllllllll() { + large_function() +} + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn llllllllllllllllllllllllllll() { + large_function() +} + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn lllllllllllllllllllllllllllll() { large_function() } 
-#[rustfmt::skip] #[no_mangle] pub extern "C" + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn llllllllllllllllllllllllllllll() { large_function() } -#[rustfmt::skip] #[no_mangle] pub extern + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn lllllllllllllllllllllllllllllll() { large_function() } -#[rustfmt::skip] #[no_mangle] pub + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn llllllllllllllllllllllllllllllll() { large_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn lllllllllllllllllllllllllllllllll() { large_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn llllllllllllllllllllllllllllllllll() { + large_function() +} + #[rustfmt::skip] -#[no_mangle] pub extern "C" fn lllllllllllllllllllllllllllllllll() { large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn llllllllllllllllllllllllllllllllll() { -large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn +#[no_mangle] +pub extern "C" fn lllllllllllllllllllllllllllllllllll() { large_function() } -#[rustfmt::skip] #[no_mangle] pub + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn llllllllllllllllllllllllllllllllllll() { large_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn lllllllllllllllllllllllllllllllllllll() { large_function() } + #[rustfmt::skip] -#[no_mangle] pub extern "C" fn lllllllllllllllllllllllllllllllllllll() { large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn llllllllllllllllllllllllllllllllllllll() { -large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn +#[no_mangle] +pub extern "C" fn llllllllllllllllllllllllllllllllllllll() { + large_function() +} + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn lllllllllllllllllllllllllllllllllllllll() { large_function() } -#[rustfmt::skip] #[no_mangle] pub + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn llllllllllllllllllllllllllllllllllllllll() { large_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn 
lllllllllllllllllllllllllllllllllllllllll() { large_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn llllllllllllllllllllllllllllllllllllllllll() { + large_function() +} + #[rustfmt::skip] -#[no_mangle] pub extern "C" fn lllllllllllllllllllllllllllllllllllllllll() { large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn llllllllllllllllllllllllllllllllllllllllll() { -large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn +#[no_mangle] +pub extern "C" fn lllllllllllllllllllllllllllllllllllllllllll() { large_function() } -#[rustfmt::skip] #[no_mangle] + +#[rustfmt::skip] +#[no_mangle] pub extern "C" fn llllllllllllllllllllllllllllllllllllllllllll() { large_function() } + #[rustfmt:: -skip] #[no_mangle] pub extern "C" fn lllllllllllllllllllllllllllllllllllllllllllll() { -large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn +skip] +#[no_mangle] +pub extern "C" fn lllllllllllllllllllllllllllllllllllllllllllll() { + large_function() +} + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn llllllllllllllllllllllllllllllllllllllllllllll() { large_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn lllllllllllllllllllllllllllllllllllllllllllllll() { + large_function() +} + #[rustfmt::skip] -#[no_mangle] pub extern "C" fn lllllllllllllllllllllllllllllllllllllllllllllll() { -large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn +#[no_mangle] +pub extern "C" fn llllllllllllllllllllllllllllllllllllllllllllllll() { large_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn lllllllllllllllllllllllllllllllllllllllllllllllll() { + large_function() +} + #[rustfmt::skip] -#[no_mangle] pub extern "C" fn lllllllllllllllllllllllllllllllllllllllllllllllll() { -large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn +#[no_mangle] +pub extern "C" fn llllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn 
lllllllllllllllllllllllllllllllllllllllllllllllllll() { + large_function() +} + #[rustfmt::skip] -#[no_mangle] pub extern "C" fn lllllllllllllllllllllllllllllllllllllllllllllllllll() { -large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn +#[no_mangle] +pub extern "C" fn llllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn lllllllllllllllllllllllllllllllllllllllllllllllllllll() { + large_function() +} + #[rustfmt::skip] -#[no_mangle] pub extern "C" fn lllllllllllllllllllllllllllllllllllllllllllllllllllll() { -large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn +#[no_mangle] +pub extern "C" fn llllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn lllllllllllllllllllllllllllllllllllllllllllllllllllllll() { + large_function() +} + #[rustfmt::skip] -#[no_mangle] pub extern "C" fn lllllllllllllllllllllllllllllllllllllllllllllllllllllll() { -large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn +#[no_mangle] +pub extern "C" fn llllllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() } + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn lllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { + large_function() +} + #[rustfmt::skip] -#[no_mangle] pub extern "C" fn lllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { -large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn +#[no_mangle] +pub extern "C" fn llllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() } + #[rustfmt:: -skip] #[no_mangle] pub extern "C" fn +skip] +#[no_mangle] +pub extern "C" fn lllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() } + #[rustfmt:: -skip] #[no_mangle] pub extern "C" fn +skip] +#[no_mangle] +pub extern "C" fn llllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() } + 
#[rustfmt:: -skip] #[no_mangle] pub extern "C" fn +skip] +#[no_mangle] +pub extern "C" fn lllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() } + #[rustfmt:: -skip] #[no_mangle] pub extern "C" fn +skip] +#[no_mangle] +pub extern "C" fn llllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() } + #[rustfmt:: -skip] #[no_mangle] pub extern "C" fn +skip] +#[no_mangle] +pub extern "C" fn lllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn llllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn lllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn llllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn lllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn llllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn lllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn llllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn 
lllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn llllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn lllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn llllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn -lllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn +lllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { + large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn -llllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn +llllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { + large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn lllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { -large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn + large_function() +} + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn llllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { -large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn + large_function() +} + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn lllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { -large_function() } -#[rustfmt::skip] #[no_mangle] pub extern 
"C" fn + large_function() +} + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn llllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { -large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn + large_function() +} + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn lllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { -large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn + large_function() +} + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn llllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { -large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn + large_function() +} + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn lllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { -large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn + large_function() +} + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn llllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { -large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn + large_function() +} + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn lllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { -large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn + large_function() +} + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn llllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { -large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn + large_function() +} + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn lllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { -large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn + large_function() +} + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn 
llllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { -large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn + large_function() +} + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn lllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { -large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn + large_function() +} + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn llllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { -large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn + large_function() +} + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn lllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { -large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn + large_function() +} + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn llllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { -large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn + large_function() +} + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn lllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { -large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn + large_function() +} + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn llllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn lllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn llllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() } 
-#[rustfmt::skip] #[no_mangle] pub extern "C" fn + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn lllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn llllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn lllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() } -#[rustfmt::skip] #[no_mangle] pub extern "C" fn + +#[rustfmt::skip] +#[no_mangle] +pub extern "C" fn llllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() } diff --git a/smart_contracts/contracts/profiling/simple-transfer/Cargo.toml b/smart_contracts/contracts/profiling/simple-transfer/Cargo.toml deleted file mode 100644 index 4190ab8830..0000000000 --- a/smart_contracts/contracts/profiling/simple-transfer/Cargo.toml +++ /dev/null @@ -1,19 +0,0 @@ -[package] -name = "simple-transfer" -version = "0.1.0" -authors = ["Fraser Hutchison "] -edition = "2018" - -[[bin]] -name = "simple_transfer" -path = "src/main.rs" -bench = false -doctest = false -test = false - -[features] -std = ["casper-contract/std", "casper-types/std"] - -[dependencies] -casper-contract = { path = "../../../contract" } -casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/profiling/simple-transfer/src/main.rs b/smart_contracts/contracts/profiling/simple-transfer/src/main.rs deleted file mode 100644 index 6c790a31aa..0000000000 --- a/smart_contracts/contracts/profiling/simple-transfer/src/main.rs +++ /dev/null @@ -1,28 +0,0 @@ -#![no_std] -#![no_main] - -use casper_contract::{ - contract_api::{runtime, system}, - 
unwrap_or_revert::UnwrapOrRevert, -}; -use casper_types::{account::AccountHash, ApiError, TransferredTo, U512}; - -const ARG_ACCOUNT_HASH: &str = "account_hash"; -const ARG_AMOUNT: &str = "amount"; - -#[repr(u16)] -enum Error { - NonExistentAccount = 0, -} - -#[no_mangle] -pub extern "C" fn call() { - let account_hash: AccountHash = runtime::get_named_arg(ARG_ACCOUNT_HASH); - let amount: U512 = runtime::get_named_arg(ARG_AMOUNT); - match system::transfer_to_account(account_hash, amount, None).unwrap_or_revert() { - TransferredTo::NewAccount => { - runtime::revert(ApiError::User(Error::NonExistentAccount as u16)) - } - TransferredTo::ExistingAccount => (), - } -} diff --git a/smart_contracts/contracts/profiling/state-initializer/Cargo.toml b/smart_contracts/contracts/profiling/state-initializer/Cargo.toml index 8b217d18d4..2fa05a3829 100644 --- a/smart_contracts/contracts/profiling/state-initializer/Cargo.toml +++ b/smart_contracts/contracts/profiling/state-initializer/Cargo.toml @@ -2,7 +2,7 @@ name = "state-initializer" version = "0.1.0" authors = ["Fraser Hutchison "] -edition = "2018" +edition = "2021" [[bin]] name = "state_initializer" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/add-associated-key/Cargo.toml b/smart_contracts/contracts/test/add-associated-key/Cargo.toml new file mode 100644 index 0000000000..85099960ae --- /dev/null +++ b/smart_contracts/contracts/test/add-associated-key/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "add-associated-key" +version = "0.1.0" +authors = ["Michał Papierski "] -edition = "2018" +edition = "2021" [[bin]] name = "add_gas_subcall" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = 
{ path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/add-gas-subcall/src/main.rs b/smart_contracts/contracts/test/add-gas-subcall/src/main.rs index 784c2f2cce..a609df5bd7 100644 --- a/smart_contracts/contracts/test/add-gas-subcall/src/main.rs +++ b/smart_contracts/contracts/test/add-gas-subcall/src/main.rs @@ -9,52 +9,62 @@ use alloc::string::String; use casper_contract::contract_api::{runtime, storage}; use casper_types::{ - runtime_args, ApiError, CLType, ContractHash, ContractVersion, EntryPoint, EntryPointAccess, - EntryPointType, EntryPoints, Parameter, RuntimeArgs, + contracts::{ContractHash, ContractVersion}, + runtime_args, ApiError, CLType, EntityEntryPoint, EntryPointAccess, EntryPointPayment, + EntryPointType, EntryPoints, Key, Parameter, }; -// This is making use of the undocumented "FFI" function `gas()` which is used by the Wasm -// interpreter to charge gas for upcoming interpreted instructions. For further info on this, see -// https://docs.rs/pwasm-utils/0.12.0/pwasm_utils/fn.inject_gas_counter.html -mod unsafe_ffi { - extern "C" { - pub fn gas(amount: i32); - } -} - -fn safe_gas(amount: i32) { - unsafe { unsafe_ffi::gas(amount) } -} - const SUBCALL_NAME: &str = "add_gas"; +const DATA_KEY: &str = "data"; const ADD_GAS_FROM_SESSION: &str = "add-gas-from-session"; const ADD_GAS_VIA_SUBCALL: &str = "add-gas-via-subcall"; const ARG_GAS_AMOUNT: &str = "gas_amount"; const ARG_METHOD_NAME: &str = "method_name"; +/// This should consume at least `amount * gas_per_byte + C` gas +/// where C contains wasm overhead and host function calls. 
+fn consume_at_least_gas_amount(amount: usize) { + if amount > 0 { + let data_uref = match runtime::get_key(DATA_KEY) { + Some(Key::URef(uref)) => uref, + Some(_key) => runtime::revert(ApiError::UnexpectedKeyVariant), + None => { + let uref = storage::new_uref(()); + runtime::put_key(DATA_KEY, uref.into()); + uref + } + }; + + let data = vec![0; amount]; + storage::write(data_uref, data); + } +} + #[no_mangle] pub extern "C" fn add_gas() { - let amount: i32 = runtime::get_named_arg(ARG_GAS_AMOUNT); - safe_gas(amount); + let amount: u32 = runtime::get_named_arg(ARG_GAS_AMOUNT); + + consume_at_least_gas_amount(amount as usize); } fn store() -> (ContractHash, ContractVersion) { let entry_points = { let mut entry_points = EntryPoints::new(); - let entry_point = EntryPoint::new( + let entry_point = EntityEntryPoint::new( SUBCALL_NAME, vec![Parameter::new(ARG_GAS_AMOUNT, CLType::I32)], CLType::Unit, EntryPointAccess::Public, - EntryPointType::Contract, + EntryPointType::Called, + EntryPointPayment::Caller, ); entry_points.add_entry_point(entry_point); entry_points }; - storage::new_contract(entry_points, None, None, None) + storage::new_contract(entry_points, None, None, None, None) } #[no_mangle] @@ -63,7 +73,7 @@ pub extern "C" fn call() { let method_name: String = runtime::get_named_arg(ARG_METHOD_NAME); match method_name.as_str() { - ADD_GAS_FROM_SESSION => safe_gas(amount), + ADD_GAS_FROM_SESSION => consume_at_least_gas_amount(amount as usize), ADD_GAS_VIA_SUBCALL => { let (contract_hash, _contract_version) = store(); runtime::call_contract( diff --git a/smart_contracts/contracts/test/add-update-associated-key/Cargo.toml b/smart_contracts/contracts/test/add-update-associated-key/Cargo.toml index 5d9ccf0bd8..23135e8c9b 100644 --- a/smart_contracts/contracts/test/add-update-associated-key/Cargo.toml +++ b/smart_contracts/contracts/test/add-update-associated-key/Cargo.toml @@ -1,8 +1,8 @@ [package] name = "add-update-associated-key" version = "0.1.0" -authors = ["Ed 
Hastings "] -edition = "2018" +authors = ["Michał Papierski "] +edition = "2021" [[bin]] name = "auction_bidding" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/auction-bidding/src/main.rs b/smart_contracts/contracts/test/auction-bidding/src/main.rs index 588eda171a..5877e88c92 100644 --- a/smart_contracts/contracts/test/auction-bidding/src/main.rs +++ b/smart_contracts/contracts/test/auction-bidding/src/main.rs @@ -12,8 +12,8 @@ use casper_contract::{ }; use casper_types::{ - account::AccountHash, runtime_args, system::auction, ApiError, ContractHash, PublicKey, - RuntimeArgs, U512, + account::AccountHash, contracts::ContractHash, runtime_args, system::auction, ApiError, + PublicKey, U512, }; const ARG_AMOUNT: &str = "amount"; diff --git a/smart_contracts/contracts/test/auction-bids/Cargo.toml b/smart_contracts/contracts/test/auction-bids/Cargo.toml index 7c2ed47a52..7b9ffb6569 100644 --- a/smart_contracts/contracts/test/auction-bids/Cargo.toml +++ b/smart_contracts/contracts/test/auction-bids/Cargo.toml @@ -1,8 +1,8 @@ [package] name = "auction-bids" version = "0.1.0" -authors = ["Michał Papierski "] -edition = "2018" +authors = ["Michał Papierski "] +edition = "2021" [[bin]] name = "auction_bids" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/auction-bids/src/main.rs b/smart_contracts/contracts/test/auction-bids/src/main.rs index 0762a25f3c..087425f661 100644 --- a/smart_contracts/contracts/test/auction-bids/src/main.rs +++ b/smart_contracts/contracts/test/auction-bids/src/main.rs @@ -3,17 +3,17 @@ extern 
crate alloc; -use alloc::{collections::BTreeMap, string::String}; +use alloc::string::String; use casper_contract::contract_api::{runtime, system}; use casper_types::{ runtime_args, system::auction::{ - ARG_DELEGATOR, ARG_ERA_END_TIMESTAMP_MILLIS, ARG_REWARD_FACTORS, ARG_VALIDATOR, - METHOD_DELEGATE, METHOD_DISTRIBUTE, METHOD_RUN_AUCTION, METHOD_UNDELEGATE, + ARG_DELEGATOR, ARG_ERA_END_TIMESTAMP_MILLIS, ARG_VALIDATOR, METHOD_DELEGATE, + METHOD_DISTRIBUTE, METHOD_RUN_AUCTION, METHOD_UNDELEGATE, }, - ApiError, PublicKey, RuntimeArgs, U512, + ApiError, PublicKey, U512, }; const ARG_ENTRY_POINT: &str = "entry_point"; @@ -82,9 +82,9 @@ fn run_auction() { fn distribute() { let auction = system::get_auction(); - let reward_factors: BTreeMap = runtime::get_named_arg(ARG_REWARD_FACTORS); + let proposer: PublicKey = runtime::get_named_arg(ARG_VALIDATOR); let args = runtime_args! { - ARG_REWARD_FACTORS => reward_factors + ARG_VALIDATOR => proposer }; runtime::call_contract::<()>(auction, METHOD_DISTRIBUTE, args); } diff --git a/smart_contracts/contracts/test/authorized-keys/Cargo.toml b/smart_contracts/contracts/test/authorized-keys/Cargo.toml deleted file mode 100644 index 2708f35472..0000000000 --- a/smart_contracts/contracts/test/authorized-keys/Cargo.toml +++ /dev/null @@ -1,19 +0,0 @@ -[package] -name = "authorized-keys" -version = "0.1.0" -authors = ["Michał Papierski "] -edition = "2018" - -[[bin]] -name = "authorized_keys" -path = "src/main.rs" -bench = false -doctest = false -test = false - -[features] -std = ["casper-contract/std", "casper-types/std"] - -[dependencies] -casper-contract = { path = "../../../contract" } -casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/authorized-keys/src/main.rs b/smart_contracts/contracts/test/authorized-keys/src/main.rs deleted file mode 100644 index 51ec0164b8..0000000000 --- a/smart_contracts/contracts/test/authorized-keys/src/main.rs +++ /dev/null @@ -1,35 +0,0 @@ -#![no_std] -#![no_main] 
- -use casper_contract::{ - contract_api::{account, runtime}, - unwrap_or_revert::UnwrapOrRevert, -}; -use casper_types::{ - account::{AccountHash, ActionType, AddKeyFailure, Weight}, - ApiError, -}; - -const ARG_KEY_MANAGEMENT_THRESHOLD: &str = "key_management_threshold"; -const ARG_DEPLOY_THRESHOLD: &str = "deploy_threshold"; - -#[no_mangle] -pub extern "C" fn call() { - match account::add_associated_key(AccountHash::new([123; 32]), Weight::new(100)) { - Err(AddKeyFailure::DuplicateKey) => {} - Err(_) => runtime::revert(ApiError::User(50)), - Ok(_) => {} - }; - - let key_management_threshold: Weight = runtime::get_named_arg(ARG_KEY_MANAGEMENT_THRESHOLD); - let deploy_threshold: Weight = runtime::get_named_arg(ARG_DEPLOY_THRESHOLD); - - if key_management_threshold != Weight::new(0) { - account::set_action_threshold(ActionType::KeyManagement, key_management_threshold) - .unwrap_or_revert() - } - - if deploy_threshold != Weight::new(0) { - account::set_action_threshold(ActionType::Deployment, deploy_threshold).unwrap_or_revert() - } -} diff --git a/smart_contracts/contracts/test/blake2b/Cargo.toml b/smart_contracts/contracts/test/blake2b/Cargo.toml index 17f4447609..5f2c06d8a5 100644 --- a/smart_contracts/contracts/test/blake2b/Cargo.toml +++ b/smart_contracts/contracts/test/blake2b/Cargo.toml @@ -1,8 +1,8 @@ [package] name = "blake2b" -version = "0.1.0" +version = "0.8.0" authors = ["Henry Till "] -edition = "2018" +edition = "2021" [[bin]] name = "blake2b" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/contract-context/Cargo.toml b/smart_contracts/contracts/test/contract-context/Cargo.toml index 5f6bd69a3b..7cc88e92d0 100644 --- a/smart_contracts/contracts/test/contract-context/Cargo.toml +++ 
b/smart_contracts/contracts/test/contract-context/Cargo.toml @@ -1,8 +1,8 @@ [package] name = "contract-context" version = "0.1.0" -authors = ["Michał Papierski "] -edition = "2018" +authors = ["Michał Papierski "] +edition = "2021" [[bin]] name = "contract_context" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/contract-context/src/main.rs b/smart_contracts/contracts/test/contract-context/src/main.rs index 24c9153965..b230d7f6eb 100644 --- a/smart_contracts/contracts/test/contract-context/src/main.rs +++ b/smart_contracts/contracts/test/contract-context/src/main.rs @@ -3,18 +3,16 @@ extern crate alloc; -use alloc::{string::ToString, vec::Vec}; +use alloc::{collections::BTreeMap, string::ToString, vec::Vec}; use casper_contract::{ contract_api::{runtime, storage}, unwrap_or_revert::UnwrapOrRevert, }; use casper_types::{ - contracts::{ - EntryPoint, EntryPointAccess, EntryPointType, EntryPoints, NamedKeys, - CONTRACT_INITIAL_VERSION, - }, - runtime_args, CLType, ContractHash, ContractPackageHash, ContractVersion, Key, RuntimeArgs, + addressable_entity::{EntityEntryPoint, EntryPointAccess, EntryPointType, EntryPoints}, + contracts::{ContractHash, ContractPackageHash, ContractVersion, NamedKeys}, + runtime_args, AddressableEntityHash, CLType, EntryPointPayment, Key, ENTITY_INITIAL_VERSION, }; const PACKAGE_HASH_KEY: &str = "package_hash_key"; @@ -44,12 +42,12 @@ pub extern "C" fn contract_code_test() { pub extern "C" fn session_code_caller_as_session() { let contract_package_hash = runtime::get_key(PACKAGE_HASH_KEY) .expect("should have contract package key") - .into_hash() + .into_entity_hash_addr() .unwrap_or_revert(); runtime::call_versioned_contract::<()>( contract_package_hash.into(), - Some(CONTRACT_INITIAL_VERSION), + Some(ENTITY_INITIAL_VERSION), 
SESSION_CODE, runtime_args! {}, ); @@ -65,14 +63,14 @@ pub extern "C" fn add_new_key() { pub extern "C" fn add_new_key_as_session() { let contract_package_hash = runtime::get_key(PACKAGE_HASH_KEY) .expect("should have package hash") - .into_hash() + .into_entity_hash_addr() .unwrap_or_revert() .into(); assert!(runtime::get_key(NEW_KEY).is_none()); runtime::call_versioned_contract::<()>( contract_package_hash, - Some(CONTRACT_INITIAL_VERSION), + Some(ENTITY_INITIAL_VERSION), "add_new_key", runtime_args! {}, ); @@ -82,10 +80,10 @@ pub extern "C" fn add_new_key_as_session() { #[no_mangle] pub extern "C" fn session_code_caller_as_contract() { let contract_package_key: Key = runtime::get_named_arg(PACKAGE_HASH_KEY); - let contract_package_hash = contract_package_key.into_hash().unwrap_or_revert().into(); + let contract_package_hash = contract_package_key.into_package_hash().unwrap_or_revert(); runtime::call_versioned_contract::<()>( - contract_package_hash, - Some(CONTRACT_INITIAL_VERSION), + contract_package_hash.into(), + Some(ENTITY_INITIAL_VERSION), SESSION_CODE, runtime_args! 
{}, ); @@ -93,59 +91,27 @@ pub extern "C" fn session_code_caller_as_contract() { fn create_entrypoints_1() -> EntryPoints { let mut entry_points = EntryPoints::new(); - let session_code_test = EntryPoint::new( - SESSION_CODE.to_string(), - Vec::new(), - CLType::I32, - EntryPointAccess::Public, - EntryPointType::Session, - ); - entry_points.add_entry_point(session_code_test); - let contract_code_test = EntryPoint::new( + let contract_code_test = EntityEntryPoint::new( CONTRACT_CODE.to_string(), Vec::new(), CLType::I32, EntryPointAccess::Public, - EntryPointType::Contract, + EntryPointType::Called, + EntryPointPayment::Caller, ); entry_points.add_entry_point(contract_code_test); - let session_code_caller_as_session = EntryPoint::new( - "session_code_caller_as_session".to_string(), - Vec::new(), - CLType::I32, - EntryPointAccess::Public, - EntryPointType::Session, - ); - entry_points.add_entry_point(session_code_caller_as_session); - - let session_code_caller_as_contract = EntryPoint::new( + let session_code_caller_as_contract = EntityEntryPoint::new( "session_code_caller_as_contract".to_string(), Vec::new(), CLType::I32, EntryPointAccess::Public, - EntryPointType::Contract, + EntryPointType::Called, + EntryPointPayment::Caller, ); entry_points.add_entry_point(session_code_caller_as_contract); - let add_new_key = EntryPoint::new( - "add_new_key".to_string(), - Vec::new(), - CLType::I32, - EntryPointAccess::Public, - EntryPointType::Session, - ); - entry_points.add_entry_point(add_new_key); - let add_new_key_as_session = EntryPoint::new( - "add_new_key_as_session".to_string(), - Vec::new(), - CLType::I32, - EntryPointAccess::Public, - EntryPointType::Session, - ); - entry_points.add_entry_point(add_new_key_as_session); - entry_points } @@ -159,7 +125,12 @@ fn install_version_1(package_hash: ContractPackageHash) -> (ContractHash, Contra }; let entry_points = create_entrypoints_1(); - storage::add_contract_version(package_hash, entry_points, contract_named_keys) + 
storage::add_contract_version( + package_hash, + entry_points, + contract_named_keys, + BTreeMap::new(), + ) } #[no_mangle] @@ -171,5 +142,8 @@ pub extern "C" fn call() { runtime::put_key(PACKAGE_ACCESS_KEY, access_uref.into()); let (contract_hash, contract_version) = install_version_1(contract_package_hash); runtime::put_key(CONTRACT_VERSION, storage::new_uref(contract_version).into()); - runtime::put_key(CONTRACT_HASH_KEY, Key::Hash(contract_hash.value())); + runtime::put_key( + CONTRACT_HASH_KEY, + Key::contract_entity_key(AddressableEntityHash::new(contract_hash.value())), + ); } diff --git a/smart_contracts/contracts/test/contract-funds-call/Cargo.toml b/smart_contracts/contracts/test/contract-funds-call/Cargo.toml new file mode 100644 index 0000000000..6f2afd7327 --- /dev/null +++ b/smart_contracts/contracts/test/contract-funds-call/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "contract-funds-call" +version = "0.1.0" +authors = ["Michał Papierski "] +edition = "2021" + +[[bin]] +name = "contract_funds_call" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/contract-funds-call/src/main.rs b/smart_contracts/contracts/test/contract-funds-call/src/main.rs new file mode 100644 index 0000000000..e4e25c01f7 --- /dev/null +++ b/smart_contracts/contracts/test/contract-funds-call/src/main.rs @@ -0,0 +1,41 @@ +#![no_std] +#![no_main] + +extern crate alloc; + +use casper_contract::{ + contract_api::{account, runtime, system}, + unwrap_or_revert::UnwrapOrRevert, +}; + +use casper_types::{contracts::ContractHash, Key, RuntimeArgs, URef, U512}; + +const GET_PAYMENT_PURSE_NAME: &str = "get_payment_purse"; +const HASH_KEY_NAME: &str = "contract_own_funds_hash"; +const ARG_AMOUNT: &str = "amount"; + +fn get_payment_purse() -> URef { + let contract_hash = get_entity_hash_name(); + 
runtime::call_contract( + contract_hash, + GET_PAYMENT_PURSE_NAME, + RuntimeArgs::default(), + ) +} + +fn get_entity_hash_name() -> ContractHash { + runtime::get_key(HASH_KEY_NAME) + .and_then(Key::into_entity_hash_addr) + .map(ContractHash::new) + .unwrap_or_revert() +} + +#[no_mangle] +pub extern "C" fn call() { + let amount: U512 = runtime::get_named_arg(ARG_AMOUNT); + + let payment_purse = get_payment_purse(); + + system::transfer_from_purse_to_purse(account::get_main_purse(), payment_purse, amount, None) + .unwrap_or_revert(); +} diff --git a/smart_contracts/contracts/test/contract-funds/Cargo.toml b/smart_contracts/contracts/test/contract-funds/Cargo.toml new file mode 100644 index 0000000000..63f93e7626 --- /dev/null +++ b/smart_contracts/contracts/test/contract-funds/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "contract-funds" +version = "0.1.0" +authors = ["Michał Papierski "] +edition = "2021" + +[[bin]] +name = "contract_funds" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/contract-funds/src/main.rs b/smart_contracts/contracts/test/contract-funds/src/main.rs new file mode 100644 index 0000000000..bd6c7a66ca --- /dev/null +++ b/smart_contracts/contracts/test/contract-funds/src/main.rs @@ -0,0 +1,77 @@ +#![no_std] +#![no_main] + +extern crate alloc; + +use alloc::{string::ToString, vec}; + +use casper_contract::{ + contract_api::{runtime, storage, system}, + unwrap_or_revert::UnwrapOrRevert, +}; + +use casper_types::{ + account::AccountHash, + addressable_entity::{EntityEntryPoint, EntryPointAccess, EntryPointType, Parameter}, + contracts::NamedKeys, + AddressableEntityHash, CLTyped, CLValue, EntryPointPayment, EntryPoints, Key, URef, +}; + +const GET_PAYMENT_PURSE_NAME: &str = "get_payment_purse"; +const PACKAGE_HASH_KEY_NAME: &str = "contract_own_funds"; +const 
HASH_KEY_NAME: &str = "contract_own_funds_hash"; +const ACCESS_KEY_NAME: &str = "contract_own_funds_access"; +const ARG_TARGET: &str = "target"; +const CONTRACT_VERSION: &str = "contract_version"; +const PAYMENT_PURSE_KEY: &str = "payment_purse"; + +#[no_mangle] +pub extern "C" fn get_payment_purse() { + let purse_uref = runtime::get_key(PAYMENT_PURSE_KEY) + .and_then(Key::into_uref) + .unwrap_or_revert(); + + let attenuated_purse = purse_uref.into_add(); + + runtime::ret(CLValue::from_t(attenuated_purse).unwrap_or_revert()); +} + +#[no_mangle] +pub extern "C" fn call() { + let entry_points = { + let mut entry_points = EntryPoints::new(); + + let faucet_entrypoint = EntityEntryPoint::new( + GET_PAYMENT_PURSE_NAME.to_string(), + vec![Parameter::new(ARG_TARGET, AccountHash::cl_type())], + URef::cl_type(), + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + + entry_points.add_entry_point(faucet_entrypoint); + entry_points + }; + + let named_keys = { + let faucet_funds = system::create_purse(); + + let mut named_keys = NamedKeys::new(); + named_keys.insert(PAYMENT_PURSE_KEY.to_string(), faucet_funds.into()); + named_keys + }; + + let (contract_hash, contract_version) = storage::new_contract( + entry_points, + Some(named_keys), + Some(PACKAGE_HASH_KEY_NAME.to_string()), + Some(ACCESS_KEY_NAME.to_string()), + None, + ); + runtime::put_key(CONTRACT_VERSION, storage::new_uref(contract_version).into()); + runtime::put_key( + HASH_KEY_NAME, + Key::contract_entity_key(AddressableEntityHash::new(contract_hash.value())), + ); +} diff --git a/smart_contracts/contracts/test/contract-messages-emitter/Cargo.toml b/smart_contracts/contracts/test/contract-messages-emitter/Cargo.toml new file mode 100644 index 0000000000..4594ca4c88 --- /dev/null +++ b/smart_contracts/contracts/test/contract-messages-emitter/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "contract-messages-emitter" +version = "0.1.0" +authors = ["Alexandru Sardan "] +edition = 
"2018" + +[[bin]] +name = "contract_messages_emitter" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/contract-messages-emitter/src/main.rs b/smart_contracts/contracts/test/contract-messages-emitter/src/main.rs new file mode 100644 index 0000000000..552d57f4ad --- /dev/null +++ b/smart_contracts/contracts/test/contract-messages-emitter/src/main.rs @@ -0,0 +1,153 @@ +#![no_std] +#![no_main] + +#[macro_use] +extern crate alloc; + +use alloc::{ + collections::BTreeMap, + string::{String, ToString}, + vec::Vec, +}; + +use casper_contract::{ + contract_api::{runtime, storage}, + unwrap_or_revert::UnwrapOrRevert, +}; + +use casper_types::{ + addressable_entity::{EntityEntryPoint, EntryPointAccess, EntryPointType, EntryPoints}, + api_error::ApiError, + contract_messages::MessageTopicOperation, + contracts::NamedKeys, + CLType, CLTyped, EntryPointPayment, Parameter, RuntimeArgs, +}; + +const ENTRY_POINT_INIT: &str = "init"; +const ENTRY_POINT_EMIT_MESSAGE: &str = "emit_message"; +const ENTRY_POINT_EMIT_MULTIPLE_MESSAGES: &str = "emit_multiple_messages"; +const ENTRY_POINT_ADD_TOPIC: &str = "add_topic"; +const MESSAGE_EMITTER_INITIALIZED: &str = "message_emitter_initialized"; +const ARG_MESSAGE_SUFFIX_NAME: &str = "message_suffix"; +const ARG_NUM_MESSAGES_TO_EMIT: &str = "num_messages_to_emit"; +const ARG_REGISTER_DEFAULT_TOPIC_WITH_INIT: &str = "register_default_topic_with_init"; +const ARG_TOPIC_NAME: &str = "topic_name"; +const PACKAGE_HASH_KEY_NAME: &str = "messages_emitter_package_hash"; +const ACCESS_KEY_NAME: &str = "messages_emitter_access"; + +pub const MESSAGE_EMITTER_GENERIC_TOPIC: &str = "generic_messages"; +pub const MESSAGE_PREFIX: &str = "generic message: "; + +#[no_mangle] +pub extern "C" fn emit_message() { + let suffix: String = runtime::get_named_arg(ARG_MESSAGE_SUFFIX_NAME); + + 
runtime::emit_message( + MESSAGE_EMITTER_GENERIC_TOPIC, + &format!("{}{}", MESSAGE_PREFIX, suffix).into(), + ) + .unwrap_or_revert(); +} + +#[no_mangle] +pub extern "C" fn emit_multiple_messages() { + let num_messages: u32 = runtime::get_named_arg(ARG_NUM_MESSAGES_TO_EMIT); + + for i in 0..num_messages { + runtime::emit_message( + MESSAGE_EMITTER_GENERIC_TOPIC, + &format!("{}{}", MESSAGE_PREFIX, i).into(), + ) + .unwrap_or_revert(); + } +} + +#[no_mangle] +pub extern "C" fn add_topic() { + let topic_name: String = runtime::get_named_arg(ARG_TOPIC_NAME); + + runtime::manage_message_topic(topic_name.as_str(), MessageTopicOperation::Add) + .unwrap_or_revert(); +} + +#[no_mangle] +pub extern "C" fn init() { + if runtime::has_key(MESSAGE_EMITTER_INITIALIZED) { + runtime::revert(ApiError::User(0)); + } + + runtime::manage_message_topic(MESSAGE_EMITTER_GENERIC_TOPIC, MessageTopicOperation::Add) + .unwrap_or_revert(); + + runtime::put_key(MESSAGE_EMITTER_INITIALIZED, storage::new_uref(()).into()); +} + +#[no_mangle] +pub extern "C" fn call() { + let register_topic_with_init: bool = + runtime::get_named_arg(ARG_REGISTER_DEFAULT_TOPIC_WITH_INIT); + + let mut emitter_entry_points = EntryPoints::new(); + emitter_entry_points.add_entry_point(EntityEntryPoint::new( + ENTRY_POINT_INIT, + Vec::new(), + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + )); + emitter_entry_points.add_entry_point(EntityEntryPoint::new( + ENTRY_POINT_EMIT_MESSAGE, + vec![Parameter::new(ARG_MESSAGE_SUFFIX_NAME, String::cl_type())], + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + )); + emitter_entry_points.add_entry_point(EntityEntryPoint::new( + ENTRY_POINT_ADD_TOPIC, + vec![Parameter::new(ARG_TOPIC_NAME, String::cl_type())], + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + )); + emitter_entry_points.add_entry_point(EntityEntryPoint::new( + 
ENTRY_POINT_EMIT_MULTIPLE_MESSAGES, + vec![Parameter::new(ARG_NUM_MESSAGES_TO_EMIT, u32::cl_type())], + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + )); + + if register_topic_with_init { + let (stored_contract_hash, _contract_version) = storage::new_contract( + emitter_entry_points, + Some(NamedKeys::new()), + Some(PACKAGE_HASH_KEY_NAME.into()), + Some(ACCESS_KEY_NAME.into()), + None, + ); + + // Call contract to initialize it and register the default topic. + runtime::call_contract::<()>( + stored_contract_hash, + ENTRY_POINT_INIT, + RuntimeArgs::default(), + ); + } else { + let new_topics = BTreeMap::from([( + MESSAGE_EMITTER_GENERIC_TOPIC.to_string(), + MessageTopicOperation::Add, + )]); + // Register the default topic on contract creation and not through the initializer. + let (_stored_contract_hash, _contract_version) = storage::new_contract( + emitter_entry_points, + Some(NamedKeys::new()), + Some(PACKAGE_HASH_KEY_NAME.into()), + Some(ACCESS_KEY_NAME.into()), + Some(new_topics), + ); + } +} diff --git a/smart_contracts/contracts/test/contract-messages-from-account/Cargo.toml b/smart_contracts/contracts/test/contract-messages-from-account/Cargo.toml new file mode 100644 index 0000000000..a15fba294c --- /dev/null +++ b/smart_contracts/contracts/test/contract-messages-from-account/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "contract-messages-from-account" +version = "0.1.0" +authors = ["Alexandru Sardan "] +edition = "2018" + +[[bin]] +name = "contract_messages_from_account" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/contract-messages-from-account/src/main.rs b/smart_contracts/contracts/test/contract-messages-from-account/src/main.rs new file mode 100644 index 0000000000..c582143431 --- /dev/null +++ 
b/smart_contracts/contracts/test/contract-messages-from-account/src/main.rs @@ -0,0 +1,15 @@ +#![no_std] +#![no_main] + +extern crate alloc; + +use casper_contract::{contract_api::runtime, unwrap_or_revert::UnwrapOrRevert}; + +use casper_types::contract_messages::MessageTopicOperation; + +const TOPIC_NAME: &str = "messages_topic"; + +#[no_mangle] +pub extern "C" fn call() { + runtime::manage_message_topic(TOPIC_NAME, MessageTopicOperation::Add).unwrap_or_revert(); +} diff --git a/smart_contracts/contracts/test/contract-messages-upgrader/Cargo.toml b/smart_contracts/contracts/test/contract-messages-upgrader/Cargo.toml new file mode 100644 index 0000000000..ea8ab6abd0 --- /dev/null +++ b/smart_contracts/contracts/test/contract-messages-upgrader/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "contract-messages-upgrader" +version = "0.1.0" +authors = ["Alexandru Sardan "] +edition = "2018" + +[[bin]] +name = "contract_messages_upgrader" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract", features = ["test-support"] } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/contract-messages-upgrader/src/main.rs b/smart_contracts/contracts/test/contract-messages-upgrader/src/main.rs new file mode 100644 index 0000000000..697b889c1e --- /dev/null +++ b/smart_contracts/contracts/test/contract-messages-upgrader/src/main.rs @@ -0,0 +1,165 @@ +#![no_std] +#![no_main] + +#[macro_use] +extern crate alloc; + +use alloc::{ + collections::BTreeMap, + string::{String, ToString}, + vec::Vec, +}; + +use casper_contract::{ + contract_api::{runtime, storage}, + unwrap_or_revert::UnwrapOrRevert, +}; + +use casper_types::{ + addressable_entity::{EntityEntryPoint, EntryPointAccess, EntryPointType, EntryPoints}, + api_error::ApiError, + contract_messages::MessageTopicOperation, + contracts::NamedKeys, + runtime_args, CLType, CLTyped, EntryPointPayment, PackageHash, 
Parameter, RuntimeArgs, +}; + +const ENTRY_POINT_INIT: &str = "init"; +const FIRST_VERSION_ENTRY_POINT_EMIT_MESSAGE: &str = "emit_message"; +const ENTRY_POINT_EMIT_MESSAGE: &str = "upgraded_emit_message"; +const ENTRY_POINT_EMIT_MESSAGE_FROM_EACH_VERSION: &str = "emit_message_from_each_version"; +const UPGRADED_MESSAGE_EMITTER_INITIALIZED: &str = "upgraded_message_emitter_initialized"; +const ARG_MESSAGE_SUFFIX_NAME: &str = "message_suffix"; +const PACKAGE_HASH_KEY_NAME: &str = "messages_emitter_package_hash"; +const ARG_REGISTER_DEFAULT_TOPIC_WITH_INIT: &str = "register_default_topic_with_init"; + +pub const MESSAGE_EMITTER_GENERIC_TOPIC: &str = "new_topic_after_upgrade"; +pub const MESSAGE_PREFIX: &str = "generic message: "; + +#[no_mangle] +pub extern "C" fn upgraded_emit_message() { + let suffix: String = runtime::get_named_arg(ARG_MESSAGE_SUFFIX_NAME); + + runtime::emit_message( + MESSAGE_EMITTER_GENERIC_TOPIC, + &format!("{}{}", MESSAGE_PREFIX, suffix).into(), + ) + .unwrap_or_revert(); +} + +#[no_mangle] +pub extern "C" fn emit_message_from_each_version() { + let suffix: String = runtime::get_named_arg(ARG_MESSAGE_SUFFIX_NAME); + + let contract_package_hash: PackageHash = runtime::get_key(PACKAGE_HASH_KEY_NAME) + .expect("should have contract package key") + .into_package_addr() + .unwrap_or_revert() + .into(); + + // Emit a message from this contract. + runtime::emit_message( + MESSAGE_EMITTER_GENERIC_TOPIC, + &"emitting multiple messages".into(), + ) + .unwrap_or_revert(); + + // Call previous contract version which will emit a message. + runtime::call_package_version::<()>( + contract_package_hash.into(), + Some(2), + Some(1), + FIRST_VERSION_ENTRY_POINT_EMIT_MESSAGE, + runtime_args! { + ARG_MESSAGE_SUFFIX_NAME => suffix.clone(), + }, + ); + + // Emit another message from this version. 
+ runtime::emit_message( + MESSAGE_EMITTER_GENERIC_TOPIC, + &format!("{}{}", MESSAGE_PREFIX, suffix).into(), + ) + .unwrap_or_revert(); +} + +#[no_mangle] +pub extern "C" fn init() { + if runtime::has_key(UPGRADED_MESSAGE_EMITTER_INITIALIZED) { + runtime::revert(ApiError::User(0)); + } + + runtime::manage_message_topic(MESSAGE_EMITTER_GENERIC_TOPIC, MessageTopicOperation::Add) + .unwrap_or_revert(); + + runtime::put_key( + UPGRADED_MESSAGE_EMITTER_INITIALIZED, + storage::new_uref(()).into(), + ); +} + +#[no_mangle] +pub extern "C" fn call() { + let register_topic_with_init: bool = + runtime::get_named_arg(ARG_REGISTER_DEFAULT_TOPIC_WITH_INIT); + + let mut emitter_entry_points = EntryPoints::new(); + emitter_entry_points.add_entry_point(EntityEntryPoint::new( + ENTRY_POINT_INIT, + Vec::new(), + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + )); + emitter_entry_points.add_entry_point(EntityEntryPoint::new( + ENTRY_POINT_EMIT_MESSAGE, + vec![Parameter::new(ARG_MESSAGE_SUFFIX_NAME, String::cl_type())], + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + )); + emitter_entry_points.add_entry_point(EntityEntryPoint::new( + ENTRY_POINT_EMIT_MESSAGE_FROM_EACH_VERSION, + vec![Parameter::new(ARG_MESSAGE_SUFFIX_NAME, String::cl_type())], + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + )); + + let message_emitter_package_hash: PackageHash = runtime::get_key(PACKAGE_HASH_KEY_NAME) + .unwrap_or_revert() + .into_package_addr() + .unwrap_or_revert() + .into(); + + let mut named_keys = NamedKeys::new(); + named_keys.insert( + PACKAGE_HASH_KEY_NAME.into(), + message_emitter_package_hash.into(), + ); + + if register_topic_with_init { + let (contract_hash, _contract_version) = storage::add_contract_version( + message_emitter_package_hash.into(), + emitter_entry_points, + named_keys, + BTreeMap::new(), + ); + + // Call contract to 
initialize it + runtime::call_contract::<()>(contract_hash, ENTRY_POINT_INIT, RuntimeArgs::default()); + } else { + let new_topics = BTreeMap::from([( + MESSAGE_EMITTER_GENERIC_TOPIC.to_string(), + MessageTopicOperation::Add, + )]); + let (_contract_hash, _contract_version) = storage::add_contract_version( + message_emitter_package_hash.into(), + emitter_entry_points, + named_keys, + new_topics, + ); + } +} diff --git a/smart_contracts/contracts/test/counter-factory/Cargo.toml b/smart_contracts/contracts/test/counter-factory/Cargo.toml new file mode 100644 index 0000000000..2a882b8ba8 --- /dev/null +++ b/smart_contracts/contracts/test/counter-factory/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "counter-factory" +version = "0.1.0" +authors = ["Michał Papierski "] +edition = "2018" + +[[bin]] +name = "counter_factory" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/counter-factory/src/main.rs b/smart_contracts/contracts/test/counter-factory/src/main.rs new file mode 100644 index 0000000000..e1b674513c --- /dev/null +++ b/smart_contracts/contracts/test/counter-factory/src/main.rs @@ -0,0 +1,174 @@ +#![no_std] +#![no_main] + +extern crate alloc; + +use alloc::string::{String, ToString}; + +use casper_contract::{ + contract_api::{runtime, storage}, + unwrap_or_revert::UnwrapOrRevert, +}; +use casper_types::{ + addressable_entity::{EntityEntryPoint, EntryPoints, Parameters}, + bytesrepr::FromBytes, + contracts::NamedKeys, + ApiError, CLType, CLTyped, EntryPointAccess, EntryPointPayment, EntryPointType, Key, URef, + U512, +}; + +const ACCESS_KEY_NAME: &str = "factory_access"; +const ARG_INITIAL_VALUE: &str = "initial_value"; +const ARG_NAME: &str = "name"; +const CONTRACT_FACTORY_DEFAULT_ENTRY_POINT: &str = "contract_factory_default"; +const CONTRACT_FACTORY_ENTRY_POINT: &str = 
"contract_factory"; +const CONTRACT_VERSION: &str = "contract_version"; +const CURRENT_VALUE_KEY: &str = "current_value"; +const DECREASE_ENTRY_POINT: &str = "decrement"; +const HASH_KEY_NAME: &str = "factory_hash"; +const INCREASE_ENTRY_POINT: &str = "increment"; +const PACKAGE_HASH_KEY_NAME: &str = "factory_package_hash"; + +fn get_named_uref(name: &str) -> Result { + runtime::get_key(name) + .ok_or(ApiError::MissingKey)? + .into_uref() + .ok_or(ApiError::UnexpectedKeyVariant) +} + +fn read_uref(uref: URef) -> Result { + let value: T = storage::read(uref)?.ok_or(ApiError::ValueNotFound)?; + Ok(value) +} + +fn modify_counter(func: impl FnOnce(U512) -> U512) -> Result<(), ApiError> { + let current_value_uref = get_named_uref(CURRENT_VALUE_KEY)?; + let value: U512 = read_uref(current_value_uref)?; + let new_value = func(value); + storage::write(current_value_uref, new_value); + Ok(()) +} + +#[no_mangle] +pub extern "C" fn increment() { + modify_counter(|value| value + U512::one()).unwrap_or_revert(); +} + +#[no_mangle] +pub extern "C" fn decrement() { + modify_counter(|value| value - U512::one()).unwrap_or_revert(); +} + +#[no_mangle] +pub extern "C" fn contract_factory() { + let name: String = runtime::get_named_arg(ARG_NAME); + let initial_value: U512 = runtime::get_named_arg(ARG_INITIAL_VALUE); + installer(name, initial_value); +} + +#[no_mangle] +pub extern "C" fn contract_factory_default() { + let name: String = runtime::get_named_arg(ARG_NAME); + installer(name, U512::zero()); +} + +fn installer(name: String, initial_value: U512) { + let named_keys = { + let new_uref = storage::new_uref(initial_value); + let mut named_keys = NamedKeys::new(); + named_keys.insert(CURRENT_VALUE_KEY.to_string(), new_uref.into()); + named_keys + }; + + let entry_points = { + let mut entry_points = EntryPoints::new(); + let entry_point: EntityEntryPoint = EntityEntryPoint::new( + INCREASE_ENTRY_POINT.to_string(), + Parameters::new(), + CLType::Unit, + EntryPointAccess::Public, + 
EntryPointType::Called, + EntryPointPayment::Caller, + ); + entry_points.add_entry_point(entry_point); + let entry_point: EntityEntryPoint = EntityEntryPoint::new( + DECREASE_ENTRY_POINT.to_string(), + Parameters::new(), + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + entry_points.add_entry_point(entry_point); + + entry_points + }; + + let (contract_hash, contract_version) = storage::new_contract( + entry_points, + Some(named_keys), + Some(PACKAGE_HASH_KEY_NAME.to_string()), + Some(ACCESS_KEY_NAME.to_string()), + None, + ); + + runtime::put_key(CONTRACT_VERSION, storage::new_uref(contract_version).into()); + runtime::put_key(&name, Key::Hash(contract_hash.value())); +} + +#[no_mangle] +pub extern "C" fn call() { + let entry_points = { + let mut entry_points = EntryPoints::new(); + + let entry_point: EntityEntryPoint = EntityEntryPoint::new( + CONTRACT_FACTORY_ENTRY_POINT.to_string(), + Parameters::new(), + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Factory, + EntryPointPayment::Caller, + ); + entry_points.add_entry_point(entry_point); + let entry_point: EntityEntryPoint = EntityEntryPoint::new( + CONTRACT_FACTORY_DEFAULT_ENTRY_POINT.to_string(), + Parameters::new(), + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Factory, + EntryPointPayment::Caller, + ); + entry_points.add_entry_point(entry_point); + let entry_point: EntityEntryPoint = EntityEntryPoint::new( + INCREASE_ENTRY_POINT.to_string(), + Parameters::new(), + CLType::Unit, + EntryPointAccess::Template, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + entry_points.add_entry_point(entry_point); + let entry_point: EntityEntryPoint = EntityEntryPoint::new( + DECREASE_ENTRY_POINT.to_string(), + Parameters::new(), + CLType::Unit, + EntryPointAccess::Template, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + entry_points.add_entry_point(entry_point); + + entry_points + }; + + let (contract_hash, 
contract_version) = storage::new_contract( + entry_points, + None, + Some(PACKAGE_HASH_KEY_NAME.to_string()), + Some(ACCESS_KEY_NAME.to_string()), + None, + ); + + runtime::put_key(CONTRACT_VERSION, storage::new_uref(contract_version).into()); + runtime::put_key(HASH_KEY_NAME, Key::Hash(contract_hash.value())); +} diff --git a/smart_contracts/contracts/test/create-purse-01/Cargo.toml b/smart_contracts/contracts/test/create-purse-01/Cargo.toml index 4b4d92d952..de7bab898d 100644 --- a/smart_contracts/contracts/test/create-purse-01/Cargo.toml +++ b/smart_contracts/contracts/test/create-purse-01/Cargo.toml @@ -1,8 +1,8 @@ [package] name = "create-purse-01" version = "0.1.0" -authors = ["Henry Till ", "Ed Hastings ", "Ed Hastings "] -edition = "2018" +authors = ["Mateusz Górski "] +edition = "2021" [[bin]] name = "deserialize_error" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/deserialize-error/src/main.rs b/smart_contracts/contracts/test/deserialize-error/src/main.rs index 63852877b4..3fc44edf91 100644 --- a/smart_contracts/contracts/test/deserialize-error/src/main.rs +++ b/smart_contracts/contracts/test/deserialize-error/src/main.rs @@ -3,12 +3,12 @@ extern crate alloc; -use alloc::{vec, vec::Vec}; +use alloc::vec::Vec; use casper_contract::{self, contract_api::storage, unwrap_or_revert::UnwrapOrRevert}; use casper_types::{ - api_error, bytesrepr::ToBytes, contracts::Parameters, CLType, ContractHash, EntryPoint, - EntryPointAccess, EntryPointType, EntryPoints, RuntimeArgs, + addressable_entity::Parameters, api_error, bytesrepr::ToBytes, contracts::ContractHash, CLType, + EntityEntryPoint, EntryPointAccess, EntryPointPayment, EntryPointType, EntryPoints, }; #[no_mangle] @@ -42,16 +42,11 @@ mod malicious_ffi { // This is half-baked 
runtime::call_contract with changed `extra_urefs` // parameter with a desired payload that's supposed to bring the node down. -pub fn my_call_contract( - contract_hash: ContractHash, - _entry_point_name: &str, - runtime_args: RuntimeArgs, -) -> usize { +pub fn my_call_contract(contract_hash: ContractHash, entry_point_name: &str) -> usize { let (contract_hash_ptr, contract_hash_size, _bytes1) = to_ptr(contract_hash); - let malicious_string = vec![255, 255, 255, 255, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; - - let (runtime_args_ptr, runtime_args_size, _bytes2) = to_ptr(runtime_args); + let entry_point_name = ToBytes::to_bytes(entry_point_name).unwrap(); + let malicious_args = [255, 255, 255, 255, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; { let mut bytes_written = 0usize; @@ -59,10 +54,10 @@ pub fn my_call_contract( malicious_ffi::casper_call_contract( contract_hash_ptr, contract_hash_size, - malicious_string.as_ptr(), - malicious_string.len(), - runtime_args_ptr, - runtime_args_size, + entry_point_name.as_ptr(), + entry_point_name.len(), + malicious_args.as_ptr(), + malicious_args.len(), &mut bytes_written as *mut usize, ) }; @@ -76,19 +71,21 @@ pub extern "C" fn call() { let entry_points = { let mut entry_points = EntryPoints::new(); - let entry_point = EntryPoint::new( + let entry_point = EntityEntryPoint::new( "do_nothing", Parameters::default(), CLType::Unit, EntryPointAccess::Public, - EntryPointType::Contract, + EntryPointType::Called, + EntryPointPayment::Caller, ); entry_points.add_entry_point(entry_point); entry_points }; - let (contract_hash, _contract_version) = storage::new_contract(entry_points, None, None, None); + let (contract_hash, _contract_version) = + storage::new_contract(entry_points, None, None, None, None); - my_call_contract(contract_hash, "do_nothing", RuntimeArgs::default()); + my_call_contract(contract_hash, "do_nothing"); } diff --git a/smart_contracts/contracts/test/dictionary-call/Cargo.toml b/smart_contracts/contracts/test/dictionary-call/Cargo.toml new 
file mode 100644 index 0000000000..ad2aace843 --- /dev/null +++ b/smart_contracts/contracts/test/dictionary-call/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "dictionary-call" +version = "0.1.0" +authors = ["Michał Papierski "] +edition = "2021" + +[[bin]] +name = "dictionary_call" +path = "src/bin/main.rs" +bench = false +doctest = false +test = false + +[features] +default = ["casper-contract/default", "dictionary/default"] + +[dependencies] +casper-contract = { path = "../../../contract", default-features = false } +casper-types = { path = "../../../../types" } +dictionary = { path = "../dictionary", default-features = false } diff --git a/smart_contracts/contracts/test/dictionary-call/src/bin/main.rs b/smart_contracts/contracts/test/dictionary-call/src/bin/main.rs new file mode 100644 index 0000000000..ff8a4baa94 --- /dev/null +++ b/smart_contracts/contracts/test/dictionary-call/src/bin/main.rs @@ -0,0 +1,80 @@ +#![no_std] +#![no_main] + +extern crate alloc; + +use alloc::string::{String, ToString}; +use core::str::FromStr; + +use casper_contract::{ + contract_api::{runtime, storage}, + unwrap_or_revert::UnwrapOrRevert, +}; +use casper_types::{ + bytesrepr::FromBytes, contracts::ContractHash, AddressableEntityHash, CLTyped, RuntimeArgs, + URef, +}; + +use dictionary::{ + DEFAULT_DICTIONARY_NAME, DEFAULT_DICTIONARY_VALUE, INVALID_GET_DICTIONARY_ITEM_KEY_ENTRYPOINT, + INVALID_PUT_DICTIONARY_ITEM_KEY_ENTRYPOINT, +}; +use dictionary_call::{ + Operation, ARG_CONTRACT_HASH, ARG_FORGED_UREF, ARG_OPERATION, ARG_SHARE_UREF_ENTRYPOINT, + NEW_DICTIONARY_ITEM_KEY, NEW_DICTIONARY_VALUE, +}; + +/// Calls dictionary contract by hash as passed by `ARG_CONTRACT_HASH` argument and returns a +/// single value. 
+fn call_dictionary_contract(entrypoint: &str) -> T { + let contract_hash: AddressableEntityHash = runtime::get_named_arg(ARG_CONTRACT_HASH); + runtime::call_contract(contract_hash.into(), entrypoint, RuntimeArgs::default()) +} + +#[no_mangle] +pub extern "C" fn call() { + let operation = { + let arg_operation: String = runtime::get_named_arg(ARG_OPERATION); + Operation::from_str(&arg_operation).unwrap_or_revert() + }; + + match operation { + Operation::Write => { + let entrypoint: String = runtime::get_named_arg(ARG_SHARE_UREF_ENTRYPOINT); + let uref = call_dictionary_contract(&entrypoint); + let value: String = NEW_DICTIONARY_VALUE.to_string(); + storage::dictionary_put(uref, NEW_DICTIONARY_ITEM_KEY, value); + } + Operation::Read => { + let entrypoint: String = runtime::get_named_arg(ARG_SHARE_UREF_ENTRYPOINT); + let uref = call_dictionary_contract(&entrypoint); + let maybe_value = + storage::dictionary_get(uref, DEFAULT_DICTIONARY_NAME).unwrap_or_revert(); + // Whether the value exists or not we're mostly interested in validation of access + // rights + let value: String = maybe_value.unwrap_or_default(); + assert_eq!(value, DEFAULT_DICTIONARY_VALUE); + } + Operation::ForgedURefWrite => { + let uref: URef = runtime::get_named_arg(ARG_FORGED_UREF); + let value: String = NEW_DICTIONARY_VALUE.to_string(); + storage::dictionary_put(uref, NEW_DICTIONARY_ITEM_KEY, value); + } + Operation::InvalidPutDictionaryItemKey => { + let contract_hash: AddressableEntityHash = runtime::get_named_arg(ARG_CONTRACT_HASH); + runtime::call_contract( + contract_hash.into(), + INVALID_PUT_DICTIONARY_ITEM_KEY_ENTRYPOINT, + RuntimeArgs::default(), + ) + } + Operation::InvalidGetDictionaryItemKey => { + let contract_hash: AddressableEntityHash = runtime::get_named_arg(ARG_CONTRACT_HASH); + runtime::call_contract( + ContractHash::new(contract_hash.value()), + INVALID_GET_DICTIONARY_ITEM_KEY_ENTRYPOINT, + RuntimeArgs::default(), + ) + } + } +} diff --git 
a/smart_contracts/contracts/test/dictionary-call/src/lib.rs b/smart_contracts/contracts/test/dictionary-call/src/lib.rs new file mode 100644 index 0000000000..9a28734a98 --- /dev/null +++ b/smart_contracts/contracts/test/dictionary-call/src/lib.rs @@ -0,0 +1,58 @@ +#![no_std] + +extern crate alloc; + +use alloc::str::FromStr; + +use casper_types::ApiError; + +pub const ARG_OPERATION: &str = "operation"; +pub const ARG_CONTRACT_HASH: &str = "contract_hash"; +pub const OP_WRITE: &str = "write"; +pub const OP_READ: &str = "read"; +pub const OP_FORGED_UREF_WRITE: &str = "forged_uref_write"; +pub const OP_INVALID_PUT_DICTIONARY_ITEM_KEY: &str = "invalid_put_dictionary_item_key"; +pub const OP_INVALID_GET_DICTIONARY_ITEM_KEY: &str = "invalid_get_dictionary_item_key"; +pub const NEW_DICTIONARY_ITEM_KEY: &str = "New key"; +pub const NEW_DICTIONARY_VALUE: &str = "New value"; +pub const ARG_SHARE_UREF_ENTRYPOINT: &str = "share_uref_entrypoint"; +pub const ARG_FORGED_UREF: &str = "forged_uref"; + +#[repr(u16)] +pub enum Error { + InvalidOperation, +} + +impl From for ApiError { + fn from(error: Error) -> Self { + ApiError::User(error as u16) + } +} + +pub enum Operation { + Write, + Read, + ForgedURefWrite, + InvalidPutDictionaryItemKey, + InvalidGetDictionaryItemKey, +} + +impl FromStr for Operation { + type Err = Error; + + fn from_str(s: &str) -> Result { + if s == OP_WRITE { + Ok(Operation::Write) + } else if s == OP_READ { + Ok(Operation::Read) + } else if s == OP_FORGED_UREF_WRITE { + Ok(Operation::ForgedURefWrite) + } else if s == OP_INVALID_PUT_DICTIONARY_ITEM_KEY { + Ok(Operation::InvalidPutDictionaryItemKey) + } else if s == OP_INVALID_GET_DICTIONARY_ITEM_KEY { + Ok(Operation::InvalidGetDictionaryItemKey) + } else { + Err(Error::InvalidOperation) + } + } +} diff --git a/smart_contracts/contracts/test/dictionary-item-key-length/Cargo.toml b/smart_contracts/contracts/test/dictionary-item-key-length/Cargo.toml new file mode 100644 index 0000000000..12b0886d34 --- 
/dev/null +++ b/smart_contracts/contracts/test/dictionary-item-key-length/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "dictionary-item-key-length" +version = "0.1.0" +edition = "2021" + +[[bin]] +name = "dictionary-item-key-check" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/dictionary-item-key-length/src/main.rs b/smart_contracts/contracts/test/dictionary-item-key-length/src/main.rs new file mode 100644 index 0000000000..b98825d809 --- /dev/null +++ b/smart_contracts/contracts/test/dictionary-item-key-length/src/main.rs @@ -0,0 +1,33 @@ +#![no_std] +#![no_main] + +extern crate alloc; + +use alloc::string::String; + +use casper_contract::{ + contract_api::{runtime, storage}, + unwrap_or_revert::UnwrapOrRevert, +}; + +const OVERSIZED_DICTIONARY_ITEM_KEY: &str = "nZ1a27wa2MYty0KpPcl9WOYAFygPUWSqSTN5hyDi1MlfOk2RmykDdwM4HENeXEIUlnZ1a27wa2MYty0KpPcl9WOYAFygPUWSqSTN5hyDi1MlfOk2RmykDdwM4HENeXEIUl"; +const DICTIONARY_NAME: &str = "dictionary-name"; +const DICTIONARY_VALUE: &str = "dictionary-value"; +const DICTIONARY_OP: &str = "dictionary-operation"; +const OP_PUT: &str = "put"; +const OP_GET: &str = "get"; + +#[no_mangle] +pub extern "C" fn call() { + let dictionary_uref = storage::new_dictionary(DICTIONARY_NAME).unwrap_or_revert(); + let operation: String = runtime::get_named_arg(DICTIONARY_OP); + if operation == OP_GET { + let _ = storage::dictionary_get::(dictionary_uref, OVERSIZED_DICTIONARY_ITEM_KEY); + } else if operation == OP_PUT { + storage::dictionary_put( + dictionary_uref, + OVERSIZED_DICTIONARY_ITEM_KEY, + DICTIONARY_VALUE, + ); + } +} diff --git a/smart_contracts/contracts/test/dictionary-read/Cargo.toml b/smart_contracts/contracts/test/dictionary-read/Cargo.toml new file mode 100644 index 0000000000..2d7842bffc --- /dev/null +++ 
b/smart_contracts/contracts/test/dictionary-read/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "dictionary-read" +version = "0.1.0" +edition = "2018" + +[[bin]] +name = "dictionary_read" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/dictionary-read/src/main.rs b/smart_contracts/contracts/test/dictionary-read/src/main.rs new file mode 100644 index 0000000000..63f22aaf47 --- /dev/null +++ b/smart_contracts/contracts/test/dictionary-read/src/main.rs @@ -0,0 +1,40 @@ +#![no_std] +#![no_main] + +extern crate alloc; + +use alloc::string::{String, ToString}; + +use casper_contract::{ + contract_api::{runtime, storage}, + unwrap_or_revert::UnwrapOrRevert, +}; +use casper_types::{ApiError, Key}; + +const DICTIONARY_NAME: &str = "dictionary-name"; +const DICTIONARY_ITEM_KEY: &str = "dictionary-item-key"; +const DICTIONARY_VALUE: &str = "dictionary-value"; + +#[no_mangle] +pub extern "C" fn call() { + let dictionary_seed_uref = storage::new_dictionary(DICTIONARY_NAME).unwrap_or_revert(); + storage::dictionary_put( + dictionary_seed_uref, + DICTIONARY_ITEM_KEY, + DICTIONARY_VALUE.to_string(), + ); + let dictionary_address_key = + Key::dictionary(dictionary_seed_uref, DICTIONARY_ITEM_KEY.as_bytes()); + let value_via_read_address: String = storage::dictionary_read(dictionary_address_key) + .unwrap_or_revert() + .unwrap_or_revert(); + let value_via_get: String = storage::dictionary_get(dictionary_seed_uref, DICTIONARY_ITEM_KEY) + .unwrap_or_revert() + .unwrap_or_revert(); + if value_via_read_address != *DICTIONARY_VALUE { + runtime::revert(ApiError::User(16u16)) + } + if value_via_get != value_via_read_address { + runtime::revert(ApiError::User(17u16)) + } +} diff --git a/smart_contracts/contracts/test/dictionary/Cargo.toml b/smart_contracts/contracts/test/dictionary/Cargo.toml new file mode 100644 
index 0000000000..fc4f9ba6bc --- /dev/null +++ b/smart_contracts/contracts/test/dictionary/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "dictionary" +version = "0.1.0" +authors = ["Michał Papierski "] +edition = "2021" + +[[bin]] +name = "dictionary" +path = "src/bin/main.rs" +bench = false +doctest = false +test = false + +[features] +default = ["casper-contract/default"] + +[dependencies] +casper-contract = { path = "../../../contract", default-features = false } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/dictionary/src/bin/main.rs b/smart_contracts/contracts/test/dictionary/src/bin/main.rs new file mode 100644 index 0000000000..693a36b42b --- /dev/null +++ b/smart_contracts/contracts/test/dictionary/src/bin/main.rs @@ -0,0 +1,7 @@ +#![no_std] +#![no_main] + +#[no_mangle] +pub extern "C" fn call() { + dictionary::delegate() +} diff --git a/smart_contracts/contracts/test/dictionary/src/lib.rs b/smart_contracts/contracts/test/dictionary/src/lib.rs new file mode 100644 index 0000000000..903b1fbdd2 --- /dev/null +++ b/smart_contracts/contracts/test/dictionary/src/lib.rs @@ -0,0 +1,253 @@ +#![no_std] + +extern crate alloc; + +use alloc::{ + string::{String, ToString}, + vec::Vec, +}; +use core::mem::MaybeUninit; + +use casper_contract::{ + contract_api::{runtime, storage}, + unwrap_or_revert::UnwrapOrRevert, +}; +use casper_types::{ + addressable_entity::EntityKindTag, api_error, bytesrepr::ToBytes, contracts::NamedKeys, + AccessRights, AddressableEntityHash, ApiError, CLType, CLValue, EntityEntryPoint, + EntryPointAccess, EntryPointPayment, EntryPointType, EntryPoints, Key, URef, +}; + +pub const DICTIONARY_NAME: &str = "local"; +pub const DICTIONARY_PUT_KEY: &str = "item_key"; +pub const HELLO_PREFIX: &str = " Hello, "; +pub const WORLD_SUFFIX: &str = "world!"; +pub const MODIFY_WRITE_ENTRYPOINT: &str = "modify_write"; +pub const SHARE_RO_ENTRYPOINT: &str = "share_ro"; +pub const SHARE_W_ENTRYPOINT: &str = "share_w"; 
+pub const CONTRACT_HASH_NAME: &str = "contract_hash"; +const CONTRACT_PACKAGE_HASH_NAME: &str = "package_hash_name"; +pub const DEFAULT_DICTIONARY_NAME: &str = "Default Key"; +pub const DEFAULT_DICTIONARY_VALUE: &str = "Default Value"; +pub const DICTIONARY_REF: &str = "new_dictionary"; +pub const MALICIOUS_KEY_NAME: &str = "invalid dictionary name"; +pub const INVALID_PUT_DICTIONARY_ITEM_KEY_ENTRYPOINT: &str = "invalid_put_dictionary_item_key"; +pub const INVALID_GET_DICTIONARY_ITEM_KEY_ENTRYPOINT: &str = "invalid_get_dictionary_item_key"; + +#[no_mangle] +fn modify_write() { + // Preserve for further modifications + let dictionary_seed_uref = match runtime::get_key(DICTIONARY_NAME) { + Some(key) => key.into_uref().unwrap_or_revert(), + None => runtime::revert(ApiError::GetKey), + }; + + // Appends " Hello, world!" to a [66; 32] dictionary with spaces trimmed. + // Two runs should yield value "Hello, world! Hello, world!" read from dictionary + let mut res: String = storage::dictionary_get(dictionary_seed_uref, DICTIONARY_PUT_KEY) + .unwrap_or_default() + .unwrap_or_default(); + + res.push_str(HELLO_PREFIX); + // Write "Hello, " + storage::dictionary_put(dictionary_seed_uref, DICTIONARY_PUT_KEY, res); + + // Read (this should exercise cache) + let mut res: String = storage::dictionary_get(dictionary_seed_uref, DICTIONARY_PUT_KEY) + .unwrap_or_revert() + .unwrap_or_revert(); + // Append + res.push_str(WORLD_SUFFIX); + // Write + storage::dictionary_put( + dictionary_seed_uref, + DICTIONARY_PUT_KEY, + res.trim().to_string(), + ); +} + +fn get_dictionary_seed_uref() -> URef { + let key = runtime::get_key(DICTIONARY_NAME).unwrap_or_revert(); + key.into_uref().unwrap_or_revert() +} + +#[no_mangle] +fn share_ro() { + let uref_ro = get_dictionary_seed_uref().into_read(); + runtime::ret(CLValue::from_t(uref_ro).unwrap_or_revert()) +} + +#[no_mangle] +fn share_w() { + let uref_w = get_dictionary_seed_uref().into_write(); + 
runtime::ret(CLValue::from_t(uref_w).unwrap_or_revert()) +} + +fn to_ptr(t: T) -> (*const u8, usize, Vec) { + let bytes = t.into_bytes().unwrap_or_revert(); + let ptr = bytes.as_ptr(); + let size = bytes.len(); + (ptr, size, bytes) +} + +#[no_mangle] +fn invalid_put_dictionary_item_key() { + let dictionary_seed_uref = get_dictionary_seed_uref(); + let (uref_ptr, uref_size, _bytes1) = to_ptr(dictionary_seed_uref); + + let bad_dictionary_item_key = alloc::vec![0, 159, 146, 150]; + let bad_dictionary_item_key_ptr = bad_dictionary_item_key.as_ptr(); + let bad_dictionary_item_key_size = bad_dictionary_item_key.len(); + + let cl_value = CLValue::unit(); + let (cl_value_ptr, cl_value_size, _bytes) = to_ptr(cl_value); + + let result = unsafe { + let ret = test_ffi::casper_dictionary_put( + uref_ptr, + uref_size, + bad_dictionary_item_key_ptr, + bad_dictionary_item_key_size, + cl_value_ptr, + cl_value_size, + ); + api_error::result_from(ret) + }; + + result.unwrap_or_revert() +} + +#[no_mangle] +fn invalid_get_dictionary_item_key() { + let dictionary_seed_uref = get_dictionary_seed_uref(); + let (uref_ptr, uref_size, _bytes1) = to_ptr(dictionary_seed_uref); + + let bad_dictionary_item_key = alloc::vec![0, 159, 146, 150]; + let bad_dictionary_item_key_ptr = bad_dictionary_item_key.as_ptr(); + let bad_dictionary_item_key_size = bad_dictionary_item_key.len(); + + let _value_size = { + let mut value_size = MaybeUninit::uninit(); + let ret = unsafe { + test_ffi::casper_dictionary_get( + uref_ptr, + uref_size, + bad_dictionary_item_key_ptr, + bad_dictionary_item_key_size, + value_size.as_mut_ptr(), + ) + }; + match api_error::result_from(ret) { + Ok(_) => unsafe { value_size.assume_init() }, + Err(e) => runtime::revert(e), + } + }; +} + +mod test_ffi { + extern "C" { + pub fn casper_dictionary_put( + uref_ptr: *const u8, + uref_size: usize, + key_ptr: *const u8, + key_size: usize, + value_ptr: *const u8, + value_size: usize, + ) -> i32; + + pub fn casper_dictionary_get( + 
uref_ptr: *const u8, + uref_size: usize, + key_bytes_ptr: *const u8, + key_bytes_size: usize, + output_size: *mut usize, + ) -> i32; + } +} + +pub fn delegate() { + // Empty key name is invalid + assert!(storage::new_dictionary("").is_err()); + // Assert that we don't have this key yet + assert!(!runtime::has_key(MALICIOUS_KEY_NAME)); + // Create and put a new dictionary in named keys + storage::new_dictionary(MALICIOUS_KEY_NAME).unwrap(); + // Can't do it twice + assert!(storage::new_dictionary(MALICIOUS_KEY_NAME).is_err()); + + let mut entry_points = EntryPoints::new(); + entry_points.add_entry_point(EntityEntryPoint::new( + MODIFY_WRITE_ENTRYPOINT, + Vec::new(), + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + )); + entry_points.add_entry_point(EntityEntryPoint::new( + SHARE_RO_ENTRYPOINT, + Vec::new(), + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + )); + entry_points.add_entry_point(EntityEntryPoint::new( + SHARE_W_ENTRYPOINT, + Vec::new(), + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + )); + entry_points.add_entry_point(EntityEntryPoint::new( + INVALID_PUT_DICTIONARY_ITEM_KEY_ENTRYPOINT, + Vec::new(), + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + )); + entry_points.add_entry_point(EntityEntryPoint::new( + INVALID_GET_DICTIONARY_ITEM_KEY_ENTRYPOINT, + Vec::new(), + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + )); + let named_keys = { + let uref = { + let dictionary_uref = storage::new_dictionary(DICTIONARY_REF).unwrap_or_revert(); + assert_eq!( + dictionary_uref.access_rights() & AccessRights::READ_ADD_WRITE, + AccessRights::READ_ADD_WRITE + ); + + storage::dictionary_put( + dictionary_uref, + DEFAULT_DICTIONARY_NAME, + DEFAULT_DICTIONARY_VALUE, + ); + dictionary_uref + }; + let mut named_keys = 
NamedKeys::new(); + named_keys.insert(DICTIONARY_NAME.to_string(), uref.into()); + named_keys + }; + + let (entity_hash, _version) = storage::new_contract( + entry_points, + Some(named_keys), + Some(CONTRACT_PACKAGE_HASH_NAME.to_string()), + None, + None, + ); + + let entity_key = Key::addressable_entity_key( + EntityKindTag::SmartContract, + AddressableEntityHash::new(entity_hash.value()), + ); + + runtime::put_key(CONTRACT_HASH_NAME, entity_key); +} diff --git a/smart_contracts/contracts/test/do-nothing-stored-caller/Cargo.toml b/smart_contracts/contracts/test/do-nothing-stored-caller/Cargo.toml index ca3ac4af16..2b6bc3b1b0 100644 --- a/smart_contracts/contracts/test/do-nothing-stored-caller/Cargo.toml +++ b/smart_contracts/contracts/test/do-nothing-stored-caller/Cargo.toml @@ -1,8 +1,8 @@ [package] name = "do-nothing-stored-caller" version = "0.1.0" -authors = ["Ed Hastings "] -edition = "2018" +authors = ["Ed Hastings "] +edition = "2021" [[bin]] name = "do_nothing_stored_caller" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/do-nothing-stored-caller/src/main.rs b/smart_contracts/contracts/test/do-nothing-stored-caller/src/main.rs index bef269bbe7..d666aa6017 100644 --- a/smart_contracts/contracts/test/do-nothing-stored-caller/src/main.rs +++ b/smart_contracts/contracts/test/do-nothing-stored-caller/src/main.rs @@ -6,28 +6,30 @@ extern crate alloc; use alloc::string::String; use casper_contract::contract_api::runtime; -use casper_types::{contracts::ContractVersion, runtime_args, ContractPackageHash, RuntimeArgs}; +use casper_types::{runtime_args, EntityVersion, PackageHash}; const ENTRY_FUNCTION_NAME: &str = "delegate"; const PURSE_NAME_ARG_NAME: &str = "purse_name"; const ARG_CONTRACT_PACKAGE: &str = "contract_package"; const 
ARG_NEW_PURSE_NAME: &str = "new_purse_name"; +const ARG_MAJOR_VERSION: &str = "major_version"; const ARG_VERSION: &str = "version"; #[no_mangle] pub extern "C" fn call() { - let contract_package_hash: ContractPackageHash = runtime::get_named_arg(ARG_CONTRACT_PACKAGE); + let contract_package_hash: PackageHash = runtime::get_named_arg(ARG_CONTRACT_PACKAGE); let new_purse_name: String = runtime::get_named_arg(ARG_NEW_PURSE_NAME); - let version_number: ContractVersion = runtime::get_named_arg(ARG_VERSION); - let contract_version = Some(version_number); + let major_version: u32 = runtime::get_named_arg(ARG_MAJOR_VERSION); + let version_number: EntityVersion = runtime::get_named_arg(ARG_VERSION); let runtime_args = runtime_args! { PURSE_NAME_ARG_NAME => new_purse_name, }; - runtime::call_versioned_contract( - contract_package_hash, - contract_version, + runtime::call_package_version( + contract_package_hash.into(), + Some(major_version), + Some(version_number), ENTRY_FUNCTION_NAME, runtime_args, ) diff --git a/smart_contracts/contracts/test/do-nothing-stored-upgrader/Cargo.toml b/smart_contracts/contracts/test/do-nothing-stored-upgrader/Cargo.toml index e3bc421ade..86316b9fef 100644 --- a/smart_contracts/contracts/test/do-nothing-stored-upgrader/Cargo.toml +++ b/smart_contracts/contracts/test/do-nothing-stored-upgrader/Cargo.toml @@ -1,8 +1,8 @@ [package] name = "do-nothing-stored-upgrader" version = "0.1.0" -authors = ["Ed Hastings "] -edition = "2018" +authors = ["Ed Hastings "] +edition = "2021" [[bin]] name = "do_nothing_stored_upgrader" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } create-purse-01 = { path = "../create-purse-01" } diff --git a/smart_contracts/contracts/test/do-nothing-stored-upgrader/src/main.rs b/smart_contracts/contracts/test/do-nothing-stored-upgrader/src/main.rs index bd9a6e2656..5e7f89962a 100644 --- 
a/smart_contracts/contracts/test/do-nothing-stored-upgrader/src/main.rs +++ b/smart_contracts/contracts/test/do-nothing-stored-upgrader/src/main.rs @@ -1,9 +1,13 @@ #![no_std] #![no_main] +#[macro_use] extern crate alloc; -use alloc::{string::ToString, vec::Vec}; +use alloc::{ + collections::BTreeMap, + string::{String, ToString}, +}; use casper_contract::{ contract_api::{runtime, storage}, unwrap_or_revert::UnwrapOrRevert, @@ -11,17 +15,20 @@ use casper_contract::{ use core::convert::TryInto; use casper_types::{ - contracts::{EntryPoint, EntryPointAccess, EntryPointType, EntryPoints, NamedKeys}, - CLType, ContractPackageHash, Key, URef, + addressable_entity::{EntityEntryPoint, EntryPointAccess, EntryPointType, EntryPoints}, + contracts::NamedKeys, + CLType, CLTyped, EntryPointPayment, Key, PackageHash, Parameter, URef, }; const ENTRY_FUNCTION_NAME: &str = "delegate"; const DO_NOTHING_PACKAGE_HASH_KEY_NAME: &str = "do_nothing_package_hash"; const DO_NOTHING_ACCESS_KEY_NAME: &str = "do_nothing_access"; const CONTRACT_VERSION: &str = "contract_version"; +const ARG_PURSE_NAME: &str = "purse_name"; #[no_mangle] pub extern "C" fn delegate() { + let _named_keys = runtime::list_named_keys(); runtime::put_key("called_do_nothing_ver_2", Key::Hash([1u8; 32])); create_purse_01::delegate() } @@ -31,32 +38,36 @@ pub extern "C" fn call() { let entry_points = { let mut entry_points = EntryPoints::new(); - let delegate = EntryPoint::new( + let delegate = EntityEntryPoint::new( ENTRY_FUNCTION_NAME.to_string(), - Vec::new(), + vec![Parameter::new(ARG_PURSE_NAME, String::cl_type())], CLType::Unit, EntryPointAccess::Public, - EntryPointType::Session, + EntryPointType::Called, + EntryPointPayment::Caller, ); entry_points.add_entry_point(delegate); entry_points }; - let do_nothing_package_hash: ContractPackageHash = - runtime::get_key(DO_NOTHING_PACKAGE_HASH_KEY_NAME) - .unwrap_or_revert() - .into_hash() - .unwrap() - .into(); + let do_nothing_package_hash: PackageHash = 
runtime::get_key(DO_NOTHING_PACKAGE_HASH_KEY_NAME) + .unwrap_or_revert() + .into_hash_addr() + .unwrap_or_revert() + .into(); let _do_nothing_uref: URef = runtime::get_key(DO_NOTHING_ACCESS_KEY_NAME) .unwrap_or_revert() .try_into() .unwrap_or_revert(); - let (contract_hash, contract_version) = - storage::add_contract_version(do_nothing_package_hash, entry_points, NamedKeys::new()); + let (contract_hash, contract_version) = storage::add_contract_version( + do_nothing_package_hash.into(), + entry_points, + NamedKeys::new(), + BTreeMap::new(), + ); runtime::put_key(CONTRACT_VERSION, storage::new_uref(contract_version).into()); - runtime::put_key("end of upgrade", contract_hash.into()); + runtime::put_key("end of upgrade", Key::Hash(contract_hash.value())); } diff --git a/smart_contracts/contracts/test/do-nothing-stored/Cargo.toml b/smart_contracts/contracts/test/do-nothing-stored/Cargo.toml index 9177385db1..8a52d947e0 100644 --- a/smart_contracts/contracts/test/do-nothing-stored/Cargo.toml +++ b/smart_contracts/contracts/test/do-nothing-stored/Cargo.toml @@ -1,8 +1,8 @@ [package] name = "do-nothing-stored" version = "0.1.0" -authors = ["Ed Hastings "] -edition = "2018" +authors = ["Ed Hastings "] +edition = "2021" [[bin]] name = "do_nothing_stored" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/do-nothing-stored/src/main.rs b/smart_contracts/contracts/test/do-nothing-stored/src/main.rs index dcc443be01..f99d4f5263 100644 --- a/smart_contracts/contracts/test/do-nothing-stored/src/main.rs +++ b/smart_contracts/contracts/test/do-nothing-stored/src/main.rs @@ -1,14 +1,10 @@ #![no_std] #![no_main] -extern crate alloc; - -use alloc::{string::ToString, vec::Vec}; - use casper_contract::contract_api::{runtime, storage}; use casper_types::{ - 
contracts::{EntryPoint, EntryPoints}, - CLType, EntryPointAccess, EntryPointType, + addressable_entity::{EntityEntryPoint, EntryPoints, Parameters}, + CLType, EntryPointAccess, EntryPointPayment, EntryPointType, Key, }; const ENTRY_FUNCTION_NAME: &str = "delegate"; @@ -18,18 +14,21 @@ const ACCESS_KEY_NAME: &str = "do_nothing_access"; const CONTRACT_VERSION: &str = "contract_version"; #[no_mangle] -pub extern "C" fn delegate() {} +pub extern "C" fn delegate() { + let _named_keys = runtime::list_named_keys(); +} #[no_mangle] pub extern "C" fn call() { let entry_points = { let mut entry_points = EntryPoints::new(); - let entry_point = EntryPoint::new( - ENTRY_FUNCTION_NAME.to_string(), - Vec::new(), + let entry_point = EntityEntryPoint::new( + ENTRY_FUNCTION_NAME, + Parameters::new(), CLType::Unit, EntryPointAccess::Public, - EntryPointType::Contract, + EntryPointType::Called, + EntryPointPayment::Caller, ); entry_points.add_entry_point(entry_point); entry_points @@ -38,10 +37,11 @@ pub extern "C" fn call() { let (contract_hash, contract_version) = storage::new_contract( entry_points, None, - Some(PACKAGE_HASH_KEY_NAME.to_string()), - Some(ACCESS_KEY_NAME.to_string()), + Some(PACKAGE_HASH_KEY_NAME.into()), + Some(ACCESS_KEY_NAME.into()), + None, ); runtime::put_key(CONTRACT_VERSION, storage::new_uref(contract_version).into()); - runtime::put_key(HASH_KEY_NAME, contract_hash.into()); + runtime::put_key(HASH_KEY_NAME, Key::Hash(contract_hash.value())); } diff --git a/smart_contracts/contracts/test/do-nothing/Cargo.toml b/smart_contracts/contracts/test/do-nothing/Cargo.toml index 866df025ab..bde4bcc0e6 100644 --- a/smart_contracts/contracts/test/do-nothing/Cargo.toml +++ b/smart_contracts/contracts/test/do-nothing/Cargo.toml @@ -2,7 +2,7 @@ name = "do-nothing" version = "0.1.0" authors = ["Michael Birch "] -edition = "2018" +edition = "2021" [[bin]] name = "do_nothing" @@ -11,8 +11,5 @@ bench = false doctest = false test = false -[features] -std = 
["casper-contract/std"] - [dependencies] casper-contract = { path = "../../../contract" } diff --git a/smart_contracts/contracts/test/do-nothing/src/main.rs b/smart_contracts/contracts/test/do-nothing/src/main.rs index e4c00cde04..0b60e16830 100644 --- a/smart_contracts/contracts/test/do-nothing/src/main.rs +++ b/smart_contracts/contracts/test/do-nothing/src/main.rs @@ -1,11 +1,9 @@ #![no_std] #![no_main] -// Required to bring `#[panic_handler]` from `contract::handlers` into scope. -#[allow(unused_imports, clippy::single_component_path_imports)] -use casper_contract; +use casper_contract::contract_api::runtime; #[no_mangle] pub extern "C" fn call() { - // This body intentionally left empty. + let _named_keys = runtime::list_named_keys(); } diff --git a/smart_contracts/contracts/test/ee-1071-regression/Cargo.toml b/smart_contracts/contracts/test/ee-1071-regression/Cargo.toml index 68ca5deaf6..6db90cd03d 100644 --- a/smart_contracts/contracts/test/ee-1071-regression/Cargo.toml +++ b/smart_contracts/contracts/test/ee-1071-regression/Cargo.toml @@ -1,8 +1,8 @@ [package] name = "ee-1071-regression" version = "0.1.0" -authors = ["Michał Papierski "] -edition = "2018" +authors = ["Michał Papierski "] +edition = "2021" [[bin]] name = "ee_1071_regression" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/ee-1071-regression/src/main.rs b/smart_contracts/contracts/test/ee-1071-regression/src/main.rs index 6b93421eff..c46c528f42 100644 --- a/smart_contracts/contracts/test/ee-1071-regression/src/main.rs +++ b/smart_contracts/contracts/test/ee-1071-regression/src/main.rs @@ -3,7 +3,8 @@ use casper_contract::contract_api::{runtime, storage}; use casper_types::{ - contracts::Parameters, CLType, EntryPoint, EntryPointAccess, EntryPointType, EntryPoints, + 
addressable_entity::Parameters, AddressableEntityHash, CLType, EntityEntryPoint, + EntryPointAccess, EntryPointPayment, EntryPointType, EntryPoints, Key, }; const CONTRACT_HASH_NAME: &str = "contract"; @@ -20,19 +21,24 @@ pub extern "C" fn call() { let entry_points = { let mut entry_points = EntryPoints::new(); - let entry_point = EntryPoint::new( + let entry_point = EntityEntryPoint::new( NEW_UREF, Parameters::default(), CLType::Unit, EntryPointAccess::Public, - EntryPointType::Contract, + EntryPointType::Called, + EntryPointPayment::Caller, ); entry_points.add_entry_point(entry_point); entry_points }; - let (contract_hash, _contract_version) = storage::new_contract(entry_points, None, None, None); + let (contract_hash, _contract_version) = + storage::new_contract(entry_points, None, None, None, None); - runtime::put_key(CONTRACT_HASH_NAME, contract_hash.into()); + runtime::put_key( + CONTRACT_HASH_NAME, + Key::contract_entity_key(AddressableEntityHash::new(contract_hash.value())), + ); } diff --git a/smart_contracts/contracts/test/ee-1129-regression/Cargo.toml b/smart_contracts/contracts/test/ee-1129-regression/Cargo.toml index a05b57a9c1..ce9541f8b9 100644 --- a/smart_contracts/contracts/test/ee-1129-regression/Cargo.toml +++ b/smart_contracts/contracts/test/ee-1129-regression/Cargo.toml @@ -1,8 +1,8 @@ [package] name = "ee-1129-regression" version = "0.1.0" -authors = ["Michał Papierski "] -edition = "2018" +authors = ["Michał Papierski "] +edition = "2021" [[bin]] name = "ee_1129_regression" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/ee-1129-regression/src/main.rs b/smart_contracts/contracts/test/ee-1129-regression/src/main.rs index 51ec15db1d..2136a7127f 100644 --- 
a/smart_contracts/contracts/test/ee-1129-regression/src/main.rs +++ b/smart_contracts/contracts/test/ee-1129-regression/src/main.rs @@ -7,7 +7,8 @@ use alloc::string::ToString; use casper_contract::contract_api::{runtime, storage, system}; use casper_types::{ - contracts::Parameters, CLType, EntryPoint, EntryPointAccess, EntryPointType, EntryPoints, + addressable_entity::Parameters, AddressableEntityHash, CLType, EntityEntryPoint, + EntryPointAccess, EntryPointPayment, EntryPointType, EntryPoints, Key, }; const ENTRY_POINT_NAME: &str = "create_purse"; @@ -17,8 +18,8 @@ const CONTRACT_PACKAGE_KEY: &str = "contract_package"; #[no_mangle] pub extern "C" fn create_purse() { - // This should exercise common issues with unsafe providers in mint: new_uref, write_local and - // put_key. + // This should exercise common issues with unsafe providers in mint: new_uref, dictionary_put + // and put_key. let _purse = system::create_purse(); } @@ -27,12 +28,13 @@ pub extern "C" fn call() { let entry_points = { let mut entry_points = EntryPoints::new(); - let entry_point = EntryPoint::new( + let entry_point = EntityEntryPoint::new( ENTRY_POINT_NAME, Parameters::default(), CLType::Unit, EntryPointAccess::Public, - EntryPointType::Contract, + EntryPointType::Called, + EntryPointPayment::Caller, ); entry_points.add_entry_point(entry_point); @@ -45,7 +47,11 @@ pub extern "C" fn call() { None, Some(CONTRACT_PACKAGE_KEY.to_string()), Some(ACCESS_KEY.to_string()), + None, ); - runtime::put_key(CONTRACT_KEY, contract_hash.into()); + runtime::put_key( + CONTRACT_KEY, + Key::contract_entity_key(AddressableEntityHash::new(contract_hash.value())), + ); } diff --git a/smart_contracts/contracts/test/ee-1217-regression/Cargo.toml b/smart_contracts/contracts/test/ee-1217-regression/Cargo.toml new file mode 100644 index 0000000000..e6d16e54be --- /dev/null +++ b/smart_contracts/contracts/test/ee-1217-regression/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "ee-1217-regression" +version = "0.1.0" 
+authors = ["Daniel Werner "] +edition = "2021" + +[[bin]] +name = "ee_1217_regression" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/ee-1217-regression/src/main.rs b/smart_contracts/contracts/test/ee-1217-regression/src/main.rs new file mode 100644 index 0000000000..67b0dbbf1d --- /dev/null +++ b/smart_contracts/contracts/test/ee-1217-regression/src/main.rs @@ -0,0 +1,239 @@ +#![no_std] +#![no_main] + +extern crate alloc; + +use alloc::{string::ToString, vec}; + +// casper_contract is required for it's [global_alloc] as well as handlers (such as panic_handler) +use casper_contract::contract_api::{runtime, storage, system}; +use casper_types::{ + runtime_args, system::auction, CLType, EntityEntryPoint, EntryPointAccess, EntryPointPayment, + EntryPointType, EntryPoints, PublicKey, U512, +}; + +const PACKAGE_NAME: &str = "call_auction"; +const PACKAGE_ACCESS_KEY_NAME: &str = "call_auction_access"; + +const METHOD_ADD_BID_CONTRACT_NAME: &str = "add_bid_contract"; +const METHOD_ADD_BID_SESSION_NAME: &str = "add_bid_session"; +const METHOD_WITHDRAW_BID_CONTRACT_NAME: &str = "withdraw_bid_contract"; +const METHOD_WITHDRAW_BID_SESSION_NAME: &str = "withdraw_bid_session"; +const METHOD_DELEGATE_CONTRACT_NAME: &str = "delegate_contract"; +const METHOD_DELEGATE_SESSION_NAME: &str = "delegate_session"; +const METHOD_UNDELEGATE_CONTRACT_NAME: &str = "undelegate_contract"; +const METHOD_UNDELEGATE_SESSION_NAME: &str = "undelegate_session"; +const METHOD_ACTIVATE_BID_CONTRACT_NAME: &str = "activate_bid_contract"; +const METHOD_ACTIVATE_BID_SESSION_NAME: &str = "activate_bid_session"; + +fn add_bid() { + let public_key: PublicKey = runtime::get_named_arg(auction::ARG_PUBLIC_KEY); + let auction = system::get_auction(); + let args = runtime_args! 
{ + auction::ARG_PUBLIC_KEY => public_key, + auction::ARG_AMOUNT => U512::from(2), // smaller amount results in Error::BondTooSmall + auction::ARG_DELEGATION_RATE => 42u8, + }; + runtime::call_contract::(auction, auction::METHOD_ADD_BID, args); +} + +#[no_mangle] +pub extern "C" fn add_bid_contract() { + add_bid() +} + +#[no_mangle] +pub extern "C" fn add_bid_session() { + add_bid() +} + +pub fn withdraw_bid() { + let public_key: PublicKey = runtime::get_named_arg(auction::ARG_PUBLIC_KEY); + let auction = system::get_auction(); + let args = runtime_args! { + auction::ARG_PUBLIC_KEY => public_key, + auction::ARG_AMOUNT => U512::one(), + }; + runtime::call_contract::(auction, auction::METHOD_WITHDRAW_BID, args); +} + +#[no_mangle] +pub extern "C" fn withdraw_bid_contract() { + withdraw_bid() +} + +#[no_mangle] +pub extern "C" fn withdraw_bid_session() { + withdraw_bid() +} + +fn activate_bid() { + let public_key: PublicKey = runtime::get_named_arg(auction::ARG_VALIDATOR); + let auction = system::get_auction(); + let args = runtime_args! { + auction::ARG_VALIDATOR => public_key, + }; + runtime::call_contract::<()>(auction, auction::METHOD_ACTIVATE_BID, args); +} + +#[no_mangle] +pub extern "C" fn activate_bid_contract() { + activate_bid() +} + +#[no_mangle] +pub extern "C" fn activate_bid_session() { + activate_bid() +} + +#[no_mangle] +pub extern "C" fn delegate() { + let delegator_public_key: PublicKey = runtime::get_named_arg(auction::ARG_DELEGATOR); + let validator_public_key: PublicKey = runtime::get_named_arg(auction::ARG_VALIDATOR); + let auction = system::get_auction(); + let args = runtime_args! 
{ + auction::ARG_DELEGATOR => delegator_public_key, + auction::ARG_VALIDATOR => validator_public_key, + auction::ARG_AMOUNT => U512::one(), + }; + runtime::call_contract::(auction, auction::METHOD_DELEGATE, args); +} + +#[no_mangle] +pub extern "C" fn delegate_contract() { + delegate() +} + +#[no_mangle] +pub extern "C" fn delegate_session() { + delegate() +} + +#[no_mangle] +pub extern "C" fn undelegate() { + let delegator_public_key: PublicKey = runtime::get_named_arg(auction::ARG_DELEGATOR); + let validator_public_key: PublicKey = runtime::get_named_arg(auction::ARG_VALIDATOR); + let auction = system::get_auction(); + let args = runtime_args! { + auction::ARG_DELEGATOR => delegator_public_key, + auction::ARG_VALIDATOR => validator_public_key, + auction::ARG_AMOUNT => U512::one(), + }; + runtime::call_contract::(auction, auction::METHOD_UNDELEGATE, args); +} + +#[no_mangle] +pub extern "C" fn undelegate_contract() { + undelegate() +} + +#[no_mangle] +pub extern "C" fn undelegate_session() { + undelegate() +} + +#[no_mangle] +pub extern "C" fn call() { + let entry_points = { + let mut entry_points = EntryPoints::new(); + let add_bid_session_entry_point = EntityEntryPoint::new( + METHOD_ADD_BID_SESSION_NAME.to_string(), + vec![], + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + let add_bid_contract_entry_point = EntityEntryPoint::new( + METHOD_ADD_BID_CONTRACT_NAME.to_string(), + vec![], + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + let withdraw_bid_session_entry_point = EntityEntryPoint::new( + METHOD_WITHDRAW_BID_SESSION_NAME.to_string(), + vec![], + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + let withdraw_bid_contract_entry_point = EntityEntryPoint::new( + METHOD_WITHDRAW_BID_CONTRACT_NAME.to_string(), + vec![], + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + 
EntryPointPayment::Caller, + ); + let delegate_session_entry_point = EntityEntryPoint::new( + METHOD_DELEGATE_SESSION_NAME.to_string(), + vec![], + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + let delegate_contract_entry_point = EntityEntryPoint::new( + METHOD_DELEGATE_CONTRACT_NAME.to_string(), + vec![], + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + let undelegate_session_entry_point = EntityEntryPoint::new( + METHOD_UNDELEGATE_SESSION_NAME.to_string(), + vec![], + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + let undelegate_contract_entry_point = EntityEntryPoint::new( + METHOD_UNDELEGATE_CONTRACT_NAME.to_string(), + vec![], + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + let activate_bid_session_entry_point = EntityEntryPoint::new( + METHOD_ACTIVATE_BID_SESSION_NAME.to_string(), + vec![], + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + let activate_bid_contract_entry_point = EntityEntryPoint::new( + METHOD_ACTIVATE_BID_CONTRACT_NAME.to_string(), + vec![], + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + entry_points.add_entry_point(add_bid_session_entry_point); + entry_points.add_entry_point(add_bid_contract_entry_point); + entry_points.add_entry_point(withdraw_bid_session_entry_point); + entry_points.add_entry_point(withdraw_bid_contract_entry_point); + entry_points.add_entry_point(delegate_session_entry_point); + entry_points.add_entry_point(delegate_contract_entry_point); + entry_points.add_entry_point(undelegate_session_entry_point); + entry_points.add_entry_point(undelegate_contract_entry_point); + entry_points.add_entry_point(activate_bid_session_entry_point); + 
entry_points.add_entry_point(activate_bid_contract_entry_point); + entry_points + }; + + let (_contract_hash, _contract_version) = storage::new_contract( + entry_points, + None, + Some(PACKAGE_NAME.to_string()), + Some(PACKAGE_ACCESS_KEY_NAME.to_string()), + None, + ); +} diff --git a/smart_contracts/contracts/test/ee-1225-regression/Cargo.toml b/smart_contracts/contracts/test/ee-1225-regression/Cargo.toml new file mode 100644 index 0000000000..61ae89d3e0 --- /dev/null +++ b/smart_contracts/contracts/test/ee-1225-regression/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "ee-1225-regression" +version = "0.1.0" +authors = ["Michał Papierski "] +edition = "2021" + +[[bin]] +name = "ee_1225_regression" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/ee-1225-regression/src/main.rs b/smart_contracts/contracts/test/ee-1225-regression/src/main.rs new file mode 100644 index 0000000000..5d5d87d349 --- /dev/null +++ b/smart_contracts/contracts/test/ee-1225-regression/src/main.rs @@ -0,0 +1,60 @@ +#![no_std] +#![no_main] + +use casper_contract::{ + contract_api::{account, runtime, system}, + unwrap_or_revert::UnwrapOrRevert, +}; +use casper_types::{ + runtime_args, system::handle_payment, ApiError, Phase, RuntimeArgs, URef, U512, +}; + +const ARG_AMOUNT: &str = "amount"; + +#[repr(u16)] +enum Error { + InvalidPhase, +} + +impl From for ApiError { + fn from(e: Error) -> Self { + ApiError::User(e as u16) + } +} + +fn get_payment_purse() -> URef { + runtime::call_contract( + system::get_handle_payment(), + handle_payment::METHOD_GET_PAYMENT_PURSE, + RuntimeArgs::default(), + ) +} + +fn set_refund_purse(new_refund_purse: URef) { + let args = runtime_args! 
{ + handle_payment::ARG_PURSE => new_refund_purse, + }; + + runtime::call_contract( + system::get_handle_payment(), + handle_payment::METHOD_SET_REFUND_PURSE, + args, + ) +} + +#[no_mangle] +pub extern "C" fn call() { + if runtime::get_phase() != Phase::Payment { + runtime::revert(Error::InvalidPhase); + } + + let amount: U512 = runtime::get_named_arg(ARG_AMOUNT); + + // Attempt to get refund into a payment purse. + let payment_purse = get_payment_purse(); + set_refund_purse(payment_purse); + + // transfer amount from named purse to payment purse, which will be used to pay for execution + system::transfer_from_purse_to_purse(account::get_main_purse(), payment_purse, amount, None) + .unwrap_or_revert(); +} diff --git a/smart_contracts/contracts/test/ee-221-regression/Cargo.toml b/smart_contracts/contracts/test/ee-221-regression/Cargo.toml index 9720d3140f..7b8c0172c8 100644 --- a/smart_contracts/contracts/test/ee-221-regression/Cargo.toml +++ b/smart_contracts/contracts/test/ee-221-regression/Cargo.toml @@ -2,7 +2,7 @@ name = "ee-221-regression" version = "0.1.0" authors = ["Michał Papierski "] -edition = "2018" +edition = "2021" [[bin]] name = "ee_221_regression" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/ee-401-regression-call/Cargo.toml b/smart_contracts/contracts/test/ee-401-regression-call/Cargo.toml index fd10263c3f..739f703963 100644 --- a/smart_contracts/contracts/test/ee-401-regression-call/Cargo.toml +++ b/smart_contracts/contracts/test/ee-401-regression-call/Cargo.toml @@ -2,7 +2,7 @@ name = "ee-401-regression-call" version = "0.1.0" authors = ["Henry Till "] -edition = "2018" +edition = "2021" [[bin]] name = "ee_401_regression_call" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = 
["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/ee-401-regression-call/src/main.rs b/smart_contracts/contracts/test/ee-401-regression-call/src/main.rs index 1179b5bb54..5df981ba88 100644 --- a/smart_contracts/contracts/test/ee-401-regression-call/src/main.rs +++ b/smart_contracts/contracts/test/ee-401-regression-call/src/main.rs @@ -9,17 +9,18 @@ use casper_contract::{ contract_api::{runtime, storage}, unwrap_or_revert::UnwrapOrRevert, }; -use casper_types::{ApiError, ContractHash, RuntimeArgs, URef}; +use casper_types::{AddressableEntityHash, ApiError, RuntimeArgs, URef}; #[no_mangle] pub extern "C" fn call() { - let contract_hash: ContractHash = runtime::get_key("hello_ext") + let contract_hash: AddressableEntityHash = runtime::get_key("hello_ext") .unwrap_or_revert_with(ApiError::GetKey) - .into_hash() + .into_entity_hash_addr() .unwrap_or_revert() .into(); - let result: URef = runtime::call_contract(contract_hash, "hello_ext", RuntimeArgs::default()); + let result: URef = + runtime::call_contract(contract_hash.into(), "hello_ext", RuntimeArgs::default()); let value = storage::read(result); diff --git a/smart_contracts/contracts/test/ee-401-regression/Cargo.toml b/smart_contracts/contracts/test/ee-401-regression/Cargo.toml index 9fe07cac2a..25cee87cd7 100644 --- a/smart_contracts/contracts/test/ee-401-regression/Cargo.toml +++ b/smart_contracts/contracts/test/ee-401-regression/Cargo.toml @@ -2,7 +2,7 @@ name = "ee-401-regression" version = "0.1.0" authors = ["Henry Till "] -edition = "2018" +edition = "2021" [[bin]] name = "ee_401_regression" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git 
a/smart_contracts/contracts/test/ee-401-regression/src/main.rs b/smart_contracts/contracts/test/ee-401-regression/src/main.rs index 41fc0d1fec..a14732ee61 100644 --- a/smart_contracts/contracts/test/ee-401-regression/src/main.rs +++ b/smart_contracts/contracts/test/ee-401-regression/src/main.rs @@ -10,8 +10,8 @@ use casper_contract::{ unwrap_or_revert::UnwrapOrRevert, }; use casper_types::{ - contracts::Parameters, CLType, CLValue, EntryPoint, EntryPointAccess, EntryPointType, - EntryPoints, URef, + addressable_entity::Parameters, AddressableEntityHash, CLType, CLValue, EntityEntryPoint, + EntryPointAccess, EntryPointPayment, EntryPointType, EntryPoints, Key, URef, }; const HELLO_EXT: &str = "hello_ext"; @@ -30,20 +30,25 @@ pub extern "C" fn call() { let entry_points = { let mut entry_points = EntryPoints::new(); - let entry_point = EntryPoint::new( + let entry_point = EntityEntryPoint::new( HELLO_EXT, Parameters::new(), CLType::URef, EntryPointAccess::Public, - EntryPointType::Contract, + EntryPointType::Called, + EntryPointPayment::Caller, ); entry_points.add_entry_point(entry_point); entry_points }; - let (contract_hash, contract_version) = storage::new_contract(entry_points, None, None, None); + let (contract_hash, contract_version) = + storage::new_contract(entry_points, None, None, None, None); runtime::put_key(CONTRACT_VERSION, storage::new_uref(contract_version).into()); - runtime::put_key(HELLO_EXT, contract_hash.into()); + runtime::put_key( + HELLO_EXT, + Key::contract_entity_key(AddressableEntityHash::new(contract_hash.value())), + ); } diff --git a/smart_contracts/contracts/test/ee-441-rng-state/Cargo.toml b/smart_contracts/contracts/test/ee-441-rng-state/Cargo.toml index 18e7e10286..04b98d96bc 100644 --- a/smart_contracts/contracts/test/ee-441-rng-state/Cargo.toml +++ b/smart_contracts/contracts/test/ee-441-rng-state/Cargo.toml @@ -2,7 +2,7 @@ name = "ee-441-rng-state" version = "0.1.0" authors = ["Henry Till "] -edition = "2018" +edition = "2021" 
[[bin]] name = "ee_441_rng_state" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/ee-441-rng-state/src/main.rs b/smart_contracts/contracts/test/ee-441-rng-state/src/main.rs index c3d936f1f3..864aca2e18 100644 --- a/smart_contracts/contracts/test/ee-441-rng-state/src/main.rs +++ b/smart_contracts/contracts/test/ee-441-rng-state/src/main.rs @@ -10,8 +10,8 @@ use casper_contract::{ unwrap_or_revert::UnwrapOrRevert, }; use casper_types::{ - contracts::Parameters, CLType, CLValue, EntryPoint, EntryPointAccess, EntryPointType, - EntryPoints, Key, RuntimeArgs, URef, U512, + addressable_entity::Parameters, CLType, CLValue, EntityEntryPoint, EntryPointAccess, + EntryPointPayment, EntryPointType, EntryPoints, Key, RuntimeArgs, URef, U512, }; const ARG_FLAG: &str = "flag"; @@ -39,29 +39,32 @@ pub extern "C" fn call() { let entry_points = { let mut entry_points = EntryPoints::new(); - let do_nothing_entry_point = EntryPoint::new( + let do_nothing_entry_point = EntityEntryPoint::new( "do_nothing", Parameters::default(), CLType::String, EntryPointAccess::Public, - EntryPointType::Contract, + EntryPointType::Called, + EntryPointPayment::Caller, ); entry_points.add_entry_point(do_nothing_entry_point); - let do_something_entry_point = EntryPoint::new( + let do_something_entry_point = EntityEntryPoint::new( "do_something", Parameters::default(), CLType::URef, EntryPointAccess::Public, - EntryPointType::Contract, + EntryPointType::Called, + EntryPointPayment::Caller, ); entry_points.add_entry_point(do_something_entry_point); entry_points }; - let (contract_hash, _contract_version) = storage::new_contract(entry_points, None, None, None); + let (contract_hash, _contract_version) = + storage::new_contract(entry_points, None, None, None, None); if flag == 
"pass1" { // Two calls should forward the internal RNG. This pass is a baseline. diff --git a/smart_contracts/contracts/test/ee-460-regression/Cargo.toml b/smart_contracts/contracts/test/ee-460-regression/Cargo.toml index 712cf69cad..749b4a46ff 100644 --- a/smart_contracts/contracts/test/ee-460-regression/Cargo.toml +++ b/smart_contracts/contracts/test/ee-460-regression/Cargo.toml @@ -2,7 +2,7 @@ name = "ee-460-regression" version = "0.1.0" authors = ["Michał Papierski "] -edition = "2018" +edition = "2021" [[bin]] name = "ee_460_regression" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/ee-532-regression/Cargo.toml b/smart_contracts/contracts/test/ee-532-regression/Cargo.toml index 106151c8db..03d1aa8b32 100644 --- a/smart_contracts/contracts/test/ee-532-regression/Cargo.toml +++ b/smart_contracts/contracts/test/ee-532-regression/Cargo.toml @@ -1,8 +1,8 @@ [package] name = "ee-532-regression" version = "0.1.0" -authors = ["Michał Papierski "] -edition = "2018" +authors = ["Michał Papierski "] +edition = "2021" [[bin]] name = "ee_532_regression" @@ -11,8 +11,5 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std"] - [dependencies] casper-contract = { path = "../../../contract" } diff --git a/smart_contracts/contracts/test/ee-532-regression/src/main.rs b/smart_contracts/contracts/test/ee-532-regression/src/main.rs index 663451d887..ff8ec71811 100644 --- a/smart_contracts/contracts/test/ee-532-regression/src/main.rs +++ b/smart_contracts/contracts/test/ee-532-regression/src/main.rs @@ -1,8 +1,7 @@ #![no_std] #![no_main] - // Required to bring `#[panic_handler]` from `contract::handlers` into scope. 
-#[allow(unused_imports, clippy::single_component_path_imports)] +#![allow(unused_imports, clippy::single_component_path_imports)] use casper_contract; #[no_mangle] diff --git a/smart_contracts/contracts/test/ee-536-regression/Cargo.toml b/smart_contracts/contracts/test/ee-536-regression/Cargo.toml index 0a44474bb6..7229841e35 100644 --- a/smart_contracts/contracts/test/ee-536-regression/Cargo.toml +++ b/smart_contracts/contracts/test/ee-536-regression/Cargo.toml @@ -1,8 +1,8 @@ [package] name = "ee-536-regression" version = "0.1.0" -authors = ["Michał Papierski "] -edition = "2018" +authors = ["Michał Papierski "] +edition = "2021" [[bin]] name = "ee_536_regression" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/ee-536-regression/src/main.rs b/smart_contracts/contracts/test/ee-536-regression/src/main.rs index 64ecdbedde..562ee6b7d4 100644 --- a/smart_contracts/contracts/test/ee-536-regression/src/main.rs +++ b/smart_contracts/contracts/test/ee-536-regression/src/main.rs @@ -27,7 +27,7 @@ pub extern "C" fn call() { match account::remove_associated_key(key_2) { Err(RemoveKeyFailure::ThresholdViolation) => { - // Shouldn't be able to remove key because key threshold == 11 and + // Shouldn't be able to remove key because key weight == 11 and // removing would violate the constraint } Err(_) => runtime::revert(ApiError::User(300)), diff --git a/smart_contracts/contracts/test/ee-539-regression/Cargo.toml b/smart_contracts/contracts/test/ee-539-regression/Cargo.toml index a02d50fb58..c714423e74 100644 --- a/smart_contracts/contracts/test/ee-539-regression/Cargo.toml +++ b/smart_contracts/contracts/test/ee-539-regression/Cargo.toml @@ -2,7 +2,7 @@ name = "ee-539-regression" version = "0.1.0" authors = ["Michał Papierski "] -edition = "2018" 
+edition = "2021" [[bin]] name = "ee_539_regression" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/ee-549-regression/Cargo.toml b/smart_contracts/contracts/test/ee-549-regression/Cargo.toml index 7cd1530acc..3ab3c064ed 100644 --- a/smart_contracts/contracts/test/ee-549-regression/Cargo.toml +++ b/smart_contracts/contracts/test/ee-549-regression/Cargo.toml @@ -2,7 +2,7 @@ name = "ee-549-regression" version = "0.1.0" authors = ["Michael Birch "] -edition = "2018" +edition = "2021" [[bin]] name = "ee_549_regression" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/ee-549-regression/src/main.rs b/smart_contracts/contracts/test/ee-549-regression/src/main.rs index 4038335fbd..a7f17e988a 100644 --- a/smart_contracts/contracts/test/ee-549-regression/src/main.rs +++ b/smart_contracts/contracts/test/ee-549-regression/src/main.rs @@ -2,7 +2,7 @@ #![no_main] use casper_contract::contract_api::{runtime, system}; -use casper_types::{runtime_args, RuntimeArgs}; +use casper_types::runtime_args; const SET_REFUND_PURSE: &str = "set_refund_purse"; const ARG_PURSE: &str = "purse"; diff --git a/smart_contracts/contracts/test/ee-550-regression/Cargo.toml b/smart_contracts/contracts/test/ee-550-regression/Cargo.toml index 600faea2fd..4627d17e1e 100644 --- a/smart_contracts/contracts/test/ee-550-regression/Cargo.toml +++ b/smart_contracts/contracts/test/ee-550-regression/Cargo.toml @@ -2,7 +2,7 @@ name = "ee-550-regression" version = "0.1.0" authors = ["Michał Papierski "] -edition = "2018" +edition = "2021" [[bin]] name = 
"ee_550_regression" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/ee-550-regression/src/main.rs b/smart_contracts/contracts/test/ee-550-regression/src/main.rs index 654231382c..9b8004c672 100644 --- a/smart_contracts/contracts/test/ee-550-regression/src/main.rs +++ b/smart_contracts/contracts/test/ee-550-regression/src/main.rs @@ -24,9 +24,9 @@ enum Error { UnknownPass = 5, } -impl Into for Error { - fn into(self) -> ApiError { - ApiError::User(self as u16) +impl From for ApiError { + fn from(error: Error) -> Self { + ApiError::User(error as u16) } } diff --git a/smart_contracts/contracts/test/ee-572-regression-create/Cargo.toml b/smart_contracts/contracts/test/ee-572-regression-create/Cargo.toml index 9d5b9bea13..d74d1d042b 100644 --- a/smart_contracts/contracts/test/ee-572-regression-create/Cargo.toml +++ b/smart_contracts/contracts/test/ee-572-regression-create/Cargo.toml @@ -2,7 +2,7 @@ name = "ee-572-regression-create" version = "0.1.0" authors = ["Henry Till "] -edition = "2018" +edition = "2021" [[bin]] name = "ee_572_regression_create" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/ee-572-regression-create/src/main.rs b/smart_contracts/contracts/test/ee-572-regression-create/src/main.rs index 575b3a94a4..d0aaad3903 100644 --- a/smart_contracts/contracts/test/ee-572-regression-create/src/main.rs +++ b/smart_contracts/contracts/test/ee-572-regression-create/src/main.rs @@ -8,8 +8,8 @@ use casper_contract::{ unwrap_or_revert::UnwrapOrRevert, }; use casper_types::{ - contracts::Parameters, AccessRights, 
CLType, CLValue, EntryPoint, EntryPointAccess, - EntryPointType, EntryPoints, URef, + addressable_entity::Parameters, AccessRights, AddressableEntityHash, CLType, CLValue, + EntityEntryPoint, EntryPointAccess, EntryPointPayment, EntryPointType, EntryPoints, Key, URef, }; const DATA: &str = "data"; @@ -29,19 +29,24 @@ pub extern "C" fn call() { let entry_points = { let mut entry_points = EntryPoints::new(); - let entry_point = EntryPoint::new( + let entry_point = EntityEntryPoint::new( "create", Parameters::default(), CLType::URef, EntryPointAccess::Public, - EntryPointType::Contract, + EntryPointType::Called, + EntryPointPayment::Caller, ); entry_points.add_entry_point(entry_point); entry_points }; - let (contract_hash, contract_version) = storage::new_contract(entry_points, None, None, None); + let (contract_hash, contract_version) = + storage::new_contract(entry_points, None, None, None, None); runtime::put_key(CONTRACT_VERSION, storage::new_uref(contract_version).into()); - runtime::put_key(CONTRACT_NAME, contract_hash.into()); + runtime::put_key( + CONTRACT_NAME, + Key::contract_entity_key(AddressableEntityHash::new(contract_hash.value())), + ); } diff --git a/smart_contracts/contracts/test/ee-572-regression-escalate/Cargo.toml b/smart_contracts/contracts/test/ee-572-regression-escalate/Cargo.toml index c7a9e8b6e5..b01376077a 100644 --- a/smart_contracts/contracts/test/ee-572-regression-escalate/Cargo.toml +++ b/smart_contracts/contracts/test/ee-572-regression-escalate/Cargo.toml @@ -2,7 +2,7 @@ name = "ee-572-regression-escalate" version = "0.1.0" authors = ["Henry Till "] -edition = "2018" +edition = "2021" [[bin]] name = "ee_572_regression_escalate" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git 
a/smart_contracts/contracts/test/ee-572-regression-escalate/src/main.rs b/smart_contracts/contracts/test/ee-572-regression-escalate/src/main.rs index ba446a9dd4..393e577287 100644 --- a/smart_contracts/contracts/test/ee-572-regression-escalate/src/main.rs +++ b/smart_contracts/contracts/test/ee-572-regression-escalate/src/main.rs @@ -2,16 +2,17 @@ #![no_main] use casper_contract::contract_api::{runtime, storage}; -use casper_types::{AccessRights, ContractHash, RuntimeArgs, URef}; +use casper_types::{AccessRights, AddressableEntityHash, RuntimeArgs, URef}; const REPLACEMENT_DATA: &str = "bawitdaba"; const ARG_CONTRACT_HASH: &str = "contract_hash"; #[no_mangle] pub extern "C" fn call() { - let contract_hash: ContractHash = runtime::get_named_arg(ARG_CONTRACT_HASH); + let contract_hash: AddressableEntityHash = runtime::get_named_arg(ARG_CONTRACT_HASH); - let reference: URef = runtime::call_contract(contract_hash, "create", RuntimeArgs::default()); + let reference: URef = + runtime::call_contract(contract_hash.into(), "create", RuntimeArgs::default()); let forged_reference: URef = URef::new(reference.addr(), AccessRights::READ_ADD_WRITE); storage::write(forged_reference, REPLACEMENT_DATA) } diff --git a/smart_contracts/contracts/test/ee-584-regression/Cargo.toml b/smart_contracts/contracts/test/ee-584-regression/Cargo.toml index 360391944a..dc5598cfe0 100644 --- a/smart_contracts/contracts/test/ee-584-regression/Cargo.toml +++ b/smart_contracts/contracts/test/ee-584-regression/Cargo.toml @@ -2,7 +2,7 @@ name = "ee-584-regression" version = "0.1.0" authors = ["Michael Birch "] -edition = "2018" +edition = "2021" [[bin]] name = "ee_584_regression" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/ee-597-regression/Cargo.toml 
b/smart_contracts/contracts/test/ee-597-regression/Cargo.toml index 1e08635c10..70605ac1c8 100644 --- a/smart_contracts/contracts/test/ee-597-regression/Cargo.toml +++ b/smart_contracts/contracts/test/ee-597-regression/Cargo.toml @@ -2,7 +2,7 @@ name = "ee-597-regression" version = "0.1.0" authors = ["Michał Papierski "] -edition = "2018" +edition = "2021" [[bin]] name = "ee_597_regression" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/ee-597-regression/src/main.rs b/smart_contracts/contracts/test/ee-597-regression/src/main.rs index 838ba7020b..357062db91 100644 --- a/smart_contracts/contracts/test/ee-597-regression/src/main.rs +++ b/smart_contracts/contracts/test/ee-597-regression/src/main.rs @@ -5,18 +5,17 @@ extern crate alloc; use casper_contract::contract_api::{runtime, system}; use casper_types::{ + contracts::ContractHash, runtime_args, system::auction::{self, DelegationRate}, - ContractHash, PublicKey, RuntimeArgs, SecretKey, U512, + PublicKey, SecretKey, U512, }; const DELEGATION_RATE: DelegationRate = 42; fn bond(contract_hash: ContractHash, bond_amount: U512) { - let valid_public_key: PublicKey = - SecretKey::ed25519_from_bytes([42; SecretKey::ED25519_LENGTH]) - .unwrap() - .into(); + let valid_secret_key = SecretKey::ed25519_from_bytes([42; SecretKey::ED25519_LENGTH]).unwrap(); + let valid_public_key = PublicKey::from(&valid_secret_key); let runtime_args = runtime_args! 
{ auction::ARG_PUBLIC_KEY => valid_public_key, diff --git a/smart_contracts/contracts/test/ee-598-regression/Cargo.toml b/smart_contracts/contracts/test/ee-598-regression/Cargo.toml index e31b1bddb4..04d20e3bda 100644 --- a/smart_contracts/contracts/test/ee-598-regression/Cargo.toml +++ b/smart_contracts/contracts/test/ee-598-regression/Cargo.toml @@ -1,8 +1,8 @@ [package] name = "ee-598-regression" version = "0.1.0" -authors = ["Michał Papierski "] -edition = "2018" +authors = ["Michał Papierski "] +edition = "2021" [[bin]] name = "ee_598_regression" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/ee-598-regression/src/main.rs b/smart_contracts/contracts/test/ee-598-regression/src/main.rs index c64cb21a37..8e276947d8 100644 --- a/smart_contracts/contracts/test/ee-598-regression/src/main.rs +++ b/smart_contracts/contracts/test/ee-598-regression/src/main.rs @@ -3,7 +3,7 @@ use auction::DelegationRate; use casper_contract::contract_api::{runtime, system}; -use casper_types::{runtime_args, system::auction, ContractHash, PublicKey, RuntimeArgs, U512}; +use casper_types::{contracts::ContractHash, runtime_args, system::auction, PublicKey, U512}; const ARG_AMOUNT: &str = "amount"; const ARG_PUBLIC_KEY: &str = "public_key"; diff --git a/smart_contracts/contracts/test/ee-599-regression/Cargo.toml b/smart_contracts/contracts/test/ee-599-regression/Cargo.toml index 3de7e0bd44..3a2e943d24 100644 --- a/smart_contracts/contracts/test/ee-599-regression/Cargo.toml +++ b/smart_contracts/contracts/test/ee-599-regression/Cargo.toml @@ -2,7 +2,7 @@ name = "ee-599-regression" version = "0.1.0" authors = ["Michał Papierski "] -edition = "2018" +edition = "2021" [[bin]] name = "ee_599_regression" @@ -11,9 +11,6 @@ bench = false doctest = false test = false 
-[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/ee-599-regression/src/main.rs b/smart_contracts/contracts/test/ee-599-regression/src/main.rs index fc5fc20104..9da167e9bf 100644 --- a/smart_contracts/contracts/test/ee-599-regression/src/main.rs +++ b/smart_contracts/contracts/test/ee-599-regression/src/main.rs @@ -11,10 +11,9 @@ use casper_contract::{ unwrap_or_revert::UnwrapOrRevert, }; use casper_types::{ - account::AccountHash, - contracts::{NamedKeys, Parameters}, - ApiError, CLType, ContractHash, EntryPoint, EntryPointAccess, EntryPointType, EntryPoints, Key, - RuntimeArgs, URef, U512, + account::AccountHash, addressable_entity::Parameters, contracts::NamedKeys, + AddressableEntityHash, ApiError, CLType, EntityEntryPoint, EntryPointAccess, EntryPointPayment, + EntryPointType, EntryPoints, Key, RuntimeArgs, URef, U512, }; const DONATION_AMOUNT: u64 = 1; @@ -39,9 +38,9 @@ enum ContractError { InvalidDelegateMethod = 0, } -impl Into for ContractError { - fn into(self) -> ApiError { - ApiError::User(self as u16) +impl From for ApiError { + fn from(error: ContractError) -> Self { + ApiError::User(error as u16) } } @@ -127,42 +126,46 @@ fn delegate() -> Result<(), ApiError> { let entry_points = { let mut entry_points = EntryPoints::new(); - let entry_point_1 = EntryPoint::new( + let entry_point_1 = EntityEntryPoint::new( TRANSFER_FROM_PURSE_TO_ACCOUNT, Parameters::default(), CLType::Unit, EntryPointAccess::Public, - EntryPointType::Contract, + EntryPointType::Called, + EntryPointPayment::Caller, ); entry_points.add_entry_point(entry_point_1); - let entry_point_2 = EntryPoint::new( + let entry_point_2 = EntityEntryPoint::new( TRANSFER_TO_ACCOUNT, Parameters::default(), CLType::Unit, EntryPointAccess::Public, - EntryPointType::Contract, + EntryPointType::Called, + EntryPointPayment::Caller, ); 
entry_points.add_entry_point(entry_point_2); - let entry_point_3 = EntryPoint::new( + let entry_point_3 = EntityEntryPoint::new( TRANSFER_TO_ACCOUNT, Parameters::default(), CLType::Unit, EntryPointAccess::Public, - EntryPointType::Contract, + EntryPointType::Called, + EntryPointPayment::Caller, ); entry_points.add_entry_point(entry_point_3); - let entry_point_4 = EntryPoint::new( + let entry_point_4 = EntityEntryPoint::new( TRANSFER_FROM_PURSE_TO_PURSE, Parameters::default(), CLType::Unit, EntryPointAccess::Public, - EntryPointType::Contract, + EntryPointType::Called, + EntryPointPayment::Caller, ); entry_points.add_entry_point(entry_point_4); @@ -171,8 +174,11 @@ fn delegate() -> Result<(), ApiError> { }; let (contract_hash, _contract_version) = - storage::new_contract(entry_points, Some(known_keys), None, None); - runtime::put_key(TRANSFER_FUNDS_KEY, contract_hash.into()); + storage::new_contract(entry_points, Some(known_keys), None, None, None); + runtime::put_key( + TRANSFER_FUNDS_KEY, + Key::contract_entity_key(AddressableEntityHash::new(contract_hash.value())), + ); // For easy access in outside world here `donation` purse is also attached // to the account runtime::put_key(DONATION_PURSE_COPY, purse.into()); @@ -180,11 +186,15 @@ fn delegate() -> Result<(), ApiError> { METHOD_CALL => { // This comes from outside i.e. 
after deploying the contract, this key is queried, // and then passed into the call - let contract_key: ContractHash = runtime::get_named_arg(ARG_CONTRACTKEY); + let contract_key: AddressableEntityHash = runtime::get_named_arg(ARG_CONTRACTKEY); // This is a method that's gets forwarded into the sub contract let subcontract_method: String = runtime::get_named_arg(ARG_SUBCONTRACTMETHODFWD); - runtime::call_contract::<()>(contract_key, &subcontract_method, RuntimeArgs::default()); + runtime::call_contract::<()>( + contract_key.into(), + &subcontract_method, + RuntimeArgs::default(), + ); } _ => return Err(ContractError::InvalidDelegateMethod.into()), } diff --git a/smart_contracts/contracts/test/ee-601-regression/Cargo.toml b/smart_contracts/contracts/test/ee-601-regression/Cargo.toml index 2bc817ea1b..2d8aee44a8 100644 --- a/smart_contracts/contracts/test/ee-601-regression/Cargo.toml +++ b/smart_contracts/contracts/test/ee-601-regression/Cargo.toml @@ -1,8 +1,8 @@ [package] name = "ee-601-regression" version = "0.1.0" -authors = ["Ed Hastings "] -edition = "2018" +authors = ["Ed Hastings "] +edition = "2021" [[bin]] name = "ee_601_regression" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/ee-771-regression/Cargo.toml b/smart_contracts/contracts/test/ee-771-regression/Cargo.toml index 99da206748..3107122d24 100644 --- a/smart_contracts/contracts/test/ee-771-regression/Cargo.toml +++ b/smart_contracts/contracts/test/ee-771-regression/Cargo.toml @@ -2,7 +2,7 @@ name = "ee-771-regression" version = "0.1.0" authors = ["Michał Papierski "] -edition = "2018" +edition = "2021" [[bin]] name = "ee_771_regression" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - 
[dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/ee-771-regression/src/main.rs b/smart_contracts/contracts/test/ee-771-regression/src/main.rs index f7d721c983..1ecc8835d9 100644 --- a/smart_contracts/contracts/test/ee-771-regression/src/main.rs +++ b/smart_contracts/contracts/test/ee-771-regression/src/main.rs @@ -7,9 +7,10 @@ use alloc::string::ToString; use casper_contract::contract_api::{runtime, storage}; use casper_types::{ - contracts::{NamedKeys, Parameters}, - CLType, ContractHash, ContractVersion, EntryPoint, EntryPointAccess, EntryPointType, - EntryPoints, RuntimeArgs, + addressable_entity::Parameters, + contracts::{ContractHash, ContractVersion}, + AddressableEntityHash, CLType, EntityEntryPoint, EntryPointAccess, EntryPointPayment, + EntryPointType, EntryPoints, Key, NamedKeys, RuntimeArgs, }; const ENTRY_POINT_NAME: &str = "contract_ext"; @@ -21,7 +22,10 @@ pub extern "C" fn contract_ext() { Some(contract_key) => { // Calls a stored contract if exists. 
runtime::call_contract( - contract_key.into_hash().expect("should be a hash").into(), + contract_key + .into_entity_hash_addr() + .expect("should be a hash") + .into(), "contract_ext", RuntimeArgs::default(), ) @@ -31,19 +35,20 @@ pub extern "C" fn contract_ext() { let entry_points = { let mut entry_points = EntryPoints::new(); - let entry_point = EntryPoint::new( + let entry_point = EntityEntryPoint::new( "functiondoesnotexist", Parameters::default(), CLType::Unit, EntryPointAccess::Public, - EntryPointType::Contract, + EntryPointType::Called, + EntryPointPayment::Caller, ); entry_points.add_entry_point(entry_point); entry_points }; - storage::new_contract(entry_points, None, None, None); + storage::new_contract(entry_points, None, None, None, None); } } } @@ -53,33 +58,43 @@ fn store(named_keys: NamedKeys) -> (ContractHash, ContractVersion) { let entry_points = { let mut entry_points = EntryPoints::new(); - let entry_point = EntryPoint::new( + let entry_point = EntityEntryPoint::new( ENTRY_POINT_NAME, Parameters::default(), CLType::Unit, EntryPointAccess::Public, - EntryPointType::Contract, + EntryPointType::Called, + EntryPointPayment::Caller, ); entry_points.add_entry_point(entry_point); entry_points }; - storage::new_contract(entry_points, Some(named_keys), None, None) + storage::new_contract(entry_points, Some(named_keys), None, None, None) } fn install() -> ContractHash { let (contract_hash, _contract_version) = store(NamedKeys::new()); let mut keys = NamedKeys::new(); - keys.insert(CONTRACT_KEY.to_string(), contract_hash.into()); + keys.insert( + CONTRACT_KEY.to_string(), + Key::contract_entity_key(AddressableEntityHash::new(contract_hash.value())), + ); let (contract_hash, _contract_version) = store(keys); let mut keys_2 = NamedKeys::new(); - keys_2.insert(CONTRACT_KEY.to_string(), contract_hash.into()); + keys_2.insert( + CONTRACT_KEY.to_string(), + Key::contract_entity_key(AddressableEntityHash::new(contract_hash.value())), + ); let (contract_hash, 
_contract_version) = store(keys_2); - runtime::put_key(CONTRACT_KEY, contract_hash.into()); + runtime::put_key( + CONTRACT_KEY, + Key::contract_entity_key(AddressableEntityHash::new(contract_hash.value())), + ); contract_hash } diff --git a/smart_contracts/contracts/test/ee-966-regression/Cargo.toml b/smart_contracts/contracts/test/ee-966-regression/Cargo.toml index f4086cfb8a..d298eddc45 100644 --- a/smart_contracts/contracts/test/ee-966-regression/Cargo.toml +++ b/smart_contracts/contracts/test/ee-966-regression/Cargo.toml @@ -1,8 +1,8 @@ [package] name = "ee-966-regression" version = "0.1.0" -authors = ["Michał Papierski "] -edition = "2018" +authors = ["Michał Papierski "] +edition = "2021" [[bin]] name = "ee_966_regression" @@ -10,6 +10,3 @@ path = "src/main.rs" bench = false doctest = false test = false - -[dependencies] - diff --git a/smart_contracts/contracts/test/ee-966-regression/src/main.rs b/smart_contracts/contracts/test/ee-966-regression/src/main.rs index 5c0889f714..efffe3a0da 100644 --- a/smart_contracts/contracts/test/ee-966-regression/src/main.rs +++ b/smart_contracts/contracts/test/ee-966-regression/src/main.rs @@ -1,5 +1,6 @@ #![no_std] #![no_main] +#![allow(internal_features)] #![feature(lang_items)] extern crate core; @@ -45,7 +46,7 @@ pub fn memory_size() -> usize { pub fn memory_grow(new_pages: usize) { let ptr = wasm32::memory_grow(DEFAULT_MEMORY_INDEX, new_pages); - if ptr == usize::max_value() { + if ptr == usize::MAX { revert(ApiError::OutOfMemory); } } diff --git a/smart_contracts/contracts/test/endless-loop-with-effects/Cargo.toml b/smart_contracts/contracts/test/endless-loop-with-effects/Cargo.toml new file mode 100644 index 0000000000..be5fe4a9b8 --- /dev/null +++ b/smart_contracts/contracts/test/endless-loop-with-effects/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "endless-loop-with-effects" +version = "0.1.0" +authors = ["Alex Sardan "] +edition = "2021" + +[[bin]] +name = "endless_loop_with_effects" +path = "src/main.rs" +bench 
= false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/endless-loop-with-effects/src/main.rs b/smart_contracts/contracts/test/endless-loop-with-effects/src/main.rs new file mode 100644 index 0000000000..a09765d17a --- /dev/null +++ b/smart_contracts/contracts/test/endless-loop-with-effects/src/main.rs @@ -0,0 +1,19 @@ +#![no_std] +#![no_main] + +extern crate alloc; + +use casper_contract::contract_api::{account, runtime, storage}; +use casper_types::Key; + +#[no_mangle] +pub extern "C" fn call() { + let mut data: u32 = 1; + let uref = storage::new_uref(data); + runtime::put_key("new_key", Key::from(uref)); + loop { + let _ = account::get_main_purse(); + data += 1; + storage::write(uref, data); + } +} diff --git a/smart_contracts/contracts/test/endless-loop/Cargo.toml b/smart_contracts/contracts/test/endless-loop/Cargo.toml index 93cb1d47d6..174c4f5d15 100644 --- a/smart_contracts/contracts/test/endless-loop/Cargo.toml +++ b/smart_contracts/contracts/test/endless-loop/Cargo.toml @@ -2,7 +2,7 @@ name = "endless-loop" version = "0.1.0" authors = ["Henry Till "] -edition = "2018" +edition = "2021" [[bin]] name = "endless_loop" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std"] - [dependencies] casper-contract = { path = "../../../contract" } -casper-types = { path = "../../../../types" } \ No newline at end of file +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/expensive-calculation/Cargo.toml b/smart_contracts/contracts/test/expensive-calculation/Cargo.toml index 997328e12b..27007eb6ec 100644 --- a/smart_contracts/contracts/test/expensive-calculation/Cargo.toml +++ b/smart_contracts/contracts/test/expensive-calculation/Cargo.toml @@ -2,7 +2,7 @@ name = "expensive-calculation" version = "0.1.0" authors = ["Bartłomiej Kamiński "] 
-edition = "2018" +edition = "2021" [[bin]] name = "expensive_calculation" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/expensive-calculation/src/main.rs b/smart_contracts/contracts/test/expensive-calculation/src/main.rs index 2c362133d2..a536a25e53 100644 --- a/smart_contracts/contracts/test/expensive-calculation/src/main.rs +++ b/smart_contracts/contracts/test/expensive-calculation/src/main.rs @@ -5,7 +5,8 @@ extern crate alloc; use casper_contract::contract_api::{runtime, storage}; use casper_types::{ - contracts::Parameters, CLType, EntryPoint, EntryPointAccess, EntryPointType, EntryPoints, + addressable_entity::Parameters, AddressableEntityHash, CLType, EntityEntryPoint, + EntryPointAccess, EntryPointPayment, EntryPointType, EntryPoints, Key, }; const ENTRY_FUNCTION_NAME: &str = "calculate"; @@ -28,21 +29,26 @@ pub extern "C" fn calculate() -> u64 { pub extern "C" fn call() { let entry_points = { let mut entry_points = EntryPoints::new(); - let entry_point = EntryPoint::new( + let entry_point = EntityEntryPoint::new( ENTRY_FUNCTION_NAME, Parameters::new(), CLType::Unit, EntryPointAccess::Public, - EntryPointType::Contract, + EntryPointType::Called, + EntryPointPayment::Caller, ); entry_points.add_entry_point(entry_point); entry_points }; - let (contract_hash, contract_version) = storage::new_contract(entry_points, None, None, None); + let (contract_hash, contract_version) = + storage::new_contract(entry_points, None, None, None, None); runtime::put_key( "contract_version", storage::new_uref(contract_version).into(), ); - runtime::put_key("expensive-calculation", contract_hash.into()); + runtime::put_key( + "expensive-calculation", + Key::contract_entity_key(AddressableEntityHash::new(contract_hash.value())), + ); } diff --git 
a/smart_contracts/contracts/test/finalize-payment/Cargo.toml b/smart_contracts/contracts/test/finalize-payment/Cargo.toml index 2fc12661cb..720634461d 100644 --- a/smart_contracts/contracts/test/finalize-payment/Cargo.toml +++ b/smart_contracts/contracts/test/finalize-payment/Cargo.toml @@ -2,7 +2,7 @@ name = "finalize-payment" version = "0.1.0" authors = ["Michael Birch "] -edition = "2018" +edition = "2021" [[bin]] name = "finalize_payment" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/finalize-payment/src/main.rs b/smart_contracts/contracts/test/finalize-payment/src/main.rs index cebefed7e4..5b18e92e68 100644 --- a/smart_contracts/contracts/test/finalize-payment/src/main.rs +++ b/smart_contracts/contracts/test/finalize-payment/src/main.rs @@ -1,17 +1,22 @@ #![no_std] #![no_main] +extern crate alloc; + +use alloc::string::String; + use casper_contract::{ contract_api::{account, runtime, system}, unwrap_or_revert::UnwrapOrRevert, }; -use casper_types::{account::AccountHash, runtime_args, ContractHash, RuntimeArgs, URef, U512}; +use casper_types::{account::AccountHash, contracts::ContractHash, runtime_args, URef, U512}; pub const ARG_AMOUNT: &str = "amount"; pub const ARG_AMOUNT_SPENT: &str = "amount_spent"; pub const ARG_REFUND_FLAG: &str = "refund"; pub const ARG_PURSE: &str = "purse"; pub const ARG_ACCOUNT_KEY: &str = "account"; +pub const ARG_PURSE_NAME: &str = "purse_name"; fn set_refund_purse(contract_hash: ContractHash, purse: URef) { runtime::call_contract( @@ -52,12 +57,15 @@ pub extern "C" fn call() { let refund_purse_flag: u8 = runtime::get_named_arg(ARG_REFUND_FLAG); let maybe_amount_spent: Option = runtime::get_named_arg(ARG_AMOUNT_SPENT); let maybe_account: Option = runtime::get_named_arg(ARG_ACCOUNT_KEY); + let 
purse_name: String = runtime::get_named_arg(ARG_PURSE_NAME); submit_payment(contract_hash, payment_amount); if refund_purse_flag != 0 { - let refund_purse = system::create_purse(); - runtime::put_key("local_refund_purse", refund_purse.into()); + let refund_purse = { + let stored_purse_key = runtime::get_key(&purse_name).unwrap_or_revert(); + stored_purse_key.into_uref().unwrap_or_revert() + }; set_refund_purse(contract_hash, refund_purse); } diff --git a/smart_contracts/contracts/test/generic-hash/Cargo.toml b/smart_contracts/contracts/test/generic-hash/Cargo.toml new file mode 100644 index 0000000000..352f8842c1 --- /dev/null +++ b/smart_contracts/contracts/test/generic-hash/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "generic-hash" +version = "0.1.0" +authors = ["Igor Bunar "] +edition = "2021" + +[[bin]] +name = "generic_hash" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/generic-hash/src/main.rs b/smart_contracts/contracts/test/generic-hash/src/main.rs new file mode 100644 index 0000000000..9da526357b --- /dev/null +++ b/smart_contracts/contracts/test/generic-hash/src/main.rs @@ -0,0 +1,24 @@ +#![no_std] +#![no_main] + +extern crate alloc; +use alloc::string::String; + +use casper_contract::contract_api::{cryptography, runtime}; +use casper_types::crypto::HashAlgorithm; + +const ARG_ALGORITHM: &str = "algorithm"; +const ARG_DATA: &str = "data"; +const ARG_EXPECTED: &str = "expected"; + +#[no_mangle] +pub extern "C" fn call() { + let data: String = runtime::get_named_arg(ARG_DATA); + let expected: [u8; 32] = runtime::get_named_arg(ARG_EXPECTED); + let algorithm_repr: u8 = runtime::get_named_arg(ARG_ALGORITHM); + + let algorithm = HashAlgorithm::try_from(algorithm_repr).expect("Invalid enum repr"); + let hash = cryptography::generic_hash(data, algorithm); + + assert_eq!(hash, 
expected, "Hash mismatch"); +} diff --git a/smart_contracts/contracts/test/get-arg/Cargo.toml b/smart_contracts/contracts/test/get-arg/Cargo.toml index 22a46cd60b..c8e9b3d13a 100644 --- a/smart_contracts/contracts/test/get-arg/Cargo.toml +++ b/smart_contracts/contracts/test/get-arg/Cargo.toml @@ -1,8 +1,8 @@ [package] name = "get-arg" version = "0.1.0" -authors = ["Michał Papierski "] -edition = "2018" +authors = ["Michał Papierski "] +edition = "2021" [[bin]] name = "get_arg" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/get-blockinfo/Cargo.toml b/smart_contracts/contracts/test/get-blockinfo/Cargo.toml new file mode 100644 index 0000000000..a8d996f471 --- /dev/null +++ b/smart_contracts/contracts/test/get-blockinfo/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "get-blockinfo" +version = "0.1.0" +authors = ["Ed Hastings "] +edition = "2021" + +[[bin]] +name = "get_blockinfo" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/get-blockinfo/src/main.rs b/smart_contracts/contracts/test/get-blockinfo/src/main.rs new file mode 100644 index 0000000000..4a05ececb1 --- /dev/null +++ b/smart_contracts/contracts/test/get-blockinfo/src/main.rs @@ -0,0 +1,85 @@ +#![no_std] +#![no_main] + +use casper_contract::{ + contract_api::{runtime, runtime::revert}, + unwrap_or_revert::UnwrapOrRevert, +}; +use casper_types::{ + bytesrepr::{Bytes, FromBytes}, + ApiError, BlockTime, Digest, ProtocolVersion, +}; + +const ARG_FIELD_IDX: &str = "field_idx"; +const FIELD_IDX_BLOCK_TIME: u8 = 0; +const FIELD_IDX_BLOCK_HEIGHT: u8 = 1; +const FIELD_IDX_PARENT_BLOCK_HASH: u8 = 2; +const 
FIELD_IDX_STATE_HASH: u8 = 3; +const FIELD_IDX_PROTOCOL_VERSION: u8 = 4; +const FIELD_IDX_ADDRESSABLE_ENTITY: u8 = 5; + +const CURRENT_UBOUND: u8 = FIELD_IDX_ADDRESSABLE_ENTITY; +const ARG_KNOWN_BLOCK_TIME: &str = "known_block_time"; +const ARG_KNOWN_BLOCK_HEIGHT: &str = "known_block_height"; +const ARG_KNOWN_BLOCK_PARENT_HASH: &str = "known_block_parent_hash"; +const ARG_KNOWN_STATE_HASH: &str = "known_state_hash"; +const ARG_KNOWN_PROTOCOL_VERSION: &str = "known_protocol_version"; +const ARG_KNOWN_ADDRESSABLE_ENTITY: &str = "known_addressable_entity"; + +#[no_mangle] +pub extern "C" fn call() { + let field_idx: u8 = runtime::get_named_arg(ARG_FIELD_IDX); + if field_idx > CURRENT_UBOUND { + revert(ApiError::Unhandled); + } + if field_idx == FIELD_IDX_BLOCK_TIME { + let expected = BlockTime::new(runtime::get_named_arg(ARG_KNOWN_BLOCK_TIME)); + let actual: BlockTime = runtime::get_blocktime(); + if expected != actual { + revert(ApiError::User(field_idx as u16)); + } + } + if field_idx == FIELD_IDX_BLOCK_HEIGHT { + let expected: u64 = runtime::get_named_arg(ARG_KNOWN_BLOCK_HEIGHT); + let actual = runtime::get_block_height(); + if expected != actual { + revert(ApiError::User(field_idx as u16)); + } + } + if field_idx == FIELD_IDX_PARENT_BLOCK_HASH { + let bytes: Bytes = runtime::get_named_arg(ARG_KNOWN_BLOCK_PARENT_HASH); + let (expected, _rem) = Digest::from_bytes(bytes.inner_bytes()) + .unwrap_or_revert_with(ApiError::User(CURRENT_UBOUND as u16 + 1)); + let actual = runtime::get_parent_block_hash(); + if expected != actual { + revert(ApiError::User(field_idx as u16)); + } + } + if field_idx == FIELD_IDX_STATE_HASH { + let bytes: Bytes = runtime::get_named_arg(ARG_KNOWN_STATE_HASH); + let (expected, _rem) = Digest::from_bytes(bytes.inner_bytes()) + .unwrap_or_revert_with(ApiError::User(CURRENT_UBOUND as u16 + 2)); + let actual = runtime::get_state_hash(); + if expected != actual { + revert(ApiError::User(field_idx as u16)); + } + } + if field_idx == 
FIELD_IDX_PROTOCOL_VERSION { + let bytes: Bytes = runtime::get_named_arg(ARG_KNOWN_PROTOCOL_VERSION); + let (expected, _rem) = ProtocolVersion::from_bytes(bytes.inner_bytes()) + .unwrap_or_revert_with(ApiError::User(CURRENT_UBOUND as u16 + 3)); + let actual = runtime::get_protocol_version(); + if expected != actual { + revert(ApiError::User(field_idx as u16)); + } + } + if field_idx == FIELD_IDX_ADDRESSABLE_ENTITY { + let bytes: Bytes = runtime::get_named_arg(ARG_KNOWN_ADDRESSABLE_ENTITY); + let (expected, _rem) = bool::from_bytes(bytes.inner_bytes()) + .unwrap_or_revert_with(ApiError::User(CURRENT_UBOUND as u16 + 4)); + let actual = runtime::get_addressable_entity(); + if expected != actual { + revert(ApiError::User(field_idx as u16)); + } + } +} diff --git a/smart_contracts/contracts/test/get-blocktime/Cargo.toml b/smart_contracts/contracts/test/get-blocktime/Cargo.toml index feb8e15ebf..3a080469b9 100644 --- a/smart_contracts/contracts/test/get-blocktime/Cargo.toml +++ b/smart_contracts/contracts/test/get-blocktime/Cargo.toml @@ -1,8 +1,8 @@ [package] name = "get-blocktime" version = "0.1.0" -authors = ["Ed Hastings , Henry Till "] -edition = "2018" +authors = ["Ed Hastings , Henry Till "] +edition = "2021" [[bin]] name = "get_blocktime" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/get-call-stack-call-recursive-subcall/Cargo.toml b/smart_contracts/contracts/test/get-call-stack-call-recursive-subcall/Cargo.toml new file mode 100644 index 0000000000..769eec30bb --- /dev/null +++ b/smart_contracts/contracts/test/get-call-stack-call-recursive-subcall/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "get-call-stack-call-recursive-subcall" +version = "0.1.0" +authors = ["Daniel Werner "] +edition = "2021" + +[[bin]] +name = 
"get_call_stack_call_recursive_subcall" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } +get-call-stack-recursive-subcall = { path = "../get-call-stack-recursive-subcall" } diff --git a/smart_contracts/contracts/test/get-call-stack-call-recursive-subcall/src/main.rs b/smart_contracts/contracts/test/get-call-stack-call-recursive-subcall/src/main.rs new file mode 100644 index 0000000000..ecc44e4b13 --- /dev/null +++ b/smart_contracts/contracts/test/get-call-stack-call-recursive-subcall/src/main.rs @@ -0,0 +1,66 @@ +#![no_std] +#![no_main] + +extern crate alloc; + +use alloc::vec::Vec; + +use casper_contract::contract_api::{runtime, storage}; +use casper_types::{runtime_args, ApiError, Key, Phase, U512}; +use get_call_stack_recursive_subcall::{standard_payment, Call, ContractAddress}; + +const ARG_CALLS: &str = "calls"; +const ARG_CURRENT_DEPTH: &str = "current_depth"; +const AMOUNT: &str = "amount"; + +#[no_mangle] +pub extern "C" fn call() { + let calls: Vec = runtime::get_named_arg(ARG_CALLS); + let current_depth: u8 = runtime::get_named_arg(ARG_CURRENT_DEPTH); + let amount: U512 = runtime::get_named_arg(AMOUNT); + let calls_count = calls.len() as u8; + + // The important bit + { + let call_stack = runtime::get_call_stack(); + let name = alloc::format!("call_stack-{}", current_depth); + let call_stack_at = storage::new_uref(call_stack); + runtime::put_key(&name, Key::URef(call_stack_at)); + } + + if current_depth == 0 && runtime::get_phase() == Phase::Payment { + standard_payment(amount); + } + + if current_depth == calls_count { + return; + } + + let args = runtime_args! { + ARG_CALLS => calls.clone(), + ARG_CURRENT_DEPTH => current_depth + 1, + }; + + match calls.get(current_depth as usize) { + Some(Call { + contract_address: ContractAddress::ContractPackageHash(contract_package_hash), + target_method, + .. 
+ }) => { + runtime::call_versioned_contract::<()>( + *contract_package_hash, + None, + target_method, + args, + ); + } + Some(Call { + contract_address: ContractAddress::ContractHash(contract_hash), + target_method, + .. + }) => { + runtime::call_contract::<()>(*contract_hash, target_method, args); + } + _ => runtime::revert(ApiError::User(0)), + } +} diff --git a/smart_contracts/contracts/test/get-call-stack-recursive-subcall/Cargo.toml b/smart_contracts/contracts/test/get-call-stack-recursive-subcall/Cargo.toml new file mode 100644 index 0000000000..6428fced56 --- /dev/null +++ b/smart_contracts/contracts/test/get-call-stack-recursive-subcall/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "get-call-stack-recursive-subcall" +version = "0.1.0" +authors = ["Daniel Werner "] +edition = "2021" + +[[bin]] +name = "get_call_stack_recursive_subcall" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[features] +default = ["casper-contract/default"] + +[dependencies] +casper-contract = { path = "../../../contract", default-features = false } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/get-call-stack-recursive-subcall/src/lib.rs b/smart_contracts/contracts/test/get-call-stack-recursive-subcall/src/lib.rs new file mode 100644 index 0000000000..dc0570db87 --- /dev/null +++ b/smart_contracts/contracts/test/get-call-stack-recursive-subcall/src/lib.rs @@ -0,0 +1,209 @@ +#![no_std] + +extern crate alloc; + +use alloc::{string::String, vec::Vec}; + +use casper_contract::{ + contract_api::{account, runtime, storage, system}, + unwrap_or_revert::UnwrapOrRevert, +}; +use casper_types::{ + bytesrepr, + bytesrepr::{Error, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + contracts::{ContractHash, ContractPackageHash}, + runtime_args, ApiError, CLType, CLTyped, EntryPointType, Key, Phase, RuntimeArgs, Tagged, URef, + U512, +}; + +pub const CONTRACT_PACKAGE_NAME: &str = "forwarder"; +pub const PACKAGE_ACCESS_KEY_NAME: &str = 
"forwarder_access"; +pub const CONTRACT_NAME: &str = "our_contract_name"; + +pub const METHOD_FORWARDER_CONTRACT_NAME: &str = "forwarder_contract"; +pub const METHOD_FORWARDER_SESSION_NAME: &str = "forwarder_session"; + +pub const ARG_CALLS: &str = "calls"; +pub const ARG_CURRENT_DEPTH: &str = "current_depth"; + +const DEFAULT_PAYMENT: u64 = 1_500_000_000_000; + +#[repr(u8)] +enum ContractAddressTag { + ContractHash = 0, + ContractPackageHash, +} + +#[derive(Debug, Copy, Clone)] +pub enum ContractAddress { + ContractHash(ContractHash), + ContractPackageHash(ContractPackageHash), +} + +impl Tagged for ContractAddress { + fn tag(&self) -> u8 { + match self { + ContractAddress::ContractHash(_) => ContractAddressTag::ContractHash as u8, + ContractAddress::ContractPackageHash(_) => { + ContractAddressTag::ContractPackageHash as u8 + } + } + } +} + +impl ToBytes for ContractAddress { + fn to_bytes(&self) -> Result, Error> { + let mut result = bytesrepr::allocate_buffer(self)?; + result.push(self.tag()); + match self { + ContractAddress::ContractHash(contract_hash) => { + result.append(&mut contract_hash.to_bytes()?) + } + ContractAddress::ContractPackageHash(contract_package_hash) => { + result.append(&mut contract_package_hash.to_bytes()?) 
+ } + } + Ok(result) + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + + match self { + ContractAddress::ContractHash(contract_hash) => contract_hash.serialized_length(), + ContractAddress::ContractPackageHash(contract_package_hash) => { + contract_package_hash.serialized_length() + } + } + } +} + +impl FromBytes for ContractAddress { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (tag, remainder): (u8, &[u8]) = FromBytes::from_bytes(bytes)?; + match tag { + tag if tag == ContractAddressTag::ContractHash as u8 => { + let (contract_hash, remainder) = ContractHash::from_bytes(remainder)?; + Ok((ContractAddress::ContractHash(contract_hash), remainder)) + } + tag if tag == ContractAddressTag::ContractPackageHash as u8 => { + let (contract_package_hash, remainder) = + ContractPackageHash::from_bytes(remainder)?; + Ok(( + ContractAddress::ContractPackageHash(contract_package_hash), + remainder, + )) + } + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +#[derive(Debug, Clone)] +pub struct Call { + pub contract_address: ContractAddress, + pub target_method: String, + pub entry_point_type: EntryPointType, +} + +impl ToBytes for Call { + fn to_bytes(&self) -> Result, Error> { + let mut result = bytesrepr::allocate_buffer(self)?; + result.append(&mut self.contract_address.to_bytes()?); + result.append(&mut self.target_method.to_bytes()?); + result.append(&mut self.entry_point_type.to_bytes()?); + Ok(result) + } + + fn serialized_length(&self) -> usize { + self.contract_address.serialized_length() + + self.target_method.serialized_length() + + self.entry_point_type.serialized_length() + } +} + +impl FromBytes for Call { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (contract_address, remainder) = ContractAddress::from_bytes(bytes)?; + let (target_method, remainder) = String::from_bytes(remainder)?; + let (entry_point_type, remainder) = EntryPointType::from_bytes(remainder)?; + Ok(( + Call { + 
contract_address, + target_method, + entry_point_type, + }, + remainder, + )) + } +} + +impl CLTyped for Call { + fn cl_type() -> CLType { + CLType::Any + } +} + +pub fn standard_payment(amount: U512) { + const METHOD_GET_PAYMENT_PURSE: &str = "get_payment_purse"; + + let main_purse = account::get_main_purse(); + + let handle_payment_pointer = system::get_handle_payment(); + + let payment_purse: URef = runtime::call_contract( + handle_payment_pointer, + METHOD_GET_PAYMENT_PURSE, + RuntimeArgs::default(), + ); + + system::transfer_from_purse_to_purse(main_purse, payment_purse, amount, None).unwrap_or_revert() +} + +pub fn recurse() { + let calls: Vec = runtime::get_named_arg(ARG_CALLS); + let current_depth: u8 = runtime::get_named_arg(ARG_CURRENT_DEPTH); + + // The important bit + { + let call_stack = runtime::get_call_stack(); + let name = alloc::format!("call_stack-{}", current_depth); + let call_stack_at = storage::new_uref(call_stack); + runtime::put_key(&name, Key::URef(call_stack_at)); + } + + if current_depth == 0 && runtime::get_phase() == Phase::Payment { + standard_payment(U512::from(DEFAULT_PAYMENT)) + } + + if current_depth == calls.len() as u8 { + return; + } + + let args = runtime_args! { + ARG_CALLS => calls.clone(), + ARG_CURRENT_DEPTH => current_depth + 1u8, + }; + + match calls.get(current_depth as usize) { + Some(Call { + contract_address: ContractAddress::ContractPackageHash(contract_package_hash), + target_method, + .. + }) => { + runtime::call_versioned_contract::<()>( + *contract_package_hash, + None, + target_method, + args, + ); + } + Some(Call { + contract_address: ContractAddress::ContractHash(contract_hash), + target_method, + .. 
+ }) => { + runtime::call_contract::<()>(*contract_hash, target_method, args); + } + _ => runtime::revert(ApiError::User(0)), + } +} diff --git a/smart_contracts/contracts/test/get-call-stack-recursive-subcall/src/main.rs b/smart_contracts/contracts/test/get-call-stack-recursive-subcall/src/main.rs new file mode 100644 index 0000000000..fa6bf357bf --- /dev/null +++ b/smart_contracts/contracts/test/get-call-stack-recursive-subcall/src/main.rs @@ -0,0 +1,69 @@ +#![no_std] +#![no_main] + +extern crate alloc; + +use alloc::{boxed::Box, string::ToString, vec}; + +use casper_contract::contract_api::{runtime, storage}; +use casper_types::{ + CLType, EntityEntryPoint, EntryPointAccess, EntryPointPayment, EntryPointType, EntryPoints, + Key, Parameter, +}; + +use get_call_stack_recursive_subcall::{ + ARG_CALLS, ARG_CURRENT_DEPTH, CONTRACT_NAME, CONTRACT_PACKAGE_NAME, + METHOD_FORWARDER_CONTRACT_NAME, METHOD_FORWARDER_SESSION_NAME, PACKAGE_ACCESS_KEY_NAME, +}; + +#[no_mangle] +pub extern "C" fn forwarder_contract() { + get_call_stack_recursive_subcall::recurse() +} + +#[no_mangle] +pub extern "C" fn forwarder_session() { + get_call_stack_recursive_subcall::recurse() +} + +#[no_mangle] +pub extern "C" fn call() { + let entry_points = { + let mut entry_points = EntryPoints::new(); + let forwarder_contract_entry_point = EntityEntryPoint::new( + METHOD_FORWARDER_CONTRACT_NAME.to_string(), + vec![ + Parameter::new(ARG_CALLS, CLType::List(Box::new(CLType::Any))), + Parameter::new(ARG_CURRENT_DEPTH, CLType::U8), + ], + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + let forwarder_session_entry_point = EntityEntryPoint::new( + METHOD_FORWARDER_SESSION_NAME.to_string(), + vec![ + Parameter::new(ARG_CALLS, CLType::List(Box::new(CLType::Any))), + Parameter::new(ARG_CURRENT_DEPTH, CLType::U8), + ], + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Caller, + EntryPointPayment::Caller, + ); + 
entry_points.add_entry_point(forwarder_contract_entry_point); + entry_points.add_entry_point(forwarder_session_entry_point); + entry_points + }; + + let (contract_hash, _contract_version) = storage::new_contract( + entry_points, + None, + Some(CONTRACT_PACKAGE_NAME.to_string()), + Some(PACKAGE_ACCESS_KEY_NAME.to_string()), + None, + ); + + runtime::put_key(CONTRACT_NAME, Key::Hash(contract_hash.value())); +} diff --git a/smart_contracts/contracts/test/get-caller-subcall/Cargo.toml b/smart_contracts/contracts/test/get-caller-subcall/Cargo.toml index f621a44038..3bbfc407ba 100644 --- a/smart_contracts/contracts/test/get-caller-subcall/Cargo.toml +++ b/smart_contracts/contracts/test/get-caller-subcall/Cargo.toml @@ -1,8 +1,8 @@ [package] name = "get-caller-subcall" version = "0.1.0" -authors = ["Ed Hastings , Henry Till "] -edition = "2018" +authors = ["Ed Hastings , Henry Till "] +edition = "2021" [[bin]] name = "get_caller_subcall" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/get-caller-subcall/src/main.rs b/smart_contracts/contracts/test/get-caller-subcall/src/main.rs index 137672a33e..98d8ab761a 100644 --- a/smart_contracts/contracts/test/get-caller-subcall/src/main.rs +++ b/smart_contracts/contracts/test/get-caller-subcall/src/main.rs @@ -10,8 +10,8 @@ use casper_contract::{ unwrap_or_revert::UnwrapOrRevert, }; use casper_types::{ - account::AccountHash, CLType, CLValue, EntryPoint, EntryPointAccess, EntryPointType, - EntryPoints, RuntimeArgs, + account::AccountHash, CLType, CLValue, EntityEntryPoint, EntryPointAccess, EntryPointPayment, + EntryPointType, EntryPoints, RuntimeArgs, }; const ENTRY_POINT_NAME: &str = "get_caller_ext"; @@ -37,12 +37,13 @@ pub extern "C" fn call() { let entry_points = { let mut entry_points = 
EntryPoints::new(); // takes no args, ret's PublicKey - let entry_point = EntryPoint::new( + let entry_point = EntityEntryPoint::new( ENTRY_POINT_NAME.to_string(), Vec::new(), CLType::ByteArray(32), EntryPointAccess::Public, - EntryPointType::Contract, + EntryPointType::Called, + EntryPointPayment::Caller, ); entry_points.add_entry_point(entry_point); entry_points @@ -53,6 +54,7 @@ pub extern "C" fn call() { None, Some(HASH_KEY_NAME.to_string()), Some(ACCESS_KEY_NAME.to_string()), + None, ); let subcall_account_hash: AccountHash = diff --git a/smart_contracts/contracts/test/get-caller/Cargo.toml b/smart_contracts/contracts/test/get-caller/Cargo.toml index 2efeedb383..b7e9000ac4 100644 --- a/smart_contracts/contracts/test/get-caller/Cargo.toml +++ b/smart_contracts/contracts/test/get-caller/Cargo.toml @@ -1,8 +1,8 @@ [package] name = "get-caller" version = "0.1.0" -authors = ["Ed Hastings , Henry Till "] -edition = "2018" +authors = ["Ed Hastings , Henry Till "] +edition = "2021" [[bin]] name = "get_caller" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/get-payment-purse/Cargo.toml b/smart_contracts/contracts/test/get-payment-purse/Cargo.toml index acc04ba47c..c7d6d0faa2 100644 --- a/smart_contracts/contracts/test/get-payment-purse/Cargo.toml +++ b/smart_contracts/contracts/test/get-payment-purse/Cargo.toml @@ -2,7 +2,7 @@ name = "get-payment-purse" version = "0.1.0" authors = ["Michael Birch "] -edition = "2018" +edition = "2021" [[bin]] name = "get_payment_purse" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git 
a/smart_contracts/contracts/test/get-phase-payment/Cargo.toml b/smart_contracts/contracts/test/get-phase-payment/Cargo.toml index e1ff48b27c..e8c2db5761 100644 --- a/smart_contracts/contracts/test/get-phase-payment/Cargo.toml +++ b/smart_contracts/contracts/test/get-phase-payment/Cargo.toml @@ -2,7 +2,7 @@ name = "get-phase-payment" version = "0.1.0" authors = ["Michael Birch "] -edition = "2018" +edition = "2021" [[bin]] name = "get_phase_payment" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/get-phase/Cargo.toml b/smart_contracts/contracts/test/get-phase/Cargo.toml index 84433c09de..d2f8be822a 100644 --- a/smart_contracts/contracts/test/get-phase/Cargo.toml +++ b/smart_contracts/contracts/test/get-phase/Cargo.toml @@ -2,7 +2,7 @@ name = "get-phase" version = "0.1.0" authors = ["Michael Birch "] -edition = "2018" +edition = "2021" [[bin]] name = "get_phase" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/gh-1470-regression-call/Cargo.toml b/smart_contracts/contracts/test/gh-1470-regression-call/Cargo.toml new file mode 100644 index 0000000000..19831a7110 --- /dev/null +++ b/smart_contracts/contracts/test/gh-1470-regression-call/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "gh-1470-regression-call" +version = "0.1.0" +authors = ["Michał Papierski "] +edition = "2021" + +[[bin]] +name = "gh_1470_regression_call" +path = "src/bin/main.rs" +bench = false +doctest = false +test = false + +[features] +default = ["casper-contract/default", "gh-1470-regression/default"] + +[dependencies] +casper-contract = { path 
= "../../../contract", default-features = false } +casper-types = { path = "../../../../types" } +gh-1470-regression = { path = "../gh-1470-regression", default-features = false } diff --git a/smart_contracts/contracts/test/gh-1470-regression-call/src/bin/main.rs b/smart_contracts/contracts/test/gh-1470-regression-call/src/bin/main.rs new file mode 100644 index 0000000000..b4dcc22a1e --- /dev/null +++ b/smart_contracts/contracts/test/gh-1470-regression-call/src/bin/main.rs @@ -0,0 +1,180 @@ +#![no_std] +#![no_main] + +extern crate alloc; + +use alloc::string::String; +use core::str::FromStr; +use gh_1470_regression_call::{ARG_CONTRACT_HASH, ARG_CONTRACT_PACKAGE_HASH, ARG_TEST_METHOD}; + +use casper_contract::{contract_api::runtime, unwrap_or_revert::UnwrapOrRevert}; +use casper_types::{runtime_args, AddressableEntityHash, PackageHash}; + +use gh_1470_regression_call::TestMethod; + +#[no_mangle] +pub extern "C" fn call() { + let test_method = { + let arg_test_method: String = runtime::get_named_arg(ARG_TEST_METHOD); + TestMethod::from_str(&arg_test_method).unwrap_or_revert() + }; + + let correct_runtime_args = runtime_args! { + gh_1470_regression::ARG3 => gh_1470_regression::Arg3Type::default(), + gh_1470_regression::ARG2 => gh_1470_regression::Arg2Type::default(), + gh_1470_regression::ARG1 => gh_1470_regression::Arg1Type::default(), + }; + + let no_runtime_args = runtime_args! {}; + + let type_mismatch_runtime_args = runtime_args! { + gh_1470_regression::ARG2 => gh_1470_regression::Arg1Type::default(), + gh_1470_regression::ARG3 => gh_1470_regression::Arg2Type::default(), + gh_1470_regression::ARG1 => gh_1470_regression::Arg3Type::default(), + }; + + let optional_type_mismatch_runtime_args = runtime_args! 
{ + gh_1470_regression::ARG1 => gh_1470_regression::Arg1Type::default(), + gh_1470_regression::ARG2 => gh_1470_regression::Arg2Type::default(), + gh_1470_regression::ARG3 => gh_1470_regression::Arg4Type::default(), + }; + + let correct_without_optional_args = runtime_args! { + gh_1470_regression::ARG2 => gh_1470_regression::Arg2Type::default(), + gh_1470_regression::ARG1 => gh_1470_regression::Arg1Type::default(), + }; + + let extra_runtime_args = runtime_args! { + gh_1470_regression::ARG3 => gh_1470_regression::Arg3Type::default(), + gh_1470_regression::ARG2 => gh_1470_regression::Arg2Type::default(), + gh_1470_regression::ARG1 => gh_1470_regression::Arg1Type::default(), + gh_1470_regression::ARG4 => gh_1470_regression::Arg4Type::default(), + gh_1470_regression::ARG5 => gh_1470_regression::Arg5Type::default(), + }; + + assert_ne!(correct_runtime_args, optional_type_mismatch_runtime_args); + + match test_method { + TestMethod::CallDoNothing => { + let contract_hash: AddressableEntityHash = runtime::get_named_arg(ARG_CONTRACT_HASH); + + runtime::call_contract::<()>( + contract_hash.into(), + gh_1470_regression::RESTRICTED_DO_NOTHING_ENTRYPOINT, + correct_runtime_args, + ); + } + TestMethod::CallVersionedDoNothing => { + let contract_package_hash: PackageHash = + runtime::get_named_arg(ARG_CONTRACT_PACKAGE_HASH); + + runtime::call_versioned_contract::<()>( + contract_package_hash.into(), + None, + gh_1470_regression::RESTRICTED_DO_NOTHING_ENTRYPOINT, + correct_runtime_args, + ); + } + TestMethod::CallDoNothingNoArgs => { + let contract_hash: AddressableEntityHash = runtime::get_named_arg(ARG_CONTRACT_HASH); + + runtime::call_contract::<()>( + contract_hash.into(), + gh_1470_regression::RESTRICTED_DO_NOTHING_ENTRYPOINT, + no_runtime_args, + ); + } + TestMethod::CallVersionedDoNothingNoArgs => { + let contract_package_hash: PackageHash = + runtime::get_named_arg(ARG_CONTRACT_PACKAGE_HASH); + + runtime::call_versioned_contract::<()>( + contract_package_hash.into(), + 
None, + gh_1470_regression::RESTRICTED_DO_NOTHING_ENTRYPOINT, + no_runtime_args, + ); + } + TestMethod::CallDoNothingTypeMismatch => { + let contract_hash: AddressableEntityHash = runtime::get_named_arg(ARG_CONTRACT_HASH); + + runtime::call_contract::<()>( + contract_hash.into(), + gh_1470_regression::RESTRICTED_DO_NOTHING_ENTRYPOINT, + type_mismatch_runtime_args, + ); + } + + TestMethod::CallVersionedDoNothingTypeMismatch => { + let contract_package_hash: PackageHash = + runtime::get_named_arg(ARG_CONTRACT_PACKAGE_HASH); + + runtime::call_versioned_contract::<()>( + contract_package_hash.into(), + None, + gh_1470_regression::RESTRICTED_DO_NOTHING_ENTRYPOINT, + type_mismatch_runtime_args, + ); + } + TestMethod::CallDoNothingNoOptionals => { + let contract_hash: AddressableEntityHash = runtime::get_named_arg(ARG_CONTRACT_HASH); + + runtime::call_contract::<()>( + contract_hash.into(), + gh_1470_regression::RESTRICTED_DO_NOTHING_ENTRYPOINT, + correct_without_optional_args, + ); + } + TestMethod::CallVersionedDoNothingNoOptionals => { + let contract_package_hash: PackageHash = + runtime::get_named_arg(ARG_CONTRACT_PACKAGE_HASH); + + runtime::call_versioned_contract::<()>( + contract_package_hash.into(), + None, + gh_1470_regression::RESTRICTED_DO_NOTHING_ENTRYPOINT, + correct_without_optional_args, + ); + } + TestMethod::CallDoNothingExtra => { + let contract_hash: AddressableEntityHash = runtime::get_named_arg(ARG_CONTRACT_HASH); + + runtime::call_contract::<()>( + contract_hash.into(), + gh_1470_regression::RESTRICTED_WITH_EXTRA_ARG_ENTRYPOINT, + extra_runtime_args, + ); + } + TestMethod::CallVersionedDoNothingExtra => { + let contract_package_hash: PackageHash = + runtime::get_named_arg(ARG_CONTRACT_PACKAGE_HASH); + + runtime::call_versioned_contract::<()>( + contract_package_hash.into(), + None, + gh_1470_regression::RESTRICTED_WITH_EXTRA_ARG_ENTRYPOINT, + extra_runtime_args, + ); + } + TestMethod::CallDoNothingOptionalTypeMismatch => { + let contract_hash: 
AddressableEntityHash = runtime::get_named_arg(ARG_CONTRACT_HASH); + + runtime::call_contract::<()>( + contract_hash.into(), + gh_1470_regression::RESTRICTED_DO_NOTHING_ENTRYPOINT, + optional_type_mismatch_runtime_args, + ); + } + TestMethod::CallVersionedDoNothingOptionalTypeMismatch => { + let contract_package_hash: PackageHash = + runtime::get_named_arg(ARG_CONTRACT_PACKAGE_HASH); + + runtime::call_versioned_contract::<()>( + contract_package_hash.into(), + None, + gh_1470_regression::RESTRICTED_DO_NOTHING_ENTRYPOINT, + optional_type_mismatch_runtime_args, + ); + } + } +} diff --git a/smart_contracts/contracts/test/gh-1470-regression-call/src/lib.rs b/smart_contracts/contracts/test/gh-1470-regression-call/src/lib.rs new file mode 100644 index 0000000000..b96aec01b6 --- /dev/null +++ b/smart_contracts/contracts/test/gh-1470-regression-call/src/lib.rs @@ -0,0 +1,91 @@ +#![no_std] + +use core::str::FromStr; + +use casper_types::ApiError; + +pub const ARG_CONTRACT_HASH: &str = "payment_contract"; +pub const ARG_CONTRACT_PACKAGE_HASH: &str = "contract_package_hash"; +pub const ARG_TEST_METHOD: &str = "test_method"; + +#[repr(u16)] +pub enum Error { + InvalidMethod = 0, +} + +impl From for ApiError { + fn from(error: Error) -> Self { + ApiError::User(error as u16) + } +} + +pub const METHOD_CALL_DO_NOTHING: &str = "call_do_nothing"; +pub const METHOD_CALL_VERSIONED_DO_NOTHING: &str = "call_versioned_do_nothing"; + +pub const METHOD_CALL_DO_NOTHING_NO_ARGS: &str = "call_do_nothing_no_args"; +pub const METHOD_CALL_VERSIONED_DO_NOTHING_NO_ARGS: &str = "call_versioned_do_nothing_no_args"; + +pub const METHOD_CALL_DO_NOTHING_TYPE_MISMATCH: &str = "call_do_nothing_type_mismatch"; +pub const METHOD_CALL_VERSIONED_DO_NOTHING_TYPE_MISMATCH: &str = + "call_versioned_do_nothing_type_mismatch"; + +pub const METHOD_CALL_DO_NOTHING_OPTIONAL_TYPE_MISMATCH: &str = + "call_do_nothing_optional_type_mismatch"; +pub const METHOD_CALL_VERSIONED_DO_NOTHING_OPTIONAL_TYPE_MISMATCH: &str = + 
"call_versioned_do_nothing_optional_type_mismatch"; + +pub const METHOD_CALL_DO_NOTHING_NO_OPTIONALS: &str = "call_do_nothing_no_optionals"; +pub const METHOD_CALL_VERSIONED_DO_NOTHING_NO_OPTIONALS: &str = + "call_versioned_do_nothing_no_optionals"; + +pub const METHOD_CALL_DO_NOTHING_EXTRA: &str = "call_do_nothing_extra"; +pub const METHOD_CALL_VERSIONED_DO_NOTHING_EXTRA: &str = "call_versioned_do_nothing_extra"; + +pub enum TestMethod { + CallDoNothing, + CallVersionedDoNothing, + CallDoNothingNoArgs, + CallVersionedDoNothingNoArgs, + CallDoNothingTypeMismatch, + CallVersionedDoNothingTypeMismatch, + CallDoNothingOptionalTypeMismatch, + CallVersionedDoNothingOptionalTypeMismatch, + CallDoNothingNoOptionals, + CallVersionedDoNothingNoOptionals, + CallDoNothingExtra, + CallVersionedDoNothingExtra, +} + +impl FromStr for TestMethod { + type Err = Error; + + fn from_str(s: &str) -> Result { + if s == METHOD_CALL_DO_NOTHING { + Ok(TestMethod::CallDoNothing) + } else if s == METHOD_CALL_VERSIONED_DO_NOTHING { + Ok(TestMethod::CallVersionedDoNothing) + } else if s == METHOD_CALL_DO_NOTHING_NO_ARGS { + Ok(TestMethod::CallDoNothingNoArgs) + } else if s == METHOD_CALL_VERSIONED_DO_NOTHING_NO_ARGS { + Ok(TestMethod::CallVersionedDoNothingNoArgs) + } else if s == METHOD_CALL_DO_NOTHING_TYPE_MISMATCH { + Ok(TestMethod::CallDoNothingTypeMismatch) + } else if s == METHOD_CALL_VERSIONED_DO_NOTHING_TYPE_MISMATCH { + Ok(TestMethod::CallVersionedDoNothingTypeMismatch) + } else if s == METHOD_CALL_DO_NOTHING_OPTIONAL_TYPE_MISMATCH { + Ok(TestMethod::CallDoNothingOptionalTypeMismatch) + } else if s == METHOD_CALL_VERSIONED_DO_NOTHING_OPTIONAL_TYPE_MISMATCH { + Ok(TestMethod::CallVersionedDoNothingOptionalTypeMismatch) + } else if s == METHOD_CALL_DO_NOTHING_NO_OPTIONALS { + Ok(TestMethod::CallDoNothingNoOptionals) + } else if s == METHOD_CALL_VERSIONED_DO_NOTHING_NO_OPTIONALS { + Ok(TestMethod::CallVersionedDoNothingNoOptionals) + } else if s == METHOD_CALL_DO_NOTHING_EXTRA { + 
Ok(TestMethod::CallDoNothingExtra) + } else if s == METHOD_CALL_VERSIONED_DO_NOTHING_EXTRA { + Ok(TestMethod::CallVersionedDoNothingExtra) + } else { + Err(Error::InvalidMethod) + } + } +} diff --git a/smart_contracts/contracts/test/gh-1470-regression/Cargo.toml b/smart_contracts/contracts/test/gh-1470-regression/Cargo.toml new file mode 100644 index 0000000000..0e71684001 --- /dev/null +++ b/smart_contracts/contracts/test/gh-1470-regression/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "gh-1470-regression" +version = "0.1.0" +authors = ["Michał Papierski "] +edition = "2021" + +[[bin]] +name = "gh_1470_regression" +path = "src/bin/main.rs" +bench = false +doctest = false +test = false + +[features] +default = ["casper-contract/default"] + +[dependencies] +casper-contract = { path = "../../../contract", default-features = false } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/gh-1470-regression/src/bin/main.rs b/smart_contracts/contracts/test/gh-1470-regression/src/bin/main.rs new file mode 100644 index 0000000000..6ea574a121 --- /dev/null +++ b/smart_contracts/contracts/test/gh-1470-regression/src/bin/main.rs @@ -0,0 +1,92 @@ +#![no_std] +#![no_main] + +#[macro_use] +extern crate alloc; + +use alloc::collections::BTreeMap; +use casper_contract::contract_api::{runtime, storage}; + +use casper_types::{ + contracts::NamedKeys, CLType, CLTyped, EntityEntryPoint, EntryPointAccess, EntryPointPayment, + EntryPointType, EntryPoints, Group, Key, Parameter, +}; +use gh_1470_regression::{ + Arg1Type, Arg2Type, Arg3Type, Arg4Type, Arg5Type, ARG1, ARG2, ARG3, ARG4, ARG5, + CONTRACT_HASH_NAME, GROUP_LABEL, GROUP_UREF_NAME, PACKAGE_HASH_NAME, + RESTRICTED_DO_NOTHING_ENTRYPOINT, RESTRICTED_WITH_EXTRA_ARG_ENTRYPOINT, +}; + +#[no_mangle] +pub extern "C" fn restricted_do_nothing_contract() { + let _arg1: Arg1Type = runtime::get_named_arg(ARG1); + let _arg2: Arg2Type = runtime::get_named_arg(ARG2); + + // ARG3 is defined in entrypoint but 
optional and might not be passed in all cases +} + +#[no_mangle] +pub extern "C" fn restricted_with_extra_arg() { + let _arg1: Arg1Type = runtime::get_named_arg(ARG1); + let _arg2: Arg2Type = runtime::get_named_arg(ARG2); + let _arg3: Arg3Type = runtime::get_named_arg(ARG3); + + // Those arguments are not present in entry point definition but are always passed by caller + let _arg4: Arg4Type = runtime::get_named_arg(ARG4); + let _arg5: Arg5Type = runtime::get_named_arg(ARG5); +} + +#[no_mangle] +pub extern "C" fn call() { + let (contract_package_hash, _access_uref) = storage::create_contract_package_at_hash(); + + let admin_group = storage::create_contract_user_group( + contract_package_hash, + GROUP_LABEL, + 1, + Default::default(), + ) + .unwrap(); + + runtime::put_key(GROUP_UREF_NAME, admin_group[0].into()); + + let mut entry_points = EntryPoints::new(); + + entry_points.add_entry_point(EntityEntryPoint::new( + RESTRICTED_DO_NOTHING_ENTRYPOINT, + vec![ + Parameter::new(ARG2, Arg2Type::cl_type()), + Parameter::new(ARG1, Arg1Type::cl_type()), + Parameter::new(ARG3, Arg3Type::cl_type()), + ], + CLType::Unit, + EntryPointAccess::Groups(vec![Group::new(GROUP_LABEL)]), + EntryPointType::Called, + EntryPointPayment::Caller, + )); + + entry_points.add_entry_point(EntityEntryPoint::new( + RESTRICTED_WITH_EXTRA_ARG_ENTRYPOINT, + vec![ + Parameter::new(ARG3, Arg3Type::cl_type()), + Parameter::new(ARG2, Arg2Type::cl_type()), + Parameter::new(ARG1, Arg1Type::cl_type()), + ], + CLType::Unit, + EntryPointAccess::Groups(vec![Group::new(GROUP_LABEL)]), + EntryPointType::Called, + EntryPointPayment::Caller, + )); + + let named_keys = NamedKeys::new(); + + let (contract_hash, _) = storage::add_contract_version( + contract_package_hash, + entry_points, + named_keys, + BTreeMap::new(), + ); + + runtime::put_key(CONTRACT_HASH_NAME, Key::Hash(contract_hash.value())); + runtime::put_key(PACKAGE_HASH_NAME, contract_package_hash.into()); +} diff --git 
a/smart_contracts/contracts/test/gh-1470-regression/src/lib.rs b/smart_contracts/contracts/test/gh-1470-regression/src/lib.rs new file mode 100644 index 0000000000..e449a51e6a --- /dev/null +++ b/smart_contracts/contracts/test/gh-1470-regression/src/lib.rs @@ -0,0 +1,29 @@ +#![no_std] + +extern crate alloc; + +use alloc::string::String; + +use casper_types::U512; + +pub const GROUP_LABEL: &str = "group_label"; +pub const GROUP_UREF_NAME: &str = "group_uref"; +pub const CONTRACT_HASH_NAME: &str = "contract_hash"; +pub const PACKAGE_HASH_NAME: &str = "contract_package_hash"; +pub const RESTRICTED_DO_NOTHING_ENTRYPOINT: &str = "restricted_do_nothing_contract"; +pub const RESTRICTED_WITH_EXTRA_ARG_ENTRYPOINT: &str = "restricted_with_extra_arg"; + +pub const ARG1: &str = "arg1"; +pub type Arg1Type = String; + +pub const ARG2: &str = "arg2"; +pub type Arg2Type = U512; + +pub const ARG3: &str = "arg3"; +pub type Arg3Type = Option; + +pub const ARG4: &str = "arg4"; +pub type Arg4Type = bool; + +pub const ARG5: &str = "arg5"; +pub type Arg5Type = Option; diff --git a/smart_contracts/contracts/test/gh-1688-regression/Cargo.toml b/smart_contracts/contracts/test/gh-1688-regression/Cargo.toml new file mode 100644 index 0000000000..9b17841bc3 --- /dev/null +++ b/smart_contracts/contracts/test/gh-1688-regression/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "gh-1688-regression" +version = "0.1.0" +authors = ["Michał Papierski "] +edition = "2021" + +[[bin]] +name = "gh_1688_regression" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/gh-1688-regression/src/main.rs b/smart_contracts/contracts/test/gh-1688-regression/src/main.rs new file mode 100644 index 0000000000..c8b53dd104 --- /dev/null +++ b/smart_contracts/contracts/test/gh-1688-regression/src/main.rs @@ -0,0 +1,48 @@ +#![no_main] +#![no_std] + 
+extern crate alloc; + +use alloc::string::ToString; +use casper_contract::contract_api::{runtime, storage}; +use casper_types::{ + addressable_entity::Parameters, AddressableEntityHash, CLType, EntityEntryPoint, + EntryPointAccess, EntryPointPayment, EntryPointType, EntryPoints, Key, +}; + +const METHOD_PUT_KEY: &str = "put_key"; +const NEW_KEY_NAME: &str = "Hello"; +const NEW_KEY_VALUE: &str = "World"; +const CONTRACT_PACKAGE_KEY: &str = "contract_package"; +const CONTRACT_HASH_KEY: &str = "contract_hash"; + +#[no_mangle] +fn put_key() { + let value = storage::new_uref(NEW_KEY_VALUE); + runtime::put_key(NEW_KEY_NAME, value.into()); +} + +#[no_mangle] +fn call() { + let mut entry_points = EntryPoints::new(); + entry_points.add_entry_point(EntityEntryPoint::new( + METHOD_PUT_KEY, + Parameters::new(), + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + )); + + let (contract_hash, _version) = storage::new_contract( + entry_points, + None, + Some(CONTRACT_PACKAGE_KEY.to_string()), + None, + None, + ); + runtime::put_key( + CONTRACT_HASH_KEY, + Key::contract_entity_key(AddressableEntityHash::new(contract_hash.value())), + ); +} diff --git a/smart_contracts/contracts/test/gh-2280-regression-call/Cargo.toml b/smart_contracts/contracts/test/gh-2280-regression-call/Cargo.toml new file mode 100644 index 0000000000..7f8117fa4a --- /dev/null +++ b/smart_contracts/contracts/test/gh-2280-regression-call/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "gh-2280-regression-call" +version = "0.1.0" +authors = ["Michał Papierski "] +edition = "2021" + +[[bin]] +name = "gh_2280_regression_call" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/gh-2280-regression-call/src/main.rs b/smart_contracts/contracts/test/gh-2280-regression-call/src/main.rs new file mode 
100644 index 0000000000..d94b949a49 --- /dev/null +++ b/smart_contracts/contracts/test/gh-2280-regression-call/src/main.rs @@ -0,0 +1,27 @@ +#![no_std] +#![no_main] + +use casper_contract::contract_api::runtime; + +use casper_types::{ + account::AccountHash, contracts::ContractHash, runtime_args, AddressableEntityHash, +}; + +const FAUCET_NAME: &str = "faucet"; +const ARG_TARGET: &str = "target"; +const ARG_CONTRACT_HASH: &str = "contract_hash"; + +fn call_faucet(contract_hash: ContractHash, target: AccountHash) { + let faucet_args = runtime_args! { + ARG_TARGET => target, + }; + runtime::call_contract(contract_hash, FAUCET_NAME, faucet_args) +} + +#[no_mangle] +pub extern "C" fn call() { + let contract_hash: AddressableEntityHash = runtime::get_named_arg(ARG_CONTRACT_HASH); + let target: AccountHash = runtime::get_named_arg(ARG_TARGET); + + call_faucet(contract_hash.into(), target); +} diff --git a/smart_contracts/contracts/test/gh-2280-regression/Cargo.toml b/smart_contracts/contracts/test/gh-2280-regression/Cargo.toml new file mode 100644 index 0000000000..159cdd32f1 --- /dev/null +++ b/smart_contracts/contracts/test/gh-2280-regression/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "gh-2280-regression" +version = "0.1.0" +authors = ["Michał Papierski "] +edition = "2021" + +[[bin]] +name = "gh_2280_regression" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/gh-2280-regression/src/main.rs b/smart_contracts/contracts/test/gh-2280-regression/src/main.rs new file mode 100644 index 0000000000..d2481674fc --- /dev/null +++ b/smart_contracts/contracts/test/gh-2280-regression/src/main.rs @@ -0,0 +1,96 @@ +#![no_std] +#![no_main] + +extern crate alloc; + +use alloc::{string::ToString, vec}; + +use casper_contract::{ + contract_api::{account, runtime, storage, system}, + 
unwrap_or_revert::UnwrapOrRevert, +}; + +use casper_types::{ + account::AccountHash, + addressable_entity::{ + EntityEntryPoint, EntryPointAccess, EntryPointType, EntryPoints, Parameter, + }, + AddressableEntityHash, CLType, CLTyped, EntryPointPayment, Key, NamedKeys, U512, +}; + +const FAUCET_NAME: &str = "faucet"; +const PACKAGE_HASH_KEY_NAME: &str = "gh_2280"; +const HASH_KEY_NAME: &str = "gh_2280_hash"; +const ACCESS_KEY_NAME: &str = "gh_2280_access"; +const ARG_TARGET: &str = "target"; +const CONTRACT_VERSION: &str = "contract_version"; +const ARG_FAUCET_FUNDS: &str = "faucet_initial_balance"; +const FAUCET_FUNDS_KEY: &str = "faucet_funds"; + +#[no_mangle] +pub extern "C" fn faucet() { + let purse_uref = runtime::get_key(FAUCET_FUNDS_KEY) + .and_then(Key::into_uref) + .unwrap_or_revert(); + + let account_hash: AccountHash = runtime::get_named_arg(ARG_TARGET); + system::transfer_from_purse_to_account(purse_uref, account_hash, U512::from(1u64), None) + .unwrap_or_revert(); +} + +#[no_mangle] +pub extern "C" fn call() { + let entry_points = { + let mut entry_points = EntryPoints::new(); + + let faucet_entrypoint = EntityEntryPoint::new( + FAUCET_NAME.to_string(), + vec![Parameter::new(ARG_TARGET, AccountHash::cl_type())], + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + + entry_points.add_entry_point(faucet_entrypoint); + entry_points + }; + + let faucet_initial_balance: U512 = runtime::get_named_arg(ARG_FAUCET_FUNDS); + + let named_keys = { + let faucet_funds = { + let purse = system::create_purse(); + + let id: Option = None; + system::transfer_from_purse_to_purse( + account::get_main_purse(), + purse, + faucet_initial_balance, + id, + ) + .unwrap_or_revert(); + + purse + }; + + let mut named_keys = NamedKeys::new(); + + named_keys.insert(FAUCET_FUNDS_KEY.to_string(), faucet_funds.into()); + + named_keys + }; + + let (contract_hash, contract_version) = storage::new_contract( + entry_points, + 
Some(named_keys), + Some(PACKAGE_HASH_KEY_NAME.to_string()), + Some(ACCESS_KEY_NAME.to_string()), + None, + ); + runtime::put_key(CONTRACT_VERSION, storage::new_uref(contract_version).into()); + runtime::put_key( + HASH_KEY_NAME, + Key::contract_entity_key(AddressableEntityHash::new(contract_hash.value())), + ); +} diff --git a/smart_contracts/contracts/test/gh-3097-regression-call/Cargo.toml b/smart_contracts/contracts/test/gh-3097-regression-call/Cargo.toml new file mode 100644 index 0000000000..4417604d63 --- /dev/null +++ b/smart_contracts/contracts/test/gh-3097-regression-call/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "gh-3097-regression-call" +version = "0.1.0" +authors = ["Michał Papierski "] +edition = "2021" + +[[bin]] +name = "gh_3097_regression_call" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/gh-3097-regression-call/src/main.rs b/smart_contracts/contracts/test/gh-3097-regression-call/src/main.rs new file mode 100644 index 0000000000..780d35fb51 --- /dev/null +++ b/smart_contracts/contracts/test/gh-3097-regression-call/src/main.rs @@ -0,0 +1,73 @@ +#![no_std] +#![no_main] + +extern crate alloc; + +use alloc::string::String; +use casper_contract::{contract_api::runtime, unwrap_or_revert::UnwrapOrRevert}; +use casper_types::{ + contracts::{ContractHash, ContractPackageHash, ContractVersion}, + ApiError, RuntimeArgs, +}; + +const CONTRACT_PACKAGE_HASH_KEY: &str = "contract_package_hash"; +const DO_SOMETHING_ENTRYPOINT: &str = "do_something"; +const ARG_METHOD: &str = "method"; +const ARG_CONTRACT_HASH_KEY: &str = "contract_hash_key"; +const ARG_MAJOR_VERSION: &str = "major_version"; +const ARG_CONTRACT_VERSION: &str = "contract_version"; +const METHOD_CALL_CONTRACT: &str = "call_contract"; +const METHOD_CALL_VERSIONED_CONTRACT: &str = "call_versioned_contract"; + 
+#[no_mangle] +pub extern "C" fn call() { + let method: String = runtime::get_named_arg(ARG_METHOD); + if method == METHOD_CALL_CONTRACT { + let contract_hash_key_name: String = runtime::get_named_arg(ARG_CONTRACT_HASH_KEY); + let contract_hash = runtime::get_key(&contract_hash_key_name) + .ok_or(ApiError::MissingKey) + .unwrap_or_revert() + .into_entity_hash_addr() + .ok_or(ApiError::UnexpectedKeyVariant) + .map(ContractHash::new) + .unwrap_or_revert(); + runtime::call_contract::<()>( + contract_hash, + DO_SOMETHING_ENTRYPOINT, + RuntimeArgs::default(), + ) + } else if method == METHOD_CALL_VERSIONED_CONTRACT { + let contract_package_hash = runtime::get_key(CONTRACT_PACKAGE_HASH_KEY) + .ok_or(ApiError::MissingKey) + .unwrap_or_revert() + .into_package_addr() + .ok_or(ApiError::UnexpectedKeyVariant) + .map(ContractPackageHash::new) + .unwrap_or_revert(); + + let major_version = runtime::get_named_arg(ARG_MAJOR_VERSION); + let contract_version = + runtime::get_named_arg::>(ARG_CONTRACT_VERSION); + match contract_version { + None => { + runtime::call_versioned_contract::<()>( + contract_package_hash, + None, + DO_SOMETHING_ENTRYPOINT, + RuntimeArgs::default(), + ); + } + Some(contract_version) => { + runtime::call_package_version::<()>( + contract_package_hash, + Some(major_version), + Some(contract_version), + DO_SOMETHING_ENTRYPOINT, + RuntimeArgs::default(), + ); + } + } + } else { + runtime::revert(ApiError::User(0)); + } +} diff --git a/smart_contracts/contracts/test/gh-3097-regression/Cargo.toml b/smart_contracts/contracts/test/gh-3097-regression/Cargo.toml new file mode 100644 index 0000000000..308d95693b --- /dev/null +++ b/smart_contracts/contracts/test/gh-3097-regression/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "gh-3097-regression" +version = "0.1.0" +authors = ["Michał Papierski "] +edition = "2021" + +[[bin]] +name = "gh_3097_regression" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = 
"../../../contract" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/gh-3097-regression/src/main.rs b/smart_contracts/contracts/test/gh-3097-regression/src/main.rs new file mode 100644 index 0000000000..184c6361f4 --- /dev/null +++ b/smart_contracts/contracts/test/gh-3097-regression/src/main.rs @@ -0,0 +1,74 @@ +#![no_std] +#![no_main] + +extern crate alloc; + +use alloc::collections::BTreeMap; + +use casper_contract::{ + contract_api::{runtime, storage}, + unwrap_or_revert::UnwrapOrRevert, +}; +use casper_types::{ + addressable_entity::Parameters, contracts::NamedKeys, CLType, EntityEntryPoint, + EntryPointAccess, EntryPointPayment, EntryPointType, EntryPoints, Key, +}; + +const CONTRACT_PACKAGE_HASH_KEY: &str = "contract_package_hash"; +const DISABLED_CONTRACT_HASH_KEY: &str = "disabled_contract_hash"; +const ENABLED_CONTRACT_HASH_KEY: &str = "enabled_contract_hash"; + +#[no_mangle] +pub extern "C" fn do_something() { + let _ = runtime::list_authorization_keys(); +} + +#[no_mangle] +pub extern "C" fn call() { + let entry_points = { + let mut entry_points = EntryPoints::new(); + + let do_something = EntityEntryPoint::new( + "do_something", + Parameters::new(), + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + + entry_points.add_entry_point(do_something); + + entry_points + }; + + let (contract_package_hash, _access_key) = storage::create_contract_package_at_hash(); + + let (disabled_contract_hash, _version) = storage::add_contract_version( + contract_package_hash, + entry_points.clone(), + NamedKeys::new(), + BTreeMap::new(), + ); + + let (enabled_contract_hash, _version) = storage::add_contract_version( + contract_package_hash, + entry_points, + NamedKeys::new(), + BTreeMap::new(), + ); + + runtime::put_key(CONTRACT_PACKAGE_HASH_KEY, contract_package_hash.into()); + + runtime::put_key( + DISABLED_CONTRACT_HASH_KEY, + Key::Hash(disabled_contract_hash.value()), + ); 
+ runtime::put_key( + ENABLED_CONTRACT_HASH_KEY, + Key::Hash(enabled_contract_hash.value()), + ); + + storage::disable_contract_version(contract_package_hash, disabled_contract_hash) + .unwrap_or_revert(); +} diff --git a/smart_contracts/contracts/test/gh-4771-regression/Cargo.toml b/smart_contracts/contracts/test/gh-4771-regression/Cargo.toml new file mode 100644 index 0000000000..1d2016660b --- /dev/null +++ b/smart_contracts/contracts/test/gh-4771-regression/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "gh-4771-regression" +version = "0.1.0" +authors = ["Rafał Chabowski "] +edition = "2021" + +[[bin]] +name = "gh_4771_regression" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/gh-4771-regression/src/main.rs b/smart_contracts/contracts/test/gh-4771-regression/src/main.rs new file mode 100644 index 0000000000..c540ae9393 --- /dev/null +++ b/smart_contracts/contracts/test/gh-4771-regression/src/main.rs @@ -0,0 +1,48 @@ +#![no_main] +#![no_std] + +extern crate alloc; + +use alloc::string::ToString; +use casper_contract::contract_api::{runtime, storage}; +use casper_types::{ + addressable_entity::Parameters, AddressableEntityHash, CLType, EntityEntryPoint, + EntryPointAccess, EntryPointPayment, EntryPointType, EntryPoints, Key, +}; + +const METHOD_TEST_ENTRY_POINT: &str = "test_entry_point"; +const NEW_KEY_NAME: &str = "Hello"; +const NEW_KEY_VALUE: &str = "World"; +const CONTRACT_PACKAGE_KEY: &str = "contract_package"; +const CONTRACT_HASH_KEY: &str = "contract_hash"; + +#[no_mangle] +fn test_entry_point() { + let value = storage::new_uref(NEW_KEY_VALUE); + runtime::put_key(NEW_KEY_NAME, value.into()); +} + +#[no_mangle] +fn call() { + let mut entry_points = EntryPoints::new(); + entry_points.add_entry_point(EntityEntryPoint::new( + METHOD_TEST_ENTRY_POINT, + Parameters::new(), + 
CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + )); + + let (contract_hash, _version) = storage::new_contract( + entry_points, + None, + Some(CONTRACT_PACKAGE_KEY.to_string()), + None, + None, + ); + runtime::put_key( + CONTRACT_HASH_KEY, + Key::contract_entity_key(AddressableEntityHash::new(contract_hash.value())), + ); +} diff --git a/smart_contracts/contracts/test/gh-4898-regression/Cargo.toml b/smart_contracts/contracts/test/gh-4898-regression/Cargo.toml new file mode 100644 index 0000000000..1489d13119 --- /dev/null +++ b/smart_contracts/contracts/test/gh-4898-regression/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "gh-4898-regression" +version = "0.1.0" +edition = "2021" + +[[bin]] +name = "gh_4898_regression" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } \ No newline at end of file diff --git a/smart_contracts/contracts/test/gh-4898-regression/src/main.rs b/smart_contracts/contracts/test/gh-4898-regression/src/main.rs new file mode 100644 index 0000000000..a3610e6e64 --- /dev/null +++ b/smart_contracts/contracts/test/gh-4898-regression/src/main.rs @@ -0,0 +1,22 @@ +#![no_std] +#![no_main] + +extern crate alloc; +use alloc::string::String; + +use casper_contract::contract_api::runtime; +use casper_types::Key; + +const ARG_DATA: &str = "data"; + +#[no_mangle] +fn is_key(key_str: &str) -> bool { + Key::from_formatted_str(key_str).is_ok() +} + +#[no_mangle] +pub extern "C" fn call() { + let data: String = runtime::get_named_arg(ARG_DATA); + + assert!(is_key(&data), "Data should be a key"); +} diff --git a/smart_contracts/contracts/test/gh-5058-regression/Cargo.toml b/smart_contracts/contracts/test/gh-5058-regression/Cargo.toml new file mode 100644 index 0000000000..3551e5aeec --- /dev/null +++ b/smart_contracts/contracts/test/gh-5058-regression/Cargo.toml @@ -0,0 +1,15 @@ 
+[package] +name = "gh-5058-regression" +version = "0.1.0" +edition = "2021" + +[[bin]] +name = "gh_5058_regression" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/gh-5058-regression/src/main.rs b/smart_contracts/contracts/test/gh-5058-regression/src/main.rs new file mode 100644 index 0000000000..7fce5f502a --- /dev/null +++ b/smart_contracts/contracts/test/gh-5058-regression/src/main.rs @@ -0,0 +1,61 @@ +#![no_main] +#![no_std] + +extern crate alloc; + +use casper_contract::{ + contract_api::{account, runtime, system}, + unwrap_or_revert::UnwrapOrRevert, +}; + +use casper_types::{ + runtime_args, system::handle_payment, ApiError, Phase, RuntimeArgs, URef, U512, +}; + +const ARG_AMOUNT: &str = "amount"; + +#[repr(u16)] +enum Error { + InvalidPhase, +} + +impl From for ApiError { + fn from(e: Error) -> Self { + ApiError::User(e as u16) + } +} + +fn get_payment_purse() -> URef { + runtime::call_contract( + system::get_handle_payment(), + handle_payment::METHOD_GET_PAYMENT_PURSE, + RuntimeArgs::default(), + ) +} + +fn set_refund_purse(new_refund_purse: URef) { + let args = runtime_args! 
{ + handle_payment::ARG_PURSE => new_refund_purse, + }; + + runtime::call_contract( + system::get_handle_payment(), + handle_payment::METHOD_SET_REFUND_PURSE, + args, + ) +} + +#[no_mangle] +pub extern "C" fn call() { + if runtime::get_phase() != Phase::Payment { + runtime::revert(Error::InvalidPhase); + } + + let amount: U512 = runtime::get_named_arg(ARG_AMOUNT); + let payment_purse = get_payment_purse(); + set_refund_purse(account::get_main_purse()); + + // transfer amount from named purse to payment purse, which will be used to pay for execution + system::transfer_from_purse_to_purse(account::get_main_purse(), payment_purse, amount, None) + .unwrap_or_revert(); +} diff --git a/smart_contracts/contracts/test/groups/Cargo.toml b/smart_contracts/contracts/test/groups/Cargo.toml index 36759e8a20..65e220cbe8 100644 --- a/smart_contracts/contracts/test/groups/Cargo.toml +++ b/smart_contracts/contracts/test/groups/Cargo.toml @@ -1,8 +1,8 @@ [package] name = "groups" version = "0.1.0" -authors = ["Michał Papierski "] -edition = "2018" +authors = ["Michał Papierski "] +edition = "2021" [[bin]] name = "groups" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/groups/src/main.rs b/smart_contracts/contracts/test/groups/src/main.rs index e0e2c69180..8493b407c1 100644 --- a/smart_contracts/contracts/test/groups/src/main.rs +++ b/smart_contracts/contracts/test/groups/src/main.rs @@ -4,18 +4,23 @@ #[macro_use] extern crate alloc; -use alloc::{collections::BTreeSet, string::ToString, vec::Vec}; +use alloc::{ + collections::{BTreeMap, BTreeSet}, + string::ToString, + vec::Vec, +}; use casper_contract::{ - contract_api::{runtime, storage}, + contract_api::{account, runtime, storage, system}, unwrap_or_revert::UnwrapOrRevert, }; use casper_types::{ - contracts::{ - 
EntryPoint, EntryPointAccess, EntryPointType, EntryPoints, NamedKeys, - CONTRACT_INITIAL_VERSION, - }, - runtime_args, CLType, ContractPackageHash, Key, Parameter, RuntimeArgs, URef, + addressable_entity::{EntityEntryPoint, EntryPointAccess, EntryPointType, EntryPoints}, + contracts::ContractPackageHash, + runtime_args, + system::{handle_payment, standard_payment}, + CLType, CLTyped, EntryPointPayment, Key, NamedKeys, Parameter, RuntimeArgs, URef, + ENTITY_INITIAL_VERSION, U512, }; const PACKAGE_HASH_KEY: &str = "package_hash_key"; @@ -28,6 +33,7 @@ const RESTRICTED_CONTRACT_CALLER_AS_SESSION: &str = "restricted_contract_caller_ const UNCALLABLE_SESSION: &str = "uncallable_session"; const UNCALLABLE_CONTRACT: &str = "uncallable_contract"; const CALL_RESTRICTED_ENTRY_POINTS: &str = "call_restricted_entry_points"; +const RESTRICTED_STANDARD_PAYMENT: &str = "restricted_standard_payment"; const ARG_PACKAGE_HASH: &str = "package_hash"; #[no_mangle] @@ -39,24 +45,25 @@ pub extern "C" fn restricted_contract() {} #[no_mangle] pub extern "C" fn restricted_session_caller() { let package_hash: Key = runtime::get_named_arg(ARG_PACKAGE_HASH); - let contract_version = Some(CONTRACT_INITIAL_VERSION); - let contract_package_hash = package_hash.into_hash().unwrap_or_revert().into(); + let contract_package_hash = package_hash + .into_entity_hash_addr() + .unwrap_or_revert() + .into(); runtime::call_versioned_contract( contract_package_hash, - contract_version, + Some(ENTITY_INITIAL_VERSION), RESTRICTED_SESSION, runtime_args! {}, ) } fn contract_caller() { - let package_hash: Key = runtime::get_named_arg(ARG_PACKAGE_HASH); - let contract_version = Some(CONTRACT_INITIAL_VERSION); - let contract_package_hash = package_hash.into_hash().unwrap_or_revert().into(); + let package_hash: ContractPackageHash = runtime::get_named_arg(ARG_PACKAGE_HASH); + let contract_version = ENTITY_INITIAL_VERSION; let runtime_args = runtime_args! 
{}; runtime::call_versioned_contract( - contract_package_hash, - contract_version, + package_hash, + Some(contract_version), RESTRICTED_CONTRACT, runtime_args, ) @@ -78,9 +85,26 @@ pub extern "C" fn uncallable_session() {} #[no_mangle] pub extern "C" fn uncallable_contract() {} +fn get_payment_purse() -> URef { + runtime::call_contract( + system::get_handle_payment(), + handle_payment::METHOD_GET_PAYMENT_PURSE, + RuntimeArgs::default(), + ) +} + +#[no_mangle] +pub extern "C" fn restricted_standard_payment() { + let amount: U512 = runtime::get_named_arg(standard_payment::ARG_AMOUNT); + + let payment_purse = get_payment_purse(); + system::transfer_from_purse_to_purse(account::get_main_purse(), payment_purse, amount, None) + .unwrap_or_revert(); +} + #[no_mangle] pub extern "C" fn call_restricted_entry_points() { - // We're aggresively removing exports that aren't exposed through contract header so test + // We're aggressively removing exports that aren't exposed through contract header so test // ensures that those exports are still inside WASM. 
uncallable_session(); uncallable_contract(); @@ -102,43 +126,47 @@ fn create_group(package_hash: ContractPackageHash) -> URef { /// Restricted uref comes from creating a group and will be assigned to a smart contract fn create_entry_points_1() -> EntryPoints { let mut entry_points = EntryPoints::new(); - let restricted_session = EntryPoint::new( + let restricted_session = EntityEntryPoint::new( RESTRICTED_SESSION.to_string(), Vec::new(), CLType::I32, EntryPointAccess::groups(&["Group 1"]), - EntryPointType::Session, + EntryPointType::Caller, + EntryPointPayment::Caller, ); entry_points.add_entry_point(restricted_session); - let restricted_contract = EntryPoint::new( + let restricted_contract = EntityEntryPoint::new( RESTRICTED_CONTRACT.to_string(), Vec::new(), CLType::I32, EntryPointAccess::groups(&["Group 1"]), - EntryPointType::Contract, + EntryPointType::Called, + EntryPointPayment::Caller, ); entry_points.add_entry_point(restricted_contract); - let restricted_session_caller = EntryPoint::new( + let restricted_session_caller = EntityEntryPoint::new( RESTRICTED_SESSION_CALLER.to_string(), vec![Parameter::new(ARG_PACKAGE_HASH, CLType::Key)], CLType::I32, EntryPointAccess::Public, - EntryPointType::Session, + EntryPointType::Caller, + EntryPointPayment::Caller, ); entry_points.add_entry_point(restricted_session_caller); - let restricted_contract = EntryPoint::new( + let restricted_contract = EntityEntryPoint::new( RESTRICTED_CONTRACT.to_string(), Vec::new(), CLType::I32, EntryPointAccess::groups(&["Group 1"]), - EntryPointType::Contract, + EntryPointType::Called, + EntryPointPayment::Caller, ); entry_points.add_entry_point(restricted_contract); - let unrestricted_contract_caller = EntryPoint::new( + let unrestricted_contract_caller = EntityEntryPoint::new( UNRESTRICTED_CONTRACT_CALLER.to_string(), Vec::new(), CLType::I32, @@ -147,11 +175,12 @@ fn create_entry_points_1() -> EntryPoints { EntryPointAccess::Public, // NOTE: Public contract authorizes any contract 
call, because this contract has groups // uref in its named keys - EntryPointType::Contract, + EntryPointType::Called, + EntryPointPayment::Caller, ); entry_points.add_entry_point(unrestricted_contract_caller); - let unrestricted_contract_caller_as_session = EntryPoint::new( + let unrestricted_contract_caller_as_session = EntityEntryPoint::new( RESTRICTED_CONTRACT_CALLER_AS_SESSION.to_string(), Vec::new(), CLType::I32, @@ -160,11 +189,12 @@ fn create_entry_points_1() -> EntryPoints { EntryPointAccess::Public, // NOTE: Public contract authorizes any contract call, because this contract has groups // uref in its named keys - EntryPointType::Session, + EntryPointType::Caller, + EntryPointPayment::Caller, ); entry_points.add_entry_point(unrestricted_contract_caller_as_session); - let uncallable_session = EntryPoint::new( + let uncallable_session = EntityEntryPoint::new( UNCALLABLE_SESSION.to_string(), Vec::new(), CLType::I32, @@ -173,11 +203,12 @@ fn create_entry_points_1() -> EntryPoints { EntryPointAccess::groups(&[]), // NOTE: Public contract authorizes any contract call, because this contract has groups // uref in its named keys - EntryPointType::Session, + EntryPointType::Caller, + EntryPointPayment::Caller, ); entry_points.add_entry_point(uncallable_session); - let uncallable_contract = EntryPoint::new( + let uncallable_contract = EntityEntryPoint::new( UNCALLABLE_CONTRACT.to_string(), Vec::new(), CLType::I32, @@ -186,13 +217,14 @@ fn create_entry_points_1() -> EntryPoints { EntryPointAccess::groups(&[]), // NOTE: Public contract authorizes any contract call, because this contract has groups // uref in its named keys - EntryPointType::Session, + EntryPointType::Caller, + EntryPointPayment::Caller, ); entry_points.add_entry_point(uncallable_contract); // Directly calls entry_points that are protected with empty group of lists to verify that even // though they're not callable externally, they're still visible in the WASM. 
- let call_restricted_entry_points = EntryPoint::new( + let call_restricted_entry_points = EntityEntryPoint::new( CALL_RESTRICTED_ENTRY_POINTS.to_string(), Vec::new(), CLType::I32, @@ -201,10 +233,24 @@ fn create_entry_points_1() -> EntryPoints { EntryPointAccess::Public, // NOTE: Public contract authorizes any contract call, because this contract has groups // uref in its named keys - EntryPointType::Session, + EntryPointType::Caller, + EntryPointPayment::Caller, ); entry_points.add_entry_point(call_restricted_entry_points); + let restricted_standard_payment = EntityEntryPoint::new( + RESTRICTED_STANDARD_PAYMENT.to_string(), + vec![Parameter::new( + standard_payment::ARG_AMOUNT, + U512::cl_type(), + )], + CLType::Unit, + EntryPointAccess::groups(&["Group 1"]), + EntryPointType::Caller, + EntryPointPayment::Caller, + ); + entry_points.add_entry_point(restricted_standard_payment); + entry_points } @@ -219,7 +265,12 @@ fn install_version_1(contract_package_hash: ContractPackageHash, restricted_uref }; let entry_points = create_entry_points_1(); - storage::add_contract_version(contract_package_hash, entry_points, contract_named_keys); + storage::add_contract_version( + contract_package_hash, + entry_points, + contract_named_keys, + BTreeMap::new(), + ); } #[no_mangle] diff --git a/smart_contracts/contracts/test/host-function-costs/Cargo.toml b/smart_contracts/contracts/test/host-function-costs/Cargo.toml index d74f89f95f..2b5c3439cc 100644 --- a/smart_contracts/contracts/test/host-function-costs/Cargo.toml +++ b/smart_contracts/contracts/test/host-function-costs/Cargo.toml @@ -1,8 +1,8 @@ [package] name = "host-function-costs" version = "0.1.0" -authors = ["Michał Papierski "] -edition = "2018" +authors = ["Michał Papierski "] +edition = "2021" [[bin]] name = "host_function_costs" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { 
path = "../../../../types" } diff --git a/smart_contracts/contracts/test/host-function-costs/src/main.rs b/smart_contracts/contracts/test/host-function-costs/src/main.rs index b92b1d31fa..466f98a1c3 100644 --- a/smart_contracts/contracts/test/host-function-costs/src/main.rs +++ b/smart_contracts/contracts/test/host-function-costs/src/main.rs @@ -6,7 +6,7 @@ extern crate alloc; use core::iter; -use alloc::{boxed::Box, string::String, vec::Vec}; +use alloc::{boxed::Box, collections::BTreeMap, string::String, vec::Vec}; use casper_contract::{ contract_api::{account, runtime, storage, system}, @@ -16,8 +16,9 @@ use casper_types::{ account::{AccountHash, ActionType, Weight}, bytesrepr::Bytes, contracts::NamedKeys, - runtime_args, ApiError, BlockTime, CLType, CLTyped, CLValue, EntryPoint, EntryPointAccess, - EntryPointType, EntryPoints, Key, Parameter, Phase, RuntimeArgs, U512, + runtime_args, ApiError, BlockTime, CLType, CLTyped, CLValue, EntityEntryPoint, + EntryPointAccess, EntryPointPayment, EntryPointType, EntryPoints, Key, Parameter, Phase, + RuntimeArgs, U512, }; const DO_NOTHING_NAME: &str = "do_nothing"; @@ -201,8 +202,6 @@ pub extern "C" fn account_function() { ) .unwrap_or_revert(); - system::transfer_to_account(DESTINATION_ACCOUNT_HASH, transfer_amount, None).unwrap_or_revert(); - // ========== remaining functions from `runtime` module ======================================== if !runtime::is_valid_uref(main_purse) { @@ -221,7 +220,7 @@ pub extern "C" fn account_function() { #[no_mangle] pub extern "C" fn calls_do_nothing_level1() { let contract_package_hash = runtime::get_key(HASH_KEY_NAME) - .and_then(Key::into_hash) + .and_then(Key::into_package_addr) .expect("should have key") .into(); runtime::call_versioned_contract( @@ -235,7 +234,7 @@ pub extern "C" fn calls_do_nothing_level1() { #[no_mangle] pub extern "C" fn calls_do_nothing_level2() { let contract_package_hash = runtime::get_key(HASH_KEY_NAME) - .and_then(Key::into_hash) + 
.and_then(Key::into_package_addr) .expect("should have key") .into(); runtime::call_versioned_contract( @@ -248,11 +247,11 @@ pub extern "C" fn calls_do_nothing_level2() { fn measure_arg_size(bytes: usize) { let contract_package_hash = runtime::get_key(HASH_KEY_NAME) - .and_then(Key::into_hash) + .and_then(Key::into_package_addr) .expect("should have key") .into(); - let argument: Vec = iter::repeat(b'1').take(bytes).collect(); + let argument: Vec = iter::repeat_n(b'1', bytes).collect(); runtime::call_versioned_contract::<()>( contract_package_hash, @@ -278,117 +277,130 @@ pub extern "C" fn arg_size_function_call_100() { pub extern "C" fn call() { let entry_points = { let mut entry_points = EntryPoints::new(); - let entry_point = EntryPoint::new( + let entry_point = EntityEntryPoint::new( DO_NOTHING_NAME, Vec::new(), CLType::Unit, EntryPointAccess::Public, - EntryPointType::Contract, + EntryPointType::Called, + EntryPointPayment::Caller, ); entry_points.add_entry_point(entry_point); - let entry_point = EntryPoint::new( + let entry_point = EntityEntryPoint::new( DO_SOMETHING_NAME, Vec::new(), CLType::Unit, EntryPointAccess::Public, - EntryPointType::Contract, + EntryPointType::Called, + EntryPointPayment::Caller, ); entry_points.add_entry_point(entry_point); - let entry_point = EntryPoint::new( + let entry_point = EntityEntryPoint::new( CALLS_DO_NOTHING_LEVEL1_NAME, Vec::new(), CLType::Unit, EntryPointAccess::Public, - EntryPointType::Contract, + EntryPointType::Called, + EntryPointPayment::Caller, ); entry_points.add_entry_point(entry_point); - let entry_point = EntryPoint::new( + let entry_point = EntityEntryPoint::new( CALLS_DO_NOTHING_LEVEL2_NAME, Vec::new(), CLType::Unit, EntryPointAccess::Public, - EntryPointType::Contract, + EntryPointType::Called, + EntryPointPayment::Caller, ); entry_points.add_entry_point(entry_point); - let entry_point = EntryPoint::new( + let entry_point = EntityEntryPoint::new( SHORT_FUNCTION_NAME_1, Vec::new(), CLType::Unit, 
EntryPointAccess::Public, - EntryPointType::Contract, + EntryPointType::Called, + EntryPointPayment::Caller, ); entry_points.add_entry_point(entry_point); - let entry_point = EntryPoint::new( + let entry_point = EntityEntryPoint::new( SHORT_FUNCTION_NAME_100, Vec::new(), CLType::Unit, EntryPointAccess::Public, - EntryPointType::Contract, + EntryPointType::Called, + EntryPointPayment::Caller, ); entry_points.add_entry_point(entry_point); - let entry_point = EntryPoint::new( + let entry_point = EntityEntryPoint::new( LONG_FUNCTION_NAME_1, Vec::new(), CLType::Unit, EntryPointAccess::Public, - EntryPointType::Contract, + EntryPointType::Called, + EntryPointPayment::Caller, ); entry_points.add_entry_point(entry_point); - let entry_point = EntryPoint::new( + let entry_point = EntityEntryPoint::new( LONG_FUNCTION_NAME_100, Vec::new(), CLType::Unit, EntryPointAccess::Public, - EntryPointType::Contract, + EntryPointType::Called, + EntryPointPayment::Caller, ); entry_points.add_entry_point(entry_point); - let entry_point = EntryPoint::new( + let entry_point = EntityEntryPoint::new( ARG_SIZE_FUNCTION_NAME, vec![Parameter::new(ARG_BYTES, >::cl_type())], CLType::Unit, EntryPointAccess::Public, - EntryPointType::Contract, + EntryPointType::Called, + EntryPointPayment::Caller, ); entry_points.add_entry_point(entry_point); - let entry_point = EntryPoint::new( + let entry_point = EntityEntryPoint::new( "account_function", Vec::new(), CLType::Unit, EntryPointAccess::Public, - EntryPointType::Session, + EntryPointType::Called, + EntryPointPayment::Caller, ); entry_points.add_entry_point(entry_point); - let entry_point = EntryPoint::new( + let entry_point = EntityEntryPoint::new( "storage_function", Vec::new(), CLType::Unit, EntryPointAccess::Public, - EntryPointType::Contract, + EntryPointType::Called, + EntryPointPayment::Caller, ); entry_points.add_entry_point(entry_point); - let entry_point = EntryPoint::new( + let entry_point = EntityEntryPoint::new( 
ARG_SIZE_FUNCTION_CALL_1_NAME, Vec::new(), CLType::Unit, EntryPointAccess::Public, - EntryPointType::Contract, + EntryPointType::Called, + EntryPointPayment::Caller, ); entry_points.add_entry_point(entry_point); - let entry_point = EntryPoint::new( + let entry_point = EntityEntryPoint::new( ARG_SIZE_FUNCTION_CALL_100_NAME, Vec::new(), CLType::Unit, EntryPointAccess::Public, - EntryPointType::Contract, + EntryPointType::Called, + EntryPointPayment::Caller, ); entry_points.add_entry_point(entry_point); @@ -397,12 +409,16 @@ pub extern "C" fn call() { let (contract_package_hash, _access_uref) = storage::create_contract_package_at_hash(); - runtime::put_key(&HASH_KEY_NAME, contract_package_hash.into()); + runtime::put_key(HASH_KEY_NAME, contract_package_hash.into()); let mut named_keys = NamedKeys::new(); named_keys.insert(HASH_KEY_NAME.into(), contract_package_hash.into()); - let (contract_hash, _version) = - storage::add_contract_version(contract_package_hash, entry_points, named_keys); - runtime::put_key(&CONTRACT_KEY_NAME, contract_hash.into()); + let (contract_hash, _version) = storage::add_contract_version( + contract_package_hash, + entry_points, + named_keys, + BTreeMap::new(), + ); + runtime::put_key(CONTRACT_KEY_NAME, Key::Hash(contract_hash.value())); } diff --git a/smart_contracts/contracts/test/key-management-thresholds/Cargo.toml b/smart_contracts/contracts/test/key-management-thresholds/Cargo.toml index d4480bbf93..7de4df13c2 100644 --- a/smart_contracts/contracts/test/key-management-thresholds/Cargo.toml +++ b/smart_contracts/contracts/test/key-management-thresholds/Cargo.toml @@ -1,8 +1,8 @@ [package] name = "key-management-thresholds" version = "0.1.0" -authors = ["Michał Papierski "] -edition = "2018" +authors = ["Michał Papierski "] +edition = "2021" [[bin]] name = "key_management_thresholds" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = 
{ path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/key-management-thresholds/src/main.rs b/smart_contracts/contracts/test/key-management-thresholds/src/main.rs index 7049f05b1e..ea95ee6f8d 100644 --- a/smart_contracts/contracts/test/key-management-thresholds/src/main.rs +++ b/smart_contracts/contracts/test/key-management-thresholds/src/main.rs @@ -64,7 +64,7 @@ pub extern "C" fn call() { .unwrap_or_revert(); // Removes [43;32] key created in init stage account::remove_associated_key(AccountHash::new([44; 32])).unwrap_or_revert(); - // Sets action threshodl + // Sets action threshold account::set_action_threshold(ActionType::KeyManagement, Weight::new(100)) .unwrap_or_revert(); } else { diff --git a/smart_contracts/contracts/test/key-putter/Cargo.toml b/smart_contracts/contracts/test/key-putter/Cargo.toml new file mode 100644 index 0000000000..3104e5b254 --- /dev/null +++ b/smart_contracts/contracts/test/key-putter/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "key-putter" +version = "0.1.0" +authors = ["CasperLabs "] +edition = "2021" + +[[bin]] +name = "key_putter" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/key-putter/src/main.rs b/smart_contracts/contracts/test/key-putter/src/main.rs new file mode 100644 index 0000000000..eca797a7cf --- /dev/null +++ b/smart_contracts/contracts/test/key-putter/src/main.rs @@ -0,0 +1,161 @@ +#![no_std] +#![no_main] + +#[cfg(not(target_arch = "wasm32"))] +compile_error!("target arch should be wasm32: compile with '--target wasm32-unknown-unknown'"); + +// This code imports necessary aspects of external crates that we will use in our contract code. +extern crate alloc; +// Importing Rust types. 
+use alloc::{ + collections::btree_map::BTreeMap, + format, + string::{String, ToString}, + vec::Vec, +}; +// Importing aspects of the Casper platform. +use casper_contract::contract_api::{runtime, storage}; +// Importing specific Casper types. +use casper_types::{ + addressable_entity::{EntityEntryPoint as EntryPoint, EntryPoints}, + bytesrepr::FromBytes, + contracts::NamedKeys, + ApiError, CLType, CLTyped, EntryPointAccess, EntryPointPayment, EntryPointType, Key, URef, +}; +/// Constants for the keys pointing to values stored in the account's named keys. +const CONTRACT_PACKAGE_NAME: &str = "package_name"; +const CONTRACT_ACCESS_UREF: &str = "access_uref"; + +/// Creating constants for the various contract entry points. +const ENTRY_POINT_PUT_KEY: &str = "put_key"; + +/// Constants for the keys pointing to values stored in the contract's named keys. +const CONTRACT_VERSION_KEY: &str = "version"; +const ALL_CONTRACTS_COUNTER: &str = "all_contracts_counter"; +const CONTRACT_KEY: &str = "key_putter"; + +const KEY_PLACEHOLDER: &str = "key_placeholder"; + +#[no_mangle] +fn put_key() { + let named_keys = runtime::list_named_keys(); + let mut number_of_matches = 0; + for key in named_keys.names() { + if key.to_string().starts_with("v_") { + number_of_matches += 1; + } + } + let key = if number_of_matches <= 0 { + "Contract not installed?".to_string() + } else { + format!("v_{number_of_matches}") + }; + let value_to_store = match get_stored_value::(&key) { + Some(value_to_store) => value_to_store, + None => format!("Nothing found under key {key}"), + }; + let value = storage::new_uref(value_to_store); + runtime::put_key(KEY_PLACEHOLDER, value.into()); +} + +pub fn install(contract_version: u32) { + let mut named_keys = NamedKeys::new(); + let key = format!("v_{contract_version}"); + let value = format!("key_putter_v{contract_version}"); + named_keys.insert(key, storage::new_uref(value).into()); + // Create the entry points for this contract. 
+ let mut entry_points = EntryPoints::new(); + + entry_points.add_entry_point(EntryPoint::new( + ENTRY_POINT_PUT_KEY, + Vec::new(), + CLType::I32, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + )); + + // Create a new contract package that can be upgraded. + let (stored_contract_hash, contract_version) = storage::new_contract( + entry_points, + Some(named_keys), + Some(CONTRACT_PACKAGE_NAME.to_string()), + Some(CONTRACT_ACCESS_UREF.to_string()), + None, + ); + + // Store the contract version in the context's named keys. + let version_uref = storage::new_uref(contract_version); + runtime::put_key(CONTRACT_VERSION_KEY, version_uref.into()); + + // Create a named key for the contract hash. + runtime::put_key(CONTRACT_KEY, stored_contract_hash.into()); + + let all_contracts_counter_uref = storage::new_uref(contract_version); + runtime::put_key(ALL_CONTRACTS_COUNTER, all_contracts_counter_uref.into()); +} + +pub fn upgrade(contract_version: u32) { + let package_key = runtime::get_key(CONTRACT_PACKAGE_NAME).unwrap(); + let mut named_keys = NamedKeys::new(); + let key = format!("v_{contract_version}"); + let value = format!("key_putter_v{contract_version}"); + named_keys.insert(key, storage::new_uref(value).into()); + // Create the entry points for this contract. 
+ let mut entry_points = EntryPoints::new(); + + entry_points.add_entry_point(EntryPoint::new( + ENTRY_POINT_PUT_KEY, + Vec::new(), + CLType::I32, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + )); + let contract_package_hash = match package_key { + Key::Hash(hash_addr) => hash_addr, + _ => panic!("shouldn't happen"), + }; + let (contract_hash, updated_contract_version) = storage::add_contract_version( + contract_package_hash.into(), + entry_points, + named_keys, + BTreeMap::new(), + ); + let version_uref = storage::new_uref(updated_contract_version); + runtime::put_key(CONTRACT_VERSION_KEY, version_uref.into()); + + // Create a named key for the contract hash. + runtime::put_key(CONTRACT_KEY, contract_hash.into()); + + let all_contracts_counter_uref = storage::new_uref(contract_version); + runtime::put_key(ALL_CONTRACTS_COUNTER, all_contracts_counter_uref.into()); +} + +/// Entry point that executes automatically when a caller installs the contract. +#[no_mangle] +pub extern "C" fn call() { + let package_key = runtime::get_key(CONTRACT_PACKAGE_NAME); + if package_key.is_none() { + //install + install(1); + } else { + let all_contracts_counter = get_stored_value::(ALL_CONTRACTS_COUNTER).unwrap(); + upgrade(all_contracts_counter + 1) + } +} + +/// Reads value from a named key. +pub fn get_stored_value(name: &str) -> Option +where + T: FromBytes + CLTyped, +{ + let uref = get_uref(name); + storage::read(uref).unwrap() +} + +/// Gets [`URef`] under a name. 
+fn get_uref(name: &str) -> URef { + let key = runtime::get_key(name).ok_or(ApiError::MissingKey).unwrap(); + key.try_into().unwrap() +} diff --git a/smart_contracts/contracts/test/list-authorization-keys/Cargo.toml b/smart_contracts/contracts/test/list-authorization-keys/Cargo.toml new file mode 100644 index 0000000000..febbbc7e7e --- /dev/null +++ b/smart_contracts/contracts/test/list-authorization-keys/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "list-authorization-keys" +version = "0.1.0" +authors = ["Michał Papierski "] +edition = "2021" + +[[bin]] +name = "list_authorization_keys" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/list-authorization-keys/src/main.rs b/smart_contracts/contracts/test/list-authorization-keys/src/main.rs new file mode 100644 index 0000000000..b900ad3a02 --- /dev/null +++ b/smart_contracts/contracts/test/list-authorization-keys/src/main.rs @@ -0,0 +1,34 @@ +#![no_std] +#![no_main] + +extern crate alloc; + +use alloc::collections::BTreeSet; + +use casper_contract::contract_api::runtime; +use casper_types::{account::AccountHash, ApiError}; + +const ARG_EXPECTED_AUTHORIZATION_KEYS: &str = "expected_authorization_keys"; + +#[repr(u16)] +enum UserError { + AssertionFail = 0, +} + +impl From for ApiError { + fn from(error: UserError) -> ApiError { + ApiError::User(error as u16) + } +} + +#[no_mangle] +pub extern "C" fn call() { + let expected_authorized_keys: BTreeSet = + runtime::get_named_arg(ARG_EXPECTED_AUTHORIZATION_KEYS); + + let actual_authorized_keys = runtime::list_authorization_keys(); + + if expected_authorized_keys != actual_authorized_keys { + runtime::revert(UserError::AssertionFail) + } +} diff --git a/smart_contracts/contracts/test/list-named-keys/Cargo.toml b/smart_contracts/contracts/test/list-named-keys/Cargo.toml index 0569fecd1d..a183991f98 
100644 --- a/smart_contracts/contracts/test/list-named-keys/Cargo.toml +++ b/smart_contracts/contracts/test/list-named-keys/Cargo.toml @@ -2,7 +2,7 @@ name = "list-named-keys" version = "0.1.0" authors = ["Fraser Hutchison "] -edition = "2018" +edition = "2021" [[bin]] name = "list_named_keys" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/list-named-keys/src/main.rs b/smart_contracts/contracts/test/list-named-keys/src/main.rs index 5cebbb2a79..97a307a492 100644 --- a/smart_contracts/contracts/test/list-named-keys/src/main.rs +++ b/smart_contracts/contracts/test/list-named-keys/src/main.rs @@ -23,15 +23,15 @@ pub extern "C" fn call() { let new_named_keys: NamedKeys = runtime::get_named_arg(ARG_NEW_NAMED_KEYS); let mut expected_named_keys = expected_initial_named_keys; - for (key, value) in new_named_keys { - runtime::put_key(&key, value); - assert!(expected_named_keys.insert(key, value).is_none()); + for (name, key) in new_named_keys.iter() { + runtime::put_key(name, *key); + assert!(expected_named_keys.insert(name.clone(), *key).is_none()); let actual_named_keys = runtime::list_named_keys(); assert_eq!(expected_named_keys, actual_named_keys); } // Remove all named keys and check that removed keys aren't returned in `list_named_keys()`. 
- let all_key_names: Vec = expected_named_keys.keys().cloned().collect(); + let all_key_names: Vec = expected_named_keys.names().cloned().collect(); for key in all_key_names { runtime::remove_key(&key); assert!(expected_named_keys.remove(&key).is_some()); diff --git a/smart_contracts/contracts/test/load-caller-info/Cargo.toml b/smart_contracts/contracts/test/load-caller-info/Cargo.toml new file mode 100644 index 0000000000..d0fc78483d --- /dev/null +++ b/smart_contracts/contracts/test/load-caller-info/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "load-caller-info" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[[bin]] +name = "load_caller_info" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/load-caller-info/src/main.rs b/smart_contracts/contracts/test/load-caller-info/src/main.rs new file mode 100644 index 0000000000..52887927d9 --- /dev/null +++ b/smart_contracts/contracts/test/load-caller-info/src/main.rs @@ -0,0 +1,88 @@ +#![no_main] +#![no_std] + +extern crate alloc; + +use alloc::{string::ToString, vec}; + +use casper_contract::{ + contract_api::{runtime, runtime::revert, storage}, + unwrap_or_revert::UnwrapOrRevert, +}; +use casper_types::{ + AddressableEntityHash, ApiError, CLType, EntityEntryPoint, EntryPointAccess, EntryPointPayment, + EntryPointType, EntryPoints, Key, +}; + +const PACKAGE_NAME: &str = "load_caller_info_package"; +const CONTRACT_HASH: &str = "load_caller_info_contract_hash"; +const PACKAGE_ACCESS_KEY: &str = "package_access_key"; + +#[no_mangle] +pub extern "C" fn initiator() { + let initiator = runtime::get_call_initiator().unwrap_or_revert(); + runtime::put_key("initiator", Key::URef(storage::new_uref(initiator))) +} + +#[no_mangle] +pub extern "C" fn 
get_immediate_caller() { + let initiator = runtime::get_immediate_caller().unwrap_or_revert(); + runtime::put_key("immediate", Key::URef(storage::new_uref(initiator))) +} + +#[no_mangle] +pub extern "C" fn get_full_stack() { + let initiator = runtime::get_call_stack(); + if initiator.is_empty() { + revert(ApiError::User(10)) + } + runtime::put_key("full", Key::URef(storage::new_uref(initiator))) +} + +#[no_mangle] +pub extern "C" fn call() { + let entry_points = { + let mut entry_points = EntryPoints::new(); + let initiator_entry_point = EntityEntryPoint::new( + "initiator".to_string(), + vec![], + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + let immediate_entry_point = EntityEntryPoint::new( + "get_immediate_caller".to_string(), + vec![], + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + let full_stack_entry_point = EntityEntryPoint::new( + "get_full_stack".to_string(), + vec![], + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + entry_points.add_entry_point(initiator_entry_point); + entry_points.add_entry_point(immediate_entry_point); + entry_points.add_entry_point(full_stack_entry_point); + entry_points + }; + + let (contract_hash, _contract_version) = storage::new_contract( + entry_points, + None, + Some(PACKAGE_NAME.to_string()), + Some(PACKAGE_ACCESS_KEY.to_string()), + None, + ); + + runtime::put_key( + CONTRACT_HASH, + Key::contract_entity_key(AddressableEntityHash::new(contract_hash.value())), + ); +} diff --git a/smart_contracts/contracts/test/main-purse/Cargo.toml b/smart_contracts/contracts/test/main-purse/Cargo.toml index 81eb575da6..a0879b9084 100644 --- a/smart_contracts/contracts/test/main-purse/Cargo.toml +++ b/smart_contracts/contracts/test/main-purse/Cargo.toml @@ -1,8 +1,8 @@ [package] name = "main-purse" version = "0.1.0" -authors = ["Ed Hastings , Henry Till "] -edition = 
"2018" +authors = ["Ed Hastings , Henry Till "] +edition = "2021" [[bin]] name = "main_purse" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/main-purse/src/main.rs b/smart_contracts/contracts/test/main-purse/src/main.rs index 9bc05f7270..4c4d791c90 100644 --- a/smart_contracts/contracts/test/main-purse/src/main.rs +++ b/smart_contracts/contracts/test/main-purse/src/main.rs @@ -2,16 +2,26 @@ #![no_main] use casper_contract::contract_api::{account, runtime}; -use casper_types::URef; +use casper_types::{AccessRights, ApiError, URef}; const ARG_PURSE: &str = "purse"; +#[repr(u16)] +enum Error { + MainPurseShouldNotBeWriteable = 1, + MainPurseShouldHaveReadAddRights = 2, +} + #[no_mangle] pub extern "C" fn call() { let known_main_purse: URef = runtime::get_named_arg(ARG_PURSE); let main_purse: URef = account::get_main_purse(); - assert_eq!( - main_purse, known_main_purse, - "main purse was not known purse" - ); + if known_main_purse.is_writeable() { + runtime::revert(ApiError::User(Error::MainPurseShouldNotBeWriteable as u16)) + } + if main_purse.with_access_rights(AccessRights::READ_ADD) != known_main_purse { + runtime::revert(ApiError::User( + Error::MainPurseShouldHaveReadAddRights as u16, + )); + } } diff --git a/smart_contracts/contracts/test/manage-groups/Cargo.toml b/smart_contracts/contracts/test/manage-groups/Cargo.toml index 0adde8838d..ae1264b831 100644 --- a/smart_contracts/contracts/test/manage-groups/Cargo.toml +++ b/smart_contracts/contracts/test/manage-groups/Cargo.toml @@ -1,8 +1,8 @@ [package] name = "manage-groups" version = "0.1.0" -authors = ["Michał Papierski "] -edition = "2018" +authors = ["Michał Papierski "] +edition = "2021" [[bin]] name = "manage_groups" @@ -11,9 +11,6 @@ bench = false doctest = false test = false 
-[features] -std = ["casper-contract/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/manage-groups/src/main.rs b/smart_contracts/contracts/test/manage-groups/src/main.rs index fdd8d4565d..d6ebf748f2 100644 --- a/smart_contracts/contracts/test/manage-groups/src/main.rs +++ b/smart_contracts/contracts/test/manage-groups/src/main.rs @@ -6,20 +6,24 @@ extern crate alloc; use alloc::{ boxed::Box, - collections::BTreeSet, + collections::{BTreeMap, BTreeSet}, string::{String, ToString}, vec::Vec, }; -use core::{convert::TryInto, iter::FromIterator}; +use core::{convert::TryInto, iter::FromIterator, mem::MaybeUninit}; use casper_contract::{ - contract_api::{runtime, storage}, + contract_api::{self, runtime, storage}, + ext_ffi, unwrap_or_revert::UnwrapOrRevert, }; use casper_types::{ - contracts::{EntryPoint, EntryPointAccess, EntryPointType, EntryPoints, NamedKeys}, - CLType, ContractPackageHash, Key, Parameter, URef, + addressable_entity::{EntityEntryPoint, EntryPointAccess, EntryPointType, EntryPoints}, + api_error, + bytesrepr::{self, ToBytes}, + contracts::{ContractPackage, ContractPackageHash, NamedKeys}, + ApiError, CLType, EntryPointPayment, Group, Key, Parameter, URef, }; const PACKAGE_HASH_KEY: &str = "package_hash_key"; @@ -32,20 +36,22 @@ const GROUP_NAME_ARG: &str = "group_name"; const UREFS_ARG: &str = "urefs"; const TOTAL_NEW_UREFS_ARG: &str = "total_new_urefs"; const TOTAL_EXISTING_UREFS_ARG: &str = "total_existing_urefs"; +const UREF_INDICES_ARG: &str = "uref_indices"; #[no_mangle] pub extern "C" fn create_group() { - let package_hash_key: ContractPackageHash = runtime::get_key(PACKAGE_HASH_KEY) - .and_then(Key::into_hash) - .unwrap_or_revert() - .into(); + let package_hash_key = + runtime::get_key(PACKAGE_HASH_KEY).unwrap_or_revert_with(ApiError::User(15)); + let contract_package_hash = package_hash_key + .into_hash_addr() + 
.unwrap_or_revert_with(ApiError::User(16)); let group_name: String = runtime::get_named_arg(GROUP_NAME_ARG); let total_urefs: u64 = runtime::get_named_arg(TOTAL_NEW_UREFS_ARG); let total_existing_urefs: u64 = runtime::get_named_arg(TOTAL_EXISTING_UREFS_ARG); let existing_urefs: Vec = (0..total_existing_urefs).map(storage::new_uref).collect(); - let _new_uref = storage::create_contract_user_group( - package_hash_key, + storage::create_contract_user_group( + ContractPackageHash::new(contract_package_hash), &group_name, total_urefs as u8, BTreeSet::from_iter(existing_urefs), @@ -55,46 +61,126 @@ pub extern "C" fn create_group() { #[no_mangle] pub extern "C" fn remove_group() { - let package_hash_key: ContractPackageHash = runtime::get_key(PACKAGE_HASH_KEY) - .and_then(Key::into_hash) - .unwrap_or_revert() - .into(); + let package_hash_key = + runtime::get_key(PACKAGE_HASH_KEY).unwrap_or_revert_with(ApiError::User(15)); + let contract_package_hash = package_hash_key + .into_hash_addr() + .unwrap_or_revert_with(ApiError::User(16)); let group_name: String = runtime::get_named_arg(GROUP_NAME_ARG); - storage::remove_contract_user_group(package_hash_key, &group_name).unwrap_or_revert(); + storage::remove_contract_user_group( + ContractPackageHash::new(contract_package_hash), + &group_name, + ) + .unwrap_or_revert(); } #[no_mangle] pub extern "C" fn extend_group_urefs() { - let package_hash_key: ContractPackageHash = runtime::get_key(PACKAGE_HASH_KEY) - .and_then(Key::into_hash) - .unwrap_or_revert() - .into(); + let package_hash_key = + runtime::get_key(PACKAGE_HASH_KEY).unwrap_or_revert_with(ApiError::User(15)); + let contract_package_hash = package_hash_key + .into_hash_addr() + .unwrap_or_revert_with(ApiError::User(16)); let group_name: String = runtime::get_named_arg(GROUP_NAME_ARG); let new_urefs_count: u64 = runtime::get_named_arg(TOTAL_NEW_UREFS_ARG); // Provisions additional urefs inside group for _ in 1..=new_urefs_count { - let _new_uref = 
storage::provision_contract_user_group_uref(package_hash_key, &group_name) - .unwrap_or_revert(); + let _new_uref = storage::provision_contract_user_group_uref( + ContractPackageHash::new(contract_package_hash), + &group_name, + ) + .unwrap_or_revert(); } } +fn read_host_buffer_into(dest: &mut [u8]) -> Result { + let mut bytes_written = MaybeUninit::uninit(); + let ret = unsafe { + ext_ffi::casper_read_host_buffer(dest.as_mut_ptr(), dest.len(), bytes_written.as_mut_ptr()) + }; + // NOTE: When rewriting below expression as `result_from(ret).map(|_| unsafe { ... })`, and the + // caller ignores the return value, execution of the contract becomes unstable and ultimately + // leads to `Unreachable` error. + api_error::result_from(ret)?; + Ok(unsafe { bytes_written.assume_init() }) +} + +fn read_contract_package( + package_hash: ContractPackageHash, +) -> Result, ApiError> { + let key = Key::from(package_hash); + let (key_ptr, key_size, _bytes) = { + let bytes = key.into_bytes().unwrap_or_revert(); + let ptr = bytes.as_ptr(); + let size = bytes.len(); + (ptr, size, bytes) + }; + + let value_size = { + let mut value_size = MaybeUninit::uninit(); + let ret = unsafe { ext_ffi::casper_read_value(key_ptr, key_size, value_size.as_mut_ptr()) }; + match api_error::result_from(ret) { + Ok(_) => unsafe { value_size.assume_init() }, + Err(ApiError::ValueNotFound) => return Ok(None), + Err(e) => runtime::revert(e), + } + }; + + let value_bytes = { + let mut dest: Vec = if value_size == 0 { + Vec::new() + } else { + let bytes_non_null_ptr = contract_api::alloc_bytes(value_size); + unsafe { Vec::from_raw_parts(bytes_non_null_ptr.as_ptr(), value_size, value_size) } + }; + read_host_buffer_into(&mut dest)?; + dest + }; + + Ok(Some(bytesrepr::deserialize(value_bytes)?)) +} + #[no_mangle] pub extern "C" fn remove_group_urefs() { - let package_hash_key: ContractPackageHash = runtime::get_key(PACKAGE_HASH_KEY) - .and_then(Key::into_hash) - .unwrap_or_revert() - .into(); + let 
package_hash_key = + runtime::get_key(PACKAGE_HASH_KEY).unwrap_or_revert_with(ApiError::User(15)); + let contract_package_hash = package_hash_key + .into_hash_addr() + .unwrap_or_revert_with(ApiError::User(16)); let _package_access_key: URef = runtime::get_key(PACKAGE_ACCESS_KEY) .unwrap_or_revert() .try_into() .unwrap(); let group_name: String = runtime::get_named_arg(GROUP_NAME_ARG); - let urefs: Vec = runtime::get_named_arg(UREFS_ARG); + let ordinals: Vec = runtime::get_named_arg(UREF_INDICES_ARG); + + let contract_package: ContractPackage = + read_contract_package(ContractPackageHash::new(contract_package_hash)) + .unwrap_or_revert() + .unwrap_or_revert(); + + let group_urefs = contract_package + .groups() + .get(&Group::new("Group 1")) + .unwrap_or_revert(); + let group_urefs_vec = Vec::from_iter(group_urefs); + + let mut urefs_to_remove = BTreeSet::new(); + for ordinal in ordinals { + urefs_to_remove.insert( + group_urefs_vec + .get(ordinal as usize) + .cloned() + .cloned() + .unwrap_or_revert(), + ); + } + storage::remove_contract_user_group_urefs( - package_hash_key, + ContractPackageHash::new(contract_package_hash), &group_name, - BTreeSet::from_iter(urefs), + urefs_to_remove, ) .unwrap_or_revert(); } @@ -102,7 +188,7 @@ pub extern "C" fn remove_group_urefs() { /// Restricted uref comes from creating a group and will be assigned to a smart contract fn create_entry_points_1() -> EntryPoints { let mut entry_points = EntryPoints::new(); - let restricted_session = EntryPoint::new( + let restricted_session = EntityEntryPoint::new( CREATE_GROUP.to_string(), vec![ Parameter::new(GROUP_NAME_ARG, CLType::String), @@ -111,21 +197,23 @@ fn create_entry_points_1() -> EntryPoints { ], CLType::Unit, EntryPointAccess::Public, - EntryPointType::Session, + EntryPointType::Caller, + EntryPointPayment::Caller, ); entry_points.add_entry_point(restricted_session); - let remove_group = EntryPoint::new( + let remove_group = EntityEntryPoint::new( REMOVE_GROUP.to_string(), 
vec![Parameter::new(GROUP_NAME_ARG, CLType::String)], CLType::Unit, EntryPointAccess::Public, - EntryPointType::Session, + EntryPointType::Caller, + EntryPointPayment::Caller, ); entry_points.add_entry_point(remove_group); let entry_point_name = EXTEND_GROUP_UREFS.to_string(); - let extend_group_urefs = EntryPoint::new( + let extend_group_urefs = EntityEntryPoint::new( entry_point_name, vec![ Parameter::new(GROUP_NAME_ARG, CLType::String), @@ -133,12 +221,13 @@ fn create_entry_points_1() -> EntryPoints { ], CLType::Unit, EntryPointAccess::Public, - EntryPointType::Session, + EntryPointType::Caller, + EntryPointPayment::Caller, ); entry_points.add_entry_point(extend_group_urefs); let entry_point_name = REMOVE_GROUP_UREFS.to_string(); - let remove_group_urefs = EntryPoint::new( + let remove_group_urefs = EntityEntryPoint::new( entry_point_name, vec![ Parameter::new(GROUP_NAME_ARG, CLType::String), @@ -146,7 +235,8 @@ fn create_entry_points_1() -> EntryPoints { ], CLType::Unit, EntryPointAccess::Public, - EntryPointType::Session, + EntryPointType::Caller, + EntryPointPayment::Caller, ); entry_points.add_entry_point(remove_group_urefs); entry_points @@ -156,7 +246,12 @@ fn install_version_1(package_hash: ContractPackageHash) { let contract_named_keys = NamedKeys::new(); let entry_points = create_entry_points_1(); - storage::add_contract_version(package_hash, entry_points, contract_named_keys); + storage::add_contract_version( + package_hash, + entry_points, + contract_named_keys, + BTreeMap::new(), + ); } #[no_mangle] diff --git a/smart_contracts/contracts/test/measure-gas-subcall/Cargo.toml b/smart_contracts/contracts/test/measure-gas-subcall/Cargo.toml index 18c8a16d4c..f3d05fe2e4 100644 --- a/smart_contracts/contracts/test/measure-gas-subcall/Cargo.toml +++ b/smart_contracts/contracts/test/measure-gas-subcall/Cargo.toml @@ -2,7 +2,7 @@ name = "measure-gas-subcall" version = "0.1.0" authors = ["Bartłomiej Kamiński "] -edition = "2018" +edition = "2021" [[bin]] name = 
"measure_gas_subcall" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/measure-gas-subcall/src/main.rs b/smart_contracts/contracts/test/measure-gas-subcall/src/main.rs index 15f82debef..be612f0080 100644 --- a/smart_contracts/contracts/test/measure-gas-subcall/src/main.rs +++ b/smart_contracts/contracts/test/measure-gas-subcall/src/main.rs @@ -10,8 +10,9 @@ use casper_contract::{ unwrap_or_revert::UnwrapOrRevert, }; use casper_types::{ - contracts::Parameters, ApiError, CLType, CLValue, ContractHash, ContractVersion, EntryPoint, - EntryPointAccess, EntryPointType, EntryPoints, Phase, RuntimeArgs, + addressable_entity::Parameters, contracts::ContractHash, ApiError, CLType, CLValue, + EntityEntryPoint, EntityVersion, EntryPointAccess, EntryPointPayment, EntryPointType, + EntryPoints, Phase, RuntimeArgs, }; const ARG_TARGET: &str = "target_contract"; @@ -35,33 +36,35 @@ pub extern "C" fn noop_ext() { runtime::ret(CLValue::from_t(()).unwrap_or_revert()) } -fn store() -> (ContractHash, ContractVersion) { +fn store() -> (ContractHash, EntityVersion) { let entry_points = { let mut entry_points = EntryPoints::new(); - let entry_point_1 = EntryPoint::new( + let entry_point_1 = EntityEntryPoint::new( NOOP_EXT, Parameters::default(), CLType::Unit, EntryPointAccess::Public, - EntryPointType::Contract, + EntryPointType::Called, + EntryPointPayment::Caller, ); entry_points.add_entry_point(entry_point_1); - let entry_point_2 = EntryPoint::new( + let entry_point_2 = EntityEntryPoint::new( GET_PHASE_EXT, Parameters::default(), CLType::Unit, EntryPointAccess::Public, - EntryPointType::Contract, + EntryPointType::Called, + EntryPointPayment::Caller, ); entry_points.add_entry_point(entry_point_2); entry_points }; - storage::new_contract(entry_points, None, 
None, None) + storage::new_contract(entry_points, None, None, None, None) } #[no_mangle] diff --git a/smart_contracts/contracts/test/mint-purse/Cargo.toml b/smart_contracts/contracts/test/mint-purse/Cargo.toml index 2a3ef6d474..eecf9a9a57 100644 --- a/smart_contracts/contracts/test/mint-purse/Cargo.toml +++ b/smart_contracts/contracts/test/mint-purse/Cargo.toml @@ -2,7 +2,7 @@ name = "mint-purse" version = "0.1.0" authors = ["Michael Birch "] -edition = "2018" +edition = "2021" [[bin]] name = "mint_purse" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/mint-purse/src/main.rs b/smart_contracts/contracts/test/mint-purse/src/main.rs index 6d64819f08..8c93a0083a 100644 --- a/smart_contracts/contracts/test/mint-purse/src/main.rs +++ b/smart_contracts/contracts/test/mint-purse/src/main.rs @@ -5,7 +5,7 @@ use casper_contract::{ contract_api::{runtime, system}, unwrap_or_revert::UnwrapOrRevert, }; -use casper_types::{runtime_args, system::mint, ApiError, RuntimeArgs, URef, U512}; +use casper_types::{runtime_args, system::mint, ApiError, URef, U512}; const METHOD_MINT: &str = "mint"; const METHOD_BALANCE: &str = "balance"; diff --git a/smart_contracts/contracts/test/mint-transfer-proxy/Cargo.toml b/smart_contracts/contracts/test/mint-transfer-proxy/Cargo.toml new file mode 100644 index 0000000000..1a516a5d42 --- /dev/null +++ b/smart_contracts/contracts/test/mint-transfer-proxy/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "mint-transfer-proxy" +version = "0.1.0" +authors = ["Michał Papierski "] +edition = "2021" + +[[bin]] +name = "mint_transfer_proxy" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } diff --git 
a/smart_contracts/contracts/test/mint-transfer-proxy/src/main.rs b/smart_contracts/contracts/test/mint-transfer-proxy/src/main.rs new file mode 100644 index 0000000000..ad4081a8d7 --- /dev/null +++ b/smart_contracts/contracts/test/mint-transfer-proxy/src/main.rs @@ -0,0 +1,41 @@ +#![no_std] +#![no_main] + +// casper_contract is required for it's [global_alloc] as well as handlers (such as panic_handler) +use casper_contract::{ + contract_api::{account, runtime, system}, + unwrap_or_revert::UnwrapOrRevert, +}; +use casper_types::{account::AccountHash, runtime_args, system::mint, URef, U512}; + +fn mint_transfer( + maybe_to: Option, + source: URef, + target: URef, + amount: U512, + id: Option, +) -> Result<(), mint::Error> { + let args = runtime_args! { + mint::ARG_TO => maybe_to, + mint::ARG_SOURCE => source, + mint::ARG_TARGET => target, + mint::ARG_AMOUNT => amount, + mint::ARG_ID => id, + }; + let mint_hash = system::get_mint(); + runtime::call_contract(mint_hash, mint::METHOD_TRANSFER, args) +} + +fn delegate() { + let to: Option = runtime::get_named_arg("to"); + let amount: U512 = runtime::get_named_arg("amount"); + let main_purse = account::get_main_purse(); + let target_purse = main_purse; + let id: Option = None; + mint_transfer(to, main_purse, target_purse, amount, id).unwrap_or_revert(); +} + +#[no_mangle] +pub extern "C" fn call() { + delegate(); +} diff --git a/smart_contracts/contracts/test/multisig-authorization/Cargo.toml b/smart_contracts/contracts/test/multisig-authorization/Cargo.toml new file mode 100644 index 0000000000..7c5629162e --- /dev/null +++ b/smart_contracts/contracts/test/multisig-authorization/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "multisig-authorization" +version = "0.1.0" +authors = ["Michał Papierski for ApiError { + fn from(user_error: UserError) -> Self { + ApiError::User(user_error as u16) + } +} + +/// Checks if at least one of provided authorization keys belongs to a role defined as a slice of +/// `AccountHash`es. 
+fn has_role_access_to(role_keys: &[AccountHash]) -> bool { + let authorization_keys = runtime::list_authorization_keys(); + let role_b_keys: BTreeSet = role_keys.iter().copied().collect(); + authorization_keys.intersection(&role_b_keys).count() > 0 +} + +#[no_mangle] +pub extern "C" fn entrypoint_a() { + if !has_role_access_to(&ROLE_A_KEYS) { + // None of the authorization keys used to sign this deploy matched ROLE_A + runtime::revert(UserError::PermissionDenied) + } + + // Restricted code +} + +#[no_mangle] +pub extern "C" fn entrypoint_b() { + if !has_role_access_to(&ROLE_B_KEYS) { + // None of the authorization keys used to sign this deploy matched ROLE_B + runtime::revert(UserError::PermissionDenied) + } + + // Restricted code +} + +#[no_mangle] +pub extern "C" fn call() { + let entry_points = { + let mut entry_points = EntryPoints::new(); + + let entrypoint_a = EntityEntryPoint::new( + ENTRYPOINT_A, + Parameters::default(), + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + + let entrypoint_b = EntityEntryPoint::new( + ENTRYPOINT_B, + Parameters::default(), + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + + entry_points.add_entry_point(entrypoint_a); + entry_points.add_entry_point(entrypoint_b); + + entry_points + }; + + let (contract_hash, _version) = storage::new_contract( + entry_points, + None, + Some(CONTRACT_PACKAGE_KEY.to_string()), + Some(ACCESS_KEY.to_string()), + None, + ); + + runtime::put_key( + CONTRACT_KEY, + Key::contract_entity_key(AddressableEntityHash::new(contract_hash.value())), + ); +} diff --git a/smart_contracts/contracts/test/named-dictionary-test/Cargo.toml b/smart_contracts/contracts/test/named-dictionary-test/Cargo.toml new file mode 100644 index 0000000000..8a62a9ea98 --- /dev/null +++ b/smart_contracts/contracts/test/named-dictionary-test/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "named-dictionary-test" +version = 
"0.1.0" +authors = ["Luís Fernando Schultz Xavier da Silveira "] +edition = "2021" + +[[bin]] +name = "named-dictionary-test" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/named-dictionary-test/src/main.rs b/smart_contracts/contracts/test/named-dictionary-test/src/main.rs new file mode 100644 index 0000000000..703aa145a8 --- /dev/null +++ b/smart_contracts/contracts/test/named-dictionary-test/src/main.rs @@ -0,0 +1,55 @@ +#![no_std] +#![no_main] + +extern crate alloc; +use alloc::{ + collections::BTreeMap, + string::{String, ToString}, + vec::Vec, +}; +use casper_contract::{ + contract_api::{runtime, storage}, + unwrap_or_revert::UnwrapOrRevert, +}; + +type DictIndex = u8; // Must fit into `usize`. +type KeySeed = u8; +type Value = u8; +const DICTIONARY_NAMES: &[&str] = &[ + "the", "quick", "brown", "fox", "jumps", "over", "the_", "lazy", "dog", +]; + +#[no_mangle] +pub extern "C" fn call() { + let puts: Vec<(DictIndex, KeySeed, Value)> = runtime::get_named_arg("puts"); + + for name in DICTIONARY_NAMES { + let _ = storage::new_dictionary(name).unwrap_or_revert(); + } + + let mut maps: Vec> = (0..DICTIONARY_NAMES.len()) + .map(|_| BTreeMap::new()) + .collect(); + for (dict_index, key_seed, value) in puts { + let dict_index = dict_index as usize; + assert!(dict_index < DICTIONARY_NAMES.len()); + let key = key_seed.to_string(); + assert_eq!( + maps[dict_index].get(&key), + storage::named_dictionary_get(DICTIONARY_NAMES[dict_index], &key) + .unwrap_or_revert() + .as_ref() + ); + storage::named_dictionary_put(DICTIONARY_NAMES[dict_index], &key, value); + maps[dict_index].insert(key, value); + } + + for i in 0..DICTIONARY_NAMES.len() { + for (key, &value) in maps[i].iter() { + assert_eq!( + storage::named_dictionary_get(DICTIONARY_NAMES[i], key).unwrap_or_revert(), + Some(value) + ); + } + } 
+} diff --git a/smart_contracts/contracts/test/named-keys-stored-call/Cargo.toml b/smart_contracts/contracts/test/named-keys-stored-call/Cargo.toml new file mode 100644 index 0000000000..e274ebaa58 --- /dev/null +++ b/smart_contracts/contracts/test/named-keys-stored-call/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "named-keys-stored-call" +version = "0.1.0" +authors = ["Michał Papierski "] +edition = "2021" + +[[bin]] +name = "named_keys_stored_call" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/named-keys-stored-call/src/main.rs b/smart_contracts/contracts/test/named-keys-stored-call/src/main.rs new file mode 100644 index 0000000000..5cb31c5820 --- /dev/null +++ b/smart_contracts/contracts/test/named-keys-stored-call/src/main.rs @@ -0,0 +1,22 @@ +#![no_std] +#![no_main] + +extern crate alloc; + +use alloc::string::String; + +use casper_contract::{self, contract_api::runtime, unwrap_or_revert::UnwrapOrRevert}; +use casper_types::{Key, RuntimeArgs}; + +const CONTRACT_HASH_NAME: &str = "contract_stored"; + +#[no_mangle] +pub extern "C" fn call() { + let contract_hash = runtime::get_key(CONTRACT_HASH_NAME) + .and_then(Key::into_entity_hash) + .unwrap_or_revert(); + + let entry_point: String = runtime::get_named_arg("entry_point"); + + runtime::call_contract::<()>(contract_hash.into(), &entry_point, RuntimeArgs::default()); +} diff --git a/smart_contracts/contracts/test/named-keys-stored/Cargo.toml b/smart_contracts/contracts/test/named-keys-stored/Cargo.toml new file mode 100644 index 0000000000..c9b806473d --- /dev/null +++ b/smart_contracts/contracts/test/named-keys-stored/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "named-keys-stored" +version = "0.1.0" +authors = ["Michał Papierski "] +edition = "2021" + +[[bin]] +name = "named_keys_stored" +path = "src/main.rs" +bench = false 
+doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/named-keys-stored/src/main.rs b/smart_contracts/contracts/test/named-keys-stored/src/main.rs new file mode 100644 index 0000000000..debfe0866f --- /dev/null +++ b/smart_contracts/contracts/test/named-keys-stored/src/main.rs @@ -0,0 +1,204 @@ +#![no_std] +#![no_main] + +extern crate alloc; + +use alloc::{collections::BTreeMap, string::ToString}; + +use casper_contract::{ + self, + contract_api::{runtime, storage}, + unwrap_or_revert::UnwrapOrRevert, +}; +use casper_types::{ + addressable_entity::Parameters, ApiError, CLType, EntityEntryPoint, EntryPointAccess, + EntryPointPayment, EntryPointType, EntryPoints, Key, NamedKeys, PackageHash, RuntimeArgs, +}; + +const ENTRY_POINT_CONTRACT: &str = "named_keys_contract"; +const ENTRY_POINT_CONTRACT_TO_CONTRACT: &str = "named_keys_contract_to_contract"; +const ENTRY_POINT_SESSION_TO_SESSION: &str = "named_keys_session_to_session"; +const ENTRY_POINT_SESSION: &str = "named_keys_session"; +const CONTRACT_PACKAGE_HASH_NAME: &str = "contract_package_stored"; +const CONTRACT_HASH_NAME: &str = "contract_stored"; +const CONTRACT_VERSION: &str = "contract_version"; + +#[repr(u16)] +enum Error { + HasWrongNamedKeys, + FoundNamedKey1, + FoundNamedKey2, + FoundNamedKey3, + FoundNamedKey4, + UnexpectedContractValidURef, + UnexpectedAccountValidURef, +} + +impl From for ApiError { + fn from(error: Error) -> Self { + ApiError::User(error as u16) + } +} + +#[no_mangle] +pub extern "C" fn named_keys_contract() { + if runtime::get_key("account_named_key_1").is_some() + || runtime::get_key("account_named_key_2").is_some() + || runtime::get_key("account_named_key_3").is_some() + || runtime::get_key("account_named_key_4").is_some() + { + runtime::revert(Error::HasWrongNamedKeys); + } + + if runtime::get_key("named_key_1").is_none() { + 
runtime::revert(Error::FoundNamedKey1); + } + if runtime::get_key("named_key_2").is_none() { + runtime::revert(Error::FoundNamedKey2); + } + if runtime::get_key("named_key_3").is_none() { + runtime::revert(Error::FoundNamedKey3); + } + let uref_key = runtime::get_key("named_key_4").unwrap_or_revert_with(Error::FoundNamedKey4); + let uref = uref_key.into_uref().unwrap(); + if !runtime::is_valid_uref(uref) { + runtime::revert(Error::UnexpectedContractValidURef); + } +} + +#[no_mangle] +pub extern "C" fn named_keys_session() { + if runtime::get_key("named_key_1").is_some() + || runtime::get_key("named_key_2").is_some() + || runtime::get_key("named_key_3").is_some() + || runtime::get_key("named_key_4").is_some() + { + runtime::revert(Error::HasWrongNamedKeys); + } + + if runtime::get_key("account_named_key_1").is_none() { + runtime::revert(Error::FoundNamedKey1); + } + if runtime::get_key("account_named_key_2").is_none() { + runtime::revert(Error::FoundNamedKey2); + } + if runtime::get_key("account_named_key_3").is_none() { + runtime::revert(Error::FoundNamedKey3); + } + if runtime::get_key("account_named_key_4").is_none() { + runtime::revert(Error::FoundNamedKey4); + } + let uref_key = runtime::get_key("account_named_key_4") + .unwrap_or_revert_with(Error::UnexpectedContractValidURef); + let uref = uref_key.into_uref().unwrap(); + if !runtime::is_valid_uref(uref) { + runtime::revert(Error::UnexpectedAccountValidURef); + } +} + +#[no_mangle] +pub extern "C" fn named_keys_contract_to_contract() { + let package_hash = runtime::get_key(CONTRACT_PACKAGE_HASH_NAME) + .and_then(Key::into_package_addr) + .map(PackageHash::new) + .unwrap_or_revert(); + + runtime::call_versioned_contract::<()>( + package_hash.into(), + None, + ENTRY_POINT_CONTRACT, + RuntimeArgs::default(), + ); +} + +#[no_mangle] +pub extern "C" fn named_keys_session_to_session() { + let package_hash = runtime::get_key(CONTRACT_PACKAGE_HASH_NAME) + .and_then(Key::into_package_addr) + .map(PackageHash::new) + 
.unwrap_or_revert(); + + runtime::call_versioned_contract::<()>( + package_hash.into(), + None, + ENTRY_POINT_SESSION, + RuntimeArgs::default(), + ); +} + +#[no_mangle] +pub extern "C" fn call() { + runtime::put_key("account_named_key_1", Key::Hash([10; 32])); + runtime::put_key("account_named_key_2", Key::Hash([11; 32])); + runtime::put_key("account_named_key_3", Key::Hash([12; 32])); + let uref = storage::new_uref(()); + runtime::put_key("account_named_key_4", Key::from(uref)); + + let entry_points = { + let mut entry_points = EntryPoints::new(); + let contract_entrypoint = EntityEntryPoint::new( + ENTRY_POINT_CONTRACT.to_string(), + Parameters::new(), + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + entry_points.add_entry_point(contract_entrypoint); + let session_entrypoint = EntityEntryPoint::new( + ENTRY_POINT_SESSION.to_string(), + Parameters::new(), + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + entry_points.add_entry_point(session_entrypoint); + let contract_to_contract_entrypoint = EntityEntryPoint::new( + ENTRY_POINT_CONTRACT_TO_CONTRACT.to_string(), + Parameters::new(), + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + entry_points.add_entry_point(contract_to_contract_entrypoint); + let contract_to_contract_entrypoint = EntityEntryPoint::new( + ENTRY_POINT_SESSION_TO_SESSION.to_string(), + Parameters::new(), + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + entry_points.add_entry_point(contract_to_contract_entrypoint); + entry_points + }; + + let (contract_package_hash, _access) = storage::create_contract_package_at_hash(); + + let named_keys = { + let mut named_keys = NamedKeys::new(); + named_keys.insert("named_key_1".to_string(), Key::Hash([1; 32])); + named_keys.insert("named_key_2".to_string(), Key::Hash([2; 32])); + 
named_keys.insert("named_key_3".to_string(), Key::Hash([3; 32])); + let uref = storage::new_uref(()); + named_keys.insert("named_key_4".to_string(), Key::from(uref)); + named_keys.insert( + CONTRACT_PACKAGE_HASH_NAME.to_string(), + Key::from(contract_package_hash), + ); + named_keys + }; + + let (contract_hash, contract_version) = storage::add_contract_version( + contract_package_hash, + entry_points, + named_keys, + BTreeMap::new(), + ); + + runtime::put_key(CONTRACT_VERSION, storage::new_uref(contract_version).into()); + runtime::put_key(CONTRACT_PACKAGE_HASH_NAME, contract_package_hash.into()); + runtime::put_key(CONTRACT_HASH_NAME, Key::Hash(contract_hash.value())); +} diff --git a/smart_contracts/contracts/test/named-keys/Cargo.toml b/smart_contracts/contracts/test/named-keys/Cargo.toml index 37544f0e2f..b66188328e 100644 --- a/smart_contracts/contracts/test/named-keys/Cargo.toml +++ b/smart_contracts/contracts/test/named-keys/Cargo.toml @@ -1,8 +1,8 @@ [package] name = "named-keys" version = "0.1.0" -authors = ["Michał Papierski "] -edition = "2018" +authors = ["Michał Papierski "] +edition = "2021" [[bin]] name = "named_keys" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/named-keys/src/main.rs b/smart_contracts/contracts/test/named-keys/src/main.rs index e6177f4e2f..e9d0000080 100644 --- a/smart_contracts/contracts/test/named-keys/src/main.rs +++ b/smart_contracts/contracts/test/named-keys/src/main.rs @@ -39,12 +39,11 @@ pub extern "C" fn call() { COMMAND_TEST_READ_UREF1 => { // Read data hidden behind `URef1` uref let hello_world: String = storage::read( - runtime::list_named_keys() + (*runtime::list_named_keys() .get("hello-world") - .expect("Unable to get hello-world") - .clone() - .try_into() - .expect("Unable to convert to 
uref"), + .expect("Unable to get hello-world")) + .try_into() + .expect("Unable to convert to uref"), ) .expect("Unable to deserialize URef") .expect("Unable to find value"); diff --git a/smart_contracts/contracts/test/new-named-uref/Cargo.toml b/smart_contracts/contracts/test/new-named-uref/Cargo.toml new file mode 100644 index 0000000000..c67e45551e --- /dev/null +++ b/smart_contracts/contracts/test/new-named-uref/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "new-named-uref" +version = "0.1.0" +authors = ["Michał Papierski "] +edition = "2021" + +[[bin]] +name = "new_named_uref" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } diff --git a/smart_contracts/contracts/test/new-named-uref/src/main.rs b/smart_contracts/contracts/test/new-named-uref/src/main.rs new file mode 100644 index 0000000000..3d255d1611 --- /dev/null +++ b/smart_contracts/contracts/test/new-named-uref/src/main.rs @@ -0,0 +1,18 @@ +#![no_std] +#![no_main] + +extern crate alloc; + +use alloc::string::String; + +use casper_contract::contract_api::{runtime, storage}; + +const ARG_UREF_NAME: &str = "uref_name"; +const INITIAL_DATA: &str = "bawitdaba"; + +#[no_mangle] +pub extern "C" fn call() { + let uref_name: String = runtime::get_named_arg(ARG_UREF_NAME); + let uref = storage::new_uref(String::from(INITIAL_DATA)); + runtime::put_key(&uref_name, uref.into()); +} diff --git a/smart_contracts/contracts/test/ordered-transforms/Cargo.toml b/smart_contracts/contracts/test/ordered-transforms/Cargo.toml new file mode 100644 index 0000000000..be8086b243 --- /dev/null +++ b/smart_contracts/contracts/test/ordered-transforms/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "ordered-transforms" +version = "0.1.0" +authors = ["Luís Fernando Schultz Xavier da Silveira "] +edition = "2021" + +[[bin]] +name = "ordered-transforms" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] 
+casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/ordered-transforms/src/main.rs b/smart_contracts/contracts/test/ordered-transforms/src/main.rs new file mode 100644 index 0000000000..f052699e09 --- /dev/null +++ b/smart_contracts/contracts/test/ordered-transforms/src/main.rs @@ -0,0 +1,82 @@ +#![no_std] +#![no_main] + +#[macro_use] +extern crate alloc; + +use alloc::{string::ToString, vec::Vec}; + +use casper_contract::{ + contract_api::{runtime, storage}, + unwrap_or_revert::UnwrapOrRevert, +}; +use casper_types::{ + contracts::NamedKeys, AddressableEntityHash, CLType, CLTyped, EntityEntryPoint, + EntryPointAccess, EntryPointPayment, EntryPointType, EntryPoints, Key, Parameter, URef, +}; + +#[no_mangle] +pub extern "C" fn call() { + let mut entry_points = EntryPoints::new(); + entry_points.add_entry_point(EntityEntryPoint::new( + "perform_operations", + vec![Parameter::new( + "operations", + Vec::<(u8, u32, i32)>::cl_type(), + )], + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + )); + + let n: u32 = runtime::get_named_arg("n"); + let mut named_keys = NamedKeys::new(); + for i in 0..n { + named_keys.insert(format!("uref-{}", i), Key::URef(storage::new_uref(0_i32))); + } + named_keys.insert("n-urefs".to_string(), Key::URef(storage::new_uref(n))); + + let (contract_hash, _contract_version) = + storage::new_locked_contract(entry_points, Some(named_keys), None, None, None); + runtime::put_key( + "ordered-transforms-contract-hash", + Key::contract_entity_key(AddressableEntityHash::new(contract_hash.value())), + ); +} + +#[no_mangle] +pub extern "C" fn perform_operations() { + // List of operations to be performed by the contract. 
+ // An operation is a tuple (t, i, v) where: + // * `t` is the operation type: 0 for reading, 1 for writing and 2 for adding; + // * `i` is the URef index; + // * `v` is the value to write or add (always zero for reads). + let operations: Vec<(u8, u32, i32)> = runtime::get_named_arg("operations"); + let n: u32 = storage::read(match runtime::get_key("n-urefs").unwrap_or_revert() { + Key::URef(uref) => uref, + _ => panic!("Bad number of URefs."), + }) + .unwrap_or_revert() + .unwrap_or_revert(); + let urefs: Vec = (0..n) + .map( + |i| match runtime::get_key(&format!("uref-{}", i)).unwrap_or_revert() { + Key::URef(uref) => uref, + _ => panic!("Bad URef."), + }, + ) + .collect(); + + for (t, i, v) in operations { + let uref = *urefs.get(i as usize).unwrap_or_revert(); + match t { + 0 => { + let _: Option = storage::read(uref).unwrap_or_revert(); + } + 1 => storage::write(uref, v), + 2 => storage::add(uref, v), + _ => panic!("Bad transform type"), + } + } +} diff --git a/smart_contracts/contracts/test/overwrite-uref-content/Cargo.toml b/smart_contracts/contracts/test/overwrite-uref-content/Cargo.toml index 66c429d58c..eaac1e7885 100644 --- a/smart_contracts/contracts/test/overwrite-uref-content/Cargo.toml +++ b/smart_contracts/contracts/test/overwrite-uref-content/Cargo.toml @@ -2,7 +2,7 @@ name = "overwrite-uref-content" version = "0.1.0" authors = ["Michał Papierski "] -edition = "2018" +edition = "2021" [[bin]] name = "overwrite_uref_content" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/payment-purse-persist/Cargo.toml b/smart_contracts/contracts/test/payment-purse-persist/Cargo.toml new file mode 100644 index 0000000000..b1883115cf --- /dev/null +++ b/smart_contracts/contracts/test/payment-purse-persist/Cargo.toml @@ -0,0 +1,16 @@ 
+[package] +name = "payment-purse-persist" +version = "0.1.0" +authors = ["Ed Hastings ", "Michał Papierski "] +edition = "2021" + +[[bin]] +name = "payment_purse_persist" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/payment-purse-persist/src/main.rs b/smart_contracts/contracts/test/payment-purse-persist/src/main.rs new file mode 100644 index 0000000000..c2b31f3706 --- /dev/null +++ b/smart_contracts/contracts/test/payment-purse-persist/src/main.rs @@ -0,0 +1,63 @@ +#![no_std] +#![no_main] + +extern crate alloc; + +use alloc::string::String; +use casper_contract::contract_api::{runtime, runtime::put_key, system}; +use casper_types::{contracts::ContractPackageHash, runtime_args, ApiError, RuntimeArgs, URef}; + +const GET_PAYMENT_PURSE: &str = "get_payment_purse"; +const THIS_SHOULD_FAIL: &str = "this_should_fail"; + +const ARG_METHOD: &str = "method"; + +/// This logic is intended to be used as SESSION PAYMENT LOGIC +/// It gets the payment purse and attempts and attempts to persist it, +/// which should fail. +#[no_mangle] +pub extern "C" fn call() { + let method: String = runtime::get_named_arg(ARG_METHOD); + + // handle payment contract + let handle_payment_contract_hash = system::get_handle_payment(); + + // get payment purse for current execution + let payment_purse: URef = runtime::call_contract( + handle_payment_contract_hash, + GET_PAYMENT_PURSE, + RuntimeArgs::default(), + ); + + if method == "put_key" { + // attempt to persist the payment purse, which should fail + put_key(THIS_SHOULD_FAIL, payment_purse.into()); + } else if method == "call_contract" { + // attempt to call a contract with the payment purse, which should fail + let _payment_purse: URef = runtime::call_contract( + handle_payment_contract_hash, + GET_PAYMENT_PURSE, + runtime_args! 
{ + "payment_purse" => payment_purse, + }, + ); + + // should never reach here + runtime::revert(ApiError::User(1000)); + } else if method == "call_versioned_contract" { + // attempt to call a versioned contract with the payment purse, which should fail + let _payment_purse: URef = runtime::call_versioned_contract( + ContractPackageHash::new(handle_payment_contract_hash.value()), + None, // Latest + GET_PAYMENT_PURSE, + runtime_args! { + "payment_purse" => payment_purse, + }, + ); + + // should never reach here + runtime::revert(ApiError::User(1001)); + } else { + runtime::revert(ApiError::User(2000)); + } +} diff --git a/smart_contracts/contracts/test/purse-holder-stored-caller/Cargo.toml b/smart_contracts/contracts/test/purse-holder-stored-caller/Cargo.toml index 615c336d26..a7a619c16c 100644 --- a/smart_contracts/contracts/test/purse-holder-stored-caller/Cargo.toml +++ b/smart_contracts/contracts/test/purse-holder-stored-caller/Cargo.toml @@ -1,8 +1,8 @@ [package] name = "purse-holder-stored-caller" version = "0.1.0" -authors = ["Ed Hastings "] -edition = "2018" +authors = ["Ed Hastings "] +edition = "2021" [[bin]] name = "purse_holder_stored_caller" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/purse-holder-stored-caller/src/main.rs b/smart_contracts/contracts/test/purse-holder-stored-caller/src/main.rs index 2074d774a0..e5294777cf 100644 --- a/smart_contracts/contracts/test/purse-holder-stored-caller/src/main.rs +++ b/smart_contracts/contracts/test/purse-holder-stored-caller/src/main.rs @@ -6,7 +6,7 @@ extern crate alloc; use alloc::string::String; use casper_contract::contract_api::{runtime, storage}; -use casper_types::{runtime_args, ContractHash, RuntimeArgs}; +use casper_types::{runtime_args, AddressableEntityHash, 
RuntimeArgs}; const METHOD_VERSION: &str = "version"; const HASH_KEY_NAME: &str = "purse_holder"; @@ -19,20 +19,23 @@ pub extern "C" fn call() { match entry_point_name.as_str() { METHOD_VERSION => { - let contract_hash: ContractHash = runtime::get_named_arg(HASH_KEY_NAME); - let version: String = - runtime::call_contract(contract_hash, &entry_point_name, RuntimeArgs::default()); + let contract_hash: AddressableEntityHash = runtime::get_named_arg(HASH_KEY_NAME); + let version: String = runtime::call_contract( + contract_hash.into(), + &entry_point_name, + RuntimeArgs::default(), + ); let version_key = storage::new_uref(version).into(); runtime::put_key(METHOD_VERSION, version_key); } _ => { - let contract_hash: ContractHash = runtime::get_named_arg(HASH_KEY_NAME); + let contract_hash: AddressableEntityHash = runtime::get_named_arg(HASH_KEY_NAME); let purse_name: String = runtime::get_named_arg(PURSE_NAME); let args = runtime_args! { PURSE_NAME => purse_name, }; - runtime::call_contract::<()>(contract_hash, &entry_point_name, args); + runtime::call_contract::<()>(contract_hash.into(), &entry_point_name, args); } }; } diff --git a/smart_contracts/contracts/test/purse-holder-stored-upgrader-v2-2/Cargo.toml b/smart_contracts/contracts/test/purse-holder-stored-upgrader-v2-2/Cargo.toml new file mode 100644 index 0000000000..1678b4c75a --- /dev/null +++ b/smart_contracts/contracts/test/purse-holder-stored-upgrader-v2-2/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "purse-holder-stored-upgrader-v2-2" +version = "0.1.0" +authors = ["Karan Dhareshwar "] +edition = "2021" + +[[bin]] +name = "purse_holder_stored_upgrader_v2_2" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/purse-holder-stored-upgrader-v2-2/src/main.rs b/smart_contracts/contracts/test/purse-holder-stored-upgrader-v2-2/src/main.rs new 
file mode 100644 index 0000000000..095b9b51fa --- /dev/null +++ b/smart_contracts/contracts/test/purse-holder-stored-upgrader-v2-2/src/main.rs @@ -0,0 +1,109 @@ +#![no_std] +#![no_main] + +#[macro_use] +extern crate alloc; + +use alloc::{collections::BTreeMap, string::String}; + +use casper_contract::{ + contract_api::{runtime, storage, system}, + unwrap_or_revert::UnwrapOrRevert, +}; +use casper_types::{ + contracts::NamedKeys, CLType, CLValue, EntityEntryPoint, EntryPointAccess, EntryPointPayment, + EntryPointType, EntryPoints, Key, PackageHash, Parameter, URef, +}; + +pub const METHOD_DELEGATE: &str = "delegate"; +pub const METHOD_REMOVE: &str = "remove"; +pub const METHOD_VERSION: &str = "version"; +pub const ARG_PURSE_NAME: &str = "purse_name"; +pub const NEW_VERSION: &str = "1.0.1"; +const VERSION: &str = "version"; +const ACCESS_KEY_NAME: &str = "purse_holder_access"; +const PURSE_HOLDER_STORED_CONTRACT_NAME: &str = "purse_holder_stored"; +const ARG_CONTRACT_PACKAGE: &str = "contract_package"; +const CONTRACT_VERSION: &str = "contract_version"; + +fn purse_name() -> String { + runtime::get_named_arg(ARG_PURSE_NAME) +} + +#[no_mangle] +pub extern "C" fn delegate() { + let purse_name = purse_name(); + let purse = system::create_purse(); + runtime::put_key(&purse_name, purse.into()); +} + +#[no_mangle] +pub extern "C" fn remove() { + let purse_name = purse_name(); + runtime::remove_key(&purse_name); +} + +#[no_mangle] +pub extern "C" fn version() { + runtime::ret(CLValue::from_t(VERSION).unwrap_or_revert()) +} + +#[no_mangle] +pub extern "C" fn call() { + let contract_package: PackageHash = runtime::get_named_arg(ARG_CONTRACT_PACKAGE); + let _access_key: URef = runtime::get_key(ACCESS_KEY_NAME) + .expect("should have access key") + .into_uref() + .expect("should be uref"); + + let entry_points = { + let mut entry_points = EntryPoints::new(); + let add = EntityEntryPoint::new( + METHOD_DELEGATE, + vec![Parameter::new(ARG_PURSE_NAME, CLType::String)], + 
CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + entry_points.add_entry_point(add); + let version = EntityEntryPoint::new( + METHOD_VERSION, + vec![], + CLType::String, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + entry_points.add_entry_point(version); + + let remove = EntityEntryPoint::new( + METHOD_REMOVE, + vec![Parameter::new(ARG_PURSE_NAME, CLType::String)], + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + entry_points.add_entry_point(remove); + entry_points + }; + // this should overwrite the previous contract obj with the new contract obj at the same uref + let (new_contract_hash, new_contract_version) = storage::add_contract_version( + contract_package.into(), + entry_points, + NamedKeys::new(), + BTreeMap::new(), + ); + runtime::put_key( + PURSE_HOLDER_STORED_CONTRACT_NAME, + Key::Hash(new_contract_hash.value()), + ); + runtime::put_key( + CONTRACT_VERSION, + storage::new_uref(new_contract_version).into(), + ); + // set new version + let version_key = storage::new_uref(NEW_VERSION).into(); + runtime::put_key(VERSION, version_key); +} diff --git a/smart_contracts/contracts/test/purse-holder-stored-upgrader/Cargo.toml b/smart_contracts/contracts/test/purse-holder-stored-upgrader/Cargo.toml index f198278295..4ddecf0225 100644 --- a/smart_contracts/contracts/test/purse-holder-stored-upgrader/Cargo.toml +++ b/smart_contracts/contracts/test/purse-holder-stored-upgrader/Cargo.toml @@ -1,8 +1,8 @@ [package] name = "purse-holder-stored-upgrader" version = "0.1.0" -authors = ["Ed Hastings "] -edition = "2018" +authors = ["Ed Hastings "] +edition = "2021" [[bin]] name = "purse_holder_stored_upgrader" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = 
"../../../../types" } diff --git a/smart_contracts/contracts/test/purse-holder-stored-upgrader/src/main.rs b/smart_contracts/contracts/test/purse-holder-stored-upgrader/src/main.rs index 5357f507ed..4c1009d11e 100644 --- a/smart_contracts/contracts/test/purse-holder-stored-upgrader/src/main.rs +++ b/smart_contracts/contracts/test/purse-holder-stored-upgrader/src/main.rs @@ -4,15 +4,15 @@ #[macro_use] extern crate alloc; -use alloc::string::String; +use alloc::{collections::BTreeMap, string::String}; use casper_contract::{ contract_api::{runtime, storage, system}, unwrap_or_revert::UnwrapOrRevert, }; use casper_types::{ - contracts::NamedKeys, CLType, CLValue, ContractPackageHash, EntryPoint, EntryPointAccess, - EntryPointType, EntryPoints, Parameter, URef, + contracts::NamedKeys, CLType, CLValue, EntityEntryPoint, EntryPointAccess, EntryPointPayment, + EntryPointType, EntryPoints, Key, PackageHash, Parameter, URef, }; pub const METHOD_ADD: &str = "add"; @@ -50,7 +50,7 @@ pub extern "C" fn version() { #[no_mangle] pub extern "C" fn call() { - let contract_package: ContractPackageHash = runtime::get_named_arg(ARG_CONTRACT_PACKAGE); + let contract_package: PackageHash = runtime::get_named_arg(ARG_CONTRACT_PACKAGE); let _access_key: URef = runtime::get_key(ACCESS_KEY_NAME) .expect("should have access key") .into_uref() @@ -58,37 +58,47 @@ pub extern "C" fn call() { let entry_points = { let mut entry_points = EntryPoints::new(); - let add = EntryPoint::new( + let add = EntityEntryPoint::new( METHOD_ADD, vec![Parameter::new(ARG_PURSE_NAME, CLType::String)], CLType::Unit, EntryPointAccess::Public, - EntryPointType::Contract, + EntryPointType::Called, + EntryPointPayment::Caller, ); entry_points.add_entry_point(add); - let version = EntryPoint::new( + let version = EntityEntryPoint::new( METHOD_VERSION, vec![], CLType::String, EntryPointAccess::Public, - EntryPointType::Contract, + EntryPointType::Called, + EntryPointPayment::Caller, ); 
entry_points.add_entry_point(version); - let remove = EntryPoint::new( + let remove = EntityEntryPoint::new( METHOD_REMOVE, vec![Parameter::new(ARG_PURSE_NAME, CLType::String)], CLType::Unit, EntryPointAccess::Public, - EntryPointType::Contract, + EntryPointType::Called, + EntryPointPayment::Caller, ); entry_points.add_entry_point(remove); entry_points }; // this should overwrite the previous contract obj with the new contract obj at the same uref - let (new_contract_hash, new_contract_version) = - storage::add_contract_version(contract_package, entry_points, NamedKeys::new()); - runtime::put_key(PURSE_HOLDER_STORED_CONTRACT_NAME, new_contract_hash.into()); + let (new_contract_hash, new_contract_version) = storage::add_contract_version( + contract_package.into(), + entry_points, + NamedKeys::new(), + BTreeMap::new(), + ); + runtime::put_key( + PURSE_HOLDER_STORED_CONTRACT_NAME, + Key::Hash(new_contract_hash.value()), + ); runtime::put_key( CONTRACT_VERSION, storage::new_uref(new_contract_version).into(), diff --git a/smart_contracts/contracts/test/purse-holder-stored/Cargo.toml b/smart_contracts/contracts/test/purse-holder-stored/Cargo.toml index b65e940083..efe8dbb1f7 100644 --- a/smart_contracts/contracts/test/purse-holder-stored/Cargo.toml +++ b/smart_contracts/contracts/test/purse-holder-stored/Cargo.toml @@ -1,8 +1,8 @@ [package] name = "purse-holder-stored" version = "0.1.0" -authors = ["Ed Hastings "] -edition = "2018" +authors = ["Ed Hastings "] +edition = "2021" [[bin]] name = "purse_holder_stored" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/purse-holder-stored/src/main.rs b/smart_contracts/contracts/test/purse-holder-stored/src/main.rs index f47ca96558..878f38e335 100644 --- 
a/smart_contracts/contracts/test/purse-holder-stored/src/main.rs +++ b/smart_contracts/contracts/test/purse-holder-stored/src/main.rs @@ -12,7 +12,8 @@ use casper_contract::{ unwrap_or_revert::UnwrapOrRevert, }; use casper_types::{ - CLType, CLValue, EntryPoint, EntryPointAccess, EntryPointType, EntryPoints, Parameter, + AddressableEntityHash, CLType, CLValue, EntityEntryPoint, EntryPointAccess, EntryPointPayment, + EntryPointType, EntryPoints, Key, Parameter, }; pub const METHOD_ADD: &str = "add"; @@ -47,20 +48,22 @@ pub extern "C" fn call() { let is_locked: bool = runtime::get_named_arg(ARG_IS_LOCKED); let entry_points = { let mut entry_points = EntryPoints::new(); - let add = EntryPoint::new( + let add = EntityEntryPoint::new( ENTRY_POINT_ADD.to_string(), vec![Parameter::new(ARG_PURSE, CLType::String)], CLType::Unit, EntryPointAccess::Public, - EntryPointType::Contract, + EntryPointType::Called, + EntryPointPayment::Caller, ); entry_points.add_entry_point(add); - let version = EntryPoint::new( + let version = EntityEntryPoint::new( ENTRY_POINT_VERSION.to_string(), vec![], CLType::String, EntryPointAccess::Public, - EntryPointType::Contract, + EntryPointType::Called, + EntryPointPayment::Caller, ); entry_points.add_entry_point(version); entry_points @@ -72,6 +75,7 @@ pub extern "C" fn call() { None, Some(HASH_KEY_NAME.to_string()), Some(ACCESS_KEY_NAME.to_string()), + None, ) } else { storage::new_locked_contract( @@ -79,10 +83,14 @@ pub extern "C" fn call() { None, Some(HASH_KEY_NAME.to_string()), Some(ACCESS_KEY_NAME.to_string()), + None, ) }; runtime::put_key(CONTRACT_VERSION, storage::new_uref(contract_version).into()); - runtime::put_key(PURSE_HOLDER_STORED_CONTRACT_NAME, contract_hash.into()); + runtime::put_key( + PURSE_HOLDER_STORED_CONTRACT_NAME, + Key::contract_entity_key(AddressableEntityHash::new(contract_hash.value())), + ); runtime::put_key(ENTRY_POINT_VERSION, storage::new_uref(VERSION).into()); } diff --git 
a/smart_contracts/contracts/test/random-bytes-payment/Cargo.toml b/smart_contracts/contracts/test/random-bytes-payment/Cargo.toml new file mode 100644 index 0000000000..8d6d9f324f --- /dev/null +++ b/smart_contracts/contracts/test/random-bytes-payment/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "random-bytes-payment" +version = "0.1.0" +authors = ["Rafał Chabowski "] +edition = "2021" + +[[bin]] +name = "random_bytes_payment" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/random-bytes-payment/src/main.rs b/smart_contracts/contracts/test/random-bytes-payment/src/main.rs new file mode 100644 index 0000000000..bd7d83021a --- /dev/null +++ b/smart_contracts/contracts/test/random-bytes-payment/src/main.rs @@ -0,0 +1,44 @@ +#![no_std] +#![no_main] + +use casper_contract::{ + contract_api::{account, runtime, storage, system}, + unwrap_or_revert::UnwrapOrRevert, +}; +use casper_types::{Phase, RuntimeArgs, URef, U512}; + +const GET_PAYMENT_PURSE: &str = "get_payment_purse"; +const ARG_AMOUNT: &str = "amount"; + +fn standard_payment(amount: U512) { + let main_purse = account::get_main_purse(); + + let handle_payment_pointer = system::get_handle_payment(); + + let payment_purse: URef = runtime::call_contract( + handle_payment_pointer, + GET_PAYMENT_PURSE, + RuntimeArgs::default(), + ); + + system::transfer_from_purse_to_purse(main_purse, payment_purse, amount, None).unwrap_or_revert() +} + +const RANDOM_BYTES_PAYMENT_RESULT: &str = "random_bytes_payment_result"; + +#[no_mangle] +pub extern "C" fn call() { + let get_phase = runtime::get_phase(); + assert_eq!( + Phase::Payment, + get_phase, + "should only be invoked in payment phase" + ); + let amount: U512 = runtime::get_named_arg(ARG_AMOUNT); + + let random_bytes = runtime::random_bytes(); + let uref = storage::new_uref(random_bytes); + 
runtime::put_key(RANDOM_BYTES_PAYMENT_RESULT, uref.into()); + + standard_payment(amount); +} diff --git a/smart_contracts/contracts/test/random-bytes/Cargo.toml b/smart_contracts/contracts/test/random-bytes/Cargo.toml new file mode 100644 index 0000000000..eb252f238f --- /dev/null +++ b/smart_contracts/contracts/test/random-bytes/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "random-bytes" +version = "0.1.0" +authors = ["Rafał Chabowski "] +edition = "2021" + +[[bin]] +name = "random_bytes" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/random-bytes/src/main.rs b/smart_contracts/contracts/test/random-bytes/src/main.rs new file mode 100644 index 0000000000..d282936448 --- /dev/null +++ b/smart_contracts/contracts/test/random-bytes/src/main.rs @@ -0,0 +1,21 @@ +#![no_std] +#![no_main] + +use casper_contract::contract_api::{runtime, storage}; +use casper_types::Phase; + +const RANDOM_BYTES_RESULT: &str = "random_bytes_result"; + +#[no_mangle] +pub extern "C" fn call() { + let get_phase = runtime::get_phase(); + assert_ne!( + Phase::Payment, + get_phase, + "should not be invoked in payment phase" + ); + + let random_bytes = runtime::random_bytes(); + let uref = storage::new_uref(random_bytes); + runtime::put_key(RANDOM_BYTES_RESULT, uref.into()) +} diff --git a/smart_contracts/contracts/test/read-from-key/Cargo.toml b/smart_contracts/contracts/test/read-from-key/Cargo.toml new file mode 100644 index 0000000000..6c85495bff --- /dev/null +++ b/smart_contracts/contracts/test/read-from-key/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "read-from-key" +version = "0.1.0" +edition = "2018" + +[[bin]] +name = "read_from_key" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = 
"../../../../types" } \ No newline at end of file diff --git a/smart_contracts/contracts/test/read-from-key/src/main.rs b/smart_contracts/contracts/test/read-from-key/src/main.rs new file mode 100644 index 0000000000..9c46f58364 --- /dev/null +++ b/smart_contracts/contracts/test/read-from-key/src/main.rs @@ -0,0 +1,40 @@ +#![no_std] +#![no_main] + +extern crate alloc; + +use alloc::string::{String, ToString}; + +use casper_contract::{ + contract_api::{runtime, storage}, + unwrap_or_revert::UnwrapOrRevert, +}; +use casper_types::{ApiError, Key}; + +const DICTIONARY_NAME: &str = "dictionary-name"; +const DICTIONARY_ITEM_KEY: &str = "dictionary-item-key"; +const DICTIONARY_VALUE: &str = "dictionary-value"; + +#[no_mangle] +pub extern "C" fn call() { + let dictionary_seed_uref = storage::new_dictionary(DICTIONARY_NAME).unwrap_or_revert(); + storage::dictionary_put( + dictionary_seed_uref, + DICTIONARY_ITEM_KEY, + DICTIONARY_VALUE.to_string(), + ); + let dictionary_address_key = + Key::dictionary(dictionary_seed_uref, DICTIONARY_ITEM_KEY.as_bytes()); + let value_via_read = storage::read_from_key::(dictionary_address_key) + .unwrap_or_revert() + .unwrap_or_revert(); + let value_via_get: String = storage::dictionary_get(dictionary_seed_uref, DICTIONARY_ITEM_KEY) + .unwrap_or_revert() + .unwrap_or_revert(); + if value_via_read != *DICTIONARY_VALUE { + runtime::revert(ApiError::User(16u16)) + } + if value_via_get != value_via_read { + runtime::revert(ApiError::User(17u16)) + } +} diff --git a/smart_contracts/contracts/test/recover-secp256k1/Cargo.toml b/smart_contracts/contracts/test/recover-secp256k1/Cargo.toml new file mode 100644 index 0000000000..7c55642f35 --- /dev/null +++ b/smart_contracts/contracts/test/recover-secp256k1/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "recover-secp256k1" +version = "0.1.0" +authors = ["Igor Bunar "] +edition = "2021" + +[[bin]] +name = "recover_secp256k1" +path = "src/main.rs" +bench = false +doctest = false +test = false + 
+[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/recover-secp256k1/src/main.rs b/smart_contracts/contracts/test/recover-secp256k1/src/main.rs new file mode 100644 index 0000000000..d885ba6fc0 --- /dev/null +++ b/smart_contracts/contracts/test/recover-secp256k1/src/main.rs @@ -0,0 +1,32 @@ +#![no_std] +#![no_main] + +extern crate alloc; +use alloc::string::String; +use casper_contract::{ + contract_api::{cryptography, runtime}, + unwrap_or_revert::UnwrapOrRevert, +}; +use casper_types::{ + bytesrepr::{Bytes, FromBytes}, + PublicKey, Signature, +}; + +const ARG_MESSAGE: &str = "message"; +const ARG_SIGNATURE_BYTES: &str = "signature_bytes"; +const ARG_RECOVERY_ID: &str = "recovery_id"; +const ARG_EXPECTED: &str = "expected"; + +#[no_mangle] +pub extern "C" fn call() { + let message: String = runtime::get_named_arg(ARG_MESSAGE); + let signature_bytes: Bytes = runtime::get_named_arg(ARG_SIGNATURE_BYTES); + let recovery_id: u8 = runtime::get_named_arg(ARG_RECOVERY_ID); + let expected: PublicKey = runtime::get_named_arg(ARG_EXPECTED); + + let (signature, _) = Signature::from_bytes(&signature_bytes).unwrap(); + let recovered_pk = cryptography::recover_secp256k1(message.as_bytes(), &signature, recovery_id) + .unwrap_or_revert(); + + assert_eq!(recovered_pk, expected, "PublicKey mismatch"); +} diff --git a/smart_contracts/contracts/test/refund-purse/Cargo.toml b/smart_contracts/contracts/test/refund-purse/Cargo.toml index ef6ef24f31..679a21336a 100644 --- a/smart_contracts/contracts/test/refund-purse/Cargo.toml +++ b/smart_contracts/contracts/test/refund-purse/Cargo.toml @@ -2,7 +2,7 @@ name = "refund-purse" version = "0.1.0" authors = ["Michael Birch "] -edition = "2018" +edition = "2021" [[bin]] name = "refund_purse" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] 
casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/refund-purse/src/main.rs b/smart_contracts/contracts/test/refund-purse/src/main.rs index 0bdaa5ddc8..749bb16b9a 100644 --- a/smart_contracts/contracts/test/refund-purse/src/main.rs +++ b/smart_contracts/contracts/test/refund-purse/src/main.rs @@ -1,11 +1,15 @@ #![no_std] #![no_main] +extern crate alloc; + +use alloc::string::String; + use casper_contract::{ contract_api::{account, runtime, system}, unwrap_or_revert::UnwrapOrRevert, }; -use casper_types::{runtime_args, ApiError, ContractHash, RuntimeArgs, URef, U512}; +use casper_types::{contracts::ContractHash, runtime_args, ApiError, URef, U512}; #[repr(u16)] enum Error { @@ -20,6 +24,8 @@ const ARG_PAYMENT_AMOUNT: &str = "payment_amount"; const SET_REFUND_PURSE: &str = "set_refund_purse"; const GET_REFUND_PURSE: &str = "get_refund_purse"; const GET_PAYMENT_PURSE: &str = "get_payment_purse"; +const ARG_PURSE_NAME_1: &str = "purse_name_1"; +const ARG_PURSE_NAME_2: &str = "purse_name_2"; fn set_refund_purse(contract_hash: ContractHash, p: &URef) { runtime::call_contract( @@ -49,7 +55,13 @@ fn submit_payment(handle_payment: ContractHash, amount: U512) { pub extern "C" fn call() { let handle_payment = system::get_handle_payment(); - let refund_purse = system::create_purse(); + let refund_purse_name_1: String = runtime::get_named_arg(ARG_PURSE_NAME_1); + let refund_purse_name_2: String = runtime::get_named_arg(ARG_PURSE_NAME_2); + + let refund_purse_1 = runtime::get_key(&refund_purse_name_1) + .unwrap_or_revert() + .into_uref() + .unwrap_or_revert(); { // get_refund_purse should return None before setting it let refund_result = get_refund_purse(handle_payment); @@ -58,10 +70,10 @@ pub extern "C" fn call() { } // it should return Some(x) after calling set_refund_purse(x) - set_refund_purse(handle_payment, &refund_purse); + set_refund_purse(handle_payment, &refund_purse_1); let 
refund_purse = match get_refund_purse(handle_payment) { None => runtime::revert(ApiError::User(Error::NotFound as u16)), - Some(x) if x.addr() == refund_purse.addr() => x, + Some(x) if x.addr() == refund_purse_1.addr() => x, Some(_) => runtime::revert(ApiError::User(Error::Invalid as u16)), }; @@ -71,12 +83,15 @@ pub extern "C" fn call() { } } { - let refund_purse = system::create_purse(); + let refund_purse_2 = runtime::get_key(&refund_purse_name_2) + .unwrap_or_revert() + .into_uref() + .unwrap_or_revert(); // get_refund_purse should return correct value after setting a second time - set_refund_purse(handle_payment, &refund_purse); + set_refund_purse(handle_payment, &refund_purse_2); match get_refund_purse(handle_payment) { None => runtime::revert(ApiError::User(Error::NotFound as u16)), - Some(uref) if uref.addr() == refund_purse.addr() => (), + Some(uref) if uref.addr() == refund_purse_2.addr() => (), Some(_) => runtime::revert(ApiError::User(Error::Invalid as u16)), } diff --git a/smart_contracts/contracts/test/regression-20210707/Cargo.toml b/smart_contracts/contracts/test/regression-20210707/Cargo.toml new file mode 100644 index 0000000000..9bb499ba66 --- /dev/null +++ b/smart_contracts/contracts/test/regression-20210707/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "regression-20210707" +version = "0.1.0" +authors = ["Michał Papierski "] +edition = "2021" + +[[bin]] +name = "regression_20210707" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/regression-20210707/src/main.rs b/smart_contracts/contracts/test/regression-20210707/src/main.rs new file mode 100644 index 0000000000..ed98ca86b3 --- /dev/null +++ b/smart_contracts/contracts/test/regression-20210707/src/main.rs @@ -0,0 +1,227 @@ +#![no_std] +#![no_main] + +#[macro_use] +extern crate alloc; + +use alloc::string::ToString; + 
+use casper_contract::{ + contract_api::{account, runtime, storage, system}, + unwrap_or_revert::UnwrapOrRevert, +}; +use casper_types::{ + account::AccountHash, + contracts::NamedKeys, + runtime_args, + system::{handle_payment, mint}, + AccessRights, AddressableEntityHash, CLType, CLTyped, EntityEntryPoint, EntryPointAccess, + EntryPointPayment, EntryPointType, EntryPoints, Key, Parameter, RuntimeArgs, URef, U512, +}; + +const HARDCODED_UREF: URef = URef::new([42; 32], AccessRights::READ_ADD_WRITE); + +const PACKAGE_HASH_NAME: &str = "package_hash_name"; +const ACCESS_UREF_NAME: &str = "uref_name"; +const CONTRACT_HASH_NAME: &str = "contract_hash"; + +const ARG_SOURCE: &str = "source"; +const ARG_RECIPIENT: &str = "recipient"; +const ARG_AMOUNT: &str = "amount"; +const ARG_TARGET: &str = "target"; + +const METHOD_SEND_TO_ACCOUNT: &str = "send_to_account"; +const METHOD_SEND_TO_PURSE: &str = "send_to_purse"; +const METHOD_HARDCODED_PURSE_SRC: &str = "hardcoded_purse_src"; +const METHOD_STORED_PAYMENT: &str = "stored_payment"; +const METHOD_HARDCODED_PAYMENT: &str = "hardcoded_payment"; + +pub fn get_payment_purse() -> URef { + runtime::call_contract( + system::get_handle_payment(), + handle_payment::METHOD_GET_PAYMENT_PURSE, + RuntimeArgs::default(), + ) +} + +pub fn set_refund_purse(refund_purse: URef) { + let args = runtime_args! 
{ + mint::ARG_PURSE => refund_purse, + }; + runtime::call_contract( + system::get_handle_payment(), + handle_payment::METHOD_SET_REFUND_PURSE, + args, + ) +} + +#[no_mangle] +pub extern "C" fn send_to_account() { + let source = runtime::get_key("purse") + .unwrap_or_revert() + .into_uref() + .unwrap_or_revert(); + let recipient: AccountHash = runtime::get_named_arg(ARG_RECIPIENT); + let amount: U512 = runtime::get_named_arg(ARG_AMOUNT); + + system::transfer_from_purse_to_account(source, recipient, amount, None).unwrap(); +} + +#[no_mangle] +pub extern "C" fn send_to_purse() { + let source = runtime::get_key("purse") + .unwrap_or_revert() + .into_uref() + .unwrap_or_revert(); + let target: URef = runtime::get_named_arg(ARG_TARGET); + let amount: U512 = runtime::get_named_arg(ARG_AMOUNT); + + system::transfer_from_purse_to_purse(source, target, amount, None).unwrap(); +} + +#[no_mangle] +pub extern "C" fn hardcoded_purse_src() { + let source = HARDCODED_UREF; + let target = runtime::get_key("purse") + .unwrap_or_revert() + .into_uref() + .unwrap_or_revert(); + + let amount: U512 = runtime::get_named_arg(ARG_AMOUNT); + + system::transfer_from_purse_to_purse(source, target, amount, None).unwrap(); +} + +#[no_mangle] +pub extern "C" fn stored_payment() { + // Refund purse + let refund_purse: URef = runtime::get_key("purse") + .unwrap_or_revert() + .into_uref() + .unwrap_or_revert(); + // Who will be charged + let source: URef = runtime::get_named_arg(ARG_SOURCE); + // How much to pay for execution + let amount: U512 = runtime::get_named_arg(ARG_AMOUNT); + + // set refund purse to specified purse + set_refund_purse(refund_purse); + + // get payment purse for current execution + let payment_purse: URef = get_payment_purse(); + + // transfer amount from named purse to payment purse, which will be used to pay for execution + system::transfer_from_purse_to_purse(source, payment_purse, amount, None).unwrap_or_revert(); +} + +#[no_mangle] +pub extern "C" fn hardcoded_payment() 
{ + // Refund purse + let refund_purse: URef = runtime::get_key("purse") + .unwrap_or_revert() + .into_uref() + .unwrap_or_revert(); + // Who will be charged + let source: URef = HARDCODED_UREF; + // How much to pay for execution + let amount: U512 = runtime::get_named_arg(ARG_AMOUNT); + + // set refund purse to specified purse + set_refund_purse(refund_purse); + + // get payment purse for current execution + let payment_purse: URef = get_payment_purse(); + + // transfer amount from named purse to payment purse, which will be used to pay for execution + system::transfer_from_purse_to_purse(source, payment_purse, amount, None).unwrap_or_revert(); +} + +#[no_mangle] +pub extern "C" fn call() { + let mut entry_points = EntryPoints::new(); + + let send_to_account = EntityEntryPoint::new( + METHOD_SEND_TO_ACCOUNT, + vec![ + Parameter::new(ARG_SOURCE, URef::cl_type()), + Parameter::new(ARG_RECIPIENT, AccountHash::cl_type()), + Parameter::new(ARG_AMOUNT, CLType::U512), + ], + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + let send_to_purse = EntityEntryPoint::new( + METHOD_SEND_TO_PURSE, + vec![ + Parameter::new(ARG_SOURCE, URef::cl_type()), + Parameter::new(ARG_TARGET, URef::cl_type()), + Parameter::new(ARG_AMOUNT, CLType::U512), + ], + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + let hardcoded_src = EntityEntryPoint::new( + METHOD_HARDCODED_PURSE_SRC, + vec![ + Parameter::new(ARG_TARGET, URef::cl_type()), + Parameter::new(ARG_AMOUNT, CLType::U512), + ], + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + let stored_payment = EntityEntryPoint::new( + METHOD_STORED_PAYMENT, + vec![ + Parameter::new(ARG_SOURCE, URef::cl_type()), + Parameter::new(ARG_AMOUNT, CLType::U512), + ], + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + let hardcoded_payment = 
EntityEntryPoint::new( + METHOD_HARDCODED_PAYMENT, + vec![Parameter::new(ARG_AMOUNT, CLType::U512)], + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + + entry_points.add_entry_point(send_to_account); + entry_points.add_entry_point(send_to_purse); + entry_points.add_entry_point(hardcoded_src); + entry_points.add_entry_point(stored_payment); + entry_points.add_entry_point(hardcoded_payment); + + let amount: U512 = runtime::get_named_arg("amount"); + + let named_keys = { + let purse = system::create_purse(); + system::transfer_from_purse_to_purse(account::get_main_purse(), purse, amount, None) + .unwrap_or_revert(); + + let mut named_keys = NamedKeys::new(); + named_keys.insert("purse".to_string(), purse.into()); + named_keys + }; + + let (contract_hash, _version) = storage::new_contract( + entry_points, + Some(named_keys), + Some(PACKAGE_HASH_NAME.to_string()), + Some(ACCESS_UREF_NAME.to_string()), + None, + ); + runtime::put_key( + CONTRACT_HASH_NAME, + Key::contract_entity_key(AddressableEntityHash::new(contract_hash.value())), + ); +} diff --git a/smart_contracts/contracts/test/regression-20210831/Cargo.toml b/smart_contracts/contracts/test/regression-20210831/Cargo.toml new file mode 100644 index 0000000000..0a07608941 --- /dev/null +++ b/smart_contracts/contracts/test/regression-20210831/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "regression-20210831" +version = "0.1.0" +authors = ["Michał Papierski "] +edition = "2021" + +[[bin]] +name = "regression_20210831" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/regression-20210831/src/main.rs b/smart_contracts/contracts/test/regression-20210831/src/main.rs new file mode 100644 index 0000000000..c1428ce103 --- /dev/null +++ 
b/smart_contracts/contracts/test/regression-20210831/src/main.rs @@ -0,0 +1,324 @@ +#![no_std] +#![no_main] + +#[macro_use] +extern crate alloc; + +use alloc::{collections::BTreeMap, string::ToString}; + +use casper_contract::{ + contract_api::{runtime, storage, system}, + unwrap_or_revert::UnwrapOrRevert, +}; +use casper_types::{ + bytesrepr::FromBytes, + contracts::{ContractPackageHash, NamedKeys}, + runtime_args, + system::auction::{self, DelegationRate}, + CLType, CLTyped, CLValue, EntityEntryPoint, EntryPointAccess, EntryPointPayment, + EntryPointType, EntryPoints, Key, Parameter, PublicKey, RuntimeArgs, U512, +}; + +const METHOD_ADD_BID_PROXY_CALL_1: &str = "add_bid_proxy_call_1"; +const METHOD_ADD_BID_PROXY_CALL: &str = "add_bid_proxy_call"; + +const METHOD_WITHDRAW_PROXY_CALL: &str = "withdraw_proxy_call"; +const METHOD_WITHDRAW_PROXY_CALL_1: &str = "withdraw_proxy_call_1"; + +const METHOD_DELEGATE_PROXY_CALL: &str = "delegate_proxy_call"; +const METHOD_DELEGATE_PROXY_CALL_1: &str = "delegate_proxy_call_1"; + +const METHOD_UNDELEGATE_PROXY_CALL: &str = "undelegate_proxy_call"; +const METHOD_UNDELEGATE_PROXY_CALL_1: &str = "undelegate_proxy_call_1"; + +const METHOD_ACTIVATE_BID_CALL: &str = "activate_bid_proxy_call"; +const METHOD_ACTIVATE_BID_CALL_1: &str = "activate_bid_proxy_call_1"; + +const PACKAGE_HASH_NAME: &str = "package_hash_name"; +const ACCESS_UREF_NAME: &str = "uref_name"; +const CONTRACT_HASH_NAME: &str = "contract_hash"; + +fn forwarded_add_bid_args() -> RuntimeArgs { + let public_key: PublicKey = runtime::get_named_arg(auction::ARG_PUBLIC_KEY); + let delegation_rate: DelegationRate = runtime::get_named_arg(auction::ARG_DELEGATION_RATE); + let amount: U512 = runtime::get_named_arg(auction::ARG_AMOUNT); + + runtime_args! 
{ + auction::ARG_PUBLIC_KEY => public_key, + auction::ARG_DELEGATION_RATE => delegation_rate, + auction::ARG_AMOUNT => amount, + } +} + +fn forwarded_withdraw_bid_args() -> RuntimeArgs { + let public_key: PublicKey = runtime::get_named_arg(auction::ARG_PUBLIC_KEY); + let amount: U512 = runtime::get_named_arg(auction::ARG_AMOUNT); + + runtime_args! { + auction::ARG_PUBLIC_KEY => public_key, + auction::ARG_AMOUNT => amount, + } +} + +fn forwarded_delegate_args() -> RuntimeArgs { + let delegator: PublicKey = runtime::get_named_arg(auction::ARG_DELEGATOR); + let validator: PublicKey = runtime::get_named_arg(auction::ARG_VALIDATOR); + let amount: U512 = runtime::get_named_arg(auction::ARG_AMOUNT); + + runtime_args! { + auction::ARG_DELEGATOR => delegator, + auction::ARG_VALIDATOR => validator, + auction::ARG_AMOUNT => amount, + } +} + +fn forwarded_undelegate_args() -> RuntimeArgs { + let delegator: PublicKey = runtime::get_named_arg(auction::ARG_DELEGATOR); + let validator: PublicKey = runtime::get_named_arg(auction::ARG_VALIDATOR); + let amount: U512 = runtime::get_named_arg(auction::ARG_AMOUNT); + + runtime_args! { + auction::ARG_DELEGATOR => delegator, + auction::ARG_VALIDATOR => validator, + auction::ARG_AMOUNT => amount, + } +} + +fn forwarded_activate_bid_args() -> RuntimeArgs { + let validator_public_key: PublicKey = runtime::get_named_arg(auction::ARG_VALIDATOR); + + runtime_args! 
{ + auction::ARG_VALIDATOR => validator_public_key, + } +} + +#[no_mangle] +pub extern "C" fn withdraw_proxy_call_1() { + let auction_contract_hash = system::get_auction(); + + let withdraw_bid_args = forwarded_withdraw_bid_args(); + + let result: U512 = runtime::call_contract( + auction_contract_hash, + auction::METHOD_WITHDRAW_BID, + withdraw_bid_args, + ); + + runtime::ret(CLValue::from_t(result).unwrap_or_revert()); +} + +fn forward_call_to_this(entry_point: &str, runtime_args: RuntimeArgs) -> T { + let this = runtime::get_key(PACKAGE_HASH_NAME) + .and_then(Key::into_package_addr) + .map(ContractPackageHash::new) + .unwrap_or_revert(); + runtime::call_versioned_contract(this, None, entry_point, runtime_args) +} + +fn call_auction(entry_point: &str, args: RuntimeArgs) -> T { + runtime::call_contract(system::get_auction(), entry_point, args) +} + +#[no_mangle] +pub extern "C" fn add_bid_proxy_call() { + forward_call_to_this(METHOD_ADD_BID_PROXY_CALL_1, forwarded_add_bid_args()) +} + +#[no_mangle] +pub extern "C" fn add_bid_proxy_call_1() { + let _result: U512 = call_auction(auction::METHOD_ADD_BID, forwarded_add_bid_args()); +} + +#[no_mangle] +pub extern "C" fn withdraw_proxy_call() { + let _result: U512 = + forward_call_to_this(METHOD_WITHDRAW_PROXY_CALL_1, forwarded_withdraw_bid_args()); +} + +#[no_mangle] +pub extern "C" fn delegate_proxy_call_1() { + let result: U512 = call_auction(auction::METHOD_DELEGATE, forwarded_delegate_args()); + runtime::ret(CLValue::from_t(result).unwrap_or_revert()); +} + +#[no_mangle] +pub extern "C" fn delegate_proxy_call() { + let _result: U512 = + forward_call_to_this(METHOD_DELEGATE_PROXY_CALL_1, forwarded_delegate_args()); +} + +#[no_mangle] +pub extern "C" fn undelegate_proxy_call_1() { + let result: U512 = call_auction(auction::METHOD_UNDELEGATE, forwarded_undelegate_args()); + runtime::ret(CLValue::from_t(result).unwrap_or_revert()); +} + +#[no_mangle] +pub extern "C" fn undelegate_proxy_call() { + let _result: U512 = + 
forward_call_to_this(METHOD_UNDELEGATE_PROXY_CALL_1, forwarded_undelegate_args()); +} + +#[no_mangle] +pub extern "C" fn activate_bid_proxy_call_1() { + call_auction::<()>(auction::METHOD_ACTIVATE_BID, forwarded_activate_bid_args()); +} + +#[no_mangle] +pub extern "C" fn activate_bid_proxy_call() { + forward_call_to_this(METHOD_ACTIVATE_BID_CALL_1, forwarded_activate_bid_args()) +} + +#[no_mangle] +pub extern "C" fn call() { + let mut entry_points = EntryPoints::new(); + + let add_bid_proxy_call_1 = EntityEntryPoint::new( + METHOD_ADD_BID_PROXY_CALL_1, + vec![ + Parameter::new(auction::ARG_PUBLIC_KEY, PublicKey::cl_type()), + Parameter::new(auction::ARG_DELEGATION_RATE, DelegationRate::cl_type()), + Parameter::new(auction::ARG_AMOUNT, U512::cl_type()), + ], + U512::cl_type(), + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + entry_points.add_entry_point(add_bid_proxy_call_1); + + let add_bid_proxy_call = EntityEntryPoint::new( + METHOD_ADD_BID_PROXY_CALL, + vec![ + Parameter::new(auction::ARG_PUBLIC_KEY, PublicKey::cl_type()), + Parameter::new(auction::ARG_DELEGATION_RATE, DelegationRate::cl_type()), + Parameter::new(auction::ARG_AMOUNT, U512::cl_type()), + ], + U512::cl_type(), + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + entry_points.add_entry_point(add_bid_proxy_call); + + let withdraw_proxy_call_1 = EntityEntryPoint::new( + METHOD_WITHDRAW_PROXY_CALL_1, + vec![ + Parameter::new(auction::ARG_PUBLIC_KEY, PublicKey::cl_type()), + Parameter::new(auction::ARG_AMOUNT, U512::cl_type()), + ], + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + + let withdraw_proxy_call = EntityEntryPoint::new( + METHOD_WITHDRAW_PROXY_CALL, + vec![ + Parameter::new(auction::ARG_PUBLIC_KEY, PublicKey::cl_type()), + Parameter::new(auction::ARG_AMOUNT, U512::cl_type()), + ], + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + 
EntryPointPayment::Caller, + ); + + let delegate_proxy_call = EntityEntryPoint::new( + METHOD_DELEGATE_PROXY_CALL, + vec![ + Parameter::new(auction::ARG_DELEGATOR, PublicKey::cl_type()), + Parameter::new(auction::ARG_VALIDATOR, PublicKey::cl_type()), + Parameter::new(auction::ARG_AMOUNT, U512::cl_type()), + ], + U512::cl_type(), + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + + let delegate_proxy_call_1 = EntityEntryPoint::new( + METHOD_DELEGATE_PROXY_CALL_1, + vec![ + Parameter::new(auction::ARG_DELEGATOR, PublicKey::cl_type()), + Parameter::new(auction::ARG_VALIDATOR, PublicKey::cl_type()), + Parameter::new(auction::ARG_AMOUNT, U512::cl_type()), + ], + U512::cl_type(), + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + + let undelegate_proxy_call = EntityEntryPoint::new( + METHOD_UNDELEGATE_PROXY_CALL, + vec![ + Parameter::new(auction::ARG_DELEGATOR, PublicKey::cl_type()), + Parameter::new(auction::ARG_VALIDATOR, PublicKey::cl_type()), + Parameter::new(auction::ARG_AMOUNT, U512::cl_type()), + ], + U512::cl_type(), + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + + let undelegate_proxy_call_1 = EntityEntryPoint::new( + METHOD_UNDELEGATE_PROXY_CALL_1, + vec![ + Parameter::new(auction::ARG_DELEGATOR, PublicKey::cl_type()), + Parameter::new(auction::ARG_VALIDATOR, PublicKey::cl_type()), + Parameter::new(auction::ARG_AMOUNT, U512::cl_type()), + ], + U512::cl_type(), + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + + let activate_bid_proxy_call = EntityEntryPoint::new( + METHOD_ACTIVATE_BID_CALL, + vec![Parameter::new(auction::ARG_VALIDATOR, CLType::PublicKey)], + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + let activate_bid_proxy_call_1 = EntityEntryPoint::new( + METHOD_ACTIVATE_BID_CALL_1, + vec![Parameter::new(auction::ARG_VALIDATOR, 
CLType::PublicKey)], + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + + entry_points.add_entry_point(withdraw_proxy_call); + entry_points.add_entry_point(withdraw_proxy_call_1); + + entry_points.add_entry_point(delegate_proxy_call); + entry_points.add_entry_point(delegate_proxy_call_1); + + entry_points.add_entry_point(undelegate_proxy_call); + entry_points.add_entry_point(undelegate_proxy_call_1); + + entry_points.add_entry_point(activate_bid_proxy_call); + entry_points.add_entry_point(activate_bid_proxy_call_1); + + let (contract_package_hash, access_uref) = storage::create_contract_package_at_hash(); + + // runtime::put_key(PACKAGE_HASH_NAME, contract_package_hash); + runtime::put_key(ACCESS_UREF_NAME, access_uref.into()); + + let mut named_keys = NamedKeys::new(); + named_keys.insert(PACKAGE_HASH_NAME.to_string(), contract_package_hash.into()); + + let (contract_hash, _version) = storage::add_contract_version( + contract_package_hash, + entry_points, + named_keys, + BTreeMap::new(), + ); + runtime::put_key(CONTRACT_HASH_NAME, Key::Hash(contract_hash.value())); +} diff --git a/smart_contracts/contracts/test/regression-20220204-call/Cargo.toml b/smart_contracts/contracts/test/regression-20220204-call/Cargo.toml new file mode 100644 index 0000000000..c249b8ecf6 --- /dev/null +++ b/smart_contracts/contracts/test/regression-20220204-call/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "regression-20220204-call" +version = "0.1.0" +authors = ["Michał Papierski "] +edition = "2021" + +[[bin]] +name = "regression_20220204_call" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/regression-20220204-call/src/main.rs b/smart_contracts/contracts/test/regression-20220204-call/src/main.rs new file mode 100644 index 0000000000..98d9a68dce --- 
/dev/null +++ b/smart_contracts/contracts/test/regression-20220204-call/src/main.rs @@ -0,0 +1,40 @@ +#![no_std] +#![no_main] + +extern crate alloc; + +use alloc::string::String; + +use casper_contract::{ + contract_api::{account, runtime}, + unwrap_or_revert::UnwrapOrRevert, +}; +use casper_types::{contracts::ContractHash, runtime_args, AccessRights}; + +const ARG_PURSE: &str = "purse"; +const CONTRACT_HASH_NAME: &str = "regression-contract-hash"; +const ARG_ENTRYPOINT: &str = "entrypoint"; + +#[no_mangle] +pub extern "C" fn call() { + let new_access_rights: AccessRights = runtime::get_named_arg("new_access_rights"); + + let entrypoint: String = runtime::get_named_arg(ARG_ENTRYPOINT); + + let contract_hash_key = runtime::get_key(CONTRACT_HASH_NAME).unwrap_or_revert(); + + let contract_hash = contract_hash_key + .into_entity_hash_addr() + .map(ContractHash::new) + .unwrap_or_revert(); + + let main_purse_modified = account::get_main_purse().with_access_rights(new_access_rights); + + runtime::call_contract::<()>( + contract_hash, + &entrypoint, + runtime_args! 
{ + ARG_PURSE => main_purse_modified, + }, + ); +} diff --git a/smart_contracts/contracts/test/regression-20220204-nontrivial/Cargo.toml b/smart_contracts/contracts/test/regression-20220204-nontrivial/Cargo.toml new file mode 100644 index 0000000000..823a6e9c4e --- /dev/null +++ b/smart_contracts/contracts/test/regression-20220204-nontrivial/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "regression-20220204-nontrivial" +version = "0.1.0" +authors = ["Michał Papierski "] +edition = "2021" + +[[bin]] +name = "regression_20220204_nontrivial" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/regression-20220204-nontrivial/src/main.rs b/smart_contracts/contracts/test/regression-20220204-nontrivial/src/main.rs new file mode 100644 index 0000000000..7d90d71569 --- /dev/null +++ b/smart_contracts/contracts/test/regression-20220204-nontrivial/src/main.rs @@ -0,0 +1,47 @@ +#![no_std] +#![no_main] + +extern crate alloc; + +use alloc::{ + collections::BTreeMap, + string::{String, ToString}, +}; + +use casper_contract::{ + contract_api::{account, runtime}, + unwrap_or_revert::UnwrapOrRevert, +}; +use casper_types::{runtime_args, AccessRights, AddressableEntityHash, Key}; + +const ARG_PURSE: &str = "purse"; +const CONTRACT_HASH_NAME: &str = "regression-contract-hash"; +const ARG_ENTRYPOINT: &str = "entrypoint"; + +type NonTrivialArg = BTreeMap; + +#[no_mangle] +pub extern "C" fn call() { + let new_access_rights: AccessRights = runtime::get_named_arg("new_access_rights"); + + let entrypoint: String = runtime::get_named_arg(ARG_ENTRYPOINT); + + let contract_hash_key = runtime::get_key(CONTRACT_HASH_NAME).unwrap_or_revert(); + let contract_hash = contract_hash_key + .into_entity_hash_addr() + .map(AddressableEntityHash::new) + .unwrap_or_revert(); + + let main_purse_modified = 
account::get_main_purse().with_access_rights(new_access_rights); + + let mut nontrivial_arg = NonTrivialArg::new(); + nontrivial_arg.insert("anything".to_string(), Key::from(main_purse_modified)); + + runtime::call_contract::<()>( + contract_hash.into(), + &entrypoint, + runtime_args! { + ARG_PURSE => nontrivial_arg, + }, + ); +} diff --git a/smart_contracts/contracts/test/regression-20220204/Cargo.toml b/smart_contracts/contracts/test/regression-20220204/Cargo.toml new file mode 100644 index 0000000000..14493fc769 --- /dev/null +++ b/smart_contracts/contracts/test/regression-20220204/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "regression-20220204" +version = "0.1.0" +authors = ["Michał Papierski "] +edition = "2021" + +[[bin]] +name = "regression_20220204" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/regression-20220204/src/main.rs b/smart_contracts/contracts/test/regression-20220204/src/main.rs new file mode 100644 index 0000000000..6e4231f58a --- /dev/null +++ b/smart_contracts/contracts/test/regression-20220204/src/main.rs @@ -0,0 +1,149 @@ +#![no_std] +#![no_main] + +#[macro_use] +extern crate alloc; + +use alloc::{collections::BTreeMap, string::String}; +use casper_contract::{ + contract_api::{account, runtime, storage, system}, + unwrap_or_revert::UnwrapOrRevert, +}; +use casper_types::{ + account::AccountHash, contracts::NamedKeys, CLType, CLTyped, EntityEntryPoint, + EntryPointAccess, EntryPointPayment, EntryPointType, EntryPoints, Key, Parameter, URef, U512, +}; + +const TRANSFER_AS_CONTRACT: &str = "transfer_as_contract"; +const NONTRIVIAL_ARG_AS_CONTRACT: &str = "nontrivial_arg_as_contract"; +const ARG_PURSE: &str = "purse"; +const PURSE_KEY: &str = "purse"; +const CONTRACT_HASH_NAME: &str = "regression-contract-hash"; +const PACKAGE_HASH_NAME: &str = 
"package-contract-hash"; + +type NonTrivialArg = BTreeMap; + +#[no_mangle] +pub extern "C" fn call() { + let (contract_package_hash, _access_uref) = storage::create_contract_package_at_hash(); + + runtime::put_key(PACKAGE_HASH_NAME, contract_package_hash.into()); + + let mut entry_points = EntryPoints::new(); + entry_points.add_entry_point(EntityEntryPoint::new( + TRANSFER_AS_CONTRACT, + vec![Parameter::new(ARG_PURSE, URef::cl_type())], + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + )); + + type NonTrivialArg = BTreeMap; + + entry_points.add_entry_point(EntityEntryPoint::new( + NONTRIVIAL_ARG_AS_CONTRACT, + vec![Parameter::new(ARG_PURSE, NonTrivialArg::cl_type())], + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + )); + + let named_keys = { + let mut named_keys = NamedKeys::new(); + let purse = system::create_purse(); + named_keys.insert(PURSE_KEY.into(), purse.into()); + named_keys + }; + + let (contract_hash, _contract_version) = storage::add_contract_version( + contract_package_hash, + entry_points, + named_keys, + BTreeMap::new(), + ); + + runtime::put_key(CONTRACT_HASH_NAME, Key::Hash(contract_hash.value())); +} + +#[no_mangle] +pub extern "C" fn transfer_as_contract() { + let source_purse: URef = runtime::get_named_arg(ARG_PURSE); + let target_purse = runtime::get_key(PURSE_KEY) + .unwrap_or_revert() + .into_uref() + .unwrap_or_revert(); + + assert!( + !source_purse.is_writeable(), + "Host should modify write bits in passed main purse" + ); + assert!(runtime::is_valid_uref(source_purse)); + + let extended = source_purse.into_read_add_write(); + assert!(!runtime::is_valid_uref(extended)); + + system::transfer_from_purse_to_purse(extended, target_purse, U512::one(), Some(42)) + .unwrap_or_revert(); +} + +#[no_mangle] +pub extern "C" fn transfer_as_session() { + let source_purse: URef = runtime::get_named_arg(ARG_PURSE); + + 
assert!(!source_purse.is_writeable()); + + assert!(runtime::is_valid_uref(source_purse)); + let extended = source_purse.into_read_add_write(); + assert!(runtime::is_valid_uref(extended)); + + system::transfer_from_purse_to_account( + extended, + AccountHash::new([0; 32]), + U512::one(), + Some(42), + ) + .unwrap_or_revert(); +} + +#[no_mangle] +pub extern "C" fn transfer_main_purse_as_session() { + let source_purse: URef = account::get_main_purse(); + + assert!(runtime::is_valid_uref(source_purse)); + let extended = source_purse.into_write(); + assert!(runtime::is_valid_uref(extended)); + + system::transfer_from_purse_to_account( + extended, + AccountHash::new([0; 32]), + U512::one(), + Some(42), + ) + .unwrap_or_revert(); +} + +#[no_mangle] +pub extern "C" fn nontrivial_arg_as_contract() { + let non_trivial_arg: NonTrivialArg = runtime::get_named_arg(ARG_PURSE); + let source_purse: URef = non_trivial_arg + .into_values() + .filter_map(Key::into_uref) + .next() + .unwrap(); + + let target_purse = runtime::get_key(PURSE_KEY) + .unwrap_or_revert() + .into_uref() + .unwrap_or_revert(); + + assert!(!source_purse.is_writeable()); + assert!(runtime::is_valid_uref(source_purse)); + + let extended = source_purse.into_read_add_write(); + assert!(!runtime::is_valid_uref(extended)); + + system::transfer_from_purse_to_purse(extended, target_purse, U512::one(), Some(42)) + .unwrap_or_revert(); +} diff --git a/smart_contracts/contracts/test/regression-20220207/Cargo.toml b/smart_contracts/contracts/test/regression-20220207/Cargo.toml new file mode 100644 index 0000000000..37588f474f --- /dev/null +++ b/smart_contracts/contracts/test/regression-20220207/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "regression-20220207" +version = "0.1.0" +authors = ["Mateusz Górski "] +edition = "2021" + +[[bin]] +name = "regression_20220207" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { 
path = "../../../../types" } diff --git a/smart_contracts/contracts/test/regression-20220207/src/main.rs b/smart_contracts/contracts/test/regression-20220207/src/main.rs new file mode 100644 index 0000000000..5bbea1d342 --- /dev/null +++ b/smart_contracts/contracts/test/regression-20220207/src/main.rs @@ -0,0 +1,26 @@ +#![no_std] +#![no_main] + +use casper_contract::{ + contract_api::{self, account, runtime}, + unwrap_or_revert::UnwrapOrRevert, +}; +use casper_types::{account::AccountHash, URef, U512}; + +const ARG_TARGET: &str = "target"; +const ARG_AMOUNT_TO_SEND: &str = "amount_to_send"; + +#[no_mangle] +pub extern "C" fn call() { + let source_purse: URef = account::get_main_purse(); + let amount_to_send: U512 = runtime::get_named_arg(ARG_AMOUNT_TO_SEND); + let target_account: AccountHash = runtime::get_named_arg(ARG_TARGET); + + contract_api::system::transfer_from_purse_to_account( + source_purse, + target_account, + amount_to_send, + None, + ) + .unwrap_or_revert(); +} diff --git a/smart_contracts/contracts/test/regression-20220208/Cargo.toml b/smart_contracts/contracts/test/regression-20220208/Cargo.toml new file mode 100644 index 0000000000..501f8be7f5 --- /dev/null +++ b/smart_contracts/contracts/test/regression-20220208/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "regression-20220208" +version = "0.1.0" +authors = ["Mateusz Górski "] +edition = "2021" + +[[bin]] +name = "regression_20220208" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/regression-20220208/src/main.rs b/smart_contracts/contracts/test/regression-20220208/src/main.rs new file mode 100644 index 0000000000..c23c5f1ee5 --- /dev/null +++ b/smart_contracts/contracts/test/regression-20220208/src/main.rs @@ -0,0 +1,36 @@ +#![no_std] +#![no_main] + +use casper_contract::{ + contract_api::{self, account, runtime}, + 
unwrap_or_revert::UnwrapOrRevert, +}; +use casper_types::{account::AccountHash, URef, U512}; + +const ARG_TARGET: &str = "target"; +const ARG_AMOUNT_PART_1: &str = "amount_part_1"; +const ARG_AMOUNT_PART_2: &str = "amount_part_2"; + +#[no_mangle] +pub extern "C" fn call() { + let source_purse: URef = account::get_main_purse(); + let amount_part_1: U512 = runtime::get_named_arg(ARG_AMOUNT_PART_1); + let amount_part_2: U512 = runtime::get_named_arg(ARG_AMOUNT_PART_2); + let target_account: AccountHash = runtime::get_named_arg(ARG_TARGET); + + contract_api::system::transfer_from_purse_to_account( + source_purse, + target_account, + amount_part_1, + None, + ) + .unwrap_or_revert(); + + contract_api::system::transfer_from_purse_to_account( + source_purse, + target_account, + amount_part_2, + None, + ) + .unwrap_or_revert(); +} diff --git a/smart_contracts/contracts/test/regression-20220211-call/Cargo.toml b/smart_contracts/contracts/test/regression-20220211-call/Cargo.toml new file mode 100644 index 0000000000..ae12024762 --- /dev/null +++ b/smart_contracts/contracts/test/regression-20220211-call/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "regression-20220211-call" +version = "0.1.0" +authors = ["Michał Papierski "] +edition = "2021" + +[[bin]] +name = "regression_20220211_call" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/regression-20220211-call/src/main.rs b/smart_contracts/contracts/test/regression-20220211-call/src/main.rs new file mode 100644 index 0000000000..fd713ff2de --- /dev/null +++ b/smart_contracts/contracts/test/regression-20220211-call/src/main.rs @@ -0,0 +1,27 @@ +#![no_std] +#![no_main] + +extern crate alloc; + +use alloc::string::String; +use casper_contract::{contract_api::runtime, unwrap_or_revert::UnwrapOrRevert}; +use casper_types::{contracts::ContractHash, 
RuntimeArgs, URef}; + +const CONTRACT_HASH_NAME: &str = "regression-contract-hash"; +const ARG_ENTRYPOINT: &str = "entrypoint"; + +#[no_mangle] +pub extern "C" fn call() { + let entrypoint: String = runtime::get_named_arg(ARG_ENTRYPOINT); + let contract_hash_key = runtime::get_key(CONTRACT_HASH_NAME).unwrap_or_revert(); + let contract_hash = contract_hash_key + .into_entity_hash_addr() + .map(ContractHash::new) + .unwrap_or_revert(); + + let hardcoded_uref: URef = + runtime::call_contract(contract_hash, &entrypoint, RuntimeArgs::default()); + + assert!(!runtime::is_valid_uref(hardcoded_uref)); + assert!(!hardcoded_uref.is_writeable(),); +} diff --git a/smart_contracts/contracts/test/regression-20220211/Cargo.toml b/smart_contracts/contracts/test/regression-20220211/Cargo.toml new file mode 100644 index 0000000000..55269676b9 --- /dev/null +++ b/smart_contracts/contracts/test/regression-20220211/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "regression-20220211" +version = "0.1.0" +authors = ["Michał Papierski "] +edition = "2021" + +[[bin]] +name = "regression_20220211" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/regression-20220211/src/main.rs b/smart_contracts/contracts/test/regression-20220211/src/main.rs new file mode 100644 index 0000000000..92233390b2 --- /dev/null +++ b/smart_contracts/contracts/test/regression-20220211/src/main.rs @@ -0,0 +1,175 @@ +#![no_std] +#![no_main] + +use casper_contract::{ + contract_api::{runtime, storage}, + unwrap_or_revert::UnwrapOrRevert, +}; +use casper_types::{ + addressable_entity::Parameters, AccessRights, AddressableEntityHash, CLType, CLValue, + EntityEntryPoint, EntryPointAccess, EntryPointPayment, EntryPointType, EntryPoints, Key, URef, +}; + +const RET_AS_CONTRACT: &str = "ret_as_contract"; +const RET_AS_SESSION: &str = "ret_as_session"; 
+const PUT_KEY_AS_SESSION: &str = "put_key_as_session"; +const PUT_KEY_AS_CONTRACT: &str = "put_key_as_contract"; +const READ_AS_SESSION: &str = "read_as_session"; +const READ_AS_CONTRACT: &str = "read_as_contract"; +const WRITE_AS_SESSION: &str = "write_as_session"; +const WRITE_AS_CONTRACT: &str = "write_as_contract"; +const ADD_AS_SESSION: &str = "add_as_session"; +const ADD_AS_CONTRACT: &str = "add_as_contract"; +const CONTRACT_HASH_NAME: &str = "regression-contract-hash"; + +#[no_mangle] +pub extern "C" fn call() { + let mut entry_points = EntryPoints::new(); + entry_points.add_entry_point(EntityEntryPoint::new( + RET_AS_CONTRACT, + Parameters::new(), + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + )); + entry_points.add_entry_point(EntityEntryPoint::new( + RET_AS_SESSION, + Parameters::new(), + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + )); + entry_points.add_entry_point(EntityEntryPoint::new( + PUT_KEY_AS_SESSION, + Parameters::new(), + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + )); + entry_points.add_entry_point(EntityEntryPoint::new( + PUT_KEY_AS_CONTRACT, + Parameters::new(), + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + )); + entry_points.add_entry_point(EntityEntryPoint::new( + READ_AS_SESSION, + Parameters::new(), + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + )); + entry_points.add_entry_point(EntityEntryPoint::new( + READ_AS_CONTRACT, + Parameters::new(), + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + )); + entry_points.add_entry_point(EntityEntryPoint::new( + WRITE_AS_SESSION, + Parameters::new(), + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + )); + 
entry_points.add_entry_point(EntityEntryPoint::new( + WRITE_AS_CONTRACT, + Parameters::new(), + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + )); + entry_points.add_entry_point(EntityEntryPoint::new( + ADD_AS_SESSION, + Parameters::new(), + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + )); + entry_points.add_entry_point(EntityEntryPoint::new( + ADD_AS_CONTRACT, + Parameters::new(), + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + )); + let (contract_hash, _contract_version) = + storage::new_locked_contract(entry_points, None, None, None, None); + + runtime::put_key( + CONTRACT_HASH_NAME, + Key::contract_entity_key(AddressableEntityHash::new(contract_hash.value())), + ); +} + +#[no_mangle] +pub extern "C" fn ret_as_contract() { + let uref = URef::default().into_read_add_write(); + runtime::ret(CLValue::from_t(uref).unwrap_or_revert()) +} + +#[no_mangle] +pub extern "C" fn ret_as_session() { + let uref = URef::default().with_access_rights(AccessRights::READ_ADD_WRITE); + runtime::ret(CLValue::from_t(uref).unwrap_or_revert()); +} + +#[no_mangle] +pub extern "C" fn write_as_contract() { + let uref = URef::default().into_read_add_write(); + storage::write(uref, ()); +} + +#[no_mangle] +pub extern "C" fn write_as_session() { + let uref = URef::default().with_access_rights(AccessRights::READ_ADD_WRITE); + storage::write(uref, ()); +} + +#[no_mangle] +pub extern "C" fn read_as_contract() { + let uref = URef::default().into_read_add_write(); + let _: Option<()> = storage::read(uref).unwrap_or_revert(); +} + +#[no_mangle] +pub extern "C" fn read_as_session() { + let uref = URef::default().with_access_rights(AccessRights::READ_ADD_WRITE); + let _: Option<()> = storage::read(uref).unwrap_or_revert(); +} + +#[no_mangle] +pub extern "C" fn put_key_as_contract() { + let uref = URef::default().into_read_add_write(); + 
runtime::put_key("", uref.into()); +} + +#[no_mangle] +pub extern "C" fn put_key_as_session() { + let uref = URef::default().with_access_rights(AccessRights::READ_ADD_WRITE); + runtime::put_key("", uref.into()); +} + +#[no_mangle] +pub extern "C" fn add_as_contract() { + let uref = URef::default().into_read_add_write(); + storage::write(uref, ()); +} + +#[no_mangle] +pub extern "C" fn add_as_session() { + let uref = URef::default().with_access_rights(AccessRights::READ_ADD_WRITE); + storage::write(uref, ()); +} diff --git a/smart_contracts/contracts/test/regression-20220222/Cargo.toml b/smart_contracts/contracts/test/regression-20220222/Cargo.toml new file mode 100644 index 0000000000..e299f95d52 --- /dev/null +++ b/smart_contracts/contracts/test/regression-20220222/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "regression-20220222" +version = "0.1.0" +authors = ["Michał Papierski "] +edition = "2021" + +[[bin]] +name = "regression_20220222" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/regression-20220222/src/main.rs b/smart_contracts/contracts/test/regression-20220222/src/main.rs new file mode 100644 index 0000000000..ea07825c3b --- /dev/null +++ b/smart_contracts/contracts/test/regression-20220222/src/main.rs @@ -0,0 +1,49 @@ +#![no_std] +#![no_main] + +extern crate alloc; + +use casper_contract::{ + contract_api::{account, runtime, system}, + unwrap_or_revert::UnwrapOrRevert, +}; +use casper_types::{account::AccountHash, AccessRights, ApiError, URef, URefAddr, U512}; + +const ALICE_ADDR: AccountHash = AccountHash::new([42; 32]); + +#[repr(u16)] +enum Error { + PurseDoesNotGrantImplicitAddAccess = 0, + TemporaryAddAccessPersists = 1, +} + +impl From for ApiError { + fn from(error: Error) -> Self { + ApiError::User(error as u16) + } +} + +#[no_mangle] +pub extern "C" fn call() { + let 
alice_purse_addr: URefAddr = runtime::get_named_arg("alice_purse_addr"); + + let alice_purse = URef::new(alice_purse_addr, AccessRights::ADD); + + if runtime::is_valid_uref(alice_purse) { + // Shouldn't be valid uref + runtime::revert(Error::PurseDoesNotGrantImplicitAddAccess); + } + + let source = account::get_main_purse(); + + let _failsafe = system::transfer_from_purse_to_account(source, ALICE_ADDR, U512::one(), None) + .unwrap_or_revert(); + + if runtime::is_valid_uref(alice_purse) { + // Should not be escalated since add access was granted temporarily for transfer. + runtime::revert(Error::TemporaryAddAccessPersists); + } + + // Should fail + runtime::put_key("put_key_with_add_should_fail", alice_purse.into()); +} diff --git a/smart_contracts/contracts/test/regression-add-bid/Cargo.toml b/smart_contracts/contracts/test/regression-add-bid/Cargo.toml new file mode 100644 index 0000000000..657d248807 --- /dev/null +++ b/smart_contracts/contracts/test/regression-add-bid/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "regression-add-bid" +version = "0.1.0" +authors = ["Karan Dhareshwar ", "Michał Papierski "] +edition = "2021" + +[[bin]] +name = "regression_add_bid" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/regression-add-bid/src/main.rs b/smart_contracts/contracts/test/regression-add-bid/src/main.rs new file mode 100644 index 0000000000..95b7dfe9ad --- /dev/null +++ b/smart_contracts/contracts/test/regression-add-bid/src/main.rs @@ -0,0 +1,34 @@ +#![no_std] +#![no_main] + +extern crate alloc; + +use casper_contract::contract_api::{runtime, system}; +use casper_types::{ + runtime_args, + system::auction::{self, DelegationRate}, + PublicKey, U512, +}; + +const ARG_AMOUNT: &str = "amount"; +const ARG_DELEGATION_RATE: &str = "delegation_rate"; +const ARG_PUBLIC_KEY: &str = "public_key"; 
+ +fn add_bid(public_key: PublicKey, bond_amount: U512, delegation_rate: DelegationRate) { + let contract_hash = system::get_auction(); + let args = runtime_args! { + auction::ARG_PUBLIC_KEY => public_key, + auction::ARG_AMOUNT => bond_amount + U512::one(), + auction::ARG_DELEGATION_RATE => delegation_rate, + }; + runtime::call_contract::(contract_hash, auction::METHOD_ADD_BID, args); +} + +#[no_mangle] +pub extern "C" fn call() { + let public_key = runtime::get_named_arg(ARG_PUBLIC_KEY); + let bond_amount = runtime::get_named_arg(ARG_AMOUNT); + let delegation_rate = runtime::get_named_arg(ARG_DELEGATION_RATE); + + add_bid(public_key, bond_amount, delegation_rate); +} diff --git a/smart_contracts/contracts/test/regression-delegate/Cargo.toml b/smart_contracts/contracts/test/regression-delegate/Cargo.toml new file mode 100644 index 0000000000..f1283947a3 --- /dev/null +++ b/smart_contracts/contracts/test/regression-delegate/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "regression-delegate" +version = "0.1.0" +authors = ["Michał Papierski "] +edition = "2021" + +[[bin]] +name = "regression_delegate" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/regression-delegate/src/main.rs b/smart_contracts/contracts/test/regression-delegate/src/main.rs new file mode 100644 index 0000000000..eb99fee6b2 --- /dev/null +++ b/smart_contracts/contracts/test/regression-delegate/src/main.rs @@ -0,0 +1,31 @@ +#![no_std] +#![no_main] + +extern crate alloc; + +use casper_contract::contract_api::{runtime, system}; +use casper_types::{runtime_args, system::auction, PublicKey, U512}; + +const ARG_AMOUNT: &str = "amount"; + +const ARG_VALIDATOR: &str = "validator"; +const ARG_DELEGATOR: &str = "delegator"; + +fn delegate(delegator: PublicKey, validator: PublicKey, amount: U512) { + let contract_hash = 
system::get_auction(); + let args = runtime_args! { + auction::ARG_DELEGATOR => delegator, + auction::ARG_VALIDATOR => validator, + auction::ARG_AMOUNT => amount + U512::one(), + }; + runtime::call_contract::(contract_hash, auction::METHOD_DELEGATE, args); +} + +#[no_mangle] +pub extern "C" fn call() { + let delegator = runtime::get_named_arg(ARG_DELEGATOR); + let validator = runtime::get_named_arg(ARG_VALIDATOR); + let amount = runtime::get_named_arg(ARG_AMOUNT); + + delegate(delegator, validator, amount); +} diff --git a/smart_contracts/contracts/test/regression-payment/Cargo.toml b/smart_contracts/contracts/test/regression-payment/Cargo.toml new file mode 100644 index 0000000000..c049c318e4 --- /dev/null +++ b/smart_contracts/contracts/test/regression-payment/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "regression-payment" +version = "0.1.0" +authors = ["Michał Papierski "] +edition = "2021" + +[[bin]] +name = "regression_payment" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/regression-payment/src/main.rs b/smart_contracts/contracts/test/regression-payment/src/main.rs new file mode 100644 index 0000000000..8156cc1a54 --- /dev/null +++ b/smart_contracts/contracts/test/regression-payment/src/main.rs @@ -0,0 +1,38 @@ +#![no_std] +#![no_main] + +extern crate alloc; + +use casper_contract::{ + contract_api::{account, runtime, system}, + unwrap_or_revert::UnwrapOrRevert, +}; +use casper_types::{ + system::{handle_payment, standard_payment}, + RuntimeArgs, URef, U512, +}; + +fn pay(amount: U512) { + // amount to transfer from named purse to payment purse + let purse_uref = account::get_main_purse(); + + // handle payment contract + let handle_payment_contract_hash = system::get_handle_payment(); + + // get payment purse for current execution + let payment_purse: URef = 
runtime::call_contract( + handle_payment_contract_hash, + handle_payment::METHOD_GET_PAYMENT_PURSE, + RuntimeArgs::default(), + ); + + // transfer amount from named purse to payment purse, which will be used to pay for execution + system::transfer_from_purse_to_purse(purse_uref, payment_purse, amount + U512::one(), None) + .unwrap_or_revert(); +} + +#[no_mangle] +pub extern "C" fn call() { + let amount: U512 = runtime::get_named_arg(standard_payment::ARG_AMOUNT); + pay(amount); +} diff --git a/smart_contracts/contracts/test/regression-transfer/Cargo.toml b/smart_contracts/contracts/test/regression-transfer/Cargo.toml new file mode 100644 index 0000000000..1c7931bc41 --- /dev/null +++ b/smart_contracts/contracts/test/regression-transfer/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "regression-transfer" +version = "0.1.0" +authors = ["Michał Papierski "] +edition = "2021" + +[[bin]] +name = "regression_transfer" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/regression-transfer/src/main.rs b/smart_contracts/contracts/test/regression-transfer/src/main.rs new file mode 100644 index 0000000000..57b1cd0291 --- /dev/null +++ b/smart_contracts/contracts/test/regression-transfer/src/main.rs @@ -0,0 +1,38 @@ +#![no_std] +#![no_main] + +extern crate alloc; + +use casper_contract::{ + contract_api::{account, runtime, system}, + unwrap_or_revert::UnwrapOrRevert, +}; +use casper_types::{account::AccountHash, runtime_args, system::mint, URef, U512}; + +fn call_mint_transfer( + to: Option, + source: URef, + target: URef, + amount: U512, + id: Option, +) -> Result<(), mint::Error> { + let args = runtime_args! 
{ + mint::ARG_TO => to, + mint::ARG_SOURCE => source, + mint::ARG_TARGET => target, + mint::ARG_AMOUNT => amount + U512::one(), + mint::ARG_ID => id, + }; + runtime::call_contract(system::get_mint(), mint::METHOD_TRANSFER, args) +} + +#[no_mangle] +pub extern "C" fn call() { + let to: Option = runtime::get_named_arg(mint::ARG_TO); + let source: URef = account::get_main_purse(); + let target: URef = runtime::get_named_arg(mint::ARG_TARGET); + let amount: U512 = runtime::get_named_arg(mint::ARG_AMOUNT); + let id: Option = runtime::get_named_arg(mint::ARG_ID); + + call_mint_transfer(to, source, target, amount, id).unwrap_or_revert(); +} diff --git a/smart_contracts/contracts/test/regression_20211110/Cargo.toml b/smart_contracts/contracts/test/regression_20211110/Cargo.toml new file mode 100644 index 0000000000..2f826b8188 --- /dev/null +++ b/smart_contracts/contracts/test/regression_20211110/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "regression_20211110" +version = "0.1.0" +authors = ["Luís Fernando Schultz Xavier da Silveira "] +edition = "2021" + +[[bin]] +name = "regression_20211110" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/regression_20211110/src/main.rs b/smart_contracts/contracts/test/regression_20211110/src/main.rs new file mode 100644 index 0000000000..4c759f58fe --- /dev/null +++ b/smart_contracts/contracts/test/regression_20211110/src/main.rs @@ -0,0 +1,47 @@ +#![no_std] +#![no_main] + +#[macro_use] +extern crate alloc; + +use casper_contract::contract_api::{runtime, storage}; +use casper_types::{ + contracts::ContractHash, runtime_args, AddressableEntityHash, CLType, CLTyped, + EntityEntryPoint, EntryPointAccess, EntryPointPayment, EntryPointType, EntryPoints, Key, + Parameter, +}; + +const RECURSE_ENTRYPOINT: &str = "recurse"; +const ARG_TARGET: &str = "target"; +const 
CONTRACT_HASH_NAME: &str = "regression-contract-hash"; + +#[no_mangle] +pub extern "C" fn call() { + let mut entry_points = EntryPoints::new(); + entry_points.add_entry_point(EntityEntryPoint::new( + RECURSE_ENTRYPOINT, + vec![Parameter::new(ARG_TARGET, AddressableEntityHash::cl_type())], + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + )); + + let (contract_hash, _contract_version) = + storage::new_locked_contract(entry_points, None, None, None, None); + + runtime::put_key( + CONTRACT_HASH_NAME, + Key::contract_entity_key(AddressableEntityHash::new(contract_hash.value())), + ); +} + +#[no_mangle] +pub extern "C" fn recurse() { + let target: AddressableEntityHash = runtime::get_named_arg(ARG_TARGET); + runtime::call_contract( + ContractHash::new(target.value()), + RECURSE_ENTRYPOINT, + runtime_args! { ARG_TARGET => target }, + ) +} diff --git a/smart_contracts/contracts/test/regression_20220119/Cargo.toml b/smart_contracts/contracts/test/regression_20220119/Cargo.toml new file mode 100644 index 0000000000..8be5472654 --- /dev/null +++ b/smart_contracts/contracts/test/regression_20220119/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "regression_20220119" +version = "0.1.0" +authors = ["Michał Papierski "] +edition = "2021" + +[[bin]] +name = "regression_20220119" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/regression_20220119/src/main.rs b/smart_contracts/contracts/test/regression_20220119/src/main.rs new file mode 100644 index 0000000000..f704cfd263 --- /dev/null +++ b/smart_contracts/contracts/test/regression_20220119/src/main.rs @@ -0,0 +1,22 @@ +#![no_std] +#![no_main] + +#[macro_use] +extern crate alloc; + +use casper_contract::ext_ffi; +use casper_types::{api_error, ApiError, UREF_SERIALIZED_LENGTH}; + +fn 
custom_create_purse(buffer_size: usize) -> Result<(), ApiError> { + let big_purse = vec![0u8; buffer_size]; + let ret = unsafe { ext_ffi::casper_create_purse(big_purse.as_ptr(), big_purse.len()) }; + api_error::result_from(ret) +} + +#[no_mangle] +pub extern "C" fn call() { + assert_eq!(custom_create_purse(1024), Ok(())); + assert_eq!(custom_create_purse(0), Err(ApiError::PurseNotCreated)); + assert_eq!(custom_create_purse(3), Err(ApiError::PurseNotCreated)); + assert_eq!(custom_create_purse(UREF_SERIALIZED_LENGTH), Ok(())); +} diff --git a/smart_contracts/contracts/test/regression_20240105/Cargo.toml b/smart_contracts/contracts/test/regression_20240105/Cargo.toml new file mode 100644 index 0000000000..446ef49461 --- /dev/null +++ b/smart_contracts/contracts/test/regression_20240105/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "regression_20240105" +version = "0.1.0" +edition = "2021" + +[dependencies] +casper-contract = { path = "../../../contract", features = ["test-support"] } +casper-types = { path = "../../../../types" } + +[[bin]] +name = "regression_20240105" +path = "src/main.rs" +bench = false +doctest = false +test = false diff --git a/smart_contracts/contracts/test/regression_20240105/src/main.rs b/smart_contracts/contracts/test/regression_20240105/src/main.rs new file mode 100644 index 0000000000..afa256af86 --- /dev/null +++ b/smart_contracts/contracts/test/regression_20240105/src/main.rs @@ -0,0 +1,969 @@ +#![no_std] +#![no_main] + +extern crate alloc; + +use alloc::{ + collections::{BTreeMap, BTreeSet}, + format, + string::{String, ToString}, + vec, + vec::Vec, +}; +use core::mem::MaybeUninit; + +use casper_contract::{ + contract_api, + contract_api::{account, runtime, storage, system}, + ext_ffi, + unwrap_or_revert::UnwrapOrRevert, +}; +use casper_types::{ + account::{AccountHash, ActionType, Weight}, + addressable_entity::MAX_GROUPS, + api_error, + bytesrepr::ToBytes, + contracts::{ContractHash, ContractPackageHash}, + runtime_args, AccessRights, 
ApiError, CLType, CLValue, EntityEntryPoint, EntryPointAccess, + EntryPointPayment, EntryPointType, EntryPoints, EraId, Key, NamedKeys, Parameter, + TransferredTo, URef, U512, +}; + +const NOOP: &str = "noop"; + +fn to_ptr(t: &T) -> (*const u8, usize, Vec) { + let bytes = t.to_bytes().unwrap_or_revert(); + let ptr = bytes.as_ptr(); + let size = bytes.len(); + (ptr, size, bytes) +} + +#[no_mangle] +extern "C" fn noop() {} + +fn store_noop_contract(maybe_contract_pkg_hash: Option) -> ContractHash { + let mut entry_points = EntryPoints::new(); + entry_points.add_entry_point(EntityEntryPoint::new( + NOOP, + vec![], + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Caller, + EntryPointPayment::Caller, + )); + match maybe_contract_pkg_hash { + Some(contract_pkg_hash) => { + let (contract_hash, _version) = storage::add_contract_version( + contract_pkg_hash, + entry_points, + NamedKeys::new(), + BTreeMap::new(), + ); + contract_hash + } + None => { + let (contract_hash, _version) = + storage::new_contract(entry_points, None, None, None, None); + contract_hash + } + } +} + +fn get_name() -> String { + let large_name: bool = runtime::get_named_arg("large_name"); + if large_name { + "a".repeat(10_000) + } else { + "a".to_string() + } +} + +fn get_named_arg_size(name: &str) -> usize { + let mut arg_size: usize = 0; + let ret = unsafe { + ext_ffi::casper_get_named_arg_size( + name.as_bytes().as_ptr(), + name.len(), + &mut arg_size as *mut usize, + ) + }; + api_error::result_from(ret).unwrap_or_revert(); + arg_size +} + +#[no_mangle] +pub extern "C" fn call() { + let fn_arg: String = runtime::get_named_arg("fn"); + match fn_arg.as_str() { + "write" => { + let len: u32 = runtime::get_named_arg("len"); + let uref = storage::new_uref(()); + let key = Key::from(uref); + let (key_ptr, key_size, _bytes1) = to_ptr(&key); + let value = vec![u8::MAX; len as usize]; + let cl_value = CLValue::from_t(value).unwrap_or_revert(); + let (cl_value_ptr, cl_value_size, _bytes2) = 
to_ptr(&cl_value); + for _i in 0..u64::MAX { + unsafe { + ext_ffi::casper_write(key_ptr, key_size, cl_value_ptr, cl_value_size); + } + } + } + "read" => { + let len: Option = runtime::get_named_arg("len"); + let key = match len { + Some(len) => { + let key = Key::URef(storage::new_uref(())); + let uref = storage::new_uref(()); + storage::write(uref, vec![u8::MAX; len as usize]); + key + } + None => Key::Hash([0; 32]), + }; + let key_bytes = key.into_bytes().unwrap(); + let key_ptr = key_bytes.as_ptr(); + let key_size = key_bytes.len(); + let mut buffer = vec![0; len.unwrap_or_default() as usize]; + for _i in 0..u64::MAX { + let mut value_size = MaybeUninit::uninit(); + let ret = unsafe { + ext_ffi::casper_read_value(key_ptr, key_size, value_size.as_mut_ptr()) + }; + // If we actually read a value, we need to clear the host buffer before trying to + // read another value. + if len.is_some() { + assert_eq!(ret, 0); + } else { + assert_eq!(ret, u32::from(ApiError::ValueNotFound) as i32); + continue; + } + unsafe { + value_size.assume_init(); + } + let mut bytes_written = MaybeUninit::uninit(); + let ret = unsafe { + ext_ffi::casper_read_host_buffer( + buffer.as_mut_ptr(), + buffer.len(), + bytes_written.as_mut_ptr(), + ) + }; + assert_eq!(ret, 0); + } + } + "add" => { + let large: bool = runtime::get_named_arg("large"); + if large { + let uref = storage::new_uref(U512::zero()); + for _i in 0..u64::MAX { + storage::add(uref, U512::MAX) + } + } else { + let uref = storage::new_uref(0_i32); + for _i in 0..u64::MAX { + storage::add(uref, 1_i32) + } + } + } + "new" => { + let len: u32 = runtime::get_named_arg("len"); + for _i in 0..u64::MAX { + let _n = storage::new_uref(vec![u32::MAX; len as usize]); + } + } + "call_contract" => { + let args_len: u32 = runtime::get_named_arg("args_len"); + let args = runtime_args! 
{ "a" => vec![u8::MAX; args_len as usize] }; + let contract_hash = store_noop_contract(None); + let (contract_hash_ptr, contract_hash_size, _bytes1) = to_ptr(&contract_hash); + let (entry_point_name_ptr, entry_point_name_size, _bytes2) = to_ptr(&NOOP); + let (runtime_args_ptr, runtime_args_size, _bytes3) = to_ptr(&args); + let mut bytes_written = MaybeUninit::uninit(); + for _i in 0..u64::MAX { + let ret = unsafe { + ext_ffi::casper_call_contract( + contract_hash_ptr, + contract_hash_size, + entry_point_name_ptr, + entry_point_name_size, + runtime_args_ptr, + runtime_args_size, + bytes_written.as_mut_ptr(), + ) + }; + api_error::result_from(ret).unwrap_or_revert(); + } + } + "get_key" => { + let maybe_large_key: Option = runtime::get_named_arg("large_key"); + match maybe_large_key { + Some(large_key) => { + let name = get_name(); + let key = if large_key { + let uref = storage::new_uref(()); + Key::URef(uref) + } else { + Key::EraInfo(EraId::new(0)) + }; + runtime::put_key(&name, key); + for _i in 0..u64::MAX { + let _k = runtime::get_key(&name); + } + } + None => { + for i in 0..u64::MAX { + let _k = runtime::get_key(i.to_string().as_str()); + } + } + } + } + "has_key" => { + let exists: bool = runtime::get_named_arg("key_exists"); + if exists { + let name = get_name(); + runtime::put_key(&name, Key::EraInfo(EraId::new(0))); + for _i in 0..u64::MAX { + let _b = runtime::has_key(&name); + } + } else { + for i in 0..u64::MAX { + let _b = runtime::has_key(i.to_string().as_str()); + } + } + } + "put_key" => { + let base_name = get_name(); + let large_key: bool = runtime::get_named_arg("large_key"); + let key = if large_key { + let uref = storage::new_uref(()); + Key::URef(uref) + } else { + Key::EraInfo(EraId::new(0)) + }; + let maybe_num_keys: Option = runtime::get_named_arg("num_keys"); + let num_keys = maybe_num_keys.unwrap_or(u32::MAX); + for i in 0..num_keys { + runtime::put_key(format!("{base_name}{i}").as_str(), key); + } + } + "is_valid_uref" => { + let valid: 
bool = runtime::get_named_arg("valid"); + let uref = if valid { + storage::new_uref(()) + } else { + URef::new([1; 32], AccessRights::default()) + }; + for _i in 0..u64::MAX { + let is_valid = runtime::is_valid_uref(uref); + assert_eq!(valid, is_valid); + } + } + "add_associated_key" => { + let remove_after_adding: bool = runtime::get_named_arg("remove_after_adding"); + let account_hash = AccountHash::new([1; 32]); + let weight = Weight::new(1); + for _i in 0..u64::MAX { + if remove_after_adding { + account::add_associated_key(account_hash, weight).unwrap_or_revert(); + // Remove to avoid getting a duplicate key error on next iteration. + account::remove_associated_key(account_hash).unwrap_or_revert(); + } else { + let _e = account::add_associated_key(account_hash, weight); + } + } + } + "remove_associated_key" => { + for _i in 0..u64::MAX { + account::remove_associated_key(AccountHash::new([1; 32])).unwrap_err(); + } + } + "update_associated_key" => { + let exists: bool = runtime::get_named_arg("exists"); + let account_hash = AccountHash::new([1; 32]); + if exists { + account::add_associated_key(account_hash, Weight::new(1)).unwrap_or_revert(); + for i in 0..u64::MAX { + account::update_associated_key(account_hash, Weight::new(i as u8)) + .unwrap_or_revert(); + } + } else { + for i in 0..u64::MAX { + account::update_associated_key(account_hash, Weight::new(i as u8)).unwrap_err(); + } + } + } + "set_action_threshold" => { + for _i in 0..u64::MAX { + account::set_action_threshold(ActionType::Deployment, Weight::new(1)) + .unwrap_or_revert(); + } + } + "load_named_keys" => { + let num_keys: u32 = runtime::get_named_arg("num_keys"); + if num_keys == 0 { + for _i in 0..u64::MAX { + assert!(runtime::list_named_keys().is_empty()); + } + return; + } + // Where `num_keys` > 0, we should have put the required number of named keys in a + // previous execution via the `put_key` flow of this contract. 
+ for _i in 0..u64::MAX { + assert_eq!(runtime::list_named_keys().len() as u32, num_keys); + } + } + "remove_key" => { + let name = get_name(); + for _i in 0..u64::MAX { + runtime::remove_key(&name) + } + } + "get_caller" => { + for _i in 0..u64::MAX { + let _c = runtime::get_caller(); + } + } + "get_blocktime" => { + for _i in 0..u64::MAX { + let _b = runtime::get_blocktime(); + } + } + "create_purse" => { + for _i in 0..u64::MAX { + let _u = system::create_purse(); + } + } + "transfer_to_account" => { + let account_exists: bool = runtime::get_named_arg("account_exists"); + let amount = U512::one(); + let id = Some(u64::MAX); + if account_exists { + let target = AccountHash::new([1; 32]); + let to = system::transfer_to_account(target, amount, id).unwrap_or_revert(); + assert_eq!(to, TransferredTo::NewAccount); + for _i in 0..u64::MAX { + let to = system::transfer_to_account(target, amount, id).unwrap_or_revert(); + assert_eq!(to, TransferredTo::ExistingAccount); + } + } else { + let mut array = [0_u8; 32]; + for index in 0..32 { + for i in 1..=u8::MAX { + array[index] = i; + let target = AccountHash::new(array); + let to = system::transfer_to_account(target, amount, id).unwrap_or_revert(); + assert_eq!(to, TransferredTo::NewAccount); + } + } + } + } + "transfer_from_purse_to_account" => { + let account_exists: bool = runtime::get_named_arg("account_exists"); + let source = account::get_main_purse(); + let amount = U512::one(); + let id = Some(u64::MAX); + if account_exists { + let target = AccountHash::new([1; 32]); + let to = system::transfer_to_account(target, amount, id).unwrap_or_revert(); + assert_eq!(to, TransferredTo::NewAccount); + for _i in 0..u64::MAX { + let to = system::transfer_from_purse_to_account(source, target, amount, id) + .unwrap_or_revert(); + assert_eq!(to, TransferredTo::ExistingAccount); + } + } else { + let mut array = [0_u8; 32]; + for index in 0..32 { + for i in 1..=u8::MAX { + array[index] = i; + let target = AccountHash::new(array); + 
let to = system::transfer_from_purse_to_account(source, target, amount, id) + .unwrap_or_revert(); + assert_eq!(to, TransferredTo::NewAccount); + } + } + } + } + "transfer_from_purse_to_purse" => { + let source = account::get_main_purse(); + let target = system::create_purse(); + let amount = U512::one(); + let id = Some(u64::MAX); + system::transfer_from_purse_to_purse(source, target, amount, id).unwrap_or_revert(); + for _i in 0..u64::MAX { + system::transfer_from_purse_to_purse(source, target, amount, id).unwrap_or_revert(); + } + } + "get_balance" => { + let purse_exists: bool = runtime::get_named_arg("purse_exists"); + let uref = if purse_exists { + account::get_main_purse() + } else { + URef::new([1; 32], AccessRights::empty()) + }; + for _i in 0..u64::MAX { + let maybe_balance = system::get_purse_balance(uref); + assert_eq!(maybe_balance.is_some(), purse_exists); + } + } + "get_phase" => { + for _i in 0..u64::MAX { + let _p = runtime::get_phase(); + } + } + "get_system_contract" => { + for _i in 0..u64::MAX { + let _h = system::get_mint(); + } + } + "get_main_purse" => { + for _i in 0..u64::MAX { + let _u = account::get_main_purse(); + } + } + "read_host_buffer" => { + // The case where the host buffer is repeatedly filled is covered in the `read` + // branch above. All we do here is check repeatedly where `read_host_buffer` returns + // `HostBufferEmpty`. 
+ let mut buffer = vec![0; 1]; + let mut bytes_written = MaybeUninit::uninit(); + for _i in 0..u64::MAX { + let ret = unsafe { + ext_ffi::casper_read_host_buffer( + buffer.as_mut_ptr(), + buffer.len(), + bytes_written.as_mut_ptr(), + ) + }; + assert_eq!(ret, u32::from(ApiError::HostBufferEmpty) as i32); + } + } + "create_contract_package_at_hash" => { + for _i in 0..u64::MAX { + let _h = storage::create_contract_package_at_hash(); + } + } + "add_contract_version" => { + let entry_points_len: u32 = runtime::get_named_arg("entry_points_len"); + let mut entry_points = EntryPoints::new(); + for entry_point_index in 0..entry_points_len { + entry_points.add_entry_point(EntityEntryPoint::new( + format!("function_{entry_point_index}"), + vec![Parameter::new("a", CLType::PublicKey); 10], + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Caller, + EntryPointPayment::Caller, + )); + } + let named_keys_len: u32 = runtime::get_named_arg("named_keys_len"); + let mut named_keys = NamedKeys::new(); + for named_key_index in 0..named_keys_len { + let _ = named_keys.insert(named_key_index.to_string(), Key::Hash([1; 32])); + } + let (contract_pkg_hash, _uref) = storage::create_contract_package_at_hash(); + for i in 1..u64::MAX { + let (_h, version) = storage::add_contract_version( + contract_pkg_hash, + entry_points.clone(), + named_keys.clone(), + BTreeMap::new(), + ); + assert_eq!(version, i as u32); + } + } + "disable_contract_version" => { + let (contract_pkg_hash, _uref) = storage::create_contract_package_at_hash(); + let (contract_hash, _version) = storage::add_contract_version( + contract_pkg_hash, + EntryPoints::new(), + NamedKeys::new(), + BTreeMap::new(), + ); + for _i in 0..u64::MAX { + storage::disable_contract_version(contract_pkg_hash, contract_hash) + .unwrap_or_revert(); + } + } + "call_versioned_contract" => { + let args_len: u32 = runtime::get_named_arg("args_len"); + let args = runtime_args! 
{ "a" => vec![u8::MAX; args_len as usize] }; + let (contract_pkg_hash, _uref) = storage::create_contract_package_at_hash(); + let _ = store_noop_contract(Some(contract_pkg_hash)); + let (contract_pkg_hash_ptr, contract_pkg_hash_size, _bytes1) = + to_ptr(&contract_pkg_hash); + let (contract_version_ptr, contract_version_size, _bytes2) = to_ptr(&Some(1_u32)); + let (entry_point_name_ptr, entry_point_name_size, _bytes3) = to_ptr(&NOOP); + let (runtime_args_ptr, runtime_args_size, _bytes4) = to_ptr(&args); + let mut bytes_written = MaybeUninit::uninit(); + for _i in 0..u64::MAX { + let ret = unsafe { + ext_ffi::casper_call_versioned_contract( + contract_pkg_hash_ptr, + contract_pkg_hash_size, + contract_version_ptr, + contract_version_size, + entry_point_name_ptr, + entry_point_name_size, + runtime_args_ptr, + runtime_args_size, + bytes_written.as_mut_ptr(), + ) + }; + api_error::result_from(ret).unwrap_or_revert(); + } + } + "create_contract_user_group" => { + let label_len: u32 = runtime::get_named_arg("label_len"); + assert!(label_len > 0); + let label_prefix: String = "a".repeat(label_len as usize - 1); + let num_new_urefs: u8 = runtime::get_named_arg("num_new_urefs"); + let num_existing_urefs: u8 = runtime::get_named_arg("num_existing_urefs"); + let mut existing_urefs = BTreeSet::new(); + for _ in 0..num_existing_urefs { + existing_urefs.insert(storage::new_uref(())); + } + let (existing_urefs_ptr, existing_urefs_size, _bytes1) = to_ptr(&existing_urefs); + let (contract_pkg_hash, _uref) = storage::create_contract_package_at_hash(); + let (contract_pkg_hash_ptr, contract_pkg_hash_size, _bytes2) = + to_ptr(&contract_pkg_hash); + let mut index = 0_u8; + let mut label = String::new(); + let allow_exceeding_max_groups: bool = + runtime::get_named_arg("allow_exceeding_max_groups"); + let expect_failure = num_new_urefs == u8::MAX || allow_exceeding_max_groups; + let mut buffer = vec![0_u8; 5_000]; + let mut output_size = MaybeUninit::uninit(); + let mut bytes_written = 
MaybeUninit::uninit(); + loop { + if index == MAX_GROUPS && !allow_exceeding_max_groups { + // We need to remove the group to avoid hitting the `contracts::MAX_GROUPS` + // limit (currently 10). + let result = storage::remove_contract_user_group(contract_pkg_hash, &label); + if !expect_failure { + result.unwrap_or_revert(); + } + } else { + label = format!("{label_prefix}{index}"); + index += 1; + } + let (label_ptr, label_size, _bytes3) = to_ptr(&label); + let ret = unsafe { + ext_ffi::casper_create_contract_user_group( + contract_pkg_hash_ptr, + contract_pkg_hash_size, + label_ptr, + label_size, + num_new_urefs, + existing_urefs_ptr, + existing_urefs_size, + output_size.as_mut_ptr(), + ) + }; + if !expect_failure { + api_error::result_from(ret).unwrap_or_revert(); + let ret = unsafe { + ext_ffi::casper_read_host_buffer( + buffer.as_mut_ptr(), + buffer.len(), + bytes_written.as_mut_ptr(), + ) + }; + api_error::result_from(ret).unwrap_or_revert(); + } + } + } + "print" => { + let num_chars: u32 = runtime::get_named_arg("num_chars"); + let value: String = "a".repeat(num_chars as usize); + for _i in 0..u64::MAX { + runtime::print(&value); + } + } + "get_runtime_arg_size" => { + let name = "arg"; + for _i in 0..u64::MAX { + let _s = get_named_arg_size(name); + } + } + "get_runtime_arg" => { + let name = "arg"; + let arg_size = get_named_arg_size(name); + let data_non_null_ptr = contract_api::alloc_bytes(arg_size); + for _i in 0..u64::MAX { + let ret = unsafe { + ext_ffi::casper_get_named_arg( + name.as_bytes().as_ptr(), + name.len(), + data_non_null_ptr.as_ptr(), + arg_size, + ) + }; + api_error::result_from(ret).unwrap_or_revert(); + } + } + "remove_contract_user_group" => { + let (contract_pkg_hash, _uref) = storage::create_contract_package_at_hash(); + for _i in 0..u64::MAX { + storage::remove_contract_user_group(contract_pkg_hash, "a").unwrap_err(); + } + } + "extend_contract_user_group_urefs" => { + let allow_exceeding_max_urefs: bool = + 
runtime::get_named_arg("allow_exceeding_max_urefs"); + let (contract_pkg_hash, _uref) = storage::create_contract_package_at_hash(); + let label = "a"; + let _ = + storage::create_contract_user_group(contract_pkg_hash, label, 0, BTreeSet::new()) + .unwrap_or_revert(); + for _i in 0..u64::MAX { + if allow_exceeding_max_urefs { + let _r = storage::provision_contract_user_group_uref(contract_pkg_hash, label); + } else { + let uref = + storage::provision_contract_user_group_uref(contract_pkg_hash, label) + .unwrap_or_revert(); + storage::remove_contract_user_group_urefs( + contract_pkg_hash, + label, + BTreeSet::from_iter(Some(uref)), + ) + .unwrap_or_revert(); + } + } + } + "remove_contract_user_group_urefs" => { + // The success case is covered in `create_contract_user_group` above. We only test + // for unknown user groups here. + let (contract_pkg_hash, _uref) = storage::create_contract_package_at_hash(); + for _i in 0..u64::MAX { + storage::remove_contract_user_group(contract_pkg_hash, "a").unwrap_err(); + } + } + "blake2b" => { + let len: u32 = runtime::get_named_arg("len"); + let data = vec![1; len as usize]; + for _i in 0..u64::MAX { + let _hash = runtime::blake2b(&data); + } + } + "new_dictionary" => { + let mut buffer = vec![0_u8; 33]; // bytesrepr-serialized length of URef + for _i in 0..u64::MAX { + let mut value_size = MaybeUninit::uninit(); + let ret = unsafe { ext_ffi::casper_new_dictionary(value_size.as_mut_ptr()) }; + api_error::result_from(ret).unwrap_or_revert(); + assert_eq!(buffer.len(), unsafe { value_size.assume_init() }); + let mut bytes_written = MaybeUninit::uninit(); + let ret = unsafe { + ext_ffi::casper_read_host_buffer( + buffer.as_mut_ptr(), + buffer.len(), + bytes_written.as_mut_ptr(), + ) + }; + assert_eq!(ret, 0); + } + } + "dictionary_get" => { + let name_len: u32 = runtime::get_named_arg("name_len"); + let name: String = "a".repeat(name_len as usize); + let value_len: u32 = runtime::get_named_arg("value_len"); + let value = 
vec![u8::MAX; value_len as usize]; + let uref = storage::new_dictionary("a").unwrap_or_revert(); + storage::dictionary_put(uref, &name, value); + + for _i in 0..u64::MAX { + let read_value: Vec = storage::dictionary_get(uref, &name) + .unwrap_or_revert() + .unwrap_or_revert(); + assert_eq!(read_value.len(), value_len as usize); + } + } + "dictionary_put" => { + let name_len: u32 = runtime::get_named_arg("name_len"); + let name: String = "a".repeat(name_len as usize); + + let value_len: u32 = runtime::get_named_arg("value_len"); + let value = vec![u8::MAX; value_len as usize]; + + let uref = storage::new_dictionary("a").unwrap_or_revert(); + let (uref_ptr, uref_size, _bytes1) = to_ptr(&uref); + + let (item_name_ptr, item_name_size, _bytes2) = to_ptr(&name); + + let cl_value = CLValue::from_t(value).unwrap_or_revert(); + let (cl_value_ptr, cl_value_size, _bytes3) = to_ptr(&cl_value); + + for _i in 0..u64::MAX { + let ret = unsafe { + ext_ffi::casper_dictionary_put( + uref_ptr, + uref_size, + item_name_ptr, + item_name_size, + cl_value_ptr, + cl_value_size, + ) + }; + api_error::result_from(ret).unwrap_or_revert(); + } + } + "load_call_stack" => { + for _i in 0..u64::MAX { + let call_stack = runtime::get_call_stack(); + assert_eq!(call_stack.len(), 1); + } + } + "load_authorization_keys" => { + let setup: bool = runtime::get_named_arg("setup"); + if setup { + let weight = Weight::new(1); + for i in 1..100 { + let account_hash = AccountHash::new([i; 32]); + account::add_associated_key(account_hash, weight).unwrap_or_revert(); + } + } else { + for _i in 0..u64::MAX { + let _k = runtime::list_authorization_keys(); + } + } + } + "random_bytes" => { + for _i in 0..u64::MAX { + let _n = runtime::random_bytes(); + } + } + "dictionary_read" => { + let name_len: u32 = runtime::get_named_arg("name_len"); + let name: String = "a".repeat(name_len as usize); + let value_len: u32 = runtime::get_named_arg("value_len"); + let value = vec![u8::MAX; value_len as usize]; + let uref = 
storage::new_dictionary("a").unwrap_or_revert(); + storage::dictionary_put(uref, &name, value); + let key = Key::dictionary(uref, name.as_bytes()); + + for _i in 0..u64::MAX { + let read_value: Vec = storage::dictionary_read(key) + .unwrap_or_revert() + .unwrap_or_revert(); + assert_eq!(read_value.len(), value_len as usize); + } + } + "enable_contract_version" => { + let (contract_pkg_hash, _uref) = storage::create_contract_package_at_hash(); + let (contract_hash, _version) = storage::add_contract_version( + contract_pkg_hash, + EntryPoints::new(), + NamedKeys::new(), + BTreeMap::new(), + ); + for _i in 0..u64::MAX { + storage::enable_contract_version(contract_pkg_hash, contract_hash) + .unwrap_or_revert(); + } + } + _ => panic!(), + } +} + +#[no_mangle] +extern "C" fn function_0() {} +#[no_mangle] +extern "C" fn function_1() {} +#[no_mangle] +extern "C" fn function_2() {} +#[no_mangle] +extern "C" fn function_3() {} +#[no_mangle] +extern "C" fn function_4() {} +#[no_mangle] +extern "C" fn function_5() {} +#[no_mangle] +extern "C" fn function_6() {} +#[no_mangle] +extern "C" fn function_7() {} +#[no_mangle] +extern "C" fn function_8() {} +#[no_mangle] +extern "C" fn function_9() {} +#[no_mangle] +extern "C" fn function_10() {} +#[no_mangle] +extern "C" fn function_11() {} +#[no_mangle] +extern "C" fn function_12() {} +#[no_mangle] +extern "C" fn function_13() {} +#[no_mangle] +extern "C" fn function_14() {} +#[no_mangle] +extern "C" fn function_15() {} +#[no_mangle] +extern "C" fn function_16() {} +#[no_mangle] +extern "C" fn function_17() {} +#[no_mangle] +extern "C" fn function_18() {} +#[no_mangle] +extern "C" fn function_19() {} +#[no_mangle] +extern "C" fn function_20() {} +#[no_mangle] +extern "C" fn function_21() {} +#[no_mangle] +extern "C" fn function_22() {} +#[no_mangle] +extern "C" fn function_23() {} +#[no_mangle] +extern "C" fn function_24() {} +#[no_mangle] +extern "C" fn function_25() {} +#[no_mangle] +extern "C" fn function_26() {} +#[no_mangle] 
+extern "C" fn function_27() {} +#[no_mangle] +extern "C" fn function_28() {} +#[no_mangle] +extern "C" fn function_29() {} +#[no_mangle] +extern "C" fn function_30() {} +#[no_mangle] +extern "C" fn function_31() {} +#[no_mangle] +extern "C" fn function_32() {} +#[no_mangle] +extern "C" fn function_33() {} +#[no_mangle] +extern "C" fn function_34() {} +#[no_mangle] +extern "C" fn function_35() {} +#[no_mangle] +extern "C" fn function_36() {} +#[no_mangle] +extern "C" fn function_37() {} +#[no_mangle] +extern "C" fn function_38() {} +#[no_mangle] +extern "C" fn function_39() {} +#[no_mangle] +extern "C" fn function_40() {} +#[no_mangle] +extern "C" fn function_41() {} +#[no_mangle] +extern "C" fn function_42() {} +#[no_mangle] +extern "C" fn function_43() {} +#[no_mangle] +extern "C" fn function_44() {} +#[no_mangle] +extern "C" fn function_45() {} +#[no_mangle] +extern "C" fn function_46() {} +#[no_mangle] +extern "C" fn function_47() {} +#[no_mangle] +extern "C" fn function_48() {} +#[no_mangle] +extern "C" fn function_49() {} +#[no_mangle] +extern "C" fn function_50() {} +#[no_mangle] +extern "C" fn function_51() {} +#[no_mangle] +extern "C" fn function_52() {} +#[no_mangle] +extern "C" fn function_53() {} +#[no_mangle] +extern "C" fn function_54() {} +#[no_mangle] +extern "C" fn function_55() {} +#[no_mangle] +extern "C" fn function_56() {} +#[no_mangle] +extern "C" fn function_57() {} +#[no_mangle] +extern "C" fn function_58() {} +#[no_mangle] +extern "C" fn function_59() {} +#[no_mangle] +extern "C" fn function_60() {} +#[no_mangle] +extern "C" fn function_61() {} +#[no_mangle] +extern "C" fn function_62() {} +#[no_mangle] +extern "C" fn function_63() {} +#[no_mangle] +extern "C" fn function_64() {} +#[no_mangle] +extern "C" fn function_65() {} +#[no_mangle] +extern "C" fn function_66() {} +#[no_mangle] +extern "C" fn function_67() {} +#[no_mangle] +extern "C" fn function_68() {} +#[no_mangle] +extern "C" fn function_69() {} +#[no_mangle] +extern "C" fn 
function_70() {} +#[no_mangle] +extern "C" fn function_71() {} +#[no_mangle] +extern "C" fn function_72() {} +#[no_mangle] +extern "C" fn function_73() {} +#[no_mangle] +extern "C" fn function_74() {} +#[no_mangle] +extern "C" fn function_75() {} +#[no_mangle] +extern "C" fn function_76() {} +#[no_mangle] +extern "C" fn function_77() {} +#[no_mangle] +extern "C" fn function_78() {} +#[no_mangle] +extern "C" fn function_79() {} +#[no_mangle] +extern "C" fn function_80() {} +#[no_mangle] +extern "C" fn function_81() {} +#[no_mangle] +extern "C" fn function_82() {} +#[no_mangle] +extern "C" fn function_83() {} +#[no_mangle] +extern "C" fn function_84() {} +#[no_mangle] +extern "C" fn function_85() {} +#[no_mangle] +extern "C" fn function_86() {} +#[no_mangle] +extern "C" fn function_87() {} +#[no_mangle] +extern "C" fn function_88() {} +#[no_mangle] +extern "C" fn function_89() {} +#[no_mangle] +extern "C" fn function_90() {} +#[no_mangle] +extern "C" fn function_91() {} +#[no_mangle] +extern "C" fn function_92() {} +#[no_mangle] +extern "C" fn function_93() {} +#[no_mangle] +extern "C" fn function_94() {} +#[no_mangle] +extern "C" fn function_95() {} +#[no_mangle] +extern "C" fn function_96() {} +#[no_mangle] +extern "C" fn function_97() {} +#[no_mangle] +extern "C" fn function_98() {} +#[no_mangle] +extern "C" fn function_99() {} diff --git a/smart_contracts/contracts/test/remove-associated-key/Cargo.toml b/smart_contracts/contracts/test/remove-associated-key/Cargo.toml index c6ccba4d4b..45170f01cd 100644 --- a/smart_contracts/contracts/test/remove-associated-key/Cargo.toml +++ b/smart_contracts/contracts/test/remove-associated-key/Cargo.toml @@ -1,8 +1,8 @@ [package] name = "remove-associated-key" version = "0.1.0" -authors = ["Ed Hastings "] +edition = "2021" + +[[bin]] +name = "set_action_thresholds" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = 
"../../../../types" } diff --git a/smart_contracts/contracts/test/set-action-thresholds/src/main.rs b/smart_contracts/contracts/test/set-action-thresholds/src/main.rs new file mode 100644 index 0000000000..2c5dd78639 --- /dev/null +++ b/smart_contracts/contracts/test/set-action-thresholds/src/main.rs @@ -0,0 +1,26 @@ +#![no_std] +#![no_main] + +use casper_contract::{ + contract_api::{account, runtime}, + unwrap_or_revert::UnwrapOrRevert, +}; +use casper_types::account::{ActionType, Weight}; + +const ARG_KEY_MANAGEMENT_THRESHOLD: &str = "key_management_threshold"; +const ARG_DEPLOY_THRESHOLD: &str = "deploy_threshold"; + +#[no_mangle] +pub extern "C" fn call() { + let key_management_threshold: Weight = runtime::get_named_arg(ARG_KEY_MANAGEMENT_THRESHOLD); + let deploy_threshold: Weight = runtime::get_named_arg(ARG_DEPLOY_THRESHOLD); + + if key_management_threshold != Weight::new(0) { + account::set_action_threshold(ActionType::KeyManagement, key_management_threshold) + .unwrap_or_revert() + } + + if deploy_threshold != Weight::new(0) { + account::set_action_threshold(ActionType::Deployment, deploy_threshold).unwrap_or_revert() + } +} diff --git a/smart_contracts/contracts/test/staking-stored/Cargo.toml b/smart_contracts/contracts/test/staking-stored/Cargo.toml new file mode 100644 index 0000000000..46175f3cf4 --- /dev/null +++ b/smart_contracts/contracts/test/staking-stored/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "staking-stored" +version = "0.1.0" +authors = ["Ed Hastings "] +edition = "2021" + +[[bin]] +name = "staking_stored" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +staking = { path = "../staking" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/staking-stored/src/main.rs b/smart_contracts/contracts/test/staking-stored/src/main.rs new file mode 100644 index 0000000000..d11b7efd60 --- /dev/null +++ 
b/smart_contracts/contracts/test/staking-stored/src/main.rs @@ -0,0 +1,84 @@ +#![no_std] +#![no_main] + +extern crate alloc; + +use alloc::{string::ToString, vec}; + +use casper_contract::{ + contract_api::{account, runtime, storage, system}, + unwrap_or_revert::UnwrapOrRevert, +}; +use casper_types::{ + contracts::NamedKeys, ApiError, CLType, EntityEntryPoint, EntryPointAccess, EntryPointPayment, + EntryPointType, EntryPoints, Key, Parameter, URef, +}; + +#[repr(u16)] +enum InstallerSessionError { + FailedToTransfer = 101, +} + +#[no_mangle] +pub extern "C" fn call_staking() { + staking::run(); +} + +fn build_named_keys_and_purse() -> (NamedKeys, URef) { + let mut named_keys = NamedKeys::new(); + let purse = system::create_purse(); + + named_keys.insert(staking::STAKING_PURSE.to_string(), purse.into()); + named_keys.insert(staking::INSTALLER.to_string(), runtime::get_caller().into()); + + (named_keys, purse) +} + +fn entry_points() -> EntryPoints { + let mut entry_points = EntryPoints::new(); + + entry_points.add_entry_point(EntityEntryPoint::new( + staking::ENTRY_POINT_RUN, + vec![ + Parameter::new(staking::ARG_ACTION, CLType::String), + Parameter::new(staking::ARG_AMOUNT, CLType::U512), + Parameter::new(staking::ARG_VALIDATOR, CLType::PublicKey), + Parameter::new(staking::ARG_NEW_VALIDATOR, CLType::PublicKey), + ], + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + )); + + entry_points +} + +#[no_mangle] +pub extern "C" fn call() { + let entry_points = entry_points(); + + let (staking_named_keys, staking_purse) = build_named_keys_and_purse(); + + let (contract_hash, contract_version) = storage::new_contract( + entry_points, + Some(staking_named_keys), + Some(staking::HASH_KEY_NAME.to_string()), + Some(staking::ACCESS_KEY_NAME.to_string()), + None, + ); + + runtime::put_key( + staking::CONTRACT_VERSION, + storage::new_uref(contract_version).into(), + ); + + runtime::put_key(staking::CONTRACT_NAME, 
Key::Hash(contract_hash.value())); + + // Initial funding amount. + let amount = runtime::get_named_arg(staking::ARG_AMOUNT); + system::transfer_from_purse_to_purse(account::get_main_purse(), staking_purse, amount, None) + .unwrap_or_revert_with(ApiError::User( + InstallerSessionError::FailedToTransfer as u16, + )); +} diff --git a/smart_contracts/contracts/test/staking/Cargo.toml b/smart_contracts/contracts/test/staking/Cargo.toml new file mode 100644 index 0000000000..0ee25c8f3c --- /dev/null +++ b/smart_contracts/contracts/test/staking/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "staking" +version = "0.1.0" +authors = ["Ed Hastings "] +edition = "2021" + +[[bin]] +name = "staking" +path = "src/bin/main.rs" +doctest = false +test = false +bench = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/staking/src/bin/main.rs b/smart_contracts/contracts/test/staking/src/bin/main.rs new file mode 100644 index 0000000000..d0b716a924 --- /dev/null +++ b/smart_contracts/contracts/test/staking/src/bin/main.rs @@ -0,0 +1,7 @@ +#![no_std] +#![no_main] + +#[no_mangle] +pub extern "C" fn call() { + staking::run(); +} diff --git a/smart_contracts/contracts/test/staking/src/lib.rs b/smart_contracts/contracts/test/staking/src/lib.rs new file mode 100644 index 0000000000..dadfc23e82 --- /dev/null +++ b/smart_contracts/contracts/test/staking/src/lib.rs @@ -0,0 +1,249 @@ +#![no_std] + +extern crate alloc; + +use alloc::{ + string::{String, ToString}, + vec, + vec::Vec, +}; + +use casper_contract::{ + contract_api::{ + runtime::{self, revert}, + storage::read_from_key, + system, + }, + ext_ffi, + unwrap_or_revert::UnwrapOrRevert, +}; +use casper_types::{ + account::AccountHash, + api_error, + bytesrepr::{self, ToBytes}, + runtime_args, + system::auction::{self, BidAddr, BidKind}, + ApiError, CLValue, Key, PublicKey, URef, U512, +}; + +pub const STAKING_ID: &str = 
"staking_contract"; + +pub const ARG_ACTION: &str = "action"; +pub const ARG_AMOUNT: &str = "amount"; +pub const ARG_VALIDATOR: &str = "validator"; +pub const ARG_NEW_VALIDATOR: &str = "new_validator"; + +pub const STAKING_PURSE: &str = "staking_purse"; +pub const INSTALLER: &str = "installer"; +pub const CONTRACT_NAME: &str = "staking"; +pub const HASH_KEY_NAME: &str = "staking_package"; +pub const ACCESS_KEY_NAME: &str = "staking_package_access"; +pub const CONTRACT_VERSION: &str = "staking_contract_version"; +pub const ENTRY_POINT_RUN: &str = "run"; + +#[repr(u16)] +enum StakingError { + InvalidAccount = 1, + MissingInstaller = 2, + InvalidInstaller = 3, + MissingStakingPurse = 4, + InvalidStakingPurse = 5, + UnexpectedKeyVariant = 6, + UnexpectedAction = 7, + MissingValidator = 8, + MissingNewValidator = 9, +} + +impl From for ApiError { + fn from(e: StakingError) -> Self { + ApiError::User(e as u16) + } +} + +#[no_mangle] +pub fn run() { + let caller = runtime::get_caller(); + let installer = get_account_hash_with_user_errors( + INSTALLER, + StakingError::MissingInstaller, + StakingError::InvalidInstaller, + ); + + if caller != installer { + revert(ApiError::User(StakingError::InvalidAccount as u16)); + } + + let action: String = runtime::get_named_arg(ARG_ACTION); + + if action == *"UNSTAKE".to_string() { + unstake(); + } else if action == *"STAKE".to_string() { + stake(); + } else if action == *"STAKE_ALL".to_string() { + stake_all(); + } else if action == *"RESTAKE".to_string() { + restake(); + } else if action == *"STAKED_AMOUNT".to_string() { + read_staked_amount_gs(); + } else { + revert(ApiError::User(StakingError::UnexpectedAction as u16)); + } +} + +fn unstake() { + let args = get_unstaking_args(false); + let contract_hash = system::get_auction(); + runtime::call_contract::(contract_hash, auction::METHOD_UNDELEGATE, args); +} + +fn restake() { + let args = get_unstaking_args(true); + let contract_hash = system::get_auction(); + 
runtime::call_contract::(contract_hash, auction::METHOD_REDELEGATE, args); +} + +fn stake() { + let staking_purse = get_uref_with_user_errors( + STAKING_PURSE, + StakingError::MissingStakingPurse, + StakingError::InvalidStakingPurse, + ); + let validator: PublicKey = match runtime::try_get_named_arg(ARG_VALIDATOR) { + Some(validator_public_key) => validator_public_key, + None => revert(ApiError::User(StakingError::MissingValidator as u16)), + }; + let amount: U512 = runtime::get_named_arg(ARG_AMOUNT); + let contract_hash = system::get_auction(); + let args = runtime_args! { + auction::ARG_DELEGATOR_PURSE => staking_purse, + auction::ARG_VALIDATOR => validator, + auction::ARG_AMOUNT => amount, + }; + runtime::call_contract::(contract_hash, auction::METHOD_DELEGATE, args); +} + +fn stake_all() { + let staking_purse = get_uref_with_user_errors( + STAKING_PURSE, + StakingError::MissingStakingPurse, + StakingError::InvalidStakingPurse, + ); + let validator: PublicKey = match runtime::try_get_named_arg(ARG_VALIDATOR) { + Some(validator_public_key) => validator_public_key, + None => revert(ApiError::User(StakingError::MissingValidator as u16)), + }; + let amount: U512 = system::get_purse_balance(staking_purse).unwrap_or_revert(); + let contract_hash = system::get_auction(); + let args = runtime_args! 
{ + auction::ARG_DELEGATOR_PURSE => staking_purse, + auction::ARG_VALIDATOR => validator, + auction::ARG_AMOUNT => amount, + }; + runtime::call_contract::(contract_hash, auction::METHOD_DELEGATE, args); +} + +pub fn read_staked_amount_gs() { + let purse = get_uref_with_user_errors( + STAKING_PURSE, + StakingError::MissingStakingPurse, + StakingError::InvalidStakingPurse, + ); + + let validator = match runtime::try_get_named_arg::(ARG_VALIDATOR) { + Some(validator_public_key) => validator_public_key, + None => revert(ApiError::User(StakingError::MissingValidator as u16)), + }; + + let key = Key::BidAddr(BidAddr::DelegatedPurse { + validator: validator.to_account_hash(), + delegator: purse.addr(), + }); + + let bid = read_from_key::(key); + + let staked_amount = if let Ok(Some(BidKind::Delegator(delegator_bid))) = bid { + delegator_bid.staked_amount() + } else { + U512::zero() + }; + + runtime::ret(CLValue::from_t(staked_amount).unwrap_or_revert()); +} + +fn get_unstaking_args(is_restake: bool) -> casper_types::RuntimeArgs { + let staking_purse = get_uref_with_user_errors( + STAKING_PURSE, + StakingError::MissingStakingPurse, + StakingError::InvalidStakingPurse, + ); + let validator: PublicKey = match runtime::try_get_named_arg(ARG_VALIDATOR) { + Some(validator_public_key) => validator_public_key, + None => revert(ApiError::User(StakingError::MissingValidator as u16)), + }; + let amount: U512 = runtime::get_named_arg(ARG_AMOUNT); + if !is_restake { + return runtime_args! { + auction::ARG_DELEGATOR_PURSE => staking_purse, + auction::ARG_VALIDATOR => validator, + auction::ARG_AMOUNT => amount, + }; + } + + let new_validator: PublicKey = match runtime::try_get_named_arg(ARG_NEW_VALIDATOR) { + Some(validator_public_key) => validator_public_key, + None => revert(ApiError::User(StakingError::MissingNewValidator as u16)), + }; + + runtime_args! 
{ + auction::ARG_DELEGATOR_PURSE => staking_purse, + auction::ARG_VALIDATOR => validator, + auction::ARG_NEW_VALIDATOR => new_validator, + auction::ARG_AMOUNT => amount, + } +} + +fn get_account_hash_with_user_errors( + name: &str, + missing: StakingError, + invalid: StakingError, +) -> AccountHash { + let key = get_key_with_user_errors(name, missing, invalid); + key.into_account() + .unwrap_or_revert_with(StakingError::UnexpectedKeyVariant) +} + +fn get_uref_with_user_errors(name: &str, missing: StakingError, invalid: StakingError) -> URef { + let key = get_key_with_user_errors(name, missing, invalid); + key.into_uref() + .unwrap_or_revert_with(StakingError::UnexpectedKeyVariant) +} + +fn get_key_with_user_errors(name: &str, missing: StakingError, invalid: StakingError) -> Key { + let (name_ptr, name_size, _bytes) = to_ptr(name); + let mut key_bytes = vec![0u8; Key::max_serialized_length()]; + let mut total_bytes: usize = 0; + let ret = unsafe { + ext_ffi::casper_get_key( + name_ptr, + name_size, + key_bytes.as_mut_ptr(), + key_bytes.len(), + &mut total_bytes as *mut usize, + ) + }; + match api_error::result_from(ret) { + Ok(_) => {} + Err(ApiError::MissingKey) => revert(missing), + Err(e) => revert(e), + } + key_bytes.truncate(total_bytes); + + bytesrepr::deserialize(key_bytes).unwrap_or_revert_with(invalid) +} + +fn to_ptr(t: T) -> (*const u8, usize, Vec) { + let bytes = t.into_bytes().unwrap_or_revert(); + let ptr = bytes.as_ptr(); + let size = bytes.len(); + (ptr, size, bytes) +} diff --git a/smart_contracts/contracts/test/storage-costs/Cargo.toml b/smart_contracts/contracts/test/storage-costs/Cargo.toml index 1806c514e6..1d08a9b1a2 100644 --- a/smart_contracts/contracts/test/storage-costs/Cargo.toml +++ b/smart_contracts/contracts/test/storage-costs/Cargo.toml @@ -1,8 +1,8 @@ [package] name = "storage-costs" version = "0.1.0" -authors = ["Michał Papierski "] -edition = "2018" +authors = ["Michał Papierski "] +edition = "2021" [[bin]] name = "storage_costs" @@ 
-11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/storage-costs/src/main.rs b/smart_contracts/contracts/test/storage-costs/src/main.rs index 42e64faf5c..fee8432c07 100644 --- a/smart_contracts/contracts/test/storage-costs/src/main.rs +++ b/smart_contracts/contracts/test/storage-costs/src/main.rs @@ -3,15 +3,15 @@ extern crate alloc; -use alloc::{string::ToString, vec::Vec}; +use alloc::{collections::BTreeMap, string::ToString, vec::Vec}; use casper_contract::{ contract_api::{runtime, storage}, unwrap_or_revert::UnwrapOrRevert, }; use casper_types::{ - contracts::NamedKeys, CLType, EntryPoint, EntryPointAccess, EntryPointType, EntryPoints, Key, - U512, + contracts::NamedKeys, CLType, EntityEntryPoint, EntryPointAccess, EntryPointPayment, + EntryPointType, EntryPoints, Key, U512, }; const WRITE_FUNCTION_SMALL_NAME: &str = "write_function_small"; @@ -25,7 +25,7 @@ const WRITE_LARGE_VALUE: &[u8] = b"111111111111111111111111111111111111111111111 const HASH_KEY_NAME: &str = "contract_package"; const CONTRACT_KEY_NAME: &str = "contract"; const ADD_SMALL_VALUE: u64 = 1; -const ADD_LARGE_VALUE: u64 = u64::max_value(); +const ADD_LARGE_VALUE: u64 = u64::MAX; const NEW_UREF_FUNCTION: &str = "new_uref_function"; const PUT_KEY_FUNCTION: &str = "put_key_function"; const REMOVE_KEY_FUNCTION: &str = "remove_key_function"; @@ -93,11 +93,10 @@ pub extern "C" fn create_contract_package_at_hash_function() { #[no_mangle] pub extern "C" fn create_contract_user_group_function() { let contract_package_hash = runtime::get_key(CONTRACT_KEY_NAME) - .and_then(Key::into_hash) - .expect("should have package hash") - .into(); + .and_then(Key::into_package_hash) + .expect("should have package hash"); let _result = storage::create_contract_user_group( - contract_package_hash, + 
contract_package_hash.into(), LABEL_NAME, 0, Default::default(), @@ -108,30 +107,29 @@ pub extern "C" fn create_contract_user_group_function() { #[no_mangle] pub extern "C" fn provision_urefs_function() { let contract_package_hash = runtime::get_key(CONTRACT_KEY_NAME) - .and_then(Key::into_hash) - .expect("should have package hash") - .into(); - let _result = storage::provision_contract_user_group_uref(contract_package_hash, LABEL_NAME) - .unwrap_or_revert(); + .and_then(Key::into_package_hash) + .expect("should have package hash"); + let _result = + storage::provision_contract_user_group_uref(contract_package_hash.into(), LABEL_NAME) + .unwrap_or_revert(); } #[no_mangle] pub extern "C" fn remove_contract_user_group_function() { let contract_package_hash = runtime::get_key(CONTRACT_KEY_NAME) - .and_then(Key::into_hash) - .expect("should have package hash") - .into(); - storage::remove_contract_user_group(contract_package_hash, LABEL_NAME).unwrap_or_revert(); + .and_then(Key::into_package_hash) + .expect("should have package hash"); + storage::remove_contract_user_group(contract_package_hash.into(), LABEL_NAME) + .unwrap_or_revert(); } #[no_mangle] pub extern "C" fn new_uref_subcall() { let contract_package_hash = runtime::get_key(CONTRACT_KEY_NAME) - .and_then(Key::into_hash) - .expect("should have package hash") - .into(); + .and_then(Key::into_package_hash) + .expect("should have package hash"); runtime::call_versioned_contract( - contract_package_hash, + contract_package_hash.into(), None, NEW_UREF_FUNCTION, Default::default(), @@ -142,108 +140,120 @@ pub extern "C" fn new_uref_subcall() { pub extern "C" fn call() { let entry_points = { let mut entry_points = EntryPoints::new(); - let entry_point = EntryPoint::new( + let entry_point = EntityEntryPoint::new( WRITE_FUNCTION_SMALL_NAME, Vec::new(), CLType::Unit, EntryPointAccess::Public, - EntryPointType::Contract, + EntryPointType::Called, + EntryPointPayment::Caller, ); entry_points.add_entry_point(entry_point); 
- let entry_point = EntryPoint::new( + let entry_point = EntityEntryPoint::new( WRITE_FUNCTION_LARGE_NAME, Vec::new(), CLType::Unit, EntryPointAccess::Public, - EntryPointType::Contract, + EntryPointType::Called, + EntryPointPayment::Caller, ); entry_points.add_entry_point(entry_point); - let entry_point = EntryPoint::new( + let entry_point = EntityEntryPoint::new( ADD_FUNCTION_SMALL_NAME, Vec::new(), CLType::Unit, EntryPointAccess::Public, - EntryPointType::Contract, + EntryPointType::Called, + EntryPointPayment::Caller, ); entry_points.add_entry_point(entry_point); - let entry_point = EntryPoint::new( + let entry_point = EntityEntryPoint::new( ADD_FUNCTION_LARGE_NAME, Vec::new(), CLType::Unit, EntryPointAccess::Public, - EntryPointType::Contract, + EntryPointType::Called, + EntryPointPayment::Caller, ); entry_points.add_entry_point(entry_point); - let entry_point = EntryPoint::new( + let entry_point = EntityEntryPoint::new( NEW_UREF_FUNCTION, Vec::new(), CLType::Unit, EntryPointAccess::Public, - EntryPointType::Contract, + EntryPointType::Called, + EntryPointPayment::Caller, ); entry_points.add_entry_point(entry_point); - let entry_point = EntryPoint::new( + let entry_point = EntityEntryPoint::new( PUT_KEY_FUNCTION, Vec::new(), CLType::Unit, EntryPointAccess::Public, - EntryPointType::Contract, + EntryPointType::Called, + EntryPointPayment::Caller, ); entry_points.add_entry_point(entry_point); - let entry_point = EntryPoint::new( + let entry_point = EntityEntryPoint::new( REMOVE_KEY_FUNCTION, Vec::new(), CLType::Unit, EntryPointAccess::Public, - EntryPointType::Contract, + EntryPointType::Called, + EntryPointPayment::Caller, ); entry_points.add_entry_point(entry_point); - let entry_point = EntryPoint::new( + let entry_point = EntityEntryPoint::new( CREATE_CONTRACT_PACKAGE_AT_HASH_FUNCTION, Vec::new(), CLType::Unit, EntryPointAccess::Public, - EntryPointType::Contract, + EntryPointType::Called, + EntryPointPayment::Caller, ); 
entry_points.add_entry_point(entry_point); - let entry_point = EntryPoint::new( + let entry_point = EntityEntryPoint::new( CREATE_CONTRACT_USER_GROUP_FUNCTION_FUNCTION, Vec::new(), CLType::Unit, EntryPointAccess::Public, - EntryPointType::Contract, + EntryPointType::Called, + EntryPointPayment::Caller, ); entry_points.add_entry_point(entry_point); - let entry_point = EntryPoint::new( + let entry_point = EntityEntryPoint::new( PROVISION_UREFS_FUNCTION, Vec::new(), CLType::Unit, EntryPointAccess::Public, - EntryPointType::Contract, + EntryPointType::Called, + EntryPointPayment::Caller, ); entry_points.add_entry_point(entry_point); - let entry_point = EntryPoint::new( + let entry_point = EntityEntryPoint::new( REMOVE_CONTRACT_USER_GROUP_FUNCTION, Vec::new(), CLType::Unit, EntryPointAccess::Public, - EntryPointType::Contract, + EntryPointType::Called, + EntryPointPayment::Caller, ); entry_points.add_entry_point(entry_point); - let entry_point = EntryPoint::new( + let entry_point = EntityEntryPoint::new( NEW_UREF_SUBCALL_FUNCTION, Vec::new(), CLType::Unit, EntryPointAccess::Public, - EntryPointType::Contract, + EntryPointType::Called, + EntryPointPayment::Caller, ); entry_points.add_entry_point(entry_point); @@ -251,7 +261,7 @@ pub extern "C" fn call() { }; let (contract_package_hash, access_uref) = storage::create_contract_package_at_hash(); - runtime::put_key(&HASH_KEY_NAME, contract_package_hash.into()); + runtime::put_key(HASH_KEY_NAME, contract_package_hash.into()); let named_keys = { let mut named_keys = NamedKeys::new(); @@ -264,15 +274,19 @@ pub extern "C" fn call() { named_keys.insert( CONTRACT_KEY_NAME.to_string(), - Key::Hash(contract_package_hash.value()), + Key::SmartContract(contract_package_hash.value()), ); named_keys.insert(ACCESS_KEY_NAME.to_string(), access_uref.into()); named_keys }; - let (contract_hash, _version) = - storage::add_contract_version(contract_package_hash, entry_points, named_keys); - runtime::put_key(&CONTRACT_KEY_NAME, 
contract_hash.into()); - runtime::put_key(&ACCESS_KEY_NAME, access_uref.into()); + let (contract_hash, _version) = storage::add_contract_version( + contract_package_hash, + entry_points, + named_keys, + BTreeMap::new(), + ); + runtime::put_key(CONTRACT_KEY_NAME, Key::Hash(contract_hash.value())); + runtime::put_key(ACCESS_KEY_NAME, access_uref.into()); } diff --git a/smart_contracts/contracts/test/system-contract-hashes/Cargo.toml b/smart_contracts/contracts/test/system-contract-hashes/Cargo.toml index 705dcb345a..fdf0346e86 100644 --- a/smart_contracts/contracts/test/system-contract-hashes/Cargo.toml +++ b/smart_contracts/contracts/test/system-contract-hashes/Cargo.toml @@ -1,8 +1,8 @@ [package] name = "system-contract-hashes" version = "0.1.0" -authors = ["Ed Hastings "] -edition = "2018" +authors = ["Ed Hastings "] +edition = "2021" [[bin]] name = "system_contract_hashes" diff --git a/smart_contracts/contracts/test/system-hashes/Cargo.toml b/smart_contracts/contracts/test/system-hashes/Cargo.toml deleted file mode 100644 index fe3f5ddeab..0000000000 --- a/smart_contracts/contracts/test/system-hashes/Cargo.toml +++ /dev/null @@ -1,19 +0,0 @@ -[package] -name = "system-hashes" -version = "0.1.0" -authors = ["Michał Papierski "] -edition = "2018" - -[[bin]] -name = "system_hashes" -path = "src/main.rs" -bench = false -doctest = false -test = false - -[features] -std = ["casper-contract/std", "casper-types/std"] - -[dependencies] -casper-contract = { path = "../../../contract" } -casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/system-hashes/src/main.rs b/smart_contracts/contracts/test/system-hashes/src/main.rs deleted file mode 100644 index 155a3e1e99..0000000000 --- a/smart_contracts/contracts/test/system-hashes/src/main.rs +++ /dev/null @@ -1,29 +0,0 @@ -#![no_std] -#![no_main] - -use casper_contract::contract_api::system; -use casper_types::ContractHash; - -const MINT_CONTRACT_HASH: ContractHash = ContractHash::new([ - 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, -]); -const AUCTION_CONTRACT_HASH: ContractHash = ContractHash::new([ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, -]); -const HANDLE_PAYMENT_CONTRACT_HASH: ContractHash = ContractHash::new([ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, -]); -const STANDARD_PAYMENT_CONTRACT_HASH: ContractHash = ContractHash::new([ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, -]); - -#[no_mangle] -pub extern "C" fn call() { - assert_eq!(MINT_CONTRACT_HASH, system::get_mint(),); - assert_eq!(AUCTION_CONTRACT_HASH, system::get_auction(),); - assert_eq!(HANDLE_PAYMENT_CONTRACT_HASH, system::get_handle_payment(),); - assert_eq!( - STANDARD_PAYMENT_CONTRACT_HASH, - system::get_standard_payment(), - ); -} diff --git a/smart_contracts/contracts/test/test-payment-stored/Cargo.toml b/smart_contracts/contracts/test/test-payment-stored/Cargo.toml index 1e6127d0c4..33d18315c4 100644 --- a/smart_contracts/contracts/test/test-payment-stored/Cargo.toml +++ b/smart_contracts/contracts/test/test-payment-stored/Cargo.toml @@ -1,8 +1,8 @@ [package] name = "test-payment-stored" version = "0.1.0" -authors = ["Henry Till ", "Ed Hastings "] -edition = "2018" +authors = ["Henry Till ", "Ed Hastings "] +edition = "2021" [[bin]] name = "test_payment_stored" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/test-payment-stored/src/main.rs b/smart_contracts/contracts/test/test-payment-stored/src/main.rs index 780b4e51ce..9dadafbc53 100644 --- a/smart_contracts/contracts/test/test-payment-stored/src/main.rs +++ 
b/smart_contracts/contracts/test/test-payment-stored/src/main.rs @@ -10,23 +10,24 @@ use casper_contract::{ unwrap_or_revert::UnwrapOrRevert, }; use casper_types::{ - contracts::{EntryPoint, EntryPointAccess, EntryPointType, EntryPoints, Parameter}, - system::mint::ARG_AMOUNT, - CLType, RuntimeArgs, URef, U512, + addressable_entity::{ + EntityEntryPoint, EntryPointAccess, EntryPointType, EntryPoints, Parameter, + }, + system::standard_payment, + AddressableEntityHash, CLType, EntryPointPayment, Key, RuntimeArgs, URef, U512, }; const ENTRY_FUNCTION_NAME: &str = "pay"; const HASH_KEY_NAME: &str = "test_payment_hash"; const PACKAGE_HASH_KEY_NAME: &str = "test_payment_package_hash"; const ACCESS_KEY_NAME: &str = "test_payment_access"; -const ARG_NAME: &str = "amount"; const CONTRACT_VERSION: &str = "contract_version"; const GET_PAYMENT_PURSE: &str = "get_payment_purse"; #[no_mangle] pub extern "C" fn pay() { // amount to transfer from named purse to payment purse - let amount: U512 = runtime::get_named_arg(ARG_AMOUNT); + let amount: U512 = runtime::get_named_arg(standard_payment::ARG_AMOUNT); let purse_uref = account::get_main_purse(); @@ -49,12 +50,13 @@ pub extern "C" fn pay() { pub extern "C" fn call() { let entry_points = { let mut entry_points = EntryPoints::new(); - let entry_point = EntryPoint::new( + let entry_point = EntityEntryPoint::new( ENTRY_FUNCTION_NAME.to_string(), - vec![Parameter::new(ARG_NAME, CLType::U512)], + vec![Parameter::new(standard_payment::ARG_AMOUNT, CLType::U512)], CLType::Unit, EntryPointAccess::Public, - EntryPointType::Session, + EntryPointType::Called, + EntryPointPayment::Caller, ); entry_points.add_entry_point(entry_point); entry_points @@ -64,7 +66,11 @@ pub extern "C" fn call() { None, Some(PACKAGE_HASH_KEY_NAME.to_string()), Some(ACCESS_KEY_NAME.to_string()), + None, ); runtime::put_key(CONTRACT_VERSION, storage::new_uref(contract_version).into()); - runtime::put_key(HASH_KEY_NAME, contract_hash.into()); + runtime::put_key( + 
HASH_KEY_NAME, + Key::contract_entity_key(AddressableEntityHash::new(contract_hash.value())), + ); } diff --git a/smart_contracts/contracts/test/transfer-main-purse-to-new-purse/Cargo.toml b/smart_contracts/contracts/test/transfer-main-purse-to-new-purse/Cargo.toml index bcbbfe2bc2..e9ffc86b24 100644 --- a/smart_contracts/contracts/test/transfer-main-purse-to-new-purse/Cargo.toml +++ b/smart_contracts/contracts/test/transfer-main-purse-to-new-purse/Cargo.toml @@ -1,8 +1,8 @@ [package] name = "transfer-main-purse-to-new-purse" version = "0.1.0" -authors = ["Ed Hastings "] -edition = "2018" +authors = ["Ed Hastings "] +edition = "2021" [[bin]] name = "transfer_main_purse_to_new_purse" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/transfer-main-purse-to-two-purses/Cargo.toml b/smart_contracts/contracts/test/transfer-main-purse-to-two-purses/Cargo.toml index 6b9f1325f0..0b1a900070 100644 --- a/smart_contracts/contracts/test/transfer-main-purse-to-two-purses/Cargo.toml +++ b/smart_contracts/contracts/test/transfer-main-purse-to-two-purses/Cargo.toml @@ -1,8 +1,8 @@ [package] name = "transfer-main-purse-to-two-purses" version = "0.1.0" -authors = ["Joe Sacher "] -edition = "2018" +authors = ["Joe Sacher "] +edition = "2021" [[bin]] name = "transfer_main_purse_to_two_purses" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/transfer-purse-to-account-stored/Cargo.toml b/smart_contracts/contracts/test/transfer-purse-to-account-stored/Cargo.toml index 8b45d6fc43..f967911cea 100644 --- 
a/smart_contracts/contracts/test/transfer-purse-to-account-stored/Cargo.toml +++ b/smart_contracts/contracts/test/transfer-purse-to-account-stored/Cargo.toml @@ -1,8 +1,8 @@ [package] name = "transfer-purse-to-account-stored" version = "0.1.0" -authors = ["Michał Papierski ", "Ed Hastings "] -edition = "2018" +authors = ["Michał Papierski ", "Ed Hastings "] +edition = "2021" [[bin]] name = "transfer_purse_to_account_stored" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/transfer-purse-to-account-stored/src/main.rs b/smart_contracts/contracts/test/transfer-purse-to-account-stored/src/main.rs index 59c677adb7..e54490ae69 100644 --- a/smart_contracts/contracts/test/transfer-purse-to-account-stored/src/main.rs +++ b/smart_contracts/contracts/test/transfer-purse-to-account-stored/src/main.rs @@ -8,8 +8,10 @@ use alloc::{string::ToString, vec}; use casper_contract::contract_api::{runtime, storage}; use casper_types::{ - contracts::{EntryPoint, EntryPointAccess, EntryPointType, EntryPoints, Parameter}, - CLType, + addressable_entity::{ + EntityEntryPoint, EntryPointAccess, EntryPointType, EntryPoints, Parameter, + }, + AddressableEntityHash, CLType, EntryPointPayment, Key, }; const ENTRY_FUNCTION_NAME: &str = "transfer"; @@ -30,7 +32,7 @@ pub extern "C" fn call() { let entry_points = { let mut entry_points = EntryPoints::new(); - let entry_point = EntryPoint::new( + let entry_point = EntityEntryPoint::new( ENTRY_FUNCTION_NAME.to_string(), vec![ Parameter::new(ARG_0_NAME, CLType::ByteArray(32)), @@ -38,7 +40,8 @@ pub extern "C" fn call() { ], CLType::Unit, EntryPointAccess::Public, - EntryPointType::Session, + EntryPointType::Caller, + EntryPointPayment::Caller, ); entry_points.add_entry_point(entry_point); entry_points @@ -49,7 +52,11 @@ pub 
extern "C" fn call() { None, Some(PACKAGE_HASH_KEY_NAME.to_string()), Some(ACCESS_KEY_NAME.to_string()), + None, ); runtime::put_key(CONTRACT_VERSION, storage::new_uref(contract_version).into()); - runtime::put_key(HASH_KEY_NAME, contract_hash.into()); + runtime::put_key( + HASH_KEY_NAME, + Key::contract_entity_key(AddressableEntityHash::new(contract_hash.value())), + ); } diff --git a/smart_contracts/contracts/test/transfer-purse-to-account-with-id/Cargo.toml b/smart_contracts/contracts/test/transfer-purse-to-account-with-id/Cargo.toml index 6fbd365b0f..5338b86f32 100644 --- a/smart_contracts/contracts/test/transfer-purse-to-account-with-id/Cargo.toml +++ b/smart_contracts/contracts/test/transfer-purse-to-account-with-id/Cargo.toml @@ -1,8 +1,8 @@ [package] name = "transfer-purse-to-account-with-id" version = "0.1.0" -authors = ["Michał Papierski "] -edition = "2018" +authors = ["Michał Papierski "] +edition = "2021" [[bin]] name = "transfer_purse_to_account_with_id" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/transfer-purse-to-account/Cargo.toml b/smart_contracts/contracts/test/transfer-purse-to-account/Cargo.toml index e8dd8ef2f7..4bbdc391ad 100644 --- a/smart_contracts/contracts/test/transfer-purse-to-account/Cargo.toml +++ b/smart_contracts/contracts/test/transfer-purse-to-account/Cargo.toml @@ -1,8 +1,8 @@ [package] name = "transfer-purse-to-account" version = "0.1.0" -authors = ["Michał Papierski "] -edition = "2018" +authors = ["Michał Papierski "] +edition = "2021" [[bin]] name = "transfer_purse_to_account" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = 
"../../../../types" } diff --git a/smart_contracts/contracts/test/transfer-purse-to-account/src/lib.rs b/smart_contracts/contracts/test/transfer-purse-to-account/src/lib.rs index 75c065fdf8..216b27527c 100644 --- a/smart_contracts/contracts/test/transfer-purse-to-account/src/lib.rs +++ b/smart_contracts/contracts/test/transfer-purse-to-account/src/lib.rs @@ -1,17 +1,10 @@ #![no_std] -extern crate alloc; - -use alloc::format; - use casper_contract::{ - contract_api::{account, runtime, storage, system}, + contract_api::{account, runtime, system}, unwrap_or_revert::UnwrapOrRevert, }; -use casper_types::{account::AccountHash, ApiError, Key, URef, U512}; - -const TRANSFER_RESULT_UREF_NAME: &str = "transfer_result"; -const MAIN_PURSE_FINAL_BALANCE_UREF_NAME: &str = "final_balance"; +use casper_types::{account::AccountHash, URef, U512}; const ARG_TARGET: &str = "target"; const ARG_AMOUNT: &str = "amount"; @@ -21,17 +14,6 @@ pub fn delegate() { let target: AccountHash = runtime::get_named_arg(ARG_TARGET); let amount: U512 = runtime::get_named_arg(ARG_AMOUNT); - let transfer_result = system::transfer_from_purse_to_account(source, target, amount, None); - - let final_balance = - system::get_purse_balance(source).unwrap_or_revert_with(ApiError::User(103)); - - let result = format!("{:?}", transfer_result); - - let result_uref: Key = storage::new_uref(result).into(); - runtime::put_key(TRANSFER_RESULT_UREF_NAME, result_uref); - runtime::put_key( - MAIN_PURSE_FINAL_BALANCE_UREF_NAME, - storage::new_uref(final_balance).into(), - ); + let _transfer_result = + system::transfer_from_purse_to_account(source, target, amount, None).unwrap_or_revert(); } diff --git a/smart_contracts/contracts/test/transfer-purse-to-accounts-stored/Cargo.toml b/smart_contracts/contracts/test/transfer-purse-to-accounts-stored/Cargo.toml index 38ae53cf94..ebba2a5cfe 100644 --- a/smart_contracts/contracts/test/transfer-purse-to-accounts-stored/Cargo.toml +++ 
b/smart_contracts/contracts/test/transfer-purse-to-accounts-stored/Cargo.toml @@ -1,8 +1,8 @@ [package] name = "transfer-purse-to-accounts-stored" version = "0.1.0" -authors = ["Michał Papierski ", "Ed Hastings "] -edition = "2018" +authors = ["Michał Papierski ", "Ed Hastings "] +edition = "2021" [[bin]] name = "transfer_purse_to_accounts_stored" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/transfer-purse-to-accounts-stored/src/main.rs b/smart_contracts/contracts/test/transfer-purse-to-accounts-stored/src/main.rs index 42317a203c..9e78fe6bbb 100644 --- a/smart_contracts/contracts/test/transfer-purse-to-accounts-stored/src/main.rs +++ b/smart_contracts/contracts/test/transfer-purse-to-accounts-stored/src/main.rs @@ -3,14 +3,20 @@ extern crate alloc; -use alloc::{string::ToString, vec}; +use alloc::{collections::BTreeMap, string::ToString, vec}; -use alloc::boxed::Box; -use casper_contract::contract_api::{runtime, storage}; +use casper_contract::{ + contract_api::{account, runtime, storage, system}, + unwrap_or_revert::UnwrapOrRevert, +}; use casper_types::{ - contracts::{EntryPoint, EntryPointAccess, EntryPointType, EntryPoints, Parameter}, - CLType, + account::AccountHash, + addressable_entity::{ + EntityEntryPoint, EntryPointAccess, EntryPointType, EntryPoints, Parameter, + }, + contracts::NamedKeys, + AddressableEntityHash, CLType, CLTyped, EntryPointPayment, Key, U512, }; const ENTRY_FUNCTION_NAME: &str = "transfer"; @@ -19,47 +25,68 @@ const PACKAGE_HASH_KEY_NAME: &str = "transfer_purse_to_accounts"; const HASH_KEY_NAME: &str = "transfer_purse_to_accounts_hash"; const ACCESS_KEY_NAME: &str = "transfer_purse_to_accounts_access"; +const ARG_AMOUNT: &str = "amount"; const ARG_SOURCE: &str = "source"; const ARG_TARGETS: &str = 
"targets"; const CONTRACT_VERSION: &str = "contract_version"; +const PURSE_KEY_NAME: &str = "purse"; + #[no_mangle] pub extern "C" fn transfer() { - transfer_purse_to_accounts::delegate(); + let purse = runtime::get_key(PURSE_KEY_NAME) + .unwrap_or_revert() + .into_uref() + .unwrap_or_revert(); + transfer_purse_to_accounts::delegate(purse); } #[no_mangle] pub extern "C" fn call() { let entry_points = { let mut tmp = EntryPoints::new(); - let entry_point = EntryPoint::new( + let entry_point = EntityEntryPoint::new( ENTRY_FUNCTION_NAME.to_string(), vec![ Parameter::new(ARG_SOURCE, CLType::URef), Parameter::new( ARG_TARGETS, - CLType::Map { - key: Box::new(CLType::ByteArray(32)), - value: Box::new(CLType::U512), - }, + )>>::cl_type(), ), ], CLType::Unit, EntryPointAccess::Public, - EntryPointType::Contract, + EntryPointType::Called, + EntryPointPayment::Caller, ); tmp.add_entry_point(entry_point); tmp }; + let amount: U512 = runtime::get_named_arg(ARG_AMOUNT); + + let named_keys = { + let purse = system::create_purse(); + system::transfer_from_purse_to_purse(account::get_main_purse(), purse, amount, None) + .unwrap_or_revert(); + + let mut named_keys = NamedKeys::new(); + named_keys.insert(PURSE_KEY_NAME.to_string(), purse.into()); + named_keys + }; + let (contract_hash, contract_version) = storage::new_contract( entry_points, - None, + Some(named_keys), Some(PACKAGE_HASH_KEY_NAME.to_string()), Some(ACCESS_KEY_NAME.to_string()), + None, ); runtime::put_key(CONTRACT_VERSION, storage::new_uref(contract_version).into()); - runtime::put_key(HASH_KEY_NAME, contract_hash.into()); + runtime::put_key( + HASH_KEY_NAME, + Key::contract_entity_key(AddressableEntityHash::new(contract_hash.value())), + ); } diff --git a/smart_contracts/contracts/test/transfer-purse-to-accounts-subcall/Cargo.toml b/smart_contracts/contracts/test/transfer-purse-to-accounts-subcall/Cargo.toml index f58fd66b09..1a37c2cd62 100644 --- 
a/smart_contracts/contracts/test/transfer-purse-to-accounts-subcall/Cargo.toml +++ b/smart_contracts/contracts/test/transfer-purse-to-accounts-subcall/Cargo.toml @@ -1,8 +1,8 @@ [package] name = "transfer-purse-to-accounts-subcall" version = "0.1.0" -authors = ["Michał Papierski "] -edition = "2018" +authors = ["Michał Papierski "] +edition = "2021" [[bin]] name = "transfer_purse_to_accounts_subcall" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/transfer-purse-to-accounts-subcall/src/lib.rs b/smart_contracts/contracts/test/transfer-purse-to-accounts-subcall/src/lib.rs index 6cc993aa33..2e29956dd5 100644 --- a/smart_contracts/contracts/test/transfer-purse-to-accounts-subcall/src/lib.rs +++ b/smart_contracts/contracts/test/transfer-purse-to-accounts-subcall/src/lib.rs @@ -5,10 +5,10 @@ extern crate alloc; use alloc::collections::BTreeMap; use casper_contract::{ - contract_api::{runtime, system}, + contract_api::{account, runtime, system}, unwrap_or_revert::UnwrapOrRevert, }; -use casper_types::{account::AccountHash, runtime_args, Key, RuntimeArgs, URef, U512}; +use casper_types::{account::AccountHash, contracts::ContractHash, runtime_args, Key, URef, U512}; const ENTRYPOINT: &str = "transfer"; const ARG_SOURCE: &str = "source"; @@ -17,7 +17,7 @@ const ARG_TARGETS: &str = "targets"; const HASH_KEY_NAME: &str = "transfer_purse_to_accounts_hash"; pub fn delegate() { - let source: URef = runtime::get_named_arg(ARG_SOURCE); + let source: URef = account::get_main_purse(); let targets: BTreeMap)> = runtime::get_named_arg(ARG_TARGETS); for (target, (amount, id)) in &targets { @@ -25,11 +25,12 @@ pub fn delegate() { } let contract_hash = runtime::get_key(HASH_KEY_NAME) - .and_then(Key::into_hash) + .and_then(Key::into_entity_hash) + .map(|e_hash| 
ContractHash::new(e_hash.value())) .unwrap_or_revert(); runtime::call_contract( - contract_hash.into(), + contract_hash, ENTRYPOINT, runtime_args! { ARG_SOURCE => source, diff --git a/smart_contracts/contracts/test/transfer-purse-to-accounts/Cargo.toml b/smart_contracts/contracts/test/transfer-purse-to-accounts/Cargo.toml index 1dfc22f1ba..8d6d7ae580 100644 --- a/smart_contracts/contracts/test/transfer-purse-to-accounts/Cargo.toml +++ b/smart_contracts/contracts/test/transfer-purse-to-accounts/Cargo.toml @@ -1,8 +1,8 @@ [package] name = "transfer-purse-to-accounts" version = "0.1.0" -authors = ["Michał Papierski "] -edition = "2018" +authors = ["Michał Papierski "] +edition = "2021" [[bin]] name = "transfer_purse_to_accounts" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/transfer-purse-to-accounts/src/bin/main.rs b/smart_contracts/contracts/test/transfer-purse-to-accounts/src/bin/main.rs index 6fd4fd96c6..87b5d84730 100644 --- a/smart_contracts/contracts/test/transfer-purse-to-accounts/src/bin/main.rs +++ b/smart_contracts/contracts/test/transfer-purse-to-accounts/src/bin/main.rs @@ -1,7 +1,10 @@ #![no_std] #![no_main] +use casper_contract::contract_api::account; + #[no_mangle] pub extern "C" fn call() { - transfer_purse_to_accounts::delegate(); + let source = account::get_main_purse(); + transfer_purse_to_accounts::delegate(source); } diff --git a/smart_contracts/contracts/test/transfer-purse-to-accounts/src/lib.rs b/smart_contracts/contracts/test/transfer-purse-to-accounts/src/lib.rs index 8ac1ad5b8e..d273483952 100644 --- a/smart_contracts/contracts/test/transfer-purse-to-accounts/src/lib.rs +++ b/smart_contracts/contracts/test/transfer-purse-to-accounts/src/lib.rs @@ -10,11 +10,9 @@ use casper_contract::{ }; use 
casper_types::{account::AccountHash, URef, U512}; -const ARG_SOURCE: &str = "source"; const ARG_TARGETS: &str = "targets"; -pub fn delegate() { - let source: URef = runtime::get_named_arg(ARG_SOURCE); +pub fn delegate(source: URef) { let targets: BTreeMap)> = runtime::get_named_arg(ARG_TARGETS); for (target, (amount, id)) in targets { diff --git a/smart_contracts/contracts/test/transfer-purse-to-public-key/Cargo.toml b/smart_contracts/contracts/test/transfer-purse-to-public-key/Cargo.toml new file mode 100644 index 0000000000..9e6991380a --- /dev/null +++ b/smart_contracts/contracts/test/transfer-purse-to-public-key/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "transfer-purse-to-public-key" +version = "0.1.0" +authors = ["Michał Papierski "] +edition = "2021" + +[[bin]] +name = "transfer_purse_to_public_key" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/transfer-purse-to-public-key/src/main.rs b/smart_contracts/contracts/test/transfer-purse-to-public-key/src/main.rs new file mode 100644 index 0000000000..8012906dc7 --- /dev/null +++ b/smart_contracts/contracts/test/transfer-purse-to-public-key/src/main.rs @@ -0,0 +1,22 @@ +#![no_std] +#![no_main] + +use casper_contract::{ + contract_api::{runtime, system}, + unwrap_or_revert::UnwrapOrRevert, +}; +use casper_types::{PublicKey, U512}; + +const ARG_TARGET: &str = "target"; +const ARG_SOURCE_PURSE: &str = "source_purse"; +const ARG_AMOUNT: &str = "amount"; + +#[no_mangle] +pub extern "C" fn call() { + let source_purse = runtime::get_named_arg(ARG_SOURCE_PURSE); + let target: PublicKey = runtime::get_named_arg(ARG_TARGET); + let amount: U512 = runtime::get_named_arg(ARG_AMOUNT); + + system::transfer_from_purse_to_public_key(source_purse, target, amount, None) + .unwrap_or_revert(); +} diff --git 
a/smart_contracts/contracts/test/transfer-purse-to-purse/Cargo.toml b/smart_contracts/contracts/test/transfer-purse-to-purse/Cargo.toml index 8ca809aaa2..7d20a1fb81 100644 --- a/smart_contracts/contracts/test/transfer-purse-to-purse/Cargo.toml +++ b/smart_contracts/contracts/test/transfer-purse-to-purse/Cargo.toml @@ -2,7 +2,7 @@ name = "transfer-purse-to-purse" version = "0.1.0" authors = ["Henry Till "] -edition = "2018" +edition = "2021" [[bin]] name = "transfer_purse_to_purse" @@ -11,9 +11,6 @@ bench = false doctest = false test = false -[features] -std = ["casper-contract/std", "casper-types/std"] - [dependencies] casper-contract = { path = "../../../contract" } casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/update-associated-key/Cargo.toml b/smart_contracts/contracts/test/update-associated-key/Cargo.toml new file mode 100644 index 0000000000..732dc2124e --- /dev/null +++ b/smart_contracts/contracts/test/update-associated-key/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "update-associated-key" +version = "0.1.0" +authors = ["Michał Papierski (ARG_NEW_UPGRADE_THRESHOLD); + entity::set_action_threshold(ActionType::UpgradeManagement, Weight::new(new_threshold)) + .unwrap_or_revert() +} + +#[no_mangle] +pub extern "C" fn call() { + let entrypoints = { + let mut entrypoints = EntryPoints::new(); + let add_associated_key_entry_point = EntityEntryPoint::new( + ENTRYPOINT_ADD_ASSOCIATED_KEY, + vec![ + Parameter::new(ARG_ENTITY_ACCOUNT_HASH, CLType::ByteArray(32)), + Parameter::new(ARG_KEY_WEIGHT, CLType::U8), + ], + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + entrypoints.add_entry_point(add_associated_key_entry_point); + let manage_action_threshold_entrypoint = EntityEntryPoint::new( + ENTRYPOINT_MANAGE_ACTION_THRESHOLD, + vec![Parameter::new(ARG_NEW_UPGRADE_THRESHOLD, CLType::U8)], + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + 
EntryPointPayment::Caller, + ); + entrypoints.add_entry_point(manage_action_threshold_entrypoint); + entrypoints + }; + let (contract_hash, _) = storage::new_contract( + entrypoints, + None, + Some(PACKAGE_HASH_KEY_NAME.to_string()), + Some(ACCESS_UREF_NAME.to_string()), + None, + ); + runtime::put_key( + CONTRACT_HASH_NAME, + Key::contract_entity_key(AddressableEntityHash::new(contract_hash.value())), + ); +} diff --git a/smart_contracts/contracts/test/verify-signature/Cargo.toml b/smart_contracts/contracts/test/verify-signature/Cargo.toml new file mode 100644 index 0000000000..eb379ed490 --- /dev/null +++ b/smart_contracts/contracts/test/verify-signature/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "verify-signature" +version = "0.1.0" +authors = ["Igor Bunar "] +edition = "2021" + +[[bin]] +name = "verify_signature" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/test/verify-signature/src/main.rs b/smart_contracts/contracts/test/verify-signature/src/main.rs new file mode 100644 index 0000000000..1a9b8ade2c --- /dev/null +++ b/smart_contracts/contracts/test/verify-signature/src/main.rs @@ -0,0 +1,26 @@ +#![no_std] +#![no_main] + +extern crate alloc; +use alloc::string::String; +use casper_contract::contract_api::{cryptography, runtime}; +use casper_types::{ + bytesrepr::{Bytes, FromBytes}, + PublicKey, Signature, +}; + +const ARG_MESSAGE: &str = "message"; +const ARG_SIGNATURE_BYTES: &str = "signature_bytes"; +const ARG_PUBLIC_KEY: &str = "public_key"; + +#[no_mangle] +pub extern "C" fn call() { + let message: String = runtime::get_named_arg(ARG_MESSAGE); + let signature_bytes: Bytes = runtime::get_named_arg(ARG_SIGNATURE_BYTES); + let public_key: PublicKey = runtime::get_named_arg(ARG_PUBLIC_KEY); + + let (signature, _) = Signature::from_bytes(&signature_bytes).unwrap(); + let verify = 
cryptography::verify_signature(message.as_bytes(), &signature, &public_key); + + assert!(verify.is_ok()); +} diff --git a/smart_contracts/contracts/tutorial/counter-installer/Cargo.toml b/smart_contracts/contracts/tutorial/counter-installer/Cargo.toml new file mode 100644 index 0000000000..4ee9f17165 --- /dev/null +++ b/smart_contracts/contracts/tutorial/counter-installer/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "counter-installer" +version = "0.1.0" +authors = ["Ed Hastings ", "Michał Papierski "] +edition = "2021" + +[[bin]] +name = "counter_installer" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/tutorial/counter-installer/src/main.rs b/smart_contracts/contracts/tutorial/counter-installer/src/main.rs new file mode 100644 index 0000000000..952fc2cb72 --- /dev/null +++ b/smart_contracts/contracts/tutorial/counter-installer/src/main.rs @@ -0,0 +1,100 @@ +#![no_std] +#![no_main] + +#[cfg(not(target_arch = "wasm32"))] +compile_error!("target arch should be wasm32: compile with '--target wasm32-unknown-unknown'"); + +extern crate alloc; + +use alloc::{ + string::{String, ToString}, + vec::Vec, +}; +use casper_contract::{ + contract_api::{runtime, storage}, + unwrap_or_revert::UnwrapOrRevert, +}; +use casper_types::{ + addressable_entity::{EntityEntryPoint, EntryPointAccess, EntryPointType, EntryPoints}, + api_error::ApiError, + contracts::NamedKeys, + CLType, CLValue, EntryPointPayment, Key, URef, +}; + +const COUNT_KEY: &str = "count"; +const COUNTER_INC: &str = "counter_inc"; +const COUNTER_GET: &str = "counter_get"; +const COUNTER_KEY: &str = "counter"; +const CONTRACT_VERSION_KEY: &str = "version"; + +#[no_mangle] +pub extern "C" fn counter_inc() { + let uref: URef = runtime::get_key(COUNT_KEY) + .unwrap_or_revert_with(ApiError::MissingKey) + .into_uref() + 
.unwrap_or_revert_with(ApiError::UnexpectedKeyVariant); + storage::add(uref, 1); +} + +#[no_mangle] +pub extern "C" fn counter_get() { + let uref: URef = runtime::get_key(COUNT_KEY) + .unwrap_or_revert_with(ApiError::MissingKey) + .into_uref() + .unwrap_or_revert_with(ApiError::UnexpectedKeyVariant); + let result: i32 = storage::read(uref) + .unwrap_or_revert_with(ApiError::Read) + .unwrap_or_revert_with(ApiError::ValueNotFound); + let typed_result = CLValue::from_t(result).unwrap_or_revert(); + runtime::ret(typed_result); +} + +#[no_mangle] +pub extern "C" fn call() { + // Initialize counter to 0. + let counter_local_key = storage::new_uref(0_i32); + + // Create initial named keys of the contract. + let mut counter_named_keys = NamedKeys::new(); + let key_name = String::from(COUNT_KEY); + counter_named_keys.insert(key_name, counter_local_key.into()); + + // Create entry points to get the counter value and to increment the counter by 1. + let mut counter_entry_points = EntryPoints::new(); + counter_entry_points.add_entry_point(EntityEntryPoint::new( + COUNTER_INC, + Vec::new(), + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + )); + counter_entry_points.add_entry_point(EntityEntryPoint::new( + COUNTER_GET, + Vec::new(), + CLType::I32, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + )); + + let (stored_contract_hash, contract_version) = storage::new_contract( + counter_entry_points, + Some(counter_named_keys), + Some("counter_package_name".to_string()), + Some("counter_access_uref".to_string()), + None, + ); + + // To create a locked contract instead, use new_locked_contract and throw away the contract + // version returned + // let (stored_contract_hash, _) = + // storage::new_locked_contract(counter_entry_points, Some(counter_named_keys), None, None); + + // The current version of the contract will be reachable through named keys + let version_uref = 
storage::new_uref(contract_version); + runtime::put_key(CONTRACT_VERSION_KEY, version_uref.into()); + + // Hash of the installed contract will be reachable through named keys + runtime::put_key(COUNTER_KEY, Key::Hash(stored_contract_hash.value())); +} diff --git a/smart_contracts/contracts/tutorial/hello-world/Cargo.toml b/smart_contracts/contracts/tutorial/hello-world/Cargo.toml new file mode 100644 index 0000000000..b35bcbc6a2 --- /dev/null +++ b/smart_contracts/contracts/tutorial/hello-world/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "hello-world" +version = "0.1.0" +authors = ["darthsiroftardis ", "Michał Papierski "] +edition = "2021" + +[[bin]] +name = "hello_world" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/tutorial/hello-world/src/main.rs b/smart_contracts/contracts/tutorial/hello-world/src/main.rs new file mode 100644 index 0000000000..673568debd --- /dev/null +++ b/smart_contracts/contracts/tutorial/hello-world/src/main.rs @@ -0,0 +1,30 @@ +#![no_std] +#![no_main] + +extern crate alloc; + +use alloc::string::String; +use casper_contract::contract_api::{runtime, storage}; +use casper_types::{Key, URef}; + +const KEY: &str = "special_value"; +const ARG_MESSAGE: &str = "message"; + +fn store(value: String) { + // Store `value` under a new unforgeable reference. + let value_ref: URef = storage::new_uref(value); + + // Wrap the unforgeable reference in a value of type `Key`. + let value_key: Key = value_ref.into(); + + // Store this key under the name "special_value" in context-local storage. + runtime::put_key(KEY, value_key); +} + +// All session code must have a `call` entrypoint. +#[no_mangle] +pub extern "C" fn call() { + // Get the optional first argument supplied to the argument. 
+ let value: String = runtime::get_named_arg(ARG_MESSAGE); + store(value); +} diff --git a/smart_contracts/contracts/tutorial/increment-counter/Cargo.toml b/smart_contracts/contracts/tutorial/increment-counter/Cargo.toml new file mode 100644 index 0000000000..4256326a16 --- /dev/null +++ b/smart_contracts/contracts/tutorial/increment-counter/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "increment-counter" +version = "1.0.0" +authors = ["Maciej Zielinski", "Michał Papierski "] +edition = "2021" + +[[bin]] +name = "increment_counter" +path = "src/main.rs" +bench = false +doctest = false +test = false + +[dependencies] +casper-contract = { path = "../../../contract" } +casper-types = { path = "../../../../types" } diff --git a/smart_contracts/contracts/tutorial/increment-counter/src/main.rs b/smart_contracts/contracts/tutorial/increment-counter/src/main.rs new file mode 100644 index 0000000000..befcf7d46a --- /dev/null +++ b/smart_contracts/contracts/tutorial/increment-counter/src/main.rs @@ -0,0 +1,45 @@ +#![no_std] +#![no_main] + +#[cfg(not(target_arch = "wasm32"))] +compile_error!("target arch should be wasm32: compile with '--target wasm32-unknown-unknown'"); + +extern crate alloc; + +use casper_types::{ApiError, Key, RuntimeArgs}; + +use casper_contract::{contract_api::runtime, unwrap_or_revert::UnwrapOrRevert}; +use casper_types::contracts::ContractHash; + +const COUNTER_KEY: &str = "counter"; +const COUNTER_INC: &str = "counter_inc"; +const COUNTER_GET: &str = "counter_get"; + +#[no_mangle] +pub extern "C" fn call() { + // Read the Counter smart contract's ContractHash. + let contract_hash = { + let counter_uref = runtime::get_key(COUNTER_KEY).unwrap_or_revert_with(ApiError::GetKey); + if let Key::Hash(hash) = counter_uref { + ContractHash::new(hash) + } else { + runtime::revert(ApiError::User(66)); + } + }; + + // Call Counter to get the current value. 
+ let current_counter_value: u32 = + runtime::call_contract(contract_hash, COUNTER_GET, RuntimeArgs::new()); + + // Call Counter to increment the value. + let _: () = runtime::call_contract(contract_hash, COUNTER_INC, RuntimeArgs::new()); + + // Call Counter to get the new value. + let new_counter_value: u32 = + runtime::call_contract(contract_hash, COUNTER_GET, RuntimeArgs::new()); + + // Expect counter to increment by one. + if new_counter_value - current_counter_value != 1u32 { + runtime::revert(ApiError::User(67)); + } +} diff --git a/smart_contracts/contracts/vm2/vm2-cep18-caller/Cargo.toml b/smart_contracts/contracts/vm2/vm2-cep18-caller/Cargo.toml new file mode 100644 index 0000000000..2ea3ed7afa --- /dev/null +++ b/smart_contracts/contracts/vm2/vm2-cep18-caller/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "vm2-cep18-caller" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html +[lib] +crate-type = ["cdylib", "rlib"] + +[dependencies] +casper-contract-sdk = { path = "../../../sdk" } +vm2-cep18 = { path = "../vm2-cep18" } +borsh = { version = "1.5", features = ["derive"] } diff --git a/smart_contracts/contracts/vm2/vm2-cep18-caller/build.rs b/smart_contracts/contracts/vm2/vm2-cep18-caller/build.rs new file mode 100644 index 0000000000..c69511e9de --- /dev/null +++ b/smart_contracts/contracts/vm2/vm2-cep18-caller/build.rs @@ -0,0 +1,20 @@ +// use std::{env, fs, path::Path}; + +// use casper_contract_sdk_codegen::Codegen; + +// const SCHEMA: &str = include_str!("cep18_schema.json"); + +fn main() { + // Check if target arch is wasm32 and set link flags accordingly + if std::env::var("TARGET").unwrap() == "wasm32-unknown-unknown" { + println!("cargo:rustc-link-arg=--import-memory"); + println!("cargo:rustc-link-arg=--export-table"); + } + + // let mut codegen = Codegen::from_str(SCHEMA).unwrap(); + // let source = codegen.gen(); + + // let target_dir = 
env::var_os("OUT_DIR").unwrap(); + // let target_path = Path::new(&target_dir).join("cep18_schema.rs"); + // fs::write(&target_path, source).unwrap(); +} diff --git a/smart_contracts/contracts/vm2/vm2-cep18-caller/cep18_schema.json b/smart_contracts/contracts/vm2/vm2-cep18-caller/cep18_schema.json new file mode 120000 index 0000000000..a65b742cc6 --- /dev/null +++ b/smart_contracts/contracts/vm2/vm2-cep18-caller/cep18_schema.json @@ -0,0 +1 @@ +/Users/michal/Dev/casperlabs-node/smart_contracts/sdk-codegen/tests/fixtures/cep18_schema.json \ No newline at end of file diff --git a/smart_contracts/contracts/vm2/vm2-cep18-caller/src/lib.rs b/smart_contracts/contracts/vm2/vm2-cep18-caller/src/lib.rs new file mode 100644 index 0000000000..b6ee10bd38 --- /dev/null +++ b/smart_contracts/contracts/vm2/vm2-cep18-caller/src/lib.rs @@ -0,0 +1,45 @@ +#![cfg_attr(target_family = "wasm", no_main)] + +pub mod exports { + use casper_contract_sdk::{ + contrib::cep18::{CEP18Ext, MintableExt}, + prelude::*, + types::{Address, U256}, + ContractHandle, + }; + use vm2_cep18::TokenContractRef; + + #[casper(export)] + pub fn call(address: Address) -> String { + use casper_contract_sdk::casper::Entity; + + log!("Hello {address:?}"); + let handle = ContractHandle::::from_address(address); + + // Mint tokens, then check the balance of the account that called this contract + handle + .call(|contract| contract.mint(Entity::Account([99; 32]), U256::from(100u64))) + .expect("Should call") + .expect("Should mint"); + + let balance_result = handle + .call(|contract| contract.balance_of(Entity::Account([99; 32]))) + .expect("Should call"); + + assert_eq!(balance_result, U256::from(100u64)); + + let name_result = handle + .call(|contract| contract.name()) + .expect("Should call"); + log!("Name: {name_result:?}"); + let transfer_result = handle + .call(|contract| contract.transfer(Entity::Account([100; 32]), U256::from(100u64))) + .expect("Should call"); + + log!("Transfer: {transfer_result:?}"); + + 
log!("Success"); + + name_result + } +} diff --git a/smart_contracts/contracts/vm2/vm2-cep18/Cargo.toml b/smart_contracts/contracts/vm2/vm2-cep18/Cargo.toml new file mode 100644 index 0000000000..94e82a4772 --- /dev/null +++ b/smart_contracts/contracts/vm2/vm2-cep18/Cargo.toml @@ -0,0 +1,21 @@ +[package] +name = "vm2-cep18" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html +[lib] +crate-type = ["cdylib", "rlib"] + +[dependencies] +casper-contract-sdk = { path = "../../../sdk" } + +[target.'cfg(not(target_arch = "wasm32"))'.dependencies] +serde_json = "1" +casper-contract-sdk = { path = "../../../sdk", features = ["cli"] } + +[dev-dependencies] +casper-contract-sdk-codegen = { path = "../../../sdk_codegen" } + +[build-dependencies] +casper-contract-sdk-codegen = { path = "../../../sdk_codegen" } diff --git a/smart_contracts/contracts/vm2/vm2-cep18/build.rs b/smart_contracts/contracts/vm2/vm2-cep18/build.rs new file mode 100644 index 0000000000..d594b41916 --- /dev/null +++ b/smart_contracts/contracts/vm2/vm2-cep18/build.rs @@ -0,0 +1,22 @@ +// use std::{env, fs, path::Path}; + +// use casper_contract_sdk_codegen::Codegen; + +// const SCHEMA: &str = include_str!("cep18_schema.json"); + +fn main() { + // Check if target arch is wasm32 and set link flags accordingly + if std::env::var("TARGET").unwrap() == "wasm32-unknown-unknown" { + println!("cargo:rustc-link-arg=--import-memory"); + println!("cargo:rustc-link-arg=--export-table"); + } + + // casper_contract_sdk::build_flags(); + + // let mut codegen = Codegen::from_str(SCHEMA).unwrap(); + // let source = codegen.gen(); + + // let target_dir = env::var_os("OUT_DIR").unwrap(); + // let target_path = Path::new(&target_dir).join("cep18_schema.rs"); + // fs::write(&target_path, source).unwrap(); +} diff --git a/smart_contracts/contracts/vm2/vm2-cep18/cep18_schema.json b/smart_contracts/contracts/vm2/vm2-cep18/cep18_schema.json new file 
mode 120000 index 0000000000..a65b742cc6 --- /dev/null +++ b/smart_contracts/contracts/vm2/vm2-cep18/cep18_schema.json @@ -0,0 +1 @@ +/Users/michal/Dev/casperlabs-node/smart_contracts/sdk-codegen/tests/fixtures/cep18_schema.json \ No newline at end of file diff --git a/smart_contracts/contracts/vm2/vm2-cep18/src/lib.rs b/smart_contracts/contracts/vm2/vm2-cep18/src/lib.rs new file mode 100644 index 0000000000..6e9cb63ec0 --- /dev/null +++ b/smart_contracts/contracts/vm2/vm2-cep18/src/lib.rs @@ -0,0 +1,275 @@ +use casper_contract_sdk::{ + contrib::access_control::{AccessControl, AccessControlExt, AccessControlState}, + prelude::*, + types::U256, +}; + +use casper_contract_sdk::contrib::cep18::{ + Burnable, BurnableExt, CEP18Ext, CEP18State, Mintable, MintableExt, ADMIN_ROLE, CEP18, +}; + +#[casper(contract_state)] +pub struct TokenContract { + state: CEP18State, + access_control: AccessControlState, +} + +impl Default for TokenContract { + fn default() -> Self { + panic!("nope"); + } + // +} + +#[casper] +impl TokenContract { + #[casper(constructor)] + pub fn new(token_name: String) -> Self { + // TODO: If argument has same name as another entrypoint there's a compile error for some + // reason, so can't use "name" + let mut state = CEP18State::new(&token_name, "Default symbol", 8, U256::from(0u64)); + state.enable_mint_burn = true; + + let mut token = Self { + state, + access_control: AccessControlState::default(), + }; + + let caller = casper::get_caller(); + token.grant_role(caller, ADMIN_ROLE); + + // Give caller some tokens + token.mint(caller, U256::from(10_000u64)).expect("Mint"); + + token + } + + pub fn my_balance(&self) -> U256 { + CEP18::state(self) + .balances + .get(&casper::get_caller()) + .unwrap_or_default() + } +} + +#[casper(path = casper_contract_sdk::contrib::cep18)] +impl CEP18 for TokenContract { + fn state(&self) -> &CEP18State { + &self.state + } + + fn state_mut(&mut self) -> &mut CEP18State { + &mut self.state + } +} + +#[casper(path = 
casper_contract_sdk::contrib::access_control)] +impl AccessControl for TokenContract { + fn state(&self) -> &AccessControlState { + &self.access_control + } + + fn state_mut(&mut self) -> &mut AccessControlState { + &mut self.access_control + } +} + +#[casper(path = casper_contract_sdk::contrib::cep18)] +impl Mintable for TokenContract {} + +#[casper(path = casper_contract_sdk::contrib::cep18)] +impl Burnable for TokenContract {} + +#[cfg(test)] +mod tests { + use super::*; + + use casper_contract_sdk::{ + casper::{ + self, + native::{ + current_environment, dispatch_with, with_current_environment, Environment, + DEFAULT_ADDRESS, + }, + Entity, + }, + casper_executor_wasm_common::keyspace::Keyspace, + contrib::cep18::Cep18Error, + ContractHandle, ToCallData, + }; + + const ALICE: Entity = Entity::Account([1; 32]); + const BOB: Entity = Entity::Account([2; 32]); + + #[test] + fn it_works() { + let stub = Environment::new(Default::default(), DEFAULT_ADDRESS); + + let result = casper::native::dispatch_with(stub, || { + let mut contract = TokenContract::new("Foo Token".to_string()); + + assert_eq!(contract.require_any_role(&[ADMIN_ROLE]), Ok(())); + + assert_eq!(contract.name(), "Foo Token"); + assert_eq!(contract.balance_of(ALICE), U256::from(0u64)); + assert_eq!(contract.balance_of(BOB), U256::from(0u64)); + + contract.approve(BOB, U256::from(111u64)).unwrap(); + assert_eq!(contract.balance_of(ALICE), U256::from(0u64)); + contract.mint(ALICE, U256::from(1000u64)).unwrap(); + assert_eq!(contract.balance_of(ALICE), U256::from(1000u64)); + + // Caller has 10k tokens mintes (coming from constructor) + assert_eq!( + contract.balance_of(casper::get_caller()), + U256::from(10_000u64) + ); + assert_eq!( + contract.transfer(ALICE, U256::from(10_001u64)), + Err(Cep18Error::InsufficientBalance) + ); + assert_eq!(contract.transfer(ALICE, U256::from(10_000u64)), Ok(())); + }); + assert!(matches!(result, Ok(()))); + } + + #[test] + fn e2e() { + // let db = 
casper::native::Container::default(); + // let env = Environment::new(db.clone(), DEFAULT_ADDRESS); + + let result = casper::native::dispatch(move || { + assert_eq!(casper::get_caller(), DEFAULT_ADDRESS); + + let constructor = TokenContractRef::new("Foo Token".to_string()); + + // casper_call(address, value, selector!("nme"), ()); + let ctor_input_data = constructor.input_data(); + let create_result = casper::create( + None, + 0, + Some(constructor.entry_point()), + ctor_input_data.as_ref().map(|data| data.as_slice()), + None, + ) + .expect("Should create"); + + let new_env = with_current_environment(|env| env); + let new_env = new_env.smart_contract(Entity::Contract(create_result.contract_address)); + dispatch_with(new_env, || { + // This is the caller of the contract + casper::read_into_vec(Keyspace::State) + .expect("ok") + .expect("ok"); + }) + .unwrap(); + + // assert_eq!(casper::get_caller(), DEFAULT_ADDRESS); + + let cep18_handle = + ContractHandle::::from_address(create_result.contract_address); + + { + // As a builder that allows you to specify value to pass etc. 
+ cep18_handle + .build_call() + .with_transferred_value(0) + .call(|cep18| cep18.name()) + .expect("Should call"); + } + + let name1: String = cep18_handle + .build_call() + .call(|cep18| cep18.name()) + .expect("Should call"); + + let name2: String = cep18_handle + .build_call() + .call(|cep18| cep18.name()) + .expect("Should call"); + + assert_eq!(name1, name2); + assert_eq!(name2, "Foo Token"); + let symbol: String = cep18_handle + .build_call() + .call(|cep18| cep18.symbol()) + .expect("Should call"); + assert_eq!(symbol, "Default symbol"); + + let alice_balance: U256 = cep18_handle + .build_call() + .call(|cep18| cep18.balance_of(ALICE)) + .expect("Should call"); + assert_eq!(alice_balance, U256::from(0u64)); + + let bob_balance: U256 = cep18_handle + .build_call() + .call(|cep18| cep18.balance_of(BOB)) + .expect("Should call"); + assert_eq!(bob_balance, U256::from(0u64)); + + let _mint_succeed: () = cep18_handle + .build_call() + .call(|cep18| cep18.mint(ALICE, U256::from(1000u64))) + .expect("Should succeed") + .expect("Mint succeeded"); + + let alice_balance_after: U256 = cep18_handle + .build_call() + .call(|cep18| cep18.balance_of(ALICE)) + .expect("Should call"); + assert_eq!(alice_balance_after, U256::from(1000u64)); + + // Default account -> ALICE + + let default_addr_balance: U256 = cep18_handle + .build_call() + .call(|cep18| cep18.balance_of(DEFAULT_ADDRESS)) + .expect("Should call"); + assert_eq!(default_addr_balance, U256::from(10_000u64)); + + assert_eq!( + cep18_handle + .build_call() + .call(|cep18| cep18.transfer(ALICE, U256::from(10_001u64))) + .expect("Should call"), + Err(Cep18Error::InsufficientBalance) + ); + assert_eq!(casper::get_caller(), DEFAULT_ADDRESS); + + let alice_env = current_environment().session(ALICE); + + casper::native::dispatch_with(alice_env, || { + assert_eq!(casper::get_caller(), ALICE); + assert_eq!( + cep18_handle + .call(|cep18| cep18.my_balance()) + .expect("Should call"), + U256::from(1000u64) + ); + assert_eq!( 
+ cep18_handle + .build_call() + .call(|cep18| cep18.transfer(BOB, U256::from(1u64))) + .expect("Should call"), + Ok(()) + ); + }) + .expect("Success"); + + let bob_balance = cep18_handle + .build_call() + .call(|cep18| cep18.balance_of(BOB)) + .expect("Should call"); + assert_eq!(bob_balance, U256::from(1u64)); + + let alice_balance = cep18_handle + .build_call() + .call(|cep18| cep18.balance_of(ALICE)) + .expect("Should call"); + assert_eq!(alice_balance, U256::from(999u64)); + }); + + assert!(matches!(result, Ok(()))); + } +} diff --git a/smart_contracts/contracts/vm2/vm2-cep18/tests/integration._rs b/smart_contracts/contracts/vm2/vm2-cep18/tests/integration._rs new file mode 100644 index 0000000000..762ea66f34 --- /dev/null +++ b/smart_contracts/contracts/vm2/vm2-cep18/tests/integration._rs @@ -0,0 +1,47 @@ +use casper_contract_sdk::{ + host::native::{dispatch_with, with_stub, Stub}, + Contract, +}; +use casper_contract_sdk_codegen::support::IntoResult; +use vm2_cep18::contract::CEP18; + +mod bindings { + include!(concat!(env!("OUT_DIR"), "/cep18_schema.rs")); +} + +#[test] +fn foo() { + let stub = Stub::default(); + + let ret = dispatch_with(stub, || { + let client = bindings::CEP18Client::new::("Token Name".to_string()) + .expect("Constructor should work"); + + // Calling the `transfer` entry point with the following arguments: + let transfer_call_result = client + .transfer([1; 32], 42) + .expect("Calling transfer entry point should work"); + + assert!(!transfer_call_result.did_revert()); + + // Actual returned data, deserialized from the returned bytes. + let transfer_return_value = transfer_call_result.into_return_value(); + + assert_eq!( + transfer_return_value.clone(), + bindings::Result_____vm2_cep18__error__Cep18Error_::Err( + bindings::vm2_cep18__error__Cep18Error::InsufficientBalance(()) + ) + ); + + // Codegen can convert into standard Result type. 
+ assert_eq!( + transfer_return_value.into_result(), + Err(bindings::vm2_cep18__error__Cep18Error::InsufficientBalance( + () + )) + ); + }); + + assert_eq!(ret, Ok(())); +} diff --git a/smart_contracts/contracts/vm2/vm2-flipper/Cargo.toml b/smart_contracts/contracts/vm2/vm2-flipper/Cargo.toml new file mode 100644 index 0000000000..53de6fe3e0 --- /dev/null +++ b/smart_contracts/contracts/vm2/vm2-flipper/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "vm2-flipper" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html +[lib] +crate-type = ["cdylib", "rlib"] + +[dependencies] +casper-contract-sdk = { path = "../../../sdk" } diff --git a/smart_contracts/contracts/vm2/vm2-flipper/build.rs b/smart_contracts/contracts/vm2/vm2-flipper/build.rs new file mode 100644 index 0000000000..12d5fa6ba6 --- /dev/null +++ b/smart_contracts/contracts/vm2/vm2-flipper/build.rs @@ -0,0 +1,7 @@ +fn main() { + // Check if target arch is wasm32 and set link flags accordingly + if std::env::var("TARGET").unwrap() == "wasm32-unknown-unknown" { + println!("cargo:rustc-link-arg=--import-memory"); + println!("cargo:rustc-link-arg=--export-table"); + } +} diff --git a/smart_contracts/contracts/vm2/vm2-flipper/src/lib.rs b/smart_contracts/contracts/vm2/vm2-flipper/src/lib.rs new file mode 100644 index 0000000000..a61da9ca71 --- /dev/null +++ b/smart_contracts/contracts/vm2/vm2-flipper/src/lib.rs @@ -0,0 +1,53 @@ +#![cfg_attr(target_arch = "wasm32", no_main)] +#![cfg_attr(target_arch = "wasm32", no_std)] + +use casper_contract_sdk::prelude::*; + +/// This contract implements a simple flipper. +#[casper(contract_state)] +pub struct Flipper { + /// The current state of the flipper. 
+ value: bool, +} + +impl Default for Flipper { + fn default() -> Self { + panic!("Unable to instantiate contract without a constructor"); + } +} + +#[casper] +impl Flipper { + #[casper(constructor)] + pub fn new(init_value: bool) -> Self { + Self { value: init_value } + } + + #[casper(constructor)] + pub fn default() -> Self { + Self::new(Default::default()) + } + + pub fn flip(&mut self) { + self.value = !self.value; + } + + pub fn get(&self) -> bool { + self.value + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_flipper() { + let mut flipper = Flipper::new(false); + assert_eq!(flipper.get(), false); + flipper.flip(); + assert_eq!(flipper.get(), true); + flipper.flip(); + assert_eq!(flipper.get(), false); + } +} diff --git a/smart_contracts/contracts/vm2/vm2-harness/Cargo.toml b/smart_contracts/contracts/vm2/vm2-harness/Cargo.toml new file mode 100644 index 0000000000..6cf4079f23 --- /dev/null +++ b/smart_contracts/contracts/vm2/vm2-harness/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "vm2-harness" +version = "0.1.0" +edition = "2021" + +[lib] +crate-type = ["cdylib", "rlib"] + +[dependencies] +casper-executor-wasm-common = { path = "../../../../executor/wasm_common" } +casper-contract-macros = { path = "../../../macros" } +casper-contract-sdk = { path = "../../../sdk" } +impls = "1" +thiserror = "2" + +[target.'cfg(not(target_arch = "wasm32"))'.dependencies] +serde_json = "1" diff --git a/smart_contracts/contracts/vm2/vm2-harness/build.rs b/smart_contracts/contracts/vm2/vm2-harness/build.rs new file mode 100644 index 0000000000..12d5fa6ba6 --- /dev/null +++ b/smart_contracts/contracts/vm2/vm2-harness/build.rs @@ -0,0 +1,7 @@ +fn main() { + // Check if target arch is wasm32 and set link flags accordingly + if std::env::var("TARGET").unwrap() == "wasm32-unknown-unknown" { + println!("cargo:rustc-link-arg=--import-memory"); + println!("cargo:rustc-link-arg=--export-table"); + } +} diff --git 
a/smart_contracts/contracts/vm2/vm2-harness/src/contracts.rs b/smart_contracts/contracts/vm2/vm2-harness/src/contracts.rs new file mode 100644 index 0000000000..d4b6fe37ba --- /dev/null +++ b/smart_contracts/contracts/vm2/vm2-harness/src/contracts.rs @@ -0,0 +1,3 @@ +pub mod harness; +pub mod no_fallback; +pub mod token_owner; diff --git a/smart_contracts/contracts/vm2/vm2-harness/src/contracts/harness.rs b/smart_contracts/contracts/vm2/vm2-harness/src/contracts/harness.rs new file mode 100644 index 0000000000..2924f2e013 --- /dev/null +++ b/smart_contracts/contracts/vm2/vm2-harness/src/contracts/harness.rs @@ -0,0 +1,452 @@ +use std::{ + collections::{BTreeSet, HashMap, LinkedList}, + ptr::NonNull, +}; + +use casper_contract_macros::casper; +use casper_contract_sdk::{ + casper::{self, Entity}, + casper_executor_wasm_common::{ + entry_point::{ + ENTRY_POINT_PAYMENT_CALLER, ENTRY_POINT_PAYMENT_DIRECT_INVOCATION_ONLY, + ENTRY_POINT_PAYMENT_SELF_ONWARD, + }, + error::CommonResult, + keyspace::Keyspace, + }, + collections::Map, + log, revert, + types::CallError, + ContractHandle, +}; + +use crate::traits::{DepositExt, DepositRef}; + +pub(crate) const INITIAL_GREETING: &str = "This is initial data set from a constructor"; +pub(crate) const BALANCES_PREFIX: &str = "b"; + +#[derive(Debug)] +#[casper(contract_state)] +pub struct Harness { + counter: u64, + greeting: String, + address_inside_constructor: Option, + balances: Map, + block_time: u64, +} + +// #[casper(path = crate::traits)] +// impl Fallback for Harness { +// fn fallback(&mut self) { +// // Called when no entrypoint is matched +// // +// // Is invoked when +// // a) user performs plan CSPR transfer (not a contract call) +// // a.1) if there's no fallback entrypoint, the transfer will fail +// // a.2) if there's fallback entrypoint, it will be called +// // b) user calls a contract with no matching entrypoint +// // b.1) if there's no fallback entrypoint, the call will fail +// // b.2) if there's fallback 
entrypoint, it will be called and user can + +// log!( +// "Harness received fallback entrypoint value={}", +// host::get_value() +// ); +// } +// } + +#[derive(Debug, thiserror::Error, PartialEq)] +#[casper] +pub enum CustomError { + #[error("foo")] + Foo, + #[error("bar")] + Bar = 42, + #[error("error with body {0}")] + WithBody(String), + #[error("error with named variant name={name}; age={age}")] + Named { name: String, age: u64 }, + #[error("transfer error {0}")] + Transfer(String), + #[error("deposit error {0}")] + Deposit(CallError), +} + +impl Default for Harness { + fn default() -> Self { + Self { + counter: 0, + greeting: "Default value".to_string(), + address_inside_constructor: None, + balances: Map::new(BALANCES_PREFIX), + block_time: 0, + } + } +} + +pub type Result2 = Result<(), CustomError>; + +#[casper] +impl Harness { + // #[casper(event)] + // type TestMessage; + + #[casper(constructor)] + pub fn constructor_with_args(who: String) -> Self { + // Event::register(); + + log!("👋 Hello from constructor with args: {who}"); + + assert_eq!( + casper::write(Keyspace::PaymentInfo("this does not exists"), &[0]), + Err(CommonResult::NotFound) + ); + + { + for payment_info in [ + ENTRY_POINT_PAYMENT_CALLER, + ENTRY_POINT_PAYMENT_DIRECT_INVOCATION_ONLY, + ENTRY_POINT_PAYMENT_SELF_ONWARD, + ] { + casper::write(Keyspace::PaymentInfo("counter"), &[payment_info]).unwrap(); + + let mut buffer = [255; 1]; + assert_eq!( + casper::read(Keyspace::PaymentInfo("counter"), |size| { + assert_eq!(size, 1, "Size should be 1"); + NonNull::new(&mut buffer[0]) + }), + Ok(Some(())) + ); + assert_eq!(&buffer, &[payment_info]); + } + + assert_eq!( + casper::write(Keyspace::PaymentInfo("counter"), &[255, 255]), + Err(CommonResult::InvalidInput) + ); + } + + Self { + counter: 0, + greeting: format!("Hello, {who}!"), + address_inside_constructor: Some(casper::get_caller()), + balances: Map::new(BALANCES_PREFIX), + block_time: casper::get_block_time(), + } + } + + 
#[casper(constructor)] + pub fn failing_constructor(who: String) -> Self { + log!("👋 Hello from failing constructor with args: {who}"); + revert!(); + } + + #[casper(constructor)] + pub fn trapping_constructor() -> Self { + log!("👋 Hello from trapping constructor"); + // TODO: Storage doesn't fork as of yet, need to integrate casper-storage crate and leverage + // the tracking copy. + panic!("This will revert the execution of this constructor and won't create a new package"); + } + + #[casper(constructor)] + pub fn initialize() -> Self { + log!("👋 Hello from constructor"); + Self { + counter: 0, + greeting: INITIAL_GREETING.to_string(), + address_inside_constructor: Some(casper::get_caller()), + balances: Map::new(BALANCES_PREFIX), + block_time: casper::get_block_time(), + } + } + + #[casper(constructor, payable)] + pub fn payable_constructor() -> Self { + log!( + "👋 Hello from payable constructor value={}", + casper::transferred_value() + ); + Self { + counter: 0, + greeting: INITIAL_GREETING.to_string(), + address_inside_constructor: Some(casper::get_caller()), + balances: Map::new(BALANCES_PREFIX), + block_time: casper::get_block_time(), + } + } + + #[casper(constructor, payable)] + pub fn payable_failing_constructor() -> Self { + log!( + "👋 Hello from payable failign constructor value={}", + casper::transferred_value() + ); + revert!(); + } + + #[casper(constructor, payable)] + pub fn payable_trapping_constructor() -> Self { + log!( + "👋 Hello from payable trapping constructor value={}", + casper::transferred_value() + ); + panic!("This will revert the execution of this constructor and won't create a new package") + } + + pub fn get_greeting(&self) -> &str { + &self.greeting + } + + pub fn increment_counter(&mut self) { + self.counter += 1; + } + + pub fn counter(&self) -> u64 { + self.counter + } + + pub fn set_greeting(&mut self, greeting: String) { + self.counter += 1; + log!("Saving greeting {}", greeting); + self.greeting = greeting; + } + + pub fn 
emit_unreachable_trap(&mut self) -> ! { + self.counter += 1; + panic!("unreachable"); + } + + #[casper(revert_on_error)] + pub fn emit_revert_with_data(&mut self) -> Result<(), CustomError> { + // revert(code), ret(bytes) + + // casper_return(flags, bytes) flags == 0, flags & FLAG_REVERT + log!("emit_revert_with_data state={:?}", self); + log!( + "Reverting with data before {counter}", + counter = self.counter + ); + self.counter += 1; + log!( + "Reverting with data after {counter}", + counter = self.counter + ); + // Here we can't use revert!() macro, as it explicitly calls `return` and does not involve + // writing the state again. + Err(CustomError::Bar) + } + + pub fn emit_revert_without_data(&mut self) -> ! { + self.counter += 1; + revert!() + } + + pub fn get_address_inside_constructor(&self) -> Entity { + self.address_inside_constructor + .expect("Constructor was expected to be caller") + } + + #[casper(revert_on_error)] + pub fn should_revert_on_error(&self, flag: bool) -> Result2 { + if flag { + Err(CustomError::WithBody("Reverted".into())) + } else { + Ok(()) + } + } + + #[allow(dead_code)] + fn private_function_that_should_not_be_exported(&self) { + log!("This function should not be callable from outside"); + } + + pub(crate) fn restricted_function_that_should_be_part_of_manifest(&self) { + log!("This function should be callable from outside"); + } + + pub fn entry_point_without_state() { + log!("This function does not require state"); + } + + pub fn entry_point_without_state_with_args_and_output(mut arg: String) -> String { + log!("This function does not require state"); + arg.push_str("extra"); + arg + } + + pub fn into_modified_greeting(mut self) -> String { + self.greeting.push_str("!"); + self.greeting + } + + pub fn into_greeting(self) -> String { + self.greeting + } + + #[casper(payable)] + pub fn payable_entrypoint(&mut self) -> Result<(), CustomError> { + log!( + "This is a payable entrypoint value={}", + casper::transferred_value() + ); + 
Ok(()) + } + + // enum Error { + // TooLow { expected: u64} + // } + + // // #[casper(payable)] + // pub fn mint_wrapped_token(&mut self) -> Result<(), Error> { + + // if host::get_transferred_value() < EXPECTED_AMOUNT { + // // abort!("This function is not payable"); + // return Err(Error::TooLow { expected: EXPECTED_AMOUNT }); + // // abort!("This function is not payable"); + // // panic_str + // // abort!("") + // } + + // let transferred_value = host::get_transferred_value(); + // self.balances[sender] += transferred_value; + // } + + #[casper(payable, revert_on_error)] + pub fn payable_failing_entrypoint(&self) -> Result<(), CustomError> { + log!( + "This is a payable entrypoint with value={}", + casper::transferred_value() + ); + if casper::transferred_value() == 123 { + Err(CustomError::Foo) + } else { + Ok(()) + } + } + + #[casper(payable, revert_on_error)] + pub fn perform_token_deposit(&mut self, balance_before: u64) -> Result<(), CustomError> { + let caller = casper::get_caller(); + let value = casper::transferred_value(); + + if dbg!(value) == 0 { + return Err(CustomError::WithBody( + "Value should be greater than 0".into(), + )); + } + + assert_eq!( + balance_before + .checked_sub(value) + .unwrap_or_else(|| panic!("Balance before should be larger or equal to the value (caller={caller:?}, value={value})")), + casper::get_balance_of(&caller), + "Balance mismatch; token transfer should happen before a contract call" + ); + + log!("Depositing {value} from {caller:?}"); + let current_balance = self.balances.get(&caller).unwrap_or(0); + self.balances.insert(&caller, &(current_balance + value)); + Ok(()) + } + + #[casper(revert_on_error)] + pub fn withdraw(&mut self, balance_before: u64, amount: u64) -> Result<(), CustomError> { + let caller = casper::get_caller(); + log!("Withdrawing {amount} into {caller:?}"); + let current_balance = self.balances.get(&caller).unwrap_or(0); + if current_balance < amount { + return Err(CustomError::WithBody("Insufficient 
balance".into())); + } + + match caller { + Entity::Account(account) => { + // if this fails, the transfer will be reverted and the state will be rolled back + match casper::transfer(&account, amount) { + Ok(()) => {} + Err(call_error) => { + log!("Unable to perform a transfer: {call_error:?}"); + return Err(CustomError::Transfer(call_error.to_string())); + } + } + } + Entity::Contract(contract) => { + let result = ContractHandle::::from_address(contract) + .build_call() + .with_transferred_value(amount) + .try_call(|harness| harness.deposit()); + + match result { + Ok(call_result) => { + if let Err(call_error) = call_result.result { + log!("CallResult: Unable to perform a transfer: {call_error:?}"); + return Err(CustomError::Deposit(call_error)); + } + } + Err(call_error) => { + log!("try_call: Unable to perform a transfer: {call_error:?}"); + return Err(CustomError::Deposit(call_error)); + } + } + + // if let Err(call_error) = result.unwrap().result { + // log!("Unable to perform a transfer: {call_error:?}"); + // return Err(CustomError::Deposit(call_error)); + // } + } + } + + // TODO: transfer should probably pass CallError (i.e. 
reverted means mint transfer failed + // with error, or something like that) return Err(CustomError::WithBody("Transfer + // failed".into())); } + + let balance_after = balance_before + amount; + + assert_eq!( + casper::get_balance_of(&caller), + balance_after, + "Balance should be updated after withdrawal" + ); + + self.balances.insert(&caller, &(current_balance - amount)); + Ok(()) + } + + pub fn balance(&self) -> u64 { + if casper::transferred_value() != 0 { + panic!("This function is not payable"); + } + let caller = casper::get_caller(); + self.balances.get(&caller).unwrap_or(0) + } + + pub fn new_method( + &self, + _arg1: i32, + _arg2: i64, + _arg3: u32, + _arg4: u64, + _arg5: u64, + _arg6: Vec, + _arg7: bool, + _arg8: i8, + _arg9: String, + _arg10: Vec, + _arg11: [i32; 5], + _arg12: Option, + _arg13: Result<(), ()>, + _arg14: Box, + _arg15: String, + _arg16: i32, + _arg17: u64, + _arg18: (i32, i32), + _arg19: HashMap, + _arg20: BTreeSet, + _arg21: LinkedList, + _arg22: String, + _arg23: u64, + ) { + log!("Nothing"); + } +} diff --git a/smart_contracts/contracts/vm2/vm2-harness/src/contracts/no_fallback.rs b/smart_contracts/contracts/vm2/vm2-harness/src/contracts/no_fallback.rs new file mode 100644 index 0000000000..012a3d1f9f --- /dev/null +++ b/smart_contracts/contracts/vm2/vm2-harness/src/contracts/no_fallback.rs @@ -0,0 +1,31 @@ +use casper_contract_macros::casper; +use casper_contract_sdk::casper; + +/// A contract that can't receive tokens through a plain `fallback` method. +#[derive(Default)] +#[casper(contract_state)] +pub struct NoFallback { + initial_balance: u64, + received_balance: u64, +} + +#[casper] +impl NoFallback { + #[casper(constructor)] + pub fn no_fallback_initialize() -> Self { + Self { + initial_balance: casper::transferred_value(), + received_balance: 0, + } + } + + pub fn hello(&self) -> &str { + "Hello, World!" 
+ } + + #[casper(payable)] + pub fn receive_funds(&mut self) { + let value = casper::transferred_value(); + self.received_balance += value; + } +} diff --git a/smart_contracts/contracts/vm2/vm2-harness/src/contracts/token_owner.rs b/smart_contracts/contracts/vm2/vm2-harness/src/contracts/token_owner.rs new file mode 100644 index 0000000000..0ae47b5001 --- /dev/null +++ b/smart_contracts/contracts/vm2/vm2-harness/src/contracts/token_owner.rs @@ -0,0 +1,170 @@ +use casper_contract_sdk::prelude::*; + +use casper_contract_macros::casper; +use casper_contract_sdk::{ + casper::{self, Entity}, + log, revert, + types::{Address, CallError}, + ContractHandle, +}; + +use crate::traits::{Deposit, DepositExt}; + +use super::harness::HarnessRef; + +#[derive(Debug, PartialEq)] +#[casper] +pub enum TokenOwnerError { + CallError(CallError), + DepositError(String), + WithdrawError(String), +} + +impl From for TokenOwnerError { + fn from(v: CallError) -> Self { + Self::CallError(v) + } +} + +pub type Data = Vec; // TODO: CasperABI does not support generic parameters and it fails to compile, we need to support + // this in the macro + +#[casper] +#[derive(Debug, Default, PartialEq)] +pub enum FallbackHandler { + /// Accept tokens and do nothing. + #[default] + AcceptTokens, + /// Reject tokens with revert. + RejectWithRevert, + /// Reject tokens with trap. + RejectWithTrap, + /// Reject tokens with a revert with data. 
+ RejectWithData(Data), +} + +#[derive(Default)] +#[casper(contract_state)] +pub struct TokenOwnerContract { + initial_balance: u64, + received_tokens: u64, + fallback_handler: FallbackHandler, +} + +#[casper] +impl TokenOwnerContract { + #[casper(constructor, payable)] + pub fn token_owner_initialize() -> Self { + Self { + initial_balance: casper::transferred_value(), + received_tokens: 0, + fallback_handler: FallbackHandler::AcceptTokens, + } + } + + pub fn do_deposit( + &self, + self_address: Address, + contract_address: Address, + amount: u64, + ) -> Result<(), TokenOwnerError> { + let self_balance = casper::get_balance_of(&Entity::Contract(self_address)); + let res = ContractHandle::::from_address(contract_address) + .build_call() + .with_transferred_value(amount) + .call(|harness| harness.perform_token_deposit(self_balance))?; + match &res { + Ok(()) => log!("Token owner deposited {amount} to {contract_address:?}"), + Err(e) => log!("Token owner failed to deposit {amount} to {contract_address:?}: {e:?}"), + } + res.map_err(|error| TokenOwnerError::DepositError(error.to_string()))?; + Ok(()) + } + + pub fn do_withdraw( + &self, + self_address: Address, + contract_address: Address, + amount: u64, + ) -> Result<(), TokenOwnerError> { + let self_entity = Entity::Contract(self_address); + let self_balance = casper::get_balance_of(&self_entity); + + let res = ContractHandle::::from_address(contract_address) + .build_call() + .call(|harness| { + // Be careful about re-entrancy here: we are calling a contract that can call back + // while we're still not done with this entry point. If &mut self is + // used, then the proc macro will save the state while the state was already saved + // at the end of `receive()` call. To protect against re-entrancy + // attacks, please use `&self` or `self`. 
+ harness.withdraw(self_balance, amount) + }); + + let res = res?; + + match &res { + Ok(()) => { + log!("Token owner withdrew {amount} from {contract_address:?}"); + assert_eq!( + casper::get_balance_of(&self_entity), + self_balance + amount, + "Balance should change" + ); + } + Err(e) => { + log!("Token owner failed to withdraw {amount} from {contract_address:?}: {e:?}"); + assert_eq!( + casper::get_balance_of(&self_entity), + self_balance, + "Balance should NOT change" + ); + } + } + + res.map_err(|error| TokenOwnerError::WithdrawError(error.to_string()))?; + Ok(()) + } + + pub fn total_received_tokens(&self) -> u64 { + self.received_tokens + } + + pub fn set_fallback_handler(&mut self, handler: FallbackHandler) { + self.fallback_handler = handler; + } +} + +#[casper(path = crate::traits)] +impl Deposit for TokenOwnerContract { + fn deposit(&mut self) { + log!( + "Received deposit with value = {} current handler is {:?}", + casper::transferred_value(), + self.fallback_handler + ); + match std::mem::replace(&mut self.fallback_handler, FallbackHandler::AcceptTokens) { + FallbackHandler::AcceptTokens => { + let value = casper::transferred_value(); + log!( + "TokenOwnerContract received fallback entrypoint with value={}", + value + ); + self.received_tokens += value; + } + FallbackHandler::RejectWithRevert => { + // This will cause a revert. + log!("TokenOwnerContract rejected with revert"); + revert!(); + } + FallbackHandler::RejectWithTrap => { + // This will cause a trap. + unreachable!("its a trap"); + } + FallbackHandler::RejectWithData(data) => { + // This will cause a revert with data. 
+ revert!(data); + } + } + } +} diff --git a/smart_contracts/contracts/vm2/vm2-harness/src/lib.rs b/smart_contracts/contracts/vm2/vm2-harness/src/lib.rs new file mode 100644 index 0000000000..c12d845749 --- /dev/null +++ b/smart_contracts/contracts/vm2/vm2-harness/src/lib.rs @@ -0,0 +1,62 @@ +#![cfg_attr(target_arch = "wasm32", no_main)] + +pub mod contracts; +pub mod traits; + +#[cfg(test)] +mod tests { + + use casper_contract_sdk::casper::native::{self, dispatch, EntryPointKind}; + + use crate::contracts::harness::{Harness, HarnessRef, INITIAL_GREETING}; + + #[test] + fn test() { + dispatch(|| { + native::invoke_export_by_name("call"); + }) + .unwrap(); + } + + #[test] + fn exports() { + let exports = native::ENTRY_POINTS + .into_iter() + .filter_map(|e| match e.kind { + EntryPointKind::SmartContract { .. } => None, + EntryPointKind::TraitImpl { .. } => None, + EntryPointKind::Function { name } => Some(name), + }) + .collect::>(); + assert_eq!(exports, vec!["call"]); + } + + #[test] + fn should_greet() { + let mut flipper = Harness::constructor_with_args("Hello".into()); + assert_eq!(flipper.get_greeting(), "Hello"); // TODO: Initializer + flipper.set_greeting("Hi".into()); + assert_eq!(flipper.get_greeting(), "Hi"); + } + + #[test] + fn unittest() { + dispatch(|| { + let mut foo = Harness::initialize(); + assert_eq!(foo.get_greeting(), INITIAL_GREETING); + foo.set_greeting("New greeting".to_string()); + assert_eq!(foo.get_greeting(), "New greeting"); + }) + .unwrap(); + } + + #[test] + fn foo() { + assert_eq!(Harness::default().into_greeting(), "Default value"); + } +} + +#[cfg(not(target_arch = "wasm32"))] +fn main() { + panic!("Execute \"cargo test\" to test the contract, \"cargo build\" to build it"); +} diff --git a/smart_contracts/contracts/vm2/vm2-harness/src/main.rs b/smart_contracts/contracts/vm2/vm2-harness/src/main.rs new file mode 100644 index 0000000000..4bd09bfce5 --- /dev/null +++ b/smart_contracts/contracts/vm2/vm2-harness/src/main.rs @@ -0,0 
+1,736 @@ +#![cfg_attr(target_arch = "wasm32", no_main)] + +pub mod contracts; +pub mod traits; + +#[macro_use] +extern crate alloc; + +use casper_contract_macros::casper; +use casper_contract_sdk::{ + casper::{self, emit, emit_raw, Entity}, + casper_executor_wasm_common::{error::CommonResult, keyspace::Keyspace}, + log, + types::{Address, CallError}, +}; + +use contracts::token_owner::TokenOwnerContractRef; + +#[casper(message)] +pub struct TestMessage { + pub message: String, +} + +#[derive(Default)] +struct Seed { + value: u64, +} + +impl Seed { + fn next_seed(&mut self) -> [u8; 32] { + let current_value = { + let mut value: [u8; 32] = Default::default(); + value[32 - 8..].copy_from_slice(&self.value.to_be_bytes()); + value + }; + self.value += 1; + current_value + } +} + +fn next_test(counter: &mut u32, name: &str) -> u32 { + let current = *counter; + log!("Test {}. Running test: {name}", current); + *counter += 1; + current +} + +fn perform_test(seed: &mut Seed, flipper_address: Address) { + use casper_contract_sdk::ContractBuilder; + use contracts::harness::{CustomError, INITIAL_GREETING}; + + use crate::contracts::{harness::HarnessRef, token_owner::FallbackHandler}; + + log!("calling create"); + + let session_caller = casper::get_caller(); + assert_ne!(session_caller, Entity::Account([0; 32])); + + // Constructor without args + let mut counter = 1; + + { + next_test(&mut counter, "Traps and reverts"); + + let contract_handle = ContractBuilder::::new() + .with_seed(&seed.next_seed()) + .create(|| HarnessRef::initialize()) + .expect("Should create"); + log!("success"); + log!("contract_address: {:?}", contract_handle.contract_address()); + + // Verify that the address captured inside constructor is not the same as caller. 
+ let greeting_result = contract_handle + .call(|harness| harness.get_greeting()) + .expect("Should call"); + log!("Getting greeting: {greeting_result}"); + assert_eq!(greeting_result, INITIAL_GREETING); + + let () = contract_handle + .call(|harness| harness.set_greeting("Foo".into())) + .expect("Should call"); + + log!("New greeting saved"); + let greeting_result = contract_handle + .call(|harness| harness.get_greeting()) + .expect("Should call"); + assert_eq!(greeting_result, "Foo"); + + log!("Emitting unreachable trap"); + + let call_result = contract_handle.call(|harness| harness.emit_unreachable_trap()); + assert_eq!(call_result, Err(CallError::CalleeTrapped)); + + log!("Trap recovered"); + + { + let counter_value_before = contract_handle + .call(|harness| harness.counter()) + .expect("Should call"); + + // increase counter + let () = contract_handle + .call(|harness| harness.increment_counter()) + .expect("Should call"); + + let counter_value_after = contract_handle + .call(|harness| harness.counter()) + .expect("Should call"); + + assert_eq!(counter_value_before + 1, counter_value_after); + } + + { + let counter_value_before = contract_handle + .call(|harness| harness.counter()) + .expect("Should call"); + + let call_result = contract_handle + .try_call(|harness| harness.emit_revert_with_data()) + .expect("Call succeed"); + + assert_eq!(call_result.result, Err(CallError::CalleeReverted)); + assert_eq!(call_result.into_result().unwrap(), Err(CustomError::Bar),); + + let counter_value_after = contract_handle + .call(|harness| harness.counter()) + .expect("Should call"); + + assert_eq!(counter_value_before, counter_value_after); + } + + log!("Revert with data success"); + + let call_result = contract_handle + .try_call(|harness| harness.emit_revert_without_data()) + .expect("Call succeed"); + assert_eq!(call_result.result, Err(CallError::CalleeReverted)); + assert_eq!(call_result.data, None); + + log!("Revert without data success"); + + let call_result = 
contract_handle + .try_call(|harness| harness.should_revert_on_error(false)) + .expect("Call succeed"); + assert!(!call_result.did_revert()); + assert_eq!(call_result.into_result().unwrap(), Ok(())); + + log!("Revert on error success (ok case)"); + + let call_result = contract_handle + .try_call(|harness| harness.should_revert_on_error(true)) + .expect("Call succeed"); + assert!(call_result.did_revert()); + assert_eq!( + call_result.into_result().unwrap(), + Err(CustomError::WithBody("Reverted".to_string())) + ); + + log!("Revert on error success (err case)"); + // let should_revert_on_error: TypedCall<(bool,), Result<(), CustomError>> = + // TypedCall::new(contract_address, selector!("should_revert_on_error")); + // let result = should_revert_on_error.call((false,)); + // assert!(!result.did_revert()); + + // let result = should_revert_on_error.call((true,)); + // assert!(result.did_revert()); + // assert_eq!( + // result.into_return_value(), + // Err(CustomError::WithBody("Reverted".to_string())) + // ); + } + + // Constructor with args + + { + next_test(&mut counter, "Constructor with args"); + + let contract_handle = ContractBuilder::::new() + .with_seed(&seed.next_seed()) + .create(|| HarnessRef::constructor_with_args("World".into())) + .expect("Should create"); + log!("success 2"); + log!("contract_address: {:?}", contract_handle.contract_address()); + + // Calling constructor twice should fail + let error = match contract_handle + .try_call(|_| HarnessRef::constructor_with_args("World".into())) + { + Ok(_) => panic!("Constructor should fail to initialize already initialized contract"), + Err(error) => error, + }; + assert_eq!(error, CallError::CalleeTrapped); + + let result = contract_handle + .call(|harness| harness.get_greeting()) + .expect("Should call"); + assert_eq!(result, "Hello, World!".to_string(),); + } + + { + next_test(&mut counter, "Failing constructor"); + + let error = match ContractBuilder::::new() + .with_seed(&seed.next_seed()) + .create(|| 
HarnessRef::failing_constructor("World".to_string())) + { + Ok(_) => panic!("Constructor that reverts should fail to create"), + Err(error) => error, + }; + assert_eq!(error, CallError::CalleeReverted); + + let error = match ContractBuilder::::new() + .with_seed(&seed.next_seed()) + .create(|| HarnessRef::trapping_constructor()) + { + Ok(_) => panic!("Constructor that traps should fail to create"), + Err(error) => error, + }; + assert_eq!(error, CallError::CalleeTrapped); + } + + // + // Check payable entrypoints + // + + { + next_test(&mut counter, "Checking payable entrypoints"); + + let contract_handle = ContractBuilder::::new() + .with_transferred_value(1) + .with_seed(&seed.next_seed()) + .create(|| HarnessRef::payable_constructor()) + .expect("Should create"); + + assert_eq!(contract_handle.balance(), 1); + + log!("success 2"); + log!("contract_address: {:?}", contract_handle.contract_address()); + + // Transferring 500 motes before payable entrypoint is executed + + let result_1 = contract_handle + .build_call() + .with_transferred_value(500) + .call(|harness| harness.payable_entrypoint()) + .expect("Should call"); + assert_eq!(result_1, Ok(())); + + // Transferring 499 motes before payable entrypoint is executed + + let result_2 = contract_handle + .build_call() + .with_transferred_value(499) + .call(|harness| harness.payable_entrypoint()) + .expect("Should call"); + assert_eq!(result_2, Ok(())); + + // Check balance after payable constructor and two successful calls + assert_eq!(contract_handle.balance(), 1 + 500 + 499); + + let result_3 = contract_handle + .build_call() + .with_transferred_value(123) + .call(|harness| harness.payable_failing_entrypoint()) + .expect("Should call"); + assert_eq!(result_3, Err(CustomError::Foo)); + // Check balance after failed call, should be the same as before + assert_eq!(contract_handle.balance(), 1 + 500 + 499); + } + + // Deposit and withdraw + // 1. wasm (caller = A, callee = B) + // 2. 
create (caller = B, callee = C) + // 3. call (caller = B, callee = C) + // 4. create (caller = C, callee = D) + // 5. call (caller = C, callee = D) + + { + let current_test = next_test(&mut counter, "Deposit and withdraw"); + + let contract_handle = ContractBuilder::::new() + .with_transferred_value(0) + .with_seed(&seed.next_seed()) + .create(|| HarnessRef::payable_constructor()) + .expect("Should create"); + + let caller = casper::get_caller(); + + { + next_test( + &mut counter, + &format!("{current_test} Depositing as an account"), + ); + let account_balance_1 = casper::get_balance_of(&caller); + contract_handle + .build_call() + .with_transferred_value(100) + .call(|harness| harness.perform_token_deposit(account_balance_1)) + .expect("Should call") + .expect("Should succeed"); + let account_balance_2 = casper::get_balance_of(&caller); + assert_eq!(account_balance_2, account_balance_1 - 100); + + contract_handle + .build_call() + .with_transferred_value(25) + .call(|harness| harness.perform_token_deposit(account_balance_2)) + .expect("Should call") + .expect("Should succeed"); + + let account_balance_after = casper::get_balance_of(&caller); + assert_eq!(account_balance_after, account_balance_1 - 125); + } + + let current_contract_balance = contract_handle + .build_call() + .call(|harness| harness.balance()) + .expect("Should call"); + assert_eq!(current_contract_balance, 100 + 25); + + { + next_test( + &mut counter, + &format!("{current_test} Withdrawing as an account"), + ); + let account_balance_before = casper::get_balance_of(&caller); + contract_handle + .build_call() + .call(|harness| harness.withdraw(account_balance_before, 50)) + .expect("Should call") + .expect("Should succeed"); + let account_balance_after = casper::get_balance_of(&caller); + assert_ne!(account_balance_after, account_balance_before); + assert_eq!(account_balance_after, account_balance_before + 50); + + let current_deposit_balance = contract_handle + .build_call() + .call(|harness| 
harness.balance()) + .expect("Should call"); + assert_eq!(current_deposit_balance, 100 + 25 - 50); + + assert_eq!(contract_handle.balance(), 100 + 25 - 50); + } + } + + // + // Perform tests with a contract acting as an owner of funds deposited into other contract + // + + { + next_test( + &mut counter, + "Contract acts as owner of funds deposited into other contract", + ); + + let caller = casper::get_caller(); + + let harness = ContractBuilder::::new() + .with_transferred_value(0) + .with_seed(&seed.next_seed()) + .create(|| HarnessRef::constructor_with_args("Contract".into())) + .expect("Should create"); + + let initial_balance = 1000; + + let token_owner = ContractBuilder::::new() + .with_transferred_value(initial_balance) + .with_seed(&seed.next_seed()) + .create(|| TokenOwnerContractRef::token_owner_initialize()) + .expect("Should create"); + assert_eq!(token_owner.balance(), initial_balance); + + // token owner contract performs a deposit into a harness contract through `deposit` payable + // entrypoint caller: no change + // token owner: -50 + // harness: +50 + { + next_test(&mut counter, "Subtest 1"); + let caller_balance_before = casper::get_balance_of(&caller); + let token_owner_balance_before = token_owner.balance(); + let harness_balance_before = harness.balance(); + + let initial_deposit = 500; + + token_owner + .call(|contract| { + contract.do_deposit( + token_owner.contract_address(), + harness.contract_address(), + initial_deposit, + ) + }) + .expect("Should call") + .expect("Should succeed"); + + assert_eq!( + casper::get_balance_of(&caller), + caller_balance_before, + "Caller funds should not change" + ); + assert_eq!( + token_owner.balance(), + token_owner_balance_before - initial_deposit, + "Token owner balance should decrease" + ); + assert_eq!(harness.balance(), harness_balance_before + initial_deposit); + } + + // token owner contract performs a withdrawal from a harness contract through `withdraw` + // entrypoint caller: no change + // 
token owner: +50 + // harness: -50 + { + next_test(&mut counter, "Subtest 2"); + let caller_balance_before = casper::get_balance_of(&caller); + let token_owner_balance_before = token_owner.balance(); + let harness_balance_before = harness.balance(); + + token_owner + .call(|contract| { + contract.do_withdraw( + token_owner.contract_address(), + harness.contract_address(), + 50, + ) + }) + .expect("Should call") + .expect("Should succeed"); + + assert_eq!( + casper::get_balance_of(&caller), + caller_balance_before, + "Caller funds should not change" + ); + assert_eq!( + token_owner.balance(), + token_owner_balance_before + 50, + "Token owner balance should increase" + ); + assert_eq!(harness.balance(), harness_balance_before - 50); + let total_received_tokens = token_owner + .call(|contract| contract.total_received_tokens()) + .expect("Should call"); + assert_eq!(total_received_tokens, 50); + } + + { + next_test( + &mut counter, + "Token owner will revert inside fallback while plain transfer", + ); + { + let harness_balance_before = harness.balance(); + token_owner + .call(|contract| { + contract.set_fallback_handler(FallbackHandler::RejectWithRevert) + }) + .expect("Should call"); + let harness_balance_after = harness.balance(); + assert_eq!(harness_balance_before, harness_balance_after); + } + + { + let harness_balance_before = harness.balance(); + let withdraw_result = token_owner + .call(|contract| { + contract.do_withdraw( + token_owner.contract_address(), + harness.contract_address(), + 50, + ) + }) + .expect("Should call"); + let harness_balance_after = harness.balance(); + assert_eq!(harness_balance_before, harness_balance_after); + assert_eq!( + withdraw_result, + Err( + crate::contracts::token_owner::TokenOwnerError::WithdrawError( + "deposit error callee reverted".to_string() + ) + ) + ); + } + } + + { + next_test( + &mut counter, + "Token owner will trap inside fallback while plain transfer", + ); + { + let harness_balance_before = harness.balance(); + 
token_owner + .call(|contract| contract.set_fallback_handler(FallbackHandler::RejectWithTrap)) + .expect("Should call"); + let harness_balance_after = harness.balance(); + assert_eq!(harness_balance_before, harness_balance_after); + } + + { + let harness_balance_before = harness.balance(); + let withdraw_result = token_owner + .call(|contract| { + contract.do_withdraw( + token_owner.contract_address(), + harness.contract_address(), + 50, + ) + }) + .expect("Should call"); + let harness_balance_after = harness.balance(); + assert_eq!(harness_balance_before, harness_balance_after); + assert_eq!( + withdraw_result, + Err( + crate::contracts::token_owner::TokenOwnerError::WithdrawError( + "deposit error callee trapped".to_string() + ) + ) + ); + } + } + + { + next_test( + &mut counter, + "Token owner will revert with data inside fallback while plain transfer", + ); + { + let harness_balance_before = harness.balance(); + token_owner + .call(|contract| { + contract.set_fallback_handler(FallbackHandler::RejectWithData(vec![ + 1, 2, 3, 4, 5, + ])) + }) + .expect("Should call"); + let harness_balance_after = harness.balance(); + assert_eq!(harness_balance_before, harness_balance_after); + } + + { + let harness_balance_before = harness.balance(); + let withdraw_result = token_owner + .call(|contract| { + contract.do_withdraw( + token_owner.contract_address(), + harness.contract_address(), + 50, + ) + }) + .expect("Should call"); + let harness_balance_after = harness.balance(); + assert_eq!(harness_balance_before, harness_balance_after); + assert_eq!( + withdraw_result, + Err( + crate::contracts::token_owner::TokenOwnerError::WithdrawError( + "deposit error callee reverted".to_string() + ) + ) + ); + } + } + } + + // { + // let _current_test = next_test( + // &mut counter, + // "Plain transfer to a contract does not work without fallback", + // ); + // let flipper_address = Entity::Contract(flipper_address); + + // // assert_eq!( + // // 
host::casper_transfer(&flipper_address, 123), + // // Err(CallError::NotCallable) + // // ); + // } + + { + let _current_test = next_test( + &mut counter, + "Calling non-existing entrypoint does not crash", + ); + let (output, result) = + casper::casper_call(&flipper_address, 0, "non_existing_entrypoint", &[]); + assert_eq!(result, Err(CallError::NotCallable)); + assert_eq!(output, None); + } + + { + let _current_test = next_test(&mut counter, "Message passing"); + + for i in 0..10 { + assert_eq!( + emit(TestMessage { + message: format!("Hello, world: {i}!"), + }), + Ok(()) + ); + } + + let small_topic_name = "a".repeat(32); + let large_topic_name = "a".repeat(257); + let large_payload_data = vec![0; 16384]; + + assert_eq!( + emit_raw(&large_topic_name, &[]), + Err(CommonResult::TopicTooLong) + ); + assert_eq!( + emit_raw(&small_topic_name, &large_payload_data), + Err(CommonResult::PayloadTooLong) + ); + + for i in 0..127u64 { + assert_eq!( + emit_raw(&format!("Topic{i}"), &i.to_be_bytes()), + Ok(()), + "Emitting message with small payload failed" + ); + } + + assert_eq!( + emit_raw(&format!("Topic128"), &[128]), + Err(CommonResult::TooManyTopics), + "Emitting message with small payload failed" + ); + } + + { + next_test(&mut counter, "Removing from global state"); + let key = [0, 1, 2, 3]; + let value_1 = [4, 5, 6, 7]; + let value_2 = [8, 9, 10, 11, 12, 13, 14, 15]; + let keyspace = Keyspace::Context(&key); + // No value exists + assert_eq!(casper::read(keyspace, |_size| None), Ok(None)); + + // Write a value + casper::write(keyspace, &value_1).unwrap(); + // Value exists + assert_eq!(casper::read_into_vec(keyspace), Ok(Some(value_1.to_vec()))); + // Remove the value + casper::remove(keyspace).unwrap(); + // No value exists + assert_eq!(casper::read_into_vec(keyspace), Ok(None)); + // Removing again (aka removing non-existent key) should raise an error + assert_eq!(casper::remove(keyspace), Err(CommonResult::NotFound)); + // Re-reading already purged value 
wouldn't be an issue + assert_eq!(casper::read_into_vec(keyspace), Ok(None)); + // Write a new value under same key + casper::write(keyspace, &value_2).unwrap(); + // New value exists + assert_eq!(casper::read_into_vec(keyspace), Ok(Some(value_2.to_vec()))); + + // Attempting to remove a definetely non-existent key should be an error + let keyspace = Keyspace::Context(b"this key definetely does not exists"); + let result = casper::remove(keyspace); + assert_eq!(result, Err(CommonResult::NotFound)); + } + + log!("👋 Goodbye"); +} + +#[casper(export)] +pub fn call(flipper_address: Address) { + let mut seed = Seed::default(); + perform_test(&mut seed, flipper_address); +} + +#[casper(export)] +pub fn yet_another_exported_function(arg1: u64, arg2: String) { + log!("Yet another exported function with args arg1={arg1} arg2={arg2}"); +} + +#[cfg(test)] +mod tests { + use casper::native::{dispatch_with, EntryPointKind, Environment, ENTRY_POINTS}; + use casper_contract_sdk::casper::native::{self, dispatch}; + use contracts::harness::{Harness, INITIAL_GREETING}; + + use super::*; + #[test] + fn can_call_exported_function() { + super::yet_another_exported_function(1234u64, "Hello, world!".to_string()); + + let input_data = casper_contract_sdk::serializers::borsh::to_vec(&( + 4321u64, + "!world, Hello".to_string(), + )) + .unwrap(); + + dispatch_with(Environment::default().with_input_data(input_data), || { + native::invoke_export_by_name("yet_another_exported_function"); + }) + .unwrap(); + } + + #[test] + fn exports() { + assert!(ENTRY_POINTS + .iter() + .any(|export| export.kind == EntryPointKind::Function { name: "call" })); + } + + #[test] + fn should_greet() { + let mut flipper = Harness::constructor_with_args("Hello".into()); + assert_eq!(flipper.get_greeting(), "Hello"); // TODO: Initializer + flipper.set_greeting("Hi".into()); + assert_eq!(flipper.get_greeting(), "Hi"); + } + + #[test] + fn unittest() { + dispatch(|| { + let mut foo = Harness::initialize(); + 
assert_eq!(foo.get_greeting(), INITIAL_GREETING); + foo.set_greeting("New greeting".to_string()); + assert_eq!(foo.get_greeting(), "New greeting"); + }) + .unwrap(); + } + + #[test] + fn foo() { + assert_eq!(Harness::default().into_greeting(), "Default value"); + } +} + +#[cfg(not(target_arch = "wasm32"))] +fn main() { + panic!("Execute \"cargo test\" to test the contract, \"cargo build\" to build it"); +} diff --git a/smart_contracts/contracts/vm2/vm2-harness/src/traits.rs b/smart_contracts/contracts/vm2/vm2-harness/src/traits.rs new file mode 100644 index 0000000000..425a8d69a8 --- /dev/null +++ b/smart_contracts/contracts/vm2/vm2-harness/src/traits.rs @@ -0,0 +1,42 @@ +use casper_contract_macros::casper; + +/// Deposit interface for contracts to receive tokens. +/// +/// Useful for contracts that need to receive tokens. +#[casper] +pub trait Deposit { + /// Deposit tokens into the contract. + #[casper(payable)] + fn deposit(&mut self); +} + +#[casper] +pub trait SupportsALotOfArguments { + fn very_long_list_of_arguments( + &mut self, + a0: u64, + a1: u64, + a2: u64, + a3: u64, + a4: String, + a5: String, + a6: u64, + a7: u64, + a8: u64, + a9: u64, + a10: u32, + a11: u16, + a12: String, + a13: bool, + a14: u32, + a15: Vec, + a16: Vec, + a17: String, + a18: String, + a19: Option, + a20: u64, + a21: u32, + a22: (u64, u32, u16, u8), + a23: (String, String, String, String, u64), + ); +} diff --git a/smart_contracts/contracts/vm2/vm2-host/Cargo.toml b/smart_contracts/contracts/vm2/vm2-host/Cargo.toml new file mode 100644 index 0000000000..c5875cc210 --- /dev/null +++ b/smart_contracts/contracts/vm2/vm2-host/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "vm2-host" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html +[lib] +crate-type = ["cdylib", "rlib"] + +[dependencies] +casper-contract-macros = { path = "../../../macros" } +casper-contract-sdk = { path = "../../../sdk" } diff --git 
a/smart_contracts/contracts/vm2/vm2-host/build.rs b/smart_contracts/contracts/vm2/vm2-host/build.rs new file mode 100644 index 0000000000..12d5fa6ba6 --- /dev/null +++ b/smart_contracts/contracts/vm2/vm2-host/build.rs @@ -0,0 +1,7 @@ +fn main() { + // Check if target arch is wasm32 and set link flags accordingly + if std::env::var("TARGET").unwrap() == "wasm32-unknown-unknown" { + println!("cargo:rustc-link-arg=--import-memory"); + println!("cargo:rustc-link-arg=--export-table"); + } +} diff --git a/smart_contracts/contracts/vm2/vm2-host/src/lib.rs b/smart_contracts/contracts/vm2/vm2-host/src/lib.rs new file mode 100644 index 0000000000..34b7a955e2 --- /dev/null +++ b/smart_contracts/contracts/vm2/vm2-host/src/lib.rs @@ -0,0 +1,150 @@ +#![cfg_attr(target_arch = "wasm32", no_main)] + +use casper_contract_sdk::{ + casper_executor_wasm_common::{flags::ReturnFlags, keyspace::Keyspace}, + prelude::*, +}; + +const CURRENT_VERSION: &str = "v1"; + +// This contract is used to assert that calling host functions consumes gas. +// It is by design that it does nothing other than calling appropriate host functions. + +// There is no need for these functions to actually do anything meaningful, and it's alright +// if they short-circuit. 
+ +#[casper(contract_state)] +pub struct MinHostWrapper; + +impl Default for MinHostWrapper { + fn default() -> Self { + panic!("Unable to instantiate contract without a constructor"); + } +} + +#[casper] +impl MinHostWrapper { + #[casper(constructor)] + pub fn new(with_host_fn_call: String) -> Self { + let ret = Self; + match with_host_fn_call.as_str() { + "get_caller" => { + ret.get_caller(); + } + "get_block_time" => { + ret.get_block_time(); + } + "get_value" => { + ret.get_transferred_value(); + } + "get_balance_of" => { + ret.get_balance_of(); + } + "call" => { + ret.call(); + } + "input" => { + ret.input(); + } + "create" => { + ret.create(); + } + "print" => { + ret.print(); + } + "read" => { + ret.read(); + } + "ret" => { + ret.ret(); + } + "transfer" => { + ret.transfer(); + } + "upgrade" => { + ret.upgrade(); + } + "write" => { + ret.write(); + } + "write_n_bytes" => { + ret.write(); + } + _ => panic!("Unknown host function"), + } + ret + } + + #[casper(constructor)] + pub fn new_with_write(byte_count: u64) -> Self { + let ret = Self; + ret.write_n_bytes(byte_count); + ret + } + + #[casper(constructor)] + pub fn default() -> Self { + Self + } + + pub fn version(&self) -> &str { + CURRENT_VERSION + } + + pub fn get_caller(&self) -> Entity { + casper::get_caller() + } + + pub fn get_block_time(&self) -> u64 { + casper::get_block_time() + } + + pub fn get_transferred_value(&self) -> u64 { + casper::transferred_value() + } + + pub fn get_balance_of(&self) -> u64 { + casper::get_balance_of(&Entity::Account([0u8; 32])) + } + + pub fn call(&self) { + casper::casper_call(&[0u8; 32], 0, "", &[]).1.ok(); + } + + pub fn input(&self) { + casper::copy_input(); + } + + pub fn create(&self) { + casper::create(None, 0, None, None, None).ok(); + } + + pub fn print(&self) { + casper::print(""); + } + + pub fn read(&self) { + casper::read(Keyspace::Context(&[]), |_| None).ok(); + } + + pub fn ret(&self) { + casper::ret(ReturnFlags::empty(), None); + } + + pub fn 
transfer(&self) { + casper::transfer(&[0; 32], 0).ok(); + } + + pub fn upgrade(&self) { + casper::upgrade(&[], None, None).ok(); + } + + pub fn write(&self) { + casper::write(Keyspace::Context(&[]), &[]).ok(); + } + + pub fn write_n_bytes(&self, n: u64) { + let buffer = vec![0; n as usize]; + casper::write(Keyspace::Context(&[0]), &buffer).ok(); + } +} diff --git a/smart_contracts/contracts/vm2/vm2-legacy-counter-proxy/Cargo.toml b/smart_contracts/contracts/vm2/vm2-legacy-counter-proxy/Cargo.toml new file mode 100644 index 0000000000..9d31895430 --- /dev/null +++ b/smart_contracts/contracts/vm2/vm2-legacy-counter-proxy/Cargo.toml @@ -0,0 +1,23 @@ +[package] +name = "vm2-legacy-counter-proxy" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html +[lib] +crate-type = ["cdylib", "rlib"] + + +[dependencies] +casper-contract-macros = { path = "../../../macros" } +casper-contract-sdk = { path = "../../../sdk" } + +[target.'cfg(not(target_arch = "wasm32"))'.dependencies] +serde_json = "1" +casper-contract-sdk = { path = "../../../sdk", features = ["cli"] } + +[dev-dependencies] +casper-contract-sdk-codegen = { path = "../../../sdk_codegen" } + +[build-dependencies] +casper-contract-sdk-codegen = { path = "../../../sdk_codegen" } diff --git a/smart_contracts/contracts/vm2/vm2-legacy-counter-proxy/build.rs b/smart_contracts/contracts/vm2/vm2-legacy-counter-proxy/build.rs new file mode 100644 index 0000000000..d594b41916 --- /dev/null +++ b/smart_contracts/contracts/vm2/vm2-legacy-counter-proxy/build.rs @@ -0,0 +1,22 @@ +// use std::{env, fs, path::Path}; + +// use casper_contract_sdk_codegen::Codegen; + +// const SCHEMA: &str = include_str!("cep18_schema.json"); + +fn main() { + // Check if target arch is wasm32 and set link flags accordingly + if std::env::var("TARGET").unwrap() == "wasm32-unknown-unknown" { + println!("cargo:rustc-link-arg=--import-memory"); + 
println!("cargo:rustc-link-arg=--export-table"); + } + + // casper_contract_sdk::build_flags(); + + // let mut codegen = Codegen::from_str(SCHEMA).unwrap(); + // let source = codegen.gen(); + + // let target_dir = env::var_os("OUT_DIR").unwrap(); + // let target_path = Path::new(&target_dir).join("cep18_schema.rs"); + // fs::write(&target_path, source).unwrap(); +} diff --git a/smart_contracts/contracts/vm2/vm2-legacy-counter-proxy/cep18_schema.json b/smart_contracts/contracts/vm2/vm2-legacy-counter-proxy/cep18_schema.json new file mode 120000 index 0000000000..a65b742cc6 --- /dev/null +++ b/smart_contracts/contracts/vm2/vm2-legacy-counter-proxy/cep18_schema.json @@ -0,0 +1 @@ +/Users/michal/Dev/casperlabs-node/smart_contracts/sdk-codegen/tests/fixtures/cep18_schema.json \ No newline at end of file diff --git a/smart_contracts/contracts/vm2/vm2-legacy-counter-proxy/src/lib.rs b/smart_contracts/contracts/vm2/vm2-legacy-counter-proxy/src/lib.rs new file mode 100644 index 0000000000..f5f9b7e55f --- /dev/null +++ b/smart_contracts/contracts/vm2/vm2-legacy-counter-proxy/src/lib.rs @@ -0,0 +1,55 @@ +#![cfg_attr(target_arch = "wasm32", no_main)] +#![cfg_attr(target_arch = "wasm32", no_std)] + +use casper_contract_macros::{casper, PanicOnDefault}; +use casper_contract_sdk::{casper, log, types::Address}; + +/// This contract implements a simple LegacyCounterProxy. +#[derive(PanicOnDefault)] +#[casper(contract_state)] +pub struct LegacyCounterProxy { + /// Legacy address of the counter contract. 
+ legacy_address: Address, +} + +const EMPTY_RUNTIME_ARGS: [u8; 4] = 0u32.to_le_bytes(); +const CL_VALUE_UNIT_BYTES: [u8; 5] = [0, 0, 0, 0, 9]; + +#[casper] +impl LegacyCounterProxy { + #[casper(constructor)] + pub fn new(legacy_address: Address) -> Self { + Self { legacy_address } + } + + pub fn perform_test(&self) { + let (counter_get_result_1, host_error) = + casper::casper_call(&self.legacy_address, 0, "counter_get", &EMPTY_RUNTIME_ARGS); + log!("counter_get_result_before: {:?}", counter_get_result_1); + let _ = host_error.expect("No error 1"); + + let (inc_result_1, host_error) = + casper::casper_call(&self.legacy_address, 0, "counter_inc", &EMPTY_RUNTIME_ARGS); + log!("inc_result {:?}", inc_result_1); + assert_eq!(inc_result_1, Some(CL_VALUE_UNIT_BYTES.to_vec())); + let _ = host_error.expect("No error 2"); + + let (counter_get_result_2, host_error) = + casper::casper_call(&self.legacy_address, 0, "counter_get", &EMPTY_RUNTIME_ARGS); + let _ = host_error.expect("No error 3"); + log!("counter_get_result_after: {:?}", counter_get_result_2); + assert_ne!(counter_get_result_1, counter_get_result_2); + + let (inc_result_2, host_error) = + casper::casper_call(&self.legacy_address, 0, "counter_inc", &EMPTY_RUNTIME_ARGS); + log!("inc_result {:?}", inc_result_2); + assert_eq!(inc_result_2, Some(CL_VALUE_UNIT_BYTES.to_vec())); + let _ = host_error.expect("No error 4"); + + let (counter_get_result_3, host_error) = + casper::casper_call(&self.legacy_address, 0, "counter_get", &EMPTY_RUNTIME_ARGS); + let _ = host_error.expect("No error 3"); + log!("counter_get_result_after: {:?}", counter_get_result_3); + assert_ne!(counter_get_result_2, counter_get_result_3); + } +} diff --git a/smart_contracts/contracts/vm2/vm2-trait/Cargo.toml b/smart_contracts/contracts/vm2/vm2-trait/Cargo.toml new file mode 100644 index 0000000000..59545b0fb0 --- /dev/null +++ b/smart_contracts/contracts/vm2/vm2-trait/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "vm2-trait" +version = "0.1.0" 
+edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html +[lib] +crate-type = ["cdylib", "rlib"] + +[dependencies] +casper-contract-macros = { path = "../../../macros" } +casper-contract-sdk = { path = "../../../sdk" } +base16 = "0.2.1" + +[target.'cfg(not(target_arch = "wasm32"))'.dependencies] +serde_json = "1" diff --git a/smart_contracts/contracts/vm2/vm2-trait/build.rs b/smart_contracts/contracts/vm2/vm2-trait/build.rs new file mode 100644 index 0000000000..12d5fa6ba6 --- /dev/null +++ b/smart_contracts/contracts/vm2/vm2-trait/build.rs @@ -0,0 +1,7 @@ +fn main() { + // Check if target arch is wasm32 and set link flags accordingly + if std::env::var("TARGET").unwrap() == "wasm32-unknown-unknown" { + println!("cargo:rustc-link-arg=--import-memory"); + println!("cargo:rustc-link-arg=--export-table"); + } +} diff --git a/smart_contracts/contracts/vm2/vm2-trait/src/lib.rs b/smart_contracts/contracts/vm2/vm2-trait/src/lib.rs new file mode 100644 index 0000000000..8d0fd506d4 --- /dev/null +++ b/smart_contracts/contracts/vm2/vm2-trait/src/lib.rs @@ -0,0 +1,399 @@ +#![cfg_attr(target_arch = "wasm32", no_main)] +#![cfg_attr(target_arch = "wasm32", no_std)] + +use casper_contract_macros::{blake2b256, casper}; +use casper_contract_sdk::{ + casper, + contrib::{ + access_control::{AccessControl, AccessControlExt, AccessControlState, Role}, + ownable::{Ownable, OwnableError, OwnableExt, OwnableState}, + }, + log, + prelude::*, + ContractBuilder, ContractHandle, +}; + +pub const GREET_RETURN_VALUE: u64 = 123456789; + +#[casper] +pub trait HasFallback { + #[casper(fallback)] + fn this_is_fallback_method(&self) { + log!("Fallback called with value={}", casper::transferred_value()); + } +} + +#[casper] +pub trait Trait1 { + fn abstract_greet(&self); + + fn greet(&self, who: String) -> u64 { + log!("Hello from greet, {who}!"); + GREET_RETURN_VALUE + } + + fn adder(&self, lhs: u64, rhs: u64) -> u64; +} + +#[casper] 
+#[derive(Copy, Clone, Default)] +pub struct CounterState { + value: u64, +} + +#[casper] +pub trait Counter { + fn increment(&mut self) { + log!("Incrementing!"); + self.counter_state_mut().value += 1; + } + + fn decrement(&mut self) { + log!("Decrementing!"); + self.counter_state_mut().value -= 1; + } + + fn get_counter_value(&self) -> u64 { + self.counter_state().value + } + + fn get_counter_state(&self) -> CounterState { + self.counter_state().clone() + } + + #[casper(private)] + fn counter_state(&self) -> &CounterState; + + #[casper(private)] + fn counter_state_mut(&mut self) -> &mut CounterState; +} + +#[casper(contract_state)] +#[derive(Default)] +pub struct HasTraits { + counter_state: CounterState, + ownable_state: OwnableState, + access_control_state: AccessControlState, +} + +#[casper] +impl Trait1 for HasTraits { + fn abstract_greet(&self) { + log!("Hello from abstract greet impl!"); + } + + fn adder(&self, lhs: u64, rhs: u64) -> u64 { + lhs + rhs + } +} + +#[casper] +impl HasFallback for HasTraits {} + +// Implementing traits does not require extra annotation as the trait dispatcher is generated at the +// trait level. 
+#[casper] +impl Counter for HasTraits { + fn counter_state_mut(&mut self) -> &mut CounterState { + &mut self.counter_state + } + fn counter_state(&self) -> &CounterState { + &self.counter_state + } +} + +#[casper(path = casper_contract_sdk::contrib::ownable)] +impl Ownable for HasTraits { + fn state(&self) -> &OwnableState { + &self.ownable_state + } + fn state_mut(&mut self) -> &mut OwnableState { + &mut self.ownable_state + } +} + +#[casper] +pub enum UserRole { + Admin, + User, +} + +impl Into for UserRole { + fn into(self) -> Role { + match self { + UserRole::Admin => blake2b256!("admin"), + UserRole::User => blake2b256!("user"), + } + } +} + +#[casper(path = casper_contract_sdk::contrib::access_control)] +impl AccessControl for HasTraits { + fn state(&self) -> &AccessControlState { + &self.access_control_state + } + fn state_mut(&mut self) -> &mut AccessControlState { + &mut self.access_control_state + } +} + +#[casper] +impl HasTraits { + #[casper(constructor)] + pub fn new(counter_value: u64) -> Self { + log!("Calling new constructor with value={counter_value}"); + Self { + counter_state: CounterState { + value: counter_value, + }, + ownable_state: OwnableState::default(), + access_control_state: AccessControlState::default(), + } + } + pub fn foobar(&self) { + // Can extend contract that implements a trait to also call methods provided by a trait. + let counter_state = self.counter_state(); + log!("Foobar! 
Counter value: {}", counter_state.value); + } + + pub fn only_for_owner(&mut self) -> Result<(), OwnableError> { + self.only_owner()?; + log!("Only for owner!"); + Ok(()) + } +} + +#[casper] +impl HasTraits { + pub fn multiple_impl_blocks_should_work() { + log!("Multiple impl blocks work!"); + } +} + +fn perform_test() { + let contract_handle = ContractBuilder::::new() + .default_create() + .expect("should create contract"); + + let trait1_handle = + ContractHandle::::from_address(contract_handle.contract_address()); + let counter_handle = + ContractHandle::::from_address(contract_handle.contract_address()); + + { + let greet_result: u64 = contract_handle + .build_call() + .call(|has_traits| has_traits.greet("World".into())) + .expect("Call as Trait1Ref"); + assert_eq!(greet_result, GREET_RETURN_VALUE); + } + + { + let () = trait1_handle + .call(|trait1ref| trait1ref.abstract_greet()) + .expect("Call as Trait1Ref"); + } + + { + let result: u64 = contract_handle + .build_call() + .call(|trait1ref| trait1ref.adder(1111, 2222)) + .expect("Call as Trait1Ref"); + assert_eq!(result, 1111 + 2222); + } + + // + // Counter trait + // + + { + let counter_value = counter_handle + .call(|counter| counter.get_counter_value()) + .expect("Call"); + assert_eq!(counter_value, 0); + + // call increase + let () = counter_handle + .call(|counter| counter.increment()) + .expect("Call"); + + // get value + let counter_value = counter_handle + .call(|counter| counter.get_counter_value()) + .expect("Call"); + + // check that the value increased + assert_eq!(counter_value, 1); + + // call decrease + let () = counter_handle + .call(|counter| counter.decrement()) + .expect("Call"); + + // get value and compare the difference + let counter_value = counter_handle + .call(|counter| counter.get_counter_value()) + .expect("Call"); + assert_eq!(counter_value, 0); + } +} + +#[casper(export)] +pub fn call() { + log!("Hello"); + perform_test(); + log!("🎉 Success"); +} + +#[cfg(test)] +mod tests { + 
use std::collections::BTreeSet; + + use crate::{Counter, CounterExt, HasTraits, HasTraitsRef}; + + use casper_sdk::{ + abi::{CasperABI, StructField}, + abi_generator, + casper::native::{dispatch, dispatch_with, Environment}, + casper_executor_wasm_common::flags::EntryPointFlags, + log, + schema::{SchemaEntryPoint, SchemaType}, + ContractRef, + }; + + #[test] + fn unit_test() { + dispatch(|| { + let mut has_traits = HasTraits::default(); + has_traits.increment(); + }) + .unwrap(); + } + + #[test] + fn trait_has_schema() { + // We can't attach methods to trait itself, but we can generate an "${TRAIT}Ext" struct and + // attach extra information to it. let schema = Trait1::schema(); + let counter_schema = abi_generator::casper_collect_schema(); + + assert_eq!( + counter_schema.type_, + SchemaType::Contract { + state: "vm2_trait::CounterState".to_string(), + } + ); + + // Order of entry point definitions is not guaranteed. + assert_eq!( + BTreeSet::from_iter(counter_schema.entry_points.clone()), + BTreeSet::from_iter([ + SchemaEntryPoint { + name: "get_counter_value".to_string(), + arguments: vec![], + result: "U64".to_string(), + flags: EntryPointFlags::empty() + }, + SchemaEntryPoint { + name: "get_counter_state".to_string(), + arguments: vec![], + result: "vm2_trait::CounterState".to_string(), + flags: EntryPointFlags::empty() + }, + SchemaEntryPoint { + name: "decrement".to_string(), + arguments: vec![], + result: "()".to_string(), + flags: EntryPointFlags::empty() + }, + SchemaEntryPoint { + name: "increment".to_string(), + arguments: vec![], + result: "()".to_string(), + flags: EntryPointFlags::empty() + }, + ]) + ); + } + + #[test] + fn schema_has_traits() { + let schema = abi_generator::casper_collect_schema(); + + assert_eq!( + schema.type_, + SchemaType::Contract { + state: "vm2_trait::HasTraits".to_string() + } + ); + + assert!( + schema.entry_points.iter().any(|e| e.name == "foobar"), + "Method inside impl block" + ); + + assert!( + 
schema.entry_points.iter().any(|e| e.name == "increment"), + "Method inside Counter trait" + ); + + let get_counter_state = schema + .entry_points + .iter() + .find(|e| e.name == "get_counter_state") + .unwrap(); + let counter_state_def = schema + .definitions + .get(&get_counter_state.result) + .expect("Has counter state definition"); + + let expected_definition = vec![StructField { + name: "value".to_string(), + decl: ::declaration(), + }]; + assert_eq!( + counter_state_def + .as_struct() + .expect("Counter State is struct"), + expected_definition.as_slice() + ); + + assert!( + !schema + .entry_points + .iter() + .any(|e| e.name == "counter_state"), + "Trait method marked as private" + ); + assert!( + !schema + .entry_points + .iter() + .any(|e| e.name == "counter_state_mut"), + "Trait method marked as private" + ); + + let fallback = schema + .entry_points + .iter() + .filter_map(|e| if e.name == "fallback" { Some(e) } else { None }) + .next() + .expect("Fallback method present in schema"); + + assert_eq!(fallback.flags, EntryPointFlags::FALLBACK); + } + + #[test] + fn foo() { + let _ = dispatch_with(Environment::default(), || { + super::perform_test(); + }); + + log!("Success"); + } + + #[test] + fn bar() { + let inst = ::new(); + let _call_data = inst.get_counter_value(); + } +} diff --git a/smart_contracts/contracts/vm2/vm2-upgradable-v2/Cargo.toml b/smart_contracts/contracts/vm2/vm2-upgradable-v2/Cargo.toml new file mode 100644 index 0000000000..cfca0ee4fa --- /dev/null +++ b/smart_contracts/contracts/vm2/vm2-upgradable-v2/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "vm2-upgradable-v2" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html +[lib] +crate-type = ["cdylib", "rlib"] + +[dependencies] +casper-contract-macros = { path = "../../../macros" } +casper-contract-sdk = { path = "../../../sdk" } diff --git a/smart_contracts/contracts/vm2/vm2-upgradable-v2/build.rs 
b/smart_contracts/contracts/vm2/vm2-upgradable-v2/build.rs new file mode 100644 index 0000000000..12d5fa6ba6 --- /dev/null +++ b/smart_contracts/contracts/vm2/vm2-upgradable-v2/build.rs @@ -0,0 +1,7 @@ +fn main() { + // Check if target arch is wasm32 and set link flags accordingly + if std::env::var("TARGET").unwrap() == "wasm32-unknown-unknown" { + println!("cargo:rustc-link-arg=--import-memory"); + println!("cargo:rustc-link-arg=--export-table"); + } +} diff --git a/smart_contracts/contracts/vm2/vm2-upgradable-v2/src/lib.rs b/smart_contracts/contracts/vm2/vm2-upgradable-v2/src/lib.rs new file mode 100644 index 0000000000..df1829618d --- /dev/null +++ b/smart_contracts/contracts/vm2/vm2-upgradable-v2/src/lib.rs @@ -0,0 +1,110 @@ +#![cfg_attr(target_arch = "wasm32", no_main)] +#![cfg_attr(target_arch = "wasm32", no_std)] + +use casper_contract_macros::casper; +use casper_contract_sdk::{ + casper::{self, Entity}, + log, + serializers::borsh::BorshDeserialize, +}; + +const CURRENT_VERSION: &str = "v2"; + +#[derive(BorshDeserialize, Debug)] +#[borsh(crate = "casper_contract_sdk::serializers::borsh")] +pub struct UpgradableContractV1 { + /// The current state of the flipper. + value: u8, + /// The owner of the contract. + owner: Entity, +} + +impl Default for UpgradableContractV1 { + fn default() -> Self { + panic!("Unable to instantiate contract without a constructor"); + } +} + +/// This contract implements a simple flipper. +#[derive(Debug)] +#[casper(contract_state)] +pub struct UpgradableContractV2 { + /// The current state of the flipper. + value: u64, + /// The owner of the contract. 
+ owner: Entity, +} + +impl From for UpgradableContractV2 { + fn from(old: UpgradableContractV1) -> Self { + Self { + value: old.value as u64, + owner: old.owner, + } + } +} + +impl Default for UpgradableContractV2 { + fn default() -> Self { + panic!("Unable to instantiate contract without a constructor"); + } +} + +#[casper] +impl UpgradableContractV2 { + #[casper(constructor)] + pub fn new(initial_value: u64) -> Self { + let caller = casper::get_caller(); + Self { + value: initial_value, + owner: caller, + } + } + + #[casper(constructor)] + pub fn default() -> Self { + Self::new(Default::default()) + } + + pub fn increment(&mut self) { + self.increment_by(1); + } + + pub fn increment_by(&mut self, value: u64) { + let old_value = self.value; + self.value = value.wrapping_add(value); + log!( + "Incrementing value by {value} from {} to {}", + old_value, + self.value + ); + } + + pub fn get(&self) -> u64 { + self.value + } + + pub fn version(&self) -> &str { + CURRENT_VERSION + } + + #[casper(ignore_state)] + pub fn migrate() { + log!("Reading old state..."); + let old_state: UpgradableContractV1 = casper::read_state().unwrap(); + log!("Old state {old_state:?}"); + let new_state = UpgradableContractV2::from(old_state); + log!("Success! 
New state: {new_state:?}"); + casper::write_state(&new_state).unwrap(); + } + + #[casper(ignore_state)] + pub fn perform_upgrade() { + let new_code = casper::copy_input(); + log!("V2: New code length: {}", new_code.len()); + log!("V2: New code first 10 bytes: {:?}", &new_code[..10]); + + let upgrade_result = casper::upgrade(&new_code, Some("migrate"), None); + log!("{:?}", upgrade_result); + } +} diff --git a/smart_contracts/contracts/vm2/vm2-upgradable/Cargo.toml b/smart_contracts/contracts/vm2/vm2-upgradable/Cargo.toml new file mode 100644 index 0000000000..af73f837b6 --- /dev/null +++ b/smart_contracts/contracts/vm2/vm2-upgradable/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "vm2-upgradable" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html +[lib] +crate-type = ["cdylib", "rlib"] + +[dependencies] +casper-contract-macros = { path = "../../../macros" } +casper-contract-sdk = { path = "../../../sdk" } diff --git a/smart_contracts/contracts/vm2/vm2-upgradable/build.rs b/smart_contracts/contracts/vm2/vm2-upgradable/build.rs new file mode 100644 index 0000000000..12d5fa6ba6 --- /dev/null +++ b/smart_contracts/contracts/vm2/vm2-upgradable/build.rs @@ -0,0 +1,7 @@ +fn main() { + // Check if target arch is wasm32 and set link flags accordingly + if std::env::var("TARGET").unwrap() == "wasm32-unknown-unknown" { + println!("cargo:rustc-link-arg=--import-memory"); + println!("cargo:rustc-link-arg=--export-table"); + } +} diff --git a/smart_contracts/contracts/vm2/vm2-upgradable/src/lib.rs b/smart_contracts/contracts/vm2/vm2-upgradable/src/lib.rs new file mode 100644 index 0000000000..a536754fa1 --- /dev/null +++ b/smart_contracts/contracts/vm2/vm2-upgradable/src/lib.rs @@ -0,0 +1,78 @@ +#![cfg_attr(target_arch = "wasm32", no_main)] + +use casper_contract_macros::casper; +use casper_contract_sdk::{casper, casper::Entity, log, prelude::*}; + +const CURRENT_VERSION: &str = "v1"; + +/// 
This contract implements a simple flipper. +#[casper(contract_state)] +pub struct UpgradableContract { + /// The current state of the flipper. + value: u8, + /// The owner of the contract. + owner: Entity, +} + +impl Default for UpgradableContract { + fn default() -> Self { + panic!("Unable to instantiate contract without a constructor"); + } +} + +// trait ContractPackage { +// fn versions: BTreeMap<>, + +// } + +#[casper] +impl UpgradableContract { + #[casper(constructor)] + pub fn new(initial_value: u8) -> Self { + let caller = casper::get_caller(); + Self { + value: initial_value, + owner: caller, + } + } + + #[casper(constructor)] + pub fn default() -> Self { + Self::new(Default::default()) + } + + pub fn increment(&mut self) { + self.value += 1; + } + + pub fn get(&self) -> u8 { + self.value + } + + pub fn version(&self) -> &str { + CURRENT_VERSION + } + + // pub fn is_disabled(&self) { + // self.disabled + // } + + // pub fn do_something(&self) { + // if self.disabled { + // panic!("nope") + + // } + // } + + #[skip_arg_parsing] + pub fn perform_upgrade(&self, new_code: Vec) { + if casper::get_caller() != self.owner { + panic!("Only the owner can perform upgrades"); + } + log!("V1: starting upgrade process current value={}", self.value); + log!("New code length: {}", new_code.len()); + log!("New code first 10 bytes: {:?}", &new_code[..10]); + // TODO: Enforce valid wasm validation + casper::upgrade(&new_code, Some("migrate"), None).unwrap(); + } +} diff --git a/smart_contracts/contracts_as/.gitignore b/smart_contracts/contracts_as/.gitignore deleted file mode 100644 index 0a7ee2e21e..0000000000 --- a/smart_contracts/contracts_as/.gitignore +++ /dev/null @@ -1,99 +0,0 @@ -package-lock.json - -# Created by https://www.gitignore.io/api/node -# Edit at https://www.gitignore.io/?templates=node - -### Node ### -# Logs -logs -*.log -npm-debug.log* -yarn-debug.log* -yarn-error.log* -lerna-debug.log* - -# Diagnostic reports (https://nodejs.org/api/report.html) 
-report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json - -# Runtime data -pids -*.pid -*.seed -*.pid.lock - -# Directory for instrumented libs generated by jscoverage/JSCover -lib-cov - -# Coverage directory used by tools like istanbul -coverage -*.lcov - -# nyc test coverage -.nyc_output - -# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files) -.grunt - -# Bower dependency directory (https://bower.io/) -bower_components - -# node-waf configuration -.lock-wscript - -# Compiled binary addons (https://nodejs.org/api/addons.html) -build/Release - -# Dependency directories -node_modules/ -jspm_packages/ - -# TypeScript v1 declaration files -typings/ - -# TypeScript cache -*.tsbuildinfo - -# Optional npm cache directory -.npm - -# Optional eslint cache -.eslintcache - -# Optional REPL history -.node_repl_history - -# Output of 'npm pack' -*.tgz - -# Yarn Integrity file -.yarn-integrity - -# dotenv environment variables file -.env -.env.test - -# parcel-bundler cache (https://parceljs.org/) -.cache - -# next.js build output -.next - -# nuxt.js build output -.nuxt - -# react / gatsby -public/ - -# vuepress build output -.vuepress/dist - -# Serverless directories -.serverless/ - -# FuseBox cache -.fusebox/ - -# DynamoDB Local files -.dynamodb/ - -# End of https://www.gitignore.io/api/node diff --git a/smart_contracts/contracts_as/client/add-bid/assembly/index.ts b/smart_contracts/contracts_as/client/add-bid/assembly/index.ts deleted file mode 100644 index 8d77764c29..0000000000 --- a/smart_contracts/contracts_as/client/add-bid/assembly/index.ts +++ /dev/null @@ -1,68 +0,0 @@ -import * as CL from "../../../../contract_as/assembly"; -import {Error, ErrorCode} from "../../../../contract_as/assembly/error"; -import {U512} from "../../../../contract_as/assembly/bignum"; -import {CLValue} from "../../../../contract_as/assembly/clvalue"; -import {getMainPurse} from "../../../../contract_as/assembly/account"; -import {RuntimeArgs} from 
"../../../../contract_as/assembly/runtime_args"; -import {Pair} from "../../../../contract_as/assembly/pair"; -import {fromBytesU8} from "../../../../contract_as/assembly/bytesrepr"; -import {PublicKey} from "../../../../contract_as/assembly/public_key"; - -const ARG_PUBLIC_KEY = "public_key"; -const ARG_AMOUNT = "amount"; -const ARG_DELEGATION_RATE = "delegation_rate"; - -const METHOD_ADD_BID = "add_bid"; - -export function call(): void { - let auction = CL.getSystemContract(CL.SystemContract.Auction); - let mainPurse = getMainPurse(); - - let publicKeyBytes = CL.getNamedArg(ARG_PUBLIC_KEY); - if (publicKeyBytes === null) { - Error.fromErrorCode(ErrorCode.MissingArgument).revert(); - return; - } - - let publicKeyResult = PublicKey.fromBytes(publicKeyBytes); - if (publicKeyResult.hasError()) { - Error.fromErrorCode(ErrorCode.InvalidArgument).revert(); - return; - } - let publicKey = publicKeyResult.value; - - let amountBytes = CL.getNamedArg(ARG_AMOUNT); - if (amountBytes === null) { - Error.fromErrorCode(ErrorCode.MissingArgument).revert(); - return; - } - - let amountResult = U512.fromBytes(amountBytes); - if (amountResult.hasError()) { - Error.fromErrorCode(ErrorCode.InvalidArgument).revert(); - return; - } - - let amount = amountResult.value; - - let delegationRateBytes = CL.getNamedArg(ARG_DELEGATION_RATE); - if (delegationRateBytes === null) { - Error.fromErrorCode(ErrorCode.MissingArgument).revert(); - return; - } - - let delegationRateResult = fromBytesU8(delegationRateBytes); - if (delegationRateResult.hasError()) { - Error.fromErrorCode(ErrorCode.InvalidArgument).revert(); - return; - } - - let delegationRate = delegationRateResult.value; - - let runtimeArgs = RuntimeArgs.fromArray([ - new Pair(ARG_PUBLIC_KEY, CLValue.fromPublicKey(publicKey)), - new Pair(ARG_AMOUNT, CLValue.fromU512(amount)), - new Pair(ARG_DELEGATION_RATE, CLValue.fromU8(delegationRate)), - ]); - CL.callContract(auction, METHOD_ADD_BID, runtimeArgs); -} diff --git 
a/smart_contracts/contracts_as/client/add-bid/assembly/tsconfig.json b/smart_contracts/contracts_as/client/add-bid/assembly/tsconfig.json deleted file mode 100644 index 505b0fc0d8..0000000000 --- a/smart_contracts/contracts_as/client/add-bid/assembly/tsconfig.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "extends": "../../../../../../../.nvm/versions/node/v10.16.3/lib/node_modules/assemblyscript/std/assembly.json", - "include": [ - "./**/*.ts" - ] -} diff --git a/smart_contracts/contracts_as/client/add-bid/index.js b/smart_contracts/contracts_as/client/add-bid/index.js deleted file mode 100644 index d93882dbe3..0000000000 --- a/smart_contracts/contracts_as/client/add-bid/index.js +++ /dev/null @@ -1,12 +0,0 @@ -const fs = require("fs"); -const compiled = new WebAssembly.Module(fs.readFileSync(__dirname + "/build/bonding.wasm")); -const imports = { - env: { - abort(_msg, _file, line, column) { - console.error("abort called at index.ts:" + line + ":" + column); - } - } -}; -Object.defineProperty(module, "exports", { - get: () => new WebAssembly.Instance(compiled, imports).exports -}); diff --git a/smart_contracts/contracts_as/client/add-bid/package.json b/smart_contracts/contracts_as/client/add-bid/package.json deleted file mode 100644 index 0f15e54d25..0000000000 --- a/smart_contracts/contracts_as/client/add-bid/package.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "scripts": { - "asbuild:optimized": "asc --lib ../../.. 
assembly/index.ts -b ../../../../target_as/add_bid.wasm --optimize --use abort=", - "asbuild": "npm run asbuild:optimized" - }, - "devDependencies": { - "assemblyscript": "^0.8.1" - } -} diff --git a/smart_contracts/contracts_as/client/delegate/assembly/index.ts b/smart_contracts/contracts_as/client/delegate/assembly/index.ts deleted file mode 100644 index 81e1b86a29..0000000000 --- a/smart_contracts/contracts_as/client/delegate/assembly/index.ts +++ /dev/null @@ -1,62 +0,0 @@ -import * as CL from "../../../../contract_as/assembly"; -import {Error, ErrorCode} from "../../../../contract_as/assembly/error"; -import {U512} from "../../../../contract_as/assembly/bignum"; -import {CLValue} from "../../../../contract_as/assembly/clvalue"; -import {RuntimeArgs} from "../../../../contract_as/assembly/runtime_args"; -import {Pair} from "../../../../contract_as/assembly/pair"; -import {PublicKey} from "../../../../contract_as/assembly/public_key"; - -const ARG_DELEGATOR = "delegator"; -const ARG_VALIDATOR = "validator"; -const ARG_AMOUNT = "amount"; -const METHOD_DELEGATE = "delegate"; - -export function call(): void { - let auction = CL.getSystemContract(CL.SystemContract.Auction); - - let delegatorBytes = CL.getNamedArg(ARG_DELEGATOR); - if (delegatorBytes === null) { - Error.fromErrorCode(ErrorCode.MissingArgument).revert(); - return; - } - - let delegatorResult = PublicKey.fromBytes(delegatorBytes); - if (delegatorResult.hasError()) { - Error.fromErrorCode(ErrorCode.InvalidArgument).revert(); - return; - } - let delegator = delegatorResult.value; - - let validatorBytes = CL.getNamedArg(ARG_VALIDATOR); - if (validatorBytes === null) { - Error.fromErrorCode(ErrorCode.MissingArgument).revert(); - return; - } - - let validatorResult = PublicKey.fromBytes(validatorBytes); - if (validatorResult.hasError()) { - Error.fromErrorCode(ErrorCode.InvalidArgument).revert(); - return; - } - let validator = validatorResult.value; - - let amountBytes = CL.getNamedArg(ARG_AMOUNT); - if 
(amountBytes === null) { - Error.fromErrorCode(ErrorCode.MissingArgument).revert(); - return; - } - - let amountResult = U512.fromBytes(amountBytes); - if (amountResult.hasError()) { - Error.fromErrorCode(ErrorCode.InvalidArgument).revert(); - return; - } - let amount = amountResult.value; - - let runtimeArgs = RuntimeArgs.fromArray([ - new Pair(ARG_DELEGATOR, CLValue.fromPublicKey(delegator)), - new Pair(ARG_VALIDATOR, CLValue.fromPublicKey(validator)), - new Pair(ARG_AMOUNT, CLValue.fromU512(amount)), - ]); - CL.callContract(auction, METHOD_DELEGATE, runtimeArgs); -} diff --git a/smart_contracts/contracts_as/client/delegate/assembly/tsconfig.json b/smart_contracts/contracts_as/client/delegate/assembly/tsconfig.json deleted file mode 100644 index 505b0fc0d8..0000000000 --- a/smart_contracts/contracts_as/client/delegate/assembly/tsconfig.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "extends": "../../../../../../../.nvm/versions/node/v10.16.3/lib/node_modules/assemblyscript/std/assembly.json", - "include": [ - "./**/*.ts" - ] -} diff --git a/smart_contracts/contracts_as/client/delegate/index.js b/smart_contracts/contracts_as/client/delegate/index.js deleted file mode 100644 index a28cb9433d..0000000000 --- a/smart_contracts/contracts_as/client/delegate/index.js +++ /dev/null @@ -1,12 +0,0 @@ -const fs = require("fs"); -const compiled = new WebAssembly.Module(fs.readFileSync(__dirname + "/build/unbonding.wasm")); -const imports = { - env: { - abort(_msg, _file, line, column) { - console.error("abort called at index.ts:" + line + ":" + column); - } - } -}; -Object.defineProperty(module, "exports", { - get: () => new WebAssembly.Instance(compiled, imports).exports -}); diff --git a/smart_contracts/contracts_as/client/delegate/package.json b/smart_contracts/contracts_as/client/delegate/package.json deleted file mode 100644 index 548ed851d8..0000000000 --- a/smart_contracts/contracts_as/client/delegate/package.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "scripts": { - 
"asbuild:optimized": "asc --lib ../../.. assembly/index.ts -b ../../../../target_as/delegate.wasm --optimize --use abort=", - "asbuild": "npm run asbuild:optimized" - }, - "devDependencies": { - "assemblyscript": "^0.8.1" - } -} diff --git a/smart_contracts/contracts_as/client/named-purse-payment/assembly/index.ts b/smart_contracts/contracts_as/client/named-purse-payment/assembly/index.ts deleted file mode 100644 index 0589cb35a7..0000000000 --- a/smart_contracts/contracts_as/client/named-purse-payment/assembly/index.ts +++ /dev/null @@ -1,88 +0,0 @@ -import * as CL from "../../../../contract_as/assembly"; -import {getKey} from "../../../../contract_as/assembly"; -import {Error, ErrorCode} from "../../../../contract_as/assembly/error"; -import {CLValue} from "../../../../contract_as/assembly/clvalue"; -import {U512} from "../../../../contract_as/assembly/bignum"; -import {fromBytesString} from "../../../../contract_as/assembly/bytesrepr"; -import {URef} from "../../../../contract_as/assembly/uref"; -import {KeyVariant} from "../../../../contract_as/assembly/key"; -import {transferFromPurseToPurse} from "../../../../contract_as/assembly/purse"; -import {RuntimeArgs} from "../../../../contract_as/assembly/runtime_args"; -import {Pair} from "../../../../contract_as/assembly/pair"; - -const GET_PAYMENT_PURSE = "get_payment_purse"; -const SET_REFUND_PURSE= "set_refund_purse"; -const ARG_AMOUNT = "amount"; -const ARG_PURSE = "purse"; -const ARG_PURSE_NAME = "purse_name"; - -function getPurseURef(): URef | null{ - let purseNameBytes = CL.getNamedArg(ARG_PURSE_NAME); - - let purseName = fromBytesString(purseNameBytes); - if (purseName.hasError()) { - Error.fromErrorCode(ErrorCode.InvalidArgument); - return null; - } - - let purseKey = getKey(purseName.value); - if (purseKey === null) { - Error.fromErrorCode(ErrorCode.InvalidArgument); - return null; - } - - if (purseKey.variant != KeyVariant.UREF_ID) { - Error.fromErrorCode(ErrorCode.UnexpectedKeyVariant); - return null; - 
} - - let purse = purseKey.uref; - - return purse; -} - -export function call(): void { - let maybePurseURef = getPurseURef(); - if (maybePurseURef === null) { - Error.fromErrorCode(ErrorCode.InvalidArgument).revert(); - return; - } - - let purseURef = maybePurseURef; - - let amountBytes = CL.getNamedArg(ARG_AMOUNT); - let amountResult = U512.fromBytes(amountBytes); - if (amountResult.hasError()) { - Error.fromErrorCode(ErrorCode.InvalidArgument).revert(); - return; - } - let amount = amountResult.value; - - let handlePayment = CL.getSystemContract(CL.SystemContract.HandlePayment); - - // Get Payment Purse - let paymentPurseOutput = CL.callContract(handlePayment, GET_PAYMENT_PURSE, new RuntimeArgs()); - - let paymentPurseResult = URef.fromBytes(paymentPurseOutput); - if (paymentPurseResult.hasError()) { - Error.fromErrorCode(ErrorCode.InvalidPurse).revert(); - return; - } - let paymentPurse = paymentPurseResult.value; - - // Set Refund Purse - let runtimeArgs = RuntimeArgs.fromArray([ - new Pair(ARG_PURSE, CLValue.fromURef(purseURef)), - ]); - CL.callContract(handlePayment, SET_REFUND_PURSE, runtimeArgs); - - let error = transferFromPurseToPurse( - purseURef, - paymentPurse, - amount, - ); - if (error !== null) { - error.revert(); - return; - } -} diff --git a/smart_contracts/contracts_as/client/named-purse-payment/assembly/tsconfig.json b/smart_contracts/contracts_as/client/named-purse-payment/assembly/tsconfig.json deleted file mode 100644 index 891cc238d0..0000000000 --- a/smart_contracts/contracts_as/client/named-purse-payment/assembly/tsconfig.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "extends": "assemblyscript/std/assembly.json", - "include": [ - "./**/*.ts" - ] -} diff --git a/smart_contracts/contracts_as/client/named-purse-payment/index.js b/smart_contracts/contracts_as/client/named-purse-payment/index.js deleted file mode 100644 index 710b5d66b2..0000000000 --- a/smart_contracts/contracts_as/client/named-purse-payment/index.js +++ /dev/null @@ -1,12 +0,0 @@ 
-const fs = require("fs"); -const compiled = new WebAssembly.Module(fs.readFileSync(__dirname + "/build/named_purse_payment.wasm")); -const imports = { - env: { - abort(_msg, _file, line, column) { - console.error("abort called at index.ts:" + line + ":" + column); - } - } -}; -Object.defineProperty(module, "exports", { - get: () => new WebAssembly.Instance(compiled, imports).exports -}); diff --git a/smart_contracts/contracts_as/client/named-purse-payment/package.json b/smart_contracts/contracts_as/client/named-purse-payment/package.json deleted file mode 100644 index 38ac6a8333..0000000000 --- a/smart_contracts/contracts_as/client/named-purse-payment/package.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "scripts": { - "asbuild:optimized": "asc --lib ../../.. assembly/index.ts -b ../../../../target_as/named_purse_payment.wasm --optimize --use abort=", - "asbuild": "npm run asbuild:optimized" - }, - "devDependencies": { - "assemblyscript": "^0.8.1" - } -} diff --git a/smart_contracts/contracts_as/client/revert/assembly/index.ts b/smart_contracts/contracts_as/client/revert/assembly/index.ts deleted file mode 100644 index df92f91419..0000000000 --- a/smart_contracts/contracts_as/client/revert/assembly/index.ts +++ /dev/null @@ -1,5 +0,0 @@ -import {Error} from "../../../../contract_as/assembly/error"; - -export function call(): void { - Error.fromUserError(100).revert(); -} diff --git a/smart_contracts/contracts_as/client/revert/assembly/tsconfig.json b/smart_contracts/contracts_as/client/revert/assembly/tsconfig.json deleted file mode 100644 index 505b0fc0d8..0000000000 --- a/smart_contracts/contracts_as/client/revert/assembly/tsconfig.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "extends": "../../../../../../../.nvm/versions/node/v10.16.3/lib/node_modules/assemblyscript/std/assembly.json", - "include": [ - "./**/*.ts" - ] -} diff --git a/smart_contracts/contracts_as/client/revert/index.js b/smart_contracts/contracts_as/client/revert/index.js deleted file mode 100644 index 
8a4fc0eef1..0000000000 --- a/smart_contracts/contracts_as/client/revert/index.js +++ /dev/null @@ -1,12 +0,0 @@ -const fs = require("fs"); -const compiled = new WebAssembly.Module(fs.readFileSync(__dirname + "/build/revert.wasm")); -const imports = { - env: { - abort(_msg, _file, line, column) { - console.error("abort called at index.ts:" + line + ":" + column); - } - } -}; -Object.defineProperty(module, "exports", { - get: () => new WebAssembly.Instance(compiled, imports).exports -}); diff --git a/smart_contracts/contracts_as/client/revert/package.json b/smart_contracts/contracts_as/client/revert/package.json deleted file mode 100644 index 1321c53d2d..0000000000 --- a/smart_contracts/contracts_as/client/revert/package.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "scripts": { - "asbuild:optimized": "asc --lib ../../.. assembly/index.ts -b ../../../../target_as/revert.wasm --optimize --use abort=", - "asbuild": "npm run asbuild:optimized" - }, - "devDependencies": { - "assemblyscript": "^0.8.1" - } -} diff --git a/smart_contracts/contracts_as/client/transfer-to-account-u512/assembly/index.ts b/smart_contracts/contracts_as/client/transfer-to-account-u512/assembly/index.ts deleted file mode 100644 index dadb556210..0000000000 --- a/smart_contracts/contracts_as/client/transfer-to-account-u512/assembly/index.ts +++ /dev/null @@ -1,25 +0,0 @@ -import * as CL from "../../../../contract_as/assembly"; -import {Error, ErrorCode} from "../../../../contract_as/assembly/error"; -import {U512} from "../../../../contract_as/assembly/bignum"; -import {transferToAccount, TransferredTo} from "../../../../contract_as/assembly/purse"; - -const ARG_TARGET = "target"; -const ARG_AMOUNT = "amount"; - - -export function call(): void { - const accountBytes = CL.getNamedArg(ARG_TARGET); - const amountBytes = CL.getNamedArg(ARG_AMOUNT); - const amountResult = U512.fromBytes(amountBytes); - if (amountResult.hasError()){ - Error.fromErrorCode(ErrorCode.InvalidArgument).revert(); - return; - } - let 
amount = amountResult.value; - - const result = transferToAccount(accountBytes, amount); - if (result.isErr) { - result.err.revert(); - return; - } -} diff --git a/smart_contracts/contracts_as/client/transfer-to-account-u512/assembly/tsconfig.json b/smart_contracts/contracts_as/client/transfer-to-account-u512/assembly/tsconfig.json deleted file mode 100644 index 891cc238d0..0000000000 --- a/smart_contracts/contracts_as/client/transfer-to-account-u512/assembly/tsconfig.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "extends": "assemblyscript/std/assembly.json", - "include": [ - "./**/*.ts" - ] -} diff --git a/smart_contracts/contracts_as/client/transfer-to-account-u512/index.js b/smart_contracts/contracts_as/client/transfer-to-account-u512/index.js deleted file mode 100644 index c282f33262..0000000000 --- a/smart_contracts/contracts_as/client/transfer-to-account-u512/index.js +++ /dev/null @@ -1,12 +0,0 @@ -const fs = require("fs"); -const compiled = new WebAssembly.Module(fs.readFileSync(__dirname + "/build/transfer_to_account_u512.wasm")); -const imports = { - env: { - abort(_msg, _file, line, column) { - console.error("abort called at index.ts:" + line + ":" + column); - } - } -}; -Object.defineProperty(module, "exports", { - get: () => new WebAssembly.Instance(compiled, imports).exports -}); diff --git a/smart_contracts/contracts_as/client/transfer-to-account-u512/package.json b/smart_contracts/contracts_as/client/transfer-to-account-u512/package.json deleted file mode 100644 index 2a229aa996..0000000000 --- a/smart_contracts/contracts_as/client/transfer-to-account-u512/package.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "scripts": { - "asbuild:optimized": "asc --lib ../../.. 
assembly/index.ts -b ../../../../target_as/transfer_to_account_u512.wasm --optimize --use abort=", - "asbuild": "npm run asbuild:optimized" - }, - "devDependencies": { - "assemblyscript": "^0.8.1" - } -} diff --git a/smart_contracts/contracts_as/client/undelegate/assembly/index.ts b/smart_contracts/contracts_as/client/undelegate/assembly/index.ts deleted file mode 100644 index 26c19dd023..0000000000 --- a/smart_contracts/contracts_as/client/undelegate/assembly/index.ts +++ /dev/null @@ -1,63 +0,0 @@ -import * as CL from "../../../../contract_as/assembly"; -import {Error, ErrorCode} from "../../../../contract_as/assembly/error"; -import {U512} from "../../../../contract_as/assembly/bignum"; -import {CLValue} from "../../../../contract_as/assembly/clvalue"; -import {RuntimeArgs} from "../../../../contract_as/assembly/runtime_args"; -import {Pair} from "../../../../contract_as/assembly/pair"; -import {PublicKey} from "../../../../contract_as/assembly/public_key"; - -const ARG_AMOUNT = "amount"; -const ARG_DELEGATOR = "delegator"; -const ARG_VALIDATOR = "validator"; -const METHOD_UNDELEGATE = "undelegate"; - -export function call(): void { - let auction = CL.getSystemContract(CL.SystemContract.Auction); - - let delegatorBytes = CL.getNamedArg(ARG_DELEGATOR); - if (delegatorBytes === null) { - Error.fromErrorCode(ErrorCode.MissingArgument).revert(); - return; - } - - let delegatorResult = PublicKey.fromBytes(delegatorBytes); - if (delegatorResult.hasError()) { - Error.fromErrorCode(ErrorCode.InvalidArgument).revert(); - return; - } - let delegator = delegatorResult.value; - - let validatorBytes = CL.getNamedArg(ARG_VALIDATOR); - if (validatorBytes === null) { - Error.fromErrorCode(ErrorCode.MissingArgument).revert(); - return; - } - - let validatorResult = PublicKey.fromBytes(validatorBytes); - if (validatorResult.hasError()) { - Error.fromErrorCode(ErrorCode.InvalidArgument).revert(); - return; - } - let validator = validatorResult.value; - - let amountBytes = 
CL.getNamedArg(ARG_AMOUNT); - if (amountBytes === null) { - Error.fromErrorCode(ErrorCode.MissingArgument).revert(); - return; - } - - let amountResult = U512.fromBytes(amountBytes); - if (amountResult.hasError()) { - Error.fromErrorCode(ErrorCode.InvalidArgument).revert(); - return; - } - let amount = amountResult.value; - - let runtimeArgs = RuntimeArgs.fromArray([ - new Pair(ARG_AMOUNT, CLValue.fromU512(amount)), - new Pair(ARG_DELEGATOR, CLValue.fromPublicKey(delegator)), - new Pair(ARG_VALIDATOR, CLValue.fromPublicKey(validator)), - new Pair(ARG_AMOUNT, CLValue.fromU512(amount)), - ]); - CL.callContract(auction, METHOD_UNDELEGATE, runtimeArgs); -} diff --git a/smart_contracts/contracts_as/client/undelegate/assembly/tsconfig.json b/smart_contracts/contracts_as/client/undelegate/assembly/tsconfig.json deleted file mode 100644 index 505b0fc0d8..0000000000 --- a/smart_contracts/contracts_as/client/undelegate/assembly/tsconfig.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "extends": "../../../../../../../.nvm/versions/node/v10.16.3/lib/node_modules/assemblyscript/std/assembly.json", - "include": [ - "./**/*.ts" - ] -} diff --git a/smart_contracts/contracts_as/client/undelegate/index.js b/smart_contracts/contracts_as/client/undelegate/index.js deleted file mode 100644 index a28cb9433d..0000000000 --- a/smart_contracts/contracts_as/client/undelegate/index.js +++ /dev/null @@ -1,12 +0,0 @@ -const fs = require("fs"); -const compiled = new WebAssembly.Module(fs.readFileSync(__dirname + "/build/unbonding.wasm")); -const imports = { - env: { - abort(_msg, _file, line, column) { - console.error("abort called at index.ts:" + line + ":" + column); - } - } -}; -Object.defineProperty(module, "exports", { - get: () => new WebAssembly.Instance(compiled, imports).exports -}); diff --git a/smart_contracts/contracts_as/client/undelegate/package.json b/smart_contracts/contracts_as/client/undelegate/package.json deleted file mode 100644 index ebe25d8a0f..0000000000 --- 
a/smart_contracts/contracts_as/client/undelegate/package.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "scripts": { - "asbuild:optimized": "asc --lib ../../.. assembly/index.ts -b ../../../../target_as/undelegate.wasm --optimize --use abort=", - "asbuild": "npm run asbuild:optimized" - }, - "devDependencies": { - "assemblyscript": "^0.8.1" - } -} diff --git a/smart_contracts/contracts_as/client/withdraw-bid/assembly/index.ts b/smart_contracts/contracts_as/client/withdraw-bid/assembly/index.ts deleted file mode 100644 index 552404ddfe..0000000000 --- a/smart_contracts/contracts_as/client/withdraw-bid/assembly/index.ts +++ /dev/null @@ -1,49 +0,0 @@ -import * as CL from "../../../../contract_as/assembly"; -import {Error, ErrorCode} from "../../../../contract_as/assembly/error"; -import {U512} from "../../../../contract_as/assembly/bignum"; -import {CLValue} from "../../../../contract_as/assembly/clvalue"; -import {RuntimeArgs} from "../../../../contract_as/assembly/runtime_args"; -import {Pair} from "../../../../contract_as/assembly/pair"; -import {PublicKey} from "../../../../contract_as/assembly/public_key"; - -const ARG_AMOUNT = "amount"; -const ARG_PUBLIC_KEY = "public_key"; -const ARG_UNBOND_PURSE = "unbond_purse"; -const METHOD_WITHDRAW_BID = "withdraw_bid"; - -export function call(): void { - let auction = CL.getSystemContract(CL.SystemContract.Auction); - - let publicKeyBytes = CL.getNamedArg(ARG_PUBLIC_KEY); - if (publicKeyBytes === null) { - Error.fromErrorCode(ErrorCode.MissingArgument).revert(); - return; - } - - let publicKeyResult = PublicKey.fromBytes(publicKeyBytes); - if (publicKeyResult.hasError()) { - Error.fromErrorCode(ErrorCode.InvalidArgument).revert(); - return; - } - let publicKey = publicKeyResult.value; - - let amountBytes = CL.getNamedArg(ARG_AMOUNT); - if (amountBytes === null) { - Error.fromErrorCode(ErrorCode.MissingArgument).revert(); - return; - } - - let amountResult = U512.fromBytes(amountBytes); - if (amountResult.hasError()) { - 
Error.fromErrorCode(ErrorCode.InvalidArgument).revert(); - return; - } - let amount = amountResult.value; - - let runtimeArgs = RuntimeArgs.fromArray([ - new Pair(ARG_AMOUNT, CLValue.fromU512(amount)), - new Pair(ARG_PUBLIC_KEY, CLValue.fromPublicKey(publicKey)), - ]); - - CL.callContract(auction, METHOD_WITHDRAW_BID, runtimeArgs); -} diff --git a/smart_contracts/contracts_as/client/withdraw-bid/assembly/tsconfig.json b/smart_contracts/contracts_as/client/withdraw-bid/assembly/tsconfig.json deleted file mode 100644 index 505b0fc0d8..0000000000 --- a/smart_contracts/contracts_as/client/withdraw-bid/assembly/tsconfig.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "extends": "../../../../../../../.nvm/versions/node/v10.16.3/lib/node_modules/assemblyscript/std/assembly.json", - "include": [ - "./**/*.ts" - ] -} diff --git a/smart_contracts/contracts_as/client/withdraw-bid/index.js b/smart_contracts/contracts_as/client/withdraw-bid/index.js deleted file mode 100644 index a28cb9433d..0000000000 --- a/smart_contracts/contracts_as/client/withdraw-bid/index.js +++ /dev/null @@ -1,12 +0,0 @@ -const fs = require("fs"); -const compiled = new WebAssembly.Module(fs.readFileSync(__dirname + "/build/unbonding.wasm")); -const imports = { - env: { - abort(_msg, _file, line, column) { - console.error("abort called at index.ts:" + line + ":" + column); - } - } -}; -Object.defineProperty(module, "exports", { - get: () => new WebAssembly.Instance(compiled, imports).exports -}); diff --git a/smart_contracts/contracts_as/client/withdraw-bid/package.json b/smart_contracts/contracts_as/client/withdraw-bid/package.json deleted file mode 100644 index 9ceea755d4..0000000000 --- a/smart_contracts/contracts_as/client/withdraw-bid/package.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "scripts": { - "asbuild:optimized": "asc --lib ../../.. 
assembly/index.ts -b ../../../../target_as/withdraw_bid.wasm --optimize --use abort=", - "asbuild": "npm run asbuild:optimized" - }, - "devDependencies": { - "assemblyscript": "^0.8.1" - } -} diff --git a/smart_contracts/contracts_as/test/add-update-associated-key/assembly/index.ts b/smart_contracts/contracts_as/test/add-update-associated-key/assembly/index.ts deleted file mode 100644 index 38c6e3b94c..0000000000 --- a/smart_contracts/contracts_as/test/add-update-associated-key/assembly/index.ts +++ /dev/null @@ -1,32 +0,0 @@ -// The entry file of your WebAssembly module. -import * as CL from "../../../../contract_as/assembly"; -import {Error, ErrorCode} from "../../../../contract_as/assembly/error"; -import {addAssociatedKey, AddKeyFailure, updateAssociatedKey, UpdateKeyFailure} from "../../../../contract_as/assembly/account"; -import {typedToArray} from "../../../../contract_as/assembly/utils"; -import {AccountHash} from "../../../../contract_as/assembly/key"; - - -const INIT_WEIGHT: u8 = 1; -const MOD_WEIGHT: u8 = 2; - -const ARG_ACCOUNT = "account"; - -export function call(): void { - let accountHashBytes = CL.getNamedArg(ARG_ACCOUNT); - const accountHashResult = AccountHash.fromBytes(accountHashBytes); - if (accountHashResult.hasError()) { - Error.fromUserError(4464 + accountHashResult.error).revert(); - return; - } - const accountHash = accountHashResult.value; - - if (addAssociatedKey(accountHash, INIT_WEIGHT) != AddKeyFailure.Ok) { - Error.fromUserError(4464).revert(); - return; - } - - if (updateAssociatedKey(accountHash, MOD_WEIGHT) != UpdateKeyFailure.Ok) { - Error.fromUserError(4464 + 1).revert(); - return; - } -} diff --git a/smart_contracts/contracts_as/test/add-update-associated-key/assembly/tsconfig.json b/smart_contracts/contracts_as/test/add-update-associated-key/assembly/tsconfig.json deleted file mode 100644 index 505b0fc0d8..0000000000 --- a/smart_contracts/contracts_as/test/add-update-associated-key/assembly/tsconfig.json +++ /dev/null @@ -1,6 
+0,0 @@ -{ - "extends": "../../../../../../../.nvm/versions/node/v10.16.3/lib/node_modules/assemblyscript/std/assembly.json", - "include": [ - "./**/*.ts" - ] -} diff --git a/smart_contracts/contracts_as/test/add-update-associated-key/index.js b/smart_contracts/contracts_as/test/add-update-associated-key/index.js deleted file mode 100644 index 5a6adb78f3..0000000000 --- a/smart_contracts/contracts_as/test/add-update-associated-key/index.js +++ /dev/null @@ -1,12 +0,0 @@ -const fs = require("fs"); -const compiled = new WebAssembly.Module(fs.readFileSync(__dirname + "/build/do_nothing.wasm")); -const imports = { - env: { - abort(_msg, _file, line, column) { - console.error("abort called at index.ts:" + line + ":" + column); - } - } -}; -Object.defineProperty(module, "exports", { - get: () => new WebAssembly.Instance(compiled, imports).exports -}); diff --git a/smart_contracts/contracts_as/test/add-update-associated-key/package.json b/smart_contracts/contracts_as/test/add-update-associated-key/package.json deleted file mode 100644 index d633d779a0..0000000000 --- a/smart_contracts/contracts_as/test/add-update-associated-key/package.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "scripts": { - "asbuild:optimized": "asc --lib ../../.. 
assembly/index.ts -b ../../../../target_as/add_update_associated_key.wasm --optimize --use abort=", - "asbuild": "npm run asbuild:optimized" - }, - "devDependencies": { - "assemblyscript": "^0.8.1" - } -} diff --git a/smart_contracts/contracts_as/test/authorized-keys/assembly/index.ts b/smart_contracts/contracts_as/test/authorized-keys/assembly/index.ts deleted file mode 100644 index afbb02dddd..0000000000 --- a/smart_contracts/contracts_as/test/authorized-keys/assembly/index.ts +++ /dev/null @@ -1,46 +0,0 @@ -import * as CL from "../../../../contract_as/assembly"; -import {Error, ErrorCode} from "../../../../contract_as/assembly/error"; -import {fromBytesString, fromBytesI32} from "../../../../contract_as/assembly/bytesrepr"; -import {arrayToTyped} from "../../../../contract_as/assembly/utils"; -import {Key, AccountHash} from "../../../../contract_as/assembly/key" -import {addAssociatedKey, AddKeyFailure, ActionType, setActionThreshold, SetThresholdFailure} from "../../../../contract_as/assembly/account"; - -const ARG_KEY_MANAGEMENT_THRESHOLD = "key_management_threshold"; -const ARG_DEPLOY_THRESHOLD = "deploy_threshold"; - -export function call(): void { - let publicKeyBytes = new Array(32); - publicKeyBytes.fill(123); - let accountHash = new AccountHash(arrayToTyped(publicKeyBytes)); - - const addResult = addAssociatedKey(accountHash, 100); - switch (addResult) { - case AddKeyFailure.DuplicateKey: - break; - case AddKeyFailure.Ok: - break; - default: - Error.fromUserError(50).revert(); - break; - } - - let keyManagementThresholdBytes = CL.getNamedArg(ARG_KEY_MANAGEMENT_THRESHOLD); - let keyManagementThreshold = keyManagementThresholdBytes[0]; - - let deployThresholdBytes = CL.getNamedArg(ARG_DEPLOY_THRESHOLD); - let deployThreshold = deployThresholdBytes[0]; - - if (keyManagementThreshold != 0) { - if (setActionThreshold(ActionType.KeyManagement, keyManagementThreshold) != SetThresholdFailure.Ok) { - // TODO: Create standard Error from those enum values - 
Error.fromUserError(4464 + 1).revert(); - } - } - if (deployThreshold != 0) { - if (setActionThreshold(ActionType.Deployment, deployThreshold) != SetThresholdFailure.Ok) { - Error.fromUserError(4464).revert(); - return; - } - } - -} diff --git a/smart_contracts/contracts_as/test/authorized-keys/assembly/tsconfig.json b/smart_contracts/contracts_as/test/authorized-keys/assembly/tsconfig.json deleted file mode 100644 index 505b0fc0d8..0000000000 --- a/smart_contracts/contracts_as/test/authorized-keys/assembly/tsconfig.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "extends": "../../../../../../../.nvm/versions/node/v10.16.3/lib/node_modules/assemblyscript/std/assembly.json", - "include": [ - "./**/*.ts" - ] -} diff --git a/smart_contracts/contracts_as/test/authorized-keys/index.js b/smart_contracts/contracts_as/test/authorized-keys/index.js deleted file mode 100644 index 5a6adb78f3..0000000000 --- a/smart_contracts/contracts_as/test/authorized-keys/index.js +++ /dev/null @@ -1,12 +0,0 @@ -const fs = require("fs"); -const compiled = new WebAssembly.Module(fs.readFileSync(__dirname + "/build/do_nothing.wasm")); -const imports = { - env: { - abort(_msg, _file, line, column) { - console.error("abort called at index.ts:" + line + ":" + column); - } - } -}; -Object.defineProperty(module, "exports", { - get: () => new WebAssembly.Instance(compiled, imports).exports -}); diff --git a/smart_contracts/contracts_as/test/authorized-keys/package.json b/smart_contracts/contracts_as/test/authorized-keys/package.json deleted file mode 100644 index 2123be0925..0000000000 --- a/smart_contracts/contracts_as/test/authorized-keys/package.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "scripts": { - "asbuild:optimized": "asc --lib ../../.. 
assembly/index.ts -b ../../../../target_as/authorized_keys.wasm --optimize --use abort=", - "asbuild": "npm run asbuild:optimized" - }, - "devDependencies": { - "assemblyscript": "^0.8.1" - } -} diff --git a/smart_contracts/contracts_as/test/create-purse-01/assembly/index.ts b/smart_contracts/contracts_as/test/create-purse-01/assembly/index.ts deleted file mode 100644 index 88adb2c318..0000000000 --- a/smart_contracts/contracts_as/test/create-purse-01/assembly/index.ts +++ /dev/null @@ -1,30 +0,0 @@ -//@ts-nocheck -import * as CL from "../../../../contract_as/assembly"; -import {Error, ErrorCode} from "../../../../contract_as/assembly/error"; -import {fromBytesString} from "../../../../contract_as/assembly/bytesrepr"; -import {Key} from "../../../../contract_as/assembly/key"; -import {putKey} from "../../../../contract_as/assembly"; -import {createPurse} from "../../../../contract_as/assembly/purse"; -import {URef} from "../../../../contract_as/assembly/uref"; - -const ARG_PURSE_NAME = "purse_name"; - -export function delegate(): void { - // purse name arg - const purseNameArg = CL.getNamedArg(ARG_PURSE_NAME); - const purseNameResult = fromBytesString(purseNameArg); - if (purseNameResult.hasError()) { - Error.fromErrorCode(ErrorCode.PurseNotCreated).revert(); - return; - } - let purseName = purseNameResult.value; - - const purse = createPurse(); - - const key = Key.fromURef(purse); - putKey(purseName, key); -} - -export function call(): void { - delegate(); -} diff --git a/smart_contracts/contracts_as/test/create-purse-01/assembly/tsconfig.json b/smart_contracts/contracts_as/test/create-purse-01/assembly/tsconfig.json deleted file mode 100644 index 505b0fc0d8..0000000000 --- a/smart_contracts/contracts_as/test/create-purse-01/assembly/tsconfig.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "extends": "../../../../../../../.nvm/versions/node/v10.16.3/lib/node_modules/assemblyscript/std/assembly.json", - "include": [ - "./**/*.ts" - ] -} diff --git 
a/smart_contracts/contracts_as/test/create-purse-01/index.js b/smart_contracts/contracts_as/test/create-purse-01/index.js deleted file mode 100644 index eb1a7fac05..0000000000 --- a/smart_contracts/contracts_as/test/create-purse-01/index.js +++ /dev/null @@ -1,12 +0,0 @@ -const fs = require("fs"); -const compiled = new WebAssembly.Module(fs.readFileSync(__dirname + "/build/create_purse_01.wasm")); -const imports = { - env: { - abort(_msg, _file, line, column) { - console.error("abort called at index.ts:" + line + ":" + column); - } - } -}; -Object.defineProperty(module, "exports", { - get: () => new WebAssembly.Instance(compiled, imports).exports -}); diff --git a/smart_contracts/contracts_as/test/create-purse-01/package.json b/smart_contracts/contracts_as/test/create-purse-01/package.json deleted file mode 100644 index 63488c0eb3..0000000000 --- a/smart_contracts/contracts_as/test/create-purse-01/package.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "scripts": { - "asbuild:optimized": "asc --lib ../../.. 
assembly/index.ts -b ../../../../target_as/create_purse_01.wasm --optimize --use abort=", - "asbuild": "npm run asbuild:optimized" - }, - "devDependencies": { - "assemblyscript": "^0.8.1" - } -} diff --git a/smart_contracts/contracts_as/test/do-nothing-stored-caller/assembly/index.ts b/smart_contracts/contracts_as/test/do-nothing-stored-caller/assembly/index.ts deleted file mode 100644 index 902430532b..0000000000 --- a/smart_contracts/contracts_as/test/do-nothing-stored-caller/assembly/index.ts +++ /dev/null @@ -1,26 +0,0 @@ -//@ts-nocheck -import * as CL from "../../../../contract_as/assembly"; -import {fromBytesString, toBytesU32} from "../../../../contract_as/assembly/bytesrepr"; -import {CLValue} from "../../../../contract_as/assembly/clvalue"; -import {RuntimeArgs} from "../../../../contract_as/assembly/runtime_args"; -import {Pair} from "../../../../contract_as/assembly/pair"; -import {Option} from "../../../../contract_as/assembly/option"; -import {arrayToTyped} from "../../../../contract_as/assembly/utils"; - -const ENTRY_FUNCTION_NAME = "delegate"; -const PURSE_NAME_ARG_NAME = "purse_name"; -const ARG_CONTRACT_PACKAGE = "contract_package"; -const ARG_NEW_PURSE_NAME = "new_purse_name"; -const ARG_VERSION = "version"; - -export function call(): void { - let contractPackageHash = CL.getNamedArg(ARG_CONTRACT_PACKAGE); - const newPurseNameBytes = CL.getNamedArg(ARG_NEW_PURSE_NAME); - const newPurseName = fromBytesString(newPurseNameBytes).unwrap(); - const versionNumber = CL.getNamedArg(ARG_VERSION)[0]; - let contractVersion = new Option(arrayToTyped(toBytesU32(versionNumber))); - let runtimeArgs = RuntimeArgs.fromArray([ - new Pair(PURSE_NAME_ARG_NAME, CLValue.fromString(newPurseName)), - ]); - CL.callVersionedContract(contractPackageHash, contractVersion, ENTRY_FUNCTION_NAME, runtimeArgs); -} diff --git a/smart_contracts/contracts_as/test/do-nothing-stored-caller/assembly/tsconfig.json 
b/smart_contracts/contracts_as/test/do-nothing-stored-caller/assembly/tsconfig.json deleted file mode 100644 index 505b0fc0d8..0000000000 --- a/smart_contracts/contracts_as/test/do-nothing-stored-caller/assembly/tsconfig.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "extends": "../../../../../../../.nvm/versions/node/v10.16.3/lib/node_modules/assemblyscript/std/assembly.json", - "include": [ - "./**/*.ts" - ] -} diff --git a/smart_contracts/contracts_as/test/do-nothing-stored-caller/index.js b/smart_contracts/contracts_as/test/do-nothing-stored-caller/index.js deleted file mode 100644 index 6d5c2dca18..0000000000 --- a/smart_contracts/contracts_as/test/do-nothing-stored-caller/index.js +++ /dev/null @@ -1,12 +0,0 @@ -const fs = require("fs"); -const compiled = new WebAssembly.Module(fs.readFileSync(__dirname + "/build/do_nothing_stored_caller.wasm")); -const imports = { - env: { - abort(_msg, _file, line, column) { - console.error("abort called at index.ts:" + line + ":" + column); - } - } -}; -Object.defineProperty(module, "exports", { - get: () => new WebAssembly.Instance(compiled, imports).exports -}); diff --git a/smart_contracts/contracts_as/test/do-nothing-stored-caller/package.json b/smart_contracts/contracts_as/test/do-nothing-stored-caller/package.json deleted file mode 100644 index ebe91da71d..0000000000 --- a/smart_contracts/contracts_as/test/do-nothing-stored-caller/package.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "scripts": { - "asbuild:optimized": "asc --lib ../../.. 
assembly/index.ts -b ../../../../target_as/do_nothing_stored_caller.wasm --optimize --use abort=", - "asbuild": "npm run asbuild:optimized" - }, - "devDependencies": { - "assemblyscript": "^0.8.1" - } -} diff --git a/smart_contracts/contracts_as/test/do-nothing-stored-upgrader/assembly/index.ts b/smart_contracts/contracts_as/test/do-nothing-stored-upgrader/assembly/index.ts deleted file mode 100644 index 4e2fbae288..0000000000 --- a/smart_contracts/contracts_as/test/do-nothing-stored-upgrader/assembly/index.ts +++ /dev/null @@ -1,55 +0,0 @@ -//@ts-nocheck -import * as CL from "../../../../contract_as/assembly"; -import {Error, ErrorCode} from "../../../../contract_as/assembly/error"; -import {fromBytesString} from "../../../../contract_as/assembly/bytesrepr"; -import {Key} from "../../../../contract_as/assembly/key"; -import {Pair} from "../../../../contract_as/assembly/pair"; -import {URef} from "../../../../contract_as/assembly/uref"; -import {Pair} from "../../../../contract_as/assembly/pair"; -import {createPurse} from "../../../../contract_as/assembly/purse"; -import {CLType, CLTypeTag} from "../../../../contract_as/assembly/clvalue"; -import * as CreatePurse01 from "../../create-purse-01/assembly"; - -const ENTRY_FUNCTION_NAME = "delegate"; -const DO_NOTHING_PACKAGE_HASH_KEY_NAME = "do_nothing_package_hash"; -const DO_NOTHING_ACCESS_KEY_NAME = "do_nothing_access"; - -export function delegate(): void { - let key = new Uint8Array(32); - for (var i = 0; i < 32; i++) { - key[i] = 1; - } - CL.putKey("called_do_nothing_ver_2", Key.fromHash(key)); - CreatePurse01.delegate(); -} - -export function call(): void { - let entryPoints = new CL.EntryPoints(); - let entryPoint = new CL.EntryPoint( - ENTRY_FUNCTION_NAME, - new Array>(), - new CLType(CLTypeTag.Unit), - new CL.PublicAccess(), - CL.EntryPointType.Session); - entryPoints.addEntryPoint(entryPoint); - - let doNothingPackageHash = CL.getKey(DO_NOTHING_PACKAGE_HASH_KEY_NAME); - if (doNothingPackageHash === null) { - 
Error.fromErrorCode(ErrorCode.None).revert(); - return; - } - - let doNothingURef = CL.getKey(DO_NOTHING_ACCESS_KEY_NAME); - if (doNothingURef === null) { - Error.fromErrorCode(ErrorCode.None).revert(); - return; - } - - const result = CL.addContractVersion( - doNothingPackageHash.hash, - entryPoints, - new Array>(), - ); - - CL.putKey("end of upgrade", Key.fromHash(result.contractHash)); -} diff --git a/smart_contracts/contracts_as/test/do-nothing-stored-upgrader/assembly/tsconfig.json b/smart_contracts/contracts_as/test/do-nothing-stored-upgrader/assembly/tsconfig.json deleted file mode 100644 index 505b0fc0d8..0000000000 --- a/smart_contracts/contracts_as/test/do-nothing-stored-upgrader/assembly/tsconfig.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "extends": "../../../../../../../.nvm/versions/node/v10.16.3/lib/node_modules/assemblyscript/std/assembly.json", - "include": [ - "./**/*.ts" - ] -} diff --git a/smart_contracts/contracts_as/test/do-nothing-stored-upgrader/index.js b/smart_contracts/contracts_as/test/do-nothing-stored-upgrader/index.js deleted file mode 100644 index 3572e101bf..0000000000 --- a/smart_contracts/contracts_as/test/do-nothing-stored-upgrader/index.js +++ /dev/null @@ -1,12 +0,0 @@ -const fs = require("fs"); -const compiled = new WebAssembly.Module(fs.readFileSync(__dirname + "/build/do_nothing_stored_upgrader.wasm")); -const imports = { - env: { - abort(_msg, _file, line, column) { - console.error("abort called at index.ts:" + line + ":" + column); - } - } -}; -Object.defineProperty(module, "exports", { - get: () => new WebAssembly.Instance(compiled, imports).exports -}); diff --git a/smart_contracts/contracts_as/test/do-nothing-stored-upgrader/package.json b/smart_contracts/contracts_as/test/do-nothing-stored-upgrader/package.json deleted file mode 100644 index 9baf276a41..0000000000 --- a/smart_contracts/contracts_as/test/do-nothing-stored-upgrader/package.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "scripts": { - "asbuild:optimized": "asc --lib 
../../.. assembly/index.ts -b ../../../../target_as/do_nothing_stored_upgrader.wasm --optimize --use abort=", - "asbuild": "npm run asbuild:optimized" - }, - "devDependencies": { - "assemblyscript": "^0.8.1" - } -} diff --git a/smart_contracts/contracts_as/test/do-nothing-stored/assembly/index.ts b/smart_contracts/contracts_as/test/do-nothing-stored/assembly/index.ts deleted file mode 100644 index d3d7ce29a0..0000000000 --- a/smart_contracts/contracts_as/test/do-nothing-stored/assembly/index.ts +++ /dev/null @@ -1,36 +0,0 @@ -//@ts-nocheck -import * as CL from "../../../../contract_as/assembly"; -import {Error, ErrorCode} from "../../../../contract_as/assembly/error"; -import {fromBytesString, toBytesMap} from "../../../../contract_as/assembly/bytesrepr"; -import {Key} from "../../../../contract_as/assembly/key"; -import {CLValue, CLType, CLTypeTag} from "../../../../contract_as/assembly/clvalue"; -import {Pair} from "../../../../contract_as/assembly/pair"; - -const ENTRY_FUNCTION_NAME = "delegate"; -const HASH_KEY_NAME = "do_nothing_hash"; -const PACKAGE_HASH_KEY_NAME = "do_nothing_package_hash"; -const ACCESS_KEY_NAME = "do_nothing_access"; -const CONTRACT_VERSION = "contract_version"; - -export function delegate(): void { - // no-op -} - -export function call(): void { - let entryPoints = new CL.EntryPoints(); - let entryPoint = new CL.EntryPoint("delegate", new Array>(), new CLType(CLTypeTag.Unit), new CL.PublicAccess(), CL.EntryPointType.Contract); - entryPoints.addEntryPoint(entryPoint); - - const result = CL.newContract( - entryPoints, - null, - PACKAGE_HASH_KEY_NAME, - ACCESS_KEY_NAME, - ); - const key = Key.create(CLValue.fromI32(result.contractVersion)); - if (key === null) { - return; - } - CL.putKey(CONTRACT_VERSION, key); - CL.putKey(HASH_KEY_NAME, Key.fromHash(result.contractHash)); -} diff --git a/smart_contracts/contracts_as/test/do-nothing-stored/assembly/tsconfig.json b/smart_contracts/contracts_as/test/do-nothing-stored/assembly/tsconfig.json 
deleted file mode 100644 index 505b0fc0d8..0000000000 --- a/smart_contracts/contracts_as/test/do-nothing-stored/assembly/tsconfig.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "extends": "../../../../../../../.nvm/versions/node/v10.16.3/lib/node_modules/assemblyscript/std/assembly.json", - "include": [ - "./**/*.ts" - ] -} diff --git a/smart_contracts/contracts_as/test/do-nothing-stored/index.js b/smart_contracts/contracts_as/test/do-nothing-stored/index.js deleted file mode 100644 index 83c6d5b5b2..0000000000 --- a/smart_contracts/contracts_as/test/do-nothing-stored/index.js +++ /dev/null @@ -1,12 +0,0 @@ -const fs = require("fs"); -const compiled = new WebAssembly.Module(fs.readFileSync(__dirname + "/build/do_nothing_stored.wasm")); -const imports = { - env: { - abort(_msg, _file, line, column) { - console.error("abort called at index.ts:" + line + ":" + column); - } - } -}; -Object.defineProperty(module, "exports", { - get: () => new WebAssembly.Instance(compiled, imports).exports -}); diff --git a/smart_contracts/contracts_as/test/do-nothing-stored/package.json b/smart_contracts/contracts_as/test/do-nothing-stored/package.json deleted file mode 100644 index 0da74cfb26..0000000000 --- a/smart_contracts/contracts_as/test/do-nothing-stored/package.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "scripts": { - "asbuild:optimized": "asc --lib ../../.. assembly/index.ts -b ../../../../target_as/do_nothing_stored.wasm --optimize --use abort=", - "asbuild": "npm run asbuild:optimized" - }, - "devDependencies": { - "assemblyscript": "^0.8.1" - } -} diff --git a/smart_contracts/contracts_as/test/do-nothing/assembly/index.ts b/smart_contracts/contracts_as/test/do-nothing/assembly/index.ts deleted file mode 100644 index 829ecd7b16..0000000000 --- a/smart_contracts/contracts_as/test/do-nothing/assembly/index.ts +++ /dev/null @@ -1,5 +0,0 @@ -// The entry file of your WebAssembly module. - -export function call(): void { - // This body intentionally left empty. 
-} diff --git a/smart_contracts/contracts_as/test/do-nothing/assembly/tsconfig.json b/smart_contracts/contracts_as/test/do-nothing/assembly/tsconfig.json deleted file mode 100644 index 505b0fc0d8..0000000000 --- a/smart_contracts/contracts_as/test/do-nothing/assembly/tsconfig.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "extends": "../../../../../../../.nvm/versions/node/v10.16.3/lib/node_modules/assemblyscript/std/assembly.json", - "include": [ - "./**/*.ts" - ] -} diff --git a/smart_contracts/contracts_as/test/do-nothing/index.js b/smart_contracts/contracts_as/test/do-nothing/index.js deleted file mode 100644 index 5a6adb78f3..0000000000 --- a/smart_contracts/contracts_as/test/do-nothing/index.js +++ /dev/null @@ -1,12 +0,0 @@ -const fs = require("fs"); -const compiled = new WebAssembly.Module(fs.readFileSync(__dirname + "/build/do_nothing.wasm")); -const imports = { - env: { - abort(_msg, _file, line, column) { - console.error("abort called at index.ts:" + line + ":" + column); - } - } -}; -Object.defineProperty(module, "exports", { - get: () => new WebAssembly.Instance(compiled, imports).exports -}); diff --git a/smart_contracts/contracts_as/test/do-nothing/package.json b/smart_contracts/contracts_as/test/do-nothing/package.json deleted file mode 100644 index 160d9f5507..0000000000 --- a/smart_contracts/contracts_as/test/do-nothing/package.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "scripts": { - "asbuild:optimized": "asc --lib ../../.. 
assembly/index.ts -b ../../../../target_as/do_nothing.wasm --optimize --use abort=", - "asbuild": "npm run asbuild:optimized" - }, - "devDependencies": { - "assemblyscript": "^0.8.1" - } -} diff --git a/smart_contracts/contracts_as/test/endless-loop/assembly/index.ts b/smart_contracts/contracts_as/test/endless-loop/assembly/index.ts deleted file mode 100644 index d9e40f0a1c..0000000000 --- a/smart_contracts/contracts_as/test/endless-loop/assembly/index.ts +++ /dev/null @@ -1,22 +0,0 @@ -import {getMainPurse} from "../../../../contract_as/assembly/account"; -import {Key} from "../../../../contract_as/assembly/key"; -import {Error, ErrorCode} from "../../../../contract_as/assembly/error"; -import {toBytesArrayU8} from "../../../../contract_as/assembly/bytesrepr"; -import {CLValue, CLTypeTag, CLType} from "../../../../contract_as/assembly/clvalue"; -import { U512 } from "../../../../contract_as/assembly/bignum"; - -export function call(): void { - let key = Key.create(new CLValue(new Array(0), new CLType(CLTypeTag.Unit))); - if (key === null) { - Error.fromErrorCode(ErrorCode.Unhandled).revert(); - return; - } - - const bytes = new Array(4096); - const serialized = toBytesArrayU8(bytes); - - while(true){ - getMainPurse(); - key.write(new CLValue(serialized, CLType.list(new CLType(CLTypeTag.U8)))); - } -} diff --git a/smart_contracts/contracts_as/test/endless-loop/assembly/tsconfig.json b/smart_contracts/contracts_as/test/endless-loop/assembly/tsconfig.json deleted file mode 100644 index 505b0fc0d8..0000000000 --- a/smart_contracts/contracts_as/test/endless-loop/assembly/tsconfig.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "extends": "../../../../../../../.nvm/versions/node/v10.16.3/lib/node_modules/assemblyscript/std/assembly.json", - "include": [ - "./**/*.ts" - ] -} diff --git a/smart_contracts/contracts_as/test/endless-loop/index.js b/smart_contracts/contracts_as/test/endless-loop/index.js deleted file mode 100644 index d06677981f..0000000000 --- 
a/smart_contracts/contracts_as/test/endless-loop/index.js +++ /dev/null @@ -1,12 +0,0 @@ -const fs = require("fs"); -const compiled = new WebAssembly.Module(fs.readFileSync(__dirname + "/build/endless_loop.wasm")); -const imports = { - env: { - abort(_msg, _file, line, column) { - console.error("abort called at index.ts:" + line + ":" + column); - } - } -}; -Object.defineProperty(module, "exports", { - get: () => new WebAssembly.Instance(compiled, imports).exports -}); diff --git a/smart_contracts/contracts_as/test/endless-loop/package.json b/smart_contracts/contracts_as/test/endless-loop/package.json deleted file mode 100644 index 4349827a65..0000000000 --- a/smart_contracts/contracts_as/test/endless-loop/package.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "scripts": { - "asbuild:optimized": "asc --lib ../../.. assembly/index.ts -b ../../../../target_as/endless_loop.wasm --optimize --use abort=", - "asbuild": "npm run asbuild:optimized" - }, - "devDependencies": { - "assemblyscript": "^0.8.1" - } -} diff --git a/smart_contracts/contracts_as/test/get-arg/assembly/index.ts b/smart_contracts/contracts_as/test/get-arg/assembly/index.ts deleted file mode 100644 index e172f278dc..0000000000 --- a/smart_contracts/contracts_as/test/get-arg/assembly/index.ts +++ /dev/null @@ -1,36 +0,0 @@ -//@ts-nocheck -import * as CL from "../../../../contract_as/assembly"; -import {Error, ErrorCode} from "../../../../contract_as/assembly/error"; -import {U512} from "../../../../contract_as/assembly/bignum"; -import {fromBytesString} from "../../../../contract_as/assembly/bytesrepr"; - -const EXPECTED_STRING = "Hello, world!"; -const EXPECTED_NUM = 42; - -const ARG_VALUE0 = "value0"; -const ARG_VALUE1 = "value1"; - -export function call(): void { - const stringArg = CL.getNamedArg(ARG_VALUE0); - const stringValResult = fromBytesString(stringArg) - if (stringValResult.hasError()) { - Error.fromErrorCode(ErrorCode.InvalidArgument).revert(); - return; - } - let stringVal = 
stringValResult.value; - if (stringVal != EXPECTED_STRING){ - unreachable(); - return; - } - const u512Arg = CL.getNamedArg(ARG_VALUE1); - const u512ValResult = U512.fromBytes(u512Arg); - if (u512ValResult.hasError() || u512Arg.length > u512ValResult.position) { - Error.fromErrorCode(ErrorCode.InvalidArgument).revert(); - return; - } - let u512Val = u512ValResult.value; - if (u512Val != U512.fromU64(EXPECTED_NUM)){ - unreachable(); - return; - } -} diff --git a/smart_contracts/contracts_as/test/get-arg/assembly/tsconfig.json b/smart_contracts/contracts_as/test/get-arg/assembly/tsconfig.json deleted file mode 100644 index 505b0fc0d8..0000000000 --- a/smart_contracts/contracts_as/test/get-arg/assembly/tsconfig.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "extends": "../../../../../../../.nvm/versions/node/v10.16.3/lib/node_modules/assemblyscript/std/assembly.json", - "include": [ - "./**/*.ts" - ] -} diff --git a/smart_contracts/contracts_as/test/get-arg/index.js b/smart_contracts/contracts_as/test/get-arg/index.js deleted file mode 100644 index ca8d04b519..0000000000 --- a/smart_contracts/contracts_as/test/get-arg/index.js +++ /dev/null @@ -1,12 +0,0 @@ -const fs = require("fs"); -const compiled = new WebAssembly.Module(fs.readFileSync(__dirname + "/build/get_arg.wasm")); -const imports = { - env: { - abort(_msg, _file, line, column) { - console.error("abort called at index.ts:" + line + ":" + column); - } - } -}; -Object.defineProperty(module, "exports", { - get: () => new WebAssembly.Instance(compiled, imports).exports -}); diff --git a/smart_contracts/contracts_as/test/get-arg/package.json b/smart_contracts/contracts_as/test/get-arg/package.json deleted file mode 100644 index 280b387ee6..0000000000 --- a/smart_contracts/contracts_as/test/get-arg/package.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "scripts": { - "asbuild:optimized": "asc --lib ../../.. 
assembly/index.ts -b ../../../../target_as/get_arg.wasm --optimize --use abort=", - "asbuild": "npm run asbuild:optimized" - }, - "devDependencies": { - "assemblyscript": "^0.8.1" - } -} diff --git a/smart_contracts/contracts_as/test/get-blocktime/assembly/index.ts b/smart_contracts/contracts_as/test/get-blocktime/assembly/index.ts deleted file mode 100644 index c9c1bdbec7..0000000000 --- a/smart_contracts/contracts_as/test/get-blocktime/assembly/index.ts +++ /dev/null @@ -1,17 +0,0 @@ -import * as CL from "../../../../contract_as/assembly"; -import {Error, ErrorCode} from "../../../../contract_as/assembly/error"; -import {fromBytesU64} from "../../../../contract_as/assembly/bytesrepr"; - -const ARG_KNOWN_BLOCK_TIME = "known_block_time"; - -export function call(): void { - const knownBlockTimeBytes = CL.getNamedArg(ARG_KNOWN_BLOCK_TIME); - const knownBlockTime = fromBytesU64(knownBlockTimeBytes); - if (knownBlockTime.hasError()) { - Error.fromErrorCode(ErrorCode.InvalidArgument).revert(); - return; - } - - const blockTime = CL.getBlockTime(); - assert(blockTime == knownBlockTime.value); -} diff --git a/smart_contracts/contracts_as/test/get-blocktime/assembly/tsconfig.json b/smart_contracts/contracts_as/test/get-blocktime/assembly/tsconfig.json deleted file mode 100644 index 505b0fc0d8..0000000000 --- a/smart_contracts/contracts_as/test/get-blocktime/assembly/tsconfig.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "extends": "../../../../../../../.nvm/versions/node/v10.16.3/lib/node_modules/assemblyscript/std/assembly.json", - "include": [ - "./**/*.ts" - ] -} diff --git a/smart_contracts/contracts_as/test/get-blocktime/index.js b/smart_contracts/contracts_as/test/get-blocktime/index.js deleted file mode 100644 index 5a6adb78f3..0000000000 --- a/smart_contracts/contracts_as/test/get-blocktime/index.js +++ /dev/null @@ -1,12 +0,0 @@ -const fs = require("fs"); -const compiled = new WebAssembly.Module(fs.readFileSync(__dirname + "/build/do_nothing.wasm")); -const imports = { 
- env: { - abort(_msg, _file, line, column) { - console.error("abort called at index.ts:" + line + ":" + column); - } - } -}; -Object.defineProperty(module, "exports", { - get: () => new WebAssembly.Instance(compiled, imports).exports -}); diff --git a/smart_contracts/contracts_as/test/get-blocktime/package.json b/smart_contracts/contracts_as/test/get-blocktime/package.json deleted file mode 100644 index 07dbb6a565..0000000000 --- a/smart_contracts/contracts_as/test/get-blocktime/package.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "scripts": { - "asbuild:optimized": "asc --lib ../../.. assembly/index.ts -b ../../../../target_as/get_blocktime.wasm --optimize --use abort=", - "asbuild": "npm run asbuild:optimized" - }, - "devDependencies": { - "assemblyscript": "^0.8.1" - } -} diff --git a/smart_contracts/contracts_as/test/get-caller/assembly/index.ts b/smart_contracts/contracts_as/test/get-caller/assembly/index.ts deleted file mode 100644 index b191efa951..0000000000 --- a/smart_contracts/contracts_as/test/get-caller/assembly/index.ts +++ /dev/null @@ -1,19 +0,0 @@ -import * as CL from "../../../../contract_as/assembly"; -import {Error, ErrorCode} from "../../../../contract_as/assembly/error"; -import {typedToArray, checkArraysEqual} from "../../../../contract_as/assembly/utils"; -import {AccountHash} from "../../../../contract_as/assembly/key"; - -const ARG_ACCOUNT = "account"; - -export function call(): void { - const knownAccountHashBytes = CL.getNamedArg(ARG_ACCOUNT); - let knownAccountHashResult = AccountHash.fromBytes(knownAccountHashBytes); - if (knownAccountHashResult.hasError()) { - Error.fromErrorCode(ErrorCode.InvalidArgument).revert(); - return; - } - const knownAccountHash = knownAccountHashResult.value; - const caller = CL.getCaller(); - - assert(caller == knownAccountHash); -} diff --git a/smart_contracts/contracts_as/test/get-caller/assembly/tsconfig.json b/smart_contracts/contracts_as/test/get-caller/assembly/tsconfig.json deleted file mode 100644 index 
505b0fc0d8..0000000000 --- a/smart_contracts/contracts_as/test/get-caller/assembly/tsconfig.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "extends": "../../../../../../../.nvm/versions/node/v10.16.3/lib/node_modules/assemblyscript/std/assembly.json", - "include": [ - "./**/*.ts" - ] -} diff --git a/smart_contracts/contracts_as/test/get-caller/index.js b/smart_contracts/contracts_as/test/get-caller/index.js deleted file mode 100644 index 5a6adb78f3..0000000000 --- a/smart_contracts/contracts_as/test/get-caller/index.js +++ /dev/null @@ -1,12 +0,0 @@ -const fs = require("fs"); -const compiled = new WebAssembly.Module(fs.readFileSync(__dirname + "/build/do_nothing.wasm")); -const imports = { - env: { - abort(_msg, _file, line, column) { - console.error("abort called at index.ts:" + line + ":" + column); - } - } -}; -Object.defineProperty(module, "exports", { - get: () => new WebAssembly.Instance(compiled, imports).exports -}); diff --git a/smart_contracts/contracts_as/test/get-caller/package.json b/smart_contracts/contracts_as/test/get-caller/package.json deleted file mode 100644 index 88581728f9..0000000000 --- a/smart_contracts/contracts_as/test/get-caller/package.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "scripts": { - "asbuild:optimized": "asc --lib ../../.. assembly/index.ts -b ../../../../target_as/get_caller.wasm --optimize --use abort=", - "asbuild": "npm run asbuild:optimized" - }, - "devDependencies": { - "assemblyscript": "^0.8.1" - } -} diff --git a/smart_contracts/contracts_as/test/get-phase-payment/assembly/index.ts b/smart_contracts/contracts_as/test/get-phase-payment/assembly/index.ts deleted file mode 100644 index 0a6596bab4..0000000000 --- a/smart_contracts/contracts_as/test/get-phase-payment/assembly/index.ts +++ /dev/null @@ -1,60 +0,0 @@ -// The entry file of your WebAssembly module. 
-import * as CL from "../../../../contract_as/assembly"; -import {Error, ErrorCode} from "../../../../contract_as/assembly/error"; -import {U512} from "../../../../contract_as/assembly/bignum"; -import {URef} from "../../../../contract_as/assembly/uref"; -import {RuntimeArgs} from "../../../../contract_as/assembly/runtime_args"; -import {getMainPurse} from "../../../../contract_as/assembly/account"; -import {transferFromPurseToPurse} from "../../../../contract_as/assembly/purse"; - -const ARG_PHASE = "phase"; -const ARG_AMOUNT = "amount"; -const HANDLE_PAYMENT_ACTION = "get_payment_purse"; - -function standardPayment(amount: U512): void { - let handlePayment = CL.getSystemContract(CL.SystemContract.HandlePayment); - - let mainPurse = getMainPurse(); - - let output = CL.callContract(handlePayment, HANDLE_PAYMENT_ACTION, new RuntimeArgs()); - - let paymentPurseResult = URef.fromBytes(output); - if (paymentPurseResult.hasError()) { - Error.fromErrorCode(ErrorCode.InvalidPurse).revert(); - return; - } - let paymentPurse = paymentPurseResult.value; - - let error = transferFromPurseToPurse( - mainPurse, - paymentPurse, - amount, - ); - if (error !== null) { - error.revert(); - return; - } -} - -export function call(): void { - const amountBytes = CL.getNamedArg(ARG_AMOUNT); - let amountResult = U512.fromBytes(amountBytes); - if (amountResult.hasError()) { - Error.fromErrorCode(ErrorCode.InvalidArgument).revert(); - return; - } - let amount = amountResult.value; - - const phaseBytes = CL.getNamedArg(ARG_PHASE); - if (phaseBytes.length != 1) { - Error.fromErrorCode(ErrorCode.InvalidArgument).revert(); - return; - } - - const phase = phaseBytes[0]; - - const caller = CL.getPhase(); - assert(phase == caller); - - standardPayment(amount); -} diff --git a/smart_contracts/contracts_as/test/get-phase-payment/assembly/tsconfig.json b/smart_contracts/contracts_as/test/get-phase-payment/assembly/tsconfig.json deleted file mode 100644 index 505b0fc0d8..0000000000 --- 
a/smart_contracts/contracts_as/test/get-phase-payment/assembly/tsconfig.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "extends": "../../../../../../../.nvm/versions/node/v10.16.3/lib/node_modules/assemblyscript/std/assembly.json", - "include": [ - "./**/*.ts" - ] -} diff --git a/smart_contracts/contracts_as/test/get-phase-payment/index.js b/smart_contracts/contracts_as/test/get-phase-payment/index.js deleted file mode 100644 index 5a6adb78f3..0000000000 --- a/smart_contracts/contracts_as/test/get-phase-payment/index.js +++ /dev/null @@ -1,12 +0,0 @@ -const fs = require("fs"); -const compiled = new WebAssembly.Module(fs.readFileSync(__dirname + "/build/do_nothing.wasm")); -const imports = { - env: { - abort(_msg, _file, line, column) { - console.error("abort called at index.ts:" + line + ":" + column); - } - } -}; -Object.defineProperty(module, "exports", { - get: () => new WebAssembly.Instance(compiled, imports).exports -}); diff --git a/smart_contracts/contracts_as/test/get-phase-payment/package.json b/smart_contracts/contracts_as/test/get-phase-payment/package.json deleted file mode 100644 index 753385cdda..0000000000 --- a/smart_contracts/contracts_as/test/get-phase-payment/package.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "scripts": { - "asbuild:optimized": "asc --lib ../../.. 
assembly/index.ts -b ../../../../target_as/get_phase_payment.wasm --optimize --use abort=", - "asbuild": "npm run asbuild:optimized" - }, - "devDependencies": { - "assemblyscript": "^0.8.1" - } -} diff --git a/smart_contracts/contracts_as/test/get-phase/assembly/index.ts b/smart_contracts/contracts_as/test/get-phase/assembly/index.ts deleted file mode 100644 index 75e08c83e4..0000000000 --- a/smart_contracts/contracts_as/test/get-phase/assembly/index.ts +++ /dev/null @@ -1,17 +0,0 @@ -import * as CL from "../../../../contract_as/assembly"; -import {Error, ErrorCode} from "../../../../contract_as/assembly/error"; - -const ARG_PHASE = "phase"; - -export function call(): void { - const phaseBytes = CL.getNamedArg(ARG_PHASE); - if (phaseBytes.length != 1) { - Error.fromErrorCode(ErrorCode.InvalidArgument).revert(); - return; - } - - const phase = phaseBytes[0]; - - const caller = CL.getPhase(); - assert(phase == caller); -} diff --git a/smart_contracts/contracts_as/test/get-phase/assembly/tsconfig.json b/smart_contracts/contracts_as/test/get-phase/assembly/tsconfig.json deleted file mode 100644 index 505b0fc0d8..0000000000 --- a/smart_contracts/contracts_as/test/get-phase/assembly/tsconfig.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "extends": "../../../../../../../.nvm/versions/node/v10.16.3/lib/node_modules/assemblyscript/std/assembly.json", - "include": [ - "./**/*.ts" - ] -} diff --git a/smart_contracts/contracts_as/test/get-phase/index.js b/smart_contracts/contracts_as/test/get-phase/index.js deleted file mode 100644 index 5a6adb78f3..0000000000 --- a/smart_contracts/contracts_as/test/get-phase/index.js +++ /dev/null @@ -1,12 +0,0 @@ -const fs = require("fs"); -const compiled = new WebAssembly.Module(fs.readFileSync(__dirname + "/build/do_nothing.wasm")); -const imports = { - env: { - abort(_msg, _file, line, column) { - console.error("abort called at index.ts:" + line + ":" + column); - } - } -}; -Object.defineProperty(module, "exports", { - get: () => new 
WebAssembly.Instance(compiled, imports).exports -}); diff --git a/smart_contracts/contracts_as/test/get-phase/package.json b/smart_contracts/contracts_as/test/get-phase/package.json deleted file mode 100644 index 9d4502ed40..0000000000 --- a/smart_contracts/contracts_as/test/get-phase/package.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "scripts": { - "asbuild:optimized": "asc --lib ../../.. assembly/index.ts -b ../../../../target_as/get_phase.wasm --optimize --use abort=", - "asbuild": "npm run asbuild:optimized" - }, - "devDependencies": { - "assemblyscript": "^0.8.1" - } -} diff --git a/smart_contracts/contracts_as/test/groups/assembly/index.ts b/smart_contracts/contracts_as/test/groups/assembly/index.ts deleted file mode 100644 index b2160d9059..0000000000 --- a/smart_contracts/contracts_as/test/groups/assembly/index.ts +++ /dev/null @@ -1,241 +0,0 @@ -//@ts-nocheck -import * as CL from "../../../../contract_as/assembly"; -import { Error, ErrorCode } from "../../../../contract_as/assembly/error"; -import { Key } from "../../../../contract_as/assembly/key"; -import { URef } from "../../../../contract_as/assembly/uref"; -import { CLValue, CLType, CLTypeTag } from "../../../../contract_as/assembly/clvalue"; -import { Pair } from "../../../../contract_as/assembly/pair"; -import { RuntimeArgs } from "../../../../contract_as/assembly/runtime_args"; -import { Option } from "../../../../contract_as/assembly/option"; -import { toBytesU32 } from "../../../../contract_as/assembly/bytesrepr"; -import { arrayToTyped } from "../../../../contract_as/assembly/utils"; - -const CONTRACT_INITIAL_VERSION: u8 = 1; -const PACKAGE_HASH_KEY = "package_hash_key"; -const PACKAGE_ACCESS_KEY = "package_access_key"; -const RESTRICTED_CONTRACT = "restricted_contract"; -const RESTRICTED_SESSION = "restricted_session"; -const RESTRICTED_SESSION_CALLER = "restricted_session_caller"; -const UNRESTRICTED_CONTRACT_CALLER = "unrestricted_contract_caller"; -const RESTRICTED_CONTRACT_CALLER_AS_SESSION = 
"restricted_contract_caller_as_session"; -const UNCALLABLE_SESSION = "uncallable_session"; -const UNCALLABLE_CONTRACT = "uncallable_contract"; -const CALL_RESTRICTED_ENTRY_POINTS = "call_restricted_entry_points"; -const ARG_PACKAGE_HASH = "package_hash"; - -export function restricted_session(): void { } - -export function restricted_contract(): void { } - -export function restricted_session_caller(): void { - let packageHashBytes = CL.getNamedArg(ARG_PACKAGE_HASH); - let packageKey = Key.fromBytes(packageHashBytes).unwrap(); - let contractVersion = new Option(arrayToTyped(toBytesU32(CONTRACT_INITIAL_VERSION))); - CL.callVersionedContract( - packageKey.hash, - contractVersion, - RESTRICTED_SESSION, - new RuntimeArgs(), - ); -} - -function contract_caller(): void { - let packageHashBytes = CL.getNamedArg(ARG_PACKAGE_HASH); - let packageKey = Key.fromBytes(packageHashBytes).unwrap(); - let contractVersion = new Option(arrayToTyped(toBytesU32(CONTRACT_INITIAL_VERSION))); - CL.callVersionedContract( - packageKey.hash, - contractVersion, - RESTRICTED_CONTRACT, - new RuntimeArgs(), - ); -} - -export function unrestricted_contract_caller(): void { - contract_caller(); -} - -export function restricted_contract_caller_as_session(): void { - contract_caller(); -} - -export function uncallable_session(): void { } - -export function uncallable_contract(): void { } - -export function call_restricted_entry_points(): void { - // We're aggressively removing exports that aren't exposed through contract header so test - // ensures that those exports are still inside WASM. 
- uncallable_session(); - uncallable_contract(); -} - - -function createGroup(packageHash: Uint8Array): URef { - let key = Key.create(CLValue.fromU64(0)); - if (key === null) { - Error.fromErrorCode(ErrorCode.Formatting).revert(); - return unreachable(); - } - - CL.putKey("saved_uref", key); - - let existingURefs: Array = [key.uref]; - - let newURefs = CL.createContractUserGroup( - packageHash, - "Group 1", - 1, - existingURefs, - ); - - if (newURefs.length != 1) { - Error.fromUserError(4464 + 1000 + 1).revert(); - return unreachable(); - } - return newURefs[0]; -} - -function createEntryPoints(): CL.EntryPoints { - let entryPoints = new CL.EntryPoints(); - let restrictedSession = new CL.EntryPoint( - RESTRICTED_SESSION, - new Array>(), - new CLType(CLTypeTag.Unit), - new CL.GroupAccess(["Group 1"]), - CL.EntryPointType.Session, - ); - entryPoints.addEntryPoint(restrictedSession); - - let restricted_contract = new CL.EntryPoint( - RESTRICTED_CONTRACT, - new Array>(), - new CLType(CLTypeTag.Unit), - new CL.GroupAccess(["Group 1"]), - CL.EntryPointType.Contract, - ); - entryPoints.addEntryPoint(restricted_contract); - - let restrictedSessionCallerParams = new Array>(); - restrictedSessionCallerParams.push(new Pair(ARG_PACKAGE_HASH, new CLType(CLTypeTag.Key))); - let restricted_session_caller = new CL.EntryPoint( - RESTRICTED_SESSION_CALLER, - restrictedSessionCallerParams, - new CLType(CLTypeTag.Unit), - new CL.PublicAccess(), - CL.EntryPointType.Session, - ); - entryPoints.addEntryPoint(restricted_session_caller); - - let restricted_contract2 = new CL.EntryPoint( - RESTRICTED_CONTRACT, - new Array>(), - new CLType(CLTypeTag.Unit), - new CL.GroupAccess(["Group 1"]), - CL.EntryPointType.Contract, - ); - entryPoints.addEntryPoint(restricted_contract2); - - let unrestricted_contract_caller = new CL.EntryPoint( - UNRESTRICTED_CONTRACT_CALLER, - new Array>(), - new CLType(CLTypeTag.Unit), - // Made public because we've tested deploy level auth into a contract in - // 
RESTRICTED_CONTRACT entrypoint - new CL.PublicAccess(), - // NOTE: Public contract authorizes any contract call, because this contract has groups - // uref in its named keys - CL.EntryPointType.Contract, - ); - entryPoints.addEntryPoint(unrestricted_contract_caller); - - let unrestricted_contract_caller_as_session = new CL.EntryPoint( - RESTRICTED_CONTRACT_CALLER_AS_SESSION, - new Array>(), - new CLType(CLTypeTag.Unit), - // Made public because we've tested deploy level auth into a contract in - // RESTRICTED_CONTRACT entrypoint - new CL.PublicAccess(), - // NOTE: Public contract authorizes any contract call, because this contract has groups - // uref in its named keys - CL.EntryPointType.Session, - ); - entryPoints.addEntryPoint(unrestricted_contract_caller_as_session); - - let uncallable_session = new CL.EntryPoint( - UNCALLABLE_SESSION, - new Array>(), - new CLType(CLTypeTag.Unit), - // Made public because we've tested deploy level auth into a contract in - // RESTRICTED_CONTRACT entrypoint - new CL.GroupAccess([]), - // NOTE: Public contract authorizes any contract call, because this contract has groups - // uref in its named keys - CL.EntryPointType.Session, - ); - entryPoints.addEntryPoint(uncallable_session); - - let uncallable_contract = new CL.EntryPoint( - UNCALLABLE_CONTRACT, - new Array>(), - new CLType(CLTypeTag.Unit), - // Made public because we've tested deploy level auth into a contract in - // RESTRICTED_CONTRACT entrypoint - new CL.GroupAccess([]), - // NOTE: Public contract authorizes any contract call, because this contract has groups - // uref in its named keys - CL.EntryPointType.Session, - ); - entryPoints.addEntryPoint(uncallable_contract); - - // Directly calls entryPoints that are protected with empty group of lists to verify that even - // though they're not callable externally, they're still visible in the WASM. 
- let call_restricted_entry_points = new CL.EntryPoint( - CALL_RESTRICTED_ENTRY_POINTS, - new Array>(), - new CLType(CLTypeTag.Unit), - // Made public because we've tested deploy level auth into a contract in - // RESTRICTED_CONTRACT entrypoint - new CL.PublicAccess(), - // NOTE: Public contract authorizes any contract call, because this contract has groups - // uref in its named keys - CL.EntryPointType.Session, - ); - entryPoints.addEntryPoint(call_restricted_entry_points); - - return entryPoints; -} - -function installVersion1( - contractPackageHash: Uint8Array, - restrictedURef: URef, -): void { - let contractVariable = Key.create(CLValue.fromI32(0)); - if (contractVariable === null) { - Error.fromErrorCode(ErrorCode.Formatting).revert(); - unreachable(); - return; - } - - let namedKeys = new Array>(); - namedKeys.push(new Pair("contract_named_key", contractVariable)); - namedKeys.push(new Pair("restricted_uref", Key.fromURef(restrictedURef))); - - let entryPoints = createEntryPoints(); - - const result = CL.addContractVersion( - contractPackageHash, - entryPoints, - namedKeys, - ); -} - - -export function call(): void { - let createResult = CL.createContractPackageAtHash(); - CL.putKey(PACKAGE_HASH_KEY, Key.fromHash(createResult.packageHash)); - CL.putKey(PACKAGE_ACCESS_KEY, Key.fromURef(createResult.accessURef)); - - let restrictedURef = createGroup(createResult.packageHash); - installVersion1(createResult.packageHash, restrictedURef); -} diff --git a/smart_contracts/contracts_as/test/groups/assembly/tsconfig.json b/smart_contracts/contracts_as/test/groups/assembly/tsconfig.json deleted file mode 100644 index 505b0fc0d8..0000000000 --- a/smart_contracts/contracts_as/test/groups/assembly/tsconfig.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "extends": "../../../../../../../.nvm/versions/node/v10.16.3/lib/node_modules/assemblyscript/std/assembly.json", - "include": [ - "./**/*.ts" - ] -} diff --git a/smart_contracts/contracts_as/test/groups/index.js 
b/smart_contracts/contracts_as/test/groups/index.js deleted file mode 100644 index 5a6adb78f3..0000000000 --- a/smart_contracts/contracts_as/test/groups/index.js +++ /dev/null @@ -1,12 +0,0 @@ -const fs = require("fs"); -const compiled = new WebAssembly.Module(fs.readFileSync(__dirname + "/build/do_nothing.wasm")); -const imports = { - env: { - abort(_msg, _file, line, column) { - console.error("abort called at index.ts:" + line + ":" + column); - } - } -}; -Object.defineProperty(module, "exports", { - get: () => new WebAssembly.Instance(compiled, imports).exports -}); diff --git a/smart_contracts/contracts_as/test/groups/package.json b/smart_contracts/contracts_as/test/groups/package.json deleted file mode 100644 index 9eeeca5801..0000000000 --- a/smart_contracts/contracts_as/test/groups/package.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "scripts": { - "asbuild:optimized": "asc --lib ../../.. assembly/index.ts -b ../../../../target_as/groups.wasm --optimize --use abort=", - "asbuild": "npm run asbuild:optimized" - }, - "devDependencies": { - "assemblyscript": "^0.8.1" - } -} diff --git a/smart_contracts/contracts_as/test/key-management-thresholds/assembly/index.ts b/smart_contracts/contracts_as/test/key-management-thresholds/assembly/index.ts deleted file mode 100644 index ecc8e5025a..0000000000 --- a/smart_contracts/contracts_as/test/key-management-thresholds/assembly/index.ts +++ /dev/null @@ -1,135 +0,0 @@ -import * as CL from "../../../../contract_as/assembly"; -import {Error, ErrorCode} from "../../../../contract_as/assembly/error"; -import {fromBytesString} from "../../../../contract_as/assembly/bytesrepr"; -import {arrayToTyped} from "../../../../contract_as/assembly/utils"; -import {AccountHash} from "../../../../contract_as/assembly/key"; -import {addAssociatedKey, AddKeyFailure, - setActionThreshold, ActionType, SetThresholdFailure, - updateAssociatedKey, UpdateKeyFailure, - removeAssociatedKey, RemoveKeyFailure} from 
"../../../../contract_as/assembly/account"; - -const ARG_STAGE = "stage"; - -export function call(): void { - let stageBytes = CL.getNamedArg(ARG_STAGE); - let stageResult = fromBytesString(stageBytes); - if (stageResult.hasError()) { - Error.fromErrorCode(ErrorCode.InvalidArgument).revert(); - return; - } - let stage = stageResult.value; - - let key42sBytes = new Array(32); - key42sBytes.fill(42); - let key42s = new AccountHash(arrayToTyped(key42sBytes)); - - let key43sBytes = new Array(32); - key43sBytes.fill(43); - let key43s = new AccountHash(arrayToTyped(key43sBytes)); - - let key1sBytes = new Array(32); - key1sBytes.fill(1); - let key1s = new AccountHash(arrayToTyped(key1sBytes)); - - if (stage == "init") { - if (addAssociatedKey(key42s, 100) != AddKeyFailure.Ok) { - Error.fromUserError(4464).revert(); - return; - } - if (addAssociatedKey(key43s, 1) != AddKeyFailure.Ok) { - Error.fromUserError(4464 + 1).revert(); - return; - } - if (addAssociatedKey(key1s, 1) != AddKeyFailure.Ok) { - Error.fromUserError(4464 + 2).revert(); - return; - } - - if (setActionThreshold(ActionType.KeyManagement, 101) != SetThresholdFailure.Ok) { - Error.fromUserError(4464 + 3).revert(); - return; - } - } - else if (stage == "test-permission-denied") { - let key44sBytes = new Array(32); - key44sBytes.fill(44); - let key44s = new AccountHash(arrayToTyped(key44sBytes)); - switch (addAssociatedKey(key44s, 1)) { - case AddKeyFailure.Ok: - Error.fromUserError(200).revert(); - break; - case AddKeyFailure.PermissionDenied: - break; - default: - Error.fromUserError(201).revert(); - break; - } - - let key43sBytes = new Array(32); - key43sBytes.fill(43); - let key43s = new AccountHash(arrayToTyped(key43sBytes)); - - switch (updateAssociatedKey(key43s, 2)) { - case UpdateKeyFailure.Ok: - Error.fromUserError(300).revert(); - break; - case UpdateKeyFailure.PermissionDenied: - break; - default: - Error.fromUserError(301).revert(); - break; - } - - switch (removeAssociatedKey(key43s)) { - case 
RemoveKeyFailure.Ok: - Error.fromUserError(400).revert(); - break; - case RemoveKeyFailure.PermissionDenied: - break; - default: - Error.fromUserError(401).revert(); - break; - } - - switch (setActionThreshold(ActionType.KeyManagement, 255)) { - case SetThresholdFailure.Ok: - Error.fromUserError(500).revert(); - break; - case SetThresholdFailure.PermissionDeniedError: - break; - default: - Error.fromUserError(501).revert(); - break; - } - } - else if (stage == "test-key-mgmnt-succeed") { - let key44sBytes = new Array(32); - key44sBytes.fill(44); - let key44s = new AccountHash(arrayToTyped(key44sBytes)); - - // Has to be executed with keys of total weight >= 254 - if (addAssociatedKey(key44s, 1) != AddKeyFailure.Ok) { - Error.fromUserError(4464 + 4).revert(); - return; - } - - // Updates [43;32] key weight created in init stage - if (updateAssociatedKey(key44s, 2) != UpdateKeyFailure.Ok) { - Error.fromUserError(4464 + 5).revert(); - return; - } - // Removes [43;32] key created in init stage - if (removeAssociatedKey(key44s) != RemoveKeyFailure.Ok) { - Error.fromUserError(4464 + 6).revert(); - return; - } - // Sets action threshodl - if (setActionThreshold(ActionType.KeyManagement, 100) != SetThresholdFailure.Ok) { - Error.fromUserError(4464 + 7).revert(); - return; - } - } - else { - Error.fromUserError(1).revert(); - } -} diff --git a/smart_contracts/contracts_as/test/key-management-thresholds/assembly/tsconfig.json b/smart_contracts/contracts_as/test/key-management-thresholds/assembly/tsconfig.json deleted file mode 100644 index 505b0fc0d8..0000000000 --- a/smart_contracts/contracts_as/test/key-management-thresholds/assembly/tsconfig.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "extends": "../../../../../../../.nvm/versions/node/v10.16.3/lib/node_modules/assemblyscript/std/assembly.json", - "include": [ - "./**/*.ts" - ] -} diff --git a/smart_contracts/contracts_as/test/key-management-thresholds/index.js 
b/smart_contracts/contracts_as/test/key-management-thresholds/index.js deleted file mode 100644 index 5a6adb78f3..0000000000 --- a/smart_contracts/contracts_as/test/key-management-thresholds/index.js +++ /dev/null @@ -1,12 +0,0 @@ -const fs = require("fs"); -const compiled = new WebAssembly.Module(fs.readFileSync(__dirname + "/build/do_nothing.wasm")); -const imports = { - env: { - abort(_msg, _file, line, column) { - console.error("abort called at index.ts:" + line + ":" + column); - } - } -}; -Object.defineProperty(module, "exports", { - get: () => new WebAssembly.Instance(compiled, imports).exports -}); diff --git a/smart_contracts/contracts_as/test/key-management-thresholds/package.json b/smart_contracts/contracts_as/test/key-management-thresholds/package.json deleted file mode 100644 index d9f5813f49..0000000000 --- a/smart_contracts/contracts_as/test/key-management-thresholds/package.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "scripts": { - "asbuild:optimized": "asc --lib ../../.. assembly/index.ts -b ../../../../target_as/key_management_thresholds.wasm --optimize --use abort=", - "asbuild": "npm run asbuild:optimized" - }, - "devDependencies": { - "assemblyscript": "^0.8.1" - } -} diff --git a/smart_contracts/contracts_as/test/list-named-keys/assembly/index.ts b/smart_contracts/contracts_as/test/list-named-keys/assembly/index.ts deleted file mode 100644 index aa16c8cd20..0000000000 --- a/smart_contracts/contracts_as/test/list-named-keys/assembly/index.ts +++ /dev/null @@ -1,94 +0,0 @@ -//@ts-nocheck -import * as CL from "../../../../contract_as/assembly"; -import {Error, ErrorCode} from "../../../../contract_as/assembly/error"; -import {fromBytesMap, fromBytesString} from "../../../../contract_as/assembly/bytesrepr"; -import {Key} from "../../../../contract_as/assembly/key"; -import {checkItemsEqual} from "../../../../contract_as/assembly/utils"; - -const ARG_INITIAL_NAMED_KEYS = "initial_named_args"; -const ARG_NEW_NAMED_KEYS = "new_named_keys"; - -enum 
CustomError { - MissingInitialNamedKeys = 0, - InvalidInitialNamedKeys = 1, - MissingNewNamedKeys = 2, - InvalidNewNamedKeys = 3, - MissingActualNamedKeys = 4464, - MismatchedKeys = 4505, -} - -export function call(): void { - let expectedInitialNamedKeysBytes = CL.getNamedArg(ARG_INITIAL_NAMED_KEYS); - - const mapResult = fromBytesMap( - expectedInitialNamedKeysBytes, - fromBytesString, - Key.fromBytes - ); - if (mapResult.hasError()) { - Error.fromUserError(CustomError.InvalidInitialNamedKeys).revert(); - return; - } - let expectedInitialNamedKeys = mapResult.value; - - - let actualNamedKeys = CL.listNamedKeys(); - if (actualNamedKeys === null) { - Error.fromUserError(CustomError.MissingActualNamedKeys).revert(); - return; - } - - - if (!checkItemsEqual(expectedInitialNamedKeys, actualNamedKeys)) { - Error.fromUserError(CustomError.MismatchedKeys).revert(); - return; - } - - let newNamedKeysBytes = CL.getNamedArg(ARG_NEW_NAMED_KEYS); - const mapResult2 = fromBytesMap( - newNamedKeysBytes, - fromBytesString, - Key.fromBytes - ); - if (mapResult2.hasError()) { - Error.fromUserError(CustomError.InvalidNewNamedKeys).revert(); - return; - } - let newNamedKeys = mapResult2.value; - - let expectedNamedKeys = expectedInitialNamedKeys; - - for (let i = 0; i < newNamedKeys.length; i++) { - const namedKey = newNamedKeys[i]; - CL.putKey(namedKey.first, namedKey.second); - expectedNamedKeys.push(namedKey); - - const actualNamedKeys = CL.listNamedKeys(); - assert(checkItemsEqual(expectedNamedKeys, actualNamedKeys)); - } - - - let allKeyNames = new Array(); - for (let i = 0; i < expectedNamedKeys.length; i++) { - allKeyNames.push(expectedNamedKeys[i].first); - } - - for (let i = 0; i < allKeyNames.length; i++) { - CL.removeKey(allKeyNames[i]); - - // TODO: remove on an ordered map, or reconsider giving Map a try with Map.remove - let removed = false; - for (let j = 0; j < expectedNamedKeys.length; j++) { - if (expectedNamedKeys[j].first == allKeyNames[i]) { - 
expectedNamedKeys.splice(j, 1); - removed = true; - break; - } - } - - assert(removed); - - const actualNamedKeys = CL.listNamedKeys(); - assert(checkItemsEqual(expectedNamedKeys, actualNamedKeys)); - } -} diff --git a/smart_contracts/contracts_as/test/list-named-keys/assembly/tsconfig.json b/smart_contracts/contracts_as/test/list-named-keys/assembly/tsconfig.json deleted file mode 100644 index 505b0fc0d8..0000000000 --- a/smart_contracts/contracts_as/test/list-named-keys/assembly/tsconfig.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "extends": "../../../../../../../.nvm/versions/node/v10.16.3/lib/node_modules/assemblyscript/std/assembly.json", - "include": [ - "./**/*.ts" - ] -} diff --git a/smart_contracts/contracts_as/test/list-named-keys/index.js b/smart_contracts/contracts_as/test/list-named-keys/index.js deleted file mode 100644 index 5a6adb78f3..0000000000 --- a/smart_contracts/contracts_as/test/list-named-keys/index.js +++ /dev/null @@ -1,12 +0,0 @@ -const fs = require("fs"); -const compiled = new WebAssembly.Module(fs.readFileSync(__dirname + "/build/do_nothing.wasm")); -const imports = { - env: { - abort(_msg, _file, line, column) { - console.error("abort called at index.ts:" + line + ":" + column); - } - } -}; -Object.defineProperty(module, "exports", { - get: () => new WebAssembly.Instance(compiled, imports).exports -}); diff --git a/smart_contracts/contracts_as/test/list-named-keys/package.json b/smart_contracts/contracts_as/test/list-named-keys/package.json deleted file mode 100644 index 1f56511aba..0000000000 --- a/smart_contracts/contracts_as/test/list-named-keys/package.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "scripts": { - "asbuild:optimized": "asc --lib ../../.. 
assembly/index.ts -b ../../../../target_as/list_named_keys.wasm --optimize --use abort=", - "asbuild": "npm run asbuild:optimized" - }, - "devDependencies": { - "assemblyscript": "^0.8.1" - } -} diff --git a/smart_contracts/contracts_as/test/main-purse/assembly/index.ts b/smart_contracts/contracts_as/test/main-purse/assembly/index.ts deleted file mode 100644 index 7db7ebfc8d..0000000000 --- a/smart_contracts/contracts_as/test/main-purse/assembly/index.ts +++ /dev/null @@ -1,27 +0,0 @@ -//@ts-nocheck -import {getMainPurse} from "../../../../contract_as/assembly/account"; -import * as CL from "../../../../contract_as/assembly"; -import {Error} from "../../../../contract_as/assembly/error"; -import {URef} from "../../../../contract_as/assembly/uref"; - -const ARG_PURSE = "purse"; - -enum CustomError { - MissingExpectedMainPurseArg = 86, - InvalidExpectedMainPurseArg = 97, - EqualityAssertionFailed = 139 -} - -export function call(): void { - let expectedMainPurseArg = CL.getNamedArg(ARG_PURSE); - let purseResult = URef.fromBytes(expectedMainPurseArg); - if (purseResult === null){ - Error.fromUserError(CustomError.InvalidExpectedMainPurseArg).revert(); - return; - } - const expectedMainPurse = purseResult.value; - const actualMainPurse = getMainPurse(); - - if (expectedMainPurse != actualMainPurse) - Error.fromUserError(CustomError.EqualityAssertionFailed).revert(); -} diff --git a/smart_contracts/contracts_as/test/main-purse/assembly/tsconfig.json b/smart_contracts/contracts_as/test/main-purse/assembly/tsconfig.json deleted file mode 100644 index 505b0fc0d8..0000000000 --- a/smart_contracts/contracts_as/test/main-purse/assembly/tsconfig.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "extends": "../../../../../../../.nvm/versions/node/v10.16.3/lib/node_modules/assemblyscript/std/assembly.json", - "include": [ - "./**/*.ts" - ] -} diff --git a/smart_contracts/contracts_as/test/main-purse/index.js b/smart_contracts/contracts_as/test/main-purse/index.js deleted file mode 100644 
index 0dd53f04a1..0000000000 --- a/smart_contracts/contracts_as/test/main-purse/index.js +++ /dev/null @@ -1,12 +0,0 @@ -const fs = require("fs"); -const compiled = new WebAssembly.Module(fs.readFileSync(__dirname + "/build/main_purse.wasm")); -const imports = { - env: { - abort(_msg, _file, line, column) { - console.error("abort called at index.ts:" + line + ":" + column); - } - } -}; -Object.defineProperty(module, "exports", { - get: () => new WebAssembly.Instance(compiled, imports).exports -}); diff --git a/smart_contracts/contracts_as/test/main-purse/package.json b/smart_contracts/contracts_as/test/main-purse/package.json deleted file mode 100644 index 0b62733af7..0000000000 --- a/smart_contracts/contracts_as/test/main-purse/package.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "scripts": { - "asbuild:optimized": "asc --lib ../../.. assembly/index.ts -b ../../../../target_as/main_purse.wasm --optimize --use abort=", - "asbuild": "npm run asbuild:optimized" - }, - "devDependencies": { - "assemblyscript": "^0.8.1" - } -} diff --git a/smart_contracts/contracts_as/test/manage-groups/assembly/index.ts b/smart_contracts/contracts_as/test/manage-groups/assembly/index.ts deleted file mode 100644 index 7af9391279..0000000000 --- a/smart_contracts/contracts_as/test/manage-groups/assembly/index.ts +++ /dev/null @@ -1,189 +0,0 @@ -//@ts-nocheck -import * as CL from "../../../../contract_as/assembly"; -import { Error, ErrorCode } from "../../../../contract_as/assembly/error"; -import { Key } from "../../../../contract_as/assembly/key"; -import { URef } from "../../../../contract_as/assembly/uref"; -import { fromBytesString, fromBytesU64, Result, fromBytesArray } from "../../../../contract_as/assembly/bytesrepr"; -import { CLValue, CLType, CLTypeTag } from "../../../../contract_as/assembly/clvalue"; -import { Pair } from "../../../../contract_as/assembly/pair"; -import { RuntimeArgs } from "../../../../contract_as/assembly/runtime_args"; - -const PACKAGE_HASH_KEY = 
"package_hash_key"; -const PACKAGE_ACCESS_KEY = "package_access_key"; -const CREATE_GROUP = "create_group"; -const REMOVE_GROUP = "remove_group"; -const EXTEND_GROUP_UREFS = "extend_group_urefs"; -const REMOVE_GROUP_UREFS = "remove_group_urefs"; -const GROUP_NAME_ARG = "group_name"; -const UREFS_ARG = "urefs"; -const TOTAL_NEW_UREFS_ARG = "total_new_urefs"; -const TOTAL_EXISTING_UREFS_ARG = "total_existing_urefs"; - -export function create_group(): void { - let packageHashKey = CL.getKey(PACKAGE_HASH_KEY); - if (packageHashKey === null) { - Error.fromErrorCode(ErrorCode.GetKey).revert(); - return; - } - let packageAccessKey = CL.getKey(PACKAGE_ACCESS_KEY); - if (packageAccessKey === null) { - Error.fromErrorCode(ErrorCode.GetKey).revert(); - return; - } - let group_name: String = fromBytesString(CL.getNamedArg(GROUP_NAME_ARG)).unwrap(); - let total_urefs: u64 = fromBytesU64(CL.getNamedArg(TOTAL_NEW_UREFS_ARG)).unwrap(); - let total_existing_urefs: u64 = fromBytesU64(CL.getNamedArg(TOTAL_EXISTING_UREFS_ARG)).unwrap(); - - let existingURefs = new Array(); - for (var i: u64 = 0; i < total_existing_urefs; i++) { - let res = Key.create(CLValue.fromU64(i)); - if (res === null) { - Error.fromErrorCode(ErrorCode.Formatting).revert(); - unreachable(); - return; - } - existingURefs.push(res.uref); - } - - let newURefs = CL.createContractUserGroup( - packageHashKey.hash, - group_name, - total_urefs as u8, - existingURefs, - ); -} - -export function remove_group(): void { - let packageHashKey = CL.getKey(PACKAGE_HASH_KEY); - if (packageHashKey === null) { - Error.fromErrorCode(ErrorCode.GetKey).revert(); - return; - } - let groupName: String = fromBytesString(CL.getNamedArg(GROUP_NAME_ARG)).unwrap(); - CL.removeContractUserGroup( - packageHashKey.hash, - groupName); -} - -export function extend_group_urefs(): void { - let packageHashKey = CL.getKey(PACKAGE_HASH_KEY); - if (packageHashKey === null) { - Error.fromErrorCode(ErrorCode.GetKey).revert(); - return; - } - let 
packageAccessKey = CL.getKey(PACKAGE_ACCESS_KEY); - if (packageAccessKey === null) { - Error.fromErrorCode(ErrorCode.GetKey).revert(); - return; - } - let groupName: String = fromBytesString(CL.getNamedArg(GROUP_NAME_ARG)).unwrap(); - let newURefsCount: u64 = fromBytesU64(CL.getNamedArg(TOTAL_NEW_UREFS_ARG)).unwrap(); - - // Creates 1 additional uref inside group - for (var i = 0; i < newURefsCount; i++) { - let _newURef = CL.extendContractUserGroupURefs( - packageHashKey.hash, - groupName - ); - } -} - -export function remove_group_urefs(): void { - let packageHashKey = CL.getKey(PACKAGE_HASH_KEY); - if (packageHashKey === null) { - Error.fromErrorCode(ErrorCode.GetKey).revert(); - return; - } - let groupName: String = fromBytesString(CL.getNamedArg(GROUP_NAME_ARG)).unwrap(); - let urefsBytes = CL.getNamedArg(UREFS_ARG); - let decode = function (bytes: Uint8Array): Result { - return URef.fromBytes(bytes); - }; - let urefs: Array = fromBytesArray(urefsBytes, decode).unwrap(); - - CL.removeContractUserGroupURefs( - packageHashKey.hash, - groupName, - urefs, - ); -} - -/// Restricted uref comes from creating a group and will be assigned to a smart contract -function createEntryPoints1(): CL.EntryPoints { - let entryPoints = new CL.EntryPoints(); - - { - let restrictedSessionParams = new Array>(); - restrictedSessionParams.push(new Pair(GROUP_NAME_ARG, new CLType(CLTypeTag.String))); - restrictedSessionParams.push(new Pair(TOTAL_EXISTING_UREFS_ARG, new CLType(CLTypeTag.U64))); - restrictedSessionParams.push(new Pair(TOTAL_NEW_UREFS_ARG, new CLType(CLTypeTag.U64))); - let restrictedSession = new CL.EntryPoint( - CREATE_GROUP, - restrictedSessionParams, - new CLType(CLTypeTag.Unit), - new CL.PublicAccess(), - CL.EntryPointType.Session, - ); - entryPoints.addEntryPoint(restrictedSession); - } - - { - let params = new Array>(); - params.push(new Pair(GROUP_NAME_ARG, new CLType(CLTypeTag.String))); - - let removeGroup = new CL.EntryPoint( - REMOVE_GROUP, - params, - new 
CLType(CLTypeTag.Unit), - new CL.PublicAccess(), - CL.EntryPointType.Session, - ); - entryPoints.addEntryPoint(removeGroup); - } - - { - let params = new Array>(); - params.push(new Pair(GROUP_NAME_ARG, new CLType(CLTypeTag.String))); - params.push(new Pair(TOTAL_NEW_UREFS_ARG, new CLType(CLTypeTag.U64))); - let extendGroupURefs = new CL.EntryPoint( - EXTEND_GROUP_UREFS, - params, - new CLType(CLTypeTag.Unit), - new CL.PublicAccess(), - CL.EntryPointType.Session, - ); - entryPoints.addEntryPoint(extendGroupURefs); - } - - { - let params = new Array>(); - params.push(new Pair(GROUP_NAME_ARG, new CLType(CLTypeTag.String))); - params.push(new Pair(UREFS_ARG, CLType.list(new CLType(CLTypeTag.Uref)))); - - let entry_point_name2 = REMOVE_GROUP_UREFS; - let remove_group_urefs = new CL.EntryPoint( - entry_point_name2, - params, - new CLType(CLTypeTag.Unit), - new CL.PublicAccess(), - CL.EntryPointType.Session, - ); - entryPoints.addEntryPoint(remove_group_urefs); - } - return entryPoints; -} - -function installVersion1(package_hash: Uint8Array): void { - let contractNamedKeys = new Array>(); - let entryPoints = createEntryPoints1(); - const result = CL.addContractVersion(package_hash, entryPoints, contractNamedKeys); -} - -export function call(): void { - let result = CL.createContractPackageAtHash(); - - CL.putKey(PACKAGE_HASH_KEY, Key.fromHash(result.packageHash)); - CL.putKey(PACKAGE_ACCESS_KEY, Key.fromURef(result.accessURef)); - - installVersion1(result.packageHash); -} diff --git a/smart_contracts/contracts_as/test/manage-groups/assembly/tsconfig.json b/smart_contracts/contracts_as/test/manage-groups/assembly/tsconfig.json deleted file mode 100644 index 505b0fc0d8..0000000000 --- a/smart_contracts/contracts_as/test/manage-groups/assembly/tsconfig.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "extends": "../../../../../../../.nvm/versions/node/v10.16.3/lib/node_modules/assemblyscript/std/assembly.json", - "include": [ - "./**/*.ts" - ] -} diff --git 
a/smart_contracts/contracts_as/test/manage-groups/index.js b/smart_contracts/contracts_as/test/manage-groups/index.js deleted file mode 100644 index 5a6adb78f3..0000000000 --- a/smart_contracts/contracts_as/test/manage-groups/index.js +++ /dev/null @@ -1,12 +0,0 @@ -const fs = require("fs"); -const compiled = new WebAssembly.Module(fs.readFileSync(__dirname + "/build/do_nothing.wasm")); -const imports = { - env: { - abort(_msg, _file, line, column) { - console.error("abort called at index.ts:" + line + ":" + column); - } - } -}; -Object.defineProperty(module, "exports", { - get: () => new WebAssembly.Instance(compiled, imports).exports -}); diff --git a/smart_contracts/contracts_as/test/manage-groups/package.json b/smart_contracts/contracts_as/test/manage-groups/package.json deleted file mode 100644 index 062c708c6e..0000000000 --- a/smart_contracts/contracts_as/test/manage-groups/package.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "scripts": { - "asbuild:optimized": "asc --lib ../../.. assembly/index.ts -b ../../../../target_as/manage_groups.wasm --optimize --use abort=", - "asbuild": "npm run asbuild:optimized" - }, - "devDependencies": { - "assemblyscript": "^0.8.1" - } -} diff --git a/smart_contracts/contracts_as/test/named-keys/assembly/index.ts b/smart_contracts/contracts_as/test/named-keys/assembly/index.ts deleted file mode 100644 index 70d53f58ed..0000000000 --- a/smart_contracts/contracts_as/test/named-keys/assembly/index.ts +++ /dev/null @@ -1,181 +0,0 @@ -import * as CL from "../../../../contract_as/assembly"; -import {Error, ErrorCode} from "../../../../contract_as/assembly/error"; -import {Key} from "../../../../contract_as/assembly/key"; -import {fromBytesString} from "../../../../contract_as/assembly/bytesrepr"; -import {U512} from "../../../../contract_as/assembly/bignum"; -import {CLValue} from "../../../../contract_as/assembly/clvalue"; - -const COMMAND_CREATE_UREF1 = "create-uref1"; -const COMMAND_CREATE_UREF2 = "create-uref2"; -const 
COMMAND_REMOVE_UREF1 = "remove-uref1"; -const COMMAND_REMOVE_UREF2 = "remove-uref2"; -const COMMAND_TEST_READ_UREF1 = "test-read-uref1"; -const COMMAND_TEST_READ_UREF2 = "test-read-uref2"; -const COMMAND_INCREASE_UREF2 = "increase-uref2"; -const COMMAND_OVERWRITE_UREF2 = "overwrite-uref2"; -const ARG_COMMAND = "command"; - -export function call(): void { - let commandBytes = CL.getNamedArg(ARG_COMMAND); - let commandResult = fromBytesString(commandBytes); - if (commandResult.hasError()) { - Error.fromErrorCode(ErrorCode.InvalidArgument); - return; - } - let command = commandResult.value; - - if (command == COMMAND_CREATE_UREF1) { - let helloWorldKey = Key.create(CLValue.fromString("Hello, world!")); - if (helloWorldKey === null) { - Error.fromUserError(4464 + 1).revert(); - return; - } - CL.putKey("hello-world", helloWorldKey); - } - - else if (command == COMMAND_CREATE_UREF2) { - let newBigValueKey = Key.create(CLValue.fromU512(U512.MAX_VALUE)); - if (newBigValueKey === null) { - Error.fromUserError(4464 + 4).revert(); - return; - } - CL.putKey("big-value", newBigValueKey); - } - - else if (command == COMMAND_REMOVE_UREF1) { - CL.removeKey("hello-world"); - } - - else if (command == COMMAND_REMOVE_UREF2) { - CL.removeKey("big-value"); - } - - else if (command == COMMAND_TEST_READ_UREF1) { - let namedKeys = CL.listNamedKeys(); - // Read data hidden behind `URef1` uref - namedKeys = CL.listNamedKeys(); - - let helloWorld: String = ""; - for (let i = 0; i < namedKeys.length; i++) { - if (namedKeys[i].first == "hello-world") { - let bytes = namedKeys[i].second.read(); - if (bytes === null) { - Error.fromUserError(4464 + 1000 + i).revert(); - return; - } - - let bytesString = fromBytesString(bytes); - if (bytesString.hasError()) { - Error.fromUserError(4464 + 2000 + i).revert(); - return; - } - helloWorld = bytesString.value; - } - } - - if (helloWorld != "Hello, world!") { - Error.fromUserError(4464 + 6).revert(); - return; - } - - // Read data through dedicated FFI 
function - let uref1 = CL.getKey("hello-world"); - if (uref1 === null) { - Error.fromErrorCode(ErrorCode.GetKey).revert(); - return; - } - let uref1Bytes = uref1.read(); - if (uref1Bytes === null) { - Error.fromUserError(4464 + 7).revert(); - return; - } - let uref1Str = fromBytesString(uref1Bytes); - if (uref1Str.hasError()) { - Error.fromUserError(4464 + 8).revert(); - return; - } - if (uref1Str.value != "Hello, world!") { - Error.fromUserError(4464 + 9).revert(); - return; - } - } - - else if (command == COMMAND_TEST_READ_UREF2) { - // Get the big value back - let bigValueKey = CL.getKey("big-value"); - if (bigValueKey === null) { - Error.fromErrorCode(ErrorCode.GetKey).revert(); - return; - } - let bigValueBytes = bigValueKey.read(); - if (bigValueBytes === null) { - Error.fromUserError(4464 + 12).revert(); - return; - } - let bigValue = U512.fromBytes(bigValueBytes); - if (bigValue.hasError()) { - Error.fromUserError(4464 + 13).revert(); - return; - } - - if (bigValue.value != U512.MAX_VALUE) { - Error.fromUserError(4464 + 14).revert(); - return; - } - } - - else if (command == COMMAND_INCREASE_UREF2) { - // Get the big value back - let bigValueKey = CL.getKey("big-value"); - if (bigValueKey === null) { - Error.fromErrorCode(ErrorCode.GetKey).revert(); - return; - } - // Increase by 1 - bigValueKey.add(CLValue.fromU512(U512.fromU64(1))); - let newBigValueBytes = bigValueKey.read(); - if (newBigValueBytes === null) { - Error.fromUserError(4464 + 15).revert(); - return; - } - let newBigValue = U512.fromBytes(newBigValueBytes); - if (newBigValue.hasError()) { - Error.fromUserError(4464 + 16).revert(); - return; - } - if (newBigValue.value != U512.MIN_VALUE) { - Error.fromUserError(4464 + 17).revert(); - return; - } - } - - else if (command == COMMAND_OVERWRITE_UREF2) { - // Get the big value back - let bigValueKey = CL.getKey("big-value"); - if (bigValueKey === null) { - Error.fromErrorCode(ErrorCode.GetKey).revert(); - return; - } - // I can overwrite some data 
under the pointer - bigValueKey.write(CLValue.fromU512(U512.fromU64(123456789))); - - let newBigValueBytes = bigValueKey.read(); - if (newBigValueBytes === null) { - Error.fromUserError(4464 + 18).revert(); - return; - } - let newBigValue = U512.fromBytes(newBigValueBytes); - if (newBigValue.hasError()) { - Error.fromUserError(4464 + 19).revert(); - return; - } - if (newBigValue.value != U512.fromU64(123456789)) { - Error.fromUserError(4464 + 20).revert(); - return; - } - } - - else { - Error.fromErrorCode(ErrorCode.InvalidArgument).revert(); - } -} diff --git a/smart_contracts/contracts_as/test/named-keys/assembly/tsconfig.json b/smart_contracts/contracts_as/test/named-keys/assembly/tsconfig.json deleted file mode 100644 index 505b0fc0d8..0000000000 --- a/smart_contracts/contracts_as/test/named-keys/assembly/tsconfig.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "extends": "../../../../../../../.nvm/versions/node/v10.16.3/lib/node_modules/assemblyscript/std/assembly.json", - "include": [ - "./**/*.ts" - ] -} diff --git a/smart_contracts/contracts_as/test/named-keys/index.js b/smart_contracts/contracts_as/test/named-keys/index.js deleted file mode 100644 index 5a6adb78f3..0000000000 --- a/smart_contracts/contracts_as/test/named-keys/index.js +++ /dev/null @@ -1,12 +0,0 @@ -const fs = require("fs"); -const compiled = new WebAssembly.Module(fs.readFileSync(__dirname + "/build/do_nothing.wasm")); -const imports = { - env: { - abort(_msg, _file, line, column) { - console.error("abort called at index.ts:" + line + ":" + column); - } - } -}; -Object.defineProperty(module, "exports", { - get: () => new WebAssembly.Instance(compiled, imports).exports -}); diff --git a/smart_contracts/contracts_as/test/named-keys/package.json b/smart_contracts/contracts_as/test/named-keys/package.json deleted file mode 100644 index 8442b7070e..0000000000 --- a/smart_contracts/contracts_as/test/named-keys/package.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "scripts": { - "asbuild:optimized": "asc 
--lib ../../.. assembly/index.ts -b ../../../../target_as/named_keys.wasm --optimize --use abort=", - "asbuild": "npm run asbuild:optimized" - }, - "devDependencies": { - "assemblyscript": "^0.8.1" - } -} diff --git a/smart_contracts/contracts_as/test/overwrite-uref-content/assembly/index.ts b/smart_contracts/contracts_as/test/overwrite-uref-content/assembly/index.ts deleted file mode 100644 index 1dc22460a6..0000000000 --- a/smart_contracts/contracts_as/test/overwrite-uref-content/assembly/index.ts +++ /dev/null @@ -1,34 +0,0 @@ -import * as CL from "../../../../contract_as/assembly"; -import {Error, ErrorCode} from "../../../../contract_as/assembly/error"; -import {AccessRights, URef} from "../../../../contract_as/assembly/uref"; -import {Key} from "../../../../contract_as/assembly/key"; -import {CLValue} from "../../../../contract_as/assembly/clvalue"; - -const ARG_CONTRACT_UREF = "contract_uref"; -const REPLACEMENT_DATA = "bawitdaba"; - -export function call(): void { - let urefBytes = CL.getNamedArg(ARG_CONTRACT_UREF); - let urefResult = URef.fromBytes(urefBytes); - if (urefResult.hasError()) { - Error.fromErrorCode(ErrorCode.InvalidArgument).revert(); - return; - } - let uref = urefResult.value; - - if (uref.isValid() == false){ - Error.fromUserError(1).revert(); - return; - } - - let elevatedUref = new URef( - uref.getBytes(), - AccessRights.READ_ADD_WRITE - ); - - let forgedKey = Key.fromURef(elevatedUref); - - let value = CLValue.fromString(REPLACEMENT_DATA); - - forgedKey.write(value); -} diff --git a/smart_contracts/contracts_as/test/overwrite-uref-content/assembly/tsconfig.json b/smart_contracts/contracts_as/test/overwrite-uref-content/assembly/tsconfig.json deleted file mode 100644 index 505b0fc0d8..0000000000 --- a/smart_contracts/contracts_as/test/overwrite-uref-content/assembly/tsconfig.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "extends": "../../../../../../../.nvm/versions/node/v10.16.3/lib/node_modules/assemblyscript/std/assembly.json", - "include": 
[ - "./**/*.ts" - ] -} diff --git a/smart_contracts/contracts_as/test/overwrite-uref-content/index.js b/smart_contracts/contracts_as/test/overwrite-uref-content/index.js deleted file mode 100644 index d88d7abc84..0000000000 --- a/smart_contracts/contracts_as/test/overwrite-uref-content/index.js +++ /dev/null @@ -1,12 +0,0 @@ -const fs = require("fs"); -const compiled = new WebAssembly.Module(fs.readFileSync(__dirname + "/build/overwrite_uref_content.wasm")); -const imports = { - env: { - abort(_msg, _file, line, column) { - console.error("abort called at index.ts:" + line + ":" + column); - } - } -}; -Object.defineProperty(module, "exports", { - get: () => new WebAssembly.Instance(compiled, imports).exports -}); diff --git a/smart_contracts/contracts_as/test/overwrite-uref-content/package.json b/smart_contracts/contracts_as/test/overwrite-uref-content/package.json deleted file mode 100644 index 047163da53..0000000000 --- a/smart_contracts/contracts_as/test/overwrite-uref-content/package.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "scripts": { - "asbuild:optimized": "asc --lib ../../.. 
assembly/index.ts -b ../../../../target_as/overwrite_uref_content.wasm --optimize --use abort=", - "asbuild": "npm run asbuild:optimized" - }, - "devDependencies": { - "assemblyscript": "^0.8.1" - } -} diff --git a/smart_contracts/contracts_as/test/purse-holder-stored-caller/assembly/index.ts b/smart_contracts/contracts_as/test/purse-holder-stored-caller/assembly/index.ts deleted file mode 100644 index aaaac9fa30..0000000000 --- a/smart_contracts/contracts_as/test/purse-holder-stored-caller/assembly/index.ts +++ /dev/null @@ -1,57 +0,0 @@ -//@ts-nocheck -import * as CL from "../../../../contract_as/assembly"; -import {Error} from "../../../../contract_as/assembly/error"; -import {fromBytesString} from "../../../../contract_as/assembly/bytesrepr"; -import {Key} from "../../../../contract_as/assembly/key"; -import {putKey} from "../../../../contract_as/assembly"; -import {CLValue} from "../../../../contract_as/assembly/clvalue"; -import { RuntimeArgs } from "../../../../contract_as/assembly/runtime_args"; -import {Pair} from "../../../../contract_as/assembly/pair"; - -const METHOD_VERSION = "version"; -const HASH_KEY_NAME = "purse_holder"; -const ENTRY_POINT_NAME = "entry_point"; -const PURSE_NAME = "purse_name"; - -enum CustomError { - UnableToGetVersion = 6, - UnableToStoreVersion = 7, - InvalidVersion = 8 -} - -export function call(): void { - let entryPointNameBytes = CL.getNamedArg(ENTRY_POINT_NAME); - let entryPointName = fromBytesString(entryPointNameBytes).unwrap(); - - // short circuit if VERSION method called - if (entryPointName == METHOD_VERSION) { - let contractHash = CL.getNamedArg(HASH_KEY_NAME); - const versionBytes = CL.callContract(contractHash, entryPointName, new RuntimeArgs()); - if (versionBytes === null) { - Error.fromUserError(CustomError.UnableToGetVersion).revert(); - return; - } - const versionResult = fromBytesString(versionBytes); - if (versionResult.hasError()) { - Error.fromUserError(CustomError.InvalidVersion).revert(); - return; - } - 
let version = versionResult.value; - const maybeVersionKey = Key.create(CLValue.fromString(version)); - if (maybeVersionKey === null) { - Error.fromUserError(CustomError.UnableToStoreVersion).revert(); - return; - } - const versionKey = maybeVersionKey; - putKey(METHOD_VERSION, versionKey); - } - else { - let contractHash = CL.getNamedArg(HASH_KEY_NAME); - let purseNameBytes = CL.getNamedArg(PURSE_NAME); - let purseName = fromBytesString(purseNameBytes).unwrap(); - let runtimeArgs = RuntimeArgs.fromArray([ - new Pair(PURSE_NAME, CLValue.fromString(purseName)), - ]); - CL.callContract(contractHash, entryPointName, runtimeArgs); - } -} diff --git a/smart_contracts/contracts_as/test/purse-holder-stored-caller/assembly/tsconfig.json b/smart_contracts/contracts_as/test/purse-holder-stored-caller/assembly/tsconfig.json deleted file mode 100644 index 505b0fc0d8..0000000000 --- a/smart_contracts/contracts_as/test/purse-holder-stored-caller/assembly/tsconfig.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "extends": "../../../../../../../.nvm/versions/node/v10.16.3/lib/node_modules/assemblyscript/std/assembly.json", - "include": [ - "./**/*.ts" - ] -} diff --git a/smart_contracts/contracts_as/test/purse-holder-stored-caller/index.js b/smart_contracts/contracts_as/test/purse-holder-stored-caller/index.js deleted file mode 100644 index 0b1094a1d8..0000000000 --- a/smart_contracts/contracts_as/test/purse-holder-stored-caller/index.js +++ /dev/null @@ -1,12 +0,0 @@ -const fs = require("fs"); -const compiled = new WebAssembly.Module(fs.readFileSync(__dirname + "/build/purse_holder_stored_caller.wasm")); -const imports = { - env: { - abort(_msg, _file, line, column) { - console.error("abort called at index.ts:" + line + ":" + column); - } - } -}; -Object.defineProperty(module, "exports", { - get: () => new WebAssembly.Instance(compiled, imports).exports -}); diff --git a/smart_contracts/contracts_as/test/purse-holder-stored-caller/package.json 
b/smart_contracts/contracts_as/test/purse-holder-stored-caller/package.json deleted file mode 100644 index 20041ea6f5..0000000000 --- a/smart_contracts/contracts_as/test/purse-holder-stored-caller/package.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "scripts": { - "asbuild:optimized": "asc --lib ../../.. assembly/index.ts -b ../../../../target_as/purse_holder_stored_caller.wasm --optimize --use abort=", - "asbuild": "npm run asbuild:optimized" - }, - "devDependencies": { - "assemblyscript": "^0.8.1" - } -} diff --git a/smart_contracts/contracts_as/test/purse-holder-stored-upgrader/assembly/index.ts b/smart_contracts/contracts_as/test/purse-holder-stored-upgrader/assembly/index.ts deleted file mode 100644 index 79ed8c5503..0000000000 --- a/smart_contracts/contracts_as/test/purse-holder-stored-upgrader/assembly/index.ts +++ /dev/null @@ -1,114 +0,0 @@ -//@ts-nocheck -import * as CL from "../../../../contract_as/assembly"; -import {Error, ErrorCode} from "../../../../contract_as/assembly/error"; -import {fromBytesString} from "../../../../contract_as/assembly/bytesrepr"; -import {Key} from "../../../../contract_as/assembly/key"; -import {CLValue, CLType, CLTypeTag} from "../../../../contract_as/assembly/clvalue"; -import {URef} from "../../../../contract_as/assembly/uref"; -import {createPurse} from "../../../../contract_as/assembly/purse"; -import { checkItemsEqual } from "../../../../contract_as/assembly/utils"; -import {Pair} from "../../../../contract_as/assembly/pair"; - -const METHOD_ADD = "add"; -const METHOD_REMOVE = "remove"; -const METHOD_VERSION = "version"; -const ARG_PURSE_NAME = "purse_name"; -const NEW_VERSION = "1.0.1"; -const VERSION = "version"; -const ACCESS_KEY_NAME = "purse_holder_access"; -const PURSE_HOLDER_STORED_CONTRACT_NAME = "purse_holder_stored"; -const ARG_CONTRACT_PACKAGE = "contract_package"; - -enum CustomError { - MissingPurseHolderURefArg = 0, - InvalidPurseHolderURefArg = 1, - MissingMethodNameArg = 2, - InvalidMethodNameArg = 3, - 
MissingPurseNameArg = 4, - InvalidPurseNameArg = 5, - UnknownMethodName = 6, - UnableToStoreVersion = 7, - NamedPurseNotCreated = 8 -} - - -function getPurseName(): String { - let purseNameBytes = CL.getNamedArg(ARG_PURSE_NAME); - return fromBytesString(purseNameBytes).unwrap(); -} - -export function add(): void { - let purseName = getPurseName(); - let purse = createPurse(); - CL.putKey(purseName, Key.fromURef(purse)); -} - -export function remove(): void { - let purseName = getPurseName(); - CL.removeKey(purseName); -} - -export function version(): void { - CL.ret(CLValue.fromString(VERSION)); -} - -export function delegate(): void { -} - -export function call(): void { - let contractPackageHash = CL.getNamedArg(ARG_CONTRACT_PACKAGE); - let accessKey = CL.getKey(ACCESS_KEY_NAME); - if (accessKey === null) { - Error.fromErrorCode(ErrorCode.GetKey).revert(); - return; - } - - let entryPoints = new CL.EntryPoints(); - - let addArgs = new Array>(); - addArgs.push(new Pair(ARG_PURSE_NAME, new CLType(CLTypeTag.String))); - - let add = new CL.EntryPoint( - METHOD_ADD, - addArgs, - new CLType(CLTypeTag.Unit), - new CL.PublicAccess(), - CL.EntryPointType.Contract, - ); - entryPoints.addEntryPoint(add); - - let version = new CL.EntryPoint( - METHOD_VERSION, - new Array>(), - new CLType(CLTypeTag.String), - new CL.PublicAccess(), - CL.EntryPointType.Contract, - ); - entryPoints.addEntryPoint(version); - - let removeArgs = new Array>(); - removeArgs.push(new Pair(ARG_PURSE_NAME, new CLType(CLTypeTag.String))); - - let remove = new CL.EntryPoint( - METHOD_REMOVE, - removeArgs, - new CLType(CLTypeTag.Unit), - new CL.PublicAccess(), - CL.EntryPointType.Contract, - ); - entryPoints.addEntryPoint(remove); - - let newResult = CL.addContractVersion( - contractPackageHash, - entryPoints, - new Array>(), - ); - CL.putKey(PURSE_HOLDER_STORED_CONTRACT_NAME, Key.fromHash(newResult.contractHash)); - - let newVersionKey = Key.create(CLValue.fromString(NEW_VERSION)); - if (newVersionKey 
=== null) { - Error.fromErrorCode(ErrorCode.Formatting).revert(); - return; - } - CL.putKey(VERSION, newVersionKey); -} diff --git a/smart_contracts/contracts_as/test/purse-holder-stored-upgrader/assembly/tsconfig.json b/smart_contracts/contracts_as/test/purse-holder-stored-upgrader/assembly/tsconfig.json deleted file mode 100644 index 505b0fc0d8..0000000000 --- a/smart_contracts/contracts_as/test/purse-holder-stored-upgrader/assembly/tsconfig.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "extends": "../../../../../../../.nvm/versions/node/v10.16.3/lib/node_modules/assemblyscript/std/assembly.json", - "include": [ - "./**/*.ts" - ] -} diff --git a/smart_contracts/contracts_as/test/purse-holder-stored-upgrader/index.js b/smart_contracts/contracts_as/test/purse-holder-stored-upgrader/index.js deleted file mode 100644 index 53d717197f..0000000000 --- a/smart_contracts/contracts_as/test/purse-holder-stored-upgrader/index.js +++ /dev/null @@ -1,12 +0,0 @@ -const fs = require("fs"); -const compiled = new WebAssembly.Module(fs.readFileSync(__dirname + "/build/purse_holder_stored_upgrader.wasm")); -const imports = { - env: { - abort(_msg, _file, line, column) { - console.error("abort called at index.ts:" + line + ":" + column); - } - } -}; -Object.defineProperty(module, "exports", { - get: () => new WebAssembly.Instance(compiled, imports).exports -}); diff --git a/smart_contracts/contracts_as/test/purse-holder-stored-upgrader/package.json b/smart_contracts/contracts_as/test/purse-holder-stored-upgrader/package.json deleted file mode 100644 index 13383d334a..0000000000 --- a/smart_contracts/contracts_as/test/purse-holder-stored-upgrader/package.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "scripts": { - "asbuild:optimized": "asc --lib ../../.. 
assembly/index.ts -b ../../../../target_as/purse_holder_stored_upgrader.wasm --optimize --use abort=", - "asbuild": "npm run asbuild:optimized" - }, - "devDependencies": { - "assemblyscript": "^0.8.1" - } -} diff --git a/smart_contracts/contracts_as/test/purse-holder-stored/assembly/index.ts b/smart_contracts/contracts_as/test/purse-holder-stored/assembly/index.ts deleted file mode 100644 index cf9ca3900d..0000000000 --- a/smart_contracts/contracts_as/test/purse-holder-stored/assembly/index.ts +++ /dev/null @@ -1,89 +0,0 @@ -//@ts-nocheck -import * as CL from "../../../../contract_as/assembly"; -import {Error, ErrorCode} from "../../../../contract_as/assembly/error"; -import {fromBytesString, toBytesMap} from "../../../../contract_as/assembly/bytesrepr"; -import {Key} from "../../../../contract_as/assembly/key"; -import {Pair} from "../../../../contract_as/assembly/pair"; -import {putKey, ret} from "../../../../contract_as/assembly"; -import {CLValue, CLType, CLTypeTag} from "../../../../contract_as/assembly/clvalue"; -import {createPurse} from "../../../../contract_as/assembly/purse"; -import {URef} from "../../../../contract_as/assembly/uref"; -import {CLTypeTag} from "../../../../contract_as/assembly/clvalue"; - -const METHOD_ADD = "add"; -const METHOD_REMOVE = "remove"; -const METHOD_VERSION = "version"; - -const ENTRY_POINT_ADD = "add_named_purse"; -const ENTRY_POINT_VERSION = "version"; -const HASH_KEY_NAME = "purse_holder"; -const ACCESS_KEY_NAME = "purse_holder_access"; -const ARG_PURSE = "purse_name"; -const ARG_IS_LOCKED = "is_locked"; -const VERSION = "1.0.0"; -const PURSE_HOLDER_STORED_CONTRACT_NAME = "purse_holder_stored"; - -enum CustomError { - MissingMethodNameArg = 0, - InvalidMethodNameArg = 1, - MissingPurseNameArg = 2, - InvalidPurseNameArg = 3, - UnknownMethodName = 4, - NamedPurseNotCreated = 5 -} - -export function add_named_purse(): void { - const purseNameBytes = CL.getNamedArg(ARG_PURSE); - const purseName = 
fromBytesString(purseNameBytes).unwrap(); - const purse = createPurse(); - CL.putKey(purseName, Key.fromURef(purse)); -} - -export function version(): void { - CL.ret(CLValue.fromString(VERSION)); -} - -export function call(): void { - let entryPoints = new CL.EntryPoints(); - let is_locked = CL.getNamedArg(ARG_IS_LOCKED); - - { - let args = new Array>(); - args.push(new Pair(ARG_PURSE, new CLType(CLTypeTag.String))); - let entryPointAdd = new CL.EntryPoint(ENTRY_POINT_ADD, args, new CLType(CLTypeTag.Unit), new CL.PublicAccess(), CL.EntryPointType.Contract); - entryPoints.addEntryPoint(entryPointAdd); - } - { - let entryPointAdd = new CL.EntryPoint(ENTRY_POINT_VERSION, new Array>(), new CLType(CLTypeTag.Unit), new CL.PublicAccess(), CL.EntryPointType.Contract); - entryPoints.addEntryPoint(entryPointAdd); - } - - if (is_locked[0] == true) { - let result = CL.newLockedContract( - entryPoints, - null, - HASH_KEY_NAME, - ACCESS_KEY_NAME - ); - putKey(PURSE_HOLDER_STORED_CONTRACT_NAME, Key.fromHash(result.contractHash)); - const versionKey = Key.create(CLValue.fromString(VERSION)); - if (versionKey === null) { - Error.fromErrorCode(ErrorCode.Formatting).revert(); - } - putKey(ENTRY_POINT_VERSION, versionKey); - - } else if (is_locked[0] == false) { - let result = CL.newContract( - entryPoints, - null, - HASH_KEY_NAME, - ACCESS_KEY_NAME); - - putKey(PURSE_HOLDER_STORED_CONTRACT_NAME, Key.fromHash(result.contractHash)); - const versionKey = Key.create(CLValue.fromString(VERSION)); - if (versionKey === null) { - Error.fromErrorCode(ErrorCode.Formatting).revert(); - } - putKey(ENTRY_POINT_VERSION, versionKey); - } -} diff --git a/smart_contracts/contracts_as/test/purse-holder-stored/assembly/tsconfig.json b/smart_contracts/contracts_as/test/purse-holder-stored/assembly/tsconfig.json deleted file mode 100644 index 505b0fc0d8..0000000000 --- a/smart_contracts/contracts_as/test/purse-holder-stored/assembly/tsconfig.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "extends": 
"../../../../../../../.nvm/versions/node/v10.16.3/lib/node_modules/assemblyscript/std/assembly.json", - "include": [ - "./**/*.ts" - ] -} diff --git a/smart_contracts/contracts_as/test/purse-holder-stored/index.js b/smart_contracts/contracts_as/test/purse-holder-stored/index.js deleted file mode 100644 index 6a8b60dcc6..0000000000 --- a/smart_contracts/contracts_as/test/purse-holder-stored/index.js +++ /dev/null @@ -1,12 +0,0 @@ -const fs = require("fs"); -const compiled = new WebAssembly.Module(fs.readFileSync(__dirname + "/build/purse_holder_stored.wasm")); -const imports = { - env: { - abort(_msg, _file, line, column) { - console.error("abort called at index.ts:" + line + ":" + column); - } - } -}; -Object.defineProperty(module, "exports", { - get: () => new WebAssembly.Instance(compiled, imports).exports -}); diff --git a/smart_contracts/contracts_as/test/purse-holder-stored/package.json b/smart_contracts/contracts_as/test/purse-holder-stored/package.json deleted file mode 100644 index 0a91065cab..0000000000 --- a/smart_contracts/contracts_as/test/purse-holder-stored/package.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "scripts": { - "asbuild:optimized": "asc --lib ../../.. assembly/index.ts -b ../../../../target_as/purse_holder_stored.wasm --optimize --use abort=", - "asbuild": "npm run asbuild:optimized" - }, - "devDependencies": { - "assemblyscript": "^0.8.1" - } -} diff --git a/smart_contracts/contracts_as/test/remove-associated-key/assembly/index.ts b/smart_contracts/contracts_as/test/remove-associated-key/assembly/index.ts deleted file mode 100644 index da1363f92d..0000000000 --- a/smart_contracts/contracts_as/test/remove-associated-key/assembly/index.ts +++ /dev/null @@ -1,23 +0,0 @@ -// The entry file of your WebAssembly module. 
-import * as CL from "../../../../contract_as/assembly"; -import {Error, ErrorCode} from "../../../../contract_as/assembly/error"; -import {removeAssociatedKey, RemoveKeyFailure} from "../../../../contract_as/assembly/account"; -import {typedToArray} from "../../../../contract_as/assembly/utils"; -import {AccountHash} from "../../../../contract_as/assembly/key"; - -const ARG_ACCOUNT = "account"; - -export function call(): void { - let accountBytes = CL.getNamedArg(ARG_ACCOUNT); - const accountResult = AccountHash.fromBytes(accountBytes); - if (accountResult.hasError()) { - Error.fromErrorCode(ErrorCode.InvalidArgument).revert(); - return; - } - const account = accountResult.value; - - if (removeAssociatedKey(account) != RemoveKeyFailure.Ok) { - Error.fromUserError(4464).revert(); - return; - } -} diff --git a/smart_contracts/contracts_as/test/remove-associated-key/assembly/tsconfig.json b/smart_contracts/contracts_as/test/remove-associated-key/assembly/tsconfig.json deleted file mode 100644 index 505b0fc0d8..0000000000 --- a/smart_contracts/contracts_as/test/remove-associated-key/assembly/tsconfig.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "extends": "../../../../../../../.nvm/versions/node/v10.16.3/lib/node_modules/assemblyscript/std/assembly.json", - "include": [ - "./**/*.ts" - ] -} diff --git a/smart_contracts/contracts_as/test/remove-associated-key/index.js b/smart_contracts/contracts_as/test/remove-associated-key/index.js deleted file mode 100644 index 5a6adb78f3..0000000000 --- a/smart_contracts/contracts_as/test/remove-associated-key/index.js +++ /dev/null @@ -1,12 +0,0 @@ -const fs = require("fs"); -const compiled = new WebAssembly.Module(fs.readFileSync(__dirname + "/build/do_nothing.wasm")); -const imports = { - env: { - abort(_msg, _file, line, column) { - console.error("abort called at index.ts:" + line + ":" + column); - } - } -}; -Object.defineProperty(module, "exports", { - get: () => new WebAssembly.Instance(compiled, imports).exports -}); diff --git 
a/smart_contracts/contracts_as/test/remove-associated-key/package.json b/smart_contracts/contracts_as/test/remove-associated-key/package.json deleted file mode 100644 index 917830fd4c..0000000000 --- a/smart_contracts/contracts_as/test/remove-associated-key/package.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "scripts": { - "asbuild:optimized": "asc --lib ../../.. assembly/index.ts -b ../../../../target_as/remove_associated_key.wasm --optimize --use abort=", - "asbuild": "npm run asbuild:optimized" - }, - "devDependencies": { - "assemblyscript": "^0.8.1" - } -} diff --git a/smart_contracts/contracts_as/test/transfer-main-purse-to-new-purse/assembly/index.ts b/smart_contracts/contracts_as/test/transfer-main-purse-to-new-purse/assembly/index.ts deleted file mode 100644 index 00e359f63e..0000000000 --- a/smart_contracts/contracts_as/test/transfer-main-purse-to-new-purse/assembly/index.ts +++ /dev/null @@ -1,44 +0,0 @@ -//@ts-nocheck -import * as CL from "../../../../contract_as/assembly"; -import {Error} from "../../../../contract_as/assembly/error"; -import {U512} from "../../../../contract_as/assembly/bignum"; -import {Key} from "../../../../contract_as/assembly/key"; -import {URef} from "../../../../contract_as/assembly/uref"; -import {putKey} from "../../../../contract_as/assembly"; -import {getMainPurse} from "../../../../contract_as/assembly/account"; -import {fromBytesString} from "../../../../contract_as/assembly/bytesrepr"; -import {createPurse, transferFromPurseToPurse} from "../../../../contract_as/assembly/purse"; - - -const ARG_AMOUNT = "amount"; -const ARG_DESTINATION = "destination"; - -enum CustomError{ - InvalidAmountArg = 2, - InvalidDestinationArg = 4 -} - -export function call(): void { - const amountArg = CL.getNamedArg(ARG_AMOUNT); - const amountResult = U512.fromBytes(amountArg); - if (amountResult.hasError()) { - Error.fromUserError(CustomError.InvalidAmountArg).revert(); - return; - } - let amount = amountResult.value; - const 
destinationPurseNameArg = CL.getNamedArg(ARG_DESTINATION); - const destinationPurseNameResult = fromBytesString(destinationPurseNameArg); - if (destinationPurseNameResult.hasError()) { - Error.fromUserError(CustomError.InvalidDestinationArg).revert(); - return; - } - let destinationPurseName = destinationPurseNameResult.value; - const mainPurse = getMainPurse(); - const destinationPurse = createPurse(); - const error = transferFromPurseToPurse(mainPurse, destinationPurse, amount); - if (error !== null) { - error.revert(); - return; - } - putKey(destinationPurseName, Key.fromURef(destinationPurse)); -} diff --git a/smart_contracts/contracts_as/test/transfer-main-purse-to-new-purse/assembly/tsconfig.json b/smart_contracts/contracts_as/test/transfer-main-purse-to-new-purse/assembly/tsconfig.json deleted file mode 100644 index 505b0fc0d8..0000000000 --- a/smart_contracts/contracts_as/test/transfer-main-purse-to-new-purse/assembly/tsconfig.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "extends": "../../../../../../../.nvm/versions/node/v10.16.3/lib/node_modules/assemblyscript/std/assembly.json", - "include": [ - "./**/*.ts" - ] -} diff --git a/smart_contracts/contracts_as/test/transfer-main-purse-to-new-purse/index.js b/smart_contracts/contracts_as/test/transfer-main-purse-to-new-purse/index.js deleted file mode 100644 index a458c1d3a0..0000000000 --- a/smart_contracts/contracts_as/test/transfer-main-purse-to-new-purse/index.js +++ /dev/null @@ -1,12 +0,0 @@ -const fs = require("fs"); -const compiled = new WebAssembly.Module(fs.readFileSync(__dirname + "/build/transfer_main_purse_to_new_purse.wasm")); -const imports = { - env: { - abort(_msg, _file, line, column) { - console.error("abort called at index.ts:" + line + ":" + column); - } - } -}; -Object.defineProperty(module, "exports", { - get: () => new WebAssembly.Instance(compiled, imports).exports -}); diff --git a/smart_contracts/contracts_as/test/transfer-main-purse-to-new-purse/package.json 
b/smart_contracts/contracts_as/test/transfer-main-purse-to-new-purse/package.json deleted file mode 100644 index 5a955d5dce..0000000000 --- a/smart_contracts/contracts_as/test/transfer-main-purse-to-new-purse/package.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "scripts": { - "asbuild:optimized": "asc --lib ../../.. assembly/index.ts -b ../../../../target_as/transfer_main_purse_to_new_purse.wasm --optimize --use abort=", - "asbuild": "npm run asbuild:optimized" - }, - "devDependencies": { - "assemblyscript": "^0.8.1" - } -} diff --git a/smart_contracts/contracts_as/test/transfer-purse-to-account-stored/assembly/index.ts b/smart_contracts/contracts_as/test/transfer-purse-to-account-stored/assembly/index.ts deleted file mode 100644 index db576c87a6..0000000000 --- a/smart_contracts/contracts_as/test/transfer-purse-to-account-stored/assembly/index.ts +++ /dev/null @@ -1,49 +0,0 @@ -//@ts-nocheck -import * as CL from "../../../../contract_as/assembly"; -import {Error, ErrorCode} from "../../../../contract_as/assembly/error"; -import {U512} from "../../../../contract_as/assembly/bignum"; -import {getMainPurse} from "../../../../contract_as/assembly/account"; -import {Key} from "../../../../contract_as/assembly/key"; -import {putKey} from "../../../../contract_as/assembly"; -import {CLValue, CLType, CLTypeTag} from "../../../../contract_as/assembly/clvalue"; -import {URef} from "../../../../contract_as/assembly/uref"; -import {toBytesMap} from "../../../../contract_as/assembly/bytesrepr"; -import * as TransferPurseToAccount from "../../transfer-purse-to-account/assembly"; -import {getBalance, transferFromPurseToAccount, TransferredTo} from "../../../../contract_as/assembly/purse"; -import {Pair} from "../../../../contract_as/assembly/pair"; - -const ENTRY_FUNCTION_NAME = "transfer"; -const PACKAGE_HASH_KEY_NAME = "transfer_purse_to_account"; -const HASH_KEY_NAME = "transfer_purse_to_account_hash"; -const ACCESS_KEY_NAME = "transfer_purse_to_account_access"; -const ARG_0_NAME 
= "target_account_addr"; -const ARG_1_NAME = "amount"; - - -enum CustomError{ - MissingAmountArg = 1, - InvalidAmountArg = 2, - MissingDestinationAccountArg = 3, - UnableToGetBalance = 103 -} - -export function transfer(): void { - TransferPurseToAccount.delegate(); -} - -export function call(): void { - let entryPoints = new CL.EntryPoints(); - let args = new Array>(); - args.push(new Pair(ARG_0_NAME, CLType.byteArray(32))); - args.push(new Pair(ARG_1_NAME, new CLType(CLTypeTag.U512))); - - let entryPoint = new CL.EntryPoint(ENTRY_FUNCTION_NAME, args, new CLType(CLTypeTag.Unit), new CL.PublicAccess(), CL.EntryPointType.Session); - entryPoints.addEntryPoint(entryPoint); - let newResult = CL.newContract( - entryPoints, - null, - PACKAGE_HASH_KEY_NAME, - ACCESS_KEY_NAME, - ); - CL.putKey(HASH_KEY_NAME, Key.fromHash(newResult.contractHash)); -} \ No newline at end of file diff --git a/smart_contracts/contracts_as/test/transfer-purse-to-account-stored/assembly/tsconfig.json b/smart_contracts/contracts_as/test/transfer-purse-to-account-stored/assembly/tsconfig.json deleted file mode 100644 index 505b0fc0d8..0000000000 --- a/smart_contracts/contracts_as/test/transfer-purse-to-account-stored/assembly/tsconfig.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "extends": "../../../../../../../.nvm/versions/node/v10.16.3/lib/node_modules/assemblyscript/std/assembly.json", - "include": [ - "./**/*.ts" - ] -} diff --git a/smart_contracts/contracts_as/test/transfer-purse-to-account-stored/index.js b/smart_contracts/contracts_as/test/transfer-purse-to-account-stored/index.js deleted file mode 100644 index 6d824f2f58..0000000000 --- a/smart_contracts/contracts_as/test/transfer-purse-to-account-stored/index.js +++ /dev/null @@ -1,12 +0,0 @@ -const fs = require("fs"); -const compiled = new WebAssembly.Module(fs.readFileSync(__dirname + "/build/transfer_purse_to_account_stored.wasm")); -const imports = { - env: { - abort(_msg, _file, line, column) { - console.error("abort called at index.ts:" 
+ line + ":" + column); - } - } -}; -Object.defineProperty(module, "exports", { - get: () => new WebAssembly.Instance(compiled, imports).exports -}); diff --git a/smart_contracts/contracts_as/test/transfer-purse-to-account-stored/package.json b/smart_contracts/contracts_as/test/transfer-purse-to-account-stored/package.json deleted file mode 100644 index 74b87c27f3..0000000000 --- a/smart_contracts/contracts_as/test/transfer-purse-to-account-stored/package.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "scripts": { - "asbuild:optimized": "asc --lib ../../.. assembly/index.ts -b ../../../../target_as/transfer_purse_to_account_stored.wasm --optimize --use abort=", - "asbuild": "npm run asbuild:optimized" - }, - "devDependencies": { - "assemblyscript": "^0.8.1" - } -} diff --git a/smart_contracts/contracts_as/test/transfer-purse-to-account-with-id/assembly/index.ts b/smart_contracts/contracts_as/test/transfer-purse-to-account-with-id/assembly/index.ts deleted file mode 100644 index 18763b56db..0000000000 --- a/smart_contracts/contracts_as/test/transfer-purse-to-account-with-id/assembly/index.ts +++ /dev/null @@ -1,79 +0,0 @@ -//@ts-nocheck -import * as CL from "../../../../contract_as/assembly"; -import {Error, ErrorCode} from "../../../../contract_as/assembly/error"; -import {U512} from "../../../../contract_as/assembly/bignum"; -import {getMainPurse} from "../../../../contract_as/assembly/account"; -import {Key} from "../../../../contract_as/assembly/key"; -import {putKey} from "../../../../contract_as/assembly"; -import {CLValue} from "../../../../contract_as/assembly/clvalue"; -import {getPurseBalance, transferFromPurseToAccount, TransferredTo} from "../../../../contract_as/assembly/purse"; -import {URef} from "../../../../contract_as/assembly/uref"; -import {Option} from "../../../../contract_as/assembly/option"; -import {fromBytesU64} from "../../../../contract_as/assembly/bytesrepr"; -import {Ref} from "../../../../contract_as/assembly/ref"; - - -const 
TRANSFER_RESULT_UREF_NAME = "transfer_result"; -const MAIN_PURSE_FINAL_BALANCE_UREF_NAME = "final_balance"; - -const ARG_TARGET = "target"; -const ARG_AMOUNT = "amount"; -const ARG_ID = "id"; - -enum CustomError{ - MissingAmountArg = 1, - InvalidAmountArg = 2, - MissingDestinationAccountArg = 3, - UnableToGetBalance = 103 -} - -export function delegate(): void { - const mainPurse = getMainPurse(); - const destinationAccountAddrArg = CL.getNamedArg(ARG_TARGET); - const amountArg = CL.getNamedArg(ARG_AMOUNT); - const amountResult = U512.fromBytes(amountArg); - const idBytes = CL.getNamedArg(ARG_ID); - const maybeOptionalId = Option.fromBytes(idBytes); - - let maybeId: Ref | null = null; - if (maybeOptionalId.isSome()) { - const maybeIdBytes = maybeOptionalId.unwrap(); - maybeId = new Ref(fromBytesU64(maybeIdBytes).unwrap()); - } - - if (amountResult.hasError()) { - Error.fromUserError(CustomError.InvalidAmountArg).revert(); - return; - } - let amount = amountResult.value; - const result = transferFromPurseToAccount(mainPurse, destinationAccountAddrArg, amount, maybeId); - let message = ""; - if (result.isOk) { - const foo = result.ok; - switch (result.ok) { - case TransferredTo.NewAccount: - message = "Ok(NewAccount)"; - break; - case TransferredTo.ExistingAccount: - message = "Ok(ExistingAccount)"; - break; - } - } - - if (result.isErr) { - message = "Err(ApiError::Mint(0) [65024])"; - } - const transferResultKey = Key.create(CLValue.fromString(message)); - putKey(TRANSFER_RESULT_UREF_NAME, transferResultKey); - const maybeBalance = getPurseBalance(mainPurse); - if (maybeBalance === null) { - Error.fromUserError(CustomError.UnableToGetBalance).revert(); - return; - } - const key = Key.create(CLValue.fromU512(maybeBalance)); - putKey(MAIN_PURSE_FINAL_BALANCE_UREF_NAME, key); -} - -export function call(): void { - delegate(); -} diff --git a/smart_contracts/contracts_as/test/transfer-purse-to-account-with-id/assembly/tsconfig.json 
b/smart_contracts/contracts_as/test/transfer-purse-to-account-with-id/assembly/tsconfig.json deleted file mode 100644 index 505b0fc0d8..0000000000 --- a/smart_contracts/contracts_as/test/transfer-purse-to-account-with-id/assembly/tsconfig.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "extends": "../../../../../../../.nvm/versions/node/v10.16.3/lib/node_modules/assemblyscript/std/assembly.json", - "include": [ - "./**/*.ts" - ] -} diff --git a/smart_contracts/contracts_as/test/transfer-purse-to-account-with-id/index.js b/smart_contracts/contracts_as/test/transfer-purse-to-account-with-id/index.js deleted file mode 100644 index a57b2d2ac1..0000000000 --- a/smart_contracts/contracts_as/test/transfer-purse-to-account-with-id/index.js +++ /dev/null @@ -1,12 +0,0 @@ -const fs = require("fs"); -const compiled = new WebAssembly.Module(fs.readFileSync(__dirname + "/build/transfer_purse_to_account.wasm")); -const imports = { - env: { - abort(_msg, _file, line, column) { - console.error("abort called at index.ts:" + line + ":" + column); - } - } -}; -Object.defineProperty(module, "exports", { - get: () => new WebAssembly.Instance(compiled, imports).exports -}); diff --git a/smart_contracts/contracts_as/test/transfer-purse-to-account-with-id/package.json b/smart_contracts/contracts_as/test/transfer-purse-to-account-with-id/package.json deleted file mode 100644 index 055643b4c4..0000000000 --- a/smart_contracts/contracts_as/test/transfer-purse-to-account-with-id/package.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "scripts": { - "asbuild:optimized": "asc --lib ../../.. 
assembly/index.ts -b ../../../../target_as/transfer_purse_to_account_with_id.wasm --optimize --use abort=", - "asbuild": "npm run asbuild:optimized" - }, - "devDependencies": { - "assemblyscript": "^0.8.1" - } -} diff --git a/smart_contracts/contracts_as/test/transfer-purse-to-account/assembly/index.ts b/smart_contracts/contracts_as/test/transfer-purse-to-account/assembly/index.ts deleted file mode 100644 index bb22920e81..0000000000 --- a/smart_contracts/contracts_as/test/transfer-purse-to-account/assembly/index.ts +++ /dev/null @@ -1,66 +0,0 @@ -//@ts-nocheck -import * as CL from "../../../../contract_as/assembly"; -import {Error, ErrorCode} from "../../../../contract_as/assembly/error"; -import {U512} from "../../../../contract_as/assembly/bignum"; -import {getMainPurse} from "../../../../contract_as/assembly/account"; -import {Key} from "../../../../contract_as/assembly/key"; -import {putKey} from "../../../../contract_as/assembly"; -import {CLValue} from "../../../../contract_as/assembly/clvalue"; -import {getPurseBalance, transferFromPurseToAccount, TransferredTo} from "../../../../contract_as/assembly/purse"; -import {URef} from "../../../../contract_as/assembly/uref"; - - -const TRANSFER_RESULT_UREF_NAME = "transfer_result"; -const MAIN_PURSE_FINAL_BALANCE_UREF_NAME = "final_balance"; - -const ARG_TARGET = "target"; -const ARG_AMOUNT = "amount"; - -enum CustomError{ - MissingAmountArg = 1, - InvalidAmountArg = 2, - MissingDestinationAccountArg = 3, - UnableToGetBalance = 103 -} - -export function delegate(): void { - const mainPurse = getMainPurse(); - const destinationAccountAddrArg = CL.getNamedArg(ARG_TARGET); - const amountArg = CL.getNamedArg(ARG_AMOUNT); - const amountResult = U512.fromBytes(amountArg); - if (amountResult.hasError()) { - Error.fromUserError(CustomError.InvalidAmountArg).revert(); - return; - } - let amount = amountResult.value; - let message = ""; - const result = transferFromPurseToAccount(mainPurse, destinationAccountAddrArg, 
amount); - if (result.isOk) { - const foo = result.ok; - switch (result.ok) { - case TransferredTo.NewAccount: - message = "Ok(NewAccount)"; - break; - case TransferredTo.ExistingAccount: - message = "Ok(ExistingAccount)"; - break; - } - } - - if (result.isErr) { - message = "Err(ApiError::Mint(0) [65024])"; - } - const transferResultKey = Key.create(CLValue.fromString(message)); - putKey(TRANSFER_RESULT_UREF_NAME, transferResultKey); - const maybeBalance = getPurseBalance(mainPurse); - if (maybeBalance === null) { - Error.fromUserError(CustomError.UnableToGetBalance).revert(); - return; - } - const key = Key.create(CLValue.fromU512(maybeBalance)); - putKey(MAIN_PURSE_FINAL_BALANCE_UREF_NAME, key); -} - -export function call(): void { - delegate(); -} diff --git a/smart_contracts/contracts_as/test/transfer-purse-to-account/assembly/tsconfig.json b/smart_contracts/contracts_as/test/transfer-purse-to-account/assembly/tsconfig.json deleted file mode 100644 index 505b0fc0d8..0000000000 --- a/smart_contracts/contracts_as/test/transfer-purse-to-account/assembly/tsconfig.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "extends": "../../../../../../../.nvm/versions/node/v10.16.3/lib/node_modules/assemblyscript/std/assembly.json", - "include": [ - "./**/*.ts" - ] -} diff --git a/smart_contracts/contracts_as/test/transfer-purse-to-account/index.js b/smart_contracts/contracts_as/test/transfer-purse-to-account/index.js deleted file mode 100644 index a57b2d2ac1..0000000000 --- a/smart_contracts/contracts_as/test/transfer-purse-to-account/index.js +++ /dev/null @@ -1,12 +0,0 @@ -const fs = require("fs"); -const compiled = new WebAssembly.Module(fs.readFileSync(__dirname + "/build/transfer_purse_to_account.wasm")); -const imports = { - env: { - abort(_msg, _file, line, column) { - console.error("abort called at index.ts:" + line + ":" + column); - } - } -}; -Object.defineProperty(module, "exports", { - get: () => new WebAssembly.Instance(compiled, imports).exports -}); diff --git 
a/smart_contracts/contracts_as/test/transfer-purse-to-account/package.json b/smart_contracts/contracts_as/test/transfer-purse-to-account/package.json deleted file mode 100644 index 5230e2cef9..0000000000 --- a/smart_contracts/contracts_as/test/transfer-purse-to-account/package.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "scripts": { - "asbuild:optimized": "asc --lib ../../.. assembly/index.ts -b ../../../../target_as/transfer_purse_to_account.wasm --optimize --use abort=", - "asbuild": "npm run asbuild:optimized" - }, - "devDependencies": { - "assemblyscript": "^0.8.1" - } -} diff --git a/smart_contracts/contracts_as/test/transfer-purse-to-purse/assembly/index.ts b/smart_contracts/contracts_as/test/transfer-purse-to-purse/assembly/index.ts deleted file mode 100644 index 5a8355c6fe..0000000000 --- a/smart_contracts/contracts_as/test/transfer-purse-to-purse/assembly/index.ts +++ /dev/null @@ -1,126 +0,0 @@ -//@ts-nocheck -import * as CL from "../../../../contract_as/assembly"; -import {Error} from "../../../../contract_as/assembly/error"; -import {U512} from "../../../../contract_as/assembly/bignum"; -import {getMainPurse} from "../../../../contract_as/assembly/account"; -import {Key} from "../../../../contract_as/assembly/key"; -import {getKey, hasKey, putKey} from "../../../../contract_as/assembly"; -import {CLValue} from "../../../../contract_as/assembly/clvalue"; -import {fromBytesString} from "../../../../contract_as/assembly/bytesrepr"; -import {URef} from "../../../../contract_as/assembly/uref"; -import {createPurse, getPurseBalance, transferFromPurseToPurse} from "../../../../contract_as/assembly/purse"; - -const PURSE_MAIN = "purse:main"; -const PURSE_TRANSFER_RESULT = "purse_transfer_result"; -const MAIN_PURSE_BALANCE = "main_purse_balance"; -const SUCCESS_MESSAGE = "Ok(())"; -const TRANSFER_ERROR_MESSAGE = "Err(ApiError::Mint(0) [65024])"; - -const ARG_SOURCE = "source"; -const ARG_TARGET = "target"; -const ARG_AMOUNT = "amount"; - -enum CustomError { - 
MissingSourcePurseArg = 1, - InvalidSourcePurseArg = 2, - MissingDestinationPurseArg = 3, - InvalidDestinationPurseArg = 4, - MissingDestinationPurse = 5, - UnableToStoreResult = 6, - UnableToStoreBalance = 7, - MissingAmountArg = 8, - InvalidAmountArg = 9, - InvalidSourcePurseKey = 103, - UnexpectedSourcePurseKeyVariant = 104, - InvalidDestinationPurseKey = 105, - UnexpectedDestinationPurseKeyVariant = 106, - UnableToGetBalance = 107, -} - -export function call(): void { - const mainPurse = getMainPurse(); - const mainPurseKey = Key.fromURef(mainPurse); - putKey(PURSE_MAIN, mainPurseKey); - const sourcePurseKeyNameArg = CL.getNamedArg(ARG_SOURCE); - const maybeSourcePurseKeyName = fromBytesString(sourcePurseKeyNameArg); - if(maybeSourcePurseKeyName.hasError()) { - Error.fromUserError(CustomError.InvalidSourcePurseArg).revert(); - return; - } - const sourcePurseKeyName = maybeSourcePurseKeyName.value; - const sourcePurseKey = getKey(sourcePurseKeyName); - if (sourcePurseKey === null){ - Error.fromUserError(CustomError.InvalidSourcePurseKey).revert(); - return; - } - if(!sourcePurseKey.isURef()){ - Error.fromUserError(CustomError.UnexpectedSourcePurseKeyVariant).revert(); - return; - } - const sourcePurse = sourcePurseKey.toURef(); - - const destinationPurseKeyNameArg = CL.getNamedArg(ARG_TARGET); - if (destinationPurseKeyNameArg === null) { - Error.fromUserError(CustomError.MissingDestinationPurseArg).revert(); - return; - } - const maybeDestinationPurseKeyName = fromBytesString(destinationPurseKeyNameArg); - if(maybeDestinationPurseKeyName.hasError()){ - Error.fromUserError(CustomError.InvalidDestinationPurseArg).revert(); - return; - } - let destinationPurseKeyName = maybeDestinationPurseKeyName.value; - let destinationPurse: URef | null; - let destinationKey: Key | null; - if(!hasKey(destinationPurseKeyName)){ - destinationPurse = createPurse(); - destinationKey = Key.fromURef(destinationPurse); - putKey(destinationPurseKeyName, destinationKey); - } else { - 
destinationKey = getKey(destinationPurseKeyName); - if(destinationKey === null){ - Error.fromUserError(CustomError.InvalidDestinationPurseKey).revert(); - return; - } - if(!destinationKey.isURef()){ - Error.fromUserError(CustomError.UnexpectedDestinationPurseKeyVariant).revert(); - return; - } - destinationPurse = destinationKey.toURef(); - } - if(destinationPurse === null){ - Error.fromUserError(CustomError.MissingDestinationPurse).revert(); - return; - } - - const amountArg = CL.getNamedArg(ARG_AMOUNT); - const amountResult = U512.fromBytes(amountArg); - if (amountResult.hasError()) { - Error.fromUserError(CustomError.InvalidAmountArg).revert(); - return; - } - const amount = amountResult.value; - - const error = transferFromPurseToPurse(sourcePurse, destinationPurse, amount); - let message = SUCCESS_MESSAGE; - if (error !== null){ - message = TRANSFER_ERROR_MESSAGE; - } - const resultKey = Key.create(CLValue.fromString(message)); - const finalBalance = getPurseBalance(sourcePurse); - if(finalBalance === null){ - Error.fromUserError(CustomError.UnableToGetBalance).revert(); - return; - } - const balanceKey = Key.create(CLValue.fromU512(finalBalance)); - if(balanceKey === null){ - Error.fromUserError(CustomError.UnableToStoreBalance).revert(); - return; - } - if(resultKey === null){ - Error.fromUserError(CustomError.UnableToStoreResult).revert(); - return; - } - putKey(PURSE_TRANSFER_RESULT, resultKey); - putKey(MAIN_PURSE_BALANCE, balanceKey); -} diff --git a/smart_contracts/contracts_as/test/transfer-purse-to-purse/assembly/tsconfig.json b/smart_contracts/contracts_as/test/transfer-purse-to-purse/assembly/tsconfig.json deleted file mode 100644 index 505b0fc0d8..0000000000 --- a/smart_contracts/contracts_as/test/transfer-purse-to-purse/assembly/tsconfig.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "extends": "../../../../../../../.nvm/versions/node/v10.16.3/lib/node_modules/assemblyscript/std/assembly.json", - "include": [ - "./**/*.ts" - ] -} diff --git 
a/smart_contracts/contracts_as/test/transfer-purse-to-purse/index.js b/smart_contracts/contracts_as/test/transfer-purse-to-purse/index.js deleted file mode 100644 index 82bdb3360b..0000000000 --- a/smart_contracts/contracts_as/test/transfer-purse-to-purse/index.js +++ /dev/null @@ -1,12 +0,0 @@ -const fs = require("fs"); -const compiled = new WebAssembly.Module(fs.readFileSync(__dirname + "/build/transfer_purse_to_purse.wasm")); -const imports = { - env: { - abort(_msg, _file, line, column) { - console.error("abort called at index.ts:" + line + ":" + column); - } - } -}; -Object.defineProperty(module, "exports", { - get: () => new WebAssembly.Instance(compiled, imports).exports -}); diff --git a/smart_contracts/contracts_as/test/transfer-purse-to-purse/package.json b/smart_contracts/contracts_as/test/transfer-purse-to-purse/package.json deleted file mode 100644 index 147d4ae642..0000000000 --- a/smart_contracts/contracts_as/test/transfer-purse-to-purse/package.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "scripts": { - "asbuild:optimized": "asc --lib ../../.. 
assembly/index.ts -b ../../../../target_as/transfer_purse_to_purse.wasm --optimize --use abort=", - "asbuild": "npm run asbuild:optimized" - }, - "devDependencies": { - "assemblyscript": "^0.8.1" - } -} diff --git a/smart_contracts/macros/Cargo.toml b/smart_contracts/macros/Cargo.toml new file mode 100644 index 0000000000..8230ba1113 --- /dev/null +++ b/smart_contracts/macros/Cargo.toml @@ -0,0 +1,32 @@ +[package] +name = "casper-contract-macros" +version = "0.1.3" +edition = "2021" +authors = ["Michał Papierski "] +description = "Casper contract macros package" +documentation = "https://docs.rs/casper-contract-macros" +homepage = "https://casper.network" +repository = "https://github.com/casper-network/casper-node/tree/dev/smart_contracts/macros" +license = "Apache-2.0" + +[lib] +proc-macro = true + +[dependencies] +syn = { version = "2", features = ["full", "extra-traits"] } +quote = "1" +casper-executor-wasm-common = { version = "0.1.3", path = "../../executor/wasm_common" } +casper-contract-sdk-sys = { version = "0.1.3", path = "../sdk_sys" } +paste = "1" +darling = "0.20" +proc-macro2 = "1.0" +static_assertions = "1.1.0" + +blake2-rfc = { version = "0.2.18", default-features = false, features = [ + "std", +] } + +[features] +default = [] +__abi_generator = [] +__embed_schema = [] diff --git a/smart_contracts/macros/src/lib.rs b/smart_contracts/macros/src/lib.rs new file mode 100644 index 0000000000..8d3c00b768 --- /dev/null +++ b/smart_contracts/macros/src/lib.rs @@ -0,0 +1,1831 @@ +pub(crate) mod utils; + +extern crate proc_macro; + +use darling::{ast, FromAttributes, FromMeta}; +use proc_macro::TokenStream; +use proc_macro2::Span; +use quote::{format_ident, quote, ToTokens}; +use syn::{ + parse_macro_input, Fields, ItemEnum, ItemFn, ItemImpl, ItemStruct, ItemTrait, ItemUnion, + LitStr, Type, +}; + +use casper_executor_wasm_common::flags::EntryPointFlags; +const CASPER_RESERVED_FALLBACK_EXPORT: &str = "__casper_fallback"; + +#[derive(Debug, FromAttributes)] 
+#[darling(attributes(casper))] +struct MethodAttribute { + #[darling(default)] + constructor: bool, + #[darling(default)] + ignore_state: bool, + #[darling(default)] + revert_on_error: bool, + /// Explicitly mark method as private so it's not externally callable. + #[darling(default)] + private: bool, + #[darling(default)] + payable: bool, + #[darling(default)] + fallback: bool, +} + +#[derive(Debug, FromMeta)] +struct StructMeta { + #[darling(default)] + path: Option, + /// Contract state is a special struct that is used to store the state of the contract. + #[darling(default)] + contract_state: bool, + /// Message is a special struct that is used to send messages to other contracts. + #[darling(default)] + message: bool, +} + +#[derive(Debug, FromMeta)] +struct EnumMeta { + #[darling(default)] + path: Option, +} + +#[derive(Debug, FromMeta)] +struct TraitMeta { + path: Option, + export: Option, +} + +#[derive(Debug, FromMeta)] +enum ItemFnMeta { + Export, +} + +#[derive(Debug, FromMeta)] +struct ImplTraitForContractMeta { + /// Fully qualified path of the trait. + #[darling(default)] + path: Option, + /// Does not produce Wasm exports for the entry points. + #[darling(default)] + compile_as_dependency: bool, +} + +fn generate_call_data_return(output: &syn::ReturnType) -> proc_macro2::TokenStream { + match output { + syn::ReturnType::Default => { + quote! { () } + } + syn::ReturnType::Type(_, ty) => match ty.as_ref() { + Type::Never(_) => { + quote! { () } + } + Type::Reference(reference) => { + // ty.uses_lifetimes(options, lifetimes) + let mut new_ref = reference.clone(); + new_ref.lifetime = Some(syn::Lifetime::new("'a", Span::call_site())); + quote! { <<#new_ref as core::ops::Deref>::Target as casper_contract_sdk::prelude::borrow::ToOwned>::Owned } + } + _ => { + quote! 
{ #ty } + } + }, + } +} + +#[proc_macro_attribute] +pub fn casper(attrs: TokenStream, item: TokenStream) -> TokenStream { + // let attrs: Meta = parse_macro_input!(attrs as Meta); + let attr_args = match ast::NestedMeta::parse_meta_list(attrs.into()) { + Ok(v) => v, + Err(e) => { + return TokenStream::from(e.to_compile_error()); + } + }; + + let has_fallback_selector = false; + + if let Ok(item_struct) = syn::parse::(item.clone()) { + let struct_meta = StructMeta::from_list(&attr_args).unwrap(); + if struct_meta.message { + process_casper_message_for_struct(&item_struct, struct_meta) + } else if struct_meta.contract_state { + // #[casper(contract_state)] + process_casper_contract_state_for_struct(&item_struct, struct_meta) + } else { + // For any other struct that will be part of a schema + // #[casper] + let partial = generate_casper_state_for_struct(&item_struct, struct_meta); + quote! { + #partial + } + .into() + } + } else if let Ok(item_enum) = syn::parse::(item.clone()) { + let enum_meta = EnumMeta::from_list(&attr_args).unwrap(); + let partial = generate_casper_state_for_enum(&item_enum, enum_meta); + quote! 
{ + #partial + } + .into() + } else if let Ok(item_trait) = syn::parse::(item.clone()) { + let trait_meta = TraitMeta::from_list(&attr_args).unwrap(); + casper_trait_definition(item_trait, trait_meta) + } else if let Ok(entry_points) = syn::parse::(item.clone()) { + if let Some((_not, trait_path, _for)) = entry_points.trait_.as_ref() { + let impl_meta = ImplTraitForContractMeta::from_list(&attr_args).unwrap(); + generate_impl_trait_for_contract(&entry_points, trait_path, impl_meta) + } else { + generate_impl_for_contract(entry_points, has_fallback_selector) + } + } else if let Ok(func) = syn::parse::(item.clone()) { + let func_meta = ItemFnMeta::from_list(&attr_args).unwrap(); + match func_meta { + ItemFnMeta::Export => generate_export_function(&func), + } + } else { + let err = syn::Error::new( + Span::call_site(), + "State attribute can only be applied to struct or enum", + ); + TokenStream::from(err.to_compile_error()) + } +} + +fn process_casper_message_for_struct( + item_struct: &ItemStruct, + struct_meta: StructMeta, +) -> TokenStream { + let struct_name = &item_struct.ident; + + let crate_path = match &struct_meta.path { + Some(path) => quote! { #path }, + None => quote! { casper_contract_sdk }, + }; + + let borsh_path = { + let crate_path_str = match &struct_meta.path { + Some(path) => path.to_token_stream().to_string(), + None => "casper_contract_sdk".to_string(), + }; + syn::LitStr::new( + &format!("{}::serializers::borsh", crate_path_str), + Span::call_site(), + ) + }; + + let maybe_derive_abi = get_maybe_derive_abi(crate_path.clone()); + + let maybe_abi_collectors; + let maybe_entrypoint_defs; + + #[cfg(feature = "__abi_generator")] + { + maybe_abi_collectors = quote! 
{ + const _: () = { + #[#crate_path::linkme::distributed_slice(#crate_path::abi_generator::ABI_COLLECTORS)] + #[linkme(crate = #crate_path::linkme)] + static COLLECTOR: fn(&mut #crate_path::abi::Definitions) = |defs| { + defs.populate_one::<#struct_name>(); + }; + }; + }; + + maybe_entrypoint_defs = quote! { + const _: () = { + #[#crate_path::linkme::distributed_slice(#crate_path::abi_generator::MESSAGES)] + #[linkme(crate = #crate_path::linkme)] + static MESSAGE: #crate_path::abi_generator::Message = #crate_path::abi_generator::Message { + name: <#struct_name as #crate_path::Message>::TOPIC, + decl: concat!(module_path!(), "::", stringify!(#struct_name)), + }; + }; + } + } + #[cfg(not(feature = "__abi_generator"))] + { + maybe_abi_collectors = quote! {}; + maybe_entrypoint_defs = quote! {}; + } + + quote! { + #[derive(#crate_path::serializers::borsh::BorshSerialize)] + #[borsh(crate = #borsh_path)] + #maybe_derive_abi + #item_struct + + impl #crate_path::Message for #struct_name { + const TOPIC: &'static str = stringify!(#struct_name); + + #[inline] + fn payload(&self) -> Vec { + #crate_path::serializers::borsh::to_vec(self).unwrap() + } + } + + #maybe_abi_collectors + #maybe_entrypoint_defs + + } + .into() +} + +fn generate_export_function(func: &ItemFn) -> TokenStream { + let func_name = &func.sig.ident; + let mut arg_names = Vec::new(); + let mut args_attrs = Vec::new(); + for input in &func.sig.inputs { + let (name, ty) = match input { + syn::FnArg::Receiver(receiver) => { + todo!("{receiver:?}") + } + syn::FnArg::Typed(typed) => match typed.pat.as_ref() { + syn::Pat::Ident(ident) => (&ident.ident, &typed.ty), + _ => todo!("export: other typed variant"), + }, + }; + arg_names.push(name); + args_attrs.push(quote! { + #name: #ty + }); + } + let _ctor_name = format_ident!("{func_name}_ctor"); + + let exported_func_name = format_ident!("__casper_export_{func_name}"); + quote! 
{ + #[export_name = stringify!(#func_name)] + #[no_mangle] + pub extern "C" fn #exported_func_name() { + #[cfg(target_arch = "wasm32")] + { + casper_contract_sdk::set_panic_hook(); + } + + #func + + #[derive(casper_contract_sdk::serializers::borsh::BorshDeserialize)] + #[borsh(crate = "casper_contract_sdk::serializers::borsh")] + struct Arguments { + #(#args_attrs,)* + } + let input = casper_contract_sdk::prelude::casper::copy_input(); + let args: Arguments = casper_contract_sdk::serializers::borsh::from_slice(&input).unwrap(); + let _ret = #func_name(#(args.#arg_names,)*); + } + + #[cfg(not(target_arch = "wasm32"))] + #func + + #[cfg(not(target_arch = "wasm32"))] + const _: () = { + #[casper_contract_sdk::linkme::distributed_slice(casper_contract_sdk::casper::native::ENTRY_POINTS)] + #[linkme(crate = casper_contract_sdk::linkme)] + pub static EXPORTS: casper_contract_sdk::casper::native::EntryPoint = casper_contract_sdk::casper::native::EntryPoint { + kind: casper_contract_sdk::casper::native::EntryPointKind::Function { name: stringify!(#func_name) }, + fptr: || { #exported_func_name(); }, + module_path: module_path!(), + file: file!(), + line: line!(), + }; + }; + }.into() +} + +fn generate_impl_for_contract( + mut entry_points: ItemImpl, + _has_fallback_selector: bool, +) -> TokenStream { + #[cfg(feature = "__abi_generator")] + let mut populate_definitions_linkme = Vec::new(); + let impl_trait = match entry_points.trait_.as_ref() { + Some((None, path, _for)) => Some(path), + Some((Some(_not), _path, _for)) => { + panic!("Exclamation mark not supported"); + } + None => None, + }; + let struct_name = match entry_points.self_ty.as_ref() { + Type::Path(ref path) => &path.path, + + other => todo!("Unsupported {other:?}"), + }; + let defs = vec![quote! 
{}]; // TODO: Dummy element which may not be necessary but is used for expansion later + #[cfg(feature = "__abi_generator")] + let mut defs = defs; + #[cfg(feature = "__abi_generator")] + let mut defs_linkme = Vec::new(); + let mut names = Vec::new(); + let mut extern_entry_points = Vec::new(); + let _abi_generator_entry_points = [quote! {}]; // TODO: Dummy element which may not be necessary but is used for expansion later + let mut manifest_entry_point_enum_variants = Vec::new(); + let mut manifest_entry_point_enum_match_name = Vec::new(); + let mut manifest_entry_point_input_data = Vec::new(); + let mut extra_code = Vec::new(); + + for entry_point in &mut entry_points.items { + let mut populate_definitions = Vec::new(); + + let method_attribute; + let mut flag_value = EntryPointFlags::empty(); + + // let selector_value; + + let func = match entry_point { + syn::ImplItem::Const(_) => todo!("Const"), + syn::ImplItem::Fn(ref mut func) => { + let vis = &func.vis; + match vis { + syn::Visibility::Public(_) => {} + syn::Visibility::Inherited => { + // As the doc says this "usually means private" + continue; + } + syn::Visibility::Restricted(_restricted) => {} + } + + // func.sig.re + let never_returns = match &func.sig.output { + syn::ReturnType::Default => false, + syn::ReturnType::Type(_, ty) => matches!(ty.as_ref(), Type::Never(_)), + }; + + method_attribute = MethodAttribute::from_attributes(&func.attrs).unwrap(); + + func.attrs.clear(); + + let func_name = func.sig.ident.clone(); + if func_name.to_string().starts_with("__casper_") { + return TokenStream::from( + syn::Error::new( + Span::call_site(), + "Function names starting with '__casper_' are reserved", + ) + .to_compile_error(), + ); + } + + let export_name = if method_attribute.fallback { + format_ident!("{}", CASPER_RESERVED_FALLBACK_EXPORT) + } else { + format_ident!("{}", &func_name) + }; + + names.push(func_name.clone()); + + let arg_names_and_types = func + .sig + .inputs + .iter() + .filter_map(|arg| 
match arg { + syn::FnArg::Receiver(_) => None, + syn::FnArg::Typed(typed) => match typed.pat.as_ref() { + syn::Pat::Ident(ident) => Some((&ident.ident, &typed.ty)), + _ => todo!(), + }, + }) + .collect::>(); + + let arg_names: Vec<_> = + arg_names_and_types.iter().map(|(name, _ty)| name).collect(); + let arg_types: Vec<_> = arg_names_and_types.iter().map(|(_name, ty)| ty).collect(); + let arg_attrs: Vec<_> = arg_names_and_types + .iter() + .map(|(name, ty)| quote! { #name: #ty }) + .collect(); + + // Entry point has &self or &mut self + let mut entry_point_requires_state: bool = false; + + let handle_write_state = match func.sig.inputs.first() { + Some(syn::FnArg::Receiver(receiver)) if receiver.mutability.is_some() => { + entry_point_requires_state = true; + + if !never_returns && receiver.reference.is_some() { + // &mut self does write updated state + Some(quote! { + casper_contract_sdk::casper::write_state(&instance).unwrap(); + }) + } else { + // mut self does not write updated state as the + // method call + // will consume self and there's nothing to persist. + None + } + } + Some(syn::FnArg::Receiver(receiver)) if receiver.mutability.is_none() => { + entry_point_requires_state = true; + + // &self does not write state + None + } + Some(syn::FnArg::Receiver(receiver)) if receiver.lifetime().is_some() => { + panic!("Lifetimes are currently not supported"); + } + Some(_) | None => { + if !never_returns && method_attribute.constructor { + Some(quote! { + casper_contract_sdk::casper::write_state(&_ret).unwrap(); + }) + } else { + None + } + } + }; + + let call_data_return_lifetime = if method_attribute.constructor { + quote! { + #struct_name + } + } else { + generate_call_data_return(&func.sig.output) + }; + let _func_sig_output = match &func.sig.output { + syn::ReturnType::Default => { + quote! { () } + } + syn::ReturnType::Type(_, ty) => { + quote! 
{ #ty } + } + }; + + let handle_ret = if never_returns { + None + } else { + match func.sig.output { + syn::ReturnType::Default => { + // Do not call casper_return if there is no return value + None + } + _ if method_attribute.constructor => { + // Constructor does not return serialized state but is expected to save + // state, or explicitly revert. + // TODO: Add support for Result and revert_on_error if + // possible. + Some(quote! { + let _ = flags; // hide the warning + }) + } + syn::ReturnType::Type(..) => { + // There is a return value so call casper_return. + Some(quote! { + let ret_bytes = casper_contract_sdk::serializers::borsh::to_vec(&_ret).unwrap(); + casper_contract_sdk::casper::ret(flags, Some(&ret_bytes)); + }) + } + } + }; + + assert_eq!(arg_names.len(), arg_types.len()); + + let mut prelude = Vec::new(); + + prelude.push(quote! { + #[derive(casper_contract_sdk::serializers::borsh::BorshDeserialize)] + #[borsh(crate = "casper_contract_sdk::serializers::borsh")] + struct Arguments { + #(#arg_attrs,)* + } + + + let input = casper_contract_sdk::prelude::casper::copy_input(); + let args: Arguments = casper_contract_sdk::serializers::borsh::from_slice(&input).unwrap(); + }); + + if method_attribute.constructor { + prelude.push(quote! { + if casper_contract_sdk::casper::has_state().unwrap() { + panic!("State of the contract is already present; unable to proceed with the constructor"); + } + }); + } + + if !method_attribute.payable { + let panic_msg = format!( + r#"Entry point "{func_name}" is not payable and does not accept tokens"# + ); + prelude.push(quote! { + if casper_contract_sdk::casper::transferred_value() != 0 { + // TODO: Be precise and unambigious about the error + panic!(#panic_msg); + } + }); + } + + let handle_err = if !never_returns && method_attribute.revert_on_error { + if let syn::ReturnType::Default = func.sig.output { + panic!("Cannot revert on error if there is no return value"); + } + + quote! 
{ + let _ret: &Result<_, _> = &_ret; + if _ret.is_err() { + flags |= casper_contract_sdk::casper_executor_wasm_common::flags::ReturnFlags::REVERT; + } + + } + } else { + quote! {} + }; + + let handle_call = if entry_point_requires_state { + quote! { + let mut instance: #struct_name = casper_contract_sdk::casper::read_state().unwrap(); + let _ret = instance.#func_name(#(args.#arg_names,)*); + } + } else if method_attribute.constructor { + quote! { + let _ret = <#struct_name>::#func_name(#(args.#arg_names,)*); + } + } else { + quote! { + let _ret = <#struct_name>::#func_name(#(args.#arg_names,)*); + } + }; + if method_attribute.constructor { + flag_value |= EntryPointFlags::CONSTRUCTOR; + } + + if method_attribute.fallback { + flag_value |= EntryPointFlags::FALLBACK; + } + + let _bits = flag_value.bits(); + + let extern_func_name = format_ident!("__casper_export_{func_name}"); + + extern_entry_points.push(quote! { + + #[export_name = stringify!(#export_name)] + #vis extern "C" fn #extern_func_name() { + // Set panic hook (assumes std is enabled etc.) 
+ #[cfg(target_arch = "wasm32")] + { + casper_contract_sdk::set_panic_hook(); + } + + #(#prelude;)* + + let mut flags = casper_contract_sdk::casper_executor_wasm_common::flags::ReturnFlags::empty(); + + #handle_call; + + #handle_err; + + #handle_write_state; + + #handle_ret; + } + + #[cfg(not(target_arch = "wasm32"))] + const _: () = { + #[casper_contract_sdk::linkme::distributed_slice(casper_contract_sdk::casper::native::ENTRY_POINTS)] + #[linkme(crate = casper_contract_sdk::linkme)] + pub static EXPORTS: casper_contract_sdk::casper::native::EntryPoint = casper_contract_sdk::casper::native::EntryPoint { + kind: casper_contract_sdk::casper::native::EntryPointKind::SmartContract { name: stringify!(#export_name), struct_name: stringify!(#struct_name) }, + fptr: || -> () { #extern_func_name(); }, + module_path: module_path!(), + file: file!(), + line: line!(), + }; + }; + + }); + + manifest_entry_point_enum_variants.push(quote! { + #func_name { + #(#arg_names: #arg_types,)* + } + }); + + manifest_entry_point_enum_match_name.push(quote! { + #func_name + }); + + manifest_entry_point_input_data.push(quote! { + Self::#func_name { #(#arg_names,)* } => { + let into_tuple = (#(#arg_names,)*); + into_tuple.serialize(writer) + } + }); + + match entry_points.self_ty.as_ref() { + Type::Path(ref path) => { + let ident = syn::Ident::new( + &format!("{}_{}", path.path.get_ident().unwrap(), func_name), + Span::call_site(), + ); + + let input_data_content = if arg_names.is_empty() { + quote! { + None + } + } else { + quote! { + Some(casper_contract_sdk::serializers::borsh::to_vec(&self).expect("Serialization to succeed")) + } + }; + + let self_ty = + if method_attribute.constructor || method_attribute.ignore_state { + None + } else { + Some(quote! { + &self, + }) + }; + + if !method_attribute.fallback { + extra_code.push(quote! 
{ + pub fn #func_name<'a>(#self_ty #(#arg_names: #arg_types,)*) -> impl casper_contract_sdk::ToCallData = #call_data_return_lifetime> { + #[derive(casper_contract_sdk::serializers::borsh::BorshSerialize, PartialEq, Debug)] + #[borsh(crate = "casper_contract_sdk::serializers::borsh")] + struct #ident { + #(#arg_names: #arg_types,)* + } + + impl casper_contract_sdk::ToCallData for #ident { + // const SELECTOR: vm_common::selector::Selector = vm_common::selector::Selector::new(#selector_value); + + type Return<'a> = #call_data_return_lifetime; + + fn entry_point(&self) -> &str { stringify!(#func_name) } + + fn input_data(&self) -> Option> { + #input_data_content + } + } + + #ident { + #(#arg_names,)* + } + } + }); + } + } + + _ => todo!("Different self_ty currently unsupported"), + } + + func.clone() + } + syn::ImplItem::Type(_) => todo!(), + syn::ImplItem::Macro(_) => todo!(), + syn::ImplItem::Verbatim(_) => todo!(), + _ => todo!(), + }; + + let mut args = Vec::new(); + + for input in &func.sig.inputs { + let typed = match input { + syn::FnArg::Receiver(_receiver) => continue, + syn::FnArg::Typed(typed) => typed, + }; + // typed + let name = match &typed.pat.as_ref() { + syn::Pat::Const(_) => todo!("Const"), + syn::Pat::Ident(ident) => ident, + syn::Pat::Lit(_) => todo!("Lit"), + syn::Pat::Macro(_) => todo!("Macro"), + syn::Pat::Or(_) => todo!("Or"), + syn::Pat::Paren(_) => todo!("Paren"), + syn::Pat::Path(_) => todo!("Path"), + syn::Pat::Range(_) => todo!("Range"), + syn::Pat::Reference(_) => todo!("Reference"), + syn::Pat::Rest(_) => todo!("Rest"), + syn::Pat::Slice(_) => todo!("Slice"), + syn::Pat::Struct(_) => todo!("Struct"), + syn::Pat::Tuple(_) => todo!("Tuple"), + syn::Pat::TupleStruct(_) => todo!("TupleStruct"), + syn::Pat::Type(_) => todo!("Type"), + syn::Pat::Verbatim(_) => todo!("Verbatim"), + syn::Pat::Wild(_) => todo!("Wild"), + _ => todo!(), + }; + let ty = &typed.ty; + + populate_definitions.push(quote! 
{ + definitions.populate_one::<#ty>(); + }); + + args.push(quote! { + casper_contract_sdk::schema::SchemaArgument { + name: stringify!(#name).into(), + decl: <#ty as casper_contract_sdk::abi::CasperABI>::declaration(), + } + }); + } + + #[cfg(feature = "__abi_generator")] + { + let bits = flag_value.bits(); + + let result = match &func.sig.output { + syn::ReturnType::Default => { + populate_definitions.push(quote! { + definitions.populate_one::<()>(); + }); + + quote! { <() as casper_contract_sdk::abi::CasperABI>::declaration() } + } + syn::ReturnType::Type(_, ty) => match ty.as_ref() { + Type::Never(_) => { + populate_definitions.push(quote! { + definitions.populate_one::<()>(); + }); + + quote! { <() as casper_contract_sdk::abi::CasperABI>::declaration() } + } + _ => { + populate_definitions.push(quote! { + definitions.populate_one::<#ty>(); + }); + + quote! { <#ty as casper_contract_sdk::abi::CasperABI>::declaration() } + } + }, + }; + + let func_name = &func.sig.ident; + + let linkme_schema_entry_point_ident = + format_ident!("__casper_schema_entry_point_{func_name}"); + + defs.push(quote! { + fn #linkme_schema_entry_point_ident() -> casper_contract_sdk::schema::SchemaEntryPoint { + casper_contract_sdk::schema::SchemaEntryPoint { + name: stringify!(#func_name).into(), + arguments: vec![ #(#args,)* ], + result: #result, + flags: casper_contract_sdk::casper_executor_wasm_common::flags::EntryPointFlags::from_bits(#bits).unwrap(), + } + } + }); + defs_linkme.push(linkme_schema_entry_point_ident); + + let linkme_abi_populate_defs_ident = + format_ident!("__casper_populate_definitions_{func_name}"); + + defs.push(quote! 
{ + fn #linkme_abi_populate_defs_ident(definitions: &mut casper_contract_sdk::abi::Definitions) { + #(#populate_definitions)*; + } + }); + + populate_definitions_linkme.push(linkme_abi_populate_defs_ident); + } + } + // let entry_points_len = entry_points.len(); + let st_name = struct_name.get_ident().unwrap(); + let maybe_abi_collectors; + let maybe_entrypoint_defs; + #[cfg(feature = "__abi_generator")] + { + maybe_abi_collectors = quote! { + #( + const _: () = { + #[casper_contract_sdk::linkme::distributed_slice(casper_contract_sdk::abi_generator::ABI_COLLECTORS)] + #[linkme(crate = casper_contract_sdk::linkme)] + static COLLECTOR: fn(&mut casper_contract_sdk::abi::Definitions) = <#struct_name>::#populate_definitions_linkme; + }; + )* + }; + + maybe_entrypoint_defs = quote! { + #( + + const _: () = { + #[casper_contract_sdk::linkme::distributed_slice(casper_contract_sdk::abi_generator::ENTRYPOINTS)] + #[linkme(crate = casper_contract_sdk::linkme)] + static ENTRY_POINTS: fn() -> casper_contract_sdk::schema::SchemaEntryPoint = <#struct_name>::#defs_linkme; + }; + )* + } + } + #[cfg(not(feature = "__abi_generator"))] + { + maybe_abi_collectors = quote! {}; + maybe_entrypoint_defs = quote! {}; + } + let handle_manifest = match impl_trait { + Some(_path) => { + // Do not generate a manifest if we're implementing a trait. + // The expectation is that you list the traits below under + // #[derive(Contract)] and the rest is handled by a macro + None + } + None => Some(quote! { + + #[doc(hidden)] + impl #struct_name { + #(#defs)* + } + + #maybe_abi_collectors + + #maybe_entrypoint_defs + #(#extern_entry_points)* + + }), + }; + let ref_struct_name = format_ident!("{st_name}Ref"); + + quote! 
{ + #entry_points + + #handle_manifest + + impl #ref_struct_name { + #(#extra_code)* + } + } + .into() +} + +fn generate_impl_trait_for_contract( + entry_points: &ItemImpl, + trait_path: &syn::Path, + impl_meta: ImplTraitForContractMeta, +) -> TokenStream { + let self_ty = match entry_points.self_ty.as_ref() { + Type::Path(ref path) => &path.path, + other => todo!("Unsupported {other:?}"), + }; + let self_ty = quote! { #self_ty }; + let mut code = Vec::new(); + + let trait_name = trait_path + .segments + .last() + .expect("Expected non-empty path") + .ident + .clone(); + + let path_to_macro = match &impl_meta.path { + Some(path) => quote! { #path }, + None => { + quote! { self } + } + }; + + let path_to_crate: proc_macro2::TokenStream = match &impl_meta.path { + Some(path) => { + let crate_name = path + .segments + .first() + .expect("Expected non-empty path") + .ident + .clone(); + + if crate_name == "crate" { + // This is local, can't refer by absolute path + quote! { #path } + } else { + quote! { #crate_name } + } + } + None => { + quote! { self } + } + }; + + let macro_name = format_ident!("enumerate_{trait_name}_symbols"); + let ref_trait = format_ident!("{}Ext", trait_path.segments.last().unwrap().ident); + let ref_name = format_ident!("{}Ref", self_ty.to_token_stream().to_string()); + + let visitor = if impl_meta.compile_as_dependency { + quote! { + const _: () = { + macro_rules! visitor { + ($( $vis:vis $name:ident as $export_name:ident => $dispatch:ident , $schema:ident , )*) => { + $( + $vis fn $name() { + #path_to_macro::$dispatch::<#self_ty>(); + } + )* + } + } + + #path_to_crate::#macro_name!(visitor); + }; + } + } else { + quote! { + const _: () = { + macro_rules! 
visitor { + ($( $vis:vis $name:ident as $export_name:ident => $dispatch:ident , $schema:ident , )*) => { + $( + #[export_name = stringify!($export_name)] + $vis extern "C" fn $name() { + #path_to_macro::$dispatch::<#self_ty>(); + } + + #[cfg(not(target_arch = "wasm32"))] + const _: () = { + #[casper_contract_sdk::linkme::distributed_slice(casper_contract_sdk::casper::native::ENTRY_POINTS)] + #[linkme(crate = casper_contract_sdk::linkme)] + pub static EXPORTS: casper_contract_sdk::casper::native::EntryPoint = casper_contract_sdk::casper::native::EntryPoint { + kind: casper_contract_sdk::casper::native::EntryPointKind::TraitImpl { trait_name: stringify!(#trait_name), impl_name: stringify!(#self_ty), name: stringify!($export_name) }, + fptr: || -> () { $name(); }, + module_path: module_path!(), + file: file!(), + line: line!(), + }; + }; + + #[cfg(not(target_arch = "wasm32"))] + const _: () = { + #[casper_contract_sdk::linkme::distributed_slice(casper_contract_sdk::abi_generator::ENTRYPOINTS)] + #[linkme(crate = casper_contract_sdk::linkme)] + static ENTRY_POINTS: fn() -> casper_contract_sdk::schema::SchemaEntryPoint = <#ref_name as #ref_trait>::$schema; + }; + )* + } + } + + #path_to_crate::#macro_name!(visitor); + }; + } + }; + + code.push(visitor); + + let ref_trait = format_ident!("{}Ext", trait_path.require_ident().unwrap()); + + let ref_name = format_ident!("{self_ty}Ref"); + + code.push(quote! { + impl #ref_trait for #ref_name {} + }); + + quote! { + #entry_points + + #(#code)* + } + .into() +} + +fn casper_trait_definition(mut item_trait: ItemTrait, trait_meta: TraitMeta) -> TokenStream { + let crate_path = match &trait_meta.path { + Some(path) => quote! { #path }, + None => quote! 
{ casper_contract_sdk }, + }; + + let borsh_path = { + let crate_path_str = match &trait_meta.path { + Some(path) => path.to_token_stream().to_string(), + None => "casper_contract_sdk".to_string(), + }; + syn::LitStr::new( + &format!("{}::serializers::borsh", crate_path_str), + Span::call_site(), + ) + }; + + let trait_name = &item_trait.ident; + + let vis = &item_trait.vis; + let mut dispatch_functions = Vec::new(); + // let mut dispatch_table = Vec::new(); + let mut extra_code = Vec::new(); + // let mut schema_entry_points = Vec::new(); + let mut populate_definitions = Vec::new(); + let mut macro_symbols = Vec::new(); + for entry_point in &mut item_trait.items { + match entry_point { + syn::TraitItem::Const(_) => todo!("Const"), + syn::TraitItem::Fn(func) => { + // let vis =func.vis; + let method_attribute = MethodAttribute::from_attributes(&func.attrs).unwrap(); + func.attrs.clear(); + + if method_attribute.private { + continue; + } + + let func_name = func.sig.ident.clone(); + let func_name_str = func_name.to_string(); + + if func_name.to_string().starts_with("__casper_") { + return TokenStream::from( + syn::Error::new( + Span::call_site(), + "Function names starting with '__casper_' are reserved", + ) + .to_compile_error(), + ); + } + + let export_name = if method_attribute.fallback { + CASPER_RESERVED_FALLBACK_EXPORT.to_string() + } else { + format!("{}_{}", trait_name, func_name_str) + }; + + let export_ident = format_ident!("{}", &func_name_str); + + let result = match &func.sig.output { + syn::ReturnType::Default => { + populate_definitions.push(quote! { + definitions.populate_one::<()>(); + }); + + quote! { <() as #crate_path::abi::CasperABI>::declaration() } + } + syn::ReturnType::Type(_, ty) => { + if let Type::Never(_) = ty.as_ref() { + populate_definitions.push(quote! { + definitions.populate_one::<()>(); + }); + + quote! { <() as #crate_path::abi::CasperABI>::declaration() } + } else { + populate_definitions.push(quote! 
{ + definitions.populate_one::<#ty>(); + }); + + quote! { <#ty as #crate_path::abi::CasperABI>::declaration() } + } + } + }; + + let call_data_return_lifetime = generate_call_data_return(&func.sig.output); + + let dispatch_func_name = format_ident!("{trait_name}_{func_name}_dispatch"); + + let arg_names_and_types = func + .sig + .inputs + .iter() + .filter_map(|arg| match arg { + syn::FnArg::Receiver(_) => None, + syn::FnArg::Typed(typed) => match typed.pat.as_ref() { + syn::Pat::Ident(ident) => Some((&ident.ident, &typed.ty)), + _ => todo!(), + }, + }) + .collect::>(); + + let arg_names: Vec<_> = + arg_names_and_types.iter().map(|(name, _ty)| name).collect(); + let arg_types: Vec<_> = arg_names_and_types.iter().map(|(_name, ty)| ty).collect(); + // let mut arg_pairs: Vec + let args_attrs: Vec<_> = arg_names_and_types + .iter() + .map(|(name, ty)| { + quote! { + #name: #ty + } + }) + .collect(); + + let mut args = Vec::new(); + for (name, ty) in &arg_names_and_types { + populate_definitions.push(quote! { + definitions.populate_one::<()>(); + }); + args.push(quote! { + casper_contract_sdk::schema::SchemaArgument { + name: stringify!(#name).into(), + decl: <#ty as #crate_path::abi::CasperABI>::declaration(), + } + }); + } + + let flags = EntryPointFlags::empty(); + + let _flags = flags.bits(); + + let handle_dispatch = match func.sig.inputs.first() { + Some(syn::FnArg::Receiver(_receiver)) => { + assert!( + !method_attribute.private, + "can't make dispatcher for private method" + ); + quote! 
{ + #vis extern "C" fn #dispatch_func_name() + where + T: #trait_name + + #crate_path::serializers::borsh::BorshDeserialize + + #crate_path::serializers::borsh::BorshSerialize + + Default + { + #[derive(#crate_path::serializers::borsh::BorshDeserialize)] + #[borsh(crate = #borsh_path)] + struct Arguments { + #(#args_attrs,)* + } + + let mut flags = #crate_path::casper_executor_wasm_common::flags::ReturnFlags::empty(); + let mut instance: T = #crate_path::casper::read_state().unwrap(); + let input = #crate_path::prelude::casper::copy_input(); + let args: Arguments = #crate_path::serializers::borsh::from_slice(&input).unwrap(); + + let ret = instance.#func_name(#(args.#arg_names,)*); + + #crate_path::casper::write_state(&instance).unwrap(); + + let ret_bytes = #crate_path::serializers::borsh::to_vec(&ret).unwrap(); + #crate_path::casper::ret(flags, Some(&ret_bytes)); + } + } + } + + None | Some(syn::FnArg::Typed(_)) => { + assert!( + !method_attribute.private, + "can't make dispatcher for private static method" + ); + quote! { + #vis extern "C" fn #dispatch_func_name() { + #[derive(#crate_path::serializers::borsh::BorshDeserialize)] + #[borsh(crate = #borsh_path)] + struct Arguments { + #(#args_attrs,)* + } + + + let input = #crate_path::prelude::casper::copy_input(); + let args: Arguments = #crate_path::serializers::borsh::from_slice(&input).unwrap(); + + + let _ret = ::#func_name(#(args.#arg_names,)*); + } + } + } + }; + + let schema_helper_ident = format_ident!("__casper_schema_entry_point_{func_name}"); + extra_code.push(quote! { + fn #schema_helper_ident () -> casper_contract_sdk::schema::SchemaEntryPoint { + casper_contract_sdk::schema::SchemaEntryPoint { + name: stringify!(#export_name).into(), + arguments: vec![ #(#args,)* ], + result: #result, + flags: casper_contract_sdk::casper_executor_wasm_common::flags::EntryPointFlags::from_bits(#_flags).unwrap(), + } + } + }); + + macro_symbols.push(quote! 
{ + #vis #func_name as #export_ident => #dispatch_func_name , #schema_helper_ident + }); + + dispatch_functions.push(quote! { #handle_dispatch }); + + let input_data_content = if arg_names.is_empty() { + quote! { + None + } + } else { + quote! { + Some(#crate_path::serializers::borsh::to_vec(&self).expect("Serialization to succeed")) + } + }; + let self_ty = if method_attribute.constructor || method_attribute.ignore_state { + None + } else { + Some(quote! { + self, + }) + }; + + let is_fallback = method_attribute.fallback; + + if !is_fallback { + let entry_point_lit = LitStr::new(&export_name, Span::call_site()); + extra_code.push(quote! { + fn #func_name<'a>(#self_ty #(#arg_names: #arg_types,)*) -> impl #crate_path::ToCallData = #call_data_return_lifetime> { + #[derive(#crate_path::serializers::borsh::BorshSerialize)] + #[borsh(crate = #borsh_path)] + struct CallData { + #(pub #arg_names: #arg_types,)* + } + + impl #crate_path::ToCallData for CallData { + // const SELECTOR: vm_common::selector::Selector = vm_common::selector::Selector::new(#selector_value); + + type Return<'a> = #call_data_return_lifetime; + + fn entry_point(&self) -> &str { #entry_point_lit } + fn input_data(&self) -> Option> { + #input_data_content + } + } + + CallData { + #(#arg_names,)* + } + } + }); + } + } + syn::TraitItem::Type(_) => { + return syn::Error::new(Span::call_site(), "Unsupported generic associated types") + .to_compile_error() + .into(); + } + syn::TraitItem::Macro(_) => todo!("Macro"), + syn::TraitItem::Verbatim(_) => todo!("Verbatim"), + other => todo!("Other {other:?}"), + } + } + let ref_struct = format_ident!("{trait_name}Ref"); + let ref_struct_trait = format_ident!("{trait_name}Ext"); + + let macro_name = format_ident!("enumerate_{trait_name}_symbols"); + + let maybe_exported_macro = if !trait_meta.export.unwrap_or(false) { + quote! { + #[allow(non_snake_case, unused_macros)] + macro_rules! #macro_name { + ($mac:ident) => { + $mac! 
{ + #(#macro_symbols,)* + } + } + } + pub(crate) use #macro_name; + } + } else { + quote! { + #[allow(non_snake_case, unused_macros)] + #[macro_export] + macro_rules! #macro_name { + ($mac:ident) => { + $mac! { + #(#macro_symbols,)* + } + } + } + } + }; + + let extension_struct = quote! { + #vis trait #ref_struct_trait: Sized { + #(#extra_code)* + } + + #vis struct #ref_struct; + + impl #ref_struct { + + } + + #maybe_exported_macro + + #(#dispatch_functions)* + + // TODO: Rename Ext with Ref, since Ref struct can be pub(crate)'d + impl #ref_struct_trait for #ref_struct {} + impl #crate_path::ContractRef for #ref_struct { + fn new() -> Self { + #ref_struct + } + } + }; + quote! { + #item_trait + + #extension_struct + } + .into() +} + +fn generate_casper_state_for_struct( + item_struct: &ItemStruct, + struct_meta: StructMeta, +) -> impl quote::ToTokens { + let crate_path = match &struct_meta.path { + Some(path) => quote! { #path }, + None => quote! { casper_contract_sdk }, + }; + + let borsh_path = { + let crate_path_str = match &struct_meta.path { + Some(path) => path.to_token_stream().to_string(), + None => "casper_contract_sdk".to_string(), + }; + syn::LitStr::new( + &format!("{}::serializers::borsh", crate_path_str), + Span::call_site(), + ) + }; + let maybe_derive_abi = get_maybe_derive_abi(crate_path.clone()); + + quote! { + #[derive(#crate_path::serializers::borsh::BorshSerialize, #crate_path::serializers::borsh::BorshDeserialize)] + #[borsh(crate = #borsh_path)] + #maybe_derive_abi + #item_struct + } +} + +fn generate_casper_state_for_enum( + item_enum: &ItemEnum, + enum_meta: EnumMeta, +) -> impl quote::ToTokens { + let crate_path = match &enum_meta.path { + Some(path) => quote! { #path }, + None => quote! 
{ casper_contract_sdk }, + }; + + let borsh_path = { + let crate_path_str = match &enum_meta.path { + Some(path) => path.to_token_stream().to_string(), + None => "casper_contract_sdk".to_string(), + }; + syn::LitStr::new( + &format!("{}::serializers::borsh", crate_path_str), + Span::call_site(), + ) + }; + + let maybe_derive_abi = get_maybe_derive_abi(crate_path.clone()); + + quote! { + #[derive(#crate_path::serializers::borsh::BorshSerialize, #crate_path::serializers::borsh::BorshDeserialize)] + #[borsh(use_discriminant = true, crate = #borsh_path)] + #[repr(u32)] + #maybe_derive_abi + #item_enum + } +} + +fn get_maybe_derive_abi(_crate_path: impl ToTokens) -> impl ToTokens { + #[cfg(feature = "__abi_generator")] + { + quote! { + #[derive(#_crate_path::macros::CasperABI)] + } + } + + #[cfg(not(feature = "__abi_generator"))] + { + quote! {} + } +} + +fn process_casper_contract_state_for_struct( + contract_struct: &ItemStruct, + struct_meta: StructMeta, +) -> TokenStream { + let struct_name = &contract_struct.ident; + let ref_name = format_ident!("{struct_name}Ref"); + let vis = &contract_struct.vis; + + let crate_path = match &struct_meta.path { + Some(path) => quote! { #path }, + None => quote! { casper_contract_sdk }, + }; + let borsh_path = { + let crate_path_str = match &struct_meta.path { + Some(path) => path.to_token_stream().to_string(), + None => "casper_contract_sdk".to_string(), + }; + syn::LitStr::new( + &format!("{}::serializers::borsh", crate_path_str), + Span::call_site(), + ) + }; + + let maybe_derive_abi = get_maybe_derive_abi(crate_path.clone()); + + // Optionally, generate a schema export if the appropriate flag + // is set. + let maybe_casper_schema = { + #[cfg(feature = "__embed_schema")] + quote! 
{ + const SCHEMA: Option<&str> = option_env!("__CARGO_CASPER_INJECT_SCHEMA_MARKER"); + + #[no_mangle] + pub extern "C" fn __casper_schema() { + use #crate_path::casper::ret; + use #crate_path::casper_executor_wasm_common::flags::ReturnFlags; + let bytes = SCHEMA.unwrap_or_default().as_bytes(); + ret(ReturnFlags::empty(), Some(bytes)); + } + } + #[cfg(not(feature = "__embed_schema"))] + quote! {} + }; + + quote! { + #[derive(#crate_path::serializers::borsh::BorshSerialize, #crate_path::serializers::borsh::BorshDeserialize)] + #[borsh(crate = #borsh_path)] + #maybe_derive_abi + #contract_struct + + #vis struct #ref_name; + + impl #crate_path::ContractRef for #ref_name { + fn new() -> Self { + #ref_name + } + } + + #maybe_casper_schema + } + .into() +} + +#[proc_macro_attribute] +pub fn entry_point(_attr: TokenStream, item: TokenStream) -> TokenStream { + let func = parse_macro_input!(item as ItemFn); + + let vis = &func.vis; + let _sig = &func.sig; + let func_name = &func.sig.ident; + + let block = &func.block; + + let mut handle_args = Vec::new(); + let mut params = Vec::new(); + + for arg in &func.sig.inputs { + let typed = match arg { + syn::FnArg::Receiver(_) => todo!(), + syn::FnArg::Typed(typed) => typed, + }; + + let name = match typed.pat.as_ref() { + syn::Pat::Ident(ident) => &ident.ident, + _ => todo!(), + }; + + let ty = &typed.ty; + + let tok = quote! { + let #typed = casper_contract_sdk::get_named_arg(stringify!(#name)).expect("should get named arg"); + }; + handle_args.push(tok); + + let tok2 = quote! { + (stringify!(#name), <#ty>::cl_type()) + }; + params.push(tok2); + } + + // let len = params.len(); + + let output = &func.sig.output; + + // let const_tok = + + let gen = quote! 
{ + // const paste!(#func_name, _ENTRY_POINT): &str = #func_name; + + #vis fn #func_name() { + #(#handle_args)*; + + let closure = || #output { + #block + }; + + let result = closure(); + + // casper_contract_sdk::EntryPoint { + // name: #func_name, + // params: &[ + // #(#params,)* + // ], + // func: closure, + // } + + result.expect("should work") + } + }; + + println!("{gen}"); + + // quote!(fn foo() {}) + // item + gen.into() +} + +// #[proc_macro_derive(CasperSchema, attributes(casper))] +// pub fn derive_casper_schema(input: TokenStream) -> TokenStream { +// let contract = parse_macro_input!(input as DeriveInput); + +// let contract_attributes = ContractAttributes::from_attributes(&contract.attrs).unwrap(); + +// let _data_struct = match &contract.data { +// Data::Struct(s) => s, +// Data::Enum(_) => todo!("Enum"), +// Data::Union(_) => todo!("Union"), +// }; + +// let name = &contract.ident; + +// // let mut extra_code = Vec::new(); +// // if let Some(traits) = contract_attributes.impl_traits { +// // for path in traits.iter() { +// // let ext_struct = format_ident!("{}Ref", path.require_ident().unwrap()); +// // extra_code.push(quote! { +// // { +// // let entry_points = <#ext_struct>::__casper_schema_entry_points(); +// // schema.entry_points.extend(entry_points); +// // <#ext_struct>::__casper_populate_definitions(&mut schema.definitions); +// // } +// // }); +// // } + +// // let macro_name = format_ident!("enumerate_{path}_symbols"); + +// // extra_code.push(quote! { +// // const _: () = { +// // macro_rules! #macro_name { +// // ($mac:ident) => { +// // $mac! { +// // #(#extra_code)* +// // } +// // } +// // } +// // } +// // }) +// // } + +// quote! 
{ +// impl casper_contract_sdk::schema::CasperSchema for #name { +// fn schema() -> casper_contract_sdk::schema::Schema { +// let mut schema = Self::__casper_schema(); + +// // #(#extra_code)*; + +// schema +// // schema.entry_points.ext +// } +// } +// } +// .into() +// } + +#[proc_macro_derive(CasperABI, attributes(casper))] +pub fn derive_casper_abi(input: TokenStream) -> TokenStream { + let res = if let Ok(input) = syn::parse::(input.clone()) { + let mut populate_definitions = Vec::new(); + let name = input.ident.clone(); + let mut items = Vec::new(); + for field in &input.fields { + match &field.ty { + Type::Path(path) => { + for segment in &path.path.segments { + let field_name = &field.ident; + + populate_definitions.push(quote! { + definitions.populate_one::<#segment>(); + }); + + items.push(quote! { + casper_contract_sdk::abi::StructField { + name: stringify!(#field_name).into(), + decl: <#segment>::declaration(), + } + }); + } + } + other_ty => todo!("Unsupported type {other_ty:?}"), + } + } + + Ok(quote! { + impl casper_contract_sdk::abi::CasperABI for #name { + fn populate_definitions(definitions: &mut casper_contract_sdk::abi::Definitions) { + #(#populate_definitions)*; + } + + fn declaration() -> casper_contract_sdk::abi::Declaration { + const DECL: &str = concat!(module_path!(), "::", stringify!(#name)); + DECL.into() + } + + fn definition() -> casper_contract_sdk::abi::Definition { + casper_contract_sdk::abi::Definition::Struct { + items: vec![ + #(#items,)* + ] + } + } + } + }) + } else if let Ok(input) = syn::parse::(input.clone()) { + // TODO: Check visibility + let name = input.ident.clone(); + + let mut all_definitions = Vec::new(); + let mut all_variants = Vec::new(); + let mut populate_definitions = Vec::new(); + let mut has_unit_definition = false; + + // populate_definitions.push(quote! { + // definitions.populate_one::<#name>(); + // }); + + all_definitions.push(quote! 
{ + casper_contract_sdk::abi::Definition::Enum { + name: stringify!(#name).into(), + } + }); + + let mut current_discriminant = 0; + + for variant in &input.variants { + if let Some(discriminant) = &variant.discriminant { + match &discriminant.1 { + syn::Expr::Lit(lit) => match &lit.lit { + syn::Lit::Int(int) => { + current_discriminant = int.base10_parse::().unwrap(); + } + _ => todo!(), + }, + _ => todo!(), + } + } + + let variant_name = &variant.ident; + + let variant_decl = match &variant.fields { + Fields::Unit => { + // NOTE: Generate an empty struct here for a definition. + if !has_unit_definition { + populate_definitions.push(quote! { + definitions.populate_one::<()>(); + }); + has_unit_definition = true; + } + + quote! { + <()>::declaration() + } + } + Fields::Named(named) => { + let mut fields = Vec::new(); + + let variant_name = format_ident!("{name}_{variant_name}"); + + for field in &named.named { + let field_name = &field.ident; + match &field.ty { + Type::Path(path) => { + populate_definitions.push(quote! { + definitions.populate_one::<#path>(); + }); + + fields.push(quote! { + casper_contract_sdk::abi::StructField { + name: stringify!(#field_name).into(), + decl: <#path as casper_contract_sdk::abi::CasperABI>::declaration() + } + }); + } + other_ty => todo!("Unsupported type {other_ty:?}"), + } + } + + populate_definitions.push(quote! { + definitions.populate_custom( + stringify!(#variant_name).into(), + casper_contract_sdk::abi::Definition::Struct { + items: vec![ + #(#fields,)* + ], + }); + }); + + quote! { + stringify!(#variant_name).into() + } + } + Fields::Unnamed(unnamed_fields) => { + let mut fields = Vec::new(); + + let variant_name = format_ident!("{name}_{variant_name}"); + + for field in &unnamed_fields.unnamed { + match &field.ty { + Type::Path(path) => { + for segment in &path.path.segments { + let type_name = &segment.ident; + populate_definitions.push(quote! { + definitions.populate_one::<#type_name>(); + }); + + fields.push(quote! 
{ + <#type_name as casper_contract_sdk::abi::CasperABI>::declaration() + }); + } + } + other_ty => todo!("Unsupported type {other_ty:?}"), + } + } + + populate_definitions.push(quote! { + definitions.populate_custom( + stringify!(#variant_name).into(), + casper_contract_sdk::abi::Definition::Tuple { + items: vec![ + #(#fields,)* + ], + }); + }); + + quote! { + stringify!(#variant_name).into() + } + } + }; + + all_variants.push(quote! { + casper_contract_sdk::abi::EnumVariant { + name: stringify!(#variant_name).into(), + discriminant: #current_discriminant, + decl: #variant_decl, + } + }); + + current_discriminant += 1; + } + + Ok(quote! { + impl casper_contract_sdk::abi::CasperABI for #name { + fn populate_definitions(definitions: &mut casper_contract_sdk::abi::Definitions) { + #(#populate_definitions)*; + } + + fn declaration() -> casper_contract_sdk::abi::Declaration { + const DECL: &str = concat!(module_path!(), "::", stringify!(#name)); + DECL.into() + } + + fn definition() -> casper_contract_sdk::abi::Definition { + casper_contract_sdk::abi::Definition::Enum { + items: vec![ + #(#all_variants,)* + ], + } + } + } + }) + } else if syn::parse::(input).is_ok() { + Err(syn::Error::new( + Span::call_site(), + "Borsh schema does not support unions yet.", + )) + } else { + // Derive macros can only be defined on structs, enums, and unions. + unreachable!() + }; + TokenStream::from(match res { + Ok(res) => res, + Err(err) => err.to_compile_error(), + }) +} + +#[proc_macro] +pub fn blake2b256(input: TokenStream) -> TokenStream { + let input = parse_macro_input!(input as LitStr); + let bytes = input.value(); + + let hash = utils::compute_blake2b256(bytes.as_bytes()); + + TokenStream::from(quote! { + [ #(#hash),* ] + }) +} + +#[proc_macro] +pub fn test(item: TokenStream) -> TokenStream { + let input = parse_macro_input!(item as ItemFn); + TokenStream::from(quote! 
{ + #[test] + #input + }) +} + +/// `PanicOnDefault` generates implementation for `Default` trait that panics with the following +/// message `The contract is not initialized` when `default()` is called. +/// +/// This is to protect againsts default-initialization of contracts in a situation where no +/// constructor is called, and an entrypoint is invoked before the contract is initialized. +#[proc_macro_derive(PanicOnDefault)] +pub fn derive_no_default(item: TokenStream) -> TokenStream { + if let Ok(input) = syn::parse::(item) { + let name = &input.ident; + TokenStream::from(quote! { + impl ::core::default::Default for #name { + fn default() -> Self { + panic!("The contract is not initialized"); + } + } + }) + } else { + TokenStream::from( + syn::Error::new( + Span::call_site(), + "PanicOnDefault can only be used on type declarations sections.", + ) + .to_compile_error(), + ) + } +} diff --git a/smart_contracts/macros/src/utils.rs b/smart_contracts/macros/src/utils.rs new file mode 100644 index 0000000000..a4bffde887 --- /dev/null +++ b/smart_contracts/macros/src/utils.rs @@ -0,0 +1,5 @@ +pub(crate) fn compute_blake2b256(bytes: &[u8]) -> [u8; 32] { + let mut context = blake2_rfc::blake2b::Blake2b::new(32); + context.update(bytes); + context.finalize().as_bytes().try_into().unwrap() +} diff --git a/smart_contracts/rust-toolchain b/smart_contracts/rust-toolchain new file mode 100644 index 0000000000..dacd12aede --- /dev/null +++ b/smart_contracts/rust-toolchain @@ -0,0 +1 @@ +nightly-2025-02-16 \ No newline at end of file diff --git a/smart_contracts/sdk/Cargo.toml b/smart_contracts/sdk/Cargo.toml new file mode 100644 index 0000000000..6b60d8edf5 --- /dev/null +++ b/smart_contracts/sdk/Cargo.toml @@ -0,0 +1,40 @@ +[package] +name = "casper-contract-sdk" +version = "0.1.3" +edition = "2021" +description = "Casper contract sdk package" +authors = ["Michał Papierski "] +documentation = "https://docs.rs/casper-contract-sdk" +homepage = "https://casper.network" 
+repository = "https://github.com/casper-network/casper-node/tree/dev/smart_contracts/sdk" +license = "Apache-2.0" + +[dependencies] +base16 = "0.2.1" +bitflags = "2.9" +bnum = { version = "0.13.0", features = ["borsh", "num-integer", "num-traits"] } +borsh = { version = "1.5", features = ["derive"] } +bytes = "1.10" +casper-executor-wasm-common = { version = "0.1.3", path = "../../executor/wasm_common" } +casper-contract-macros = { version = "0.1.3", path = "../macros" } +casper-contract-sdk-sys = { version = "0.1.3", path = "../sdk_sys" } +cfg-if = "1.0.0" +clap = { version = "4", optional = true, features = ["derive"] } +const-fnv1a-hash = "1.1.0" +impl-trait-for-tuples = "0.2.2" +serde = { version = "1", features = ["derive"] } +serde_json = "1" +thiserror = { version = "2", optional = true } + +[target.'cfg(not(target_arch = "wasm32"))'.dependencies] +rand = "0.8.5" +once_cell = "1.19.0" +linkme = "=0.3.29" + +[features] +default = ["std"] +std = [] + +cli = ["clap", "thiserror"] +__abi_generator = ["casper-contract-macros/__abi_generator"] +__embed_schema = ["casper-contract-macros/__embed_schema"] diff --git a/smart_contracts/sdk/src/abi.rs b/smart_contracts/sdk/src/abi.rs new file mode 100644 index 0000000000..d23eb6ec23 --- /dev/null +++ b/smart_contracts/sdk/src/abi.rs @@ -0,0 +1,556 @@ +use core::mem; + +use crate::prelude::{ + collections, + collections::{BTreeMap, BTreeSet, HashMap, LinkedList}, + str::FromStr, +}; +use impl_trait_for_tuples::impl_for_tuples; +use serde::{Deserialize, Serialize}; + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Hash, Clone)] +pub struct EnumVariant { + pub name: String, + pub discriminant: u64, + pub decl: Declaration, +} + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Hash, Clone)] +pub struct StructField { + pub name: String, + pub decl: Declaration, +} + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Hash, Clone)] +pub enum Primitive { + 
Char, + U8, + I8, + U16, + I16, + U32, + I32, + U64, + I64, + U128, + I128, + F32, + F64, + Bool, +} + +impl FromStr for Primitive { + type Err = &'static str; + + fn from_str(s: &str) -> Result { + use Primitive::*; + match s { + "Char" => Ok(Char), + "U8" => Ok(U8), + "I8" => Ok(I8), + "U16" => Ok(U16), + "I16" => Ok(I16), + "U32" => Ok(U32), + "I32" => Ok(I32), + "U64" => Ok(U64), + "I64" => Ok(I64), + "U128" => Ok(U128), + "I128" => Ok(I128), + "F32" => Ok(F32), + "F64" => Ok(F64), + "Bool" => Ok(Bool), + _ => Err("Unknown primitive type"), + } + } +} + +pub trait Keyable { + const PRIMITIVE: Primitive; +} + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Clone, Hash)] +#[serde(tag = "type")] +pub enum Definition { + /// Primitive type. + /// + /// Examples: u64, i32, f32, bool, etc + Primitive(Primitive), + /// A mapping. + /// + /// Example Rust types: BTreeMap. + Mapping { + key: Declaration, + value: Declaration, + }, + /// Arbitrary sequence of values. + /// + /// Example Rust types: `Vec`, `&[T]`, `[T; N]`, `Box<[T]>` + Sequence { + /// If length is known, then it specifies that this definition should be represented as + /// an array of a fixed size. + decl: Declaration, + }, + FixedSequence { + /// If length is known, then it specifies that this definition should be represented as + /// an array of a fixed size. + length: u32, // None -> Vec Some(N) [T; N] + decl: Declaration, + }, + /// A tuple of multiple values of various types. + /// + /// Can be also used to represent a heterogeneous list. + Tuple { + items: Vec, + }, + Enum { + items: Vec, + }, + Struct { + items: Vec, + }, +} + +impl Definition { + pub fn unit() -> Self { + // Empty struct should be equivalent to `()` in Rust in other languages. 
+ Definition::Tuple { items: Vec::new() } + } + + pub fn as_struct(&self) -> Option<&[StructField]> { + if let Self::Struct { items } = self { + Some(items.as_slice()) + } else { + None + } + } + + pub fn as_enum(&self) -> Option<&[EnumVariant]> { + if let Self::Enum { items } = self { + Some(items.as_slice()) + } else { + None + } + } + + pub fn as_tuple(&self) -> Option<&[Declaration]> { + if let Self::Tuple { items } = self { + Some(items.as_slice()) + } else { + None + } + } +} + +#[derive(Default, Debug, Serialize, Deserialize, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct Definitions(BTreeMap); + +impl Definitions { + pub fn populate_one(&mut self) { + T::populate_definitions(self); + + let decl = T::declaration(); + let def = T::definition(); + + self.populate_custom(decl, def); + } + + pub fn populate_custom(&mut self, decl: Declaration, def: Definition) { + let previous = self.0.insert(decl.clone(), def.clone()); + if previous.is_some() && previous != Some(def.clone()) { + panic!("Type {decl} has multiple definitions ({previous:?} != {def:?})."); + } + } + + pub fn iter(&self) -> impl Iterator { + self.0.iter() + } + + pub fn get(&self, decl: &str) -> Option<&Definition> { + self.0.get(decl) + } + + pub fn first(&self) -> Option<(&Declaration, &Definition)> { + self.0.iter().next() + } + + /// Returns true if the given declaration has a definition in this set. 
+ pub fn has_definition(&self, decl: &Declaration) -> bool { + self.0.contains_key(decl) + } +} + +impl IntoIterator for Definitions { + type Item = (Declaration, Definition); + type IntoIter = collections::btree_map::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + self.0.into_iter() + } +} + +pub type Declaration = String; + +pub trait CasperABI { + fn populate_definitions(definitions: &mut Definitions); + fn declaration() -> Declaration; // "String" + fn definition() -> Definition; // Sequence { Char } +} + +impl CasperABI for &T +where + T: CasperABI, +{ + fn populate_definitions(definitions: &mut Definitions) { + T::populate_definitions(definitions); + } + + fn declaration() -> Declaration { + T::declaration() + } + + fn definition() -> Definition { + T::definition() + } +} + +impl CasperABI for Box +where + T: CasperABI, +{ + fn populate_definitions(definitions: &mut Definitions) { + T::populate_definitions(definitions); + } + + fn declaration() -> Declaration { + T::declaration() + } + + fn definition() -> Definition { + T::definition() + } +} + +macro_rules! 
impl_abi_for_types { + // Accepts following syntax: impl_abi_for_types(u8, u16, u32, u64, String => "string", f32, f64) + ($($ty:ty $(=> $name:expr)?,)* ) => { + $( + impl_abi_for_types!(@impl $ty $(=> $name)?); + )* + }; + + (@impl $ty:ty ) => { + impl_abi_for_types!(@impl $ty => stringify!($ty)); + }; + + (@impl $ty:ty => $def:expr ) => { + impl CasperABI for $ty { + fn populate_definitions(_definitions: &mut Definitions) { + } + + fn declaration() -> Declaration { + stringify!($def).into() + } + + fn definition() -> Definition { + use Primitive::*; + const PRIMITIVE: Primitive = $def; + Definition::Primitive(PRIMITIVE) + } + } + + impl Keyable for $ty { + const PRIMITIVE: Primitive = { + use Primitive::*; + $def + }; + } + }; +} + +impl CasperABI for () { + fn populate_definitions(_definitions: &mut Definitions) {} + + fn declaration() -> Declaration { + "()".into() + } + + fn definition() -> Definition { + Definition::unit() + } +} + +impl_abi_for_types!( + char => Char, + bool => Bool, + u8 => U8, + u16 => U16, + u32 => U32, + u64 => U64, + u128 => U128, + i8 => I8, + i16 => I16, + i32 => I32, + i64 => I64, + f32 => F32, + f64 => F64, + i128 => I128, +); + +#[impl_for_tuples(1, 12)] +impl CasperABI for Tuple { + fn populate_definitions(_definitions: &mut Definitions) { + for_tuples!( #( _definitions.populate_one::(); )* ) + } + + fn declaration() -> Declaration { + let items = <[_]>::into_vec(Box::new([for_tuples!( #( Tuple::declaration() ),* )])); + format!("({})", items.join(", ")) + } + + fn definition() -> Definition { + let items = <[_]>::into_vec(Box::new([for_tuples!( #( Tuple::declaration() ),* )])); + Definition::Tuple { items } + } +} + +impl CasperABI for Result { + fn populate_definitions(definitions: &mut Definitions) { + definitions.populate_one::(); + definitions.populate_one::(); + } + + fn declaration() -> Declaration { + let t_decl = T::declaration(); + let e_decl = E::declaration(); + format!("Result<{t_decl}, {e_decl}>") + } + + fn 
definition() -> Definition { + Definition::Enum { + items: vec![ + EnumVariant { + name: "Ok".into(), + discriminant: 0, + decl: T::declaration(), + }, + EnumVariant { + name: "Err".into(), + discriminant: 1, + decl: E::declaration(), + }, + ], + } + } +} + +impl CasperABI for Option { + fn declaration() -> Declaration { + format!("Option<{}>", T::declaration()) + } + fn definition() -> Definition { + Definition::Enum { + items: vec![ + EnumVariant { + name: "None".into(), + discriminant: 0, + decl: <()>::declaration(), + }, + EnumVariant { + name: "Some".into(), + discriminant: 1, + decl: T::declaration(), + }, + ], + } + } + + fn populate_definitions(definitions: &mut Definitions) { + definitions.populate_one::<()>(); + definitions.populate_one::(); + } +} + +impl CasperABI for Vec { + fn populate_definitions(definitions: &mut Definitions) { + definitions.populate_one::(); + } + + fn declaration() -> Declaration { + format!("Vec<{}>", T::declaration()) + } + fn definition() -> Definition { + Definition::Sequence { + decl: T::declaration(), + } + } +} + +impl CasperABI for [T; N] { + fn populate_definitions(definitions: &mut Definitions) { + definitions.populate_one::(); + } + + fn declaration() -> Declaration { + format!("[{}; {N}]", T::declaration()) + } + fn definition() -> Definition { + Definition::FixedSequence { + length: N.try_into().expect("N is too big"), + decl: T::declaration(), + } + } +} + +impl CasperABI for BTreeMap { + fn populate_definitions(definitions: &mut Definitions) { + definitions.populate_one::(); + definitions.populate_one::(); + } + + fn declaration() -> Declaration { + format!("BTreeMap<{}, {}>", K::declaration(), V::declaration()) + } + + fn definition() -> Definition { + Definition::Mapping { + key: K::declaration(), + value: V::declaration(), + } + } +} + +impl CasperABI for HashMap { + fn populate_definitions(definitions: &mut Definitions) { + definitions.populate_one::(); + definitions.populate_one::(); + } + + fn declaration() -> 
Declaration { + format!("HashMap<{}, {}>", K::declaration(), V::declaration()) + } + + fn definition() -> Definition { + Definition::Mapping { + key: K::declaration(), + value: V::declaration(), + } + } +} + +impl CasperABI for String { + fn populate_definitions(_definitions: &mut Definitions) {} + + fn declaration() -> Declaration { + "String".into() + } + fn definition() -> Definition { + Definition::Sequence { + decl: char::declaration(), + } + } +} + +impl CasperABI for str { + fn populate_definitions(_definitions: &mut Definitions) {} + + fn declaration() -> Declaration { + "String".into() + } + fn definition() -> Definition { + Definition::Sequence { + decl: char::declaration(), + } + } +} + +impl CasperABI for &str { + fn populate_definitions(_definitions: &mut Definitions) {} + + fn declaration() -> Declaration { + "String".into() + } + + fn definition() -> Definition { + Definition::Sequence { + decl: char::declaration(), + } + } +} + +impl CasperABI for LinkedList { + fn populate_definitions(definitions: &mut Definitions) { + definitions.populate_one::(); + } + + fn declaration() -> Declaration { + format!("LinkedList<{}>", T::declaration()) + } + fn definition() -> Definition { + Definition::Sequence { + decl: T::declaration(), + } + } +} + +impl CasperABI for BTreeSet { + fn populate_definitions(definitions: &mut Definitions) { + definitions.populate_one::(); + } + + fn declaration() -> Declaration { + format!("BTreeSet<{}>", T::declaration()) + } + fn definition() -> Definition { + Definition::Sequence { + decl: T::declaration(), + } + } +} + +impl CasperABI for bnum::BUint { + fn populate_definitions(definitions: &mut Definitions) { + definitions.populate_one::(); + } + + fn declaration() -> Declaration { + let width_bytes: usize = mem::size_of::>(); + let width_bits: usize = width_bytes * 8; + format!("U{width_bits}") + } + + fn definition() -> Definition { + let length: u32 = N.try_into().expect("N is too big"); + Definition::FixedSequence { + 
length, + decl: u64::declaration(), + } + } +} + +#[cfg(test)] +mod tests { + use crate::{ + abi::{CasperABI, Definition}, + types::U256, + }; + + #[test] + fn u256_schema() { + assert_eq!(U256::declaration(), "U256"); + assert_eq!( + U256::definition(), + Definition::FixedSequence { + length: 4, + decl: u64::declaration() + } + ); + + let mut value = U256::from(u128::MAX); + value += U256::from(1u64); + let bytes = borsh::to_vec(&value).unwrap(); + // Ensure bnum's borsh serialize/deserialize is what we consider "FixedSequence" + let bytes_back: [u64; 4] = borsh::from_slice(&bytes).unwrap(); + let value_back = U256::from_digits(bytes_back); + assert_eq!(value, value_back); + } +} diff --git a/smart_contracts/sdk/src/abi_generator.rs b/smart_contracts/sdk/src/abi_generator.rs new file mode 100644 index 0000000000..5fa36f1928 --- /dev/null +++ b/smart_contracts/sdk/src/abi_generator.rs @@ -0,0 +1,113 @@ +use core::{mem, ptr::NonNull}; + +use crate::{ + abi::{Declaration, Definitions}, + linkme::distributed_slice, + schema::{Schema, SchemaMessage, SchemaType}, +}; + +#[derive(Debug)] +pub struct Param { + pub name: &'static str, + pub decl: Declaration, +} + +#[derive(Debug)] +pub struct EntryPoint { + pub name: &'static str, + pub params: &'static [&'static Param], + pub result_decl: Declaration, +} + +#[derive(Debug, Clone)] +pub struct Message { + pub name: &'static str, + pub decl: &'static str, +} + +pub struct Manifest { + pub name: &'static str, + pub entry_points: &'static [EntryPoint], +} + +/// All of the entry points generated by proc macro will be registered here. 
+#[distributed_slice] +#[linkme(crate = crate::linkme)] +pub static ENTRYPOINTS: [fn() -> crate::schema::SchemaEntryPoint] = [..]; + +#[distributed_slice] +#[linkme(crate = crate::linkme)] +pub static ABI_COLLECTORS: [fn(&mut crate::abi::Definitions)] = [..]; + +#[distributed_slice] +#[linkme(crate = crate::linkme)] +pub static MESSAGES: [Message] = [..]; + +pub fn casper_collect_schema() -> Schema { + // Collect definitions + let definitions = { + let mut definitions = Definitions::default(); + + for abi_collector in ABI_COLLECTORS { + abi_collector(&mut definitions); + } + + definitions + }; + + // Collect messages + let messages = { + let mut messages = Vec::new(); + + for message in MESSAGES { + messages.push(SchemaMessage { + name: message.name.to_owned(), + decl: message.decl.to_owned(), + }); + } + + messages + }; + + // Collect entrypoints + let entry_points = { + let mut entry_points = Vec::new(); + for entrypoint in ENTRYPOINTS { + entry_points.push(entrypoint()); + } + entry_points + }; + + // Construct a schema object from the extracted information + Schema { + name: "contract".to_string(), + version: None, + type_: SchemaType::Contract { + state: "Contract".to_string(), + }, + definitions, + entry_points, + messages, + } +} + +/// This function is called by the host to collect the schema from the contract. +/// +/// This is considered internal implementation detail and should not be used directly. +/// Primary user of this API is `cargo-casper` tool that will use it to extract schema from the +/// contract. +/// +/// # Safety +/// Pointer to json bytes passed to the callback is valid only within the scope of that function. 
+#[export_name = "__cargo_casper_collect_schema"] +pub unsafe extern "C" fn cargo_casper_collect_schema(size_ptr: *mut u64) -> *mut u8 { + let schema = casper_collect_schema(); + // Write the schema using the provided writer + let mut json_bytes = serde_json::to_vec(&schema).expect("Serialized schema"); + NonNull::new(size_ptr) + .expect("expected non-null ptr") + .write(json_bytes.len().try_into().expect("usize to u64")); + let ptr = json_bytes.as_mut_ptr(); + mem::forget(json_bytes); + ptr +} diff --git a/smart_contracts/sdk/src/casper.rs b/smart_contracts/sdk/src/casper.rs new file mode 100644 index 0000000000..c8c942660b --- /dev/null +++ b/smart_contracts/sdk/src/casper.rs @@ -0,0 +1,561 @@ +#[cfg(all(not(target_arch = "wasm32"), feature = "std"))] +pub mod native; + +use crate::{ + abi::{CasperABI, EnumVariant}, + prelude::{ + ffi::c_void, + marker::PhantomData, + mem::MaybeUninit, + ptr::{self, NonNull}, + }, + reserve_vec_space, + serializers::borsh::{BorshDeserialize, BorshSerialize}, + types::{Address, CallError}, + Message, ToCallData, +}; + +use casper_contract_sdk_sys::casper_env_info; +use casper_executor_wasm_common::{ + env_info::EnvInfo, + error::{result_from_code, CommonResult, HOST_ERROR_SUCCESS}, + flags::ReturnFlags, + keyspace::{Keyspace, KeyspaceTag}, +}; + +/// Print a message. +#[inline] +pub fn print(msg: &str) { + unsafe { casper_contract_sdk_sys::casper_print(msg.as_ptr(), msg.len()) }; +} + +pub enum Alloc Option>> { + Callback(F), + Static(ptr::NonNull), +} + +extern "C" fn alloc_callback Option>>( + len: usize, + ctx: *mut c_void, +) -> *mut u8 { + let opt_closure = ctx.cast::>(); + let allocated_ptr = unsafe { (*opt_closure).take().unwrap()(len) }; + match allocated_ptr { + Some(ptr) => ptr.as_ptr(), + None => ptr::null_mut(), + } +} + +/// Provided callback should ensure that it can provide a pointer that can store `size` bytes. +/// Function returns last pointer after writing data, or None otherwise. 
+pub fn copy_input_into Option>>( + alloc: Option, +) -> Option> { + let ret = unsafe { + casper_contract_sdk_sys::casper_copy_input( + alloc_callback::, + &alloc as *const _ as *mut c_void, + ) + }; + NonNull::::new(ret) +} + +/// Copy input data into a vector. +pub fn copy_input() -> Vec { + let mut vec = Vec::new(); + let last_ptr = copy_input_into(Some(|size| reserve_vec_space(&mut vec, size))); + match last_ptr { + Some(_last_ptr) => vec, + None => { + // TODO: size of input was 0, we could properly deal with this case by not calling alloc + // cb if size==0 + Vec::new() + } + } +} + +/// Provided callback should ensure that it can provide a pointer that can store `size` bytes. +pub fn copy_input_to(dest: &mut [u8]) -> Option<&[u8]> { + let last_ptr = copy_input_into(Some(|size| { + if size > dest.len() { + None + } else { + // SAFETY: `dest` is guaranteed to be non-null and large enough to hold `size` + // bytes. + Some(unsafe { ptr::NonNull::new_unchecked(dest.as_mut_ptr()) }) + } + })); + + let end_ptr = last_ptr?; + let length = unsafe { end_ptr.as_ptr().offset_from(dest.as_mut_ptr()) }; + let length: usize = length.try_into().unwrap(); + Some(&dest[..length]) +} + +/// Return from the contract. +pub fn ret(flags: ReturnFlags, data: Option<&[u8]>) { + let (data_ptr, data_len) = match data { + Some(data) => (data.as_ptr(), data.len()), + None => (ptr::null(), 0), + }; + unsafe { casper_contract_sdk_sys::casper_return(flags.bits(), data_ptr, data_len) }; + #[cfg(target_arch = "wasm32")] + unreachable!() +} + +/// Read from the global state. 
+pub fn read Option>>( + key: Keyspace, + f: F, +) -> Result, CommonResult> { + let (key_space, key_bytes) = match key { + Keyspace::State => (KeyspaceTag::State as u64, &[][..]), + Keyspace::Context(key_bytes) => (KeyspaceTag::Context as u64, key_bytes), + Keyspace::NamedKey(key_bytes) => (KeyspaceTag::NamedKey as u64, key_bytes.as_bytes()), + Keyspace::PaymentInfo(payload) => (KeyspaceTag::PaymentInfo as u64, payload.as_bytes()), + }; + + let mut info = casper_contract_sdk_sys::ReadInfo { + data: ptr::null(), + size: 0, + }; + + extern "C" fn alloc_cb Option>>( + len: usize, + ctx: *mut c_void, + ) -> *mut u8 { + let opt_closure = ctx as *mut Option; + let allocated_ptr = unsafe { (*opt_closure).take().unwrap()(len) }; + match allocated_ptr { + Some(mut ptr) => unsafe { ptr.as_mut() }, + None => ptr::null_mut(), + } + } + + let ctx = &Some(f) as *const _ as *mut _; + + let ret = unsafe { + casper_contract_sdk_sys::casper_read( + key_space, + key_bytes.as_ptr(), + key_bytes.len(), + &mut info as *mut casper_contract_sdk_sys::ReadInfo, + alloc_cb::, + ctx, + ) + }; + + match result_from_code(ret) { + Ok(()) => Ok(Some(())), + Err(CommonResult::NotFound) => Ok(None), + Err(err) => Err(err), + } +} + +/// Write to the global state. +pub fn write(key: Keyspace, value: &[u8]) -> Result<(), CommonResult> { + let (key_space, key_bytes) = match key { + Keyspace::State => (KeyspaceTag::State as u64, &[][..]), + Keyspace::Context(key_bytes) => (KeyspaceTag::Context as u64, key_bytes), + Keyspace::NamedKey(key_bytes) => (KeyspaceTag::NamedKey as u64, key_bytes.as_bytes()), + Keyspace::PaymentInfo(payload) => (KeyspaceTag::PaymentInfo as u64, payload.as_bytes()), + }; + let ret = unsafe { + casper_contract_sdk_sys::casper_write( + key_space, + key_bytes.as_ptr(), + key_bytes.len(), + value.as_ptr(), + value.len(), + ) + }; + result_from_code(ret) +} + +/// Remove from the global state. 
+pub fn remove(key: Keyspace) -> Result<(), CommonResult> { + let (key_space, key_bytes) = match key { + Keyspace::State => (KeyspaceTag::State as u64, &[][..]), + Keyspace::Context(key_bytes) => (KeyspaceTag::Context as u64, key_bytes), + Keyspace::NamedKey(key_bytes) => (KeyspaceTag::NamedKey as u64, key_bytes.as_bytes()), + Keyspace::PaymentInfo(payload) => (KeyspaceTag::PaymentInfo as u64, payload.as_bytes()), + }; + let ret = unsafe { + casper_contract_sdk_sys::casper_remove(key_space, key_bytes.as_ptr(), key_bytes.len()) + }; + result_from_code(ret) +} + +/// Create a new contract instance. +pub fn create( + code: Option<&[u8]>, + transferred_value: u64, + constructor: Option<&str>, + input_data: Option<&[u8]>, + seed: Option<&[u8; 32]>, +) -> Result { + let (code_ptr, code_size): (*const u8, usize) = match code { + Some(code) => (code.as_ptr(), code.len()), + None => (ptr::null(), 0), + }; + + let mut result = MaybeUninit::uninit(); + + let call_error = unsafe { + casper_contract_sdk_sys::casper_create( + code_ptr, + code_size, + transferred_value, + constructor.map(|s| s.as_ptr()).unwrap_or(ptr::null()), + constructor.map(|s| s.len()).unwrap_or(0), + input_data.map(|s| s.as_ptr()).unwrap_or(ptr::null()), + input_data.map(|s| s.len()).unwrap_or(0), + seed.map(|s| s.as_ptr()).unwrap_or(ptr::null()), + seed.map(|s| s.len()).unwrap_or(0), + result.as_mut_ptr(), + ) + }; + + if call_error == 0 { + let result = unsafe { result.assume_init() }; + Ok(result) + } else { + Err(CallError::try_from(call_error).expect("Unexpected error code")) + } +} + +pub(crate) fn call_into Option>>( + address: &Address, + transferred_value: u64, + entry_point: &str, + input_data: &[u8], + alloc: Option, +) -> Result<(), CallError> { + let result_code = unsafe { + casper_contract_sdk_sys::casper_call( + address.as_ptr(), + address.len(), + transferred_value, + entry_point.as_ptr(), + entry_point.len(), + input_data.as_ptr(), + input_data.len(), + alloc_callback::, + &alloc as *const 
_ as *mut _, + ) + }; + call_result_from_code(result_code) +} + +fn call_result_from_code(result_code: u32) -> Result<(), CallError> { + if result_code == HOST_ERROR_SUCCESS { + Ok(()) + } else { + Err(CallError::try_from(result_code).expect("Unexpected error code")) + } +} + +/// Call a contract. +pub fn casper_call( + address: &Address, + transferred_value: u64, + entry_point: &str, + input_data: &[u8], +) -> (Option>, Result<(), CallError>) { + let mut output = None; + let result_code = call_into( + address, + transferred_value, + entry_point, + input_data, + Some(|size| { + let mut vec = Vec::new(); + reserve_vec_space(&mut vec, size); + let result = Some(unsafe { ptr::NonNull::new_unchecked(vec.as_mut_ptr()) }); + output = Some(vec); + result + }), + ); + (output, result_code) +} + +/// Upgrade the contract. +pub fn upgrade( + code: &[u8], + entry_point: Option<&str>, + input_data: Option<&[u8]>, +) -> Result<(), CallError> { + let code_ptr = code.as_ptr(); + let code_size = code.len(); + let entry_point_ptr = entry_point.map(str::as_ptr).unwrap_or(ptr::null()); + let entry_point_size = entry_point.map(str::len).unwrap_or(0); + let input_ptr = input_data.map(|s| s.as_ptr()).unwrap_or(ptr::null()); + let input_size = input_data.map(|s| s.len()).unwrap_or(0); + + let result_code = unsafe { + casper_contract_sdk_sys::casper_upgrade( + code_ptr, + code_size, + entry_point_ptr, + entry_point_size, + input_ptr, + input_size, + ) + }; + match call_result_from_code(result_code) { + Ok(()) => Ok(()), + Err(err) => Err(err), + } +} + +/// Read from the global state into a vector. +pub fn read_into_vec(key: Keyspace) -> Result>, CommonResult> { + let mut vec = Vec::new(); + let out = read(key, |size| reserve_vec_space(&mut vec, size))?.map(|()| vec); + Ok(out) +} + +/// Read from the global state into a vector. +pub fn has_state() -> Result { + // TODO: Host side optimized `casper_exists` to check if given entry exists in the global state. 
+ let mut vec = Vec::new(); + let read_info = read(Keyspace::State, |size| reserve_vec_space(&mut vec, size))?; + match read_info { + Some(()) => Ok(true), + None => Ok(false), + } +} + +/// Read state from the global state. +pub fn read_state() -> Result { + let mut vec = Vec::new(); + let read_info = read(Keyspace::State, |size| reserve_vec_space(&mut vec, size))?; + match read_info { + Some(()) => Ok(borsh::from_slice(&vec).unwrap()), + None => Ok(T::default()), + } +} + +/// Write state to the global state. +pub fn write_state(state: &T) -> Result<(), CommonResult> { + let new_state = borsh::to_vec(state).unwrap(); + write(Keyspace::State, &new_state)?; + Ok(()) +} + +#[derive(Debug)] +pub struct CallResult { + pub data: Option>, + pub result: Result<(), CallError>, + pub marker: PhantomData, +} + +impl CallResult { + pub fn into_result<'a>(self) -> Result, CallError> + where + ::Return<'a>: BorshDeserialize, + { + match self.result { + Ok(()) | Err(CallError::CalleeReverted) => { + let data = self.data.unwrap_or_default(); + Ok(borsh::from_slice(&data).unwrap()) + } + Err(call_error) => Err(call_error), + } + } + + pub fn did_revert(&self) -> bool { + self.result == Err(CallError::CalleeReverted) + } +} + +/// Call a contract. +pub fn call( + contract_address: &Address, + transferred_value: u64, + call_data: T, +) -> Result, CallError> { + let input_data = call_data.input_data().unwrap_or_default(); + + let (maybe_data, result_code) = casper_call( + contract_address, + transferred_value, + call_data.entry_point(), + &input_data, + ); + match result_code { + Ok(()) | Err(CallError::CalleeReverted) => Ok(CallResult:: { + data: maybe_data, + result: result_code, + marker: PhantomData, + }), + Err(error) => Err(error), + } +} + +/// Get the environment info. 
+pub fn get_env_info() -> EnvInfo { + let ret = { + let mut info = MaybeUninit::::uninit(); + + let ret = unsafe { casper_env_info(info.as_mut_ptr().cast(), size_of::() as u32) }; + result_from_code(ret).map(|()| { + // SAFETY: The size of `EnvInfo` is known and the pointer is valid. + unsafe { info.assume_init() } + }) + }; + + match ret { + Ok(info) => info, + Err(err) => panic!("Failed to get environment info: {:?}", err), + } +} + +/// Get the caller. +#[must_use] +pub fn get_caller() -> Entity { + let info = get_env_info(); + Entity::from_parts(info.caller_kind, info.caller_addr).expect("Invalid caller kind") +} + +#[must_use] +pub fn get_callee() -> Entity { + let info = get_env_info(); + Entity::from_parts(info.callee_kind, info.callee_addr).expect("Invalid callee kind") +} + +/// Enum representing either an account or a contract. +#[derive( + BorshSerialize, BorshDeserialize, Debug, Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord, +)] +pub enum Entity { + Account([u8; 32]), + Contract([u8; 32]), +} + +impl Entity { + /// Get the tag of the entity. 
+ #[must_use] + pub fn tag(&self) -> u32 { + match self { + Entity::Account(_) => 0, + Entity::Contract(_) => 1, + } + } + + #[must_use] + pub fn from_parts(tag: u32, address: [u8; 32]) -> Option { + match tag { + 0 => Some(Self::Account(address)), + 1 => Some(Self::Contract(address)), + _ => None, + } + } + + #[must_use] + pub fn address(&self) -> &Address { + match self { + Entity::Account(addr) | Entity::Contract(addr) => addr, + } + } +} + +impl CasperABI for Entity { + fn populate_definitions(definitions: &mut crate::abi::Definitions) { + definitions.populate_one::<[u8; 32]>(); + } + + fn declaration() -> crate::abi::Declaration { + "Entity".into() + } + + fn definition() -> crate::abi::Definition { + crate::abi::Definition::Enum { + items: vec![ + EnumVariant { + name: "Account".into(), + discriminant: 0, + decl: <[u8; 32] as CasperABI>::declaration(), + }, + EnumVariant { + name: "Contract".into(), + discriminant: 1, + decl: <[u8; 32] as CasperABI>::declaration(), + }, + ], + } + } +} + +/// Get the balance of an account or contract. +#[must_use] +pub fn get_balance_of(entity_kind: &Entity) -> u64 { + let (kind, addr) = match entity_kind { + Entity::Account(addr) => (0, addr), + Entity::Contract(addr) => (1, addr), + }; + let mut output: MaybeUninit = MaybeUninit::uninit(); + let ret = unsafe { + casper_contract_sdk_sys::casper_env_balance( + kind, + addr.as_ptr(), + addr.len(), + output.as_mut_ptr().cast(), + ) + }; + if ret == 1 { + unsafe { output.assume_init() } + } else { + 0 + } +} + +/// Get the transferred token value passed to the contract. +#[must_use] +pub fn transferred_value() -> u64 { + let info = get_env_info(); + info.transferred_value +} + +/// Transfer tokens from the current contract to another account or contract. 
+pub fn transfer(target_account: &Address, amount: u64) -> Result<(), CallError> { + let amount: *const c_void = &amount as *const _ as *const c_void; + let result_code = unsafe { + casper_contract_sdk_sys::casper_transfer( + target_account.as_ptr(), + target_account.len(), + amount, + ) + }; + call_result_from_code(result_code) +} + +/// Get the current block time. +#[inline] +pub fn get_block_time() -> u64 { + let info = get_env_info(); + info.block_time +} + +#[doc(hidden)] +pub fn emit_raw(topic: &str, payload: &[u8]) -> Result<(), CommonResult> { + let ret = unsafe { + casper_contract_sdk_sys::casper_emit( + topic.as_ptr(), + topic.len(), + payload.as_ptr(), + payload.len(), + ) + }; + result_from_code(ret) +} + +/// Emit a message. +pub fn emit(message: M) -> Result<(), CommonResult> +where + M: Message, +{ + let topic = M::TOPIC; + let payload = message.payload(); + emit_raw(topic, &payload) +} diff --git a/smart_contracts/sdk/src/casper/native.rs b/smart_contracts/sdk/src/casper/native.rs new file mode 100644 index 0000000000..43373fa566 --- /dev/null +++ b/smart_contracts/sdk/src/casper/native.rs @@ -0,0 +1,963 @@ +use std::{ + cell::RefCell, + collections::{BTreeMap, BTreeSet, VecDeque}, + convert::Infallible, + fmt, + panic::{self, UnwindSafe}, + ptr::{self, NonNull}, + slice, + sync::{Arc, RwLock}, +}; + +use crate::linkme::distributed_slice; +use bytes::Bytes; +use casper_executor_wasm_common::{ + env_info::EnvInfo, + error::{ + CALLEE_REVERTED, CALLEE_SUCCEEDED, CALLEE_TRAPPED, HOST_ERROR_INTERNAL, + HOST_ERROR_NOT_FOUND, HOST_ERROR_SUCCESS, + }, + flags::ReturnFlags, +}; +#[cfg(not(target_arch = "wasm32"))] +use rand::Rng; + +use super::Entity; +use crate::types::Address; + +/// The kind of export that is being registered. +/// +/// This is used to identify the type of export and its name. 
+/// +/// Depending on the location of given function it may be registered as a: +/// +/// * `SmartContract` (if it's part of a `impl Contract` block), +/// * `TraitImpl` (if it's part of a `impl Trait for Contract` block), +/// * `Function` (if it's a standalone function). +/// +/// This is used to dispatch exports under native code i.e. you want to write a test that calls +/// "foobar" regardless of location. +#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)] +pub enum EntryPointKind { + /// Smart contract. + /// + /// This is used to identify the smart contract and its name. + /// + /// The `struct_name` is the name of the smart contract that is being registered. + /// The `name` is the name of the function that is being registered. + SmartContract { + struct_name: &'static str, + name: &'static str, + }, + /// Trait implementation. + /// + /// This is used to identify the trait implementation and its name. + /// + /// The `trait_name` is the name of the trait that is being implemented. + /// The `impl_name` is the name of the implementation. + /// The `name` is the name of the function that is being implemented. + TraitImpl { + trait_name: &'static str, + impl_name: &'static str, + name: &'static str, + }, + /// Function export. + /// + /// This is used to identify the function export and its name. + /// + /// The `name` is the name of the function that is being exported. + Function { name: &'static str }, +} + +impl EntryPointKind { + pub fn name(&self) -> &'static str { + match self { + EntryPointKind::SmartContract { name, .. } + | EntryPointKind::TraitImpl { name, .. } + | EntryPointKind::Function { name } => name, + } + } +} + +/// Export is a structure that contains information about the exported function. +/// +/// This is used to register the export and its name and physical location in the smart contract +/// source code. +pub struct EntryPoint { + /// The kind of entry point that is being registered. 
+ pub kind: EntryPointKind, + pub fptr: fn() -> (), + pub module_path: &'static str, + pub file: &'static str, + pub line: u32, +} + +#[distributed_slice] +#[linkme(crate = crate::linkme)] +pub static ENTRY_POINTS: [EntryPoint]; + +impl fmt::Debug for EntryPoint { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let Self { + kind, + fptr: _, + module_path, + file, + line, + } = self; + + f.debug_struct("Export") + .field("kind", kind) + .field("fptr", &"") + .field("module_path", module_path) + .field("file", file) + .field("line", line) + .finish() + } +} + +/// Invokes an export by its name. +/// +/// This function is used to invoke an export by its name regardless of its location in the smart +/// contract. +pub fn invoke_export_by_name(name: &str) { + let exports_by_name: Vec<_> = ENTRY_POINTS + .iter() + .filter(|export| export.kind.name() == name) + .collect(); + + assert_eq!( + exports_by_name.len(), + 1, + "Expected exactly one export {name} found, but got {exports_by_name:?}" + ); + + (exports_by_name[0].fptr)(); +} + +#[derive(Debug)] +pub enum NativeTrap { + Return(ReturnFlags, Bytes), + Panic(Box), +} + +pub type Container = BTreeMap>; + +#[derive(Clone, Debug)] +#[allow(dead_code)] +pub struct NativeParam(pub(crate) String); + +impl From<&casper_contract_sdk_sys::Param> for NativeParam { + fn from(val: &casper_contract_sdk_sys::Param) -> Self { + let name = + String::from_utf8_lossy(unsafe { slice::from_raw_parts(val.name_ptr, val.name_len) }) + .into_owned(); + NativeParam(name) + } +} + +#[derive(Clone, Debug)] +pub struct Environment { + pub db: Arc>, + contracts: Arc>>, + // input_data: Arc>>, + input_data: Option, + caller: Entity, + callee: Entity, +} + +impl Default for Environment { + fn default() -> Self { + Self { + db: Default::default(), + contracts: Default::default(), + input_data: Default::default(), + caller: DEFAULT_ADDRESS, + callee: DEFAULT_ADDRESS, + } + } +} + +pub const DEFAULT_ADDRESS: Entity = Entity::Account([42; 
32]); + +impl Environment { + #[must_use] + pub fn new(db: Container, caller: Entity) -> Self { + Self { + db: Arc::new(RwLock::new(db)), + contracts: Default::default(), + input_data: Default::default(), + caller, + callee: caller, + } + } + + #[must_use] + pub fn with_caller(&self, caller: Entity) -> Self { + let mut env = self.clone(); + env.caller = caller; + env + } + + #[must_use] + pub fn smart_contract(&self, callee: Entity) -> Self { + let mut env = self.clone(); + env.caller = self.callee; + env.callee = callee; + env + } + + #[must_use] + pub fn session(&self, callee: Entity) -> Self { + let mut env = self.clone(); + env.caller = callee; + env.callee = callee; + env + } + + #[must_use] + pub fn with_callee(&self, callee: Entity) -> Self { + let mut env = self.clone(); + env.callee = callee; + env + } + + #[must_use] + pub fn with_input_data(&self, input_data: Vec) -> Self { + let mut env = self.clone(); + env.input_data = Some(Bytes::from(input_data)); + env + } +} + +impl Environment { + fn key_prefix(&self, key: &[u8]) -> Vec { + let entity = self.callee; + + let mut bytes = Vec::new(); + bytes.extend(entity.tag().to_le_bytes()); + bytes.extend(entity.address()); + bytes.extend(key); + + bytes + } + + fn casper_read( + &self, + key_space: u64, + key_ptr: *const u8, + key_size: usize, + info: *mut casper_contract_sdk_sys::ReadInfo, + alloc: extern "C" fn(usize, *mut core::ffi::c_void) -> *mut u8, + alloc_ctx: *const core::ffi::c_void, + ) -> Result { + let key_bytes = unsafe { slice::from_raw_parts(key_ptr, key_size) }; + let key_bytes = self.key_prefix(key_bytes); + + let Ok(db) = self.db.read() else { + return Ok(HOST_ERROR_INTERNAL); + }; + + let value = match db.get(&key_space) { + Some(values) => values.get(key_bytes.as_slice()).cloned(), + None => return Ok(HOST_ERROR_NOT_FOUND), + }; + match value { + Some(tagged_value) => { + let ptr = NonNull::new(alloc(tagged_value.len(), alloc_ctx as _)); + + if let Some(ptr) = ptr { + unsafe { + (*info).data 
= ptr.as_ptr(); + (*info).size = tagged_value.len(); + } + + unsafe { + ptr::copy_nonoverlapping( + tagged_value.as_ptr(), + ptr.as_ptr(), + tagged_value.len(), + ); + } + } + + Ok(HOST_ERROR_SUCCESS) + } + None => Ok(HOST_ERROR_NOT_FOUND), + } + } + + fn casper_write( + &self, + key_space: u64, + key_ptr: *const u8, + key_size: usize, + value_ptr: *const u8, + value_size: usize, + ) -> Result { + assert!(!key_ptr.is_null()); + assert!(!value_ptr.is_null()); + // let key_bytes = unsafe { slice::from_raw_parts(key_ptr, key_size) }; + let key_bytes = unsafe { slice::from_raw_parts(key_ptr, key_size) }.to_owned(); + let key_bytes = self.key_prefix(&key_bytes); + + let value_bytes = unsafe { slice::from_raw_parts(value_ptr, value_size) }; + + let mut db = self.db.write().unwrap(); + db.entry(key_space).or_default().insert( + Bytes::from(key_bytes.to_vec()), + Bytes::from(value_bytes.to_vec()), + ); + Ok(HOST_ERROR_SUCCESS) + } + + fn casper_remove( + &self, + key_space: u64, + key_ptr: *const u8, + key_size: usize, + ) -> Result { + assert!(!key_ptr.is_null()); + let key_bytes = unsafe { slice::from_raw_parts(key_ptr, key_size) }; + let key_bytes = self.key_prefix(key_bytes); + + let mut db = self.db.write().unwrap(); + if let Some(values) = db.get_mut(&key_space) { + values.remove(key_bytes.as_slice()); + Ok(HOST_ERROR_SUCCESS) + } else { + Ok(HOST_ERROR_NOT_FOUND) + } + } + + fn casper_print(&self, msg_ptr: *const u8, msg_size: usize) -> Result<(), NativeTrap> { + let msg_bytes = unsafe { slice::from_raw_parts(msg_ptr, msg_size) }; + let msg = std::str::from_utf8(msg_bytes).expect("Valid UTF-8 string"); + println!("💻 {msg}"); + Ok(()) + } + + fn casper_return( + &self, + flags: u32, + data_ptr: *const u8, + data_len: usize, + ) -> Result { + let return_flags = ReturnFlags::from_bits_truncate(flags); + let data = if data_ptr.is_null() { + Bytes::new() + } else { + Bytes::copy_from_slice(unsafe { slice::from_raw_parts(data_ptr, data_len) }) + }; + 
Err(NativeTrap::Return(return_flags, data)) + } + + fn casper_copy_input( + &self, + alloc: extern "C" fn(usize, *mut core::ffi::c_void) -> *mut u8, + alloc_ctx: *const core::ffi::c_void, + ) -> Result<*mut u8, NativeTrap> { + let input_data = self.input_data.clone(); + let input_data = input_data.as_ref().cloned().unwrap_or_default(); + let ptr = NonNull::new(alloc(input_data.len(), alloc_ctx as _)); + + match ptr { + Some(ptr) => { + if !input_data.is_empty() { + unsafe { + ptr::copy_nonoverlapping( + input_data.as_ptr(), + ptr.as_ptr(), + input_data.len(), + ); + } + } + Ok(unsafe { ptr.as_ptr().add(input_data.len()) }) + } + None => Ok(ptr::null_mut()), + } + } + + #[allow(clippy::too_many_arguments)] + fn casper_create( + &self, + code_ptr: *const u8, + code_size: usize, + transferred_value: u64, + constructor_ptr: *const u8, + constructor_size: usize, + input_ptr: *const u8, + input_size: usize, + seed_ptr: *const u8, + seed_size: usize, + result_ptr: *mut casper_contract_sdk_sys::CreateResult, + ) -> Result { + // let manifest = + // NonNull::new(manifest_ptr as *mut casper_contract_sdk_sys::Manifest).expect("Manifest + // instance"); + let code = if code_ptr.is_null() { + None + } else { + Some(unsafe { slice::from_raw_parts(code_ptr, code_size) }) + }; + + if code.is_some() { + panic!("Supplying code is not supported yet in native mode"); + } + + let constructor = if constructor_ptr.is_null() { + None + } else { + Some(unsafe { slice::from_raw_parts(constructor_ptr, constructor_size) }) + }; + + let input_data = if input_ptr.is_null() { + None + } else { + Some(unsafe { slice::from_raw_parts(input_ptr, input_size) }) + }; + + let _seed = if seed_ptr.is_null() { + None + } else { + Some(unsafe { slice::from_raw_parts(seed_ptr, seed_size) }) + }; + + assert_eq!( + transferred_value, 0, + "Creating new contracts with transferred value is not supported in native mode" + ); + + let mut rng = rand::thread_rng(); + let contract_address = rng.gen(); + let 
package_address = rng.gen(); + + let mut result = NonNull::new(result_ptr).expect("Valid pointer"); + unsafe { + result.as_mut().contract_address = package_address; + } + + let mut contracts = self.contracts.write().unwrap(); + contracts.insert(contract_address); + + if let Some(entry_point) = constructor { + let entry_point = ENTRY_POINTS + .iter() + .find(|export| export.kind.name().as_bytes() == entry_point) + .expect("Entry point exists"); + + let mut stub = with_current_environment(|stub| stub); + stub.input_data = input_data.map(Bytes::copy_from_slice); + + stub.caller = stub.callee; + stub.callee = Entity::Contract(package_address); + + // stub.callee + // Call constructor, expect a trap + let result = dispatch_with(stub, || { + // TODO: Handle panic inside constructor + (entry_point.fptr)(); + }); + + match result { + Ok(()) => {} + Err(NativeTrap::Return(flags, bytes)) => { + if flags.contains(ReturnFlags::REVERT) { + todo!("Constructor returned with a revert flag"); + } + assert!(bytes.is_empty(), "When returning from the constructor it is expected that no bytes are passed in a return function"); + } + Err(NativeTrap::Panic(_panic)) => { + todo!(); + } + } + } + + Ok(HOST_ERROR_SUCCESS) + } + + #[allow(clippy::too_many_arguments)] + fn casper_call( + &self, + address_ptr: *const u8, + address_size: usize, + transferred_value: u64, + entry_point_ptr: *const u8, + entry_point_size: usize, + input_ptr: *const u8, + input_size: usize, + alloc: extern "C" fn(usize, *mut core::ffi::c_void) -> *mut u8, /* For capturing output + * data */ + alloc_ctx: *const core::ffi::c_void, + ) -> Result { + let address = unsafe { slice::from_raw_parts(address_ptr, address_size) }; + let input_data = unsafe { slice::from_raw_parts(input_ptr, input_size) }; + let entry_point = { + let entry_point_ptr = NonNull::new(entry_point_ptr.cast_mut()).expect("Valid pointer"); + let entry_point = + unsafe { slice::from_raw_parts(entry_point_ptr.as_ptr(), entry_point_size) }; + let 
entry_point = std::str::from_utf8(entry_point).expect("Valid UTF-8 string");
+            entry_point.to_string()
+        };
+
+        assert_eq!(
+            transferred_value, 0,
+            "Transferred value is not supported in native mode"
+        );
+
+        let export = ENTRY_POINTS
+            .iter()
+            .find(|export|
+                matches!(export.kind, EntryPointKind::SmartContract { name, .. } | EntryPointKind::TraitImpl { name, .. }
+                    if name == entry_point)
+            )
+            .expect("Existing entry point");
+
+        let mut new_stub = with_current_environment(|stub| stub.clone());
+        new_stub.input_data = Some(Bytes::copy_from_slice(input_data));
+        new_stub.caller = new_stub.callee;
+        new_stub.callee = Entity::Contract(address.try_into().expect("Size to match"));
+
+        let ret = dispatch_with(new_stub, || {
+            // We need to convert any panic inside the entry point into a native trap. This probably
+            // should be done in a more configurable way.
+            dispatch_export_call(|| {
+                (export.fptr)();
+            })
+        });
+
+        let unfolded = match ret {
+            Ok(Ok(())) => Ok(()),
+            Ok(Err(error)) | Err(error) => Err(error),
+        };
+
+        match unfolded {
+            Ok(()) => Ok(CALLEE_SUCCEEDED),
+            Err(NativeTrap::Return(flags, bytes)) => {
+                let ptr = NonNull::new(alloc(bytes.len(), alloc_ctx.cast_mut()));
+                if let Some(output_ptr) = ptr {
+                    unsafe {
+                        ptr::copy_nonoverlapping(bytes.as_ptr(), output_ptr.as_ptr(), bytes.len());
+                    }
+                }
+
+                if flags.contains(ReturnFlags::REVERT) {
+                    Ok(CALLEE_REVERTED)
+                } else {
+                    Ok(CALLEE_SUCCEEDED)
+                }
+            }
+            Err(NativeTrap::Panic(panic)) => {
+                eprintln!("Panic {panic:?}");
+                Ok(CALLEE_TRAPPED)
+            }
+        }
+    }
+
+    #[doc = r"Obtain data from the blockchain environment of current wasm invocation.
+
+Example paths:
+
+* `env_read([CASPER_CALLER], 1, nullptr, &caller_addr)` -> read caller's address into
+  `caller_addr` memory.
+* `env_read([CASPER_CHAIN, BLOCK_HASH, 0], 3, nullptr, &block_hash)` -> read hash of the
+  current block into `block_hash` memory.
+* `env_read([CASPER_CHAIN, BLOCK_HASH, 5], 3, nullptr, &block_hash)` -> read hash of the 5th + block from the current one into `block_hash` memory. +* `env_read([CASPER_AUTHORIZED_KEYS], 1, nullptr, &authorized_keys)` -> read list of + authorized keys into `authorized_keys` memory."] + fn casper_env_read( + &self, + _env_path: *const u64, + _env_path_size: usize, + _alloc: Option *mut u8>, + _alloc_ctx: *const core::ffi::c_void, + ) -> Result<*mut u8, NativeTrap> { + todo!() + } + + fn casper_env_info(&self, info_ptr: *const u8, info_size: u32) -> Result { + assert_eq!(info_size as usize, size_of::()); + let mut env_info = NonNull::new(info_ptr as *mut u8) + .expect("Valid ptr") + .cast::(); + let env_info = unsafe { env_info.as_mut() }; + *env_info = EnvInfo { + block_time: 0, + transferred_value: 0, + caller_addr: *self.caller.address(), + caller_kind: self.caller.tag(), + callee_addr: *self.callee.address(), + callee_kind: self.callee.tag(), + }; + Ok(HOST_ERROR_SUCCESS) + } +} + +thread_local! { + pub(crate) static LAST_TRAP: RefCell> = const { RefCell::new(None) }; + static ENV_STACK: RefCell> = RefCell::new(VecDeque::from_iter([ + // Stack of environments has a default element so unit tests do not require extra effort. 
+ // Environment::default() + ])); +} + +pub fn with_current_environment(f: impl FnOnce(Environment) -> T) -> T { + ENV_STACK.with(|stack| { + let stub = { + let borrowed = stack.borrow(); + let front = borrowed.front().expect("Stub exists").clone(); + front + }; + f(stub) + }) +} + +pub fn current_environment() -> Environment { + with_current_environment(|env| env) +} + +fn handle_ret_with(value: Result, ret: impl FnOnce() -> T) -> T { + match value { + Ok(result) => { + LAST_TRAP.with(|last_trap| last_trap.borrow_mut().take()); + result + } + Err(trap) => { + let result = ret(); + LAST_TRAP.with(|last_trap| last_trap.borrow_mut().replace(trap)); + result + } + } +} + +fn dispatch_export_call(func: F) -> Result<(), NativeTrap> +where + F: FnOnce() + Send + UnwindSafe, +{ + let call_result = panic::catch_unwind(|| { + func(); + }); + match call_result { + Ok(()) => { + let last_trap = LAST_TRAP.with(|last_trap| last_trap.borrow_mut().take()); + match last_trap { + Some(last_trap) => Err(last_trap), + None => Ok(()), + } + } + Err(error) => Err(NativeTrap::Panic(error)), + } +} + +fn handle_ret(value: Result) -> T { + handle_ret_with(value, || T::default()) +} + +/// Dispatches a function with a default environment. +pub fn dispatch(f: impl FnOnce() -> T) -> Result { + dispatch_with(Environment::default(), f) +} + +/// Dispatches a function with a given environment. +pub fn dispatch_with(stub: Environment, f: impl FnOnce() -> T) -> Result { + ENV_STACK.with(|stack| { + let mut borrowed = stack.borrow_mut(); + borrowed.push_front(stub); + }); + + // Clear previous trap (if present) + LAST_TRAP.with(|last_trap| last_trap.borrow_mut().take()); + + // Call a function + let result = f(); + + // Check if a trap was set and return it if so (otherwise return the result). 
+ let last_trap = LAST_TRAP.with(|last_trap| last_trap.borrow_mut().take()); + + let result = if let Some(trap) = last_trap { + Err(trap) + } else { + Ok(result) + }; + + // Pop the stub from the stack + ENV_STACK.with(|stack| { + let mut borrowed = stack.borrow_mut(); + borrowed.pop_front(); + }); + + result +} + +mod symbols { + // TODO: Figure out how to use for_each_host_function macro here and deal with never type in + // casper_return + #[no_mangle] + /// Read value from a storage available for caller's entity address. + pub extern "C" fn casper_read( + key_space: u64, + key_ptr: *const u8, + key_size: usize, + info: *mut ::casper_contract_sdk_sys::ReadInfo, + alloc: extern "C" fn(usize, *mut core::ffi::c_void) -> *mut u8, + alloc_ctx: *const core::ffi::c_void, + ) -> u32 { + let _name = "casper_read"; + let _args = (&key_space, &key_ptr, &key_size, &info, &alloc, &alloc_ctx); + let _call_result = with_current_environment(|stub| { + stub.casper_read(key_space, key_ptr, key_size, info, alloc, alloc_ctx) + }); + crate::casper::native::handle_ret(_call_result) + } + + #[no_mangle] + pub extern "C" fn casper_write( + key_space: u64, + key_ptr: *const u8, + key_size: usize, + value_ptr: *const u8, + value_size: usize, + ) -> u32 { + let _name = "casper_write"; + let _args = (&key_space, &key_ptr, &key_size, &value_ptr, &value_size); + let _call_result = with_current_environment(|stub| { + stub.casper_write(key_space, key_ptr, key_size, value_ptr, value_size) + }); + crate::casper::native::handle_ret(_call_result) + } + + #[no_mangle] + pub extern "C" fn casper_remove(key_space: u64, key_ptr: *const u8, key_size: usize) -> u32 { + let _name = "casper_remove"; + let _args = (&key_space, &key_ptr, &key_size); + let _call_result = + with_current_environment(|stub| stub.casper_remove(key_space, key_ptr, key_size)); + crate::casper::native::handle_ret(_call_result) + } + + #[no_mangle] + pub extern "C" fn casper_print(msg_ptr: *const u8, msg_size: usize) { + let _name = 
"casper_print"; + let _args = (&msg_ptr, &msg_size); + let _call_result = with_current_environment(|stub| stub.casper_print(msg_ptr, msg_size)); + crate::casper::native::handle_ret(_call_result); + } + + use casper_executor_wasm_common::error::HOST_ERROR_SUCCESS; + + use crate::casper::native::LAST_TRAP; + + #[no_mangle] + pub extern "C" fn casper_return(flags: u32, data_ptr: *const u8, data_len: usize) { + let _name = "casper_return"; + let _args = (&flags, &data_ptr, &data_len); + let _call_result = + with_current_environment(|stub| stub.casper_return(flags, data_ptr, data_len)); + let err = _call_result.unwrap_err(); // SAFE + LAST_TRAP.with(|last_trap| last_trap.borrow_mut().replace(err)); + } + + #[no_mangle] + pub extern "C" fn casper_copy_input( + alloc: extern "C" fn(usize, *mut core::ffi::c_void) -> *mut u8, + alloc_ctx: *const core::ffi::c_void, + ) -> *mut u8 { + let _name = "casper_copy_input"; + let _args = (&alloc, &alloc_ctx); + let _call_result = + with_current_environment(|stub| stub.casper_copy_input(alloc, alloc_ctx)); + crate::casper::native::handle_ret_with(_call_result, ptr::null_mut) + } + + #[no_mangle] + pub extern "C" fn casper_create( + code_ptr: *const u8, + code_size: usize, + transferred_value: u64, + constructor_ptr: *const u8, + constructor_size: usize, + input_ptr: *const u8, + input_size: usize, + seed_ptr: *const u8, + seed_size: usize, + result_ptr: *mut casper_contract_sdk_sys::CreateResult, + ) -> u32 { + let _call_result = with_current_environment(|stub| { + stub.casper_create( + code_ptr, + code_size, + transferred_value, + constructor_ptr, + constructor_size, + input_ptr, + input_size, + seed_ptr, + seed_size, + result_ptr, + ) + }); + crate::casper::native::handle_ret(_call_result) + } + + #[no_mangle] + pub extern "C" fn casper_call( + address_ptr: *const u8, + address_size: usize, + transferred_value: u64, + entry_point_ptr: *const u8, + entry_point_size: usize, + input_ptr: *const u8, + input_size: usize, + alloc: extern 
"C" fn(usize, *mut core::ffi::c_void) -> *mut u8, /* For capturing output + * data */ + alloc_ctx: *const core::ffi::c_void, + ) -> u32 { + let _call_result = with_current_environment(|stub| { + stub.casper_call( + address_ptr, + address_size, + transferred_value, + entry_point_ptr, + entry_point_size, + input_ptr, + input_size, + alloc, + alloc_ctx, + ) + }); + crate::casper::native::handle_ret(_call_result) + } + + #[no_mangle] + pub extern "C" fn casper_upgrade( + _code_ptr: *const u8, + _code_size: usize, + _entry_point_ptr: *const u8, + _entry_point_size: usize, + _input_ptr: *const u8, + _input_size: usize, + ) -> u32 { + todo!() + } + + use core::slice; + use std::ptr; + + use super::with_current_environment; + + #[no_mangle] + pub extern "C" fn casper_env_read( + env_path: *const u64, + env_path_size: usize, + alloc: Option *mut u8>, + alloc_ctx: *const core::ffi::c_void, + ) -> *mut u8 { + let _name = "casper_env_read"; + let _args = (&env_path, &env_path_size, &alloc, &alloc_ctx); + let _call_result = with_current_environment(|stub| { + stub.casper_env_read(env_path, env_path_size, alloc, alloc_ctx) + }); + crate::casper::native::handle_ret_with(_call_result, ptr::null_mut) + } + #[no_mangle] + pub extern "C" fn casper_env_balance( + _entity_kind: u32, + _entity_addr_ptr: *const u8, + _entity_addr_len: usize, + ) -> u64 { + todo!() + } + #[no_mangle] + pub extern "C" fn casper_transfer( + _entity_kind: u32, + _entity_addr_ptr: *const u8, + _entity_addr_len: usize, + _amount: u64, + ) -> u32 { + todo!() + } + #[no_mangle] + pub extern "C" fn casper_emit( + topic_ptr: *const u8, + topic_size: usize, + data_ptr: *const u8, + data_size: usize, + ) -> u32 { + let topic = unsafe { slice::from_raw_parts(topic_ptr, topic_size) }; + let data = unsafe { slice::from_raw_parts(data_ptr, data_size) }; + let topic = std::str::from_utf8(topic).expect("Valid UTF-8 string"); + println!("Emitting event with topic: {topic:?} and data: {data:?}"); + HOST_ERROR_SUCCESS + } + 
+ #[no_mangle] + pub extern "C" fn casper_env_info(info_ptr: *const u8, info_size: u32) -> u32 { + let ret = with_current_environment(|env| env.casper_env_info(info_ptr, info_size)); + crate::casper::native::handle_ret(ret) + } +} + +#[cfg(test)] +mod tests { + use casper_executor_wasm_common::keyspace::Keyspace; + + use crate::casper; + + use super::*; + + #[test] + fn foo() { + dispatch(|| { + casper::print("Hello"); + casper::write(Keyspace::Context(b"test"), b"value 1").unwrap(); + + let change_context_1 = + with_current_environment(|stub| stub.smart_contract(Entity::Contract([1; 32]))); + + dispatch_with(change_context_1, || { + casper::write(Keyspace::Context(b"test"), b"value 2").unwrap(); + casper::write(Keyspace::State, b"state").unwrap(); + }) + .unwrap(); + + let change_context_1 = + with_current_environment(|stub| stub.smart_contract(Entity::Contract([1; 32]))); + dispatch_with(change_context_1, || { + assert_eq!( + casper::read_into_vec(Keyspace::Context(b"test")), + Ok(Some(b"value 2".to_vec())) + ); + assert_eq!( + casper::read_into_vec(Keyspace::State), + Ok(Some(b"state".to_vec())) + ); + }) + .unwrap(); + + assert_eq!(casper::get_caller(), DEFAULT_ADDRESS); + assert_eq!( + casper::read_into_vec(Keyspace::Context(b"test")), + Ok(Some(b"value 1".to_vec())) + ); + }) + .unwrap(); + } + #[test] + fn test() { + dispatch_with(Environment::default(), || { + let msg = "Hello"; + let () = with_current_environment(|stub| stub.casper_print(msg.as_ptr(), msg.len())) + .expect("Ok"); + }) + .unwrap(); + } + + #[test] + fn test_returns() { + dispatch_with(Environment::default(), || { + let _ = with_current_environment(|stub| stub.casper_return(0, ptr::null(), 0)); + }) + .unwrap(); + } +} diff --git a/smart_contracts/sdk/src/cli/validation.rs b/smart_contracts/sdk/src/cli/validation.rs new file mode 100644 index 0000000000..ad5b639464 --- /dev/null +++ b/smart_contracts/sdk/src/cli/validation.rs @@ -0,0 +1,8 @@ +use thiserror::Error; + +#[derive(Debug, Error)] 
+#[non_exhaustive]
+pub enum Validation {
+    #[error("Contract does not have any entry points")]
+    NoEntryPoints,
+}
diff --git a/smart_contracts/sdk/src/collections.rs b/smart_contracts/sdk/src/collections.rs
new file mode 100644
index 0000000000..d1077529b0
--- /dev/null
+++ b/smart_contracts/sdk/src/collections.rs
@@ -0,0 +1,15 @@
+mod lookup_key;
+
+mod iterable_map;
+mod iterable_set;
+mod map;
+mod set;
+pub mod sorted_vector;
+mod vector;
+
+pub use map::Map;
+pub use set::Set;
+pub use vector::Vector;
+
+pub use iterable_map::{IterableMap, IterableMapHash, IterableMapIter, IterableMapPtr};
+pub use iterable_set::IterableSet;
diff --git a/smart_contracts/sdk/src/collections/iterable_map.rs b/smart_contracts/sdk/src/collections/iterable_map.rs
new file mode 100644
index 0000000000..c1b06368c0
--- /dev/null
+++ b/smart_contracts/sdk/src/collections/iterable_map.rs
@@ -0,0 +1,991 @@
+use core::marker::PhantomData;
+
+use borsh::{BorshDeserialize, BorshSerialize};
+use bytes::BufMut;
+use casper_executor_wasm_common::keyspace::Keyspace;
+use const_fnv1a_hash::fnv1a_hash_64;
+
+use crate::casper::{self, read_into_vec};
+
+/// A pointer that uniquely identifies a value written into the map.
+#[derive(BorshSerialize, BorshDeserialize, Debug, Clone, Copy, PartialEq)]
+pub struct IterableMapPtr {
+    /// The key hash
+    pub(crate) hash: u64,
+    /// In case of a collision, signifies the index of this element
+    /// in a bucket
+    pub(crate) index: u64,
+}
+
+/// Trait for types that can be used as keys in [IterableMap].
+/// Must produce a deterministic hash.
+///
+/// A default `compute_hash` based on the [BorshSerialize] encoding is
+/// provided; the trait is implemented explicitly for common types below.
+pub trait IterableMapHash: PartialEq + BorshSerialize + BorshDeserialize {
+    fn compute_hash(&self) -> u64 {
+        let mut bytes = Vec::new();
+        self.serialize(&mut bytes).unwrap();
+        fnv1a_hash_64(&bytes, None)
+    }
+}
+
+// No blanket IterableMapHash implementation.
Explicit impls prevent conflicts with +// user‑provided implementations; a blanket impl would forbid custom hashes. +impl IterableMapHash for u8 {} +impl IterableMapHash for u16 {} +impl IterableMapHash for u32 {} +impl IterableMapHash for u64 {} +impl IterableMapHash for u128 {} +impl IterableMapHash for i8 {} +impl IterableMapHash for i16 {} +impl IterableMapHash for i32 {} +impl IterableMapHash for i64 {} +impl IterableMapHash for i128 {} +impl IterableMapHash for String {} + +/// A map over global state that allows iteration. Each entry at key `K_n` stores `(K_{n}, V, +/// K_{n-1})`, where `V` is the value and `K_{n-1}` is the key hash of the previous entry. +/// +/// This creates a constant spatial overhead; every entry stores a pointer +/// to the one inserted before it. +/// +/// Enables iteration without a guaranteed ordering; updating an existing +/// key does not affect position. +/// +/// Under the hood, this is a singly-linked HashMap with linear probing for collision resolution. +/// Supports full traversal, typically in reverse-insertion order. +#[derive(BorshSerialize, BorshDeserialize, Debug, Clone)] +#[borsh(crate = "crate::serializers::borsh")] +pub struct IterableMap { + pub(crate) prefix: String, + + // Keys are hashed to u128 internally, but K is preserved to enforce type safety. + // While this map could accept arbitrary u128 keys, requiring a concrete K prevents + // misuse and clarifies intent at the type level. + pub(crate) tail_key_hash: Option, + _marker: PhantomData<(K, V)>, +} + +/// Single entry in `IterableMap`. Stores the value and the hash of the previous entry's key. +#[derive(BorshSerialize, BorshDeserialize, Debug, Clone)] +#[borsh(crate = "crate::serializers::borsh")] +pub struct IterableMapEntry { + pub(crate) key: K, + pub(crate) value: Option, + pub(crate) previous: Option, +} + +impl IterableMap +where + K: IterableMapHash, + V: BorshSerialize + BorshDeserialize, +{ + /// Creates an empty [IterableMap] with the given prefix. 
+    pub fn new<S: Into<String>>(prefix: S) -> Self {
+        Self {
+            prefix: prefix.into(),
+            tail_key_hash: None,
+            _marker: PhantomData,
+        }
+    }
+
+    /// Inserts a key-value pair into the map.
+    ///
+    /// If the map did not have this key present, `None` is returned.
+    ///
+    /// If the map did have this key present, the value is updated, and the old value is returned.
+    ///
+    /// This has an amortized complexity of O(1), with a worst-case of O(n) when running into
+    /// collisions.
+    pub fn insert(&mut self, key: K, value: V) -> Option<V> {
+        // Find an address we can write to
+        let (ptr, at_ptr) = self.get_writable_slot(&key);
+
+        // Either overwrite an existing entry, or create a new one.
+        let (entry_to_write, previous) = match at_ptr {
+            Some(mut entry) => {
+                if entry.value.is_none() {
+                    // Reuse tombstone as a new insertion
+                    entry.key = key;
+                    entry.previous = self.tail_key_hash;
+                    entry.value = Some(value);
+                    self.tail_key_hash = Some(ptr);
+                    (entry, None)
+                } else {
+                    // Overwrite an existing value
+                    let old = entry.value;
+                    entry.value = Some(value);
+                    (entry, old)
+                }
+            }
+            None => {
+                let entry = IterableMapEntry {
+                    key,
+                    value: Some(value),
+                    previous: self.tail_key_hash,
+                };
+
+                // Additionally, since this is a new entry, we need to update the tail
+                self.tail_key_hash = Some(ptr);
+
+                (entry, None)
+            }
+        };
+
+        // Write the entry and return previous value if it exists
+        let mut entry_bytes = Vec::new();
+        entry_to_write.serialize(&mut entry_bytes).unwrap();
+
+        let prefix = self.create_prefix_from_ptr(&ptr);
+        let keyspace = Keyspace::Context(&prefix);
+        casper::write(keyspace, &entry_bytes).unwrap();
+
+        previous
+    }
+
+    /// Returns a value corresponding to the key.
+    pub fn get(&self, key: &K) -> Option<V> {
+        // If a slot is writable, it implicitly belongs to the key
+        let (_, at_ptr) = self.get_writable_slot(key);
+        at_ptr.and_then(|entry| entry.value)
+    }
+
+    /// Removes a key from the map. Returns the associated value if the key exists.
+ /// + /// Has a worst-case runtime of O(n). + pub fn remove(&mut self, key: &K) -> Option { + // Find the entry for the key that we're about to remove. + let (to_remove_ptr, at_remove_ptr) = self.find_slot(key)?; + + let to_remove_prefix = self.create_prefix_from_ptr(&to_remove_ptr); + let to_remove_context_key = Keyspace::Context(&to_remove_prefix); + + // See if the removed entry is a part of a collision resolution chain + // by investigating its potential child. + let to_remove_ptr_child_prefix = self.create_prefix_from_ptr(&IterableMapPtr { + index: to_remove_ptr.index + 1, + ..to_remove_ptr + }); + let to_remove_ptr_child_keyspace = Keyspace::Context(&to_remove_ptr_child_prefix); + + if self.get_entry(to_remove_ptr_child_keyspace).is_some() { + // A child exists, so we need to retain this element to maintain + // collision resolution soundness. Instead of purging, mark as + // tombstone. + let tombstone = IterableMapEntry { + value: None, + ..at_remove_ptr + }; + + // Write the updated value + let mut entry_bytes = Vec::new(); + tombstone.serialize(&mut entry_bytes).unwrap(); + casper::write(to_remove_context_key, &entry_bytes).unwrap(); + } else { + // There is no child, so we can safely purge this entry entirely. + casper::remove(to_remove_context_key).unwrap(); + } + + // Edge case when removing tail + if self.tail_key_hash == Some(to_remove_ptr) { + self.tail_key_hash = at_remove_ptr.previous; + return at_remove_ptr.value; + } + + // Scan the map, find entry to remove, join adjacent entries + let mut current_hash = self.tail_key_hash; + while let Some(key) = current_hash { + let current_prefix = self.create_prefix_from_ptr(&key); + let current_context_key = Keyspace::Context(¤t_prefix); + let mut current_entry = self.get_entry(current_context_key).unwrap(); + + // If there is no previous entry, then we've finished iterating. + // + // This shouldn't happen, as the outer logic prevents from running + // into such case, ie. 
we early exit if the entry to remove doesn't
+            // exist.
+            let Some(next_hash) = current_entry.previous else {
+                panic!("Unexpected end of IterableMap");
+            };
+
+            // If the next entry is the one to be removed, repoint the current
+            // one to the one preceding the one to remove.
+            if next_hash == to_remove_ptr {
+                // Advance current past the element to remove
+                current_entry.previous = at_remove_ptr.previous;
+
+                // Re-write the updated current entry
+                let mut entry_bytes = Vec::new();
+                current_entry.serialize(&mut entry_bytes).unwrap();
+                casper::write(current_context_key, &entry_bytes).unwrap();
+
+                return at_remove_ptr.value;
+            }
+
+            // Advance backwards
+            current_hash = current_entry.previous;
+        }
+
+        None
+    }
+
+    /// Clears the map, removing all key-value pairs.
+    pub fn clear(&mut self) {
+        for key in self.keys() {
+            let prefix = self.create_prefix_from_key(&key);
+            {
+                let key = Keyspace::Context(&prefix);
+                casper::remove(key).unwrap()
+            };
+        }
+
+        self.tail_key_hash = None;
+    }
+
+    /// Returns true if the map contains a value for the specified key.
+    pub fn contains_key(&self, key: &K) -> bool {
+        self.get(key).is_some()
+    }
+
+    /// Creates an iterator visiting all the keys in arbitrary order.
+    pub fn keys(&self) -> impl Iterator<Item = K> + '_ {
+        self.iter().map(|(key, _)| key)
+    }
+
+    /// Creates an iterator visiting all the values in arbitrary order.
+    pub fn values(&self) -> impl Iterator<Item = V> + '_ {
+        self.iter().map(|(_, value)| value)
+    }
+
+    /// Returns true if the map contains no elements.
+    pub fn is_empty(&self) -> bool {
+        self.tail_key_hash.is_none()
+    }
+
+    /// Returns an iterator over the entries in the map.
+    ///
+    /// Traverses entries in reverse-insertion order.
+    /// Each item is a tuple of the hashed key and the value.
+    pub fn iter(&self) -> IterableMapIter {
+        IterableMapIter {
+            prefix: &self.prefix,
+            current: self.tail_key_hash,
+            _marker: PhantomData,
+        }
+    }
+
+    /// Returns the number of entries in the map.
+ /// + /// This is an O(n) operation. + pub fn len(&self) -> usize { + self.iter().count() + } + + /// Find the slot containing key, if any. + fn find_slot(&self, key: &K) -> Option<(IterableMapPtr, IterableMapEntry)> { + let mut bucket_ptr = self.create_root_ptr_from_key(key); + + // Probe until we find either an existing slot, a tombstone or empty space. + // This should rarely iterate more than once assuming a solid hashing algorithm. + loop { + let prefix = self.create_prefix_from_ptr(&bucket_ptr); + let keyspace = Keyspace::Context(&prefix); + + if let Some(entry) = self.get_entry(keyspace) { + // Existing value, check if the keys match + if entry.key == *key && entry.value.is_some() { + // We have found a slot where this key lives, return it + return Some((bucket_ptr, entry)); + } else { + // We found a slot for this key hash, but either the keys mismatch, + // or it's vacant, so we need to probe further. + bucket_ptr.index += 1; + continue; + } + } else { + // We've reached empty address space, so the slot doesn't actually exist. + return None; + } + } + } + + /// Find the next slot we can safely write to. This is either a slot already owned and + /// assigned to the key, a vacant tombstone, or empty memory. + fn get_writable_slot(&self, key: &K) -> (IterableMapPtr, Option>) { + let mut bucket_ptr = self.create_root_ptr_from_key(key); + + // Probe until we find either an existing slot, a tombstone or empty space. + // This should rarely iterate more than once assuming a solid hashing algorithm. + loop { + let prefix = self.create_prefix_from_ptr(&bucket_ptr); + let keyspace = Keyspace::Context(&prefix); + + if let Some(entry) = self.get_entry(keyspace) { + // Existing value, check if the keys match + if entry.key == *key { + // We have found an existing slot for that key, return it + return (bucket_ptr, Some(entry)); + } else if entry.value.is_none() { + // If the value is None, then this is a tombstone, and we + // can write over it. 
+ return (bucket_ptr, Some(entry)); + } else { + // We found a slot for this key hash, but the keys mismatch, + // and it's not vacant, so this is a collision and we need to + // probe further. + bucket_ptr.index += 1; + continue; + } + } else { + // We've reached empty address space, so we can write here + return (bucket_ptr, None); + } + } + } + + fn get_entry(&self, keyspace: Keyspace) -> Option> { + match read_into_vec(keyspace) { + Ok(Some(vec)) => { + let entry: IterableMapEntry = borsh::from_slice(&vec).unwrap(); + Some(entry) + } + Ok(None) => None, + Err(_) => None, + } + } + + fn create_prefix_from_key(&self, key: &K) -> Vec { + let ptr = self.create_root_ptr_from_key(key); + self.create_prefix_from_ptr(&ptr) + } + + fn create_root_ptr_from_key(&self, key: &K) -> IterableMapPtr { + IterableMapPtr { + hash: key.compute_hash(), + index: 0, + } + } + + fn create_prefix_from_ptr(&self, hash: &IterableMapPtr) -> Vec { + let mut context_key = Vec::new(); + context_key.extend(self.prefix.as_bytes()); + context_key.extend(b"_"); + context_key.put_u64_le(hash.hash); + context_key.extend(b"_"); + context_key.put_u64_le(hash.index); + context_key + } +} + +/// Iterator over entries in an [`IterableMap`]. +/// +/// Traverses the map in reverse-insertion order, following the internal +/// linked structure via hashed key references [`u128`]. +/// +/// Yields a tuple (K, V), where the key is the hashed +/// representation of the original key. The original key type `K` is not recoverable. +/// +/// Each iteration step deserializes a single entry from storage. +/// +/// This iterator performs no allocation beyond internal buffers, +/// and deserialization errors are treated as iteration termination. 
+pub struct IterableMapIter<'a, K, V> { + prefix: &'a str, + current: Option, + _marker: PhantomData<(K, V)>, +} + +impl<'a, K, V> IntoIterator for &'a IterableMap +where + K: BorshDeserialize, + V: BorshDeserialize, +{ + type Item = (K, V); + type IntoIter = IterableMapIter<'a, K, V>; + + fn into_iter(self) -> Self::IntoIter { + IterableMapIter { + prefix: &self.prefix, + current: self.tail_key_hash, + _marker: PhantomData, + } + } +} + +impl Iterator for IterableMapIter<'_, K, V> +where + K: BorshDeserialize, + V: BorshDeserialize, +{ + type Item = (K, V); + + fn next(&mut self) -> Option { + let current_hash = self.current?; + let mut key_bytes = Vec::new(); + key_bytes.extend(self.prefix.as_bytes()); + key_bytes.extend(b"_"); + key_bytes.put_u64_le(current_hash.hash); + key_bytes.extend(b"_"); + key_bytes.put_u64_le(current_hash.index); + + let context_key = Keyspace::Context(&key_bytes); + + match read_into_vec(context_key) { + Ok(Some(vec)) => { + let entry: IterableMapEntry = borsh::from_slice(&vec).unwrap(); + self.current = entry.previous; + Some(( + entry.key, + entry + .value + .expect("Tombstone values should be unlinked on removal"), + )) + } + Ok(None) => None, + Err(_) => None, + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::casper::native::dispatch; + + const TEST_MAP_PREFIX: &str = "test_map"; + + #[test] + fn insert_and_get() { + dispatch(|| { + let mut map = IterableMap::::new(TEST_MAP_PREFIX); + assert_eq!(map.len(), 0); + + assert_eq!(map.get(&1), None); + + map.insert(1, "a".to_string()); + assert_eq!(map.len(), 1); + + assert_eq!(map.get(&1), Some("a".to_string())); + + map.insert(2, "b".to_string()); + assert_eq!(map.len(), 2); + + assert_eq!(map.get(&2), Some("b".to_string())); + }) + .unwrap(); + } + + #[test] + fn overwrite_existing_key() { + dispatch(|| { + let mut map = IterableMap::::new(TEST_MAP_PREFIX); + + assert_eq!(map.insert(1, "a".to_string()), None); + assert_eq!(map.insert(1, "b".to_string()), 
Some("a".to_string())); + assert_eq!(map.get(&1), Some("b".to_string())); + }) + .unwrap(); + } + + #[test] + fn remove_tail_entry() { + dispatch(|| { + let mut map = IterableMap::::new(TEST_MAP_PREFIX); + assert_eq!(map.len(), 0); + map.insert(1, "a".to_string()); + assert_eq!(map.len(), 1); + map.insert(2, "b".to_string()); + assert_eq!(map.len(), 2); + assert_eq!(map.remove(&2), Some("b".to_string())); + assert_eq!(map.len(), 1); + assert_eq!(map.get(&2), None); + assert_eq!(map.get(&1), Some("a".to_string())); + }) + .unwrap(); + } + + #[test] + fn remove_middle_entry() { + dispatch(|| { + let mut map = IterableMap::::new(TEST_MAP_PREFIX); + assert_eq!(map.len(), 0); + + map.insert(1, "a".to_string()); + assert_eq!(map.len(), 1); + + map.insert(2, "b".to_string()); + assert_eq!(map.len(), 2); + + map.insert(3, "c".to_string()); + assert_eq!(map.len(), 3); + + assert_eq!(map.remove(&2), Some("b".to_string())); + assert_eq!(map.len(), 2); + + assert_eq!(map.get(&2), None); + assert_eq!(map.get(&1), Some("a".to_string())); + assert_eq!(map.get(&3), Some("c".to_string())); + + assert_eq!(map.len(), 2); + }) + .unwrap(); + } + + #[test] + fn remove_nonexistent_key_does_nothing() { + dispatch(|| { + let mut map = IterableMap::::new(TEST_MAP_PREFIX); + + map.insert(1, "a".to_string()); + + assert_eq!(map.remove(&999), None); + assert_eq!(map.get(&1), Some("a".to_string())); + }) + .unwrap(); + } + + #[test] + fn iterates_all_entries_in_reverse_insertion_order() { + dispatch(|| { + let mut map = IterableMap::::new(TEST_MAP_PREFIX); + + map.insert(1, "a".to_string()); + map.insert(2, "b".to_string()); + map.insert(3, "c".to_string()); + + let values: Vec<_> = map.values().collect(); + assert_eq!( + values, + vec!["c".to_string(), "b".to_string(), "a".to_string(),] + ); + }) + .unwrap(); + } + + #[test] + fn iteration_skips_deleted_entries() { + dispatch(|| { + let mut map = IterableMap::::new(TEST_MAP_PREFIX); + + map.insert(1, "a".to_string()); + map.insert(2, 
"b".to_string()); + map.insert(3, "c".to_string()); + + map.remove(&2); + + let values: Vec<_> = map.values().collect(); + assert_eq!(values, vec!["c".to_string(), "a".to_string(),]); + }) + .unwrap(); + } + + #[test] + fn empty_map_behaves_sanely() { + dispatch(|| { + let mut map = IterableMap::::new(TEST_MAP_PREFIX); + + assert_eq!(map.get(&1), None); + assert_eq!(map.remove(&1), None); + assert_eq!(map.iter().count(), 0); + }) + .unwrap(); + } + + #[test] + fn separate_maps_do_not_conflict() { + dispatch(|| { + let mut map1 = IterableMap::::new("map1"); + let mut map2 = IterableMap::::new("map2"); + + map1.insert(1, "a".to_string()); + map2.insert(1, "b".to_string()); + + assert_eq!(map1.get(&1), Some("a".to_string())); + assert_eq!(map2.get(&1), Some("b".to_string())); + }) + .unwrap(); + } + + #[test] + fn insert_same_value_under_different_keys() { + dispatch(|| { + let mut map = IterableMap::::new(TEST_MAP_PREFIX); + + map.insert(1, "shared".to_string()); + map.insert(2, "shared".to_string()); + + assert_eq!(map.get(&1), Some("shared".to_string())); + assert_eq!(map.get(&2), Some("shared".to_string())); + }) + .unwrap(); + } + + #[test] + fn clear_removes_all_entries() { + dispatch(|| { + let mut map = IterableMap::::new(TEST_MAP_PREFIX); + map.insert(1, "a".to_string()); + map.insert(2, "b".to_string()); + map.clear(); + assert!(map.is_empty()); + assert_eq!(map.iter().count(), 0); + }) + .unwrap(); + } + + #[test] + fn keys_returns_reverse_insertion_order() { + dispatch(|| { + let mut map = IterableMap::::new(TEST_MAP_PREFIX); + map.insert(1, "a".to_string()); + map.insert(2, "b".to_string()); + let hashes: Vec<_> = map.keys().collect(); + assert_eq!(hashes, vec![2, 1]); + }) + .unwrap(); + } + + #[test] + fn values_returns_values_in_reverse_insertion_order() { + dispatch(|| { + let mut map = IterableMap::::new(TEST_MAP_PREFIX); + map.insert(1, "a".to_string()); + map.insert(2, "b".to_string()); + let values: Vec<_> = map.values().collect(); + 
assert_eq!(values, vec!["b".to_string(), "a".to_string()]); + }) + .unwrap(); + } + + #[test] + fn contains_key_returns_correctly() { + dispatch(|| { + let mut map = IterableMap::::new(TEST_MAP_PREFIX); + assert!(!map.contains_key(&1)); + map.insert(1, "a".to_string()); + assert!(map.contains_key(&1)); + map.remove(&1); + assert!(!map.contains_key(&1)); + }) + .unwrap(); + } + + #[test] + fn multiple_removals_and_insertions() { + dispatch(|| { + let mut map = IterableMap::::new(TEST_MAP_PREFIX); + map.insert(1, "a".to_string()); + map.insert(2, "b".to_string()); + map.insert(3, "c".to_string()); + map.remove(&2); + assert_eq!(map.get(&2), None); + assert_eq!(map.get(&1), Some("a".to_string())); + assert_eq!(map.get(&3), Some("c".to_string())); + + map.insert(4, "d".to_string()); + let values: Vec<_> = map.values().collect(); + assert_eq!(values, vec!["d", "c", "a"]); + }) + .unwrap(); + } + + #[test] + fn struct_as_key() { + #[derive(BorshSerialize, BorshDeserialize, Debug, Clone, PartialEq, Eq)] + struct TestKey { + id: u64, + name: String, + } + + impl IterableMapHash for TestKey {} + + dispatch(|| { + let key1 = TestKey { + id: 1, + name: "Key1".to_string(), + }; + let key2 = TestKey { + id: 2, + name: "Key2".to_string(), + }; + let mut map = IterableMap::::new(TEST_MAP_PREFIX); + + map.insert(key1.clone(), "a".to_string()); + map.insert(key2.clone(), "b".to_string()); + + assert_eq!(map.get(&key1), Some("a".to_string())); + assert_eq!(map.get(&key2), Some("b".to_string())); + }) + .unwrap(); + } + + #[test] + fn remove_middle_of_long_chain() { + dispatch(|| { + let mut map = IterableMap::::new(TEST_MAP_PREFIX); + map.insert(1, "a".to_string()); + map.insert(2, "b".to_string()); + map.insert(3, "c".to_string()); + map.insert(4, "d".to_string()); + map.insert(5, "e".to_string()); + + // The order is 5,4,3,2,1 + map.remove(&3); // Remove the middle entry + + let values: Vec<_> = map.values().collect(); + assert_eq!(values, vec!["e", "d", "b", "a"]); + + // Check 
that entry 4's previous is now 2's hash + let ptr4 = map.create_root_ptr_from_key(&4u64); + let prefix = map.create_prefix_from_ptr(&ptr4); + let entry = map.get_entry(Keyspace::Context(&prefix)).unwrap(); + assert_eq!(entry.previous, Some(map.create_root_ptr_from_key(&2u64))); + }) + .unwrap(); + } + + #[test] + fn insert_after_remove_updates_head() { + dispatch(|| { + let mut map = IterableMap::::new(TEST_MAP_PREFIX); + map.insert(1, "a".to_string()); + map.insert(2, "b".to_string()); + map.remove(&2); + map.insert(3, "c".to_string()); + + let values: Vec<_> = map.values().collect(); + assert_eq!(values, vec!["c", "a"]); + }) + .unwrap(); + } + + #[test] + fn reinsert_removed_key() { + dispatch(|| { + let mut map = IterableMap::::new(TEST_MAP_PREFIX); + map.insert(1, "a".to_string()); + map.remove(&1); + map.insert(1, "b".to_string()); + + assert_eq!(map.get(&1), Some("b".to_string())); + assert_eq!(map.iter().next().unwrap().1, "b".to_string()); + }) + .unwrap(); + } + + #[test] + fn iteration_reflects_modifications() { + dispatch(|| { + let mut map = IterableMap::::new(TEST_MAP_PREFIX); + map.insert(1, "a".to_string()); + map.insert(2, "b".to_string()); + let mut iter = map.iter(); + assert_eq!(iter.next().unwrap().1, "b".to_string()); + + map.remove(&2); + map.insert(3, "c".to_string()); + let values: Vec<_> = map.values().collect(); + assert_eq!(values, vec!["c", "a"]); + }) + .unwrap(); + } + + #[test] + fn unit_struct_as_key() { + #[derive(BorshSerialize, BorshDeserialize, PartialEq)] + struct UnitKey; + + impl IterableMapHash for UnitKey {} + + dispatch(|| { + let mut map = IterableMap::::new(TEST_MAP_PREFIX); + map.insert(UnitKey, "value".to_string()); + assert_eq!(map.get(&UnitKey), Some("value".to_string())); + }) + .unwrap(); + } + + #[derive(Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize)] + struct CollidingKey(u64, u64); + + impl IterableMapHash for CollidingKey { + fn compute_hash(&self) -> u64 { + let mut bytes = Vec::new(); + // 
Only serialize first field for hash computation + self.0.serialize(&mut bytes).unwrap(); + fnv1a_hash_64(&bytes, None) + } + } + + #[test] + fn basic_collision_handling() { + dispatch(|| { + let mut map = IterableMap::::new(TEST_MAP_PREFIX); + + // Both keys will have same hash but different actual keys + let k1 = CollidingKey(42, 1); + let k2 = CollidingKey(42, 2); + + map.insert(k1.clone(), "first".to_string()); + map.insert(k2.clone(), "second".to_string()); + + assert_eq!(map.get(&k1), Some("first".to_string())); + assert_eq!(map.get(&k2), Some("second".to_string())); + }) + .unwrap(); + } + + #[test] + fn tombstone_handling() { + dispatch(|| { + let mut map = IterableMap::::new(TEST_MAP_PREFIX); + + let k1 = CollidingKey(42, 1); + let k2 = CollidingKey(42, 2); + let k3 = CollidingKey(42, 3); + + map.insert(k1.clone(), "first".to_string()); + map.insert(k2.clone(), "second".to_string()); + map.insert(k3.clone(), "third".to_string()); + + // Remove middle entry + assert_eq!(map.remove(&k2), Some("second".to_string())); + + // Verify tombstone state + let (_, entry) = map.get_writable_slot(&k2); + assert!(entry.unwrap().value.is_none()); + + // Verify chain integrity + let values: Vec<_> = map.values().collect(); + assert_eq!(values, vec!["third", "first"]); + }) + .unwrap(); + } + + #[test] + fn tombstone_reuse() { + dispatch(|| { + let mut map = IterableMap::::new(TEST_MAP_PREFIX); + + let k1 = CollidingKey(42, 1); + let k2 = CollidingKey(42, 2); + + map.insert(k1.clone(), "first".to_string()); + map.insert(k2.clone(), "second".to_string()); + + // Removing k1 while k2 exists guarantees k1 turns into + // a tombstone + map.remove(&k1); + + // Reinsert into tombstone slot + map.insert(k1.clone(), "reused".to_string()); + + assert_eq!(map.get(&k1), Some("reused".to_string())); + assert_eq!(map.get(&k2), Some("second".to_string())); + }) + .unwrap(); + } + + #[test] + fn full_deletion_handling() { + dispatch(|| { + let mut map = 
IterableMap::::new(TEST_MAP_PREFIX); + + let k1 = CollidingKey(42, 1); + map.insert(k1.clone(), "lonely".to_string()); + + assert_eq!(map.remove(&k1), Some("lonely".to_string())); + + // Verify complete removal + let (_, entry) = map.get_writable_slot(&k1); + assert!(entry.is_none()); + }) + .unwrap(); + } + + #[test] + fn collision_chain_iteration() { + dispatch(|| { + let mut map = IterableMap::::new(TEST_MAP_PREFIX); + + let keys = [ + CollidingKey(42, 1), + CollidingKey(42, 2), + CollidingKey(42, 3), + ]; + + for (i, k) in keys.iter().enumerate() { + map.insert(k.clone(), format!("value-{}", i)); + } + + // Remove middle entry + map.remove(&keys[1]); + + let values: Vec<_> = map.values().collect(); + assert_eq!(values, vec!["value-2", "value-0"]); + }) + .unwrap(); + } + + #[test] + fn complex_collision_chain() { + dispatch(|| { + let mut map = IterableMap::::new(TEST_MAP_PREFIX); + + // Create 5 colliding keys + let keys: Vec<_> = (0..5).map(|i| CollidingKey(42, i)).collect(); + + // Insert all + for k in &keys { + map.insert(k.clone(), format!("{}", k.1)); + } + + // Remove even indexes + for k in keys.iter().step_by(2) { + map.remove(k); + } + + // Insert new values + map.insert(keys[0].clone(), "reinserted".to_string()); + map.insert(CollidingKey(42, 5), "new".to_string()); + + // Verify final state + let expected = vec![ + ("new".to_string(), 5), + ("reinserted".to_string(), 0), + ("3".to_string(), 3), + ("1".to_string(), 1), + ]; + + let results: Vec<_> = map.iter().map(|(k, v)| (v, k.1)).collect(); + + assert_eq!(results, expected); + }) + .unwrap(); + } + + #[test] + fn cross_bucket_reference() { + dispatch(|| { + let mut map = IterableMap::::new(TEST_MAP_PREFIX); + + // Create keys with different hashes but chained references + let k1 = CollidingKey(1, 0); + let k2 = CollidingKey(2, 0); + let k3 = CollidingKey(1, 1); // Collides with k1 + + map.insert(k1.clone(), "first".to_string()); + map.insert(k2.clone(), "second".to_string()); + 
map.insert(k3.clone(), "third".to_string()); + + // Remove k2 which is referenced by k3 + map.remove(&k2); + + // Verify iteration skips removed entry + let values: Vec<_> = map.values().collect(); + assert_eq!(values, vec!["third", "first"]); + }) + .unwrap(); + } +} diff --git a/smart_contracts/sdk/src/collections/iterable_set.rs b/smart_contracts/sdk/src/collections/iterable_set.rs new file mode 100644 index 0000000000..0f2ced6461 --- /dev/null +++ b/smart_contracts/sdk/src/collections/iterable_set.rs @@ -0,0 +1,216 @@ +use borsh::{BorshDeserialize, BorshSerialize}; + +use super::{IterableMap, IterableMapHash}; + +/// An iterable set backed by a map. +pub struct IterableSet { + pub(crate) map: IterableMap, +} + +impl IterableSet { + /// Creates an empty [IterableMap] with the given prefix. + pub fn new>(prefix: S) -> Self { + Self { + map: IterableMap::new(prefix), + } + } + + /// Inserts a value into the set. + pub fn insert(&mut self, value: V) { + self.map.insert(value, ()); + } + + /// Removes a value from the set. + /// + /// Has a worst-case runtime of O(n). + pub fn remove(&mut self, value: &V) { + self.map.remove(value); + } + + /// Returns true if the set contains a value. + pub fn contains(&self, value: &V) -> bool { + self.map.get(value).is_some() + } + + /// Creates an iterator visiting all the values in arbitrary order. + pub fn iter(&self) -> impl Iterator + '_ { + self.map.iter().map(|(value, _)| value) + } + + // Returns true if the set contains no elements. + pub fn is_empty(&self) -> bool { + self.map.is_empty() + } + + /// Clears the set, removing all values. 
+ pub fn clear(&mut self) { + self.map.clear(); + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::casper::native::dispatch; + use borsh::{BorshDeserialize, BorshSerialize}; + + #[test] + fn basic_insert_contains() { + dispatch(|| { + let mut set = IterableSet::new("test_set"); + assert!(!set.contains(&1)); + + set.insert(1); + assert!(set.contains(&1)); + + set.insert(2); + assert!(set.contains(&2)); + }) + .unwrap(); + } + + #[test] + fn remove_elements() { + dispatch(|| { + let mut set = IterableSet::new("test_set"); + set.insert(1); + set.insert(2); + + set.remove(&1); + assert!(!set.contains(&1)); + assert!(set.contains(&2)); + + set.remove(&2); + assert!(set.is_empty()); + }) + .unwrap(); + } + + #[test] + fn iterator_order_and_contents() { + dispatch(|| { + let mut set = IterableSet::new("test_set"); + set.insert(1); + set.insert(2); + set.insert(3); + + let mut items: Vec<_> = set.iter().collect(); + items.sort(); + assert_eq!(items, vec![1, 2, 3]); + }) + .unwrap(); + } + + #[test] + fn clear_functionality() { + dispatch(|| { + let mut set = IterableSet::new("test_set"); + set.insert(1); + set.insert(2); + + assert!(!set.is_empty()); + set.clear(); + assert!(set.is_empty()); + assert_eq!(set.iter().count(), 0); + }) + .unwrap(); + } + + #[test] + fn multiple_sets_independence() { + dispatch(|| { + let mut set1 = IterableSet::new("set1"); + let mut set2 = IterableSet::new("set2"); + + set1.insert(1); + set2.insert(1); + + assert!(set1.contains(&1)); + assert!(set2.contains(&1)); + + set1.remove(&1); + assert!(!set1.contains(&1)); + assert!(set2.contains(&1)); + }) + .unwrap(); + } + + #[derive(BorshSerialize, BorshDeserialize, Clone, Debug, PartialEq)] + struct TestStruct { + field1: u64, + field2: String, + } + + impl IterableMapHash for TestStruct {} + + #[test] + fn struct_values() { + dispatch(|| { + let val1 = TestStruct { + field1: 1, + field2: "a".to_string(), + }; + let val2 = TestStruct { + field1: 2, + field2: "b".to_string(), + }; + 
+ let mut set = IterableSet::new("test_set"); + set.insert(val1.clone()); + set.insert(val2.clone()); + + assert!(set.contains(&val1)); + assert!(set.contains(&val2)); + + let mut collected: Vec<_> = set.iter().collect(); + collected.sort_by(|a, b| a.field1.cmp(&b.field1)); + assert_eq!(collected, vec![val1, val2]); + }) + .unwrap(); + } + + #[test] + fn duplicate_insertions() { + dispatch(|| { + let mut set = IterableSet::new("test_set"); + set.insert(1); + set.insert(1); // Should be no-op + + assert_eq!(set.iter().count(), 1); + set.remove(&1); + assert!(set.is_empty()); + }) + .unwrap(); + } + + #[test] + fn empty_set_behavior() { + dispatch(|| { + let set = IterableSet::::new("test_set"); + assert!(set.is_empty()); + assert_eq!(set.iter().count(), 0); + + let mut set = set; + set.remove(&999); // Shouldn't panic + assert!(set.is_empty()); + }) + .unwrap(); + } + + #[test] + fn complex_operations_sequence() { + dispatch(|| { + let mut set = IterableSet::new("test_set"); + set.insert(1); + set.insert(2); + set.remove(&1); + set.insert(3); + set.clear(); + set.insert(4); + + let items: Vec<_> = set.iter().collect(); + assert_eq!(items, vec![4]); + }) + .unwrap(); + } +} diff --git a/smart_contracts/sdk/src/collections/lookup_key.rs b/smart_contracts/sdk/src/collections/lookup_key.rs new file mode 100644 index 0000000000..98843dae95 --- /dev/null +++ b/smart_contracts/sdk/src/collections/lookup_key.rs @@ -0,0 +1,33 @@ +use borsh::BorshSerialize; + +pub trait LookupKey<'a>: Default { + type Output: AsRef<[u8]> + 'a; + fn lookup(&self, prefix: &'a [u8], key: &T) -> Self::Output; +} + +pub trait LookupKeyOwned: for<'a> LookupKey<'a> {} +impl LookupKeyOwned for T where T: for<'a> LookupKey<'a> {} + +#[derive(Default)] +pub struct Identity; +impl<'a> LookupKey<'a> for Identity { + type Output = &'a [u8]; + + #[inline(always)] + fn lookup(&self, prefix: &'a [u8], _key: &T) -> Self::Output { + prefix + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn 
identity_should_work() { + let identity = Identity; + let prefix = b"foo"; + let key = 123u64; + assert_eq!(identity.lookup(prefix, &key), prefix); + } +} diff --git a/smart_contracts/sdk/src/collections/map.rs b/smart_contracts/sdk/src/collections/map.rs new file mode 100644 index 0000000000..0dfead7027 --- /dev/null +++ b/smart_contracts/sdk/src/collections/map.rs @@ -0,0 +1,122 @@ +use crate::{ + abi::{CasperABI, Declaration, Definition, StructField}, + casper::{self, read_into_vec}, + serializers::borsh::{BorshDeserialize, BorshSerialize}, +}; +use casper_executor_wasm_common::keyspace::Keyspace; +use const_fnv1a_hash::fnv1a_hash_str_64; + +use crate::prelude::marker::PhantomData; + +#[derive(BorshSerialize, BorshDeserialize, Debug, Clone)] +#[borsh(crate = "crate::serializers::borsh")] +pub struct Map { + pub(crate) name: String, + pub(crate) _marker: PhantomData<(K, V)>, +} + +/// Computes the prefix for a given key. +#[allow(dead_code)] +pub(crate) const fn compute_prefix(input: &str) -> [u8; 8] { + let hash = fnv1a_hash_str_64(input); + hash.to_le_bytes() +} + +impl Map +where + K: BorshSerialize, + V: BorshSerialize + BorshDeserialize, +{ + pub fn new>(name: S) -> Self { + Self { + name: name.into(), + _marker: PhantomData, + } + } + + pub fn insert(&mut self, key: &K, value: &V) { + let mut context_key = Vec::new(); + context_key.extend(self.name.as_bytes()); + // NOTE: We may want to create new keyspace for a hashed context element to avoid hashing in + // the wasm. 
+ key.serialize(&mut context_key).unwrap(); + let prefix = Keyspace::Context(&context_key); + casper::write(prefix, &borsh::to_vec(value).unwrap()).unwrap(); + } + + pub fn remove(&mut self, key: &K) { + let prefix_bytes = self.compute_prefix_for_key(key); + let prefix = Keyspace::Context(&prefix_bytes); + casper::remove(prefix).unwrap(); + } + + pub fn get(&self, key: &K) -> Option { + let mut key_bytes = self.name.as_bytes().to_owned(); + key.serialize(&mut key_bytes).unwrap(); + let prefix = Keyspace::Context(&key_bytes); + read_into_vec(prefix) + .unwrap() + .map(|vec| borsh::from_slice(&vec).unwrap()) + } + + fn compute_prefix_for_key(&self, key: &K) -> Vec { + let mut context_key = Vec::new(); + context_key.extend(self.name.as_bytes()); + key.serialize(&mut context_key).unwrap(); + context_key + } +} + +impl CasperABI for Map { + fn populate_definitions(definitions: &mut crate::abi::Definitions) { + definitions.populate_one::(); + definitions.populate_one::(); + } + + fn declaration() -> Declaration { + format!("Map<{}, {}>", K::declaration(), V::declaration()) + } + #[inline] + fn definition() -> Definition { + Definition::Struct { + items: vec![StructField { + name: "prefix".into(), + decl: u64::declaration(), + }], + } + } +} + +#[cfg(test)] +pub(crate) mod tests { + use super::*; + + #[test] + fn test_compute_prefix() { + let prefix = compute_prefix("hello"); + assert_eq!(prefix.as_slice(), &[11, 189, 170, 128, 70, 216, 48, 164]); + let back = u64::from_le_bytes(prefix); + assert_eq!(fnv1a_hash_str_64("hello"), back); + } + + #[ignore] + #[test] + fn test_map() { + let mut map = Map::::new("test"); + map.insert(&1, &2); + assert_eq!(map.get(&1), Some(2)); + assert_eq!(map.get(&2), None); + map.insert(&2, &3); + assert_eq!(map.get(&1), Some(2)); + assert_eq!(map.get(&2), Some(3)); + + let mut map = Map::::new("test2"); + assert_eq!(map.get(&1), None); + map.insert(&1, &22); + assert_eq!(map.get(&1), Some(22)); + assert_eq!(map.get(&2), None); + 
map.insert(&2, &33); + assert_eq!(map.get(&1), Some(22)); + assert_eq!(map.get(&2), Some(33)); + } +} diff --git a/smart_contracts/sdk/src/collections/set.rs b/smart_contracts/sdk/src/collections/set.rs new file mode 100644 index 0000000000..e7d22f3a92 --- /dev/null +++ b/smart_contracts/sdk/src/collections/set.rs @@ -0,0 +1,78 @@ +use crate::prelude::marker::PhantomData; + +use crate::{casper, serializers::borsh::BorshSerialize}; +use casper_executor_wasm_common::keyspace::Keyspace; + +use super::lookup_key::{Identity, LookupKey, LookupKeyOwned}; + +#[derive(Clone)] +pub struct Set +where + T: BorshSerialize, +{ + prefix: String, + lookup: L, + _marker: PhantomData, +} + +impl Set +where + T: BorshSerialize, + L: LookupKeyOwned, + for<'a> >::Output: AsRef<[u8]>, +{ + pub fn new(prefix: String) -> Self { + Self { + prefix, + lookup: L::default(), + _marker: PhantomData, + } + } + + pub fn insert(&mut self, key: T) { + let lookup_key = self.lookup.lookup(self.prefix.as_bytes(), &key); + casper::write(Keyspace::Context(lookup_key.as_ref()), &[]).unwrap(); + } + + pub fn contains_key(&self, key: T) -> bool { + let lookup_key = self.lookup.lookup(self.prefix.as_bytes(), &key); + let entry = casper::read(Keyspace::Context(lookup_key.as_ref()), |_size| None).unwrap(); + entry.is_some() + } +} + +#[cfg(test)] +mod tests { + use crate::prelude::*; + + use crate::serializers::borsh::BorshSerialize; + + use super::Set; + + #[derive(BorshSerialize)] + #[borsh(crate = "crate::serializers::borsh")] + pub enum Flag { + A, + B, + C, + } + + #[ignore] + #[test] + fn should_insert() { + let mut set: Set = Set::new("Prefix".to_string()); + + assert!(!set.contains_key(Flag::A)); + assert!(!set.contains_key(Flag::B)); + assert!(!set.contains_key(Flag::C)); + + set.insert(Flag::A); + assert!(set.contains_key(Flag::A)); + + set.insert(Flag::B); + assert!(set.contains_key(Flag::B)); + + set.insert(Flag::C); + assert!(set.contains_key(Flag::C)); + } +} diff --git 
a/smart_contracts/sdk/src/collections/sorted_vector.rs b/smart_contracts/sdk/src/collections/sorted_vector.rs new file mode 100644 index 0000000000..b1ab8fde60 --- /dev/null +++ b/smart_contracts/sdk/src/collections/sorted_vector.rs @@ -0,0 +1,123 @@ +use crate::serializers::borsh::{BorshDeserialize, BorshSerialize}; + +use crate::abi::CasperABI; + +use super::Vector; + +#[derive(BorshSerialize, BorshDeserialize, Debug, Clone)] +#[borsh(crate = "crate::serializers::borsh")] +pub struct SortedVector { + vector: Vector, +} + +impl CasperABI for SortedVector { + fn populate_definitions(definitions: &mut crate::abi::Definitions) { + T::populate_definitions(definitions) + } + + fn declaration() -> crate::abi::Declaration { + format!("SortedVector<{}>", T::declaration()) + } + + fn definition() -> crate::abi::Definition { + crate::abi::Definition::Struct { + items: vec![ + crate::abi::StructField { + name: "prefix".into(), + decl: String::declaration(), + }, + crate::abi::StructField { + name: "length".into(), + decl: u64::declaration(), + }, + ], + } + } +} + +impl SortedVector +where + T: BorshSerialize + BorshDeserialize + Ord, +{ + pub fn new>(prefix: S) -> Self { + Self { + vector: Vector::new(prefix), + } + } + + pub fn push(&mut self, value: T) { + let pos = self.vector.binary_search(&value).unwrap_or_else(|e| e); + self.vector.insert(pos, value); + } + + pub fn remove(&mut self, index: u64) -> Option { + self.vector.remove(index) + } + + #[inline] + pub fn contains(&self, value: &T) -> bool { + self.vector.binary_search(value).is_ok() + } + + #[inline(always)] + pub fn get(&self, index: u64) -> Option { + self.vector.get(index) + } + + #[inline(always)] + pub fn iter(&self) -> impl Iterator + '_ { + self.vector.iter() + } + + #[inline(always)] + pub fn len(&self) -> u64 { + self.vector.len() + } + + #[inline(always)] + pub fn is_empty(&self) -> bool { + self.vector.is_empty() + } + + #[inline(always)] + pub fn retain(&mut self, f: F) + where + F: FnMut(&T) -> 
bool, + { + self.vector.retain(f); + } +} + +#[cfg(all(test, feature = "std"))] +mod tests { + use crate::casper::native::dispatch; + + use super::*; + + #[test] + fn test_sorted_vector() { + dispatch(|| { + let mut sorted_vector = SortedVector::new("sorted_vector"); + + sorted_vector.push(2); + sorted_vector.push(1); + sorted_vector.push(3); + sorted_vector.push(0); + sorted_vector.push(0); + sorted_vector.push(3); + + assert!(sorted_vector.contains(&0)); + assert!(sorted_vector.contains(&2)); + assert!(!sorted_vector.contains(&15)); + + let vec_1: Vec<_> = sorted_vector.iter().collect(); + assert_eq!(vec_1, vec![0, 0, 1, 2, 3, 3]); + + sorted_vector.remove(2); + + let vec_2: Vec<_> = sorted_vector.iter().collect(); + assert_eq!(vec_2, vec![0, 0, 2, 3, 3]); + }) + .unwrap(); + } +} diff --git a/smart_contracts/sdk/src/collections/vector.rs b/smart_contracts/sdk/src/collections/vector.rs new file mode 100644 index 0000000000..f8f17095b9 --- /dev/null +++ b/smart_contracts/sdk/src/collections/vector.rs @@ -0,0 +1,563 @@ +use crate::{ + abi::{CasperABI, Declaration, Definition, Definitions, StructField}, + casper::{self, read_into_vec}, + prelude::{cmp::Ordering, marker::PhantomData}, + serializers::borsh::{BorshDeserialize, BorshSerialize}, +}; + +use casper_executor_wasm_common::keyspace::Keyspace; + +#[derive(BorshSerialize, BorshDeserialize, Debug, Clone)] +#[borsh(crate = "crate::serializers::borsh")] +pub struct Vector { + pub(crate) prefix: String, + pub(crate) length: u64, + pub(crate) _marker: PhantomData, +} + +impl CasperABI for Vector { + fn populate_definitions(_definitions: &mut Definitions) {} + + fn declaration() -> Declaration { + format!("Vector<{}>", T::declaration()) + } + + fn definition() -> Definition { + Definition::Struct { + items: vec![ + StructField { + name: "prefix".into(), + decl: String::declaration(), + }, + StructField { + name: "length".into(), + decl: u64::declaration(), + }, + ], + } + } +} + +impl Vector +where + T: 
BorshSerialize + BorshDeserialize, +{ + /// Constructs a new, empty [`Vector`]. + /// + /// The vector header will not write itself to the GS, even if + /// values are pushed onto it later. + pub fn new>(prefix: S) -> Self { + Self { + prefix: prefix.into(), + length: 0, + _marker: PhantomData, + } + } + + /// Appends an element to the back of a collection. + pub fn push(&mut self, value: T) { + let prefix_bytes = self.compute_prefix_bytes_for_index(self.length); + let prefix = Keyspace::Context(&prefix_bytes); + casper::write(prefix, &borsh::to_vec(&value).unwrap()).unwrap(); + self.length += 1; + } + + /// Removes the last element from a vector and returns it, or None if it is empty. + pub fn pop(&mut self) -> Option { + if self.is_empty() { + return None; + } + self.swap_remove(self.len() - 1) + } + + /// Returns true if the slice contains an element with the given value. + /// + /// This operation is O(n). + pub fn contains(&self, value: &T) -> bool + where + T: PartialEq, + { + self.iter().any(|v| v == *value) + } + + /// Returns an element at index, deserialized. + pub fn get(&self, index: u64) -> Option { + let prefix = self.compute_prefix_bytes_for_index(index); + let item_keyspace = Keyspace::Context(&prefix); + read_into_vec(item_keyspace) + .unwrap() + .map(|vec| borsh::from_slice(&vec).unwrap()) + } + + /// Returns an iterator over self, with elements deserialized. + pub fn iter(&self) -> impl Iterator + '_ { + (0..self.length).map(move |i| self.get(i).unwrap()) + } + + /// Inserts an element at position `index` within the vector, shifting all elements after it to + /// the right. 
+ pub fn insert(&mut self, index: u64, value: T) { + assert!(index <= self.length, "index out of bounds"); + + // Shift elements to the right + for i in (index..self.length).rev() { + if let Some(src_value) = self.get(i) { + self.write(i + 1, src_value); + } + } + + // Write the new value at the specified index + self.write(index, value); + + self.length += 1; + } + + /// Clears the vector, removing all values from the global state. + /// This is potentially expensive, as it requires an iteration over all elements to remove them + /// from the global state. + pub fn clear(&mut self) { + for i in 0..self.length { + let prefix_bytes = self.compute_prefix_bytes_for_index(i); + let item_keyspace = Keyspace::Context(&prefix_bytes); + casper::remove(item_keyspace).unwrap(); + } + self.length = 0; + } + + /// Returns the number of elements in the vector, also referred to as its ‘length’. + #[inline(always)] + pub fn len(&self) -> u64 { + self.length + } + + /// Returns `true` if the vector contains no elements. + #[inline(always)] + pub fn is_empty(&self) -> bool { + self.length == 0 + } + + /// Binary searches this vector for a given element. If the vector is not sorted, the returned + /// result is unspecified and meaningless. + pub fn binary_search(&self, value: &T) -> Result + where + T: Ord, + { + self.binary_search_by(|v| v.cmp(value)) + } + + /// Binary searches this slice with a comparator function. + /// + /// The comparator function should return an [Ordering] that indicates whether its argument is + /// `Less`, `Equal` or `Greater` the desired target. If the slice is not sorted or if the + /// comparator function does not implement an order consistent with the sort order of the + /// underlying slice, the returned result is unspecified and meaningless. 
+ pub fn binary_search_by(&self, mut f: F) -> Result + where + F: FnMut(&T) -> Ordering, + { + // INVARIANTS: + // - 0 <= left <= left + size = right <= self.len() + // - f returns Less for everything in self[..left] + // - f returns Greater for everything in self[right..] + let mut size = self.len(); + let mut left = 0; + let mut right = size; + while left < right { + let mid = left + size / 2; + + // SAFETY: the while condition means `size` is strictly positive, so + // `size/2 < size`. Thus `left + size/2 < left + size`, which + // coupled with the `left + size <= self.len()` invariant means + // we have `left + size/2 < self.len()`, and this is in-bounds. + let cmp = f(&self.get(mid).unwrap()); + + // This control flow produces conditional moves, which results in + // fewer branches and instructions than if/else or matching on + // cmp::Ordering. + // This is x86 asm for u8: https://rust.godbolt.org/z/698eYffTx. + left = if cmp == Ordering::Less { mid + 1 } else { left }; + right = if cmp == Ordering::Greater { mid } else { right }; + if cmp == Ordering::Equal { + // SAFETY: same as the `get_unchecked` above + assert!(mid < self.len()); + return Ok(mid); + } + + size = right - left; + } + + // SAFETY: directly true from the overall invariant. + // Note that this is `<=`, unlike the assume in the `Ok` path. + assert!(left <= self.len()); + Err(left) + } + + /// Removes the element at the specified index and returns it. + /// + /// Note: Because this shifts over the remaining elements, it has a + /// worst-case performance of O(n). If you don’t need the order of + /// elements to be preserved, use `swap_remove` instead. 
+ pub fn remove(&mut self, index: u64) -> Option { + if index >= self.length { + return None; + } + + let value_to_remove = self.get(index).unwrap(); + + // Shift elements to the left + for i in index..(self.length - 1) { + if let Some(next_value) = self.get(i + 1) { + self.write(i, next_value); + } + } + + // Remove the last element from storage + self.length -= 1; + casper::remove(Keyspace::Context( + &self.compute_prefix_bytes_for_index(self.length), + )) + .unwrap(); + + Some(value_to_remove) + } + + /// Removes the element at the specified index and returns it. + /// + /// The removed element is replaced by the last element of the vector. + /// This does not preserve ordering of the remaining elements, but is O(1). + pub fn swap_remove(&mut self, index: u64) -> Option { + if index >= self.length { + return None; + } + + let value_to_remove = self.get(index).unwrap(); + let last_value = self.get(self.len() - 1).unwrap(); + + if index != self.len() - 1 { + self.write(index, last_value); + } + + self.length -= 1; + casper::remove(Keyspace::Context( + &self.compute_prefix_bytes_for_index(self.length), + )) + .unwrap(); + + Some(value_to_remove) + } + + /// Retains only the elements specified by the predicate. 
+ pub fn retain(&mut self, mut f: F) + where + F: FnMut(&T) -> bool, + { + let mut i = 0; + while i < self.length { + if !f(&self.get(i).unwrap()) { + self.remove(i).unwrap(); + } else { + i += 1; + } + } + } + + #[inline(always)] + fn compute_prefix_bytes_for_index(&self, index: u64) -> Vec { + compute_prefix_bytes_for_index(&self.prefix, index) + } + + fn write(&self, index: u64, value: T) { + let prefix_bytes = self.compute_prefix_bytes_for_index(index); + let prefix = Keyspace::Context(&prefix_bytes); + casper::write(prefix, &borsh::to_vec(&value).unwrap()).unwrap(); + } +} + +fn compute_prefix_bytes_for_index(prefix: &str, index: u64) -> Vec { + let mut prefix_bytes = prefix.as_bytes().to_owned(); + prefix_bytes.extend(&index.to_le_bytes()); + prefix_bytes +} + +#[cfg(all(test, feature = "std"))] +pub(crate) mod tests { + use core::ptr::NonNull; + + use self::casper::native::dispatch; + + use super::*; + + const TEST_VEC_PREFIX: &str = "test_vector"; + type VecU64 = Vector; + + fn get_vec_elements_from_storage(prefix: &str) -> Vec { + let mut values = Vec::new(); + for idx in 0..64 { + let prefix = compute_prefix_bytes_for_index(prefix, idx); + let mut value: [u8; 8] = [0; 8]; + let result = casper::read(Keyspace::Context(&prefix), |size| { + assert_eq!(size, 8); + NonNull::new(value.as_mut_ptr()) + }) + .unwrap(); + + if result.is_some() { + values.push(u64::from_le_bytes(value)); + } + } + values + } + + #[test] + fn should_not_panic_with_empty_vec() { + dispatch(|| { + let mut vec = VecU64::new(TEST_VEC_PREFIX); + assert_eq!(vec.len(), 0); + assert_eq!(vec.remove(0), None); + vec.retain(|_| false); + let _ = vec.binary_search(&123); + assert_eq!( + get_vec_elements_from_storage(TEST_VEC_PREFIX), + Vec::::new() + ); + }) + .unwrap(); + } + + #[test] + fn should_retain() { + dispatch(|| { + let mut vec = VecU64::new(TEST_VEC_PREFIX); + + vec.push(1); + vec.push(2); + vec.push(3); + vec.push(4); + vec.push(5); + + vec.retain(|v| *v % 2 == 0); + + let vec: 
Vec<_> = vec.iter().collect(); + assert_eq!(vec, vec![2, 4]); + + assert_eq!(get_vec_elements_from_storage(TEST_VEC_PREFIX), vec![2, 4]); + }) + .unwrap(); + } + + #[test] + fn test_vec() { + dispatch(|| { + let mut vec = VecU64::new(TEST_VEC_PREFIX); + + assert!(vec.get(0).is_none()); + vec.push(111); + assert_eq!(vec.get(0), Some(111)); + vec.push(222); + assert_eq!(vec.get(1), Some(222)); + + vec.insert(0, 42); + vec.insert(0, 41); + vec.insert(1, 43); + vec.insert(5, 333); + vec.insert(5, 334); + assert_eq!(vec.remove(5), Some(334)); + assert_eq!(vec.remove(55), None); + + let mut iter = vec.iter(); + assert_eq!(iter.next(), Some(41)); + assert_eq!(iter.next(), Some(43)); + assert_eq!(iter.next(), Some(42)); + assert_eq!(iter.next(), Some(111)); + assert_eq!(iter.next(), Some(222)); + assert_eq!(iter.next(), Some(333)); + assert_eq!(iter.next(), None); + + { + let ser = borsh::to_vec(&vec).unwrap(); + let deser: Vector = borsh::from_slice(&ser).unwrap(); + let mut iter = deser.iter(); + assert_eq!(iter.next(), Some(41)); + assert_eq!(iter.next(), Some(43)); + assert_eq!(iter.next(), Some(42)); + assert_eq!(iter.next(), Some(111)); + assert_eq!(iter.next(), Some(222)); + assert_eq!(iter.next(), Some(333)); + assert_eq!(iter.next(), None); + } + + assert_eq!( + get_vec_elements_from_storage(TEST_VEC_PREFIX), + vec![41, 43, 42, 111, 222, 333] + ); + + let vec2 = VecU64::new("test1"); + assert_eq!(vec2.get(0), None); + + assert_eq!(get_vec_elements_from_storage("test1"), Vec::::new()); + }) + .unwrap(); + } + + #[test] + fn test_pop() { + dispatch(|| { + let mut vec = VecU64::new(TEST_VEC_PREFIX); + assert_eq!(vec.pop(), None); + vec.push(1); + vec.push(2); + assert_eq!(vec.pop(), Some(2)); + assert_eq!(vec.len(), 1); + assert_eq!(vec.pop(), Some(1)); + assert!(vec.is_empty()); + + assert_eq!( + get_vec_elements_from_storage(TEST_VEC_PREFIX), + Vec::::new() + ); + }) + .unwrap(); + } + + #[test] + fn test_contains() { + dispatch(|| { + let mut vec = 
VecU64::new(TEST_VEC_PREFIX); + vec.push(1); + vec.push(2); + assert!(vec.contains(&1)); + assert!(vec.contains(&2)); + assert!(!vec.contains(&3)); + vec.remove(0); + assert!(!vec.contains(&1)); + assert_eq!(get_vec_elements_from_storage(TEST_VEC_PREFIX), vec![2]); + }) + .unwrap(); + } + + #[test] + fn test_clear() { + dispatch(|| { + let mut vec = VecU64::new(TEST_VEC_PREFIX); + vec.push(1); + vec.push(2); + vec.clear(); + assert_eq!(vec.len(), 0); + assert!(vec.is_empty()); + assert_eq!(vec.get(0), None); + vec.push(3); + assert_eq!(vec.get(0), Some(3)); + + assert_eq!(get_vec_elements_from_storage(TEST_VEC_PREFIX), vec![3]); + }) + .unwrap(); + } + + #[test] + fn test_binary_search() { + dispatch(|| { + let mut vec = VecU64::new(TEST_VEC_PREFIX); + vec.push(1); + vec.push(2); + vec.push(3); + vec.push(4); + vec.push(5); + assert_eq!(vec.binary_search(&3), Ok(2)); + assert_eq!(vec.binary_search(&0), Err(0)); + assert_eq!(vec.binary_search(&6), Err(5)); + }) + .unwrap(); + } + + #[test] + fn test_swap_remove() { + dispatch(|| { + let mut vec = VecU64::new(TEST_VEC_PREFIX); + vec.push(1); + vec.push(2); + vec.push(3); + vec.push(4); + assert_eq!(vec.swap_remove(1), Some(2)); + assert_eq!(vec.iter().collect::>(), vec![1, 4, 3]); + assert_eq!(vec.swap_remove(2), Some(3)); + assert_eq!(vec.iter().collect::>(), vec![1, 4]); + + assert_eq!(get_vec_elements_from_storage(TEST_VEC_PREFIX), vec![1, 4]); + }) + .unwrap(); + } + + #[test] + fn test_insert_at_len() { + dispatch(|| { + let mut vec = VecU64::new(TEST_VEC_PREFIX); + vec.push(1); + vec.insert(1, 2); + assert_eq!(vec.iter().collect::>(), vec![1, 2]); + assert_eq!(get_vec_elements_from_storage(TEST_VEC_PREFIX), vec![1, 2]); + }) + .unwrap(); + } + + #[test] + fn test_struct_elements() { + #[derive(BorshSerialize, BorshDeserialize, PartialEq, Debug)] + struct TestStruct { + field: u64, + } + + dispatch(|| { + let mut vec = Vector::new(TEST_VEC_PREFIX); + vec.push(TestStruct { field: 1 }); + vec.push(TestStruct { 
field: 2 }); + assert_eq!(vec.get(1), Some(TestStruct { field: 2 })); + }) + .unwrap(); + } + + #[test] + fn test_multiple_operations() { + dispatch(|| { + let mut vec = VecU64::new(TEST_VEC_PREFIX); + assert!(vec.is_empty()); + vec.push(1); + vec.insert(0, 2); + vec.push(3); + assert_eq!(vec.iter().collect::>(), vec![2, 1, 3]); + assert_eq!(vec.swap_remove(0), Some(2)); + assert_eq!(vec.iter().collect::>(), vec![3, 1]); + assert_eq!(vec.pop(), Some(1)); + assert_eq!(vec.get(0), Some(3)); + vec.clear(); + assert!(vec.is_empty()); + + assert_eq!( + get_vec_elements_from_storage(TEST_VEC_PREFIX), + Vec::::new() + ); + }) + .unwrap(); + } + + #[test] + fn test_remove_invalid_index() { + dispatch(|| { + let mut vec = VecU64::new(TEST_VEC_PREFIX); + vec.push(1); + assert_eq!(vec.remove(1), None); + assert_eq!(vec.remove(0), Some(1)); + assert_eq!(vec.remove(0), None); + }) + .unwrap(); + } + + #[test] + #[should_panic(expected = "index out of bounds")] + fn test_insert_out_of_bounds() { + dispatch(|| { + let mut vec = VecU64::new(TEST_VEC_PREFIX); + vec.insert(1, 1); + }) + .unwrap(); + } +} diff --git a/smart_contracts/sdk/src/contrib.rs b/smart_contracts/sdk/src/contrib.rs new file mode 100644 index 0000000000..b328eaf3a5 --- /dev/null +++ b/smart_contracts/sdk/src/contrib.rs @@ -0,0 +1,4 @@ +pub mod access_control; +pub mod cep18; +pub mod ownable; +pub mod pausable; diff --git a/smart_contracts/sdk/src/contrib/access_control.rs b/smart_contracts/sdk/src/contrib/access_control.rs new file mode 100644 index 0000000000..f229649dc6 --- /dev/null +++ b/smart_contracts/sdk/src/contrib/access_control.rs @@ -0,0 +1,138 @@ +#[allow(unused_imports)] +use crate as casper_contract_sdk; // Workaround for absolute crate path in derive CasperABI macro + +use casper_contract_macros::casper; + +use crate::{ + casper::{self, Entity}, + collections::{sorted_vector::SortedVector, Map}, +}; + +/// A role is a unique identifier for a specific permission or set of permissions. 
+/// +/// You can use `blake2b256` macro to generate a unique identifier for a role at compile time. +pub type Role = [u8; 32]; + +/// A role is a unique identifier for a specific permission or set of permissions. +const ROLES_PREFIX: &str = "roles"; + +/// The state of the access control contract, which contains a mapping of entities to their roles. +#[casper(path = "crate")] +pub struct AccessControlState { + roles: Map>, +} + +impl AccessControlState { + /// Creates a new instance of `AccessControlState`. + pub fn new() -> Self { + Self { + roles: Map::new(ROLES_PREFIX), + } + } +} + +impl Default for AccessControlState { + fn default() -> Self { + Self::new() + } +} + +/// Represents the possible errors that can occur during access control operations. +#[casper(path = "crate")] +#[derive(PartialEq, Eq, Copy, Clone, Debug)] +pub enum AccessControlError { + /// The caller is not authorized to perform the action. + NotAuthorized, +} + +/// The AccessControl trait provides a simple role-based access control mechanism. +/// It allows for multiple roles to be assigned to an account, and provides functions to check, +/// grant, and revoke roles. +/// It also provides functions to check if the caller has a specific role or any of a set of roles. +/// +/// The roles are stored in a `Map` where the key is the account address and the value is a +/// `SortedVector` of roles. +/// +/// None of these methods are turned into smart contract entry points, so they are not exposed +/// accidentally. +/// +/// The `AccessControl` trait is designed to be used with the `casper` macro, which generates +/// the necessary boilerplate code for the contract. +#[casper(path = "crate", export = true)] +pub trait AccessControl { + /// The state of the contract, which contains the roles. + #[casper(private)] + fn state(&self) -> &AccessControlState; + /// The mutable state of the contract, which allows modifying the roles. 
+    #[casper(private)]
+    fn state_mut(&mut self) -> &mut AccessControlState;
+
+    /// Checks if the given account has the specified role.
+    #[casper(private)]
+    fn has_role(&self, entity: Entity, role: Role) -> bool {
+        match self.state().roles.get(&entity) {
+            Some(roles) => roles.contains(&role),
+            None => false,
+        }
+    }
+
+    #[casper(private)]
+    fn has_any_role(&self, entity: Entity, roles: &[Role]) -> bool {
+        match self.state().roles.get(&entity) {
+            Some(roles_vec) => roles_vec.iter().any(|r| roles.contains(&r)),
+            None => false,
+        }
+    }
+
+    /// Grants a role to an account. If the account already has the role, it does nothing.
+    #[casper(private)]
+    fn grant_role(&mut self, entity: Entity, role: Role) {
+        match self.state_mut().roles.get(&entity) {
+            Some(mut roles) => {
+                if roles.contains(&role) {
+                    return;
+                }
+                roles.push(role); self.state_mut().roles.insert(&entity, &roles); // persist updated length, as in the None arm
+            }
+            None => {
+                let mut roles = SortedVector::new(format!(
+                    "{ROLES_PREFIX}-{:02x}{}",
+                    entity.tag(),
+                    base16::encode_lower(&entity.address())
+                ));
+                roles.push(role);
+                self.state_mut().roles.insert(&entity, &roles);
+            }
+        }
+    }
+
+    /// Revokes a role from an account. If the account does not have the role, it does nothing.
+    #[casper(private)]
+    fn revoke_role(&mut self, entity: Entity, role: Role) {
+        if let Some(mut roles) = self.state_mut().roles.get(&entity) {
+            roles.retain(|r| r != &role); self.state_mut().roles.insert(&entity, &roles); // persist shrunk length so stale indices are not read
+        }
+    }
+
+    /// Checks if the caller has the specified role and reverts if not.
+    #[casper(private)]
+    fn require_role(&self, role: Role) -> Result<(), AccessControlError> {
+        let caller = casper::get_caller();
+        if !self.has_role(caller, role) {
+            // Caller does not have specified role.
+            return Err(AccessControlError::NotAuthorized);
+        }
+        Ok(())
+    }
+
+    /// Checks if the caller has any of the specified roles and reverts if not.
+ #[casper(private)] + fn require_any_role(&self, roles: &[Role]) -> Result<(), AccessControlError> { + let caller = casper::get_caller(); + if !self.has_any_role(caller, roles) { + // Caller does not have any of the specified roles. + return Err(AccessControlError::NotAuthorized); + } + Ok(()) + } +} diff --git a/smart_contracts/sdk/src/contrib/cep18.rs b/smart_contracts/sdk/src/contrib/cep18.rs new file mode 100644 index 0000000000..0abf18661b --- /dev/null +++ b/smart_contracts/sdk/src/contrib/cep18.rs @@ -0,0 +1,387 @@ +//! CEP-18 token standard. +//! +//! This module implements the CEP-18 token standard, which is a fungible token standard +//! for the Casper blockchain. It provides a set of functions and traits for creating, transferring, +//! and managing fungible tokens. +//! +//! The CEP-18 standard is designed to be simple and efficient, allowing developers to easily +//! create and manage fungible tokens on the Casper blockchain. It includes support for +//! minting, burning, and transferring tokens, as well as managing allowances and balances. +//! +//! The standard also includes support for events, allowing developers to emit events +//! when tokens are transferred, minted, or burned. This allows for easy tracking +//! and monitoring of token activity on the blockchain. +//! +//! It only requires implementation of `CEP18` trait for your contract to receive already +//! implemented entry points. +//! +//! # Example CEP18 token contract +//! +//! ```rust +//! use casper_contract_sdk::prelude::*; +//! use casper_contract_sdk::contrib::cep18::{CEP18, CEP18State, CEP18Ext, Mintable, Burnable}; +//! # use casper_contract_sdk::collections::Map; +//! # use casper_contract_sdk::macros::casper; +//! # use casper_contract_sdk::types::U256; +//! +//! #[casper(contract_state)] +//! struct MyToken { +//! state: CEP18State, +//! } +//! +//! impl Default for MyToken { +//! fn default() -> Self { +//! Self { +//! 
state: CEP18State::new("MyToken", "MTK", 18, U256::from(10_000_000_000u64)), +//! } +//! } +//! } +//! +//! #[casper] +//! impl MyToken { +//! #[casper(constructor)] +//! pub fn new() -> Self { +//! let my_token = Self::default(); +//! // Perform extra initialization if needed i.e. mint tokens, set genesis balance holders etc. +//! my_token +//! } +//! } +//! +//! #[casper(path = casper_contract_sdk::contrib::cep18)] +//! impl CEP18 for MyToken { +//! fn state(&self) -> &CEP18State { +//! &self.state +//! } +//! +//! fn state_mut(&mut self) -> &mut CEP18State { +//! &mut self.state +//! } +//! } +//! ``` +use bnum::types::U256; +use borsh::{BorshDeserialize, BorshSerialize}; +use casper_contract_macros::CasperABI; + +use super::access_control::{AccessControl, AccessControlError, Role}; +#[allow(unused_imports)] +use crate as casper_contract_sdk; +use crate::{collections::Map, macros::blake2b256, prelude::*}; + +/// While the code consuming this contract needs to define further error variants, it can +/// return those via the `Error::User` variant or equivalently via the `ApiError::User` +/// variant. +#[derive(Debug, PartialEq, Eq, CasperABI, BorshSerialize, BorshDeserialize)] +#[casper] +pub enum Cep18Error { + /// CEP-18 contract called from within an invalid context. + InvalidContext, + /// Spender does not have enough balance. + InsufficientBalance, + /// Spender does not have enough allowance approved. + InsufficientAllowance, + /// Operation would cause an integer overflow. + Overflow, + /// A required package hash was not specified. + PackageHashMissing, + /// The package hash specified does not represent a package. + PackageHashNotPackage, + /// An invalid event mode was specified. + InvalidEventsMode, + /// The event mode required was not specified. + MissingEventsMode, + /// An unknown error occurred. + Phantom, + /// Failed to read the runtime arguments provided. + FailedToGetArgBytes, + /// The caller does not have sufficient security access. 
+ InsufficientRights, + /// The list of Admin accounts provided is invalid. + InvalidAdminList, + /// The list of accounts that can mint tokens is invalid. + InvalidMinterList, + /// The list of accounts with no access rights is invalid. + InvalidNoneList, + /// The flag to enable the mint and burn mode is invalid. + InvalidEnableMBFlag, + /// This contract instance cannot be initialized again. + AlreadyInitialized, + /// The mint and burn mode is disabled. + MintBurnDisabled, + CannotTargetSelfUser, + InvalidBurnTarget, +} + +impl From for Cep18Error { + fn from(error: AccessControlError) -> Self { + match error { + AccessControlError::NotAuthorized => Cep18Error::InsufficientRights, + } + } +} + +#[casper(message, path = crate)] +pub struct Transfer { + pub from: Option, + pub to: Entity, + pub amount: U256, +} + +#[casper(message, path = crate)] +pub struct Approve { + pub owner: Entity, + pub spender: Entity, + pub amount: U256, +} + +pub const ADMIN_ROLE: Role = blake2b256!("admin"); +pub const MINTER_ROLE: Role = blake2b256!("minter"); + +#[casper(path = crate)] +pub struct CEP18State { + pub name: String, + pub symbol: String, + pub decimals: u8, + pub total_supply: U256, + pub balances: Map, + pub allowances: Map<(Entity, Entity), U256>, + pub enable_mint_burn: bool, +} + +impl CEP18State { + fn transfer_balance( + &mut self, + sender: &Entity, + recipient: &Entity, + amount: U256, + ) -> Result<(), Cep18Error> { + if amount.is_zero() { + return Ok(()); + } + + let sender_balance = self.balances.get(sender).unwrap_or_default(); + + let new_sender_balance = sender_balance + .checked_sub(amount) + .ok_or(Cep18Error::InsufficientBalance)?; + + let recipient_balance = self.balances.get(recipient).unwrap_or_default(); + + let new_recipient_balance = recipient_balance + .checked_add(amount) + .ok_or(Cep18Error::Overflow)?; + + self.balances.insert(sender, &new_sender_balance); + self.balances.insert(recipient, &new_recipient_balance); + Ok(()) + } +} + +impl 
CEP18State {
+    pub fn new(name: &str, symbol: &str, decimals: u8, total_supply: U256) -> CEP18State {
+        CEP18State {
+            name: name.to_string(),
+            symbol: symbol.to_string(),
+            decimals,
+            total_supply,
+            balances: Map::new("balances"),
+            allowances: Map::new("allowances"),
+            enable_mint_burn: false,
+        }
+    }
+}
+
+#[casper(path = crate, export = true)]
+pub trait CEP18 {
+    #[casper(private)]
+    fn state(&self) -> &CEP18State;
+
+    #[casper(private)]
+    fn state_mut(&mut self) -> &mut CEP18State;
+
+    fn name(&self) -> &str {
+        &self.state().name
+    }
+
+    fn symbol(&self) -> &str {
+        &self.state().symbol
+    }
+
+    fn decimals(&self) -> u8 {
+        self.state().decimals
+    }
+
+    fn total_supply(&self) -> U256 {
+        self.state().total_supply
+    }
+
+    fn balance_of(&self, address: Entity) -> U256 {
+        self.state().balances.get(&address).unwrap_or_default()
+    }
+
+    fn allowance(&self, spender: Entity, owner: Entity) -> U256 {
+        self.state()
+            .allowances
+            .get(&(owner, spender)) // key order must match approve/transfer_from: (owner, spender)
+            .unwrap_or_default()
+    }
+
+    #[casper(revert_on_error)]
+    fn approve(&mut self, spender: Entity, amount: U256) -> Result<(), Cep18Error> {
+        let owner = casper::get_caller();
+        if owner == spender {
+            return Err(Cep18Error::CannotTargetSelfUser);
+        }
+        let lookup_key = (owner, spender);
+        self.state_mut().allowances.insert(&lookup_key, &amount);
+        casper::emit(Approve {
+            owner,
+            spender,
+            amount,
+        })
+        .expect("failed to emit message");
+        Ok(())
+    }
+
+    #[casper(revert_on_error)]
+    fn decrease_allowance(&mut self, spender: Entity, amount: U256) -> Result<(), Cep18Error> {
+        let owner = casper::get_caller();
+        if owner == spender {
+            return Err(Cep18Error::CannotTargetSelfUser);
+        }
+        let lookup_key = (owner, spender);
+        let allowance = self.state().allowances.get(&lookup_key).unwrap_or_default();
+        let allowance = allowance.saturating_sub(amount);
+        self.state_mut().allowances.insert(&lookup_key, &allowance);
+        Ok(())
+    }
+
+    #[casper(revert_on_error)]
+    fn increase_allowance(&mut self, spender: Entity, amount:
U256) -> Result<(), Cep18Error> { + let owner = casper::get_caller(); + if owner == spender { + return Err(Cep18Error::CannotTargetSelfUser); + } + let lookup_key = (owner, spender); + let allowance = self.state().allowances.get(&lookup_key).unwrap_or_default(); + let allowance = allowance.saturating_add(amount); + self.state_mut().allowances.insert(&lookup_key, &allowance); + Ok(()) + } + + #[casper(revert_on_error)] + fn transfer(&mut self, recipient: Entity, amount: U256) -> Result<(), Cep18Error> { + let sender = casper::get_caller(); + if sender == recipient { + return Err(Cep18Error::CannotTargetSelfUser); + } + self.state_mut() + .transfer_balance(&sender, &recipient, amount)?; + + // NOTE: This is operation is fallible, although it's not expected to fail under any + // circumstances (number of topics per contract, payload size, topic size, number of + // messages etc. are all under control). + casper::emit(Transfer { + from: Some(sender), + to: recipient, + amount, + }) + .expect("failed to emit message"); + + Ok(()) + } + + #[casper(revert_on_error)] + fn transfer_from( + &mut self, + owner: Entity, + recipient: Entity, + amount: U256, + ) -> Result<(), Cep18Error> { + let spender = casper::get_caller(); + if owner == recipient { + return Err(Cep18Error::CannotTargetSelfUser); + } + + if amount.is_zero() { + return Ok(()); + } + + let spender_allowance = self + .state() + .allowances + .get(&(owner, spender)) + .unwrap_or_default(); + let new_spender_allowance = spender_allowance + .checked_sub(amount) + .ok_or(Cep18Error::InsufficientAllowance)?; + + self.state_mut() + .transfer_balance(&owner, &recipient, amount)?; + + self.state_mut() + .allowances + .insert(&(owner, spender), &new_spender_allowance); + + casper::emit(Transfer { + from: Some(owner), + to: recipient, + amount, + }) + .expect("failed to emit message"); + + Ok(()) + } +} + +#[casper(path = crate, export = true)] +pub trait Mintable: CEP18 + AccessControl { + #[casper(revert_on_error)] + fn 
mint(&mut self, owner: Entity, amount: U256) -> Result<(), Cep18Error> {
+        if !CEP18::state(self).enable_mint_burn {
+            return Err(Cep18Error::MintBurnDisabled);
+        }
+
+        AccessControl::require_any_role(self, &[ADMIN_ROLE, MINTER_ROLE])?;
+
+        let balance = CEP18::state(self).balances.get(&owner).unwrap_or_default();
+        let new_balance = balance.checked_add(amount).ok_or(Cep18Error::Overflow)?;
+        CEP18::state_mut(self).balances.insert(&owner, &new_balance);
+        CEP18::state_mut(self).total_supply = CEP18::state(self)
+            .total_supply
+            .checked_add(amount)
+            .ok_or(Cep18Error::Overflow)?;
+
+        casper::emit(Transfer {
+            from: None,
+            to: owner,
+            amount,
+        })
+        .expect("failed to emit message");
+
+        Ok(())
+    }
+}
+
+#[casper(path = crate, export = true)]
+pub trait Burnable: CEP18 {
+    #[casper(revert_on_error)]
+    fn burn(&mut self, owner: Entity, amount: U256) -> Result<(), Cep18Error> {
+        if !self.state().enable_mint_burn {
+            return Err(Cep18Error::MintBurnDisabled);
+        }
+
+        if owner != casper::get_caller() {
+            return Err(Cep18Error::InvalidBurnTarget);
+        }
+
+        let balance = self.state().balances.get(&owner).unwrap_or_default();
+        let new_balance = balance.checked_sub(amount).ok_or(Cep18Error::InsufficientBalance)?; // burn debits the balance; adding would mint tokens
+        self.state_mut().balances.insert(&owner, &new_balance);
+        self.state_mut().total_supply = self
+            .state()
+            .total_supply
+            .checked_sub(amount)
+            .ok_or(Cep18Error::Overflow)?;
+        Ok(())
+    }
+}
diff --git a/smart_contracts/sdk/src/contrib/ownable.rs b/smart_contracts/sdk/src/contrib/ownable.rs
new file mode 100644
index 0000000000..da66d3caeb
--- /dev/null
+++ b/smart_contracts/sdk/src/contrib/ownable.rs
@@ -0,0 +1,88 @@
+//! This module provides an implementation of the Ownable pattern for smart contracts.
+//!
+//! The Ownable pattern is a common design pattern in smart contracts that allows for
+//! a single owner to control the contract. This module provides a simple implementation
+//! of this pattern, allowing for ownership to be transferred or renounced.
+use borsh::{BorshDeserialize, BorshSerialize}; +use casper_contract_macros::CasperABI; + +#[allow(unused_imports)] +use crate as casper_contract_sdk; +use crate::{casper::Entity, macros::casper}; + +/// The state of the Ownable contract, which contains the owner of the contract. +#[casper(path = crate)] +pub struct OwnableState { + owner: Option, +} + +impl Default for OwnableState { + fn default() -> Self { + Self { + owner: Some(crate::casper::get_caller()), + } + } +} + +/// Represents the possible errors that can occur during ownership operations. +#[derive(CasperABI, BorshSerialize, BorshDeserialize)] +#[casper(path = crate)] +pub enum OwnableError { + /// The caller is not authorized to perform the action. + NotAuthorized, +} + +/// The Ownable trait provides a simple ownership model for smart contracts. +/// It allows for a single owner to be set, and provides functions to transfer or renounce +/// ownership. +#[casper(path = crate, export = true)] +pub trait Ownable { + #[casper(private)] + fn state(&self) -> &OwnableState; + #[casper(private)] + fn state_mut(&mut self) -> &mut OwnableState; + + /// Checks if the caller is the owner of the contract. + /// + /// This function is used to restrict access to certain functions to only the owner. + #[casper(private)] + fn only_owner(&self) -> Result<(), OwnableError> { + let caller = crate::casper::get_caller(); + match self.state().owner { + Some(owner) if caller != owner => { + return Err(OwnableError::NotAuthorized); + } + None => { + return Err(OwnableError::NotAuthorized); + } + Some(_owner) => {} + } + Ok(()) + } + + /// Transfers ownership of the contract to a new owner. + #[casper(revert_on_error)] + fn transfer_ownership(&mut self, new_owner: Entity) -> Result<(), OwnableError> { + self.only_owner()?; + self.state_mut().owner = Some(new_owner); + Ok(()) + } + + /// Returns the current owner of the contract. 
+ fn owner(&self) -> Option { + self.state().owner + } + + /// Renounces ownership of the contract, making it no longer owned by any entity. + /// + /// This function can only be called by the current owner of the contract + /// once the contract is deployed. After calling this function, the contract + /// will no longer have an owner, and no entity will be able to call + /// functions that require ownership. + #[casper(revert_on_error)] + fn renounce_ownership(&mut self) -> Result<(), OwnableError> { + self.only_owner()?; + self.state_mut().owner = None; + Ok(()) + } +} diff --git a/smart_contracts/sdk/src/contrib/pausable.rs b/smart_contracts/sdk/src/contrib/pausable.rs new file mode 100644 index 0000000000..7e7e955bbc --- /dev/null +++ b/smart_contracts/sdk/src/contrib/pausable.rs @@ -0,0 +1,93 @@ +//! This module provides a trait for pausable contracts. +//! +//! The `Pausable` trait allows contracts to be paused and unpaused, which can be useful +//! in scenarios where the contract needs to be temporarily disabled for maintenance or +//! security reasons. The trait provides methods to check the current pause state, as well +//! as to pause and unpause the contract. +//! +//! The `Pausable` trait is designed to be used with the `casper` macro, which generates +//! the necessary boilerplate code for the contract. +//! +//! For security reasons you may want to combine `AccessControl` or `Ownable` with +//! this trait to ensure that only selected entities can manage the pause state. +use crate::{self as casper_contract_sdk, casper, casper::Entity, macros::casper}; + +#[casper] +pub struct PausedState { + paused: bool, +} + +#[casper(path = crate)] +pub enum PausableError { + EnforcedPause, + ExpectedPause, +} + +/// The `Paused` event is emitted when the contract is paused. +#[casper(message, path = crate)] +pub struct Paused { + entity: Entity, +} + +/// The `Unpaused` event is emitted when the contract is unpaused. 
+#[casper(message, path = crate)] +pub struct Unpaused { + entity: Entity, +} + +/// Pausable is a trait that provides a simple way to pause and unpause a contract. +#[casper(path = crate, export = true)] +pub trait Pausable { + /// The state of the contract, which contains the paused state. + #[casper(private)] + fn state(&self) -> &PausedState; + /// The mutable state of the contract, which allows modifying the paused state. + #[casper(private)] + fn state_mut(&mut self) -> &mut PausedState; + + /// Checks if the contract is paused. + #[casper(private)] + fn paused(&self) -> bool { + self.state().paused + } + + #[casper(private)] + fn pause(&mut self) -> Result<(), PausableError> { + self.enforce_unpaused()?; + self.state_mut().paused = true; + casper::emit(Paused { + entity: casper::get_caller(), + }) + .expect("Emit"); + Ok(()) + } + + #[casper(private)] + fn unpause(&mut self) -> Result<(), PausableError> { + self.enforce_paused()?; + self.state_mut().paused = false; + casper::emit(Unpaused { + entity: casper::get_caller(), + }) + .expect("Emit"); + Ok(()) + } + + #[casper(private)] + fn enforce_paused(&self) -> Result<(), PausableError> { + if self.paused() { + Ok(()) + } else { + Err(PausableError::ExpectedPause) + } + } + + #[casper(private)] + fn enforce_unpaused(&self) -> Result<(), PausableError> { + if !self.paused() { + Ok(()) + } else { + Err(PausableError::EnforcedPause) + } + } +} diff --git a/smart_contracts/sdk/src/lib.rs b/smart_contracts/sdk/src/lib.rs new file mode 100644 index 0000000000..63a1046421 --- /dev/null +++ b/smart_contracts/sdk/src/lib.rs @@ -0,0 +1,367 @@ +#![cfg_attr(not(feature = "std"), no_std)] + +#[cfg(not(feature = "std"))] +#[macro_use] +extern crate alloc; + +pub mod abi; +pub mod prelude; +pub mod serializers; +#[cfg(not(target_arch = "wasm32"))] +pub use linkme; + +#[cfg(not(target_arch = "wasm32"))] +pub mod abi_generator; +pub mod casper; +pub mod collections; +pub mod contrib; +#[cfg(feature = "std")] +pub mod schema; 
+pub mod types; + +use crate::prelude::{marker::PhantomData, ptr::NonNull}; + +use crate::serializers::borsh::{BorshDeserialize, BorshSerialize}; +use casper::{CallResult, Entity}; +pub use casper_contract_macros as macros; +pub use casper_contract_sdk_sys as sys; +pub use casper_executor_wasm_common; +use types::{Address, CallError}; + +cfg_if::cfg_if! { + if #[cfg(feature = "std")] { + #[inline] + pub fn set_panic_hook() { + static SET_HOOK: std::sync::Once = std::sync::Once::new(); + SET_HOOK.call_once(|| { + std::panic::set_hook(Box::new(|panic_info| { + let msg = panic_info.to_string(); + casper::print(&msg); + })); + }); + } + } + else { + pub fn set_panic_hook() { + // TODO: What to do? + } + } +} + +pub fn reserve_vec_space(vec: &mut Vec, size: usize) -> Option> { + if size == 0 { + None + } else { + *vec = Vec::with_capacity(size); + unsafe { + vec.set_len(size); + } + NonNull::new(vec.as_mut_ptr()) + } +} + +pub trait ContractRef { + fn new() -> Self; +} + +pub trait ToCallData { + type Return<'a>; + + fn entry_point(&self) -> &str; + + fn input_data(&self) -> Option>; +} + +/// To derive this contract you have to use `#[casper]` macro on top of impl block. +/// +/// This proc macro handles generation of a manifest. +pub trait Contract { + type Ref: ContractRef; + + fn name() -> &'static str; + fn create( + value: u64, + call_data: T, + ) -> Result, CallError>; + fn default_create() -> Result, CallError>; + fn upgrade(code: Option<&[u8]>, call_data: T) -> Result<(), CallError>; +} + +#[derive(Debug)] +pub enum Access { + Private, + Public, +} + +// A println! like macro that calls `host::print` function. +#[cfg(target_arch = "wasm32")] +#[macro_export] +macro_rules! log { + ($($arg:tt)*) => ({ + $crate::prelude::casper::print(&$crate::prelude::format!($($arg)*)); + }) +} + +#[cfg(not(target_arch = "wasm32"))] +#[macro_export] +macro_rules! 
log { + ($($arg:tt)*) => ({ + eprintln!("📝 {}", &$crate::prelude::format!($($arg)*)); + }) +} + +#[macro_export] +macro_rules! revert { + () => {{ + $crate::casper::ret( + $crate::casper_executor_wasm_common::flags::ReturnFlags::REVERT, + None, + ); + unreachable!() + }}; + ($arg:expr) => {{ + let value = $arg; + let data = + $crate::serializers::borsh::to_vec(&value).expect("Revert value should serialize"); + $crate::casper::ret( + $crate::casper_executor_wasm_common::flags::ReturnFlags::REVERT, + Some(data.as_slice()), + ); + #[allow(unreachable_code)] + value + }}; +} + +pub trait UnwrapOrRevert { + /// Unwraps the value into its inner type or calls [`crate::casper::ret`] with a + /// predetermined error code on failure. + fn unwrap_or_revert(self) -> T; +} + +impl UnwrapOrRevert for Result +where + E: BorshSerialize, +{ + fn unwrap_or_revert(self) -> T { + self.unwrap_or_else(|error| { + let error_data = borsh::to_vec(&error).expect("Revert value should serialize"); + casper::ret( + casper_executor_wasm_common::flags::ReturnFlags::REVERT, + Some(error_data.as_slice()), + ); + unreachable!("Support for unwrap_or_revert") + }) + } +} + +#[derive(Debug)] +pub struct ContractHandle { + contract_address: Address, + marker: PhantomData, +} + +impl ContractHandle { + #[must_use] + pub const fn from_address(contract_address: Address) -> Self { + ContractHandle { + contract_address, + marker: PhantomData, + } + } + + pub fn build_call(&self) -> CallBuilder { + CallBuilder { + address: self.contract_address, + marker: PhantomData, + transferred_value: None, + } + } + + /// A shorthand form to call contracts with default settings. + #[inline] + pub fn call<'a, CallData: ToCallData>( + &self, + func: impl FnOnce(T) -> CallData, + ) -> Result, CallError> + where + CallData::Return<'a>: BorshDeserialize, + { + self.build_call().call(func) + } + + /// A shorthand form to call contracts with default settings. 
+ #[inline] + pub fn try_call( + &self, + func: impl FnOnce(T) -> CallData, + ) -> Result, CallError> { + self.build_call().try_call(func) + } + + #[must_use] + pub fn contract_address(&self) -> Address { + self.contract_address + } + + #[must_use] + pub fn entity(&self) -> Entity { + Entity::Contract(self.contract_address) + } + + /// Returns the balance of the contract. + #[must_use] + pub fn balance(&self) -> u64 { + casper::get_balance_of(&Entity::Contract(self.contract_address)) + } +} + +pub struct CallBuilder { + address: Address, + transferred_value: Option, + marker: PhantomData, +} + +impl CallBuilder { + #[must_use] + pub fn new(address: Address) -> Self { + CallBuilder { + address, + transferred_value: None, + marker: PhantomData, + } + } + + #[must_use] + pub fn with_transferred_value(mut self, transferred_value: u64) -> Self { + self.transferred_value = Some(transferred_value); + self + } + + /// Casts the call builder to a different contract reference. + #[must_use] + pub fn cast(self) -> CallBuilder { + CallBuilder { + address: self.address, + transferred_value: self.transferred_value, + marker: PhantomData, + } + } + + pub fn try_call( + &self, + func: impl FnOnce(T) -> CallData, + ) -> Result, CallError> { + let inst = T::new(); + let call_data = func(inst); + casper::call( + &self.address, + self.transferred_value.unwrap_or(0), + call_data, + ) + } + + pub fn call<'a, CallData: ToCallData>( + &self, + func: impl FnOnce(T) -> CallData, + ) -> Result, CallError> + where + CallData::Return<'a>: BorshDeserialize, + { + let inst = T::new(); + let call_data = func(inst); + let call_result = casper::call( + &self.address, + self.transferred_value.unwrap_or(0), + call_data, + )?; + call_result.into_result() + } +} + +pub struct ContractBuilder<'a, T: ContractRef> { + transferred_value: Option, + code: Option<&'a [u8]>, + seed: Option<&'a [u8; 32]>, + marker: PhantomData, +} + +impl Default for ContractBuilder<'_, T> { + fn default() -> Self { + 
Self::new() + } +} + +impl<'a, T: ContractRef> ContractBuilder<'a, T> { + #[must_use] + pub fn new() -> Self { + ContractBuilder { + transferred_value: None, + code: None, + seed: None, + marker: PhantomData, + } + } + + #[must_use] + pub fn with_transferred_value(mut self, transferred_value: u64) -> Self { + self.transferred_value = Some(transferred_value); + self + } + + #[must_use] + pub fn with_code(mut self, code: &'a [u8]) -> Self { + self.code = Some(code); + self + } + + #[must_use] + pub fn with_seed(mut self, seed: &'a [u8; 32]) -> Self { + self.seed = Some(seed); + self + } + + pub fn create( + &self, + func: impl FnOnce() -> CallData, + ) -> Result, CallError> + where + CallData::Return<'a>: BorshDeserialize, + { + let value = self.transferred_value.unwrap_or(0); + let call_data = func(); + let input_data = call_data.input_data(); + let seed = self.seed; + let create_result = casper::create( + self.code, + value, + Some(call_data.entry_point()), + input_data.as_deref(), + seed, + )?; + Ok(ContractHandle::from_address(create_result.contract_address)) + } + + pub fn default_create(&self) -> Result, CallError> { + if self.transferred_value.is_some() { + panic!("Value should not be set for default create"); + } + + let value = self.transferred_value.unwrap_or(0); + let seed = self.seed; + let create_result = casper::create(self.code, value, None, None, seed)?; + Ok(ContractHandle::from_address(create_result.contract_address)) + } +} + +/// Trait for converting a message data to a string. +pub trait Message: BorshSerialize { + const TOPIC: &'static str; + /// Converts the message data to a string. + fn payload(&self) -> Vec; +} + +#[cfg(test)] +mod tests { + #[test] + fn test_call_builder() {} +} diff --git a/smart_contracts/sdk/src/prelude.rs b/smart_contracts/sdk/src/prelude.rs new file mode 100644 index 0000000000..9194022762 --- /dev/null +++ b/smart_contracts/sdk/src/prelude.rs @@ -0,0 +1,66 @@ +use cfg_if::cfg_if; + +cfg_if! 
{ + if #[cfg(feature = "std")] { + pub use ::std::{format, borrow, string, vec, boxed, fmt, str, marker, ffi, ptr, mem, cmp}; + + pub mod collections { + pub use ::std::collections::btree_map::{self, BTreeMap}; + pub use ::std::collections::{linked_list::{self, LinkedList}}; + pub use ::std::collections::{hash_map::{self, HashMap}}; + pub use ::std::collections::{btree_set::{self, BTreeSet}}; + } + } + else { + pub use ::alloc::{format, borrow, string, vec, boxed, fmt, str}; + + pub use ::core::{marker, ffi, ptr, mem, cmp}; + + pub mod collections { + pub use ::alloc::collections::btree_map::{self, BTreeMap}; + pub use ::alloc::collections::{linked_list::{self, LinkedList}}; + pub use ::alloc::collections::{hash_map::{self, HashMap}}; + pub use ::alloc::collections::{btree_set::{self, BTreeSet}}; + } + } +} + +pub use self::{ + borrow::ToOwned, + boxed::Box, + string::{String, ToString}, + vec::Vec, +}; + +pub use crate::{ + casper::{self, Entity}, + log, + macros::{self, casper, PanicOnDefault}, + revert, +}; + +#[cfg(test)] +mod tests { + + #[test] + fn test_format() { + assert_eq!(super::format!("Hello, {}!", "world"), "Hello, world!"); + } + + #[test] + fn test_string() { + let s = super::String::from("hello"); + assert_eq!(s, "hello"); + } + + #[test] + #[allow(clippy::vec_init_then_push)] + fn test_vec() { + let mut v = super::Vec::new(); + v.push(1); + v.push(2); + assert_eq!(v.len(), 2); + assert_eq!(v[0], 1); + assert_eq!(v[1], 2); + } +} diff --git a/smart_contracts/sdk/src/schema.rs b/smart_contracts/sdk/src/schema.rs new file mode 100644 index 0000000000..f2299e142f --- /dev/null +++ b/smart_contracts/sdk/src/schema.rs @@ -0,0 +1,98 @@ +pub trait CasperSchema { + fn schema() -> Schema; +} + +use std::fmt::LowerHex; + +use bitflags::Flags; +use casper_executor_wasm_common::flags::EntryPointFlags; +use serde::{Deserialize, Deserializer, Serialize, Serializer}; + +use crate::abi::{Declaration, Definitions}; + +pub fn serialize_bits(data: &T, serializer: S) 
-> Result +where + S: Serializer, + T: Flags, + T::Bits: Serialize, +{ + data.bits().serialize(serializer) +} + +pub fn deserialize_bits<'de, D, F>(deserializer: D) -> Result +where + D: Deserializer<'de>, + F: Flags, + F::Bits: Deserialize<'de> + LowerHex, +{ + let raw: F::Bits = F::Bits::deserialize(deserializer)?; + F::from_bits(raw).ok_or(serde::de::Error::custom(format!( + "Unexpected flags value 0x{raw:#08x}" + ))) +} + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Clone)] +pub struct SchemaArgument { + pub name: String, + pub decl: Declaration, +} + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Clone)] +pub struct SchemaEntryPoint { + pub name: String, + pub arguments: Vec, + pub result: Declaration, + #[serde( + serialize_with = "serialize_bits", + deserialize_with = "deserialize_bits" + )] + pub flags: EntryPointFlags, +} + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Clone)] +#[serde(tag = "type")] +pub enum SchemaType { + /// Contract schemas contain a state structure that we want to mark in the schema. + Contract { state: Declaration }, + /// Schemas of interface type does not contain state. + Interface, +} + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Clone)] +pub struct SchemaMessage { + pub name: String, + pub decl: Declaration, +} + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Clone)] +pub struct Schema { + pub name: String, + pub version: Option, + #[serde(rename = "type")] + pub type_: SchemaType, + pub definitions: Definitions, + pub entry_points: Vec, + pub messages: Vec, +} + +#[derive(Debug)] +pub struct EntryPoint<'a, F: Fn()> { + pub name: &'a str, + pub params: &'a [&'a str], + pub func: F, +} + +#[cfg(not(target_family = "wasm"))] +use std::{cell::RefCell, collections::BTreeMap}; + +#[cfg(not(target_family = "wasm"))] +thread_local! 
{ + pub static DISPATCHER: RefCell> = RefCell::default(); +} + +// #[cfg(not(target_family = "wasm"))] +// #[no_mangle] +// pub unsafe fn register_func(name: &str, f: extern "C" fn() -> ()) { +// println!("registering function {}", name); +// DISPATCHER.with(|foo| foo.borrow_mut().insert(name.to_string(), f)); +// } diff --git a/smart_contracts/sdk/src/selector.rs b/smart_contracts/sdk/src/selector.rs new file mode 100644 index 0000000000..e69de29bb2 diff --git a/smart_contracts/sdk/src/serializers.rs b/smart_contracts/sdk/src/serializers.rs new file mode 100644 index 0000000000..79b897c2da --- /dev/null +++ b/smart_contracts/sdk/src/serializers.rs @@ -0,0 +1 @@ +pub use ::borsh; diff --git a/smart_contracts/sdk/src/types.rs b/smart_contracts/sdk/src/types.rs new file mode 100644 index 0000000000..4e573b7b22 --- /dev/null +++ b/smart_contracts/sdk/src/types.rs @@ -0,0 +1,82 @@ +use casper_executor_wasm_common::error::{ + CALLEE_GAS_DEPLETED, CALLEE_NOT_CALLABLE, CALLEE_REVERTED, CALLEE_TRAPPED, +}; + +use crate::{ + abi::{CasperABI, Declaration, Definition, EnumVariant}, + prelude::fmt, + serializers::borsh::{BorshDeserialize, BorshSerialize}, +}; + +pub type Address = [u8; 32]; +pub use bnum::types::U256; + +// Keep in sync with [`casper_executor_wasm_common::error::CallError`]. 
+#[derive(Debug, Copy, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize)] +#[borsh(crate = "crate::serializers::borsh")] +pub enum CallError { + CalleeReverted, + CalleeTrapped, + CalleeGasDepleted, + NotCallable, +} + +impl fmt::Display for CallError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + CallError::CalleeReverted => write!(f, "callee reverted"), + CallError::CalleeTrapped => write!(f, "callee trapped"), + CallError::CalleeGasDepleted => write!(f, "callee gas depleted"), + CallError::NotCallable => write!(f, "not callable"), + } + } +} + +impl TryFrom for CallError { + type Error = (); + + fn try_from(value: u32) -> Result { + match value { + CALLEE_REVERTED => Ok(Self::CalleeReverted), + CALLEE_TRAPPED => Ok(Self::CalleeTrapped), + CALLEE_GAS_DEPLETED => Ok(Self::CalleeGasDepleted), + CALLEE_NOT_CALLABLE => Ok(Self::NotCallable), + _ => Err(()), + } + } +} + +impl CasperABI for CallError { + fn populate_definitions(_definitions: &mut crate::abi::Definitions) {} + + fn declaration() -> Declaration { + "CallError".into() + } + + fn definition() -> Definition { + Definition::Enum { + items: vec![ + EnumVariant { + name: "CalleeReverted".into(), + discriminant: 0, + decl: <()>::declaration(), + }, + EnumVariant { + name: "CalleeTrapped".into(), + discriminant: 1, + decl: <()>::declaration(), + }, + EnumVariant { + name: "CalleeGasDepleted".into(), + discriminant: 2, + decl: <()>::declaration(), + }, + EnumVariant { + name: "CodeNotFound".into(), + discriminant: 3, + decl: <()>::declaration(), + }, + ], + } + } +} diff --git a/smart_contracts/sdk_codegen/Cargo.toml b/smart_contracts/sdk_codegen/Cargo.toml new file mode 100644 index 0000000000..88fb2a93d6 --- /dev/null +++ b/smart_contracts/sdk_codegen/Cargo.toml @@ -0,0 +1,23 @@ +[package] +name = "casper-contract-sdk-codegen" +version = "0.1.3" +edition = "2021" +description = "Casper contract sdk codegen package" +authors = ["Michał Papierski "] +documentation = 
"https://docs.rs/casper-contract-sdk-codegen" +homepage = "https://casper.network" +repository = "https://github.com/casper-network/casper-node/tree/dev/smart_contracts/sdk_codegen" +license = "Apache-2.0" + +[dependencies] +casper-contract-sdk = { version = "0.1.3", path = "../sdk" } +codegen = "0.2.0" +serde = { version = "1", features = ["derive"] } +serde_json = { version = "1", features = ["preserve_order"] } +indexmap = "2.1.0" +syn = "2" + +[dev-dependencies] +trybuild = "1" +tempfile = "3.2.0" +borsh = { version = "1.5", features = ["derive"] } diff --git a/smart_contracts/sdk_codegen/src/lib.rs b/smart_contracts/sdk_codegen/src/lib.rs new file mode 100644 index 0000000000..51c34afa1b --- /dev/null +++ b/smart_contracts/sdk_codegen/src/lib.rs @@ -0,0 +1,614 @@ +pub mod support; + +use casper_contract_sdk::{ + abi::{Declaration, Definition, Primitive}, + casper_executor_wasm_common::flags::EntryPointFlags, + schema::{Schema, SchemaType}, +}; +use codegen::{Field, Scope, Type}; +use indexmap::IndexMap; +use serde::{Deserialize, Serialize}; +use std::{ + collections::{BTreeMap, VecDeque}, + iter, + str::FromStr, +}; + +const DEFAULT_DERIVED_TRAITS: &[&str] = &[ + "Clone", + "Debug", + "PartialEq", + "Eq", + "PartialOrd", + "Ord", + "Hash", + "BorshSerialize", + "BorshDeserialize", +]; + +/// Replaces characters that are not valid in Rust identifiers with underscores. 
+fn slugify_type(input: &str) -> String { + let mut output = String::with_capacity(input.len()); + + for c in input.chars() { + if c.is_ascii_alphanumeric() { + output.push(c); + } else { + output.push('_'); + } + } + + output +} + +#[derive(Debug, Deserialize, Serialize)] +enum Specialized { + Result { ok: Declaration, err: Declaration }, + Option { some: Declaration }, +} + +#[derive(Deserialize, Serialize)] +pub struct Codegen { + schema: Schema, + type_mapping: BTreeMap, + specialized_types: BTreeMap, +} + +impl FromStr for Codegen { + type Err = serde_json::Error; + + fn from_str(s: &str) -> Result { + let schema: Schema = serde_json::from_str(s)?; + Ok(Self::new(schema)) + } +} + +impl Codegen { + pub fn new(schema: Schema) -> Self { + Self { + schema, + type_mapping: Default::default(), + specialized_types: Default::default(), + } + } + + pub fn from_file(path: &str) -> Result { + let file = std::fs::File::open(path)?; + let schema: Schema = serde_json::from_reader(file)?; + Ok(Self::new(schema)) + } + + pub fn gen(&mut self) -> String { + let mut scope = Scope::new(); + + scope.import("borsh", "self"); + scope.import("borsh", "BorshSerialize"); + scope.import("borsh", "BorshDeserialize"); + scope.import("casper_contract_sdk_codegen::support", "IntoResult"); + scope.import("casper_contract_sdk_codegen::support", "IntoOption"); + scope.import("casper_contract_sdk", "Selector"); + scope.import("casper_contract_sdk", "ToCallData"); + + let _head = self + .schema + .definitions + .first() + .expect("No definitions found."); + + match &self.schema.type_ { + SchemaType::Contract { state } => { + if !self.schema.definitions.has_definition(state) { + panic!( + "Missing state definition. 
Expected to find a definition for {}.", + &state + ) + }; + } + SchemaType::Interface => {} + } + + // Initialize a queue with the first definition + let mut queue = VecDeque::new(); + + // Create a set to keep track of processed definitions + let mut processed = std::collections::HashSet::new(); + + let mut graph: IndexMap<_, VecDeque<_>> = IndexMap::new(); + + for (def_index, (next_decl, next_def)) in self.schema.definitions.iter().enumerate() { + println!( + "{def_index}. decl={decl}", + def_index = def_index, + decl = next_decl + ); + + queue.push_back(next_decl); + + while let Some(decl) = queue.pop_front() { + if processed.contains(decl) { + continue; + } + + processed.insert(decl); + graph.entry(next_decl).or_default().push_back(decl); + // graph.find + + match Primitive::from_str(decl) { + Ok(primitive) => { + println!("Processing primitive type {primitive:?}"); + continue; + } + Err(_) => { + // Not a primitive type + } + }; + + let def = self + .schema + .definitions + .get(decl) + .unwrap_or_else(|| panic!("Missing definition for {}", decl)); + + // graph.entry(next_decl).or_default().push(decl); + // println!("Processing type {decl}"); + + // Enqueue all unprocessed definitions that depend on the current definition + match def { + Definition::Primitive(_primitive) => { + continue; + } + Definition::Mapping { key, value } => { + if !processed.contains(key) { + queue.push_front(key); + continue; + } + + if !processed.contains(value) { + queue.push_front(value); + continue; + } + } + Definition::Sequence { decl } => { + queue.push_front(decl); + } + Definition::FixedSequence { length: _, decl } => { + if !processed.contains(decl) { + queue.push_front(decl); + continue; + } + } + Definition::Tuple { items } => { + for item in items { + if !processed.contains(item) { + queue.push_front(item); + continue; + } + } + + // queue.push_front(decl); + } + Definition::Enum { items } => { + for item in items { + if !processed.contains(&item.decl) { + 
queue.push_front(&item.decl); + continue; + } + } + } + Definition::Struct { items } => { + for item in items { + if !processed.contains(&item.decl) { + queue.push_front(&item.decl); + continue; + } + } + } + } + } + + match next_def { + Definition::Primitive(_) => {} + Definition::Mapping { key, value } => { + assert!(processed.contains(key)); + assert!(processed.contains(value)); + } + Definition::Sequence { decl } => { + assert!(processed.contains(decl)); + } + Definition::FixedSequence { length: _, decl } => { + assert!(processed.contains(decl)); + } + Definition::Tuple { items } => { + for item in items { + assert!(processed.contains(&item)); + } + } + Definition::Enum { items } => { + for item in items { + assert!(processed.contains(&item.decl)); + } + } + Definition::Struct { items } => { + for item in items { + assert!(processed.contains(&item.decl)); + } + } + } + } + dbg!(&graph); + + let mut counter = iter::successors(Some(0usize), |prev| prev.checked_add(1)); + + for (_decl, deps) in graph { + for decl in deps.into_iter().rev() { + // println!("generate {decl}"); + + let def = self + .schema + .definitions + .get(decl) + .cloned() + .or_else(|| Primitive::from_str(decl).ok().map(Definition::Primitive)) + .unwrap_or_else(|| panic!("Missing definition for {}", decl)); + + match def { + Definition::Primitive(primitive) => { + let (from, to) = match primitive { + Primitive::Char => ("Char", "char"), + Primitive::U8 => ("U8", "u8"), + Primitive::I8 => ("I8", "i8"), + Primitive::U16 => ("U16", "u16"), + Primitive::I16 => ("I16", "i16"), + Primitive::U32 => ("U32", "u32"), + Primitive::I32 => ("I32", "i32"), + Primitive::U64 => ("U64", "u64"), + Primitive::I64 => ("I64", "i64"), + Primitive::U128 => ("U128", "u128"), + Primitive::I128 => ("I128", "i128"), + Primitive::Bool => ("Bool", "bool"), + Primitive::F32 => ("F32", "f32"), + Primitive::F64 => ("F64", "f64"), + }; + + scope.new_type_alias(from, to).vis("pub"); + self.type_mapping.insert(decl.to_string(), 
from.to_string()); + } + Definition::Mapping { key: _, value: _ } => { + // println!("Processing mapping type {key:?} -> {value:?}"); + todo!() + } + Definition::Sequence { decl: seq_decl } => { + println!("Processing sequence type {decl:?}"); + if decl.as_str() == "String" + && Primitive::from_str(&seq_decl) == Ok(Primitive::Char) + { + self.type_mapping + .insert("String".to_owned(), "String".to_owned()); + } else { + let mapped_type = self + .type_mapping + .get(&seq_decl) + .unwrap_or_else(|| panic!("Missing type mapping for {}", seq_decl)); + let type_name = + format!("Sequence{}_{seq_decl}", counter.next().unwrap()); + scope.new_type_alias(&type_name, format!("Vec<{}>", mapped_type)); + self.type_mapping.insert(decl.to_string(), type_name); + } + } + Definition::FixedSequence { + length, + decl: fixed_seq_decl, + } => { + let mapped_type = + self.type_mapping.get(&fixed_seq_decl).unwrap_or_else(|| { + panic!("Missing type mapping for {}", fixed_seq_decl) + }); + + let type_name = format!( + "FixedSequence{}_{length}_{fixed_seq_decl}", + counter.next().unwrap() + ); + scope.new_type_alias(&type_name, format!("[{}; {}]", mapped_type, length)); + self.type_mapping.insert(decl.to_string(), type_name); + } + Definition::Tuple { items } => { + if decl.as_str() == "()" && items.is_empty() { + self.type_mapping.insert("()".to_owned(), "()".to_owned()); + continue; + } + + println!("Processing tuple type {items:?}"); + let struct_name = slugify_type(decl); + + let r#struct = scope + .new_struct(&struct_name) + .doc(&format!("Declared as {decl}")); + + for trait_name in DEFAULT_DERIVED_TRAITS { + r#struct.derive(trait_name); + } + + if items.is_empty() { + r#struct.tuple_field(Type::new("()")); + } else { + for item in items { + let mapped_type = self + .type_mapping + .get(&item) + .unwrap_or_else(|| panic!("Missing type mapping for {}", item)); + r#struct.tuple_field(mapped_type); + } + } + + self.type_mapping.insert(decl.to_string(), struct_name); + } + 
Definition::Enum { items } => { + println!("Processing enum type {decl} {items:?}"); + + let mut items: Vec<&casper_contract_sdk::abi::EnumVariant> = + items.iter().collect(); + + let mut specialized = None; + + if decl.starts_with("Result") + && items.len() == 2 + && items[0].name == "Ok" + && items[1].name == "Err" + { + specialized = Some(Specialized::Result { + ok: items[0].decl.clone(), + err: items[1].decl.clone(), + }); + + // NOTE: Because we're not doing the standard library Result, and also + // to simplify things we're using default impl of + // BorshSerialize/BorshDeserialize, we have to flip the order of enums. + // The standard library defines Result as Ok, Err, but the borsh impl + // serializes Err as 0, and Ok as 1. So, by flipping the order we can + // enforce byte for byte compatibility between our "custom" Result and a + // real Result. + items.reverse(); + } + + if decl.starts_with("Option") + && items.len() == 2 + && items[0].name == "None" + && items[1].name == "Some" + { + specialized = Some(Specialized::Option { + some: items[1].decl.clone(), + }); + + items.reverse(); + } + + let enum_name = slugify_type(decl); + + let r#enum = scope + .new_enum(&enum_name) + .vis("pub") + .doc(&format!("Declared as {decl}")); + + for trait_name in DEFAULT_DERIVED_TRAITS { + r#enum.derive(trait_name); + } + + for item in &items { + let variant = r#enum.new_variant(&item.name); + + let def = self.type_mapping.get(&item.decl).unwrap_or_else(|| { + panic!("Missing type mapping for {}", item.decl) + }); + + variant.tuple(def); + } + + self.type_mapping + .insert(decl.to_string(), enum_name.to_owned()); + + match specialized { + Some(Specialized::Result { ok, err }) => { + let ok_type = self + .type_mapping + .get(&ok) + .unwrap_or_else(|| panic!("Missing type mapping for {}", ok)); + let err_type = self + .type_mapping + .get(&err) + .unwrap_or_else(|| panic!("Missing type mapping for {}", err)); + + let impl_block = scope + .new_impl(&enum_name) + 
.impl_trait(format!("IntoResult<{ok_type}, {err_type}>")); + + let func = impl_block.new_fn("into_result").arg_self().ret( + Type::new(format!( + "Result<{ok_type}, {err_type}>", + ok_type = ok_type, + err_type = err_type + )), + ); + func.line("match self {") + .line(format!("{enum_name}::Ok(ok) => Ok(ok),")) + .line(format!("{enum_name}::Err(err) => Err(err),")) + .line("}"); + } + Some(Specialized::Option { some }) => { + let some_type = self.type_mapping.get(&some).unwrap_or_else(|| { + panic!("Missing type mapping for {}", &some) + }); + + let impl_block = scope + .new_impl(&enum_name) + .impl_trait(format!("IntoOption<{some_type}>")); + + let func = impl_block + .new_fn("into_option") + .arg_self() + .ret(Type::new(format!("Option<{some_type}>",))); + func.line("match self {") + .line(format!("{enum_name}::None => None,")) + .line(format!("{enum_name}::Some(some) => Some(some),")) + .line("}"); + } + None => {} + } + } + Definition::Struct { items } => { + println!("Processing struct type {items:?}"); + + let type_name = slugify_type(decl); + + let r#struct = scope.new_struct(&type_name); + + for trait_name in DEFAULT_DERIVED_TRAITS { + r#struct.derive(trait_name); + } + + for item in items { + let mapped_type = + self.type_mapping.get(&item.decl).unwrap_or_else(|| { + panic!("Missing type mapping for {}", item.decl) + }); + let field = Field::new(&item.name, Type::new(mapped_type)) + .doc(format!("Declared as {}", item.decl)) + .to_owned(); + + r#struct.push_field(field); + } + self.type_mapping.insert(decl.to_string(), type_name); + } + } + } + } + + let struct_name = format!("{}Client", self.schema.name); + let client = scope.new_struct(&struct_name).vis("pub"); + + for trait_name in DEFAULT_DERIVED_TRAITS { + client.derive(trait_name); + } + + let mut field = Field::new("address", Type::new("[u8; 32]")); + field.vis("pub"); + + client.push_field(field); + + let client_impl = scope.new_impl(&struct_name); + + for entry_point in &self.schema.entry_points { 
+ let func = client_impl.new_fn(&entry_point.name); + func.vis("pub"); + + let result_type = self + .type_mapping + .get(&entry_point.result) + .unwrap_or_else(|| panic!("Missing type mapping for {}", entry_point.result)); + + if entry_point.flags.contains(EntryPointFlags::CONSTRUCTOR) { + func.ret(Type::new(format!( + "Result<{}, casper_contract_sdk::types::CallError>", + &struct_name + ))) + .generic("C") + .bound("C", "casper_contract_sdk::Contract"); + } else { + func.ret(Type::new(format!( + "Result, casper_contract_sdk::types::CallError>" + ))); + func.arg_ref_self(); + } + + for arg in &entry_point.arguments { + let mapped_type = self + .type_mapping + .get(&arg.decl) + .unwrap_or_else(|| panic!("Missing type mapping for {}", arg.decl)); + let arg_ty = Type::new(mapped_type); + func.arg(&arg.name, arg_ty); + } + + func.line("let value = 0; // TODO: Transferring values"); + + let input_struct_name = + format!("{}_{}", slugify_type(&self.schema.name), &entry_point.name); + + if entry_point.arguments.is_empty() { + func.line(format!(r#"let call_data = {input_struct_name};"#)); + } else { + func.line(format!(r#"let call_data = {input_struct_name} {{ "#)); + for arg in &entry_point.arguments { + func.line(format!("{},", arg.name)); + } + func.line("};"); + } + + if entry_point.flags.contains(EntryPointFlags::CONSTRUCTOR) { + // if !entry_point.arguments.is_empty() { + // func.line(r#"let create_result = C::create(SELECTOR, Some(&input_data))?;"#); + // } else { + func.line(r#"let create_result = C::create(call_data)?;"#); + // } + + func.line(format!( + r#"let result = {struct_name} {{ address: create_result.contract_address }};"#, + struct_name = &struct_name + )); + func.line("Ok(result)"); + continue; + } else { + func.line(r#"casper_contract_sdk::host::call(&self.address, value, call_data)"#); + } + } + + for entry_point in &self.schema.entry_points { + // Generate arg structure similar to what casper-contract-macros is doing + let struct_name = 
format!("{}_{}", &self.schema.name, &entry_point.name); + let input_struct = scope.new_struct(&struct_name); + + for trait_name in DEFAULT_DERIVED_TRAITS { + input_struct.derive(trait_name); + } + + for argument in &entry_point.arguments { + let mapped_type = self.type_mapping.get(&argument.decl).unwrap_or_else(|| { + panic!( + "Missing type mapping for {} when generating input arg {}", + argument.decl, &struct_name + ) + }); + input_struct.push_field(Field::new(&argument.name, Type::new(mapped_type))); + } + + let impl_block = scope.new_impl(&struct_name).impl_trait("ToCallData"); + + let input_data_func = impl_block + .new_fn("input_data") + .arg_ref_self() + .ret(Type::new("Option>")); + + if entry_point.arguments.is_empty() { + input_data_func.line(r#"None"#); + } else { + input_data_func + .line(r#"let input_data = borsh::to_vec(&self).expect("Serialization to succeed");"#) + .line(r#"Some(input_data)"#); + } + } + + scope.to_string() + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn should_slugify_complex_type() { + let input = "Option>"; + let expected = "Option_Result_____vm2_cep18__error__Cep18Error__"; + + assert_eq!(slugify_type(input), expected); + } +} diff --git a/smart_contracts/sdk_codegen/src/support.rs b/smart_contracts/sdk_codegen/src/support.rs new file mode 100644 index 0000000000..2f981f9295 --- /dev/null +++ b/smart_contracts/sdk_codegen/src/support.rs @@ -0,0 +1,68 @@ +//! Support library for generated code. 
+ +pub trait IntoResult { + fn into_result(self) -> Result; +} + +pub trait IntoOption { + fn into_option(self) -> Option; +} + +#[cfg(test)] +mod tests { + use super::*; + + #[derive(Debug, PartialEq, Eq)] + struct MyOk; + #[derive(Debug, PartialEq, Eq)] + struct MyErr; + + #[derive(Debug, PartialEq, Eq)] + + enum CustomResult { + Ok(MyOk), + Err(MyErr), + } + + #[derive(Debug, PartialEq, Eq)] + enum CustomOption { + Some(MyOk), + None, + } + + impl IntoResult for CustomResult { + fn into_result(self) -> Result { + match self { + CustomResult::Ok(ok) => Ok(ok), + CustomResult::Err(err) => Err(err), + } + } + } + + impl IntoOption for CustomOption { + fn into_option(self) -> Option { + match self { + CustomOption::Some(value) => Some(value), + CustomOption::None => None, + } + } + } + + #[test] + fn test_into_result() { + let ok = CustomResult::Ok(MyOk); + let err = CustomResult::Err(MyErr); + + assert_eq!(ok.into_result(), Ok(MyOk)); + assert_eq!(err.into_result(), Err(MyErr)); + } + + #[test] + fn test_into_option() { + let some = CustomOption::Some(MyOk); + let none = CustomOption::None; + + assert_eq!(some.into_option(), Some(MyOk)); + assert_eq!(none.into_option(), None); + } +} diff --git a/smart_contracts/sdk_codegen/tests/fixtures/cep18_schema.json b/smart_contracts/sdk_codegen/tests/fixtures/cep18_schema.json new file mode 100644 index 0000000000..3be1ff7057 --- /dev/null +++ b/smart_contracts/sdk_codegen/tests/fixtures/cep18_schema.json @@ -0,0 +1,428 @@ +{ + "name": "TokenContract", + "version": "0.1.0", + "type": { + "type": "Contract", + "state": "vm2_cep18::contract::TokenContract" + }, + "definitions": { + "()": { + "type": "Tuple", + "items": [] + }, + "([U8; 32], [U8; 32])": { + "type": "Tuple", + "items": [ + "[U8; 32]", + "[U8; 32]" + ] + }, + "Bool": { + "type": "Primitive", + "Bool": null + }, + "Map<([U8; 32], [U8; 32]), U64>": { + "type": "Struct", + "items": [ + { + "name": "prefix", + "decl": "U64" + } + ] + }, + "Map<[U8; 32], U64>": { + 
"type": "Struct", + "items": [ + { + "name": "prefix", + "decl": "U64" + } + ] + }, + "Map<[U8; 32], vm2_cep18::security_badge::SecurityBadge>": { + "type": "Struct", + "items": [ + { + "name": "prefix", + "decl": "U64" + } + ] + }, + "Result<(), vm2_cep18::error::Cep18Error>": { + "type": "Enum", + "items": [ + { + "name": "Ok", + "discriminant": 0, + "decl": "()" + }, + { + "name": "Err", + "discriminant": 1, + "decl": "vm2_cep18::error::Cep18Error" + } + ] + }, + "String": { + "type": "Sequence", + "decl": "Char" + }, + "U64": { + "type": "Primitive", + "U64": null + }, + "U8": { + "type": "Primitive", + "U8": null + }, + "[U8; 32]": { + "type": "FixedSequence", + "length": 32, + "decl": "U8" + }, + "vm2_cep18::contract::TokenContract": { + "type": "Struct", + "items": [ + { + "name": "state", + "decl": "vm2_cep18::traits::CEP18State" + } + ] + }, + "vm2_cep18::error::Cep18Error": { + "type": "Enum", + "items": [ + { + "name": "InvalidContext", + "discriminant": 0, + "decl": "()" + }, + { + "name": "InsufficientBalance", + "discriminant": 1, + "decl": "()" + }, + { + "name": "InsufficientAllowance", + "discriminant": 2, + "decl": "()" + }, + { + "name": "Overflow", + "discriminant": 3, + "decl": "()" + }, + { + "name": "PackageHashMissing", + "discriminant": 4, + "decl": "()" + }, + { + "name": "PackageHashNotPackage", + "discriminant": 5, + "decl": "()" + }, + { + "name": "InvalidEventsMode", + "discriminant": 6, + "decl": "()" + }, + { + "name": "MissingEventsMode", + "discriminant": 7, + "decl": "()" + }, + { + "name": "Phantom", + "discriminant": 8, + "decl": "()" + }, + { + "name": "FailedToGetArgBytes", + "discriminant": 9, + "decl": "()" + }, + { + "name": "InsufficientRights", + "discriminant": 10, + "decl": "()" + }, + { + "name": "InvalidAdminList", + "discriminant": 11, + "decl": "()" + }, + { + "name": "InvalidMinterList", + "discriminant": 12, + "decl": "()" + }, + { + "name": "InvalidNoneList", + "discriminant": 13, + "decl": "()" + }, + { + 
"name": "InvalidEnableMBFlag", + "discriminant": 14, + "decl": "()" + }, + { + "name": "AlreadyInitialized", + "discriminant": 15, + "decl": "()" + }, + { + "name": "MintBurnDisabled", + "discriminant": 16, + "decl": "()" + }, + { + "name": "CannotTargetSelfUser", + "discriminant": 17, + "decl": "()" + }, + { + "name": "InvalidBurnTarget", + "discriminant": 18, + "decl": "()" + } + ] + }, + "vm2_cep18::security_badge::SecurityBadge": { + "type": "Enum", + "items": [ + { + "name": "Admin", + "discriminant": 0, + "decl": "()" + }, + { + "name": "Minter", + "discriminant": 1, + "decl": "()" + }, + { + "name": "None", + "discriminant": 2, + "decl": "()" + } + ] + }, + "vm2_cep18::traits::CEP18State": { + "type": "Struct", + "items": [ + { + "name": "name", + "decl": "String" + }, + { + "name": "symbol", + "decl": "String" + }, + { + "name": "decimals", + "decl": "U8" + }, + { + "name": "total_supply", + "decl": "U64" + }, + { + "name": "balances", + "decl": "Map<[U8; 32], U64>" + }, + { + "name": "allowances", + "decl": "Map<([U8; 32], [U8; 32]), U64>" + }, + { + "name": "security_badges", + "decl": "Map<[U8; 32], vm2_cep18::security_badge::SecurityBadge>" + }, + { + "name": "enable_mint_burn", + "decl": "Bool" + } + ] + } + }, + "entry_points": [ + { + "name": "new", + "arguments": [ + { + "name": "token_name", + "decl": "String" + } + ], + "result": "vm2_cep18::contract::TokenContract", + "flags": 1 + }, + { + "name": "my_balance", + "arguments": [], + "result": "U64", + "flags": 0 + }, + { + "name": "name", + "arguments": [], + "result": "String", + "flags": 0 + }, + { + "name": "symbol", + "arguments": [], + "result": "String", + "flags": 0 + }, + { + "name": "decimals", + "arguments": [], + "result": "U8", + "flags": 0 + }, + { + "name": "total_supply", + "arguments": [], + "result": "U64", + "flags": 0 + }, + { + "name": "balance_of", + "arguments": [ + { + "name": "address", + "decl": "[U8; 32]" + } + ], + "result": "U64", + "flags": 0 + }, + { + "name": 
"allowance", + "arguments": [ + { + "name": "spender", + "decl": "[U8; 32]" + }, + { + "name": "owner", + "decl": "[U8; 32]" + } + ], + "result": "()", + "flags": 0 + }, + { + "name": "approve", + "arguments": [ + { + "name": "spender", + "decl": "[U8; 32]" + }, + { + "name": "amount", + "decl": "U64" + } + ], + "result": "Result<(), vm2_cep18::error::Cep18Error>", + "flags": 0 + }, + { + "name": "decrease_allowance", + "arguments": [ + { + "name": "spender", + "decl": "[U8; 32]" + }, + { + "name": "amount", + "decl": "U64" + } + ], + "result": "Result<(), vm2_cep18::error::Cep18Error>", + "flags": 0 + }, + { + "name": "increase_allowance", + "arguments": [ + { + "name": "spender", + "decl": "[U8; 32]" + }, + { + "name": "amount", + "decl": "U64" + } + ], + "result": "Result<(), vm2_cep18::error::Cep18Error>", + "flags": 0 + }, + { + "name": "transfer", + "arguments": [ + { + "name": "recipient", + "decl": "[U8; 32]" + }, + { + "name": "amount", + "decl": "U64" + } + ], + "result": "Result<(), vm2_cep18::error::Cep18Error>", + "flags": 0 + }, + { + "name": "transfer_from", + "arguments": [ + { + "name": "owner", + "decl": "[U8; 32]" + }, + { + "name": "recipient", + "decl": "[U8; 32]" + }, + { + "name": "amount", + "decl": "U64" + } + ], + "result": "Result<(), vm2_cep18::error::Cep18Error>", + "flags": 0 + }, + { + "name": "mint", + "arguments": [ + { + "name": "owner", + "decl": "[U8; 32]" + }, + { + "name": "amount", + "decl": "U64" + } + ], + "result": "Result<(), vm2_cep18::error::Cep18Error>", + "flags": 0 + }, + { + "name": "burn", + "arguments": [ + { + "name": "owner", + "decl": "[U8; 32]" + }, + { + "name": "amount", + "decl": "U64" + } + ], + "result": "Result<(), vm2_cep18::error::Cep18Error>", + "flags": 0 + } + ] +} diff --git a/smart_contracts/sdk_codegen/tests/fixtures/cep18_schema.rs b/smart_contracts/sdk_codegen/tests/fixtures/cep18_schema.rs new file mode 100644 index 0000000000..07ff3145b4 --- /dev/null +++ 
b/smart_contracts/sdk_codegen/tests/fixtures/cep18_schema.rs @@ -0,0 +1,438 @@ +#![allow(dead_code, unused_variables, non_camel_case_types)]use borsh::{self, BorshSerialize, BorshDeserialize}; +use casper_contract_sdk_codegen::support::{IntoResult, IntoOption}; +use casper_contract_sdk::{Selector, ToCallData}; + +pub type U8 = u8; +type FixedSequence0_32_U8 = [U8; 32]; +/// Declared as ([U8; 32], [U8; 32]) +#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, BorshSerialize, BorshDeserialize)] +struct __U8__32____U8__32__(FixedSequence0_32_U8, FixedSequence0_32_U8); + +pub type Bool = bool; +pub type U64 = u64; +#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, BorshSerialize, BorshDeserialize)] +struct Map___U8__32____U8__32____U64_ { + /// Declared as U64 + prefix: U64, +} + +#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, BorshSerialize, BorshDeserialize)] +struct Map__U8__32___U64_ { + /// Declared as U64 + prefix: U64, +} + +#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, BorshSerialize, BorshDeserialize)] +struct Map__U8__32___vm2_cep18__security_badge__SecurityBadge_ { + /// Declared as U64 + prefix: U64, +} + +/// Declared as vm2_cep18::error::Cep18Error +#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, BorshSerialize, BorshDeserialize)] +pub enum vm2_cep18__error__Cep18Error { + InvalidContext(()), + InsufficientBalance(()), + InsufficientAllowance(()), + Overflow(()), + PackageHashMissing(()), + PackageHashNotPackage(()), + InvalidEventsMode(()), + MissingEventsMode(()), + Phantom(()), + FailedToGetArgBytes(()), + InsufficientRights(()), + InvalidAdminList(()), + InvalidMinterList(()), + InvalidNoneList(()), + InvalidEnableMBFlag(()), + AlreadyInitialized(()), + MintBurnDisabled(()), + CannotTargetSelfUser(()), + InvalidBurnTarget(()), +} + +/// Declared as Result<(), vm2_cep18::error::Cep18Error> +#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, BorshSerialize, BorshDeserialize)] +pub 
enum Result_____vm2_cep18__error__Cep18Error_ { + Err(vm2_cep18__error__Cep18Error), + Ok(()), +} + +impl IntoResult<(), vm2_cep18__error__Cep18Error> for Result_____vm2_cep18__error__Cep18Error_ { + fn into_result(self) -> Result<(), vm2_cep18__error__Cep18Error> { + match self { + Result_____vm2_cep18__error__Cep18Error_::Ok(ok) => Ok(ok), + Result_____vm2_cep18__error__Cep18Error_::Err(err) => Err(err), + } + } +} + +pub type Char = char; +#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, BorshSerialize, BorshDeserialize)] +struct vm2_cep18__traits__CEP18State { + /// Declared as String + name: String, + /// Declared as String + symbol: String, + /// Declared as U8 + decimals: U8, + /// Declared as U64 + total_supply: U64, + /// Declared as Map<[U8; 32], U64> + balances: Map__U8__32___U64_, + /// Declared as Map<([U8; 32], [U8; 32]), U64> + allowances: Map___U8__32____U8__32____U64_, + /// Declared as Map<[U8; 32], vm2_cep18::security_badge::SecurityBadge> + security_badges: Map__U8__32___vm2_cep18__security_badge__SecurityBadge_, + /// Declared as Bool + enable_mint_burn: Bool, +} + +#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, BorshSerialize, BorshDeserialize)] +struct vm2_cep18__contract__TokenContract { + /// Declared as vm2_cep18::traits::CEP18State + state: vm2_cep18__traits__CEP18State, +} + +/// Declared as vm2_cep18::security_badge::SecurityBadge +#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, BorshSerialize, BorshDeserialize)] +pub enum vm2_cep18__security_badge__SecurityBadge { + Admin(()), + Minter(()), + None(()), +} + +#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, BorshSerialize, BorshDeserialize)] +pub struct TokenContractClient { + pub address: [u8; 32], +} + +impl TokenContractClient { + pub fn new(token_name: String) -> Result + where C: casper_contract_sdk::Contract, + { + const SELECTOR: Selector = Selector::new(2611912030); + let value = 0; // TODO: Transferring values + let call_data = 
TokenContract_new { + token_name, + }; + let create_result = C::create(call_data)?; + let result = TokenContractClient { address: create_result.contract_address }; + Ok(result) + } + + pub fn my_balance(&self) -> Result, casper_contract_sdk::types::CallError> { + const SELECTOR: Selector = Selector::new(926069361); + let value = 0; // TODO: Transferring values + let call_data = TokenContract_my_balance; + casper_contract_sdk::host::call(&self.address, value, call_data) + } + + pub fn name(&self) -> Result, casper_contract_sdk::types::CallError> { + const SELECTOR: Selector = Selector::new(987428621); + let value = 0; // TODO: Transferring values + let call_data = TokenContract_name; + casper_contract_sdk::host::call(&self.address, value, call_data) + } + + pub fn symbol(&self) -> Result, casper_contract_sdk::types::CallError> { + const SELECTOR: Selector = Selector::new(2614203198); + let value = 0; // TODO: Transferring values + let call_data = TokenContract_symbol; + casper_contract_sdk::host::call(&self.address, value, call_data) + } + + pub fn decimals(&self) -> Result, casper_contract_sdk::types::CallError> { + const SELECTOR: Selector = Selector::new(2176884103); + let value = 0; // TODO: Transferring values + let call_data = TokenContract_decimals; + casper_contract_sdk::host::call(&self.address, value, call_data) + } + + pub fn total_supply(&self) -> Result, casper_contract_sdk::types::CallError> { + const SELECTOR: Selector = Selector::new(3680728488); + let value = 0; // TODO: Transferring values + let call_data = TokenContract_total_supply; + casper_contract_sdk::host::call(&self.address, value, call_data) + } + + pub fn balance_of(&self, address: FixedSequence0_32_U8) -> Result, casper_contract_sdk::types::CallError> { + const SELECTOR: Selector = Selector::new(259349078); + let value = 0; // TODO: Transferring values + let call_data = TokenContract_balance_of { + address, + }; + casper_contract_sdk::host::call(&self.address, value, call_data) + } + + 
pub fn allowance(&self, spender: FixedSequence0_32_U8, owner: FixedSequence0_32_U8) -> Result, casper_contract_sdk::types::CallError> { + const SELECTOR: Selector = Selector::new(1778390622); + let value = 0; // TODO: Transferring values + let call_data = TokenContract_allowance { + spender, + owner, + }; + casper_contract_sdk::host::call(&self.address, value, call_data) + } + + pub fn approve(&self, spender: FixedSequence0_32_U8, amount: U64) -> Result, casper_contract_sdk::types::CallError> { + const SELECTOR: Selector = Selector::new(1746036384); + let value = 0; // TODO: Transferring values + let call_data = TokenContract_approve { + spender, + amount, + }; + casper_contract_sdk::host::call(&self.address, value, call_data) + } + + pub fn decrease_allowance(&self, spender: FixedSequence0_32_U8, amount: U64) -> Result, casper_contract_sdk::types::CallError> { + const SELECTOR: Selector = Selector::new(4187548633); + let value = 0; // TODO: Transferring values + let call_data = TokenContract_decrease_allowance { + spender, + amount, + }; + casper_contract_sdk::host::call(&self.address, value, call_data) + } + + pub fn increase_allowance(&self, spender: FixedSequence0_32_U8, amount: U64) -> Result, casper_contract_sdk::types::CallError> { + const SELECTOR: Selector = Selector::new(4115780642); + let value = 0; // TODO: Transferring values + let call_data = TokenContract_increase_allowance { + spender, + amount, + }; + casper_contract_sdk::host::call(&self.address, value, call_data) + } + + pub fn transfer(&self, recipient: FixedSequence0_32_U8, amount: U64) -> Result, casper_contract_sdk::types::CallError> { + const SELECTOR: Selector = Selector::new(2225167777); + let value = 0; // TODO: Transferring values + let call_data = TokenContract_transfer { + recipient, + amount, + }; + casper_contract_sdk::host::call(&self.address, value, call_data) + } + + pub fn transfer_from(&self, owner: FixedSequence0_32_U8, recipient: FixedSequence0_32_U8, amount: U64) -> Result, 
casper_contract_sdk::types::CallError> { + const SELECTOR: Selector = Selector::new(188313368); + let value = 0; // TODO: Transferring values + let call_data = TokenContract_transfer_from { + owner, + recipient, + amount, + }; + casper_contract_sdk::host::call(&self.address, value, call_data) + } + + pub fn mint(&self, owner: FixedSequence0_32_U8, amount: U64) -> Result, casper_contract_sdk::types::CallError> { + const SELECTOR: Selector = Selector::new(3487406754); + let value = 0; // TODO: Transferring values + let call_data = TokenContract_mint { + owner, + amount, + }; + casper_contract_sdk::host::call(&self.address, value, call_data) + } + + pub fn burn(&self, owner: FixedSequence0_32_U8, amount: U64) -> Result, casper_contract_sdk::types::CallError> { + const SELECTOR: Selector = Selector::new(2985279867); + let value = 0; // TODO: Transferring values + let call_data = TokenContract_burn { + owner, + amount, + }; + casper_contract_sdk::host::call(&self.address, value, call_data) + } +} + +#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, BorshSerialize, BorshDeserialize)] +struct TokenContract_new { + token_name: String, +} + +impl ToCallData for TokenContract_new { + const SELECTOR: Selector = Selector::new(2611912030); + fn input_data(&self) -> Option> { + let input_data = borsh::to_vec(&self).expect("Serialization to succeed"); + Some(input_data) + } +} + +#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, BorshSerialize, BorshDeserialize)] +struct TokenContract_my_balance; + +impl ToCallData for TokenContract_my_balance { + const SELECTOR: Selector = Selector::new(926069361); + fn input_data(&self) -> Option> { + None + } +} + +#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, BorshSerialize, BorshDeserialize)] +struct TokenContract_name; + +impl ToCallData for TokenContract_name { + const SELECTOR: Selector = Selector::new(987428621); + fn input_data(&self) -> Option> { + None + } +} + +#[derive(Clone, Debug, PartialEq, 
Eq, PartialOrd, Ord, Hash, BorshSerialize, BorshDeserialize)] +struct TokenContract_symbol; + +impl ToCallData for TokenContract_symbol { + const SELECTOR: Selector = Selector::new(2614203198); + fn input_data(&self) -> Option> { + None + } +} + +#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, BorshSerialize, BorshDeserialize)] +struct TokenContract_decimals; + +impl ToCallData for TokenContract_decimals { + const SELECTOR: Selector = Selector::new(2176884103); + fn input_data(&self) -> Option> { + None + } +} + +#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, BorshSerialize, BorshDeserialize)] +struct TokenContract_total_supply; + +impl ToCallData for TokenContract_total_supply { + const SELECTOR: Selector = Selector::new(3680728488); + fn input_data(&self) -> Option> { + None + } +} + +#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, BorshSerialize, BorshDeserialize)] +struct TokenContract_balance_of { + address: FixedSequence0_32_U8, +} + +impl ToCallData for TokenContract_balance_of { + const SELECTOR: Selector = Selector::new(259349078); + fn input_data(&self) -> Option> { + let input_data = borsh::to_vec(&self).expect("Serialization to succeed"); + Some(input_data) + } +} + +#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, BorshSerialize, BorshDeserialize)] +struct TokenContract_allowance { + spender: FixedSequence0_32_U8, + owner: FixedSequence0_32_U8, +} + +impl ToCallData for TokenContract_allowance { + const SELECTOR: Selector = Selector::new(1778390622); + fn input_data(&self) -> Option> { + let input_data = borsh::to_vec(&self).expect("Serialization to succeed"); + Some(input_data) + } +} + +#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, BorshSerialize, BorshDeserialize)] +struct TokenContract_approve { + spender: FixedSequence0_32_U8, + amount: U64, +} + +impl ToCallData for TokenContract_approve { + const SELECTOR: Selector = Selector::new(1746036384); + fn input_data(&self) -> Option> { 
+ let input_data = borsh::to_vec(&self).expect("Serialization to succeed"); + Some(input_data) + } +} + +#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, BorshSerialize, BorshDeserialize)] +struct TokenContract_decrease_allowance { + spender: FixedSequence0_32_U8, + amount: U64, +} + +impl ToCallData for TokenContract_decrease_allowance { + const SELECTOR: Selector = Selector::new(4187548633); + fn input_data(&self) -> Option> { + let input_data = borsh::to_vec(&self).expect("Serialization to succeed"); + Some(input_data) + } +} + +#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, BorshSerialize, BorshDeserialize)] +struct TokenContract_increase_allowance { + spender: FixedSequence0_32_U8, + amount: U64, +} + +impl ToCallData for TokenContract_increase_allowance { + const SELECTOR: Selector = Selector::new(4115780642); + fn input_data(&self) -> Option> { + let input_data = borsh::to_vec(&self).expect("Serialization to succeed"); + Some(input_data) + } +} + +#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, BorshSerialize, BorshDeserialize)] +struct TokenContract_transfer { + recipient: FixedSequence0_32_U8, + amount: U64, +} + +impl ToCallData for TokenContract_transfer { + const SELECTOR: Selector = Selector::new(2225167777); + fn input_data(&self) -> Option> { + let input_data = borsh::to_vec(&self).expect("Serialization to succeed"); + Some(input_data) + } +} + +#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, BorshSerialize, BorshDeserialize)] +struct TokenContract_transfer_from { + owner: FixedSequence0_32_U8, + recipient: FixedSequence0_32_U8, + amount: U64, +} + +impl ToCallData for TokenContract_transfer_from { + const SELECTOR: Selector = Selector::new(188313368); + fn input_data(&self) -> Option> { + let input_data = borsh::to_vec(&self).expect("Serialization to succeed"); + Some(input_data) + } +} + +#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, BorshSerialize, BorshDeserialize)] +struct 
TokenContract_mint { + owner: FixedSequence0_32_U8, + amount: U64, +} + +impl ToCallData for TokenContract_mint { + const SELECTOR: Selector = Selector::new(3487406754); + fn input_data(&self) -> Option> { + let input_data = borsh::to_vec(&self).expect("Serialization to succeed"); + Some(input_data) + } +} + +#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, BorshSerialize, BorshDeserialize)] +struct TokenContract_burn { + owner: FixedSequence0_32_U8, + amount: U64, +} + +impl ToCallData for TokenContract_burn { + const SELECTOR: Selector = Selector::new(2985279867); + fn input_data(&self) -> Option> { + let input_data = borsh::to_vec(&self).expect("Serialization to succeed"); + Some(input_data) + } +}fn main() {} \ No newline at end of file diff --git a/smart_contracts/sdk_codegen/tests/test_build.rs b/smart_contracts/sdk_codegen/tests/test_build.rs new file mode 100644 index 0000000000..f89b8609bf --- /dev/null +++ b/smart_contracts/sdk_codegen/tests/test_build.rs @@ -0,0 +1,34 @@ +use std::{fs, io::Write, path::PathBuf, str::FromStr}; + +use casper_contract_sdk_codegen::Codegen; + +const FIXTURE_1: &str = include_str!("fixtures/cep18_schema.json"); + +const PROLOG: &str = "#![allow(dead_code, unused_variables, non_camel_case_types)]"; +const EPILOG: &str = "fn main() {}"; + +#[ignore = "Not yet supported"] +#[test] +fn it_works() -> Result<(), std::io::Error> { + let mut schema = Codegen::from_str(FIXTURE_1)?; + let mut code = schema.gen(); + code.insert_str(0, PROLOG); + + code += EPILOG; + + let mut tmp = tempfile::Builder::new() + .prefix("cep18_schema") + .suffix(".rs") + .tempfile()?; + tmp.write_all(code.as_bytes())?; + + let path = PathBuf::from(env!("CARGO_MANIFEST_DIR")) + .join("tests") + .join("fixtures") + .join("cep18_schema.rs"); + fs::write(path, code.as_bytes())?; + tmp.flush()?; + let t = trybuild::TestCases::new(); + t.pass(tmp.path()); + Ok(()) +} diff --git a/smart_contracts/sdk_sys/Cargo.toml b/smart_contracts/sdk_sys/Cargo.toml 
new file mode 100644 index 0000000000..3664762876 --- /dev/null +++ b/smart_contracts/sdk_sys/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "casper-contract-sdk-sys" +version = "0.1.3" +edition = "2021" +description = "Casper contract sdk sys package" +authors = ["Michał Papierski "] +documentation = "https://docs.rs/casper-contract-sdk-sys" +homepage = "https://casper.network" +repository = "https://github.com/casper-network/casper-node/tree/dev/smart_contracts/sdk_sys" +license = "Apache-2.0" + +[dependencies] diff --git a/smart_contracts/sdk_sys/src/for_each_host_function.rs b/smart_contracts/sdk_sys/src/for_each_host_function.rs new file mode 100644 index 0000000000..05c0b5bd47 --- /dev/null +++ b/smart_contracts/sdk_sys/src/for_each_host_function.rs @@ -0,0 +1,72 @@ +#[macro_export] +macro_rules! for_each_host_function { + ($mac:ident) => { + $mac! { + #[doc = "Read value from a storage available for caller's entity address."] + pub fn casper_read( + key_space: u64, + key_ptr: *const u8, + key_size: usize, + info: *mut $crate::ReadInfo, + alloc: extern "C" fn(usize, *mut core::ffi::c_void) -> *mut u8, + alloc_ctx: *const core::ffi::c_void, + ) -> u32; + pub fn casper_write( + key_space: u64, + key_ptr: *const u8, + key_size: usize, + value_ptr: *const u8, + value_size: usize, + ) -> u32; + pub fn casper_remove( + key_space: u64, + key_ptr: *const u8, + key_size: usize, + ) -> u32; + pub fn casper_print(msg_ptr: *const u8, msg_size: usize,); + pub fn casper_return(flags: u32, data_ptr: *const u8, data_len: usize,); + pub fn casper_copy_input( + alloc: extern "C" fn(usize, *mut core::ffi::c_void) -> *mut u8, + alloc_ctx: *const core::ffi::c_void, + ) -> *mut u8; + pub fn casper_create( + code_ptr: *const u8, + code_size: usize, + transferred_value: u64, + constructor_ptr: *const u8, + constructor_size: usize, + input_ptr: *const u8, + input_size: usize, + seed_ptr: *const u8, + seed_size: usize, + result_ptr: *mut $crate::CreateResult, + ) -> u32; + + // We 
don't offer any special protection against smart contracts on the host side + pub fn casper_call( + address_ptr: *const u8, + address_size: usize, + transferred_amount: u64, + entry_point_ptr: *const u8, + entry_point_size: usize, + input_ptr: *const u8, + input_size: usize, + alloc: extern "C" fn(usize, *mut core::ffi::c_void) -> *mut u8, // For capturing output data + alloc_ctx: *const core::ffi::c_void, + ) -> u32; + pub fn casper_upgrade( + code_ptr: *const u8, + code_size: usize, + entry_point_ptr: *const u8, + entry_point_size: usize, + input_ptr: *const u8, + input_size: usize, + ) -> u32; + #[doc = r"Get balance of an entity by its address."] + pub fn casper_env_balance(entity_kind: u32, entity_addr_ptr: *const u8, entity_addr_len: usize, output_ptr: *mut core::ffi::c_void,) -> u32; + pub fn casper_env_info(info_ptr: *const u8, info_size: u32,) -> u32; + pub fn casper_transfer(entity_addr_ptr: *const u8, entity_addr_len: usize, amount: *const core::ffi::c_void,) -> u32; + pub fn casper_emit(topic_ptr: *const u8, topic_size: usize, payload_ptr: *const u8, payload_size: usize,) -> u32; + } + }; +} diff --git a/smart_contracts/sdk_sys/src/lib.rs b/smart_contracts/sdk_sys/src/lib.rs new file mode 100644 index 0000000000..c4ad89aa11 --- /dev/null +++ b/smart_contracts/sdk_sys/src/lib.rs @@ -0,0 +1,92 @@ +pub mod for_each_host_function; + +#[repr(C)] +pub struct Param { + pub name_ptr: *const u8, + pub name_len: usize, +} + +/// Signature of a function pointer that a host understands. +pub type Fptr = extern "C" fn() -> (); + +#[derive(Debug)] +#[repr(C)] +pub struct ReadInfo { + pub data: *const u8, + /// Size in bytes. + pub size: usize, +} + +#[repr(C)] +#[derive(Debug)] +pub struct CreateResult { + pub contract_address: [u8; 32], +} + +#[repr(C)] +#[derive(Debug)] +pub struct UpgradeResult { + pub package_address: [u8; 32], + pub contract_address: [u8; 32], + pub version: u32, +} + +macro_rules! visit_host_function { + ( $( $(#[$cfg:meta])? 
$vis:vis fn $name:ident $(( $($arg:ident: $argty:ty,)* ))? $(-> $ret:ty)?;)+) => { + $( + $(#[$cfg])? $vis fn $name($($($arg: $argty,)*)?) $(-> $ret)?; + )* + } +} + +extern "C" { + for_each_host_function!(visit_host_function); +} + +macro_rules! visit_host_function_name { + ( $( $(#[$cfg:meta])? $vis:vis fn $name:ident $(( $($arg:ident: $argty:ty,)* ))? $(-> $ret:ty)?;)+) => { + &[ + $( + stringify!($name), + )* + ] + } +} + +pub const HOST_FUNCTIONS: &[&str] = for_each_host_function!(visit_host_function_name); + +#[cfg(test)] +mod tests { + use std::collections::BTreeSet; + + use crate::HOST_FUNCTIONS; + + mod separate_module { + use crate::for_each_host_function; + + macro_rules! visit_host_function { + ( $( $(#[$cfg:meta])? $vis:vis fn $name:ident $(( $($arg:ident: $argty:ty,)* ))? $(-> $ret:ty)?;)+) => { + $( + #[allow(dead_code, unused_variables, clippy::too_many_arguments)] + $(#[$cfg])? $vis fn $name($($($arg: $argty,)*)?) $(-> $ret)? { + todo!("Called fn {}", stringify!($name)); + } + )* + } + } + for_each_host_function!(visit_host_function); + } + + #[test] + #[should_panic(expected = "Called fn casper_print")] + fn different_module() { + const MSG: &str = "foobar"; + separate_module::casper_print(MSG.as_ptr(), MSG.len()); + } + + #[test] + fn all_host_functions() { + let host_functions = BTreeSet::from_iter(HOST_FUNCTIONS); + assert!(host_functions.contains(&"casper_call")); + } +} diff --git a/stop-dev.sh b/stop-dev.sh deleted file mode 100755 index df3f13d1b7..0000000000 --- a/stop-dev.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/sh -# -# stop-dev: A quick and dirty script to stop a testing setup of local nodes. - -set -eu - -ARGS="$@" -# If no nodes defined, stop all. 
-NODES="${ARGS:-1 2 3 4 5}" - -# print the warning if node 1 is one of the selected nodes to be stopped -case "$NODES" in - *"1"*) echo "NOTE: Stopping node 1 will also stop other nodes started with run-dev.sh" -esac - -for i in $NODES; do - systemctl --user stop node-$i -done; - -rm /tmp/chainspec_* diff --git a/storage/CHANGELOG.md b/storage/CHANGELOG.md new file mode 100644 index 0000000000..eb612171ae --- /dev/null +++ b/storage/CHANGELOG.md @@ -0,0 +1,43 @@ +# Changelog + +All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog]. + +[comment]: <> (Added: new features) +[comment]: <> (Changed: changes in existing functionality) +[comment]: <> (Deprecated: soon-to-be removed features) +[comment]: <> (Removed: now removed features) +[comment]: <> (Fixed: any bug fixes) +[comment]: <> (Security: in case of vulnerabilities) + + + +## 3.0.0 + +### Changed +* Update `casper-types` to v4.0.1, requiring a major version bump here. + + + +## 2.0.0 + +### Added +* Add `ChunkWithProof` to support chunking of large values, and associated Merkle-proofs of these. + + + +## 1.4.4 + +### Changed +* Update dependencies. + + + +## 1.4.0 + +### Added +* Initial release of crate providing `Digest` type and hashing methods, including the structs to handle proofs for chunks of data. + + + +[Keep a Changelog]: https://keepachangelog.com/en/1.0.0 +[unreleased]: https://github.com/casper-network/casper-node/tree/dev diff --git a/storage/Cargo.toml b/storage/Cargo.toml new file mode 100644 index 0000000000..1d85eb0ebc --- /dev/null +++ b/storage/Cargo.toml @@ -0,0 +1,53 @@ +[package] +name = "casper-storage" +version = "2.1.1" +edition = "2018" +authors = ["Ed Hastings "] +description = "Storage for a node on the Casper network." 
+readme = "README.md" +documentation = "https://docs.rs/casper-storage" +homepage = "https://casper.network" +repository = "https://github.com/casper-network/casper-node/tree/master/storage" +license = "Apache-2.0" + +[dependencies] +bincode = "1.3.1" +casper-types = { version = "6.0.1", path = "../types", features = ["datasize", "json-schema", "std"] } +datasize = "0.2.4" +either = "1.8.1" +lmdb-rkv = "0.14" +num = { version = "0.4.0", default-features = false } +num-derive = { workspace = true } +num-rational = { version = "0.4.0", features = ["serde"] } +num-traits = { workspace = true } +proptest = { version = "1.0.0", optional = true } +serde = { version = "1", features = ["derive"] } +tempfile = "3.1.0" +thiserror = "1.0.18" +tracing = "0.1.18" +uuid = { version = "0.8.1", features = ["serde", "v4"] } +linked-hash-map = "0.5.3" +once_cell = "1.18.0" +rand = "0.8.3" +rand_chacha = "0.3.0" +itertools = "0.10.5" +parking_lot = "0.12.1" + +[dev-dependencies] +assert_matches = "1.3.0" +anyhow = "1.0.33" +casper-types = { path = "../types", features = ["testing"] } +proptest = "1.0.0" +rand = "0.8.3" +serde_json = "1" +base16 = "0.2.1" +criterion = { version = "0.5.1", features = ["html_reports"] } +pprof = { version = "0.14.0", features = ["flamegraph", "criterion"] } + +[package.metadata.docs.rs] +all-features = true +rustc-args = ["--cfg", "docsrs"] + +[[bench]] +name = "global_state_key_write_bench" +harness = false diff --git a/storage/README.md b/storage/README.md new file mode 100644 index 0000000000..b539da00c7 --- /dev/null +++ b/storage/README.md @@ -0,0 +1,13 @@ +# `casper-storage` + +[![LOGO](https://raw.githubusercontent.com/casper-network/casper-node/master/images/casper-association-logo-primary.svg)](https://casper.network/) + +[![Crates.io](https://img.shields.io/crates/v/casper-storage)](https://crates.io/crates/casper-storage) +[![Documentation](https://docs.rs/casper-storage/badge.svg)](https://docs.rs/casper-storage) 
+[![License](https://img.shields.io/badge/license-Apache-blue)](https://github.com/casper-network/casper-node/blob/master/LICENSE) + +A library providing storage functionality for Casper nodes. + +## License + +Licensed under the [Apache License Version 2.0](https://github.com/casper-network/casper-node/blob/master/LICENSE). diff --git a/storage/benches/global_state_key_write_bench.rs b/storage/benches/global_state_key_write_bench.rs new file mode 100644 index 0000000000..7ab75dada7 --- /dev/null +++ b/storage/benches/global_state_key_write_bench.rs @@ -0,0 +1,197 @@ +use std::time::{Duration, Instant}; + +use criterion::{criterion_group, criterion_main, Criterion, Throughput}; +use pprof::criterion::{Output, PProfProfiler}; + +use casper_storage::global_state::{ + error, + store::Store, + transaction_source::{lmdb::LmdbEnvironment, Transaction, TransactionSource}, + trie::Trie, + trie_store::{ + lmdb::LmdbTrieStore, + operations::{batch_write, WriteResult}, + }, +}; +use casper_types::{bytesrepr::ToBytes, testing::TestRng, Digest, Key}; +use lmdb::{DatabaseFlags, RwTransaction}; +use rand::Rng; +use tempfile::tempdir; + +use casper_storage::global_state::trie_store::operations::write; + +pub(crate) const DB_SIZE: usize = 8_520_428_800; +pub(crate) const MAX_READERS: u32 = 512; + +fn write_sequential( + trie_store: &LmdbTrieStore, + txn: &mut RwTransaction, + mut root_hash: Digest, + data: Vec<(Key, u32)>, +) -> Digest { + for (key, value) in data.iter() { + let write_result = + write::(txn, trie_store, &root_hash, key, value).unwrap(); + match write_result { + WriteResult::Written(hash) => { + root_hash = hash; + } + WriteResult::AlreadyExists => (), + WriteResult::RootNotFound => panic!("invalid root hash"), + }; + } + root_hash +} + +fn create_empty_store() -> (LmdbEnvironment, LmdbTrieStore) { + let _temp_dir = tempdir().unwrap(); + let environment = LmdbEnvironment::new(_temp_dir.path(), DB_SIZE, MAX_READERS, true).unwrap(); + let store = 
LmdbTrieStore::new(&environment, None, DatabaseFlags::empty()).unwrap(); + + (environment, store) +} + +fn store_empty_root(env: &LmdbEnvironment, store: &LmdbTrieStore) -> Digest { + let trie: Trie = Trie::node(&[]); + let trie_bytes = trie.to_bytes().unwrap(); + let hash = Digest::hash(trie_bytes); + + let mut txn = env.create_read_write_txn().unwrap(); + store.put(&mut txn, &hash, &trie).unwrap(); + txn.commit().unwrap(); + + hash +} + +fn sequential_write_bench(c: &mut Criterion, rng: &mut TestRng) { + let mut sequential_write_group = c.benchmark_group("trie_store_sequential_write"); + for batch_size in [1000, 10_000] { + sequential_write_group.throughput(Throughput::Elements(batch_size as u64)); + + if batch_size > 150_000 { + // Reduce the sample size to allow faster runtime. + sequential_write_group.sample_size(30); + } + + sequential_write_group.bench_function(format!("write_sequential_{}", batch_size), |b| { + b.iter_custom(|iter| { + let mut total = Duration::default(); + for _ in 0..iter { + let (env, store) = create_empty_store(); + let root_hash = store_empty_root(&env, &store); + let mut txn = env.create_read_write_txn().unwrap(); + let data: Vec<(Key, u32)> = + (0u32..batch_size).map(|val| (rng.gen(), val)).collect(); + + let start = Instant::now(); + write_sequential(&store, &mut txn, root_hash, data); + total = total.checked_add(start.elapsed()).unwrap(); + } + + total + }) + }); + } + sequential_write_group.finish(); +} + +fn batch_write_with_empty_store(c: &mut Criterion, rng: &mut TestRng) { + let mut batch_write_group = c.benchmark_group("batch_write_with_empty_store"); + + for batch_size in [1000, 10_000] { + batch_write_group.throughput(Throughput::Elements(batch_size as u64)); + + if batch_size > 150_000 { + // Reduce the sample size to allow faster runtime. 
+ batch_write_group.sample_size(30); + } + + batch_write_group.bench_function(format!("write_batch_{}", batch_size), |b| { + b.iter_custom(|iter| { + let mut total = Duration::default(); + for _ in 0..iter { + let (environment, store) = create_empty_store(); + let root_hash = store_empty_root(&environment, &store); + let mut txn = environment.create_read_write_txn().unwrap(); + let data: Vec<(Key, u32)> = + (0u32..batch_size).map(|val| (rng.gen(), val)).collect(); + + let start = Instant::now(); + let _ = batch_write::( + &mut txn, + &store, + &root_hash, + data.into_iter(), + ) + .unwrap(); + total = total.checked_add(start.elapsed()).unwrap(); + } + + total + }) + }); + } + batch_write_group.finish(); +} + +fn batch_write_with_populated_store(c: &mut Criterion, rng: &mut TestRng) { + let mut batch_write_group = c.benchmark_group("batch_write_with_populated_store"); + + for batch_size in [1000, 10_000] { + batch_write_group.throughput(Throughput::Elements(batch_size as u64)); + + if batch_size > 150_000 { + // Reduce the sample size to allow faster runtime. + batch_write_group.sample_size(30); + } + + batch_write_group.bench_function(format!("write_batch_{}", batch_size), |b| { + b.iter_custom(|iter| { + let mut total = Duration::default(); + for _ in 0..iter { + let (environment, store) = create_empty_store(); + let root_hash = store_empty_root(&environment, &store); + let mut txn = environment.create_read_write_txn().unwrap(); + let initial_data: Vec<(Key, u32)> = + (0u32..200).map(|val| (rng.gen(), val)).collect(); + + // Pre-populate trie store with some data. + let root_hash = write_sequential(&store, &mut txn, root_hash, initial_data); + + // Create a cache backed up by the pre-populated store. Any already existing + // nodes will be read-back into the cache. 
+ let data: Vec<(Key, u32)> = + (0u32..batch_size).map(|val| (rng.gen(), val)).collect(); + + let start = Instant::now(); + let _ = batch_write::( + &mut txn, + &store, + &root_hash, + data.into_iter(), + ) + .unwrap(); + total = total.checked_add(start.elapsed()).unwrap(); + } + + total + }) + }); + } + batch_write_group.finish(); +} + +fn trie_store_batch_write_bench(c: &mut Criterion) { + let mut rng = TestRng::new(); + + sequential_write_bench(c, &mut rng); + batch_write_with_empty_store(c, &mut rng); + batch_write_with_populated_store(c, &mut rng); +} + +criterion_group! { + name = benches; + config = Criterion::default().with_profiler(PProfProfiler::new(100, Output::Flamegraph(None))); + targets = trie_store_batch_write_bench +} +criterion_main!(benches); diff --git a/storage/src/address_generator.rs b/storage/src/address_generator.rs new file mode 100644 index 0000000000..3bbaae20e5 --- /dev/null +++ b/storage/src/address_generator.rs @@ -0,0 +1,122 @@ +//! Generates unique 32-byte addresses. +use rand::{RngCore, SeedableRng}; +use rand_chacha::ChaChaRng; + +use casper_types::{AccessRights, Digest, Phase, URef}; + +/// The length of an address. +pub const ADDRESS_LENGTH: usize = 32; + +/// Alias for an array of bytes that represents an address. +pub type Address = [u8; ADDRESS_LENGTH]; + +const SEED_LENGTH: usize = 32; + +/// An `AddressGenerator` generates `URef` addresses. +pub struct AddressGenerator(ChaChaRng); + +impl AddressGenerator { + /// Creates an [`AddressGenerator`] from a 32-byte hash digest and [`Phase`]. + pub fn new(hash: &[u8], phase: Phase) -> AddressGenerator { + AddressGeneratorBuilder::new() + .seed_with(hash) + .seed_with(&[phase as u8]) + .build() + } + + /// Creates a new [`Address`] by using an internal instance of PRNG. 
+ pub fn create_address(&mut self) -> Address { + let mut buff = [0u8; ADDRESS_LENGTH]; + self.0.fill_bytes(&mut buff); + buff + } + + /// Creates a new [`Address`] by hashing an output from [`AddressGenerator::create_address`] + /// with a blake2b256. + pub fn new_hash_address(&mut self) -> Address { + Digest::hash(self.create_address()).value() + } + + /// Creates a new [`URef`] with a new address generated. + pub fn new_uref(&mut self, access_rights: AccessRights) -> URef { + let addr = self.create_address(); + URef::new(addr, access_rights) + } +} + +/// A builder for [`AddressGenerator`]. +#[derive(Default)] +pub struct AddressGeneratorBuilder { + data: Vec, +} + +impl AddressGeneratorBuilder { + /// Creates a new builder. + pub fn new() -> Self { + Default::default() + } + + /// Extends the seed with more data. + pub fn seed_with(mut self, bytes: &[u8]) -> Self { + self.data.extend(bytes); + self + } + + /// Creates a new [`AddressGenerator`]. + /// + /// This method hashes the seed bytes, and seeds the PRNG with it. 
+ pub fn build(self) -> AddressGenerator { + let seed: [u8; SEED_LENGTH] = Digest::hash(self.data).value(); + AddressGenerator(ChaChaRng::from_seed(seed)) + } +} + +#[cfg(test)] +mod tests { + use casper_types::Phase; + + use super::AddressGenerator; + + const DEPLOY_HASH_1: [u8; 32] = [1u8; 32]; + const DEPLOY_HASH_2: [u8; 32] = [2u8; 32]; + + #[test] + fn should_generate_different_numbers_for_different_seeds() { + let mut ag_a = AddressGenerator::new(&DEPLOY_HASH_1, Phase::Session); + let mut ag_b = AddressGenerator::new(&DEPLOY_HASH_2, Phase::Session); + let random_a = ag_a.create_address(); + let random_b = ag_b.create_address(); + + assert_ne!(random_a, random_b) + } + + #[test] + fn should_generate_same_numbers_for_same_seed() { + let mut ag_a = AddressGenerator::new(&DEPLOY_HASH_1, Phase::Session); + let mut ag_b = AddressGenerator::new(&DEPLOY_HASH_1, Phase::Session); + let random_a = ag_a.create_address(); + let random_b = ag_b.create_address(); + + assert_eq!(random_a, random_b) + } + + #[test] + fn should_not_generate_same_numbers_for_different_phase() { + let mut ag_a = AddressGenerator::new(&DEPLOY_HASH_1, Phase::Payment); + let mut ag_b = AddressGenerator::new(&DEPLOY_HASH_1, Phase::Session); + let mut ag_c = AddressGenerator::new(&DEPLOY_HASH_1, Phase::FinalizePayment); + let random_a = ag_a.create_address(); + let random_b = ag_b.create_address(); + let random_c = ag_c.create_address(); + + assert_ne!( + random_a, random_b, + "different phase should have different output" + ); + + assert_ne!( + random_a, random_c, + "different phase should have different output" + ); + } +} diff --git a/storage/src/block_store/block_provider.rs b/storage/src/block_store/block_provider.rs new file mode 100644 index 0000000000..d3290360c0 --- /dev/null +++ b/storage/src/block_store/block_provider.rs @@ -0,0 +1,43 @@ +use super::error::BlockStoreError; + +/// A block store that supports read/write operations consistently. 
+pub trait BlockStoreProvider { + /// Reader alias. + type Reader<'a>: BlockStoreTransaction + where + Self: 'a; + /// ReaderWriter alias. + type ReaderWriter<'a>: BlockStoreTransaction + where + Self: 'a; + + /// Check out read only handle. + fn checkout_ro(&self) -> Result, BlockStoreError>; + /// Check out read write handle. + fn checkout_rw(&mut self) -> Result, BlockStoreError>; +} + +/// Block store transaction. +pub trait BlockStoreTransaction { + /// Commit changes to the block store. + fn commit(self) -> Result<(), BlockStoreError>; + + /// Roll back any temporary changes to the block store. + fn rollback(self); +} + +/// Data reader definition. +pub trait DataReader { + /// Read item at key. + fn read(&self, key: K) -> Result, BlockStoreError>; + /// Returns true if item exists at key, else false. + fn exists(&self, key: K) -> Result; +} + +/// Data write definition. +pub trait DataWriter { + /// Write item to store and return key. + fn write(&mut self, data: &T) -> Result; + /// Delete item at key from store. + fn delete(&mut self, key: K) -> Result<(), BlockStoreError>; +} diff --git a/storage/src/block_store/error.rs b/storage/src/block_store/error.rs new file mode 100644 index 0000000000..976357e6d5 --- /dev/null +++ b/storage/src/block_store/error.rs @@ -0,0 +1,44 @@ +use casper_types::{BlockHash, EraId, TransactionHash}; +use std::fmt::Debug; +use thiserror::Error; + +/// Block store error. +#[derive(Debug, Error)] +pub enum BlockStoreError { + /// Found a duplicate block entry of the specified height. + #[error("duplicate entries for block at height {height}: {first} / {second}")] + DuplicateBlock { + /// Height at which duplicate was found. + height: u64, + /// First block hash encountered at `height`. + first: BlockHash, + /// Second block hash encountered at `height`. + second: BlockHash, + }, + /// Found a duplicate switch-block entry of the specified height. 
+ #[error("duplicate entries for switch block at era id {era_id}: {first} / {second}")] + DuplicateEraId { + /// Era ID at which duplicate was found. + era_id: EraId, + /// First block hash encountered at `era_id`. + first: BlockHash, + /// Second block hash encountered at `era_id`. + second: BlockHash, + }, + /// Found a duplicate transaction entry. + #[error("duplicate entries for blocks for transaction {transaction_hash}: {first} / {second}")] + DuplicateTransaction { + /// Transaction hash at which duplicate was found. + transaction_hash: TransactionHash, + /// First block hash encountered at `transaction_hash`. + first: BlockHash, + /// Second block hash encountered at `transaction_hash`. + second: BlockHash, + }, + /// Internal error. + #[error("internal database error: {0}")] + InternalStorage(Box), + /// The operation is unsupported. + #[error("unsupported operation")] + UnsupportedOperation, +} diff --git a/storage/src/block_store/lmdb/indexed_lmdb_block_store.rs b/storage/src/block_store/lmdb/indexed_lmdb_block_store.rs new file mode 100644 index 0000000000..2e8a759c9e --- /dev/null +++ b/storage/src/block_store/lmdb/indexed_lmdb_block_store.rs @@ -0,0 +1,1245 @@ +use std::{ + borrow::Cow, + collections::{btree_map, hash_map::Entry, BTreeMap, BTreeSet, HashMap, HashSet}, +}; + +use super::{ + lmdb_block_store::LmdbBlockStore, lmdb_ext::LmdbExtError, temp_map::TempMap, DbTableId, +}; +use datasize::DataSize; +use lmdb::{ + Environment, RoTransaction, RwCursor, RwTransaction, Transaction as LmdbTransaction, WriteFlags, +}; + +use tracing::info; + +use super::versioned_databases::VersionedDatabases; +use crate::block_store::{ + block_provider::{BlockStoreTransaction, DataReader, DataWriter}, + types::{ + ApprovalsHashes, BlockExecutionResults, BlockHashHeightAndEra, BlockHeight, BlockTransfers, + LatestSwitchBlock, StateStore, StateStoreKey, Tip, TransactionFinalizedApprovals, + }, + BlockStoreError, BlockStoreProvider, DbRawBytesSpec, +}; +use 
casper_types::{ + execution::ExecutionResult, Approval, Block, BlockBody, BlockHash, BlockHeader, + BlockSignatures, Digest, EraId, ProtocolVersion, Transaction, TransactionHash, Transfer, +}; + +/// Indexed lmdb block store. +#[derive(DataSize, Debug)] +pub struct IndexedLmdbBlockStore { + /// Block store + block_store: LmdbBlockStore, + /// A map of block height to block ID. + block_height_index: BTreeMap, + /// A map of era ID to switch block ID. + switch_block_era_id_index: BTreeMap, + /// A map of transaction hashes to hashes, heights and era IDs of blocks containing them. + transaction_hash_index: BTreeMap, +} + +impl IndexedLmdbBlockStore { + fn get_reader(&self) -> Result, BlockStoreError> { + let txn = self + .block_store + .env + .begin_ro_txn() + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?; + Ok(IndexedLmdbBlockStoreReadTransaction { + txn, + block_store: self, + }) + } + + /// Inserts the relevant entries to the index. + /// + /// If a duplicate entry is encountered, index is not updated and an error is returned. + fn insert_to_transaction_index( + transaction_hash_index: &mut BTreeMap, + block_hash: BlockHash, + block_height: u64, + era_id: EraId, + transaction_hashes: Vec, + ) -> Result<(), BlockStoreError> { + if let Some(hash) = transaction_hashes.iter().find(|hash| { + transaction_hash_index + .get(hash) + .is_some_and(|old_details| old_details.block_hash != block_hash) + }) { + return Err(BlockStoreError::DuplicateTransaction { + transaction_hash: *hash, + first: transaction_hash_index[hash].block_hash, + second: block_hash, + }); + } + + for hash in transaction_hashes { + transaction_hash_index.insert( + hash, + BlockHashHeightAndEra::new(block_hash, block_height, era_id), + ); + } + + Ok(()) + } + + /// Inserts the relevant entries to the two indices. + /// + /// If a duplicate entry is encountered, neither index is updated and an error is returned. 
+ pub(super) fn insert_to_block_header_indices( + block_height_index: &mut BTreeMap, + switch_block_era_id_index: &mut BTreeMap, + block_header: &BlockHeader, + ) -> Result<(), BlockStoreError> { + let block_hash = block_header.block_hash(); + if let Some(first) = block_height_index.get(&block_header.height()) { + if *first != block_hash { + return Err(BlockStoreError::DuplicateBlock { + height: block_header.height(), + first: *first, + second: block_hash, + }); + } + } + + if block_header.is_switch_block() { + match switch_block_era_id_index.entry(block_header.era_id()) { + btree_map::Entry::Vacant(entry) => { + let _ = entry.insert(block_hash); + } + btree_map::Entry::Occupied(entry) => { + if *entry.get() != block_hash { + return Err(BlockStoreError::DuplicateEraId { + era_id: block_header.era_id(), + first: *entry.get(), + second: block_hash, + }); + } + } + } + } + + let _ = block_height_index.insert(block_header.height(), block_hash); + Ok(()) + } + + /// Ctor. + pub fn new( + block_store: LmdbBlockStore, + hard_reset_to_start_of_era: Option, + protocol_version: ProtocolVersion, + ) -> Result { + // We now need to restore the block-height index. Log messages allow timing here. + info!("indexing block store"); + let mut block_height_index = BTreeMap::new(); + let mut switch_block_era_id_index = BTreeMap::new(); + let mut transaction_hash_index = BTreeMap::new(); + let mut block_txn = block_store + .env + .begin_rw_txn() + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?; + + let mut deleted_block_hashes = HashSet::new(); + // Map of all block body hashes, with their values representing whether to retain the + // corresponding block bodies or not. 
+ let mut block_body_hashes = HashMap::new(); + let mut deleted_transaction_hashes = HashSet::::new(); + + let mut init_fn = + |cursor: &mut RwCursor, block_header: BlockHeader| -> Result<(), BlockStoreError> { + let should_retain_block = match hard_reset_to_start_of_era { + Some(invalid_era) => { + // Retain blocks from eras before the hard reset era, and blocks after this + // era if they are from the current protocol version (as otherwise a node + // restart would purge them again, despite them being valid). + block_header.era_id() < invalid_era + || block_header.protocol_version() == protocol_version + } + None => true, + }; + + // If we don't already have the block body hash in the collection, insert it with + // the value `should_retain_block`. + // + // If there is an existing value, the updated value should be `false` iff the + // existing value and `should_retain_block` are both `false`. + // Otherwise the updated value should be `true`. + match block_body_hashes.entry(*block_header.body_hash()) { + Entry::Vacant(entry) => { + entry.insert(should_retain_block); + } + Entry::Occupied(entry) => { + let value = entry.into_mut(); + *value = *value || should_retain_block; + } + } + + let body_txn = block_store + .env + .begin_ro_txn() + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?; + let maybe_block_body = block_store + .block_body_dbs + .get(&body_txn, block_header.body_hash()) + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?; + if !should_retain_block { + let _ = deleted_block_hashes.insert(block_header.block_hash()); + + match &maybe_block_body { + Some(BlockBody::V1(v1_body)) => deleted_transaction_hashes.extend( + v1_body + .deploy_and_transfer_hashes() + .map(TransactionHash::from), + ), + Some(BlockBody::V2(v2_body)) => { + let transactions = v2_body.all_transactions(); + deleted_transaction_hashes.extend(transactions) + } + None => (), + } + + cursor + .del(WriteFlags::empty()) + .map_err(|err| 
BlockStoreError::InternalStorage(Box::new(err)))?; + return Ok(()); + } + + Self::insert_to_block_header_indices( + &mut block_height_index, + &mut switch_block_era_id_index, + &block_header, + )?; + + if let Some(block_body) = maybe_block_body { + let transaction_hashes = match block_body { + BlockBody::V1(v1) => v1 + .deploy_and_transfer_hashes() + .map(TransactionHash::from) + .collect(), + BlockBody::V2(v2) => v2.all_transactions().copied().collect(), + }; + Self::insert_to_transaction_index( + &mut transaction_hash_index, + block_header.block_hash(), + block_header.height(), + block_header.era_id(), + transaction_hashes, + )?; + } + + Ok(()) + }; + + block_store + .block_header_dbs + .for_each_value_in_current(&mut block_txn, &mut init_fn)?; + block_store + .block_header_dbs + .for_each_value_in_legacy(&mut block_txn, &mut init_fn)?; + + info!("block store reindexing complete"); + block_txn + .commit() + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?; + + let deleted_block_body_hashes = block_body_hashes + .into_iter() + .filter_map(|(body_hash, retain)| (!retain).then_some(body_hash)) + .collect(); + initialize_block_body_dbs( + &block_store.env, + block_store.block_body_dbs, + deleted_block_body_hashes, + )?; + initialize_block_metadata_dbs( + &block_store.env, + block_store.block_metadata_dbs, + deleted_block_hashes, + )?; + initialize_execution_result_dbs( + &block_store.env, + block_store.execution_result_dbs, + deleted_transaction_hashes, + ) + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?; + + Ok(Self { + block_store, + block_height_index, + switch_block_era_id_index, + transaction_hash_index, + }) + } +} + +/// Purges stale entries from the block body databases. 
+fn initialize_block_body_dbs( + env: &Environment, + block_body_dbs: VersionedDatabases, + deleted_block_body_hashes: HashSet, +) -> Result<(), BlockStoreError> { + info!("initializing block body databases"); + let mut txn = env + .begin_rw_txn() + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?; + for body_hash in deleted_block_body_hashes { + block_body_dbs + .delete(&mut txn, &body_hash) + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?; + } + txn.commit() + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?; + info!("block body database initialized"); + Ok(()) +} + +/// Purges stale entries from the block metadata database. +fn initialize_block_metadata_dbs( + env: &Environment, + block_metadata_dbs: VersionedDatabases, + deleted_block_hashes: HashSet, +) -> Result<(), BlockStoreError> { + let block_count_to_be_deleted = deleted_block_hashes.len(); + info!( + block_count_to_be_deleted, + "initializing block metadata database" + ); + let mut txn = env + .begin_rw_txn() + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?; + for block_hash in deleted_block_hashes { + block_metadata_dbs + .delete(&mut txn, &block_hash) + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))? + } + txn.commit() + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?; + info!("block metadata database initialized"); + Ok(()) +} + +/// Purges stale entries from the execution result databases. 
+fn initialize_execution_result_dbs( + env: &Environment, + execution_result_dbs: VersionedDatabases, + deleted_transaction_hashes: HashSet, +) -> Result<(), LmdbExtError> { + let exec_results_count_to_be_deleted = deleted_transaction_hashes.len(); + info!( + exec_results_count_to_be_deleted, + "initializing execution result databases" + ); + let mut txn = env.begin_rw_txn()?; + for hash in deleted_transaction_hashes { + execution_result_dbs.delete(&mut txn, &hash)?; + } + txn.commit()?; + info!("execution result databases initialized"); + Ok(()) +} + +pub struct IndexedLmdbBlockStoreRWTransaction<'t> { + txn: RwTransaction<'t>, + block_store: &'t LmdbBlockStore, + block_height_index: TempMap<'t, u64, BlockHash>, + switch_block_era_id_index: TempMap<'t, EraId, BlockHash>, + transaction_hash_index: TempMap<'t, TransactionHash, BlockHashHeightAndEra>, +} + +impl IndexedLmdbBlockStoreRWTransaction<'_> { + /// Check if the block height index can be updated. + fn should_update_block_height_index( + &self, + block_height: u64, + block_hash: &BlockHash, + ) -> Result { + if let Some(first) = self.block_height_index.get(&block_height) { + // There is a block in the index at this height + if first != *block_hash { + Err(BlockStoreError::DuplicateBlock { + height: block_height, + first, + second: *block_hash, + }) + } else { + // Same value already in index, no need to update it. + Ok(false) + } + } else { + // Value not in index, update. + Ok(true) + } + } + + /// Check if the switch block index can be updated. + fn should_update_switch_block_index( + &self, + block_header: &BlockHeader, + ) -> Result { + if block_header.is_switch_block() { + let era_id = block_header.era_id(); + if let Some(entry) = self.switch_block_era_id_index.get(&era_id) { + let block_hash = block_header.block_hash(); + if entry != block_hash { + Err(BlockStoreError::DuplicateEraId { + era_id, + first: entry, + second: block_hash, + }) + } else { + // already in index, no need to update. 
+ Ok(false) + } + } else { + // not in the index, update. + Ok(true) + } + } else { + // not a switch block. + Ok(false) + } + } + + // Check if the transaction hash index can be updated. + fn should_update_transaction_hash_index( + &self, + transaction_hashes: &[TransactionHash], + block_hash: &BlockHash, + ) -> Result { + if let Some(hash) = transaction_hashes.iter().find(|hash| { + self.transaction_hash_index + .get(hash) + .is_some_and(|old_details| old_details.block_hash != *block_hash) + }) { + return Err(BlockStoreError::DuplicateTransaction { + transaction_hash: *hash, + first: self.transaction_hash_index.get(hash).unwrap().block_hash, + second: *block_hash, + }); + } + Ok(true) + } +} + +pub struct IndexedLmdbBlockStoreReadTransaction<'t> { + txn: RoTransaction<'t>, + block_store: &'t IndexedLmdbBlockStore, +} + +enum LmdbBlockStoreIndex { + BlockHeight(IndexPosition), + SwitchBlockEraId(IndexPosition), +} + +enum IndexPosition { + Tip, + Key(K), +} + +enum DataType { + Block, + BlockHeader, + ApprovalsHashes, + BlockSignatures, +} + +impl IndexedLmdbBlockStoreReadTransaction<'_> { + fn block_hash_from_index(&self, index: LmdbBlockStoreIndex) -> Option<&BlockHash> { + match index { + LmdbBlockStoreIndex::BlockHeight(position) => match position { + IndexPosition::Tip => self.block_store.block_height_index.values().last(), + IndexPosition::Key(height) => self.block_store.block_height_index.get(&height), + }, + LmdbBlockStoreIndex::SwitchBlockEraId(position) => match position { + IndexPosition::Tip => self.block_store.switch_block_era_id_index.values().last(), + IndexPosition::Key(era_id) => { + self.block_store.switch_block_era_id_index.get(&era_id) + } + }, + } + } + + fn read_block_indexed( + &self, + index: LmdbBlockStoreIndex, + ) -> Result, BlockStoreError> { + self.block_hash_from_index(index) + .and_then(|block_hash| { + self.block_store + .block_store + .get_single_block(&self.txn, block_hash) + .transpose() + }) + .transpose() + } + + fn 
read_block_header_indexed( + &self, + index: LmdbBlockStoreIndex, + ) -> Result, BlockStoreError> { + self.block_hash_from_index(index) + .and_then(|block_hash| { + self.block_store + .block_store + .get_single_block_header(&self.txn, block_hash) + .transpose() + }) + .transpose() + } + + fn read_block_signatures_indexed( + &self, + index: LmdbBlockStoreIndex, + ) -> Result, BlockStoreError> { + self.block_hash_from_index(index) + .and_then(|block_hash| { + self.block_store + .block_store + .get_block_signatures(&self.txn, block_hash) + .transpose() + }) + .transpose() + } + + fn read_approvals_hashes_indexed( + &self, + index: LmdbBlockStoreIndex, + ) -> Result, BlockStoreError> { + self.block_hash_from_index(index) + .and_then(|block_hash| { + self.block_store + .block_store + .read_approvals_hashes(&self.txn, block_hash) + .transpose() + }) + .transpose() + } + + fn contains_data_indexed( + &self, + index: LmdbBlockStoreIndex, + data_type: DataType, + ) -> Result { + self.block_hash_from_index(index) + .map_or(Ok(false), |block_hash| match data_type { + DataType::Block => self + .block_store + .block_store + .block_exists(&self.txn, block_hash), + DataType::BlockHeader => self + .block_store + .block_store + .block_header_exists(&self.txn, block_hash), + DataType::ApprovalsHashes => self + .block_store + .block_store + .approvals_hashes_exist(&self.txn, block_hash), + DataType::BlockSignatures => self + .block_store + .block_store + .block_signatures_exist(&self.txn, block_hash), + }) + } +} + +impl BlockStoreTransaction for IndexedLmdbBlockStoreReadTransaction<'_> { + fn commit(self) -> Result<(), BlockStoreError> { + Ok(()) + } + + fn rollback(self) { + self.txn.abort(); + } +} + +impl BlockStoreTransaction for IndexedLmdbBlockStoreRWTransaction<'_> { + fn commit(self) -> Result<(), BlockStoreError> { + self.txn + .commit() + .map_err(|e| BlockStoreError::InternalStorage(Box::new(LmdbExtError::from(e))))?; + + self.block_height_index.commit(); + 
self.switch_block_era_id_index.commit(); + self.transaction_hash_index.commit(); + Ok(()) + } + + fn rollback(self) { + self.txn.abort(); + } +} + +impl BlockStoreProvider for IndexedLmdbBlockStore { + type Reader<'t> = IndexedLmdbBlockStoreReadTransaction<'t>; + type ReaderWriter<'t> = IndexedLmdbBlockStoreRWTransaction<'t>; + + fn checkout_ro(&self) -> Result, BlockStoreError> { + self.get_reader() + } + + fn checkout_rw(&mut self) -> Result, BlockStoreError> { + let txn = self + .block_store + .env + .begin_rw_txn() + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?; + + Ok(IndexedLmdbBlockStoreRWTransaction { + txn, + block_store: &self.block_store, + block_height_index: TempMap::new(&mut self.block_height_index), + switch_block_era_id_index: TempMap::new(&mut self.switch_block_era_id_index), + transaction_hash_index: TempMap::new(&mut self.transaction_hash_index), + }) + } +} + +impl DataReader for IndexedLmdbBlockStoreReadTransaction<'_> { + fn read(&self, key: BlockHash) -> Result, BlockStoreError> { + self.block_store + .block_store + .get_single_block(&self.txn, &key) + } + + fn exists(&self, key: BlockHash) -> Result { + self.block_store.block_store.block_exists(&self.txn, &key) + } +} + +impl DataReader for IndexedLmdbBlockStoreReadTransaction<'_> { + fn read(&self, key: BlockHash) -> Result, BlockStoreError> { + self.block_store + .block_store + .get_single_block_header(&self.txn, &key) + } + + fn exists(&self, key: BlockHash) -> Result { + self.block_store + .block_store + .block_header_exists(&self.txn, &key) + } +} + +impl DataReader for IndexedLmdbBlockStoreReadTransaction<'_> { + fn read(&self, key: BlockHash) -> Result, BlockStoreError> { + self.block_store + .block_store + .read_approvals_hashes(&self.txn, &key) + } + + fn exists(&self, key: BlockHash) -> Result { + self.block_store + .block_store + .block_header_exists(&self.txn, &key) + } +} + +impl DataReader for IndexedLmdbBlockStoreReadTransaction<'_> { + fn read(&self, key: 
BlockHash) -> Result, BlockStoreError> { + self.block_store + .block_store + .get_block_signatures(&self.txn, &key) + } + + fn exists(&self, key: BlockHash) -> Result { + self.block_store + .block_store + .block_signatures_exist(&self.txn, &key) + } +} + +impl DataReader for IndexedLmdbBlockStoreReadTransaction<'_> { + fn read(&self, key: BlockHeight) -> Result, BlockStoreError> { + self.read_block_indexed(LmdbBlockStoreIndex::BlockHeight(IndexPosition::Key(key))) + } + + fn exists(&self, key: BlockHeight) -> Result { + self.contains_data_indexed( + LmdbBlockStoreIndex::BlockHeight(IndexPosition::Key(key)), + DataType::Block, + ) + } +} + +impl DataReader for IndexedLmdbBlockStoreReadTransaction<'_> { + fn read(&self, key: BlockHeight) -> Result, BlockStoreError> { + self.read_block_header_indexed(LmdbBlockStoreIndex::BlockHeight(IndexPosition::Key(key))) + } + + fn exists(&self, key: BlockHeight) -> Result { + self.contains_data_indexed( + LmdbBlockStoreIndex::BlockHeight(IndexPosition::Key(key)), + DataType::BlockHeader, + ) + } +} + +impl DataReader for IndexedLmdbBlockStoreReadTransaction<'_> { + fn read(&self, key: BlockHeight) -> Result, BlockStoreError> { + self.read_approvals_hashes_indexed(LmdbBlockStoreIndex::BlockHeight(IndexPosition::Key( + key, + ))) + } + + fn exists(&self, key: BlockHeight) -> Result { + self.contains_data_indexed( + LmdbBlockStoreIndex::BlockHeight(IndexPosition::Key(key)), + DataType::ApprovalsHashes, + ) + } +} + +impl DataReader for IndexedLmdbBlockStoreReadTransaction<'_> { + fn read(&self, key: BlockHeight) -> Result, BlockStoreError> { + self.read_block_signatures_indexed(LmdbBlockStoreIndex::BlockHeight(IndexPosition::Key( + key, + ))) + } + + fn exists(&self, key: BlockHeight) -> Result { + self.contains_data_indexed( + LmdbBlockStoreIndex::BlockHeight(IndexPosition::Key(key)), + DataType::BlockSignatures, + ) + } +} + +/// Retrieves single switch block by era ID by looking it up in the index and returning it. 
+impl DataReader for IndexedLmdbBlockStoreReadTransaction<'_> { + fn read(&self, key: EraId) -> Result, BlockStoreError> { + self.read_block_indexed(LmdbBlockStoreIndex::SwitchBlockEraId(IndexPosition::Key( + key, + ))) + } + + fn exists(&self, key: EraId) -> Result { + self.contains_data_indexed( + LmdbBlockStoreIndex::SwitchBlockEraId(IndexPosition::Key(key)), + DataType::Block, + ) + } +} + +/// Retrieves single switch block header by era ID by looking it up in the index and returning +/// it. +impl DataReader for IndexedLmdbBlockStoreReadTransaction<'_> { + fn read(&self, key: EraId) -> Result, BlockStoreError> { + self.read_block_header_indexed(LmdbBlockStoreIndex::SwitchBlockEraId(IndexPosition::Key( + key, + ))) + } + + fn exists(&self, key: EraId) -> Result { + self.contains_data_indexed( + LmdbBlockStoreIndex::SwitchBlockEraId(IndexPosition::Key(key)), + DataType::BlockHeader, + ) + } +} + +impl DataReader for IndexedLmdbBlockStoreReadTransaction<'_> { + fn read(&self, key: EraId) -> Result, BlockStoreError> { + self.read_approvals_hashes_indexed(LmdbBlockStoreIndex::SwitchBlockEraId( + IndexPosition::Key(key), + )) + } + + fn exists(&self, key: EraId) -> Result { + self.contains_data_indexed( + LmdbBlockStoreIndex::SwitchBlockEraId(IndexPosition::Key(key)), + DataType::ApprovalsHashes, + ) + } +} + +impl DataReader for IndexedLmdbBlockStoreReadTransaction<'_> { + fn read(&self, key: EraId) -> Result, BlockStoreError> { + self.read_block_signatures_indexed(LmdbBlockStoreIndex::SwitchBlockEraId( + IndexPosition::Key(key), + )) + } + + fn exists(&self, key: EraId) -> Result { + self.contains_data_indexed( + LmdbBlockStoreIndex::SwitchBlockEraId(IndexPosition::Key(key)), + DataType::BlockSignatures, + ) + } +} + +impl DataReader for IndexedLmdbBlockStoreReadTransaction<'_> { + fn read(&self, _key: Tip) -> Result, BlockStoreError> { + self.read_block_header_indexed(LmdbBlockStoreIndex::BlockHeight(IndexPosition::Tip)) + } + + fn exists(&self, _key: Tip) -> 
Result { + self.contains_data_indexed( + LmdbBlockStoreIndex::BlockHeight(IndexPosition::Tip), + DataType::BlockHeader, + ) + } +} + +impl DataReader for IndexedLmdbBlockStoreReadTransaction<'_> { + fn read(&self, _key: Tip) -> Result, BlockStoreError> { + self.read_block_indexed(LmdbBlockStoreIndex::BlockHeight(IndexPosition::Tip)) + } + + fn exists(&self, _key: Tip) -> Result { + self.contains_data_indexed( + LmdbBlockStoreIndex::BlockHeight(IndexPosition::Tip), + DataType::Block, + ) + } +} + +impl DataReader for IndexedLmdbBlockStoreReadTransaction<'_> { + fn read(&self, _key: LatestSwitchBlock) -> Result, BlockStoreError> { + self.read_block_header_indexed(LmdbBlockStoreIndex::SwitchBlockEraId(IndexPosition::Tip)) + } + + fn exists(&self, _key: LatestSwitchBlock) -> Result { + self.contains_data_indexed( + LmdbBlockStoreIndex::SwitchBlockEraId(IndexPosition::Tip), + DataType::BlockHeader, + ) + } +} + +impl DataReader + for IndexedLmdbBlockStoreReadTransaction<'_> +{ + fn read(&self, key: TransactionHash) -> Result, BlockStoreError> { + Ok(self.block_store.transaction_hash_index.get(&key).copied()) + } + + fn exists(&self, key: TransactionHash) -> Result { + Ok(self.block_store.transaction_hash_index.contains_key(&key)) + } +} + +impl DataReader for IndexedLmdbBlockStoreReadTransaction<'_> { + fn read(&self, key: TransactionHash) -> Result, BlockStoreError> { + self.block_store + .block_store + .transaction_dbs + .get(&self.txn, &key) + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err))) + } + + fn exists(&self, key: TransactionHash) -> Result { + self.block_store + .block_store + .transaction_exists(&self.txn, &key) + } +} + +impl DataReader> for IndexedLmdbBlockStoreReadTransaction<'_> { + fn read(&self, key: TransactionHash) -> Result>, BlockStoreError> { + self.block_store + .block_store + .finalized_transaction_approvals_dbs + .get(&self.txn, &key) + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err))) + } + + fn exists(&self, key: 
TransactionHash) -> Result { + self.block_store + .block_store + .finalized_transaction_approvals_dbs + .exists(&self.txn, &key) + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err))) + } +} + +impl DataReader for IndexedLmdbBlockStoreReadTransaction<'_> { + fn read(&self, key: TransactionHash) -> Result, BlockStoreError> { + self.block_store + .block_store + .execution_result_dbs + .get(&self.txn, &key) + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err))) + } + + fn exists(&self, key: TransactionHash) -> Result { + self.block_store + .block_store + .execution_result_dbs + .exists(&self.txn, &key) + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err))) + } +} + +impl DataReader> for IndexedLmdbBlockStoreReadTransaction<'_> { + fn read(&self, StateStoreKey(key): StateStoreKey) -> Result>, BlockStoreError> { + self.block_store + .block_store + .read_state_store(&self.txn, &key) + } + + fn exists(&self, StateStoreKey(key): StateStoreKey) -> Result { + self.block_store + .block_store + .state_store_key_exists(&self.txn, &key) + } +} + +impl DataReader<(DbTableId, Vec), DbRawBytesSpec> for IndexedLmdbBlockStoreReadTransaction<'_> { + fn read( + &self, + (id, key): (DbTableId, Vec), + ) -> Result, BlockStoreError> { + if key.is_empty() { + return Ok(None); + } + let store = &self.block_store.block_store; + let res = match id { + DbTableId::BlockHeader => store.block_header_dbs.get_raw(&self.txn, &key), + DbTableId::BlockBody => store.block_body_dbs.get_raw(&self.txn, &key), + DbTableId::ApprovalsHashes => store.approvals_hashes_dbs.get_raw(&self.txn, &key), + DbTableId::BlockMetadata => store.block_metadata_dbs.get_raw(&self.txn, &key), + DbTableId::Transaction => store.transaction_dbs.get_raw(&self.txn, &key), + DbTableId::ExecutionResult => store.execution_result_dbs.get_raw(&self.txn, &key), + DbTableId::Transfer => store.transfer_dbs.get_raw(&self.txn, &key), + DbTableId::FinalizedTransactionApprovals => store + 
.finalized_transaction_approvals_dbs + .get_raw(&self.txn, &key), + }; + res.map_err(|err| BlockStoreError::InternalStorage(Box::new(err))) + } + + fn exists(&self, key: (DbTableId, Vec)) -> Result { + self.read(key).map(|res| res.is_some()) + } +} + +impl DataWriter for IndexedLmdbBlockStoreRWTransaction<'_> { + /// Writes a block to storage. + /// + /// Returns `Ok(true)` if the block has been successfully written, `Ok(false)` if a part of it + /// couldn't be written because it already existed, and `Err(_)` if there was an error. + fn write(&mut self, data: &Block) -> Result { + let block_header = data.clone_header(); + let block_hash = data.hash(); + let block_height = data.height(); + let era_id = data.era_id(); + let transaction_hashes: Vec = match &data { + Block::V1(v1) => v1 + .deploy_and_transfer_hashes() + .map(TransactionHash::from) + .collect(), + Block::V2(v2) => v2.all_transactions().copied().collect(), + }; + + let update_height_index = + self.should_update_block_height_index(block_height, block_hash)?; + let update_switch_block_index = self.should_update_switch_block_index(&block_header)?; + let update_transaction_hash_index = + self.should_update_transaction_hash_index(&transaction_hashes, block_hash)?; + + let key = self.block_store.write_block(&mut self.txn, data)?; + + if update_height_index { + self.block_height_index.insert(block_height, *block_hash); + } + + if update_switch_block_index { + self.switch_block_era_id_index.insert(era_id, *block_hash); + } + + if update_transaction_hash_index { + for hash in transaction_hashes { + self.transaction_hash_index.insert( + hash, + BlockHashHeightAndEra::new(*block_hash, block_height, era_id), + ); + } + } + + Ok(key) + } + + fn delete(&mut self, key: BlockHash) -> Result<(), BlockStoreError> { + let maybe_block = self.block_store.get_single_block(&self.txn, &key)?; + + if let Some(block) = maybe_block { + let transaction_hashes: Vec = match &block { + Block::V1(v1) => v1 + 
.deploy_and_transfer_hashes() + .map(TransactionHash::from) + .collect(), + Block::V2(v2) => v2.all_transactions().copied().collect(), + }; + + self.block_store.delete_block_header(&mut self.txn, &key)?; + + /* + TODO: currently we don't delete the block body since other blocks may reference it. + self.block_store + .delete_block_body(&mut self.txn, block.body_hash())?; + */ + + self.block_height_index.remove(block.height()); + + if block.is_switch_block() { + self.switch_block_era_id_index.remove(block.era_id()); + } + + for hash in transaction_hashes { + self.transaction_hash_index.remove(hash); + } + + self.block_store + .delete_finality_signatures(&mut self.txn, &key)?; + } + Ok(()) + } +} + +impl DataWriter for IndexedLmdbBlockStoreRWTransaction<'_> { + fn write(&mut self, data: &ApprovalsHashes) -> Result { + self.block_store.write_approvals_hashes(&mut self.txn, data) + } + + fn delete(&mut self, key: BlockHash) -> Result<(), BlockStoreError> { + self.block_store + .delete_approvals_hashes(&mut self.txn, &key) + } +} + +impl DataWriter for IndexedLmdbBlockStoreRWTransaction<'_> { + fn write(&mut self, data: &BlockSignatures) -> Result { + self.block_store + .write_finality_signatures(&mut self.txn, data) + } + + fn delete(&mut self, key: BlockHash) -> Result<(), BlockStoreError> { + self.block_store + .delete_finality_signatures(&mut self.txn, &key) + } +} + +impl DataWriter for IndexedLmdbBlockStoreRWTransaction<'_> { + fn write(&mut self, data: &BlockHeader) -> Result { + let block_hash = data.block_hash(); + let block_height = data.height(); + let era_id = data.era_id(); + + let update_height_index = + self.should_update_block_height_index(block_height, &block_hash)?; + let update_switch_block_index = self.should_update_switch_block_index(data)?; + + let key = self.block_store.write_block_header(&mut self.txn, data)?; + + if update_height_index { + self.block_height_index.insert(block_height, block_hash); + } + + if update_switch_block_index { + 
self.switch_block_era_id_index.insert(era_id, block_hash); + } + + Ok(key) + } + + fn delete(&mut self, key: BlockHash) -> Result<(), BlockStoreError> { + let maybe_block_header = self.block_store.get_single_block_header(&self.txn, &key)?; + + if let Some(block_header) = maybe_block_header { + self.block_store.delete_block_header(&mut self.txn, &key)?; + + if block_header.is_switch_block() { + self.switch_block_era_id_index.remove(block_header.era_id()); + } + + self.block_height_index.remove(block_header.height()); + } + Ok(()) + } +} + +impl DataWriter for IndexedLmdbBlockStoreRWTransaction<'_> { + fn write(&mut self, data: &Transaction) -> Result { + self.block_store.write_transaction(&mut self.txn, data) + } + + fn delete(&mut self, key: TransactionHash) -> Result<(), BlockStoreError> { + self.block_store.delete_transaction(&mut self.txn, &key) + } +} + +impl DataWriter + for IndexedLmdbBlockStoreRWTransaction<'_> +{ + fn write( + &mut self, + data: &TransactionFinalizedApprovals, + ) -> Result { + self.block_store + .finalized_transaction_approvals_dbs + .put( + &mut self.txn, + &data.transaction_hash, + &data.finalized_approvals, + true, + ) + .map(|_| data.transaction_hash) + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err))) + } + + fn delete(&mut self, key: TransactionHash) -> Result<(), BlockStoreError> { + self.block_store + .finalized_transaction_approvals_dbs + .delete(&mut self.txn, &key) + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err))) + } +} + +impl DataWriter + for IndexedLmdbBlockStoreRWTransaction<'_> +{ + fn write( + &mut self, + data: &BlockExecutionResults, + ) -> Result { + let transaction_hashes: Vec = data.exec_results.keys().copied().collect(); + let block_hash = data.block_info.block_hash; + let block_height = data.block_info.block_height; + let era_id = data.block_info.era_id; + + let update_transaction_hash_index = + self.should_update_transaction_hash_index(&transaction_hashes, &block_hash)?; + + let _ = 
self.block_store.write_execution_results( + &mut self.txn, + &block_hash, + data.exec_results.clone(), + )?; + + if update_transaction_hash_index { + for hash in transaction_hashes { + self.transaction_hash_index.insert( + hash, + BlockHashHeightAndEra::new(block_hash, block_height, era_id), + ); + } + } + + Ok(data.block_info) + } + + fn delete(&mut self, _key: BlockHashHeightAndEra) -> Result<(), BlockStoreError> { + Err(BlockStoreError::UnsupportedOperation) + } +} + +impl DataWriter for IndexedLmdbBlockStoreRWTransaction<'_> { + fn write(&mut self, data: &BlockTransfers) -> Result { + self.block_store + .write_transfers(&mut self.txn, &data.block_hash, &data.transfers) + .map(|_| data.block_hash) + } + + fn delete(&mut self, key: BlockHash) -> Result<(), BlockStoreError> { + self.block_store.delete_transfers(&mut self.txn, &key) + } +} + +impl DataWriter, StateStore> for IndexedLmdbBlockStoreRWTransaction<'_> { + fn write(&mut self, data: &StateStore) -> Result, BlockStoreError> { + self.block_store + .write_state_store(&mut self.txn, data.key.clone(), &data.value)?; + Ok(data.key.clone()) + } + + fn delete(&mut self, key: Cow<'static, [u8]>) -> Result<(), BlockStoreError> { + self.block_store.delete_state_store(&mut self.txn, key) + } +} + +impl DataReader for IndexedLmdbBlockStoreRWTransaction<'_> { + fn read(&self, query: TransactionHash) -> Result, BlockStoreError> { + self.block_store + .transaction_dbs + .get(&self.txn, &query) + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err))) + } + + fn exists(&self, query: TransactionHash) -> Result { + self.block_store.transaction_exists(&self.txn, &query) + } +} + +impl DataReader for IndexedLmdbBlockStoreRWTransaction<'_> { + fn read(&self, key: BlockHash) -> Result, BlockStoreError> { + self.block_store.get_block_signatures(&self.txn, &key) + } + + fn exists(&self, key: BlockHash) -> Result { + self.block_store.block_signatures_exist(&self.txn, &key) + } +} + +impl DataReader> for 
IndexedLmdbBlockStoreRWTransaction<'_> { + fn read(&self, query: TransactionHash) -> Result>, BlockStoreError> { + self.block_store + .finalized_transaction_approvals_dbs + .get(&self.txn, &query) + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err))) + } + + fn exists(&self, query: TransactionHash) -> Result { + self.block_store + .finalized_transaction_approvals_dbs + .exists(&self.txn, &query) + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err))) + } +} + +impl DataReader for IndexedLmdbBlockStoreRWTransaction<'_> { + fn read(&self, key: BlockHash) -> Result, BlockStoreError> { + self.block_store.get_single_block(&self.txn, &key) + } + + fn exists(&self, key: BlockHash) -> Result { + self.block_store.block_exists(&self.txn, &key) + } +} + +impl DataReader for IndexedLmdbBlockStoreRWTransaction<'_> { + fn read(&self, key: BlockHash) -> Result, BlockStoreError> { + self.block_store.get_single_block_header(&self.txn, &key) + } + + fn exists(&self, key: BlockHash) -> Result { + self.block_store.block_header_exists(&self.txn, &key) + } +} + +impl DataReader for IndexedLmdbBlockStoreRWTransaction<'_> { + fn read(&self, query: TransactionHash) -> Result, BlockStoreError> { + self.block_store + .execution_result_dbs + .get(&self.txn, &query) + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err))) + } + + fn exists(&self, query: TransactionHash) -> Result { + self.block_store + .execution_result_dbs + .exists(&self.txn, &query) + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err))) + } +} + +impl DataReader> for IndexedLmdbBlockStoreRWTransaction<'_> { + fn read(&self, key: BlockHash) -> Result>, BlockStoreError> { + self.block_store.get_transfers(&self.txn, &key) + } + + fn exists(&self, key: BlockHash) -> Result { + self.block_store.has_transfers(&self.txn, &key) + } +} diff --git a/storage/src/block_store/lmdb/lmdb_block_store.rs b/storage/src/block_store/lmdb/lmdb_block_store.rs new file mode 100644 index 
0000000000..02fea2156f --- /dev/null +++ b/storage/src/block_store/lmdb/lmdb_block_store.rs @@ -0,0 +1,941 @@ +use std::{ + borrow::Cow, + collections::{BTreeSet, HashMap}, + path::{Path, PathBuf}, + sync::Arc, +}; + +use datasize::DataSize; +use tracing::{debug, error}; + +use casper_types::{ + execution::{execution_result_v1, ExecutionResult, ExecutionResultV1}, + Approval, Block, BlockBody, BlockHash, BlockHeader, BlockSignatures, Digest, Transaction, + TransactionHash, Transfer, +}; + +use super::{ + lmdb_ext::{LmdbExtError, TransactionExt}, + versioned_databases::VersionedDatabases, +}; +use crate::block_store::{ + error::BlockStoreError, + types::{ + ApprovalsHashes, BlockExecutionResults, BlockHashHeightAndEra, BlockTransfers, StateStore, + TransactionFinalizedApprovals, Transfers, + }, + BlockStoreProvider, BlockStoreTransaction, DataReader, DataWriter, +}; +use lmdb::{ + Database, DatabaseFlags, Environment, EnvironmentFlags, RoTransaction, RwTransaction, + Transaction as LmdbTransaction, WriteFlags, +}; + +/// Filename for the LMDB database created by the Storage component. +const STORAGE_DB_FILENAME: &str = "storage.lmdb"; + +/// We can set this very low, as there is only a single reader/writer accessing the component at any +/// one time. +const MAX_TRANSACTIONS: u32 = 5; + +/// Maximum number of allowed dbs. +const MAX_DB_COUNT: u32 = 17; + +/// OS-specific lmdb flags. +#[cfg(not(target_os = "macos"))] +const OS_FLAGS: EnvironmentFlags = EnvironmentFlags::WRITE_MAP; + +/// OS-specific lmdb flags. +/// +/// Mac OS X exhibits performance regressions when `WRITE_MAP` is used. +#[cfg(target_os = "macos")] +const OS_FLAGS: EnvironmentFlags = EnvironmentFlags::empty(); + +/// Lmdb block store. +#[derive(DataSize, Debug)] +pub struct LmdbBlockStore { + /// Storage location. + root: PathBuf, + /// Environment holding LMDB databases. + #[data_size(skip)] + pub(super) env: Arc, + /// The block header databases. 
+ pub(super) block_header_dbs: VersionedDatabases, + /// The block body databases. + pub(super) block_body_dbs: VersionedDatabases, + /// The approvals hashes databases. + pub(super) approvals_hashes_dbs: VersionedDatabases, + /// The block metadata db. + pub(super) block_metadata_dbs: VersionedDatabases, + /// The transaction databases. + pub(super) transaction_dbs: VersionedDatabases, + /// Databases of `ExecutionResult`s indexed by transaction hash for current DB or by deploy + /// hash for legacy DB. + pub(super) execution_result_dbs: VersionedDatabases, + /// The transfer databases. + pub(super) transfer_dbs: VersionedDatabases, + /// The state storage database. + #[data_size(skip)] + state_store_db: Database, + /// The finalized transaction approvals databases. + pub(super) finalized_transaction_approvals_dbs: + VersionedDatabases>, +} + +impl LmdbBlockStore { + /// Ctor. + pub fn new(root_path: &Path, total_size: usize) -> Result { + // Create the environment and databases. + let env = new_environment(total_size, root_path)?; + + let block_header_dbs = VersionedDatabases::new(&env, "block_header", "block_header_v2") + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?; + let block_body_dbs = + VersionedDatabases::<_, BlockBody>::new(&env, "block_body", "block_body_v2") + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?; + let block_metadata_dbs = + VersionedDatabases::new(&env, "block_metadata", "block_metadata_v2") + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?; + let transaction_dbs = VersionedDatabases::new(&env, "deploys", "transactions") + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?; + let execution_result_dbs = + VersionedDatabases::new(&env, "deploy_metadata", "execution_results") + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?; + let transfer_dbs = VersionedDatabases::new(&env, "transfer", "versioned_transfers") + .map_err(|err| 
BlockStoreError::InternalStorage(Box::new(err)))?; + let state_store_db = env + .create_db(Some("state_store"), DatabaseFlags::empty()) + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?; + + let finalized_transaction_approvals_dbs = + VersionedDatabases::new(&env, "finalized_approvals", "versioned_finalized_approvals") + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?; + let approvals_hashes_dbs = + VersionedDatabases::new(&env, "approvals_hashes", "versioned_approvals_hashes") + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?; + + Ok(Self { + root: root_path.to_path_buf(), + env: Arc::new(env), + block_header_dbs, + block_body_dbs, + approvals_hashes_dbs, + block_metadata_dbs, + transaction_dbs, + execution_result_dbs, + transfer_dbs, + state_store_db, + finalized_transaction_approvals_dbs, + }) + } + + /// Write finality signatures. + pub fn write_finality_signatures( + &self, + txn: &mut RwTransaction, + signatures: &BlockSignatures, + ) -> Result { + let block_hash = signatures.block_hash(); + let _ = self + .block_metadata_dbs + .put(txn, block_hash, signatures, true) + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?; + + Ok(*block_hash) + } + + pub(crate) fn delete_finality_signatures( + &self, + txn: &mut RwTransaction, + block_hash: &BlockHash, + ) -> Result<(), BlockStoreError> { + self.block_metadata_dbs + .delete(txn, block_hash) + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err))) + } + + pub(crate) fn transaction_exists( + &self, + txn: &Tx, + transaction_hash: &TransactionHash, + ) -> Result { + self.transaction_dbs + .exists(txn, transaction_hash) + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err))) + } + + /// Returns `true` if the given block's header and body are stored. + pub(crate) fn block_exists( + &self, + txn: &Tx, + block_hash: &BlockHash, + ) -> Result { + let block_header = match self.get_single_block_header(txn, block_hash)? 
{ + Some(block_header) => block_header, + None => { + return Ok(false); + } + }; + self.block_body_dbs + .exists(txn, block_header.body_hash()) + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err))) + } + + /// Returns `true` if the given block's header is stored. + pub(crate) fn block_header_exists( + &self, + txn: &Tx, + block_hash: &BlockHash, + ) -> Result { + self.block_header_dbs + .exists(txn, block_hash) + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err))) + } + + pub(crate) fn get_transfers( + &self, + txn: &Tx, + block_hash: &BlockHash, + ) -> Result>, BlockStoreError> { + Ok(self + .transfer_dbs + .get(txn, block_hash) + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))? + .map(Transfers::into_owned)) + } + + pub(crate) fn has_transfers( + &self, + txn: &Tx, + block_hash: &BlockHash, + ) -> Result { + self.transfer_dbs + .exists(txn, block_hash) + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err))) + } + + pub(crate) fn read_state_store, Tx: lmdb::Transaction>( + &self, + txn: &Tx, + key: &K, + ) -> Result>, BlockStoreError> { + let bytes = match txn.get(self.state_store_db, &key) { + Ok(slice) => Some(slice.to_owned()), + Err(lmdb::Error::NotFound) => None, + Err(err) => return Err(BlockStoreError::InternalStorage(Box::new(err))), + }; + Ok(bytes) + } + + /// Retrieves approvals hashes by block hash. + pub(crate) fn read_approvals_hashes( + &self, + txn: &Tx, + block_hash: &BlockHash, + ) -> Result, BlockStoreError> { + self.approvals_hashes_dbs + .get(txn, block_hash) + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err))) + } + + pub(crate) fn approvals_hashes_exist( + &self, + txn: &Tx, + block_hash: &BlockHash, + ) -> Result { + self.approvals_hashes_dbs + .exists(txn, block_hash) + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err))) + } + + /// Put a single transaction into storage. 
+ pub(crate) fn write_transaction( + &self, + txn: &mut RwTransaction, + transaction: &Transaction, + ) -> Result { + let transaction_hash = transaction.hash(); + self.transaction_dbs + .put(txn, &transaction_hash, transaction, false) + .map(|_| transaction_hash) + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err))) + } + + pub(crate) fn delete_transaction( + &self, + txn: &mut RwTransaction, + transaction_hash: &TransactionHash, + ) -> Result<(), BlockStoreError> { + self.transaction_dbs + .delete(txn, transaction_hash) + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err))) + } + + pub(crate) fn write_transfers( + &self, + txn: &mut RwTransaction, + block_hash: &BlockHash, + transfers: &[Transfer], + ) -> Result { + self.transfer_dbs + .put( + txn, + block_hash, + &Transfers::from(transfers.to_owned()), + true, + ) + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err))) + } + + pub(crate) fn delete_transfers( + &self, + txn: &mut RwTransaction, + block_hash: &BlockHash, + ) -> Result<(), BlockStoreError> { + self.transfer_dbs + .delete(txn, block_hash) + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err))) + } + + /// Writes a key to the state storage database. + // See note below why `key` and `data` are not `&[u8]`s. + pub(crate) fn write_state_store( + &self, + txn: &mut RwTransaction, + key: Cow<'static, [u8]>, + data: &Vec, + ) -> Result<(), BlockStoreError> { + // Note: The interface of `lmdb` seems suboptimal: `&K` and `&V` could simply be `&[u8]` for + // simplicity. At the very least it seems to be missing a `?Sized` trait bound. For + // this reason, we need to use actual sized types in the function signature above. 
+ txn.put(self.state_store_db, &key, data, WriteFlags::default()) + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?; + + Ok(()) + } + + pub(crate) fn state_store_key_exists, Tx: lmdb::Transaction>( + &self, + txn: &Tx, + key: &K, + ) -> Result { + txn.value_exists(self.state_store_db, &key) + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err))) + } + + pub(crate) fn delete_state_store( + &self, + txn: &mut RwTransaction, + key: Cow<'static, [u8]>, + ) -> Result<(), BlockStoreError> { + txn.del(self.state_store_db, &key, None) + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err))) + } + + /// Retrieves a single block header in a given transaction from storage. + pub(crate) fn get_single_block_header( + &self, + txn: &Tx, + block_hash: &BlockHash, + ) -> Result, BlockStoreError> { + let block_header = match self + .block_header_dbs + .get(txn, block_hash) + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))? + { + Some(block_header) => block_header, + None => return Ok(None), + }; + block_header.set_block_hash(*block_hash); + Ok(Some(block_header)) + } + + /// Retrieves block signatures for a block with a given block hash. + pub(crate) fn get_block_signatures( + &self, + txn: &Tx, + block_hash: &BlockHash, + ) -> Result, BlockStoreError> { + self.block_metadata_dbs + .get(txn, block_hash) + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err))) + } + + pub(crate) fn block_signatures_exist( + &self, + txn: &Tx, + block_hash: &BlockHash, + ) -> Result { + self.block_metadata_dbs + .exists(txn, block_hash) + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err))) + } + + /// Retrieves a single block from storage. + pub(crate) fn get_single_block( + &self, + txn: &Tx, + block_hash: &BlockHash, + ) -> Result, BlockStoreError> { + let block_header: BlockHeader = match self.get_single_block_header(txn, block_hash)? 
{ + Some(block_header) => block_header, + None => { + debug!( + ?block_hash, + "get_single_block: missing block header for {}", block_hash + ); + return Ok(None); + } + }; + + let maybe_block_body = self + .block_body_dbs + .get(txn, block_header.body_hash()) + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err))); + let block_body = match maybe_block_body? { + Some(block_body) => block_body, + None => { + debug!( + ?block_header, + "get_single_block: missing block body for {}", + block_header.block_hash() + ); + return Ok(None); + } + }; + let block = Block::new_from_header_and_body(block_header, block_body) + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?; + Ok(Some(block)) + } + + /// Writes a block to storage. + /// + /// Returns `Ok(true)` if the block has been successfully written, `Ok(false)` if a part of it + /// couldn't be written because it already existed, and `Err(_)` if there was an error. + pub(crate) fn write_block( + &self, + txn: &mut RwTransaction, + block: &Block, + ) -> Result { + let block_hash = *block.hash(); + let _ = self + .block_body_dbs + .put(txn, block.body_hash(), &block.clone_body(), true) + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?; + + let block_header = block.clone_header(); + let _ = self + .block_header_dbs + .put(txn, block.hash(), &block_header, true) + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?; + + Ok(block_hash) + } + + pub(crate) fn write_block_header( + &self, + txn: &mut RwTransaction, + block_header: &BlockHeader, + ) -> Result { + let block_hash = block_header.block_hash(); + self.block_header_dbs + .put(txn, &block_hash, block_header, true) + .map(|_| block_hash) + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err))) + } + + pub(crate) fn delete_block_header( + &self, + txn: &mut RwTransaction, + block_hash: &BlockHash, + ) -> Result<(), BlockStoreError> { + self.block_header_dbs + .delete(txn, block_hash) + .map_err(|err| 
BlockStoreError::InternalStorage(Box::new(err))) + } + + pub(crate) fn delete_block_body( + &self, + txn: &mut RwTransaction, + block_body_hash: &Digest, + ) -> Result<(), BlockStoreError> { + self.block_body_dbs + .delete(txn, block_body_hash) + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err))) + } + + /// Writes approvals hashes to storage. + pub(crate) fn write_approvals_hashes( + &self, + txn: &mut RwTransaction, + approvals_hashes: &ApprovalsHashes, + ) -> Result { + let block_hash = approvals_hashes.block_hash(); + let _ = self + .approvals_hashes_dbs + .put(txn, block_hash, approvals_hashes, true) + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?; + Ok(*block_hash) + } + + pub(crate) fn delete_approvals_hashes( + &self, + txn: &mut RwTransaction, + block_hash: &BlockHash, + ) -> Result<(), BlockStoreError> { + self.approvals_hashes_dbs + .delete(txn, block_hash) + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err))) + } + + pub(crate) fn write_execution_results( + &self, + txn: &mut RwTransaction, + block_hash: &BlockHash, + execution_results: HashMap, + ) -> Result { + let mut transfers: Vec = vec![]; + for (transaction_hash, execution_result) in execution_results.into_iter() { + transfers.extend(successful_transfers(&execution_result)); + + let maybe_stored_execution_result: Option = self + .checkout_ro() + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))? + .read(transaction_hash)?; + + // If we have a previous execution result, we can continue if it is the same. 
+ match maybe_stored_execution_result { + Some(stored_execution_result) if stored_execution_result == execution_result => { + continue + } + Some(_) | None => (), + } + + let was_written = self + .execution_result_dbs + .put(txn, &transaction_hash, &execution_result, true) + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?; + + if !was_written { + error!( + ?block_hash, + ?transaction_hash, + "failed to write execution results" + ); + debug_assert!(was_written); + } + } + + let was_written = self + .transfer_dbs + .put(txn, block_hash, &Transfers::from(transfers), true) + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?; + if !was_written { + error!(?block_hash, "failed to write transfers"); + debug_assert!(was_written); + } + Ok(was_written) + } + + pub(crate) fn delete_execution_results( + &self, + txn: &mut RwTransaction, + block_hash: &BlockHash, + ) -> Result { + let block = self.get_single_block(txn, block_hash)?; + + if let Some(block) = block { + for txn_hash in block.all_transaction_hashes() { + self.execution_result_dbs + .delete(txn, &txn_hash) + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?; + } + } + Ok(true) + } +} + +pub(crate) fn new_environment( + total_size: usize, + root: &Path, +) -> Result { + Environment::new() + .set_flags( + OS_FLAGS + // We manage our own directory. + | EnvironmentFlags::NO_SUB_DIR + // Disable thread local storage, strongly suggested for operation with tokio. + | EnvironmentFlags::NO_TLS + // Disable read-ahead. Our data is not stored/read in sequence that would benefit from the read-ahead. 
+ | EnvironmentFlags::NO_READAHEAD, + ) + .set_max_readers(MAX_TRANSACTIONS) + .set_max_dbs(MAX_DB_COUNT) + .set_map_size(total_size) + .open(&root.join(STORAGE_DB_FILENAME)) + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err))) +} + +/// Returns all `Transform::WriteTransfer`s from the execution effects if this is an +/// `ExecutionResult::Success`, or an empty `Vec` if `ExecutionResult::Failure`. +fn successful_transfers(execution_result: &ExecutionResult) -> Vec { + let mut all_transfers: Vec = vec![]; + match execution_result { + ExecutionResult::V1(ExecutionResultV1::Success { effect, .. }) => { + for transform_entry in &effect.transforms { + if let execution_result_v1::TransformKindV1::WriteTransfer(transfer_v1) = + &transform_entry.transform + { + all_transfers.push(Transfer::V1(transfer_v1.clone())); + } + } + } + ExecutionResult::V2(execution_result_v2) => { + if execution_result_v2.error_message.is_none() { + for transfer in &execution_result_v2.transfers { + all_transfers.push(transfer.clone()); + } + } + // else no-op: we only record transfers from successful executions. + } + ExecutionResult::V1(ExecutionResultV1::Failure { .. }) => { + // No-op: we only record transfers from successful executions. 
+ } + } + + all_transfers +} + +impl BlockStoreProvider for LmdbBlockStore { + type Reader<'t> = LmdbBlockStoreTransaction<'t, RoTransaction<'t>>; + type ReaderWriter<'t> = LmdbBlockStoreTransaction<'t, RwTransaction<'t>>; + + fn checkout_ro(&self) -> Result, BlockStoreError> { + let txn = self + .env + .begin_ro_txn() + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?; + Ok(LmdbBlockStoreTransaction { + txn, + block_store: self, + }) + } + + fn checkout_rw(&mut self) -> Result, BlockStoreError> { + let txn = self + .env + .begin_rw_txn() + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?; + + Ok(LmdbBlockStoreTransaction { + txn, + block_store: self, + }) + } +} + +pub struct LmdbBlockStoreTransaction<'t, T> +where + T: LmdbTransaction, +{ + txn: T, + block_store: &'t LmdbBlockStore, +} + +impl BlockStoreTransaction for LmdbBlockStoreTransaction<'_, T> +where + T: LmdbTransaction, +{ + fn commit(self) -> Result<(), BlockStoreError> { + self.txn + .commit() + .map_err(|e| BlockStoreError::InternalStorage(Box::new(LmdbExtError::from(e)))) + } + + fn rollback(self) { + self.txn.abort(); + } +} + +impl DataReader for LmdbBlockStoreTransaction<'_, T> +where + T: LmdbTransaction, +{ + fn read(&self, key: BlockHash) -> Result, BlockStoreError> { + self.block_store.get_single_block(&self.txn, &key) + } + + fn exists(&self, key: BlockHash) -> Result { + self.block_store.block_exists(&self.txn, &key) + } +} + +impl DataReader for LmdbBlockStoreTransaction<'_, T> +where + T: LmdbTransaction, +{ + fn read(&self, key: BlockHash) -> Result, BlockStoreError> { + self.block_store.get_single_block_header(&self.txn, &key) + } + + fn exists(&self, key: BlockHash) -> Result { + self.block_store.block_header_exists(&self.txn, &key) + } +} + +impl DataReader for LmdbBlockStoreTransaction<'_, T> +where + T: LmdbTransaction, +{ + fn read(&self, key: BlockHash) -> Result, BlockStoreError> { + self.block_store.read_approvals_hashes(&self.txn, &key) + } + + 
fn exists(&self, key: BlockHash) -> Result { + self.block_store.block_header_exists(&self.txn, &key) + } +} + +impl DataReader for LmdbBlockStoreTransaction<'_, T> +where + T: LmdbTransaction, +{ + fn read(&self, key: BlockHash) -> Result, BlockStoreError> { + self.block_store.get_block_signatures(&self.txn, &key) + } + + fn exists(&self, key: BlockHash) -> Result { + self.block_store.block_signatures_exist(&self.txn, &key) + } +} + +impl DataReader for LmdbBlockStoreTransaction<'_, T> +where + T: LmdbTransaction, +{ + fn read(&self, key: TransactionHash) -> Result, BlockStoreError> { + self.block_store + .transaction_dbs + .get(&self.txn, &key) + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err))) + } + + fn exists(&self, key: TransactionHash) -> Result { + self.block_store.transaction_exists(&self.txn, &key) + } +} + +impl DataReader> for LmdbBlockStoreTransaction<'_, T> +where + T: LmdbTransaction, +{ + fn read(&self, key: TransactionHash) -> Result>, BlockStoreError> { + self.block_store + .finalized_transaction_approvals_dbs + .get(&self.txn, &key) + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err))) + } + + fn exists(&self, key: TransactionHash) -> Result { + self.block_store + .finalized_transaction_approvals_dbs + .exists(&self.txn, &key) + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err))) + } +} + +impl DataReader for LmdbBlockStoreTransaction<'_, T> +where + T: LmdbTransaction, +{ + fn read(&self, key: TransactionHash) -> Result, BlockStoreError> { + self.block_store + .execution_result_dbs + .get(&self.txn, &key) + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err))) + } + + fn exists(&self, key: TransactionHash) -> Result { + self.block_store + .execution_result_dbs + .exists(&self.txn, &key) + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err))) + } +} + +impl DataReader> for LmdbBlockStoreTransaction<'_, T> +where + T: LmdbTransaction, +{ + fn read(&self, key: BlockHash) -> Result>, 
BlockStoreError> { + self.block_store.get_transfers(&self.txn, &key) + } + + fn exists(&self, key: BlockHash) -> Result { + self.block_store.has_transfers(&self.txn, &key) + } +} + +impl DataReader> for LmdbBlockStoreTransaction<'_, T> +where + K: AsRef<[u8]>, + T: LmdbTransaction, +{ + fn read(&self, key: K) -> Result>, BlockStoreError> { + self.block_store.read_state_store(&self.txn, &key) + } + + fn exists(&self, key: K) -> Result { + self.block_store.state_store_key_exists(&self.txn, &key) + } +} + +impl<'t> DataWriter for LmdbBlockStoreTransaction<'t, RwTransaction<'t>> { + /// Writes a block to storage. + fn write(&mut self, data: &Block) -> Result { + self.block_store.write_block(&mut self.txn, data) + } + + fn delete(&mut self, key: BlockHash) -> Result<(), BlockStoreError> { + let maybe_block = self.block_store.get_single_block_header(&self.txn, &key)?; + + if let Some(block_header) = maybe_block { + self.block_store.delete_block_header(&mut self.txn, &key)?; + self.block_store + .delete_block_body(&mut self.txn, block_header.body_hash())?; + } + Ok(()) + } +} + +impl<'t> DataWriter + for LmdbBlockStoreTransaction<'t, RwTransaction<'t>> +{ + fn write(&mut self, data: &ApprovalsHashes) -> Result { + self.block_store.write_approvals_hashes(&mut self.txn, data) + } + + fn delete(&mut self, key: BlockHash) -> Result<(), BlockStoreError> { + self.block_store + .delete_approvals_hashes(&mut self.txn, &key) + } +} + +impl<'t> DataWriter + for LmdbBlockStoreTransaction<'t, RwTransaction<'t>> +{ + fn write(&mut self, data: &BlockSignatures) -> Result { + self.block_store + .write_finality_signatures(&mut self.txn, data) + } + + fn delete(&mut self, key: BlockHash) -> Result<(), BlockStoreError> { + self.block_store + .delete_finality_signatures(&mut self.txn, &key) + } +} + +impl<'t> DataWriter for LmdbBlockStoreTransaction<'t, RwTransaction<'t>> { + fn write(&mut self, data: &BlockHeader) -> Result { + self.block_store.write_block_header(&mut self.txn, data) + } + 
+ fn delete(&mut self, key: BlockHash) -> Result<(), BlockStoreError> { + self.block_store.delete_block_header(&mut self.txn, &key) + } +} + +impl<'t> DataWriter + for LmdbBlockStoreTransaction<'t, RwTransaction<'t>> +{ + fn write(&mut self, data: &Transaction) -> Result { + self.block_store.write_transaction(&mut self.txn, data) + } + + fn delete(&mut self, key: TransactionHash) -> Result<(), BlockStoreError> { + self.block_store.delete_transaction(&mut self.txn, &key) + } +} + +impl<'t> DataWriter + for LmdbBlockStoreTransaction<'t, RwTransaction<'t>> +{ + fn write(&mut self, data: &BlockTransfers) -> Result { + self.block_store + .write_transfers(&mut self.txn, &data.block_hash, &data.transfers) + .map(|_| data.block_hash) + } + + fn delete(&mut self, key: BlockHash) -> Result<(), BlockStoreError> { + self.block_store.delete_transfers(&mut self.txn, &key) + } +} + +impl<'t> DataWriter, StateStore> + for LmdbBlockStoreTransaction<'t, RwTransaction<'t>> +{ + fn write(&mut self, data: &StateStore) -> Result, BlockStoreError> { + self.block_store + .write_state_store(&mut self.txn, data.key.clone(), &data.value)?; + Ok(data.key.clone()) + } + + fn delete(&mut self, key: Cow<'static, [u8]>) -> Result<(), BlockStoreError> { + self.block_store.delete_state_store(&mut self.txn, key) + } +} + +impl<'t> DataWriter + for LmdbBlockStoreTransaction<'t, RwTransaction<'t>> +{ + fn write( + &mut self, + data: &TransactionFinalizedApprovals, + ) -> Result { + self.block_store + .finalized_transaction_approvals_dbs + .put( + &mut self.txn, + &data.transaction_hash, + &data.finalized_approvals, + true, + ) + .map(|_| data.transaction_hash) + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err))) + } + + fn delete(&mut self, key: TransactionHash) -> Result<(), BlockStoreError> { + self.block_store + .finalized_transaction_approvals_dbs + .delete(&mut self.txn, &key) + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err))) + } +} + +impl<'t> DataWriter + for 
LmdbBlockStoreTransaction<'t, RwTransaction<'t>> +{ + fn write( + &mut self, + data: &BlockExecutionResults, + ) -> Result { + let block_hash = data.block_info.block_hash; + + let _ = self.block_store.write_execution_results( + &mut self.txn, + &block_hash, + data.exec_results.clone(), + )?; + + Ok(data.block_info) + } + + fn delete(&mut self, key: BlockHashHeightAndEra) -> Result<(), BlockStoreError> { + let block_hash = key.block_hash; + + let _ = self + .block_store + .delete_execution_results(&mut self.txn, &block_hash)?; + Ok(()) + } +} diff --git a/storage/src/block_store/lmdb/lmdb_ext.rs b/storage/src/block_store/lmdb/lmdb_ext.rs new file mode 100644 index 0000000000..3f965f1270 --- /dev/null +++ b/storage/src/block_store/lmdb/lmdb_ext.rs @@ -0,0 +1,477 @@ +//! LMDB extensions. +//! +//! Various traits and helper functions to extend the lower level LMDB functions. Unifies +//! lower-level storage errors from lmdb and serialization issues. +//! +//! ## Serialization +//! +//! The module also centralizes settings and methods for serialization for all parts of storage. +//! +//! Serialization errors are unified into a generic, type erased `std` error to allow for easy +//! interchange of the serialization format if desired. + +use std::{any::TypeId, collections::BTreeSet}; + +use lmdb::{Database, RwTransaction, Transaction, WriteFlags}; +use serde::de::DeserializeOwned; +#[cfg(test)] +use serde::Serialize; +use thiserror::Error; +use tracing::{error, warn}; + +use crate::block_store::types::{ApprovalsHashes, DeployMetadataV1}; +use casper_types::{ + bytesrepr::{self, FromBytes, ToBytes}, + execution::ExecutionResult, + system::auction::UnbondingPurse, + Approval, BlockBody, BlockHeader, BlockSignatures, Deploy, DeployHash, Transfer, +}; + +const UNBONDING_PURSE_V2_MAGIC_BYTES: &[u8] = &[121, 17, 133, 179, 91, 63, 69, 222]; + +/// Error wrapper for lower-level storage errors. 
+/// +/// Used to classify storage errors, allowing more accurate reporting on potential issues and +/// crashes. Indicates how to proceed (clearing storage entirely or just restarting) in most cases. +/// +/// Note that accessing a storage with an incompatible version of this software is also considered a +/// case of corruption. +#[derive(Debug, Error)] +pub enum LmdbExtError { + /// The internal database is corrupted and can probably not be salvaged. + #[error("internal storage corrupted: {0}")] + LmdbCorrupted(lmdb::Error), + /// The data stored inside the internal database is corrupted or formatted wrong. + #[error("internal data corrupted: {0}")] + DataCorrupted(Box), + /// A resource has been exhausted at runtime, restarting (potentially with different settings) + /// might fix the problem. Storage integrity is still intact. + #[error("storage exhausted resource (but still intact): {0}")] + ResourceExhausted(lmdb::Error), + /// Error neither corruption nor resource exhaustion occurred, likely a programming error. + #[error("unknown LMDB or serialization error, likely from a bug: {0}")] + Other(Box), +} + +#[derive(Debug, Error)] +#[error("{0}")] +pub struct BytesreprError(pub bytesrepr::Error); + +// Classifies an `lmdb::Error` according to our scheme. This one of the rare cases where we accept a +// blanked `From<>` implementation for error type conversion. 
+impl From for LmdbExtError { + fn from(lmdb_error: lmdb::Error) -> Self { + match lmdb_error { + lmdb::Error::PageNotFound + | lmdb::Error::Corrupted + | lmdb::Error::Panic + | lmdb::Error::VersionMismatch + | lmdb::Error::Invalid + | lmdb::Error::Incompatible => LmdbExtError::LmdbCorrupted(lmdb_error), + + lmdb::Error::MapFull + | lmdb::Error::DbsFull + | lmdb::Error::ReadersFull + | lmdb::Error::TlsFull + | lmdb::Error::TxnFull + | lmdb::Error::CursorFull + | lmdb::Error::PageFull + | lmdb::Error::MapResized => LmdbExtError::ResourceExhausted(lmdb_error), + + lmdb::Error::NotFound + | lmdb::Error::BadRslot + | lmdb::Error::BadTxn + | lmdb::Error::BadValSize + | lmdb::Error::BadDbi + | lmdb::Error::KeyExist + | lmdb::Error::Other(_) => LmdbExtError::Other(Box::new(lmdb_error)), + } + } +} + +/// Additional methods on transaction. +pub(super) trait TransactionExt { + /// Helper function to load a value from a database. + fn get_value, V: 'static + DeserializeOwned>( + &self, + db: Database, + key: &K, + ) -> Result, LmdbExtError>; + + /// Returns `true` if the given key has an entry in the given database. + fn value_exists>(&self, db: Database, key: &K) -> Result; + + /// Helper function to load a value from a database using the `bytesrepr` `ToBytes`/`FromBytes` + /// serialization. + fn get_value_bytesrepr( + &self, + db: Database, + key: &K, + ) -> Result, LmdbExtError>; + + fn value_exists_bytesrepr( + &self, + db: Database, + key: &K, + ) -> Result; +} + +/// Additional methods on write transactions. +pub(super) trait WriteTransactionExt { + /// Helper function to write a value to a database. + /// + /// Returns `true` if the value has actually been written, `false` if the key already existed. + /// + /// Setting `overwrite` to true will cause the value to always be written instead. 
+ #[cfg(test)] + fn put_value, V: 'static + Serialize>( + &mut self, + db: Database, + key: &K, + value: &V, + overwrite: bool, + ) -> Result; + + /// Helper function to write a value to a database using the `bytesrepr` `ToBytes`/`FromBytes` + /// serialization. + /// + /// Returns `true` if the value has actually been written, `false` if the key already existed. + /// + /// Setting `overwrite` to true will cause the value to always be written instead. + fn put_value_bytesrepr( + &mut self, + db: Database, + key: &K, + value: &V, + overwrite: bool, + ) -> Result; +} + +impl TransactionExt for T +where + T: Transaction, +{ + #[inline] + fn get_value, V: 'static + DeserializeOwned>( + &self, + db: Database, + key: &K, + ) -> Result, LmdbExtError> { + match self.get(db, key) { + // Deserialization failures are likely due to storage corruption. + Ok(raw) => deserialize_internal(raw), + Err(lmdb::Error::NotFound) => Ok(None), + Err(err) => Err(err.into()), + } + } + + #[inline] + fn value_exists>(&self, db: Database, key: &K) -> Result { + match self.get(db, key) { + Ok(_raw) => Ok(true), + Err(lmdb::Error::NotFound) => Ok(false), + Err(err) => Err(err.into()), + } + } + + #[inline] + fn get_value_bytesrepr( + &self, + db: Database, + key: &K, + ) -> Result, LmdbExtError> { + let serialized_key = serialize_bytesrepr(key)?; + match self.get(db, &serialized_key) { + // Deserialization failures are likely due to storage corruption. 
+ Ok(raw) => match deserialize_bytesrepr(raw) { + Ok(ret) => Ok(Some(ret)), + Err(err) => { + error!(%key, %err, raw_len = raw.len(), "get_value_bytesrepr deserialization"); + Err(err) + } + }, + Err(lmdb::Error::NotFound) => Ok(None), + Err(err) => Err(err.into()), + } + } + + #[inline] + fn value_exists_bytesrepr( + &self, + db: Database, + key: &K, + ) -> Result { + let serialized_key = serialize_bytesrepr(key)?; + match self.get(db, &serialized_key) { + Ok(_raw) => Ok(true), + Err(lmdb::Error::NotFound) => Ok(false), + Err(err) => Err(err.into()), + } + } +} + +/// Serializes `value` into the buffer. +/// In case the `value` is of the `UnbondingPurse` type it uses the specialized +/// function to provide compatibility with the legacy version of the `UnbondingPurse` struct. +/// See [`serialize_unbonding_purse`] for more details. +// TODO: Get rid of the 'static bound. +#[cfg(test)] +pub(crate) fn serialize_internal( + value: &V, +) -> Result, LmdbExtError> { + let buffer = if TypeId::of::() == TypeId::of::() { + serialize_unbonding_purse(value)? + } else { + serialize(value)? + }; + Ok(buffer) +} + +/// Deserializes an object from the raw bytes. +/// In case the expected object is of the `UnbondingPurse` type it uses the specialized +/// function to provide compatibility with the legacy version of the `UnbondingPurse` struct. +/// See [`deserialize_unbonding_purse`] for more details. 
+pub(crate) fn deserialize_internal( + raw: &[u8], +) -> Result, LmdbExtError> { + if TypeId::of::() == TypeId::of::() { + deserialize_unbonding_purse(raw).map(Some) + } else { + deserialize(raw).map(Some) + } +} + +impl WriteTransactionExt for RwTransaction<'_> { + #[cfg(test)] + fn put_value, V: 'static + Serialize>( + &mut self, + db: Database, + key: &K, + value: &V, + overwrite: bool, + ) -> Result { + let buffer = serialize_internal(value)?; + + let flags = if overwrite { + WriteFlags::empty() + } else { + WriteFlags::NO_OVERWRITE + }; + + match self.put(db, key, &buffer, flags) { + Ok(()) => Ok(true), + // If we did not add the value due to it already existing, just return `false`. + Err(lmdb::Error::KeyExist) => Ok(false), + Err(err) => Err(err.into()), + } + } + + fn put_value_bytesrepr( + &mut self, + db: Database, + key: &K, + value: &V, + overwrite: bool, + ) -> Result { + let serialized_key = serialize_bytesrepr(key)?; + let serialized_value = serialize_bytesrepr(value)?; + + let flags = if overwrite { + WriteFlags::empty() + } else { + WriteFlags::NO_OVERWRITE + }; + + match self.put(db, &serialized_key, &serialized_value, flags) { + Ok(()) => Ok(true), + // If we did not add the value due to it already existing, just return `false`. + Err(lmdb::Error::KeyExist) => Ok(false), + Err(err) => Err(err.into()), + } + } +} + +/// Deserializes from a buffer. 
+#[inline(always)] +pub(super) fn deserialize(raw: &[u8]) -> Result { + match bincode::deserialize(raw) { + Ok(value) => Ok(value), + Err(err) => { + // unfortunately, type_name is unstable + let type_name = { + if TypeId::of::() == TypeId::of::() { + "DeployMetadataV1".to_string() + } else if TypeId::of::() == TypeId::of::() { + "BlockHeader".to_string() + } else if TypeId::of::() == TypeId::of::() { + "BlockBody".to_string() + } else if TypeId::of::() == TypeId::of::() { + "BlockSignatures".to_string() + } else if TypeId::of::() == TypeId::of::() { + "DeployHash".to_string() + } else if TypeId::of::() == TypeId::of::() { + "Deploy".to_string() + } else if TypeId::of::() == TypeId::of::() { + "ApprovalsHashes".to_string() + } else if TypeId::of::>() == TypeId::of::() { + "BTreeSet".to_string() + } else if TypeId::of::() == TypeId::of::() { + "ExecutionResult".to_string() + } else if TypeId::of::>() == TypeId::of::() { + "Transfers".to_string() + } else { + format!("{:?}", TypeId::of::()) + } + }; + warn!(?err, ?raw, "{}: bincode deserialization failed", type_name); + Err(LmdbExtError::DataCorrupted(Box::new(err))) + } + } +} + +/// Returns `true` if the specified bytes represent the legacy version of `UnbondingPurse`. +fn is_legacy(raw: &[u8]) -> bool { + !raw.starts_with(UNBONDING_PURSE_V2_MAGIC_BYTES) +} + +/// Deserializes `UnbondingPurse` from a buffer. +/// To provide backward compatibility with the previous version of the `UnbondingPurse`, +/// it checks if the raw bytes stream begins with "magic bytes". If yes, the magic bytes are +/// stripped and the struct is deserialized as a new version. Otherwise, the raw bytes +/// are treated as bytes representing the legacy `UnbondingPurse` and deserialized accordingly. 
+/// In order for the latter scenario to work, the raw bytes stream is extended with +/// bytes that represent the `None` serialized with `bincode` - these bytes simulate +/// the existence of the `new_validator` field added to the `UnbondingPurse` struct. +pub(super) fn deserialize_unbonding_purse( + raw: &[u8], +) -> Result { + const BINCODE_ENCODED_NONE: [u8; 4] = [0; 4]; + if is_legacy(raw) { + deserialize(&[raw, &BINCODE_ENCODED_NONE].concat()) + } else { + deserialize(&raw[UNBONDING_PURSE_V2_MAGIC_BYTES.len()..]) + } +} + +/// Serializes into a buffer. +#[cfg(test)] +#[inline(always)] +pub(super) fn serialize(value: &T) -> Result, LmdbExtError> { + bincode::serialize(value).map_err(|err| LmdbExtError::Other(Box::new(err))) +} + +/// Serializes `UnbondingPurse` into a buffer. +/// To provide backward compatibility with the previous version of the `UnbondingPurse`, +/// the serialized bytes are prefixed with the "magic bytes", which will be used by the +/// deserialization routine to detect the version of the `UnbondingPurse` struct. +#[cfg(test)] +#[inline(always)] +pub(super) fn serialize_unbonding_purse(value: &T) -> Result, LmdbExtError> { + let mut serialized = UNBONDING_PURSE_V2_MAGIC_BYTES.to_vec(); + serialized.extend(bincode::serialize(value).map_err(|err| LmdbExtError::Other(Box::new(err)))?); + Ok(serialized) +} + +/// Deserializes from a buffer. 
#[inline(always)]
pub(super) fn deserialize_bytesrepr<T: FromBytes>(raw: &[u8]) -> Result<T, LmdbExtError> {
    match T::from_bytes(raw).map(|val| val.0) {
        Ok(ret) => Ok(ret),
        Err(err) => {
            // unfortunately, type_name is unstable
            let type_name = {
                if TypeId::of::<T>() == TypeId::of::<DeployMetadataV1>() {
                    "DeployMetadataV1".to_string()
                } else if TypeId::of::<T>() == TypeId::of::<BlockHeader>() {
                    "BlockHeader".to_string()
                } else if TypeId::of::<T>() == TypeId::of::<BlockBody>() {
                    "BlockBody".to_string()
                } else if TypeId::of::<T>() == TypeId::of::<BlockSignatures>() {
                    "BlockSignatures".to_string()
                } else if TypeId::of::<T>() == TypeId::of::<DeployHash>() {
                    "DeployHash".to_string()
                } else if TypeId::of::<T>() == TypeId::of::<Deploy>() {
                    "Deploy".to_string()
                } else if TypeId::of::<T>() == TypeId::of::<ApprovalsHashes>() {
                    "ApprovalsHashes".to_string()
                } else if TypeId::of::<BTreeSet<Approval>>() == TypeId::of::<T>() {
                    "BTreeSet<Approval>".to_string()
                } else if TypeId::of::<T>() == TypeId::of::<ExecutionResult>() {
                    "ExecutionResult".to_string()
                } else if TypeId::of::<Vec<Transfer>>() == TypeId::of::<T>() {
                    "Transfers".to_string()
                } else {
                    format!("{:?}", TypeId::of::<T>())
                }
            };
            // Fix: include the underlying `bytesrepr` error in the event. It was
            // previously dropped, making this log line much less useful than the one
            // emitted by the bincode `deserialize` counterpart (which records
            // `?err, ?raw`). The message text is unchanged.
            error!(?err, "deserialize_bytesrepr failed to deserialize: {}", type_name);
            Err(LmdbExtError::DataCorrupted(Box::new(BytesreprError(err))))
        }
    }
}

/// Serializes into a buffer.
#[inline(always)]
pub(super) fn serialize_bytesrepr<T: ToBytes>(value: &T) -> Result<Vec<u8>, LmdbExtError> {
    value
        .to_bytes()
        .map_err(|err| LmdbExtError::Other(Box::new(BytesreprError(err))))
}

#[cfg(test)]
mod tests {
    use super::*;
    use casper_types::{AccessRights, EraId, PublicKey, SecretKey, URef, U512};

    #[test]
    fn should_read_legacy_unbonding_purse() {
        // These bytes represent the `UnbondingPurse` struct with the `new_validator` field removed
        // and serialized with `bincode`.
        // In theory, we can generate these bytes by serializing the `WithdrawPurse`, but at some
        // point, these two structs may diverge and it's a safe bet to rely on the bytes
        // that are consistent with what we keep in the current storage.
+ const LEGACY_BYTES: &str = "0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e07010000002000000000000000197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d610100000020000000000000004508a07aa941707f3eb2db94c8897a80b2c1197476b6de213ac273df7d86c4ffffffffffffffffff40feffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"; + + let decoded = base16::decode(LEGACY_BYTES).expect("decode"); + let deserialized: UnbondingPurse = deserialize_internal(&decoded) + .expect("should deserialize w/o error") + .expect("should be Some"); + + // Make sure the new field is set to default. + assert_eq!(*deserialized.new_validator(), Option::default()) + } + + #[test] + fn unbonding_purse_serialization_roundtrip() { + let original = UnbondingPurse::new( + URef::new([14; 32], AccessRights::READ_ADD_WRITE), + { + let secret_key = + SecretKey::ed25519_from_bytes([42; SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) + }, + { + let secret_key = + SecretKey::ed25519_from_bytes([43; SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) + }, + EraId::MAX, + U512::max_value() - 1, + Some({ + let secret_key = + SecretKey::ed25519_from_bytes([44; SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) + }), + ); + + let serialized = serialize_internal(&original).expect("serialization"); + let deserialized: UnbondingPurse = deserialize_internal(&serialized) + .expect("should deserialize w/o error") + .expect("should be Some"); + + assert_eq!(original, deserialized); + + // Explicitly assert that the `new_validator` is not `None` + assert!(deserialized.new_validator().is_some()) + } +} diff --git a/storage/src/block_store/lmdb/mod.rs b/storage/src/block_store/lmdb/mod.rs new file mode 100644 index 0000000000..8c43d7b446 --- /dev/null +++ b/storage/src/block_store/lmdb/mod.rs @@ -0,0 +1,115 @@ +mod lmdb_ext; +mod temp_map; +mod versioned_databases; + +mod 
indexed_lmdb_block_store; +mod lmdb_block_store; + +use core::convert::TryFrom; +pub use indexed_lmdb_block_store::IndexedLmdbBlockStore; +pub use lmdb_block_store::LmdbBlockStore; + +#[cfg(test)] +use rand::Rng; +use serde::Serialize; + +#[cfg(test)] +use casper_types::testing::TestRng; + +/// An identifier of db tables. +#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash, Serialize)] +#[repr(u16)] +pub enum DbTableId { + /// Refers to `BlockHeader` db table. + BlockHeader = 0, + /// Refers to `BlockBody` db table. + BlockBody = 1, + /// Refers to `ApprovalsHashes` db table. + ApprovalsHashes = 2, + /// Refers to `BlockMetadata` db table. + BlockMetadata = 3, + /// Refers to `Transaction` db table. + Transaction = 4, + /// Refers to `ExecutionResult` db table. + ExecutionResult = 5, + /// Refers to `Transfer` db table. + Transfer = 6, + /// Refers to `FinalizedTransactionApprovals` db table. + FinalizedTransactionApprovals = 7, +} + +impl DbTableId { + /// Returns a random `DbTableId`. + #[cfg(test)] + pub fn random(rng: &mut TestRng) -> Self { + match rng.gen_range(0..8) { + 0 => DbTableId::BlockHeader, + 1 => DbTableId::BlockBody, + 2 => DbTableId::ApprovalsHashes, + 3 => DbTableId::BlockMetadata, + 4 => DbTableId::Transaction, + 5 => DbTableId::ExecutionResult, + 6 => DbTableId::Transfer, + 7 => DbTableId::FinalizedTransactionApprovals, + _ => unreachable!(), + } + } +} + +impl TryFrom for DbTableId { + type Error = UnknownDbTableId; + + fn try_from(value: u16) -> Result { + match value { + 0 => Ok(DbTableId::BlockHeader), + 1 => Ok(DbTableId::BlockBody), + 2 => Ok(DbTableId::ApprovalsHashes), + 3 => Ok(DbTableId::BlockMetadata), + 4 => Ok(DbTableId::Transaction), + 5 => Ok(DbTableId::ExecutionResult), + 6 => Ok(DbTableId::Transfer), + 7 => Ok(DbTableId::FinalizedTransactionApprovals), + _ => Err(UnknownDbTableId(value)), + } + } +} + +impl From for u16 { + fn from(value: DbTableId) -> Self { + value as u16 + } +} + +impl core::fmt::Display for DbTableId { + fn 
fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + match self { + DbTableId::BlockHeader => write!(f, "BlockHeader"), + DbTableId::BlockBody => write!(f, "BlockBody"), + DbTableId::ApprovalsHashes => write!(f, "ApprovalsHashes"), + DbTableId::BlockMetadata => write!(f, "BlockMetadata"), + DbTableId::Transaction => write!(f, "Transaction"), + DbTableId::ExecutionResult => write!(f, "ExecutionResult"), + DbTableId::Transfer => write!(f, "Transfer"), + DbTableId::FinalizedTransactionApprovals => write!(f, "FinalizedTransactionApprovals"), + } + } +} + +/// Error returned when trying to convert a `u16` into a `DbTableId`. +#[derive(Debug, PartialEq, Eq)] +pub struct UnknownDbTableId(u16); + +#[cfg(test)] +mod tests { + use super::*; + use casper_types::testing::TestRng; + + #[test] + fn tag_roundtrip() { + let rng = &mut TestRng::new(); + + let val = DbTableId::random(rng); + let tag = u16::from(val); + assert_eq!(DbTableId::try_from(tag), Ok(val)); + } +} diff --git a/storage/src/block_store/lmdb/temp_map.rs b/storage/src/block_store/lmdb/temp_map.rs new file mode 100644 index 0000000000..9e26afef9e --- /dev/null +++ b/storage/src/block_store/lmdb/temp_map.rs @@ -0,0 +1,70 @@ +use std::collections::BTreeMap; + +enum EntryState { + Deleted, + Occupied(V), +} + +/// A wrapper over a BTreeMap that stores changes to the backing map only temporarily. +/// The backing map will not be altered until the temporary changes are committed. +pub(crate) struct TempMap<'a, K, V: 'a> { + base_index: &'a mut BTreeMap, + new_index: BTreeMap>, +} + +impl<'a, K, V> TempMap<'a, K, V> +where + K: Ord, + V: 'a + Copy, +{ + /// Creates a new temporary map that is backed by a BTreeMap + pub(crate) fn new(base_index: &'a mut BTreeMap) -> Self { + Self { + base_index, + new_index: BTreeMap::>::new(), + } + } + + /// Reads the value contained in the map at the specified key. 
+ pub(crate) fn get(&self, key: &K) -> Option { + if let Some(state) = self.new_index.get(key) { + match state { + EntryState::Occupied(val) => Some(*val), + EntryState::Deleted => None, + } + } else { + self.base_index.get(key).copied() + } + } + + /// Checks if a key exists in this map. + pub(crate) fn contains_key(&self, key: &K) -> bool { + if self.new_index.contains_key(key) { + true + } else { + self.base_index.contains_key(key) + } + } + + /// Sets the value at the specified key index. + pub(crate) fn insert(&mut self, key: K, val: V) { + self.new_index.insert(key, EntryState::Occupied(val)); + } + + /// Removes the value from the map. + pub(crate) fn remove(&mut self, key: K) { + if self.contains_key(&key) { + self.new_index.insert(key, EntryState::Deleted); + } + } + + /// Saves temporary changes to the backing map. + pub(crate) fn commit(self) { + for (key, val) in self.new_index { + match val { + EntryState::Occupied(val) => self.base_index.insert(key, val), + EntryState::Deleted => self.base_index.remove(&key), + }; + } + } +} diff --git a/storage/src/block_store/lmdb/versioned_databases.rs b/storage/src/block_store/lmdb/versioned_databases.rs new file mode 100644 index 0000000000..aecd297915 --- /dev/null +++ b/storage/src/block_store/lmdb/versioned_databases.rs @@ -0,0 +1,607 @@ +use datasize::DataSize; +use lmdb::{ + Cursor, Database, DatabaseFlags, Environment, RwCursor, RwTransaction, + Transaction as LmdbTransaction, +}; +use serde::de::DeserializeOwned; +#[cfg(test)] +use serde::Serialize; +use std::{collections::BTreeSet, marker::PhantomData}; +use tracing::error; + +use casper_types::{ + bytesrepr::{FromBytes, ToBytes}, + execution::ExecutionResult, + Approval, BlockBody, BlockBodyV1, BlockHash, BlockHeader, BlockHeaderV1, BlockSignatures, + BlockSignaturesV1, Deploy, DeployHash, Digest, Transaction, TransactionHash, TransferV1, +}; + +use super::{ + super::{ + error::BlockStoreError, + types::{ApprovalsHashes, DeployMetadataV1, 
LegacyApprovalsHashes, Transfers}, + DbRawBytesSpec, + }, + lmdb_ext::{self, LmdbExtError, TransactionExt, WriteTransactionExt}, +}; + +pub(crate) trait VersionedKey: ToBytes { + type Legacy: AsRef<[u8]>; + + fn legacy_key(&self) -> Option<&Self::Legacy>; +} + +pub(crate) trait VersionedValue: ToBytes + FromBytes { + type Legacy: 'static + DeserializeOwned + Into; +} + +impl VersionedKey for TransactionHash { + type Legacy = DeployHash; + + fn legacy_key(&self) -> Option<&Self::Legacy> { + match self { + TransactionHash::Deploy(deploy_hash) => Some(deploy_hash), + TransactionHash::V1(_) => None, + } + } +} + +impl VersionedKey for BlockHash { + type Legacy = BlockHash; + + fn legacy_key(&self) -> Option<&Self::Legacy> { + Some(self) + } +} + +impl VersionedKey for Digest { + type Legacy = Digest; + + fn legacy_key(&self) -> Option<&Self::Legacy> { + Some(self) + } +} + +impl VersionedValue for Transaction { + type Legacy = Deploy; +} + +impl VersionedValue for BlockHeader { + type Legacy = BlockHeaderV1; +} + +impl VersionedValue for BlockBody { + type Legacy = BlockBodyV1; +} + +impl VersionedValue for ApprovalsHashes { + type Legacy = LegacyApprovalsHashes; +} + +impl VersionedValue for ExecutionResult { + type Legacy = DeployMetadataV1; +} + +impl VersionedValue for BTreeSet { + type Legacy = BTreeSet; +} + +impl VersionedValue for BlockSignatures { + type Legacy = BlockSignaturesV1; +} + +impl VersionedValue for Transfers { + type Legacy = Vec; +} + +/// A pair of databases, one holding the original legacy form of the data, and the other holding the +/// new versioned, future-proof form of the data. +/// +/// Specific entries should generally not be repeated - they will either be held in the legacy or +/// the current DB, but not both. Data is not migrated from legacy to current, but newly-stored +/// data will always be written to the current DB, even if it is of the type `V::Legacy`. 
+/// +/// Exceptions to this can occur if a pre-existing legacy entry is re-stored, in which case there +/// will be a duplicated entry in the `legacy` and `current` DBs. This should not be a common +/// occurrence though. +#[derive(Eq, PartialEq, DataSize, Debug)] +pub(crate) struct VersionedDatabases { + /// Legacy form of the data, with the key as `K::Legacy` type (converted to bytes using + /// `AsRef<[u8]>`) and the value bincode-encoded. + #[data_size(skip)] + pub legacy: Database, + /// Current form of the data, with the key as `K` bytesrepr-encoded and the value as `V` also + /// bytesrepr-encoded. + #[data_size(skip)] + pub current: Database, + _phantom: PhantomData<(K, V)>, +} + +impl Clone for VersionedDatabases { + fn clone(&self) -> Self { + *self + } +} + +impl Copy for VersionedDatabases {} + +impl VersionedDatabases +where + K: VersionedKey + std::fmt::Display, + V: VersionedValue + 'static, +{ + pub(super) fn new( + env: &Environment, + legacy_name: &str, + current_name: &str, + ) -> Result { + Ok(VersionedDatabases { + legacy: env.create_db(Some(legacy_name), DatabaseFlags::empty())?, + current: env.create_db(Some(current_name), DatabaseFlags::empty())?, + _phantom: PhantomData, + }) + } + + pub(super) fn put( + &self, + txn: &mut RwTransaction, + key: &K, + value: &V, + overwrite: bool, + ) -> Result { + txn.put_value_bytesrepr(self.current, key, value, overwrite) + } + + pub(super) fn get( + &self, + txn: &Tx, + key: &K, + ) -> Result, LmdbExtError> { + match txn.get_value_bytesrepr(self.current, key) { + Ok(Some(value)) => return Ok(Some(value)), + Ok(None) => { + // check legacy db + } + Err(err) => { + error!(%err, "versioned_database: failed to retrieve record from current db"); + return Err(err); + } + } + + let legacy_key = match key.legacy_key() { + Some(key) => key, + None => return Ok(None), + }; + + Ok(txn + .get_value::<_, V::Legacy>(self.legacy, legacy_key)? 
+ .map(Into::into)) + } + + pub(super) fn get_raw( + &self, + txn: &Tx, + key: &[u8], + ) -> Result, LmdbExtError> { + if key.is_empty() { + return Ok(None); + } + let value = txn.get(self.current, &key); + match value { + Ok(raw_bytes) => Ok(Some(DbRawBytesSpec::new_current(raw_bytes))), + Err(lmdb::Error::NotFound) => { + let value = txn.get(self.legacy, &key); + match value { + Ok(raw_bytes) => Ok(Some(DbRawBytesSpec::new_legacy(raw_bytes))), + Err(lmdb::Error::NotFound) => Ok(None), + Err(err) => Err(err.into()), + } + } + Err(err) => Err(err.into()), + } + } + + pub(super) fn exists( + &self, + txn: &Tx, + key: &K, + ) -> Result { + if txn.value_exists_bytesrepr(self.current, key)? { + return Ok(true); + } + + let legacy_key = match key.legacy_key() { + Some(key) => key, + None => return Ok(false), + }; + + txn.value_exists(self.legacy, legacy_key) + } + + /// Deletes the value under `key` from both the current and legacy DBs. + /// + /// Returns `Ok` if the value is successfully deleted from either or both the DBs, or if the + /// value did not exist in either. + pub(super) fn delete(&self, txn: &mut RwTransaction, key: &K) -> Result<(), LmdbExtError> { + let serialized_key = lmdb_ext::serialize_bytesrepr(key)?; + let current_result = match txn.del(self.current, &serialized_key, None) { + Ok(_) | Err(lmdb::Error::NotFound) => Ok(()), + Err(error) => Err(error.into()), + }; + // Avoid returning early for the case where `current_result` is Ok, since some + // `VersionedDatabases` could possibly have the same entry in both DBs. 
+ + let legacy_key = match key.legacy_key() { + Some(key) => key, + None => return current_result, + }; + + let legacy_result = match txn.del(self.legacy, legacy_key, None) { + Ok(_) | Err(lmdb::Error::NotFound) => Ok(()), + Err(error) => Err(error.into()), + }; + + match (current_result, legacy_result) { + (Err(error), _) => Err(error), + (_, Err(error)) => Err(error), + (Ok(_), Ok(_)) => Ok(()), + } + } + + /// Iterates every row in the current database, deserializing the value and calling `f` with the + /// cursor and the parsed value. + pub(super) fn for_each_value_in_current<'a, F>( + &self, + txn: &'a mut RwTransaction, + f: &mut F, + ) -> Result<(), BlockStoreError> + where + F: FnMut(&mut RwCursor<'a>, V) -> Result<(), BlockStoreError>, + { + let mut cursor = txn + .open_rw_cursor(self.current) + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?; + for row in cursor.iter() { + let (_, raw_val) = + row.map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?; + let value: V = lmdb_ext::deserialize_bytesrepr(raw_val) + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?; + f(&mut cursor, value)?; + } + Ok(()) + } + + /// Iterates every row in the legacy database, deserializing the value and calling `f` with the + /// cursor and the parsed value. + pub(super) fn for_each_value_in_legacy<'a, F>( + &self, + txn: &'a mut RwTransaction, + f: &mut F, + ) -> Result<(), BlockStoreError> + where + F: FnMut(&mut RwCursor<'a>, V) -> Result<(), BlockStoreError>, + { + let mut cursor = txn + .open_rw_cursor(self.legacy) + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?; + for row in cursor.iter() { + let (_, raw_val) = + row.map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?; + let value: V::Legacy = lmdb_ext::deserialize(raw_val) + .map_err(|err| BlockStoreError::InternalStorage(Box::new(err)))?; + f(&mut cursor, value.into())?; + } + Ok(()) + } + + /// Writes to the `legacy` database. 
+ #[cfg(test)] + pub(super) fn put_legacy( + &self, + txn: &mut RwTransaction, + legacy_key: &K::Legacy, + legacy_value: &V::Legacy, + overwrite: bool, + ) -> bool + where + V::Legacy: Serialize, + { + txn.put_value(self.legacy, legacy_key, legacy_value, overwrite) + .expect("should put legacy value") + } +} + +#[cfg(test)] +mod tests { + use crate::block_store::lmdb::lmdb_block_store::new_environment; + use lmdb::WriteFlags; + use std::collections::HashMap; + + use tempfile::TempDir; + + use casper_types::testing::TestRng; + + use super::*; + + struct Fixture { + rng: TestRng, + env: Environment, + dbs: VersionedDatabases, + random_transactions: HashMap, + legacy_transactions: HashMap, + _data_dir: TempDir, + } + + impl Fixture { + fn new() -> Fixture { + let rng = TestRng::new(); + let data_dir = TempDir::new().expect("should create temp dir"); + let env = new_environment(1024 * 1024, data_dir.path()).unwrap(); + let dbs = VersionedDatabases::new(&env, "legacy", "current").unwrap(); + let mut fixture = Fixture { + rng, + env, + dbs, + random_transactions: HashMap::new(), + legacy_transactions: HashMap::new(), + _data_dir: data_dir, + }; + for _ in 0..3 { + let transaction = Transaction::random(&mut fixture.rng); + assert!(fixture + .random_transactions + .insert(transaction.hash(), transaction) + .is_none()); + let deploy = Deploy::random(&mut fixture.rng); + assert!(fixture + .legacy_transactions + .insert(*deploy.hash(), deploy) + .is_none()); + } + fixture + } + } + + #[test] + fn should_put() { + let fixture = Fixture::new(); + let (transaction_hash, transaction) = fixture.random_transactions.iter().next().unwrap(); + + // Should return `true` on first `put`. + let mut txn = fixture.env.begin_rw_txn().unwrap(); + assert!(fixture + .dbs + .put(&mut txn, transaction_hash, transaction, true) + .unwrap()); + + // Should return `false` on duplicate `put` if not set to overwrite. 
+ assert!(!fixture + .dbs + .put(&mut txn, transaction_hash, transaction, false) + .unwrap()); + + // Should return `true` on duplicate `put` if set to overwrite. + assert!(fixture + .dbs + .put(&mut txn, transaction_hash, transaction, true) + .unwrap()); + } + + #[test] + fn should_get() { + let mut fixture = Fixture::new(); + let (transaction_hash, transaction) = fixture.random_transactions.iter().next().unwrap(); + let (deploy_hash, deploy) = fixture.legacy_transactions.iter().next().unwrap(); + + // Inject the deploy into the legacy DB and store the random transaction in the current DB. + let mut txn = fixture.env.begin_rw_txn().unwrap(); + assert!(fixture.dbs.put_legacy(&mut txn, deploy_hash, deploy, true)); + assert!(fixture + .dbs + .put(&mut txn, transaction_hash, transaction, true) + .unwrap()); + txn.commit().unwrap(); + + // Should get the deploy. + let txn = fixture.env.begin_ro_txn().unwrap(); + assert_eq!( + fixture + .dbs + .get(&txn, &TransactionHash::from(*deploy_hash)) + .unwrap(), + Some(Transaction::from(deploy.clone())) + ); + + // Should get the random transaction. + assert_eq!( + fixture.dbs.get(&txn, transaction_hash).unwrap(), + Some(transaction.clone()) + ); + + // Should return `Ok(None)` for non-existent data. + let random_hash = Transaction::random(&mut fixture.rng).hash(); + assert!(fixture.dbs.get(&txn, &random_hash).unwrap().is_none()); + } + + #[test] + fn should_exist() { + let mut fixture = Fixture::new(); + let (transaction_hash, transaction) = fixture.random_transactions.iter().next().unwrap(); + let (deploy_hash, deploy) = fixture.legacy_transactions.iter().next().unwrap(); + + // Inject the deploy into the legacy DB and store the random transaction in the current DB. 
+ let mut txn = fixture.env.begin_rw_txn().unwrap(); + assert!(fixture.dbs.put_legacy(&mut txn, deploy_hash, deploy, true)); + assert!(fixture + .dbs + .put(&mut txn, transaction_hash, transaction, true) + .unwrap()); + txn.commit().unwrap(); + + // The deploy should exist. + let txn = fixture.env.begin_ro_txn().unwrap(); + assert!(fixture + .dbs + .exists(&txn, &TransactionHash::from(*deploy_hash)) + .unwrap()); + + // The random transaction should exist. + assert!(fixture.dbs.exists(&txn, transaction_hash).unwrap()); + + // Random data should not exist. + let random_hash = Transaction::random(&mut fixture.rng).hash(); + assert!(!fixture.dbs.exists(&txn, &random_hash).unwrap()); + } + + #[test] + fn should_delete() { + let mut fixture = Fixture::new(); + let (transaction_hash, transaction) = fixture.random_transactions.iter().next().unwrap(); + let (deploy_hash, deploy) = fixture.legacy_transactions.iter().next().unwrap(); + + // Inject the deploy into the legacy DB and store the random transaction in the current DB. + let mut txn = fixture.env.begin_rw_txn().unwrap(); + assert!(fixture.dbs.put_legacy(&mut txn, deploy_hash, deploy, true)); + assert!(fixture + .dbs + .put(&mut txn, transaction_hash, transaction, true) + .unwrap()); + // Also store the legacy deploy in the `current` DB. While being an edge case, we still + // need to ensure that deleting removes both copies of the deploy. + assert!(fixture + .dbs + .put( + &mut txn, + &TransactionHash::from(*deploy_hash), + &Transaction::from(deploy.clone()), + true + ) + .unwrap()); + txn.commit().unwrap(); + + // Should delete the deploy. + let mut txn = fixture.env.begin_rw_txn().unwrap(); + fixture + .dbs + .delete(&mut txn, &TransactionHash::from(*deploy_hash)) + .unwrap(); + assert!(!fixture + .dbs + .exists(&txn, &TransactionHash::from(*deploy_hash)) + .unwrap()); + + // Should delete the random transaction. 
+ fixture.dbs.delete(&mut txn, transaction_hash).unwrap(); + assert!(!fixture.dbs.exists(&txn, transaction_hash).unwrap()); + + // Should report success when attempting to delete non-existent data. + let random_hash = Transaction::random(&mut fixture.rng).hash(); + fixture.dbs.delete(&mut txn, &random_hash).unwrap(); + } + + #[test] + fn should_iterate_current() { + let fixture = Fixture::new(); + + // Store all random transactions. + let mut txn = fixture.env.begin_rw_txn().unwrap(); + for (transaction_hash, transaction) in fixture.random_transactions.iter() { + assert!(fixture + .dbs + .put(&mut txn, transaction_hash, transaction, true) + .unwrap()); + } + txn.commit().unwrap(); + + // Iterate `current`, deleting each cursor entry and gathering the visited values in a map. + let mut txn = fixture.env.begin_rw_txn().unwrap(); + let mut visited = HashMap::new(); + let mut visitor = |cursor: &mut RwCursor, transaction: Transaction| { + cursor.del(WriteFlags::empty()).unwrap(); + let _ = visited.insert(transaction.hash(), transaction); + Ok(()) + }; + fixture + .dbs + .for_each_value_in_current(&mut txn, &mut visitor) + .unwrap(); + txn.commit().unwrap(); + + // Ensure all values were visited and the DB doesn't contain them any more. + assert_eq!(visited, fixture.random_transactions); + let txn = fixture.env.begin_ro_txn().unwrap(); + for transaction_hash in fixture.random_transactions.keys() { + assert!(!fixture.dbs.exists(&txn, transaction_hash).unwrap()); + } + + // Ensure a second run is a no-op. + let mut visitor = |_cursor: &mut RwCursor, _transaction: Transaction| { + panic!("should never get called"); + }; + let mut txn = fixture.env.begin_rw_txn().unwrap(); + fixture + .dbs + .for_each_value_in_current(&mut txn, &mut visitor) + .unwrap(); + } + + #[test] + fn should_iterate_legacy() { + let fixture = Fixture::new(); + + // Store all legacy transactions. 
+ let mut txn = fixture.env.begin_rw_txn().unwrap(); + for (deploy_hash, deploy) in fixture.legacy_transactions.iter() { + assert!(fixture.dbs.put_legacy(&mut txn, deploy_hash, deploy, true)); + } + txn.commit().unwrap(); + + // Iterate `legacy`, deleting each cursor entry and gathering the visited values in a map. + let mut txn = fixture.env.begin_rw_txn().unwrap(); + let mut visited = HashMap::new(); + let mut visitor = |cursor: &mut RwCursor, transaction: Transaction| { + cursor.del(WriteFlags::empty()).unwrap(); + match transaction { + Transaction::Deploy(deploy) => { + let _ = visited.insert(*deploy.hash(), deploy); + } + Transaction::V1(_) => unreachable!(), + } + Ok(()) + }; + fixture + .dbs + .for_each_value_in_legacy(&mut txn, &mut visitor) + .unwrap(); + txn.commit().unwrap(); + + // Ensure all values were visited and the DB doesn't contain them any more. + assert_eq!(visited, fixture.legacy_transactions); + let txn = fixture.env.begin_ro_txn().unwrap(); + for deploy_hash in fixture.legacy_transactions.keys() { + assert!(!fixture + .dbs + .exists(&txn, &TransactionHash::from(*deploy_hash)) + .unwrap()); + } + + // Ensure a second run is a no-op. + let mut visitor = |_cursor: &mut RwCursor, _transaction: Transaction| { + panic!("should never get called"); + }; + let mut txn = fixture.env.begin_rw_txn().unwrap(); + fixture + .dbs + .for_each_value_in_legacy(&mut txn, &mut visitor) + .unwrap(); + } + + #[test] + fn should_get_on_empty_key() { + let fixture = Fixture::new(); + let txn = fixture.env.begin_ro_txn().unwrap(); + let key = vec![]; + let res = fixture.dbs.get_raw(&txn, &key); + assert!(matches!(res, Ok(None))); + } +} diff --git a/storage/src/block_store/mod.rs b/storage/src/block_store/mod.rs new file mode 100644 index 0000000000..baa9ae7d50 --- /dev/null +++ b/storage/src/block_store/mod.rs @@ -0,0 +1,45 @@ +mod block_provider; +mod error; +/// Block store lmdb logic. +pub mod lmdb; +/// Block store types. 
+pub mod types; + +pub use block_provider::{BlockStoreProvider, BlockStoreTransaction, DataReader, DataWriter}; +pub use error::BlockStoreError; + +/// Stores raw bytes from the DB along with the flag indicating whether data come from legacy or +/// current version of the DB. +#[derive(Debug)] +pub struct DbRawBytesSpec { + is_legacy: bool, + raw_bytes: Vec, +} + +impl DbRawBytesSpec { + /// Creates a variant indicating that raw bytes are coming from the legacy database. + pub fn new_legacy(raw_bytes: &[u8]) -> Self { + Self { + is_legacy: true, + raw_bytes: raw_bytes.to_vec(), + } + } + + /// Creates a variant indicating that raw bytes are coming from the current database. + pub fn new_current(raw_bytes: &[u8]) -> Self { + Self { + is_legacy: false, + raw_bytes: raw_bytes.to_vec(), + } + } + + /// Is legacy? + pub fn is_legacy(&self) -> bool { + self.is_legacy + } + + /// Raw bytes. + pub fn into_raw_bytes(self) -> Vec { + self.raw_bytes + } +} diff --git a/storage/src/block_store/types/approvals_hashes.rs b/storage/src/block_store/types/approvals_hashes.rs new file mode 100644 index 0000000000..3398dfcf7f --- /dev/null +++ b/storage/src/block_store/types/approvals_hashes.rs @@ -0,0 +1,253 @@ +use std::{ + collections::BTreeMap, + fmt::{self, Debug, Display, Formatter}, +}; + +use datasize::DataSize; +use serde::{Deserialize, Serialize}; +use thiserror::Error; +use tracing::error; + +use casper_types::{ + bytesrepr::{self, FromBytes, ToBytes}, + global_state::TrieMerkleProof, + ApprovalsHash, Block, BlockHash, BlockV1, BlockV2, DeployId, Digest, Key, StoredValue, + TransactionId, +}; + +use crate::global_state::trie_store::operations::compute_state_hash; + +pub(crate) const APPROVALS_CHECKSUM_NAME: &str = "approvals_checksum"; + +/// Returns the hash of the bytesrepr-encoded deploy_ids. 
+fn compute_approvals_checksum(txn_ids: Vec) -> Result { + let bytes = txn_ids.into_bytes()?; + Ok(Digest::hash(bytes)) +} + +/// The data which is gossiped by validators to non-validators upon creation of a new block. +#[derive(DataSize, Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct ApprovalsHashes { + /// Hash of the block that contains deploys that are relevant to the approvals. + block_hash: BlockHash, + /// The set of all deploys' finalized approvals' hashes. + approvals_hashes: Vec, + /// The Merkle proof of the checksum registry containing the checksum of the finalized + /// approvals. + #[data_size(skip)] + merkle_proof_approvals: TrieMerkleProof, +} + +impl ApprovalsHashes { + /// Ctor. + pub fn new( + block_hash: BlockHash, + approvals_hashes: Vec, + merkle_proof_approvals: TrieMerkleProof, + ) -> Self { + Self { + block_hash, + approvals_hashes, + merkle_proof_approvals, + } + } + + /// Verify block. + pub fn verify(&self, block: &Block) -> Result<(), ApprovalsHashesValidationError> { + if *self.merkle_proof_approvals.key() != Key::ChecksumRegistry { + return Err(ApprovalsHashesValidationError::InvalidKeyType); + } + + let proof_state_root_hash = compute_state_hash(&self.merkle_proof_approvals) + .map_err(ApprovalsHashesValidationError::TrieMerkleProof)?; + + if proof_state_root_hash != *block.state_root_hash() { + return Err(ApprovalsHashesValidationError::StateRootHashMismatch { + proof_state_root_hash, + block_state_root_hash: *block.state_root_hash(), + }); + } + + let value_in_proof = self + .merkle_proof_approvals + .value() + .as_cl_value() + .and_then(|cl_value| cl_value.clone().into_t().ok()) + .and_then(|registry: BTreeMap| { + registry.get(APPROVALS_CHECKSUM_NAME).copied() + }) + .ok_or(ApprovalsHashesValidationError::InvalidChecksumRegistry)?; + + let computed_approvals_checksum = match block { + Block::V1(v1_block) => compute_legacy_approvals_checksum(self.deploy_ids(v1_block)?)?, + Block::V2(v2_block) => 
compute_approvals_checksum(self.transaction_ids(v2_block)?) + .map_err(ApprovalsHashesValidationError::ApprovalsChecksum)?, + }; + + if value_in_proof != computed_approvals_checksum { + return Err(ApprovalsHashesValidationError::ApprovalsChecksumMismatch { + computed_approvals_checksum, + value_in_proof, + }); + } + + Ok(()) + } + + /// Deploy ids. + pub(crate) fn deploy_ids( + &self, + v1_block: &BlockV1, + ) -> Result, ApprovalsHashesValidationError> { + let deploy_approvals_hashes = self.approvals_hashes.clone(); + Ok(v1_block + .deploy_and_transfer_hashes() + .zip(deploy_approvals_hashes) + .map(|(deploy_hash, deploy_approvals_hash)| { + DeployId::new(*deploy_hash, deploy_approvals_hash) + }) + .collect()) + } + + /// Transaction ids. + pub fn transaction_ids( + &self, + v2_block: &BlockV2, + ) -> Result, ApprovalsHashesValidationError> { + v2_block + .all_transactions() + .zip(self.approvals_hashes.clone()) + .map(|(txn_hash, txn_approvals_hash)| { + Ok(TransactionId::new(*txn_hash, txn_approvals_hash)) + }) + .collect() + } + + /// Block hash. + pub fn block_hash(&self) -> &BlockHash { + &self.block_hash + } + + /// Approvals hashes. 
+ pub fn approvals_hashes(&self) -> Vec { + self.approvals_hashes.clone() + } +} + +impl Display for ApprovalsHashes { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!(f, "approvals hashes for {}", self.block_hash()) + } +} + +impl ToBytes for ApprovalsHashes { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.block_hash.write_bytes(writer)?; + self.approvals_hashes.write_bytes(writer)?; + self.merkle_proof_approvals.write_bytes(writer) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.block_hash.serialized_length() + + self.approvals_hashes.serialized_length() + + self.merkle_proof_approvals.serialized_length() + } +} + +impl FromBytes for ApprovalsHashes { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (block_hash, remainder) = BlockHash::from_bytes(bytes)?; + let (approvals_hashes, remainder) = Vec::::from_bytes(remainder)?; + let (merkle_proof_approvals, remainder) = + TrieMerkleProof::::from_bytes(remainder)?; + Ok(( + ApprovalsHashes { + block_hash, + approvals_hashes, + merkle_proof_approvals, + }, + remainder, + )) + } +} + +/// Returns the hash of the bytesrepr-encoded deploy_ids, as used until the `Block` enum became +/// available. +pub(crate) fn compute_legacy_approvals_checksum( + deploy_ids: Vec, +) -> Result { + let bytes = deploy_ids + .into_bytes() + .map_err(ApprovalsHashesValidationError::ApprovalsChecksum)?; + Ok(Digest::hash(bytes)) +} + +/// An error that can arise when validating `ApprovalsHashes`. +#[derive(Error, Debug, DataSize)] +#[non_exhaustive] +pub enum ApprovalsHashesValidationError { + /// The key provided in the proof is not a `Key::ChecksumRegistry`. 
+ #[error("key provided in proof is not a Key::ChecksumRegistry")] + InvalidKeyType, + + /// An error while computing the state root hash implied by the Merkle proof. + #[error("failed to compute state root hash implied by proof")] + TrieMerkleProof(bytesrepr::Error), + + /// The state root hash implied by the Merkle proof doesn't match that in the block. + #[error("state root hash implied by the Merkle proof doesn't match that in the block")] + StateRootHashMismatch { + /// Proof state root hash. + proof_state_root_hash: Digest, + /// Block state root hash. + block_state_root_hash: Digest, + }, + + /// The value provided in the proof cannot be parsed to the checksum registry type. + #[error("value provided in the proof cannot be parsed to the checksum registry type")] + InvalidChecksumRegistry, + + /// An error while computing the checksum of the approvals. + #[error("failed to compute checksum of the approvals")] + ApprovalsChecksum(bytesrepr::Error), + + /// The approvals checksum provided doesn't match one calculated from the approvals. + #[error("provided approvals checksum doesn't match one calculated from the approvals")] + ApprovalsChecksumMismatch { + /// Computed approvals checksum. + computed_approvals_checksum: Digest, + /// Value in proof. + value_in_proof: Digest, + }, + + /// Variant mismatch. + #[error("mismatch in variants: {0:?}")] + #[data_size(skip)] + VariantMismatch(Box), +} + +/// Initial version of `ApprovalsHashes` prior to `casper-node` v2.0.0. 
+#[derive(Deserialize)] +pub(crate) struct LegacyApprovalsHashes { + block_hash: BlockHash, + approvals_hashes: Vec, + merkle_proof_approvals: TrieMerkleProof, +} + +impl From for ApprovalsHashes { + fn from( + LegacyApprovalsHashes { + block_hash, + approvals_hashes, + merkle_proof_approvals, + }: LegacyApprovalsHashes, + ) -> Self { + ApprovalsHashes::new(block_hash, approvals_hashes, merkle_proof_approvals) + } +} diff --git a/storage/src/block_store/types/block_hash_height_and_era.rs b/storage/src/block_store/types/block_hash_height_and_era.rs new file mode 100644 index 0000000000..8aa5e64431 --- /dev/null +++ b/storage/src/block_store/types/block_hash_height_and_era.rs @@ -0,0 +1,45 @@ +use datasize::DataSize; +#[cfg(test)] +use rand::Rng; + +#[cfg(test)] +use casper_types::testing::TestRng; +use casper_types::{BlockHash, BlockHashAndHeight, EraId}; + +/// Aggregates block identifying information. +#[derive(Clone, Copy, Debug, DataSize)] +pub struct BlockHashHeightAndEra { + /// Block hash. + pub block_hash: BlockHash, + /// Block height. + pub block_height: u64, + /// EraId + pub era_id: EraId, +} + +impl BlockHashHeightAndEra { + /// Creates a new [`BlockHashHeightAndEra`] from parts. + pub fn new(block_hash: BlockHash, block_height: u64, era_id: EraId) -> Self { + BlockHashHeightAndEra { + block_hash, + block_height, + era_id, + } + } + + /// Returns the block hash. 
+ #[cfg(test)] + pub fn random(rng: &mut TestRng) -> Self { + Self { + block_hash: BlockHash::random(rng), + block_height: rng.gen(), + era_id: EraId::random(rng), + } + } +} + +impl From for BlockHashAndHeight { + fn from(bhhe: BlockHashHeightAndEra) -> Self { + BlockHashAndHeight::new(bhhe.block_hash, bhhe.block_height) + } +} diff --git a/storage/src/block_store/types/deploy_metadata_v1.rs b/storage/src/block_store/types/deploy_metadata_v1.rs new file mode 100644 index 0000000000..ec0645d2ec --- /dev/null +++ b/storage/src/block_store/types/deploy_metadata_v1.rs @@ -0,0 +1,32 @@ +use std::collections::HashMap; + +use serde::{Deserialize, Serialize}; + +use casper_types::{ + execution::{ExecutionResult, ExecutionResultV1}, + BlockHash, +}; + +/// Version 1 metadata related to a single deploy prior to `casper-node` v2.0.0. +#[derive(Clone, Default, Serialize, Deserialize, Debug, PartialEq, Eq)] +pub(crate) struct DeployMetadataV1 { + /// The hash of the single block containing the related deploy, along with the results of + /// executing it. + /// + /// Due to reasons, this was implemented as a map, despite the guarantee that there will only + /// ever be a single entry. + pub(super) execution_results: HashMap, +} + +impl From for ExecutionResult { + fn from(v1_results: DeployMetadataV1) -> Self { + let v1_result = v1_results + .execution_results + .into_iter() + .next() + // Safe to unwrap as it's guaranteed to contain exactly one entry. 
+ .expect("must be exactly one result") + .1; + ExecutionResult::V1(v1_result) + } +} diff --git a/storage/src/block_store/types/mod.rs b/storage/src/block_store/types/mod.rs new file mode 100644 index 0000000000..96fbc90ab6 --- /dev/null +++ b/storage/src/block_store/types/mod.rs @@ -0,0 +1,79 @@ +mod approvals_hashes; +mod block_hash_height_and_era; +mod deploy_metadata_v1; +mod transfers; + +use std::{ + borrow::Cow, + collections::{BTreeSet, HashMap}, +}; + +pub use approvals_hashes::{ApprovalsHashes, ApprovalsHashesValidationError}; +pub use block_hash_height_and_era::BlockHashHeightAndEra; +use casper_types::{ + execution::ExecutionResult, Approval, Block, BlockHash, BlockHeader, TransactionHash, Transfer, +}; + +pub(crate) use approvals_hashes::LegacyApprovalsHashes; +pub(crate) use deploy_metadata_v1::DeployMetadataV1; +pub(in crate::block_store) use transfers::Transfers; + +/// Exeuction results. +pub type ExecutionResults = HashMap; + +/// Transaction finalized approvals. +pub struct TransactionFinalizedApprovals { + /// Transaction hash. + pub transaction_hash: TransactionHash, + /// Finalized approvals. + pub finalized_approvals: BTreeSet, +} + +/// Block execution results. +pub struct BlockExecutionResults { + /// Block info. + pub block_info: BlockHashHeightAndEra, + /// Execution results. + pub exec_results: ExecutionResults, +} + +/// Block transfers. +pub struct BlockTransfers { + /// Block hash. + pub block_hash: BlockHash, + /// Transfers. + pub transfers: Vec, +} + +/// State store. +pub struct StateStore { + /// Key. + pub key: Cow<'static, [u8]>, + /// Value. + pub value: Vec, +} + +/// State store key. +pub struct StateStoreKey(pub(super) Cow<'static, [u8]>); + +impl StateStoreKey { + /// Ctor. + pub fn new(key: Cow<'static, [u8]>) -> Self { + StateStoreKey(key) + } +} + +/// Block tip anchor. +pub struct Tip; + +/// Latest switch block anchor. +pub struct LatestSwitchBlock; + +/// Block height. 
+pub type BlockHeight = u64; + +/// Switch block header alias. +pub type SwitchBlockHeader = BlockHeader; + +/// Switch block alias. +pub type SwitchBlock = Block; diff --git a/storage/src/block_store/types/transfers.rs b/storage/src/block_store/types/transfers.rs new file mode 100644 index 0000000000..159146fa68 --- /dev/null +++ b/storage/src/block_store/types/transfers.rs @@ -0,0 +1,52 @@ +use serde::{Deserialize, Serialize}; + +use casper_types::{ + bytesrepr::{self, FromBytes, ToBytes}, + Transfer, TransferV1, +}; + +/// A wrapped `Vec`, used as the value type in the `transfer_dbs`. +/// +/// It exists to allow the `impl From>` to be written, making the type suitable for +/// use as a parameter in a `VersionedDatabases`. +#[derive(Clone, Serialize, Deserialize, Debug, Default, PartialEq, Eq)] +pub(in crate::block_store) struct Transfers(Vec); + +impl Transfers { + pub(in crate::block_store) fn into_owned(self) -> Vec { + self.0 + } +} + +impl From> for Transfers { + fn from(v1_transfers: Vec) -> Self { + Transfers(v1_transfers.into_iter().map(Transfer::V1).collect()) + } +} + +impl From> for Transfers { + fn from(transfers: Vec) -> Self { + Transfers(transfers) + } +} + +impl ToBytes for Transfers { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.0.to_bytes() + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.0.write_bytes(writer) + } +} + +impl FromBytes for Transfers { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + Vec::::from_bytes(bytes) + .map(|(transfers, remainder)| (Transfers(transfers), remainder)) + } +} diff --git a/storage/src/data_access_layer.rs b/storage/src/data_access_layer.rs new file mode 100644 index 0000000000..a34c16442e --- /dev/null +++ b/storage/src/data_access_layer.rs @@ -0,0 +1,201 @@ +use crate::global_state::{ + error::Error as GlobalStateError, + state::{CommitProvider, 
StateProvider}, +}; +use casper_types::{execution::Effects, Digest}; + +use crate::tracking_copy::TrackingCopy; + +mod addressable_entity; +/// Auction provider. +pub mod auction; +/// Balance provider. +pub mod balance; +mod balance_hold; +mod balance_identifier_purse; +/// Bids provider. +pub mod bids; +mod block_global; +/// Block rewards provider. +pub mod block_rewards; +mod contract; +mod entry_points; +/// Era validators provider. +pub mod era_validators; +mod execution_results_checksum; +mod fee; +mod flush; +/// Forced undelegate provider. +pub mod forced_undelegate; +mod genesis; +/// Handle fee provider. +pub mod handle_fee; +mod handle_refund; +mod key_prefix; +/// Message topics. +pub mod message_topics; +/// Mint provider. +pub mod mint; +/// Prefixed values provider. +pub mod prefixed_values; +mod protocol_upgrade; +/// Prune provider. +pub mod prune; +/// Query provider. +pub mod query; +mod round_seigniorage; +mod seigniorage_recipients; +/// Step provider. +pub mod step; +mod system_entity_registry; +/// Tagged values provider. 
+pub mod tagged_values; +mod total_supply; +mod trie; + +pub use addressable_entity::{AddressableEntityRequest, AddressableEntityResult}; +pub use auction::{AuctionMethod, BiddingRequest, BiddingResult}; +pub use balance::{ + BalanceHolds, BalanceHoldsWithProof, BalanceIdentifier, BalanceRequest, BalanceResult, + GasHoldBalanceHandling, ProofHandling, ProofsResult, +}; +pub use balance_hold::{ + BalanceHoldError, BalanceHoldKind, BalanceHoldMode, BalanceHoldRequest, BalanceHoldResult, + InsufficientBalanceHandling, +}; +pub use balance_identifier_purse::{BalanceIdentifierPurseRequest, BalanceIdentifierPurseResult}; +pub use bids::{BidsRequest, BidsResult}; +pub use block_global::{BlockGlobalKind, BlockGlobalRequest, BlockGlobalResult}; +pub use block_rewards::{BlockRewardsError, BlockRewardsRequest, BlockRewardsResult}; +pub use contract::{ContractRequest, ContractResult}; +pub use entry_points::{ + EntryPointExistsRequest, EntryPointExistsResult, EntryPointRequest, EntryPointResult, +}; +pub use era_validators::{EraValidatorsRequest, EraValidatorsResult}; +pub use execution_results_checksum::{ + ExecutionResultsChecksumRequest, ExecutionResultsChecksumResult, + EXECUTION_RESULTS_CHECKSUM_NAME, +}; +pub use fee::{FeeError, FeeRequest, FeeResult}; +pub use flush::{FlushRequest, FlushResult}; +pub use genesis::{GenesisRequest, GenesisResult}; +pub use handle_fee::{HandleFeeMode, HandleFeeRequest, HandleFeeResult}; +pub use handle_refund::{HandleRefundMode, HandleRefundRequest, HandleRefundResult}; +pub use key_prefix::KeyPrefix; +pub use message_topics::{MessageTopicsRequest, MessageTopicsResult}; +pub use mint::{TransferRequest, TransferResult}; +pub use protocol_upgrade::{ProtocolUpgradeRequest, ProtocolUpgradeResult}; +pub use prune::{PruneRequest, PruneResult}; +pub use query::{QueryRequest, QueryResult}; +pub use round_seigniorage::{RoundSeigniorageRateRequest, RoundSeigniorageRateResult}; +pub use seigniorage_recipients::{SeigniorageRecipientsRequest, 
SeigniorageRecipientsResult}; +pub use step::{EvictItem, RewardItem, SlashItem, StepError, StepRequest, StepResult}; +pub use system_entity_registry::{ + SystemEntityRegistryPayload, SystemEntityRegistryRequest, SystemEntityRegistryResult, + SystemEntityRegistrySelector, +}; +pub use total_supply::{TotalSupplyRequest, TotalSupplyResult}; +pub use trie::{PutTrieRequest, PutTrieResult, TrieElement, TrieRequest, TrieResult}; + +/// Anchor struct for block store functionality. +#[derive(Default, Copy, Clone)] +pub struct BlockStore(()); + +impl BlockStore { + /// Ctor. + pub fn new() -> Self { + BlockStore(()) + } +} + +/// Data access layer. +#[derive(Copy, Clone)] +pub struct DataAccessLayer { + /// Block store instance. + pub block_store: BlockStore, + /// Memoized state. + pub state: S, + /// Max query depth. + pub max_query_depth: u64, + /// Enable the addressable entity capability. + pub enable_addressable_entity: bool, +} + +impl DataAccessLayer { + /// Returns reference to current state of the data access layer. + pub fn state(&self) -> &S { + &self.state + } +} + +impl CommitProvider for DataAccessLayer +where + S: CommitProvider, +{ + fn commit_effects( + &self, + state_hash: Digest, + effects: Effects, + ) -> Result { + self.state.commit_effects(state_hash, effects) + } + + fn commit_values( + &self, + state_hash: Digest, + values_to_write: Vec<(casper_types::Key, casper_types::StoredValue)>, + keys_to_prune: std::collections::BTreeSet, + ) -> Result { + self.state + .commit_values(state_hash, values_to_write, keys_to_prune) + } +} + +impl StateProvider for DataAccessLayer +where + S: StateProvider, +{ + type Reader = S::Reader; + + fn flush(&self, request: FlushRequest) -> FlushResult { + self.state.flush(request) + } + + fn empty_root(&self) -> Digest { + self.state.empty_root() + } + + fn tracking_copy( + &self, + hash: Digest, + ) -> Result>, GlobalStateError> { + match self.state.checkout(hash)? 
{ + Some(reader) => Ok(Some(TrackingCopy::new( + reader, + self.max_query_depth, + self.enable_addressable_entity, + ))), + None => Ok(None), + } + } + + fn checkout(&self, state_hash: Digest) -> Result, GlobalStateError> { + self.state.checkout(state_hash) + } + + fn trie(&self, request: TrieRequest) -> TrieResult { + self.state.trie(request) + } + + fn put_trie(&self, request: PutTrieRequest) -> PutTrieResult { + self.state.put_trie(request) + } + + fn missing_children(&self, trie_raw: &[u8]) -> Result, GlobalStateError> { + self.state.missing_children(trie_raw) + } + + fn enable_entity(&self) -> bool { + self.state.enable_entity() + } +} diff --git a/storage/src/data_access_layer/addressable_entity.rs b/storage/src/data_access_layer/addressable_entity.rs new file mode 100644 index 0000000000..724251296c --- /dev/null +++ b/storage/src/data_access_layer/addressable_entity.rs @@ -0,0 +1,53 @@ +use crate::tracking_copy::TrackingCopyError; +use casper_types::{AddressableEntity, Digest, Key}; + +/// Represents a request to obtain an addressable entity. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct AddressableEntityRequest { + state_hash: Digest, + key: Key, +} + +impl AddressableEntityRequest { + /// Creates new request. + pub fn new(state_hash: Digest, key: Key) -> Self { + AddressableEntityRequest { state_hash, key } + } + + /// Returns state root hash. + pub fn state_hash(&self) -> Digest { + self.state_hash + } + + /// Returns key. + pub fn key(&self) -> Key { + self.key + } +} + +/// Represents a result of a `addressable_entity` request. +#[derive(Debug)] +pub enum AddressableEntityResult { + /// Invalid state root hash. + RootNotFound, + /// Value not found. + ValueNotFound(String), + /// Contains an addressable entity from global state. + Success { + /// An addressable entity. + entity: AddressableEntity, + }, + /// Failure. 
+ Failure(TrackingCopyError), +} + +impl AddressableEntityResult { + /// Returns wrapped addressable entity if this represents a successful query result. + pub fn into_option(self) -> Option { + if let Self::Success { entity } = self { + Some(entity) + } else { + None + } + } +} diff --git a/storage/src/data_access_layer/auction.rs b/storage/src/data_access_layer/auction.rs new file mode 100644 index 0000000000..e0346d0dd4 --- /dev/null +++ b/storage/src/data_access_layer/auction.rs @@ -0,0 +1,452 @@ +use std::collections::BTreeSet; + +use serde::Serialize; +use thiserror::Error; +use tracing::error; + +use casper_types::{ + account::AccountHash, + bytesrepr::FromBytes, + execution::Effects, + system::{ + auction, + auction::{DelegationRate, DelegatorKind, Reservation}, + }, + CLTyped, CLValue, CLValueError, Chainspec, Digest, InitiatorAddr, ProtocolVersion, PublicKey, + RuntimeArgs, TransactionEntryPoint, TransactionHash, Transfer, URefAddr, U512, +}; + +use crate::{ + system::runtime_native::Config as NativeRuntimeConfig, tracking_copy::TrackingCopyError, +}; + +/// An error returned when constructing an [`AuctionMethod`]. +#[derive(Clone, Eq, PartialEq, Error, Serialize, Debug)] +pub enum AuctionMethodError { + /// Provided entry point is not one of the Auction ones. + #[error("invalid entry point for auction: {0}")] + InvalidEntryPoint(TransactionEntryPoint), + /// Required arg missing. + #[error("missing '{0}' arg")] + MissingArg(String), + /// Failed to parse the given arg. + #[error("failed to parse '{arg}' arg: {error}")] + CLValue { + /// The arg name. + arg: String, + /// The failure. + error: CLValueError, + }, +} + +/// Auction method to interact with. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum AuctionMethod { + /// Activate bid. + ActivateBid { + /// Validator public key (must match initiating address). + validator: PublicKey, + }, + /// Add bid. + AddBid { + /// Validator public key (must match initiating address). 
+ public_key: PublicKey, + /// Delegation rate for this validator bid. + delegation_rate: DelegationRate, + /// Bid amount. + amount: U512, + /// Minimum delegation amount for this validator bid. + minimum_delegation_amount: u64, + /// Maximum delegation amount for this validator bid. + maximum_delegation_amount: u64, + /// The minimum bid amount a validator must submit to have + /// their bid considered as valid. + minimum_bid_amount: u64, + /// Number of delegator slots which can be reserved for specific delegators + reserved_slots: u32, + }, + /// Withdraw bid. + WithdrawBid { + /// Validator public key. + public_key: PublicKey, + /// Bid amount. + amount: U512, + /// The minimum bid amount a validator, if a validator reduces their stake + /// below this amount, then it is treated as a complete withdrawal. + minimum_bid_amount: u64, + }, + /// Delegate to validator. + Delegate { + /// Delegator public key. + delegator: DelegatorKind, + /// Validator public key. + validator: PublicKey, + /// Delegation amount. + amount: U512, + /// Max delegators per validator. + max_delegators_per_validator: u32, + }, + /// Undelegate from validator. + Undelegate { + /// Delegator public key. + delegator: DelegatorKind, + /// Validator public key. + validator: PublicKey, + /// Undelegation amount. + amount: U512, + }, + /// Undelegate from validator and attempt delegation to new validator after unbonding delay + /// elapses. + Redelegate { + /// Delegator public key. + delegator: DelegatorKind, + /// Validator public key. + validator: PublicKey, + /// Redelegation amount. + amount: U512, + /// New validator public key. + new_validator: PublicKey, + }, + /// Change the public key associated with a validator to a different public key. + ChangeBidPublicKey { + /// Current public key. + public_key: PublicKey, + /// New public key. + new_public_key: PublicKey, + }, + /// Add delegator slot reservations. + AddReservations { + /// List of reservations. 
+ reservations: Vec, + }, + /// Remove delegator slot reservations for delegators with specified public keys. + CancelReservations { + /// Validator public key. + validator: PublicKey, + /// List of delegator public keys. + delegators: Vec, + /// Max delegators per validator. + max_delegators_per_validator: u32, + }, +} + +impl AuctionMethod { + /// Form auction method from parts. + pub fn from_parts( + entry_point: TransactionEntryPoint, + runtime_args: &RuntimeArgs, + chainspec: &Chainspec, + ) -> Result { + match entry_point { + TransactionEntryPoint::Call + | TransactionEntryPoint::Custom(_) + | TransactionEntryPoint::Transfer + | TransactionEntryPoint::Burn => { + Err(AuctionMethodError::InvalidEntryPoint(entry_point)) + } + TransactionEntryPoint::ActivateBid => Self::new_activate_bid(runtime_args), + TransactionEntryPoint::AddBid => Self::new_add_bid( + runtime_args, + chainspec.core_config.minimum_delegation_amount, + chainspec.core_config.maximum_delegation_amount, + chainspec.core_config.minimum_bid_amount, + ), + TransactionEntryPoint::WithdrawBid => { + Self::new_withdraw_bid(runtime_args, chainspec.core_config.minimum_bid_amount) + } + TransactionEntryPoint::Delegate => Self::new_delegate( + runtime_args, + chainspec.core_config.max_delegators_per_validator, + ), + TransactionEntryPoint::Undelegate => Self::new_undelegate(runtime_args), + TransactionEntryPoint::Redelegate => Self::new_redelegate(runtime_args), + TransactionEntryPoint::ChangeBidPublicKey => { + Self::new_change_bid_public_key(runtime_args) + } + TransactionEntryPoint::AddReservations => Self::new_add_reservations(runtime_args), + TransactionEntryPoint::CancelReservations => Self::new_cancel_reservations( + runtime_args, + chainspec.core_config.max_delegators_per_validator, + ), + } + } + + fn new_activate_bid(runtime_args: &RuntimeArgs) -> Result { + let validator = Self::get_named_argument(runtime_args, auction::ARG_VALIDATOR)?; + Ok(Self::ActivateBid { validator }) + } + + fn 
new_add_bid( + runtime_args: &RuntimeArgs, + global_minimum_delegation: u64, + global_maximum_delegation: u64, + global_minimum_bid_amount: u64, + ) -> Result { + let public_key = Self::get_named_argument(runtime_args, auction::ARG_PUBLIC_KEY)?; + let delegation_rate = Self::get_named_argument(runtime_args, auction::ARG_DELEGATION_RATE)?; + let amount = Self::get_named_argument(runtime_args, auction::ARG_AMOUNT)?; + let minimum_delegation_amount = + Self::get_named_argument(runtime_args, auction::ARG_MINIMUM_DELEGATION_AMOUNT) + .unwrap_or(global_minimum_delegation); + let maximum_delegation_amount = + Self::get_named_argument(runtime_args, auction::ARG_MAXIMUM_DELEGATION_AMOUNT) + .unwrap_or(global_maximum_delegation); + let reserved_slots = + Self::get_named_argument(runtime_args, auction::ARG_RESERVED_SLOTS).unwrap_or(0); + + Ok(Self::AddBid { + public_key, + delegation_rate, + amount, + minimum_delegation_amount, + maximum_delegation_amount, + minimum_bid_amount: global_minimum_bid_amount, + reserved_slots, + }) + } + + fn new_withdraw_bid( + runtime_args: &RuntimeArgs, + global_minimum_bid_amount: u64, + ) -> Result { + let public_key = Self::get_named_argument(runtime_args, auction::ARG_PUBLIC_KEY)?; + let amount = Self::get_named_argument(runtime_args, auction::ARG_AMOUNT)?; + Ok(Self::WithdrawBid { + public_key, + amount, + minimum_bid_amount: global_minimum_bid_amount, + }) + } + + fn new_delegate( + runtime_args: &RuntimeArgs, + max_delegators_per_validator: u32, + ) -> Result { + let delegator = { + match Self::get_named_argument(runtime_args, auction::ARG_DELEGATOR) { + Ok(pk) => DelegatorKind::PublicKey(pk), + Err(_) => { + let purse: URefAddr = + Self::get_named_argument(runtime_args, auction::ARG_DELEGATOR_PURSE)?; + DelegatorKind::Purse(purse) + } + } + }; + let validator = Self::get_named_argument(runtime_args, auction::ARG_VALIDATOR)?; + let amount = Self::get_named_argument(runtime_args, auction::ARG_AMOUNT)?; + + Ok(Self::Delegate { + delegator, 
+ validator, + amount, + max_delegators_per_validator, + }) + } + + fn new_undelegate(runtime_args: &RuntimeArgs) -> Result { + let delegator = { + match Self::get_named_argument(runtime_args, auction::ARG_DELEGATOR) { + Ok(pk) => DelegatorKind::PublicKey(pk), + Err(_) => { + let purse: URefAddr = + Self::get_named_argument(runtime_args, auction::ARG_DELEGATOR_PURSE)?; + DelegatorKind::Purse(purse) + } + } + }; + let validator = Self::get_named_argument(runtime_args, auction::ARG_VALIDATOR)?; + let amount = Self::get_named_argument(runtime_args, auction::ARG_AMOUNT)?; + + Ok(Self::Undelegate { + delegator, + validator, + amount, + }) + } + + fn new_redelegate(runtime_args: &RuntimeArgs) -> Result { + let delegator = { + match Self::get_named_argument(runtime_args, auction::ARG_DELEGATOR) { + Ok(pk) => DelegatorKind::PublicKey(pk), + Err(_) => { + let purse: URefAddr = + Self::get_named_argument(runtime_args, auction::ARG_DELEGATOR_PURSE)?; + DelegatorKind::Purse(purse) + } + } + }; + let validator = Self::get_named_argument(runtime_args, auction::ARG_VALIDATOR)?; + let amount = Self::get_named_argument(runtime_args, auction::ARG_AMOUNT)?; + let new_validator = Self::get_named_argument(runtime_args, auction::ARG_NEW_VALIDATOR)?; + + Ok(Self::Redelegate { + delegator, + validator, + amount, + new_validator, + }) + } + + fn new_change_bid_public_key(runtime_args: &RuntimeArgs) -> Result { + let public_key = Self::get_named_argument(runtime_args, auction::ARG_PUBLIC_KEY)?; + let new_public_key = Self::get_named_argument(runtime_args, auction::ARG_NEW_PUBLIC_KEY)?; + + Ok(Self::ChangeBidPublicKey { + public_key, + new_public_key, + }) + } + + fn new_add_reservations(runtime_args: &RuntimeArgs) -> Result { + let reservations = Self::get_named_argument(runtime_args, auction::ARG_RESERVATIONS)?; + + Ok(Self::AddReservations { reservations }) + } + + fn new_cancel_reservations( + runtime_args: &RuntimeArgs, + max_delegators_per_validator: u32, + ) -> Result { + let 
validator = Self::get_named_argument(runtime_args, auction::ARG_VALIDATOR)?; + let delegators = Self::get_named_argument(runtime_args, auction::ARG_DELEGATORS)?; + + Ok(Self::CancelReservations { + validator, + delegators, + max_delegators_per_validator, + }) + } + + fn get_named_argument( + args: &RuntimeArgs, + name: &str, + ) -> Result { + let arg: &CLValue = args + .get(name) + .ok_or_else(|| AuctionMethodError::MissingArg(name.to_string()))?; + arg.to_t().map_err(|error| AuctionMethodError::CLValue { + arg: name.to_string(), + error, + }) + } +} + +/// Bidding request. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct BiddingRequest { + /// The runtime config. + pub(crate) config: NativeRuntimeConfig, + /// State root hash. + pub(crate) state_hash: Digest, + /// The protocol version. + pub(crate) protocol_version: ProtocolVersion, + /// The auction method. + pub(crate) auction_method: AuctionMethod, + /// Transaction hash. + pub(crate) transaction_hash: TransactionHash, + /// Base account. + pub(crate) initiator: InitiatorAddr, + /// List of authorizing accounts. + pub(crate) authorization_keys: BTreeSet, +} + +impl BiddingRequest { + /// Creates new request instance with runtime args. + #[allow(clippy::too_many_arguments)] + pub fn new( + config: NativeRuntimeConfig, + state_hash: Digest, + protocol_version: ProtocolVersion, + transaction_hash: TransactionHash, + initiator: InitiatorAddr, + authorization_keys: BTreeSet, + auction_method: AuctionMethod, + ) -> Self { + Self { + config, + state_hash, + protocol_version, + transaction_hash, + initiator, + authorization_keys, + auction_method, + } + } + + /// Returns the config. + pub fn config(&self) -> &NativeRuntimeConfig { + &self.config + } + + /// Returns the state hash. + pub fn state_hash(&self) -> Digest { + self.state_hash + } + + /// Returns the protocol version. + pub fn protocol_version(&self) -> ProtocolVersion { + self.protocol_version + } + + /// Returns the auction method. 
+ pub fn auction_method(&self) -> &AuctionMethod { + &self.auction_method + } + + /// Returns the transaction hash. + pub fn transaction_hash(&self) -> TransactionHash { + self.transaction_hash + } + + /// Returns the initiator. + pub fn initiator(&self) -> &InitiatorAddr { + &self.initiator + } + + /// Returns the authorization keys. + pub fn authorization_keys(&self) -> &BTreeSet { + &self.authorization_keys + } +} + +/// Auction method ret. +#[derive(Debug, Clone)] +pub enum AuctionMethodRet { + /// Unit. + Unit, + /// Updated amount. + UpdatedAmount(U512), +} + +/// Bidding result. +#[derive(Debug)] +pub enum BiddingResult { + /// Invalid state root hash. + RootNotFound, + /// Bidding request succeeded + Success { + /// Transfer records. + transfers: Vec, + /// Effects of bidding interaction. + effects: Effects, + /// The ret value, if any. + ret: AuctionMethodRet, + }, + /// Bidding request failed. + Failure(TrackingCopyError), +} + +impl BiddingResult { + /// Is this a success. + pub fn is_success(&self) -> bool { + matches!(self, BiddingResult::Success { .. }) + } + + /// Effects. + pub fn effects(&self) -> Effects { + match self { + BiddingResult::RootNotFound | BiddingResult::Failure(_) => Effects::new(), + BiddingResult::Success { effects, .. } => effects.clone(), + } + } +} diff --git a/storage/src/data_access_layer/balance.rs b/storage/src/data_access_layer/balance.rs new file mode 100644 index 0000000000..c2b0760ec1 --- /dev/null +++ b/storage/src/data_access_layer/balance.rs @@ -0,0 +1,796 @@ +//! Types for balance queries. 
+use casper_types::{ + account::AccountHash, + global_state::TrieMerkleProof, + system::{ + handle_payment::{ACCUMULATION_PURSE_KEY, PAYMENT_PURSE_KEY, REFUND_PURSE_KEY}, + mint::BalanceHoldAddrTag, + HANDLE_PAYMENT, + }, + AccessRights, BlockTime, Digest, EntityAddr, HoldBalanceHandling, InitiatorAddr, Key, + ProtocolVersion, PublicKey, StoredValue, TimeDiff, URef, URefAddr, U512, +}; +use itertools::Itertools; +use num_rational::Ratio; +use num_traits::CheckedMul; +use std::{ + collections::{btree_map::Entry, BTreeMap}, + fmt::{Display, Formatter}, +}; +use tracing::error; + +use crate::{ + global_state::state::StateReader, + tracking_copy::{TrackingCopyEntityExt, TrackingCopyError, TrackingCopyExt}, + TrackingCopy, +}; + +/// How to handle available balance inquiry? +#[derive(Debug, Copy, Clone, PartialEq, Eq, Default)] +pub enum BalanceHandling { + /// Ignore balance holds. + #[default] + Total, + /// Adjust for balance holds (if any). + Available, +} + +/// Merkle proof handling options. +#[derive(Debug, Copy, Clone, PartialEq, Eq, Default)] +pub enum ProofHandling { + /// Do not attempt to provide proofs. + #[default] + NoProofs, + /// Provide proofs. + Proofs, +} + +/// Represents a way to make a balance inquiry. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum BalanceIdentifier { + /// Use system refund purse (held by handle payment system contract). + Refund, + /// Use system payment purse (held by handle payment system contract). + Payment, + /// Use system accumulate purse (held by handle payment system contract). + Accumulate, + /// Use purse associated to specified uref. + Purse(URef), + /// Use main purse of entity derived from public key. + Public(PublicKey), + /// Use main purse of entity from account hash. + Account(AccountHash), + /// Use main purse of entity. + Entity(EntityAddr), + /// Use purse at Key::Purse(URefAddr). + Internal(URefAddr), + /// Penalized account identifier. + PenalizedAccount(AccountHash), + /// Penalized payment identifier. 
+ PenalizedPayment, +} + +impl BalanceIdentifier { + /// Returns underlying uref addr from balance identifier, if any. + pub fn as_purse_addr(&self) -> Option { + match self { + BalanceIdentifier::Internal(addr) => Some(*addr), + BalanceIdentifier::Purse(uref) => Some(uref.addr()), + BalanceIdentifier::Public(_) + | BalanceIdentifier::Account(_) + | BalanceIdentifier::PenalizedAccount(_) + | BalanceIdentifier::PenalizedPayment + | BalanceIdentifier::Entity(_) + | BalanceIdentifier::Refund + | BalanceIdentifier::Payment + | BalanceIdentifier::Accumulate => None, + } + } + + /// Return purse_uref, if able. + pub fn purse_uref( + &self, + tc: &mut TrackingCopy, + protocol_version: ProtocolVersion, + ) -> Result + where + S: StateReader, + { + let purse_uref = match self { + BalanceIdentifier::Internal(addr) => URef::new(*addr, AccessRights::READ), + BalanceIdentifier::Purse(purse_uref) => *purse_uref, + BalanceIdentifier::Public(public_key) => { + let account_hash = public_key.to_account_hash(); + match tc.runtime_footprint_by_account_hash(protocol_version, account_hash) { + Ok((_, entity)) => entity + .main_purse() + .ok_or(TrackingCopyError::Authorization)?, + Err(tce) => return Err(tce), + } + } + BalanceIdentifier::Account(account_hash) + | BalanceIdentifier::PenalizedAccount(account_hash) => { + match tc.runtime_footprint_by_account_hash(protocol_version, *account_hash) { + Ok((_, entity)) => entity + .main_purse() + .ok_or(TrackingCopyError::Authorization)?, + Err(tce) => return Err(tce), + } + } + BalanceIdentifier::Entity(entity_addr) => { + match tc.runtime_footprint_by_entity_addr(*entity_addr) { + Ok(entity) => entity + .main_purse() + .ok_or(TrackingCopyError::Authorization)?, + Err(tce) => return Err(tce), + } + } + BalanceIdentifier::Refund => { + self.get_system_purse(tc, HANDLE_PAYMENT, REFUND_PURSE_KEY)? + } + BalanceIdentifier::Payment | BalanceIdentifier::PenalizedPayment => { + self.get_system_purse(tc, HANDLE_PAYMENT, PAYMENT_PURSE_KEY)? 
+ } + BalanceIdentifier::Accumulate => { + self.get_system_purse(tc, HANDLE_PAYMENT, ACCUMULATION_PURSE_KEY)? + } + }; + Ok(purse_uref) + } + + fn get_system_purse( + &self, + tc: &mut TrackingCopy, + system_contract_name: &str, + named_key_name: &str, + ) -> Result + where + S: StateReader, + { + let system_contract_registry = tc.get_system_entity_registry()?; + + let entity_hash = system_contract_registry + .get(system_contract_name) + .ok_or_else(|| { + error!("Missing system handle payment contract hash"); + TrackingCopyError::MissingSystemContractHash(system_contract_name.to_string()) + })?; + + let named_keys = tc + .runtime_footprint_by_entity_addr(EntityAddr::System(*entity_hash))? + .take_named_keys(); + + let named_key = + named_keys + .get(named_key_name) + .ok_or(TrackingCopyError::NamedKeyNotFound( + named_key_name.to_string(), + ))?; + let uref = named_key + .as_uref() + .ok_or(TrackingCopyError::UnexpectedKeyVariant(*named_key))?; + Ok(*uref) + } + + /// Is this balance identifier for penalty? + pub fn is_penalty(&self) -> bool { + matches!( + self, + BalanceIdentifier::PenalizedAccount(_) | BalanceIdentifier::PenalizedPayment + ) + } +} + +impl Default for BalanceIdentifier { + fn default() -> Self { + BalanceIdentifier::Purse(URef::default()) + } +} + +impl From for BalanceIdentifier { + fn from(value: InitiatorAddr) -> Self { + match value { + InitiatorAddr::PublicKey(public_key) => BalanceIdentifier::Public(public_key), + InitiatorAddr::AccountHash(account_hash) => BalanceIdentifier::Account(account_hash), + } + } +} + +/// Processing hold balance handling. +#[derive(Debug, Copy, Clone, PartialEq, Eq, Default)] +pub struct ProcessingHoldBalanceHandling {} + +impl ProcessingHoldBalanceHandling { + /// Returns new instance. + pub fn new() -> Self { + ProcessingHoldBalanceHandling::default() + } + + /// Returns handling. 
+ pub fn handling(&self) -> HoldBalanceHandling { + HoldBalanceHandling::Accrued + } + + /// Returns true if handling is amortized. + pub fn is_amortized(&self) -> bool { + false + } + + /// Returns hold interval. + pub fn interval(&self) -> TimeDiff { + TimeDiff::default() + } +} + +impl From<(HoldBalanceHandling, u64)> for ProcessingHoldBalanceHandling { + fn from(_value: (HoldBalanceHandling, u64)) -> Self { + ProcessingHoldBalanceHandling::default() + } +} + +/// Gas hold balance handling. +#[derive(Debug, Copy, Clone, PartialEq, Eq, Default)] +pub struct GasHoldBalanceHandling { + handling: HoldBalanceHandling, + interval: TimeDiff, +} + +impl GasHoldBalanceHandling { + /// Returns new instance. + pub fn new(handling: HoldBalanceHandling, interval: TimeDiff) -> Self { + GasHoldBalanceHandling { handling, interval } + } + + /// Returns handling. + pub fn handling(&self) -> HoldBalanceHandling { + self.handling + } + + /// Returns interval. + pub fn interval(&self) -> TimeDiff { + self.interval + } + + /// Returns true if handling is amortized. + pub fn is_amortized(&self) -> bool { + matches!(self.handling, HoldBalanceHandling::Amortized) + } +} + +impl From<(HoldBalanceHandling, TimeDiff)> for GasHoldBalanceHandling { + fn from(value: (HoldBalanceHandling, TimeDiff)) -> Self { + GasHoldBalanceHandling { + handling: value.0, + interval: value.1, + } + } +} + +impl From<(HoldBalanceHandling, u64)> for GasHoldBalanceHandling { + fn from(value: (HoldBalanceHandling, u64)) -> Self { + GasHoldBalanceHandling { + handling: value.0, + interval: TimeDiff::from_millis(value.1), + } + } +} + +/// Represents a balance request. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct BalanceRequest { + state_hash: Digest, + protocol_version: ProtocolVersion, + identifier: BalanceIdentifier, + balance_handling: BalanceHandling, + proof_handling: ProofHandling, +} + +impl BalanceRequest { + /// Creates a new [`BalanceRequest`]. 
+ pub fn new( + state_hash: Digest, + protocol_version: ProtocolVersion, + identifier: BalanceIdentifier, + balance_handling: BalanceHandling, + proof_handling: ProofHandling, + ) -> Self { + BalanceRequest { + state_hash, + protocol_version, + identifier, + balance_handling, + proof_handling, + } + } + + /// Creates a new [`BalanceRequest`]. + pub fn from_purse( + state_hash: Digest, + protocol_version: ProtocolVersion, + purse_uref: URef, + balance_handling: BalanceHandling, + proof_handling: ProofHandling, + ) -> Self { + BalanceRequest { + state_hash, + protocol_version, + identifier: BalanceIdentifier::Purse(purse_uref), + balance_handling, + proof_handling, + } + } + + /// Creates a new [`BalanceRequest`]. + pub fn from_public_key( + state_hash: Digest, + protocol_version: ProtocolVersion, + public_key: PublicKey, + balance_handling: BalanceHandling, + proof_handling: ProofHandling, + ) -> Self { + BalanceRequest { + state_hash, + protocol_version, + identifier: BalanceIdentifier::Public(public_key), + balance_handling, + proof_handling, + } + } + + /// Creates a new [`BalanceRequest`]. + pub fn from_account_hash( + state_hash: Digest, + protocol_version: ProtocolVersion, + account_hash: AccountHash, + balance_handling: BalanceHandling, + proof_handling: ProofHandling, + ) -> Self { + BalanceRequest { + state_hash, + protocol_version, + identifier: BalanceIdentifier::Account(account_hash), + balance_handling, + proof_handling, + } + } + + /// Creates a new [`BalanceRequest`]. + pub fn from_entity_addr( + state_hash: Digest, + protocol_version: ProtocolVersion, + entity_addr: EntityAddr, + balance_handling: BalanceHandling, + proof_handling: ProofHandling, + ) -> Self { + BalanceRequest { + state_hash, + protocol_version, + identifier: BalanceIdentifier::Entity(entity_addr), + balance_handling, + proof_handling, + } + } + + /// Creates a new [`BalanceRequest`]. 
+ pub fn from_internal( + state_hash: Digest, + protocol_version: ProtocolVersion, + balance_addr: URefAddr, + balance_handling: BalanceHandling, + proof_handling: ProofHandling, + ) -> Self { + BalanceRequest { + state_hash, + protocol_version, + identifier: BalanceIdentifier::Internal(balance_addr), + balance_handling, + proof_handling, + } + } + + /// Returns a state hash. + pub fn state_hash(&self) -> Digest { + self.state_hash + } + + /// Protocol version. + pub fn protocol_version(&self) -> ProtocolVersion { + self.protocol_version + } + + /// Returns the identifier [`BalanceIdentifier`]. + pub fn identifier(&self) -> &BalanceIdentifier { + &self.identifier + } + + /// Returns the block time. + pub fn balance_handling(&self) -> BalanceHandling { + self.balance_handling + } + + /// Returns proof handling. + pub fn proof_handling(&self) -> ProofHandling { + self.proof_handling + } +} + +/// Available balance checker. +pub trait AvailableBalanceChecker { + /// Calculate and return available balance. + fn available_balance( + &self, + block_time: BlockTime, + total_balance: U512, + gas_hold_balance_handling: GasHoldBalanceHandling, + processing_hold_balance_handling: ProcessingHoldBalanceHandling, + ) -> Result { + if self.is_empty() { + return Ok(total_balance); + } + + let gas_held = match gas_hold_balance_handling.handling() { + HoldBalanceHandling::Accrued => self.accrued(BalanceHoldAddrTag::Gas), + HoldBalanceHandling::Amortized => { + let interval = gas_hold_balance_handling.interval(); + self.amortization(BalanceHoldAddrTag::Gas, block_time, interval)? + } + }; + + let processing_held = match processing_hold_balance_handling.handling() { + HoldBalanceHandling::Accrued => self.accrued(BalanceHoldAddrTag::Processing), + HoldBalanceHandling::Amortized => { + let interval = processing_hold_balance_handling.interval(); + self.amortization(BalanceHoldAddrTag::Processing, block_time, interval)? 
+ } + }; + + let held = gas_held.saturating_add(processing_held); + + if held > total_balance { + return Ok(U512::zero()); + } + + debug_assert!( + total_balance >= held, + "it should not be possible to hold more than the total available" + ); + match total_balance.checked_sub(held) { + Some(available_balance) => Ok(available_balance), + None => { + error!(%held, %total_balance, "held amount exceeds total balance, which should never occur."); + Err(BalanceFailure::HeldExceedsTotal) + } + } + } + + /// Calculates amortization. + fn amortization( + &self, + hold_kind: BalanceHoldAddrTag, + block_time: BlockTime, + interval: TimeDiff, + ) -> Result { + let mut held = U512::zero(); + let block_time = block_time.value(); + let interval = interval.millis(); + + for (hold_created_time, holds) in self.holds(hold_kind) { + let hold_created_time = hold_created_time.value(); + if hold_created_time > block_time { + continue; + } + let expiry = hold_created_time.saturating_add(interval); + if block_time > expiry { + continue; + } + // total held amount + let held_ratio = Ratio::new_raw( + holds.values().copied().collect_vec().into_iter().sum(), + U512::one(), + ); + // remaining time + let remaining_time = U512::from(expiry.saturating_sub(block_time)); + // remaining time over total time + let ratio = Ratio::new_raw(remaining_time, U512::from(interval)); + /* + EXAMPLE: 1000 held for 24 hours + if 1 hours has elapsed, held amount = 1000 * (23/24) == 958 + if 2 hours has elapsed, held amount = 1000 * (22/24) == 916 + ... + if 23 hours has elapsed, held amount = 1000 * (1/24) == 41 + if 23.50 hours has elapsed, held amount = 1000 * (1/48) == 20 + if 23.75 hours has elapsed, held amount = 1000 * (1/96) == 10 + (54000 ms / 5184000 ms) + */ + match held_ratio.checked_mul(&ratio) { + Some(amortized) => held += amortized.to_integer(), + None => return Err(BalanceFailure::AmortizationFailure), + } + } + Ok(held) + } + + /// Return accrued amount. 
+ fn accrued(&self, hold_kind: BalanceHoldAddrTag) -> U512; + + /// Return holds. + fn holds(&self, hold_kind: BalanceHoldAddrTag) -> BTreeMap; + + /// Return true if empty. + fn is_empty(&self) -> bool; +} + +/// Balance holds with Merkle proofs. +pub type BalanceHolds = BTreeMap; + +impl AvailableBalanceChecker for BTreeMap { + fn accrued(&self, hold_kind: BalanceHoldAddrTag) -> U512 { + self.values() + .filter_map(|holds| holds.get(&hold_kind).copied()) + .collect_vec() + .into_iter() + .sum() + } + + fn holds(&self, hold_kind: BalanceHoldAddrTag) -> BTreeMap { + let mut ret = BTreeMap::new(); + for (k, v) in self { + if let Some(hold) = v.get(&hold_kind) { + let mut inner = BTreeMap::new(); + inner.insert(hold_kind, *hold); + ret.insert(*k, inner); + } + } + ret + } + + fn is_empty(&self) -> bool { + self.is_empty() + } +} + +/// Balance holds with Merkle proofs. +pub type BalanceHoldsWithProof = + BTreeMap)>; + +impl AvailableBalanceChecker for BTreeMap { + fn accrued(&self, hold_kind: BalanceHoldAddrTag) -> U512 { + self.values() + .filter_map(|holds| holds.get(&hold_kind)) + .map(|(amount, _)| *amount) + .collect_vec() + .into_iter() + .sum() + } + + fn holds(&self, hold_kind: BalanceHoldAddrTag) -> BTreeMap { + let mut ret: BTreeMap = BTreeMap::new(); + for (block_time, holds_with_proof) in self { + let mut holds: BTreeMap = BTreeMap::new(); + for (addr, (held, _)) in holds_with_proof { + if addr == &hold_kind { + match holds.entry(*addr) { + Entry::Vacant(v) => v.insert(*held), + Entry::Occupied(mut o) => &mut o.insert(*held), + }; + } + } + if !holds.is_empty() { + match ret.entry(*block_time) { + Entry::Vacant(v) => v.insert(holds), + Entry::Occupied(mut o) => &mut o.insert(holds), + }; + } + } + ret + } + + fn is_empty(&self) -> bool { + self.is_empty() + } +} + +/// Proofs result. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum ProofsResult { + /// Not requested. + NotRequested { + /// Any time-relevant active holds on the balance, without proofs. 
+ balance_holds: BTreeMap, + }, + /// Proofs. + Proofs { + /// A proof that the given value is present in the Merkle trie. + total_balance_proof: Box>, + /// Any time-relevant active holds on the balance, with proofs.. + balance_holds: BTreeMap, + }, +} + +impl ProofsResult { + /// Returns total balance proof, if any. + pub fn total_balance_proof(&self) -> Option<&TrieMerkleProof> { + match self { + ProofsResult::NotRequested { .. } => None, + ProofsResult::Proofs { + total_balance_proof, + .. + } => Some(total_balance_proof), + } + } + + /// Returns balance holds, if any. + pub fn balance_holds_with_proof(&self) -> Option<&BTreeMap> { + match self { + ProofsResult::NotRequested { .. } => None, + ProofsResult::Proofs { balance_holds, .. } => Some(balance_holds), + } + } + + /// Returns balance holds, if any. + pub fn balance_holds(&self) -> Option<&BTreeMap> { + match self { + ProofsResult::NotRequested { balance_holds } => Some(balance_holds), + ProofsResult::Proofs { .. } => None, + } + } + + /// Returns the total held amount. + pub fn total_held_amount(&self) -> U512 { + match self { + ProofsResult::NotRequested { balance_holds } => balance_holds + .values() + .flat_map(|holds| holds.values().copied()) + .collect_vec() + .into_iter() + .sum(), + ProofsResult::Proofs { balance_holds, .. } => balance_holds + .values() + .flat_map(|holds| holds.values().map(|(v, _)| *v)) + .collect_vec() + .into_iter() + .sum(), + } + } + + /// Returns the available balance, calculated using imputed values. 
+ #[allow(clippy::result_unit_err)] + pub fn available_balance( + &self, + block_time: BlockTime, + total_balance: U512, + gas_hold_balance_handling: GasHoldBalanceHandling, + processing_hold_balance_handling: ProcessingHoldBalanceHandling, + ) -> Result { + match self { + ProofsResult::NotRequested { balance_holds } => balance_holds.available_balance( + block_time, + total_balance, + gas_hold_balance_handling, + processing_hold_balance_handling, + ), + ProofsResult::Proofs { balance_holds, .. } => balance_holds.available_balance( + block_time, + total_balance, + gas_hold_balance_handling, + processing_hold_balance_handling, + ), + } + } +} + +/// Balance failure. +#[derive(Debug, Clone)] +pub enum BalanceFailure { + /// Failed to calculate amortization (checked multiplication). + AmortizationFailure, + /// Held amount exceeds total balance, which should never occur. + HeldExceedsTotal, +} + +impl Display for BalanceFailure { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self { + BalanceFailure::AmortizationFailure => { + write!( + f, + "AmortizationFailure: failed to calculate amortization (checked multiplication)." + ) + } + BalanceFailure::HeldExceedsTotal => { + write!( + f, + "HeldExceedsTotal: held amount exceeds total balance, which should never occur." + ) + } + } + } +} + +/// Result enum that represents all possible outcomes of a balance request. +#[derive(Debug, Clone)] +pub enum BalanceResult { + /// Returned if a passed state root hash is not found. + RootNotFound, + /// A query returned a balance. + Success { + /// The purse address. + purse_addr: URefAddr, + /// The purses total balance, not considering holds. + total_balance: U512, + /// The available balance (total balance - sum of all active holds). + available_balance: U512, + /// Proofs result. + proofs_result: ProofsResult, + }, + /// Failure. + Failure(TrackingCopyError), +} + +impl BalanceResult { + /// Returns the purse address for a [`BalanceResult::Success`] variant. 
+ pub fn purse_addr(&self) -> Option { + match self { + BalanceResult::Success { purse_addr, .. } => Some(*purse_addr), + _ => None, + } + } + + /// Returns the total balance for a [`BalanceResult::Success`] variant. + pub fn total_balance(&self) -> Option<&U512> { + match self { + BalanceResult::Success { total_balance, .. } => Some(total_balance), + _ => None, + } + } + + /// Returns the available balance for a [`BalanceResult::Success`] variant. + pub fn available_balance(&self) -> Option<&U512> { + match self { + BalanceResult::Success { + available_balance, .. + } => Some(available_balance), + _ => None, + } + } + + /// Returns the Merkle proofs, if any. + pub fn proofs_result(self) -> Option { + match self { + BalanceResult::Success { proofs_result, .. } => Some(proofs_result), + _ => None, + } + } + + /// Is the available balance sufficient to cover the cost? + pub fn is_sufficient(&self, cost: U512) -> bool { + match self { + BalanceResult::RootNotFound | BalanceResult::Failure(_) => false, + BalanceResult::Success { + available_balance, .. + } => available_balance >= &cost, + } + } + + /// Was the balance request successful? + pub fn is_success(&self) -> bool { + match self { + BalanceResult::RootNotFound | BalanceResult::Failure(_) => false, + BalanceResult::Success { .. } => true, + } + } + + /// Tracking copy error, if any. + pub fn error(&self) -> Option<&TrackingCopyError> { + match self { + BalanceResult::RootNotFound | BalanceResult::Success { .. 
} => None, + BalanceResult::Failure(err) => Some(err), + } + } +} + +impl From for BalanceResult { + fn from(tce: TrackingCopyError) -> Self { + BalanceResult::Failure(tce) + } +} diff --git a/storage/src/data_access_layer/balance_hold.rs b/storage/src/data_access_layer/balance_hold.rs new file mode 100644 index 0000000000..f25f6a6abe --- /dev/null +++ b/storage/src/data_access_layer/balance_hold.rs @@ -0,0 +1,372 @@ +use crate::{ + data_access_layer::{balance::BalanceFailure, BalanceIdentifier}, + tracking_copy::TrackingCopyError, +}; +use casper_types::{ + account::AccountHash, + execution::Effects, + system::mint::{BalanceHoldAddr, BalanceHoldAddrTag}, + Digest, ProtocolVersion, StoredValue, U512, +}; +use std::fmt::{Display, Formatter}; +use thiserror::Error; + +/// Balance hold kind. +#[derive(Debug, Copy, Clone, PartialEq, Eq, Default)] +pub enum BalanceHoldKind { + /// All balance holds. + #[default] + All, + /// Selection of a specific kind of balance. + Tag(BalanceHoldAddrTag), +} + +impl BalanceHoldKind { + /// Returns true of imputed tag applies to instance. + pub fn matches(&self, balance_hold_addr_tag: BalanceHoldAddrTag) -> bool { + match self { + BalanceHoldKind::All => true, + BalanceHoldKind::Tag(tag) => tag == &balance_hold_addr_tag, + } + } +} + +/// Balance hold mode. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum BalanceHoldMode { + /// Balance hold request. + Hold { + /// Balance identifier. + identifier: BalanceIdentifier, + /// Hold amount. + hold_amount: U512, + /// How should insufficient balance be handled. + insufficient_handling: InsufficientBalanceHandling, + }, + /// Clear balance holds. + Clear { + /// Identifier of balance to be cleared of holds. 
+ identifier: BalanceIdentifier, + }, +} + +impl Default for BalanceHoldMode { + fn default() -> Self { + BalanceHoldMode::Hold { + insufficient_handling: InsufficientBalanceHandling::HoldRemaining, + hold_amount: U512::zero(), + identifier: BalanceIdentifier::Account(AccountHash::default()), + } + } +} + +/// How to handle available balance is less than hold amount? +#[derive(Debug, Copy, Clone, PartialEq, Eq, Default)] +pub enum InsufficientBalanceHandling { + /// Hold however much balance remains. + #[default] + HoldRemaining, + /// No operation. Aka, do not place a hold. + Noop, +} + +/// Balance hold request. +#[derive(Debug, Clone, PartialEq, Eq, Default)] +pub struct BalanceHoldRequest { + state_hash: Digest, + protocol_version: ProtocolVersion, + hold_kind: BalanceHoldKind, + hold_mode: BalanceHoldMode, +} + +impl BalanceHoldRequest { + /// Creates a new [`BalanceHoldRequest`] for adding a gas balance hold. + #[allow(clippy::too_many_arguments)] + pub fn new_gas_hold( + state_hash: Digest, + protocol_version: ProtocolVersion, + identifier: BalanceIdentifier, + hold_amount: U512, + insufficient_handling: InsufficientBalanceHandling, + ) -> Self { + let hold_kind = BalanceHoldKind::Tag(BalanceHoldAddrTag::Gas); + let hold_mode = BalanceHoldMode::Hold { + identifier, + hold_amount, + insufficient_handling, + }; + BalanceHoldRequest { + state_hash, + protocol_version, + hold_kind, + hold_mode, + } + } + + /// Creates a new [`BalanceHoldRequest`] for adding a processing balance hold. 
+ #[allow(clippy::too_many_arguments)] + pub fn new_processing_hold( + state_hash: Digest, + protocol_version: ProtocolVersion, + identifier: BalanceIdentifier, + hold_amount: U512, + insufficient_handling: InsufficientBalanceHandling, + ) -> Self { + let hold_kind = BalanceHoldKind::Tag(BalanceHoldAddrTag::Processing); + let hold_mode = BalanceHoldMode::Hold { + identifier, + hold_amount, + insufficient_handling, + }; + BalanceHoldRequest { + state_hash, + protocol_version, + hold_kind, + hold_mode, + } + } + + /// Creates a new [`BalanceHoldRequest`] for clearing holds. + pub fn new_clear( + state_hash: Digest, + protocol_version: ProtocolVersion, + hold_kind: BalanceHoldKind, + identifier: BalanceIdentifier, + ) -> Self { + let hold_mode = BalanceHoldMode::Clear { identifier }; + BalanceHoldRequest { + state_hash, + protocol_version, + hold_kind, + hold_mode, + } + } + + /// Returns a state hash. + pub fn state_hash(&self) -> Digest { + self.state_hash + } + + /// Protocol version. + pub fn protocol_version(&self) -> ProtocolVersion { + self.protocol_version + } + + /// Balance hold kind. + pub fn balance_hold_kind(&self) -> BalanceHoldKind { + self.hold_kind + } + + /// Balance hold mode. + pub fn balance_hold_mode(&self) -> BalanceHoldMode { + self.hold_mode.clone() + } +} + +/// Possible balance hold errors. +#[derive(Error, Debug, Clone)] +#[non_exhaustive] +pub enum BalanceHoldError { + /// Tracking copy error. + TrackingCopy(TrackingCopyError), + /// Balance error. + Balance(BalanceFailure), + /// Insufficient balance error. + InsufficientBalance { + /// Remaining balance error. + remaining_balance: U512, + }, + /// Unexpected wildcard variant error. + UnexpectedWildcardVariant, // programmer error, + /// Unexpected hold value error. 
+ UnexpectedHoldValue(StoredValue), +} + +impl From for BalanceHoldError { + fn from(be: BalanceFailure) -> Self { + BalanceHoldError::Balance(be) + } +} + +impl From for BalanceHoldError { + fn from(tce: TrackingCopyError) -> Self { + BalanceHoldError::TrackingCopy(tce) + } +} + +impl Display for BalanceHoldError { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self { + BalanceHoldError::TrackingCopy(err) => { + write!(f, "TrackingCopy: {:?}", err) + } + BalanceHoldError::InsufficientBalance { remaining_balance } => { + write!(f, "InsufficientBalance: {}", remaining_balance) + } + BalanceHoldError::UnexpectedWildcardVariant => { + write!( + f, + "UnexpectedWildcardVariant: unsupported use of BalanceHoldKind::All" + ) + } + BalanceHoldError::Balance(be) => Display::fmt(be, f), + BalanceHoldError::UnexpectedHoldValue(value) => { + write!(f, "Found an unexpected hold value in storage: {:?}", value,) + } + } + } +} + +/// Result enum that represents all possible outcomes of a balance hold request. +#[derive(Debug)] +pub enum BalanceHoldResult { + /// Returned if a passed state root hash is not found. + RootNotFound, + /// Returned if global state does not have an entry for block time. + BlockTimeNotFound, + /// Balance hold successfully placed. + Success { + /// Hold addresses, if any. + holds: Option>, + /// Purse total balance. + total_balance: Box, + /// Purse available balance after hold placed. + available_balance: Box, + /// How much were we supposed to hold? + hold: Box, + /// How much did we actually hold? + held: Box, + /// Effects of bidding interaction. + effects: Box, + }, + /// Failed to place balance hold. + Failure(BalanceHoldError), +} + +impl BalanceHoldResult { + /// Success ctor. 
+ pub fn success( + holds: Option>, + total_balance: U512, + available_balance: U512, + hold: U512, + held: U512, + effects: Effects, + ) -> Self { + BalanceHoldResult::Success { + holds, + total_balance: Box::new(total_balance), + available_balance: Box::new(available_balance), + hold: Box::new(hold), + held: Box::new(held), + effects: Box::new(effects), + } + } + + /// Returns the total balance for a [`BalanceHoldResult::Success`] variant. + pub fn total_balance(&self) -> Option<&U512> { + match self { + BalanceHoldResult::Success { total_balance, .. } => Some(total_balance), + _ => None, + } + } + + /// Returns the available balance for a [`BalanceHoldResult::Success`] variant. + pub fn available_balance(&self) -> Option<&U512> { + match self { + BalanceHoldResult::Success { + available_balance, .. + } => Some(available_balance), + _ => None, + } + } + + /// Returns the held amount for a [`BalanceHoldResult::Success`] variant. + pub fn held(&self) -> Option<&U512> { + match self { + BalanceHoldResult::Success { held, .. } => Some(held), + _ => None, + } + } + + /// Hold address, if any. + pub fn holds(&self) -> Option> { + match self { + BalanceHoldResult::RootNotFound + | BalanceHoldResult::BlockTimeNotFound + | BalanceHoldResult::Failure(_) => None, + BalanceHoldResult::Success { holds, .. } => holds.clone(), + } + } + + /// Does this result contain any hold addresses? + pub fn has_holds(&self) -> bool { + match self.holds() { + None => false, + Some(holds) => !holds.is_empty(), + } + } + + /// Was the hold fully covered? + pub fn is_fully_covered(&self) -> bool { + match self { + BalanceHoldResult::RootNotFound + | BalanceHoldResult::BlockTimeNotFound + | BalanceHoldResult::Failure(_) => false, + BalanceHoldResult::Success { hold, held, .. } => hold == held, + } + } + + /// Was the hold successful? + pub fn is_success(&self) -> bool { + matches!(self, BalanceHoldResult::Success { .. }) + } + + /// Was the root not found? 
+ pub fn is_root_not_found(&self) -> bool { + matches!(self, BalanceHoldResult::RootNotFound) + } + + /// The effects, if any. + pub fn effects(&self) -> Effects { + match self { + BalanceHoldResult::RootNotFound + | BalanceHoldResult::BlockTimeNotFound + | BalanceHoldResult::Failure(_) => Effects::new(), + BalanceHoldResult::Success { effects, .. } => *effects.clone(), + } + } + + /// Error message. + pub fn error_message(&self) -> String { + match self { + BalanceHoldResult::Success { hold, held, .. } => { + if hold == held { + String::default() + } else { + format!( + "insufficient balance to cover hold amount: {}, held remaining amount: {}", + hold, held + ) + } + } + BalanceHoldResult::RootNotFound => "root not found".to_string(), + BalanceHoldResult::BlockTimeNotFound => "block time not found".to_string(), + BalanceHoldResult::Failure(bhe) => { + format!("{:?}", bhe) + } + } + } +} + +impl From for BalanceHoldResult { + fn from(be: BalanceFailure) -> Self { + BalanceHoldResult::Failure(be.into()) + } +} + +impl From for BalanceHoldResult { + fn from(tce: TrackingCopyError) -> Self { + BalanceHoldResult::Failure(tce.into()) + } +} diff --git a/storage/src/data_access_layer/balance_identifier_purse.rs b/storage/src/data_access_layer/balance_identifier_purse.rs new file mode 100644 index 0000000000..2f04012dfd --- /dev/null +++ b/storage/src/data_access_layer/balance_identifier_purse.rs @@ -0,0 +1,88 @@ +use crate::{data_access_layer::BalanceIdentifier, tracking_copy::TrackingCopyError}; +use casper_types::{Digest, ProtocolVersion, URefAddr}; + +/// Represents a balance identifier purse request. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct BalanceIdentifierPurseRequest { + state_hash: Digest, + protocol_version: ProtocolVersion, + identifier: BalanceIdentifier, +} + +impl BalanceIdentifierPurseRequest { + /// Creates a new [`BalanceIdentifierPurseRequest`]. 
+ pub fn new( + state_hash: Digest, + protocol_version: ProtocolVersion, + identifier: BalanceIdentifier, + ) -> Self { + BalanceIdentifierPurseRequest { + state_hash, + protocol_version, + identifier, + } + } + + /// Returns a state hash. + pub fn state_hash(&self) -> Digest { + self.state_hash + } + + /// Protocol version. + pub fn protocol_version(&self) -> ProtocolVersion { + self.protocol_version + } + + /// Returns the identifier [`BalanceIdentifier`]. + pub fn identifier(&self) -> &BalanceIdentifier { + &self.identifier + } +} + +/// Result enum that represents all possible outcomes of a balance request. +#[derive(Debug, Clone)] +pub enum BalanceIdentifierPurseResult { + /// Returned if a passed state root hash is not found. + RootNotFound, + /// A query returned a balance. + Success { + /// The purse address. + purse_addr: URefAddr, + }, + /// Failure. + Failure(TrackingCopyError), +} + +impl BalanceIdentifierPurseResult { + /// Returns the purse address for a [`BalanceIdentifierPurseResult::Success`] variant. + pub fn purse_addr(&self) -> Option { + match self { + BalanceIdentifierPurseResult::Success { purse_addr, .. } => Some(*purse_addr), + _ => None, + } + } + + /// Was the balance request successful? + pub fn is_success(&self) -> bool { + match self { + BalanceIdentifierPurseResult::RootNotFound + | BalanceIdentifierPurseResult::Failure(_) => false, + BalanceIdentifierPurseResult::Success { .. } => true, + } + } + + /// Tracking copy error, if any. + pub fn error(&self) -> Option<&TrackingCopyError> { + match self { + BalanceIdentifierPurseResult::RootNotFound + | BalanceIdentifierPurseResult::Success { .. 
} => None, + BalanceIdentifierPurseResult::Failure(err) => Some(err), + } + } +} + +impl From for BalanceIdentifierPurseResult { + fn from(tce: TrackingCopyError) -> Self { + BalanceIdentifierPurseResult::Failure(tce) + } +} diff --git a/storage/src/data_access_layer/bids.rs b/storage/src/data_access_layer/bids.rs new file mode 100644 index 0000000000..88b5da03f2 --- /dev/null +++ b/storage/src/data_access_layer/bids.rs @@ -0,0 +1,47 @@ +//! Support for obtaining current bids from the auction system. +use crate::tracking_copy::TrackingCopyError; + +use casper_types::{system::auction::BidKind, Digest}; + +/// Represents a request to obtain current bids in the auction system. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct BidsRequest { + state_hash: Digest, +} + +impl BidsRequest { + /// Creates new request. + pub fn new(state_hash: Digest) -> Self { + BidsRequest { state_hash } + } + + /// Returns state root hash. + pub fn state_hash(&self) -> Digest { + self.state_hash + } +} + +/// Represents a result of a `get_bids` request. +#[derive(Debug)] +pub enum BidsResult { + /// Invalid state root hash. + RootNotFound, + /// Contains current bids returned from the global state. + Success { + /// Current bids. + bids: Vec, + }, + /// Failure. + Failure(TrackingCopyError), +} + +impl BidsResult { + /// Returns wrapped [`Vec`] if this represents a successful query result. + pub fn into_option(self) -> Option> { + if let Self::Success { bids } = self { + Some(bids) + } else { + None + } + } +} diff --git a/storage/src/data_access_layer/block_global.rs b/storage/src/data_access_layer/block_global.rs new file mode 100644 index 0000000000..a8dc1c7298 --- /dev/null +++ b/storage/src/data_access_layer/block_global.rs @@ -0,0 +1,114 @@ +use crate::tracking_copy::TrackingCopyError; +use casper_types::{execution::Effects, BlockTime, Digest, ProtocolVersion}; +use std::fmt::{Display, Formatter}; +use thiserror::Error; + +/// Block global kind. 
+#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub enum BlockGlobalKind { + /// Block time. + BlockTime(BlockTime), + /// Message count. + MessageCount(u64), + /// Protocol version. + ProtocolVersion(ProtocolVersion), + /// Addressable entity flag. + AddressableEntity(bool), +} + +impl Default for BlockGlobalKind { + fn default() -> Self { + BlockGlobalKind::BlockTime(BlockTime::default()) + } +} + +/// Block global request. +#[derive(Debug, Copy, Clone, PartialEq, Eq, Default)] +pub struct BlockGlobalRequest { + state_hash: Digest, + protocol_version: ProtocolVersion, + block_global_kind: BlockGlobalKind, +} + +impl BlockGlobalRequest { + /// Returns block time setting request. + pub fn block_time( + state_hash: Digest, + protocol_version: ProtocolVersion, + block_time: BlockTime, + ) -> Self { + let block_global_kind = BlockGlobalKind::BlockTime(block_time); + BlockGlobalRequest { + state_hash, + protocol_version, + block_global_kind, + } + } + + /// Returns protocol version setting request. + pub fn set_protocol_version(state_hash: Digest, protocol_version: ProtocolVersion) -> Self { + let block_global_kind = BlockGlobalKind::ProtocolVersion(protocol_version); + BlockGlobalRequest { + state_hash, + protocol_version, + block_global_kind, + } + } + + /// Returns addressable entity flag setting request. + pub fn set_addressable_entity( + state_hash: Digest, + protocol_version: ProtocolVersion, + addressable_entity: bool, + ) -> Self { + let block_global_kind = BlockGlobalKind::AddressableEntity(addressable_entity); + BlockGlobalRequest { + state_hash, + protocol_version, + block_global_kind, + } + } + + /// Returns state hash. + pub fn state_hash(&self) -> Digest { + self.state_hash + } + + /// Returns protocol version. + pub fn protocol_version(&self) -> ProtocolVersion { + self.protocol_version + } + + /// Returns block global kind. + pub fn block_global_kind(&self) -> BlockGlobalKind { + self.block_global_kind + } +} + +/// Block global result. 
+#[derive(Error, Debug, Clone)] +pub enum BlockGlobalResult { + /// Returned if a passed state root hash is not found. + RootNotFound, + /// Failed to store block global data. + Failure(TrackingCopyError), + /// Successfully stored block global data. + Success { + /// State hash after data committed to the global state. + post_state_hash: Digest, + /// The effects of putting the data to global state. + effects: Box, + }, +} + +impl Display for BlockGlobalResult { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self { + BlockGlobalResult::RootNotFound => f.write_str("root not found"), + BlockGlobalResult::Failure(tce) => { + write!(f, "failed {}", tce) + } + BlockGlobalResult::Success { .. } => f.write_str("success"), + } + } +} diff --git a/storage/src/data_access_layer/block_rewards.rs b/storage/src/data_access_layer/block_rewards.rs new file mode 100644 index 0000000000..e4c2d9fd5a --- /dev/null +++ b/storage/src/data_access_layer/block_rewards.rs @@ -0,0 +1,110 @@ +use std::collections::BTreeMap; + +use thiserror::Error; + +use casper_types::{ + execution::Effects, system::auction::Error as AuctionError, BlockTime, Digest, ProtocolVersion, + PublicKey, U512, +}; + +use crate::{ + system::{runtime_native::Config, transfer::TransferError}, + tracking_copy::TrackingCopyError, +}; + +/// Block rewards request. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct BlockRewardsRequest { + config: Config, + state_hash: Digest, + protocol_version: ProtocolVersion, + rewards: BTreeMap>, + block_time: BlockTime, +} + +impl BlockRewardsRequest { + /// Ctor. + pub fn new( + config: Config, + state_hash: Digest, + protocol_version: ProtocolVersion, + block_time: BlockTime, + rewards: BTreeMap>, + ) -> Self { + BlockRewardsRequest { + config, + state_hash, + protocol_version, + rewards, + block_time, + } + } + + /// Returns config. + pub fn config(&self) -> &Config { + &self.config + } + + /// Returns state_hash. 
+ pub fn state_hash(&self) -> Digest { + self.state_hash + } + + /// Returns protocol_version. + pub fn protocol_version(&self) -> ProtocolVersion { + self.protocol_version + } + + /// Returns rewards. + pub fn rewards(&self) -> &BTreeMap> { + &self.rewards + } + + /// Returns block time. + pub fn block_time(&self) -> BlockTime { + self.block_time + } +} + +/// Block rewards error. +#[derive(Clone, Error, Debug)] +pub enum BlockRewardsError { + /// Undistributed rewards error. + #[error("Undistributed rewards")] + UndistributedRewards, + /// Tracking copy error. + #[error(transparent)] + TrackingCopy(TrackingCopyError), + /// Registry entry not found error. + #[error("Registry entry not found: {0}")] + RegistryEntryNotFound(String), + /// Transfer error. + #[error(transparent)] + Transfer(TransferError), + /// Auction error. + #[error("Auction error: {0}")] + Auction(AuctionError), +} + +/// Block reward result. +#[derive(Debug, Clone)] +pub enum BlockRewardsResult { + /// Root not found in global state. + RootNotFound, + /// Block rewards failure error. + Failure(BlockRewardsError), + /// Success result. + Success { + /// State hash after distribution outcome is committed to the global state. + post_state_hash: Digest, + /// Effects of the distribution process. + effects: Effects, + }, +} + +impl BlockRewardsResult { + /// Returns true if successful, else false. + pub fn is_success(&self) -> bool { + matches!(self, BlockRewardsResult::Success { .. }) + } +} diff --git a/storage/src/data_access_layer/contract.rs b/storage/src/data_access_layer/contract.rs new file mode 100644 index 0000000000..6adc1fd980 --- /dev/null +++ b/storage/src/data_access_layer/contract.rs @@ -0,0 +1,40 @@ +use crate::tracking_copy::TrackingCopyError; +use casper_types::{Contract, Digest, Key}; + +/// Represents a request to obtain contract. 
+pub struct ContractRequest { + state_hash: Digest, + key: Key, +} + +impl ContractRequest { + /// ctor + pub fn new(state_hash: Digest, key: Key) -> Self { + ContractRequest { state_hash, key } + } + + /// Returns key. + pub fn key(&self) -> Key { + self.key + } + /// Returns state root hash. + pub fn state_hash(&self) -> Digest { + self.state_hash + } +} + +/// Represents a result of a `contract` request. +#[derive(Debug)] +pub enum ContractResult { + /// Invalid state root hash. + RootNotFound, + /// Value not found. + ValueNotFound(String), + /// This variant will be returned if the contract was found. + Success { + /// A contract. + contract: Contract, + }, + /// Failure result. + Failure(TrackingCopyError), +} diff --git a/storage/src/data_access_layer/entry_points.rs b/storage/src/data_access_layer/entry_points.rs new file mode 100644 index 0000000000..ed42aa6873 --- /dev/null +++ b/storage/src/data_access_layer/entry_points.rs @@ -0,0 +1,116 @@ +use crate::tracking_copy::TrackingCopyError; +use casper_types::{Digest, EntryPointValue, HashAddr}; + +/// Represents a request to obtain entry point. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct EntryPointRequest { + state_hash: Digest, + entry_point_name: String, + contract_hash: HashAddr, +} + +impl EntryPointRequest { + /// ctor + pub fn new(state_hash: Digest, entry_point_name: String, contract_hash: HashAddr) -> Self { + EntryPointRequest { + state_hash, + entry_point_name, + contract_hash, + } + } + + /// Returns state root hash. + pub fn state_hash(&self) -> Digest { + self.state_hash + } + + /// Returns entry_point_name. + pub fn entry_point_name(&self) -> &str { + &self.entry_point_name + } + + /// Returns contract_hash. 
+ pub fn contract_hash(&self) -> HashAddr { + self.contract_hash + } +} + +impl From for EntryPointRequest { + fn from(value: EntryPointExistsRequest) -> Self { + EntryPointRequest { + state_hash: value.state_hash, + entry_point_name: value.entry_point_name, + contract_hash: value.contract_hash, + } + } +} + +/// Represents a result of a `entry_point` request. +#[derive(Debug)] +pub enum EntryPointResult { + /// Invalid state root hash. + RootNotFound, + /// Value not found. + ValueNotFound(String), + /// Contains an addressable entity from global state. + Success { + /// An addressable entity. + entry_point: EntryPointValue, + }, + /// Failure result. + Failure(TrackingCopyError), +} + +/// Represents a request to check entry point existence. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct EntryPointExistsRequest { + state_hash: Digest, + entry_point_name: String, + contract_hash: HashAddr, +} + +impl EntryPointExistsRequest { + /// ctor + pub fn new(state_hash: Digest, entry_point_name: String, contract_hash: HashAddr) -> Self { + EntryPointExistsRequest { + state_hash, + entry_point_name, + contract_hash, + } + } + + /// Returns state root hash. + pub fn state_hash(&self) -> Digest { + self.state_hash + } + + /// Returns entry_point_name. + pub fn entry_point_name(&self) -> &str { + &self.entry_point_name + } + + /// Returns contract_hash. + pub fn contract_hash(&self) -> HashAddr { + self.contract_hash + } +} + +/// Represents a result of `entry_point_exists` request. +#[derive(Debug)] +pub enum EntryPointExistsResult { + /// Invalid state root hash. + RootNotFound, + /// Value not found. + ValueNotFound(String), + /// This variant will be returned if the entry point was found. + Success, + /// Failure result. + Failure(TrackingCopyError), +} + +impl EntryPointExistsResult { + /// Returns `true` if the result is `Success`. + pub fn is_success(self) -> bool { + matches!(self, Self::Success { .. 
}) + } +} diff --git a/storage/src/data_access_layer/era_validators.rs b/storage/src/data_access_layer/era_validators.rs new file mode 100644 index 0000000000..5f5ace14b4 --- /dev/null +++ b/storage/src/data_access_layer/era_validators.rs @@ -0,0 +1,73 @@ +//! Support for querying era validators. + +use crate::tracking_copy::TrackingCopyError; +use casper_types::{system::auction::EraValidators, Digest}; +use std::fmt::{Display, Formatter}; + +/// Request for era validators. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct EraValidatorsRequest { + state_hash: Digest, +} + +impl EraValidatorsRequest { + /// Constructs a new EraValidatorsRequest. + pub fn new(state_hash: Digest) -> Self { + EraValidatorsRequest { state_hash } + } + + /// Get the state hash. + pub fn state_hash(&self) -> Digest { + self.state_hash + } +} + +/// Result enum that represents all possible outcomes of a era validators request. +#[derive(Debug)] +pub enum EraValidatorsResult { + /// Returned if auction is not found. This is a catastrophic outcome. + AuctionNotFound, + /// Returned if a passed state root hash is not found. This is recoverable. + RootNotFound, + /// Value not found. This is not erroneous if the record does not exist. + ValueNotFound(String), + /// There is no systemic issue, but the query itself errored. + Failure(TrackingCopyError), + /// The query succeeded. + Success { + /// Era Validators. + era_validators: EraValidators, + }, +} + +impl EraValidatorsResult { + /// Returns true if success. + pub fn is_success(&self) -> bool { + matches!(self, EraValidatorsResult::Success { .. }) + } + + /// Takes era validators. 
+ pub fn take_era_validators(self) -> Option { + match self { + EraValidatorsResult::AuctionNotFound + | EraValidatorsResult::RootNotFound + | EraValidatorsResult::ValueNotFound(_) + | EraValidatorsResult::Failure(_) => None, + EraValidatorsResult::Success { era_validators } => Some(era_validators), + } + } +} + +impl Display for EraValidatorsResult { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self { + EraValidatorsResult::AuctionNotFound => write!(f, "system auction not found"), + EraValidatorsResult::RootNotFound => write!(f, "state root not found"), + EraValidatorsResult::ValueNotFound(msg) => write!(f, "value not found: {}", msg), + EraValidatorsResult::Failure(tce) => write!(f, "{}", tce), + EraValidatorsResult::Success { .. } => { + write!(f, "success") + } + } + } +} diff --git a/storage/src/data_access_layer/execution_results_checksum.rs b/storage/src/data_access_layer/execution_results_checksum.rs new file mode 100644 index 0000000000..cc4d77e510 --- /dev/null +++ b/storage/src/data_access_layer/execution_results_checksum.rs @@ -0,0 +1,54 @@ +use crate::tracking_copy::TrackingCopyError; +use casper_types::Digest; + +/// Execution results checksum literal. +pub const EXECUTION_RESULTS_CHECKSUM_NAME: &str = "execution_results_checksum"; + +/// Represents a request to obtain current execution results checksum. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct ExecutionResultsChecksumRequest { + state_hash: Digest, +} + +impl ExecutionResultsChecksumRequest { + /// Creates new request. + pub fn new(state_hash: Digest) -> Self { + ExecutionResultsChecksumRequest { state_hash } + } + + /// Returns state root hash. + pub fn state_hash(&self) -> Digest { + self.state_hash + } +} + +/// Represents a result of a `execution_results_checksum` request. +#[derive(Debug)] +pub enum ExecutionResultsChecksumResult { + /// Invalid state root hash. + RootNotFound, + /// Returned if system registry is not found. 
+ RegistryNotFound, + /// Returned if checksum is not found. + ChecksumNotFound, + /// Contains current checksum returned from the global state. + Success { + /// Current checksum. + checksum: Digest, + }, + /// Error occurred. + Failure(TrackingCopyError), +} + +impl ExecutionResultsChecksumResult { + /// Returns a Result matching the original api for this functionality. + pub fn as_legacy(&self) -> Result, TrackingCopyError> { + match self { + ExecutionResultsChecksumResult::RootNotFound + | ExecutionResultsChecksumResult::RegistryNotFound + | ExecutionResultsChecksumResult::ChecksumNotFound => Ok(None), + ExecutionResultsChecksumResult::Success { checksum } => Ok(Some(*checksum)), + ExecutionResultsChecksumResult::Failure(err) => Err(err.clone()), + } + } +} diff --git a/storage/src/data_access_layer/fee.rs b/storage/src/data_access_layer/fee.rs new file mode 100644 index 0000000000..9b11da254a --- /dev/null +++ b/storage/src/data_access_layer/fee.rs @@ -0,0 +1,138 @@ +use std::collections::BTreeSet; +use thiserror::Error; + +use crate::system::{ + runtime_native::{Config as NativeRuntimeConfig, TransferConfig}, + transfer::TransferError, +}; +use casper_types::{ + account::AccountHash, execution::Effects, BlockTime, Digest, FeeHandling, ProtocolVersion, + Transfer, +}; + +use crate::tracking_copy::TrackingCopyError; + +/// Fee request. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct FeeRequest { + config: NativeRuntimeConfig, + state_hash: Digest, + protocol_version: ProtocolVersion, + block_time: BlockTime, +} + +impl FeeRequest { + /// Ctor. + pub fn new( + config: NativeRuntimeConfig, + state_hash: Digest, + protocol_version: ProtocolVersion, + block_time: BlockTime, + ) -> Self { + FeeRequest { + config, + state_hash, + protocol_version, + block_time, + } + } + + /// Returns config. + pub fn config(&self) -> &NativeRuntimeConfig { + &self.config + } + + /// Returns state_hash. 
+ pub fn state_hash(&self) -> Digest { + self.state_hash + } + + /// Returns protocol_version. + pub fn protocol_version(&self) -> ProtocolVersion { + self.protocol_version + } + + /// Returns fee handling setting. + pub fn fee_handling(&self) -> &FeeHandling { + self.config.fee_handling() + } + + /// Returns block time. + pub fn block_time(&self) -> BlockTime { + self.block_time + } + + /// Returns administrative accounts, if any. + pub fn administrative_accounts(&self) -> Option<&BTreeSet> { + match self.config.transfer_config() { + TransferConfig::Administered { + administrative_accounts, + .. + } => Some(administrative_accounts), + TransferConfig::Unadministered => None, + } + } + + /// Should we attempt to distribute fees? + pub fn should_distribute_fees(&self) -> bool { + // we only distribute if chainspec FeeHandling == Accumulate + // and if there are administrative accounts to receive the fees. + // the various public networks do not use this option. + if !self.fee_handling().is_accumulate() { + return false; + } + + matches!( + self.config.transfer_config(), + TransferConfig::Administered { .. } + ) + } +} + +/// Fee error. +#[derive(Clone, Error, Debug)] +pub enum FeeError { + /// No fees distributed error. + #[error("Undistributed fees")] + NoFeesDistributed, + /// Tracking copy error. + #[error(transparent)] + TrackingCopy(TrackingCopyError), + /// Registry entry not found. + #[error("Registry entry not found: {0}")] + RegistryEntryNotFound(String), + /// Transfer error. + #[error(transparent)] + Transfer(TransferError), + /// Named keys not found. + #[error("Named keys not found")] + NamedKeysNotFound, + /// Administrative accounts not found. + #[error("Administrative accounts not found")] + AdministrativeAccountsNotFound, +} + +/// Fee result. +#[derive(Debug, Clone)] +pub enum FeeResult { + /// Root not found in global state. + RootNotFound, + /// Failure result. + Failure(FeeError), + /// Success result. 
+ Success { + /// List of transfers that happened during execution. + transfers: Vec, + /// State hash after fee distribution outcome is committed to the global state. + post_state_hash: Digest, + /// Effects of the fee distribution process. + effects: Effects, + }, +} + +impl FeeResult { + /// Returns true if successful, else false. + pub fn is_success(&self) -> bool { + matches!(self, FeeResult::Success { .. }) + } +} diff --git a/storage/src/data_access_layer/flush.rs b/storage/src/data_access_layer/flush.rs new file mode 100644 index 0000000000..b3853b00f4 --- /dev/null +++ b/storage/src/data_access_layer/flush.rs @@ -0,0 +1,42 @@ +use crate::global_state::error::Error as GlobalStateError; + +/// Request to flush state. +pub struct FlushRequest {} + +impl FlushRequest { + /// Returns a new instance of FlushRequest. + pub fn new() -> Self { + FlushRequest {} + } +} + +impl Default for FlushRequest { + fn default() -> Self { + FlushRequest::new() + } +} + +/// Represents a result of a `flush` request. +pub enum FlushResult { + /// Manual sync is disabled in config settings. + ManualSyncDisabled, + /// Successfully flushed. + Success, + /// Failed to flush. + Failure(GlobalStateError), +} + +impl FlushResult { + /// Flush succeeded + pub fn flushed(&self) -> bool { + matches!(self, FlushResult::Success) + } + + /// Transforms flush result to global state error, if relevant. 
+ pub fn as_error(self) -> Result<(), GlobalStateError> { + match self { + FlushResult::ManualSyncDisabled | FlushResult::Success => Ok(()), + FlushResult::Failure(gse) => Err(gse), + } + } +} diff --git a/storage/src/data_access_layer/forced_undelegate.rs b/storage/src/data_access_layer/forced_undelegate.rs new file mode 100644 index 0000000000..ad9a53772b --- /dev/null +++ b/storage/src/data_access_layer/forced_undelegate.rs @@ -0,0 +1,95 @@ +use casper_types::{ + execution::Effects, system::auction::Error as AuctionError, BlockTime, Digest, ProtocolVersion, +}; +use thiserror::Error; + +use crate::{ + system::{runtime_native::Config, transfer::TransferError}, + tracking_copy::TrackingCopyError, +}; + +/// Forced undelegate request. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct ForcedUndelegateRequest { + config: Config, + state_hash: Digest, + protocol_version: ProtocolVersion, + block_time: BlockTime, +} + +impl ForcedUndelegateRequest { + /// Ctor. + pub fn new( + config: Config, + state_hash: Digest, + protocol_version: ProtocolVersion, + block_time: BlockTime, + ) -> Self { + Self { + config, + state_hash, + protocol_version, + block_time, + } + } + + /// Returns config. + pub fn config(&self) -> &Config { + &self.config + } + + /// Returns state_hash. + pub fn state_hash(&self) -> Digest { + self.state_hash + } + + /// Returns protocol_version. + pub fn protocol_version(&self) -> ProtocolVersion { + self.protocol_version + } + + /// Returns block time. + pub fn block_time(&self) -> BlockTime { + self.block_time + } +} + +/// Forced undelegation error. +#[derive(Clone, Error, Debug)] +pub enum ForcedUndelegateError { + /// Tracking copy error. + #[error(transparent)] + TrackingCopy(TrackingCopyError), + /// Registry entry not found error. + #[error("Registry entry not found: {0}")] + RegistryEntryNotFound(String), + /// Transfer error. + #[error(transparent)] + Transfer(TransferError), + /// Auction error. 
+ #[error("Auction error: {0}")] + Auction(AuctionError), +} + +/// Forced undelegation result. +#[derive(Debug, Clone)] +pub enum ForcedUndelegateResult { + /// Root hash not found in global state. + RootNotFound, + /// Forced undelegation failed. + Failure(ForcedUndelegateError), + /// Forced undelegation succeeded. + Success { + /// State hash after distribution outcome is committed to the global state. + post_state_hash: Digest, + /// Effects of the distribution process. + effects: Effects, + }, +} + +impl ForcedUndelegateResult { + /// Returns true if successful, else false. + pub fn is_success(&self) -> bool { + matches!(self, Self::Success { .. }) + } +} diff --git a/storage/src/data_access_layer/genesis.rs b/storage/src/data_access_layer/genesis.rs new file mode 100644 index 0000000000..5b4f944fb3 --- /dev/null +++ b/storage/src/data_access_layer/genesis.rs @@ -0,0 +1,129 @@ +#[cfg(test)] +use rand::{ + distributions::{Distribution, Standard}, + Rng, +}; + +use casper_types::{ + execution::Effects, ChainspecRegistry, Digest, GenesisAccount, GenesisConfig, GenesisValidator, + ProtocolVersion, PublicKey, +}; + +use crate::system::genesis::GenesisError; + +/// Represents a configuration of a genesis process. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct GenesisRequest { + chainspec_hash: Digest, + protocol_version: ProtocolVersion, + config: GenesisConfig, + chainspec_registry: ChainspecRegistry, +} + +impl GenesisRequest { + /// Creates a new genesis config object. + pub fn new( + chainspec_hash: Digest, + protocol_version: ProtocolVersion, + config: GenesisConfig, + chainspec_registry: ChainspecRegistry, + ) -> Self { + GenesisRequest { + chainspec_hash, + protocol_version, + config, + chainspec_registry, + } + } + + /// Set enable entity. + pub fn set_enable_entity(&mut self, enable: bool) { + self.config.set_enable_entity(enable); + } + + /// Push genesis validator. 
+ pub fn push_genesis_account(&mut self, genesis_account: GenesisAccount) { + self.config.push_account(genesis_account); + } + + /// Push genesis validator. + pub fn push_genesis_validator( + &mut self, + public_key: &PublicKey, + genesis_validator: GenesisValidator, + ) { + self.config + .push_genesis_validator(public_key, genesis_validator); + } + + /// Returns chainspec_hash. + pub fn chainspec_hash(&self) -> Digest { + self.chainspec_hash + } + + /// Returns protocol version. + pub fn protocol_version(&self) -> ProtocolVersion { + self.protocol_version + } + + /// Returns configuration details of the genesis process. + pub fn config(&self) -> &GenesisConfig { + &self.config + } + + /// Returns chainspec registry. + pub fn chainspec_registry(&self) -> &ChainspecRegistry { + &self.chainspec_registry + } +} + +#[cfg(test)] +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> GenesisRequest { + let input: [u8; 32] = rng.gen(); + let chainspec_hash = Digest::hash(input); + let protocol_version = ProtocolVersion::from_parts(rng.gen(), rng.gen(), rng.gen()); + let config = rng.gen(); + + let chainspec_file_bytes: [u8; 10] = rng.gen(); + let genesis_account_file_bytes: [u8; 15] = rng.gen(); + let chainspec_registry = + ChainspecRegistry::new_with_genesis(&chainspec_file_bytes, &genesis_account_file_bytes); + GenesisRequest::new(chainspec_hash, protocol_version, config, chainspec_registry) + } +} + +/// Represents a result of a `genesis` request. +#[derive(Debug, Clone)] +pub enum GenesisResult { + /// Genesis fatal. + Fatal(String), + /// Genesis failure. + Failure(GenesisError), + /// Genesis success. + Success { + /// State hash after genesis is committed to the global state. + post_state_hash: Digest, + /// Effects of genesis. + effects: Effects, + }, +} + +impl GenesisResult { + /// Is success. + pub fn is_success(&self) -> bool { + matches!(self, GenesisResult::Success { .. 
}) + } + + /// Returns a Result matching the original api for this functionality. + pub fn as_legacy(self) -> Result<(Digest, Effects), Box> { + match self { + GenesisResult::Fatal(_) => Err(Box::new(GenesisError::StateUninitialized)), + GenesisResult::Failure(err) => Err(Box::new(err)), + GenesisResult::Success { + post_state_hash, + effects, + } => Ok((post_state_hash, effects)), + } + } +} diff --git a/storage/src/data_access_layer/handle_fee.rs b/storage/src/data_access_layer/handle_fee.rs new file mode 100644 index 0000000000..1bc47d2096 --- /dev/null +++ b/storage/src/data_access_layer/handle_fee.rs @@ -0,0 +1,161 @@ +use crate::{ + data_access_layer::BalanceIdentifier, system::runtime_native::Config as NativeRuntimeConfig, + tracking_copy::TrackingCopyError, +}; +use casper_types::{ + execution::Effects, Digest, EraId, InitiatorAddr, ProtocolVersion, PublicKey, TransactionHash, + Transfer, U512, +}; + +/// Handle fee mode. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum HandleFeeMode { + /// Pay the fee. + Pay { + /// Initiator. + initiator_addr: Box, + /// Source. + source: Box, + /// Target. + target: Box, + /// Amount. + amount: U512, + }, + /// Burn the fee. + Burn { + /// Source. + source: BalanceIdentifier, + /// Amount. + amount: Option, + }, + /// Validator credit (used in no fee mode). + Credit { + /// Validator. + validator: Box, + /// Amount. + amount: U512, + /// EraId. + era_id: EraId, + }, +} + +impl HandleFeeMode { + /// Ctor for Pay mode. + pub fn pay( + initiator_addr: Box, + source: BalanceIdentifier, + target: BalanceIdentifier, + amount: U512, + ) -> Self { + HandleFeeMode::Pay { + initiator_addr, + source: Box::new(source), + target: Box::new(target), + amount, + } + } + + /// What source should be used to burn from, and how much? + /// If amount is None or greater than the available balance, the full available balance + /// will be burned. 
If amount is less than available balance, only that much will be + /// burned leaving a remaining balance. + pub fn burn(source: BalanceIdentifier, amount: Option) -> Self { + HandleFeeMode::Burn { source, amount } + } + + /// Applies a staking credit to the imputed proposer for the imputed amount at the end + /// of the current era when the auction process is executed. + pub fn credit(validator: Box, amount: U512, era_id: EraId) -> Self { + HandleFeeMode::Credit { + validator, + amount, + era_id, + } + } +} + +/// Handle fee request. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct HandleFeeRequest { + /// The runtime config. + pub(crate) config: NativeRuntimeConfig, + /// State root hash. + pub(crate) state_hash: Digest, + /// The protocol version. + pub(crate) protocol_version: ProtocolVersion, + /// Transaction hash. + pub(crate) transaction_hash: TransactionHash, + /// Handle fee mode. + pub(crate) handle_fee_mode: HandleFeeMode, +} + +impl HandleFeeRequest { + /// Creates new request instance with runtime args. + #[allow(clippy::too_many_arguments)] + pub fn new( + config: NativeRuntimeConfig, + state_hash: Digest, + protocol_version: ProtocolVersion, + transaction_hash: TransactionHash, + handle_fee_mode: HandleFeeMode, + ) -> Self { + Self { + config, + state_hash, + protocol_version, + transaction_hash, + handle_fee_mode, + } + } + + /// Returns config. + pub fn config(&self) -> &NativeRuntimeConfig { + &self.config + } + + /// Returns state hash. + pub fn state_hash(&self) -> Digest { + self.state_hash + } + + /// Returns handle protocol version. + pub fn protocol_version(&self) -> ProtocolVersion { + self.protocol_version + } + + /// Returns handle transaction hash. + pub fn transaction_hash(&self) -> TransactionHash { + self.transaction_hash + } + + /// Returns handle fee mode. + pub fn handle_fee_mode(&self) -> &HandleFeeMode { + &self.handle_fee_mode + } +} + +/// Result enum that represents all possible outcomes of a handle request. 
+#[derive(Debug)] +pub enum HandleFeeResult { + /// Invalid state root hash. + RootNotFound, + /// Handle request succeeded. + Success { + /// Transfers. + transfers: Vec, + /// Handle fee effects. + effects: Effects, + }, + /// Handle request failed. + Failure(TrackingCopyError), +} + +impl HandleFeeResult { + /// The effects, if any. + pub fn effects(&self) -> Effects { + match self { + HandleFeeResult::RootNotFound | HandleFeeResult::Failure(_) => Effects::new(), + HandleFeeResult::Success { effects, .. } => effects.clone(), + } + } +} diff --git a/storage/src/data_access_layer/handle_refund.rs b/storage/src/data_access_layer/handle_refund.rs new file mode 100644 index 0000000000..f12da05177 --- /dev/null +++ b/storage/src/data_access_layer/handle_refund.rs @@ -0,0 +1,214 @@ +use crate::{ + data_access_layer::BalanceIdentifier, system::runtime_native::Config as NativeRuntimeConfig, + tracking_copy::TrackingCopyError, +}; +use casper_types::{ + execution::Effects, Digest, InitiatorAddr, Phase, ProtocolVersion, TransactionHash, Transfer, + U512, +}; +use num_rational::Ratio; + +/// Selects refund operation. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum HandleRefundMode { + /// This variant will cause the refund amount to be calculated and then burned. + Burn { + /// Refund limit. + limit: U512, + /// Refund cost. + cost: U512, + /// Refund consumed. + consumed: U512, + /// Refund gas price. + gas_price: u8, + /// Refund source. + source: Box, + /// Refund ratio. + ratio: Ratio, + }, + /// This variant will cause the refund amount to be calculated and the refund to be executed. + Refund { + /// Refund initiator. + initiator_addr: Box, + /// Refund limit. + limit: U512, + /// Refund cost. + cost: U512, + /// Refund consumed. + consumed: U512, + /// Refund gas price. + gas_price: u8, + /// Refund ratio. + ratio: Ratio, + /// Refund source. + source: Box, + /// Target for refund. 
+ target: Box, + }, + /// This variant handles the edge case of custom payment plus no fee plus no refund. + /// This ultimately turns into a hold on the initiator, but it takes extra steps to get there + /// because the payment has already been fully processed up front and must first be unwound. + RefundNoFeeCustomPayment { + /// Refund initiator. + initiator_addr: Box, + /// Refund limit. + limit: U512, + /// Refund cost. + cost: U512, + /// Refund gas price. + gas_price: u8, + }, + /// This variant only calculates and returns the refund amount. It does not + /// execute a refund. + CalculateAmount { + /// Refund limit. + limit: U512, + /// Refund cost. + cost: U512, + /// Refund consumed. + consumed: U512, + /// Refund gas price. + gas_price: u8, + /// Refund ratio. + ratio: Ratio, + /// Refund source. + source: Box, + }, + /// This variant will cause the refund purse tracked by handle_payment to be set. + SetRefundPurse { + /// Target for refund, which will receive any refunded token while set. + target: Box, + }, + /// This variant will cause the refund purse tracked by handle_payment to be cleared. + ClearRefundPurse, +} + +impl HandleRefundMode { + /// Returns the appropriate phase for the mode. + pub fn phase(&self) -> Phase { + match self { + HandleRefundMode::Burn { .. } + | HandleRefundMode::Refund { .. } + | HandleRefundMode::RefundNoFeeCustomPayment { .. } + | HandleRefundMode::CalculateAmount { .. } => Phase::FinalizePayment, + + HandleRefundMode::ClearRefundPurse | HandleRefundMode::SetRefundPurse { .. } => { + Phase::Payment + } + } + } +} + +/// Handle refund request. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct HandleRefundRequest { + /// The runtime config. + pub(crate) config: NativeRuntimeConfig, + /// State root hash. + pub(crate) state_hash: Digest, + /// The protocol version. + pub(crate) protocol_version: ProtocolVersion, + /// Transaction hash. + pub(crate) transaction_hash: TransactionHash, + /// Refund handling. 
+ pub(crate) refund_mode: HandleRefundMode, +} + +impl HandleRefundRequest { + /// Creates a new instance. + pub fn new( + config: NativeRuntimeConfig, + state_hash: Digest, + protocol_version: ProtocolVersion, + transaction_hash: TransactionHash, + refund_mode: HandleRefundMode, + ) -> Self { + HandleRefundRequest { + config, + state_hash, + protocol_version, + transaction_hash, + refund_mode, + } + } + + /// Returns a reference to the config. + pub fn config(&self) -> &NativeRuntimeConfig { + &self.config + } + + /// Returns the state hash. + pub fn state_hash(&self) -> Digest { + self.state_hash + } + + /// Returns the protocol version. + pub fn protocol_version(&self) -> ProtocolVersion { + self.protocol_version + } + + /// Returns the transaction hash. + pub fn transaction_hash(&self) -> TransactionHash { + self.transaction_hash + } + + /// Returns the refund mode. + pub fn refund_mode(&self) -> &HandleRefundMode { + &self.refund_mode + } +} + +/// Handle refund result. +#[derive(Debug)] +pub enum HandleRefundResult { + /// Invalid state root hash. + RootNotFound, + /// Handle refund request succeeded. + Success { + /// Transfers. + transfers: Vec, + /// The effects. + effects: Effects, + /// The amount, if any. + amount: Option, + }, + /// Invalid phase selected (programmer error). + InvalidPhase, + /// Handle refund request failed. + Failure(TrackingCopyError), +} + +impl HandleRefundResult { + /// The effects, if any. + pub fn effects(&self) -> Effects { + match self { + HandleRefundResult::RootNotFound + | HandleRefundResult::InvalidPhase + | HandleRefundResult::Failure(_) => Effects::new(), + HandleRefundResult::Success { effects, .. } => effects.clone(), + } + } + + /// The refund amount. + pub fn refund_amount(&self) -> U512 { + match self { + HandleRefundResult::RootNotFound + | HandleRefundResult::InvalidPhase + | HandleRefundResult::Failure(_) => U512::zero(), + HandleRefundResult::Success { + amount: refund_amount, + .. 
+ } => refund_amount.unwrap_or(U512::zero()), + } + } + + /// The error message, if any. + pub fn error_message(&self) -> Option { + match self { + HandleRefundResult::RootNotFound => Some("root not found".to_string()), + HandleRefundResult::InvalidPhase => Some("invalid phase selected".to_string()), + HandleRefundResult::Failure(tce) => Some(format!("{}", tce)), + HandleRefundResult::Success { .. } => None, + } + } +} diff --git a/storage/src/data_access_layer/key_prefix.rs b/storage/src/data_access_layer/key_prefix.rs new file mode 100644 index 0000000000..6e3fe12e06 --- /dev/null +++ b/storage/src/data_access_layer/key_prefix.rs @@ -0,0 +1,264 @@ +use casper_types::{ + account::AccountHash, + bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + contract_messages::TopicNameHash, + system::{auction::BidAddrTag, mint::BalanceHoldAddrTag}, + EntityAddr, KeyTag, URefAddr, +}; + +/// Key prefixes used for querying the global state. +#[derive(Debug, PartialEq, Eq, Clone, PartialOrd, Ord, Hash)] +pub enum KeyPrefix { + /// Retrieves all delegator bid addresses for a given validator. + DelegatorBidAddrsByValidator(AccountHash), + /// Retrieves all entries for a given hash addr. + MessageEntriesByEntity(EntityAddr), + /// Retrieves all messages for a given hash addr and topic. + MessagesByEntityAndTopic(EntityAddr, TopicNameHash), + /// Retrieves all named keys for a given entity. + NamedKeysByEntity(EntityAddr), + /// Retrieves all gas balance holds for a given purse. + GasBalanceHoldsByPurse(URefAddr), + /// Retrieves all processing balance holds for a given purse. + ProcessingBalanceHoldsByPurse(URefAddr), + /// Retrieves all V1 entry points for a given entity. + EntryPointsV1ByEntity(EntityAddr), + /// Retrieves all V2 entry points for a given entity. 
+ EntryPointsV2ByEntity(EntityAddr), +} + +impl ToBytes for KeyPrefix { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut result = bytesrepr::unchecked_allocate_buffer(self); + self.write_bytes(&mut result)?; + Ok(result) + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + + match self { + KeyPrefix::DelegatorBidAddrsByValidator(validator) => { + U8_SERIALIZED_LENGTH + validator.serialized_length() + } + KeyPrefix::MessageEntriesByEntity(hash_addr) => hash_addr.serialized_length(), + KeyPrefix::MessagesByEntityAndTopic(hash_addr, topic) => { + hash_addr.serialized_length() + topic.serialized_length() + } + KeyPrefix::NamedKeysByEntity(entity) => entity.serialized_length(), + KeyPrefix::GasBalanceHoldsByPurse(uref) => { + U8_SERIALIZED_LENGTH + uref.serialized_length() + } + KeyPrefix::ProcessingBalanceHoldsByPurse(uref) => { + U8_SERIALIZED_LENGTH + uref.serialized_length() + } + KeyPrefix::EntryPointsV1ByEntity(entity) => { + U8_SERIALIZED_LENGTH + entity.serialized_length() + } + KeyPrefix::EntryPointsV2ByEntity(entity) => { + U8_SERIALIZED_LENGTH + entity.serialized_length() + } + } + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + KeyPrefix::DelegatorBidAddrsByValidator(validator) => { + writer.push(KeyTag::BidAddr as u8); + writer.push(BidAddrTag::DelegatedAccount as u8); + validator.write_bytes(writer)?; + } + KeyPrefix::MessageEntriesByEntity(hash_addr) => { + writer.push(KeyTag::Message as u8); + hash_addr.write_bytes(writer)?; + } + KeyPrefix::MessagesByEntityAndTopic(hash_addr, topic) => { + writer.push(KeyTag::Message as u8); + hash_addr.write_bytes(writer)?; + topic.write_bytes(writer)?; + } + KeyPrefix::NamedKeysByEntity(entity) => { + writer.push(KeyTag::NamedKey as u8); + entity.write_bytes(writer)?; + } + KeyPrefix::GasBalanceHoldsByPurse(uref) => { + writer.push(KeyTag::BalanceHold as u8); + writer.push(BalanceHoldAddrTag::Gas as u8); + uref.write_bytes(writer)?; + } + 
KeyPrefix::ProcessingBalanceHoldsByPurse(uref) => { + writer.push(KeyTag::BalanceHold as u8); + writer.push(BalanceHoldAddrTag::Processing as u8); + uref.write_bytes(writer)?; + } + KeyPrefix::EntryPointsV1ByEntity(entity) => { + writer.push(KeyTag::EntryPoint as u8); + writer.push(0); + entity.write_bytes(writer)?; + } + KeyPrefix::EntryPointsV2ByEntity(entity) => { + writer.push(KeyTag::EntryPoint as u8); + writer.push(1); + entity.write_bytes(writer)?; + } + } + Ok(()) + } +} + +impl FromBytes for KeyPrefix { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + let result = match tag { + tag if tag == KeyTag::BidAddr as u8 => { + let (bid_addr_tag, remainder) = u8::from_bytes(remainder)?; + match bid_addr_tag { + tag if tag == BidAddrTag::DelegatedAccount as u8 => { + let (validator, remainder) = AccountHash::from_bytes(remainder)?; + ( + KeyPrefix::DelegatorBidAddrsByValidator(validator), + remainder, + ) + } + _ => return Err(bytesrepr::Error::Formatting), + } + } + tag if tag == KeyTag::Message as u8 => { + let (hash_addr, remainder) = EntityAddr::from_bytes(remainder)?; + if remainder.is_empty() { + (KeyPrefix::MessageEntriesByEntity(hash_addr), remainder) + } else { + let (topic, remainder) = TopicNameHash::from_bytes(remainder)?; + ( + KeyPrefix::MessagesByEntityAndTopic(hash_addr, topic), + remainder, + ) + } + } + tag if tag == KeyTag::NamedKey as u8 => { + let (entity, remainder) = EntityAddr::from_bytes(remainder)?; + (KeyPrefix::NamedKeysByEntity(entity), remainder) + } + tag if tag == KeyTag::BalanceHold as u8 => { + let (balance_hold_addr_tag, remainder) = u8::from_bytes(remainder)?; + let (uref, remainder) = URefAddr::from_bytes(remainder)?; + match balance_hold_addr_tag { + tag if tag == BalanceHoldAddrTag::Gas as u8 => { + (KeyPrefix::GasBalanceHoldsByPurse(uref), remainder) + } + tag if tag == BalanceHoldAddrTag::Processing as u8 => { + 
(KeyPrefix::ProcessingBalanceHoldsByPurse(uref), remainder) + } + _ => return Err(bytesrepr::Error::Formatting), + } + } + tag if tag == KeyTag::EntryPoint as u8 => { + let (entry_point_type, remainder) = u8::from_bytes(remainder)?; + let (entity, remainder) = EntityAddr::from_bytes(remainder)?; + match entry_point_type { + 0 => (KeyPrefix::EntryPointsV1ByEntity(entity), remainder), + 1 => (KeyPrefix::EntryPointsV2ByEntity(entity), remainder), + _ => return Err(bytesrepr::Error::Formatting), + } + } + _ => return Err(bytesrepr::Error::Formatting), + }; + Ok(result) + } +} + +#[cfg(test)] +mod tests { + use casper_types::testing::TestRng; + use rand::Rng; + + use casper_types::{ + addressable_entity::NamedKeyAddr, + contract_messages::MessageAddr, + gens::{account_hash_arb, entity_addr_arb, topic_name_hash_arb, u8_slice_32}, + system::{auction::BidAddr, mint::BalanceHoldAddr}, + BlockTime, EntryPointAddr, Key, + }; + + use super::*; + use proptest::prelude::*; + + pub fn key_prefix_arb() -> impl Strategy { + prop_oneof![ + account_hash_arb().prop_map(KeyPrefix::DelegatorBidAddrsByValidator), + entity_addr_arb().prop_map(KeyPrefix::MessageEntriesByEntity), + (entity_addr_arb(), topic_name_hash_arb()).prop_map(|(entity_addr, topic)| { + KeyPrefix::MessagesByEntityAndTopic(entity_addr, topic) + }), + entity_addr_arb().prop_map(KeyPrefix::NamedKeysByEntity), + u8_slice_32().prop_map(KeyPrefix::GasBalanceHoldsByPurse), + u8_slice_32().prop_map(KeyPrefix::ProcessingBalanceHoldsByPurse), + entity_addr_arb().prop_map(KeyPrefix::EntryPointsV1ByEntity), + entity_addr_arb().prop_map(KeyPrefix::EntryPointsV2ByEntity), + ] + } + + proptest! { + #[test] + fn bytesrepr_roundtrip(key_prefix in key_prefix_arb()) { + bytesrepr::test_serialization_roundtrip(&key_prefix); + } + } + + #[test] + fn key_serializer_compat() { + // This test ensures that the `KeyPrefix` deserializer is compatible with the `Key` + // serializer. 
Combined with the `bytesrepr_roundtrip` test, this ensures that + // `KeyPrefix` is binary compatible with `Key`. + + let rng = &mut TestRng::new(); + + let hash1 = rng.gen(); + let hash2 = rng.gen(); + + for (key, prefix) in [ + ( + Key::BidAddr(BidAddr::new_delegator_account_addr((hash1, hash2))), + KeyPrefix::DelegatorBidAddrsByValidator(AccountHash::new(hash1)), + ), + ( + Key::Message(MessageAddr::new_message_addr( + EntityAddr::SmartContract(hash1), + TopicNameHash::new(hash2), + 0, + )), + KeyPrefix::MessagesByEntityAndTopic( + EntityAddr::SmartContract(hash1), + TopicNameHash::new(hash2), + ), + ), + ( + Key::NamedKey(NamedKeyAddr::new_named_key_entry( + EntityAddr::Account(hash1), + hash2, + )), + KeyPrefix::NamedKeysByEntity(EntityAddr::Account(hash1)), + ), + ( + Key::BalanceHold(BalanceHoldAddr::new_gas(hash1, BlockTime::new(0))), + KeyPrefix::GasBalanceHoldsByPurse(hash1), + ), + ( + Key::BalanceHold(BalanceHoldAddr::new_processing(hash1, BlockTime::new(0))), + KeyPrefix::ProcessingBalanceHoldsByPurse(hash1), + ), + ( + Key::EntryPoint( + EntryPointAddr::new_v1_entry_point_addr(EntityAddr::Account(hash1), "name") + .expect("should create entry point"), + ), + KeyPrefix::EntryPointsV1ByEntity(EntityAddr::Account(hash1)), + ), + ] { + let key_bytes = key.to_bytes().expect("should serialize key"); + let (parsed_key_prefix, remainder) = + KeyPrefix::from_bytes(&key_bytes).expect("should deserialize key prefix"); + assert_eq!(parsed_key_prefix, prefix, "key: {:?}", key); + assert!(!remainder.is_empty(), "key: {:?}", key); + } + } +} diff --git a/storage/src/data_access_layer/message_topics.rs b/storage/src/data_access_layer/message_topics.rs new file mode 100644 index 0000000000..6002505ab3 --- /dev/null +++ b/storage/src/data_access_layer/message_topics.rs @@ -0,0 +1,43 @@ +use casper_types::{addressable_entity::MessageTopics, Digest, EntityAddr}; + +use crate::tracking_copy::TrackingCopyError; + +/// Request for a message topics. 
+pub struct MessageTopicsRequest { + state_hash: Digest, + entity_addr: EntityAddr, +} + +impl MessageTopicsRequest { + /// Creates new request object. + pub fn new(state_hash: Digest, entity_addr: EntityAddr) -> Self { + Self { + state_hash, + entity_addr, + } + } + + /// Returns state root hash. + pub fn state_hash(&self) -> Digest { + self.state_hash + } + + /// Returns the hash addr. + pub fn entity_addr(&self) -> EntityAddr { + self.entity_addr + } +} + +/// Result of a global state query request. +#[derive(Debug)] +pub enum MessageTopicsResult { + /// Invalid state root hash. + RootNotFound, + /// Successful query. + Success { + /// Stored value under a path. + message_topics: MessageTopics, + }, + /// Tracking Copy Error + Failure(TrackingCopyError), +} diff --git a/storage/src/data_access_layer/mint.rs b/storage/src/data_access_layer/mint.rs new file mode 100644 index 0000000000..e7dded87f2 --- /dev/null +++ b/storage/src/data_access_layer/mint.rs @@ -0,0 +1,427 @@ +use std::collections::BTreeSet; + +use crate::{ + data_access_layer::BalanceIdentifier, + system::{ + burn::{BurnArgs, BurnError}, + runtime_native::{Config as NativeRuntimeConfig, TransferConfig}, + transfer::{TransferArgs, TransferError}, + }, + tracking_copy::TrackingCopyCache, +}; +use casper_types::{ + account::AccountHash, execution::Effects, Digest, InitiatorAddr, ProtocolVersion, RuntimeArgs, + TransactionHash, Transfer, U512, +}; + +/// Transfer arguments using balance identifiers. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct BalanceIdentifierTransferArgs { + to: Option, + source: BalanceIdentifier, + target: BalanceIdentifier, + amount: U512, + arg_id: Option, +} + +impl BalanceIdentifierTransferArgs { + /// Ctor. + pub fn new( + to: Option, + source: BalanceIdentifier, + target: BalanceIdentifier, + amount: U512, + arg_id: Option, + ) -> Self { + BalanceIdentifierTransferArgs { + to, + source, + target, + amount, + arg_id, + } + } + + /// Get to. 
+ pub fn to(&self) -> Option { + self.to + } + + /// Get source. + pub fn source(&self) -> &BalanceIdentifier { + &self.source + } + + /// Get target. + pub fn target(&self) -> &BalanceIdentifier { + &self.target + } + + /// Get amount. + pub fn amount(&self) -> U512 { + self.amount + } + + /// Get arg_id. + pub fn arg_id(&self) -> Option { + self.arg_id + } +} + +/// Transfer details. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum TransferRequestArgs { + /// Provides opaque arguments in runtime format. + Raw(RuntimeArgs), + /// Provides explicit structured args. + Explicit(TransferArgs), + /// Provides support for transfers using balance identifiers. + /// The source and target purses will get resolved on usage. + Indirect(Box), +} + +/// Request for motes transfer. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct TransferRequest { + /// Config. + config: NativeRuntimeConfig, + /// State root hash. + state_hash: Digest, + /// Protocol version. + protocol_version: ProtocolVersion, + /// Transaction hash. + transaction_hash: TransactionHash, + /// Base account. + initiator: InitiatorAddr, + /// List of authorizing accounts. + authorization_keys: BTreeSet, + /// Args. + args: TransferRequestArgs, +} + +impl TransferRequest { + /// Creates new request object. + #[allow(clippy::too_many_arguments)] + pub fn new( + config: NativeRuntimeConfig, + state_hash: Digest, + protocol_version: ProtocolVersion, + transaction_hash: TransactionHash, + initiator: InitiatorAddr, + authorization_keys: BTreeSet, + args: TransferArgs, + ) -> Self { + let args = TransferRequestArgs::Explicit(args); + Self { + config, + state_hash, + protocol_version, + transaction_hash, + initiator, + authorization_keys, + args, + } + } + + /// Creates new request instance with runtime args. 
+ #[allow(clippy::too_many_arguments)] + pub fn with_runtime_args( + config: NativeRuntimeConfig, + state_hash: Digest, + protocol_version: ProtocolVersion, + transaction_hash: TransactionHash, + initiator: InitiatorAddr, + authorization_keys: BTreeSet, + args: RuntimeArgs, + ) -> Self { + let args = TransferRequestArgs::Raw(args); + Self { + config, + state_hash, + protocol_version, + transaction_hash, + initiator, + authorization_keys, + args, + } + } + + /// Creates new request object using balance identifiers. + #[allow(clippy::too_many_arguments)] + pub fn new_indirect( + config: NativeRuntimeConfig, + state_hash: Digest, + protocol_version: ProtocolVersion, + transaction_hash: TransactionHash, + initiator: InitiatorAddr, + authorization_keys: BTreeSet, + args: BalanceIdentifierTransferArgs, + ) -> Self { + let args = TransferRequestArgs::Indirect(Box::new(args)); + Self { + config, + state_hash, + protocol_version, + transaction_hash, + initiator, + authorization_keys, + args, + } + } + + /// Returns a reference to the runtime config. + pub fn config(&self) -> &NativeRuntimeConfig { + &self.config + } + + /// Returns a reference to the transfer config. + pub fn transfer_config(&self) -> &TransferConfig { + self.config.transfer_config() + } + + /// Returns state root hash. + pub fn state_hash(&self) -> Digest { + self.state_hash + } + + /// Returns initiator. + pub fn initiator(&self) -> &InitiatorAddr { + &self.initiator + } + + /// Returns authorization keys. + pub fn authorization_keys(&self) -> &BTreeSet { + &self.authorization_keys + } + + /// Returns protocol version. + pub fn protocol_version(&self) -> ProtocolVersion { + self.protocol_version + } + + /// Returns transaction hash. + pub fn transaction_hash(&self) -> TransactionHash { + self.transaction_hash + } + + /// Returns transfer args. + pub fn args(&self) -> &TransferRequestArgs { + &self.args + } + + /// Into args. 
+ pub fn into_args(self) -> TransferRequestArgs { + self.args + } + + /// Used by `WasmTestBuilder` to set the appropriate state root hash and transfer config before + /// executing the transfer. + #[doc(hidden)] + pub fn set_state_hash_and_config(&mut self, state_hash: Digest, config: NativeRuntimeConfig) { + self.state_hash = state_hash; + self.config = config; + } +} + +/// Transfer result. +#[derive(Debug, Clone)] +pub enum TransferResult { + /// Invalid state root hash. + RootNotFound, + /// Transfer succeeded + Success { + /// List of transfers that happened during execution. + transfers: Vec, + /// Effects of transfer. + effects: Effects, + /// Cached tracking copy operations. + cache: TrackingCopyCache, + }, + /// Transfer failed + Failure(TransferError), +} + +impl TransferResult { + /// Returns the effects, if any. + pub fn effects(&self) -> Effects { + match self { + TransferResult::RootNotFound | TransferResult::Failure(_) => Effects::new(), + TransferResult::Success { effects, .. } => effects.clone(), + } + } + + /// Returns transfers. + pub fn transfers(&self) -> Vec { + match self { + TransferResult::RootNotFound | TransferResult::Failure(_) => vec![], + TransferResult::Success { transfers, .. } => transfers.clone(), + } + } + + /// Returns transfer error, if any. + pub fn error(&self) -> Option { + if let Self::Failure(error) = self { + Some(error.clone()) + } else { + None + } + } +} + +/// Burn details. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum BurnRequestArgs { + /// Provides opaque arguments in runtime format. + Raw(RuntimeArgs), + /// Provides explicit structured args. + Explicit(BurnArgs), +} + +/// Request for motes burn. +pub struct BurnRequest { + /// Config. + config: NativeRuntimeConfig, + /// State root hash. + state_hash: Digest, + /// Protocol version. + protocol_version: ProtocolVersion, + /// Transaction hash. + transaction_hash: TransactionHash, + /// Base account. 
+ initiator: InitiatorAddr, + /// List of authorizing accounts. + authorization_keys: BTreeSet, + /// Args. + args: BurnRequestArgs, +} + +impl BurnRequest { + /// Creates new request object. + #[allow(clippy::too_many_arguments)] + pub fn new( + config: NativeRuntimeConfig, + state_hash: Digest, + protocol_version: ProtocolVersion, + transaction_hash: TransactionHash, + initiator: InitiatorAddr, + authorization_keys: BTreeSet, + args: BurnArgs, + ) -> Self { + let args = BurnRequestArgs::Explicit(args); + Self { + config, + state_hash, + protocol_version, + transaction_hash, + initiator, + authorization_keys, + args, + } + } + + /// Creates new request instance with runtime args. + #[allow(clippy::too_many_arguments)] + pub fn with_runtime_args( + config: NativeRuntimeConfig, + state_hash: Digest, + protocol_version: ProtocolVersion, + transaction_hash: TransactionHash, + initiator: InitiatorAddr, + authorization_keys: BTreeSet, + args: RuntimeArgs, + ) -> Self { + let args = BurnRequestArgs::Raw(args); + Self { + config, + state_hash, + protocol_version, + transaction_hash, + initiator, + authorization_keys, + args, + } + } + + /// Returns a reference to the runtime config. + pub fn config(&self) -> &NativeRuntimeConfig { + &self.config + } + + /// Returns state root hash. + pub fn state_hash(&self) -> Digest { + self.state_hash + } + + /// Returns initiator. + pub fn initiator(&self) -> &InitiatorAddr { + &self.initiator + } + + /// Returns authorization keys. + pub fn authorization_keys(&self) -> &BTreeSet { + &self.authorization_keys + } + + /// Returns protocol version. + pub fn protocol_version(&self) -> ProtocolVersion { + self.protocol_version + } + + /// Returns transaction hash. + pub fn transaction_hash(&self) -> TransactionHash { + self.transaction_hash + } + + /// Returns transfer args. + pub fn args(&self) -> &BurnRequestArgs { + &self.args + } + + /// Into args. 
+ pub fn into_args(self) -> BurnRequestArgs { + self.args + } + + /// Used by `WasmTestBuilder` to set the appropriate state root hash and runtime config before + /// executing the burn. + #[doc(hidden)] + pub fn set_state_hash_and_config(&mut self, state_hash: Digest, config: NativeRuntimeConfig) { + self.state_hash = state_hash; + self.config = config; + } +} + +/// Burn result. +#[derive(Debug, Clone)] +pub enum BurnResult { + /// Invalid state root hash. + RootNotFound, + /// Transfer succeeded + Success { + /// Effects of transfer. + effects: Effects, + /// Cached tracking copy operations. + cache: TrackingCopyCache, + }, + /// Burn failed + Failure(BurnError), +} + +impl BurnResult { + /// Returns the effects, if any. + pub fn effects(&self) -> Effects { + match self { + BurnResult::RootNotFound | BurnResult::Failure(_) => Effects::new(), + BurnResult::Success { effects, .. } => effects.clone(), + } + } + + /// Returns burn error, if any. + pub fn error(&self) -> Option { + if let Self::Failure(error) = self { + Some(error.clone()) + } else { + None + } + } +} diff --git a/storage/src/data_access_layer/prefixed_values.rs b/storage/src/data_access_layer/prefixed_values.rs new file mode 100644 index 0000000000..942af11fb0 --- /dev/null +++ b/storage/src/data_access_layer/prefixed_values.rs @@ -0,0 +1,46 @@ +//! Support for obtaining all values with a given key prefix. +use crate::{tracking_copy::TrackingCopyError, KeyPrefix}; +use casper_types::{Digest, StoredValue}; + +/// Represents a request to obtain all values with a given key prefix. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct PrefixedValuesRequest { + state_hash: Digest, + key_prefix: KeyPrefix, +} + +impl PrefixedValuesRequest { + /// Creates new request. + pub fn new(state_hash: Digest, key_prefix: KeyPrefix) -> Self { + Self { + state_hash, + key_prefix, + } + } + + /// Returns state root hash. + pub fn state_hash(&self) -> Digest { + self.state_hash + } + + /// Returns key prefix. 
+ pub fn key_prefix(&self) -> &KeyPrefix { + &self.key_prefix + } +} + +/// Represents a result of a `items_by_prefix` request. +#[derive(Debug)] +pub enum PrefixedValuesResult { + /// Invalid state root hash. + RootNotFound, + /// Contains values returned from the global state. + Success { + /// The requested prefix. + key_prefix: KeyPrefix, + /// Current values. + values: Vec, + }, + /// Failure. + Failure(TrackingCopyError), +} diff --git a/storage/src/data_access_layer/protocol_upgrade.rs b/storage/src/data_access_layer/protocol_upgrade.rs new file mode 100644 index 0000000000..691d06f720 --- /dev/null +++ b/storage/src/data_access_layer/protocol_upgrade.rs @@ -0,0 +1,63 @@ +use casper_types::{execution::Effects, Digest, ProtocolUpgradeConfig}; + +use crate::system::protocol_upgrade::ProtocolUpgradeError; + +/// Request to upgrade the protocol. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct ProtocolUpgradeRequest { + config: ProtocolUpgradeConfig, +} + +impl ProtocolUpgradeRequest { + /// Creates a new instance of ProtocolUpgradeRequest. + pub fn new(config: ProtocolUpgradeConfig) -> Self { + ProtocolUpgradeRequest { config } + } + + /// Get the protocol upgrade config. + pub fn config(&self) -> &ProtocolUpgradeConfig { + &self.config + } + + /// Get the pre_state_hash to apply protocol upgrade to. + pub fn pre_state_hash(&self) -> Digest { + self.config.pre_state_hash() + } +} + +/// Response to attempt to upgrade the protocol. +#[derive(Debug, Clone)] +pub enum ProtocolUpgradeResult { + /// Global state root not found. + RootNotFound, + /// Protocol upgraded successfully. + Success { + /// State hash after protocol upgrade is committed to the global state. + post_state_hash: Digest, + /// Effects of protocol upgrade. + effects: Effects, + }, + /// Failed to upgrade protocol. + Failure(ProtocolUpgradeError), +} + +impl ProtocolUpgradeResult { + /// Is success. + pub fn is_success(&self) -> bool { + matches!(self, ProtocolUpgradeResult::Success { .. 
}) + } + + /// Is an error + pub fn is_err(&self) -> bool { + match self { + ProtocolUpgradeResult::RootNotFound | ProtocolUpgradeResult::Failure(_) => true, + ProtocolUpgradeResult::Success { .. } => false, + } + } +} + +impl From for ProtocolUpgradeResult { + fn from(err: ProtocolUpgradeError) -> Self { + ProtocolUpgradeResult::Failure(err) + } +} diff --git a/storage/src/data_access_layer/prune.rs b/storage/src/data_access_layer/prune.rs new file mode 100644 index 0000000000..ca7bddbe59 --- /dev/null +++ b/storage/src/data_access_layer/prune.rs @@ -0,0 +1,64 @@ +//! Support for pruning leaf nodes from the merkle trie. +use crate::{ + global_state::trie_store::operations::TriePruneResult, tracking_copy::TrackingCopyError, +}; +use casper_types::{execution::Effects, Digest, Key}; + +/// Represents the configuration of a prune operation. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct PruneRequest { + state_hash: Digest, + keys_to_prune: Vec, +} + +impl PruneRequest { + /// Create new prune config. + pub fn new(state_hash: Digest, keys_to_prune: Vec) -> Self { + PruneRequest { + state_hash, + keys_to_prune, + } + } + + /// Returns the current state root state hash + pub fn state_hash(&self) -> Digest { + self.state_hash + } + + /// Returns the list of keys to delete. + pub fn keys_to_prune(&self) -> &[Key] { + &self.keys_to_prune + } +} + +/// The result of performing a prune. +#[derive(Debug, Clone)] +pub enum PruneResult { + /// Root not found. + RootNotFound, + /// Key does not exists. + MissingKey, + /// Failed to prune. + Failure(TrackingCopyError), + /// New state root hash generated after elements were pruned. + Success { + /// State root hash. + post_state_hash: Digest, + /// Effects of executing a step request. 
+ effects: Effects, + }, +} + +impl From for PruneResult { + fn from(value: TriePruneResult) -> Self { + match value { + TriePruneResult::Pruned(post_state_hash) => PruneResult::Success { + post_state_hash, + effects: Effects::default(), + }, + TriePruneResult::MissingKey => PruneResult::MissingKey, + TriePruneResult::RootNotFound => PruneResult::RootNotFound, + TriePruneResult::Failure(gse) => PruneResult::Failure(TrackingCopyError::Storage(gse)), + } + } +} diff --git a/storage/src/data_access_layer/query.rs b/storage/src/data_access_layer/query.rs new file mode 100644 index 0000000000..733fe99a10 --- /dev/null +++ b/storage/src/data_access_layer/query.rs @@ -0,0 +1,75 @@ +//! Support for global state queries. +use casper_types::{global_state::TrieMerkleProof, Digest, Key, StoredValue}; + +use crate::tracking_copy::{TrackingCopyError, TrackingCopyQueryResult}; + +/// Request for a global state query. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct QueryRequest { + state_hash: Digest, + key: Key, + path: Vec, +} + +impl QueryRequest { + /// Creates new request object. + pub fn new(state_hash: Digest, key: Key, path: Vec) -> Self { + QueryRequest { + state_hash, + key, + path, + } + } + + /// Returns state root hash. + pub fn state_hash(&self) -> Digest { + self.state_hash + } + + /// Returns a key. + pub fn key(&self) -> Key { + self.key + } + + /// Returns a query path. 
+ pub fn path(&self) -> &[String] { + &self.path + } +} + +impl From for QueryResult { + fn from(tracking_copy_query_result: TrackingCopyQueryResult) -> Self { + match tracking_copy_query_result { + TrackingCopyQueryResult::ValueNotFound(message) => QueryResult::ValueNotFound(message), + TrackingCopyQueryResult::CircularReference(message) => { + QueryResult::Failure(TrackingCopyError::CircularReference(message)) + } + TrackingCopyQueryResult::Success { value, proofs } => { + let value = Box::new(value); + QueryResult::Success { value, proofs } + } + TrackingCopyQueryResult::DepthLimit { depth } => { + QueryResult::Failure(TrackingCopyError::QueryDepthLimit { depth }) + } + TrackingCopyQueryResult::RootNotFound => QueryResult::RootNotFound, + } + } +} + +/// Result of a global state query request. +#[derive(Debug)] +pub enum QueryResult { + /// Invalid state root hash. + RootNotFound, + /// Value not found. + ValueNotFound(String), + /// Successful query. + Success { + /// Stored value under a path. + value: Box, + /// Merkle proof of the query. + proofs: Vec>, + }, + /// Tracking Copy Error + Failure(TrackingCopyError), +} diff --git a/storage/src/data_access_layer/round_seigniorage.rs b/storage/src/data_access_layer/round_seigniorage.rs new file mode 100644 index 0000000000..77405d1235 --- /dev/null +++ b/storage/src/data_access_layer/round_seigniorage.rs @@ -0,0 +1,48 @@ +use crate::tracking_copy::TrackingCopyError; +use casper_types::{Digest, ProtocolVersion, U512}; +use num_rational::Ratio; + +/// Request to get the current round seigniorage rate. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct RoundSeigniorageRateRequest { + state_hash: Digest, + protocol_version: ProtocolVersion, +} + +impl RoundSeigniorageRateRequest { + /// Create instance of RoundSeigniorageRateRequest. 
+ pub fn new(state_hash: Digest, protocol_version: ProtocolVersion) -> Self { + RoundSeigniorageRateRequest { + state_hash, + protocol_version, + } + } + + /// Returns state root hash. + pub fn state_hash(&self) -> Digest { + self.state_hash + } + + /// Returns the protocol version. + pub fn protocol_version(&self) -> ProtocolVersion { + self.protocol_version + } +} + +/// Represents a result of a `round_seigniorage_rate` request. +#[derive(Debug)] +pub enum RoundSeigniorageRateResult { + /// Invalid state root hash. + RootNotFound, + /// The mint is not found. + MintNotFound, + /// Value not found. + ValueNotFound(String), + /// The round seigniorage rate at the specified state hash. + Success { + /// The current rate. + rate: Ratio, + }, + /// Failure. + Failure(TrackingCopyError), +} diff --git a/storage/src/data_access_layer/seigniorage_recipients.rs b/storage/src/data_access_layer/seigniorage_recipients.rs new file mode 100644 index 0000000000..2117f7d7d1 --- /dev/null +++ b/storage/src/data_access_layer/seigniorage_recipients.rs @@ -0,0 +1,77 @@ +//! Support for querying seigniorage recipients. + +use crate::tracking_copy::TrackingCopyError; +use casper_types::{system::auction::SeigniorageRecipientsSnapshot, Digest}; +use std::fmt::{Display, Formatter}; + +/// Request for seigniorage recipients. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct SeigniorageRecipientsRequest { + state_hash: Digest, +} + +impl SeigniorageRecipientsRequest { + /// Constructs a new SeigniorageRecipientsRequest. + pub fn new(state_hash: Digest) -> Self { + SeigniorageRecipientsRequest { state_hash } + } + + /// Get the state hash. + pub fn state_hash(&self) -> Digest { + self.state_hash + } +} + +/// Result enum that represents all possible outcomes of a seignorage recipients request. +#[derive(Debug)] +pub enum SeigniorageRecipientsResult { + /// Returned if auction is not found. This is a catastrophic outcome. 
+ AuctionNotFound, + /// Returned if a passed state root hash is not found. This is recoverable. + RootNotFound, + /// Value not found. This is not erroneous if the record does not exist. + ValueNotFound(String), + /// There is no systemic issue, but the query itself errored. + Failure(TrackingCopyError), + /// The query succeeded. + Success { + /// Seigniorage recipients. + seigniorage_recipients: SeigniorageRecipientsSnapshot, + }, +} + +impl SeigniorageRecipientsResult { + /// Returns true if success. + pub fn is_success(&self) -> bool { + matches!(self, SeigniorageRecipientsResult::Success { .. }) + } + + /// Takes seigniorage recipients. + pub fn into_option(self) -> Option { + match self { + SeigniorageRecipientsResult::AuctionNotFound + | SeigniorageRecipientsResult::RootNotFound + | SeigniorageRecipientsResult::ValueNotFound(_) + | SeigniorageRecipientsResult::Failure(_) => None, + SeigniorageRecipientsResult::Success { + seigniorage_recipients, + } => Some(seigniorage_recipients), + } + } +} + +impl Display for SeigniorageRecipientsResult { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self { + SeigniorageRecipientsResult::AuctionNotFound => write!(f, "system auction not found"), + SeigniorageRecipientsResult::RootNotFound => write!(f, "state root not found"), + SeigniorageRecipientsResult::ValueNotFound(msg) => { + write!(f, "value not found: {}", msg) + } + SeigniorageRecipientsResult::Failure(tce) => write!(f, "{}", tce), + SeigniorageRecipientsResult::Success { .. } => { + write!(f, "success") + } + } + } +} diff --git a/storage/src/data_access_layer/step.rs b/storage/src/data_access_layer/step.rs new file mode 100644 index 0000000000..9dafc487a2 --- /dev/null +++ b/storage/src/data_access_layer/step.rs @@ -0,0 +1,224 @@ +//! Support for a step method. +//! +//! A step request executes auction code, slashes validators, evicts validators and distributes +//! rewards. 
+ +use std::vec::Vec; +use thiserror::Error; + +use casper_types::{execution::Effects, CLValueError, Digest, EraId, ProtocolVersion, PublicKey}; + +use crate::{ + global_state::error::Error as GlobalStateError, + system::runtime_native::{Config, TransferConfig}, + tracking_copy::TrackingCopyError, +}; + +/// The definition of a slash item. +#[derive(Debug, Clone)] +pub struct SlashItem { + /// The public key of the validator that will be slashed. + pub validator_id: PublicKey, +} + +impl SlashItem { + /// Creates a new slash item. + pub fn new(validator_id: PublicKey) -> Self { + Self { validator_id } + } +} + +/// The definition of a reward item. +#[derive(Debug, Clone)] +pub struct RewardItem { + /// The public key of the validator that will be rewarded. + pub validator_id: PublicKey, + /// Amount of motes that will be distributed as rewards. + pub value: u64, +} + +impl RewardItem { + /// Creates new reward item. + pub fn new(validator_id: PublicKey, value: u64) -> Self { + Self { + validator_id, + value, + } + } +} + +/// The definition of an evict item. +#[derive(Debug, Clone)] +pub struct EvictItem { + /// The public key of the validator that will be evicted. + pub validator_id: PublicKey, +} + +impl EvictItem { + /// Creates new evict item. + pub fn new(validator_id: PublicKey) -> Self { + Self { validator_id } + } +} + +/// Representation of a step request. +#[derive(Debug)] +pub struct StepRequest { + /// Config + config: Config, + + /// State root hash. + state_hash: Digest, + + /// Protocol version for this request. + protocol_version: ProtocolVersion, + /// List of validators to be slashed. + /// + /// A slashed validator is removed from the next validator set. + slash_items: Vec, + /// List of validators to be evicted. + /// + /// Compared to a slashing, evictions are deactivating a given validator, but his stake is + /// unchanged. A further re-activation is possible. 
+ evict_items: Vec, + /// Specifies which era validators will be returned based on `next_era_id`. + /// + /// Intended use is to always specify the current era id + 1 which will return computed era at + /// the end of this step request. + next_era_id: EraId, + + /// Timestamp in milliseconds representing end of the current era. + era_end_timestamp_millis: u64, +} + +impl StepRequest { + /// Creates new step request. + #[allow(clippy::too_many_arguments)] + pub fn new( + config: Config, + state_hash: Digest, + protocol_version: ProtocolVersion, + slash_items: Vec, + evict_items: Vec, + next_era_id: EraId, + era_end_timestamp_millis: u64, + ) -> Self { + Self { + config, + state_hash, + protocol_version, + slash_items, + evict_items, + next_era_id, + era_end_timestamp_millis, + } + } + + /// Returns the config. + pub fn config(&self) -> &Config { + &self.config + } + + /// Returns the transfer config. + pub fn transfer_config(&self) -> TransferConfig { + self.config.transfer_config().clone() + } + + /// Returns list of slashed validators. + pub fn slashed_validators(&self) -> Vec { + self.slash_items + .iter() + .map(|si| si.validator_id.clone()) + .collect() + } + + /// Returns pre_state_hash. + pub fn state_hash(&self) -> Digest { + self.state_hash + } + + /// Returns protocol_version. + pub fn protocol_version(&self) -> ProtocolVersion { + self.protocol_version + } + + /// Returns slash_items. + pub fn slash_items(&self) -> &Vec { + &self.slash_items + } + + /// Returns evict_items. + pub fn evict_items(&self) -> &Vec { + &self.evict_items + } + /// Returns next_era_id. + pub fn next_era_id(&self) -> EraId { + self.next_era_id + } + + /// Returns era_end_timestamp_millis. + pub fn era_end_timestamp_millis(&self) -> u64 { + self.era_end_timestamp_millis + } +} + +/// Representation of all possible failures of a step request. +#[derive(Clone, Error, Debug)] +pub enum StepError { + /// Error using the auction contract. 
+ #[error("Auction error")] + Auction, + /// Error executing a slashing operation. + #[error("Slashing error")] + SlashingError, + /// Tracking copy error. + #[error("{0}")] + TrackingCopy(TrackingCopyError), + /// Failed to find auction contract. + #[error("Auction not found")] + AuctionNotFound, + /// Failed to find mint contract. + #[error("Mint not found")] + MintNotFound, +} + +impl From for StepError { + fn from(tce: TrackingCopyError) -> Self { + Self::TrackingCopy(tce) + } +} + +impl From for StepError { + fn from(gse: GlobalStateError) -> Self { + Self::TrackingCopy(TrackingCopyError::Storage(gse)) + } +} + +impl From for StepError { + fn from(cve: CLValueError) -> Self { + StepError::TrackingCopy(TrackingCopyError::CLValue(cve)) + } +} + +/// Outcome of running step process. +#[derive(Debug)] +pub enum StepResult { + /// Global state root not found. + RootNotFound, + /// Step process ran successfully. + Success { + /// State hash after step outcome is committed to the global state. + post_state_hash: Digest, + /// Effects of the step process. + effects: Effects, + }, + /// Failed to execute step. + Failure(StepError), +} + +impl StepResult { + /// Returns if step is successful. + pub fn is_success(&self) -> bool { + matches!(self, StepResult::Success { .. }) + } +} diff --git a/storage/src/data_access_layer/system_entity_registry.rs b/storage/src/data_access_layer/system_entity_registry.rs new file mode 100644 index 0000000000..c17a321dc8 --- /dev/null +++ b/storage/src/data_access_layer/system_entity_registry.rs @@ -0,0 +1,146 @@ +use crate::tracking_copy::TrackingCopyError; +use casper_types::{ + system::{AUCTION, HANDLE_PAYMENT, MINT}, + Digest, Key, ProtocolVersion, SystemHashRegistry, +}; + +/// Used to specify is the requestor wants the registry itself or a named entry within it. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum SystemEntityRegistrySelector { + /// Requests all system entity entries. + All, + /// Requests system entity by name. 
+ ByName(String), +} + +impl SystemEntityRegistrySelector { + /// Create instance asking for the entire registry. + pub fn all() -> Self { + SystemEntityRegistrySelector::All + } + + /// Create instance asking for mint. + pub fn mint() -> Self { + SystemEntityRegistrySelector::ByName(MINT.to_string()) + } + + /// Create instance asking for auction. + pub fn auction() -> Self { + SystemEntityRegistrySelector::ByName(AUCTION.to_string()) + } + + /// Create instance asking for handle payment. + pub fn handle_payment() -> Self { + SystemEntityRegistrySelector::ByName(HANDLE_PAYMENT.to_string()) + } + + /// Name of selected entity, if any. + pub fn name(&self) -> Option { + match self { + SystemEntityRegistrySelector::All => None, + SystemEntityRegistrySelector::ByName(name) => Some(name.clone()), + } + } +} + +/// Represents a request to obtain the system entity registry or an entry within it. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct SystemEntityRegistryRequest { + /// State root hash. + state_hash: Digest, + /// Protocol version. + protocol_version: ProtocolVersion, + /// Selector. + selector: SystemEntityRegistrySelector, + enable_addressable_entity: bool, +} + +impl SystemEntityRegistryRequest { + /// Create new request. + pub fn new( + state_hash: Digest, + protocol_version: ProtocolVersion, + selector: SystemEntityRegistrySelector, + enable_addressable_entity: bool, + ) -> Self { + SystemEntityRegistryRequest { + state_hash, + protocol_version, + selector, + enable_addressable_entity, + } + } + + /// Returns the state hash. + pub fn state_hash(&self) -> Digest { + self.state_hash + } + + /// Returns the current selector. + pub fn selector(&self) -> &SystemEntityRegistrySelector { + &self.selector + } + + /// Protocol version. + pub fn protocol_version(&self) -> ProtocolVersion { + self.protocol_version + } + + /// Enable the addressable entity and migrate accounts/contracts to entities. 
+ pub fn enable_addressable_entity(&self) -> bool { + self.enable_addressable_entity + } +} + +/// The payload of a successful request. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum SystemEntityRegistryPayload { + /// All registry entries. + All(SystemHashRegistry), + /// Specific system entity registry entry. + EntityKey(Key), +} + +/// The result of a system entity registry request. +#[derive(Debug)] +pub enum SystemEntityRegistryResult { + /// Invalid state root hash. + RootNotFound, + /// The system contract registry was not found. This is a valid outcome + /// on older networks, which did not have the system contract registry prior + /// to protocol version 1.4 + SystemEntityRegistryNotFound, + /// The named entity was not found in the registry. + NamedEntityNotFound(String), + /// Successful request. + Success { + /// What was asked for. + selected: SystemEntityRegistrySelector, + /// The payload asked for. + payload: SystemEntityRegistryPayload, + }, + /// Failed to get requested data. + Failure(TrackingCopyError), +} + +impl SystemEntityRegistryResult { + /// Is success. + pub fn is_success(&self) -> bool { + matches!(self, SystemEntityRegistryResult::Success { .. }) + } + + /// As registry payload. + pub fn as_registry_payload(&self) -> Result { + match self { + SystemEntityRegistryResult::RootNotFound => Err("Root not found".to_string()), + SystemEntityRegistryResult::SystemEntityRegistryNotFound => { + Err("System entity registry not found".to_string()) + } + SystemEntityRegistryResult::NamedEntityNotFound(name) => { + Err(format!("Named entity not found: {:?}", name)) + } + SystemEntityRegistryResult::Failure(tce) => Err(format!("{:?}", tce)), + SystemEntityRegistryResult::Success { payload, .. 
} => Ok(payload.clone()), + } + } +} diff --git a/storage/src/data_access_layer/tagged_values.rs b/storage/src/data_access_layer/tagged_values.rs new file mode 100644 index 0000000000..bf430bb8a5 --- /dev/null +++ b/storage/src/data_access_layer/tagged_values.rs @@ -0,0 +1,60 @@ +//! Support for obtaining all values under the given key tag. +use crate::tracking_copy::TrackingCopyError; +use casper_types::{Digest, KeyTag, StoredValue}; + +/// Tagged values selector. +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub enum TaggedValuesSelection { + /// All values under the specified key tag. + All(KeyTag), +} + +/// Represents a request to obtain all values under the given key tag. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct TaggedValuesRequest { + state_hash: Digest, + selection: TaggedValuesSelection, +} + +impl TaggedValuesRequest { + /// Creates new request. + pub fn new(state_hash: Digest, selection: TaggedValuesSelection) -> Self { + Self { + state_hash, + selection, + } + } + + /// Returns state root hash. + pub fn state_hash(&self) -> Digest { + self.state_hash + } + + /// Returns key tag. + pub fn key_tag(&self) -> KeyTag { + match self.selection { + TaggedValuesSelection::All(key_tag) => key_tag, + } + } + + /// Returns selection criteria. + pub fn selection(&self) -> TaggedValuesSelection { + self.selection + } +} + +/// Represents a result of a `get_all_values` request. +#[derive(Debug)] +pub enum TaggedValuesResult { + /// Invalid state root hash. + RootNotFound, + /// Contains values returned from the global state. + Success { + /// The requested selection. + selection: TaggedValuesSelection, + /// Current values. + values: Vec, + }, + /// Tagged value failure. 
+    Failure(TrackingCopyError),
+}
diff --git a/storage/src/data_access_layer/total_supply.rs b/storage/src/data_access_layer/total_supply.rs
new file mode 100644
index 0000000000..3f27168c99
--- /dev/null
+++ b/storage/src/data_access_layer/total_supply.rs
@@ -0,0 +1,47 @@
+use crate::tracking_copy::TrackingCopyError;
+use casper_types::{Digest, ProtocolVersion, U512};
+
+/// Request for total supply.
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct TotalSupplyRequest {
+    state_hash: Digest,
+    protocol_version: ProtocolVersion,
+}
+
+impl TotalSupplyRequest {
+    /// Creates an instance of TotalSupplyRequest.
+    pub fn new(state_hash: Digest, protocol_version: ProtocolVersion) -> Self {
+        TotalSupplyRequest {
+            state_hash,
+            protocol_version,
+        }
+    }
+
+    /// Returns state root hash.
+    pub fn state_hash(&self) -> Digest {
+        self.state_hash
+    }
+
+    /// Returns the protocol version.
+    pub fn protocol_version(&self) -> ProtocolVersion {
+        self.protocol_version
+    }
+}
+
+/// Represents a result of a `total_supply` request.
+#[derive(Debug)]
+pub enum TotalSupplyResult {
+    /// Invalid state root hash.
+    RootNotFound,
+    /// The mint is not found.
+    MintNotFound,
+    /// Value not found.
+    ValueNotFound(String),
+    /// The total supply at the specified state hash.
+    Success {
+        /// The total supply in motes.
+        total_supply: U512,
+    },
+    /// Failed to get total supply.
+    Failure(TrackingCopyError),
+}
diff --git a/storage/src/data_access_layer/trie.rs b/storage/src/data_access_layer/trie.rs
new file mode 100644
index 0000000000..cfc09f645b
--- /dev/null
+++ b/storage/src/data_access_layer/trie.rs
@@ -0,0 +1,113 @@
+use casper_types::Digest;
+
+use crate::global_state::{error::Error as GlobalStateError, trie::TrieRaw};
+
+/// Request for a trie element.
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct TrieRequest {
+    trie_key: Digest,
+    chunk_id: Option<u64>,
+}
+
+impl TrieRequest {
+    /// Creates an instance of TrieRequest.
+    pub fn new(trie_key: Digest, chunk_id: Option<u64>) -> Self {
+        TrieRequest { trie_key, chunk_id }
+    }
+
+    /// Trie key.
+    pub fn trie_key(&self) -> Digest {
+        self.trie_key
+    }
+
+    /// Chunk id.
+    pub fn chunk_id(&self) -> Option<u64> {
+        self.chunk_id
+    }
+
+    /// Has chunk id.
+    pub fn has_chunk_id(&self) -> bool {
+        self.chunk_id.is_some()
+    }
+}
+
+/// A trie element.
+#[derive(Debug)]
+pub enum TrieElement {
+    /// Raw bytes.
+    Raw(TrieRaw),
+    /// Chunk.
+    Chunked(TrieRaw, u64),
+}
+
+/// Represents a result of a `trie` request.
+#[derive(Debug)]
+pub enum TrieResult {
+    /// Value not found.
+    ValueNotFound(String),
+    /// The trie element at the specified key.
+    Success {
+        /// A trie element.
+        element: TrieElement,
+    },
+    /// Failed to get the trie element.
+    Failure(GlobalStateError),
+}
+
+impl TrieResult {
+    /// Transform trie result to raw state.
+    pub fn into_raw(self) -> Result<Option<TrieRaw>, GlobalStateError> {
+        match self {
+            TrieResult::ValueNotFound(_) => Ok(None),
+            TrieResult::Success { element } => match element {
+                TrieElement::Raw(raw) | TrieElement::Chunked(raw, _) => Ok(Some(raw)),
+            },
+            TrieResult::Failure(err) => Err(err),
+        }
+    }
+}
+
+/// Request for a trie element to be persisted.
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct PutTrieRequest {
+    raw: TrieRaw,
+}
+
+impl PutTrieRequest {
+    /// Creates an instance of PutTrieRequest.
+    pub fn new(raw: TrieRaw) -> Self {
+        PutTrieRequest { raw }
+    }
+
+    /// The raw bytes of the trie element.
+    pub fn raw(&self) -> &TrieRaw {
+        &self.raw
+    }
+
+    /// Take raw trie value.
+    pub fn take_raw(self) -> TrieRaw {
+        self.raw
+    }
+}
+
+/// Represents a result of a `put_trie` request.
+#[derive(Debug)]
+pub enum PutTrieResult {
+    /// The trie element is persisted.
+    Success {
+        /// The hash of the persisted trie element.
+        hash: Digest,
+    },
+    /// Failed to persist the trie element.
+    Failure(GlobalStateError),
+}
+
+impl PutTrieResult {
+    /// Returns a Result matching the original api for this functionality.
+    pub fn as_legacy(&self) -> Result<Digest, GlobalStateError> {
+        match self {
+            PutTrieResult::Success { hash } => Ok(*hash),
+            PutTrieResult::Failure(err) => Err(err.clone()),
+        }
+    }
+}
diff --git a/storage/src/global_state.rs b/storage/src/global_state.rs
new file mode 100644
index 0000000000..163896d530
--- /dev/null
+++ b/storage/src/global_state.rs
@@ -0,0 +1,33 @@
+/// Storage errors.
+pub mod error;
+/// Global State.
+pub mod state;
+/// Store module.
+pub mod store;
+/// Transaction Source.
+pub mod transaction_source;
+/// Merkle Trie implementation.
+pub mod trie;
+/// Merkle Trie storage.
+pub mod trie_store;
+
+const MAX_DBS: u32 = 2;
+
+pub(crate) const DEFAULT_MAX_DB_SIZE: usize = 52_428_800; // 50 MiB
+
+pub(crate) const DEFAULT_MAX_READERS: u32 = 512;
+
+pub(crate) const DEFAULT_MAX_QUERY_DEPTH: u64 = 5;
+
+/// The global state reader.
+pub trait GlobalStateReader:
+    state::StateReader<casper_types::Key, casper_types::StoredValue, Error = error::Error>
+{
+}
+
+impl<R: state::StateReader<casper_types::Key, casper_types::StoredValue, Error = error::Error>>
+    GlobalStateReader for R
+{
+}
+
+pub(crate) const DEFAULT_ENABLE_ENTITY: bool = false;
diff --git a/storage/src/global_state/error.rs b/storage/src/global_state/error.rs
new file mode 100644
index 0000000000..3b5e50f956
--- /dev/null
+++ b/storage/src/global_state/error.rs
@@ -0,0 +1,56 @@
+use std::sync;
+
+use thiserror::Error;
+
+use casper_types::{bytesrepr, Digest, Key};
+
+use crate::global_state::{state::CommitError, trie::TrieRaw};
+
+use super::trie_store::TrieStoreCacheError;
+
+/// Error enum representing possible errors in global state interactions.
+#[derive(Debug, Clone, Error, PartialEq, Eq)]
+#[non_exhaustive]
+pub enum Error {
+    /// LMDB error returned from underlying `lmdb` crate.
+    #[error(transparent)]
+    Lmdb(#[from] lmdb::Error),
+
+    /// (De)serialization error.
+    #[error("{0}")]
+    BytesRepr(#[from] bytesrepr::Error),
+
+    /// Concurrency error.
+    #[error("Another thread panicked while holding a lock")]
+    Poison,
+
+    /// Error committing to execution engine.
+    #[error(transparent)]
+    Commit(#[from] CommitError),
+
+    /// Invalid state root hash.
+    #[error("RootNotFound")]
+    RootNotFound,
+
+    /// Failed to put a trie node into global state because some of its children were missing.
+    #[error("Failed to put a trie into global state because some of its children were missing")]
+    MissingTrieNodeChildren(Digest, TrieRaw, Vec<Digest>),
+
+    /// Failed to prune listed keys.
+    #[error("Pruning attempt failed.")]
+    FailedToPrune(Vec<Key>),
+
+    /// Cannot provide proofs over working state in a cache (programmer error).
+    #[error("Attempt to generate proofs using non-empty cache.")]
+    CannotProvideProofsOverCachedData,
+
+    /// Encountered a cache error.
+    #[error("Cache error")]
+    CacheError(#[from] TrieStoreCacheError),
+}
+
+impl<T> From<sync::PoisonError<T>> for Error {
+    fn from(_error: sync::PoisonError<T>) -> Self {
+        Error::Poison
+    }
+}
diff --git a/storage/src/global_state/state/lmdb.rs b/storage/src/global_state/state/lmdb.rs
new file mode 100644
index 0000000000..89f84b6689
--- /dev/null
+++ b/storage/src/global_state/state/lmdb.rs
@@ -0,0 +1,651 @@
+use itertools::Itertools;
+use std::{ops::Deref, sync::Arc};
+use tracing::{error, warn};
+
+use lmdb::{DatabaseFlags, RwTransaction};
+
+use tempfile::TempDir;
+
+use casper_types::{
+    execution::{Effects, TransformKindV2, TransformV2},
+    global_state::TrieMerkleProof,
+    Digest, Key, StoredValue,
+};
+
+use super::CommitError;
+use crate::{
+    data_access_layer::{
+        DataAccessLayer, FlushRequest, FlushResult, PutTrieRequest, PutTrieResult, TrieElement,
+        TrieRequest, TrieResult,
+    },
+    global_state::{
+        error::Error as GlobalStateError,
+        state::{
+            commit, put_stored_values, scratch::ScratchGlobalState, CommitProvider,
+            ScratchProvider, StateProvider, StateReader,
+        },
+        store::Store,
+        transaction_source::{lmdb::LmdbEnvironment, Transaction, TransactionSource},
+        trie::{operations::create_hashed_empty_trie, Trie, TrieRaw},
+        trie_store::{
+            lmdb::{LmdbTrieStore, ScratchTrieStore},
+            operations::{
+                keys_with_prefix, missing_children, prune, put_trie, read, read_with_proof,
+                ReadResult,
TriePruneResult, + }, + }, + DEFAULT_ENABLE_ENTITY, DEFAULT_MAX_DB_SIZE, DEFAULT_MAX_QUERY_DEPTH, DEFAULT_MAX_READERS, + }, + tracking_copy::TrackingCopy, +}; + +/// Global state implemented against LMDB as a backing data store. +pub struct LmdbGlobalState { + /// Environment for LMDB. + pub(crate) environment: Arc, + /// Trie store held within LMDB. + pub(crate) trie_store: Arc, + /// Empty root hash used for a new trie. + pub(crate) empty_root_hash: Digest, + /// Max query depth + pub max_query_depth: u64, + /// Enable the addressable entity and migrate accounts/contracts to entities. + pub enable_entity: bool, +} + +/// Represents a "view" of global state at a particular root hash. +pub struct LmdbGlobalStateView { + /// Environment for LMDB. + pub(crate) environment: Arc, + /// Trie store held within LMDB. + pub(crate) store: Arc, + /// Root hash of this "view". + pub(crate) root_hash: Digest, +} + +impl LmdbGlobalState { + /// Creates an empty state from an existing environment and trie_store. + pub fn empty( + environment: Arc, + trie_store: Arc, + max_query_depth: u64, + enable_entity: bool, + ) -> Result { + let root_hash: Digest = { + let (root_hash, root) = compute_empty_root_hash()?; + let mut txn = environment.create_read_write_txn()?; + trie_store.put(&mut txn, &root_hash, &root)?; + txn.commit()?; + environment.env().sync(true)?; + root_hash + }; + Ok(LmdbGlobalState::new( + environment, + trie_store, + root_hash, + max_query_depth, + enable_entity, + )) + } + + /// Creates a state from an existing environment, store, and root_hash. + /// Intended to be used for testing. + pub fn new( + environment: Arc, + trie_store: Arc, + empty_root_hash: Digest, + max_query_depth: u64, + enable_entity: bool, + ) -> Self { + LmdbGlobalState { + environment, + trie_store, + empty_root_hash, + max_query_depth, + enable_entity, + } + } + + /// Creates an in-memory cache for changes written. 
+ pub fn create_scratch(&self) -> ScratchGlobalState { + ScratchGlobalState::new( + Arc::clone(&self.environment), + Arc::clone(&self.trie_store), + self.empty_root_hash, + self.max_query_depth, + self.enable_entity, + ) + } + + /// Gets a scratch trie store. + pub(crate) fn get_scratch_store(&self) -> ScratchTrieStore { + ScratchTrieStore::new(Arc::clone(&self.trie_store), Arc::clone(&self.environment)) + } + + /// Write stored values to LMDB. + pub fn put_stored_values( + &self, + prestate_hash: Digest, + stored_values: Vec<(Key, StoredValue)>, + ) -> Result { + let scratch_trie = self.get_scratch_store(); + let new_state_root = put_stored_values::<_, _, GlobalStateError>( + &scratch_trie, + &scratch_trie, + prestate_hash, + stored_values, + )?; + scratch_trie.write_root_to_db(new_state_root)?; + Ok(new_state_root) + } + + /// Get a reference to the lmdb global state's environment. + #[must_use] + pub fn environment(&self) -> &LmdbEnvironment { + &self.environment + } + + /// Get a reference to the lmdb global state's trie store. + #[must_use] + pub fn trie_store(&self) -> &LmdbTrieStore { + &self.trie_store + } + + /// Returns an initial, empty root hash of the underlying trie. + pub fn empty_state_root_hash(&self) -> Digest { + self.empty_root_hash + } +} + +fn compute_empty_root_hash() -> Result<(Digest, Trie), GlobalStateError> { + let (root_hash, root) = create_hashed_empty_trie::()?; + Ok((root_hash, root)) +} + +impl StateReader for LmdbGlobalStateView { + type Error = GlobalStateError; + + fn read(&self, key: &Key) -> Result, Self::Error> { + let txn = self.environment.create_read_txn()?; + let ret = match read::( + &txn, + self.store.deref(), + &self.root_hash, + key, + )? 
{ + ReadResult::Found(value) => Some(value), + ReadResult::NotFound => None, + ReadResult::RootNotFound => panic!("LmdbGlobalState has invalid root"), + }; + txn.commit()?; + Ok(ret) + } + + fn read_with_proof( + &self, + key: &Key, + ) -> Result>, Self::Error> { + let txn = self.environment.create_read_txn()?; + let ret = match read_with_proof::< + Key, + StoredValue, + lmdb::RoTransaction, + LmdbTrieStore, + Self::Error, + >(&txn, self.store.deref(), &self.root_hash, key)? + { + ReadResult::Found(value) => Some(value), + ReadResult::NotFound => None, + ReadResult::RootNotFound => panic!("LmdbGlobalState has invalid root"), + }; + txn.commit()?; + Ok(ret) + } + + fn keys_with_prefix(&self, prefix: &[u8]) -> Result, Self::Error> { + let txn = self.environment.create_read_txn()?; + let keys_iter = keys_with_prefix::( + &txn, + self.store.deref(), + &self.root_hash, + prefix, + ); + let mut ret = Vec::new(); + for result in keys_iter { + match result { + Ok(key) => ret.push(key), + Err(error) => return Err(error), + } + } + txn.commit()?; + Ok(ret) + } +} + +impl CommitProvider for LmdbGlobalState { + fn commit_effects( + &self, + prestate_hash: Digest, + effects: Effects, + ) -> Result { + commit::( + &self.environment, + &self.trie_store, + prestate_hash, + effects, + ) + } + + fn commit_values( + &self, + prestate_hash: Digest, + values_to_write: Vec<(Key, StoredValue)>, + keys_to_prune: std::collections::BTreeSet, + ) -> Result { + let post_write_hash = put_stored_values::( + &self.environment, + &self.trie_store, + prestate_hash, + values_to_write, + )?; + + let mut txn = self.environment.create_read_write_txn()?; + + let maybe_root: Option> = + self.trie_store.get(&txn, &post_write_hash)?; + + if maybe_root.is_none() { + return Err(CommitError::RootNotFound(post_write_hash).into()); + }; + + let mut state_hash = post_write_hash; + + for key in keys_to_prune.into_iter() { + let prune_result = prune::( + &mut txn, + &self.trie_store, + &state_hash, + &key, + )?; 
+ + match prune_result { + TriePruneResult::Pruned(root_hash) => { + state_hash = root_hash; + } + TriePruneResult::MissingKey => { + warn!("commit: pruning attempt failed for {}", key); + } + TriePruneResult::RootNotFound => { + error!(?state_hash, ?key, "commit: root not found"); + return Err(CommitError::WriteRootNotFound(state_hash).into()); + } + TriePruneResult::Failure(gse) => { + return Err(gse); + } + } + } + + txn.commit()?; + + Ok(state_hash) + } +} + +impl StateProvider for LmdbGlobalState { + type Reader = LmdbGlobalStateView; + + fn flush(&self, _: FlushRequest) -> FlushResult { + if self.environment.is_manual_sync_enabled() { + match self.environment.sync() { + Ok(_) => FlushResult::Success, + Err(err) => FlushResult::Failure(err.into()), + } + } else { + FlushResult::ManualSyncDisabled + } + } + + fn checkout(&self, state_hash: Digest) -> Result, GlobalStateError> { + let txn = self.environment.create_read_txn()?; + let maybe_root: Option> = self.trie_store.get(&txn, &state_hash)?; + let maybe_state = maybe_root.map(|_| LmdbGlobalStateView { + environment: Arc::clone(&self.environment), + store: Arc::clone(&self.trie_store), + root_hash: state_hash, + }); + txn.commit()?; + Ok(maybe_state) + } + + fn tracking_copy( + &self, + hash: Digest, + ) -> Result>, GlobalStateError> { + match self.checkout(hash)? 
{ + Some(reader) => Ok(Some(TrackingCopy::new( + reader, + self.max_query_depth, + self.enable_entity, + ))), + None => Ok(None), + } + } + + fn empty_root(&self) -> Digest { + self.empty_root_hash + } + + fn trie(&self, request: TrieRequest) -> TrieResult { + let key = request.trie_key(); + let txn = match self.environment.create_read_txn() { + Ok(ro) => ro, + Err(err) => return TrieResult::Failure(err.into()), + }; + let raw = match Store::>::get_raw( + &*self.trie_store, + &txn, + &key, + ) { + Ok(Some(bytes)) => TrieRaw::new(bytes), + Ok(None) => { + return TrieResult::ValueNotFound(key.to_string()); + } + Err(err) => { + return TrieResult::Failure(err); + } + }; + match txn.commit() { + Ok(_) => match request.chunk_id() { + Some(chunk_id) => TrieResult::Success { + element: TrieElement::Chunked(raw, chunk_id), + }, + None => TrieResult::Success { + element: TrieElement::Raw(raw), + }, + }, + Err(err) => TrieResult::Failure(err.into()), + } + } + + /// Persists a trie element. + fn put_trie(&self, request: PutTrieRequest) -> PutTrieResult { + // We only allow bottom-up persistence of trie elements. + // Thus we do not persist the element unless we already have all of its descendants + // persisted. It is safer to throw away the element and rely on a follow up attempt + // to reacquire it later than to allow it to be persisted which would allow runtime + // access to acquire a root hash that is missing one or more children which will + // result in undefined behavior if a process attempts to access elements below that + // root which are not held locally. 
+        let bytes = request.raw().inner();
+        match self.missing_children(bytes) {
+            Ok(missing_children) => {
+                if !missing_children.is_empty() {
+                    let hash = Digest::hash_into_chunks_if_necessary(bytes);
+                    return PutTrieResult::Failure(GlobalStateError::MissingTrieNodeChildren(
+                        hash,
+                        request.take_raw(),
+                        missing_children,
+                    ));
+                }
+            }
+            Err(err) => return PutTrieResult::Failure(err),
+        };
+
+        match self.environment.create_read_write_txn() {
+            Ok(mut txn) => {
+                match put_trie::<Key, StoredValue, RwTransaction, LmdbTrieStore, GlobalStateError>(
+                    &mut txn,
+                    &self.trie_store,
+                    bytes,
+                ) {
+                    Ok(hash) => match txn.commit() {
+                        Ok(_) => PutTrieResult::Success { hash },
+                        Err(err) => PutTrieResult::Failure(err.into()),
+                    },
+                    Err(err) => PutTrieResult::Failure(err),
+                }
+            }
+            Err(err) => PutTrieResult::Failure(err.into()),
+        }
+    }
+
+    /// Finds all of the keys of missing directly descendant `Trie` values.
+    fn missing_children(&self, trie_raw: &[u8]) -> Result<Vec<Digest>, GlobalStateError> {
+        let txn = self.environment.create_read_txn()?;
+        let missing_hashes = missing_children::<
+            Key,
+            StoredValue,
+            lmdb::RoTransaction,
+            LmdbTrieStore,
+            GlobalStateError,
+        >(&txn, self.trie_store.deref(), trie_raw)?;
+        txn.commit()?;
+        Ok(missing_hashes)
+    }
+
+    fn enable_entity(&self) -> bool {
+        self.enable_entity
+    }
+}
+
+impl ScratchProvider for DataAccessLayer<LmdbGlobalState> {
+    /// Provide a local cached-only version of engine-state.
+    fn get_scratch_global_state(&self) -> ScratchGlobalState {
+        self.state().create_scratch()
+    }
+
+    /// Writes state cached in an `EngineState` to LMDB.
+ fn write_scratch_to_db( + &self, + state_root_hash: Digest, + scratch_global_state: ScratchGlobalState, + ) -> Result { + let (stored_values, keys_to_prune) = scratch_global_state.into_inner(); + let post_state_hash = self + .state() + .put_stored_values(state_root_hash, stored_values)?; + if keys_to_prune.is_empty() { + return Ok(post_state_hash); + } + let prune_keys = keys_to_prune.iter().cloned().collect_vec(); + match self.prune_keys(post_state_hash, &prune_keys) { + TriePruneResult::Pruned(post_state_hash) => Ok(post_state_hash), + TriePruneResult::MissingKey => Err(GlobalStateError::FailedToPrune(prune_keys)), + TriePruneResult::RootNotFound => Err(GlobalStateError::RootNotFound), + TriePruneResult::Failure(gse) => Err(gse), + } + } + + /// Prune keys. + fn prune_keys(&self, mut state_root_hash: Digest, keys: &[Key]) -> TriePruneResult { + let scratch_trie_store = self.state().get_scratch_store(); + + let mut txn = match scratch_trie_store.create_read_write_txn() { + Ok(scratch) => scratch, + Err(gse) => return TriePruneResult::Failure(gse), + }; + + for key in keys { + let prune_results = prune::( + &mut txn, + &scratch_trie_store, + &state_root_hash, + key, + ); + match prune_results { + Ok(TriePruneResult::Pruned(new_root)) => { + state_root_hash = new_root; + } + Ok(TriePruneResult::MissingKey) => continue, // idempotent outcome + Ok(other) => return other, + Err(gse) => return TriePruneResult::Failure(gse), + } + } + + if let Err(gse) = txn.commit() { + return TriePruneResult::Failure(gse); + } + + if let Err(gse) = scratch_trie_store.write_root_to_db(state_root_hash) { + TriePruneResult::Failure(gse) + } else { + TriePruneResult::Pruned(state_root_hash) + } + } +} + +/// Creates prepopulated LMDB global state instance that stores data in a temporary directory. As +/// soon as the `TempDir` instance is dropped all the data stored will be removed from the disk as +/// well. 
+pub fn make_temporary_global_state( + initial_data: impl IntoIterator, +) -> (LmdbGlobalState, Digest, TempDir) { + let tempdir = tempfile::tempdir().expect("should create tempdir"); + + let lmdb_global_state = { + let lmdb_environment = LmdbEnvironment::new( + tempdir.path(), + DEFAULT_MAX_DB_SIZE, + DEFAULT_MAX_READERS, + false, + ) + .expect("should create lmdb environment"); + let lmdb_trie_store = LmdbTrieStore::new(&lmdb_environment, None, DatabaseFlags::default()) + .expect("should create lmdb trie store"); + LmdbGlobalState::empty( + Arc::new(lmdb_environment), + Arc::new(lmdb_trie_store), + DEFAULT_MAX_QUERY_DEPTH, + DEFAULT_ENABLE_ENTITY, + ) + .expect("should create lmdb global state") + }; + + let mut root_hash = lmdb_global_state.empty_root_hash; + + let mut effects = Effects::new(); + + for (key, stored_value) in initial_data { + let transform = TransformV2::new(key.normalize(), TransformKindV2::Write(stored_value)); + effects.push(transform); + } + + root_hash = lmdb_global_state + .commit_effects(root_hash, effects) + .expect("Creation of account should be a success."); + + (lmdb_global_state, root_hash, tempdir) +} + +#[cfg(test)] +mod tests { + use casper_types::{account::AccountHash, execution::TransformKindV2, CLValue, Digest}; + + use crate::global_state::state::scratch::tests::TestPair; + + use super::*; + + fn create_test_pairs() -> Vec<(Key, StoredValue)> { + vec![ + ( + Key::Account(AccountHash::new([1_u8; 32])), + StoredValue::CLValue(CLValue::from_t(1_i32).unwrap()), + ), + ( + Key::Account(AccountHash::new([2_u8; 32])), + StoredValue::CLValue(CLValue::from_t(2_i32).unwrap()), + ), + ] + } + + fn create_test_pairs_updated() -> [TestPair; 3] { + [ + TestPair { + key: Key::Account(AccountHash::new([1u8; 32])), + value: StoredValue::CLValue(CLValue::from_t("one".to_string()).unwrap()), + }, + TestPair { + key: Key::Account(AccountHash::new([2u8; 32])), + value: StoredValue::CLValue(CLValue::from_t("two".to_string()).unwrap()), + }, + 
TestPair { + key: Key::Account(AccountHash::new([3u8; 32])), + value: StoredValue::CLValue(CLValue::from_t(3_i32).unwrap()), + }, + ] + } + + #[test] + fn reads_from_a_checkout_return_expected_values() { + let test_pairs = create_test_pairs(); + let (state, root_hash, _tempdir) = make_temporary_global_state(test_pairs.clone()); + let checkout = state.checkout(root_hash).unwrap().unwrap(); + for (key, value) in test_pairs { + assert_eq!(Some(value), checkout.read(&key).unwrap()); + } + } + + #[test] + fn checkout_fails_if_unknown_hash_is_given() { + let (state, _, _tempdir) = make_temporary_global_state(create_test_pairs()); + let fake_hash: Digest = Digest::hash([1u8; 32]); + let result = state.checkout(fake_hash).unwrap(); + assert!(result.is_none()); + } + + #[test] + fn commit_updates_state() { + let test_pairs_updated = create_test_pairs_updated(); + + let (state, root_hash, _tempdir) = make_temporary_global_state(create_test_pairs()); + + let effects = { + let mut tmp = Effects::new(); + for TestPair { key, value } in &test_pairs_updated { + let transform = TransformV2::new(*key, TransformKindV2::Write(value.clone())); + tmp.push(transform); + } + tmp + }; + + let updated_hash = state.commit_effects(root_hash, effects).unwrap(); + + let updated_checkout = state.checkout(updated_hash).unwrap().unwrap(); + + for TestPair { key, value } in test_pairs_updated.iter().cloned() { + assert_eq!(Some(value), updated_checkout.read(&key).unwrap()); + } + } + + #[test] + fn commit_updates_state_and_original_state_stays_intact() { + let test_pairs_updated = create_test_pairs_updated(); + + let (state, root_hash, _tempdir) = make_temporary_global_state(create_test_pairs()); + + let effects = { + let mut tmp = Effects::new(); + for TestPair { key, value } in &test_pairs_updated { + let transform = TransformV2::new(*key, TransformKindV2::Write(value.clone())); + tmp.push(transform); + } + tmp + }; + + let updated_hash = state.commit_effects(root_hash, effects).unwrap(); + + 
let updated_checkout = state.checkout(updated_hash).unwrap().unwrap(); + for TestPair { key, value } in test_pairs_updated.iter().cloned() { + assert_eq!(Some(value), updated_checkout.read(&key).unwrap()); + } + + let original_checkout = state.checkout(root_hash).unwrap().unwrap(); + for (key, value) in create_test_pairs().iter().cloned() { + assert_eq!(Some(value), original_checkout.read(&key).unwrap()); + } + assert_eq!( + None, + original_checkout.read(&test_pairs_updated[2].key).unwrap() + ); + } +} diff --git a/storage/src/global_state/state/mod.rs b/storage/src/global_state/state/mod.rs new file mode 100644 index 0000000000..af51ca5510 --- /dev/null +++ b/storage/src/global_state/state/mod.rs @@ -0,0 +1,2811 @@ +//! Global state. + +/// Lmdb implementation of global state. +pub mod lmdb; + +/// Lmdb implementation of global state with cache. +pub mod scratch; + +use num_rational::Ratio; +use parking_lot::RwLock; +use std::{ + cell::RefCell, + collections::{BTreeMap, BTreeSet}, + convert::TryFrom, + rc::Rc, + sync::Arc, +}; + +use tracing::{debug, error, info, warn}; + +use casper_types::{ + account::AccountHash, + bytesrepr::{self, ToBytes}, + contracts::NamedKeys, + execution::{Effects, TransformError, TransformInstruction, TransformKindV2, TransformV2}, + global_state::TrieMerkleProof, + system::{ + self, + auction::{ + SeigniorageRecipientsSnapshot, ERA_END_TIMESTAMP_MILLIS_KEY, ERA_ID_KEY, + SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY, SEIGNIORAGE_RECIPIENTS_SNAPSHOT_VERSION_KEY, + }, + mint::{ + BalanceHoldAddr, BalanceHoldAddrTag, ARG_AMOUNT, ROUND_SEIGNIORAGE_RATE_KEY, + TOTAL_SUPPLY_KEY, + }, + AUCTION, HANDLE_PAYMENT, MINT, + }, + Account, AddressableEntity, BlockGlobalAddr, CLValue, Digest, EntityAddr, EntityEntryPoint, + EntryPointAddr, EntryPointValue, HoldsEpoch, Key, KeyTag, Phase, PublicKey, RuntimeArgs, + StoredValue, SystemHashRegistry, U512, +}; + +#[cfg(test)] +pub use self::lmdb::make_temporary_global_state; + +use 
super::trie_store::{operations::batch_write, TrieStoreCacheError}; +use crate::{ + data_access_layer::{ + auction::{AuctionMethodRet, BiddingRequest, BiddingResult}, + balance::BalanceHandling, + era_validators::EraValidatorsResult, + handle_fee::{HandleFeeMode, HandleFeeRequest, HandleFeeResult}, + mint::{ + BurnRequest, BurnRequestArgs, BurnResult, TransferRequest, TransferRequestArgs, + TransferResult, + }, + prefixed_values::{PrefixedValuesRequest, PrefixedValuesResult}, + tagged_values::{TaggedValuesRequest, TaggedValuesResult}, + AddressableEntityRequest, AddressableEntityResult, AuctionMethod, BalanceHoldError, + BalanceHoldKind, BalanceHoldMode, BalanceHoldRequest, BalanceHoldResult, BalanceIdentifier, + BalanceIdentifierPurseRequest, BalanceIdentifierPurseResult, BalanceRequest, BalanceResult, + BidsRequest, BidsResult, BlockGlobalKind, BlockGlobalRequest, BlockGlobalResult, + BlockRewardsError, BlockRewardsRequest, BlockRewardsResult, ContractRequest, + ContractResult, EntryPointExistsRequest, EntryPointExistsResult, EntryPointRequest, + EntryPointResult, EraValidatorsRequest, ExecutionResultsChecksumRequest, + ExecutionResultsChecksumResult, FeeError, FeeRequest, FeeResult, FlushRequest, FlushResult, + GenesisRequest, GenesisResult, HandleRefundMode, HandleRefundRequest, HandleRefundResult, + InsufficientBalanceHandling, MessageTopicsRequest, MessageTopicsResult, ProofHandling, + ProofsResult, ProtocolUpgradeRequest, ProtocolUpgradeResult, PruneRequest, PruneResult, + PutTrieRequest, PutTrieResult, QueryRequest, QueryResult, RoundSeigniorageRateRequest, + RoundSeigniorageRateResult, SeigniorageRecipientsRequest, SeigniorageRecipientsResult, + StepError, StepRequest, StepResult, SystemEntityRegistryPayload, + SystemEntityRegistryRequest, SystemEntityRegistryResult, SystemEntityRegistrySelector, + TotalSupplyRequest, TotalSupplyResult, TrieRequest, TrieResult, + EXECUTION_RESULTS_CHECKSUM_NAME, + }, + global_state::{ + error::Error as GlobalStateError, + 
state::scratch::ScratchGlobalState,
+        transaction_source::{Transaction, TransactionSource},
+        trie::Trie,
+        trie_store::{
+            operations::{prune, read, write, ReadResult, TriePruneResult, WriteResult},
+            TrieStore,
+        },
+    },
+    system::{
+        auction::{self, Auction},
+        burn::{BurnError, BurnRuntimeArgsBuilder},
+        genesis::{GenesisError, GenesisInstaller},
+        handle_payment::HandlePayment,
+        mint::Mint,
+        protocol_upgrade::{ProtocolUpgradeError, ProtocolUpgrader},
+        runtime_native::{Id, RuntimeNative},
+        transfer::{TransferArgs, TransferError, TransferRuntimeArgsBuilder, TransferTargetMode},
+    },
+    tracking_copy::{TrackingCopy, TrackingCopyEntityExt, TrackingCopyError, TrackingCopyExt},
+    AddressGenerator,
+};
+
+/// A trait expressing the reading of state. This trait is used to abstract the underlying store.
+pub trait StateReader<K, V>: Sized + Send + Sync {
+    /// An error which occurs when reading state
+    type Error;
+
+    /// Returns the state value from the corresponding key
+    fn read(&self, key: &K) -> Result<Option<V>, Self::Error>;
+
+    /// Returns the merkle proof of the state value from the corresponding key
+    fn read_with_proof(&self, key: &K) -> Result<Option<TrieMerkleProof<K, V>>, Self::Error>;
+
+    /// Returns the keys in the trie matching `prefix`.
+    fn keys_with_prefix(&self, prefix: &[u8]) -> Result<Vec<K>, Self::Error>;
+}
+
+/// An error emitted by the execution engine on commit
+#[derive(Clone, Debug, thiserror::Error, Eq, PartialEq)]
+pub enum CommitError {
+    /// Root not found.
+    #[error("Root not found: {0:?}")]
+    RootNotFound(Digest),
+    /// Root not found while attempting to read.
+    #[error("Root not found while attempting to read: {0:?}")]
+    ReadRootNotFound(Digest),
+    /// Root not found while attempting to write.
+    #[error("Root not found while writing: {0:?}")]
+    WriteRootNotFound(Digest),
+    /// Key not found.
+    #[error("Key not found: {0}")]
+    KeyNotFound(Key),
+    /// Transform error.
+    #[error(transparent)]
+    TransformError(TransformError),
+    /// Trie not found while attempting to validate cache write.
+    #[error("Trie not found in cache {0}")]
+    TrieNotFoundInCache(Digest),
+}
+
+/// Scratch provider.
+pub trait ScratchProvider: CommitProvider {
+    /// Get scratch state to db.
+    fn get_scratch_global_state(&self) -> ScratchGlobalState;
+    /// Write scratch state to db.
+    fn write_scratch_to_db(
+        &self,
+        state_root_hash: Digest,
+        scratch_global_state: ScratchGlobalState,
+    ) -> Result<Digest, GlobalStateError>;
+    /// Prune items for imputed keys.
+    fn prune_keys(&self, state_root_hash: Digest, keys: &[Key]) -> TriePruneResult;
+}
+
+/// Provides `commit` method.
+pub trait CommitProvider: StateProvider {
+    /// Applies changes and returns a new post state hash.
+    /// block_hash is used for computing deterministic and unique keys.
+    fn commit_effects(
+        &self,
+        state_hash: Digest,
+        effects: Effects,
+    ) -> Result<Digest, GlobalStateError>;
+
+    /// Commit values to global state.
+    fn commit_values(
+        &self,
+        state_hash: Digest,
+        values_to_write: Vec<(Key, StoredValue)>,
+        keys_to_prune: BTreeSet<Key>,
+    ) -> Result<Digest, GlobalStateError>;
+
+    /// Runs and commits the genesis process, once per network.
+ fn genesis(&self, request: GenesisRequest) -> GenesisResult { + let initial_root = self.empty_root(); + let tc = match self.tracking_copy(initial_root) { + Ok(Some(tc)) => Rc::new(RefCell::new(tc)), + Ok(None) => return GenesisResult::Fatal("state uninitialized".to_string()), + Err(err) => { + return GenesisResult::Failure(GenesisError::TrackingCopy( + TrackingCopyError::Storage(err), + )); + } + }; + let chainspec_hash = request.chainspec_hash(); + let protocol_version = request.protocol_version(); + let config = request.config(); + + let mut genesis_installer: GenesisInstaller = + GenesisInstaller::new(chainspec_hash, protocol_version, config.clone(), tc); + + let chainspec_registry = request.chainspec_registry(); + if let Err(gen_err) = genesis_installer.install(chainspec_registry.clone()) { + return GenesisResult::Failure(*gen_err); + } + + let effects = genesis_installer.finalize(); + match self.commit_effects(initial_root, effects.clone()) { + Ok(post_state_hash) => GenesisResult::Success { + post_state_hash, + effects, + }, + Err(err) => { + GenesisResult::Failure(GenesisError::TrackingCopy(TrackingCopyError::Storage(err))) + } + } + } + + /// Runs and commits the protocol upgrade process. 
+    fn protocol_upgrade(&self, request: ProtocolUpgradeRequest) -> ProtocolUpgradeResult {
+        let pre_state_hash = request.pre_state_hash();
+        let tc = match self.tracking_copy(pre_state_hash) {
+            Ok(Some(tc)) => tc,
+            Ok(None) => return ProtocolUpgradeResult::RootNotFound,
+            Err(err) => {
+                return ProtocolUpgradeResult::Failure(ProtocolUpgradeError::TrackingCopy(
+                    TrackingCopyError::Storage(err),
+                ));
+            }
+        };
+
+        let protocol_upgrader: ProtocolUpgrader =
+            ProtocolUpgrader::new(request.config().clone(), pre_state_hash, tc);
+
+        let post_upgrade_tc = match protocol_upgrader.upgrade(pre_state_hash) {
+            Err(e) => return e.into(),
+            Ok(tc) => tc,
+        };
+
+        // Upgrades can both write and prune, so commit via commit_values rather
+        // than commit_effects.
+        let (writes, prunes, effects) = post_upgrade_tc.destructure();
+
+        // commit
+        match self.commit_values(pre_state_hash, writes, prunes) {
+            Ok(post_state_hash) => ProtocolUpgradeResult::Success {
+                post_state_hash,
+                effects,
+            },
+            Err(err) => ProtocolUpgradeResult::Failure(ProtocolUpgradeError::TrackingCopy(
+                TrackingCopyError::Storage(err),
+            )),
+        }
+    }
+
+    /// Safely prune specified keys from global state, using a tracking copy.
+    fn prune(&self, request: PruneRequest) -> PruneResult {
+        let pre_state_hash = request.state_hash();
+        let tc = match self.tracking_copy(pre_state_hash) {
+            Ok(Some(tc)) => Rc::new(RefCell::new(tc)),
+            Ok(None) => return PruneResult::RootNotFound,
+            Err(err) => return PruneResult::Failure(TrackingCopyError::Storage(err)),
+        };
+
+        let keys_to_delete = request.keys_to_prune();
+        if keys_to_delete.is_empty() {
+            // effectively a noop
+            return PruneResult::Success {
+                post_state_hash: pre_state_hash,
+                effects: Effects::default(),
+            };
+        }
+
+        // Record a prune for each requested key; the actual removal happens when
+        // the accumulated effects are committed below.
+        for key in keys_to_delete {
+            tc.borrow_mut().prune(*key)
+        }
+
+        let effects = tc.borrow().effects();
+
+        match self.commit_effects(pre_state_hash, effects.clone()) {
+            Ok(post_state_hash) => PruneResult::Success {
+                post_state_hash,
+                effects,
+            },
+            Err(tce) => PruneResult::Failure(tce.into()),
+        }
+    }
+
+    /// Step auction state at era end.
+    fn step(&self, request: StepRequest) -> StepResult {
+        let state_hash = request.state_hash();
+        let tc = match self.tracking_copy(state_hash) {
+            Ok(Some(tc)) => Rc::new(RefCell::new(tc)),
+            Ok(None) => return StepResult::RootNotFound,
+            Err(err) => {
+                return StepResult::Failure(StepError::TrackingCopy(TrackingCopyError::Storage(
+                    err,
+                )));
+            }
+        };
+        let protocol_version = request.protocol_version();
+
+        let seed = {
+            // seeds address generator w/ era_end_timestamp_millis
+            // (plus protocol version and next era id, appended below)
+            let mut bytes = match request.era_end_timestamp_millis().into_bytes() {
+                Ok(bytes) => bytes,
+                Err(bre) => {
+                    return StepResult::Failure(StepError::TrackingCopy(
+                        TrackingCopyError::BytesRepr(bre),
+                    ));
+                }
+            };
+            match &mut protocol_version.into_bytes() {
+                Ok(next) => bytes.append(next),
+                Err(bre) => {
+                    return StepResult::Failure(StepError::TrackingCopy(
+                        TrackingCopyError::BytesRepr(*bre),
+                    ));
+                }
+            };
+            match &mut request.next_era_id().into_bytes() {
+                Ok(next) => bytes.append(next),
+                Err(bre) => {
+                    return StepResult::Failure(StepError::TrackingCopy(
+                        TrackingCopyError::BytesRepr(*bre),
+                    ));
+                }
+            };
+
+            Id::Seed(bytes)
+        };
+
+        let config = request.config();
+        // this runtime uses the system's context
+        let phase = Phase::Session;
+        let address_generator = AddressGenerator::new(&seed.seed(), phase);
+        let mut runtime = match RuntimeNative::new_system_runtime(
+            config.clone(),
+            protocol_version,
+            seed,
+            Arc::new(RwLock::new(address_generator)),
+            Rc::clone(&tc),
+            phase,
+        ) {
+            Ok(rt) => rt,
+            Err(tce) => return StepResult::Failure(StepError::TrackingCopy(tce)),
+        };
+
+        // Slash first, then run the auction, so slashed validators are excluded.
+        let slashed_validators: Vec = request.slashed_validators();
+        if !slashed_validators.is_empty() {
+            if let Err(err) = runtime.slash(slashed_validators) {
+                error!("{}", err);
+                return StepResult::Failure(StepError::SlashingError);
+            }
+        }
+
+        let era_end_timestamp_millis = request.era_end_timestamp_millis();
+        let evicted_validators = request
+            .evict_items()
+            .iter()
+            .map(|item| item.validator_id.clone())
+            .collect::>();
+        let max_delegators_per_validator = config.max_delegators_per_validator();
+        let include_credits = config.include_credits();
+        let credit_cap = config.credit_cap();
+        let minimum_bid_amount = config.minimum_bid_amount();
+
+        if let Err(err) = runtime.run_auction(
+            era_end_timestamp_millis,
+            evicted_validators,
+            max_delegators_per_validator,
+            include_credits,
+            credit_cap,
+            minimum_bid_amount,
+        ) {
+            error!("{}", err);
+            return StepResult::Failure(StepError::Auction);
+        }
+
+        let effects = tc.borrow().effects();
+
+        match self.commit_effects(state_hash, effects.clone()) {
+            Ok(post_state_hash) => StepResult::Success {
+                post_state_hash,
+                effects,
+            },
+            Err(gse) => StepResult::Failure(gse.into()),
+        }
+    }
+
+    /// Distribute block rewards.
+    fn distribute_block_rewards(&self, request: BlockRewardsRequest) -> BlockRewardsResult {
+        let state_hash = request.state_hash();
+        let rewards = request.rewards();
+        if rewards.is_empty() {
+            info!("rewards are empty");
+            // if there are no rewards to distribute, this is effectively a noop
+            return BlockRewardsResult::Success {
+                post_state_hash: state_hash,
+                effects: Effects::new(),
+            };
+        }
+
+        let tc = match self.tracking_copy(state_hash) {
+            Ok(Some(tc)) => Rc::new(RefCell::new(tc)),
+            Ok(None) => return BlockRewardsResult::RootNotFound,
+            Err(err) => {
+                return BlockRewardsResult::Failure(BlockRewardsError::TrackingCopy(
+                    TrackingCopyError::Storage(err),
+                ));
+            }
+        };
+
+        let config = request.config();
+        let protocol_version = request.protocol_version();
+        // Seed the address generator deterministically from block time + protocol version.
+        let seed = {
+            let mut bytes = match request.block_time().into_bytes() {
+                Ok(bytes) => bytes,
+                Err(bre) => {
+                    return BlockRewardsResult::Failure(BlockRewardsError::TrackingCopy(
+                        TrackingCopyError::BytesRepr(bre),
+                    ));
+                }
+            };
+            match &mut protocol_version.into_bytes() {
+                Ok(next) => bytes.append(next),
+                Err(bre) => {
+                    return BlockRewardsResult::Failure(BlockRewardsError::TrackingCopy(
+                        TrackingCopyError::BytesRepr(*bre),
+                    ));
+                }
+            };
+
+            Id::Seed(bytes)
+        };
+
+        // this runtime uses the system's context
+        let phase = Phase::Session;
+        let address_generator = AddressGenerator::new(&seed.seed(), phase);
+
+        let mut runtime = match RuntimeNative::new_system_runtime(
+            config.clone(),
+            protocol_version,
+            seed,
+            Arc::new(RwLock::new(address_generator)),
+            Rc::clone(&tc),
+            phase,
+        ) {
+            Ok(rt) => rt,
+            Err(tce) => {
+                return BlockRewardsResult::Failure(BlockRewardsError::TrackingCopy(tce));
+            }
+        };
+
+        if let Err(auction_error) = runtime.distribute(rewards.clone()) {
+            error!(
+                "distribute block rewards failed due to auction error {:?}",
+                auction_error
+            );
+            return BlockRewardsResult::Failure(BlockRewardsError::Auction(auction_error));
+        } else {
+            debug!("rewards distribution complete");
+        }
+
+        let effects = tc.borrow().effects();
+
+        match self.commit_effects(state_hash, effects.clone()) {
+            Ok(post_state_hash) => {
+                debug!("reward distribution committed");
+                BlockRewardsResult::Success {
+                    post_state_hash,
+                    effects,
+                }
+            }
+            Err(gse) => BlockRewardsResult::Failure(BlockRewardsError::TrackingCopy(
+                TrackingCopyError::Storage(gse),
+            )),
+        }
+    }
+
+    /// Distribute fees, if relevant to the chainspec configured behavior.
+    fn distribute_fees(&self, request: FeeRequest) -> FeeResult {
+        let state_hash = request.state_hash();
+        if !request.should_distribute_fees() {
+            // effectively noop
+            return FeeResult::Success {
+                post_state_hash: state_hash,
+                effects: Effects::new(),
+                transfers: vec![],
+            };
+        }
+
+        let tc = match self.tracking_copy(state_hash) {
+            Ok(Some(tracking_copy)) => Rc::new(RefCell::new(tracking_copy)),
+            Ok(None) => return FeeResult::RootNotFound,
+            Err(gse) => {
+                return FeeResult::Failure(FeeError::TrackingCopy(TrackingCopyError::Storage(gse)));
+            }
+        };
+
+        let config = request.config();
+        let protocol_version = request.protocol_version();
+        // Seed the address generator deterministically from block time + protocol version.
+        let seed = {
+            let mut bytes = match request.block_time().into_bytes() {
+                Ok(bytes) => bytes,
+                Err(bre) => {
+                    return FeeResult::Failure(FeeError::TrackingCopy(
+                        TrackingCopyError::BytesRepr(bre),
+                    ));
+                }
+            };
+            match &mut protocol_version.into_bytes() {
+                Ok(next) => bytes.append(next),
+                Err(bre) => {
+                    return FeeResult::Failure(FeeError::TrackingCopy(
+                        TrackingCopyError::BytesRepr(*bre),
+                    ));
+                }
+            };
+
+            Id::Seed(bytes)
+        };
+
+        // this runtime uses the system's context
+        let phase = Phase::System;
+        let address_generator = AddressGenerator::new(&seed.seed(), phase);
+        let mut runtime = match RuntimeNative::new_system_runtime(
+            config.clone(),
+            protocol_version,
+            seed,
+            Arc::new(RwLock::new(address_generator)),
+            Rc::clone(&tc),
+            phase,
+        ) {
+            Ok(rt) => rt,
+            Err(tce) => {
+                return FeeResult::Failure(FeeError::TrackingCopy(tce));
+            }
+        };
+
+        let source = BalanceIdentifier::Accumulate;
+        let source_purse = match source.purse_uref(&mut tc.borrow_mut(), protocol_version) {
+            Ok(value) => value,
+            Err(tce) => return FeeResult::Failure(FeeError::TrackingCopy(tce)),
+        };
+        // amount = None will distribute the full current balance of the accumulation purse
+        let result = runtime.distribute_accumulated_fees(source_purse, None);
+
+        match result {
+            Ok(_) => {
+                let effects = tc.borrow_mut().effects();
+                let transfers = runtime.into_transfers();
+                let post_state_hash = match self.commit_effects(state_hash, effects.clone()) {
+                    Ok(post_state_hash) => post_state_hash,
+                    Err(gse) => {
+                        return FeeResult::Failure(FeeError::TrackingCopy(
+                            TrackingCopyError::Storage(gse),
+                        ));
+                    }
+                };
+                FeeResult::Success {
+                    effects,
+                    transfers,
+                    post_state_hash,
+                }
+            }
+            Err(hpe) => FeeResult::Failure(FeeError::TrackingCopy(
+                TrackingCopyError::SystemContract(system::Error::HandlePayment(hpe)),
+            )),
+        }
+    }
+
+    /// Writes block global data (block time, message count, protocol version,
+    /// addressable-entity flag) under the corresponding `Key::BlockGlobal` keys
+    /// and commits the resulting effects.
+    fn block_global(&self, request: BlockGlobalRequest) -> BlockGlobalResult {
+        let state_hash = request.state_hash();
+        let tc = match self.tracking_copy(state_hash) {
+            Ok(Some(tracking_copy)) => Rc::new(RefCell::new(tracking_copy)),
+            Ok(None) => return BlockGlobalResult::RootNotFound,
+            Err(gse) => return BlockGlobalResult::Failure(TrackingCopyError::Storage(gse)),
+        };
+
+        // match request
+        match request.block_global_kind() {
+            BlockGlobalKind::BlockTime(block_time) => {
+                let cl_value =
+                    match CLValue::from_t(block_time.value()).map_err(TrackingCopyError::CLValue) {
+                        Ok(cl_value) => cl_value,
+                        Err(tce) => {
+                            return BlockGlobalResult::Failure(tce);
+                        }
+                    };
+                tc.borrow_mut().write(
+                    Key::BlockGlobal(BlockGlobalAddr::BlockTime),
+                    StoredValue::CLValue(cl_value),
+                );
+            }
+            BlockGlobalKind::MessageCount(count) => {
+                let cl_value = match CLValue::from_t(count).map_err(TrackingCopyError::CLValue) {
+                    Ok(cl_value) => cl_value,
+                    Err(tce) => {
+                        return BlockGlobalResult::Failure(tce);
+                    }
+                };
+                tc.borrow_mut().write(
+                    Key::BlockGlobal(BlockGlobalAddr::MessageCount),
+                    StoredValue::CLValue(cl_value),
+                );
+            }
+            BlockGlobalKind::ProtocolVersion(protocol_version) => {
+                let cl_value = match CLValue::from_t(protocol_version.destructure())
+                    .map_err(TrackingCopyError::CLValue)
+                {
+                    Ok(cl_value) => cl_value,
+                    Err(tce) => {
+                        return BlockGlobalResult::Failure(tce);
+                    }
+                };
+                tc.borrow_mut().write(
+                    Key::BlockGlobal(BlockGlobalAddr::ProtocolVersion),
+                    StoredValue::CLValue(cl_value),
+                );
+            }
+            BlockGlobalKind::AddressableEntity(addressable_entity) => {
+                let cl_value =
+                    match CLValue::from_t(addressable_entity).map_err(TrackingCopyError::CLValue) {
+                        Ok(cl_value) => cl_value,
+                        Err(tce) => {
+                            return BlockGlobalResult::Failure(tce);
+                        }
+                    };
+                tc.borrow_mut().write(
+                    Key::BlockGlobal(BlockGlobalAddr::AddressableEntity),
+                    StoredValue::CLValue(cl_value),
+                );
+            }
+        }
+
+        let effects = tc.borrow_mut().effects();
+
+        let post_state_hash = match self.commit_effects(state_hash, effects.clone()) {
+            Ok(post_state_hash) => post_state_hash,
+            Err(gse) => return BlockGlobalResult::Failure(TrackingCopyError::Storage(gse)),
+        };
+
+        BlockGlobalResult::Success {
+            post_state_hash,
+            effects: Box::new(effects),
+        }
+    }
+}
+
+/// A trait expressing operations over the trie.
+pub trait StateProvider: Send + Sync + Sized {
+    /// Associated reader type for `StateProvider`.
+    type Reader: StateReader;
+
+    /// Flush the state provider.
+    fn flush(&self, request: FlushRequest) -> FlushResult;
+
+    /// Returns an empty root hash.
+    fn empty_root(&self) -> Digest;
+
+    /// Get a tracking copy rooted at `state_hash`; `Ok(None)` means the root
+    /// was not found.
+    fn tracking_copy(
+        &self,
+        state_hash: Digest,
+    ) -> Result>, GlobalStateError>;
+
+    /// Checkouts a slice of initial state using root state hash.
+    fn checkout(&self, state_hash: Digest) -> Result, GlobalStateError>;
+
+    /// Query state under `key` following `path`, relative to `state_hash`.
+    fn query(&self, request: QueryRequest) -> QueryResult {
+        match self.tracking_copy(request.state_hash()) {
+            Ok(Some(tc)) => match tc.query(request.key(), request.path()) {
+                Ok(ret) => ret.into(),
+                Err(err) => QueryResult::Failure(err),
+            },
+            Ok(None) => QueryResult::RootNotFound,
+            Err(err) => QueryResult::Failure(TrackingCopyError::Storage(err)),
+        }
+    }
+
+    /// Message topics request.
+    fn message_topics(&self, message_topics_request: MessageTopicsRequest) -> MessageTopicsResult {
+        let tc = match self.tracking_copy(message_topics_request.state_hash()) {
+            Ok(Some(tracking_copy)) => tracking_copy,
+            Ok(None) => return MessageTopicsResult::RootNotFound,
+            Err(err) => return MessageTopicsResult::Failure(err.into()),
+        };
+
+        match tc.get_message_topics(message_topics_request.entity_addr()) {
+            Ok(message_topics) => MessageTopicsResult::Success { message_topics },
+            Err(tce) => MessageTopicsResult::Failure(tce),
+        }
+    }
+
+    /// Provides the underlying addr for the imputed balance identifier.
+    fn balance_purse(
+        &self,
+        request: BalanceIdentifierPurseRequest,
+    ) -> BalanceIdentifierPurseResult {
+        let mut tc = match self.tracking_copy(request.state_hash()) {
+            Ok(Some(tracking_copy)) => tracking_copy,
+            Ok(None) => return BalanceIdentifierPurseResult::RootNotFound,
+            Err(err) => return TrackingCopyError::Storage(err).into(),
+        };
+        let balance_identifier = request.identifier();
+        let protocol_version = request.protocol_version();
+        // Resolve the identifier to a purse URef and return only its address part.
+        match balance_identifier.purse_uref(&mut tc, protocol_version) {
+            Ok(uref) => BalanceIdentifierPurseResult::Success {
+                purse_addr: uref.addr(),
+            },
+            Err(tce) => BalanceIdentifierPurseResult::Failure(tce),
+        }
+    }
+
+    /// Balance inquiry.
+    fn balance(&self, request: BalanceRequest) -> BalanceResult {
+        let mut tc = match self.tracking_copy(request.state_hash()) {
+            Ok(Some(tracking_copy)) => tracking_copy,
+            Ok(None) => return BalanceResult::RootNotFound,
+            Err(err) => return TrackingCopyError::Storage(err).into(),
+        };
+        let protocol_version = request.protocol_version();
+        let balance_identifier = request.identifier();
+        let purse_key = match balance_identifier.purse_uref(&mut tc, protocol_version) {
+            Ok(value) => value.into(),
+            Err(tce) => return tce.into(),
+        };
+        // The balance key must be the Key::Balance variant; anything else is a bug.
+        let (purse_balance_key, purse_addr) = match tc.get_purse_balance_key(purse_key) {
+            Ok(key @ Key::Balance(addr)) => (key, addr),
+            Ok(key) => return TrackingCopyError::UnexpectedKeyVariant(key).into(),
+            Err(tce) => return tce.into(),
+        };
+
+        // Read the total balance (with or without merkle proofs, per the request),
+        // plus any balance holds needed to compute the available balance.
+        let (total_balance, proofs_result) = match request.proof_handling() {
+            ProofHandling::NoProofs => {
+                let total_balance = match tc.read(&purse_balance_key) {
+                    Ok(Some(StoredValue::CLValue(cl_value))) => match cl_value.into_t::() {
+                        Ok(val) => val,
+                        Err(cve) => return TrackingCopyError::CLValue(cve).into(),
+                    },
+                    Ok(Some(_)) => return TrackingCopyError::UnexpectedStoredValueVariant.into(),
+                    Ok(None) => return TrackingCopyError::KeyNotFound(purse_balance_key).into(),
+                    Err(tce) => return tce.into(),
+                };
+                let balance_holds = match request.balance_handling() {
+                    BalanceHandling::Total => BTreeMap::new(),
+                    BalanceHandling::Available => {
+                        match tc.get_balance_hold_config(BalanceHoldAddrTag::Gas) {
+                            Ok(Some((block_time, _, interval))) => {
+                                match tc.get_balance_holds(purse_addr, block_time, interval) {
+                                    Ok(holds) => holds,
+                                    Err(tce) => return tce.into(),
+                                }
+                            }
+                            Ok(None) => BTreeMap::new(),
+                            Err(tce) => return tce.into(),
+                        }
+                    }
+                };
+                (total_balance, ProofsResult::NotRequested { balance_holds })
+            }
+            ProofHandling::Proofs => {
+                let (total_balance, total_balance_proof) =
+                    match tc.get_total_balance_with_proof(purse_balance_key) {
+                        Ok((balance, proof)) => (balance, Box::new(proof)),
+                        Err(tce) => return tce.into(),
+                    };
+
+                let balance_holds = match request.balance_handling() {
+                    BalanceHandling::Total => BTreeMap::new(),
+                    BalanceHandling::Available => {
+                        match tc.get_balance_holds_with_proof(purse_addr) {
+                            Ok(holds) => holds,
+                            Err(tce) => return tce.into(),
+                        }
+                    }
+                };
+
+                (
+                    total_balance,
+                    ProofsResult::Proofs {
+                        total_balance_proof,
+                        balance_holds,
+                    },
+                )
+            }
+        };
+
+        // If no gas-hold config exists, no holds apply: available == total.
+        let (block_time, gas_hold_handling) = match tc
+            .get_balance_hold_config(BalanceHoldAddrTag::Gas)
+        {
+            Ok(Some((block_time, handling, interval))) => (block_time, (handling, interval).into()),
+            Ok(None) => {
+                return BalanceResult::Success {
+                    purse_addr,
+                    total_balance,
+                    available_balance: total_balance,
+                    proofs_result,
+                };
+            }
+            Err(tce) => return tce.into(),
+        };
+
+        // Same short-circuit for the processing-hold config.
+        let processing_hold_handling =
+            match tc.get_balance_hold_config(BalanceHoldAddrTag::Processing) {
+                Ok(Some((_, handling, interval))) => (handling, interval).into(),
+                Ok(None) => {
+                    return BalanceResult::Success {
+                        purse_addr,
+                        total_balance,
+                        available_balance: total_balance,
+                        proofs_result,
+                    };
+                }
+                Err(tce) => return tce.into(),
+            };
+
+        let available_balance = match &proofs_result.available_balance(
+            block_time,
+            total_balance,
+            gas_hold_handling,
+            processing_hold_handling,
+        ) {
+            Ok(available_balance) => *available_balance,
+            Err(be) => return BalanceResult::Failure(TrackingCopyError::Balance(be.clone())),
+        };
+
+        BalanceResult::Success {
+            purse_addr,
+            total_balance,
+            available_balance,
+            proofs_result,
+        }
+    }
+
+    /// Balance hold.
+    fn balance_hold(&self, request: BalanceHoldRequest) -> BalanceHoldResult {
+        let mut tc = match self.tracking_copy(request.state_hash()) {
+            Ok(Some(tracking_copy)) => tracking_copy,
+            Ok(None) => return BalanceHoldResult::RootNotFound,
+            Err(err) => {
+                return BalanceHoldResult::Failure(BalanceHoldError::TrackingCopy(
+                    TrackingCopyError::Storage(err),
+                ));
+            }
+        };
+        let hold_mode = request.balance_hold_mode();
+        match hold_mode {
+            // Place a new hold (gas or processing) against the identified purse.
+            BalanceHoldMode::Hold {
+                identifier,
+                hold_amount,
+                insufficient_handling,
+            } => {
+                let block_time = match tc.get_block_time() {
+                    Ok(Some(block_time)) => block_time,
+                    Ok(None) => return BalanceHoldResult::BlockTimeNotFound,
+                    Err(tce) => return tce.into(),
+                };
+                // A concrete hold tag is required when creating a hold; the
+                // wildcard kind is only meaningful when clearing.
+                let tag = match request.balance_hold_kind() {
+                    BalanceHoldKind::All => {
+                        return BalanceHoldResult::Failure(
+                            BalanceHoldError::UnexpectedWildcardVariant,
+                        );
+                    }
+                    BalanceHoldKind::Tag(tag) => tag,
+                };
+                let balance_request = BalanceRequest::new(
+                    request.state_hash(),
+                    request.protocol_version(),
+                    identifier,
+                    BalanceHandling::Available,
+                    ProofHandling::NoProofs,
+                );
+                let balance_result = self.balance(balance_request);
+                let (total_balance, remaining_balance, purse_addr) = match balance_result {
+                    BalanceResult::RootNotFound => return BalanceHoldResult::RootNotFound,
+                    BalanceResult::Failure(be) => return be.into(),
+                    BalanceResult::Success {
+                        total_balance,
+                        available_balance,
+                        purse_addr,
+                        ..
+                    } => (total_balance, available_balance, purse_addr),
+                };
+
+                let held_amount = {
+                    if remaining_balance >= hold_amount {
+                        // the purse has sufficient balance to fully cover the hold
+                        hold_amount
+                    } else if insufficient_handling == InsufficientBalanceHandling::Noop {
+                        // the purse has insufficient balance and the insufficient
+                        // balance handling mode is noop, so get out
+                        return BalanceHoldResult::Failure(BalanceHoldError::InsufficientBalance {
+                            remaining_balance,
+                        });
+                    } else {
+                        // currently this is always the default HoldRemaining variant.
+                        // the purse holder has insufficient balance to cover the hold,
+                        // but the system will put a hold on whatever balance remains.
+                        // this is basically punitive to block an edge case resource consumption
+                        // attack whereby a malicious purse holder drains a balance to not-zero
+                        // but not-enough-to-cover-holds and then spams a bunch of transactions
+                        // knowing that they will fail due to insufficient funds, but only
+                        // after making the system do the work of processing the balance
+                        // check without penalty to themselves.
+                        remaining_balance
+                    }
+                };
+
+                let balance_hold_addr = match tag {
+                    BalanceHoldAddrTag::Gas => BalanceHoldAddr::Gas {
+                        purse_addr,
+                        block_time,
+                    },
+                    BalanceHoldAddrTag::Processing => BalanceHoldAddr::Processing {
+                        purse_addr,
+                        block_time,
+                    },
+                };
+
+                let hold_key = Key::BalanceHold(balance_hold_addr);
+                let hold_value = match tc.get(&hold_key) {
+                    Ok(Some(StoredValue::CLValue(cl_value))) => {
+                        // There was a previous hold on this balance. We need to add the new hold to
+                        // the old one.
+                        match cl_value.clone().into_t::() {
+                            Ok(prev_hold) => prev_hold.saturating_add(held_amount),
+                            Err(cve) => {
+                                return BalanceHoldResult::Failure(BalanceHoldError::TrackingCopy(
+                                    TrackingCopyError::CLValue(cve),
+                                ));
+                            }
+                        }
+                    }
+                    Ok(Some(other_value_variant)) => {
+                        return BalanceHoldResult::Failure(BalanceHoldError::UnexpectedHoldValue(
+                            other_value_variant,
+                        ))
+                    }
+                    Ok(None) => held_amount, // There was no previous hold.
+                    Err(tce) => {
+                        return BalanceHoldResult::Failure(BalanceHoldError::TrackingCopy(tce));
+                    }
+                };
+
+                let hold_cl_value = match CLValue::from_t(hold_value) {
+                    Ok(cl_value) => cl_value,
+                    Err(cve) => {
+                        return BalanceHoldResult::Failure(BalanceHoldError::TrackingCopy(
+                            TrackingCopyError::CLValue(cve),
+                        ));
+                    }
+                };
+                tc.write(hold_key, StoredValue::CLValue(hold_cl_value));
+                let holds = vec![balance_hold_addr];
+
+                let available_balance = remaining_balance.saturating_sub(held_amount);
+                let effects = tc.effects();
+                BalanceHoldResult::success(
+                    Some(holds),
+                    total_balance,
+                    available_balance,
+                    hold_amount,
+                    held_amount,
+                    effects,
+                )
+            }
+            // Clear expired holds (processing and/or gas, per the requested kind)
+            // for the identified purse, then report the refreshed balances.
+            BalanceHoldMode::Clear { identifier } => {
+                let purse_addr = match identifier.purse_uref(&mut tc, request.protocol_version()) {
+                    Ok(source_purse) => source_purse.addr(),
+                    Err(tce) => {
+                        return BalanceHoldResult::Failure(BalanceHoldError::TrackingCopy(tce));
+                    }
+                };
+
+                {
+                    // clear holds
+                    let hold_kind = request.balance_hold_kind();
+                    let mut filter = vec![];
+                    let tag = BalanceHoldAddrTag::Processing;
+                    if hold_kind.matches(tag) {
+                        let (block_time, interval) = match tc.get_balance_hold_config(tag) {
+                            Ok(Some((block_time, _, interval))) => (block_time, interval),
+                            Ok(None) => {
+                                return BalanceHoldResult::BlockTimeNotFound;
+                            }
+                            Err(tce) => {
+                                return BalanceHoldResult::Failure(BalanceHoldError::TrackingCopy(
+                                    tce,
+                                ));
+                            }
+                        };
+                        filter.push((tag, HoldsEpoch::from_millis(block_time.value(), interval)));
+                    }
+                    let tag = BalanceHoldAddrTag::Gas;
+                    if hold_kind.matches(tag) {
+                        let (block_time, interval) = match tc.get_balance_hold_config(tag) {
+                            Ok(Some((block_time, _, interval))) => (block_time, interval),
+                            Ok(None) => {
+                                return BalanceHoldResult::BlockTimeNotFound;
+                            }
+                            Err(tce) => {
+                                return BalanceHoldResult::Failure(BalanceHoldError::TrackingCopy(
+                                    tce,
+                                ));
+                            }
+                        };
+                        filter.push((tag, HoldsEpoch::from_millis(block_time.value(), interval)));
+                    }
+                    if let Err(tce) = tc.clear_expired_balance_holds(purse_addr, filter) {
+                        return BalanceHoldResult::Failure(BalanceHoldError::TrackingCopy(tce));
+                    }
+                }
+
+                // get updated balance
+                let balance_result = self.balance(BalanceRequest::new(
+                    request.state_hash(),
+                    request.protocol_version(),
+                    identifier,
+                    BalanceHandling::Available,
+                    ProofHandling::NoProofs,
+                ));
+                let (total_balance, available_balance) = match balance_result {
+                    BalanceResult::RootNotFound => return BalanceHoldResult::RootNotFound,
+                    BalanceResult::Failure(be) => return be.into(),
+                    BalanceResult::Success {
+                        total_balance,
+                        available_balance,
+                        ..
+                    } => (total_balance, available_balance),
+                };
+                // note that hold & held in this context does not refer to remaining holds,
+                // but rather to the requested hold amount and the resulting held amount for
+                // this execution. as calls to this variant clears holds and does not create
+                // new holds, hold & held are zero and no new hold address exists.
+                let new_hold_addr = None;
+                let hold = U512::zero();
+                let held = U512::zero();
+                let effects = tc.effects();
+                BalanceHoldResult::success(
+                    new_hold_addr,
+                    total_balance,
+                    available_balance,
+                    hold,
+                    held,
+                    effects,
+                )
+            }
+        }
+    }
+
+    /// Get the requested era validators.
+    fn era_validators(&self, request: EraValidatorsRequest) -> EraValidatorsResult {
+        // Era validators are derived from the seigniorage recipients snapshot,
+        // so delegate and convert the result variant-by-variant.
+        match self.seigniorage_recipients(SeigniorageRecipientsRequest::new(request.state_hash())) {
+            SeigniorageRecipientsResult::RootNotFound => EraValidatorsResult::RootNotFound,
+            SeigniorageRecipientsResult::Failure(err) => EraValidatorsResult::Failure(err),
+            SeigniorageRecipientsResult::ValueNotFound(msg) => {
+                EraValidatorsResult::ValueNotFound(msg)
+            }
+            SeigniorageRecipientsResult::AuctionNotFound => EraValidatorsResult::AuctionNotFound,
+            SeigniorageRecipientsResult::Success {
+                seigniorage_recipients,
+            } => {
+                let era_validators = match seigniorage_recipients {
+                    SeigniorageRecipientsSnapshot::V1(snapshot) => {
+                        auction::detail::era_validators_from_legacy_snapshot(snapshot)
+                    }
+                    SeigniorageRecipientsSnapshot::V2(snapshot) => {
+                        auction::detail::era_validators_from_snapshot(snapshot)
+                    }
+                };
+                EraValidatorsResult::Success { era_validators }
+            }
+        }
+    }
+
+    /// Get the requested seigniorage recipients.
+    fn seigniorage_recipients(
+        &self,
+        request: SeigniorageRecipientsRequest,
+    ) -> SeigniorageRecipientsResult {
+        let state_hash = request.state_hash();
+        let tc = match self.tracking_copy(state_hash) {
+            Ok(Some(tc)) => tc,
+            Ok(None) => return SeigniorageRecipientsResult::RootNotFound,
+            Err(err) => {
+                return SeigniorageRecipientsResult::Failure(TrackingCopyError::Storage(err))
+            }
+        };
+        let scr = match tc.get_system_entity_registry() {
+            Ok(scr) => scr,
+            Err(err) => return SeigniorageRecipientsResult::Failure(err),
+        };
+        let enable_addressable_entity = tc.enable_addressable_entity();
+        match get_snapshot_data(self, &scr, state_hash, enable_addressable_entity) {
+            not_found @ SeigniorageRecipientsResult::ValueNotFound(_) => {
+                if enable_addressable_entity {
+                    //There is a chance that, when looking for systemic data, we could be using a
+                    // state root hash from before the AddressableEntity
+                    // migration boundary. In such a case, we should attempt to look up the data
+                    // under the Account/Contract model instead; e.g. Key::Hash instead of
+                    // Key::AddressableEntity
+                    match get_snapshot_data(self, &scr, state_hash, false) {
+                        SeigniorageRecipientsResult::ValueNotFound(_) => not_found,
+                        other => other,
+                    }
+                } else {
+                    not_found
+                }
+            }
+            other => other,
+        }
+    }
+
+    /// Gets the bids.
+    fn bids(&self, request: BidsRequest) -> BidsResult {
+        let state_hash = request.state_hash();
+        let mut tc = match self.tracking_copy(state_hash) {
+            Ok(Some(tc)) => tc,
+            Ok(None) => return BidsResult::RootNotFound,
+            Err(err) => return BidsResult::Failure(TrackingCopyError::Storage(err)),
+        };
+
+        let bid_keys = match tc.get_keys(&KeyTag::BidAddr) {
+            Ok(ret) => ret,
+            Err(err) => return BidsResult::Failure(err),
+        };
+
+        // Collect unique bid kinds; a key without a BidKind value is an error.
+        let mut bids = vec![];
+        for key in bid_keys.iter() {
+            match tc.get(key) {
+                Ok(ret) => match ret {
+                    Some(StoredValue::BidKind(bid_kind)) => {
+                        if !bids.contains(&bid_kind) {
+                            bids.push(bid_kind);
+                        }
+                    }
+                    Some(_) => {
+                        return BidsResult::Failure(
+                            TrackingCopyError::UnexpectedStoredValueVariant,
+                        );
+                    }
+                    None => return BidsResult::Failure(TrackingCopyError::MissingBid(*key)),
+                },
+                Err(error) => return BidsResult::Failure(error),
+            }
+        }
+        BidsResult::Success { bids }
+    }
+
+    /// Direct auction interaction for all variations of bid management.
+    fn bidding(
+        &self,
+        BiddingRequest {
+            config,
+            state_hash,
+            protocol_version,
+            auction_method,
+            transaction_hash,
+            initiator,
+            authorization_keys,
+        }: BiddingRequest,
+    ) -> BiddingResult {
+        let tc = match self.tracking_copy(state_hash) {
+            Ok(Some(tc)) => Rc::new(RefCell::new(tc)),
+            Ok(None) => return BiddingResult::RootNotFound,
+            Err(err) => return BiddingResult::Failure(TrackingCopyError::Storage(err)),
+        };
+
+        // Resolve the initiator's entity footprint and access rights under the
+        // supplied authorization keys.
+        let source_account_hash = initiator.account_hash();
+        let (entity_addr, mut footprint, mut entity_access_rights) = match tc
+            .borrow_mut()
+            .authorized_runtime_footprint_with_access_rights(
+                protocol_version,
+                source_account_hash,
+                &authorization_keys,
+                &BTreeSet::default(),
+            ) {
+            Ok(ret) => ret,
+            Err(tce) => {
+                return BiddingResult::Failure(tce);
+            }
+        };
+        let entity_key = Key::AddressableEntity(entity_addr);
+
+        // extend named keys with era end timestamp
+        match tc
+            .borrow_mut()
+            .system_contract_named_key(AUCTION, ERA_END_TIMESTAMP_MILLIS_KEY)
+        {
+            Ok(Some(k)) => {
+                match k.as_uref() {
+                    Some(uref) => entity_access_rights.extend(&[*uref]),
+                    None => {
+                        return BiddingResult::Failure(TrackingCopyError::UnexpectedKeyVariant(k));
+                    }
+                }
+                footprint.insert_into_named_keys(ERA_END_TIMESTAMP_MILLIS_KEY.into(), k);
+            }
+            Ok(None) => {
+                return BiddingResult::Failure(TrackingCopyError::NamedKeyNotFound(
+                    ERA_END_TIMESTAMP_MILLIS_KEY.into(),
+                ));
+            }
+            Err(tce) => {
+                return BiddingResult::Failure(tce);
+            }
+        };
+        // extend named keys with era id
+        match tc
+            .borrow_mut()
+            .system_contract_named_key(AUCTION, ERA_ID_KEY)
+        {
+            Ok(Some(k)) => {
+                match k.as_uref() {
+                    Some(uref) => entity_access_rights.extend(&[*uref]),
+                    None => {
+                        return BiddingResult::Failure(TrackingCopyError::UnexpectedKeyVariant(k));
+                    }
+                }
+                footprint.insert_into_named_keys(ERA_ID_KEY.into(), k);
+            }
+            Ok(None) => {
+                return BiddingResult::Failure(TrackingCopyError::NamedKeyNotFound(
+                    ERA_ID_KEY.into(),
+                ));
+            }
+            Err(tce) => {
+                return BiddingResult::Failure(tce);
+            }
+        };
+
+        // Runtime is seeded from the transaction hash so address generation is
+        // deterministic per transaction.
+        let phase = Phase::Session;
+        let id = Id::Transaction(transaction_hash);
+        let address_generator = AddressGenerator::new(&id.seed(), phase);
+        let max_delegators_per_validator = config.max_delegators_per_validator();
+        let minimum_bid_amount = config.minimum_bid_amount();
+        let mut runtime = RuntimeNative::new(
+            config,
+            protocol_version,
+            id,
+            Arc::new(RwLock::new(address_generator)),
+            Rc::clone(&tc),
+            source_account_hash,
+            entity_key,
+            footprint,
+            entity_access_rights,
+            U512::MAX,
+            phase,
+        );
+
+        // Dispatch to the requested auction entry point; each arm maps its
+        // native error into a TrackingCopyError.
+        let result = match auction_method {
+            AuctionMethod::ActivateBid { validator } => runtime
+                .activate_bid(validator, minimum_bid_amount)
+                .map(|_| AuctionMethodRet::Unit)
+                .map_err(|auc_err| {
+                    TrackingCopyError::SystemContract(system::Error::Auction(auc_err))
+                }),
+            AuctionMethod::AddBid {
+                public_key,
+                delegation_rate,
+                amount,
+                minimum_delegation_amount,
+                maximum_delegation_amount,
+                minimum_bid_amount,
+                reserved_slots,
+            } => runtime
+                .add_bid(
+                    public_key,
+                    delegation_rate,
+                    amount,
+                    minimum_delegation_amount,
+                    maximum_delegation_amount,
+                    minimum_bid_amount,
+                    max_delegators_per_validator,
+                    reserved_slots,
+                )
+                .map(AuctionMethodRet::UpdatedAmount)
+                .map_err(TrackingCopyError::Api),
+            AuctionMethod::WithdrawBid {
+                public_key,
+                amount,
+                minimum_bid_amount,
+            } => runtime
+                .withdraw_bid(public_key, amount, minimum_bid_amount)
+                .map(AuctionMethodRet::UpdatedAmount)
+                .map_err(|auc_err| {
+                    TrackingCopyError::SystemContract(system::Error::Auction(auc_err))
+                }),
+            AuctionMethod::Delegate {
+                delegator,
+                validator,
+                amount,
+                max_delegators_per_validator,
+            } => runtime
+                .delegate(delegator, validator, amount, max_delegators_per_validator)
+                .map(AuctionMethodRet::UpdatedAmount)
+                .map_err(TrackingCopyError::Api),
+            AuctionMethod::Undelegate {
+                delegator,
+                validator,
+                amount,
+            } => runtime
+                .undelegate(delegator, validator, amount)
+                .map(AuctionMethodRet::UpdatedAmount)
+                .map_err(|auc_err| {
+                    TrackingCopyError::SystemContract(system::Error::Auction(auc_err))
+                }),
+            AuctionMethod::Redelegate {
+                delegator,
+                validator,
+                amount,
+                new_validator,
+            } => runtime
+                .redelegate(delegator, validator, amount, new_validator)
+                .map(AuctionMethodRet::UpdatedAmount)
+                .map_err(|auc_err| {
+                    TrackingCopyError::SystemContract(system::Error::Auction(auc_err))
+                }),
+            AuctionMethod::ChangeBidPublicKey {
+                public_key,
+                new_public_key,
+            } => runtime
+                .change_bid_public_key(public_key, new_public_key)
+                .map(|_| AuctionMethodRet::Unit)
+                .map_err(|auc_err| {
+                    TrackingCopyError::SystemContract(system::Error::Auction(auc_err))
+                }),
+            AuctionMethod::AddReservations { reservations } => runtime
+                .add_reservations(reservations)
+                .map(|_| AuctionMethodRet::Unit)
+                .map_err(|auc_err| {
+                    TrackingCopyError::SystemContract(system::Error::Auction(auc_err))
+                }),
+            AuctionMethod::CancelReservations {
+                validator,
+                delegators,
+                max_delegators_per_validator,
+            } => runtime
+                .cancel_reservations(validator, delegators, max_delegators_per_validator)
+                .map(|_| AuctionMethodRet::Unit)
+                .map_err(|auc_err| {
+                    TrackingCopyError::SystemContract(system::Error::Auction(auc_err))
+                }),
+        };
+
+        // NOTE(review): effects/transfers are returned to the caller uncommitted;
+        // presumably the caller commits them — confirm against call sites.
+        let transfers = runtime.into_transfers();
+        let effects = tc.borrow_mut().effects();
+
+        match result {
+            Ok(ret) => BiddingResult::Success {
+                ret,
+                effects,
+                transfers,
+            },
+            Err(tce) => BiddingResult::Failure(tce),
+        }
+    }
+
+    /// Handle refund.
+ fn handle_refund( + &self, + HandleRefundRequest { + config, + state_hash, + protocol_version, + transaction_hash, + refund_mode, + }: HandleRefundRequest, + ) -> HandleRefundResult { + let tc = match self.tracking_copy(state_hash) { + Ok(Some(tc)) => Rc::new(RefCell::new(tc)), + Ok(None) => return HandleRefundResult::RootNotFound, + Err(err) => return HandleRefundResult::Failure(TrackingCopyError::Storage(err)), + }; + + let id = Id::Transaction(transaction_hash); + let phase = refund_mode.phase(); + let address_generator = Arc::new(RwLock::new(AddressGenerator::new(&id.seed(), phase))); + let mut runtime = match phase { + Phase::FinalizePayment => { + // this runtime uses the system's context + match RuntimeNative::new_system_runtime( + config, + protocol_version, + id, + address_generator, + Rc::clone(&tc), + phase, + ) { + Ok(rt) => rt, + Err(tce) => { + return HandleRefundResult::Failure(tce); + } + } + } + Phase::Payment => { + // this runtime uses the handle payment contract's context + match RuntimeNative::new_system_contract_runtime( + config, + protocol_version, + id, + address_generator, + Rc::clone(&tc), + phase, + HANDLE_PAYMENT, + ) { + Ok(rt) => rt, + Err(tce) => { + return HandleRefundResult::Failure(tce); + } + } + } + Phase::System | Phase::Session => return HandleRefundResult::InvalidPhase, + }; + + let result = match refund_mode { + HandleRefundMode::CalculateAmount { + limit, + cost, + gas_price, + consumed, + ratio, + source, + } => { + let source_purse = match source.purse_uref(&mut tc.borrow_mut(), protocol_version) { + Ok(value) => value, + Err(tce) => return HandleRefundResult::Failure(tce), + }; + let (numer, denom) = ratio.into(); + let ratio = Ratio::new_raw(U512::from(numer), U512::from(denom)); + let refund_amount = match runtime.calculate_overpayment_and_fee( + limit, + gas_price, + cost, + consumed, + source_purse, + ratio, + ) { + Ok((refund, _)) => Some(refund), + Err(hpe) => { + return 
HandleRefundResult::Failure(TrackingCopyError::SystemContract( + system::Error::HandlePayment(hpe), + )); + } + }; + Ok(refund_amount) + } + HandleRefundMode::Refund { + initiator_addr, + limit, + cost, + gas_price, + consumed, + ratio, + source, + target, + } => { + let source_purse = match source.purse_uref(&mut tc.borrow_mut(), protocol_version) { + Ok(value) => value, + Err(tce) => return HandleRefundResult::Failure(tce), + }; + let (numer, denom) = ratio.into(); + let ratio = Ratio::new_raw(U512::from(numer), U512::from(denom)); + let refund_amount = match runtime.calculate_overpayment_and_fee( + limit, + gas_price, + cost, + consumed, + source_purse, + ratio, + ) { + Ok((refund, _)) => refund, + Err(hpe) => { + return HandleRefundResult::Failure(TrackingCopyError::SystemContract( + system::Error::HandlePayment(hpe), + )); + } + }; + let target_purse = match target.purse_uref(&mut tc.borrow_mut(), protocol_version) { + Ok(value) => value, + Err(tce) => return HandleRefundResult::Failure(tce), + }; + // pay amount from source to target + match runtime + .transfer( + Some(initiator_addr.account_hash()), + source_purse, + target_purse, + refund_amount, + None, + ) + .map_err(|mint_err| { + TrackingCopyError::SystemContract(system::Error::Mint(mint_err)) + }) { + Ok(_) => Ok(Some(refund_amount)), + Err(err) => Err(err), + } + } + HandleRefundMode::RefundNoFeeCustomPayment { + initiator_addr, + limit, + cost, + gas_price, + } => { + let source = BalanceIdentifier::Payment; + let source_purse = match source.purse_uref(&mut tc.borrow_mut(), protocol_version) { + Ok(value) => value, + Err(tce) => return HandleRefundResult::Failure(tce), + }; + let consumed = U512::zero(); + let ratio = Ratio::new_raw(U512::one(), U512::one()); + let refund_amount = match runtime.calculate_overpayment_and_fee( + limit, + gas_price, + cost, + consumed, + source_purse, + ratio, + ) { + Ok((refund, _)) => refund, + Err(hpe) => { + return 
HandleRefundResult::Failure(TrackingCopyError::SystemContract( + system::Error::HandlePayment(hpe), + )); + } + }; + let target = BalanceIdentifier::Refund; + let target_purse = match target.purse_uref(&mut tc.borrow_mut(), protocol_version) { + Ok(value) => value, + Err(tce) => return HandleRefundResult::Failure(tce), + }; + match runtime + .transfer( + Some(initiator_addr.account_hash()), + source_purse, + target_purse, + refund_amount, + None, + ) + .map_err(|mint_err| { + TrackingCopyError::SystemContract(system::Error::Mint(mint_err)) + }) { + Ok(_) => Ok(Some(U512::zero())), // return 0 in this mode + Err(err) => Err(err), + } + } + HandleRefundMode::Burn { + limit, + gas_price, + cost, + consumed, + source, + ratio, + } => { + let source_purse = match source.purse_uref(&mut tc.borrow_mut(), protocol_version) { + Ok(value) => value, + Err(tce) => return HandleRefundResult::Failure(tce), + }; + let (numer, denom) = ratio.into(); + let ratio = Ratio::new_raw(U512::from(numer), U512::from(denom)); + let burn_amount = match runtime.calculate_overpayment_and_fee( + limit, + gas_price, + cost, + consumed, + source_purse, + ratio, + ) { + Ok((amount, _)) => Some(amount), + Err(hpe) => { + return HandleRefundResult::Failure(TrackingCopyError::SystemContract( + system::Error::HandlePayment(hpe), + )); + } + }; + match runtime.payment_burn(source_purse, burn_amount) { + Ok(_) => Ok(burn_amount), + Err(hpe) => Err(TrackingCopyError::SystemContract( + system::Error::HandlePayment(hpe), + )), + } + } + HandleRefundMode::SetRefundPurse { target } => { + let target_purse = match target.purse_uref(&mut tc.borrow_mut(), protocol_version) { + Ok(value) => value, + Err(tce) => return HandleRefundResult::Failure(tce), + }; + match runtime.set_refund_purse(target_purse) { + Ok(_) => Ok(None), + Err(hpe) => Err(TrackingCopyError::SystemContract( + system::Error::HandlePayment(hpe), + )), + } + } + HandleRefundMode::ClearRefundPurse => match runtime.clear_refund_purse() { + Ok(_) 
=> Ok(None), + Err(hpe) => Err(TrackingCopyError::SystemContract( + system::Error::HandlePayment(hpe), + )), + }, + }; + + let effects = tc.borrow_mut().effects(); + let transfers = runtime.into_transfers(); + + match result { + Ok(amount) => HandleRefundResult::Success { + transfers, + effects, + amount, + }, + Err(tce) => HandleRefundResult::Failure(tce), + } + } + + /// Handle payment. + fn handle_fee( + &self, + HandleFeeRequest { + config, + state_hash, + protocol_version, + transaction_hash, + handle_fee_mode, + }: HandleFeeRequest, + ) -> HandleFeeResult { + let tc = match self.tracking_copy(state_hash) { + Ok(Some(tc)) => Rc::new(RefCell::new(tc)), + Ok(None) => return HandleFeeResult::RootNotFound, + Err(err) => return HandleFeeResult::Failure(TrackingCopyError::Storage(err)), + }; + + // this runtime uses the system's context + + let id = Id::Transaction(transaction_hash); + let phase = Phase::FinalizePayment; + let address_generator = AddressGenerator::new(&id.seed(), phase); + + let mut runtime = match RuntimeNative::new_system_runtime( + config, + protocol_version, + id, + Arc::new(RwLock::new(address_generator)), + Rc::clone(&tc), + phase, + ) { + Ok(rt) => rt, + Err(tce) => { + return HandleFeeResult::Failure(tce); + } + }; + + let result = match handle_fee_mode { + HandleFeeMode::Credit { + validator, + amount, + era_id, + } => runtime + .write_validator_credit(*validator, era_id, amount) + .map(|_| ()) + .map_err(|auction_error| { + TrackingCopyError::SystemContract(system::Error::Auction(auction_error)) + }), + HandleFeeMode::Pay { + initiator_addr, + amount, + source, + target, + } => { + let source_purse = match source.purse_uref(&mut tc.borrow_mut(), protocol_version) { + Ok(value) => value, + Err(tce) => return HandleFeeResult::Failure(tce), + }; + let target_purse = match target.purse_uref(&mut tc.borrow_mut(), protocol_version) { + Ok(value) => value, + Err(tce) => return HandleFeeResult::Failure(tce), + }; + runtime + .transfer( + 
Some(initiator_addr.account_hash()), + source_purse, + target_purse, + amount, + None, + ) + .map_err(|mint_err| { + TrackingCopyError::SystemContract(system::Error::Mint(mint_err)) + }) + } + HandleFeeMode::Burn { source, amount } => { + let source_purse = match source.purse_uref(&mut tc.borrow_mut(), protocol_version) { + Ok(value) => value, + Err(tce) => return HandleFeeResult::Failure(tce), + }; + runtime + .payment_burn(source_purse, amount) + .map_err(|handle_payment_error| { + TrackingCopyError::SystemContract(system::Error::HandlePayment( + handle_payment_error, + )) + }) + } + }; + + let effects = tc.borrow_mut().effects(); + let transfers = runtime.into_transfers(); + + match result { + Ok(_) => HandleFeeResult::Success { transfers, effects }, + Err(tce) => HandleFeeResult::Failure(tce), + } + } + + /// Gets the execution result checksum. + fn execution_result_checksum( + &self, + request: ExecutionResultsChecksumRequest, + ) -> ExecutionResultsChecksumResult { + let state_hash = request.state_hash(); + let mut tc = match self.tracking_copy(state_hash) { + Ok(Some(tc)) => tc, + Ok(None) => return ExecutionResultsChecksumResult::RootNotFound, + Err(err) => { + return ExecutionResultsChecksumResult::Failure(TrackingCopyError::Storage(err)); + } + }; + match tc.get_checksum_registry() { + Ok(Some(registry)) => match registry.get(EXECUTION_RESULTS_CHECKSUM_NAME) { + Some(checksum) => ExecutionResultsChecksumResult::Success { + checksum: *checksum, + }, + None => ExecutionResultsChecksumResult::ChecksumNotFound, + }, + Ok(None) => ExecutionResultsChecksumResult::RegistryNotFound, + Err(err) => ExecutionResultsChecksumResult::Failure(err), + } + } + + /// Gets an addressable entity. 
+ fn addressable_entity(&self, request: AddressableEntityRequest) -> AddressableEntityResult { + let key = request.key(); + let query_key = match key { + Key::Account(_) => { + let query_request = QueryRequest::new(request.state_hash(), key, vec![]); + match self.query(query_request) { + QueryResult::RootNotFound => return AddressableEntityResult::RootNotFound, + QueryResult::ValueNotFound(msg) => { + return AddressableEntityResult::ValueNotFound(msg); + } + QueryResult::Failure(err) => return AddressableEntityResult::Failure(err), + QueryResult::Success { value, .. } => { + if let StoredValue::Account(account) = *value { + // legacy account that has not been migrated + let entity = AddressableEntity::from(account); + return AddressableEntityResult::Success { entity }; + } + if let StoredValue::CLValue(cl_value) = &*value { + // the corresponding entity key should be under the account's key + match cl_value.clone().into_t::() { + Ok(entity_key @ Key::AddressableEntity(_)) => entity_key, + Ok(invalid_key) => { + warn!( + %key, + %invalid_key, + type_name = %value.type_name(), + "expected a Key::AddressableEntity to be stored under account hash" + ); + return AddressableEntityResult::Failure( + TrackingCopyError::UnexpectedStoredValueVariant, + ); + } + Err(error) => { + error!(%key, %error, "expected a CLValue::Key to be stored under account hash"); + return AddressableEntityResult::Failure( + TrackingCopyError::CLValue(error), + ); + } + } + } else { + warn!( + %key, + type_name = %value.type_name(), + "expected a CLValue::Key or Account to be stored under account hash" + ); + return AddressableEntityResult::Failure( + TrackingCopyError::UnexpectedStoredValueVariant, + ); + } + } + } + } + Key::Hash(contract_hash) => { + let query_request = QueryRequest::new(request.state_hash(), key, vec![]); + match self.query(query_request) { + QueryResult::RootNotFound => return AddressableEntityResult::RootNotFound, + QueryResult::ValueNotFound(msg) => { + return 
AddressableEntityResult::ValueNotFound(msg); + } + QueryResult::Failure(err) => return AddressableEntityResult::Failure(err), + QueryResult::Success { value, .. } => { + if let StoredValue::Contract(contract) = *value { + // legacy contract that has not been migrated + let entity = AddressableEntity::from(contract); + return AddressableEntityResult::Success { entity }; + } + Key::AddressableEntity(EntityAddr::SmartContract(contract_hash)) + } + } + } + Key::AddressableEntity(_) => key, + _ => { + return AddressableEntityResult::Failure(TrackingCopyError::UnexpectedKeyVariant( + key, + )); + } + }; + + let query_request = QueryRequest::new(request.state_hash(), query_key, vec![]); + match self.query(query_request) { + QueryResult::RootNotFound => AddressableEntityResult::RootNotFound, + QueryResult::ValueNotFound(msg) => AddressableEntityResult::ValueNotFound(msg), + QueryResult::Success { value, .. } => { + let entity = match value.as_addressable_entity() { + Some(entity) => entity.clone(), + None => { + return AddressableEntityResult::Failure( + TrackingCopyError::UnexpectedStoredValueVariant, + ); + } + }; + AddressableEntityResult::Success { entity } + } + QueryResult::Failure(err) => AddressableEntityResult::Failure(err), + } + } + + /// Returns the system entity registry or the key for a system entity registered within it. 
+ fn system_entity_registry( + &self, + request: SystemEntityRegistryRequest, + ) -> SystemEntityRegistryResult { + let state_hash = request.state_hash(); + let tc = match self.tracking_copy(state_hash) { + Ok(Some(tc)) => tc, + Ok(None) => return SystemEntityRegistryResult::RootNotFound, + Err(err) => { + return SystemEntityRegistryResult::Failure(TrackingCopyError::Storage(err)); + } + }; + + let reg = match tc.get_system_entity_registry() { + Ok(reg) => reg, + Err(tce) => { + return SystemEntityRegistryResult::Failure(tce); + } + }; + + let selector = request.selector(); + match selector { + SystemEntityRegistrySelector::All => SystemEntityRegistryResult::Success { + selected: selector.clone(), + payload: SystemEntityRegistryPayload::All(reg), + }, + SystemEntityRegistrySelector::ByName(name) => match reg.get(name).copied() { + Some(entity_hash) => { + let key = if !request.enable_addressable_entity() { + Key::Hash(entity_hash) + } else { + Key::AddressableEntity(EntityAddr::System(entity_hash)) + }; + SystemEntityRegistryResult::Success { + selected: selector.clone(), + payload: SystemEntityRegistryPayload::EntityKey(key), + } + } + None => { + error!("unexpected query failure; mint not found"); + SystemEntityRegistryResult::NamedEntityNotFound(name.clone()) + } + }, + } + } + + /// Gets an entry point value. 
+ fn entry_point(&self, request: EntryPointRequest) -> EntryPointResult { + let state_root_hash = request.state_hash(); + let contract_hash = request.contract_hash(); + let entry_point_name = request.entry_point_name(); + match EntryPointAddr::new_v1_entry_point_addr( + EntityAddr::SmartContract(contract_hash), + entry_point_name, + ) { + Ok(entry_point_addr) => { + let key = Key::EntryPoint(entry_point_addr); + let query_request = QueryRequest::new(request.state_hash(), key, vec![]); + //We first check if the entry point exists as a stand alone 2.x entity + match self.query(query_request) { + QueryResult::RootNotFound => EntryPointResult::RootNotFound, + QueryResult::ValueNotFound(query_result_not_found_msg) => { + //If the entry point was not found as a 2.x entity, we check if it exists + // as part of a 1.x contract + let contract_key = Key::Hash(contract_hash); + let contract_request = ContractRequest::new(state_root_hash, contract_key); + match self.contract(contract_request) { + ContractResult::Failure(tce) => EntryPointResult::Failure(tce), + ContractResult::ValueNotFound(_) => { + EntryPointResult::ValueNotFound(query_result_not_found_msg) + } + ContractResult::RootNotFound => EntryPointResult::RootNotFound, + ContractResult::Success { contract } => { + match contract.entry_points().get(entry_point_name) { + Some(contract_entry_point) => EntryPointResult::Success { + entry_point: EntryPointValue::V1CasperVm( + EntityEntryPoint::from(contract_entry_point), + ), + }, + None => { + EntryPointResult::ValueNotFound(query_result_not_found_msg) + } + } + } + } + } + QueryResult::Failure(tce) => EntryPointResult::Failure(tce), + QueryResult::Success { value, .. 
} => { + if let StoredValue::EntryPoint(entry_point) = *value { + EntryPointResult::Success { entry_point } + } else { + error!("Expected to get entry point value received other variant"); + EntryPointResult::Failure( + TrackingCopyError::UnexpectedStoredValueVariant, + ) + } + } + } + } + Err(_) => EntryPointResult::Failure( + //TODO maybe we can have a better error type here + TrackingCopyError::ValueNotFound("Entry point not found".to_string()), + ), + } + } + + /// Gets a contract value. + fn contract(&self, request: ContractRequest) -> ContractResult { + let query_request = QueryRequest::new(request.state_hash(), request.key(), vec![]); + + match self.query(query_request) { + QueryResult::RootNotFound => ContractResult::RootNotFound, + QueryResult::ValueNotFound(msg) => ContractResult::ValueNotFound(msg), + QueryResult::Failure(tce) => ContractResult::Failure(tce), + QueryResult::Success { value, .. } => { + if let StoredValue::Contract(contract) = *value { + ContractResult::Success { contract } + } else { + error!("Expected to get contract value received other variant"); + ContractResult::Failure(TrackingCopyError::UnexpectedStoredValueVariant) + } + } + } + } + + /// Gets an entry point value. + fn entry_point_exists(&self, request: EntryPointExistsRequest) -> EntryPointExistsResult { + match self.entry_point(request.into()) { + EntryPointResult::RootNotFound => EntryPointExistsResult::RootNotFound, + EntryPointResult::ValueNotFound(msg) => EntryPointExistsResult::ValueNotFound(msg), + EntryPointResult::Success { .. } => EntryPointExistsResult::Success, + EntryPointResult::Failure(error) => EntryPointExistsResult::Failure(error), + } + } + + /// Gets total supply. 
+ fn total_supply(&self, request: TotalSupplyRequest) -> TotalSupplyResult { + let state_hash = request.state_hash(); + let tc = match self.tracking_copy(state_hash) { + Ok(Some(tc)) => tc, + Ok(None) => return TotalSupplyResult::RootNotFound, + Err(err) => return TotalSupplyResult::Failure(TrackingCopyError::Storage(err)), + }; + let scr = match tc.get_system_entity_registry() { + Ok(scr) => scr, + Err(err) => return TotalSupplyResult::Failure(err), + }; + let enable_addressable_entity = tc.enable_addressable_entity(); + match get_total_supply_data(self, &scr, state_hash, enable_addressable_entity) { + not_found @ TotalSupplyResult::ValueNotFound(_) => { + if enable_addressable_entity { + //There is a chance that, when looking for systemic data, we could be using a + // state root hash from before the AddressableEntity + // migration boundary. In such a case, we should attempt to look up the data + // under the Account/Contract model instead; e.g. Key::Hash instead of + // Key::AddressableEntity + match get_total_supply_data(self, &scr, state_hash, false) { + TotalSupplyResult::ValueNotFound(_) => not_found, + other => other, + } + } else { + not_found + } + } + other => other, + } + } + + /// Gets the current round seigniorage rate. 
+ fn round_seigniorage_rate( + &self, + request: RoundSeigniorageRateRequest, + ) -> RoundSeigniorageRateResult { + let state_hash = request.state_hash(); + let tc = match self.tracking_copy(state_hash) { + Ok(Some(tc)) => tc, + Ok(None) => return RoundSeigniorageRateResult::RootNotFound, + Err(err) => { + return RoundSeigniorageRateResult::Failure(TrackingCopyError::Storage(err)); + } + }; + let scr = match tc.get_system_entity_registry() { + Ok(scr) => scr, + Err(err) => return RoundSeigniorageRateResult::Failure(err), + }; + let enable_addressable_entity = tc.enable_addressable_entity(); + match get_round_seigniorage_rate_data(self, &scr, state_hash, enable_addressable_entity) { + not_found @ RoundSeigniorageRateResult::ValueNotFound(_) => { + if enable_addressable_entity { + //There is a chance that, when looking for systemic data, we could be using a + // state root hash from before the AddressableEntity + // migration boundary. In such a case, we should attempt to look up the data + // under the Account/Contract model instead; e.g. Key::Hash instead of + // Key::AddressableEntity + match get_round_seigniorage_rate_data(self, &scr, state_hash, false) { + RoundSeigniorageRateResult::ValueNotFound(_) => not_found, + other => other, + } + } else { + not_found + } + } + other => other, + } + } + + /// Direct transfer. 
+ fn transfer(&self, request: TransferRequest) -> TransferResult { + let state_hash = request.state_hash(); + let tc = match self.tracking_copy(state_hash) { + Ok(Some(tc)) => Rc::new(RefCell::new(tc)), + Ok(None) => return TransferResult::RootNotFound, + Err(err) => { + return TransferResult::Failure(TransferError::TrackingCopy( + TrackingCopyError::Storage(err), + )); + } + }; + + let source_account_hash = request.initiator().account_hash(); + let protocol_version = request.protocol_version(); + if let Err(tce) = tc + .borrow_mut() + .migrate_account(source_account_hash, protocol_version) + { + return TransferResult::Failure(tce.into()); + } + + let authorization_keys = request.authorization_keys(); + + let config = request.config(); + let transfer_config = config.transfer_config(); + let administrative_accounts = transfer_config.administrative_accounts(); + + let runtime_args = match request.args() { + TransferRequestArgs::Raw(runtime_args) => runtime_args.clone(), + TransferRequestArgs::Explicit(transfer_args) => { + match RuntimeArgs::try_from(*transfer_args) { + Ok(runtime_args) => runtime_args, + Err(cve) => return TransferResult::Failure(TransferError::CLValue(cve)), + } + } + TransferRequestArgs::Indirect(bita) => { + let source_uref = match bita + .source() + .purse_uref(&mut tc.borrow_mut(), protocol_version) + { + Ok(source_uref) => source_uref, + Err(tce) => return TransferResult::Failure(TransferError::TrackingCopy(tce)), + }; + let target_uref = match bita + .target() + .purse_uref(&mut tc.borrow_mut(), protocol_version) + { + Ok(target_uref) => target_uref, + Err(tce) => return TransferResult::Failure(TransferError::TrackingCopy(tce)), + }; + let transfer_args = TransferArgs::new( + bita.to(), + source_uref, + target_uref, + bita.amount(), + bita.arg_id(), + ); + match RuntimeArgs::try_from(transfer_args) { + Ok(runtime_args) => runtime_args, + Err(cve) => return TransferResult::Failure(TransferError::CLValue(cve)), + } + } + }; + + let 
remaining_spending_limit = match runtime_args.try_get_number(ARG_AMOUNT) { + Ok(amount) => amount, + Err(cve) => { + debug!("failed to derive remaining_spending_limit"); + return TransferResult::Failure(TransferError::CLValue(cve)); + } + }; + + let mut runtime_args_builder = TransferRuntimeArgsBuilder::new(runtime_args); + + let transfer_target_mode = match runtime_args_builder + .resolve_transfer_target_mode(protocol_version, Rc::clone(&tc)) + { + Ok(transfer_target_mode) => transfer_target_mode, + Err(error) => return TransferResult::Failure(error), + }; + + // On some private networks, transfers are restricted. + // This means that they must either the source or target are an admin account. + // This behavior is not used on public networks. + if transfer_config.enforce_transfer_restrictions(&source_account_hash) { + // if the source is an admin, enforce_transfer_restrictions == false + // if the source is not an admin, enforce_transfer_restrictions == true, + // and we must check to see if the target is an admin. + // if the target is also not an admin, this transfer is not permitted. + match transfer_target_mode.target_account_hash() { + Some(target_account_hash) => { + let is_target_system_account = + target_account_hash == PublicKey::System.to_account_hash(); + let is_target_administrator = + transfer_config.is_administrator(&target_account_hash); + if !(is_target_system_account || is_target_administrator) { + // Transferring from normal account to a purse doesn't work. + return TransferResult::Failure(TransferError::RestrictedTransferAttempted); + } + } + None => { + // can't allow this transfer because we are not sure if the target is an admin. 
+ return TransferResult::Failure(TransferError::UnableToVerifyTargetIsAdmin); + } + } + } + + let (entity_addr, runtime_footprint, entity_access_rights) = match tc + .borrow_mut() + .authorized_runtime_footprint_with_access_rights( + protocol_version, + source_account_hash, + authorization_keys, + &administrative_accounts, + ) { + Ok(ret) => ret, + Err(tce) => { + return TransferResult::Failure(TransferError::TrackingCopy(tce)); + } + }; + let entity_key = if config.enable_addressable_entity() { + Key::AddressableEntity(entity_addr) + } else { + match entity_addr { + EntityAddr::System(hash) | EntityAddr::SmartContract(hash) => Key::Hash(hash), + EntityAddr::Account(hash) => Key::Account(AccountHash::new(hash)), + } + }; + let id = Id::Transaction(request.transaction_hash()); + let phase = Phase::Session; + let address_generator = AddressGenerator::new(&id.seed(), phase); + // IMPORTANT: this runtime _must_ use the payer's context. + let mut runtime = RuntimeNative::new( + config.clone(), + protocol_version, + id, + Arc::new(RwLock::new(address_generator)), + Rc::clone(&tc), + source_account_hash, + entity_key, + runtime_footprint.clone(), + entity_access_rights, + remaining_spending_limit, + phase, + ); + + match transfer_target_mode { + TransferTargetMode::ExistingAccount { .. } | TransferTargetMode::PurseExists { .. 
} => { + // Noop + } + TransferTargetMode::CreateAccount(account_hash) => { + let main_purse = match runtime.mint(U512::zero()) { + Ok(uref) => uref, + Err(mint_error) => { + return TransferResult::Failure(TransferError::Mint(mint_error)); + } + }; + + let account = Account::create(account_hash, NamedKeys::new(), main_purse); + if let Err(tce) = tc + .borrow_mut() + .create_addressable_entity_from_account(account, protocol_version) + { + return TransferResult::Failure(tce.into()); + } + } + } + let transfer_args = match runtime_args_builder.build( + &runtime_footprint, + protocol_version, + Rc::clone(&tc), + ) { + Ok(transfer_args) => transfer_args, + Err(error) => return TransferResult::Failure(error), + }; + if let Err(mint_error) = runtime.transfer( + transfer_args.to(), + transfer_args.source(), + transfer_args.target(), + transfer_args.amount(), + transfer_args.arg_id(), + ) { + return TransferResult::Failure(TransferError::Mint(mint_error)); + } + + let transfers = runtime.into_transfers(); + + let effects = tc.borrow_mut().effects(); + let cache = tc.borrow_mut().cache(); + + TransferResult::Success { + transfers, + effects, + cache, + } + } + + /// Direct burn. 
+ fn burn(&self, request: BurnRequest) -> BurnResult { + let state_hash = request.state_hash(); + let tc = match self.tracking_copy(state_hash) { + Ok(Some(tc)) => Rc::new(RefCell::new(tc)), + Ok(None) => return BurnResult::RootNotFound, + Err(err) => { + return BurnResult::Failure(BurnError::TrackingCopy(TrackingCopyError::Storage( + err, + ))); + } + }; + + let source_account_hash = request.initiator().account_hash(); + let protocol_version = request.protocol_version(); + if let Err(tce) = tc + .borrow_mut() + .migrate_account(source_account_hash, protocol_version) + { + return BurnResult::Failure(tce.into()); + } + + let authorization_keys = request.authorization_keys(); + + let config = request.config(); + + let runtime_args = match request.args() { + BurnRequestArgs::Raw(runtime_args) => runtime_args.clone(), + BurnRequestArgs::Explicit(transfer_args) => { + match RuntimeArgs::try_from(*transfer_args) { + Ok(runtime_args) => runtime_args, + Err(cve) => return BurnResult::Failure(BurnError::CLValue(cve)), + } + } + }; + + let runtime_args_builder = BurnRuntimeArgsBuilder::new(runtime_args); + + let (entity_addr, mut footprint, mut entity_access_rights) = match tc + .borrow_mut() + .authorized_runtime_footprint_with_access_rights( + protocol_version, + source_account_hash, + authorization_keys, + &BTreeSet::default(), + ) { + Ok(ret) => ret, + Err(tce) => { + return BurnResult::Failure(BurnError::TrackingCopy(tce)); + } + }; + let entity_key = if config.enable_addressable_entity() { + Key::AddressableEntity(entity_addr) + } else { + match entity_addr { + EntityAddr::System(hash) | EntityAddr::SmartContract(hash) => Key::Hash(hash), + EntityAddr::Account(hash) => Key::Account(AccountHash::new(hash)), + } + }; + + // extend named keys with total supply + match tc + .borrow_mut() + .system_contract_named_key(MINT, TOTAL_SUPPLY_KEY) + { + Ok(Some(k)) => { + match k.as_uref() { + Some(uref) => entity_access_rights.extend(&[*uref]), + None => { + return 
BurnResult::Failure(BurnError::TrackingCopy( + TrackingCopyError::UnexpectedKeyVariant(k), + )); + } + } + footprint.insert_into_named_keys(TOTAL_SUPPLY_KEY.into(), k); + } + Ok(None) => { + return BurnResult::Failure(BurnError::TrackingCopy( + TrackingCopyError::NamedKeyNotFound(TOTAL_SUPPLY_KEY.into()), + )); + } + Err(tce) => { + return BurnResult::Failure(BurnError::TrackingCopy(tce)); + } + }; + let id = Id::Transaction(request.transaction_hash()); + let phase = Phase::Session; + let address_generator = AddressGenerator::new(&id.seed(), phase); + let burn_args = match runtime_args_builder.build(&footprint, Rc::clone(&tc)) { + Ok(burn_args) => burn_args, + Err(error) => return BurnResult::Failure(error), + }; + + // IMPORTANT: this runtime _must_ use the payer's context. + let mut runtime = RuntimeNative::new( + config.clone(), + protocol_version, + id, + Arc::new(RwLock::new(address_generator)), + Rc::clone(&tc), + source_account_hash, + entity_key, + footprint.clone(), + entity_access_rights, + burn_args.amount(), + phase, + ); + + if let Err(mint_error) = runtime.burn(burn_args.source(), burn_args.amount()) { + return BurnResult::Failure(BurnError::Mint(mint_error)); + } + + let effects = tc.borrow_mut().effects(); + let cache = tc.borrow_mut().cache(); + + BurnResult::Success { effects, cache } + } + + /// Gets all values under a given key tag. 
+ fn tagged_values(&self, request: TaggedValuesRequest) -> TaggedValuesResult { + let state_hash = request.state_hash(); + let mut tc = match self.tracking_copy(state_hash) { + Ok(Some(tc)) => tc, + Ok(None) => return TaggedValuesResult::RootNotFound, + Err(gse) => return TaggedValuesResult::Failure(TrackingCopyError::Storage(gse)), + }; + + let key_tag = request.key_tag(); + let keys = match tc.get_keys(&key_tag) { + Ok(keys) => keys, + Err(tce) => return TaggedValuesResult::Failure(tce), + }; + + let mut values = vec![]; + for key in keys { + match tc.get(&key) { + Ok(Some(value)) => { + values.push(value); + } + Ok(None) => {} + Err(error) => return TaggedValuesResult::Failure(error), + } + } + + TaggedValuesResult::Success { + values, + selection: request.selection(), + } + } + + /// Gets all values under a given key prefix. + /// Currently, this ignores the cache and only provides values from the trie. + fn prefixed_values(&self, request: PrefixedValuesRequest) -> PrefixedValuesResult { + let mut tc = match self.tracking_copy(request.state_hash()) { + Ok(Some(tc)) => tc, + Ok(None) => return PrefixedValuesResult::RootNotFound, + Err(err) => return PrefixedValuesResult::Failure(TrackingCopyError::Storage(err)), + }; + match tc.get_keys_by_prefix(request.key_prefix()) { + Ok(keys) => { + let mut values = Vec::with_capacity(keys.len()); + for key in keys { + match tc.get(&key) { + Ok(Some(value)) => values.push(value), + Ok(None) => {} + Err(error) => return PrefixedValuesResult::Failure(error), + } + } + PrefixedValuesResult::Success { + values, + key_prefix: request.key_prefix().clone(), + } + } + Err(error) => PrefixedValuesResult::Failure(error), + } + } + + /// Reads a `Trie` from the state if it is present + fn trie(&self, request: TrieRequest) -> TrieResult; + + /// Persists a trie element. + fn put_trie(&self, request: PutTrieRequest) -> PutTrieResult; + + /// Finds all the children of `trie_raw` which aren't present in the state. 
+ fn missing_children(&self, trie_raw: &[u8]) -> Result, GlobalStateError>; + + /// Gets the value of enable entity flag. + fn enable_entity(&self) -> bool; +} + +fn get_round_seigniorage_rate_data( + state_provider: &T, + scr: &SystemHashRegistry, + state_hash: Digest, + enable_addressable_entity: bool, +) -> RoundSeigniorageRateResult { + let query_request = match scr.get(MINT).copied() { + Some(mint_hash) => { + let key = if !enable_addressable_entity { + Key::Hash(mint_hash) + } else { + Key::AddressableEntity(EntityAddr::System(mint_hash)) + }; + QueryRequest::new( + state_hash, + key, + vec![ROUND_SEIGNIORAGE_RATE_KEY.to_string()], + ) + } + None => { + error!("unexpected query failure; mint not found"); + return RoundSeigniorageRateResult::MintNotFound; + } + }; + + match state_provider.query(query_request) { + QueryResult::RootNotFound => RoundSeigniorageRateResult::RootNotFound, + QueryResult::ValueNotFound(msg) => RoundSeigniorageRateResult::ValueNotFound(msg), + QueryResult::Failure(tce) => RoundSeigniorageRateResult::Failure(tce), + QueryResult::Success { value, proofs: _ } => { + let cl_value = match value.into_cl_value() { + Some(cl_value) => cl_value, + None => { + error!("unexpected query failure; total supply is not a CLValue"); + return RoundSeigniorageRateResult::Failure( + TrackingCopyError::UnexpectedStoredValueVariant, + ); + } + }; + + match cl_value.into_t() { + Ok(rate) => RoundSeigniorageRateResult::Success { rate }, + Err(cve) => RoundSeigniorageRateResult::Failure(TrackingCopyError::CLValue(cve)), + } + } + } +} + +fn get_total_supply_data( + state_provider: &T, + scr: &SystemHashRegistry, + state_hash: Digest, + enable_addressable_entity: bool, +) -> TotalSupplyResult { + let query_request = match scr.get(MINT).copied() { + Some(mint_hash) => { + let key = if !enable_addressable_entity { + Key::Hash(mint_hash) + } else { + Key::AddressableEntity(EntityAddr::System(mint_hash)) + }; + QueryRequest::new(state_hash, key, 
vec![TOTAL_SUPPLY_KEY.to_string()]) + } + None => { + error!("unexpected query failure; mint not found"); + return TotalSupplyResult::MintNotFound; + } + }; + match state_provider.query(query_request) { + QueryResult::RootNotFound => TotalSupplyResult::RootNotFound, + QueryResult::ValueNotFound(msg) => TotalSupplyResult::ValueNotFound(msg), + QueryResult::Failure(tce) => TotalSupplyResult::Failure(tce), + QueryResult::Success { value, proofs: _ } => { + let cl_value = match value.into_cl_value() { + Some(cl_value) => cl_value, + None => { + error!("unexpected query failure; total supply is not a CLValue"); + return TotalSupplyResult::Failure( + TrackingCopyError::UnexpectedStoredValueVariant, + ); + } + }; + + match cl_value.into_t() { + Ok(total_supply) => TotalSupplyResult::Success { total_supply }, + Err(cve) => TotalSupplyResult::Failure(TrackingCopyError::CLValue(cve)), + } + } + } +} + +fn get_snapshot_data( + state_provider: &T, + scr: &SystemHashRegistry, + state_hash: Digest, + enable_addressable_entity: bool, +) -> SeigniorageRecipientsResult { + let (snapshot_query_request, snapshot_version_query_request) = + match build_query_requests(scr, state_hash, enable_addressable_entity) { + Ok(res) => res, + Err(res) => return res, + }; + + // check if snapshot version flag is present + let snapshot_version: Option = + match query_snapshot_version(state_provider, snapshot_version_query_request) { + Ok(value) => value, + Err(value) => return value, + }; + + let snapshot = match query_snapshot(state_provider, snapshot_version, snapshot_query_request) { + Ok(snapshot) => snapshot, + Err(value) => return value, + }; + + SeigniorageRecipientsResult::Success { + seigniorage_recipients: snapshot, + } +} + +fn query_snapshot( + state_provider: &T, + snapshot_version: Option, + snapshot_query_request: QueryRequest, +) -> Result { + match state_provider.query(snapshot_query_request) { + QueryResult::RootNotFound => Err(SeigniorageRecipientsResult::RootNotFound), + 
QueryResult::Failure(error) => { + error!(?error, "unexpected tracking copy error"); + Err(SeigniorageRecipientsResult::Failure(error)) + } + QueryResult::ValueNotFound(msg) => { + error!(%msg, "value not found"); + Err(SeigniorageRecipientsResult::ValueNotFound(msg)) + } + QueryResult::Success { value, proofs: _ } => { + let cl_value = match value.into_cl_value() { + Some(snapshot_cl_value) => snapshot_cl_value, + None => { + error!("unexpected query failure; seigniorage recipients snapshot is not a CLValue"); + return Err(SeigniorageRecipientsResult::Failure( + TrackingCopyError::UnexpectedStoredValueVariant, + )); + } + }; + + match snapshot_version { + Some(_) => { + let snapshot = match cl_value.into_t() { + Ok(snapshot) => snapshot, + Err(cve) => { + error!("Failed to convert snapshot from CLValue"); + return Err(SeigniorageRecipientsResult::Failure( + TrackingCopyError::CLValue(cve), + )); + } + }; + Ok(SeigniorageRecipientsSnapshot::V2(snapshot)) + } + None => { + let snapshot = match cl_value.into_t() { + Ok(snapshot) => snapshot, + Err(cve) => { + error!("Failed to convert snapshot from CLValue"); + return Err(SeigniorageRecipientsResult::Failure( + TrackingCopyError::CLValue(cve), + )); + } + }; + Ok(SeigniorageRecipientsSnapshot::V1(snapshot)) + } + } + } + } +} + +fn query_snapshot_version( + state_provider: &T, + snapshot_version_query_request: QueryRequest, +) -> Result, SeigniorageRecipientsResult> { + match state_provider.query(snapshot_version_query_request) { + QueryResult::RootNotFound => Err(SeigniorageRecipientsResult::RootNotFound), + QueryResult::Failure(error) => { + error!(?error, "unexpected tracking copy error"); + Err(SeigniorageRecipientsResult::Failure(error)) + } + QueryResult::ValueNotFound(_msg) => Ok(None), + QueryResult::Success { value, proofs: _ } => { + let cl_value = match value.into_cl_value() { + Some(snapshot_version_cl_value) => snapshot_version_cl_value, + None => { + error!("unexpected query failure; seigniorage 
recipients snapshot version is not a CLValue"); + return Err(SeigniorageRecipientsResult::Failure( + TrackingCopyError::UnexpectedStoredValueVariant, + )); + } + }; + match cl_value.into_t() { + Ok(snapshot_version) => Ok(Some(snapshot_version)), + Err(cve) => Err(SeigniorageRecipientsResult::Failure( + TrackingCopyError::CLValue(cve), + )), + } + } + } +} + +fn build_query_requests( + scr: &SystemHashRegistry, + state_hash: Digest, + enable_addressable_entity: bool, +) -> Result<(QueryRequest, QueryRequest), SeigniorageRecipientsResult> { + match scr.get(AUCTION).copied() { + Some(auction_hash) => { + let key = if !enable_addressable_entity { + Key::Hash(auction_hash) + } else { + Key::AddressableEntity(EntityAddr::System(auction_hash)) + }; + Ok(( + QueryRequest::new( + state_hash, + key, + vec![SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY.to_string()], + ), + QueryRequest::new( + state_hash, + key, + vec![SEIGNIORAGE_RECIPIENTS_SNAPSHOT_VERSION_KEY.to_string()], + ), + )) + } + None => Err(SeigniorageRecipientsResult::AuctionNotFound), + } +} + +/// Write multiple key/stored value pairs to the store in a single rw transaction. +pub fn put_stored_values<'a, R, S, E>( + environment: &'a R, + store: &S, + prestate_hash: Digest, + stored_values: Vec<(Key, StoredValue)>, +) -> Result +where + R: TransactionSource<'a, Handle = S::Handle>, + S: TrieStore, + S::Error: From, + E: From + + From + + From + + From + + From, +{ + let mut txn = environment.create_read_write_txn()?; + let state_root = prestate_hash; + let maybe_root: Option> = store.get(&txn, &state_root)?; + if maybe_root.is_none() { + return Err(CommitError::RootNotFound(prestate_hash).into()); + }; + + let state_root = + batch_write::<_, _, _, _, _, E>(&mut txn, store, &state_root, stored_values.into_iter())?; + txn.commit()?; + Ok(state_root) +} + +/// Commit `effects` to the store. 
+pub fn commit<'a, R, S, E>( + environment: &'a R, + store: &S, + prestate_hash: Digest, + effects: Effects, +) -> Result +where + R: TransactionSource<'a, Handle = S::Handle>, + S: TrieStore, + S::Error: From, + E: From + + From + + From + + From + + From, /* even tho E is currently always GSE, this is required to + * satisfy the compiler */ +{ + let mut txn = environment.create_read_write_txn()?; + let mut state_root = prestate_hash; + + let maybe_root: Option> = store.get(&txn, &state_root)?; + + if maybe_root.is_none() { + return Err(CommitError::RootNotFound(prestate_hash).into()); + }; + + for (key, kind) in effects.value().into_iter().map(TransformV2::destructure) { + let read_result = read::<_, _, _, _, E>(&txn, store, &state_root, &key)?; + + let instruction = match (read_result, kind) { + (_, TransformKindV2::Identity) => { + // effectively a noop. + continue; + } + (ReadResult::NotFound, TransformKindV2::Write(new_value)) => { + TransformInstruction::store(new_value) + } + (ReadResult::NotFound, TransformKindV2::Prune(key)) => { + // effectively a noop. + debug!( + ?state_root, + ?key, + "commit: attempt to prune nonexistent record; this may happen if a key is both added and pruned in the same commit." 
+ ); + continue; + } + (ReadResult::NotFound, transform_kind) => { + error!( + ?state_root, + ?key, + ?transform_kind, + "commit: key not found while attempting to apply transform" + ); + return Err(CommitError::KeyNotFound(key).into()); + } + (ReadResult::Found(current_value), transform_kind) => { + match transform_kind.apply(current_value) { + Ok(instruction) => instruction, + Err(err) => { + error!( + ?state_root, + ?key, + ?err, + "commit: key found, but could not apply transform" + ); + return Err(CommitError::TransformError(err).into()); + } + } + } + (ReadResult::RootNotFound, transform_kind) => { + error!( + ?state_root, + ?key, + ?transform_kind, + "commit: failed to read state root while processing transform" + ); + return Err(CommitError::ReadRootNotFound(state_root).into()); + } + }; + + match instruction { + TransformInstruction::Store(value) => { + let write_result = + write::<_, _, _, _, E>(&mut txn, store, &state_root, &key, &value)?; + + match write_result { + WriteResult::Written(root_hash) => { + state_root = root_hash; + } + WriteResult::AlreadyExists => (), + WriteResult::RootNotFound => { + error!(?state_root, ?key, ?value, "commit: root not found"); + return Err(CommitError::WriteRootNotFound(state_root).into()); + } + } + } + TransformInstruction::Prune(key) => { + let prune_result = prune::<_, _, _, _, E>(&mut txn, store, &state_root, &key)?; + + match prune_result { + TriePruneResult::Pruned(root_hash) => { + state_root = root_hash; + } + TriePruneResult::MissingKey => { + warn!("commit: pruning attempt failed for {}", key); + } + TriePruneResult::RootNotFound => { + error!(?state_root, ?key, "commit: root not found"); + return Err(CommitError::WriteRootNotFound(state_root).into()); + } + TriePruneResult::Failure(gse) => { + return Err(gse.into()); // currently this is always reflexive + } + } + } + } + } + + txn.commit()?; + + Ok(state_root) +} diff --git a/storage/src/global_state/state/scratch.rs 
b/storage/src/global_state/state/scratch.rs new file mode 100644 index 0000000000..022e7a1b00 --- /dev/null +++ b/storage/src/global_state/state/scratch.rs @@ -0,0 +1,948 @@ +use lmdb::RwTransaction; +use std::{ + collections::{BTreeMap, BTreeSet, HashMap, VecDeque}, + mem, + ops::Deref, + sync::{Arc, RwLock}, +}; + +use tracing::{debug, error}; + +use casper_types::{ + bytesrepr::{self, ToBytes}, + execution::{Effects, TransformInstruction, TransformKindV2, TransformV2}, + global_state::TrieMerkleProof, + Digest, Key, StoredValue, +}; + +use crate::{ + data_access_layer::{ + FlushRequest, FlushResult, PutTrieRequest, PutTrieResult, TrieElement, TrieRequest, + TrieResult, + }, + global_state::{ + error::Error as GlobalStateError, + state::{CommitError, CommitProvider, StateProvider, StateReader}, + store::Store, + transaction_source::{lmdb::LmdbEnvironment, Transaction, TransactionSource}, + trie::{Trie, TrieRaw}, + trie_store::{ + lmdb::LmdbTrieStore, + operations::{ + keys_with_prefix, missing_children, put_trie, read, read_with_proof, ReadResult, + }, + }, + }, +}; + +use crate::tracking_copy::TrackingCopy; + +type SharedCache = Arc>; + +struct Cache { + cached_values: HashMap, + pruned: BTreeSet, + cached_keys: CacheTrie, +} + +struct CacheTrieNode { + children: BTreeMap>, + value: Option, +} + +impl CacheTrieNode { + fn new() -> Self { + CacheTrieNode { + children: BTreeMap::new(), + value: None, + } + } + + fn remove(&mut self, bytes: &[u8], depth: usize) -> bool { + if depth == bytes.len() { + if self.value.is_some() { + self.value = None; + return self.children.is_empty(); + } + return false; + } + + if let Some(child_node) = self.children.get_mut(&bytes[depth]) { + if child_node.remove(bytes, depth + 1) { + self.children.remove(&bytes[depth]); + return self.value.is_none() && self.children.is_empty(); + } + } + false + } +} + +struct CacheTrie { + root: CacheTrieNode, +} + +impl CacheTrie { + fn new() -> Self { + CacheTrie { + root: CacheTrieNode::new(), + 
} + } + + fn insert(&mut self, key_bytes: &[u8], key: T) { + let mut current_node = &mut self.root; + for &byte in key_bytes { + current_node = current_node + .children + .entry(byte) + .or_insert(CacheTrieNode::new()); + } + current_node.value = Some(key); + } + + fn keys_with_prefix(&self, prefix: &[u8]) -> Vec { + let mut current_node = &self.root; + let mut result = Vec::new(); + + for &byte in prefix { + match current_node.children.get(&byte) { + Some(node) => current_node = node, + None => return result, + } + } + + self.collect_keys(current_node, &mut result); + result + } + + fn collect_keys(&self, start_node: &CacheTrieNode, result: &mut Vec) { + let mut stack = VecDeque::new(); + stack.push_back(start_node); + + while let Some(node) = stack.pop_back() { + if let Some(key) = node.value { + result.push(key); + } + + for child_node in node.children.values() { + stack.push_back(child_node); + } + } + } + + fn remove(&mut self, key_bytes: &[u8]) -> bool { + self.root.remove(key_bytes, 0) + } +} + +impl Cache { + fn new() -> Self { + Cache { + cached_values: HashMap::new(), + pruned: BTreeSet::new(), + cached_keys: CacheTrie::new(), + } + } + + /// Returns true if the pruned and cached values are both empty. 
+ pub fn is_empty(&self) -> bool { + self.cached_values.is_empty() && self.pruned.is_empty() + } + + fn insert_write(&mut self, key: Key, value: StoredValue) -> Result<(), bytesrepr::Error> { + self.pruned.remove(&key); + if self.cached_values.insert(key, (true, value)).is_none() { + let key_bytes = key.to_bytes()?; + self.cached_keys.insert(&key_bytes, key); + }; + Ok(()) + } + + fn insert_read(&mut self, key: Key, value: StoredValue) -> Result<(), bytesrepr::Error> { + let key_bytes = key.to_bytes()?; + self.cached_keys.insert(&key_bytes, key); + self.cached_values.entry(key).or_insert((false, value)); + Ok(()) + } + + fn prune(&mut self, key: Key) -> Result<(), bytesrepr::Error> { + self.cached_values.remove(&key); + self.cached_keys.remove(&key.to_bytes()?); + self.pruned.insert(key); + Ok(()) + } + + fn get(&self, key: &Key) -> Option<&StoredValue> { + if self.pruned.contains(key) { + return None; + } + self.cached_values.get(key).map(|(_dirty, value)| value) + } + + /// Consumes self and returns only written values as values that were only read must be filtered + /// out to prevent unnecessary writes. + fn into_dirty_writes(self) -> (Vec<(Key, StoredValue)>, BTreeSet) { + let stored_values: Vec<(Key, StoredValue)> = self + .cached_keys + .keys_with_prefix(&[]) + .into_iter() + .filter_map(|key| { + self.cached_values.get(&key).and_then(|(dirty, value)| { + if *dirty { + Some((key, value.clone())) + } else { + None + } + }) + }) + .collect(); + let keys_to_prune = self.pruned; + + debug!( + "Cache::into_dirty_writes prune_count: {} store_count: {}", + keys_to_prune.len(), + stored_values.len() + ); + (stored_values, keys_to_prune) + } +} + +/// Global state implemented against LMDB as a backing data store. +pub struct ScratchGlobalState { + /// Underlying, cached stored values. + cache: SharedCache, + /// Environment for LMDB. + pub(crate) environment: Arc, + /// Trie store held within LMDB. 
+ pub(crate) trie_store: Arc, + /// Empty root hash used for a new trie. + pub(crate) empty_root_hash: Digest, + /// Max query depth + pub max_query_depth: u64, + /// Enable the addressable entity and migrate accounts/contracts to entities. + pub enable_addressable_entity: bool, +} + +/// Represents a "view" of global state at a particular root hash. +pub struct ScratchGlobalStateView { + cache: SharedCache, + /// Environment for LMDB. + pub(crate) environment: Arc, + /// Trie store held within LMDB. + pub(crate) trie_store: Arc, + /// Root hash of this "view". + pub(crate) root_hash: Digest, +} + +impl ScratchGlobalStateView { + /// Returns true if the pruned and cached values are both empty. + pub fn is_empty(&self) -> bool { + self.cache.read().unwrap().is_empty() + } +} + +impl ScratchGlobalState { + /// Creates a state from an existing environment, store, and root_hash. + /// Intended to be used for testing. + pub fn new( + environment: Arc, + trie_store: Arc, + empty_root_hash: Digest, + max_query_depth: u64, + enable_entity: bool, + ) -> Self { + ScratchGlobalState { + cache: Arc::new(RwLock::new(Cache::new())), + environment, + trie_store, + empty_root_hash, + max_query_depth, + enable_addressable_entity: enable_entity, + } + } + + /// Consume self and return inner cache. + pub fn into_inner(self) -> (Vec<(Key, StoredValue)>, BTreeSet) { + let cache = mem::replace(&mut *self.cache.write().unwrap(), Cache::new()); + cache.into_dirty_writes() + } +} + +impl StateReader for ScratchGlobalStateView { + type Error = GlobalStateError; + + fn read(&self, key: &Key) -> Result, Self::Error> { + { + let cache = self.cache.read().unwrap(); + if cache.pruned.contains(key) { + return Ok(None); + } + if let Some(value) = cache.get(key) { + return Ok(Some(value.clone())); + } + } + let txn = self.environment.create_read_txn()?; + let ret = match read::( + &txn, + self.trie_store.deref(), + &self.root_hash, + key, + )? 
{ + ReadResult::Found(value) => { + self.cache + .write() + .expect("poisoned scratch cache lock") + .insert_read(*key, value.clone())?; + Some(value) + } + ReadResult::NotFound => None, + ReadResult::RootNotFound => panic!("ScratchGlobalState has invalid root"), + }; + txn.commit()?; + Ok(ret) + } + + fn read_with_proof( + &self, + key: &Key, + ) -> Result>, Self::Error> { + // if self.cache.is_empty() proceed else error + if !self.is_empty() { + return Err(Self::Error::CannotProvideProofsOverCachedData); + } + + let txn = self.environment.create_read_txn()?; + let ret = match read_with_proof::< + Key, + StoredValue, + lmdb::RoTransaction, + LmdbTrieStore, + Self::Error, + >(&txn, self.trie_store.deref(), &self.root_hash, key)? + { + ReadResult::Found(value) => Some(value), + ReadResult::NotFound => None, + ReadResult::RootNotFound => panic!("LmdbWithCacheGlobalState has invalid root"), + }; + txn.commit()?; + Ok(ret) + } + + fn keys_with_prefix(&self, prefix: &[u8]) -> Result, Self::Error> { + let mut ret = Vec::new(); + let cache = self.cache.read().expect("poisoned scratch cache mutex"); + let cached_keys = cache.cached_keys.keys_with_prefix(prefix); + ret.extend(cached_keys); + + let txn = self.environment.create_read_txn()?; + let keys_iter = keys_with_prefix::( + &txn, + self.trie_store.deref(), + &self.root_hash, + prefix, + ); + for result in keys_iter { + match result { + Ok(key) => { + // If the key is pruned then we won't return it. If the key is already cached, + // then it would have been picked up by the code above so we don't add it again + // to avoid duplicates. + if !cache.pruned.contains(&key) && !cache.cached_values.contains_key(&key) { + ret.push(key); + } + } + Err(error) => return Err(error), + } + } + txn.commit()?; + Ok(ret) + } +} + +impl CommitProvider for ScratchGlobalState { + /// State hash returned is the one provided, as we do not write to lmdb with this kind of global + /// state. 
Note that the state hash is NOT used, and simply passed back to the caller. + fn commit_effects( + &self, + state_hash: Digest, + effects: Effects, + ) -> Result { + let txn = self.environment.create_read_txn()?; + for (key, kind) in effects.value().into_iter().map(TransformV2::destructure) { + let cached_value = self.cache.read().unwrap().get(&key).cloned(); + let instruction = match (cached_value, kind) { + (_, TransformKindV2::Identity) => { + // effectively a noop. + continue; + } + (None, TransformKindV2::Write(new_value)) => TransformInstruction::store(new_value), + (None, transform_kind) => { + // It might be the case that for `Add*` operations we don't have the previous + // value in cache yet. + match read::< + Key, + StoredValue, + lmdb::RoTransaction, + LmdbTrieStore, + GlobalStateError, + >(&txn, self.trie_store.deref(), &state_hash, &key)? + { + ReadResult::Found(current_value) => { + match transform_kind.apply(current_value.clone()) { + Ok(instruction) => instruction, + Err(err) => { + error!(?key, ?err, "Key found, but could not apply transform"); + return Err(CommitError::TransformError(err).into()); + } + } + } + ReadResult::NotFound => { + error!( + ?key, + ?transform_kind, + "Key not found while attempting to apply transform" + ); + return Err(CommitError::KeyNotFound(key).into()); + } + ReadResult::RootNotFound => { + error!(root_hash=?state_hash, "root not found"); + return Err(CommitError::ReadRootNotFound(state_hash).into()); + } + } + } + (Some(current_value), transform_kind) => { + match transform_kind.apply(current_value) { + Ok(instruction) => instruction, + Err(err) => { + error!(?key, ?err, "Key found, but could not apply transform"); + return Err(CommitError::TransformError(err).into()); + } + } + } + }; + let mut cache = self.cache.write().unwrap(); + match instruction { + TransformInstruction::Store(value) => { + cache.insert_write(key, value)?; + } + TransformInstruction::Prune(key) => { + cache.prune(key)?; + } + } + } + 
txn.commit()?; + Ok(state_hash) + } + + fn commit_values( + &self, + state_hash: Digest, + write_values: Vec<(Key, StoredValue)>, + prune_keys: BTreeSet, + ) -> Result { + let mut cache = self.cache.write().unwrap(); + for (key, value) in write_values { + cache.insert_write(key, value)?; + } + + for key_to_prune in prune_keys { + cache.prune(key_to_prune)?; + } + + Ok(state_hash) + } +} + +impl StateProvider for ScratchGlobalState { + type Reader = ScratchGlobalStateView; + + fn flush(&self, _: FlushRequest) -> FlushResult { + if self.environment.is_manual_sync_enabled() { + match self.environment.sync() { + Ok(_) => FlushResult::Success, + Err(err) => FlushResult::Failure(err.into()), + } + } else { + FlushResult::ManualSyncDisabled + } + } + + fn empty_root(&self) -> Digest { + self.empty_root_hash + } + + fn tracking_copy( + &self, + hash: Digest, + ) -> Result>, GlobalStateError> { + match self.checkout(hash)? { + Some(tc) => Ok(Some(TrackingCopy::new( + tc, + self.max_query_depth, + self.enable_addressable_entity, + ))), + None => Ok(None), + } + } + + fn checkout(&self, state_hash: Digest) -> Result, GlobalStateError> { + let txn = self.environment.create_read_txn()?; + let maybe_root: Option> = self.trie_store.get(&txn, &state_hash)?; + let maybe_state = maybe_root.map(|_| ScratchGlobalStateView { + cache: Arc::clone(&self.cache), + environment: Arc::clone(&self.environment), + trie_store: Arc::clone(&self.trie_store), + root_hash: state_hash, + }); + txn.commit()?; + Ok(maybe_state) + } + + fn trie(&self, request: TrieRequest) -> TrieResult { + let key = request.trie_key(); + let txn = match self.environment.create_read_txn() { + Ok(ro) => ro, + Err(err) => return TrieResult::Failure(err.into()), + }; + let raw = match Store::>::get_raw( + &*self.trie_store, + &txn, + &key, + ) { + Ok(Some(bytes)) => TrieRaw::new(bytes), + Ok(None) => { + return TrieResult::ValueNotFound(key.to_string()); + } + Err(err) => { + return TrieResult::Failure(err); + } + }; + 
match txn.commit() { + Ok(_) => match request.chunk_id() { + Some(chunk_id) => TrieResult::Success { + element: TrieElement::Chunked(raw, chunk_id), + }, + None => TrieResult::Success { + element: TrieElement::Raw(raw), + }, + }, + Err(err) => TrieResult::Failure(err.into()), + } + } + + /// Persists a trie element. + fn put_trie(&self, request: PutTrieRequest) -> PutTrieResult { + // We only allow bottom-up persistence of trie elements. + // Thus we do not persist the element unless we already have all of its descendants + // persisted. It is safer to throw away the element and rely on a follow up attempt + // to reacquire it later than to allow it to be persisted which would allow runtime + // access to acquire a root hash that is missing one or more children which will + // result in undefined behavior if a process attempts to access elements below that + // root which are not held locally. + let bytes = request.raw().inner(); + match self.missing_children(bytes) { + Ok(missing_children) => { + if !missing_children.is_empty() { + let hash = Digest::hash_into_chunks_if_necessary(bytes); + return PutTrieResult::Failure(GlobalStateError::MissingTrieNodeChildren( + hash, + request.take_raw(), + missing_children, + )); + } + } + Err(err) => return PutTrieResult::Failure(err), + }; + + match self.environment.create_read_write_txn() { + Ok(mut txn) => { + match put_trie::( + &mut txn, + &self.trie_store, + bytes, + ) { + Ok(hash) => match txn.commit() { + Ok(_) => PutTrieResult::Success { hash }, + Err(err) => PutTrieResult::Failure(err.into()), + }, + Err(err) => PutTrieResult::Failure(err), + } + } + Err(err) => PutTrieResult::Failure(err.into()), + } + } + + /// Finds all of the keys of missing directly descendant `Trie` values + fn missing_children(&self, trie_raw: &[u8]) -> Result, GlobalStateError> { + let txn = self.environment.create_read_txn()?; + let missing_descendants = missing_children::< + Key, + StoredValue, + lmdb::RoTransaction, + LmdbTrieStore, + 
GlobalStateError, + >(&txn, self.trie_store.deref(), trie_raw)?; + txn.commit()?; + Ok(missing_descendants) + } + + fn enable_entity(&self) -> bool { + self.enable_addressable_entity + } +} + +#[cfg(test)] +pub(crate) mod tests { + use lmdb::DatabaseFlags; + use tempfile::tempdir; + + use casper_types::{ + account::AccountHash, + execution::{Effects, TransformKindV2, TransformV2}, + CLValue, Digest, + }; + + use super::*; + use crate::global_state::{ + state::{lmdb::LmdbGlobalState, CommitProvider}, + trie_store::operations::{write, WriteResult}, + }; + + #[cfg(test)] + use crate::global_state::{DEFAULT_MAX_DB_SIZE, DEFAULT_MAX_READERS}; + + #[derive(Debug, Clone)] + pub(crate) struct TestPair { + pub key: Key, + pub value: StoredValue, + } + + pub(crate) fn create_test_pairs() -> [TestPair; 2] { + [ + TestPair { + key: Key::Account(AccountHash::new([1_u8; 32])), + value: StoredValue::CLValue(CLValue::from_t(1_i32).unwrap()), + }, + TestPair { + key: Key::Account(AccountHash::new([2_u8; 32])), + value: StoredValue::CLValue(CLValue::from_t(2_i32).unwrap()), + }, + ] + } + + pub(crate) fn create_test_pairs_updated() -> [TestPair; 3] { + [ + TestPair { + key: Key::Account(AccountHash::new([1u8; 32])), + value: StoredValue::CLValue(CLValue::from_t("one".to_string()).unwrap()), + }, + TestPair { + key: Key::Account(AccountHash::new([2u8; 32])), + value: StoredValue::CLValue(CLValue::from_t("two".to_string()).unwrap()), + }, + TestPair { + key: Key::Account(AccountHash::new([3u8; 32])), + value: StoredValue::CLValue(CLValue::from_t(3_i32).unwrap()), + }, + ] + } + + pub(crate) fn create_test_transforms() -> Effects { + let mut effects = Effects::new(); + let transform = TransformV2::new( + Key::Account(AccountHash::new([3u8; 32])), + TransformKindV2::Write(StoredValue::CLValue(CLValue::from_t("one").unwrap())), + ); + effects.push(transform); + effects + } + + pub(crate) struct TestState { + state: LmdbGlobalState, + root_hash: Digest, + } + + #[cfg(test)] + pub(crate) 
fn create_test_state() -> TestState { + let temp_dir = tempdir().unwrap(); + let environment = Arc::new( + LmdbEnvironment::new( + temp_dir.path(), + DEFAULT_MAX_DB_SIZE, + DEFAULT_MAX_READERS, + true, + ) + .unwrap(), + ); + let trie_store = + Arc::new(LmdbTrieStore::new(&environment, None, DatabaseFlags::empty()).unwrap()); + + let state = LmdbGlobalState::empty( + environment, + trie_store, + crate::global_state::DEFAULT_MAX_QUERY_DEPTH, + crate::global_state::DEFAULT_ENABLE_ENTITY, + ) + .unwrap(); + let mut current_root = state.empty_root_hash; + { + let mut txn = state.environment.create_read_write_txn().unwrap(); + + for TestPair { key, value } in &create_test_pairs() { + match write::<_, _, _, LmdbTrieStore, GlobalStateError>( + &mut txn, + &state.trie_store, + ¤t_root, + key, + value, + ) + .unwrap() + { + WriteResult::Written(root_hash) => { + current_root = root_hash; + } + WriteResult::AlreadyExists => (), + WriteResult::RootNotFound => { + panic!("LmdbWithCacheGlobalState has invalid root") + } + } + } + + txn.commit().unwrap(); + } + TestState { + state, + root_hash: current_root, + } + } + + #[test] + fn commit_updates_state() { + let test_pairs_updated = create_test_pairs_updated(); + + let TestState { state, root_hash } = create_test_state(); + + let scratch = state.create_scratch(); + + let effects = { + let mut tmp = Effects::new(); + for TestPair { key, value } in &test_pairs_updated { + let transform = TransformV2::new(*key, TransformKindV2::Write(value.to_owned())); + tmp.push(transform); + } + tmp + }; + + let scratch_root_hash = scratch.commit_effects(root_hash, effects.clone()).unwrap(); + + assert_eq!( + scratch_root_hash, root_hash, + "ScratchGlobalState should not modify the state root, as it does no hashing" + ); + + let lmdb_hash = state.commit_effects(root_hash, effects).unwrap(); + let updated_checkout = state.checkout(lmdb_hash).unwrap().unwrap(); + + let all_keys = updated_checkout.keys_with_prefix(&[]).unwrap(); + + let 
(stored_values, _) = scratch.into_inner(); + assert_eq!(all_keys.len(), stored_values.len()); + + for key in all_keys { + assert_eq!( + stored_values + .iter() + .find(|(k, _)| k == &key) + .unwrap() + .1 + .clone(), + updated_checkout.read(&key).unwrap().unwrap() + ); + } + + for TestPair { key, value } in test_pairs_updated.iter().cloned() { + assert_eq!(Some(value), updated_checkout.read(&key).unwrap()); + } + } + + #[test] + fn commit_updates_state_with_add() { + let test_pairs_updated = create_test_pairs_updated(); + + // create two lmdb instances, with a scratch instance on the first + let TestState { state, root_hash } = create_test_state(); + let TestState { + state: state2, + root_hash: state_2_root_hash, + } = create_test_state(); + + let scratch = state.create_scratch(); + + let effects = { + let mut tmp = Effects::new(); + for TestPair { key, value } in &test_pairs_updated { + let transform = TransformV2::new(*key, TransformKindV2::Write(value.to_owned())); + tmp.push(transform); + } + tmp + }; + + // Commit effects to both databases. 
+ scratch.commit_effects(root_hash, effects.clone()).unwrap(); + let updated_hash = state2.commit_effects(state_2_root_hash, effects).unwrap(); + + // Create add transforms as well + let add_effects = create_test_transforms(); + scratch + .commit_effects(root_hash, add_effects.clone()) + .unwrap(); + let updated_hash = state2.commit_effects(updated_hash, add_effects).unwrap(); + + let scratch_checkout = scratch.checkout(root_hash).unwrap().unwrap(); + let updated_checkout = state2.checkout(updated_hash).unwrap().unwrap(); + let all_keys = updated_checkout.keys_with_prefix(&[]).unwrap(); + + // Check that cache matches the contents of the second instance of lmdb + for key in all_keys { + assert_eq!( + scratch_checkout.read(&key).unwrap().as_ref(), + updated_checkout.read(&key).unwrap().as_ref() + ); + } + } + + #[test] + fn commit_updates_state_and_original_state_stays_intact() { + let test_pairs_updated = create_test_pairs_updated(); + + let TestState { + state, root_hash, .. + } = create_test_state(); + + let scratch = state.create_scratch(); + + let effects = { + let mut tmp = Effects::new(); + for TestPair { key, value } in &test_pairs_updated { + let transform = TransformV2::new(*key, TransformKindV2::Write(value.to_owned())); + tmp.push(transform); + } + tmp + }; + + let updated_hash = scratch.commit_effects(root_hash, effects).unwrap(); + + let updated_checkout = scratch.checkout(updated_hash).unwrap().unwrap(); + for TestPair { key, value } in test_pairs_updated.iter().cloned() { + assert_eq!( + Some(value), + updated_checkout.read(&key).unwrap(), + "ScratchGlobalState should not yet be written to the underlying lmdb state" + ); + } + + let original_checkout = state.checkout(root_hash).unwrap().unwrap(); + for TestPair { key, value } in create_test_pairs().iter().cloned() { + assert_eq!(Some(value), original_checkout.read(&key).unwrap()); + } + assert_eq!( + None, + original_checkout.read(&test_pairs_updated[2].key).unwrap() + ); + } + + #[test] + fn 
cache_trie_basic_insert_get() { + let mut trie = CacheTrie::new(); + let key_hello = Key::Hash(*b"hello..........................."); + let key_world = Key::Hash(*b"world..........................."); + let key_hey = Key::Hash(*b"hey............................."); + + trie.insert(b"hello", key_hello); + trie.insert(b"world", key_world); + trie.insert(b"hey", key_hey); + + assert_eq!(trie.keys_with_prefix(b"he"), vec![key_hey, key_hello]); + assert_eq!(trie.keys_with_prefix(b"wo"), vec![key_world]); + } + + #[test] + fn cache_trie_overlapping_prefix() { + let mut trie = CacheTrie::new(); + let key_apple = Key::Hash(*b"apple..........................."); + let key_app = Key::Hash(*b"app............................."); + let key_apron = Key::Hash(*b"apron..........................."); + + trie.insert(b"apple", key_apple); + trie.insert(b"app", key_app); + trie.insert(b"apron", key_apron); + + assert_eq!( + trie.keys_with_prefix(b"ap"), + vec![key_apron, key_app, key_apple] + ); + assert_eq!(trie.keys_with_prefix(b"app"), vec![key_app, key_apple]); + } + + #[test] + fn cache_trie_leaf_removal() { + let mut trie = CacheTrie::new(); + let key_cat = Key::Hash(*b"cat............................."); + let key_category = Key::Hash(*b"category........................"); + + trie.insert(b"cat", key_cat); + trie.insert(b"category", key_category); + + trie.remove(b"category"); + assert_eq!(trie.keys_with_prefix(b"ca"), vec![key_cat]); + } + + #[test] + fn cache_trie_internal_node_removal() { + let mut trie = CacheTrie::new(); + let key_be = Key::Hash(*b"be.............................."); + let key_berry = Key::Hash(*b"berry..........................."); + + trie.insert(b"be", key_be); + trie.insert(b"berry", key_berry); + + trie.remove(b"be"); + assert_eq!(trie.keys_with_prefix(b"be"), vec![key_berry]); + } + + #[test] + fn cache_trie_non_existent_prefix() { + let mut trie = CacheTrie::new(); + + let key_apple = Key::Hash(*b"apple..........................."); + let key_mango 
= Key::Hash(*b"mango..........................."); + + trie.insert(b"apple", key_apple); + trie.insert(b"mango", key_mango); + + assert_eq!(trie.keys_with_prefix(b"b"), Vec::::new()); + } + + #[test] + fn cache_trie_empty_trie_search() { + let trie = CacheTrie::::new(); + + assert_eq!(trie.keys_with_prefix(b""), Vec::::new()); + } + + #[test] + fn cache_trie_empty_prefix_search_all_keys() { + let mut trie = CacheTrie::new(); + let key_hello = Key::Hash(*b"hello..........................."); + let key_world = Key::Hash(*b"world..........................."); + let key_hey = Key::Hash(*b"hey............................."); + + trie.insert(b"hello", key_hello); + trie.insert(b"world", key_world); + trie.insert(b"hey", key_hey); + + assert_eq!( + trie.keys_with_prefix(b""), + vec![key_world, key_hey, key_hello] + ); + } +} diff --git a/storage/src/global_state/store/mod.rs b/storage/src/global_state/store/mod.rs new file mode 100644 index 0000000000..a67f24558e --- /dev/null +++ b/storage/src/global_state/store/mod.rs @@ -0,0 +1,107 @@ +mod store_ext; +#[cfg(test)] +pub(crate) mod tests; + +use std::borrow::Cow; + +use casper_types::bytesrepr::{self, Bytes, FromBytes, ToBytes}; + +pub use self::store_ext::StoreExt; +use crate::global_state::transaction_source::{Readable, Writable}; + +/// Store is responsible for abstracting `get` and `put` operations over the underlying store +/// specified by its associated `Handle` type. +pub trait Store { + /// Errors possible from this store. + type Error: From; + + /// Underlying store type. + type Handle; + + /// `handle` returns the underlying store. + fn handle(&self) -> Self::Handle; + + /// Deserialize a value. + #[inline] + fn deserialize_value(&self, bytes: &[u8]) -> Result + where + V: FromBytes, + { + bytesrepr::deserialize_from_slice(bytes) + } + + /// Serialize a value. 
+ #[inline] + fn serialize_value(&self, value: &V) -> Result, bytesrepr::Error> + where + V: ToBytes, + { + value.to_bytes() + } + + /// Returns an optional value (may exist or not) as read through a transaction, or an error + /// of the associated `Self::Error` variety. + fn get(&self, txn: &T, key: &K) -> Result, Self::Error> + where + T: Readable, + K: AsRef<[u8]>, + V: FromBytes, + Self::Error: From, + { + let raw = self.get_raw(txn, key)?; + match raw { + Some(bytes) => { + let value = self.deserialize_value(&bytes)?; + Ok(Some(value)) + } + None => Ok(None), + } + } + + /// Returns an optional value (may exist or not) as read through a transaction, or an error + /// of the associated `Self::Error` variety. + fn get_raw(&self, txn: &T, key: &K) -> Result, Self::Error> + where + T: Readable, + K: AsRef<[u8]>, + Self::Error: From, + { + let handle = self.handle(); + Ok(txn.read(handle, key.as_ref())?) + } + + /// Puts a `value` into the store at `key` within a transaction, potentially returning an + /// error of type `Self::Error` if that fails. + fn put(&self, txn: &mut T, key: &K, value: &V) -> Result<(), Self::Error> + where + T: Writable, + K: AsRef<[u8]>, + V: ToBytes, + Self::Error: From, + { + let serialized_value = self.serialize_value(value)?; + self.put_raw(txn, key, Cow::from(serialized_value)) + } + + /// Puts a raw `value` into the store at `key` within a transaction, potentially returning an + /// error of type `Self::Error` if that fails. + /// + /// This accepts a [`Cow`] object as a value to allow different implementations to choose if + /// they want to use owned value (i.e. put it in a cache without cloning) or the raw bytes + /// (write it into a persistent store). 
+ fn put_raw( + &self, + txn: &mut T, + key: &K, + value_bytes: Cow<'_, [u8]>, + ) -> Result<(), Self::Error> + where + T: Writable, + K: AsRef<[u8]>, + Self::Error: From, + { + let handle = self.handle(); + txn.write(handle, key.as_ref(), &value_bytes) + .map_err(Into::into) + } +} diff --git a/storage/src/global_state/store/store_ext.rs b/storage/src/global_state/store/store_ext.rs new file mode 100644 index 0000000000..76521f3837 --- /dev/null +++ b/storage/src/global_state/store/store_ext.rs @@ -0,0 +1,52 @@ +//! Extension traits for store. + +use casper_types::bytesrepr::{FromBytes, ToBytes}; + +use crate::global_state::{ + store::Store, + transaction_source::{Readable, Writable}, +}; + +/// Extension trait for Store. +pub trait StoreExt: Store { + /// Returns multiple optional values (each may exist or not) from the store in one transaction. + fn get_many<'a, T>( + &self, + txn: &T, + keys: impl Iterator, + ) -> Result>, Self::Error> + where + T: Readable, + K: AsRef<[u8]> + 'a, + V: FromBytes, + Self::Error: From, + { + let mut ret: Vec> = Vec::new(); + for key in keys { + let result = self.get(txn, key)?; + ret.push(result) + } + Ok(ret) + } + + /// Puts multiple key/value pairs into the store in one transaction, potentially returning an + /// error of type `Self::Error` if that fails. 
+ fn put_many<'a, T>( + &self, + txn: &mut T, + pairs: impl Iterator, + ) -> Result<(), Self::Error> + where + T: Writable, + K: AsRef<[u8]> + 'a, + V: ToBytes + 'a, + Self::Error: From, + { + for (key, value) in pairs { + self.put(txn, key, value)?; + } + Ok(()) + } +} + +impl> StoreExt for T {} diff --git a/storage/src/global_state/store/tests.rs b/storage/src/global_state/store/tests.rs new file mode 100644 index 0000000000..c0d9a3df0f --- /dev/null +++ b/storage/src/global_state/store/tests.rs @@ -0,0 +1,49 @@ +use std::collections::BTreeMap; + +use casper_types::bytesrepr::{FromBytes, ToBytes}; + +use crate::global_state::{ + store::{Store, StoreExt}, + transaction_source::{Transaction, TransactionSource}, +}; + +// should be moved to the `store` module +fn roundtrip<'a, K, V, X, S>( + transaction_source: &'a X, + store: &S, + items: &BTreeMap, +) -> Result>, S::Error> +where + K: AsRef<[u8]>, + V: ToBytes + FromBytes, + X: TransactionSource<'a, Handle = S::Handle>, + S: Store, + S::Error: From, +{ + let mut txn: X::ReadWriteTransaction = transaction_source.create_read_write_txn()?; + store.put_many(&mut txn, items.iter())?; + let result = store.get_many(&txn, items.keys()); + txn.commit()?; + result +} + +// should be moved to the `store` module +pub fn roundtrip_succeeds<'a, K, V, X, S>( + transaction_source: &'a X, + store: &S, + items: BTreeMap, +) -> Result +where + K: AsRef<[u8]>, + V: ToBytes + FromBytes + Clone + PartialEq, + X: TransactionSource<'a, Handle = S::Handle>, + S: Store, + S::Error: From, +{ + let maybe_values: Vec> = roundtrip(transaction_source, store, &items)?; + let values = match maybe_values.into_iter().collect::>>() { + Some(values) => values, + None => return Ok(false), + }; + Ok(Iterator::eq(items.values(), values.iter())) +} diff --git a/storage/src/global_state/transaction_source/lmdb.rs b/storage/src/global_state/transaction_source/lmdb.rs new file mode 100644 index 0000000000..5f0d1da600 --- /dev/null +++ 
b/storage/src/global_state/transaction_source/lmdb.rs @@ -0,0 +1,180 @@ +use std::path::Path; + +use casper_types::bytesrepr::Bytes; +use lmdb::{ + self, Database, Environment, EnvironmentFlags, RoTransaction, RwTransaction, WriteFlags, +}; + +use crate::global_state::{ + error, + transaction_source::{Readable, Transaction, TransactionSource, Writable}, + trie_store::lmdb::ScratchTrieStore, + MAX_DBS, +}; + +/// Filename for the LMDB database created by the EE. +const EE_DB_FILENAME: &str = "data.lmdb"; + +impl Transaction for ScratchTrieStore { + type Error = error::Error; + type Handle = ScratchTrieStore; + fn commit(self) -> Result<(), Self::Error> { + // NO OP as scratch doesn't use transactions. + Ok(()) + } +} + +impl Readable for ScratchTrieStore { + fn read(&self, handle: Self::Handle, key: &[u8]) -> Result, Self::Error> { + let txn = self.env.create_read_txn()?; + match lmdb::Transaction::get(&txn, handle.store.get_db(), &key) { + Ok(bytes) => Ok(Some(Bytes::from(bytes))), + Err(lmdb::Error::NotFound) => Ok(None), + Err(e) => Err(error::Error::Lmdb(e)), + } + } +} + +impl Writable for ScratchTrieStore { + fn write(&mut self, handle: Self::Handle, key: &[u8], value: &[u8]) -> Result<(), Self::Error> { + let mut txn = self.env.create_read_write_txn()?; + txn.put(handle.store.get_db(), &key, &value, WriteFlags::empty()) + .map_err(error::Error::Lmdb)?; + Ok(()) + } +} + +impl<'a> TransactionSource<'a> for ScratchTrieStore { + type Error = error::Error; + type Handle = ScratchTrieStore; + type ReadTransaction = ScratchTrieStore; + type ReadWriteTransaction = ScratchTrieStore; + fn create_read_txn(&'a self) -> Result { + Ok(self.clone()) + } + + fn create_read_write_txn(&'a self) -> Result { + Ok(self.clone()) + } +} + +impl Transaction for RoTransaction<'_> { + type Error = lmdb::Error; + + type Handle = Database; + + fn commit(self) -> Result<(), Self::Error> { + lmdb::Transaction::commit(self) + } +} + +impl Readable for RoTransaction<'_> { + fn read(&self, 
handle: Self::Handle, key: &[u8]) -> Result, Self::Error> { + match lmdb::Transaction::get(self, handle, &key) { + Ok(bytes) => Ok(Some(Bytes::from(bytes))), + Err(lmdb::Error::NotFound) => Ok(None), + Err(e) => Err(e), + } + } +} + +impl<'a> Transaction for RwTransaction<'a> { + type Error = lmdb::Error; + + type Handle = Database; + + fn commit(self) -> Result<(), Self::Error> { + as lmdb::Transaction>::commit(self) + } +} + +impl Readable for RwTransaction<'_> { + fn read(&self, handle: Self::Handle, key: &[u8]) -> Result, Self::Error> { + match lmdb::Transaction::get(self, handle, &key) { + Ok(bytes) => Ok(Some(Bytes::from(bytes))), + Err(lmdb::Error::NotFound) => Ok(None), + Err(e) => Err(e), + } + } +} + +impl Writable for RwTransaction<'_> { + fn write(&mut self, handle: Self::Handle, key: &[u8], value: &[u8]) -> Result<(), Self::Error> { + self.put(handle, &key, &value, WriteFlags::empty()) + } +} + +/// The environment for an LMDB-backed trie store. +/// +/// Wraps [`lmdb::Environment`]. +#[derive(Debug)] +pub struct LmdbEnvironment { + env: Environment, + manual_sync_enabled: bool, +} + +impl LmdbEnvironment { + /// Constructor for `LmdbEnvironment`. + pub fn new>( + path: P, + map_size: usize, + max_readers: u32, + manual_sync_enabled: bool, + ) -> Result { + let lmdb_flags = if manual_sync_enabled { + // These options require that we manually call sync on the environment for the EE. + EnvironmentFlags::NO_SUB_DIR + | EnvironmentFlags::NO_READAHEAD + | EnvironmentFlags::MAP_ASYNC + | EnvironmentFlags::WRITE_MAP + | EnvironmentFlags::NO_META_SYNC + } else { + EnvironmentFlags::NO_SUB_DIR | EnvironmentFlags::NO_READAHEAD + }; + + let env = Environment::new() + // Set the flag to manage our own directory like in the storage component. 
+ .set_flags(lmdb_flags) + .set_max_dbs(MAX_DBS) + .set_map_size(map_size) + .set_max_readers(max_readers) + .open(&path.as_ref().join(EE_DB_FILENAME))?; + Ok(LmdbEnvironment { + env, + manual_sync_enabled, + }) + } + + /// Returns a reference to the wrapped `Environment`. + pub fn env(&self) -> &Environment { + &self.env + } + + /// Returns if this environment was constructed with manual synchronization enabled. + pub fn is_manual_sync_enabled(&self) -> bool { + self.manual_sync_enabled + } + + /// Manually synchronize LMDB to disk. + pub fn sync(&self) -> Result<(), lmdb::Error> { + self.env.sync(true) + } +} + +impl<'a> TransactionSource<'a> for LmdbEnvironment { + type Error = lmdb::Error; + + type Handle = Database; + + type ReadTransaction = RoTransaction<'a>; + + type ReadWriteTransaction = RwTransaction<'a>; + + fn create_read_txn(&'a self) -> Result, Self::Error> { + self.env.begin_ro_txn() + } + + fn create_read_write_txn(&'a self) -> Result, Self::Error> { + self.env.begin_rw_txn() + } +} diff --git a/storage/src/global_state/transaction_source/mod.rs b/storage/src/global_state/transaction_source/mod.rs new file mode 100644 index 0000000000..d1b78f92f2 --- /dev/null +++ b/storage/src/global_state/transaction_source/mod.rs @@ -0,0 +1,60 @@ +use casper_types::bytesrepr::Bytes; + +/// LMDB implementation of transaction source. +pub mod lmdb; + +/// A transaction which can be committed or aborted. +pub trait Transaction: Sized { + /// An error which can occur while reading or writing during a transaction, + /// or committing the transaction. + type Error; + + /// An entity which is being read from or written to during a transaction. + type Handle; + + /// Commits the transaction. + fn commit(self) -> Result<(), Self::Error>; + + /// Aborts the transaction. + /// + /// Any pending operations will not be saved. 
+ fn abort(self) { + unimplemented!("Abort operations should be performed in Drop implementations.") + } +} + +/// A transaction with the capability to read from a given [`Handle`](Transaction::Handle). +pub trait Readable: Transaction { + /// Returns the value from the corresponding key from a given [`Transaction::Handle`]. + fn read(&self, handle: Self::Handle, key: &[u8]) -> Result, Self::Error>; +} + +/// A transaction with the capability to write to a given [`Handle`](Transaction::Handle). +pub trait Writable: Transaction { + /// Inserts a key-value pair into a given [`Transaction::Handle`]. + fn write(&mut self, handle: Self::Handle, key: &[u8], value: &[u8]) -> Result<(), Self::Error>; +} + +/// A source of transactions e.g. values that implement [`Readable`] +/// and/or [`Writable`]. +pub trait TransactionSource<'a> { + /// An error which can occur while creating a read or read-write + /// transaction. + type Error; + + /// An entity which is being read from or written to during a transaction. + type Handle; + + /// Represents the type of read transactions. + type ReadTransaction: Readable; + + /// Represents the type of read-write transactions. + type ReadWriteTransaction: Readable + + Writable; + + /// Creates a read transaction. + fn create_read_txn(&'a self) -> Result; + + /// Creates a read-write transaction. + fn create_read_write_txn(&'a self) -> Result; +} diff --git a/storage/src/global_state/trie/gens.rs b/storage/src/global_state/trie/gens.rs new file mode 100644 index 0000000000..932c08034f --- /dev/null +++ b/storage/src/global_state/trie/gens.rs @@ -0,0 +1,36 @@ +//! Generators for trie related types. +use proptest::{collection::vec, option, prelude::*}; + +use casper_types::{ + gens::{key_arb, stored_value_arb, trie_pointer_arb}, + Key, StoredValue, +}; + +use super::{Pointer, PointerBlock, Trie}; + +/// Generates a trie pointer block. 
+pub fn trie_pointer_block_arb() -> impl Strategy { + vec(option::of(trie_pointer_arb()), 256).prop_map(|vec| { + let mut ret: [Option; 256] = [Default::default(); 256]; + ret.clone_from_slice(vec.as_slice()); + ret.into() + }) +} + +/// Generates a trie leaf. +pub fn trie_leaf_arb() -> impl Strategy> { + (key_arb(), stored_value_arb()).prop_map(|(key, value)| Trie::Leaf { key, value }) +} + +/// Generates a trie node with a single child. +pub fn trie_extension_arb() -> impl Strategy> { + (vec(any::(), 0..32), trie_pointer_arb()) + .prop_map(|(affix, pointer)| Trie::extension(affix, pointer)) +} + +/// Generates a trie node with multiple children. +pub fn trie_node_arb() -> impl Strategy> { + trie_pointer_block_arb().prop_map(|pointer_block| Trie::Node { + pointer_block: Box::new(pointer_block), + }) +} diff --git a/storage/src/global_state/trie/mod.rs b/storage/src/global_state/trie/mod.rs new file mode 100644 index 0000000000..20ba5b8ddf --- /dev/null +++ b/storage/src/global_state/trie/mod.rs @@ -0,0 +1,696 @@ +//! Core types for a Merkle Trie + +use std::{ + convert::{TryFrom, TryInto}, + fmt::{self, Debug, Display, Formatter}, + iter::Flatten, + mem::MaybeUninit, + slice, +}; + +use datasize::DataSize; +use num_derive::{FromPrimitive, ToPrimitive}; +use num_traits::{FromPrimitive, ToPrimitive}; +use serde::{ + de::{self, MapAccess, Visitor}, + ser::SerializeMap, + Deserialize, Deserializer, Serialize, Serializer, +}; + +use casper_types::{ + bytesrepr::{self, Bytes, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + global_state::Pointer, + Digest, +}; + +#[cfg(test)] +pub mod gens; + +#[cfg(test)] +mod tests; + +pub(crate) const USIZE_EXCEEDS_U8: &str = "usize exceeds u8"; +pub(crate) const RADIX: usize = 256; + +/// A parent is represented as a pair of a child index and a node or extension. +pub type Parents = Vec<(u8, Trie)>; + +/// Type alias for values under pointer blocks. 
+pub type PointerBlockValue = Option; + +/// Type alias for arrays of pointer block values. +pub type PointerBlockArray = [PointerBlockValue; RADIX]; + +/// Represents the underlying structure of a node in a Merkle Trie +#[derive(Copy, Clone)] +pub struct PointerBlock(PointerBlockArray); + +impl Serialize for PointerBlock { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + // We are going to use the sparse representation of pointer blocks + // non-None entries and their indices will be output + + // Create the sequence serializer, reserving the necessary number of slots + let elements_count = self.0.iter().filter(|element| element.is_some()).count(); + let mut map = serializer.serialize_map(Some(elements_count))?; + + // Store the non-None entries with their indices + for (index, maybe_pointer_block) in self.0.iter().enumerate() { + if let Some(pointer_block_value) = maybe_pointer_block { + map.serialize_entry(&(index as u8), pointer_block_value)?; + } + } + map.end() + } +} + +impl<'de> Deserialize<'de> for PointerBlock { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + struct PointerBlockDeserializer; + + impl<'de> Visitor<'de> for PointerBlockDeserializer { + type Value = PointerBlock; + + fn expecting(&self, formatter: &mut Formatter) -> fmt::Result { + formatter.write_str("sparse representation of a PointerBlock") + } + + fn visit_map(self, mut access: M) -> Result + where + M: MapAccess<'de>, + { + let mut pointer_block = PointerBlock::new(); + + // Unpack the sparse representation + while let Some((index, pointer_block_value)) = access.next_entry::()? 
{ + let element = pointer_block.0.get_mut(usize::from(index)).ok_or_else(|| { + de::Error::custom(format!("invalid index {} in pointer block value", index)) + })?; + *element = Some(pointer_block_value); + } + + Ok(pointer_block) + } + } + deserializer.deserialize_map(PointerBlockDeserializer) + } +} + +impl PointerBlock { + /// No-arg constructor for `PointerBlock`. Delegates to `Default::default()`. + pub fn new() -> Self { + Default::default() + } + + /// Constructs a `PointerBlock` from a slice of indexed `Pointer`s. + pub fn from_indexed_pointers(indexed_pointers: &[(u8, Pointer)]) -> Self { + let mut ret = PointerBlock::new(); + for (idx, ptr) in indexed_pointers.iter() { + ret[*idx as usize] = Some(*ptr); + } + ret + } + + /// Deconstructs a `PointerBlock` into an iterator of indexed `Pointer`s. + pub fn as_indexed_pointers(&self) -> impl Iterator + '_ { + self.0 + .iter() + .enumerate() + .filter_map(|(index, maybe_pointer)| { + maybe_pointer + .map(|value| (index.try_into().expect(USIZE_EXCEEDS_U8), value.to_owned())) + }) + } + + /// Gets the count of children for this `PointerBlock`. + pub fn child_count(&self) -> usize { + self.as_indexed_pointers().count() + } +} + +impl From for PointerBlock { + fn from(src: PointerBlockArray) -> Self { + PointerBlock(src) + } +} + +impl PartialEq for PointerBlock { + #[inline] + fn eq(&self, other: &PointerBlock) -> bool { + self.0[..] == other.0[..] 
+ } +} + +impl Eq for PointerBlock {} + +impl Default for PointerBlock { + fn default() -> Self { + PointerBlock([Default::default(); RADIX]) + } +} + +impl ToBytes for PointerBlock { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut result = bytesrepr::allocate_buffer(self)?; + for pointer in self.0.iter() { + result.append(&mut pointer.to_bytes()?); + } + Ok(result) + } + + fn serialized_length(&self) -> usize { + self.0.iter().map(ToBytes::serialized_length).sum() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + for pointer in self.0.iter() { + pointer.write_bytes(writer)?; + } + Ok(()) + } +} + +impl FromBytes for PointerBlock { + fn from_bytes(mut bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let pointer_block_array = { + // With MaybeUninit here we can avoid default initialization of result array below. + let mut result: MaybeUninit = MaybeUninit::uninit(); + let result_ptr = result.as_mut_ptr() as *mut PointerBlockValue; + for i in 0..RADIX { + let (t, remainder) = match FromBytes::from_bytes(bytes) { + Ok(success) => success, + Err(error) => { + for j in 0..i { + unsafe { result_ptr.add(j).drop_in_place() } + } + return Err(error); + } + }; + unsafe { result_ptr.add(i).write(t) }; + bytes = remainder; + } + unsafe { result.assume_init() } + }; + Ok((PointerBlock(pointer_block_array), bytes)) + } +} + +impl core::ops::Index for PointerBlock { + type Output = PointerBlockValue; + + #[inline] + fn index(&self, index: usize) -> &Self::Output { + let PointerBlock(dat) = self; + &dat[index] + } +} + +impl core::ops::IndexMut for PointerBlock { + #[inline] + fn index_mut(&mut self, index: usize) -> &mut Self::Output { + let PointerBlock(dat) = self; + &mut dat[index] + } +} + +impl core::ops::Index> for PointerBlock { + type Output = [PointerBlockValue]; + + #[inline] + fn index(&self, index: core::ops::Range) -> &[PointerBlockValue] { + let PointerBlock(dat) = self; + &dat[index] + } +} + +impl 
core::ops::Index> for PointerBlock { + type Output = [PointerBlockValue]; + + #[inline] + fn index(&self, index: core::ops::RangeTo) -> &[PointerBlockValue] { + let PointerBlock(dat) = self; + &dat[index] + } +} + +impl core::ops::Index> for PointerBlock { + type Output = [PointerBlockValue]; + + #[inline] + fn index(&self, index: core::ops::RangeFrom) -> &[PointerBlockValue] { + let PointerBlock(dat) = self; + &dat[index] + } +} + +impl core::ops::Index for PointerBlock { + type Output = [PointerBlockValue]; + + #[inline] + fn index(&self, index: core::ops::RangeFull) -> &[PointerBlockValue] { + let PointerBlock(dat) = self; + &dat[index] + } +} + +impl ::std::fmt::Debug for PointerBlock { + #[allow(clippy::assertions_on_constants)] + fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { + assert!(RADIX > 1, "RADIX must be > 1"); + write!(f, "{}([", stringify!(PointerBlock))?; + write!(f, "{:?}", self.0[0])?; + for item in self.0[1..].iter() { + write!(f, ", {:?}", item)?; + } + write!(f, "])") + } +} + +/// Newtype representing a trie node in its raw form without deserializing into `Trie`. +#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize, DataSize)] +pub struct TrieRaw(Bytes); + +impl TrieRaw { + /// Constructs an instance of [`TrieRaw`]. + pub fn new(bytes: Bytes) -> Self { + TrieRaw(bytes) + } + + /// Consumes self and returns inner bytes. + pub fn into_inner(self) -> Bytes { + self.0 + } + + /// Returns a reference inner bytes. + pub fn inner(&self) -> &Bytes { + &self.0 + } + + /// Returns a hash of the inner bytes. 
+ pub fn hash(&self) -> Digest { + Digest::hash_into_chunks_if_necessary(self.inner()) + } +} + +impl ToBytes for TrieRaw { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.0.to_bytes() + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } +} + +impl FromBytes for TrieRaw { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (bytes, rem) = Bytes::from_bytes(bytes)?; + Ok((TrieRaw(bytes), rem)) + } +} + +/// Represents all possible serialization tags for a [`Trie`] enum. +#[derive(Debug, Copy, Clone, PartialEq, Eq, FromPrimitive, ToPrimitive)] +#[repr(u8)] +pub(crate) enum TrieTag { + /// Represents a tag for a [`Trie::Leaf`] variant. + Leaf = 0, + /// Represents a tag for a [`Trie::Node`] variant. + Node = 1, + /// Represents a tag for a [`Trie::Extension`] variant. + Extension = 2, +} + +impl From for u8 { + fn from(value: TrieTag) -> Self { + TrieTag::to_u8(&value).unwrap() // SAFETY: TrieTag is represented as u8. + } +} + +/// Represents a Merkle Trie. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub enum Trie { + /// Trie leaf. + Leaf { + /// Leaf key. + key: K, + /// Leaf value. + value: V, + }, + /// Trie node. + Node { + /// Node pointer block. + pointer_block: Box, + }, + /// Trie extension node. + Extension { + /// Extension node affix bytes. + affix: Bytes, + /// Extension node pointer. + pointer: Pointer, + }, +} + +impl Display for Trie +where + K: Debug, + V: Debug, +{ + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + write!(f, "{:?}", self) + } +} + +impl Trie { + fn tag(&self) -> TrieTag { + match self { + Trie::Leaf { .. } => TrieTag::Leaf, + Trie::Node { .. } => TrieTag::Node, + Trie::Extension { .. } => TrieTag::Extension, + } + } + + /// Tag type for current trie element. + pub fn tag_type(&self) -> String { + match self { + Trie::Leaf { .. } => "Leaf".to_string(), + Trie::Node { .. } => "Node".to_string(), + Trie::Extension { .. 
} => "Extension".to_string(), + } + } + + /// Constructs a [`Trie::Leaf`] from a given key and value. + pub fn leaf(key: K, value: V) -> Self { + Trie::Leaf { key, value } + } + + /// Constructs a [`Trie::Node`] from a given slice of indexed pointers. + pub fn node(indexed_pointers: &[(u8, Pointer)]) -> Self { + let pointer_block = PointerBlock::from_indexed_pointers(indexed_pointers); + let pointer_block = Box::new(pointer_block); + Trie::Node { pointer_block } + } + + /// Constructs a [`Trie::Extension`] from a given affix and pointer. + pub fn extension(affix: Vec, pointer: Pointer) -> Self { + Trie::Extension { + affix: affix.into(), + pointer, + } + } + + /// Gets a reference to the root key of this Trie. + pub fn key(&self) -> Option<&K> { + match self { + Trie::Leaf { key, .. } => Some(key), + _ => None, + } + } + + /// Returns the hash of this Trie. + pub fn trie_hash(&self) -> Result + where + Self: ToBytes, + { + self.to_bytes() + .map(|bytes| Digest::hash_into_chunks_if_necessary(&bytes)) + } + + /// Returns bytes representation of this Trie and the hash over those bytes. + pub fn trie_hash_and_bytes(&self) -> Result<(Digest, Vec), bytesrepr::Error> + where + Self: ToBytes, + { + self.to_bytes() + .map(|bytes| (Digest::hash_into_chunks_if_necessary(&bytes), bytes)) + } + + /// Returns a pointer block, if possible. + pub fn as_pointer_block(&self) -> Option<&PointerBlock> { + if let Self::Node { pointer_block } = self { + Some(pointer_block.as_ref()) + } else { + None + } + } + + /// Returns an iterator over descendants of the trie. + pub fn iter_children(&self) -> DescendantsIterator { + match self { + Trie::::Leaf { .. } => DescendantsIterator::ZeroOrOne(None), + Trie::Node { pointer_block } => DescendantsIterator::PointerBlock { + iter: pointer_block.0.iter().flatten(), + }, + Trie::Extension { pointer, .. 
} => { + DescendantsIterator::ZeroOrOne(Some(pointer.into_hash())) + } + } + } +} + +/// Bytes representation of a `Trie` that is a `Trie::Leaf` variant. +/// The bytes for this trie leaf also include the `Trie::Tag`. +#[derive(Debug, Clone, PartialEq)] +pub(crate) struct TrieLeafBytes(Bytes); + +impl TrieLeafBytes { + pub(crate) fn bytes(&self) -> &Bytes { + &self.0 + } + + pub(crate) fn try_deserialize_leaf_key( + &self, + ) -> Result<(K, &[u8]), bytesrepr::Error> { + let (tag_byte, rem) = u8::from_bytes(&self.0)?; + let tag = TrieTag::from_u8(tag_byte).ok_or(bytesrepr::Error::Formatting)?; + assert_eq!( + tag, + TrieTag::Leaf, + "Unexpected layout for trie leaf bytes. Expected `TrieTag::Leaf` but got {:?}", + tag + ); + K::from_bytes(rem) + } +} + +impl From<&[u8]> for TrieLeafBytes { + fn from(value: &[u8]) -> Self { + Self(value.into()) + } +} + +impl From> for TrieLeafBytes { + fn from(value: Vec) -> Self { + Self(value.into()) + } +} + +/// Like `Trie` but does not deserialize the leaf when constructed. +#[derive(Debug, Clone, PartialEq)] +pub(crate) enum LazilyDeserializedTrie { + /// Serialized trie leaf bytes + Leaf(TrieLeafBytes), + /// Trie node. + Node { pointer_block: Box }, + /// Trie extension node. + Extension { affix: Bytes, pointer: Pointer }, +} + +impl LazilyDeserializedTrie { + pub(crate) fn iter_children(&self) -> DescendantsIterator { + match self { + LazilyDeserializedTrie::Leaf(_) => { + // Leaf bytes does not have any children + DescendantsIterator::ZeroOrOne(None) + } + LazilyDeserializedTrie::Node { pointer_block } => DescendantsIterator::PointerBlock { + iter: pointer_block.0.iter().flatten(), + }, + LazilyDeserializedTrie::Extension { pointer, .. 
} => { + DescendantsIterator::ZeroOrOne(Some(pointer.into_hash())) + } + } + } +} + +impl FromBytes for LazilyDeserializedTrie { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag_byte, rem) = u8::from_bytes(bytes)?; + let tag = TrieTag::from_u8(tag_byte).ok_or(bytesrepr::Error::Formatting)?; + match tag { + TrieTag::Leaf => Ok((LazilyDeserializedTrie::Leaf(bytes.into()), &[])), + TrieTag::Node => { + let (pointer_block, rem) = PointerBlock::from_bytes(rem)?; + Ok(( + LazilyDeserializedTrie::Node { + pointer_block: Box::new(pointer_block), + }, + rem, + )) + } + TrieTag::Extension => { + let (affix, rem) = FromBytes::from_bytes(rem)?; + let (pointer, rem) = Pointer::from_bytes(rem)?; + Ok((LazilyDeserializedTrie::Extension { affix, pointer }, rem)) + } + } + } +} + +impl TryFrom> for LazilyDeserializedTrie +where + K: ToBytes, + V: ToBytes, +{ + type Error = bytesrepr::Error; + + fn try_from(value: Trie) -> Result { + match value { + Trie::Leaf { .. } => { + let serialized_bytes = ToBytes::to_bytes(&value)?; + Ok(LazilyDeserializedTrie::Leaf(serialized_bytes.into())) + } + Trie::Node { pointer_block } => Ok(LazilyDeserializedTrie::Node { pointer_block }), + Trie::Extension { affix, pointer } => { + Ok(LazilyDeserializedTrie::Extension { affix, pointer }) + } + } + } +} + +/// An iterator over the descendants of a trie node. +pub enum DescendantsIterator<'a> { + /// A leaf (zero descendants) or extension (one descendant) being iterated. + ZeroOrOne(Option), + /// A pointer block being iterated. + PointerBlock { + /// An iterator over the non-None entries of the `PointerBlock`. 
+ iter: Flatten>>, + }, +} + +impl Iterator for DescendantsIterator<'_> { + type Item = Digest; + + fn next(&mut self) -> Option { + match *self { + DescendantsIterator::ZeroOrOne(ref mut maybe_digest) => maybe_digest.take(), + DescendantsIterator::PointerBlock { ref mut iter } => { + iter.next().map(|pointer| *pointer.hash()) + } + } + } +} + +impl ToBytes for Trie +where + K: ToBytes, + V: ToBytes, +{ + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut ret = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut ret)?; + Ok(ret) + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + + match self { + Trie::Leaf { key, value } => key.serialized_length() + value.serialized_length(), + Trie::Node { pointer_block } => pointer_block.serialized_length(), + Trie::Extension { affix, pointer } => { + affix.serialized_length() + pointer.serialized_length() + } + } + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + // NOTE: When changing this make sure all partial deserializers that are referencing + // `LazyTrieLeaf` are also updated. 
+ writer.push(u8::from(self.tag())); + match self { + Trie::Leaf { key, value } => { + key.write_bytes(writer)?; + value.write_bytes(writer)?; + } + Trie::Node { pointer_block } => pointer_block.write_bytes(writer)?, + Trie::Extension { affix, pointer } => { + affix.write_bytes(writer)?; + pointer.write_bytes(writer)?; + } + } + Ok(()) + } +} + +impl FromBytes for Trie { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag_byte, rem) = u8::from_bytes(bytes)?; + let tag = TrieTag::from_u8(tag_byte).ok_or(bytesrepr::Error::Formatting)?; + match tag { + TrieTag::Leaf => { + let (key, rem) = K::from_bytes(rem)?; + let (value, rem) = V::from_bytes(rem)?; + Ok((Trie::Leaf { key, value }, rem)) + } + TrieTag::Node => { + let (pointer_block, rem) = PointerBlock::from_bytes(rem)?; + Ok(( + Trie::Node { + pointer_block: Box::new(pointer_block), + }, + rem, + )) + } + TrieTag::Extension => { + let (affix, rem) = FromBytes::from_bytes(rem)?; + let (pointer, rem) = Pointer::from_bytes(rem)?; + Ok((Trie::Extension { affix, pointer }, rem)) + } + } + } +} + +impl TryFrom for Trie { + type Error = bytesrepr::Error; + + fn try_from(value: LazilyDeserializedTrie) -> Result { + match value { + LazilyDeserializedTrie::Leaf(leaf_bytes) => { + let (key, value_bytes) = leaf_bytes.try_deserialize_leaf_key()?; + let value = bytesrepr::deserialize_from_slice(value_bytes)?; + Ok(Self::Leaf { key, value }) + } + LazilyDeserializedTrie::Node { pointer_block } => Ok(Self::Node { pointer_block }), + LazilyDeserializedTrie::Extension { affix, pointer } => { + Ok(Self::Extension { affix, pointer }) + } + } + } +} + +pub(crate) mod operations { + use casper_types::{ + bytesrepr::{self, ToBytes}, + Digest, + }; + + use crate::global_state::trie::Trie; + + /// Creates a tuple containing an empty root hash and an empty root (a node + /// with an empty pointer block) + pub fn create_hashed_empty_trie( + ) -> Result<(Digest, Trie), bytesrepr::Error> { + let root: Trie = 
Trie::Node { + pointer_block: Default::default(), + }; + let root_bytes: Vec = root.to_bytes()?; + Ok((Digest::hash(root_bytes), root)) + } +} diff --git a/storage/src/global_state/trie/tests.rs b/storage/src/global_state/trie/tests.rs new file mode 100644 index 0000000000..35b7e2dd8a --- /dev/null +++ b/storage/src/global_state/trie/tests.rs @@ -0,0 +1,299 @@ +#[test] +fn radix_is_256() { + assert_eq!( + super::RADIX, + 256, + "Changing RADIX alone might cause things to break" + ); +} + +mod pointer_block { + use casper_types::U256; + + use crate::global_state::trie::*; + + /// A defense against changes to [`RADIX`](history::trie::RADIX). + #[test] + fn debug_formatter_succeeds() { + let _ = format!("{:?}", PointerBlock::new()); + } + + #[test] + fn assignment_and_indexing() { + let test_hash = Digest::hash(b"TrieTrieAgain"); + let leaf_pointer = Some(Pointer::LeafPointer(test_hash)); + let mut pointer_block = PointerBlock::new(); + pointer_block[0] = leaf_pointer; + pointer_block[RADIX - 1] = leaf_pointer; + assert_eq!(leaf_pointer, pointer_block[0]); + assert_eq!(leaf_pointer, pointer_block[RADIX - 1]); + assert_eq!(None, pointer_block[1]); + assert_eq!(None, pointer_block[RADIX - 2]); + } + + #[test] + #[should_panic] + fn assignment_off_end() { + let test_hash = Digest::hash(b"TrieTrieAgain"); + let leaf_pointer = Some(Pointer::LeafPointer(test_hash)); + let mut pointer_block = PointerBlock::new(); + pointer_block[RADIX] = leaf_pointer; + } + + #[test] + #[should_panic] + fn indexing_off_end() { + let pointer_block = PointerBlock::new(); + let _val = pointer_block[RADIX]; + } + + #[test] + fn trie_node_descendants_iterator() { + fn digest_from_value>(value: T) -> Digest { + let mut value_bytes = [0; Digest::LENGTH]; + let u256: U256 = value.into(); + u256.to_big_endian(&mut value_bytes); + Digest::from(value_bytes) + } + + let pointers: Vec<_> = (0..=255u8) + .rev() + .filter_map(|index| { + let hash = digest_from_value(index); + if index % 3 == 0 { + 
Some((index, Pointer::NodePointer(hash))) + } else if index % 3 == 1 { + Some((index, Pointer::LeafPointer(hash))) + } else if index % 3 == 2 { + None + } else { + unreachable!() + } + }) + .collect(); + + let trie = Trie::<(), ()>::Node { + pointer_block: Box::new(PointerBlock::from_indexed_pointers(pointers.as_slice())), + }; + let mut descendants = trie.iter_children(); + let hashes: Vec = descendants.by_ref().collect(); + assert_eq!( + hashes, + pointers + .into_iter() + .rev() // reverse again for correct order + .map(|(_idx, pointer)| *pointer.hash()) + .collect::>() + ); + + assert_eq!(descendants.next(), None); + assert_eq!(descendants.next(), None); + } +} + +mod proptests { + use std::convert::TryInto; + + use proptest::prelude::*; + + use casper_types::{ + bytesrepr::{self, deserialize_from_slice, FromBytes, ToBytes}, + gens::{all_keys_arb, blake2b_hash_arb, trie_pointer_arb}, + Digest, Key, StoredValue, + }; + + use crate::global_state::trie::{gens::*, LazilyDeserializedTrie, PointerBlock, Trie}; + + fn test_trie_roundtrip_to_lazy_trie(trie: &Trie) + where + K: ToBytes + FromBytes + PartialEq + std::fmt::Debug + Clone, + V: ToBytes + FromBytes + PartialEq + std::fmt::Debug + Clone, + { + let serialized = ToBytes::to_bytes(trie).expect("Unable to serialize data"); + + let expected_lazy_trie_leaf: LazilyDeserializedTrie = (*trie) + .clone() + .try_into() + .expect("Cannot convert Trie to LazilyDeserializedTrie"); + + let deserialized_from_slice: LazilyDeserializedTrie = + deserialize_from_slice(&serialized).expect("Unable to deserialize data"); + assert_eq!(expected_lazy_trie_leaf, deserialized_from_slice); + assert_eq!( + *trie, + deserialized_from_slice + .clone() + .try_into() + .expect("Expected to be able to convert LazilyDeserializedTrie to Trie") + ); + if let LazilyDeserializedTrie::Leaf(leaf_bytes) = deserialized_from_slice { + let (key, _) = leaf_bytes + .try_deserialize_leaf_key::() + .expect("Should have been able to deserialize key"); + 
assert_eq!(key, *trie.key().unwrap()); + }; + + let deserialized: LazilyDeserializedTrie = + bytesrepr::deserialize(serialized).expect("Unable to deserialize data"); + assert_eq!(expected_lazy_trie_leaf, deserialized); + assert_eq!( + *trie, + deserialized + .clone() + .try_into() + .expect("Expected to be able to convert LazilyDeserializedTrie to Trie") + ); + if let LazilyDeserializedTrie::Leaf(leaf_bytes) = deserialized { + let (key, _) = leaf_bytes + .try_deserialize_leaf_key::() + .expect("Should have been able to deserialize key"); + assert_eq!(key, *trie.key().unwrap()); + }; + } + + proptest! { + #[test] + fn roundtrip_blake2b_hash(hash in blake2b_hash_arb()) { + bytesrepr::test_serialization_roundtrip(&hash); + } + + #[test] + fn roundtrip_trie_pointer(pointer in trie_pointer_arb()) { + bytesrepr::test_serialization_roundtrip(&pointer); + } + + #[test] + fn roundtrip_trie_pointer_block(pointer_block in trie_pointer_block_arb()) { + bytesrepr::test_serialization_roundtrip(&pointer_block); + } + + #[test] + fn bytesrepr_roundtrip_trie_leaf(trie_leaf in trie_leaf_arb()) { + bytesrepr::test_serialization_roundtrip(&trie_leaf); + } + + #[test] + fn bytesrepr_roundtrip_trie_leaf_to_lazy_trie(trie_leaf in trie_leaf_arb()) { + test_trie_roundtrip_to_lazy_trie(&trie_leaf) + } + + #[test] + fn bytesrepr_roundtrip_trie_extension_to_lazy_trie(trie_extension in trie_extension_arb()) { + test_trie_roundtrip_to_lazy_trie(&trie_extension) + } + + #[test] + fn bytesrepr_roundtrip_trie_node_to_lazy_trie(trie_node in trie_node_arb()) { + test_trie_roundtrip_to_lazy_trie(&trie_node); + } + + #[test] + fn bytesrepr_roundtrip_trie_extension(trie_extension in trie_extension_arb()) { + bytesrepr::test_serialization_roundtrip(&trie_extension); + } + + #[test] + fn bytesrepr_roundtrip_trie_node(trie_node in trie_node_arb()) { + bytesrepr::test_serialization_roundtrip(&trie_node); + } + + #[test] + fn roundtrip_key(key in all_keys_arb()) { + 
bytesrepr::test_serialization_roundtrip(&key); + } + + #[test] + fn serde_roundtrip_trie_pointer_block(pointer_block in trie_pointer_block_arb()) { + let json_str = serde_json::to_string(&pointer_block)?; + let deserialized_pointer_block: PointerBlock = serde_json::from_str(&json_str)?; + assert_eq!(pointer_block, deserialized_pointer_block) + } + + #[test] + fn serde_roundtrip_trie_leaf(trie_leaf in trie_leaf_arb()) { + let json_str = serde_json::to_string(&trie_leaf)?; + let deserialized_trie: Trie = serde_json::from_str(&json_str)?; + assert_eq!(trie_leaf, deserialized_trie) + } + + #[test] + fn serde_roundtrip_trie_node(trie_node in trie_node_arb()) { + let json_str = serde_json::to_string(&trie_node)?; + let deserialized_trie: Trie = serde_json::from_str(&json_str)?; + assert_eq!(trie_node, deserialized_trie) + } + + #[test] + fn serde_roundtrip_trie_extension(trie_extension in trie_extension_arb()) { + let json_str = serde_json::to_string(&trie_extension)?; + let deserialized_trie: Trie = serde_json::from_str(&json_str)?; + assert_eq!(trie_extension, deserialized_trie) + } + + #[test] + fn bincode_roundtrip_trie_leaf(trie_leaf in trie_leaf_arb()) { + let bincode_bytes = bincode::serialize(&trie_leaf)?; + let deserialized_trie = bincode::deserialize(&bincode_bytes)?; + assert_eq!(trie_leaf, deserialized_trie) + } + + #[test] + fn bincode_roundtrip_trie_node(trie_node in trie_node_arb()) { + let bincode_bytes = bincode::serialize(&trie_node)?; + let deserialized_trie = bincode::deserialize(&bincode_bytes)?; + assert_eq!(trie_node, deserialized_trie) + } + + #[test] + fn bincode_roundtrip_trie_extension(trie_extension in trie_extension_arb()) { + let bincode_bytes = bincode::serialize(&trie_extension)?; + let deserialized_trie = bincode::deserialize(&bincode_bytes)?; + assert_eq!(trie_extension, deserialized_trie) + } + + #[test] + fn bincode_roundtrip_trie_pointer_block(pointer_block in trie_pointer_block_arb()) { + let bincode_bytes = 
bincode::serialize(&pointer_block)?; + let deserialized_pointer_block = bincode::deserialize(&bincode_bytes)?; + assert_eq!(pointer_block, deserialized_pointer_block) + } + + #[test] + fn bincode_roundtrip_key(key in all_keys_arb()) { + let bincode_bytes = bincode::serialize(&key)?; + let deserialized_key = bincode::deserialize(&bincode_bytes)?; + prop_assert_eq!(key, deserialized_key) + } + + #[test] + fn serde_roundtrip_key(key in all_keys_arb()) { + let json_str = serde_json::to_string(&key)?; + let deserialized_key = serde_json::from_str(&json_str)?; + assert_eq!(key, deserialized_key) + } + + #[test] + fn iter_children_trie_leaf(trie_leaf in trie_leaf_arb()) { + assert!(trie_leaf.iter_children().next().is_none()); + } + + #[test] + fn iter_children_trie_extension(trie_extension in trie_extension_arb()) { + let children = if let Trie::Extension { pointer, .. } = trie_extension { + vec![*pointer.hash()] + } else { + unreachable!() + }; + assert_eq!(children, trie_extension.iter_children().collect::>()); + } + + #[test] + fn iter_children_trie_node(trie_node in trie_node_arb()) { + let children: Vec = trie_node.as_pointer_block().unwrap() + .as_indexed_pointers() + .map(|(_index, ptr)| *ptr.hash()) + .collect(); + assert_eq!(children, trie_node.iter_children().collect::>()); + } + } +} diff --git a/storage/src/global_state/trie_store/cache/mod.rs b/storage/src/global_state/trie_store/cache/mod.rs new file mode 100644 index 0000000000..312c9b2eda --- /dev/null +++ b/storage/src/global_state/trie_store/cache/mod.rs @@ -0,0 +1,374 @@ +use std::borrow::Cow; + +use casper_types::{ + bytesrepr::{self, Bytes, FromBytes, ToBytes}, + Digest, Pointer, +}; + +use crate::global_state::{ + transaction_source::{Readable, Writable}, + trie::{PointerBlock, Trie, RADIX}, +}; + +use super::{operations::common_prefix, TrieStore}; + +#[derive(Clone, Debug, thiserror::Error, Eq, PartialEq)] +pub enum CacheError { + /// Root not found. 
+ #[error("Root not found: {0:?}")] + RootNotFound(Digest), +} + +// Pointer used by the cache to determine if the node is stored or is loaded in memory. +#[derive(Debug, Clone, PartialEq, Eq)] +enum CachePointer { + InMem(TrieCacheNode), + Stored(Pointer), +} + +impl CachePointer { + /// Loads the node in memory from the specified store if it's not already loaded. + /// Returns an error if the node can't be found in the store. + fn load_from_store(&mut self, txn: &T, store: &S) -> Result<(), E> + where + K: FromBytes, + V: FromBytes, + T: Readable, + S: TrieStore, + S::Error: From, + E: From + From + From, + { + if let CachePointer::Stored(pointer) = self { + let Some(stored_node) = store.get(txn, pointer.hash())? else { + return Err(CacheError::RootNotFound(pointer.into_hash()).into()); + }; + let trie_cache_node = stored_node.into(); + *self = CachePointer::InMem(trie_cache_node); + } + Ok(()) + } +} + +/// A node representation used by the cache. This follows the Trie implementation for easy +/// conversion. +#[derive(Debug, Clone, PartialEq, Eq)] +enum TrieCacheNode { + Leaf { + key: K, + value: V, + }, + Branch { + pointer_block: Vec>>, + }, + Extension { + affix: Bytes, + pointer: Box>, + }, +} + +impl From> for TrieCacheNode { + fn from(node: Trie) -> Self { + match node { + Trie::Leaf { key, value } => Self::Leaf { key, value }, + Trie::Node { pointer_block } => { + let mut new_pointer_block = Vec::with_capacity(RADIX); + for i in 0..RADIX { + new_pointer_block.push(pointer_block[i].map(|ptr| CachePointer::Stored(ptr))); + } + Self::Branch { + pointer_block: new_pointer_block, + } + } + Trie::Extension { affix, pointer } => Self::Extension { + affix, + pointer: Box::new(CachePointer::Stored(pointer)), + }, + } + } +} + +// An in-memory cache for Trie nodes that is backed up by a store. 
+pub struct TrieCache<'a, K, V, S> { + root: TrieCacheNode, + store: &'a S, +} + +impl<'a, K, V, S> TrieCache<'a, K, V, S> +where + K: ToBytes + FromBytes + Clone + Eq, + V: ToBytes + FromBytes + Clone + Eq, + S: TrieStore + 'a, +{ + pub fn new(txn: &T, store: &'a S, root: &Digest) -> Result + where + T: Readable, + S::Error: From, + E: From + From + From, + { + match store.get(txn, root)? { + Some(node) => Ok(Self { + root: node.into(), + store, + }), + None => Err(CacheError::RootNotFound(*root).into()), + } + } + + pub fn insert(&mut self, key: K, value: V, txn: &T) -> Result<(), E> + where + T: Readable, + S::Error: From, + E: From + From + From, + { + let path: Vec = key.to_bytes()?; + + let mut depth: usize = 0; + let mut current = &mut self.root; + + while depth < path.len() { + match current { + TrieCacheNode::Branch { pointer_block } => { + let index: usize = { + assert!(depth < path.len(), "depth must be < {}", path.len()); + path[depth].into() + }; + + let pointer = &mut pointer_block[index]; + if let Some(next) = pointer { + if depth == path.len() - 1 { + let leaf = TrieCacheNode::Leaf { key, value }; + *next = CachePointer::InMem(leaf); + return Ok(()); + } else { + depth += 1; + + next.load_from_store::<_, _, E>(txn, self.store)?; + if let CachePointer::InMem(next) = next { + current = next; + } else { + unreachable!("Stored pointer should have been converted"); + } + } + } else { + let leaf = TrieCacheNode::Leaf { key, value }; + let _ = std::mem::replace(pointer, Some(CachePointer::InMem(leaf))); + return Ok(()); + } + } + TrieCacheNode::Leaf { + key: old_key, + value: old_value, + } => { + if *old_key == key { + *old_value = value; + } else { + let mut pointer_block = Vec::with_capacity(RADIX); + pointer_block.resize_with(RADIX, || None::>); + let old_key_bytes = old_key.to_bytes()?; + + let shared_path = common_prefix(&old_key_bytes, &path); + + let existing_idx = old_key_bytes[shared_path.len()] as usize; + pointer_block[existing_idx] = + 
Some(CachePointer::InMem(TrieCacheNode::Leaf { + key: old_key.clone(), + value: old_value.clone(), + })); + + let new_idx = path[shared_path.len()] as usize; + pointer_block[new_idx] = + Some(CachePointer::InMem(TrieCacheNode::Leaf { key, value })); + + let new_affix = { &shared_path[depth..] }; + *current = if !new_affix.is_empty() { + TrieCacheNode::Extension { + affix: Bytes::from(new_affix), + pointer: Box::new(CachePointer::InMem(TrieCacheNode::Branch { + pointer_block, + })), + } + } else { + TrieCacheNode::Branch { pointer_block } + }; + } + return Ok(()); + } + TrieCacheNode::Extension { affix, ref pointer } + if path.len() < depth + affix.len() + || affix.as_ref() != &path[depth..depth + affix.len()] => + { + // We might be trying to store a key that is shorter than the keys that are + // already stored. In this case, we would need to split this extension. + // We also need to split this extension if the affix changes. + + // Is there something common between the new key and the old key? + let shared_prefix = common_prefix(affix, &path[depth..]); + + // Need to split the node at the byte that is different. + let mut pointer_block = Vec::with_capacity(RADIX); + pointer_block.resize_with(RADIX, || None::>); + + // Add the new key under a leaf where the paths diverge. 
+ pointer_block[path[depth + shared_prefix.len()] as usize] = + Some(CachePointer::InMem(TrieCacheNode::Leaf { key, value })); + + let post_branch_affix = &affix[shared_prefix.len() + 1..]; + if !post_branch_affix.is_empty() { + let post_extension = TrieCacheNode::Extension { + affix: Bytes::from(post_branch_affix), + pointer: pointer.clone(), + }; + let existing_idx = affix[shared_prefix.len()] as usize; + pointer_block[existing_idx] = Some(CachePointer::InMem(post_extension)); + } else { + let existing_idx = affix[shared_prefix.len()] as usize; + pointer_block[existing_idx] = Some(*pointer.clone()); + } + + let new_branch = TrieCacheNode::Branch { pointer_block }; + let next = if !shared_prefix.is_empty() { + // Create an extension node with the common part + TrieCacheNode::Extension { + affix: Bytes::from(shared_prefix), + pointer: Box::new(CachePointer::InMem(new_branch)), + } + } else { + new_branch + }; + + *current = next; + return Ok(()); + } + TrieCacheNode::Extension { + affix, + ref mut pointer, + } => { + depth += affix.len(); + pointer.load_from_store::<_, _, E>(txn, self.store)?; + if let CachePointer::InMem(next) = pointer.as_mut() { + current = next; + } else { + unreachable!("Stored pointer should have been converted"); + } + } + } + } + Ok(()) + } + + fn traverse_and_store( + node: TrieCacheNode, + txn: &mut T, + store: &S, + ) -> Result + where + T: Readable + Writable, + S::Error: From, + E: From + From + From, + { + match node { + TrieCacheNode::Leaf { key, value } => { + let trie_leaf = Trie::leaf(key, value); + let (hash, trie_bytes) = trie_leaf.trie_hash_and_bytes()?; + store.put_raw(txn, &hash, Cow::from(trie_bytes))?; + Ok(Pointer::LeafPointer(hash)) + } + TrieCacheNode::Branch { mut pointer_block } => { + let mut trie_pointer_block = PointerBlock::new(); + for i in 0..RADIX { + trie_pointer_block[i] = Option::take(&mut pointer_block[i]) + .map(|child| match child { + CachePointer::InMem(in_mem_child) => { + Self::traverse_and_store::<_, 
E>(in_mem_child, txn, store) + } + CachePointer::Stored(ptr) => Ok(ptr), + }) + .transpose()?; + } + + let trie_node = Trie::::Node { + pointer_block: Box::new(trie_pointer_block), + }; + let (hash, trie_bytes) = trie_node.trie_hash_and_bytes()?; + store.put_raw(txn, &hash, Cow::from(trie_bytes))?; + Ok(Pointer::NodePointer(hash)) + } + TrieCacheNode::Extension { pointer, affix } => { + let pointer = match *pointer { + CachePointer::InMem(in_mem_ptr) => { + Self::traverse_and_store::<_, E>(in_mem_ptr, txn, store) + } + CachePointer::Stored(ptr) => Ok(ptr), + }?; + + let trie_extension = Trie::::extension(affix.to_vec(), pointer); + let (hash, trie_bytes) = trie_extension.trie_hash_and_bytes()?; + store.put_raw(txn, &hash, Cow::from(trie_bytes))?; + Ok(Pointer::NodePointer(hash)) + } + } + } + + pub fn store_cache(self, txn: &mut T) -> Result + where + T: Readable + Writable, + S::Error: From, + E: From + From + From, + { + Self::traverse_and_store::<_, E>(self.root, txn, self.store) + .map(|root_pointer| root_pointer.into_hash()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + impl TrieCache<'_, K, V, S> + where + K: ToBytes + FromBytes + Clone + Eq, + V: ToBytes + FromBytes + Clone + Eq, + S: TrieStore, + { + fn traverse(node: TrieCacheNode) -> Pointer { + match node { + TrieCacheNode::Leaf { key, value } => { + // Process the leaf node + let trie_leaf = Trie::leaf(key, value); + let hash = trie_leaf.trie_hash().unwrap(); + Pointer::LeafPointer(hash) + } + TrieCacheNode::Branch { mut pointer_block } => { + let mut trie_pointer_block = PointerBlock::new(); + for i in 0..RADIX { + trie_pointer_block[i] = Option::take(pointer_block.get_mut(i).unwrap()) + .map(|child| match child { + CachePointer::InMem(in_mem_child) => Self::traverse(in_mem_child), + CachePointer::Stored(ptr) => ptr, + }); + } + + let trie_node = Trie::::Node { + pointer_block: Box::new(trie_pointer_block), + }; + let hash = trie_node.trie_hash().unwrap(); + Pointer::NodePointer(hash) + } + 
TrieCacheNode::Extension { pointer, affix } => { + let pointer = match *pointer { + CachePointer::InMem(in_mem_ptr) => Self::traverse(in_mem_ptr), + CachePointer::Stored(ptr) => ptr, + }; + + let trie_extension = Trie::::extension(affix.to_vec(), pointer); + let hash = trie_extension.trie_hash().unwrap(); + Pointer::NodePointer(hash) + } + } + } + + pub fn calculate_root_hash(self) -> Digest { + Self::traverse(self.root).into_hash() + } + } +} diff --git a/storage/src/global_state/trie_store/lmdb.rs b/storage/src/global_state/trie_store/lmdb.rs new file mode 100644 index 0000000000..af81879175 --- /dev/null +++ b/storage/src/global_state/trie_store/lmdb.rs @@ -0,0 +1,325 @@ +//! An LMDB-backed trie store. +//! +//! # Usage +//! +//! ``` +//! use casper_storage::global_state::store::Store; +//! use casper_storage::global_state::transaction_source::{Transaction, TransactionSource}; +//! use casper_storage::global_state::transaction_source::lmdb::LmdbEnvironment; +//! use casper_storage::global_state::trie::{PointerBlock, Trie}; +//! use casper_storage::global_state::trie_store::lmdb::LmdbTrieStore; +//! use casper_types::Digest; +//! use casper_types::global_state::Pointer; +//! use casper_types::bytesrepr::{ToBytes, Bytes}; +//! use lmdb::DatabaseFlags; +//! use tempfile::tempdir; +//! +//! // Create some leaves +//! let leaf_1 = Trie::Leaf { key: Bytes::from([0u8, 0, 0].as_slice()), value: Bytes::from(b"val_1".as_slice()) }; +//! let leaf_2 = Trie::Leaf { key: Bytes::from([1u8, 0, 0].as_slice()), value: Bytes::from(b"val_2".as_slice()) }; +//! +//! // Get their hashes +//! let leaf_1_hash = Digest::hash(&leaf_1.to_bytes().unwrap()); +//! let leaf_2_hash = Digest::hash(&leaf_2.to_bytes().unwrap()); +//! +//! // Create a node +//! let node: Trie = { +//! let mut pointer_block = PointerBlock::new(); +//! pointer_block[0] = Some(Pointer::LeafPointer(leaf_1_hash)); +//! pointer_block[1] = Some(Pointer::LeafPointer(leaf_2_hash)); +//! 
let pointer_block = Box::new(pointer_block); +//! Trie::Node { pointer_block } +//! }; +//! +//! // Get its hash +//! let node_hash = Digest::hash(&node.to_bytes().unwrap()); +//! +//! // Create the environment and the store. For both the in-memory and +//! // LMDB-backed implementations, the environment is the source of +//! // transactions. +//! let tmp_dir = tempdir().unwrap(); +//! let map_size = 4096 * 2560; // map size should be a multiple of OS page size +//! let max_readers = 512; +//! let env = LmdbEnvironment::new(&tmp_dir.path().to_path_buf(), map_size, max_readers, true).unwrap(); +//! let store = LmdbTrieStore::new(&env, None, DatabaseFlags::empty()).unwrap(); +//! +//! // First let's create a read-write transaction, persist the values, but +//! // forget to commit the transaction. +//! { +//! // Create a read-write transaction +//! let mut txn = env.create_read_write_txn().unwrap(); +//! +//! // Put the values in the store +//! store.put(&mut txn, &leaf_1_hash, &leaf_1).unwrap(); +//! store.put(&mut txn, &leaf_2_hash, &leaf_2).unwrap(); +//! store.put(&mut txn, &node_hash, &node).unwrap(); +//! +//! // Here we forget to commit the transaction before it goes out of scope +//! } +//! +//! // Now let's check to see if the values were stored +//! { +//! // Create a read transaction +//! let txn = env.create_read_txn().unwrap(); +//! +//! // Observe that nothing has been persisted to the store +//! for hash in [&leaf_1_hash, &leaf_2_hash, &node_hash].iter() { +//! // We need to use a type annotation here to help the compiler choose +//! // a suitable FromBytes instance +//! let maybe_trie: Option> = store.get(&txn, hash).unwrap(); +//! assert!(maybe_trie.is_none()); +//! } +//! +//! // Commit the read transaction. Not strictly necessary, but better to be hygienic. +//! txn.commit().unwrap(); +//! } +//! +//! // Now let's try that again, remembering to commit the transaction this time +//! { +//! // Create a read-write transaction +//! 
let mut txn = env.create_read_write_txn().unwrap(); +//! +//! // Put the values in the store +//! store.put(&mut txn, &leaf_1_hash, &leaf_1).unwrap(); +//! store.put(&mut txn, &leaf_2_hash, &leaf_2).unwrap(); +//! store.put(&mut txn, &node_hash, &node).unwrap(); +//! +//! // Commit the transaction. +//! txn.commit().unwrap(); +//! } +//! +//! // Now let's check to see if the values were stored again +//! { +//! // Create a read transaction +//! let txn = env.create_read_txn().unwrap(); +//! +//! // Get the values in the store +//! assert_eq!(Some(leaf_1), store.get(&txn, &leaf_1_hash).unwrap()); +//! assert_eq!(Some(leaf_2), store.get(&txn, &leaf_2_hash).unwrap()); +//! assert_eq!(Some(node), store.get(&txn, &node_hash).unwrap()); +//! +//! // Commit the read transaction. +//! txn.commit().unwrap(); +//! } +//! +//! tmp_dir.close().unwrap(); +//! ``` +use std::{ + borrow::Cow, + collections::{hash_map::Entry, HashMap}, + sync::{Arc, Mutex}, +}; + +use lmdb::{Database, DatabaseFlags, Transaction}; + +use casper_types::{ + bytesrepr::{self, Bytes, ToBytes}, + Digest, Key, StoredValue, +}; + +use crate::global_state::{ + error, + state::CommitError, + store::Store, + transaction_source::{lmdb::LmdbEnvironment, Readable, TransactionSource, Writable}, + trie::{LazilyDeserializedTrie, Trie}, + trie_store::{self, TrieStore}, +}; + +/// An LMDB-backed trie store. +/// +/// Wraps [`lmdb::Database`]. +#[derive(Debug, Clone)] +pub struct LmdbTrieStore { + db: Database, +} + +impl LmdbTrieStore { + /// Constructor for new `LmdbTrieStore`. + pub fn new( + env: &LmdbEnvironment, + maybe_name: Option<&str>, + flags: DatabaseFlags, + ) -> Result { + let name = Self::name(maybe_name); + let db = env.env().create_db(Some(&name), flags)?; + Ok(LmdbTrieStore { db }) + } + + /// Constructor for `LmdbTrieStore` which opens an existing lmdb store file. 
+ pub fn open(env: &LmdbEnvironment, maybe_name: Option<&str>) -> Result { + let name = Self::name(maybe_name); + let db = env.env().open_db(Some(&name))?; + Ok(LmdbTrieStore { db }) + } + + fn name(maybe_name: Option<&str>) -> String { + maybe_name + .map(|name| format!("{}-{}", trie_store::NAME, name)) + .unwrap_or_else(|| String::from(trie_store::NAME)) + } + + /// Get a handle to the underlying database. + pub fn get_db(&self) -> Database { + self.db + } +} + +impl Store> for LmdbTrieStore { + type Error = error::Error; + + type Handle = Database; + + fn handle(&self) -> Self::Handle { + self.db + } +} + +impl TrieStore for LmdbTrieStore {} + +/// Cache used by the scratch trie. The keys represent the hash of the trie being cached. The +/// values represent: 1) A boolean, where `false` means the trie was _not_ written and `true` means +/// it was 2) A deserialized trie +pub(crate) type Cache = Arc>>; + +/// Cached version of the trie store. +#[derive(Clone)] +pub(crate) struct ScratchTrieStore { + pub(crate) cache: Cache, + pub(crate) store: Arc, + pub(crate) env: Arc, +} + +impl ScratchTrieStore { + /// Creates a new ScratchTrieStore. + pub fn new(store: Arc, env: Arc) -> Self { + Self { + store, + env, + cache: Default::default(), + } + } + + /// Writes only tries which are both under the given `state_root` and dirty to the underlying + /// db. + pub fn write_root_to_db(self, state_root: Digest) -> Result<(), error::Error> { + let cache = &*self.cache.lock().map_err(|_| error::Error::Poison)?; + if !cache.contains_key(&state_root) { + return Err(CommitError::TrieNotFoundInCache(state_root).into()); + } + + let mut tries_to_write = vec![state_root]; + let mut txn = self.env.create_read_write_txn()?; + + while let Some(trie_hash) = tries_to_write.pop() { + let trie_bytes = if let Some((true, trie_bytes)) = cache.get(&trie_hash) { + trie_bytes + } else { + // We don't have this trie in the scratch store or it's not dirty - do nothing. 
+ continue; + }; + + let lazy_trie: LazilyDeserializedTrie = bytesrepr::deserialize_from_slice(trie_bytes)?; + tries_to_write.extend(lazy_trie.iter_children()); + + Store::>::put_raw( + &*self.store, + &mut txn, + &trie_hash, + Cow::Borrowed(trie_bytes), + )?; + } + + txn.commit()?; + Ok(()) + } +} + +impl Store> for ScratchTrieStore { + type Error = error::Error; + + type Handle = ScratchTrieStore; + + fn handle(&self) -> Self::Handle { + self.clone() + } + + fn get(&self, txn: &T, key: &Digest) -> Result>, Self::Error> + where + T: Readable, + Digest: ToBytes, + Trie: bytesrepr::FromBytes, + Self::Error: From, + { + match self.get_raw(txn, key)? { + None => Ok(None), + Some(value_bytes) => { + let value = bytesrepr::deserialize(value_bytes.into())?; + Ok(Some(value)) + } + } + } + + fn get_raw(&self, txn: &T, key: &Digest) -> Result, Self::Error> + where + T: Readable, + Digest: AsRef<[u8]>, + Self::Error: From, + { + let mut store = self.cache.lock().map_err(|_| error::Error::Poison)?; + + let maybe_trie = store.get(key); + + match maybe_trie { + Some((_, trie_bytes)) => Ok(Some(trie_bytes.clone())), + None => { + let handle = self.handle(); + match txn.read(handle, key.as_ref())? { + Some(trie_bytes) => { + match store.entry(*key) { + Entry::Occupied(_) => {} + Entry::Vacant(v) => { + v.insert((false, trie_bytes.clone())); + } + } + Ok(Some(trie_bytes)) + } + None => Ok(None), + } + } + } + } + + fn put( + &self, + txn: &mut T, + key: &Digest, + value: &Trie, + ) -> Result<(), Self::Error> + where + T: Writable, + Trie: ToBytes, + Self::Error: From, + { + self.put_raw(txn, key, Cow::Owned(value.to_bytes()?)) + } + + fn put_raw( + &self, + _txn: &mut T, + key: &Digest, + value_bytes: Cow<'_, [u8]>, + ) -> Result<(), Self::Error> + where + T: Writable, + Self::Error: From, + { + self.cache + .lock() + .map_err(|_| error::Error::Poison)? 
+ .insert(*key, (true, Bytes::from(value_bytes.into_owned()))); + Ok(()) + } +} + +impl TrieStore for ScratchTrieStore {} diff --git a/storage/src/global_state/trie_store/mod.rs b/storage/src/global_state/trie_store/mod.rs new file mode 100644 index 0000000000..286daa997a --- /dev/null +++ b/storage/src/global_state/trie_store/mod.rs @@ -0,0 +1,23 @@ +//! A store for persisting `Trie` values at their hashes. +//! +//! See the [lmdb](lmdb/index.html#usage) modules for usage examples. +pub mod lmdb; +/// Trie store operational logic. +pub mod operations; + +// An in-mem cache backed up by a store that is used to optimize batch writes. +mod cache; + +pub(crate) use cache::CacheError as TrieStoreCacheError; + +#[cfg(test)] +mod tests; + +use casper_types::Digest; + +use crate::global_state::{store::Store, trie::Trie}; + +const NAME: &str = "TRIE_STORE"; + +/// An entity which persists [`Trie`] values at their hashes. +pub trait TrieStore: Store> {} diff --git a/storage/src/global_state/trie_store/operations/mod.rs b/storage/src/global_state/trie_store/operations/mod.rs new file mode 100644 index 0000000000..d2800842a1 --- /dev/null +++ b/storage/src/global_state/trie_store/operations/mod.rs @@ -0,0 +1,1389 @@ +pub(crate) mod store_wrappers; +#[cfg(test)] +mod tests; + +#[cfg(test)] +use std::collections::HashSet; +use std::{borrow::Cow, cmp, collections::VecDeque, convert::TryInto, mem}; + +use num_traits::FromPrimitive; +use tracing::{error, warn}; + +use casper_types::{ + bytesrepr::{self, Bytes, FromBytes, ToBytes}, + global_state::{Pointer, TrieMerkleProof, TrieMerkleProofStep}, + Digest, +}; + +use crate::global_state::{ + error::Error as GlobalStateError, + store::Store, + transaction_source::{Readable, Writable}, + trie::{LazilyDeserializedTrie, Parents, PointerBlock, Trie, TrieTag, RADIX, USIZE_EXCEEDS_U8}, + trie_store::TrieStore, +}; + +use self::store_wrappers::NonDeserializingStore; + +use super::{cache::TrieCache, TrieStoreCacheError}; + +/// Result of 
attemptint to read a record from the trie store. +#[allow(clippy::enum_variant_names)] +#[derive(Debug, PartialEq, Eq)] +pub enum ReadResult { + /// Requested item found in trie store. + Found(V), + /// Requested item not found in trie store. + NotFound, + /// Root hash not found in trie store. + RootNotFound, +} + +impl ReadResult { + /// Returns `true` if the result is [`ReadResult::Found`]. + #[cfg(test)] + pub fn is_found(&self) -> bool { + matches!(self, ReadResult::Found(_)) + } +} + +/// Returns a value from the corresponding key at a given root in a given store +pub fn read(txn: &T, store: &S, root: &Digest, key: &K) -> Result, E> +where + K: ToBytes + FromBytes + Eq + std::fmt::Debug, + V: ToBytes + FromBytes, + T: Readable, + S: TrieStore, + S::Error: From, + E: From + From, +{ + let path: Vec = key.to_bytes()?; + + let store = store_wrappers::OnceDeserializingStore::new(store); + + let mut depth: usize = 0; + let mut current: Trie = match store.get(txn, root)? { + Some(root) => root, + None => return Ok(ReadResult::RootNotFound), + }; + + loop { + match current { + Trie::Leaf { + key: leaf_key, + value: leaf_value, + } => { + let result = if *key == leaf_key { + ReadResult::Found(leaf_value) + } else { + // Keys may not match in the case of a compressed path from + // a Node directly to a Leaf + ReadResult::NotFound + }; + return Ok(result); + } + Trie::Node { pointer_block } => { + let index: usize = { + assert!(depth < path.len(), "depth must be < {}", path.len()); + path[depth].into() + }; + let maybe_pointer: Option = { + assert!(index < RADIX, "key length must be < {}", RADIX); + pointer_block[index] + }; + + match maybe_pointer { + Some(pointer) => match store.get(txn, pointer.hash()) { + Ok(Some(next)) => { + depth += 1; + current = next; + } + Ok(None) => { + warn!( + "No trie value at key: {:?} (reading from key: {:?})", + pointer.hash(), + key + ); + return Ok(ReadResult::NotFound); + } + Err(error) => { + return Err(error.into()); + } + }, + 
None => { + return Ok(ReadResult::NotFound); + } + } + } + Trie::Extension { affix, pointer } => { + let sub_path = &path[depth..depth + affix.len()]; + if sub_path == affix.as_slice() { + match store.get(txn, pointer.hash())? { + Some(next) => { + depth += affix.len(); + current = next; + } + None => { + warn!( + "No trie value at key: {:?} (reading from key: {:?})", + pointer.hash(), + key + ); + return Ok(ReadResult::NotFound); + } + } + } else { + return Ok(ReadResult::NotFound); + } + } + } + } +} + +/// Same as [`read`], except that a [`TrieMerkleProof`] is generated and returned along with the key +/// and the value given the root and store. +pub fn read_with_proof( + txn: &T, + store: &S, + root: &Digest, + key: &K, +) -> Result>, E> +where + K: ToBytes + FromBytes + Eq + std::fmt::Debug, + V: ToBytes + FromBytes, + T: Readable, + S: TrieStore, + S::Error: From, + E: From + From, +{ + let mut proof_steps = VecDeque::new(); + let path: Vec = key.to_bytes()?; + + let mut depth: usize = 0; + let mut current: Trie = match store.get(txn, root)? { + Some(root) => root, + None => return Ok(ReadResult::RootNotFound), + }; + loop { + match current { + Trie::Leaf { + key: leaf_key, + value, + } => { + if *key != leaf_key { + return Ok(ReadResult::NotFound); + } + let key = leaf_key; + return Ok(ReadResult::Found(TrieMerkleProof::new( + key, + value, + proof_steps, + ))); + } + Trie::Node { pointer_block } => { + let hole_index: usize = { + assert!(depth < path.len(), "depth must be < {}", path.len()); + path[depth].into() + }; + let pointer: Pointer = { + assert!(hole_index < RADIX, "key length must be < {}", RADIX); + match pointer_block[hole_index] { + Some(pointer) => pointer, + None => return Ok(ReadResult::NotFound), + } + }; + let indexed_pointers_with_hole = pointer_block + .as_indexed_pointers() + .filter(|(index, _)| *index as usize != hole_index) + .collect(); + let next = match store.get(txn, pointer.hash())? 
{ + Some(next) => next, + None => { + warn!( + "No trie value at key: {:?} (reading from path: {:?})", + pointer.hash(), + path + ); + return Ok(ReadResult::NotFound); + } + }; + depth += 1; + current = next; + let hole_index: u8 = hole_index.try_into().expect(USIZE_EXCEEDS_U8); + proof_steps.push_front(TrieMerkleProofStep::node( + hole_index, + indexed_pointers_with_hole, + )); + } + Trie::Extension { affix, pointer } => { + let sub_path = &path[depth..depth + affix.len()]; + if sub_path != affix.as_slice() { + return Ok(ReadResult::NotFound); + }; + + let next = match store.get(txn, pointer.hash())? { + Some(next) => next, + None => { + warn!( + "No trie value at key: {:?} (reading from path: {:?})", + pointer.hash(), + path + ); + return Ok(ReadResult::NotFound); + } + }; + depth += affix.len(); + current = next; + proof_steps.push_front(TrieMerkleProofStep::extension(affix.into())); + } + } + } +} + +/// Given a serialized trie, find any children that are referenced but not present in the database. +pub fn missing_children( + txn: &T, + store: &S, + trie_raw: &[u8], +) -> Result, E> +where + K: ToBytes + FromBytes + Eq + std::fmt::Debug, + V: ToBytes + FromBytes + std::fmt::Debug, + T: Readable, + S: TrieStore, + S::Error: From, + E: From + From, +{ + // Optimization: Don't deserialize leaves as they have no descendants. + if let Some(TrieTag::Leaf) = trie_raw.first().copied().and_then(TrieTag::from_u8) { + return Ok(vec![]); + } + + // Parse the trie, handling errors gracefully. + let trie = match bytesrepr::deserialize_from_slice(trie_raw) { + Ok(trie) => trie, + Err(err) => { + error!(?err, "unable to parse trie"); + return Err(err.into()); + } + }; + + let is_present = |trie_key| matches!(store.get_raw(txn, &trie_key), Ok(Some(_))); + + Ok(match trie { + // Should be unreachable due to checking the first byte as a shortcut above. + Trie::::Leaf { .. 
} => { + error!("did not expect to see a trie leaf in `missing_children` after shortcut"); + vec![] + } + // If we hit a pointer block, queue up all of the nodes it points to + Trie::Node { pointer_block } => pointer_block + .as_indexed_pointers() + .map(|(_, pointer)| *pointer.hash()) + .filter(|pointer_hash| !is_present(*pointer_hash)) + .collect(), + // If we hit an extension block, add its pointer to the queue + Trie::Extension { pointer, .. } => { + let trie_key = pointer.into_hash(); + if is_present(trie_key) { + vec![] + } else { + vec![trie_key] + } + } + }) +} + +struct TrieScanRaw { + tip: LazilyDeserializedTrie, + parents: Parents, +} + +impl TrieScanRaw { + fn new(tip: LazilyDeserializedTrie, parents: Parents) -> Self { + TrieScanRaw { tip, parents } + } +} + +/// Returns a [`TrieScanRaw`] from the given key at a given root in a given store. +/// A scan consists of the deepest trie variant found at that key, a.k.a. the +/// "tip", along the with the parents of that variant. Parents are ordered by +/// their depth from the root (shallow to deep). The tip is not parsed. 
+fn scan_raw( + txn: &T, + store: &NonDeserializingStore, + key_bytes: &[u8], + root_bytes: Bytes, +) -> Result, E> +where + K: ToBytes + FromBytes + Clone, + V: ToBytes + FromBytes + Clone, + T: Readable, + S: TrieStore, + S::Error: From, + E: From + From, +{ + let path = key_bytes; + + let mut current = root_bytes; + let mut depth: usize = 0; + let mut acc: Parents = Vec::new(); + + loop { + let maybe_trie_leaf = bytesrepr::deserialize_from_slice(¤t)?; + match maybe_trie_leaf { + leaf_bytes @ LazilyDeserializedTrie::Leaf(_) => { + return Ok(TrieScanRaw::new(leaf_bytes, acc)) + } + LazilyDeserializedTrie::Node { pointer_block } => { + let index = { + assert!(depth < path.len(), "depth must be < {}", path.len()); + path[depth] + }; + let maybe_pointer: Option = { + let index: usize = index.into(); + assert!(index < RADIX, "index must be < {}", RADIX); + pointer_block[index] + }; + let pointer = match maybe_pointer { + Some(pointer) => pointer, + None => { + return Ok(TrieScanRaw::new( + LazilyDeserializedTrie::Node { pointer_block }, + acc, + )); + } + }; + match store.get_raw(txn, pointer.hash())? { + Some(next) => { + current = next; + depth += 1; + acc.push((index, Trie::Node { pointer_block })) + } + None => { + panic!( + "No trie value at key: {:?} (reading from path: {:?})", + pointer.hash(), + path + ); + } + } + } + LazilyDeserializedTrie::Extension { affix, pointer } => { + if path.len() < depth + affix.len() { + // We might be trying to store a key that is shorter than the keys that are + // already stored. In this case, we would need to split this extension. + return Ok(TrieScanRaw::new( + LazilyDeserializedTrie::Extension { affix, pointer }, + acc, + )); + } + let sub_path = &path[depth..depth + affix.len()]; + if sub_path != affix.as_slice() { + return Ok(TrieScanRaw::new( + LazilyDeserializedTrie::Extension { affix, pointer }, + acc, + )); + } + match store.get_raw(txn, pointer.hash())? 
{ + Some(next) => { + let index = { + assert!(depth < path.len(), "depth must be < {}", path.len()); + path[depth] + }; + current = next; + depth += affix.len(); + acc.push((index, Trie::extension(affix.into(), pointer))) + } + None => { + panic!( + "No trie value at key: {:?} (reading from path: {:?})", + pointer.hash(), + path + ); + } + } + } + } + } +} + +/// Result of attempting to prune an item from the trie store. +#[derive(Debug, PartialEq, Eq)] +pub enum TriePruneResult { + /// Successfully pruned item from trie store. + Pruned(Digest), + /// Requested key not found in trie store. + MissingKey, + /// Root hash not found in trie store. + RootNotFound, + /// Prune failure. + Failure(GlobalStateError), +} + +/// Delete provided key from a global state so it is not reachable from a resulting state root hash. +pub(crate) fn prune( + txn: &mut T, + store: &S, + root: &Digest, + keys_to_prune: &K, +) -> Result +where + K: ToBytes + FromBytes + Clone + PartialEq + std::fmt::Debug, + V: ToBytes + FromBytes + Clone, + T: Readable + Writable, + S: TrieStore, + S::Error: From, + E: From + From, +{ + let store = store_wrappers::NonDeserializingStore::new(store); + let root_trie_bytes = match store.get_raw(txn, root)? { + None => return Ok(TriePruneResult::RootNotFound), + Some(root_trie) => root_trie, + }; + + let key_bytes = keys_to_prune.to_bytes()?; + let TrieScanRaw { tip, mut parents } = + scan_raw::<_, _, _, _, E>(txn, &store, &key_bytes, root_trie_bytes)?; + + // Check that tip is a leaf + match tip { + LazilyDeserializedTrie::Leaf(leaf_bytes) + if { + // Partially deserialize a key of a leaf node to ensure that we can only continue if + // the key matches what we're looking for. + // _rem contains bytes of serialized V, but we don't need to inspect it. 
+ let (key, _rem) = leaf_bytes.try_deserialize_leaf_key::()?; + key == *keys_to_prune + } => {} + _ => return Ok(TriePruneResult::MissingKey), + } + + let mut new_elements: Vec<(Digest, Trie)> = Vec::new(); + + while let Some((idx, parent)) = parents.pop() { + match (new_elements.last_mut(), parent) { + (_, Trie::Leaf { .. }) => panic!("Should not find leaf"), + (None, Trie::Extension { .. }) => panic!("Extension node should never end in leaf"), + (Some((_, Trie::Leaf { .. })), _) => panic!("New elements should never contain a leaf"), + // The parent is the node which pointed to the leaf we deleted, and that leaf had + // multiple siblings. + (None, Trie::Node { mut pointer_block }) if pointer_block.child_count() > 2 => { + let trie_node: Trie = { + pointer_block[idx as usize] = None; + Trie::Node { pointer_block } + }; + let trie_key = trie_node.trie_hash()?; + new_elements.push((trie_key, trie_node)) + } + // The parent is the node which pointed to the leaf we deleted, and that leaf had one or + // zero siblings. + (None, Trie::Node { mut pointer_block }) => { + let (sibling_idx, sibling_pointer) = match pointer_block + .as_indexed_pointers() + .find(|(jdx, _)| idx != *jdx) + { + // There are zero siblings. Elsewhere we maintain the invariant that only the + // root node can contain a single leaf. Therefore the parent is the root node. + // The resulting output is just the empty node and nothing else. + None => { + let trie_node = Trie::Node { + pointer_block: Box::new(PointerBlock::new()), + }; + let trie_key = trie_node.trie_hash()?; + new_elements.push((trie_key, trie_node)); + break; + } + Some((sibling_idx, pointer)) => (sibling_idx, pointer), + }; + // There is one sibling. + match (sibling_pointer, parents.pop()) { + (_, Some((_, Trie::Leaf { .. }))) => panic!("Should not have leaf in scan"), + // There is no grandparent. Therefore the parent is the root node. Output the + // root node with the index zeroed out. 
+ (_, None) => { + pointer_block[idx as usize] = None; + let trie_node = Trie::Node { pointer_block }; + let trie_key = trie_node.trie_hash()?; + new_elements.push((trie_key, trie_node)); + break; + } + // The sibling is a leaf and the grandparent is a node. Reseat the single leaf + // sibling into the grandparent. + (Pointer::LeafPointer(..), Some((idx, Trie::Node { mut pointer_block }))) => { + pointer_block[idx as usize] = Some(sibling_pointer); + let trie_node = Trie::Node { pointer_block }; + let trie_key = trie_node.trie_hash()?; + new_elements.push((trie_key, trie_node)) + } + // The sibling is a leaf and the grandparent is an extension. + (Pointer::LeafPointer(..), Some((_, Trie::Extension { .. }))) => { + match parents.pop() { + None => panic!("Root node cannot be an extension node"), + Some((_, Trie::Leaf { .. })) => panic!("Should not find leaf"), + Some((_, Trie::Extension { .. })) => { + panic!("Extension cannot extend to an extension") + } + // The great-grandparent is a node. Reseat the single leaf sibling into + // the position the grandparent was in. + Some((idx, Trie::Node { mut pointer_block })) => { + pointer_block[idx as usize] = Some(sibling_pointer); + let trie_node = Trie::Node { pointer_block }; + let trie_key = trie_node.trie_hash()?; + new_elements.push((trie_key, trie_node)) + } + } + } + // The single sibling is a node or an extension, and a grandparent exists. + // Therefore the parent is not the root + (Pointer::NodePointer(sibling_trie_key), Some((idx, grandparent))) => { + // Push the grandparent back onto the parents so it may be processed later. + parents.push((idx, grandparent)); + // Elsewhere we maintain the invariant that all trie keys have corresponding + // trie values. + let sibling_trie = store + .get(txn, &sibling_trie_key)? + .expect("should have sibling"); + match sibling_trie { + Trie::Leaf { .. 
} => { + panic!("Node pointer should not point to leaf") + } + // The single sibling is a node, and there exists a grandparent. + // Therefore the parent is not the root. We output an extension to + // replace the parent, with a single byte corresponding to the sibling + // index. In the next loop iteration, we will handle the case where + // this extension might need to be combined with a grandparent + // extension. + Trie::Node { .. } => { + let new_extension: Trie = + Trie::extension(vec![sibling_idx], sibling_pointer); + let trie_key = new_extension.trie_hash()?; + new_elements.push((trie_key, new_extension)) + } + // The single sibling is an extension. We output an extension to + // replace the parent, prepending the + // sibling index to the sibling's affix. In + // the next loop iteration, we will handle the case where this extension + // might need to be combined with a grandparent extension. + Trie::Extension { + affix: extension_affix, + pointer, + } => { + let mut new_affix = vec![sibling_idx]; + new_affix.extend(Vec::::from(extension_affix)); + let new_extension: Trie = Trie::extension(new_affix, pointer); + let trie_key = new_extension.trie_hash()?; + new_elements.push((trie_key, new_extension)) + } + } + } + } + } + // The parent is a pointer block, and we are propagating a node or extension upwards. + // It is impossible to propagate a leaf upwards. Reseat the thing we are propagating + // into the parent. + (Some((trie_key, _)), Trie::Node { mut pointer_block }) => { + let trie_node: Trie = { + pointer_block[idx as usize] = Some(Pointer::NodePointer(*trie_key)); + Trie::Node { pointer_block } + }; + let trie_key = trie_node.trie_hash()?; + new_elements.push((trie_key, trie_node)) + } + // The parent is an extension, and we are outputting an extension. Prepend the parent + // affix to affix of the output extension, mutating the output in place. This is the + // only mutate-in-place. 
+ ( + Some(( + trie_key, + Trie::Extension { + affix: child_affix, + pointer, + }, + )), + Trie::Extension { affix, .. }, + ) => { + let mut new_affix: Vec = affix.into(); + new_affix.extend_from_slice(child_affix.as_slice()); + *child_affix = new_affix.into(); + *trie_key = { + let new_extension: Trie = + Trie::extension(child_affix.to_owned().into(), pointer.to_owned()); + new_extension.trie_hash()? + } + } + // The parent is an extension and the new element is a pointer block. The next element + // we add will be an extension to the pointer block we are going to add. + (Some((trie_key, Trie::Node { .. })), Trie::Extension { affix, .. }) => { + let pointer = Pointer::NodePointer(*trie_key); + let trie_extension = Trie::Extension { affix, pointer }; + let trie_key = trie_extension.trie_hash()?; + new_elements.push((trie_key, trie_extension)) + } + } + } + for (hash, element) in new_elements.iter() { + store.put(txn, hash, element)?; + } + // The hash of the final trie in the new elements is the new root + let new_root = new_elements + .pop() + .map(|(hash, _)| hash) + .unwrap_or_else(|| root.to_owned()); + + Ok(TriePruneResult::Pruned(new_root)) +} + +#[allow(clippy::type_complexity)] +fn rehash( + mut tip: Trie, + parents: Parents, +) -> Result)>, bytesrepr::Error> +where + K: ToBytes + Clone, + V: ToBytes + Clone, +{ + let mut ret: Vec<(Digest, Trie)> = Vec::new(); + let mut tip_hash = tip.trie_hash()?; + ret.push((tip_hash, tip.to_owned())); + + for (index, parent) in parents.into_iter().rev() { + match parent { + Trie::Leaf { .. } => { + panic!("parents should not contain any leaves"); + } + Trie::Node { mut pointer_block } => { + tip = { + let pointer = match tip { + Trie::Leaf { .. } => Pointer::LeafPointer(tip_hash), + Trie::Node { .. } => Pointer::NodePointer(tip_hash), + Trie::Extension { .. 
} => Pointer::NodePointer(tip_hash), + }; + pointer_block[index.into()] = Some(pointer); + Trie::Node { pointer_block } + }; + tip_hash = tip.trie_hash()?; + ret.push((tip_hash, tip.to_owned())) + } + Trie::Extension { affix, pointer } => { + tip = { + let pointer = pointer.update(tip_hash); + Trie::Extension { affix, pointer } + }; + tip_hash = tip.trie_hash()?; + ret.push((tip_hash, tip.to_owned())) + } + } + } + Ok(ret) +} + +pub(super) fn common_prefix(ls: &[A], rs: &[A]) -> Vec { + ls.iter() + .zip(rs.iter()) + .take_while(|(l, r)| l == r) + .map(|(l, _)| l.to_owned()) + .collect() +} + +fn get_parents_path(parents: &[(u8, Trie)]) -> Vec { + let mut ret = Vec::new(); + for (index, element) in parents.iter() { + if let Trie::Extension { affix, .. } = element { + ret.extend(affix); + } else { + ret.push(index.to_owned()); + } + } + ret +} + +/// Takes a path to a leaf, that leaf's parent node, and the parents of that +/// node, and adds the node to the parents. +/// +/// This function will panic if the path to the leaf and the path to its +/// parent node do not share a common prefix. +fn add_node_to_parents( + path_to_leaf: &[u8], + new_parent_node: Trie, + mut parents: Parents, +) -> Parents +where + K: ToBytes, + V: ToBytes, +{ + // TODO: add is_node() method to Trie + match new_parent_node { + Trie::Node { .. } => (), + _ => panic!("new_parent must be a node"), + } + // The current depth will be the length of the path to the new parent node. 
+ let depth: usize = { + // Get the path to this node + let path_to_node: Vec = get_parents_path(&parents); + // Check that the path to the node is a prefix of the current path + let current_path = common_prefix(path_to_leaf, &path_to_node); + assert_eq!(current_path, path_to_node); + // Get the length + path_to_node.len() + }; + // Index path by current depth; + let index = { + assert!( + depth < path_to_leaf.len(), + "depth must be < {}", + path_to_leaf.len() + ); + path_to_leaf[depth] + }; + // Add node to parents, along with index to modify + parents.push((index, new_parent_node)); + parents +} + +/// Takes paths to a new leaf and an existing leaf that share a common prefix, +/// along with the parents of the existing leaf. Creates a new node (adding a +/// possible parent extension for it to parents) which contains the existing +/// leaf. Returns the new node and parents, so that they can be used by +/// [`add_node_to_parents`]. +#[allow(clippy::type_complexity)] +fn reparent_leaf( + new_leaf_path: &[u8], + existing_leaf_path: &[u8], + parents: Parents, +) -> Result<(Trie, Parents), bytesrepr::Error> +where + K: ToBytes, + V: ToBytes, +{ + let mut parents = parents; + let (child_index, parent) = parents.pop().expect("parents should not be empty"); + let pointer_block = match parent { + Trie::Node { pointer_block } => pointer_block, + _ => panic!("A leaf should have a node for its parent"), + }; + // Get the path that the new leaf and existing leaf share + let shared_path = common_prefix(new_leaf_path, existing_leaf_path); + // Assemble a new node to hold the existing leaf. The new leaf will + // be added later during the add_parent_node and rehash phase. 
+ let new_node = { + let index = existing_leaf_path[shared_path.len()]; + let existing_leaf_pointer = + pointer_block[::from(child_index)].expect("parent has lost the existing leaf"); + Trie::node(&[(index, existing_leaf_pointer)]) + }; + // Re-add the parent node to parents + parents.push((child_index, Trie::Node { pointer_block })); + // Create an affix for a possible extension node + let affix = { + let parents_path = get_parents_path(&parents); + &shared_path[parents_path.len()..] + }; + // If the affix is non-empty, create an extension node and add it + // to parents. + if !affix.is_empty() { + let new_node_hash = new_node.trie_hash()?; + let new_extension = Trie::extension(affix.to_vec(), Pointer::NodePointer(new_node_hash)); + parents.push((child_index, new_extension)); + } + Ok((new_node, parents)) +} + +struct SplitResult { + new_node: Trie, + parents: Parents, + maybe_hashed_child_extension: Option<(Digest, Trie)>, +} + +/// Takes a path to a new leaf, an existing extension that leaf collides with, +/// and the parents of that extension. Creates a new node and possible parent +/// and child extensions. The node pointer contained in the existing extension +/// is repositioned in the new node or the possible child extension. The +/// possible parent extension is added to parents. Returns the new node, +/// parents, and the possible child extension (paired with its hash). +/// The new node and parents can be used by [`add_node_to_parents`], and the +/// new hashed child extension can be added to the list of new trie elements. 
+fn split_extension( + new_leaf_path: &[u8], + existing_extension: Trie, + mut parents: Parents, +) -> Result, bytesrepr::Error> +where + K: ToBytes + Clone, + V: ToBytes + Clone, +{ + // TODO: add is_extension() method to Trie + let (affix, pointer) = match existing_extension { + Trie::Extension { affix, pointer } => (affix, pointer), + _ => panic!("existing_extension must be an extension"), + }; + let parents_path = get_parents_path(&parents); + // Get the path to the existing extension node + let existing_extension_path: Vec = + parents_path.iter().chain(affix.iter()).cloned().collect(); + // Get the path that the new leaf and existing leaf share + let shared_path = common_prefix(new_leaf_path, &existing_extension_path); + // Create an affix for a possible parent extension above the new + // node. + let parent_extension_affix = shared_path[parents_path.len()..].to_vec(); + // Create an affix for a possible child extension between the new + // node and the node that the existing extension pointed to. + let child_extension_affix = affix[parent_extension_affix.len() + 1..].to_vec(); + // Create a child extension (paired with its hash) if necessary + let maybe_hashed_child_extension: Option<(Digest, Trie)> = + if child_extension_affix.is_empty() { + None + } else { + let child_extension = Trie::extension(child_extension_affix.to_vec(), pointer); + let child_extension_hash = child_extension.trie_hash()?; + Some((child_extension_hash, child_extension)) + }; + // Assemble a new node. 
+ let new_node: Trie = { + let index = existing_extension_path[shared_path.len()]; + let pointer = maybe_hashed_child_extension + .to_owned() + .map_or(pointer, |(hash, _)| Pointer::NodePointer(hash)); + Trie::node(&[(index, pointer)]) + }; + // Create a parent extension if necessary + if !parent_extension_affix.is_empty() { + let new_node_hash = new_node.trie_hash()?; + let parent_extension = Trie::extension( + parent_extension_affix.to_vec(), + Pointer::NodePointer(new_node_hash), + ); + parents.push((parent_extension_affix[0], parent_extension)); + } + Ok(SplitResult { + new_node, + parents, + maybe_hashed_child_extension, + }) +} + +/// Result of attemptint to write to trie store. +#[derive(Debug, PartialEq, Eq)] +pub enum WriteResult { + /// Record written to trie store. + Written(Digest), + /// Record already exists in trie store. + AlreadyExists, + /// Requested global state root hash does not exist in trie store. + RootNotFound, +} + +/// Write to trie store. +pub fn write( + txn: &mut T, + store: &S, + root: &Digest, + key: &K, + value: &V, +) -> Result +where + K: ToBytes + FromBytes + Clone + Eq + std::fmt::Debug, + V: ToBytes + FromBytes + Clone + Eq, + T: Readable + Writable, + S: TrieStore, + S::Error: From, + E: From + From, +{ + let store = store_wrappers::NonDeserializingStore::new(store); + match store.get_raw(txn, root)? 
{ + None => Ok(WriteResult::RootNotFound), + Some(current_root_bytes) => { + let new_leaf = Trie::Leaf { + key: key.to_owned(), + value: value.to_owned(), + }; + let path: Vec = key.to_bytes()?; + let TrieScanRaw { tip, parents } = + scan_raw::(txn, &store, &path, current_root_bytes)?; + let new_elements: Vec<(Digest, Trie)> = match tip { + LazilyDeserializedTrie::Leaf(leaf_bytes) => { + let (existing_leaf_key, existing_value_bytes) = + leaf_bytes.try_deserialize_leaf_key()?; + + if key != &existing_leaf_key { + // If the "tip" is an existing leaf with a different key than + // the new leaf, then we are in a situation where the new leaf + // shares some common prefix with the existing leaf. + let existing_leaf_path = existing_leaf_key.to_bytes()?; + let (new_node, parents) = + reparent_leaf(&path, &existing_leaf_path, parents)?; + let parents = add_node_to_parents(&path, new_node, parents); + rehash(new_leaf, parents)? + } else { + let new_value_bytes = value.to_bytes()?; + if new_value_bytes != existing_value_bytes { + // If the "tip" is an existing leaf with the same key as the + // new leaf, but the existing leaf and new leaf have different + // values, then we are in the situation where we are "updating" + // an existing leaf. + rehash(new_leaf, parents)? + } else { + // Both key and values are the same. + // If the "tip" is the same as the new leaf, then the leaf + // is already in the Trie. + Vec::new() + } + } + } + // If the "tip" is an existing node, then we can add a pointer + // to the new leaf to the node's pointer block. + node @ LazilyDeserializedTrie::Node { .. } => { + let parents = add_node_to_parents(&path, node.try_into()?, parents); + rehash(new_leaf, parents)? + } + // If the "tip" is an extension node, then we must modify or + // replace it, adding a node where necessary. + extension @ LazilyDeserializedTrie::Extension { .. 
} => { + let SplitResult { + new_node, + parents, + maybe_hashed_child_extension, + } = split_extension(&path, extension.try_into()?, parents)?; + let parents = add_node_to_parents(&path, new_node, parents); + if let Some(hashed_extension) = maybe_hashed_child_extension { + let mut ret = vec![hashed_extension]; + ret.extend(rehash(new_leaf, parents)?); + ret + } else { + rehash(new_leaf, parents)? + } + } + }; + if new_elements.is_empty() { + return Ok(WriteResult::AlreadyExists); + } + let mut root_hash = root.to_owned(); + for (hash, element) in new_elements.iter() { + store.put(txn, hash, element)?; + root_hash = *hash; + } + Ok(WriteResult::Written(root_hash)) + } + } +} + +/// Batch write to trie store. +pub fn batch_write( + txn: &mut T, + store: &S, + root: &Digest, + values: I, +) -> Result +where + K: ToBytes + FromBytes + Clone + Eq + std::fmt::Debug, + V: ToBytes + FromBytes + Clone + Eq, + I: Iterator, + T: Readable + Writable, + S: TrieStore, + S::Error: From, + E: From + From + From, +{ + let mut cache = TrieCache::::new::<_, E>(txn, store, root)?; + + for (key, value) in values { + cache.insert::<_, E>(key, value, txn)?; + } + cache.store_cache::<_, E>(txn) +} + +/// Puts a trie pointer block, extension node or leaf into the trie. +pub fn put_trie(txn: &mut T, store: &S, trie_bytes: &[u8]) -> Result +where + K: ToBytes + FromBytes + Clone + Eq + std::fmt::Debug, + V: ToBytes + FromBytes + Clone + Eq, + T: Readable + Writable, + S: TrieStore, + S::Error: From, + E: From + From, +{ + let trie_hash = Digest::hash_into_chunks_if_necessary(trie_bytes); + store.put_raw(txn, &trie_hash, Cow::from(trie_bytes))?; + Ok(trie_hash) +} + +enum KeysIteratorState> { + /// Iterate normally + Ok, + /// Return the error and stop iterating + #[allow(dead_code)] // Return variant alone is used in testing. 
+ ReturnError(S::Error), + /// Already failed, only return None + Failed, +} + +struct VisitedTrieNode { + trie: LazilyDeserializedTrie, + maybe_index: Option, + path: Vec, +} + +/// Iterator for trie store keys. +pub struct KeysIterator<'a, 'b, K, V, T, S: TrieStore> { + initial_descend: VecDeque, + visited: Vec, + store: NonDeserializingStore<'a, K, V, S>, + txn: &'b T, + state: KeysIteratorState, +} + +impl Iterator for KeysIterator<'_, '_, K, V, T, S> +where + K: ToBytes + FromBytes + Clone + Eq + std::fmt::Debug, + V: ToBytes + FromBytes + Clone + Eq + std::fmt::Debug, + T: Readable, + S: TrieStore, + S::Error: From + From, +{ + type Item = Result; + + fn next(&mut self) -> Option { + match mem::replace(&mut self.state, KeysIteratorState::Ok) { + KeysIteratorState::Ok => (), + KeysIteratorState::ReturnError(e) => { + self.state = KeysIteratorState::Failed; + return Some(Err(e)); + } + KeysIteratorState::Failed => { + return None; + } + } + while let Some(VisitedTrieNode { + trie, + maybe_index, + mut path, + }) = self.visited.pop() + { + let mut maybe_next_trie: Option = None; + + match trie { + LazilyDeserializedTrie::Leaf(leaf_bytes) => { + let leaf_bytes = leaf_bytes.bytes(); + if leaf_bytes.is_empty() { + self.state = KeysIteratorState::Failed; + return Some(Err(bytesrepr::Error::Formatting.into())); + } + + let key_bytes = &leaf_bytes[1..]; // Skip `Trie::Leaf` tag + debug_assert!( + key_bytes.starts_with(&path), + "Expected key bytes to start with the current path" + ); + + // only return the leaf if it matches the initial descend path + path.extend(&self.initial_descend); + if key_bytes.starts_with(&path) { + // Only deserializes K when we're absolutely sure the path matches. 
+ let (key, _stored_value): (K, _) = match K::from_bytes(key_bytes) { + Ok(key) => key, + Err(error) => { + self.state = KeysIteratorState::Failed; + return Some(Err(error.into())); + } + }; + return Some(Ok(key)); + } + } + LazilyDeserializedTrie::Node { ref pointer_block } => { + // if we are still initially descending (and initial_descend is not empty), take + // the first index we should descend to, otherwise take maybe_index from the + // visited stack + let mut index: usize = self + .initial_descend + .front() + .map(|i| *i as usize) + .or(maybe_index) + .unwrap_or_default(); + while index < RADIX { + if let Some(ref pointer) = pointer_block[index] { + maybe_next_trie = { + match self.store.get_raw(self.txn, pointer.hash()) { + Ok(Some(trie_bytes)) => { + match bytesrepr::deserialize_from_slice(&trie_bytes) { + Ok(lazy_trie) => Some(lazy_trie), + Err(error) => { + self.state = KeysIteratorState::Failed; + return Some(Err(error.into())); + } + } + } + Ok(None) => None, + Err(error) => { + self.state = KeysIteratorState::Failed; + return Some(Err(error)); + } + } + }; + debug_assert!( + maybe_next_trie.is_some(), + "Trie at the pointer is expected to exist" + ); + if self.initial_descend.pop_front().is_none() { + self.visited.push(VisitedTrieNode { + trie, + maybe_index: Some(index + 1), + path: path.clone(), + }); + } + path.push(index as u8); + break; + } + // only continue the loop if we are not initially descending; + // if we are descending and we land here, it means that there is no subtrie + // along the descend path and we will return no results + if !self.initial_descend.is_empty() { + break; + } + index += 1; + } + } + LazilyDeserializedTrie::Extension { affix, pointer } => { + let descend_len = cmp::min(self.initial_descend.len(), affix.len()); + let check_prefix = self + .initial_descend + .drain(..descend_len) + .collect::>(); + // if we are initially descending, we only want to continue if the affix + // matches the descend path + // if we are 
not, the check_prefix will be empty, so we will enter the if + // anyway + if affix.starts_with(&check_prefix) { + maybe_next_trie = match self.store.get_raw(self.txn, pointer.hash()) { + Ok(Some(trie_bytes)) => { + match bytesrepr::deserialize_from_slice(&trie_bytes) { + Ok(lazy_trie) => Some(lazy_trie), + Err(error) => { + self.state = KeysIteratorState::Failed; + return Some(Err(error.into())); + } + } + } + Ok(None) => None, + Err(e) => { + self.state = KeysIteratorState::Failed; + return Some(Err(e)); + } + }; + debug_assert!( + matches!(&maybe_next_trie, Some(LazilyDeserializedTrie::Node { .. }),), + "Expected a LazilyDeserializedTrie::Node but received {:?}", + maybe_next_trie + ); + path.extend(affix); + } + } + } + + if let Some(next_trie) = maybe_next_trie { + self.visited.push(VisitedTrieNode { + trie: next_trie, + maybe_index: None, + path, + }); + } + } + None + } +} + +/// Returns the iterator over the keys in the subtrie matching `prefix`. +/// +/// The root should be the apex of the trie. 
+pub fn keys_with_prefix<'a, 'b, K, V, T, S>( + txn: &'b T, + store: &'a S, + root: &Digest, + prefix: &[u8], +) -> KeysIterator<'a, 'b, K, V, T, S> +where + K: ToBytes + FromBytes + Clone + Eq + std::fmt::Debug, + V: ToBytes + FromBytes + Clone + Eq + std::fmt::Debug, + T: Readable, + S: TrieStore, + S::Error: From, +{ + let store = store_wrappers::NonDeserializingStore::new(store); + let (visited, init_state): (Vec, _) = match store.get_raw(txn, root) { + Ok(None) => (vec![], KeysIteratorState::Ok), + Err(e) => (vec![], KeysIteratorState::ReturnError(e)), + Ok(Some(current_root_bytes)) => match bytesrepr::deserialize_from_slice(current_root_bytes) + { + Ok(lazy_trie) => { + let visited = vec![VisitedTrieNode { + trie: lazy_trie, + maybe_index: None, + path: vec![], + }]; + let init_state = KeysIteratorState::Ok; + + (visited, init_state) + } + Err(error) => (vec![], KeysIteratorState::ReturnError(error.into())), + }, + }; + + KeysIterator { + initial_descend: prefix.iter().cloned().collect(), + visited, + store, + txn, + state: init_state, + } +} + +/// Returns the iterator over the keys at a given root hash. +/// +/// The root should be the apex of the trie. +#[cfg(test)] +pub fn keys<'a, 'b, K, V, T, S>( + txn: &'b T, + store: &'a S, + root: &Digest, +) -> KeysIterator<'a, 'b, K, V, T, S> +where + K: ToBytes + FromBytes + Clone + Eq + std::fmt::Debug, + V: ToBytes + FromBytes + Clone + Eq + std::fmt::Debug, + T: Readable, + S: TrieStore, + S::Error: From, +{ + keys_with_prefix(txn, store, root, &[]) +} + +/// Checks the integrity of the trie store. +#[cfg(test)] +pub fn check_integrity( + txn: &T, + store: &S, + trie_keys_to_visit: Vec, +) -> Result<(), E> +where + K: ToBytes + FromBytes + Eq + std::fmt::Debug, + V: ToBytes + FromBytes + std::fmt::Debug, + T: Readable, + S: TrieStore, + S::Error: From, + E: From + From, +{ + for state_root in &trie_keys_to_visit { + match store.get(txn, state_root)? { + Some(Trie::Node { .. 
}) => {} + other => panic!( + "Should have a pointer block node as state root but received {:?} instead", + other + ), + } + } + let mut trie_keys_to_visit: Vec<(Vec, Digest)> = trie_keys_to_visit + .iter() + .map(|blake2b_hash| (Vec::new(), *blake2b_hash)) + .collect(); + let mut visited = HashSet::new(); + while let Some((mut path, trie_key)) = trie_keys_to_visit.pop() { + if !visited.insert(trie_key) { + continue; + } + let maybe_retrieved_trie: Option> = store.get(txn, &trie_key)?; + if let Some(trie_value) = &maybe_retrieved_trie { + let hash_of_trie_value = { + let node_bytes = trie_value.to_bytes()?; + Digest::hash(&node_bytes) + }; + if trie_key != hash_of_trie_value { + panic!( + "Trie key {:?} has corrupted value {:?} (hash of value is {:?})", + trie_key, trie_value, hash_of_trie_value + ); + } + } + match maybe_retrieved_trie { + // If we can't find the trie_key; it is missing and we'll return it + None => { + panic!("Missing trie key: {:?}", trie_key) + } + // If we could retrieve the node and it is a leaf, the search can move on + Some(Trie::Leaf { key, .. }) => { + let key_bytes = key.to_bytes()?; + if !key_bytes.starts_with(&path) { + panic!( + "Trie key {:?} belongs to a leaf with a corrupted affix. 
Key bytes: {:?}, Path: {:?}.", + trie_key, key_bytes, path + ); + } + } + // If we hit a pointer block, queue up all of the nodes it points to + Some(Trie::Node { pointer_block }) => { + for (byte, pointer) in pointer_block.as_indexed_pointers() { + let mut new_path = path.clone(); + new_path.push(byte); + match pointer { + Pointer::LeafPointer(descendant_leaf_trie_key) => { + trie_keys_to_visit.push((new_path, descendant_leaf_trie_key)) + } + Pointer::NodePointer(descendant_node_trie_key) => { + trie_keys_to_visit.push((new_path, descendant_node_trie_key)) + } + } + } + } + // If we hit an extension block, add its pointer to the queue + Some(Trie::Extension { pointer, affix }) => { + path.extend_from_slice(affix.as_slice()); + trie_keys_to_visit.push((path, pointer.into_hash())) + } + } + } + Ok(()) +} + +/// Recomputes a state root hash from a [`TrieMerkleProof`]. +/// This is done in the following steps: +/// +/// 1. Using [`TrieMerkleProof::key`] and [`TrieMerkleProof::value`], construct a [`Trie::Leaf`] and +/// compute a hash for that leaf. +/// +/// 2. We then iterate over [`TrieMerkleProof::proof_steps`] left to right, using the hash from the +/// previous step combined with the next step to compute a new hash. +/// +/// 3. When there are no more steps, we return the final hash we have computed. +/// +/// The steps in this function reflect `operations::rehash`. +pub fn compute_state_hash(proof: &TrieMerkleProof) -> Result +where + K: ToBytes + Copy + Clone, + V: ToBytes + Clone, +{ + let mut hash = { + let leaf = Trie::leaf(proof.key(), proof.value().to_owned()); + leaf.trie_hash()? 
+ }; + + for (proof_step_index, proof_step) in proof.proof_steps().iter().enumerate() { + let pointer = if proof_step_index == 0 { + Pointer::LeafPointer(hash) + } else { + Pointer::NodePointer(hash) + }; + let proof_step_bytes = match proof_step { + TrieMerkleProofStep::Node { + hole_index, + indexed_pointers_with_hole, + } => { + let hole_index = *hole_index; + assert!(hole_index as usize <= RADIX, "hole_index exceeded RADIX"); + let mut indexed_pointers = indexed_pointers_with_hole.to_owned(); + indexed_pointers.push((hole_index, pointer)); + Trie::::node(&indexed_pointers).to_bytes()? + } + TrieMerkleProofStep::Extension { affix } => { + Trie::::extension(affix.clone().into(), pointer).to_bytes()? + } + }; + hash = Digest::hash(&proof_step_bytes); + } + Ok(hash) +} diff --git a/storage/src/global_state/trie_store/operations/store_wrappers.rs b/storage/src/global_state/trie_store/operations/store_wrappers.rs new file mode 100644 index 0000000000..8be4afb302 --- /dev/null +++ b/storage/src/global_state/trie_store/operations/store_wrappers.rs @@ -0,0 +1,242 @@ +use std::marker::PhantomData; +#[cfg(debug_assertions)] +use std::{ + collections::HashSet, + sync::{Arc, Mutex}, +}; + +use casper_types::{ + bytesrepr::{self, FromBytes, ToBytes}, + Digest, +}; + +use crate::global_state::{ + store::Store, + transaction_source::{Readable, Writable}, + trie::Trie, + trie_store::TrieStore, +}; + +/// A [`TrieStore`] wrapper that panics in debug mode whenever an attempt to deserialize [`V`] is +/// made, otherwise it behaves as a [`TrieStore`]. +/// +/// To ensure this wrapper has zero overhead, a debug assertion is used. 
+pub(crate) struct NonDeserializingStore<'a, K, V, S>(&'a S, PhantomData<*const (K, V)>) +where + S: TrieStore; + +impl<'a, K, V, S> NonDeserializingStore<'a, K, V, S> +where + S: TrieStore, +{ + pub(crate) fn new(store: &'a S) -> Self { + Self(store, PhantomData) + } +} + +impl Store> for NonDeserializingStore<'_, K, V, S> +where + S: TrieStore, +{ + type Error = S::Error; + + type Handle = S::Handle; + + #[inline] + fn handle(&self) -> Self::Handle { + self.0.handle() + } + + #[inline] + fn deserialize_value(&self, bytes: &[u8]) -> Result, bytesrepr::Error> + where + Trie: FromBytes, + { + #[cfg(debug_assertions)] + { + let trie: Trie = self.0.deserialize_value(bytes)?; + if let Trie::Leaf { .. } = trie { + panic!("Tried to deserialize a value but expected no deserialization to happen.") + } + Ok(trie) + } + #[cfg(not(debug_assertions))] + { + self.0.deserialize_value(bytes) + } + } + + #[inline] + fn serialize_value(&self, value: &Trie) -> Result, bytesrepr::Error> + where + Trie: ToBytes, + { + self.0.serialize_value(value) + } + + #[inline] + fn get(&self, txn: &T, key: &Digest) -> Result>, Self::Error> + where + T: Readable, + Digest: AsRef<[u8]>, + Trie: FromBytes, + Self::Error: From, + { + self.0.get(txn, key) + } + + #[inline] + fn get_raw(&self, txn: &T, key: &Digest) -> Result, Self::Error> + where + T: Readable, + Digest: AsRef<[u8]>, + Self::Error: From, + { + self.0.get_raw(txn, key) + } + + #[inline] + fn put(&self, txn: &mut T, key: &Digest, value: &Trie) -> Result<(), Self::Error> + where + T: Writable, + Digest: AsRef<[u8]>, + Trie: ToBytes, + Self::Error: From, + { + self.0.put(txn, key, value) + } + + #[inline] + fn put_raw( + &self, + txn: &mut T, + key: &Digest, + value_bytes: std::borrow::Cow<'_, [u8]>, + ) -> Result<(), Self::Error> + where + T: Writable, + Digest: AsRef<[u8]>, + Self::Error: From, + { + self.0.put_raw(txn, key, value_bytes) + } +} + +pub(crate) struct OnceDeserializingStore<'a, K: ToBytes, V: ToBytes, S: TrieStore> { + 
store: &'a S, + #[cfg(debug_assertions)] + deserialize_tracking: Arc>>, + _marker: PhantomData<*const (K, V)>, +} + +impl<'a, K, V, S> OnceDeserializingStore<'a, K, V, S> +where + K: ToBytes, + V: ToBytes, + S: TrieStore, +{ + pub(crate) fn new(store: &'a S) -> Self { + Self { + store, + #[cfg(debug_assertions)] + deserialize_tracking: Arc::new(Mutex::new(HashSet::new())), + _marker: PhantomData, + } + } +} + +impl Store> for OnceDeserializingStore<'_, K, V, S> +where + K: ToBytes, + V: ToBytes, + S: TrieStore, +{ + type Error = S::Error; + + type Handle = S::Handle; + + #[inline] + fn handle(&self) -> Self::Handle { + self.store.handle() + } + + #[inline] + fn deserialize_value(&self, bytes: &[u8]) -> Result, bytesrepr::Error> + where + Trie: FromBytes, + { + #[cfg(debug_assertions)] + { + let trie: Trie = self.store.deserialize_value(bytes)?; + if let Trie::Leaf { .. } = trie { + let trie_hash = trie.trie_hash()?; + let mut tracking = self.deserialize_tracking.lock().expect("Poisoned lock"); + if tracking.get(&trie_hash).is_some() { + panic!("Tried to deserialize a value more than once."); + } else { + tracking.insert(trie_hash); + } + } + Ok(trie) + } + #[cfg(not(debug_assertions))] + { + self.store.deserialize_value(bytes) + } + } + + #[inline] + fn serialize_value(&self, value: &Trie) -> Result, bytesrepr::Error> + where + Trie: ToBytes, + { + self.store.serialize_value(value) + } + + #[inline] + fn get(&self, txn: &T, key: &Digest) -> Result>, Self::Error> + where + T: Readable, + Digest: AsRef<[u8]>, + Trie: FromBytes, + Self::Error: From, + { + self.store.get(txn, key) + } + + #[inline] + fn get_raw(&self, txn: &T, key: &Digest) -> Result, Self::Error> + where + T: Readable, + Digest: AsRef<[u8]>, + Self::Error: From, + { + self.store.get_raw(txn, key) + } + + #[inline] + fn put(&self, txn: &mut T, key: &Digest, value: &Trie) -> Result<(), Self::Error> + where + T: Writable, + Digest: AsRef<[u8]>, + Trie: ToBytes, + Self::Error: From, + { + 
self.store.put(txn, key, value) + } + + #[inline] + fn put_raw( + &self, + txn: &mut T, + key: &Digest, + value_bytes: std::borrow::Cow<'_, [u8]>, + ) -> Result<(), Self::Error> + where + T: Writable, + Digest: AsRef<[u8]>, + Self::Error: From, + { + self.store.put_raw(txn, key, value_bytes) + } +} diff --git a/storage/src/global_state/trie_store/operations/tests/bytesrepr_utils.rs b/storage/src/global_state/trie_store/operations/tests/bytesrepr_utils.rs new file mode 100644 index 0000000000..7c44d0f9af --- /dev/null +++ b/storage/src/global_state/trie_store/operations/tests/bytesrepr_utils.rs @@ -0,0 +1,43 @@ +use casper_types::bytesrepr::{self, FromBytes, ToBytes}; + +#[derive(PartialEq, Eq, Debug, Clone)] +pub(crate) struct PanickingFromBytes(T); + +impl PanickingFromBytes { + pub(crate) fn new(inner: T) -> PanickingFromBytes { + PanickingFromBytes(inner) + } +} + +impl FromBytes for PanickingFromBytes +where + T: FromBytes, +{ + fn from_bytes(_: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + unreachable!("This type is expected to never deserialize."); + } +} + +impl ToBytes for PanickingFromBytes +where + T: ToBytes, +{ + fn into_bytes(self) -> Result, bytesrepr::Error> + where + Self: Sized, + { + self.0.into_bytes() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.0.write_bytes(writer) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.0.to_bytes() + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } +} diff --git a/storage/src/global_state/trie_store/operations/tests/ee_699.rs b/storage/src/global_state/trie_store/operations/tests/ee_699.rs new file mode 100644 index 0000000000..55a48e9a05 --- /dev/null +++ b/storage/src/global_state/trie_store/operations/tests/ee_699.rs @@ -0,0 +1,498 @@ +use proptest::{arbitrary, array, collection, prop_oneof, strategy::Strategy}; + +use casper_types::{ + bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + gens, Digest, URef, 
+}; + +use super::{HashedTrie, TestValue}; +use crate::global_state::trie::Trie; + +pub const BASIC_LENGTH: usize = 4; +pub const SIMILAR_LENGTH: usize = 4; +pub const FANCY_LENGTH: usize = 5; +pub const LONG_LENGTH: usize = 8; + +const PUBLIC_KEY_BASIC_ID: u8 = 0; +const PUBLIC_KEY_SIMILAR_ID: u8 = 1; +const PUBLIC_KEY_FANCY_ID: u8 = 2; +const PUBLIC_KEY_LONG_ID: u8 = 3; + +pub const KEY_HASH_LENGTH: usize = 32; + +const KEY_ACCOUNT_ID: u8 = 0; +const KEY_HASH_ID: u8 = 1; +const KEY_UREF_ID: u8 = 2; + +macro_rules! make_array_newtype { + ($name:ident, $ty:ty, $len:expr) => { + pub struct $name([$ty; $len]); + + impl $name { + pub fn new(source: [$ty; $len]) -> Self { + $name(source) + } + + pub fn into_inner(self) -> [$ty; $len] { + self.0 + } + } + + // impl Clone for $name { + // fn clone(&self) -> $name { + // let &$name(ref dat) = self; + // $name(dat.clone()) + // } + // } + + impl Clone for $name { + fn clone(&self) -> $name { + *self + } + } + + impl Copy for $name {} + + impl PartialEq for $name { + fn eq(&self, other: &$name) -> bool { + &self[..] == &other[..] 
+ } + } + + impl Eq for $name {} + + impl PartialOrd for $name { + fn partial_cmp(&self, other: &$name) -> Option { + Some(self.cmp(other)) + } + } + + impl Ord for $name { + fn cmp(&self, other: &$name) -> core::cmp::Ordering { + self.0.cmp(&other.0) + } + } + + impl core::ops::Index for $name { + type Output = $ty; + + fn index(&self, index: usize) -> &$ty { + let &$name(ref dat) = self; + &dat[index] + } + } + + impl core::ops::Index> for $name { + type Output = [$ty]; + + fn index(&self, index: core::ops::Range) -> &[$ty] { + let &$name(ref dat) = self; + &dat[index] + } + } + + impl core::ops::Index> for $name { + type Output = [$ty]; + + fn index(&self, index: core::ops::RangeTo) -> &[$ty] { + let &$name(ref dat) = self; + &dat[index] + } + } + + impl core::ops::Index> for $name { + type Output = [$ty]; + + fn index(&self, index: core::ops::RangeFrom) -> &[$ty] { + let &$name(ref dat) = self; + &dat[index] + } + } + + impl core::ops::Index for $name { + type Output = [$ty]; + + fn index(&self, _: core::ops::RangeFull) -> &[$ty] { + let &$name(ref dat) = self; + &dat[..] 
+ } + } + + impl core::fmt::Debug for $name { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { + write!(f, "{}([", stringify!($name))?; + write!(f, "{:?}", self.0[0])?; + for item in self.0[1..].iter() { + write!(f, ", {:?}", item)?; + } + write!(f, "])") + } + } + + #[allow(unused_qualifications)] + impl bytesrepr::ToBytes for $name { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.0.to_bytes() + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } + } + + #[allow(unused_qualifications)] + impl bytesrepr::FromBytes for $name { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (dat, rem) = <[$ty; $len]>::from_bytes(bytes)?; + Ok(($name(dat), rem)) + } + } + }; +} + +make_array_newtype!(Basic, u8, BASIC_LENGTH); +make_array_newtype!(Similar, u8, SIMILAR_LENGTH); +make_array_newtype!(Fancy, u8, FANCY_LENGTH); +make_array_newtype!(Long, u8, LONG_LENGTH); + +macro_rules! impl_distribution_for_array_newtype { + ($name:ident, $ty:ty, $len:expr) => { + impl rand::distributions::Distribution<$name> for rand::distributions::Standard { + fn sample(&self, rng: &mut R) -> $name { + let mut dat = [0u8; $len]; + rng.fill_bytes(dat.as_mut()); + $name(dat) + } + } + }; +} + +impl_distribution_for_array_newtype!(Basic, u8, BASIC_LENGTH); +impl_distribution_for_array_newtype!(Similar, u8, SIMILAR_LENGTH); +impl_distribution_for_array_newtype!(Fancy, u8, FANCY_LENGTH); +impl_distribution_for_array_newtype!(Long, u8, LONG_LENGTH); + +macro_rules! 
make_array_newtype_arb { + ($name:ident, $ty:ty, $len:expr, $fn_name:ident) => { + fn $fn_name() -> impl Strategy { + collection::vec(arbitrary::any::<$ty>(), $len).prop_map(|values| { + let mut dat = [0u8; $len]; + dat.copy_from_slice(values.as_slice()); + $name(dat) + }) + } + }; +} + +make_array_newtype_arb!(Basic, u8, BASIC_LENGTH, basic_arb); +make_array_newtype_arb!(Similar, u8, SIMILAR_LENGTH, similar_arb); +make_array_newtype_arb!(Fancy, u8, FANCY_LENGTH, fancy_arb); +make_array_newtype_arb!(Long, u8, LONG_LENGTH, long_arb); + +#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)] +pub enum PublicKey { + Basic(Basic), + Similar(Similar), + Fancy(Fancy), + Long(Long), +} + +impl ToBytes for PublicKey { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut ret = bytesrepr::allocate_buffer(self)?; + match self { + PublicKey::Basic(key) => { + ret.push(PUBLIC_KEY_BASIC_ID); + ret.extend(key.to_bytes()?) + } + PublicKey::Similar(key) => { + ret.push(PUBLIC_KEY_SIMILAR_ID); + ret.extend(key.to_bytes()?) + } + PublicKey::Fancy(key) => { + ret.push(PUBLIC_KEY_FANCY_ID); + ret.extend(key.to_bytes()?) + } + PublicKey::Long(key) => { + ret.push(PUBLIC_KEY_LONG_ID); + ret.extend(key.to_bytes()?) 
+ } + }; + Ok(ret) + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + + match self { + PublicKey::Basic(key) => key.serialized_length(), + PublicKey::Similar(key) => key.serialized_length(), + PublicKey::Fancy(key) => key.serialized_length(), + PublicKey::Long(key) => key.serialized_length(), + } + } +} + +impl FromBytes for PublicKey { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (id, rem): (u8, &[u8]) = FromBytes::from_bytes(bytes)?; + match id { + PUBLIC_KEY_BASIC_ID => { + let (key, rem): (Basic, &[u8]) = FromBytes::from_bytes(rem)?; + Ok((PublicKey::Basic(key), rem)) + } + PUBLIC_KEY_SIMILAR_ID => { + let (key, rem): (Similar, &[u8]) = FromBytes::from_bytes(rem)?; + Ok((PublicKey::Similar(key), rem)) + } + PUBLIC_KEY_FANCY_ID => { + let (key, rem): (Fancy, &[u8]) = FromBytes::from_bytes(rem)?; + Ok((PublicKey::Fancy(key), rem)) + } + PUBLIC_KEY_LONG_ID => { + let (key, rem): (Long, &[u8]) = FromBytes::from_bytes(rem)?; + Ok((PublicKey::Long(key), rem)) + } + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +fn public_key_arb() -> impl Strategy { + prop_oneof![ + basic_arb().prop_map(PublicKey::Basic), + similar_arb().prop_map(PublicKey::Similar), + fancy_arb().prop_map(PublicKey::Fancy), + long_arb().prop_map(PublicKey::Long) + ] +} + +#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)] +pub enum TestKey { + Account(PublicKey), + Hash([u8; KEY_HASH_LENGTH]), + URef(URef), +} + +impl ToBytes for TestKey { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut ret = Vec::with_capacity(self.serialized_length()); + match self { + TestKey::Account(public_key) => { + ret.push(KEY_ACCOUNT_ID); + ret.extend(&public_key.to_bytes()?) + } + TestKey::Hash(hash) => { + ret.push(KEY_HASH_ID); + ret.extend(&hash.to_bytes()?) + } + TestKey::URef(uref) => { + ret.push(KEY_UREF_ID); + ret.extend(&uref.to_bytes()?) 
+ } + } + Ok(ret) + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + + match self { + TestKey::Account(public_key) => public_key.serialized_length(), + TestKey::Hash(hash) => hash.serialized_length(), + TestKey::URef(uref) => uref.serialized_length(), + } + } +} + +impl FromBytes for TestKey { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (id, rem): (u8, &[u8]) = FromBytes::from_bytes(bytes)?; + match id { + KEY_ACCOUNT_ID => { + let (public_key, rem): (PublicKey, &[u8]) = FromBytes::from_bytes(rem)?; + Ok((TestKey::Account(public_key), rem)) + } + KEY_HASH_ID => { + let (hash, rem): ([u8; KEY_HASH_LENGTH], &[u8]) = FromBytes::from_bytes(rem)?; + Ok((TestKey::Hash(hash), rem)) + } + KEY_UREF_ID => { + let (uref, rem): (URef, &[u8]) = FromBytes::from_bytes(rem)?; + Ok((TestKey::URef(uref), rem)) + } + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +fn test_key_arb() -> impl Strategy { + prop_oneof![ + public_key_arb().prop_map(TestKey::Account), + gens::u8_slice_32().prop_map(TestKey::Hash), + gens::uref_arb().prop_map(TestKey::URef), + ] +} + +#[allow(clippy::unnecessary_operation)] +mod basics { + use proptest::proptest; + + use super::*; + + #[test] + fn random_key_generation_works_as_expected() { + use rand::Rng; + let mut rng = rand::thread_rng(); + let a: Basic = rng.gen(); + let b: Basic = rng.gen(); + assert_ne!(a, b) + } + + proptest! { + #[test] + fn key_should_roundtrip(key in test_key_arb()) { + bytesrepr::test_serialization_roundtrip(&key) + } + } +} + +type TestTrie = Trie; + +const TEST_LEAVES_LENGTH: usize = 6; + +/// Keys have been chosen deliberately and the `create_` functions below depend +/// on these exact definitions. Values are arbitrary. 
+const TEST_LEAVES: [TestTrie; TEST_LEAVES_LENGTH] = [ + Trie::Leaf { + key: TestKey::Account(PublicKey::Basic(Basic([0u8, 0, 0, 0]))), + value: TestValue(*b"value0"), + }, + Trie::Leaf { + key: TestKey::Account(PublicKey::Basic(Basic([0u8, 0, 0, 1]))), + value: TestValue(*b"value1"), + }, + Trie::Leaf { + key: TestKey::Account(PublicKey::Similar(Similar([0u8, 0, 0, 1]))), + value: TestValue(*b"value3"), + }, + Trie::Leaf { + key: TestKey::Account(PublicKey::Fancy(Fancy([0u8, 0, 0, 1, 0]))), + value: TestValue(*b"value4"), + }, + Trie::Leaf { + key: TestKey::Account(PublicKey::Long(Long([0u8, 0, 0, 1, 0, 0, 0, 0]))), + value: TestValue(*b"value5"), + }, + Trie::Leaf { + key: TestKey::Hash([0u8; 32]), + value: TestValue(*b"value6"), + }, +]; + +fn create_0_leaf_trie() -> Result<(Digest, Vec>), bytesrepr::Error> { + let root = HashedTrie::new(Trie::node(&[]))?; + + let root_hash: Digest = root.hash; + + let parents: Vec> = vec![root]; + + let tries: Vec> = { + let mut ret = Vec::new(); + ret.extend(parents); + ret + }; + + Ok((root_hash, tries)) +} + +mod empty_tries { + use super::*; + use crate::global_state::{ + error, + trie_store::operations::tests::{self, LmdbTestContext}, + }; + + #[test] + fn lmdb_writes_to_n_leaf_empty_trie_had_expected_results() { + let (root_hash, tries) = create_0_leaf_trie().unwrap(); + let context = LmdbTestContext::new(&tries).unwrap(); + let initial_states = vec![root_hash]; + + let _states = tests::writes_to_n_leaf_empty_trie_had_expected_results::< + _, + _, + _, + _, + _, + _, + error::Error, + >( + &context.environment, + &context.environment, + &context.store, + &context.store, + &initial_states, + &TEST_LEAVES, + ) + .unwrap(); + } +} + +mod proptests { + use std::ops::RangeInclusive; + + use proptest::{collection::vec, proptest}; + + use super::*; + use crate::global_state::{ + error::{self}, + trie_store::operations::tests::{self, LmdbTestContext}, + }; + + const DEFAULT_MIN_LENGTH: usize = 0; + const DEFAULT_MAX_LENGTH: usize 
= 100; + + fn get_range() -> RangeInclusive { + let start = option_env!("CL_TRIE_TEST_VECTOR_MIN_LENGTH") + .and_then(|s| str::parse::(s).ok()) + .unwrap_or(DEFAULT_MIN_LENGTH); + let end = option_env!("CL_TRIE_TEST_VECTOR_MAX_LENGTH") + .and_then(|s| str::parse::(s).ok()) + .unwrap_or(DEFAULT_MAX_LENGTH); + RangeInclusive::new(start, end) + } + + fn lmdb_roundtrip_succeeds(pairs: &[(TestKey, TestValue)]) -> bool { + let (root_hash, tries) = create_0_leaf_trie().unwrap(); + let context = LmdbTestContext::new(&tries).unwrap(); + let mut states_to_check = vec![]; + + let root_hashes = tests::write_pairs::<_, _, _, _, error::Error>( + &context.environment, + &context.store, + &root_hash, + pairs, + ) + .unwrap(); + + states_to_check.extend(root_hashes); + + tests::check_pairs::<_, _, _, _, error::Error>( + &context.environment, + &context.store, + &states_to_check, + pairs, + ) + .unwrap() + } + + fn test_value_arb() -> impl Strategy { + array::uniform6(arbitrary::any::()).prop_map(TestValue) + } + + proptest! 
{ + #[test] + fn prop_lmdb_roundtrip_succeeds(inputs in vec((test_key_arb(), test_value_arb()), get_range())) { + assert!(lmdb_roundtrip_succeeds(&inputs)); + } + } +} diff --git a/storage/src/global_state/trie_store/operations/tests/keys.rs b/storage/src/global_state/trie_store/operations/tests/keys.rs new file mode 100644 index 0000000000..9269bdab09 --- /dev/null +++ b/storage/src/global_state/trie_store/operations/tests/keys.rs @@ -0,0 +1,263 @@ +mod partial_tries { + use crate::global_state::{ + transaction_source::{Transaction, TransactionSource}, + trie::Trie, + trie_store::operations::{ + self, + tests::{ + bytesrepr_utils::PanickingFromBytes, LmdbTestContext, TestKey, TestValue, + TEST_LEAVES, TEST_TRIE_GENERATORS, + }, + }, + }; + + #[test] + fn lmdb_keys_from_n_leaf_partial_trie_had_expected_results() { + for (num_leaves, generator) in TEST_TRIE_GENERATORS.iter().enumerate() { + let (root_hash, tries) = generator().unwrap(); + let context = LmdbTestContext::new(&tries).unwrap(); + let test_leaves = TEST_LEAVES; + let (used, _) = test_leaves.split_at(num_leaves); + + let expected = { + let mut tmp = used + .iter() + .filter_map(Trie::key) + .cloned() + .collect::>(); + tmp.sort(); + tmp + }; + let actual = { + let txn = context.environment.create_read_txn().unwrap(); + let mut tmp = operations::keys::, _, _>( + &txn, + &context.store, + &root_hash, + ) + .filter_map(Result::ok) + .collect::>(); + txn.commit().unwrap(); + tmp.sort(); + tmp + }; + assert_eq!(actual, expected); + } + } +} + +mod full_tries { + use casper_types::Digest; + + use crate::global_state::{ + transaction_source::{Transaction, TransactionSource}, + trie::Trie, + trie_store::operations::{ + self, + tests::{ + bytesrepr_utils::PanickingFromBytes, LmdbTestContext, TestKey, TestValue, + EMPTY_HASHED_TEST_TRIES, TEST_LEAVES, TEST_TRIE_GENERATORS, + }, + }, + }; + + #[test] + fn lmdb_keys_from_n_leaf_full_trie_had_expected_results() { + let context = 
LmdbTestContext::new(EMPTY_HASHED_TEST_TRIES).unwrap(); + let mut states: Vec = Vec::new(); + + for (state_index, generator) in TEST_TRIE_GENERATORS.iter().enumerate() { + let (root_hash, tries) = generator().unwrap(); + context.update(&tries).unwrap(); + states.push(root_hash); + + for (num_leaves, state) in states[..state_index].iter().enumerate() { + let test_leaves = TEST_LEAVES; + let (used, _unused) = test_leaves.split_at(num_leaves); + + let expected = { + let mut tmp = used + .iter() + .filter_map(Trie::key) + .cloned() + .collect::>(); + tmp.sort(); + tmp + }; + let actual = { + let txn = context.environment.create_read_txn().unwrap(); + let mut tmp = operations::keys::, _, _>( + &txn, + &context.store, + state, + ) + .filter_map(Result::ok) + .collect::>(); + txn.commit().unwrap(); + tmp.sort(); + tmp + }; + assert_eq!(actual, expected); + } + } + } +} + +#[cfg(debug_assertions)] +mod keys_iterator { + use casper_types::{bytesrepr, global_state::Pointer, Digest}; + + use crate::global_state::{ + transaction_source::TransactionSource, + trie::Trie, + trie_store::operations::{ + self, + tests::{ + bytesrepr_utils::PanickingFromBytes, hash_test_tries, HashedTestTrie, HashedTrie, + LmdbTestContext, TestKey, TestValue, TEST_LEAVES, + }, + }, + }; + + fn create_invalid_extension_trie() -> Result<(Digest, Vec), bytesrepr::Error> { + let leaves = hash_test_tries(&TEST_LEAVES[2..3])?; + let ext_1 = HashedTrie::new(Trie::extension( + vec![0u8, 0], + Pointer::NodePointer(leaves[0].hash), + ))?; + + let root = HashedTrie::new(Trie::node(&[(0, Pointer::NodePointer(ext_1.hash))]))?; + let root_hash = root.hash; + + let tries = vec![root, ext_1, leaves[0].clone()]; + + Ok((root_hash, tries)) + } + + fn create_invalid_path_trie() -> Result<(Digest, Vec), bytesrepr::Error> { + let leaves = hash_test_tries(&TEST_LEAVES[..1])?; + + let root = HashedTrie::new(Trie::node(&[(1, Pointer::NodePointer(leaves[0].hash))]))?; + let root_hash = root.hash; + + let tries = vec![root, 
leaves[0].clone()]; + + Ok((root_hash, tries)) + } + + fn create_invalid_hash_trie() -> Result<(Digest, Vec), bytesrepr::Error> { + let leaves = hash_test_tries(&TEST_LEAVES[..2])?; + + let root = HashedTrie::new(Trie::node(&[(0, Pointer::NodePointer(leaves[1].hash))]))?; + let root_hash = root.hash; + + let tries = vec![root, leaves[0].clone()]; + + Ok((root_hash, tries)) + } + + macro_rules! return_on_err { + ($x:expr) => { + match $x { + Ok(result) => result, + Err(_) => { + return; // we expect the test to panic, so this will cause a test failure + } + } + }; + } + + fn test_trie(root_hash: Digest, tries: Vec) { + let context = return_on_err!(LmdbTestContext::new(&tries)); + let txn = return_on_err!(context.environment.create_read_txn()); + let _tmp = operations::keys::, _, _>( + &txn, + &context.store, + &root_hash, + ) + .collect::>(); + } + + #[test] + #[should_panic = "Expected a LazilyDeserializedTrie::Node but received"] + fn should_panic_on_leaf_after_extension() { + let (root_hash, tries) = return_on_err!(create_invalid_extension_trie()); + test_trie(root_hash, tries); + } + + #[test] + #[should_panic = "Expected key bytes to start with the current path"] + fn should_panic_when_key_not_matching_path() { + let (root_hash, tries) = return_on_err!(create_invalid_path_trie()); + test_trie(root_hash, tries); + } + + #[test] + #[should_panic = "Trie at the pointer is expected to exist"] + fn should_panic_on_pointer_to_nonexisting_hash() { + let (root_hash, tries) = return_on_err!(create_invalid_hash_trie()); + test_trie(root_hash, tries); + } +} + +mod keys_with_prefix_iterator { + use crate::global_state::{ + transaction_source::TransactionSource, + trie::Trie, + trie_store::operations::{ + self, + tests::{ + bytesrepr_utils::PanickingFromBytes, create_6_leaf_trie, LmdbTestContext, TestKey, + TestValue, TEST_LEAVES, + }, + }, + }; + + fn expected_keys(prefix: &[u8]) -> Vec { + let mut tmp = TEST_LEAVES + .iter() + .filter_map(Trie::key) + .filter(|key| 
key.0.starts_with(prefix)) + .cloned() + .collect::>(); + tmp.sort(); + tmp + } + + fn test_prefix(prefix: &[u8]) { + let (root_hash, tries) = create_6_leaf_trie().expect("should create a trie"); + let context = LmdbTestContext::new(&tries).expect("should create a new context"); + let txn = context + .environment + .create_read_txn() + .expect("should create a read txn"); + let expected = expected_keys(prefix); + let mut actual = + operations::keys_with_prefix::, _, _>( + &txn, + &context.store, + &root_hash, + prefix, + ) + .filter_map(Result::ok) + .collect::>(); + actual.sort(); + assert_eq!(expected, actual); + } + + #[test] + fn test_prefixes() { + test_prefix(&[]); // 6 leaves + test_prefix(&[0]); // 6 leaves + test_prefix(&[0, 1]); // 1 leaf + test_prefix(&[0, 1, 0]); // 1 leaf + test_prefix(&[0, 1, 1]); // 0 leaves + test_prefix(&[0, 0]); // 5 leaves + test_prefix(&[0, 0, 1]); // 0 leaves + test_prefix(&[0, 0, 2]); // 1 leaf + test_prefix(&[0, 0, 0, 0]); // 3 leaves, prefix points to an Extension + test_prefix(&[0, 0, 0, 0, 0]); // 3 leaves + test_prefix(&[0, 0, 0, 0, 0, 0]); // 2 leaves + test_prefix(&[0, 0, 0, 0, 0, 0, 1]); // 1 leaf + } +} diff --git a/storage/src/global_state/trie_store/operations/tests/mod.rs b/storage/src/global_state/trie_store/operations/tests/mod.rs new file mode 100644 index 0000000000..dc03fb43ec --- /dev/null +++ b/storage/src/global_state/trie_store/operations/tests/mod.rs @@ -0,0 +1,1005 @@ +pub(crate) mod bytesrepr_utils; +mod ee_699; +mod keys; +mod proptests; +mod prune; +mod read; +mod scan; +mod synchronize; +mod write; + +use std::{convert, ops::Not}; + +use lmdb::DatabaseFlags; +use tempfile::{tempdir, TempDir}; + +use casper_types::{ + bytesrepr::{self, FromBytes, ToBytes}, + global_state::{Pointer, TrieMerkleProof}, + Digest, +}; + +use crate::global_state::{ + error, + transaction_source::{lmdb::LmdbEnvironment, Readable, Transaction, TransactionSource}, + trie::Trie, + trie_store::{ + lmdb::LmdbTrieStore, + 
operations::{self, read, read_with_proof, write, ReadResult, WriteResult}, + TrieStore, + }, + DEFAULT_MAX_DB_SIZE, DEFAULT_MAX_READERS, +}; + +use super::compute_state_hash; + +use self::bytesrepr_utils::PanickingFromBytes; + +const TEST_KEY_LENGTH: usize = 7; + +/// A short key type for tests. +#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)] +struct TestKey([u8; TEST_KEY_LENGTH]); + +impl ToBytes for TestKey { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + Ok(self.0.to_vec()) + } + + fn serialized_length(&self) -> usize { + TEST_KEY_LENGTH + } +} + +impl FromBytes for TestKey { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (key, rem) = bytes.split_at(TEST_KEY_LENGTH); + let mut ret = [0u8; TEST_KEY_LENGTH]; + ret.copy_from_slice(key); + Ok((TestKey(ret), rem)) + } +} + +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] +enum VariableAddr { + Empty, + LegacyAddr([u8; TEST_KEY_LENGTH]), +} + +pub enum VariableAddrTag { + Empty = 0, + LegacyTestKey = 1, +} + +impl ToBytes for VariableAddr { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + 1 + match self { + Self::Empty => 0, + Self::LegacyAddr(_) => TEST_KEY_LENGTH, + } + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + Self::Empty => writer.push(VariableAddrTag::Empty as u8), + Self::LegacyAddr(addr) => { + writer.push(VariableAddrTag::LegacyTestKey as u8); + writer.extend(addr.to_bytes()?); + } + } + Ok(()) + } +} + +impl FromBytes for VariableAddr { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder): (u8, &[u8]) = FromBytes::from_bytes(bytes)?; + match tag { + tag if tag == VariableAddrTag::Empty as u8 => Ok((VariableAddr::Empty, remainder)), + tag if tag == VariableAddrTag::LegacyTestKey as u8 => { + let 
(key, rem) = remainder.split_at(TEST_KEY_LENGTH); + let mut ret = [0u8; TEST_KEY_LENGTH]; + ret.copy_from_slice(key); + Ok((VariableAddr::LegacyAddr(ret), rem)) + } + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] +enum MultiVariantTestKey { + VariableSizedKey(VariableAddr), +} + +const VARIABLE_SIZE_KEY_TAG: u8 = 1; + +impl ToBytes for MultiVariantTestKey { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + 1 + match self { + Self::VariableSizedKey(addr) => addr.serialized_length(), + } + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + Self::VariableSizedKey(addr) => { + writer.push(VARIABLE_SIZE_KEY_TAG); + writer.extend(addr.to_bytes()?); + } + } + Ok(()) + } +} + +impl FromBytes for MultiVariantTestKey { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder): (u8, &[u8]) = FromBytes::from_bytes(bytes)?; + match tag { + VARIABLE_SIZE_KEY_TAG => { + let (addr, rem) = FromBytes::from_bytes(remainder)?; + Ok((MultiVariantTestKey::VariableSizedKey(addr), rem)) + } + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +const TEST_VAL_LENGTH: usize = 6; + +/// A short value type for tests. 
+#[derive(Debug, Copy, Clone, PartialEq, Eq)] +struct TestValue([u8; TEST_VAL_LENGTH]); + +impl ToBytes for TestValue { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + Ok(self.0.to_vec()) + } + + fn serialized_length(&self) -> usize { + TEST_VAL_LENGTH + } +} + +impl FromBytes for TestValue { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (key, rem) = bytes.split_at(TEST_VAL_LENGTH); + let mut ret = [0u8; TEST_VAL_LENGTH]; + ret.copy_from_slice(key); + + Ok((TestValue(ret), rem)) + } +} + +type TestTrie = Trie; + +type HashedTestTrie = HashedTrie; + +/// A pairing of a trie element and its hash. +#[derive(Debug, Clone, PartialEq, Eq)] +struct HashedTrie { + hash: Digest, + trie: Trie, +} + +impl HashedTrie { + pub fn new(trie: Trie) -> Result { + let trie_bytes = trie.to_bytes()?; + let hash = Digest::hash(trie_bytes); + Ok(HashedTrie { hash, trie }) + } +} + +const EMPTY_HASHED_TEST_TRIES: &[HashedTestTrie] = &[]; + +const TEST_LEAVES_LENGTH: usize = 6; + +/// Keys have been chosen deliberately and the `create_` functions below depend +/// on these exact definitions. Values are arbitrary. 
+const TEST_LEAVES: [TestTrie; TEST_LEAVES_LENGTH] = [ + Trie::Leaf { + key: TestKey([0u8, 0, 0, 0, 0, 0, 0]), + value: TestValue(*b"value0"), + }, + Trie::Leaf { + key: TestKey([0u8, 0, 0, 0, 0, 0, 1]), + value: TestValue(*b"value1"), + }, + Trie::Leaf { + key: TestKey([0u8, 0, 0, 2, 0, 0, 0]), + value: TestValue(*b"value2"), + }, + Trie::Leaf { + key: TestKey([0u8, 0, 0, 0, 0, 255, 0]), + value: TestValue(*b"value3"), + }, + Trie::Leaf { + key: TestKey([0u8, 1, 0, 0, 0, 0, 0]), + value: TestValue(*b"value4"), + }, + Trie::Leaf { + key: TestKey([0u8, 0, 2, 0, 0, 0, 0]), + value: TestValue(*b"value5"), + }, +]; + +const TEST_LEAVES_UPDATED: [TestTrie; TEST_LEAVES_LENGTH] = [ + Trie::Leaf { + key: TestKey([0u8, 0, 0, 0, 0, 0, 0]), + value: TestValue(*b"valueA"), + }, + Trie::Leaf { + key: TestKey([0u8, 0, 0, 0, 0, 0, 1]), + value: TestValue(*b"valueB"), + }, + Trie::Leaf { + key: TestKey([0u8, 0, 0, 2, 0, 0, 0]), + value: TestValue(*b"valueC"), + }, + Trie::Leaf { + key: TestKey([0u8, 0, 0, 0, 0, 255, 0]), + value: TestValue(*b"valueD"), + }, + Trie::Leaf { + key: TestKey([0u8, 1, 0, 0, 0, 0, 0]), + value: TestValue(*b"valueE"), + }, + Trie::Leaf { + key: TestKey([0u8, 0, 2, 0, 0, 0, 0]), + value: TestValue(*b"valueF"), + }, +]; + +const TEST_LEAVES_NON_COLLIDING: [TestTrie; TEST_LEAVES_LENGTH] = [ + Trie::Leaf { + key: TestKey([0u8, 0, 0, 0, 0, 0, 0]), + value: TestValue(*b"valueA"), + }, + Trie::Leaf { + key: TestKey([1u8, 0, 0, 0, 0, 0, 0]), + value: TestValue(*b"valueB"), + }, + Trie::Leaf { + key: TestKey([2u8, 0, 0, 0, 0, 0, 0]), + value: TestValue(*b"valueC"), + }, + Trie::Leaf { + key: TestKey([3u8, 0, 0, 0, 0, 0, 0]), + value: TestValue(*b"valueD"), + }, + Trie::Leaf { + key: TestKey([4u8, 0, 0, 0, 0, 0, 0]), + value: TestValue(*b"valueE"), + }, + Trie::Leaf { + key: TestKey([5u8, 0, 0, 0, 0, 0, 0]), + value: TestValue(*b"valueF"), + }, +]; + +const TEST_LEAVES_ADJACENTS: [TestTrie; TEST_LEAVES_LENGTH] = [ + Trie::Leaf { + key: TestKey([0u8, 0, 0, 0, 0, 0, 
2]), + value: TestValue(*b"valueA"), + }, + Trie::Leaf { + key: TestKey([0u8, 0, 0, 0, 0, 0, 3]), + value: TestValue(*b"valueB"), + }, + Trie::Leaf { + key: TestKey([0u8, 0, 0, 3, 0, 0, 0]), + value: TestValue(*b"valueC"), + }, + Trie::Leaf { + key: TestKey([0u8, 0, 0, 0, 0, 1, 0]), + value: TestValue(*b"valueD"), + }, + Trie::Leaf { + key: TestKey([0u8, 2, 0, 0, 0, 0, 0]), + value: TestValue(*b"valueE"), + }, + Trie::Leaf { + key: TestKey([0u8, 0, 3, 0, 0, 0, 0]), + value: TestValue(*b"valueF"), + }, +]; + +type TrieGenerator = fn() -> Result<(Digest, Vec>), bytesrepr::Error>; + +const TEST_TRIE_GENERATORS_LENGTH: usize = 7; + +const TEST_TRIE_GENERATORS: [TrieGenerator; TEST_TRIE_GENERATORS_LENGTH] = [ + create_0_leaf_trie, + create_1_leaf_trie, + create_2_leaf_trie, + create_3_leaf_trie, + create_4_leaf_trie, + create_5_leaf_trie, + create_6_leaf_trie, +]; + +fn hash_test_tries(tries: &[TestTrie]) -> Result, bytesrepr::Error> { + tries + .iter() + .map(|trie| HashedTestTrie::new(trie.to_owned())) + .collect() +} + +fn create_0_leaf_trie() -> Result<(Digest, Vec), bytesrepr::Error> { + let root = HashedTrie::new(Trie::node(&[]))?; + + let root_hash: Digest = root.hash; + + let parents: Vec = vec![root]; + + let tries: Vec = { + let mut ret = Vec::new(); + ret.extend(parents); + ret + }; + + Ok((root_hash, tries)) +} + +fn create_empty_trie() -> Result<(Digest, Vec>), bytesrepr::Error> +where + K: ToBytes, + V: ToBytes, +{ + let root_node = HashedTrie::::new(Trie::node(&[]))?; + let root_hash = root_node.hash; + let tries = vec![root_node]; + + Ok((root_hash, tries)) +} + +fn create_1_leaf_trie() -> Result<(Digest, Vec), bytesrepr::Error> { + let leaves = hash_test_tries(&TEST_LEAVES[..1])?; + + let root = HashedTrie::new(Trie::node(&[(0, Pointer::LeafPointer(leaves[0].hash))]))?; + + let root_hash: Digest = root.hash; + + let parents: Vec = vec![root]; + + let tries: Vec = { + let mut ret = Vec::new(); + ret.extend(leaves); + ret.extend(parents); + ret + }; + + 
Ok((root_hash, tries)) +} + +fn create_2_leaf_trie() -> Result<(Digest, Vec), bytesrepr::Error> { + let leaves = hash_test_tries(&TEST_LEAVES[..2])?; + + let node = HashedTrie::new(Trie::node(&[ + (0, Pointer::LeafPointer(leaves[0].hash)), + (1, Pointer::LeafPointer(leaves[1].hash)), + ]))?; + + let ext = HashedTrie::new(Trie::extension( + vec![0u8, 0, 0, 0, 0], + Pointer::NodePointer(node.hash), + ))?; + + let root = HashedTrie::new(Trie::node(&[(0, Pointer::NodePointer(ext.hash))]))?; + + let root_hash = root.hash; + + let parents: Vec = vec![root, ext, node]; + + let tries: Vec = { + let mut ret = Vec::new(); + ret.extend(leaves); + ret.extend(parents); + ret + }; + + Ok((root_hash, tries)) +} + +fn create_3_leaf_trie() -> Result<(Digest, Vec), bytesrepr::Error> { + let leaves = hash_test_tries(&TEST_LEAVES[..3])?; + + let node_1 = HashedTrie::new(Trie::node(&[ + (0, Pointer::LeafPointer(leaves[0].hash)), + (1, Pointer::LeafPointer(leaves[1].hash)), + ]))?; + + let ext_1 = HashedTrie::new(Trie::extension( + vec![0u8, 0], + Pointer::NodePointer(node_1.hash), + ))?; + + let node_2 = HashedTrie::new(Trie::node(&[ + (0, Pointer::NodePointer(ext_1.hash)), + (2, Pointer::LeafPointer(leaves[2].hash)), + ]))?; + + let ext_2 = HashedTrie::new(Trie::extension( + vec![0u8, 0], + Pointer::NodePointer(node_2.hash), + ))?; + + let root = HashedTrie::new(Trie::node(&[(0, Pointer::NodePointer(ext_2.hash))]))?; + + let root_hash = root.hash; + + let parents: Vec = vec![root, ext_2, node_2, ext_1, node_1]; + + let tries: Vec = { + let mut ret = Vec::new(); + ret.extend(leaves); + ret.extend(parents); + ret + }; + + Ok((root_hash, tries)) +} + +fn create_4_leaf_trie() -> Result<(Digest, Vec), bytesrepr::Error> { + let leaves = hash_test_tries(&TEST_LEAVES[..4])?; + + let node_1 = HashedTrie::new(Trie::node(&[ + (0, Pointer::LeafPointer(leaves[0].hash)), + (1, Pointer::LeafPointer(leaves[1].hash)), + ]))?; + + let node_2 = HashedTrie::new(Trie::node(&[ + (0, 
Pointer::NodePointer(node_1.hash)), + (255, Pointer::LeafPointer(leaves[3].hash)), + ]))?; + + let ext_1 = HashedTrie::new(Trie::extension( + vec![0u8], + Pointer::NodePointer(node_2.hash), + ))?; + + let node_3 = HashedTrie::new(Trie::node(&[ + (0, Pointer::NodePointer(ext_1.hash)), + (2, Pointer::LeafPointer(leaves[2].hash)), + ]))?; + + let ext_2 = HashedTrie::new(Trie::extension( + vec![0u8, 0], + Pointer::NodePointer(node_3.hash), + ))?; + + let root = HashedTrie::new(Trie::node(&[(0, Pointer::NodePointer(ext_2.hash))]))?; + + let root_hash = root.hash; + + let parents: Vec = vec![root, ext_2, node_3, ext_1, node_2, node_1]; + + let tries: Vec = { + let mut ret = Vec::new(); + ret.extend(leaves); + ret.extend(parents); + ret + }; + + Ok((root_hash, tries)) +} + +fn create_5_leaf_trie() -> Result<(Digest, Vec), bytesrepr::Error> { + let leaves = hash_test_tries(&TEST_LEAVES[..5])?; + + let node_1 = HashedTrie::new(Trie::node(&[ + (0, Pointer::LeafPointer(leaves[0].hash)), + (1, Pointer::LeafPointer(leaves[1].hash)), + ]))?; + + let node_2 = HashedTrie::new(Trie::node(&[ + (0, Pointer::NodePointer(node_1.hash)), + (255, Pointer::LeafPointer(leaves[3].hash)), + ]))?; + + let ext_1 = HashedTrie::new(Trie::extension( + vec![0u8], + Pointer::NodePointer(node_2.hash), + ))?; + + let node_3 = HashedTrie::new(Trie::node(&[ + (0, Pointer::NodePointer(ext_1.hash)), + (2, Pointer::LeafPointer(leaves[2].hash)), + ]))?; + + let ext_2 = HashedTrie::new(Trie::extension( + vec![0u8], + Pointer::NodePointer(node_3.hash), + ))?; + + let node_4 = HashedTrie::new(Trie::node(&[ + (0, Pointer::NodePointer(ext_2.hash)), + (1, Pointer::LeafPointer(leaves[4].hash)), + ]))?; + + let root = HashedTrie::new(Trie::node(&[(0, Pointer::NodePointer(node_4.hash))]))?; + + let root_hash = root.hash; + + let parents: Vec = vec![root, node_4, ext_2, node_3, ext_1, node_2, node_1]; + + let tries: Vec = { + let mut ret = Vec::new(); + ret.extend(leaves); + ret.extend(parents); + ret + }; + + 
Ok((root_hash, tries)) +} + +fn create_6_leaf_trie() -> Result<(Digest, Vec), bytesrepr::Error> { + let leaves = hash_test_tries(&TEST_LEAVES)?; + + let node_1 = HashedTrie::new(Trie::node(&[ + (0, Pointer::LeafPointer(leaves[0].hash)), + (1, Pointer::LeafPointer(leaves[1].hash)), + ]))?; + + let node_2 = HashedTrie::new(Trie::node(&[ + (0, Pointer::NodePointer(node_1.hash)), + (255, Pointer::LeafPointer(leaves[3].hash)), + ]))?; + + let ext = HashedTrie::new(Trie::extension( + vec![0u8], + Pointer::NodePointer(node_2.hash), + ))?; + + let node_3 = HashedTrie::new(Trie::node(&[ + (0, Pointer::NodePointer(ext.hash)), + (2, Pointer::LeafPointer(leaves[2].hash)), + ]))?; + + let node_4 = HashedTrie::new(Trie::node(&[ + (0, Pointer::NodePointer(node_3.hash)), + (2, Pointer::LeafPointer(leaves[5].hash)), + ]))?; + + let node_5 = HashedTrie::new(Trie::node(&[ + (0, Pointer::NodePointer(node_4.hash)), + (1, Pointer::LeafPointer(leaves[4].hash)), + ]))?; + + let root = HashedTrie::new(Trie::node(&[(0, Pointer::NodePointer(node_5.hash))]))?; + + let root_hash = root.hash; + + let parents: Vec = vec![root, node_5, node_4, node_3, ext, node_2, node_1]; + + let tries: Vec = { + let mut ret = Vec::new(); + ret.extend(leaves); + ret.extend(parents); + ret + }; + + Ok((root_hash, tries)) +} + +fn put_tries<'a, K, V, R, S, E>( + environment: &'a R, + store: &S, + tries: &[HashedTrie], +) -> Result<(), E> +where + K: ToBytes, + V: ToBytes, + R: TransactionSource<'a, Handle = S::Handle>, + S: TrieStore, + S::Error: From, + E: From + From + From, +{ + if tries.is_empty() { + return Ok(()); + } + let mut txn = environment.create_read_write_txn()?; + for HashedTrie { hash, trie } in tries.iter() { + store.put(&mut txn, hash, trie)?; + } + txn.commit()?; + Ok(()) +} + +// A context for holding lmdb-based test resources +struct LmdbTestContext { + _temp_dir: TempDir, + environment: LmdbEnvironment, + store: LmdbTrieStore, +} + +impl LmdbTestContext { + fn new(tries: &[HashedTrie]) -> 
anyhow::Result + where + K: FromBytes + ToBytes, + V: FromBytes + ToBytes, + { + let _temp_dir = tempdir()?; + let environment = LmdbEnvironment::new( + _temp_dir.path(), + DEFAULT_MAX_DB_SIZE, + DEFAULT_MAX_READERS, + true, + )?; + let store = LmdbTrieStore::new(&environment, None, DatabaseFlags::empty())?; + put_tries::<_, _, _, _, error::Error>(&environment, &store, tries)?; + Ok(LmdbTestContext { + _temp_dir, + environment, + store, + }) + } + + fn update(&self, tries: &[HashedTrie]) -> anyhow::Result<()> + where + K: ToBytes, + V: ToBytes, + { + put_tries::<_, _, _, _, error::Error>(&self.environment, &self.store, tries)?; + Ok(()) + } +} + +fn check_leaves_exist( + txn: &T, + store: &S, + root: &Digest, + leaves: &[Trie], +) -> Result, E> +where + K: ToBytes + FromBytes + Eq + std::fmt::Debug, + V: ToBytes + FromBytes + Eq + Copy, + T: Readable, + S: TrieStore, + S::Error: From, + E: From + From, +{ + let mut ret = Vec::new(); + + for leaf in leaves { + if let Trie::Leaf { key, value } = leaf { + let maybe_value: ReadResult = read::<_, _, _, _, E>(txn, store, root, key)?; + if let ReadResult::Found(value_found) = maybe_value { + ret.push(*value == value_found); + } + } else { + panic!("leaves should only contain leaves") + } + } + Ok(ret) +} + +/// For a given vector of leaves check the merkle proofs exist and are correct +fn check_merkle_proofs( + txn: &T, + store: &S, + root: &Digest, + leaves: &[Trie], +) -> Result, E> +where + K: ToBytes + FromBytes + Eq + std::fmt::Debug + Copy, + V: ToBytes + FromBytes + Eq + Copy, + T: Readable, + S: TrieStore, + S::Error: From, + E: From + From, +{ + let mut ret = Vec::new(); + + for leaf in leaves { + if let Trie::Leaf { key, value } = leaf { + let maybe_proof: ReadResult> = + read_with_proof::<_, _, _, _, E>(txn, store, root, key)?; + match maybe_proof { + ReadResult::Found(proof) => { + let hash = compute_state_hash(&proof)?; + ret.push(hash == *root && proof.value() == value); + } + ReadResult::NotFound => { + 
ret.push(false); + } + ReadResult::RootNotFound => panic!("Root not found!"), + }; + } else { + panic!("leaves should only contain leaves") + } + } + Ok(ret) +} + +fn check_keys(txn: &T, store: &S, root: &Digest, leaves: &[Trie]) -> bool +where + K: ToBytes + FromBytes + Eq + std::fmt::Debug + Clone + Ord, + V: ToBytes + FromBytes + Eq + std::fmt::Debug + Copy, + T: Readable, + S: TrieStore, + S::Error: From, +{ + let expected = { + let mut tmp = leaves + .iter() + .filter_map(Trie::key) + .cloned() + .collect::>(); + tmp.sort(); + tmp + }; + let actual = { + let mut tmp = operations::keys::<_, _, _, _>(txn, store, root) + .filter_map(Result::ok) + .collect::>(); + tmp.sort(); + tmp + }; + expected == actual +} + +fn check_leaves<'a, K, V, R, S, E>( + environment: &'a R, + store: &S, + root: &Digest, + present: &[Trie], + absent: &[Trie], +) -> Result<(), E> +where + K: ToBytes + FromBytes + Eq + std::fmt::Debug + Copy + Clone + Ord, + V: ToBytes + FromBytes + Eq + std::fmt::Debug + Copy, + R: TransactionSource<'a, Handle = S::Handle>, + S: TrieStore, + S::Error: From, + E: From + From + From, +{ + let txn: R::ReadTransaction = environment.create_read_txn()?; + + assert!( + check_leaves_exist::<_, _, _, _, E>(&txn, store, root, present)? + .into_iter() + .all(convert::identity) + ); + + assert!( + check_merkle_proofs::<_, _, _, _, E>(&txn, store, root, present)? + .into_iter() + .all(convert::identity) + ); + + assert!( + check_leaves_exist::<_, _, _, _, E>(&txn, store, root, absent)? + .into_iter() + .all(bool::not) + ); + + assert!( + check_merkle_proofs::<_, _, _, _, E>(&txn, store, root, absent)? 
+ .into_iter() + .all(bool::not) + ); + + assert!(check_keys::<_, _, _, _>(&txn, store, root, present,)); + + txn.commit()?; + Ok(()) +} + +fn write_leaves<'a, K, V, R, S, E>( + environment: &'a R, + store: &S, + root_hash: &Digest, + leaves: &[Trie], +) -> Result, E> +where + K: ToBytes + FromBytes + Clone + Eq + std::fmt::Debug, + V: ToBytes + FromBytes + Clone + Eq, + R: TransactionSource<'a, Handle = S::Handle>, + S: TrieStore>, + S::Error: From, + E: From + From + From, +{ + let mut results = Vec::new(); + if leaves.is_empty() { + return Ok(results); + } + let mut root_hash = root_hash.to_owned(); + let mut txn: R::ReadWriteTransaction = environment.create_read_write_txn()?; + + for leaf in leaves.iter() { + if let Trie::Leaf { key, value } = leaf { + let new_value = PanickingFromBytes::new(value.clone()); + let write_result = write::, _, _, E>( + &mut txn, store, &root_hash, key, &new_value, + )?; + match write_result { + WriteResult::Written(hash) => { + root_hash = hash; + } + WriteResult::AlreadyExists => (), + WriteResult::RootNotFound => panic!("write_leaves given an invalid root"), + }; + results.push(write_result); + } else { + panic!("leaves should contain only leaves"); + } + } + txn.commit()?; + Ok(results) +} + +fn check_pairs_proofs<'a, K, V, R, S, E>( + environment: &'a R, + store: &S, + root_hashes: &[Digest], + pairs: &[(K, V)], +) -> Result +where + K: ToBytes + FromBytes + Eq + std::fmt::Debug + Copy + Clone + Ord, + V: ToBytes + FromBytes + Eq + std::fmt::Debug + Copy, + R: TransactionSource<'a, Handle = S::Handle>, + S: TrieStore, + S::Error: From, + E: From + From + From, +{ + let txn = environment.create_read_txn()?; + for (index, root_hash) in root_hashes.iter().enumerate() { + for (key, value) in &pairs[..=index] { + let maybe_proof = read_with_proof::<_, _, _, _, E>(&txn, store, root_hash, key)?; + match maybe_proof { + ReadResult::Found(proof) => { + let hash = compute_state_hash(&proof)?; + if hash != *root_hash || proof.value() != 
value { + return Ok(false); + } + } + ReadResult::NotFound => return Ok(false), + ReadResult::RootNotFound => panic!("Root not found!"), + }; + } + } + Ok(true) +} + +fn check_pairs<'a, K, V, R, S, E>( + environment: &'a R, + store: &S, + root_hashes: &[Digest], + pairs: &[(K, V)], +) -> Result +where + K: ToBytes + FromBytes + Eq + std::fmt::Debug + Clone + Ord, + V: ToBytes + FromBytes + Eq + std::fmt::Debug + Copy, + R: TransactionSource<'a, Handle = S::Handle>, + S: TrieStore, + S::Error: From, + E: From + From + From, +{ + let txn: R::ReadTransaction = environment.create_read_txn()?; + for (index, root_hash) in root_hashes.iter().enumerate() { + for (key, value) in &pairs[..=index] { + let result = read::<_, _, _, _, E>(&txn, store, root_hash, key)?; + if ReadResult::Found(*value) != result { + return Ok(false); + } + } + let expected = { + let mut tmp = pairs[..=index] + .iter() + .map(|(k, _)| k) + .cloned() + .collect::>(); + tmp.sort(); + tmp + }; + let actual = { + let mut tmp = operations::keys::<_, _, _, _>(&txn, store, root_hash) + .filter_map(Result::ok) + .collect::>(); + tmp.sort(); + tmp + }; + if expected != actual { + return Ok(false); + } + } + Ok(true) +} + +fn write_pairs<'a, K, V, R, S, E>( + environment: &'a R, + store: &S, + root_hash: &Digest, + pairs: &[(K, V)], +) -> Result, E> +where + K: ToBytes + FromBytes + Clone + Eq + std::fmt::Debug, + V: ToBytes + FromBytes + Clone + Eq, + R: TransactionSource<'a, Handle = S::Handle>, + S: TrieStore>, + S::Error: From, + E: From + From + From, +{ + let mut results = Vec::new(); + if pairs.is_empty() { + return Ok(results); + } + let mut root_hash = root_hash.to_owned(); + let mut txn = environment.create_read_write_txn()?; + + for (key, value) in pairs.iter() { + let new_val = PanickingFromBytes::new(value.clone()); + match write::, _, _, E>( + &mut txn, store, &root_hash, key, &new_val, + )? 
{ + WriteResult::Written(hash) => { + root_hash = hash; + } + WriteResult::AlreadyExists => (), + WriteResult::RootNotFound => panic!("write_leaves given an invalid root"), + }; + results.push(root_hash); + } + txn.commit()?; + Ok(results) +} + +fn writes_to_n_leaf_empty_trie_had_expected_results<'a, K, V, R, WR, S, WS, E>( + environment: &'a R, + writable_environment: &'a WR, + store: &S, + writable_store: &WS, + states: &[Digest], + test_leaves: &[Trie], +) -> Result, E> +where + K: ToBytes + FromBytes + Clone + Eq + std::fmt::Debug + Copy + Ord, + V: ToBytes + FromBytes + Clone + Eq + std::fmt::Debug + Copy, + R: TransactionSource<'a, Handle = S::Handle>, + WR: TransactionSource<'a, Handle = WS::Handle>, + S: TrieStore, + WS: TrieStore>, + S::Error: From, + WS::Error: From, + E: From + From + From + From + From, +{ + let mut states = states.to_vec(); + + // Write set of leaves to the trie + let hashes = write_leaves::<_, _, _, _, E>( + writable_environment, + writable_store, + states.last().unwrap(), + test_leaves, + )? + .into_iter() + .map(|result| match result { + WriteResult::Written(root_hash) => root_hash, + _ => panic!("write_leaves resulted in non-write"), + }) + .collect::>(); + + states.extend(hashes); + + // Check that the expected set of leaves is in the trie at every + // state, and that the set of other leaves is not. 
+ for (num_leaves, state) in states.iter().enumerate() { + let (used, unused) = test_leaves.split_at(num_leaves); + check_leaves::<_, _, _, _, E>(environment, store, state, used, unused)?; + } + + Ok(states) +} diff --git a/storage/src/global_state/trie_store/operations/tests/proptests.rs b/storage/src/global_state/trie_store/operations/tests/proptests.rs new file mode 100644 index 0000000000..05d66872ed --- /dev/null +++ b/storage/src/global_state/trie_store/operations/tests/proptests.rs @@ -0,0 +1,70 @@ +use std::ops::RangeInclusive; + +use proptest::{ + array, + collection::vec, + prelude::{any, proptest, Strategy}, +}; + +use super::*; + +const DEFAULT_MIN_LENGTH: usize = 0; + +const DEFAULT_MAX_LENGTH: usize = 100; + +fn get_range() -> RangeInclusive { + let start = option_env!("CL_TRIE_TEST_VECTOR_MIN_LENGTH") + .and_then(|s| str::parse::(s).ok()) + .unwrap_or(DEFAULT_MIN_LENGTH); + let end = option_env!("CL_TRIE_TEST_VECTOR_MAX_LENGTH") + .and_then(|s| str::parse::(s).ok()) + .unwrap_or(DEFAULT_MAX_LENGTH); + RangeInclusive::new(start, end) +} + +fn lmdb_roundtrip_succeeds(pairs: &[(TestKey, TestValue)]) -> bool { + let (root_hash, tries) = TEST_TRIE_GENERATORS[0]().unwrap(); + let context = LmdbTestContext::new(&tries).unwrap(); + let mut states_to_check = vec![]; + + let root_hashes = write_pairs::<_, _, _, _, error::Error>( + &context.environment, + &context.store, + &root_hash, + pairs, + ) + .unwrap(); + + states_to_check.extend(root_hashes); + + check_pairs::<_, _, _, _, error::Error>( + &context.environment, + &context.store, + &states_to_check, + pairs, + ) + .unwrap(); + + check_pairs_proofs::<_, _, _, _, error::Error>( + &context.environment, + &context.store, + &states_to_check, + pairs, + ) + .unwrap() +} + +fn test_key_arb() -> impl Strategy { + array::uniform7(any::()).prop_map(TestKey) +} + +fn test_value_arb() -> impl Strategy { + array::uniform6(any::()).prop_map(TestValue) +} + +proptest! 
{ + #[test] + fn prop_lmdb_roundtrip_succeeds(inputs in vec((test_key_arb(), test_value_arb()), get_range())) { + assert!(lmdb_roundtrip_succeeds(&inputs)); + } +} diff --git a/storage/src/global_state/trie_store/operations/tests/prune.rs b/storage/src/global_state/trie_store/operations/tests/prune.rs new file mode 100644 index 0000000000..1718010061 --- /dev/null +++ b/storage/src/global_state/trie_store/operations/tests/prune.rs @@ -0,0 +1,427 @@ +use super::*; +use crate::global_state::trie_store::operations::TriePruneResult; + +fn checked_prune<'a, K, V, R, WR, S, WS, E>( + environment: &'a R, + write_environment: &'a WR, + store: &S, + write_store: &WS, + root: &Digest, + key_to_prune: &K, +) -> Result +where + K: ToBytes + FromBytes + Clone + std::fmt::Debug + Eq, + V: ToBytes + FromBytes + Clone + std::fmt::Debug, + R: TransactionSource<'a, Handle = S::Handle>, + WR: TransactionSource<'a, Handle = WS::Handle>, + S: TrieStore, + WS: TrieStore>, + S::Error: From, + WS::Error: From, + E: From + From + From + From + From, +{ + let mut txn = write_environment.create_read_write_txn()?; + let prune_result = operations::prune::, _, WS, E>( + &mut txn, + write_store, + root, + key_to_prune, + ); + txn.commit()?; + let prune_result = prune_result?; + let rtxn = environment.create_read_write_txn()?; + if let TriePruneResult::Pruned(new_root) = prune_result { + operations::check_integrity::(&rtxn, store, vec![new_root])?; + } + rtxn.commit()?; + Ok(prune_result) +} + +mod partial_tries { + use super::*; + use crate::global_state::trie_store::operations::TriePruneResult; + + #[allow(clippy::too_many_arguments)] + fn prune_from_partial_trie_had_expected_results<'a, K, V, R, WR, S, WS, E>( + environment: &'a R, + write_environment: &'a WR, + store: &S, + write_store: &WS, + root: &Digest, + key_to_prune: &K, + expected_root_after_prune: &Digest, + expected_tries_after_prune: &[HashedTrie], + ) -> Result<(), E> + where + K: ToBytes + FromBytes + Clone + Eq + 
std::fmt::Debug, + V: ToBytes + FromBytes + Clone + Eq + std::fmt::Debug, + R: TransactionSource<'a, Handle = S::Handle>, + WR: TransactionSource<'a, Handle = WS::Handle>, + S: TrieStore, + WS: TrieStore>, + S::Error: From, + WS::Error: From, + E: From + + From + + From + + From + + From, + { + let rtxn = environment.create_read_txn()?; + // The assert below only works with partial tries + assert_eq!(store.get(&rtxn, expected_root_after_prune)?, None); + rtxn.commit()?; + let root_after_prune = match checked_prune::( + environment, + write_environment, + store, + write_store, + root, + key_to_prune, + )? { + TriePruneResult::Pruned(root_after_prune) => root_after_prune, + TriePruneResult::MissingKey => panic!("key did not exist"), + TriePruneResult::RootNotFound => panic!("root should be found"), + TriePruneResult::Failure(err) => panic!("{:?}", err), + }; + assert_eq!(root_after_prune, *expected_root_after_prune); + let rtxn = environment.create_read_txn()?; + for HashedTrie { hash, trie } in expected_tries_after_prune { + assert_eq!(store.get(&rtxn, hash)?, Some(trie.clone())); + } + rtxn.commit()?; + Ok(()) + } + + #[test] + fn lmdb_prune_from_partial_trie_had_expected_results() { + for i in 0..TEST_LEAVES_LENGTH { + let (initial_root_hash, initial_tries) = TEST_TRIE_GENERATORS[i + 1]().unwrap(); + let (updated_root_hash, updated_tries) = TEST_TRIE_GENERATORS[i]().unwrap(); + let key_to_prune = &TEST_LEAVES[i]; + let context = LmdbTestContext::new(&initial_tries).unwrap(); + + prune_from_partial_trie_had_expected_results::< + TestKey, + TestValue, + _, + _, + _, + _, + error::Error, + >( + &context.environment, + &context.environment, + &context.store, + &context.store, + &initial_root_hash, + key_to_prune.key().unwrap(), + &updated_root_hash, + updated_tries.as_slice(), + ) + .unwrap(); + } + } + + fn prune_non_existent_key_from_partial_trie_should_return_does_not_exist< + 'a, + K, + V, + R, + WR, + S, + WS, + E, + >( + environment: &'a R, + write_environment: 
&'a WR, + store: &S, + write_store: &WS, + root: &Digest, + key_to_prune: &K, + ) -> Result<(), E> + where + K: ToBytes + FromBytes + Clone + Eq + std::fmt::Debug, + V: ToBytes + FromBytes + Clone + Eq + std::fmt::Debug, + R: TransactionSource<'a, Handle = S::Handle>, + WR: TransactionSource<'a, Handle = WS::Handle>, + S: TrieStore, + WS: TrieStore>, + S::Error: From, + WS::Error: From, + E: From + + From + + From + + From + + From, + { + match checked_prune::( + environment, + write_environment, + store, + write_store, + root, + key_to_prune, + )? { + TriePruneResult::Pruned(_) => panic!("should not prune"), + TriePruneResult::MissingKey => Ok(()), + TriePruneResult::RootNotFound => panic!("root should be found"), + TriePruneResult::Failure(err) => panic!("{:?}", err), + } + } + + #[test] + fn lmdb_prune_non_existent_key_from_partial_trie_should_return_does_not_exist() { + for i in 0..TEST_LEAVES_LENGTH { + let (initial_root_hash, initial_tries) = TEST_TRIE_GENERATORS[i]().unwrap(); + let key_to_prune = &TEST_LEAVES_ADJACENTS[i]; + let context = LmdbTestContext::new(&initial_tries).unwrap(); + + prune_non_existent_key_from_partial_trie_should_return_does_not_exist::< + TestKey, + TestValue, + _, + _, + _, + _, + error::Error, + >( + &context.environment, + &context.environment, + &context.store, + &context.store, + &initial_root_hash, + key_to_prune.key().unwrap(), + ) + .unwrap(); + } + } +} + +mod full_tries { + use super::*; + use std::ops::RangeInclusive; + + use proptest::{collection, prelude::*}; + + use casper_types::{ + bytesrepr::{self, FromBytes, ToBytes}, + gens::{colliding_key_arb, stored_value_arb}, + Digest, Key, StoredValue, + }; + + use crate::global_state::{ + error, + transaction_source::TransactionSource, + trie_store::{ + operations::{ + prune, + tests::{LmdbTestContext, TestKey, TestValue, TEST_TRIE_GENERATORS}, + write, TriePruneResult, WriteResult, + }, + TrieStore, + }, + }; + + fn serially_insert_and_prune<'a, K, V, R, S, E>( + 
environment: &'a R, + store: &S, + root: &Digest, + pairs: &[(K, V)], + ) -> Result<(), E> + where + K: ToBytes + FromBytes + Clone + Eq + std::fmt::Debug, + V: ToBytes + FromBytes + Clone + Eq + std::fmt::Debug, + R: TransactionSource<'a, Handle = S::Handle>, + S: TrieStore>, + S::Error: From, + E: From + From + From, + { + let mut txn: R::ReadWriteTransaction = environment.create_read_write_txn()?; + let mut roots = Vec::new(); + // Insert the key-value pairs, keeping track of the roots as we go + for (key, value) in pairs { + let new_value = PanickingFromBytes::new(value.clone()); + if let WriteResult::Written(new_root) = write::, _, _, E>( + &mut txn, + store, + roots.last().unwrap_or(root), + key, + &new_value, + )? { + roots.push(new_root); + } else { + panic!("Could not write pair") + } + } + // Delete the key-value pairs, checking the resulting roots as we go + let mut current_root = roots.pop().unwrap_or_else(|| root.to_owned()); + for (key, _value) in pairs.iter().rev() { + let prune_result = + prune::, _, _, E>(&mut txn, store, ¤t_root, key); + if let TriePruneResult::Pruned(new_root) = prune_result? 
{ + current_root = roots.pop().unwrap_or_else(|| root.to_owned()); + assert_eq!(new_root, current_root); + } else { + panic!("Could not prune") + } + } + Ok(()) + } + + #[test] + fn lmdb_serially_insert_and_prune() { + let (empty_root_hash, empty_trie) = TEST_TRIE_GENERATORS[0]().unwrap(); + let context = LmdbTestContext::new(&empty_trie).unwrap(); + + serially_insert_and_prune::( + &context.environment, + &context.store, + &empty_root_hash, + &[ + (TestKey([1u8; 7]), TestValue([1u8; 6])), + (TestKey([0u8; 7]), TestValue([0u8; 6])), + (TestKey([0u8, 1, 1, 1, 1, 1, 1]), TestValue([2u8; 6])), + (TestKey([2u8; 7]), TestValue([2u8; 6])), + ], + ) + .unwrap(); + } + + const INTERLEAVED_INSERT_AND_PRUNE_TEST_LEAVES_1: [(TestKey, TestValue); 3] = [ + (TestKey([1u8; 7]), TestValue([1u8; 6])), + (TestKey([0u8; 7]), TestValue([0u8; 6])), + (TestKey([0u8, 1, 1, 1, 1, 1, 1]), TestValue([2u8; 6])), + ]; + + const INTERLEAVED_PRUNE_TEST_KEYS_1: [TestKey; 1] = [TestKey([1u8; 7])]; + + fn interleaved_insert_and_prune<'a, K, V, R, S, E>( + environment: &'a R, + store: &S, + root: &Digest, + pairs_to_insert: &[(K, V)], + keys_to_prune: &[K], + ) -> Result<(), E> + where + K: ToBytes + FromBytes + Clone + Eq + std::fmt::Debug, + V: ToBytes + FromBytes + Clone + Eq + std::fmt::Debug, + R: TransactionSource<'a, Handle = S::Handle>, + S: TrieStore>, + S::Error: From, + E: From + From + From, + { + let mut txn: R::ReadWriteTransaction = environment.create_read_write_txn()?; + let mut expected_root = *root; + // Insert the key-value pairs, keeping track of the roots as we go + for (key, value) in pairs_to_insert.iter() { + let new_value = PanickingFromBytes::new(value.clone()); + if let WriteResult::Written(new_root) = write::, _, _, E>( + &mut txn, + store, + &expected_root, + key, + &new_value, + )? 
{ + expected_root = new_root; + } else { + panic!("Could not write pair") + } + } + for key in keys_to_prune.iter() { + let prune_result = + prune::, _, _, E>(&mut txn, store, &expected_root, key); + match prune_result? { + TriePruneResult::Pruned(new_root) => { + expected_root = new_root; + } + TriePruneResult::MissingKey => {} + TriePruneResult::RootNotFound => panic!("should find root"), + TriePruneResult::Failure(err) => panic!("{:?}", err), + } + } + + let pairs_to_insert_less_pruned: Vec<(K, V)> = pairs_to_insert + .iter() + .rev() + .filter(|&(key, _value)| !keys_to_prune.contains(key)) + .cloned() + .collect(); + + let mut actual_root = *root; + for (key, value) in pairs_to_insert_less_pruned.iter() { + let new_value = PanickingFromBytes::new(value.clone()); + if let WriteResult::Written(new_root) = write::, _, _, E>( + &mut txn, + store, + &actual_root, + key, + &new_value, + )? { + actual_root = new_root; + } else { + panic!("Could not write pair") + } + } + + assert_eq!(expected_root, actual_root, "Expected did not match actual"); + + Ok(()) + } + + #[test] + fn lmdb_interleaved_insert_and_prune() { + let (empty_root_hash, empty_trie) = TEST_TRIE_GENERATORS[0]().unwrap(); + let context = LmdbTestContext::new(&empty_trie).unwrap(); + + interleaved_insert_and_prune::( + &context.environment, + &context.store, + &empty_root_hash, + &INTERLEAVED_INSERT_AND_PRUNE_TEST_LEAVES_1, + &INTERLEAVED_PRUNE_TEST_KEYS_1, + ) + .unwrap(); + } + + const DEFAULT_MIN_LENGTH: usize = 1; + + const DEFAULT_MAX_LENGTH: usize = 6; + + fn get_range() -> RangeInclusive { + let start = option_env!("CL_TRIE_TEST_VECTOR_MIN_LENGTH") + .and_then(|s| str::parse::(s).ok()) + .unwrap_or(DEFAULT_MIN_LENGTH); + let end = option_env!("CL_TRIE_TEST_VECTOR_MAX_LENGTH") + .and_then(|s| str::parse::(s).ok()) + .unwrap_or(DEFAULT_MAX_LENGTH); + RangeInclusive::new(start, end) + } + + proptest! 
{ + #[test] + fn prop_lmdb_interleaved_insert_and_prune( + pairs_to_insert in collection::vec((colliding_key_arb(), stored_value_arb()), get_range()) + ) { + let (empty_root_hash, empty_trie) = TEST_TRIE_GENERATORS[0]().unwrap(); + let context = LmdbTestContext::new(&empty_trie).unwrap(); + + let keys_to_prune = { + let mut tmp = Vec::new(); + for i in (0..pairs_to_insert.len()).step_by(2) { + tmp.push(pairs_to_insert[i].0) + } + tmp + }; + + interleaved_insert_and_prune::( + &context.environment, + &context.store, + &empty_root_hash, + &pairs_to_insert, + &keys_to_prune, + ) + .unwrap(); + } + } +} diff --git a/storage/src/global_state/trie_store/operations/tests/read.rs b/storage/src/global_state/trie_store/operations/tests/read.rs new file mode 100644 index 0000000000..9999289f35 --- /dev/null +++ b/storage/src/global_state/trie_store/operations/tests/read.rs @@ -0,0 +1,76 @@ +//! This module contains tests for [`StateReader::read`]. +//! +//! Our primary goal here is to test this functionality in isolation. +//! Therefore, we manually construct test tries from a well-known set of +//! leaves called [`TEST_LEAVES`](super::TEST_LEAVES), each of which represents a value we are +//! trying to store in the trie at a given key. +//! +//! We use two strategies for testing. See the [`partial_tries`] and +//! [`full_tries`] modules for more info. + +use super::*; +use crate::global_state::error; + +mod partial_tries { + //! Here we construct 6 separate "partial" tries, increasing in size + //! from 0 to 5 leaves. Each of these tries contains no past history, + //! only a single a root to read from. The tests check that we can read + //! only the expected set of leaves from the trie from this single root. 
+ + use super::*; + + #[test] + fn lmdb_reads_from_n_leaf_partial_trie_had_expected_results() { + for (num_leaves, generator) in TEST_TRIE_GENERATORS.iter().enumerate() { + let (root_hash, tries) = generator().unwrap(); + let context = LmdbTestContext::new(&tries).unwrap(); + let test_leaves = TEST_LEAVES; + let (used, unused) = test_leaves.split_at(num_leaves); + + check_leaves::<_, _, _, _, error::Error>( + &context.environment, + &context.store, + &root_hash, + used, + unused, + ) + .unwrap(); + } + } +} + +mod full_tries { + //! Here we construct a series of 6 "full" tries, increasing in size + //! from 0 to 5 leaves. Each trie contains the history from preceding + //! tries in this series, and past history can be read from the roots of + //! each preceding trie. The tests check that we can read only the + //! expected set of leaves from the trie at the current root and all past + //! roots. + + use super::*; + + #[test] + fn lmdb_reads_from_n_leaf_full_trie_had_expected_results() { + let context = LmdbTestContext::new(EMPTY_HASHED_TEST_TRIES).unwrap(); + let mut states: Vec = Vec::new(); + + for (state_index, generator) in TEST_TRIE_GENERATORS.iter().enumerate() { + let (root_hash, tries) = generator().unwrap(); + context.update(&tries).unwrap(); + states.push(root_hash); + + for (num_leaves, state) in states[..state_index].iter().enumerate() { + let test_leaves = TEST_LEAVES; + let (used, unused) = test_leaves.split_at(num_leaves); + check_leaves::<_, _, _, _, error::Error>( + &context.environment, + &context.store, + state, + used, + unused, + ) + .unwrap(); + } + } + } +} diff --git a/storage/src/global_state/trie_store/operations/tests/scan.rs b/storage/src/global_state/trie_store/operations/tests/scan.rs new file mode 100644 index 0000000000..fe8fcf943f --- /dev/null +++ b/storage/src/global_state/trie_store/operations/tests/scan.rs @@ -0,0 +1,126 @@ +use casper_types::Digest; +use convert::TryInto; + +use super::*; +use crate::global_state::{ + error, + 
trie::LazilyDeserializedTrie, + trie_store::operations::{scan_raw, store_wrappers, TrieScanRaw}, +}; + +fn check_scan<'a, R, S, E>( + environment: &'a R, + store: &S, + root_hash: &Digest, + key: &[u8], +) -> Result<(), E> +where + R: TransactionSource<'a, Handle = S::Handle>, + S: TrieStore, + S::Error: From + std::fmt::Debug, + E: From + From + From, +{ + let txn: R::ReadTransaction = environment.create_read_txn()?; + let root = store + .get(&txn, root_hash)? + .expect("check_scan received an invalid root hash"); + let root_bytes = root.to_bytes()?; + let store = store_wrappers::NonDeserializingStore::new(store); + let TrieScanRaw { mut tip, parents } = scan_raw::( + &txn, + &store, + key, + root_bytes.into(), + )?; + + for (index, parent) in parents.into_iter().rev() { + let expected_tip_hash = { + match tip { + LazilyDeserializedTrie::Leaf(leaf_bytes) => Digest::hash(leaf_bytes.bytes()), + node @ LazilyDeserializedTrie::Node { .. } + | node @ LazilyDeserializedTrie::Extension { .. } => { + let tip_bytes = TryInto::>::try_into(node)? + .to_bytes() + .unwrap(); + Digest::hash(&tip_bytes) + } + } + }; + match parent { + Trie::Leaf { .. } => panic!("parents should not contain any leaves"), + Trie::Node { pointer_block } => { + let pointer_tip_hash = pointer_block[::from(index)].map(|ptr| *ptr.hash()); + assert_eq!(Some(expected_tip_hash), pointer_tip_hash); + tip = LazilyDeserializedTrie::Node { pointer_block }; + } + Trie::Extension { affix, pointer } => { + let pointer_tip_hash = pointer.hash().to_owned(); + assert_eq!(expected_tip_hash, pointer_tip_hash); + tip = LazilyDeserializedTrie::Extension { affix, pointer }; + } + } + } + + assert!( + matches!( + tip, + LazilyDeserializedTrie::Node { .. } | LazilyDeserializedTrie::Extension { .. 
}, + ), + "Unexpected leaf found" + ); + assert_eq!(root, tip.try_into()?); + txn.commit()?; + Ok(()) +} + +mod partial_tries { + use super::*; + + #[test] + fn lmdb_scans_from_n_leaf_partial_trie_had_expected_results() { + for generator in &TEST_TRIE_GENERATORS { + let (root_hash, tries) = generator().unwrap(); + let context = LmdbTestContext::new(&tries).unwrap(); + + for leaf in TEST_LEAVES.iter() { + let leaf_bytes = leaf.to_bytes().unwrap(); + check_scan::<_, _, error::Error>( + &context.environment, + &context.store, + &root_hash, + &leaf_bytes, + ) + .unwrap() + } + } + } +} + +mod full_tries { + use super::*; + + #[test] + fn lmdb_scans_from_n_leaf_full_trie_had_expected_results() { + let context = LmdbTestContext::new(EMPTY_HASHED_TEST_TRIES).unwrap(); + let mut states: Vec = Vec::new(); + + for (state_index, generator) in TEST_TRIE_GENERATORS.iter().enumerate() { + let (root_hash, tries) = generator().unwrap(); + context.update(&tries).unwrap(); + states.push(root_hash); + + for state in &states[..state_index] { + for leaf in TEST_LEAVES.iter() { + let leaf_bytes = leaf.to_bytes().unwrap(); + check_scan::<_, _, error::Error>( + &context.environment, + &context.store, + state, + &leaf_bytes, + ) + .unwrap() + } + } + } + } +} diff --git a/storage/src/global_state/trie_store/operations/tests/synchronize.rs b/storage/src/global_state/trie_store/operations/tests/synchronize.rs new file mode 100644 index 0000000000..e21efdeb6c --- /dev/null +++ b/storage/src/global_state/trie_store/operations/tests/synchronize.rs @@ -0,0 +1,228 @@ +use std::{borrow::Cow, collections::HashSet}; + +use num_traits::FromPrimitive; + +use casper_types::{ + bytesrepr::{self, FromBytes, ToBytes}, + global_state::Pointer, + Digest, +}; + +use crate::global_state::{ + error, + transaction_source::{Readable, Transaction, TransactionSource}, + trie::{Trie, TrieTag}, + trie_store::{ + operations::{ + self, + tests::{LmdbTestContext, TestKey, TestValue}, + ReadResult, + }, + TrieStore, + 
}, +}; + +/// Given a root hash, find any trie keys that are descendant from it that are referenced but not +/// present in the database. +// TODO: We only need to check one trie key at a time +fn missing_trie_keys( + txn: &T, + store: &S, + mut trie_keys_to_visit: Vec, + known_complete: &HashSet, +) -> Result, E> +where + K: ToBytes + FromBytes + Eq + std::fmt::Debug, + V: ToBytes + FromBytes + std::fmt::Debug, + T: Readable, + S: TrieStore, + S::Error: From, + E: From + From, +{ + let mut missing_descendants = Vec::new(); + let mut visited = HashSet::new(); + while let Some(trie_key) = trie_keys_to_visit.pop() { + if !visited.insert(trie_key) { + continue; + } + + if known_complete.contains(&trie_key) { + // Skip because we know there are no missing descendants. + continue; + } + + let retrieved_trie_bytes = match store.get_raw(txn, &trie_key)? { + Some(bytes) => bytes, + None => { + // No entry under this trie key. + missing_descendants.push(trie_key); + continue; + } + }; + + // Optimization: Don't deserialize leaves as they have no descendants. + if let Some(TrieTag::Leaf) = retrieved_trie_bytes + .first() + .copied() + .and_then(TrieTag::from_u8) + { + continue; + } + + // Parse the trie, handling errors gracefully. + let retrieved_trie = match bytesrepr::deserialize_from_slice(retrieved_trie_bytes) { + Ok(retrieved_trie) => retrieved_trie, + // Couldn't parse; treat as missing and continue. + Err(err) => { + tracing::error!(?err, "unable to parse trie"); + missing_descendants.push(trie_key); + continue; + } + }; + + match retrieved_trie { + // Should be unreachable due to checking the first byte as a shortcut above. + Trie::::Leaf { .. 
} => { + tracing::error!( + "did not expect to see a trie leaf in `missing_trie_keys` after shortcut" + ); + } + // If we hit a pointer block, queue up all of the nodes it points to + Trie::Node { pointer_block } => { + for (_, pointer) in pointer_block.as_indexed_pointers() { + match pointer { + Pointer::LeafPointer(descendant_leaf_trie_key) => { + trie_keys_to_visit.push(descendant_leaf_trie_key) + } + Pointer::NodePointer(descendant_node_trie_key) => { + trie_keys_to_visit.push(descendant_node_trie_key) + } + } + } + } + // If we hit an extension block, add its pointer to the queue + Trie::Extension { pointer, .. } => trie_keys_to_visit.push(pointer.into_hash()), + } + } + Ok(missing_descendants) +} + +fn copy_state<'a, K, V, R, S, E>( + source_environment: &'a R, + source_store: &S, + target_environment: &'a R, + target_store: &S, + root: &Digest, +) -> Result<(), E> +where + K: ToBytes + FromBytes + Eq + std::fmt::Debug + Copy + Clone + Ord, + V: ToBytes + FromBytes + Eq + std::fmt::Debug + Copy, + R: TransactionSource<'a, Handle = S::Handle>, + S: TrieStore, + S::Error: From, + E: From + From + From, +{ + // Make sure no missing nodes in source + { + let txn: R::ReadTransaction = source_environment.create_read_txn()?; + let missing_from_source = missing_trie_keys::<_, _, _, _, E>( + &txn, + source_store, + vec![root.to_owned()], + &Default::default(), + )?; + assert_eq!(missing_from_source, Vec::new()); + txn.commit()?; + } + + // Copy source to target + { + let source_txn: R::ReadTransaction = source_environment.create_read_txn()?; + let mut target_txn: R::ReadWriteTransaction = target_environment.create_read_write_txn()?; + // Copy source to destination + let mut queue = vec![root.to_owned()]; + while let Some(trie_key) = queue.pop() { + let trie_bytes_to_insert = source_store + .get_raw(&source_txn, &trie_key)? 
+ .expect("should have trie"); + target_store.put_raw( + &mut target_txn, + &trie_key, + Cow::from(&*trie_bytes_to_insert), + )?; + + // Now that we've added in `trie_to_insert`, queue up its children + let new_keys = missing_trie_keys::<_, _, _, _, E>( + &target_txn, + target_store, + vec![trie_key], + &Default::default(), + )?; + + queue.extend(new_keys); + } + source_txn.commit()?; + target_txn.commit()?; + } + + // After the copying process above there should be no missing entries in the target + { + let target_txn: R::ReadWriteTransaction = target_environment.create_read_write_txn()?; + let missing_from_target = missing_trie_keys::<_, _, _, _, E>( + &target_txn, + target_store, + vec![root.to_owned()], + &Default::default(), + )?; + assert_eq!(missing_from_target, Vec::new()); + target_txn.commit()?; + } + + // Make sure all of the target keys under the root hash are in the source + { + let source_txn: R::ReadTransaction = source_environment.create_read_txn()?; + let target_txn: R::ReadTransaction = target_environment.create_read_txn()?; + let target_keys = operations::keys::<_, _, _, _>(&target_txn, target_store, root) + .collect::, S::Error>>()?; + for key in target_keys { + let maybe_value: ReadResult = + operations::read::<_, _, _, _, E>(&source_txn, source_store, root, &key)?; + assert!(maybe_value.is_found()) + } + source_txn.commit()?; + target_txn.commit()?; + } + + // Make sure all of the target keys under the root hash are in the source + { + let source_txn: R::ReadTransaction = source_environment.create_read_txn()?; + let target_txn: R::ReadTransaction = target_environment.create_read_txn()?; + let source_keys = operations::keys::<_, _, _, _>(&source_txn, source_store, root) + .collect::, S::Error>>()?; + for key in source_keys { + let maybe_value: ReadResult = + operations::read::<_, _, _, _, E>(&target_txn, target_store, root, &key)?; + assert!(maybe_value.is_found()) + } + source_txn.commit()?; + target_txn.commit()?; + } + + Ok(()) +} + +#[test] 
+fn lmdb_copy_state() { + let (root_hash, tries) = super::create_6_leaf_trie().unwrap(); + let source = LmdbTestContext::new(&tries).unwrap(); + let target = LmdbTestContext::new::(&[]).unwrap(); + + copy_state::( + &source.environment, + &source.store, + &target.environment, + &target.store, + &root_hash, + ) + .unwrap(); +} diff --git a/storage/src/global_state/trie_store/operations/tests/write.rs b/storage/src/global_state/trie_store/operations/tests/write.rs new file mode 100644 index 0000000000..5ccf917ed4 --- /dev/null +++ b/storage/src/global_state/trie_store/operations/tests/write.rs @@ -0,0 +1,700 @@ +use super::*; + +mod empty_tries { + use super::*; + + #[test] + fn lmdb_non_colliding_writes_to_n_leaf_empty_trie_had_expected_results() { + for num_leaves in 1..=TEST_LEAVES_LENGTH { + let (root_hash, tries) = TEST_TRIE_GENERATORS[0]().unwrap(); + let context = LmdbTestContext::new(&tries).unwrap(); + let initial_states = vec![root_hash]; + + writes_to_n_leaf_empty_trie_had_expected_results::<_, _, _, _, _, _, error::Error>( + &context.environment, + &context.environment, + &context.store, + &context.store, + &initial_states, + &TEST_LEAVES_NON_COLLIDING[..num_leaves], + ) + .unwrap(); + } + } + + #[test] + fn lmdb_writes_to_n_leaf_empty_trie_had_expected_results() { + for num_leaves in 1..=TEST_LEAVES_LENGTH { + let (root_hash, tries) = TEST_TRIE_GENERATORS[0]().unwrap(); + let context = LmdbTestContext::new(&tries).unwrap(); + let initial_states = vec![root_hash]; + + writes_to_n_leaf_empty_trie_had_expected_results::<_, _, _, _, _, _, error::Error>( + &context.environment, + &context.environment, + &context.store, + &context.store, + &initial_states, + &TEST_LEAVES[..num_leaves], + ) + .unwrap(); + } + } +} + +mod partial_tries { + use super::*; + + fn noop_writes_to_n_leaf_partial_trie_had_expected_results<'a, R, WR, S, WS, E>( + environment: &'a R, + write_environment: &'a WR, + store: &S, + writable_store: &WS, + states: &[Digest], + num_leaves: 
usize, + ) -> Result<(), E> + where + R: TransactionSource<'a, Handle = S::Handle>, + WR: TransactionSource<'a, Handle = WS::Handle>, + S: TrieStore, + WS: TrieStore>, + S::Error: From, + WS::Error: From, + E: From + + From + + From + + From + + From, + { + // Check that the expected set of leaves is in the trie + check_leaves::<_, _, _, _, E>( + environment, + store, + &states[0], + &TEST_LEAVES[..num_leaves], + &[], + )?; + + // Rewrite that set of leaves + let write_results = write_leaves::( + write_environment, + writable_store, + &states[0], + &TEST_LEAVES[..num_leaves], + )?; + + assert!(write_results + .iter() + .all(|result| *result == WriteResult::AlreadyExists)); + + // Check that the expected set of leaves is in the trie + check_leaves::<_, _, _, _, E>( + environment, + store, + &states[0], + &TEST_LEAVES[..num_leaves], + &[], + ) + } + + #[test] + fn lmdb_noop_writes_to_n_leaf_partial_trie_had_expected_results() { + for (num_leaves, generator) in TEST_TRIE_GENERATORS.iter().enumerate() { + let (root_hash, tries) = generator().unwrap(); + let context = LmdbTestContext::new(&tries).unwrap(); + let states = vec![root_hash]; + + noop_writes_to_n_leaf_partial_trie_had_expected_results::<_, _, _, _, error::Error>( + &context.environment, + &context.environment, + &context.store, + &context.store, + &states, + num_leaves, + ) + .unwrap(); + } + } + + fn update_writes_to_n_leaf_partial_trie_had_expected_results<'a, R, WR, S, WS, E>( + environment: &'a R, + write_environment: &'a WR, + store: &S, + writable_store: &WS, + states: &[Digest], + num_leaves: usize, + ) -> Result<(), E> + where + R: TransactionSource<'a, Handle = S::Handle>, + WR: TransactionSource<'a, Handle = WS::Handle>, + S: TrieStore, + WS: TrieStore>, + S::Error: From, + WS::Error: From, + E: From + + From + + From + + From + + From, + { + let mut states = states.to_owned(); + + // Check that the expected set of leaves is in the trie + check_leaves::<_, _, _, _, E>( + environment, + store, + 
&states[0], + &TEST_LEAVES[..num_leaves], + &[], + )?; + + // Update and check leaves + for (n, leaf) in TEST_LEAVES_UPDATED[..num_leaves].iter().enumerate() { + let expected_leaves: Vec = { + let n = n + 1; + TEST_LEAVES_UPDATED[..n] + .iter() + .chain(&TEST_LEAVES[n..num_leaves]) + .map(ToOwned::to_owned) + .collect() + }; + + let root_hash = { + let current_root = states.last().unwrap(); + let results = write_leaves::<_, _, _, _, E>( + write_environment, + writable_store, + current_root, + &[leaf.to_owned()], + )?; + assert_eq!(1, results.len()); + match results[0] { + WriteResult::Written(root_hash) => root_hash, + _ => panic!("value not written"), + } + }; + + states.push(root_hash); + + // Check that the expected set of leaves is in the trie + check_leaves::<_, _, _, _, E>( + environment, + store, + states.last().unwrap(), + &expected_leaves, + &[], + )?; + } + + Ok(()) + } + + #[test] + fn lmdb_update_writes_to_n_leaf_partial_trie_had_expected_results() { + for (num_leaves, generator) in TEST_TRIE_GENERATORS.iter().enumerate() { + let (root_hash, tries) = generator().unwrap(); + let context = LmdbTestContext::new(&tries).unwrap(); + let initial_states = vec![root_hash]; + + update_writes_to_n_leaf_partial_trie_had_expected_results::<_, _, _, _, error::Error>( + &context.environment, + &context.environment, + &context.store, + &context.store, + &initial_states, + num_leaves, + ) + .unwrap() + } + } +} + +mod full_tries { + use super::*; + + fn noop_writes_to_n_leaf_full_trie_had_expected_results<'a, R, WR, S, WS, E>( + environment: &'a R, + write_environment: &'a WR, + store: &S, + write_store: &WS, + states: &[Digest], + index: usize, + ) -> Result<(), E> + where + R: TransactionSource<'a, Handle = S::Handle>, + WR: TransactionSource<'a, Handle = WS::Handle>, + S: TrieStore, + WS: TrieStore>, + S::Error: From, + WS::Error: From, + E: From + + From + + From + + From + + From, + { + // Check that the expected set of leaves is in the trie at every state 
reference + for (num_leaves, state) in states[..index].iter().enumerate() { + check_leaves::<_, _, _, _, E>( + environment, + store, + state, + &TEST_LEAVES[..num_leaves], + &[], + )?; + } + + // Rewrite that set of leaves + let write_results = write_leaves::<_, _, _, _, E>( + write_environment, + write_store, + states.last().unwrap(), + &TEST_LEAVES[..index], + )?; + + assert!(write_results + .iter() + .all(|result| *result == WriteResult::AlreadyExists)); + + // Check that the expected set of leaves is in the trie at every state reference + for (num_leaves, state) in states[..index].iter().enumerate() { + check_leaves::<_, _, _, _, E>( + environment, + store, + state, + &TEST_LEAVES[..num_leaves], + &[], + )? + } + + Ok(()) + } + + #[test] + fn lmdb_noop_writes_to_n_leaf_full_trie_had_expected_results() { + let context = LmdbTestContext::new(EMPTY_HASHED_TEST_TRIES).unwrap(); + let mut states: Vec = Vec::new(); + + for (index, generator) in TEST_TRIE_GENERATORS.iter().enumerate() { + let (root_hash, tries) = generator().unwrap(); + context.update(&tries).unwrap(); + states.push(root_hash); + + noop_writes_to_n_leaf_full_trie_had_expected_results::<_, _, _, _, error::Error>( + &context.environment, + &context.environment, + &context.store, + &context.store, + &states, + index, + ) + .unwrap(); + } + } + + fn update_writes_to_n_leaf_full_trie_had_expected_results<'a, R, WR, S, WS, E>( + environment: &'a R, + write_environment: &'a WR, + store: &S, + write_store: &WS, + states: &[Digest], + num_leaves: usize, + ) -> Result<(), E> + where + R: TransactionSource<'a, Handle = S::Handle>, + WR: TransactionSource<'a, Handle = WS::Handle>, + S: TrieStore, + WS: TrieStore>, + S::Error: From, + WS::Error: From, + E: From + + From + + From + + From + + From, + { + let mut states = states.to_vec(); + + // Check that the expected set of leaves is in the trie at every state reference + for (state_index, state) in states.iter().enumerate() { + check_leaves::<_, _, _, _, E>( + 
environment, + store, + state, + &TEST_LEAVES[..state_index], + &[], + )?; + } + + // Write set of leaves to the trie + let hashes = write_leaves::<_, _, _, _, E>( + write_environment, + write_store, + states.last().unwrap(), + &TEST_LEAVES_UPDATED[..num_leaves], + )? + .iter() + .map(|result| match result { + WriteResult::Written(root_hash) => *root_hash, + _ => panic!("write_leaves resulted in non-write"), + }) + .collect::>(); + + states.extend(hashes); + + let expected: Vec> = { + let mut ret = vec![vec![]]; + if num_leaves > 0 { + for i in 1..=num_leaves { + ret.push(TEST_LEAVES[..i].to_vec()) + } + for i in 1..=num_leaves { + ret.push( + TEST_LEAVES[i..num_leaves] + .iter() + .chain(&TEST_LEAVES_UPDATED[..i]) + .map(ToOwned::to_owned) + .collect::>(), + ) + } + } + ret + }; + + assert_eq!(states.len(), expected.len()); + + // Check that the expected set of leaves is in the trie at every state reference + for (state_index, state) in states.iter().enumerate() { + check_leaves::<_, _, _, _, E>(environment, store, state, &expected[state_index], &[])?; + } + + Ok(()) + } + + #[test] + fn lmdb_update_writes_to_n_leaf_full_trie_had_expected_results() { + let context = LmdbTestContext::new(EMPTY_HASHED_TEST_TRIES).unwrap(); + let mut states: Vec = Vec::new(); + + for (num_leaves, generator) in TEST_TRIE_GENERATORS.iter().enumerate() { + let (root_hash, tries) = generator().unwrap(); + context.update(&tries).unwrap(); + states.push(root_hash); + + update_writes_to_n_leaf_full_trie_had_expected_results::<_, _, _, _, error::Error>( + &context.environment, + &context.environment, + &context.store, + &context.store, + &states, + num_leaves, + ) + .unwrap() + } + } + + fn node_writes_to_5_leaf_full_trie_had_expected_results<'a, R, WR, S, WS, E>( + environment: &'a R, + write_environment: &'a WR, + store: &S, + write_store: &WS, + states: &[Digest], + ) -> Result<(), E> + where + R: TransactionSource<'a, Handle = S::Handle>, + WR: TransactionSource<'a, Handle = WS::Handle>, 
+ S: TrieStore, + WS: TrieStore>, + S::Error: From, + WS::Error: From, + E: From + + From + + From + + From + + From, + { + let mut states = states.to_vec(); + let num_leaves = TEST_LEAVES_LENGTH; + + // Check that the expected set of leaves is in the trie at every state reference + for (state_index, state) in states.iter().enumerate() { + check_leaves::<_, _, _, _, E>( + environment, + store, + state, + &TEST_LEAVES[..state_index], + &[], + )?; + } + + // Write set of leaves to the trie + let hashes = write_leaves::<_, _, _, _, E>( + write_environment, + write_store, + states.last().unwrap(), + &TEST_LEAVES_ADJACENTS, + )? + .iter() + .map(|result| match result { + WriteResult::Written(root_hash) => *root_hash, + _ => panic!("write_leaves resulted in non-write"), + }) + .collect::>(); + + states.extend(hashes); + + let expected: Vec> = { + let mut ret = vec![vec![]]; + if num_leaves > 0 { + for i in 1..=num_leaves { + ret.push(TEST_LEAVES[..i].to_vec()) + } + for i in 1..=num_leaves { + ret.push( + TEST_LEAVES + .iter() + .chain(&TEST_LEAVES_ADJACENTS[..i]) + .map(ToOwned::to_owned) + .collect::>(), + ) + } + } + ret + }; + + assert_eq!(states.len(), expected.len()); + + // Check that the expected set of leaves is in the trie at every state reference + for (state_index, state) in states.iter().enumerate() { + check_leaves::<_, _, _, _, E>(environment, store, state, &expected[state_index], &[])?; + } + Ok(()) + } + + #[test] + fn lmdb_node_writes_to_5_leaf_full_trie_had_expected_results() { + let context = LmdbTestContext::new(EMPTY_HASHED_TEST_TRIES).unwrap(); + let mut states: Vec = Vec::new(); + + for generator in &TEST_TRIE_GENERATORS { + let (root_hash, tries) = generator().unwrap(); + context.update(&tries).unwrap(); + states.push(root_hash); + } + + node_writes_to_5_leaf_full_trie_had_expected_results::<_, _, _, _, error::Error>( + &context.environment, + &context.environment, + &context.store, + &context.store, + &states, + ) + .unwrap() + } +} + +mod 
variable_sized_keys { + use super::*; + + fn assert_write_result(result: WriteResult) -> Option { + match result { + WriteResult::Written(root_hash) => Some(root_hash), + WriteResult::AlreadyExists => None, + WriteResult::RootNotFound => panic!("Root not found while attempting write"), + } + } + + #[test] + fn write_variable_len_keys() { + let (root_hash, tries) = create_empty_trie::().unwrap(); + + let context = LmdbTestContext::new(&tries).unwrap(); + let mut txn = context.environment.create_read_write_txn().unwrap(); + + let test_key_1 = + MultiVariantTestKey::VariableSizedKey(VariableAddr::LegacyAddr(*b"caab6ff")); + let root_hash = assert_write_result( + write::( + &mut txn, + &context.store, + &root_hash, + &test_key_1, + &1u32, + ) + .unwrap(), + ) + .expect("Expected new root hash after write"); + + let test_key_2 = + MultiVariantTestKey::VariableSizedKey(VariableAddr::LegacyAddr(*b"caabb74")); + let root_hash = assert_write_result( + write::( + &mut txn, + &context.store, + &root_hash, + &test_key_2, + &2u32, + ) + .unwrap(), + ) + .expect("Expected new root hash after write"); + + let test_key_3 = MultiVariantTestKey::VariableSizedKey(VariableAddr::Empty); + let _ = assert_write_result( + write::( + &mut txn, + &context.store, + &root_hash, + &test_key_3, + &3u32, + ) + .unwrap(), + ) + .expect("Expected new root hash after write"); + } +} + +mod batch_write_with_random_keys { + use crate::global_state::trie_store::cache::TrieCache; + + use super::*; + + use casper_types::{testing::TestRng, Key}; + use rand::Rng; + + #[test] + fn compare_random_keys_seq_write_with_batch_cache_write() { + let mut rng = TestRng::new(); + + for _ in 0..100 { + let (mut seq_write_root_hash, tries) = create_empty_trie::().unwrap(); + let context = LmdbTestContext::new(&tries).unwrap(); + let mut txn = context.environment.create_read_write_txn().unwrap(); + + // Create some random keys and values. 
+ let data: Vec<(Key, u32)> = (0u32..4000).map(|val| (rng.gen(), val)).collect(); + + // Write all the keys sequentially to the store + for (key, value) in data.iter() { + let write_result = write::( + &mut txn, + &context.store, + &seq_write_root_hash, + key, + value, + ) + .unwrap(); + match write_result { + WriteResult::Written(hash) => { + seq_write_root_hash = hash; // Update the state root hash; we'll use it to + // compare with the cache root hash. + } + WriteResult::AlreadyExists => (), + WriteResult::RootNotFound => panic!("write_leaves given an invalid root"), + }; + } + + // Create an empty store that backs up the cache. + let (cache_root_hash, tries) = create_empty_trie::().unwrap(); + let context = LmdbTestContext::new(&tries).unwrap(); + let mut txn = context.environment.create_read_write_txn().unwrap(); + + let mut trie_cache = TrieCache::::new::<_, error::Error>( + &txn, + &context.store, + &cache_root_hash, + ) + .unwrap(); + for (key, value) in data.iter() { + trie_cache + .insert::<_, error::Error>(*key, *value, &txn) + .unwrap(); + } + + let cache_root_hash = trie_cache.store_cache::<_, error::Error>(&mut txn).unwrap(); + + if seq_write_root_hash != cache_root_hash { + println!("Root Hash is: {:?}", seq_write_root_hash); + println!("Cache root Hash is: {:?}", cache_root_hash); + println!("Faulty keys: "); + + for (key, _) in data.iter() { + println!("{}", key.to_formatted_string()); + } + panic!("ROOT hash mismatch"); + } + } + } + + #[test] + fn compare_random_keys_write_with_cache_and_readback() { + let mut rng = TestRng::new(); + + // create a store + let (mut root_hash, tries) = create_empty_trie::().unwrap(); + let context = LmdbTestContext::new(&tries).unwrap(); + let mut txn = context.environment.create_read_write_txn().unwrap(); + + // Create initial keys and values. 
+ let initial_keys: Vec<(Key, u32)> = (0u32..1000).map(|val| (rng.gen(), val)).collect(); + + // Store these keys and values using sequential write; + for (key, value) in initial_keys.iter() { + let write_result = write::( + &mut txn, + &context.store, + &root_hash, + key, + value, + ) + .unwrap(); + match write_result { + WriteResult::Written(hash) => { + root_hash = hash; + } + WriteResult::AlreadyExists => (), + WriteResult::RootNotFound => panic!("write_leaves given an invalid root"), + }; + } + + // Create some test data. + let data: Vec<(Key, u32)> = (0u32..1000).map(|val| (rng.gen(), val)).collect(); + + // Create a cache backed up by the store that has the initial data. + let mut trie_cache = + TrieCache::::new::<_, error::Error>(&txn, &context.store, &root_hash) + .unwrap(); + + // Insert the test data into the cache. + for (key, value) in data.iter() { + trie_cache + .insert::<_, error::Error>(*key, *value, &txn) + .unwrap(); + } + + // Get the generated root hash + let cache_root_hash = trie_cache.calculate_root_hash(); + + // now write the same keys to the store one by one and check if we get the same root hash. 
+ let mut seq_write_root_hash = root_hash; + for (key, value) in data.iter() { + let write_result = write::( + &mut txn, + &context.store, + &seq_write_root_hash, + key, + value, + ) + .unwrap(); + match write_result { + WriteResult::Written(hash) => { + seq_write_root_hash = hash; + } + WriteResult::AlreadyExists => (), + WriteResult::RootNotFound => panic!("write_leaves given an invalid root"), + }; + } + + assert_eq!(cache_root_hash, seq_write_root_hash); + } +} diff --git a/storage/src/global_state/trie_store/tests/concurrent.rs b/storage/src/global_state/trie_store/tests/concurrent.rs new file mode 100644 index 0000000000..64b48f8a1a --- /dev/null +++ b/storage/src/global_state/trie_store/tests/concurrent.rs @@ -0,0 +1,70 @@ +use std::{ + sync::{Arc, Barrier}, + thread, +}; + +use casper_types::bytesrepr::Bytes; +use tempfile::tempdir; + +use super::TestData; +use crate::global_state::{ + store::Store, + transaction_source::{lmdb::LmdbEnvironment, Transaction, TransactionSource}, + trie::Trie, + trie_store::lmdb::LmdbTrieStore, + DEFAULT_MAX_DB_SIZE, DEFAULT_MAX_READERS, +}; + +#[test] +fn lmdb_writer_mutex_does_not_collide_with_readers() { + let dir = tempdir().unwrap(); + let env = Arc::new( + LmdbEnvironment::new(dir.path(), DEFAULT_MAX_DB_SIZE, DEFAULT_MAX_READERS, true).unwrap(), + ); + let store = Arc::new(LmdbTrieStore::new(&env, None, Default::default()).unwrap()); + let num_threads = 10; + let barrier = Arc::new(Barrier::new(num_threads + 1)); + let mut handles = Vec::new(); + let TestData(ref leaf_1_hash, ref leaf_1) = &super::create_data()[0..1][0]; + + for _ in 0..num_threads { + let reader_env = env.clone(); + let reader_store = store.clone(); + let reader_barrier = barrier.clone(); + let leaf_1_hash = *leaf_1_hash; + #[allow(clippy::clone_on_copy)] + let leaf_1 = leaf_1.clone(); + + handles.push(thread::spawn(move || { + { + let txn = reader_env.create_read_txn().unwrap(); + let result: Option> = + reader_store.get(&txn, &leaf_1_hash).unwrap(); + 
assert_eq!(result, None); + txn.commit().unwrap(); + } + // wait for other reader threads to read and the main thread to + // take a read-write transaction + reader_barrier.wait(); + // wait for main thread to put and commit + reader_barrier.wait(); + { + let txn = reader_env.create_read_txn().unwrap(); + let result: Option> = + reader_store.get(&txn, &leaf_1_hash).unwrap(); + txn.commit().unwrap(); + result.unwrap() == leaf_1 + } + })); + } + + let mut txn = env.create_read_write_txn().unwrap(); + // wait for reader threads to read + barrier.wait(); + store.put(&mut txn, leaf_1_hash, leaf_1).unwrap(); + txn.commit().unwrap(); + // sync with reader threads + barrier.wait(); + + assert!(handles.into_iter().all(|b| b.join().unwrap())) +} diff --git a/storage/src/global_state/trie_store/tests/mod.rs b/storage/src/global_state/trie_store/tests/mod.rs new file mode 100644 index 0000000000..ef205090a7 --- /dev/null +++ b/storage/src/global_state/trie_store/tests/mod.rs @@ -0,0 +1,76 @@ +mod concurrent; +mod proptests; +mod simple; + +use casper_types::{ + bytesrepr::{Bytes, ToBytes}, + global_state::Pointer, + Digest, +}; + +use crate::global_state::trie::{PointerBlock, Trie}; + +#[derive(Clone)] +struct TestData(Digest, Trie); + +impl<'a, K, V> From<&'a TestData> for (&'a Digest, &'a Trie) { + fn from(test_data: &'a TestData) -> Self { + (&test_data.0, &test_data.1) + } +} + +fn create_data() -> Vec> { + let leaf_1 = Trie::Leaf { + key: Bytes::from(vec![0u8, 0, 0]), + value: Bytes::from(b"val_1".to_vec()), + }; + let leaf_2 = Trie::Leaf { + key: Bytes::from(vec![1u8, 0, 0]), + value: Bytes::from(b"val_2".to_vec()), + }; + let leaf_3 = Trie::Leaf { + key: Bytes::from(vec![1u8, 0, 1]), + value: Bytes::from(b"val_3".to_vec()), + }; + + let leaf_1_hash = Digest::hash(leaf_1.to_bytes().unwrap()); + let leaf_2_hash = Digest::hash(leaf_2.to_bytes().unwrap()); + let leaf_3_hash = Digest::hash(leaf_3.to_bytes().unwrap()); + + let node_2: Trie = { + let mut pointer_block = 
PointerBlock::new(); + pointer_block[0] = Some(Pointer::LeafPointer(leaf_2_hash)); + pointer_block[1] = Some(Pointer::LeafPointer(leaf_3_hash)); + let pointer_block = Box::new(pointer_block); + Trie::Node { pointer_block } + }; + + let node_2_hash = Digest::hash(node_2.to_bytes().unwrap()); + + let ext_node: Trie = { + let affix = vec![1u8, 0]; + let pointer = Pointer::NodePointer(node_2_hash); + Trie::extension(affix, pointer) + }; + + let ext_node_hash = Digest::hash(ext_node.to_bytes().unwrap()); + + let node_1: Trie = { + let mut pointer_block = PointerBlock::new(); + pointer_block[0] = Some(Pointer::LeafPointer(leaf_1_hash)); + pointer_block[1] = Some(Pointer::NodePointer(ext_node_hash)); + let pointer_block = Box::new(pointer_block); + Trie::Node { pointer_block } + }; + + let node_1_hash = Digest::hash(node_1.to_bytes().unwrap()); + + vec![ + TestData(leaf_1_hash, leaf_1), + TestData(leaf_2_hash, leaf_2), + TestData(leaf_3_hash, leaf_3), + TestData(node_1_hash, node_1), + TestData(node_2_hash, node_2), + TestData(ext_node_hash, ext_node), + ] +} diff --git a/storage/src/global_state/trie_store/tests/proptests.rs b/storage/src/global_state/trie_store/tests/proptests.rs new file mode 100644 index 0000000000..804f3fddbc --- /dev/null +++ b/storage/src/global_state/trie_store/tests/proptests.rs @@ -0,0 +1,71 @@ +use std::{collections::BTreeMap, ops::RangeInclusive}; + +use lmdb::DatabaseFlags; +use proptest::{collection::vec, prelude::proptest}; +use tempfile::tempdir; + +use casper_types::{bytesrepr::ToBytes, Digest, Key, StoredValue}; + +use crate::global_state::{ + store::tests as store_tests, + trie::{ + gens::{trie_extension_arb, trie_leaf_arb, trie_node_arb}, + Trie, + }, + DEFAULT_MAX_DB_SIZE, DEFAULT_MAX_READERS, +}; + +const DEFAULT_MIN_LENGTH: usize = 1; +const DEFAULT_MAX_LENGTH: usize = 4; + +fn get_range() -> RangeInclusive { + let start = option_env!("CL_TRIE_STORE_TEST_VECTOR_MIN_LENGTH") + .and_then(|s| str::parse::(s).ok()) + 
.unwrap_or(DEFAULT_MIN_LENGTH); + let end = option_env!("CL_TRIE_STORE_TEST_VECTOR_MAX_LENGTH") + .and_then(|s| str::parse::(s).ok()) + .unwrap_or(DEFAULT_MAX_LENGTH); + RangeInclusive::new(start, end) +} + +fn lmdb_roundtrip_succeeds(inputs: Vec>) -> bool { + use crate::global_state::{ + transaction_source::lmdb::LmdbEnvironment, trie_store::lmdb::LmdbTrieStore, + }; + + let tmp_dir = tempdir().unwrap(); + let env = LmdbEnvironment::new( + tmp_dir.path(), + DEFAULT_MAX_DB_SIZE, + DEFAULT_MAX_READERS, + true, + ) + .unwrap(); + let store = LmdbTrieStore::new(&env, None, DatabaseFlags::empty()).unwrap(); + + let inputs: BTreeMap> = inputs + .into_iter() + .map(|trie| (Digest::hash(trie.to_bytes().unwrap()), trie)) + .collect(); + + let ret = store_tests::roundtrip_succeeds(&env, &store, inputs).unwrap(); + tmp_dir.close().unwrap(); + ret +} + +proptest! { + #[test] + fn prop_lmdb_roundtrip_succeeds_leaf(v in vec(trie_leaf_arb(), get_range())) { + assert!(lmdb_roundtrip_succeeds(v)) + } + + #[test] + fn prop_lmdb_roundtrip_succeeds_node(v in vec(trie_node_arb(), get_range())) { + assert!(lmdb_roundtrip_succeeds(v)) + } + + #[test] + fn prop_lmdb_roundtrip_succeeds_extension(v in vec(trie_extension_arb(), get_range())) { + assert!(lmdb_roundtrip_succeeds(v)) + } +} diff --git a/storage/src/global_state/trie_store/tests/simple.rs b/storage/src/global_state/trie_store/tests/simple.rs new file mode 100644 index 0000000000..b51a2b473e --- /dev/null +++ b/storage/src/global_state/trie_store/tests/simple.rs @@ -0,0 +1,452 @@ +use lmdb::DatabaseFlags; +use tempfile::tempdir; + +use casper_types::bytesrepr::{self, Bytes, FromBytes, ToBytes}; + +use super::TestData; +use crate::global_state::{ + error, + store::StoreExt, + transaction_source::{lmdb::LmdbEnvironment, Transaction, TransactionSource}, + trie::Trie, + trie_store::{lmdb::LmdbTrieStore, TrieStore}, + DEFAULT_MAX_DB_SIZE, DEFAULT_MAX_READERS, +}; + +fn put_succeeds<'a, K, V, S, X, E>( + store: &S, + 
transaction_source: &'a X, + items: &[TestData], +) -> Result<(), E> +where + K: ToBytes, + V: ToBytes, + S: TrieStore, + X: TransactionSource<'a, Handle = S::Handle>, + S::Error: From, + E: From + From, +{ + let mut txn: X::ReadWriteTransaction = transaction_source.create_read_write_txn()?; + let items = items.iter().map(Into::into); + store.put_many(&mut txn, items)?; + txn.commit()?; + Ok(()) +} + +#[test] +fn lmdb_put_succeeds() { + let tmp_dir = tempdir().unwrap(); + let env = LmdbEnvironment::new( + tmp_dir.path(), + DEFAULT_MAX_DB_SIZE, + DEFAULT_MAX_READERS, + true, + ) + .unwrap(); + let store = LmdbTrieStore::new(&env, None, DatabaseFlags::empty()).unwrap(); + let data = &super::create_data()[0..1]; + + assert!(put_succeeds::<_, _, _, _, error::Error>(&store, &env, data).is_ok()); + + tmp_dir.close().unwrap(); +} + +fn put_get_succeeds<'a, K, V, S, X, E>( + store: &S, + transaction_source: &'a X, + items: &[TestData], +) -> Result>>, E> +where + K: ToBytes + FromBytes, + V: ToBytes + FromBytes, + S: TrieStore, + X: TransactionSource<'a, Handle = S::Handle>, + S::Error: From, + E: From + From, +{ + let mut txn: X::ReadWriteTransaction = transaction_source.create_read_write_txn()?; + let items = items.iter().map(Into::into); + store.put_many(&mut txn, items.clone())?; + let keys = items.map(|(k, _)| k); + let ret = store.get_many(&txn, keys)?; + txn.commit()?; + Ok(ret) +} + +#[test] +fn lmdb_put_get_succeeds() { + let tmp_dir = tempdir().unwrap(); + let env = LmdbEnvironment::new( + tmp_dir.path(), + DEFAULT_MAX_DB_SIZE, + DEFAULT_MAX_READERS, + true, + ) + .unwrap(); + let store = LmdbTrieStore::new(&env, None, DatabaseFlags::empty()).unwrap(); + let data = &super::create_data()[0..1]; + + let expected: Vec> = data.iter().cloned().map(|TestData(_, v)| v).collect(); + + assert_eq!( + expected, + put_get_succeeds::<_, _, _, _, error::Error>(&store, &env, data) + .expect("put_get_succeeds failed") + .into_iter() + .collect::>>>() + .expect("one of the 
outputs was empty") + ); + + tmp_dir.close().unwrap(); +} + +#[test] +fn lmdb_put_get_many_succeeds() { + let tmp_dir = tempdir().unwrap(); + let env = LmdbEnvironment::new( + tmp_dir.path(), + DEFAULT_MAX_DB_SIZE, + DEFAULT_MAX_READERS, + true, + ) + .unwrap(); + let store = LmdbTrieStore::new(&env, None, DatabaseFlags::empty()).unwrap(); + let data = super::create_data(); + + let expected: Vec> = data.iter().cloned().map(|TestData(_, v)| v).collect(); + + assert_eq!( + expected, + put_get_succeeds::<_, _, _, _, error::Error>(&store, &env, &data) + .expect("put_get failed") + .into_iter() + .collect::>>>() + .expect("one of the outputs was empty") + ); + + tmp_dir.close().unwrap(); +} + +fn uncommitted_read_write_txn_does_not_persist<'a, K, V, S, X, E>( + store: &S, + transaction_source: &'a X, + items: &[TestData], +) -> Result>>, E> +where + K: ToBytes + FromBytes, + V: ToBytes + FromBytes, + S: TrieStore, + X: TransactionSource<'a, Handle = S::Handle>, + S::Error: From, + E: From + From, +{ + { + let mut txn: X::ReadWriteTransaction = transaction_source.create_read_write_txn()?; + let items = items.iter().map(Into::into); + store.put_many(&mut txn, items)?; + } + { + let txn: X::ReadTransaction = transaction_source.create_read_txn()?; + let keys = items.iter().map(|TestData(k, _)| k); + let ret = store.get_many(&txn, keys)?; + txn.commit()?; + Ok(ret) + } +} + +#[test] +fn lmdb_uncommitted_read_write_txn_does_not_persist() { + let tmp_dir = tempdir().unwrap(); + let env = LmdbEnvironment::new( + tmp_dir.path(), + DEFAULT_MAX_DB_SIZE, + DEFAULT_MAX_READERS, + true, + ) + .unwrap(); + let store = LmdbTrieStore::new(&env, None, DatabaseFlags::empty()).unwrap(); + let data = super::create_data(); + + assert_eq!( + None, + uncommitted_read_write_txn_does_not_persist::<_, _, _, _, error::Error>( + &store, &env, &data, + ) + .expect("uncommitted_read_write_txn_does_not_persist failed") + .into_iter() + .collect::>>>() + ); + + tmp_dir.close().unwrap(); +} + +fn 
read_write_transaction_does_not_block_read_transaction<'a, X, E>( + transaction_source: &'a X, +) -> Result<(), E> +where + X: TransactionSource<'a>, + E: From, +{ + let read_write_txn = transaction_source.create_read_write_txn()?; + let read_txn = transaction_source.create_read_txn()?; + read_write_txn.commit()?; + read_txn.commit()?; + Ok(()) +} + +#[test] +fn lmdb_read_write_transaction_does_not_block_read_transaction() { + let dir = tempdir().unwrap(); + let env = + LmdbEnvironment::new(dir.path(), DEFAULT_MAX_DB_SIZE, DEFAULT_MAX_READERS, true).unwrap(); + + assert!(read_write_transaction_does_not_block_read_transaction::<_, error::Error>(&env).is_ok()) +} + +fn reads_are_isolated<'a, S, X, E>(store: &S, env: &'a X) -> Result<(), E> +where + S: TrieStore, + X: TransactionSource<'a, Handle = S::Handle>, + S::Error: From, + E: From + From + From, +{ + let TestData(leaf_1_hash, leaf_1) = &super::create_data()[0..1][0]; + + { + let read_txn_1 = env.create_read_txn()?; + let result = store.get(&read_txn_1, leaf_1_hash)?; + assert_eq!(result, None); + + { + let mut write_txn = env.create_read_write_txn()?; + store.put(&mut write_txn, leaf_1_hash, leaf_1)?; + write_txn.commit()?; + } + + let result = store.get(&read_txn_1, leaf_1_hash)?; + read_txn_1.commit()?; + assert_eq!(result, None); + } + + { + let read_txn_2 = env.create_read_txn()?; + let result = store.get(&read_txn_2, leaf_1_hash)?; + read_txn_2.commit()?; + assert_eq!(result, Some(leaf_1.to_owned())); + } + + Ok(()) +} + +#[test] +fn lmdb_reads_are_isolated() { + let dir = tempdir().unwrap(); + let env = + LmdbEnvironment::new(dir.path(), DEFAULT_MAX_DB_SIZE, DEFAULT_MAX_READERS, true).unwrap(); + let store = LmdbTrieStore::new(&env, None, DatabaseFlags::empty()).unwrap(); + + assert!(reads_are_isolated::<_, _, error::Error>(&store, &env).is_ok()) +} + +fn reads_are_isolated_2<'a, S, X, E>(store: &S, env: &'a X) -> Result<(), E> +where + S: TrieStore, + X: TransactionSource<'a, Handle = S::Handle>, + 
S::Error: From, + E: From + From + From, +{ + let data = super::create_data(); + let TestData(ref leaf_1_hash, ref leaf_1) = data[0]; + let TestData(ref leaf_2_hash, ref leaf_2) = data[1]; + + { + let mut write_txn = env.create_read_write_txn()?; + store.put(&mut write_txn, leaf_1_hash, leaf_1)?; + write_txn.commit()?; + } + + { + let read_txn_1 = env.create_read_txn()?; + { + let mut write_txn = env.create_read_write_txn()?; + store.put(&mut write_txn, leaf_2_hash, leaf_2)?; + write_txn.commit()?; + } + let result = store.get(&read_txn_1, leaf_1_hash)?; + read_txn_1.commit()?; + assert_eq!(result, Some(leaf_1.to_owned())); + } + + { + let read_txn_2 = env.create_read_txn()?; + let result = store.get(&read_txn_2, leaf_2_hash)?; + read_txn_2.commit()?; + assert_eq!(result, Some(leaf_2.to_owned())); + } + + Ok(()) +} + +#[test] +fn lmdb_reads_are_isolated_2() { + let dir = tempdir().unwrap(); + let env = + LmdbEnvironment::new(dir.path(), DEFAULT_MAX_DB_SIZE, DEFAULT_MAX_READERS, true).unwrap(); + let store = LmdbTrieStore::new(&env, None, DatabaseFlags::empty()).unwrap(); + + assert!(reads_are_isolated_2::<_, _, error::Error>(&store, &env).is_ok()) +} + +fn dbs_are_isolated<'a, S, X, E>(env: &'a X, store_a: &S, store_b: &S) -> Result<(), E> +where + S: TrieStore, + X: TransactionSource<'a, Handle = S::Handle>, + S::Error: From, + E: From + From + From, +{ + let data = super::create_data(); + let TestData(ref leaf_1_hash, ref leaf_1) = data[0]; + let TestData(ref leaf_2_hash, ref leaf_2) = data[1]; + + { + let mut write_txn = env.create_read_write_txn()?; + store_a.put(&mut write_txn, leaf_1_hash, leaf_1)?; + write_txn.commit()?; + } + + { + let mut write_txn = env.create_read_write_txn()?; + store_b.put(&mut write_txn, leaf_2_hash, leaf_2)?; + write_txn.commit()?; + } + + { + let read_txn = env.create_read_txn()?; + let result = store_a.get(&read_txn, leaf_1_hash)?; + assert_eq!(result, Some(leaf_1.to_owned())); + let result = store_a.get(&read_txn, leaf_2_hash)?; + 
assert_eq!(result, None); + read_txn.commit()?; + } + + { + let read_txn = env.create_read_txn()?; + let result = store_b.get(&read_txn, leaf_1_hash)?; + assert_eq!(result, None); + let result = store_b.get(&read_txn, leaf_2_hash)?; + assert_eq!(result, Some(leaf_2.to_owned())); + read_txn.commit()?; + } + + Ok(()) +} + +#[test] +fn lmdb_dbs_are_isolated() { + let dir = tempdir().unwrap(); + let env = + LmdbEnvironment::new(dir.path(), DEFAULT_MAX_DB_SIZE, DEFAULT_MAX_READERS, true).unwrap(); + let store_a = LmdbTrieStore::new(&env, Some("a"), DatabaseFlags::empty()).unwrap(); + let store_b = LmdbTrieStore::new(&env, Some("b"), DatabaseFlags::empty()).unwrap(); + + assert!(dbs_are_isolated::<_, _, error::Error>(&env, &store_a, &store_b).is_ok()) +} + +fn transactions_can_be_used_across_sub_databases<'a, S, X, E>( + env: &'a X, + store_a: &S, + store_b: &S, +) -> Result<(), E> +where + S: TrieStore, + X: TransactionSource<'a, Handle = S::Handle>, + S::Error: From, + E: From + From + From, +{ + let data = super::create_data(); + let TestData(ref leaf_1_hash, ref leaf_1) = data[0]; + let TestData(ref leaf_2_hash, ref leaf_2) = data[1]; + + { + let mut write_txn = env.create_read_write_txn()?; + store_a.put(&mut write_txn, leaf_1_hash, leaf_1)?; + store_b.put(&mut write_txn, leaf_2_hash, leaf_2)?; + write_txn.commit()?; + } + + { + let read_txn = env.create_read_txn()?; + let result = store_a.get(&read_txn, leaf_1_hash)?; + assert_eq!(result, Some(leaf_1.to_owned())); + let result = store_b.get(&read_txn, leaf_2_hash)?; + assert_eq!(result, Some(leaf_2.to_owned())); + read_txn.commit()?; + } + + Ok(()) +} + +#[test] +fn lmdb_transactions_can_be_used_across_sub_databases() { + let dir = tempdir().unwrap(); + let env = + LmdbEnvironment::new(dir.path(), DEFAULT_MAX_DB_SIZE, DEFAULT_MAX_READERS, true).unwrap(); + let store_a = LmdbTrieStore::new(&env, Some("a"), DatabaseFlags::empty()).unwrap(); + let store_b = LmdbTrieStore::new(&env, Some("b"), 
DatabaseFlags::empty()).unwrap(); + + assert!( + transactions_can_be_used_across_sub_databases::<_, _, error::Error>( + &env, &store_a, &store_b, + ) + .is_ok() + ) +} + +fn uncommitted_transactions_across_sub_databases_do_not_persist<'a, S, X, E>( + env: &'a X, + store_a: &S, + store_b: &S, +) -> Result<(), E> +where + S: TrieStore, + X: TransactionSource<'a, Handle = S::Handle>, + S::Error: From, + E: From + From + From, +{ + let data = super::create_data(); + let TestData(ref leaf_1_hash, ref leaf_1) = data[0]; + let TestData(ref leaf_2_hash, ref leaf_2) = data[1]; + + { + let mut write_txn = env.create_read_write_txn()?; + store_a.put(&mut write_txn, leaf_1_hash, leaf_1)?; + store_b.put(&mut write_txn, leaf_2_hash, leaf_2)?; + } + + { + let read_txn = env.create_read_txn()?; + let result = store_a.get(&read_txn, leaf_1_hash)?; + assert_eq!(result, None); + let result = store_b.get(&read_txn, leaf_2_hash)?; + assert_eq!(result, None); + read_txn.commit()?; + } + + Ok(()) +} + +#[test] +fn lmdb_uncommitted_transactions_across_sub_databases_do_not_persist() { + let dir = tempdir().unwrap(); + let env = + LmdbEnvironment::new(dir.path(), DEFAULT_MAX_DB_SIZE, DEFAULT_MAX_READERS, true).unwrap(); + let store_a = LmdbTrieStore::new(&env, Some("a"), DatabaseFlags::empty()).unwrap(); + let store_b = LmdbTrieStore::new(&env, Some("b"), DatabaseFlags::empty()).unwrap(); + + assert!( + uncommitted_transactions_across_sub_databases_do_not_persist::<_, _, error::Error>( + &env, &store_a, &store_b, + ) + .is_ok() + ) +} diff --git a/storage/src/lib.rs b/storage/src/lib.rs new file mode 100644 index 0000000000..db4812dc16 --- /dev/null +++ b/storage/src/lib.rs @@ -0,0 +1,33 @@ +//! Storage for a node on the Casper network. 
+ +#![doc(html_root_url = "https://docs.rs/casper-storage/2.1.1")] +#![doc( + html_favicon_url = "https://raw.githubusercontent.com/casper-network/casper-node/master/images/CasperLabs_Logo_Favicon_RGB_50px.png", + html_logo_url = "https://raw.githubusercontent.com/casper-network/casper-node/master/images/CasperLabs_Logo_Symbol_RGB.png" +)] +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![warn(missing_docs)] + +/// Address generator logic. +pub mod address_generator; +/// Block store logic. +pub mod block_store; +/// Data access layer logic. +pub mod data_access_layer; +/// Global state logic. +pub mod global_state; +/// Storage layer logic. +pub mod system; +/// Tracking copy. +pub mod tracking_copy; + +pub use address_generator::{AddressGenerator, AddressGeneratorBuilder}; +pub use data_access_layer::KeyPrefix; +#[cfg(test)] +pub use tracking_copy::new_temporary_tracking_copy; +pub use tracking_copy::TrackingCopy; + +pub use block_store::{ + lmdb::{DbTableId, UnknownDbTableId}, + DbRawBytesSpec, +}; diff --git a/storage/src/system.rs b/storage/src/system.rs new file mode 100644 index 0000000000..5262fcff04 --- /dev/null +++ b/storage/src/system.rs @@ -0,0 +1,20 @@ +/// Auction logic. +pub mod auction; +/// Burn logic. +pub mod burn; +/// Error definition. +pub mod error; +/// Genesis logic. +pub mod genesis; +/// Handle payment logic. +pub mod handle_payment; +/// Mint logic. +pub mod mint; +/// Protocol upgrade logic. +pub mod protocol_upgrade; +/// Runtime native logic. +pub mod runtime_native; +/// Standard payment logic. +pub mod standard_payment; +/// Transfer logic. +pub mod transfer; diff --git a/storage/src/system/auction.rs b/storage/src/system/auction.rs new file mode 100644 index 0000000000..b0bd1ab99a --- /dev/null +++ b/storage/src/system/auction.rs @@ -0,0 +1,1007 @@ +mod auction_native; +/// Auction business logic. +pub mod detail; +/// System logic providers. 
+pub mod providers; + +use itertools::Itertools; +use num_rational::Ratio; +use std::collections::BTreeMap; +use tracing::{debug, error, warn}; + +use self::providers::{AccountProvider, MintProvider, RuntimeProvider, StorageProvider}; +use crate::system::auction::detail::{ + process_undelegation, process_updated_delegator_reservation_slots, + process_updated_delegator_stake_boundaries, process_with_vesting_schedule, read_delegator_bids, + read_validator_bid, rewards_per_validator, seigniorage_recipients, DistributeTarget, +}; +use casper_types::{ + account::AccountHash, + system::auction::{ + BidAddr, BidKind, Bridge, DelegationRate, DelegatorKind, EraInfo, EraValidators, Error, + Reservation, SeigniorageAllocation, SeigniorageRecipientsSnapshot, SeigniorageRecipientsV2, + UnbondEra, UnbondKind, ValidatorBid, ValidatorCredit, ValidatorWeights, + DELEGATION_RATE_DENOMINATOR, + }, + AccessRights, ApiError, EraId, Key, PublicKey, URef, U512, +}; + +/// Bonding auction contract interface +pub trait Auction: + StorageProvider + RuntimeProvider + MintProvider + AccountProvider + Sized +{ + /// Returns active validators and auction winners for a number of future eras determined by the + /// configured auction_delay. + fn get_era_validators(&mut self) -> Result { + let snapshot = detail::get_seigniorage_recipients_snapshot(self)?; + let era_validators = detail::era_validators_from_snapshot(snapshot); + Ok(era_validators) + } + + /// Returns validators in era_validators, mapped to their bids or founding stakes, delegation + /// rates and lists of delegators together with their delegated quantities from delegators. + /// This function is publicly accessible, but intended for system use by the Handle Payment + /// contract, because this data is necessary for distributing seigniorage. + fn read_seigniorage_recipients(&mut self) -> Result { + // `era_validators` are assumed to be computed already by calling "run_auction" entrypoint. 
+ let era_index = detail::get_era_id(self)?; + let mut seigniorage_recipients_snapshot = + detail::get_seigniorage_recipients_snapshot(self)?; + let seigniorage_recipients = seigniorage_recipients_snapshot + .remove(&era_index) + .ok_or(Error::MissingSeigniorageRecipients)?; + Ok(seigniorage_recipients) + } + + /// This entry point adds or modifies an entry in the `Key::Bid` section of the global state and + /// creates (or tops off) a bid purse. Post genesis, any new call on this entry point causes a + /// non-founding validator in the system to exist. + /// + /// The logic works for both founding and non-founding validators, making it possible to adjust + /// their delegation rate and increase their stakes. + /// + /// A validator with its bid inactive due to slashing can activate its bid again by increasing + /// its stake. + /// + /// Validators cannot create a bid with 0 amount, and the delegation rate can't exceed + /// [`DELEGATION_RATE_DENOMINATOR`]. + /// + /// Returns a [`U512`] value indicating total amount of tokens staked for given `public_key`. + #[allow(clippy::too_many_arguments)] + fn add_bid( + &mut self, + public_key: PublicKey, + delegation_rate: DelegationRate, + amount: U512, + minimum_delegation_amount: u64, + maximum_delegation_amount: u64, + minimum_bid_amount: u64, + max_delegators_per_validator: u32, + reserved_slots: u32, + ) -> Result { + if !self.allow_auction_bids() { + // The validator set may be closed on some side chains, + // which is configured by disabling bids. 
+ return Err(Error::AuctionBidsDisabled.into()); + } + + if amount == U512::zero() { + return Err(Error::BondTooSmall.into()); + } + + if delegation_rate > DELEGATION_RATE_DENOMINATOR { + return Err(Error::DelegationRateTooLarge.into()); + } + + if reserved_slots > max_delegators_per_validator { + return Err(Error::ExceededReservationSlotsLimit.into()); + } + + let provided_account_hash = AccountHash::from(&public_key); + + if !self.is_allowed_session_caller(&provided_account_hash) { + return Err(Error::InvalidContext.into()); + } + let validator_bid_key = BidAddr::from(public_key.clone()).into(); + let (target, validator_bid) = if let Some(BidKind::Validator(mut validator_bid)) = + self.read_bid(&validator_bid_key)? + { + let updated_stake = validator_bid.increase_stake(amount)?; + if updated_stake < U512::from(minimum_bid_amount) { + return Err(Error::BondTooSmall.into()); + } + // idempotent + validator_bid.activate(); + + validator_bid.with_delegation_rate(delegation_rate); + process_updated_delegator_stake_boundaries( + self, + &mut validator_bid, + minimum_delegation_amount, + maximum_delegation_amount, + )?; + process_updated_delegator_reservation_slots( + self, + &mut validator_bid, + max_delegators_per_validator, + reserved_slots, + )?; + (*validator_bid.bonding_purse(), validator_bid) + } else { + if amount < U512::from(minimum_bid_amount) { + return Err(Error::BondTooSmall.into()); + } + // create new validator bid + let bonding_purse = self.create_purse()?; + let validator_bid = ValidatorBid::unlocked( + public_key, + bonding_purse, + amount, + delegation_rate, + minimum_delegation_amount, + maximum_delegation_amount, + reserved_slots, + ); + (bonding_purse, Box::new(validator_bid)) + }; + + let source = self.get_main_purse()?; + self.mint_transfer_direct( + Some(PublicKey::System.to_account_hash()), + source, + target, + amount, + None, + ) + .map_err(|_| Error::TransferToBidPurse)? 
+ .map_err(|mint_error| { + // Propagate mint contract's error that occurred during execution of transfer + // entrypoint. This will improve UX in case of (for example) + // unapproved spending limit error. + ApiError::from(mint_error) + })?; + + let updated_amount = validator_bid.staked_amount(); + self.write_bid(validator_bid_key, BidKind::Validator(validator_bid))?; + Ok(updated_amount) + } + + /// Unbonds aka reduces stake by specified amount, adding an entry to the unbonding queue. + /// For a genesis validator, this is subject to vesting if applicable to a given network. + /// + /// If this bid stake is reduced to 0, any delegators to this bid will be undelegated, with + /// entries made to the unbonding queue for each of them for their full delegated amount. + /// Additionally, this bid record will be pruned away from the next calculated root hash. + /// + /// An attempt to reduce stake by more than is staked will instead 0 the stake. + /// + /// The function returns the remaining staked amount (we allow partial unbonding). + fn withdraw_bid( + &mut self, + public_key: PublicKey, + amount: U512, + minimum_bid_amount: u64, + ) -> Result { + let provided_account_hash = AccountHash::from(&public_key); + + if !self.is_allowed_session_caller(&provided_account_hash) { + return Err(Error::InvalidContext); + } + + let validator_bid_addr = BidAddr::from(public_key.clone()); + let validator_bid_key = validator_bid_addr.into(); + let mut validator_bid = read_validator_bid(self, &validator_bid_key)?; + let staked_amount = validator_bid.staked_amount(); + + // An attempt to unbond more than is staked results in unbonding the staked amount. 
+ let unbonding_amount = U512::min(amount, validator_bid.staked_amount()); + + let era_end_timestamp_millis = detail::get_era_end_timestamp_millis(self)?; + let updated_stake = + validator_bid.decrease_stake(unbonding_amount, era_end_timestamp_millis)?; + + debug!( + "withdrawing bid for {validator_bid_addr} reducing {staked_amount} by {unbonding_amount} to {updated_stake}", + ); + // if validator stake is less than minimum_bid_amount, unbond fully and prune validator bid + if updated_stake < U512::from(minimum_bid_amount) { + // create unbonding purse for full validator stake + detail::create_unbonding_purse( + self, + public_key.clone(), + UnbondKind::Validator(public_key.clone()), // validator is the unbonder + *validator_bid.bonding_purse(), + staked_amount, + None, + )?; + // Unbond all delegators and zero them out + let delegators = read_delegator_bids(self, &public_key)?; + for mut delegator in delegators { + let unbond_kind = delegator.unbond_kind(); + detail::create_unbonding_purse( + self, + public_key.clone(), + unbond_kind, + *delegator.bonding_purse(), + delegator.staked_amount(), + None, + )?; + delegator.decrease_stake(delegator.staked_amount(), era_end_timestamp_millis)?; + + let delegator_bid_addr = delegator.bid_addr(); + debug!("pruning delegator bid {}", delegator_bid_addr); + self.prune_bid(delegator_bid_addr) + } + debug!("pruning validator bid {}", validator_bid_addr); + self.prune_bid(validator_bid_addr); + } else { + // create unbonding purse for the unbonding amount + detail::create_unbonding_purse( + self, + public_key.clone(), + UnbondKind::Validator(public_key.clone()), // validator is the unbonder + *validator_bid.bonding_purse(), + unbonding_amount, + None, + )?; + self.write_bid(validator_bid_key, BidKind::Validator(validator_bid))?; + } + + Ok(updated_stake) + } + + /// Adds a new delegator to delegators or increases its current stake. If the target validator + /// is missing, the function call returns an error and does nothing. 
+ /// + /// The function transfers motes from the source purse to the delegator's bonding purse. + /// + /// This entry point returns the number of tokens currently delegated to a given validator. + fn delegate( + &mut self, + delegator_kind: DelegatorKind, + validator_public_key: PublicKey, + amount: U512, + max_delegators_per_validator: u32, + ) -> Result { + if !self.allow_auction_bids() { + // The auction process can be disabled on a given network. + return Err(Error::AuctionBidsDisabled.into()); + } + + let source = match &delegator_kind { + DelegatorKind::PublicKey(pk) => { + let account_hash = pk.to_account_hash(); + if !self.is_allowed_session_caller(&account_hash) { + return Err(Error::InvalidContext.into()); + } + self.get_main_purse()? + } + DelegatorKind::Purse(addr) => { + let uref = URef::new(*addr, AccessRights::WRITE); + if !self.is_valid_uref(uref) { + return Err(Error::InvalidContext.into()); + } + uref + } + }; + + detail::handle_delegation( + self, + delegator_kind, + validator_public_key, + source, + amount, + max_delegators_per_validator, + ) + } + + /// Unbonds aka reduces stake by specified amount, adding an entry to the unbonding queue + /// + /// The arguments are the delegator's key, the validator's key, and the amount. + /// + /// Returns the remaining staked amount (we allow partial unbonding). + fn undelegate( + &mut self, + delegator_kind: DelegatorKind, + validator_public_key: PublicKey, + amount: U512, + ) -> Result { + let redelegate_target = None; + process_undelegation( + self, + delegator_kind, + validator_public_key, + amount, + redelegate_target, + ) + } + + /// Unbonds aka reduces stake by specified amount, adding an entry to the unbonding queue, + /// which when processed will attempt to re-delegate the stake to the specified new validator. 
+ /// If this is not possible at that future point in time, the unbonded stake will instead + /// downgrade to a standard undelegate operation automatically (the unbonded stake is + /// returned to the associated purse). + /// + /// This is a quality of life / convenience method, allowing a delegator to indicate they + /// would like some or all of their stake moved away from a validator to a different validator + /// with a single transaction, instead of requiring them to send an unbonding transaction + /// to unbond from the first validator and then wait a number of eras equal to the unbonding + /// delay and then send a second transaction to bond to the second validator. + /// + /// The arguments are the delegator's key, the existing validator's key, the amount, + /// and the new validator's key. + /// + /// Returns the remaining staked amount (we allow partial unbonding). + fn redelegate( + &mut self, + delegator_kind: DelegatorKind, + validator_public_key: PublicKey, + amount: U512, + new_validator: PublicKey, + ) -> Result { + let redelegate_target = Some(new_validator); + process_undelegation( + self, + delegator_kind, + validator_public_key, + amount, + redelegate_target, + ) + } + + /// Adds new reservations for a given validator with specified delegator public keys + /// and delegation rates. If during adding reservations configured number of reserved + /// delegator slots is exceeded it returns an error. + /// + /// If given reservation exists already and the delegation rate was changed it's updated. + fn add_reservations(&mut self, reservations: Vec) -> Result<(), Error> { + if !self.allow_auction_bids() { + // The auction process can be disabled on a given network. 
+ return Err(Error::AuctionBidsDisabled); + } + + for reservation in reservations { + if !self + .is_allowed_session_caller(&AccountHash::from(reservation.validator_public_key())) + { + return Err(Error::InvalidContext); + } + + detail::handle_add_reservation(self, reservation)?; + } + Ok(()) + } + + /// Removes reservations for given delegator public keys. If a reservation for one of the keys + /// does not exist it returns an error. + fn cancel_reservations( + &mut self, + validator: PublicKey, + delegators: Vec, + max_delegators_per_validator: u32, + ) -> Result<(), Error> { + if !self.is_allowed_session_caller(&AccountHash::from(&validator)) { + return Err(Error::InvalidContext); + } + + for delegator in delegators { + detail::handle_cancel_reservation( + self, + validator.clone(), + delegator.clone(), + max_delegators_per_validator, + )?; + } + Ok(()) + } + + /// Slashes each validator. + /// + /// This can be only invoked through a system call. + fn slash(&mut self, validator_public_keys: Vec) -> Result<(), Error> { + fn slash_unbonds(unbond_eras: Vec) -> U512 { + let mut burned_amount = U512::zero(); + for unbond_era in unbond_eras { + burned_amount += *unbond_era.amount(); + } + burned_amount + } + + if self.get_caller() != PublicKey::System.to_account_hash() { + return Err(Error::InvalidCaller); + } + + let mut burned_amount: U512 = U512::zero(); + + for validator_public_key in validator_public_keys { + let validator_bid_addr = BidAddr::from(validator_public_key.clone()); + // Burn stake, deactivate + if let Some(BidKind::Validator(validator_bid)) = + self.read_bid(&validator_bid_addr.into())? + { + burned_amount += validator_bid.staked_amount(); + self.prune_bid(validator_bid_addr); + + // Also slash delegator stakes when deactivating validator bid. 
+ let delegator_keys = { + let mut ret = + self.get_keys_by_prefix(&validator_bid_addr.delegated_account_prefix()?)?; + ret.extend( + self.get_keys_by_prefix(&validator_bid_addr.delegated_purse_prefix()?)?, + ); + ret + }; + + for delegator_key in delegator_keys { + if let Some(BidKind::Delegator(delegator_bid)) = + self.read_bid(&delegator_key)? + { + burned_amount += delegator_bid.staked_amount(); + let delegator_bid_addr = delegator_bid.bid_addr(); + self.prune_bid(delegator_bid_addr); + + // Also slash delegator unbonds. + let delegator_unbond_addr = match delegator_bid.delegator_kind() { + DelegatorKind::PublicKey(pk) => BidAddr::UnbondAccount { + validator: validator_public_key.to_account_hash(), + unbonder: pk.to_account_hash(), + }, + DelegatorKind::Purse(addr) => BidAddr::UnbondPurse { + validator: validator_public_key.to_account_hash(), + unbonder: *addr, + }, + }; + + match self.read_unbond(delegator_unbond_addr)? { + Some(unbond) => { + let burned = slash_unbonds(unbond.take_eras()); + + burned_amount += burned; + self.write_unbond(delegator_unbond_addr, None)?; + } + None => { + continue; + } + } + } + } + } + + // get rid of any staked token in the unbonding queue + let validator_unbond_addr = BidAddr::UnbondAccount { + validator: validator_public_key.to_account_hash(), + unbonder: validator_public_key.to_account_hash(), + }; + match self.read_unbond(validator_unbond_addr)? { + Some(unbond) => { + let burned = slash_unbonds(unbond.take_eras()); + burned_amount += burned; + self.write_unbond(validator_unbond_addr, None)?; + } + None => { + continue; + } + } + } + + self.reduce_total_supply(burned_amount)?; + + Ok(()) + } + + /// Takes active_bids and delegators to construct a list of validators' total bids (their own + /// added to their delegators') ordered by size from largest to smallest, then takes the top N + /// (number of auction slots) bidders and replaces era_validators with these. 
+ /// + /// Accessed by: node + fn run_auction( + &mut self, + era_end_timestamp_millis: u64, + evicted_validators: Vec, + max_delegators_per_validator: u32, + include_credits: bool, + credit_cap: Ratio, + minimum_bid_amount: u64, + ) -> Result<(), ApiError> { + debug!("run_auction called"); + + if self.get_caller() != PublicKey::System.to_account_hash() { + return Err(Error::InvalidCaller.into()); + } + + let vesting_schedule_period_millis = self.vesting_schedule_period_millis(); + let validator_slots = detail::get_validator_slots(self)?; + let auction_delay = detail::get_auction_delay(self)?; + // We have to store auction_delay future eras, one current era and one past era (for + // rewards calculations). + let snapshot_size = auction_delay as usize + 2; + let mut era_id: EraId = detail::get_era_id(self)?; + + // Process unbond requests + debug!("processing unbond requests"); + detail::process_unbond_requests(self, max_delegators_per_validator)?; + debug!("processing unbond request successful"); + + let mut validator_bids_detail = detail::get_validator_bids(self, era_id)?; + + // Process bids + let mut bids_modified = false; + for (validator_public_key, validator_bid) in + validator_bids_detail.validator_bids_mut().iter_mut() + { + if process_with_vesting_schedule( + self, + validator_bid, + era_end_timestamp_millis, + self.vesting_schedule_period_millis(), + )? 
{ + bids_modified = true; + } + + if evicted_validators.contains(validator_public_key) { + validator_bid.deactivate(); + bids_modified = true; + } + } + + let winners = validator_bids_detail.pick_winners( + era_id, + validator_slots, + minimum_bid_amount, + include_credits, + credit_cap, + era_end_timestamp_millis, + vesting_schedule_period_millis, + )?; + + let (validator_bids, validator_credits, delegator_bids, reservations) = + validator_bids_detail.destructure(); + + // call prune BEFORE incrementing the era + detail::prune_validator_credits(self, era_id, &validator_credits); + + // Increment era + era_id = era_id.checked_add(1).ok_or(Error::ArithmeticOverflow)?; + + let delayed_era = era_id + .checked_add(auction_delay) + .ok_or(Error::ArithmeticOverflow)?; + + // Update seigniorage recipients for current era + { + let mut snapshot = detail::get_seigniorage_recipients_snapshot(self)?; + let recipients = + seigniorage_recipients(&winners, &validator_bids, &delegator_bids, &reservations)?; + let previous_recipients = snapshot.insert(delayed_era, recipients); + assert!(previous_recipients.is_none()); + + let snapshot = snapshot.into_iter().rev().take(snapshot_size).collect(); + detail::set_seigniorage_recipients_snapshot(self, snapshot)?; + } + + detail::set_era_id(self, era_id)?; + detail::set_era_end_timestamp_millis(self, era_end_timestamp_millis)?; + + if bids_modified { + detail::set_validator_bids(self, validator_bids)?; + } + + debug!("run_auction successful"); + + Ok(()) + } + + /// Mint and distribute seigniorage rewards to validators and their delegators, + /// according to `reward_factors` returned by the consensus component. + // TODO: rework EraInfo and other related structs, methods, etc. 
to report correct era-end + // totals of per-block rewards + fn distribute(&mut self, rewards: BTreeMap>) -> Result<(), Error> { + if self.get_caller() != PublicKey::System.to_account_hash() { + error!("invalid caller to auction distribute"); + return Err(Error::InvalidCaller); + } + + debug!("reading seigniorage recipients snapshot"); + let seigniorage_recipients_snapshot = detail::get_seigniorage_recipients_snapshot(self)?; + let current_era_id = detail::get_era_id(self)?; + + let mut era_info = EraInfo::new(); + let seigniorage_allocations = era_info.seigniorage_allocations_mut(); + + debug!(rewards_set_size = rewards.len(), "processing rewards"); + for item in rewards + .into_iter() + .filter(|(key, _amounts)| key != &PublicKey::System) + .map(|(proposer, amounts)| { + rewards_per_validator( + &proposer, + current_era_id, + &amounts, + &SeigniorageRecipientsSnapshot::V2(seigniorage_recipients_snapshot.clone()), + ) + .map(|infos| infos.into_iter().map(move |info| (proposer.clone(), info))) + }) + .flatten_ok() + { + let (validator_public_key, reward_info) = item?; + + let validator_bid_addr = BidAddr::Validator(validator_public_key.to_account_hash()); + let mut maybe_bridged_validator_addrs: Option> = None; + let validator_reward_amount = reward_info.validator_reward(); + let (validator_bonding_purse, min_del, max_del) = + match detail::get_distribution_target(self, validator_bid_addr) { + Ok(target) => match target { + DistributeTarget::Validator(mut validator_bid) => { + debug!(?validator_public_key, "validator payout starting "); + let validator_bonding_purse = *validator_bid.bonding_purse(); + validator_bid.increase_stake(validator_reward_amount)?; + + self.write_bid( + validator_bid_addr.into(), + BidKind::Validator(validator_bid.clone()), + )?; + ( + validator_bonding_purse, + validator_bid.minimum_delegation_amount().into(), + validator_bid.maximum_delegation_amount().into(), + ) + } + DistributeTarget::BridgedValidator { + requested_validator_bid_addr: 
_requested_validator_bid_addr, + current_validator_bid_addr, + bridged_validator_addrs, + mut validator_bid, + } => { + debug!(?validator_public_key, "bridged validator payout starting "); + maybe_bridged_validator_addrs = Some(bridged_validator_addrs); // <-- important + let validator_bonding_purse = *validator_bid.bonding_purse(); + validator_bid.increase_stake(validator_reward_amount)?; + + self.write_bid( + current_validator_bid_addr.into(), + BidKind::Validator(validator_bid.clone()), + )?; + ( + validator_bonding_purse, + validator_bid.minimum_delegation_amount().into(), + validator_bid.maximum_delegation_amount().into(), + ) + } + DistributeTarget::Unbond(unbond) => match unbond.target_unbond_era() { + Some(mut unbond_era) => { + let account_hash = validator_public_key.to_account_hash(); + let unbond_addr = BidAddr::UnbondAccount { + validator: account_hash, + unbonder: account_hash, + }; + let validator_bonding_purse = *unbond_era.bonding_purse(); + let new_amount = + unbond_era.amount().saturating_add(validator_reward_amount); + unbond_era.with_amount(new_amount); + self.write_unbond(unbond_addr, Some(*unbond.clone()))?; + (validator_bonding_purse, U512::MAX, U512::MAX) + } + None => { + warn!( + ?validator_public_key, + "neither validator bid or unbond found" + ); + continue; + } + }, + DistributeTarget::Delegator(_) => { + return Err(Error::UnexpectedBidVariant); + } + }, + Err(Error::BridgeRecordChainTooLong) => { + warn!(?validator_public_key, "bridge record chain too long"); + continue; + } + Err(err) => return Err(err), + }; + + self.mint_into_existing_purse(validator_reward_amount, validator_bonding_purse)?; + seigniorage_allocations.push(SeigniorageAllocation::validator( + validator_public_key.clone(), + validator_reward_amount, + )); + debug!(?validator_public_key, "validator payout finished"); + + debug!(?validator_public_key, "delegator payouts for validator"); + let mut undelegates = vec![]; + let mut prunes = vec![]; + for (delegator_kind, 
delegator_reward) in reward_info.take_delegator_rewards() { + let mut delegator_bid_addrs = Vec::with_capacity(2); + if let Some(bridged_validator_addrs) = &maybe_bridged_validator_addrs { + for bridged_addr in bridged_validator_addrs { + delegator_bid_addrs.push(BidAddr::new_delegator_kind_relaxed( + bridged_addr.validator_account_hash(), + &delegator_kind, + )) + } + } + delegator_bid_addrs.push(BidAddr::new_delegator_kind_relaxed( + validator_bid_addr.validator_account_hash(), + &delegator_kind, + )); + let mut maybe_delegator_bonding_purse: Option = None; + for delegator_bid_addr in delegator_bid_addrs { + if delegator_reward.is_zero() { + maybe_delegator_bonding_purse = None; + break; // if there is no reward to give, no need to continue looking + } else { + let delegator_bid_key = delegator_bid_addr.into(); + match detail::get_distribution_target(self, delegator_bid_addr) { + Ok(target) => match target { + DistributeTarget::Delegator(mut delegator_bid) => { + let delegator_bonding_purse = *delegator_bid.bonding_purse(); + let increased_stake = + delegator_bid.increase_stake(delegator_reward)?; + if increased_stake < min_del { + // update the bid initially, but register for unbond and + // prune + undelegates.push(( + delegator_kind.clone(), + validator_public_key.clone(), + increased_stake, + )); + prunes.push(delegator_bid_addr); + } else if increased_stake > max_del { + // update the bid initially, but register overage for unbond + let unbond_amount = increased_stake.saturating_sub(max_del); + if !unbond_amount.is_zero() { + undelegates.push(( + delegator_kind.clone(), + validator_public_key.clone(), + unbond_amount, + )); + } + } + self.write_bid( + delegator_bid_key, + BidKind::Delegator(delegator_bid), + )?; + maybe_delegator_bonding_purse = Some(delegator_bonding_purse); + break; + } + DistributeTarget::Unbond(mut unbond) => { + match unbond.target_unbond_era_mut() { + Some(unbond_era) => { + let unbond_addr = BidAddr::new_delegator_unbond_relaxed( + 
delegator_bid_addr.validator_account_hash(), + &delegator_kind, + ); + let delegator_bonding_purse = + *unbond_era.bonding_purse(); + let new_amount = unbond_era + .amount() + .saturating_add(delegator_reward); + + unbond_era.with_amount(new_amount); + self.write_unbond(unbond_addr, Some(*unbond.clone()))?; + maybe_delegator_bonding_purse = + Some(delegator_bonding_purse); + break; + } + None => { + debug!( + ?delegator_bid_key, + "neither delegator bid or unbond found" + ); + // keep looking + } + } + } + DistributeTarget::Validator(_) + | DistributeTarget::BridgedValidator { .. } => { + return Err(Error::UnexpectedBidVariant) + } + }, + Err(Error::DelegatorNotFound) => { + debug!( + ?validator_public_key, + ?delegator_bid_addr, + "delegator bid not found" + ); + // keep looking + } + Err(err) => return Err(err), + } + } + } + + // we include 0 allocations for explicitness + let allocation = SeigniorageAllocation::delegator_kind( + delegator_kind, + validator_public_key.clone(), + delegator_reward, + ); + seigniorage_allocations.push(allocation); + if let Some(delegator_bonding_purse) = maybe_delegator_bonding_purse { + self.mint_into_existing_purse(delegator_reward, delegator_bonding_purse)?; + } + } + + for (kind, pk, unbond_amount) in undelegates { + debug!(?kind, ?pk, ?unbond_amount, "unbonding delegator"); + self.undelegate(kind, pk, unbond_amount)?; + } + + for bid_addr in prunes { + debug!(?bid_addr, "pruning bid"); + self.prune_bid(bid_addr); + } + + debug!( + ?validator_public_key, + delegator_set_size = seigniorage_allocations.len(), + "delegator payout finished" + ); + + debug!( + ?validator_public_key, + "rewards minted into recipient purses" + ); + } + + // record allocations for this era for reporting purposes. + self.record_era_info(era_info)?; + + Ok(()) + } + + /// Reads current era id. + fn read_era_id(&mut self) -> Result { + detail::get_era_id(self) + } + + /// Activates a given validator's bid. 
To be used when a validator has been marked as inactive + /// by consensus (aka "evicted"). + fn activate_bid(&mut self, validator: PublicKey, minimum_bid: u64) -> Result<(), Error> { + let provided_account_hash = AccountHash::from(&validator); + + if !self.is_allowed_session_caller(&provided_account_hash) { + return Err(Error::InvalidContext); + } + + let key = BidAddr::from(validator).into(); + if let Some(BidKind::Validator(mut validator_bid)) = self.read_bid(&key)? { + if validator_bid.staked_amount() >= minimum_bid.into() { + validator_bid.activate(); + self.write_bid(key, BidKind::Validator(validator_bid))?; + Ok(()) + } else { + Err(Error::BondTooSmall) + } + } else { + Err(Error::ValidatorNotFound) + } + } + + /// Updates a `ValidatorBid` and all related delegator bids to use a new public key. + /// + /// This in effect "transfers" a validator bid along with its stake and all delegators + /// from one public key to another. + /// This method can only be called by the account associated with the current `ValidatorBid`. + /// + /// The arguments are the existing bid's 'validator_public_key' and the new public key. 
+ fn change_bid_public_key( + &mut self, + public_key: PublicKey, + new_public_key: PublicKey, + ) -> Result<(), Error> { + let validator_account_hash = AccountHash::from(&public_key); + + // check that the caller is the current bid's owner + if !self.is_allowed_session_caller(&validator_account_hash) { + return Err(Error::InvalidContext); + } + + // verify that a bid for given public key exists + let validator_bid_addr = BidAddr::from(public_key.clone()); + let mut validator_bid = read_validator_bid(self, &validator_bid_addr.into())?; + + // verify that a bid for the new key does not exist yet + let new_validator_bid_addr = BidAddr::from(new_public_key.clone()); + if self.read_bid(&new_validator_bid_addr.into())?.is_some() { + return Err(Error::ValidatorBidExistsAlready); + } + + debug!("changing validator bid {validator_bid_addr} public key from {public_key} to {new_public_key}"); + + // store new validator bid + validator_bid.with_validator_public_key(new_public_key.clone()); + self.write_bid( + new_validator_bid_addr.into(), + BidKind::Validator(validator_bid), + )?; + + // store bridge record in place of old validator bid + let bridge = Bridge::new( + public_key.clone(), + new_public_key.clone(), + self.read_era_id()?, + ); + // write a bridge record under the old account hash, allowing forward pathing + // i.e. given an older account hash find the replacement account hash + self.write_bid( + validator_bid_addr.into(), + BidKind::Bridge(Box::new(bridge.clone())), + )?; + // write a bridge record under the new account hash, allowing reverse pathing + // i.e. 
given a newer account hash find the previous account hash + let rev_addr = BidAddr::new_validator_rev_addr_from_public_key(new_public_key.clone()); + self.write_bid(rev_addr.into(), BidKind::Bridge(Box::new(bridge)))?; + + debug!("transferring delegator bids from validator bid {validator_bid_addr} to {new_validator_bid_addr}"); + let delegators = read_delegator_bids(self, &public_key)?; + for mut delegator in delegators { + let delegator_bid_addr = + BidAddr::new_delegator_kind(&public_key, delegator.delegator_kind()); + + delegator.with_validator_public_key(new_public_key.clone()); + let new_delegator_bid_addr = + BidAddr::new_delegator_kind(&new_public_key, delegator.delegator_kind()); + + self.write_bid( + new_delegator_bid_addr.into(), + BidKind::Delegator(Box::from(delegator)), + )?; + + debug!("pruning delegator bid {delegator_bid_addr}"); + self.prune_bid(delegator_bid_addr); + } + + Ok(()) + } + + /// Writes a validator credit record. + fn write_validator_credit( + &mut self, + validator: PublicKey, + era_id: EraId, + amount: U512, + ) -> Result, Error> { + // only the system may use this method + if self.get_caller() != PublicKey::System.to_account_hash() { + error!("invalid caller to auction validator_credit"); + return Err(Error::InvalidCaller); + } + + // is imputed public key associated with a validator bid record? + let bid_addr = BidAddr::new_from_public_keys(&validator, None); + let key = Key::BidAddr(bid_addr); + let _ = match self.read_bid(&key)? { + Some(bid_kind) => bid_kind, + None => { + warn!( + ?key, + ?era_id, + ?amount, + "attempt to add a validator credit to a non-existent validator" + ); + return Ok(None); + } + }; + + // if amount is zero, noop + if amount.is_zero() { + return Ok(None); + } + + // write credit record + let credit_addr = BidAddr::new_credit(&validator, era_id); + let credit_key = Key::BidAddr(credit_addr); + let credit_bid = match self.read_bid(&credit_key)? 
{ + Some(BidKind::Credit(mut existing_credit)) => { + existing_credit.increase(amount); + existing_credit + } + Some(_) => return Err(Error::UnexpectedBidVariant), + None => Box::new(ValidatorCredit::new(validator, era_id, amount)), + }; + + self.write_bid(credit_key, BidKind::Credit(credit_bid)) + .map(|_| Some(credit_addr)) + } +} diff --git a/storage/src/system/auction/auction_native.rs b/storage/src/system/auction/auction_native.rs new file mode 100644 index 0000000000..d4338c8371 --- /dev/null +++ b/storage/src/system/auction/auction_native.rs @@ -0,0 +1,505 @@ +use crate::{ + global_state::{error::Error as GlobalStateError, state::StateReader}, + system::{ + auction::{ + providers::{AccountProvider, MintProvider, RuntimeProvider, StorageProvider}, + Auction, + }, + mint::Mint, + runtime_native::RuntimeNative, + }, + tracking_copy::{TrackingCopyEntityExt, TrackingCopyError}, +}; +use casper_types::{ + account::AccountHash, + bytesrepr::{FromBytes, ToBytes}, + system::{ + auction::{BidAddr, BidKind, EraInfo, Error, Unbond, UnbondEra, UnbondKind}, + mint, + }, + AccessRights, CLTyped, CLValue, Key, KeyTag, PublicKey, StoredValue, URef, U512, +}; +use std::collections::BTreeSet; +use tracing::{debug, error}; + +impl RuntimeProvider for RuntimeNative +where + S: StateReader, +{ + fn get_caller(&self) -> AccountHash { + self.address() + } + + fn is_allowed_session_caller(&self, account_hash: &AccountHash) -> bool { + if self.get_caller() == PublicKey::System.to_account_hash() { + return true; + } + + account_hash == &self.address() + } + + fn is_valid_uref(&self, uref: URef) -> bool { + self.access_rights().has_access_rights_to_uref(&uref) + } + + fn named_keys_get(&self, name: &str) -> Option { + self.named_keys().get(name).cloned() + } + + fn get_keys(&mut self, key_tag: &KeyTag) -> Result, Error> { + self.tracking_copy() + .borrow_mut() + .get_keys(key_tag) + .map_err(|error| { + error!(%key_tag, "RuntimeProvider::get_keys: {:?}", error); + Error::Storage + }) + 
} + + fn get_keys_by_prefix(&mut self, prefix: &[u8]) -> Result, Error> { + self.tracking_copy() + .borrow_mut() + .reader() + .keys_with_prefix(prefix) + .map_err(|error| { + error!("RuntimeProvider::get_keys_by_prefix: {:?}", error); + Error::Storage + }) + } + + fn delegator_count(&mut self, bid_addr: &BidAddr) -> Result { + let delegated_accounts = { + let prefix = bid_addr.delegated_account_prefix()?; + let keys = self.get_keys_by_prefix(&prefix).map_err(|err| { + error!("RuntimeProvider::delegator_count {:?}", err); + Error::Storage + })?; + keys.len() + }; + let delegated_purses = { + let prefix = bid_addr.delegated_purse_prefix()?; + let keys = self.get_keys_by_prefix(&prefix).map_err(|err| { + error!("RuntimeProvider::delegator_count {:?}", err); + Error::Storage + })?; + keys.len() + }; + Ok(delegated_accounts.saturating_add(delegated_purses)) + } + + fn reservation_count(&mut self, bid_addr: &BidAddr) -> Result { + let reserved_accounts = { + let reservation_prefix = bid_addr.reserved_account_prefix()?; + let reservation_keys = self + .get_keys_by_prefix(&reservation_prefix) + .map_err(|err| { + error!("RuntimeProvider::reservation_count {:?}", err); + Error::Storage + })?; + reservation_keys.len() + }; + let reserved_purses = { + let reservation_prefix = bid_addr.reserved_purse_prefix()?; + let reservation_keys = self + .get_keys_by_prefix(&reservation_prefix) + .map_err(|err| { + error!("RuntimeProvider::reservation_count {:?}", err); + Error::Storage + })?; + reservation_keys.len() + }; + Ok(reserved_accounts.saturating_add(reserved_purses)) + } + + fn used_reservation_count(&mut self, bid_addr: &BidAddr) -> Result { + let reservation_account_prefix = bid_addr.reserved_account_prefix()?; + let reservation_purse_prefix = bid_addr.reserved_purse_prefix()?; + + let mut reservation_keys = self + .get_keys_by_prefix(&reservation_account_prefix) + .map_err(|exec_error| { + error!("RuntimeProvider::reservation_count {:?}", exec_error); + 
>::from(exec_error).unwrap_or(Error::Storage) + })?; + + let more = self + .get_keys_by_prefix(&reservation_purse_prefix) + .map_err(|exec_error| { + error!("RuntimeProvider::reservation_count {:?}", exec_error); + >::from(exec_error).unwrap_or(Error::Storage) + })?; + + reservation_keys.extend(more); + + let mut used = 0; + for reservation_key in reservation_keys { + if let Key::BidAddr(BidAddr::ReservedDelegationAccount { + validator, + delegator, + }) = reservation_key + { + let key_to_check = Key::BidAddr(BidAddr::DelegatedAccount { + validator, + delegator, + }); + if let Ok(Some(_)) = self.read_bid(&key_to_check) { + used += 1; + } + } + if let Key::BidAddr(BidAddr::ReservedDelegationPurse { + validator, + delegator, + }) = reservation_key + { + let key_to_check = Key::BidAddr(BidAddr::DelegatedPurse { + validator, + delegator, + }); + if let Ok(Some(_)) = self.read_bid(&key_to_check) { + used += 1; + } + } + } + Ok(used) + } + + fn vesting_schedule_period_millis(&self) -> u64 { + self.vesting_schedule_period_millis() + } + + fn allow_auction_bids(&self) -> bool { + self.allow_auction_bids() + } + + fn should_compute_rewards(&self) -> bool { + self.compute_rewards() + } +} + +impl StorageProvider for RuntimeNative +where + S: StateReader, +{ + fn read(&mut self, uref: URef) -> Result, Error> { + // check access rights on uref + if !self.access_rights().has_access_rights_to_uref(&uref) { + return Err(Error::ForgedReference); + } + let key = &Key::URef(uref); + let stored_value = match self.tracking_copy().borrow_mut().read(key) { + Ok(Some(stored_value)) => stored_value, + Ok(None) => return Ok(None), + Err(_) => return Err(Error::Storage), + }; + // by convention, we only store CLValues under Key::URef + if let StoredValue::CLValue(value) = stored_value { + // Only CLTyped instances should be stored as a CLValue. 
+ let value = CLValue::into_t(value).map_err(|_| Error::CLValue)?; + Ok(Some(value)) + } else { + Err(Error::CLValue) + } + } + + fn write(&mut self, uref: URef, value: T) -> Result<(), Error> { + let cl_value = CLValue::from_t(value).map_err(|_| Error::CLValue)?; + // is the uref writeable? + if !uref.is_writeable() { + error!("uref not writeable {}", uref); + return Err(Error::Storage); + } + // check access rights on uref + if !self.access_rights().has_access_rights_to_uref(&uref) { + return Err(Error::ForgedReference); + } + self.tracking_copy() + .borrow_mut() + .write(Key::URef(uref), StoredValue::CLValue(cl_value)); + Ok(()) + } + + fn read_bid(&mut self, key: &Key) -> Result, Error> { + match self.tracking_copy().borrow_mut().read(key) { + Ok(Some(StoredValue::BidKind(bid_kind))) => Ok(Some(bid_kind)), + Ok(Some(_)) => { + error!("StorageProvider::read_bid: unexpected StoredValue variant"); + Err(Error::Storage) + } + Ok(None) => Ok(None), + Err(TrackingCopyError::BytesRepr(_)) => Err(Error::Serialization), + Err(err) => { + error!("StorageProvider::read_bid: {:?}", err); + Err(Error::Storage) + } + } + } + + fn write_bid(&mut self, key: Key, bid_kind: BidKind) -> Result<(), Error> { + let stored_value = StoredValue::BidKind(bid_kind); + + // Charge for amount as measured by serialized length + // let bytes_count = stored_value.serialized_length(); + // self.charge_gas_storage(bytes_count)?; + + self.tracking_copy().borrow_mut().write(key, stored_value); + Ok(()) + } + + fn read_unbond(&mut self, bid_addr: BidAddr) -> Result, Error> { + match self + .tracking_copy() + .borrow_mut() + .read(&Key::BidAddr(bid_addr)) + { + Ok(Some(StoredValue::BidKind(BidKind::Unbond(unbond)))) => Ok(Some(*unbond)), + Ok(Some(_)) => { + error!("StorageProvider::read_unbonds: unexpected StoredValue variant"); + Err(Error::Storage) + } + Ok(None) => Ok(None), + Err(TrackingCopyError::BytesRepr(_)) => Err(Error::Serialization), + Err(err) => { + 
error!("StorageProvider::read_unbonds: {:?}", err); + Err(Error::Storage) + } + } + } + + fn write_unbond(&mut self, bid_addr: BidAddr, unbond: Option) -> Result<(), Error> { + let unbond_key = Key::BidAddr(bid_addr); + match unbond { + Some(unbond) => { + self.tracking_copy().borrow_mut().write( + unbond_key, + StoredValue::BidKind(BidKind::Unbond(Box::new(unbond))), + ); + } + None => { + self.tracking_copy().borrow_mut().prune(unbond_key); + } + } + Ok(()) + } + + fn record_era_info(&mut self, era_info: EraInfo) -> Result<(), Error> { + if self.get_caller() != PublicKey::System.to_account_hash() { + return Err(Error::InvalidContext); + } + self.tracking_copy() + .borrow_mut() + .write(Key::EraSummary, StoredValue::EraInfo(era_info)); + Ok(()) + } + + fn prune_bid(&mut self, bid_addr: BidAddr) { + self.tracking_copy().borrow_mut().prune(bid_addr.into()); + } +} + +impl MintProvider for RuntimeNative +where + S: StateReader, +{ + fn unbond(&mut self, unbond_kind: &UnbondKind, unbond_era: &UnbondEra) -> Result<(), Error> { + let (purse, maybe_account_hash) = match unbond_kind { + UnbondKind::Validator(pk) | UnbondKind::DelegatedPublicKey(pk) => { + let account_hash = pk.to_account_hash(); + // Do a migration if the account hasn't been migrated yet. This is just a read if it + // has been migrated already. 
+ self.tracking_copy() + .borrow_mut() + .migrate_account(account_hash, self.protocol_version()) + .map_err(|error| { + error!( + "MintProvider::unbond: couldn't migrate account: {:?}", + error + ); + Error::Storage + })?; + + let maybe_value = self + .tracking_copy() + .borrow_mut() + .read(&Key::Account(account_hash)) + .map_err(|error| { + error!("MintProvider::unbond: {:?}", error); + Error::Storage + })?; + + match maybe_value { + Some(StoredValue::Account(account)) => { + (account.main_purse(), Some(account_hash)) + } + Some(StoredValue::CLValue(cl_value)) => { + let entity_key: Key = cl_value.into_t().map_err(|_| Error::CLValue)?; + let maybe_value = self + .tracking_copy() + .borrow_mut() + .read(&entity_key) + .map_err(|error| { + error!("MintProvider::unbond: {:?}", error); + Error::Storage + })?; + match maybe_value { + Some(StoredValue::AddressableEntity(entity)) => { + (entity.main_purse(), Some(account_hash)) + } + Some(_cl_value) => return Err(Error::CLValue), + None => return Err(Error::InvalidPublicKey), + } + } + Some(_cl_value) => return Err(Error::CLValue), + None => return Err(Error::InvalidPublicKey), + } + } + UnbondKind::DelegatedPurse(addr) => { + let purse = URef::new(*addr, AccessRights::READ_ADD_WRITE); + match self.balance(purse) { + Ok(Some(_)) => (purse, None), + Ok(None) => return Err(Error::MissingPurse), + Err(err) => { + error!("MintProvider::unbond: {:?}", err); + return Err(Error::Unbonding); + } + } + } + }; + + self.mint_transfer_direct( + maybe_account_hash, + *unbond_era.bonding_purse(), + purse, + *unbond_era.amount(), + None, + ) + .map_err(|_| Error::Transfer)? 
+ .map_err(|_| Error::Transfer)?; + Ok(()) + } + + fn mint_transfer_direct( + &mut self, + to: Option, + source: URef, + target: URef, + amount: U512, + id: Option, + ) -> Result, Error> { + let addr = if let Some(uref) = self.runtime_footprint().main_purse() { + uref.addr() + } else { + return Err(Error::InvalidContext); + }; + if !(addr == source.addr() || self.get_caller() == PublicKey::System.to_account_hash()) { + return Err(Error::InvalidCaller); + } + + // let gas_counter = self.gas_counter(); + self.extend_access_rights(&[source, target.into_add()]); + + match self.transfer(to, source, target, amount, id) { + Ok(ret) => { + // self.set_gas_counter(gas_counter); + Ok(Ok(ret)) + } + Err(err) => { + error!("{}", err); + Err(Error::Transfer) + } + } + } + + fn mint_into_existing_purse( + &mut self, + amount: U512, + existing_purse: URef, + ) -> Result<(), Error> { + if self.get_caller() != PublicKey::System.to_account_hash() { + return Err(Error::InvalidCaller); + } + + match ::mint_into_existing_purse(self, existing_purse, amount) { + Ok(ret) => Ok(ret), + Err(err) => { + error!("{}", err); + Err(Error::MintError) + } + } + } + + fn create_purse(&mut self) -> Result { + let initial_balance = U512::zero(); + match ::mint(self, initial_balance) { + Ok(ret) => Ok(ret), + Err(err) => { + error!("{}", err); + Err(Error::CreatePurseFailed) + } + } + } + + fn available_balance(&mut self, purse: URef) -> Result, Error> { + match ::balance(self, purse) { + Ok(ret) => Ok(ret), + Err(err) => { + error!("{}", err); + Err(Error::GetBalance) + } + } + } + + fn read_base_round_reward(&mut self) -> Result { + match ::read_base_round_reward(self) { + Ok(ret) => Ok(ret), + Err(err) => { + error!("{}", err); + Err(Error::MissingValue) + } + } + } + + fn mint(&mut self, amount: U512) -> Result { + match ::mint(self, amount) { + Ok(ret) => Ok(ret), + Err(err) => { + error!("{}", err); + Err(Error::MintReward) + } + } + } + + fn reduce_total_supply(&mut self, amount: U512) -> 
Result<(), Error> { + match ::reduce_total_supply(self, amount) { + Ok(ret) => Ok(ret), + Err(err) => { + error!("{}", err); + Err(Error::MintReduceTotalSupply) + } + } + } +} + +impl AccountProvider for RuntimeNative +where + S: StateReader, +{ + fn get_main_purse(&self) -> Result { + // NOTE: this is used by the system and is not (and should not be made to be) accessible + // from userland. + match self.runtime_footprint().main_purse() { + None => { + debug!("runtime_native attempt to access non-existent main purse"); + Err(Error::InvalidContext) + } + Some(purse) => Ok(purse), + } + } + + /// Set main purse. + fn set_main_purse(&mut self, purse: URef) { + self.runtime_footprint_mut().set_main_purse(purse); + } +} + +impl Auction for RuntimeNative where S: StateReader +{} diff --git a/storage/src/system/auction/detail.rs b/storage/src/system/auction/detail.rs new file mode 100644 index 0000000000..f3632a6a6c --- /dev/null +++ b/storage/src/system/auction/detail.rs @@ -0,0 +1,1734 @@ +use std::{collections::BTreeMap, convert::TryInto, ops::Mul}; + +use super::{ + Auction, EraValidators, MintProvider, RuntimeProvider, StorageProvider, ValidatorWeights, +}; +use casper_types::{ + bytesrepr::{FromBytes, ToBytes}, + system::auction::{ + BidAddr, BidAddrTag, BidKind, DelegatorBid, DelegatorBids, DelegatorKind, Error, + Reservation, Reservations, SeigniorageRecipient, SeigniorageRecipientV2, + SeigniorageRecipientsSnapshot, SeigniorageRecipientsSnapshotV1, + SeigniorageRecipientsSnapshotV2, SeigniorageRecipientsV2, Unbond, UnbondEra, UnbondKind, + ValidatorBid, ValidatorBids, ValidatorCredit, ValidatorCredits, WeightsBreakout, + AUCTION_DELAY_KEY, DELEGATION_RATE_DENOMINATOR, ERA_END_TIMESTAMP_MILLIS_KEY, ERA_ID_KEY, + SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY, UNBONDING_DELAY_KEY, VALIDATOR_SLOTS_KEY, + }, + AccessRights, ApiError, CLTyped, EraId, Key, KeyTag, PublicKey, URef, U512, +}; +use num_rational::Ratio; +use num_traits::{CheckedMul, CheckedSub}; +use 
tracing::{debug, error, warn}; + +/// Maximum length of bridge records chain. +/// Used when looking for the most recent bid record to avoid unbounded computations. +const MAX_BRIDGE_CHAIN_LENGTH: u64 = 20; + +fn read_from(provider: &mut P, name: &str) -> Result +where + P: StorageProvider + RuntimeProvider + ?Sized, + T: FromBytes + CLTyped, +{ + let key = match provider.named_keys_get(name) { + None => { + error!("auction missing named key {:?}", name); + return Err(Error::MissingKey); + } + Some(key) => key, + }; + let uref = key.into_uref().ok_or(Error::InvalidKeyVariant)?; + let value: T = provider.read(uref)?.ok_or(Error::MissingValue)?; + Ok(value) +} + +fn write_to(provider: &mut P, name: &str, value: T) -> Result<(), Error> +where + P: StorageProvider + RuntimeProvider + ?Sized, + T: ToBytes + CLTyped, +{ + let key = provider.named_keys_get(name).ok_or(Error::MissingKey)?; + let uref = key.into_uref().ok_or(Error::InvalidKeyVariant)?; + provider.write(uref, value) +} + +/// Aggregated bid data for a Validator. +#[derive(Debug, Default)] +pub struct ValidatorBidsDetail { + validator_bids: ValidatorBids, + validator_credits: ValidatorCredits, + delegator_bids: DelegatorBids, + reservations: Reservations, +} + +impl ValidatorBidsDetail { + /// Ctor. + pub fn new() -> Self { + ValidatorBidsDetail { + validator_bids: BTreeMap::new(), + validator_credits: BTreeMap::new(), + delegator_bids: BTreeMap::new(), + reservations: BTreeMap::new(), + } + } + + /// Inserts a validator bid. + pub fn insert_bid( + &mut self, + validator: PublicKey, + validator_bid: Box, + delegators: Vec>, + reservations: Vec>, + ) -> Option> { + self.delegator_bids.insert(validator.clone(), delegators); + self.reservations.insert(validator.clone(), reservations); + self.validator_bids.insert(validator, validator_bid) + } + + /// Inserts a validator credit. 
+ pub fn insert_credit( + &mut self, + validator: PublicKey, + era_id: EraId, + validator_credit: Box, + ) { + let credits = &mut self.validator_credits; + + credits + .entry(validator.clone()) + .and_modify(|inner| { + inner + .entry(era_id) + .and_modify(|_| { + warn!( + ?validator, + ?era_id, + "multiple validator credit entries in same era" + ) + }) + .or_insert(validator_credit.clone()); + }) + .or_insert_with(|| { + let mut inner = BTreeMap::new(); + inner.insert(era_id, validator_credit); + inner + }); + } + + /// Get validator weights. + #[allow(clippy::too_many_arguments)] + pub fn validator_weights_breakout( + &mut self, + era_ending: EraId, + era_end_timestamp_millis: u64, + vesting_schedule_period_millis: u64, + minimum_bid_amount: u64, + include_credits: bool, + credits_cap: Ratio, + ) -> Result { + let mut ret = WeightsBreakout::new(); + let min_bid = minimum_bid_amount.into(); + for (validator_public_key, bid) in self + .validator_bids + .iter() + .filter(|(_, v)| !v.inactive() && !v.staked_amount() >= U512::one()) + { + let mut staked_amount = bid.staked_amount(); + let meets_minimum = staked_amount >= min_bid; + if let Some(delegators) = self.delegator_bids.get(validator_public_key) { + staked_amount = staked_amount + .checked_add(delegators.iter().map(|d| d.staked_amount()).sum()) + .ok_or(Error::InvalidAmount)?; + } + + let credit_amount = self.credit_amount( + validator_public_key, + era_ending, + staked_amount, + include_credits, + credits_cap, + ); + let total = staked_amount.saturating_add(credit_amount); + + let locked = bid.is_locked_with_vesting_schedule( + era_end_timestamp_millis, + vesting_schedule_period_millis, + ); + + ret.register(validator_public_key.clone(), total, locked, meets_minimum); + } + + Ok(ret) + } + + fn credit_amount( + &self, + validator_public_key: &PublicKey, + era_ending: EraId, + staked_amount: U512, + include_credit: bool, + cap: Ratio, + ) -> U512 { + if !include_credit { + return U512::zero(); + } + + if let 
Some(inner) = self.validator_credits.get(validator_public_key) { + if let Some(credit) = inner.get(&era_ending) { + let capped = Ratio::new_raw(staked_amount, U512::one()) + .mul(cap) + .to_integer(); + let credit_amount = credit.amount(); + return credit_amount.min(capped); + } + } + + U512::zero() + } + + #[allow(unused)] + pub(crate) fn validator_bids(&self) -> &ValidatorBids { + &self.validator_bids + } + + pub(crate) fn validator_bids_mut(&mut self) -> &mut ValidatorBids { + &mut self.validator_bids + } + + /// Select winners for auction. + #[allow(clippy::too_many_arguments)] + pub fn pick_winners( + &mut self, + era_id: EraId, + validator_slots: usize, + minimum_bid_amount: u64, + include_credits: bool, + credit_cap: Ratio, + era_end_timestamp_millis: u64, + vesting_schedule_period_millis: u64, + ) -> Result { + // as a safety mechanism, if we would fall below 75% of the expected + // validator count by enforcing minimum bid, allow bids with less + // that min bid up to fill to 75% of the expected count + let threshold = Ratio::new(3, 4) + .mul(Ratio::new(validator_slots, 1)) + .to_integer(); + let breakout = self.validator_weights_breakout( + era_id, + era_end_timestamp_millis, + vesting_schedule_period_millis, + minimum_bid_amount, + include_credits, + credit_cap, + )?; + let ret = breakout.take(validator_slots, threshold); + Ok(ret) + } + + /// Consume self into in underlying collections. + pub fn destructure(self) -> (ValidatorBids, ValidatorCredits, DelegatorBids, Reservations) { + ( + self.validator_bids, + self.validator_credits, + self.delegator_bids, + self.reservations, + ) + } +} + +/// Prunes away all validator credits for the imputed era, which should be the era ending. +/// +/// This is intended to be called at the end of an era, after calculating validator weights. +pub fn prune_validator_credits

( + provider: &mut P, + era_ending: EraId, + validator_credits: &ValidatorCredits, +) where + P: StorageProvider + RuntimeProvider + ?Sized, +{ + for (validator_public_key, inner) in validator_credits { + if inner.contains_key(&era_ending) { + provider.prune_bid(BidAddr::new_credit(validator_public_key, era_ending)) + } + } +} + +/// Returns the imputed validator bids. +pub fn get_validator_bids

(provider: &mut P, era_id: EraId) -> Result +where + P: StorageProvider + RuntimeProvider + ?Sized, +{ + let bids_keys = provider.get_keys(&KeyTag::BidAddr)?; + + let mut ret = ValidatorBidsDetail::new(); + + for key in bids_keys { + match provider.read_bid(&key)? { + Some(BidKind::Validator(validator_bid)) => { + let validator_public_key = validator_bid.validator_public_key(); + let delegator_bids = delegators(provider, validator_public_key)?; + let reservations = reservations(provider, validator_public_key)?; + ret.insert_bid( + validator_public_key.clone(), + validator_bid, + delegator_bids, + reservations, + ); + } + Some(BidKind::Credit(credit)) => { + ret.insert_credit(credit.validator_public_key().clone(), era_id, credit); + } + Some(_) => { + // noop + } + None => return Err(Error::ValidatorNotFound), + }; + } + + Ok(ret) +} + +/// Sets the imputed validator bids. +pub fn set_validator_bids

(provider: &mut P, validators: ValidatorBids) -> Result<(), Error> +where + P: StorageProvider + RuntimeProvider + ?Sized, +{ + for (validator_public_key, validator_bid) in validators.into_iter() { + let bid_addr = BidAddr::from(validator_public_key.clone()); + provider.write_bid(bid_addr.into(), BidKind::Validator(validator_bid))?; + } + Ok(()) +} + +/// Returns the unbonding purses. +pub fn get_unbonding_purses

(provider: &mut P) -> Result, Error> +where + P: StorageProvider + RuntimeProvider + ?Sized, +{ + let prefix = vec![KeyTag::BidAddr as u8, BidAddrTag::UnbondAccount as u8]; + + let unbond_keys = provider.get_keys_by_prefix(&prefix)?; + + let mut ret = BTreeMap::new(); + + for key in unbond_keys { + if let Key::BidAddr(bid_addr) = key { + match provider.read_bid(&key) { + Ok(Some(BidKind::Unbond(unbonds))) => { + ret.insert(bid_addr, *unbonds); + } + Ok(Some(_)) => { + warn!("unexpected BidKind variant {:?}", key); + } + Ok(None) => { + warn!("expected unbond record {:?}", key); + } + Err(err) => { + error!("{} {}", key, err); + } + } + } + } + + let prefix = vec![KeyTag::BidAddr as u8, BidAddrTag::UnbondPurse as u8]; + + let unbond_keys = provider.get_keys_by_prefix(&prefix)?; + for key in unbond_keys { + if let Key::BidAddr(bid_addr) = key { + match provider.read_bid(&key) { + Ok(Some(BidKind::Unbond(unbonds))) => { + ret.insert(bid_addr, *unbonds); + } + Ok(Some(_)) => { + warn!("unexpected BidKind variant {:?}", key) + } + Ok(None) => { + warn!("expected unbond record {:?}", key) + } + Err(err) => { + error!("{} {}", key, err); + } + } + } + } + + Ok(ret) +} + +/// Returns the era id. +pub fn get_era_id

(provider: &mut P) -> Result +where + P: StorageProvider + RuntimeProvider + ?Sized, +{ + read_from(provider, ERA_ID_KEY) +} + +/// Sets the era id. +pub fn set_era_id

(provider: &mut P, era_id: EraId) -> Result<(), Error> +where + P: StorageProvider + RuntimeProvider + ?Sized, +{ + write_to(provider, ERA_ID_KEY, era_id) +} + +/// Returns the era end timestamp. +pub fn get_era_end_timestamp_millis

(provider: &mut P) -> Result +where + P: StorageProvider + RuntimeProvider + ?Sized, +{ + read_from(provider, ERA_END_TIMESTAMP_MILLIS_KEY) +} + +/// Sets the era end timestamp. +pub fn set_era_end_timestamp_millis

( + provider: &mut P, + era_end_timestamp_millis: u64, +) -> Result<(), Error> +where + P: StorageProvider + RuntimeProvider + ?Sized, +{ + write_to( + provider, + ERA_END_TIMESTAMP_MILLIS_KEY, + era_end_timestamp_millis, + ) +} + +/// Returns seigniorage recipients snapshot. +pub fn get_seigniorage_recipients_snapshot

( + provider: &mut P, +) -> Result +where + P: StorageProvider + RuntimeProvider + ?Sized, +{ + read_from(provider, SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY) +} + +/// Returns seigniorage recipients snapshot in legacy format. +pub fn get_legacy_seigniorage_recipients_snapshot

( + provider: &mut P, +) -> Result +where + P: StorageProvider + RuntimeProvider + ?Sized, +{ + read_from(provider, SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY) +} + +/// Sets the setigniorage recipients snapshot. +pub fn set_seigniorage_recipients_snapshot

( + provider: &mut P, + snapshot: SeigniorageRecipientsSnapshotV2, +) -> Result<(), Error> +where + P: StorageProvider + RuntimeProvider + ?Sized, +{ + write_to(provider, SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY, snapshot) +} + +/// Returns the number of validator slots. +pub fn get_validator_slots

(provider: &mut P) -> Result +where + P: StorageProvider + RuntimeProvider + ?Sized, +{ + let validator_slots: u32 = match read_from(provider, VALIDATOR_SLOTS_KEY) { + Ok(ret) => ret, + Err(err) => { + error!("Failed to find VALIDATOR_SLOTS_KEY {}", err); + return Err(err); + } + }; + let validator_slots = validator_slots + .try_into() + .map_err(|_| Error::InvalidValidatorSlotsValue)?; + Ok(validator_slots) +} + +/// Returns auction delay. +pub fn get_auction_delay

(provider: &mut P) -> Result +where + P: StorageProvider + RuntimeProvider + ?Sized, +{ + let auction_delay: u64 = match read_from(provider, AUCTION_DELAY_KEY) { + Ok(ret) => ret, + Err(err) => { + error!("Failed to find AUCTION_DELAY_KEY {}", err); + return Err(err); + } + }; + Ok(auction_delay) +} + +fn get_unbonding_delay

(provider: &mut P) -> Result +where + P: StorageProvider + RuntimeProvider + ?Sized, +{ + read_from(provider, UNBONDING_DELAY_KEY) +} + +/// Iterates over unbonding entries and checks if a locked amount can be paid already if +/// a specific era is reached. +/// +/// This function can be called by the system only. +pub fn process_unbond_requests( + provider: &mut P, + max_delegators_per_validator: u32, +) -> Result<(), ApiError> { + if provider.get_caller() != PublicKey::System.to_account_hash() { + return Err(Error::InvalidCaller.into()); + } + + let current_era_id = provider.read_era_id()?; + + let unbonding_delay = get_unbonding_delay(provider)?; + + let unbonds = get_unbonding_purses(provider)?; + + for (bid_addr, unbond) in unbonds { + let unbond_kind = &unbond.unbond_kind().clone(); + let (retained, expired) = unbond.expired(current_era_id, unbonding_delay); + if let Some(unbonded) = expired { + for unbond_era in unbonded { + if unbond_kind.is_validator() { + provider.unbond(unbond_kind, &unbond_era).map_err(|err| { + error!(?err, "error unbonding purse"); + ApiError::from(Error::TransferToUnbondingPurse) + })?; + continue; + } + let redelegation_result = handle_redelegation( + provider, + unbond_kind, + &unbond_era, + max_delegators_per_validator, + ) + .inspect_err(|err| { + error!(?err, ?unbond_kind, ?unbond_era, "error processing unbond"); + })?; + + match redelegation_result { + UnbondRedelegationOutcome::SuccessfullyRedelegated => { + // noop; on successful re-delegation, no actual unbond occurs + } + uro @ UnbondRedelegationOutcome::NonexistantRedelegationTarget + | uro @ UnbondRedelegationOutcome::DelegationAmountBelowCap + | uro @ UnbondRedelegationOutcome::DelegationAmountAboveCap + | uro @ UnbondRedelegationOutcome::RedelegationTargetHasNoVacancy + | uro @ UnbondRedelegationOutcome::RedelegationTargetIsUnstaked + | uro @ UnbondRedelegationOutcome::Withdrawal => { + // Move funds from bid purse to unbonding purse + provider.unbond(unbond_kind, 
&unbond_era).map_err(|err| { + error!(?err, ?uro, "error unbonding purse"); + ApiError::from(Error::TransferToUnbondingPurse) + })? + } + } + } + } + if retained.eras().is_empty() { + provider.write_unbond(bid_addr, None)?; + } else { + provider.write_unbond(bid_addr, Some(retained))?; + } + } + Ok(()) +} + +/// Creates a new purse in unbonding_purses given a validator's key, amount, and a destination +/// unbonding purse. Returns the amount of motes remaining in the validator's bid purse. +pub fn create_unbonding_purse( + provider: &mut P, + validator_public_key: PublicKey, + unbond_kind: UnbondKind, + bonding_purse: URef, + amount: U512, + new_validator: Option, +) -> Result<(), Error> { + if provider + .available_balance(bonding_purse)? + .unwrap_or_default() + < amount + { + return Err(Error::UnbondTooLarge); + } + + let era_of_creation = provider.read_era_id()?; + + let bid_addr = match &unbond_kind { + UnbondKind::Validator(_) => { + let account_hash = validator_public_key.to_account_hash(); + BidAddr::UnbondAccount { + validator: account_hash, + unbonder: account_hash, + } + } + UnbondKind::DelegatedPublicKey(pk) => BidAddr::UnbondAccount { + validator: validator_public_key.to_account_hash(), + unbonder: pk.to_account_hash(), + }, + UnbondKind::DelegatedPurse(addr) => BidAddr::UnbondPurse { + validator: validator_public_key.to_account_hash(), + unbonder: *addr, + }, + }; + + let unbond_era = UnbondEra::new(bonding_purse, era_of_creation, amount, new_validator); + + let unbond = match provider.read_unbond(bid_addr)? { + Some(unbond) => { + let mut eras = unbond.take_eras(); + eras.push(unbond_era); + Unbond::new(validator_public_key, unbond_kind, eras) + } + None => Unbond::new(validator_public_key, unbond_kind, vec![unbond_era]), + }; + + provider.write_unbond(bid_addr, Some(unbond))?; + + Ok(()) +} + +/// Reward distribution target variants. +#[derive(Debug)] +pub enum DistributeTarget { + /// Validator bid. + Validator(Box), + /// Bridged validator bid. 
+ BridgedValidator { + /// Requested bid addr. + requested_validator_bid_addr: BidAddr, + /// The current bid addr for the bridged validator. + current_validator_bid_addr: BidAddr, + /// All chained bid addrs. + bridged_validator_addrs: Vec, + /// Validator bid. + validator_bid: Box, + }, + /// Delegator bid. + Delegator(Box), + /// Unbond record. + Unbond(Box), +} + +impl DistributeTarget { + /// Returns the bonding purse for this instance. + pub fn bonding_purse(&self) -> Result { + match self { + DistributeTarget::Validator(vb) => Ok(*vb.bonding_purse()), + DistributeTarget::BridgedValidator { validator_bid, .. } => { + Ok(*validator_bid.bonding_purse()) + } + DistributeTarget::Delegator(db) => Ok(*db.bonding_purse()), + DistributeTarget::Unbond(unbond) => match unbond.target_unbond_era() { + Some(unbond_era) => Ok(*unbond_era.bonding_purse()), + None => Err(Error::MissingPurse), + }, + } + } +} + +/// Returns most recent validator public key if public key has been changed +/// or the validator has withdrawn their bid completely. +pub fn get_distribution_target( + provider: &mut P, + bid_addr: BidAddr, +) -> Result { + let mut bridged_addrs = vec![]; + let mut current_validator_bid_addr = bid_addr; + for _ in 0..MAX_BRIDGE_CHAIN_LENGTH { + match provider.read_bid(¤t_validator_bid_addr.into())? 
{ + Some(BidKind::Validator(validator_bid)) => { + if !bridged_addrs.is_empty() { + return Ok(DistributeTarget::BridgedValidator { + requested_validator_bid_addr: bid_addr, + current_validator_bid_addr, + bridged_validator_addrs: bridged_addrs, + validator_bid, + }); + } + return Ok(DistributeTarget::Validator(validator_bid)); + } + Some(BidKind::Delegator(delegator_bid)) => { + return Ok(DistributeTarget::Delegator(delegator_bid)); + } + Some(BidKind::Unbond(unbond)) => { + return Ok(DistributeTarget::Unbond(unbond)); + } + Some(BidKind::Bridge(bridge)) => { + current_validator_bid_addr = + BidAddr::from(bridge.new_validator_public_key().clone()); + bridged_addrs.push(current_validator_bid_addr); + } + None => { + // in the case of missing validator or delegator bids, check unbonds + if let BidAddr::Validator(account_hash) = bid_addr { + let validator_unbond_key = BidAddr::UnbondAccount { + validator: account_hash, + unbonder: account_hash, + } + .into(); + if let Some(BidKind::Unbond(unbond)) = + provider.read_bid(&validator_unbond_key)? + { + return Ok(DistributeTarget::Unbond(unbond)); + } + return Err(Error::ValidatorNotFound); + } + + if let BidAddr::DelegatedAccount { + validator, + delegator, + } = bid_addr + { + let delegator_unbond_key = BidAddr::UnbondAccount { + validator, + unbonder: delegator, + } + .into(); + if let Some(BidKind::Unbond(unbond)) = + provider.read_bid(&delegator_unbond_key)? + { + return Ok(DistributeTarget::Unbond(unbond)); + } + return Err(Error::DelegatorNotFound); + } + + if let BidAddr::DelegatedPurse { + validator, + delegator, + } = bid_addr + { + let delegator_unbond_key = BidAddr::UnbondPurse { + validator, + unbonder: delegator, + } + .into(); + if let Some(BidKind::Unbond(unbond)) = + provider.read_bid(&delegator_unbond_key)? 
+ { + return Ok(DistributeTarget::Unbond(unbond)); + } + return Err(Error::DelegatorNotFound); + } + + break; + } + _ => { + break; + } + }; + } + Err(Error::BridgeRecordChainTooLong) +} + +#[derive(Debug)] +enum UnbondRedelegationOutcome { + Withdrawal, + SuccessfullyRedelegated, + NonexistantRedelegationTarget, + RedelegationTargetHasNoVacancy, + RedelegationTargetIsUnstaked, + DelegationAmountBelowCap, + DelegationAmountAboveCap, +} + +fn handle_redelegation

( + provider: &mut P, + unbond_kind: &UnbondKind, + unbond_era: &UnbondEra, + max_delegators_per_validator: u32, +) -> Result +where + P: StorageProvider + MintProvider + RuntimeProvider, +{ + let delegator_kind = match unbond_kind { + UnbondKind::Validator(_) => { + return Err(ApiError::AuctionError(Error::UnexpectedUnbondVariant as u8)) + } + UnbondKind::DelegatedPublicKey(pk) => DelegatorKind::PublicKey(pk.clone()), + UnbondKind::DelegatedPurse(addr) => DelegatorKind::Purse(*addr), + }; + + let redelegation_target_public_key = match unbond_era.new_validator() { + Some(public_key) => { + // get updated key if `ValidatorBid` public key was changed + let validator_bid_addr = BidAddr::from(public_key.clone()); + match read_current_validator_bid(provider, validator_bid_addr.into()) { + Ok(validator_bid) => validator_bid.validator_public_key().clone(), + Err(err) => { + error!(?err, ?unbond_era, redelegate_to=?public_key, "error redelegating"); + return Ok(UnbondRedelegationOutcome::NonexistantRedelegationTarget); + } + } + } + None => return Ok(UnbondRedelegationOutcome::Withdrawal), + }; + + let redelegation = handle_delegation( + provider, + delegator_kind, + redelegation_target_public_key, + *unbond_era.bonding_purse(), + *unbond_era.amount(), + max_delegators_per_validator, + ); + match redelegation { + Ok(_) => Ok(UnbondRedelegationOutcome::SuccessfullyRedelegated), + Err(ApiError::AuctionError(err)) if err == Error::BondTooSmall as u8 => { + Ok(UnbondRedelegationOutcome::RedelegationTargetIsUnstaked) + } + Err(ApiError::AuctionError(err)) if err == Error::DelegationAmountTooSmall as u8 => { + Ok(UnbondRedelegationOutcome::DelegationAmountBelowCap) + } + Err(ApiError::AuctionError(err)) if err == Error::DelegationAmountTooLarge as u8 => { + Ok(UnbondRedelegationOutcome::DelegationAmountAboveCap) + } + Err(ApiError::AuctionError(err)) if err == Error::ValidatorNotFound as u8 => { + Ok(UnbondRedelegationOutcome::NonexistantRedelegationTarget) + } + 
Err(ApiError::AuctionError(err)) if err == Error::ExceededDelegatorSizeLimit as u8 => { + Ok(UnbondRedelegationOutcome::RedelegationTargetHasNoVacancy) + } + Err(err) => Err(err), + } +} + +/// Checks if a reservation for a given delegator exists. +fn has_reservation

( + provider: &mut P, + delegator_kind: &DelegatorKind, + validator: &PublicKey, +) -> Result +where + P: RuntimeProvider + StorageProvider + ?Sized, +{ + let reservation_bid_key = match delegator_kind { + DelegatorKind::PublicKey(pk) => BidAddr::ReservedDelegationAccount { + validator: validator.to_account_hash(), + delegator: pk.to_account_hash(), + }, + DelegatorKind::Purse(addr) => BidAddr::ReservedDelegationPurse { + validator: validator.to_account_hash(), + delegator: *addr, + }, + } + .into(); + if let Some(BidKind::Reservation(_)) = provider.read_bid(&reservation_bid_key)? { + Ok(true) + } else { + Ok(false) + } +} + +/// If specified validator exists, and if validator is not yet at max delegators count, processes +/// delegation. For a new delegation a delegator bid record will be created to track the delegation, +/// otherwise the existing tracking record will be updated. +#[allow(clippy::too_many_arguments)] +pub fn handle_delegation

( + provider: &mut P, + delegator_kind: DelegatorKind, + validator_public_key: PublicKey, + source: URef, + amount: U512, + max_delegators_per_validator: u32, +) -> Result +where + P: StorageProvider + MintProvider + RuntimeProvider, +{ + if amount.is_zero() { + return Err(Error::BondTooSmall.into()); + } + + let validator_bid_addr = BidAddr::from(validator_public_key.clone()); + // is there such a validator? + let validator_bid = read_validator_bid(provider, &validator_bid_addr.into())?; + if amount < U512::from(validator_bid.minimum_delegation_amount()) { + return Err(Error::DelegationAmountTooSmall.into()); + } + if amount > U512::from(validator_bid.maximum_delegation_amount()) { + return Err(Error::DelegationAmountTooLarge.into()); + } + + // is there already a record for this delegator? + let delegator_bid_key = + BidAddr::new_delegator_kind(&validator_public_key, &delegator_kind).into(); + + let (target, delegator_bid) = if let Some(BidKind::Delegator(mut delegator_bid)) = + provider.read_bid(&delegator_bid_key)? + { + delegator_bid.increase_stake(amount)?; + (*delegator_bid.bonding_purse(), delegator_bid) + } else { + // is this validator over the delegator limit + // or is there a reservation for given delegator public key? 
+ let delegator_count = provider.delegator_count(&validator_bid_addr)?; + let reserved_slots_count = validator_bid.reserved_slots(); + let reservation_count = provider.reservation_count(&validator_bid_addr)?; + let has_reservation = has_reservation(provider, &delegator_kind, &validator_public_key)?; + if delegator_count >= (max_delegators_per_validator - reserved_slots_count) as usize + && !has_reservation + { + warn!( + %delegator_count, %max_delegators_per_validator, %reservation_count, %has_reservation, + "delegator_count {}, max_delegators_per_validator {}, reservation_count {}, has_reservation {}", + delegator_count, max_delegators_per_validator, reservation_count, has_reservation + ); + return Err(Error::ExceededDelegatorSizeLimit.into()); + } + + let bonding_purse = provider.create_purse()?; + let delegator_bid = + DelegatorBid::unlocked(delegator_kind, amount, bonding_purse, validator_public_key); + (bonding_purse, Box::new(delegator_bid)) + }; + + // transfer token to bonding purse + provider + .mint_transfer_direct( + Some(PublicKey::System.to_account_hash()), + source, + target, + amount, + None, + ) + .map_err(|_| Error::TransferToDelegatorPurse)? + .map_err(|mint_error| { + // Propagate mint contract's error that occured during execution of transfer + // entrypoint. This will improve UX in case of (for example) + // unapproved spending limit error. + ApiError::from(mint_error) + })?; + + let updated_amount = delegator_bid.staked_amount(); + provider.write_bid(delegator_bid_key, BidKind::Delegator(delegator_bid))?; + + Ok(updated_amount) +} + +/// If specified validator exists, and if validator is not yet at max reservations count, processes +/// reservation. For a new reservation a bid record will be created to track the reservation, +/// otherwise the existing tracking record will be updated. +#[allow(clippy::too_many_arguments)] +pub fn handle_add_reservation

(provider: &mut P, reservation: Reservation) -> Result<(), Error> +where + P: StorageProvider + MintProvider + RuntimeProvider, +{ + // is there such a validator? + let validator_bid_addr = BidAddr::from(reservation.validator_public_key().clone()); + let bid = read_validator_bid(provider, &validator_bid_addr.into())?; + + let reservation_bid_key = match reservation.delegator_kind() { + DelegatorKind::PublicKey(pk) => BidAddr::ReservedDelegationAccount { + validator: reservation.validator_public_key().to_account_hash(), + delegator: pk.to_account_hash(), + }, + DelegatorKind::Purse(addr) => BidAddr::ReservedDelegationPurse { + validator: reservation.validator_public_key().to_account_hash(), + delegator: *addr, + }, + } + .into(); + if provider.read_bid(&reservation_bid_key)?.is_none() { + // ensure reservation list has capacity to create a new reservation + let reservation_count = provider.reservation_count(&validator_bid_addr)?; + let reserved_slots = bid.reserved_slots() as usize; + if reservation_count >= reserved_slots { + warn!( + %reservation_count, %reserved_slots, + "reservation_count {}, reserved_slots {}", + reservation_count, reserved_slots + ); + return Err(Error::ExceededReservationsLimit); + } + }; + + // validate specified delegation rate + if reservation.delegation_rate() > &DELEGATION_RATE_DENOMINATOR { + return Err(Error::DelegationRateTooLarge); + } + + provider.write_bid( + reservation_bid_key, + BidKind::Reservation(Box::new(reservation)), + )?; + + Ok(()) +} + +/// Attempts to remove a reservation if one exists. If not it returns an error. +/// +/// If there is already a delegator bid associated with a given reservation it validates that +/// there are free public slots available. If not, it returns an error since the delegator +/// cannot be "downgraded". +pub fn handle_cancel_reservation

( + provider: &mut P, + validator: PublicKey, + delegator_kind: DelegatorKind, + max_delegators_per_validator: u32, +) -> Result<(), Error> +where + P: StorageProvider + MintProvider + RuntimeProvider, +{ + // is there such a validator? + let validator_bid_addr = BidAddr::from(validator.clone()); + let validator_bid = read_validator_bid(provider, &validator_bid_addr.into())?; + let validator = validator.to_account_hash(); + + // is there a reservation for this delegator? + let (reservation_bid_addr, delegator_bid_addr) = match delegator_kind { + DelegatorKind::PublicKey(pk) => { + let delegator_account_hash = pk.to_account_hash(); + ( + BidAddr::ReservedDelegationAccount { + validator, + delegator: delegator_account_hash, + }, + BidAddr::DelegatedAccount { + validator, + delegator: delegator_account_hash, + }, + ) + } + DelegatorKind::Purse(addr) => ( + BidAddr::ReservedDelegationPurse { + validator, + delegator: addr, + }, + BidAddr::DelegatedPurse { + validator, + delegator: addr, + }, + ), + }; + + if provider.read_bid(&reservation_bid_addr.into())?.is_none() { + return Err(Error::ReservationNotFound); + } + + // is there such a delegator? + if read_delegator_bid(provider, &delegator_bid_addr.into()).is_ok() { + // is there a free public slot + let reserved_slots = validator_bid.reserved_slots(); + let delegator_count = provider.delegator_count(&validator_bid_addr)?; + let used_reservation_count = provider.used_reservation_count(&validator_bid_addr)?; + let normal_delegators = delegator_count.saturating_sub(used_reservation_count); + let public_slots = max_delegators_per_validator.saturating_sub(reserved_slots); + + // cannot "downgrade" a delegator if there are no free public slots available + if public_slots == normal_delegators as u32 { + return Err(Error::ExceededDelegatorSizeLimit); + } + } + + provider.prune_bid(reservation_bid_addr); + Ok(()) +} + +/// Returns validator bid by key. +pub fn read_validator_bid

(provider: &mut P, bid_key: &Key) -> Result, Error> +where + P: StorageProvider + ?Sized, +{ + if !bid_key.is_bid_addr_key() { + return Err(Error::InvalidKeyVariant); + } + if let Some(BidKind::Validator(validator_bid)) = provider.read_bid(bid_key)? { + Ok(validator_bid) + } else { + Err(Error::ValidatorNotFound) + } +} + +/// Returns current `ValidatorBid` in case the public key was changed. +pub fn read_current_validator_bid

( + provider: &mut P, + mut bid_key: Key, +) -> Result, Error> +where + P: StorageProvider + ?Sized, +{ + if !bid_key.is_bid_addr_key() { + return Err(Error::InvalidKeyVariant); + } + + for _ in 0..MAX_BRIDGE_CHAIN_LENGTH { + match provider.read_bid(&bid_key)? { + Some(BidKind::Validator(validator_bid)) => return Ok(validator_bid), + Some(BidKind::Bridge(bridge)) => { + debug!( + ?bid_key, + ?bridge, + "read_current_validator_bid: bridge found" + ); + let validator_bid_addr = BidAddr::from(bridge.new_validator_public_key().clone()); + bid_key = validator_bid_addr.into(); + } + _ => break, + } + } + Err(Error::ValidatorNotFound) +} + +/// Returns all delegator bids for imputed validator. +pub fn read_delegator_bids

( + provider: &mut P, + validator_public_key: &PublicKey, +) -> Result, Error> +where + P: RuntimeProvider + StorageProvider + ?Sized, +{ + let mut ret = vec![]; + let bid_addr = BidAddr::from(validator_public_key.clone()); + let mut delegator_bid_keys = provider.get_keys_by_prefix( + &bid_addr + .delegated_account_prefix() + .map_err(|_| Error::Serialization)?, + )?; + delegator_bid_keys.extend( + provider.get_keys_by_prefix( + &bid_addr + .delegated_purse_prefix() + .map_err(|_| Error::Serialization)?, + )?, + ); + for delegator_bid_key in delegator_bid_keys { + let delegator_bid = read_delegator_bid(provider, &delegator_bid_key)?; + ret.push(*delegator_bid); + } + + Ok(ret) +} + +/// Returns delegator bid by key. +pub fn read_delegator_bid

(provider: &mut P, bid_key: &Key) -> Result, Error> +where + P: RuntimeProvider + ?Sized + StorageProvider, +{ + if !bid_key.is_bid_addr_key() { + return Err(Error::InvalidKeyVariant); + } + if let Some(BidKind::Delegator(delegator_bid)) = provider.read_bid(bid_key)? { + Ok(delegator_bid) + } else { + Err(Error::DelegatorNotFound) + } +} + +/// Returns all delegator slot reservations for given validator. +pub fn read_reservation_bids

( + provider: &mut P, + validator_public_key: &PublicKey, +) -> Result, Error> +where + P: RuntimeProvider + StorageProvider + ?Sized, +{ + let mut ret = vec![]; + let bid_addr = BidAddr::from(validator_public_key.clone()); + let mut reservation_bid_keys = provider.get_keys_by_prefix( + &bid_addr + .reserved_account_prefix() + .map_err(|_| Error::Serialization)?, + )?; + reservation_bid_keys.extend( + provider.get_keys_by_prefix( + &bid_addr + .reserved_purse_prefix() + .map_err(|_| Error::Serialization)?, + )?, + ); + for reservation_bid_key in reservation_bid_keys { + let reservation_bid = read_reservation_bid(provider, &reservation_bid_key)?; + ret.push(*reservation_bid); + } + + Ok(ret) +} + +/// Returns delegator slot reservation bid by key. +pub fn read_reservation_bid

(provider: &mut P, bid_key: &Key) -> Result, Error> +where + P: RuntimeProvider + ?Sized + StorageProvider, +{ + if !bid_key.is_bid_addr_key() { + return Err(Error::InvalidKeyVariant); + } + if let Some(BidKind::Reservation(reservation_bid)) = provider.read_bid(bid_key)? { + Ok(reservation_bid) + } else { + Err(Error::ReservationNotFound) + } +} + +/// Applies seigniorage recipient changes. +pub fn seigniorage_recipients( + validator_weights: &ValidatorWeights, + validator_bids: &ValidatorBids, + delegator_bids: &DelegatorBids, + reservations: &Reservations, +) -> Result { + let mut recipients = SeigniorageRecipientsV2::new(); + for (validator_public_key, validator_total_weight) in validator_weights { + // check if validator bid exists before processing. + let validator_bid = validator_bids + .get(validator_public_key) + .ok_or(Error::ValidatorNotFound)?; + // calculate delegator portion(s), if any + let mut delegators_weight = U512::zero(); + let mut delegators_stake = BTreeMap::new(); + if let Some(delegators) = delegator_bids.get(validator_public_key) { + for delegator_bid in delegators { + if delegator_bid.staked_amount().is_zero() { + continue; + } + let delegator_staked_amount = delegator_bid.staked_amount(); + delegators_weight = delegators_weight.saturating_add(delegator_staked_amount); + let delegator_kind = delegator_bid.delegator_kind(); + delegators_stake.insert(delegator_kind.clone(), delegator_staked_amount); + } + } + + let mut reservation_delegation_rates = BTreeMap::new(); + if let Some(reservations) = reservations.get(validator_public_key) { + for reservation in reservations { + reservation_delegation_rates.insert( + reservation.delegator_kind().clone(), + *reservation.delegation_rate(), + ); + } + } + + // determine validator's personal stake (total weight - sum of delegators weight) + let validator_stake = validator_total_weight.saturating_sub(delegators_weight); + let seigniorage_recipient = SeigniorageRecipientV2::new( + validator_stake, + 
*validator_bid.delegation_rate(), + delegators_stake, + reservation_delegation_rates, + ); + recipients.insert(validator_public_key.clone(), seigniorage_recipient); + } + Ok(recipients) +} + +/// Returns the era validators from a snapshot. +/// +/// This is `pub` as it is used not just in the relevant auction entry point, but also by the +/// engine state while directly querying for the era validators. +pub fn era_validators_from_snapshot(snapshot: SeigniorageRecipientsSnapshotV2) -> EraValidators { + snapshot + .into_iter() + .map(|(era_id, recipients)| { + let validator_weights = recipients + .into_iter() + .filter_map(|(public_key, bid)| bid.total_stake().map(|stake| (public_key, stake))) + .collect::(); + (era_id, validator_weights) + }) + .collect() +} + +/// Returns the era validators from a legacy snapshot. +pub(crate) fn era_validators_from_legacy_snapshot( + snapshot: SeigniorageRecipientsSnapshotV1, +) -> EraValidators { + snapshot + .into_iter() + .map(|(era_id, recipients)| { + let validator_weights = recipients + .into_iter() + .filter_map(|(public_key, bid)| bid.total_stake().map(|stake| (public_key, stake))) + .collect::(); + (era_id, validator_weights) + }) + .collect() +} + +/// Initializes the vesting schedule of provided bid if the provided timestamp is greater than +/// or equal to the bid's initial release timestamp and the bid is owned by a genesis +/// validator. +/// +/// Returns `true` if the provided bid's vesting schedule was initialized. +pub fn process_with_vesting_schedule

( + provider: &mut P, + validator_bid: &mut ValidatorBid, + timestamp_millis: u64, + vesting_schedule_period_millis: u64, +) -> Result +where + P: StorageProvider + RuntimeProvider + ?Sized, +{ + let validator_public_key = validator_bid.validator_public_key().clone(); + + let delegator_bids = read_delegator_bids(provider, &validator_public_key)?; + for mut delegator_bid in delegator_bids { + let delegator_staked_amount = delegator_bid.staked_amount(); + let delegator_vesting_schedule = match delegator_bid.vesting_schedule_mut() { + Some(vesting_schedule) => vesting_schedule, + None => continue, + }; + if timestamp_millis < delegator_vesting_schedule.initial_release_timestamp_millis() { + continue; + } + if delegator_vesting_schedule + .initialize_with_schedule(delegator_staked_amount, vesting_schedule_period_millis) + { + let delegator_bid_key = delegator_bid.bid_addr().into(); + provider.write_bid( + delegator_bid_key, + BidKind::Delegator(Box::new(delegator_bid)), + )?; + } + } + + let validator_staked_amount = validator_bid.staked_amount(); + let validator_vesting_schedule = match validator_bid.vesting_schedule_mut() { + Some(vesting_schedule) => vesting_schedule, + None => return Ok(false), + }; + if timestamp_millis < validator_vesting_schedule.initial_release_timestamp_millis() { + Ok(false) + } else { + Ok(validator_vesting_schedule + .initialize_with_schedule(validator_staked_amount, vesting_schedule_period_millis)) + } +} + +/// Returns all delegators for imputed validator. +pub fn delegators

( + provider: &mut P, + validator_public_key: &PublicKey, +) -> Result>, Error> +where + P: RuntimeProvider + ?Sized + StorageProvider, +{ + let mut ret = vec![]; + let bid_addr = BidAddr::from(validator_public_key.clone()); + let mut delegator_bid_keys = provider.get_keys_by_prefix( + &bid_addr + .delegated_account_prefix() + .map_err(|_| Error::Serialization)?, + )?; + delegator_bid_keys.extend( + provider.get_keys_by_prefix( + &bid_addr + .delegated_purse_prefix() + .map_err(|_| Error::Serialization)?, + )?, + ); + + for delegator_bid_key in delegator_bid_keys { + let delegator = read_delegator_bid(provider, &delegator_bid_key)?; + ret.push(delegator); + } + + Ok(ret) +} + +/// Returns all delegator slot reservations for given validator. +pub fn reservations

( + provider: &mut P, + validator_public_key: &PublicKey, +) -> Result>, Error> +where + P: RuntimeProvider + ?Sized + StorageProvider, +{ + let mut ret = vec![]; + let bid_addr = BidAddr::from(validator_public_key.clone()); + let mut reservation_bid_keys = provider.get_keys_by_prefix( + &bid_addr + .reserved_account_prefix() + .map_err(|_| Error::Serialization)?, + )?; + reservation_bid_keys.extend( + provider.get_keys_by_prefix( + &bid_addr + .reserved_purse_prefix() + .map_err(|_| Error::Serialization)?, + )?, + ); + + for reservation_bid_key in reservation_bid_keys { + let reservation = read_reservation_bid(provider, &reservation_bid_key)?; + ret.push(reservation); + } + + Ok(ret) +} + +/// Handles forced unbonding of delegators when a validator raises the min or lowers the max amount +/// they allow delegators to stake with them. +pub fn process_updated_delegator_stake_boundaries( + provider: &mut P, + validator_bid: &mut ValidatorBid, + minimum_delegation_amount: u64, + maximum_delegation_amount: u64, +) -> Result<(), Error> { + // check modified delegation bookends + let raised_min = validator_bid.minimum_delegation_amount() < minimum_delegation_amount; + let lowered_max = validator_bid.maximum_delegation_amount() > maximum_delegation_amount; + if !raised_min && !lowered_max { + return Ok(()); + } + + let era_end_timestamp_millis = get_era_end_timestamp_millis(provider)?; + if validator_bid.is_locked(era_end_timestamp_millis) { + // cannot increase the min or decrease the max while vesting is locked + // as this could result in vested delegators being forcibly unbonded, thus + // prematurely allowing liquidity on a network still in its vesting period. 
+ return Err(Error::VestingLockout); + } + + // set updated delegation amount range + validator_bid + .set_delegation_amount_boundaries(minimum_delegation_amount, maximum_delegation_amount); + + let validator_public_key = validator_bid.validator_public_key(); + let min_delegation = minimum_delegation_amount.into(); + let max_delegation = maximum_delegation_amount.into(); + let delegators = read_delegator_bids(provider, validator_public_key)?; + for mut delegator in delegators { + let delegator_staked_amount = delegator.staked_amount(); + let unbond_amount = if delegator_staked_amount < min_delegation { + // fully unbond the staked amount as it is below the min + delegator_staked_amount + } else if delegator_staked_amount > max_delegation { + // partially unbond the staked amount to not exceed the max + delegator_staked_amount.saturating_sub(max_delegation) + } else { + // nothing to unbond + U512::zero() + }; + // skip delegators within the range + if unbond_amount.is_zero() { + continue; + } + + let unbond_kind = delegator.unbond_kind(); + create_unbonding_purse( + provider, + validator_public_key.clone(), + unbond_kind, + *delegator.bonding_purse(), + unbond_amount, + None, + )?; + + let updated_stake = match delegator.decrease_stake(unbond_amount, era_end_timestamp_millis) + { + Ok(updated_stake) => updated_stake, + // Work around the case when the locked amounts table has yet to be + // initialized (likely pre-90 day mark). 
+ Err(Error::DelegatorFundsLocked) => continue, + Err(err) => return Err(err), + }; + + let delegator_bid_addr = delegator.bid_addr(); + if updated_stake.is_zero() { + debug!("pruning delegator bid {delegator_bid_addr}"); + provider.prune_bid(delegator_bid_addr); + } else { + debug!( + "forced undelegation for {delegator_bid_addr} reducing {delegator_staked_amount} by {unbond_amount} to {updated_stake}", + ); + provider.write_bid( + delegator_bid_addr.into(), + BidKind::Delegator(Box::new(delegator)), + )?; + } + } + Ok(()) +} + +/// Handles an attempt by a validator to lower the number of delegator reserve slots +/// they allow. An attempt to lower the number below the current count of occupied reservations +/// will fail. An attempt to increase the number above the global allowed maximum of a given +/// network will also fail. +pub fn process_updated_delegator_reservation_slots( + provider: &mut P, + validator_bid: &mut ValidatorBid, + max_delegators_per_validator: u32, + reserved_slots: u32, +) -> Result<(), Error> { + if reserved_slots == validator_bid.reserved_slots() { + return Ok(()); + } + + let validator_public_key = validator_bid.validator_public_key(); + + let validator_bid_addr = BidAddr::from(validator_public_key.clone()); + // cannot reserve fewer slots than there are reservations + let reservation_count = provider.reservation_count(&validator_bid_addr)?; + if reserved_slots < reservation_count as u32 { + return Err(Error::ReservationSlotsCountTooSmall); + } + + // cannot reserve more slots than there are free delegator slots + let max_reserved_slots = { + let used_reservation_count = provider.used_reservation_count(&validator_bid_addr)?; + let delegator_count = provider.delegator_count(&validator_bid_addr)?; + let normal_delegators = delegator_count.saturating_sub(used_reservation_count) as u32; + max_delegators_per_validator.saturating_sub(normal_delegators) + }; + if reserved_slots > max_reserved_slots { + return 
Err(Error::ExceededReservationSlotsLimit); + } + validator_bid.with_reserved_slots(reserved_slots); + Ok(()) +} + +/// Processes undelegation with optional redelegation target. +pub fn process_undelegation( + provider: &mut P, + delegator_kind: DelegatorKind, + validator_public_key: PublicKey, + amount: U512, + new_validator: Option, +) -> Result { + match &delegator_kind { + DelegatorKind::PublicKey(pk) => { + let account_hash = pk.to_account_hash(); + if !provider.is_allowed_session_caller(&account_hash) { + return Err(Error::InvalidContext); + } + } + DelegatorKind::Purse(addr) => { + let uref = URef::new(*addr, AccessRights::WRITE); + if !provider.is_valid_uref(uref) { + return Err(Error::InvalidContext); + } + } + } + + let new_validator_public_key = { + // check redelegation target for existence + if let Some(new_validator_public_key) = new_validator { + let new_validator_bid_key = BidAddr::from(new_validator_public_key.clone()).into(); + match read_validator_bid(provider, &new_validator_bid_key) { + Err(Error::ValidatorNotFound) => return Err(Error::RedelegationValidatorNotFound), + Err(err) => return Err(err), + Ok(_) => Some(new_validator_public_key), + } + } else { + None + } + }; + + let validator_bid_key = BidAddr::from(validator_public_key.clone()).into(); + let validator_bid = read_validator_bid(provider, &validator_bid_key)?; + + let delegator_bid_addr = BidAddr::new_delegator_kind(&validator_public_key, &delegator_kind); + let mut delegator_bid = read_delegator_bid(provider, &delegator_bid_addr.into())?; + + let bonding_purse = *delegator_bid.bonding_purse(); + let initial_staked_amount = delegator_bid.staked_amount(); + let (unbonding_amount, updated_stake) = { + let era_end_timestamp_millis = get_era_end_timestamp_millis(provider)?; + + // cannot unbond more than you have + let unbonding_amount = U512::min(amount, initial_staked_amount); + let rem = delegator_bid.decrease_stake(unbonding_amount, era_end_timestamp_millis)?; + if rem < 
validator_bid.minimum_delegation_amount().into() { + // if the remaining stake is less than the validator's min delegation amount + // unbond all the delegator's stake + let zeroed = delegator_bid.decrease_stake(rem, era_end_timestamp_millis)?; + (initial_staked_amount, zeroed) + } else { + (unbonding_amount, rem) + } + }; + + if updated_stake.is_zero() { + debug!("pruning delegator bid {}", delegator_bid_addr); + provider.prune_bid(delegator_bid_addr); + } else { + provider.write_bid(delegator_bid_addr.into(), BidKind::Delegator(delegator_bid))?; + } + + if !unbonding_amount.is_zero() { + let unbond_kind = delegator_kind.into(); + + create_unbonding_purse( + provider, + validator_public_key, + unbond_kind, + bonding_purse, + unbonding_amount, + new_validator_public_key, + )?; + + debug!( + "undelegation for {delegator_bid_addr} reducing {initial_staked_amount} by {unbonding_amount} to {updated_stake}" + ); + } + + Ok(updated_stake) +} + +/// Retrieves the total reward for a given validator or delegator in a given era. +pub fn reward( + validator: &PublicKey, + delegator: Option<&DelegatorKind>, + era_id: EraId, + rewards: &[U512], + seigniorage_recipients_snapshot: &SeigniorageRecipientsSnapshot, +) -> Result, Error> { + let validator_rewards = + match rewards_per_validator(validator, era_id, rewards, seigniorage_recipients_snapshot) { + Ok(rewards) => rewards, + Err(Error::ValidatorNotFound) => return Ok(None), + Err(Error::MissingSeigniorageRecipients) => return Ok(None), + Err(err) => return Err(err), + }; + + let reward = validator_rewards + .into_iter() + .map(|reward_info| { + if let Some(delegator) = delegator { + reward_info + .delegator_rewards + .get(delegator) + .copied() + .unwrap_or_default() + } else { + reward_info.validator_reward + } + }) + .sum(); + + Ok(Some(reward)) +} + +/// Calculates the reward for a given validator for a given era. 
+pub(crate) fn rewards_per_validator( + validator: &PublicKey, + era_id: EraId, + rewards: &[U512], + seigniorage_recipients_snapshot: &SeigniorageRecipientsSnapshot, +) -> Result, Error> { + let mut results = Vec::with_capacity(rewards.len()); + + for (reward_amount, eras_back) in rewards + .iter() + .enumerate() + .map(move |(i, &amount)| (amount, i as u64)) + // do not process zero amounts, unless they are for the current era (we still want to + // record zero allocations for the current validators in EraInfo) + .filter(|(amount, eras_back)| !amount.is_zero() || *eras_back == 0) + { + let total_reward = Ratio::from(reward_amount); + let rewarded_era = era_id + .checked_sub(eras_back) + .ok_or(Error::MissingSeigniorageRecipients)?; + + // try to find validator in seigniorage snapshot + let maybe_seigniorage_recipient = match seigniorage_recipients_snapshot { + SeigniorageRecipientsSnapshot::V1(snapshot) => snapshot + .get(&rewarded_era) + .ok_or(Error::MissingSeigniorageRecipients)? + .get(validator) + .cloned() + .map(SeigniorageRecipient::V1), + SeigniorageRecipientsSnapshot::V2(snapshot) => snapshot + .get(&rewarded_era) + .ok_or(Error::MissingSeigniorageRecipients)? + .get(validator) + .cloned() + .map(SeigniorageRecipient::V2), + }; + + let Some(recipient) = maybe_seigniorage_recipient else { + // We couldn't find the validator. If the reward amount is zero, we don't care - + // the validator wasn't supposed to be rewarded in this era, anyway. Otherwise, + // return an error. + if reward_amount.is_zero() { + continue; + } else { + return Err(Error::ValidatorNotFound); + } + }; + + let total_stake = recipient.total_stake().ok_or(Error::ArithmeticOverflow)?; + + if total_stake.is_zero() { + // The validator has completely unbonded. We can't compute the delegators' part (as + // their stakes are also zero), so we just give the whole reward to the validator. 
+ // When used from `distribute`, we will mint the reward into their bonding purse + // and increase their unbond request by the corresponding amount. + + results.push(RewardsPerValidator { + validator_reward: reward_amount, + delegator_rewards: BTreeMap::new(), + }); + continue; + } + + let delegator_total_stake: U512 = recipient + .delegator_total_stake() + .ok_or(Error::ArithmeticOverflow)?; + + // calculate part of reward to be distributed to delegators before commission + let base_delegators_part: Ratio = { + let reward_multiplier: Ratio = Ratio::new(delegator_total_stake, total_stake); + total_reward + .checked_mul(&reward_multiplier) + .ok_or(Error::ArithmeticOverflow)? + }; + + let default = BTreeMap::new(); + let reservation_delegation_rates = + recipient.reservation_delegation_rates().unwrap_or(&default); + // calculate commission and final reward for each delegator + let mut delegator_rewards: BTreeMap = BTreeMap::new(); + for (delegator_kind, delegator_stake) in recipient.delegator_stake().iter() { + let reward_multiplier = Ratio::new(*delegator_stake, delegator_total_stake); + let base_reward = base_delegators_part * reward_multiplier; + let delegation_rate = *reservation_delegation_rates + .get(delegator_kind) + .unwrap_or(recipient.delegation_rate()); + let commission_rate = Ratio::new( + U512::from(delegation_rate), + U512::from(DELEGATION_RATE_DENOMINATOR), + ); + let commission: Ratio = base_reward + .checked_mul(&commission_rate) + .ok_or(Error::ArithmeticOverflow)?; + let reward = base_reward + .checked_sub(&commission) + .ok_or(Error::ArithmeticOverflow)?; + delegator_rewards.insert(delegator_kind.clone(), reward.to_integer()); + } + + let total_delegator_payout: U512 = + delegator_rewards.iter().map(|(_, &amount)| amount).sum(); + + let validator_reward = reward_amount - total_delegator_payout; + + results.push(RewardsPerValidator { + validator_reward, + delegator_rewards, + }); + } + Ok(results) +} + +/// Aggregated rewards data for a 
validator. +#[derive(Debug, Default)] +pub struct RewardsPerValidator { + validator_reward: U512, + delegator_rewards: BTreeMap, +} + +impl RewardsPerValidator { + /// The validator reward amount. + pub fn validator_reward(&self) -> U512 { + self.validator_reward + } + + /// The rewards for this validator's delegators. + pub fn delegator_rewards(&self) -> &BTreeMap { + &self.delegator_rewards + } + + /// The rewards for this validator's delegators. + pub fn take_delegator_rewards(self) -> BTreeMap { + self.delegator_rewards + } +} diff --git a/storage/src/system/auction/providers.rs b/storage/src/system/auction/providers.rs new file mode 100644 index 0000000000..7bffbc207c --- /dev/null +++ b/storage/src/system/auction/providers.rs @@ -0,0 +1,125 @@ +use std::collections::BTreeSet; + +use casper_types::{ + account::AccountHash, + bytesrepr::{FromBytes, ToBytes}, + system::{ + auction::{BidAddr, BidKind, EraInfo, Error, Unbond, UnbondEra, UnbondKind}, + mint, + }, + CLTyped, Key, KeyTag, URef, U512, +}; + +/// Provider of runtime host functionality. +pub trait RuntimeProvider { + /// This method should return the caller of the current context. + fn get_caller(&self) -> AccountHash; + + /// Checks if account_hash matches the active session's account. + fn is_allowed_session_caller(&self, account_hash: &AccountHash) -> bool; + + /// Checks if uref is in access rights. + fn is_valid_uref(&self, uref: URef) -> bool; + + /// Gets named key under a `name`. + fn named_keys_get(&self, name: &str) -> Option; + + /// Gets keys in a given keyspace + fn get_keys(&mut self, key_tag: &KeyTag) -> Result, Error>; + + /// Gets keys by prefix. + fn get_keys_by_prefix(&mut self, prefix: &[u8]) -> Result, Error>; + + /// Returns the current number of delegators for this validator. + fn delegator_count(&mut self, bid_addr: &BidAddr) -> Result; + + /// Returns number of reservations for this validator. 
+ fn reservation_count(&mut self, bid_addr: &BidAddr) -> Result; + + /// Returns number of reservations for which a delegator bid exists. + fn used_reservation_count(&mut self, bid_addr: &BidAddr) -> Result; + + /// Returns vesting schedule period. + fn vesting_schedule_period_millis(&self) -> u64; + + /// Check if auction bids are allowed. + fn allow_auction_bids(&self) -> bool; + + /// Check if auction should compute rewards. + fn should_compute_rewards(&self) -> bool; +} + +/// Provides functionality of a contract storage. +pub trait StorageProvider { + /// Reads data from [`URef`]. + fn read(&mut self, uref: URef) -> Result, Error>; + + /// Writes data to [`URef]. + fn write(&mut self, uref: URef, value: T) -> Result<(), Error>; + + /// Reads [`casper_types::system::auction::Bid`] at account hash derived from given public key + fn read_bid(&mut self, key: &Key) -> Result, Error>; + + /// Writes given [`BidKind`] at given key. + fn write_bid(&mut self, key: Key, bid_kind: BidKind) -> Result<(), Error>; + + /// Reads [`Unbond`]s at bid address. + fn read_unbond(&mut self, bid_addr: BidAddr) -> Result, Error>; + + /// Writes given [`Unbond`] if some, else prunes if none at bid address. + fn write_unbond(&mut self, bid_addr: BidAddr, unbond: Option) -> Result<(), Error>; + + /// Records era info. + fn record_era_info(&mut self, era_info: EraInfo) -> Result<(), Error>; + + /// Prunes a given bid at [`BidAddr`]. + fn prune_bid(&mut self, bid_addr: BidAddr); +} + +/// Provides an access to mint. +pub trait MintProvider { + /// Returns successfully unbonded stake to origin account. + fn unbond(&mut self, unbond_kind: &UnbondKind, unbond_era: &UnbondEra) -> Result<(), Error>; + + /// Allows optimized auction and mint interaction. + /// Intended to be used only by system contracts to manage staked purses. 
+ fn mint_transfer_direct( + &mut self, + to: Option, + source: URef, + target: URef, + amount: U512, + id: Option, + ) -> Result, Error>; + + /// Mint `amount` new token into `existing_purse`. + /// Returns unit on success, otherwise an error. + fn mint_into_existing_purse(&mut self, amount: U512, existing_purse: URef) + -> Result<(), Error>; + + /// Creates new purse. + fn create_purse(&mut self) -> Result; + + /// Gets purse balance. + fn available_balance(&mut self, purse: URef) -> Result, Error>; + + /// Reads the base round reward. + fn read_base_round_reward(&mut self) -> Result; + + /// Mints new token with given `initial_balance` balance. Returns new purse on success, + /// otherwise an error. + fn mint(&mut self, amount: U512) -> Result; + + /// Reduce total supply by `amount`. Returns unit on success, otherwise + /// an error. + fn reduce_total_supply(&mut self, amount: U512) -> Result<(), Error>; +} + +/// Provider of an account related functionality. +pub trait AccountProvider { + /// Get currently executing account's purse. + fn get_main_purse(&self) -> Result; + + /// Set main purse. + fn set_main_purse(&mut self, purse: URef); +} diff --git a/storage/src/system/burn.rs b/storage/src/system/burn.rs new file mode 100644 index 0000000000..3c88d1e622 --- /dev/null +++ b/storage/src/system/burn.rs @@ -0,0 +1,264 @@ +use std::{cell::RefCell, convert::TryFrom, rc::Rc}; +use thiserror::Error; + +use casper_types::{ + bytesrepr::FromBytes, + system::{mint, mint::Error as MintError}, + AccessRights, CLType, CLTyped, CLValue, CLValueError, Key, RuntimeArgs, RuntimeFootprint, + StoredValue, StoredValueTypeMismatch, URef, U512, +}; + +use crate::{ + global_state::{error::Error as GlobalStateError, state::StateReader}, + tracking_copy::{TrackingCopy, TrackingCopyError, TrackingCopyExt}, +}; + +/// Burn error. +#[derive(Clone, Error, Debug)] +pub enum BurnError { + /// Invalid key variant. 
+ #[error("Invalid key {0}")] + UnexpectedKeyVariant(Key), + /// Type mismatch error. + #[error("{}", _0)] + TypeMismatch(StoredValueTypeMismatch), + /// Forged reference error. + #[error("Forged reference: {}", _0)] + ForgedReference(URef), + /// Invalid access. + #[error("Invalid access rights: {}", required)] + InvalidAccess { + /// Required access rights of the operation. + required: AccessRights, + }, + /// Error converting a CLValue. + #[error("{0}")] + CLValue(CLValueError), + /// Invalid purse. + #[error("Invalid purse")] + InvalidPurse, + /// Invalid argument. + #[error("Invalid argument")] + InvalidArgument, + /// Missing argument. + #[error("Missing argument")] + MissingArgument, + /// Invalid purse. + #[error("Attempt to transfer amount 0")] + AttemptToBurnZero, + /// Invalid operation. + #[error("Invalid operation")] + InvalidOperation, + /// Disallowed transfer attempt (private chain). + #[error("Either the source or the target must be an admin (private chain).")] + RestrictedBurnAttempted, + /// Could not determine if target is an admin (private chain). + #[error("Unable to determine if the target of a transfer is an admin")] + UnableToVerifyTargetIsAdmin, + /// Tracking copy error. + #[error("{0}")] + TrackingCopy(TrackingCopyError), + /// Mint error. + #[error("{0}")] + Mint(MintError), +} + +impl From for BurnError { + fn from(gse: GlobalStateError) -> Self { + BurnError::TrackingCopy(TrackingCopyError::Storage(gse)) + } +} + +impl From for BurnError { + fn from(tce: TrackingCopyError) -> Self { + BurnError::TrackingCopy(tce) + } +} + +/// Mint's burn arguments. +/// +/// A struct has a benefit of static typing, which is helpful while resolving the arguments. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct BurnArgs { + source: URef, + amount: U512, +} + +impl BurnArgs { + /// Creates new transfer arguments. + pub fn new(source: URef, amount: U512) -> Self { + Self { source, amount } + } + + /// Returns `source` field. 
+ pub fn source(&self) -> URef { + self.source + } + + /// Returns `amount` field. + pub fn amount(&self) -> U512 { + self.amount + } +} + +impl TryFrom for RuntimeArgs { + type Error = CLValueError; + + fn try_from(burn_args: BurnArgs) -> Result { + let mut runtime_args = RuntimeArgs::new(); + + runtime_args.insert(mint::ARG_SOURCE, burn_args.source)?; + runtime_args.insert(mint::ARG_AMOUNT, burn_args.amount)?; + + Ok(runtime_args) + } +} + +/// State of a builder of a `BurnArgs`. +/// +/// Purpose of this builder is to resolve native burn args into BurnTargetMode and a +/// [`BurnArgs`] instance to execute actual token burn on the mint contract. +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct BurnRuntimeArgsBuilder { + inner: RuntimeArgs, +} + +impl BurnRuntimeArgsBuilder { + /// Creates new burn args builder. + /// + /// Takes an incoming runtime args that represents native burn's arguments. + pub fn new(imputed_runtime_args: RuntimeArgs) -> BurnRuntimeArgsBuilder { + BurnRuntimeArgsBuilder { + inner: imputed_runtime_args, + } + } + + /// Checks if a purse exists. + fn purse_exists(&self, uref: URef, tracking_copy: Rc>>) -> bool + where + R: StateReader, + { + let key = match tracking_copy + .borrow_mut() + .get_purse_balance_key(uref.into()) + { + Ok(key) => key, + Err(_) => return false, + }; + tracking_copy + .borrow_mut() + .get_available_balance(key) + .is_ok() + } + + /// Resolves the source purse of the burn. + /// + /// User can optionally pass a "source" argument which should refer to an [`URef`] existing in + /// user's named keys. When the "source" argument is missing then user's main purse is assumed. + /// + /// Returns resolved [`URef`]. 
+ fn resolve_source_uref( + &self, + account: &RuntimeFootprint, + tracking_copy: Rc>>, + ) -> Result + where + R: StateReader, + { + let imputed_runtime_args = &self.inner; + let arg_name = mint::ARG_SOURCE; + let uref = match imputed_runtime_args.get(arg_name) { + Some(cl_value) if *cl_value.cl_type() == CLType::URef => { + self.map_cl_value::(cl_value)? + } + Some(cl_value) if *cl_value.cl_type() == CLType::Option(CLType::URef.into()) => { + let Some(uref): Option = self.map_cl_value(cl_value)? else { + return account.main_purse().ok_or(BurnError::InvalidOperation); + }; + uref + } + Some(_) => return Err(BurnError::InvalidArgument), + None => return account.main_purse().ok_or(BurnError::InvalidOperation), /* if no source purse passed use account + * main purse */ + }; + if account + .main_purse() + .ok_or(BurnError::InvalidOperation)? + .addr() + == uref.addr() + { + return Ok(uref); + } + + let normalized_uref = Key::URef(uref).normalize(); + let maybe_named_key = account + .named_keys() + .keys() + .find(|&named_key| named_key.normalize() == normalized_uref); + + match maybe_named_key { + Some(Key::URef(found_uref)) => { + if found_uref.is_writeable() { + // it is a URef and caller has access but is it a purse URef? + if !self.purse_exists(found_uref.to_owned(), tracking_copy) { + return Err(BurnError::InvalidPurse); + } + + Ok(uref) + } else { + Err(BurnError::InvalidAccess { + required: AccessRights::WRITE, + }) + } + } + Some(key) => Err(BurnError::TypeMismatch(StoredValueTypeMismatch::new( + "Key::URef".to_string(), + key.type_string(), + ))), + None => Err(BurnError::ForgedReference(uref)), + } + } + + /// Resolves amount. + /// + /// User has to specify "amount" argument that could be either a [`U512`] or a u64. 
+ fn resolve_amount(&self) -> Result { + let imputed_runtime_args = &self.inner; + + let amount = match imputed_runtime_args.get(mint::ARG_AMOUNT) { + Some(amount_value) if *amount_value.cl_type() == CLType::U512 => { + self.map_cl_value(amount_value)? + } + Some(amount_value) if *amount_value.cl_type() == CLType::U64 => { + let amount: u64 = self.map_cl_value(amount_value)?; + U512::from(amount) + } + Some(_) => return Err(BurnError::InvalidArgument), + None => return Err(BurnError::MissingArgument), + }; + + if amount.is_zero() { + return Err(BurnError::AttemptToBurnZero); + } + + Ok(amount) + } + + /// Creates new [`BurnArgs`] instance. + pub fn build( + self, + from: &RuntimeFootprint, + tracking_copy: Rc>>, + ) -> Result + where + R: StateReader, + { + let source = self.resolve_source_uref(from, Rc::clone(&tracking_copy))?; + let amount = self.resolve_amount()?; + Ok(BurnArgs { source, amount }) + } + + fn map_cl_value(&self, cl_value: &CLValue) -> Result { + cl_value.clone().into_t().map_err(BurnError::CLValue) + } +} diff --git a/storage/src/system/error.rs b/storage/src/system/error.rs new file mode 100644 index 0000000000..cfb8c1914e --- /dev/null +++ b/storage/src/system/error.rs @@ -0,0 +1,10 @@ +use casper_types::account::AccountHash; + +/// Implementation level errors for system contract providers +#[derive(Debug)] +pub enum ProviderError { + /// System contract registry. + SystemEntityRegistry, + /// Account hash. + AccountHash(AccountHash), +} diff --git a/storage/src/system/genesis.rs b/storage/src/system/genesis.rs new file mode 100644 index 0000000000..872bdbb168 --- /dev/null +++ b/storage/src/system/genesis.rs @@ -0,0 +1,275 @@ +//! Support for a genesis process. 
+#![allow(unused_imports)] + +use std::{ + cell::RefCell, + collections::{BTreeMap, BTreeSet}, + fmt, iter, + rc::Rc, +}; + +use itertools::Itertools; +use num::Zero; +use num_rational::Ratio; +use rand::{ + distributions::{Distribution, Standard}, + Rng, +}; +use serde::{Deserialize, Serialize}; + +use casper_types::{ + addressable_entity::{ + ActionThresholds, EntityKind, EntityKindTag, MessageTopics, NamedKeyAddr, NamedKeyValue, + }, + bytesrepr, + contracts::NamedKeys, + execution::Effects, + system::{ + auction::{ + self, BidAddr, BidKind, DelegationRate, Delegator, SeigniorageRecipientV2, + SeigniorageRecipients, SeigniorageRecipientsSnapshot, SeigniorageRecipientsSnapshotV2, + SeigniorageRecipientsV2, Staking, ValidatorBid, AUCTION_DELAY_KEY, + DEFAULT_SEIGNIORAGE_RECIPIENTS_SNAPSHOT_VERSION, DELEGATION_RATE_DENOMINATOR, + ERA_END_TIMESTAMP_MILLIS_KEY, ERA_ID_KEY, INITIAL_ERA_END_TIMESTAMP_MILLIS, + INITIAL_ERA_ID, LOCKED_FUNDS_PERIOD_KEY, SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY, + SEIGNIORAGE_RECIPIENTS_SNAPSHOT_VERSION_KEY, UNBONDING_DELAY_KEY, VALIDATOR_SLOTS_KEY, + }, + handle_payment::{self, ACCUMULATION_PURSE_KEY}, + mint::{ + self, ARG_ROUND_SEIGNIORAGE_RATE, MINT_GAS_HOLD_HANDLING_KEY, + MINT_GAS_HOLD_INTERVAL_KEY, ROUND_SEIGNIORAGE_RATE_KEY, TOTAL_SUPPLY_KEY, + }, + SystemEntityType, AUCTION, HANDLE_PAYMENT, MINT, + }, + AccessRights, AddressableEntity, AddressableEntityHash, AdministratorAccount, BlockGlobalAddr, + BlockTime, ByteCode, ByteCodeAddr, ByteCodeHash, ByteCodeKind, CLValue, Chainspec, + ChainspecRegistry, Digest, EntityAddr, EntityVersions, EntryPointAddr, EntryPointValue, + EntryPoints, EraId, FeeHandling, GenesisAccount, GenesisConfig, Groups, HashAddr, Key, Motes, + Package, PackageHash, PackageStatus, Phase, ProtocolVersion, PublicKey, RefundHandling, + StoredValue, SystemConfig, SystemHashRegistry, Tagged, TimeDiff, URef, WasmConfig, U512, +}; + +use crate::{ + global_state::state::StateProvider, + system::genesis::{ + 
account_contract_installer::AccountContractInstaller, + entity_installer::EntityGenesisInstaller, + }, + tracking_copy::{TrackingCopy, TrackingCopyError}, + AddressGenerator, +}; + +mod account_contract_installer; +mod entity_installer; + +const DEFAULT_ADDRESS: [u8; 32] = [0; 32]; + +const NO_WASM: bool = true; + +/// Error returned as a result of a failed genesis process. +#[derive(Clone, Debug)] +pub enum GenesisError { + /// Error creating a runtime. + StateUninitialized, + /// Error obtaining the mint's contract key. + InvalidMintKey, + /// Missing mint contract. + MissingMintContract, + /// Unexpected stored value variant. + UnexpectedStoredValue, + /// Error executing the mint system contract. + MintError(mint::Error), + /// Error converting a [`CLValue`] to a concrete type. + CLValue(String), + /// Specified validator does not exist among the genesis accounts. + OrphanedDelegator { + /// Validator's public key. + validator_public_key: PublicKey, + /// Delegator's public key. + delegator_public_key: PublicKey, + }, + /// Duplicated delegator entry found for a given validator. + DuplicatedDelegatorEntry { + /// Validator's public key. + validator_public_key: PublicKey, + /// Delegator's public key. + delegator_public_key: PublicKey, + }, + /// Delegation rate outside the allowed range. + InvalidDelegationRate { + /// Delegator's public key. + public_key: PublicKey, + /// Invalid delegation rate specified in the genesis account entry. + delegation_rate: DelegationRate, + }, + /// Invalid bond amount in a genesis account. + InvalidBondAmount { + /// Validator's public key. + public_key: PublicKey, + }, + /// Invalid delegated amount in a genesis account. + InvalidDelegatedAmount { + /// Delegator's public key. + public_key: PublicKey, + }, + /// Failed to create system registry. + FailedToCreateSystemRegistry, + /// Missing system contract hash. + MissingSystemContractHash(String), + /// Invalid number of validator slots configured. 
+ InvalidValidatorSlots { + /// Number of validators in the genesis config. + validators: usize, + /// Number of validator slots specified. + validator_slots: u32, + }, + /// The chainspec registry is missing a required entry. + MissingChainspecRegistryEntry, + /// Duplicated administrator entry. + /// + /// This error can occur only on some private chains. + DuplicatedAdministratorEntry, + /// A bytesrepr Error. + Bytesrepr(bytesrepr::Error), + /// Genesis process requires initial accounts. + MissingGenesisAccounts, + /// A tracking copy error. + TrackingCopy(TrackingCopyError), +} + +impl fmt::Display for GenesisError { + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + write!(f, "GenesisError: {:?}", self) + } +} + +/// State for genesis installer. +pub enum GenesisInstaller +where + S: StateProvider, +{ + /// Install genesis using the Accounts/Contracts model. + AccountContract(AccountContractInstaller), + /// Install genesis using the Addressable Entity model. + Entity(EntityGenesisInstaller), +} +impl GenesisInstaller +where + S: StateProvider, +{ + /// Ctor. + pub fn new( + genesis_config_hash: Digest, + protocol_version: ProtocolVersion, + config: GenesisConfig, + tracking_copy: Rc::Reader>>>, + ) -> Self { + if config.enable_entity() { + GenesisInstaller::Entity(EntityGenesisInstaller::new( + genesis_config_hash, + protocol_version, + config, + tracking_copy, + )) + } else { + GenesisInstaller::AccountContract(AccountContractInstaller::new( + genesis_config_hash, + protocol_version, + config, + tracking_copy, + )) + } + } + + /// Finalize genesis. + pub fn finalize(self) -> Effects { + match self { + GenesisInstaller::AccountContract(installer) => installer.finalize(), + GenesisInstaller::Entity(installer) => installer.finalize(), + } + } + + /// Performs a complete system installation. 
+ pub fn install( + &mut self, + chainspec_registry: ChainspecRegistry, + ) -> Result<(), Box> { + match self { + GenesisInstaller::AccountContract(installer) => installer.install(chainspec_registry), + GenesisInstaller::Entity(installer) => installer.install(chainspec_registry), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use casper_types::AsymmetricType; + use rand::RngCore; + + use casper_types::{bytesrepr, SecretKey}; + + #[test] + fn bytesrepr_roundtrip() { + let mut rng = rand::thread_rng(); + let genesis_account: GenesisAccount = rng.gen(); + bytesrepr::test_serialization_roundtrip(&genesis_account); + } + + #[test] + fn system_account_bytesrepr_roundtrip() { + let genesis_account = GenesisAccount::system(); + + bytesrepr::test_serialization_roundtrip(&genesis_account); + } + + #[test] + fn genesis_account_bytesrepr_roundtrip() { + let mut rng = rand::thread_rng(); + let mut bytes = [0u8; 32]; + rng.fill_bytes(&mut bytes[..]); + let secret_key = SecretKey::ed25519_from_bytes(bytes).unwrap(); + let public_key: PublicKey = PublicKey::from(&secret_key); + + let genesis_account_1 = GenesisAccount::account(public_key.clone(), Motes::new(100), None); + + bytesrepr::test_serialization_roundtrip(&genesis_account_1); + + let genesis_account_2 = + GenesisAccount::account(public_key, Motes::new(100), Some(rng.gen())); + + bytesrepr::test_serialization_roundtrip(&genesis_account_2); + } + + #[test] + fn delegator_bytesrepr_roundtrip() { + let mut rng = rand::thread_rng(); + let mut validator_bytes = [0u8; 32]; + let mut delegator_bytes = [0u8; 32]; + rng.fill_bytes(&mut validator_bytes[..]); + rng.fill_bytes(&mut delegator_bytes[..]); + let validator_secret_key = SecretKey::ed25519_from_bytes(validator_bytes).unwrap(); + let delegator_secret_key = SecretKey::ed25519_from_bytes(delegator_bytes).unwrap(); + + let validator_public_key = PublicKey::from(&validator_secret_key); + let delegator_public_key = PublicKey::from(&delegator_secret_key); + + let 
genesis_account = GenesisAccount::delegator( + validator_public_key, + delegator_public_key, + Motes::new(100), + Motes::zero(), + ); + + bytesrepr::test_serialization_roundtrip(&genesis_account); + } + + #[test] + fn administrator_account_bytesrepr_roundtrip() { + let administrator_account = AdministratorAccount::new( + PublicKey::ed25519_from_bytes([123u8; 32]).unwrap(), + Motes::new(U512::MAX), + ); + bytesrepr::test_serialization_roundtrip(&administrator_account); + } +} diff --git a/storage/src/system/genesis/account_contract_installer.rs b/storage/src/system/genesis/account_contract_installer.rs new file mode 100644 index 0000000000..10fcbccdb6 --- /dev/null +++ b/storage/src/system/genesis/account_contract_installer.rs @@ -0,0 +1,780 @@ +use itertools::Itertools; +use num_rational::Ratio; +use num_traits::Zero; +use rand::Rng; +use std::{ + cell::RefCell, + collections::{BTreeMap, BTreeSet}, + rc::Rc, +}; + +use crate::{ + global_state::state::StateProvider, + system::{ + genesis::{GenesisError, DEFAULT_ADDRESS, NO_WASM}, + protocol_upgrade::ProtocolUpgradeError, + }, + AddressGenerator, TrackingCopy, +}; +use casper_types::{ + account::AccountHash, + addressable_entity::{ + ActionThresholds, EntityKindTag, MessageTopics, NamedKeyAddr, NamedKeyValue, + }, + contracts::{ + ContractHash, ContractPackage, ContractPackageHash, ContractPackageStatus, + ContractVersions, DisabledVersions, NamedKeys, + }, + execution::Effects, + system::{ + auction, + auction::{ + BidAddr, BidKind, Delegator, DelegatorBid, DelegatorKind, SeigniorageRecipient, + SeigniorageRecipientV2, SeigniorageRecipients, SeigniorageRecipientsSnapshot, + SeigniorageRecipientsSnapshotV2, SeigniorageRecipientsV2, Staking, ValidatorBid, + AUCTION_DELAY_KEY, DEFAULT_SEIGNIORAGE_RECIPIENTS_SNAPSHOT_VERSION, + DELEGATION_RATE_DENOMINATOR, ERA_END_TIMESTAMP_MILLIS_KEY, ERA_ID_KEY, + INITIAL_ERA_END_TIMESTAMP_MILLIS, INITIAL_ERA_ID, LOCKED_FUNDS_PERIOD_KEY, + SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY, 
SEIGNIORAGE_RECIPIENTS_SNAPSHOT_VERSION_KEY, + UNBONDING_DELAY_KEY, VALIDATOR_SLOTS_KEY, + }, + handle_payment, + handle_payment::ACCUMULATION_PURSE_KEY, + mint, + mint::{ + ARG_ROUND_SEIGNIORAGE_RATE, MINT_GAS_HOLD_HANDLING_KEY, MINT_GAS_HOLD_INTERVAL_KEY, + ROUND_SEIGNIORAGE_RATE_KEY, TOTAL_SUPPLY_KEY, + }, + standard_payment, SystemEntityType, AUCTION, HANDLE_PAYMENT, MINT, STANDARD_PAYMENT, + }, + AccessRights, Account, AddressableEntity, AddressableEntityHash, AdministratorAccount, + BlockGlobalAddr, ByteCode, ByteCodeAddr, ByteCodeHash, ByteCodeKind, CLValue, + ChainspecRegistry, Contract, ContractWasm, ContractWasmHash, Digest, EntityAddr, EntityKind, + EntityVersions, EntryPointAddr, EntryPointValue, EntryPoints, EraId, GenesisAccount, + GenesisConfig, Groups, HashAddr, Key, Motes, Package, PackageHash, PackageStatus, Phase, + ProtocolVersion, PublicKey, StoredValue, SystemHashRegistry, URef, U512, +}; + +pub struct AccountContractInstaller +where + S: StateProvider, +{ + protocol_version: ProtocolVersion, + config: GenesisConfig, + address_generator: Rc>, + tracking_copy: Rc::Reader>>>, +} + +impl AccountContractInstaller +where + S: StateProvider, +{ + pub(crate) fn new( + genesis_config_hash: Digest, + protocol_version: ProtocolVersion, + config: GenesisConfig, + tracking_copy: Rc::Reader>>>, + ) -> Self { + let phase = Phase::System; + let genesis_config_hash_bytes = genesis_config_hash.as_ref(); + + let address_generator = { + let generator = AddressGenerator::new(genesis_config_hash_bytes, phase); + Rc::new(RefCell::new(generator)) + }; + + AccountContractInstaller { + protocol_version, + address_generator, + tracking_copy, + config, + } + } + + pub(crate) fn finalize(self) -> Effects { + self.tracking_copy.borrow().effects() + } + + fn create_mint(&mut self) -> Result> { + let round_seigniorage_rate_uref = + { + let round_seigniorage_rate_uref = self + .address_generator + .borrow_mut() + .new_uref(AccessRights::READ_ADD_WRITE); + + let 
(round_seigniorage_rate_numer, round_seigniorage_rate_denom) = + self.config.round_seigniorage_rate().into(); + let round_seigniorage_rate: Ratio = Ratio::new( + round_seigniorage_rate_numer.into(), + round_seigniorage_rate_denom.into(), + ); + + self.tracking_copy.borrow_mut().write( + round_seigniorage_rate_uref.into(), + StoredValue::CLValue(CLValue::from_t(round_seigniorage_rate).map_err( + |_| GenesisError::CLValue(ARG_ROUND_SEIGNIORAGE_RATE.to_string()), + )?), + ); + round_seigniorage_rate_uref + }; + + let total_supply_uref = { + let total_supply_uref = self + .address_generator + .borrow_mut() + .new_uref(AccessRights::READ_ADD_WRITE); + + self.tracking_copy.borrow_mut().write( + total_supply_uref.into(), + StoredValue::CLValue( + CLValue::from_t(U512::zero()) + .map_err(|_| GenesisError::CLValue(TOTAL_SUPPLY_KEY.to_string()))?, + ), + ); + total_supply_uref + }; + + let gas_hold_handling_uref = + { + let gas_hold_handling = self.config.gas_hold_balance_handling().tag(); + let gas_hold_handling_uref = self + .address_generator + .borrow_mut() + .new_uref(AccessRights::READ_ADD_WRITE); + + self.tracking_copy.borrow_mut().write( + gas_hold_handling_uref.into(), + StoredValue::CLValue(CLValue::from_t(gas_hold_handling).map_err(|_| { + GenesisError::CLValue(MINT_GAS_HOLD_HANDLING_KEY.to_string()) + })?), + ); + gas_hold_handling_uref + }; + + let gas_hold_interval_uref = + { + let gas_hold_interval = self.config.gas_hold_interval_millis(); + let gas_hold_interval_uref = self + .address_generator + .borrow_mut() + .new_uref(AccessRights::READ_ADD_WRITE); + + self.tracking_copy.borrow_mut().write( + gas_hold_interval_uref.into(), + StoredValue::CLValue(CLValue::from_t(gas_hold_interval).map_err(|_| { + GenesisError::CLValue(MINT_GAS_HOLD_INTERVAL_KEY.to_string()) + })?), + ); + gas_hold_interval_uref + }; + + let named_keys = { + let mut named_keys = NamedKeys::new(); + named_keys.insert( + ROUND_SEIGNIORAGE_RATE_KEY.to_string(), + 
round_seigniorage_rate_uref.into(), + ); + named_keys.insert(TOTAL_SUPPLY_KEY.to_string(), total_supply_uref.into()); + named_keys.insert( + MINT_GAS_HOLD_HANDLING_KEY.to_string(), + gas_hold_handling_uref.into(), + ); + named_keys.insert( + MINT_GAS_HOLD_INTERVAL_KEY.to_string(), + gas_hold_interval_uref.into(), + ); + named_keys + }; + + let entry_points = mint::mint_entry_points(); + + let access_key = self + .address_generator + .borrow_mut() + .new_uref(AccessRights::READ_ADD_WRITE); + + let (_, mint_hash) = self.store_contract(access_key, named_keys, entry_points); + + { + // Insert a partial registry into global state. + // This allows for default values to be accessible when the remaining system contracts + // call the `call_host_mint` function during their creation. + let mut partial_registry = BTreeMap::::new(); + partial_registry.insert(MINT.to_string(), mint_hash.value()); + partial_registry.insert(HANDLE_PAYMENT.to_string(), DEFAULT_ADDRESS); + let cl_registry = CLValue::from_t(partial_registry) + .map_err(|error| GenesisError::CLValue(error.to_string()))?; + self.tracking_copy + .borrow_mut() + .write(Key::SystemEntityRegistry, StoredValue::CLValue(cl_registry)); + } + + Ok(total_supply_uref.into()) + } + + fn create_handle_payment( + &self, + handle_payment_payment_purse: URef, + ) -> Result> { + let named_keys = { + let mut named_keys = NamedKeys::new(); + let named_key = Key::URef(handle_payment_payment_purse); + named_keys.insert(handle_payment::PAYMENT_PURSE_KEY.to_string(), named_key); + + // This purse is used only in FeeHandling::Accumulate setting. 
+ let rewards_purse_uref = self.create_purse(U512::zero())?; + + named_keys.insert( + ACCUMULATION_PURSE_KEY.to_string(), + rewards_purse_uref.into(), + ); + + named_keys + }; + + let entry_points = handle_payment::handle_payment_entry_points(); + + let access_key = self + .address_generator + .borrow_mut() + .new_uref(AccessRights::READ_ADD_WRITE); + + let (_, handle_payment_hash) = self.store_contract(access_key, named_keys, entry_points); + + self.store_system_contract(HANDLE_PAYMENT, handle_payment_hash)?; + + Ok(handle_payment_hash.value()) + } + + fn create_auction(&self, total_supply_key: Key) -> Result> { + let locked_funds_period_millis = self.config.locked_funds_period_millis(); + let auction_delay: u64 = self.config.auction_delay(); + let genesis_timestamp_millis: u64 = self.config.genesis_timestamp_millis(); + + let mut named_keys = NamedKeys::new(); + + let genesis_validators: Vec<_> = self.config.get_bonded_validators().collect(); + if (self.config.validator_slots() as usize) < genesis_validators.len() { + return Err(GenesisError::InvalidValidatorSlots { + validators: genesis_validators.len(), + validator_slots: self.config.validator_slots(), + } + .into()); + } + + let genesis_delegators: Vec<_> = self.config.get_bonded_delegators().collect(); + + // Make sure all delegators have corresponding genesis validator entries + for (validator_public_key, delegator_public_key, _, delegated_amount) in + genesis_delegators.iter() + { + if *delegated_amount == &Motes::zero() { + return Err(GenesisError::InvalidDelegatedAmount { + public_key: (*delegator_public_key).clone(), + } + .into()); + } + + let orphan_condition = genesis_validators.iter().find(|genesis_validator| { + genesis_validator.public_key() == (*validator_public_key).clone() + }); + + if orphan_condition.is_none() { + return Err(GenesisError::OrphanedDelegator { + validator_public_key: (*validator_public_key).clone(), + delegator_public_key: (*delegator_public_key).clone(), + } + .into()); + } + } 
+ + let mut total_staked_amount = U512::zero(); + + let staked = { + let mut staked: Staking = BTreeMap::new(); + + for genesis_validator in genesis_validators { + let public_key = genesis_validator.public_key(); + let mut delegators = BTreeMap::new(); + + let staked_amount = genesis_validator.staked_amount().value(); + if staked_amount.is_zero() { + return Err(GenesisError::InvalidBondAmount { public_key }.into()); + } + + let delegation_rate = genesis_validator.delegation_rate(); + if delegation_rate > DELEGATION_RATE_DENOMINATOR { + return Err(GenesisError::InvalidDelegationRate { + public_key, + delegation_rate, + } + .into()); + } + debug_assert_ne!(public_key, PublicKey::System); + + total_staked_amount += staked_amount; + + let purse_uref = self.create_purse(staked_amount)?; + let release_timestamp_millis = + genesis_timestamp_millis + locked_funds_period_millis; + let validator_bid = { + let bid = ValidatorBid::locked( + public_key.clone(), + purse_uref, + staked_amount, + delegation_rate, + release_timestamp_millis, + 0, + u64::MAX, + 0, + ); + + // Set up delegator entries attached to genesis validators + for ( + validator_public_key, + delegator_public_key, + _delegator_balance, + delegator_delegated_amount, + ) in genesis_delegators.iter() + { + if (*validator_public_key).clone() == public_key.clone() { + let purse_uref = + self.create_purse(delegator_delegated_amount.value())?; + + let delegator_kind: DelegatorKind = + DelegatorKind::PublicKey((*delegator_public_key).clone()); + let delegator = DelegatorBid::locked( + delegator_kind.clone(), + delegator_delegated_amount.value(), + purse_uref, + (*validator_public_key).clone(), + release_timestamp_millis, + ); + + if delegators.insert(delegator_kind, delegator).is_some() { + return Err(GenesisError::DuplicatedDelegatorEntry { + validator_public_key: (*validator_public_key).clone(), + delegator_public_key: (*delegator_public_key).clone(), + } + .into()); + } + } + } + + bid + }; + + 
staked.insert(public_key, (validator_bid, delegators)); + } + staked + }; + + let _ = self.tracking_copy.borrow_mut().add( + total_supply_key, + StoredValue::CLValue( + CLValue::from_t(total_staked_amount) + .map_err(|_| GenesisError::CLValue(TOTAL_SUPPLY_KEY.to_string()))?, + ), + ); + + let initial_seigniorage_recipients = + self.initial_seigniorage_recipients(&staked, auction_delay); + + let era_id_uref = self + .address_generator + .borrow_mut() + .new_uref(AccessRights::READ_ADD_WRITE); + self.tracking_copy.borrow_mut().write( + era_id_uref.into(), + StoredValue::CLValue( + CLValue::from_t(INITIAL_ERA_ID) + .map_err(|_| GenesisError::CLValue(ERA_ID_KEY.to_string()))?, + ), + ); + named_keys.insert(ERA_ID_KEY.into(), era_id_uref.into()); + + let era_end_timestamp_millis_uref = self + .address_generator + .borrow_mut() + .new_uref(AccessRights::READ_ADD_WRITE); + self.tracking_copy.borrow_mut().write( + era_end_timestamp_millis_uref.into(), + StoredValue::CLValue( + CLValue::from_t(INITIAL_ERA_END_TIMESTAMP_MILLIS) + .map_err(|_| GenesisError::CLValue(ERA_END_TIMESTAMP_MILLIS_KEY.to_string()))?, + ), + ); + named_keys.insert( + ERA_END_TIMESTAMP_MILLIS_KEY.into(), + era_end_timestamp_millis_uref.into(), + ); + + let initial_seigniorage_recipients_uref = self + .address_generator + .borrow_mut() + .new_uref(AccessRights::READ_ADD_WRITE); + self.tracking_copy.borrow_mut().write( + initial_seigniorage_recipients_uref.into(), + StoredValue::CLValue(CLValue::from_t(initial_seigniorage_recipients).map_err( + |_| GenesisError::CLValue(SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY.to_string()), + )?), + ); + named_keys.insert( + SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY.into(), + initial_seigniorage_recipients_uref.into(), + ); + + // initialize snapshot version flag + let initial_seigniorage_recipients_version_uref = self + .address_generator + .borrow_mut() + .new_uref(AccessRights::READ_ADD_WRITE); + self.tracking_copy.borrow_mut().write( + 
initial_seigniorage_recipients_version_uref.into(), + StoredValue::CLValue( + CLValue::from_t(DEFAULT_SEIGNIORAGE_RECIPIENTS_SNAPSHOT_VERSION).map_err(|_| { + GenesisError::CLValue(SEIGNIORAGE_RECIPIENTS_SNAPSHOT_VERSION_KEY.to_string()) + })?, + ), + ); + + named_keys.insert( + SEIGNIORAGE_RECIPIENTS_SNAPSHOT_VERSION_KEY.into(), + initial_seigniorage_recipients_version_uref.into(), + ); + + // store all delegator and validator bids + for (validator_public_key, (validator_bid, delegators)) in staked { + for (delegator_kind, delegator_bid) in delegators { + let delegator_bid_key = Key::BidAddr(BidAddr::new_delegator_kind( + &validator_public_key.clone(), + &delegator_kind, + )); + self.tracking_copy.borrow_mut().write( + delegator_bid_key, + StoredValue::BidKind(BidKind::Delegator(Box::new(delegator_bid))), + ); + } + let validator_bid_key = Key::BidAddr(BidAddr::from(validator_public_key.clone())); + self.tracking_copy.borrow_mut().write( + validator_bid_key, + StoredValue::BidKind(BidKind::Validator(Box::new(validator_bid))), + ); + } + + let validator_slots = self.config.validator_slots(); + let validator_slots_uref = self + .address_generator + .borrow_mut() + .new_uref(AccessRights::READ_ADD_WRITE); + self.tracking_copy.borrow_mut().write( + validator_slots_uref.into(), + StoredValue::CLValue( + CLValue::from_t(validator_slots) + .map_err(|_| GenesisError::CLValue(VALIDATOR_SLOTS_KEY.to_string()))?, + ), + ); + named_keys.insert(VALIDATOR_SLOTS_KEY.into(), validator_slots_uref.into()); + + let auction_delay_uref = self + .address_generator + .borrow_mut() + .new_uref(AccessRights::READ_ADD_WRITE); + self.tracking_copy.borrow_mut().write( + auction_delay_uref.into(), + StoredValue::CLValue( + CLValue::from_t(auction_delay) + .map_err(|_| GenesisError::CLValue(AUCTION_DELAY_KEY.to_string()))?, + ), + ); + named_keys.insert(AUCTION_DELAY_KEY.into(), auction_delay_uref.into()); + + let locked_funds_period_uref = self + .address_generator + .borrow_mut() + 
.new_uref(AccessRights::READ_ADD_WRITE); + self.tracking_copy.borrow_mut().write( + locked_funds_period_uref.into(), + StoredValue::CLValue( + CLValue::from_t(locked_funds_period_millis) + .map_err(|_| GenesisError::CLValue(LOCKED_FUNDS_PERIOD_KEY.to_string()))?, + ), + ); + named_keys.insert( + LOCKED_FUNDS_PERIOD_KEY.into(), + locked_funds_period_uref.into(), + ); + + let unbonding_delay = self.config.unbonding_delay(); + let unbonding_delay_uref = self + .address_generator + .borrow_mut() + .new_uref(AccessRights::READ_ADD_WRITE); + self.tracking_copy.borrow_mut().write( + unbonding_delay_uref.into(), + StoredValue::CLValue( + CLValue::from_t(unbonding_delay) + .map_err(|_| GenesisError::CLValue(UNBONDING_DELAY_KEY.to_string()))?, + ), + ); + named_keys.insert(UNBONDING_DELAY_KEY.into(), unbonding_delay_uref.into()); + + let entry_points = auction::auction_entry_points(); + + let access_key = self + .address_generator + .borrow_mut() + .new_uref(AccessRights::READ_ADD_WRITE); + + let (_, auction_hash) = self.store_contract(access_key, named_keys, entry_points); + + self.store_system_contract(AUCTION, auction_hash)?; + + Ok(auction_hash.value()) + } + + pub(crate) fn create_accounts( + &self, + total_supply_key: Key, + payment_purse_uref: URef, + ) -> Result<(), Box> { + let accounts = { + let mut ret: Vec = self.config.accounts_iter().cloned().collect(); + let system_account = GenesisAccount::system(); + ret.push(system_account); + ret + }; + + let mut administrative_accounts = self.config.administrative_accounts().peekable(); + + if administrative_accounts.peek().is_some() + && administrative_accounts + .duplicates_by(|admin| admin.public_key()) + .next() + .is_some() + { + // Ensure no duplicate administrator accounts are specified as this might raise errors + // during genesis process when administrator accounts are added to associated keys. 
+ return Err(GenesisError::DuplicatedAdministratorEntry.into()); + } + + let mut total_supply = U512::zero(); + + for account in accounts { + let account_hash = account.account_hash(); + let main_purse = match account { + GenesisAccount::System + if self.config.administrative_accounts().next().is_some() => + { + payment_purse_uref + } + _ => self.create_purse(account.balance().value())?, + }; + + let key = Key::Account(account_hash); + let stored_value = StoredValue::Account(Account::create( + account_hash, + Default::default(), + main_purse, + )); + + self.tracking_copy.borrow_mut().write(key, stored_value); + + total_supply += account.balance().value(); + } + + self.tracking_copy.borrow_mut().write( + total_supply_key, + StoredValue::CLValue( + CLValue::from_t(total_supply) + .map_err(|_| GenesisError::CLValue(TOTAL_SUPPLY_KEY.to_string()))?, + ), + ); + + Ok(()) + } + + fn initial_seigniorage_recipients( + &self, + staked: &Staking, + auction_delay: u64, + ) -> BTreeMap { + let initial_snapshot_range = INITIAL_ERA_ID.iter_inclusive(auction_delay); + + let mut seigniorage_recipients = SeigniorageRecipientsV2::new(); + for (validator_public_key, (validator_bid, delegators)) in staked { + let mut delegator_stake = BTreeMap::new(); + for (k, v) in delegators { + delegator_stake.insert(k.clone(), v.staked_amount()); + } + let recipient = SeigniorageRecipientV2::new( + validator_bid.staked_amount(), + *validator_bid.delegation_rate(), + delegator_stake, + BTreeMap::new(), + ); + seigniorage_recipients.insert(validator_public_key.clone(), recipient); + } + + let mut initial_seigniorage_recipients = SeigniorageRecipientsSnapshotV2::new(); + for era_id in initial_snapshot_range { + initial_seigniorage_recipients.insert(era_id, seigniorage_recipients.clone()); + } + initial_seigniorage_recipients + } + + fn create_purse(&self, amount: U512) -> Result> { + let purse_addr = self.address_generator.borrow_mut().create_address(); + + let balance_cl_value = + 
CLValue::from_t(amount).map_err(|error| GenesisError::CLValue(error.to_string()))?; + self.tracking_copy.borrow_mut().write( + Key::Balance(purse_addr), + StoredValue::CLValue(balance_cl_value), + ); + + let purse_cl_value = CLValue::unit(); + let purse_uref = URef::new(purse_addr, AccessRights::READ_ADD_WRITE); + self.tracking_copy + .borrow_mut() + .write(Key::URef(purse_uref), StoredValue::CLValue(purse_cl_value)); + + Ok(purse_uref) + } + + fn store_contract( + &self, + access_key: URef, + named_keys: NamedKeys, + entry_points: EntryPoints, + ) -> (ContractPackageHash, ContractHash) { + let protocol_version = self.protocol_version; + let contract_wasm_hash = + ContractWasmHash::new(self.address_generator.borrow_mut().new_hash_address()); + let contract_hash = + ContractHash::new(self.address_generator.borrow_mut().new_hash_address()); + let contract_package_hash = + ContractPackageHash::new(self.address_generator.borrow_mut().new_hash_address()); + + let contract_wasm = ContractWasm::new(vec![]); + let contract = Contract::new( + contract_package_hash, + contract_wasm_hash, + named_keys, + entry_points.into(), + protocol_version, + ); + + // Genesis contracts can be versioned contracts. 
+ let contract_package = { + let mut contract_package = ContractPackage::new( + access_key, + ContractVersions::default(), + DisabledVersions::default(), + Groups::default(), + ContractPackageStatus::default(), + ); + contract_package.insert_contract_version(protocol_version.value().major, contract_hash); + contract_package + }; + + self.tracking_copy.borrow_mut().write( + contract_wasm_hash.into(), + StoredValue::ContractWasm(contract_wasm), + ); + self.tracking_copy + .borrow_mut() + .write(contract_hash.into(), StoredValue::Contract(contract)); + self.tracking_copy.borrow_mut().write( + contract_package_hash.into(), + StoredValue::ContractPackage(contract_package), + ); + + (contract_package_hash, contract_hash) + } + + fn store_system_contract( + &self, + contract_name: &str, + contract_hash: ContractHash, + ) -> Result<(), Box> { + let partial_cl_registry = self + .tracking_copy + .borrow_mut() + .read(&Key::SystemEntityRegistry) + .map_err(|_| GenesisError::FailedToCreateSystemRegistry)? + .ok_or_else(|| { + GenesisError::CLValue("failed to convert registry as stored value".to_string()) + })? + .as_cl_value() + .ok_or_else(|| GenesisError::CLValue("failed to convert to CLValue".to_string()))? 
+ .to_owned(); + let mut partial_registry = CLValue::into_t::(partial_cl_registry) + .map_err(|error| GenesisError::CLValue(error.to_string()))?; + partial_registry.insert(contract_name.to_string(), contract_hash.value()); + let cl_registry = CLValue::from_t(partial_registry) + .map_err(|error| GenesisError::CLValue(error.to_string()))?; + self.tracking_copy + .borrow_mut() + .write(Key::SystemEntityRegistry, StoredValue::CLValue(cl_registry)); + Ok(()) + } + + fn store_chainspec_registry( + &self, + chainspec_registry: ChainspecRegistry, + ) -> Result<(), Box> { + if chainspec_registry.genesis_accounts_raw_hash().is_none() { + return Err(GenesisError::MissingChainspecRegistryEntry.into()); + } + let cl_value_registry = CLValue::from_t(chainspec_registry) + .map_err(|error| GenesisError::CLValue(error.to_string()))?; + + self.tracking_copy.borrow_mut().write( + Key::ChainspecRegistry, + StoredValue::CLValue(cl_value_registry), + ); + Ok(()) + } + + /// Writes a tracking record to global state for block time / genesis timestamp. + fn store_block_time(&self) -> Result<(), Box> { + let cl_value = CLValue::from_t(self.config.genesis_timestamp_millis()) + .map_err(|error| GenesisError::CLValue(error.to_string()))?; + + self.tracking_copy.borrow_mut().write( + Key::BlockGlobal(BlockGlobalAddr::BlockTime), + StoredValue::CLValue(cl_value), + ); + Ok(()) + } + + /// Performs a complete system installation. + pub(crate) fn install( + &mut self, + chainspec_registry: ChainspecRegistry, + ) -> Result<(), Box> { + // self.setup_system_account()?; + // Create mint + let total_supply_key = self.create_mint()?; + + let payment_purse_uref = self.create_purse(U512::zero())?; + + // Create all genesis accounts + self.create_accounts(total_supply_key, payment_purse_uref)?; + + // Create the auction and setup the stake of all genesis validators. 
+ self.create_auction(total_supply_key)?; + + // Create handle payment + self.create_handle_payment(payment_purse_uref)?; + + // Write chainspec registry. + self.store_chainspec_registry(chainspec_registry)?; + + // Write block time to global state + self.store_block_time()?; + Ok(()) + } +} diff --git a/storage/src/system/genesis/entity_installer.rs b/storage/src/system/genesis/entity_installer.rs new file mode 100644 index 0000000000..38ebf8d0d1 --- /dev/null +++ b/storage/src/system/genesis/entity_installer.rs @@ -0,0 +1,949 @@ +use itertools::Itertools; +use num_rational::Ratio; +use num_traits::Zero; +use rand::Rng; +use std::{ + cell::RefCell, + collections::{BTreeMap, BTreeSet}, + rc::Rc, +}; + +use crate::{ + global_state::state::StateProvider, + system::genesis::{GenesisError, DEFAULT_ADDRESS, NO_WASM}, + AddressGenerator, TrackingCopy, +}; +use casper_types::{ + addressable_entity::{ + ActionThresholds, EntityKindTag, MessageTopics, NamedKeyAddr, NamedKeyValue, + }, + contracts::NamedKeys, + execution::Effects, + system::{ + auction, + auction::{ + BidAddr, BidKind, DelegatorBid, DelegatorKind, SeigniorageRecipient, + SeigniorageRecipientV2, SeigniorageRecipients, SeigniorageRecipientsSnapshot, + SeigniorageRecipientsSnapshotV2, SeigniorageRecipientsV2, Staking, ValidatorBid, + AUCTION_DELAY_KEY, DEFAULT_SEIGNIORAGE_RECIPIENTS_SNAPSHOT_VERSION, + DELEGATION_RATE_DENOMINATOR, ERA_END_TIMESTAMP_MILLIS_KEY, ERA_ID_KEY, + INITIAL_ERA_END_TIMESTAMP_MILLIS, INITIAL_ERA_ID, LOCKED_FUNDS_PERIOD_KEY, + SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY, SEIGNIORAGE_RECIPIENTS_SNAPSHOT_VERSION_KEY, + UNBONDING_DELAY_KEY, VALIDATOR_SLOTS_KEY, + }, + handle_payment, + handle_payment::ACCUMULATION_PURSE_KEY, + mint, + mint::{ + ARG_ROUND_SEIGNIORAGE_RATE, MINT_GAS_HOLD_HANDLING_KEY, MINT_GAS_HOLD_INTERVAL_KEY, + ROUND_SEIGNIORAGE_RATE_KEY, TOTAL_SUPPLY_KEY, + }, + SystemEntityType, AUCTION, HANDLE_PAYMENT, MINT, + }, + AccessRights, AddressableEntity, AddressableEntityHash, 
AdministratorAccount, BlockGlobalAddr, + ByteCode, ByteCodeAddr, ByteCodeHash, ByteCodeKind, CLValue, ChainspecRegistry, Digest, + EntityAddr, EntityKind, EntityVersions, EntryPointAddr, EntryPointValue, EntryPoints, EraId, + GenesisAccount, GenesisConfig, Groups, HashAddr, Key, Motes, Package, PackageHash, + PackageStatus, Phase, ProtocolVersion, PublicKey, StoredValue, SystemHashRegistry, Tagged, + URef, U512, +}; + +pub struct EntityGenesisInstaller +where + S: StateProvider, +{ + protocol_version: ProtocolVersion, + config: GenesisConfig, + address_generator: Rc>, + tracking_copy: Rc::Reader>>>, +} + +impl EntityGenesisInstaller +where + S: StateProvider, +{ + pub fn new( + genesis_config_hash: Digest, + protocol_version: ProtocolVersion, + config: GenesisConfig, + tracking_copy: Rc::Reader>>>, + ) -> Self { + let phase = Phase::System; + let genesis_config_hash_bytes = genesis_config_hash.as_ref(); + + let address_generator = { + let generator = AddressGenerator::new(genesis_config_hash_bytes, phase); + Rc::new(RefCell::new(generator)) + }; + + EntityGenesisInstaller { + protocol_version, + config, + address_generator, + tracking_copy, + } + } + + pub fn finalize(self) -> Effects { + self.tracking_copy.borrow().effects() + } + + fn setup_system_account(&mut self) -> Result<(), Box> { + let system_account_addr = PublicKey::System.to_account_hash(); + + self.store_addressable_entity( + EntityKind::Account(system_account_addr), + NO_WASM, + None, + None, + self.create_purse(U512::zero())?, + )?; + + Ok(()) + } + + fn create_mint(&mut self) -> Result> { + let round_seigniorage_rate_uref = + { + let round_seigniorage_rate_uref = self + .address_generator + .borrow_mut() + .new_uref(AccessRights::READ_ADD_WRITE); + + let (round_seigniorage_rate_numer, round_seigniorage_rate_denom) = + self.config.round_seigniorage_rate().into(); + let round_seigniorage_rate: Ratio = Ratio::new( + round_seigniorage_rate_numer.into(), + round_seigniorage_rate_denom.into(), + ); + + 
self.tracking_copy.borrow_mut().write( + round_seigniorage_rate_uref.into(), + StoredValue::CLValue(CLValue::from_t(round_seigniorage_rate).map_err( + |_| GenesisError::CLValue(ARG_ROUND_SEIGNIORAGE_RATE.to_string()), + )?), + ); + round_seigniorage_rate_uref + }; + + let total_supply_uref = { + let total_supply_uref = self + .address_generator + .borrow_mut() + .new_uref(AccessRights::READ_ADD_WRITE); + + self.tracking_copy.borrow_mut().write( + total_supply_uref.into(), + StoredValue::CLValue( + CLValue::from_t(U512::zero()) + .map_err(|_| GenesisError::CLValue(TOTAL_SUPPLY_KEY.to_string()))?, + ), + ); + total_supply_uref + }; + + let gas_hold_handling_uref = + { + let gas_hold_handling = self.config.gas_hold_balance_handling().tag(); + let gas_hold_handling_uref = self + .address_generator + .borrow_mut() + .new_uref(AccessRights::READ_ADD_WRITE); + + self.tracking_copy.borrow_mut().write( + gas_hold_handling_uref.into(), + StoredValue::CLValue(CLValue::from_t(gas_hold_handling).map_err(|_| { + GenesisError::CLValue(MINT_GAS_HOLD_HANDLING_KEY.to_string()) + })?), + ); + gas_hold_handling_uref + }; + + let gas_hold_interval_uref = + { + let gas_hold_interval = self.config.gas_hold_interval_millis(); + let gas_hold_interval_uref = self + .address_generator + .borrow_mut() + .new_uref(AccessRights::READ_ADD_WRITE); + + self.tracking_copy.borrow_mut().write( + gas_hold_interval_uref.into(), + StoredValue::CLValue(CLValue::from_t(gas_hold_interval).map_err(|_| { + GenesisError::CLValue(MINT_GAS_HOLD_INTERVAL_KEY.to_string()) + })?), + ); + gas_hold_interval_uref + }; + + let named_keys = { + let mut named_keys = NamedKeys::new(); + named_keys.insert( + ROUND_SEIGNIORAGE_RATE_KEY.to_string(), + round_seigniorage_rate_uref.into(), + ); + named_keys.insert(TOTAL_SUPPLY_KEY.to_string(), total_supply_uref.into()); + named_keys.insert( + MINT_GAS_HOLD_HANDLING_KEY.to_string(), + gas_hold_handling_uref.into(), + ); + named_keys.insert( + 
MINT_GAS_HOLD_INTERVAL_KEY.to_string(), + gas_hold_interval_uref.into(), + ); + named_keys + }; + + let entry_points = mint::mint_entry_points(); + + let contract_hash = self.store_system_contract( + named_keys, + entry_points, + EntityKind::System(SystemEntityType::Mint), + )?; + + { + // Insert a partial registry into global state. + // This allows for default values to be accessible when the remaining system contracts + // call the `call_host_mint` function during their creation. + let mut partial_registry = BTreeMap::::new(); + partial_registry.insert(MINT.to_string(), contract_hash); + partial_registry.insert(HANDLE_PAYMENT.to_string(), DEFAULT_ADDRESS.into()); + let cl_registry = CLValue::from_t(partial_registry) + .map_err(|error| GenesisError::CLValue(error.to_string()))?; + self.tracking_copy + .borrow_mut() + .write(Key::SystemEntityRegistry, StoredValue::CLValue(cl_registry)); + } + + Ok(total_supply_uref.into()) + } + + fn create_handle_payment(&self) -> Result> { + let handle_payment_payment_purse = self.create_purse(U512::zero())?; + let named_keys = { + let mut named_keys = NamedKeys::new(); + let named_key = Key::URef(handle_payment_payment_purse); + named_keys.insert(handle_payment::PAYMENT_PURSE_KEY.to_string(), named_key); + + // This purse is used only in FeeHandling::Accumulate setting. 
+ let accumulation_purse_uref = self.create_purse(U512::zero())?; + named_keys.insert( + ACCUMULATION_PURSE_KEY.to_string(), + accumulation_purse_uref.into(), + ); + named_keys + }; + + let entry_points = handle_payment::handle_payment_entry_points(); + + let contract_hash = self.store_system_contract( + named_keys, + entry_points, + EntityKind::System(SystemEntityType::HandlePayment), + )?; + + self.store_system_entity_registry(HANDLE_PAYMENT, contract_hash.value())?; + + Ok(contract_hash.value()) + } + + fn create_auction(&self, total_supply_key: Key) -> Result> { + let locked_funds_period_millis = self.config.locked_funds_period_millis(); + let auction_delay: u64 = self.config.auction_delay(); + let genesis_timestamp_millis: u64 = self.config.genesis_timestamp_millis(); + + let mut named_keys = NamedKeys::new(); + + let genesis_validators: Vec<_> = self.config.get_bonded_validators().collect(); + if (self.config.validator_slots() as usize) < genesis_validators.len() { + return Err(GenesisError::InvalidValidatorSlots { + validators: genesis_validators.len(), + validator_slots: self.config.validator_slots(), + } + .into()); + } + + let genesis_delegators: Vec<_> = self.config.get_bonded_delegators().collect(); + + // Make sure all delegators have corresponding genesis validator entries + for (validator_public_key, delegator_public_key, _, delegated_amount) in + genesis_delegators.iter() + { + if *delegated_amount == &Motes::zero() { + return Err(GenesisError::InvalidDelegatedAmount { + public_key: (*delegator_public_key).clone(), + } + .into()); + } + + let orphan_condition = genesis_validators.iter().find(|genesis_validator| { + genesis_validator.public_key() == (*validator_public_key).clone() + }); + + if orphan_condition.is_none() { + return Err(GenesisError::OrphanedDelegator { + validator_public_key: (*validator_public_key).clone(), + delegator_public_key: (*delegator_public_key).clone(), + } + .into()); + } + } + + let mut total_staked_amount = U512::zero(); 
+ + let staked = { + let mut staked: Staking = BTreeMap::new(); + + for genesis_validator in genesis_validators { + let public_key = genesis_validator.public_key(); + let mut delegators = BTreeMap::new(); + + let staked_amount = genesis_validator.staked_amount().value(); + if staked_amount.is_zero() { + return Err(GenesisError::InvalidBondAmount { public_key }.into()); + } + + let delegation_rate = genesis_validator.delegation_rate(); + if delegation_rate > DELEGATION_RATE_DENOMINATOR { + return Err(GenesisError::InvalidDelegationRate { + public_key, + delegation_rate, + } + .into()); + } + debug_assert_ne!(public_key, PublicKey::System); + + total_staked_amount += staked_amount; + + let purse_uref = self.create_purse(staked_amount)?; + let release_timestamp_millis = + genesis_timestamp_millis + locked_funds_period_millis; + let validator_bid = { + let bid = ValidatorBid::locked( + public_key.clone(), + purse_uref, + staked_amount, + delegation_rate, + release_timestamp_millis, + 0, + u64::MAX, + 0, + ); + + // Set up delegator entries attached to genesis validators + for ( + validator_public_key, + delegator_public_key, + _delegator_balance, + delegator_delegated_amount, + ) in genesis_delegators.iter() + { + if (*validator_public_key).clone() == public_key.clone() { + let purse_uref = + self.create_purse(delegator_delegated_amount.value())?; + + let delegator_kind: DelegatorKind = + (*delegator_public_key).clone().into(); + let delegator = DelegatorBid::locked( + delegator_kind.clone(), + delegator_delegated_amount.value(), + purse_uref, + (*validator_public_key).clone(), + release_timestamp_millis, + ); + + if delegators.insert(delegator_kind, delegator).is_some() { + return Err(GenesisError::DuplicatedDelegatorEntry { + validator_public_key: (*validator_public_key).clone(), + delegator_public_key: (*delegator_public_key).clone(), + } + .into()); + } + } + } + + bid + }; + + staked.insert(public_key, (validator_bid, delegators)); + } + staked + }; + + let _ = 
self.tracking_copy.borrow_mut().add( + total_supply_key, + StoredValue::CLValue( + CLValue::from_t(total_staked_amount) + .map_err(|_| GenesisError::CLValue(TOTAL_SUPPLY_KEY.to_string()))?, + ), + ); + + let initial_seigniorage_recipients = + self.initial_seigniorage_recipients(&staked, auction_delay); + + let era_id_uref = self + .address_generator + .borrow_mut() + .new_uref(AccessRights::READ_ADD_WRITE); + self.tracking_copy.borrow_mut().write( + era_id_uref.into(), + StoredValue::CLValue( + CLValue::from_t(INITIAL_ERA_ID) + .map_err(|_| GenesisError::CLValue(ERA_ID_KEY.to_string()))?, + ), + ); + named_keys.insert(ERA_ID_KEY.into(), era_id_uref.into()); + + let era_end_timestamp_millis_uref = self + .address_generator + .borrow_mut() + .new_uref(AccessRights::READ_ADD_WRITE); + self.tracking_copy.borrow_mut().write( + era_end_timestamp_millis_uref.into(), + StoredValue::CLValue( + CLValue::from_t(INITIAL_ERA_END_TIMESTAMP_MILLIS) + .map_err(|_| GenesisError::CLValue(ERA_END_TIMESTAMP_MILLIS_KEY.to_string()))?, + ), + ); + named_keys.insert( + ERA_END_TIMESTAMP_MILLIS_KEY.into(), + era_end_timestamp_millis_uref.into(), + ); + + let initial_seigniorage_recipients_uref = self + .address_generator + .borrow_mut() + .new_uref(AccessRights::READ_ADD_WRITE); + self.tracking_copy.borrow_mut().write( + initial_seigniorage_recipients_uref.into(), + StoredValue::CLValue(CLValue::from_t(initial_seigniorage_recipients).map_err( + |_| GenesisError::CLValue(SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY.to_string()), + )?), + ); + named_keys.insert( + SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY.into(), + initial_seigniorage_recipients_uref.into(), + ); + + // initialize snapshot version flag + let initial_seigniorage_recipients_version_uref = self + .address_generator + .borrow_mut() + .new_uref(AccessRights::READ_ADD_WRITE); + self.tracking_copy.borrow_mut().write( + initial_seigniorage_recipients_version_uref.into(), + StoredValue::CLValue( + 
CLValue::from_t(DEFAULT_SEIGNIORAGE_RECIPIENTS_SNAPSHOT_VERSION).map_err(|_| { + GenesisError::CLValue(SEIGNIORAGE_RECIPIENTS_SNAPSHOT_VERSION_KEY.to_string()) + })?, + ), + ); + + named_keys.insert( + SEIGNIORAGE_RECIPIENTS_SNAPSHOT_VERSION_KEY.into(), + initial_seigniorage_recipients_version_uref.into(), + ); + + // store all delegator and validator bids + for (validator_public_key, (validator_bid, delegators)) in staked { + for (delegator_kind, delegator_bid) in delegators { + let delegator_bid_key = Key::BidAddr(BidAddr::new_delegator_kind( + &validator_public_key, + &delegator_kind, + )); + self.tracking_copy.borrow_mut().write( + delegator_bid_key, + StoredValue::BidKind(BidKind::Delegator(Box::new(delegator_bid))), + ); + } + let validator_bid_key = Key::BidAddr(BidAddr::from(validator_public_key.clone())); + self.tracking_copy.borrow_mut().write( + validator_bid_key, + StoredValue::BidKind(BidKind::Validator(Box::new(validator_bid))), + ); + } + + let validator_slots = self.config.validator_slots(); + let validator_slots_uref = self + .address_generator + .borrow_mut() + .new_uref(AccessRights::READ_ADD_WRITE); + self.tracking_copy.borrow_mut().write( + validator_slots_uref.into(), + StoredValue::CLValue( + CLValue::from_t(validator_slots) + .map_err(|_| GenesisError::CLValue(VALIDATOR_SLOTS_KEY.to_string()))?, + ), + ); + named_keys.insert(VALIDATOR_SLOTS_KEY.into(), validator_slots_uref.into()); + + let auction_delay_uref = self + .address_generator + .borrow_mut() + .new_uref(AccessRights::READ_ADD_WRITE); + self.tracking_copy.borrow_mut().write( + auction_delay_uref.into(), + StoredValue::CLValue( + CLValue::from_t(auction_delay) + .map_err(|_| GenesisError::CLValue(AUCTION_DELAY_KEY.to_string()))?, + ), + ); + named_keys.insert(AUCTION_DELAY_KEY.into(), auction_delay_uref.into()); + + let locked_funds_period_uref = self + .address_generator + .borrow_mut() + .new_uref(AccessRights::READ_ADD_WRITE); + self.tracking_copy.borrow_mut().write( + 
locked_funds_period_uref.into(), + StoredValue::CLValue( + CLValue::from_t(locked_funds_period_millis) + .map_err(|_| GenesisError::CLValue(LOCKED_FUNDS_PERIOD_KEY.to_string()))?, + ), + ); + named_keys.insert( + LOCKED_FUNDS_PERIOD_KEY.into(), + locked_funds_period_uref.into(), + ); + + let unbonding_delay = self.config.unbonding_delay(); + let unbonding_delay_uref = self + .address_generator + .borrow_mut() + .new_uref(AccessRights::READ_ADD_WRITE); + self.tracking_copy.borrow_mut().write( + unbonding_delay_uref.into(), + StoredValue::CLValue( + CLValue::from_t(unbonding_delay) + .map_err(|_| GenesisError::CLValue(UNBONDING_DELAY_KEY.to_string()))?, + ), + ); + named_keys.insert(UNBONDING_DELAY_KEY.into(), unbonding_delay_uref.into()); + + let entry_points = auction::auction_entry_points(); + + let contract_hash = self.store_system_contract( + named_keys, + entry_points, + EntityKind::System(SystemEntityType::Auction), + )?; + + self.store_system_entity_registry(AUCTION, contract_hash.value())?; + + Ok(contract_hash.value()) + } + + pub fn create_accounts(&self, total_supply_key: Key) -> Result<(), Box> { + let accounts = { + let mut ret: Vec = self.config.accounts_iter().cloned().collect(); + let system_account = GenesisAccount::system(); + ret.push(system_account); + ret + }; + + let mut administrative_accounts = self.config.administrative_accounts().peekable(); + + if administrative_accounts.peek().is_some() + && administrative_accounts + .duplicates_by(|admin| admin.public_key()) + .next() + .is_some() + { + // Ensure no duplicate administrator accounts are specified as this might raise errors + // during genesis process when administrator accounts are added to associated keys. 
+ return Err(GenesisError::DuplicatedAdministratorEntry.into()); + } + + let mut total_supply = U512::zero(); + + for account in accounts { + let account_starting_balance = account.balance().value(); + + let main_purse = self.create_purse(account_starting_balance)?; + + self.store_addressable_entity( + EntityKind::Account(account.account_hash()), + NO_WASM, + None, + None, + main_purse, + )?; + + total_supply += account_starting_balance; + } + + self.tracking_copy.borrow_mut().write( + total_supply_key, + StoredValue::CLValue( + CLValue::from_t(total_supply) + .map_err(|_| GenesisError::CLValue(TOTAL_SUPPLY_KEY.to_string()))?, + ), + ); + + Ok(()) + } + + fn initial_seigniorage_recipients( + &self, + staked: &Staking, + auction_delay: u64, + ) -> BTreeMap { + let initial_snapshot_range = INITIAL_ERA_ID.iter_inclusive(auction_delay); + + let mut seigniorage_recipients = SeigniorageRecipientsV2::new(); + for (validator_public_key, (validator_bid, delegators)) in staked { + let mut delegator_stake = BTreeMap::new(); + for (k, v) in delegators { + delegator_stake.insert(k.clone(), v.staked_amount()); + } + let recipient = SeigniorageRecipientV2::new( + validator_bid.staked_amount(), + *validator_bid.delegation_rate(), + delegator_stake, + BTreeMap::new(), + ); + seigniorage_recipients.insert(validator_public_key.clone(), recipient); + } + + let mut initial_seigniorage_recipients = SeigniorageRecipientsSnapshotV2::new(); + for era_id in initial_snapshot_range { + initial_seigniorage_recipients.insert(era_id, seigniorage_recipients.clone()); + } + initial_seigniorage_recipients + } + + fn create_purse(&self, amount: U512) -> Result> { + let purse_addr = self.address_generator.borrow_mut().create_address(); + + let balance_cl_value = + CLValue::from_t(amount).map_err(|error| GenesisError::CLValue(error.to_string()))?; + self.tracking_copy.borrow_mut().write( + Key::Balance(purse_addr), + StoredValue::CLValue(balance_cl_value), + ); + + let purse_cl_value = 
CLValue::unit(); + let purse_uref = URef::new(purse_addr, AccessRights::READ_ADD_WRITE); + self.tracking_copy + .borrow_mut() + .write(Key::URef(purse_uref), StoredValue::CLValue(purse_cl_value)); + + Ok(purse_uref) + } + + fn store_system_contract( + &self, + named_keys: NamedKeys, + entry_points: EntryPoints, + contract_package_kind: EntityKind, + ) -> Result> { + self.store_addressable_entity( + contract_package_kind, + NO_WASM, + Some(named_keys), + Some(entry_points), + self.create_purse(U512::zero())?, + ) + } + + fn store_addressable_entity( + &self, + entity_kind: EntityKind, + no_wasm: bool, + maybe_named_keys: Option, + maybe_entry_points: Option, + main_purse: URef, + ) -> Result> { + let protocol_version = self.protocol_version; + let byte_code_hash = if no_wasm { + ByteCodeHash::new(DEFAULT_ADDRESS) + } else { + ByteCodeHash::new(self.address_generator.borrow_mut().new_hash_address()) + }; + + let entity_hash = match entity_kind { + EntityKind::System(_) | EntityKind::SmartContract(_) => { + AddressableEntityHash::new(self.address_generator.borrow_mut().new_hash_address()) + } + EntityKind::Account(account_hash) => { + if entity_kind.is_system_account() { + let entity_hash_addr = PublicKey::System.to_account_hash().value(); + AddressableEntityHash::new(entity_hash_addr) + } else { + AddressableEntityHash::new(account_hash.value()) + } + } + }; + + let entity_addr = match entity_kind.tag() { + EntityKindTag::System => EntityAddr::new_system(entity_hash.value()), + EntityKindTag::Account => EntityAddr::new_account(entity_hash.value()), + EntityKindTag::SmartContract => EntityAddr::new_smart_contract(entity_hash.value()), + }; + + let package_hash = PackageHash::new(self.address_generator.borrow_mut().new_hash_address()); + + let byte_code = ByteCode::new(ByteCodeKind::Empty, vec![]); + let associated_keys = entity_kind.associated_keys(); + let maybe_account_hash = entity_kind.maybe_account_hash(); + let named_keys = maybe_named_keys.unwrap_or_default(); 
+ + self.store_system_contract_named_keys(entity_hash, named_keys)?; + if let Some(entry_point) = maybe_entry_points { + self.store_system_entry_points(entity_hash, entry_point)?; + } + + let entity = AddressableEntity::new( + package_hash, + byte_code_hash, + protocol_version, + main_purse, + associated_keys, + ActionThresholds::default(), + entity_kind, + ); + + // Genesis contracts can be versioned contracts. + let contract_package = { + let mut package = Package::new( + EntityVersions::new(), + BTreeSet::default(), + Groups::default(), + PackageStatus::default(), + ); + package.insert_entity_version(protocol_version.value().major, entity_addr); + package + }; + + let byte_code_key = Key::ByteCode(ByteCodeAddr::Empty); + + self.tracking_copy + .borrow_mut() + .write(byte_code_key, StoredValue::ByteCode(byte_code)); + + let entity_key: Key = entity_addr.into(); + + self.tracking_copy + .borrow_mut() + .write(entity_key, StoredValue::AddressableEntity(entity)); + + self.tracking_copy.borrow_mut().write( + package_hash.into(), + StoredValue::SmartContract(contract_package), + ); + + if let Some(account_hash) = maybe_account_hash { + let entity_by_account = CLValue::from_t(entity_key) + .map_err(|error| GenesisError::CLValue(error.to_string()))?; + + self.tracking_copy.borrow_mut().write( + Key::Account(account_hash), + StoredValue::CLValue(entity_by_account), + ); + } + + Ok(entity_hash) + } + + fn store_system_contract_named_keys( + &self, + contract_hash: AddressableEntityHash, + named_keys: NamedKeys, + ) -> Result<(), Box> { + let entity_addr = EntityAddr::new_system(contract_hash.value()); + + for (string, key) in named_keys.iter() { + let named_key_entry = NamedKeyAddr::new_from_string(entity_addr, string.clone()) + .map_err(GenesisError::Bytesrepr)?; + + let named_key_value = NamedKeyValue::from_concrete_values(*key, string.clone()) + .map_err(|error| GenesisError::CLValue(error.to_string()))?; + + let entry_key = Key::NamedKey(named_key_entry); + + 
self.tracking_copy + .borrow_mut() + .write(entry_key, StoredValue::NamedKey(named_key_value)); + } + + Ok(()) + } + + fn store_system_entry_points( + &self, + contract_hash: AddressableEntityHash, + entry_points: EntryPoints, + ) -> Result<(), Box> { + let entity_addr = EntityAddr::new_system(contract_hash.value()); + + for entry_point in entry_points.take_entry_points() { + let entry_point_addr = + EntryPointAddr::new_v1_entry_point_addr(entity_addr, entry_point.name()) + .map_err(GenesisError::Bytesrepr)?; + self.tracking_copy.borrow_mut().write( + Key::EntryPoint(entry_point_addr), + StoredValue::EntryPoint(EntryPointValue::V1CasperVm(entry_point)), + ) + } + + Ok(()) + } + + fn store_system_entity_registry( + &self, + contract_name: &str, + contract_hash: HashAddr, + ) -> Result<(), Box> { + let partial_cl_registry = self + .tracking_copy + .borrow_mut() + .read(&Key::SystemEntityRegistry) + .map_err(|_| GenesisError::FailedToCreateSystemRegistry)? + .ok_or_else(|| { + GenesisError::CLValue("failed to convert registry as stored value".to_string()) + })? 
+ .into_cl_value() + .ok_or_else(|| GenesisError::CLValue("failed to convert to CLValue".to_string()))?; + let mut partial_registry = CLValue::into_t::(partial_cl_registry) + .map_err(|error| GenesisError::CLValue(error.to_string()))?; + partial_registry.insert(contract_name.to_string(), contract_hash); + let cl_registry = CLValue::from_t(partial_registry) + .map_err(|error| GenesisError::CLValue(error.to_string()))?; + self.tracking_copy + .borrow_mut() + .write(Key::SystemEntityRegistry, StoredValue::CLValue(cl_registry)); + Ok(()) + } + + fn store_chainspec_registry( + &self, + chainspec_registry: ChainspecRegistry, + ) -> Result<(), Box> { + if chainspec_registry.genesis_accounts_raw_hash().is_none() { + return Err(GenesisError::MissingChainspecRegistryEntry.into()); + } + let cl_value_registry = CLValue::from_t(chainspec_registry) + .map_err(|error| GenesisError::CLValue(error.to_string()))?; + + self.tracking_copy.borrow_mut().write( + Key::ChainspecRegistry, + StoredValue::CLValue(cl_value_registry), + ); + Ok(()) + } + + /// Writes a tracking record to global state for block time / genesis timestamp. + fn store_block_time(&self) -> Result<(), Box> { + let cl_value = CLValue::from_t(self.config.genesis_timestamp_millis()) + .map_err(|error| GenesisError::CLValue(error.to_string()))?; + + self.tracking_copy.borrow_mut().write( + Key::BlockGlobal(BlockGlobalAddr::BlockTime), + StoredValue::CLValue(cl_value), + ); + Ok(()) + } + + /// Performs a complete system installation. + pub fn install( + &mut self, + chainspec_registry: ChainspecRegistry, + ) -> Result<(), Box> { + // Setup system account + self.setup_system_account()?; + + // Create mint + let total_supply_key = self.create_mint()?; + + // Create all genesis accounts + self.create_accounts(total_supply_key)?; + + // Create the auction and setup the stake of all genesis validators. 
+ self.create_auction(total_supply_key)?; + + // Create handle payment + self.create_handle_payment()?; + + // Write chainspec registry. + self.store_chainspec_registry(chainspec_registry)?; + + // Write block time to global state + self.store_block_time()?; + + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use casper_types::AsymmetricType; + use rand::RngCore; + + use casper_types::{bytesrepr, SecretKey}; + + #[test] + fn bytesrepr_roundtrip() { + let mut rng = rand::thread_rng(); + let genesis_account: GenesisAccount = rng.gen(); + bytesrepr::test_serialization_roundtrip(&genesis_account); + } + + #[test] + fn system_account_bytesrepr_roundtrip() { + let genesis_account = GenesisAccount::system(); + + bytesrepr::test_serialization_roundtrip(&genesis_account); + } + + #[test] + fn genesis_account_bytesrepr_roundtrip() { + let mut rng = rand::thread_rng(); + let mut bytes = [0u8; 32]; + rng.fill_bytes(&mut bytes[..]); + let secret_key = SecretKey::ed25519_from_bytes(bytes).unwrap(); + let public_key: PublicKey = PublicKey::from(&secret_key); + + let genesis_account_1 = GenesisAccount::account(public_key.clone(), Motes::new(100), None); + + bytesrepr::test_serialization_roundtrip(&genesis_account_1); + + let genesis_account_2 = + GenesisAccount::account(public_key, Motes::new(100), Some(rng.gen())); + + bytesrepr::test_serialization_roundtrip(&genesis_account_2); + } + + #[test] + fn delegator_bytesrepr_roundtrip() { + let mut rng = rand::thread_rng(); + let mut validator_bytes = [0u8; 32]; + let mut delegator_bytes = [0u8; 32]; + rng.fill_bytes(&mut validator_bytes[..]); + rng.fill_bytes(&mut delegator_bytes[..]); + let validator_secret_key = SecretKey::ed25519_from_bytes(validator_bytes).unwrap(); + let delegator_secret_key = SecretKey::ed25519_from_bytes(delegator_bytes).unwrap(); + + let validator_public_key = PublicKey::from(&validator_secret_key); + let delegator_public_key = PublicKey::from(&delegator_secret_key); + + let genesis_account = 
GenesisAccount::delegator( + validator_public_key, + delegator_public_key, + Motes::new(100), + Motes::zero(), + ); + + bytesrepr::test_serialization_roundtrip(&genesis_account); + } + + #[test] + fn administrator_account_bytesrepr_roundtrip() { + let administrator_account = AdministratorAccount::new( + PublicKey::ed25519_from_bytes([123u8; 32]).unwrap(), + Motes::new(U512::MAX), + ); + bytesrepr::test_serialization_roundtrip(&administrator_account); + } +} diff --git a/storage/src/system/handle_payment.rs b/storage/src/system/handle_payment.rs new file mode 100644 index 0000000000..a093e18b6e --- /dev/null +++ b/storage/src/system/handle_payment.rs @@ -0,0 +1,111 @@ +mod handle_payment_native; +mod internal; +/// Provides mint logic for handle payment processing. +pub mod mint_provider; +/// Provides runtime logic for handle payment processing. +pub mod runtime_provider; +/// Provides storage logic for handle payment processing. +pub mod storage_provider; + +use casper_types::{ + system::handle_payment::{Error, REFUND_PURSE_KEY}, + AccessRights, PublicKey, URef, U512, +}; +use num_rational::Ratio; +use tracing::error; + +use crate::system::handle_payment::{ + mint_provider::MintProvider, runtime_provider::RuntimeProvider, + storage_provider::StorageProvider, +}; + +/// Handle payment functionality implementation. +pub trait HandlePayment: MintProvider + RuntimeProvider + StorageProvider + Sized { + /// Get payment purse. + fn get_payment_purse(&mut self) -> Result { + let purse = internal::get_payment_purse(self)?; + // Limit the access rights so only balance query and deposit are allowed. + Ok(URef::new(purse.addr(), AccessRights::READ_ADD)) + } + + /// Set refund purse. + fn set_refund_purse(&mut self, purse: URef) -> Result<(), Error> { + // make sure the passed uref is actually a purse... + // if it has a balance it is a purse and if not it isn't + let _balance = self.available_balance(purse)?; + internal::set_refund(self, purse) + } + + /// Get refund purse. 
+ fn get_refund_purse(&mut self) -> Result, Error> { + // We purposely choose to remove the access rights so that we do not + // accidentally give rights for a purse to some contract that is not + // supposed to have it. + let maybe_purse = internal::get_refund_purse(self)?; + Ok(maybe_purse.map(|p| p.remove_access_rights())) + } + + /// Clear refund purse. + fn clear_refund_purse(&mut self) -> Result<(), Error> { + if self.get_caller() != PublicKey::System.to_account_hash() { + error!("invalid caller to clear refund purse"); + return Err(Error::InvalidCaller); + } + + self.remove_key(REFUND_PURSE_KEY) + } + + /// Calculate overpayment and fees (if any) for payment finalization. + #[allow(clippy::too_many_arguments)] + fn calculate_overpayment_and_fee( + &mut self, + limit: U512, + gas_price: u8, + cost: U512, + consumed: U512, + source_purse: URef, + refund_ratio: Ratio, + ) -> Result<(U512, U512), Error> { + if self.get_caller() != PublicKey::System.to_account_hash() { + error!("invalid caller to calculate overpayment and fee"); + return Err(Error::InvalidCaller); + } + + let available_balance = match self.available_balance(source_purse)? { + Some(balance) => balance, + None => return Err(Error::PaymentPurseBalanceNotFound), + }; + internal::calculate_overpayment_and_fee( + limit, + gas_price, + cost, + consumed, + available_balance, + refund_ratio, + ) + } + + /// Distribute fees from an accumulation purse. + fn distribute_accumulated_fees( + &mut self, + source_uref: URef, + amount: Option, + ) -> Result<(), Error> { + if self.get_caller() != PublicKey::System.to_account_hash() { + error!("invalid caller to distribute accumulated fee"); + return Err(Error::InvalidCaller); + } + + internal::distribute_accumulated_fees(self, source_uref, amount) + } + + /// Burns the imputed amount from the imputed purse. 
+ fn payment_burn(&mut self, source_uref: URef, amount: Option) -> Result<(), Error> { + if self.get_caller() != PublicKey::System.to_account_hash() { + error!("invalid caller to payment burn"); + return Err(Error::InvalidCaller); + } + + internal::payment_burn(self, source_uref, amount) + } +} diff --git a/storage/src/system/handle_payment/handle_payment_native.rs b/storage/src/system/handle_payment/handle_payment_native.rs new file mode 100644 index 0000000000..4a40083934 --- /dev/null +++ b/storage/src/system/handle_payment/handle_payment_native.rs @@ -0,0 +1,267 @@ +use crate::{ + global_state::{error::Error as GlobalStateError, state::StateReader}, + system::{ + handle_payment::{ + mint_provider::MintProvider, runtime_provider::RuntimeProvider, + storage_provider::StorageProvider, HandlePayment, + }, + mint::Mint, + runtime_native::RuntimeNative, + }, + tracking_copy::TrackingCopyEntityExt, +}; +use casper_types::{ + account::AccountHash, + addressable_entity::{NamedKeyAddr, NamedKeyValue}, + system::handle_payment::Error, + AccessRights, CLValue, FeeHandling, GrantedAccess, Key, Phase, RefundHandling, StoredValue, + TransferredTo, URef, U512, +}; +use std::collections::BTreeSet; +use tracing::error; + +impl MintProvider for RuntimeNative +where + S: StateReader, +{ + fn transfer_purse_to_account( + &mut self, + source: URef, + target: AccountHash, + amount: U512, + ) -> Result { + let target_key = Key::Account(target); + let target_uref = match self.tracking_copy().borrow_mut().read(&target_key) { + Ok(Some(StoredValue::CLValue(cl_value))) => { + let entity_key = CLValue::into_t::(cl_value) + .map_err(|_| Error::FailedTransferToAccountPurse)?; + // get entity + let target_uref = { + if let Ok(Some(StoredValue::AddressableEntity(entity))) = + self.tracking_copy().borrow_mut().read(&entity_key) + { + entity.main_purse_add_only() + } else { + return Err(Error::Transfer); + } + }; + target_uref + } // entity exists + Ok(Some(StoredValue::Account(account))) => { + 
if self.config().enable_addressable_entity() { + self.tracking_copy() + .borrow_mut() + .migrate_account(target, self.protocol_version()) + .map_err(|_| Error::Transfer)?; + } + + account.main_purse_add_only() + } + Ok(_) | Err(_) => return Err(Error::Transfer), + }; + + // source and target are the same, noop + if source.with_access_rights(AccessRights::ADD) == target_uref { + return Ok(TransferredTo::ExistingAccount); + } + + // Temporarily grant ADD access to target if it is not already present. + let granted_access = self.access_rights_mut().grant_access(target_uref); + + let transfered = self + .transfer_purse_to_purse(source, target_uref, amount) + .is_ok(); + + // if ADD access was temporarily granted, remove it. + if let GrantedAccess::Granted { + uref_addr, + newly_granted_access_rights, + } = granted_access + { + self.access_rights_mut() + .remove_access(uref_addr, newly_granted_access_rights) + } + + if transfered { + Ok(TransferredTo::ExistingAccount) + } else { + Err(Error::Transfer) + } + } + + fn transfer_purse_to_purse( + &mut self, + source: URef, + target: URef, + amount: U512, + ) -> Result<(), Error> { + // system purses do not have holds on them + match self.transfer(None, source, target, amount, None) { + Ok(ret) => Ok(ret), + Err(err) => { + error!("{}", err); + Err(Error::Transfer) + } + } + } + + fn available_balance(&mut self, purse: URef) -> Result, Error> { + match ::balance(self, purse) { + Ok(ret) => Ok(ret), + Err(err) => { + error!("{}", err); + Err(Error::GetBalance) + } + } + } + + fn reduce_total_supply(&mut self, amount: U512) -> Result<(), Error> { + match ::reduce_total_supply(self, amount) { + Ok(ret) => Ok(ret), + Err(err) => { + error!("{}", err); + Err(Error::ReduceTotalSupply) + } + } + } +} + +impl RuntimeProvider for RuntimeNative +where + S: StateReader, +{ + fn get_key(&mut self, name: &str) -> Option { + self.named_keys().get(name).cloned() + } + + fn put_key(&mut self, name: &str, key: Key) -> Result<(), Error> { + 
let name = name.to_string(); + match self.context_key() { + Key::Account(_) | Key::Hash(_) => { + let name: String = name.clone(); + let value = CLValue::from_t((name.clone(), key)).map_err(|_| Error::PutKey)?; + let named_key_value = StoredValue::CLValue(value); + self.tracking_copy() + .borrow_mut() + .add(*self.context_key(), named_key_value) + .map_err(|_| Error::PutKey)?; + self.named_keys_mut().insert(name, key); + Ok(()) + } + Key::AddressableEntity(entity_addr) => { + let named_key_value = StoredValue::NamedKey( + NamedKeyValue::from_concrete_values(key, name.clone()) + .map_err(|_| Error::PutKey)?, + ); + let named_key_addr = NamedKeyAddr::new_from_string(*entity_addr, name.clone()) + .map_err(|_| Error::PutKey)?; + let named_key = Key::NamedKey(named_key_addr); + // write to both tracking copy and in-mem named keys cache + self.tracking_copy() + .borrow_mut() + .write(named_key, named_key_value); + self.named_keys_mut().insert(name, key); + Ok(()) + } + _ => Err(Error::UnexpectedKeyVariant), + } + } + + fn remove_key(&mut self, name: &str) -> Result<(), Error> { + self.named_keys_mut().remove(name); + match self.context_key() { + Key::AddressableEntity(entity_addr) => { + let named_key_addr = NamedKeyAddr::new_from_string(*entity_addr, name.to_string()) + .map_err(|_| Error::RemoveKey)?; + let key = Key::NamedKey(named_key_addr); + let value = self + .tracking_copy() + .borrow_mut() + .read(&key) + .map_err(|_| Error::RemoveKey)?; + if let Some(StoredValue::NamedKey(_)) = value { + self.tracking_copy().borrow_mut().prune(key); + } + } + Key::Hash(_) => { + let mut contract = self + .tracking_copy() + .borrow_mut() + .read(self.context_key()) + .map_err(|_| Error::RemoveKey)? + .ok_or(Error::RemoveKey)? + .as_contract() + .ok_or(Error::RemoveKey)? 
+ .clone(); + + if contract.remove_named_key(name).is_none() { + return Ok(()); + } + + self.tracking_copy() + .borrow_mut() + .write(*self.context_key(), StoredValue::Contract(contract)) + } + Key::Account(_) => { + let account = { + let mut account = match self + .tracking_copy() + .borrow_mut() + .read(self.context_key()) + .map_err(|_| Error::RemoveKey)? + { + Some(StoredValue::Account(account)) => account, + Some(_) | None => return Err(Error::UnexpectedKeyVariant), + }; + account.named_keys_mut().remove(name); + account + }; + self.tracking_copy() + .borrow_mut() + .write(*self.context_key(), StoredValue::Account(account)); + } + _ => return Err(Error::UnexpectedKeyVariant), + } + + Ok(()) + } + + fn get_phase(&self) -> Phase { + self.phase() + } + + fn get_caller(&self) -> AccountHash { + self.address() + } + + fn refund_handling(&self) -> RefundHandling { + *self.config().refund_handling() + } + + fn fee_handling(&self) -> FeeHandling { + *self.config().fee_handling() + } + + fn administrative_accounts(&self) -> BTreeSet { + self.transfer_config().administrative_accounts() + } +} + +impl StorageProvider for RuntimeNative +where + S: StateReader, +{ + fn write_balance(&mut self, purse_uref: URef, amount: U512) -> Result<(), Error> { + let cl_value = CLValue::from_t(amount).map_err(|_| Error::Storage)?; + self.tracking_copy().borrow_mut().write( + Key::Balance(purse_uref.addr()), + StoredValue::CLValue(cl_value), + ); + Ok(()) + } +} + +impl HandlePayment for RuntimeNative where + S: StateReader +{ +} diff --git a/storage/src/system/handle_payment/internal.rs b/storage/src/system/handle_payment/internal.rs new file mode 100644 index 0000000000..b6dbf33dba --- /dev/null +++ b/storage/src/system/handle_payment/internal.rs @@ -0,0 +1,347 @@ +use super::{ + mint_provider::MintProvider, runtime_provider::RuntimeProvider, + storage_provider::StorageProvider, +}; +use casper_types::{ + system::handle_payment::{Error, PAYMENT_PURSE_KEY, REFUND_PURSE_KEY}, + Key, 
Phase, URef, U512, +}; +use num::CheckedMul; +use num_rational::Ratio; +use num_traits::Zero; + +/// Returns the purse for accepting payment for transactions. +pub fn get_payment_purse(runtime_provider: &mut R) -> Result { + match runtime_provider.get_key(PAYMENT_PURSE_KEY) { + Some(Key::URef(uref)) => Ok(uref), + Some(_) => Err(Error::PaymentPurseKeyUnexpectedType), + None => Err(Error::PaymentPurseNotFound), + } +} + +/// Sets the purse where refunds (excess funds not spent to pay for computation) will be sent. +/// Note that if this function is never called, the default location is the main purse of the +/// deployer's account. +pub fn set_refund(runtime_provider: &mut R, purse: URef) -> Result<(), Error> { + if let Phase::Payment = runtime_provider.get_phase() { + runtime_provider.put_key(REFUND_PURSE_KEY, Key::URef(purse))?; + return Ok(()); + } + Err(Error::SetRefundPurseCalledOutsidePayment) +} + +/// Returns the currently set refund purse. +pub fn get_refund_purse( + runtime_provider: &mut R, +) -> Result, Error> { + match runtime_provider.get_key(REFUND_PURSE_KEY) { + Some(Key::URef(uref)) => Ok(Some(uref)), + Some(_) => Err(Error::RefundPurseKeyUnexpectedType), + None => Ok(None), + } +} + +/// Returns tuple where 1st element is the portion of unspent payment (if any), and the 2nd element +/// is the fee (if any). +/// +/// # Note +/// +/// Any dust amounts are added to the fee. +pub fn calculate_overpayment_and_fee( + limit: U512, + gas_price: u8, + cost: U512, + consumed: U512, + available_balance: U512, + refund_ratio: Ratio, +) -> Result<(U512, U512), Error> { + /* + cost is limit * price, unused = limit - consumed + base refund is unused * price + refund rate is a percentage ranging from 0% to 100% + actual refund = base refund * refund rate + i.e. 
if rate == 100%, actual refund == base refund + if rate = 0%, actual refund = 0 (and we can skip refund processing) + EXAMPLE 1 + limit = 500, consumed = 450, price = 2, refund rate = 100% + cost = limit * price == 1000 + unused = limit - consumed == 50 + base refund = unused * price == 100 + actual refund = base refund * refund rate == 100 + + EXAMPLE 2 + limit = 5000, consumed = 0, price = 5, refund rate = 50% + cost = limit * price == 25000 + unused = limit - consumed == 5000 + base refund = unused * price == 25000 + actual refund = base refund * refund rate == 12500 + + Complicating factors: + if the source purse does not have enough to cover the cost, their available balance is taken + and there is no refund + if the refund rate is 0%, there is no refund (although it would be bizarre for a network to + run with RefundHandling turned on but with a 0% rate, they are technically independent + settings and thus the logic must account for the possibility) + cost might be higher than limit * price if additional costs have been incurred. + as the refund calculation is based on paid for but unused gas, such additional costs + are not subject to refund. This is handled by this logic correctly, but tests over logic + that incurs any additional costs need to use actual discrete variables for each value + and not assume limit * price == cost + */ + if available_balance < cost { + return Ok((U512::zero(), available_balance)); + } + if refund_ratio.is_zero() { + return Ok((U512::zero(), cost)); + } + let unspent = limit.saturating_sub(consumed); + if unspent == U512::zero() { + return Ok((U512::zero(), cost)); + } + let base_refund = unspent * gas_price; + + let adjusted_refund = Ratio::from(base_refund) + .checked_mul(&refund_ratio) + .ok_or(Error::ArithmeticOverflow)? 
+ .to_integer(); + + let fee = cost + .checked_sub(adjusted_refund) + .ok_or(Error::ArithmeticOverflow)?; + + Ok((adjusted_refund, fee)) +} + +pub fn payment_burn( + provider: &mut P, + purse: URef, + amount: Option, +) -> Result<(), Error> { + let available_balance = match provider.available_balance(purse)? { + Some(balance) => balance, + None => return Err(Error::PaymentPurseBalanceNotFound), + }; + let burn_amount = amount.unwrap_or(available_balance); + if burn_amount.is_zero() { + // nothing to burn == noop + return Ok(()); + } + // Reduce the source purse and total supply by the refund amount + let adjusted_balance = available_balance + .checked_sub(burn_amount) + .ok_or(Error::ArithmeticOverflow)?; + provider.write_balance(purse, adjusted_balance)?; + provider.reduce_total_supply(burn_amount)?; + Ok(()) +} + +/// This function distributes the fees according to the fee handling config. +/// +/// NOTE: If a network is not configured for fee accumulation, this method will error if called. +pub fn distribute_accumulated_fees

( + provider: &mut P, + source_uref: URef, + amount: Option, +) -> Result<(), Error> +where + P: RuntimeProvider + MintProvider, +{ + let fee_handling = provider.fee_handling(); + if !fee_handling.is_accumulate() { + return Err(Error::IncompatiblePaymentSettings); + } + + let administrative_accounts = provider.administrative_accounts(); + let reward_recipients = U512::from(administrative_accounts.len()); + + let distribute_amount = match amount { + Some(amount) => amount, + None => provider.available_balance(source_uref)?.unwrap_or_default(), + }; + + if distribute_amount.is_zero() { + return Ok(()); + } + + let portion = distribute_amount + .checked_div(reward_recipients) + .unwrap_or_else(U512::zero); + + if !portion.is_zero() { + for target in administrative_accounts { + provider.transfer_purse_to_account(source_uref, target, portion)?; + } + } + + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::*; + + // both burn and refund use the same basic calculation for + // overpayment / unspent vs fee...the only difference is + // what is done with the overage _after_ the calculation + // refund returns it to payer, while burn destroys it + + #[test] + fn should_calculate_expected_amounts() { + let limit = U512::from(6u64); + let gas_price = 1; + let cost = limit; + let consumed = U512::from(3u64); + let available = U512::from(10u64); + + let (overpay, fee) = calculate_overpayment_and_fee( + limit, + gas_price, + cost, + consumed, + available, + Ratio::new_raw(U512::from(1), U512::from(1)), + ) + .unwrap(); + + let unspent = limit.saturating_sub(consumed); + let expected = unspent; + assert_eq!(expected, overpay, "overpay"); + let expected_fee = consumed; + assert_eq!(expected_fee, fee, "fee"); + } + + #[test] + fn should_handle_straight_percentages() { + let limit = U512::from(100u64); + let gas_price = 1; + let cost = limit; + let consumed = U512::from(50u64); + let available = U512::from(1000u64); + let denom = 100; + + for numer in 0..=denom { + let refund_ratio 
= Ratio::new_raw(U512::from(numer), U512::from(denom)); + let (overpay, fee) = calculate_overpayment_and_fee( + limit, + gas_price, + cost, + consumed, + available, + refund_ratio, + ) + .unwrap(); + + let unspent = limit.saturating_sub(consumed); + let expected = Ratio::from(unspent) + .checked_mul(&refund_ratio) + .ok_or(Error::ArithmeticOverflow) + .expect("should math") + .to_integer(); + assert_eq!(expected, overpay, "overpay"); + let expected_fee = limit - expected; + assert_eq!(expected_fee, fee, "fee"); + } + } + + #[test] + fn should_roll_over_dust() { + let limit = U512::from(6u64); + let gas_price = 1; + let cost = limit; + let consumed = U512::from(3u64); + let available = U512::from(10u64); + + for percentage in 0..=100 { + let refund_ratio = Ratio::new_raw(U512::from(percentage), U512::from(100)); + + let (overpay, fee) = calculate_overpayment_and_fee( + limit, + gas_price, + cost, + consumed, + available, + refund_ratio, + ) + .expect("should have overpay and fee"); + + let a = Ratio::from(overpay); + let b = Ratio::from(fee); + + assert_eq!(a + b, Ratio::from(cost), "{}", percentage); + } + } + + #[test] + fn should_take_all_of_insufficient_balance() { + let limit = U512::from(6u64); + let gas_price = 1; + let cost = limit; + let consumed = U512::from(3u64); + let available = U512::from(5u64); + + let (overpay, fee) = calculate_overpayment_and_fee( + limit, + gas_price, + cost, + consumed, + available, + Ratio::new_raw(U512::from(1), U512::from(1)), + ) + .unwrap(); + + assert_eq!(U512::zero(), overpay, "overpay"); + let expected = available; + assert_eq!(expected, fee, "fee"); + } + + #[test] + fn should_handle_non_1_gas_price() { + let limit = U512::from(6u64); + let gas_price = 2; + let cost = limit * gas_price; + let consumed = U512::from(3u64); + let available = U512::from(12u64); + + let (overpay, fee) = calculate_overpayment_and_fee( + limit, + gas_price, + cost, + consumed, + available, + Ratio::new_raw(U512::from(1), U512::from(1)), + ) + 
.unwrap(); + + let unspent = limit.saturating_sub(consumed); + let expected = unspent * gas_price; + assert_eq!(expected, overpay, "overpay"); + let expected_fee = consumed * gas_price; + assert_eq!(expected_fee, fee, "fee"); + } + + #[test] + fn should_handle_extra_cost() { + let limit = U512::from(6u64); + let gas_price = 2; + let extra_cost = U512::from(1u64); + let cost = limit * gas_price + extra_cost; + let consumed = U512::from(3u64); + let available = U512::from(21u64); + + let (overpay, fee) = calculate_overpayment_and_fee( + limit, + gas_price, + cost, + consumed, + available, + Ratio::new_raw(U512::from(1), U512::from(1)), + ) + .unwrap(); + + let unspent = limit.saturating_sub(consumed); + let expected = unspent * gas_price; + assert_eq!(expected, overpay, "overpay"); + let expected_fee = consumed * gas_price + extra_cost; + assert_eq!(expected_fee, fee, "fee"); + } +} diff --git a/storage/src/system/handle_payment/mint_provider.rs b/storage/src/system/handle_payment/mint_provider.rs new file mode 100644 index 0000000000..1096e428f2 --- /dev/null +++ b/storage/src/system/handle_payment/mint_provider.rs @@ -0,0 +1,34 @@ +use casper_types::{ + account::AccountHash, system::handle_payment::Error, TransferredTo, URef, U512, +}; + +/// Provides an access to mint. +pub trait MintProvider { + /// Transfer `amount` from `source` purse to a `target` account. + /// Note: the source should always be a system purse of some kind, + /// such as the payment purse or an accumulator purse. + /// The target should be the recipient of a refund or a reward + fn transfer_purse_to_account( + &mut self, + source: URef, + target: AccountHash, + amount: U512, + ) -> Result; + + /// Transfer `amount` from `source` purse to a `target` purse. + /// Note: the source should always be a system purse of some kind, + /// such as the payment purse or an accumulator purse. 
+ /// The target should be the recipient of a refund or a reward + fn transfer_purse_to_purse( + &mut self, + source: URef, + target: URef, + amount: U512, + ) -> Result<(), Error>; + + /// Checks balance of a `purse`. Returns `None` if given purse does not exist. + fn available_balance(&mut self, purse: URef) -> Result, Error>; + + /// Reduce total supply by `amount`. + fn reduce_total_supply(&mut self, amount: U512) -> Result<(), Error>; +} diff --git a/storage/src/system/handle_payment/runtime_provider.rs b/storage/src/system/handle_payment/runtime_provider.rs new file mode 100644 index 0000000000..b504605f13 --- /dev/null +++ b/storage/src/system/handle_payment/runtime_provider.rs @@ -0,0 +1,32 @@ +use std::collections::BTreeSet; + +use casper_types::{ + account::AccountHash, system::handle_payment::Error, FeeHandling, Key, Phase, RefundHandling, +}; + +/// Provider of runtime host functionality. +pub trait RuntimeProvider { + /// Get named key under a `name`. + fn get_key(&mut self, name: &str) -> Option; + + /// Put key under a `name`. + fn put_key(&mut self, name: &str, key: Key) -> Result<(), Error>; + + /// Remove a named key by `name`. + fn remove_key(&mut self, name: &str) -> Result<(), Error>; + + /// Get current execution phase. + fn get_phase(&self) -> Phase; + + /// Get caller. + fn get_caller(&self) -> AccountHash; + + /// Get refund handling. + fn refund_handling(&self) -> RefundHandling; + + /// Returns fee handling value. + fn fee_handling(&self) -> FeeHandling; + + /// Returns list of administrative accounts. + fn administrative_accounts(&self) -> BTreeSet; +} diff --git a/storage/src/system/handle_payment/storage_provider.rs b/storage/src/system/handle_payment/storage_provider.rs new file mode 100644 index 0000000000..74712012cd --- /dev/null +++ b/storage/src/system/handle_payment/storage_provider.rs @@ -0,0 +1,9 @@ +use casper_types::{URef, U512}; + +use crate::system::handle_payment::Error; + +/// Provider of storage functionality. 
+pub trait StorageProvider { + /// Write new balance. + fn write_balance(&mut self, purse_uref: URef, amount: U512) -> Result<(), Error>; +} diff --git a/storage/src/system/mint.rs b/storage/src/system/mint.rs new file mode 100644 index 0000000000..dbc2e5d6d0 --- /dev/null +++ b/storage/src/system/mint.rs @@ -0,0 +1,324 @@ +pub(crate) mod detail; +/// Provides native mint processing. +mod mint_native; +/// Provides runtime logic for mint processing. +pub mod runtime_provider; +/// Provides storage logic for mint processing. +pub mod storage_provider; +/// Provides system logic for mint processing. +pub mod system_provider; + +use num_rational::Ratio; +use num_traits::CheckedMul; + +use casper_types::{ + account::AccountHash, + system::{ + mint::{Error, ROUND_SEIGNIORAGE_RATE_KEY, TOTAL_SUPPLY_KEY}, + Caller, + }, + Key, PublicKey, URef, U512, +}; + +use crate::system::mint::{ + runtime_provider::RuntimeProvider, storage_provider::StorageProvider, + system_provider::SystemProvider, +}; + +/// Mint trait. +pub trait Mint: RuntimeProvider + StorageProvider + SystemProvider { + /// Mint new token with given `initial_balance` balance. Returns new purse on success, otherwise + /// an error. 
+ fn mint(&mut self, initial_balance: U512) -> Result { + let caller = self.get_caller(); + let is_empty_purse = initial_balance.is_zero(); + if !is_empty_purse && caller != PublicKey::System.to_account_hash() { + return Err(Error::InvalidNonEmptyPurseCreation); + } + + let purse_uref: URef = self.new_uref(())?; + self.write_balance(purse_uref, initial_balance)?; + + if !is_empty_purse { + // get total supply uref if exists, otherwise error + let total_supply_uref = match self.get_key(TOTAL_SUPPLY_KEY) { + None => { + // total supply URef should exist due to genesis + return Err(Error::TotalSupplyNotFound); + } + Some(Key::URef(uref)) => uref, + Some(_) => return Err(Error::MissingKey), + }; + // increase total supply + self.add(total_supply_uref, initial_balance)?; + } + + Ok(purse_uref) + } + + /// Burns native tokens. + fn burn(&mut self, purse: URef, amount: U512) -> Result<(), Error> { + if !purse.is_writeable() { + return Err(Error::InvalidAccessRights); + } + if !self.is_valid_uref(&purse) { + return Err(Error::ForgedReference); + } + + let source_available_balance: U512 = match self.balance(purse)? { + Some(source_balance) => source_balance, + None => return Err(Error::PurseNotFound), + }; + + let new_balance = source_available_balance + .checked_sub(amount) + .unwrap_or_else(U512::zero); + // change balance + self.write_balance(purse, new_balance)?; + // reduce total supply AFTER changing balance in case changing balance errors + let burned_amount = source_available_balance.saturating_sub(new_balance); + detail::reduce_total_supply_unsafe(self, burned_amount) + } + + /// Reduce total supply by `amount`. Returns unit on success, otherwise + /// an error. 
+ fn reduce_total_supply(&mut self, amount: U512) -> Result<(), Error> { + // only system may reduce total supply + let caller = self.get_caller(); + if caller != PublicKey::System.to_account_hash() { + return Err(Error::InvalidTotalSupplyReductionAttempt); + } + + detail::reduce_total_supply_unsafe(self, amount) + } + + /// Read balance of given `purse`. + fn balance(&mut self, purse: URef) -> Result, Error> { + match self.available_balance(purse)? { + some @ Some(_) => Ok(some), + None => Err(Error::PurseNotFound), + } + } + + /// Transfers `amount` of tokens from `source` purse to a `target` purse. + fn transfer( + &mut self, + maybe_to: Option, + source: URef, + target: URef, + amount: U512, + id: Option, + ) -> Result<(), Error> { + if !self.allow_unrestricted_transfers() { + let registry = self + .get_system_entity_registry() + .map_err(|_| Error::UnableToGetSystemRegistry)?; + let immediate_caller = self.get_immediate_caller(); + match immediate_caller { + Some(Caller::Entity { entity_addr, .. }) + if registry.exists(&entity_addr.value()) => + { + // System contract calling a mint is fine (i.e. standard payment calling mint's + // transfer) + } + + Some(Caller::Initiator { account_hash: _ }) + if self.is_called_from_standard_payment() => + { + // Standard payment acts as a session without separate stack frame and calls + // into mint's transfer. + } + + Some(Caller::Initiator { account_hash }) + if account_hash == PublicKey::System.to_account_hash() => + { + // System calls a session code. 
+ } + + Some(Caller::Initiator { account_hash }) => { + // For example: a session using transfer host functions, or calling the mint's + // entrypoint directly + let is_source_admin = self.is_administrator(&account_hash); + match maybe_to { + Some(to) => { + let maybe_account = self.runtime_footprint_by_account_hash(to); + + match maybe_account { + Ok(Some(runtime_footprint)) => { + // This can happen when user tries to transfer funds by + // calling mint + // directly but tries to specify wrong account hash. + let addr = if let Some(uref) = runtime_footprint.main_purse() { + uref.addr() + } else { + return Err(Error::InvalidContext); + }; + + if addr != target.addr() { + return Err(Error::DisabledUnrestrictedTransfers); + } + let is_target_system_account = + to == PublicKey::System.to_account_hash(); + let is_target_administrator = self.is_administrator(&to); + if !(is_source_admin + || is_target_system_account + || is_target_administrator) + { + return Err(Error::DisabledUnrestrictedTransfers); + } + } + Ok(None) => { + // `to` is specified, but no new account is persisted + // yet. Only + // administrators can do that and it is also validated + // at the host function level. 
+ if !is_source_admin { + return Err(Error::DisabledUnrestrictedTransfers); + } + } + Err(_) => { + return Err(Error::Storage); + } + } + } + None => { + if !is_source_admin { + return Err(Error::DisabledUnrestrictedTransfers); + } + } + } + } + + Some(Caller::Entity { + package_hash: _, + entity_addr: _, + }) => { + if self.get_caller() != PublicKey::System.to_account_hash() + && !self.is_administrator(&self.get_caller()) + { + return Err(Error::DisabledUnrestrictedTransfers); + } + } + + Some(Caller::SmartContract { + contract_package_hash: _, + contract_hash: _, + }) => { + if self.get_caller() != PublicKey::System.to_account_hash() + && !self.is_administrator(&self.get_caller()) + { + return Err(Error::DisabledUnrestrictedTransfers); + } + } + + None => { + // There's always an immediate caller, but we should return something. + return Err(Error::DisabledUnrestrictedTransfers); + } + } + } + + if !source.is_writeable() || !target.is_addable() { + // TODO: I don't think we should enforce is addable on the target + // Unlike other uses of URefs (such as a counter), in this context the value represents + // a deposit of token. Generally, deposit of a desirable resource is permissive. + return Err(Error::InvalidAccessRights); + } + let source_available_balance: U512 = match self.available_balance(source)? 
{ + Some(source_balance) => source_balance, + None => return Err(Error::SourceNotFound), + }; + if amount > source_available_balance { + // NOTE: we use AVAILABLE balance to check sufficient funds + return Err(Error::InsufficientFunds); + } + let source_total_balance = self.total_balance(source)?; + if source_available_balance > source_total_balance { + panic!("available balance can never be greater than total balance"); + } + if self.available_balance(target)?.is_none() { + return Err(Error::DestNotFound); + } + let addr = match self.get_main_purse() { + None => return Err(Error::InvalidURef), + Some(uref) => uref.addr(), + }; + if self.get_caller() != PublicKey::System.to_account_hash() && addr == source.addr() { + if amount > self.get_approved_spending_limit() { + return Err(Error::UnapprovedSpendingAmount); + } + self.sub_approved_spending_limit(amount); + } + + // NOTE: we use TOTAL balance to determine new balance + let new_balance = source_total_balance.saturating_sub(amount); + self.write_balance(source, new_balance)?; + self.add_balance(target, amount)?; + self.record_transfer(maybe_to, source, target, amount, id)?; + Ok(()) + } + + /// Retrieves the base round reward. + fn read_base_round_reward(&mut self) -> Result { + let total_supply_uref = match self.get_key(TOTAL_SUPPLY_KEY) { + Some(Key::URef(uref)) => uref, + Some(_) => return Err(Error::MissingKey), + None => return Err(Error::MissingKey), + }; + let total_supply: U512 = self + .read(total_supply_uref)? + .ok_or(Error::TotalSupplyNotFound)?; + + let round_seigniorage_rate_uref = match self.get_key(ROUND_SEIGNIORAGE_RATE_KEY) { + Some(Key::URef(uref)) => uref, + Some(_) => return Err(Error::MissingKey), + None => return Err(Error::MissingKey), + }; + let round_seigniorage_rate: Ratio = self + .read(round_seigniorage_rate_uref)? 
+ .ok_or(Error::TotalSupplyNotFound)?; + + round_seigniorage_rate + .checked_mul(&Ratio::from(total_supply)) + .map(|ratio| ratio.to_integer()) + .ok_or(Error::ArithmeticOverflow) + } + + /// Mint `amount` new token into `existing_purse`. + /// Returns unit on success, otherwise an error. + fn mint_into_existing_purse( + &mut self, + existing_purse: URef, + amount: U512, + ) -> Result<(), Error> { + let caller = self.get_caller(); + if caller != PublicKey::System.to_account_hash() { + return Err(Error::InvalidContext); + } + if amount.is_zero() { + // treat as noop + return Ok(()); + } + if !self.purse_exists(existing_purse)? { + return Err(Error::PurseNotFound); + } + self.add_balance(existing_purse, amount)?; + // get total supply uref if exists, otherwise error. + let total_supply_uref = match self.get_key(TOTAL_SUPPLY_KEY) { + None => { + // total supply URef should exist due to genesis + // which obviously must have been called + // before new rewards are minted at the end of an era + return Err(Error::TotalSupplyNotFound); + } + Some(Key::URef(uref)) => uref, + Some(_) => return Err(Error::MissingKey), + }; + // increase total supply + self.add(total_supply_uref, amount)?; + Ok(()) + } + + /// Check if a purse exists. + fn purse_exists(&mut self, uref: URef) -> Result; +} diff --git a/storage/src/system/mint/detail.rs b/storage/src/system/mint/detail.rs new file mode 100644 index 0000000000..6fac3af2dc --- /dev/null +++ b/storage/src/system/mint/detail.rs @@ -0,0 +1,39 @@ +use casper_types::{ + system::{ + mint, + mint::{Error, TOTAL_SUPPLY_KEY}, + }, + Key, U512, +}; + +use crate::system::mint::Mint; + +// Please do not expose this to the user! +pub(crate) fn reduce_total_supply_unsafe

(mint: &mut P, amount: U512) -> Result<(), mint::Error> +where + P: Mint + ?Sized, +{ + if amount.is_zero() { + return Ok(()); // no change to supply + } + + // get total supply or error + let total_supply_uref = match mint.get_key(TOTAL_SUPPLY_KEY) { + Some(Key::URef(uref)) => uref, + Some(_) => return Err(Error::MissingKey), + None => return Err(Error::MissingKey), + }; + let total_supply: U512 = mint + .read(total_supply_uref)? + .ok_or(Error::TotalSupplyNotFound)?; + + // decrease total supply + let reduced_total_supply = total_supply + .checked_sub(amount) + .ok_or(Error::ArithmeticOverflow)?; + + // update total supply + mint.write_amount(total_supply_uref, reduced_total_supply)?; + + Ok(()) +} diff --git a/storage/src/system/mint/mint_native.rs b/storage/src/system/mint/mint_native.rs new file mode 100644 index 0000000000..a722ab0149 --- /dev/null +++ b/storage/src/system/mint/mint_native.rs @@ -0,0 +1,274 @@ +use tracing::error; + +use crate::{ + global_state::{error::Error as GlobalStateError, state::StateReader}, + system::{ + error::ProviderError, + mint::{ + runtime_provider::RuntimeProvider, storage_provider::StorageProvider, + system_provider::SystemProvider, Mint, + }, + runtime_native::{Id, RuntimeNative}, + }, + tracking_copy::{TrackingCopyEntityExt, TrackingCopyExt}, +}; +use casper_types::{ + account::AccountHash, + bytesrepr::{FromBytes, ToBytes}, + system::{mint::Error, Caller}, + AccessRights, CLTyped, CLValue, Gas, InitiatorAddr, Key, Phase, PublicKey, RuntimeFootprint, + StoredValue, SystemHashRegistry, Transfer, TransferV2, URef, U512, +}; + +impl RuntimeProvider for RuntimeNative +where + S: StateReader, +{ + fn get_caller(&self) -> AccountHash { + self.address() + } + + fn get_immediate_caller(&self) -> Option { + let caller = Caller::Initiator { + account_hash: PublicKey::System.to_account_hash(), + }; + Some(caller) + } + + fn is_called_from_standard_payment(&self) -> bool { + self.phase() == Phase::Payment + } + + fn 
get_system_entity_registry(&self) -> Result { + self.tracking_copy() + .borrow_mut() + .get_system_entity_registry() + .map_err(|tce| { + error!(%tce, "unable to obtain system entity registry during transfer"); + ProviderError::SystemEntityRegistry + }) + } + + fn runtime_footprint_by_account_hash( + &mut self, + account_hash: AccountHash, + ) -> Result, ProviderError> { + match self + .tracking_copy() + .borrow_mut() + .runtime_footprint_by_account_hash(self.protocol_version(), account_hash) + { + Ok((_, footprint)) => Ok(Some(footprint)), + Err(tce) => { + error!(%tce, "error reading addressable entity by account hash"); + Err(ProviderError::AccountHash(account_hash)) + } + } + } + + fn get_phase(&self) -> Phase { + self.phase() + } + + fn get_key(&self, name: &str) -> Option { + self.named_keys().get(name).cloned() + } + + fn get_approved_spending_limit(&self) -> U512 { + self.remaining_spending_limit() + } + + fn sub_approved_spending_limit(&mut self, amount: U512) { + if let Some(remaining) = self.remaining_spending_limit().checked_sub(amount) { + self.set_remaining_spending_limit(remaining); + } else { + error!( + limit = %self.remaining_spending_limit(), + spent = %amount, + "exceeded main purse spending limit" + ); + self.set_remaining_spending_limit(U512::zero()); + } + } + + fn get_main_purse(&self) -> Option { + self.runtime_footprint().main_purse() + } + + fn is_administrator(&self, account_hash: &AccountHash) -> bool { + self.transfer_config().is_administrator(account_hash) + } + + fn allow_unrestricted_transfers(&self) -> bool { + self.transfer_config().allow_unrestricted_transfers() + } + + fn is_valid_uref(&self, uref: &URef) -> bool { + self.access_rights().has_access_rights_to_uref(uref) + } +} + +impl StorageProvider for RuntimeNative +where + S: StateReader, +{ + fn new_uref(&mut self, value: T) -> Result { + let cl_value: CLValue = CLValue::from_t(value).map_err(|_| Error::CLValue)?; + let uref = self + .address_generator() + .write() + 
.new_uref(AccessRights::READ_ADD_WRITE); + self.extend_access_rights(&[uref]); + // we are creating this key now, thus we know it is a Key::URef and we grant the creator + // full permissions on it, thus we do not need to do validate key / validate uref access + // before storing it. + self.tracking_copy() + .borrow_mut() + .write(Key::URef(uref), StoredValue::CLValue(cl_value)); + Ok(uref) + } + + fn read(&mut self, uref: URef) -> Result, Error> { + // check access rights on uref + if !self.access_rights().has_access_rights_to_uref(&uref) { + return Err(Error::ForgedReference); + } + let key = &Key::URef(uref); + let stored_value = match self.tracking_copy().borrow_mut().read(key) { + Ok(Some(stored_value)) => stored_value, + Ok(None) => return Ok(None), + Err(_) => return Err(Error::Storage), + }; + // by convention, we only store CLValues under Key::URef + if let StoredValue::CLValue(value) = stored_value { + // Only CLTyped instances should be stored as a CLValue. + let value = CLValue::into_t(value).map_err(|_| Error::CLValue)?; + Ok(Some(value)) + } else { + Err(Error::CLValue) + } + } + + fn write_amount(&mut self, uref: URef, amount: U512) -> Result<(), Error> { + let cl_value = CLValue::from_t(amount).map_err(|_| Error::CLValue)?; + // is the uref writeable? 
+ if !uref.is_writeable() { + return Err(Error::Storage); + } + // check access rights on uref + if !self.access_rights().has_access_rights_to_uref(&uref) { + return Err(Error::ForgedReference); + } + self.tracking_copy() + .borrow_mut() + .write(Key::URef(uref), StoredValue::CLValue(cl_value)); + Ok(()) + } + + fn add(&mut self, uref: URef, value: T) -> Result<(), Error> { + let cl_value = CLValue::from_t(value).map_err(|_| Error::CLValue)?; + self.tracking_copy() + .borrow_mut() + .add(Key::URef(uref), StoredValue::CLValue(cl_value)) + .map_err(|_| Error::Storage)?; + Ok(()) + } + + fn total_balance(&mut self, purse: URef) -> Result { + match self + .tracking_copy() + .borrow_mut() + .get_total_balance(purse.into()) + { + Ok(total) => Ok(total.value()), + Err(err) => { + error!(?err, "mint native total_balance"); + dbg!(&err); + Err(Error::Storage) + } + } + } + + fn available_balance(&mut self, purse: URef) -> Result, Error> { + match self + .tracking_copy() + .borrow_mut() + .get_available_balance(Key::Balance(purse.addr())) + { + Ok(motes) => Ok(Some(motes.value())), + Err(err) => { + error!(?err, "mint native available_balance"); + Err(Error::Storage) + } + } + } + + fn write_balance(&mut self, uref: URef, balance: U512) -> Result<(), Error> { + let cl_value = CLValue::from_t(balance).map_err(|_| Error::CLValue)?; + self.tracking_copy() + .borrow_mut() + .write(Key::Balance(uref.addr()), StoredValue::CLValue(cl_value)); + Ok(()) + } + + fn add_balance(&mut self, uref: URef, value: U512) -> Result<(), Error> { + let cl_value = CLValue::from_t(value).map_err(|_| Error::CLValue)?; + self.tracking_copy() + .borrow_mut() + .add(Key::Balance(uref.addr()), StoredValue::CLValue(cl_value)) + .map_err(|_| Error::Storage)?; + Ok(()) + } +} + +impl SystemProvider for RuntimeNative +where + S: StateReader, +{ + fn record_transfer( + &mut self, + maybe_to: Option, + source: URef, + target: URef, + amount: U512, + id: Option, + ) -> Result<(), Error> { + if self.phase() != 
Phase::Session { + return Ok(()); + } + let txn_hash = match self.id() { + Id::Transaction(txn_hash) => *txn_hash, + // we don't write transfer records for systemic transfers (step, fees, rewards, etc) + // so return Ok and move on. + Id::Seed(_) => return Ok(()), + }; + let from = InitiatorAddr::AccountHash(self.get_caller()); + let fee = Gas::from(self.native_transfer_cost()); + let transfer = Transfer::V2(TransferV2::new( + txn_hash, from, maybe_to, source, target, amount, fee, id, + )); + + self.push_transfer(transfer); + + Ok(()) + } +} + +impl Mint for RuntimeNative +where + S: StateReader, +{ + fn purse_exists(&mut self, uref: URef) -> Result { + let key = Key::Balance(uref.addr()); + match self + .tracking_copy() + .borrow_mut() + .read(&key) + .map_err(|_| Error::Storage)? + { + Some(StoredValue::CLValue(value)) => Ok(*value.cl_type() == U512::cl_type()), + Some(_non_cl_value) => Err(Error::CLValue), + None => Ok(false), + } + } +} diff --git a/storage/src/system/mint/runtime_provider.rs b/storage/src/system/mint/runtime_provider.rs new file mode 100644 index 0000000000..58a7cccf19 --- /dev/null +++ b/storage/src/system/mint/runtime_provider.rs @@ -0,0 +1,51 @@ +use crate::system::error::ProviderError; +use casper_types::{ + account::AccountHash, system::Caller, Key, Phase, RuntimeFootprint, SystemHashRegistry, URef, + U512, +}; + +/// Provider of runtime host functionality. +pub trait RuntimeProvider { + /// This method should return the caller of the current context. + fn get_caller(&self) -> AccountHash; + + /// This method should return the immediate caller of the current context. + fn get_immediate_caller(&self) -> Option; + + /// Is the caller standard payment logic? + fn is_called_from_standard_payment(&self) -> bool; + + /// Get system entity registry. + fn get_system_entity_registry(&self) -> Result; + + /// Read addressable entity by account hash. 
+ fn runtime_footprint_by_account_hash( + &mut self, + account_hash: AccountHash, + ) -> Result, ProviderError>; + + /// Gets execution phase + fn get_phase(&self) -> Phase; + + /// This method should handle obtaining a given named [`Key`] under a `name`. + fn get_key(&self, name: &str) -> Option; + + /// Returns approved CSPR spending limit. + fn get_approved_spending_limit(&self) -> U512; + + /// Signal to host that `amount` of tokens has been transferred. + fn sub_approved_spending_limit(&mut self, amount: U512); + + /// Returns main purse of the sender account. + fn get_main_purse(&self) -> Option; + + /// Returns `true` if the account hash belongs to an administrator account, otherwise `false`. + fn is_administrator(&self, account_hash: &AccountHash) -> bool; + + /// Checks if users can perform unrestricted transfers. This option is valid only for private + /// chains. + fn allow_unrestricted_transfers(&self) -> bool; + + /// Validate URef against context access rights. + fn is_valid_uref(&self, uref: &URef) -> bool; +} diff --git a/storage/src/system/mint/storage_provider.rs b/storage/src/system/mint/storage_provider.rs new file mode 100644 index 0000000000..b4f548373e --- /dev/null +++ b/storage/src/system/mint/storage_provider.rs @@ -0,0 +1,32 @@ +use casper_types::{ + bytesrepr::{FromBytes, ToBytes}, + system::mint::Error, + CLTyped, URef, U512, +}; + +/// Provides functionality of a contract storage. +pub trait StorageProvider { + /// Create new [`URef`]. + fn new_uref(&mut self, init: T) -> Result; + + /// Read data from [`URef`]. + fn read(&mut self, uref: URef) -> Result, Error>; + + /// Write a [`U512`] amount under a [`URef`]. + fn write_amount(&mut self, uref: URef, amount: U512) -> Result<(), Error>; + + /// Add data to a [`URef`]. + fn add(&mut self, uref: URef, value: T) -> Result<(), Error>; + + /// Read total balance. + fn total_balance(&mut self, uref: URef) -> Result; + + /// Read balance. 
+ fn available_balance(&mut self, uref: URef) -> Result, Error>; + + /// Write balance. + fn write_balance(&mut self, uref: URef, balance: U512) -> Result<(), Error>; + + /// Add amount to an existing balance. + fn add_balance(&mut self, uref: URef, value: U512) -> Result<(), Error>; +} diff --git a/types/src/system/mint/system_provider.rs b/storage/src/system/mint/system_provider.rs similarity index 80% rename from types/src/system/mint/system_provider.rs rename to storage/src/system/mint/system_provider.rs index 182264d066..64922f7247 100644 --- a/types/src/system/mint/system_provider.rs +++ b/storage/src/system/mint/system_provider.rs @@ -1,4 +1,4 @@ -use crate::{account::AccountHash, system::mint::Error, URef, U512}; +use casper_types::{account::AccountHash, system::mint::Error, URef, U512}; /// Provides functionality of a system module. pub trait SystemProvider { diff --git a/storage/src/system/protocol_upgrade.rs b/storage/src/system/protocol_upgrade.rs new file mode 100644 index 0000000000..77e908a295 --- /dev/null +++ b/storage/src/system/protocol_upgrade.rs @@ -0,0 +1,1423 @@ +//! Support for applying upgrades on the execution engine. 
+use num_rational::Ratio; +use std::{ + cell::RefCell, + collections::{BTreeMap, BTreeSet}, + rc::Rc, +}; + +use thiserror::Error; +use tracing::{debug, error, info, warn}; + +use casper_types::{ + addressable_entity::{ + ActionThresholds, AssociatedKeys, EntityKind, NamedKeyAddr, NamedKeyValue, Weight, + }, + bytesrepr::{self, ToBytes}, + contracts::{ContractHash, ContractPackageStatus, NamedKeys}, + system::{ + auction::{ + BidAddr, BidAddrTag, BidKind, DelegatorBid, DelegatorKind, + SeigniorageRecipientsSnapshotV1, SeigniorageRecipientsSnapshotV2, + SeigniorageRecipientsV2, Unbond, ValidatorBid, AUCTION_DELAY_KEY, + DEFAULT_SEIGNIORAGE_RECIPIENTS_SNAPSHOT_VERSION, LOCKED_FUNDS_PERIOD_KEY, + SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY, SEIGNIORAGE_RECIPIENTS_SNAPSHOT_VERSION_KEY, + UNBONDING_DELAY_KEY, VALIDATOR_SLOTS_KEY, + }, + handle_payment::{ACCUMULATION_PURSE_KEY, PAYMENT_PURSE_KEY}, + mint::{ + MINT_GAS_HOLD_HANDLING_KEY, MINT_GAS_HOLD_INTERVAL_KEY, ROUND_SEIGNIORAGE_RATE_KEY, + TOTAL_SUPPLY_KEY, + }, + SystemEntityType, AUCTION, HANDLE_PAYMENT, MINT, + }, + AccessRights, AddressableEntity, AddressableEntityHash, ByteCode, ByteCodeAddr, ByteCodeHash, + ByteCodeKind, CLValue, CLValueError, Contract, Digest, EntityAddr, EntityVersionKey, + EntityVersions, EntryPointAddr, EntryPointValue, EntryPoints, FeeHandling, Groups, HashAddr, + Key, KeyTag, Motes, Package, PackageHash, PackageStatus, Phase, ProtocolUpgradeConfig, + ProtocolVersion, PublicKey, StoredValue, SystemHashRegistry, URef, U512, +}; + +use crate::{ + global_state::state::StateProvider, + tracking_copy::{TrackingCopy, TrackingCopyEntityExt, TrackingCopyExt}, + AddressGenerator, +}; + +const NO_CARRY_FORWARD: bool = false; +const CARRY_FORWARD: bool = true; + +/// Represents outcomes of a failed protocol upgrade. +#[derive(Clone, Error, Debug)] +pub enum ProtocolUpgradeError { + /// Protocol version used in the deploy is invalid. 
+ #[error("Invalid protocol version: {0}")] + InvalidProtocolVersion(ProtocolVersion), + /// Error validating a protocol upgrade config. + #[error("Invalid upgrade config")] + InvalidUpgradeConfig, + /// Unable to retrieve a system contract. + #[error("Unable to retrieve system contract: {0}")] + UnableToRetrieveSystemContract(String), + /// Unable to retrieve a system contract package. + #[error("Unable to retrieve system contract package: {0}")] + UnableToRetrieveSystemContractPackage(String), + /// Unable to disable previous version of a system contract. + #[error("Failed to disable previous version of system contract: {0}")] + FailedToDisablePreviousVersion(String), + /// (De)serialization error. + #[error("Bytesrepr error: {0}")] + Bytesrepr(String), + /// Failed to create system entity registry. + #[error("Failed to insert system entity registry")] + FailedToCreateSystemRegistry, + /// Found unexpected variant of a key. + #[error("Unexpected key variant")] + UnexpectedKeyVariant, + /// Found unexpected variant of a stored value. + #[error("Unexpected stored value variant")] + UnexpectedStoredValueVariant, + /// Failed to convert into a CLValue. + #[error("{0}")] + CLValue(String), + /// Missing system contract hash. + #[error("Missing system contract hash: {0}")] + MissingSystemEntityHash(String), + /// Tracking copy error. + #[error("{0}")] + TrackingCopy(crate::tracking_copy::TrackingCopyError), +} + +impl From for ProtocolUpgradeError { + fn from(v: CLValueError) -> Self { + Self::CLValue(v.to_string()) + } +} + +impl From for ProtocolUpgradeError { + fn from(err: crate::tracking_copy::TrackingCopyError) -> Self { + ProtocolUpgradeError::TrackingCopy(err) + } +} + +impl From for ProtocolUpgradeError { + fn from(error: bytesrepr::Error) -> Self { + ProtocolUpgradeError::Bytesrepr(error.to_string()) + } +} + +/// Addresses for system entities. 
+pub struct SystemHashAddresses { + mint: HashAddr, + auction: HashAddr, + handle_payment: HashAddr, +} + +impl SystemHashAddresses { + /// Creates a new instance of system entity addresses. + pub fn new(mint: HashAddr, auction: HashAddr, handle_payment: HashAddr) -> Self { + SystemHashAddresses { + mint, + auction, + handle_payment, + } + } + + /// Mint address. + pub fn mint(&self) -> HashAddr { + self.mint + } + + /// Auction address. + pub fn auction(&self) -> HashAddr { + self.auction + } + + /// Handle payment address. + pub fn handle_payment(&self) -> HashAddr { + self.handle_payment + } +} + +/// The system upgrader deals with conducting an actual protocol upgrade. +pub struct ProtocolUpgrader +where + S: StateProvider, +{ + config: ProtocolUpgradeConfig, + address_generator: Rc>, + tracking_copy: TrackingCopy<::Reader>, +} + +impl ProtocolUpgrader +where + S: StateProvider, +{ + /// Creates new system upgrader instance. + pub fn new( + config: ProtocolUpgradeConfig, + protocol_upgrade_config_hash: Digest, + tracking_copy: TrackingCopy<::Reader>, + ) -> Self { + let phase = Phase::System; + let protocol_upgrade_config_hash_bytes = protocol_upgrade_config_hash.as_ref(); + + let address_generator = { + let generator = AddressGenerator::new(protocol_upgrade_config_hash_bytes, phase); + Rc::new(RefCell::new(generator)) + }; + ProtocolUpgrader { + config, + address_generator, + tracking_copy, + } + } + + /// Apply a protocol upgrade. 
+ pub fn upgrade( + mut self, + pre_state_hash: Digest, + ) -> Result::Reader>, ProtocolUpgradeError> { + self.check_next_protocol_version_validity()?; + self.handle_global_state_updates(); + let system_entity_addresses = self.handle_system_hashes()?; + + if self.config.enable_addressable_entity() { + self.migrate_system_account(pre_state_hash)?; + self.create_accumulation_purse_if_required( + &system_entity_addresses.handle_payment(), + self.config.fee_handling(), + )?; + self.migrate_or_refresh_system_entities(&system_entity_addresses)?; + } else { + self.create_accumulation_purse_if_required_by_contract( + &system_entity_addresses.handle_payment(), + self.config.fee_handling(), + )?; + self.refresh_system_contracts(&system_entity_addresses)?; + } + + self.handle_payment_purse_check( + system_entity_addresses.handle_payment(), + system_entity_addresses.mint(), + )?; + self.handle_new_gas_hold_config(system_entity_addresses.mint())?; + self.handle_new_validator_slots(system_entity_addresses.auction())?; + self.handle_new_auction_delay(system_entity_addresses.auction())?; + self.handle_new_locked_funds_period_millis(system_entity_addresses.auction())?; + self.handle_new_unbonding_delay(system_entity_addresses.auction())?; + self.handle_new_round_seigniorage_rate(system_entity_addresses.mint())?; + self.handle_unbonds_migration()?; + self.handle_bids_migration( + self.config.validator_minimum_bid_amount(), + self.config.minimum_delegation_amount(), + self.config.maximum_delegation_amount(), + )?; + self.handle_era_info_migration()?; + self.handle_seignorage_snapshot_migration(system_entity_addresses.auction())?; + + Ok(self.tracking_copy) + } + + /// Determine if the next protocol version is a legitimate semver progression. 
+ pub fn check_next_protocol_version_validity(&self) -> Result<(), ProtocolUpgradeError> { + debug!("check next protocol version validity"); + let current_protocol_version = self.config.current_protocol_version(); + let new_protocol_version = self.config.new_protocol_version(); + + let upgrade_check_result = + current_protocol_version.check_next_version(&new_protocol_version); + + if upgrade_check_result.is_invalid() { + Err(ProtocolUpgradeError::InvalidProtocolVersion( + new_protocol_version, + )) + } else { + Ok(()) + } + } + + fn system_hash_registry(&self) -> Result { + debug!("system entity registry"); + let registry = if let Ok(registry) = self.tracking_copy.get_system_entity_registry() { + registry + } else { + // Check the upgrade config for the registry + let upgrade_registry = self + .config + .global_state_update() + .get(&Key::SystemEntityRegistry) + .ok_or_else(|| { + error!("Registry is absent in upgrade config"); + ProtocolUpgradeError::FailedToCreateSystemRegistry + })? + .to_owned(); + if let StoredValue::CLValue(cl_registry) = upgrade_registry { + CLValue::into_t::(cl_registry).map_err(|error| { + let error_msg = format!("Conversion to system registry failed: {:?}", error); + error!("{}", error_msg); + ProtocolUpgradeError::Bytesrepr(error_msg) + })? + } else { + error!("Failed to create registry as StoreValue in upgrade config is not CLValue"); + return Err(ProtocolUpgradeError::FailedToCreateSystemRegistry); + } + }; + Ok(registry) + } + + /// Handle system entities. 
+ pub fn handle_system_hashes(&mut self) -> Result { + debug!("handle system entities"); + let mut registry = self.system_hash_registry()?; + + let mint = *registry.get(MINT).ok_or_else(|| { + error!("Missing system mint entity hash"); + ProtocolUpgradeError::MissingSystemEntityHash(MINT.to_string()) + })?; + let auction = *registry.get(AUCTION).ok_or_else(|| { + error!("Missing system auction entity hash"); + ProtocolUpgradeError::MissingSystemEntityHash(AUCTION.to_string()) + })?; + let handle_payment = *registry.get(HANDLE_PAYMENT).ok_or_else(|| { + error!("Missing system handle payment entity hash"); + ProtocolUpgradeError::MissingSystemEntityHash(HANDLE_PAYMENT.to_string()) + })?; + if let Some(standard_payment_hash) = registry.remove_standard_payment() { + // Write the chainspec registry to global state + let cl_value_chainspec_registry = CLValue::from_t(registry) + .map_err(|error| ProtocolUpgradeError::Bytesrepr(error.to_string()))?; + + self.tracking_copy.write( + Key::SystemEntityRegistry, + StoredValue::CLValue(cl_value_chainspec_registry), + ); + + // Prune away standard payment from global state. + self.tracking_copy.prune(Key::Hash(standard_payment_hash)); + }; + + // Write the chainspec registry to global state + let cl_value_chainspec_registry = CLValue::from_t(self.config.chainspec_registry().clone()) + .map_err(|error| ProtocolUpgradeError::Bytesrepr(error.to_string()))?; + + self.tracking_copy.write( + Key::ChainspecRegistry, + StoredValue::CLValue(cl_value_chainspec_registry), + ); + + let system_hash_addresses = SystemHashAddresses::new(mint, auction, handle_payment); + + Ok(system_hash_addresses) + } + + /// Bump major version and/or update the entry points for system contracts. 
+ pub fn migrate_or_refresh_system_entities( + &mut self, + system_entity_addresses: &SystemHashAddresses, + ) -> Result<(), ProtocolUpgradeError> { + debug!("refresh system contracts"); + self.migrate_or_refresh_system_entity_entry_points( + system_entity_addresses.mint(), + SystemEntityType::Mint, + )?; + self.migrate_or_refresh_system_entity_entry_points( + system_entity_addresses.auction(), + SystemEntityType::Auction, + )?; + self.migrate_or_refresh_system_entity_entry_points( + system_entity_addresses.handle_payment(), + SystemEntityType::HandlePayment, + )?; + + Ok(()) + } + + /// Bump major version and/or update the entry points for system contracts. + pub fn refresh_system_contracts( + &mut self, + system_entity_addresses: &SystemHashAddresses, + ) -> Result<(), ProtocolUpgradeError> { + self.refresh_system_contract_entry_points( + system_entity_addresses.mint(), + SystemEntityType::Mint, + )?; + self.refresh_system_contract_entry_points( + system_entity_addresses.auction(), + SystemEntityType::Auction, + )?; + self.refresh_system_contract_entry_points( + system_entity_addresses.handle_payment(), + SystemEntityType::HandlePayment, + )?; + + Ok(()) + } + + /// Refresh the system contracts with an updated set of entry points, + /// and bump the contract version at a major version upgrade. 
+ fn migrate_or_refresh_system_entity_entry_points( + &mut self, + hash_addr: HashAddr, + system_entity_type: SystemEntityType, + ) -> Result<(), ProtocolUpgradeError> { + debug!(%system_entity_type, "refresh system contract entry points"); + let entity_name = system_entity_type.entity_name(); + + let (mut entity, maybe_named_keys, must_carry_forward) = + match self.retrieve_system_entity(hash_addr, system_entity_type) { + Ok(ret) => ret, + Err(err) => { + error!("{:?}", err); + return Err(err); + } + }; + + let mut package = + self.retrieve_system_package(entity.package_hash(), system_entity_type)?; + + let entity_hash = AddressableEntityHash::new(hash_addr); + let entity_addr = EntityAddr::new_system(entity_hash.value()); + package.disable_entity_version(entity_addr).map_err(|_| { + ProtocolUpgradeError::FailedToDisablePreviousVersion(entity_name.to_string()) + })?; + + entity.set_protocol_version(self.config.new_protocol_version()); + + let new_entity = AddressableEntity::new( + entity.package_hash(), + ByteCodeHash::default(), + self.config.new_protocol_version(), + URef::default(), + AssociatedKeys::default(), + ActionThresholds::default(), + EntityKind::System(system_entity_type), + ); + + let byte_code_key = Key::byte_code_key(ByteCodeAddr::Empty); + let byte_code = ByteCode::new(ByteCodeKind::Empty, vec![]); + + self.tracking_copy + .write(byte_code_key, StoredValue::ByteCode(byte_code)); + + let entity_key = new_entity.entity_key(entity_hash); + + self.tracking_copy + .write(entity_key, StoredValue::AddressableEntity(new_entity)); + + if let Some(named_keys) = maybe_named_keys { + for (string, key) in named_keys.into_inner().into_iter() { + let entry_addr = NamedKeyAddr::new_from_string(entity_addr, string.clone()) + .map_err(|err| ProtocolUpgradeError::Bytesrepr(err.to_string()))?; + + let entry_key = Key::NamedKey(entry_addr); + + let named_key_value = NamedKeyValue::from_concrete_values(key, string) + .map_err(|error| 
ProtocolUpgradeError::CLValue(error.to_string()))?; + + self.tracking_copy + .write(entry_key, StoredValue::NamedKey(named_key_value)); + } + } + + let entry_points = system_entity_type.entry_points(); + + for entry_point in entry_points.take_entry_points() { + let entry_point_addr = + EntryPointAddr::new_v1_entry_point_addr(entity_addr, entry_point.name()) + .map_err(|error| ProtocolUpgradeError::Bytesrepr(error.to_string()))?; + self.tracking_copy.write( + Key::EntryPoint(entry_point_addr), + StoredValue::EntryPoint(EntryPointValue::V1CasperVm(entry_point)), + ); + } + + package.insert_entity_version( + self.config.new_protocol_version().value().major, + entity_addr, + ); + + self.tracking_copy.write( + Key::SmartContract(entity.package_hash().value()), + StoredValue::SmartContract(package), + ); + + if must_carry_forward { + // carry forward + let package_key = Key::SmartContract(entity.package_hash().value()); + let uref = URef::default(); + let indirection = CLValue::from_t((package_key, uref)) + .map_err(|cl_error| ProtocolUpgradeError::CLValue(cl_error.to_string()))?; + + self.tracking_copy.write( + Key::Hash(entity.package_hash().value()), + StoredValue::CLValue(indirection), + ); + + let contract_wasm_key = Key::Hash(entity.byte_code_hash().value()); + let contract_wasm_indirection = CLValue::from_t(Key::ByteCode(ByteCodeAddr::Empty)) + .map_err(|cl_error| ProtocolUpgradeError::CLValue(cl_error.to_string()))?; + self.tracking_copy.write( + contract_wasm_key, + StoredValue::CLValue(contract_wasm_indirection), + ); + + let contract_indirection = CLValue::from_t(Key::AddressableEntity(entity_addr)) + .map_err(|cl_error| ProtocolUpgradeError::CLValue(cl_error.to_string()))?; + + self.tracking_copy.write( + Key::Hash(entity_addr.value()), + StoredValue::CLValue(contract_indirection), + ) + } + + Ok(()) + } + + fn retrieve_system_package( + &mut self, + package_hash: PackageHash, + system_contract_type: SystemEntityType, + ) -> Result { + 
debug!(%system_contract_type, "retrieve system package"); + if let Some(StoredValue::SmartContract(system_entity)) = self + .tracking_copy + .read(&Key::SmartContract(package_hash.value())) + .map_err(|_| { + ProtocolUpgradeError::UnableToRetrieveSystemContractPackage( + system_contract_type.to_string(), + ) + })? + { + return Ok(system_entity); + } + + if let Some(StoredValue::ContractPackage(contract_package)) = self + .tracking_copy + .read(&Key::Hash(package_hash.value())) + .map_err(|_| { + ProtocolUpgradeError::UnableToRetrieveSystemContractPackage( + system_contract_type.to_string(), + ) + })? + { + let versions: BTreeMap = contract_package + .versions() + .iter() + .map(|(version, contract_hash)| { + let entity_version = EntityVersionKey::new(2, version.contract_version()); + let entity_hash = EntityAddr::System(contract_hash.value()); + (entity_version, entity_hash) + }) + .collect(); + + let disabled_versions = contract_package + .disabled_versions() + .iter() + .map(|contract_versions| { + EntityVersionKey::new( + contract_versions.protocol_version_major(), + contract_versions.contract_version(), + ) + }) + .collect(); + + let lock_status = if contract_package.lock_status() == ContractPackageStatus::Locked { + PackageStatus::Locked + } else { + PackageStatus::Unlocked + }; + + let groups = contract_package.take_groups(); + return Ok(Package::new( + versions.into(), + disabled_versions, + groups, + lock_status, + )); + } + + Err(ProtocolUpgradeError::UnableToRetrieveSystemContractPackage( + system_contract_type.to_string(), + )) + } + + fn retrieve_system_entity( + &mut self, + hash_addr: HashAddr, + system_contract_type: SystemEntityType, + ) -> Result<(AddressableEntity, Option, bool), ProtocolUpgradeError> { + debug!(%system_contract_type, "retrieve system entity"); + if let Some(StoredValue::Contract(system_contract)) = self + .tracking_copy + .read(&Key::Hash(hash_addr)) + .map_err(|_| { + ProtocolUpgradeError::UnableToRetrieveSystemContract( + 
system_contract_type.to_string(), + ) + })? + { + let named_keys = system_contract.named_keys().clone(); + return Ok((system_contract.into(), Some(named_keys), CARRY_FORWARD)); + } + + if let Some(StoredValue::AddressableEntity(system_entity)) = self + .tracking_copy + .read(&Key::AddressableEntity(EntityAddr::new_system(hash_addr))) + .map_err(|_| { + ProtocolUpgradeError::UnableToRetrieveSystemContract( + system_contract_type.to_string(), + ) + })? + { + return Ok((system_entity, None, NO_CARRY_FORWARD)); + } + + Err(ProtocolUpgradeError::UnableToRetrieveSystemContract( + system_contract_type.to_string(), + )) + } + + /// Refresh the system contracts with an updated set of entry points, + /// and bump the contract version at a major version upgrade. + fn refresh_system_contract_entry_points( + &mut self, + contract_hash: HashAddr, + system_entity_type: SystemEntityType, + ) -> Result<(), ProtocolUpgradeError> { + let contract_name = system_entity_type.entity_name(); + let entry_points = system_entity_type.entry_points(); + + let mut contract = if let StoredValue::Contract(contract) = self + .tracking_copy + .read(&Key::Hash(contract_hash)) + .map_err(|_| { + ProtocolUpgradeError::UnableToRetrieveSystemContract(contract_name.to_string()) + })? + .ok_or_else(|| { + ProtocolUpgradeError::UnableToRetrieveSystemContract(contract_name.to_string()) + })? { + contract + } else { + return Err(ProtocolUpgradeError::UnableToRetrieveSystemContract( + contract_name, + )); + }; + + let is_major_bump = self + .config + .current_protocol_version() + .check_next_version(&self.config.new_protocol_version()) + .is_major_version(); + + let contract_entry_points: EntryPoints = contract.entry_points().clone().into(); + let entry_points_unchanged = contract_entry_points == entry_points; + if entry_points_unchanged && !is_major_bump { + // We don't need to do anything if entry points are unchanged, or there's no major + // version bump. 
+ return Ok(()); + } + + let contract_package_key = Key::Hash(contract.contract_package_hash().value()); + + let mut contract_package = if let StoredValue::ContractPackage(contract_package) = self + .tracking_copy + .read(&contract_package_key) + .map_err(|_| { + ProtocolUpgradeError::UnableToRetrieveSystemContractPackage( + contract_name.to_string(), + ) + })? + .ok_or_else(|| { + ProtocolUpgradeError::UnableToRetrieveSystemContractPackage( + contract_name.to_string(), + ) + })? { + contract_package + } else { + return Err(ProtocolUpgradeError::UnableToRetrieveSystemContractPackage( + contract_name, + )); + }; + + contract.set_protocol_version(self.config.new_protocol_version()); + + let new_contract = Contract::new( + contract.contract_package_hash(), + contract.contract_wasm_hash(), + contract.named_keys().clone(), + entry_points.into(), + self.config.new_protocol_version(), + ); + self.tracking_copy.write( + Key::Hash(contract_hash), + StoredValue::Contract(new_contract), + ); + + contract_package.insert_contract_version( + self.config.new_protocol_version().value().major, + ContractHash::new(contract_hash), + ); + + self.tracking_copy.write( + contract_package_key, + StoredValue::ContractPackage(contract_package), + ); + + Ok(()) + } + + /// Migrate the system account to addressable entity if necessary. 
+ pub fn migrate_system_account( + &mut self, + pre_state_hash: Digest, + ) -> Result<(), ProtocolUpgradeError> { + debug!("migrate system account"); + let mut address_generator = AddressGenerator::new(pre_state_hash.as_ref(), Phase::System); + + let account_hash = PublicKey::System.to_account_hash(); + + let main_purse = { + let purse_addr = address_generator.new_hash_address(); + let balance_cl_value = CLValue::from_t(U512::zero()) + .map_err(|error| ProtocolUpgradeError::CLValue(error.to_string()))?; + + self.tracking_copy.write( + Key::Balance(purse_addr), + StoredValue::CLValue(balance_cl_value), + ); + + let purse_cl_value = CLValue::unit(); + let purse_uref = URef::new(purse_addr, AccessRights::READ_ADD_WRITE); + + self.tracking_copy + .write(Key::URef(purse_uref), StoredValue::CLValue(purse_cl_value)); + purse_uref + }; + + let associated_keys = AssociatedKeys::new(account_hash, Weight::new(1)); + let byte_code_hash = ByteCodeHash::default(); + let entity_hash = AddressableEntityHash::new(PublicKey::System.to_account_hash().value()); + let package_hash = PackageHash::new(address_generator.new_hash_address()); + + let byte_code = ByteCode::new(ByteCodeKind::Empty, vec![]); + + let system_account_entity = AddressableEntity::new( + package_hash, + byte_code_hash, + self.config.new_protocol_version(), + main_purse, + associated_keys, + ActionThresholds::default(), + EntityKind::Account(account_hash), + ); + + let package = { + let mut package = Package::new( + EntityVersions::default(), + BTreeSet::default(), + Groups::default(), + PackageStatus::default(), + ); + package.insert_entity_version( + self.config.new_protocol_version().value().major, + EntityAddr::Account(entity_hash.value()), + ); + package + }; + + let byte_code_key = Key::ByteCode(ByteCodeAddr::Empty); + self.tracking_copy + .write(byte_code_key, StoredValue::ByteCode(byte_code)); + + let entity_key = system_account_entity.entity_key(entity_hash); + + self.tracking_copy.write( + entity_key, + 
StoredValue::AddressableEntity(system_account_entity), + ); + + self.tracking_copy + .write(package_hash.into(), StoredValue::SmartContract(package)); + + let contract_by_account = CLValue::from_t(entity_key) + .map_err(|error| ProtocolUpgradeError::CLValue(error.to_string()))?; + + self.tracking_copy.write( + Key::Account(account_hash), + StoredValue::CLValue(contract_by_account), + ); + + Ok(()) + } + + /// Creates an accumulation purse in the handle payment system contract if its not present. + /// + /// This can happen on older networks that did not have support for [`FeeHandling::Accumulate`] + /// at the genesis. In such cases we have to check the state of handle payment contract and + /// create an accumulation purse. + pub fn create_accumulation_purse_if_required( + &mut self, + handle_payment_hash: &HashAddr, + fee_handling: FeeHandling, + ) -> Result<(), ProtocolUpgradeError> { + debug!(?fee_handling, "create accumulation purse if required"); + match fee_handling { + FeeHandling::PayToProposer | FeeHandling::Burn => return Ok(()), + FeeHandling::Accumulate | FeeHandling::NoFee => {} + } + let mut address_generator = { + let seed_bytes = ( + self.config.current_protocol_version(), + self.config.new_protocol_version(), + ) + .to_bytes()?; + let phase = Phase::System; + AddressGenerator::new(&seed_bytes, phase) + }; + let system_contract = SystemEntityType::HandlePayment; + + let (addressable_entity, maybe_named_keys, _) = + self.retrieve_system_entity(*handle_payment_hash, system_contract)?; + + let entity_addr = EntityAddr::new_system(*handle_payment_hash); + + if let Some(named_keys) = maybe_named_keys { + for (string, key) in named_keys.into_inner().into_iter() { + let entry_addr = NamedKeyAddr::new_from_string(entity_addr, string.clone()) + .map_err(|err| ProtocolUpgradeError::Bytesrepr(err.to_string()))?; + + let named_key_value = NamedKeyValue::from_concrete_values(key, string) + .map_err(|error| ProtocolUpgradeError::CLValue(error.to_string()))?; + + 
let entry_key = Key::NamedKey(entry_addr); + + self.tracking_copy + .write(entry_key, StoredValue::NamedKey(named_key_value)); + } + } + + let named_key_addr = + NamedKeyAddr::new_from_string(entity_addr, ACCUMULATION_PURSE_KEY.to_string()) + .map_err(|err| ProtocolUpgradeError::Bytesrepr(err.to_string()))?; + + let requries_accumulation_purse = self + .tracking_copy + .read(&Key::NamedKey(named_key_addr)) + .map_err(|_| ProtocolUpgradeError::UnexpectedStoredValueVariant)? + .is_none(); + + if requries_accumulation_purse { + let purse_uref = address_generator.new_uref(AccessRights::READ_ADD_WRITE); + let balance_clvalue = CLValue::from_t(U512::zero())?; + self.tracking_copy.write( + Key::Balance(purse_uref.addr()), + StoredValue::CLValue(balance_clvalue), + ); + + let purse_key = Key::URef(purse_uref); + + self.tracking_copy + .write(purse_key, StoredValue::CLValue(CLValue::unit())); + + let purse = + NamedKeyValue::from_concrete_values(purse_key, ACCUMULATION_PURSE_KEY.to_string()) + .map_err(|cl_error| ProtocolUpgradeError::CLValue(cl_error.to_string()))?; + + self.tracking_copy + .write(Key::NamedKey(named_key_addr), StoredValue::NamedKey(purse)); + + let entity_key = Key::AddressableEntity(EntityAddr::System(*handle_payment_hash)); + + self.tracking_copy.write( + entity_key, + StoredValue::AddressableEntity(addressable_entity), + ); + } + + Ok(()) + } + + /// Creates an accumulation purse in the handle payment system contract if its not present. + /// + /// This can happen on older networks that did not have support for [`FeeHandling::Accumulate`] + /// at the genesis. In such cases we have to check the state of handle payment contract and + /// create an accumulation purse. 
+ pub fn create_accumulation_purse_if_required_by_contract( + &mut self, + handle_payment_hash: &HashAddr, + fee_handling: FeeHandling, + ) -> Result<(), ProtocolUpgradeError> { + match fee_handling { + FeeHandling::PayToProposer | FeeHandling::Burn => return Ok(()), + FeeHandling::Accumulate | FeeHandling::NoFee => {} + } + + let mut address_generator = { + let seed_bytes = ( + self.config.current_protocol_version(), + self.config.new_protocol_version(), + ) + .to_bytes()?; + + let phase = Phase::System; + + AddressGenerator::new(&seed_bytes, phase) + }; + + let system_contract = SystemEntityType::HandlePayment; + let contract_name = system_contract.entity_name(); + let mut contract = if let StoredValue::Contract(contract) = self + .tracking_copy + .read(&Key::Hash(*handle_payment_hash)) + .map_err(|_| { + ProtocolUpgradeError::UnableToRetrieveSystemContract(contract_name.to_string()) + })? + .ok_or_else(|| { + ProtocolUpgradeError::UnableToRetrieveSystemContract(contract_name.to_string()) + })? 
{ + contract + } else { + return Err(ProtocolUpgradeError::UnableToRetrieveSystemContract( + contract_name, + )); + }; + + if !contract.named_keys().contains(ACCUMULATION_PURSE_KEY) { + let purse_uref = address_generator.new_uref(AccessRights::READ_ADD_WRITE); + let balance_clvalue = CLValue::from_t(U512::zero())?; + self.tracking_copy.write( + Key::Balance(purse_uref.addr()), + StoredValue::CLValue(balance_clvalue), + ); + self.tracking_copy + .write(Key::URef(purse_uref), StoredValue::CLValue(CLValue::unit())); + + let mut new_named_keys = NamedKeys::new(); + new_named_keys.insert(ACCUMULATION_PURSE_KEY.into(), Key::from(purse_uref)); + contract.named_keys_append(new_named_keys); + + self.tracking_copy.write( + Key::Hash(*handle_payment_hash), + StoredValue::Contract(contract), + ); + } + + Ok(()) + } + + fn get_named_keys( + &mut self, + contract_hash: HashAddr, + ) -> Result { + if self.config.enable_addressable_entity() { + let named_keys = self + .tracking_copy + .get_named_keys(EntityAddr::System(contract_hash))?; + Ok(named_keys) + } else { + let named_keys = self + .tracking_copy + .read(&Key::Hash(contract_hash))? + .ok_or_else(|| { + ProtocolUpgradeError::UnableToRetrieveSystemContract(format!( + "{:?}", + contract_hash + )) + })? + .as_contract() + .map(|contract| contract.named_keys().clone()) + .ok_or(ProtocolUpgradeError::UnexpectedStoredValueVariant)?; + + Ok(named_keys) + } + } + + /// Check payment purse balance. 
+ pub fn handle_payment_purse_check( + &mut self, + handle_payment: HashAddr, + mint: HashAddr, + ) -> Result<(), ProtocolUpgradeError> { + let payment_named_keys = self.get_named_keys(handle_payment)?; + let payment_purse_key = payment_named_keys + .get(PAYMENT_PURSE_KEY) + .expect("payment purse key must exist in handle payment contract's named keys"); + let balance = self + .tracking_copy + .get_total_balance(*payment_purse_key) + .expect("must be able to get payment purse balance"); + if balance <= Motes::zero() { + return Ok(()); + } + warn!("payment purse had remaining balance at upgrade {}", balance); + let balance_key = { + let uref_addr = payment_purse_key + .as_uref() + .expect("payment purse key must be uref.") + .addr(); + Key::Balance(uref_addr) + }; + + let mint_named_keys = self.get_named_keys(mint)?; + let total_supply_key = mint_named_keys + .get(TOTAL_SUPPLY_KEY) + .expect("total supply key must exist in mint contract's named keys"); + + let stored_value = self + .tracking_copy + .read(total_supply_key) + .expect("must be able to read total supply") + .expect("total supply must have a value"); + + // by convention, we only store CLValues under Key::URef + if let StoredValue::CLValue(value) = stored_value { + // Only CLTyped instances should be stored as a CLValue. 
+ let total_supply: U512 = + CLValue::into_t(value).expect("total supply must have expected type."); + + let new_total_supply = total_supply.saturating_sub(balance.value()); + info!( + "adjusting total supply from {} to {}", + total_supply, new_total_supply + ); + let cl_value = CLValue::from_t(new_total_supply) + .expect("new total supply must convert to CLValue."); + self.tracking_copy + .write(*total_supply_key, StoredValue::CLValue(cl_value)); + info!( + "adjusting payment purse balance from {} to {}", + balance.value(), + U512::zero() + ); + let cl_value = CLValue::from_t(U512::zero()).expect("zero must convert to CLValue."); + self.tracking_copy + .write(balance_key, StoredValue::CLValue(cl_value)); + Ok(()) + } else { + Err(ProtocolUpgradeError::CLValue( + "failure to retrieve total supply".to_string(), + )) + } + } + + /// Upsert gas hold interval to mint named keys. + pub fn handle_new_gas_hold_config( + &mut self, + mint: HashAddr, + ) -> Result<(), ProtocolUpgradeError> { + if self.config.new_gas_hold_handling().is_none() + && self.config.new_gas_hold_interval().is_none() + { + return Ok(()); + } + + let mint_addr = EntityAddr::System(mint); + let named_keys = self.get_named_keys(mint)?; + + if let Some(new_gas_hold_handling) = self.config.new_gas_hold_handling() { + debug!(%new_gas_hold_handling, "handle new gas hold handling"); + let stored_value = + StoredValue::CLValue(CLValue::from_t(new_gas_hold_handling.tag()).map_err( + |_| ProtocolUpgradeError::Bytesrepr("new_gas_hold_handling".to_string()), + )?); + + self.system_uref( + mint_addr, + MINT_GAS_HOLD_HANDLING_KEY, + &named_keys, + stored_value, + )?; + } + + if let Some(new_gas_hold_interval) = self.config.new_gas_hold_interval() { + debug!(%new_gas_hold_interval, "handle new gas hold interval"); + let stored_value = + StoredValue::CLValue(CLValue::from_t(new_gas_hold_interval).map_err(|_| { + ProtocolUpgradeError::Bytesrepr("new_gas_hold_interval".to_string()) + })?); + + self.system_uref( + 
mint_addr, + MINT_GAS_HOLD_INTERVAL_KEY, + &named_keys, + stored_value, + )?; + } + Ok(()) + } + + fn system_uref( + &mut self, + entity_addr: EntityAddr, + name: &str, + named_keys: &NamedKeys, + stored_value: StoredValue, + ) -> Result<(), ProtocolUpgradeError> { + let uref = { + match named_keys.get(name) { + Some(key) => match key.as_uref() { + Some(uref) => *uref, + None => { + return Err(ProtocolUpgradeError::UnexpectedKeyVariant); + } + }, + None => self + .address_generator + .borrow_mut() + .new_uref(AccessRights::READ_ADD_WRITE), + } + }; + self.tracking_copy + .upsert_uref_to_named_keys(entity_addr, name, named_keys, uref, stored_value) + .map_err(ProtocolUpgradeError::TrackingCopy) + } + + /// Handle new validator slots. + pub fn handle_new_validator_slots( + &mut self, + auction: HashAddr, + ) -> Result<(), ProtocolUpgradeError> { + if let Some(new_validator_slots) = self.config.new_validator_slots() { + debug!(%new_validator_slots, "handle new validator slots"); + // if new total validator slots is provided, update auction contract state + let auction_named_keys = self.get_named_keys(auction)?; + + let validator_slots_key = auction_named_keys + .get(VALIDATOR_SLOTS_KEY) + .expect("validator_slots key must exist in auction contract's named keys"); + let value = + StoredValue::CLValue(CLValue::from_t(new_validator_slots).map_err(|_| { + ProtocolUpgradeError::Bytesrepr("new_validator_slots".to_string()) + })?); + self.tracking_copy.write(*validator_slots_key, value); + } + Ok(()) + } + + /// Applies the necessary changes if a new auction delay is part of the upgrade. 
+ pub fn handle_new_auction_delay( + &mut self, + auction: HashAddr, + ) -> Result<(), ProtocolUpgradeError> { + if let Some(new_auction_delay) = self.config.new_auction_delay() { + debug!(%new_auction_delay, "handle new auction delay"); + let auction_named_keys = self.get_named_keys(auction)?; + + let auction_delay_key = auction_named_keys + .get(AUCTION_DELAY_KEY) + .expect("auction_delay key must exist in auction contract's named keys"); + let value = + StoredValue::CLValue(CLValue::from_t(new_auction_delay).map_err(|_| { + ProtocolUpgradeError::Bytesrepr("new_auction_delay".to_string()) + })?); + self.tracking_copy.write(*auction_delay_key, value); + } + Ok(()) + } + + /// Applies the necessary changes if a new locked funds period is part of the upgrade. + pub fn handle_new_locked_funds_period_millis( + &mut self, + auction: HashAddr, + ) -> Result<(), ProtocolUpgradeError> { + if let Some(new_locked_funds_period) = self.config.new_locked_funds_period_millis() { + debug!(%new_locked_funds_period,"handle new locked funds period millis"); + + let auction_named_keys = self.get_named_keys(auction)?; + + let locked_funds_period_key = auction_named_keys + .get(LOCKED_FUNDS_PERIOD_KEY) + .expect("locked_funds_period key must exist in auction contract's named keys"); + let value = + StoredValue::CLValue(CLValue::from_t(new_locked_funds_period).map_err(|_| { + ProtocolUpgradeError::Bytesrepr("new_locked_funds_period".to_string()) + })?); + self.tracking_copy.write(*locked_funds_period_key, value); + } + Ok(()) + } + + /// Applies the necessary changes if a new unbonding delay is part of the upgrade. + pub fn handle_new_unbonding_delay( + &mut self, + auction: HashAddr, + ) -> Result<(), ProtocolUpgradeError> { + // We insert the new unbonding delay once the purses to be paid out have been transformed + // based on the previous unbonding delay. 
+ if let Some(new_unbonding_delay) = self.config.new_unbonding_delay() { + debug!(%new_unbonding_delay,"handle new unbonding delay"); + + let auction_named_keys = self.get_named_keys(auction)?; + + let unbonding_delay_key = auction_named_keys + .get(UNBONDING_DELAY_KEY) + .expect("unbonding_delay key must exist in auction contract's named keys"); + let value = + StoredValue::CLValue(CLValue::from_t(new_unbonding_delay).map_err(|_| { + ProtocolUpgradeError::Bytesrepr("new_unbonding_delay".to_string()) + })?); + self.tracking_copy.write(*unbonding_delay_key, value); + } + Ok(()) + } + + /// Applies the necessary changes if a new round seigniorage rate is part of the upgrade. + pub fn handle_new_round_seigniorage_rate( + &mut self, + mint: HashAddr, + ) -> Result<(), ProtocolUpgradeError> { + if let Some(new_round_seigniorage_rate) = self.config.new_round_seigniorage_rate() { + debug!(%new_round_seigniorage_rate,"handle new round seigniorage rate"); + let new_round_seigniorage_rate: Ratio = { + let (numer, denom) = new_round_seigniorage_rate.into(); + Ratio::new(numer.into(), denom.into()) + }; + + let mint_named_keys = self.get_named_keys(mint)?; + + let locked_funds_period_key = mint_named_keys + .get(ROUND_SEIGNIORAGE_RATE_KEY) + .expect("round_seigniorage_rate key must exist in mint contract's named keys"); + let value = StoredValue::CLValue(CLValue::from_t(new_round_seigniorage_rate).map_err( + |_| ProtocolUpgradeError::Bytesrepr("new_round_seigniorage_rate".to_string()), + )?); + self.tracking_copy.write(*locked_funds_period_key, value); + } + Ok(()) + } + + /// Handle unbonds migration. 
+ pub fn handle_unbonds_migration(&mut self) -> Result<(), ProtocolUpgradeError> { + debug!("handle unbonds migration"); + let tc = &mut self.tracking_copy; + let existing_keys = match tc.get_keys(&KeyTag::Unbond) { + Ok(keys) => keys, + Err(err) => return Err(ProtocolUpgradeError::TrackingCopy(err)), + }; + for key in existing_keys { + if let Some(StoredValue::Unbonding(unbonding_purses)) = + tc.get(&key).map_err(Into::::into)? + { + // prune away the original record, we don't need it anymore + tc.prune(key); + + // re-write records under Key::BidAddr , StoredValue::BidKind + for unbonding_purse in unbonding_purses { + let validator = unbonding_purse.validator_public_key(); + let unbonder = unbonding_purse.unbonder_public_key(); + let new_key = Key::BidAddr(BidAddr::UnbondAccount { + validator: validator.to_account_hash(), + unbonder: unbonder.to_account_hash(), + }); + let unbond = Box::new(Unbond::from(unbonding_purse)); + let unbond_bid_kind = BidKind::Unbond(unbond.clone()); + if !unbond.eras().is_empty() { + tc.write(new_key, StoredValue::BidKind(unbond_bid_kind)); + } + } + } + } + + Ok(()) + } + + /// Handle bids migration. + pub fn handle_bids_migration( + &mut self, + validator_minimum: u64, + delegation_minimum: u64, + delegation_maximum: u64, + ) -> Result<(), ProtocolUpgradeError> { + if delegation_maximum < delegation_minimum { + return Err(ProtocolUpgradeError::InvalidUpgradeConfig); + } + debug!("handle bids migration"); + let tc = &mut self.tracking_copy; + let existing_bid_keys = match tc.get_keys(&KeyTag::Bid) { + Ok(keys) => keys, + Err(err) => return Err(ProtocolUpgradeError::TrackingCopy(err)), + }; + for key in existing_bid_keys { + if let Some(StoredValue::Bid(existing_bid)) = + tc.get(&key).map_err(Into::::into)? 
+ { + // prune away the original record, we don't need it anymore + tc.prune(key); + + if existing_bid.staked_amount().is_zero() { + // the previous logic enforces unbonding all delegators of + // a validator that reduced their personal stake to 0 (and we have + // various existent tests that prove this), thus there is no need + // to handle the complicated hypothetical case of one or more + // delegator stakes being > 0 if the validator stake is 0. + // + // tl;dr this is a "zombie" bid and we don't need to continue + // carrying it forward at tip. + continue; + } + + let validator_public_key = existing_bid.validator_public_key(); + let validator_bid_addr = BidAddr::from(validator_public_key.clone()); + let validator_bid = { + let validator_bid = ValidatorBid::from(*existing_bid.clone()); + let inactive = validator_bid.staked_amount() < U512::from(validator_minimum); + validator_bid + .with_inactive(inactive) + .with_min_max_delegation_amount(delegation_maximum, delegation_minimum) + }; + tc.write( + validator_bid_addr.into(), + StoredValue::BidKind(BidKind::Validator(Box::new(validator_bid))), + ); + + let delegators = existing_bid.delegators().clone(); + for (_, delegator) in delegators { + let delegator_bid_addr = BidAddr::new_delegator_kind( + validator_public_key, + &DelegatorKind::PublicKey(delegator.delegator_public_key().clone()), + ); + // the previous code was removing a delegator bid from the embedded + // collection within their validator's bid when the delegator fully + // unstaked, so technically we don't need to check for 0 balance here. + // However, since it is low effort to check, doing it just to be sure. 
+ if !delegator.staked_amount().is_zero() { + tc.write( + delegator_bid_addr.into(), + StoredValue::BidKind(BidKind::Delegator(Box::new(DelegatorBid::from( + delegator, + )))), + ); + } + } + } + } + + let validator_bid_keys = tc + .get_by_byte_prefix(&[KeyTag::BidAddr as u8, BidAddrTag::Validator as u8]) + .map_err(|_| ProtocolUpgradeError::UnexpectedKeyVariant)?; + for validator_bid_key in validator_bid_keys { + if let Some(StoredValue::BidKind(BidKind::Validator(validator_bid))) = tc + .get(&validator_bid_key) + .map_err(Into::::into)? + { + let is_bid_inactive = validator_bid.inactive(); + let has_less_than_validator_minimum = + validator_bid.staked_amount() < U512::from(validator_minimum); + if !is_bid_inactive && has_less_than_validator_minimum { + let inactive_bid = validator_bid.with_inactive(true); + info!("marking bid inactive {validator_bid_key}"); + tc.write( + validator_bid_key, + StoredValue::BidKind(BidKind::Validator(Box::new(inactive_bid))), + ); + } + } + } + + Ok(()) + } + + /// Handle era info migration. + pub fn handle_era_info_migration(&mut self) -> Result<(), ProtocolUpgradeError> { + // EraInfo migration + if let Some(activation_point) = self.config.activation_point() { + // The highest stored era is the immediate predecessor of the activation point. + let highest_era_info_id = activation_point.saturating_sub(1); + let highest_era_info_key = Key::EraInfo(highest_era_info_id); + + let get_result = self + .tracking_copy + .get(&highest_era_info_key) + .map_err(ProtocolUpgradeError::TrackingCopy)?; + + match get_result { + Some(stored_value @ StoredValue::EraInfo(_)) => { + self.tracking_copy.write(Key::EraSummary, stored_value); + } + Some(other_stored_value) => { + // This should not happen as we only write EraInfo variants. 
+                    error!(stored_value_type_name=%other_stored_value.type_name(),
+                    "EraInfo key contains unexpected StoredValue variant");
+                    return Err(ProtocolUpgradeError::UnexpectedStoredValueVariant);
+                }
+                None => {
+                    // Can't find key
+                    // Most likely this chain did not yet run an auction, or recently completed a
+                    // prune
+                }
+            };
+        }
+        Ok(())
+    }
+
+    /// Handle seigniorage snapshot migration to new version.
+    pub fn handle_seignorage_snapshot_migration(
+        &mut self,
+        auction: HashAddr,
+    ) -> Result<(), ProtocolUpgradeError> {
+        let auction_named_keys = self.get_named_keys(auction)?;
+        let maybe_snapshot_version_key =
+            auction_named_keys.get(SEIGNIORAGE_RECIPIENTS_SNAPSHOT_VERSION_KEY);
+        let snapshot_key = auction_named_keys
+            .get(SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY)
+            .expect("snapshot key should already exist");
+
+        // if version flag does not exist yet, set it and migrate snapshot
+        if maybe_snapshot_version_key.is_none() {
+            let auction_addr = EntityAddr::new_system(auction);
+
+            // add new snapshot version named key
+            let stored_value = StoredValue::CLValue(CLValue::from_t(
+                DEFAULT_SEIGNIORAGE_RECIPIENTS_SNAPSHOT_VERSION,
+            )?);
+            self.system_uref(
+                auction_addr,
+                SEIGNIORAGE_RECIPIENTS_SNAPSHOT_VERSION_KEY,
+                &auction_named_keys,
+                stored_value,
+            )?;
+
+            // read legacy snapshot
+            if let Some(snapshot_stored_value) = self.tracking_copy.read(snapshot_key)?
{ + let snapshot_cl_value = match snapshot_stored_value.into_cl_value() { + Some(cl_value) => cl_value, + None => { + error!("seigniorage recipients snapshot is not a CLValue"); + return Err(ProtocolUpgradeError::CLValue( + "seigniorage recipients snapshot is not a CLValue".to_string(), + )); + } + }; + + let legacy_snapshot: SeigniorageRecipientsSnapshotV1 = + snapshot_cl_value.into_t()?; + + let mut new_snapshot = SeigniorageRecipientsSnapshotV2::default(); + for (era_id, recipients) in legacy_snapshot.into_iter() { + let mut new_recipients = SeigniorageRecipientsV2::default(); + for (pubkey, recipient) in recipients { + new_recipients.insert(pubkey, recipient.into()); + } + new_snapshot.insert(era_id, new_recipients); + } + + // store new snapshot + self.tracking_copy.write( + *snapshot_key, + StoredValue::CLValue(CLValue::from_t(new_snapshot)?), + ); + }; + } + + Ok(()) + } + + /// Handle global state updates. + pub fn handle_global_state_updates(&mut self) { + debug!("handle global state updates"); + for (key, value) in self.config.global_state_update() { + self.tracking_copy.write(*key, value.clone()); + } + } +} diff --git a/storage/src/system/runtime_native.rs b/storage/src/system/runtime_native.rs new file mode 100644 index 0000000000..49e6bcc179 --- /dev/null +++ b/storage/src/system/runtime_native.rs @@ -0,0 +1,581 @@ +use crate::{ + global_state::{error::Error as GlobalStateReader, state::StateReader}, + tracking_copy::{TrackingCopyEntityExt, TrackingCopyError, TrackingCopyExt}, + AddressGenerator, TrackingCopy, +}; +use casper_types::{ + account::AccountHash, contracts::NamedKeys, Chainspec, ContextAccessRights, EntityAddr, + FeeHandling, Key, Phase, ProtocolVersion, PublicKey, RefundHandling, RuntimeFootprint, + StoredValue, TransactionHash, Transfer, URef, U512, +}; +use num_rational::Ratio; +use parking_lot::RwLock; +use std::{cell::RefCell, collections::BTreeSet, rc::Rc, sync::Arc}; +use tracing::error; + +/// Configuration settings. 
+#[derive(Debug, Clone, PartialEq, Eq, Default)] +pub struct Config { + transfer_config: TransferConfig, + fee_handling: FeeHandling, + refund_handling: RefundHandling, + vesting_schedule_period_millis: u64, + allow_auction_bids: bool, + compute_rewards: bool, + max_delegators_per_validator: u32, + minimum_bid_amount: u64, + minimum_delegation_amount: u64, + balance_hold_interval: u64, + include_credits: bool, + credit_cap: Ratio, + enable_addressable_entity: bool, + native_transfer_cost: u32, +} + +impl Config { + /// Ctor. + #[allow(clippy::too_many_arguments)] + pub const fn new( + transfer_config: TransferConfig, + fee_handling: FeeHandling, + refund_handling: RefundHandling, + vesting_schedule_period_millis: u64, + allow_auction_bids: bool, + compute_rewards: bool, + max_delegators_per_validator: u32, + minimum_bid_amount: u64, + minimum_delegation_amount: u64, + balance_hold_interval: u64, + include_credits: bool, + credit_cap: Ratio, + enable_addressable_entity: bool, + native_transfer_cost: u32, + ) -> Self { + Config { + transfer_config, + fee_handling, + refund_handling, + vesting_schedule_period_millis, + allow_auction_bids, + compute_rewards, + max_delegators_per_validator, + minimum_bid_amount, + minimum_delegation_amount, + balance_hold_interval, + include_credits, + credit_cap, + enable_addressable_entity, + native_transfer_cost, + } + } + + /// Ctor from chainspec. 
+ pub fn from_chainspec(chainspec: &Chainspec) -> Self { + let transfer_config = TransferConfig::from_chainspec(chainspec); + let fee_handling = chainspec.core_config.fee_handling; + let refund_handling = chainspec.core_config.refund_handling; + let vesting_schedule_period_millis = chainspec.core_config.vesting_schedule_period.millis(); + let allow_auction_bids = chainspec.core_config.allow_auction_bids; + let compute_rewards = chainspec.core_config.compute_rewards; + let max_delegators_per_validator = chainspec.core_config.max_delegators_per_validator; + let minimum_bid_amount = chainspec.core_config.minimum_bid_amount; + let minimum_delegation_amount = chainspec.core_config.minimum_delegation_amount; + let balance_hold_interval = chainspec.core_config.gas_hold_interval.millis(); + let include_credits = chainspec.core_config.fee_handling == FeeHandling::NoFee; + let credit_cap = Ratio::new_raw( + U512::from(*chainspec.core_config.validator_credit_cap.numer()), + U512::from(*chainspec.core_config.validator_credit_cap.denom()), + ); + let enable_addressable_entity = chainspec.core_config.enable_addressable_entity; + let native_transfer_cost = chainspec.system_costs_config.mint_costs().transfer; + Config::new( + transfer_config, + fee_handling, + refund_handling, + vesting_schedule_period_millis, + allow_auction_bids, + compute_rewards, + max_delegators_per_validator, + minimum_bid_amount, + minimum_delegation_amount, + balance_hold_interval, + include_credits, + credit_cap, + enable_addressable_entity, + native_transfer_cost, + ) + } + + /// Returns transfer config. + pub fn transfer_config(&self) -> &TransferConfig { + &self.transfer_config + } + + /// Returns fee handling setting. + pub fn fee_handling(&self) -> &FeeHandling { + &self.fee_handling + } + + /// Returns refund handling setting. + pub fn refund_handling(&self) -> &RefundHandling { + &self.refund_handling + } + + /// Returns vesting schedule period millis setting. 
+ pub fn vesting_schedule_period_millis(&self) -> u64 { + self.vesting_schedule_period_millis + } + + /// Returns if auction bids are allowed. + pub fn allow_auction_bids(&self) -> bool { + self.allow_auction_bids + } + + /// Returns if rewards should be computed. + pub fn compute_rewards(&self) -> bool { + self.compute_rewards + } + + /// Returns max delegators per validator setting. + pub fn max_delegators_per_validator(&self) -> u32 { + self.max_delegators_per_validator + } + + /// Returns minimum bid amount setting. + pub fn minimum_bid_amount(&self) -> u64 { + self.minimum_bid_amount + } + + /// Returns minimum delegation amount setting. + pub fn minimum_delegation_amount(&self) -> u64 { + self.minimum_delegation_amount + } + + /// Returns balance hold interval setting. + pub fn balance_hold_interval(&self) -> u64 { + self.balance_hold_interval + } + + /// Returns include credit setting. + pub fn include_credits(&self) -> bool { + self.include_credits + } + + /// Returns validator credit cap setting. + pub fn credit_cap(&self) -> Ratio { + self.credit_cap + } + + /// Enable the addressable entity and migrate accounts/contracts to entities. + pub fn enable_addressable_entity(&self) -> bool { + self.enable_addressable_entity + } + + /// Changes the transfer config. 
+    pub fn set_transfer_config(self, transfer_config: TransferConfig) -> Self {
+        Config {
+            transfer_config,
+            fee_handling: self.fee_handling,
+            refund_handling: self.refund_handling,
+            vesting_schedule_period_millis: self.vesting_schedule_period_millis,
+            max_delegators_per_validator: self.max_delegators_per_validator,
+            allow_auction_bids: self.allow_auction_bids,
+            minimum_bid_amount: self.minimum_bid_amount,
+            minimum_delegation_amount: self.minimum_delegation_amount,
+            compute_rewards: self.compute_rewards,
+            balance_hold_interval: self.balance_hold_interval,
+            include_credits: self.include_credits,
+            credit_cap: self.credit_cap,
+            enable_addressable_entity: self.enable_addressable_entity,
+            native_transfer_cost: self.native_transfer_cost,
+        }
+    }
+}
+
+/// Configuration for transfer.
+#[derive(Debug, Clone, PartialEq, Eq, Default)]
+pub enum TransferConfig {
+    /// Transfers are affected by the existence of administrative_accounts. This is a
+    /// behavior specific to private or managed chains, not a public chain.
+    Administered {
+        /// The set of account hashes for all administrators.
+        administrative_accounts: BTreeSet<AccountHash>,
+        /// If true, transfers are unrestricted.
+        /// If false, the source and / or target of a transfer must be an administrative account.
+        allow_unrestricted_transfers: bool,
+    },
+    /// Transfers are not affected by the existence of administrative_accounts (the standard
+    /// behavior).
+    #[default]
+    Unadministered,
+}
+
+impl TransferConfig {
+    /// Returns a new instance.
+    pub fn new(
+        administrative_accounts: BTreeSet<AccountHash>,
+        allow_unrestricted_transfers: bool,
+    ) -> Self {
+        if administrative_accounts.is_empty() && allow_unrestricted_transfers {
+            TransferConfig::Unadministered
+        } else {
+            TransferConfig::Administered {
+                administrative_accounts,
+                allow_unrestricted_transfers,
+            }
+        }
+    }
+
+    /// New instance from chainspec.
+    pub fn from_chainspec(chainspec: &Chainspec) -> Self {
+        let administrative_accounts: BTreeSet<AccountHash> = chainspec
+            .core_config
+            .administrators
+            .iter()
+            .map(|x| x.to_account_hash())
+            .collect();
+        let allow_unrestricted_transfers = chainspec.core_config.allow_unrestricted_transfers;
+        if administrative_accounts.is_empty() && allow_unrestricted_transfers {
+            TransferConfig::Unadministered
+        } else {
+            TransferConfig::Administered {
+                administrative_accounts,
+                allow_unrestricted_transfers,
+            }
+        }
+    }
+
+    /// Does account hash belong to an administrative account?
+    pub fn is_administrator(&self, account_hash: &AccountHash) -> bool {
+        match self {
+            TransferConfig::Administered {
+                administrative_accounts,
+                ..
+            } => administrative_accounts.contains(account_hash),
+            TransferConfig::Unadministered => false,
+        }
+    }
+
+    /// Administrative accounts, if any.
+    pub fn administrative_accounts(&self) -> BTreeSet<AccountHash> {
+        match self {
+            TransferConfig::Administered {
+                administrative_accounts,
+                ..
+            } => administrative_accounts.clone(),
+            TransferConfig::Unadministered => BTreeSet::default(),
+        }
+    }
+
+    /// Allow unrestricted transfers.
+    pub fn allow_unrestricted_transfers(&self) -> bool {
+        match self {
+            TransferConfig::Administered {
+                allow_unrestricted_transfers,
+                ..
+            } => *allow_unrestricted_transfers,
+            TransferConfig::Unadministered => true,
+        }
+    }
+
+    /// Restricted transfer should be enforced.
+    pub fn enforce_transfer_restrictions(&self, account_hash: &AccountHash) -> bool {
+        !self.allow_unrestricted_transfers() && !self.is_administrator(account_hash)
+    }
+}
+
+/// Id for runtime processing.
+pub enum Id {
+    /// Hash of current transaction.
+    Transaction(TransactionHash),
+    /// An arbitrary set of bytes to be used as a seed value.
+    Seed(Vec<u8>),
+}
+
+impl Id {
+    /// Returns the seed bytes derived from this id.
+ pub fn seed(&self) -> Vec { + match self { + Id::Transaction(hash) => hash.digest().into_vec(), + Id::Seed(bytes) => bytes.clone(), + } + } +} + +/// State held by an instance of runtime native. +pub struct RuntimeNative { + config: Config, + + id: Id, + address_generator: Arc>, + protocol_version: ProtocolVersion, + + tracking_copy: Rc>>, + address: AccountHash, + context_key: Key, + runtime_footprint: RuntimeFootprint, + access_rights: ContextAccessRights, + remaining_spending_limit: U512, + transfers: Vec, + phase: Phase, +} + +impl RuntimeNative +where + S: StateReader, +{ + /// Ctor. + #[allow(clippy::too_many_arguments)] + pub fn new( + config: Config, + protocol_version: ProtocolVersion, + id: Id, + address_generator: Arc>, + tracking_copy: Rc>>, + address: AccountHash, + context_key: Key, + runtime_footprint: RuntimeFootprint, + access_rights: ContextAccessRights, + remaining_spending_limit: U512, + phase: Phase, + ) -> Self { + let transfers = vec![]; + RuntimeNative { + config, + + id, + address_generator, + protocol_version, + + tracking_copy, + address, + context_key, + runtime_footprint, + access_rights, + remaining_spending_limit, + transfers, + phase, + } + } + + /// Creates a runtime with elevated permissions for systemic behaviors. 
+ pub fn new_system_runtime( + config: Config, + protocol_version: ProtocolVersion, + id: Id, + address_generator: Arc>, + tracking_copy: Rc>>, + phase: Phase, + ) -> Result { + let transfers = vec![]; + let (entity_addr, runtime_footprint, access_rights) = tracking_copy + .borrow_mut() + .system_entity_runtime_footprint(protocol_version)?; + let address = PublicKey::System.to_account_hash(); + let context_key = if config.enable_addressable_entity { + Key::AddressableEntity(entity_addr) + } else { + Key::Hash(entity_addr.value()) + }; + let remaining_spending_limit = U512::MAX; // system has no spending limit + Ok(RuntimeNative { + config, + id, + address_generator, + protocol_version, + + tracking_copy, + address, + context_key, + runtime_footprint, + access_rights, + remaining_spending_limit, + transfers, + phase, + }) + } + + /// Creates a runtime context for a system contract. + pub fn new_system_contract_runtime( + config: Config, + protocol_version: ProtocolVersion, + id: Id, + address_generator: Arc>, + tracking_copy: Rc>>, + phase: Phase, + name: &str, + ) -> Result { + let transfers = vec![]; + + let system_entity_registry = tracking_copy.borrow().get_system_entity_registry()?; + let hash = match system_entity_registry.get(name).copied() { + Some(hash) => hash, + None => { + error!("unexpected failure; system contract {} not found", name); + return Err(TrackingCopyError::MissingSystemContractHash( + name.to_string(), + )); + } + }; + let context_key = if config.enable_addressable_entity { + Key::AddressableEntity(EntityAddr::System(hash)) + } else { + Key::Hash(hash) + }; + let runtime_footprint = tracking_copy + .borrow_mut() + .runtime_footprint_by_hash_addr(hash)?; + let access_rights = runtime_footprint.extract_access_rights(hash); + let address = PublicKey::System.to_account_hash(); + let remaining_spending_limit = U512::MAX; // system has no spending limit + Ok(RuntimeNative { + config, + id, + address_generator, + protocol_version, + + 
tracking_copy, + address, + context_key, + runtime_footprint, + access_rights, + remaining_spending_limit, + transfers, + phase, + }) + } + + /// Returns mutable reference to address generator. + pub fn address_generator(&mut self) -> Arc> { + Arc::clone(&self.address_generator) + } + + /// Returns reference to config. + pub fn config(&self) -> &Config { + &self.config + } + + /// Returns reference to transfer config. + pub fn transfer_config(&self) -> &TransferConfig { + &self.config.transfer_config + } + + /// Returns protocol version. + pub fn protocol_version(&self) -> ProtocolVersion { + self.protocol_version + } + + /// Returns handle to tracking copy. + pub fn tracking_copy(&self) -> Rc>> { + Rc::clone(&self.tracking_copy) + } + + /// Returns account hash being used by this instance. + pub fn address(&self) -> AccountHash { + self.address + } + + /// Changes the account hash being used by this instance. + pub fn with_address(&mut self, account_hash: AccountHash) { + self.address = account_hash; + } + + /// Returns the context key being used by this instance. + pub fn context_key(&self) -> &Key { + &self.context_key + } + + /// Returns a reference to the runtime footprint used by this instance. + pub fn runtime_footprint(&self) -> &RuntimeFootprint { + &self.runtime_footprint + } + + /// Returns the addressable entity being used by this instance. + pub fn runtime_footprint_mut(&mut self) -> &mut RuntimeFootprint { + &mut self.runtime_footprint + } + + /// Changes the addressable entity being used by this instance. + pub fn with_addressable_entity(&mut self, runtime_footprint: RuntimeFootprint) { + self.runtime_footprint = runtime_footprint; + } + + /// Returns a reference to the named keys being used by this instance. + pub fn named_keys(&self) -> &NamedKeys { + self.runtime_footprint().named_keys() + } + + /// Returns a mutable reference to the named keys being used by this instance. 
+ pub fn named_keys_mut(&mut self) -> &mut NamedKeys { + self.runtime_footprint.named_keys_mut() + } + + /// Returns a reference to the access rights being used by this instance. + pub fn access_rights(&self) -> &ContextAccessRights { + &self.access_rights + } + + /// Returns a mutable reference to the access rights being used by this instance. + pub fn access_rights_mut(&mut self) -> &mut ContextAccessRights { + &mut self.access_rights + } + + /// Extends the access rights being used by this instance. + pub fn extend_access_rights(&mut self, urefs: &[URef]) { + self.access_rights.extend(urefs) + } + + /// Returns the remaining spending limit. + pub fn remaining_spending_limit(&self) -> U512 { + self.remaining_spending_limit + } + + /// Set remaining spending limit. + pub fn set_remaining_spending_limit(&mut self, remaining: U512) { + self.remaining_spending_limit = remaining; + } + + /// Get references to transfers. + pub fn transfers(&self) -> &Vec { + &self.transfers + } + + /// Push transfer instance. + pub fn push_transfer(&mut self, transfer: Transfer) { + self.transfers.push(transfer); + } + + /// Get id. + pub fn id(&self) -> &Id { + &self.id + } + + /// Get phase. + pub fn phase(&self) -> Phase { + self.phase + } + + /// Vesting schedule period in milliseconds. + pub fn vesting_schedule_period_millis(&self) -> u64 { + self.config.vesting_schedule_period_millis + } + + /// Are auction bids allowed? + pub fn allow_auction_bids(&self) -> bool { + self.config.allow_auction_bids + } + + /// Are rewards computed? + pub fn compute_rewards(&self) -> bool { + self.config.compute_rewards + } + + /// Extracts transfer items. 
+ pub fn into_transfers(self) -> Vec { + self.transfers + } + + pub(crate) fn native_transfer_cost(&self) -> u32 { + self.config.native_transfer_cost + } +} diff --git a/storage/src/system/standard_payment.rs b/storage/src/system/standard_payment.rs new file mode 100644 index 0000000000..8b13789179 --- /dev/null +++ b/storage/src/system/standard_payment.rs @@ -0,0 +1 @@ + diff --git a/storage/src/system/standard_payment/account_provider.rs b/storage/src/system/standard_payment/account_provider.rs new file mode 100644 index 0000000000..e69de29bb2 diff --git a/storage/src/system/standard_payment/handle_payment_provider.rs b/storage/src/system/standard_payment/handle_payment_provider.rs new file mode 100644 index 0000000000..e69de29bb2 diff --git a/storage/src/system/standard_payment/mint_provider.rs b/storage/src/system/standard_payment/mint_provider.rs new file mode 100644 index 0000000000..e69de29bb2 diff --git a/storage/src/system/transfer.rs b/storage/src/system/transfer.rs new file mode 100644 index 0000000000..ce2634e9c6 --- /dev/null +++ b/storage/src/system/transfer.rs @@ -0,0 +1,463 @@ +use std::{cell::RefCell, convert::TryFrom, rc::Rc}; +use thiserror::Error; + +use casper_types::{ + account::AccountHash, + bytesrepr::FromBytes, + system::{mint, mint::Error as MintError}, + AccessRights, CLType, CLTyped, CLValue, CLValueError, Key, ProtocolVersion, RuntimeArgs, + RuntimeFootprint, StoredValue, StoredValueTypeMismatch, URef, U512, +}; + +use crate::{ + global_state::{error::Error as GlobalStateError, state::StateReader}, + tracking_copy::{TrackingCopy, TrackingCopyEntityExt, TrackingCopyError, TrackingCopyExt}, +}; + +/// Transfer error. +#[derive(Clone, Error, Debug)] +pub enum TransferError { + /// Invalid key variant. + #[error("Invalid key {0}")] + UnexpectedKeyVariant(Key), + /// Type mismatch error. + #[error("{}", _0)] + TypeMismatch(StoredValueTypeMismatch), + /// Forged reference error. 
+ #[error("Forged reference: {}", _0)] + ForgedReference(URef), + /// Invalid access. + #[error("Invalid access rights: {}", required)] + InvalidAccess { + /// Required access rights of the operation. + required: AccessRights, + }, + /// Error converting a CLValue. + #[error("{0}")] + CLValue(CLValueError), + /// Invalid purse. + #[error("Invalid purse")] + InvalidPurse, + /// Invalid argument. + #[error("Invalid argument")] + InvalidArgument, + /// Missing argument. + #[error("Missing argument")] + MissingArgument, + /// Invalid purse. + #[error("Attempt to transfer amount 0")] + AttemptToTransferZero, + /// Invalid operation. + #[error("Invalid operation")] + InvalidOperation, + /// Disallowed transfer attempt (private chain). + #[error("Either the source or the target must be an admin (private chain).")] + RestrictedTransferAttempted, + /// Could not determine if target is an admin (private chain). + #[error("Unable to determine if the target of a transfer is an admin")] + UnableToVerifyTargetIsAdmin, + /// Tracking copy error. + #[error("{0}")] + TrackingCopy(TrackingCopyError), + /// Mint error. + #[error("{0}")] + Mint(MintError), +} + +impl From for TransferError { + fn from(gse: GlobalStateError) -> Self { + TransferError::TrackingCopy(TrackingCopyError::Storage(gse)) + } +} + +impl From for TransferError { + fn from(tce: TrackingCopyError) -> Self { + TransferError::TrackingCopy(tce) + } +} + +/// A target mode indicates if a native transfer's arguments will resolve to an existing purse, or +/// will have to create a new account first. +#[derive(Copy, Clone, Debug, PartialEq)] +pub enum TransferTargetMode { + /// Native transfer arguments resolved into a transfer to an existing account. + ExistingAccount { + /// Existing account hash. + target_account_hash: AccountHash, + /// Main purse of a resolved account. + main_purse: URef, + }, + /// Native transfer arguments resolved into a transfer to a purse. + PurseExists { + /// Target account hash (if known). 
+ target_account_hash: Option, + /// Purse. + purse_uref: URef, + }, + /// Native transfer arguments resolved into a transfer to a new account. + CreateAccount(AccountHash), +} + +impl TransferTargetMode { + /// Target account hash, if any. + pub fn target_account_hash(&self) -> Option { + match self { + TransferTargetMode::PurseExists { + target_account_hash, + .. + } => *target_account_hash, + TransferTargetMode::ExistingAccount { + target_account_hash, + .. + } => Some(*target_account_hash), + TransferTargetMode::CreateAccount(target_account_hash) => Some(*target_account_hash), + } + } +} + +/// Mint's transfer arguments. +/// +/// A struct has a benefit of static typing, which is helpful while resolving the arguments. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct TransferArgs { + to: Option, + source: URef, + target: URef, + amount: U512, + arg_id: Option, +} + +impl TransferArgs { + /// Creates new transfer arguments. + pub fn new( + to: Option, + source: URef, + target: URef, + amount: U512, + arg_id: Option, + ) -> Self { + Self { + to, + source, + target, + amount, + arg_id, + } + } + + /// Returns `to` field. + pub fn to(&self) -> Option { + self.to + } + + /// Returns `source` field. + pub fn source(&self) -> URef { + self.source + } + + /// Returns `target` field. + pub fn target(&self) -> URef { + self.target + } + + /// Returns `amount` field. + pub fn amount(&self) -> U512 { + self.amount + } + + /// Returns `arg_id` field. 
+ pub fn arg_id(&self) -> Option { + self.arg_id + } +} + +impl TryFrom for RuntimeArgs { + type Error = CLValueError; + + fn try_from(transfer_args: TransferArgs) -> Result { + let mut runtime_args = RuntimeArgs::new(); + + runtime_args.insert(mint::ARG_TO, transfer_args.to)?; + runtime_args.insert(mint::ARG_SOURCE, transfer_args.source)?; + runtime_args.insert(mint::ARG_TARGET, transfer_args.target)?; + runtime_args.insert(mint::ARG_AMOUNT, transfer_args.amount)?; + runtime_args.insert(mint::ARG_ID, transfer_args.arg_id)?; + + Ok(runtime_args) + } +} + +/// State of a builder of a `TransferArgs`. +/// +/// Purpose of this builder is to resolve native transfer args into [`TransferTargetMode`] and a +/// [`TransferArgs`] instance to execute actual token transfer on the mint contract. +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct TransferRuntimeArgsBuilder { + inner: RuntimeArgs, +} + +impl TransferRuntimeArgsBuilder { + /// Creates new transfer args builder. + /// + /// Takes an incoming runtime args that represents native transfer's arguments. + pub fn new(imputed_runtime_args: RuntimeArgs) -> TransferRuntimeArgsBuilder { + TransferRuntimeArgsBuilder { + inner: imputed_runtime_args, + } + } + + /// Checks if a purse exists. + fn purse_exists(&self, uref: URef, tracking_copy: Rc>>) -> bool + where + R: StateReader, + { + let key = match tracking_copy + .borrow_mut() + .get_purse_balance_key(uref.into()) + { + Ok(key) => key, + Err(_) => return false, + }; + tracking_copy + .borrow_mut() + .get_available_balance(key) + .is_ok() + } + + /// Resolves the source purse of the transfer. + /// + /// User can optionally pass a "source" argument which should refer to an [`URef`] existing in + /// user's named keys. When the "source" argument is missing then user's main purse is assumed. + /// + /// Returns resolved [`URef`]. 
+ fn resolve_source_uref( + &self, + account: &RuntimeFootprint, + tracking_copy: Rc>>, + ) -> Result + where + R: StateReader, + { + let imputed_runtime_args = &self.inner; + let arg_name = mint::ARG_SOURCE; + let uref = match imputed_runtime_args.get(arg_name) { + Some(cl_value) if *cl_value.cl_type() == CLType::URef => { + self.map_cl_value::(cl_value)? + } + Some(cl_value) if *cl_value.cl_type() == CLType::Option(CLType::URef.into()) => { + let Some(uref): Option = self.map_cl_value(cl_value)? else { + return account.main_purse().ok_or(TransferError::InvalidOperation); + }; + uref + } + Some(_) => return Err(TransferError::InvalidArgument), + None => return account.main_purse().ok_or(TransferError::InvalidOperation), /* if no source purse passed use account + * main purse */ + }; + if account + .main_purse() + .ok_or(TransferError::InvalidOperation)? + .addr() + == uref.addr() + { + return Ok(uref); + } + + let normalized_uref = Key::URef(uref).normalize(); + let maybe_named_key = account + .named_keys() + .keys() + .find(|&named_key| named_key.normalize() == normalized_uref); + + match maybe_named_key { + Some(Key::URef(found_uref)) => { + if found_uref.is_writeable() { + // it is a URef and caller has access but is it a purse URef? + if !self.purse_exists(found_uref.to_owned(), tracking_copy) { + return Err(TransferError::InvalidPurse); + } + + Ok(uref) + } else { + Err(TransferError::InvalidAccess { + required: AccessRights::WRITE, + }) + } + } + Some(key) => Err(TransferError::TypeMismatch(StoredValueTypeMismatch::new( + "Key::URef".to_string(), + key.type_string(), + ))), + None => Err(TransferError::ForgedReference(uref)), + } + } + + /// Resolves a transfer target mode. 
+ /// + /// User has to specify a "target" argument which must be one of the following types: + /// * an existing purse [`URef`] + /// * a 32-byte array, interpreted as an account hash + /// * a [`Key::Account`], from which the account hash is extracted + /// * a [`casper_types::PublicKey`], which is converted to an account hash + /// + /// If the "target" account hash is not existing, then a special variant is returned that + /// indicates that the system has to create new account first. + /// + /// Returns [`TransferTargetMode`] with a resolved variant. + pub fn resolve_transfer_target_mode( + &mut self, + protocol_version: ProtocolVersion, + tracking_copy: Rc>>, + ) -> Result + where + R: StateReader, + { + let imputed_runtime_args = &self.inner; + let to_name = mint::ARG_TO; + + let target_account_hash = match imputed_runtime_args.get(to_name) { + Some(cl_value) + if *cl_value.cl_type() == CLType::Option(Box::new(CLType::ByteArray(32))) => + { + let to: Option = self.map_cl_value(cl_value)?; + to + } + Some(_) | None => None, + }; + + let target_name = mint::ARG_TARGET; + let account_hash = match imputed_runtime_args.get(target_name) { + Some(cl_value) if *cl_value.cl_type() == CLType::URef => { + let purse_uref = self.map_cl_value(cl_value)?; + + if !self.purse_exists(purse_uref, tracking_copy) { + return Err(TransferError::InvalidPurse); + } + + return Ok(TransferTargetMode::PurseExists { + purse_uref, + target_account_hash, + }); + } + Some(cl_value) if *cl_value.cl_type() == CLType::ByteArray(32) => { + self.map_cl_value(cl_value)? 
+ } + Some(cl_value) if *cl_value.cl_type() == CLType::Key => { + let account_key: Key = self.map_cl_value(cl_value)?; + let account_hash: AccountHash = account_key + .into_account() + .ok_or(TransferError::UnexpectedKeyVariant(account_key))?; + account_hash + } + Some(cl_value) if *cl_value.cl_type() == CLType::PublicKey => { + let public_key = self.map_cl_value(cl_value)?; + AccountHash::from(&public_key) + } + Some(_) => return Err(TransferError::InvalidArgument), + None => return Err(TransferError::MissingArgument), + }; + + match tracking_copy + .borrow_mut() + .runtime_footprint_by_account_hash(protocol_version, account_hash) + { + Ok((_, entity)) => { + let main_purse_addable = entity + .main_purse() + .ok_or(TransferError::InvalidPurse)? + .with_access_rights(AccessRights::ADD); + Ok(TransferTargetMode::ExistingAccount { + target_account_hash: account_hash, + main_purse: main_purse_addable, + }) + } + Err(_) => Ok(TransferTargetMode::CreateAccount(account_hash)), + } + } + + /// Resolves amount. + /// + /// User has to specify "amount" argument that could be either a [`U512`] or a u64. + fn resolve_amount(&self) -> Result { + let imputed_runtime_args = &self.inner; + + let amount = match imputed_runtime_args.get(mint::ARG_AMOUNT) { + Some(amount_value) if *amount_value.cl_type() == CLType::U512 => { + self.map_cl_value(amount_value)? + } + Some(amount_value) if *amount_value.cl_type() == CLType::U64 => { + let amount: u64 = self.map_cl_value(amount_value)?; + U512::from(amount) + } + Some(_) => return Err(TransferError::InvalidArgument), + None => return Err(TransferError::MissingArgument), + }; + + if amount.is_zero() { + return Err(TransferError::AttemptToTransferZero); + } + + Ok(amount) + } + + fn resolve_id(&self) -> Result, TransferError> { + let id: Option = if let Some(id_value) = self.inner.get(mint::ARG_ID) { + self.map_cl_value(id_value)? + } else { + None + }; + Ok(id) + } + + /// Creates new [`TransferArgs`] instance. 
+ pub fn build( + mut self, + from: &RuntimeFootprint, + protocol_version: ProtocolVersion, + tracking_copy: Rc>>, + ) -> Result + where + R: StateReader, + { + let (to, target) = match self + .resolve_transfer_target_mode(protocol_version, Rc::clone(&tracking_copy))? + { + TransferTargetMode::ExistingAccount { + main_purse: purse_uref, + target_account_hash: target_account, + } => (Some(target_account), purse_uref), + TransferTargetMode::PurseExists { + target_account_hash, + purse_uref, + } => (target_account_hash, purse_uref), + TransferTargetMode::CreateAccount(_) => { + // Method "build()" is called after `resolve_transfer_target_mode` is first called + // and handled by creating a new account. Calling `resolve_transfer_target_mode` + // for the second time should never return `CreateAccount` variant. + return Err(TransferError::InvalidOperation); + } + }; + + let source = self.resolve_source_uref(from, Rc::clone(&tracking_copy))?; + + if source.addr() == target.addr() { + return Err(TransferError::InvalidPurse); + } + + let amount = self.resolve_amount()?; + + let arg_id = self.resolve_id()?; + + Ok(TransferArgs { + to, + source, + target, + amount, + arg_id, + }) + } + + fn map_cl_value(&self, cl_value: &CLValue) -> Result { + cl_value.clone().into_t().map_err(TransferError::CLValue) + } +} diff --git a/storage/src/tracking_copy/byte_size.rs b/storage/src/tracking_copy/byte_size.rs new file mode 100644 index 0000000000..d71443e6c9 --- /dev/null +++ b/storage/src/tracking_copy/byte_size.rs @@ -0,0 +1,109 @@ +use casper_types::{account::Account, bytesrepr::ToBytes, ByteCode, Key, StoredValue}; + +/// Returns byte size of the element - both heap size and stack size. 
+pub trait ByteSize { + fn byte_size(&self) -> usize; +} + +impl ByteSize for Key { + fn byte_size(&self) -> usize { + size_of::() + self.heap_size() + } +} + +impl ByteSize for String { + fn byte_size(&self) -> usize { + size_of::() + self.heap_size() + } +} + +impl ByteSize for StoredValue { + fn byte_size(&self) -> usize { + size_of::() + + match self { + StoredValue::CLValue(cl_value) => cl_value.serialized_length(), + StoredValue::Account(account) => account.serialized_length(), + StoredValue::ContractWasm(contract_wasm) => contract_wasm.serialized_length(), + StoredValue::ContractPackage(contract_package) => { + contract_package.serialized_length() + } + StoredValue::Contract(contract) => contract.serialized_length(), + StoredValue::AddressableEntity(contract_header) => { + contract_header.serialized_length() + } + StoredValue::SmartContract(package) => package.serialized_length(), + StoredValue::DeployInfo(deploy_info) => deploy_info.serialized_length(), + StoredValue::Transfer(transfer_v1) => transfer_v1.serialized_length(), + StoredValue::EraInfo(era_info) => era_info.serialized_length(), + StoredValue::Bid(bid) => bid.serialized_length(), + StoredValue::BidKind(bid_kind) => bid_kind.serialized_length(), + StoredValue::Withdraw(withdraw_purses) => withdraw_purses.serialized_length(), + StoredValue::Unbonding(unbonding_purses) => unbonding_purses.serialized_length(), + StoredValue::ByteCode(byte_code) => byte_code.serialized_length(), + StoredValue::MessageTopic(message_topic_summary) => { + message_topic_summary.serialized_length() + } + StoredValue::Message(message_summary) => message_summary.serialized_length(), + StoredValue::NamedKey(named_key) => named_key.serialized_length(), + StoredValue::Prepayment(prepayment_kind) => prepayment_kind.serialized_length(), + StoredValue::EntryPoint(entry_point) => entry_point.serialized_length(), + StoredValue::RawBytes(raw_bytes) => raw_bytes.serialized_length(), + } + } +} + +/// Returns heap size of the value. 
+/// Note it's different from [ByteSize] that returns both heap and stack size. +pub trait HeapSizeOf { + fn heap_size(&self) -> usize; +} + +impl HeapSizeOf for Key { + fn heap_size(&self) -> usize { + 0 + } +} + +// TODO: contract has other fields (re a bunch) that are not repr here...on purpose? +impl HeapSizeOf for Account { + fn heap_size(&self) -> usize { + // NOTE: We're ignoring size of the tree's nodes. + self.named_keys() + .iter() + .fold(0, |sum, (k, v)| sum + k.heap_size() + v.heap_size()) + } +} + +// TODO: contract has other fields (re protocol version) that are not repr here...on purpose? +impl HeapSizeOf for ByteCode { + fn heap_size(&self) -> usize { + self.bytes().len() + } +} + +impl ByteSize for [T] { + fn byte_size(&self) -> usize { + self.iter() + .fold(0, |sum, el| sum + size_of::() + el.heap_size()) + } +} + +impl HeapSizeOf for String { + fn heap_size(&self) -> usize { + self.capacity() + } +} + +#[cfg(test)] +mod tests { + use super::ByteSize; + + fn assert_byte_size(el: T, expected: usize) { + assert_eq!(el.byte_size(), expected) + } + + #[test] + fn byte_size_of_string() { + assert_byte_size("Hello".to_owned(), 5 + size_of::()) + } +} diff --git a/storage/src/tracking_copy/error.rs b/storage/src/tracking_copy/error.rs new file mode 100644 index 0000000000..71c7b023f7 --- /dev/null +++ b/storage/src/tracking_copy/error.rs @@ -0,0 +1,143 @@ +use thiserror::Error; + +use crate::data_access_layer::balance::BalanceFailure; +use casper_types::{ + account::{AddKeyFailure, RemoveKeyFailure, SetThresholdFailure, UpdateKeyFailure}, + bytesrepr, system, ApiError, CLType, CLValueError, Key, StoredValueTypeMismatch, +}; + +/// Possible tracking copy errors. +#[derive(Error, Debug, Clone)] +#[non_exhaustive] +pub enum Error { + /// Storage error. + #[error("Storage error: {}", _0)] + Storage(crate::global_state::error::Error), + /// Failed to (de)serialize bytes. 
+ #[error("Serialization error: {}", _0)] + BytesRepr(bytesrepr::Error), + /// Unable to find named key. + #[error("Named key {} not found", _0)] + NamedKeyNotFound(String), + /// Unable to find a key. + #[error("Key {} not found", _0)] + KeyNotFound(Key), + /// Unable to find an account. + #[error("Account {:?} not found", _0)] + AccountNotFound(Key), + /// Type mismatch error. + #[error("{}", _0)] + TypeMismatch(StoredValueTypeMismatch), + /// ApiError. + #[error("{}", _0)] + Api(ApiError), + /// Error adding an associated key. + #[error("{}", _0)] + AddKeyFailure(AddKeyFailure), + /// Error removing an associated key. + #[error("{}", _0)] + RemoveKeyFailure(RemoveKeyFailure), + /// Error updating an associated key. + #[error("{}", _0)] + UpdateKeyFailure(UpdateKeyFailure), + /// Error setting threshold on associated key. + #[error("{}", _0)] + SetThresholdFailure(SetThresholdFailure), + /// Error executing system contract. + #[error("{}", _0)] + SystemContract(system::Error), + /// Weight of all used associated keys does not meet account's deploy threshold. + #[error("Deployment authorization failure")] + DeploymentAuthorizationFailure, + /// Error converting a CLValue. + #[error("{0}")] + CLValue(CLValueError), + /// Unexpected variant of a stored value. + #[error("Unexpected variant of a stored value")] + UnexpectedStoredValueVariant, + /// Missing system contract hash. + #[error("Missing system contract hash: {0}")] + MissingSystemContractHash(String), + /// Invalid key + #[error("Invalid key {0}")] + UnexpectedKeyVariant(Key), + /// Circular reference error. + #[error("Query attempted a circular reference: {0}")] + CircularReference(String), + /// Depth limit reached. + #[error("Query exceeded depth limit: {depth}")] + QueryDepthLimit { + /// Current depth limit. + depth: u64, + }, + /// Missing bid. + #[error("Missing bid: {0}")] + MissingBid(Key), + /// Not authorized. + #[error("Authorization error")] + Authorization, + /// The value wasn't found. 
+ #[error("Value not found")] + ValueNotFound(String), + /// Balance calculation failure. + #[error("Balance calculation failure")] + Balance(BalanceFailure), + /// Unable to find a contract. + #[error("Contract {:?} not found", _0)] + ContractNotFound(Key), + #[error("flag")] + /// Attempted to fetch an entity or an associated record + AddressableEntityDisable, +} + +impl Error { + /// Returns new type mismatch error. + pub fn type_mismatch(expected: CLType, found: CLType) -> Error { + Error::TypeMismatch(StoredValueTypeMismatch::new( + format!("{:?}", expected), + format!("{:?}", found), + )) + } +} + +impl From for Error { + fn from(e: bytesrepr::Error) -> Self { + Error::BytesRepr(e) + } +} + +impl From for Error { + fn from(err: AddKeyFailure) -> Self { + Error::AddKeyFailure(err) + } +} + +impl From for Error { + fn from(err: RemoveKeyFailure) -> Self { + Error::RemoveKeyFailure(err) + } +} + +impl From for Error { + fn from(err: UpdateKeyFailure) -> Self { + Error::UpdateKeyFailure(err) + } +} + +impl From for Error { + fn from(err: SetThresholdFailure) -> Self { + Error::SetThresholdFailure(err) + } +} + +impl From for Error { + fn from(e: CLValueError) -> Self { + Error::CLValue(e) + } +} + +impl From for Error { + fn from(gse: crate::global_state::error::Error) -> Self { + Error::Storage(gse) + } +} diff --git a/storage/src/tracking_copy/ext.rs b/storage/src/tracking_copy/ext.rs new file mode 100644 index 0000000000..41ee058ad2 --- /dev/null +++ b/storage/src/tracking_copy/ext.rs @@ -0,0 +1,675 @@ +use std::{ + collections::{btree_map::Entry, BTreeMap}, + convert::TryInto, +}; +use tracing::{error, warn}; + +use crate::{ + data_access_layer::balance::{ + AvailableBalanceChecker, BalanceHolds, BalanceHoldsWithProof, ProcessingHoldBalanceHandling, + }, + global_state::{error::Error as GlobalStateError, state::StateReader}, + tracking_copy::{TrackingCopy, TrackingCopyEntityExt, TrackingCopyError}, + KeyPrefix, +}; +use casper_types::{ + account::AccountHash, 
+ addressable_entity::MessageTopics, + bytesrepr::ToBytes, + contract_messages::TopicNameHash, + contracts::{ContractHash, NamedKeys}, + global_state::TrieMerkleProof, + system::{ + mint::{ + BalanceHoldAddr, BalanceHoldAddrTag, MINT_GAS_HOLD_HANDLING_KEY, + MINT_GAS_HOLD_INTERVAL_KEY, + }, + MINT, + }, + BlockGlobalAddr, BlockTime, ByteCode, ByteCodeAddr, ByteCodeHash, CLValue, ChecksumRegistry, + Contract, EntityAddr, EntryPoints, HashAddr, HoldBalanceHandling, HoldsEpoch, Key, Motes, + Package, StoredValue, StoredValueTypeMismatch, SystemHashRegistry, URef, URefAddr, U512, +}; + +/// Higher-level operations on the state via a `TrackingCopy`. +pub trait TrackingCopyExt { + /// The type for the returned errors. + type Error; + + /// Reads the entity key for a given account hash. + fn read_account_key(&mut self, account_hash: AccountHash) -> Result; + + /// Returns block time associated with checked out root hash. + fn get_block_time(&self) -> Result, Self::Error>; + + /// Returns balance hold configuration settings for imputed kind of balance hold. + fn get_balance_hold_config( + &self, + hold_kind: BalanceHoldAddrTag, + ) -> Result, Self::Error>; + + /// Gets the purse balance key for a given purse. + fn get_purse_balance_key(&self, purse_key: Key) -> Result; + + /// Gets the balance hold keys for the imputed purse (if any). + fn get_balance_hold_addresses( + &self, + purse_addr: URefAddr, + ) -> Result, Self::Error>; + + /// Returns total balance. + fn get_total_balance(&self, key: Key) -> Result; + + /// Returns the available balance, considering any holds from holds_epoch to now. + fn get_available_balance(&mut self, balance_key: Key) -> Result; + + /// Gets the purse balance key for a given purse and provides a Merkle proof. + fn get_purse_balance_key_with_proof( + &self, + purse_key: Key, + ) -> Result<(Key, TrieMerkleProof), Self::Error>; + + /// Gets the balance at a given balance key and provides a Merkle proof. 
+ fn get_total_balance_with_proof( + &self, + balance_key: Key, + ) -> Result<(U512, TrieMerkleProof), Self::Error>; + + /// Clear expired balance holds. + fn clear_expired_balance_holds( + &mut self, + purse_addr: URefAddr, + filter: Vec<(BalanceHoldAddrTag, HoldsEpoch)>, + ) -> Result<(), Self::Error>; + + /// Gets the balance holds for a given balance, without Merkle proofs. + fn get_balance_holds( + &mut self, + purse_addr: URefAddr, + block_time: BlockTime, + interval: u64, + ) -> Result, Self::Error>; + + /// Gets the balance holds for a given balance, with Merkle proofs. + fn get_balance_holds_with_proof( + &self, + purse_addr: URefAddr, + ) -> Result, Self::Error>; + + /// Returns the collection of message topics (if any) for a given HashAddr. + fn get_message_topics(&self, entity_addr: EntityAddr) -> Result; + + /// Returns the collection of named keys for a given AddressableEntity. + fn get_named_keys(&self, entity_addr: EntityAddr) -> Result; + + /// Returns the collection of entry points for a given AddresableEntity. + fn get_v1_entry_points(&self, entity_addr: EntityAddr) -> Result; + + /// Gets a package by hash. + fn get_package(&mut self, package_hash: HashAddr) -> Result; + + /// Get a Contract record. + fn get_contract(&mut self, contract_hash: ContractHash) -> Result; + + /// Gets the system entity registry. + fn get_system_entity_registry(&self) -> Result; + + /// Gets the system checksum registry. + fn get_checksum_registry(&mut self) -> Result, Self::Error>; + + /// Gets byte code by hash. + fn get_byte_code(&mut self, byte_code_hash: ByteCodeHash) -> Result; +} + +impl TrackingCopyExt for TrackingCopy +where + R: StateReader, +{ + type Error = TrackingCopyError; + + fn read_account_key(&mut self, account_hash: AccountHash) -> Result { + let account_key = Key::Account(account_hash); + match self.read(&account_key)? 
{
            Some(StoredValue::CLValue(cl_value)) => Ok(CLValue::into_t(cl_value)?),
            Some(other) => Err(TrackingCopyError::TypeMismatch(
                StoredValueTypeMismatch::new("Account".to_string(), other.type_name()),
            )),
            None => Err(TrackingCopyError::KeyNotFound(account_key)),
        }
    }

    /// Returns the block time currently stored under the block-global key, if any.
    fn get_block_time(&self) -> Result<Option<BlockTime>, Self::Error> {
        match self.read(&Key::BlockGlobal(BlockGlobalAddr::BlockTime))? {
            None => Ok(None),
            Some(StoredValue::CLValue(cl_value)) => {
                let block_time = cl_value.into_t().map_err(Self::Error::CLValue)?;
                Ok(Some(BlockTime::new(block_time)))
            }
            Some(unexpected) => {
                warn!(?unexpected, "block time stored as unexpected value type");
                Err(Self::Error::UnexpectedStoredValueVariant)
            }
        }
    }

    /// Returns the hold configuration (current block time, handling variant, interval
    /// in millis) for the imputed hold kind, or `None` if no block time exists at this
    /// root hash (i.e. balance holds are not in effect at this state).
    fn get_balance_hold_config(
        &self,
        hold_kind: BalanceHoldAddrTag,
    ) -> Result<Option<(BlockTime, HoldBalanceHandling, u64)>, Self::Error> {
        let block_time = match self.get_block_time()? {
            None => return Ok(None),
            Some(block_time) => block_time,
        };
        let (handling_key, interval_key) = match hold_kind {
            BalanceHoldAddrTag::Processing => {
                // processing holds are accrued and cleared within a single block, so
                // they carry no configurable handling / interval
                return Ok(Some((block_time, HoldBalanceHandling::Accrued, 0)));
            }
            BalanceHoldAddrTag::Gas => (MINT_GAS_HOLD_HANDLING_KEY, MINT_GAS_HOLD_INTERVAL_KEY),
        };

        // gas hold config is stored in the mint's named keys
        let system_contract_registry = self.get_system_entity_registry()?;

        let entity_hash = *system_contract_registry.get(MINT).ok_or_else(|| {
            error!("Missing system mint contract hash");
            TrackingCopyError::MissingSystemContractHash(MINT.to_string())
        })?;

        let named_keys = self.get_named_keys(EntityAddr::System(entity_hash))?;

        // get the handling
        let handling = {
            let named_key =
                named_keys
                    .get(handling_key)
                    .ok_or(TrackingCopyError::NamedKeyNotFound(
                        handling_key.to_string(),
                    ))?;
            let _uref = named_key
                .as_uref()
                .ok_or(TrackingCopyError::UnexpectedKeyVariant(*named_key))?;

            match self.read(&named_key.normalize()) {
                Ok(Some(StoredValue::CLValue(cl_value))) => {
                    let handling_tag = cl_value.into_t().map_err(TrackingCopyError::CLValue)?;

                    HoldBalanceHandling::from_tag(handling_tag).map_err(|_| {
                        TrackingCopyError::ValueNotFound(
                            "No hold balance handling variant matches stored tag".to_string(),
                        )
                    })?
                }
                Ok(Some(unexpected)) => {
                    warn!(
                        ?unexpected,
                        "hold balance handling unexpected stored value variant"
                    );
                    return Err(TrackingCopyError::UnexpectedStoredValueVariant);
                }
                Ok(None) => {
                    error!("hold balance handling missing from gs");
                    return Err(TrackingCopyError::ValueNotFound(handling_key.to_string()));
                }
                Err(gse) => {
                    error!(?gse, "hold balance handling read error");
                    return Err(TrackingCopyError::Storage(gse));
                }
            }
        };

        // get the interval.
        let interval = {
            let named_key =
                named_keys
                    .get(interval_key)
                    .ok_or(TrackingCopyError::NamedKeyNotFound(
                        interval_key.to_string(),
                    ))?;
            let _uref = named_key
                .as_uref()
                .ok_or(TrackingCopyError::UnexpectedKeyVariant(*named_key))?;

            match self.read(&named_key.normalize()) {
                Ok(Some(StoredValue::CLValue(cl_value))) => {
                    cl_value.into_t().map_err(TrackingCopyError::CLValue)?
                }
                Ok(Some(unexpected)) => {
                    warn!(
                        ?unexpected,
                        "hold balance interval unexpected stored value variant"
                    );
                    return Err(TrackingCopyError::UnexpectedStoredValueVariant);
                }
                Ok(None) => {
                    error!("hold balance interval missing from gs");
                    // NOTE(review): this previously reported `handling_key`, which
                    // misattributed a missing *interval* value to the handling named key.
                    return Err(TrackingCopyError::ValueNotFound(interval_key.to_string()));
                }
                Err(gse) => return Err(TrackingCopyError::Storage(gse)),
            }
        };

        Ok(Some((block_time, handling, interval)))
    }

    /// Normalizes a purse `URef` key into the corresponding `Key::Balance`.
    fn get_purse_balance_key(&self, purse_key: Key) -> Result<Key, Self::Error> {
        let balance_key: URef = purse_key
            .into_uref()
            .ok_or(TrackingCopyError::UnexpectedKeyVariant(purse_key))?;
        Ok(Key::Balance(balance_key.addr()))
    }

    /// Returns all balance hold addresses (gas and processing) recorded for a purse.
    fn get_balance_hold_addresses(
        &self,
        purse_addr: URefAddr,
    ) -> Result<Vec<BalanceHoldAddr>, Self::Error> {
        let tagged_keys = {
            let mut ret: Vec<BalanceHoldAddr> = vec![];
            let gas_prefix = KeyPrefix::GasBalanceHoldsByPurse(purse_addr).to_bytes()?;
            for key in self.keys_with_prefix(&gas_prefix)? {
                let addr = key
                    .as_balance_hold()
                    .ok_or(Self::Error::UnexpectedKeyVariant(key))?;
                ret.push(*addr);
            }
            let processing_prefix =
                KeyPrefix::ProcessingBalanceHoldsByPurse(purse_addr).to_bytes()?;
            for key in self.keys_with_prefix(&processing_prefix)? {
                let addr = key
                    .as_balance_hold()
                    .ok_or(Self::Error::UnexpectedKeyVariant(key))?;
                ret.push(*addr);
            }
            ret
        };
        Ok(tagged_keys)
    }

    /// Returns the total balance (holds included) under a purse or balance key.
    fn get_total_balance(&self, key: Key) -> Result<Motes, Self::Error> {
        let key = {
            if let Key::URef(uref) = key {
                Key::Balance(uref.addr())
            } else {
                key
            }
        };
        if let Key::Balance(_) = key {
            let stored_value: StoredValue = self
                .read(&key)?
+ .ok_or(TrackingCopyError::KeyNotFound(key))?; + let cl_value: CLValue = stored_value + .try_into() + .map_err(TrackingCopyError::TypeMismatch)?; + let total_balance = cl_value.into_t::()?; + Ok(Motes::new(total_balance)) + } else { + Err(Self::Error::UnexpectedKeyVariant(key)) + } + } + + fn get_available_balance(&mut self, key: Key) -> Result { + let purse_addr = { + if let Key::URef(uref) = key { + uref.addr() + } else if let Key::Balance(uref_addr) = key { + uref_addr + } else { + return Err(Self::Error::UnexpectedKeyVariant(key)); + } + }; + + let total_balance = self.get_total_balance(Key::Balance(purse_addr))?.value(); + let (block_time, handling, interval) = + match self.get_balance_hold_config(BalanceHoldAddrTag::Gas)? { + None => { + // if there is no hold config at this root hash, holds are not a thing + // and available balance = total balance + return Ok(Motes::new(total_balance)); + } + Some((block_time, handling, interval)) => (block_time, handling, interval), + }; + + let balance_holds = self.get_balance_holds(purse_addr, block_time, interval)?; + let gas_handling = (handling, interval).into(); + let processing_handling = ProcessingHoldBalanceHandling::new(); + match balance_holds.available_balance( + block_time, + total_balance, + gas_handling, + processing_handling, + ) { + Ok(balance) => Ok(Motes::new(balance)), + Err(balance_error) => Err(Self::Error::Balance(balance_error)), + } + } + + fn get_purse_balance_key_with_proof( + &self, + purse_key: Key, + ) -> Result<(Key, TrieMerkleProof), Self::Error> { + let balance_key: Key = purse_key + .uref_to_hash() + .ok_or(TrackingCopyError::UnexpectedKeyVariant(purse_key))?; + let proof: TrieMerkleProof = self + .read_with_proof(&balance_key)? 
.ok_or(TrackingCopyError::KeyNotFound(purse_key))?;
        let stored_value_ref: &StoredValue = proof.value();
        let cl_value: CLValue = stored_value_ref
            .to_owned()
            .try_into()
            .map_err(TrackingCopyError::TypeMismatch)?;
        let balance_key: Key = cl_value.into_t()?;
        Ok((balance_key, proof))
    }

    /// Returns the total balance under a purse or balance key, with a merkle proof.
    fn get_total_balance_with_proof(
        &self,
        key: Key,
    ) -> Result<(U512, TrieMerkleProof<Key, StoredValue>), Self::Error> {
        let key = {
            if let Key::URef(uref) = key {
                Key::Balance(uref.addr())
            } else {
                key
            }
        };
        if let Key::Balance(_) = key {
            let proof: TrieMerkleProof<Key, StoredValue> = self
                .read_with_proof(&key.normalize())?
                .ok_or(TrackingCopyError::KeyNotFound(key))?;
            let cl_value: CLValue = proof
                .value()
                .to_owned()
                .try_into()
                .map_err(TrackingCopyError::TypeMismatch)?;
            let balance = cl_value.into_t()?;
            Ok((balance, proof))
        } else {
            Err(Self::Error::UnexpectedKeyVariant(key))
        }
    }

    /// Prunes expired balance holds on a purse, per the imputed (hold kind, epoch) filter.
    fn clear_expired_balance_holds(
        &mut self,
        purse_addr: URefAddr,
        filter: Vec<(BalanceHoldAddrTag, HoldsEpoch)>,
    ) -> Result<(), Self::Error> {
        for (tag, holds_epoch) in filter {
            let prefix = match tag {
                BalanceHoldAddrTag::Gas => KeyPrefix::GasBalanceHoldsByPurse(purse_addr),
                BalanceHoldAddrTag::Processing => {
                    KeyPrefix::ProcessingBalanceHoldsByPurse(purse_addr)
                }
            };
            // reborrow immutably for the prefix scan; `prune` below needs `&mut self`
            let immut: &_ = self;
            let hold_keys = immut.keys_with_prefix(&prefix.to_bytes()?)?;
            for hold_key in hold_keys {
                let balance_hold_addr = hold_key
                    .as_balance_hold()
                    .ok_or(Self::Error::UnexpectedKeyVariant(hold_key))?;
                let hold_block_time = balance_hold_addr.block_time();
                if let Some(earliest_relevant_timestamp) = holds_epoch.value() {
                    if hold_block_time.value() > earliest_relevant_timestamp {
                        // skip still relevant holds
                        // the expectation is that holds are cleared after balance checks,
                        // and before payment settlement; if that ordering changes in the
                        // future this strategy should be reevaluated to determine if it
                        // remains correct.
                        continue;
                    }
                }
                // prune away holds with a timestamp at or older than the epoch timestamp
                // (the guard above has already skipped holds newer than the epoch)
                self.prune(hold_key)
            }
        }
        Ok(())
    }

    /// Returns the active balance holds on a purse, grouped by block time and hold kind.
    fn get_balance_holds(
        &mut self,
        purse_addr: URefAddr,
        block_time: BlockTime,
        interval: u64,
    ) -> Result<BTreeMap<BlockTime, BTreeMap<BalanceHoldAddrTag, U512>>, Self::Error> {
        // NOTE: currently there are two kinds of holds, gas and processing.
        // Processing holds only affect one block to prevent double spend and are always
        // cleared at the end of processing each transaction. Gas holds persist for some
        // interval, over many blocks and eras. Thus, using the holds_epoch for gas holds
        // during transaction execution also picks up processing holds and call sites of
        // this method currently pass the holds epoch for gas holds. This works fine for
        // now, but if one or more other kinds of holds with differing periods are added
        // in the future, this logic will need to be tweaked to get the holds epoch
        // for each hold kind and process each kind discretely in order and collate the
        // non-expired hold total at the end.
        let mut ret: BTreeMap<BlockTime, BTreeMap<BalanceHoldAddrTag, U512>> = BTreeMap::new();
        let holds_epoch = HoldsEpoch::from_millis(block_time.value(), interval);
        let holds = self.get_balance_hold_addresses(purse_addr)?;
        for balance_hold_addr in holds {
            let block_time = balance_hold_addr.block_time();
            if let Some(timestamp) = holds_epoch.value() {
                if block_time.value() < timestamp {
                    // skip holds older than the interval
                    // don't skip holds with a timestamp >= epoch timestamp
                    continue;
                }
            }
            let hold_key: Key = balance_hold_addr.into();
            let hold_amount = match self.read(&hold_key) {
                Ok(Some(StoredValue::CLValue(cl_value))) => match cl_value.into_t::<U512>() {
                    Ok(val) => val,
                    Err(cve) => return Err(Self::Error::CLValue(cve)),
                },
                Ok(Some(_)) => return Err(Self::Error::UnexpectedStoredValueVariant),
                Ok(None) => return Err(Self::Error::KeyNotFound(hold_key)),
                // read already yields Self::Error; propagate as-is
                Err(tce) => return Err(tce),
            };
            match ret.entry(block_time) {
                Entry::Vacant(entry) => {
                    let mut inner = BTreeMap::new();
                    inner.insert(balance_hold_addr.tag(), hold_amount);
                    entry.insert(inner);
                }
                Entry::Occupied(mut occupied_entry) => {
                    let inner = occupied_entry.get_mut();
                    match inner.entry(balance_hold_addr.tag()) {
                        Entry::Vacant(entry) => {
                            entry.insert(hold_amount);
                        }
                        Entry::Occupied(_) => {
                            unreachable!(
                                "there should be only one entry per (block_time, hold kind)"
                            );
                        }
                    }
                }
            }
        }
        Ok(ret)
    }

    /// Returns the active balance holds on a purse with merkle proofs, grouped by
    /// block time and hold kind.
    fn get_balance_holds_with_proof(
        &self,
        purse_addr: URefAddr,
    ) -> Result<
        BTreeMap<
            BlockTime,
            BTreeMap<BalanceHoldAddrTag, (U512, TrieMerkleProof<Key, StoredValue>)>,
        >,
        Self::Error,
    > {
        // NOTE: currently there are two kinds of holds, gas and processing.
        // Processing holds only affect one block to prevent double spend and are always
        // cleared at the end of processing each transaction. Gas holds persist for some
        // interval, over many blocks and eras. Thus, using the holds_epoch for gas holds
        // during transaction execution also picks up processing holds and call sites of
        // this method currently pass the holds epoch for gas holds.
This works fine for
        // now, but if one or more other kinds of holds with differing periods are added
        // in the future, this logic will need to be tweaked to get the holds epoch
        // for each hold kind and process each kind discretely in order and collate the
        // non-expired hold total at the end.
        let mut ret: BTreeMap<
            BlockTime,
            BTreeMap<BalanceHoldAddrTag, (U512, TrieMerkleProof<Key, StoredValue>)>,
        > = BTreeMap::new();
        let (block_time, interval) = match self.get_balance_hold_config(BalanceHoldAddrTag::Gas)? {
            Some((block_time, _, interval)) => (block_time.value(), interval),
            None => {
                // if there is no holds config at this root hash, there can't be any holds
                return Ok(ret);
            }
        };
        let holds_epoch = HoldsEpoch::from_millis(block_time, interval);
        let holds = self.get_balance_hold_addresses(purse_addr)?;
        for balance_hold_addr in holds {
            let block_time = balance_hold_addr.block_time();
            if let Some(timestamp) = holds_epoch.value() {
                if block_time.value() < timestamp {
                    // skip holds older than the interval
                    // don't skip holds with a timestamp >= epoch timestamp
                    continue;
                }
            }
            let hold_key: Key = balance_hold_addr.into();
            let proof: TrieMerkleProof<Key, StoredValue> = self
                .read_with_proof(&hold_key.normalize())?
                .ok_or(TrackingCopyError::KeyNotFound(hold_key))?;
            let cl_value: CLValue = proof
                .value()
                .to_owned()
                .try_into()
                .map_err(TrackingCopyError::TypeMismatch)?;
            let hold_amount = cl_value.into_t()?;
            match ret.entry(block_time) {
                Entry::Vacant(entry) => {
                    let mut inner = BTreeMap::new();
                    inner.insert(balance_hold_addr.tag(), (hold_amount, proof));
                    entry.insert(inner);
                }
                Entry::Occupied(mut occupied_entry) => {
                    let inner = occupied_entry.get_mut();
                    match inner.entry(balance_hold_addr.tag()) {
                        Entry::Vacant(entry) => {
                            entry.insert((hold_amount, proof));
                        }
                        Entry::Occupied(_) => {
                            unreachable!(
                                "there should be only one entry per (block_time, hold kind)"
                            );
                        }
                    }
                }
            }
        }
        Ok(ret)
    }

    /// Returns the message topics registered for the imputed entity.
    fn get_message_topics(&self, hash_addr: EntityAddr) -> Result<MessageTopics, Self::Error> {
        let keys = self.get_keys_by_prefix(&KeyPrefix::MessageEntriesByEntity(hash_addr))?;

        let mut topics: BTreeMap<String, TopicNameHash> = BTreeMap::new();

        for entry_key in &keys {
            if let Some(topic_name_hash) = entry_key.as_message_topic_name_hash() {
                match self.read(entry_key)?.as_ref() {
                    // individual messages are skipped; only topic summaries are collected
                    Some(StoredValue::Message(_)) => {
                        continue;
                    }
                    Some(StoredValue::MessageTopic(summary)) => {
                        topics.insert(summary.topic_name().to_owned(), topic_name_hash);
                    }
                    Some(other) => {
                        return Err(TrackingCopyError::TypeMismatch(
                            StoredValueTypeMismatch::new(
                                "MessageTopic".to_string(),
                                other.type_name(),
                            ),
                        ));
                    }
                    // a miss against global state may still be satisfied by the local
                    // read cache (values touched earlier in this execution)
                    None => match self.cache.reads_cached.get(entry_key) {
                        Some(StoredValue::Message(_)) => {
                            continue;
                        }
                        Some(StoredValue::MessageTopic(summary)) => {
                            topics.insert(summary.topic_name().to_owned(), topic_name_hash);
                        }
                        Some(_) | None => {
                            return Err(TrackingCopyError::KeyNotFound(*entry_key));
                        }
                    },
                };
            }
        }

        Ok(MessageTopics::from(topics))
    }

    /// Returns the named keys of the imputed entity.
    fn get_named_keys(&self, entity_addr: EntityAddr) -> Result<NamedKeys, Self::Error> {
        Ok(self
            .runtime_footprint_by_entity_addr(entity_addr)?
+ .take_named_keys()) + } + + fn get_v1_entry_points(&self, entity_addr: EntityAddr) -> Result { + Ok(self + .runtime_footprint_by_entity_addr(entity_addr)? + .entry_points() + .clone()) + } + + fn get_package(&mut self, hash_addr: HashAddr) -> Result { + let key = Key::Hash(hash_addr); + match self.read(&key)? { + Some(StoredValue::ContractPackage(contract_package)) => Ok(contract_package.into()), + Some(_) | None => match self.read(&Key::SmartContract(hash_addr))? { + Some(StoredValue::SmartContract(package)) => Ok(package), + Some(other) => Err(TrackingCopyError::TypeMismatch( + StoredValueTypeMismatch::new( + "Package or CLValue".to_string(), + other.type_name(), + ), + )), + None => Err(Self::Error::ValueNotFound(key.to_formatted_string())), + }, + } + } + + fn get_contract(&mut self, contract_hash: ContractHash) -> Result { + let key = Key::Hash(contract_hash.value()); + match self.read(&key)? { + Some(StoredValue::Contract(contract)) => Ok(contract), + Some(other) => Err(Self::Error::TypeMismatch(StoredValueTypeMismatch::new( + "Contract".to_string(), + other.type_name(), + ))), + None => Err(Self::Error::ValueNotFound(key.to_formatted_string())), + } + } + + fn get_system_entity_registry(&self) -> Result { + match self.read(&Key::SystemEntityRegistry)? { + Some(StoredValue::CLValue(registry)) => { + let registry: SystemHashRegistry = + CLValue::into_t(registry).map_err(Self::Error::from)?; + Ok(registry) + } + Some(other) => Err(TrackingCopyError::TypeMismatch( + StoredValueTypeMismatch::new("CLValue".to_string(), other.type_name()), + )), + None => Err(TrackingCopyError::KeyNotFound(Key::SystemEntityRegistry)), + } + } + + fn get_checksum_registry(&mut self) -> Result, Self::Error> { + match self.get(&Key::ChecksumRegistry)? 
{ + Some(StoredValue::CLValue(registry)) => { + let registry: ChecksumRegistry = + CLValue::into_t(registry).map_err(Self::Error::from)?; + Ok(Some(registry)) + } + Some(other) => Err(TrackingCopyError::TypeMismatch( + StoredValueTypeMismatch::new("CLValue".to_string(), other.type_name()), + )), + None => Ok(None), + } + } + + fn get_byte_code(&mut self, byte_code_hash: ByteCodeHash) -> Result { + let key = Key::ByteCode(ByteCodeAddr::V1CasperWasm(byte_code_hash.value())); + match self.get(&key)? { + Some(StoredValue::ByteCode(byte_code)) => Ok(byte_code), + Some(other) => Err(TrackingCopyError::TypeMismatch( + StoredValueTypeMismatch::new("ContractWasm".to_string(), other.type_name()), + )), + None => Err(TrackingCopyError::KeyNotFound(key)), + } + } +} diff --git a/storage/src/tracking_copy/ext_entity.rs b/storage/src/tracking_copy/ext_entity.rs new file mode 100644 index 0000000000..3191d46c91 --- /dev/null +++ b/storage/src/tracking_copy/ext_entity.rs @@ -0,0 +1,952 @@ +use std::collections::BTreeSet; +use tracing::{debug, error}; + +use casper_types::{ + account::AccountHash, + addressable_entity::{ActionThresholds, AssociatedKeys, NamedKeyAddr, NamedKeyValue, Weight}, + contracts::{ContractHash, NamedKeys}, + system::{ + handle_payment::ACCUMULATION_PURSE_KEY, SystemEntityType, AUCTION, HANDLE_PAYMENT, MINT, + }, + AccessRights, Account, AddressableEntity, AddressableEntityHash, ByteCode, ByteCodeAddr, + ByteCodeHash, CLValue, ContextAccessRights, ContractRuntimeTag, EntityAddr, EntityKind, + EntityVersions, EntryPointAddr, EntryPointValue, EntryPoints, Groups, HashAddr, Key, Package, + PackageHash, PackageStatus, Phase, ProtocolVersion, PublicKey, RuntimeFootprint, StoredValue, + StoredValueTypeMismatch, URef, U512, +}; + +use crate::{ + global_state::{error::Error as GlobalStateError, state::StateReader}, + tracking_copy::{TrackingCopy, TrackingCopyError, TrackingCopyExt}, + AddressGenerator, KeyPrefix, +}; + +/// Fees purse handling. 
+#[derive(Debug, Clone, PartialEq, Eq)] +pub enum FeesPurseHandling { + /// Transfer fees to proposer. + ToProposer(AccountHash), + /// Transfer all fees to a system-wide accumulation purse, for future disbursement. + Accumulate, + /// Burn all fees from specified purse. + Burn(URef), + /// No fees are charged. + None(URef), +} + +/// Higher-level operations on the state via a `TrackingCopy`. +pub trait TrackingCopyEntityExt { + /// The type for the returned errors. + type Error; + + /// Gets a runtime information by entity_addr. + fn runtime_footprint_by_entity_addr( + &self, + entity_addr: EntityAddr, + ) -> Result; + + /// Gets a runtime information by hash_addr. + fn runtime_footprint_by_hash_addr( + &mut self, + hash_addr: HashAddr, + ) -> Result; + + /// Gets a runtime information by account hash. + fn runtime_footprint_by_account_hash( + &mut self, + protocol_version: ProtocolVersion, + account_hash: AccountHash, + ) -> Result<(EntityAddr, RuntimeFootprint), Self::Error>; + + /// Get runtime information for an account if authorized, else error. + fn authorized_runtime_footprint_by_account( + &mut self, + protocol_version: ProtocolVersion, + account_hash: AccountHash, + authorization_keys: &BTreeSet, + administrative_accounts: &BTreeSet, + ) -> Result<(RuntimeFootprint, EntityAddr), Self::Error>; + + /// Returns runtime information and access rights if authorized, else error. + fn authorized_runtime_footprint_with_access_rights( + &mut self, + protocol_version: ProtocolVersion, + initiating_address: AccountHash, + authorization_keys: &BTreeSet, + administrative_accounts: &BTreeSet, + ) -> Result<(EntityAddr, RuntimeFootprint, ContextAccessRights), TrackingCopyError>; + + /// Returns runtime information for systemic functionality. + fn system_entity_runtime_footprint( + &mut self, + protocol_version: ProtocolVersion, + ) -> Result<(EntityAddr, RuntimeFootprint, ContextAccessRights), TrackingCopyError>; + + /// Migrate the NamedKeys for a entity. 
+ fn migrate_named_keys( + &mut self, + entity_addr: EntityAddr, + named_keys: NamedKeys, + ) -> Result<(), Self::Error>; + + /// Migrate entry points from and older structure to top level entries. + fn migrate_entry_points( + &mut self, + entity_addr: EntityAddr, + entry_points: EntryPoints, + ) -> Result<(), Self::Error>; + + /// Upsert uref value to global state and imputed entity's named keys. + fn upsert_uref_to_named_keys( + &mut self, + entity_addr: EntityAddr, + name: &str, + named_keys: &NamedKeys, + uref: URef, + stored_value: StoredValue, + ) -> Result<(), Self::Error>; + + /// Migrate Account to AddressableEntity. + fn migrate_account( + &mut self, + account_hash: AccountHash, + protocol_version: ProtocolVersion, + ) -> Result<(), Self::Error>; + + /// Create an addressable entity to receive transfer. + fn create_new_addressable_entity_on_transfer( + &mut self, + account_hash: AccountHash, + main_purse: URef, + protocol_version: ProtocolVersion, + ) -> Result<(), Self::Error>; + + /// Create an addressable entity instance using the field data of an account instance. + fn create_addressable_entity_from_account( + &mut self, + account: Account, + protocol_version: ProtocolVersion, + ) -> Result<(), Self::Error>; + + /// Migrate ContractPackage to Package. + fn migrate_package( + &mut self, + contract_package_key: Key, + protocol_version: ProtocolVersion, + ) -> Result<(), Self::Error>; + + /// Returns fee purse. + fn fees_purse( + &mut self, + protocol_version: ProtocolVersion, + fees_purse_handling: FeesPurseHandling, + ) -> Result; + + /// Returns named key from selected system contract. 
+ fn system_contract_named_key( + &mut self, + system_contract_name: &str, + name: &str, + ) -> Result, Self::Error>; +} + +impl TrackingCopyEntityExt for TrackingCopy +where + R: StateReader, +{ + type Error = TrackingCopyError; + + fn runtime_footprint_by_entity_addr( + &self, + entity_addr: EntityAddr, + ) -> Result { + let entity_key = match entity_addr { + EntityAddr::Account(account_addr) => { + let account_key = Key::Account(AccountHash::new(account_addr)); + match self.read(&account_key)? { + Some(StoredValue::Account(account)) => { + return Ok(RuntimeFootprint::new_account_footprint(account)) + } + Some(StoredValue::CLValue(cl_value)) => cl_value.to_t::()?, + Some(other) => { + return Err(TrackingCopyError::TypeMismatch( + StoredValueTypeMismatch::new( + "Account or Key".to_string(), + other.type_name(), + ), + )) + } + None => return Err(TrackingCopyError::KeyNotFound(account_key)), + } + } + EntityAddr::SmartContract(addr) | EntityAddr::System(addr) => { + let contract_key = Key::Hash(addr); + match self.read(&contract_key)? { + Some(StoredValue::Contract(contract)) => { + let contract_hash = ContractHash::new(entity_addr.value()); + let maybe_system_entity_type = { + let mut ret = None; + let registry = self.get_system_entity_registry()?; + for (name, hash) in registry.inner().into_iter() { + if hash == entity_addr.value() { + match name.as_ref() { + MINT => ret = Some(SystemEntityType::Mint), + AUCTION => ret = Some(SystemEntityType::Auction), + HANDLE_PAYMENT => { + ret = Some(SystemEntityType::HandlePayment) + } + _ => continue, + } + } + } + + ret + }; + + return Ok(RuntimeFootprint::new_contract_footprint( + contract_hash, + contract, + maybe_system_entity_type, + )); + } + Some(StoredValue::CLValue(cl_value)) => cl_value.to_t::()?, + Some(_) | None => Key::AddressableEntity(entity_addr), + } + } + }; + + match self.read(&entity_key)? 
{
            Some(StoredValue::AddressableEntity(entity)) => {
                // collect the entity's named keys from their discrete entries
                let named_keys = {
                    let keys =
                        self.get_keys_by_prefix(&KeyPrefix::NamedKeysByEntity(entity_addr))?;

                    let mut named_keys = NamedKeys::new();

                    for entry_key in &keys {
                        match self.read(entry_key)? {
                            Some(StoredValue::NamedKey(named_key)) => {
                                let key =
                                    named_key.get_key().map_err(TrackingCopyError::CLValue)?;
                                let name =
                                    named_key.get_name().map_err(TrackingCopyError::CLValue)?;
                                named_keys.insert(name, key);
                            }
                            Some(other) => {
                                // NOTE(review): previously reported "CLValue" as the
                                // expected type; these entries hold NamedKey values.
                                return Err(TrackingCopyError::TypeMismatch(
                                    StoredValueTypeMismatch::new(
                                        "NamedKey".to_string(),
                                        other.type_name(),
                                    ),
                                ));
                            }
                            // a global-state miss may still be a locally cached read
                            None => match self.cache.reads_cached.get(entry_key) {
                                Some(StoredValue::NamedKey(named_key_value)) => {
                                    let key = named_key_value
                                        .get_key()
                                        .map_err(TrackingCopyError::CLValue)?;
                                    let name = named_key_value
                                        .get_name()
                                        .map_err(TrackingCopyError::CLValue)?;
                                    named_keys.insert(name, key);
                                }
                                Some(_) | None => {
                                    return Err(TrackingCopyError::KeyNotFound(*entry_key));
                                }
                            },
                        };
                    }

                    named_keys
                };
                // collect the entity's V1 entry point entries
                let entry_points = {
                    let keys =
                        self.get_keys_by_prefix(&KeyPrefix::EntryPointsV1ByEntity(entity_addr))?;

                    let mut entry_points_v1 = EntryPoints::new();

                    for entry_point_key in keys.iter() {
                        match self.read(entry_point_key)? {
                            Some(StoredValue::EntryPoint(EntryPointValue::V1CasperVm(
                                entry_point,
                            ))) => entry_points_v1.add_entry_point(entry_point),
                            Some(other) => {
                                return Err(TrackingCopyError::TypeMismatch(
                                    StoredValueTypeMismatch::new(
                                        "EntryPointsV1".to_string(),
                                        other.type_name(),
                                    ),
                                ));
                            }
                            None => match self.cache.reads_cached.get(entry_point_key) {
                                Some(StoredValue::EntryPoint(EntryPointValue::V1CasperVm(
                                    entry_point,
                                ))) => entry_points_v1.add_entry_point(entry_point.to_owned()),
                                Some(other) => {
                                    return Err(TrackingCopyError::TypeMismatch(
                                        StoredValueTypeMismatch::new(
                                            "EntryPointsV1".to_string(),
                                            other.type_name(),
                                        ),
                                    ));
                                }
                                None => {
                                    return Err(TrackingCopyError::KeyNotFound(
                                        *entry_point_key,
                                    ));
                                }
                            },
                        }
                    }

                    entry_points_v1
                };
                Ok(RuntimeFootprint::new_entity_footprint(
                    entity_addr,
                    entity,
                    named_keys,
                    entry_points,
                ))
            }
            Some(other) => Err(TrackingCopyError::TypeMismatch(
                StoredValueTypeMismatch::new("AddressableEntity".to_string(), other.type_name()),
            )),
            None => Err(TrackingCopyError::KeyNotFound(entity_key)),
        }
    }

    fn runtime_footprint_by_hash_addr(
        &mut self,
        hash_addr: HashAddr,
    ) -> Result<RuntimeFootprint, Self::Error> {
        // system entities live under EntityAddr::System; everything else is a
        // smart contract address
        let entity_addr = if self.get_system_entity_registry()?.exists(&hash_addr) {
            EntityAddr::new_system(hash_addr)
        } else {
            EntityAddr::new_smart_contract(hash_addr)
        };

        self.runtime_footprint_by_entity_addr(entity_addr)
    }

    fn runtime_footprint_by_account_hash(
        &mut self,
        protocol_version: ProtocolVersion,
        account_hash: AccountHash,
    ) -> Result<(EntityAddr, RuntimeFootprint), Self::Error> {
        let account_key = Key::Account(account_hash);

        let entity_addr = match self.get(&account_key)?
{ + Some(StoredValue::Account(account)) => { + if self.enable_addressable_entity { + self.create_addressable_entity_from_account(account.clone(), protocol_version)?; + } + + let footprint = RuntimeFootprint::new_account_footprint(account); + let entity_addr = EntityAddr::new_account(account_hash.value()); + return Ok((entity_addr, footprint)); + } + + Some(StoredValue::CLValue(contract_key_as_cl_value)) => { + let key = CLValue::into_t::(contract_key_as_cl_value)?; + if let Key::AddressableEntity(addr) = key { + addr + } else { + return Err(Self::Error::UnexpectedKeyVariant(key)); + } + } + Some(other) => { + return Err(TrackingCopyError::TypeMismatch( + StoredValueTypeMismatch::new("Key".to_string(), other.type_name()), + )); + } + None => return Err(TrackingCopyError::KeyNotFound(account_key)), + }; + + match self.get(&Key::AddressableEntity(entity_addr))? { + Some(StoredValue::AddressableEntity(entity)) => { + let named_keys = self.get_named_keys(entity_addr)?; + let entry_points = self.get_v1_entry_points(entity_addr)?; + let runtime_footprint = RuntimeFootprint::new_entity_footprint( + entity_addr, + entity, + named_keys, + entry_points, + ); + Ok((entity_addr, runtime_footprint)) + } + Some(other) => Err(TrackingCopyError::TypeMismatch( + StoredValueTypeMismatch::new("AddressableEntity".to_string(), other.type_name()), + )), + None => Err(TrackingCopyError::KeyNotFound(Key::AddressableEntity( + entity_addr, + ))), + } + } + + fn authorized_runtime_footprint_by_account( + &mut self, + protocol_version: ProtocolVersion, + account_hash: AccountHash, + authorization_keys: &BTreeSet, + administrative_accounts: &BTreeSet, + ) -> Result<(RuntimeFootprint, EntityAddr), Self::Error> { + let (entity_addr, footprint) = + self.runtime_footprint_by_account_hash(protocol_version, account_hash)?; + + if !administrative_accounts.is_empty() + && administrative_accounts + .intersection(authorization_keys) + .next() + .is_some() + { + // Exit early if there's at least a single 
signature coming from an admin.
            return Ok((footprint, entity_addr));
        }

        // Authorize using provided authorization keys
        if !footprint.can_authorize(authorization_keys) {
            return Err(Self::Error::Authorization);
        }

        // Check total key weight against deploy threshold
        if !footprint.can_deploy_with(authorization_keys) {
            return Err(Self::Error::DeploymentAuthorizationFailure);
        }

        Ok((footprint, entity_addr))
    }

    fn authorized_runtime_footprint_with_access_rights(
        &mut self,
        protocol_version: ProtocolVersion,
        initiating_address: AccountHash,
        authorization_keys: &BTreeSet<AccountHash>,
        administrative_accounts: &BTreeSet<AccountHash>,
    ) -> Result<(EntityAddr, RuntimeFootprint, ContextAccessRights), TrackingCopyError> {
        // the system account short-circuits to the systemic context
        if initiating_address == PublicKey::System.to_account_hash() {
            return self.system_entity_runtime_footprint(protocol_version);
        }

        let (footprint, entity_addr) = self.authorized_runtime_footprint_by_account(
            protocol_version,
            initiating_address,
            authorization_keys,
            administrative_accounts,
        )?;
        let access_rights = footprint.extract_access_rights(entity_addr.value());
        Ok((entity_addr, footprint, access_rights))
    }

    fn system_entity_runtime_footprint(
        &mut self,
        protocol_version: ProtocolVersion,
    ) -> Result<(EntityAddr, RuntimeFootprint, ContextAccessRights), TrackingCopyError> {
        let system_account_hash = PublicKey::System.to_account_hash();
        let (system_entity_addr, mut system_entity) =
            self.runtime_footprint_by_account_hash(protocol_version, system_account_hash)?;

        let system_entity_registry = self.get_system_entity_registry()?;

        // gather named keys and access rights for each system contract in turn
        let (auction_named_keys, mut auction_access_rights) = {
            let auction_hash = match system_entity_registry.get(AUCTION).copied() {
                Some(auction_hash) => auction_hash,
                None => {
                    error!("unexpected failure; auction not found");
                    return Err(TrackingCopyError::MissingSystemContractHash(
                        AUCTION.to_string(),
                    ));
                }
            };
            let auction = self.runtime_footprint_by_hash_addr(auction_hash)?;
            let rights = auction.extract_access_rights(auction_hash);
            (auction.take_named_keys(), rights)
        };
        let (mint_named_keys, mint_access_rights) = {
            let mint_hash = match system_entity_registry.get(MINT).copied() {
                Some(mint_hash) => mint_hash,
                None => {
                    error!("unexpected failure; mint not found");
                    return Err(TrackingCopyError::MissingSystemContractHash(
                        MINT.to_string(),
                    ));
                }
            };
            let mint = self.runtime_footprint_by_hash_addr(mint_hash)?;
            let rights = mint.extract_access_rights(mint_hash);
            (mint.take_named_keys(), rights)
        };

        let (payment_named_keys, payment_access_rights) = {
            let payment_hash = match system_entity_registry.get(HANDLE_PAYMENT).copied() {
                Some(payment_hash) => payment_hash,
                None => {
                    error!("unexpected failure; handle payment not found");
                    return Err(TrackingCopyError::MissingSystemContractHash(
                        HANDLE_PAYMENT.to_string(),
                    ));
                }
            };
            let payment = self.runtime_footprint_by_hash_addr(payment_hash)?;
            let rights = payment.extract_access_rights(payment_hash);
            (payment.take_named_keys(), rights)
        };

        // the auction calls the mint for total supply behavior, so extending the context to include
        // mint named keys & access rights
        system_entity.named_keys_mut().append(auction_named_keys);
        system_entity.named_keys_mut().append(mint_named_keys);
        system_entity.named_keys_mut().append(payment_named_keys);

        auction_access_rights.extend_access_rights(mint_access_rights.take_access_rights());
        auction_access_rights.extend_access_rights(payment_access_rights.take_access_rights());

        Ok((system_entity_addr, system_entity, auction_access_rights))
    }

    fn migrate_named_keys(
        &mut self,
        entity_addr: EntityAddr,
        named_keys: NamedKeys,
    ) -> Result<(), Self::Error> {
        if !self.enable_addressable_entity {
            return Err(Self::Error::AddressableEntityDisable);
}

        for (string, key) in named_keys.into_inner().into_iter() {
            let entry_addr = NamedKeyAddr::new_from_string(entity_addr, string.clone())?;
            let named_key_value =
                StoredValue::NamedKey(NamedKeyValue::from_concrete_values(key, string.clone())?);
            let entry_key = Key::NamedKey(entry_addr);
            self.write(entry_key, named_key_value)
        }

        Ok(())
    }

    fn migrate_entry_points(
        &mut self,
        entity_addr: EntityAddr,
        entry_points: EntryPoints,
    ) -> Result<(), Self::Error> {
        if !self.enable_addressable_entity {
            return Err(Self::Error::AddressableEntityDisable);
        }

        if entry_points.is_empty() {
            return Ok(());
        }
        for entry_point in entry_points.take_entry_points().into_iter() {
            let entry_point_addr =
                EntryPointAddr::new_v1_entry_point_addr(entity_addr, entry_point.name())?;
            let entry_point_value =
                StoredValue::EntryPoint(EntryPointValue::V1CasperVm(entry_point));
            self.write(Key::EntryPoint(entry_point_addr), entry_point_value)
        }

        Ok(())
    }

    fn upsert_uref_to_named_keys(
        &mut self,
        entity_addr: EntityAddr,
        name: &str,
        named_keys: &NamedKeys,
        uref: URef,
        stored_value: StoredValue,
    ) -> Result<(), Self::Error> {
        match named_keys.get(name) {
            Some(key) => {
                // the name already exists; it must point at a URef to be overwritten
                if let Key::URef(_) = key {
                    self.write(*key, stored_value);
                } else {
                    return Err(Self::Error::UnexpectedKeyVariant(*key));
                }
            }
            None => {
                let uref_key = Key::URef(uref).normalize();
                self.write(uref_key, stored_value);

                if self.enable_addressable_entity {
                    // AE mode: named keys live under discrete NamedKey entries
                    let entry_value = {
                        let named_key_value =
                            NamedKeyValue::from_concrete_values(uref_key, name.to_string())
                                .map_err(Self::Error::CLValue)?;
                        StoredValue::NamedKey(named_key_value)
                    };
                    let entry_key = {
                        let named_key_entry =
                            NamedKeyAddr::new_from_string(entity_addr, name.to_string())
                                .map_err(Self::Error::BytesRepr)?;
                        Key::NamedKey(named_key_entry)
                    };

                    self.write(entry_key, entry_value);
                } else {
                    // legacy mode: append the (name, key) pair to the owning record
                    let named_key_value = StoredValue::CLValue(CLValue::from_t((name, uref_key))?);
                    let base_key = match entity_addr {
                        EntityAddr::System(hash_addr) | EntityAddr::SmartContract(hash_addr) => {
                            Key::Hash(hash_addr)
                        }
                        EntityAddr::Account(addr) => Key::Account(AccountHash::new(addr)),
                    };
                    self.add(base_key, named_key_value)?;
                }
            }
        };
        Ok(())
    }

    fn migrate_account(
        &mut self,
        account_hash: AccountHash,
        protocol_version: ProtocolVersion,
    ) -> Result<(), Self::Error> {
        if !self.enable_addressable_entity {
            debug!("ae is not enabled, skipping migration");
            return Ok(());
        }
        let key = Key::Account(account_hash);
        let maybe_stored_value = self.read(&key)?;

        match maybe_stored_value {
            Some(StoredValue::Account(account)) => {
                self.create_addressable_entity_from_account(account, protocol_version)
            }
            Some(StoredValue::CLValue(_)) => Ok(()),
            // NOTE(review): the note below describes the `None` arm (the account does
            // not exist); the `Some(_)` arm below is an unexpected stored value variant.
            // This means the Account does not exist, which we consider to be
            // an authorization error. As used by the node, this type of deploy
            // will have already been filtered out, but for other EE use cases
            // and testing it is reachable.
+ Some(_) => Err(Self::Error::UnexpectedStoredValueVariant), + None => Err(Self::Error::AccountNotFound(key)), + } + } + + fn create_new_addressable_entity_on_transfer( + &mut self, + account_hash: AccountHash, + main_purse: URef, + protocol_version: ProtocolVersion, + ) -> Result<(), Self::Error> { + let mut generator = AddressGenerator::new(main_purse.addr().as_ref(), Phase::System); + + let byte_code_hash = ByteCodeHash::default(); + let entity_hash = AddressableEntityHash::new(account_hash.value()); + let package_hash = PackageHash::new(generator.new_hash_address()); + + let associated_keys = AssociatedKeys::new(account_hash, Weight::new(1)); + + let action_thresholds: ActionThresholds = Default::default(); + + let entity = AddressableEntity::new( + package_hash, + byte_code_hash, + protocol_version, + main_purse, + associated_keys, + action_thresholds, + EntityKind::Account(account_hash), + ); + + let entity_addr = EntityAddr::new_account(entity_hash.value()); + let package = { + let mut package = Package::new( + EntityVersions::default(), + BTreeSet::default(), + Groups::default(), + PackageStatus::Locked, + ); + package.insert_entity_version(protocol_version.value().major, entity_addr); + package + }; + + let entity_key = Key::AddressableEntity(entity_addr); + + self.write(entity_key, entity.into()); + self.write(package_hash.into(), package.into()); + let contract_by_account = match CLValue::from_t(entity_key) { + Ok(cl_value) => cl_value, + Err(err) => return Err(Self::Error::CLValue(err)), + }; + + self.write( + Key::Account(account_hash), + StoredValue::CLValue(contract_by_account), + ); + Ok(()) + } + + fn create_addressable_entity_from_account( + &mut self, + account: Account, + protocol_version: ProtocolVersion, + ) -> Result<(), Self::Error> { + let account_hash = account.account_hash(); + if !self.enable_addressable_entity { + self.write(Key::Account(account_hash), StoredValue::Account(account)); + return Ok(()); + } + + // carry forward the account 
hash to allow reverse lookup + let entity_hash = AddressableEntityHash::new(account_hash.value()); + let entity_addr = EntityAddr::new_account(entity_hash.value()); + + // migrate named keys -- if this fails there is no reason to proceed further. + let named_keys = account.named_keys().clone(); + self.migrate_named_keys(entity_addr, named_keys)?; + + // write package first + let package_hash = { + let mut generator = + AddressGenerator::new(account.main_purse().addr().as_ref(), Phase::System); + + let package_hash = PackageHash::new(generator.new_hash_address()); + + let mut package = Package::new( + EntityVersions::default(), + BTreeSet::default(), + Groups::default(), + PackageStatus::Locked, + ); + package.insert_entity_version(protocol_version.value().major, entity_addr); + self.write(package_hash.into(), package.into()); + package_hash + }; + + // write entity after package + { + // currently, addressable entities of account kind are not permitted to have bytecode + // however, we intend to revisit this and potentially allow it in a future release + // as a replacement for stored session. + let byte_code_hash = ByteCodeHash::default(); + + let action_thresholds = { + let account_threshold = account.action_thresholds().clone(); + ActionThresholds::new( + Weight::new(account_threshold.deployment.value()), + Weight::new(1u8), + Weight::new(account_threshold.key_management.value()), + ) + .map_err(Self::Error::SetThresholdFailure)? 
+ }; + + let associated_keys = AssociatedKeys::from(account.associated_keys().clone()); + + let entity = AddressableEntity::new( + package_hash, + byte_code_hash, + protocol_version, + account.main_purse(), + associated_keys, + action_thresholds, + EntityKind::Account(account_hash), + ); + let entity_key = entity.entity_key(entity_hash); + let contract_by_account = match CLValue::from_t(entity_key) { + Ok(cl_value) => cl_value, + Err(err) => return Err(Self::Error::CLValue(err)), + }; + + self.write(entity_key, entity.into()); + self.write( + Key::Account(account_hash), + StoredValue::CLValue(contract_by_account), + ); + } + + Ok(()) + } + + fn migrate_package( + &mut self, + legacy_package_key: Key, + protocol_version: ProtocolVersion, + ) -> Result<(), Self::Error> { + if !self.enable_addressable_entity { + return Err(Self::Error::AddressableEntityDisable); + } + + let legacy_package = match self.read(&legacy_package_key)? { + Some(StoredValue::ContractPackage(legacy_package)) => legacy_package, + Some(_) | None => { + return Err(Self::Error::ValueNotFound(format!( + "contract package not found {}", + legacy_package_key + ))); + } + }; + + let legacy_versions = legacy_package.versions().clone(); + let access_uref = legacy_package.access_key(); + let mut generator = AddressGenerator::new(access_uref.addr().as_ref(), Phase::System); + + let package: Package = legacy_package.into(); + + for (_, contract_hash) in legacy_versions.into_iter() { + let contract = match self.read(&Key::Hash(contract_hash.value()))? 
{ + Some(StoredValue::Contract(legacy_contract)) => legacy_contract, + Some(_) | None => { + return Err(Self::Error::ValueNotFound(format!( + "contract not found {}", + contract_hash + ))); + } + }; + + let purse = generator.new_uref(AccessRights::all()); + let cl_value: CLValue = CLValue::from_t(()).map_err(Self::Error::CLValue)?; + self.write(Key::URef(purse), StoredValue::CLValue(cl_value)); + + let balance_value: CLValue = + CLValue::from_t(U512::zero()).map_err(Self::Error::CLValue)?; + self.write( + Key::Balance(purse.addr()), + StoredValue::CLValue(balance_value), + ); + + let contract_addr = EntityAddr::new_smart_contract(contract_hash.value()); + + let contract_wasm_hash = contract.contract_wasm_hash(); + + let updated_entity = AddressableEntity::new( + PackageHash::new(contract.contract_package_hash().value()), + ByteCodeHash::new(contract_wasm_hash.value()), + protocol_version, + purse, + AssociatedKeys::default(), + ActionThresholds::default(), + EntityKind::SmartContract(ContractRuntimeTag::VmCasperV1), + ); + + let entry_points = contract.entry_points().clone(); + let named_keys = contract.take_named_keys(); + + self.migrate_named_keys(contract_addr, named_keys)?; + self.migrate_entry_points(contract_addr, entry_points.into())?; + + let maybe_previous_wasm = self + .read(&Key::Hash(contract_wasm_hash.value()))? 
+ .and_then(|stored_value| stored_value.into_contract_wasm()); + + match maybe_previous_wasm { + None => { + return Err(Self::Error::ValueNotFound(format!( + "{}", + contract_wasm_hash + ))); + } + Some(contract_wasm) => { + let byte_code_key = Key::byte_code_key(ByteCodeAddr::new_wasm_addr( + updated_entity.byte_code_addr(), + )); + let byte_code_cl_value = match CLValue::from_t(byte_code_key) { + Ok(cl_value) => cl_value, + Err(err) => return Err(Self::Error::CLValue(err)), + }; + self.write( + Key::Hash(updated_entity.byte_code_addr()), + StoredValue::CLValue(byte_code_cl_value), + ); + + let byte_code: ByteCode = contract_wasm.into(); + self.write(byte_code_key, StoredValue::ByteCode(byte_code)); + } + } + + let entity_hash = AddressableEntityHash::new(contract_hash.value()); + let entity_key = Key::contract_entity_key(entity_hash); + let indirection = match CLValue::from_t(entity_key) { + Ok(cl_value) => cl_value, + Err(err) => return Err(Self::Error::CLValue(err)), + }; + self.write( + Key::Hash(contract_hash.value()), + StoredValue::CLValue(indirection), + ); + + self.write(entity_key, StoredValue::AddressableEntity(updated_entity)); + } + + let package_key = Key::SmartContract( + legacy_package_key + .into_hash_addr() + .ok_or(Self::Error::UnexpectedKeyVariant(legacy_package_key))?, + ); + + let access_key_value = + CLValue::from_t((package_key, access_uref)).map_err(Self::Error::CLValue)?; + self.write(legacy_package_key, StoredValue::CLValue(access_key_value)); + self.write(package_key, StoredValue::SmartContract(package)); + Ok(()) + } + + fn fees_purse( + &mut self, + protocol_version: ProtocolVersion, + fees_purse_handling: FeesPurseHandling, + ) -> Result { + let fee_handling = fees_purse_handling; + match fee_handling { + FeesPurseHandling::None(uref) => Ok(uref), + FeesPurseHandling::ToProposer(proposer) => { + let (_, entity) = + self.runtime_footprint_by_account_hash(protocol_version, proposer)?; + Ok(entity + .main_purse() + 
.ok_or(TrackingCopyError::AddressableEntityDisable)?) + } + FeesPurseHandling::Accumulate => { + let registry = self.get_system_entity_registry()?; + let entity_addr = { + let hash = match registry.get(HANDLE_PAYMENT) { + Some(hash) => hash, + None => { + return Err(TrackingCopyError::MissingSystemContractHash( + HANDLE_PAYMENT.to_string(), + )); + } + }; + EntityAddr::new_system(*hash) + }; + + let named_keys = self.get_named_keys(entity_addr)?; + + let accumulation_purse_uref = match named_keys.get(ACCUMULATION_PURSE_KEY) { + Some(Key::URef(accumulation_purse)) => *accumulation_purse, + Some(_) | None => { + error!( + "fee handling is configured to accumulate but handle payment does not \ + have accumulation purse" + ); + return Err(TrackingCopyError::NamedKeyNotFound( + ACCUMULATION_PURSE_KEY.to_string(), + )); + } + }; + + Ok(accumulation_purse_uref) + } + FeesPurseHandling::Burn(uref) => Ok(uref), + } + } + + fn system_contract_named_key( + &mut self, + system_contract_name: &str, + name: &str, + ) -> Result, Self::Error> { + let system_entity_registry = self.get_system_entity_registry()?; + let hash = match system_entity_registry.get(system_contract_name).copied() { + Some(hash) => hash, + None => { + error!( + "unexpected failure; system contract {} not found", + system_contract_name + ); + return Err(TrackingCopyError::MissingSystemContractHash( + system_contract_name.to_string(), + )); + } + }; + let runtime_footprint = self.runtime_footprint_by_hash_addr(hash)?; + Ok(runtime_footprint.take_named_keys().get(name).copied()) + } +} diff --git a/execution_engine/src/core/tracking_copy/meter.rs b/storage/src/tracking_copy/meter.rs similarity index 77% rename from execution_engine/src/core/tracking_copy/meter.rs rename to storage/src/tracking_copy/meter.rs index 8e46563686..ed691d50c9 100644 --- a/execution_engine/src/core/tracking_copy/meter.rs +++ b/storage/src/tracking_copy/meter.rs @@ -1,7 +1,7 @@ -use std::collections::BTreeSet; +use 
std::{collections::BTreeSet, fmt::Debug}; /// Trait for measuring "size" of key-value pairs. -pub trait Meter { +pub trait Meter: Copy + Default + Debug { fn measure(&self, k: &K, v: &V) -> usize; fn measure_keys(&self, keys: &BTreeSet) -> usize; @@ -10,13 +10,14 @@ pub trait Meter { pub mod heap_meter { use std::collections::BTreeSet; - use crate::core::tracking_copy::byte_size::ByteSize; + use crate::tracking_copy::byte_size::ByteSize; + #[derive(Copy, Clone, Default, Debug)] pub struct HeapSize; impl super::Meter for HeapSize { fn measure(&self, _: &K, v: &V) -> usize { - std::mem::size_of::() + v.byte_size() + size_of::() + v.byte_size() } fn measure_keys(&self, keys: &BTreeSet) -> usize { @@ -33,6 +34,7 @@ pub mod heap_meter { pub mod count_meter { use std::collections::BTreeSet; + #[derive(Clone, Copy, Debug, Default)] pub struct Count; impl super::Meter for Count { diff --git a/storage/src/tracking_copy/mod.rs b/storage/src/tracking_copy/mod.rs new file mode 100644 index 0000000000..cd0074b30e --- /dev/null +++ b/storage/src/tracking_copy/mod.rs @@ -0,0 +1,1144 @@ +//! This module defines the `TrackingCopy` - a utility that caches operations on the state, so that +//! the underlying state remains unmodified, but it can be interacted with as if the modifications +//! were applied on it. 
+mod byte_size; +mod error; +mod ext; +mod ext_entity; +mod meter; +#[cfg(test)] +mod tests; + +use std::{ + borrow::Borrow, + collections::{BTreeMap, BTreeSet, HashSet, VecDeque}, + convert::{From, TryInto}, + fmt::Debug, + sync::Arc, +}; + +use linked_hash_map::LinkedHashMap; +use thiserror::Error; +use tracing::error; + +use crate::{ + global_state::{ + error::Error as GlobalStateError, state::StateReader, + trie_store::operations::compute_state_hash, DEFAULT_MAX_QUERY_DEPTH, + }, + KeyPrefix, +}; +use casper_types::{ + addressable_entity::NamedKeyAddr, + bytesrepr::{self, ToBytes}, + contract_messages::{Message, Messages}, + contracts::NamedKeys, + execution::{Effects, TransformError, TransformInstruction, TransformKindV2, TransformV2}, + global_state::TrieMerkleProof, + handle_stored_dictionary_value, BlockGlobalAddr, CLType, CLValue, CLValueError, Digest, Key, + KeyTag, StoredValue, StoredValueTypeMismatch, U512, +}; + +use self::meter::{heap_meter::HeapSize, Meter}; +pub use self::{ + error::Error as TrackingCopyError, + ext::TrackingCopyExt, + ext_entity::{FeesPurseHandling, TrackingCopyEntityExt}, +}; + +/// Result of a query on a `TrackingCopy`. +#[derive(Debug)] +#[allow(clippy::large_enum_variant)] +pub enum TrackingCopyQueryResult { + /// Invalid state root hash. + RootNotFound, + /// The value wasn't found. + ValueNotFound(String), + /// A circular reference was found in the state while traversing it. + CircularReference(String), + /// The query reached the depth limit. + DepthLimit { + /// The depth reached. + depth: u64, + }, + /// The query was successful. + Success { + /// The value read from the state. + value: StoredValue, + /// Merkle proofs for the value. + proofs: Vec>, + }, +} + +impl TrackingCopyQueryResult { + /// Is this a successful query? + pub fn is_success(&self) -> bool { + matches!(self, TrackingCopyQueryResult::Success { .. }) + } + + /// As result. 
+ pub fn into_result(self) -> Result { + match self { + TrackingCopyQueryResult::RootNotFound => { + Err(TrackingCopyError::Storage(Error::RootNotFound)) + } + TrackingCopyQueryResult::ValueNotFound(msg) => { + Err(TrackingCopyError::ValueNotFound(msg)) + } + TrackingCopyQueryResult::CircularReference(msg) => { + Err(TrackingCopyError::CircularReference(msg)) + } + TrackingCopyQueryResult::DepthLimit { depth } => { + Err(TrackingCopyError::QueryDepthLimit { depth }) + } + TrackingCopyQueryResult::Success { value, .. } => Ok(value), + } + } +} + +/// Struct containing state relating to a given query. +struct Query { + /// The key from where the search starts. + base_key: Key, + /// A collection of normalized keys which have been visited during the search. + visited_keys: HashSet, + /// The key currently being processed. + current_key: Key, + /// Path components which have not yet been followed, held in the same order in which they were + /// provided to the `query()` call. + unvisited_names: VecDeque, + /// Path components which have been followed, held in the same order in which they were + /// provided to the `query()` call. + visited_names: Vec, + /// Current depth of the query. + depth: u64, +} + +impl Query { + fn new(base_key: Key, path: &[String]) -> Self { + Query { + base_key, + current_key: base_key.normalize(), + unvisited_names: path.iter().cloned().collect(), + visited_names: Vec::new(), + visited_keys: HashSet::new(), + depth: 0, + } + } + + /// Panics if `unvisited_names` is empty. 
+ fn next_name(&mut self) -> Option<&String> { + let next_name = self.unvisited_names.pop_front()?; + self.visited_names.push(next_name); + self.visited_names.last() + } + + fn navigate(&mut self, key: Key) { + self.current_key = key.normalize(); + self.depth += 1; + } + + fn navigate_for_named_key(&mut self, named_key: Key) { + if let Key::NamedKey(_) = &named_key { + self.current_key = named_key.normalize(); + } + } + + fn into_not_found_result(self, msg_prefix: &str) -> TrackingCopyQueryResult { + let msg = format!("{} at path: {}", msg_prefix, self.current_path()); + TrackingCopyQueryResult::ValueNotFound(msg) + } + + fn into_circular_ref_result(self) -> TrackingCopyQueryResult { + let msg = format!( + "{:?} has formed a circular reference at path: {}", + self.current_key, + self.current_path() + ); + TrackingCopyQueryResult::CircularReference(msg) + } + + fn into_depth_limit_result(self) -> TrackingCopyQueryResult { + TrackingCopyQueryResult::DepthLimit { depth: self.depth } + } + + fn current_path(&self) -> String { + let mut path = format!("{:?}", self.base_key); + for name in &self.visited_names { + path.push('/'); + path.push_str(name); + } + path + } +} + +/// Keeps track of already accessed keys. +/// We deliberately separate cached Reads from cached mutations +/// because we want to invalidate Reads' cache so it doesn't grow too fast. +#[derive(Clone, Debug)] +pub struct GenericTrackingCopyCache { + max_cache_size: usize, + current_cache_size: usize, + reads_cached: LinkedHashMap, + muts_cached: BTreeMap, + prunes_cached: BTreeSet, + meter: M, +} + +impl + Copy + Default> GenericTrackingCopyCache { + /// Creates instance of `TrackingCopyCache` with specified `max_cache_size`, + /// above which least-recently-used elements of the cache are invalidated. + /// Measurements of elements' "size" is done with the usage of `Meter` + /// instance. 
+ pub fn new(max_cache_size: usize, meter: M) -> GenericTrackingCopyCache { + GenericTrackingCopyCache { + max_cache_size, + current_cache_size: 0, + reads_cached: LinkedHashMap::new(), + muts_cached: BTreeMap::new(), + prunes_cached: BTreeSet::new(), + meter, + } + } + + /// Creates instance of `TrackingCopyCache` with specified `max_cache_size`, above which + /// least-recently-used elements of the cache are invalidated. Measurements of elements' "size" + /// is done with the usage of default `Meter` instance. + pub fn new_default(max_cache_size: usize) -> GenericTrackingCopyCache { + GenericTrackingCopyCache::new(max_cache_size, M::default()) + } + + /// Inserts `key` and `value` pair to Read cache. + pub fn insert_read(&mut self, key: Key, value: StoredValue) { + let element_size = Meter::measure(&self.meter, &key, &value); + self.reads_cached.insert(key, value); + self.current_cache_size += element_size; + while self.current_cache_size > self.max_cache_size { + match self.reads_cached.pop_front() { + Some((k, v)) => { + let element_size = Meter::measure(&self.meter, &k, &v); + self.current_cache_size -= element_size; + } + None => break, + } + } + } + + /// Inserts `key` and `value` pair to Write/Add cache. + pub fn insert_write(&mut self, key: Key, value: StoredValue) { + let kb = KeyWithByteRepr::new(key); + self.prunes_cached.remove(&key); + self.muts_cached.insert(kb, value); + } + + /// Inserts `key` and `value` pair to Write/Add cache. + pub fn insert_prune(&mut self, key: Key) { + self.prunes_cached.insert(key); + } + + /// Gets value from `key` in the cache. + pub fn get(&mut self, key: &Key) -> Option<&StoredValue> { + if self.prunes_cached.contains(key) { + // the item is marked for pruning and therefore + // is no longer accessible. 
+ return None; + } + let kb = KeyWithByteRepr::new(*key); + if let Some(value) = self.muts_cached.get(&kb) { + return Some(value); + }; + + self.reads_cached.get_refresh(key).map(|v| &*v) + } + + /// Get cached items by prefix. + fn get_muts_cached_by_byte_prefix(&self, prefix: &[u8]) -> Vec { + self.muts_cached + .range(prefix.to_vec()..) + .take_while(|(key, _)| key.starts_with(prefix)) + .map(|(key, _)| key.to_key()) + .collect() + } + + /// Does the prune cache contain key. + pub fn is_pruned(&self, key: &Key) -> bool { + self.prunes_cached.contains(key) + } + + pub(self) fn into_muts(self) -> (BTreeMap, BTreeSet) { + (self.muts_cached, self.prunes_cached) + } +} + +/// A helper type for `TrackingCopyCache` that allows convenient storage and access +/// to keys as bytes. +/// Its equality and ordering is based on the byte representation of the key. +#[derive(Debug, Clone)] +struct KeyWithByteRepr(Key, Vec); + +impl KeyWithByteRepr { + #[inline] + fn new(key: Key) -> Self { + let bytes = key.to_bytes().expect("should always serialize a Key"); + KeyWithByteRepr(key, bytes) + } + + #[inline] + fn starts_with(&self, prefix: &[u8]) -> bool { + self.1.starts_with(prefix) + } + + #[inline] + fn to_key(&self) -> Key { + self.0 + } +} + +impl Borrow> for KeyWithByteRepr { + #[inline] + fn borrow(&self) -> &Vec { + &self.1 + } +} + +impl PartialEq for KeyWithByteRepr { + #[inline] + fn eq(&self, other: &Self) -> bool { + self.1 == other.1 + } +} + +impl Eq for KeyWithByteRepr {} + +impl PartialOrd for KeyWithByteRepr { + #[inline] + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for KeyWithByteRepr { + #[inline] + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.1.cmp(&other.1) + } +} + +/// An alias for a `TrackingCopyCache` with `HeapSize` as the meter. 
+pub type TrackingCopyCache = GenericTrackingCopyCache; + +/// An interface for the global state that caches all operations (reads and writes) instead of +/// applying them directly to the state. This way the state remains unmodified, while the user can +/// interact with it as if it was being modified in real time. +#[derive(Clone)] +pub struct TrackingCopy { + reader: Arc, + cache: TrackingCopyCache, + effects: Effects, + max_query_depth: u64, + messages: Messages, + enable_addressable_entity: bool, +} + +/// Result of executing an "add" operation on a value in the state. +#[derive(Debug)] +pub enum AddResult { + /// The operation was successful. + Success, + /// The key was not found. + KeyNotFound(Key), + /// There was a type mismatch between the stored value and the value being added. + TypeMismatch(StoredValueTypeMismatch), + /// Serialization error. + Serialization(bytesrepr::Error), + /// Transform error. + Transform(TransformError), +} + +impl From for AddResult { + fn from(error: CLValueError) -> Self { + match error { + CLValueError::Serialization(error) => AddResult::Serialization(error), + CLValueError::Type(type_mismatch) => { + let expected = format!("{:?}", type_mismatch.expected); + let found = format!("{:?}", type_mismatch.found); + AddResult::TypeMismatch(StoredValueTypeMismatch::new(expected, found)) + } + } + } +} + +/// A helper type for `TrackingCopy` that represents a key-value pair. +pub type TrackingCopyParts = (TrackingCopyCache, Effects, Messages); + +impl> TrackingCopy +where + R: StateReader, +{ + /// Creates a new `TrackingCopy` using the `reader` as the interface to the state. + pub fn new( + reader: R, + max_query_depth: u64, + enable_addressable_entity: bool, + ) -> TrackingCopy { + TrackingCopy { + reader: Arc::new(reader), + // TODO: Should `max_cache_size` be a fraction of wasm memory limit? 
+ cache: GenericTrackingCopyCache::new(1024 * 16, HeapSize), + effects: Effects::new(), + max_query_depth, + messages: Vec::new(), + enable_addressable_entity, + } + } + + /// Returns the `reader` used to access the state. + pub fn reader(&self) -> &R { + &self.reader + } + + /// Returns a shared reference to the `reader` used to access the state. + pub fn shared_reader(&self) -> Arc { + Arc::clone(&self.reader) + } + + /// Creates a new `TrackingCopy` using the `reader` as the interface to the state. + /// Returns a new `TrackingCopy` instance that is a snapshot of the current state, allowing + /// further changes to be made. + /// + /// This method creates a new `TrackingCopy` using the current instance (including its + /// mutations) as the base state to read against. Mutations made to the new `TrackingCopy` + /// will not impact the original instance. + /// + /// Note: Currently, there is no `join` or `merge` function to bring changes from a fork back to + /// the main `TrackingCopy`. Therefore, forking should be done repeatedly, which is + /// suboptimal and will be improved in the future. + pub fn fork(&self) -> TrackingCopy<&TrackingCopy> { + TrackingCopy::new(self, self.max_query_depth, self.enable_addressable_entity) + } + + /// Returns a new `TrackingCopy` instance that is a snapshot of the current state, allowing + /// further changes to be made. + /// + /// This method creates a new `TrackingCopy` using the current instance (including its + /// mutations) as the base state to read against. Mutations made to the new `TrackingCopy` + /// will not impact the original instance. + /// + /// Note: Currently, there is no `join` or `merge` function to bring changes from a fork back to + /// the main `TrackingCopy`. This method is an alternative to the `fork` method and is + /// provided for clarity and consistency in naming. 
+ pub fn fork2(&self) -> Self { + TrackingCopy { + reader: Arc::clone(&self.reader), + cache: self.cache.clone(), + effects: self.effects.clone(), + max_query_depth: self.max_query_depth, + messages: self.messages.clone(), + enable_addressable_entity: self.enable_addressable_entity, + } + } + + /// Applies the changes to the state. + /// + /// This is a low-level function that should be used only by the execution engine. The purpose + /// of this function is to apply the changes to the state from a forked tracking copy. Once + /// caller decides that the changes are valid, they can be applied to the state and the + /// processing can resume. + pub fn apply_changes( + &mut self, + effects: Effects, + cache: TrackingCopyCache, + messages: Messages, + ) { + self.effects = effects; + self.cache = cache; + self.messages = messages; + } + + /// Returns a copy of the execution effects cached by this instance. + pub fn effects(&self) -> Effects { + self.effects.clone() + } + + /// Returns copy of cache. + pub fn cache(&self) -> TrackingCopyCache { + self.cache.clone() + } + + /// Destructure cached entries. + pub fn destructure(self) -> (Vec<(Key, StoredValue)>, BTreeSet, Effects) { + let (writes, prunes) = self.cache.into_muts(); + let writes: Vec<(Key, StoredValue)> = writes.into_iter().map(|(k, v)| (k.0, v)).collect(); + + (writes, prunes, self.effects) + } + + /// Enable the addressable entity and migrate accounts/contracts to entities. + pub fn enable_addressable_entity(&self) -> bool { + self.enable_addressable_entity + } + + /// Get record by key. 
+ pub fn get(&mut self, key: &Key) -> Result, TrackingCopyError> { + if let Some(value) = self.cache.get(key) { + return Ok(Some(value.to_owned())); + } + match self.reader.read(key) { + Ok(ret) => { + if let Some(value) = ret { + self.cache.insert_read(*key, value.to_owned()); + Ok(Some(value)) + } else { + Ok(None) + } + } + Err(err) => Err(TrackingCopyError::Storage(err)), + } + } + + /// Gets the set of keys in the state whose tag is `key_tag`. + pub fn get_keys(&self, key_tag: &KeyTag) -> Result, TrackingCopyError> { + self.get_by_byte_prefix(&[*key_tag as u8]) + } + + /// Get keys by prefix. + pub fn get_keys_by_prefix( + &self, + key_prefix: &KeyPrefix, + ) -> Result, TrackingCopyError> { + let byte_prefix = key_prefix + .to_bytes() + .map_err(TrackingCopyError::BytesRepr)?; + self.get_by_byte_prefix(&byte_prefix) + } + + /// Gets the set of keys in the state by a byte prefix. + pub(crate) fn get_by_byte_prefix( + &self, + byte_prefix: &[u8], + ) -> Result, TrackingCopyError> { + let ret = self.keys_with_prefix(byte_prefix)?.into_iter().collect(); + Ok(ret) + } + + /// Reads the value stored under `key`. + pub fn read(&mut self, key: &Key) -> Result, TrackingCopyError> { + let normalized_key = key.normalize(); + if let Some(value) = self.get(&normalized_key)? { + self.effects + .push(TransformV2::new(normalized_key, TransformKindV2::Identity)); + Ok(Some(value)) + } else { + Ok(None) + } + } + + /// Reads the first value stored under the keys in `keys`. + pub fn read_first(&mut self, keys: &[&Key]) -> Result, TrackingCopyError> { + for key in keys { + if let Some(value) = self.read(key)? { + return Ok(Some(value)); + } + } + Ok(None) + } + + /// Writes `value` under `key`. Note that the written value is only cached. 
+ pub fn write(&mut self, key: Key, value: StoredValue) { + let normalized_key = key.normalize(); + self.cache.insert_write(normalized_key, value.clone()); + let transform = TransformV2::new(normalized_key, TransformKindV2::Write(value)); + self.effects.push(transform); + } + + /// Caches the emitted message and writes the message topic summary under the specified key. + /// + /// This function does not check the types for the key and the value so the caller should + /// correctly set the type. The `message_topic_key` should be of the `Key::MessageTopic` + /// variant and the `message_topic_summary` should be of the `StoredValue::Message` variant. + #[allow(clippy::too_many_arguments)] + pub fn emit_message( + &mut self, + message_topic_key: Key, + message_topic_summary: StoredValue, + message_key: Key, + message_value: StoredValue, + block_message_count_value: StoredValue, + message: Message, + ) { + self.write(message_key, message_value); + self.write(message_topic_key, message_topic_summary); + self.write( + Key::BlockGlobal(BlockGlobalAddr::MessageCount), + block_message_count_value, + ); + self.messages.push(message); + } + + /// Prunes a `key`. + pub fn prune(&mut self, key: Key) { + let normalized_key = key.normalize(); + self.cache.insert_prune(normalized_key); + self.effects.push(TransformV2::new( + normalized_key, + TransformKindV2::Prune(key), + )); + } + + /// Ok(None) represents missing key to which we want to "add" some value. + /// Ok(Some(unit)) represents successful operation. + /// Err(error) is reserved for unexpected errors when accessing global + /// state. + pub fn add(&mut self, key: Key, value: StoredValue) -> Result { + let normalized_key = key.normalize(); + let current_value = match self.get(&normalized_key)? 
{ + None => return Ok(AddResult::KeyNotFound(normalized_key)), + Some(current_value) => current_value, + }; + + let type_name = value.type_name(); + let mismatch = || { + Ok(AddResult::TypeMismatch(StoredValueTypeMismatch::new( + "I32, U64, U128, U256, U512 or (String, Key) tuple".to_string(), + type_name, + ))) + }; + + let transform_kind = match value { + StoredValue::CLValue(cl_value) => match *cl_value.cl_type() { + CLType::I32 => match cl_value.into_t() { + Ok(value) => TransformKindV2::AddInt32(value), + Err(error) => return Ok(AddResult::from(error)), + }, + CLType::U64 => match cl_value.into_t() { + Ok(value) => TransformKindV2::AddUInt64(value), + Err(error) => return Ok(AddResult::from(error)), + }, + CLType::U128 => match cl_value.into_t() { + Ok(value) => TransformKindV2::AddUInt128(value), + Err(error) => return Ok(AddResult::from(error)), + }, + CLType::U256 => match cl_value.into_t() { + Ok(value) => TransformKindV2::AddUInt256(value), + Err(error) => return Ok(AddResult::from(error)), + }, + CLType::U512 => match cl_value.into_t() { + Ok(value) => TransformKindV2::AddUInt512(value), + Err(error) => return Ok(AddResult::from(error)), + }, + _ => { + if *cl_value.cl_type() == casper_types::named_key_type() { + match cl_value.into_t() { + Ok((name, key)) => { + let mut named_keys = NamedKeys::new(); + named_keys.insert(name, key); + TransformKindV2::AddKeys(named_keys) + } + Err(error) => return Ok(AddResult::from(error)), + } + } else { + return mismatch(); + } + } + }, + _ => return mismatch(), + }; + + match transform_kind.clone().apply(current_value) { + Ok(TransformInstruction::Store(new_value)) => { + self.cache.insert_write(normalized_key, new_value); + self.effects + .push(TransformV2::new(normalized_key, transform_kind)); + Ok(AddResult::Success) + } + Ok(TransformInstruction::Prune(key)) => { + self.cache.insert_prune(normalized_key); + self.effects.push(TransformV2::new( + normalized_key, + TransformKindV2::Prune(key), + )); + 
Ok(AddResult::Success) + } + Err(TransformError::TypeMismatch(type_mismatch)) => { + Ok(AddResult::TypeMismatch(type_mismatch)) + } + Err(TransformError::Serialization(error)) => Ok(AddResult::Serialization(error)), + Err(transform_error) => Ok(AddResult::Transform(transform_error)), + } + } + + /// Returns a copy of the messages cached by this instance. + pub fn messages(&self) -> Messages { + self.messages.clone() + } + + /// Calling `query()` avoids calling into `self.cache`, so this will not return any values + /// written or mutated in this `TrackingCopy` via previous calls to `write()` or `add()`, since + /// these updates are only held in `self.cache`. + /// + /// The intent is that `query()` is only used to satisfy `QueryRequest`s made to the server. + /// Other EE internal use cases should call `read()` or `get()` in order to retrieve cached + /// values. + pub fn query( + &self, + base_key: Key, + path: &[String], + ) -> Result { + let mut query = Query::new(base_key, path); + + let mut proofs = Vec::new(); + + loop { + if query.depth >= self.max_query_depth { + return Ok(query.into_depth_limit_result()); + } + + if !query.visited_keys.insert(query.current_key) { + return Ok(query.into_circular_ref_result()); + } + + let stored_value = match self.reader.read_with_proof(&query.current_key)? { + None => { + return Ok(query.into_not_found_result("Failed to find base key")); + } + Some(stored_value) => stored_value, + }; + + let value = stored_value.value().to_owned(); + + // Following code does a patching on the `StoredValue` to get an inner + // `DictionaryValue` for dictionaries only. 
+ let value = match handle_stored_dictionary_value(query.current_key, value) { + Ok(patched_stored_value) => patched_stored_value, + Err(error) => { + return Ok(query.into_not_found_result(&format!( + "Failed to retrieve dictionary value: {}", + error + ))); + } + }; + + proofs.push(stored_value); + + if query.unvisited_names.is_empty() && !query.current_key.is_named_key() { + return Ok(TrackingCopyQueryResult::Success { value, proofs }); + } + + let stored_value: &StoredValue = proofs + .last() + .map(|r| r.value()) + .expect("but we just pushed"); + + match stored_value { + StoredValue::Account(account) => { + let mut maybe_msg_prefix: Option = None; + if let Some(name) = query.next_name() { + if let Some(key) = account.named_keys().get(name) { + query.navigate(*key); + } else { + maybe_msg_prefix = Some(format!("Name {} not found in Account", name)); + } + } else { + maybe_msg_prefix = Some("All names visited".to_string()); + } + if let Some(msg_prefix) = maybe_msg_prefix { + return Ok(query.into_not_found_result(&msg_prefix)); + } + } + StoredValue::Contract(contract) => { + let mut maybe_msg_prefix: Option = None; + if let Some(name) = query.next_name() { + if let Some(key) = contract.named_keys().get(name) { + query.navigate(*key); + } else { + maybe_msg_prefix = Some(format!("Name {} not found in Contract", name)); + } + } else { + maybe_msg_prefix = Some("All names visited".to_string()); + } + if let Some(msg_prefix) = maybe_msg_prefix { + return Ok(query.into_not_found_result(&msg_prefix)); + } + } + StoredValue::AddressableEntity(_) => { + let current_key = query.current_key; + let mut maybe_msg_prefix: Option = None; + if let Some(name) = query.next_name() { + if let Key::AddressableEntity(addr) = current_key { + let named_key_addr = + match NamedKeyAddr::new_from_string(addr, name.clone()) { + Ok(named_key_addr) => Key::NamedKey(named_key_addr), + Err(error) => { + let msg_prefix = format!("{}", error); + return 
Ok(query.into_not_found_result(&msg_prefix)); + } + }; + query.navigate_for_named_key(named_key_addr); + } else { + maybe_msg_prefix = Some("Invalid base key".to_string()); + } + } else { + maybe_msg_prefix = Some("All names visited".to_string()); + } + if let Some(msg_prefix) = maybe_msg_prefix { + return Ok(query.into_not_found_result(&msg_prefix)); + } + } + StoredValue::NamedKey(named_key_value) => { + match query.visited_names.last() { + Some(expected_name) => match named_key_value.get_name() { + Ok(actual_name) => { + if &actual_name != expected_name { + return Ok(query.into_not_found_result( + "Queried and retrieved names do not match", + )); + } else if let Ok(key) = named_key_value.get_key() { + query.navigate(key) + } else { + return Ok(query + .into_not_found_result("Failed to parse CLValue as Key")); + } + } + Err(_) => { + return Ok(query + .into_not_found_result("Failed to parse CLValue as String")); + } + }, + None if path.is_empty() => { + return Ok(TrackingCopyQueryResult::Success { value, proofs }); + } + None => return Ok(query.into_not_found_result("No visited names")), + } + } + StoredValue::CLValue(cl_value) if cl_value.cl_type() == &CLType::Key => { + if let Ok(key) = cl_value.to_owned().into_t::() { + query.navigate(key); + } else { + return Ok(query.into_not_found_result("Failed to parse CLValue as Key")); + } + } + StoredValue::CLValue(cl_value) => { + let msg_prefix = format!( + "Query cannot continue as {:?} is not an account, contract nor key to \ + such. 
Value found", + cl_value + ); + return Ok(query.into_not_found_result(&msg_prefix)); + } + StoredValue::ContractWasm(_) => { + return Ok(query.into_not_found_result("ContractWasm value found.")); + } + StoredValue::ContractPackage(_) => { + return Ok(query.into_not_found_result("ContractPackage value found.")); + } + StoredValue::SmartContract(_) => { + return Ok(query.into_not_found_result("Package value found.")); + } + StoredValue::ByteCode(_) => { + return Ok(query.into_not_found_result("ByteCode value found.")); + } + StoredValue::Transfer(_) => { + return Ok(query.into_not_found_result("Legacy Transfer value found.")); + } + StoredValue::DeployInfo(_) => { + return Ok(query.into_not_found_result("DeployInfo value found.")); + } + StoredValue::EraInfo(_) => { + return Ok(query.into_not_found_result("EraInfo value found.")); + } + StoredValue::Bid(_) => { + return Ok(query.into_not_found_result("Bid value found.")); + } + StoredValue::BidKind(_) => { + return Ok(query.into_not_found_result("BidKind value found.")); + } + StoredValue::Withdraw(_) => { + return Ok(query.into_not_found_result("WithdrawPurses value found.")); + } + StoredValue::Unbonding(_) => { + return Ok(query.into_not_found_result("Unbonding value found.")); + } + StoredValue::MessageTopic(_) => { + return Ok(query.into_not_found_result("MessageTopic value found.")); + } + StoredValue::Message(_) => { + return Ok(query.into_not_found_result("Message value found.")); + } + StoredValue::EntryPoint(_) => { + return Ok(query.into_not_found_result("EntryPoint value found.")); + } + StoredValue::Prepayment(_) => { + return Ok(query.into_not_found_result("Prepayment value found.")) + } + StoredValue::RawBytes(_) => { + return Ok(query.into_not_found_result("RawBytes value found.")); + } + } + } + } +} + +/// The purpose of this implementation is to allow a "snapshot" mechanism for +/// TrackingCopy. 
The state of a TrackingCopy (including the effects of +/// any transforms it has accumulated) can be read using an immutable +/// reference to that TrackingCopy via this trait implementation. See +/// `TrackingCopy::fork` for more information. +impl> StateReader for &TrackingCopy { + type Error = R::Error; + + fn read(&self, key: &Key) -> Result, Self::Error> { + let kb = KeyWithByteRepr::new(*key); + if let Some(value) = self.cache.muts_cached.get(&kb) { + return Ok(Some(value.to_owned())); + } + if let Some(value) = self.reader.read(key)? { + Ok(Some(value)) + } else { + Ok(None) + } + } + + fn read_with_proof( + &self, + key: &Key, + ) -> Result>, Self::Error> { + self.reader.read_with_proof(key) + } + + fn keys_with_prefix(&self, byte_prefix: &[u8]) -> Result, Self::Error> { + let keys = self.reader.keys_with_prefix(byte_prefix)?; + + let ret = keys + .into_iter() + // don't include keys marked for pruning + .filter(|key| !self.cache.is_pruned(key)) + // there may be newly inserted keys which have not been committed yet + .chain(self.cache.get_muts_cached_by_byte_prefix(byte_prefix)) + .collect(); + Ok(ret) + } +} + +/// Error conditions of a proof validation. +#[derive(Error, Debug, PartialEq, Eq)] +pub enum ValidationError { + /// The path should not have a different length than the proof less one. + #[error("The path should not have a different length than the proof less one.")] + PathLengthDifferentThanProofLessOne, + + /// The provided key does not match the key in the proof. + #[error("The provided key does not match the key in the proof.")] + UnexpectedKey, + + /// The provided value does not match the value in the proof. + #[error("The provided value does not match the value in the proof.")] + UnexpectedValue, + + /// The proof hash is invalid. + #[error("The proof hash is invalid.")] + InvalidProofHash, + + /// The path went cold. + #[error("The path went cold.")] + PathCold, + + /// (De)serialization error. 
+ #[error("Serialization error: {0}")] + BytesRepr(bytesrepr::Error), + + /// Key is not a URef. + #[error("Key is not a URef")] + KeyIsNotAURef(Key), + + /// Error converting a stored value to a [`Key`]. + #[error("Failed to convert stored value to key")] + ValueToCLValueConversion, + + /// CLValue conversion error. + #[error("{0}")] + CLValueError(CLValueError), +} + +impl From for ValidationError { + fn from(err: CLValueError) -> Self { + ValidationError::CLValueError(err) + } +} + +impl From for ValidationError { + fn from(error: bytesrepr::Error) -> Self { + Self::BytesRepr(error) + } +} + +/// Validates proof of the query. +/// +/// Returns [`ValidationError`] for any of +pub fn validate_query_proof( + hash: &Digest, + proofs: &[TrieMerkleProof], + expected_first_key: &Key, + path: &[String], + expected_value: &StoredValue, +) -> Result<(), ValidationError> { + if proofs.len() != path.len() + 1 { + return Err(ValidationError::PathLengthDifferentThanProofLessOne); + } + + let mut proofs_iter = proofs.iter(); + let first_proof = match proofs_iter.next() { + Some(proof) => proof, + None => { + return Err(ValidationError::PathLengthDifferentThanProofLessOne); + } + }; + + let mut path_components_iter = path.iter(); + + if first_proof.key() != &expected_first_key.normalize() { + return Err(ValidationError::UnexpectedKey); + } + + if hash != &compute_state_hash(first_proof)? 
{ + return Err(ValidationError::InvalidProofHash); + } + + let mut proof_value = first_proof.value(); + + for proof in proofs_iter { + let named_keys = match proof_value { + StoredValue::Account(account) => account.named_keys(), + StoredValue::Contract(contract) => contract.named_keys(), + _ => return Err(ValidationError::PathCold), + }; + + let path_component = match path_components_iter.next() { + Some(path_component) => path_component, + None => return Err(ValidationError::PathCold), + }; + + let key = match named_keys.get(path_component) { + Some(key) => key, + None => return Err(ValidationError::PathCold), + }; + + if proof.key() != &key.normalize() { + return Err(ValidationError::UnexpectedKey); + } + + if hash != &compute_state_hash(proof)? { + return Err(ValidationError::InvalidProofHash); + } + + proof_value = proof.value(); + } + + if proof_value != expected_value { + return Err(ValidationError::UnexpectedValue); + } + + Ok(()) +} + +/// Validates proof of the query. +/// +/// Returns [`ValidationError`] for any of +pub fn validate_query_merkle_proof( + hash: &Digest, + proofs: &[TrieMerkleProof], + expected_key_trace: &[Key], + expected_value: &StoredValue, +) -> Result<(), ValidationError> { + let expected_len = expected_key_trace.len(); + if proofs.len() != expected_len { + return Err(ValidationError::PathLengthDifferentThanProofLessOne); + } + + let proof_keys: Vec = proofs.iter().map(|proof| *proof.key()).collect(); + + if !expected_key_trace.eq(&proof_keys) { + return Err(ValidationError::UnexpectedKey); + } + + if expected_value != proofs[expected_len - 1].value() { + return Err(ValidationError::UnexpectedValue); + } + + let mut proofs_iter = proofs.iter(); + + let first_proof = match proofs_iter.next() { + Some(proof) => proof, + None => return Err(ValidationError::PathLengthDifferentThanProofLessOne), + }; + + if hash != &compute_state_hash(first_proof)? 
{ + return Err(ValidationError::InvalidProofHash); + } + + Ok(()) +} + +/// Validates a proof of a balance request. +pub fn validate_balance_proof( + hash: &Digest, + balance_proof: &TrieMerkleProof, + expected_purse_key: Key, + expected_motes: &U512, +) -> Result<(), ValidationError> { + let expected_balance_key = expected_purse_key + .into_uref() + .map(|uref| Key::Balance(uref.addr())) + .ok_or_else(|| ValidationError::KeyIsNotAURef(expected_purse_key.to_owned()))?; + + if balance_proof.key() != &expected_balance_key.normalize() { + return Err(ValidationError::UnexpectedKey); + } + + if hash != &compute_state_hash(balance_proof)? { + return Err(ValidationError::InvalidProofHash); + } + + let balance_proof_stored_value = balance_proof.value().to_owned(); + + let balance_proof_clvalue: CLValue = balance_proof_stored_value + .try_into() + .map_err(|_| ValidationError::ValueToCLValueConversion)?; + + let balance_motes: U512 = balance_proof_clvalue.into_t()?; + + if expected_motes != &balance_motes { + return Err(ValidationError::UnexpectedValue); + } + + Ok(()) +} + +use crate::global_state::{ + error::Error, + state::{ + lmdb::{make_temporary_global_state, LmdbGlobalStateView}, + StateProvider, + }, +}; +use tempfile::TempDir; + +/// Creates a temp global state with initial state and checks out a tracking copy on it. 
+pub fn new_temporary_tracking_copy( + initial_data: impl IntoIterator, + max_query_depth: Option, + enable_addressable_entity: bool, +) -> (TrackingCopy, TempDir) { + let (global_state, state_root_hash, tempdir) = make_temporary_global_state(initial_data); + + let reader = global_state + .checkout(state_root_hash) + .expect("Checkout should not throw errors.") + .expect("Root hash should exist."); + + let query_depth = max_query_depth.unwrap_or(DEFAULT_MAX_QUERY_DEPTH); + + ( + TrackingCopy::new(reader, query_depth, enable_addressable_entity), + tempdir, + ) +} diff --git a/storage/src/tracking_copy/tests.rs b/storage/src/tracking_copy/tests.rs new file mode 100644 index 0000000000..ffe245149c --- /dev/null +++ b/storage/src/tracking_copy/tests.rs @@ -0,0 +1,1199 @@ +use std::sync::{Arc, RwLock}; + +use assert_matches::assert_matches; + +use casper_types::{ + account::AccountHash, + addressable_entity::{ + ActionThresholds, AddressableEntityHash, AssociatedKeys, NamedKeyAddr, NamedKeyValue, + Weight, + }, + contract_messages::Messages, + contracts::{EntryPoints as ContractEntryPoints, NamedKeys}, + execution::{Effects, TransformKindV2, TransformV2}, + gens::*, + global_state::TrieMerkleProof, + handle_stored_dictionary_value, AccessRights, AddressableEntity, ByteCodeHash, CLValue, + CLValueDictionary, CLValueError, ContractRuntimeTag, EntityAddr, EntityKind, HashAddr, Key, + KeyTag, PackageHash, ProtocolVersion, StoredValue, URef, U256, U512, UREF_ADDR_LENGTH, +}; + +use super::{ + meter::count_meter::Count, GenericTrackingCopyCache, TrackingCopyError, TrackingCopyQueryResult, +}; +use crate::{ + global_state::state::{self, StateProvider, StateReader}, + tracking_copy::{self, TrackingCopy}, +}; + +use crate::global_state::{DEFAULT_ENABLE_ENTITY, DEFAULT_MAX_QUERY_DEPTH}; +use casper_types::contracts::ContractHash; +use proptest::proptest; + +struct CountingDb { + count: Arc>, + value: Option, +} + +impl CountingDb { + fn new(counter: Arc>) -> CountingDb { + 
CountingDb { + count: counter, + value: None, + } + } +} + +impl StateReader for CountingDb { + type Error = crate::global_state::error::Error; + fn read(&self, _key: &Key) -> Result, Self::Error> { + let count = *self.count.read().unwrap(); + let value = match self.value { + Some(ref v) => v.clone(), + None => StoredValue::CLValue(CLValue::from_t(count).unwrap()), + }; + *self.count.write().unwrap() = count + 1; + Ok(Some(value)) + } + + fn read_with_proof( + &self, + _key: &Key, + ) -> Result>, Self::Error> { + Ok(None) + } + + fn keys_with_prefix(&self, _prefix: &[u8]) -> Result, Self::Error> { + Ok(Vec::new()) + } +} + +fn effects(transform_keys_and_kinds: Vec<(Key, TransformKindV2)>) -> Effects { + let mut effects = Effects::new(); + for (key, kind) in transform_keys_and_kinds { + effects.push(TransformV2::new(key, kind)); + } + effects +} + +#[test] +fn tracking_copy_new() { + let counter = Arc::new(RwLock::new(0)); + let db = CountingDb::new(counter); + let tc = TrackingCopy::new(db, DEFAULT_MAX_QUERY_DEPTH, DEFAULT_ENABLE_ENTITY); + + assert!(tc.effects.is_empty()); +} + +#[test] +fn tracking_copy_caching() { + let counter = Arc::new(RwLock::new(0)); + let db = CountingDb::new(Arc::clone(&counter)); + let mut tc = TrackingCopy::new(db, DEFAULT_MAX_QUERY_DEPTH, DEFAULT_ENABLE_ENTITY); + let k = Key::Hash([0u8; 32]); + + let zero = StoredValue::CLValue(CLValue::from_t(0_i32).unwrap()); + // first read + let value = tc.read(&k).unwrap().unwrap(); + assert_eq!(value, zero); + + // second read; should use cache instead + // of going back to the DB + let value = tc.read(&k).unwrap().unwrap(); + let db_value = *counter.read().unwrap(); + assert_eq!(value, zero); + assert_eq!(db_value, 1); +} + +#[test] +fn tracking_copy_read() { + let counter = Arc::new(RwLock::new(0)); + let db = CountingDb::new(Arc::clone(&counter)); + let mut tc = TrackingCopy::new(db, DEFAULT_MAX_QUERY_DEPTH, DEFAULT_ENABLE_ENTITY); + let k = Key::Hash([0u8; 32]); + + let zero = 
StoredValue::CLValue(CLValue::from_t(0_i32).unwrap()); + let value = tc.read(&k).unwrap().unwrap(); + // value read correctly + assert_eq!(value, zero); + // Reading does produce an identity transform. + assert_eq!(tc.effects, effects(vec![(k, TransformKindV2::Identity)])); +} + +#[test] +fn tracking_copy_write() { + let counter = Arc::new(RwLock::new(0)); + let db = CountingDb::new(Arc::clone(&counter)); + let mut tc = TrackingCopy::new(db, DEFAULT_MAX_QUERY_DEPTH, DEFAULT_ENABLE_ENTITY); + let k = Key::Hash([0u8; 32]); + + let one = StoredValue::CLValue(CLValue::from_t(1_i32).unwrap()); + let two = StoredValue::CLValue(CLValue::from_t(2_i32).unwrap()); + + // writing should work + tc.write(k, one.clone()); + // write does not need to query the DB + let db_value = *counter.read().unwrap(); + assert_eq!(db_value, 0); + // Writing creates a write transform. + assert_eq!( + tc.effects, + effects(vec![(k, TransformKindV2::Write(one.clone()))]) + ); + + // writing again should update the values + tc.write(k, two.clone()); + let db_value = *counter.read().unwrap(); + assert_eq!(db_value, 0); + assert_eq!( + tc.effects, + effects(vec![ + (k, TransformKindV2::Write(one)), + (k, TransformKindV2::Write(two)), + ]) + ); +} + +#[test] +fn tracking_copy_add_i32() { + let counter = Arc::new(RwLock::new(0)); + let db = CountingDb::new(counter); + let mut tc = TrackingCopy::new(db, DEFAULT_MAX_QUERY_DEPTH, DEFAULT_ENABLE_ENTITY); + let k = Key::Hash([0u8; 32]); + + let three = StoredValue::CLValue(CLValue::from_t(3_i32).unwrap()); + + // adding should work + let add = tc.add(k, three.clone()); + assert_matches!(add, Ok(_)); + + // Adding creates an add transform. 
+ assert_eq!(tc.effects, effects(vec![(k, TransformKindV2::AddInt32(3))])); + + // adding again should update the values + let add = tc.add(k, three); + assert_matches!(add, Ok(_)); + assert_eq!( + tc.effects, + effects(vec![(k, TransformKindV2::AddInt32(3)); 2]) + ); +} + +#[test] +fn tracking_copy_rw() { + let counter = Arc::new(RwLock::new(0)); + let db = CountingDb::new(counter); + let mut tc = TrackingCopy::new(db, DEFAULT_MAX_QUERY_DEPTH, DEFAULT_ENABLE_ENTITY); + let k = Key::Hash([0u8; 32]); + + // reading then writing should update the op + let value = StoredValue::CLValue(CLValue::from_t(3_i32).unwrap()); + let _ = tc.read(&k); + tc.write(k, value.clone()); + assert_eq!( + tc.effects, + effects(vec![ + (k, TransformKindV2::Identity), + (k, TransformKindV2::Write(value)), + ]) + ); +} + +#[test] +fn tracking_copy_ra() { + let counter = Arc::new(RwLock::new(0)); + let db = CountingDb::new(counter); + let mut tc = TrackingCopy::new(db, DEFAULT_MAX_QUERY_DEPTH, DEFAULT_ENABLE_ENTITY); + let k = Key::Hash([0u8; 32]); + + // reading then adding should update the op + let value = StoredValue::CLValue(CLValue::from_t(3_i32).unwrap()); + let _ = tc.read(&k); + let _ = tc.add(k, value); + assert_eq!( + tc.effects, + effects(vec![ + (k, TransformKindV2::Identity), + (k, TransformKindV2::AddInt32(3)), + ]) + ); +} + +#[test] +fn tracking_copy_aw() { + let counter = Arc::new(RwLock::new(0)); + let db = CountingDb::new(counter); + let mut tc = TrackingCopy::new(db, DEFAULT_MAX_QUERY_DEPTH, DEFAULT_ENABLE_ENTITY); + let k = Key::Hash([0u8; 32]); + + // adding then writing should update the op + let value = StoredValue::CLValue(CLValue::from_t(3_i32).unwrap()); + let write_value = StoredValue::CLValue(CLValue::from_t(7_i32).unwrap()); + let _ = tc.add(k, value); + tc.write(k, write_value.clone()); + assert_eq!( + tc.effects, + effects(vec![ + (k, TransformKindV2::AddInt32(3)), + (k, TransformKindV2::Write(write_value)), + ]) + ); +} + +#[test] +fn 
should_return_value_not_found() { + let (gs, root_hash, _tempdir) = state::lmdb::make_temporary_global_state([]); + let view = gs.checkout(root_hash).unwrap().unwrap(); + + let missing_key = Key::Dictionary([2u8; 32]); + let empty_path = Vec::new(); + let tc = TrackingCopy::new(view, DEFAULT_MAX_QUERY_DEPTH, DEFAULT_ENABLE_ENTITY); + let result = tc.query(missing_key, &empty_path); + assert_matches!(result, Ok(TrackingCopyQueryResult::ValueNotFound(_))); +} + +#[test] +fn should_find_existing_entry() { + let foo_key = Key::URef(URef::default()); + let foo_val = CLValue::from_t("test").expect("should get cl_value from string"); + let stored_val = StoredValue::CLValue(foo_val); + + // seed gs w/ entry as a testing convenience + let (gs, root_hash, _tempdir) = + state::lmdb::make_temporary_global_state([(foo_key, stored_val.clone())]); + + let view = gs.checkout(root_hash).unwrap().unwrap(); + let tc = TrackingCopy::new(view, DEFAULT_MAX_QUERY_DEPTH, DEFAULT_ENABLE_ENTITY); + let empty_path = Vec::new(); + let query_result = tc.query(foo_key, &empty_path); + if let Ok(TrackingCopyQueryResult::Success { value, .. 
}) = query_result { + assert_eq!(stored_val, value); + } else { + panic!("Query failed when it should not have!"); + } +} + +#[test] +fn should_query_empty_path() { + let dictionary_key = Key::Dictionary([1u8; 32]); + let cl_value = CLValue::from_t("test").expect("should get cl_value from string"); + let seed_uref = URef::default(); + let dictionary_item_key_bytes = "dict_name".as_bytes(); + let dictionary_value = CLValueDictionary::new( + cl_value, + seed_uref.addr().to_vec(), + dictionary_item_key_bytes.to_vec(), + ); + let stored_value = StoredValue::CLValue( + CLValue::from_t(dictionary_value).expect("should get cl_value from dictionary_value"), + ); + + // seed gs w/ entry as a testing convenience + let (gs, root_hash, _tempdir) = + state::lmdb::make_temporary_global_state([(dictionary_key, stored_value.clone())]); + + let view = gs.checkout(root_hash).unwrap().unwrap(); + let tc = TrackingCopy::new(view, DEFAULT_MAX_QUERY_DEPTH, DEFAULT_ENABLE_ENTITY); + let empty_path = Vec::new(); + let query_result = tc.query(dictionary_key, &empty_path); + let dictionary_stored_value = handle_stored_dictionary_value(dictionary_key, stored_value) + .expect("should get dictionary stored value"); + if let Ok(TrackingCopyQueryResult::Success { value, .. 
}) = query_result { + assert_eq!(dictionary_stored_value, value); + } else { + panic!("Query failed when it should not have!"); + } +} + +#[test] +fn should_traverse_contract_pathing() { + let account_hash = AccountHash::new([0u8; 32]); + let account_key = Key::Account(account_hash); + let account = + casper_types::account::Account::create(account_hash, NamedKeys::default(), URef::default()); + let stored_account = StoredValue::Account(account); + + let account_alias = "account_alias".to_string(); + let contract_named_keys = { + let mut named_keys = NamedKeys::new(); + named_keys.insert(account_alias.clone(), account_key); + named_keys + }; + let contract = casper_types::contracts::Contract::new( + [2; 32].into(), + [3; 32].into(), + contract_named_keys, + ContractEntryPoints::new(), + ProtocolVersion::V1_0_0, + ); + let contract_hash = ContractHash::default(); + let contract_key = Key::Hash(contract_hash.value()); + let stored_contract = StoredValue::Contract(contract); + + let (gs, root_hash, _tempdir) = state::lmdb::make_temporary_global_state([ + (account_key, stored_account.clone()), + (contract_key, stored_contract), + ]); + let view = gs.checkout(root_hash).unwrap().unwrap(); + let tc = TrackingCopy::new(view, DEFAULT_MAX_QUERY_DEPTH, DEFAULT_ENABLE_ENTITY); + let path = vec![account_alias]; + if let Ok(TrackingCopyQueryResult::Success { value, .. 
}) = tc.query(contract_key, &path) { + assert_eq!(value, stored_account, "should find expected account"); + } else { + panic!("Query failed when it should not have!"); + } +} + +#[test] +fn should_traverse_account_pathing() { + let contract = casper_types::contracts::Contract::new( + [2; 32].into(), + [3; 32].into(), + NamedKeys::default(), + ContractEntryPoints::new(), + ProtocolVersion::V1_0_0, + ); + let contract_hash = ContractHash::default(); + let contract_key = Key::Hash(contract_hash.value()); + let stored_contract = StoredValue::Contract(contract); + + let account_hash = AccountHash::new([0u8; 32]); + let account_key = Key::Account(account_hash); + let contract_alias = "contract_alias".to_string(); + let account_named_keys = { + let mut named_keys = NamedKeys::new(); + named_keys.insert(contract_alias.clone(), contract_key); + named_keys + }; + let account = + casper_types::account::Account::create(account_hash, account_named_keys, URef::default()); + let stored_account = StoredValue::Account(account); + + let (gs, root_hash, _tempdir) = state::lmdb::make_temporary_global_state([ + (account_key, stored_account), + (contract_key, stored_contract.clone()), + ]); + let view = gs.checkout(root_hash).unwrap().unwrap(); + let tc = TrackingCopy::new(view, DEFAULT_MAX_QUERY_DEPTH, DEFAULT_ENABLE_ENTITY); + let path = vec![contract_alias]; + if let Ok(TrackingCopyQueryResult::Success { value, .. 
}) = tc.query(account_key, &path) { + assert_eq!(value, stored_contract, "should find expected contract"); + } else { + panic!("Query failed when it should not have!"); + } +} + +#[test] +fn should_traverse_all_paths() { + let contract_hash = ContractHash::default(); + let contract_key = Key::Hash(contract_hash.value()); + let contract_alias = "contract_alias".to_string(); + let account_hash = AccountHash::new([0u8; 32]); + let account_key = Key::Account(account_hash); + let account_alias = "account_alias".to_string(); + + let some_inner = "test"; + let (misc_uref_key, misc_stored_value) = { + ( + Key::URef(URef::new([4u8; UREF_ADDR_LENGTH], AccessRights::all())), + StoredValue::CLValue( + CLValue::from_t(some_inner).expect("should get cl_value from string"), + ), + ) + }; + let misc_alias = "some_alias".to_string(); + + let stored_contract = { + let contract_named_keys = { + let mut named_keys = NamedKeys::new(); + named_keys.insert(account_alias.clone(), account_key); + named_keys.insert(misc_alias.clone(), misc_uref_key); + named_keys + }; + let contract = casper_types::contracts::Contract::new( + [2; 32].into(), + [3; 32].into(), + contract_named_keys, + ContractEntryPoints::new(), + ProtocolVersion::V1_0_0, + ); + StoredValue::Contract(contract) + }; + + let stored_account = { + let account_named_keys = { + let mut named_keys = NamedKeys::new(); + named_keys.insert(contract_alias.clone(), contract_key); + named_keys.insert(misc_alias.clone(), misc_uref_key); + named_keys + }; + let account = casper_types::account::Account::create( + account_hash, + account_named_keys, + URef::default(), + ); + StoredValue::Account(account) + }; + + let (gs, root_hash, _tempdir) = state::lmdb::make_temporary_global_state([ + (account_key, stored_account.clone()), + (contract_key, stored_contract.clone()), + (misc_uref_key, misc_stored_value.clone()), + ]); + let view = gs.checkout(root_hash).unwrap().unwrap(); + let tc = TrackingCopy::new(view, DEFAULT_MAX_QUERY_DEPTH, 
DEFAULT_ENABLE_ENTITY); + + fn unpack( + result: Result, + err_msg: String, + ) -> StoredValue { + if let Ok(TrackingCopyQueryResult::Success { value, .. }) = result { + value + } else { + panic!("{}", err_msg); + } + } + + let expected_contract = unpack( + tc.query(account_key, &[contract_alias.clone()]), + "contract should exist".to_string(), + ); + assert_eq!( + expected_contract, stored_contract, + "unexpected stored value" + ); + + // from account, traverse to contract then to misc val + let expected_account_contract_misc = unpack( + tc.query( + account_key, + &[contract_alias, misc_alias.clone()], // <-- path magic here + ), + "misc value should exist via account to contract".to_string(), + ); + assert_eq!( + expected_account_contract_misc, misc_stored_value, + "unexpected stored value" + ); + + let expected_account = unpack( + tc.query(contract_key, &[account_alias.clone()]), + "account should exist".to_string(), + ); + assert_eq!(expected_account, stored_account, "unexpected stored value"); + + // from contract, traverse to account then to misc val + let expected_contract_account_misc = unpack( + tc.query( + contract_key, + &[account_alias, misc_alias.clone()], // <-- path magic here + ), + "misc value should exist via contract to account".to_string(), + ); + assert_eq!( + expected_contract_account_misc, misc_stored_value, + "unexpected stored value" + ); + + let expected_value = unpack( + tc.query(misc_uref_key, &[]), + "misc value should exist".to_string(), + ); + assert_eq!(expected_value, misc_stored_value, "unexpected stored value"); + + let expected_account_misc = unpack( + tc.query(account_key, &[misc_alias.clone()]), + "misc value should exist via account".to_string(), + ); + assert_eq!( + expected_account_misc, misc_stored_value, + "unexpected stored value" + ); + + let expected_contract_misc = unpack( + tc.query(contract_key, &[misc_alias]), + "misc value should exist via contract".to_string(), + ); + assert_eq!( + expected_contract_misc, 
misc_stored_value, + "unexpected stored value" + ); +} + +fn handle_stored_value_into( + key: Key, + stored_value: StoredValue, +) -> Result { + match (key, stored_value) { + (Key::Dictionary(_), StoredValue::CLValue(cl_value)) => { + let wrapped_dictionary_value = + CLValueDictionary::new(cl_value, vec![0; 32], vec![255; 32]); + let wrapped_cl_value = CLValue::from_t(wrapped_dictionary_value)?; + Ok(StoredValue::CLValue(wrapped_cl_value)) + } + (_, stored_value) => Ok(stored_value), + } +} + +proptest! { + #[test] + fn query_contract_state( + k in key_arb(), // key state is stored at + v in stored_value_arb(), // value in contract state + name in "\\PC*", // human-readable name for state + missing_name in "\\PC*", + hash in u8_slice_32(), // hash for contract key + ) { + let mut named_keys = NamedKeys::new(); + named_keys.insert(name.clone(), k); + let contract = + StoredValue::AddressableEntity(AddressableEntity::new( + [2; 32].into(), + [3; 32].into(), + ProtocolVersion::V1_0_0, + URef::default(), + AssociatedKeys::default(), + ActionThresholds::default(), + EntityKind::SmartContract(ContractRuntimeTag::VmCasperV1) + )); + let contract_key = Key::AddressableEntity(EntityAddr::SmartContract(hash)); + + let value = handle_stored_value_into(k, v.clone()).unwrap(); + + let named_key = Key::NamedKey( NamedKeyAddr::new_from_string(EntityAddr::SmartContract(hash), name.clone()).unwrap()); + let named_value = StoredValue::NamedKey(NamedKeyValue::from_concrete_values(k, name.clone()).unwrap()); + + let (gs, root_hash, _tempdir) = state::lmdb::make_temporary_global_state( + [(k, value), (named_key, named_value) ,(contract_key, contract)] + ); + let view = gs.checkout(root_hash).unwrap().unwrap(); + let tc = TrackingCopy::new(view, DEFAULT_MAX_QUERY_DEPTH, DEFAULT_ENABLE_ENTITY); + let path = vec!(name.clone()); + if let Ok(TrackingCopyQueryResult::Success { value, .. 
}) = tc.query( contract_key, &path) { + assert_eq!(v, value); + } else { + panic!("Query failed when it should not have!"); + } + + if missing_name != name { + let result = tc.query(contract_key, &[missing_name]); + assert_matches!(result, Ok(TrackingCopyQueryResult::ValueNotFound(_))); + } + } + + #[test] + fn query_account_state( + k in key_arb(), // key state is stored at + v in stored_value_arb(), // value in account state + name in "\\PC*", // human-readable name for state + missing_name in "\\PC*", + pk in account_hash_arb(), // account hash + address in account_hash_arb(), // address for account hash + ) { + let purse = URef::new([0u8; 32], AccessRights::READ_ADD_WRITE); + let associated_keys = AssociatedKeys::new(pk, Weight::new(1)); + let entity = AddressableEntity::new( + PackageHash::new([1u8;32]), + ByteCodeHash::default(), + ProtocolVersion::V1_0_0, + purse, + associated_keys, + ActionThresholds::default(), + EntityKind::Account(address) + ); + + let account_key = Key::AddressableEntity(EntityAddr::Account([9;32])); + let value = handle_stored_value_into(k, v.clone()).unwrap(); + + let named_key = Key::NamedKey( NamedKeyAddr::new_from_string(EntityAddr::Account([9;32]), name.clone()).unwrap()); + let named_value = StoredValue::NamedKey(NamedKeyValue::from_concrete_values(k, name.clone()).unwrap()); + + let (gs, root_hash, _tempdir) = state::lmdb::make_temporary_global_state( + [(k, value), (named_key, named_value),(account_key, entity.into())], + ); + let view = gs.checkout(root_hash).unwrap().unwrap(); + let tc = TrackingCopy::new(view, DEFAULT_MAX_QUERY_DEPTH, DEFAULT_ENABLE_ENTITY); + let path = vec!(name.clone()); + if let Ok(TrackingCopyQueryResult::Success { value, .. 
}) = tc.query(account_key, &path) { + assert_eq!(v, value); + } else { + panic!("Query failed when it should not have!"); + } + + if missing_name != name { + let result = tc.query( account_key, &[missing_name]); + assert_matches!(result, Ok(TrackingCopyQueryResult::ValueNotFound(_))); + } + } + + #[test] + fn query_path( + k in key_arb(), // key state is stored at + v in stored_value_arb(), // value in contract state + state_name in "\\PC*", // human-readable name for state + _pk in account_hash_arb(), // account hash + hash in u8_slice_32(), // hash for contract key + ) { + // create contract which knows about value + let mut contract_named_keys = NamedKeys::new(); + contract_named_keys.insert(state_name.clone(), k); + let contract = + StoredValue::AddressableEntity(AddressableEntity::new( + [2; 32].into(), + [3; 32].into(), + ProtocolVersion::V1_0_0, + URef::default(), + AssociatedKeys::default(), + ActionThresholds::default(), + EntityKind::SmartContract(ContractRuntimeTag::VmCasperV1) + )); + let contract_key = Key::AddressableEntity(EntityAddr::SmartContract(hash)); + let contract_named_key = NamedKeyAddr::new_from_string(EntityAddr::SmartContract(hash), state_name.clone()) + .unwrap(); + + let contract_value = NamedKeyValue::from_concrete_values(k, state_name.clone()).unwrap(); + + let value = handle_stored_value_into(k, v.clone()).unwrap(); + + let (gs, root_hash, _tempdir) = state::lmdb::make_temporary_global_state([ + (k, value), + (contract_key, contract), + (Key::NamedKey(contract_named_key), StoredValue::NamedKey(contract_value)) + ]); + let view = gs.checkout(root_hash).unwrap().unwrap(); + let tc = TrackingCopy::new(view, DEFAULT_MAX_QUERY_DEPTH, DEFAULT_ENABLE_ENTITY); + let path = vec!(state_name); + + let results = tc.query( contract_key, &path); + if let Ok(TrackingCopyQueryResult::Success { value, .. 
}) = results { + assert_eq!(v, value); + } else { + panic!("Query failed when it should not have!"); + } + } +} + +#[test] +fn cache_reads_invalidation() { + let mut tc_cache = GenericTrackingCopyCache::new(2, Count); + let (k1, v1) = ( + Key::Hash([1u8; 32]), + StoredValue::CLValue(CLValue::from_t(1_i32).unwrap()), + ); + let (k2, v2) = ( + Key::Hash([2u8; 32]), + StoredValue::CLValue(CLValue::from_t(2_i32).unwrap()), + ); + let (k3, v3) = ( + Key::Hash([3u8; 32]), + StoredValue::CLValue(CLValue::from_t(3_i32).unwrap()), + ); + tc_cache.insert_read(k1, v1); + tc_cache.insert_read(k2, v2.clone()); + tc_cache.insert_read(k3, v3.clone()); + assert!(tc_cache.get(&k1).is_none()); // first entry should be invalidated + assert_eq!(tc_cache.get(&k2), Some(&v2)); // k2 and k3 should be there + assert_eq!(tc_cache.get(&k3), Some(&v3)); +} + +#[test] +fn cache_writes_not_invalidated() { + let mut tc_cache = GenericTrackingCopyCache::new(2, Count); + let (k1, v1) = ( + Key::Hash([1u8; 32]), + StoredValue::CLValue(CLValue::from_t(1_i32).unwrap()), + ); + let (k2, v2) = ( + Key::Hash([2u8; 32]), + StoredValue::CLValue(CLValue::from_t(2_i32).unwrap()), + ); + let (k3, v3) = ( + Key::Hash([3u8; 32]), + StoredValue::CLValue(CLValue::from_t(3_i32).unwrap()), + ); + tc_cache.insert_write(k1, v1.clone()); + tc_cache.insert_read(k2, v2.clone()); + tc_cache.insert_read(k3, v3.clone()); + // Writes are not subject to cache invalidation + assert_eq!(tc_cache.get(&k1), Some(&v1)); + assert_eq!(tc_cache.get(&k2), Some(&v2)); // k2 and k3 should be there + assert_eq!(tc_cache.get(&k3), Some(&v3)); +} + +#[test] +fn query_for_circular_references_should_fail() { + // create self-referential key + let cl_value_key = Key::URef(URef::new([255; 32], AccessRights::READ)); + let cl_value = StoredValue::CLValue(CLValue::from_t(cl_value_key).unwrap()); + let key_name = "key".to_string(); + + // create contract with this self-referential key in its named keys, and also a key referring to + // itself 
in its named keys. + let contract_key = Key::AddressableEntity(EntityAddr::SmartContract([1; 32])); + let contract_name = "contract".to_string(); + let mut named_keys = NamedKeys::new(); + named_keys.insert(key_name.clone(), cl_value_key); + named_keys.insert(contract_name.clone(), contract_key); + let contract = StoredValue::AddressableEntity(AddressableEntity::new( + [2; 32].into(), + [3; 32].into(), + ProtocolVersion::V1_0_0, + URef::default(), + AssociatedKeys::default(), + ActionThresholds::default(), + EntityKind::SmartContract(ContractRuntimeTag::VmCasperV1), + )); + + let name_key_cl_value = Key::NamedKey( + NamedKeyAddr::new_from_string(EntityAddr::SmartContract([1; 32]), "key".to_string()) + .unwrap(), + ); + let key_value = StoredValue::NamedKey( + NamedKeyValue::from_concrete_values(cl_value_key, "key".to_string()).unwrap(), + ); + + let name_key_contract = Key::NamedKey( + NamedKeyAddr::new_from_string(EntityAddr::SmartContract([1; 32]), "contract".to_string()) + .unwrap(), + ); + let key_value_contract = StoredValue::NamedKey( + NamedKeyValue::from_concrete_values(contract_key, "contract".to_string()).unwrap(), + ); + + let (global_state, root_hash, _tempdir) = state::lmdb::make_temporary_global_state([ + (cl_value_key, cl_value), + (contract_key, contract), + (name_key_cl_value, key_value), + (name_key_contract, key_value_contract), + ]); + let view = global_state.checkout(root_hash).unwrap().unwrap(); + let tracking_copy = TrackingCopy::new(view, DEFAULT_MAX_QUERY_DEPTH, DEFAULT_ENABLE_ENTITY); + + // query for the self-referential key (second path element of arbitrary value required to cause + // iteration _into_ the self-referential key) + let path = vec![key_name, String::new()]; + if let Ok(TrackingCopyQueryResult::CircularReference(msg)) = + tracking_copy.query(contract_key, &path) + { + let expected_path_msg = format!("at path: {:?}/{}", contract_key, path[0]); + assert!(msg.contains(&expected_path_msg)); + } else { + panic!("Query didn't fail 
with a circular reference error"); + } + + // query for itself in its own named keys + let path = vec![contract_name]; + if let Ok(TrackingCopyQueryResult::CircularReference(msg)) = + tracking_copy.query(contract_key, &path) + { + let expected_path_msg = format!("at path: {:?}/{}", contract_key, path[0]); + assert!(msg.contains(&expected_path_msg)); + } else { + panic!("Query didn't fail with a circular reference error"); + } +} + +#[test] +fn validate_query_proof_should_work() { + let a_e_key = Key::AddressableEntity(EntityAddr::Account([30; 32])); + let a_e = StoredValue::AddressableEntity(AddressableEntity::new( + PackageHash::new([20; 32]), + ByteCodeHash::default(), + ProtocolVersion::V1_0_0, + URef::default(), + AssociatedKeys::new(AccountHash::new([3; 32]), Weight::new(1)), + ActionThresholds::default(), + EntityKind::Account(AccountHash::new([3; 32])), + )); + + let c_e_key = Key::AddressableEntity(EntityAddr::SmartContract([5; 32])); + let c_e = StoredValue::AddressableEntity(AddressableEntity::new( + [2; 32].into(), + [3; 32].into(), + ProtocolVersion::V1_0_0, + URef::default(), + AssociatedKeys::default(), + ActionThresholds::default(), + EntityKind::SmartContract(ContractRuntimeTag::VmCasperV1), + )); + + let c_nk = "abc".to_string(); + + let (nk, nkv) = { + let entity_addr = if let Key::AddressableEntity(addr) = a_e_key { + addr + } else { + panic!("unexpected key variant"); + }; + let named_key_addr = NamedKeyAddr::new_from_string(entity_addr, c_nk.clone()) + .expect("must create named key entry"); + ( + Key::NamedKey(named_key_addr), + StoredValue::NamedKey( + NamedKeyValue::from_concrete_values(c_e_key, c_nk.clone()).unwrap(), + ), + ) + }; + + let initial_data = vec![(a_e_key, a_e), (c_e_key, c_e.clone()), (nk, nkv)]; + + // persist them + let (global_state, root_hash, _tempdir) = + state::lmdb::make_temporary_global_state(initial_data); + + let view = global_state + .checkout(root_hash) + .expect("should checkout") + .expect("should have view"); + 
+ let tracking_copy = TrackingCopy::new(view, DEFAULT_MAX_QUERY_DEPTH, DEFAULT_ENABLE_ENTITY); + + let path = &[c_nk]; + + let result = tracking_copy.query(a_e_key, path).expect("should query"); + + let proofs = if let TrackingCopyQueryResult::Success { proofs, .. } = result { + proofs + } else { + panic!("query was not successful: {:?}", result) + }; + + let expected_key_trace = &[a_e_key, nk, c_e_key]; + + // Happy path + tracking_copy::validate_query_merkle_proof(&root_hash, &proofs, expected_key_trace, &c_e) + .expect("should validate"); +} + +#[test] +fn get_keys_should_return_keys_in_the_account_keyspace() { + // account 1 + let account_1_hash = AccountHash::new([1; 32]); + + let account_cl_value = CLValue::from_t(AddressableEntityHash::new([20; 32])).unwrap(); + let account_1_value = StoredValue::CLValue(account_cl_value); + let account_1_key = Key::Account(account_1_hash); + + // account 2 + let account_2_hash = AccountHash::new([2; 32]); + + let fake_account_cl_value = CLValue::from_t(AddressableEntityHash::new([21; 32])).unwrap(); + let account_2_value = StoredValue::CLValue(fake_account_cl_value); + let account_2_key = Key::Account(account_2_hash); + + // random value + let cl_value = CLValue::from_t(U512::zero()).expect("should convert"); + let uref_value = StoredValue::CLValue(cl_value); + let uref_key = Key::URef(URef::new([8; 32], AccessRights::READ_ADD_WRITE)); + + // persist them + let (global_state, root_hash, _tempdir) = state::lmdb::make_temporary_global_state([ + (account_1_key, account_1_value), + (account_2_key, account_2_value), + (uref_key, uref_value), + ]); + + let view = global_state + .checkout(root_hash) + .expect("should checkout") + .expect("should have view"); + + let tracking_copy = TrackingCopy::new(view, DEFAULT_MAX_QUERY_DEPTH, DEFAULT_ENABLE_ENTITY); + + let key_set = tracking_copy.get_keys(&KeyTag::Account).unwrap(); + + assert_eq!(key_set.len(), 2); + assert!(key_set.contains(&account_1_key)); + 
assert!(key_set.contains(&account_2_key)); + assert!(!key_set.contains(&uref_key)); +} + +#[test] +fn get_keys_should_return_keys_in_the_uref_keyspace() { + // account + let account_hash = AccountHash::new([1; 32]); + + let account_cl_value = CLValue::from_t(AddressableEntityHash::new([20; 32])).unwrap(); + let account_value = StoredValue::CLValue(account_cl_value); + let account_key = Key::Account(account_hash); + + // random value 1 + let cl_value = CLValue::from_t(U512::zero()).expect("should convert"); + let uref_1_value = StoredValue::CLValue(cl_value); + let uref_1_key = Key::URef(URef::new([8; 32], AccessRights::READ_ADD_WRITE)); + + // random value 2 + let cl_value = CLValue::from_t(U512::one()).expect("should convert"); + let uref_2_value = StoredValue::CLValue(cl_value); + let uref_2_key = Key::URef(URef::new([9; 32], AccessRights::READ_ADD_WRITE)); + + // persist them + let (global_state, root_hash, _tempdir) = state::lmdb::make_temporary_global_state([ + (account_key, account_value), + (uref_1_key, uref_1_value), + (uref_2_key, uref_2_value), + ]); + + let view = global_state + .checkout(root_hash) + .expect("should checkout") + .expect("should have view"); + + let mut tracking_copy = TrackingCopy::new(view, DEFAULT_MAX_QUERY_DEPTH, DEFAULT_ENABLE_ENTITY); + + let key_set = tracking_copy.get_keys(&KeyTag::URef).unwrap(); + + assert_eq!(key_set.len(), 2); + assert!(key_set.contains(&uref_1_key.normalize())); + assert!(key_set.contains(&uref_2_key.normalize())); + assert!(!key_set.contains(&account_key)); + + // random value 3 + let cl_value = CLValue::from_t(U512::from(2)).expect("should convert"); + let uref_3_value = StoredValue::CLValue(cl_value); + let uref_3_key = Key::URef(URef::new([10; 32], AccessRights::READ_ADD_WRITE)); + tracking_copy.write(uref_3_key, uref_3_value); + + let key_set = tracking_copy.get_keys(&KeyTag::URef).unwrap(); + + assert_eq!(key_set.len(), 3); + assert!(key_set.contains(&uref_1_key.normalize())); + 
assert!(key_set.contains(&uref_2_key.normalize())); + assert!(key_set.contains(&uref_3_key.normalize())); + assert!(!key_set.contains(&account_key)); +} + +#[test] +fn get_keys_should_handle_reads_from_empty_trie() { + let (global_state, root_hash, _tempdir) = state::lmdb::make_temporary_global_state([]); + + let view = global_state + .checkout(root_hash) + .expect("should checkout") + .expect("should have view"); + + let mut tracking_copy = TrackingCopy::new(view, DEFAULT_MAX_QUERY_DEPTH, DEFAULT_ENABLE_ENTITY); + + let key_set = tracking_copy.get_keys(&KeyTag::URef).unwrap(); + + assert_eq!(key_set.len(), 0); + assert!(key_set.is_empty()); + + // persist random value 1 + let cl_value = CLValue::from_t(U512::zero()).expect("should convert"); + let uref_1_value = StoredValue::CLValue(cl_value); + let uref_1_key = Key::URef(URef::new([8; 32], AccessRights::READ_ADD_WRITE)); + tracking_copy.write(uref_1_key, uref_1_value); + + let key_set = tracking_copy.get_keys(&KeyTag::URef).unwrap(); + + assert_eq!(key_set.len(), 1); + assert!(key_set.contains(&uref_1_key.normalize())); + + // persist random value 2 + let cl_value = CLValue::from_t(U512::one()).expect("should convert"); + let uref_2_value = StoredValue::CLValue(cl_value); + let uref_2_key = Key::URef(URef::new([9; 32], AccessRights::READ_ADD_WRITE)); + tracking_copy.write(uref_2_key, uref_2_value); + + let key_set = tracking_copy.get_keys(&KeyTag::URef).unwrap(); + + assert_eq!(key_set.len(), 2); + assert!(key_set.contains(&uref_1_key.normalize())); + assert!(key_set.contains(&uref_2_key.normalize())); + + // persist account + let account_hash = AccountHash::new([1; 32]); + + let account_value = CLValue::from_t(AddressableEntityHash::new([10; 32])).unwrap(); + let account_value = StoredValue::CLValue(account_value); + let account_key = Key::Account(account_hash); + tracking_copy.write(account_key, account_value); + + assert_eq!(key_set.len(), 2); + assert!(key_set.contains(&uref_1_key.normalize())); + 
assert!(key_set.contains(&uref_2_key.normalize())); + assert!(!key_set.contains(&account_key)); + + // persist random value 3 + let cl_value = CLValue::from_t(U512::from(2)).expect("should convert"); + let uref_3_value = StoredValue::CLValue(cl_value); + let uref_3_key = Key::URef(URef::new([10; 32], AccessRights::READ_ADD_WRITE)); + tracking_copy.write(uref_3_key, uref_3_value); + + let key_set = tracking_copy.get_keys(&KeyTag::URef).unwrap(); + + assert_eq!(key_set.len(), 3); + assert!(key_set.contains(&uref_1_key.normalize())); + assert!(key_set.contains(&uref_2_key.normalize())); + assert!(key_set.contains(&uref_3_key.normalize())); + assert!(!key_set.contains(&account_key)); +} + +fn val_to_hashaddr>(value: T) -> HashAddr { + let mut addr = HashAddr::default(); + value.into().to_big_endian(&mut addr); + addr +} + +#[test] +fn query_with_large_depth_with_fixed_path_should_fail() { + let mut pairs = Vec::new(); + let mut contract_keys = Vec::new(); + let mut path = Vec::new(); + + const WASM_OFFSET: u64 = 1_000_000; + const PACKAGE_OFFSET: u64 = 1_000; + + // create a long chain of contract at address X with a named key that points to a contract X+1 + // which has a size that exceeds configured max query depth. 
+ for value in 1..=DEFAULT_MAX_QUERY_DEPTH { + let contract_addr = EntityAddr::SmartContract(val_to_hashaddr(value)); + let contract_key = Key::AddressableEntity(contract_addr); + let next_contract_key = + Key::AddressableEntity(EntityAddr::SmartContract(val_to_hashaddr(value + 1))); + let contract_name = format!("contract{}", value); + + let named_key = + NamedKeyAddr::new_from_string(contract_addr, contract_name.clone()).unwrap(); + + let named_key_value = + NamedKeyValue::from_concrete_values(next_contract_key, contract_name.clone()).unwrap(); + + pairs.push(( + Key::NamedKey(named_key), + StoredValue::NamedKey(named_key_value), + )); + + let contract = StoredValue::AddressableEntity(AddressableEntity::new( + val_to_hashaddr(PACKAGE_OFFSET + value).into(), + val_to_hashaddr(WASM_OFFSET + value).into(), + ProtocolVersion::V1_0_0, + URef::default(), + AssociatedKeys::default(), + ActionThresholds::default(), + EntityKind::SmartContract(ContractRuntimeTag::VmCasperV1), + )); + pairs.push((contract_key, contract)); + contract_keys.push(contract_key); + path.push(contract_name.clone()); + } + + let (global_state, root_hash, _tempdir) = state::lmdb::make_temporary_global_state(pairs); + + let view = global_state.checkout(root_hash).unwrap().unwrap(); + let tracking_copy = TrackingCopy::new(view, DEFAULT_MAX_QUERY_DEPTH, DEFAULT_ENABLE_ENTITY); + + let contract_key = contract_keys[0]; + let result = tracking_copy.query(contract_key, &path); + + assert!( + matches!(result, Ok(TrackingCopyQueryResult::DepthLimit { + depth + }) if depth == DEFAULT_MAX_QUERY_DEPTH), + "{:?}", + result + ); +} + +#[test] +fn query_with_large_depth_with_urefs_should_fail() { + let mut pairs = Vec::new(); + let mut uref_keys = Vec::new(); + + const WASM_OFFSET: u64 = 1_000_000; + const PACKAGE_OFFSET: u64 = 1_000; + let root_key_name = "key".to_string(); + + // create a long chain of urefs at address X with a uref that points to a uref X+1 + // which has a size that exceeds configured max 
query depth. + for value in 1..=DEFAULT_MAX_QUERY_DEPTH { + let uref_addr = val_to_hashaddr(value); + let uref = Key::URef(URef::new(uref_addr, AccessRights::READ)); + + let next_uref_addr = val_to_hashaddr(value + 1); + let next_uref = Key::URef(URef::new(next_uref_addr, AccessRights::READ)); + let next_cl_value = StoredValue::CLValue(CLValue::from_t(next_uref).unwrap()); + + pairs.push((uref, next_cl_value)); + uref_keys.push(uref); + } + + let contract_addr = EntityAddr::SmartContract([0; 32]); + + let named_key = NamedKeyAddr::new_from_string(contract_addr, root_key_name.clone()).unwrap(); + + let named_key_value = + NamedKeyValue::from_concrete_values(uref_keys[0], root_key_name.clone()).unwrap(); + + pairs.push(( + Key::NamedKey(named_key), + StoredValue::NamedKey(named_key_value), + )); + + let contract = StoredValue::AddressableEntity(AddressableEntity::new( + val_to_hashaddr(PACKAGE_OFFSET).into(), + val_to_hashaddr(WASM_OFFSET).into(), + ProtocolVersion::V1_0_0, + URef::default(), + AssociatedKeys::default(), + ActionThresholds::default(), + EntityKind::SmartContract(ContractRuntimeTag::VmCasperV1), + )); + let contract_key = Key::AddressableEntity(contract_addr); + pairs.push((contract_key, contract)); + + let (global_state, root_hash, _tempdir) = state::lmdb::make_temporary_global_state(pairs); + + let view = global_state.checkout(root_hash).unwrap().unwrap(); + let tracking_copy = TrackingCopy::new(view, DEFAULT_MAX_QUERY_DEPTH, DEFAULT_ENABLE_ENTITY); + + // query for the beginning of a long chain of urefs + // (second path element of arbitrary value required to cause iteration _into_ the nested key) + let path = vec![root_key_name, String::new()]; + let result = tracking_copy.query(contract_key, &path); + + assert!( + matches!(result, Ok(TrackingCopyQueryResult::DepthLimit { + depth + }) if depth == DEFAULT_MAX_QUERY_DEPTH), + "{:?}", + result + ); +} + +#[test] +fn add_should_work() { + let mut pairs = Vec::new(); + let key = 
Key::URef(URef::default()); + let initial_value = CLValue::from_t(1_i32).unwrap(); + pairs.push((key, StoredValue::CLValue(initial_value))); + + let (global_state, root_hash, _tempdir) = state::lmdb::make_temporary_global_state(pairs); + + let (effects, cache) = { + let view = global_state.checkout(root_hash).unwrap().unwrap(); + let mut tracking_copy = + TrackingCopy::new(view, DEFAULT_MAX_QUERY_DEPTH, DEFAULT_ENABLE_ENTITY); + assert!( + matches!(tracking_copy.get(&key), Ok(Some(StoredValue::CLValue(initial_value))) if initial_value.clone().into_t::().unwrap() == 1) + ); + tracking_copy + .add(key, StoredValue::CLValue(CLValue::from_t(1_i32).unwrap())) + .unwrap(); + assert!( + matches!(tracking_copy.get(&key), Ok(Some(StoredValue::CLValue(initial_value))) if initial_value.clone().into_t::().unwrap() == 2) + ); + (tracking_copy.effects(), tracking_copy.cache()) + }; + + let view = global_state.checkout(root_hash).unwrap().unwrap(); + let mut tc = TrackingCopy::new(view, DEFAULT_MAX_QUERY_DEPTH, DEFAULT_ENABLE_ENTITY); + assert!( + matches!(tc.get(&key), Ok(Some(StoredValue::CLValue(initial_value))) if initial_value.clone().into_t::().unwrap() == 1) + ); + tc.apply_changes(effects, cache, Messages::new()); + assert!( + matches!(tc.get(&key), Ok(Some(StoredValue::CLValue(initial_value))) if initial_value.clone().into_t::().unwrap() == 2) + ); +} diff --git a/types/CHANGELOG.md b/types/CHANGELOG.md new file mode 100644 index 0000000000..b0b469bb7a --- /dev/null +++ b/types/CHANGELOG.md @@ -0,0 +1,435 @@ +# Changelog + +All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog]. 
+ +[comment]: <> (Added: new features) +[comment]: <> (Changed: changes in existing functionality) +[comment]: <> (Deprecated: soon-to-be removed features) +[comment]: <> (Removed: now removed features) +[comment]: <> (Fixed: any bug fixes) +[comment]: <> (Security: in case of vulnerabilities) + +## [UNRELEASED] casper-types 6.0.0 + +### Added +- TransactionInvocationTarget::ByPackageHash::protocol_version_major field +- TransactionInvocationTarget::ByPackageName::protocol_version_major field +- New variant PackageIdentifier::HashWithVersion +- New variant PackageIdentifier::NameWithVersion + +## casper-types 5.0.0 + +### Added + +- enum EntityKind +- enum addressable_entity::EntityKindTag +- enum EntityAddr +- struct addressable_entity::NamedKeyAddr +- struct addressable_entity::NamedKeyValue +- struct addressable_entity::MessageTopics +- enum addressable_entity::MessageTopicError +- struct AddressableEntity +- struct addressable_entity::ActionThresholds +- enum addressable_entity::ActionType +- struct addressable_entity::AssociatedKeys +- struct contract::EntryPoint +- enum EntryPointType +- enum EntryPointPayment +- struct EntryPoint +- enum EntryPointAddr +- enum EntryPointValue +- enum addressable_entity::FromAccountHashStrError +- enum addressable_entity::SetThresholdFailure +- struct addressable_entity::TryFromSliceForAccountHashError +- struct addressable_entity::NamedKeys +- struct BlockV1 +- struct BlockBodyV1 +- struct BlockV2 +- struct BlockHeaderV2 +- struct BlockBodyV2 +- struct ChainNameDigest +- enum EraEnd +- struct EraEndV1 +- struct EraEndV2 +- struct EraReport +- enum FinalitySignature +- struct FinalitySignatureV1 +- struct FinalitySignatureV2 +- struct FinalitySignatureId +- struct JsonBlockWithSignatures +- struct RewardedSignatures +- struct SingleBlockRewardedSignatures +- enum Rewards +- struct BlockWithSignatures +- enum BlockHeaderWithSignaturesValidationError +- struct BlockHeaderWithSignatures +- enum BlockValidationError (moved from 
casper-node) +- enum Block (don't confuse with previous `Block` struct, see `Changed` section for details) +- enum BlockHeader (don't confuse with previous `BlockHeader` struct, see `Changed` section for details) +- struct HoldsEpoch +- struct addressable_entity::TryFromSliceForContractHashError +- enum addressable_entity::FromStrError +- enum contract_messages::FromStrError +- enum ByteCodeAddr +- struct ByteCodeHash +- enum ByteCodeKind +- struct ByteCode +- struct Chainspec +- struct AccountsConfig +- struct AccountConfig +- struct DelegatorConfig +- struct GenesisValidator +- struct AdministratorAccount +- enum GenesisAccount +- struct ValidatorConfig +- enum ActivationPoint +- struct ChainspecRawBytes +- struct CoreConfig +- enum ConsensusProtocolName +- enum LegacyRequiredFinality +- enum FeeHandling +- struct GenesisConfig +- struct GlobalStateUpdateConfig +- struct GlobalStateUpdate +- enum GlobalStateUpdateError +- struct HighwayConfig +- enum HoldBalanceHandling +- struct NetworkConfig +- struct NextUpgrade +- enum PricingHandling +- struct ProtocolConfig +- enum RefundHandling +- struct TransactionConfig +- struct TransactionLimitsDefinition +- struct TransactionV1Config +- struct ProtocolUpgradeConfig +- struct VacancyConfig +- struct AuctionCosts +- struct ChainspecRegistry +- struct HandlePaymentCosts +- struct HostFunctionCosts +- struct MessageLimits +- struct MintCosts +- struct BrTableCost +- struct ControlFlowCosts +- struct OpcodeCosts +- struct StandardPaymentCosts +- struct StorageCosts +- struct SystemConfig +- struct WasmConfig +- struct WasmV1Config +- struct ChecksumRegistry +- struct SystemEntityRegistry +- struct contract_messages::MessageAddr +- type contract_messages::Messages +- struct contract_messages::MessageChecksum +- enum contract_messages::MessagePayload +- struct contract_messages::Message +- struct contract_messages::TopicNameHash +- struct contract_messages::MessageTopicSummary +- struct Contract +- struct EntryPoints +- 
struct Digest
+- struct DigestError
+- struct ChunkWithProof
+- enum MerkleConstructionError
+- enum MerkleVerificationError
+- struct IndexedMerkleProof
+- struct DisplayIter
+- struct execution::Effects;
+- enum execution::ExecutionResult (not to be confused with previous `ExecutionResult`, see `Changed` section for details)
+- struct execution::ExecutionResultV2
+- struct execution::TransformV2
+- struct execution::TransformError
+- struct execution::TransformInstruction
+- struct execution::TransformKindV2
+- struct execution::PaymentInfo
+- enum global_state::TrieMerkleProofStep
+- enum global_state::TrieMerkleProof
+- struct Pointer
+- trait GasLimited
+- enum AddressableEntityIdentifier
+- struct Approval
+- struct ApprovalsHash
+- enum InvalidDeploy
+- enum DeployDecodeFromJsonError
+- struct ExecutableDeployItem,
+- enum ExecutableDeployItemIdentifier
+- struct ExecutionInfo
+- enum InitiatorAddr,
+- enum InvalidTransaction,
+- enum InvalidTransactionV1
+- enum PackageIdentifier
+- enum PricingMode
+- enum PricingModeError
+- enum Transaction
+- enum TransactionEntryPoint,
+- enum TransactionHash
+- struct TransactionId
+- enum TransactionInvocationTarget
+- enum TransactionRuntime
+- enum TransactionScheduling
+- enum TransactionTarget
+- struct TransactionV1,
+- struct TransactionV1Payload,
+- struct TransactionV1Hash
+- enum TransactionV1DecodeFromJsonError
+- enum TransactionV1Error
+- struct TransactionV1ExcessiveSizeError
+- enum TransferTarget
+- struct TransferV2
+- enum ValidatorChange
+- type contracts::ProtocolVersionMajor
+- type EntityVersion
+- struct EntityVersionKey
+- struct EntityVersions
+- struct PackageHash
+- enum PackageStatus
+- struct Package
+- struct PeerEntry
+- struct Peers
+- enum system::auction::BidAddr
+- enum system::auction::BidAddrTag
+- enum system::auction::BidKind
+- enum system::auction::BidKindTag
+- enum system::auction::Bridge
+- enum system::auction::Reservation
+- enum system::auction::ValidatorBid;
+- enum 
system::auction::ValidatorBids +- enum system::auction::DelegatorBids +- enum system::auction::ValidatorCredits +- enum system::auction::Staking +- trait system::auction::BidsExt +- enum system::auction::Error has new variants: ForgedReference, MissingPurse, ValidatorBidExistsAlready,BridgeRecordChainTooLong,UnexpectedBidVariant, DelegationAmountTooLarge +- enum system::CallerTag +- enum system::Caller +- enum system::handle_payment::Error +- enum system::handle_payment::Error has new variants IncompatiblePaymentSettings, UnexpectedKeyVariant +- enum system::mint::BalanceHoldAddrTag +- enum system::mint::Error has new variant: ForgedReference +- enum system::reservation::ReservationKind +- method CLValue::to_t +- function handle_stored_dictionary_value +- methods in ContractWasm: `new` and `take_bytes` +- method `lock_status` in struct ContractPackage +- function bytesrepr::allocate_buffer_for_size(expected_size: usize) -> Result, Error> +- Enum EntryPointAccess has new variant `Template` added + +### Changed + +- pub enum ApiError has new variants: MessageTopicAlreadyRegistered, MaxTopicsNumberExceeded, MaxTopicNameSizeExceeded, MessageTopicNotRegistered, MessageTopicFull, MessageTooLarge, MaxMessagesPerBlockExceeded,NotAllowedToAddContractVersion,InvalidDelegationAmountLimits,InvalidCallerInfoRequest +- struct AuctionState#bids is now a BTreeMap instead of Vec. This field is still serialized as an array. Due to this change the elements of the array will have more fields than before (added `validator_public_key`, `vesting_schedule`). 
+- Variants of enum EntryPointType changed +- Struct Parameter moved from contracts to addressable_entity::entry_points +- struct EraId has new methods `iter_range_inclusive`, `increment` +- struct ExecutionEffect moved to module execution::execution_result_v1 +- enum OpKind moved to module execution::execution_result_v1 +- struct Operation moved to module execution::execution_result_v1 +- enum Transform changed name to TransformKindV1, moved to module execution::execution_result_v1 and has new variants (WriteAddressableEntity, Prune, WriteBidKind) +- enum ExecutionResult changed name to ExecutionResultV1, moved to module execution::execution_result_v1 +- struct TransformEntry changed name to TransformV1 and moved to module execution::execution_result_v1 +- moved NamedKey to module execution::execution_result_v1 +- KeyTag::SystemContractRegistry variant changed name to KeyTag::SystemEntityRegistry +- variants for KeyTag enum: BidAddr = 15, Package = 16, AddressableEntity = 17, ByteCode = 18, Message = 19, NamedKey = 20, BlockGlobal = 21, BalanceHold = 22, EntryPoint = 23, +- enum Key::SystemContractRegistry changed name to Key::SystemEntityRegistry +- variants for enum Key: BidAddr, Package, AddressableEntity, ByteCode, Message, NamedKey, BlockGlobal, BalanceHold, EntryPoint, +- struct ExcessiveSizeError changed name to DeployExcessiveSizeError +- struct Transfer changed name to TransferV1 +- enum GlobalStateIdentifier +- enum StoredValue has new variants: Transfer, AddressableEntity, BidKind, Package, ByteCode, MessageTopic, Message, NamedKey,Reservation,EntryPoint, +- enum system::SystemContractType changed name to system::SystemEntityType +- enum system::handle_payment::Error variant SystemFunctionCalledByUserAccount changed to InvalidCaller +- struct EntryPoint has a new field `entry_point_payment` +- struct BlockHeader was renamed to BlockHeaderV1 and used as a variant in enum BlockHeader +- struct Block was renamed to BlockV1 and used as a variant in enum 
Block +- Gas::from_motes now takes `u8` instead of `u64` as second parameter + +### Removed + +- type Groups (there is now a struct with that name) +- type EntryPointsMap +- type NamedKeys +- methods `groups_mut`, `add_group`, `lookup_contract_hash`, `is_version_enabled`, `is_contract_enabled`, `insert_contract_version`, `disable_contract_version`, `enable_contract_version`, `enabled_versions`, `remove_group`, `next_contract_version_for`, `current_contract_version`, `current_contract_hash` in struct ContractPackage + +## [Unreleased] (node 1.5.4) + +### Changed + +- Remove filesystem I/O functionality from the `std` feature, and gated this behind a new feature `std-fs-io` which depends upon `std`. + +## 4.0.1 + +### Added + +- Add a new `SyncHandling` enum, which allows a node to opt out of historical sync. + +### Changed + +- Update `k256` to version 0.13.1. + +### Removed + +- Remove `ExecutionResult::successful_transfers`. + +### Security + +- Update `ed25519-dalek` to version 2.0.0 as mitigation for [RUSTSEC-2022-0093](https://rustsec.org/advisories/RUSTSEC-2022-0093) + +## 3.0.0 + +### Added + +- Add new `bytesrepr::Error::NotRepresentable` error variant that represents values that are not representable by the serialization format. +- Add new `Key::Unbond` key variant under which the new unbonding information (to support redelegation) is written. +- Add new `Key::ChainspecRegistry` key variant under which the `ChainspecRegistry` is written. +- Add new `Key::ChecksumRegistry` key variant under which a registry of checksums for a given block is written. There are two checksums in the registry, one for the execution results and the other for the approvals of all deploys in the block. +- Add new `StoredValue::Unbonding` variant to support redelegating. +- Add a new type `WithdrawPurses` which is meant to represent `UnbondingPurses` as they exist in current live networks. 
+
+### Changed
+
+- Extend `UnbondingPurse` to take a new field `new_validator` which represents the validator to whom tokens will be re-delegated.
+- Increase `DICTIONARY_ITEM_KEY_MAX_LENGTH` to 128.
+- Change prefix of formatted string representation of `ContractPackageHash` from "contract-package-wasm" to "contract-package-". Parsing from the old format is still supported.
+- Apply `#[non_exhaustive]` to error enums.
+- Change Debug output of `DeployHash` to hex-encoded string rather than a list of integers.
+
+### Fixed
+
+- Fix some integer casts, where failure is now detected and reported via new error variant `NotRepresentable`.
+
+## 2.0.0
+
+### Fixed
+
+- Republish v1.6.0 as v2.0.0 due to missed breaking change in API (addition of new variant to `Key`).
+
+## 1.6.0 [YANKED]
+
+### Added
+
+- Extend asymmetric key functionality, available via feature `std` (moved from `casper-node` crate).
+- Provide `Timestamp` and `TimeDiff` types for time operations, with extended functionality available via feature `std` (moved from `casper-node` crate).
+- Provide test-only functionality, in particular a seedable RNG `TestRng` which outputs its seed on test failure. Available via a new feature `testing`.
+- Add new `Key::EraSummary` key variant under which the era summary info is written on each switch block execution.
+
+### Deprecated
+
+- Deprecate `gens` feature: its functionality is included in the new `testing` feature.
+
+## 1.5.0
+
+### Added
+
+- Provide types and functionality to support improved access control inside execution engine.
+- Provide `CLTyped` impl for `ContractPackage` to allow it to be passed into contracts.
+
+### Fixed
+
+- Limit parsing of CLTyped objects to a maximum of 50 types deep.
+
+## 1.4.6 - 2021-12-29
+
+### Changed
+
+- Disable checksummed-hex encoding, but leave checksummed-hex decoding in place.
+
+## 1.4.5 - 2021-12-06
+
+### Added
+
+- Add function to `auction::MintProvider` trait to support minting into an existing purse. 
+ +### Changed + +- Change checksummed hex implementation to use 32 byte rather than 64 byte blake2b digests. + +## [1.4.4] - 2021-11-18 + +### Fixed + +- Revert the accidental change to the `std` feature causing a broken build when this feature is enabled. + +## [1.4.3] - 2021-11-17 [YANKED] + +## [1.4.2] - 2021-11-13 [YANKED] + +### Added + +- Add checksummed hex encoding following a scheme similar to [EIP-55](https://eips.ethereum.org/EIPS/eip-55). + +## [1.4.1] - 2021-10-23 + +No changes. + +## [1.4.0] - 2021-10-21 [YANKED] + +### Added + +- Add `json-schema` feature, disabled by default, to enable many types to be used to produce JSON-schema data. +- Add implicit `datasize` feature, disabled by default, to enable many types to derive the `DataSize` trait. +- Add `StoredValue` types to this crate. + +### Changed + +- Support building and testing using stable Rust. +- Allow longer hex string to be presented in `json` files. Current maximum is increased from 100 to 150 characters. +- Improve documentation and `Debug` impls for `ApiError`. + +### Deprecated + +- Feature `std` is deprecated as it is now a no-op, since there is no benefit to linking the std lib via this crate. + +## [1.3.0] - 2021-07-19 + +### Changed + +- Restrict summarization when JSON pretty-printing to contiguous long hex strings. +- Update pinned version of Rust to `nightly-2021-06-17`. + +### Removed + +- Remove ability to clone `SecretKey`s. + +## [1.2.0] - 2021-05-27 + +### Changed + +- Change to Apache 2.0 license. +- Return a `Result` from the constructor of `SecretKey` rather than potentially panicking. +- Improve `Key` error reporting and tests. + +### Fixed + +- Fix `Key` deserialization. + +## [1.1.1] - 2021-04-19 + +No changes. + +## [1.1.0] - 2021-04-13 [YANKED] + +No changes. + +## [1.0.1] - 2021-04-08 + +No changes. + +## [1.0.0] - 2021-03-30 + +### Added + +- Initial release of types for use by software compatible with Casper mainnet. 
+ +[Keep a Changelog]: https://keepachangelog.com/en/1.0.0 +[unreleased]: https://github.com/casper-network/casper-node/compare/24fc4027a...dev +[1.4.3]: https://github.com/casper-network/casper-node/compare/2be27b3f5...24fc4027a +[1.4.2]: https://github.com/casper-network/casper-node/compare/v1.4.1...2be27b3f5 +[1.4.1]: https://github.com/casper-network/casper-node/compare/v1.4.0...v1.4.1 +[1.4.0]: https://github.com/casper-network/casper-node/compare/v1.3.0...v1.4.0 +[1.3.0]: https://github.com/casper-network/casper-node/compare/v1.2.0...v1.3.0 +[1.2.0]: https://github.com/casper-network/casper-node/compare/v1.1.1...v1.2.0 +[1.1.1]: https://github.com/casper-network/casper-node/compare/v1.0.1...v1.1.1 +[1.1.0]: https://github.com/casper-network/casper-node/compare/v1.0.1...v1.1.1 +[1.0.1]: https://github.com/casper-network/casper-node/compare/v1.0.0...v1.0.1 +[1.0.0]: https://github.com/casper-network/casper-node/releases/tag/v1.0.0 diff --git a/types/Cargo.toml b/types/Cargo.toml index 122a49c46f..cbec1a98ed 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -1,65 +1,95 @@ [package] name = "casper-types" -version = "1.0.0" # when updating, also update 'html_root_url' in lib.rs -authors = ["Fraser Hutchison "] -edition = "2018" -description = "Types used to allow creation of Wasm contracts and tests for use on the Casper network." +version = "6.0.1" # when updating, also update 'html_root_url' in lib.rs +authors = ["Ed Hastings "] +edition = "2021" +description = "Types shared by many casper crates for use on the Casper network." 
readme = "README.md" documentation = "https://docs.rs/casper-types" -homepage = "https://casperlabs.io" -repository = "https://github.com/CasperLabs/casper-node/tree/master/types" -license-file = "../LICENSE" +homepage = "https://casper.network" +repository = "https://github.com/casper-network/casper-node/tree/master/types" +license = "Apache-2.0" [dependencies] -base16 = { version = "0.2.1", default-features = false } +base16 = { version = "0.2.1", default-features = false, features = ["alloc"] } base64 = { version = "0.13.0", default-features = false } bitflags = "1" +bincode = { version = "1.3.1", optional = true } blake2 = { version = "0.9.0", default-features = false } -datasize = { version = "0.2.4", default-features = false } -displaydoc = { version = "0.1", default-features = false, optional = true } -ed25519-dalek = { version = "1.0.0", default-features = false, features = ["rand", "u64_backend"] } -hex = { version = "0.4.2", default-features = false } +datasize = { version = "0.2.15", optional = true } +derp = { version = "0.0.14", optional = true } +ed25519-dalek = { version = "2.1.1", default-features = false, features = ["alloc", "zeroize"] } +getrandom = { version = "0.2.0", features = ["rdrand", "js"], optional = true } +hex = { version = "0.4.2", default-features = false, features = ["alloc"] } hex_fmt = "0.3.0" -k256 = { version = "0.7.2", default-features = false, features = ["ecdsa", "zeroize"] } -num-derive = { version = "0.3.0", default-features = false } +humantime = { version = "2", optional = true } +itertools = { version = "0.10.3", default-features = false } +libc = { version = "0.2.146", optional = true, default-features = false } +k256 = { version = "0.13.4", default-features = false, features = ["ecdsa", "sha256"] } +num = { version = "0.4.0", default-features = false, features = ["alloc"] } +num-derive = { version = "0.4.2", default-features = false } num-integer = { version = "0.1.42", default-features = false } -num-rational = { 
version = "0.4.0", default-features = false } -num-traits = { version = "0.2.10", default-features = false } -once_cell = "1.5.2" +num-rational = { version = "0.4.0", default-features = false, features = ["serde"] } +num-traits = { version = "0.2.19", default-features = false } +once_cell = { version = "1.5.2", optional = true } +pem = { version = "0.8.1", optional = true } proptest = { version = "1.0.0", optional = true } +proptest-derive = { version = "0.5.1", optional = true } rand = { version = "0.8.3", default-features = false, features = ["small_rng"] } -schemars = { version = "0.8.0", features = ["preserve_order"], optional = true } -serde = { version = "1", default-features = false, features = ["derive"] } -serde_json = { version = "1.0.59", default-features = false } -thiserror = { version = "1.0.20", default-features = false, optional = true } +rand_pcg = { version = "0.3.0", optional = true } +schemars = { version = "0.8.21", features = ["preserve_order"], optional = true } +serde-map-to-array = "1.1.0" +serde = { version = "1", default-features = false, features = ["alloc", "derive"] } +serde_bytes = { version = "0.11.5", default-features = false, features = ["alloc"] } +serde_json = { version = "1.0.59", default-features = false, features = ["alloc"] } +strum = { version = "0.27", features = ["derive"], optional = true } +thiserror = { version = "1", optional = true } +tracing = { version = "0.1.37", default-features = false } uint = { version = "0.9.0", default-features = false } +untrusted = { version = "0.7.1", optional = true } +derive_more = "0.99.17" +version-sync = { version = "0.9", optional = true } [dev-dependencies] +base16 = { version = "0.2.1", features = ["std"] } bincode = "1.3.1" -criterion = "0.3.3" -getrandom = { version = "0.2.0", features = ["rdrand"] } +criterion = "0.5.1" +derp = "0.0.14" +getrandom = "0.2.0" +humantime = "2" +once_cell = "1.5.2" +openssl = "0.10.70" +pem = "0.8.1" proptest = "1.0.0" -serde_json = "1.0.55" 
-serde_test = "1.0.117" -version-sync = "0.9" +proptest-derive = "0.5.1" +proptest-attr-macro = "1.0.0" +rand = "0.8.3" +rand_pcg = "0.3.0" +serde_json = "1" +serde_test = "1" +strum = { version = "0.27", features = ["derive"] } +tempfile = "3.4.0" +thiserror = "1" +untrusted = "0.7.1" +# add explicit dependency to resolve RUSTSEC-2024-0421 +url = "2.5.4" [features] -default = ["base16/alloc", "hex/alloc", "serde/alloc", "serde_json/alloc", "displaydoc"] -std = [ - "base16/std", - "base64/std", - "ed25519-dalek/std", - "ed25519-dalek/serde", - "hex/std", - "k256/std", - "serde/std", - "serde_json/std", - "schemars", - "thiserror" -] -gens = ["std", "proptest/std"] +json-schema = ["once_cell", "schemars", "serde-map-to-array/json-schema"] +testing = ["proptest", "proptest-derive", "rand/default", "rand_pcg", "strum", "bincode", "thiserror", "getrandom", "derp"] +# Includes a restricted set of std lib functionality suitable for usage e.g. in a JS environment when compiled to Wasm. +std = ["base16/std", "derp", "getrandom/std", "humantime", "itertools/use_std", "libc", "once_cell", "pem", "serde_json/preserve_order", "thiserror", "untrusted"] +# Includes a complete set of std lib functionality, including filesystem I/O operations. +std-fs-io = ["std"] +# DEPRECATED - use "testing" instead of "gens". 
+gens = ["testing"] +version-sync = ["dep:version-sync"] [[bench]] name = "bytesrepr_bench" harness = false +required-features = ["testing"] +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] diff --git a/types/README.md b/types/README.md index c4f7beeea5..5205df961d 100644 --- a/types/README.md +++ b/types/README.md @@ -1,14 +1,21 @@ # `casper-types` -[![LOGO](https://raw.githubusercontent.com/CasperLabs/casper-node/master/images/CasperLabs_Logo_Horizontal_RGB.png)](https://casperlabs.io/) +[![LOGO](https://raw.githubusercontent.com/casper-network/casper-node/master/images/casper-association-logo-primary.svg)](https://casper.network/) -[![Build Status](https://drone-auto.casperlabs.io/api/badges/CasperLabs/casper-node/status.svg?branch=master)](http://drone-auto.casperlabs.io/CasperLabs/casper-node) [![Crates.io](https://img.shields.io/crates/v/casper-types)](https://crates.io/crates/casper-types) [![Documentation](https://docs.rs/casper-types/badge.svg)](https://docs.rs/casper-types) -[![License](https://img.shields.io/badge/license-COSL-blue.svg)](https://github.com/CasperLabs/casper-node/blob/master/LICENSE) +[![License](https://img.shields.io/badge/license-Apache-blue)](https://github.com/casper-network/casper-node/blob/master/LICENSE) -Types used to allow creation of Wasm contracts and tests for use on the CasperLabs network. +Types shared by many casper crates for use on the Casper network. 
+ +## `no_std` + +The crate is `no_std` (using the `core` and `alloc` crates) unless any of the following features are enabled: + +* `json-schema` to enable many types to be used to produce JSON-schema data via the [`schemars`](https://crates.io/crates/schemars) crate +* `datasize` to enable many types to derive the [`DataSize`](https://github.com/casper-network/datasize-rs) trait +* `gens` to enable many types to be produced in accordance with [`proptest`](https://crates.io/crates/proptest) usage for consumption within dependee crates' property testing suites ## License -Licensed under the [CasperLabs Open Source License (COSL)](https://github.com/CasperLabs/casper-node/blob/master/LICENSE). +Licensed under the [Apache License Version 2.0](https://github.com/casper-network/casper-node/blob/master/LICENSE). diff --git a/types/benches/bytesrepr_bench.rs b/types/benches/bytesrepr_bench.rs index cc827233ef..3facbb1807 100644 --- a/types/benches/bytesrepr_bench.rs +++ b/types/benches/bytesrepr_bench.rs @@ -1,11 +1,23 @@ -use criterion::{black_box, criterion_group, criterion_main, Bencher, Criterion}; +use std::{ + collections::{BTreeMap, BTreeSet}, + iter, +}; -use std::{collections::BTreeMap, iter}; +use criterion::{black_box, criterion_group, criterion_main, Bencher, Criterion}; use casper_types::{ account::AccountHash, + addressable_entity::{ActionThresholds, AddressableEntity, AssociatedKeys, EntityKind}, bytesrepr::{self, Bytes, FromBytes, ToBytes}, - AccessRights, CLTyped, CLValue, Key, URef, U128, U256, U512, + system::auction::{ + Bid, BidKind, Delegator, DelegatorBid, DelegatorKind, EraInfo, SeigniorageAllocation, + ValidatorBid, + }, + AccessRights, ByteCodeHash, CLTyped, CLValue, ContractRuntimeTag, DeployHash, DeployInfo, + EntityAddr, EntityVersionKey, EntityVersions, Gas, Group, Groups, InitiatorAddr, Key, Package, + PackageHash, PackageStatus, ProtocolVersion, PublicKey, SecretKey, TransactionHash, + TransactionV1Hash, TransferAddr, TransferV2, URef, 
KEY_HASH_LENGTH, TRANSFER_ADDR_LENGTH, U128, + U256, U512, UREF_ADDR_LENGTH, }; static KB: usize = 1024; @@ -305,7 +317,7 @@ fn serialize_cl_value(raw_value: T) -> Vec { fn benchmark_deserialization(b: &mut Bencher, raw_value: T) { let serialized_value = serialize_cl_value(raw_value); b.iter(|| { - let cl_value: CLValue = bytesrepr::deserialize(serialized_value.clone()).unwrap(); + let cl_value: CLValue = bytesrepr::deserialize_from_slice(&serialized_value).unwrap(); let _raw_value: T = cl_value.into_t().unwrap(); }); } @@ -437,6 +449,266 @@ fn deserialize_u512(b: &mut Bencher) { b.iter(|| U512::from_bytes(black_box(&num_u512_bytes))) } +fn serialize_contract(b: &mut Bencher) { + let contract = sample_contract(); + b.iter(|| ToBytes::to_bytes(black_box(&contract))); +} + +fn deserialize_contract(b: &mut Bencher) { + let contract = sample_contract(); + let contract_bytes = AddressableEntity::to_bytes(&contract).unwrap(); + b.iter(|| AddressableEntity::from_bytes(black_box(&contract_bytes)).unwrap()); +} + +fn sample_contract() -> AddressableEntity { + AddressableEntity::new( + PackageHash::default(), + ByteCodeHash::default(), + ProtocolVersion::default(), + URef::default(), + AssociatedKeys::default(), + ActionThresholds::default(), + EntityKind::SmartContract(ContractRuntimeTag::VmCasperV1), + ) +} + +fn contract_version_key_fn(i: u8) -> EntityVersionKey { + EntityVersionKey::new(i as u32, i as u32) +} + +fn contract_hash_fn(i: u8) -> EntityAddr { + EntityAddr::SmartContract([i; KEY_HASH_LENGTH]) +} + +fn sample_map(key_fn: FK, value_fn: FV, count: u8) -> BTreeMap +where + FK: Fn(u8) -> K, + FV: Fn(u8) -> V, +{ + (0..count) + .map(|i| { + let key = key_fn(i); + let value = value_fn(i); + (key, value) + }) + .collect() +} + +fn sample_set(fun: F, count: u8) -> BTreeSet +where + F: Fn(u8) -> K, +{ + (0..count).map(fun).collect() +} + +fn sample_group(i: u8) -> Group { + Group::new(format!("group-{}", i)) +} + +fn sample_uref(i: u8) -> URef { + URef::new([i; 
UREF_ADDR_LENGTH], AccessRights::all()) +} + +fn sample_contract_package( + contract_versions_len: u8, + disabled_versions_len: u8, + groups_len: u8, +) -> Package { + let versions = EntityVersions::from(sample_map( + contract_version_key_fn, + contract_hash_fn, + contract_versions_len, + )); + let disabled_versions = sample_set(contract_version_key_fn, disabled_versions_len); + let groups = Groups::from(sample_map( + sample_group, + |_| sample_set(sample_uref, 3), + groups_len, + )); + + Package::new(versions, disabled_versions, groups, PackageStatus::Locked) +} + +fn serialize_contract_package(b: &mut Bencher) { + let contract = sample_contract_package(5, 1, 5); + b.iter(|| Package::to_bytes(black_box(&contract))); +} + +fn deserialize_contract_package(b: &mut Bencher) { + let contract_package = sample_contract_package(5, 1, 5); + let contract_bytes = Package::to_bytes(&contract_package).unwrap(); + b.iter(|| Package::from_bytes(black_box(&contract_bytes)).unwrap()); +} + +fn u32_to_pk(i: u32) -> PublicKey { + let mut sk_bytes = [0u8; 32]; + U256::from(i).to_big_endian(&mut sk_bytes); + let sk = SecretKey::ed25519_from_bytes(sk_bytes).unwrap(); + PublicKey::from(&sk) +} + +fn sample_delegators(delegators_len: u32) -> Vec { + (0..delegators_len) + .map(|i| { + let delegator_pk = u32_to_pk(i); + let staked_amount = U512::from_dec_str("123123123123123").unwrap(); + let bonding_purse = URef::default(); + let validator_pk = u32_to_pk(i); + Delegator::unlocked(delegator_pk, staked_amount, bonding_purse, validator_pk) + }) + .collect() +} + +fn sample_delegator_bids(delegators_len: u32) -> Vec { + (0..delegators_len) + .map(|i| { + let delegator_pk = u32_to_pk(i); + let staked_amount = U512::from_dec_str("123123123123123").unwrap(); + let bonding_purse = URef::default(); + let validator_pk = u32_to_pk(i); + DelegatorBid::unlocked( + delegator_pk.into(), + staked_amount, + bonding_purse, + validator_pk, + ) + }) + .collect() +} + +fn sample_bid(delegators_len: u32) -> 
Bid { + let validator_public_key = PublicKey::System; + let bonding_purse = URef::default(); + let staked_amount = U512::from_dec_str("123123123123123").unwrap(); + let delegation_rate = 10u8; + let mut bid = Bid::unlocked( + validator_public_key, + bonding_purse, + staked_amount, + delegation_rate, + ); + let new_delegators = sample_delegators(delegators_len); + + let curr_delegators = bid.delegators_mut(); + for delegator in new_delegators.into_iter() { + assert!(curr_delegators + .insert(delegator.delegator_public_key().clone(), delegator) + .is_none()); + } + bid +} + +fn serialize_bid(delegators_len: u32, b: &mut Bencher) { + let bid = sample_bid(delegators_len); + b.iter(|| Bid::to_bytes(black_box(&bid))); +} +fn serialize_delegation_bid(delegators_len: u32, b: &mut Bencher) { + let bids = sample_delegator_bids(delegators_len); + for bid in bids { + b.iter(|| BidKind::to_bytes(black_box(&BidKind::Delegator(Box::new(bid.clone()))))); + } +} + +fn sample_validator_bid() -> BidKind { + let validator_public_key = PublicKey::System; + let bonding_purse = URef::default(); + let staked_amount = U512::from_dec_str("123123123123123").unwrap(); + let delegation_rate = 10u8; + BidKind::Validator(Box::new(ValidatorBid::unlocked( + validator_public_key, + bonding_purse, + staked_amount, + delegation_rate, + 0, + 0, + 0, + ))) +} + +fn serialize_validator_bid(b: &mut Bencher) { + let bid = sample_validator_bid(); + b.iter(|| BidKind::to_bytes(black_box(&bid))); +} + +fn deserialize_bid(delegators_len: u32, b: &mut Bencher) { + let bid = sample_bid(delegators_len); + let bid_bytes = Bid::to_bytes(&bid).unwrap(); + b.iter(|| Bid::from_bytes(black_box(&bid_bytes))); +} + +fn sample_transfer() -> TransferV2 { + TransferV2::new( + TransactionHash::V1(TransactionV1Hash::default()), + InitiatorAddr::AccountHash(AccountHash::default()), + None, + URef::default(), + URef::default(), + U512::MAX, + Gas::new(U512::from_dec_str("123123123123").unwrap()), + Some(1u64), + ) +} + +fn 
serialize_transfer(b: &mut Bencher) { + let transfer = sample_transfer(); + b.iter(|| TransferV2::to_bytes(&transfer)); +} + +fn deserialize_transfer(b: &mut Bencher) { + let transfer = sample_transfer(); + let transfer_bytes = transfer.to_bytes().unwrap(); + b.iter(|| TransferV2::from_bytes(&transfer_bytes)); +} + +fn sample_deploy_info(transfer_len: u16) -> DeployInfo { + let transfers = (0..transfer_len) + .map(|i| { + let mut tmp = [0u8; TRANSFER_ADDR_LENGTH]; + U256::from(i).to_little_endian(&mut tmp); + TransferAddr::new(tmp) + }) + .collect::>(); + DeployInfo::new( + DeployHash::default(), + &transfers, + AccountHash::default(), + URef::default(), + U512::MAX, + ) +} + +fn serialize_deploy_info(b: &mut Bencher) { + let deploy_info = sample_deploy_info(1000); + b.iter(|| DeployInfo::to_bytes(&deploy_info)); +} + +fn deserialize_deploy_info(b: &mut Bencher) { + let deploy_info = sample_deploy_info(1000); + let deploy_bytes = deploy_info.to_bytes().unwrap(); + b.iter(|| DeployInfo::from_bytes(&deploy_bytes)); +} + +fn sample_era_info(delegators_len: u32) -> EraInfo { + let mut base = EraInfo::new(); + let delegations = (0..delegators_len).map(|i| { + let pk = u32_to_pk(i); + SeigniorageAllocation::delegator_kind(DelegatorKind::PublicKey(pk.clone()), pk, U512::MAX) + }); + base.seigniorage_allocations_mut().extend(delegations); + base +} + +fn serialize_era_info(delegators_len: u32, b: &mut Bencher) { + let era_info = sample_era_info(delegators_len); + b.iter(|| EraInfo::to_bytes(&era_info)); +} + +fn deserialize_era_info(delegators_len: u32, b: &mut Bencher) { + let era_info = sample_era_info(delegators_len); + let era_info_bytes = era_info.to_bytes().unwrap(); + b.iter(|| EraInfo::from_bytes(&era_info_bytes)); +} + fn bytesrepr_bench(c: &mut Criterion) { c.bench_function("serialize_vector_of_i32s", serialize_vector_of_i32s); c.bench_function("deserialize_vector_of_i32s", deserialize_vector_of_i32s); @@ -557,6 +829,50 @@ fn bytesrepr_bench(c: &mut Criterion) { 
c.bench_function("deserialize_u256", deserialize_u256); c.bench_function("serialize_u512", serialize_u512); c.bench_function("deserialize_u512", deserialize_u512); + // c.bench_function("bytesrepr::serialize_account", serialize_account); + // c.bench_function("bytesrepr::deserialize_account", deserialize_account); + c.bench_function("bytesrepr::serialize_contract", serialize_contract); + c.bench_function("bytesrepr::deserialize_contract", deserialize_contract); + c.bench_function( + "bytesrepr::serialize_contract_package", + serialize_contract_package, + ); + c.bench_function( + "bytesrepr::deserialize_contract_package", + deserialize_contract_package, + ); + c.bench_function( + "bytesrepr::serialize_validator_bid", + serialize_validator_bid, + ); + c.bench_function("bytesrepr::serialize_delegation_bid", |b| { + serialize_delegation_bid(10, b) + }); + c.bench_function("bytesrepr::serialize_bid_small", |b| serialize_bid(10, b)); + c.bench_function("bytesrepr::serialize_bid_medium", |b| serialize_bid(100, b)); + c.bench_function("bytesrepr::serialize_bid_big", |b| serialize_bid(1000, b)); + c.bench_function("bytesrepr::deserialize_bid_small", |b| { + deserialize_bid(10, b) + }); + c.bench_function("bytesrepr::deserialize_bid_medium", |b| { + deserialize_bid(100, b) + }); + c.bench_function("bytesrepr::deserialize_bid_big", |b| { + deserialize_bid(1000, b) + }); + c.bench_function("bytesrepr::serialize_transfer", serialize_transfer); + c.bench_function("bytesrepr::deserialize_transfer", deserialize_transfer); + c.bench_function("bytesrepr::serialize_deploy_info", serialize_deploy_info); + c.bench_function( + "bytesrepr::deserialize_deploy_info", + deserialize_deploy_info, + ); + c.bench_function("bytesrepr::serialize_era_info", |b| { + serialize_era_info(500, b) + }); + c.bench_function("bytesrepr::deserialize_era_info", |b| { + deserialize_era_info(500, b) + }); } criterion_group!(benches, bytesrepr_bench); diff --git a/types/proptest-regressions/stored_value.txt 
b/types/proptest-regressions/stored_value.txt new file mode 100644 index 0000000000..2ded7ecdb4 --- /dev/null +++ b/types/proptest-regressions/stored_value.txt @@ -0,0 +1,7 @@ +# Seeds for failure cases proptest has generated in the past. It is +# automatically read and these particular cases re-run before any +# novel cases are generated. +# +# It is recommended to check this file in to source control so that +# everyone who runs the test benefits from these saved cases. +cc 451b981c778518acba99daaa2eb7b621b1882430c7a665ed85ce06446732117e # shrinks to v = RawBytes([]) diff --git a/types/src/access_rights.rs b/types/src/access_rights.rs index d112aee307..fcf8e8e4eb 100644 --- a/types/src/access_rights.rs +++ b/types/src/access_rights.rs @@ -1,40 +1,52 @@ -use alloc::vec::Vec; +use alloc::{ + collections::{btree_map::Entry, BTreeMap}, + vec::Vec, +}; +use core::fmt::{self, Display, Formatter}; -use bitflags::bitflags; -use datasize::DataSize; +#[cfg(any(feature = "testing", test))] use rand::{ distributions::{Distribution, Standard}, Rng, }; use serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer}; -use crate::bytesrepr; +use crate::{bytesrepr, HashAddr, URef, URefAddr}; +pub use private::AccessRights; /// The number of bytes in a serialized [`AccessRights`]. pub const ACCESS_RIGHTS_SERIALIZED_LENGTH: usize = 1; -bitflags! { - /// A struct which behaves like a set of bitflags to define access rights associated with a - /// [`URef`](crate::URef). - #[allow(clippy::derive_hash_xor_eq)] - #[derive(DataSize)] - pub struct AccessRights: u8 { - /// No permissions - const NONE = 0; - /// Permission to read the value under the associated `URef`. - const READ = 0b001; - /// Permission to write a value under the associated `URef`. - const WRITE = 0b010; - /// Permission to add to the value under the associated `URef`. - const ADD = 0b100; - /// Permission to read or add to the value under the associated `URef`. 
- const READ_ADD = Self::READ.bits | Self::ADD.bits; - /// Permission to read or write the value under the associated `URef`. - const READ_WRITE = Self::READ.bits | Self::WRITE.bits; - /// Permission to add to, or write the value under the associated `URef`. - const ADD_WRITE = Self::ADD.bits | Self::WRITE.bits; - /// Permission to read, add to, or write the value under the associated `URef`. - const READ_ADD_WRITE = Self::READ.bits | Self::ADD.bits | Self::WRITE.bits; +// Module exists only to restrict the scope of the following `#allow`. +#[allow(clippy::bad_bit_mask)] +mod private { + use bitflags::bitflags; + #[cfg(feature = "datasize")] + use datasize::DataSize; + + bitflags! { + /// A struct which behaves like a set of bitflags to define access rights associated with a + /// [`URef`](crate::URef). + #[allow(clippy::derived_hash_with_manual_eq)] + #[cfg_attr(feature = "datasize", derive(DataSize))] + pub struct AccessRights: u8 { + /// No permissions + const NONE = 0; + /// Permission to read the value under the associated `URef`. + const READ = 0b001; + /// Permission to write a value under the associated `URef`. + const WRITE = 0b010; + /// Permission to add to the value under the associated `URef`. + const ADD = 0b100; + /// Permission to read or add to the value under the associated `URef`. + const READ_ADD = Self::READ.bits | Self::ADD.bits; + /// Permission to read or write the value under the associated `URef`. + const READ_WRITE = Self::READ.bits | Self::WRITE.bits; + /// Permission to add to, or write the value under the associated `URef`. + const ADD_WRITE = Self::ADD.bits | Self::WRITE.bits; + /// Permission to read, add to, or write the value under the associated `URef`. 
+ const READ_ADD_WRITE = Self::READ.bits | Self::ADD.bits | Self::WRITE.bits; + } } } @@ -66,8 +78,8 @@ impl AccessRights { } } -impl core::fmt::Display for AccessRights { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { +impl Display for AccessRights { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { match *self { AccessRights::NONE => write!(f, "NONE"), AccessRights::READ => write!(f, "READ"), @@ -84,12 +96,17 @@ impl core::fmt::Display for AccessRights { impl bytesrepr::ToBytes for AccessRights { fn to_bytes(&self) -> Result, bytesrepr::Error> { - self.bits.to_bytes() + self.bits().to_bytes() } fn serialized_length(&self) -> usize { ACCESS_RIGHTS_SERIALIZED_LENGTH } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + writer.push(self.bits()); + Ok(()) + } } impl bytesrepr::FromBytes for AccessRights { @@ -104,7 +121,7 @@ impl bytesrepr::FromBytes for AccessRights { impl Serialize for AccessRights { fn serialize(&self, serializer: S) -> Result { - self.bits.serialize(serializer) + self.bits().serialize(serializer) } } @@ -115,6 +132,7 @@ impl<'de> Deserialize<'de> for AccessRights { } } +#[cfg(any(feature = "testing", test))] impl Distribution for Standard { fn sample(&self, rng: &mut R) -> AccessRights { let mut result = AccessRights::NONE; @@ -131,9 +149,145 @@ impl Distribution for Standard { } } +/// Used to indicate if a granted [`URef`] was already held by the context. +#[derive(Debug, PartialEq, Eq)] +pub enum GrantedAccess { + /// No new set of access rights were granted. + PreExisting, + /// A new set of access rights were granted. + Granted { + /// The address of the URef. + uref_addr: URefAddr, + /// The set of the newly granted access rights. + newly_granted_access_rights: AccessRights, + }, +} + +/// Access rights for a given runtime context. 
+#[derive(Debug, PartialEq, Eq)] +pub struct ContextAccessRights { + hash_addr: HashAddr, + access_rights: BTreeMap, +} + +impl ContextAccessRights { + /// Creates a new instance of access rights from an iterator of URefs merging any duplicates, + /// taking the union of their rights. + pub fn new>(hash_addr: HashAddr, uref_iter: T) -> Self { + let mut context_access_rights = ContextAccessRights { + hash_addr, + access_rights: BTreeMap::new(), + }; + context_access_rights.do_extend(uref_iter); + context_access_rights + } + + /// Extend context access rights with access rights. + pub fn extend_access_rights(&mut self, access_rights: BTreeMap) { + for (uref_addr, access_rights) in access_rights { + match self.access_rights.entry(uref_addr) { + Entry::Occupied(rights) => { + *rights.into_mut() = rights.get().union(access_rights); + } + Entry::Vacant(rights) => { + rights.insert(access_rights); + } + } + } + } + + /// Returns the current context key. + pub fn context_key(&self) -> HashAddr { + self.hash_addr + } + + /// Extends the current access rights from a given set of URefs. + pub fn extend(&mut self, urefs: &[URef]) { + self.do_extend(urefs.iter().copied()) + } + + /// Extends the current access rights from a given set of URefs. + fn do_extend>(&mut self, uref_iter: T) { + for uref in uref_iter { + match self.access_rights.entry(uref.addr()) { + Entry::Occupied(rights) => { + *rights.into_mut() = rights.get().union(uref.access_rights()); + } + Entry::Vacant(rights) => { + rights.insert(uref.access_rights()); + } + } + } + } + + /// Checks whether given uref has enough access rights. + pub fn has_access_rights_to_uref(&self, uref: &URef) -> bool { + if let Some(known_rights) = self.access_rights.get(&uref.addr()) { + let rights_to_check = uref.access_rights(); + known_rights.contains(rights_to_check) + } else { + // URef is not known + false + } + } + + /// Returns a reference to the map of access rights. 
+ pub fn access_rights(&self) -> &BTreeMap { + &self.access_rights + } + + /// Consume into access rights. + pub fn take_access_rights(self) -> BTreeMap { + self.access_rights + } + + /// Grants access to a [`URef`]; unless access was pre-existing. + pub fn grant_access(&mut self, uref: URef) -> GrantedAccess { + match self.access_rights.entry(uref.addr()) { + Entry::Occupied(existing_rights) => { + let newly_granted_access_rights = + uref.access_rights().difference(*existing_rights.get()); + *existing_rights.into_mut() = existing_rights.get().union(uref.access_rights()); + if newly_granted_access_rights.is_none() { + GrantedAccess::PreExisting + } else { + GrantedAccess::Granted { + uref_addr: uref.addr(), + newly_granted_access_rights, + } + } + } + Entry::Vacant(rights) => { + rights.insert(uref.access_rights()); + GrantedAccess::Granted { + uref_addr: uref.addr(), + newly_granted_access_rights: uref.access_rights(), + } + } + } + } + + /// Remove access for a given `URef`. + pub fn remove_access(&mut self, uref_addr: URefAddr, access_rights: AccessRights) { + if let Some(current_access_rights) = self.access_rights.get_mut(&uref_addr) { + current_access_rights.remove(access_rights) + } + } +} + #[cfg(test)] mod tests { use super::*; + use crate::UREF_ADDR_LENGTH; + + const ENTITY_HASH: HashAddr = [1u8; 32]; + const UREF_ADDRESS: [u8; UREF_ADDR_LENGTH] = [1; UREF_ADDR_LENGTH]; + const UREF_NO_PERMISSIONS: URef = URef::new(UREF_ADDRESS, AccessRights::empty()); + const UREF_READ: URef = URef::new(UREF_ADDRESS, AccessRights::READ); + const UREF_ADD: URef = URef::new(UREF_ADDRESS, AccessRights::ADD); + const UREF_WRITE: URef = URef::new(UREF_ADDRESS, AccessRights::WRITE); + const UREF_READ_ADD: URef = URef::new(UREF_ADDRESS, AccessRights::READ_ADD); + const UREF_READ_ADD_WRITE: URef = URef::new(UREF_ADDRESS, AccessRights::READ_ADD_WRITE); fn test_readable(right: AccessRights, is_true: bool) { assert_eq!(right.is_readable(), is_true) @@ -179,4 +333,116 @@ mod tests { 
test_addable(AccessRights::WRITE, false); test_addable(AccessRights::READ_ADD_WRITE, true); } + + #[test] + fn should_check_has_access_rights_to_uref() { + let context_rights = ContextAccessRights::new(ENTITY_HASH, vec![UREF_READ_ADD]); + assert!(context_rights.has_access_rights_to_uref(&UREF_READ_ADD)); + assert!(context_rights.has_access_rights_to_uref(&UREF_READ)); + assert!(context_rights.has_access_rights_to_uref(&UREF_ADD)); + assert!(context_rights.has_access_rights_to_uref(&UREF_NO_PERMISSIONS)); + } + + #[test] + fn should_check_does_not_have_access_rights_to_uref() { + let context_rights = ContextAccessRights::new(ENTITY_HASH, vec![UREF_READ_ADD]); + assert!(!context_rights.has_access_rights_to_uref(&UREF_READ_ADD_WRITE)); + assert!(!context_rights + .has_access_rights_to_uref(&URef::new([2; UREF_ADDR_LENGTH], AccessRights::empty()))); + } + + #[test] + fn should_extend_access_rights() { + // Start with uref with no permissions. + let mut context_rights = ContextAccessRights::new(ENTITY_HASH, vec![UREF_NO_PERMISSIONS]); + let mut expected_rights = BTreeMap::new(); + expected_rights.insert(UREF_ADDRESS, AccessRights::empty()); + assert_eq!(context_rights.access_rights, expected_rights); + + // Extend with a READ_ADD: should merge to single READ_ADD. + context_rights.extend(&[UREF_READ_ADD]); + *expected_rights.get_mut(&UREF_ADDRESS).unwrap() = AccessRights::READ_ADD; + assert_eq!(context_rights.access_rights, expected_rights); + + // Extend with a READ: should have no observable effect. + context_rights.extend(&[UREF_READ]); + assert_eq!(context_rights.access_rights, expected_rights); + + // Extend with a WRITE: should merge to single READ_ADD_WRITE. 
+ context_rights.extend(&[UREF_WRITE]); + *expected_rights.get_mut(&UREF_ADDRESS).unwrap() = AccessRights::READ_ADD_WRITE; + assert_eq!(context_rights.access_rights, expected_rights); + } + + #[test] + fn should_perform_union_of_access_rights_in_new() { + let context_rights = + ContextAccessRights::new(ENTITY_HASH, vec![UREF_NO_PERMISSIONS, UREF_READ, UREF_ADD]); + + // Expect the three discrete URefs' rights to be unioned into READ_ADD. + let mut expected_rights = BTreeMap::new(); + expected_rights.insert(UREF_ADDRESS, AccessRights::READ_ADD); + assert_eq!(context_rights.access_rights, expected_rights); + } + + #[test] + fn should_grant_access_rights() { + let mut context_rights = ContextAccessRights::new(ENTITY_HASH, vec![UREF_READ_ADD]); + let granted_access = context_rights.grant_access(UREF_READ); + assert_eq!(granted_access, GrantedAccess::PreExisting); + let granted_access = context_rights.grant_access(UREF_READ_ADD_WRITE); + assert_eq!( + granted_access, + GrantedAccess::Granted { + uref_addr: UREF_ADDRESS, + newly_granted_access_rights: AccessRights::WRITE + } + ); + assert!(context_rights.has_access_rights_to_uref(&UREF_READ_ADD_WRITE)); + let new_uref = URef::new([3; 32], AccessRights::all()); + let granted_access = context_rights.grant_access(new_uref); + assert_eq!( + granted_access, + GrantedAccess::Granted { + uref_addr: new_uref.addr(), + newly_granted_access_rights: AccessRights::all() + } + ); + assert!(context_rights.has_access_rights_to_uref(&new_uref)); + } + + #[test] + fn should_remove_access_rights() { + let mut context_rights = ContextAccessRights::new(ENTITY_HASH, vec![UREF_READ_ADD_WRITE]); + assert!(context_rights.has_access_rights_to_uref(&UREF_READ_ADD_WRITE)); + + // Strip write access from the context rights. 
+ context_rights.remove_access(UREF_ADDRESS, AccessRights::WRITE); + assert!( + !context_rights.has_access_rights_to_uref(&UREF_READ_ADD_WRITE), + "Write access should have been removed" + ); + + // Strip the access again to ensure that the bit is not flipped back. + context_rights.remove_access(UREF_ADDRESS, AccessRights::WRITE); + assert!( + !context_rights.has_access_rights_to_uref(&UREF_READ_ADD_WRITE), + "Write access should not have been granted back" + ); + assert!( + context_rights.has_access_rights_to_uref(&UREF_READ_ADD), + "Read and add access should be preserved." + ); + + // Strip both read and add access from the context rights. + context_rights.remove_access(UREF_ADDRESS, AccessRights::READ_ADD); + assert!( + !context_rights.has_access_rights_to_uref(&UREF_READ_ADD), + "Read and add access should have been removed" + ); + assert!( + context_rights.has_access_rights_to_uref(&UREF_NO_PERMISSIONS), + "The access rights should be empty" + ); + } } diff --git a/types/src/account.rs b/types/src/account.rs index 5493633b9b..5dc25ca330 100644 --- a/types/src/account.rs +++ b/types/src/account.rs @@ -1,555 +1,408 @@ //! Contains types and constants associated with user accounts. -// TODO - remove once schemars stops causing warning. 
-#![allow(clippy::field_reassign_with_default)] - -use alloc::{format, string::String, vec::Vec}; -use core::{ - array::TryFromSliceError, - convert::TryFrom, - fmt::{self, Debug, Display, Formatter}, -}; +mod account_hash; +pub mod action_thresholds; +pub mod action_type; +pub mod associated_keys; +mod error; +mod weight; -use blake2::{ - digest::{Update, VariableOutput}, - VarBlake2b, -}; +use serde::{Deserialize, Serialize}; + +use alloc::{collections::BTreeSet, vec::Vec}; + +#[cfg(feature = "datasize")] use datasize::DataSize; -use rand::{ - distributions::{Distribution, Standard}, - Rng, +#[cfg(feature = "json-schema")] +use once_cell::sync::Lazy; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; + +pub use self::{ + account_hash::{AccountHash, ACCOUNT_HASH_FORMATTED_STRING_PREFIX, ACCOUNT_HASH_LENGTH}, + action_thresholds::ActionThresholds, + action_type::ActionType, + associated_keys::{AddKeyFailure, AssociatedKeys, RemoveKeyFailure, UpdateKeyFailure}, + error::{FromStrError, SetThresholdFailure, TryFromIntError}, + weight::Weight, }; -#[cfg(feature = "std")] -use schemars::{gen::SchemaGenerator, schema::Schema, JsonSchema}; -use serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer}; -#[cfg(feature = "std")] -use thiserror::Error; use crate::{ - bytesrepr::{Error, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, - CLType, CLTyped, PublicKey, BLAKE2B_DIGEST_LENGTH, + bytesrepr::{self, FromBytes, ToBytes}, + contracts::NamedKeys, + AccessRights, Key, URef, }; - -const FORMATTED_STRING_PREFIX: &str = "account-hash-"; - -// This error type is not intended to be used by third party crates. -#[doc(hidden)] -#[derive(Debug, Eq, PartialEq)] -pub struct TryFromIntError(()); - -/// Associated error type of `TryFrom<&[u8]>` for [`AccountHash`]. -#[derive(Debug)] -pub struct TryFromSliceForAccountHashError(()); - -/// Error returned when decoding an `AccountHash` from a formatted string. 
-#[derive(Debug)] -pub enum FromStrError { - /// The prefix is invalid. - InvalidPrefix, - /// The hash is not valid hex. - Hex(base16::DecodeError), - /// The hash is the wrong length. - Hash(TryFromSliceError), -} - -impl From for FromStrError { - fn from(error: base16::DecodeError) -> Self { - FromStrError::Hex(error) - } -} - -impl From for FromStrError { - fn from(error: TryFromSliceError) -> Self { - FromStrError::Hash(error) - } -} - -impl Display for FromStrError { - fn fmt(&self, f: &mut Formatter) -> fmt::Result { - match self { - FromStrError::InvalidPrefix => write!(f, "prefix is not 'account-hash-'"), - FromStrError::Hex(error) => { - write!(f, "failed to decode address portion from hex: {}", error) - } - FromStrError::Hash(error) => write!(f, "address portion is wrong length: {}", error), +#[cfg(feature = "json-schema")] +use crate::{PublicKey, SecretKey}; + +#[cfg(feature = "json-schema")] +static ACCOUNT: Lazy = Lazy::new(|| { + let secret_key = SecretKey::ed25519_from_bytes([0; 32]).unwrap(); + let account_hash = PublicKey::from(&secret_key).to_account_hash(); + let main_purse = URef::from_formatted_str( + "uref-09480c3248ef76b603d386f3f4f8a5f87f597d4eaffd475433f861af187ab5db-007", + ) + .unwrap(); + let mut named_keys = NamedKeys::new(); + named_keys.insert("main_purse".to_string(), Key::URef(main_purse)); + let weight = Weight::new(1); + let associated_keys = AssociatedKeys::new(account_hash, weight); + let action_thresholds = ActionThresholds::new(weight, weight).unwrap(); + Account { + account_hash, + named_keys, + main_purse, + associated_keys, + action_thresholds, + } +}); + +/// Represents an Account in the global state. 
+#[derive(PartialEq, Eq, Clone, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub struct Account { + account_hash: AccountHash, + named_keys: NamedKeys, + main_purse: URef, + associated_keys: AssociatedKeys, + action_thresholds: ActionThresholds, +} + +impl Account { + /// Creates a new account. + pub fn new( + account_hash: AccountHash, + named_keys: NamedKeys, + main_purse: URef, + associated_keys: AssociatedKeys, + action_thresholds: ActionThresholds, + ) -> Self { + Account { + account_hash, + named_keys, + main_purse, + associated_keys, + action_thresholds, } } -} -/// The various types of action which can be performed in the context of a given account. -#[repr(u32)] -pub enum ActionType { - /// Represents performing a deploy. - Deployment = 0, - /// Represents changing the associated keys (i.e. map of [`AccountHash`]s to [`Weight`]s) or - /// action thresholds (i.e. the total [`Weight`]s of signing [`AccountHash`]s required to - /// perform various actions). - KeyManagement = 1, -} - -// This conversion is not intended to be used by third party crates. -#[doc(hidden)] -impl TryFrom for ActionType { - type Error = TryFromIntError; - - fn try_from(value: u32) -> Result { - // This doesn't use `num_derive` traits such as FromPrimitive and ToPrimitive - // that helps to automatically create `from_u32` and `to_u32`. This approach - // gives better control over generated code. - match value { - d if d == ActionType::Deployment as u32 => Ok(ActionType::Deployment), - d if d == ActionType::KeyManagement as u32 => Ok(ActionType::KeyManagement), - _ => Err(TryFromIntError(())), - } + /// An Account constructor with presets for associated_keys and action_thresholds. + /// + /// An account created with this method is valid and can be used as the target of a transaction. 
+ /// It will be created with an [`AssociatedKeys`] with a [`Weight`] of 1, and a default + /// [`ActionThresholds`]. + pub fn create(account: AccountHash, named_keys: NamedKeys, main_purse: URef) -> Self { + let associated_keys = AssociatedKeys::new(account, Weight::new(1)); + + let action_thresholds: ActionThresholds = Default::default(); + Account::new( + account, + named_keys, + main_purse, + associated_keys, + action_thresholds, + ) } -} - -/// Errors that can occur while changing action thresholds (i.e. the total [`Weight`]s of signing -/// [`AccountHash`]s required to perform various actions) on an account. -#[repr(i32)] -#[derive(Debug, PartialEq, Eq, Copy, Clone)] -#[cfg_attr(feature = "std", derive(Error))] -pub enum SetThresholdFailure { - /// Setting the key-management threshold to a value lower than the deployment threshold is - /// disallowed. - #[cfg_attr( - feature = "std", - error("New threshold should be greater than or equal to deployment threshold") - )] - KeyManagementThreshold = 1, - /// Setting the deployment threshold to a value greater than any other threshold is disallowed. - #[cfg_attr( - feature = "std", - error("New threshold should be lower than or equal to key management threshold") - )] - DeploymentThreshold = 2, - /// Caller doesn't have sufficient permissions to set new thresholds. - #[cfg_attr( - feature = "std", - error("Unable to set action threshold due to insufficient permissions") - )] - PermissionDeniedError = 3, - /// Setting a threshold to a value greater than the total weight of associated keys is - /// disallowed. - #[cfg_attr( - feature = "std", - error("New threshold should be lower or equal than total weight of associated keys") - )] - InsufficientTotalWeight = 4, -} -// This conversion is not intended to be used by third party crates. 
-#[doc(hidden)] -impl TryFrom for SetThresholdFailure { - type Error = TryFromIntError; - - fn try_from(value: i32) -> Result { - match value { - d if d == SetThresholdFailure::KeyManagementThreshold as i32 => { - Ok(SetThresholdFailure::KeyManagementThreshold) - } - d if d == SetThresholdFailure::DeploymentThreshold as i32 => { - Ok(SetThresholdFailure::DeploymentThreshold) - } - d if d == SetThresholdFailure::PermissionDeniedError as i32 => { - Ok(SetThresholdFailure::PermissionDeniedError) - } - d if d == SetThresholdFailure::InsufficientTotalWeight as i32 => { - Ok(SetThresholdFailure::InsufficientTotalWeight) - } - _ => Err(TryFromIntError(())), - } + /// Appends named keys to an account's named_keys field. + pub fn named_keys_append(&mut self, keys: NamedKeys) { + self.named_keys.append(keys); } -} -/// Maximum number of associated keys (i.e. map of [`AccountHash`]s to [`Weight`]s) for a single -/// account. -pub const MAX_ASSOCIATED_KEYS: usize = 10; - -/// The number of bytes in a serialized [`Weight`]. -pub const WEIGHT_SERIALIZED_LENGTH: usize = U8_SERIALIZED_LENGTH; - -/// The weight attributed to a given [`AccountHash`] in an account's associated keys. -#[derive(PartialOrd, Ord, PartialEq, Eq, Clone, Copy, Debug, Serialize, Deserialize)] -pub struct Weight(u8); - -impl Weight { - /// Constructs a new `Weight`. - pub fn new(weight: u8) -> Weight { - Weight(weight) + /// Returns named keys. + pub fn named_keys(&self) -> &NamedKeys { + &self.named_keys } - /// Returns the value of `self` as a `u8`. - pub fn value(self) -> u8 { - self.0 + /// Returns a mutable reference to named keys. + pub fn named_keys_mut(&mut self) -> &mut NamedKeys { + &mut self.named_keys } -} -impl ToBytes for Weight { - fn to_bytes(&self) -> Result, Error> { - self.0.to_bytes() + /// Removes the key under the given name from named keys. 
+ pub fn remove_named_key(&mut self, name: &str) -> Option { + self.named_keys.remove(name) } - fn serialized_length(&self) -> usize { - WEIGHT_SERIALIZED_LENGTH + /// Returns account hash. + pub fn account_hash(&self) -> AccountHash { + self.account_hash } -} -impl FromBytes for Weight { - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { - let (byte, rem) = u8::from_bytes(bytes)?; - Ok((Weight::new(byte), rem)) + /// Returns main purse. + pub fn main_purse(&self) -> URef { + self.main_purse } -} -impl CLTyped for Weight { - fn cl_type() -> CLType { - CLType::U8 + /// Returns an [`AccessRights::ADD`]-only version of the main purse's [`URef`]. + pub fn main_purse_add_only(&self) -> URef { + URef::new(self.main_purse.addr(), AccessRights::ADD) } -} - -/// The length in bytes of a [`AccountHash`]. -pub const ACCOUNT_HASH_LENGTH: usize = 32; - -/// A type alias for the raw bytes of an Account Hash. -pub type AccountHashBytes = [u8; ACCOUNT_HASH_LENGTH]; - -/// A newtype wrapping a [`AccountHashBytes`] which is the raw bytes of -/// the AccountHash, a hash of Public Key and Algorithm -#[derive(DataSize, Default, PartialOrd, Ord, PartialEq, Eq, Hash, Clone, Copy)] -pub struct AccountHash(AccountHashBytes); -impl AccountHash { - /// Constructs a new `AccountHash` instance from the raw bytes of an Public Key Account Hash. - pub const fn new(value: AccountHashBytes) -> AccountHash { - AccountHash(value) + /// Returns associated keys. + pub fn associated_keys(&self) -> &AssociatedKeys { + &self.associated_keys } - /// Returns the raw bytes of the account hash as an array. - pub fn value(&self) -> AccountHashBytes { - self.0 + /// Returns action thresholds. + pub fn action_thresholds(&self) -> &ActionThresholds { + &self.action_thresholds } - /// Returns the raw bytes of the account hash as a `slice`. - pub fn as_bytes(&self) -> &[u8] { - &self.0 + /// Adds an associated key to an account. 
+ pub fn add_associated_key( + &mut self, + account_hash: AccountHash, + weight: Weight, + ) -> Result<(), AddKeyFailure> { + self.associated_keys.add_key(account_hash, weight) } - /// Formats the `AccountHash` for users getting and putting. - pub fn to_formatted_string(&self) -> String { - format!( - "{}{}", - FORMATTED_STRING_PREFIX, - base16::encode_lower(&self.0), - ) - } + /// Checks if removing given key would properly satisfy thresholds. + fn can_remove_key(&self, account_hash: AccountHash) -> bool { + let total_weight_without = self + .associated_keys + .total_keys_weight_excluding(account_hash); - /// Parses a string formatted as per `Self::to_formatted_string()` into an `AccountHash`. - pub fn from_formatted_str(input: &str) -> Result { - let remainder = input - .strip_prefix(FORMATTED_STRING_PREFIX) - .ok_or(FromStrError::InvalidPrefix)?; - let bytes = AccountHashBytes::try_from(base16::decode(remainder)?.as_ref())?; - Ok(AccountHash(bytes)) + // Returns true if the total weight calculated without given public key would be greater or + // equal to all of the thresholds. + total_weight_without >= *self.action_thresholds().deployment() + && total_weight_without >= *self.action_thresholds().key_management() } - #[doc(hidden)] - pub fn from_public_key( - public_key: &PublicKey, - blake2b_hash_fn: impl Fn(Vec) -> [u8; BLAKE2B_DIGEST_LENGTH], - ) -> Self { - const SYSTEM_LOWERCASE: &str = "system"; - const ED25519_LOWERCASE: &str = "ed25519"; - const SECP256K1_LOWERCASE: &str = "secp256k1"; - - let algorithm_name = match public_key { - PublicKey::System => SYSTEM_LOWERCASE, - PublicKey::Ed25519(_) => ED25519_LOWERCASE, - PublicKey::Secp256k1(_) => SECP256K1_LOWERCASE, - }; - let public_key_bytes: Vec = public_key.into(); - - // Prepare preimage based on the public key parameters. 
- let preimage = { - let mut data = Vec::with_capacity(algorithm_name.len() + public_key_bytes.len() + 1); - data.extend(algorithm_name.as_bytes()); - data.push(0); - data.extend(public_key_bytes); - data - }; - // Hash the preimage data using blake2b256 and return it. - let digest = blake2b_hash_fn(preimage); - Self::new(digest) - } -} + /// Checks if adding a weight to a sum of all weights excluding the given key would make the + /// resulting value to fall below any of the thresholds on account. + fn can_update_key(&self, account_hash: AccountHash, weight: Weight) -> bool { + // Calculates total weight of all keys excluding the given key + let total_weight = self + .associated_keys + .total_keys_weight_excluding(account_hash); -#[cfg(feature = "std")] -impl JsonSchema for AccountHash { - fn schema_name() -> String { - String::from("AccountHash") - } + // Safely calculate new weight by adding the updated weight + let new_weight = total_weight.value().saturating_add(weight.value()); - fn json_schema(gen: &mut SchemaGenerator) -> Schema { - let schema = gen.subschema_for::(); - let mut schema_object = schema.into_object(); - schema_object.metadata().description = Some("Hex-encoded account hash.".to_string()); - schema_object.into() + // Returns true if the new weight would be greater or equal to all of + // the thresholds. + new_weight >= self.action_thresholds().deployment().value() + && new_weight >= self.action_thresholds().key_management().value() } -} -impl Serialize for AccountHash { - fn serialize(&self, serializer: S) -> Result { - if serializer.is_human_readable() { - self.to_formatted_string().serialize(serializer) - } else { - self.0.serialize(serializer) + /// Removes an associated key from an account. + /// + /// Verifies that removing the key will not cause the remaining weight to fall below any action + /// thresholds. 
+ pub fn remove_associated_key( + &mut self, + account_hash: AccountHash, + ) -> Result<(), RemoveKeyFailure> { + if self.associated_keys.contains_key(&account_hash) { + // Check if removing this weight would fall below thresholds + if !self.can_remove_key(account_hash) { + return Err(RemoveKeyFailure::ThresholdViolation); + } } - } -} - -impl<'de> Deserialize<'de> for AccountHash { - fn deserialize>(deserializer: D) -> Result { - if deserializer.is_human_readable() { - let formatted_string = String::deserialize(deserializer)?; - AccountHash::from_formatted_str(&formatted_string).map_err(SerdeError::custom) - } else { - let bytes = AccountHashBytes::deserialize(deserializer)?; - Ok(AccountHash(bytes)) + self.associated_keys.remove_key(&account_hash) + } + + /// Updates an associated key. + /// + /// Returns an error if the update would result in a violation of the key management thresholds. + pub fn update_associated_key( + &mut self, + account_hash: AccountHash, + weight: Weight, + ) -> Result<(), UpdateKeyFailure> { + if let Some(current_weight) = self.associated_keys.get(&account_hash) { + if weight < *current_weight { + // New weight is smaller than current weight + if !self.can_update_key(account_hash, weight) { + return Err(UpdateKeyFailure::ThresholdViolation); + } + } } + self.associated_keys.update_key(account_hash, weight) + } + + /// Sets a new action threshold for a given action type for the account without checking against + /// the total weight of the associated keys. + /// + /// This should only be called when authorized by an administrator account. + /// + /// Returns an error if setting the action would cause the `ActionType::Deployment` threshold to + /// be greater than any of the other action types. 
+ pub fn set_action_threshold_unchecked( + &mut self, + action_type: ActionType, + threshold: Weight, + ) -> Result<(), SetThresholdFailure> { + self.action_thresholds.set_threshold(action_type, threshold) + } + + /// Sets a new action threshold for a given action type for the account. + /// + /// Returns an error if the new action threshold weight is greater than the total weight of the + /// account's associated keys. + pub fn set_action_threshold( + &mut self, + action_type: ActionType, + weight: Weight, + ) -> Result<(), SetThresholdFailure> { + // Verify if new threshold weight exceeds total weight of all associated + // keys. + self.can_set_threshold(weight)?; + // Set new weight for given action + self.action_thresholds.set_threshold(action_type, weight) + } + + /// Verifies if user can set action threshold. + pub fn can_set_threshold(&self, new_threshold: Weight) -> Result<(), SetThresholdFailure> { + let total_weight = self.associated_keys.total_keys_weight(); + if new_threshold > total_weight { + return Err(SetThresholdFailure::InsufficientTotalWeight); + } + Ok(()) } -} -#[doc(hidden)] -pub fn blake2b>(data: T) -> [u8; BLAKE2B_DIGEST_LENGTH] { - let mut result = [0; BLAKE2B_DIGEST_LENGTH]; - // NOTE: Assumed safe as `BLAKE2B_DIGEST_LENGTH` is a valid value for a hasher - let mut hasher = VarBlake2b::new(BLAKE2B_DIGEST_LENGTH).expect("should create hasher"); - - hasher.update(data); - hasher.finalize_variable(|slice| { - result.copy_from_slice(slice); - }); - result -} - -impl TryFrom<&[u8]> for AccountHash { - type Error = TryFromSliceForAccountHashError; - - fn try_from(bytes: &[u8]) -> Result { - AccountHashBytes::try_from(bytes) - .map(AccountHash::new) - .map_err(|_| TryFromSliceForAccountHashError(())) + /// Checks whether all authorization keys are associated with this account. 
+ pub fn can_authorize(&self, authorization_keys: &BTreeSet) -> bool { + !authorization_keys.is_empty() + && authorization_keys + .iter() + .all(|e| self.associated_keys.contains_key(e)) } -} -impl TryFrom<&alloc::vec::Vec> for AccountHash { - type Error = TryFromSliceForAccountHashError; + /// Checks whether the sum of the weights of all authorization keys is + /// greater or equal to deploy threshold. + pub fn can_deploy_with(&self, authorization_keys: &BTreeSet) -> bool { + let total_weight = self + .associated_keys + .calculate_keys_weight(authorization_keys); - fn try_from(bytes: &Vec) -> Result { - AccountHashBytes::try_from(bytes as &[u8]) - .map(AccountHash::new) - .map_err(|_| TryFromSliceForAccountHashError(())) + total_weight >= *self.action_thresholds().deployment() } -} -impl From<&PublicKey> for AccountHash { - fn from(public_key: &PublicKey) -> Self { - AccountHash::from_public_key(public_key, blake2b) - } -} - -impl Display for AccountHash { - fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { - write!(f, "{}", base16::encode_lower(&self.0)) - } -} + /// Checks whether the sum of the weights of all authorization keys is + /// greater or equal to key management threshold. + pub fn can_manage_keys_with(&self, authorization_keys: &BTreeSet) -> bool { + let total_weight = self + .associated_keys + .calculate_keys_weight(authorization_keys); -impl Debug for AccountHash { - fn fmt(&self, f: &mut Formatter) -> core::fmt::Result { - write!(f, "AccountHash({})", base16::encode_lower(&self.0)) + total_weight >= *self.action_thresholds().key_management() } -} -impl CLTyped for AccountHash { - fn cl_type() -> CLType { - CLType::ByteArray(ACCOUNT_HASH_LENGTH as u32) + // This method is not intended to be used by third party crates. 
+ #[doc(hidden)] + #[cfg(feature = "json-schema")] + pub fn example() -> &'static Self { + &ACCOUNT } } -impl ToBytes for AccountHash { - #[inline(always)] - fn to_bytes(&self) -> Result, Error> { - self.0.to_bytes() +impl ToBytes for Account { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut result = bytesrepr::allocate_buffer(self)?; + self.account_hash().write_bytes(&mut result)?; + self.named_keys().write_bytes(&mut result)?; + self.main_purse.write_bytes(&mut result)?; + self.associated_keys().write_bytes(&mut result)?; + self.action_thresholds().write_bytes(&mut result)?; + Ok(result) } - #[inline(always)] fn serialized_length(&self) -> usize { - self.0.serialized_length() - } -} - -impl FromBytes for AccountHash { - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { - let (bytes, rem) = FromBytes::from_bytes(bytes)?; - Ok((AccountHash::new(bytes), rem)) - } -} - -impl AsRef<[u8]> for AccountHash { - fn as_ref(&self) -> &[u8] { - self.0.as_ref() - } -} - -impl Distribution for Standard { - fn sample(&self, rng: &mut R) -> AccountHash { - AccountHash::new(rng.gen()) - } -} - -/// Errors that can occur while adding a new [`AccountHash`] to an account's associated keys map. -#[derive(PartialEq, Eq, Debug, Copy, Clone)] -#[cfg_attr(feature = "std", derive(Error))] -#[repr(i32)] -pub enum AddKeyFailure { - /// There are already [`MAX_ASSOCIATED_KEYS`] [`AccountHash`]s associated with the given - /// account. - #[cfg_attr( - feature = "std", - error("Unable to add new associated key because maximum amount of keys is reached") - )] - MaxKeysLimit = 1, - /// The given [`AccountHash`] is already associated with the given account. - #[cfg_attr( - feature = "std", - error("Unable to add new associated key because given key already exists") - )] - DuplicateKey = 2, - /// Caller doesn't have sufficient permissions to associate a new [`AccountHash`] with the - /// given account. 
- #[cfg_attr( - feature = "std", - error("Unable to add new associated key due to insufficient permissions") - )] - PermissionDenied = 3, -} - -// This conversion is not intended to be used by third party crates. -#[doc(hidden)] -impl TryFrom for AddKeyFailure { - type Error = TryFromIntError; - - fn try_from(value: i32) -> Result { - match value { - d if d == AddKeyFailure::MaxKeysLimit as i32 => Ok(AddKeyFailure::MaxKeysLimit), - d if d == AddKeyFailure::DuplicateKey as i32 => Ok(AddKeyFailure::DuplicateKey), - d if d == AddKeyFailure::PermissionDenied as i32 => Ok(AddKeyFailure::PermissionDenied), - _ => Err(TryFromIntError(())), - } + self.account_hash.serialized_length() + + self.named_keys.serialized_length() + + self.main_purse.serialized_length() + + self.associated_keys.serialized_length() + + self.action_thresholds.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.account_hash().write_bytes(writer)?; + self.named_keys().write_bytes(writer)?; + self.main_purse().write_bytes(writer)?; + self.associated_keys().write_bytes(writer)?; + self.action_thresholds().write_bytes(writer)?; + Ok(()) + } +} + +impl FromBytes for Account { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (account_hash, rem) = AccountHash::from_bytes(bytes)?; + let (named_keys, rem) = NamedKeys::from_bytes(rem)?; + let (main_purse, rem) = URef::from_bytes(rem)?; + let (associated_keys, rem) = AssociatedKeys::from_bytes(rem)?; + let (action_thresholds, rem) = ActionThresholds::from_bytes(rem)?; + Ok(( + Account { + account_hash, + named_keys, + main_purse, + associated_keys, + action_thresholds, + }, + rem, + )) } } -/// Errors that can occur while removing a [`AccountHash`] from an account's associated keys map. 
-#[derive(Debug, Eq, PartialEq, Copy, Clone)] -#[cfg_attr(feature = "std", derive(Error))] -#[repr(i32)] -pub enum RemoveKeyFailure { - /// The given [`AccountHash`] is not associated with the given account. - #[cfg_attr(feature = "std", error("Unable to remove a key that does not exist"))] - MissingKey = 1, - /// Caller doesn't have sufficient permissions to remove an associated [`AccountHash`] from the - /// given account. - #[cfg_attr( - feature = "std", - error("Unable to remove associated key due to insufficient permissions") - )] - PermissionDenied = 2, - /// Removing the given associated [`AccountHash`] would cause the total weight of all remaining - /// `AccountHash`s to fall below one of the action thresholds for the given account. - #[cfg_attr( - feature = "std", - error("Unable to remove a key which would violate action threshold constraints") - )] - ThresholdViolation = 3, -} - -// This conversion is not intended to be used by third party crates. #[doc(hidden)] -impl TryFrom for RemoveKeyFailure { - type Error = TryFromIntError; - - fn try_from(value: i32) -> Result { - match value { - d if d == RemoveKeyFailure::MissingKey as i32 => Ok(RemoveKeyFailure::MissingKey), - d if d == RemoveKeyFailure::PermissionDenied as i32 => { - Ok(RemoveKeyFailure::PermissionDenied) - } - d if d == RemoveKeyFailure::ThresholdViolation as i32 => { - Ok(RemoveKeyFailure::ThresholdViolation) - } - _ => Err(TryFromIntError(())), - } - } -} - -/// Errors that can occur while updating the [`Weight`] of a [`AccountHash`] in an account's -/// associated keys map. -#[derive(PartialEq, Eq, Debug, Copy, Clone)] -#[cfg_attr(feature = "std", derive(Error))] -#[repr(i32)] -pub enum UpdateKeyFailure { - /// The given [`AccountHash`] is not associated with the given account. 
- #[cfg_attr( - feature = "std", - error("Unable to update the value under an associated key that does not exist") - )] - MissingKey = 1, - /// Caller doesn't have sufficient permissions to update an associated [`AccountHash`] from the - /// given account. - #[cfg_attr( - feature = "std", - error("Unable to update associated key due to insufficient permissions") - )] - PermissionDenied = 2, - /// Updating the [`Weight`] of the given associated [`AccountHash`] would cause the total - /// weight of all `AccountHash`s to fall below one of the action thresholds for the given - /// account. - #[cfg_attr( - feature = "std", - error("Unable to update weight that would fall below any of action thresholds") - )] - ThresholdViolation = 3, -} - -// This conversion is not intended to be used by third party crates. -#[doc(hidden)] -impl TryFrom for UpdateKeyFailure { - type Error = TryFromIntError; - - fn try_from(value: i32) -> Result { - match value { - d if d == UpdateKeyFailure::MissingKey as i32 => Ok(UpdateKeyFailure::MissingKey), - d if d == UpdateKeyFailure::PermissionDenied as i32 => { - Ok(UpdateKeyFailure::PermissionDenied) - } - d if d == UpdateKeyFailure::ThresholdViolation as i32 => { - Ok(UpdateKeyFailure::ThresholdViolation) - } - _ => Err(TryFromIntError(())), +#[cfg(any(feature = "testing", feature = "gens", test))] +pub mod gens { + use proptest::prelude::*; + + use crate::{ + account::{associated_keys::gens::account_associated_keys_arb, Account, Weight}, + gens::{account_hash_arb, named_keys_arb, uref_arb}, + }; + + use super::action_thresholds::gens::account_action_thresholds_arb; + + prop_compose! 
{ + pub fn account_arb()( + account_hash in account_hash_arb(), + urefs in named_keys_arb(3), + purse in uref_arb(), + thresholds in account_action_thresholds_arb(), + mut associated_keys in account_associated_keys_arb(), + ) -> Account { + associated_keys.add_key(account_hash, Weight::new(1)).unwrap(); + Account::new( + account_hash, + urefs, + purse, + associated_keys, + thresholds, + ) } } } #[cfg(test)] mod tests { - use std::{convert::TryFrom, vec::Vec}; + use crate::{ + account::{ + Account, AccountHash, ActionThresholds, ActionType, AssociatedKeys, RemoveKeyFailure, + TryFromIntError, UpdateKeyFailure, Weight, + }, + contracts::NamedKeys, + AccessRights, URef, + }; + use std::{collections::BTreeSet, convert::TryFrom, iter::FromIterator, vec::Vec}; use super::*; #[test] fn account_hash_from_slice() { let bytes: Vec = (0..32).collect(); - let account_hash = AccountHash::try_from(&bytes[..]).expect("should create account hash"); + let account_hash = AccountHash::try_from(&bytes[..]).expect( + "should create account +hash", + ); assert_eq!(&bytes, &account_hash.as_bytes()); } @@ -652,4 +505,362 @@ mod tests { let decoded = serde_json::from_str(&json_string).unwrap(); assert_eq!(account_hash, decoded); } + + #[test] + fn associated_keys_can_authorize_keys() { + let key_1 = AccountHash::new([0; 32]); + let key_2 = AccountHash::new([1; 32]); + let key_3 = AccountHash::new([2; 32]); + let mut keys = AssociatedKeys::default(); + + keys.add_key(key_2, Weight::new(2)) + .expect("should add key_1"); + keys.add_key(key_1, Weight::new(1)) + .expect("should add key_1"); + keys.add_key(key_3, Weight::new(3)) + .expect("should add key_1"); + + let account = Account::new( + AccountHash::new([0u8; 32]), + NamedKeys::new(), + URef::new([0u8; 32], AccessRights::READ_ADD_WRITE), + keys, + // deploy: 33 (3*11) + ActionThresholds::new(Weight::new(33), Weight::new(48)) + .expect("should create thresholds"), + ); + + assert!(account.can_authorize(&BTreeSet::from_iter(vec![key_3, 
key_2, key_1]))); + assert!(account.can_authorize(&BTreeSet::from_iter(vec![key_1, key_3, key_2]))); + + assert!(account.can_authorize(&BTreeSet::from_iter(vec![key_1, key_2]))); + assert!(account.can_authorize(&BTreeSet::from_iter(vec![key_1]))); + + assert!(!account.can_authorize(&BTreeSet::from_iter(vec![ + key_1, + key_2, + AccountHash::new([42; 32]) + ]))); + assert!(!account.can_authorize(&BTreeSet::from_iter(vec![ + AccountHash::new([42; 32]), + key_1, + key_2 + ]))); + assert!(!account.can_authorize(&BTreeSet::from_iter(vec![ + AccountHash::new([43; 32]), + AccountHash::new([44; 32]), + AccountHash::new([42; 32]) + ]))); + assert!(!account.can_authorize(&BTreeSet::new())); + } + + #[test] + fn account_can_deploy_with() { + let associated_keys = { + let mut res = AssociatedKeys::new(AccountHash::new([1u8; 32]), Weight::new(1)); + res.add_key(AccountHash::new([2u8; 32]), Weight::new(11)) + .expect("should add key 1"); + res.add_key(AccountHash::new([3u8; 32]), Weight::new(11)) + .expect("should add key 2"); + res.add_key(AccountHash::new([4u8; 32]), Weight::new(11)) + .expect("should add key 3"); + res + }; + let account = Account::new( + AccountHash::new([0u8; 32]), + NamedKeys::new(), + URef::new([0u8; 32], AccessRights::READ_ADD_WRITE), + associated_keys, + // deploy: 33 (3*11) + ActionThresholds::new(Weight::new(33), Weight::new(48)) + .expect("should create thresholds"), + ); + + // sum: 22, required 33 - can't deploy + assert!(!account.can_deploy_with(&BTreeSet::from_iter(vec![ + AccountHash::new([3u8; 32]), + AccountHash::new([2u8; 32]), + ]))); + + // sum: 33, required 33 - can deploy + assert!(account.can_deploy_with(&BTreeSet::from_iter(vec![ + AccountHash::new([4u8; 32]), + AccountHash::new([3u8; 32]), + AccountHash::new([2u8; 32]), + ]))); + + // sum: 34, required 33 - can deploy + assert!(account.can_deploy_with(&BTreeSet::from_iter(vec![ + AccountHash::new([2u8; 32]), + AccountHash::new([1u8; 32]), + AccountHash::new([4u8; 32]), + 
AccountHash::new([3u8; 32]), + ]))); + } + + #[test] + fn account_can_manage_keys_with() { + let associated_keys = { + let mut res = AssociatedKeys::new(AccountHash::new([1u8; 32]), Weight::new(1)); + res.add_key(AccountHash::new([2u8; 32]), Weight::new(11)) + .expect("should add key 1"); + res.add_key(AccountHash::new([3u8; 32]), Weight::new(11)) + .expect("should add key 2"); + res.add_key(AccountHash::new([4u8; 32]), Weight::new(11)) + .expect("should add key 3"); + res + }; + let account = Account::new( + AccountHash::new([0u8; 32]), + NamedKeys::new(), + URef::new([0u8; 32], AccessRights::READ_ADD_WRITE), + associated_keys, + // deploy: 33 (3*11) + ActionThresholds::new(Weight::new(11), Weight::new(33)) + .expect("should create thresholds"), + ); + + // sum: 22, required 33 - can't manage + assert!(!account.can_manage_keys_with(&BTreeSet::from_iter(vec![ + AccountHash::new([3u8; 32]), + AccountHash::new([2u8; 32]), + ]))); + + // sum: 33, required 33 - can manage + assert!(account.can_manage_keys_with(&BTreeSet::from_iter(vec![ + AccountHash::new([4u8; 32]), + AccountHash::new([3u8; 32]), + AccountHash::new([2u8; 32]), + ]))); + + // sum: 34, required 33 - can manage + assert!(account.can_manage_keys_with(&BTreeSet::from_iter(vec![ + AccountHash::new([2u8; 32]), + AccountHash::new([1u8; 32]), + AccountHash::new([4u8; 32]), + AccountHash::new([3u8; 32]), + ]))); + } + + #[test] + fn set_action_threshold_higher_than_total_weight() { + let identity_key = AccountHash::new([1u8; 32]); + let key_1 = AccountHash::new([2u8; 32]); + let key_2 = AccountHash::new([3u8; 32]); + let key_3 = AccountHash::new([4u8; 32]); + let associated_keys = { + let mut res = AssociatedKeys::new(identity_key, Weight::new(1)); + res.add_key(key_1, Weight::new(2)) + .expect("should add key 1"); + res.add_key(key_2, Weight::new(3)) + .expect("should add key 2"); + res.add_key(key_3, Weight::new(4)) + .expect("should add key 3"); + res + }; + let mut account = Account::new( + 
AccountHash::new([0u8; 32]), + NamedKeys::new(), + URef::new([0u8; 32], AccessRights::READ_ADD_WRITE), + associated_keys, + // deploy: 33 (3*11) + ActionThresholds::new(Weight::new(33), Weight::new(48)) + .expect("should create thresholds"), + ); + + assert_eq!( + account + .set_action_threshold(ActionType::Deployment, Weight::new(1 + 2 + 3 + 4 + 1)) + .unwrap_err(), + SetThresholdFailure::InsufficientTotalWeight, + ); + assert_eq!( + account + .set_action_threshold(ActionType::Deployment, Weight::new(1 + 2 + 3 + 4 + 245)) + .unwrap_err(), + SetThresholdFailure::InsufficientTotalWeight, + ) + } + + #[test] + fn remove_key_would_violate_action_thresholds() { + let identity_key = AccountHash::new([1u8; 32]); + let key_1 = AccountHash::new([2u8; 32]); + let key_2 = AccountHash::new([3u8; 32]); + let key_3 = AccountHash::new([4u8; 32]); + let associated_keys = { + let mut res = AssociatedKeys::new(identity_key, Weight::new(1)); + res.add_key(key_1, Weight::new(2)) + .expect("should add key 1"); + res.add_key(key_2, Weight::new(3)) + .expect("should add key 2"); + res.add_key(key_3, Weight::new(4)) + .expect("should add key 3"); + res + }; + let mut account = Account::new( + AccountHash::new([0u8; 32]), + NamedKeys::new(), + URef::new([0u8; 32], AccessRights::READ_ADD_WRITE), + associated_keys, + // deploy: 33 (3*11) + ActionThresholds::new(Weight::new(1 + 2 + 3 + 4), Weight::new(1 + 2 + 3 + 4 + 5)) + .expect("should create thresholds"), + ); + + assert_eq!( + account.remove_associated_key(key_3).unwrap_err(), + RemoveKeyFailure::ThresholdViolation, + ) + } + + #[test] + fn updating_key_would_violate_action_thresholds() { + let identity_key = AccountHash::new([1u8; 32]); + let identity_key_weight = Weight::new(1); + let key_1 = AccountHash::new([2u8; 32]); + let key_1_weight = Weight::new(2); + let key_2 = AccountHash::new([3u8; 32]); + let key_2_weight = Weight::new(3); + let key_3 = AccountHash::new([4u8; 32]); + let key_3_weight = Weight::new(4); + let 
associated_keys = { + let mut res = AssociatedKeys::new(identity_key, identity_key_weight); + res.add_key(key_1, key_1_weight).expect("should add key 1"); + res.add_key(key_2, key_2_weight).expect("should add key 2"); + res.add_key(key_3, key_3_weight).expect("should add key 3"); + // 1 + 2 + 3 + 4 + res + }; + + let deployment_threshold = Weight::new( + identity_key_weight.value() + + key_1_weight.value() + + key_2_weight.value() + + key_3_weight.value(), + ); + let key_management_threshold = Weight::new(deployment_threshold.value() + 1); + let mut account = Account::new( + identity_key, + NamedKeys::new(), + URef::new([0u8; 32], AccessRights::READ_ADD_WRITE), + associated_keys, + // deploy: 33 (3*11) + ActionThresholds::new(deployment_threshold, key_management_threshold) + .expect("should create thresholds"), + ); + + // Decreases by 3 + assert_eq!( + account + .clone() + .update_associated_key(key_3, Weight::new(1)) + .unwrap_err(), + UpdateKeyFailure::ThresholdViolation, + ); + + // increase total weight (12) + account + .update_associated_key(identity_key, Weight::new(3)) + .unwrap(); + + // variant a) decrease total weight by 1 (total 11) + account + .clone() + .update_associated_key(key_3, Weight::new(3)) + .unwrap(); + // variant b) decrease total weight by 3 (total 9) - fail + assert_eq!( + account + .update_associated_key(key_3, Weight::new(1)) + .unwrap_err(), + UpdateKeyFailure::ThresholdViolation + ); + } + + #[test] + fn overflowing_should_allow_removal() { + let identity_key = AccountHash::new([42; 32]); + let key_1 = AccountHash::new([2u8; 32]); + let key_2 = AccountHash::new([3u8; 32]); + + let associated_keys = { + // Identity + let mut res = AssociatedKeys::new(identity_key, Weight::new(1)); + + // Spare key + res.add_key(key_1, Weight::new(2)) + .expect("should add key 1"); + // Big key + res.add_key(key_2, Weight::new(255)) + .expect("should add key 2"); + + res + }; + + let mut account = Account::new( + identity_key, + NamedKeys::new(), + 
URef::new([0u8; 32], AccessRights::READ_ADD_WRITE), + associated_keys, + ActionThresholds::new(Weight::new(1), Weight::new(254)) + .expect("should create thresholds"), + ); + + account.remove_associated_key(key_1).expect("should work") + } + + #[test] + fn overflowing_should_allow_updating() { + let identity_key = AccountHash::new([1; 32]); + let identity_key_weight = Weight::new(1); + let key_1 = AccountHash::new([2u8; 32]); + let key_1_weight = Weight::new(3); + let key_2 = AccountHash::new([3u8; 32]); + let key_2_weight = Weight::new(255); + let deployment_threshold = Weight::new(1); + let key_management_threshold = Weight::new(254); + + let associated_keys = { + // Identity + let mut res = AssociatedKeys::new(identity_key, identity_key_weight); + + // Spare key + res.add_key(key_1, key_1_weight).expect("should add key 1"); + // Big key + res.add_key(key_2, key_2_weight).expect("should add key 2"); + + res + }; + + let mut account = Account::new( + identity_key, + NamedKeys::new(), + URef::new([0u8; 32], AccessRights::READ_ADD_WRITE), + associated_keys, + ActionThresholds::new(deployment_threshold, key_management_threshold) + .expect("should create thresholds"), + ); + + // decrease so total weight would be changed from 1 + 3 + 255 to 1 + 1 + 255 + account + .update_associated_key(key_1, Weight::new(1)) + .expect("should work"); + } +} + +#[cfg(test)] +mod proptests { + use proptest::prelude::*; + + use crate::bytesrepr; + + use super::*; + + proptest! 
{ + #[test] + fn test_value_account(acct in gens::account_arb()) { + bytesrepr::test_serialization_roundtrip(&acct); + } + } } diff --git a/types/src/account/account_hash.rs b/types/src/account/account_hash.rs new file mode 100644 index 0000000000..00bf784ca5 --- /dev/null +++ b/types/src/account/account_hash.rs @@ -0,0 +1,219 @@ +use alloc::{string::String, vec::Vec}; +use core::{ + convert::{From, TryFrom}, + fmt::{Debug, Display, Formatter}, +}; +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::{ + distributions::{Distribution, Standard}, + Rng, +}; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer}; + +use crate::{ + addressable_entity::FromStrError, + bytesrepr::{Error, FromBytes, ToBytes}, + checksummed_hex, crypto, CLType, CLTyped, PublicKey, BLAKE2B_DIGEST_LENGTH, +}; + +/// The length in bytes of a [`AccountHash`]. +pub const ACCOUNT_HASH_LENGTH: usize = 32; +/// The prefix applied to the hex-encoded `AccountHash` to produce a formatted string +/// representation. +pub const ACCOUNT_HASH_FORMATTED_STRING_PREFIX: &str = "account-hash-"; + +/// A newtype wrapping an array which contains the raw bytes of +/// the AccountHash, a hash of Public Key and Algorithm +#[derive(Default, PartialOrd, Ord, PartialEq, Eq, Hash, Clone, Copy)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr( + feature = "json-schema", + derive(JsonSchema), + schemars(description = "Account hash as a formatted string.") +)] +pub struct AccountHash( + #[cfg_attr(feature = "json-schema", schemars(skip, with = "String"))] + pub [u8; ACCOUNT_HASH_LENGTH], +); + +impl AccountHash { + /// Constructs a new `AccountHash` instance from the raw bytes of an Public Key Account Hash. 
+ pub const fn new(value: [u8; ACCOUNT_HASH_LENGTH]) -> AccountHash { + AccountHash(value) + } + + /// Returns the raw bytes of the account hash as an array. + pub fn value(&self) -> [u8; ACCOUNT_HASH_LENGTH] { + self.0 + } + + /// Returns the raw bytes of the account hash as a `slice`. + pub fn as_bytes(&self) -> &[u8] { + &self.0 + } + + /// Formats the `AccountHash` for users getting and putting. + pub fn to_formatted_string(self) -> String { + format!( + "{}{}", + ACCOUNT_HASH_FORMATTED_STRING_PREFIX, + base16::encode_lower(&self.0), + ) + } + + /// Hexadecimal representation of the hash. + pub fn to_hex_string(&self) -> String { + base16::encode_lower(&self.0) + } + + /// Parses a string formatted as per `Self::to_formatted_string()` into an `AccountHash`. + pub fn from_formatted_str(input: &str) -> Result { + let remainder = input + .strip_prefix(ACCOUNT_HASH_FORMATTED_STRING_PREFIX) + .ok_or(FromStrError::InvalidPrefix)?; + let bytes = + <[u8; ACCOUNT_HASH_LENGTH]>::try_from(checksummed_hex::decode(remainder)?.as_ref())?; + Ok(AccountHash(bytes)) + } + + /// Parses a `PublicKey` and outputs the corresponding account hash. + pub fn from_public_key( + public_key: &PublicKey, + blake2b_hash_fn: impl Fn(Vec) -> [u8; BLAKE2B_DIGEST_LENGTH], + ) -> Self { + const SYSTEM_LOWERCASE: &str = "system"; + const ED25519_LOWERCASE: &str = "ed25519"; + const SECP256K1_LOWERCASE: &str = "secp256k1"; + + let algorithm_name = match public_key { + PublicKey::System => SYSTEM_LOWERCASE, + PublicKey::Ed25519(_) => ED25519_LOWERCASE, + PublicKey::Secp256k1(_) => SECP256K1_LOWERCASE, + }; + let public_key_bytes: Vec = public_key.into(); + + // Prepare preimage based on the public key parameters. + let preimage = { + let mut data = Vec::with_capacity(algorithm_name.len() + public_key_bytes.len() + 1); + data.extend(algorithm_name.as_bytes()); + data.push(0); + data.extend(public_key_bytes); + data + }; + // Hash the preimage data using blake2b256 and return it. 
+ let digest = blake2b_hash_fn(preimage); + Self::new(digest) + } +} + +impl Serialize for AccountHash { + fn serialize(&self, serializer: S) -> Result { + if serializer.is_human_readable() { + self.to_formatted_string().serialize(serializer) + } else { + self.0.serialize(serializer) + } + } +} + +impl<'de> Deserialize<'de> for AccountHash { + fn deserialize>(deserializer: D) -> Result { + if deserializer.is_human_readable() { + let formatted_string = String::deserialize(deserializer)?; + AccountHash::from_formatted_str(&formatted_string).map_err(SerdeError::custom) + } else { + let bytes = <[u8; ACCOUNT_HASH_LENGTH]>::deserialize(deserializer)?; + Ok(AccountHash(bytes)) + } + } +} + +impl TryFrom<&[u8]> for AccountHash { + type Error = TryFromSliceForAccountHashError; + + fn try_from(bytes: &[u8]) -> Result { + <[u8; ACCOUNT_HASH_LENGTH]>::try_from(bytes) + .map(AccountHash::new) + .map_err(|_| TryFromSliceForAccountHashError(())) + } +} + +impl TryFrom<&alloc::vec::Vec> for AccountHash { + type Error = TryFromSliceForAccountHashError; + + fn try_from(bytes: &Vec) -> Result { + <[u8; ACCOUNT_HASH_LENGTH]>::try_from(bytes as &[u8]) + .map(AccountHash::new) + .map_err(|_| TryFromSliceForAccountHashError(())) + } +} + +impl From<&PublicKey> for AccountHash { + fn from(public_key: &PublicKey) -> Self { + AccountHash::from_public_key(public_key, crypto::blake2b) + } +} + +impl Display for AccountHash { + fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { + write!(f, "{}", base16::encode_lower(&self.0)) + } +} + +impl Debug for AccountHash { + fn fmt(&self, f: &mut Formatter) -> core::fmt::Result { + write!(f, "AccountHash({})", base16::encode_lower(&self.0)) + } +} + +impl CLTyped for AccountHash { + fn cl_type() -> CLType { + CLType::ByteArray(ACCOUNT_HASH_LENGTH as u32) + } +} + +impl ToBytes for AccountHash { + #[inline(always)] + fn to_bytes(&self) -> Result, Error> { + self.0.to_bytes() + } + + #[inline(always)] + fn serialized_length(&self) -> usize { + 
self.0.serialized_length() + } + + #[inline(always)] + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + writer.extend_from_slice(&self.0); + Ok(()) + } +} + +impl FromBytes for AccountHash { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (bytes, rem) = FromBytes::from_bytes(bytes)?; + Ok((AccountHash::new(bytes), rem)) + } +} + +impl AsRef<[u8]> for AccountHash { + fn as_ref(&self) -> &[u8] { + self.0.as_ref() + } +} + +/// Associated error type of `TryFrom<&[u8]>` for [`AccountHash`]. +#[derive(Debug)] +pub struct TryFromSliceForAccountHashError(()); + +#[cfg(any(feature = "testing", test))] +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> AccountHash { + AccountHash::new(rng.gen()) + } +} diff --git a/types/src/account/action_thresholds.rs b/types/src/account/action_thresholds.rs new file mode 100644 index 0000000000..ce2e492c7e --- /dev/null +++ b/types/src/account/action_thresholds.rs @@ -0,0 +1,175 @@ +//! This module contains types and functions for managing action thresholds. + +use alloc::vec::Vec; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use crate::{ + account::{ActionType, SetThresholdFailure, Weight}, + addressable_entity::WEIGHT_SERIALIZED_LENGTH, + bytesrepr::{self, Error, FromBytes, ToBytes}, +}; + +/// Thresholds that have to be met when executing an action of a certain type. +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[cfg_attr(feature = "json-schema", schemars(rename = "AccountActionThresholds"))] +pub struct ActionThresholds { + /// Threshold for deploy execution. + pub deployment: Weight, + /// Threshold for managing action threshold. 
+ pub key_management: Weight, +} + +impl ActionThresholds { + /// Creates new ActionThresholds object with provided weights + /// + /// Requires deployment threshold to be lower than or equal to + /// key management threshold. + pub fn new( + deployment: Weight, + key_management: Weight, + ) -> Result { + if deployment > key_management { + return Err(SetThresholdFailure::DeploymentThreshold); + } + Ok(ActionThresholds { + deployment, + key_management, + }) + } + /// Sets new threshold for [ActionType::Deployment]. + /// Should return an error if setting new threshold for `action_type` breaks + /// one of the invariants. Currently, invariant is that + /// `ActionType::Deployment` threshold shouldn't be higher than any + /// other, which should be checked both when increasing `Deployment` + /// threshold and decreasing the other. + pub fn set_deployment_threshold( + &mut self, + new_threshold: Weight, + ) -> Result<(), SetThresholdFailure> { + if new_threshold > self.key_management { + Err(SetThresholdFailure::DeploymentThreshold) + } else { + self.deployment = new_threshold; + Ok(()) + } + } + + /// Sets new threshold for [ActionType::KeyManagement]. + pub fn set_key_management_threshold( + &mut self, + new_threshold: Weight, + ) -> Result<(), SetThresholdFailure> { + if self.deployment > new_threshold { + Err(SetThresholdFailure::KeyManagementThreshold) + } else { + self.key_management = new_threshold; + Ok(()) + } + } + + /// Returns the deployment action threshold. + pub fn deployment(&self) -> &Weight { + &self.deployment + } + + /// Returns key management action threshold. + pub fn key_management(&self) -> &Weight { + &self.key_management + } + + /// Unified function that takes an action type, and changes appropriate + /// threshold defined by the [ActionType] variants. 
+ pub fn set_threshold( + &mut self, + action_type: ActionType, + new_threshold: Weight, + ) -> Result<(), SetThresholdFailure> { + match action_type { + ActionType::Deployment => self.set_deployment_threshold(new_threshold), + ActionType::KeyManagement => self.set_key_management_threshold(new_threshold), + } + } +} + +impl Default for ActionThresholds { + fn default() -> Self { + ActionThresholds { + deployment: Weight::new(1), + key_management: Weight::new(1), + } + } +} + +impl ToBytes for ActionThresholds { + fn to_bytes(&self) -> Result, Error> { + let mut result = bytesrepr::unchecked_allocate_buffer(self); + result.append(&mut self.deployment.to_bytes()?); + result.append(&mut self.key_management.to_bytes()?); + Ok(result) + } + + fn serialized_length(&self) -> usize { + 2 * WEIGHT_SERIALIZED_LENGTH + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.deployment().write_bytes(writer)?; + self.key_management().write_bytes(writer)?; + Ok(()) + } +} + +impl FromBytes for ActionThresholds { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (deployment, rem) = Weight::from_bytes(bytes)?; + let (key_management, rem) = Weight::from_bytes(rem)?; + let ret = ActionThresholds { + deployment, + key_management, + }; + Ok((ret, rem)) + } +} + +#[doc(hidden)] +#[cfg(any(feature = "testing", feature = "gens", test))] +pub mod gens { + use proptest::prelude::*; + + use super::ActionThresholds; + + pub fn account_action_thresholds_arb() -> impl Strategy { + Just(Default::default()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn should_create_new_action_thresholds() { + let action_thresholds = ActionThresholds::new(Weight::new(1), Weight::new(42)).unwrap(); + assert_eq!(*action_thresholds.deployment(), Weight::new(1)); + assert_eq!(*action_thresholds.key_management(), Weight::new(42)); + } + + #[test] + fn should_not_create_action_thresholds_with_invalid_deployment_threshold() { + // deployment cant 
be greater than key management + assert!(ActionThresholds::new(Weight::new(5), Weight::new(1)).is_err()); + } + + #[test] + fn serialization_roundtrip() { + let action_thresholds = ActionThresholds::new(Weight::new(1), Weight::new(42)).unwrap(); + bytesrepr::test_serialization_roundtrip(&action_thresholds); + } +} diff --git a/types/src/account/action_type.rs b/types/src/account/action_type.rs new file mode 100644 index 0000000000..65848f79e6 --- /dev/null +++ b/types/src/account/action_type.rs @@ -0,0 +1,32 @@ +use core::convert::TryFrom; + +use crate::addressable_entity::TryFromIntError; + +/// The various types of action which can be performed in the context of a given account. +#[repr(u32)] +pub enum ActionType { + /// Represents performing a deploy. + Deployment = 0, + /// Represents changing the associated keys (i.e. map of [`AccountHash`](super::AccountHash)s + /// to [`Weight`](super::Weight)s) or action thresholds (i.e. the total + /// [`Weight`](super::Weight)s of signing [`AccountHash`](super::AccountHash)s required to + /// perform various actions). + KeyManagement = 1, +} + +// This conversion is not intended to be used by third party crates. +#[doc(hidden)] +impl TryFrom for ActionType { + type Error = TryFromIntError; + + fn try_from(value: u32) -> Result { + // This doesn't use `num_derive` traits such as FromPrimitive and ToPrimitive + // that helps to automatically create `from_u32` and `to_u32`. This approach + // gives better control over generated code. + match value { + d if d == ActionType::Deployment as u32 => Ok(ActionType::Deployment), + d if d == ActionType::KeyManagement as u32 => Ok(ActionType::KeyManagement), + _ => Err(TryFromIntError(())), + } + } +} diff --git a/types/src/account/associated_keys.rs b/types/src/account/associated_keys.rs new file mode 100644 index 0000000000..b94880b5f6 --- /dev/null +++ b/types/src/account/associated_keys.rs @@ -0,0 +1,527 @@ +//! 
This module contains types and functions for working with keys associated with an account. + +use alloc::{ + collections::{btree_map::Entry, BTreeMap, BTreeSet}, + vec::Vec, +}; +use core::{ + fmt, + fmt::{Display, Formatter}, +}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; +#[cfg(feature = "json-schema")] +use serde_map_to_array::KeyValueJsonSchema; +use serde_map_to_array::{BTreeMapToArray, KeyValueLabels}; + +use crate::{ + account::{AccountHash, TryFromIntError, Weight}, + bytesrepr::{self, FromBytes, ToBytes}, +}; + +/// Errors that can occur while adding a new [`AccountHash`] to an account's associated keys map. +#[derive(PartialEq, Eq, Debug, Copy, Clone)] +#[repr(i32)] +#[non_exhaustive] +pub enum AddKeyFailure { + /// There are already maximum [`AccountHash`]s associated with the given account. + MaxKeysLimit = 1, + /// The given [`AccountHash`] is already associated with the given account. + DuplicateKey = 2, + /// Caller doesn't have sufficient permissions to associate a new [`AccountHash`] with the + /// given account. + PermissionDenied = 3, +} + +impl Display for AddKeyFailure { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + AddKeyFailure::MaxKeysLimit => formatter.write_str( + "Unable to add new associated key because maximum amount of keys is reached", + ), + AddKeyFailure::DuplicateKey => formatter + .write_str("Unable to add new associated key because given key already exists"), + AddKeyFailure::PermissionDenied => formatter + .write_str("Unable to add new associated key due to insufficient permissions"), + } + } +} + +// This conversion is not intended to be used by third party crates. 
+#[doc(hidden)] +impl TryFrom for AddKeyFailure { + type Error = TryFromIntError; + + fn try_from(value: i32) -> Result { + match value { + d if d == AddKeyFailure::MaxKeysLimit as i32 => Ok(AddKeyFailure::MaxKeysLimit), + d if d == AddKeyFailure::DuplicateKey as i32 => Ok(AddKeyFailure::DuplicateKey), + d if d == AddKeyFailure::PermissionDenied as i32 => Ok(AddKeyFailure::PermissionDenied), + _ => Err(TryFromIntError(())), + } + } +} + +/// Errors that can occur while removing a [`AccountHash`] from an account's associated keys map. +#[derive(Debug, Eq, PartialEq, Copy, Clone)] +#[repr(i32)] +#[non_exhaustive] +pub enum RemoveKeyFailure { + /// The given [`AccountHash`] is not associated with the given account. + MissingKey = 1, + /// Caller doesn't have sufficient permissions to remove an associated [`AccountHash`] from the + /// given account. + PermissionDenied = 2, + /// Removing the given associated [`AccountHash`] would cause the total weight of all remaining + /// `AccountHash`s to fall below one of the action thresholds for the given account. + ThresholdViolation = 3, +} + +impl Display for RemoveKeyFailure { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + RemoveKeyFailure::MissingKey => { + formatter.write_str("Unable to remove a key that does not exist") + } + RemoveKeyFailure::PermissionDenied => formatter + .write_str("Unable to remove associated key due to insufficient permissions"), + RemoveKeyFailure::ThresholdViolation => formatter.write_str( + "Unable to remove a key which would violate action threshold constraints", + ), + } + } +} + +// This conversion is not intended to be used by third party crates. 
+#[doc(hidden)] +impl TryFrom for RemoveKeyFailure { + type Error = TryFromIntError; + + fn try_from(value: i32) -> Result { + match value { + d if d == RemoveKeyFailure::MissingKey as i32 => Ok(RemoveKeyFailure::MissingKey), + d if d == RemoveKeyFailure::PermissionDenied as i32 => { + Ok(RemoveKeyFailure::PermissionDenied) + } + d if d == RemoveKeyFailure::ThresholdViolation as i32 => { + Ok(RemoveKeyFailure::ThresholdViolation) + } + _ => Err(TryFromIntError(())), + } + } +} + +/// Errors that can occur while updating the [`crate::addressable_entity::Weight`] of a +/// [`AccountHash`] in an account's associated keys map. +#[derive(PartialEq, Eq, Debug, Copy, Clone)] +#[repr(i32)] +#[non_exhaustive] +pub enum UpdateKeyFailure { + /// The given [`AccountHash`] is not associated with the given account. + MissingKey = 1, + /// Caller doesn't have sufficient permissions to update an associated [`AccountHash`] from the + /// given account. + PermissionDenied = 2, + /// Updating the [`crate::addressable_entity::Weight`] of the given associated [`AccountHash`] + /// would cause the total weight of all `AccountHash`s to fall below one of the action + /// thresholds for the given account. + ThresholdViolation = 3, +} + +impl Display for UpdateKeyFailure { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + UpdateKeyFailure::MissingKey => formatter.write_str( + "Unable to update the value under an associated key that does not exist", + ), + UpdateKeyFailure::PermissionDenied => formatter + .write_str("Unable to update associated key due to insufficient permissions"), + UpdateKeyFailure::ThresholdViolation => formatter.write_str( + "Unable to update weight that would fall below any of action thresholds", + ), + } + } +} + +// This conversion is not intended to be used by third party crates. 
+#[doc(hidden)] +impl TryFrom for UpdateKeyFailure { + type Error = TryFromIntError; + + fn try_from(value: i32) -> Result { + match value { + d if d == UpdateKeyFailure::MissingKey as i32 => Ok(UpdateKeyFailure::MissingKey), + d if d == UpdateKeyFailure::PermissionDenied as i32 => { + Ok(UpdateKeyFailure::PermissionDenied) + } + d if d == UpdateKeyFailure::ThresholdViolation as i32 => { + Ok(UpdateKeyFailure::ThresholdViolation) + } + _ => Err(TryFromIntError(())), + } + } +} + +/// A collection of weighted public keys (represented as account hashes) associated with an account. +#[derive(Default, PartialOrd, Ord, PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[cfg_attr(feature = "json-schema", schemars(rename = "AccountAssociatedKeys"))] +#[serde(deny_unknown_fields)] +#[rustfmt::skip] +pub struct AssociatedKeys( + #[serde(with = "BTreeMapToArray::")] + BTreeMap, +); + +impl AssociatedKeys { + /// Constructs a new AssociatedKeys. + pub fn new(key: AccountHash, weight: Weight) -> AssociatedKeys { + let mut bt: BTreeMap = BTreeMap::new(); + bt.insert(key, weight); + AssociatedKeys(bt) + } + + /// Adds a new AssociatedKey to the set. + /// + /// Returns true if added successfully, false otherwise. + pub fn add_key(&mut self, key: AccountHash, weight: Weight) -> Result<(), AddKeyFailure> { + match self.0.entry(key) { + Entry::Vacant(entry) => { + entry.insert(weight); + } + Entry::Occupied(_) => return Err(AddKeyFailure::DuplicateKey), + } + Ok(()) + } + + /// Removes key from the associated keys set. + /// Returns true if value was found in the set prior to the removal, false + /// otherwise. + pub fn remove_key(&mut self, key: &AccountHash) -> Result<(), RemoveKeyFailure> { + self.0 + .remove(key) + .map(|_| ()) + .ok_or(RemoveKeyFailure::MissingKey) + } + + /// Adds new AssociatedKey to the set. 
+ /// Returns true if added successfully, false otherwise. + pub fn update_key(&mut self, key: AccountHash, weight: Weight) -> Result<(), UpdateKeyFailure> { + match self.0.entry(key) { + Entry::Vacant(_) => { + return Err(UpdateKeyFailure::MissingKey); + } + Entry::Occupied(mut entry) => { + *entry.get_mut() = weight; + } + } + Ok(()) + } + + /// Returns the weight of an account hash. + pub fn get(&self, key: &AccountHash) -> Option<&Weight> { + self.0.get(key) + } + + /// Returns `true` if a given key exists. + pub fn contains_key(&self, key: &AccountHash) -> bool { + self.0.contains_key(key) + } + + /// Returns an iterator over the account hash and the weights. + pub fn iter(&self) -> impl Iterator { + self.0.iter() + } + + /// Returns the count of the associated keys. + pub fn len(&self) -> usize { + self.0.len() + } + + /// Returns `true` if the associated keys are empty. + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } + + /// Helper method that calculates weight for keys that comes from any + /// source. + /// + /// This method is not concerned about uniqueness of the passed iterable. + /// Uniqueness is determined based on the input collection properties, + /// which is either BTreeSet (in [`AssociatedKeys::calculate_keys_weight`]) + /// or BTreeMap (in [`AssociatedKeys::total_keys_weight`]). 
+ fn calculate_any_keys_weight<'a>(&self, keys: impl Iterator) -> Weight { + let total = keys + .filter_map(|key| self.0.get(key)) + .fold(0u8, |acc, w| acc.saturating_add(w.value())); + + Weight::new(total) + } + + /// Calculates total weight of authorization keys provided by an argument + pub fn calculate_keys_weight(&self, authorization_keys: &BTreeSet) -> Weight { + self.calculate_any_keys_weight(authorization_keys.iter()) + } + + /// Calculates total weight of all authorization keys + pub fn total_keys_weight(&self) -> Weight { + self.calculate_any_keys_weight(self.0.keys()) + } + + /// Calculates total weight of all authorization keys excluding a given key + pub fn total_keys_weight_excluding(&self, account_hash: AccountHash) -> Weight { + self.calculate_any_keys_weight(self.0.keys().filter(|&&element| element != account_hash)) + } +} + +impl From> for AssociatedKeys { + fn from(associated_keys: BTreeMap) -> Self { + Self(associated_keys) + } +} + +impl From for BTreeMap { + fn from(associated_keys: AssociatedKeys) -> Self { + associated_keys.0 + } +} + +impl ToBytes for AssociatedKeys { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.0.to_bytes() + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.0.write_bytes(writer) + } +} + +impl FromBytes for AssociatedKeys { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (associated_keys, rem) = FromBytes::from_bytes(bytes)?; + Ok((AssociatedKeys(associated_keys), rem)) + } +} + +struct Labels; + +impl KeyValueLabels for Labels { + const KEY: &'static str = "account_hash"; + const VALUE: &'static str = "weight"; +} + +#[cfg(feature = "json-schema")] +impl KeyValueJsonSchema for Labels { + const JSON_SCHEMA_KV_NAME: Option<&'static str> = Some("AssociatedKey"); + const JSON_SCHEMA_KV_DESCRIPTION: Option<&'static str> = Some("A weighted public key."); + const 
JSON_SCHEMA_KEY_DESCRIPTION: Option<&'static str> = + Some("The account hash of the public key."); + const JSON_SCHEMA_VALUE_DESCRIPTION: Option<&'static str> = + Some("The weight assigned to the public key."); +} + +#[doc(hidden)] +#[cfg(any(feature = "testing", feature = "gens", test))] +pub mod gens { + use proptest::prelude::*; + + use crate::gens::{account_hash_arb, account_weight_arb}; + + use super::AssociatedKeys; + + pub fn account_associated_keys_arb() -> impl Strategy { + proptest::collection::btree_map(account_hash_arb(), account_weight_arb(), 10).prop_map( + |keys| { + let mut associated_keys = AssociatedKeys::default(); + keys.into_iter().for_each(|(k, v)| { + associated_keys.add_key(k, v).unwrap(); + }); + associated_keys + }, + ) + } +} + +#[cfg(test)] +mod tests { + use std::{collections::BTreeSet, iter::FromIterator}; + + use crate::{ + account::{AccountHash, Weight, ACCOUNT_HASH_LENGTH}, + bytesrepr, + }; + + use super::*; + + #[test] + fn associated_keys_add() { + let mut keys = + AssociatedKeys::new(AccountHash::new([0u8; ACCOUNT_HASH_LENGTH]), Weight::new(1)); + let new_pk = AccountHash::new([1u8; ACCOUNT_HASH_LENGTH]); + let new_pk_weight = Weight::new(2); + assert!(keys.add_key(new_pk, new_pk_weight).is_ok()); + assert_eq!(keys.get(&new_pk), Some(&new_pk_weight)) + } + + #[test] + fn associated_keys_add_duplicate() { + let pk = AccountHash::new([0u8; ACCOUNT_HASH_LENGTH]); + let weight = Weight::new(1); + let mut keys = AssociatedKeys::new(pk, weight); + assert_eq!( + keys.add_key(pk, Weight::new(10)), + Err(AddKeyFailure::DuplicateKey) + ); + assert_eq!(keys.get(&pk), Some(&weight)); + } + + #[test] + fn associated_keys_remove() { + let pk = AccountHash::new([0u8; ACCOUNT_HASH_LENGTH]); + let weight = Weight::new(1); + let mut keys = AssociatedKeys::new(pk, weight); + assert!(keys.remove_key(&pk).is_ok()); + assert!(keys + .remove_key(&AccountHash::new([1u8; ACCOUNT_HASH_LENGTH])) + .is_err()); + } + + #[test] + fn associated_keys_update() 
{ + let pk1 = AccountHash::new([0u8; ACCOUNT_HASH_LENGTH]); + let pk2 = AccountHash::new([1u8; ACCOUNT_HASH_LENGTH]); + let weight = Weight::new(1); + let mut keys = AssociatedKeys::new(pk1, weight); + assert!(matches!( + keys.update_key(pk2, Weight::new(2)) + .expect_err("should get error"), + UpdateKeyFailure::MissingKey + )); + keys.add_key(pk2, Weight::new(1)).unwrap(); + assert_eq!(keys.get(&pk2), Some(&Weight::new(1))); + keys.update_key(pk2, Weight::new(2)).unwrap(); + assert_eq!(keys.get(&pk2), Some(&Weight::new(2))); + } + + #[test] + fn associated_keys_calculate_keys_once() { + let key_1 = AccountHash::new([0; 32]); + let key_2 = AccountHash::new([1; 32]); + let key_3 = AccountHash::new([2; 32]); + let mut keys = AssociatedKeys::default(); + + keys.add_key(key_2, Weight::new(2)) + .expect("should add key_1"); + keys.add_key(key_1, Weight::new(1)) + .expect("should add key_1"); + keys.add_key(key_3, Weight::new(3)) + .expect("should add key_1"); + + assert_eq!( + keys.calculate_keys_weight(&BTreeSet::from_iter(vec![ + key_1, key_2, key_3, key_1, key_2, key_3, + ])), + Weight::new(1 + 2 + 3) + ); + } + + #[test] + fn associated_keys_total_weight() { + let associated_keys = { + let mut res = AssociatedKeys::new(AccountHash::new([1u8; 32]), Weight::new(1)); + res.add_key(AccountHash::new([2u8; 32]), Weight::new(11)) + .expect("should add key 1"); + res.add_key(AccountHash::new([3u8; 32]), Weight::new(12)) + .expect("should add key 2"); + res.add_key(AccountHash::new([4u8; 32]), Weight::new(13)) + .expect("should add key 3"); + res + }; + assert_eq!( + associated_keys.total_keys_weight(), + Weight::new(1 + 11 + 12 + 13) + ); + } + + #[test] + fn associated_keys_total_weight_excluding() { + let identity_key = AccountHash::new([1u8; 32]); + let identity_key_weight = Weight::new(1); + + let key_1 = AccountHash::new([2u8; 32]); + let key_1_weight = Weight::new(11); + + let key_2 = AccountHash::new([3u8; 32]); + let key_2_weight = Weight::new(12); + + let key_3 = 
AccountHash::new([4u8; 32]); + let key_3_weight = Weight::new(13); + + let associated_keys = { + let mut res = AssociatedKeys::new(identity_key, identity_key_weight); + res.add_key(key_1, key_1_weight).expect("should add key 1"); + res.add_key(key_2, key_2_weight).expect("should add key 2"); + res.add_key(key_3, key_3_weight).expect("should add key 3"); + res + }; + assert_eq!( + associated_keys.total_keys_weight_excluding(key_2), + Weight::new(identity_key_weight.value() + key_1_weight.value() + key_3_weight.value()) + ); + } + + #[test] + fn overflowing_keys_weight() { + let identity_key = AccountHash::new([1u8; 32]); + let key_1 = AccountHash::new([2u8; 32]); + let key_2 = AccountHash::new([3u8; 32]); + let key_3 = AccountHash::new([4u8; 32]); + + let identity_key_weight = Weight::new(250); + let weight_1 = Weight::new(1); + let weight_2 = Weight::new(2); + let weight_3 = Weight::new(3); + + let saturated_weight = Weight::new(u8::MAX); + + let associated_keys = { + let mut res = AssociatedKeys::new(identity_key, identity_key_weight); + + res.add_key(key_1, weight_1).expect("should add key 1"); + res.add_key(key_2, weight_2).expect("should add key 2"); + res.add_key(key_3, weight_3).expect("should add key 3"); + res + }; + + assert_eq!( + associated_keys.calculate_keys_weight(&BTreeSet::from_iter(vec![ + identity_key, // 250 + key_1, // 251 + key_2, // 253 + key_3, // 256 - error + ])), + saturated_weight, + ); + } + + #[test] + fn serialization_roundtrip() { + let mut keys = AssociatedKeys::default(); + keys.add_key(AccountHash::new([1; 32]), Weight::new(1)) + .unwrap(); + keys.add_key(AccountHash::new([2; 32]), Weight::new(2)) + .unwrap(); + keys.add_key(AccountHash::new([3; 32]), Weight::new(3)) + .unwrap(); + bytesrepr::test_serialization_roundtrip(&keys); + } +} diff --git a/types/src/account/error.rs b/types/src/account/error.rs new file mode 100644 index 0000000000..9ec28a9a3c --- /dev/null +++ b/types/src/account/error.rs @@ -0,0 +1,105 @@ +use core::{ + 
array::TryFromSliceError, + fmt::{self, Display, Formatter}, +}; + +// This error type is not intended to be used by third party crates. +#[doc(hidden)] +#[derive(Debug, Eq, PartialEq)] +pub struct TryFromIntError(pub(super) ()); + +/// Error returned when decoding an `AccountHash` from a formatted string. +#[derive(Debug)] +#[non_exhaustive] +pub enum FromStrError { + /// The prefix is invalid. + InvalidPrefix, + /// The hash is not valid hex. + Hex(base16::DecodeError), + /// The hash is the wrong length. + Hash(TryFromSliceError), +} + +impl From for FromStrError { + fn from(error: base16::DecodeError) -> Self { + FromStrError::Hex(error) + } +} + +impl From for FromStrError { + fn from(error: TryFromSliceError) -> Self { + FromStrError::Hash(error) + } +} + +impl Display for FromStrError { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + match self { + FromStrError::InvalidPrefix => write!(f, "prefix is not 'account-hash-'"), + FromStrError::Hex(error) => { + write!(f, "failed to decode address portion from hex: {}", error) + } + FromStrError::Hash(error) => write!(f, "address portion is wrong length: {}", error), + } + } +} + +/// Errors that can occur while changing action thresholds (i.e. the total +/// [`Weight`](super::Weight)s of signing [`AccountHash`](super::AccountHash)s required to perform +/// various actions) on an account. +#[repr(i32)] +#[derive(Debug, PartialEq, Eq, Copy, Clone)] +#[non_exhaustive] +pub enum SetThresholdFailure { + /// Setting the key-management threshold to a value lower than the deployment threshold is + /// disallowed. + KeyManagementThreshold = 1, + /// Setting the deployment threshold to a value greater than any other threshold is disallowed. + DeploymentThreshold = 2, + /// Caller doesn't have sufficient permissions to set new thresholds. + PermissionDeniedError = 3, + /// Setting a threshold to a value greater than the total weight of associated keys is + /// disallowed. 
+ InsufficientTotalWeight = 4, +} + +// This conversion is not intended to be used by third party crates. +#[doc(hidden)] +impl TryFrom for SetThresholdFailure { + type Error = TryFromIntError; + + fn try_from(value: i32) -> Result { + match value { + d if d == SetThresholdFailure::KeyManagementThreshold as i32 => { + Ok(SetThresholdFailure::KeyManagementThreshold) + } + d if d == SetThresholdFailure::DeploymentThreshold as i32 => { + Ok(SetThresholdFailure::DeploymentThreshold) + } + d if d == SetThresholdFailure::PermissionDeniedError as i32 => { + Ok(SetThresholdFailure::PermissionDeniedError) + } + d if d == SetThresholdFailure::InsufficientTotalWeight as i32 => { + Ok(SetThresholdFailure::InsufficientTotalWeight) + } + _ => Err(TryFromIntError(())), + } + } +} + +impl Display for SetThresholdFailure { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + SetThresholdFailure::KeyManagementThreshold => formatter + .write_str("New threshold should be greater than or equal to deployment threshold"), + SetThresholdFailure::DeploymentThreshold => formatter.write_str( + "New threshold should be lower than or equal to key management threshold", + ), + SetThresholdFailure::PermissionDeniedError => formatter + .write_str("Unable to set action threshold due to insufficient permissions"), + SetThresholdFailure::InsufficientTotalWeight => formatter.write_str( + "New threshold should be lower or equal than total weight of associated keys", + ), + } + } +} diff --git a/types/src/account/weight.rs b/types/src/account/weight.rs new file mode 100644 index 0000000000..f9c8703534 --- /dev/null +++ b/types/src/account/weight.rs @@ -0,0 +1,69 @@ +use alloc::vec::Vec; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; + +use serde::{Deserialize, Serialize}; + +use crate::{ + bytesrepr::{self, Error, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + CLType, CLTyped, +}; + +/// The number of bytes in a 
serialized [`Weight`]. +pub const WEIGHT_SERIALIZED_LENGTH: usize = U8_SERIALIZED_LENGTH; + +/// The weight associated with public keys in an account's associated keys. +#[derive(PartialOrd, Ord, PartialEq, Eq, Clone, Copy, Debug, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[cfg_attr( + feature = "json-schema", + schemars(rename = "AccountAssociatedKeyWeight") +)] +pub struct Weight(u8); + +impl Weight { + /// Maximum possible weight. + pub const MAX: Weight = Weight(u8::MAX); + + /// Constructs a new `Weight`. + pub const fn new(weight: u8) -> Weight { + Weight(weight) + } + + /// Returns the value of `self` as a `u8`. + pub fn value(self) -> u8 { + self.0 + } +} + +impl ToBytes for Weight { + fn to_bytes(&self) -> Result, Error> { + self.0.to_bytes() + } + + fn serialized_length(&self) -> usize { + WEIGHT_SERIALIZED_LENGTH + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + writer.push(self.0); + Ok(()) + } +} + +impl FromBytes for Weight { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (byte, rem) = u8::from_bytes(bytes)?; + Ok((Weight::new(byte), rem)) + } +} + +impl CLTyped for Weight { + fn cl_type() -> CLType { + CLType::U8 + } +} diff --git a/types/src/addressable_entity.rs b/types/src/addressable_entity.rs new file mode 100644 index 0000000000..0bcb9e6100 --- /dev/null +++ b/types/src/addressable_entity.rs @@ -0,0 +1,1964 @@ +//! Data types for supporting contract headers feature. +// TODO - remove once schemars stops causing warning. 
+#![allow(clippy::field_reassign_with_default)] + +pub mod action_thresholds; +mod action_type; +pub mod associated_keys; +mod entry_points; +mod error; +//mod named_keys; +mod weight; + +use alloc::{ + collections::{btree_map::Entry, BTreeMap, BTreeSet}, + format, + string::{String, ToString}, + vec::Vec, +}; +use blake2::{ + digest::{Update, VariableOutput}, + VarBlake2b, +}; +use core::{ + array::TryFromSliceError, + convert::{TryFrom, TryInto}, + fmt::{self, Debug, Display, Formatter}, + iter, +}; + +#[cfg(feature = "json-schema")] +use crate::SecretKey; +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use once_cell::sync::Lazy; +#[cfg(any(feature = "testing", test))] +use rand::{ + distributions::{Distribution, Standard}, + Rng, +}; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer}; +#[cfg(feature = "json-schema")] +use serde_map_to_array::KeyValueJsonSchema; +use serde_map_to_array::{BTreeMapToArray, KeyValueLabels}; + +pub use self::{ + action_thresholds::ActionThresholds, + action_type::ActionType, + associated_keys::AssociatedKeys, + entry_points::{ + EntityEntryPoint, EntryPointAccess, EntryPointAddr, EntryPointPayment, EntryPointType, + EntryPointValue, EntryPoints, Parameter, Parameters, DEFAULT_ENTRY_POINT_NAME, + }, + error::{FromAccountHashStrError, TryFromIntError, TryFromSliceForAccountHashError}, + weight::{Weight, WEIGHT_SERIALIZED_LENGTH}, +}; +use crate::{ + account::{ + Account, AccountHash, AddKeyFailure, RemoveKeyFailure, SetThresholdFailure, + UpdateKeyFailure, + }, + byte_code::ByteCodeHash, + bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + checksummed_hex, + contract_messages::TopicNameHash, + contracts::{Contract, ContractHash}, + system::SystemEntityType, + uref::{self, URef}, + AccessRights, ApiError, CLType, CLTyped, CLValue, CLValueError, ContextAccessRights, HashAddr, + Key, 
NamedKeys, PackageHash, ProtocolVersion, PublicKey, Tagged, BLAKE2B_DIGEST_LENGTH, + KEY_HASH_LENGTH, +}; + +/// Maximum number of distinct user groups. +pub const MAX_GROUPS: u8 = 10; +/// Maximum number of URefs which can be assigned across all user groups. +pub const MAX_TOTAL_UREFS: usize = 100; + +/// The prefix applied to the hex-encoded `Addressable Entity` to produce a formatted string +/// representation. +pub const ADDRESSABLE_ENTITY_STRING_PREFIX: &str = "addressable-entity-"; +/// The prefix applied to the hex-encoded `Entity` to produce a formatted string +/// representation. +pub const ENTITY_PREFIX: &str = "entity-"; +/// The prefix applied to the hex-encoded `Account` to produce a formatted string +/// representation. +pub const ACCOUNT_ENTITY_PREFIX: &str = "account-"; +/// The prefix applied to the hex-encoded `Smart contract` to produce a formatted string +/// representation. +pub const CONTRACT_ENTITY_PREFIX: &str = "contract-"; +/// The prefix applied to the hex-encoded `System entity account or contract` to produce a formatted +/// string representation. +pub const SYSTEM_ENTITY_PREFIX: &str = "system-"; +/// The prefix applied to the hex-encoded `Named Key` to produce a formatted string +/// representation. +pub const NAMED_KEY_PREFIX: &str = "named-key-"; + +/// Set of errors which may happen when working with contract headers. +#[derive(Debug, PartialEq, Eq)] +#[repr(u8)] +#[non_exhaustive] +pub enum Error { + /// Attempt to override an existing or previously existing version with a + /// new header (this is not allowed to ensure immutability of a given + /// version). + /// ``` + /// # use casper_types::addressable_entity::Error; + /// assert_eq!(1, Error::PreviouslyUsedVersion as u8); + /// ``` + PreviouslyUsedVersion = 1, + /// Attempted to disable a contract that does not exist. 
+ /// ``` + /// # use casper_types::addressable_entity::Error; + /// assert_eq!(2, Error::EntityNotFound as u8); + /// ``` + EntityNotFound = 2, + /// Attempted to create a user group which already exists (use the update + /// function to change an existing user group). + /// ``` + /// # use casper_types::addressable_entity::Error; + /// assert_eq!(3, Error::GroupAlreadyExists as u8); + /// ``` + GroupAlreadyExists = 3, + /// Attempted to add a new user group which exceeds the allowed maximum + /// number of groups. + /// ``` + /// # use casper_types::addressable_entity::Error; + /// assert_eq!(4, Error::MaxGroupsExceeded as u8); + /// ``` + MaxGroupsExceeded = 4, + /// Attempted to add a new URef to a group, which resulted in the total + /// number of URefs across all user groups to exceed the allowed maximum. + /// ``` + /// # use casper_types::addressable_entity::Error; + /// assert_eq!(5, Error::MaxTotalURefsExceeded as u8); + /// ``` + MaxTotalURefsExceeded = 5, + /// Attempted to remove a URef from a group, which does not exist in the + /// group. + /// ``` + /// # use casper_types::addressable_entity::Error; + /// assert_eq!(6, Error::GroupDoesNotExist as u8); + /// ``` + GroupDoesNotExist = 6, + /// Attempted to remove unknown URef from the group. + /// ``` + /// # use casper_types::addressable_entity::Error; + /// assert_eq!(7, Error::UnableToRemoveURef as u8); + /// ``` + UnableToRemoveURef = 7, + /// Group is use by at least one active contract. + /// ``` + /// # use casper_types::addressable_entity::Error; + /// assert_eq!(8, Error::GroupInUse as u8); + /// ``` + GroupInUse = 8, + /// URef already exists in given group. 
+ /// ``` + /// # use casper_types::addressable_entity::Error; + /// assert_eq!(9, Error::URefAlreadyExists as u8); + /// ``` + URefAlreadyExists = 9, +} + +impl TryFrom for Error { + type Error = (); + + fn try_from(value: u8) -> Result { + let error = match value { + v if v == Self::PreviouslyUsedVersion as u8 => Self::PreviouslyUsedVersion, + v if v == Self::EntityNotFound as u8 => Self::EntityNotFound, + v if v == Self::GroupAlreadyExists as u8 => Self::GroupAlreadyExists, + v if v == Self::MaxGroupsExceeded as u8 => Self::MaxGroupsExceeded, + v if v == Self::MaxTotalURefsExceeded as u8 => Self::MaxTotalURefsExceeded, + v if v == Self::GroupDoesNotExist as u8 => Self::GroupDoesNotExist, + v if v == Self::UnableToRemoveURef as u8 => Self::UnableToRemoveURef, + v if v == Self::GroupInUse as u8 => Self::GroupInUse, + v if v == Self::URefAlreadyExists as u8 => Self::URefAlreadyExists, + _ => return Err(()), + }; + Ok(error) + } +} + +/// Associated error type of `TryFrom<&[u8]>` for `ContractHash`. +#[derive(Debug)] +pub struct TryFromSliceForContractHashError(()); + +impl Display for TryFromSliceForContractHashError { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + write!(f, "failed to retrieve from slice") + } +} + +/// An error from parsing a formatted contract string +#[derive(Debug)] +#[non_exhaustive] +pub enum FromStrError { + /// Invalid formatted string prefix. + InvalidPrefix, + /// Error when decoding a hex string + Hex(base16::DecodeError), + /// Error when parsing an account + Account(TryFromSliceForAccountHashError), + /// Error when parsing the hash. + Hash(TryFromSliceError), + /// Error when parsing an uref. + URef(uref::FromStrError), + /// Error parsing from bytes. 
+ BytesRepr(bytesrepr::Error), +} + +impl From for FromStrError { + fn from(error: base16::DecodeError) -> Self { + FromStrError::Hex(error) + } +} + +impl From for FromStrError { + fn from(error: TryFromSliceError) -> Self { + FromStrError::Hash(error) + } +} + +impl From for FromStrError { + fn from(error: uref::FromStrError) -> Self { + FromStrError::URef(error) + } +} + +impl Display for FromStrError { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + match self { + FromStrError::InvalidPrefix => write!(f, "invalid prefix"), + FromStrError::Hex(error) => write!(f, "decode from hex: {}", error), + FromStrError::Hash(error) => write!(f, "hash from string error: {}", error), + FromStrError::URef(error) => write!(f, "uref from string error: {:?}", error), + FromStrError::Account(error) => { + write!(f, "account hash from string error: {:?}", error) + } + FromStrError::BytesRepr(error) => { + write!(f, "bytesrepr error: {:?}", error) + } + } + } +} + +/// A newtype wrapping a `HashAddr` which references an [`AddressableEntity`] in the global state. +#[derive(Default, PartialOrd, Ord, PartialEq, Eq, Hash, Clone, Copy)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr( + feature = "json-schema", + derive(JsonSchema), + schemars(description = "The hex-encoded address of the addressable entity.") +)] +pub struct AddressableEntityHash( + #[cfg_attr(feature = "json-schema", schemars(skip, with = "String"))] HashAddr, +); + +impl AddressableEntityHash { + /// Constructs a new `AddressableEntityHash` from the raw bytes of the contract hash. + pub const fn new(value: HashAddr) -> AddressableEntityHash { + AddressableEntityHash(value) + } + + /// Get the entity addr for this entity hash from the corresponding entity. + pub fn entity_addr(&self, entity: AddressableEntity) -> EntityAddr { + entity.entity_addr(*self) + } + + /// Returns the raw bytes of the contract hash as an array. 
+ pub fn value(&self) -> HashAddr { + self.0 + } + + /// Returns the raw bytes of the contract hash as a `slice`. + pub fn as_bytes(&self) -> &[u8] { + &self.0 + } + + /// Formats the `AddressableEntityHash` for users getting and putting. + pub fn to_formatted_string(self) -> String { + format!( + "{}{}", + ADDRESSABLE_ENTITY_STRING_PREFIX, + base16::encode_lower(&self.0), + ) + } + + /// Hexadecimal representation of the hash. + pub fn to_hex_string(&self) -> String { + base16::encode_lower(&self.0) + } + + /// Parses a string formatted as per `Self::to_formatted_string()` into a + /// `AddressableEntityHash`. + pub fn from_formatted_str(input: &str) -> Result { + let remainder = input + .strip_prefix(ADDRESSABLE_ENTITY_STRING_PREFIX) + .ok_or(FromStrError::InvalidPrefix)?; + let bytes = HashAddr::try_from(checksummed_hex::decode(remainder)?.as_ref())?; + Ok(AddressableEntityHash(bytes)) + } +} + +impl From for AddressableEntityHash { + fn from(contract_hash: ContractHash) -> Self { + AddressableEntityHash::new(contract_hash.value()) + } +} + +impl Display for AddressableEntityHash { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!(f, "{}", base16::encode_lower(&self.0)) + } +} + +impl Debug for AddressableEntityHash { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + write!( + f, + "AddressableEntityHash({})", + base16::encode_lower(&self.0) + ) + } +} + +impl CLTyped for AddressableEntityHash { + fn cl_type() -> CLType { + CLType::ByteArray(KEY_HASH_LENGTH as u32) + } +} + +impl ToBytes for AddressableEntityHash { + #[inline(always)] + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.0.to_bytes() + } + + #[inline(always)] + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } + + #[inline(always)] + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + writer.extend_from_slice(&self.0); + Ok(()) + } +} + +impl FromBytes for AddressableEntityHash { + fn from_bytes(bytes: &[u8]) -> Result<(Self, 
&[u8]), bytesrepr::Error> { + let (bytes, rem) = FromBytes::from_bytes(bytes)?; + Ok((AddressableEntityHash::new(bytes), rem)) + } +} + +impl From<[u8; 32]> for AddressableEntityHash { + fn from(bytes: [u8; 32]) -> Self { + AddressableEntityHash(bytes) + } +} + +impl TryFrom for AddressableEntityHash { + type Error = ApiError; + + fn try_from(value: Key) -> Result { + if let Key::AddressableEntity(entity_addr) = value { + Ok(AddressableEntityHash::new(entity_addr.value())) + } else { + Err(ApiError::Formatting) + } + } +} + +impl Serialize for AddressableEntityHash { + fn serialize(&self, serializer: S) -> Result { + if serializer.is_human_readable() { + self.to_formatted_string().serialize(serializer) + } else { + self.0.serialize(serializer) + } + } +} + +impl<'de> Deserialize<'de> for AddressableEntityHash { + fn deserialize>(deserializer: D) -> Result { + if deserializer.is_human_readable() { + let formatted_string = String::deserialize(deserializer)?; + AddressableEntityHash::from_formatted_str(&formatted_string).map_err(SerdeError::custom) + } else { + let bytes = HashAddr::deserialize(deserializer)?; + Ok(AddressableEntityHash(bytes)) + } + } +} + +impl AsRef<[u8]> for AddressableEntityHash { + fn as_ref(&self) -> &[u8] { + self.0.as_ref() + } +} + +impl TryFrom<&[u8]> for AddressableEntityHash { + type Error = TryFromSliceForContractHashError; + + fn try_from(bytes: &[u8]) -> Result { + HashAddr::try_from(bytes) + .map(AddressableEntityHash::new) + .map_err(|_| TryFromSliceForContractHashError(())) + } +} + +impl TryFrom<&Vec> for AddressableEntityHash { + type Error = TryFromSliceForContractHashError; + + fn try_from(bytes: &Vec) -> Result { + HashAddr::try_from(bytes as &[u8]) + .map(AddressableEntityHash::new) + .map_err(|_| TryFromSliceForContractHashError(())) + } +} + +#[cfg(any(feature = "testing", test))] +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> AddressableEntityHash { + AddressableEntityHash(rng.gen()) + } +} + +/// Tag 
for the variants of [`EntityKind`]. +#[derive(Debug, Copy, Clone, PartialOrd, Ord, PartialEq, Eq, Hash, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[repr(u8)] +pub enum EntityKindTag { + /// `EntityKind::System` variant. + System = 0, + /// `EntityKind::Account` variant. + Account = 1, + /// `EntityKind::SmartContract` variant. + SmartContract = 2, +} + +impl TryFrom for EntityKindTag { + type Error = bytesrepr::Error; + + fn try_from(value: u8) -> Result { + match value { + 0 => Ok(EntityKindTag::System), + 1 => Ok(EntityKindTag::Account), + 2 => Ok(EntityKindTag::SmartContract), + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +impl ToBytes for EntityKindTag { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + (*self as u8).to_bytes() + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + (*self as u8).write_bytes(writer) + } +} + +impl FromBytes for EntityKindTag { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (entity_kind_tag, remainder) = u8::from_bytes(bytes)?; + Ok((entity_kind_tag.try_into()?, remainder)) + } +} + +impl Display for EntityKindTag { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self { + EntityKindTag::System => { + write!(f, "system") + } + EntityKindTag::Account => { + write!(f, "account") + } + EntityKindTag::SmartContract => { + write!(f, "contract") + } + } + } +} + +#[cfg(any(feature = "testing", test))] +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> EntityKindTag { + match rng.gen_range(0..=2) { + 0 => EntityKindTag::System, + 1 => EntityKindTag::Account, + 2 => EntityKindTag::SmartContract, + _ => unreachable!(), + } + } +} + +#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr( + feature = "json-schema", + 
derive(JsonSchema), + schemars(description = "Runtime used to execute a Transaction.") +)] +//Default is needed only in testing to meet EnumIter needs +#[cfg_attr(any(feature = "testing", test), derive(Default))] +#[serde(deny_unknown_fields)] +#[repr(u8)] +pub enum ContractRuntimeTag { + #[cfg_attr(any(feature = "testing", test), default)] + VmCasperV1, + VmCasperV2, +} + +#[cfg(any(feature = "testing", test))] +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> ContractRuntimeTag { + match rng.gen_range(0..=1) { + 0 => ContractRuntimeTag::VmCasperV1, + 1 => ContractRuntimeTag::VmCasperV2, + _ => unreachable!(), + } + } +} + +impl ToBytes for ContractRuntimeTag { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + (*self as u8).to_bytes() + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + (*self as u8).write_bytes(writer) + } +} + +impl FromBytes for ContractRuntimeTag { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + if tag == ContractRuntimeTag::VmCasperV1 as u8 { + Ok((ContractRuntimeTag::VmCasperV1, remainder)) + } else if tag == ContractRuntimeTag::VmCasperV2 as u8 { + Ok((ContractRuntimeTag::VmCasperV2, remainder)) + } else { + Err(bytesrepr::Error::Formatting) + } + } +} + +impl Display for ContractRuntimeTag { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self { + ContractRuntimeTag::VmCasperV1 => write!(f, "vm-casper-v1"), + ContractRuntimeTag::VmCasperV2 => write!(f, "vm-casper-v2"), + } + } +} +impl ContractRuntimeTag { + /// Returns the tag of the [`ContractRuntimeTag`]. 
+ pub fn tag(&self) -> u8 { + *self as u8 + } +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +/// The type of Package. +pub enum EntityKind { + /// Package associated with a native contract implementation. + System(SystemEntityType), + /// Package associated with an Account hash. + Account(AccountHash), + /// Packages associated with Wasm stored on chain. + SmartContract(ContractRuntimeTag), +} + +impl EntityKind { + /// Returns the Account hash associated with a Package based on the package kind. + pub fn maybe_account_hash(&self) -> Option { + match self { + Self::Account(account_hash) => Some(*account_hash), + Self::SmartContract(_) | Self::System(_) => None, + } + } + + /// Returns the associated key set based on the Account hash set in the package kind. + pub fn associated_keys(&self) -> AssociatedKeys { + match self { + Self::Account(account_hash) => AssociatedKeys::new(*account_hash, Weight::new(1)), + Self::SmartContract(_) | Self::System(_) => AssociatedKeys::default(), + } + } + + /// Returns if the current package is either a system contract or the system entity. + pub fn is_system(&self) -> bool { + matches!(self, Self::System(_)) + } + + /// Returns if the current package is the system mint. + pub fn is_system_mint(&self) -> bool { + matches!(self, Self::System(SystemEntityType::Mint)) + } + + /// Returns if the current package is the system auction. + pub fn is_system_auction(&self) -> bool { + matches!(self, Self::System(SystemEntityType::Auction)) + } + + /// Returns if the current package is associated with the system addressable entity. 
+ pub fn is_system_account(&self) -> bool { + match self { + Self::Account(account_hash) => { + if *account_hash == PublicKey::System.to_account_hash() { + return true; + } + false + } + _ => false, + } + } +} + +impl Tagged for EntityKind { + fn tag(&self) -> EntityKindTag { + match self { + EntityKind::System(_) => EntityKindTag::System, + EntityKind::Account(_) => EntityKindTag::Account, + EntityKind::SmartContract(_) => EntityKindTag::SmartContract, + } + } +} + +impl Tagged for EntityKind { + fn tag(&self) -> u8 { + let package_kind_tag: EntityKindTag = self.tag(); + package_kind_tag as u8 + } +} + +impl ToBytes for EntityKind { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + + match self { + EntityKind::SmartContract(transaction_runtime) => { + transaction_runtime.serialized_length() + } + EntityKind::System(system_entity_type) => system_entity_type.serialized_length(), + EntityKind::Account(account_hash) => account_hash.serialized_length(), + } + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + EntityKind::SmartContract(transaction_runtime) => { + writer.push(self.tag()); + transaction_runtime.write_bytes(writer) + } + EntityKind::System(system_entity_type) => { + writer.push(self.tag()); + system_entity_type.write_bytes(writer) + } + EntityKind::Account(account_hash) => { + writer.push(self.tag()); + account_hash.write_bytes(writer) + } + } + } +} + +impl FromBytes for EntityKind { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = EntityKindTag::from_bytes(bytes)?; + match tag { + EntityKindTag::System => { + let (entity_type, remainder) = SystemEntityType::from_bytes(remainder)?; + Ok((EntityKind::System(entity_type), remainder)) + } + EntityKindTag::Account => { + let (account_hash, 
remainder) = AccountHash::from_bytes(remainder)?; + Ok((EntityKind::Account(account_hash), remainder)) + } + EntityKindTag::SmartContract => { + let (transaction_runtime, remainder) = FromBytes::from_bytes(remainder)?; + Ok((EntityKind::SmartContract(transaction_runtime), remainder)) + } + } + } +} + +impl Display for EntityKind { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self { + EntityKind::System(system_entity) => { + write!(f, "system-entity-kind({})", system_entity) + } + EntityKind::Account(account_hash) => { + write!(f, "account-entity-kind({})", account_hash) + } + EntityKind::SmartContract(transaction_runtime) => { + write!(f, "smart-contract-entity-kind({})", transaction_runtime) + } + } + } +} + +#[cfg(any(feature = "testing", test))] +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> EntityKind { + match rng.gen_range(0..=2) { + 0 => EntityKind::System(rng.gen()), + 1 => EntityKind::Account(rng.gen()), + 2 => EntityKind::SmartContract(rng.gen()), + _ => unreachable!(), + } + } +} + +/// The address for an AddressableEntity which contains the 32 bytes and tagging information. +#[derive(PartialOrd, Ord, PartialEq, Eq, Hash, Clone, Copy)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema), schemars(untagged))] +pub enum EntityAddr { + /// The address for a system entity account or contract. + System(#[cfg_attr(feature = "json-schema", schemars(skip, with = "String"))] HashAddr), + /// The address of an entity that corresponds to an Account. + Account(#[cfg_attr(feature = "json-schema", schemars(skip, with = "String"))] HashAddr), + /// The address of an entity that corresponds to a Userland smart contract. + SmartContract(#[cfg_attr(feature = "json-schema", schemars(skip, with = "String"))] HashAddr), +} + +impl EntityAddr { + /// The length in bytes of an `EntityAddr`. 
+ pub const LENGTH: usize = U8_SERIALIZED_LENGTH + KEY_HASH_LENGTH; + + /// Constructs a new `EntityAddr` for a system entity. + pub const fn new_system(hash_addr: HashAddr) -> Self { + Self::System(hash_addr) + } + + /// Constructs a new `EntityAddr` for an Account entity. + pub const fn new_account(hash_addr: HashAddr) -> Self { + Self::Account(hash_addr) + } + + /// Constructs a new `EntityAddr` for a smart contract. + pub const fn new_smart_contract(hash_addr: HashAddr) -> Self { + Self::SmartContract(hash_addr) + } + + /// Constructs a new `EntityAddr` based on the supplied kind. + pub fn new_of_kind(entity_kind: EntityKind, hash_addr: HashAddr) -> Self { + match entity_kind { + EntityKind::System(_) => Self::new_system(hash_addr), + EntityKind::Account(_) => Self::new_account(hash_addr), + EntityKind::SmartContract(_) => Self::new_smart_contract(hash_addr), + } + } + + /// Returns the tag of the [`EntityAddr`]. + pub fn tag(&self) -> EntityKindTag { + match self { + EntityAddr::System(_) => EntityKindTag::System, + EntityAddr::Account(_) => EntityKindTag::Account, + EntityAddr::SmartContract(_) => EntityKindTag::SmartContract, + } + } + + /// Is this a system entity address? + pub fn is_system(&self) -> bool { + self.tag() == EntityKindTag::System + || self.value() == PublicKey::System.to_account_hash().value() + } + + /// Is this a contract entity address? + pub fn is_contract(&self) -> bool { + self.tag() == EntityKindTag::SmartContract + } + + /// Is this an account entity address? + pub fn is_account(&self) -> bool { + self.tag() == EntityKindTag::Account + } + + /// Returns the 32 bytes of the [`EntityAddr`]. + pub fn value(&self) -> HashAddr { + match self { + EntityAddr::System(hash_addr) + | EntityAddr::Account(hash_addr) + | EntityAddr::SmartContract(hash_addr) => *hash_addr, + } + } + + /// Returns the formatted String representation of the [`EntityAddr`]. 
+ pub fn to_formatted_string(&self) -> String { + match self { + EntityAddr::System(addr) => { + format!( + "{}{}{}", + ENTITY_PREFIX, + SYSTEM_ENTITY_PREFIX, + base16::encode_lower(addr) + ) + } + EntityAddr::Account(addr) => { + format!( + "{}{}{}", + ENTITY_PREFIX, + ACCOUNT_ENTITY_PREFIX, + base16::encode_lower(addr) + ) + } + EntityAddr::SmartContract(addr) => { + format!( + "{}{}{}", + ENTITY_PREFIX, + CONTRACT_ENTITY_PREFIX, + base16::encode_lower(addr) + ) + } + } + } + + /// Constructs an [`EntityAddr`] from a formatted String. + pub fn from_formatted_str(input: &str) -> Result { + if let Some(entity) = input.strip_prefix(ENTITY_PREFIX) { + let (addr_str, tag) = if let Some(str) = entity.strip_prefix(SYSTEM_ENTITY_PREFIX) { + (str, EntityKindTag::System) + } else if let Some(str) = entity.strip_prefix(ACCOUNT_ENTITY_PREFIX) { + (str, EntityKindTag::Account) + } else if let Some(str) = entity.strip_prefix(CONTRACT_ENTITY_PREFIX) { + (str, EntityKindTag::SmartContract) + } else { + return Err(FromStrError::InvalidPrefix); + }; + let addr = checksummed_hex::decode(addr_str).map_err(FromStrError::Hex)?; + let hash_addr = HashAddr::try_from(addr.as_ref()).map_err(FromStrError::Hash)?; + let entity_addr = match tag { + EntityKindTag::System => EntityAddr::new_system(hash_addr), + EntityKindTag::Account => EntityAddr::new_account(hash_addr), + EntityKindTag::SmartContract => EntityAddr::new_smart_contract(hash_addr), + }; + + return Ok(entity_addr); + } + + Err(FromStrError::InvalidPrefix) + } + + pub fn into_smart_contract(&self) -> Option<[u8; 32]> { + match self { + EntityAddr::SmartContract(addr) => Some(*addr), + _ => None, + } + } +} + +impl ToBytes for EntityAddr { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + EntityAddr::LENGTH + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), 
bytesrepr::Error> { + match self { + EntityAddr::System(addr) => { + EntityKindTag::System.write_bytes(writer)?; + addr.write_bytes(writer) + } + EntityAddr::Account(addr) => { + EntityKindTag::Account.write_bytes(writer)?; + addr.write_bytes(writer) + } + EntityAddr::SmartContract(addr) => { + EntityKindTag::SmartContract.write_bytes(writer)?; + addr.write_bytes(writer) + } + } + } +} + +impl FromBytes for EntityAddr { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = EntityKindTag::from_bytes(bytes)?; + let (addr, remainder) = HashAddr::from_bytes(remainder)?; + let entity_addr = match tag { + EntityKindTag::System => EntityAddr::System(addr), + EntityKindTag::Account => EntityAddr::Account(addr), + EntityKindTag::SmartContract => EntityAddr::SmartContract(addr), + }; + Ok((entity_addr, remainder)) + } +} + +impl CLTyped for EntityAddr { + fn cl_type() -> CLType { + CLType::Any + } +} + +impl From for AddressableEntityHash { + fn from(entity_addr: EntityAddr) -> Self { + AddressableEntityHash::new(entity_addr.value()) + } +} + +impl Display for EntityAddr { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.write_str(&self.to_formatted_string()) + } +} + +impl Debug for EntityAddr { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self { + EntityAddr::System(hash_addr) => { + write!(f, "EntityAddr::System({})", base16::encode_lower(hash_addr)) + } + EntityAddr::Account(hash_addr) => { + write!( + f, + "EntityAddr::Account({})", + base16::encode_lower(hash_addr) + ) + } + EntityAddr::SmartContract(hash_addr) => { + write!( + f, + "EntityAddr::SmartContract({})", + base16::encode_lower(hash_addr) + ) + } + } + } +} + +impl Serialize for EntityAddr { + fn serialize(&self, serializer: S) -> Result { + if serializer.is_human_readable() { + self.to_formatted_string().serialize(serializer) + } else { + let (tag, value): (EntityKindTag, HashAddr) = (self.tag(), self.value()); + (tag, 
value).serialize(serializer) + } + } +} + +impl<'de> Deserialize<'de> for EntityAddr { + fn deserialize>(deserializer: D) -> Result { + if deserializer.is_human_readable() { + let formatted_string = String::deserialize(deserializer)?; + Self::from_formatted_str(&formatted_string).map_err(SerdeError::custom) + } else { + let (tag, addr) = <(EntityKindTag, HashAddr)>::deserialize(deserializer)?; + match tag { + EntityKindTag::System => Ok(EntityAddr::new_system(addr)), + EntityKindTag::Account => Ok(EntityAddr::new_account(addr)), + EntityKindTag::SmartContract => Ok(EntityAddr::new_smart_contract(addr)), + } + } + } +} + +#[cfg(any(feature = "testing", test))] +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> EntityAddr { + match rng.gen_range(0..=2) { + 0 => EntityAddr::System(rng.gen()), + 1 => EntityAddr::Account(rng.gen()), + 2 => EntityAddr::SmartContract(rng.gen()), + _ => unreachable!(), + } + } +} + +/// A NamedKey address. +#[derive(PartialOrd, Ord, PartialEq, Eq, Hash, Clone, Copy, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub struct NamedKeyAddr { + /// The address of the entity. + base_addr: EntityAddr, + /// The bytes of the name. + string_bytes: [u8; KEY_HASH_LENGTH], +} + +impl NamedKeyAddr { + /// The length in bytes of a [`NamedKeyAddr`]. + pub const NAMED_KEY_ADDR_BASE_LENGTH: usize = 1 + EntityAddr::LENGTH; + + /// Constructs a new [`NamedKeyAddr`] based on the supplied bytes. + pub const fn new_named_key_entry( + entity_addr: EntityAddr, + string_bytes: [u8; KEY_HASH_LENGTH], + ) -> Self { + Self { + base_addr: entity_addr, + string_bytes, + } + } + + /// Constructs a new [`NamedKeyAddr`] based on string name. + /// Will fail if the string cannot be serialized. 
+ pub fn new_from_string( + entity_addr: EntityAddr, + entry: String, + ) -> Result { + let bytes = entry.to_bytes()?; + let mut hasher = { + match VarBlake2b::new(BLAKE2B_DIGEST_LENGTH) { + Ok(hasher) => hasher, + Err(_) => return Err(bytesrepr::Error::Formatting), + } + }; + hasher.update(bytes); + // NOTE: Assumed safe as size of `HashAddr` equals to the output provided by hasher. + let mut string_bytes = HashAddr::default(); + hasher.finalize_variable(|hash| string_bytes.clone_from_slice(hash)); + Ok(Self::new_named_key_entry(entity_addr, string_bytes)) + } + + /// Returns the encapsulated [`EntityAddr`]. + pub fn entity_addr(&self) -> EntityAddr { + self.base_addr + } + + /// Returns the formatted String representation of the [`NamedKeyAddr`]. + pub fn to_formatted_string(&self) -> String { + format!("{}", self) + } + + /// Constructs a [`NamedKeyAddr`] from a formatted string. + pub fn from_formatted_str(input: &str) -> Result { + if let Some(named_key) = input.strip_prefix(NAMED_KEY_PREFIX) { + if let Some((entity_addr_str, string_bytes_str)) = named_key.rsplit_once('-') { + let entity_addr = EntityAddr::from_formatted_str(entity_addr_str)?; + let string_bytes = + checksummed_hex::decode(string_bytes_str).map_err(FromStrError::Hex)?; + let (string_bytes, _) = + FromBytes::from_vec(string_bytes).map_err(FromStrError::BytesRepr)?; + return Ok(Self::new_named_key_entry(entity_addr, string_bytes)); + }; + } + + Err(FromStrError::InvalidPrefix) + } +} + +impl Default for NamedKeyAddr { + fn default() -> Self { + NamedKeyAddr { + base_addr: EntityAddr::System(HashAddr::default()), + string_bytes: Default::default(), + } + } +} + +impl ToBytes for NamedKeyAddr { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + buffer.append(&mut self.base_addr.to_bytes()?); + buffer.append(&mut self.string_bytes.to_bytes()?); + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.base_addr.serialized_length() + 
self.string_bytes.serialized_length() + } +} + +impl FromBytes for NamedKeyAddr { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (base_addr, remainder) = EntityAddr::from_bytes(bytes)?; + let (string_bytes, remainder) = FromBytes::from_bytes(remainder)?; + Ok(( + Self { + base_addr, + string_bytes, + }, + remainder, + )) + } +} + +impl Display for NamedKeyAddr { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!( + f, + "{}{}-{}", + NAMED_KEY_PREFIX, + self.base_addr, + base16::encode_lower(&self.string_bytes) + ) + } +} + +impl Debug for NamedKeyAddr { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!( + f, + "NamedKeyAddr({:?}-{:?})", + self.base_addr, + base16::encode_lower(&self.string_bytes) + ) + } +} + +#[cfg(any(feature = "testing", test))] +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> NamedKeyAddr { + NamedKeyAddr { + base_addr: rng.gen(), + string_bytes: rng.gen(), + } + } +} + +/// A NamedKey value. +#[derive(Clone, Eq, PartialEq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub struct NamedKeyValue { + /// The actual `Key` encoded as a CLValue. + named_key: CLValue, + /// The name of the `Key` encoded as a CLValue. + name: CLValue, +} + +impl NamedKeyValue { + /// Constructs a new [`NamedKeyValue`]. + pub fn new(key: CLValue, name: CLValue) -> Self { + Self { + named_key: key, + name, + } + } + + /// Constructs a new [`NamedKeyValue`] from its [`Key`] and [`String`]. + pub fn from_concrete_values(named_key: Key, name: String) -> Result { + let key_cl_value = CLValue::from_t(named_key)?; + let string_cl_value = CLValue::from_t(name)?; + Ok(Self::new(key_cl_value, string_cl_value)) + } + + /// Returns the [`Key`] as a CLValue. + pub fn get_key_as_cl_value(&self) -> &CLValue { + &self.named_key + } + + /// Returns the [`String`] as a CLValue. 
+ pub fn get_name_as_cl_value(&self) -> &CLValue { + &self.name + } + + /// Returns the concrete `Key` value + pub fn get_key(&self) -> Result { + self.named_key.clone().into_t::() + } + + /// Returns the concrete `String` value + pub fn get_name(&self) -> Result { + self.name.clone().into_t::() + } +} + +impl ToBytes for NamedKeyValue { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + buffer.append(&mut self.named_key.to_bytes()?); + buffer.append(&mut self.name.to_bytes()?); + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.named_key.serialized_length() + self.name.serialized_length() + } +} + +impl FromBytes for NamedKeyValue { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (named_key, remainder) = CLValue::from_bytes(bytes)?; + let (name, remainder) = CLValue::from_bytes(remainder)?; + Ok((Self { named_key, name }, remainder)) + } +} + +/// Collection of named message topics. +#[derive(Clone, PartialEq, Eq, Serialize, Deserialize, Debug, Default)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(transparent, deny_unknown_fields)] +pub struct MessageTopics( + #[serde(with = "BTreeMapToArray::")] + BTreeMap, +); + +impl ToBytes for MessageTopics { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.0.to_bytes() + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.0.write_bytes(writer) + } +} + +impl FromBytes for MessageTopics { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (message_topics_map, remainder) = BTreeMap::::from_bytes(bytes)?; + Ok((MessageTopics(message_topics_map), remainder)) + } +} + +impl MessageTopics { + /// Adds new message topic by topic name. 
+ pub fn add_topic( + &mut self, + topic_name: &str, + topic_name_hash: TopicNameHash, + ) -> Result<(), MessageTopicError> { + match self.0.entry(topic_name.to_string()) { + Entry::Vacant(entry) => { + entry.insert(topic_name_hash); + Ok(()) + } + Entry::Occupied(_) => Err(MessageTopicError::DuplicateTopic), + } + } + + /// Checks if given topic name exists. + pub fn has_topic(&self, topic_name: &str) -> bool { + self.0.contains_key(topic_name) + } + + /// Gets the topic hash from the collection by its topic name. + pub fn get(&self, topic_name: &str) -> Option<&TopicNameHash> { + self.0.get(topic_name) + } + + /// Returns the length of the message topics. + pub fn len(&self) -> usize { + self.0.len() + } + + /// Returns true if no message topics are registered. + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } + + /// Returns an iterator over the topic name and its hash. + pub fn iter(&self) -> impl Iterator { + self.0.iter() + } +} + +struct MessageTopicLabels; + +impl KeyValueLabels for MessageTopicLabels { + const KEY: &'static str = "topic_name"; + const VALUE: &'static str = "topic_name_hash"; +} + +#[cfg(feature = "json-schema")] +impl KeyValueJsonSchema for MessageTopicLabels { + const JSON_SCHEMA_KV_NAME: Option<&'static str> = Some("MessageTopic"); +} + +impl From> for MessageTopics { + fn from(topics: BTreeMap) -> MessageTopics { + MessageTopics(topics) + } +} + +/// Errors that can occur while adding a new topic. +#[derive(PartialEq, Eq, Debug, Clone)] +#[non_exhaustive] +pub enum MessageTopicError { + /// Topic already exists. + DuplicateTopic, + /// Maximum number of topics exceeded. + MaxTopicsExceeded, + /// Topic name size exceeded. 
+ TopicNameSizeExceeded, +} + +#[cfg(feature = "json-schema")] +static ADDRESSABLE_ENTITY: Lazy = Lazy::new(|| { + let secret_key = SecretKey::ed25519_from_bytes([0; 32]).unwrap(); + let account_hash = PublicKey::from(&secret_key).to_account_hash(); + let package_hash = PackageHash::new([0; 32]); + let byte_code_hash = ByteCodeHash::new([0; 32]); + let main_purse = URef::from_formatted_str( + "uref-09480c3248ef76b603d386f3f4f8a5f87f597d4eaffd475433f861af187ab5db-007", + ) + .unwrap(); + let weight = Weight::new(1); + let associated_keys = AssociatedKeys::new(account_hash, weight); + let action_thresholds = ActionThresholds::new(weight, weight, weight).unwrap(); + let protocol_version = ProtocolVersion::from_parts(2, 0, 0); + AddressableEntity { + protocol_version, + entity_kind: EntityKind::Account(account_hash), + package_hash, + byte_code_hash, + main_purse, + associated_keys, + action_thresholds, + } +}); + +/// The address for an AddressableEntity which contains the 32 bytes and tagging information. +pub type ContractAddress = PackageHash; + +/// Methods and type signatures supported by a contract. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub struct AddressableEntity { + protocol_version: ProtocolVersion, + entity_kind: EntityKind, + package_hash: PackageHash, + byte_code_hash: ByteCodeHash, + main_purse: URef, + + associated_keys: AssociatedKeys, + action_thresholds: ActionThresholds, +} + +impl From + for ( + PackageHash, + ByteCodeHash, + ProtocolVersion, + URef, + AssociatedKeys, + ActionThresholds, + ) +{ + fn from(entity: AddressableEntity) -> Self { + ( + entity.package_hash, + entity.byte_code_hash, + entity.protocol_version, + entity.main_purse, + entity.associated_keys, + entity.action_thresholds, + ) + } +} + +impl AddressableEntity { + /// `AddressableEntity` constructor. 
+ #[allow(clippy::too_many_arguments)] + pub fn new( + package_hash: PackageHash, + byte_code_hash: ByteCodeHash, + protocol_version: ProtocolVersion, + main_purse: URef, + associated_keys: AssociatedKeys, + action_thresholds: ActionThresholds, + entity_kind: EntityKind, + ) -> Self { + AddressableEntity { + package_hash, + byte_code_hash, + protocol_version, + main_purse, + action_thresholds, + associated_keys, + entity_kind, + } + } + + /// Get the entity addr for this entity from the corresponding hash. + pub fn entity_addr(&self, entity_hash: AddressableEntityHash) -> EntityAddr { + let hash_addr = entity_hash.value(); + match self.entity_kind { + EntityKind::System(_) => EntityAddr::new_system(hash_addr), + EntityKind::Account(_) => EntityAddr::new_account(hash_addr), + EntityKind::SmartContract(_) => EntityAddr::new_smart_contract(hash_addr), + } + } + + pub fn entity_kind(&self) -> EntityKind { + self.entity_kind + } + + /// Hash for accessing contract package + pub fn package_hash(&self) -> PackageHash { + self.package_hash + } + + /// Hash for accessing contract WASM + pub fn byte_code_hash(&self) -> ByteCodeHash { + self.byte_code_hash + } + + /// Get the protocol version this header is targeting. + pub fn protocol_version(&self) -> ProtocolVersion { + self.protocol_version + } + + /// Returns main purse. + pub fn main_purse(&self) -> URef { + self.main_purse + } + + /// Returns an [`AccessRights::ADD`]-only version of the main purse's [`URef`]. + pub fn main_purse_add_only(&self) -> URef { + URef::new(self.main_purse.addr(), AccessRights::ADD) + } + + /// Returns associated keys. + pub fn associated_keys(&self) -> &AssociatedKeys { + &self.associated_keys + } + + /// Returns action thresholds. + pub fn action_thresholds(&self) -> &ActionThresholds { + &self.action_thresholds + } + + /// Adds an associated key to an addressable entity. 
+ pub fn add_associated_key( + &mut self, + account_hash: AccountHash, + weight: Weight, + ) -> Result<(), AddKeyFailure> { + self.associated_keys.add_key(account_hash, weight) + } + + /// Checks if removing given key would properly satisfy thresholds. + fn can_remove_key(&self, account_hash: AccountHash) -> bool { + let total_weight_without = self + .associated_keys + .total_keys_weight_excluding(account_hash); + + // Returns true if the total weight calculated without given public key would be greater or + // equal to all of the thresholds. + total_weight_without >= *self.action_thresholds().deployment() + && total_weight_without >= *self.action_thresholds().key_management() + } + + /// Checks if adding a weight to a sum of all weights excluding the given key would make the + /// resulting value to fall below any of the thresholds on account. + fn can_update_key(&self, account_hash: AccountHash, weight: Weight) -> bool { + // Calculates total weight of all keys excluding the given key + let total_weight = self + .associated_keys + .total_keys_weight_excluding(account_hash); + + // Safely calculate new weight by adding the updated weight + let new_weight = total_weight.value().saturating_add(weight.value()); + + // Returns true if the new weight would be greater or equal to all of + // the thresholds. + new_weight >= self.action_thresholds().deployment().value() + && new_weight >= self.action_thresholds().key_management().value() + } + + /// Removes an associated key from an addressable entity. + /// + /// Verifies that removing the key will not cause the remaining weight to fall below any action + /// thresholds. 
+ pub fn remove_associated_key( + &mut self, + account_hash: AccountHash, + ) -> Result<(), RemoveKeyFailure> { + if self.associated_keys.contains_key(&account_hash) { + // Check if removing this weight would fall below thresholds + if !self.can_remove_key(account_hash) { + return Err(RemoveKeyFailure::ThresholdViolation); + } + } + self.associated_keys.remove_key(&account_hash) + } + + /// Updates an associated key. + /// + /// Returns an error if the update would result in a violation of the key management thresholds. + pub fn update_associated_key( + &mut self, + account_hash: AccountHash, + weight: Weight, + ) -> Result<(), UpdateKeyFailure> { + if let Some(current_weight) = self.associated_keys.get(&account_hash) { + if weight < *current_weight { + // New weight is smaller than current weight + if !self.can_update_key(account_hash, weight) { + return Err(UpdateKeyFailure::ThresholdViolation); + } + } + } + self.associated_keys.update_key(account_hash, weight) + } + + /// Sets new action threshold for a given action type for the addressable entity. + /// + /// Returns an error if the new action threshold weight is greater than the total weight of the + /// account's associated keys. + pub fn set_action_threshold( + &mut self, + action_type: ActionType, + weight: Weight, + ) -> Result<(), SetThresholdFailure> { + // Verify if new threshold weight exceeds total weight of all associated + // keys. + self.can_set_threshold(weight)?; + // Set new weight for given action + self.action_thresholds.set_threshold(action_type, weight) + } + + /// Sets a new action threshold for a given action type for the account without checking against + /// the total weight of the associated keys. + /// + /// This should only be called when authorized by an administrator account. + /// + /// Returns an error if setting the action would cause the `ActionType::Deployment` threshold to + /// be greater than any of the other action types. 
+ pub fn set_action_threshold_unchecked( + &mut self, + action_type: ActionType, + threshold: Weight, + ) -> Result<(), SetThresholdFailure> { + self.action_thresholds.set_threshold(action_type, threshold) + } + + /// Verifies if user can set action threshold. + pub fn can_set_threshold(&self, new_threshold: Weight) -> Result<(), SetThresholdFailure> { + let total_weight = self.associated_keys.total_keys_weight(); + if new_threshold > total_weight { + return Err(SetThresholdFailure::InsufficientTotalWeight); + } + Ok(()) + } + + /// Checks whether all authorization keys are associated with this addressable entity. + pub fn can_authorize(&self, authorization_keys: &BTreeSet) -> bool { + !authorization_keys.is_empty() + && authorization_keys + .iter() + .any(|e| self.associated_keys.contains_key(e)) + } + + /// Checks whether the sum of the weights of all authorization keys is + /// greater or equal to deploy threshold. + pub fn can_deploy_with(&self, authorization_keys: &BTreeSet) -> bool { + let total_weight = self + .associated_keys + .calculate_keys_weight(authorization_keys); + + total_weight >= *self.action_thresholds().deployment() + } + + /// Checks whether the sum of the weights of all authorization keys is + /// greater or equal to key management threshold. + pub fn can_manage_keys_with(&self, authorization_keys: &BTreeSet) -> bool { + let total_weight = self + .associated_keys + .calculate_keys_weight(authorization_keys); + + total_weight >= *self.action_thresholds().key_management() + } + + /// Checks whether the sum of the weights of all authorization keys is + /// greater or equal to upgrade management threshold. 
+ pub fn can_upgrade_with(&self, authorization_keys: &BTreeSet) -> bool { + let total_weight = self + .associated_keys + .calculate_keys_weight(authorization_keys); + + total_weight >= *self.action_thresholds().upgrade_management() + } + + /// Addr for accessing wasm bytes + pub fn byte_code_addr(&self) -> HashAddr { + self.byte_code_hash.value() + } + + /// Set protocol_version. + pub fn set_protocol_version(&mut self, protocol_version: ProtocolVersion) { + self.protocol_version = protocol_version; + } + + /// Determines if `AddressableEntity` is compatible with a given `ProtocolVersion`. + pub fn is_compatible_protocol_version(&self, protocol_version: ProtocolVersion) -> bool { + let entity_protocol_version = self.protocol_version.value(); + let context_protocol_version = protocol_version.value(); + if entity_protocol_version.major == context_protocol_version.major { + return true; + } + if entity_protocol_version.major == 1 && context_protocol_version.major == 2 { + // the 1.x model has been deprecated but is still supported until 3.0.0 + return true; + } + false + } + + /// Returns the kind of `AddressableEntity`. + pub fn kind(&self) -> EntityKind { + self.entity_kind + } + + /// Is this an account? + pub fn is_account_kind(&self) -> bool { + matches!(self.entity_kind, EntityKind::Account(_)) + } + + /// Key for the addressable entity + pub fn entity_key(&self, entity_hash: AddressableEntityHash) -> Key { + match self.entity_kind { + EntityKind::System(_) => { + Key::addressable_entity_key(EntityKindTag::System, entity_hash) + } + EntityKind::Account(_) => { + Key::addressable_entity_key(EntityKindTag::Account, entity_hash) + } + EntityKind::SmartContract(_) => { + Key::addressable_entity_key(EntityKindTag::SmartContract, entity_hash) + } + } + } + + /// Extracts the access rights from the named keys of the addressable entity. 
+ pub fn extract_access_rights( + &self, + entity_hash: AddressableEntityHash, + named_keys: &NamedKeys, + ) -> ContextAccessRights { + let urefs_iter = named_keys + .keys() + .filter_map(|key| key.as_uref().copied()) + .chain(iter::once(self.main_purse)); + ContextAccessRights::new(entity_hash.value(), urefs_iter) + } + + // This method is not intended to be used by third party crates. + #[doc(hidden)] + #[cfg(feature = "json-schema")] + pub fn example() -> &'static Self { + &ADDRESSABLE_ENTITY + } +} + +impl ToBytes for AddressableEntity { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut result = bytesrepr::allocate_buffer(self)?; + self.package_hash().write_bytes(&mut result)?; + self.byte_code_hash().write_bytes(&mut result)?; + self.protocol_version().write_bytes(&mut result)?; + self.main_purse().write_bytes(&mut result)?; + self.associated_keys().write_bytes(&mut result)?; + self.action_thresholds().write_bytes(&mut result)?; + self.kind().write_bytes(&mut result)?; + Ok(result) + } + + fn serialized_length(&self) -> usize { + ToBytes::serialized_length(&self.package_hash) + + ToBytes::serialized_length(&self.byte_code_hash) + + ToBytes::serialized_length(&self.protocol_version) + + ToBytes::serialized_length(&self.main_purse) + + ToBytes::serialized_length(&self.associated_keys) + + ToBytes::serialized_length(&self.action_thresholds) + + ToBytes::serialized_length(&self.entity_kind) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.package_hash().write_bytes(writer)?; + self.byte_code_hash().write_bytes(writer)?; + self.protocol_version().write_bytes(writer)?; + self.main_purse().write_bytes(writer)?; + self.associated_keys().write_bytes(writer)?; + self.action_thresholds().write_bytes(writer)?; + self.kind().write_bytes(writer)?; + Ok(()) + } +} + +impl FromBytes for AddressableEntity { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (package_hash, bytes) = 
PackageHash::from_bytes(bytes)?; + let (byte_code_hash, bytes) = ByteCodeHash::from_bytes(bytes)?; + let (protocol_version, bytes) = ProtocolVersion::from_bytes(bytes)?; + let (main_purse, bytes) = URef::from_bytes(bytes)?; + let (associated_keys, bytes) = AssociatedKeys::from_bytes(bytes)?; + let (action_thresholds, bytes) = ActionThresholds::from_bytes(bytes)?; + let (entity_kind, bytes) = EntityKind::from_bytes(bytes)?; + Ok(( + AddressableEntity { + package_hash, + byte_code_hash, + protocol_version, + main_purse, + associated_keys, + action_thresholds, + entity_kind, + }, + bytes, + )) + } +} + +impl Default for AddressableEntity { + fn default() -> Self { + AddressableEntity { + byte_code_hash: [0; KEY_HASH_LENGTH].into(), + package_hash: [0; KEY_HASH_LENGTH].into(), + protocol_version: ProtocolVersion::V1_0_0, + main_purse: URef::default(), + action_thresholds: ActionThresholds::default(), + associated_keys: AssociatedKeys::default(), + entity_kind: EntityKind::SmartContract(ContractRuntimeTag::VmCasperV1), + } + } +} + +impl From for AddressableEntity { + fn from(value: Contract) -> Self { + AddressableEntity::new( + PackageHash::new(value.contract_package_hash().value()), + ByteCodeHash::new(value.contract_wasm_hash().value()), + value.protocol_version(), + URef::default(), + AssociatedKeys::default(), + ActionThresholds::default(), + EntityKind::SmartContract(ContractRuntimeTag::VmCasperV1), + ) + } +} + +impl From for AddressableEntity { + fn from(value: Account) -> Self { + AddressableEntity::new( + PackageHash::default(), + ByteCodeHash::new([0u8; 32]), + ProtocolVersion::default(), + value.main_purse(), + value.associated_keys().clone().into(), + value.action_thresholds().clone().into(), + EntityKind::Account(value.account_hash()), + ) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{AccessRights, URef, UREF_ADDR_LENGTH}; + + #[cfg(feature = "json-schema")] + use schemars::{gen::SchemaGenerator, schema::InstanceType}; + + #[test] + fn 
entity_hash_from_slice() { + let bytes: Vec = (0..32).collect(); + let entity_hash = HashAddr::try_from(&bytes[..]).expect("should create contract hash"); + let entity_hash = AddressableEntityHash::new(entity_hash); + assert_eq!(&bytes, &entity_hash.as_bytes()); + } + + #[test] + fn entity_hash_from_str() { + let entity_hash = AddressableEntityHash([3; 32]); + let encoded = entity_hash.to_formatted_string(); + let decoded = AddressableEntityHash::from_formatted_str(&encoded).unwrap(); + assert_eq!(entity_hash, decoded); + + let invalid_prefix = + "addressable-entity--0000000000000000000000000000000000000000000000000000000000000000"; + assert!(AddressableEntityHash::from_formatted_str(invalid_prefix).is_err()); + + let short_addr = + "addressable-entity-00000000000000000000000000000000000000000000000000000000000000"; + assert!(AddressableEntityHash::from_formatted_str(short_addr).is_err()); + + let long_addr = + "addressable-entity-000000000000000000000000000000000000000000000000000000000000000000"; + assert!(AddressableEntityHash::from_formatted_str(long_addr).is_err()); + + let invalid_hex = + "addressable-entity-000000000000000000000000000000000000000000000000000000000000000g"; + assert!(AddressableEntityHash::from_formatted_str(invalid_hex).is_err()); + } + + #[test] + fn named_key_addr_from_str() { + let named_key_addr = + NamedKeyAddr::new_named_key_entry(EntityAddr::new_smart_contract([3; 32]), [4; 32]); + let encoded = named_key_addr.to_formatted_string(); + let decoded = NamedKeyAddr::from_formatted_str(&encoded).unwrap(); + assert_eq!(named_key_addr, decoded); + } + + #[test] + fn entity_hash_serde_roundtrip() { + let entity_hash = AddressableEntityHash([255; 32]); + let serialized = bincode::serialize(&entity_hash).unwrap(); + let deserialized = bincode::deserialize(&serialized).unwrap(); + assert_eq!(entity_hash, deserialized) + } + + #[test] + fn entity_hash_json_roundtrip() { + let entity_hash = AddressableEntityHash([255; 32]); + let json_string = 
serde_json::to_string_pretty(&entity_hash).unwrap(); + let decoded = serde_json::from_str(&json_string).unwrap(); + assert_eq!(entity_hash, decoded) + } + + #[test] + fn entity_addr_formatted_string_roundtrip() { + let entity_addr = EntityAddr::Account([5; 32]); + let encoded = entity_addr.to_formatted_string(); + let decoded = EntityAddr::from_formatted_str(&encoded).expect("must get entity addr"); + assert_eq!(decoded, entity_addr); + + let entity_addr = EntityAddr::SmartContract([5; 32]); + let encoded = entity_addr.to_formatted_string(); + let decoded = EntityAddr::from_formatted_str(&encoded).expect("must get entity addr"); + assert_eq!(decoded, entity_addr); + + let entity_addr = EntityAddr::System([5; 32]); + let encoded = entity_addr.to_formatted_string(); + let decoded = EntityAddr::from_formatted_str(&encoded).expect("must get entity addr"); + assert_eq!(decoded, entity_addr); + } + + #[test] + fn entity_addr_serialization_roundtrip() { + for addr in [ + EntityAddr::new_system([1; 32]), + EntityAddr::new_account([1; 32]), + EntityAddr::new_smart_contract([1; 32]), + ] { + bytesrepr::test_serialization_roundtrip(&addr); + } + } + + #[test] + fn entity_addr_serde_roundtrip() { + for addr in [ + EntityAddr::new_system([1; 32]), + EntityAddr::new_account([1; 32]), + EntityAddr::new_smart_contract([1; 32]), + ] { + let serialized = bincode::serialize(&addr).unwrap(); + let deserialized = bincode::deserialize(&serialized).unwrap(); + assert_eq!(addr, deserialized) + } + } + + #[test] + fn entity_addr_json_roundtrip() { + for addr in [ + EntityAddr::new_system([1; 32]), + EntityAddr::new_account([1; 32]), + EntityAddr::new_smart_contract([1; 32]), + ] { + let json_string = serde_json::to_string_pretty(&addr).unwrap(); + let decoded = serde_json::from_str(&json_string).unwrap(); + assert_eq!(addr, decoded) + } + } + + #[cfg(feature = "json-schema")] + #[test] + fn entity_addr_schema() { + let mut gen = SchemaGenerator::default(); + let any_of = 
EntityAddr::json_schema(&mut gen) + .into_object() + .subschemas + .expect("should have subschemas") + .any_of + .expect("should have any_of"); + for elem in any_of { + let schema = elem + .into_object() + .instance_type + .expect("should have instance type"); + assert!(schema.contains(&InstanceType::String), "{:?}", schema); + } + } + + #[test] + fn should_extract_access_rights() { + const MAIN_PURSE: URef = URef::new([2; 32], AccessRights::READ_ADD_WRITE); + + let entity_hash = AddressableEntityHash([255; 32]); + let uref = URef::new([84; UREF_ADDR_LENGTH], AccessRights::READ_ADD); + let uref_r = URef::new([42; UREF_ADDR_LENGTH], AccessRights::READ); + let uref_a = URef::new([42; UREF_ADDR_LENGTH], AccessRights::ADD); + let uref_w = URef::new([42; UREF_ADDR_LENGTH], AccessRights::WRITE); + let mut named_keys = NamedKeys::new(); + named_keys.insert("a".to_string(), Key::URef(uref_r)); + named_keys.insert("b".to_string(), Key::URef(uref_a)); + named_keys.insert("c".to_string(), Key::URef(uref_w)); + named_keys.insert("d".to_string(), Key::URef(uref)); + let associated_keys = AssociatedKeys::new(AccountHash::new([254; 32]), Weight::new(1)); + let contract = AddressableEntity::new( + PackageHash::new([254; 32]), + ByteCodeHash::new([253; 32]), + ProtocolVersion::V1_0_0, + MAIN_PURSE, + associated_keys, + ActionThresholds::new(Weight::new(1), Weight::new(1), Weight::new(1)) + .expect("should create thresholds"), + EntityKind::SmartContract(ContractRuntimeTag::VmCasperV1), + ); + let access_rights = contract.extract_access_rights(entity_hash, &named_keys); + let expected_uref = URef::new([42; UREF_ADDR_LENGTH], AccessRights::READ_ADD_WRITE); + assert!( + access_rights.has_access_rights_to_uref(&uref), + "urefs in named keys should be included in access rights" + ); + assert!( + access_rights.has_access_rights_to_uref(&expected_uref), + "multiple access right bits to the same uref should coalesce" + ); + } +} + +#[cfg(test)] +mod prop_tests { + use proptest::prelude::*; 
+ + use crate::{bytesrepr, gens}; + + proptest! { + #[test] + fn test_value_contract(contract in gens::addressable_entity_arb()) { + bytesrepr::test_serialization_roundtrip(&contract); + } + } +} diff --git a/types/src/addressable_entity/action_thresholds.rs b/types/src/addressable_entity/action_thresholds.rs new file mode 100644 index 0000000000..8d56658da7 --- /dev/null +++ b/types/src/addressable_entity/action_thresholds.rs @@ -0,0 +1,212 @@ +//! This module contains types and functions for managing action thresholds. + +use alloc::vec::Vec; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use crate::{ + account::{ActionThresholds as AccountActionThresholds, SetThresholdFailure}, + addressable_entity::{ActionType, Weight, WEIGHT_SERIALIZED_LENGTH}, + bytesrepr::{self, Error, FromBytes, ToBytes}, +}; + +/// Thresholds that have to be met when executing an action of a certain type. +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[cfg_attr(feature = "json-schema", schemars(rename = "EntityActionThresholds"))] +pub struct ActionThresholds { + /// Threshold for deploy execution. + pub deployment: Weight, + /// Threshold for upgrading contracts. + pub upgrade_management: Weight, + /// Threshold for managing action threshold. + pub key_management: Weight, +} + +impl ActionThresholds { + /// Creates new ActionThresholds object with provided weights + /// + /// Requires deployment threshold to be lower than or equal to + /// key management threshold. 
+ pub fn new( + deployment: Weight, + upgrade_management: Weight, + key_management: Weight, + ) -> Result { + if deployment > key_management { + return Err(SetThresholdFailure::DeploymentThreshold); + } + Ok(ActionThresholds { + deployment, + upgrade_management, + key_management, + }) + } + /// Sets new threshold for [ActionType::Deployment]. + /// Should return an error if setting new threshold for `action_type` breaks + /// one of the invariants. Currently, invariant is that + /// `ActionType::Deployment` threshold shouldn't be higher than any + /// other, which should be checked both when increasing `Deployment` + /// threshold and decreasing the other. + pub fn set_deployment_threshold( + &mut self, + new_threshold: Weight, + ) -> Result<(), SetThresholdFailure> { + if new_threshold > self.key_management { + Err(SetThresholdFailure::DeploymentThreshold) + } else { + self.deployment = new_threshold; + Ok(()) + } + } + + /// Sets new threshold for [ActionType::KeyManagement]. + pub fn set_key_management_threshold( + &mut self, + new_threshold: Weight, + ) -> Result<(), SetThresholdFailure> { + if self.deployment > new_threshold { + Err(SetThresholdFailure::KeyManagementThreshold) + } else { + self.key_management = new_threshold; + Ok(()) + } + } + + /// Sets new threshold for [ActionType::UpgradeManagement]. + pub fn set_upgrade_management_threshold( + &mut self, + upgrade_management: Weight, + ) -> Result<(), SetThresholdFailure> { + self.upgrade_management = upgrade_management; + Ok(()) + } + + /// Returns the deployment action threshold. + pub fn deployment(&self) -> &Weight { + &self.deployment + } + + /// Returns key management action threshold. + pub fn key_management(&self) -> &Weight { + &self.key_management + } + + /// Returns the upgrade management action threshold. 
+ pub fn upgrade_management(&self) -> &Weight { + &self.upgrade_management + } + + /// Unified function that takes an action type, and changes appropriate + /// threshold defined by the [ActionType] variants. + pub fn set_threshold( + &mut self, + action_type: ActionType, + new_threshold: Weight, + ) -> Result<(), SetThresholdFailure> { + match action_type { + ActionType::Deployment => self.set_deployment_threshold(new_threshold), + ActionType::KeyManagement => self.set_key_management_threshold(new_threshold), + ActionType::UpgradeManagement => self.set_upgrade_management_threshold(new_threshold), + } + } +} + +impl Default for ActionThresholds { + fn default() -> Self { + ActionThresholds { + deployment: Weight::new(1), + upgrade_management: Weight::new(1), + key_management: Weight::new(1), + } + } +} + +impl From for ActionThresholds { + fn from(value: AccountActionThresholds) -> Self { + Self { + deployment: Weight::new(value.deployment.value()), + key_management: Weight::new(value.key_management.value()), + upgrade_management: Weight::new(1), + } + } +} + +impl ToBytes for ActionThresholds { + fn to_bytes(&self) -> Result, Error> { + let mut result = bytesrepr::unchecked_allocate_buffer(self); + result.append(&mut self.deployment.to_bytes()?); + result.append(&mut self.upgrade_management.to_bytes()?); + result.append(&mut self.key_management.to_bytes()?); + Ok(result) + } + + fn serialized_length(&self) -> usize { + 3 * WEIGHT_SERIALIZED_LENGTH + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.deployment().write_bytes(writer)?; + self.upgrade_management().write_bytes(writer)?; + self.key_management().write_bytes(writer)?; + Ok(()) + } +} + +impl FromBytes for ActionThresholds { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (deployment, rem) = Weight::from_bytes(bytes)?; + let (upgrade_management, rem) = Weight::from_bytes(rem)?; + let (key_management, rem) = Weight::from_bytes(rem)?; + let ret = 
ActionThresholds { + deployment, + upgrade_management, + key_management, + }; + Ok((ret, rem)) + } +} + +#[doc(hidden)] +#[cfg(any(feature = "testing", feature = "gens", test))] +pub mod gens { + use proptest::prelude::*; + + use super::ActionThresholds; + + pub fn action_thresholds_arb() -> impl Strategy { + Just(Default::default()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn should_create_new_action_thresholds() { + let action_thresholds = + ActionThresholds::new(Weight::new(1), Weight::new(1), Weight::new(42)).unwrap(); + assert_eq!(*action_thresholds.deployment(), Weight::new(1)); + assert_eq!(*action_thresholds.upgrade_management(), Weight::new(1)); + assert_eq!(*action_thresholds.key_management(), Weight::new(42)); + } + + #[test] + fn should_not_create_action_thresholds_with_invalid_deployment_threshold() { + // deployment cant be greater than key management + assert!(ActionThresholds::new(Weight::new(5), Weight::new(1), Weight::new(1)).is_err()); + } + + #[test] + fn serialization_roundtrip() { + let action_thresholds = + ActionThresholds::new(Weight::new(1), Weight::new(1), Weight::new(42)).unwrap(); + bytesrepr::test_serialization_roundtrip(&action_thresholds); + } +} diff --git a/types/src/addressable_entity/action_type.rs b/types/src/addressable_entity/action_type.rs new file mode 100644 index 0000000000..2a627309ba --- /dev/null +++ b/types/src/addressable_entity/action_type.rs @@ -0,0 +1,38 @@ +use core::convert::TryFrom; + +use super::TryFromIntError; + +/// The various types of action which can be performed in the context of a given account. +#[repr(u32)] +pub enum ActionType { + /// Represents performing a deploy. + Deployment = 0, + /// Represents changing the associated keys (i.e. map of [`AccountHash`](super::AccountHash)s + /// to [`Weight`](super::Weight)s) or action thresholds (i.e. the total + /// [`Weight`](super::Weight)s of signing [`AccountHash`](super::AccountHash)s required to + /// perform various actions). 
+ KeyManagement = 1, + /// Represents changing the associated keys (i.e. map of [`AccountHash`](super::AccountHash)s + /// to [`Weight`](super::Weight)s) or action thresholds (i.e. the total + /// [`Weight`](super::Weight)s of signing [`AccountHash`](super::AccountHash)s required to + /// upgrade the addressable entity. + UpgradeManagement = 2, +} + +// This conversion is not intended to be used by third party crates. +#[doc(hidden)] +impl TryFrom for ActionType { + type Error = TryFromIntError; + + fn try_from(value: u32) -> Result { + // This doesn't use `num_derive` traits such as FromPrimitive and ToPrimitive + // that helps to automatically create `from_u32` and `to_u32`. This approach + // gives better control over generated code. + match value { + d if d == ActionType::Deployment as u32 => Ok(ActionType::Deployment), + d if d == ActionType::KeyManagement as u32 => Ok(ActionType::KeyManagement), + d if d == ActionType::UpgradeManagement as u32 => Ok(ActionType::UpgradeManagement), + _ => Err(TryFromIntError(())), + } + } +} diff --git a/types/src/addressable_entity/associated_keys.rs b/types/src/addressable_entity/associated_keys.rs new file mode 100644 index 0000000000..1e7ec59582 --- /dev/null +++ b/types/src/addressable_entity/associated_keys.rs @@ -0,0 +1,393 @@ +//! This module contains types and functions for working with keys associated with an account. 
+ +use alloc::{ + collections::{btree_map::Entry, BTreeMap, BTreeSet}, + vec::Vec, +}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; +#[cfg(feature = "json-schema")] +use serde_map_to_array::KeyValueJsonSchema; +use serde_map_to_array::{BTreeMapToArray, KeyValueLabels}; + +use crate::{ + account::{ + AccountHash, AddKeyFailure, AssociatedKeys as AccountAssociatedKeys, RemoveKeyFailure, + UpdateKeyFailure, + }, + addressable_entity::Weight, + bytesrepr::{self, FromBytes, ToBytes}, +}; + +/// A collection of weighted public keys (represented as account hashes) associated with an account. +#[derive(Default, PartialOrd, Ord, PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[cfg_attr(feature = "json-schema", schemars(rename = "EntityAssociatedKeys"))] +#[serde(deny_unknown_fields)] +#[rustfmt::skip] +pub struct AssociatedKeys( + #[serde(with = "BTreeMapToArray::")] + BTreeMap, +); + +impl AssociatedKeys { + /// Constructs a new AssociatedKeys. + pub fn new(key: AccountHash, weight: Weight) -> AssociatedKeys { + let mut bt: BTreeMap = BTreeMap::new(); + bt.insert(key, weight); + AssociatedKeys(bt) + } + + /// Adds a new AssociatedKey to the set. + /// + /// Returns true if added successfully, false otherwise. + pub fn add_key(&mut self, key: AccountHash, weight: Weight) -> Result<(), AddKeyFailure> { + match self.0.entry(key) { + Entry::Vacant(entry) => { + entry.insert(weight); + } + Entry::Occupied(_) => return Err(AddKeyFailure::DuplicateKey), + } + Ok(()) + } + + /// Removes key from the associated keys set. + /// Returns true if value was found in the set prior to the removal, false + /// otherwise. 
+ pub fn remove_key(&mut self, key: &AccountHash) -> Result<(), RemoveKeyFailure> { + self.0 + .remove(key) + .map(|_| ()) + .ok_or(RemoveKeyFailure::MissingKey) + } + + /// Adds new AssociatedKey to the set. + /// Returns true if added successfully, false otherwise. + pub fn update_key(&mut self, key: AccountHash, weight: Weight) -> Result<(), UpdateKeyFailure> { + match self.0.entry(key) { + Entry::Vacant(_) => { + return Err(UpdateKeyFailure::MissingKey); + } + Entry::Occupied(mut entry) => { + *entry.get_mut() = weight; + } + } + Ok(()) + } + + /// Returns the weight of an account hash. + pub fn get(&self, key: &AccountHash) -> Option<&Weight> { + self.0.get(key) + } + + /// Returns `true` if a given key exists. + pub fn contains_key(&self, key: &AccountHash) -> bool { + self.0.contains_key(key) + } + + /// Returns an iterator over the account hash and the weights. + pub fn iter(&self) -> impl Iterator { + self.0.iter() + } + + /// Returns the count of the associated keys. + pub fn len(&self) -> usize { + self.0.len() + } + + /// Returns `true` if the associated keys are empty. + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } + + /// Helper method that calculates weight for keys that comes from any + /// source. + /// + /// This method is not concerned about uniqueness of the passed iterable. + /// Uniqueness is determined based on the input collection properties, + /// which is either BTreeSet (in [`AssociatedKeys::calculate_keys_weight`]) + /// or BTreeMap (in [`AssociatedKeys::total_keys_weight`]). 
+ fn calculate_any_keys_weight<'a>(&self, keys: impl Iterator) -> Weight { + let total = keys + .filter_map(|key| self.0.get(key)) + .fold(0u8, |acc, w| acc.saturating_add(w.value())); + + Weight::new(total) + } + + /// Calculates total weight of authorization keys provided by an argument + pub fn calculate_keys_weight(&self, authorization_keys: &BTreeSet) -> Weight { + self.calculate_any_keys_weight(authorization_keys.iter()) + } + + /// Calculates total weight of all authorization keys + pub fn total_keys_weight(&self) -> Weight { + self.calculate_any_keys_weight(self.0.keys()) + } + + /// Calculates total weight of all authorization keys excluding a given key + pub fn total_keys_weight_excluding(&self, account_hash: AccountHash) -> Weight { + self.calculate_any_keys_weight(self.0.keys().filter(|&&element| element != account_hash)) + } + + pub fn empty_keys() -> Self { + AssociatedKeys(BTreeMap::new()) + } +} + +impl From> for AssociatedKeys { + fn from(associated_keys: BTreeMap) -> Self { + Self(associated_keys) + } +} + +impl ToBytes for AssociatedKeys { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.0.to_bytes() + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.0.write_bytes(writer) + } +} + +impl FromBytes for AssociatedKeys { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (associated_keys, rem) = FromBytes::from_bytes(bytes)?; + Ok((AssociatedKeys(associated_keys), rem)) + } +} + +impl From for AssociatedKeys { + fn from(value: AccountAssociatedKeys) -> Self { + let mut associated_keys = AssociatedKeys::default(); + for (account_hash, weight) in value.iter() { + associated_keys + .0 + .insert(*account_hash, Weight::new(weight.value())); + } + associated_keys + } +} + +struct Labels; + +impl KeyValueLabels for Labels { + const KEY: &'static str = "account_hash"; + const VALUE: &'static str = "weight"; 
+} + +#[cfg(feature = "json-schema")] +impl KeyValueJsonSchema for Labels { + const JSON_SCHEMA_KV_NAME: Option<&'static str> = Some("AssociatedKey"); + const JSON_SCHEMA_KV_DESCRIPTION: Option<&'static str> = Some("A weighted public key."); + const JSON_SCHEMA_KEY_DESCRIPTION: Option<&'static str> = + Some("The account hash of the public key."); + const JSON_SCHEMA_VALUE_DESCRIPTION: Option<&'static str> = + Some("The weight assigned to the public key."); +} + +#[doc(hidden)] +#[cfg(any(feature = "testing", feature = "gens", test))] +pub mod gens { + use proptest::prelude::*; + + use crate::gens::{account_hash_arb, weight_arb}; + + use super::AssociatedKeys; + + pub fn associated_keys_arb() -> impl Strategy { + proptest::collection::btree_map(account_hash_arb(), weight_arb(), 10).prop_map(|keys| { + let mut associated_keys = AssociatedKeys::default(); + keys.into_iter().for_each(|(k, v)| { + associated_keys.add_key(k, v).unwrap(); + }); + associated_keys + }) + } +} + +#[cfg(test)] +mod tests { + use std::{collections::BTreeSet, iter::FromIterator}; + + use crate::{ + account::{AccountHash, AddKeyFailure, ACCOUNT_HASH_LENGTH}, + addressable_entity::Weight, + bytesrepr, + }; + + use super::*; + + #[test] + fn associated_keys_add() { + let mut keys = + AssociatedKeys::new(AccountHash::new([0u8; ACCOUNT_HASH_LENGTH]), Weight::new(1)); + let new_pk = AccountHash::new([1u8; ACCOUNT_HASH_LENGTH]); + let new_pk_weight = Weight::new(2); + assert!(keys.add_key(new_pk, new_pk_weight).is_ok()); + assert_eq!(keys.get(&new_pk), Some(&new_pk_weight)) + } + + #[test] + fn associated_keys_add_duplicate() { + let pk = AccountHash::new([0u8; ACCOUNT_HASH_LENGTH]); + let weight = Weight::new(1); + let mut keys = AssociatedKeys::new(pk, weight); + assert_eq!( + keys.add_key(pk, Weight::new(10)), + Err(AddKeyFailure::DuplicateKey) + ); + assert_eq!(keys.get(&pk), Some(&weight)); + } + + #[test] + fn associated_keys_remove() { + let pk = AccountHash::new([0u8; ACCOUNT_HASH_LENGTH]); + 
let weight = Weight::new(1); + let mut keys = AssociatedKeys::new(pk, weight); + assert!(keys.remove_key(&pk).is_ok()); + assert!(keys + .remove_key(&AccountHash::new([1u8; ACCOUNT_HASH_LENGTH])) + .is_err()); + } + + #[test] + fn associated_keys_update() { + let pk1 = AccountHash::new([0u8; ACCOUNT_HASH_LENGTH]); + let pk2 = AccountHash::new([1u8; ACCOUNT_HASH_LENGTH]); + let weight = Weight::new(1); + let mut keys = AssociatedKeys::new(pk1, weight); + assert!(matches!( + keys.update_key(pk2, Weight::new(2)) + .expect_err("should get error"), + UpdateKeyFailure::MissingKey + )); + keys.add_key(pk2, Weight::new(1)).unwrap(); + assert_eq!(keys.get(&pk2), Some(&Weight::new(1))); + keys.update_key(pk2, Weight::new(2)).unwrap(); + assert_eq!(keys.get(&pk2), Some(&Weight::new(2))); + } + + #[test] + fn associated_keys_calculate_keys_once() { + let key_1 = AccountHash::new([0; 32]); + let key_2 = AccountHash::new([1; 32]); + let key_3 = AccountHash::new([2; 32]); + let mut keys = AssociatedKeys::default(); + + keys.add_key(key_2, Weight::new(2)) + .expect("should add key_1"); + keys.add_key(key_1, Weight::new(1)) + .expect("should add key_1"); + keys.add_key(key_3, Weight::new(3)) + .expect("should add key_1"); + + assert_eq!( + keys.calculate_keys_weight(&BTreeSet::from_iter(vec![ + key_1, key_2, key_3, key_1, key_2, key_3, + ])), + Weight::new(1 + 2 + 3) + ); + } + + #[test] + fn associated_keys_total_weight() { + let associated_keys = { + let mut res = AssociatedKeys::new(AccountHash::new([1u8; 32]), Weight::new(1)); + res.add_key(AccountHash::new([2u8; 32]), Weight::new(11)) + .expect("should add key 1"); + res.add_key(AccountHash::new([3u8; 32]), Weight::new(12)) + .expect("should add key 2"); + res.add_key(AccountHash::new([4u8; 32]), Weight::new(13)) + .expect("should add key 3"); + res + }; + assert_eq!( + associated_keys.total_keys_weight(), + Weight::new(1 + 11 + 12 + 13) + ); + } + + #[test] + fn associated_keys_total_weight_excluding() { + let identity_key = 
AccountHash::new([1u8; 32]); + let identity_key_weight = Weight::new(1); + + let key_1 = AccountHash::new([2u8; 32]); + let key_1_weight = Weight::new(11); + + let key_2 = AccountHash::new([3u8; 32]); + let key_2_weight = Weight::new(12); + + let key_3 = AccountHash::new([4u8; 32]); + let key_3_weight = Weight::new(13); + + let associated_keys = { + let mut res = AssociatedKeys::new(identity_key, identity_key_weight); + res.add_key(key_1, key_1_weight).expect("should add key 1"); + res.add_key(key_2, key_2_weight).expect("should add key 2"); + res.add_key(key_3, key_3_weight).expect("should add key 3"); + res + }; + assert_eq!( + associated_keys.total_keys_weight_excluding(key_2), + Weight::new(identity_key_weight.value() + key_1_weight.value() + key_3_weight.value()) + ); + } + + #[test] + fn overflowing_keys_weight() { + let identity_key = AccountHash::new([1u8; 32]); + let key_1 = AccountHash::new([2u8; 32]); + let key_2 = AccountHash::new([3u8; 32]); + let key_3 = AccountHash::new([4u8; 32]); + + let identity_key_weight = Weight::new(250); + let weight_1 = Weight::new(1); + let weight_2 = Weight::new(2); + let weight_3 = Weight::new(3); + + let saturated_weight = Weight::new(u8::MAX); + + let associated_keys = { + let mut res = AssociatedKeys::new(identity_key, identity_key_weight); + + res.add_key(key_1, weight_1).expect("should add key 1"); + res.add_key(key_2, weight_2).expect("should add key 2"); + res.add_key(key_3, weight_3).expect("should add key 3"); + res + }; + + assert_eq!( + associated_keys.calculate_keys_weight(&BTreeSet::from_iter(vec![ + identity_key, // 250 + key_1, // 251 + key_2, // 253 + key_3, // 256 - error + ])), + saturated_weight, + ); + } + + #[test] + fn serialization_roundtrip() { + let mut keys = AssociatedKeys::default(); + keys.add_key(AccountHash::new([1; 32]), Weight::new(1)) + .unwrap(); + keys.add_key(AccountHash::new([2; 32]), Weight::new(2)) + .unwrap(); + keys.add_key(AccountHash::new([3; 32]), Weight::new(3)) + .unwrap(); + 
bytesrepr::test_serialization_roundtrip(&keys); + } +} diff --git a/types/src/addressable_entity/entry_points.rs b/types/src/addressable_entity/entry_points.rs new file mode 100644 index 0000000000..180ddf8e08 --- /dev/null +++ b/types/src/addressable_entity/entry_points.rs @@ -0,0 +1,823 @@ +use core::fmt::{Debug, Display, Formatter}; + +use alloc::{ + collections::BTreeMap, + format, + string::{String, ToString}, + vec::Vec, +}; + +use blake2::{ + digest::{Update, VariableOutput}, + VarBlake2b, +}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +use num_derive::FromPrimitive; +use num_traits::FromPrimitive; +#[cfg(any(feature = "testing", test))] +use rand::{ + distributions::{Distribution, Standard}, + Rng, +}; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use crate::{ + addressable_entity::FromStrError, + bytesrepr, + bytesrepr::{Error, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + checksummed_hex, CLType, EntityAddr, Group, HashAddr, BLAKE2B_DIGEST_LENGTH, KEY_HASH_LENGTH, +}; + +const V1_ENTRY_POINT_TAG: u8 = 0; + +const V1_ENTRY_POINT_PREFIX: &str = "entry-point-v1-"; + +/// Context of method execution +/// +/// Most significant bit represents version i.e. +/// - 0b0 -> 0.x/1.x (session & contracts) +/// - 0b1 -> 2.x and later (introduced installer, utility entry points) +#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, FromPrimitive)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub enum EntryPointType { + /// Runs using the calling entity's context. + /// In v1.x this was used for both "session" code run using the originating + /// Account's context, and also for "StoredSession" code that ran in the + /// caller's context. While this made systemic sense due to the way the runtime + /// context nesting works, this dual usage was very confusing to most human beings. 
+ /// + /// In v2.x the renamed Caller variant is exclusively used for wasm run using the initiating + /// account entity's context. Previously installed 1.x stored session code should + /// continue to work as the binary value matches but we no longer allow such logic + /// to be upgraded, nor do we allow new stored session to be installed. + Caller = 0b00000000, + /// Runs using the called entity's context. + Called = 0b00000001, + /// Extract a subset of bytecode and installs it as a new smart contract. + /// Runs using the called entity's context. + Factory = 0b10000000, +} + +impl EntryPointType { + /// Checks if entry point type is introduced before 2.0. + /// + /// This method checks if there is a bit pattern for entry point types introduced in 2.0. + /// + /// If this bit is missing, that means given entry point type was defined in pre-2.0 world. + pub fn is_legacy_pattern(&self) -> bool { + (*self as u8) & 0b10000000 == 0 + } + + /// Get the bit pattern. + pub fn bits(self) -> u8 { + self as u8 + } + + /// Returns true if entry point type is invalid for the context. + pub fn is_invalid_context(&self) -> bool { + match self { + EntryPointType::Caller => true, + EntryPointType::Called | EntryPointType::Factory => false, + } + } +} + +impl ToBytes for EntryPointType { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.bits().to_bytes() + } + + fn serialized_length(&self) -> usize { + 1 + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + writer.push(self.bits()); + Ok(()) + } +} + +impl FromBytes for EntryPointType { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (value, bytes) = u8::from_bytes(bytes)?; + let entry_point_type = + EntryPointType::from_u8(value).ok_or(bytesrepr::Error::Formatting)?; + Ok((entry_point_type, bytes)) + } +} + +/// Default name for an entry point. +pub const DEFAULT_ENTRY_POINT_NAME: &str = "call"; + +/// Collection of entry point parameters. 
+pub type Parameters = Vec; + +/// An enum specifying who pays for the invocation and execution of the entrypoint. +#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[repr(u8)] +pub enum EntryPointPayment { + /// The caller must cover costs + Caller = 0, + /// Will cover costs if directly invoked. + DirectInvocationOnly = 1, + /// will cover costs to execute self including any subsequent invoked contracts + SelfOnward = 2, +} + +impl EntryPointPayment { + /// Contract will pay if directly invoked. + pub fn will_pay_direct_invocation(&self) -> bool { + match self { + EntryPointPayment::Caller => false, + EntryPointPayment::DirectInvocationOnly | EntryPointPayment::SelfOnward => true, + } + } +} + +impl ToBytes for EntryPointPayment { + fn to_bytes(&self) -> Result, Error> { + let mut result = bytesrepr::unchecked_allocate_buffer(self); + self.write_bytes(&mut result)?; + Ok(result) + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + writer.push(*self as u8); + Ok(()) + } +} + +impl FromBytes for EntryPointPayment { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (id, rem) = u8::from_bytes(bytes)?; + let tag = match id { + tag if tag == EntryPointPayment::Caller as u8 => EntryPointPayment::Caller, + tag if tag == EntryPointPayment::DirectInvocationOnly as u8 => { + EntryPointPayment::DirectInvocationOnly + } + tag if tag == EntryPointPayment::SelfOnward as u8 => EntryPointPayment::SelfOnward, + _ => return Err(Error::Formatting), + }; + Ok((tag, rem)) + } +} + +/// Type signature of a method. Order of arguments matter since can be +/// referenced by index as well as name. 
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub struct EntityEntryPoint { + name: String, + args: Parameters, // one argument vec![Parameter::new("chunked", CLType::Any)] + ret: CLType, + access: EntryPointAccess, + entry_point_type: EntryPointType, + entry_point_payment: EntryPointPayment, +} + +impl From + for ( + String, + Parameters, + CLType, + EntryPointAccess, + EntryPointType, + EntryPointPayment, + ) +{ + fn from(entry_point: EntityEntryPoint) -> Self { + ( + entry_point.name, + entry_point.args, + entry_point.ret, + entry_point.access, + entry_point.entry_point_type, + entry_point.entry_point_payment, + ) + } +} + +impl EntityEntryPoint { + /// `EntryPoint` constructor. + pub fn new>( + name: T, + args: Parameters, + ret: CLType, + access: EntryPointAccess, + entry_point_type: EntryPointType, + entry_point_payment: EntryPointPayment, + ) -> Self { + EntityEntryPoint { + name: name.into(), + args, + ret, + access, + entry_point_type, + entry_point_payment, + } + } + + /// Create a default [`EntityEntryPoint`] with specified name. + pub fn default_with_name>(name: T) -> Self { + EntityEntryPoint { + name: name.into(), + ..Default::default() + } + } + + /// Get name. + pub fn name(&self) -> &str { + &self.name + } + + /// Get access enum. + pub fn access(&self) -> &EntryPointAccess { + &self.access + } + + /// Get the arguments for this method. + pub fn args(&self) -> &[Parameter] { + self.args.as_slice() + } + + /// Get the return type. 
+ pub fn ret(&self) -> &CLType { + &self.ret + } + + /// Obtains entry point + pub fn entry_point_type(&self) -> EntryPointType { + self.entry_point_type + } + + /// Obtains entry point payment + pub fn entry_point_payment(&self) -> EntryPointPayment { + self.entry_point_payment + } +} + +impl Default for EntityEntryPoint { + /// constructor for a public session `EntryPoint` that takes no args and returns `Unit` + fn default() -> Self { + EntityEntryPoint { + name: DEFAULT_ENTRY_POINT_NAME.to_string(), + args: Vec::new(), + ret: CLType::Unit, + access: EntryPointAccess::Public, + entry_point_type: EntryPointType::Caller, + entry_point_payment: EntryPointPayment::Caller, + } + } +} + +impl ToBytes for EntityEntryPoint { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.name.serialized_length() + + self.args.serialized_length() + + self.ret.serialized_length() + + self.access.serialized_length() + + self.entry_point_type.serialized_length() + + self.entry_point_payment.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.name.write_bytes(writer)?; + self.args.write_bytes(writer)?; + self.ret.append_bytes(writer)?; + self.access.write_bytes(writer)?; + self.entry_point_type.write_bytes(writer)?; + self.entry_point_payment.write_bytes(writer)?; + Ok(()) + } +} + +impl FromBytes for EntityEntryPoint { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (name, bytes) = String::from_bytes(bytes)?; + let (args, bytes) = Vec::::from_bytes(bytes)?; + let (ret, bytes) = CLType::from_bytes(bytes)?; + let (access, bytes) = EntryPointAccess::from_bytes(bytes)?; + let (entry_point_type, bytes) = EntryPointType::from_bytes(bytes)?; + let (entry_point_payment, bytes) = EntryPointPayment::from_bytes(bytes)?; + + Ok(( + EntityEntryPoint { + name, 
+ args, + ret, + access, + entry_point_type, + entry_point_payment, + }, + bytes, + )) + } +} + +/// Enum describing the possible access control options for a contract entry +/// point (method). +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub enum EntryPointAccess { + /// Anyone can call this method (no access controls). + Public, + /// Only users from the listed groups may call this method. Note: if the + /// list is empty then this method is not callable from outside the + /// contract. + Groups(Vec), + /// Can't be accessed directly but are kept in the derived wasm bytes. + Template, +} + +const ENTRYPOINTACCESS_PUBLIC_TAG: u8 = 1; +const ENTRYPOINTACCESS_GROUPS_TAG: u8 = 2; +const ENTRYPOINTACCESS_ABSTRACT_TAG: u8 = 3; + +impl EntryPointAccess { + /// Constructor for access granted to only listed groups. + pub fn groups(labels: &[&str]) -> Self { + let list: Vec = labels + .iter() + .map(|s| Group::new(String::from(*s))) + .collect(); + EntryPointAccess::Groups(list) + } +} + +impl ToBytes for EntryPointAccess { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut result = bytesrepr::allocate_buffer(self)?; + + match self { + EntryPointAccess::Public => { + result.push(ENTRYPOINTACCESS_PUBLIC_TAG); + } + EntryPointAccess::Groups(groups) => { + result.push(ENTRYPOINTACCESS_GROUPS_TAG); + result.append(&mut groups.to_bytes()?); + } + EntryPointAccess::Template => { + result.push(ENTRYPOINTACCESS_ABSTRACT_TAG); + } + } + Ok(result) + } + + fn serialized_length(&self) -> usize { + match self { + EntryPointAccess::Public => 1, + EntryPointAccess::Groups(groups) => 1 + groups.serialized_length(), + EntryPointAccess::Template => 1, + } + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + EntryPointAccess::Public => { + writer.push(ENTRYPOINTACCESS_PUBLIC_TAG); + } + 
EntryPointAccess::Groups(groups) => { + writer.push(ENTRYPOINTACCESS_GROUPS_TAG); + groups.write_bytes(writer)?; + } + EntryPointAccess::Template => { + writer.push(ENTRYPOINTACCESS_ABSTRACT_TAG); + } + } + Ok(()) + } +} + +impl FromBytes for EntryPointAccess { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, bytes) = u8::from_bytes(bytes)?; + + match tag { + ENTRYPOINTACCESS_PUBLIC_TAG => Ok((EntryPointAccess::Public, bytes)), + ENTRYPOINTACCESS_GROUPS_TAG => { + let (groups, bytes) = Vec::::from_bytes(bytes)?; + let result = EntryPointAccess::Groups(groups); + Ok((result, bytes)) + } + ENTRYPOINTACCESS_ABSTRACT_TAG => Ok((EntryPointAccess::Template, bytes)), + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +/// Parameter to a method +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub struct Parameter { + name: String, + cl_type: CLType, +} + +impl Parameter { + /// `Parameter` constructor. + pub fn new>(name: T, cl_type: CLType) -> Self { + Parameter { + name: name.into(), + cl_type, + } + } + + /// Get the type of this argument. + pub fn cl_type(&self) -> &CLType { + &self.cl_type + } + + /// Get a reference to the parameter's name. 
+ pub fn name(&self) -> &str { + &self.name + } +} + +impl From for (String, CLType) { + fn from(parameter: Parameter) -> Self { + (parameter.name, parameter.cl_type) + } +} + +impl ToBytes for Parameter { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut result = ToBytes::to_bytes(&self.name)?; + self.cl_type.append_bytes(&mut result)?; + + Ok(result) + } + + fn serialized_length(&self) -> usize { + ToBytes::serialized_length(&self.name) + self.cl_type.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.name.write_bytes(writer)?; + self.cl_type.append_bytes(writer) + } +} + +impl FromBytes for Parameter { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (name, bytes) = String::from_bytes(bytes)?; + let (cl_type, bytes) = CLType::from_bytes(bytes)?; + + Ok((Parameter { name, cl_type }, bytes)) + } +} + +/// Collection of named entry points. +#[derive(Clone, PartialEq, Eq, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct EntryPoints(BTreeMap); + +impl ToBytes for EntryPoints { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.0.to_bytes() + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.0.write_bytes(writer) + } +} + +impl FromBytes for EntryPoints { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (entry_points_map, remainder) = + BTreeMap::::from_bytes(bytes)?; + Ok((EntryPoints(entry_points_map), remainder)) + } +} + +impl Default for EntryPoints { + fn default() -> Self { + let mut entry_points = EntryPoints::new(); + let entry_point = EntityEntryPoint::default(); + entry_points.add_entry_point(entry_point); + entry_points + } +} + +impl EntryPoints { + /// Constructs a new, empty `EntryPoints`. 
+ pub const fn new() -> EntryPoints { + EntryPoints(BTreeMap::::new()) + } + + /// Constructs a new `EntryPoints` with a single entry for the default `EntryPoint`. + pub fn new_with_default_entry_point() -> Self { + let mut entry_points = EntryPoints::new(); + let entry_point = EntityEntryPoint::default(); + entry_points.add_entry_point(entry_point); + entry_points + } + + /// Adds new [`EntityEntryPoint`]. + pub fn add_entry_point(&mut self, entry_point: EntityEntryPoint) { + self.0.insert(entry_point.name().to_string(), entry_point); + } + + /// Checks if given [`EntityEntryPoint`] exists. + pub fn has_entry_point(&self, entry_point_name: &str) -> bool { + self.0.contains_key(entry_point_name) + } + + /// Gets an existing [`EntityEntryPoint`] by its name. + pub fn get(&self, entry_point_name: &str) -> Option<&EntityEntryPoint> { + self.0.get(entry_point_name) + } + + /// Returns iterator for existing entry point names. + pub fn keys(&self) -> impl Iterator { + self.0.keys() + } + + /// Takes all entry points. + pub fn take_entry_points(self) -> Vec { + self.0.into_values().collect() + } + + /// Returns the length of the entry points + pub fn len(&self) -> usize { + self.0.len() + } + + /// Checks if the `EntryPoints` is empty. + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } + + /// Checks if any of the entry points are of the type Session. + pub fn contains_stored_session(&self) -> bool { + self.0 + .values() + .any(|entry_point| entry_point.entry_point_type == EntryPointType::Caller) + } +} + +impl From> for EntryPoints { + fn from(entry_points: Vec) -> EntryPoints { + let entries = entry_points + .into_iter() + .map(|entry_point| (String::from(entry_point.name()), entry_point)) + .collect(); + EntryPoints(entries) + } +} + +/// The entry point address. 
+#[derive(PartialOrd, Ord, PartialEq, Eq, Hash, Clone, Copy, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub enum EntryPointAddr { + /// The address for a V1 Entrypoint. + VmCasperV1 { + /// The addr of the entity. + entity_addr: EntityAddr, + /// The 32 byte hash of the name of the entry point + name_bytes: [u8; KEY_HASH_LENGTH], + }, +} + +impl EntryPointAddr { + /// Returns a `VmCasperV1` variant of the entry point address. + pub fn new_v1_entry_point_addr( + entity_addr: EntityAddr, + name: &str, + ) -> Result { + let bytes = name.to_bytes()?; + let mut hasher = { + match VarBlake2b::new(BLAKE2B_DIGEST_LENGTH) { + Ok(hasher) => hasher, + Err(_) => return Err(bytesrepr::Error::Formatting), + } + }; + hasher.update(bytes); + // NOTE: Assumed safe as size of `HashAddr` equals to the output provided by hasher. + let mut name_bytes = HashAddr::default(); + hasher.finalize_variable(|hash| name_bytes.clone_from_slice(hash)); + Ok(Self::VmCasperV1 { + entity_addr, + name_bytes, + }) + } + + /// Returns the encapsulated [`EntityAddr`]. + pub fn entity_addr(&self) -> EntityAddr { + match self { + EntryPointAddr::VmCasperV1 { entity_addr, .. } => *entity_addr, + } + } + + /// Returns the formatted String representation of the [`EntryPointAddr`]. + pub fn to_formatted_string(&self) -> String { + format!("{}", self) + } + + /// Returns the address from the formatted string. 
+ pub fn from_formatted_str(input: &str) -> Result { + if let Some(entry_point_v1) = input.strip_prefix(V1_ENTRY_POINT_PREFIX) { + if let Some((entity_addr_str, string_bytes_str)) = entry_point_v1.rsplit_once('-') { + let entity_addr = EntityAddr::from_formatted_str(entity_addr_str)?; + let string_bytes = + checksummed_hex::decode(string_bytes_str).map_err(FromStrError::Hex)?; + let (name_bytes, _) = + FromBytes::from_vec(string_bytes).map_err(FromStrError::BytesRepr)?; + return Ok(Self::VmCasperV1 { + entity_addr, + name_bytes, + }); + } + } + + Err(FromStrError::InvalidPrefix) + } +} + +impl ToBytes for EntryPointAddr { + fn to_bytes(&self) -> Result, Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + match self { + EntryPointAddr::VmCasperV1 { + entity_addr, + name_bytes: named_bytes, + } => { + buffer.insert(0, V1_ENTRY_POINT_TAG); + buffer.append(&mut entity_addr.to_bytes()?); + buffer.append(&mut named_bytes.to_bytes()?); + } + } + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + + match self { + EntryPointAddr::VmCasperV1 { + entity_addr, + name_bytes: named_bytes, + } => entity_addr.serialized_length() + named_bytes.serialized_length(), + } + } +} + +impl FromBytes for EntryPointAddr { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (tag, bytes) = u8::from_bytes(bytes)?; + match tag { + V1_ENTRY_POINT_TAG => { + let (entity_addr, bytes) = EntityAddr::from_bytes(bytes)?; + let (name_bytes, bytes) = FromBytes::from_bytes(bytes)?; + Ok(( + Self::VmCasperV1 { + entity_addr, + name_bytes, + }, + bytes, + )) + } + _ => Err(Error::Formatting), + } + } +} + +impl Display for EntryPointAddr { + fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { + match self { + EntryPointAddr::VmCasperV1 { + entity_addr, + name_bytes, + } => { + write!( + f, + "{}{}-{}", + V1_ENTRY_POINT_PREFIX, + entity_addr, + base16::encode_lower(name_bytes) + ) + } + } + } +} + +impl Debug for EntryPointAddr { + fn 
fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { + match self { + EntryPointAddr::VmCasperV1 { + entity_addr, + name_bytes, + } => { + write!(f, "EntryPointAddr({:?}-{:?})", entity_addr, name_bytes) + } + } + } +} + +#[cfg(any(feature = "testing", test))] +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> EntryPointAddr { + EntryPointAddr::VmCasperV1 { + entity_addr: rng.gen(), + name_bytes: rng.gen(), + } + } +} + +/// The encaspulated representation of entrypoints. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub enum EntryPointValue { + /// Entrypoints to be executed against the V1 Casper VM. + V1CasperVm(EntityEntryPoint), +} + +impl EntryPointValue { + /// Returns [`EntryPointValue::V1CasperVm`] variant. + pub fn new_v1_entry_point_value(entry_point: EntityEntryPoint) -> Self { + Self::V1CasperVm(entry_point) + } + + /// Entry point will cover payment if directly invoked. 
+ pub fn will_pay_direct_invocation(&self) -> bool { + match self { + EntryPointValue::V1CasperVm(ep) => ep.entry_point_payment.will_pay_direct_invocation(), + } + } +} + +impl ToBytes for EntryPointValue { + fn to_bytes(&self) -> Result, Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + + match self { + EntryPointValue::V1CasperVm(entry_point) => entry_point.serialized_length(), + } + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + match self { + EntryPointValue::V1CasperVm(entry_point) => { + writer.push(V1_ENTRY_POINT_TAG); + entry_point.write_bytes(writer)?; + } + } + Ok(()) + } +} + +impl FromBytes for EntryPointValue { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + match tag { + V1_ENTRY_POINT_TAG => { + let (entry_point, remainder) = EntityEntryPoint::from_bytes(remainder)?; + Ok((Self::V1CasperVm(entry_point), remainder)) + } + _ => Err(Error::Formatting), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn entry_point_type_serialization_roundtrip() { + let vm1 = EntryPointAddr::VmCasperV1 { + entity_addr: EntityAddr::new_smart_contract([42; 32]), + name_bytes: [99; 32], + }; + bytesrepr::test_serialization_roundtrip(&vm1); + } +} diff --git a/types/src/addressable_entity/error.rs b/types/src/addressable_entity/error.rs new file mode 100644 index 0000000000..fad8a1fc6f --- /dev/null +++ b/types/src/addressable_entity/error.rs @@ -0,0 +1,51 @@ +use core::{ + array::TryFromSliceError, + fmt::{self, Display, Formatter}, +}; + +// This error type is not intended to be used by third party crates. +#[doc(hidden)] +#[derive(Debug, Eq, PartialEq)] +pub struct TryFromIntError(pub ()); + +/// Error returned when decoding an `AccountHash` from a formatted string. 
+#[derive(Debug)] +#[non_exhaustive] +pub enum FromAccountHashStrError { + /// The prefix is invalid. + InvalidPrefix, + /// The hash is not valid hex. + Hex(base16::DecodeError), + /// The hash is the wrong length. + Hash(TryFromSliceError), +} + +impl From for FromAccountHashStrError { + fn from(error: base16::DecodeError) -> Self { + FromAccountHashStrError::Hex(error) + } +} + +impl From for FromAccountHashStrError { + fn from(error: TryFromSliceError) -> Self { + FromAccountHashStrError::Hash(error) + } +} + +impl Display for FromAccountHashStrError { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + match self { + FromAccountHashStrError::InvalidPrefix => write!(f, "prefix is not 'account-hash-'"), + FromAccountHashStrError::Hex(error) => { + write!(f, "failed to decode address portion from hex: {}", error) + } + FromAccountHashStrError::Hash(error) => { + write!(f, "address portion is wrong length: {}", error) + } + } + } +} + +/// Associated error type of `TryFrom<&[u8]>` for [`AccountHash`](super::AccountHash). +#[derive(Debug)] +pub struct TryFromSliceForAccountHashError(()); diff --git a/types/src/addressable_entity/named_keys.rs b/types/src/addressable_entity/named_keys.rs new file mode 100644 index 0000000000..8574fa83c8 --- /dev/null +++ b/types/src/addressable_entity/named_keys.rs @@ -0,0 +1,181 @@ +use alloc::{collections::BTreeMap, string::String, vec::Vec}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; +use serde_map_to_array::{BTreeMapToArray, KeyValueLabels}; + +#[cfg(feature = "json-schema")] +use crate::execution::execution_result_v1::NamedKey; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + CLType, CLTyped, Key, +}; + +/// A collection of named keys. 
+#[derive(Clone, Eq, PartialEq, Default, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +#[rustfmt::skip] +pub struct NamedKeys( + #[serde(with = "BTreeMapToArray::")] + #[cfg_attr(feature = "json-schema", schemars(with = "Vec"))] + BTreeMap, +); + +impl NamedKeys { + /// Constructs a new, empty `NamedKeys`. + pub const fn new() -> Self { + NamedKeys(BTreeMap::new()) + } + + /// Consumes `self`, returning the wrapped map. + pub fn into_inner(self) -> BTreeMap { + self.0 + } + + /// Inserts a named key. + /// + /// If the map did not have this name present, `None` is returned. If the map did have this + /// name present, the `Key` is updated, and the old `Key` is returned. + pub fn insert(&mut self, name: String, key: Key) -> Option { + self.0.insert(name, key) + } + + /// Moves all elements from `other` into `self`. + pub fn append(&mut self, mut other: Self) { + self.0.append(&mut other.0) + } + + /// Removes a named `Key`, returning the `Key` if it existed in the collection. + pub fn remove(&mut self, name: &str) -> Option { + self.0.remove(name) + } + + /// Returns a reference to the `Key` under the given `name` if any. + pub fn get(&self, name: &str) -> Option<&Key> { + self.0.get(name) + } + + /// Returns `true` if the named `Key` exists in the collection. + pub fn contains(&self, name: &str) -> bool { + self.0.contains_key(name) + } + + /// Returns an iterator over the names. + pub fn names(&self) -> impl Iterator { + self.0.keys() + } + + /// Returns an iterator over the `Key`s (i.e. the map's values). + pub fn keys(&self) -> impl Iterator { + self.0.values() + } + + /// Returns a mutable iterator over the `Key`s (i.e. the map's values). + pub fn keys_mut(&mut self) -> impl Iterator { + self.0.values_mut() + } + + /// Returns an iterator over the name-key pairs. 
+ pub fn iter(&self) -> impl Iterator { + self.0.iter() + } + + /// Returns the number of named `Key`s. + pub fn len(&self) -> usize { + self.0.len() + } + + /// Returns `true` if there are no named `Key`s. + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } +} + +impl From> for NamedKeys { + fn from(value: BTreeMap) -> Self { + NamedKeys(value) + } +} + +impl ToBytes for NamedKeys { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.0.write_bytes(writer) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } +} + +impl FromBytes for NamedKeys { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (named_keys, remainder) = BTreeMap::::from_bytes(bytes)?; + Ok((NamedKeys(named_keys), remainder)) + } +} + +impl CLTyped for NamedKeys { + fn cl_type() -> CLType { + BTreeMap::::cl_type() + } +} + +struct Labels; + +impl KeyValueLabels for Labels { + const KEY: &'static str = "name"; + const VALUE: &'static str = "key"; +} + +#[cfg(test)] +mod tests { + use rand::Rng; + + use super::*; + use crate::testing::TestRng; + + /// `NamedKeys` was previously (pre node v2.0.0) just an alias for `BTreeMap`. + /// Check if we serialize as the old form, that can deserialize to the new. 
+ #[test] + fn should_be_backwards_compatible() { + let rng = &mut TestRng::new(); + let mut named_keys = NamedKeys::new(); + assert!(named_keys.insert("a".to_string(), rng.gen()).is_none()); + assert!(named_keys.insert("bb".to_string(), rng.gen()).is_none()); + assert!(named_keys.insert("ccc".to_string(), rng.gen()).is_none()); + + let serialized_old = bincode::serialize(&named_keys.0).unwrap(); + let parsed_new = bincode::deserialize(&serialized_old).unwrap(); + assert_eq!(named_keys, parsed_new); + + let serialized_old = bytesrepr::serialize(&named_keys.0).unwrap(); + let parsed_new = bytesrepr::deserialize(serialized_old).unwrap(); + assert_eq!(named_keys, parsed_new); + } + + #[test] + fn should_match_field_names() { + // this test was written to ensure that the schema generated by schemars matches the serde + // encoding, both are configured using attributes and they can get out of sync + let mut named_keys = NamedKeys::new(); + named_keys.insert("key".to_string(), Key::Hash([0u8; 32])); + assert_eq!( + serde_json::to_value(&named_keys).expect("should serialize"), + serde_json::json!([{ + Labels::KEY: "key", + Labels::VALUE: "hash-0000000000000000000000000000000000000000000000000000000000000000" + }]) + ); + } +} diff --git a/types/src/addressable_entity/weight.rs b/types/src/addressable_entity/weight.rs new file mode 100644 index 0000000000..ee2f0343e1 --- /dev/null +++ b/types/src/addressable_entity/weight.rs @@ -0,0 +1,66 @@ +use alloc::vec::Vec; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; + +use serde::{Deserialize, Serialize}; + +use crate::{ + bytesrepr::{self, Error, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + CLType, CLTyped, +}; + +/// The number of bytes in a serialized [`Weight`]. +pub const WEIGHT_SERIALIZED_LENGTH: usize = U8_SERIALIZED_LENGTH; + +/// The weight associated with public keys in an account's associated keys. 
+#[derive(PartialOrd, Ord, PartialEq, Eq, Clone, Copy, Debug, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[cfg_attr( + feature = "json-schema", + schemars(rename = "EntityAssociatedKeyWeight") +)] +pub struct Weight(u8); + +impl Weight { + /// Constructs a new `Weight`. + pub const fn new(weight: u8) -> Weight { + Weight(weight) + } + + /// Returns the value of `self` as a `u8`. + pub fn value(self) -> u8 { + self.0 + } +} + +impl ToBytes for Weight { + fn to_bytes(&self) -> Result, Error> { + self.0.to_bytes() + } + + fn serialized_length(&self) -> usize { + WEIGHT_SERIALIZED_LENGTH + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + writer.push(self.0); + Ok(()) + } +} + +impl FromBytes for Weight { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (byte, rem) = u8::from_bytes(bytes)?; + Ok((Weight::new(byte), rem)) + } +} + +impl CLTyped for Weight { + fn cl_type() -> CLType { + CLType::U8 + } +} diff --git a/types/src/api_error.rs b/types/src/api_error.rs index c715c2dee9..4ee7be8089 100644 --- a/types/src/api_error.rs +++ b/types/src/api_error.rs @@ -1,15 +1,15 @@ //! Contains [`ApiError`] and associated helper functions. 
use core::{ + convert::TryFrom, fmt::{self, Debug, Formatter}, - u16, u8, }; use crate::{ account::{ - AddKeyFailure, RemoveKeyFailure, SetThresholdFailure, TryFromIntError, - TryFromSliceForAccountHashError, UpdateKeyFailure, + AddKeyFailure, RemoveKeyFailure, SetThresholdFailure, TryFromIntError, UpdateKeyFailure, }, + addressable_entity::{self, MessageTopicError, TryFromSliceForAccountHashError}, bytesrepr, contracts, system::{auction, handle_payment, mint}, CLValueError, @@ -75,366 +75,9 @@ const AUCTION_ERROR_MAX: u32 = AUCTION_ERROR_OFFSET + u8::MAX as u32; /// | [64512, 64767] | `Auction` | /// | [64768, 65023] | `ContractHeader` | /// | [65024, 65279] | `Mint` | -/// | [65280, 65535] | `HandlePayment` | +/// | [65280, 65535] | `HandlePayment` | /// | [65536, 131071] | `User` | /// -/// ## Mappings -/// -/// The expanded mapping of all variants to their numerical equivalents is as follows: -/// ``` -/// # use casper_types::ApiError::{self, *}; -/// # macro_rules! show_and_check { -/// # ($lhs:literal => $rhs:expr) => { -/// # assert_eq!($lhs as u32, u32::from(ApiError::from($rhs))); -/// # }; -/// # } -/// // General system errors: -/// # show_and_check!( -/// 1 => None -/// # ); -/// # show_and_check!( -/// 2 => MissingArgument -/// # ); -/// # show_and_check!( -/// 3 => InvalidArgument -/// # ); -/// # show_and_check!( -/// 4 => Deserialize -/// # ); -/// # show_and_check!( -/// 5 => Read -/// # ); -/// # show_and_check!( -/// 6 => ValueNotFound -/// # ); -/// # show_and_check!( -/// 7 => ContractNotFound -/// # ); -/// # show_and_check!( -/// 8 => GetKey -/// # ); -/// # show_and_check!( -/// 9 => UnexpectedKeyVariant -/// # ); -/// # show_and_check!( -/// 10 => UnexpectedContractRefVariant -/// # ); -/// # show_and_check!( -/// 11 => InvalidPurseName -/// # ); -/// # show_and_check!( -/// 12 => InvalidPurse -/// # ); -/// # show_and_check!( -/// 13 => UpgradeContractAtURef -/// # ); -/// # show_and_check!( -/// 14 => Transfer -/// # ); -/// # 
show_and_check!( -/// 15 => NoAccessRights -/// # ); -/// # show_and_check!( -/// 16 => CLTypeMismatch -/// # ); -/// # show_and_check!( -/// 17 => EarlyEndOfStream -/// # ); -/// # show_and_check!( -/// 18 => Formatting -/// # ); -/// # show_and_check!( -/// 19 => LeftOverBytes -/// # ); -/// # show_and_check!( -/// 20 => OutOfMemory -/// # ); -/// # show_and_check!( -/// 21 => MaxKeysLimit -/// # ); -/// # show_and_check!( -/// 22 => DuplicateKey -/// # ); -/// # show_and_check!( -/// 23 => PermissionDenied -/// # ); -/// # show_and_check!( -/// 24 => MissingKey -/// # ); -/// # show_and_check!( -/// 25 => ThresholdViolation -/// # ); -/// # show_and_check!( -/// 26 => KeyManagementThreshold -/// # ); -/// # show_and_check!( -/// 27 => DeploymentThreshold -/// # ); -/// # show_and_check!( -/// 28 => InsufficientTotalWeight -/// # ); -/// # show_and_check!( -/// 29 => InvalidSystemContract -/// # ); -/// # show_and_check!( -/// 30 => PurseNotCreated -/// # ); -/// # show_and_check!( -/// 31 => Unhandled -/// # ); -/// # show_and_check!( -/// 32 => BufferTooSmall -/// # ); -/// # show_and_check!( -/// 33 => HostBufferEmpty -/// # ); -/// # show_and_check!( -/// 34 => HostBufferFull -/// # ); -/// // Auction errors: -/// use casper_types::system::auction::Error as AuctionError; -/// # show_and_check!( -/// 64_512 => AuctionError::MissingKey -/// # ); -/// # show_and_check!( -/// 64_513 => AuctionError::InvalidKeyVariant -/// # ); -/// # show_and_check!( -/// 64_514 => AuctionError::MissingValue -/// # ); -/// # show_and_check!( -/// 64_515 => AuctionError::Serialization -/// # ); -/// # show_and_check!( -/// 64_516 => AuctionError::TransferToBidPurse -/// # ); -/// # show_and_check!( -/// 64_517 => AuctionError::InvalidAmount -/// # ); -/// # show_and_check!( -/// 64_518 => AuctionError::BidNotFound -/// # ); -/// # show_and_check!( -/// 64_519 => AuctionError::ValidatorNotFound -/// # ); -/// # show_and_check!( -/// 64_520 => AuctionError::DelegatorNotFound -/// # 
); -/// # show_and_check!( -/// 64_521 => AuctionError::Storage -/// # ); -/// # show_and_check!( -/// 64_522 => AuctionError::Bonding -/// # ); -/// # show_and_check!( -/// 64_523 => AuctionError::Unbonding -/// # ); -/// # show_and_check!( -/// 64_524 => AuctionError::ReleaseFounderStake -/// # ); -/// # show_and_check!( -/// 64_525 => AuctionError::GetBalance -/// # ); -/// # show_and_check!( -/// 64_526 => AuctionError::InvalidContext -/// # ); -/// # show_and_check!( -/// 64_527 => AuctionError::ValidatorFundsLocked -/// # ); -/// # show_and_check!( -/// 64_528 => AuctionError::InvalidCaller -/// # ); -/// # show_and_check!( -/// 64_529 => AuctionError::InvalidPublicKey -/// # ); -/// # show_and_check!( -/// 64_530 => AuctionError::BondNotFound -/// # ); -/// # show_and_check!( -/// 64_531 => AuctionError::CreatePurseFailed -/// # ); -/// # show_and_check!( -/// 64_532 => AuctionError::UnbondTooLarge -/// # ); -/// # show_and_check!( -/// 64_533 => AuctionError::BondTooSmall -/// # ); -/// # show_and_check!( -/// 64_534 => AuctionError::MissingDelegations -/// # ); -/// # show_and_check!( -/// 64_535 => AuctionError::MismatchedEraValidators -/// # ); -/// # show_and_check!( -/// 64_536 => AuctionError::MintReward -/// # ); -/// # show_and_check!( -/// 64_537 => AuctionError::InvalidValidatorSlotsValue -/// # ); -/// # show_and_check!( -/// 64_538 => AuctionError::MintReduceTotalSupply -/// # ); -/// # show_and_check!( -/// 64_539 => AuctionError::TransferToDelegatorPurse -/// # ); -/// # show_and_check!( -/// 64_540 => AuctionError::ValidatorRewardTransfer -/// # ); -/// # show_and_check!( -/// 64_541 => AuctionError::DelegatorRewardTransfer -/// # ); -/// # show_and_check!( -/// 64_542 => AuctionError::WithdrawDelegatorReward -/// # ); -/// # show_and_check!( -/// 64_543 => AuctionError::WithdrawValidatorReward -/// # ); -/// # show_and_check!( -/// 64_544 => AuctionError::TransferToUnbondingPurse -/// # ); -/// // Contract header errors: -/// use 
casper_types::contracts::Error as ContractHeaderError; -/// # show_and_check!( -/// 64_769 => ContractHeaderError::PreviouslyUsedVersion -/// # ); -/// # show_and_check!( -/// 64_770 => ContractHeaderError::ContractNotFound -/// # ); -/// # show_and_check!( -/// 64_771 => ContractHeaderError::GroupAlreadyExists -/// # ); -/// # show_and_check!( -/// 64_772 => ContractHeaderError::MaxGroupsExceeded -/// # ); -/// # show_and_check!( -/// 64_773 => ContractHeaderError::MaxTotalURefsExceeded -/// # ); -/// // Mint errors: -/// use casper_types::system::mint::Error as MintError; -/// # show_and_check!( -/// 65_024 => MintError::InsufficientFunds -/// # ); -/// # show_and_check!( -/// 65_025 => MintError::SourceNotFound -/// # ); -/// # show_and_check!( -/// 65_026 => MintError::DestNotFound -/// # ); -/// # show_and_check!( -/// 65_027 => MintError::InvalidURef -/// # ); -/// # show_and_check!( -/// 65_028 => MintError::InvalidAccessRights -/// # ); -/// # show_and_check!( -/// 65_029 => MintError::InvalidNonEmptyPurseCreation -/// # ); -/// # show_and_check!( -/// 65_030 => MintError::Storage -/// # ); -/// # show_and_check!( -/// 65_031 => MintError::PurseNotFound -/// # ); -/// -/// // Handle Payment errors: -/// use casper_types::system::handle_payment::Error as PosError; -/// # show_and_check!( -/// 65_280 => PosError::NotBonded -/// # ); -/// # show_and_check!( -/// 65_281 => PosError::TooManyEventsInQueue -/// # ); -/// # show_and_check!( -/// 65_282 => PosError::CannotUnbondLastValidator -/// # ); -/// # show_and_check!( -/// 65_283 => PosError::SpreadTooHigh -/// # ); -/// # show_and_check!( -/// 65_284 => PosError::MultipleRequests -/// # ); -/// # show_and_check!( -/// 65_285 => PosError::BondTooSmall -/// # ); -/// # show_and_check!( -/// 65_286 => PosError::BondTooLarge -/// # ); -/// # show_and_check!( -/// 65_287 => PosError::UnbondTooLarge -/// # ); -/// # show_and_check!( -/// 65_288 => PosError::BondTransferFailed -/// # ); -/// # show_and_check!( -/// 
65_289 => PosError::UnbondTransferFailed -/// # ); -/// # show_and_check!( -/// 65_290 => PosError::TimeWentBackwards -/// # ); -/// # show_and_check!( -/// 65_291 => PosError::StakesNotFound -/// # ); -/// # show_and_check!( -/// 65_292 => PosError::PaymentPurseNotFound -/// # ); -/// # show_and_check!( -/// 65_293 => PosError::PaymentPurseKeyUnexpectedType -/// # ); -/// # show_and_check!( -/// 65_294 => PosError::PaymentPurseBalanceNotFound -/// # ); -/// # show_and_check!( -/// 65_295 => PosError::BondingPurseNotFound -/// # ); -/// # show_and_check!( -/// 65_296 => PosError::BondingPurseKeyUnexpectedType -/// # ); -/// # show_and_check!( -/// 65_297 => PosError::RefundPurseKeyUnexpectedType -/// # ); -/// # show_and_check!( -/// 65_298 => PosError::RewardsPurseNotFound -/// # ); -/// # show_and_check!( -/// 65_299 => PosError::RewardsPurseKeyUnexpectedType -/// # ); -/// # show_and_check!( -/// 65_300 => PosError::StakesKeyDeserializationFailed -/// # ); -/// # show_and_check!( -/// 65_301 => PosError::StakesDeserializationFailed -/// # ); -/// # show_and_check!( -/// 65_302 => PosError::SystemFunctionCalledByUserAccount -/// # ); -/// # show_and_check!( -/// 65_303 => PosError::InsufficientPaymentForAmountSpent -/// # ); -/// # show_and_check!( -/// 65_304 => PosError::FailedTransferToRewardsPurse -/// # ); -/// # show_and_check!( -/// 65_305 => PosError::FailedTransferToAccountPurse -/// # ); -/// # show_and_check!( -/// 65_306 => PosError::SetRefundPurseCalledOutsidePayment -/// # ); -/// -/// // User-defined errors: -/// # show_and_check!( -/// 65_536 => User(0) -/// # ); -/// # show_and_check!( -/// 65_537 => User(1) -/// # ); -/// # show_and_check!( -/// 65_538 => User(2) -/// # ); -/// # show_and_check!( -/// 131_071 => User(u16::max_value()) -/// # ); -/// ``` -/// /// Users can specify a C-style enum and implement `From` to ease usage of /// `casper_contract::runtime::revert()`, e.g. 
/// ``` @@ -458,95 +101,363 @@ const AUCTION_ERROR_MAX: u32 = AUCTION_ERROR_OFFSET + u8::MAX as u32; /// assert_eq!(65_538, u32::from(ApiError::from(FailureCode::Two))); /// ``` #[derive(Copy, Clone, PartialEq, Eq)] +#[non_exhaustive] pub enum ApiError { /// Optional data was unexpectedly `None`. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(1), ApiError::None); + /// ``` None, /// Specified argument not provided. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(2), ApiError::MissingArgument); + /// ``` MissingArgument, /// Argument not of correct type. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(3), ApiError::InvalidArgument); + /// ``` InvalidArgument, /// Failed to deserialize a value. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(4), ApiError::Deserialize); + /// ``` Deserialize, /// `casper_contract::storage::read()` returned an error. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(5), ApiError::Read); + /// ``` Read, /// The given key returned a `None` value. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(6), ApiError::ValueNotFound); + /// ``` ValueNotFound, /// Failed to find a specified contract. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(7), ApiError::ContractNotFound); + /// ``` ContractNotFound, /// A call to `casper_contract::runtime::get_key()` returned a failure. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(8), ApiError::GetKey); + /// ``` GetKey, /// The [`Key`](crate::Key) variant was not as expected. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(9), ApiError::UnexpectedKeyVariant); + /// ``` UnexpectedKeyVariant, - /// Obsolete error variant (we no longer have ContractRef). 
- UnexpectedContractRefVariant, // TODO: this variant is not used any longer and can be removed + /// Unsupported contract discovery variant. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(10), ApiError::UnexpectedContractRefVariant); + /// ``` + UnexpectedContractRefVariant, /// Invalid purse name given. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(11), ApiError::InvalidPurseName); + /// ``` InvalidPurseName, /// Invalid purse retrieved. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(12), ApiError::InvalidPurse); + /// ``` InvalidPurse, /// Failed to upgrade contract at [`URef`](crate::URef). + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(13), ApiError::UpgradeContractAtURef); + /// ``` UpgradeContractAtURef, /// Failed to transfer motes. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(14), ApiError::Transfer); + /// ``` Transfer, /// The given [`URef`](crate::URef) has no access rights. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(15), ApiError::NoAccessRights); + /// ``` NoAccessRights, /// A given type could not be constructed from a [`CLValue`](crate::CLValue). + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(16), ApiError::CLTypeMismatch); + /// ``` CLTypeMismatch, /// Early end of stream while deserializing. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(17), ApiError::EarlyEndOfStream); + /// ``` EarlyEndOfStream, /// Formatting error while deserializing. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(18), ApiError::Formatting); + /// ``` Formatting, /// Not all input bytes were consumed in [`deserialize`](crate::bytesrepr::deserialize). 
+ /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(19), ApiError::LeftOverBytes); + /// ``` LeftOverBytes, /// Out of memory error. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(20), ApiError::OutOfMemory); + /// ``` OutOfMemory, - /// There are already [`MAX_ASSOCIATED_KEYS`](crate::account::MAX_ASSOCIATED_KEYS) - /// [`AccountHash`](crate::account::AccountHash)s associated with the given account. + /// There are already maximum [`AccountHash`](crate::account::AccountHash)s associated with the + /// given account. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(21), ApiError::MaxKeysLimit); + /// ``` MaxKeysLimit, /// The given [`AccountHash`](crate::account::AccountHash) is already associated with the given /// account. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(22), ApiError::DuplicateKey); + /// ``` DuplicateKey, /// Caller doesn't have sufficient permissions to perform the given action. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(23), ApiError::PermissionDenied); + /// ``` PermissionDenied, /// The given [`AccountHash`](crate::account::AccountHash) is not associated with the given /// account. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(24), ApiError::MissingKey); + /// ``` MissingKey, /// Removing/updating the given associated [`AccountHash`](crate::account::AccountHash) would - /// cause the total [`Weight`](crate::account::Weight) of all remaining `AccountHash`s to + /// cause the total [`Weight`](addressable_entity::Weight) of all remaining `AccountHash`s to /// fall below one of the action thresholds for the given account. 
+ /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(25), ApiError::ThresholdViolation); + /// ``` ThresholdViolation, /// Setting the key-management threshold to a value lower than the deployment threshold is /// disallowed. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(26), ApiError::KeyManagementThreshold); + /// ``` KeyManagementThreshold, /// Setting the deployment threshold to a value greater than any other threshold is disallowed. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(27), ApiError::DeploymentThreshold); + /// ``` DeploymentThreshold, /// Setting a threshold to a value greater than the total weight of associated keys is /// disallowed. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(28), ApiError::InsufficientTotalWeight); + /// ``` InsufficientTotalWeight, - /// The given `u32` doesn't map to a [`SystemContractType`](crate::system::SystemContractType). + /// The given `u32` doesn't map to a [`SystemContractType`](crate::system::SystemEntityType). + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(29), ApiError::InvalidSystemContract); + /// ``` InvalidSystemContract, /// Failed to create a new purse. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(30), ApiError::PurseNotCreated); + /// ``` PurseNotCreated, /// An unhandled value, likely representing a bug in the code. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(31), ApiError::Unhandled); + /// ``` Unhandled, /// The provided buffer is too small to complete an operation. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(32), ApiError::BufferTooSmall); + /// ``` BufferTooSmall, /// No data available in the host buffer. 
+ /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(33), ApiError::HostBufferEmpty); + /// ``` HostBufferEmpty, /// The host buffer has been set to a value and should be consumed first by a read operation. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(34), ApiError::HostBufferFull); + /// ``` HostBufferFull, /// Could not lay out an array in memory + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(35), ApiError::AllocLayout); + /// ``` AllocLayout, - /// Error specific to Auction contract. + /// The `dictionary_item_key` length exceeds the maximum length. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(36), ApiError::DictionaryItemKeyExceedsLength); + /// ``` + DictionaryItemKeyExceedsLength, + /// The `dictionary_item_key` is invalid. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(37), ApiError::InvalidDictionaryItemKey); + /// ``` + InvalidDictionaryItemKey, + /// Unable to retrieve the requested system contract hash. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(38), ApiError::MissingSystemContractHash); + /// ``` + MissingSystemContractHash, + /// Exceeded a recursion depth limit. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(39), ApiError::ExceededRecursionDepth); + /// ``` + ExceededRecursionDepth, + /// Attempt to serialize a value that does not have a serialized representation. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(40), ApiError::NonRepresentableSerialization); + /// ``` + NonRepresentableSerialization, + /// Error specific to Auction contract. See + /// [casper_types::system::auction::Error](crate::system::auction::Error). 
+ /// ``` + /// # use casper_types::ApiError; + /// for code in 64512..=64767 { + /// assert!(matches!(ApiError::from(code), ApiError::AuctionError(_auction_error))); + /// } + /// ``` AuctionError(u8), - /// Contract header errors. + /// Contract header errors. See + /// [casper_types::contracts::Error](crate::addressable_entity::Error). + /// + /// ``` + /// # use casper_types::ApiError; + /// for code in 64768..=65023 { + /// assert!(matches!(ApiError::from(code), ApiError::ContractHeader(_contract_header_error))); + /// } + /// ``` ContractHeader(u8), - /// Error specific to Mint contract. + /// Error specific to Mint contract. See + /// [casper_types::system::mint::Error](crate::system::mint::Error). + /// ``` + /// # use casper_types::ApiError; + /// for code in 65024..=65279 { + /// assert!(matches!(ApiError::from(code), ApiError::Mint(_mint_error))); + /// } + /// ``` Mint(u8), - /// Error specific to Handle Payment contract. + /// Error specific to Handle Payment contract. See + /// [casper_types::system::handle_payment](crate::system::handle_payment::Error). + /// ``` + /// # use casper_types::ApiError; + /// for code in 65280..=65535 { + /// assert!(matches!(ApiError::from(code), ApiError::HandlePayment(_handle_payment_error))); + /// } + /// ``` HandlePayment(u8), /// User-specified error code. The internal `u16` value is added to `u16::MAX as u32 + 1` when /// an `Error::User` is converted to a `u32`. + /// ``` + /// # use casper_types::ApiError; + /// for code in 65536..131071 { + /// assert!(matches!(ApiError::from(code), ApiError::User(_))); + /// } + /// ``` User(u16), + /// The message topic is already registered. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(41), ApiError::MessageTopicAlreadyRegistered); + /// ``` + MessageTopicAlreadyRegistered, + /// The maximum number of allowed message topics was exceeded. 
+ /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(42), ApiError::MaxTopicsNumberExceeded); + /// ``` + MaxTopicsNumberExceeded, + /// The maximum size for the topic name was exceeded. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(43), ApiError::MaxTopicNameSizeExceeded); + /// ``` + MaxTopicNameSizeExceeded, + /// The message topic is not registered. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(44), ApiError::MessageTopicNotRegistered); + /// ``` + MessageTopicNotRegistered, + /// The message topic is full and cannot accept new messages. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(45), ApiError::MessageTopicFull); + /// ``` + MessageTopicFull, + /// The message topic is full and cannot accept new messages. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(46), ApiError::MessageTooLarge); + /// ``` + MessageTooLarge, + /// The maximum number of messages emitted per block was exceeded when trying to emit a + /// message. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(47), ApiError::MaxMessagesPerBlockExceeded); + /// ``` + MaxMessagesPerBlockExceeded, + /// Attempt to call FFI function `casper_add_contract_version()` from a transaction not defined + /// as an installer/upgrader. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(48), ApiError::NotAllowedToAddContractVersion); + /// ``` + NotAllowedToAddContractVersion, + /// Invalid delegation amount limits. + /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(49), ApiError::InvalidDelegationAmountLimits); + /// ``` + InvalidDelegationAmountLimits, + /// Invalid action for caller information. 
+ /// ``` + /// # use casper_types::ApiError; + /// assert_eq!(ApiError::from(50), ApiError::InvalidCallerInfoRequest); + /// ``` + InvalidCallerInfoRequest, } impl From for ApiError { @@ -556,6 +467,8 @@ impl From for ApiError { bytesrepr::Error::Formatting => ApiError::Formatting, bytesrepr::Error::LeftOverBytes => ApiError::LeftOverBytes, bytesrepr::Error::OutOfMemory => ApiError::OutOfMemory, + bytesrepr::Error::NotRepresentable => ApiError::NonRepresentableSerialization, + bytesrepr::Error::ExceededRecursionDepth => ApiError::ExceededRecursionDepth, } } } @@ -610,6 +523,12 @@ impl From for ApiError { } } +impl From for ApiError { + fn from(error: addressable_entity::Error) -> Self { + ApiError::ContractHeader(error as u8) + } +} + impl From for ApiError { fn from(error: contracts::Error) -> Self { ApiError::ContractHeader(error as u8) @@ -648,6 +567,16 @@ impl From for ApiError { } } +impl From for ApiError { + fn from(error: MessageTopicError) -> Self { + match error { + MessageTopicError::DuplicateTopic => ApiError::MessageTopicAlreadyRegistered, + MessageTopicError::MaxTopicsExceeded => ApiError::MaxTopicsNumberExceeded, + MessageTopicError::TopicNameSizeExceeded => ApiError::MaxTopicNameSizeExceeded, + } + } +} + impl From for u32 { fn from(error: ApiError) -> Self { match error { @@ -686,6 +615,21 @@ impl From for u32 { ApiError::HostBufferEmpty => 33, ApiError::HostBufferFull => 34, ApiError::AllocLayout => 35, + ApiError::DictionaryItemKeyExceedsLength => 36, + ApiError::InvalidDictionaryItemKey => 37, + ApiError::MissingSystemContractHash => 38, + ApiError::ExceededRecursionDepth => 39, + ApiError::NonRepresentableSerialization => 40, + ApiError::MessageTopicAlreadyRegistered => 41, + ApiError::MaxTopicsNumberExceeded => 42, + ApiError::MaxTopicNameSizeExceeded => 43, + ApiError::MessageTopicNotRegistered => 44, + ApiError::MessageTopicFull => 45, + ApiError::MessageTooLarge => 46, + ApiError::MaxMessagesPerBlockExceeded => 47, + 
ApiError::NotAllowedToAddContractVersion => 48, + ApiError::InvalidDelegationAmountLimits => 49, + ApiError::InvalidCallerInfoRequest => 50, ApiError::AuctionError(value) => AUCTION_ERROR_OFFSET + u32::from(value), ApiError::ContractHeader(value) => HEADER_ERROR_OFFSET + u32::from(value), ApiError::Mint(value) => MINT_ERROR_OFFSET + u32::from(value), @@ -733,6 +677,21 @@ impl From for ApiError { 33 => ApiError::HostBufferEmpty, 34 => ApiError::HostBufferFull, 35 => ApiError::AllocLayout, + 36 => ApiError::DictionaryItemKeyExceedsLength, + 37 => ApiError::InvalidDictionaryItemKey, + 38 => ApiError::MissingSystemContractHash, + 39 => ApiError::ExceededRecursionDepth, + 40 => ApiError::NonRepresentableSerialization, + 41 => ApiError::MessageTopicAlreadyRegistered, + 42 => ApiError::MaxTopicsNumberExceeded, + 43 => ApiError::MaxTopicNameSizeExceeded, + 44 => ApiError::MessageTopicNotRegistered, + 45 => ApiError::MessageTopicFull, + 46 => ApiError::MessageTooLarge, + 47 => ApiError::MaxMessagesPerBlockExceeded, + 48 => ApiError::NotAllowedToAddContractVersion, + 49 => ApiError::InvalidDelegationAmountLimits, + 50 => ApiError::InvalidCallerInfoRequest, USER_ERROR_MIN..=USER_ERROR_MAX => ApiError::User(value as u16), HP_ERROR_MIN..=HP_ERROR_MAX => ApiError::HandlePayment(value as u8), MINT_ERROR_MIN..=MINT_ERROR_MAX => ApiError::Mint(value as u8), @@ -783,10 +742,55 @@ impl Debug for ApiError { ApiError::HostBufferEmpty => write!(f, "ApiError::HostBufferEmpty")?, ApiError::HostBufferFull => write!(f, "ApiError::HostBufferFull")?, ApiError::AllocLayout => write!(f, "ApiError::AllocLayout")?, - ApiError::AuctionError(value) => write!(f, "ApiError::AuctionError({})", value)?, - ApiError::ContractHeader(value) => write!(f, "ApiError::ContractHeader({})", value)?, - ApiError::Mint(value) => write!(f, "ApiError::Mint({})", value)?, - ApiError::HandlePayment(value) => write!(f, "ApiError::HandlePayment({})", value)?, + ApiError::DictionaryItemKeyExceedsLength => { + write!(f, 
"ApiError::DictionaryItemKeyTooLarge")? + } + ApiError::InvalidDictionaryItemKey => write!(f, "ApiError::InvalidDictionaryItemKey")?, + ApiError::MissingSystemContractHash => write!(f, "ApiError::MissingContractHash")?, + ApiError::NonRepresentableSerialization => { + write!(f, "ApiError::NonRepresentableSerialization")? + } + ApiError::MessageTopicAlreadyRegistered => { + write!(f, "ApiError::MessageTopicAlreadyRegistered")? + } + ApiError::MaxTopicsNumberExceeded => write!(f, "ApiError::MaxTopicsNumberExceeded")?, + ApiError::MaxTopicNameSizeExceeded => write!(f, "ApiError::MaxTopicNameSizeExceeded")?, + ApiError::MessageTopicNotRegistered => { + write!(f, "ApiError::MessageTopicNotRegistered")? + } + ApiError::MessageTopicFull => write!(f, "ApiError::MessageTopicFull")?, + ApiError::MessageTooLarge => write!(f, "ApiError::MessageTooLarge")?, + ApiError::MaxMessagesPerBlockExceeded => { + write!(f, "ApiError::MaxMessagesPerBlockExceeded")? + } + ApiError::NotAllowedToAddContractVersion => { + write!(f, "ApiError::NotAllowedToAddContractVersion")? + } + ApiError::InvalidDelegationAmountLimits => { + write!(f, "ApiError::InvalidDelegationAmountLimits")? + } + ApiError::InvalidCallerInfoRequest => write!(f, "ApiError::InvalidCallerInfoRequest")?, + ApiError::ExceededRecursionDepth => write!(f, "ApiError::ExceededRecursionDepth")?, + ApiError::AuctionError(value) => write!( + f, + "ApiError::AuctionError({:?})", + auction::Error::try_from(*value).map_err(|_err| fmt::Error)? + )?, + ApiError::ContractHeader(value) => write!( + f, + "ApiError::ContractHeader({:?})", + addressable_entity::Error::try_from(*value).map_err(|_err| fmt::Error)? + )?, + ApiError::Mint(value) => write!( + f, + "ApiError::Mint({:?})", + mint::Error::try_from(*value).map_err(|_err| fmt::Error)? + )?, + ApiError::HandlePayment(value) => write!( + f, + "ApiError::HandlePayment({:?})", + handle_payment::Error::try_from(*value).map_err(|_err| fmt::Error)? 
+ )?, ApiError::User(value) => write!(f, "ApiError::User({})", value)?, } write!(f, " [{}]", u32::from(*self)) @@ -800,7 +804,7 @@ impl fmt::Display for ApiError { ApiError::ContractHeader(value) => write!(f, "Contract header error: {}", value), ApiError::Mint(value) => write!(f, "Mint error: {}", value), ApiError::HandlePayment(value) => write!(f, "Handle Payment error: {}", value), - _ => ::fmt(&self, f), + _ => ::fmt(self, f), } } } @@ -832,8 +836,6 @@ pub fn result_from(value: i32) -> Result<(), ApiError> { #[cfg(test)] mod tests { - use std::{i32, u16, u8}; - use super::*; fn round_trip(result: Result<(), ApiError>) { @@ -852,13 +854,19 @@ mod tests { } #[test] - fn error_descriptions() { + fn error_descriptions_getkey() { assert_eq!("ApiError::GetKey [8]", &format!("{:?}", ApiError::GetKey)); assert_eq!("ApiError::GetKey [8]", &format!("{}", ApiError::GetKey)); + } + #[test] + fn error_descriptions_contract_header() { assert_eq!( - "ApiError::ContractHeader(0) [64768]", - &format!("{:?}", ApiError::ContractHeader(0)) + "ApiError::ContractHeader(PreviouslyUsedVersion) [64769]", + &format!( + "{:?}", + ApiError::ContractHeader(addressable_entity::Error::PreviouslyUsedVersion as u8) + ) ); assert_eq!( "Contract header error: 0", @@ -868,29 +876,47 @@ mod tests { "Contract header error: 255", &format!("{}", ApiError::ContractHeader(u8::MAX)) ); + } + #[test] + fn error_descriptions_mint() { assert_eq!( - "ApiError::Mint(0) [65024]", + "ApiError::Mint(InsufficientFunds) [65024]", &format!("{:?}", ApiError::Mint(0)) ); assert_eq!("Mint error: 0", &format!("{}", ApiError::Mint(0))); assert_eq!("Mint error: 255", &format!("{}", ApiError::Mint(u8::MAX))); + } + + #[test] + fn error_descriptions_handle_payment() { assert_eq!( - "ApiError::HandlePayment(0) [65280]", - &format!("{:?}", ApiError::HandlePayment(0)) + "ApiError::HandlePayment(NotBonded) [65280]", + &format!( + "{:?}", + ApiError::HandlePayment(handle_payment::Error::NotBonded as u8) + ) ); + } + + #[test] + 
fn error_descriptions_handle_payment_display() { assert_eq!( "Handle Payment error: 0", - &format!("{}", ApiError::HandlePayment(0)) - ); - assert_eq!( - "ApiError::HandlePayment(255) [65535]", - &format!("{:?}", ApiError::HandlePayment(u8::MAX)) + &format!( + "{}", + ApiError::HandlePayment(handle_payment::Error::NotBonded as u8) + ) ); + } + + #[test] + fn error_descriptions_user_errors() { assert_eq!( "ApiError::User(0) [65536]", &format!("{:?}", ApiError::User(0)) ); + assert_eq!("User error: 0", &format!("{}", ApiError::User(0))); assert_eq!( "ApiError::User(65535) [131071]", @@ -951,6 +977,7 @@ mod tests { round_trip(Err(ApiError::HostBufferEmpty)); round_trip(Err(ApiError::HostBufferFull)); round_trip(Err(ApiError::AllocLayout)); + round_trip(Err(ApiError::NonRepresentableSerialization)); round_trip(Err(ApiError::ContractHeader(0))); round_trip(Err(ApiError::ContractHeader(u8::MAX))); round_trip(Err(ApiError::Mint(0))); @@ -961,5 +988,13 @@ mod tests { round_trip(Err(ApiError::User(u16::MAX))); round_trip(Err(ApiError::AuctionError(0))); round_trip(Err(ApiError::AuctionError(u8::MAX))); + round_trip(Err(ApiError::MessageTopicAlreadyRegistered)); + round_trip(Err(ApiError::MaxTopicsNumberExceeded)); + round_trip(Err(ApiError::MaxTopicNameSizeExceeded)); + round_trip(Err(ApiError::MessageTopicNotRegistered)); + round_trip(Err(ApiError::MessageTopicFull)); + round_trip(Err(ApiError::MessageTooLarge)); + round_trip(Err(ApiError::NotAllowedToAddContractVersion)); + round_trip(Err(ApiError::InvalidDelegationAmountLimits)); } } diff --git a/types/src/auction_state.rs b/types/src/auction_state.rs new file mode 100644 index 0000000000..7c13c63554 --- /dev/null +++ b/types/src/auction_state.rs @@ -0,0 +1,224 @@ +#![allow(deprecated)] + +use alloc::{ + collections::{btree_map::Entry, BTreeMap}, + vec::Vec, +}; +#[cfg(feature = "json-schema")] +use once_cell::sync::Lazy; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; 
+#[cfg(feature = "json-schema")] +use serde_map_to_array::KeyValueJsonSchema; +use serde_map_to_array::{BTreeMapToArray, KeyValueLabels}; + +use crate::{ + system::auction::{ + Bid, BidKind, DelegatorBid, DelegatorKind, EraValidators, Staking, ValidatorBid, + }, + Digest, EraId, PublicKey, U512, +}; + +#[cfg(feature = "json-schema")] +static ERA_VALIDATORS: Lazy = Lazy::new(|| { + use crate::SecretKey; + + let secret_key_1 = SecretKey::ed25519_from_bytes([42; SecretKey::ED25519_LENGTH]).unwrap(); + let public_key_1 = PublicKey::from(&secret_key_1); + + let mut validator_weights = BTreeMap::new(); + validator_weights.insert(public_key_1, U512::from(10)); + + let mut era_validators = BTreeMap::new(); + era_validators.insert(EraId::from(10u64), validator_weights); + + era_validators +}); + +#[cfg(feature = "json-schema")] +static AUCTION_INFO: Lazy = Lazy::new(|| { + use crate::{system::auction::DelegationRate, AccessRights, SecretKey, URef}; + use num_traits::Zero; + + let state_root_hash = Digest::from([11; Digest::LENGTH]); + let validator_secret_key = + SecretKey::ed25519_from_bytes([42; SecretKey::ED25519_LENGTH]).unwrap(); + let validator_public_key = PublicKey::from(&validator_secret_key); + + let mut bids = vec![]; + let validator_bid = ValidatorBid::unlocked( + validator_public_key.clone(), + URef::new([250; 32], AccessRights::READ_ADD_WRITE), + U512::from(20), + DelegationRate::zero(), + 0, + u64::MAX, + 0, + ); + bids.push(BidKind::Validator(Box::new(validator_bid))); + + let delegator_secret_key = + SecretKey::ed25519_from_bytes([43; SecretKey::ED25519_LENGTH]).unwrap(); + let delegator_public_key = PublicKey::from(&delegator_secret_key); + let delegator_bid = DelegatorBid::unlocked( + delegator_public_key.into(), + U512::from(10), + URef::new([251; 32], AccessRights::READ_ADD_WRITE), + validator_public_key, + ); + bids.push(BidKind::Delegator(Box::new(delegator_bid))); + + let height: u64 = 10; + let era_validators = ERA_VALIDATORS.clone(); + 
AuctionState::new(state_root_hash, height, era_validators, bids) +}); + +/// A validator's weight. +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +#[deprecated(since = "5.0.0")] +pub struct JsonValidatorWeights { + public_key: PublicKey, + weight: U512, +} + +/// The validators for the given era. +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +#[deprecated(since = "5.0.0")] +pub struct JsonEraValidators { + era_id: EraId, + validator_weights: Vec, +} + +/// Data structure summarizing auction contract data. +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +#[deprecated(since = "5.0.0")] +pub struct AuctionState { + /// Global state hash. + pub state_root_hash: Digest, + /// Block height. + pub block_height: u64, + /// Era validators. + pub era_validators: Vec, + /// All bids. + #[serde(with = "BTreeMapToArray::")] + bids: BTreeMap, +} + +impl AuctionState { + /// Create new instance of `AuctionState` + /// this logic will retrofit new data into old structure if applicable (it's a lossy + /// conversion). 
+ pub fn new( + state_root_hash: Digest, + block_height: u64, + era_validators: EraValidators, + bids: Vec, + ) -> Self { + let mut json_era_validators: Vec = Vec::new(); + for (era_id, validator_weights) in era_validators.iter() { + let mut json_validator_weights: Vec = Vec::new(); + for (public_key, weight) in validator_weights.iter() { + json_validator_weights.push(JsonValidatorWeights { + public_key: public_key.clone(), + weight: *weight, + }); + } + json_era_validators.push(JsonEraValidators { + era_id: *era_id, + validator_weights: json_validator_weights, + }); + } + + let staking = { + let mut staking: Staking = BTreeMap::new(); + for bid_kind in bids.iter().filter(|x| x.is_unified()) { + if let BidKind::Unified(bid) = bid_kind { + let public_key = bid.validator_public_key().clone(); + let validator_bid = ValidatorBid::unlocked( + bid.validator_public_key().clone(), + *bid.bonding_purse(), + *bid.staked_amount(), + *bid.delegation_rate(), + 0, + u64::MAX, + 0, + ); + let mut delegators: BTreeMap = BTreeMap::new(); + for (delegator_public_key, delegator) in bid.delegators() { + delegators.insert( + DelegatorKind::PublicKey(delegator_public_key.clone()), + DelegatorBid::from(delegator.clone()), + ); + } + staking.insert(public_key, (validator_bid, delegators)); + } + } + + for bid_kind in bids.iter().filter(|x| x.is_validator()) { + if let BidKind::Validator(validator_bid) = bid_kind { + let public_key = validator_bid.validator_public_key().clone(); + staking.insert(public_key, (*validator_bid.clone(), BTreeMap::new())); + } + } + + for bid_kind in bids.iter().filter(|x| x.is_delegator()) { + if let BidKind::Delegator(delegator_bid) = bid_kind { + let validator_public_key = delegator_bid.validator_public_key().clone(); + if let Entry::Occupied(mut occupant) = + staking.entry(validator_public_key.clone()) + { + let (_, delegators) = occupant.get_mut(); + delegators.insert( + delegator_bid.delegator_kind().clone(), + *delegator_bid.clone(), + ); + } + } + } + 
staking + }; + + let mut bids: BTreeMap = BTreeMap::new(); + for (public_key, (validator_bid, delegators)) in staking { + let bid = Bid::from_non_unified(validator_bid, delegators); + bids.insert(public_key, bid); + } + + AuctionState { + state_root_hash, + block_height, + era_validators: json_era_validators, + bids, + } + } + + // This method is not intended to be used by third party crates. + #[doc(hidden)] + #[cfg(feature = "json-schema")] + pub fn example() -> &'static Self { + &AUCTION_INFO + } +} + +struct BidLabels; + +impl KeyValueLabels for BidLabels { + const KEY: &'static str = "public_key"; + const VALUE: &'static str = "bid"; +} + +#[cfg(feature = "json-schema")] +impl KeyValueJsonSchema for BidLabels { + const JSON_SCHEMA_KV_NAME: Option<&'static str> = Some("PublicKeyAndBid"); + const JSON_SCHEMA_KV_DESCRIPTION: Option<&'static str> = + Some("A bid associated with the given public key."); + const JSON_SCHEMA_KEY_DESCRIPTION: Option<&'static str> = Some("The public key of the bidder."); + const JSON_SCHEMA_VALUE_DESCRIPTION: Option<&'static str> = Some("The bid details."); +} diff --git a/types/src/block.rs b/types/src/block.rs new file mode 100644 index 0000000000..833d7a9ac0 --- /dev/null +++ b/types/src/block.rs @@ -0,0 +1,618 @@ +mod available_block_range; +mod block_body; +mod block_global; +mod block_hash; +mod block_hash_and_height; +mod block_header; +mod block_header_with_signatures; +mod block_identifier; +mod block_signatures; +mod block_sync_status; +mod block_v1; +mod block_v2; +mod block_with_signatures; +mod chain_name_digest; +mod era_end; +mod finality_signature; +mod finality_signature_id; +mod json_compatibility; +mod rewarded_signatures; +mod rewards; +#[cfg(any(all(feature = "std", feature = "testing"), test))] +mod test_block_builder; + +use alloc::{boxed::Box, vec::Vec}; +use core::fmt::{self, Display, Formatter}; +use itertools::Either; +#[cfg(feature = "json-schema")] +use once_cell::sync::Lazy; +#[cfg(feature = "std")] +use 
std::error::Error as StdError; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "std")] +use num_rational::Ratio; + +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; + +#[cfg(feature = "std")] +use crate::TransactionConfig; + +use crate::{ + bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + transaction::TransactionHash, + Digest, EraId, ProtocolVersion, PublicKey, Timestamp, +}; +pub use available_block_range::AvailableBlockRange; +pub use block_body::{BlockBody, BlockBodyV1, BlockBodyV2}; +pub use block_global::{BlockGlobalAddr, BlockGlobalAddrTag}; +pub use block_hash::BlockHash; +pub use block_hash_and_height::BlockHashAndHeight; +pub use block_header::{BlockHeader, BlockHeaderV1, BlockHeaderV2}; +pub use block_header_with_signatures::{ + BlockHeaderWithSignatures, BlockHeaderWithSignaturesValidationError, +}; +pub use block_identifier::BlockIdentifier; +pub use block_signatures::{ + BlockSignatures, BlockSignaturesMergeError, BlockSignaturesV1, BlockSignaturesV2, +}; +pub use block_sync_status::{BlockSyncStatus, BlockSynchronizerStatus}; +pub use block_v1::BlockV1; +pub use block_v2::BlockV2; +pub use block_with_signatures::BlockWithSignatures; +pub use chain_name_digest::ChainNameDigest; +pub use era_end::{EraEnd, EraEndV1, EraEndV2, EraReport}; +pub use finality_signature::{FinalitySignature, FinalitySignatureV1, FinalitySignatureV2}; +pub use finality_signature_id::FinalitySignatureId; +#[cfg(all(feature = "std", feature = "json-schema"))] +pub use json_compatibility::JsonBlockWithSignatures; +pub use rewarded_signatures::{RewardedSignatures, SingleBlockRewardedSignatures}; +pub use rewards::Rewards; +#[cfg(any(all(feature = "std", feature = "testing"), test))] +pub use test_block_builder::{TestBlockBuilder, TestBlockV1Builder}; + +#[cfg(feature = "json-schema")] +static BLOCK: Lazy = Lazy::new(|| BlockV2::example().into()); + +/// An error that can arise when validating a block's cryptographic integrity using 
its hashes. +#[derive(Clone, Eq, PartialEq, Debug)] +#[cfg_attr(any(feature = "std", test), derive(serde::Serialize))] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[non_exhaustive] +pub enum BlockValidationError { + /// Problem serializing some of a block's data into bytes. + Bytesrepr(bytesrepr::Error), + /// The provided block's hash is not the same as the actual hash of the block. + UnexpectedBlockHash { + /// The block with the incorrect block hash. + block: Box, + /// The actual hash of the block. + actual_block_hash: BlockHash, + }, + /// The body hash in the header is not the same as the actual hash of the body of the block. + UnexpectedBodyHash { + /// The block with the header containing the incorrect block body hash. + block: Box, + /// The actual hash of the block's body. + actual_block_body_hash: Digest, + }, + /// The header version does not match the body version. + IncompatibleVersions, +} + +impl Display for BlockValidationError { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + BlockValidationError::Bytesrepr(error) => { + write!(formatter, "error validating block: {}", error) + } + BlockValidationError::UnexpectedBlockHash { + block, + actual_block_hash, + } => { + write!( + formatter, + "block has incorrect block hash - actual block hash: {:?}, block: {:?}", + actual_block_hash, block + ) + } + BlockValidationError::UnexpectedBodyHash { + block, + actual_block_body_hash, + } => { + write!( + formatter, + "block header has incorrect body hash - actual body hash: {:?}, block: {:?}", + actual_block_body_hash, block + ) + } + BlockValidationError::IncompatibleVersions => { + write!(formatter, "block body and header versions do not match") + } + } + } +} + +impl From for BlockValidationError { + fn from(error: bytesrepr::Error) -> Self { + BlockValidationError::Bytesrepr(error) + } +} + +#[cfg(feature = "std")] +impl StdError for BlockValidationError { + fn source(&self) -> Option<&(dyn StdError + 'static)> { + 
match self { + BlockValidationError::Bytesrepr(error) => Some(error), + BlockValidationError::UnexpectedBlockHash { .. } + | BlockValidationError::UnexpectedBodyHash { .. } + | BlockValidationError::IncompatibleVersions => None, + } + } +} + +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub enum BlockConversionError { + DifferentVersion { expected_version: u8 }, +} + +#[cfg(feature = "std")] +impl Display for BlockConversionError { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self { + BlockConversionError::DifferentVersion { expected_version } => { + write!( + f, + "Could not convert a block to the expected version {}", + expected_version + ) + } + } + } +} + +const TAG_LENGTH: usize = U8_SERIALIZED_LENGTH; + +/// Tag for block body v1. +const BLOCK_V1_TAG: u8 = 0; +/// Tag for block body v2. +const BLOCK_V2_TAG: u8 = 1; + +/// A block after execution. +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr( + any(feature = "std", feature = "json-schema", test), + derive(serde::Serialize, serde::Deserialize) +)] +#[derive(Clone, Debug, PartialEq, Eq)] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub enum Block { + /// The legacy, initial version of the block. + #[cfg_attr( + any(feature = "std", feature = "json-schema", test), + serde(rename = "Version1") + )] + V1(BlockV1), + /// The version 2 of the block. + #[cfg_attr( + any(feature = "std", feature = "json-schema", test), + serde(rename = "Version2") + )] + V2(BlockV2), +} + +impl Block { + // This method is not intended to be used by third party crates. 
+ #[doc(hidden)] + pub fn new_from_header_and_body( + block_header: BlockHeader, + block_body: BlockBody, + ) -> Result> { + let hash = block_header.block_hash(); + let block = match (block_body, block_header) { + (BlockBody::V1(body), BlockHeader::V1(header)) => { + Ok(Block::V1(BlockV1 { hash, header, body })) + } + (BlockBody::V2(body), BlockHeader::V2(header)) => { + Ok(Block::V2(BlockV2 { hash, header, body })) + } + _ => Err(BlockValidationError::IncompatibleVersions), + }?; + + block.verify()?; + Ok(block) + } + + /// Clones the header, put it in the versioning enum, and returns it. + pub fn clone_header(&self) -> BlockHeader { + match self { + Block::V1(v1) => BlockHeader::V1(v1.header().clone()), + Block::V2(v2) => BlockHeader::V2(v2.header().clone()), + } + } + + /// Returns the block's header, consuming `self`. + pub fn take_header(self) -> BlockHeader { + match self { + Block::V1(v1) => BlockHeader::V1(v1.take_header()), + Block::V2(v2) => BlockHeader::V2(v2.take_header()), + } + } + + /// Returns the timestamp from when the block was proposed. + pub fn timestamp(&self) -> Timestamp { + match self { + Block::V1(v1) => v1.header.timestamp(), + Block::V2(v2) => v2.header.timestamp(), + } + } + + /// Returns the protocol version of the network from when this block was created. + pub fn protocol_version(&self) -> ProtocolVersion { + match self { + Block::V1(v1) => v1.header.protocol_version(), + Block::V2(v2) => v2.header.protocol_version(), + } + } + + /// The hash of this block's header. + pub fn hash(&self) -> &BlockHash { + match self { + Block::V1(v1) => v1.hash(), + Block::V2(v2) => v2.hash(), + } + } + + /// Returns the hash of the block's body. + pub fn body_hash(&self) -> &Digest { + match self { + Block::V1(v1) => v1.header().body_hash(), + Block::V2(v2) => v2.header().body_hash(), + } + } + + /// Returns a random bit needed for initializing a future era. 
+ pub fn random_bit(&self) -> bool { + match self { + Block::V1(v1) => v1.header().random_bit(), + Block::V2(v2) => v2.header().random_bit(), + } + } + + /// Returns a seed needed for initializing a future era. + pub fn accumulated_seed(&self) -> &Digest { + match self { + Block::V1(v1) => v1.accumulated_seed(), + Block::V2(v2) => v2.accumulated_seed(), + } + } + + /// Returns the parent block's hash. + pub fn parent_hash(&self) -> &BlockHash { + match self { + Block::V1(v1) => v1.parent_hash(), + Block::V2(v2) => v2.parent_hash(), + } + } + + /// Returns the public key of the validator which proposed the block. + pub fn proposer(&self) -> &PublicKey { + match self { + Block::V1(v1) => v1.proposer(), + Block::V2(v2) => v2.proposer(), + } + } + + /// Clone the body and wrap is up in the versioned `Body`. + pub fn clone_body(&self) -> BlockBody { + match self { + Block::V1(v1) => BlockBody::V1(v1.body().clone()), + Block::V2(v2) => BlockBody::V2(v2.body().clone()), + } + } + + /// Returns the block's body, consuming `self`. + pub fn take_body(self) -> BlockBody { + match self { + Block::V1(v1) => BlockBody::V1(v1.take_body()), + Block::V2(v2) => BlockBody::V2(v2.take_body()), + } + } + + /// Check the integrity of a block by hashing its body and header + pub fn verify(&self) -> Result<(), BlockValidationError> { + match self { + Block::V1(v1) => v1.verify(), + Block::V2(v2) => v2.verify(), + } + } + + /// Returns the height of this block, i.e. the number of ancestors. + pub fn height(&self) -> u64 { + match self { + Block::V1(v1) => v1.header.height(), + Block::V2(v2) => v2.header.height(), + } + } + + /// Returns the era ID in which this block was created. + pub fn era_id(&self) -> EraId { + match self { + Block::V1(v1) => v1.era_id(), + Block::V2(v2) => v2.era_id(), + } + } + + /// Clones the era end, put it in the versioning enum, and returns it. 
+ pub fn clone_era_end(&self) -> Option { + match self { + Block::V1(v1) => v1.header().era_end().cloned().map(EraEnd::V1), + Block::V2(v2) => v2.header().era_end().cloned().map(EraEnd::V2), + } + } + + /// Returns `true` if this block is the last one in the current era. + pub fn is_switch_block(&self) -> bool { + match self { + Block::V1(v1) => v1.header.is_switch_block(), + Block::V2(v2) => v2.header.is_switch_block(), + } + } + + /// Returns `true` if this block is the first block of the chain, the genesis block. + pub fn is_genesis(&self) -> bool { + match self { + Block::V1(v1) => v1.header.is_genesis(), + Block::V2(v2) => v2.header.is_genesis(), + } + } + + /// Returns the root hash of global state after the deploys in this block have been executed. + pub fn state_root_hash(&self) -> &Digest { + match self { + Block::V1(v1) => v1.header.state_root_hash(), + Block::V2(v2) => v2.header.state_root_hash(), + } + } + + /// List of identifiers for finality signatures for a particular past block. + pub fn rewarded_signatures(&self) -> &RewardedSignatures { + match self { + Block::V1(_v1) => &rewarded_signatures::EMPTY, + Block::V2(v2) => v2.body.rewarded_signatures(), + } + } + + /// Return the gas price for V2 block header. + pub fn maybe_current_gas_price(&self) -> Option { + match self { + Block::V1(_) => None, + Block::V2(v2) => Some(v2.header().current_gas_price()), + } + } + + /// Returns the count of transactions within a block. + pub fn transaction_count(&self) -> u64 { + match self { + Block::V1(block) => { + (block.body.deploy_hashes().len() + block.body.transfer_hashes().len()) as u64 + } + Block::V2(block_v2) => block_v2.all_transactions().count() as u64, + } + } + + /// Returns a list of all transaction hashes in a block. 
+ pub fn all_transaction_hashes(&self) -> impl Iterator + '_ { + match self { + Block::V1(block) => Either::Left( + block + .body + .deploy_and_transfer_hashes() + .map(TransactionHash::from), + ), + Block::V2(block_v2) => Either::Right(block_v2.all_transactions().copied()), + } + } + + /// Returns the utilization of the block against a given chainspec. + #[cfg(feature = "std")] + pub fn block_utilization(&self, transaction_config: TransactionConfig) -> u64 { + match self { + Block::V1(_) => { + // We shouldnt be tracking this for legacy blocks + 0 + } + Block::V2(block_v2) => { + let has_hit_slot_limt = self.has_hit_slot_capacity(transaction_config.clone()); + let per_block_capacity = transaction_config + .transaction_v1_config + .get_max_block_count(); + + if has_hit_slot_limt { + 100u64 + } else { + let num = block_v2.all_transactions().count() as u64; + Ratio::new(num * 100, per_block_capacity).to_integer() + } + } + } + } + + /// Returns true if the block has reached capacity in any of its transaction limit. 
+ #[cfg(feature = "std")] + pub fn has_hit_slot_capacity(&self, transaction_config: TransactionConfig) -> bool { + match self { + Block::V1(_) => false, + Block::V2(block_v2) => { + let mint_count = block_v2.mint().count(); + if mint_count as u64 + >= transaction_config + .transaction_v1_config + .native_mint_lane + .max_transaction_count() + { + return true; + } + + let auction_count = block_v2.auction().count(); + if auction_count as u64 + >= transaction_config + .transaction_v1_config + .native_auction_lane + .max_transaction_count() + { + return true; + } + + let install_upgrade_count = block_v2.install_upgrade().count(); + if install_upgrade_count as u64 + >= transaction_config + .transaction_v1_config + .install_upgrade_lane + .max_transaction_count() + { + return true; + } + + for (lane_id, transactions) in block_v2.body.transactions() { + let transaction_count = transactions.len(); + if *lane_id < 2 { + continue; + }; + let max_transaction_count = transaction_config + .transaction_v1_config + .get_max_transaction_count(*lane_id); + + if transaction_count as u64 >= max_transaction_count { + return true; + } + } + false + } + } + } + + // This method is not intended to be used by third party crates. 
+ #[doc(hidden)] + #[cfg(feature = "json-schema")] + pub fn example() -> &'static Self { + &BLOCK + } +} + +impl Display for Block { + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + write!( + formatter, + "executed block #{}, {}, timestamp {}, {}, parent {}, post-state hash {}, body hash \ + {}, random bit {}, protocol version: {}", + self.height(), + self.hash(), + self.timestamp(), + self.era_id(), + self.parent_hash().inner(), + self.state_root_hash(), + self.body_hash(), + self.random_bit(), + self.protocol_version() + )?; + if let Some(era_end) = self.clone_era_end() { + write!(formatter, ", era_end: {}", era_end)?; + } + Ok(()) + } +} + +impl ToBytes for Block { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + match self { + Block::V1(v1) => { + buffer.insert(0, BLOCK_V1_TAG); + buffer.extend(v1.to_bytes()?); + } + Block::V2(v2) => { + buffer.insert(0, BLOCK_V2_TAG); + buffer.extend(v2.to_bytes()?); + } + } + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + TAG_LENGTH + + match self { + Block::V1(v1) => v1.serialized_length(), + Block::V2(v2) => v2.serialized_length(), + } + } +} + +impl FromBytes for Block { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + match tag { + BLOCK_V1_TAG => { + let (body, remainder): (BlockV1, _) = FromBytes::from_bytes(remainder)?; + Ok((Self::V1(body), remainder)) + } + BLOCK_V2_TAG => { + let (body, remainder): (BlockV2, _) = FromBytes::from_bytes(remainder)?; + Ok((Self::V2(body), remainder)) + } + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +impl From<&BlockV2> for Block { + fn from(block: &BlockV2) -> Self { + Block::V2(block.clone()) + } +} + +impl From for Block { + fn from(block: BlockV2) -> Self { + Block::V2(block) + } +} + +impl From<&BlockV1> for Block { + fn from(block: &BlockV1) -> Self { + Block::V1(block.clone()) + } +} + +impl From for Block 
{ + fn from(block: BlockV1) -> Self { + Block::V1(block) + } +} + +#[cfg(all(feature = "std", feature = "json-schema"))] +impl From for Block { + fn from(block_with_signatures: JsonBlockWithSignatures) -> Self { + block_with_signatures.block + } +} + +#[cfg(test)] +mod tests { + use crate::{bytesrepr, testing::TestRng}; + + use super::*; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + let block_v1 = TestBlockV1Builder::new().build(rng); + let block = Block::V1(block_v1); + bytesrepr::test_serialization_roundtrip(&block); + + let block_v2 = TestBlockBuilder::new().build(rng); + let block = Block::V2(block_v2); + bytesrepr::test_serialization_roundtrip(&block); + } +} diff --git a/types/src/block/available_block_range.rs b/types/src/block/available_block_range.rs new file mode 100644 index 0000000000..5022d366c0 --- /dev/null +++ b/types/src/block/available_block_range.rs @@ -0,0 +1,110 @@ +use core::fmt::{self, Display, Formatter}; + +use alloc::vec::Vec; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use crate::bytesrepr::{self, FromBytes, ToBytes}; + +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +#[cfg(any(feature = "testing", test))] +use rand::Rng; + +/// An unbroken, inclusive range of blocks. +#[derive(Copy, Clone, PartialOrd, Ord, PartialEq, Eq, Hash, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub struct AvailableBlockRange { + /// The inclusive lower bound of the range. + low: u64, + /// The inclusive upper bound of the range. + high: u64, +} + +impl AvailableBlockRange { + /// An `AvailableRange` of [0, 0]. + pub const RANGE_0_0: AvailableBlockRange = AvailableBlockRange { low: 0, high: 0 }; + + /// Constructs a new `AvailableBlockRange` with the given limits. 
+ pub fn new(low: u64, high: u64) -> Self { + assert!( + low <= high, + "cannot construct available block range with low > high" + ); + AvailableBlockRange { low, high } + } + + /// Returns `true` if `height` is within the range. + pub fn contains(&self, height: u64) -> bool { + height >= self.low && height <= self.high + } + + /// Returns the low value. + pub fn low(&self) -> u64 { + self.low + } + + /// Returns the high value. + pub fn high(&self) -> u64 { + self.high + } + + /// Random. + #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + let low = rng.gen::() as u64; + let high = low + rng.gen::() as u64; + Self { low, high } + } +} + +impl Display for AvailableBlockRange { + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + write!( + formatter, + "available block range [{}, {}]", + self.low, self.high + ) + } +} + +impl ToBytes for AvailableBlockRange { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.low.write_bytes(writer)?; + self.high.write_bytes(writer) + } + + fn serialized_length(&self) -> usize { + self.low.serialized_length() + self.high.serialized_length() + } +} + +impl FromBytes for AvailableBlockRange { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (low, remainder) = u64::from_bytes(bytes)?; + let (high, remainder) = u64::from_bytes(remainder)?; + Ok((AvailableBlockRange { low, high }, remainder)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::testing::TestRng; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + + let val = AvailableBlockRange::random(rng); + bytesrepr::test_serialization_roundtrip(&val); + } +} diff --git a/types/src/block/block_body.rs b/types/src/block/block_body.rs new file mode 100644 index 0000000000..9e7cd6b741 
--- /dev/null +++ b/types/src/block/block_body.rs @@ -0,0 +1,116 @@ +mod block_body_v1; +mod block_body_v2; + +pub use block_body_v1::BlockBodyV1; +pub use block_body_v2::BlockBodyV2; + +use alloc::vec::Vec; +use core::fmt::{self, Display, Formatter}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +use serde::{Deserialize, Serialize}; + +use crate::bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}; + +const TAG_LENGTH: usize = U8_SERIALIZED_LENGTH; + +/// Tag for block body v1. +pub const BLOCK_BODY_V1_TAG: u8 = 0; +/// Tag for block body v2. +pub const BLOCK_BODY_V2_TAG: u8 = 1; + +/// The versioned body portion of a block. It encapsulates different variants of the BlockBody +/// struct. +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(any(feature = "testing", test), derive(PartialEq))] +#[derive(Clone, Serialize, Deserialize, Debug)] +#[allow(clippy::large_enum_variant)] +pub enum BlockBody { + /// The legacy, initial version of the body portion of a block. + #[serde(rename = "Version1")] + V1(BlockBodyV1), + /// The version 2 of the body portion of a block, which includes the + /// `past_finality_signatures`. 
+ #[serde(rename = "Version2")] + V2(BlockBodyV2), +} + +impl Display for BlockBody { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + BlockBody::V1(v1) => Display::fmt(&v1, formatter), + BlockBody::V2(v2) => Display::fmt(&v2, formatter), + } + } +} + +impl From for BlockBody { + fn from(body: BlockBodyV1) -> Self { + BlockBody::V1(body) + } +} + +impl From<&BlockBodyV2> for BlockBody { + fn from(body: &BlockBodyV2) -> Self { + BlockBody::V2(body.clone()) + } +} + +impl ToBytes for BlockBody { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + match self { + BlockBody::V1(v1) => { + buffer.insert(0, BLOCK_BODY_V1_TAG); + buffer.extend(v1.to_bytes()?); + } + BlockBody::V2(v2) => { + buffer.insert(0, BLOCK_BODY_V2_TAG); + buffer.extend(v2.to_bytes()?); + } + } + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + TAG_LENGTH + + match self { + BlockBody::V1(v1) => v1.serialized_length(), + BlockBody::V2(v2) => v2.serialized_length(), + } + } +} + +impl FromBytes for BlockBody { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + match tag { + BLOCK_BODY_V1_TAG => { + let (body, remainder): (BlockBodyV1, _) = FromBytes::from_bytes(remainder)?; + Ok((Self::V1(body), remainder)) + } + BLOCK_BODY_V2_TAG => { + let (body, remainder): (BlockBodyV2, _) = FromBytes::from_bytes(remainder)?; + Ok((Self::V2(body), remainder)) + } + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +#[cfg(test)] +mod tests { + use crate::{bytesrepr, testing::TestRng, TestBlockBuilder, TestBlockV1Builder}; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + + let block_body_v1 = TestBlockV1Builder::new().build_versioned(rng).clone_body(); + bytesrepr::test_serialization_roundtrip(&block_body_v1); + + let block_body_v2 = TestBlockBuilder::new().build_versioned(rng).clone_body(); + 
bytesrepr::test_serialization_roundtrip(&block_body_v2); + } +} diff --git a/types/src/block/block_body/block_body_v1.rs b/types/src/block/block_body/block_body_v1.rs new file mode 100644 index 0000000000..e32ab4b97c --- /dev/null +++ b/types/src/block/block_body/block_body_v1.rs @@ -0,0 +1,160 @@ +use alloc::vec::Vec; +use core::fmt::{self, Display, Formatter}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "once_cell", test))] +use once_cell::sync::OnceCell; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + DeployHash, Digest, PublicKey, +}; + +/// The body portion of a block. Version 1. +#[derive(Clone, Eq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub struct BlockBodyV1 { + /// The public key of the validator which proposed the block. + pub(super) proposer: PublicKey, + /// The deploy hashes of the non-transfer deploys within the block. + pub(super) deploy_hashes: Vec, + /// The deploy hashes of the transfers within the block. + pub(super) transfer_hashes: Vec, + #[serde(skip)] + #[cfg_attr( + all(any(feature = "once_cell", test), feature = "datasize"), + data_size(skip) + )] + #[cfg(any(feature = "once_cell", test))] + pub(super) hash: OnceCell, +} + +impl BlockBodyV1 { + /// Constructs a new `BlockBody`. + pub(crate) fn new( + proposer: PublicKey, + deploy_hashes: Vec, + transfer_hashes: Vec, + ) -> Self { + BlockBodyV1 { + proposer, + deploy_hashes, + transfer_hashes, + #[cfg(any(feature = "once_cell", test))] + hash: OnceCell::new(), + } + } + + /// Returns the public key of the validator which proposed the block. + pub fn proposer(&self) -> &PublicKey { + &self.proposer + } + + /// Returns the deploy hashes of the non-transfer deploys within the block. 
+ pub fn deploy_hashes(&self) -> &[DeployHash] { + &self.deploy_hashes + } + + /// Returns the deploy hashes of the transfers within the block. + pub fn transfer_hashes(&self) -> &[DeployHash] { + &self.transfer_hashes + } + + /// Returns the deploy and transfer hashes in the order in which they were executed. + pub fn deploy_and_transfer_hashes(&self) -> impl Iterator { + self.deploy_hashes() + .iter() + .chain(self.transfer_hashes().iter()) + } + + /// Returns the body hash, i.e. the hash of the body's serialized bytes. + pub fn hash(&self) -> Digest { + #[cfg(any(feature = "once_cell", test))] + return *self.hash.get_or_init(|| self.compute_hash()); + + #[cfg(not(any(feature = "once_cell", test)))] + self.compute_hash() + } + + fn compute_hash(&self) -> Digest { + let serialized_body = self + .to_bytes() + .unwrap_or_else(|error| panic!("should serialize block body: {}", error)); + Digest::hash(serialized_body) + } +} + +impl PartialEq for BlockBodyV1 { + fn eq(&self, other: &BlockBodyV1) -> bool { + // Destructure to make sure we don't accidentally omit fields. 
+ #[cfg(any(feature = "once_cell", test))] + let BlockBodyV1 { + proposer, + deploy_hashes, + transfer_hashes, + hash: _, + } = self; + #[cfg(not(any(feature = "once_cell", test)))] + let BlockBodyV1 { + proposer, + deploy_hashes, + transfer_hashes, + } = self; + *proposer == other.proposer + && *deploy_hashes == other.deploy_hashes + && *transfer_hashes == other.transfer_hashes + } +} + +impl Display for BlockBodyV1 { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + write!( + formatter, + "block body proposed by {}, {} deploys, {} transfers", + self.proposer, + self.deploy_hashes.len(), + self.transfer_hashes.len() + ) + } +} + +impl ToBytes for BlockBodyV1 { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.proposer.write_bytes(writer)?; + self.deploy_hashes.write_bytes(writer)?; + self.transfer_hashes.write_bytes(writer) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.proposer.serialized_length() + + self.deploy_hashes.serialized_length() + + self.transfer_hashes.serialized_length() + } +} + +impl FromBytes for BlockBodyV1 { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (proposer, bytes) = PublicKey::from_bytes(bytes)?; + let (deploy_hashes, bytes) = Vec::::from_bytes(bytes)?; + let (transfer_hashes, bytes) = Vec::::from_bytes(bytes)?; + let body = BlockBodyV1 { + proposer, + deploy_hashes, + transfer_hashes, + #[cfg(any(feature = "once_cell", test))] + hash: OnceCell::new(), + }; + Ok((body, bytes)) + } +} diff --git a/types/src/block/block_body/block_body_v2.rs b/types/src/block/block_body/block_body_v2.rs new file mode 100644 index 0000000000..8aae7f75d9 --- /dev/null +++ b/types/src/block/block_body/block_body_v2.rs @@ -0,0 +1,174 @@ +use alloc::{collections::BTreeMap, vec::Vec}; +use core::fmt::{self, Display, 
Formatter}; +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "once_cell", test))] +use once_cell::sync::OnceCell; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use crate::{ + block::RewardedSignatures, + bytesrepr::{self, FromBytes, ToBytes}, + Digest, TransactionHash, AUCTION_LANE_ID, INSTALL_UPGRADE_LANE_ID, LARGE_WASM_LANE_ID, + MEDIUM_WASM_LANE_ID, MINT_LANE_ID, SMALL_WASM_LANE_ID, +}; + +/// The body portion of a block. Version 2. +#[derive(Clone, Eq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub struct BlockBodyV2 { + /// Map of transactions mapping categories to a list of transaction hashes. + pub(super) transactions: BTreeMap>, + /// List of identifiers for finality signatures for a particular past block. + pub(super) rewarded_signatures: RewardedSignatures, + #[serde(skip)] + #[cfg_attr( + all(any(feature = "once_cell", test), feature = "datasize"), + data_size(skip) + )] + #[cfg(any(feature = "once_cell", test))] + pub(super) hash: OnceCell, +} + +impl BlockBodyV2 { + /// Constructs a new `BlockBodyV2`. + pub(crate) fn new( + transactions: BTreeMap>, + rewarded_signatures: RewardedSignatures, + ) -> Self { + BlockBodyV2 { + transactions, + rewarded_signatures, + #[cfg(any(feature = "once_cell", test))] + hash: OnceCell::new(), + } + } + + /// Returns the hashes of the transactions within the block filtered by lane_id. + pub fn transaction_by_lane(&self, lane_id: u8) -> impl Iterator { + match self.transactions.get(&lane_id) { + Some(transactions) => transactions.to_vec(), + None => vec![], + } + .into_iter() + } + + /// Returns the hashes of the mint transactions within the block. + pub fn mint(&self) -> impl Iterator { + self.transaction_by_lane(MINT_LANE_ID) + } + + /// Returns the hashes of the auction transactions within the block. 
+ pub fn auction(&self) -> impl Iterator { + self.transaction_by_lane(AUCTION_LANE_ID) + } + + /// Returns the hashes of the installer/upgrader transactions within the block. + pub fn install_upgrade(&self) -> impl Iterator { + self.transaction_by_lane(INSTALL_UPGRADE_LANE_ID) + } + + /// Returns the hashes of the transactions filtered by lane id within the block. + pub fn transactions_by_lane_id(&self, lane_id: u8) -> impl Iterator { + self.transaction_by_lane(lane_id) + } + + /// Returns a reference to the collection of mapped transactions. + pub fn transactions(&self) -> &BTreeMap> { + &self.transactions + } + + /// Returns all of the transaction hashes in the order in which they were executed. + pub fn all_transactions(&self) -> impl Iterator { + self.transactions.values().flatten() + } + + /// Returns the body hash, i.e. the hash of the body's serialized bytes. + pub fn hash(&self) -> Digest { + #[cfg(any(feature = "once_cell", test))] + return *self.hash.get_or_init(|| self.compute_hash()); + + #[cfg(not(any(feature = "once_cell", test)))] + self.compute_hash() + } + + fn compute_hash(&self) -> Digest { + let serialized_body = self + .to_bytes() + .unwrap_or_else(|error| panic!("should serialize block body: {}", error)); + Digest::hash(serialized_body) + } + + /// Return the list of identifiers for finality signatures for a particular past block. + pub fn rewarded_signatures(&self) -> &RewardedSignatures { + &self.rewarded_signatures + } +} + +impl PartialEq for BlockBodyV2 { + fn eq(&self, other: &BlockBodyV2) -> bool { + // Destructure to make sure we don't accidentally omit fields. 
+ #[cfg(any(feature = "once_cell", test))] + let BlockBodyV2 { + transactions, + rewarded_signatures, + hash: _, + } = self; + #[cfg(not(any(feature = "once_cell", test)))] + let BlockBodyV2 { + transactions, + rewarded_signatures, + } = self; + *transactions == other.transactions && *rewarded_signatures == other.rewarded_signatures + } +} + +impl Display for BlockBodyV2 { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + write!( + formatter, + "block body, {} mint, {} auction, {} install_upgrade, {} large wasm, {} medium wasm, {} small wasm", + self.mint().count(), + self.auction().count(), + self.install_upgrade().count(), + self.transaction_by_lane(LARGE_WASM_LANE_ID).count(), + self.transaction_by_lane(MEDIUM_WASM_LANE_ID).count(), + self.transaction_by_lane(SMALL_WASM_LANE_ID).count(), + ) + } +} + +impl ToBytes for BlockBodyV2 { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.transactions.write_bytes(writer)?; + self.rewarded_signatures.write_bytes(writer)?; + Ok(()) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.transactions.serialized_length() + self.rewarded_signatures.serialized_length() + } +} + +impl FromBytes for BlockBodyV2 { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (transactions, bytes) = FromBytes::from_bytes(bytes)?; + let (rewarded_signatures, bytes) = RewardedSignatures::from_bytes(bytes)?; + let body = BlockBodyV2 { + transactions, + rewarded_signatures, + #[cfg(any(feature = "once_cell", test))] + hash: OnceCell::new(), + }; + Ok((body, bytes)) + } +} diff --git a/types/src/block/block_global.rs b/types/src/block/block_global.rs new file mode 100644 index 0000000000..739c16c2bb --- /dev/null +++ b/types/src/block/block_global.rs @@ -0,0 +1,306 @@ +use alloc::{ + string::{String, ToString}, + 
vec::Vec, +}; + +use crate::{ + bytesrepr, + bytesrepr::{FromBytes, ToBytes}, + checksummed_hex, + key::FromStrError, + Key, +}; + +use core::{ + convert::TryFrom, + fmt::{Debug, Display, Formatter}, +}; +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::distributions::{Distribution, Standard}; +#[cfg(any(feature = "testing", test))] +use rand::Rng; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +const BLOCK_TIME_TAG: u8 = 0; +const MESSAGE_COUNT_TAG: u8 = 1; +const PROTOCOL_VERSION_TAG: u8 = 2; +const ADDRESSABLE_ENTITY_TAG: u8 = 3; + +/// Serialization tag for BlockGlobalAddr variants. +#[derive( + Debug, Default, PartialOrd, Ord, PartialEq, Eq, Hash, Clone, Copy, Serialize, Deserialize, +)] +#[repr(u8)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub enum BlockGlobalAddrTag { + #[default] + /// Tag for block time variant. + BlockTime = BLOCK_TIME_TAG, + /// Tag for processing variant. + MessageCount = MESSAGE_COUNT_TAG, + /// Tag for protocol version variant. + ProtocolVersion = PROTOCOL_VERSION_TAG, + /// Tag for addressable entity variant. + AddressableEntity = ADDRESSABLE_ENTITY_TAG, +} + +impl BlockGlobalAddrTag { + /// The length in bytes of a [`BlockGlobalAddrTag`]. + pub const BLOCK_GLOBAL_ADDR_TAG_LENGTH: usize = 1; + + /// Attempts to map `BalanceHoldAddrTag` from a u8. + pub fn try_from_u8(value: u8) -> Option { + // TryFrom requires std, so doing this instead. 
+ if value == BLOCK_TIME_TAG { + return Some(BlockGlobalAddrTag::BlockTime); + } + if value == MESSAGE_COUNT_TAG { + return Some(BlockGlobalAddrTag::MessageCount); + } + if value == PROTOCOL_VERSION_TAG { + return Some(BlockGlobalAddrTag::ProtocolVersion); + } + if value == ADDRESSABLE_ENTITY_TAG { + return Some(BlockGlobalAddrTag::AddressableEntity); + } + None + } +} + +impl Display for BlockGlobalAddrTag { + fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { + let tag = match self { + BlockGlobalAddrTag::BlockTime => BLOCK_TIME_TAG, + BlockGlobalAddrTag::MessageCount => MESSAGE_COUNT_TAG, + BlockGlobalAddrTag::ProtocolVersion => PROTOCOL_VERSION_TAG, + BlockGlobalAddrTag::AddressableEntity => ADDRESSABLE_ENTITY_TAG, + }; + write!(f, "{}", base16::encode_lower(&[tag])) + } +} + +impl ToBytes for BlockGlobalAddrTag { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + Self::BLOCK_GLOBAL_ADDR_TAG_LENGTH + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + writer.push(*self as u8); + Ok(()) + } +} + +impl FromBytes for BlockGlobalAddrTag { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + if let Some((byte, rem)) = bytes.split_first() { + let tag = BlockGlobalAddrTag::try_from_u8(*byte).ok_or(bytesrepr::Error::Formatting)?; + Ok((tag, rem)) + } else { + Err(bytesrepr::Error::Formatting) + } + } +} + +/// Address for singleton values associated to specific block. These are values which are +/// calculated or set during the execution of a block such as the block timestamp, or the +/// total count of messages emitted during the execution of the block, and so on. 
+#[derive(PartialOrd, Ord, PartialEq, Eq, Hash, Clone, Copy, Serialize, Deserialize, Default)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub enum BlockGlobalAddr { + /// Block time variant + #[default] + BlockTime, + /// Message count variant. + MessageCount, + /// Protocol version. + ProtocolVersion, + /// Addressable entity. + AddressableEntity, +} + +impl BlockGlobalAddr { + /// The length in bytes of a [`BlockGlobalAddr`]. + pub const BLOCK_GLOBAL_ADDR_LENGTH: usize = BlockGlobalAddrTag::BLOCK_GLOBAL_ADDR_TAG_LENGTH; + + /// How long is be the serialized value for this instance. + pub fn serialized_length(&self) -> usize { + Self::BLOCK_GLOBAL_ADDR_LENGTH + } + + /// Returns the tag of this instance. + pub fn tag(&self) -> BlockGlobalAddrTag { + match self { + BlockGlobalAddr::MessageCount => BlockGlobalAddrTag::MessageCount, + BlockGlobalAddr::BlockTime => BlockGlobalAddrTag::BlockTime, + BlockGlobalAddr::ProtocolVersion => BlockGlobalAddrTag::ProtocolVersion, + BlockGlobalAddr::AddressableEntity => BlockGlobalAddrTag::AddressableEntity, + } + } + + /// To formatted string. + pub fn to_formatted_string(self) -> String { + match self { + BlockGlobalAddr::BlockTime => base16::encode_lower(&BLOCK_TIME_TAG.to_le_bytes()), + BlockGlobalAddr::MessageCount => base16::encode_lower(&MESSAGE_COUNT_TAG.to_le_bytes()), + BlockGlobalAddr::ProtocolVersion => { + base16::encode_lower(&PROTOCOL_VERSION_TAG.to_le_bytes()) + } + BlockGlobalAddr::AddressableEntity => { + base16::encode_lower(&ADDRESSABLE_ENTITY_TAG.to_le_bytes()) + } + } + } + + /// From formatted string. 
+ pub fn from_formatted_string(hex: &str) -> Result { + let bytes = checksummed_hex::decode(hex) + .map_err(|error| FromStrError::BlockGlobal(error.to_string()))?; + if bytes.is_empty() { + return Err(FromStrError::BlockGlobal( + "bytes should not be 0 len".to_string(), + )); + } + let tag_bytes = <[u8; BlockGlobalAddrTag::BLOCK_GLOBAL_ADDR_TAG_LENGTH]>::try_from( + bytes[0..BlockGlobalAddrTag::BLOCK_GLOBAL_ADDR_TAG_LENGTH].as_ref(), + ) + .map_err(|err| FromStrError::BlockGlobal(err.to_string()))?; + let tag = ::from_le_bytes(tag_bytes); + let tag = BlockGlobalAddrTag::try_from_u8(tag).ok_or_else(|| { + FromStrError::BlockGlobal("failed to parse block global addr tag".to_string()) + })?; + + // if more tags are added, extend the below logic to handle every case. + match tag { + BlockGlobalAddrTag::BlockTime => Ok(BlockGlobalAddr::BlockTime), + BlockGlobalAddrTag::MessageCount => Ok(BlockGlobalAddr::MessageCount), + BlockGlobalAddrTag::ProtocolVersion => Ok(BlockGlobalAddr::ProtocolVersion), + BlockGlobalAddrTag::AddressableEntity => Ok(BlockGlobalAddr::AddressableEntity), + } + } +} + +impl ToBytes for BlockGlobalAddr { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + buffer.push(self.tag() as u8); + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.serialized_length() + } +} + +impl FromBytes for BlockGlobalAddr { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder): (u8, &[u8]) = FromBytes::from_bytes(bytes)?; + match tag { + tag if tag == BlockGlobalAddrTag::BlockTime as u8 => { + Ok((BlockGlobalAddr::BlockTime, remainder)) + } + tag if tag == BlockGlobalAddrTag::MessageCount as u8 => { + Ok((BlockGlobalAddr::MessageCount, remainder)) + } + tag if tag == BlockGlobalAddrTag::ProtocolVersion as u8 => { + Ok((BlockGlobalAddr::ProtocolVersion, remainder)) + } + tag if tag == BlockGlobalAddrTag::AddressableEntity as u8 => { + 
Ok((BlockGlobalAddr::AddressableEntity, remainder)) + } + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +impl From for Key { + fn from(block_global_addr: BlockGlobalAddr) -> Self { + Key::BlockGlobal(block_global_addr) + } +} + +#[cfg(any(feature = "std", test))] +impl TryFrom for BlockGlobalAddr { + type Error = (); + + fn try_from(value: Key) -> Result { + if let Key::BlockGlobal(block_global_addr) = value { + Ok(block_global_addr) + } else { + Err(()) + } + } +} + +impl Display for BlockGlobalAddr { + fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { + let tag = self.tag(); + write!(f, "{}", tag,) + } +} + +impl Debug for BlockGlobalAddr { + fn fmt(&self, f: &mut Formatter) -> core::fmt::Result { + match self { + BlockGlobalAddr::BlockTime => write!(f, "BlockTime",), + BlockGlobalAddr::MessageCount => write!(f, "MessageCount",), + BlockGlobalAddr::ProtocolVersion => write!(f, "ProtocolVersion"), + BlockGlobalAddr::AddressableEntity => write!(f, "AddressableEntity"), + } + } +} + +#[cfg(any(feature = "testing", test))] +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> BlockGlobalAddr { + match rng.gen_range(BLOCK_TIME_TAG..=ADDRESSABLE_ENTITY_TAG) { + BLOCK_TIME_TAG => BlockGlobalAddr::BlockTime, + MESSAGE_COUNT_TAG => BlockGlobalAddr::MessageCount, + PROTOCOL_VERSION_TAG => BlockGlobalAddr::ProtocolVersion, + ADDRESSABLE_ENTITY_TAG => BlockGlobalAddr::AddressableEntity, + _ => unreachable!(), + } + } +} + +#[cfg(test)] +mod tests { + use crate::{block::block_global::BlockGlobalAddr, bytesrepr}; + + #[test] + fn serialization_roundtrip() { + let addr = BlockGlobalAddr::BlockTime; + bytesrepr::test_serialization_roundtrip(&addr); + let addr = BlockGlobalAddr::MessageCount; + bytesrepr::test_serialization_roundtrip(&addr); + let addr = BlockGlobalAddr::ProtocolVersion; + bytesrepr::test_serialization_roundtrip(&addr); + let addr = BlockGlobalAddr::AddressableEntity; + bytesrepr::test_serialization_roundtrip(&addr); + } +} + 
+#[cfg(test)] +mod prop_test_gas { + use proptest::prelude::*; + + use crate::{bytesrepr, gens}; + + proptest! { + #[test] + fn test_variant_gas(addr in gens::balance_hold_addr_arb()) { + bytesrepr::test_serialization_roundtrip(&addr); + } + } +} diff --git a/types/src/block/block_hash.rs b/types/src/block/block_hash.rs new file mode 100644 index 0000000000..4046169834 --- /dev/null +++ b/types/src/block/block_hash.rs @@ -0,0 +1,136 @@ +use alloc::{string::String, vec::Vec}; +use core::fmt::{self, Display, Formatter}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use once_cell::sync::Lazy; +#[cfg(any(feature = "testing", test))] +use rand::Rng; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +#[cfg(doc)] +use super::Block; +#[cfg(doc)] +use super::BlockV2; +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + Digest, +}; + +#[cfg(feature = "json-schema")] +static BLOCK_HASH: Lazy = + Lazy::new(|| BlockHash::new(Digest::from([7; BlockHash::LENGTH]))); + +/// The cryptographic hash of a [`Block`]. +#[derive( + Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug, Default, +)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr( + feature = "json-schema", + derive(JsonSchema), + schemars(description = "Hex-encoded cryptographic hash of a block.") +)] +#[serde(deny_unknown_fields)] +pub struct BlockHash(Digest); + +impl BlockHash { + /// The number of bytes in a `BlockHash` digest. + pub const LENGTH: usize = Digest::LENGTH; + + /// Constructs a new `BlockHash`. + pub fn new(hash: Digest) -> Self { + BlockHash(hash) + } + + /// Returns the wrapped inner digest. + pub fn inner(&self) -> &Digest { + &self.0 + } + + /// Hexadecimal representation of the hash. 
+ pub fn to_hex_string(&self) -> String { + base16::encode_lower(self.inner()) + } + + // This method is not intended to be used by third party crates. + #[doc(hidden)] + #[cfg(feature = "json-schema")] + pub fn example() -> &'static Self { + &BLOCK_HASH + } + + /// Returns a new `DeployHash` directly initialized with the provided bytes; no hashing is done. + #[cfg(any(feature = "testing", test))] + pub const fn from_raw(raw_digest: [u8; Self::LENGTH]) -> Self { + BlockHash(Digest::from_raw(raw_digest)) + } + + /// Returns a random `DeployHash`. + #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + let hash = rng.gen::<[u8; Self::LENGTH]>().into(); + BlockHash(hash) + } +} + +impl From for BlockHash { + fn from(digest: Digest) -> Self { + Self(digest) + } +} + +impl From for Digest { + fn from(block_hash: BlockHash) -> Self { + block_hash.0 + } +} + +impl Display for BlockHash { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + write!(formatter, "block-hash({})", self.0) + } +} + +impl AsRef<[u8]> for BlockHash { + fn as_ref(&self) -> &[u8] { + self.0.as_ref() + } +} + +impl ToBytes for BlockHash { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.0.write_bytes(writer) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.0.to_bytes() + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } +} + +impl FromBytes for BlockHash { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + Digest::from_bytes(bytes).map(|(inner, remainder)| (BlockHash(inner), remainder)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + let hash = BlockHash::random(rng); + bytesrepr::test_serialization_roundtrip(&hash); + } +} diff --git a/types/src/block/block_hash_and_height.rs b/types/src/block/block_hash_and_height.rs new file mode 100644 index 0000000000..b9a48796f9 --- 
/dev/null +++ b/types/src/block/block_hash_and_height.rs @@ -0,0 +1,114 @@ +use core::fmt::{self, Display, Formatter}; + +use alloc::vec::Vec; +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::Rng; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use super::BlockHash; +#[cfg(doc)] +use super::BlockV2; +use crate::bytesrepr::{self, FromBytes, ToBytes}; +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; + +/// The block hash and height of a given block. +#[derive( + Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug, Default, +)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub struct BlockHashAndHeight { + /// The hash of the block. + block_hash: BlockHash, + /// The height of the block. + block_height: u64, +} + +impl BlockHashAndHeight { + /// Constructs a new `BlockHashAndHeight`. + pub fn new(block_hash: BlockHash, block_height: u64) -> Self { + Self { + block_hash, + block_height, + } + } + + /// Returns the hash of the block. + pub fn block_hash(&self) -> &BlockHash { + &self.block_hash + } + + /// Returns the height of the block. + pub fn block_height(&self) -> u64 { + self.block_height + } + + /// Returns a random `BlockHashAndHeight`. 
+ #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + Self { + block_hash: BlockHash::random(rng), + block_height: rng.gen(), + } + } +} + +impl Display for BlockHashAndHeight { + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + write!( + formatter, + "{}, height {} ", + self.block_hash, self.block_height + ) + } +} + +impl ToBytes for BlockHashAndHeight { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.block_hash.write_bytes(writer)?; + self.block_height.write_bytes(writer) + } + + fn serialized_length(&self) -> usize { + self.block_hash.serialized_length() + self.block_height.serialized_length() + } +} + +impl FromBytes for BlockHashAndHeight { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (block_hash, remainder) = BlockHash::from_bytes(bytes)?; + let (block_height, remainder) = u64::from_bytes(remainder)?; + Ok(( + BlockHashAndHeight { + block_hash, + block_height, + }, + remainder, + )) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::testing::TestRng; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + + let val = BlockHashAndHeight::random(rng); + bytesrepr::test_serialization_roundtrip(&val); + } +} diff --git a/types/src/block/block_header.rs b/types/src/block/block_header.rs new file mode 100644 index 0000000000..098ec44fd0 --- /dev/null +++ b/types/src/block/block_header.rs @@ -0,0 +1,288 @@ +mod block_header_v1; +mod block_header_v2; + +pub use block_header_v1::BlockHeaderV1; +pub use block_header_v2::BlockHeaderV2; + +use alloc::{collections::BTreeMap, vec::Vec}; +use core::fmt::{self, Display, Formatter}; + +#[cfg(feature = "std")] +use crate::ProtocolConfig; +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = 
"json-schema")] +use schemars::JsonSchema; +#[cfg(any(feature = "std", test))] +use serde::{Deserialize, Serialize}; + +use crate::{ + bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + BlockHash, Digest, EraEnd, EraId, ProtocolVersion, PublicKey, Timestamp, U512, +}; + +const TAG_LENGTH: usize = U8_SERIALIZED_LENGTH; + +/// Tag for block header v1. +pub const BLOCK_HEADER_V1_TAG: u8 = 0; +/// Tag for block header v2. +pub const BLOCK_HEADER_V2_TAG: u8 = 1; + +/// The versioned header portion of a block. It encapsulates different variants of the BlockHeader +/// struct. +#[derive(Clone, Debug, Eq, PartialEq)] +#[cfg_attr(any(feature = "std", test), derive(Serialize, Deserialize))] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[allow(clippy::large_enum_variant)] +pub enum BlockHeader { + /// The legacy, initial version of the header portion of a block. + #[cfg_attr(any(feature = "std", test), serde(rename = "Version1"))] + V1(BlockHeaderV1), + /// The version 2 of the header portion of a block. + #[cfg_attr(any(feature = "std", test), serde(rename = "Version2"))] + V2(BlockHeaderV2), +} + +impl BlockHeader { + /// Returns the hash of this block header. + pub fn block_hash(&self) -> BlockHash { + match self { + BlockHeader::V1(v1) => v1.block_hash(), + BlockHeader::V2(v2) => v2.block_hash(), + } + } + + /// Returns the parent block's hash. + pub fn parent_hash(&self) -> &BlockHash { + match self { + BlockHeader::V1(v1) => v1.parent_hash(), + BlockHeader::V2(v2) => v2.parent_hash(), + } + } + + /// Returns the root hash of global state after the deploys in this block have been executed. + pub fn state_root_hash(&self) -> &Digest { + match self { + BlockHeader::V1(v1) => v1.state_root_hash(), + BlockHeader::V2(v2) => v2.state_root_hash(), + } + } + + /// Returns the hash of the block's body. 
+ pub fn body_hash(&self) -> &Digest { + match self { + BlockHeader::V1(v1) => v1.body_hash(), + BlockHeader::V2(v2) => v2.body_hash(), + } + } + + /// Returns a random bit needed for initializing a future era. + pub fn random_bit(&self) -> bool { + match self { + BlockHeader::V1(v1) => v1.random_bit(), + BlockHeader::V2(v2) => v2.random_bit(), + } + } + + /// Returns a seed needed for initializing a future era. + pub fn accumulated_seed(&self) -> &Digest { + match self { + BlockHeader::V1(v1) => v1.accumulated_seed(), + BlockHeader::V2(v2) => v2.accumulated_seed(), + } + } + + /// Returns the `EraEnd` of a block if it is a switch block. + pub fn clone_era_end(&self) -> Option { + match self { + BlockHeader::V1(v1) => v1.era_end().map(|ee| ee.clone().into()), + BlockHeader::V2(v2) => v2.era_end().map(|ee| ee.clone().into()), + } + } + + /// Returns equivocators if the header is of a switch block. + pub fn maybe_equivocators(&self) -> Option<&[PublicKey]> { + match self { + BlockHeader::V1(v1) => v1.era_end().map(|ee| ee.equivocators()), + BlockHeader::V2(v2) => v2.era_end().map(|ee| ee.equivocators()), + } + } + + /// Returns equivocators if the header is of a switch block. + pub fn maybe_inactive_validators(&self) -> Option<&[PublicKey]> { + match self { + BlockHeader::V1(v1) => v1.era_end().map(|ee| ee.inactive_validators()), + BlockHeader::V2(v2) => v2.era_end().map(|ee| ee.inactive_validators()), + } + } + + /// Returns the timestamp from when the block was proposed. + pub fn timestamp(&self) -> Timestamp { + match self { + BlockHeader::V1(v1) => v1.timestamp(), + BlockHeader::V2(v2) => v2.timestamp(), + } + } + + /// Returns the era ID in which this block was created. + pub fn era_id(&self) -> EraId { + match self { + BlockHeader::V1(v1) => v1.era_id(), + BlockHeader::V2(v2) => v2.era_id(), + } + } + + /// Returns the era ID in which the next block would be created (i.e. this block's era ID, or + /// its successor if this is a switch block). 
+ pub fn next_block_era_id(&self) -> EraId { + match self { + BlockHeader::V1(v1) => v1.next_block_era_id(), + BlockHeader::V2(v2) => v2.next_block_era_id(), + } + } + + /// Returns the height of this block, i.e. the number of ancestors. + pub fn height(&self) -> u64 { + match self { + BlockHeader::V1(v1) => v1.height(), + BlockHeader::V2(v2) => v2.height(), + } + } + + /// Returns the protocol version of the network from when this block was created. + pub fn protocol_version(&self) -> ProtocolVersion { + match self { + BlockHeader::V1(v1) => v1.protocol_version(), + BlockHeader::V2(v2) => v2.protocol_version(), + } + } + + /// Returns `true` if this block is the last one in the current era. + pub fn is_switch_block(&self) -> bool { + match self { + BlockHeader::V1(v1) => v1.is_switch_block(), + BlockHeader::V2(v2) => v2.is_switch_block(), + } + } + + /// Returns the validators for the upcoming era and their respective weights (if this is a + /// switch block). + pub fn next_era_validator_weights(&self) -> Option<&BTreeMap> { + match self { + BlockHeader::V1(v1) => v1.next_era_validator_weights(), + BlockHeader::V2(v2) => v2.next_era_validator_weights(), + } + } + + /// Returns `true` if this block is the Genesis block, i.e. has height 0 and era 0. + pub fn is_genesis(&self) -> bool { + match self { + BlockHeader::V1(v1) => v1.is_genesis(), + BlockHeader::V2(v2) => v2.is_genesis(), + } + } + + /// Returns `true` if this block belongs to the last block before the upgrade to the + /// current protocol version. + #[cfg(feature = "std")] + pub fn is_last_block_before_activation(&self, protocol_config: &ProtocolConfig) -> bool { + match self { + BlockHeader::V1(v1) => v1.is_last_block_before_activation(protocol_config), + BlockHeader::V2(v2) => v2.is_last_block_before_activation(protocol_config), + } + } + + // This method is not intended to be used by third party crates. + // + // Sets the block hash without recomputing it. Must only be called with the correct hash. 
+ #[doc(hidden)] + #[cfg(any(feature = "once_cell", test))] + pub fn set_block_hash(&self, block_hash: BlockHash) { + match self { + BlockHeader::V1(v1) => v1.set_block_hash(block_hash), + BlockHeader::V2(v2) => v2.set_block_hash(block_hash), + } + } +} + +impl Display for BlockHeader { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + BlockHeader::V1(v1) => Display::fmt(&v1, formatter), + BlockHeader::V2(v2) => Display::fmt(&v2, formatter), + } + } +} + +impl From for BlockHeader { + fn from(header: BlockHeaderV1) -> Self { + BlockHeader::V1(header) + } +} + +impl From for BlockHeader { + fn from(header: BlockHeaderV2) -> Self { + BlockHeader::V2(header) + } +} + +impl ToBytes for BlockHeader { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + match self { + BlockHeader::V1(v1) => { + buffer.insert(0, BLOCK_HEADER_V1_TAG); + buffer.extend(v1.to_bytes()?); + } + BlockHeader::V2(v2) => { + buffer.insert(0, BLOCK_HEADER_V2_TAG); + buffer.extend(v2.to_bytes()?); + } + } + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + TAG_LENGTH + + match self { + BlockHeader::V1(v1) => v1.serialized_length(), + BlockHeader::V2(v2) => v2.serialized_length(), + } + } +} + +impl FromBytes for BlockHeader { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + match tag { + BLOCK_HEADER_V1_TAG => { + let (header, remainder): (BlockHeaderV1, _) = FromBytes::from_bytes(remainder)?; + Ok((Self::V1(header), remainder)) + } + BLOCK_HEADER_V2_TAG => { + let (header, remainder): (BlockHeaderV2, _) = FromBytes::from_bytes(remainder)?; + Ok((Self::V2(header), remainder)) + } + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +#[cfg(test)] +mod tests { + use crate::{bytesrepr, testing::TestRng, TestBlockBuilder, TestBlockV1Builder}; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + + let 
block_header_v1 = TestBlockV1Builder::new() + .build_versioned(rng) + .clone_header(); + bytesrepr::test_serialization_roundtrip(&block_header_v1); + + let block_header_v2 = TestBlockBuilder::new().build_versioned(rng).clone_header(); + bytesrepr::test_serialization_roundtrip(&block_header_v2); + } +} diff --git a/types/src/block/block_header/block_header_v1.rs b/types/src/block/block_header/block_header_v1.rs new file mode 100644 index 0000000000..7fb648189b --- /dev/null +++ b/types/src/block/block_header/block_header_v1.rs @@ -0,0 +1,372 @@ +use alloc::{collections::BTreeMap, vec::Vec}; +use core::fmt::{self, Display, Formatter}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use once_cell::sync::Lazy; +#[cfg(any(feature = "once_cell", test))] +use once_cell::sync::OnceCell; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +#[cfg(any(feature = "std", test))] +use serde::{Deserialize, Serialize}; + +use crate::{ + block::{BlockHash, EraEndV1}, + bytesrepr::{self, FromBytes, ToBytes}, + Digest, EraId, ProtocolVersion, PublicKey, Timestamp, U512, +}; +#[cfg(feature = "std")] +use crate::{ActivationPoint, ProtocolConfig}; + +#[cfg(feature = "json-schema")] +static BLOCK_HEADER_V1: Lazy = Lazy::new(|| { + let parent_hash = BlockHash::new(Digest::from([7; Digest::LENGTH])); + let state_root_hash = Digest::from([8; Digest::LENGTH]); + let random_bit = true; + let era_end = Some(EraEndV1::example().clone()); + let timestamp = *Timestamp::example(); + let era_id = EraId::from(1); + let height: u64 = 10; + let protocol_version = ProtocolVersion::V1_0_0; + let accumulated_seed = Digest::hash_pair(Digest::from([9; Digest::LENGTH]), [random_bit as u8]); + let body_hash = Digest::from([5; Digest::LENGTH]); + BlockHeaderV1::new( + parent_hash, + state_root_hash, + body_hash, + random_bit, + accumulated_seed, + era_end, + timestamp, + era_id, + height, + protocol_version, + #[cfg(any(feature = "once_cell", test))] + 
OnceCell::new(), + ) +}); + +/// The header portion of a block. +#[derive(Clone, Debug, Eq)] +#[cfg_attr(any(feature = "std", test), derive(Serialize, Deserialize))] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub struct BlockHeaderV1 { + /// The parent block's hash. + pub(super) parent_hash: BlockHash, + /// The root hash of global state after the deploys in this block have been executed. + pub(super) state_root_hash: Digest, + /// The hash of the block's body. + pub(super) body_hash: Digest, + /// A random bit needed for initializing a future era. + pub(super) random_bit: bool, + /// A seed needed for initializing a future era. + pub(super) accumulated_seed: Digest, + /// The `EraEnd` of a block if it is a switch block. + pub(super) era_end: Option, + /// The timestamp from when the block was proposed. + pub(super) timestamp: Timestamp, + /// The era ID in which this block was created. + pub(super) era_id: EraId, + /// The height of this block, i.e. the number of ancestors. + pub(super) height: u64, + /// The protocol version of the network from when this block was created. + pub(super) protocol_version: ProtocolVersion, + #[cfg_attr(any(all(feature = "std", feature = "once_cell"), test), serde(skip))] + #[cfg_attr( + all(any(feature = "once_cell", test), feature = "datasize"), + data_size(skip) + )] + #[cfg(any(feature = "once_cell", test))] + pub(super) block_hash: OnceCell, +} + +impl BlockHeaderV1 { + /// Returns the hash of this block header. + pub fn block_hash(&self) -> BlockHash { + #[cfg(any(feature = "once_cell", test))] + return *self.block_hash.get_or_init(|| self.compute_block_hash()); + + #[cfg(not(any(feature = "once_cell", test)))] + self.compute_block_hash() + } + + /// Returns the parent block's hash. + pub fn parent_hash(&self) -> &BlockHash { + &self.parent_hash + } + + /// Returns the root hash of global state after the deploys in this block have been executed. 
+ pub fn state_root_hash(&self) -> &Digest { + &self.state_root_hash + } + + /// Returns the hash of the block's body. + pub fn body_hash(&self) -> &Digest { + &self.body_hash + } + + /// Returns a random bit needed for initializing a future era. + pub fn random_bit(&self) -> bool { + self.random_bit + } + + /// Returns a seed needed for initializing a future era. + pub fn accumulated_seed(&self) -> &Digest { + &self.accumulated_seed + } + + /// Returns the `EraEnd` of a block if it is a switch block. + pub fn era_end(&self) -> Option<&EraEndV1> { + self.era_end.as_ref() + } + + /// Returns the timestamp from when the block was proposed. + pub fn timestamp(&self) -> Timestamp { + self.timestamp + } + + /// Returns the era ID in which this block was created. + pub fn era_id(&self) -> EraId { + self.era_id + } + + /// Returns the era ID in which the next block would be created (i.e. this block's era ID, or + /// its successor if this is a switch block). + pub fn next_block_era_id(&self) -> EraId { + if self.era_end.is_some() { + self.era_id.successor() + } else { + self.era_id + } + } + + /// Returns the height of this block, i.e. the number of ancestors. + pub fn height(&self) -> u64 { + self.height + } + + /// Returns the protocol version of the network from when this block was created. + pub fn protocol_version(&self) -> ProtocolVersion { + self.protocol_version + } + + /// Returns `true` if this block is the last one in the current era. + pub fn is_switch_block(&self) -> bool { + self.era_end.is_some() + } + + /// Returns the validators for the upcoming era and their respective weights (if this is a + /// switch block). + pub fn next_era_validator_weights(&self) -> Option<&BTreeMap> { + self.era_end + .as_ref() + .map(|era_end| era_end.next_era_validator_weights()) + } + + /// Returns `true` if this block is the Genesis block, i.e. has height 0 and era 0. 
+ pub fn is_genesis(&self) -> bool { + self.era_id().is_genesis() && self.height() == 0 + } + + /// Returns `true` if this block belongs to the last block before the upgrade to the + /// current protocol version. + #[cfg(feature = "std")] + pub fn is_last_block_before_activation(&self, protocol_config: &ProtocolConfig) -> bool { + protocol_config.version > self.protocol_version + && self.is_switch_block() + && ActivationPoint::EraId(self.next_block_era_id()) == protocol_config.activation_point + } + + pub(crate) fn compute_block_hash(&self) -> BlockHash { + let serialized_header = self + .to_bytes() + .unwrap_or_else(|error| panic!("should serialize block header: {}", error)); + BlockHash::new(Digest::hash(serialized_header)) + } + + // This method is not intended to be used by third party crates. + #[doc(hidden)] + #[allow(clippy::too_many_arguments)] + pub fn new( + parent_hash: BlockHash, + state_root_hash: Digest, + body_hash: Digest, + random_bit: bool, + accumulated_seed: Digest, + era_end: Option, + timestamp: Timestamp, + era_id: EraId, + height: u64, + protocol_version: ProtocolVersion, + #[cfg(any(feature = "once_cell", test))] block_hash: OnceCell, + ) -> Self { + BlockHeaderV1 { + parent_hash, + state_root_hash, + body_hash, + random_bit, + accumulated_seed, + era_end, + timestamp, + era_id, + height, + protocol_version, + #[cfg(any(feature = "once_cell", test))] + block_hash, + } + } + + // This method is not intended to be used by third party crates. + // + // Sets the block hash without recomputing it. Must only be called with the correct hash. + #[doc(hidden)] + #[cfg(any(feature = "once_cell", test))] + pub fn set_block_hash(&self, block_hash: BlockHash) { + self.block_hash.get_or_init(|| block_hash); + } + + // This method is not intended to be used by third party crates. 
+ #[doc(hidden)] + #[cfg(feature = "json-schema")] + pub fn example() -> &'static Self { + &BLOCK_HEADER_V1 + } + + #[cfg(test)] + pub(crate) fn set_body_hash(&mut self, new_body_hash: Digest) { + self.body_hash = new_body_hash; + } +} + +impl PartialEq for BlockHeaderV1 { + fn eq(&self, other: &BlockHeaderV1) -> bool { + // Destructure to make sure we don't accidentally omit fields. + #[cfg(any(feature = "once_cell", test))] + let BlockHeaderV1 { + parent_hash, + state_root_hash, + body_hash, + random_bit, + accumulated_seed, + era_end, + timestamp, + era_id, + height, + protocol_version, + block_hash: _, + } = self; + #[cfg(not(any(feature = "once_cell", test)))] + let BlockHeaderV1 { + parent_hash, + state_root_hash, + body_hash, + random_bit, + accumulated_seed, + era_end, + timestamp, + era_id, + height, + protocol_version, + } = self; + *parent_hash == other.parent_hash + && *state_root_hash == other.state_root_hash + && *body_hash == other.body_hash + && *random_bit == other.random_bit + && *accumulated_seed == other.accumulated_seed + && *era_end == other.era_end + && *timestamp == other.timestamp + && *era_id == other.era_id + && *height == other.height + && *protocol_version == other.protocol_version + } +} + +impl Display for BlockHeaderV1 { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + write!( + formatter, + "block header #{}, {}, timestamp {}, {}, parent {}, post-state hash {}, body hash {}, \ + random bit {}, protocol version: {}", + self.height, + self.block_hash(), + self.timestamp, + self.era_id, + self.parent_hash.inner(), + self.state_root_hash, + self.body_hash, + self.random_bit, + self.protocol_version, + )?; + if let Some(era_end) = &self.era_end { + write!(formatter, ", era_end: {}", era_end)?; + } + Ok(()) + } +} + +impl ToBytes for BlockHeaderV1 { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.parent_hash.write_bytes(writer)?; + self.state_root_hash.write_bytes(writer)?; + 
self.body_hash.write_bytes(writer)?; + self.random_bit.write_bytes(writer)?; + self.accumulated_seed.write_bytes(writer)?; + self.era_end.write_bytes(writer)?; + self.timestamp.write_bytes(writer)?; + self.era_id.write_bytes(writer)?; + self.height.write_bytes(writer)?; + self.protocol_version.write_bytes(writer) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.parent_hash.serialized_length() + + self.state_root_hash.serialized_length() + + self.body_hash.serialized_length() + + self.random_bit.serialized_length() + + self.accumulated_seed.serialized_length() + + self.era_end.serialized_length() + + self.timestamp.serialized_length() + + self.era_id.serialized_length() + + self.height.serialized_length() + + self.protocol_version.serialized_length() + } +} + +impl FromBytes for BlockHeaderV1 { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (parent_hash, remainder) = BlockHash::from_bytes(bytes)?; + let (state_root_hash, remainder) = Digest::from_bytes(remainder)?; + let (body_hash, remainder) = Digest::from_bytes(remainder)?; + let (random_bit, remainder) = bool::from_bytes(remainder)?; + let (accumulated_seed, remainder) = Digest::from_bytes(remainder)?; + let (era_end, remainder) = Option::from_bytes(remainder)?; + let (timestamp, remainder) = Timestamp::from_bytes(remainder)?; + let (era_id, remainder) = EraId::from_bytes(remainder)?; + let (height, remainder) = u64::from_bytes(remainder)?; + let (protocol_version, remainder) = ProtocolVersion::from_bytes(remainder)?; + let block_header = BlockHeaderV1 { + parent_hash, + state_root_hash, + body_hash, + random_bit, + accumulated_seed, + era_end, + timestamp, + era_id, + height, + protocol_version, + #[cfg(any(feature = "once_cell", test))] + block_hash: OnceCell::new(), + }; + Ok((block_header, remainder)) + } +} diff 
// types/src/block/block_header/block_header_v2.rs

use alloc::{collections::BTreeMap, vec::Vec};
use core::fmt::{self, Display, Formatter};

#[cfg(feature = "datasize")]
use datasize::DataSize;
#[cfg(feature = "json-schema")]
use once_cell::sync::Lazy;
#[cfg(any(feature = "once_cell", test))]
use once_cell::sync::OnceCell;
#[cfg(feature = "json-schema")]
use schemars::JsonSchema;
#[cfg(any(feature = "std", test))]
use serde::{Deserialize, Serialize};

use crate::{
    bytesrepr::{self, FromBytes, ToBytes},
    BlockHash, Digest, EraEndV2, EraId, ProtocolVersion, PublicKey, Timestamp, U512,
};
#[cfg(feature = "std")]
use crate::{ActivationPoint, ProtocolConfig};

// Canned example header used by the JSON schema / `example()` accessor.
#[cfg(feature = "json-schema")]
static BLOCK_HEADER_V2: Lazy<BlockHeaderV2> = Lazy::new(|| {
    let parent_hash = BlockHash::new(Digest::from([7; Digest::LENGTH]));
    let state_root_hash = Digest::from([8; Digest::LENGTH]);
    let random_bit = true;
    let era_end = Some(EraEndV2::example().clone());
    let timestamp = *Timestamp::example();
    let era_id = EraId::from(1);
    let height: u64 = 10;
    let current_gas_price: u8 = 1;
    let protocol_version = ProtocolVersion::V1_0_0;
    let accumulated_seed = Digest::hash_pair(Digest::from([9; Digest::LENGTH]), [random_bit as u8]);
    let body_hash = Digest::from([5; Digest::LENGTH]);
    let proposer = PublicKey::example().clone();
    let last_switch_block_hash = BlockHash::new(Digest::from([9; Digest::LENGTH]));
    BlockHeaderV2::new(
        parent_hash,
        state_root_hash,
        body_hash,
        random_bit,
        accumulated_seed,
        era_end,
        timestamp,
        era_id,
        height,
        protocol_version,
        proposer,
        current_gas_price,
        Some(last_switch_block_hash),
        #[cfg(any(feature = "once_cell", test))]
        OnceCell::new(),
    )
});

/// The header portion of a block.
#[derive(Clone, Debug, Eq)]
#[cfg_attr(any(feature = "std", test), derive(Serialize, Deserialize))]
#[cfg_attr(feature = "datasize", derive(DataSize))]
#[cfg_attr(feature = "json-schema", derive(JsonSchema))]
pub struct BlockHeaderV2 {
    /// The parent block's hash.
    pub(super) parent_hash: BlockHash,
    /// The root hash of global state after the deploys in this block have been executed.
    pub(super) state_root_hash: Digest,
    /// The hash of the block's body.
    pub(super) body_hash: Digest,
    /// A random bit needed for initializing a future era.
    pub(super) random_bit: bool,
    /// A seed needed for initializing a future era.
    pub(super) accumulated_seed: Digest,
    /// The `EraEnd` of a block if it is a switch block.
    // NOTE(review): element type was stripped in the mangled source; restored as `EraEndV2`
    // (the type imported above and used throughout this file's accessors).
    pub(super) era_end: Option<EraEndV2>,
    /// The timestamp from when the block was proposed.
    pub(super) timestamp: Timestamp,
    /// The era ID in which this block was created.
    pub(super) era_id: EraId,
    /// The height of this block, i.e. the number of ancestors.
    pub(super) height: u64,
    /// The protocol version of the network from when this block was created.
    pub(super) protocol_version: ProtocolVersion,
    /// The public key of the validator which proposed the block.
    pub(super) proposer: PublicKey,
    /// The gas price of the era.
    pub(super) current_gas_price: u8,
    /// The most recent switch block hash.
    pub(super) last_switch_block_hash: Option<BlockHash>,
    // Memoized block hash; never serialized and excluded from equality/datasize.
    #[cfg_attr(any(all(feature = "std", feature = "once_cell"), test), serde(skip))]
    #[cfg_attr(
        all(any(feature = "once_cell", test), feature = "datasize"),
        data_size(skip)
    )]
    #[cfg(any(feature = "once_cell", test))]
    pub(super) block_hash: OnceCell<BlockHash>,
}

impl BlockHeaderV2 {
    /// Returns the hash of this block header.
+ pub fn block_hash(&self) -> BlockHash { + #[cfg(any(feature = "once_cell", test))] + return *self.block_hash.get_or_init(|| self.compute_block_hash()); + + #[cfg(not(any(feature = "once_cell", test)))] + self.compute_block_hash() + } + + /// Returns the parent block's hash. + pub fn parent_hash(&self) -> &BlockHash { + &self.parent_hash + } + + /// Returns the root hash of global state after the deploys in this block have been executed. + pub fn state_root_hash(&self) -> &Digest { + &self.state_root_hash + } + + /// Returns the hash of the block's body. + pub fn body_hash(&self) -> &Digest { + &self.body_hash + } + + /// Returns a random bit needed for initializing a future era. + pub fn random_bit(&self) -> bool { + self.random_bit + } + + /// Returns a seed needed for initializing a future era. + pub fn accumulated_seed(&self) -> &Digest { + &self.accumulated_seed + } + + /// Returns the `EraEnd` of a block if it is a switch block. + pub fn era_end(&self) -> Option<&EraEndV2> { + self.era_end.as_ref() + } + + /// Returns the timestamp from when the block was proposed. + pub fn timestamp(&self) -> Timestamp { + self.timestamp + } + + /// Returns the era ID in which this block was created. + pub fn era_id(&self) -> EraId { + self.era_id + } + + /// Returns the era ID in which the next block would be created (i.e. this block's era ID, or + /// its successor if this is a switch block). + pub fn next_block_era_id(&self) -> EraId { + if self.era_end.is_some() { + self.era_id.successor() + } else { + self.era_id + } + } + + /// Returns the height of this block, i.e. the number of ancestors. + pub fn height(&self) -> u64 { + self.height + } + + /// Returns the protocol version of the network from when this block was created. + pub fn protocol_version(&self) -> ProtocolVersion { + self.protocol_version + } + + /// Returns `true` if this block is the last one in the current era. 
+ pub fn is_switch_block(&self) -> bool { + self.era_end.is_some() + } + + /// Returns the public key of the validator which proposed the block. + pub fn proposer(&self) -> &PublicKey { + &self.proposer + } + + /// Returns the validators for the upcoming era and their respective weights (if this is a + /// switch block). + pub fn next_era_validator_weights(&self) -> Option<&BTreeMap> { + self.era_end + .as_ref() + .map(|era_end| era_end.next_era_validator_weights()) + } + + /// Returns `true` if this block is the Genesis block, i.e. has height 0 and era 0. + pub fn is_genesis(&self) -> bool { + self.era_id().is_genesis() && self.height() == 0 + } + + /// Returns the gas price for the given block. + pub fn current_gas_price(&self) -> u8 { + self.current_gas_price + } + + /// Returns the hash for the last relevant switch block. + pub fn last_switch_block_hash(&self) -> Option { + self.last_switch_block_hash + } + + /// Returns `true` if this block belongs to the last block before the upgrade to the + /// current protocol version. + #[cfg(feature = "std")] + pub fn is_last_block_before_activation(&self, protocol_config: &ProtocolConfig) -> bool { + protocol_config.version > self.protocol_version + && self.is_switch_block() + && ActivationPoint::EraId(self.next_block_era_id()) == protocol_config.activation_point + } + + pub(crate) fn compute_block_hash(&self) -> BlockHash { + let serialized_header = self + .to_bytes() + .unwrap_or_else(|error| panic!("should serialize block header: {}", error)); + BlockHash::new(Digest::hash(serialized_header)) + } + + // This method is not intended to be used by third party crates. 
+ #[doc(hidden)] + #[allow(clippy::too_many_arguments)] + pub fn new( + parent_hash: BlockHash, + state_root_hash: Digest, + body_hash: Digest, + random_bit: bool, + accumulated_seed: Digest, + era_end: Option, + timestamp: Timestamp, + era_id: EraId, + height: u64, + protocol_version: ProtocolVersion, + proposer: PublicKey, + current_gas_price: u8, + last_switch_block_hash: Option, + #[cfg(any(feature = "once_cell", test))] block_hash: OnceCell, + ) -> Self { + BlockHeaderV2 { + parent_hash, + state_root_hash, + body_hash, + random_bit, + accumulated_seed, + era_end, + timestamp, + era_id, + height, + protocol_version, + proposer, + current_gas_price, + last_switch_block_hash, + #[cfg(any(feature = "once_cell", test))] + block_hash, + } + } + + // This method is not intended to be used by third party crates. + // + // Sets the block hash without recomputing it. Must only be called with the correct hash. + #[doc(hidden)] + #[cfg(any(feature = "once_cell", test))] + pub fn set_block_hash(&self, block_hash: BlockHash) { + self.block_hash.get_or_init(|| block_hash); + } + + // This method is not intended to be used by third party crates. + #[doc(hidden)] + #[cfg(feature = "json-schema")] + pub fn example() -> &'static Self { + &BLOCK_HEADER_V2 + } + + #[cfg(test)] + pub(crate) fn set_body_hash(&mut self, new_body_hash: Digest) { + self.body_hash = new_body_hash; + } +} + +impl PartialEq for BlockHeaderV2 { + fn eq(&self, other: &BlockHeaderV2) -> bool { + // Destructure to make sure we don't accidentally omit fields. 
+ #[cfg(any(feature = "once_cell", test))] + let BlockHeaderV2 { + parent_hash, + state_root_hash, + body_hash, + random_bit, + accumulated_seed, + era_end, + timestamp, + era_id, + height, + protocol_version, + proposer, + current_gas_price, + last_switch_block_hash, + block_hash: _, + } = self; + #[cfg(not(any(feature = "once_cell", test)))] + let BlockHeaderV2 { + parent_hash, + state_root_hash, + body_hash, + random_bit, + accumulated_seed, + era_end, + timestamp, + era_id, + height, + protocol_version, + proposer, + current_gas_price, + last_switch_block_hash, + } = self; + *parent_hash == other.parent_hash + && *state_root_hash == other.state_root_hash + && *body_hash == other.body_hash + && *random_bit == other.random_bit + && *accumulated_seed == other.accumulated_seed + && *era_end == other.era_end + && *timestamp == other.timestamp + && *era_id == other.era_id + && *height == other.height + && *protocol_version == other.protocol_version + && *proposer == other.proposer + && *current_gas_price == other.current_gas_price + && *last_switch_block_hash == other.last_switch_block_hash + } +} + +impl Display for BlockHeaderV2 { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + write!( + formatter, + "block header #{}, {}, timestamp {}, {}, parent {}, post-state hash {}, body hash {}, \ + random bit {}, protocol version: {}, proposed by {}, current_gas_price: {}", + self.height, + self.block_hash(), + self.timestamp, + self.era_id, + self.parent_hash.inner(), + self.state_root_hash, + self.body_hash, + self.random_bit, + self.protocol_version, + self.proposer, + self.current_gas_price, + )?; + if let Some(last_switch_block_hash) = &self.last_switch_block_hash { + write!( + formatter, + ", last_switch_block_hash: {}", + last_switch_block_hash + )?; + } + if let Some(era_end) = &self.era_end { + write!(formatter, ", era_end: {}", era_end)?; + } + Ok(()) + } +} + +impl ToBytes for BlockHeaderV2 { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), 
bytesrepr::Error> { + self.parent_hash.write_bytes(writer)?; + self.state_root_hash.write_bytes(writer)?; + self.body_hash.write_bytes(writer)?; + self.random_bit.write_bytes(writer)?; + self.accumulated_seed.write_bytes(writer)?; + self.era_end.write_bytes(writer)?; + self.timestamp.write_bytes(writer)?; + self.era_id.write_bytes(writer)?; + self.height.write_bytes(writer)?; + self.protocol_version.write_bytes(writer)?; + self.proposer.write_bytes(writer)?; + self.current_gas_price.write_bytes(writer)?; + self.last_switch_block_hash.write_bytes(writer) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.parent_hash.serialized_length() + + self.state_root_hash.serialized_length() + + self.body_hash.serialized_length() + + self.random_bit.serialized_length() + + self.accumulated_seed.serialized_length() + + self.era_end.serialized_length() + + self.timestamp.serialized_length() + + self.era_id.serialized_length() + + self.height.serialized_length() + + self.protocol_version.serialized_length() + + self.proposer.serialized_length() + + self.current_gas_price.serialized_length() + + self.last_switch_block_hash.serialized_length() + } +} + +impl FromBytes for BlockHeaderV2 { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (parent_hash, remainder) = BlockHash::from_bytes(bytes)?; + let (state_root_hash, remainder) = Digest::from_bytes(remainder)?; + let (body_hash, remainder) = Digest::from_bytes(remainder)?; + let (random_bit, remainder) = bool::from_bytes(remainder)?; + let (accumulated_seed, remainder) = Digest::from_bytes(remainder)?; + let (era_end, remainder) = Option::from_bytes(remainder)?; + let (timestamp, remainder) = Timestamp::from_bytes(remainder)?; + let (era_id, remainder) = EraId::from_bytes(remainder)?; + let (height, remainder) = u64::from_bytes(remainder)?; + 
let (protocol_version, remainder) = ProtocolVersion::from_bytes(remainder)?; + let (proposer, remainder) = PublicKey::from_bytes(remainder)?; + let (current_gas_price, remainder) = u8::from_bytes(remainder)?; + let (last_switch_block_hash, remainder) = Option::from_bytes(remainder)?; + let block_header = BlockHeaderV2 { + parent_hash, + state_root_hash, + body_hash, + random_bit, + accumulated_seed, + era_end, + timestamp, + era_id, + height, + protocol_version, + proposer, + current_gas_price, + last_switch_block_hash, + #[cfg(any(feature = "once_cell", test))] + block_hash: OnceCell::new(), + }; + Ok((block_header, remainder)) + } +} diff --git a/types/src/block/block_header_with_signatures.rs b/types/src/block/block_header_with_signatures.rs new file mode 100644 index 0000000000..f370f14054 --- /dev/null +++ b/types/src/block/block_header_with_signatures.rs @@ -0,0 +1,138 @@ +use core::fmt::{self, Display, Formatter}; +#[cfg(feature = "std")] +use std::error::Error as StdError; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "std", test))] +use serde::{Deserialize, Serialize}; + +use super::{BlockHash, BlockHeader, BlockSignatures}; +use crate::EraId; +#[cfg(doc)] +use crate::Signature; + +/// An error which can result from validating a [`BlockHeaderWithSignatures`]. +#[derive(Copy, Clone, Eq, PartialEq, Debug)] +#[non_exhaustive] +pub enum BlockHeaderWithSignaturesValidationError { + /// Mismatch between block hash in [`BlockHeader`] and [`BlockSignatures`]. + BlockHashMismatch { + /// The block hash in the `BlockHeader`. + block_hash_in_header: BlockHash, + /// The block hash in the `BlockSignatures`. + block_hash_in_signatures: BlockHash, + }, + /// Mismatch between era ID in [`BlockHeader`] and [`BlockSignatures`]. + EraIdMismatch { + /// The era ID in the `BlockHeader`. + era_id_in_header: EraId, + /// The era ID in the `BlockSignatures`. 
+ era_id_in_signatures: EraId, + }, +} + +impl Display for BlockHeaderWithSignaturesValidationError { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + BlockHeaderWithSignaturesValidationError::BlockHashMismatch { + block_hash_in_header: expected, + block_hash_in_signatures: actual, + } => { + write!( + formatter, + "block hash mismatch - header: {expected}, signatures: {actual}", + ) + } + BlockHeaderWithSignaturesValidationError::EraIdMismatch { + era_id_in_header: expected, + era_id_in_signatures: actual, + } => { + write!( + formatter, + "era id mismatch - header: {expected}, signatures: {actual}", + ) + } + } + } +} + +#[cfg(feature = "std")] +impl StdError for BlockHeaderWithSignaturesValidationError {} + +/// A block header and collection of signatures of a given block. +#[derive(Clone, Eq, PartialEq, Debug)] +#[cfg_attr(any(feature = "std", test), derive(Serialize, Deserialize))] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct BlockHeaderWithSignatures { + block_header: BlockHeader, + block_signatures: BlockSignatures, +} + +impl BlockHeaderWithSignatures { + /// Returns a new `BlockHeaderWithSignatures`. + pub fn new(block_header: BlockHeader, block_signatures: BlockSignatures) -> Self { + BlockHeaderWithSignatures { + block_header, + block_signatures, + } + } + + /// Returns the block header. + pub fn block_header(&self) -> &BlockHeader { + &self.block_header + } + + /// Returns the block signatures. + pub fn block_signatures(&self) -> &BlockSignatures { + &self.block_signatures + } + + /// Returns `Ok` if and only if the block hash and era ID in the `BlockHeader` are identical to + /// those in the `BlockSignatures`. + /// + /// Note that no cryptographic verification of the contained signatures is performed. For this, + /// see [`BlockSignatures::is_verified`]. 
+ pub fn is_valid(&self) -> Result<(), BlockHeaderWithSignaturesValidationError> { + if self.block_header.block_hash() != *self.block_signatures.block_hash() { + return Err( + BlockHeaderWithSignaturesValidationError::BlockHashMismatch { + block_hash_in_header: self.block_header.block_hash(), + block_hash_in_signatures: *self.block_signatures.block_hash(), + }, + ); + } + if self.block_header.era_id() != self.block_signatures.era_id() { + return Err(BlockHeaderWithSignaturesValidationError::EraIdMismatch { + era_id_in_header: self.block_header.era_id(), + era_id_in_signatures: self.block_signatures.era_id(), + }); + } + Ok(()) + } + + /// Sets the era ID contained in `block_signatures` to its max value, rendering it and hence + /// `self` invalid (assuming the relevant era ID for this `BlockHeaderWithSignatures` wasn't + /// already the max value). + #[cfg(any(feature = "testing", test))] + pub fn invalidate_era(&mut self) { + self.block_signatures.invalidate_era() + } + + /// Replaces the signature field of the last `block_signatures` entry with the `System` variant + /// of [`crate::crypto::Signature`], rendering that entry invalid. + /// + /// Note that [`Self::is_valid`] will be unaffected by this as it only checks for equality in + /// the block hash and era ID of the header and signatures; no cryptographic verification is + /// performed. 
+ #[cfg(any(feature = "testing", test))] + pub fn invalidate_last_signature(&mut self) { + self.block_signatures.invalidate_last_signature() + } +} + +impl Display for BlockHeaderWithSignatures { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + write!(f, "{}, and {}", self.block_header, self.block_signatures) + } +} diff --git a/types/src/block/block_identifier.rs b/types/src/block/block_identifier.rs new file mode 100644 index 0000000000..dd8e1329e7 --- /dev/null +++ b/types/src/block/block_identifier.rs @@ -0,0 +1,140 @@ +use alloc::vec::Vec; +use core::num::ParseIntError; + +#[cfg(any(feature = "testing", test))] +use rand::Rng; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + BlockHash, Digest, DigestError, +}; + +const HASH_TAG: u8 = 0; +const HEIGHT_TAG: u8 = 1; + +/// Identifier for possible ways to retrieve a block. +#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq)] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub enum BlockIdentifier { + /// Identify and retrieve the block with its hash. + Hash(BlockHash), + /// Identify and retrieve the block with its height. + Height(u64), +} + +impl BlockIdentifier { + /// Random. 
+ #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + match rng.gen_range(0..1) { + 0 => Self::Hash(BlockHash::random(rng)), + 1 => Self::Height(rng.gen()), + _ => panic!(), + } + } +} + +impl FromBytes for BlockIdentifier { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + match bytes.split_first() { + Some((&HASH_TAG, rem)) => { + let (hash, rem) = FromBytes::from_bytes(rem)?; + Ok((BlockIdentifier::Hash(hash), rem)) + } + Some((&HEIGHT_TAG, rem)) => { + let (height, rem) = FromBytes::from_bytes(rem)?; + Ok((BlockIdentifier::Height(height), rem)) + } + Some(_) | None => Err(bytesrepr::Error::Formatting), + } + } +} + +impl ToBytes for BlockIdentifier { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + BlockIdentifier::Hash(hash) => { + writer.push(HASH_TAG); + hash.write_bytes(writer)?; + } + BlockIdentifier::Height(height) => { + writer.push(HEIGHT_TAG); + height.write_bytes(writer)?; + } + } + Ok(()) + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + + match self { + BlockIdentifier::Hash(hash) => hash.serialized_length(), + BlockIdentifier::Height(height) => height.serialized_length(), + } + } +} + +impl core::str::FromStr for BlockIdentifier { + type Err = ParseBlockIdentifierError; + + fn from_str(maybe_block_identifier: &str) -> Result { + if maybe_block_identifier.is_empty() { + return Err(ParseBlockIdentifierError::EmptyString); + } + + if maybe_block_identifier.len() == (Digest::LENGTH * 2) { + let hash = Digest::from_hex(maybe_block_identifier) + .map_err(ParseBlockIdentifierError::FromHexError)?; + Ok(BlockIdentifier::Hash(BlockHash::new(hash))) + } else { + let height = maybe_block_identifier + .parse() + .map_err(ParseBlockIdentifierError::ParseIntError)?; + 
Ok(BlockIdentifier::Height(height)) + } + } +} + +/// Represents errors that can arise when parsing a [`BlockIdentifier`]. +#[derive(Debug)] +#[cfg_attr(feature = "std", derive(thiserror::Error))] +pub enum ParseBlockIdentifierError { + /// String was empty. + #[cfg_attr( + feature = "std", + error("Empty string is not a valid block identifier.") + )] + EmptyString, + /// Couldn't parse a height value. + #[cfg_attr(feature = "std", error("Unable to parse height from string. {0}"))] + ParseIntError(ParseIntError), + /// Couldn't parse a blake2bhash. + #[cfg_attr(feature = "std", error("Unable to parse digest from string. {0}"))] + FromHexError(DigestError), +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::testing::TestRng; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + + let val = BlockIdentifier::random(rng); + bytesrepr::test_serialization_roundtrip(&val); + } +} diff --git a/types/src/block/block_signatures.rs b/types/src/block/block_signatures.rs new file mode 100644 index 0000000000..6f3b5a5751 --- /dev/null +++ b/types/src/block/block_signatures.rs @@ -0,0 +1,429 @@ +mod block_signatures_v1; +mod block_signatures_v2; + +pub use block_signatures_v1::BlockSignaturesV1; +pub use block_signatures_v2::BlockSignaturesV2; + +use alloc::{collections::BTreeMap, vec::Vec}; +use core::{ + fmt::{self, Display, Formatter}, + hash::Hash, +}; +use itertools::Either; +#[cfg(feature = "std")] +use std::error::Error as StdError; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::Rng; +#[cfg(any(feature = "std", test))] +use serde::{Deserialize, Serialize}; + +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + crypto, BlockHash, ChainNameDigest, EraId, FinalitySignature, PublicKey, Signature, +}; + +const TAG_LENGTH: usize = U8_SERIALIZED_LENGTH; + +/// Tag for block signatures v1. 
+pub const BLOCK_SIGNATURES_V1_TAG: u8 = 0; +/// Tag for block signatures v2. +pub const BLOCK_SIGNATURES_V2_TAG: u8 = 1; + +/// A collection of signatures for a single block, along with the associated block's hash and era +/// ID. +#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Debug)] +#[cfg_attr(any(feature = "std", test), derive(Serialize, Deserialize))] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub enum BlockSignatures { + /// Version 1 of the block signatures. + V1(BlockSignaturesV1), + /// Version 2 of the block signatures. + V2(BlockSignaturesV2), +} + +impl BlockSignatures { + /// Returns the block hash of the associated block. + pub fn block_hash(&self) -> &BlockHash { + match self { + BlockSignatures::V1(block_signatures) => block_signatures.block_hash(), + BlockSignatures::V2(block_signatures) => block_signatures.block_hash(), + } + } + + /// Returns the era id of the associated block. + pub fn era_id(&self) -> EraId { + match self { + BlockSignatures::V1(block_signatures) => block_signatures.era_id(), + BlockSignatures::V2(block_signatures) => block_signatures.era_id(), + } + } + + /// Returns the finality signature associated with the given public key, if available. + pub fn finality_signature(&self, public_key: &PublicKey) -> Option { + match self { + BlockSignatures::V1(block_signatures) => block_signatures + .finality_signature(public_key) + .map(FinalitySignature::V1), + BlockSignatures::V2(block_signatures) => block_signatures + .finality_signature(public_key) + .map(FinalitySignature::V2), + } + } + + /// Returns `true` if there is a signature associated with the given public key. 
+ pub fn has_finality_signature(&self, public_key: &PublicKey) -> bool { + match self { + BlockSignatures::V1(block_signatures) => { + block_signatures.has_finality_signature(public_key) + } + BlockSignatures::V2(block_signatures) => { + block_signatures.has_finality_signature(public_key) + } + } + } + + /// Returns an iterator over all the signatures. + pub fn finality_signatures(&self) -> impl Iterator + '_ { + match self { + BlockSignatures::V1(block_signatures) => Either::Left( + block_signatures + .finality_signatures() + .map(FinalitySignature::V1), + ), + BlockSignatures::V2(block_signatures) => Either::Right( + block_signatures + .finality_signatures() + .map(FinalitySignature::V2), + ), + } + } + + /// Returns an `BTreeMap` of public keys to signatures. + pub fn proofs(&self) -> &BTreeMap { + match self { + BlockSignatures::V1(block_signatures) => &block_signatures.proofs, + BlockSignatures::V2(block_signatures) => &block_signatures.proofs, + } + } + + /// Returns an iterator over all the validator public keys. + pub fn signers(&self) -> impl Iterator + '_ { + match self { + BlockSignatures::V1(block_signatures) => Either::Left(block_signatures.signers()), + BlockSignatures::V2(block_signatures) => Either::Right(block_signatures.signers()), + } + } + + /// Returns the number of signatures in the collection. + pub fn len(&self) -> usize { + match self { + BlockSignatures::V1(block_signatures) => block_signatures.len(), + BlockSignatures::V2(block_signatures) => block_signatures.len(), + } + } + + /// Returns `true` if there are no signatures in the collection. + pub fn is_empty(&self) -> bool { + match self { + BlockSignatures::V1(block_signatures) => block_signatures.is_empty(), + BlockSignatures::V2(block_signatures) => block_signatures.is_empty(), + } + } + + /// Merges the collection of signatures in `other` into `self`. + /// + /// Returns an error if the block hashes, block heights, era IDs, or chain name hashes do not + /// match. 
+ pub fn merge(&mut self, mut other: Self) -> Result<(), BlockSignaturesMergeError> { + if self.block_hash() != other.block_hash() { + return Err(BlockSignaturesMergeError::BlockHashMismatch { + self_hash: *self.block_hash(), + other_hash: *other.block_hash(), + }); + } + + if self.era_id() != other.era_id() { + return Err(BlockSignaturesMergeError::EraIdMismatch { + self_era_id: self.era_id(), + other_era_id: other.era_id(), + }); + } + + match (self, &mut other) { + (BlockSignatures::V1(self_), BlockSignatures::V1(other)) => { + self_.proofs.append(&mut other.proofs); + } + (BlockSignatures::V2(self_), BlockSignatures::V2(other)) => { + if self_.block_height != other.block_height { + return Err(BlockSignaturesMergeError::BlockHeightMismatch { + self_height: self_.block_height, + other_height: other.block_height, + }); + } + + if self_.chain_name_hash != other.chain_name_hash { + return Err(BlockSignaturesMergeError::ChainNameHashMismatch { + self_chain_name_hash: self_.chain_name_hash, + other_chain_name_hash: other.chain_name_hash, + }); + } + + self_.proofs.append(&mut other.proofs); + } + _ => return Err(BlockSignaturesMergeError::VersionMismatch), + } + + Ok(()) + } + + /// Returns `Ok` if and only if all the signatures are cryptographically valid. + pub fn is_verified(&self) -> Result<(), crypto::Error> { + match self { + BlockSignatures::V1(block_signatures) => block_signatures.is_verified(), + BlockSignatures::V2(block_signatures) => block_signatures.is_verified(), + } + } + + /// Converts self into a `BTreeMap` of public keys to signatures. + pub fn into_proofs(self) -> BTreeMap { + match self { + BlockSignatures::V1(block_signatures) => block_signatures.proofs, + BlockSignatures::V2(block_signatures) => block_signatures.proofs, + } + } + + /// Inserts a new signature. 
+ pub fn insert_signature(&mut self, public_key: PublicKey, signature: Signature) { + match self { + BlockSignatures::V1(block_signatures) => { + block_signatures.insert_signature(public_key, signature) + } + BlockSignatures::V2(block_signatures) => { + block_signatures.insert_signature(public_key, signature) + } + } + } + + /// Removes a signature corresponding to the specified key. + pub fn remove_signature(&mut self, public_key: &PublicKey) -> Option { + match self { + BlockSignatures::V1(block_signatures) => block_signatures.proofs.remove(public_key), + BlockSignatures::V2(block_signatures) => block_signatures.proofs.remove(public_key), + } + } + + /// Sets the era ID to its max value, rendering it and hence `self` invalid (assuming the + /// relevant era ID for this `BlockHeaderWithSignatures` wasn't already the max value). + #[cfg(any(feature = "testing", test))] + pub fn invalidate_era(&mut self) { + match self { + BlockSignatures::V1(block_signatures) => block_signatures.era_id = EraId::new(u64::MAX), + BlockSignatures::V2(block_signatures) => block_signatures.era_id = EraId::new(u64::MAX), + } + } + + /// Replaces the signature field of the last `proofs` entry with the `System` variant + /// of [`Signature`], rendering that entry invalid. + #[cfg(any(feature = "testing", test))] + pub fn invalidate_last_signature(&mut self) { + let proofs = match self { + BlockSignatures::V1(block_signatures) => &mut block_signatures.proofs, + BlockSignatures::V2(block_signatures) => &mut block_signatures.proofs, + }; + let last_proof = proofs + .last_entry() + .expect("should have at least one signature"); + *last_proof.into_mut() = Signature::System; + } + + /// Returns a random `BlockSignatures`. 
    #[cfg(any(feature = "testing", test))]
    pub fn random(rng: &mut TestRng) -> Self {
        // Pick a version at random so callers exercise both encodings.
        if rng.gen() {
            BlockSignatures::V1(BlockSignaturesV1::random(rng))
        } else {
            BlockSignatures::V2(BlockSignaturesV2::random(rng))
        }
    }
}

impl Display for BlockSignatures {
    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {
        // Delegate to the inner version's `Display`.
        match self {
            BlockSignatures::V1(block_signatures) => write!(formatter, "{}", block_signatures),
            BlockSignatures::V2(block_signatures) => write!(formatter, "{}", block_signatures),
        }
    }
}

impl From<BlockSignaturesV1> for BlockSignatures {
    fn from(block_signatures: BlockSignaturesV1) -> Self {
        BlockSignatures::V1(block_signatures)
    }
}

impl From<BlockSignaturesV2> for BlockSignatures {
    fn from(block_signatures: BlockSignaturesV2) -> Self {
        BlockSignatures::V2(block_signatures)
    }
}

impl ToBytes for BlockSignatures {
    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {
        let mut buf = bytesrepr::allocate_buffer(self)?;
        self.write_bytes(&mut buf)?;
        Ok(buf)
    }

    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {
        // A one-byte version tag precedes the payload so `FromBytes` can pick
        // the right variant.
        match self {
            BlockSignatures::V1(block_signatures) => {
                writer.push(BLOCK_SIGNATURES_V1_TAG);
                block_signatures.write_bytes(writer)?;
            }
            BlockSignatures::V2(block_signatures) => {
                writer.push(BLOCK_SIGNATURES_V2_TAG);
                block_signatures.write_bytes(writer)?;
            }
        }
        Ok(())
    }

    fn serialized_length(&self) -> usize {
        // Tag byte plus the inner payload length.
        TAG_LENGTH
            + match self {
                BlockSignatures::V1(block_signatures) => block_signatures.serialized_length(),
                BlockSignatures::V2(block_signatures) => block_signatures.serialized_length(),
            }
    }
}

impl FromBytes for BlockSignatures {
    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {
        let (tag, remainder) = u8::from_bytes(bytes)?;
        match tag {
            BLOCK_SIGNATURES_V1_TAG => {
                let (block_signatures, remainder) = BlockSignaturesV1::from_bytes(remainder)?;
                Ok((BlockSignatures::V1(block_signatures), remainder))
            }
            BLOCK_SIGNATURES_V2_TAG => {
                let (block_signatures, remainder) = BlockSignaturesV2::from_bytes(remainder)?;
                Ok((BlockSignatures::V2(block_signatures), remainder))
            }
            // Unknown version tag: reject rather than guess.
            _ => Err(bytesrepr::Error::Formatting),
        }
    }
}

/// An error returned during an attempt to merge two incompatible [`BlockSignatures`].
#[derive(Copy, Clone, Eq, PartialEq, Debug)]
#[non_exhaustive]
pub enum BlockSignaturesMergeError {
    /// A mismatch between block hashes.
    BlockHashMismatch {
        /// The `self` hash.
        self_hash: BlockHash,
        /// The `other` hash.
        other_hash: BlockHash,
    },
    /// A mismatch between block heights.
    BlockHeightMismatch {
        /// The `self` height.
        self_height: u64,
        /// The `other` height.
        other_height: u64,
    },
    /// A mismatch between era IDs.
    EraIdMismatch {
        /// The `self` era ID.
        self_era_id: EraId,
        /// The `other` era ID.
        other_era_id: EraId,
    },
    /// A mismatch between chain name hashes.
    ChainNameHashMismatch {
        /// The `self` chain name hash.
        self_chain_name_hash: ChainNameDigest,
        /// The `other` chain name hash.
        other_chain_name_hash: ChainNameDigest,
    },
    /// A mismatch between the versions of the block signatures.
    VersionMismatch,
}

impl Display for BlockSignaturesMergeError {
    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {
        match self {
            BlockSignaturesMergeError::BlockHashMismatch {
                self_hash,
                other_hash,
            } => {
                write!(
                    formatter,
                    "mismatch between block hashes while merging block signatures - self: {}, \
                    other: {}",
                    self_hash, other_hash
                )
            }
            BlockSignaturesMergeError::BlockHeightMismatch {
                self_height,
                other_height,
            } => {
                write!(
                    formatter,
                    "mismatch between block heights while merging block signatures - self: {}, \
                    other: {}",
                    self_height, other_height
                )
            }
            BlockSignaturesMergeError::EraIdMismatch {
                self_era_id,
                other_era_id,
            } => {
                write!(
                    formatter,
                    "mismatch between era ids while merging block signatures - self: {}, other: \
                    {}",
                    self_era_id, other_era_id
                )
            }
            BlockSignaturesMergeError::ChainNameHashMismatch {
                self_chain_name_hash,
                other_chain_name_hash,
            } => {
                write!(
                    formatter,
                    "mismatch between chain name hashes while merging block signatures - self: {}, \
                    other: {}",
                    self_chain_name_hash, other_chain_name_hash
                )
            }
            BlockSignaturesMergeError::VersionMismatch => {
                write!(
                    formatter,
                    "mismatch between versions of block signatures while merging"
                )
            }
        }
    }
}

// `StdError` only exists with `std`, hence the feature gate.
#[cfg(feature = "std")]
impl StdError for BlockSignaturesMergeError {}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn bytesrepr_roundtrip() {
        let rng = &mut TestRng::new();
        let hash = BlockSignatures::random(rng);
        bytesrepr::test_serialization_roundtrip(&hash);
    }
}

// --- new file: types/src/block/block_signatures/block_signatures_v1.rs ---

use alloc::{collections::BTreeMap, vec::Vec};
use core::fmt::{self, Display, Formatter};

#[cfg(feature = "datasize")]
use datasize::DataSize;
#[cfg(any(feature = "testing", test))]
use rand::Rng;
#[cfg(any(feature = "std", test))]
use serde::{Deserialize, Serialize};

use super::BlockHash;
#[cfg(any(feature = "testing", test))]
use crate::testing::TestRng;
use crate::{
    bytesrepr::{self, FromBytes, ToBytes},
    crypto, EraId, FinalitySignatureV1, PublicKey, Signature,
};

/// A collection of signatures for a single block, along with the associated block's hash and era
/// ID.
#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Debug)]
#[cfg_attr(any(feature = "std", test), derive(Serialize, Deserialize))]
#[cfg_attr(feature = "datasize", derive(DataSize))]
pub struct BlockSignaturesV1 {
    /// The block hash.
    pub(super) block_hash: BlockHash,
    /// The era ID in which this block was created.
    pub(super) era_id: EraId,
    /// The proofs of the block, i.e. a collection of validators' signatures of the block hash.
    pub(super) proofs: BTreeMap<PublicKey, Signature>,
}

impl BlockSignaturesV1 {
    /// Constructs a new `BlockSignaturesV1` with an empty proof map.
    pub fn new(block_hash: BlockHash, era_id: EraId) -> Self {
        BlockSignaturesV1 {
            block_hash,
            era_id,
            proofs: BTreeMap::new(),
        }
    }

    /// Returns the block hash of the associated block.
    pub fn block_hash(&self) -> &BlockHash {
        &self.block_hash
    }

    /// Returns the era id of the associated block.
    pub fn era_id(&self) -> EraId {
        self.era_id
    }

    /// Returns the finality signature associated with the given public key, if available.
    pub fn finality_signature(&self, public_key: &PublicKey) -> Option<FinalitySignatureV1> {
        // Rebuild the full `FinalitySignatureV1` from the stored raw signature plus
        // this collection's block hash and era ID.
        self.proofs
            .get(public_key)
            .map(|signature| FinalitySignatureV1 {
                block_hash: self.block_hash,
                era_id: self.era_id,
                signature: *signature,
                public_key: public_key.clone(),
                // The lazily-computed verification flag starts unset.
                #[cfg(any(feature = "once_cell", test))]
                is_verified: Default::default(),
            })
    }

    /// Returns `true` if there is a signature associated with the given public key.
    pub fn has_finality_signature(&self, public_key: &PublicKey) -> bool {
        self.proofs.contains_key(public_key)
    }

    /// Returns an iterator over all the signatures.
    pub fn finality_signatures(&self) -> impl Iterator<Item = FinalitySignatureV1> + '_ {
        self.proofs
            .iter()
            .map(move |(public_key, signature)| FinalitySignatureV1 {
                block_hash: self.block_hash,
                era_id: self.era_id,
                signature: *signature,
                public_key: public_key.clone(),
                #[cfg(any(feature = "once_cell", test))]
                is_verified: Default::default(),
            })
    }

    /// Returns an iterator over all the validator public keys.
    pub fn signers(&self) -> impl Iterator<Item = &PublicKey> + '_ {
        self.proofs.keys()
    }

    /// Returns the number of signatures in the collection.
    pub fn len(&self) -> usize {
        self.proofs.len()
    }

    /// Returns `true` if there are no signatures in the collection.
    pub fn is_empty(&self) -> bool {
        self.proofs.is_empty()
    }

    /// Inserts a new signature, replacing any existing one for the same public key.
    pub fn insert_signature(&mut self, public_key: PublicKey, signature: Signature) {
        let _ = self.proofs.insert(public_key, signature);
    }

    /// Returns `Ok` if and only if all the signatures are cryptographically valid.
    pub fn is_verified(&self) -> Result<(), crypto::Error> {
        // Verify each proof against this collection's block hash and era ID;
        // short-circuits on the first invalid signature.
        for (public_key, signature) in self.proofs.iter() {
            let signature = FinalitySignatureV1 {
                block_hash: self.block_hash,
                era_id: self.era_id,
                signature: *signature,
                public_key: public_key.clone(),
                #[cfg(any(feature = "once_cell", test))]
                is_verified: Default::default(),
            };
            signature.is_verified()?;
        }
        Ok(())
    }

    /// Returns a random `BlockSignaturesV1` with 0 to 9 proofs.
    #[cfg(any(feature = "testing", test))]
    pub fn random(rng: &mut TestRng) -> Self {
        let block_hash = BlockHash::random(rng);
        let era_id = EraId::random(rng);
        let proofs = (0..rng.gen_range(0..10))
            .map(|_| {
                let public_key = PublicKey::random(rng);
                // Array length is inferred from the parameter type of
                // `Signature::ed25519`.
                let bytes = std::array::from_fn(|_| rng.gen());
                let signature = Signature::ed25519(bytes).unwrap();
                (public_key, signature)
            })
            .collect();
        Self {
            block_hash,
            era_id,
            proofs,
        }
    }
}

impl Display for BlockSignaturesV1 {
    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {
        write!(
            formatter,
            "block signatures for {} in {} with {} proofs",
            self.block_hash,
            self.era_id,
            self.proofs.len()
        )
    }
}

impl ToBytes for BlockSignaturesV1 {
    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {
        let mut buf = bytesrepr::allocate_buffer(self)?;
        self.write_bytes(&mut buf)?;
        Ok(buf)
    }

    // Field order here must match `from_bytes` below.
    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {
        self.block_hash.write_bytes(writer)?;
        self.era_id.write_bytes(writer)?;
        self.proofs.write_bytes(writer)
    }

    fn serialized_length(&self) -> usize {
        self.block_hash.serialized_length()
            + self.era_id.serialized_length()
            + self.proofs.serialized_length()
    }
}

impl FromBytes for BlockSignaturesV1 {
    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {
        let (block_hash, remainder) = BlockHash::from_bytes(bytes)?;
        let (era_id, remainder) = EraId::from_bytes(remainder)?;
        let (proofs, remainder) = BTreeMap::<PublicKey, Signature>::from_bytes(remainder)?;
        Ok((
            Self {
                block_hash,
                era_id,
                proofs,
            },
            remainder,
        ))
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn bytesrepr_roundtrip() {
        let rng = &mut TestRng::new();
        let hash = BlockSignaturesV1::random(rng);
        bytesrepr::test_serialization_roundtrip(&hash);
    }
}
// --- new file: types/src/block/block_signatures/block_signatures_v2.rs ---

use alloc::{collections::BTreeMap, vec::Vec};
use core::fmt::{self, Display, Formatter};

#[cfg(feature = "datasize")]
use datasize::DataSize;
#[cfg(any(feature = "testing", test))]
use rand::Rng;
#[cfg(any(feature = "std", test))]
use serde::{Deserialize, Serialize};

#[cfg(any(feature = "testing", test))]
use crate::testing::TestRng;
use crate::{
    bytesrepr::{self, FromBytes, ToBytes},
    crypto, BlockHash, ChainNameDigest, EraId, FinalitySignatureV2, PublicKey, Signature,
};

/// A collection of signatures for a single block, along with the associated block's hash and era
/// ID.
#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Debug)]
#[cfg_attr(any(feature = "std", test), derive(Serialize, Deserialize))]
#[cfg_attr(feature = "datasize", derive(DataSize))]
pub struct BlockSignaturesV2 {
    /// The block hash.
    pub(super) block_hash: BlockHash,
    /// The block height.
    pub(super) block_height: u64,
    /// The era ID in which this block was created.
    pub(super) era_id: EraId,
    /// The hash of the chain name of the associated block.
    pub(super) chain_name_hash: ChainNameDigest,
    /// The proofs of the block, i.e. a collection of validators' signatures of the block hash.
    pub(super) proofs: BTreeMap<PublicKey, Signature>,
}

impl BlockSignaturesV2 {
    /// Constructs a new `BlockSignaturesV2` with an empty proof map.
    pub fn new(
        block_hash: BlockHash,
        block_height: u64,
        era_id: EraId,
        chain_name_hash: ChainNameDigest,
    ) -> Self {
        BlockSignaturesV2 {
            block_hash,
            block_height,
            era_id,
            chain_name_hash,
            proofs: BTreeMap::new(),
        }
    }

    /// Returns the block hash of the associated block.
    pub fn block_hash(&self) -> &BlockHash {
        &self.block_hash
    }

    /// Returns the block height of the associated block.
    pub fn block_height(&self) -> u64 {
        self.block_height
    }

    /// Returns the era id of the associated block.
    pub fn era_id(&self) -> EraId {
        self.era_id
    }

    /// Returns the chain name hash of the associated block.
    pub fn chain_name_hash(&self) -> ChainNameDigest {
        self.chain_name_hash
    }

    /// Returns the finality signature associated with the given public key, if available.
    pub fn finality_signature(&self, public_key: &PublicKey) -> Option<FinalitySignatureV2> {
        // Rebuild the full `FinalitySignatureV2` from the stored raw signature plus
        // this collection's block identity fields.
        self.proofs
            .get(public_key)
            .map(|signature| FinalitySignatureV2 {
                block_hash: self.block_hash,
                block_height: self.block_height,
                era_id: self.era_id,
                chain_name_hash: self.chain_name_hash,
                signature: *signature,
                public_key: public_key.clone(),
                // The lazily-computed verification flag starts unset.
                #[cfg(any(feature = "once_cell", test))]
                is_verified: Default::default(),
            })
    }

    /// Returns `true` if there is a signature associated with the given public key.
    pub fn has_finality_signature(&self, public_key: &PublicKey) -> bool {
        self.proofs.contains_key(public_key)
    }

    /// Returns an iterator over all the signatures.
    pub fn finality_signatures(&self) -> impl Iterator<Item = FinalitySignatureV2> + '_ {
        self.proofs
            .iter()
            .map(move |(public_key, signature)| FinalitySignatureV2 {
                block_hash: self.block_hash,
                block_height: self.block_height,
                era_id: self.era_id,
                chain_name_hash: self.chain_name_hash,
                signature: *signature,
                public_key: public_key.clone(),
                #[cfg(any(feature = "once_cell", test))]
                is_verified: Default::default(),
            })
    }

    /// Returns an iterator over all the validator public keys.
    pub fn signers(&self) -> impl Iterator<Item = &PublicKey> + '_ {
        self.proofs.keys()
    }

    /// Returns the number of signatures in the collection.
    pub fn len(&self) -> usize {
        self.proofs.len()
    }

    /// Returns `true` if there are no signatures in the collection.
    pub fn is_empty(&self) -> bool {
        self.proofs.is_empty()
    }

    /// Inserts a new signature, replacing any existing one for the same public key.
    pub fn insert_signature(&mut self, public_key: PublicKey, signature: Signature) {
        let _ = self.proofs.insert(public_key, signature);
    }

    /// Returns `Ok` if and only if all the signatures are cryptographically valid.
    pub fn is_verified(&self) -> Result<(), crypto::Error> {
        // Verify each proof against this collection's identity fields;
        // short-circuits on the first invalid signature.
        for (public_key, signature) in self.proofs.iter() {
            let signature = FinalitySignatureV2 {
                block_hash: self.block_hash,
                block_height: self.block_height,
                era_id: self.era_id,
                chain_name_hash: self.chain_name_hash,
                signature: *signature,
                public_key: public_key.clone(),
                #[cfg(any(feature = "once_cell", test))]
                is_verified: Default::default(),
            };
            signature.is_verified()?;
        }
        Ok(())
    }

    /// Returns a random `BlockSignaturesV2` with 0 to 9 proofs.
    #[cfg(any(feature = "testing", test))]
    pub fn random(rng: &mut TestRng) -> Self {
        let block_hash = BlockHash::random(rng);
        let block_height = rng.gen();
        let era_id = EraId::random(rng);
        let chain_name_hash = ChainNameDigest::random(rng);
        let proofs = (0..rng.gen_range(0..10))
            .map(|_| {
                let public_key = PublicKey::random(rng);
                // Array length is inferred from the parameter type of
                // `Signature::ed25519`.
                let bytes = std::array::from_fn(|_| rng.gen());
                let signature = Signature::ed25519(bytes).unwrap();
                (public_key, signature)
            })
            .collect();
        Self {
            block_hash,
            block_height,
            era_id,
            chain_name_hash,
            proofs,
        }
    }
}

impl Display for BlockSignaturesV2 {
    fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {
        write!(
            formatter,
            "block signatures for {} in {} with {} proofs",
            self.block_hash,
            self.era_id,
            self.proofs.len()
        )
    }
}

impl ToBytes for BlockSignaturesV2 {
    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {
        let mut buf = bytesrepr::allocate_buffer(self)?;
        self.write_bytes(&mut buf)?;
        Ok(buf)
    }

    // Field order here must match `from_bytes` below.
    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {
        self.block_hash.write_bytes(writer)?;
        self.block_height.write_bytes(writer)?;
        self.era_id.write_bytes(writer)?;
        self.chain_name_hash.write_bytes(writer)?;
        self.proofs.write_bytes(writer)?;
        Ok(())
    }

    fn serialized_length(&self) -> usize {
        self.block_hash.serialized_length()
            + self.block_height.serialized_length()
            + self.era_id.serialized_length()
            + self.chain_name_hash.serialized_length()
            + self.proofs.serialized_length()
    }
}

impl FromBytes for BlockSignaturesV2 {
    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {
        let (block_hash, remainder) = BlockHash::from_bytes(bytes)?;
        let (block_height, remainder) = u64::from_bytes(remainder)?;
        let (era_id, remainder) = EraId::from_bytes(remainder)?;
        let (chain_name_hash, remainder) = ChainNameDigest::from_bytes(remainder)?;
        let (proofs, remainder) = BTreeMap::<PublicKey, Signature>::from_bytes(remainder)?;
        Ok((
            Self {
                block_hash,
                block_height,
                era_id,
                chain_name_hash,
                proofs,
            },
            remainder,
        ))
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn bytesrepr_roundtrip() {
        let rng = &mut TestRng::new();
        let hash = BlockSignaturesV2::random(rng);
        bytesrepr::test_serialization_roundtrip(&hash);
    }
}

// --- new file: types/src/block/block_sync_status.rs ---

use alloc::{string::String, vec::Vec};
#[cfg(feature = "json-schema")]
use once_cell::sync::Lazy;
#[cfg(feature = "json-schema")]
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};

use crate::{
    bytesrepr::{self, FromBytes, ToBytes},
    BlockHash,
};

#[cfg(any(feature = "testing", test))]
use crate::testing::TestRng;
#[cfg(any(feature = "testing", test))]
use rand::Rng;

// Canned example used by the JSON schema (`example()` below).
#[cfg(feature = "json-schema")]
static BLOCK_SYNCHRONIZER_STATUS: Lazy<BlockSynchronizerStatus> = Lazy::new(|| {
    use crate::Digest;

    BlockSynchronizerStatus::new(
        Some(BlockSyncStatus {
            block_hash: BlockHash::new(
                Digest::from_hex(
                    "16ddf28e2b3d2e17f4cef36f8b58827eca917af225d139b0c77df3b4a67dc55e",
                )
                .unwrap(),
            ),
            block_height: Some(40),
            acquisition_state: "have strict finality(40) for: block hash 16dd..c55e".to_string(),
        }),
        Some(BlockSyncStatus {
            block_hash: BlockHash::new(
                Digest::from_hex(
                    "59907b1e32a9158169c4d89d9ce5ac9164fc31240bfcfb0969227ece06d74983",
                )
                .unwrap(),
            ),
            block_height: Some(6701),
            acquisition_state: "have block body(6701) for: block hash 5990..4983".to_string(),
        }),
    )
});
/// The status of syncing an individual block.
#[derive(Clone, Default, PartialEq, Eq, Serialize, Deserialize, Debug)]
#[cfg_attr(feature = "json-schema", derive(JsonSchema))]
#[serde(deny_unknown_fields)]
pub struct BlockSyncStatus {
    /// The block hash.
    block_hash: BlockHash,
    /// The height of the block, if known.
    block_height: Option<u64>,
    /// The state of acquisition of the data associated with the block.
    acquisition_state: String,
}

impl BlockSyncStatus {
    /// Constructs a new `BlockSyncStatus`.
    pub fn new(
        block_hash: BlockHash,
        block_height: Option<u64>,
        acquisition_state: String,
    ) -> Self {
        Self {
            block_hash,
            block_height,
            acquisition_state,
        }
    }

    /// Returns a random `BlockSyncStatus`; the height is present ~half the time.
    #[cfg(any(feature = "testing", test))]
    pub fn random(rng: &mut TestRng) -> Self {
        Self {
            block_hash: BlockHash::random(rng),
            block_height: rng.gen::<bool>().then_some(rng.gen()),
            acquisition_state: rng.random_string(10..20),
        }
    }
}

impl ToBytes for BlockSyncStatus {
    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {
        let mut buffer = bytesrepr::allocate_buffer(self)?;
        self.write_bytes(&mut buffer)?;
        Ok(buffer)
    }

    // Field order here must match `from_bytes` below.
    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {
        self.block_hash.write_bytes(writer)?;
        self.block_height.write_bytes(writer)?;
        self.acquisition_state.write_bytes(writer)
    }

    fn serialized_length(&self) -> usize {
        self.block_hash.serialized_length()
            + self.block_height.serialized_length()
            + self.acquisition_state.serialized_length()
    }
}

impl FromBytes for BlockSyncStatus {
    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {
        let (block_hash, remainder) = BlockHash::from_bytes(bytes)?;
        let (block_height, remainder) = Option::<u64>::from_bytes(remainder)?;
        let (acquisition_state, remainder) = String::from_bytes(remainder)?;
        Ok((
            BlockSyncStatus {
                block_hash,
                block_height,
                acquisition_state,
            },
            remainder,
        ))
    }
}

/// The status of the block synchronizer.
#[derive(Clone, Default, PartialEq, Eq, Serialize, Deserialize, Debug)]
#[cfg_attr(feature = "json-schema", derive(JsonSchema))]
#[serde(deny_unknown_fields)]
pub struct BlockSynchronizerStatus {
    /// The status of syncing a historical block, if any.
    historical: Option<BlockSyncStatus>,
    /// The status of syncing a forward block, if any.
    forward: Option<BlockSyncStatus>,
}

impl BlockSynchronizerStatus {
    /// Constructs a new `BlockSynchronizerStatus`.
    pub fn new(historical: Option<BlockSyncStatus>, forward: Option<BlockSyncStatus>) -> Self {
        Self {
            historical,
            forward,
        }
    }

    /// Returns an example `BlockSynchronizerStatus`.
    #[cfg(feature = "json-schema")]
    pub fn example() -> &'static Self {
        &BLOCK_SYNCHRONIZER_STATUS
    }

    /// Returns a random `BlockSynchronizerStatus`; each field is present ~half the time.
    #[cfg(any(feature = "testing", test))]
    pub fn random(rng: &mut TestRng) -> Self {
        let historical = rng.gen::<bool>().then_some(BlockSyncStatus::random(rng));
        let forward = rng.gen::<bool>().then_some(BlockSyncStatus::random(rng));
        Self {
            historical,
            forward,
        }
    }

    /// Returns status of the historical block sync.
    #[cfg(any(feature = "testing", test))]
    pub fn historical(&self) -> &Option<BlockSyncStatus> {
        &self.historical
    }

    /// Returns status of the forward block sync.
    #[cfg(any(feature = "testing", test))]
    pub fn forward(&self) -> &Option<BlockSyncStatus> {
        &self.forward
    }
}

impl ToBytes for BlockSynchronizerStatus {
    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {
        let mut buffer = bytesrepr::allocate_buffer(self)?;
        self.write_bytes(&mut buffer)?;
        Ok(buffer)
    }

    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {
        self.historical.write_bytes(writer)?;
        self.forward.write_bytes(writer)
    }

    fn serialized_length(&self) -> usize {
        self.historical.serialized_length() + self.forward.serialized_length()
    }
}

impl FromBytes for BlockSynchronizerStatus {
    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {
        let (historical, remainder) = Option::<BlockSyncStatus>::from_bytes(bytes)?;
        let (forward, remainder) = Option::<BlockSyncStatus>::from_bytes(remainder)?;
        Ok((
            BlockSynchronizerStatus {
                historical,
                forward,
            },
            remainder,
        ))
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::testing::TestRng;

    #[test]
    fn bytesrepr_roundtrip() {
        let rng = &mut TestRng::new();

        let val = BlockSyncStatus::random(rng);
        bytesrepr::test_serialization_roundtrip(&val);
    }
}

// --- new file: types/src/block/block_v1.rs ---

#[cfg(any(all(feature = "std", feature = "testing"), test))]
use alloc::collections::BTreeMap;
"testing"), test))] +use alloc::collections::BTreeMap; +use alloc::{boxed::Box, vec::Vec}; +use core::fmt::{self, Display, Formatter}; +#[cfg(any(all(feature = "std", feature = "testing"), test))] +use core::iter; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +#[cfg(any(feature = "std", test))] +use serde::{Deserialize, Serialize}; + +#[cfg(any(feature = "once_cell", test))] +use once_cell::sync::OnceCell; +#[cfg(any(all(feature = "std", feature = "testing"), test))] +use rand::Rng; + +#[cfg(any(all(feature = "std", feature = "testing"), test))] +use crate::U512; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + Block, BlockBodyV1, BlockHash, BlockHeaderV1, BlockValidationError, DeployHash, Digest, + EraEndV1, EraId, ProtocolVersion, PublicKey, Timestamp, +}; +#[cfg(any(all(feature = "std", feature = "testing"), test))] +use crate::{testing::TestRng, EraReport}; + +/// A block after execution, with the resulting global state root hash. This is the core component +/// of the Casper linear blockchain. Version 1. +#[cfg_attr(any(feature = "std", test), derive(Serialize, Deserialize))] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[derive(Clone, Eq, PartialEq, Debug)] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub struct BlockV1 { + /// The block hash identifying this block. + pub(super) hash: BlockHash, + /// The header portion of the block. + pub(super) header: BlockHeaderV1, + /// The body portion of the block. + pub(super) body: BlockBodyV1, +} + +impl BlockV1 { + // This method is not intended to be used by third party crates. 
    #[doc(hidden)]
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        parent_hash: BlockHash,
        parent_seed: Digest,
        state_root_hash: Digest,
        random_bit: bool,
        era_end: Option<EraEndV1>,
        timestamp: Timestamp,
        era_id: EraId,
        height: u64,
        protocol_version: ProtocolVersion,
        proposer: PublicKey,
        deploy_hashes: Vec<DeployHash>,
        transfer_hashes: Vec<DeployHash>,
    ) -> Self {
        let body = BlockBodyV1::new(proposer, deploy_hashes, transfer_hashes);
        // The header commits to the body via its hash.
        let body_hash = body.hash();
        // The accumulated seed chains the parent's seed with this block's random bit.
        let accumulated_seed = Digest::hash_pair(parent_seed, [random_bit as u8]);
        let header = BlockHeaderV1::new(
            parent_hash,
            state_root_hash,
            body_hash,
            random_bit,
            accumulated_seed,
            era_end,
            timestamp,
            era_id,
            height,
            protocol_version,
            // Cache slot for the lazily-computed block hash.
            #[cfg(any(feature = "once_cell", test))]
            OnceCell::new(),
        );
        Self::new_from_header_and_body(header, body)
    }

    // This method is not intended to be used by third party crates.
    #[doc(hidden)]
    pub fn new_from_header_and_body(header: BlockHeaderV1, body: BlockBodyV1) -> Self {
        // The block hash is derived from the header, which in turn commits to the body.
        let hash = header.block_hash();
        BlockV1 { hash, header, body }
    }

    /// Returns the `BlockHash` identifying this block.
    pub fn hash(&self) -> &BlockHash {
        &self.hash
    }

    /// Returns the block's header.
    pub fn header(&self) -> &BlockHeaderV1 {
        &self.header
    }

    /// Returns the block's header, consuming `self`.
    pub fn take_header(self) -> BlockHeaderV1 {
        self.header
    }

    /// Returns the block's body.
    pub fn body(&self) -> &BlockBodyV1 {
        &self.body
    }

    /// Returns the block's body, consuming `self`.
    pub fn take_body(self) -> BlockBodyV1 {
        self.body
    }

    /// Returns the parent block's hash.
    pub fn parent_hash(&self) -> &BlockHash {
        self.header.parent_hash()
    }

    /// Returns the root hash of global state after the deploys in this block have been executed.
    pub fn state_root_hash(&self) -> &Digest {
        self.header.state_root_hash()
    }

    /// Returns the hash of the block's body.
    pub fn body_hash(&self) -> &Digest {
        self.header.body_hash()
    }

    /// Returns a random bit needed for initializing a future era.
    pub fn random_bit(&self) -> bool {
        self.header.random_bit()
    }

    /// Returns a seed needed for initializing a future era.
    pub fn accumulated_seed(&self) -> &Digest {
        self.header.accumulated_seed()
    }

    /// Returns the `EraEnd` of a block if it is a switch block.
    pub fn era_end(&self) -> Option<&EraEndV1> {
        self.header.era_end()
    }

    /// Returns the timestamp from when the block was proposed.
    pub fn timestamp(&self) -> Timestamp {
        self.header.timestamp()
    }

    /// Returns the era ID in which this block was created.
    pub fn era_id(&self) -> EraId {
        self.header.era_id()
    }

    /// Returns the height of this block, i.e. the number of ancestors.
    pub fn height(&self) -> u64 {
        self.header.height()
    }

    /// Returns the protocol version of the network from when this block was created.
    pub fn protocol_version(&self) -> ProtocolVersion {
        self.header.protocol_version()
    }

    /// Returns `true` if this block is the last one in the current era.
    pub fn is_switch_block(&self) -> bool {
        self.header.is_switch_block()
    }

    /// Returns `true` if this block is the Genesis block, i.e. has height 0 and era 0.
    pub fn is_genesis(&self) -> bool {
        self.header.is_genesis()
    }

    /// Returns the public key of the validator which proposed the block.
    pub fn proposer(&self) -> &PublicKey {
        self.body.proposer()
    }

    /// Returns the deploy hashes within the block.
    pub fn deploy_hashes(&self) -> &[DeployHash] {
        self.body.deploy_hashes()
    }

    /// Returns the transfer hashes within the block.
    pub fn transfer_hashes(&self) -> &[DeployHash] {
        self.body.transfer_hashes()
    }

    /// Returns the deploy and transfer hashes in the order in which they were executed.
    pub fn deploy_and_transfer_hashes(&self) -> impl Iterator<Item = &DeployHash> {
        // Deploys are executed before transfers, so chain in that order.
        self.deploy_hashes()
            .iter()
            .chain(self.transfer_hashes().iter())
    }

    /// Returns `Ok` if and only if the block's provided block hash and body hash are identical to
    /// those generated by hashing the appropriate input data.
    pub fn verify(&self) -> Result<(), BlockValidationError> {
        // Check the stored block hash against one recomputed from the header.
        let actual_block_header_hash = self.header().block_hash();
        if *self.hash() != actual_block_header_hash {
            return Err(BlockValidationError::UnexpectedBlockHash {
                block: Box::new(Block::V1(self.clone())),
                actual_block_hash: actual_block_header_hash,
            });
        }

        // Check the header's body-hash commitment against one recomputed from the body.
        let actual_block_body_hash = self.body.hash();
        if *self.header.body_hash() != actual_block_body_hash {
            return Err(BlockValidationError::UnexpectedBodyHash {
                block: Box::new(Block::V1(self.clone())),
                actual_block_body_hash,
            });
        }

        Ok(())
    }

    /// Returns a random block, but using the provided values.
    ///
    /// If `deploy_hashes_iter` is empty, a few random deploy hashes will be added to the
    /// `deploy_hashes` and `transfer_hashes` fields of the body. Otherwise, the provided deploy
    /// hashes will populate the `deploy_hashes` field and `transfer_hashes` will be empty.
    #[cfg(any(all(feature = "std", feature = "testing"), test))]
    pub fn random_with_specifics<I: IntoIterator<Item = DeployHash>>(
        rng: &mut TestRng,
        era_id: EraId,
        height: u64,
        protocol_version: ProtocolVersion,
        is_switch: bool,
        deploy_hashes_iter: I,
    ) -> Self {
        let parent_hash = BlockHash::random(rng);
        let parent_seed = Digest::random(rng);
        let state_root_hash = Digest::random(rng);
        let random_bit = rng.gen();
        // Only switch blocks carry an `EraEnd` with next-era validator weights.
        let era_end = is_switch.then(|| {
            let mut next_era_validator_weights = BTreeMap::new();
            for i in 1_u64..6 {
                let _ = next_era_validator_weights.insert(PublicKey::random(rng), U512::from(i));
            }
            EraEndV1::new(EraReport::random(rng), next_era_validator_weights)
        });
        let timestamp = Timestamp::now();
        let proposer = PublicKey::random(rng);
        let mut deploy_hashes: Vec<DeployHash> = deploy_hashes_iter.into_iter().collect();
        let mut transfer_hashes: Vec<DeployHash> = vec![];
        // See the doc comment: random hashes are generated only when the caller
        // supplied none.
        if deploy_hashes.is_empty() {
            let count = rng.gen_range(0..6);
            deploy_hashes = iter::repeat_with(|| DeployHash::random(rng))
                .take(count)
                .collect();
            let count = rng.gen_range(0..6);
            transfer_hashes = iter::repeat_with(|| DeployHash::random(rng))
                .take(count)
                .collect();
        }

        BlockV1::new(
            parent_hash,
            parent_seed,
            state_root_hash,
            random_bit,
            era_end,
            timestamp,
            era_id,
            height,
            protocol_version,
            proposer,
            deploy_hashes,
            transfer_hashes,
        )
    }
}

impl Display for BlockV1 {
    fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {
        write!(
            formatter,
            "executed block #{}, {}, timestamp {}, {}, parent {}, post-state hash {}, body hash \
            {}, random bit {}, protocol version: {}",
            self.height(),
            self.hash(),
            self.timestamp(),
            self.era_id(),
            self.parent_hash().inner(),
            self.state_root_hash(),
            self.body_hash(),
            self.random_bit(),
            self.protocol_version()
        )?;
        // Only switch blocks have an era end to show.
        if let Some(era_end) = self.era_end() {
            write!(formatter, ", era_end: {}", era_end)?;
        }
        Ok(())
    }
}

impl ToBytes for BlockV1 {
    // Field order here must match `from_bytes` below.
    fn write_bytes(&self, writer: &mut Vec<u8>) -> Result<(), bytesrepr::Error> {
        self.hash.write_bytes(writer)?;
        self.header.write_bytes(writer)?;
        self.body.write_bytes(writer)
    }

    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {
        let mut buffer = bytesrepr::allocate_buffer(self)?;
        self.write_bytes(&mut buffer)?;
        Ok(buffer)
    }

    fn serialized_length(&self) -> usize {
        self.hash.serialized_length()
            + self.header.serialized_length()
            + self.body.serialized_length()
    }
}

impl FromBytes for BlockV1 {
    fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> {
        let (hash, remainder) = BlockHash::from_bytes(bytes)?;
        let (header, remainder) = BlockHeaderV1::from_bytes(remainder)?;
        let (body, remainder) = BlockBodyV1::from_bytes(remainder)?;
        let block = BlockV1 { hash, header, body };
        Ok((block, remainder))
    }
}

#[cfg(test)]
mod tests {
    use crate::{Block, TestBlockV1Builder};

    use super::*;

    #[test]
    fn bytesrepr_roundtrip() {
        let rng = &mut TestRng::new();
        let block = TestBlockV1Builder::new().build(rng);
        bytesrepr::test_serialization_roundtrip(&block);
    }

    #[test]
    fn block_check_bad_body_hash_sad_path() {
        let rng = &mut TestRng::new();

        // Corrupt the header's body-hash commitment, then re-derive the block hash so
        // only the body check fails.
        let mut block = TestBlockV1Builder::new().build(rng);
        let bogus_block_body_hash = Digest::hash([0xde, 0xad, 0xbe, 0xef]);
        block.header.set_body_hash(bogus_block_body_hash);
        block.hash = block.header.block_hash();

        let expected_error = BlockValidationError::UnexpectedBodyHash {
            block: Box::new(Block::V1(block.clone())),
            actual_block_body_hash: block.body.hash(),
        };
        assert_eq!(block.verify(), Err(expected_error));
    }

    #[test]
    fn block_check_bad_block_hash_sad_path() {
        let rng = &mut TestRng::new();

        // Corrupt only the stored block hash so the block-hash check fails first.
        let mut block = TestBlockV1Builder::new().build(rng);
        let bogus_block_hash = BlockHash::from(Digest::hash([0xde, 0xad, 0xbe, 0xef]));
        block.hash = bogus_block_hash;

        let expected_error = BlockValidationError::UnexpectedBlockHash {
            block: Box::new(Block::V1(block.clone())),
+ actual_block_hash: block.header.block_hash(), + }; + assert_eq!(block.verify(), Err(expected_error)); + } +} diff --git a/types/src/block/block_v2.rs b/types/src/block/block_v2.rs new file mode 100644 index 0000000000..7a45122814 --- /dev/null +++ b/types/src/block/block_v2.rs @@ -0,0 +1,427 @@ +use alloc::{boxed::Box, collections::BTreeMap, vec::Vec}; + +use core::{ + convert::TryFrom, + fmt::{self, Display, Formatter}, +}; +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use once_cell::sync::Lazy; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +#[cfg(any(feature = "std", test))] +use serde::{Deserialize, Serialize}; + +#[cfg(any(feature = "once_cell", test))] +use once_cell::sync::OnceCell; + +use super::{Block, BlockBodyV2, BlockConversionError, RewardedSignatures}; +#[cfg(any(all(feature = "std", feature = "testing"), test))] +use crate::testing::TestRng; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + transaction::TransactionHash, + BlockHash, BlockHeaderV2, BlockValidationError, Digest, EraEndV2, EraId, ProtocolVersion, + PublicKey, Timestamp, +}; +#[cfg(feature = "json-schema")] +use crate::{TransactionV1Hash, AUCTION_LANE_ID, INSTALL_UPGRADE_LANE_ID, MINT_LANE_ID}; + +#[cfg(feature = "json-schema")] +static BLOCK_V2: Lazy = Lazy::new(|| { + let parent_hash = BlockHash::new(Digest::from([7; Digest::LENGTH])); + let parent_seed = Digest::from([9; Digest::LENGTH]); + let state_root_hash = Digest::from([8; Digest::LENGTH]); + let random_bit = true; + let era_end = Some(EraEndV2::example().clone()); + let timestamp = *Timestamp::example(); + let era_id = EraId::from(1); + let height = 10; + let protocol_version = ProtocolVersion::V1_0_0; + let secret_key = crate::SecretKey::example(); + let proposer = PublicKey::from(secret_key); + let mint_hashes = vec![TransactionHash::V1(TransactionV1Hash::new(Digest::from( + [20; Digest::LENGTH], + )))]; + let auction_hashes = 
vec![TransactionHash::V1(TransactionV1Hash::new(Digest::from( + [21; Digest::LENGTH], + )))]; + let installer_upgrader_hashes = vec![TransactionHash::V1(TransactionV1Hash::new( + Digest::from([22; Digest::LENGTH]), + ))]; + let transactions = { + let mut ret = BTreeMap::new(); + ret.insert(MINT_LANE_ID, mint_hashes); + ret.insert(AUCTION_LANE_ID, auction_hashes); + ret.insert(INSTALL_UPGRADE_LANE_ID, installer_upgrader_hashes); + ret + }; + let rewarded_signatures = RewardedSignatures::default(); + let current_gas_price = 1u8; + let last_switch_block_hash = BlockHash::new(Digest::from([10; Digest::LENGTH])); + BlockV2::new( + parent_hash, + parent_seed, + state_root_hash, + random_bit, + era_end, + timestamp, + era_id, + height, + protocol_version, + proposer, + transactions, + rewarded_signatures, + current_gas_price, + Some(last_switch_block_hash), + ) +}); + +/// A block after execution, with the resulting global state root hash. This is the core component +/// of the Casper linear blockchain. Version 2. +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(any(feature = "std", test), derive(Serialize, Deserialize))] +#[derive(Clone, Debug, PartialEq, Eq)] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub struct BlockV2 { + /// The block hash identifying this block. + pub(super) hash: BlockHash, + /// The header portion of the block. + pub(super) header: BlockHeaderV2, + /// The body portion of the block. + pub(super) body: BlockBodyV2, +} + +impl BlockV2 { + // This method is not intended to be used by third party crates. 
+ #[doc(hidden)] + #[allow(clippy::too_many_arguments)] + pub fn new( + parent_hash: BlockHash, + parent_seed: Digest, + state_root_hash: Digest, + random_bit: bool, + era_end: Option, + timestamp: Timestamp, + era_id: EraId, + height: u64, + protocol_version: ProtocolVersion, + proposer: PublicKey, + transactions: BTreeMap>, + rewarded_signatures: RewardedSignatures, + current_gas_price: u8, + last_switch_block_hash: Option, + ) -> Self { + let body = BlockBodyV2::new(transactions, rewarded_signatures); + let body_hash = body.hash(); + let accumulated_seed = Digest::hash_pair(parent_seed, [random_bit as u8]); + let header = BlockHeaderV2::new( + parent_hash, + state_root_hash, + body_hash, + random_bit, + accumulated_seed, + era_end, + timestamp, + era_id, + height, + protocol_version, + proposer, + current_gas_price, + last_switch_block_hash, + #[cfg(any(feature = "once_cell", test))] + OnceCell::new(), + ); + Self::new_from_header_and_body(header, body) + } + + // This method is not intended to be used by third party crates. + #[doc(hidden)] + pub fn new_from_header_and_body(header: BlockHeaderV2, body: BlockBodyV2) -> Self { + let hash = header.block_hash(); + BlockV2 { hash, header, body } + } + + /// Returns the `BlockHash` identifying this block. + pub fn hash(&self) -> &BlockHash { + &self.hash + } + + /// Returns the block's header. + pub fn header(&self) -> &BlockHeaderV2 { + &self.header + } + + /// Returns the block's header, consuming `self`. + pub fn take_header(self) -> BlockHeaderV2 { + self.header + } + + /// Returns the block's body. + pub fn body(&self) -> &BlockBodyV2 { + &self.body + } + + /// Returns the block's body, consuming `self`. + pub fn take_body(self) -> BlockBodyV2 { + self.body + } + + /// Returns the parent block's hash. + pub fn parent_hash(&self) -> &BlockHash { + self.header.parent_hash() + } + + /// Returns the root hash of global state after the deploys in this block have been executed. 
+ pub fn state_root_hash(&self) -> &Digest { + self.header.state_root_hash() + } + + /// Returns the hash of the block's body. + pub fn body_hash(&self) -> &Digest { + self.header.body_hash() + } + + /// Returns a random bit needed for initializing a future era. + pub fn random_bit(&self) -> bool { + self.header.random_bit() + } + + /// Returns a seed needed for initializing a future era. + pub fn accumulated_seed(&self) -> &Digest { + self.header.accumulated_seed() + } + + /// Returns the `EraEnd` of a block if it is a switch block. + pub fn era_end(&self) -> Option<&EraEndV2> { + self.header.era_end() + } + + /// Returns the timestamp from when the block was proposed. + pub fn timestamp(&self) -> Timestamp { + self.header.timestamp() + } + + /// Returns the era ID in which this block was created. + pub fn era_id(&self) -> EraId { + self.header.era_id() + } + + /// Returns the height of this block, i.e. the number of ancestors. + pub fn height(&self) -> u64 { + self.header.height() + } + + /// Returns the protocol version of the network from when this block was created. + pub fn protocol_version(&self) -> ProtocolVersion { + self.header.protocol_version() + } + + /// Returns `true` if this block is the last one in the current era. + pub fn is_switch_block(&self) -> bool { + self.header.is_switch_block() + } + + /// Returns `true` if this block is the Genesis block, i.e. has height 0 and era 0. + pub fn is_genesis(&self) -> bool { + self.header.is_genesis() + } + + /// Returns the public key of the validator which proposed the block. + pub fn proposer(&self) -> &PublicKey { + self.header.proposer() + } + + /// List of identifiers for finality signatures for a particular past block. + pub fn rewarded_signatures(&self) -> &RewardedSignatures { + self.body.rewarded_signatures() + } + + /// Returns the hashes of the transfer transactions within the block. 
+ pub fn mint(&self) -> impl Iterator { + self.body.mint() + } + + /// Returns the hashes of the non-transfer, native transactions within the block. + pub fn auction(&self) -> impl Iterator { + self.body.auction() + } + + /// Returns the hashes of the install/upgrade wasm transactions within the block. + pub fn install_upgrade(&self) -> impl Iterator { + self.body.install_upgrade() + } + + /// Returns the hashes of the transactions filtered by lane id within the block. + pub fn transactions_by_lane_id(&self, lane_id: u8) -> impl Iterator { + self.body.transaction_by_lane(lane_id) + } + + /// Returns all of the transaction hashes in the order in which they were executed. + pub fn all_transactions(&self) -> impl Iterator { + self.body.all_transactions() + } + + /// Returns a reference to the collection of mapped transactions. + pub fn transactions(&self) -> &BTreeMap> { + self.body.transactions() + } + + /// Returns the last relevant switch block hash. + pub fn last_switch_block_hash(&self) -> Option { + self.header.last_switch_block_hash() + } + + /// Returns `Ok` if and only if the block's provided block hash and body hash are identical to + /// those generated by hashing the appropriate input data. + pub fn verify(&self) -> Result<(), BlockValidationError> { + let actual_block_header_hash = self.header().block_hash(); + if *self.hash() != actual_block_header_hash { + return Err(BlockValidationError::UnexpectedBlockHash { + block: Box::new(Block::V2(self.clone())), + actual_block_hash: actual_block_header_hash, + }); + } + + let actual_block_body_hash = self.body.hash(); + if *self.header.body_hash() != actual_block_body_hash { + return Err(BlockValidationError::UnexpectedBodyHash { + block: Box::new(Block::V2(self.clone())), + actual_block_body_hash, + }); + } + + Ok(()) + } + + // This method is not intended to be used by third party crates. 
+ #[doc(hidden)] + #[cfg(feature = "json-schema")] + pub fn example() -> &'static Self { + &BLOCK_V2 + } + + /// Makes the block invalid, for testing purpose. + #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn make_invalid(self, rng: &mut TestRng) -> Self { + let block = BlockV2 { + hash: BlockHash::random(rng), + ..self + }; + + assert!(block.verify().is_err()); + block + } +} + +impl Display for BlockV2 { + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + write!( + formatter, + "executed block #{}, {}, timestamp {}, {}, parent {}, post-state hash {}, body hash \ + {}, random bit {}, protocol version: {}", + self.height(), + self.hash(), + self.timestamp(), + self.era_id(), + self.parent_hash().inner(), + self.state_root_hash(), + self.body_hash(), + self.random_bit(), + self.protocol_version() + )?; + if let Some(era_end) = self.era_end() { + write!(formatter, ", era_end: {}", era_end)?; + } + Ok(()) + } +} + +impl ToBytes for BlockV2 { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.hash.write_bytes(writer)?; + self.header.write_bytes(writer)?; + self.body.write_bytes(writer) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.hash.serialized_length() + + self.header.serialized_length() + + self.body.serialized_length() + } +} + +impl FromBytes for BlockV2 { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (hash, remainder) = BlockHash::from_bytes(bytes)?; + let (header, remainder) = BlockHeaderV2::from_bytes(remainder)?; + let (body, remainder) = BlockBodyV2::from_bytes(remainder)?; + let block = BlockV2 { hash, header, body }; + Ok((block, remainder)) + } +} + +impl TryFrom for BlockV2 { + type Error = BlockConversionError; + + fn try_from(value: Block) -> Result { + match value { + Block::V2(v2) => 
Ok(v2), + _ => Err(BlockConversionError::DifferentVersion { + expected_version: 2, + }), + } + } +} + +#[cfg(test)] +mod tests { + use crate::TestBlockBuilder; + + use super::*; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + let block = TestBlockBuilder::new().build(rng); + bytesrepr::test_serialization_roundtrip(&block); + } + + #[test] + fn block_check_bad_body_hash_sad_path() { + let rng = &mut TestRng::new(); + + let mut block = TestBlockBuilder::new().build(rng); + let bogus_block_body_hash = Digest::hash([0xde, 0xad, 0xbe, 0xef]); + block.header.set_body_hash(bogus_block_body_hash); + block.hash = block.header.block_hash(); + + let expected_error = BlockValidationError::UnexpectedBodyHash { + block: Box::new(Block::V2(block.clone())), + actual_block_body_hash: block.body.hash(), + }; + assert_eq!(block.verify(), Err(expected_error)); + } + + #[test] + fn block_check_bad_block_hash_sad_path() { + let rng = &mut TestRng::new(); + + let mut block = TestBlockBuilder::new().build(rng); + let bogus_block_hash = BlockHash::from(Digest::hash([0xde, 0xad, 0xbe, 0xef])); + block.hash = bogus_block_hash; + + let expected_error = BlockValidationError::UnexpectedBlockHash { + block: Box::new(Block::V2(block.clone())), + actual_block_hash: block.header.block_hash(), + }; + assert_eq!(block.verify(), Err(expected_error)); + } +} diff --git a/types/src/block/block_with_signatures.rs b/types/src/block/block_with_signatures.rs new file mode 100644 index 0000000000..f10d3a62d5 --- /dev/null +++ b/types/src/block/block_with_signatures.rs @@ -0,0 +1,85 @@ +use alloc::vec::Vec; +use core::fmt::{self, Display, Formatter}; + +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + Block, BlockSignatures, +}; +#[cfg(any(feature = "std", feature = "json-schema", test))] +use serde::{Deserialize, Serialize}; + +/// A block and signatures for that block. 
+#[derive(Clone, Debug, PartialEq, Eq)] +#[cfg_attr( + any(feature = "std", feature = "json-schema", test), + derive(Serialize, Deserialize) +)] +pub struct BlockWithSignatures { + /// Block. + pub(crate) block: Block, + // The signatures of the block. + pub(crate) block_signatures: BlockSignatures, +} + +impl BlockWithSignatures { + /// Creates a new `BlockWithSignatures`. + pub fn new(block: Block, block_signatures: BlockSignatures) -> Self { + Self { + block, + block_signatures, + } + } + + /// Returns the inner block. + pub fn block(&self) -> &Block { + &self.block + } + + /// Returns the block signatures. + pub fn block_signatures(&self) -> &BlockSignatures { + &self.block_signatures + } + + /// Converts `self` into the block and signatures. + pub fn into_inner(self) -> (Block, BlockSignatures) { + (self.block, self.block_signatures) + } +} + +impl FromBytes for BlockWithSignatures { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (block, bytes) = FromBytes::from_bytes(bytes)?; + let (block_signatures, bytes) = FromBytes::from_bytes(bytes)?; + Ok((BlockWithSignatures::new(block, block_signatures), bytes)) + } +} + +impl ToBytes for BlockWithSignatures { + fn to_bytes(&self) -> Result, crate::bytesrepr::Error> { + let mut buf = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buf)?; + Ok(buf) + } + + fn write_bytes(&self, bytes: &mut Vec) -> Result<(), crate::bytesrepr::Error> { + self.block.write_bytes(bytes)?; + self.block_signatures.write_bytes(bytes)?; + Ok(()) + } + + fn serialized_length(&self) -> usize { + self.block.serialized_length() + self.block_signatures.serialized_length() + } +} + +impl Display for BlockWithSignatures { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!( + f, + "block #{}, {}, with {} block signatures", + self.block.height(), + self.block.hash(), + self.block_signatures.len() + ) + } +} diff --git a/types/src/block/chain_name_digest.rs b/types/src/block/chain_name_digest.rs 
new file mode 100644 index 0000000000..1dd9a70fe7 --- /dev/null +++ b/types/src/block/chain_name_digest.rs @@ -0,0 +1,98 @@ +use core::fmt::{self, Display, Formatter}; + +use alloc::vec::Vec; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::Rng; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + Digest, +}; + +/// A cryptographic hash of a chain name. +#[derive( + Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug, Default, +)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr( + feature = "json-schema", + derive(JsonSchema), + schemars(description = "Hex-encoded cryptographic hash of a chain name.") +)] +#[serde(deny_unknown_fields)] +pub struct ChainNameDigest(Digest); + +impl ChainNameDigest { + /// The number of bytes in a `ChainNameDigest` digest. + pub const LENGTH: usize = Digest::LENGTH; + + /// Constructs a new `ChainNameDigest` from the given chain name. + pub fn from_chain_name(name: &str) -> Self { + ChainNameDigest(Digest::hash(name.as_bytes())) + } + + /// Returns the wrapped inner digest. + pub fn inner(&self) -> &Digest { + &self.0 + } + + /// Returns a new `ChainNameDigest` directly initialized with the provided `Digest`; + /// no hashing is done. + #[cfg(any(feature = "testing", test))] + pub const fn from_digest(digest: Digest) -> Self { + ChainNameDigest(digest) + } + + /// Returns a random `ChainNameDigest`. 
+ #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + let hash = rng.gen::<[u8; Digest::LENGTH]>().into(); + ChainNameDigest(hash) + } +} + +impl Display for ChainNameDigest { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + write!(f, "chain-name-hash({})", self.0) + } +} + +impl ToBytes for ChainNameDigest { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.0.write_bytes(writer) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.0.to_bytes() + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } +} + +impl FromBytes for ChainNameDigest { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + Digest::from_bytes(bytes).map(|(inner, remainder)| (Self(inner), remainder)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + let hash = ChainNameDigest::random(rng); + bytesrepr::test_serialization_roundtrip(&hash); + } +} diff --git a/types/src/block/era_end.rs b/types/src/block/era_end.rs new file mode 100644 index 0000000000..1bb853d154 --- /dev/null +++ b/types/src/block/era_end.rs @@ -0,0 +1,133 @@ +mod era_end_v1; +mod era_end_v2; + +use alloc::{collections::BTreeMap, vec::Vec}; +use core::fmt::{self, Display, Formatter}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +use serde::{Deserialize, Serialize}; + +use crate::{ + bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + PublicKey, Rewards, U512, +}; +pub use era_end_v1::{EraEndV1, EraReport}; +pub use era_end_v2::EraEndV2; + +const TAG_LENGTH: usize = U8_SERIALIZED_LENGTH; + +/// Tag for block body v1. +pub const ERA_END_V1_TAG: u8 = 0; +/// Tag for block body v2. +pub const ERA_END_V2_TAG: u8 = 1; + +/// The versioned era end of a block, storing the data for a switch block. +/// It encapsulates different variants of the EraEnd struct. 
+#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(any(feature = "testing", test), derive(PartialEq))] +#[derive(Clone, Hash, Serialize, Deserialize, Debug)] +pub enum EraEnd { + /// The legacy, initial version of the body portion of a block. + V1(EraEndV1), + /// The version 2 of the body portion of a block, which includes the + /// `past_finality_signatures`. + V2(EraEndV2), +} + +impl EraEnd { + /// Returns the equivocators. + pub fn equivocators(&self) -> &[PublicKey] { + match self { + EraEnd::V1(v1) => v1.equivocators(), + EraEnd::V2(v2) => v2.equivocators(), + } + } + + /// Returns the inactive validators. + pub fn inactive_validators(&self) -> &[PublicKey] { + match self { + EraEnd::V1(v1) => v1.inactive_validators(), + EraEnd::V2(v2) => v2.inactive_validators(), + } + } + + /// Returns the weights of validators in the upcoming era. + pub fn next_era_validator_weights(&self) -> &BTreeMap { + match self { + EraEnd::V1(v1) => v1.next_era_validator_weights(), + EraEnd::V2(v2) => v2.next_era_validator_weights(), + } + } + + /// Returns the rewards. 
+ pub fn rewards(&self) -> Rewards { + match self { + EraEnd::V1(v1) => Rewards::V1(v1.rewards()), + EraEnd::V2(v2) => Rewards::V2(v2.rewards()), + } + } +} + +impl Display for EraEnd { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + EraEnd::V1(v1) => Display::fmt(&v1, formatter), + EraEnd::V2(v2) => Display::fmt(&v2, formatter), + } + } +} + +impl From for EraEnd { + fn from(era_end: EraEndV1) -> Self { + EraEnd::V1(era_end) + } +} + +impl From for EraEnd { + fn from(era_end: EraEndV2) -> Self { + EraEnd::V2(era_end) + } +} + +impl ToBytes for EraEnd { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + match self { + EraEnd::V1(v1) => { + buffer.insert(0, ERA_END_V1_TAG); + buffer.extend(v1.to_bytes()?); + } + EraEnd::V2(v2) => { + buffer.insert(0, ERA_END_V2_TAG); + buffer.extend(v2.to_bytes()?); + } + } + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + TAG_LENGTH + + match self { + EraEnd::V1(v1) => v1.serialized_length(), + EraEnd::V2(v2) => v2.serialized_length(), + } + } +} + +impl FromBytes for EraEnd { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + match tag { + ERA_END_V1_TAG => { + let (body, remainder): (EraEndV1, _) = FromBytes::from_bytes(remainder)?; + Ok((Self::V1(body), remainder)) + } + ERA_END_V2_TAG => { + let (body, remainder): (EraEndV2, _) = FromBytes::from_bytes(remainder)?; + Ok((Self::V2(body), remainder)) + } + _ => Err(bytesrepr::Error::Formatting), + } + } +} diff --git a/types/src/block/era_end/era_end_v1.rs b/types/src/block/era_end/era_end_v1.rs new file mode 100644 index 0000000000..d93488f059 --- /dev/null +++ b/types/src/block/era_end/era_end_v1.rs @@ -0,0 +1,163 @@ +mod era_report; + +use alloc::{collections::BTreeMap, vec::Vec}; +use core::fmt::{self, Display, Formatter}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = 
"json-schema")] +use once_cell::sync::Lazy; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; +#[cfg(feature = "json-schema")] +use serde_map_to_array::KeyValueJsonSchema; +use serde_map_to_array::{BTreeMapToArray, KeyValueLabels}; + +#[cfg(feature = "json-schema")] +use crate::SecretKey; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + PublicKey, U512, +}; +pub use era_report::EraReport; + +#[cfg(feature = "json-schema")] +static ERA_END_V1: Lazy = Lazy::new(|| { + let secret_key_1 = SecretKey::ed25519_from_bytes([0; 32]).unwrap(); + let public_key_1 = PublicKey::from(&secret_key_1); + let next_era_validator_weights = { + let mut next_era_validator_weights: BTreeMap = BTreeMap::new(); + next_era_validator_weights.insert(public_key_1, U512::from(123)); + next_era_validator_weights.insert( + PublicKey::from( + &SecretKey::ed25519_from_bytes([5u8; SecretKey::ED25519_LENGTH]).unwrap(), + ), + U512::from(456), + ); + next_era_validator_weights.insert( + PublicKey::from( + &SecretKey::ed25519_from_bytes([6u8; SecretKey::ED25519_LENGTH]).unwrap(), + ), + U512::from(789), + ); + next_era_validator_weights + }; + + let era_report = EraReport::example().clone(); + EraEndV1::new(era_report, next_era_validator_weights) +}); + +/// Information related to the end of an era, and validator weights for the following era. +#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub struct EraEndV1 { + /// Equivocation, reward and validator inactivity information. + pub(super) era_report: EraReport, + /// The validators for the upcoming era and their respective weights. + #[serde(with = "BTreeMapToArray::")] + pub(super) next_era_validator_weights: BTreeMap, +} + +impl EraEndV1 { + /// Returns equivocation, reward and validator inactivity information. 
+ pub fn era_report(&self) -> &EraReport { + &self.era_report + } + + /// Retrieves the deploy hashes within the block. + pub fn equivocators(&self) -> &[PublicKey] { + self.era_report.equivocators() + } + + /// Retrieves the transfer hashes within the block. + pub fn inactive_validators(&self) -> &[PublicKey] { + self.era_report.inactive_validators() + } + + /// Returns rewards for finalization of earlier blocks. + pub fn rewards(&self) -> &BTreeMap { + self.era_report.rewards() + } + + /// Returns the validators for the upcoming era and their respective weights. + pub fn next_era_validator_weights(&self) -> &BTreeMap { + &self.next_era_validator_weights + } + + // This method is not intended to be used by third party crates. + #[doc(hidden)] + pub fn new( + era_report: EraReport, + next_era_validator_weights: BTreeMap, + ) -> Self { + EraEndV1 { + era_report, + next_era_validator_weights, + } + } + + // This method is not intended to be used by third party crates. + #[doc(hidden)] + #[cfg(feature = "json-schema")] + pub fn example() -> &'static Self { + &ERA_END_V1 + } +} + +impl ToBytes for EraEndV1 { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.era_report.write_bytes(writer)?; + self.next_era_validator_weights.write_bytes(writer)?; + + Ok(()) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.era_report.serialized_length() + self.next_era_validator_weights.serialized_length() + } +} + +impl FromBytes for EraEndV1 { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (era_report, remainder) = EraReport::::from_bytes(bytes)?; + let (next_era_validator_weights, remainder) = + BTreeMap::::from_bytes(remainder)?; + let era_end = EraEndV1 { + era_report, + next_era_validator_weights, + }; + Ok((era_end, remainder)) + } +} + +impl Display for 
EraEndV1 { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + write!(formatter, "era end: {} ", self.era_report) + } +} + +struct NextEraValidatorLabels; + +impl KeyValueLabels for NextEraValidatorLabels { + const KEY: &'static str = "validator"; + const VALUE: &'static str = "weight"; +} + +#[cfg(feature = "json-schema")] +impl KeyValueJsonSchema for NextEraValidatorLabels { + const JSON_SCHEMA_KV_NAME: Option<&'static str> = Some("ValidatorWeight"); + const JSON_SCHEMA_KV_DESCRIPTION: Option<&'static str> = Some( + "A validator's public key paired with its weight, i.e. the total number of \ + motes staked by it and its delegators.", + ); + const JSON_SCHEMA_KEY_DESCRIPTION: Option<&'static str> = Some("The validator's public key."); + const JSON_SCHEMA_VALUE_DESCRIPTION: Option<&'static str> = Some("The validator's weight."); +} diff --git a/types/src/block/era_end/era_end_v1/era_report.rs b/types/src/block/era_end/era_end_v1/era_report.rs new file mode 100644 index 0000000000..af63359e41 --- /dev/null +++ b/types/src/block/era_end/era_end_v1/era_report.rs @@ -0,0 +1,252 @@ +use alloc::{collections::BTreeMap, vec::Vec}; +use core::fmt::{self, Display, Formatter}; +#[cfg(any(feature = "testing", test))] +use core::iter; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use once_cell::sync::Lazy; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; +#[cfg(feature = "json-schema")] +use serde_map_to_array::KeyValueJsonSchema; +use serde_map_to_array::{BTreeMapToArray, KeyValueLabels}; + +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +#[cfg(feature = "json-schema")] +use crate::SecretKey; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + Digest, DisplayIter, PublicKey, +}; + +#[cfg(feature = "json-schema")] +static ERA_REPORT: Lazy> = Lazy::new(|| { + let secret_key_1 = SecretKey::ed25519_from_bytes([0; 32]).unwrap(); + let public_key_1 = 
PublicKey::from(&secret_key_1); + let equivocators = vec![public_key_1]; + + let secret_key_3 = SecretKey::ed25519_from_bytes([2; 32]).unwrap(); + let public_key_3 = PublicKey::from(&secret_key_3); + let inactive_validators = vec![public_key_3]; + + let rewards = BTreeMap::new(); + + EraReport { + equivocators, + rewards, + inactive_validators, + } +}); + +/// Equivocation, reward and validator inactivity information. +/// +/// `VID` represents validator ID type, generally [`PublicKey`]. +#[derive(Clone, Debug, PartialOrd, Ord, PartialEq, Eq, Hash, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(bound( + serialize = "VID: Ord + Serialize", + deserialize = "VID: Ord + Deserialize<'de>", +))] +#[cfg_attr( + feature = "json-schema", + schemars(description = "Equivocation, reward and validator inactivity information.") +)] +pub struct EraReport { + /// The set of equivocators. + pub(super) equivocators: Vec, + /// Rewards for finalization of earlier blocks. + #[serde(with = "BTreeMapToArray::")] + pub(super) rewards: BTreeMap, + /// Validators that haven't produced any unit during the era. + pub(super) inactive_validators: Vec, +} + +impl EraReport { + /// Constructs a new `EraReport`. + pub fn new( + equivocators: Vec, + rewards: BTreeMap, + inactive_validators: Vec, + ) -> Self { + EraReport { + equivocators, + rewards, + inactive_validators, + } + } + + /// Returns the set of equivocators. + pub fn equivocators(&self) -> &[VID] { + &self.equivocators + } + + /// Returns rewards for finalization of earlier blocks. + /// + /// This is a measure of the value of each validator's contribution to consensus, in + /// fractions of the configured maximum block reward. + pub fn rewards(&self) -> &BTreeMap { + &self.rewards + } + + /// Returns validators that haven't produced any unit during the era. 
+ pub fn inactive_validators(&self) -> &[VID] { + &self.inactive_validators + } + + /// Returns a cryptographic hash of the `EraReport`. + pub fn hash(&self) -> Digest + where + VID: ToBytes, + { + // Helper function to hash slice of validators + fn hash_slice_of_validators(slice_of_validators: &[VID]) -> Digest + where + VID: ToBytes, + { + Digest::hash_merkle_tree(slice_of_validators.iter().map(|validator| { + Digest::hash(validator.to_bytes().expect("Could not serialize validator")) + })) + } + + // Pattern match here leverages compiler to ensure every field is accounted for + let EraReport { + equivocators, + inactive_validators, + rewards, + } = self; + + let hashed_equivocators = hash_slice_of_validators(equivocators); + let hashed_inactive_validators = hash_slice_of_validators(inactive_validators); + let hashed_rewards = Digest::hash_btree_map(rewards).expect("Could not hash rewards"); + + Digest::hash_slice_rfold(&[ + hashed_equivocators, + hashed_rewards, + hashed_inactive_validators, + ]) + } +} + +impl Default for EraReport { + fn default() -> Self { + EraReport { + equivocators: vec![], + rewards: BTreeMap::new(), + inactive_validators: vec![], + } + } +} + +impl Display for EraReport { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + let slashings = DisplayIter::new(&self.equivocators); + let rewards = DisplayIter::new( + self.rewards + .iter() + .map(|(public_key, amount)| format!("{}: {}", public_key, amount)), + ); + write!(f, "era end: slash {}, reward {}", slashings, rewards) + } +} + +impl ToBytes for EraReport { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.equivocators.write_bytes(writer)?; + self.rewards.write_bytes(writer)?; + self.inactive_validators.write_bytes(writer) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + 
self.equivocators.serialized_length() + + self.rewards.serialized_length() + + self.inactive_validators.serialized_length() + } +} + +impl FromBytes for EraReport { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (equivocators, remainder) = Vec::::from_bytes(bytes)?; + let (rewards, remainder) = BTreeMap::::from_bytes(remainder)?; + let (inactive_validators, remainder) = Vec::::from_bytes(remainder)?; + let era_report = EraReport { + equivocators, + rewards, + inactive_validators, + }; + Ok((era_report, remainder)) + } +} + +impl EraReport { + // This method is not intended to be used by third party crates. + #[doc(hidden)] + #[cfg(feature = "json-schema")] + pub fn example() -> &'static Self { + &ERA_REPORT + } + + /// Returns a random `EraReport`. + #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + use rand::Rng; + + let equivocators_count = rng.gen_range(0..5); + let rewards_count = rng.gen_range(0..5); + let inactive_count = rng.gen_range(0..5); + let equivocators = iter::repeat_with(|| PublicKey::random(rng)) + .take(equivocators_count) + .collect(); + let rewards = iter::repeat_with(|| { + let pub_key = PublicKey::random(rng); + let reward = rng.gen_range(1..(1_000_000_000 + 1)); + (pub_key, reward) + }) + .take(rewards_count) + .collect(); + let inactive_validators = iter::repeat_with(|| PublicKey::random(rng)) + .take(inactive_count) + .collect(); + EraReport::new(equivocators, rewards, inactive_validators) + } +} + +struct EraRewardsLabels; + +impl KeyValueLabels for EraRewardsLabels { + const KEY: &'static str = "validator"; + const VALUE: &'static str = "amount"; +} + +#[cfg(feature = "json-schema")] +impl KeyValueJsonSchema for EraRewardsLabels { + const JSON_SCHEMA_KV_NAME: Option<&'static str> = Some("EraReward"); + const JSON_SCHEMA_KV_DESCRIPTION: Option<&'static str> = Some( + "A validator's public key paired with a measure of the value of its \ + contribution to consensus, as a 
fraction of the configured maximum block reward.", + ); + const JSON_SCHEMA_KEY_DESCRIPTION: Option<&'static str> = Some("The validator's public key."); + const JSON_SCHEMA_VALUE_DESCRIPTION: Option<&'static str> = Some("The reward amount."); +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + let era_report = EraReport::random(rng); + bytesrepr::test_serialization_roundtrip(&era_report); + } +} diff --git a/types/src/block/era_end/era_end_v2.rs b/types/src/block/era_end/era_end_v2.rs new file mode 100644 index 0000000000..e60f6f31ee --- /dev/null +++ b/types/src/block/era_end/era_end_v2.rs @@ -0,0 +1,268 @@ +use alloc::{collections::BTreeMap, vec::Vec}; +use core::fmt; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use once_cell::sync::Lazy; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; +#[cfg(feature = "json-schema")] +use serde_map_to_array::KeyValueJsonSchema; +use serde_map_to_array::{BTreeMapToArray, KeyValueLabels}; + +#[cfg(feature = "json-schema")] +use crate::SecretKey; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + DisplayIter, PublicKey, U512, +}; + +#[cfg(feature = "json-schema")] +static ERA_END_V2: Lazy = Lazy::new(|| { + let secret_key_1 = SecretKey::ed25519_from_bytes([0; 32]).unwrap(); + let public_key_1 = PublicKey::from(&secret_key_1); + let secret_key_3 = SecretKey::ed25519_from_bytes([2; 32]).unwrap(); + let public_key_3 = PublicKey::from(&secret_key_3); + + let equivocators = vec![public_key_1.clone()]; + let inactive_validators = vec![public_key_3]; + let next_era_validator_weights = { + let mut next_era_validator_weights: BTreeMap = BTreeMap::new(); + next_era_validator_weights.insert(public_key_1, U512::from(123)); + next_era_validator_weights.insert( + PublicKey::from( + &SecretKey::ed25519_from_bytes([5u8; SecretKey::ED25519_LENGTH]).unwrap(), + ), + 
U512::from(456), + ); + next_era_validator_weights.insert( + PublicKey::from( + &SecretKey::ed25519_from_bytes([6u8; SecretKey::ED25519_LENGTH]).unwrap(), + ), + U512::from(789), + ); + next_era_validator_weights + }; + let rewards = Default::default(); + + EraEndV2::new( + equivocators, + inactive_validators, + next_era_validator_weights, + rewards, + 1u8, + ) +}); + +/// Information related to the end of an era, and validator weights for the following era. +#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub struct EraEndV2 { + /// The set of equivocators. + pub(super) equivocators: Vec, + /// Validators that haven't produced any unit during the era. + pub(super) inactive_validators: Vec, + /// The validators for the upcoming era and their respective weights. + #[serde(with = "BTreeMapToArray::")] + pub(super) next_era_validator_weights: BTreeMap, + /// The rewards distributed to the validators. + pub(super) rewards: BTreeMap>, + pub(super) next_era_gas_price: u8, +} + +impl EraEndV2 { + /// Returns the set of equivocators. + pub fn equivocators(&self) -> &[PublicKey] { + &self.equivocators + } + + /// Returns the validators that haven't produced any unit during the era. + pub fn inactive_validators(&self) -> &[PublicKey] { + &self.inactive_validators + } + + /// Returns the validators for the upcoming era and their respective weights. + pub fn next_era_validator_weights(&self) -> &BTreeMap { + &self.next_era_validator_weights + } + + /// Returns the rewards distributed to the validators. + pub fn rewards(&self) -> &BTreeMap> { + &self.rewards + } + + /// Returns the next era gas price. + pub fn next_era_gas_price(&self) -> u8 { + self.next_era_gas_price + } + + // This method is not intended to be used by third party crates. 
+ #[doc(hidden)] + pub fn new( + equivocators: Vec, + inactive_validators: Vec, + next_era_validator_weights: BTreeMap, + rewards: BTreeMap>, + next_era_gas_price: u8, + ) -> Self { + EraEndV2 { + equivocators, + inactive_validators, + next_era_validator_weights, + rewards, + next_era_gas_price, + } + } + + // This method is not intended to be used by third party crates. + #[doc(hidden)] + #[cfg(feature = "json-schema")] + pub fn example() -> &'static Self { + &ERA_END_V2 + } + + /// Returns a random `EraReport`. + #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut crate::testing::TestRng) -> Self { + use rand::Rng; + + let equivocators_count = rng.gen_range(0..5); + let inactive_count = rng.gen_range(0..5); + let next_era_validator_weights_count = rng.gen_range(0..5); + let rewards_count = rng.gen_range(0..5); + + let equivocators = core::iter::repeat_with(|| PublicKey::random(rng)) + .take(equivocators_count) + .collect(); + + let inactive_validators = core::iter::repeat_with(|| PublicKey::random(rng)) + .take(inactive_count) + .collect(); + + let next_era_validator_weights = core::iter::repeat_with(|| { + let pub_key = PublicKey::random(rng); + let reward = rng.gen_range(1..=1_000_000_000); + (pub_key, U512::from(reward)) + }) + .take(next_era_validator_weights_count) + .collect(); + + let rewards = core::iter::repeat_with(|| { + let pub_key = PublicKey::random(rng); + let mut rewards = vec![U512::from(rng.gen_range(1..=1_000_000_000 + 1))]; + if rng.gen_bool(0.2) { + rewards.push(U512::from(rng.gen_range(1..=1_000_000_000 + 1))); + }; + (pub_key, rewards) + }) + .take(rewards_count) + .collect(); + + Self::new( + equivocators, + inactive_validators, + next_era_validator_weights, + rewards, + 1u8, + ) + } +} + +impl ToBytes for EraEndV2 { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + let EraEndV2 { + equivocators, + inactive_validators, + next_era_validator_weights, + rewards, + next_era_gas_price, + } = self; + + 
equivocators.write_bytes(writer)?; + inactive_validators.write_bytes(writer)?; + next_era_validator_weights.write_bytes(writer)?; + rewards.write_bytes(writer)?; + next_era_gas_price.write_bytes(writer)?; + + Ok(()) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + let EraEndV2 { + equivocators, + inactive_validators, + next_era_validator_weights, + rewards, + next_era_gas_price, + } = self; + + equivocators.serialized_length() + + inactive_validators.serialized_length() + + next_era_validator_weights.serialized_length() + + rewards.serialized_length() + + next_era_gas_price.serialized_length() + } +} + +impl FromBytes for EraEndV2 { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (equivocators, bytes) = Vec::from_bytes(bytes)?; + let (inactive_validators, bytes) = Vec::from_bytes(bytes)?; + let (next_era_validator_weights, bytes) = BTreeMap::from_bytes(bytes)?; + let (rewards, bytes) = BTreeMap::from_bytes(bytes)?; + let (next_era_gas_price, bytes) = u8::from_bytes(bytes)?; + let era_end = EraEndV2 { + equivocators, + inactive_validators, + next_era_validator_weights, + rewards, + next_era_gas_price, + }; + + Ok((era_end, bytes)) + } +} + +impl fmt::Display for EraEndV2 { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let slashings = DisplayIter::new(&self.equivocators); + let rewards = DisplayIter::new( + self.rewards + .iter() + .map(|(public_key, amounts)| format!("{}: {:?}", public_key, amounts)), + ); + + write!( + formatter, + "era end: slash {}, reward {}", + slashings, rewards + ) + } +} + +struct NextEraValidatorLabels; + +impl KeyValueLabels for NextEraValidatorLabels { + const KEY: &'static str = "validator"; + const VALUE: &'static str = "weight"; +} + +#[cfg(feature = "json-schema")] +impl KeyValueJsonSchema for NextEraValidatorLabels { + const 
JSON_SCHEMA_KV_NAME: Option<&'static str> = Some("ValidatorWeight"); + const JSON_SCHEMA_KV_DESCRIPTION: Option<&'static str> = Some( + "A validator's public key paired with its weight, i.e. the total number of \ + motes staked by it and its delegators.", + ); + const JSON_SCHEMA_KEY_DESCRIPTION: Option<&'static str> = Some("The validator's public key."); + const JSON_SCHEMA_VALUE_DESCRIPTION: Option<&'static str> = Some("The validator's weight."); +} diff --git a/types/src/block/finality_signature.rs b/types/src/block/finality_signature.rs new file mode 100644 index 0000000000..4b908db3f7 --- /dev/null +++ b/types/src/block/finality_signature.rs @@ -0,0 +1,137 @@ +mod finality_signature_v1; +mod finality_signature_v2; + +pub use finality_signature_v1::FinalitySignatureV1; +pub use finality_signature_v2::FinalitySignatureV2; + +use core::{ + fmt::{self, Display, Formatter}, + hash::Hash, +}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::Rng; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use crate::{crypto, BlockHash, EraId, PublicKey, Signature}; +#[cfg(any(feature = "testing", test))] +use crate::{testing::TestRng, ChainNameDigest}; + +/// A validator's signature of a block, confirming it is finalized. +/// +/// Clients and joining nodes should wait until the signers' combined weight exceeds the fault +/// tolerance threshold before accepting the block as finalized. +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, Debug)] +#[cfg_attr( + feature = "json-schema", + derive(JsonSchema), + schemars(description = "A validator's signature of a block, confirming it is finalized.") +)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub enum FinalitySignature { + /// Version 1 of the finality signature. + V1(FinalitySignatureV1), + /// Version 2 of the finality signature. 
+ V2(FinalitySignatureV2), +} + +impl FinalitySignature { + /// Returns the block hash of the associated block. + pub fn block_hash(&self) -> &BlockHash { + match self { + FinalitySignature::V1(fs) => fs.block_hash(), + FinalitySignature::V2(fs) => fs.block_hash(), + } + } + + /// Returns the era in which the associated block was created. + pub fn era_id(&self) -> EraId { + match self { + FinalitySignature::V1(fs) => fs.era_id(), + FinalitySignature::V2(fs) => fs.era_id(), + } + } + + /// Returns the public key of the signing validator. + pub fn public_key(&self) -> &PublicKey { + match self { + FinalitySignature::V1(fs) => fs.public_key(), + FinalitySignature::V2(fs) => fs.public_key(), + } + } + + /// Returns the signature over the block hash of the associated block. + pub fn signature(&self) -> &Signature { + match self { + FinalitySignature::V1(fs) => fs.signature(), + FinalitySignature::V2(fs) => fs.signature(), + } + } + + /// Returns `Ok` if the signature is cryptographically valid. + pub fn is_verified(&self) -> Result<(), crypto::Error> { + match self { + FinalitySignature::V1(fs) => fs.is_verified(), + FinalitySignature::V2(fs) => fs.is_verified(), + } + } + + /// Returns a random `FinalitySignature`. + #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + let block_hash = BlockHash::random(rng); + let block_height = rng.gen(); + let era_id = EraId::random(rng); + let chain_name_hash = ChainNameDigest::random(rng); + Self::random_for_block(block_hash, block_height, era_id, chain_name_hash, rng) + } + + /// Returns a random `FinalitySignature` for the provided `block_hash` and `era_id`. 
+ #[cfg(any(feature = "testing", test))] + pub fn random_for_block( + block_hash: BlockHash, + block_height: u64, + era_id: EraId, + chain_name_hash: ChainNameDigest, + rng: &mut TestRng, + ) -> Self { + if rng.gen_bool(0.5) { + FinalitySignature::V1(FinalitySignatureV1::random_for_block( + block_hash, era_id, rng, + )) + } else { + FinalitySignature::V2(FinalitySignatureV2::random_for_block( + block_hash, + block_height, + era_id, + chain_name_hash, + rng, + )) + } + } +} + +impl From for FinalitySignature { + fn from(fs: FinalitySignatureV1) -> Self { + FinalitySignature::V1(fs) + } +} + +impl From for FinalitySignature { + fn from(fs: FinalitySignatureV2) -> Self { + FinalitySignature::V2(fs) + } +} + +impl Display for FinalitySignature { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + match self { + FinalitySignature::V1(fs) => write!(f, "{}", fs), + FinalitySignature::V2(fs) => write!(f, "{}", fs), + } + } +} diff --git a/types/src/block/finality_signature/finality_signature_v1.rs b/types/src/block/finality_signature/finality_signature_v1.rs new file mode 100644 index 0000000000..5fe272eb58 --- /dev/null +++ b/types/src/block/finality_signature/finality_signature_v1.rs @@ -0,0 +1,265 @@ +use alloc::vec::Vec; +use core::{ + cmp::Ordering, + fmt::{self, Display, Formatter}, + hash::{Hash, Hasher}, +}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "once_cell", test))] +use once_cell::sync::OnceCell; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +use crate::{crypto, BlockHash, EraId, PublicKey, SecretKey, Signature}; + +/// A validator's signature of a block, confirming it is finalized. +/// +/// Clients and joining nodes should wait until the signers' combined weight exceeds the fault +/// tolerance threshold before accepting the block as finalized. 
+#[derive(Clone, Eq, Serialize, Deserialize, Debug)] +#[cfg_attr( + feature = "json-schema", + derive(JsonSchema), + schemars(description = "A validator's signature of a block, confirming it is finalized.") +)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct FinalitySignatureV1 { + /// The block hash of the associated block. + pub(crate) block_hash: BlockHash, + /// The era in which the associated block was created. + pub(crate) era_id: EraId, + /// The signature over the block hash of the associated block. + pub(crate) signature: Signature, + /// The public key of the signing validator. + pub(crate) public_key: PublicKey, + #[serde(skip)] + #[cfg_attr( + all(any(feature = "once_cell", test), feature = "datasize"), + data_size(skip) + )] + #[cfg(any(feature = "once_cell", test))] + pub(crate) is_verified: OnceCell>, +} + +impl FinalitySignatureV1 { + /// Constructs a new `FinalitySignatureV1`. + pub fn create(block_hash: BlockHash, era_id: EraId, secret_key: &SecretKey) -> Self { + let bytes = Self::bytes_to_sign(&block_hash, era_id); + let public_key = PublicKey::from(secret_key); + let signature = crypto::sign(bytes, secret_key, &public_key); + FinalitySignatureV1 { + block_hash, + era_id, + signature, + public_key, + #[cfg(any(feature = "once_cell", test))] + is_verified: OnceCell::with_value(Ok(())), + } + } + + /// Returns the block hash of the associated block. + pub fn block_hash(&self) -> &BlockHash { + &self.block_hash + } + + /// Returns the era in which the associated block was created. + pub fn era_id(&self) -> EraId { + self.era_id + } + + /// Returns the signature over the block hash of the associated block. + pub fn signature(&self) -> &Signature { + &self.signature + } + + /// Returns the public key of the signing validator. + pub fn public_key(&self) -> &PublicKey { + &self.public_key + } + + /// Returns `Ok` if the signature is cryptographically valid. 
+ pub fn is_verified(&self) -> Result<(), crypto::Error> { + #[cfg(any(feature = "once_cell", test))] + return self.is_verified.get_or_init(|| self.verify()).clone(); + + #[cfg(not(any(feature = "once_cell", test)))] + self.verify() + } + + /// Constructs a new `FinalitySignatureV1`. + #[cfg(any(feature = "testing", test))] + pub fn new( + block_hash: BlockHash, + era_id: EraId, + signature: Signature, + public_key: PublicKey, + ) -> Self { + FinalitySignatureV1 { + block_hash, + era_id, + signature, + public_key, + #[cfg(any(feature = "once_cell", test))] + is_verified: OnceCell::new(), + } + } + + /// Returns a random `FinalitySignatureV1`. + #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + FinalitySignatureV1::random_for_block(BlockHash::random(rng), EraId::random(rng), rng) + } + + /// Returns a random `FinalitySignatureV1` for the provided `block_hash` and `era_id`. + #[cfg(any(feature = "testing", test))] + pub fn random_for_block(block_hash: BlockHash, era_id: EraId, rng: &mut TestRng) -> Self { + let secret_key = SecretKey::random(rng); + FinalitySignatureV1::create(block_hash, era_id, &secret_key) + } + + fn bytes_to_sign(block_hash: &BlockHash, era_id: EraId) -> Vec { + let mut bytes = block_hash.inner().into_vec(); + bytes.extend_from_slice(&era_id.to_le_bytes()); + bytes + } + + fn verify(&self) -> Result<(), crypto::Error> { + let bytes = Self::bytes_to_sign(&self.block_hash, self.era_id); + crypto::verify(bytes, &self.signature, &self.public_key) + } +} + +impl Hash for FinalitySignatureV1 { + fn hash(&self, state: &mut H) { + // Ensure we initialize self.is_verified field. + let is_verified = self.is_verified().is_ok(); + // Destructure to make sure we don't accidentally omit fields. 
+ #[cfg(any(feature = "once_cell", test))] + let FinalitySignatureV1 { + block_hash, + era_id, + signature, + public_key, + is_verified: _, + } = self; + #[cfg(not(any(feature = "once_cell", test)))] + let FinalitySignatureV1 { + block_hash, + era_id, + signature, + public_key, + } = self; + block_hash.hash(state); + era_id.hash(state); + signature.hash(state); + public_key.hash(state); + is_verified.hash(state); + } +} + +impl PartialEq for FinalitySignatureV1 { + fn eq(&self, other: &FinalitySignatureV1) -> bool { + // Ensure we initialize self.is_verified field. + let is_verified = self.is_verified().is_ok(); + // Destructure to make sure we don't accidentally omit fields. + #[cfg(any(feature = "once_cell", test))] + let FinalitySignatureV1 { + block_hash, + era_id, + signature, + public_key, + is_verified: _, + } = self; + #[cfg(not(any(feature = "once_cell", test)))] + let FinalitySignatureV1 { + block_hash, + era_id, + signature, + public_key, + } = self; + *block_hash == other.block_hash + && *era_id == other.era_id + && *signature == other.signature + && *public_key == other.public_key + && is_verified == other.is_verified().is_ok() + } +} + +impl Ord for FinalitySignatureV1 { + fn cmp(&self, other: &FinalitySignatureV1) -> Ordering { + // Ensure we initialize self.is_verified field. + let is_verified = self.is_verified().is_ok(); + // Destructure to make sure we don't accidentally omit fields. 
+ #[cfg(any(feature = "once_cell", test))] + let FinalitySignatureV1 { + block_hash, + era_id, + signature, + public_key, + is_verified: _, + } = self; + #[cfg(not(any(feature = "once_cell", test)))] + let FinalitySignatureV1 { + block_hash, + era_id, + signature, + public_key, + } = self; + block_hash + .cmp(&other.block_hash) + .then_with(|| era_id.cmp(&other.era_id)) + .then_with(|| signature.cmp(&other.signature)) + .then_with(|| public_key.cmp(&other.public_key)) + .then_with(|| is_verified.cmp(&other.is_verified().is_ok())) + } +} + +impl PartialOrd for FinalitySignatureV1 { + fn partial_cmp(&self, other: &FinalitySignatureV1) -> Option { + Some(self.cmp(other)) + } +} + +impl Display for FinalitySignatureV1 { + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + write!( + formatter, + "finality signature for {}, from {}", + self.block_hash, self.public_key + ) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::TestBlockBuilder; + + #[test] + fn finality_signature() { + let rng = &mut TestRng::new(); + let block = TestBlockBuilder::new().build(rng); + // Signature should be over both block hash and era id. + let secret_key = SecretKey::random(rng); + let public_key = PublicKey::from(&secret_key); + let era_id = EraId::from(1); + let finality_signature = FinalitySignatureV1::create(*block.hash(), era_id, &secret_key); + finality_signature.is_verified().unwrap(); + let signature = finality_signature.signature; + // Verify that signature includes era id. + let invalid_finality_signature = FinalitySignatureV1 { + block_hash: *block.hash(), + era_id: EraId::from(2), + signature, + public_key, + is_verified: OnceCell::new(), + }; + // Test should fail b/c `signature` is over `era_id=1` and here we're using `era_id=2`. 
+ assert!(invalid_finality_signature.is_verified().is_err()); + } +} diff --git a/types/src/block/finality_signature/finality_signature_v2.rs b/types/src/block/finality_signature/finality_signature_v2.rs new file mode 100644 index 0000000000..79fe48ec65 --- /dev/null +++ b/types/src/block/finality_signature/finality_signature_v2.rs @@ -0,0 +1,360 @@ +use alloc::vec::Vec; +use core::{ + cmp::Ordering, + fmt::{self, Display, Formatter}, + hash::{Hash, Hasher}, +}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "once_cell", test))] +use once_cell::sync::OnceCell; +#[cfg(any(feature = "testing", test))] +use rand::Rng; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +use crate::{crypto, BlockHash, ChainNameDigest, EraId, PublicKey, SecretKey, Signature}; + +/// A validator's signature of a block, confirming it is finalized. +/// +/// Clients and joining nodes should wait until the signers' combined weight exceeds the fault +/// tolerance threshold before accepting the block as finalized. +#[derive(Clone, Eq, Serialize, Deserialize, Debug)] +#[cfg_attr( + feature = "json-schema", + derive(JsonSchema), + schemars(description = "A validator's signature of a block, confirming it is finalized.") +)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct FinalitySignatureV2 { + /// The block hash of the associated block. + pub(crate) block_hash: BlockHash, + /// The height of the associated block. + pub(crate) block_height: u64, + /// The era in which the associated block was created. + pub(crate) era_id: EraId, + /// The hash of the chain name of the associated block. + pub(crate) chain_name_hash: ChainNameDigest, + /// The signature over the block hash of the associated block. + pub(crate) signature: Signature, + /// The public key of the signing validator. 
+ pub(crate) public_key: PublicKey, + #[serde(skip)] + #[cfg_attr( + all(any(feature = "once_cell", test), feature = "datasize"), + data_size(skip) + )] + #[cfg(any(feature = "once_cell", test))] + pub(crate) is_verified: OnceCell>, +} + +impl FinalitySignatureV2 { + /// Constructs a new `FinalitySignatureV2`. + pub fn create( + block_hash: BlockHash, + block_height: u64, + era_id: EraId, + chain_name_hash: ChainNameDigest, + secret_key: &SecretKey, + ) -> Self { + let bytes = Self::bytes_to_sign(block_hash, block_height, era_id, chain_name_hash); + let public_key = PublicKey::from(secret_key); + let signature = crypto::sign(bytes, secret_key, &public_key); + FinalitySignatureV2 { + block_hash, + block_height, + era_id, + chain_name_hash, + signature, + public_key, + #[cfg(any(feature = "once_cell", test))] + is_verified: OnceCell::with_value(Ok(())), + } + } + + /// Returns the block hash of the associated block. + pub fn block_hash(&self) -> &BlockHash { + &self.block_hash + } + + /// Returns the height of the associated block. + pub fn block_height(&self) -> u64 { + self.block_height + } + + /// Returns the era in which the associated block was created. + pub fn era_id(&self) -> EraId { + self.era_id + } + + /// Returns the hash of the chain name of the associated block. + pub fn chain_name_hash(&self) -> ChainNameDigest { + self.chain_name_hash + } + + /// Returns the signature over the block hash of the associated block. + pub fn signature(&self) -> &Signature { + &self.signature + } + + /// Returns the public key of the signing validator. + pub fn public_key(&self) -> &PublicKey { + &self.public_key + } + + /// Returns `Ok` if the signature is cryptographically valid. + pub fn is_verified(&self) -> Result<(), crypto::Error> { + #[cfg(any(feature = "once_cell", test))] + return self.is_verified.get_or_init(|| self.verify()).clone(); + + #[cfg(not(any(feature = "once_cell", test)))] + self.verify() + } + + /// Constructs a new `FinalitySignatureV2`. 
+ #[cfg(any(feature = "testing", test))] + pub fn new( + block_hash: BlockHash, + block_height: u64, + era_id: EraId, + chain_name_hash: ChainNameDigest, + signature: Signature, + public_key: PublicKey, + ) -> Self { + FinalitySignatureV2 { + block_hash, + block_height, + era_id, + chain_name_hash, + signature, + public_key, + #[cfg(any(feature = "once_cell", test))] + is_verified: OnceCell::new(), + } + } + + /// Returns a random `FinalitySignatureV2`. + #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + FinalitySignatureV2::random_for_block( + BlockHash::random(rng), + rng.gen(), + EraId::random(rng), + ChainNameDigest::random(rng), + rng, + ) + } + + /// Returns a random `FinalitySignatureV2` for the provided `block_hash`, `block_height`, + /// `era_id`, and `chain_name_hash`. + #[cfg(any(feature = "testing", test))] + pub fn random_for_block( + block_hash: BlockHash, + block_height: u64, + era_id: EraId, + chain_name_hash: ChainNameDigest, + rng: &mut TestRng, + ) -> Self { + let secret_key = SecretKey::random(rng); + FinalitySignatureV2::create( + block_hash, + block_height, + era_id, + chain_name_hash, + &secret_key, + ) + } + + fn bytes_to_sign( + block_hash: BlockHash, + block_height: u64, + era_id: EraId, + chain_name_hash: ChainNameDigest, + ) -> Vec { + let mut bytes = block_hash.inner().into_vec(); + bytes.extend_from_slice(&block_height.to_le_bytes()); + bytes.extend_from_slice(&era_id.to_le_bytes()); + bytes.extend_from_slice(chain_name_hash.inner().as_ref()); + bytes + } + + fn verify(&self) -> Result<(), crypto::Error> { + let bytes = Self::bytes_to_sign( + self.block_hash, + self.block_height, + self.era_id, + self.chain_name_hash, + ); + crypto::verify(bytes, &self.signature, &self.public_key) + } +} + +impl Hash for FinalitySignatureV2 { + fn hash(&self, state: &mut H) { + // Ensure we initialize self.is_verified field. 
+ let is_verified = self.is_verified().is_ok(); + // Destructure to make sure we don't accidentally omit fields. + #[cfg(any(feature = "once_cell", test))] + let FinalitySignatureV2 { + block_hash, + block_height, + era_id, + chain_name_hash, + signature, + public_key, + is_verified: _, + } = self; + #[cfg(not(any(feature = "once_cell", test)))] + let FinalitySignatureV2 { + block_hash, + block_height, + era_id, + chain_name_hash, + signature, + public_key, + } = self; + block_hash.hash(state); + block_height.hash(state); + era_id.hash(state); + chain_name_hash.hash(state); + signature.hash(state); + public_key.hash(state); + is_verified.hash(state); + } +} + +impl PartialEq for FinalitySignatureV2 { + fn eq(&self, other: &FinalitySignatureV2) -> bool { + // Ensure we initialize self.is_verified field. + let is_verified = self.is_verified().is_ok(); + // Destructure to make sure we don't accidentally omit fields. + #[cfg(any(feature = "once_cell", test))] + let FinalitySignatureV2 { + block_hash, + block_height, + era_id, + chain_name_hash, + signature, + public_key, + is_verified: _, + } = self; + #[cfg(not(any(feature = "once_cell", test)))] + let FinalitySignatureV2 { + block_hash, + block_height, + era_id, + chain_name_hash, + signature, + public_key, + } = self; + *block_hash == other.block_hash + && *block_height == other.block_height + && *era_id == other.era_id + && *chain_name_hash == other.chain_name_hash + && *signature == other.signature + && *public_key == other.public_key + && is_verified == other.is_verified().is_ok() + } +} + +impl Ord for FinalitySignatureV2 { + fn cmp(&self, other: &FinalitySignatureV2) -> Ordering { + // Ensure we initialize self.is_verified field. + let is_verified = self.is_verified().is_ok(); + // Destructure to make sure we don't accidentally omit fields. 
+ #[cfg(any(feature = "once_cell", test))] + let FinalitySignatureV2 { + block_hash, + block_height, + era_id, + chain_name_hash, + signature, + public_key, + is_verified: _, + } = self; + #[cfg(not(any(feature = "once_cell", test)))] + let FinalitySignatureV2 { + block_hash, + block_height, + era_id, + chain_name_hash, + signature, + public_key, + } = self; + block_hash + .cmp(&other.block_hash) + .then_with(|| block_height.cmp(&other.block_height)) + .then_with(|| era_id.cmp(&other.era_id)) + .then_with(|| chain_name_hash.cmp(&other.chain_name_hash)) + .then_with(|| signature.cmp(&other.signature)) + .then_with(|| public_key.cmp(&other.public_key)) + .then_with(|| is_verified.cmp(&other.is_verified().is_ok())) + } +} + +impl PartialOrd for FinalitySignatureV2 { + fn partial_cmp(&self, other: &FinalitySignatureV2) -> Option { + Some(self.cmp(other)) + } +} + +impl Display for FinalitySignatureV2 { + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + write!( + formatter, + "finality signature for {}, from {}", + self.block_hash, self.public_key + ) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::TestBlockBuilder; + + #[test] + fn finality_signature() { + let rng = &mut TestRng::new(); + let block = TestBlockBuilder::new().build(rng); + // Signature should be over block hash, block height, era id and chain name hash. + let secret_key = SecretKey::random(rng); + let era_id = EraId::from(1); + let chain_name_hash = ChainNameDigest::from_chain_name("example"); + let finality_signature = FinalitySignatureV2::create( + *block.hash(), + block.height(), + era_id, + chain_name_hash, + &secret_key, + ); + finality_signature + .is_verified() + .expect("should have verified"); + // Verify that changing era causes verification to fail. 
+ let invalid_finality_signature = FinalitySignatureV2 { + era_id: EraId::from(2), + is_verified: OnceCell::new(), + ..finality_signature.clone() + }; + assert!(invalid_finality_signature.is_verified().is_err()); + // Verify that changing block height causes verification to fail. + let invalid_finality_signature = FinalitySignatureV2 { + block_height: block.height() + 1, + is_verified: OnceCell::new(), + ..finality_signature.clone() + }; + assert!(invalid_finality_signature.is_verified().is_err()); + // Verify that changing chain name hash causes verification to fail. + let invalid_finality_signature = FinalitySignatureV2 { + chain_name_hash: ChainNameDigest::from_chain_name("different"), + is_verified: OnceCell::new(), + ..finality_signature + }; + assert!(invalid_finality_signature.is_verified().is_err()); + } +} diff --git a/types/src/block/finality_signature_id.rs b/types/src/block/finality_signature_id.rs new file mode 100644 index 0000000000..211071e2ef --- /dev/null +++ b/types/src/block/finality_signature_id.rs @@ -0,0 +1,55 @@ +use core::fmt::{self, Display, Formatter}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +use serde::{Deserialize, Serialize}; + +use super::BlockHash; +#[cfg(doc)] +use super::FinalitySignature; +use crate::{EraId, PublicKey}; + +/// An identifier for a [`FinalitySignature`]. +#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct FinalitySignatureId { + block_hash: BlockHash, + era_id: EraId, + public_key: PublicKey, +} + +impl FinalitySignatureId { + /// Returns a new `FinalitySignatureId`. + pub fn new(block_hash: BlockHash, era_id: EraId, public_key: PublicKey) -> Self { + FinalitySignatureId { + block_hash, + era_id, + public_key, + } + } + + /// Returns the block hash of the associated block. + pub fn block_hash(&self) -> &BlockHash { + &self.block_hash + } + + /// Returns the era in which the associated block was created. 
+ pub fn era_id(&self) -> EraId { + self.era_id + } + + /// Returns the public key of the signing validator. + pub fn public_key(&self) -> &PublicKey { + &self.public_key + } +} + +impl Display for FinalitySignatureId { + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + write!( + formatter, + "finality signature id for {}, from {}", + self.block_hash, self.public_key + ) + } +} diff --git a/types/src/block/json_compatibility.rs b/types/src/block/json_compatibility.rs new file mode 100644 index 0000000000..1c25637631 --- /dev/null +++ b/types/src/block/json_compatibility.rs @@ -0,0 +1,8 @@ +//! This module provides types primarily to support converting instances of `BTreeMap` into +//! `Vec<(K, V)>` or similar, in order to allow these types to be able to be converted to and from +//! JSON, and to allow for the production of a static schema for them. + +#![cfg(all(feature = "std", feature = "json-schema"))] +mod json_block_with_signatures; + +pub use json_block_with_signatures::JsonBlockWithSignatures; diff --git a/types/src/block/json_compatibility/json_block_with_signatures.rs b/types/src/block/json_compatibility/json_block_with_signatures.rs new file mode 100644 index 0000000000..8acde59535 --- /dev/null +++ b/types/src/block/json_compatibility/json_block_with_signatures.rs @@ -0,0 +1,113 @@ +use alloc::collections::BTreeMap; +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use once_cell::sync::Lazy; +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; +use serde_map_to_array::{BTreeMapToArray, KeyValueJsonSchema, KeyValueLabels}; + +use crate::{crypto, Block, BlockSignatures, BlockV2, PublicKey, SecretKey, Signature}; + +#[cfg(feature = "json-schema")] +static JSON_BLOCK_WITH_SIGNATURES: Lazy = Lazy::new(|| { + let block = BlockV2::example().clone(); + let secret_key = SecretKey::example(); + let public_key = PublicKey::from(secret_key); + let signature = crypto::sign(block.hash.inner(), 
secret_key, &public_key); + let mut proofs = BTreeMap::new(); + proofs.insert(public_key, signature); + + JsonBlockWithSignatures { + block: block.into(), + proofs, + } +}); + +/// A JSON-friendly representation of a block and the signatures for that block. +#[derive(Clone, PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[serde(deny_unknown_fields)] +pub struct JsonBlockWithSignatures { + /// The block. + pub block: Block, + /// The proofs of the block, i.e. a collection of validators' signatures of the block hash. + #[serde(with = "BTreeMapToArray::")] + pub proofs: BTreeMap, +} + +impl JsonBlockWithSignatures { + /// Constructs a new `JsonBlock`. + pub fn new(block: Block, maybe_signatures: Option) -> Self { + let proofs = maybe_signatures + .map(|signatures| signatures.into_proofs()) + .unwrap_or_default(); + + JsonBlockWithSignatures { block, proofs } + } + + // This method is not intended to be used by third party crates. 
+ #[doc(hidden)] + pub fn example() -> &'static Self { + &JSON_BLOCK_WITH_SIGNATURES + } +} +struct BlockProofLabels; + +impl KeyValueLabels for BlockProofLabels { + const KEY: &'static str = "public_key"; + const VALUE: &'static str = "signature"; +} + +impl KeyValueJsonSchema for BlockProofLabels { + const JSON_SCHEMA_KV_NAME: Option<&'static str> = Some("BlockProof"); + const JSON_SCHEMA_KV_DESCRIPTION: Option<&'static str> = Some( + "A validator's public key paired with a corresponding signature of a given block hash.", + ); + const JSON_SCHEMA_KEY_DESCRIPTION: Option<&'static str> = Some("The validator's public key."); + const JSON_SCHEMA_VALUE_DESCRIPTION: Option<&'static str> = Some("The validator's signature."); +} + +#[cfg(test)] +mod tests { + use crate::{ + testing::TestRng, BlockSignaturesV1, BlockSignaturesV2, ChainNameDigest, TestBlockBuilder, + }; + + use super::*; + + #[test] + fn block_to_and_from_json_block_with_signatures_v1() { + let rng = &mut TestRng::new(); + let block: Block = TestBlockBuilder::new().build(rng).into(); + let empty_signatures = + BlockSignatures::V1(BlockSignaturesV1::new(*block.hash(), block.era_id())); + let json_block = JsonBlockWithSignatures::new(block.clone(), Some(empty_signatures)); + let recovered_block = Block::from(json_block); + assert_eq!(block, recovered_block); + } + + #[test] + fn block_to_and_from_json_block_with_signatures_v2() { + let rng = &mut TestRng::new(); + let block: Block = TestBlockBuilder::new().build(rng).into(); + let empty_signatures = BlockSignatures::V2(BlockSignaturesV2::new( + *block.hash(), + block.height(), + block.era_id(), + ChainNameDigest::random(rng), + )); + let json_block = JsonBlockWithSignatures::new(block.clone(), Some(empty_signatures)); + let recovered_block = Block::from(json_block); + assert_eq!(block, recovered_block); + } + + #[test] + fn json_block_roundtrip() { + let rng = &mut TestRng::new(); + let block: Block = TestBlockBuilder::new().build(rng).into(); + let 
json_string = serde_json::to_string_pretty(&block).unwrap(); + let decoded = serde_json::from_str(&json_string).unwrap(); + assert_eq!(block, decoded); + } +} diff --git a/types/src/block/rewarded_signatures.rs b/types/src/block/rewarded_signatures.rs new file mode 100644 index 0000000000..e483f95a38 --- /dev/null +++ b/types/src/block/rewarded_signatures.rs @@ -0,0 +1,474 @@ +use alloc::{collections::BTreeSet, vec::Vec}; + +use crate::{ + bytesrepr::{self, Bytes, FromBytes, ToBytes}, + PublicKey, +}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; + +use serde::{Deserialize, Serialize}; +use tracing::error; + +/// Describes finality signatures that will be rewarded in a block. Consists of a vector of +/// `SingleBlockRewardedSignatures`, each of which describes signatures for a single ancestor +/// block. The first entry represents the signatures for the parent block, the second for the +/// parent of the parent, and so on. +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Default, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub struct RewardedSignatures(Vec); + +/// List of identifiers for finality signatures for a particular past block. +/// +/// That past block height is current_height - signature_rewards_max_delay, the latter being defined +/// in the chainspec. +/// +/// We need to wait for a few blocks to pass (`signature_rewards_max_delay`) to store the finality +/// signers because we need a bit of time to get the block finality. 
+#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Default, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub struct SingleBlockRewardedSignatures(Vec); + +impl SingleBlockRewardedSignatures { + /// Creates a new set of recorded finality signaures from the era's validators + + /// the list of validators which signed. + pub fn from_validator_set<'a>( + public_keys: &BTreeSet, + all_validators: impl IntoIterator, + ) -> Self { + // Take the validators list + // Replace the ones who signed with 1 and the ones who didn't with 0 + // Pack everything into bytes + let result = Self::pack( + all_validators + .into_iter() + .map(|key| u8::from(public_keys.contains(key))), + ); + + let included_count: u32 = result.0.iter().map(|c| c.count_ones()).sum(); + if included_count as usize != public_keys.len() { + error!( + included_count, + expected_count = public_keys.len(), + "error creating past finality signatures from validator set" + ); + } + + result + } + + /// Gets the list of validators which signed from a set of recorded finality signaures (`self`) + /// + the era's validators. + pub fn to_validator_set( + &self, + all_validators: impl IntoIterator, + ) -> BTreeSet { + self.unpack() + .zip(all_validators) + .filter_map(|(active, validator)| (active != 0).then_some(validator)) + .collect() + } + + /// Packs the bits to bytes, to create a `PastFinalitySignature` + /// from an iterator of bits. + /// + /// If a value is neither 1 nor 0, it is interpreted as a 1. 
+ #[doc(hidden)] + pub fn pack(bits: impl Iterator) -> Self { + //use itertools::Itertools; + + fn set_bit_at(value: u8, position: usize) -> u8 { + // Sanitize the value (must be 0 or 1): + let value = u8::from(value != 0); + + value << (7 - position) + } + + let inner = chunks_8(bits) + .map(|bits_chunk| { + bits_chunk + .enumerate() + .fold(0, |acc, (pos, value)| acc | set_bit_at(value, pos)) + }) + .collect(); + + SingleBlockRewardedSignatures(inner) + } + + /// Unpacks the bytes to bits, + /// to get a human readable representation of `PastFinalitySignature`. + #[doc(hidden)] + pub fn unpack(&self) -> impl Iterator + '_ { + // Returns the bit at the given position (0 or 1): + fn bit_at(byte: u8, position: u8) -> u8 { + (byte & (0b1000_0000 >> position)) >> (7 - position) + } + + self.0 + .iter() + .flat_map(|&byte| (0..8).map(move |i| bit_at(byte, i))) + } + + /// Calculates the set difference of two instances of `SingleBlockRewardedSignatures`. + #[doc(hidden)] + pub fn difference(mut self, other: &SingleBlockRewardedSignatures) -> Self { + for (self_byte, other_byte) in self.0.iter_mut().zip(other.0.iter()) { + *self_byte &= !other_byte; + } + self + } + + /// Calculates the set intersection of two instances of `SingleBlockRewardedSignatures`. + pub fn intersection(mut self, other: &SingleBlockRewardedSignatures) -> Self { + self.0 = self + .0 + .iter() + .zip(other.0.iter()) + .map(|(a, b)| *a & *b) + .collect(); + self + } + + /// Returns `true` if the set contains at least one signature. 
+ pub fn has_some(&self) -> bool { + self.0.iter().any(|byte| *byte != 0) + } +} + +impl ToBytes for SingleBlockRewardedSignatures { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + buffer.extend(Bytes::from(self.0.as_ref()).to_bytes()?); + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } +} + +impl FromBytes for SingleBlockRewardedSignatures { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (inner, rest) = Bytes::from_bytes(bytes)?; + Ok((SingleBlockRewardedSignatures(inner.into()), rest)) + } +} + +impl RewardedSignatures { + /// Creates a new instance of `RewardedSignatures`. + pub fn new>( + single_block_signatures: I, + ) -> Self { + Self(single_block_signatures.into_iter().collect()) + } + + /// Creates an instance of `RewardedSignatures` based on its unpacked (one byte per validator) + /// representation. + pub fn pack(unpacked: Vec>) -> Self { + Self( + unpacked + .into_iter() + .map(|single_block_signatures| { + SingleBlockRewardedSignatures::pack(single_block_signatures.into_iter()) + }) + .collect(), + ) + } + + /// Creates an unpacked (one byte per validator) representation of the finality signatures to + /// be rewarded in this block. + pub fn unpack(&self) -> Vec> { + self.0 + .iter() + .map(|single_block_signatures| single_block_signatures.unpack().collect()) + .collect() + } + + /// Returns this instance of `RewardedSignatures` with `num_blocks` of empty signatures + /// prepended. + pub fn left_padded(self, num_blocks: usize) -> Self { + Self( + core::iter::repeat_with(SingleBlockRewardedSignatures::default) + .take(num_blocks) + .chain(self.0) + .collect(), + ) + } + + /// Calculates the set difference between two instances of `RewardedSignatures`. 
+ pub fn difference(self, other: &RewardedSignatures) -> Self { + Self( + self.0 + .into_iter() + .zip(other.0.iter()) + .map(|(single_block_signatures, other_block_signatures)| { + single_block_signatures.difference(other_block_signatures) + }) + .collect(), + ) + } + + /// Calculates the set intersection between two instances of `RewardedSignatures`. + pub fn intersection(&self, other: &RewardedSignatures) -> Self { + Self( + self.0 + .iter() + .zip(other.0.iter()) + .map(|(single_block_signatures, other_block_signatures)| { + single_block_signatures + .clone() + .intersection(other_block_signatures) + }) + .collect(), + ) + } + + /// Iterates over the `SingleBlockRewardedSignatures` for each rewarded block. + pub fn iter(&self) -> impl Iterator { + self.0.iter() + } + + /// Iterates over the `SingleBlockRewardedSignatures`, yielding the signatures together with + /// the block height for each entry. `block_height` is the height of the block that contains + /// this instance of `RewardedSignatures`. + pub fn iter_with_height( + &self, + block_height: u64, + ) -> impl Iterator { + self.0.iter().enumerate().map(move |(rel_height, sbrs)| { + ( + block_height + .saturating_sub(rel_height as u64) + .saturating_sub(1), + sbrs, + ) + }) + } + + /// Returns `true` if there is at least one cited signature. + pub fn has_some(&self) -> bool { + self.0.iter().any(|signatures| signatures.has_some()) + } +} + +pub(crate) static EMPTY: RewardedSignatures = RewardedSignatures(Vec::new()); + +impl ToBytes for RewardedSignatures { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.0.to_bytes() + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } +} + +impl FromBytes for RewardedSignatures { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + Vec::::from_bytes(bytes) + .map(|(inner, rest)| (RewardedSignatures(inner), rest)) + } +} + +/// Chunks an iterator over `u8`s into pieces of maximum size of 8. 
+fn chunks_8(bits: impl Iterator) -> impl Iterator> { + struct Chunks(B); + + struct Chunk { + values: [u8; 8], + index: usize, + max: usize, + } + + impl Iterator for Chunks + where + B: Iterator, + { + type Item = Chunk; + + fn next(&mut self) -> Option { + let mut values = [0; 8]; + let max = core::iter::zip(&mut values, &mut self.0) + .map(|(array_slot, value)| *array_slot = value) + .count(); + + (max != 0).then_some(Chunk { + values, + max, + index: 0, + }) + } + } + + impl Iterator for Chunk { + type Item = u8; + + fn next(&mut self) -> Option { + if self.index < self.max { + let n = self.values.get(self.index).cloned(); + self.index += 1; + n + } else { + None + } + } + } + + Chunks(bits) +} + +#[cfg(any(feature = "testing", test))] +impl SingleBlockRewardedSignatures { + /// Returns random data. + pub fn random(rng: &mut crate::testing::TestRng, n_validators: usize) -> Self { + let mut bytes = vec![0; (n_validators + 7) / 8]; + + rand::RngCore::fill_bytes(rng, bytes.as_mut()); + + SingleBlockRewardedSignatures(bytes) + } +} + +#[cfg(test)] +mod tests { + use super::{chunks_8, SingleBlockRewardedSignatures}; + use crate::{ + bytesrepr::{FromBytes, ToBytes}, + testing::TestRng, + PublicKey, + }; + use rand::{seq::IteratorRandom, Rng}; + use std::collections::BTreeSet; + + #[test] + fn empty_signatures() { + let rng = &mut TestRng::new(); + let validators: Vec<_> = std::iter::repeat_with(|| PublicKey::random(rng)) + .take(7) + .collect(); + let original_signed = BTreeSet::new(); + + let past_finality_signatures = + SingleBlockRewardedSignatures::from_validator_set(&original_signed, validators.iter()); + + assert_eq!(past_finality_signatures.0, &[0]); + + let signed = past_finality_signatures.to_validator_set(validators); + + assert_eq!(original_signed, signed); + } + + #[test] + fn from_and_to_methods_match_in_a_simple_case() { + let rng = &mut TestRng::new(); + let validators: Vec<_> = std::iter::repeat_with(|| PublicKey::random(rng)) + .take(11) + 
.collect(); + let signed = { + let mut signed = BTreeSet::new(); + signed.insert(validators[2].clone()); + signed.insert(validators[5].clone()); + signed.insert(validators[6].clone()); + signed.insert(validators[8].clone()); + signed.insert(validators[10].clone()); + signed + }; + + let past_finality_signatures = + SingleBlockRewardedSignatures::from_validator_set(&signed, validators.iter()); + + assert_eq!(past_finality_signatures.0, &[0b0010_0110, 0b1010_0000]); + + let signed_ = past_finality_signatures.to_validator_set(validators); + + assert_eq!(signed, signed_); + } + + #[test] + fn simple_serialization_roundtrip() { + let data = SingleBlockRewardedSignatures(vec![1, 2, 3, 4, 5]); + + let serialized = data.to_bytes().unwrap(); + assert_eq!(serialized.len(), data.0.len() + 4); + assert_eq!(data.serialized_length(), data.0.len() + 4); + + let (deserialized, rest) = SingleBlockRewardedSignatures::from_bytes(&serialized).unwrap(); + + assert_eq!(data, deserialized); + assert_eq!(rest, &[0u8; 0]); + } + + #[test] + fn serialization_roundtrip_of_empty_data() { + let data = SingleBlockRewardedSignatures::default(); + + let serialized = data.to_bytes().unwrap(); + assert_eq!(serialized, &[0; 4]); + assert_eq!(data.serialized_length(), 4); + + let (deserialized, rest) = SingleBlockRewardedSignatures::from_bytes(&serialized).unwrap(); + + assert_eq!(data, deserialized); + assert_eq!(rest, &[0u8; 0]); + } + + #[test] + fn serialization_roundtrip_of_random_data() { + let rng = &mut TestRng::new(); + let n_validators = rng.gen_range(50..200); + let all_validators: BTreeSet<_> = std::iter::repeat_with(|| PublicKey::random(rng)) + .take(n_validators) + .collect(); + let n_to_sign = rng.gen_range(0..all_validators.len()); + let public_keys = all_validators + .iter() + .cloned() + .choose_multiple(rng, n_to_sign) + .into_iter() + .collect(); + + let past_finality_signatures = + SingleBlockRewardedSignatures::from_validator_set(&public_keys, all_validators.iter()); + + let 
serialized = past_finality_signatures.to_bytes().unwrap(); + let (deserialized, rest) = SingleBlockRewardedSignatures::from_bytes(&serialized).unwrap(); + + assert_eq!(public_keys, deserialized.to_validator_set(all_validators)); + assert_eq!(rest, &[0u8; 0]); + } + + #[test] + fn chunk_iterator() { + fn v(maybe_chunk: Option>) -> Option> { + maybe_chunk.map(itertools::Itertools::collect_vec) + } + + // Empty chunks: + + let mut chunks = chunks_8(IntoIterator::into_iter([])); + + assert_eq!(v(chunks.next()), None); + + // Exact size chunk: + + let mut chunks = chunks_8(IntoIterator::into_iter([10, 11, 12, 13, 14, 15, 16, 17])); + + assert_eq!(v(chunks.next()), Some(vec![10, 11, 12, 13, 14, 15, 16, 17])); + assert_eq!(v(chunks.next()), None); + + // Chunks with a remainder: + + let mut chunks = chunks_8(IntoIterator::into_iter([ + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, + ])); + + assert_eq!(v(chunks.next()), Some(vec![10, 11, 12, 13, 14, 15, 16, 17])); + assert_eq!(v(chunks.next()), Some(vec![18, 19, 20, 21, 22, 23, 24, 25])); + assert_eq!(v(chunks.next()), Some(vec![26])); + } +} diff --git a/types/src/block/rewards.rs b/types/src/block/rewards.rs new file mode 100644 index 0000000000..af46a28825 --- /dev/null +++ b/types/src/block/rewards.rs @@ -0,0 +1,12 @@ +use alloc::{collections::BTreeMap, vec::Vec}; + +use crate::{PublicKey, U512}; + +/// Rewards distributed to validators. +#[derive(Debug)] +pub enum Rewards<'a> { + /// Rewards for version 1, associate a ratio to each validator. + V1(&'a BTreeMap), + /// Rewards for version 1, associate a tokens amount to each validator. 
+ V2(&'a BTreeMap>), +} diff --git a/types/src/block/test_block_builder.rs b/types/src/block/test_block_builder.rs new file mode 100644 index 0000000000..d514963781 --- /dev/null +++ b/types/src/block/test_block_builder.rs @@ -0,0 +1,5 @@ +mod test_block_v1_builder; +mod test_block_v2_builder; + +pub use test_block_v1_builder::TestBlockV1Builder; +pub use test_block_v2_builder::TestBlockV2Builder as TestBlockBuilder; diff --git a/types/src/block/test_block_builder/test_block_v1_builder.rs b/types/src/block/test_block_builder/test_block_v1_builder.rs new file mode 100644 index 0000000000..1a6b68a774 --- /dev/null +++ b/types/src/block/test_block_builder/test_block_v1_builder.rs @@ -0,0 +1,183 @@ +use std::iter; + +use rand::Rng; + +use crate::{testing::TestRng, Block, EraEndV1}; + +use crate::{ + system::auction::ValidatorWeights, BlockHash, BlockV1, Deploy, Digest, EraId, EraReport, + ProtocolVersion, PublicKey, Timestamp, U512, +}; + +/// A helper to build the blocks with various properties required for tests. +pub struct TestBlockV1Builder { + parent_hash: Option, + state_root_hash: Option, + timestamp: Option, + era: Option, + height: Option, + protocol_version: ProtocolVersion, + deploys: Vec, + is_switch: Option, + validator_weights: Option, +} + +impl Default for TestBlockV1Builder { + fn default() -> Self { + Self { + parent_hash: None, + state_root_hash: None, + timestamp: None, + era: None, + height: None, + protocol_version: ProtocolVersion::V1_0_0, + deploys: Vec::new(), + is_switch: None, + validator_weights: None, + } + } +} + +impl TestBlockV1Builder { + /// Creates new `TestBlockBuilder`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the parent hash for the block. + pub fn parent_hash(self, parent_hash: BlockHash) -> Self { + Self { + parent_hash: Some(parent_hash), + ..self + } + } + + /// Sets the state root hash for the block. 
+ pub fn state_root_hash(self, state_root_hash: Digest) -> Self { + Self { + state_root_hash: Some(state_root_hash), + ..self + } + } + + /// Sets the timestamp for the block. + pub fn timestamp(self, timestamp: Timestamp) -> Self { + Self { + timestamp: Some(timestamp), + ..self + } + } + + /// Sets the era for the block + pub fn era(self, era: impl Into) -> Self { + Self { + era: Some(era.into()), + ..self + } + } + + /// Sets the height for the block. + pub fn height(self, height: u64) -> Self { + Self { + height: Some(height), + ..self + } + } + + /// Sets the protocol version for the block. + pub fn protocol_version(self, protocol_version: ProtocolVersion) -> Self { + Self { + protocol_version, + ..self + } + } + + /// Associates the given deploys with the created block. + pub fn deploys<'a, I: IntoIterator>(self, deploys_iter: I) -> Self { + Self { + deploys: deploys_iter.into_iter().cloned().collect(), + ..self + } + } + + /// Associates a number of random deploys with the created block. + pub fn random_deploys(mut self, count: usize, rng: &mut TestRng) -> Self { + self.deploys = iter::repeat(()) + .take(count) + .map(|_| Deploy::random(rng)) + .collect(); + self + } + + /// Allows setting the created block to be switch block or not. + pub fn switch_block(self, is_switch: bool) -> Self { + Self { + is_switch: Some(is_switch), + ..self + } + } + + /// Sets the validator weights for the block. + pub fn validator_weights(self, validator_weights: ValidatorWeights) -> Self { + Self { + validator_weights: Some(validator_weights), + ..self + } + } + + /// Builds the block. 
+ pub fn build(self, rng: &mut TestRng) -> BlockV1 { + let Self { + parent_hash, + state_root_hash, + timestamp, + era, + height, + protocol_version, + deploys, + is_switch, + validator_weights, + } = self; + + let parent_hash = parent_hash.unwrap_or_else(|| BlockHash::new(rng.gen())); + let parent_seed = Digest::random(rng); + let state_root_hash = state_root_hash.unwrap_or_else(|| rng.gen()); + let random_bit = rng.gen(); + let is_switch = is_switch.unwrap_or_else(|| rng.gen_bool(0.1)); + let era_end = is_switch.then(|| { + let next_era_validator_weights = validator_weights.unwrap_or_else(|| { + (1..6) + .map(|i| (PublicKey::random(rng), U512::from(i))) + .take(6) + .collect() + }); + EraEndV1::new(EraReport::random(rng), next_era_validator_weights) + }); + let timestamp = timestamp.unwrap_or_else(Timestamp::now); + let era_id = era.unwrap_or(EraId::random(rng)); + let height = height.unwrap_or_else(|| era_id.value() * 10 + rng.gen_range(0..10)); + let proposer = PublicKey::random(rng); + let deploy_hashes = deploys.iter().map(|deploy| *deploy.hash()).collect(); + let transfer_hashes = vec![]; + + BlockV1::new( + parent_hash, + parent_seed, + state_root_hash, + random_bit, + era_end, + timestamp, + era_id, + height, + protocol_version, + proposer, + deploy_hashes, + transfer_hashes, + ) + } + + /// Builds the block as a versioned block. 
+ pub fn build_versioned(self, rng: &mut TestRng) -> Block { + self.build(rng).into() + } +} diff --git a/types/src/block/test_block_builder/test_block_v2_builder.rs b/types/src/block/test_block_builder/test_block_v2_builder.rs new file mode 100644 index 0000000000..a0b35ee5d4 --- /dev/null +++ b/types/src/block/test_block_builder/test_block_v2_builder.rs @@ -0,0 +1,354 @@ +use std::iter; + +use alloc::collections::BTreeMap; +use rand::Rng; + +use crate::{ + system::auction::ValidatorWeights, testing::TestRng, Block, BlockHash, BlockV2, Digest, + EraEndV2, EraId, ProtocolVersion, PublicKey, RewardedSignatures, Timestamp, Transaction, + TransactionEntryPoint, TransactionTarget, AUCTION_LANE_ID, INSTALL_UPGRADE_LANE_ID, + LARGE_WASM_LANE_ID, MEDIUM_WASM_LANE_ID, MINT_LANE_ID, SMALL_WASM_LANE_ID, U512, +}; + +/// A helper to build the blocks with various properties required for tests. +pub struct TestBlockV2Builder { + parent_hash: Option, + state_root_hash: Option, + timestamp: Option, + era: Option, + height: Option, + proposer: Option, + protocol_version: ProtocolVersion, + txns: Vec, + is_switch: Option, + validator_weights: Option, + rewarded_signatures: Option, +} + +impl Default for TestBlockV2Builder { + fn default() -> Self { + Self { + parent_hash: None, + state_root_hash: None, + timestamp: None, + era: None, + height: None, + proposer: None, + protocol_version: ProtocolVersion::V1_0_0, + txns: Vec::new(), + is_switch: None, + validator_weights: None, + rewarded_signatures: None, + } + } +} + +impl TestBlockV2Builder { + /// Creates new `TestBlockBuilder`. + pub fn new() -> Self { + Self::default() + } + + /// Sets the parent hash for the block. + pub fn parent_hash(self, parent_hash: BlockHash) -> Self { + Self { + parent_hash: Some(parent_hash), + ..self + } + } + + /// Sets the state root hash for the block. 
+ pub fn state_root_hash(self, state_root_hash: Digest) -> Self { + Self { + state_root_hash: Some(state_root_hash), + ..self + } + } + + /// Sets the timestamp for the block. + pub fn timestamp(self, timestamp: Timestamp) -> Self { + Self { + timestamp: Some(timestamp), + ..self + } + } + + /// Sets the era for the block + pub fn era(self, era: impl Into) -> Self { + Self { + era: Some(era.into()), + ..self + } + } + + /// Sets the height for the block. + pub fn height(self, height: u64) -> Self { + Self { + height: Some(height), + ..self + } + } + + /// Sets the block proposer. + pub fn proposer(self, proposer: PublicKey) -> Self { + Self { + proposer: Some(proposer), + ..self + } + } + + /// Sets the protocol version for the block. + pub fn protocol_version(self, protocol_version: ProtocolVersion) -> Self { + Self { + protocol_version, + ..self + } + } + + /// Associates the given transactions with the created block. + pub fn transactions<'a, I: IntoIterator>(self, txns_iter: I) -> Self { + Self { + txns: txns_iter.into_iter().cloned().collect(), + ..self + } + } + + /// Sets the height for the block. + pub fn rewarded_signatures(self, rewarded_signatures: RewardedSignatures) -> Self { + Self { + rewarded_signatures: Some(rewarded_signatures), + ..self + } + } + + /// Associates a number of random transactions with the created block. + pub fn random_transactions(mut self, count: usize, rng: &mut TestRng) -> Self { + self.txns = iter::repeat_with(|| Transaction::random(rng)) + .take(count) + .collect(); + self + } + + /// Allows setting the created block to be switch block or not. + pub fn switch_block(self, is_switch: bool) -> Self { + Self { + is_switch: Some(is_switch), + ..self + } + } + + /// Sets the validator weights for the block. + pub fn validator_weights(self, validator_weights: ValidatorWeights) -> Self { + Self { + validator_weights: Some(validator_weights), + ..self + } + } + + /// Builds the block. 
+ pub fn build(self, rng: &mut TestRng) -> BlockV2 { + let Self { + parent_hash, + state_root_hash, + timestamp, + era, + height, + proposer, + protocol_version, + txns, + is_switch, + validator_weights, + rewarded_signatures, + } = self; + + let parent_hash = parent_hash.unwrap_or_else(|| BlockHash::new(rng.gen())); + let parent_seed = Digest::random(rng); + let state_root_hash = state_root_hash.unwrap_or_else(|| rng.gen()); + let random_bit = rng.gen(); + let is_switch = is_switch.unwrap_or_else(|| rng.gen_bool(0.1)); + let era_end = is_switch.then(|| gen_era_end_v2(rng, validator_weights)); + let timestamp = timestamp.unwrap_or_else(Timestamp::now); + let era_id = era.unwrap_or(EraId::random(rng)); + let height = height.unwrap_or_else(|| era_id.value() * 10 + rng.gen_range(0..10)); + let proposer = proposer.unwrap_or_else(|| PublicKey::random(rng)); + + let mut mint_hashes = vec![]; + let mut auction_hashes = vec![]; + let mut install_upgrade_hashes = vec![]; + let mut large_hashes = vec![]; + let mut medium_hashes = vec![]; + let mut small_hashes = vec![]; + for txn in txns { + let txn_hash = txn.hash(); + let lane_id = match txn { + Transaction::Deploy(deploy) => { + if deploy.is_transfer() { + MINT_LANE_ID + } else { + LARGE_WASM_LANE_ID + } + } + Transaction::V1(transaction_v1) => { + let entry_point = transaction_v1.get_transaction_entry_point().unwrap(); + let target = transaction_v1.get_transaction_target().unwrap(); + simplified_calculate_transaction_lane_from_values(&entry_point, &target) + } + }; + match lane_id { + MINT_LANE_ID => mint_hashes.push(txn_hash), + AUCTION_LANE_ID => auction_hashes.push(txn_hash), + INSTALL_UPGRADE_LANE_ID => install_upgrade_hashes.push(txn_hash), + LARGE_WASM_LANE_ID => large_hashes.push(txn_hash), + MEDIUM_WASM_LANE_ID => medium_hashes.push(txn_hash), + SMALL_WASM_LANE_ID => small_hashes.push(txn_hash), + _ => panic!("Invalid lane id"), + } + } + let transactions = { + let mut ret = BTreeMap::new(); + 
ret.insert(MINT_LANE_ID, mint_hashes); + ret.insert(AUCTION_LANE_ID, auction_hashes); + ret.insert(INSTALL_UPGRADE_LANE_ID, install_upgrade_hashes); + ret.insert(LARGE_WASM_LANE_ID, large_hashes); + ret.insert(MEDIUM_WASM_LANE_ID, medium_hashes); + ret.insert(SMALL_WASM_LANE_ID, small_hashes); + ret + }; + let rewarded_signatures = rewarded_signatures.unwrap_or_default(); + let current_gas_price: u8 = 1; + let last_switch_block_hash = BlockHash::new(Digest::from([8; Digest::LENGTH])); + BlockV2::new( + parent_hash, + parent_seed, + state_root_hash, + random_bit, + era_end, + timestamp, + era_id, + height, + protocol_version, + proposer, + transactions, + rewarded_signatures, + current_gas_price, + Some(last_switch_block_hash), + ) + } + + /// Builds the block as a versioned block. + pub fn build_versioned(self, rng: &mut TestRng) -> Block { + self.build(rng).into() + } + + /// Builds a block that is invalid. + pub fn build_invalid(self, rng: &mut TestRng) -> BlockV2 { + self.build(rng).make_invalid(rng) + } +} + +// A simplified way of calculating transaction lanes. It doesn't take +// into consideration the size of the transaction against the chainspec +// and doesn't take `additional_computation_factor` into consideration. +// This is only used for tests purposes. 
+fn simplified_calculate_transaction_lane_from_values( + entry_point: &TransactionEntryPoint, + target: &TransactionTarget, +) -> u8 { + match target { + TransactionTarget::Native => match entry_point { + TransactionEntryPoint::Transfer | TransactionEntryPoint::Burn => MINT_LANE_ID, + TransactionEntryPoint::AddBid + | TransactionEntryPoint::WithdrawBid + | TransactionEntryPoint::Delegate + | TransactionEntryPoint::Undelegate + | TransactionEntryPoint::Redelegate + | TransactionEntryPoint::ActivateBid + | TransactionEntryPoint::ChangeBidPublicKey + | TransactionEntryPoint::AddReservations + | TransactionEntryPoint::CancelReservations => AUCTION_LANE_ID, + TransactionEntryPoint::Call => panic!("EntryPointCannotBeCall"), + TransactionEntryPoint::Custom(_) => panic!("EntryPointCannotBeCustom"), + }, + TransactionTarget::Stored { .. } => match entry_point { + TransactionEntryPoint::Custom(_) => LARGE_WASM_LANE_ID, + TransactionEntryPoint::Call + | TransactionEntryPoint::Transfer + | TransactionEntryPoint::Burn + | TransactionEntryPoint::AddBid + | TransactionEntryPoint::WithdrawBid + | TransactionEntryPoint::Delegate + | TransactionEntryPoint::Undelegate + | TransactionEntryPoint::Redelegate + | TransactionEntryPoint::ActivateBid + | TransactionEntryPoint::ChangeBidPublicKey + | TransactionEntryPoint::AddReservations + | TransactionEntryPoint::CancelReservations => { + panic!("EntryPointMustBeCustom") + } + }, + TransactionTarget::Session { + is_install_upgrade, .. 
+ } => match entry_point { + TransactionEntryPoint::Call => { + if *is_install_upgrade { + INSTALL_UPGRADE_LANE_ID + } else { + LARGE_WASM_LANE_ID + } + } + TransactionEntryPoint::Custom(_) + | TransactionEntryPoint::Transfer + | TransactionEntryPoint::Burn + | TransactionEntryPoint::AddBid + | TransactionEntryPoint::WithdrawBid + | TransactionEntryPoint::Delegate + | TransactionEntryPoint::Undelegate + | TransactionEntryPoint::Redelegate + | TransactionEntryPoint::ActivateBid + | TransactionEntryPoint::ChangeBidPublicKey + | TransactionEntryPoint::AddReservations + | TransactionEntryPoint::CancelReservations => { + panic!("EntryPointMustBeCall") + } + }, + } +} + +fn gen_era_end_v2( + rng: &mut TestRng, + validator_weights: Option>, +) -> EraEndV2 { + let equivocators_count = rng.gen_range(0..5); + let rewards_count = rng.gen_range(0..5); + let inactive_count = rng.gen_range(0..5); + let next_era_validator_weights = validator_weights.unwrap_or_else(|| { + (1..6) + .map(|i| (PublicKey::random(rng), U512::from(i))) + .take(6) + .collect() + }); + let equivocators = iter::repeat_with(|| PublicKey::random(rng)) + .take(equivocators_count) + .collect(); + let rewards = iter::repeat_with(|| { + let pub_key = PublicKey::random(rng); + let mut rewards = vec![U512::from(rng.gen_range(1..=1_000_000_000 + 1))]; + if rng.gen_bool(0.2) { + rewards.push(U512::from(rng.gen_range(1..=1_000_000_000 + 1))); + }; + (pub_key, rewards) + }) + .take(rewards_count) + .collect(); + let inactive_validators = iter::repeat_with(|| PublicKey::random(rng)) + .take(inactive_count) + .collect(); + + EraEndV2::new( + equivocators, + inactive_validators, + next_era_validator_weights, + rewards, + 1u8, + ) +} diff --git a/types/src/block_time.rs b/types/src/block_time.rs index f250136e85..4fe4f73533 100644 --- a/types/src/block_time.rs +++ b/types/src/block_time.rs @@ -1,33 +1,103 @@ use alloc::vec::Vec; -use crate::bytesrepr::{Error, FromBytes, ToBytes, U64_SERIALIZED_LENGTH}; +use crate::{ + 
bytesrepr::{Error, FromBytes, ToBytes, U64_SERIALIZED_LENGTH}, + CLType, CLTyped, TimeDiff, Timestamp, +}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; /// The number of bytes in a serialized [`BlockTime`]. pub const BLOCKTIME_SERIALIZED_LENGTH: usize = U64_SERIALIZED_LENGTH; +/// Holds epoch type. +#[derive(Debug, Copy, Clone, PartialEq, Eq, Default)] +pub struct HoldsEpoch(Option); + +impl HoldsEpoch { + /// No epoch is applicable. + pub const NOT_APPLICABLE: HoldsEpoch = HoldsEpoch(None); + + /// Instance from block time. + pub fn from_block_time(block_time: BlockTime, hold_internal: TimeDiff) -> Self { + HoldsEpoch(Some( + block_time.value().saturating_sub(hold_internal.millis()), + )) + } + + /// Instance from timestamp. + pub fn from_timestamp(timestamp: Timestamp, hold_internal: TimeDiff) -> Self { + HoldsEpoch(Some( + timestamp.millis().saturating_sub(hold_internal.millis()), + )) + } + + /// Instance from milliseconds. + pub fn from_millis(timestamp_millis: u64, hold_internal_millis: u64) -> Self { + HoldsEpoch(Some(timestamp_millis.saturating_sub(hold_internal_millis))) + } + + /// Returns the inner value. + pub fn value(&self) -> Option { + self.0 + } +} + /// A newtype wrapping a [`u64`] which represents the block time. -#[derive(Clone, Copy, Default, Debug, PartialEq, Eq, PartialOrd)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[derive( + Clone, Copy, Default, Debug, PartialEq, Eq, Ord, PartialOrd, Hash, Serialize, Deserialize, +)] pub struct BlockTime(u64); impl BlockTime { /// Constructs a `BlockTime`. - pub fn new(value: u64) -> Self { + pub const fn new(value: u64) -> Self { BlockTime(value) } /// Saturating integer subtraction. Computes `self - other`, saturating at `0` instead of /// overflowing. 
+ #[must_use] pub fn saturating_sub(self, other: BlockTime) -> Self { BlockTime(self.0.saturating_sub(other.0)) } -} -impl Into for BlockTime { - fn into(self) -> u64 { + /// Returns inner value. + pub fn value(&self) -> u64 { self.0 } } +impl From for u64 { + fn from(blocktime: BlockTime) -> Self { + blocktime.0 + } +} + +impl From for Timestamp { + fn from(value: BlockTime) -> Self { + Timestamp::from(value.0) + } +} + +impl From for BlockTime { + fn from(value: u64) -> Self { + BlockTime(value) + } +} + +impl From for BlockTime { + fn from(value: Timestamp) -> Self { + BlockTime(value.millis()) + } +} + impl ToBytes for BlockTime { fn to_bytes(&self) -> Result, Error> { self.0.to_bytes() @@ -44,3 +114,9 @@ impl FromBytes for BlockTime { Ok((BlockTime::new(time), rem)) } } + +impl CLTyped for BlockTime { + fn cl_type() -> CLType { + CLType::U64 + } +} diff --git a/types/src/byte_code.rs b/types/src/byte_code.rs new file mode 100644 index 0000000000..a34b4bafcc --- /dev/null +++ b/types/src/byte_code.rs @@ -0,0 +1,677 @@ +use alloc::{format, string::String, vec::Vec}; +use core::{ + array::TryFromSliceError, + convert::TryFrom, + fmt::{self, Debug, Display, Formatter}, +}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::{ + distributions::{Distribution, Standard}, + Rng, +}; +#[cfg(feature = "json-schema")] +use schemars::{gen::SchemaGenerator, schema::Schema, JsonSchema}; +use serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer}; + +use crate::{ + addressable_entity, bytesrepr, + bytesrepr::{Bytes, Error, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + checksummed_hex, uref, CLType, CLTyped, HashAddr, +}; + +const BYTE_CODE_MAX_DISPLAY_LEN: usize = 16; +const KEY_HASH_LENGTH: usize = 32; +const WASM_STRING_PREFIX: &str = "byte-code-"; + +const BYTE_CODE_PREFIX: &str = "byte-code-"; +const V1_WASM_PREFIX: &str = "v1-wasm-"; +const V2_WASM_PREFIX: &str = "v2-wasm-"; +const 
EMPTY_PREFIX: &str = "empty-"; + +/// Associated error type of `TryFrom<&[u8]>` for `ByteCodeHash`. +#[derive(Debug)] +pub struct TryFromSliceForContractHashError(()); + +#[derive(Debug)] +#[non_exhaustive] +pub enum FromStrError { + InvalidPrefix, + Hex(base16::DecodeError), + Hash(TryFromSliceError), + AccountHash(addressable_entity::FromAccountHashStrError), + URef(uref::FromStrError), +} + +impl From for FromStrError { + fn from(error: base16::DecodeError) -> Self { + FromStrError::Hex(error) + } +} + +impl From for FromStrError { + fn from(error: TryFromSliceError) -> Self { + FromStrError::Hash(error) + } +} + +impl From for FromStrError { + fn from(error: addressable_entity::FromAccountHashStrError) -> Self { + FromStrError::AccountHash(error) + } +} + +impl From for FromStrError { + fn from(error: uref::FromStrError) -> Self { + FromStrError::URef(error) + } +} + +impl Display for FromStrError { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + match self { + FromStrError::InvalidPrefix => write!(f, "invalid prefix"), + FromStrError::Hex(error) => write!(f, "decode from hex: {}", error), + FromStrError::Hash(error) => write!(f, "hash from string error: {}", error), + FromStrError::AccountHash(error) => { + write!(f, "account hash from string error: {:?}", error) + } + FromStrError::URef(error) => write!(f, "uref from string error: {:?}", error), + } + } +} + +/// An address for ByteCode records stored in global state. +#[derive(PartialOrd, Ord, PartialEq, Eq, Hash, Clone, Copy, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub enum ByteCodeAddr { + /// An address for byte code to be executed against the V1 Casper execution engine. + V1CasperWasm(HashAddr), + /// An address for byte code to be executed against the V2 Casper execution engine. 
+ V2CasperWasm(HashAddr), + /// An empty byte code record + Empty, +} + +impl ByteCodeAddr { + /// Constructs a new Byte code address for Wasm. + pub const fn new_wasm_addr(hash_addr: HashAddr) -> Self { + Self::V1CasperWasm(hash_addr) + } + + /// Returns the tag of the byte code address. + pub fn tag(&self) -> ByteCodeKind { + match self { + Self::Empty => ByteCodeKind::Empty, + Self::V1CasperWasm(_) => ByteCodeKind::V1CasperWasm, + Self::V2CasperWasm(_) => ByteCodeKind::V2CasperWasm, + } + } + + /// Formats the `ByteCodeAddr` for users getting and putting. + pub fn to_formatted_string(&self) -> String { + format!("{}", self) + } + + /// Parses a string formatted as per `Self::to_formatted_string()` into a + /// `ByteCodeAddr`. + pub fn from_formatted_string(input: &str) -> Result { + if let Some(byte_code) = input.strip_prefix(BYTE_CODE_PREFIX) { + let (addr_str, tag) = if let Some(str) = byte_code.strip_prefix(EMPTY_PREFIX) { + (str, ByteCodeKind::Empty) + } else if let Some(str) = byte_code.strip_prefix(V1_WASM_PREFIX) { + (str, ByteCodeKind::V1CasperWasm) + } else if let Some(str) = byte_code.strip_prefix(V2_WASM_PREFIX) { + (str, ByteCodeKind::V2CasperWasm) + } else { + return Err(FromStrError::InvalidPrefix); + }; + let addr = checksummed_hex::decode(addr_str).map_err(FromStrError::Hex)?; + let byte_code_addr = HashAddr::try_from(addr.as_ref()).map_err(FromStrError::Hash)?; + return match tag { + ByteCodeKind::V1CasperWasm => Ok(ByteCodeAddr::V1CasperWasm(byte_code_addr)), + ByteCodeKind::V2CasperWasm => Ok(ByteCodeAddr::V2CasperWasm(byte_code_addr)), + ByteCodeKind::Empty => Ok(ByteCodeAddr::Empty), + }; + } + + Err(FromStrError::InvalidPrefix) + } +} + +impl ToBytes for ByteCodeAddr { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + + match self { + Self::Empty => 0, + 
Self::V1CasperWasm(_) => KEY_HASH_LENGTH, + Self::V2CasperWasm(_) => KEY_HASH_LENGTH, + } + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + match self { + Self::Empty => writer.push(self.tag() as u8), + Self::V1CasperWasm(addr) => { + writer.push(self.tag() as u8); + writer.extend(addr.to_bytes()?); + } + Self::V2CasperWasm(addr) => { + writer.push(self.tag() as u8); + writer.extend(addr.to_bytes()?); + } + } + Ok(()) + } +} + +impl FromBytes for ByteCodeAddr { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (tag, remainder): (ByteCodeKind, &[u8]) = FromBytes::from_bytes(bytes)?; + match tag { + ByteCodeKind::Empty => Ok((ByteCodeAddr::Empty, remainder)), + ByteCodeKind::V1CasperWasm => { + let (addr, remainder) = HashAddr::from_bytes(remainder)?; + Ok((ByteCodeAddr::new_wasm_addr(addr), remainder)) + } + ByteCodeKind::V2CasperWasm => { + let (addr, remainder) = HashAddr::from_bytes(remainder)?; + Ok((ByteCodeAddr::V2CasperWasm(addr), remainder)) + } + } + } +} + +impl Display for ByteCodeAddr { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self { + ByteCodeAddr::V1CasperWasm(addr) => { + write!( + f, + "{}{}{}", + BYTE_CODE_PREFIX, + V1_WASM_PREFIX, + base16::encode_lower(&addr) + ) + } + ByteCodeAddr::V2CasperWasm(addr) => { + write!( + f, + "{}{}{}", + BYTE_CODE_PREFIX, + V2_WASM_PREFIX, + base16::encode_lower(&addr) + ) + } + ByteCodeAddr::Empty => { + write!( + f, + "{}{}{}", + BYTE_CODE_PREFIX, + EMPTY_PREFIX, + base16::encode_lower(&[0u8; 32]) + ) + } + } + } +} + +impl Debug for ByteCodeAddr { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self { + ByteCodeAddr::V1CasperWasm(addr) => { + write!(f, "ByteCodeAddr::V1CasperWasm({:?})", addr) + } + ByteCodeAddr::V2CasperWasm(addr) => { + write!(f, "ByteCodeAddr::V2CasperWasm({:?})", addr) + } + ByteCodeAddr::Empty => { + write!(f, "ByteCodeAddr::Empty") + } + } + } +} + +#[cfg(any(feature = "testing", test))] +impl Distribution for 
Standard { + fn sample(&self, rng: &mut R) -> ByteCodeAddr { + match rng.gen_range(0..=2) { + 0 => ByteCodeAddr::Empty, + 1 => ByteCodeAddr::V1CasperWasm(rng.gen()), + 2 => ByteCodeAddr::V2CasperWasm(rng.gen()), + _ => unreachable!(), + } + } +} + +/// A newtype wrapping a `HashAddr` which is the raw bytes of +/// the ByteCodeHash +#[derive(PartialOrd, Ord, PartialEq, Eq, Hash, Clone, Copy)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct ByteCodeHash(HashAddr); + +impl ByteCodeHash { + /// Constructs a new `ByteCodeHash` from the raw bytes of the contract wasm hash. + pub const fn new(value: HashAddr) -> ByteCodeHash { + ByteCodeHash(value) + } + + /// Returns the raw bytes of the contract hash as an array. + pub fn value(&self) -> HashAddr { + self.0 + } + + /// Returns the raw bytes of the contract hash as a `slice`. + pub fn as_bytes(&self) -> &[u8] { + &self.0 + } + + /// Formats the `ByteCodeHash` for users getting and putting. + pub fn to_formatted_string(self) -> String { + format!("{}{}", WASM_STRING_PREFIX, base16::encode_lower(&self.0),) + } + + /// Parses a string formatted as per `Self::to_formatted_string()` into a + /// `ByteCodeHash`. 
+ pub fn from_formatted_str(input: &str) -> Result { + let remainder = input + .strip_prefix(WASM_STRING_PREFIX) + .ok_or(FromStrError::InvalidPrefix)?; + let bytes = HashAddr::try_from(checksummed_hex::decode(remainder)?.as_ref())?; + Ok(ByteCodeHash(bytes)) + } +} + +impl Default for ByteCodeHash { + fn default() -> Self { + Self::new([0u8; KEY_HASH_LENGTH]) + } +} + +impl Display for ByteCodeHash { + fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { + write!(f, "{}", base16::encode_lower(&self.0)) + } +} + +impl Debug for ByteCodeHash { + fn fmt(&self, f: &mut Formatter) -> core::fmt::Result { + write!(f, "ByteCodeHash({})", base16::encode_lower(&self.0)) + } +} + +impl CLTyped for ByteCodeHash { + fn cl_type() -> CLType { + CLType::ByteArray(KEY_HASH_LENGTH as u32) + } +} + +impl ToBytes for ByteCodeHash { + #[inline(always)] + fn to_bytes(&self) -> Result, Error> { + self.0.to_bytes() + } + + #[inline(always)] + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } + + #[inline(always)] + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + self.0.write_bytes(writer) + } +} + +impl FromBytes for ByteCodeHash { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (bytes, rem) = FromBytes::from_bytes(bytes)?; + Ok((ByteCodeHash::new(bytes), rem)) + } +} + +impl From<[u8; KEY_HASH_LENGTH]> for ByteCodeHash { + fn from(bytes: [u8; KEY_HASH_LENGTH]) -> Self { + ByteCodeHash(bytes) + } +} + +impl Serialize for ByteCodeHash { + fn serialize(&self, serializer: S) -> Result { + if serializer.is_human_readable() { + self.to_formatted_string().serialize(serializer) + } else { + self.0.serialize(serializer) + } + } +} + +impl<'de> Deserialize<'de> for ByteCodeHash { + fn deserialize>(deserializer: D) -> Result { + if deserializer.is_human_readable() { + let formatted_string = String::deserialize(deserializer)?; + ByteCodeHash::from_formatted_str(&formatted_string).map_err(SerdeError::custom) + } else { + let bytes = 
HashAddr::deserialize(deserializer)?; + Ok(ByteCodeHash(bytes)) + } + } +} + +impl AsRef<[u8]> for ByteCodeHash { + fn as_ref(&self) -> &[u8] { + self.0.as_ref() + } +} + +impl TryFrom<&[u8]> for ByteCodeHash { + type Error = TryFromSliceForContractHashError; + + fn try_from(bytes: &[u8]) -> Result { + HashAddr::try_from(bytes) + .map(ByteCodeHash::new) + .map_err(|_| TryFromSliceForContractHashError(())) + } +} + +impl TryFrom<&Vec> for ByteCodeHash { + type Error = TryFromSliceForContractHashError; + + fn try_from(bytes: &Vec) -> Result { + HashAddr::try_from(bytes as &[u8]) + .map(ByteCodeHash::new) + .map_err(|_| TryFromSliceForContractHashError(())) + } +} + +#[cfg(feature = "json-schema")] +impl JsonSchema for ByteCodeHash { + fn schema_name() -> String { + String::from("ByteCodeHash") + } + + fn json_schema(gen: &mut SchemaGenerator) -> Schema { + let schema = gen.subschema_for::(); + let mut schema_object = schema.into_object(); + schema_object.metadata().description = + Some("The hash address of the contract wasm".to_string()); + schema_object.into() + } +} + +/// The type of Byte code. +#[repr(u8)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[derive(PartialEq, Eq, Clone, Copy, PartialOrd, Ord, Hash, Serialize, Deserialize)] +pub enum ByteCodeKind { + /// Empty byte code. + Empty = 0, + /// Byte code to be executed with the version 1 Casper execution engine. + V1CasperWasm = 1, + /// Byte code to be executed with the version 2 Casper execution engine. 
+ V2CasperWasm = 2, +} + +impl ToBytes for ByteCodeKind { + fn to_bytes(&self) -> Result, Error> { + (*self as u8).to_bytes() + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + (*self as u8).write_bytes(writer) + } +} + +impl FromBytes for ByteCodeKind { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (byte_code_kind, remainder) = u8::from_bytes(bytes)?; + match byte_code_kind { + byte_code_kind if byte_code_kind == ByteCodeKind::Empty as u8 => { + Ok((ByteCodeKind::Empty, remainder)) + } + byte_code_kind if byte_code_kind == ByteCodeKind::V1CasperWasm as u8 => { + Ok((ByteCodeKind::V1CasperWasm, remainder)) + } + byte_code_kind if byte_code_kind == ByteCodeKind::V2CasperWasm as u8 => { + Ok((ByteCodeKind::V2CasperWasm, remainder)) + } + _ => Err(Error::Formatting), + } + } +} + +impl Display for ByteCodeKind { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self { + ByteCodeKind::Empty => { + write!(f, "empty") + } + ByteCodeKind::V1CasperWasm => { + write!(f, "v1-casper-wasm") + } + ByteCodeKind::V2CasperWasm => { + write!(f, "v2-casper-wasm") + } + } + } +} + +#[cfg(any(feature = "testing", test))] +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> ByteCodeKind { + match rng.gen_range(0..=2) { + 0 => ByteCodeKind::Empty, + 1 => ByteCodeKind::V1CasperWasm, + 2 => ByteCodeKind::V2CasperWasm, + _ => unreachable!(), + } + } +} + +/// A container for contract's Wasm bytes. 
+#[derive(PartialEq, Eq, Clone, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub struct ByteCode { + kind: ByteCodeKind, + bytes: Bytes, +} + +impl Debug for ByteCode { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + if self.bytes.len() > BYTE_CODE_MAX_DISPLAY_LEN { + write!( + f, + "ByteCode(0x{}...)", + base16::encode_lower(&self.bytes[..BYTE_CODE_MAX_DISPLAY_LEN]) + ) + } else { + write!(f, "ByteCode(0x{})", base16::encode_lower(&self.bytes)) + } + } +} + +impl ByteCode { + /// Creates new Wasm object from bytes. + pub fn new(kind: ByteCodeKind, bytes: Vec) -> Self { + ByteCode { + kind, + bytes: bytes.into(), + } + } + + /// Consumes instance of [`ByteCode`] and returns its bytes. + pub fn take_bytes(self) -> Vec { + self.bytes.into() + } + + /// Returns a slice of contained Wasm bytes. + pub fn bytes(&self) -> &[u8] { + self.bytes.as_ref() + } + + /// Return the type of byte code. + pub fn kind(&self) -> ByteCodeKind { + self.kind + } +} + +impl ToBytes for ByteCode { + fn to_bytes(&self) -> Result, Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.kind.serialized_length() + self.bytes.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + self.kind.write_bytes(writer)?; + self.bytes.write_bytes(writer)?; + Ok(()) + } +} + +impl FromBytes for ByteCode { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (kind, remainder) = ByteCodeKind::from_bytes(bytes)?; + let (bytes, remainder) = Bytes::from_bytes(remainder)?; + Ok((ByteCode { kind, bytes }, remainder)) + } +} + +#[cfg(test)] +mod tests { + use rand::RngCore; + + use super::*; + use crate::testing::TestRng; + + #[test] + fn debug_repr_of_short_wasm() { + const SIZE: usize = 8; + let wasm_bytes = vec![0; SIZE]; + let byte_code = 
ByteCode::new(ByteCodeKind::V1CasperWasm, wasm_bytes); + assert_eq!(format!("{:?}", byte_code), "ByteCode(0x0000000000000000)"); + } + + #[test] + fn debug_repr_of_long_wasm() { + const SIZE: usize = 65; + let wasm_bytes = vec![0; SIZE]; + let byte_code = ByteCode::new(ByteCodeKind::V1CasperWasm, wasm_bytes); + // String output is less than the bytes itself + assert_eq!( + format!("{:?}", byte_code), + "ByteCode(0x00000000000000000000000000000000...)" + ); + } + + #[test] + fn byte_code_bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + let byte_code = ByteCode::new(rng.gen(), vec![]); + bytesrepr::test_serialization_roundtrip(&byte_code); + + let mut buffer = vec![0u8; rng.gen_range(1..100)]; + rng.fill_bytes(buffer.as_mut()); + let byte_code = ByteCode::new(rng.gen(), buffer); + bytesrepr::test_serialization_roundtrip(&byte_code); + } + + #[test] + fn contract_wasm_hash_from_slice() { + let bytes: Vec = (0..32).collect(); + let byte_code_hash = HashAddr::try_from(&bytes[..]).expect("should create byte code hash"); + let contract_hash = ByteCodeHash::new(byte_code_hash); + assert_eq!(&bytes, &contract_hash.as_bytes()); + } + + #[test] + fn contract_wasm_hash_from_str() { + let byte_code_hash = ByteCodeHash([3; KEY_HASH_LENGTH]); + let encoded = byte_code_hash.to_formatted_string(); + let decoded = ByteCodeHash::from_formatted_str(&encoded).unwrap(); + assert_eq!(byte_code_hash, decoded); + + let invalid_prefix = + "contractwasm-0000000000000000000000000000000000000000000000000000000000000000"; + assert!(ByteCodeHash::from_formatted_str(invalid_prefix).is_err()); + + let short_addr = + "contract-wasm-00000000000000000000000000000000000000000000000000000000000000"; + assert!(ByteCodeHash::from_formatted_str(short_addr).is_err()); + + let long_addr = + "contract-wasm-000000000000000000000000000000000000000000000000000000000000000000"; + assert!(ByteCodeHash::from_formatted_str(long_addr).is_err()); + + let invalid_hex = + 
"contract-wasm-000000000000000000000000000000000000000000000000000000000000000g"; + assert!(ByteCodeHash::from_formatted_str(invalid_hex).is_err()); + } + + #[test] + fn byte_code_addr_from_str() { + let empty_addr = ByteCodeAddr::Empty; + let encoded = empty_addr.to_formatted_string(); + let decoded = ByteCodeAddr::from_formatted_string(&encoded).unwrap(); + assert_eq!(empty_addr, decoded); + + let wasm_addr = ByteCodeAddr::V1CasperWasm([3; 32]); + let encoded = wasm_addr.to_formatted_string(); + let decoded = ByteCodeAddr::from_formatted_string(&encoded).unwrap(); + assert_eq!(wasm_addr, decoded); + } + + #[test] + fn byte_code_serialization_roundtrip() { + let rng = &mut TestRng::new(); + let wasm_addr = ByteCodeAddr::V1CasperWasm(rng.gen()); + bytesrepr::test_serialization_roundtrip(&wasm_addr); + + let empty_addr = ByteCodeAddr::Empty; + bytesrepr::test_serialization_roundtrip(&empty_addr); + } + + #[test] + fn contract_wasm_hash_bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + let byte_code_hash = ByteCodeHash(rng.gen()); + bytesrepr::test_serialization_roundtrip(&byte_code_hash); + } + + #[test] + fn contract_wasm_hash_bincode_roundtrip() { + let rng = &mut TestRng::new(); + let byte_code_hash = ByteCodeHash(rng.gen()); + let serialized = bincode::serialize(&byte_code_hash).unwrap(); + let deserialized = bincode::deserialize(&serialized).unwrap(); + assert_eq!(byte_code_hash, deserialized) + } + + #[test] + fn contract_wasm_hash_json_roundtrip() { + let rng = &mut TestRng::new(); + let byte_code_hash = ByteCodeHash(rng.gen()); + let json_string = serde_json::to_string_pretty(&byte_code_hash).unwrap(); + let decoded = serde_json::from_str(&json_string).unwrap(); + assert_eq!(byte_code_hash, decoded) + } +} diff --git a/types/src/bytesrepr.rs b/types/src/bytesrepr.rs index 4156cbbff6..4629619ea0 100644 --- a/types/src/bytesrepr.rs +++ b/types/src/bytesrepr.rs @@ -1,25 +1,30 @@ //! 
Contains serialization and deserialization code for types used throughout the system. mod bytes; -// Can be removed once https://github.com/rust-lang/rustfmt/issues/3362 is resolved. -#[rustfmt::skip] -use alloc::vec; use alloc::{ - alloc::{alloc, Layout}, + boxed::Box, collections::{BTreeMap, BTreeSet, VecDeque}, str, string::String, + vec, vec::Vec, }; #[cfg(debug_assertions)] use core::any; -use core::{mem, ptr::NonNull}; +use core::{ + convert::TryInto, + fmt::{self, Display, Formatter}, +}; +#[cfg(feature = "std")] +use std::error::Error as StdError; +#[cfg(feature = "datasize")] +use datasize::DataSize; use num_integer::Integer; use num_rational::Ratio; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; use serde::{Deserialize, Serialize}; -#[cfg(feature = "std")] -use thiserror::Error; pub use bytes::Bytes; @@ -28,19 +33,19 @@ pub const UNIT_SERIALIZED_LENGTH: usize = 0; /// The number of bytes in a serialized `bool`. pub const BOOL_SERIALIZED_LENGTH: usize = 1; /// The number of bytes in a serialized `i32`. -pub const I32_SERIALIZED_LENGTH: usize = mem::size_of::(); +pub const I32_SERIALIZED_LENGTH: usize = size_of::(); /// The number of bytes in a serialized `i64`. -pub const I64_SERIALIZED_LENGTH: usize = mem::size_of::(); +pub const I64_SERIALIZED_LENGTH: usize = size_of::(); /// The number of bytes in a serialized `u8`. -pub const U8_SERIALIZED_LENGTH: usize = mem::size_of::(); +pub const U8_SERIALIZED_LENGTH: usize = size_of::(); /// The number of bytes in a serialized `u16`. -pub const U16_SERIALIZED_LENGTH: usize = mem::size_of::(); +pub const U16_SERIALIZED_LENGTH: usize = size_of::(); /// The number of bytes in a serialized `u32`. -pub const U32_SERIALIZED_LENGTH: usize = mem::size_of::(); +pub const U32_SERIALIZED_LENGTH: usize = size_of::(); /// The number of bytes in a serialized `u64`. 
-pub const U64_SERIALIZED_LENGTH: usize = mem::size_of::(); +pub const U64_SERIALIZED_LENGTH: usize = size_of::(); /// The number of bytes in a serialized [`U128`](crate::U128). -pub const U128_SERIALIZED_LENGTH: usize = mem::size_of::(); +pub const U128_SERIALIZED_LENGTH: usize = size_of::(); /// The number of bytes in a serialized [`U256`](crate::U256). pub const U256_SERIALIZED_LENGTH: usize = U128_SERIALIZED_LENGTH * 2; /// The number of bytes in a serialized [`U512`](crate::U512). @@ -69,12 +74,19 @@ pub trait ToBytes { /// `to_bytes()` or `into_bytes()`. The data is not actually serialized, so this call is /// relatively cheap. fn serialized_length(&self) -> usize; + + /// Writes `&self` into a mutable `writer`. + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + writer.extend(self.to_bytes()?); + Ok(()) + } } /// A type which can be deserialized from a `Vec`. pub trait FromBytes: Sized { /// Deserializes the slice into `Self`. fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error>; + /// Deserializes the `Vec` into `Self`. fn from_vec(bytes: Vec) -> Result<(Self, Vec), Error> { Self::from_bytes(bytes.as_slice()).map(|(x, remainder)| (x, Vec::from(remainder))) @@ -89,40 +101,123 @@ pub fn unchecked_allocate_buffer(to_be_serialized: &T) -> Vec { } /// Returns a `Vec` initialized with sufficient capacity to hold `to_be_serialized` after -/// serialization, or an error if the capacity would exceed `u32::max_value()`. +/// serialization, or an error if the capacity would exceed `u32::MAX`. pub fn allocate_buffer(to_be_serialized: &T) -> Result, Error> { let serialized_length = to_be_serialized.serialized_length(); - if serialized_length > u32::max_value() as usize { + allocate_buffer_for_size(serialized_length) +} + +/// Returns a `Vec` initialized with sufficient capacity to hold `expected_size` bytes, +/// or an error if the capacity would exceed `u32::MAX`. 
+pub fn allocate_buffer_for_size(expected_size: usize) -> Result, Error> { + if expected_size > u32::MAX as usize { return Err(Error::OutOfMemory); } - Ok(Vec::with_capacity(serialized_length)) + Ok(Vec::with_capacity(expected_size)) } /// Serialization and deserialization errors. -#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] -#[cfg_attr(feature = "std", derive(Error))] +#[derive(Copy, Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr( + feature = "json-schema", + derive(JsonSchema), + schemars(rename = "BytesreprError") +)] +//Default is needed only in testing to meet EnumIter needs +#[cfg_attr(any(feature = "testing", test), derive(Default))] #[repr(u8)] +#[non_exhaustive] pub enum Error { /// Early end of stream while deserializing. - #[cfg_attr(feature = "std", error("Deserialization error: early end of stream"))] + #[cfg_attr(any(feature = "testing", test), default)] EarlyEndOfStream = 0, /// Formatting error while deserializing. - #[cfg_attr(feature = "std", error("Deserialization error: formatting"))] Formatting, /// Not all input bytes were consumed in [`deserialize`]. - #[cfg_attr(feature = "std", error("Deserialization error: left-over bytes"))] LeftOverBytes, /// Out of memory error. - #[cfg_attr(feature = "std", error("Serialization error: out of memory"))] OutOfMemory, + /// No serialized representation is available for a value. + NotRepresentable, + /// Exceeded a recursion depth limit. 
+ ExceededRecursionDepth, +} + +impl Display for Error { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + Error::EarlyEndOfStream => { + formatter.write_str("Deserialization error: early end of stream") + } + Error::Formatting => formatter.write_str("Deserialization error: formatting"), + Error::LeftOverBytes => formatter.write_str("Deserialization error: left-over bytes"), + Error::OutOfMemory => formatter.write_str("Serialization error: out of memory"), + Error::NotRepresentable => { + formatter.write_str("Serialization error: value is not representable.") + } + Error::ExceededRecursionDepth => formatter.write_str("exceeded recursion depth"), + } + } } +impl ToBytes for Error { + fn to_bytes(&self) -> Result, Error> { + (*self as u8).to_bytes() + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + (*self as u8).write_bytes(writer) + } +} + +impl FromBytes for Error { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (value, remainder) = u8::from_bytes(bytes)?; + match value { + value if value == Error::EarlyEndOfStream as u8 => { + Ok((Error::EarlyEndOfStream, remainder)) + } + value if value == Error::Formatting as u8 => Ok((Error::Formatting, remainder)), + value if value == Error::LeftOverBytes as u8 => Ok((Error::LeftOverBytes, remainder)), + value if value == Error::OutOfMemory as u8 => Ok((Error::OutOfMemory, remainder)), + value if value == Error::NotRepresentable as u8 => { + Ok((Error::NotRepresentable, remainder)) + } + value if value == Error::ExceededRecursionDepth as u8 => { + Ok((Error::ExceededRecursionDepth, remainder)) + } + _ => Err(Error::Formatting), + } + } +} + +#[cfg(feature = "std")] +impl StdError for Error {} + /// Deserializes `bytes` into an instance of `T`. /// /// Returns an error if the bytes cannot be deserialized into `T` or if not all of the input bytes /// are consumed in the operation. 
pub fn deserialize(bytes: Vec) -> Result { - let (t, remainder) = T::from_vec(bytes)?; + let (t, remainder) = T::from_bytes(&bytes)?; + if remainder.is_empty() { + Ok(t) + } else { + Err(Error::LeftOverBytes) + } +} + +/// Deserializes a slice of bytes into an instance of `T`. +/// +/// Returns an error if the bytes cannot be deserialized into `T` or if not all of the input bytes +/// are consumed in the operation. +pub fn deserialize_from_slice, O: FromBytes>(bytes: I) -> Result { + let (t, remainder) = O::from_bytes(bytes.as_ref())?; if remainder.is_empty() { Ok(t) } else { @@ -135,6 +230,7 @@ pub fn serialize(t: impl ToBytes) -> Result, Error> { t.into_bytes() } +/// Safely splits the slice at the given point. pub(crate) fn safe_split_at(bytes: &[u8], n: usize) -> Result<(&[u8], &[u8]), Error> { if n > bytes.len() { Err(Error::EarlyEndOfStream) @@ -167,6 +263,11 @@ impl ToBytes for bool { fn serialized_length(&self) -> usize { BOOL_SERIALIZED_LENGTH } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + writer.push(*self as u8); + Ok(()) + } } impl FromBytes for bool { @@ -190,6 +291,11 @@ impl ToBytes for u8 { fn serialized_length(&self) -> usize { U8_SERIALIZED_LENGTH } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + writer.push(*self); + Ok(()) + } } impl FromBytes for u8 { @@ -209,6 +315,11 @@ impl ToBytes for i32 { fn serialized_length(&self) -> usize { I32_SERIALIZED_LENGTH } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + writer.extend_from_slice(&self.to_le_bytes()); + Ok(()) + } } impl FromBytes for i32 { @@ -228,6 +339,11 @@ impl ToBytes for i64 { fn serialized_length(&self) -> usize { I64_SERIALIZED_LENGTH } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + writer.extend_from_slice(&self.to_le_bytes()); + Ok(()) + } } impl FromBytes for i64 { @@ -247,6 +363,11 @@ impl ToBytes for u16 { fn serialized_length(&self) -> usize { U16_SERIALIZED_LENGTH } + + fn write_bytes(&self, 
writer: &mut Vec) -> Result<(), Error> { + writer.extend_from_slice(&self.to_le_bytes()); + Ok(()) + } } impl FromBytes for u16 { @@ -266,6 +387,11 @@ impl ToBytes for u32 { fn serialized_length(&self) -> usize { U32_SERIALIZED_LENGTH } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + writer.extend_from_slice(&self.to_le_bytes()); + Ok(()) + } } impl FromBytes for u32 { @@ -285,6 +411,11 @@ impl ToBytes for u64 { fn serialized_length(&self) -> usize { U64_SERIALIZED_LENGTH } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + writer.extend_from_slice(&self.to_le_bytes()); + Ok(()) + } } impl FromBytes for u64 { @@ -296,6 +427,30 @@ impl FromBytes for u64 { } } +impl ToBytes for u128 { + fn to_bytes(&self) -> Result, Error> { + Ok(self.to_le_bytes().to_vec()) + } + + fn serialized_length(&self) -> usize { + U128_SERIALIZED_LENGTH + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + writer.extend_from_slice(&self.to_le_bytes()); + Ok(()) + } +} + +impl FromBytes for u128 { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let mut result = [0u8; U128_SERIALIZED_LENGTH]; + let (bytes, remainder) = safe_split_at(bytes, U128_SERIALIZED_LENGTH)?; + result.copy_from_slice(bytes); + Ok((::from_le_bytes(result), remainder)) + } +} + impl ToBytes for String { fn to_bytes(&self) -> Result, Error> { let bytes = self.as_bytes(); @@ -305,6 +460,11 @@ impl ToBytes for String { fn serialized_length(&self) -> usize { u8_slice_serialized_length(self.as_bytes()) } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + write_u8_slice(self.as_bytes(), writer)?; + Ok(()) + } } impl FromBytes for String { @@ -321,7 +481,7 @@ fn ensure_efficient_serialization() { debug_assert_ne!( any::type_name::(), any::type_name::(), - "You should use Bytes newtype wrapper for efficiency" + "You should use `casper_types::bytesrepr::Bytes` newtype wrapper instead of `Vec` for efficiency" ); } @@ -333,8 +493,9 @@ impl 
ToBytes for Vec { fn to_bytes(&self) -> Result, Error> { ensure_efficient_serialization::(); - let mut result = try_vec_with_capacity(self.serialized_length())?; - result.append(&mut (self.len() as u32).to_bytes()?); + let mut result = Vec::with_capacity(self.serialized_length()); + let length_32: u32 = self.len().try_into().map_err(|_| Error::NotRepresentable)?; + result.append(&mut length_32.to_bytes()?); for item in self.iter() { result.append(&mut item.to_bytes()?); @@ -347,7 +508,8 @@ impl ToBytes for Vec { ensure_efficient_serialization::(); let mut result = allocate_buffer(&self)?; - result.append(&mut (self.len() as u32).to_bytes()?); + let length_32: u32 = self.len().try_into().map_err(|_| Error::NotRepresentable)?; + result.append(&mut length_32.to_bytes()?); for item in self { result.append(&mut item.into_bytes()?); @@ -359,24 +521,15 @@ impl ToBytes for Vec { fn serialized_length(&self) -> usize { iterator_serialized_length(self.iter()) } -} - -// TODO Replace `try_vec_with_capacity` with `Vec::try_reserve_exact` once it's in stable. 
-fn try_vec_with_capacity(capacity: usize) -> Result, Error> { - // see https://doc.rust-lang.org/src/alloc/raw_vec.rs.html#75-98 - let elem_size = mem::size_of::(); - let alloc_size = capacity.checked_mul(elem_size).ok_or(Error::OutOfMemory)?; - let ptr = if alloc_size == 0 { - NonNull::::dangling() - } else { - let align = mem::align_of::(); - let layout = Layout::from_size_align(alloc_size, align).map_err(|_| Error::OutOfMemory)?; - let raw_ptr = unsafe { alloc(layout) }; - let non_null_ptr = NonNull::::new(raw_ptr).ok_or(Error::OutOfMemory)?; - non_null_ptr.cast() - }; - unsafe { Ok(Vec::from_raw_parts(ptr.as_ptr(), 0, capacity)) } + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + let length_32: u32 = self.len().try_into().map_err(|_| Error::NotRepresentable)?; + writer.extend_from_slice(&length_32.to_le_bytes()); + for item in self.iter() { + item.write_bytes(writer)?; + } + Ok(()) + } } fn vec_from_vec(bytes: Vec) -> Result<(Vec, Vec), Error> { @@ -385,13 +538,28 @@ fn vec_from_vec(bytes: Vec) -> Result<(Vec, Vec), Error Vec::::from_bytes(bytes.as_slice()).map(|(x, remainder)| (x, Vec::from(remainder))) } +/// Returns a conservative estimate for the preallocated number of elements for a new `Vec`. +/// +/// `hint` indicates the desired upper limit in heap size (in bytes), which is itself bounded by +/// 4096 bytes. This function will never return less than 1. 
+#[inline] +fn cautious(hint: usize) -> usize { + let el_size = size_of::(); + core::cmp::max(core::cmp::min(hint, 4096 / el_size), 1) +} + impl FromBytes for Vec { fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { ensure_efficient_serialization::(); let (count, mut stream) = u32::from_bytes(bytes)?; - let mut result = try_vec_with_capacity(count as usize)?; + if count == 0 { + return Ok((Vec::new(), stream)); + } + + let mut result = Vec::with_capacity(cautious::(count as usize)); + for _ in 0..count { let (value, remainder) = T::from_bytes(stream)?; result.push(value); @@ -410,7 +578,8 @@ impl ToBytes for VecDeque { fn to_bytes(&self) -> Result, Error> { let (slice1, slice2) = self.as_slices(); let mut result = allocate_buffer(self)?; - result.append(&mut (self.len() as u32).to_bytes()?); + let length_32: u32 = self.len().try_into().map_err(|_| Error::NotRepresentable)?; + result.append(&mut length_32.to_bytes()?); for item in slice1.iter().chain(slice2.iter()) { result.append(&mut item.to_bytes()?); } @@ -440,46 +609,39 @@ impl FromBytes for VecDeque { } } -macro_rules! impl_to_from_bytes_for_array { - ($($N:literal)+) => { - $( - impl ToBytes for [u8; $N] { - #[inline(always)] - fn to_bytes(&self) -> Result, Error> { - Ok(self.to_vec()) - } +impl ToBytes for [u8; COUNT] { + #[inline(always)] + fn to_bytes(&self) -> Result, Error> { + Ok(self.to_vec()) + } - #[inline(always)] - fn serialized_length(&self) -> usize { $N } - } + #[inline(always)] + fn serialized_length(&self) -> usize { + COUNT + } - impl FromBytes for [u8; $N] { - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { - let (bytes, rem) = safe_split_at(bytes, $N)?; - // SAFETY: safe_split_at makes sure `bytes` is exactly $N bytes. 
- let ptr = bytes.as_ptr() as *const [u8; $N]; - let result = unsafe { *ptr }; - Ok((result, rem)) - } - } - )+ + #[inline(always)] + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + writer.extend_from_slice(self); + Ok(()) } } -impl_to_from_bytes_for_array! { - 0 1 2 3 4 5 6 7 8 9 - 10 11 12 13 14 15 16 17 18 19 - 20 21 22 23 24 25 26 27 28 29 - 30 31 32 - 33 - 64 128 256 512 +impl FromBytes for [u8; COUNT] { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (bytes, rem) = safe_split_at(bytes, COUNT)?; + // SAFETY: safe_split_at makes sure `bytes` is exactly `COUNT` bytes. + let ptr = bytes.as_ptr() as *const [u8; COUNT]; + let result = unsafe { *ptr }; + Ok((result, rem)) + } } impl ToBytes for BTreeSet { fn to_bytes(&self) -> Result, Error> { let mut result = allocate_buffer(self)?; - let num_keys = self.len() as u32; + let num_keys: u32 = self.len().try_into().map_err(|_| Error::NotRepresentable)?; result.append(&mut num_keys.to_bytes()?); for value in self.iter() { @@ -492,6 +654,15 @@ impl ToBytes for BTreeSet { fn serialized_length(&self) -> usize { U32_SERIALIZED_LENGTH + self.iter().map(|v| v.serialized_length()).sum::() } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + let length_32: u32 = self.len().try_into().map_err(|_| Error::NotRepresentable)?; + writer.extend_from_slice(&length_32.to_le_bytes()); + for value in self.iter() { + value.write_bytes(writer)?; + } + Ok(()) + } } impl FromBytes for BTreeSet { @@ -515,7 +686,7 @@ where fn to_bytes(&self) -> Result, Error> { let mut result = allocate_buffer(self)?; - let num_keys = self.len() as u32; + let num_keys: u32 = self.len().try_into().map_err(|_| Error::NotRepresentable)?; result.append(&mut num_keys.to_bytes()?); for (key, value) in self.iter() { @@ -533,6 +704,16 @@ where .map(|(key, value)| key.serialized_length() + value.serialized_length()) .sum::() } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + let length_32: u32 
= self.len().try_into().map_err(|_| Error::NotRepresentable)?; + writer.extend_from_slice(&length_32.to_le_bytes()); + for (key, value) in self.iter() { + key.write_bytes(writer)?; + value.write_bytes(writer)?; + } + Ok(()) + } } impl FromBytes for BTreeMap @@ -576,6 +757,17 @@ impl ToBytes for Option { None => 0, } } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + match self { + None => writer.push(OPTION_NONE_TAG), + Some(v) => { + writer.push(OPTION_SOME_TAG); + v.write_bytes(writer)?; + } + }; + Ok(()) + } } impl FromBytes for Option { @@ -611,6 +803,20 @@ impl ToBytes for Result { Err(error) => error.serialized_length(), } } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + match self { + Err(error) => { + writer.push(RESULT_ERR_TAG); + error.write_bytes(writer)?; + } + Ok(result) => { + writer.push(RESULT_OK_TAG); + result.write_bytes(writer)?; + } + }; + Ok(()) + } } impl FromBytes for Result { @@ -1045,6 +1251,12 @@ impl ToBytes for str { fn serialized_length(&self) -> usize { u8_slice_serialized_length(self.as_bytes()) } + + #[inline] + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + write_u8_slice(self.as_bytes(), writer)?; + Ok(()) + } } impl ToBytes for &str { @@ -1057,6 +1269,38 @@ impl ToBytes for &str { fn serialized_length(&self) -> usize { (*self).serialized_length() } + + #[inline] + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + write_u8_slice(self.as_bytes(), writer)?; + Ok(()) + } +} + +impl ToBytes for &T +where + T: ToBytes, +{ + fn to_bytes(&self) -> Result, Error> { + (*self).to_bytes() + } + + fn serialized_length(&self) -> usize { + (*self).serialized_length() + } +} + +impl ToBytes for Box +where + T: ToBytes, +{ + fn to_bytes(&self) -> Result, Error> { + self.as_ref().to_bytes() + } + + fn serialized_length(&self) -> usize { + self.as_ref().serialized_length() + } } impl ToBytes for Ratio @@ -1096,14 +1340,27 @@ where /// avoid using serializing Vec. 
fn u8_slice_to_bytes(bytes: &[u8]) -> Result, Error> { let serialized_length = u8_slice_serialized_length(bytes); - let mut vec = try_vec_with_capacity(serialized_length)?; - let length_prefix = bytes.len() as u32; + let mut vec = Vec::with_capacity(serialized_length); + let length_prefix: u32 = bytes + .len() + .try_into() + .map_err(|_| Error::NotRepresentable)?; let length_prefix_bytes = length_prefix.to_le_bytes(); vec.extend_from_slice(&length_prefix_bytes); vec.extend_from_slice(bytes); Ok(vec) } +fn write_u8_slice(bytes: &[u8], writer: &mut Vec) -> Result<(), Error> { + let length_32: u32 = bytes + .len() + .try_into() + .map_err(|_| Error::NotRepresentable)?; + writer.extend_from_slice(&length_32.to_le_bytes()); + writer.extend_from_slice(bytes); + Ok(()) +} + /// Serializes a vector of bytes with a length prefix. /// /// For efficiency you should avoid serializing Vec. @@ -1115,7 +1372,7 @@ pub(crate) fn vec_u8_to_bytes(vec: &Vec) -> Result, Error> { /// Returns serialized length of serialized slice of bytes. /// -/// This function adds a length prefix in the beggining. +/// This function adds a length prefix in the beginning. #[inline(always)] fn u8_slice_serialized_length(bytes: &[u8]) -> usize { U32_SERIALIZED_LENGTH + bytes.len() @@ -1127,29 +1384,43 @@ pub(crate) fn vec_u8_serialized_length(vec: &Vec) -> usize { u8_slice_serialized_length(vec.as_slice()) } -// This test helper is not intended to be used by third party crates. -#[doc(hidden)] -/// Returns `true` if a we can serialize and then deserialize a value +/// Asserts that `t` can be serialized and when deserialized back into an instance `T` compares +/// equal to `t`. +/// +/// Also asserts that `t.serialized_length()` is the same as the actual number of bytes of the +/// serialized `t` instance. 
+#[cfg(any(feature = "testing", test))] +#[track_caller] pub fn test_serialization_roundtrip(t: &T) where - T: alloc::fmt::Debug + ToBytes + FromBytes + PartialEq, + T: fmt::Debug + ToBytes + FromBytes + PartialEq, { let serialized = ToBytes::to_bytes(t).expect("Unable to serialize data"); assert_eq!( serialized.len(), t.serialized_length(), - "\nLength of serialized data: {},\nserialized_length() yielded: {},\nserialized data: {:?}, t is {:?}", + "\nLength of serialized data: {},\nserialized_length() yielded: {},\n t is {:?}", serialized.len(), t.serialized_length(), - serialized, t ); + let mut written_bytes = vec![]; + t.write_bytes(&mut written_bytes) + .expect("Unable to serialize data via write_bytes"); + assert_eq!(serialized, written_bytes); + + let deserialized_from_slice = deserialize_from_slice(&serialized) + .unwrap_or_else(|error| panic!("Unable to deserialize data: {error:?} ({t:?})")); + assert_eq!(*t, deserialized_from_slice); + let deserialized = deserialize::(serialized).expect("Unable to deserialize data"); - assert!(*t == deserialized) + assert_eq!(*t, deserialized); } #[cfg(test)] mod tests { + use crate::U128; + use super::*; #[test] @@ -1161,17 +1432,88 @@ mod tests { #[test] fn should_not_deserialize_zero_denominator() { let malicious_bytes = (1u64, 0u64).to_bytes().unwrap(); - let result: Result, Error> = super::deserialize(malicious_bytes); + let result: Result, Error> = deserialize(malicious_bytes); assert_eq!(result.unwrap_err(), Error::Formatting); } + #[test] + fn should_have_generic_tobytes_impl_for_borrowed_types() { + struct NonCopyable; + + impl ToBytes for NonCopyable { + fn to_bytes(&self) -> Result, Error> { + Ok(vec![1, 2, 3]) + } + + fn serialized_length(&self) -> usize { + 3 + } + } + + let noncopyable: &NonCopyable = &NonCopyable; + + assert_eq!(noncopyable.to_bytes().unwrap(), vec![1, 2, 3]); + assert_eq!(noncopyable.serialized_length(), 3); + assert_eq!(noncopyable.into_bytes().unwrap(), vec![1, 2, 3]); + } + 
#[cfg(debug_assertions)] #[test] - #[should_panic(expected = "You should use Bytes newtype wrapper for efficiency")] + #[should_panic( + expected = "You should use `casper_types::bytesrepr::Bytes` newtype wrapper instead of `Vec` for efficiency" + )] fn should_fail_to_serialize_slice_of_u8() { let bytes = b"0123456789".to_vec(); bytes.to_bytes().unwrap(); } + + #[test] + fn should_calculate_capacity() { + #[allow(dead_code)] + struct CustomStruct { + u8_field: u8, + u16_field: u16, + u32_field: u32, + u64_field: u64, + // Here we're using U128 type that represents u128 with a two u64s which is what the + // compiler is doing for x86_64. On 64-bit ARM architecture u128 is aligned + // to 16 bytes, but on x86_64 it's aligned to 8 bytes. This changes the + // memory layout of the struct and affects the results of function `cautious`. + // The expected behaviour of u128 alignment is 8 bytes instead of 16, + // and there is a bug in the rust compiler for this: https://github.com/rust-lang/rust/issues/54341 + u128_field: U128, + str_field: String, + } + assert_eq!( + cautious::(u32::MAX as usize), + 512, + "hint is 2^32-1 and we can only preallocate 512 elements" + ); + assert_eq!( + cautious::(usize::MAX), + 4096, + "hint is usize::MAX and we can only preallocate 4096 elements" + ); + assert_eq!( + cautious::(usize::MAX), + 2048, + "hint is usize::MAX and we can only preallocate 2048 elements" + ); + assert_eq!( + cautious::(usize::MAX), + 73, + "hint is usize::MAX and we can only preallocate 73 elements" + ); + } + + #[test] + fn deserializing_empty_vec_has_no_capacity() { + let bytes = ToBytes::to_bytes(&(0u32, b"123")).unwrap(); + let (vec, rem): (Vec, _) = FromBytes::from_bytes(&bytes).unwrap(); + assert!(vec.is_empty()); + assert_eq!(vec.capacity(), 0); + assert_eq!(rem, b"123"); + } } #[cfg(test)] @@ -1388,7 +1730,7 @@ mod proptests { bytesrepr::test_serialization_roundtrip(&t); } #[test] - fn test_ratio_u64(t in (any::(), 1..u64::max_value())) { + fn 
test_ratio_u64(t in (any::(), 1..u64::MAX)) { bytesrepr::test_serialization_roundtrip(&t); } } diff --git a/types/src/bytesrepr/bytes.rs b/types/src/bytesrepr/bytes.rs index be9235d5fa..c3d81d4d0f 100644 --- a/types/src/bytesrepr/bytes.rs +++ b/types/src/bytesrepr/bytes.rs @@ -5,27 +5,36 @@ use alloc::{ use core::{ cmp, fmt, iter::FromIterator, - mem, ops::{Deref, Index, Range, RangeFrom, RangeFull, RangeTo}, slice, }; -use datasize::DataSize; use rand::{ distributions::{Distribution, Standard}, Rng, }; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; use serde::{ de::{Error as SerdeError, SeqAccess, Visitor}, Deserialize, Deserializer, Serialize, Serializer, }; use super::{Error, FromBytes, ToBytes}; -use crate::{CLType, CLTyped}; +use crate::{checksummed_hex, CLType, CLTyped}; /// A newtype wrapper for bytes that has efficient serialization routines. #[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Debug, Default, Hash)] -pub struct Bytes(Vec); +#[cfg_attr( + feature = "json-schema", + derive(JsonSchema), + schemars(description = "Hex-encoded bytes.") +)] +#[rustfmt::skip] +pub struct Bytes( + #[cfg_attr(feature = "json-schema", schemars(skip, with = "String"))] + Vec +); impl Bytes { /// Constructs a new, empty vector of bytes. @@ -43,6 +52,11 @@ impl Bytes { pub fn as_slice(&self) -> &[u8] { self } + + /// Consumes self and returns the inner bytes. 
+ pub fn take_inner(self) -> Vec { + self.0 + } } impl Deref for Bytes { @@ -98,6 +112,11 @@ impl ToBytes for Bytes { fn serialized_length(&self) -> usize { super::vec_u8_serialized_length(&self.0) } + + #[inline(always)] + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + super::write_u8_slice(self.as_slice(), writer) + } } impl FromBytes for Bytes { @@ -132,7 +151,7 @@ impl Index> for Bytes { type Output = [u8]; fn index(&self, index: Range) -> &[u8] { - let &Bytes(ref dat) = self; + let Bytes(dat) = self; &dat[index] } } @@ -141,7 +160,7 @@ impl Index> for Bytes { type Output = [u8]; fn index(&self, index: RangeTo) -> &[u8] { - let &Bytes(ref dat) = self; + let Bytes(dat) = self; &dat[index] } } @@ -150,7 +169,7 @@ impl Index> for Bytes { type Output = [u8]; fn index(&self, index: RangeFrom) -> &[u8] { - let &Bytes(ref dat) = self; + let Bytes(dat) = self; &dat[index] } } @@ -159,7 +178,7 @@ impl Index for Bytes { type Output = [u8]; fn index(&self, _: RangeFull) -> &[u8] { - let &Bytes(ref dat) = self; + let Bytes(dat) = self; &dat[..] } } @@ -192,13 +211,14 @@ impl IntoIterator for Bytes { } } -impl DataSize for Bytes { +#[cfg(feature = "datasize")] +impl datasize::DataSize for Bytes { const IS_DYNAMIC: bool = true; const STATIC_HEAP_SIZE: usize = 0; fn estimate_heap_size(&self) -> usize { - self.0.capacity() * mem::size_of::() + self.0.capacity() * size_of::() } } @@ -274,7 +294,7 @@ impl<'de> Deserialize<'de> for Bytes { { if deserializer.is_human_readable() { let hex_string = String::deserialize(deserializer)?; - base16::decode(&hex_string) + checksummed_hex::decode(hex_string) .map(Bytes) .map_err(SerdeError::custom) } else { diff --git a/types/src/chainspec.rs b/types/src/chainspec.rs new file mode 100644 index 0000000000..d6f84d52e8 --- /dev/null +++ b/types/src/chainspec.rs @@ -0,0 +1,397 @@ +//! The chainspec is a set of configuration options for the network. All validators must apply the +//! 
same set of options in order to join and act as a peer in a given network. + +mod accounts_config; +mod activation_point; +mod chainspec_raw_bytes; +mod core_config; +mod fee_handling; +pub mod genesis_config; +mod global_state_update; +mod highway_config; +mod hold_balance_handling; +mod network_config; +mod next_upgrade; +mod pricing_handling; +mod protocol_config; +mod refund_handling; +mod transaction_config; +mod upgrade_config; +mod vacancy_config; +mod vm_config; + +#[cfg(any(feature = "std", test))] +use std::{fmt::Debug, sync::Arc}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::Rng; +use serde::Serialize; +use tracing::error; + +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + ChainNameDigest, Digest, EraId, ProtocolVersion, Timestamp, +}; +pub use accounts_config::{ + AccountConfig, AccountsConfig, AdministratorAccount, DelegatorConfig, GenesisAccount, + GenesisValidator, ValidatorConfig, +}; +pub use activation_point::ActivationPoint; +pub use chainspec_raw_bytes::ChainspecRawBytes; +pub use core_config::{ + ConsensusProtocolName, CoreConfig, LegacyRequiredFinality, DEFAULT_GAS_HOLD_INTERVAL, + DEFAULT_MINIMUM_BID_AMOUNT, +}; +#[cfg(any(feature = "std", test))] +pub use core_config::{ + DEFAULT_BASELINE_MOTES_AMOUNT, DEFAULT_FEE_HANDLING, DEFAULT_REFUND_HANDLING, +}; +pub use fee_handling::FeeHandling; +#[cfg(any(feature = "std", test))] +pub use genesis_config::GenesisConfig; +pub use global_state_update::{GlobalStateUpdate, GlobalStateUpdateConfig, GlobalStateUpdateError}; +pub use highway_config::HighwayConfig; +pub use hold_balance_handling::HoldBalanceHandling; +pub use network_config::NetworkConfig; +pub use next_upgrade::NextUpgrade; +pub use pricing_handling::PricingHandling; +pub use protocol_config::ProtocolConfig; +pub use refund_handling::RefundHandling; +pub use transaction_config::{ + DeployConfig, 
TransactionConfig, TransactionLaneDefinition, TransactionV1Config, +}; +#[cfg(any(feature = "testing", test))] +pub use transaction_config::{ + DEFAULT_LARGE_TRANSACTION_GAS_LIMIT, DEFAULT_MAX_PAYMENT_MOTES, DEFAULT_MIN_TRANSFER_MOTES, +}; +pub use upgrade_config::ProtocolUpgradeConfig; +pub use vacancy_config::VacancyConfig; +pub use vm_config::{ + AuctionCosts, BrTableCost, ChainspecRegistry, ControlFlowCosts, HandlePaymentCosts, + HostFunction, HostFunctionCost, HostFunctionCostsV1, HostFunctionCostsV2, HostFunctionV2, + MessageLimits, MintCosts, OpcodeCosts, StandardPaymentCosts, StorageCosts, SystemConfig, + WasmConfig, WasmV1Config, WasmV2Config, DEFAULT_HOST_FUNCTION_NEW_DICTIONARY, +}; +#[cfg(any(feature = "testing", test))] +pub use vm_config::{ + DEFAULT_ADD_BID_COST, DEFAULT_ADD_COST, DEFAULT_BIT_COST, DEFAULT_CONST_COST, + DEFAULT_CONTROL_FLOW_BLOCK_OPCODE, DEFAULT_CONTROL_FLOW_BR_IF_OPCODE, + DEFAULT_CONTROL_FLOW_BR_OPCODE, DEFAULT_CONTROL_FLOW_BR_TABLE_MULTIPLIER, + DEFAULT_CONTROL_FLOW_BR_TABLE_OPCODE, DEFAULT_CONTROL_FLOW_CALL_INDIRECT_OPCODE, + DEFAULT_CONTROL_FLOW_CALL_OPCODE, DEFAULT_CONTROL_FLOW_DROP_OPCODE, + DEFAULT_CONTROL_FLOW_ELSE_OPCODE, DEFAULT_CONTROL_FLOW_END_OPCODE, + DEFAULT_CONTROL_FLOW_IF_OPCODE, DEFAULT_CONTROL_FLOW_LOOP_OPCODE, + DEFAULT_CONTROL_FLOW_RETURN_OPCODE, DEFAULT_CONTROL_FLOW_SELECT_OPCODE, + DEFAULT_CONVERSION_COST, DEFAULT_CURRENT_MEMORY_COST, DEFAULT_DELEGATE_COST, DEFAULT_DIV_COST, + DEFAULT_GLOBAL_COST, DEFAULT_GROW_MEMORY_COST, DEFAULT_INTEGER_COMPARISON_COST, + DEFAULT_LOAD_COST, DEFAULT_LOCAL_COST, DEFAULT_MAX_STACK_HEIGHT, DEFAULT_MUL_COST, + DEFAULT_NEW_DICTIONARY_COST, DEFAULT_NOP_COST, DEFAULT_STORE_COST, DEFAULT_TRANSFER_COST, + DEFAULT_UNREACHABLE_COST, DEFAULT_WASM_MAX_MEMORY, +}; + +/// A collection of configuration settings describing the state of the system at genesis and after +/// upgrades to basic system functionality occurring after genesis. 
+#[derive(Clone, PartialEq, Eq, Serialize, Debug, Default)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[serde(deny_unknown_fields)] +pub struct Chainspec { + /// Protocol config. + #[serde(rename = "protocol")] + pub protocol_config: ProtocolConfig, + + /// Network config. + #[serde(rename = "network")] + pub network_config: NetworkConfig, + + /// Core config. + #[serde(rename = "core")] + pub core_config: CoreConfig, + + /// Highway config. + #[serde(rename = "highway")] + pub highway_config: HighwayConfig, + + /// Transaction Config. + #[serde(rename = "transactions")] + pub transaction_config: TransactionConfig, + + /// Wasm config. + #[serde(rename = "wasm")] + pub wasm_config: WasmConfig, + + /// System costs config. + #[serde(rename = "system_costs")] + pub system_costs_config: SystemConfig, + + /// Vacancy behavior config + #[serde(rename = "vacancy")] + pub vacancy_config: VacancyConfig, + + /// Storage costs. + pub storage_costs: StorageCosts, +} + +impl Chainspec { + /// Returns the hash of the chainspec's name. + pub fn name_hash(&self) -> ChainNameDigest { + ChainNameDigest::from_chain_name(&self.network_config.name) + } + + /// Serializes `self` and hashes the resulting bytes. + pub fn hash(&self) -> Digest { + let serialized_chainspec = self.to_bytes().unwrap_or_else(|error| { + error!(%error, "failed to serialize chainspec"); + vec![] + }); + Digest::hash(serialized_chainspec) + } + + /// Serializes `self` and hashes the resulting bytes, if able. + pub fn try_hash(&self) -> Result { + let arr = self + .to_bytes() + .map_err(|_| "failed to serialize chainspec".to_string())?; + Ok(Digest::hash(arr)) + } + + /// Returns the protocol version of the chainspec. + pub fn protocol_version(&self) -> ProtocolVersion { + self.protocol_config.version + } + + /// Returns the era ID of where we should reset back to. This means stored blocks in that and + /// subsequent eras are deleted from storage. 
+ pub fn hard_reset_to_start_of_era(&self) -> Option { + self.protocol_config + .hard_reset + .then(|| self.protocol_config.activation_point.era_id()) + } + + /// Creates an upgrade config instance from parts. + pub fn upgrade_config_from_parts( + &self, + pre_state_hash: Digest, + current_protocol_version: ProtocolVersion, + era_id: EraId, + chainspec_raw_bytes: Arc, + ) -> Result { + let chainspec_registry = ChainspecRegistry::new_with_optional_global_state( + chainspec_raw_bytes.chainspec_bytes(), + chainspec_raw_bytes.maybe_global_state_bytes(), + ); + let global_state_update = match self.protocol_config.get_update_mapping() { + Ok(global_state_update) => global_state_update, + Err(err) => { + return Err(format!("failed to generate global state update: {}", err)); + } + }; + let fee_handling = self.core_config.fee_handling; + let validator_minimum_bid_amount = self.core_config.minimum_bid_amount; + let maximum_delegation_amount = self.core_config.maximum_delegation_amount; + let minimum_delegation_amount = self.core_config.minimum_delegation_amount; + let enable_addressable_entity = self.core_config.enable_addressable_entity; + + Ok(ProtocolUpgradeConfig::new( + pre_state_hash, + current_protocol_version, + self.protocol_config.version, + Some(era_id), + Some(self.core_config.gas_hold_balance_handling), + Some(self.core_config.gas_hold_interval.millis()), + Some(self.core_config.validator_slots), + Some(self.core_config.auction_delay), + Some(self.core_config.locked_funds_period.millis()), + Some(self.core_config.round_seigniorage_rate), + Some(self.core_config.unbonding_delay), + global_state_update, + chainspec_registry, + fee_handling, + validator_minimum_bid_amount, + maximum_delegation_amount, + minimum_delegation_amount, + enable_addressable_entity, + )) + } + + /// Returns balance hold epoch based upon configured hold interval, calculated from the imputed + /// timestamp. 
+ pub fn balance_holds_epoch(&self, timestamp: Timestamp) -> u64 { + timestamp + .millis() + .saturating_sub(self.core_config.gas_hold_interval.millis()) + } + + /// Is the given transaction lane supported. + pub fn is_supported(&self, lane: u8) -> bool { + self.transaction_config + .transaction_v1_config + .is_supported(lane) + } + + /// Returns the max serialized for the given category. + pub fn get_max_serialized_length_by_category(&self, lane: u8) -> u64 { + self.transaction_config + .transaction_v1_config + .get_max_serialized_length(lane) + } + + /// Returns the max args length for the given category. + pub fn get_max_args_length_by_category(&self, lane: u8) -> u64 { + self.transaction_config + .transaction_v1_config + .get_max_args_length(lane) + } + + /// Returns the max gas limit for the given category. + pub fn get_max_gas_limit_by_category(&self, lane: u8) -> u64 { + self.transaction_config + .transaction_v1_config + .get_max_transaction_gas_limit(lane) + } + + /// Returns the max transaction count for the given category. + pub fn get_max_transaction_count_by_category(&self, lane: u8) -> u64 { + self.transaction_config + .transaction_v1_config + .get_max_transaction_count(lane) + } +} + +#[cfg(any(feature = "testing", test))] +impl Chainspec { + /// Generates a random instance using a `TestRng`. 
+ pub fn random(rng: &mut TestRng) -> Self { + let protocol_config = ProtocolConfig::random(rng); + let network_config = NetworkConfig::random(rng); + let core_config = CoreConfig::random(rng); + let highway_config = HighwayConfig::random(rng); + let transaction_config = TransactionConfig::random(rng); + let wasm_config = rng.gen(); + let system_costs_config = SystemConfig::random(rng); + let vacancy_config = VacancyConfig::random(rng); + + Chainspec { + protocol_config, + network_config, + core_config, + highway_config, + transaction_config, + wasm_config, + system_costs_config, + vacancy_config, + storage_costs: rng.gen(), + } + } + + /// Set the chain name; + pub fn with_chain_name(&mut self, chain_name: String) -> &mut Self { + self.network_config.name = chain_name; + self + } + + /// Set max associated keys. + pub fn with_max_associated_keys(&mut self, max_associated_keys: u32) -> &mut Self { + self.core_config.max_associated_keys = max_associated_keys; + self + } + + /// Set pricing handling. + pub fn with_pricing_handling(&mut self, pricing_handling: PricingHandling) -> &mut Self { + self.core_config.pricing_handling = pricing_handling; + self + } + + /// Set allow prepaid. + pub fn with_allow_prepaid(&mut self, allow_prepaid: bool) -> &mut Self { + self.core_config.allow_prepaid = allow_prepaid; + self + } + + /// Set block gas limit. + pub fn with_block_gas_limit(&mut self, block_gas_limit: u64) -> &mut Self { + self.transaction_config.block_gas_limit = block_gas_limit; + self + } + + /// Set vm2 casper wasm. 
+ pub fn with_vm_casper_v2(&mut self, vm_casper_v2: bool) -> &mut Self { + self.transaction_config.runtime_config.vm_casper_v2 = vm_casper_v2; + self + } +} + +impl ToBytes for Chainspec { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.protocol_config.write_bytes(writer)?; + self.network_config.write_bytes(writer)?; + self.core_config.write_bytes(writer)?; + self.highway_config.write_bytes(writer)?; + self.transaction_config.write_bytes(writer)?; + self.wasm_config.write_bytes(writer)?; + self.system_costs_config.write_bytes(writer)?; + self.vacancy_config.write_bytes(writer)?; + self.storage_costs.write_bytes(writer) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.protocol_config.serialized_length() + + self.network_config.serialized_length() + + self.core_config.serialized_length() + + self.highway_config.serialized_length() + + self.transaction_config.serialized_length() + + self.wasm_config.serialized_length() + + self.system_costs_config.serialized_length() + + self.vacancy_config.serialized_length() + + self.storage_costs.serialized_length() + } +} + +impl FromBytes for Chainspec { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (protocol_config, remainder) = ProtocolConfig::from_bytes(bytes)?; + let (network_config, remainder) = NetworkConfig::from_bytes(remainder)?; + let (core_config, remainder) = CoreConfig::from_bytes(remainder)?; + let (highway_config, remainder) = HighwayConfig::from_bytes(remainder)?; + let (transaction_config, remainder) = TransactionConfig::from_bytes(remainder)?; + let (wasm_config, remainder) = WasmConfig::from_bytes(remainder)?; + let (system_costs_config, remainder) = SystemConfig::from_bytes(remainder)?; + let (vacancy_config, remainder) = VacancyConfig::from_bytes(remainder)?; + let 
(storage_costs, remainder) = FromBytes::from_bytes(remainder)?; + let chainspec = Chainspec { + protocol_config, + network_config, + core_config, + highway_config, + transaction_config, + wasm_config, + system_costs_config, + vacancy_config, + storage_costs, + }; + Ok((chainspec, remainder)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + use rand::SeedableRng; + + #[test] + fn bytesrepr_roundtrip() { + let mut rng = TestRng::from_entropy(); + let chainspec = Chainspec::random(&mut rng); + bytesrepr::test_serialization_roundtrip(&chainspec); + } +} diff --git a/types/src/chainspec/accounts_config.rs b/types/src/chainspec/accounts_config.rs new file mode 100644 index 0000000000..e5e3fb278a --- /dev/null +++ b/types/src/chainspec/accounts_config.rs @@ -0,0 +1,192 @@ +//! The accounts config is a set of configuration options that is used to create accounts at +//! genesis, and set up auction contract with validators and delegators. +mod account_config; +mod delegator_config; +mod genesis; +mod validator_config; +#[cfg(feature = "datasize")] +use datasize::DataSize; +use serde::{Deserialize, Deserializer, Serialize}; + +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + PublicKey, +}; + +pub use account_config::AccountConfig; +pub use delegator_config::DelegatorConfig; +pub use genesis::{AdministratorAccount, GenesisAccount, GenesisValidator}; +pub use validator_config::ValidatorConfig; + +fn sorted_vec_deserializer<'de, T, D>(deserializer: D) -> Result, D::Error> +where + T: Deserialize<'de> + Ord, + D: Deserializer<'de>, +{ + let mut vec = Vec::::deserialize(deserializer)?; + vec.sort_unstable(); + Ok(vec) +} + +/// Configuration values associated with accounts.toml +#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, Clone, Default)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct AccountsConfig { + #[serde(deserialize_with = "sorted_vec_deserializer")] + accounts: 
Vec, + #[serde(default, deserialize_with = "sorted_vec_deserializer")] + delegators: Vec, + #[serde( + default, + deserialize_with = "sorted_vec_deserializer", + skip_serializing_if = "Vec::is_empty" + )] + administrators: Vec, +} + +impl AccountsConfig { + /// Create new accounts config instance. + pub fn new( + accounts: Vec, + delegators: Vec, + administrators: Vec, + ) -> Self { + Self { + accounts, + delegators, + administrators, + } + } + + /// Accounts. + pub fn accounts(&self) -> &[AccountConfig] { + &self.accounts + } + + /// Delegators. + pub fn delegators(&self) -> &[DelegatorConfig] { + &self.delegators + } + + /// Administrators. + pub fn administrators(&self) -> &[AdministratorAccount] { + &self.administrators + } + + /// Account. + pub fn account(&self, public_key: &PublicKey) -> Option<&AccountConfig> { + self.accounts + .iter() + .find(|account| &account.public_key == public_key) + } + + /// All of the validators. + pub fn validators(&self) -> impl Iterator { + self.accounts + .iter() + .filter(|account| account.validator.is_some()) + } + + /// Is the provided public key in the set of genesis validator public keys. + pub fn is_genesis_validator(&self, public_key: &PublicKey) -> bool { + match self.account(public_key) { + None => false, + Some(account_config) => account_config.is_genesis_validator(), + } + } + + #[cfg(any(feature = "testing", test))] + /// Generates a random instance using a `TestRng`. 
+ pub fn random(rng: &mut TestRng) -> Self { + use rand::Rng; + + use crate::Motes; + + let alpha = AccountConfig::random(rng); + let accounts = vec![ + alpha.clone(), + AccountConfig::random(rng), + AccountConfig::random(rng), + AccountConfig::random(rng), + ]; + + let mut delegator = DelegatorConfig::random(rng); + delegator.validator_public_key = alpha.public_key; + + let delegators = vec![delegator]; + + let admin_balance: u32 = rng.gen(); + let administrators = vec![AdministratorAccount::new( + PublicKey::random(rng), + Motes::new(admin_balance), + )]; + + AccountsConfig { + accounts, + delegators, + administrators, + } + } +} + +impl ToBytes for AccountsConfig { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + buffer.extend(self.accounts.to_bytes()?); + buffer.extend(self.delegators.to_bytes()?); + buffer.extend(self.administrators.to_bytes()?); + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.accounts.serialized_length() + + self.delegators.serialized_length() + + self.administrators.serialized_length() + } +} + +impl FromBytes for AccountsConfig { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (accounts, remainder) = FromBytes::from_bytes(bytes)?; + let (delegators, remainder) = FromBytes::from_bytes(remainder)?; + let (administrators, remainder) = FromBytes::from_bytes(remainder)?; + let accounts_config = AccountsConfig::new(accounts, delegators, administrators); + Ok((accounts_config, remainder)) + } +} + +impl From for Vec { + fn from(accounts_config: AccountsConfig) -> Self { + let mut genesis_accounts = Vec::with_capacity(accounts_config.accounts.len()); + for account_config in accounts_config.accounts { + let genesis_account = account_config.into(); + genesis_accounts.push(genesis_account); + } + for delegator_config in accounts_config.delegators { + let genesis_account = delegator_config.into(); + genesis_accounts.push(genesis_account); + } 
+ + for administrator_config in accounts_config.administrators { + let administrator_account = administrator_config.into(); + genesis_accounts.push(administrator_account); + } + + genesis_accounts + } +} + +#[cfg(any(feature = "testing", test))] +mod tests { + #[cfg(test)] + use crate::{bytesrepr, testing::TestRng, AccountsConfig}; + + #[test] + fn serialization_roundtrip() { + let mut rng = TestRng::new(); + let accounts_config = AccountsConfig::random(&mut rng); + bytesrepr::test_serialization_roundtrip(&accounts_config); + } +} diff --git a/node/src/types/chainspec/accounts_config/account_config.rs b/types/src/chainspec/accounts_config/account_config.rs similarity index 76% rename from node/src/types/chainspec/accounts_config/account_config.rs rename to types/src/chainspec/accounts_config/account_config.rs index 190410f829..b64cb97efb 100644 --- a/node/src/types/chainspec/accounts_config/account_config.rs +++ b/types/src/chainspec/accounts_config/account_config.rs @@ -1,30 +1,37 @@ +#[cfg(feature = "datasize")] use datasize::DataSize; -use num::Zero; -#[cfg(test)] + +#[cfg(any(feature = "testing", test))] use rand::{distributions::Standard, prelude::*}; + use serde::{Deserialize, Serialize}; -use casper_execution_engine::{core::engine_state::GenesisAccount, shared::motes::Motes}; -use casper_types::{ +use crate::{ bytesrepr::{self, FromBytes, ToBytes}, - PublicKey, + GenesisAccount, Motes, PublicKey, }; -#[cfg(test)] -use casper_types::{SecretKey, U512}; -#[cfg(test)] +#[cfg(any(feature = "testing", test))] use crate::testing::TestRng; +#[cfg(any(feature = "testing", test))] +use crate::{SecretKey, U512}; use super::ValidatorConfig; -#[derive(PartialEq, Ord, PartialOrd, Eq, Serialize, Deserialize, DataSize, Debug, Clone)] +/// Configuration of an individual account in accounts.toml +#[derive(PartialEq, Ord, PartialOrd, Eq, Serialize, Deserialize, Debug, Clone)] +#[cfg_attr(feature = "datasize", derive(DataSize))] pub struct AccountConfig { - pub(super)
public_key: PublicKey, - balance: Motes, - validator: Option, + /// Public Key. + pub public_key: PublicKey, + /// Balance. + pub balance: Motes, + /// Validator config. + pub validator: Option, } impl AccountConfig { + /// Creates a new `AccountConfig`. pub fn new(public_key: PublicKey, balance: Motes, validator: Option) -> Self { Self { public_key, @@ -33,14 +40,17 @@ impl AccountConfig { } } + /// Public key. pub fn public_key(&self) -> PublicKey { self.public_key.clone() } + /// Balance. pub fn balance(&self) -> Motes { self.balance } + /// Bonded amount. pub fn bonded_amount(&self) -> Motes { match self.validator { Some(validator_config) => validator_config.bonded_amount(), @@ -48,16 +58,17 @@ impl AccountConfig { } } + /// Is this a genesis validator? pub fn is_genesis_validator(&self) -> bool { self.validator.is_some() } - #[cfg(test)] + #[cfg(any(feature = "testing", test))] /// Generates a random instance using a `TestRng`. pub fn random(rng: &mut TestRng) -> Self { let public_key = PublicKey::from(&SecretKey::ed25519_from_bytes(rng.gen::<[u8; 32]>()).unwrap()); - let balance = Motes::new(rng.gen()); + let balance = Motes::new(rng.gen::()); let validator = rng.gen(); AccountConfig { @@ -68,12 +79,11 @@ impl AccountConfig { } } -#[cfg(test)] +#[cfg(any(feature = "testing", test))] impl Distribution for Standard { fn sample(&self, rng: &mut R) -> AccountConfig { - let public_key = SecretKey::ed25519_from_bytes(rng.gen::<[u8; 32]>()) - .unwrap() - .into(); + let secret_key = SecretKey::ed25519_from_bytes(rng.gen::<[u8; 32]>()).unwrap(); + let public_key = PublicKey::from(&secret_key); let mut u512_array = [0u8; 64]; rng.fill_bytes(u512_array.as_mut()); diff --git a/types/src/chainspec/accounts_config/delegator_config.rs b/types/src/chainspec/accounts_config/delegator_config.rs new file mode 100644 index 0000000000..f6fa63afc3 --- /dev/null +++ b/types/src/chainspec/accounts_config/delegator_config.rs @@ -0,0 +1,133 @@ +#[cfg(feature = "datasize")] +use 
datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::{distributions::Standard, prelude::*}; +use serde::{Deserialize, Serialize}; + +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + GenesisAccount, Motes, PublicKey, +}; +#[cfg(any(feature = "testing", test))] +use crate::{SecretKey, U512}; + +/// Configuration values related to a delegator. +#[derive(PartialEq, Ord, PartialOrd, Eq, Serialize, Deserialize, Debug, Clone)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct DelegatorConfig { + /// Validator public key. + pub validator_public_key: PublicKey, + /// Delegator public key. + pub delegator_public_key: PublicKey, + /// Balance for this delegator in Motes. + pub balance: Motes, + /// Delegated amount in Motes. + pub delegated_amount: Motes, +} + +impl DelegatorConfig { + /// Creates a new DelegatorConfig. + pub fn new( + validator_public_key: PublicKey, + delegator_public_key: PublicKey, + balance: Motes, + delegated_amount: Motes, + ) -> Self { + Self { + validator_public_key, + delegator_public_key, + balance, + delegated_amount, + } + } + + #[cfg(any(feature = "testing", test))] + /// Generates a random instance using a `TestRng`. 
+ pub fn random(rng: &mut TestRng) -> Self { + let validator_public_key = + PublicKey::from(&SecretKey::ed25519_from_bytes(rng.gen::<[u8; 32]>()).unwrap()); + let delegator_public_key = + PublicKey::from(&SecretKey::ed25519_from_bytes(rng.gen::<[u8; 32]>()).unwrap()); + let balance = Motes::new(rng.gen::()); + let delegated_amount = Motes::new(rng.gen::()); + + DelegatorConfig { + validator_public_key, + delegator_public_key, + balance, + delegated_amount, + } + } +} + +#[cfg(any(feature = "testing", test))] +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> DelegatorConfig { + let validator_secret_key = SecretKey::ed25519_from_bytes(rng.gen::<[u8; 32]>()).unwrap(); + let delegator_secret_key = SecretKey::ed25519_from_bytes(rng.gen::<[u8; 32]>()).unwrap(); + + let validator_public_key = PublicKey::from(&validator_secret_key); + let delegator_public_key = PublicKey::from(&delegator_secret_key); + + let mut u512_array = [0u8; 64]; + rng.fill_bytes(u512_array.as_mut()); + let balance = Motes::new(U512::from(u512_array)); + + rng.fill_bytes(u512_array.as_mut()); + let delegated_amount = Motes::new(U512::from(u512_array)); + + DelegatorConfig::new( + validator_public_key, + delegator_public_key, + balance, + delegated_amount, + ) + } +} + +impl ToBytes for DelegatorConfig { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + buffer.extend(self.validator_public_key.to_bytes()?); + buffer.extend(self.delegator_public_key.to_bytes()?); + buffer.extend(self.balance.to_bytes()?); + buffer.extend(self.delegated_amount.to_bytes()?); + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.validator_public_key.serialized_length() + + self.delegator_public_key.serialized_length() + + self.balance.serialized_length() + + self.delegated_amount.serialized_length() + } +} + +impl FromBytes for DelegatorConfig { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let 
(validator_public_key, remainder) = FromBytes::from_bytes(bytes)?; + let (delegator_public_key, remainder) = FromBytes::from_bytes(remainder)?; + let (balance, remainder) = FromBytes::from_bytes(remainder)?; + let (delegated_amount, remainder) = FromBytes::from_bytes(remainder)?; + let delegator_config = DelegatorConfig { + validator_public_key, + delegator_public_key, + balance, + delegated_amount, + }; + Ok((delegator_config, remainder)) + } +} + +impl From for GenesisAccount { + fn from(delegator_config: DelegatorConfig) -> Self { + GenesisAccount::delegator( + delegator_config.validator_public_key, + delegator_config.delegator_public_key, + delegator_config.balance, + delegator_config.delegated_amount, + ) + } +} diff --git a/types/src/chainspec/accounts_config/genesis.rs b/types/src/chainspec/accounts_config/genesis.rs new file mode 100644 index 0000000000..4acd1d62d7 --- /dev/null +++ b/types/src/chainspec/accounts_config/genesis.rs @@ -0,0 +1,512 @@ +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::{ + distributions::{Distribution, Standard}, + Rng, +}; +use serde::{Deserialize, Serialize}; + +#[cfg(any(feature = "testing", test))] +use crate::SecretKey; +use crate::{ + account::AccountHash, + bytesrepr, + bytesrepr::{FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + system::auction::DelegationRate, + Motes, PublicKey, +}; + +const TAG_LENGTH: usize = U8_SERIALIZED_LENGTH; + +#[repr(u8)] +enum GenesisAccountTag { + System = 0, + Account = 1, + Delegator = 2, + Administrator = 3, +} + +/// Represents details about genesis account's validator status. +#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct GenesisValidator { + /// Stake of a genesis validator. + bonded_amount: Motes, + /// Delegation rate in the range of 0-100. 
+ delegation_rate: DelegationRate, +} + +impl ToBytes for GenesisValidator { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + buffer.extend(self.bonded_amount.to_bytes()?); + buffer.extend(self.delegation_rate.to_bytes()?); + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.bonded_amount.serialized_length() + self.delegation_rate.serialized_length() + } +} + +impl FromBytes for GenesisValidator { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (bonded_amount, remainder) = FromBytes::from_bytes(bytes)?; + let (delegation_rate, remainder) = FromBytes::from_bytes(remainder)?; + let genesis_validator = GenesisValidator { + bonded_amount, + delegation_rate, + }; + Ok((genesis_validator, remainder)) + } +} + +impl GenesisValidator { + /// Creates new [`GenesisValidator`]. + pub fn new(bonded_amount: Motes, delegation_rate: DelegationRate) -> Self { + Self { + bonded_amount, + delegation_rate, + } + } + + /// Returns the bonded amount of a genesis validator. + pub fn bonded_amount(&self) -> Motes { + self.bonded_amount + } + + /// Returns the delegation rate of a genesis validator. + pub fn delegation_rate(&self) -> DelegationRate { + self.delegation_rate + } +} + +#[cfg(any(feature = "testing", test))] +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> GenesisValidator { + let bonded_amount = Motes::new(rng.gen::()); + let delegation_rate = rng.gen(); + + GenesisValidator::new(bonded_amount, delegation_rate) + } +} + +/// Special account in the system that is useful only for some private chains. +#[derive(Debug, Clone, PartialEq, Eq, Ord, PartialOrd, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct AdministratorAccount { + public_key: PublicKey, + balance: Motes, +} + +impl AdministratorAccount { + /// Creates new special account. 
+ pub fn new(public_key: PublicKey, balance: Motes) -> Self { + Self { + public_key, + balance, + } + } + + /// Gets a reference to the administrator account's public key. + pub fn public_key(&self) -> &PublicKey { + &self.public_key + } +} + +impl ToBytes for AdministratorAccount { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let AdministratorAccount { + public_key, + balance, + } = self; + let mut buffer = bytesrepr::allocate_buffer(self)?; + buffer.extend(public_key.to_bytes()?); + buffer.extend(balance.to_bytes()?); + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + let AdministratorAccount { + public_key, + balance, + } = self; + public_key.serialized_length() + balance.serialized_length() + } +} + +impl FromBytes for AdministratorAccount { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (public_key, remainder) = FromBytes::from_bytes(bytes)?; + let (balance, remainder) = FromBytes::from_bytes(remainder)?; + let administrator_account = AdministratorAccount { + public_key, + balance, + }; + Ok((administrator_account, remainder)) + } +} + +/// This enum represents possible states of a genesis account. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub enum GenesisAccount { + /// This variant is for internal use only - genesis process will create a virtual system + /// account and use it to call system contracts. + System, + /// Genesis account that will be created. + Account { + /// Public key of a genesis account. + public_key: PublicKey, + /// Starting balance of a genesis account. + balance: Motes, + /// If set, it will make this account a genesis validator. + validator: Option, + }, + /// The genesis delegator is a special account that will be created as a delegator. + /// It does not have any stake of its own, but will create a real account in the system + /// which will delegate to a genesis validator. 
+ Delegator { + /// Validator's public key that has to refer to other instance of + /// [`GenesisAccount::Account`] with a `validator` field set. + validator_public_key: PublicKey, + /// Public key of the genesis account that will be created as part of this entry. + delegator_public_key: PublicKey, + /// Starting balance of the account. + balance: Motes, + /// Delegated amount for given `validator_public_key`. + delegated_amount: Motes, + }, + /// An administrative account in the genesis process. + /// + /// This variant makes sense for some private chains. + Administrator(AdministratorAccount), +} + +impl From for GenesisAccount { + fn from(v: AdministratorAccount) -> Self { + Self::Administrator(v) + } +} + +impl GenesisAccount { + /// Create a system account variant. + pub fn system() -> Self { + Self::System + } + + /// Create a standard account variant. + pub fn account( + public_key: PublicKey, + balance: Motes, + validator: Option, + ) -> Self { + Self::Account { + public_key, + balance, + validator, + } + } + + /// Create a delegator account variant. + pub fn delegator( + validator_public_key: PublicKey, + delegator_public_key: PublicKey, + balance: Motes, + delegated_amount: Motes, + ) -> Self { + Self::Delegator { + validator_public_key, + delegator_public_key, + balance, + delegated_amount, + } + } + + /// The public key (if any) associated with the account. + pub fn public_key(&self) -> PublicKey { + match self { + GenesisAccount::System => PublicKey::System, + GenesisAccount::Account { public_key, .. } => public_key.clone(), + GenesisAccount::Delegator { + delegator_public_key, + .. + } => delegator_public_key.clone(), + GenesisAccount::Administrator(AdministratorAccount { public_key, .. }) => { + public_key.clone() + } + } + } + + /// The account hash for the account. + pub fn account_hash(&self) -> AccountHash { + match self { + GenesisAccount::System => PublicKey::System.to_account_hash(), + GenesisAccount::Account { public_key, .. 
} => public_key.to_account_hash(), + GenesisAccount::Delegator { + delegator_public_key, + .. + } => delegator_public_key.to_account_hash(), + GenesisAccount::Administrator(AdministratorAccount { public_key, .. }) => { + public_key.to_account_hash() + } + } + } + + /// How many motes are to be deposited in the account's main purse. + pub fn balance(&self) -> Motes { + match self { + GenesisAccount::System => Motes::zero(), + GenesisAccount::Account { balance, .. } => *balance, + GenesisAccount::Delegator { balance, .. } => *balance, + GenesisAccount::Administrator(AdministratorAccount { balance, .. }) => *balance, + } + } + + /// How many motes are to be staked. + /// + /// Staked accounts are either validators with some amount of bonded stake or delegators with + /// some amount of delegated stake. + pub fn staked_amount(&self) -> Motes { + match self { + GenesisAccount::System { .. } + | GenesisAccount::Account { + validator: None, .. + } => Motes::zero(), + GenesisAccount::Account { + validator: Some(genesis_validator), + .. + } => genesis_validator.bonded_amount(), + GenesisAccount::Delegator { + delegated_amount, .. + } => *delegated_amount, + GenesisAccount::Administrator(AdministratorAccount { + public_key: _, + balance: _, + }) => { + // This is defaulted to zero because administrator accounts are filtered out before + // validator set is created at the genesis. + Motes::zero() + } + } + } + + /// What is the delegation rate of a validator. + pub fn delegation_rate(&self) -> DelegationRate { + match self { + GenesisAccount::Account { + validator: Some(genesis_validator), + .. + } => genesis_validator.delegation_rate(), + GenesisAccount::System + | GenesisAccount::Account { + validator: None, .. + } + | GenesisAccount::Delegator { .. } => { + // This value represents a delegation rate in invalid state that system is supposed + // to reject if used. + DelegationRate::MAX + } + GenesisAccount::Administrator(AdministratorAccount { ..
}) => DelegationRate::MAX, + } + } + + /// Is this a virtual system account. + pub fn is_system_account(&self) -> bool { + matches!(self, GenesisAccount::System { .. }) + } + + /// Is this a validator account. + pub fn is_validator(&self) -> bool { + match self { + GenesisAccount::Account { + validator: Some(_), .. + } => true, + GenesisAccount::System { .. } + | GenesisAccount::Account { + validator: None, .. + } + | GenesisAccount::Delegator { .. } + | GenesisAccount::Administrator(AdministratorAccount { .. }) => false, + } + } + + /// Details about the genesis validator. + pub fn validator(&self) -> Option<&GenesisValidator> { + match self { + GenesisAccount::Account { + validator: Some(genesis_validator), + .. + } => Some(genesis_validator), + _ => None, + } + } + + /// Is this a delegator account. + pub fn is_delegator(&self) -> bool { + matches!(self, GenesisAccount::Delegator { .. }) + } + + /// Details about the genesis delegator. + pub fn as_delegator(&self) -> Option<(&PublicKey, &PublicKey, &Motes, &Motes)> { + match self { + GenesisAccount::Delegator { + validator_public_key, + delegator_public_key, + balance, + delegated_amount, + } => Some(( + validator_public_key, + delegator_public_key, + balance, + delegated_amount, + )), + _ => None, + } + } + + /// Gets the administrator account variant. + pub fn as_administrator_account(&self) -> Option<&AdministratorAccount> { + if let Self::Administrator(v) = self { + Some(v) + } else { + None + } + } + + /// Set validator. + pub fn try_set_validator(&mut self, genesis_validator: GenesisValidator) -> bool { + match self { + GenesisAccount::Account { validator, .. } => { + *validator = Some(genesis_validator); + true + } + GenesisAccount::System + | GenesisAccount::Delegator { .. 
} + | GenesisAccount::Administrator(_) => false, + } + } +} + +#[cfg(any(feature = "testing", test))] +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> GenesisAccount { + let mut bytes = [0u8; 32]; + rng.fill_bytes(&mut bytes[..]); + let secret_key = SecretKey::ed25519_from_bytes(bytes).unwrap(); + let public_key = PublicKey::from(&secret_key); + let balance = Motes::new(rng.gen::()); + let validator = rng.gen(); + + GenesisAccount::account(public_key, balance, validator) + } +} + +impl ToBytes for GenesisAccount { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + match self { + GenesisAccount::System => { + buffer.push(GenesisAccountTag::System as u8); + } + GenesisAccount::Account { + public_key, + balance, + validator, + } => { + buffer.push(GenesisAccountTag::Account as u8); + buffer.extend(public_key.to_bytes()?); + buffer.extend(balance.value().to_bytes()?); + buffer.extend(validator.to_bytes()?); + } + GenesisAccount::Delegator { + validator_public_key, + delegator_public_key, + balance, + delegated_amount, + } => { + buffer.push(GenesisAccountTag::Delegator as u8); + buffer.extend(validator_public_key.to_bytes()?); + buffer.extend(delegator_public_key.to_bytes()?); + buffer.extend(balance.to_bytes()?); + buffer.extend(delegated_amount.to_bytes()?); + } + GenesisAccount::Administrator(administrator_account) => { + buffer.push(GenesisAccountTag::Administrator as u8); + buffer.extend(administrator_account.to_bytes()?); + } + } + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + match self { + GenesisAccount::System => TAG_LENGTH, + GenesisAccount::Account { + public_key, + balance, + validator, + } => { + public_key.serialized_length() + + balance.value().serialized_length() + + validator.serialized_length() + + TAG_LENGTH + } + GenesisAccount::Delegator { + validator_public_key, + delegator_public_key, + balance, + delegated_amount, + } => { + 
validator_public_key.serialized_length() + + delegator_public_key.serialized_length() + + balance.value().serialized_length() + + delegated_amount.value().serialized_length() + + TAG_LENGTH + } + GenesisAccount::Administrator(administrator_account) => { + administrator_account.serialized_length() + TAG_LENGTH + } + } + } +} + +impl FromBytes for GenesisAccount { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + match tag { + tag if tag == GenesisAccountTag::System as u8 => { + let genesis_account = GenesisAccount::system(); + Ok((genesis_account, remainder)) + } + tag if tag == GenesisAccountTag::Account as u8 => { + let (public_key, remainder) = FromBytes::from_bytes(remainder)?; + let (balance, remainder) = FromBytes::from_bytes(remainder)?; + let (validator, remainder) = FromBytes::from_bytes(remainder)?; + let genesis_account = GenesisAccount::account(public_key, balance, validator); + Ok((genesis_account, remainder)) + } + tag if tag == GenesisAccountTag::Delegator as u8 => { + let (validator_public_key, remainder) = FromBytes::from_bytes(remainder)?; + let (delegator_public_key, remainder) = FromBytes::from_bytes(remainder)?; + let (balance, remainder) = FromBytes::from_bytes(remainder)?; + let (delegated_amount_value, remainder) = FromBytes::from_bytes(remainder)?; + let genesis_account = GenesisAccount::delegator( + validator_public_key, + delegator_public_key, + balance, + delegated_amount_value, + ); + Ok((genesis_account, remainder)) + } + tag if tag == GenesisAccountTag::Administrator as u8 => { + let (administrator_account, remainder) = + AdministratorAccount::from_bytes(remainder)?; + let genesis_account = GenesisAccount::Administrator(administrator_account); + Ok((genesis_account, remainder)) + } + _ => Err(bytesrepr::Error::Formatting), + } + } +} diff --git a/node/src/types/chainspec/accounts_config/validator_config.rs 
b/types/src/chainspec/accounts_config/validator_config.rs similarity index 81% rename from node/src/types/chainspec/accounts_config/validator_config.rs rename to types/src/chainspec/accounts_config/validator_config.rs index f5ac35266a..6b308b7c37 100644 --- a/node/src/types/chainspec/accounts_config/validator_config.rs +++ b/types/src/chainspec/accounts_config/validator_config.rs @@ -1,23 +1,21 @@ +#[cfg(feature = "datasize")] use datasize::DataSize; use num::Zero; -#[cfg(test)] +#[cfg(any(feature = "testing", test))] use rand::{distributions::Standard, prelude::*}; use serde::{Deserialize, Serialize}; -use casper_execution_engine::{ - core::engine_state::genesis::GenesisValidator, shared::motes::Motes, -}; -#[cfg(test)] -use casper_types::U512; -use casper_types::{ +use crate::{ bytesrepr::{self, FromBytes, ToBytes}, system::auction::DelegationRate, + GenesisValidator, Motes, }; +#[cfg(any(feature = "testing", test))] +use crate::{testing::TestRng, U512}; -#[cfg(test)] -use crate::testing::TestRng; - -#[derive(PartialEq, Eq, Ord, PartialOrd, Serialize, Deserialize, DataSize, Debug, Copy, Clone)] +/// Validator account configuration. +#[derive(PartialEq, Eq, Ord, PartialOrd, Serialize, Deserialize, Debug, Copy, Clone)] +#[cfg_attr(feature = "datasize", derive(DataSize))] pub struct ValidatorConfig { bonded_amount: Motes, #[serde(default = "DelegationRate::zero")] @@ -25,6 +23,7 @@ pub struct ValidatorConfig { } impl ValidatorConfig { + /// Creates a new `ValidatorConfig`. pub fn new(bonded_amount: Motes, delegation_rate: DelegationRate) -> Self { Self { bonded_amount, @@ -32,18 +31,20 @@ impl ValidatorConfig { } } + /// Delegation rate. pub fn delegation_rate(&self) -> DelegationRate { self.delegation_rate } + /// Bonded amount. pub fn bonded_amount(&self) -> Motes { self.bonded_amount } - #[cfg(test)] - /// Generates a random instance using a `TestRng`. + /// Returns a random `ValidatorConfig`. 
+ #[cfg(any(feature = "testing", test))] pub fn random(rng: &mut TestRng) -> Self { - let bonded_amount = Motes::new(U512::from(rng.gen::())); + let bonded_amount = Motes::new(rng.gen::()); let delegation_rate = rng.gen(); ValidatorConfig { @@ -53,7 +54,7 @@ impl ValidatorConfig { } } -#[cfg(test)] +#[cfg(any(feature = "testing", test))] impl Distribution for Standard { fn sample(&self, rng: &mut R) -> ValidatorConfig { let mut u512_array = [0; 64]; diff --git a/node/src/types/chainspec/activation_point.rs b/types/src/chainspec/activation_point.rs similarity index 77% rename from node/src/types/chainspec/activation_point.rs rename to types/src/chainspec/activation_point.rs index c87dd1bd01..33e622374b 100644 --- a/node/src/types/chainspec/activation_point.rs +++ b/types/src/chainspec/activation_point.rs @@ -1,38 +1,39 @@ -// TODO - remove once schemars stops causing warning. -#![allow(clippy::field_reassign_with_default)] - use std::fmt::{self, Display, Formatter}; +#[cfg(feature = "datasize")] use datasize::DataSize; -#[cfg(test)] +#[cfg(any(feature = "testing", test))] use rand::Rng; +#[cfg(feature = "json-schema")] use schemars::JsonSchema; use serde::{Deserialize, Serialize}; -use casper_types::{ +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +use crate::{ bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, - EraId, + EraId, TimeDiff, Timestamp, }; -#[cfg(test)] -use crate::testing::TestRng; -use crate::types::Timestamp; - const ERA_ID_TAG: u8 = 0; const GENESIS_TAG: u8 = 1; /// The first era to which the associated protocol version applies. -#[derive(Copy, Clone, DataSize, PartialEq, Eq, Serialize, Deserialize, Debug, JsonSchema)] +#[derive(Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] #[serde(untagged)] pub enum ActivationPoint { + /// Era id. EraId(EraId), + /// Genesis timestamp. 
Genesis(Timestamp), } impl ActivationPoint { - /// Returns whether we should upgrade the node due to the next era being at or after the upgrade - /// activation point. - pub(crate) fn should_upgrade(&self, era_being_deactivated: &EraId) -> bool { + /// Returns whether we should upgrade the node due to the next era being the upgrade activation + /// point. + pub fn should_upgrade(&self, era_being_deactivated: &EraId) -> bool { match self { ActivationPoint::EraId(era_id) => era_being_deactivated.successor() >= *era_id, ActivationPoint::Genesis(_) => false, @@ -40,7 +41,7 @@ impl ActivationPoint { } /// Returns the Era ID if `self` is of `EraId` variant, or else 0 if `Genesis`. - pub(crate) fn era_id(&self) -> EraId { + pub fn era_id(&self) -> EraId { match self { ActivationPoint::EraId(era_id) => *era_id, ActivationPoint::Genesis(_) => EraId::from(0), @@ -48,22 +49,30 @@ impl ActivationPoint { } /// Returns the timestamp if `self` is of `Genesis` variant, or else `None`. - pub(crate) fn genesis_timestamp(&self) -> Option { + pub fn genesis_timestamp(&self) -> Option { match self { ActivationPoint::EraId(_) => None, ActivationPoint::Genesis(timestamp) => Some(*timestamp), } } - /// Returns true if `self` is `Genesis`. - pub(crate) fn is_genesis(&self) -> bool { - match self { - ActivationPoint::EraId(_) => false, - ActivationPoint::Genesis(_) => true, + /// Returns a random `ActivationPoint`. 
+ #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + if rng.gen() { + ActivationPoint::EraId(EraId::random(rng)) + } else { + ActivationPoint::Genesis(Timestamp::random(rng)) } } } +impl Default for ActivationPoint { + fn default() -> Self { + ActivationPoint::Genesis(Timestamp::now().saturating_add(TimeDiff::from_seconds(15))) + } +} + impl Display for ActivationPoint { fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { match self { @@ -116,15 +125,3 @@ impl FromBytes for ActivationPoint { } } } - -#[cfg(test)] -impl ActivationPoint { - /// Generates a random instance using a `TestRng`. - pub fn random(rng: &mut TestRng) -> Self { - if rng.gen() { - ActivationPoint::EraId(EraId::from(rng.gen::() as u64)) - } else { - ActivationPoint::Genesis(Timestamp::random(rng)) - } - } -} diff --git a/types/src/chainspec/chainspec_raw_bytes.rs b/types/src/chainspec/chainspec_raw_bytes.rs new file mode 100644 index 0000000000..37c8347d2c --- /dev/null +++ b/types/src/chainspec/chainspec_raw_bytes.rs @@ -0,0 +1,196 @@ +use core::fmt::{self, Debug, Display, Formatter}; + +use crate::bytesrepr::{self, Bytes, FromBytes, ToBytes}; +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +/// The raw bytes of the chainspec.toml, genesis accounts.toml, and global_state.toml files. +#[derive(Clone, PartialEq, Eq, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub struct ChainspecRawBytes { + /// Raw bytes of the current chainspec.toml file. + chainspec_bytes: Bytes, + /// Raw bytes of the current genesis accounts.toml file. + maybe_genesis_accounts_bytes: Option, + /// Raw bytes of the current global_state.toml file. 
+ maybe_global_state_bytes: Option, +} + +impl ChainspecRawBytes { + /// Create an instance from parts. + pub fn new( + chainspec_bytes: Bytes, + maybe_genesis_accounts_bytes: Option, + maybe_global_state_bytes: Option, + ) -> Self { + ChainspecRawBytes { + chainspec_bytes, + maybe_genesis_accounts_bytes, + maybe_global_state_bytes, + } + } + + /// The bytes of the chainspec file. + pub fn chainspec_bytes(&self) -> &[u8] { + self.chainspec_bytes.as_slice() + } + + /// The bytes of global state account entries, when present for a protocol version. + pub fn maybe_genesis_accounts_bytes(&self) -> Option<&[u8]> { + match self.maybe_genesis_accounts_bytes.as_ref() { + Some(bytes) => Some(bytes.as_slice()), + None => None, + } + } + + /// The bytes of global state update entries, when present for a protocol version. + pub fn maybe_global_state_bytes(&self) -> Option<&[u8]> { + match self.maybe_global_state_bytes.as_ref() { + Some(bytes) => Some(bytes.as_slice()), + None => None, + } + } + + /// Returns a random `ChainspecRawBytes`. 
+ #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + use rand::Rng; + + let chainspec_bytes = Bytes::from(rng.random_vec(0..1024)); + let maybe_genesis_accounts_bytes = rng + .gen::() + .then(|| Bytes::from(rng.random_vec(0..1024))); + let maybe_global_state_bytes = rng + .gen::() + .then(|| Bytes::from(rng.random_vec(0..1024))); + ChainspecRawBytes { + chainspec_bytes, + maybe_genesis_accounts_bytes, + maybe_global_state_bytes, + } + } +} + +impl Debug for ChainspecRawBytes { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + let genesis_accounts_bytes_owned: Bytes; + let global_state_bytes_owned: Bytes; + f.debug_struct("ChainspecRawBytes") + .field( + "chainspec_bytes", + &self.chainspec_bytes[0..16].to_ascii_uppercase(), + ) + .field( + "maybe_genesis_accounts_bytes", + match self.maybe_genesis_accounts_bytes.as_ref() { + Some(genesis_accounts_bytes) => { + genesis_accounts_bytes_owned = + genesis_accounts_bytes[0..16].to_ascii_uppercase().into(); + &genesis_accounts_bytes_owned + } + None => &self.maybe_genesis_accounts_bytes, + }, + ) + .field( + "maybe_global_state_bytes", + match self.maybe_global_state_bytes.as_ref() { + Some(global_state_bytes) => { + global_state_bytes_owned = + global_state_bytes[0..16].to_ascii_uppercase().into(); + &global_state_bytes_owned + } + None => &self.maybe_global_state_bytes, + }, + ) + .finish() + } +} + +impl Display for ChainspecRawBytes { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + write!( + formatter, + "{}", + String::from_utf8_lossy(&self.chainspec_bytes) + )?; + if let Some(genesis_accounts_bytes) = &self.maybe_genesis_accounts_bytes { + write!( + formatter, + "{}", + String::from_utf8_lossy(genesis_accounts_bytes) + )?; + } + if let Some(global_state_bytes) = &self.maybe_global_state_bytes { + write!(formatter, "{}", String::from_utf8_lossy(global_state_bytes))?; + } + Ok(()) + } +} + +impl ToBytes for ChainspecRawBytes { + fn to_bytes(&self) -> Result, 
bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + let ChainspecRawBytes { + chainspec_bytes, + maybe_genesis_accounts_bytes, + maybe_global_state_bytes, + } = self; + + chainspec_bytes.write_bytes(writer)?; + maybe_genesis_accounts_bytes.write_bytes(writer)?; + maybe_global_state_bytes.write_bytes(writer) + } + + fn serialized_length(&self) -> usize { + let ChainspecRawBytes { + chainspec_bytes, + maybe_genesis_accounts_bytes, + maybe_global_state_bytes, + } = self; + chainspec_bytes.serialized_length() + + maybe_genesis_accounts_bytes.serialized_length() + + maybe_global_state_bytes.serialized_length() + } +} + +impl FromBytes for ChainspecRawBytes { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (chainspec_bytes, remainder) = FromBytes::from_bytes(bytes)?; + let (maybe_genesis_accounts_bytes, remainder) = FromBytes::from_bytes(remainder)?; + let (maybe_global_state_bytes, remainder) = FromBytes::from_bytes(remainder)?; + + Ok(( + ChainspecRawBytes { + chainspec_bytes, + maybe_genesis_accounts_bytes, + maybe_global_state_bytes, + }, + remainder, + )) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::testing::TestRng; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + + let val = ChainspecRawBytes::random(rng); + bytesrepr::test_serialization_roundtrip(&val); + } +} diff --git a/types/src/chainspec/core_config.rs b/types/src/chainspec/core_config.rs new file mode 100644 index 0000000000..3d01a50348 --- /dev/null +++ b/types/src/chainspec/core_config.rs @@ -0,0 +1,736 @@ +use alloc::collections::BTreeSet; +#[cfg(feature = "datasize")] +use datasize::DataSize; +use num::rational::Ratio; +#[cfg(any(feature = "testing", test))] +use rand::{ + distributions::{Distribution, Standard}, + Rng, +}; + +use serde::{ + de::{Deserializer, Error as 
DeError}, + Deserialize, Serialize, Serializer, +}; + +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + ProtocolVersion, PublicKey, TimeDiff, U512, +}; + +use super::{ + fee_handling::FeeHandling, hold_balance_handling::HoldBalanceHandling, + pricing_handling::PricingHandling, refund_handling::RefundHandling, +}; + +/// Default value for maximum associated keys configuration option. +pub const DEFAULT_MAX_ASSOCIATED_KEYS: u32 = 100; + +/// Default value for maximum runtime call stack height configuration option. +pub const DEFAULT_MAX_RUNTIME_CALL_STACK_HEIGHT: u32 = 12; + +/// Default refund handling. +pub const DEFAULT_REFUND_HANDLING: RefundHandling = RefundHandling::NoRefund; + +/// Default pricing handling. +pub const DEFAULT_PRICING_HANDLING: PricingHandling = PricingHandling::Fixed; + +/// Default fee handling. +pub const DEFAULT_FEE_HANDLING: FeeHandling = FeeHandling::NoFee; + +/// Default allow prepaid. +pub const DEFAULT_ALLOW_PREPAID: bool = false; + +/// Default value for minimum bid amount in motes. +pub const DEFAULT_MINIMUM_BID_AMOUNT: u64 = 2; + +/// Default processing hold balance handling. +#[allow(unused)] +pub const DEFAULT_PROCESSING_HOLD_BALANCE_HANDLING: HoldBalanceHandling = + HoldBalanceHandling::Accrued; + +/// Default gas hold balance handling. +pub const DEFAULT_GAS_HOLD_BALANCE_HANDLING: HoldBalanceHandling = HoldBalanceHandling::Amortized; + +/// Default gas hold interval. +pub const DEFAULT_GAS_HOLD_INTERVAL: TimeDiff = TimeDiff::from_seconds(24 * 60 * 60); + +/// Default enable entity setting. +pub const DEFAULT_ENABLE_ENTITY: bool = false; + +/// Default baseline motes amount. +pub const DEFAULT_BASELINE_MOTES_AMOUNT: u64 = 2_500_000_000; + +/// Configuration values associated with the core protocol. 
+#[derive(Clone, PartialEq, Eq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +// Disallow unknown fields to ensure config files and command-line overrides contain valid keys. +#[serde(deny_unknown_fields)] +pub struct CoreConfig { + /// Duration of an era. + pub era_duration: TimeDiff, + + /// Minimum era height. + pub minimum_era_height: u64, + + /// Minimum block time. + pub minimum_block_time: TimeDiff, + + /// Validator slots. + pub validator_slots: u32, + + /// Finality threshold fraction. + #[cfg_attr(feature = "datasize", data_size(skip))] + pub finality_threshold_fraction: Ratio, + + /// Protocol version from which nodes are required to hold strict finality signatures. + pub start_protocol_version_with_strict_finality_signatures_required: ProtocolVersion, + + /// Which finality is required for legacy blocks. + /// Used to determine finality sufficiency for new joiners syncing blocks created + /// in a protocol version before + /// `start_protocol_version_with_strict_finality_signatures_required`. + pub legacy_required_finality: LegacyRequiredFinality, + + /// Number of eras before an auction actually defines the set of validators. + /// If you bond with a sufficient bid in era N, you will be a validator in era N + + /// auction_delay + 1 + pub auction_delay: u64, + + /// The period after genesis during which a genesis validator's bid is locked. + pub locked_funds_period: TimeDiff, + + /// The period in which genesis validator's bid is released over time after it's unlocked. + pub vesting_schedule_period: TimeDiff, + + /// The delay in number of eras for paying out the unbonding amount. + pub unbonding_delay: u64, + + /// Round seigniorage rate represented as a fractional number. + #[cfg_attr(feature = "datasize", data_size(skip))] + pub round_seigniorage_rate: Ratio, + + /// Maximum number of associated keys for a single account. + pub max_associated_keys: u32, + + /// Maximum height of contract runtime call stack. 
+ pub max_runtime_call_stack_height: u32, + + /// The minimum bound of motes that can be delegated to a validator. + pub minimum_delegation_amount: u64, + + /// The maximum bound of motes that can be delegated to a validator. + pub maximum_delegation_amount: u64, + + /// The minimum bound of motes that can be bid for a validator. + pub minimum_bid_amount: u64, + + /// Global state prune batch size (0 means the feature is off in the current protocol version). + pub prune_batch_size: u64, + + /// Enables strict arguments checking when calling a contract. + pub strict_argument_checking: bool, + + /// How many peers to simultaneously ask when sync leaping. + pub simultaneous_peer_requests: u8, + + /// Which consensus protocol to use. + pub consensus_protocol: ConsensusProtocolName, + + /// The maximum amount of delegators per validator. + /// if the value is 0, there is no maximum capacity. + pub max_delegators_per_validator: u32, + + /// The split in finality signature rewards between block producer and participating signers. + #[cfg_attr(feature = "datasize", data_size(skip))] + pub finders_fee: Ratio, + + /// The proportion of baseline rewards going to reward finality signatures specifically. + #[cfg_attr(feature = "datasize", data_size(skip))] + pub finality_signature_proportion: Ratio, + + /// The cap for validator credits based upon a proportion of a receiving validator's total + /// stake. + #[cfg_attr(feature = "datasize", data_size(skip))] + pub validator_credit_cap: Ratio, + + /// Lookback interval indicating which past block we are looking at to reward. + pub signature_rewards_max_delay: u64, + /// Auction entrypoints such as "add_bid" or "delegate" are disabled if this flag is set to + /// `false`. Setting up this option makes sense only for private chains where validator set + /// rotation is unnecessary. + pub allow_auction_bids: bool, + /// Allows unrestricted transfers between users. 
+ pub allow_unrestricted_transfers: bool, + /// If set to false then consensus doesn't compute rewards and always uses 0. + pub compute_rewards: bool, + /// Refund handling. + #[cfg_attr(feature = "datasize", data_size(skip))] + pub refund_handling: RefundHandling, + /// Fee handling. + pub fee_handling: FeeHandling, + /// Pricing handling. + pub pricing_handling: PricingHandling, + /// Allow prepaid. + pub allow_prepaid: bool, + /// How do gas holds affect available balance calculations? + pub gas_hold_balance_handling: HoldBalanceHandling, + /// How long does it take for a gas hold to expire? + pub gas_hold_interval: TimeDiff, + /// Administrative accounts are a valid option for a private chain only. + //#[serde(default, skip_serializing_if = "BTreeSet::is_empty")] + pub administrators: BTreeSet, + /// Turn on migration to addressable entity behavior. + pub enable_addressable_entity: bool, + /// This value is used as the penalty payment amount, the minimum balance amount, + /// and the minimum consumed amount. + pub baseline_motes_amount: u64, + /// The flag on whether the engine will return an error for multiple + /// entity versions. + pub trap_on_ambiguous_entity_version: bool, +} + +impl CoreConfig { + /// Turn on migration to addressable entity behavior. + pub fn enable_addressable_entity(&self) -> bool { + self.enable_addressable_entity + } + + /// The number of eras that have already started and whose validators are still bonded. + pub fn recent_era_count(&self) -> u64 { + // Safe to use naked `-` operation assuming `CoreConfig::is_valid()` has been checked. + self.unbonding_delay - self.auction_delay + } + + /// The proportion of the total rewards going to block production. 
+ pub fn production_rewards_proportion(&self) -> Ratio { + Ratio::new(1, 1) - self.finality_signature_proportion + } + + /// The finder's fee, *i.e.* the proportion of the total rewards going to the validator + /// collecting the finality signatures which is the validator producing the block. + pub fn collection_rewards_proportion(&self) -> Ratio { + self.finders_fee * self.finality_signature_proportion + } + + /// The proportion of the total rewards going to finality signatures collection. + pub fn contribution_rewards_proportion(&self) -> Ratio { + (Ratio::new(1, 1) - self.finders_fee) * self.finality_signature_proportion + } + + /// The baseline motes amount as a U512. + pub fn baseline_motes_amount_u512(&self) -> U512 { + U512::from(self.baseline_motes_amount) + } +} + +#[cfg(any(feature = "testing", test))] +impl CoreConfig { + /// Generates a random instance using a `TestRng`. + pub fn random(rng: &mut TestRng) -> Self { + let era_duration = TimeDiff::from_seconds(rng.gen_range(600..604_800)); + let minimum_era_height = rng.gen_range(5..100); + let minimum_block_time = TimeDiff::from_seconds(rng.gen_range(1..60)); + let validator_slots = rng.gen_range(1..10_000); + let finality_threshold_fraction = Ratio::new(rng.gen_range(1..100), 100); + let start_protocol_version_with_strict_finality_signatures_required = + ProtocolVersion::from_parts(1, rng.gen_range(5..10), rng.gen_range(0..100)); + let legacy_required_finality = rng.gen(); + let auction_delay = rng.gen_range(1..5); + let locked_funds_period = TimeDiff::from_seconds(rng.gen_range(600..604_800)); + let vesting_schedule_period = TimeDiff::from_seconds(rng.gen_range(600..604_800)); + let unbonding_delay = rng.gen_range((auction_delay + 1)..1_000_000_000); + let round_seigniorage_rate = Ratio::new( + rng.gen_range(1..1_000_000_000), + rng.gen_range(1..1_000_000_000), + ); + let max_associated_keys = rng.gen(); + let max_runtime_call_stack_height = rng.gen(); + let minimum_delegation_amount = rng.gen::() as 
u64; + // `maximum_delegation_amount` must be greater than `minimum_delegation_amount`. + let maximum_delegation_amount = rng.gen_range(minimum_delegation_amount..u32::MAX as u64); + let minimum_bid_amount = DEFAULT_MINIMUM_BID_AMOUNT; + let prune_batch_size = rng.gen_range(0..100); + let strict_argument_checking = rng.gen(); + let simultaneous_peer_requests = rng.gen_range(3..100); + let consensus_protocol = rng.gen(); + let finders_fee = Ratio::new(rng.gen_range(1..100), 100); + let finality_signature_proportion = Ratio::new(rng.gen_range(1..100), 100); + let signature_rewards_max_delay = rng.gen_range(1..10); + let allow_auction_bids = rng.gen(); + let allow_unrestricted_transfers = rng.gen(); + let compute_rewards = rng.gen(); + let administrators = (0..rng.gen_range(0..=10u32)) + .map(|_| PublicKey::random(rng)) + .collect(); + let refund_handling = { + let numer = rng.gen_range(0..=100); + let refund_ratio = Ratio::new(numer, 100); + RefundHandling::Refund { refund_ratio } + }; + + let pricing_handling = if rng.gen() { + PricingHandling::PaymentLimited + } else { + PricingHandling::Fixed + }; + + let allow_prepaid = DEFAULT_ALLOW_PREPAID; + + let fee_handling = if rng.gen() { + FeeHandling::PayToProposer + } else { + FeeHandling::NoFee + }; + + let gas_hold_balance_handling = if rng.gen() { + HoldBalanceHandling::Accrued + } else { + HoldBalanceHandling::Amortized + }; + + let gas_hold_interval = TimeDiff::from_seconds(rng.gen_range(600..604_800)); + + let validator_credit_cap = Ratio::new(rng.gen_range(1..100), 100); + + CoreConfig { + era_duration, + minimum_era_height, + minimum_block_time, + validator_slots, + finality_threshold_fraction, + start_protocol_version_with_strict_finality_signatures_required, + legacy_required_finality, + auction_delay, + locked_funds_period, + vesting_schedule_period, + unbonding_delay, + round_seigniorage_rate, + max_associated_keys, + max_runtime_call_stack_height, + minimum_delegation_amount, + maximum_delegation_amount, + 
minimum_bid_amount, + prune_batch_size, + strict_argument_checking, + simultaneous_peer_requests, + consensus_protocol, + max_delegators_per_validator: 0, + finders_fee, + finality_signature_proportion, + signature_rewards_max_delay, + allow_auction_bids, + administrators, + allow_unrestricted_transfers, + compute_rewards, + refund_handling, + pricing_handling, + allow_prepaid, + fee_handling, + gas_hold_balance_handling, + gas_hold_interval, + validator_credit_cap, + enable_addressable_entity: DEFAULT_ENABLE_ENTITY, + baseline_motes_amount: DEFAULT_BASELINE_MOTES_AMOUNT, + trap_on_ambiguous_entity_version: false, + } + } +} + +impl Default for CoreConfig { + fn default() -> Self { + Self { + era_duration: TimeDiff::from_seconds(41), + minimum_era_height: 5, + minimum_block_time: TimeDiff::from_millis(4096), + validator_slots: 7, + finality_threshold_fraction: Ratio::new(1, 3), + start_protocol_version_with_strict_finality_signatures_required: + ProtocolVersion::from_parts(1, 5, 0), + legacy_required_finality: LegacyRequiredFinality::Weak, + auction_delay: 1, + locked_funds_period: Default::default(), + vesting_schedule_period: Default::default(), + unbonding_delay: 7, + round_seigniorage_rate: Ratio::new(1, 4_200_000_000_000_000_000), + max_associated_keys: DEFAULT_MAX_ASSOCIATED_KEYS, + max_runtime_call_stack_height: DEFAULT_MAX_RUNTIME_CALL_STACK_HEIGHT, + minimum_delegation_amount: 500_000_000_000, + maximum_delegation_amount: 1_000_000_000_000_000_000, + minimum_bid_amount: DEFAULT_MINIMUM_BID_AMOUNT, + prune_batch_size: 0, + strict_argument_checking: false, + simultaneous_peer_requests: 5, + consensus_protocol: ConsensusProtocolName::Zug, + max_delegators_per_validator: 1200, + finders_fee: Ratio::new(1, 5), + finality_signature_proportion: Ratio::new(1, 2), + signature_rewards_max_delay: 3, + allow_auction_bids: true, + allow_unrestricted_transfers: true, + compute_rewards: true, + administrators: Default::default(), + refund_handling: 
DEFAULT_REFUND_HANDLING, + pricing_handling: DEFAULT_PRICING_HANDLING, + fee_handling: DEFAULT_FEE_HANDLING, + allow_prepaid: DEFAULT_ALLOW_PREPAID, + gas_hold_balance_handling: DEFAULT_GAS_HOLD_BALANCE_HANDLING, + gas_hold_interval: DEFAULT_GAS_HOLD_INTERVAL, + validator_credit_cap: Ratio::new(1, 5), + enable_addressable_entity: DEFAULT_ENABLE_ENTITY, + baseline_motes_amount: DEFAULT_BASELINE_MOTES_AMOUNT, + trap_on_ambiguous_entity_version: false, + } + } +} + +impl ToBytes for CoreConfig { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + buffer.extend(self.era_duration.to_bytes()?); + buffer.extend(self.minimum_era_height.to_bytes()?); + buffer.extend(self.minimum_block_time.to_bytes()?); + buffer.extend(self.validator_slots.to_bytes()?); + buffer.extend(self.finality_threshold_fraction.to_bytes()?); + buffer.extend( + self.start_protocol_version_with_strict_finality_signatures_required + .to_bytes()?, + ); + buffer.extend(self.legacy_required_finality.to_bytes()?); + buffer.extend(self.auction_delay.to_bytes()?); + buffer.extend(self.locked_funds_period.to_bytes()?); + buffer.extend(self.vesting_schedule_period.to_bytes()?); + buffer.extend(self.unbonding_delay.to_bytes()?); + buffer.extend(self.round_seigniorage_rate.to_bytes()?); + buffer.extend(self.max_associated_keys.to_bytes()?); + buffer.extend(self.max_runtime_call_stack_height.to_bytes()?); + buffer.extend(self.minimum_delegation_amount.to_bytes()?); + buffer.extend(self.maximum_delegation_amount.to_bytes()?); + buffer.extend(self.minimum_bid_amount.to_bytes()?); + buffer.extend(self.prune_batch_size.to_bytes()?); + buffer.extend(self.strict_argument_checking.to_bytes()?); + buffer.extend(self.simultaneous_peer_requests.to_bytes()?); + buffer.extend(self.consensus_protocol.to_bytes()?); + buffer.extend(self.max_delegators_per_validator.to_bytes()?); + buffer.extend(self.finders_fee.to_bytes()?); + 
buffer.extend(self.finality_signature_proportion.to_bytes()?); + buffer.extend(self.signature_rewards_max_delay.to_bytes()?); + buffer.extend(self.allow_auction_bids.to_bytes()?); + buffer.extend(self.allow_unrestricted_transfers.to_bytes()?); + buffer.extend(self.compute_rewards.to_bytes()?); + buffer.extend(self.administrators.to_bytes()?); + buffer.extend(self.refund_handling.to_bytes()?); + buffer.extend(self.pricing_handling.to_bytes()?); + buffer.extend(self.fee_handling.to_bytes()?); + buffer.extend(self.allow_prepaid.to_bytes()?); + buffer.extend(self.gas_hold_balance_handling.to_bytes()?); + buffer.extend(self.gas_hold_interval.to_bytes()?); + buffer.extend(self.validator_credit_cap.to_bytes()?); + buffer.extend(self.enable_addressable_entity.to_bytes()?); + buffer.extend(self.baseline_motes_amount.to_bytes()?); + buffer.extend(self.trap_on_ambiguous_entity_version.to_bytes()?); + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.era_duration.serialized_length() + + self.minimum_era_height.serialized_length() + + self.minimum_block_time.serialized_length() + + self.validator_slots.serialized_length() + + self.finality_threshold_fraction.serialized_length() + + self + .start_protocol_version_with_strict_finality_signatures_required + .serialized_length() + + self.legacy_required_finality.serialized_length() + + self.auction_delay.serialized_length() + + self.locked_funds_period.serialized_length() + + self.vesting_schedule_period.serialized_length() + + self.unbonding_delay.serialized_length() + + self.round_seigniorage_rate.serialized_length() + + self.max_associated_keys.serialized_length() + + self.max_runtime_call_stack_height.serialized_length() + + self.minimum_delegation_amount.serialized_length() + + self.maximum_delegation_amount.serialized_length() + + self.minimum_bid_amount.serialized_length() + + self.prune_batch_size.serialized_length() + + self.strict_argument_checking.serialized_length() + + 
self.simultaneous_peer_requests.serialized_length() + + self.consensus_protocol.serialized_length() + + self.max_delegators_per_validator.serialized_length() + + self.finders_fee.serialized_length() + + self.finality_signature_proportion.serialized_length() + + self.signature_rewards_max_delay.serialized_length() + + self.allow_auction_bids.serialized_length() + + self.allow_unrestricted_transfers.serialized_length() + + self.compute_rewards.serialized_length() + + self.administrators.serialized_length() + + self.refund_handling.serialized_length() + + self.pricing_handling.serialized_length() + + self.fee_handling.serialized_length() + + self.allow_prepaid.serialized_length() + + self.gas_hold_balance_handling.serialized_length() + + self.gas_hold_interval.serialized_length() + + self.validator_credit_cap.serialized_length() + + self.enable_addressable_entity.serialized_length() + + self.baseline_motes_amount.serialized_length() + + self.trap_on_ambiguous_entity_version.serialized_length() + } +} + +impl FromBytes for CoreConfig { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (era_duration, remainder) = TimeDiff::from_bytes(bytes)?; + let (minimum_era_height, remainder) = u64::from_bytes(remainder)?; + let (minimum_block_time, remainder) = TimeDiff::from_bytes(remainder)?; + let (validator_slots, remainder) = u32::from_bytes(remainder)?; + let (finality_threshold_fraction, remainder) = Ratio::::from_bytes(remainder)?; + let (start_protocol_version_with_strict_finality_signatures_required, remainder) = + ProtocolVersion::from_bytes(remainder)?; + let (legacy_required_finality, remainder) = LegacyRequiredFinality::from_bytes(remainder)?; + let (auction_delay, remainder) = u64::from_bytes(remainder)?; + let (locked_funds_period, remainder) = TimeDiff::from_bytes(remainder)?; + let (vesting_schedule_period, remainder) = TimeDiff::from_bytes(remainder)?; + let (unbonding_delay, remainder) = u64::from_bytes(remainder)?; + let 
(round_seigniorage_rate, remainder) = Ratio::::from_bytes(remainder)?; + let (max_associated_keys, remainder) = u32::from_bytes(remainder)?; + let (max_runtime_call_stack_height, remainder) = u32::from_bytes(remainder)?; + let (minimum_delegation_amount, remainder) = u64::from_bytes(remainder)?; + let (maximum_delegation_amount, remainder) = u64::from_bytes(remainder)?; + let (minimum_bid_amount, remainder) = u64::from_bytes(remainder)?; + let (prune_batch_size, remainder) = u64::from_bytes(remainder)?; + let (strict_argument_checking, remainder) = bool::from_bytes(remainder)?; + let (simultaneous_peer_requests, remainder) = u8::from_bytes(remainder)?; + let (consensus_protocol, remainder) = ConsensusProtocolName::from_bytes(remainder)?; + let (max_delegators_per_validator, remainder) = FromBytes::from_bytes(remainder)?; + let (finders_fee, remainder) = Ratio::from_bytes(remainder)?; + let (finality_signature_proportion, remainder) = Ratio::from_bytes(remainder)?; + let (signature_rewards_max_delay, remainder) = u64::from_bytes(remainder)?; + let (allow_auction_bids, remainder) = FromBytes::from_bytes(remainder)?; + let (allow_unrestricted_transfers, remainder) = FromBytes::from_bytes(remainder)?; + let (compute_rewards, remainder) = bool::from_bytes(remainder)?; + let (administrative_accounts, remainder) = FromBytes::from_bytes(remainder)?; + let (refund_handling, remainder) = FromBytes::from_bytes(remainder)?; + let (pricing_handling, remainder) = FromBytes::from_bytes(remainder)?; + let (fee_handling, remainder) = FromBytes::from_bytes(remainder)?; + let (allow_prepaid, remainder) = FromBytes::from_bytes(remainder)?; + let (gas_hold_balance_handling, remainder) = FromBytes::from_bytes(remainder)?; + let (gas_hold_interval, remainder) = TimeDiff::from_bytes(remainder)?; + let (validator_credit_cap, remainder) = Ratio::from_bytes(remainder)?; + let (enable_addressable_entity, remainder) = FromBytes::from_bytes(remainder)?; + let (baseline_motes_amount, remainder) 
= u64::from_bytes(remainder)?; + let (trap_on_ambiguous_entity_version, remainder) = bool::from_bytes(remainder)?; + let config = CoreConfig { + era_duration, + minimum_era_height, + minimum_block_time, + validator_slots, + finality_threshold_fraction, + start_protocol_version_with_strict_finality_signatures_required, + legacy_required_finality, + auction_delay, + locked_funds_period, + vesting_schedule_period, + unbonding_delay, + round_seigniorage_rate, + max_associated_keys, + max_runtime_call_stack_height, + minimum_delegation_amount, + maximum_delegation_amount, + minimum_bid_amount, + prune_batch_size, + strict_argument_checking, + simultaneous_peer_requests, + consensus_protocol, + max_delegators_per_validator, + finders_fee, + finality_signature_proportion, + signature_rewards_max_delay, + allow_auction_bids, + allow_unrestricted_transfers, + compute_rewards, + administrators: administrative_accounts, + refund_handling, + pricing_handling, + fee_handling, + allow_prepaid, + gas_hold_balance_handling, + gas_hold_interval, + validator_credit_cap, + enable_addressable_entity, + baseline_motes_amount, + trap_on_ambiguous_entity_version, + }; + Ok((config, remainder)) + } +} + +/// Consensus protocol name. +#[derive(Copy, Clone, PartialEq, Eq, Debug, Default)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub enum ConsensusProtocolName { + /// Highway. + Highway, + /// Zug. 
+ #[default] + Zug, +} + +impl Serialize for ConsensusProtocolName { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + match self { + ConsensusProtocolName::Highway => "Highway", + ConsensusProtocolName::Zug => "Zug", + } + .serialize(serializer) + } +} + +impl<'de> Deserialize<'de> for ConsensusProtocolName { + fn deserialize>(deserializer: D) -> Result { + match String::deserialize(deserializer)?.to_lowercase().as_str() { + "highway" => Ok(ConsensusProtocolName::Highway), + "zug" => Ok(ConsensusProtocolName::Zug), + _ => Err(DeError::custom("unknown consensus protocol name")), + } + } +} + +const CONSENSUS_HIGHWAY_TAG: u8 = 0; +const CONSENSUS_ZUG_TAG: u8 = 1; + +impl ToBytes for ConsensusProtocolName { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let tag = match self { + ConsensusProtocolName::Highway => CONSENSUS_HIGHWAY_TAG, + ConsensusProtocolName::Zug => CONSENSUS_ZUG_TAG, + }; + Ok(vec![tag]) + } + + fn serialized_length(&self) -> usize { + 1 + } +} + +impl FromBytes for ConsensusProtocolName { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + let name = match tag { + CONSENSUS_HIGHWAY_TAG => ConsensusProtocolName::Highway, + CONSENSUS_ZUG_TAG => ConsensusProtocolName::Zug, + _ => return Err(bytesrepr::Error::Formatting), + }; + Ok((name, remainder)) + } +} + +#[cfg(any(feature = "testing", test))] +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> ConsensusProtocolName { + if rng.gen() { + ConsensusProtocolName::Highway + } else { + ConsensusProtocolName::Zug + } + } +} + +/// Which finality a legacy block needs during a fast sync. +#[derive(Copy, Clone, PartialEq, Eq, Debug, Default)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub enum LegacyRequiredFinality { + /// Strict finality: more than 2/3rd of validators. + Strict, + /// Weak finality: more than 1/3rd of validators. + Weak, + /// Finality always valid. 
+ #[default] + Any, +} + +impl Serialize for LegacyRequiredFinality { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + match self { + LegacyRequiredFinality::Strict => "Strict", + LegacyRequiredFinality::Weak => "Weak", + LegacyRequiredFinality::Any => "Any", + } + .serialize(serializer) + } +} + +impl<'de> Deserialize<'de> for LegacyRequiredFinality { + fn deserialize>(deserializer: D) -> Result { + match String::deserialize(deserializer)?.to_lowercase().as_str() { + "strict" => Ok(LegacyRequiredFinality::Strict), + "weak" => Ok(LegacyRequiredFinality::Weak), + "any" => Ok(LegacyRequiredFinality::Any), + _ => Err(DeError::custom("unknown legacy required finality")), + } + } +} + +const LEGACY_REQUIRED_FINALITY_STRICT_TAG: u8 = 0; +const LEGACY_REQUIRED_FINALITY_WEAK_TAG: u8 = 1; +const LEGACY_REQUIRED_FINALITY_ANY_TAG: u8 = 2; + +impl ToBytes for LegacyRequiredFinality { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let tag = match self { + LegacyRequiredFinality::Strict => LEGACY_REQUIRED_FINALITY_STRICT_TAG, + LegacyRequiredFinality::Weak => LEGACY_REQUIRED_FINALITY_WEAK_TAG, + LegacyRequiredFinality::Any => LEGACY_REQUIRED_FINALITY_ANY_TAG, + }; + Ok(vec![tag]) + } + + fn serialized_length(&self) -> usize { + 1 + } +} + +impl FromBytes for LegacyRequiredFinality { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + match tag { + LEGACY_REQUIRED_FINALITY_STRICT_TAG => Ok((LegacyRequiredFinality::Strict, remainder)), + LEGACY_REQUIRED_FINALITY_WEAK_TAG => Ok((LegacyRequiredFinality::Weak, remainder)), + LEGACY_REQUIRED_FINALITY_ANY_TAG => Ok((LegacyRequiredFinality::Any, remainder)), + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +#[cfg(any(feature = "testing", test))] +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> LegacyRequiredFinality { + match rng.gen_range(0..3) { + 0 => LegacyRequiredFinality::Strict, + 1 => 
LegacyRequiredFinality::Weak, + 2 => LegacyRequiredFinality::Any, + _not_in_range => unreachable!(), + } + } +} + +#[cfg(test)] +mod tests { + use rand::SeedableRng; + + use super::*; + + #[test] + fn bytesrepr_roundtrip() { + let mut rng = TestRng::from_entropy(); + let config = CoreConfig::random(&mut rng); + bytesrepr::test_serialization_roundtrip(&config); + } +} diff --git a/types/src/chainspec/fee_handling.rs b/types/src/chainspec/fee_handling.rs new file mode 100644 index 0000000000..0327372f53 --- /dev/null +++ b/types/src/chainspec/fee_handling.rs @@ -0,0 +1,103 @@ +#[cfg(feature = "datasize")] +use datasize::DataSize; +use serde::{Deserialize, Serialize}; + +use crate::bytesrepr::{self, FromBytes, ToBytes}; + +const FEE_HANDLING_PROPOSER_TAG: u8 = 0; +const FEE_HANDLING_ACCUMULATE_TAG: u8 = 1; +const FEE_HANDLING_BURN_TAG: u8 = 2; +const FEE_HANDLING_NONE_TAG: u8 = 3; + +/// Defines how fees are handled in the system. +#[derive(Copy, Clone, Debug, PartialEq, Eq, Serialize, Deserialize, Default)] +#[serde(tag = "type", rename_all = "snake_case")] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub enum FeeHandling { + /// Transaction fees are paid to the block proposer. + /// + /// This is the default option for public chains. + PayToProposer, + /// Transaction fees are accumulated in a special purse and then distributed during end of era + /// processing evenly among all administrator accounts. + /// + /// This setting is applicable for some private chains (but not all). + Accumulate, + /// Burn the fees. + Burn, + /// No fees. + // in 1.x the (implicit) default was PayToProposer + // FeeHandling::PayToProposer + // in 2.x the default is NoFee as there are no fees. + #[default] + NoFee, +} + +impl FeeHandling { + /// Is the Accumulate variant selected? + pub fn is_accumulate(&self) -> bool { + matches!(self, FeeHandling::Accumulate) + } + + /// Returns true if configured for no fees. 
+ pub fn is_no_fee(&self) -> bool { + matches!(self, FeeHandling::NoFee) + } +} + +impl ToBytes for FeeHandling { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + match self { + FeeHandling::PayToProposer => Ok(vec![FEE_HANDLING_PROPOSER_TAG]), + FeeHandling::Accumulate => Ok(vec![FEE_HANDLING_ACCUMULATE_TAG]), + FeeHandling::Burn => Ok(vec![FEE_HANDLING_BURN_TAG]), + FeeHandling::NoFee => Ok(vec![FEE_HANDLING_NONE_TAG]), + } + } + + fn serialized_length(&self) -> usize { + 1 + } +} + +impl FromBytes for FeeHandling { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, rem) = u8::from_bytes(bytes)?; + match tag { + FEE_HANDLING_PROPOSER_TAG => Ok((FeeHandling::PayToProposer, rem)), + FEE_HANDLING_ACCUMULATE_TAG => Ok((FeeHandling::Accumulate, rem)), + FEE_HANDLING_BURN_TAG => Ok((FeeHandling::Burn, rem)), + FEE_HANDLING_NONE_TAG => Ok((FeeHandling::NoFee, rem)), + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn bytesrepr_roundtrip_for_refund() { + let fee_config = FeeHandling::PayToProposer; + bytesrepr::test_serialization_roundtrip(&fee_config); + } + + #[test] + fn bytesrepr_roundtrip_for_accumulate() { + let fee_config = FeeHandling::Accumulate; + bytesrepr::test_serialization_roundtrip(&fee_config); + } + + #[test] + fn bytesrepr_roundtrip_for_burn() { + let fee_config = FeeHandling::Burn; + bytesrepr::test_serialization_roundtrip(&fee_config); + } + + #[test] + fn bytesrepr_roundtrip_for_no_fee() { + let fee_config = FeeHandling::NoFee; + bytesrepr::test_serialization_roundtrip(&fee_config); + } +} diff --git a/types/src/chainspec/genesis_config.rs b/types/src/chainspec/genesis_config.rs new file mode 100644 index 0000000000..ce05950792 --- /dev/null +++ b/types/src/chainspec/genesis_config.rs @@ -0,0 +1,260 @@ +//! Contains genesis configuration settings. 
+ +#[cfg(any(feature = "testing", test))] +use std::iter; + +use num_rational::Ratio; +#[cfg(any(feature = "testing", test))] +use rand::{ + distributions::{Distribution, Standard}, + Rng, +}; +use serde::{Deserialize, Serialize}; + +use crate::{ + AdministratorAccount, Chainspec, GenesisAccount, GenesisValidator, HoldBalanceHandling, Motes, + PublicKey, SystemConfig, WasmConfig, +}; + +use super::StorageCosts; + +/// Represents the details of a genesis process. +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct GenesisConfig { + accounts: Vec, + wasm_config: WasmConfig, + system_config: SystemConfig, + validator_slots: u32, + auction_delay: u64, + locked_funds_period_millis: u64, + round_seigniorage_rate: Ratio, + unbonding_delay: u64, + genesis_timestamp_millis: u64, + gas_hold_balance_handling: HoldBalanceHandling, + gas_hold_interval_millis: u64, + enable_addressable_entity: bool, + storage_costs: StorageCosts, +} + +impl GenesisConfig { + /// Creates a new genesis configuration. + #[allow(clippy::too_many_arguments)] + pub fn new( + accounts: Vec, + wasm_config: WasmConfig, + system_config: SystemConfig, + validator_slots: u32, + auction_delay: u64, + locked_funds_period_millis: u64, + round_seigniorage_rate: Ratio, + unbonding_delay: u64, + genesis_timestamp_millis: u64, + gas_hold_balance_handling: HoldBalanceHandling, + gas_hold_interval_millis: u64, + enable_addressable_entity: bool, + storage_costs: StorageCosts, + ) -> GenesisConfig { + GenesisConfig { + accounts, + wasm_config, + system_config, + validator_slots, + auction_delay, + locked_funds_period_millis, + round_seigniorage_rate, + unbonding_delay, + genesis_timestamp_millis, + gas_hold_balance_handling, + gas_hold_interval_millis, + enable_addressable_entity, + storage_costs, + } + } + + /// Returns WASM config. + pub fn wasm_config(&self) -> &WasmConfig { + &self.wasm_config + } + + /// Returns system config. 
+ pub fn system_config(&self) -> &SystemConfig { + &self.system_config + } + + /// Returns all bonded genesis validators. + pub fn get_bonded_validators(&self) -> impl Iterator { + self.accounts_iter() + .filter(|&genesis_account| genesis_account.is_validator()) + } + + /// Returns all bonded genesis delegators. + pub fn get_bonded_delegators( + &self, + ) -> impl Iterator { + self.accounts + .iter() + .filter_map(|genesis_account| genesis_account.as_delegator()) + } + + /// Returns all genesis accounts. + pub fn accounts(&self) -> &[GenesisAccount] { + self.accounts.as_slice() + } + + /// Returns an iterator over all genesis accounts. + pub fn accounts_iter(&self) -> impl Iterator { + self.accounts.iter() + } + + /// Returns an iterator over all administrative accounts. + pub fn administrative_accounts(&self) -> impl Iterator { + self.accounts + .iter() + .filter_map(GenesisAccount::as_administrator_account) + } + + /// Adds new genesis account to the config. + pub fn push_account(&mut self, account: GenesisAccount) { + self.accounts.push(account) + } + + /// Returns validator slots. + pub fn validator_slots(&self) -> u32 { + self.validator_slots + } + + /// Returns auction delay. + pub fn auction_delay(&self) -> u64 { + self.auction_delay + } + + /// Returns locked funds period expressed in milliseconds. + pub fn locked_funds_period_millis(&self) -> u64 { + self.locked_funds_period_millis + } + + /// Returns round seigniorage rate. + pub fn round_seigniorage_rate(&self) -> Ratio { + self.round_seigniorage_rate + } + + /// Returns unbonding delay in eras. + pub fn unbonding_delay(&self) -> u64 { + self.unbonding_delay + } + + /// Returns genesis timestamp expressed in milliseconds. + pub fn genesis_timestamp_millis(&self) -> u64 { + self.genesis_timestamp_millis + } + + /// Returns gas hold balance handling. 
+ pub fn gas_hold_balance_handling(&self) -> HoldBalanceHandling { + self.gas_hold_balance_handling + } + + /// Returns gas hold interval expressed in milliseconds. + pub fn gas_hold_interval_millis(&self) -> u64 { + self.gas_hold_interval_millis + } + + /// Enable entity. + pub fn enable_entity(&self) -> bool { + self.enable_addressable_entity + } + + /// Set enable entity. + pub fn set_enable_entity(&mut self, enable: bool) { + self.enable_addressable_entity = enable + } + + /// Push genesis validator. + pub fn push_genesis_validator( + &mut self, + public_key: &PublicKey, + genesis_validator: GenesisValidator, + ) { + if let Some(genesis_account) = self + .accounts + .iter_mut() + .find(|x| &x.public_key() == public_key) + { + genesis_account.try_set_validator(genesis_validator); + } + } +} + +#[cfg(any(feature = "testing", test))] +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> GenesisConfig { + let count = rng.gen_range(1..10); + + let accounts = iter::repeat(()).map(|_| rng.gen()).take(count).collect(); + + let wasm_config = rng.gen(); + + let system_config = rng.gen(); + + let validator_slots = rng.gen(); + + let auction_delay = rng.gen(); + + let locked_funds_period_millis = rng.gen(); + + let round_seigniorage_rate = Ratio::new( + rng.gen_range(1..1_000_000_000), + rng.gen_range(1..1_000_000_000), + ); + + let unbonding_delay = rng.gen(); + + let genesis_timestamp_millis = rng.gen(); + let gas_hold_balance_handling = rng.gen(); + let gas_hold_interval_millis = rng.gen(); + let storage_costs = rng.gen(); + + GenesisConfig { + accounts, + wasm_config, + system_config, + validator_slots, + auction_delay, + locked_funds_period_millis, + round_seigniorage_rate, + unbonding_delay, + genesis_timestamp_millis, + gas_hold_balance_handling, + gas_hold_interval_millis, + enable_addressable_entity: false, + storage_costs, + } + } +} + +impl From<&Chainspec> for GenesisConfig { + fn from(chainspec: &Chainspec) -> Self { + let 
genesis_timestamp_millis = chainspec + .protocol_config + .activation_point + .genesis_timestamp() + .map_or(0, |timestamp| timestamp.millis()); + let gas_hold_interval_millis = chainspec.core_config.gas_hold_interval.millis(); + let gas_hold_balance_handling = chainspec.core_config.gas_hold_balance_handling; + let storage_costs = chainspec.storage_costs; + GenesisConfig { + accounts: chainspec.network_config.accounts_config.clone().into(), + wasm_config: chainspec.wasm_config, + system_config: chainspec.system_costs_config, + validator_slots: chainspec.core_config.validator_slots, + auction_delay: chainspec.core_config.auction_delay, + locked_funds_period_millis: chainspec.core_config.locked_funds_period.millis(), + round_seigniorage_rate: chainspec.core_config.round_seigniorage_rate, + unbonding_delay: chainspec.core_config.unbonding_delay, + genesis_timestamp_millis, + gas_hold_balance_handling, + gas_hold_interval_millis, + enable_addressable_entity: chainspec.core_config.enable_addressable_entity, + storage_costs, + } + } +} diff --git a/types/src/chainspec/global_state_update.rs b/types/src/chainspec/global_state_update.rs new file mode 100644 index 0000000000..68de870c9e --- /dev/null +++ b/types/src/chainspec/global_state_update.rs @@ -0,0 +1,181 @@ +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::Rng; +use serde::{Deserialize, Serialize}; +use std::{collections::BTreeMap, convert::TryFrom}; +use thiserror::Error; + +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +use crate::{ + bytesrepr::{self, Bytes, FromBytes, ToBytes}, + AsymmetricType, Key, PublicKey, U512, +}; + +#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, Clone)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[serde(deny_unknown_fields)] +pub struct GlobalStateUpdateEntry { + key: String, + value: String, +} + +#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, Clone)] +#[cfg_attr(feature = 
"datasize", derive(DataSize))] +#[serde(deny_unknown_fields)] +pub struct GlobalStateUpdateValidatorInfo { + public_key: String, + weight: String, +} + +/// Type storing global state update entries. +#[derive(PartialEq, Eq, Serialize, Deserialize, Debug, Clone)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[serde(deny_unknown_fields)] +pub struct GlobalStateUpdateConfig { + validators: Option>, + entries: Vec, +} + +/// Type storing the information about modifications to be applied to the global state. +/// +/// It stores the serialized `StoredValue`s corresponding to keys to be modified, and for the case +/// where the validator set is being modified in any way, the full set of post-upgrade validators. +#[derive(Clone, PartialEq, Eq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct GlobalStateUpdate { + /// Some with all validators (including pre-existent), if any change to the set is made. + pub validators: Option>, + /// Global state key value pairs, which will be directly upserted into global state against + /// the root hash of the final block of the era before the upgrade. + pub entries: BTreeMap, +} + +impl GlobalStateUpdate { + /// Returns a random `GlobalStateUpdate`. 
+ #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + let mut validators = BTreeMap::new(); + if rng.gen() { + let count = rng.gen_range(5..10); + for _ in 0..count { + validators.insert(PublicKey::random(rng), rng.gen::()); + } + } + + let count = rng.gen_range(0..10); + let mut entries = BTreeMap::new(); + for _ in 0..count { + entries.insert(rng.gen(), rng.gen()); + } + + Self { + validators: Some(validators), + entries, + } + } +} + +impl ToBytes for GlobalStateUpdate { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.validators.write_bytes(writer)?; + self.entries.write_bytes(writer) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.validators.serialized_length() + self.entries.serialized_length() + } +} + +impl FromBytes for GlobalStateUpdate { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (validators, remainder) = Option::>::from_bytes(bytes)?; + let (entries, remainder) = BTreeMap::::from_bytes(remainder)?; + let global_state_update = GlobalStateUpdate { + entries, + validators, + }; + Ok((global_state_update, remainder)) + } +} + +/// Error loading global state update file. +#[derive(Debug, Error)] +pub enum GlobalStateUpdateError { + /// Error while decoding a key from a prefix formatted string. + #[error("decoding key from formatted string error: {0}")] + DecodingKeyFromStr(String), + /// Error while decoding a key from a hex formatted string. + #[error("decoding key from hex string error: {0}")] + DecodingKeyFromHex(String), + /// Error while decoding a public key weight from formatted string. + #[error("decoding weight from decimal string error: {0}")] + DecodingWeightFromStr(String), + /// Error while decoding a serialized value from a base64 encoded string. 
+ #[error("decoding from base64 error: {0}")] + DecodingFromBase64(#[from] base64::DecodeError), +} + +impl TryFrom for GlobalStateUpdate { + type Error = GlobalStateUpdateError; + + fn try_from(config: GlobalStateUpdateConfig) -> Result { + let mut validators: Option> = None; + if let Some(config_validators) = config.validators { + let mut new_validators = BTreeMap::new(); + for (index, validator) in config_validators.into_iter().enumerate() { + let public_key = PublicKey::from_hex(&validator.public_key).map_err(|error| { + GlobalStateUpdateError::DecodingKeyFromHex(format!( + "failed to decode validator public key {}: {:?}", + index, error + )) + })?; + let weight = U512::from_dec_str(&validator.weight).map_err(|error| { + GlobalStateUpdateError::DecodingWeightFromStr(format!( + "failed to decode validator weight {}: {}", + index, error + )) + })?; + let _ = new_validators.insert(public_key, weight); + } + validators = Some(new_validators); + } + + let mut entries = BTreeMap::new(); + for (index, entry) in config.entries.into_iter().enumerate() { + let key = Key::from_formatted_str(&entry.key).map_err(|error| { + GlobalStateUpdateError::DecodingKeyFromStr(format!( + "failed to decode entry key {}: {}", + index, error + )) + })?; + let value = base64::decode(&entry.value)?.into(); + let _ = entries.insert(key, value); + } + + Ok(GlobalStateUpdate { + validators, + entries, + }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use rand::SeedableRng; + + #[test] + fn global_state_update_bytesrepr_roundtrip() { + let mut rng = TestRng::from_entropy(); + let update = GlobalStateUpdate::random(&mut rng); + bytesrepr::test_serialization_roundtrip(&update); + } +} diff --git a/types/src/chainspec/highway_config.rs b/types/src/chainspec/highway_config.rs new file mode 100644 index 0000000000..585ae72227 --- /dev/null +++ b/types/src/chainspec/highway_config.rs @@ -0,0 +1,76 @@ +#[cfg(feature = "datasize")] +use datasize::DataSize; + +#[cfg(any(feature = "testing", 
test))] +use rand::Rng; +use serde::{Deserialize, Serialize}; + +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + TimeDiff, +}; + +/// Configuration values relevant to Highway consensus. +#[derive(Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Debug, Default)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +// Disallow unknown fields to ensure config files and command-line overrides contain valid keys. +#[serde(deny_unknown_fields)] +pub struct HighwayConfig { + /// The upper limit for Highway round lengths. + pub maximum_round_length: TimeDiff, +} + +impl HighwayConfig { + /// Checks whether the values set in the config make sense and returns `false` if they don't. + pub fn is_valid(&self) -> Result<(), String> { + Ok(()) + } + + /// Returns a random `HighwayConfig`. + #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + let maximum_round_length = TimeDiff::from_seconds(rng.gen_range(60..600)); + + HighwayConfig { + maximum_round_length, + } + } +} + +impl ToBytes for HighwayConfig { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + buffer.extend(self.maximum_round_length.to_bytes()?); + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.maximum_round_length.serialized_length() + } +} + +impl FromBytes for HighwayConfig { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (maximum_round_length, remainder) = TimeDiff::from_bytes(bytes)?; + let config = HighwayConfig { + maximum_round_length, + }; + Ok((config, remainder)) + } +} + +#[cfg(test)] +mod tests { + use rand::SeedableRng; + + use super::*; + + #[test] + fn bytesrepr_roundtrip() { + let mut rng = TestRng::from_entropy(); + let config = HighwayConfig::random(&mut rng); + bytesrepr::test_serialization_roundtrip(&config); + } +} diff --git a/types/src/chainspec/hold_balance_handling.rs 
b/types/src/chainspec/hold_balance_handling.rs new file mode 100644 index 0000000000..dfb3950380 --- /dev/null +++ b/types/src/chainspec/hold_balance_handling.rs @@ -0,0 +1,132 @@ +use crate::{ + bytesrepr, + bytesrepr::{FromBytes, ToBytes}, +}; +use core::fmt::{Display, Formatter}; +#[cfg(feature = "datasize")] +use datasize::DataSize; +use serde::{Deserialize, Serialize}; + +#[cfg(any(feature = "testing", test))] +use rand::{ + distributions::{Distribution, Standard}, + Rng, +}; + +const HOLD_BALANCE_ACCRUED_TAG: u8 = 0; +const HOLD_BALANCE_AMORTIZED_TAG: u8 = 1; +const HOLD_BALANCE_HANDLING_TAG_LENGTH: u8 = 1; + +/// Defines how a given network handles holds when calculating available balances. There may be +/// multiple types of holds (such as Processing and Gas currently, and potentially other kinds in +/// the future), and each type of hold can differ on how it applies to available +/// balance calculation. +#[derive(Copy, Clone, Debug, PartialEq, Eq, Serialize, Deserialize, Default)] +#[serde(tag = "type", rename_all = "snake_case")] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub enum HoldBalanceHandling { + /// The sum of full value of all non-expired holds is used. + // in 2.0 the default hold balance handling is Accrued, + // which means a non-expired hold is applied in full to + // available balance calculations + #[default] + Accrued, + /// The sum of each hold is amortized over the time remaining until expiry. + /// For instance, if 12 hours remain on a 24 hour hold, half the hold amount is applied. + Amortized, +} + +impl HoldBalanceHandling { + /// Returns variant for tag, if able. + #[allow(clippy::result_unit_err)] + pub fn from_tag(tag: u8) -> Result { + if tag == HOLD_BALANCE_ACCRUED_TAG { + Ok(HoldBalanceHandling::Accrued) + } else if tag == HOLD_BALANCE_AMORTIZED_TAG { + Ok(HoldBalanceHandling::Amortized) + } else { + Err(()) + } + } + + /// Returns the tag for the variant. 
+ pub fn tag(&self) -> u8 { + match self { + HoldBalanceHandling::Accrued => HOLD_BALANCE_ACCRUED_TAG, + HoldBalanceHandling::Amortized => HOLD_BALANCE_AMORTIZED_TAG, + } + } +} + +impl Display for HoldBalanceHandling { + fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { + match self { + HoldBalanceHandling::Accrued => { + write!(f, "HoldBalanceHandling::Accrued") + } + HoldBalanceHandling::Amortized => { + write!(f, "HoldBalanceHandling::Amortized") + } + } + } +} + +impl ToBytes for HoldBalanceHandling { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + + match self { + HoldBalanceHandling::Accrued => { + buffer.push(HOLD_BALANCE_ACCRUED_TAG); + } + HoldBalanceHandling::Amortized => { + buffer.push(HOLD_BALANCE_AMORTIZED_TAG); + } + } + + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + HOLD_BALANCE_HANDLING_TAG_LENGTH as usize + } +} + +impl FromBytes for HoldBalanceHandling { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, rem) = u8::from_bytes(bytes)?; + match tag { + HOLD_BALANCE_ACCRUED_TAG => Ok((HoldBalanceHandling::Accrued, rem)), + HOLD_BALANCE_AMORTIZED_TAG => Ok((HoldBalanceHandling::Amortized, rem)), + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +#[cfg(any(feature = "testing", test))] +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> HoldBalanceHandling { + match rng.gen_range(HOLD_BALANCE_ACCRUED_TAG..=HOLD_BALANCE_AMORTIZED_TAG) { + HOLD_BALANCE_ACCRUED_TAG => HoldBalanceHandling::Accrued, + HOLD_BALANCE_AMORTIZED_TAG => HoldBalanceHandling::Amortized, + _ => unreachable!(), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn bytesrepr_roundtrip_for_accrued() { + let handling = HoldBalanceHandling::Accrued; + bytesrepr::test_serialization_roundtrip(&handling); + } + + #[test] + fn bytesrepr_roundtrip_for_amortized() { + let handling = HoldBalanceHandling::Amortized; + 
bytesrepr::test_serialization_roundtrip(&handling); + } +} diff --git a/types/src/chainspec/network_config.rs b/types/src/chainspec/network_config.rs new file mode 100644 index 0000000000..4ea2c88112 --- /dev/null +++ b/types/src/chainspec/network_config.rs @@ -0,0 +1,86 @@ +#[cfg(feature = "datasize")] +use datasize::DataSize; + +#[cfg(any(feature = "testing", test))] +use rand::Rng; +use serde::Serialize; + +use crate::bytesrepr::{self, FromBytes, ToBytes}; +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; + +use super::AccountsConfig; + +/// Configuration values associated with the network. +#[derive(Clone, PartialEq, Eq, Serialize, Debug, Default)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct NetworkConfig { + /// The network name. + pub name: String, + /// The maximum size of an accepted network message, in bytes. + pub maximum_net_message_size: u32, + /// Validator accounts specified in the chainspec. + // Note: `accounts_config` must be the last field on this struct due to issues in the TOML + // crate - see . + pub accounts_config: AccountsConfig, +} + +impl NetworkConfig { + /// Returns a random `NetworkConfig`. 
+ #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + let name = rng.gen::().to_string(); + let maximum_net_message_size = 4 + rng.gen_range(0..4); + let accounts_config = AccountsConfig::random(rng); + + NetworkConfig { + name, + maximum_net_message_size, + accounts_config, + } + } +} + +impl ToBytes for NetworkConfig { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + buffer.extend(self.name.to_bytes()?); + buffer.extend(self.accounts_config.to_bytes()?); + buffer.extend(self.maximum_net_message_size.to_bytes()?); + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.name.serialized_length() + + self.accounts_config.serialized_length() + + self.maximum_net_message_size.serialized_length() + } +} + +impl FromBytes for NetworkConfig { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (name, remainder) = String::from_bytes(bytes)?; + let (accounts_config, remainder) = FromBytes::from_bytes(remainder)?; + let (maximum_net_message_size, remainder) = FromBytes::from_bytes(remainder)?; + let config = NetworkConfig { + name, + maximum_net_message_size, + accounts_config, + }; + Ok((config, remainder)) + } +} + +#[cfg(test)] +mod tests { + use rand::SeedableRng; + + use super::*; + + #[test] + fn bytesrepr_roundtrip() { + let mut rng = TestRng::from_entropy(); + let config = NetworkConfig::random(&mut rng); + bytesrepr::test_serialization_roundtrip(&config); + } +} diff --git a/types/src/chainspec/next_upgrade.rs b/types/src/chainspec/next_upgrade.rs new file mode 100644 index 0000000000..ea001f37ea --- /dev/null +++ b/types/src/chainspec/next_upgrade.rs @@ -0,0 +1,116 @@ +use std::fmt::{self, Display, Formatter}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + 
ActivationPoint, ProtocolConfig, ProtocolVersion, +}; + +#[cfg(any(feature = "testing", test))] +use rand::Rng; + +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; + +/// Information about the next protocol upgrade. +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[derive(PartialEq, Eq, Debug, Serialize, Deserialize, Clone, Copy)] +pub struct NextUpgrade { + activation_point: ActivationPoint, + protocol_version: ProtocolVersion, +} + +impl NextUpgrade { + /// Creates a new `NextUpgrade`. + pub fn new(activation_point: ActivationPoint, protocol_version: ProtocolVersion) -> Self { + NextUpgrade { + activation_point, + protocol_version, + } + } + + /// Returns the activation point of the next upgrade. + pub fn activation_point(&self) -> ActivationPoint { + self.activation_point + } + + /// Random. + #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + Self { + activation_point: ActivationPoint::random(rng), + protocol_version: ProtocolVersion::from_parts(rng.gen(), rng.gen(), rng.gen()), + } + } +} + +impl From for NextUpgrade { + fn from(protocol_config: ProtocolConfig) -> Self { + NextUpgrade { + activation_point: protocol_config.activation_point, + protocol_version: protocol_config.version, + } + } +} + +impl Display for NextUpgrade { + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + write!( + formatter, + "next upgrade to {} at start of era {}", + self.protocol_version, + self.activation_point.era_id() + ) + } +} + +impl ToBytes for NextUpgrade { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.activation_point.write_bytes(writer)?; + self.protocol_version.write_bytes(writer) + } + + fn serialized_length(&self) -> usize { + 
self.activation_point.serialized_length() + self.protocol_version.serialized_length() + } +} + +impl FromBytes for NextUpgrade { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (activation_point, remainder) = ActivationPoint::from_bytes(bytes)?; + let (protocol_version, remainder) = ProtocolVersion::from_bytes(remainder)?; + Ok(( + NextUpgrade { + activation_point, + protocol_version, + }, + remainder, + )) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::testing::TestRng; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + + let val = NextUpgrade::random(rng); + bytesrepr::test_serialization_roundtrip(&val); + } +} diff --git a/types/src/chainspec/pricing_handling.rs b/types/src/chainspec/pricing_handling.rs new file mode 100644 index 0000000000..2585de545b --- /dev/null +++ b/types/src/chainspec/pricing_handling.rs @@ -0,0 +1,90 @@ +use crate::{ + bytesrepr, + bytesrepr::{FromBytes, ToBytes}, +}; +use core::fmt::{Display, Formatter}; +#[cfg(feature = "datasize")] +use datasize::DataSize; +use serde::{Deserialize, Serialize}; + +const PRICING_HANDLING_TAG_LENGTH: u8 = 1; + +const PRICING_HANDLING_PAYMENT_LIMITED_TAG: u8 = 0; +const PRICING_HANDLING_FIXED_TAG: u8 = 1; + +/// Defines what pricing mode a network allows. Correlates to the PricingMode of a +/// [`crate::Transaction`]. Nodes will not accept transactions whose pricing mode does not match. +#[derive(Copy, Clone, Debug, PartialEq, Eq, Serialize, Deserialize, Default)] +#[serde(tag = "type", rename_all = "snake_case")] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub enum PricingHandling { + #[default] + /// The transaction sender self-specifies how much token they pay, which becomes their gas + /// limit. + PaymentLimited, + /// The costs are fixed, per the cost tables. 
+ Fixed, +} + +impl Display for PricingHandling { + fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { + match self { + PricingHandling::PaymentLimited => { + write!(f, "PricingHandling::PaymentLimited") + } + PricingHandling::Fixed => { + write!(f, "PricingHandling::Fixed") + } + } + } +} + +impl ToBytes for PricingHandling { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + + match self { + PricingHandling::PaymentLimited => { + buffer.push(PRICING_HANDLING_PAYMENT_LIMITED_TAG); + } + PricingHandling::Fixed => { + buffer.push(PRICING_HANDLING_FIXED_TAG); + } + } + + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + PRICING_HANDLING_TAG_LENGTH as usize + } +} + +impl FromBytes for PricingHandling { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, rem) = u8::from_bytes(bytes)?; + match tag { + PRICING_HANDLING_PAYMENT_LIMITED_TAG => Ok((PricingHandling::PaymentLimited, rem)), + PRICING_HANDLING_FIXED_TAG => Ok((PricingHandling::Fixed, rem)), + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +#[cfg(test)] +mod tests { + + use super::*; + + #[test] + fn bytesrepr_roundtrip_for_payment_limited() { + let handling = PricingHandling::PaymentLimited; + bytesrepr::test_serialization_roundtrip(&handling); + } + + #[test] + fn bytesrepr_roundtrip_for_fixed() { + let handling = PricingHandling::Fixed; + bytesrepr::test_serialization_roundtrip(&handling); + } +} diff --git a/types/src/chainspec/protocol_config.rs b/types/src/chainspec/protocol_config.rs new file mode 100644 index 0000000000..5d4aecf719 --- /dev/null +++ b/types/src/chainspec/protocol_config.rs @@ -0,0 +1,136 @@ +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::Rng; +use serde::{Deserialize, Serialize}; +use std::{collections::BTreeMap, str::FromStr}; + +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +use crate::{ + 
bytesrepr::{self, FromBytes, ToBytes}, + Key, ProtocolVersion, StoredValue, Timestamp, +}; + +use crate::{ActivationPoint, GlobalStateUpdate}; + +/// Configuration values associated with the protocol. +#[derive(Clone, PartialEq, Eq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct ProtocolConfig { + /// Protocol version. + #[cfg_attr(feature = "datasize", data_size(skip))] + pub version: ProtocolVersion, + /// Whether we need to clear latest blocks back to the switch block just before the activation + /// point or not. + pub hard_reset: bool, + /// This protocol config applies starting at the era specified in the activation point. + pub activation_point: ActivationPoint, + /// Any arbitrary updates we might want to make to the global state at the start of the era + /// specified in the activation point. + pub global_state_update: Option, +} + +impl ProtocolConfig { + /// The mapping of [`Key`]s to [`StoredValue`]s we will use to update global storage in the + /// event of an emergency update. + pub(crate) fn get_update_mapping( + &self, + ) -> Result, bytesrepr::Error> { + let state_update = match &self.global_state_update { + Some(GlobalStateUpdate { entries, .. }) => entries, + None => return Ok(BTreeMap::default()), + }; + let mut update_mapping = BTreeMap::new(); + for (key, stored_value_bytes) in state_update { + let stored_value = bytesrepr::deserialize(stored_value_bytes.clone().into())?; + update_mapping.insert(*key, stored_value); + } + Ok(update_mapping) + } + + /// Returns a random `ProtocolConfig`. 
+ #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + let protocol_version = ProtocolVersion::from_parts( + rng.gen_range(0..10), + rng.gen::() as u32, + rng.gen::() as u32, + ); + let activation_point = ActivationPoint::random(rng); + + ProtocolConfig { + version: protocol_version, + hard_reset: rng.gen(), + activation_point, + global_state_update: None, + } + } +} + +impl ToBytes for ProtocolConfig { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + buffer.extend(self.version.to_string().to_bytes()?); + buffer.extend(self.hard_reset.to_bytes()?); + buffer.extend(self.activation_point.to_bytes()?); + buffer.extend(self.global_state_update.to_bytes()?); + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.version.to_string().serialized_length() + + self.hard_reset.serialized_length() + + self.activation_point.serialized_length() + + self.global_state_update.serialized_length() + } +} + +impl FromBytes for ProtocolConfig { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (protocol_version_string, remainder) = String::from_bytes(bytes)?; + let version = ProtocolVersion::from_str(&protocol_version_string) + .map_err(|_| bytesrepr::Error::Formatting)?; + let (hard_reset, remainder) = bool::from_bytes(remainder)?; + let (activation_point, remainder) = ActivationPoint::from_bytes(remainder)?; + let (global_state_update, remainder) = Option::::from_bytes(remainder)?; + let protocol_config = ProtocolConfig { + version, + hard_reset, + activation_point, + global_state_update, + }; + Ok((protocol_config, remainder)) + } +} + +impl Default for ProtocolConfig { + fn default() -> Self { + ProtocolConfig { + activation_point: ActivationPoint::Genesis(Timestamp::now()), + global_state_update: None, + hard_reset: true, + version: ProtocolVersion::V2_0_0, + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use rand::SeedableRng; + + #[test] 
+ fn activation_point_bytesrepr_roundtrip() { + let mut rng = TestRng::from_entropy(); + let activation_point = ActivationPoint::random(&mut rng); + bytesrepr::test_serialization_roundtrip(&activation_point); + } + + #[test] + fn protocol_config_bytesrepr_roundtrip() { + let mut rng = TestRng::from_entropy(); + let config = ProtocolConfig::random(&mut rng); + bytesrepr::test_serialization_roundtrip(&config); + } +} diff --git a/types/src/chainspec/refund_handling.rs b/types/src/chainspec/refund_handling.rs new file mode 100644 index 0000000000..cf0305d4a2 --- /dev/null +++ b/types/src/chainspec/refund_handling.rs @@ -0,0 +1,140 @@ +/// Configuration options of refund handling that are executed as part of handle payment +/// finalization. +use num_rational::Ratio; +use num_traits::Zero; +use serde::{Deserialize, Serialize}; + +use crate::bytesrepr::{self, FromBytes, ToBytes}; + +const REFUND_HANDLING_REFUND_TAG: u8 = 0; +const REFUND_HANDLING_BURN_TAG: u8 = 1; +const REFUND_HANDLING_NONE_TAG: u8 = 2; + +/// Defines how refunds are calculated. +#[derive(Copy, Clone, Debug, PartialEq, Eq, Serialize, Deserialize, Default)] +#[serde(tag = "type", rename_all = "snake_case")] +pub enum RefundHandling { + /// Refund of excess payment amount goes to either a pre-defined purse, or back to the sender + /// and the rest of the payment amount goes to the block proposer. + Refund { + /// Computes how much refund goes back to the user after deducting gas spent from the paid + /// amount. + /// + /// user_part = (payment_amount - gas_spent_amount) * refund_ratio + /// validator_part = payment_amount - user_part + /// + /// Any dust amount that was a result of multiplying by refund_ratio goes back to user. + refund_ratio: Ratio, + }, + /// Burns the refund amount. + Burn { + /// Computes how much of the refund amount is burned after deducting gas spent from the + /// paid amount. + refund_ratio: Ratio, + }, + /// No refunds. 
+ // in 1.x the default was Refund + // RefundHandling::Refund { + // refund_ratio: Ratio::new(99, 100), + // } + // in 2.0 the default payment mode is Fixed with Fee Elimination on, + // thus there is nothing to refund. + #[default] + NoRefund, +} + +impl RefundHandling { + /// Returns true if we don't need to process a refund. + pub fn skip_refund(&self) -> bool { + match self { + RefundHandling::NoRefund => true, + RefundHandling::Refund { refund_ratio } => refund_ratio.is_zero(), + RefundHandling::Burn { .. } => false, + } + } + + /// Returns refund ratio. + pub fn refund_ratio(&self) -> Ratio { + match self { + RefundHandling::Refund { refund_ratio } | RefundHandling::Burn { refund_ratio } => { + *refund_ratio + } + RefundHandling::NoRefund => Ratio::zero(), + } + } +} + +impl ToBytes for RefundHandling { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + + match self { + RefundHandling::Refund { refund_ratio } => { + buffer.push(REFUND_HANDLING_REFUND_TAG); + buffer.extend(refund_ratio.to_bytes()?); + } + RefundHandling::Burn { refund_ratio } => { + buffer.push(REFUND_HANDLING_BURN_TAG); + buffer.extend(refund_ratio.to_bytes()?); + } + RefundHandling::NoRefund => { + buffer.push(REFUND_HANDLING_NONE_TAG); + } + } + + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + 1 + match self { + RefundHandling::Refund { refund_ratio } => refund_ratio.serialized_length(), + RefundHandling::Burn { refund_ratio } => refund_ratio.serialized_length(), + RefundHandling::NoRefund => 0, + } + } +} + +impl FromBytes for RefundHandling { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, rem) = u8::from_bytes(bytes)?; + match tag { + REFUND_HANDLING_REFUND_TAG => { + let (refund_ratio, rem) = FromBytes::from_bytes(rem)?; + Ok((RefundHandling::Refund { refund_ratio }, rem)) + } + REFUND_HANDLING_BURN_TAG => { + let (refund_ratio, rem) = FromBytes::from_bytes(rem)?; + 
Ok((RefundHandling::Burn { refund_ratio }, rem)) + } + REFUND_HANDLING_NONE_TAG => Ok((RefundHandling::NoRefund, rem)), + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn bytesrepr_roundtrip_for_refund() { + let refund_config = RefundHandling::Refund { + refund_ratio: Ratio::new(49, 313), + }; + bytesrepr::test_serialization_roundtrip(&refund_config); + } + + #[test] + fn bytesrepr_roundtrip_for_burn() { + let refund_config = RefundHandling::Burn { + refund_ratio: Ratio::new(49, 313), + }; + bytesrepr::test_serialization_roundtrip(&refund_config); + } + + #[test] + fn bytesrepr_roundtrip_for_no_refund() { + let refund_config = RefundHandling::NoRefund; + bytesrepr::test_serialization_roundtrip(&refund_config); + } +} diff --git a/types/src/chainspec/transaction_config.rs b/types/src/chainspec/transaction_config.rs new file mode 100644 index 0000000000..cc3d5f357c --- /dev/null +++ b/types/src/chainspec/transaction_config.rs @@ -0,0 +1,181 @@ +mod deploy_config; +mod runtime_config; +mod transaction_v1_config; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::Rng; +use runtime_config::RuntimeConfig; +use serde::{Deserialize, Serialize}; + +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + TimeDiff, +}; + +pub use deploy_config::DeployConfig; +#[cfg(any(feature = "testing", test))] +pub use deploy_config::DEFAULT_MAX_PAYMENT_MOTES; +#[cfg(any(feature = "testing", test))] +pub use transaction_v1_config::DEFAULT_LARGE_TRANSACTION_GAS_LIMIT; +pub use transaction_v1_config::{TransactionLaneDefinition, TransactionV1Config}; + +/// The default minimum number of motes that can be transferred. +pub const DEFAULT_MIN_TRANSFER_MOTES: u64 = 2_500_000_000; + +/// Configuration values associated with Transactions. 
+#[derive(Clone, PartialEq, Eq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +// Disallow unknown fields to ensure config files and command-line overrides contain valid keys. +#[serde(deny_unknown_fields)] +pub struct TransactionConfig { + /// Maximum time to live any transaction can specify. + pub max_ttl: TimeDiff, + /// Maximum number of approvals (signatures) allowed in a block across all transactions. + pub block_max_approval_count: u32, + /// Maximum possible size in bytes of a block. + pub max_block_size: u32, + /// Maximum sum of payment across all transactions included in a block. + pub block_gas_limit: u64, + /// Minimum token amount for a native transfer deploy or transaction (a transfer deploy or + /// transaction received with an transfer amount less than this will be rejected upon receipt). + pub native_transfer_minimum_motes: u64, + /// Maximum value to which `transaction_acceptor.timestamp_leeway` can be set in the + /// config.toml file. + pub max_timestamp_leeway: TimeDiff, + /// Configuration values specific to Deploy transactions. + #[serde(rename = "deploy")] + pub deploy_config: DeployConfig, + /// Configuration of the transaction runtime. + /// Configuration values specific to V1 transactions. + #[serde(rename = "v1")] + pub transaction_v1_config: TransactionV1Config, + /// Configuration values specific to the runtime. + /// + /// This is where we specify which runtimes are available. + #[serde(rename = "enabled_runtime")] + pub runtime_config: RuntimeConfig, +} + +#[cfg(any(all(feature = "std", feature = "testing"), test))] +impl TransactionConfig { + /// Generates a random instance using a `TestRng`. 
+ pub fn random(rng: &mut TestRng) -> Self { + let max_ttl = TimeDiff::from_seconds(rng.gen_range(60..3_600)); + let block_max_approval_count = rng.gen(); + let max_block_size = rng.gen_range(1_000_000..1_000_000_000); + let block_gas_limit = rng.gen_range(100_000_000_000..1_000_000_000_000_000); + let native_transfer_minimum_motes = + rng.gen_range(DEFAULT_MIN_TRANSFER_MOTES..1_000_000_000_000_000); + let max_timestamp_leeway = TimeDiff::from_seconds(rng.gen_range(0..6)); + let deploy_config = DeployConfig::random(rng); + let transaction_v1_config: TransactionV1Config = TransactionV1Config::random(rng); + let runtime_config = RuntimeConfig::random(rng); + + TransactionConfig { + max_ttl, + block_max_approval_count, + max_block_size, + block_gas_limit, + native_transfer_minimum_motes, + max_timestamp_leeway, + deploy_config, + transaction_v1_config, + runtime_config, + } + } +} + +impl Default for TransactionConfig { + fn default() -> Self { + let two_hours = TimeDiff::from_seconds(2 * 60 * 60); + TransactionConfig { + max_ttl: two_hours, + block_max_approval_count: 2600, + max_block_size: 10_485_760, + block_gas_limit: 10_000_000_000_000, + native_transfer_minimum_motes: DEFAULT_MIN_TRANSFER_MOTES, + max_timestamp_leeway: TimeDiff::from_seconds(5), + deploy_config: DeployConfig::default(), + runtime_config: RuntimeConfig { + vm_casper_v1: true, + vm_casper_v2: false, + }, + transaction_v1_config: TransactionV1Config::default(), + } + } +} + +impl ToBytes for TransactionConfig { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.max_ttl.write_bytes(writer)?; + self.block_max_approval_count.write_bytes(writer)?; + self.max_block_size.write_bytes(writer)?; + self.block_gas_limit.write_bytes(writer)?; + self.native_transfer_minimum_motes.write_bytes(writer)?; + self.max_timestamp_leeway.write_bytes(writer)?; + self.deploy_config.write_bytes(writer)?; + self.runtime_config.write_bytes(writer)?; + 
self.transaction_v1_config.write_bytes(writer) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.max_ttl.serialized_length() + + self.block_max_approval_count.serialized_length() + + self.max_block_size.serialized_length() + + self.block_gas_limit.serialized_length() + + self.native_transfer_minimum_motes.serialized_length() + + self.max_timestamp_leeway.serialized_length() + + self.deploy_config.serialized_length() + + self.runtime_config.serialized_length() + + self.transaction_v1_config.serialized_length() + } +} + +impl FromBytes for TransactionConfig { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (max_ttl, remainder) = TimeDiff::from_bytes(bytes)?; + let (block_max_approval_count, remainder) = u32::from_bytes(remainder)?; + let (max_block_size, remainder) = u32::from_bytes(remainder)?; + let (block_gas_limit, remainder) = u64::from_bytes(remainder)?; + let (native_transfer_minimum_motes, remainder) = u64::from_bytes(remainder)?; + let (max_timestamp_leeway, remainder) = TimeDiff::from_bytes(remainder)?; + let (deploy_config, remainder) = DeployConfig::from_bytes(remainder)?; + let (runtime_config, remainder) = RuntimeConfig::from_bytes(remainder)?; + let (transaction_v1_config, remainder) = TransactionV1Config::from_bytes(remainder)?; + + let config = TransactionConfig { + max_ttl, + block_max_approval_count, + max_block_size, + block_gas_limit, + native_transfer_minimum_motes, + max_timestamp_leeway, + deploy_config, + runtime_config, + transaction_v1_config, + }; + Ok((config, remainder)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn bytesrepr_roundtrip() { + let mut rng = TestRng::new(); + let config = TransactionConfig::random(&mut rng); + bytesrepr::test_serialization_roundtrip(&config); + } +} diff --git 
a/types/src/chainspec/transaction_config/deploy_config.rs b/types/src/chainspec/transaction_config/deploy_config.rs new file mode 100644 index 0000000000..2a03d28081 --- /dev/null +++ b/types/src/chainspec/transaction_config/deploy_config.rs @@ -0,0 +1,102 @@ +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::Rng; +use serde::{Deserialize, Serialize}; + +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + Motes, +}; + +/// The default maximum number of motes that payment code execution can cost. +pub const DEFAULT_MAX_PAYMENT_MOTES: u64 = 2_500_000_000; + +/// Configuration values associated with deploys. +#[derive(Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +// Disallow unknown fields to ensure config files and command-line overrides contain valid keys. +#[serde(deny_unknown_fields)] +pub struct DeployConfig { + /// Maximum amount any deploy can pay. + pub max_payment_cost: Motes, + /// Maximum length in bytes of payment args per deploy. + pub payment_args_max_length: u32, + /// Maximum length in bytes of session args per deploy. + pub session_args_max_length: u32, +} + +#[cfg(any(feature = "testing", test))] +impl DeployConfig { + /// Generates a random instance using a `TestRng`. 
+ pub fn random(rng: &mut TestRng) -> Self { + let max_payment_cost = Motes::new(rng.gen_range(1_000_000..1_000_000_000)); + let payment_args_max_length = rng.gen(); + let session_args_max_length = rng.gen(); + + DeployConfig { + max_payment_cost, + payment_args_max_length, + session_args_max_length, + } + } +} + +#[cfg(any(feature = "std", test))] +impl Default for DeployConfig { + fn default() -> Self { + DeployConfig { + max_payment_cost: Motes::new(DEFAULT_MAX_PAYMENT_MOTES), + payment_args_max_length: 1024, + session_args_max_length: 1024, + } + } +} + +impl ToBytes for DeployConfig { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.max_payment_cost.write_bytes(writer)?; + self.payment_args_max_length.write_bytes(writer)?; + self.session_args_max_length.write_bytes(writer) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.max_payment_cost.value().serialized_length() + + self.payment_args_max_length.serialized_length() + + self.session_args_max_length.serialized_length() + } +} + +impl FromBytes for DeployConfig { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (max_payment_cost, remainder) = Motes::from_bytes(bytes)?; + let (payment_args_max_length, remainder) = u32::from_bytes(remainder)?; + let (session_args_max_length, remainder) = u32::from_bytes(remainder)?; + let config = DeployConfig { + max_payment_cost, + payment_args_max_length, + session_args_max_length, + }; + Ok((config, remainder)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn bytesrepr_roundtrip() { + let mut rng = TestRng::new(); + let config = DeployConfig::random(&mut rng); + bytesrepr::test_serialization_roundtrip(&config); + } +} diff --git a/types/src/chainspec/transaction_config/runtime_config.rs 
b/types/src/chainspec/transaction_config/runtime_config.rs new file mode 100644 index 0000000000..9446b428c7 --- /dev/null +++ b/types/src/chainspec/transaction_config/runtime_config.rs @@ -0,0 +1,61 @@ +use crate::bytesrepr::{self, FromBytes, ToBytes}; +#[cfg(feature = "datasize")] +use datasize::DataSize; +use serde::{Deserialize, Serialize}; + +#[cfg(any(feature = "testing", test))] +use {crate::testing::TestRng, rand::Rng}; + +/// Configuration values associated with deploys. +#[derive(Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +// Disallow unknown fields to ensure config files and command-line overrides contain valid keys. +#[serde(deny_unknown_fields)] +pub struct RuntimeConfig { + /// Whether the chain is using the Casper v1 runtime. + pub vm_casper_v1: bool, + /// Whether the chain is using the Casper v2 runtime. + pub vm_casper_v2: bool, +} + +impl RuntimeConfig { + #[cfg(any(feature = "testing", test))] + /// Generates a random instance using a `TestRng`. 
+ pub fn random(rng: &mut TestRng) -> Self { + Self { + vm_casper_v1: rng.gen(), + vm_casper_v2: rng.gen(), + } + } +} + +impl FromBytes for RuntimeConfig { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), crate::bytesrepr::Error> { + let (vm_casper_v1, rem) = bool::from_bytes(bytes)?; + let (vm_casper_v2, rem) = bool::from_bytes(rem)?; + Ok(( + RuntimeConfig { + vm_casper_v1, + vm_casper_v2, + }, + rem, + )) + } +} + +impl ToBytes for RuntimeConfig { + fn to_bytes(&self) -> Result, crate::bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.vm_casper_v1.serialized_length() + self.vm_casper_v2.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), crate::bytesrepr::Error> { + self.vm_casper_v1.write_bytes(writer)?; + self.vm_casper_v2.write_bytes(writer) + } +} diff --git a/types/src/chainspec/transaction_config/transaction_v1_config.rs b/types/src/chainspec/transaction_config/transaction_v1_config.rs new file mode 100644 index 0000000000..0feebb3c43 --- /dev/null +++ b/types/src/chainspec/transaction_config/transaction_v1_config.rs @@ -0,0 +1,1064 @@ +use core::cmp; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "once_cell", test))] +use once_cell::sync::OnceCell; +#[cfg(any(feature = "testing", test))] +use rand::Rng; +use serde::{ + de::{Error, Unexpected}, + ser::SerializeSeq, + Deserialize, Deserializer, Serialize, Serializer, +}; + +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + AUCTION_LANE_ID, INSTALL_UPGRADE_LANE_ID, MINT_LANE_ID, +}; + +/// Default gas limit of standard transactions +pub const DEFAULT_LARGE_TRANSACTION_GAS_LIMIT: u64 = 6_000_000_000_000; + +const DEFAULT_NATIVE_MINT_LANE: [u64; 5] = [0, 1_048_576, 1024, 2_500_000_000, 650]; +const DEFAULT_NATIVE_AUCTION_LANE: [u64; 5] = [1, 
1_048_576, 1024, 5_000_000_000_000, 145]; +const DEFAULT_INSTALL_UPGRADE_LANE: [u64; 5] = [2, 1_048_576, 2048, 3_500_000_000_000, 2]; + +const TRANSACTION_ID_INDEX: usize = 0; +const TRANSACTION_LENGTH_INDEX: usize = 1; +const TRANSACTION_ARGS_LENGTH_INDEX: usize = 2; +const TRANSACTION_GAS_LIMIT_INDEX: usize = 3; +const TRANSACTION_COUNT_INDEX: usize = 4; + +/// Structured limits imposed on a transaction lane +#[derive(Clone, PartialEq, Eq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct TransactionLaneDefinition { + /// The lane identifier + pub id: u8, + /// The maximum length of a transaction in bytes + pub max_transaction_length: u64, + /// The max args length size in bytes + pub max_transaction_args_length: u64, + /// The maximum gas limit + pub max_transaction_gas_limit: u64, + /// The maximum number of transactions + pub max_transaction_count: u64, +} + +impl TryFrom> for TransactionLaneDefinition { + type Error = TransactionConfigError; + + fn try_from(v: Vec) -> Result { + if v.len() != 5 { + return Err(TransactionConfigError::InvalidArgsProvided); + } + Ok(TransactionLaneDefinition { + id: v[TRANSACTION_ID_INDEX] as u8, + max_transaction_length: v[TRANSACTION_LENGTH_INDEX], + max_transaction_args_length: v[TRANSACTION_ARGS_LENGTH_INDEX], + max_transaction_gas_limit: v[TRANSACTION_GAS_LIMIT_INDEX], + max_transaction_count: v[TRANSACTION_COUNT_INDEX], + }) + } +} + +impl TransactionLaneDefinition { + /// Creates a new instance of TransactionLimitsDefinition + pub fn new( + id: u8, + max_transaction_length: u64, + max_transaction_args_length: u64, + max_transaction_gas_limit: u64, + max_transaction_count: u64, + ) -> Self { + Self { + id, + max_transaction_length, + max_transaction_args_length, + max_transaction_gas_limit, + max_transaction_count, + } + } + + fn as_vec(&self) -> Vec { + vec![ + self.id as u64, + self.max_transaction_length, + self.max_transaction_args_length, + self.max_transaction_gas_limit, 
+ self.max_transaction_count, + ] + } + + /// Returns max_transaction_length + pub fn max_transaction_length(&self) -> u64 { + self.max_transaction_length + } + + /// Returns max_transaction_args_length + pub fn max_transaction_args_length(&self) -> u64 { + self.max_transaction_args_length + } + + /// Returns max_transaction_gas_limit + pub fn max_transaction_gas_limit(&self) -> u64 { + self.max_transaction_gas_limit + } + + /// Returns max_transaction_count + pub fn max_transaction_count(&self) -> u64 { + self.max_transaction_count + } + + /// Returns id + pub fn id(&self) -> u8 { + self.id + } +} + +#[derive(Debug, Clone)] +pub enum TransactionConfigError { + InvalidArgsProvided, +} + +/// Configuration values associated with V1 Transactions. +#[derive(Clone, Eq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +// Disallow unknown fields to ensure config files and command-line overrides contain valid keys. +#[serde(deny_unknown_fields)] +pub struct TransactionV1Config { + #[serde( + serialize_with = "limit_definition_to_vec", + deserialize_with = "vec_to_limit_definition" + )] + /// Lane configuration of the native mint interaction. + pub native_mint_lane: TransactionLaneDefinition, + #[serde( + serialize_with = "limit_definition_to_vec", + deserialize_with = "vec_to_limit_definition" + )] + /// Lane configuration for the native auction interaction. + pub native_auction_lane: TransactionLaneDefinition, + #[serde( + serialize_with = "limit_definition_to_vec", + deserialize_with = "vec_to_limit_definition" + )] + /// Lane configuration for the install/upgrade interaction. + pub install_upgrade_lane: TransactionLaneDefinition, + #[serde( + serialize_with = "wasm_definitions_to_vec", + deserialize_with = "definition_to_wasms" + )] + /// Lane configurations for Wasm based lanes that are not declared as install/upgrade. 
+ wasm_lanes: Vec, + #[cfg_attr(any(all(feature = "std", feature = "once_cell"), test), serde(skip))] + #[cfg_attr( + all(any(feature = "once_cell", test), feature = "datasize"), + data_size(skip) + )] + #[cfg(any(feature = "once_cell", test))] + wasm_lanes_ordered_by_transaction_size: OnceCell>, + #[cfg_attr(any(all(feature = "std", feature = "once_cell"), test), serde(skip))] + #[cfg_attr( + all(any(feature = "once_cell", test), feature = "datasize"), + data_size(skip) + )] + #[cfg(any(feature = "once_cell", test))] + wasm_lanes_ordered_by_transaction_gas_limit_transaction_size_args_length: + OnceCell>, +} + +impl PartialEq for TransactionV1Config { + fn eq(&self, other: &TransactionV1Config) -> bool { + // Destructure to make sure we don't accidentally omit fields. + let TransactionV1Config { + native_mint_lane, + native_auction_lane, + install_upgrade_lane, + wasm_lanes, + #[cfg(any(feature = "once_cell", test))] + wasm_lanes_ordered_by_transaction_size: _, + #[cfg(any(feature = "once_cell", test))] + wasm_lanes_ordered_by_transaction_gas_limit_transaction_size_args_length: _, + } = self; + *native_mint_lane == other.native_mint_lane + && *native_auction_lane == other.native_auction_lane + && *install_upgrade_lane == other.install_upgrade_lane + && *wasm_lanes == other.wasm_lanes + } +} + +impl TransactionV1Config { + /// Cretaes a new instance of TransactionV1Config + pub fn new( + native_mint_lane: TransactionLaneDefinition, + native_auction_lane: TransactionLaneDefinition, + install_upgrade_lane: TransactionLaneDefinition, + wasm_lanes: Vec, + ) -> Self { + #[cfg(any(feature = "once_cell", test))] + let wasm_lanes_ordered_by_transaction_size = OnceCell::with_value( + Self::build_wasm_lanes_ordered_by_transaction_size(wasm_lanes.clone()), + ); + #[cfg(any(feature = "once_cell", test))] + let wasm_lanes_ordered_by_transaction_gas_limit = + OnceCell::with_value(Self::build_wasm_lanes_ordered(wasm_lanes.clone())); + TransactionV1Config { + native_mint_lane, + 
native_auction_lane, + install_upgrade_lane, + wasm_lanes, + #[cfg(any(feature = "once_cell", test))] + wasm_lanes_ordered_by_transaction_size, + #[cfg(any(feature = "once_cell", test))] + wasm_lanes_ordered_by_transaction_gas_limit_transaction_size_args_length: + wasm_lanes_ordered_by_transaction_gas_limit, + } + } + + #[cfg(any(feature = "testing", test))] + /// Generates a random instance using a `TestRng`. + pub fn random(rng: &mut TestRng) -> Self { + let native_mint_lane = DEFAULT_NATIVE_MINT_LANE.to_vec(); + let native_auction_lane = DEFAULT_NATIVE_AUCTION_LANE.to_vec(); + let install_upgrade_lane = DEFAULT_INSTALL_UPGRADE_LANE.to_vec(); + let mut wasm_lanes = vec![]; + for kind in 2..7 { + let lane = vec![ + kind as u64, + rng.gen_range(0..=1_048_576), + rng.gen_range(0..=1024), + rng.gen_range(0..=2_500_000_000), + rng.gen_range(5..=150), + ]; + wasm_lanes.push(lane.try_into().unwrap()) + } + + TransactionV1Config::new( + native_mint_lane.try_into().unwrap(), + native_auction_lane.try_into().unwrap(), + install_upgrade_lane.try_into().unwrap(), + wasm_lanes, + ) + } + + /// Returns the max serialized length of a transaction for the given lane. 
+ pub fn get_max_serialized_length(&self, lane_id: u8) -> u64 { + match lane_id { + MINT_LANE_ID => self.native_mint_lane.max_transaction_length, + AUCTION_LANE_ID => self.native_auction_lane.max_transaction_length, + INSTALL_UPGRADE_LANE_ID => self.install_upgrade_lane.max_transaction_length, + _ => match self.wasm_lanes.iter().find(|lane| lane.id == lane_id) { + Some(wasm_lane) => wasm_lane.max_transaction_length, + None => 0, + }, + } + } + + /// Returns the max number of runtime args + pub fn get_max_args_length(&self, lane_id: u8) -> u64 { + match lane_id { + MINT_LANE_ID => self.native_mint_lane.max_transaction_args_length, + AUCTION_LANE_ID => self.native_auction_lane.max_transaction_args_length, + INSTALL_UPGRADE_LANE_ID => self.install_upgrade_lane.max_transaction_args_length, + _ => match self.wasm_lanes.iter().find(|lane| lane.id == lane_id) { + Some(wasm_lane) => wasm_lane.max_transaction_args_length, + None => 0, + }, + } + } + + /// Returns the max gas limit of a transaction for the given lane. + pub fn get_max_transaction_gas_limit(&self, lane_id: u8) -> u64 { + match lane_id { + MINT_LANE_ID => self.native_mint_lane.max_transaction_gas_limit, + AUCTION_LANE_ID => self.native_auction_lane.max_transaction_gas_limit, + INSTALL_UPGRADE_LANE_ID => self.install_upgrade_lane.max_transaction_gas_limit, + _ => match self.wasm_lanes.iter().find(|lane| lane.id == lane_id) { + Some(wasm_lane) => wasm_lane.max_transaction_gas_limit, + None => 0, + }, + } + } + + /// Returns the max transactions count for the given lane. 
+ pub fn get_max_transaction_count(&self, lane_id: u8) -> u64 { + match lane_id { + MINT_LANE_ID => self.native_mint_lane.max_transaction_count, + AUCTION_LANE_ID => self.native_auction_lane.max_transaction_count, + INSTALL_UPGRADE_LANE_ID => self.install_upgrade_lane.max_transaction_count, + _ => match self.wasm_lanes.iter().find(|lane| lane.id == lane_id) { + Some(wasm_lane) => wasm_lane.max_transaction_count, + None => 0, + }, + } + } + + /// Returns the maximum number of Wasm based transactions across wasm lanes. + pub fn get_max_wasm_transaction_count(&self) -> u64 { + let mut ret = 0; + for lane in self.wasm_lanes.iter() { + ret += lane.max_transaction_count; + } + ret + } + + /// Are the given transaction parameters supported. + pub fn is_supported(&self, lane_id: u8) -> bool { + if !self.is_predefined_lane(lane_id) { + return self.wasm_lanes.iter().any(|lane| lane.id == lane_id); + } + true + } + + /// Returns the list of currently supported lane identifiers. + pub fn get_supported_lanes(&self) -> Vec { + let mut ret = vec![0, 1, 2]; + for lane in self.wasm_lanes.iter() { + ret.push(lane.id); + } + ret + } + + /// Returns the transaction v1 configuration with the specified lane limits. + #[cfg(any(feature = "testing", test))] + pub fn with_count_limits( + mut self, + mint: Option, + auction: Option, + install: Option, + large: Option, + ) -> Self { + if let Some(mint_count) = mint { + self.native_mint_lane.max_transaction_count = mint_count; + } + if let Some(auction_count) = auction { + self.native_auction_lane.max_transaction_count = auction_count; + } + if let Some(install_upgrade) = install { + self.install_upgrade_lane.max_transaction_count = install_upgrade; + } + if let Some(large_limit) = large { + for lane in self.wasm_lanes.iter_mut() { + if lane.id == 3 { + lane.max_transaction_count = large_limit; + } + } + } + self + } + + /// Returns the max total count for all transactions across all lanes allowed in a block. 
+ pub fn get_max_block_count(&self) -> u64 { + self.native_mint_lane.max_transaction_count + + self.native_auction_lane.max_transaction_count + + self.install_upgrade_lane.max_transaction_count + + self + .wasm_lanes + .iter() + .map(|lane| lane.max_transaction_count) + .sum::() + } + + /// Returns true if the lane identifier is for one of the predefined lanes. + pub fn is_predefined_lane(&self, lane: u8) -> bool { + lane == AUCTION_LANE_ID || lane == MINT_LANE_ID || lane == INSTALL_UPGRADE_LANE_ID + } + + /// Returns a wasm lane id based on the transaction size adjusted by + /// maybe_additional_computation_factor if necessary. + pub fn get_wasm_lane_id_by_size( + &self, + transaction_size: u64, + additional_computation_factor: u8, + runtime_args_size: u64, + ) -> Option { + let mut maybe_adequate_lane_index = None; + let buckets = self.get_wasm_lanes_ordered_by_transaction_size(); + let number_of_lanes = buckets.len(); + for (i, lane) in buckets.iter().enumerate() { + let max_transaction_size = lane.max_transaction_length; + let max_runtime_args_size = lane.max_transaction_args_length; + if max_transaction_size >= transaction_size + && max_runtime_args_size >= runtime_args_size + { + maybe_adequate_lane_index = Some(i); + break; + } + } + if let Some(adequate_lane_index) = maybe_adequate_lane_index { + maybe_adequate_lane_index = Some(cmp::min( + adequate_lane_index + additional_computation_factor as usize, + number_of_lanes - 1, + )); + } + maybe_adequate_lane_index.map(|index| buckets[index].id) + } + + pub fn get_lane_by_id(&self, lane_id: u8) -> Option<&TransactionLaneDefinition> { + if lane_id == MINT_LANE_ID { + return Some(&self.native_mint_lane); + } + if lane_id == AUCTION_LANE_ID { + return Some(&self.native_auction_lane); + } + if lane_id == INSTALL_UPGRADE_LANE_ID { + return Some(&self.install_upgrade_lane); + } + self.wasm_lanes.iter().find(|el| el.id == lane_id) + } + + pub fn get_wasm_lane_id_by_payment_limited( + &self, + gas_limit: u64, + 
transaction_size: u64, + runtime_args_size: u64, + ) -> Option { + let mut maybe_adequate_lane_index = None; + let lanes = self.get_wasm_lanes_ordered(); + for (i, lane) in lanes.iter().enumerate() { + let max_transaction_gas = lane.max_transaction_gas_limit; + let max_transaction_size = lane.max_transaction_length; + let max_runtime_args_size = lane.max_transaction_args_length; + if gas_limit <= max_transaction_gas + && transaction_size <= max_transaction_size + && runtime_args_size <= max_runtime_args_size + { + maybe_adequate_lane_index = Some(i); + break; + } + } + maybe_adequate_lane_index.map(|index| lanes[index].id) + } + + #[allow(unreachable_code)] + //We're allowing unreachable code here because there's a possibility that someone might + // want to use the types crate without once_cell + fn get_wasm_lanes_ordered_by_transaction_size(&self) -> &Vec { + #[cfg(any(feature = "once_cell", test))] + return self.wasm_lanes_ordered_by_transaction_size.get_or_init(|| { + Self::build_wasm_lanes_ordered_by_transaction_size(self.wasm_lanes.clone()) + }); + &Self::build_wasm_lanes_ordered_by_transaction_size(self.wasm_lanes.clone()) + } + + #[allow(unreachable_code)] + //We're allowing unreachable code here because there's a possibility that someone might + // want to use the types crate without once_cell + // This function will take the wasm lanes ordered by: + // - firstly gas limit + // - secondly max_transaction_length + // - thirdly max runtime args + // - fourthly lane id (this has no "business" value, but it ensures that the ordering is + // always reproducible since ids should be unique) + fn get_wasm_lanes_ordered(&self) -> &Vec { + #[cfg(any(feature = "once_cell", test))] + return self + .wasm_lanes_ordered_by_transaction_gas_limit_transaction_size_args_length + .get_or_init(|| Self::build_wasm_lanes_ordered(self.wasm_lanes.clone())); + &Self::build_wasm_lanes_ordered(self.wasm_lanes.clone()) + } + + fn build_wasm_lanes_ordered( + wasm_lanes: Vec, + ) -> Vec 
{ + let mut ordered = wasm_lanes; + ordered.sort_by_key(|item| { + ( + item.max_transaction_gas_limit, + item.max_transaction_length, + item.max_transaction_args_length, + item.id, + ) + }); + ordered + } + + fn build_wasm_lanes_ordered_by_transaction_size( + wasm_lanes: Vec, + ) -> Vec { + let mut ordered = wasm_lanes; + ordered.sort_by(|a, b| a.max_transaction_length.cmp(&b.max_transaction_length)); + ordered + } + + pub fn wasm_lanes(&self) -> &Vec { + &self.wasm_lanes + } + + #[cfg(any(feature = "testing", test))] + pub fn set_wasm_lanes(&mut self, wasm_lanes: Vec) { + self.wasm_lanes = wasm_lanes; + #[cfg(any(feature = "once_cell", test))] + { + let wasm_lanes_ordered_by_transaction_size = OnceCell::with_value( + Self::build_wasm_lanes_ordered_by_transaction_size(self.wasm_lanes.clone()), + ); + self.wasm_lanes_ordered_by_transaction_size = wasm_lanes_ordered_by_transaction_size; + let wasm_lanes_ordered_by_transaction_gas_limit = + OnceCell::with_value(Self::build_wasm_lanes_ordered(self.wasm_lanes.clone())); + self.wasm_lanes_ordered_by_transaction_gas_limit_transaction_size_args_length = + wasm_lanes_ordered_by_transaction_gas_limit; + } + } + + #[cfg(any(feature = "testing", test))] + pub fn get_max_wasm_lane_by_gas_limit(&self) -> Option { + self.wasm_lanes + .iter() + .max_by(|a, b| { + a.max_transaction_gas_limit + .cmp(&b.max_transaction_gas_limit) + }) + .cloned() + } +} + +#[cfg(any(feature = "std", test))] +impl Default for TransactionV1Config { + fn default() -> Self { + let wasm_lane = vec![ + 3_u64, //large lane id + 1_048_576, + 1024, + DEFAULT_LARGE_TRANSACTION_GAS_LIMIT, + 10, + ]; + + let native_mint_lane = DEFAULT_NATIVE_MINT_LANE.to_vec(); + let native_auction_lane = DEFAULT_NATIVE_AUCTION_LANE.to_vec(); + let install_upgrade_lane = DEFAULT_INSTALL_UPGRADE_LANE.to_vec(); + let raw_wasm_lanes = vec![wasm_lane]; + let wasm_lanes: Result, _> = + raw_wasm_lanes.into_iter().map(|v| v.try_into()).collect(); + + TransactionV1Config::new( + 
native_mint_lane.try_into().unwrap(), + native_auction_lane.try_into().unwrap(), + install_upgrade_lane.try_into().unwrap(), + wasm_lanes.unwrap(), + ) + } +} + +impl ToBytes for TransactionV1Config { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.native_mint_lane.as_vec().write_bytes(writer)?; + self.native_auction_lane.as_vec().write_bytes(writer)?; + self.install_upgrade_lane.as_vec().write_bytes(writer)?; + let wasm_lanes_as_vecs: Vec> = self + .wasm_lanes + .iter() + .map(TransactionLaneDefinition::as_vec) + .collect(); + wasm_lanes_as_vecs.write_bytes(writer) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + let wasm_lanes_as_vecs: Vec> = self + .wasm_lanes + .iter() + .map(TransactionLaneDefinition::as_vec) + .collect(); + self.native_mint_lane.as_vec().serialized_length() + + self.native_auction_lane.as_vec().serialized_length() + + self.install_upgrade_lane.as_vec().serialized_length() + + wasm_lanes_as_vecs.serialized_length() + } +} + +impl FromBytes for TransactionV1Config { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (raw_native_mint_lane, remainder): (Vec, &[u8]) = FromBytes::from_bytes(bytes)?; + let (raw_native_auction_lane, remainder): (Vec, &[u8]) = + FromBytes::from_bytes(remainder)?; + let (raw_install_upgrade_lane, remainder): (Vec, &[u8]) = + FromBytes::from_bytes(remainder)?; + let (raw_wasm_lanes, remainder): (Vec>, &[u8]) = FromBytes::from_bytes(remainder)?; + let native_mint_lane = raw_native_mint_lane + .try_into() + .map_err(|_| bytesrepr::Error::Formatting)?; + let native_auction_lane = raw_native_auction_lane + .try_into() + .map_err(|_| bytesrepr::Error::Formatting)?; + let install_upgrade_lane = raw_install_upgrade_lane + .try_into() + .map_err(|_| bytesrepr::Error::Formatting)?; + let wasm_lanes: Result, 
_> = + raw_wasm_lanes.into_iter().map(|v| v.try_into()).collect(); + let config = TransactionV1Config::new( + native_mint_lane, + native_auction_lane, + install_upgrade_lane, + wasm_lanes.map_err(|_| bytesrepr::Error::Formatting)?, + ); + Ok((config, remainder)) + } +} + +fn vec_to_limit_definition<'de, D>(deserializer: D) -> Result +where + D: Deserializer<'de>, +{ + let vec = Vec::::deserialize(deserializer)?; + let limits = TransactionLaneDefinition::try_from(vec).map_err(|_| { + D::Error::invalid_value( + Unexpected::Seq, + &"expected 5 u64 compliant numbers to create a TransactionLimitsDefinition", + ) + })?; + Ok(limits) +} + +fn limit_definition_to_vec( + limits: &TransactionLaneDefinition, + serializer: S, +) -> Result +where + S: Serializer, +{ + let vec = limits.as_vec(); + let mut seq = serializer.serialize_seq(Some(vec.len()))?; + for element in vec { + seq.serialize_element(&element)?; + } + seq.end() +} + +fn definition_to_wasms<'de, D>(deserializer: D) -> Result, D::Error> +where + D: Deserializer<'de>, +{ + let vec = Vec::>::deserialize(deserializer)?; + let result: Result, TransactionConfigError> = + vec.into_iter().map(|v| v.try_into()).collect(); + result.map_err(|_| { + D::Error::invalid_value( + Unexpected::Seq, + &"sequence of sequences to assemble wasm definitions", + ) + }) +} + +fn wasm_definitions_to_vec( + limits: &[TransactionLaneDefinition], + serializer: S, +) -> Result +where + S: Serializer, +{ + let vec_of_vecs: Vec> = limits.iter().map(|v| v.as_vec()).collect(); + let mut seq = serializer.serialize_seq(Some(vec_of_vecs.len()))?; + for element in vec_of_vecs { + seq.serialize_element(&element)?; + } + seq.end() +} + +#[cfg(test)] +mod tests { + use serde_json::Value; + + use super::*; + const EXAMPLE_JSON: &str = r#"{ + "native_mint_lane": [0,1,2,3,4], + "native_auction_lane": [1,5,6,7,8], + "install_upgrade_lane": [2,9,10,11,12], + "wasm_lanes": [[3,13,14,15,16], [4,17,18,19,20], [5,21,22,23,24]] + }"#; + #[test] + fn 
bytesrepr_roundtrip() { + let mut rng = TestRng::new(); + let config = TransactionV1Config::random(&mut rng); + bytesrepr::test_serialization_roundtrip(&config); + } + + #[test] + fn should_correctly_track_supported() { + let config = TransactionV1Config::default(); + assert!(config.is_supported(0)); + assert!(config.is_supported(1)); + assert!(config.is_supported(2)); + assert!(config.is_supported(3)); + assert!(!config.is_supported(10)); + } + + #[test] + fn should_get_configuration_for_wasm() { + let config = build_example_transaction_config(); + let got = config.get_wasm_lane_id_by_size(100, 0, 0); + assert_eq!(got, Some(3)); + let config = build_example_transaction_config_reverse_wasm_ids(); + let got = config.get_wasm_lane_id_by_size(100, 0, 0); + assert_eq!(got, Some(5)); + } + + #[test] + fn given_too_big_transaction_should_return_none() { + let config = build_example_transaction_config(); + let got = config.get_wasm_lane_id_by_size(100000000, 0, 0); + assert!(got.is_none()); + let config = build_example_transaction_config_reverse_wasm_ids(); + let got = config.get_wasm_lane_id_by_size(100000000, 0, 0); + assert!(got.is_none()); + let config = build_example_transaction_config_reverse_wasm_ids(); + let got = config.get_wasm_lane_id_by_size(1, 0, 100000); + assert!(got.is_none()); + } + + #[test] + fn given_wasm_should_return_first_fit() { + let config = build_example_transaction_config(); + + let got = config.get_wasm_lane_id_by_size(660, 0, 0); + assert_eq!(got, Some(4)); + + let got = config.get_wasm_lane_id_by_size(800, 0, 0); + assert_eq!(got, Some(5)); + + let got = config.get_wasm_lane_id_by_size(1, 0, 0); + assert_eq!(got, Some(3)); + + let got = config.get_wasm_lane_id_by_size(800, 0, 6024); + assert_eq!(got, Some(5)); + + let config = build_example_transaction_config_reverse_wasm_ids(); + + let got = config.get_wasm_lane_id_by_size(660, 0, 0); + assert_eq!(got, Some(4)); + + let got = config.get_wasm_lane_id_by_size(800, 0, 0); + assert_eq!(got, 
Some(3)); + + let got = config.get_wasm_lane_id_by_size(1, 0, 0); + assert_eq!(got, Some(5)); + + let got = config.get_wasm_lane_id_by_size(800, 0, 6024); + assert_eq!(got, Some(3)); + } + + #[test] + fn given_additional_computation_factor_should_be_applied() { + let config = build_example_transaction_config(); + let got = config.get_wasm_lane_id_by_size(660, 1, 0); + assert_eq!(got, Some(5)); + + let config = build_example_transaction_config_reverse_wasm_ids(); + let got = config.get_wasm_lane_id_by_size(660, 1, 0); + assert_eq!(got, Some(3)); + } + + #[test] + fn given_additional_computation_factor_should_not_overflow() { + let config = build_example_transaction_config(); + let got = config.get_wasm_lane_id_by_size(660, 2, 0); + assert_eq!(got, Some(5)); + let got_2 = config.get_wasm_lane_id_by_size(660, 20, 0); + assert_eq!(got_2, Some(5)); + + let config = build_example_transaction_config_reverse_wasm_ids(); + let got = config.get_wasm_lane_id_by_size(660, 2, 0); + assert_eq!(got, Some(3)); + let got_2 = config.get_wasm_lane_id_by_size(660, 20, 0); + assert_eq!(got_2, Some(3)); + } + + #[test] + fn given_no_wasm_lanes_should_return_none() { + let config = build_example_transaction_config_no_wasms(); + let got = config.get_wasm_lane_id_by_size(660, 2, 0); + assert!(got.is_none()); + let got = config.get_wasm_lane_id_by_size(660, 0, 0); + assert!(got.is_none()); + let got = config.get_wasm_lane_id_by_size(660, 20, 0); + assert!(got.is_none()); + + let got = config.get_wasm_lane_id_by_payment_limited(100, 1, 0); + assert!(got.is_none()); + } + + #[test] + fn given_wasm_when_by_payment_should_find_smallest_lane() { + let config = TransactionV1Config::new( + example_native(), + example_auction(), + example_install_upgrade(), + vec![ + TransactionLaneDefinition { + id: 3, + max_transaction_length: 10, + max_transaction_args_length: 1, + max_transaction_gas_limit: 5, + max_transaction_count: 1, + }, + TransactionLaneDefinition { + id: 4, + max_transaction_length: 11, 
+ max_transaction_args_length: 1, + max_transaction_gas_limit: 55, + max_transaction_count: 1, + }, + TransactionLaneDefinition { + id: 5, + max_transaction_length: 12, + max_transaction_args_length: 5, + max_transaction_gas_limit: 155, + max_transaction_count: 1, + }, + ], + ); + let got = config.get_wasm_lane_id_by_payment_limited(54, 1, 0); + assert_eq!(got, Some(4)); + let got = config.get_wasm_lane_id_by_payment_limited(54, 10, 3); + assert_eq!(got, Some(5)); + } + + #[test] + fn given_wasm_when_by_payment_should_take_size_into_consideration() { + let config = TransactionV1Config::new( + example_native(), + example_auction(), + example_install_upgrade(), + vec![ + TransactionLaneDefinition { + id: 3, + max_transaction_length: 10, + max_transaction_args_length: 1, + max_transaction_gas_limit: 5, + max_transaction_count: 1, + }, + TransactionLaneDefinition { + id: 4, + max_transaction_length: 11, + max_transaction_args_length: 1, + max_transaction_gas_limit: 55, + max_transaction_count: 1, + }, + TransactionLaneDefinition { + id: 5, + max_transaction_length: 12, + max_transaction_args_length: 1, + max_transaction_gas_limit: 155, + max_transaction_count: 1, + }, + ], + ); + let got = config.get_wasm_lane_id_by_payment_limited(54, 12, 0); + assert_eq!(got, Some(5)); + } + + #[test] + fn given_wasm_when_by_payment_should_return_none_if_no_size_fits() { + let config = TransactionV1Config::new( + example_native(), + example_auction(), + example_install_upgrade(), + vec![ + TransactionLaneDefinition { + id: 3, + max_transaction_length: 10, + max_transaction_args_length: 1, + max_transaction_gas_limit: 5, + max_transaction_count: 1, + }, + TransactionLaneDefinition { + id: 4, + max_transaction_length: 11, + max_transaction_args_length: 1, + max_transaction_gas_limit: 55, + max_transaction_count: 1, + }, + TransactionLaneDefinition { + id: 5, + max_transaction_length: 12, + max_transaction_args_length: 5, + max_transaction_gas_limit: 155, + max_transaction_count: 1, + 
}, + ], + ); + let got = config.get_wasm_lane_id_by_payment_limited(54, 120, 0); + assert_eq!(got, None); + let got = config.get_wasm_lane_id_by_payment_limited(54, 10, 1000); + assert_eq!(got, None); + } + + #[test] + fn should_deserialize() { + let got: TransactionV1Config = serde_json::from_str(EXAMPLE_JSON).unwrap(); + let expected = TransactionV1Config::new( + TransactionLaneDefinition::new(0, 1, 2, 3, 4), + TransactionLaneDefinition::new(1, 5, 6, 7, 8), + TransactionLaneDefinition::new(2, 9, 10, 11, 12), + vec![ + TransactionLaneDefinition::new(3, 13, 14, 15, 16), + TransactionLaneDefinition::new(4, 17, 18, 19, 20), + TransactionLaneDefinition::new(5, 21, 22, 23, 24), + ], + ); + assert_eq!(got, expected); + } + + #[test] + fn should_serialize() { + let input = TransactionV1Config::new( + TransactionLaneDefinition::new(0, 1, 2, 3, 4), + TransactionLaneDefinition::new(1, 5, 6, 7, 8), + TransactionLaneDefinition::new(2, 9, 10, 11, 12), + vec![ + TransactionLaneDefinition::new(3, 13, 14, 15, 16), + TransactionLaneDefinition::new(4, 17, 18, 19, 20), + TransactionLaneDefinition::new(5, 21, 22, 23, 24), + ], + ); + let raw = serde_json::to_string(&input).unwrap(); + let got = serde_json::from_str::(&raw).unwrap(); + let expected: Value = serde_json::from_str::(EXAMPLE_JSON).unwrap(); + assert_eq!(got, expected); + } + + #[test] + fn should_order_by_nested_predicates() { + // Firstly, order by max_transaction_gas_limit + let definition_1 = TransactionLaneDefinition::new(0, 0, 0, 4, 0); + let definition_2 = TransactionLaneDefinition::new(1, 0, 0, 3, 0); + let definition_3 = TransactionLaneDefinition::new(2, 0, 0, 2, 0); + let res = TransactionV1Config::build_wasm_lanes_ordered(vec![ + definition_1.clone(), + definition_2.clone(), + definition_3.clone(), + ]); + assert_eq!(res, vec![definition_3, definition_2, definition_1,]); + + // If max_transaction_gas_limit is equal, order by max_transaction_length + let definition_1 = TransactionLaneDefinition::new(0, 3, 0, 1, 0); + let definition_2 = 
TransactionLaneDefinition::new(1, 4, 0, 1, 0); + let definition_3 = TransactionLaneDefinition::new(2, 2, 0, 1, 0); + let res = TransactionV1Config::build_wasm_lanes_ordered(vec![ + definition_1.clone(), + definition_2.clone(), + definition_3.clone(), + ]); + assert_eq!(res, vec![definition_3, definition_1, definition_2,]); + + // If max_transaction_gas_limit and max_transaction_length equal, order by + // max_transaction_args_length + let definition_1 = TransactionLaneDefinition::new(0, 2, 4, 1, 0); + let definition_2 = TransactionLaneDefinition::new(1, 2, 2, 1, 0); + let definition_3 = TransactionLaneDefinition::new(2, 2, 3, 1, 0); + let res = TransactionV1Config::build_wasm_lanes_ordered(vec![ + definition_1.clone(), + definition_2.clone(), + definition_3.clone(), + ]); + assert_eq!(res, vec![definition_2, definition_3, definition_1,]); + + // If max_transaction_gas_limit and max_transaction_length equal and + // max_transaction_args_length, order by id + let definition_1 = TransactionLaneDefinition::new(2, 2, 3, 1, 0); + let definition_2 = TransactionLaneDefinition::new(0, 2, 3, 1, 0); + let definition_3 = TransactionLaneDefinition::new(1, 2, 3, 1, 0); + let res = TransactionV1Config::build_wasm_lanes_ordered(vec![ + definition_2.clone(), + definition_3.clone(), + definition_1.clone(), + ]); + assert_eq!(res, vec![definition_2, definition_3, definition_1,]); + + // Should apply those rules mixed + let definition_1 = TransactionLaneDefinition::new(10, 0, 2, 2, 0); + let definition_2 = TransactionLaneDefinition::new(1, 2, 3, 1, 0); + let definition_3 = TransactionLaneDefinition::new(2, 4, 3, 1, 0); + let definition_4 = TransactionLaneDefinition::new(3, 4, 2, 1, 0); + let definition_5 = TransactionLaneDefinition::new(4, 0, 0, 2, 0); + let definition_6 = TransactionLaneDefinition::new(5, 4, 3, 1, 0); + + let res = TransactionV1Config::build_wasm_lanes_ordered(vec![ + definition_1.clone(), + definition_2.clone(), + definition_3.clone(), + definition_4.clone(), + 
definition_5.clone(), + definition_6.clone(), + ]); + assert_eq!( + res, + vec![ + definition_2, + definition_4, + definition_3, + definition_6, + definition_5, + definition_1 + ] + ); + } + + fn example_native() -> TransactionLaneDefinition { + TransactionLaneDefinition::new(0, 1500, 1024, 1_500_000_000, 150) + } + + fn example_auction() -> TransactionLaneDefinition { + TransactionLaneDefinition::new(1, 500, 3024, 3_500_000_000, 350) + } + + fn example_install_upgrade() -> TransactionLaneDefinition { + TransactionLaneDefinition::new(2, 10000, 2024, 2_500_000_000, 250) + } + + fn wasm_small(id: u8) -> TransactionLaneDefinition { + TransactionLaneDefinition::new(id, 600, 4024, 4_500_000_000, 450) + } + + fn wasm_medium(id: u8) -> TransactionLaneDefinition { + TransactionLaneDefinition::new(id, 700, 5024, 5_500_000_000, 550) + } + + fn wasm_large(id: u8) -> TransactionLaneDefinition { + TransactionLaneDefinition::new(id, 800, 6024, 6_500_000_000, 650) + } + + fn example_wasm() -> Vec { + vec![wasm_small(3), wasm_medium(4), wasm_large(5)] + } + + fn example_wasm_reversed_ids() -> Vec { + vec![wasm_small(5), wasm_medium(4), wasm_large(3)] + } + + fn build_example_transaction_config_no_wasms() -> TransactionV1Config { + TransactionV1Config::new( + example_native(), + example_auction(), + example_install_upgrade(), + vec![], + ) + } + + fn build_example_transaction_config() -> TransactionV1Config { + TransactionV1Config::new( + example_native(), + example_auction(), + example_install_upgrade(), + example_wasm(), + ) + } + + fn build_example_transaction_config_reverse_wasm_ids() -> TransactionV1Config { + TransactionV1Config::new( + example_native(), + example_auction(), + example_install_upgrade(), + example_wasm_reversed_ids(), + ) + } +} diff --git a/types/src/chainspec/upgrade_config.rs b/types/src/chainspec/upgrade_config.rs new file mode 100644 index 0000000000..5ce68f9ce4 --- /dev/null +++ b/types/src/chainspec/upgrade_config.rs @@ -0,0 +1,171 @@ +use 
num_rational::Ratio; +use serde::Serialize; +use std::collections::BTreeMap; + +use crate::{ + ChainspecRegistry, Digest, EraId, FeeHandling, HoldBalanceHandling, Key, ProtocolVersion, + StoredValue, +}; + +/// Represents the configuration of a protocol upgrade. +#[derive(Debug, Clone, PartialEq, Eq, Serialize)] +pub struct ProtocolUpgradeConfig { + pre_state_hash: Digest, + current_protocol_version: ProtocolVersion, + new_protocol_version: ProtocolVersion, + activation_point: Option, + new_gas_hold_handling: Option, + new_gas_hold_interval: Option, + new_validator_slots: Option, + new_auction_delay: Option, + new_locked_funds_period_millis: Option, + new_round_seigniorage_rate: Option>, + new_unbonding_delay: Option, + global_state_update: BTreeMap, + chainspec_registry: ChainspecRegistry, + fee_handling: FeeHandling, + validator_minimum_bid_amount: u64, + maximum_delegation_amount: u64, + minimum_delegation_amount: u64, + enable_addressable_entity: bool, +} + +impl ProtocolUpgradeConfig { + /// Create new upgrade config. 
+ #[allow(clippy::too_many_arguments)] + pub fn new( + pre_state_hash: Digest, + current_protocol_version: ProtocolVersion, + new_protocol_version: ProtocolVersion, + activation_point: Option, + new_gas_hold_handling: Option, + new_gas_hold_interval: Option, + new_validator_slots: Option, + new_auction_delay: Option, + new_locked_funds_period_millis: Option, + new_round_seigniorage_rate: Option>, + new_unbonding_delay: Option, + global_state_update: BTreeMap, + chainspec_registry: ChainspecRegistry, + fee_handling: FeeHandling, + validator_minimum_bid_amount: u64, + maximum_delegation_amount: u64, + minimum_delegation_amount: u64, + enable_addressable_entity: bool, + ) -> Self { + ProtocolUpgradeConfig { + pre_state_hash, + current_protocol_version, + new_protocol_version, + activation_point, + new_gas_hold_handling, + new_gas_hold_interval, + new_validator_slots, + new_auction_delay, + new_locked_funds_period_millis, + new_round_seigniorage_rate, + new_unbonding_delay, + global_state_update, + chainspec_registry, + fee_handling, + validator_minimum_bid_amount, + maximum_delegation_amount, + minimum_delegation_amount, + enable_addressable_entity, + } + } + + /// Returns the current state root hash. + pub fn pre_state_hash(&self) -> Digest { + self.pre_state_hash + } + + /// Returns current protocol version of this upgrade. + pub fn current_protocol_version(&self) -> ProtocolVersion { + self.current_protocol_version + } + + /// Returns new protocol version of this upgrade. + pub fn new_protocol_version(&self) -> ProtocolVersion { + self.new_protocol_version + } + + /// Returns activation point in eras. + pub fn activation_point(&self) -> Option { + self.activation_point + } + + /// Returns new gas hold handling if specified. + pub fn new_gas_hold_handling(&self) -> Option { + self.new_gas_hold_handling + } + + /// Returns new gas hold interval if specified. 
+ pub fn new_gas_hold_interval(&self) -> Option { + self.new_gas_hold_interval + } + + /// Returns new validator slots if specified. + pub fn new_validator_slots(&self) -> Option { + self.new_validator_slots + } + + /// Returns new auction delay if specified. + pub fn new_auction_delay(&self) -> Option { + self.new_auction_delay + } + + /// Returns new locked funds period if specified. + pub fn new_locked_funds_period_millis(&self) -> Option { + self.new_locked_funds_period_millis + } + + /// Returns new round seigniorage rate if specified. + pub fn new_round_seigniorage_rate(&self) -> Option> { + self.new_round_seigniorage_rate + } + + /// Returns new unbonding delay if specified. + pub fn new_unbonding_delay(&self) -> Option { + self.new_unbonding_delay + } + + /// Returns new map of emergency global state updates. + pub fn global_state_update(&self) -> &BTreeMap { + &self.global_state_update + } + + /// Returns a reference to the chainspec registry. + pub fn chainspec_registry(&self) -> &ChainspecRegistry { + &self.chainspec_registry + } + + /// Sets new pre state hash. + pub fn with_pre_state_hash(&mut self, pre_state_hash: Digest) { + self.pre_state_hash = pre_state_hash; + } + + /// Fee handling setting. + pub fn fee_handling(&self) -> FeeHandling { + self.fee_handling + } + + /// Validator minimum bid amount + pub fn validator_minimum_bid_amount(&self) -> u64 { + self.validator_minimum_bid_amount + } + + /// Maximum delegation amount for validator. + pub fn maximum_delegation_amount(&self) -> u64 { + self.maximum_delegation_amount + } + + /// Minimum delegation amount for validator. 
+ pub fn minimum_delegation_amount(&self) -> u64 { + self.minimum_delegation_amount + } + + pub fn enable_addressable_entity(&self) -> bool { + self.enable_addressable_entity + } +} diff --git a/types/src/chainspec/vacancy_config.rs b/types/src/chainspec/vacancy_config.rs new file mode 100644 index 0000000000..774e37c0ca --- /dev/null +++ b/types/src/chainspec/vacancy_config.rs @@ -0,0 +1,100 @@ +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +use crate::{ + bytesrepr, + bytesrepr::{Error, FromBytes, ToBytes}, +}; +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::Rng; +use serde::{Deserialize, Serialize}; + +/// The configuration to determine gas price based on block vacancy. +#[derive(Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[serde(deny_unknown_fields)] +pub struct VacancyConfig { + /// The upper threshold to determine an increment in gas price + pub upper_threshold: u64, + /// The lower threshold to determine a decrement in gas price + pub lower_threshold: u64, + /// The upper limit of the gas price. + pub max_gas_price: u8, + /// The lower limit of the gas price. 
+ pub min_gas_price: u8, +} + +impl Default for VacancyConfig { + fn default() -> Self { + Self { + upper_threshold: 90, + lower_threshold: 50, + max_gas_price: 3, + min_gas_price: 1, + } + } +} + +impl VacancyConfig { + /// Returns a random [`VacancyConfig`] + #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + Self { + upper_threshold: rng.gen_range(49..100), + lower_threshold: rng.gen_range(0..50), + max_gas_price: rng.gen_range(3..5), + min_gas_price: rng.gen_range(1..3), + } + } +} + +impl ToBytes for VacancyConfig { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + self.upper_threshold.write_bytes(writer)?; + self.lower_threshold.write_bytes(writer)?; + self.max_gas_price.write_bytes(writer)?; + self.min_gas_price.write_bytes(writer) + } + fn to_bytes(&self) -> Result, Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + fn serialized_length(&self) -> usize { + self.upper_threshold.serialized_length() + + self.lower_threshold.serialized_length() + + self.max_gas_price.serialized_length() + + self.min_gas_price.serialized_length() + } +} + +impl FromBytes for VacancyConfig { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (upper_threshold, remainder) = u64::from_bytes(bytes)?; + let (lower_threshold, remainder) = u64::from_bytes(remainder)?; + let (max_gas_price, remainder) = u8::from_bytes(remainder)?; + let (min_gas_price, remainder) = u8::from_bytes(remainder)?; + Ok(( + Self { + upper_threshold, + lower_threshold, + max_gas_price, + min_gas_price, + }, + remainder, + )) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn bytesrepr_roundtrip() { + let mut rng = TestRng::new(); + let config = VacancyConfig::random(&mut rng); + bytesrepr::test_serialization_roundtrip(&config); + } +} diff --git a/types/src/chainspec/vm_config.rs b/types/src/chainspec/vm_config.rs new file mode 100644 index 
0000000000..c3da5c185d --- /dev/null +++ b/types/src/chainspec/vm_config.rs @@ -0,0 +1,54 @@ +mod auction_costs; +mod chainspec_registry; +mod handle_payment_costs; +mod host_function_costs; +mod host_function_costs_v2; +mod message_limits; +mod mint_costs; +mod opcode_costs; +mod standard_payment_costs; +mod storage_costs; +mod system_config; +mod wasm_config; +mod wasm_v1_config; +mod wasm_v2_config; + +pub use auction_costs::AuctionCosts; +#[cfg(any(feature = "testing", test))] +pub use auction_costs::{DEFAULT_ADD_BID_COST, DEFAULT_DELEGATE_COST}; +pub use chainspec_registry::ChainspecRegistry; +pub use handle_payment_costs::HandlePaymentCosts; +#[cfg(any(feature = "testing", test))] +pub use host_function_costs::DEFAULT_NEW_DICTIONARY_COST; +pub use host_function_costs::{ + Cost as HostFunctionCost, HostFunction, HostFunctionCostsV1, + DEFAULT_HOST_FUNCTION_NEW_DICTIONARY, +}; +pub use host_function_costs_v2::{HostFunctionCostsV2, HostFunctionV2}; +pub use message_limits::MessageLimits; +pub use mint_costs::MintCosts; +#[cfg(any(feature = "testing", test))] +pub use mint_costs::DEFAULT_TRANSFER_COST; +pub use opcode_costs::{BrTableCost, ControlFlowCosts, OpcodeCosts}; +#[cfg(any(feature = "testing", test))] +pub use opcode_costs::{ + DEFAULT_ADD_COST, DEFAULT_BIT_COST, DEFAULT_CONST_COST, DEFAULT_CONTROL_FLOW_BLOCK_OPCODE, + DEFAULT_CONTROL_FLOW_BR_IF_OPCODE, DEFAULT_CONTROL_FLOW_BR_OPCODE, + DEFAULT_CONTROL_FLOW_BR_TABLE_MULTIPLIER, DEFAULT_CONTROL_FLOW_BR_TABLE_OPCODE, + DEFAULT_CONTROL_FLOW_CALL_INDIRECT_OPCODE, DEFAULT_CONTROL_FLOW_CALL_OPCODE, + DEFAULT_CONTROL_FLOW_DROP_OPCODE, DEFAULT_CONTROL_FLOW_ELSE_OPCODE, + DEFAULT_CONTROL_FLOW_END_OPCODE, DEFAULT_CONTROL_FLOW_IF_OPCODE, + DEFAULT_CONTROL_FLOW_LOOP_OPCODE, DEFAULT_CONTROL_FLOW_RETURN_OPCODE, + DEFAULT_CONTROL_FLOW_SELECT_OPCODE, DEFAULT_CONVERSION_COST, DEFAULT_CURRENT_MEMORY_COST, + DEFAULT_DIV_COST, DEFAULT_GLOBAL_COST, DEFAULT_GROW_MEMORY_COST, + DEFAULT_INTEGER_COMPARISON_COST, 
DEFAULT_LOAD_COST, DEFAULT_LOCAL_COST, DEFAULT_MUL_COST, + DEFAULT_NOP_COST, DEFAULT_STORE_COST, DEFAULT_UNREACHABLE_COST, +}; +pub use standard_payment_costs::StandardPaymentCosts; +pub use storage_costs::StorageCosts; +pub use system_config::SystemConfig; +pub use wasm_config::WasmConfig; +pub use wasm_v1_config::WasmV1Config; +#[cfg(any(feature = "testing", test))] +pub use wasm_v1_config::{DEFAULT_MAX_STACK_HEIGHT, DEFAULT_WASM_MAX_MEMORY}; +pub use wasm_v2_config::WasmV2Config; diff --git a/types/src/chainspec/vm_config/auction_costs.rs b/types/src/chainspec/vm_config/auction_costs.rs new file mode 100644 index 0000000000..d57c4dbbd9 --- /dev/null +++ b/types/src/chainspec/vm_config/auction_costs.rs @@ -0,0 +1,316 @@ +//! Costs of the auction system contract. +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::{ + distributions::{Distribution, Standard}, + Rng, +}; +use serde::{Deserialize, Serialize}; + +use crate::bytesrepr::{self, FromBytes, ToBytes}; + +/// Default cost of the `get_era_validators` auction entry point. +pub const DEFAULT_GET_ERA_VALIDATORS_COST: u64 = 2_500_000_000; +/// Default cost of the `read_seigniorage_recipients` auction entry point. +pub const DEFAULT_READ_SEIGNIORAGE_RECIPIENTS_COST: u64 = 5_000_000_000; +/// Default cost of the `add_bid` auction entry point. +pub const DEFAULT_ADD_BID_COST: u64 = 2_500_000_000; +/// Default cost of the `withdraw_bid` auction entry point. +pub const DEFAULT_WITHDRAW_BID_COST: u64 = 2_500_000_000; +/// Default cost of the `delegate` auction entry point. +pub const DEFAULT_DELEGATE_COST: u64 = DEFAULT_WITHDRAW_BID_COST; +/// Default cost of the `redelegate` auction entry point. +pub const DEFAULT_REDELEGATE_COST: u64 = DEFAULT_WITHDRAW_BID_COST; +/// Default cost of the `undelegate` auction entry point. +pub const DEFAULT_UNDELEGATE_COST: u64 = DEFAULT_WITHDRAW_BID_COST; +/// Default cost of the `run_auction` auction entry point. 
+pub const DEFAULT_RUN_AUCTION_COST: u64 = 2_500_000_000; +/// Default cost of the `slash` auction entry point. +pub const DEFAULT_SLASH_COST: u64 = 2_500_000_000; +/// Default cost of the `distribute` auction entry point. +pub const DEFAULT_DISTRIBUTE_COST: u64 = 2_500_000_000; +/// Default cost of the `withdraw_delegator_reward` auction entry point. +pub const DEFAULT_WITHDRAW_DELEGATOR_REWARD_COST: u64 = 5_000_000_000; +/// Default cost of the `withdraw_validator_reward` auction entry point. +pub const DEFAULT_WITHDRAW_VALIDATOR_REWARD_COST: u64 = 5_000_000_000; +/// Default cost of the `read_era_id` auction entry point. +pub const DEFAULT_READ_ERA_ID_COST: u64 = 2_500_000_000; +/// Default cost of the `activate_bid` auction entry point. +pub const DEFAULT_ACTIVATE_BID_COST: u64 = 2_500_000_000; +/// Default cost of the `change_bid_public_key` auction entry point. +pub const DEFAULT_CHANGE_BID_PUBLIC_KEY_COST: u64 = 5_000_000_000; +/// Default cost of the `add_reservations` auction entry point. +pub const DEFAULT_ADD_RESERVATIONS_COST: u64 = DEFAULT_WITHDRAW_BID_COST; +/// Default cost of the `cancel_reservations` auction entry point. +pub const DEFAULT_CANCEL_RESERVATIONS_COST: u64 = DEFAULT_WITHDRAW_BID_COST; + +/// Description of the costs of calling auction entrypoints. +#[derive(Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[serde(deny_unknown_fields)] +pub struct AuctionCosts { + /// Cost of calling the `get_era_validators` entry point. + pub get_era_validators: u64, + /// Cost of calling the `read_seigniorage_recipients` entry point. + pub read_seigniorage_recipients: u64, + /// Cost of calling the `add_bid` entry point. + pub add_bid: u64, + /// Cost of calling the `withdraw_bid` entry point. + pub withdraw_bid: u64, + /// Cost of calling the `delegate` entry point. + pub delegate: u64, + /// Cost of calling the `undelegate` entry point. 
+ pub undelegate: u64, + /// Cost of calling the `run_auction` entry point. + pub run_auction: u64, + /// Cost of calling the `slash` entry point. + pub slash: u64, + /// Cost of calling the `distribute` entry point. + pub distribute: u64, + /// Cost of calling the `withdraw_delegator_reward` entry point. + pub withdraw_delegator_reward: u64, + /// Cost of calling the `withdraw_validator_reward` entry point. + pub withdraw_validator_reward: u64, + /// Cost of calling the `read_era_id` entry point. + pub read_era_id: u64, + /// Cost of calling the `activate_bid` entry point. + pub activate_bid: u64, + /// Cost of calling the `redelegate` entry point. + pub redelegate: u64, + /// Cost of calling the `change_bid_public_key` entry point. + pub change_bid_public_key: u64, + /// Cost of calling the `add_reservations` entry point. + pub add_reservations: u64, + /// Cost of calling the `cancel_reservations` entry point. + pub cancel_reservations: u64, +} + +impl Default for AuctionCosts { + fn default() -> Self { + Self { + get_era_validators: DEFAULT_GET_ERA_VALIDATORS_COST, + read_seigniorage_recipients: DEFAULT_READ_SEIGNIORAGE_RECIPIENTS_COST, + add_bid: DEFAULT_ADD_BID_COST, + withdraw_bid: DEFAULT_WITHDRAW_BID_COST, + delegate: DEFAULT_DELEGATE_COST, + undelegate: DEFAULT_UNDELEGATE_COST, + run_auction: DEFAULT_RUN_AUCTION_COST, + slash: DEFAULT_SLASH_COST, + distribute: DEFAULT_DISTRIBUTE_COST, + withdraw_delegator_reward: DEFAULT_WITHDRAW_DELEGATOR_REWARD_COST, + withdraw_validator_reward: DEFAULT_WITHDRAW_VALIDATOR_REWARD_COST, + read_era_id: DEFAULT_READ_ERA_ID_COST, + activate_bid: DEFAULT_ACTIVATE_BID_COST, + redelegate: DEFAULT_REDELEGATE_COST, + change_bid_public_key: DEFAULT_CHANGE_BID_PUBLIC_KEY_COST, + add_reservations: DEFAULT_ADD_RESERVATIONS_COST, + cancel_reservations: DEFAULT_CANCEL_RESERVATIONS_COST, + } + } +} + +impl ToBytes for AuctionCosts { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut ret = 
bytesrepr::unchecked_allocate_buffer(self); + + let Self { + get_era_validators, + read_seigniorage_recipients, + add_bid, + withdraw_bid, + delegate, + undelegate, + run_auction, + slash, + distribute, + withdraw_delegator_reward, + withdraw_validator_reward, + read_era_id, + activate_bid, + redelegate, + change_bid_public_key, + add_reservations, + cancel_reservations, + } = self; + + ret.append(&mut get_era_validators.to_bytes()?); + ret.append(&mut read_seigniorage_recipients.to_bytes()?); + ret.append(&mut add_bid.to_bytes()?); + ret.append(&mut withdraw_bid.to_bytes()?); + ret.append(&mut delegate.to_bytes()?); + ret.append(&mut undelegate.to_bytes()?); + ret.append(&mut run_auction.to_bytes()?); + ret.append(&mut slash.to_bytes()?); + ret.append(&mut distribute.to_bytes()?); + ret.append(&mut withdraw_delegator_reward.to_bytes()?); + ret.append(&mut withdraw_validator_reward.to_bytes()?); + ret.append(&mut read_era_id.to_bytes()?); + ret.append(&mut activate_bid.to_bytes()?); + ret.append(&mut redelegate.to_bytes()?); + ret.append(&mut change_bid_public_key.to_bytes()?); + ret.append(&mut add_reservations.to_bytes()?); + ret.append(&mut cancel_reservations.to_bytes()?); + + Ok(ret) + } + + fn serialized_length(&self) -> usize { + let Self { + get_era_validators, + read_seigniorage_recipients, + add_bid, + withdraw_bid, + delegate, + undelegate, + run_auction, + slash, + distribute, + withdraw_delegator_reward, + withdraw_validator_reward, + read_era_id, + activate_bid, + redelegate, + change_bid_public_key, + add_reservations, + cancel_reservations, + } = self; + + get_era_validators.serialized_length() + + read_seigniorage_recipients.serialized_length() + + add_bid.serialized_length() + + withdraw_bid.serialized_length() + + delegate.serialized_length() + + undelegate.serialized_length() + + run_auction.serialized_length() + + slash.serialized_length() + + distribute.serialized_length() + + withdraw_delegator_reward.serialized_length() + + 
withdraw_validator_reward.serialized_length() + + read_era_id.serialized_length() + + activate_bid.serialized_length() + + redelegate.serialized_length() + + change_bid_public_key.serialized_length() + + add_reservations.serialized_length() + + cancel_reservations.serialized_length() + } +} + +impl FromBytes for AuctionCosts { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (get_era_validators, rem) = FromBytes::from_bytes(bytes)?; + let (read_seigniorage_recipients, rem) = FromBytes::from_bytes(rem)?; + let (add_bid, rem) = FromBytes::from_bytes(rem)?; + let (withdraw_bid, rem) = FromBytes::from_bytes(rem)?; + let (delegate, rem) = FromBytes::from_bytes(rem)?; + let (undelegate, rem) = FromBytes::from_bytes(rem)?; + let (run_auction, rem) = FromBytes::from_bytes(rem)?; + let (slash, rem) = FromBytes::from_bytes(rem)?; + let (distribute, rem) = FromBytes::from_bytes(rem)?; + let (withdraw_delegator_reward, rem) = FromBytes::from_bytes(rem)?; + let (withdraw_validator_reward, rem) = FromBytes::from_bytes(rem)?; + let (read_era_id, rem) = FromBytes::from_bytes(rem)?; + let (activate_bid, rem) = FromBytes::from_bytes(rem)?; + let (redelegate, rem) = FromBytes::from_bytes(rem)?; + let (change_bid_public_key, rem) = FromBytes::from_bytes(rem)?; + let (add_reservations, rem) = FromBytes::from_bytes(rem)?; + let (cancel_reservations, rem) = FromBytes::from_bytes(rem)?; + Ok(( + Self { + get_era_validators, + read_seigniorage_recipients, + add_bid, + withdraw_bid, + delegate, + undelegate, + run_auction, + slash, + distribute, + withdraw_delegator_reward, + withdraw_validator_reward, + read_era_id, + activate_bid, + redelegate, + change_bid_public_key, + add_reservations, + cancel_reservations, + }, + rem, + )) + } +} + +#[cfg(any(feature = "testing", test))] +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> AuctionCosts { + AuctionCosts { + get_era_validators: rng.gen_range(0..i64::MAX) as u64, + 
read_seigniorage_recipients: rng.gen_range(0..i64::MAX) as u64, + add_bid: rng.gen_range(0..i64::MAX) as u64, + withdraw_bid: rng.gen_range(0..i64::MAX) as u64, + delegate: rng.gen_range(0..i64::MAX) as u64, + undelegate: rng.gen_range(0..i64::MAX) as u64, + run_auction: rng.gen_range(0..i64::MAX) as u64, + slash: rng.gen_range(0..i64::MAX) as u64, + distribute: rng.gen_range(0..i64::MAX) as u64, + withdraw_delegator_reward: rng.gen_range(0..i64::MAX) as u64, + withdraw_validator_reward: rng.gen_range(0..i64::MAX) as u64, + read_era_id: rng.gen_range(0..i64::MAX) as u64, + activate_bid: rng.gen_range(0..i64::MAX) as u64, + redelegate: rng.gen_range(0..i64::MAX) as u64, + change_bid_public_key: rng.gen_range(0..i64::MAX) as u64, + add_reservations: rng.gen_range(0..i64::MAX) as u64, + cancel_reservations: rng.gen_range(0..i64::MAX) as u64, + } + } +} + +#[doc(hidden)] +#[cfg(any(feature = "gens", test))] +pub mod gens { + use proptest::prelude::*; + + use super::AuctionCosts; + + prop_compose! 
{ + pub fn auction_costs_arb()( + get_era_validators in 0..=(i64::MAX as u64), + read_seigniorage_recipients in 0..=(i64::MAX as u64), + add_bid in 0..=(i64::MAX as u64), + withdraw_bid in 0..=(i64::MAX as u64), + delegate in 0..=(i64::MAX as u64), + undelegate in 0..=(i64::MAX as u64), + run_auction in 0..=(i64::MAX as u64), + slash in 0..=(i64::MAX as u64), + distribute in 0..=(i64::MAX as u64), + withdraw_delegator_reward in 0..=(i64::MAX as u64), + withdraw_validator_reward in 0..=(i64::MAX as u64), + read_era_id in 0..=(i64::MAX as u64), + activate_bid in 0..=(i64::MAX as u64), + redelegate in 0..=(i64::MAX as u64), + change_bid_public_key in 0..=(i64::MAX as u64), + add_reservations in 0..=(i64::MAX as u64), + cancel_reservations in 0..=(i64::MAX as u64), + ) -> AuctionCosts { + AuctionCosts { + get_era_validators, + read_seigniorage_recipients, + add_bid, + withdraw_bid, + delegate, + undelegate, + run_auction, + slash, + distribute, + withdraw_delegator_reward, + withdraw_validator_reward, + read_era_id, + activate_bid, + redelegate, + change_bid_public_key, + add_reservations, + cancel_reservations, + } + } + } +} diff --git a/types/src/chainspec/vm_config/chainspec_registry.rs b/types/src/chainspec/vm_config/chainspec_registry.rs new file mode 100644 index 0000000000..39c7b2da7e --- /dev/null +++ b/types/src/chainspec/vm_config/chainspec_registry.rs @@ -0,0 +1,174 @@ +//! The registry of chainspec hash digests. + +use std::{collections::BTreeMap, convert::TryFrom}; + +#[cfg(any(feature = "testing", test))] +use rand::{ + distributions::{Distribution, Standard}, + Rng, +}; + +use serde::{Deserialize, Serialize}; + +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + CLType, CLTyped, Digest, +}; + +type BytesreprChainspecRegistry = BTreeMap; + +/// The chainspec registry. 
+#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, Debug)] +pub struct ChainspecRegistry { + chainspec_raw_hash: Digest, + genesis_accounts_raw_hash: Option, + global_state_raw_hash: Option, +} + +impl ChainspecRegistry { + const CHAINSPEC_RAW_MAP_KEY: &'static str = "chainspec_raw"; + const GENESIS_ACCOUNTS_RAW_MAP_KEY: &'static str = "genesis_accounts_raw"; + const GLOBAL_STATE_RAW_MAP_KEY: &'static str = "global_state_raw"; + + /// Returns a `ChainspecRegistry` constructed at genesis. + pub fn new_with_genesis( + chainspec_file_bytes: &[u8], + genesis_accounts_file_bytes: &[u8], + ) -> Self { + ChainspecRegistry { + chainspec_raw_hash: Digest::hash(chainspec_file_bytes), + genesis_accounts_raw_hash: Some(Digest::hash(genesis_accounts_file_bytes)), + global_state_raw_hash: None, + } + } + + /// Returns a `ChainspecRegistry` constructed at node upgrade. + pub fn new_with_optional_global_state( + chainspec_file_bytes: &[u8], + global_state_file_bytes: Option<&[u8]>, + ) -> Self { + ChainspecRegistry { + chainspec_raw_hash: Digest::hash(chainspec_file_bytes), + genesis_accounts_raw_hash: None, + global_state_raw_hash: global_state_file_bytes.map(Digest::hash), + } + } + + /// Returns the hash of the raw bytes of the chainspec.toml file. + pub fn chainspec_raw_hash(&self) -> &Digest { + &self.chainspec_raw_hash + } + + /// Returns the hash of the raw bytes of the genesis accounts.toml file if it exists. + pub fn genesis_accounts_raw_hash(&self) -> Option<&Digest> { + self.genesis_accounts_raw_hash.as_ref() + } + + /// Returns the hash of the raw bytes of the global_state.toml file if it exists. 
+ pub fn global_state_raw_hash(&self) -> Option<&Digest> { + self.global_state_raw_hash.as_ref() + } + + fn as_map(&self) -> BytesreprChainspecRegistry { + let mut map = BTreeMap::new(); + map.insert( + Self::CHAINSPEC_RAW_MAP_KEY.to_string(), + self.chainspec_raw_hash, + ); + if let Some(genesis_accounts_raw_hash) = self.genesis_accounts_raw_hash { + map.insert( + Self::GENESIS_ACCOUNTS_RAW_MAP_KEY.to_string(), + genesis_accounts_raw_hash, + ); + } + if let Some(global_state_raw_hash) = self.global_state_raw_hash { + map.insert( + Self::GLOBAL_STATE_RAW_MAP_KEY.to_string(), + global_state_raw_hash, + ); + } + map + } +} + +impl TryFrom for ChainspecRegistry { + type Error = bytesrepr::Error; + + fn try_from(map: BytesreprChainspecRegistry) -> Result { + let chainspec_raw_hash = *map + .get(Self::CHAINSPEC_RAW_MAP_KEY) + .ok_or(bytesrepr::Error::Formatting)?; + let genesis_accounts_raw_hash = map.get(Self::GENESIS_ACCOUNTS_RAW_MAP_KEY).copied(); + let global_state_raw_hash = map.get(Self::GLOBAL_STATE_RAW_MAP_KEY).copied(); + Ok(ChainspecRegistry { + chainspec_raw_hash, + genesis_accounts_raw_hash, + global_state_raw_hash, + }) + } +} + +impl ToBytes for ChainspecRegistry { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.as_map().to_bytes() + } + + fn serialized_length(&self) -> usize { + self.as_map().serialized_length() + } +} + +impl FromBytes for ChainspecRegistry { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (map, remainder) = BytesreprChainspecRegistry::from_bytes(bytes)?; + let chainspec_registry = ChainspecRegistry::try_from(map)?; + Ok((chainspec_registry, remainder)) + } +} + +impl CLTyped for ChainspecRegistry { + fn cl_type() -> CLType { + BytesreprChainspecRegistry::cl_type() + } +} + +#[cfg(any(feature = "testing", test))] +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> ChainspecRegistry { + ChainspecRegistry { + chainspec_raw_hash: rng.gen(), + genesis_accounts_raw_hash: rng.gen(), 
+ global_state_raw_hash: rng.gen(), + } + } +} + +#[cfg(test)] +mod tests { + use rand::Rng; + + use super::*; + + #[test] + fn bytesrepr_roundtrip() { + let mut rng = rand::thread_rng(); + + let chainspec_file_bytes: [u8; 10] = rng.gen(); + + let genesis_account_file_bytes: [u8; 10] = rng.gen(); + let chainspec_registry = + ChainspecRegistry::new_with_genesis(&chainspec_file_bytes, &genesis_account_file_bytes); + bytesrepr::test_serialization_roundtrip(&chainspec_registry); + + let global_state_file_bytes: [u8; 10] = rng.gen(); + let chainspec_registry = ChainspecRegistry::new_with_optional_global_state( + &chainspec_file_bytes, + Some(&global_state_file_bytes), + ); + bytesrepr::test_serialization_roundtrip(&chainspec_registry); + + let chainspec_registry = + ChainspecRegistry::new_with_optional_global_state(&chainspec_file_bytes, None); + bytesrepr::test_serialization_roundtrip(&chainspec_registry); + } +} diff --git a/types/src/chainspec/vm_config/handle_payment_costs.rs b/types/src/chainspec/vm_config/handle_payment_costs.rs new file mode 100644 index 0000000000..f12edc1013 --- /dev/null +++ b/types/src/chainspec/vm_config/handle_payment_costs.rs @@ -0,0 +1,121 @@ +//! Costs of the `handle_payment` system contract. +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::{ + distributions::{Distribution, Standard}, + Rng, +}; +use serde::{Deserialize, Serialize}; + +use crate::bytesrepr::{self, FromBytes, ToBytes}; + +/// Default cost of the `get_payment_purse` `handle_payment` entry point. +pub const DEFAULT_GET_PAYMENT_PURSE_COST: u32 = 10_000; +/// Default cost of the `set_refund_purse` `handle_payment` entry point. +pub const DEFAULT_SET_REFUND_PURSE_COST: u32 = 10_000; +/// Default cost of the `get_refund_purse` `handle_payment` entry point. +pub const DEFAULT_GET_REFUND_PURSE_COST: u32 = 10_000; +/// Default cost of the `finalize_payment` `handle_payment` entry point. 
+pub const DEFAULT_FINALIZE_PAYMENT_COST: u32 = 2_500_000_000; + +/// Description of the costs of calling `handle_payment` entrypoints. +#[derive(Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[serde(deny_unknown_fields)] +pub struct HandlePaymentCosts { + /// Cost of calling the `get_payment_purse` entry point. + pub get_payment_purse: u32, + /// Cost of calling the `set_refund_purse` entry point. + pub set_refund_purse: u32, + /// Cost of calling the `get_refund_purse` entry point. + pub get_refund_purse: u32, + /// Cost of calling the `finalize_payment` entry point. + pub finalize_payment: u32, +} + +impl Default for HandlePaymentCosts { + fn default() -> Self { + Self { + get_payment_purse: DEFAULT_GET_PAYMENT_PURSE_COST, + set_refund_purse: DEFAULT_SET_REFUND_PURSE_COST, + get_refund_purse: DEFAULT_GET_REFUND_PURSE_COST, + finalize_payment: DEFAULT_FINALIZE_PAYMENT_COST, + } + } +} + +impl ToBytes for HandlePaymentCosts { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut ret = bytesrepr::unchecked_allocate_buffer(self); + + ret.append(&mut self.get_payment_purse.to_bytes()?); + ret.append(&mut self.set_refund_purse.to_bytes()?); + ret.append(&mut self.get_refund_purse.to_bytes()?); + ret.append(&mut self.finalize_payment.to_bytes()?); + + Ok(ret) + } + + fn serialized_length(&self) -> usize { + self.get_payment_purse.serialized_length() + + self.set_refund_purse.serialized_length() + + self.get_refund_purse.serialized_length() + + self.finalize_payment.serialized_length() + } +} + +impl FromBytes for HandlePaymentCosts { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (get_payment_purse, rem) = FromBytes::from_bytes(bytes)?; + let (set_refund_purse, rem) = FromBytes::from_bytes(rem)?; + let (get_refund_purse, rem) = FromBytes::from_bytes(rem)?; + let (finalize_payment, rem) = FromBytes::from_bytes(rem)?; + + Ok(( + Self { + get_payment_purse, + 
set_refund_purse, + get_refund_purse, + finalize_payment, + }, + rem, + )) + } +} + +#[cfg(any(feature = "testing", test))] +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> HandlePaymentCosts { + HandlePaymentCosts { + get_payment_purse: rng.gen(), + set_refund_purse: rng.gen(), + get_refund_purse: rng.gen(), + finalize_payment: rng.gen(), + } + } +} + +#[doc(hidden)] +#[cfg(any(feature = "gens", test))] +pub mod gens { + use proptest::{num, prop_compose}; + + use super::HandlePaymentCosts; + + prop_compose! { + pub fn handle_payment_costs_arb()( + get_payment_purse in num::u32::ANY, + set_refund_purse in num::u32::ANY, + get_refund_purse in num::u32::ANY, + finalize_payment in num::u32::ANY, + ) -> HandlePaymentCosts { + HandlePaymentCosts { + get_payment_purse, + set_refund_purse, + get_refund_purse, + finalize_payment, + } + } + } +} diff --git a/types/src/chainspec/vm_config/host_function_costs.rs b/types/src/chainspec/vm_config/host_function_costs.rs new file mode 100644 index 0000000000..480bca3901 --- /dev/null +++ b/types/src/chainspec/vm_config/host_function_costs.rs @@ -0,0 +1,1310 @@ +//! Support for host function gas cost tables. +use core::ops::Add; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +use derive_more::Add; +use num_traits::Zero; +use rand::{distributions::Standard, prelude::Distribution, Rng}; +use serde::{Deserialize, Serialize}; + +use crate::{ + bytesrepr::{self, FromBytes, ToBytes, U32_SERIALIZED_LENGTH}, + Gas, +}; + +/// Representation of argument's cost. +pub type Cost = u32; + +const COST_SERIALIZED_LENGTH: usize = U32_SERIALIZED_LENGTH; + +/// An identifier that represents an unused argument. +const NOT_USED: Cost = 0; + +/// An arbitrary default fixed cost for host functions that were not researched yet. 
+const DEFAULT_FIXED_COST: Cost = 200; + +const DEFAULT_ADD_COST: u32 = 5_800; +const DEFAULT_ADD_ASSOCIATED_KEY_COST: u32 = 1_200_000; + +const DEFAULT_CALL_CONTRACT_COST: u32 = 300_000_000; + +const DEFAULT_CREATE_PURSE_COST: u32 = 2_500_000_000; +const DEFAULT_GET_BALANCE_COST: u32 = 3_000_000; +const DEFAULT_GET_BLOCKTIME_COST: u32 = 330; +const DEFAULT_GET_CALLER_COST: u32 = 380; +const DEFAULT_GET_KEY_COST: u32 = 2_000; +const DEFAULT_GET_KEY_NAME_SIZE_WEIGHT: u32 = 440; +const DEFAULT_GET_MAIN_PURSE_COST: u32 = 1_300; +const DEFAULT_GET_PHASE_COST: u32 = 710; +const DEFAULT_GET_SYSTEM_CONTRACT_COST: u32 = 1_100; +const DEFAULT_HAS_KEY_COST: u32 = 1_500; +const DEFAULT_HAS_KEY_NAME_SIZE_WEIGHT: u32 = 840; +const DEFAULT_IS_VALID_UREF_COST: u32 = 760; +const DEFAULT_LOAD_NAMED_KEYS_COST: u32 = 42_000; +const DEFAULT_NEW_UREF_COST: u32 = 17_000; +const DEFAULT_NEW_UREF_VALUE_SIZE_WEIGHT: u32 = 590; + +const DEFAULT_PRINT_COST: u32 = 20_000; +const DEFAULT_PRINT_TEXT_SIZE_WEIGHT: u32 = 4_600; + +const DEFAULT_PUT_KEY_COST: u32 = 100_000_000; +const DEFAULT_PUT_KEY_NAME_SIZE_WEIGHT: u32 = 120_000; + +const DEFAULT_READ_HOST_BUFFER_COST: u32 = 3_500; +const DEFAULT_READ_HOST_BUFFER_DEST_SIZE_WEIGHT: u32 = 310; + +const DEFAULT_READ_VALUE_COST: u32 = 60_000; +const DEFAULT_DICTIONARY_GET_COST: u32 = 5_500; +const DEFAULT_DICTIONARY_GET_KEY_SIZE_WEIGHT: u32 = 590; + +const DEFAULT_REMOVE_ASSOCIATED_KEY_COST: u32 = 4_200; + +const DEFAULT_REMOVE_KEY_COST: u32 = 61_000; +const DEFAULT_REMOVE_KEY_NAME_SIZE_WEIGHT: u32 = 3_200; + +const DEFAULT_RET_COST: u32 = 23_000; +const DEFAULT_RET_VALUE_SIZE_WEIGHT: u32 = 420_000; + +const DEFAULT_REVERT_COST: u32 = 500; +const DEFAULT_SET_ACTION_THRESHOLD_COST: u32 = 74_000; +const DEFAULT_TRANSFER_FROM_PURSE_TO_ACCOUNT_COST: u32 = 2_500_000_000; +const DEFAULT_TRANSFER_FROM_PURSE_TO_PURSE_COST: u32 = 82_000_000; +const DEFAULT_TRANSFER_TO_ACCOUNT_COST: u32 = 2_500_000_000; +const DEFAULT_UPDATE_ASSOCIATED_KEY_COST: u32 = 4_200; 
+ +const DEFAULT_WRITE_COST: u32 = 14_000; +const DEFAULT_WRITE_VALUE_SIZE_WEIGHT: u32 = 980; + +const DEFAULT_ARG_CHARGE: u32 = 120_000; + +const DEFAULT_DICTIONARY_PUT_COST: u32 = 9_500; +const DEFAULT_DICTIONARY_PUT_KEY_BYTES_SIZE_WEIGHT: u32 = 1_800; +const DEFAULT_DICTIONARY_PUT_VALUE_SIZE_WEIGHT: u32 = 520; + +/// Default cost for a new dictionary. +pub const DEFAULT_NEW_DICTIONARY_COST: u32 = DEFAULT_NEW_UREF_COST; + +/// Host function cost unit for a new dictionary. +#[allow(unused)] +pub const DEFAULT_HOST_FUNCTION_NEW_DICTIONARY: HostFunction<[Cost; 1]> = + HostFunction::new(DEFAULT_NEW_DICTIONARY_COST, [NOT_USED]); +const DEFAULT_BLAKE2B_COST: u32 = 1_200_000; + +/// Default value that the cost of calling `casper_emit_message` increases by for every new message +/// emitted within an execution. +pub const DEFAULT_COST_INCREASE_PER_MESSAGE_EMITTED: u32 = 50; + +const DEFAULT_MESSAGE_TOPIC_NAME_SIZE_WEIGHT: u32 = 30_000; +const DEFAULT_MESSAGE_PAYLOAD_SIZE_WEIGHT: u32 = 120_000; + +const DEFAULT_CONTRACT_VERSION_ARG_SIZE_WEIGHT: u32 = 120_000; + +const DEFAULT_GENERIC_HASH_COST: u32 = 1_200_000; + +const DEFAULT_GENERIC_HASH_INPUT_COST: u32 = 120_000; + +const DEFAULT_RECOVER_SECP256K1_COST: u32 = 1_300_000; +const DEFAULT_RECOVER_SECP256K1_SIZE_WEIGHT: u32 = 120_000; + +const DEFAULT_VERIFY_SIGNATURE_COST: u32 = 1_300_000; +const DEFAULT_VERIFY_SIGNATURE_SIZE_WEIGHT: u32 = 120_000; + +/// Representation of a host function cost. +/// +/// The total gas cost is equal to `cost` + sum of each argument weight multiplied by the byte size +/// of the data. +#[derive(Copy, Clone, PartialEq, Eq, Deserialize, Serialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[serde(deny_unknown_fields)] +pub struct HostFunction { + /// How much the user is charged for calling the host function. + cost: Cost, + /// Weights of the function arguments. 
+ arguments: T, +} + +impl Default for HostFunction +where + T: Default, +{ + fn default() -> Self { + HostFunction::new(DEFAULT_FIXED_COST, Default::default()) + } +} + +impl HostFunction { + /// Creates a new instance of `HostFunction` with a fixed call cost and argument weights. + pub const fn new(cost: Cost, arguments: T) -> Self { + Self { cost, arguments } + } + + pub fn with_new_static_cost(self, cost: Cost) -> Self { + Self { + cost, + arguments: self.arguments, + } + } + + /// Returns the base gas fee for calling the host function. + pub fn cost(&self) -> Cost { + self.cost + } +} + +impl HostFunction +where + T: Default, +{ + /// Creates a new fixed host function cost with argument weights of zero. + pub fn fixed(cost: Cost) -> Self { + Self { + cost, + ..Default::default() + } + } +} + +impl HostFunction +where + T: AsRef<[Cost]>, +{ + /// Returns a slice containing the argument weights. + pub fn arguments(&self) -> &[Cost] { + self.arguments.as_ref() + } + + /// Calculate gas cost for a host function + pub fn calculate_gas_cost(&self, weights: T) -> Option { + let mut gas = Gas::new(self.cost); + for (argument, weight) in self.arguments.as_ref().iter().zip(weights.as_ref()) { + let lhs = Gas::new(*argument); + let rhs = Gas::new(*weight); + let product = lhs.checked_mul(rhs)?; + gas = gas.checked_add(product)?; + } + Some(gas) + } +} + +impl Add for HostFunction<[Cost; COUNT]> { + type Output = HostFunction<[Cost; COUNT]>; + + fn add(self, rhs: Self) -> Self::Output { + let mut result = HostFunction::new(self.cost + rhs.cost, [0; COUNT]); + for i in 0..COUNT { + result.arguments[i] = self.arguments[i] + rhs.arguments[i]; + } + result + } +} + +impl Zero for HostFunction<[Cost; COUNT]> { + fn zero() -> Self { + HostFunction::new(0, [0; COUNT]) + } + + fn is_zero(&self) -> bool { + !self.arguments.iter().any(|cost| *cost != 0) && self.cost.is_zero() + } +} + +impl Distribution> for Standard +where + Standard: Distribution, + T: AsRef<[Cost]>, +{ + fn 
sample(&self, rng: &mut R) -> HostFunction { + let cost = rng.gen::(); + let arguments = rng.gen(); + HostFunction::new(cost, arguments) + } +} + +impl ToBytes for HostFunction +where + T: AsRef<[Cost]>, +{ + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut ret = bytesrepr::unchecked_allocate_buffer(self); + ret.append(&mut self.cost.to_bytes()?); + for value in self.arguments.as_ref().iter() { + ret.append(&mut value.to_bytes()?); + } + Ok(ret) + } + + fn serialized_length(&self) -> usize { + self.cost.serialized_length() + (COST_SERIALIZED_LENGTH * self.arguments.as_ref().len()) + } +} + +impl FromBytes for HostFunction +where + T: Default + AsMut<[Cost]>, +{ + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (cost, mut bytes) = FromBytes::from_bytes(bytes)?; + let mut arguments = T::default(); + let arguments_mut = arguments.as_mut(); + for ith_argument in arguments_mut { + let (cost, rem) = FromBytes::from_bytes(bytes)?; + *ith_argument = cost; + bytes = rem; + } + Ok((Self { cost, arguments }, bytes)) + } +} + +/// Definition of a host function cost table. +#[derive(Add, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[serde(deny_unknown_fields)] +pub struct HostFunctionCostsV1 { + /// Cost increase for successive calls to `casper_emit_message` within an execution. + pub cost_increase_per_message: u32, + /// Cost of calling the `read_value` host function. + pub read_value: HostFunction<[Cost; 3]>, + /// Cost of calling the `dictionary_get` host function. + pub dictionary_get: HostFunction<[Cost; 3]>, + /// Cost of calling the `write` host function. + pub write: HostFunction<[Cost; 4]>, + /// Cost of calling the `dictionary_put` host function. + pub dictionary_put: HostFunction<[Cost; 4]>, + /// Cost of calling the `add` host function. + pub add: HostFunction<[Cost; 4]>, + /// Cost of calling the `new_uref` host function. 
+ pub new_uref: HostFunction<[Cost; 3]>, + /// Cost of calling the `load_named_keys` host function. + pub load_named_keys: HostFunction<[Cost; 2]>, + /// Cost of calling the `ret` host function. + pub ret: HostFunction<[Cost; 2]>, + /// Cost of calling the `get_key` host function. + pub get_key: HostFunction<[Cost; 5]>, + /// Cost of calling the `has_key` host function. + pub has_key: HostFunction<[Cost; 2]>, + /// Cost of calling the `put_key` host function. + pub put_key: HostFunction<[Cost; 4]>, + /// Cost of calling the `remove_key` host function. + pub remove_key: HostFunction<[Cost; 2]>, + /// Cost of calling the `revert` host function. + pub revert: HostFunction<[Cost; 1]>, + /// Cost of calling the `is_valid_uref` host function. + pub is_valid_uref: HostFunction<[Cost; 2]>, + /// Cost of calling the `add_associated_key` host function. + pub add_associated_key: HostFunction<[Cost; 3]>, + /// Cost of calling the `remove_associated_key` host function. + pub remove_associated_key: HostFunction<[Cost; 2]>, + /// Cost of calling the `update_associated_key` host function. + pub update_associated_key: HostFunction<[Cost; 3]>, + /// Cost of calling the `set_action_threshold` host function. + pub set_action_threshold: HostFunction<[Cost; 2]>, + /// Cost of calling the `get_caller` host function. + pub get_caller: HostFunction<[Cost; 1]>, + /// Cost of calling the `get_blocktime` host function. + pub get_blocktime: HostFunction<[Cost; 1]>, + /// Cost of calling the `create_purse` host function. + pub create_purse: HostFunction<[Cost; 2]>, + /// Cost of calling the `transfer_to_account` host function. + pub transfer_to_account: HostFunction<[Cost; 7]>, + /// Cost of calling the `transfer_from_purse_to_account` host function. + pub transfer_from_purse_to_account: HostFunction<[Cost; 9]>, + /// Cost of calling the `transfer_from_purse_to_purse` host function. 
+ pub transfer_from_purse_to_purse: HostFunction<[Cost; 8]>, + /// Cost of calling the `get_balance` host function. + pub get_balance: HostFunction<[Cost; 3]>, + /// Cost of calling the `get_phase` host function. + pub get_phase: HostFunction<[Cost; 1]>, + /// Cost of calling the `get_system_contract` host function. + pub get_system_contract: HostFunction<[Cost; 3]>, + /// Cost of calling the `get_main_purse` host function. + pub get_main_purse: HostFunction<[Cost; 1]>, + /// Cost of calling the `read_host_buffer` host function. + pub read_host_buffer: HostFunction<[Cost; 3]>, + /// Cost of calling the `create_contract_package_at_hash` host function. + pub create_contract_package_at_hash: HostFunction<[Cost; 2]>, + /// Cost of calling the `create_contract_user_group` host function. + pub create_contract_user_group: HostFunction<[Cost; 8]>, + /// Cost of calling the `add_contract_version` host function. + pub add_contract_version: HostFunction<[Cost; 10]>, + /// Cost of calling the `add_contract_version_with_message_topics` host function. + pub add_contract_version_with_message_topics: HostFunction<[Cost; 11]>, + /// Cost of calling the `add_package_version` host function. + pub add_package_version_with_message_topics: HostFunction<[Cost; 11]>, + /// Cost of calling the `disable_contract_version` host function. + pub disable_contract_version: HostFunction<[Cost; 4]>, + /// Cost of calling the `call_contract` host function. + pub call_contract: HostFunction<[Cost; 7]>, + /// Cost of calling the `call_versioned_contract` host function. + pub call_versioned_contract: HostFunction<[Cost; 9]>, + /// Cost of calling the `get_named_arg_size` host function. + pub get_named_arg_size: HostFunction<[Cost; 3]>, + /// Cost of calling the `get_named_arg` host function. + pub get_named_arg: HostFunction<[Cost; 4]>, + /// Cost of calling the `remove_contract_user_group` host function. 
+ pub remove_contract_user_group: HostFunction<[Cost; 4]>, + /// Cost of calling the `provision_contract_user_group_uref` host function. + pub provision_contract_user_group_uref: HostFunction<[Cost; 5]>, + /// Cost of calling the `remove_contract_user_group_urefs` host function. + pub remove_contract_user_group_urefs: HostFunction<[Cost; 6]>, + /// Cost of calling the `print` host function. + pub print: HostFunction<[Cost; 2]>, + /// Cost of calling the `blake2b` host function. + pub blake2b: HostFunction<[Cost; 4]>, + /// Cost of calling the `next address` host function. + pub random_bytes: HostFunction<[Cost; 2]>, + /// Cost of calling the `enable_contract_version` host function. + pub enable_contract_version: HostFunction<[Cost; 4]>, + /// Cost of calling the `casper_manage_message_topic` host function. + pub manage_message_topic: HostFunction<[Cost; 4]>, + /// Cost of calling the `casper_emit_message` host function. + pub emit_message: HostFunction<[Cost; 4]>, + /// Cost of calling the `get_block_info` host function. + pub get_block_info: HostFunction<[Cost; 2]>, + /// Cost of calling the `generic_hash` host function. + pub generic_hash: HostFunction<[Cost; 5]>, + /// Cost of calling the 'recover_secp256k1' host function. + pub recover_secp256k1: HostFunction<[Cost; 6]>, + /// Cost of calling the 'recover_secp256k1' host function. + pub verify_signature: HostFunction<[Cost; 6]>, + /// Cost of calling the 'call_package_version' host function. 
+ pub call_package_version: HostFunction<[Cost; 11]>, +} + +impl Zero for HostFunctionCostsV1 { + fn zero() -> Self { + Self { + read_value: HostFunction::zero(), + dictionary_get: HostFunction::zero(), + write: HostFunction::zero(), + dictionary_put: HostFunction::zero(), + add: HostFunction::zero(), + new_uref: HostFunction::zero(), + load_named_keys: HostFunction::zero(), + ret: HostFunction::zero(), + get_key: HostFunction::zero(), + has_key: HostFunction::zero(), + put_key: HostFunction::zero(), + remove_key: HostFunction::zero(), + revert: HostFunction::zero(), + is_valid_uref: HostFunction::zero(), + add_associated_key: HostFunction::zero(), + remove_associated_key: HostFunction::zero(), + update_associated_key: HostFunction::zero(), + set_action_threshold: HostFunction::zero(), + get_caller: HostFunction::zero(), + get_blocktime: HostFunction::zero(), + create_purse: HostFunction::zero(), + transfer_to_account: HostFunction::zero(), + transfer_from_purse_to_account: HostFunction::zero(), + transfer_from_purse_to_purse: HostFunction::zero(), + get_balance: HostFunction::zero(), + get_phase: HostFunction::zero(), + get_system_contract: HostFunction::zero(), + get_main_purse: HostFunction::zero(), + read_host_buffer: HostFunction::zero(), + create_contract_package_at_hash: HostFunction::zero(), + create_contract_user_group: HostFunction::zero(), + add_contract_version_with_message_topics: HostFunction::zero(), + add_contract_version: HostFunction::zero(), + add_package_version_with_message_topics: HostFunction::zero(), + disable_contract_version: HostFunction::zero(), + call_contract: HostFunction::zero(), + call_versioned_contract: HostFunction::zero(), + get_named_arg_size: HostFunction::zero(), + get_named_arg: HostFunction::zero(), + remove_contract_user_group: HostFunction::zero(), + provision_contract_user_group_uref: HostFunction::zero(), + remove_contract_user_group_urefs: HostFunction::zero(), + print: HostFunction::zero(), + blake2b: 
HostFunction::zero(), + random_bytes: HostFunction::zero(), + enable_contract_version: HostFunction::zero(), + manage_message_topic: HostFunction::zero(), + emit_message: HostFunction::zero(), + cost_increase_per_message: Zero::zero(), + get_block_info: HostFunction::zero(), + generic_hash: HostFunction::zero(), + recover_secp256k1: HostFunction::zero(), + verify_signature: HostFunction::zero(), + call_package_version: HostFunction::zero(), + } + } + + fn is_zero(&self) -> bool { + let HostFunctionCostsV1 { + cost_increase_per_message, + read_value, + dictionary_get, + write, + dictionary_put, + add, + new_uref, + load_named_keys, + ret, + get_key, + has_key, + put_key, + remove_key, + revert, + is_valid_uref, + add_associated_key, + remove_associated_key, + update_associated_key, + set_action_threshold, + get_caller, + get_blocktime, + create_purse, + transfer_to_account, + transfer_from_purse_to_account, + transfer_from_purse_to_purse, + get_balance, + get_phase, + get_system_contract, + get_main_purse, + read_host_buffer, + create_contract_package_at_hash, + create_contract_user_group, + add_contract_version_with_message_topics, + add_contract_version, + add_package_version_with_message_topics: add_package_version, + disable_contract_version, + call_contract, + call_versioned_contract, + get_named_arg_size, + get_named_arg, + remove_contract_user_group, + provision_contract_user_group_uref, + remove_contract_user_group_urefs, + print, + blake2b, + random_bytes, + enable_contract_version, + manage_message_topic, + emit_message, + get_block_info, + generic_hash, + recover_secp256k1, + verify_signature, + call_package_version, + } = self; + read_value.is_zero() + && dictionary_get.is_zero() + && write.is_zero() + && dictionary_put.is_zero() + && add.is_zero() + && new_uref.is_zero() + && load_named_keys.is_zero() + && ret.is_zero() + && get_key.is_zero() + && has_key.is_zero() + && put_key.is_zero() + && remove_key.is_zero() + && revert.is_zero() + && 
is_valid_uref.is_zero() + && add_associated_key.is_zero() + && remove_associated_key.is_zero() + && update_associated_key.is_zero() + && set_action_threshold.is_zero() + && get_caller.is_zero() + && get_blocktime.is_zero() + && create_purse.is_zero() + && transfer_to_account.is_zero() + && transfer_from_purse_to_account.is_zero() + && transfer_from_purse_to_purse.is_zero() + && get_balance.is_zero() + && get_phase.is_zero() + && get_system_contract.is_zero() + && get_main_purse.is_zero() + && read_host_buffer.is_zero() + && create_contract_package_at_hash.is_zero() + && create_contract_user_group.is_zero() + && add_contract_version.is_zero() + && disable_contract_version.is_zero() + && call_contract.is_zero() + && call_versioned_contract.is_zero() + && get_named_arg_size.is_zero() + && get_named_arg.is_zero() + && remove_contract_user_group.is_zero() + && provision_contract_user_group_uref.is_zero() + && remove_contract_user_group_urefs.is_zero() + && print.is_zero() + && blake2b.is_zero() + && random_bytes.is_zero() + && enable_contract_version.is_zero() + && manage_message_topic.is_zero() + && emit_message.is_zero() + && cost_increase_per_message.is_zero() + && add_package_version.is_zero() + && get_block_info.is_zero() + && add_contract_version_with_message_topics.is_zero() + && generic_hash.is_zero() + && recover_secp256k1.is_zero() + && verify_signature.is_zero() + && call_package_version.is_zero() + } +} + +impl Default for HostFunctionCostsV1 { + fn default() -> Self { + Self { + read_value: HostFunction::new( + DEFAULT_READ_VALUE_COST, + [NOT_USED, DEFAULT_ARG_CHARGE, NOT_USED], + ), + dictionary_get: HostFunction::new( + DEFAULT_DICTIONARY_GET_COST, + [NOT_USED, DEFAULT_DICTIONARY_GET_KEY_SIZE_WEIGHT, NOT_USED], + ), + write: HostFunction::new( + DEFAULT_WRITE_COST, + [ + NOT_USED, + NOT_USED, + NOT_USED, + DEFAULT_WRITE_VALUE_SIZE_WEIGHT, + ], + ), + dictionary_put: HostFunction::new( + DEFAULT_DICTIONARY_PUT_COST, + [ + NOT_USED, + 
DEFAULT_DICTIONARY_PUT_KEY_BYTES_SIZE_WEIGHT, + NOT_USED, + DEFAULT_DICTIONARY_PUT_VALUE_SIZE_WEIGHT, + ], + ), + add: HostFunction::fixed(DEFAULT_ADD_COST), + new_uref: HostFunction::new( + DEFAULT_NEW_UREF_COST, + [NOT_USED, NOT_USED, DEFAULT_NEW_UREF_VALUE_SIZE_WEIGHT], + ), + load_named_keys: HostFunction::fixed(DEFAULT_LOAD_NAMED_KEYS_COST), + ret: HostFunction::new(DEFAULT_RET_COST, [NOT_USED, DEFAULT_RET_VALUE_SIZE_WEIGHT]), + get_key: HostFunction::new( + DEFAULT_GET_KEY_COST, + [ + NOT_USED, + DEFAULT_GET_KEY_NAME_SIZE_WEIGHT, + NOT_USED, + NOT_USED, + NOT_USED, + ], + ), + has_key: HostFunction::new( + DEFAULT_HAS_KEY_COST, + [NOT_USED, DEFAULT_HAS_KEY_NAME_SIZE_WEIGHT], + ), + put_key: HostFunction::new( + DEFAULT_PUT_KEY_COST, + [ + NOT_USED, + DEFAULT_PUT_KEY_NAME_SIZE_WEIGHT, + NOT_USED, + DEFAULT_ARG_CHARGE, + ], + ), + remove_key: HostFunction::new( + DEFAULT_REMOVE_KEY_COST, + [NOT_USED, DEFAULT_REMOVE_KEY_NAME_SIZE_WEIGHT], + ), + revert: HostFunction::fixed(DEFAULT_REVERT_COST), + is_valid_uref: HostFunction::fixed(DEFAULT_IS_VALID_UREF_COST), + add_associated_key: HostFunction::fixed(DEFAULT_ADD_ASSOCIATED_KEY_COST), + remove_associated_key: HostFunction::fixed(DEFAULT_REMOVE_ASSOCIATED_KEY_COST), + update_associated_key: HostFunction::fixed(DEFAULT_UPDATE_ASSOCIATED_KEY_COST), + set_action_threshold: HostFunction::fixed(DEFAULT_SET_ACTION_THRESHOLD_COST), + get_caller: HostFunction::fixed(DEFAULT_GET_CALLER_COST), + get_blocktime: HostFunction::fixed(DEFAULT_GET_BLOCKTIME_COST), + create_purse: HostFunction::fixed(DEFAULT_CREATE_PURSE_COST), + transfer_to_account: HostFunction::fixed(DEFAULT_TRANSFER_TO_ACCOUNT_COST), + transfer_from_purse_to_account: HostFunction::fixed( + DEFAULT_TRANSFER_FROM_PURSE_TO_ACCOUNT_COST, + ), + transfer_from_purse_to_purse: HostFunction::fixed( + DEFAULT_TRANSFER_FROM_PURSE_TO_PURSE_COST, + ), + get_balance: HostFunction::fixed(DEFAULT_GET_BALANCE_COST), + get_phase: HostFunction::fixed(DEFAULT_GET_PHASE_COST), + 
get_system_contract: HostFunction::fixed(DEFAULT_GET_SYSTEM_CONTRACT_COST), + get_main_purse: HostFunction::fixed(DEFAULT_GET_MAIN_PURSE_COST), + read_host_buffer: HostFunction::new( + DEFAULT_READ_HOST_BUFFER_COST, + [ + NOT_USED, + DEFAULT_READ_HOST_BUFFER_DEST_SIZE_WEIGHT, + NOT_USED, + ], + ), + create_contract_package_at_hash: HostFunction::default(), + create_contract_user_group: HostFunction::default(), + add_package_version_with_message_topics: HostFunction::new( + DEFAULT_FIXED_COST, + [ + NOT_USED, + NOT_USED, + NOT_USED, + NOT_USED, + DEFAULT_CONTRACT_VERSION_ARG_SIZE_WEIGHT, + NOT_USED, + NOT_USED, + NOT_USED, + DEFAULT_MESSAGE_TOPIC_NAME_SIZE_WEIGHT, + NOT_USED, + NOT_USED, + ], + ), + add_contract_version: HostFunction::new( + DEFAULT_FIXED_COST, + [ + NOT_USED, + NOT_USED, + NOT_USED, + NOT_USED, + DEFAULT_CONTRACT_VERSION_ARG_SIZE_WEIGHT, + NOT_USED, + NOT_USED, + NOT_USED, + NOT_USED, + NOT_USED, + ], + ), + add_contract_version_with_message_topics: HostFunction::new( + DEFAULT_FIXED_COST, + [ + NOT_USED, + NOT_USED, + NOT_USED, + NOT_USED, + DEFAULT_CONTRACT_VERSION_ARG_SIZE_WEIGHT, + NOT_USED, + NOT_USED, + NOT_USED, + DEFAULT_MESSAGE_TOPIC_NAME_SIZE_WEIGHT, + NOT_USED, + NOT_USED, + ], + ), + disable_contract_version: HostFunction::default(), + call_contract: HostFunction::new( + DEFAULT_CALL_CONTRACT_COST, + [ + NOT_USED, + NOT_USED, + NOT_USED, + DEFAULT_ARG_CHARGE, + NOT_USED, + DEFAULT_ARG_CHARGE, + NOT_USED, + ], + ), + call_versioned_contract: HostFunction::new( + DEFAULT_CALL_CONTRACT_COST, + [ + NOT_USED, + NOT_USED, + NOT_USED, + NOT_USED, + NOT_USED, + DEFAULT_ARG_CHARGE, + NOT_USED, + DEFAULT_ARG_CHARGE, + NOT_USED, + ], + ), + get_named_arg_size: HostFunction::default(), + get_named_arg: HostFunction::new( + 200, + [NOT_USED, DEFAULT_ARG_CHARGE, NOT_USED, DEFAULT_ARG_CHARGE], + ), + remove_contract_user_group: HostFunction::default(), + provision_contract_user_group_uref: HostFunction::default(), + remove_contract_user_group_urefs: 
HostFunction::new( + 200, + [ + NOT_USED, + NOT_USED, + NOT_USED, + NOT_USED, + NOT_USED, + DEFAULT_ARG_CHARGE, + ], + ), + print: HostFunction::new( + DEFAULT_PRINT_COST, + [NOT_USED, DEFAULT_PRINT_TEXT_SIZE_WEIGHT], + ), + blake2b: HostFunction::new( + DEFAULT_BLAKE2B_COST, + [NOT_USED, DEFAULT_ARG_CHARGE, NOT_USED, NOT_USED], + ), + random_bytes: HostFunction::default(), + enable_contract_version: HostFunction::default(), + manage_message_topic: HostFunction::new( + DEFAULT_FIXED_COST, + [ + NOT_USED, + DEFAULT_MESSAGE_TOPIC_NAME_SIZE_WEIGHT, + NOT_USED, + NOT_USED, + ], + ), + emit_message: HostFunction::new( + DEFAULT_FIXED_COST, + [ + NOT_USED, + DEFAULT_MESSAGE_TOPIC_NAME_SIZE_WEIGHT, + NOT_USED, + DEFAULT_MESSAGE_PAYLOAD_SIZE_WEIGHT, + ], + ), + generic_hash: HostFunction::new( + DEFAULT_GENERIC_HASH_COST, + [ + NOT_USED, + DEFAULT_GENERIC_HASH_INPUT_COST, + NOT_USED, + NOT_USED, + NOT_USED, + ], + ), + cost_increase_per_message: DEFAULT_COST_INCREASE_PER_MESSAGE_EMITTED, + get_block_info: HostFunction::new(DEFAULT_GET_BLOCKTIME_COST, [NOT_USED, NOT_USED]), + recover_secp256k1: HostFunction::new( + DEFAULT_RECOVER_SECP256K1_COST, + [ + NOT_USED, + DEFAULT_RECOVER_SECP256K1_SIZE_WEIGHT, + NOT_USED, + NOT_USED, + NOT_USED, + NOT_USED, + ], + ), + verify_signature: HostFunction::new( + DEFAULT_VERIFY_SIGNATURE_COST, + [ + NOT_USED, + DEFAULT_VERIFY_SIGNATURE_SIZE_WEIGHT, + NOT_USED, + NOT_USED, + NOT_USED, + NOT_USED, + ], + ), + call_package_version: HostFunction::new( + DEFAULT_CALL_CONTRACT_COST, + [ + NOT_USED, + NOT_USED, + NOT_USED, + NOT_USED, + NOT_USED, + NOT_USED, + NOT_USED, + DEFAULT_ARG_CHARGE, + NOT_USED, + DEFAULT_ARG_CHARGE, + NOT_USED, + ], + ), + } + } +} + +impl ToBytes for HostFunctionCostsV1 { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut ret = bytesrepr::unchecked_allocate_buffer(self); + ret.append(&mut self.read_value.to_bytes()?); + ret.append(&mut self.dictionary_get.to_bytes()?); + ret.append(&mut 
self.write.to_bytes()?); + ret.append(&mut self.dictionary_put.to_bytes()?); + ret.append(&mut self.add.to_bytes()?); + ret.append(&mut self.new_uref.to_bytes()?); + ret.append(&mut self.load_named_keys.to_bytes()?); + ret.append(&mut self.ret.to_bytes()?); + ret.append(&mut self.get_key.to_bytes()?); + ret.append(&mut self.has_key.to_bytes()?); + ret.append(&mut self.put_key.to_bytes()?); + ret.append(&mut self.remove_key.to_bytes()?); + ret.append(&mut self.revert.to_bytes()?); + ret.append(&mut self.is_valid_uref.to_bytes()?); + ret.append(&mut self.add_associated_key.to_bytes()?); + ret.append(&mut self.remove_associated_key.to_bytes()?); + ret.append(&mut self.update_associated_key.to_bytes()?); + ret.append(&mut self.set_action_threshold.to_bytes()?); + ret.append(&mut self.get_caller.to_bytes()?); + ret.append(&mut self.get_blocktime.to_bytes()?); + ret.append(&mut self.create_purse.to_bytes()?); + ret.append(&mut self.transfer_to_account.to_bytes()?); + ret.append(&mut self.transfer_from_purse_to_account.to_bytes()?); + ret.append(&mut self.transfer_from_purse_to_purse.to_bytes()?); + ret.append(&mut self.get_balance.to_bytes()?); + ret.append(&mut self.get_phase.to_bytes()?); + ret.append(&mut self.get_system_contract.to_bytes()?); + ret.append(&mut self.get_main_purse.to_bytes()?); + ret.append(&mut self.read_host_buffer.to_bytes()?); + ret.append(&mut self.create_contract_package_at_hash.to_bytes()?); + ret.append(&mut self.create_contract_user_group.to_bytes()?); + ret.append(&mut self.add_contract_version_with_message_topics.to_bytes()?); + ret.append(&mut self.add_contract_version.to_bytes()?); + ret.append(&mut self.add_package_version_with_message_topics.to_bytes()?); + ret.append(&mut self.disable_contract_version.to_bytes()?); + ret.append(&mut self.call_contract.to_bytes()?); + ret.append(&mut self.call_versioned_contract.to_bytes()?); + ret.append(&mut self.get_named_arg_size.to_bytes()?); + ret.append(&mut self.get_named_arg.to_bytes()?); + 
ret.append(&mut self.remove_contract_user_group.to_bytes()?); + ret.append(&mut self.provision_contract_user_group_uref.to_bytes()?); + ret.append(&mut self.remove_contract_user_group_urefs.to_bytes()?); + ret.append(&mut self.print.to_bytes()?); + ret.append(&mut self.blake2b.to_bytes()?); + ret.append(&mut self.random_bytes.to_bytes()?); + ret.append(&mut self.enable_contract_version.to_bytes()?); + ret.append(&mut self.manage_message_topic.to_bytes()?); + ret.append(&mut self.emit_message.to_bytes()?); + ret.append(&mut self.cost_increase_per_message.to_bytes()?); + ret.append(&mut self.get_block_info.to_bytes()?); + ret.append(&mut self.generic_hash.to_bytes()?); + ret.append(&mut self.recover_secp256k1.to_bytes()?); + ret.append(&mut self.verify_signature.to_bytes()?); + ret.append(&mut self.call_package_version.to_bytes()?); + Ok(ret) + } + + fn serialized_length(&self) -> usize { + self.read_value.serialized_length() + + self.dictionary_get.serialized_length() + + self.write.serialized_length() + + self.dictionary_put.serialized_length() + + self.add.serialized_length() + + self.new_uref.serialized_length() + + self.load_named_keys.serialized_length() + + self.ret.serialized_length() + + self.get_key.serialized_length() + + self.has_key.serialized_length() + + self.put_key.serialized_length() + + self.remove_key.serialized_length() + + self.revert.serialized_length() + + self.is_valid_uref.serialized_length() + + self.add_associated_key.serialized_length() + + self.remove_associated_key.serialized_length() + + self.update_associated_key.serialized_length() + + self.set_action_threshold.serialized_length() + + self.get_caller.serialized_length() + + self.get_blocktime.serialized_length() + + self.create_purse.serialized_length() + + self.transfer_to_account.serialized_length() + + self.transfer_from_purse_to_account.serialized_length() + + self.transfer_from_purse_to_purse.serialized_length() + + self.get_balance.serialized_length() + + 
self.get_phase.serialized_length() + + self.get_system_contract.serialized_length() + + self.get_main_purse.serialized_length() + + self.read_host_buffer.serialized_length() + + self.create_contract_package_at_hash.serialized_length() + + self.create_contract_user_group.serialized_length() + + self + .add_contract_version_with_message_topics + .serialized_length() + + self.add_contract_version.serialized_length() + + self + .add_package_version_with_message_topics + .serialized_length() + + self.disable_contract_version.serialized_length() + + self.call_contract.serialized_length() + + self.call_versioned_contract.serialized_length() + + self.get_named_arg_size.serialized_length() + + self.get_named_arg.serialized_length() + + self.remove_contract_user_group.serialized_length() + + self.provision_contract_user_group_uref.serialized_length() + + self.remove_contract_user_group_urefs.serialized_length() + + self.print.serialized_length() + + self.blake2b.serialized_length() + + self.random_bytes.serialized_length() + + self.enable_contract_version.serialized_length() + + self.manage_message_topic.serialized_length() + + self.emit_message.serialized_length() + + self.cost_increase_per_message.serialized_length() + + self.get_block_info.serialized_length() + + self.generic_hash.serialized_length() + + self.recover_secp256k1.serialized_length() + + self.verify_signature.serialized_length() + + self.call_package_version.serialized_length() + } +} + +impl FromBytes for HostFunctionCostsV1 { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (read_value, rem) = FromBytes::from_bytes(bytes)?; + let (dictionary_get, rem) = FromBytes::from_bytes(rem)?; + let (write, rem) = FromBytes::from_bytes(rem)?; + let (dictionary_put, rem) = FromBytes::from_bytes(rem)?; + let (add, rem) = FromBytes::from_bytes(rem)?; + let (new_uref, rem) = FromBytes::from_bytes(rem)?; + let (load_named_keys, rem) = FromBytes::from_bytes(rem)?; + let (ret, rem) = 
FromBytes::from_bytes(rem)?; + let (get_key, rem) = FromBytes::from_bytes(rem)?; + let (has_key, rem) = FromBytes::from_bytes(rem)?; + let (put_key, rem) = FromBytes::from_bytes(rem)?; + let (remove_key, rem) = FromBytes::from_bytes(rem)?; + let (revert, rem) = FromBytes::from_bytes(rem)?; + let (is_valid_uref, rem) = FromBytes::from_bytes(rem)?; + let (add_associated_key, rem) = FromBytes::from_bytes(rem)?; + let (remove_associated_key, rem) = FromBytes::from_bytes(rem)?; + let (update_associated_key, rem) = FromBytes::from_bytes(rem)?; + let (set_action_threshold, rem) = FromBytes::from_bytes(rem)?; + let (get_caller, rem) = FromBytes::from_bytes(rem)?; + let (get_blocktime, rem) = FromBytes::from_bytes(rem)?; + let (create_purse, rem) = FromBytes::from_bytes(rem)?; + let (transfer_to_account, rem) = FromBytes::from_bytes(rem)?; + let (transfer_from_purse_to_account, rem) = FromBytes::from_bytes(rem)?; + let (transfer_from_purse_to_purse, rem) = FromBytes::from_bytes(rem)?; + let (get_balance, rem) = FromBytes::from_bytes(rem)?; + let (get_phase, rem) = FromBytes::from_bytes(rem)?; + let (get_system_contract, rem) = FromBytes::from_bytes(rem)?; + let (get_main_purse, rem) = FromBytes::from_bytes(rem)?; + let (read_host_buffer, rem) = FromBytes::from_bytes(rem)?; + let (create_contract_package_at_hash, rem) = FromBytes::from_bytes(rem)?; + let (create_contract_user_group, rem) = FromBytes::from_bytes(rem)?; + let (add_contract_version_with_message_topics, rem) = FromBytes::from_bytes(rem)?; + let (add_contract_version, rem) = FromBytes::from_bytes(rem)?; + let (add_package_version_with_message_topics, rem) = FromBytes::from_bytes(rem)?; + let (disable_contract_version, rem) = FromBytes::from_bytes(rem)?; + let (call_contract, rem) = FromBytes::from_bytes(rem)?; + let (call_versioned_contract, rem) = FromBytes::from_bytes(rem)?; + let (get_named_arg_size, rem) = FromBytes::from_bytes(rem)?; + let (get_named_arg, rem) = FromBytes::from_bytes(rem)?; + let 
(remove_contract_user_group, rem) = FromBytes::from_bytes(rem)?; + let (provision_contract_user_group_uref, rem) = FromBytes::from_bytes(rem)?; + let (remove_contract_user_group_urefs, rem) = FromBytes::from_bytes(rem)?; + let (print, rem) = FromBytes::from_bytes(rem)?; + let (blake2b, rem) = FromBytes::from_bytes(rem)?; + let (random_bytes, rem) = FromBytes::from_bytes(rem)?; + let (enable_contract_version, rem) = FromBytes::from_bytes(rem)?; + let (manage_message_topic, rem) = FromBytes::from_bytes(rem)?; + let (emit_message, rem) = FromBytes::from_bytes(rem)?; + let (cost_increase_per_message, rem) = FromBytes::from_bytes(rem)?; + let (get_block_info, rem) = FromBytes::from_bytes(rem)?; + let (generic_hash, rem) = FromBytes::from_bytes(rem)?; + let (recover_secp256k1, rem) = FromBytes::from_bytes(rem)?; + let (verify_signature, rem) = FromBytes::from_bytes(rem)?; + let (call_package_version, rem) = FromBytes::from_bytes(rem)?; + Ok(( + HostFunctionCostsV1 { + read_value, + dictionary_get, + write, + dictionary_put, + add, + new_uref, + load_named_keys, + ret, + get_key, + has_key, + put_key, + remove_key, + revert, + is_valid_uref, + add_associated_key, + remove_associated_key, + update_associated_key, + set_action_threshold, + get_caller, + get_blocktime, + create_purse, + transfer_to_account, + transfer_from_purse_to_account, + transfer_from_purse_to_purse, + get_balance, + get_phase, + get_system_contract, + get_main_purse, + read_host_buffer, + create_contract_package_at_hash, + create_contract_user_group, + add_contract_version_with_message_topics, + add_contract_version, + add_package_version_with_message_topics, + disable_contract_version, + call_contract, + call_versioned_contract, + get_named_arg_size, + get_named_arg, + remove_contract_user_group, + provision_contract_user_group_uref, + remove_contract_user_group_urefs, + print, + blake2b, + random_bytes, + enable_contract_version, + manage_message_topic, + emit_message, + cost_increase_per_message, + 
get_block_info, + generic_hash, + recover_secp256k1, + verify_signature, + call_package_version, + }, + rem, + )) + } +} + +#[cfg(any(feature = "testing", test))] +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> HostFunctionCostsV1 { + HostFunctionCostsV1 { + read_value: rng.gen(), + dictionary_get: rng.gen(), + write: rng.gen(), + dictionary_put: rng.gen(), + add: rng.gen(), + new_uref: rng.gen(), + load_named_keys: rng.gen(), + ret: rng.gen(), + get_key: rng.gen(), + has_key: rng.gen(), + put_key: rng.gen(), + remove_key: rng.gen(), + revert: rng.gen(), + is_valid_uref: rng.gen(), + add_associated_key: rng.gen(), + remove_associated_key: rng.gen(), + update_associated_key: rng.gen(), + set_action_threshold: rng.gen(), + get_caller: rng.gen(), + get_blocktime: rng.gen(), + create_purse: rng.gen(), + transfer_to_account: rng.gen(), + transfer_from_purse_to_account: rng.gen(), + transfer_from_purse_to_purse: rng.gen(), + get_balance: rng.gen(), + get_phase: rng.gen(), + get_system_contract: rng.gen(), + get_main_purse: rng.gen(), + read_host_buffer: rng.gen(), + create_contract_package_at_hash: rng.gen(), + create_contract_user_group: rng.gen(), + add_contract_version_with_message_topics: rng.gen(), + add_contract_version: rng.gen(), + add_package_version_with_message_topics: rng.gen(), + disable_contract_version: rng.gen(), + call_contract: rng.gen(), + call_versioned_contract: rng.gen(), + get_named_arg_size: rng.gen(), + get_named_arg: rng.gen(), + remove_contract_user_group: rng.gen(), + provision_contract_user_group_uref: rng.gen(), + remove_contract_user_group_urefs: rng.gen(), + print: rng.gen(), + blake2b: rng.gen(), + random_bytes: rng.gen(), + enable_contract_version: rng.gen(), + manage_message_topic: rng.gen(), + emit_message: rng.gen(), + cost_increase_per_message: rng.gen(), + get_block_info: rng.gen(), + generic_hash: rng.gen(), + recover_secp256k1: rng.gen(), + verify_signature: rng.gen(), + call_package_version: rng.gen(), + } + 
} +} + +#[doc(hidden)] +#[cfg(any(feature = "gens", test))] +pub mod gens { + use proptest::{num, prelude::*}; + + use crate::{HostFunction, HostFunctionCost, HostFunctionCostsV1}; + + #[allow(unused)] + pub fn host_function_cost_arb() -> impl Strategy> { + (any::(), any::()) + .prop_map(|(cost, arguments)| HostFunction::new(cost, arguments)) + } + + prop_compose! { + pub fn host_function_costs_arb() ( + read_value in host_function_cost_arb(), + dictionary_get in host_function_cost_arb(), + write in host_function_cost_arb(), + dictionary_put in host_function_cost_arb(), + add in host_function_cost_arb(), + new_uref in host_function_cost_arb(), + load_named_keys in host_function_cost_arb(), + ret in host_function_cost_arb(), + get_key in host_function_cost_arb(), + has_key in host_function_cost_arb(), + put_key in host_function_cost_arb(), + remove_key in host_function_cost_arb(), + revert in host_function_cost_arb(), + is_valid_uref in host_function_cost_arb(), + add_associated_key in host_function_cost_arb(), + remove_associated_key in host_function_cost_arb(), + update_associated_key in host_function_cost_arb(), + set_action_threshold in host_function_cost_arb(), + get_caller in host_function_cost_arb(), + get_blocktime in host_function_cost_arb(), + create_purse in host_function_cost_arb(), + transfer_to_account in host_function_cost_arb(), + transfer_from_purse_to_account in host_function_cost_arb(), + transfer_from_purse_to_purse in host_function_cost_arb(), + get_balance in host_function_cost_arb(), + get_phase in host_function_cost_arb(), + get_system_contract in host_function_cost_arb(), + get_main_purse in host_function_cost_arb(), + read_host_buffer in host_function_cost_arb(), + create_contract_package_at_hash in host_function_cost_arb(), + create_contract_user_group in host_function_cost_arb(), + add_contract_version_with_message_topics in host_function_cost_arb(), + add_contract_version in host_function_cost_arb(), + 
add_package_version_with_message_topics in host_function_cost_arb(), + disable_contract_version in host_function_cost_arb(), + call_contract in host_function_cost_arb(), + call_versioned_contract in host_function_cost_arb(), + get_named_arg_size in host_function_cost_arb(), + get_named_arg in host_function_cost_arb(), + remove_contract_user_group in host_function_cost_arb(), + provision_contract_user_group_uref in host_function_cost_arb(), + remove_contract_user_group_urefs in host_function_cost_arb(), + print in host_function_cost_arb(), + blake2b in host_function_cost_arb(), + random_bytes in host_function_cost_arb(), + enable_contract_version in host_function_cost_arb(), + manage_message_topic in host_function_cost_arb(), + emit_message in host_function_cost_arb(), + cost_increase_per_message in num::u32::ANY, + get_block_info in host_function_cost_arb(), + generic_hash in host_function_cost_arb(), + recover_secp256k1 in host_function_cost_arb(), + verify_signature in host_function_cost_arb(), + call_package_version in host_function_cost_arb(), + ) -> HostFunctionCostsV1 { + HostFunctionCostsV1 { + read_value, + dictionary_get, + write, + dictionary_put, + add, + new_uref, + load_named_keys, + ret, + get_key, + has_key, + put_key, + remove_key, + revert, + is_valid_uref, + add_associated_key, + remove_associated_key, + update_associated_key, + set_action_threshold, + get_caller, + get_blocktime, + create_purse, + transfer_to_account, + transfer_from_purse_to_account, + transfer_from_purse_to_purse, + get_balance, + get_phase, + get_system_contract, + get_main_purse, + read_host_buffer, + create_contract_package_at_hash, + create_contract_user_group, + add_contract_version_with_message_topics, + add_contract_version, + add_package_version_with_message_topics, + disable_contract_version, + call_contract, + call_versioned_contract, + get_named_arg_size, + get_named_arg, + remove_contract_user_group, + provision_contract_user_group_uref, + 
remove_contract_user_group_urefs, + print, + blake2b, + random_bytes, + enable_contract_version, + manage_message_topic, + emit_message, + cost_increase_per_message, + get_block_info, + generic_hash, + recover_secp256k1, + verify_signature, + call_package_version, + } + } + } +} + +#[cfg(test)] +mod tests { + use crate::U512; + + use super::*; + + const COST: Cost = 42; + const ARGUMENT_COSTS: [Cost; 3] = [123, 456, 789]; + const WEIGHTS: [Cost; 3] = [1000, 1100, 1200]; + + #[test] + fn calculate_gas_cost_for_host_function() { + let host_function = HostFunction::new(COST, ARGUMENT_COSTS); + let expected_cost = COST + + (ARGUMENT_COSTS[0] * WEIGHTS[0]) + + (ARGUMENT_COSTS[1] * WEIGHTS[1]) + + (ARGUMENT_COSTS[2] * WEIGHTS[2]); + assert_eq!( + host_function.calculate_gas_cost(WEIGHTS), + Some(Gas::new(expected_cost)) + ); + } + + #[test] + fn calculate_gas_cost_would_overflow() { + let large_value = Cost::MAX; + + let host_function = HostFunction::new( + large_value, + [large_value, large_value, large_value, large_value], + ); + + let lhs = + host_function.calculate_gas_cost([large_value, large_value, large_value, large_value]); + + let large_value = U512::from(large_value); + let rhs = large_value + (U512::from(4) * large_value * large_value); + + assert_eq!(lhs, Some(Gas::new(rhs))); + } +} + +#[cfg(test)] +mod proptests { + use proptest::prelude::*; + + use crate::bytesrepr; + + use super::*; + + type Signature = [Cost; 10]; + + proptest! 
{ + #[test] + fn test_host_function(host_function in gens::host_function_cost_arb::()) { + bytesrepr::test_serialization_roundtrip(&host_function); + } + + #[test] + fn test_host_function_costs(host_function_costs in gens::host_function_costs_arb()) { + bytesrepr::test_serialization_roundtrip(&host_function_costs); + } + } +} diff --git a/types/src/chainspec/vm_config/host_function_costs_v2.rs b/types/src/chainspec/vm_config/host_function_costs_v2.rs new file mode 100644 index 0000000000..d00723d90f --- /dev/null +++ b/types/src/chainspec/vm_config/host_function_costs_v2.rs @@ -0,0 +1,539 @@ +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::{distributions::Standard, prelude::Distribution, Rng}; +use serde::{Deserialize, Serialize}; + +use crate::{ + bytesrepr::{self, FromBytes, ToBytes, U64_SERIALIZED_LENGTH}, + Gas, +}; + +/// Representation of argument's cost. +pub type Cost = u64; + +/// Representation of a host function cost. +/// +/// The total gas cost is equal to `cost` + sum of each argument weight multiplied by the byte size +/// of the data. +/// +/// NOTE: This is duplicating the `HostFunction` struct from the `casper-types` crate +/// but to avoid changing the public API of that crate, we are creating a new struct +/// with the same name and fields. +/// +/// There is some opportunity to unify the code to turn `HostFunction` into a generic struct +/// that generalizes over the cost type, but that would require a lot of work and +/// is not worth it at this time. +#[derive(Copy, Clone, PartialEq, Eq, Deserialize, Serialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[serde(deny_unknown_fields)] +pub struct HostFunctionV2 { + /// How much the user is charged for calling the host function. + cost: Cost, + /// Weights of the function arguments. 
+ arguments: T, +} + +impl Default for HostFunctionV2 +where + T: Default, +{ + fn default() -> Self { + Self { + cost: DEFAULT_FIXED_COST, + arguments: T::default(), + } + } +} + +impl HostFunctionV2 { + /// Creates a new instance of `HostFunction` with a fixed call cost and argument weights. + pub const fn new(cost: Cost, arguments: T) -> Self { + Self { cost, arguments } + } + + pub fn with_new_static_cost(self, cost: Cost) -> Self { + Self { + cost, + arguments: self.arguments, + } + } + + /// Returns the base gas fee for calling the host function. + pub fn cost(&self) -> Cost { + self.cost + } +} + +impl HostFunctionV2 +where + T: Default, +{ + /// Creates a new fixed host function cost with argument weights of zero. + pub fn fixed(cost: Cost) -> Self { + Self { + cost, + ..Default::default() + } + } + + pub fn zero() -> Self { + Self { + cost: Default::default(), + arguments: Default::default(), + } + } +} + +impl HostFunctionV2 +where + T: AsRef<[Cost]>, +{ + /// Returns a slice containing the argument weights. 
+ pub fn arguments(&self) -> &[Cost] { + self.arguments.as_ref() + } + + /// Calculate gas cost for a host function + pub fn calculate_gas_cost(&self, weights: T) -> Option { + let mut gas = Gas::new(self.cost); + for (argument, weight) in self.arguments.as_ref().iter().zip(weights.as_ref()) { + let lhs = Gas::new(*argument); + let rhs = Gas::new(*weight); + let product = lhs.checked_mul(rhs)?; + gas = gas.checked_add(product)?; + } + Some(gas) + } +} + +#[cfg(any(feature = "testing", test))] +impl Distribution> for Standard +where + Standard: Distribution, + T: AsMut<[Cost]> + Default, +{ + fn sample(&self, rng: &mut R) -> HostFunctionV2 { + let cost = rng.gen::() as u64; + let mut arguments = T::default(); + for arg in arguments.as_mut() { + *arg = rng.gen::() as u64; + } + + HostFunctionV2::new(cost, arguments) + } +} + +impl ToBytes for HostFunctionV2 +where + T: AsRef<[Cost]>, +{ + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut ret = bytesrepr::unchecked_allocate_buffer(self); + ret.append(&mut self.cost.to_bytes()?); + for value in self.arguments.as_ref().iter() { + ret.append(&mut value.to_bytes()?); + } + Ok(ret) + } + + fn serialized_length(&self) -> usize { + self.cost.serialized_length() + (U64_SERIALIZED_LENGTH * self.arguments.as_ref().len()) + } +} + +impl FromBytes for HostFunctionV2 +where + T: Default + AsMut<[Cost]>, +{ + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (cost, mut bytes) = FromBytes::from_bytes(bytes)?; + let mut arguments = T::default(); + let arguments_mut = arguments.as_mut(); + for ith_argument in arguments_mut { + let (cost, rem) = FromBytes::from_bytes(bytes)?; + *ith_argument = cost; + bytes = rem; + } + Ok((Self { cost, arguments }, bytes)) + } +} +/// An identifier that represents an unused argument. +const NOT_USED: Cost = 0; + +/// An arbitrary default fixed cost for host functions that were not researched yet. 
+const DEFAULT_FIXED_COST: Cost = 200; + +const DEFAULT_CALL_COST: u64 = 10_000; +const DEFAULT_ENV_BALANCE_COST: u64 = 100; + +const DEFAULT_PRINT_COST: Cost = 100; + +const DEFAULT_READ_COST: Cost = 1_000; +const DEFAULT_READ_KEY_SIZE_WEIGHT: Cost = 100; + +const DEFAULT_RET_COST: Cost = 300; +const DEFAULT_RET_VALUE_SIZE_WEIGHT: Cost = 100; + +const DEFAULT_TRANSFER_COST: Cost = 2_500_000_000; + +const DEFAULT_WRITE_COST: Cost = 25_000; +const DEFAULT_WRITE_SIZE_WEIGHT: Cost = 100_000; + +const DEFAULT_REMOVE_COST: Cost = 15_000; + +const DEFAULT_COPY_INPUT_COST: Cost = 300; +const DEFAULT_COPY_INPUT_VALUE_SIZE_WEIGHT: Cost = 0; + +const DEFAULT_CREATE_COST: Cost = 0; +const DEFAULT_CREATE_CODE_SIZE_WEIGHT: Cost = 0; +const DEFAULT_CREATE_ENTRYPOINT_SIZE_WEIGHT: Cost = 0; +const DEFAULT_CREATE_INPUT_SIZE_WEIGHT: Cost = 0; +const DEFAULT_CREATE_SEED_SIZE_WEIGHT: Cost = 0; + +const DEFAULT_EMIT_COST: Cost = 200; +const DEFAULT_EMIT_TOPIC_SIZE_WEIGHT: Cost = 100; +const DEFAULT_EMIT_PAYLOAD_SIZE_HEIGHT: Cost = 100; + +const DEFAULT_ENV_INFO_COST: Cost = 10_000; + +/// Definition of a host function cost table. +#[derive(Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[serde(deny_unknown_fields)] +pub struct HostFunctionCostsV2 { + /// Cost of calling the `read` host function. + pub read: HostFunctionV2<[Cost; 6]>, + /// Cost of calling the `write` host function. + pub write: HostFunctionV2<[Cost; 5]>, + /// Cost of calling the `remove` host function. + pub remove: HostFunctionV2<[Cost; 3]>, + /// Cost of calling the `copy_input` host function. + pub copy_input: HostFunctionV2<[Cost; 2]>, + /// Cost of calling the `ret` host function. + pub ret: HostFunctionV2<[Cost; 2]>, + /// Cost of calling the `create` host function. + pub create: HostFunctionV2<[Cost; 10]>, + /// Cost of calling the `transfer` host function. 
+ pub transfer: HostFunctionV2<[Cost; 3]>, + /// Cost of calling the `env_balance` host function. + pub env_balance: HostFunctionV2<[Cost; 4]>, + /// Cost of calling the `upgrade` host function. + pub upgrade: HostFunctionV2<[Cost; 6]>, + /// Cost of calling the `call` host function. + pub call: HostFunctionV2<[Cost; 9]>, + /// Cost of calling the `print` host function. + pub print: HostFunctionV2<[Cost; 2]>, + /// Cost of calling the `emit` host function. + pub emit: HostFunctionV2<[Cost; 4]>, + /// Cost of calling the `env_info` host function. + pub env_info: HostFunctionV2<[Cost; 2]>, +} + +impl HostFunctionCostsV2 { + pub fn zero() -> Self { + Self { + read: HostFunctionV2::zero(), + write: HostFunctionV2::zero(), + remove: HostFunctionV2::zero(), + copy_input: HostFunctionV2::zero(), + ret: HostFunctionV2::zero(), + create: HostFunctionV2::zero(), + transfer: HostFunctionV2::zero(), + env_balance: HostFunctionV2::zero(), + upgrade: HostFunctionV2::zero(), + call: HostFunctionV2::zero(), + print: HostFunctionV2::zero(), + emit: HostFunctionV2::zero(), + env_info: HostFunctionV2::zero(), + } + } +} + +impl Default for HostFunctionCostsV2 { + fn default() -> Self { + Self { + read: HostFunctionV2::new( + DEFAULT_READ_COST, + [ + NOT_USED, + NOT_USED, + DEFAULT_READ_KEY_SIZE_WEIGHT, + NOT_USED, + NOT_USED, + NOT_USED, + ], + ), + write: HostFunctionV2::new( + DEFAULT_WRITE_COST, + [ + NOT_USED, + NOT_USED, + NOT_USED, + NOT_USED, + DEFAULT_WRITE_SIZE_WEIGHT, + ], + ), + remove: HostFunctionV2::new(DEFAULT_REMOVE_COST, [NOT_USED, NOT_USED, NOT_USED]), + copy_input: HostFunctionV2::new( + DEFAULT_COPY_INPUT_COST, + [NOT_USED, DEFAULT_COPY_INPUT_VALUE_SIZE_WEIGHT], + ), + ret: HostFunctionV2::new(DEFAULT_RET_COST, [NOT_USED, DEFAULT_RET_VALUE_SIZE_WEIGHT]), + create: HostFunctionV2::new( + DEFAULT_CREATE_COST, + [ + NOT_USED, + DEFAULT_CREATE_CODE_SIZE_WEIGHT, + NOT_USED, + NOT_USED, + DEFAULT_CREATE_ENTRYPOINT_SIZE_WEIGHT, + NOT_USED, + 
DEFAULT_CREATE_INPUT_SIZE_WEIGHT, + NOT_USED, + DEFAULT_CREATE_SEED_SIZE_WEIGHT, + NOT_USED, + ], + ), + env_balance: HostFunctionV2::fixed(DEFAULT_ENV_BALANCE_COST), + transfer: HostFunctionV2::new(DEFAULT_TRANSFER_COST, [NOT_USED, NOT_USED, NOT_USED]), + upgrade: HostFunctionV2::new( + DEFAULT_FIXED_COST, + [NOT_USED, NOT_USED, NOT_USED, NOT_USED, NOT_USED, NOT_USED], + ), + call: HostFunctionV2::new( + DEFAULT_CALL_COST, + [ + NOT_USED, NOT_USED, NOT_USED, NOT_USED, NOT_USED, NOT_USED, NOT_USED, NOT_USED, + NOT_USED, + ], + ), + print: HostFunctionV2::new(DEFAULT_PRINT_COST, [NOT_USED, NOT_USED]), + emit: HostFunctionV2::new( + DEFAULT_EMIT_COST, + [ + NOT_USED, + DEFAULT_EMIT_TOPIC_SIZE_WEIGHT, + NOT_USED, + DEFAULT_EMIT_PAYLOAD_SIZE_HEIGHT, + ], + ), + env_info: HostFunctionV2::new(DEFAULT_ENV_INFO_COST, [NOT_USED, NOT_USED]), + } + } +} + +impl ToBytes for HostFunctionCostsV2 { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut ret = bytesrepr::unchecked_allocate_buffer(self); + ret.append(&mut self.read.to_bytes()?); + ret.append(&mut self.write.to_bytes()?); + ret.append(&mut self.remove.to_bytes()?); + ret.append(&mut self.copy_input.to_bytes()?); + ret.append(&mut self.ret.to_bytes()?); + ret.append(&mut self.create.to_bytes()?); + ret.append(&mut self.transfer.to_bytes()?); + ret.append(&mut self.env_balance.to_bytes()?); + ret.append(&mut self.upgrade.to_bytes()?); + ret.append(&mut self.call.to_bytes()?); + ret.append(&mut self.print.to_bytes()?); + ret.append(&mut self.emit.to_bytes()?); + ret.append(&mut self.env_info.to_bytes()?); + Ok(ret) + } + + fn serialized_length(&self) -> usize { + self.read.serialized_length() + + self.write.serialized_length() + + self.remove.serialized_length() + + self.copy_input.serialized_length() + + self.ret.serialized_length() + + self.create.serialized_length() + + self.transfer.serialized_length() + + self.env_balance.serialized_length() + + self.upgrade.serialized_length() + + 
self.call.serialized_length() + + self.print.serialized_length() + + self.emit.serialized_length() + + self.env_info.serialized_length() + } +} + +impl FromBytes for HostFunctionCostsV2 { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (read, rem) = FromBytes::from_bytes(bytes)?; + let (write, rem) = FromBytes::from_bytes(rem)?; + let (remove, rem) = FromBytes::from_bytes(rem)?; + let (copy_input, rem) = FromBytes::from_bytes(rem)?; + let (ret, rem) = FromBytes::from_bytes(rem)?; + let (create, rem) = FromBytes::from_bytes(rem)?; + let (transfer, rem) = FromBytes::from_bytes(rem)?; + let (env_balance, rem) = FromBytes::from_bytes(rem)?; + let (upgrade, rem) = FromBytes::from_bytes(rem)?; + let (call, rem) = FromBytes::from_bytes(rem)?; + let (print, rem) = FromBytes::from_bytes(rem)?; + let (emit, rem) = FromBytes::from_bytes(rem)?; + let (env_info, rem) = FromBytes::from_bytes(rem)?; + Ok(( + HostFunctionCostsV2 { + read, + write, + remove, + copy_input, + ret, + create, + transfer, + env_balance, + upgrade, + call, + print, + emit, + env_info, + }, + rem, + )) + } +} + +#[cfg(any(feature = "testing", test))] +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> HostFunctionCostsV2 { + HostFunctionCostsV2 { + read: rng.gen(), + write: rng.gen(), + remove: rng.gen(), + copy_input: rng.gen(), + ret: rng.gen(), + create: rng.gen(), + transfer: rng.gen(), + env_balance: rng.gen(), + upgrade: rng.gen(), + call: rng.gen(), + print: rng.gen(), + emit: rng.gen(), + env_info: rng.gen(), + } + } +} + +#[doc(hidden)] +#[cfg(any(feature = "gens", test))] +pub mod gens { + use proptest::prelude::*; + + use super::*; + + #[allow(unused)] + pub fn host_function_cost_v2_arb( + ) -> impl Strategy> { + (any::(), any::<[u64; N]>()) + .prop_map(|(cost, arguments)| HostFunctionV2::new(cost, arguments)) + } + + prop_compose! 
{ + pub fn host_function_costs_v2_arb() ( + read in host_function_cost_v2_arb(), + write in host_function_cost_v2_arb(), + remove in host_function_cost_v2_arb(), + copy_input in host_function_cost_v2_arb(), + ret in host_function_cost_v2_arb(), + create in host_function_cost_v2_arb(), + transfer in host_function_cost_v2_arb(), + env_balance in host_function_cost_v2_arb(), + upgrade in host_function_cost_v2_arb(), + call in host_function_cost_v2_arb(), + print in host_function_cost_v2_arb(), + emit in host_function_cost_v2_arb(), + env_info in host_function_cost_v2_arb(), + ) -> HostFunctionCostsV2 { + HostFunctionCostsV2 { + read, + write, + remove, + copy_input, + ret, + create, + transfer, + env_balance, + upgrade, + call, + print, + emit, + env_info + } + } + } +} + +#[cfg(test)] +mod tests { + use crate::{Gas, U512}; + + use super::*; + + const COST: Cost = 42; + const ARGUMENT_COSTS: [Cost; 3] = [123, 456, 789]; + const WEIGHTS: [u64; 3] = [1000, 1000, 1000]; + + #[test] + fn calculate_gas_cost_for_host_function() { + let host_function = HostFunctionV2::new(COST, ARGUMENT_COSTS); + let expected_cost = COST + + (ARGUMENT_COSTS[0] * Cost::from(WEIGHTS[0])) + + (ARGUMENT_COSTS[1] * Cost::from(WEIGHTS[1])) + + (ARGUMENT_COSTS[2] * Cost::from(WEIGHTS[2])); + assert_eq!( + host_function.calculate_gas_cost(WEIGHTS), + Some(Gas::new(expected_cost)) + ); + } + + #[test] + fn calculate_gas_cost_would_overflow() { + let large_value = Cost::MAX; + + let host_function = HostFunctionV2::new( + large_value, + [large_value, large_value, large_value, large_value], + ); + + let lhs = + host_function.calculate_gas_cost([large_value, large_value, large_value, large_value]); + + let large_value = U512::from(large_value); + let rhs = large_value + (U512::from(4) * large_value * large_value); + + assert_eq!(lhs, Some(Gas::new(rhs))); + } + #[test] + fn calculate_large_gas_cost() { + let hf = HostFunctionV2::new(1, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]); + assert_eq!( + 
hf.calculate_gas_cost([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), + Some(Gas::new( + 1 + (1 + 2 * 2 + 3 * 3 + 4 * 4 + 5 * 5 + 6 * 6 + 7 * 7 + 8 * 8 + 9 * 9 + 10 * 10) + )) + ); + } +} + +#[cfg(test)] +mod proptests { + use proptest::prelude::*; + + use crate::bytesrepr; + + use super::*; + + proptest! { + #[test] + fn test_host_function(host_function in gens::host_function_cost_v2_arb::<10>()) { + bytesrepr::test_serialization_roundtrip(&host_function); + } + + #[test] + fn test_host_function_costs(host_function_costs in gens::host_function_costs_v2_arb()) { + bytesrepr::test_serialization_roundtrip(&host_function_costs); + } + } +} diff --git a/types/src/chainspec/vm_config/message_limits.rs b/types/src/chainspec/vm_config/message_limits.rs new file mode 100644 index 0000000000..6f5d131fdf --- /dev/null +++ b/types/src/chainspec/vm_config/message_limits.rs @@ -0,0 +1,136 @@ +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::{ + distributions::{Distribution, Standard}, + Rng, +}; +use serde::{Deserialize, Serialize}; + +use crate::bytesrepr::{self, FromBytes, ToBytes}; + +/// Configuration for messages limits. +#[derive(Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[serde(deny_unknown_fields)] +pub struct MessageLimits { + /// Maximum size (in bytes) of a topic name string. + pub max_topic_name_size: u32, + /// Maximum message size in bytes. + pub max_message_size: u32, + /// Maximum number of topics that a contract can register. + pub max_topics_per_contract: u32, +} + +impl MessageLimits { + /// Returns the max number of topics a contract can register. + pub fn max_topics_per_contract(&self) -> u32 { + self.max_topics_per_contract + } + + /// Returns the maximum allowed size for the topic name string. 
+ pub fn max_topic_name_size(&self) -> u32 { + self.max_topic_name_size + } + + /// Returns the maximum allowed size (in bytes) of the serialized message payload. + pub fn max_message_size(&self) -> u32 { + self.max_message_size + } +} + +impl Default for MessageLimits { + fn default() -> Self { + Self { + max_topic_name_size: 256, + max_message_size: 1024, + max_topics_per_contract: 128, + } + } +} + +impl ToBytes for MessageLimits { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut ret = bytesrepr::unchecked_allocate_buffer(self); + + ret.append(&mut self.max_topic_name_size.to_bytes()?); + ret.append(&mut self.max_message_size.to_bytes()?); + ret.append(&mut self.max_topics_per_contract.to_bytes()?); + + Ok(ret) + } + + fn serialized_length(&self) -> usize { + self.max_topic_name_size.serialized_length() + + self.max_message_size.serialized_length() + + self.max_topics_per_contract.serialized_length() + } +} + +impl FromBytes for MessageLimits { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (max_topic_name_size, rem) = FromBytes::from_bytes(bytes)?; + let (max_message_size, rem) = FromBytes::from_bytes(rem)?; + let (max_topics_per_contract, rem) = FromBytes::from_bytes(rem)?; + + Ok(( + MessageLimits { + max_topic_name_size, + max_message_size, + max_topics_per_contract, + }, + rem, + )) + } +} + +#[cfg(any(feature = "testing", test))] +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> MessageLimits { + MessageLimits { + max_topic_name_size: rng.gen(), + max_message_size: rng.gen(), + max_topics_per_contract: rng.gen(), + } + } +} + +#[doc(hidden)] +#[cfg(any(feature = "gens", test))] +pub mod gens { + use proptest::{num, prop_compose}; + + use super::MessageLimits; + + prop_compose! 
{ + pub fn message_limits_arb()( + max_topic_name_size in num::u32::ANY, + max_message_size in num::u32::ANY, + max_topics_per_contract in num::u32::ANY, + ) -> MessageLimits { + MessageLimits { + max_topic_name_size, + max_message_size, + max_topics_per_contract, + } + } + } +} + +#[cfg(test)] +mod tests { + use proptest::proptest; + + use crate::bytesrepr; + + use super::gens; + + proptest! { + #[test] + fn should_serialize_and_deserialize_with_arbitrary_values( + message_limits in gens::message_limits_arb() + ) { + bytesrepr::test_serialization_roundtrip(&message_limits); + } + } +} diff --git a/types/src/chainspec/vm_config/mint_costs.rs b/types/src/chainspec/vm_config/mint_costs.rs new file mode 100644 index 0000000000..25f4ca3e30 --- /dev/null +++ b/types/src/chainspec/vm_config/mint_costs.rs @@ -0,0 +1,191 @@ +//! Costs of the mint system contract. +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::{ + distributions::{Distribution, Standard}, + Rng, +}; +use serde::{Deserialize, Serialize}; + +use crate::bytesrepr::{self, FromBytes, ToBytes}; + +/// Default cost of the `mint` mint entry point. +pub const DEFAULT_MINT_COST: u32 = 2_500_000_000; +/// Default cost of the `reduce_total_supply` mint entry point. +pub const DEFAULT_REDUCE_TOTAL_SUPPLY_COST: u32 = 2_500_000_000; +/// Default cost of the `burn` mint entry point. +pub const DEFAULT_BURN_COST: u32 = 100_000_000; +/// Default cost of the `create` mint entry point. +pub const DEFAULT_CREATE_COST: u32 = 2_500_000_000; +/// Default cost of the `balance` mint entry point. +pub const DEFAULT_BALANCE_COST: u32 = 100_000_000; +/// Default cost of the `transfer` mint entry point. +pub const DEFAULT_TRANSFER_COST: u32 = 100_000_000; +/// Default cost of the `read_base_round_reward` mint entry point. +pub const DEFAULT_READ_BASE_ROUND_REWARD_COST: u32 = 2_500_000_000; +/// Default cost of the `mint_into_existing_purse` mint entry point. 
+pub const DEFAULT_MINT_INTO_EXISTING_PURSE_COST: u32 = 2_500_000_000; + +/// Description of the costs of calling mint entry points. +#[derive(Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[serde(deny_unknown_fields)] +pub struct MintCosts { + /// Cost of calling the `mint` entry point. + pub mint: u32, + /// Cost of calling the `reduce_total_supply` entry point. + pub reduce_total_supply: u32, + /// Cost of calling the `burn` entry point. + pub burn: u32, + /// Cost of calling the `create` entry point. + pub create: u32, + /// Cost of calling the `balance` entry point. + pub balance: u32, + /// Cost of calling the `transfer` entry point. + pub transfer: u32, + /// Cost of calling the `read_base_round_reward` entry point. + pub read_base_round_reward: u32, + /// Cost of calling the `mint_into_existing_purse` entry point. + pub mint_into_existing_purse: u32, +} + +impl Default for MintCosts { + fn default() -> Self { + Self { + mint: DEFAULT_MINT_COST, + reduce_total_supply: DEFAULT_REDUCE_TOTAL_SUPPLY_COST, + burn: DEFAULT_BURN_COST, + create: DEFAULT_CREATE_COST, + balance: DEFAULT_BALANCE_COST, + transfer: DEFAULT_TRANSFER_COST, + read_base_round_reward: DEFAULT_READ_BASE_ROUND_REWARD_COST, + mint_into_existing_purse: DEFAULT_MINT_INTO_EXISTING_PURSE_COST, + } + } +} + +impl ToBytes for MintCosts { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut ret = bytesrepr::unchecked_allocate_buffer(self); + + let Self { + mint, + reduce_total_supply, + burn, + create, + balance, + transfer, + read_base_round_reward, + mint_into_existing_purse, + } = self; + + ret.append(&mut mint.to_bytes()?); + ret.append(&mut reduce_total_supply.to_bytes()?); + ret.append(&mut create.to_bytes()?); + ret.append(&mut balance.to_bytes()?); + ret.append(&mut transfer.to_bytes()?); + ret.append(&mut read_base_round_reward.to_bytes()?); + ret.append(&mut mint_into_existing_purse.to_bytes()?); + ret.append(&mut 
burn.to_bytes()?); + + Ok(ret) + } + + fn serialized_length(&self) -> usize { + let Self { + mint, + reduce_total_supply, + burn, + create, + balance, + transfer, + read_base_round_reward, + mint_into_existing_purse, + } = self; + + mint.serialized_length() + + reduce_total_supply.serialized_length() + + burn.serialized_length() + + create.serialized_length() + + balance.serialized_length() + + transfer.serialized_length() + + read_base_round_reward.serialized_length() + + mint_into_existing_purse.serialized_length() + } +} + +impl FromBytes for MintCosts { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (mint, rem) = FromBytes::from_bytes(bytes)?; + let (reduce_total_supply, rem) = FromBytes::from_bytes(rem)?; + let (create, rem) = FromBytes::from_bytes(rem)?; + let (balance, rem) = FromBytes::from_bytes(rem)?; + let (transfer, rem) = FromBytes::from_bytes(rem)?; + let (read_base_round_reward, rem) = FromBytes::from_bytes(rem)?; + let (mint_into_existing_purse, rem) = FromBytes::from_bytes(rem)?; + let (burn, rem) = FromBytes::from_bytes(rem)?; + + Ok(( + Self { + mint, + reduce_total_supply, + burn, + create, + balance, + transfer, + read_base_round_reward, + mint_into_existing_purse, + }, + rem, + )) + } +} + +#[cfg(any(feature = "testing", test))] +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> MintCosts { + MintCosts { + mint: rng.gen(), + burn: rng.gen(), + reduce_total_supply: rng.gen(), + create: rng.gen(), + balance: rng.gen(), + transfer: rng.gen(), + read_base_round_reward: rng.gen(), + mint_into_existing_purse: rng.gen(), + } + } +} + +#[doc(hidden)] +#[cfg(any(feature = "gens", test))] +pub mod gens { + use proptest::{num, prop_compose}; + + use super::MintCosts; + + prop_compose! 
{ + pub fn mint_costs_arb()( + mint in num::u32::ANY, + reduce_total_supply in num::u32::ANY, + burn in num::u32::ANY, + create in num::u32::ANY, + balance in num::u32::ANY, + transfer in num::u32::ANY, + read_base_round_reward in num::u32::ANY, + mint_into_existing_purse in num::u32::ANY, + ) -> MintCosts { + MintCosts { + mint, + reduce_total_supply, + burn, + create, + balance, + transfer, + read_base_round_reward, + mint_into_existing_purse, + } + } + } +} diff --git a/types/src/chainspec/vm_config/opcode_costs.rs b/types/src/chainspec/vm_config/opcode_costs.rs new file mode 100644 index 0000000000..176b2a89ef --- /dev/null +++ b/types/src/chainspec/vm_config/opcode_costs.rs @@ -0,0 +1,796 @@ +//! Support for Wasm opcode costs. + +#[cfg(feature = "datasize")] +use datasize::DataSize; +use derive_more::Add; +use num_traits::Zero; +#[cfg(any(feature = "testing", test))] +use rand::{ + distributions::{Distribution, Standard}, + Rng, +}; +use serde::{Deserialize, Serialize}; + +use crate::bytesrepr::{self, FromBytes, ToBytes}; + +/// Default cost of the `bit` Wasm opcode. +pub const DEFAULT_BIT_COST: u32 = 105; +/// Default cost of the `add` Wasm opcode. +pub const DEFAULT_ADD_COST: u32 = 105; +/// Default cost of the `mul` Wasm opcode. +pub const DEFAULT_MUL_COST: u32 = 105; +/// Default cost of the `div` Wasm opcode. +pub const DEFAULT_DIV_COST: u32 = 105; +/// Default cost of the `load` Wasm opcode. +pub const DEFAULT_LOAD_COST: u32 = 105; +/// Default cost of the `store` Wasm opcode. +pub const DEFAULT_STORE_COST: u32 = 105; +/// Default cost of the `const` Wasm opcode. +pub const DEFAULT_CONST_COST: u32 = 105; +/// Default cost of the `local` Wasm opcode. +pub const DEFAULT_LOCAL_COST: u32 = 105; +/// Default cost of the `global` Wasm opcode. +pub const DEFAULT_GLOBAL_COST: u32 = 105; +/// Default cost of the `integer_comparison` Wasm opcode. +pub const DEFAULT_INTEGER_COMPARISON_COST: u32 = 105; +/// Default cost of the `conversion` Wasm opcode. 
+pub const DEFAULT_CONVERSION_COST: u32 = 105; +/// Default cost of the `unreachable` Wasm opcode. +pub const DEFAULT_UNREACHABLE_COST: u32 = 105; +/// Default cost of the `nop` Wasm opcode. +pub const DEFAULT_NOP_COST: u32 = 105; +/// Default cost of the `current_memory` Wasm opcode. +pub const DEFAULT_CURRENT_MEMORY_COST: u32 = 105; +/// Default cost of the `grow_memory` Wasm opcode. +pub const DEFAULT_GROW_MEMORY_COST: u32 = 900; +/// Default cost of the `block` Wasm opcode. +pub const DEFAULT_CONTROL_FLOW_BLOCK_OPCODE: u32 = 255; +/// Default cost of the `loop` Wasm opcode. +pub const DEFAULT_CONTROL_FLOW_LOOP_OPCODE: u32 = 255; +/// Default cost of the `if` Wasm opcode. +pub const DEFAULT_CONTROL_FLOW_IF_OPCODE: u32 = 105; +/// Default cost of the `else` Wasm opcode. +pub const DEFAULT_CONTROL_FLOW_ELSE_OPCODE: u32 = 105; +/// Default cost of the `end` Wasm opcode. +pub const DEFAULT_CONTROL_FLOW_END_OPCODE: u32 = 105; +/// Default cost of the `br` Wasm opcode. +pub const DEFAULT_CONTROL_FLOW_BR_OPCODE: u32 = 1665; +/// Default cost of the `br_if` Wasm opcode. +pub const DEFAULT_CONTROL_FLOW_BR_IF_OPCODE: u32 = 510; +/// Default cost of the `return` Wasm opcode. +pub const DEFAULT_CONTROL_FLOW_RETURN_OPCODE: u32 = 105; +/// Default cost of the `select` Wasm opcode. +pub const DEFAULT_CONTROL_FLOW_SELECT_OPCODE: u32 = 105; +/// Default cost of the `call` Wasm opcode. +pub const DEFAULT_CONTROL_FLOW_CALL_OPCODE: u32 = 225; +/// Default cost of the `call_indirect` Wasm opcode. +pub const DEFAULT_CONTROL_FLOW_CALL_INDIRECT_OPCODE: u32 = 270; +/// Default cost of the `drop` Wasm opcode. +pub const DEFAULT_CONTROL_FLOW_DROP_OPCODE: u32 = 105; +/// Default fixed cost of the `br_table` Wasm opcode. +pub const DEFAULT_CONTROL_FLOW_BR_TABLE_OPCODE: u32 = 150; +/// Default multiplier for the size of targets in `br_table` Wasm opcode. 
+pub const DEFAULT_CONTROL_FLOW_BR_TABLE_MULTIPLIER: u32 = 100; +/// Default cost of the sign extension opcodes +pub const DEFAULT_SIGN_COST: u32 = 105; + +/// Definition of a cost table for a Wasm `br_table` opcode. +/// +/// Charge of a `br_table` opcode is calculated as follows: +/// +/// ```text +/// cost + (len(br_table.targets) * size_multiplier) +/// ``` +// This is done to encourage users to avoid writing code with very long `br_table`s. +#[derive(Add, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[serde(deny_unknown_fields)] +pub struct BrTableCost { + /// Fixed cost charge for `br_table` opcode. + pub cost: u32, + /// Multiplier for size of target labels in the `br_table` opcode. + pub size_multiplier: u32, +} + +impl Default for BrTableCost { + fn default() -> Self { + Self { + cost: DEFAULT_CONTROL_FLOW_BR_TABLE_OPCODE, + size_multiplier: DEFAULT_CONTROL_FLOW_BR_TABLE_MULTIPLIER, + } + } +} + +#[cfg(any(feature = "testing", test))] +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> BrTableCost { + BrTableCost { + cost: rng.gen(), + size_multiplier: rng.gen(), + } + } +} + +impl ToBytes for BrTableCost { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let Self { + cost, + size_multiplier, + } = self; + + let mut ret = bytesrepr::unchecked_allocate_buffer(self); + + ret.append(&mut cost.to_bytes()?); + ret.append(&mut size_multiplier.to_bytes()?); + + Ok(ret) + } + + fn serialized_length(&self) -> usize { + let Self { + cost, + size_multiplier, + } = self; + + cost.serialized_length() + size_multiplier.serialized_length() + } +} + +impl FromBytes for BrTableCost { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (cost, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; + let (size_multiplier, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; + Ok(( + Self { + cost, + size_multiplier, + }, + bytes, + )) + } +} + +impl Zero for 
BrTableCost { + fn zero() -> Self { + BrTableCost { + cost: 0, + size_multiplier: 0, + } + } + + fn is_zero(&self) -> bool { + let BrTableCost { + cost, + size_multiplier, + } = self; + cost.is_zero() && size_multiplier.is_zero() + } +} + +/// Definition of a cost table for a Wasm control flow opcodes. +#[derive(Add, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[serde(deny_unknown_fields)] +pub struct ControlFlowCosts { + /// Cost for `block` opcode. + pub block: u32, + /// Cost for `loop` opcode. + #[serde(rename = "loop")] + pub op_loop: u32, + /// Cost for `if` opcode. + #[serde(rename = "if")] + pub op_if: u32, + /// Cost for `else` opcode. + #[serde(rename = "else")] + pub op_else: u32, + /// Cost for `end` opcode. + pub end: u32, + /// Cost for `br` opcode. + pub br: u32, + /// Cost for `br_if` opcode. + pub br_if: u32, + /// Cost for `return` opcode. + #[serde(rename = "return")] + pub op_return: u32, + /// Cost for `call` opcode. + pub call: u32, + /// Cost for `call_indirect` opcode. + pub call_indirect: u32, + /// Cost for `drop` opcode. + pub drop: u32, + /// Cost for `select` opcode. + pub select: u32, + /// Cost for `br_table` opcode. 
+ pub br_table: BrTableCost, +} + +impl Default for ControlFlowCosts { + fn default() -> Self { + Self { + block: DEFAULT_CONTROL_FLOW_BLOCK_OPCODE, + op_loop: DEFAULT_CONTROL_FLOW_LOOP_OPCODE, + op_if: DEFAULT_CONTROL_FLOW_IF_OPCODE, + op_else: DEFAULT_CONTROL_FLOW_ELSE_OPCODE, + end: DEFAULT_CONTROL_FLOW_END_OPCODE, + br: DEFAULT_CONTROL_FLOW_BR_OPCODE, + br_if: DEFAULT_CONTROL_FLOW_BR_IF_OPCODE, + op_return: DEFAULT_CONTROL_FLOW_RETURN_OPCODE, + call: DEFAULT_CONTROL_FLOW_CALL_OPCODE, + call_indirect: DEFAULT_CONTROL_FLOW_CALL_INDIRECT_OPCODE, + drop: DEFAULT_CONTROL_FLOW_DROP_OPCODE, + select: DEFAULT_CONTROL_FLOW_SELECT_OPCODE, + br_table: Default::default(), + } + } +} + +impl ToBytes for ControlFlowCosts { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut ret = bytesrepr::unchecked_allocate_buffer(self); + + let Self { + block, + op_loop, + op_if, + op_else, + end, + br, + br_if, + op_return, + call, + call_indirect, + drop, + select, + br_table, + } = self; + ret.append(&mut block.to_bytes()?); + ret.append(&mut op_loop.to_bytes()?); + ret.append(&mut op_if.to_bytes()?); + ret.append(&mut op_else.to_bytes()?); + ret.append(&mut end.to_bytes()?); + ret.append(&mut br.to_bytes()?); + ret.append(&mut br_if.to_bytes()?); + ret.append(&mut op_return.to_bytes()?); + ret.append(&mut call.to_bytes()?); + ret.append(&mut call_indirect.to_bytes()?); + ret.append(&mut drop.to_bytes()?); + ret.append(&mut select.to_bytes()?); + ret.append(&mut br_table.to_bytes()?); + + Ok(ret) + } + + fn serialized_length(&self) -> usize { + let Self { + block, + op_loop, + op_if, + op_else, + end, + br, + br_if, + op_return, + call, + call_indirect, + drop, + select, + br_table, + } = self; + block.serialized_length() + + op_loop.serialized_length() + + op_if.serialized_length() + + op_else.serialized_length() + + end.serialized_length() + + br.serialized_length() + + br_if.serialized_length() + + op_return.serialized_length() + + call.serialized_length() + + 
call_indirect.serialized_length() + + drop.serialized_length() + + select.serialized_length() + + br_table.serialized_length() + } +} + +impl FromBytes for ControlFlowCosts { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (block, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; + let (op_loop, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; + let (op_if, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; + let (op_else, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; + let (end, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; + let (br, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; + let (br_if, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; + let (op_return, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; + let (call, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; + let (call_indirect, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; + let (drop, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; + let (select, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; + let (br_table, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; + + let control_flow_cost = ControlFlowCosts { + block, + op_loop, + op_if, + op_else, + end, + br, + br_if, + op_return, + call, + call_indirect, + drop, + select, + br_table, + }; + Ok((control_flow_cost, bytes)) + } +} + +#[cfg(any(feature = "testing", test))] +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> ControlFlowCosts { + ControlFlowCosts { + block: rng.gen(), + op_loop: rng.gen(), + op_if: rng.gen(), + op_else: rng.gen(), + end: rng.gen(), + br: rng.gen(), + br_if: rng.gen(), + op_return: rng.gen(), + call: rng.gen(), + call_indirect: rng.gen(), + drop: rng.gen(), + select: rng.gen(), + br_table: rng.gen(), + } + } +} + +impl Zero for ControlFlowCosts { + fn zero() -> Self { + ControlFlowCosts { + block: 0, + op_loop: 0, + op_if: 0, + op_else: 0, + end: 0, + br: 0, + br_if: 0, + op_return: 0, + call: 0, + call_indirect: 0, + drop: 0, + 
select: 0, + br_table: BrTableCost::zero(), + } + } + + fn is_zero(&self) -> bool { + let ControlFlowCosts { + block, + op_loop, + op_if, + op_else, + end, + br, + br_if, + op_return, + call, + call_indirect, + drop, + select, + br_table, + } = self; + block.is_zero() + && op_loop.is_zero() + && op_if.is_zero() + && op_else.is_zero() + && end.is_zero() + && br.is_zero() + && br_if.is_zero() + && op_return.is_zero() + && call.is_zero() + && call_indirect.is_zero() + && drop.is_zero() + && select.is_zero() + && br_table.is_zero() + } +} + +/// Definition of a cost table for Wasm opcodes. +/// +/// This is taken (partially) from parity-ethereum. +#[derive(Add, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[serde(deny_unknown_fields)] +pub struct OpcodeCosts { + /// Bit operations multiplier. + pub bit: u32, + /// Arithmetic add operations multiplier. + pub add: u32, + /// Mul operations multiplier. + pub mul: u32, + /// Div operations multiplier. + pub div: u32, + /// Memory load operation multiplier. + pub load: u32, + /// Memory store operation multiplier. + pub store: u32, + /// Const operation multiplier. + #[serde(rename = "const")] + pub op_const: u32, + /// Local operations multiplier. + pub local: u32, + /// Global operations multiplier. + pub global: u32, + /// Integer operations multiplier. + pub integer_comparison: u32, + /// Conversion operations multiplier. + pub conversion: u32, + /// Unreachable operation multiplier. + pub unreachable: u32, + /// Nop operation multiplier. + pub nop: u32, + /// Get current memory operation multiplier. + pub current_memory: u32, + /// Grow memory cost, per page (64kb) + pub grow_memory: u32, + /// Control flow operations multiplier. 
+ pub control_flow: ControlFlowCosts, + /// Sign ext operations costs + pub sign: u32, +} + +impl Default for OpcodeCosts { + fn default() -> Self { + OpcodeCosts { + bit: DEFAULT_BIT_COST, + add: DEFAULT_ADD_COST, + mul: DEFAULT_MUL_COST, + div: DEFAULT_DIV_COST, + load: DEFAULT_LOAD_COST, + store: DEFAULT_STORE_COST, + op_const: DEFAULT_CONST_COST, + local: DEFAULT_LOCAL_COST, + global: DEFAULT_GLOBAL_COST, + integer_comparison: DEFAULT_INTEGER_COMPARISON_COST, + conversion: DEFAULT_CONVERSION_COST, + unreachable: DEFAULT_UNREACHABLE_COST, + nop: DEFAULT_NOP_COST, + current_memory: DEFAULT_CURRENT_MEMORY_COST, + grow_memory: DEFAULT_GROW_MEMORY_COST, + control_flow: ControlFlowCosts::default(), + sign: DEFAULT_SIGN_COST, + } + } +} + +#[cfg(any(feature = "testing", test))] +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> OpcodeCosts { + OpcodeCosts { + bit: rng.gen(), + add: rng.gen(), + mul: rng.gen(), + div: rng.gen(), + load: rng.gen(), + store: rng.gen(), + op_const: rng.gen(), + local: rng.gen(), + global: rng.gen(), + integer_comparison: rng.gen(), + conversion: rng.gen(), + unreachable: rng.gen(), + nop: rng.gen(), + current_memory: rng.gen(), + grow_memory: rng.gen(), + control_flow: rng.gen(), + sign: rng.gen(), + } + } +} + +impl ToBytes for OpcodeCosts { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut ret = bytesrepr::unchecked_allocate_buffer(self); + + let Self { + bit, + add, + mul, + div, + load, + store, + op_const, + local, + global, + integer_comparison, + conversion, + unreachable, + nop, + current_memory, + grow_memory, + control_flow, + sign, + } = self; + + ret.append(&mut bit.to_bytes()?); + ret.append(&mut add.to_bytes()?); + ret.append(&mut mul.to_bytes()?); + ret.append(&mut div.to_bytes()?); + ret.append(&mut load.to_bytes()?); + ret.append(&mut store.to_bytes()?); + ret.append(&mut op_const.to_bytes()?); + ret.append(&mut local.to_bytes()?); + ret.append(&mut global.to_bytes()?); + ret.append(&mut 
integer_comparison.to_bytes()?); + ret.append(&mut conversion.to_bytes()?); + ret.append(&mut unreachable.to_bytes()?); + ret.append(&mut nop.to_bytes()?); + ret.append(&mut current_memory.to_bytes()?); + ret.append(&mut grow_memory.to_bytes()?); + ret.append(&mut control_flow.to_bytes()?); + ret.append(&mut sign.to_bytes()?); + + Ok(ret) + } + + fn serialized_length(&self) -> usize { + let Self { + bit, + add, + mul, + div, + load, + store, + op_const, + local, + global, + integer_comparison, + conversion, + unreachable, + nop, + current_memory, + grow_memory, + control_flow, + sign, + } = self; + bit.serialized_length() + + add.serialized_length() + + mul.serialized_length() + + div.serialized_length() + + load.serialized_length() + + store.serialized_length() + + op_const.serialized_length() + + local.serialized_length() + + global.serialized_length() + + integer_comparison.serialized_length() + + conversion.serialized_length() + + unreachable.serialized_length() + + nop.serialized_length() + + current_memory.serialized_length() + + grow_memory.serialized_length() + + control_flow.serialized_length() + + sign.serialized_length() + } +} + +impl FromBytes for OpcodeCosts { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (bit, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; + let (add, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; + let (mul, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; + let (div, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; + let (load, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; + let (store, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; + let (const_, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; + let (local, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; + let (global, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; + let (integer_comparison, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; + let (conversion, bytes): (_, &[u8]) = 
FromBytes::from_bytes(bytes)?; + let (unreachable, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; + let (nop, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; + let (current_memory, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; + let (grow_memory, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; + let (control_flow, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; + let (sign, bytes): (_, &[u8]) = FromBytes::from_bytes(bytes)?; + + let opcode_costs = OpcodeCosts { + bit, + add, + mul, + div, + load, + store, + op_const: const_, + local, + global, + integer_comparison, + conversion, + unreachable, + nop, + current_memory, + grow_memory, + control_flow, + sign, + }; + Ok((opcode_costs, bytes)) + } +} + +impl Zero for OpcodeCosts { + fn zero() -> Self { + Self { + bit: 0, + add: 0, + mul: 0, + div: 0, + load: 0, + store: 0, + op_const: 0, + local: 0, + global: 0, + integer_comparison: 0, + conversion: 0, + unreachable: 0, + nop: 0, + current_memory: 0, + grow_memory: 0, + control_flow: ControlFlowCosts::zero(), + sign: 0, + } + } + + fn is_zero(&self) -> bool { + let OpcodeCosts { + bit, + add, + mul, + div, + load, + store, + op_const, + local, + global, + integer_comparison, + conversion, + unreachable, + nop, + current_memory, + grow_memory, + control_flow, + sign, + } = self; + bit.is_zero() + && add.is_zero() + && mul.is_zero() + && div.is_zero() + && load.is_zero() + && store.is_zero() + && op_const.is_zero() + && local.is_zero() + && global.is_zero() + && integer_comparison.is_zero() + && conversion.is_zero() + && unreachable.is_zero() + && nop.is_zero() + && current_memory.is_zero() + && grow_memory.is_zero() + && control_flow.is_zero() + && sign.is_zero() + } +} + +#[doc(hidden)] +#[cfg(any(feature = "gens", test))] +pub mod gens { + use proptest::{num, prop_compose}; + + use crate::{BrTableCost, ControlFlowCosts, OpcodeCosts}; + + prop_compose! 
{ + pub fn br_table_cost_arb()( + cost in num::u32::ANY, + size_multiplier in num::u32::ANY, + ) -> BrTableCost { + BrTableCost { cost, size_multiplier } + } + } + + prop_compose! { + pub fn control_flow_cost_arb()( + block in num::u32::ANY, + op_loop in num::u32::ANY, + op_if in num::u32::ANY, + op_else in num::u32::ANY, + end in num::u32::ANY, + br in num::u32::ANY, + br_if in num::u32::ANY, + br_table in br_table_cost_arb(), + op_return in num::u32::ANY, + call in num::u32::ANY, + call_indirect in num::u32::ANY, + drop in num::u32::ANY, + select in num::u32::ANY, + ) -> ControlFlowCosts { + ControlFlowCosts { + block, + op_loop, + op_if, + op_else, + end, + br, + br_if, + br_table, + op_return, + call, + call_indirect, + drop, + select + } + } + + } + + prop_compose! { + pub fn opcode_costs_arb()( + bit in num::u32::ANY, + add in num::u32::ANY, + mul in num::u32::ANY, + div in num::u32::ANY, + load in num::u32::ANY, + store in num::u32::ANY, + op_const in num::u32::ANY, + local in num::u32::ANY, + global in num::u32::ANY, + integer_comparison in num::u32::ANY, + conversion in num::u32::ANY, + unreachable in num::u32::ANY, + nop in num::u32::ANY, + current_memory in num::u32::ANY, + grow_memory in num::u32::ANY, + control_flow in control_flow_cost_arb(), + sign in num::u32::ANY, + ) -> OpcodeCosts { + OpcodeCosts { + bit, + add, + mul, + div, + load, + store, + op_const, + local, + global, + integer_comparison, + conversion, + unreachable, + nop, + current_memory, + grow_memory, + control_flow, + sign, + } + } + } +} + +#[cfg(test)] +mod tests { + use proptest::proptest; + + use crate::bytesrepr; + + use super::gens; + + proptest! 
{ + #[test] + fn should_serialize_and_deserialize_with_arbitrary_values( + opcode_costs in gens::opcode_costs_arb() + ) { + bytesrepr::test_serialization_roundtrip(&opcode_costs); + } + } +} diff --git a/types/src/chainspec/vm_config/standard_payment_costs.rs b/types/src/chainspec/vm_config/standard_payment_costs.rs new file mode 100644 index 0000000000..0669865edf --- /dev/null +++ b/types/src/chainspec/vm_config/standard_payment_costs.rs @@ -0,0 +1,75 @@ +//! Costs of the standard payment system contract. +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::{ + distributions::{Distribution, Standard}, + Rng, +}; +use serde::{Deserialize, Serialize}; + +use crate::bytesrepr::{self, FromBytes, ToBytes}; + +/// Default cost of the `pay` standard payment entry point. +const DEFAULT_PAY_COST: u32 = 10_000; + +/// Description of the costs of calling standard payment entry points. +#[derive(Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[serde(deny_unknown_fields)] +pub struct StandardPaymentCosts { + /// Cost of calling the `pay` entry point. 
+ pub pay: u32, +} + +impl Default for StandardPaymentCosts { + fn default() -> Self { + Self { + pay: DEFAULT_PAY_COST, + } + } +} + +impl ToBytes for StandardPaymentCosts { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut ret = bytesrepr::unchecked_allocate_buffer(self); + ret.append(&mut self.pay.to_bytes()?); + Ok(ret) + } + + fn serialized_length(&self) -> usize { + self.pay.serialized_length() + } +} + +impl FromBytes for StandardPaymentCosts { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (pay, rem) = FromBytes::from_bytes(bytes)?; + Ok((Self { pay }, rem)) + } +} + +#[cfg(any(feature = "testing", test))] +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> StandardPaymentCosts { + StandardPaymentCosts { pay: rng.gen() } + } +} + +#[doc(hidden)] +#[cfg(any(feature = "gens", test))] +pub mod gens { + use proptest::{num, prop_compose}; + + use super::StandardPaymentCosts; + + prop_compose! { + pub fn standard_payment_costs_arb()( + pay in num::u32::ANY, + ) -> StandardPaymentCosts { + StandardPaymentCosts { + pay, + } + } + } +} diff --git a/types/src/chainspec/vm_config/storage_costs.rs b/types/src/chainspec/vm_config/storage_costs.rs new file mode 100644 index 0000000000..67df0a563b --- /dev/null +++ b/types/src/chainspec/vm_config/storage_costs.rs @@ -0,0 +1,146 @@ +//! Support for storage costs. +#[cfg(feature = "datasize")] +use datasize::DataSize; +use derive_more::Add; +use num_traits::Zero; +#[cfg(any(feature = "testing", test))] +use rand::{ + distributions::{Distribution, Standard}, + Rng, +}; +use serde::{Deserialize, Serialize}; + +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + Gas, U512, +}; + +/// Default gas cost per byte stored. +pub const DEFAULT_GAS_PER_BYTE_COST: u32 = 1_117_587; + +/// Represents a cost table for storage costs. 
+#[derive(Add, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[serde(deny_unknown_fields)] +pub struct StorageCosts { + /// Gas charged per byte stored in the global state. + gas_per_byte: u32, +} + +impl StorageCosts { + /// Creates new `StorageCosts`. + pub const fn new(gas_per_byte: u32) -> Self { + Self { gas_per_byte } + } + + /// Returns amount of gas per byte stored. + pub fn gas_per_byte(&self) -> u32 { + self.gas_per_byte + } + + /// Calculates gas cost for storing `bytes`. + pub fn calculate_gas_cost(&self, bytes: usize) -> Gas { + let value = U512::from(self.gas_per_byte) * U512::from(bytes); + Gas::new(value) + } +} + +impl Default for StorageCosts { + fn default() -> Self { + Self { + gas_per_byte: DEFAULT_GAS_PER_BYTE_COST, + } + } +} + +#[cfg(any(feature = "testing", test))] +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> StorageCosts { + StorageCosts { + gas_per_byte: rng.gen(), + } + } +} + +impl ToBytes for StorageCosts { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut ret = bytesrepr::unchecked_allocate_buffer(self); + + ret.append(&mut self.gas_per_byte.to_bytes()?); + + Ok(ret) + } + + fn serialized_length(&self) -> usize { + self.gas_per_byte.serialized_length() + } +} + +impl FromBytes for StorageCosts { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (gas_per_byte, rem) = FromBytes::from_bytes(bytes)?; + + Ok((StorageCosts { gas_per_byte }, rem)) + } +} + +impl Zero for StorageCosts { + fn zero() -> Self { + StorageCosts { gas_per_byte: 0 } + } + + fn is_zero(&self) -> bool { + self.gas_per_byte.is_zero() + } +} + +#[cfg(test)] +pub mod tests { + use crate::U512; + + use super::*; + use proptest::prelude::*; + + const SMALL_WEIGHT: usize = 123456789; + const LARGE_WEIGHT: usize = usize::MAX; + + #[test] + fn should_calculate_gas_cost() { + let storage_costs = StorageCosts::default(); + + let cost = 
storage_costs.calculate_gas_cost(SMALL_WEIGHT); + + let expected_cost = U512::from(DEFAULT_GAS_PER_BYTE_COST) * U512::from(SMALL_WEIGHT); + assert_eq!(cost, Gas::new(expected_cost)); + } + + #[test] + fn should_calculate_big_gas_cost() { + let storage_costs = StorageCosts::default(); + + let cost = storage_costs.calculate_gas_cost(LARGE_WEIGHT); + + let expected_cost = U512::from(DEFAULT_GAS_PER_BYTE_COST) * U512::from(LARGE_WEIGHT); + assert_eq!(cost, Gas::new(expected_cost)); + } + + proptest! { + #[test] + fn bytesrepr_roundtrip(storage_costs in super::gens::storage_costs_arb()) { + bytesrepr::test_serialization_roundtrip(&storage_costs); + } + } +} + +#[doc(hidden)] +#[cfg(test)] +pub mod gens { + use crate::gens::example_u32_arb; + + use super::StorageCosts; + use proptest::prelude::*; + + pub(super) fn storage_costs_arb() -> impl Strategy { + example_u32_arb().prop_map(StorageCosts::new) + } +} diff --git a/types/src/chainspec/vm_config/system_config.rs b/types/src/chainspec/vm_config/system_config.rs new file mode 100644 index 0000000000..9c5454e95e --- /dev/null +++ b/types/src/chainspec/vm_config/system_config.rs @@ -0,0 +1,215 @@ +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::{ + distributions::{Distribution, Standard}, + Rng, +}; +use serde::{Deserialize, Serialize}; + +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + chainspec::vm_config::{AuctionCosts, HandlePaymentCosts, MintCosts, StandardPaymentCosts}, +}; + +/// Default cost for calls not a non-existent entrypoint. +pub const DEFAULT_NO_SUCH_ENTRYPOINT_COST: u64 = 2_500_000_000; + +/// Definition of costs in the system. +/// +/// This structure contains the costs of all the system contract's entry points and, additionally, +/// it defines a wasmless mint cost. 
+#[derive(Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[serde(deny_unknown_fields)] +pub struct SystemConfig { + /// The cost of the calling non-existing system contract entry point. + no_such_entrypoint: u64, + + /// Configuration of auction entrypoint costs. + auction_costs: AuctionCosts, + + /// Configuration of mint entrypoint costs. + mint_costs: MintCosts, + + /// Configuration of handle payment entrypoint costs. + handle_payment_costs: HandlePaymentCosts, + + /// Configuration of standard payment costs. + standard_payment_costs: StandardPaymentCosts, +} + +impl Default for SystemConfig { + /// Implements Default for SystemConfig. + fn default() -> Self { + Self { + no_such_entrypoint: DEFAULT_NO_SUCH_ENTRYPOINT_COST, + auction_costs: Default::default(), + handle_payment_costs: Default::default(), + mint_costs: Default::default(), + standard_payment_costs: Default::default(), + } + } +} + +impl SystemConfig { + /// Creates new system config instance. + pub fn new( + no_such_entrypoint: u64, + auction_costs: AuctionCosts, + mint_costs: MintCosts, + handle_payment_costs: HandlePaymentCosts, + standard_payment_costs: StandardPaymentCosts, + ) -> Self { + Self { + no_such_entrypoint, + auction_costs, + mint_costs, + handle_payment_costs, + standard_payment_costs, + } + } + + /// Returns the cost of calling a non-existing system contract entry point. + pub fn no_such_entrypoint(&self) -> u64 { + self.no_such_entrypoint + } + + /// Returns the costs of executing auction entry points. + pub fn auction_costs(&self) -> &AuctionCosts { + &self.auction_costs + } + + /// Returns the costs of executing mint entry points. + pub fn mint_costs(&self) -> &MintCosts { + &self.mint_costs + } + + /// Sets mint costs. + pub fn with_mint_costs(mut self, mint_costs: MintCosts) -> Self { + self.mint_costs = mint_costs; + self + } + + /// Returns the costs of executing `handle_payment` entry points. 
+ pub fn handle_payment_costs(&self) -> &HandlePaymentCosts { + &self.handle_payment_costs + } + + /// Returns the costs of executing `standard_payment` entry points. + pub fn standard_payment_costs(&self) -> &StandardPaymentCosts { + &self.standard_payment_costs + } +} + +#[cfg(any(feature = "testing", test))] +impl SystemConfig { + /// Generates a random instance using a `TestRng`. + pub fn random(rng: &mut TestRng) -> Self { + // there's a bug in toml...under the hood it uses an i64 when it should use a u64 + // this causes flaky test failures if the random result exceeds i64::MAX + let no_such_entrypoint = rng.gen_range(0..i64::MAX as u64); + let auction_costs = rng.gen(); + let mint_costs = rng.gen(); + let handle_payment_costs = rng.gen(); + let standard_payment_costs = rng.gen(); + + SystemConfig { + no_such_entrypoint, + auction_costs, + mint_costs, + handle_payment_costs, + standard_payment_costs, + } + } +} + +#[cfg(any(feature = "testing", test))] +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> SystemConfig { + SystemConfig { + no_such_entrypoint: rng.gen_range(0..i64::MAX) as u64, + auction_costs: rng.gen(), + mint_costs: rng.gen(), + handle_payment_costs: rng.gen(), + standard_payment_costs: rng.gen(), + } + } +} + +impl ToBytes for SystemConfig { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut ret = bytesrepr::unchecked_allocate_buffer(self); + + ret.append(&mut self.no_such_entrypoint.to_bytes()?); + ret.append(&mut self.auction_costs.to_bytes()?); + ret.append(&mut self.mint_costs.to_bytes()?); + ret.append(&mut self.handle_payment_costs.to_bytes()?); + ret.append(&mut self.standard_payment_costs.to_bytes()?); + + Ok(ret) + } + + fn serialized_length(&self) -> usize { + self.no_such_entrypoint.serialized_length() + + self.auction_costs.serialized_length() + + self.mint_costs.serialized_length() + + self.handle_payment_costs.serialized_length() + + self.standard_payment_costs.serialized_length() + } +} + +impl 
FromBytes for SystemConfig { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (no_such_entrypoint, rem) = FromBytes::from_bytes(bytes)?; + let (auction_costs, rem) = FromBytes::from_bytes(rem)?; + let (mint_costs, rem) = FromBytes::from_bytes(rem)?; + let (handle_payment_costs, rem) = FromBytes::from_bytes(rem)?; + let (standard_payment_costs, rem) = FromBytes::from_bytes(rem)?; + Ok(( + SystemConfig { + no_such_entrypoint, + auction_costs, + mint_costs, + handle_payment_costs, + standard_payment_costs, + }, + rem, + )) + } +} + +#[doc(hidden)] +#[cfg(any(feature = "gens", test))] +pub mod gens { + use proptest::prelude::*; + + use crate::{ + chainspec::vm_config::{ + auction_costs::gens::auction_costs_arb, + handle_payment_costs::gens::handle_payment_costs_arb, mint_costs::gens::mint_costs_arb, + standard_payment_costs::gens::standard_payment_costs_arb, + }, + SystemConfig, + }; + + prop_compose! { + pub fn system_config_arb()( + no_such_entrypoint in 0..i64::MAX as u64, + auction_costs in auction_costs_arb(), + mint_costs in mint_costs_arb(), + handle_payment_costs in handle_payment_costs_arb(), + standard_payment_costs in standard_payment_costs_arb(), + ) -> SystemConfig { + SystemConfig { + no_such_entrypoint, + auction_costs, + mint_costs, + handle_payment_costs, + standard_payment_costs, + } + } + } +} diff --git a/types/src/chainspec/vm_config/wasm_config.rs b/types/src/chainspec/vm_config/wasm_config.rs new file mode 100644 index 0000000000..e4161cad08 --- /dev/null +++ b/types/src/chainspec/vm_config/wasm_config.rs @@ -0,0 +1,154 @@ +//! Configuration of the Wasm execution engine. 
+#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::{ + distributions::{Distribution, Standard}, + Rng, +}; +use serde::{Deserialize, Serialize}; + +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + chainspec::vm_config::MessageLimits, +}; + +use super::{wasm_v1_config::WasmV1Config, wasm_v2_config::WasmV2Config}; + +/// Configuration of the Wasm execution environment. +/// +/// This structure contains various Wasm execution configuration options, such as memory limits, +/// stack limits and costs. +#[derive(Clone, Copy, Default, PartialEq, Eq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[serde(deny_unknown_fields)] +pub struct WasmConfig { + /// Messages limits. + messages_limits: MessageLimits, + /// Configuration for wasms in v1 execution engine. + v1: WasmV1Config, + /// Configuration for wasms in v2 execution engine. + v2: WasmV2Config, +} + +impl WasmConfig { + /// Creates new Wasm config. + pub const fn new(messages_limits: MessageLimits, v1: WasmV1Config, v2: WasmV2Config) -> Self { + Self { + messages_limits, + v1, + v2, + } + } + + /// Returns the limits config for messages. + pub fn messages_limits(&self) -> MessageLimits { + self.messages_limits + } + + /// Returns the config for v1 wasms. + pub fn v1(&self) -> &WasmV1Config { + &self.v1 + } + + /// Returns mutable v1 reference + #[cfg(any(feature = "testing", test))] + pub fn v1_mut(&mut self) -> &mut WasmV1Config { + &mut self.v1 + } + + /// Returns the config for v2 wasms. 
+ pub fn v2(&self) -> &WasmV2Config { + &self.v2 + } + + /// Returns mutable v2 reference + #[cfg(any(feature = "testing", test))] + pub fn v2_mut(&mut self) -> &mut WasmV2Config { + &mut self.v2 + } +} + +impl ToBytes for WasmConfig { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut ret = bytesrepr::unchecked_allocate_buffer(self); + ret.append(&mut self.messages_limits.to_bytes()?); + ret.append(&mut self.v1.to_bytes()?); + ret.append(&mut self.v2.to_bytes()?); + Ok(ret) + } + + fn serialized_length(&self) -> usize { + self.messages_limits.serialized_length() + + self.v1.serialized_length() + + self.v2.serialized_length() + } +} + +impl FromBytes for WasmConfig { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (messages_limits, rem) = FromBytes::from_bytes(bytes)?; + let (v1, rem) = FromBytes::from_bytes(rem)?; + let (v2, rem) = FromBytes::from_bytes(rem)?; + + Ok(( + WasmConfig { + messages_limits, + v1, + v2, + }, + rem, + )) + } +} + +#[cfg(any(feature = "testing", test))] +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> WasmConfig { + WasmConfig { + messages_limits: rng.gen(), + v1: rng.gen(), + v2: rng.gen(), + } + } +} + +#[cfg(test)] +pub mod tests { + use super::*; + use proptest::prelude::*; + proptest! { + #[test] + fn bytesrepr_roundtrip(wasm_config in super::gens::wasm_config_arb()) { + bytesrepr::test_serialization_roundtrip(&wasm_config); + } + } +} + +#[doc(hidden)] +#[cfg(any(feature = "gens", test))] +pub mod gens { + use proptest::prop_compose; + + use crate::{ + chainspec::vm_config::{ + message_limits::gens::message_limits_arb, wasm_v1_config::gens::wasm_v1_config_arb, + wasm_v2_config::gens::wasm_v2_config_arb, + }, + WasmConfig, + }; + + prop_compose! 
{ + pub fn wasm_config_arb() ( + v1 in wasm_v1_config_arb(), + v2 in wasm_v2_config_arb(), + messages_limits in message_limits_arb(), + ) -> WasmConfig { + WasmConfig { + messages_limits, + v1, + v2 + } + } + } +} diff --git a/types/src/chainspec/vm_config/wasm_v1_config.rs b/types/src/chainspec/vm_config/wasm_v1_config.rs new file mode 100644 index 0000000000..45d4979519 --- /dev/null +++ b/types/src/chainspec/vm_config/wasm_v1_config.rs @@ -0,0 +1,174 @@ +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + chainspec::vm_config::{HostFunctionCostsV1, OpcodeCosts}, +}; +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::{ + distributions::{Distribution, Standard}, + Rng, +}; +use serde::{Deserialize, Serialize}; + +/// Default maximum number of pages of the Wasm memory. +pub const DEFAULT_WASM_MAX_MEMORY: u32 = 64; +/// Default maximum stack height. +pub const DEFAULT_MAX_STACK_HEIGHT: u32 = 500; + +/// Configuration of the Wasm execution environment for V1 execution machine. +/// +/// This structure contains various Wasm execution configuration options, such as memory limits, +/// stack limits and costs. +#[derive(Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[serde(deny_unknown_fields)] +pub struct WasmV1Config { + /// Maximum amount of heap memory (represented in 64kB pages) each contract can use. + max_memory: u32, + /// Max stack height (native WebAssembly stack limiter). + max_stack_height: u32, + /// Wasm opcode costs table. + opcode_costs: OpcodeCosts, + /// Host function costs table. + host_function_costs: HostFunctionCostsV1, +} + +impl WasmV1Config { + /// ctor + pub fn new( + max_memory: u32, + max_stack_height: u32, + opcode_costs: OpcodeCosts, + host_function_costs: HostFunctionCostsV1, + ) -> Self { + WasmV1Config { + max_memory, + max_stack_height, + opcode_costs, + host_function_costs, + } + } + + /// Returns opcode costs. 
+ pub fn opcode_costs(&self) -> OpcodeCosts { + self.opcode_costs + } + + /// Returns host function costs and consumes this object. + pub fn take_host_function_costs(self) -> HostFunctionCostsV1 { + self.host_function_costs + } + + /// Returns max_memory. + pub fn max_memory(&self) -> u32 { + self.max_memory + } + + /// Returns mutable max_memory reference + #[cfg(any(feature = "testing", test))] + pub fn max_memory_mut(&mut self) -> &mut u32 { + &mut self.max_memory + } + + /// Returns mutable max_stack_height reference + #[cfg(any(feature = "testing", test))] + pub fn max_stack_height_mut(&mut self) -> &mut u32 { + &mut self.max_stack_height + } + + /// Returns max_stack_height. + pub fn max_stack_height(&self) -> u32 { + self.max_stack_height + } +} + +impl Default for WasmV1Config { + fn default() -> Self { + Self { + max_memory: DEFAULT_WASM_MAX_MEMORY, + max_stack_height: DEFAULT_MAX_STACK_HEIGHT, + opcode_costs: OpcodeCosts::default(), + host_function_costs: HostFunctionCostsV1::default(), + } + } +} + +impl ToBytes for WasmV1Config { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut ret = bytesrepr::unchecked_allocate_buffer(self); + ret.append(&mut self.max_memory.to_bytes()?); + ret.append(&mut self.max_stack_height.to_bytes()?); + ret.append(&mut self.opcode_costs.to_bytes()?); + ret.append(&mut self.host_function_costs.to_bytes()?); + Ok(ret) + } + + fn serialized_length(&self) -> usize { + self.max_memory.serialized_length() + + self.max_stack_height.serialized_length() + + self.opcode_costs.serialized_length() + + self.host_function_costs.serialized_length() + } +} + +impl FromBytes for WasmV1Config { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (max_memory, rem) = FromBytes::from_bytes(bytes)?; + let (max_stack_height, rem) = FromBytes::from_bytes(rem)?; + let (opcode_costs, rem) = FromBytes::from_bytes(rem)?; + let (host_function_costs, rem) = FromBytes::from_bytes(rem)?; + Ok(( + WasmV1Config { + 
max_memory, + max_stack_height, + opcode_costs, + host_function_costs, + }, + rem, + )) + } +} + +#[cfg(any(feature = "testing", test))] +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> WasmV1Config { + WasmV1Config { + max_memory: rng.gen(), + max_stack_height: rng.gen(), + opcode_costs: rng.gen(), + host_function_costs: rng.gen(), + } + } +} + +#[doc(hidden)] +#[cfg(any(feature = "gens", test))] +pub mod gens { + use crate::{ + chainspec::vm_config::{ + host_function_costs::gens::host_function_costs_arb, + opcode_costs::gens::opcode_costs_arb, + }, + gens::example_u32_arb, + }; + use proptest::prop_compose; + + use super::WasmV1Config; + + prop_compose! { + pub fn wasm_v1_config_arb() ( + max_memory in example_u32_arb(), + max_stack_height in example_u32_arb(), + opcode_costs in opcode_costs_arb(), + host_function_costs in host_function_costs_arb(), + ) -> WasmV1Config { + WasmV1Config { + max_memory, + max_stack_height, + opcode_costs, + host_function_costs, + } + } + } +} diff --git a/types/src/chainspec/vm_config/wasm_v2_config.rs b/types/src/chainspec/vm_config/wasm_v2_config.rs new file mode 100644 index 0000000000..d0379150d3 --- /dev/null +++ b/types/src/chainspec/vm_config/wasm_v2_config.rs @@ -0,0 +1,156 @@ +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + chainspec::vm_config::OpcodeCosts, +}; +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::{ + distributions::{Distribution, Standard}, + Rng, +}; +use serde::{Deserialize, Serialize}; + +use super::HostFunctionCostsV2; + +/// Default maximum number of pages of the Wasm memory. +pub const DEFAULT_V2_WASM_MAX_MEMORY: u32 = 64; + +/// Configuration of the Wasm execution environment for V2 execution machine. +/// +/// This structure contains various Wasm execution configuration options, such as memory limits and +/// costs. 
+#[derive(Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[serde(deny_unknown_fields)] +pub struct WasmV2Config { + /// Maximum amount of heap memory each contract can use. + max_memory: u32, + /// Wasm opcode costs table. + opcode_costs: OpcodeCosts, + /// Host function costs table. + host_function_costs: HostFunctionCostsV2, +} + +impl WasmV2Config { + /// ctor + pub fn new( + max_memory: u32, + opcode_costs: OpcodeCosts, + host_function_costs: HostFunctionCostsV2, + ) -> Self { + WasmV2Config { + max_memory, + opcode_costs, + host_function_costs, + } + } + + /// Returns opcode costs. + pub fn opcode_costs(&self) -> OpcodeCosts { + self.opcode_costs + } + + /// Returns a reference to host function costs + pub fn host_function_costs(&self) -> &HostFunctionCostsV2 { + &self.host_function_costs + } + + /// Returns host function costs and consumes this object. + pub fn take_host_function_costs(self) -> HostFunctionCostsV2 { + self.host_function_costs + } + + /// Returns max_memory. 
+ pub fn max_memory(&self) -> u32 { + self.max_memory + } + + /// Returns mutable max_memory reference + #[cfg(any(feature = "testing", test))] + pub fn max_memory_mut(&mut self) -> &mut u32 { + &mut self.max_memory + } +} + +impl Default for WasmV2Config { + fn default() -> Self { + Self { + max_memory: DEFAULT_V2_WASM_MAX_MEMORY, + opcode_costs: OpcodeCosts::default(), + host_function_costs: HostFunctionCostsV2::default(), + } + } +} + +impl ToBytes for WasmV2Config { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut ret = bytesrepr::unchecked_allocate_buffer(self); + ret.append(&mut self.max_memory.to_bytes()?); + ret.append(&mut self.opcode_costs.to_bytes()?); + ret.append(&mut self.host_function_costs.to_bytes()?); + Ok(ret) + } + + fn serialized_length(&self) -> usize { + self.max_memory.serialized_length() + + self.opcode_costs.serialized_length() + + self.host_function_costs.serialized_length() + } +} + +impl FromBytes for WasmV2Config { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (max_memory, rem) = FromBytes::from_bytes(bytes)?; + let (opcode_costs, rem) = FromBytes::from_bytes(rem)?; + let (host_function_costs, rem) = FromBytes::from_bytes(rem)?; + Ok(( + WasmV2Config { + max_memory, + opcode_costs, + host_function_costs, + }, + rem, + )) + } +} + +#[cfg(any(feature = "testing", test))] +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> WasmV2Config { + WasmV2Config { + max_memory: rng.gen(), + opcode_costs: rng.gen(), + host_function_costs: rng.gen(), + } + } +} + +#[doc(hidden)] +#[cfg(any(feature = "gens", test))] +pub mod gens { + use crate::{ + chainspec::vm_config::{ + host_function_costs_v2::gens::host_function_costs_v2_arb, + opcode_costs::gens::opcode_costs_arb, + }, + gens::example_u32_arb, + }; + use proptest::prop_compose; + + use super::WasmV2Config; + + prop_compose! 
{ + pub fn wasm_v2_config_arb() ( + max_memory in example_u32_arb(), + opcode_costs in opcode_costs_arb(), + host_function_costs in host_function_costs_v2_arb(), + ) -> WasmV2Config { + WasmV2Config { + max_memory, + opcode_costs, + host_function_costs, + } + } + } +} diff --git a/types/src/checksummed_hex.rs b/types/src/checksummed_hex.rs new file mode 100644 index 0000000000..2b7aa19307 --- /dev/null +++ b/types/src/checksummed_hex.rs @@ -0,0 +1,241 @@ +//! Checksummed hex encoding following an [EIP-55][1]-like scheme. +//! +//! [1]: https://eips.ethereum.org/EIPS/eip-55 + +use alloc::vec::Vec; +use core::ops::RangeInclusive; + +use base16; + +use crate::crypto; + +/// The number of input bytes, at or below which [`decode`] will checksum-decode the output. +pub const SMALL_BYTES_COUNT: usize = 75; + +const HEX_CHARS: [char; 22] = [ + '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'A', 'B', 'C', + 'D', 'E', 'F', +]; + +/// Takes a slice of bytes and breaks it up into a vector of *nibbles* (ie, 4-bit values) +/// represented as `u8`s. +fn bytes_to_nibbles<'a, T: 'a + AsRef<[u8]>>(input: &'a T) -> impl Iterator + 'a { + input + .as_ref() + .iter() + .flat_map(move |byte| [4, 0].iter().map(move |offset| (byte >> offset) & 0x0f)) +} + +/// Takes a slice of bytes and outputs an infinite cyclic stream of bits for those bytes. +fn bytes_to_bits_cycle(bytes: Vec) -> impl Iterator { + bytes + .into_iter() + .cycle() + .flat_map(move |byte| (0..8usize).map(move |offset| ((byte >> offset) & 0x01) == 0x01)) +} + +/// Returns the bytes encoded as hexadecimal with mixed-case based checksums following a scheme +/// similar to [EIP-55](https://eips.ethereum.org/EIPS/eip-55). 
+/// +/// Key differences: +/// - Works on any length of data, not just 20-byte addresses +/// - Uses Blake2b hashes rather than Keccak +/// - Uses hash bits rather than nibbles +fn encode_iter<'a, T: 'a + AsRef<[u8]>>(input: &'a T) -> impl Iterator + 'a { + let nibbles = bytes_to_nibbles(input); + let mut hash_bits = bytes_to_bits_cycle(crypto::blake2b(input.as_ref()).to_vec()); + nibbles.map(move |mut nibble| { + // Base 16 numbers greater than 10 are represented by the ascii characters a through f. + if nibble >= 10 && hash_bits.next().unwrap_or(true) { + // We are using nibble to index HEX_CHARS, so adding 6 to nibble gives us the index + // of the uppercase character. HEX_CHARS[10] == 'a', HEX_CHARS[16] == 'A'. + nibble += 6; + } + HEX_CHARS[nibble as usize] + }) +} + +/// Returns true if all chars in a string are uppercase or lowercase. +/// Returns false if the string is mixed case or if there are no alphabetic chars. +fn string_is_same_case>(s: T) -> bool { + const LOWER_RANGE: RangeInclusive = b'a'..=b'f'; + const UPPER_RANGE: RangeInclusive = b'A'..=b'F'; + + let mut chars = s + .as_ref() + .iter() + .filter(|c| LOWER_RANGE.contains(c) || UPPER_RANGE.contains(c)); + + match chars.next() { + Some(first) => { + let is_upper = UPPER_RANGE.contains(first); + chars.all(|c| UPPER_RANGE.contains(c) == is_upper) + } + None => { + // String has no actual characters. + true + } + } +} + +/// Decodes a mixed-case hexadecimal string, verifying that it conforms to the checksum scheme +/// similar to scheme in [EIP-55][1]. +/// +/// Key differences: +/// - Works on any length of (decoded) data up to `SMALL_BYTES_COUNT`, not just 20-byte addresses +/// - Uses Blake2b hashes rather than Keccak +/// - Uses hash bits rather than nibbles +/// +/// For backward compatibility: if the hex string is all uppercase or all lowercase, the check is +/// skipped. 
+/// +/// [1]: https://eips.ethereum.org/EIPS/eip-55 +pub fn decode>(input: T) -> Result, base16::DecodeError> { + let bytes = base16::decode(input.as_ref())?; + + // If the string was not small or not mixed case, don't verify the checksum. + if bytes.len() > SMALL_BYTES_COUNT || string_is_same_case(input.as_ref()) { + return Ok(bytes); + } + + encode_iter(&bytes) + .zip(input.as_ref().iter()) + .enumerate() + .try_for_each(|(index, (expected_case_hex_char, &input_hex_char))| { + if expected_case_hex_char as u8 == input_hex_char { + Ok(()) + } else { + Err(base16::DecodeError::InvalidByte { + index, + byte: expected_case_hex_char as u8, + }) + } + })?; + Ok(bytes) +} + +#[cfg(test)] +mod tests { + use alloc::string::String; + + use proptest::{ + collection::vec, + prelude::{any, prop_assert, prop_assert_eq}, + }; + use proptest_attr_macro::proptest; + + use super::*; + + #[test] + fn should_decode_empty_input() { + let input = String::new(); + let actual = decode(input).unwrap(); + assert!(actual.is_empty()); + } + + #[test] + fn string_is_same_case_true_when_same_case() { + let input = "aaaaaaaaaaa"; + assert!(string_is_same_case(input)); + + let input = "AAAAAAAAAAA"; + assert!(string_is_same_case(input)); + } + + #[test] + fn string_is_same_case_false_when_mixed_case() { + let input = "aAaAaAaAaAa"; + assert!(!string_is_same_case(input)); + } + + #[test] + fn string_is_same_case_no_alphabetic_chars_in_string() { + let input = "424242424242"; + assert!(string_is_same_case(input)); + } + + #[test] + fn should_checksum_decode_only_if_small() { + let input = [255; SMALL_BYTES_COUNT]; + let small_encoded: String = encode_iter(&input).collect(); + assert_eq!(input.to_vec(), decode(&small_encoded).unwrap()); + + assert!(decode("A1a2").is_err()); + + let large_encoded = format!("A1{}", small_encoded); + assert!(decode(large_encoded).is_ok()); + } + + #[proptest] + fn hex_roundtrip(input: Vec) { + prop_assert_eq!( + input.clone(), + 
decode(encode_iter(&input).collect::()).expect("Failed to decode input.") + ); + } + + proptest::proptest! { + #[test] + fn should_fail_on_invalid_checksum(input in vec(any::(), 0..75)) { + let encoded: String = encode_iter(&input).collect(); + + // Swap the case of the first letter in the checksum hex-encoded value. + let mut expected_error = None; + let mutated: String = encoded + .char_indices() + .map(|(index, mut c)| { + if expected_error.is_some() || c.is_ascii_digit() { + return c; + } + expected_error = Some(base16::DecodeError::InvalidByte { + index, + byte: c as u8, + }); + if c.is_ascii_uppercase() { + c.make_ascii_lowercase(); + } else { + c.make_ascii_uppercase(); + } + c + }) + .collect(); + + // If the encoded form is now all the same case or digits, just return. + if string_is_same_case(&mutated) { + return Ok(()); + } + + // Assert we can still decode to original input using `base16::decode`. + prop_assert_eq!( + input, + base16::decode(&mutated).expect("Failed to decode input.") + ); + + // Assert decoding using `checksummed_hex::decode` returns the expected error. + prop_assert_eq!(expected_error.unwrap(), decode(&mutated).unwrap_err()) + } + } + + #[proptest] + fn hex_roundtrip_sanity(input: Vec) { + prop_assert!(decode(encode_iter(&input).collect::()).is_ok()) + } + + #[proptest] + fn is_same_case_uppercase(input: String) { + let input = input.to_uppercase(); + prop_assert!(string_is_same_case(input)); + } + + #[proptest] + fn is_same_case_lowercase(input: String) { + let input = input.to_lowercase(); + prop_assert!(string_is_same_case(input)); + } + + #[proptest] + fn is_not_same_case(input: String) { + let input = format!("aA{}", input); + prop_assert!(!string_is_same_case(input)); + } +} diff --git a/types/src/cl_type.rs b/types/src/cl_type.rs index a1c87ae36b..55abaee605 100644 --- a/types/src/cl_type.rs +++ b/types/src/cl_type.rs @@ -1,16 +1,15 @@ -// TODO - remove once schemars stops causing warning. 
-#![allow(clippy::field_reassign_with_default)] - use alloc::{ boxed::Box, - collections::{BTreeMap, VecDeque}, + collections::{BTreeMap, BTreeSet, VecDeque}, string::String, vec::Vec, }; -use core::mem; +use core::fmt::{self, Display, Formatter}; +#[cfg(feature = "datasize")] +use datasize::DataSize; use num_rational::Ratio; -#[cfg(feature = "std")] +#[cfg(feature = "json-schema")] use schemars::JsonSchema; use serde::{Deserialize, Serialize}; @@ -19,6 +18,9 @@ use crate::{ Key, URef, U128, U256, U512, }; +// This must be less than 300 in order to avoid a stack overflow when deserializing. +pub(crate) const CL_TYPE_RECURSION_DEPTH: u8 = 50; + const CL_TYPE_TAG_BOOL: u8 = 0; const CL_TYPE_TAG_I32: u8 = 1; const CL_TYPE_TAG_I64: u8 = 2; @@ -47,7 +49,8 @@ const CL_TYPE_TAG_PUBLIC_KEY: u8 = 22; /// /// Provides a description of the underlying data type of a [`CLValue`](crate::CLValue). #[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Serialize, Deserialize, Debug)] -#[cfg_attr(feature = "std", derive(JsonSchema))] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] #[serde(deny_unknown_fields)] pub enum CLType { /// `bool` primitive. @@ -79,25 +82,32 @@ pub enum CLType { /// [`PublicKey`](crate::PublicKey) system type. PublicKey, /// `Option` of a `CLType`. + #[cfg_attr(feature = "datasize", data_size(skip))] Option(Box), /// Variable-length list of a single `CLType` (comparable to a `Vec`). + #[cfg_attr(feature = "datasize", data_size(skip))] List(Box), /// Fixed-length list of a single `CLType` (comparable to a Rust array). ByteArray(u32), /// `Result` with `Ok` and `Err` variants of `CLType`s. #[allow(missing_docs)] // generated docs are explicit enough. + #[cfg_attr(feature = "datasize", data_size(skip))] Result { ok: Box, err: Box }, /// Map with keys of a single `CLType` and values of a single `CLType`. #[allow(missing_docs)] // generated docs are explicit enough. 
+ #[cfg_attr(feature = "datasize", data_size(skip))] Map { key: Box, value: Box, }, /// 1-ary tuple of a `CLType`. + #[cfg_attr(feature = "datasize", data_size(skip))] Tuple1([Box; 1]), /// 2-ary tuple of `CLType`s. + #[cfg_attr(feature = "datasize", data_size(skip))] Tuple2([Box; 2]), /// 3-ary tuple of `CLType`s. + #[cfg_attr(feature = "datasize", data_size(skip))] Tuple3([Box; 3]), /// Unspecified type. Any, @@ -106,7 +116,7 @@ pub enum CLType { impl CLType { /// The `len()` of the `Vec` resulting from `self.to_bytes()`. pub fn serialized_length(&self) -> usize { - mem::size_of::() + size_of::() + match self { CLType::Bool | CLType::I32 @@ -132,6 +142,19 @@ impl CLType { CLType::Tuple3(cl_type_array) => serialized_length_of_cl_tuple_type(cl_type_array), } } + + /// Returns `true` if the [`CLType`] is [`Option`]. + pub fn is_option(&self) -> bool { + matches!(self, Self::Option(..)) + } + + /// Creates a `CLType::Map`. + pub fn map(key: CLType, value: CLType) -> Self { + CLType::Map { + key: Box::new(key), + value: Box::new(value), + } + } } /// Returns the `CLType` describing a "named key" on the system, i.e. a `(String, Key)`. 
@@ -193,89 +216,126 @@ impl CLType { } } -#[allow(clippy::cognitive_complexity)] +impl Display for CLType { + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + match self { + CLType::Bool => write!(formatter, "bool"), + CLType::I32 => write!(formatter, "i32"), + CLType::I64 => write!(formatter, "i64"), + CLType::U8 => write!(formatter, "u8"), + CLType::U32 => write!(formatter, "u32"), + CLType::U64 => write!(formatter, "u64"), + CLType::U128 => write!(formatter, "u128"), + CLType::U256 => write!(formatter, "u256"), + CLType::U512 => write!(formatter, "u512"), + CLType::Unit => write!(formatter, "unit"), + CLType::String => write!(formatter, "string"), + CLType::Key => write!(formatter, "key"), + CLType::URef => write!(formatter, "uref"), + CLType::PublicKey => write!(formatter, "public-key"), + CLType::Option(t) => write!(formatter, "option<{t}>"), + CLType::List(t) => write!(formatter, "list<{t}>"), + CLType::ByteArray(len) => write!(formatter, "byte-array[{len}]"), + CLType::Result { ok, err } => write!(formatter, "result<{ok}, {err}>"), + CLType::Map { key, value } => write!(formatter, "map<{key}, {value}>"), + CLType::Tuple1([t1]) => write!(formatter, "({t1},)"), + CLType::Tuple2([t1, t2]) => write!(formatter, "({t1}, {t2})"), + CLType::Tuple3([t1, t2, t3]) => write!(formatter, "({t1}, {t2}, {t3})"), + CLType::Any => write!(formatter, "any"), + } + } +} + impl FromBytes for CLType { fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { - let (tag, remainder) = u8::from_bytes(bytes)?; - match tag { - CL_TYPE_TAG_BOOL => Ok((CLType::Bool, remainder)), - CL_TYPE_TAG_I32 => Ok((CLType::I32, remainder)), - CL_TYPE_TAG_I64 => Ok((CLType::I64, remainder)), - CL_TYPE_TAG_U8 => Ok((CLType::U8, remainder)), - CL_TYPE_TAG_U32 => Ok((CLType::U32, remainder)), - CL_TYPE_TAG_U64 => Ok((CLType::U64, remainder)), - CL_TYPE_TAG_U128 => Ok((CLType::U128, remainder)), - CL_TYPE_TAG_U256 => Ok((CLType::U256, remainder)), - CL_TYPE_TAG_U512 => 
Ok((CLType::U512, remainder)), - CL_TYPE_TAG_UNIT => Ok((CLType::Unit, remainder)), - CL_TYPE_TAG_STRING => Ok((CLType::String, remainder)), - CL_TYPE_TAG_KEY => Ok((CLType::Key, remainder)), - CL_TYPE_TAG_UREF => Ok((CLType::URef, remainder)), - CL_TYPE_TAG_PUBLIC_KEY => Ok((CLType::PublicKey, remainder)), - CL_TYPE_TAG_OPTION => { - let (inner_type, remainder) = CLType::from_bytes(remainder)?; - let cl_type = CLType::Option(Box::new(inner_type)); - Ok((cl_type, remainder)) - } - CL_TYPE_TAG_LIST => { - let (inner_type, remainder) = CLType::from_bytes(remainder)?; - let cl_type = CLType::List(Box::new(inner_type)); - Ok((cl_type, remainder)) - } - CL_TYPE_TAG_BYTE_ARRAY => { - let (len, remainder) = u32::from_bytes(remainder)?; - let cl_type = CLType::ByteArray(len); - Ok((cl_type, remainder)) - } - CL_TYPE_TAG_RESULT => { - let (ok_type, remainder) = CLType::from_bytes(remainder)?; - let (err_type, remainder) = CLType::from_bytes(remainder)?; - let cl_type = CLType::Result { - ok: Box::new(ok_type), - err: Box::new(err_type), - }; - Ok((cl_type, remainder)) - } - CL_TYPE_TAG_MAP => { - let (key_type, remainder) = CLType::from_bytes(remainder)?; - let (value_type, remainder) = CLType::from_bytes(remainder)?; - let cl_type = CLType::Map { - key: Box::new(key_type), - value: Box::new(value_type), - }; - Ok((cl_type, remainder)) - } - CL_TYPE_TAG_TUPLE1 => { - let (mut inner_types, remainder) = parse_cl_tuple_types(1, remainder)?; - // NOTE: Assumed safe as `parse_cl_tuple_types` is expected to have exactly 1 - // element - let cl_type = CLType::Tuple1([inner_types.pop_front().unwrap()]); - Ok((cl_type, remainder)) - } - CL_TYPE_TAG_TUPLE2 => { - let (mut inner_types, remainder) = parse_cl_tuple_types(2, remainder)?; - // NOTE: Assumed safe as `parse_cl_tuple_types` is expected to have exactly 2 - // elements - let cl_type = CLType::Tuple2([ - inner_types.pop_front().unwrap(), - inner_types.pop_front().unwrap(), - ]); - Ok((cl_type, remainder)) - } - 
CL_TYPE_TAG_TUPLE3 => { - let (mut inner_types, remainder) = parse_cl_tuple_types(3, remainder)?; - // NOTE: Assumed safe as `parse_cl_tuple_types` is expected to have exactly 3 - // elements - let cl_type = CLType::Tuple3([ - inner_types.pop_front().unwrap(), - inner_types.pop_front().unwrap(), - inner_types.pop_front().unwrap(), - ]); - Ok((cl_type, remainder)) - } - CL_TYPE_TAG_ANY => Ok((CLType::Any, remainder)), - _ => Err(bytesrepr::Error::Formatting), + depth_limited_from_bytes(0, bytes) + } +} + +fn depth_limited_from_bytes(depth: u8, bytes: &[u8]) -> Result<(CLType, &[u8]), bytesrepr::Error> { + if depth >= CL_TYPE_RECURSION_DEPTH { + return Err(bytesrepr::Error::ExceededRecursionDepth); + } + let depth = depth + 1; + let (tag, remainder) = u8::from_bytes(bytes)?; + match tag { + CL_TYPE_TAG_BOOL => Ok((CLType::Bool, remainder)), + CL_TYPE_TAG_I32 => Ok((CLType::I32, remainder)), + CL_TYPE_TAG_I64 => Ok((CLType::I64, remainder)), + CL_TYPE_TAG_U8 => Ok((CLType::U8, remainder)), + CL_TYPE_TAG_U32 => Ok((CLType::U32, remainder)), + CL_TYPE_TAG_U64 => Ok((CLType::U64, remainder)), + CL_TYPE_TAG_U128 => Ok((CLType::U128, remainder)), + CL_TYPE_TAG_U256 => Ok((CLType::U256, remainder)), + CL_TYPE_TAG_U512 => Ok((CLType::U512, remainder)), + CL_TYPE_TAG_UNIT => Ok((CLType::Unit, remainder)), + CL_TYPE_TAG_STRING => Ok((CLType::String, remainder)), + CL_TYPE_TAG_KEY => Ok((CLType::Key, remainder)), + CL_TYPE_TAG_UREF => Ok((CLType::URef, remainder)), + CL_TYPE_TAG_PUBLIC_KEY => Ok((CLType::PublicKey, remainder)), + CL_TYPE_TAG_OPTION => { + let (inner_type, remainder) = depth_limited_from_bytes(depth, remainder)?; + let cl_type = CLType::Option(Box::new(inner_type)); + Ok((cl_type, remainder)) + } + CL_TYPE_TAG_LIST => { + let (inner_type, remainder) = depth_limited_from_bytes(depth, remainder)?; + let cl_type = CLType::List(Box::new(inner_type)); + Ok((cl_type, remainder)) + } + CL_TYPE_TAG_BYTE_ARRAY => { + let (len, remainder) = u32::from_bytes(remainder)?; + 
let cl_type = CLType::ByteArray(len); + Ok((cl_type, remainder)) + } + CL_TYPE_TAG_RESULT => { + let (ok_type, remainder) = depth_limited_from_bytes(depth, remainder)?; + let (err_type, remainder) = depth_limited_from_bytes(depth, remainder)?; + let cl_type = CLType::Result { + ok: Box::new(ok_type), + err: Box::new(err_type), + }; + Ok((cl_type, remainder)) + } + CL_TYPE_TAG_MAP => { + let (key_type, remainder) = depth_limited_from_bytes(depth, remainder)?; + let (value_type, remainder) = depth_limited_from_bytes(depth, remainder)?; + let cl_type = CLType::Map { + key: Box::new(key_type), + value: Box::new(value_type), + }; + Ok((cl_type, remainder)) + } + CL_TYPE_TAG_TUPLE1 => { + let (mut inner_types, remainder) = parse_cl_tuple_types(depth, 1, remainder)?; + // NOTE: Assumed safe as `parse_cl_tuple_types` is expected to have exactly 1 + // element + let cl_type = CLType::Tuple1([inner_types.pop_front().unwrap()]); + Ok((cl_type, remainder)) + } + CL_TYPE_TAG_TUPLE2 => { + let (mut inner_types, remainder) = parse_cl_tuple_types(depth, 2, remainder)?; + // NOTE: Assumed safe as `parse_cl_tuple_types` is expected to have exactly 2 + // elements + let cl_type = CLType::Tuple2([ + inner_types.pop_front().unwrap(), + inner_types.pop_front().unwrap(), + ]); + Ok((cl_type, remainder)) } + CL_TYPE_TAG_TUPLE3 => { + let (mut inner_types, remainder) = parse_cl_tuple_types(depth, 3, remainder)?; + // NOTE: Assumed safe as `parse_cl_tuple_types` is expected to have exactly 3 + // elements + let cl_type = CLType::Tuple3([ + inner_types.pop_front().unwrap(), + inner_types.pop_front().unwrap(), + inner_types.pop_front().unwrap(), + ]); + Ok((cl_type, remainder)) + } + CL_TYPE_TAG_ANY => Ok((CLType::Any, remainder)), + _ => Err(bytesrepr::Error::Formatting), } } @@ -292,12 +352,13 @@ fn serialize_cl_tuple_type<'a, T: IntoIterator>>( } fn parse_cl_tuple_types( + depth: u8, count: usize, mut bytes: &[u8], ) -> Result<(VecDeque>, &[u8]), bytesrepr::Error> { let mut cl_types = 
VecDeque::with_capacity(count); for _ in 0..count { - let (cl_type, remainder) = CLType::from_bytes(bytes)?; + let (cl_type, remainder) = depth_limited_from_bytes(depth, bytes)?; cl_types.push_back(Box::new(cl_type)); bytes = remainder; } @@ -416,24 +477,22 @@ impl CLTyped for Vec { } } -macro_rules! impl_cl_typed_for_array { - ($($N:literal)+) => { - $( - impl CLTyped for [u8; $N] { - fn cl_type() -> CLType { - CLType::ByteArray($N as u32) - } - } - )+ +impl CLTyped for BTreeSet { + fn cl_type() -> CLType { + CLType::List(Box::new(T::cl_type())) } } -impl_cl_typed_for_array! { - 0 1 2 3 4 5 6 7 8 9 - 10 11 12 13 14 15 16 17 18 19 - 20 21 22 23 24 25 26 27 28 29 - 30 31 32 - 64 128 256 512 +impl CLTyped for &T { + fn cl_type() -> CLType { + T::cl_type() + } +} + +impl CLTyped for [u8; COUNT] { + fn cl_type() -> CLType { + CLType::ByteArray(COUNT as u32) + } } impl CLTyped for Result { @@ -482,7 +541,7 @@ impl CLTyped for Ratio { #[cfg(test)] mod tests { - use std::{fmt::Debug, string::ToString}; + use std::{fmt::Debug, iter, string::ToString}; use super::*; use crate::{ @@ -490,15 +549,15 @@ mod tests { AccessRights, CLValue, }; - fn round_trip(value: &T) { - let cl_value = CLValue::from_t(value.clone()).unwrap(); + fn round_trip(value: &T) { + let cl_value = CLValue::from_t(value).unwrap(); let serialized_cl_value = cl_value.to_bytes().unwrap(); assert_eq!(serialized_cl_value.len(), cl_value.serialized_length()); let parsed_cl_value: CLValue = bytesrepr::deserialize(serialized_cl_value).unwrap(); assert_eq!(cl_value, parsed_cl_value); - let parsed_value = CLValue::into_t(cl_value).unwrap(); + let parsed_value = cl_value.into_t().unwrap(); assert_eq!(*value, parsed_value); } @@ -677,6 +736,45 @@ mod tests { round_trip(&x); } + #[test] + fn parsing_nested_tuple_1_cltype_should_not_stack_overflow() { + // The bytesrepr representation of the CLType for a + // nested (((...((),),...),),) looks like: + // [18, 18, 18, ..., 9] + + for i in 1..1000 { + let bytes = 
iter::repeat(CL_TYPE_TAG_TUPLE1) + .take(i) + .chain(iter::once(CL_TYPE_TAG_UNIT)) + .collect(); + match bytesrepr::deserialize(bytes) { + Ok(parsed_cltype) => assert!(matches!(parsed_cltype, CLType::Tuple1(_))), + Err(error) => assert_eq!(error, bytesrepr::Error::ExceededRecursionDepth), + } + } + } + + #[test] + fn parsing_nested_tuple_1_value_should_not_stack_overflow() { + // The bytesrepr representation of the CLValue for a + // nested (((...((),),...),),) looks like: + // [0, 0, 0, 0, 18, 18, 18, ..., 18, 9] + + for i in 1..1000 { + let bytes = iter::repeat(0) + .take(4) + .chain(iter::repeat(CL_TYPE_TAG_TUPLE1).take(i)) + .chain(iter::once(CL_TYPE_TAG_UNIT)) + .collect(); + match bytesrepr::deserialize::(bytes) { + Ok(parsed_clvalue) => { + assert!(matches!(parsed_clvalue.cl_type(), CLType::Tuple1(_))) + } + Err(error) => assert_eq!(error, bytesrepr::Error::ExceededRecursionDepth), + } + } + } + #[test] fn any_should_work() { #[derive(PartialEq, Debug, Clone)] @@ -708,4 +806,9 @@ mod tests { let any = Any("Any test".to_string()); round_trip(&any); } + + #[test] + fn should_have_cltype_of_ref_to_cltyped() { + assert_eq!(>::cl_type(), >::cl_type()) + } } diff --git a/types/src/cl_value.rs b/types/src/cl_value.rs index cfe3142928..059e2f7b46 100644 --- a/types/src/cl_value.rs +++ b/types/src/cl_value.rs @@ -1,26 +1,33 @@ -// TODO - remove once schemars stops causing warning. 
-#![allow(clippy::field_reassign_with_default)] - use alloc::{string::String, vec::Vec}; -use core::fmt; +use core::fmt::{self, Display, Formatter}; +use crate::{ + bytesrepr::{self, Bytes, FromBytes, ToBytes, U32_SERIALIZED_LENGTH}, + checksummed_hex, CLType, CLTyped, +}; +#[cfg(feature = "datasize")] use datasize::DataSize; -#[cfg(feature = "std")] +#[cfg(feature = "json-schema")] use schemars::{gen::SchemaGenerator, schema::Schema, JsonSchema}; use serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer}; +#[cfg(feature = "json-schema")] use serde_json::Value; -#[cfg(feature = "std")] -use thiserror::Error; - -use crate::{ - bytesrepr::{self, Bytes, FromBytes, ToBytes, U32_SERIALIZED_LENGTH}, - CLType, CLTyped, -}; +mod checksum_registry; +mod dictionary; +#[cfg(feature = "json-schema")] +pub use jsonrepr::cl_value_to_json; +#[cfg(feature = "json-schema")] mod jsonrepr; +mod system_entity_registry; + +pub use checksum_registry::ChecksumRegistry; +pub use dictionary::{handle_stored_dictionary_value, DictionaryValue}; +pub use system_entity_registry::SystemHashRegistry; /// Error while converting a [`CLValue`] into a given type. -#[derive(PartialEq, Eq, Clone, Debug)] +#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] pub struct CLTypeMismatch { /// The [`CLType`] into which the `CLValue` was being converted. pub expected: CLType, @@ -29,8 +36,8 @@ pub struct CLTypeMismatch { pub found: CLType, } -impl fmt::Display for CLTypeMismatch { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { +impl Display for CLTypeMismatch { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { write!( f, "Expected {:?} but found {:?}.", @@ -40,14 +47,12 @@ impl fmt::Display for CLTypeMismatch { } /// Error relating to [`CLValue`] operations. 
-#[derive(PartialEq, Eq, Clone, Debug)] -#[cfg_attr(feature = "std", derive(Error))] +#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] pub enum CLValueError { /// An error while serializing or deserializing the underlying data. - #[cfg_attr(feature = "std", error("CLValue error: {}", _0))] Serialization(bytesrepr::Error), /// A type mismatch while trying to convert a [`CLValue`] into a given type. - #[cfg_attr(feature = "std", error("Type mismatch: {}", _0))] Type(CLTypeMismatch), } @@ -57,13 +62,22 @@ impl From for CLValueError { } } +impl Display for CLValueError { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + CLValueError::Serialization(error) => write!(formatter, "CLValue error: {}", error), + CLValueError::Type(error) => write!(formatter, "Type mismatch: {}", error), + } + } +} + /// A Casper value, i.e. a value which can be stored and manipulated by smart contracts. /// /// It holds the underlying data as a type-erased, serialized `Vec` and also holds the /// [`CLType`] of the underlying data as a separate member. -#[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Debug, DataSize)] +#[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] pub struct CLValue { - #[data_size(skip)] cl_type: CLType, bytes: Bytes, } @@ -79,12 +93,26 @@ impl CLValue { }) } + /// Converts `self` into its underlying type. + pub fn to_t(&self) -> Result { + let expected = T::cl_type(); + + if self.cl_type == expected { + Ok(bytesrepr::deserialize_from_slice(&self.bytes)?) + } else { + Err(CLValueError::Type(CLTypeMismatch { + expected, + found: self.cl_type.clone(), + })) + } + } + /// Consumes and converts `self` back into its underlying type. pub fn into_t(self) -> Result { let expected = T::cl_type(); if self.cl_type == expected { - Ok(bytesrepr::deserialize(self.bytes.into())?) 
+ Ok(bytesrepr::deserialize_from_slice(&self.bytes)?) } else { Err(CLValueError::Type(CLTypeMismatch { expected, @@ -147,6 +175,12 @@ impl ToBytes for CLValue { fn serialized_length(&self) -> usize { self.bytes.serialized_length() + self.cl_type.serialized_length() } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.bytes.write_bytes(writer)?; + self.cl_type.append_bytes(writer)?; + Ok(()) + } } impl FromBytes for CLValue { @@ -159,7 +193,7 @@ impl FromBytes for CLValue { } /// We need to implement `JsonSchema` for `CLValue` as though it is a `CLValueJson`. -#[cfg(feature = "std")] +#[cfg(feature = "json-schema")] impl JsonSchema for CLValue { fn schema_name() -> String { "CLValue".to_string() @@ -179,11 +213,12 @@ impl JsonSchema for CLValue { /// CLValue is encoded to JSON, and can always be set to null if preferred. #[derive(Serialize, Deserialize)] #[serde(deny_unknown_fields)] -#[cfg_attr(feature = "std", derive(JsonSchema))] -#[cfg_attr(feature = "std", schemars(rename = "CLValue"))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[cfg_attr(feature = "json-schema", schemars(rename = "CLValue"))] struct CLValueJson { cl_type: CLType, bytes: String, + #[cfg(feature = "json-schema")] parsed: Option, } @@ -193,7 +228,8 @@ impl Serialize for CLValue { CLValueJson { cl_type: self.cl_type.clone(), bytes: base16::encode_lower(&self.bytes), - parsed: jsonrepr::cl_value_to_json(&self), + #[cfg(feature = "json-schema")] + parsed: jsonrepr::cl_value_to_json(self), } .serialize(serializer) } else { @@ -208,7 +244,7 @@ impl<'de> Deserialize<'de> for CLValue { let json = CLValueJson::deserialize(deserializer)?; ( json.cl_type.clone(), - base16::decode(&json.bytes).map_err(D::Error::custom)?, + checksummed_hex::decode(&json.bytes).map_err(D::Error::custom)?, ) } else { <(CLType, Vec)>::deserialize(deserializer)? 
@@ -224,18 +260,18 @@ impl<'de> Deserialize<'de> for CLValue { mod tests { use alloc::string::ToString; - #[cfg(feature = "std")] + #[cfg(feature = "json-schema")] use schemars::schema_for; use super::*; use crate::{ account::{AccountHash, ACCOUNT_HASH_LENGTH}, key::KEY_HASH_LENGTH, - AccessRights, DeployHash, Key, PublicKey, TransferAddr, URef, DEPLOY_HASH_LENGTH, - TRANSFER_ADDR_LENGTH, U128, U256, U512, UREF_ADDR_LENGTH, + AccessRights, DeployHash, Digest, Key, PublicKey, TransferAddr, URef, TRANSFER_ADDR_LENGTH, + U128, U256, U512, UREF_ADDR_LENGTH, }; - #[cfg(feature = "std")] + #[cfg(feature = "json-schema")] #[test] fn json_schema() { let json_clvalue_schema = schema_for!(CLValueJson); @@ -291,23 +327,20 @@ mod tests { #[test] fn i32_cl_value_should_encode_to_json() { - check_to_json( - i32::min_value(), - r#"{"cl_type":"I32","parsed":-2147483648}"#, - ); + check_to_json(i32::MIN, r#"{"cl_type":"I32","parsed":-2147483648}"#); check_to_json(0_i32, r#"{"cl_type":"I32","parsed":0}"#); - check_to_json(i32::max_value(), r#"{"cl_type":"I32","parsed":2147483647}"#); + check_to_json(i32::MAX, r#"{"cl_type":"I32","parsed":2147483647}"#); } #[test] fn i64_cl_value_should_encode_to_json() { check_to_json( - i64::min_value(), + i64::MIN, r#"{"cl_type":"I64","parsed":-9223372036854775808}"#, ); check_to_json(0_i64, r#"{"cl_type":"I64","parsed":0}"#); check_to_json( - i64::max_value(), + i64::MAX, r#"{"cl_type":"I64","parsed":9223372036854775807}"#, ); } @@ -315,20 +348,20 @@ mod tests { #[test] fn u8_cl_value_should_encode_to_json() { check_to_json(0_u8, r#"{"cl_type":"U8","parsed":0}"#); - check_to_json(u8::max_value(), r#"{"cl_type":"U8","parsed":255}"#); + check_to_json(u8::MAX, r#"{"cl_type":"U8","parsed":255}"#); } #[test] fn u32_cl_value_should_encode_to_json() { check_to_json(0_u32, r#"{"cl_type":"U32","parsed":0}"#); - check_to_json(u32::max_value(), r#"{"cl_type":"U32","parsed":4294967295}"#); + check_to_json(u32::MAX, 
r#"{"cl_type":"U32","parsed":4294967295}"#); } #[test] fn u64_cl_value_should_encode_to_json() { check_to_json(0_u64, r#"{"cl_type":"U64","parsed":0}"#); check_to_json( - u64::max_value(), + u64::MAX, r#"{"cl_type":"U64","parsed":18446744073709551615}"#, ); } @@ -337,7 +370,7 @@ mod tests { fn u128_cl_value_should_encode_to_json() { check_to_json(U128::zero(), r#"{"cl_type":"U128","parsed":"0"}"#); check_to_json( - U128::max_value(), + U128::MAX, r#"{"cl_type":"U128","parsed":"340282366920938463463374607431768211455"}"#, ); } @@ -346,7 +379,7 @@ mod tests { fn u256_cl_value_should_encode_to_json() { check_to_json(U256::zero(), r#"{"cl_type":"U256","parsed":"0"}"#); check_to_json( - U256::max_value(), + U256::MAX, r#"{"cl_type":"U256","parsed":"115792089237316195423570985008687907853269984665640564039457584007913129639935"}"#, ); } @@ -355,7 +388,7 @@ mod tests { fn u512_cl_value_should_encode_to_json() { check_to_json(U512::zero(), r#"{"cl_type":"U512","parsed":"0"}"#); check_to_json( - U512::max_value(), + U512::MAX, r#"{"cl_type":"U512","parsed":"13407807929942597099574024998205846127479365820592393377723561443721764030073546976801874298166903427690031858186486050853753882811946569946433649006084095"}"#, ); } @@ -379,31 +412,31 @@ mod tests { let key_account = Key::Account(AccountHash::new([1; ACCOUNT_HASH_LENGTH])); check_to_json( key_account, - r#"{"cl_type":"Key","parsed":{"Account":"account-hash-0101010101010101010101010101010101010101010101010101010101010101"}}"#, + r#"{"cl_type":"Key","parsed":"account-hash-0101010101010101010101010101010101010101010101010101010101010101"}"#, ); let key_hash = Key::Hash([2; KEY_HASH_LENGTH]); check_to_json( key_hash, - r#"{"cl_type":"Key","parsed":{"Hash":"hash-0202020202020202020202020202020202020202020202020202020202020202"}}"#, + r#"{"cl_type":"Key","parsed":"hash-0202020202020202020202020202020202020202020202020202020202020202"}"#, ); let key_uref = Key::URef(URef::new([3; UREF_ADDR_LENGTH], AccessRights::READ)); 
check_to_json( key_uref, - r#"{"cl_type":"Key","parsed":{"URef":"uref-0303030303030303030303030303030303030303030303030303030303030303-001"}}"#, + r#"{"cl_type":"Key","parsed":"uref-0303030303030303030303030303030303030303030303030303030303030303-001"}"#, ); let key_transfer = Key::Transfer(TransferAddr::new([4; TRANSFER_ADDR_LENGTH])); check_to_json( key_transfer, - r#"{"cl_type":"Key","parsed":{"Transfer":"transfer-0404040404040404040404040404040404040404040404040404040404040404"}}"#, + r#"{"cl_type":"Key","parsed":"transfer-0404040404040404040404040404040404040404040404040404040404040404"}"#, ); - let key_deploy_info = Key::DeployInfo(DeployHash::new([5; DEPLOY_HASH_LENGTH])); + let key_deploy_info = Key::DeployInfo(DeployHash::from_raw([5; Digest::LENGTH])); check_to_json( key_deploy_info, - r#"{"cl_type":"Key","parsed":{"DeployInfo":"deploy-0505050505050505050505050505050505050505050505050505050505050505"}}"#, + r#"{"cl_type":"Key","parsed":"deploy-0505050505050505050505050505050505050505050505050505050505050505"}"#, ); } @@ -420,13 +453,13 @@ mod tests { fn public_key_cl_value_should_encode_to_json() { check_to_json( PublicKey::from( - SecretKey::ed25519_from_bytes([7; SecretKey::ED25519_LENGTH]).unwrap(), + &SecretKey::ed25519_from_bytes([7; SecretKey::ED25519_LENGTH]).unwrap(), ), r#"{"cl_type":"PublicKey","parsed":"01ea4a6c63e29c520abef5507b132ec5f9954776aebebe7b92421eea691446d22c"}"#, ); check_to_json( PublicKey::from( - SecretKey::secp256k1_from_bytes([8; SecretKey::SECP256K1_LENGTH]).unwrap(), + &SecretKey::secp256k1_from_bytes([8; SecretKey::SECP256K1_LENGTH]).unwrap(), ), r#"{"cl_type":"PublicKey","parsed":"0203f991f944d1e1954a7fc8b9bf62e0d78f015f4c07762d505e20e6c45260a3661b"}"#, ); @@ -453,12 +486,12 @@ mod tests { #[test] fn i32_cl_value_should_encode_to_json() { check_to_json( - Some(i32::min_value()), + Some(i32::MIN), r#"{"cl_type":{"Option":"I32"},"parsed":-2147483648}"#, ); check_to_json(Some(0_i32), 
r#"{"cl_type":{"Option":"I32"},"parsed":0}"#); check_to_json( - Some(i32::max_value()), + Some(i32::MAX), r#"{"cl_type":{"Option":"I32"},"parsed":2147483647}"#, ); check_to_json( @@ -470,12 +503,12 @@ mod tests { #[test] fn i64_cl_value_should_encode_to_json() { check_to_json( - Some(i64::min_value()), + Some(i64::MIN), r#"{"cl_type":{"Option":"I64"},"parsed":-9223372036854775808}"#, ); check_to_json(Some(0_i64), r#"{"cl_type":{"Option":"I64"},"parsed":0}"#); check_to_json( - Some(i64::max_value()), + Some(i64::MAX), r#"{"cl_type":{"Option":"I64"},"parsed":9223372036854775807}"#, ); check_to_json( @@ -487,10 +520,7 @@ mod tests { #[test] fn u8_cl_value_should_encode_to_json() { check_to_json(Some(0_u8), r#"{"cl_type":{"Option":"U8"},"parsed":0}"#); - check_to_json( - Some(u8::max_value()), - r#"{"cl_type":{"Option":"U8"},"parsed":255}"#, - ); + check_to_json(Some(u8::MAX), r#"{"cl_type":{"Option":"U8"},"parsed":255}"#); check_to_json( Option::::None, r#"{"cl_type":{"Option":"U8"},"parsed":null}"#, @@ -501,7 +531,7 @@ mod tests { fn u32_cl_value_should_encode_to_json() { check_to_json(Some(0_u32), r#"{"cl_type":{"Option":"U32"},"parsed":0}"#); check_to_json( - Some(u32::max_value()), + Some(u32::MAX), r#"{"cl_type":{"Option":"U32"},"parsed":4294967295}"#, ); check_to_json( @@ -514,7 +544,7 @@ mod tests { fn u64_cl_value_should_encode_to_json() { check_to_json(Some(0_u64), r#"{"cl_type":{"Option":"U64"},"parsed":0}"#); check_to_json( - Some(u64::max_value()), + Some(u64::MAX), r#"{"cl_type":{"Option":"U64"},"parsed":18446744073709551615}"#, ); check_to_json( @@ -530,7 +560,7 @@ mod tests { r#"{"cl_type":{"Option":"U128"},"parsed":"0"}"#, ); check_to_json( - Some(U128::max_value()), + Some(U128::MAX), r#"{"cl_type":{"Option":"U128"},"parsed":"340282366920938463463374607431768211455"}"#, ); check_to_json( @@ -546,7 +576,7 @@ mod tests { r#"{"cl_type":{"Option":"U256"},"parsed":"0"}"#, ); check_to_json( - Some(U256::max_value()), + Some(U256::MAX), 
r#"{"cl_type":{"Option":"U256"},"parsed":"115792089237316195423570985008687907853269984665640564039457584007913129639935"}"#, ); check_to_json( @@ -562,7 +592,7 @@ mod tests { r#"{"cl_type":{"Option":"U512"},"parsed":"0"}"#, ); check_to_json( - Some(U512::max_value()), + Some(U512::MAX), r#"{"cl_type":{"Option":"U512"},"parsed":"13407807929942597099574024998205846127479365820592393377723561443721764030073546976801874298166903427690031858186486050853753882811946569946433649006084095"}"#, ); check_to_json( @@ -601,31 +631,31 @@ mod tests { let key_account = Key::Account(AccountHash::new([1; ACCOUNT_HASH_LENGTH])); check_to_json( Some(key_account), - r#"{"cl_type":{"Option":"Key"},"parsed":{"Account":"account-hash-0101010101010101010101010101010101010101010101010101010101010101"}}"#, + r#"{"cl_type":{"Option":"Key"},"parsed":"account-hash-0101010101010101010101010101010101010101010101010101010101010101"}"#, ); let key_hash = Key::Hash([2; KEY_HASH_LENGTH]); check_to_json( Some(key_hash), - r#"{"cl_type":{"Option":"Key"},"parsed":{"Hash":"hash-0202020202020202020202020202020202020202020202020202020202020202"}}"#, + r#"{"cl_type":{"Option":"Key"},"parsed":"hash-0202020202020202020202020202020202020202020202020202020202020202"}"#, ); let key_uref = Key::URef(URef::new([3; UREF_ADDR_LENGTH], AccessRights::READ)); check_to_json( Some(key_uref), - r#"{"cl_type":{"Option":"Key"},"parsed":{"URef":"uref-0303030303030303030303030303030303030303030303030303030303030303-001"}}"#, + r#"{"cl_type":{"Option":"Key"},"parsed":"uref-0303030303030303030303030303030303030303030303030303030303030303-001"}"#, ); let key_transfer = Key::Transfer(TransferAddr::new([4; TRANSFER_ADDR_LENGTH])); check_to_json( Some(key_transfer), - r#"{"cl_type":{"Option":"Key"},"parsed":{"Transfer":"transfer-0404040404040404040404040404040404040404040404040404040404040404"}}"#, + r#"{"cl_type":{"Option":"Key"},"parsed":"transfer-0404040404040404040404040404040404040404040404040404040404040404"}"#, ); - let 
key_deploy_info = Key::DeployInfo(DeployHash::new([5; DEPLOY_HASH_LENGTH])); + let key_deploy_info = Key::DeployInfo(DeployHash::from_raw([5; Digest::LENGTH])); check_to_json( Some(key_deploy_info), - r#"{"cl_type":{"Option":"Key"},"parsed":{"DeployInfo":"deploy-0505050505050505050505050505050505050505050505050505050505050505"}}"#, + r#"{"cl_type":{"Option":"Key"},"parsed":"deploy-0505050505050505050505050505050505050505050505050505050505050505"}"#, ); check_to_json( @@ -651,13 +681,13 @@ mod tests { fn public_key_cl_value_should_encode_to_json() { check_to_json( Some(PublicKey::from( - SecretKey::ed25519_from_bytes([7; SecretKey::ED25519_LENGTH]).unwrap(), + &SecretKey::ed25519_from_bytes([7; SecretKey::ED25519_LENGTH]).unwrap(), )), r#"{"cl_type":{"Option":"PublicKey"},"parsed":"01ea4a6c63e29c520abef5507b132ec5f9954776aebebe7b92421eea691446d22c"}"#, ); check_to_json( Some(PublicKey::from( - SecretKey::secp256k1_from_bytes([8; SecretKey::SECP256K1_LENGTH]).unwrap(), + &SecretKey::secp256k1_from_bytes([8; SecretKey::SECP256K1_LENGTH]).unwrap(), )), r#"{"cl_type":{"Option":"PublicKey"},"parsed":"0203f991f944d1e1954a7fc8b9bf62e0d78f015f4c07762d505e20e6c45260a3661b"}"#, ); @@ -1073,19 +1103,19 @@ mod tests { let key = Key::Hash([2; KEY_HASH_LENGTH]); check_to_json( Result::::Ok(key), - r#"{"cl_type":{"Result":{"ok":"Key","err":"I32"}},"parsed":{"Ok":{"Hash":"hash-0202020202020202020202020202020202020202020202020202020202020202"}}}"#, + r#"{"cl_type":{"Result":{"ok":"Key","err":"I32"}},"parsed":{"Ok":"hash-0202020202020202020202020202020202020202020202020202020202020202"}}"#, ); check_to_json( Result::::Ok(key), - r#"{"cl_type":{"Result":{"ok":"Key","err":"U32"}},"parsed":{"Ok":{"Hash":"hash-0202020202020202020202020202020202020202020202020202020202020202"}}}"#, + r#"{"cl_type":{"Result":{"ok":"Key","err":"U32"}},"parsed":{"Ok":"hash-0202020202020202020202020202020202020202020202020202020202020202"}}"#, ); check_to_json( Result::::Ok(key), - 
r#"{"cl_type":{"Result":{"ok":"Key","err":"Unit"}},"parsed":{"Ok":{"Hash":"hash-0202020202020202020202020202020202020202020202020202020202020202"}}}"#, + r#"{"cl_type":{"Result":{"ok":"Key","err":"Unit"}},"parsed":{"Ok":"hash-0202020202020202020202020202020202020202020202020202020202020202"}}"#, ); check_to_json( Result::::Ok(key), - r#"{"cl_type":{"Result":{"ok":"Key","err":"String"}},"parsed":{"Ok":{"Hash":"hash-0202020202020202020202020202020202020202020202020202020202020202"}}}"#, + r#"{"cl_type":{"Result":{"ok":"Key","err":"String"}},"parsed":{"Ok":"hash-0202020202020202020202020202020202020202020202020202020202020202"}}"#, ); check_to_json( Result::::Err(-1), @@ -1144,10 +1174,9 @@ mod tests { #[test] fn public_key_cl_value_should_encode_to_json() { - let public_key: PublicKey = - SecretKey::secp256k1_from_bytes([8; SecretKey::SECP256K1_LENGTH]) - .unwrap() - .into(); + let secret_key = + SecretKey::secp256k1_from_bytes([8; SecretKey::SECP256K1_LENGTH]).unwrap(); + let public_key = PublicKey::from(&secret_key); check_to_json( Result::::Ok(public_key.clone()), r#"{"cl_type":{"Result":{"ok":"PublicKey","err":"I32"}},"parsed":{"Ok":"0203f991f944d1e1954a7fc8b9bf62e0d78f015f4c07762d505e20e6c45260a3661b"}}"#, diff --git a/types/src/cl_value/checksum_registry.rs b/types/src/cl_value/checksum_registry.rs new file mode 100644 index 0000000000..290b623814 --- /dev/null +++ b/types/src/cl_value/checksum_registry.rs @@ -0,0 +1,70 @@ +//! The registry of checksums. + +use alloc::{ + collections::BTreeMap, + string::{String, ToString}, + vec::Vec, +}; + +use serde::{Deserialize, Serialize}; + +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + CLType, CLTyped, Digest, +}; + +/// The checksum registry. +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, Debug, Default)] +pub struct ChecksumRegistry(BTreeMap); + +impl ChecksumRegistry { + /// Returns a new `ChecksumRegistry`. 
+ pub fn new() -> Self { + ChecksumRegistry(BTreeMap::new()) + } + + /// Inserts a checksum into the registry. + pub fn insert(&mut self, checksum_name: &str, checksum: Digest) { + self.0.insert(checksum_name.to_string(), checksum); + } + + /// Gets a checksum from the registry. + pub fn get(&self, checksum_name: &str) -> Option<&Digest> { + self.0.get(checksum_name) + } +} + +impl ToBytes for ChecksumRegistry { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.0.to_bytes() + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } +} + +impl FromBytes for ChecksumRegistry { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (inner, remainder) = BTreeMap::from_bytes(bytes)?; + Ok((ChecksumRegistry(inner), remainder)) + } +} + +impl CLTyped for ChecksumRegistry { + fn cl_type() -> CLType { + BTreeMap::::cl_type() + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn bytesrepr_roundtrip() { + let mut checksum_registry = ChecksumRegistry::new(); + checksum_registry.insert("a", Digest::hash([9; 100])); + bytesrepr::test_serialization_roundtrip(&checksum_registry); + } +} diff --git a/types/src/cl_value/dictionary.rs b/types/src/cl_value/dictionary.rs new file mode 100644 index 0000000000..09a6f1a223 --- /dev/null +++ b/types/src/cl_value/dictionary.rs @@ -0,0 +1,103 @@ +use alloc::vec::Vec; + +use crate::{ + bytesrepr::{self, Bytes, FromBytes, ToBytes}, + CLType, CLTyped, CLValue, CLValueError, Key, StoredValue, +}; + +/// Wraps a [`CLValue`] for storage in a dictionary. +/// +/// Note that we include the dictionary [`super::super::URef`] and key used to create the +/// `Key::Dictionary` under which this value is stored. This is to allow migration to a different +/// key representation in the future. +#[derive(Clone)] +pub struct DictionaryValue { + /// Actual [`CLValue`] written to global state. + cl_value: CLValue, + /// [`URef`] seed bytes. + seed_uref_addr: Bytes, + /// Original key bytes. 
+ dictionary_item_key_bytes: Bytes, +} + +impl DictionaryValue { + /// Constructor. + pub fn new( + cl_value: CLValue, + seed_uref_addr: Vec, + dictionary_item_key_bytes: Vec, + ) -> Self { + Self { + cl_value, + seed_uref_addr: seed_uref_addr.into(), + dictionary_item_key_bytes: dictionary_item_key_bytes.into(), + } + } + + /// Get a reference to the [`DictionaryValue`]'s wrapper's cl value. + pub fn into_cl_value(self) -> CLValue { + self.cl_value + } +} + +impl CLTyped for DictionaryValue { + fn cl_type() -> CLType { + CLType::Any + } +} + +impl FromBytes for DictionaryValue { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (cl_value, remainder) = FromBytes::from_bytes(bytes)?; + let (uref_addr, remainder) = FromBytes::from_bytes(remainder)?; + let (key_bytes, remainder) = FromBytes::from_bytes(remainder)?; + let dictionary_value = DictionaryValue { + cl_value, + seed_uref_addr: uref_addr, + dictionary_item_key_bytes: key_bytes, + }; + Ok((dictionary_value, remainder)) + } +} + +impl ToBytes for DictionaryValue { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + buffer.extend(self.cl_value.to_bytes()?); + buffer.extend(self.seed_uref_addr.to_bytes()?); + buffer.extend(self.dictionary_item_key_bytes.to_bytes()?); + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.cl_value.serialized_length() + + self.seed_uref_addr.serialized_length() + + self.dictionary_item_key_bytes.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.cl_value.write_bytes(writer)?; + self.seed_uref_addr.write_bytes(writer)?; + self.dictionary_item_key_bytes.write_bytes(writer)?; + Ok(()) + } +} + +/// Inspects `key` argument whether it contains a dictionary variant, and checks if `stored_value` +/// contains a [`CLValue`], then it will attempt a conversion from the held clvalue into +/// [`DictionaryValue`] and returns the real 
[`CLValue`] held by it. +/// +/// For any other combination of `key` and `stored_value` it returns its unmodified value. +pub fn handle_stored_dictionary_value( + key: Key, + stored_value: StoredValue, +) -> Result { + match (key, stored_value) { + (Key::Dictionary(_), StoredValue::CLValue(cl_value)) => { + let wrapped_cl_value: DictionaryValue = cl_value.into_t()?; + let cl_value = wrapped_cl_value.into_cl_value(); + Ok(StoredValue::CLValue(cl_value)) + } + (_, stored_value) => Ok(stored_value), + } +} diff --git a/types/src/cl_value/jsonrepr.rs b/types/src/cl_value/jsonrepr.rs index 3b71ecd654..5148561e99 100644 --- a/types/src/cl_value/jsonrepr.rs +++ b/types/src/cl_value/jsonrepr.rs @@ -1,25 +1,37 @@ -use alloc::{string::String, vec, vec::Vec}; +use alloc::{string::String, vec::Vec}; use serde::Serialize; use serde_json::{json, Value}; use crate::{ bytesrepr::{self, FromBytes, OPTION_NONE_TAG, OPTION_SOME_TAG, RESULT_ERR_TAG, RESULT_OK_TAG}, + cl_type::CL_TYPE_RECURSION_DEPTH, CLType, CLValue, Key, PublicKey, URef, U128, U256, U512, }; /// Returns a best-effort attempt to convert the `CLValue` into a meaningful JSON value. 
pub fn cl_value_to_json(cl_value: &CLValue) -> Option { - to_json(&cl_value.cl_type(), cl_value.inner_bytes()).and_then(|(json_value, remainder)| { - if remainder.is_empty() { - Some(json_value) - } else { - None - } - }) + depth_limited_to_json(0, cl_value.cl_type(), cl_value.inner_bytes()).and_then( + |(json_value, remainder)| { + if remainder.is_empty() { + Some(json_value) + } else { + None + } + }, + ) } -fn to_json<'a>(cl_type: &CLType, bytes: &'a [u8]) -> Option<(Value, &'a [u8])> { +fn depth_limited_to_json<'a>( + depth: u8, + cl_type: &CLType, + bytes: &'a [u8], +) -> Option<(Value, &'a [u8])> { + if depth >= CL_TYPE_RECURSION_DEPTH { + return None; + } + let depth = depth + 1; + match cl_type { CLType::Bool => simple_type_to_json::(bytes), CLType::I32 => simple_type_to_json::(bytes), @@ -39,7 +51,7 @@ fn to_json<'a>(cl_type: &CLType, bytes: &'a [u8]) -> Option<(Value, &'a [u8])> { let (variant, remainder) = u8::from_bytes(bytes).ok()?; match variant { OPTION_NONE_TAG => Some((Value::Null, remainder)), - OPTION_SOME_TAG => Some(to_json(inner_cl_type, remainder)?), + OPTION_SOME_TAG => Some(depth_limited_to_json(depth, inner_cl_type, remainder)?), _ => None, } } @@ -47,7 +59,7 @@ fn to_json<'a>(cl_type: &CLType, bytes: &'a [u8]) -> Option<(Value, &'a [u8])> { let (count, mut stream) = u32::from_bytes(bytes).ok()?; let mut result: Vec = Vec::new(); for _ in 0..count { - let (value, remainder) = to_json(inner_cl_type, &stream)?; + let (value, remainder) = depth_limited_to_json(depth, inner_cl_type, stream)?; result.push(value); stream = remainder; } @@ -55,46 +67,47 @@ fn to_json<'a>(cl_type: &CLType, bytes: &'a [u8]) -> Option<(Value, &'a [u8])> { } CLType::ByteArray(length) => { let (bytes, remainder) = bytesrepr::safe_split_at(bytes, *length as usize).ok()?; - Some((json![hex::encode(bytes)], remainder)) + let hex_encoded_bytes = base16::encode_lower(&bytes); + Some((json![hex_encoded_bytes], remainder)) } CLType::Result { ok, err } => { let (variant, 
remainder) = u8::from_bytes(bytes).ok()?; match variant { RESULT_ERR_TAG => { - let (value, remainder) = to_json(err, remainder)?; + let (value, remainder) = depth_limited_to_json(depth, err, remainder)?; Some((json!({ "Err": value }), remainder)) } RESULT_OK_TAG => { - let (value, remainder) = to_json(ok, remainder)?; + let (value, remainder) = depth_limited_to_json(depth, ok, remainder)?; Some((json!({ "Ok": value }), remainder)) } _ => None, } } CLType::Map { key, value } => { - let (num_keys, mut stream) = u32::from_bytes(bytes).unwrap(); + let (num_keys, mut stream) = u32::from_bytes(bytes).ok()?; let mut result: Vec = Vec::new(); for _ in 0..num_keys { - let (k, remainder) = to_json(key, stream)?; - let (v, remainder) = to_json(value, remainder)?; + let (k, remainder) = depth_limited_to_json(depth, key, stream)?; + let (v, remainder) = depth_limited_to_json(depth, value, remainder)?; result.push(json!({"key": k, "value": v})); stream = remainder; } Some((json!(result), stream)) } CLType::Tuple1(arr) => { - let (t1, remainder) = to_json(&arr[0], bytes)?; + let (t1, remainder) = depth_limited_to_json(depth, &arr[0], bytes)?; Some((json!([t1]), remainder)) } CLType::Tuple2(arr) => { - let (t1, remainder) = to_json(&arr[0], bytes)?; - let (t2, remainder) = to_json(&arr[1], remainder)?; + let (t1, remainder) = depth_limited_to_json(depth, &arr[0], bytes)?; + let (t2, remainder) = depth_limited_to_json(depth, &arr[1], remainder)?; Some((json!([t1, t2]), remainder)) } CLType::Tuple3(arr) => { - let (t1, remainder) = to_json(&arr[0], bytes)?; - let (t2, remainder) = to_json(&arr[1], remainder)?; - let (t3, remainder) = to_json(&arr[2], remainder)?; + let (t1, remainder) = depth_limited_to_json(depth, &arr[0], bytes)?; + let (t2, remainder) = depth_limited_to_json(depth, &arr[1], remainder)?; + let (t3, remainder) = depth_limited_to_json(depth, &arr[2], remainder)?; Some((json!([t1, t2, t3]), remainder)) } CLType::Any => None, @@ -138,10 +151,11 @@ mod tests { #[test] 
fn list_of_public_keys_to_json_value() { let a = PublicKey::from( - SecretKey::secp256k1_from_bytes([3; SecretKey::SECP256K1_LENGTH]).unwrap(), + &SecretKey::secp256k1_from_bytes([3; SecretKey::SECP256K1_LENGTH]).unwrap(), + ); + let b = PublicKey::from( + &SecretKey::ed25519_from_bytes([3; SecretKey::ED25519_LENGTH]).unwrap(), ); - let b = - PublicKey::from(SecretKey::ed25519_from_bytes([3; SecretKey::ED25519_LENGTH]).unwrap()); let a_hex = a.to_hex(); let b_hex = b.to_hex(); let cl_value = CLValue::from_t(vec![a, b]).unwrap(); @@ -153,12 +167,14 @@ mod tests { #[test] fn list_of_list_of_public_keys_to_json_value() { let a = PublicKey::from( - SecretKey::secp256k1_from_bytes([3; SecretKey::SECP256K1_LENGTH]).unwrap(), + &SecretKey::secp256k1_from_bytes([3; SecretKey::SECP256K1_LENGTH]).unwrap(), + ); + let b = PublicKey::from( + &SecretKey::ed25519_from_bytes([3; PublicKey::ED25519_LENGTH]).unwrap(), + ); + let c = PublicKey::from( + &SecretKey::ed25519_from_bytes([6; PublicKey::ED25519_LENGTH]).unwrap(), ); - let b = - PublicKey::from(SecretKey::ed25519_from_bytes([3; PublicKey::ED25519_LENGTH]).unwrap()); - let c = - PublicKey::from(SecretKey::ed25519_from_bytes([6; PublicKey::ED25519_LENGTH]).unwrap()); let a_hex = a.to_hex(); let b_hex = b.to_hex(); let c_hex = c.to_hex(); @@ -201,7 +217,7 @@ mod tests { let bytes = [1_u8, 2]; let cl_value = CLValue::from_t(bytes).unwrap(); let cl_value_as_json = cl_value_to_json(&cl_value).unwrap(); - let expected = json!(hex::encode(&bytes)); + let expected = json!(base16::encode_lower(&bytes)); assert_eq!(cl_value_as_json, expected); } @@ -225,4 +241,32 @@ mod tests { test_value((v1.clone(), v2.clone())); test_value((v1, v2, v3)); } + + #[test] + fn json_encoding_nested_tuple_1_value_should_not_stack_overflow() { + // Returns a CLType corresponding to (((...(cl_type,),...),),) nested in tuples to + // `depth_limit`. 
+ fn wrap_in_tuple1(cl_type: CLType, current_depth: usize, depth_limit: usize) -> CLType { + if current_depth == depth_limit { + return cl_type; + } + wrap_in_tuple1( + CLType::Tuple1([Box::new(cl_type)]), + current_depth + 1, + depth_limit, + ) + } + + for depth_limit in &[1, CL_TYPE_RECURSION_DEPTH as usize] { + let cl_type = wrap_in_tuple1(CLType::Unit, 1, *depth_limit); + let cl_value = CLValue::from_components(cl_type, vec![]); + assert!(cl_value_to_json(&cl_value).is_some()); + } + + for depth_limit in &[CL_TYPE_RECURSION_DEPTH as usize + 1, 1000] { + let cl_type = wrap_in_tuple1(CLType::Unit, 1, *depth_limit); + let cl_value = CLValue::from_components(cl_type, vec![]); + assert!(cl_value_to_json(&cl_value).is_none()); + } + } } diff --git a/types/src/cl_value/system_entity_registry.rs b/types/src/cl_value/system_entity_registry.rs new file mode 100644 index 0000000000..eaae411040 --- /dev/null +++ b/types/src/cl_value/system_entity_registry.rs @@ -0,0 +1,99 @@ +//! The registry of system contracts. + +use alloc::{collections::BTreeMap, string::String, vec::Vec}; + +// #[cfg(feature = "datasize")] +// use datasize::DataSize; +use serde::{Deserialize, Serialize}; + +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + system::STANDARD_PAYMENT, + AddressableEntityHash, CLType, CLTyped, HashAddr, +}; + +/// The system entity registry. +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, Debug)] +pub struct SystemHashRegistry(BTreeMap); + +impl SystemHashRegistry { + /// Returns a new `SystemEntityRegistry`. + #[allow(clippy::new_without_default)] // This empty `new()` will be replaced in the future. + pub fn new() -> Self { + SystemHashRegistry(BTreeMap::new()) + } + + /// Inserts a contract's details into the registry. + pub fn insert(&mut self, contract_name: String, contract_hash: HashAddr) { + self.0.insert(contract_name, contract_hash); + } + + /// Gets a contract's hash from the registry. 
+ pub fn get(&self, contract_name: &str) -> Option<&HashAddr> { + self.0.get(contract_name) + } + + /// Returns `true` if the given hash_addr exists as a value in the registry. + pub fn exists(&self, hash_addr: &HashAddr) -> bool { + self.0 + .values() + .any(|system_contract_hash| system_contract_hash == hash_addr) + } + + /// Remove standard payment from the contract registry. + pub fn remove_standard_payment(&mut self) -> Option { + self.0.remove(STANDARD_PAYMENT) + } + + pub fn inner(self) -> BTreeMap { + self.0 + } +} + +impl ToBytes for SystemHashRegistry { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.0.to_bytes() + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } +} + +impl FromBytes for SystemHashRegistry { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (inner, remainder) = BTreeMap::from_bytes(bytes)?; + Ok((SystemHashRegistry(inner), remainder)) + } +} + +impl CLTyped for SystemHashRegistry { + fn cl_type() -> CLType { + BTreeMap::::cl_type() + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn bytesrepr_roundtrip() { + let mut system_entity_registry = SystemHashRegistry::new(); + system_entity_registry.insert("a".to_string(), [9; 32]); + bytesrepr::test_serialization_roundtrip(&system_entity_registry); + } + + #[test] + fn bytesrepr_transparent() { + // this test ensures that the serialization is not affected by the wrapper, because + // this data is deserialized in other places as a BTree, e.g. 
GetAuctionInfo in the sidecar + let mut system_entity_registry = SystemHashRegistry::new(); + system_entity_registry.insert("a".to_string(), [9; 32]); + let serialized = + ToBytes::to_bytes(&system_entity_registry).expect("Unable to serialize data"); + let deserialized: BTreeMap = + bytesrepr::deserialize_from_slice(serialized).expect("Unable to deserialize data"); + assert_eq!(system_entity_registry, SystemHashRegistry(deserialized)); + } +} diff --git a/types/src/contract_messages.rs b/types/src/contract_messages.rs new file mode 100644 index 0000000000..035f991046 --- /dev/null +++ b/types/src/contract_messages.rs @@ -0,0 +1,227 @@ +//! Data types for interacting with contract level messages. + +mod error; +mod messages; +mod topics; + +pub use error::FromStrError; +pub use messages::{Message, MessageChecksum, MessagePayload, Messages}; +pub use topics::{ + MessageTopicOperation, MessageTopicSummary, TopicNameHash, TOPIC_NAME_HASH_LENGTH, +}; + +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + EntityAddr, +}; + +use alloc::{string::String, vec::Vec}; +use core::fmt::{Debug, Display, Formatter}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::{ + distributions::{Distribution, Standard}, + Rng, +}; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +const TOPIC_FORMATTED_STRING_PREFIX: &str = "topic-"; +const MESSAGE_ADDR_PREFIX: &str = "message-"; + +/// MessageTopicAddr +#[derive(PartialOrd, Ord, PartialEq, Eq, Hash, Clone, Copy, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct MessageAddr { + /// The entity addr. + entity_addr: EntityAddr, + /// The hash of the name of the message topic. + topic_name_hash: TopicNameHash, + /// The message index. 
+ message_index: Option, +} + +impl MessageAddr { + /// Constructs a new topic address based on the addressable entity addr and the hash of the + /// message topic name. + pub const fn new_topic_addr(entity_addr: EntityAddr, topic_name_hash: TopicNameHash) -> Self { + Self { + entity_addr, + topic_name_hash, + message_index: None, + } + } + + /// Constructs a new message address based on the addressable entity addr, the hash of the + /// message topic name and the message index in the topic. + pub const fn new_message_addr( + entity_addr: EntityAddr, + topic_name_hash: TopicNameHash, + message_index: u32, + ) -> Self { + Self { + entity_addr, + topic_name_hash, + message_index: Some(message_index), + } + } + + /// Formats the [`MessageAddr`] as a prefixed, hex-encoded string. + pub fn to_formatted_string(self) -> String { + match self.message_index { + Some(index) => { + format!( + "{}{}-{}-{:x}", + MESSAGE_ADDR_PREFIX, + self.entity_addr, + self.topic_name_hash.to_formatted_string(), + index, + ) + } + None => { + format!( + "{}{}{}-{}", + MESSAGE_ADDR_PREFIX, + TOPIC_FORMATTED_STRING_PREFIX, + self.entity_addr, + self.topic_name_hash.to_formatted_string(), + ) + } + } + } + + /// Parses a formatted string into a [`MessageAddr`]. 
+ pub fn from_formatted_str(input: &str) -> Result { + let remainder = input + .strip_prefix(MESSAGE_ADDR_PREFIX) + .ok_or(FromStrError::InvalidPrefix)?; + + let (remainder, message_index) = match remainder.strip_prefix(TOPIC_FORMATTED_STRING_PREFIX) + { + Some(topic_string) => (topic_string, None), + None => { + let (remainder, message_index_str) = remainder + .rsplit_once('-') + .ok_or(FromStrError::MissingMessageIndex)?; + (remainder, Some(u32::from_str_radix(message_index_str, 16)?)) + } + }; + + let (entity_addr_str, topic_name_hash_str) = remainder + .rsplit_once('-') + .ok_or(FromStrError::MissingMessageIndex)?; + + let entity_addr = EntityAddr::from_formatted_str(entity_addr_str) + .map_err(FromStrError::EntityAddrParseError)?; + + let topic_name_hash = TopicNameHash::from_formatted_str(topic_name_hash_str)?; + Ok(MessageAddr { + entity_addr, + topic_name_hash, + message_index, + }) + } + + /// Returns the entity addr of this message topic. + pub fn entity_addr(&self) -> EntityAddr { + self.entity_addr + } + + /// Returns the topic name hash of this message topic. + pub fn topic_name_hash(&self) -> TopicNameHash { + self.topic_name_hash + } + + /// Returns None in the case of the key for a message topic summary, + /// else Some with the sequential index of the underlying message within the topic. 
+ pub fn message_index(&self) -> Option { + self.message_index + } +} + +impl Display for MessageAddr { + fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { + match self.message_index { + Some(index) => { + write!( + f, + "{}-{}-{:x}", + self.entity_addr, self.topic_name_hash, index, + ) + } + None => { + write!(f, "{}-{}", self.entity_addr, self.topic_name_hash) + } + } + } +} + +impl ToBytes for MessageAddr { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + buffer.append(&mut self.entity_addr.to_bytes()?); + buffer.append(&mut self.topic_name_hash.to_bytes()?); + buffer.append(&mut self.message_index.to_bytes()?); + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.entity_addr.serialized_length() + + self.topic_name_hash.serialized_length() + + self.message_index.serialized_length() + } +} + +impl FromBytes for MessageAddr { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (entity_addr, rem) = FromBytes::from_bytes(bytes)?; + let (topic_hash, rem) = FromBytes::from_bytes(rem)?; + let (message_index, rem) = FromBytes::from_bytes(rem)?; + Ok(( + MessageAddr { + entity_addr, + topic_name_hash: topic_hash, + message_index, + }, + rem, + )) + } +} + +#[cfg(any(feature = "testing", test))] +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> MessageAddr { + MessageAddr { + entity_addr: rng.gen(), + topic_name_hash: rng.gen(), + message_index: rng.gen(), + } + } +} + +#[cfg(test)] +mod tests { + use crate::{bytesrepr, KEY_HASH_LENGTH}; + + use super::{topics::TOPIC_NAME_HASH_LENGTH, *}; + + #[test] + fn serialization_roundtrip() { + let topic_addr = MessageAddr::new_topic_addr( + EntityAddr::SmartContract([1; KEY_HASH_LENGTH]), + [2; TOPIC_NAME_HASH_LENGTH].into(), + ); + bytesrepr::test_serialization_roundtrip(&topic_addr); + + let message_addr = MessageAddr::new_message_addr( + EntityAddr::SmartContract([1; KEY_HASH_LENGTH]), + [2; 
TOPIC_NAME_HASH_LENGTH].into(), + 3, + ); + bytesrepr::test_serialization_roundtrip(&message_addr); + } +} diff --git a/types/src/contract_messages/error.rs b/types/src/contract_messages/error.rs new file mode 100644 index 0000000000..245b7058b5 --- /dev/null +++ b/types/src/contract_messages/error.rs @@ -0,0 +1,74 @@ +use core::array::TryFromSliceError; + +use alloc::string::String; +use core::{ + fmt::{self, Debug, Display, Formatter}, + num::ParseIntError, +}; + +/// Error while parsing message hashes from string. +#[derive(Debug)] +#[non_exhaustive] +pub enum FromStrError { + /// The prefix is invalid. + InvalidPrefix, + /// No message index at the end of the string. + MissingMessageIndex, + /// String not formatted correctly. + Formatting, + /// Cannot parse entity hash. + EntityAddrParseError(crate::addressable_entity::FromStrError), + /// Cannot parse message topic hash. + MessageTopicParseError(String), + /// Failed to decode address portion of URef. + Hex(base16::DecodeError), + /// Failed to parse an int. + Int(ParseIntError), + /// The slice is the wrong length. 
+ Length(TryFromSliceError), +} + +impl From for FromStrError { + fn from(error: base16::DecodeError) -> Self { + FromStrError::Hex(error) + } +} + +impl From for FromStrError { + fn from(error: ParseIntError) -> Self { + FromStrError::Int(error) + } +} + +impl From for FromStrError { + fn from(error: TryFromSliceError) -> Self { + FromStrError::Length(error) + } +} + +impl Display for FromStrError { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + match self { + FromStrError::InvalidPrefix => { + write!(f, "prefix is invalid") + } + FromStrError::MissingMessageIndex => { + write!(f, "no message index found at the end of the string") + } + FromStrError::Formatting => { + write!(f, "string not properly formatted") + } + FromStrError::EntityAddrParseError(err) => { + write!(f, "could not parse entity addr: {}", err) + } + FromStrError::MessageTopicParseError(err) => { + write!(f, "could not parse topic hash: {}", err) + } + FromStrError::Hex(error) => { + write!(f, "failed to decode address portion from hex: {}", error) + } + FromStrError::Int(error) => write!(f, "failed to parse an int: {}", error), + FromStrError::Length(error) => write!(f, "address portion is wrong length: {}", error), + } + } +} diff --git a/types/src/contract_messages/messages.rs b/types/src/contract_messages/messages.rs new file mode 100644 index 0000000000..42a7e06a79 --- /dev/null +++ b/types/src/contract_messages/messages.rs @@ -0,0 +1,531 @@ +use crate::{ + bytesrepr::{self, Bytes, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + checksummed_hex, crypto, EntityAddr, Key, +}; + +use alloc::{string::String, vec::Vec}; +use core::{convert::TryFrom, fmt::Debug}; +#[cfg(any(feature = "std", test))] +use thiserror::Error; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::{ + distributions::{Alphanumeric, DistString, Distribution, Standard}, + Rng, +}; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{de::Error as 
SerdeError, Deserialize, Deserializer, Serialize, Serializer}; + +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; + +use super::{FromStrError, TopicNameHash}; + +/// Collection of multiple messages. +pub type Messages = Vec; + +/// The length of a message digest +pub const MESSAGE_CHECKSUM_LENGTH: usize = 32; + +const MESSAGE_CHECKSUM_STRING_PREFIX: &str = "message-checksum-"; + +/// A newtype wrapping an array which contains the raw bytes of +/// the hash of the message emitted. +#[derive(Default, PartialOrd, Ord, PartialEq, Eq, Clone, Copy, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr( + feature = "json-schema", + derive(JsonSchema), + schemars(description = "Message checksum as a formatted string.") +)] +pub struct MessageChecksum( + #[cfg_attr(feature = "json-schema", schemars(skip, with = "String"))] + pub [u8; MESSAGE_CHECKSUM_LENGTH], +); + +impl MessageChecksum { + /// Returns inner value of the message checksum. + pub fn value(&self) -> [u8; MESSAGE_CHECKSUM_LENGTH] { + self.0 + } + + /// Formats the `MessageChecksum` as a human readable string. + pub fn to_formatted_string(self) -> String { + format!( + "{}{}", + MESSAGE_CHECKSUM_STRING_PREFIX, + base16::encode_lower(&self.0), + ) + } + + /// Parses a string formatted as per `Self::to_formatted_string()` into a + /// `MessageChecksum`. 
+ pub fn from_formatted_str(input: &str) -> Result { + let hex_addr = input + .strip_prefix(MESSAGE_CHECKSUM_STRING_PREFIX) + .ok_or(FromStrError::InvalidPrefix)?; + + let bytes = + <[u8; MESSAGE_CHECKSUM_LENGTH]>::try_from(checksummed_hex::decode(hex_addr)?.as_ref())?; + Ok(MessageChecksum(bytes)) + } +} + +impl ToBytes for MessageChecksum { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + buffer.append(&mut self.0.to_bytes()?); + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } +} + +impl FromBytes for MessageChecksum { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (checksum, rem) = FromBytes::from_bytes(bytes)?; + Ok((MessageChecksum(checksum), rem)) + } +} + +impl Serialize for MessageChecksum { + fn serialize(&self, serializer: S) -> Result { + if serializer.is_human_readable() { + self.to_formatted_string().serialize(serializer) + } else { + self.0.serialize(serializer) + } + } +} + +impl<'de> Deserialize<'de> for MessageChecksum { + fn deserialize>(deserializer: D) -> Result { + if deserializer.is_human_readable() { + let formatted_string = String::deserialize(deserializer)?; + MessageChecksum::from_formatted_str(&formatted_string).map_err(SerdeError::custom) + } else { + let bytes = <[u8; MESSAGE_CHECKSUM_LENGTH]>::deserialize(deserializer)?; + Ok(MessageChecksum(bytes)) + } + } +} + +const MESSAGE_PAYLOAD_TAG_LENGTH: usize = U8_SERIALIZED_LENGTH; + +/// Tag for a message payload that contains a human readable string. +pub const MESSAGE_PAYLOAD_STRING_TAG: u8 = 0; +/// Tag for a message payload that contains raw bytes. +pub const MESSAGE_PAYLOAD_BYTES_TAG: u8 = 1; + +/// The payload of the message emitted by an addressable entity during execution. 
+#[derive(Clone, Eq, PartialEq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub enum MessagePayload { + /// Human readable string message. + String(String), + /// Message represented as raw bytes. + Bytes(Bytes), +} + +impl MessagePayload { + #[cfg(any(feature = "testing", test))] + /// Returns a random `MessagePayload`. + pub fn random(rng: &mut TestRng) -> Self { + let count = rng.gen_range(16..128); + if rng.gen() { + MessagePayload::String(Alphanumeric.sample_string(rng, count)) + } else { + MessagePayload::Bytes( + std::iter::repeat_with(|| rng.gen()) + .take(count) + .collect::>() + .into(), + ) + } + } +} + +impl From for MessagePayload +where + T: Into, +{ + fn from(value: T) -> Self { + Self::String(value.into()) + } +} + +impl From for MessagePayload { + fn from(bytes: Bytes) -> Self { + Self::Bytes(bytes) + } +} + +impl ToBytes for MessagePayload { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + match self { + MessagePayload::String(message_string) => { + buffer.insert(0, MESSAGE_PAYLOAD_STRING_TAG); + buffer.extend(message_string.to_bytes()?); + } + MessagePayload::Bytes(message_bytes) => { + buffer.insert(0, MESSAGE_PAYLOAD_BYTES_TAG); + buffer.extend(message_bytes.to_bytes()?); + } + } + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + MESSAGE_PAYLOAD_TAG_LENGTH + + match self { + MessagePayload::String(message_string) => message_string.serialized_length(), + MessagePayload::Bytes(message_bytes) => message_bytes.serialized_length(), + } + } +} + +impl FromBytes for MessagePayload { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + match tag { + MESSAGE_PAYLOAD_STRING_TAG => { + let (message, remainder): (String, _) = FromBytes::from_bytes(remainder)?; + Ok((Self::String(message), remainder)) + } + 
MESSAGE_PAYLOAD_BYTES_TAG => { + let (message_bytes, remainder): (Bytes, _) = FromBytes::from_bytes(remainder)?; + Ok((Self::Bytes(message_bytes), remainder)) + } + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +/// Message that was emitted by an addressable entity during execution. +#[derive(Clone, Eq, PartialEq, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub struct Message { + /// The identity of the entity that produced the message. + entity_addr: EntityAddr, + /// The payload of the message. + message: MessagePayload, + /// The name of the topic on which the message was emitted on. + topic_name: String, + /// The hash of the name of the topic. + topic_name_hash: TopicNameHash, + /// Message index in the topic. + topic_index: u32, + /// Message index in the block. + block_index: u64, +} + +#[cfg(any(feature = "std", test))] +#[derive(Serialize, Deserialize)] +struct HumanReadableMessage { + entity_addr: String, + message: MessagePayload, + topic_name: String, + topic_name_hash: TopicNameHash, + topic_index: u32, + block_index: u64, +} + +#[cfg(any(feature = "std", test))] +impl From<&Message> for HumanReadableMessage { + fn from(message: &Message) -> Self { + Self { + entity_addr: message.entity_addr.to_formatted_string(), + message: message.message.clone(), + topic_name: message.topic_name.clone(), + topic_name_hash: message.topic_name_hash, + topic_index: message.topic_index, + block_index: message.block_index, + } + } +} + +#[cfg(any(feature = "std", test))] +impl From<&Message> for NonHumanReadableMessage { + fn from(message: &Message) -> Self { + Self { + entity_addr: message.entity_addr, + message: message.message.clone(), + topic_name: message.topic_name.clone(), + topic_name_hash: message.topic_name_hash, + topic_index: message.topic_index, + block_index: message.block_index, + } + } +} + +#[cfg(any(feature = "std", test))] +impl From for Message { + fn from(message: 
NonHumanReadableMessage) -> Self { + Self { + entity_addr: message.entity_addr, + message: message.message, + topic_name: message.topic_name, + topic_name_hash: message.topic_name_hash, + topic_index: message.topic_index, + block_index: message.block_index, + } + } +} + +#[cfg(any(feature = "std", test))] +#[derive(Error, Debug)] +enum MessageDeserializationError { + #[error("{0}")] + FailedToParseEntityAddr(crate::addressable_entity::FromStrError), +} + +#[cfg(any(feature = "std", test))] +impl TryFrom for Message { + type Error = MessageDeserializationError; + fn try_from(message: HumanReadableMessage) -> Result { + let entity_addr = EntityAddr::from_formatted_str(&message.entity_addr) + .map_err(Self::Error::FailedToParseEntityAddr)?; + + Ok(Self { + entity_addr, + message: message.message, + topic_name: message.topic_name, + topic_name_hash: message.topic_name_hash, + topic_index: message.topic_index, + block_index: message.block_index, + }) + } +} + +#[cfg(any(feature = "std", test))] +#[derive(Serialize, Deserialize)] +struct NonHumanReadableMessage { + entity_addr: EntityAddr, + message: MessagePayload, + topic_name: String, + topic_name_hash: TopicNameHash, + topic_index: u32, + block_index: u64, +} + +#[cfg(any(feature = "std", test))] +impl Serialize for Message { + fn serialize(&self, serializer: S) -> Result { + if serializer.is_human_readable() { + HumanReadableMessage::from(self).serialize(serializer) + } else { + NonHumanReadableMessage::from(self).serialize(serializer) + } + } +} + +#[cfg(any(feature = "std", test))] +impl<'de> Deserialize<'de> for Message { + fn deserialize>(deserializer: D) -> Result { + if deserializer.is_human_readable() { + let human_readable = HumanReadableMessage::deserialize(deserializer)?; + Message::try_from(human_readable) + .map_err(|error| SerdeError::custom(format!("{:?}", error))) + } else { + let non_human_readable = NonHumanReadableMessage::deserialize(deserializer)?; + Ok(Message::from(non_human_readable)) + } + } 
+} + +impl Message { + /// Creates new instance of [`Message`] with the specified source and message payload. + pub fn new( + source: EntityAddr, + message: MessagePayload, + topic_name: String, + topic_name_hash: TopicNameHash, + topic_index: u32, + block_index: u64, + ) -> Self { + Self { + entity_addr: source, + message, + topic_name, + topic_name_hash, + topic_index, + block_index, + } + } + + /// Returns a reference to the identity of the entity that produced the message. + pub fn entity_addr(&self) -> &EntityAddr { + &self.entity_addr + } + + /// Returns a reference to the payload of the message. + pub fn payload(&self) -> &MessagePayload { + &self.message + } + + /// Returns a reference to the name of the topic on which the message was emitted on. + pub fn topic_name(&self) -> &str { + &self.topic_name + } + + /// Returns a reference to the hash of the name of the topic. + pub fn topic_name_hash(&self) -> &TopicNameHash { + &self.topic_name_hash + } + + /// Returns the index of the message in the topic. + pub fn topic_index(&self) -> u32 { + self.topic_index + } + + /// Returns the index of the message relative to other messages emitted in the block. + pub fn block_index(&self) -> u64 { + self.block_index + } + + /// Returns a new [`Key::Message`] based on the information in the message. + /// This key can be used to query the checksum record for the message in global state. + pub fn message_key(&self) -> Key { + Key::message(self.entity_addr, self.topic_name_hash, self.topic_index) + } + + /// Returns a new [`Key::Message`] based on the information in the message. + /// This key can be used to query the control record for the topic of this message in global + /// state. + pub fn topic_key(&self) -> Key { + Key::message_topic(self.entity_addr, self.topic_name_hash) + } + + /// Returns the checksum of the message. 
+ pub fn checksum(&self) -> Result { + let input = (&self.block_index, &self.message).to_bytes()?; + let checksum = crypto::blake2b(input); + + Ok(MessageChecksum(checksum)) + } + + /// Returns a random `Message`. + #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + let count = rng.gen_range(16..128); + Self { + entity_addr: rng.gen(), + message: MessagePayload::random(rng), + topic_name: Alphanumeric.sample_string(rng, count), + topic_name_hash: rng.gen(), + topic_index: rng.gen(), + block_index: rng.gen(), + } + } +} + +impl ToBytes for Message { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + buffer.append(&mut self.entity_addr.to_bytes()?); + buffer.append(&mut self.message.to_bytes()?); + buffer.append(&mut self.topic_name.to_bytes()?); + buffer.append(&mut self.topic_name_hash.to_bytes()?); + buffer.append(&mut self.topic_index.to_bytes()?); + buffer.append(&mut self.block_index.to_bytes()?); + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.entity_addr.serialized_length() + + self.message.serialized_length() + + self.topic_name.serialized_length() + + self.topic_name_hash.serialized_length() + + self.topic_index.serialized_length() + + self.block_index.serialized_length() + } +} + +impl FromBytes for Message { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (entity_addr, rem) = FromBytes::from_bytes(bytes)?; + let (message, rem) = FromBytes::from_bytes(rem)?; + let (topic_name, rem) = FromBytes::from_bytes(rem)?; + let (topic_name_hash, rem) = FromBytes::from_bytes(rem)?; + let (topic_index, rem) = FromBytes::from_bytes(rem)?; + let (block_index, rem) = FromBytes::from_bytes(rem)?; + Ok(( + Message { + entity_addr, + message, + topic_name, + topic_name_hash, + topic_index, + block_index, + }, + rem, + )) + } +} + +#[cfg(any(feature = "testing", test))] +impl Distribution for Standard { + fn sample(&self, rng: &mut 
R) -> Message { + let topic_name = Alphanumeric.sample_string(rng, 32); + let topic_name_hash = crypto::blake2b(&topic_name).into(); + let message = Alphanumeric.sample_string(rng, 64).into(); + + Message { + entity_addr: rng.gen(), + message, + topic_name, + topic_name_hash, + topic_index: rng.gen(), + block_index: rng.gen(), + } + } +} + +#[cfg(test)] +mod tests { + use crate::bytesrepr; + + use super::*; + + #[test] + fn serialization_roundtrip() { + let rng = &mut TestRng::new(); + + let message_checksum = MessageChecksum([1; MESSAGE_CHECKSUM_LENGTH]); + bytesrepr::test_serialization_roundtrip(&message_checksum); + + let message_payload = MessagePayload::random(rng); + bytesrepr::test_serialization_roundtrip(&message_payload); + + let message = Message::random(rng); + bytesrepr::test_serialization_roundtrip(&message); + } + + #[test] + fn json_roundtrip() { + let rng = &mut TestRng::new(); + + let message_payload = MessagePayload::random(rng); + let json_string = serde_json::to_string_pretty(&message_payload).unwrap(); + let decoded: MessagePayload = serde_json::from_str(&json_string).unwrap(); + assert_eq!(decoded, message_payload); + } + + #[test] + fn message_json_roundtrip() { + let rng = &mut TestRng::new(); + + let message = Message::random(rng); + let json_string = serde_json::to_string_pretty(&message).unwrap(); + let decoded: Message = serde_json::from_str(&json_string).unwrap(); + assert_eq!(decoded, message); + } +} diff --git a/types/src/contract_messages/topics.rs b/types/src/contract_messages/topics.rs new file mode 100644 index 0000000000..4ea6a65e79 --- /dev/null +++ b/types/src/contract_messages/topics.rs @@ -0,0 +1,276 @@ +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + checksummed_hex, BlockTime, +}; + +use core::convert::TryFrom; + +use alloc::{string::String, vec::Vec}; +use core::fmt::{Debug, Display, Formatter}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::{ + 
distributions::{Distribution, Standard}, + Rng, +}; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer}; + +use super::error::FromStrError; + +/// The length in bytes of a topic name hash. +pub const TOPIC_NAME_HASH_LENGTH: usize = 32; + +/// The hash of the name of the message topic. +#[derive(Default, PartialOrd, Ord, PartialEq, Eq, Clone, Copy, Hash)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr( + feature = "json-schema", + derive(JsonSchema), + schemars(description = "The hash of the name of the message topic.") +)] +pub struct TopicNameHash( + #[cfg_attr(feature = "json-schema", schemars(skip, with = "String"))] + pub [u8; TOPIC_NAME_HASH_LENGTH], +); + +impl TopicNameHash { + /// Returns a new [`TopicNameHash`] based on the specified value. + pub const fn new(topic_name_hash: [u8; TOPIC_NAME_HASH_LENGTH]) -> TopicNameHash { + TopicNameHash(topic_name_hash) + } + + /// Returns inner value of the topic hash. + pub fn value(&self) -> [u8; TOPIC_NAME_HASH_LENGTH] { + self.0 + } + + /// Formats the [`TopicNameHash`] as a prefixed, hex-encoded string. + pub fn to_formatted_string(self) -> String { + base16::encode_lower(&self.0) + } + + /// Parses a string formatted as per `Self::to_formatted_string()` into a [`TopicNameHash`]. 
+ pub fn from_formatted_str(input: &str) -> Result { + let bytes = + <[u8; TOPIC_NAME_HASH_LENGTH]>::try_from(checksummed_hex::decode(input)?.as_ref())?; + Ok(TopicNameHash(bytes)) + } +} + +impl ToBytes for TopicNameHash { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + buffer.append(&mut self.0.to_bytes()?); + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } +} + +impl FromBytes for TopicNameHash { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (hash, rem) = FromBytes::from_bytes(bytes)?; + Ok((TopicNameHash(hash), rem)) + } +} + +impl Serialize for TopicNameHash { + fn serialize(&self, serializer: S) -> Result { + if serializer.is_human_readable() { + self.to_formatted_string().serialize(serializer) + } else { + self.0.serialize(serializer) + } + } +} + +impl<'de> Deserialize<'de> for TopicNameHash { + fn deserialize>(deserializer: D) -> Result { + if deserializer.is_human_readable() { + let formatted_string = String::deserialize(deserializer)?; + TopicNameHash::from_formatted_str(&formatted_string).map_err(SerdeError::custom) + } else { + let bytes = <[u8; TOPIC_NAME_HASH_LENGTH]>::deserialize(deserializer)?; + Ok(TopicNameHash(bytes)) + } + } +} + +impl Display for TopicNameHash { + fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { + write!(f, "{}", base16::encode_lower(&self.0)) + } +} + +impl Debug for TopicNameHash { + fn fmt(&self, f: &mut Formatter) -> core::fmt::Result { + write!(f, "MessageTopicHash({})", base16::encode_lower(&self.0)) + } +} + +#[cfg(any(feature = "testing", test))] +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> TopicNameHash { + TopicNameHash(rng.gen()) + } +} + +impl From<[u8; TOPIC_NAME_HASH_LENGTH]> for TopicNameHash { + fn from(value: [u8; TOPIC_NAME_HASH_LENGTH]) -> Self { + TopicNameHash(value) + } +} + +/// Summary of a message topic that will be stored in global 
state. +#[derive(Eq, PartialEq, Clone, Debug, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub struct MessageTopicSummary { + /// Number of messages in this topic. + pub(crate) message_count: u32, + /// Block timestamp in which these messages were emitted. + pub(crate) blocktime: BlockTime, + /// Name of the topic. + pub(crate) topic_name: String, +} + +impl MessageTopicSummary { + /// Creates a new topic summary. + pub fn new(message_count: u32, blocktime: BlockTime, topic_name: String) -> Self { + Self { + message_count, + blocktime, + topic_name, + } + } + + /// Returns the number of messages that were sent on this topic. + pub fn message_count(&self) -> u32 { + self.message_count + } + + /// Returns the block time. + pub fn blocktime(&self) -> BlockTime { + self.blocktime + } + + /// Returns the topic name. + pub fn topic_name(&self) -> &str { + &self.topic_name + } +} + +impl ToBytes for MessageTopicSummary { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + buffer.append(&mut self.message_count.to_bytes()?); + buffer.append(&mut self.blocktime.to_bytes()?); + buffer.append(&mut self.topic_name.to_bytes()?); + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.message_count.serialized_length() + + self.blocktime.serialized_length() + + self.topic_name.serialized_length() + } +} + +impl FromBytes for MessageTopicSummary { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (message_count, rem) = FromBytes::from_bytes(bytes)?; + let (blocktime, rem) = FromBytes::from_bytes(rem)?; + let (topic_name, rem) = FromBytes::from_bytes(rem)?; + Ok(( + MessageTopicSummary { + message_count, + blocktime, + topic_name, + }, + rem, + )) + } +} + +const TOPIC_OPERATION_ADD_TAG: u8 = 0; +const OPERATION_MAX_SERIALIZED_LEN: usize = 1; + +/// Operations that can be performed on message 
topics. +#[derive(Debug, PartialEq)] +pub enum MessageTopicOperation { + /// Add a new message topic. + Add, +} + +impl MessageTopicOperation { + /// Maximum serialized length of a message topic operation. + pub const fn max_serialized_len() -> usize { + OPERATION_MAX_SERIALIZED_LEN + } +} + +impl ToBytes for MessageTopicOperation { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + match self { + MessageTopicOperation::Add => buffer.push(TOPIC_OPERATION_ADD_TAG), + } + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + match self { + MessageTopicOperation::Add => 1, + } + } +} + +impl FromBytes for MessageTopicOperation { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder): (u8, &[u8]) = FromBytes::from_bytes(bytes)?; + match tag { + TOPIC_OPERATION_ADD_TAG => Ok((MessageTopicOperation::Add, remainder)), + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +#[cfg(test)] +mod tests { + use crate::bytesrepr; + + use super::*; + + #[test] + fn serialization_roundtrip() { + let topic_name_hash = TopicNameHash::new([0x4du8; TOPIC_NAME_HASH_LENGTH]); + bytesrepr::test_serialization_roundtrip(&topic_name_hash); + + let topic_summary = + MessageTopicSummary::new(10, BlockTime::new(100), "topic_name".to_string()); + bytesrepr::test_serialization_roundtrip(&topic_summary); + + let topic_operation = MessageTopicOperation::Add; + bytesrepr::test_serialization_roundtrip(&topic_operation); + } + + #[test] + fn json_roundtrip() { + let topic_name_hash = TopicNameHash::new([0x4du8; TOPIC_NAME_HASH_LENGTH]); + let json_string = serde_json::to_string_pretty(&topic_name_hash).unwrap(); + let decoded: TopicNameHash = serde_json::from_str(&json_string).unwrap(); + assert_eq!(decoded, topic_name_hash); + + let topic_summary = + MessageTopicSummary::new(10, BlockTime::new(100), "topic_name".to_string()); + let json_string = 
serde_json::to_string_pretty(&topic_summary).unwrap(); + let decoded: MessageTopicSummary = serde_json::from_str(&json_string).unwrap(); + assert_eq!(decoded, topic_summary); + } +} diff --git a/types/src/contract_wasm.rs b/types/src/contract_wasm.rs index ddd3bcd9e4..fed4a99078 100644 --- a/types/src/contract_wasm.rs +++ b/types/src/contract_wasm.rs @@ -5,16 +5,17 @@ use core::{ fmt::{self, Debug, Display, Formatter}, }; +#[cfg(feature = "datasize")] use datasize::DataSize; -#[cfg(feature = "std")] +#[cfg(feature = "json-schema")] use schemars::{gen::SchemaGenerator, schema::Schema, JsonSchema}; use serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer}; use crate::{ account, - account::TryFromSliceForAccountHashError, + addressable_entity::TryFromSliceForAccountHashError, bytesrepr::{Bytes, Error, FromBytes, ToBytes}, - uref, CLType, CLTyped, HashAddr, + checksummed_hex, uref, ByteCode, ByteCodeKind, CLType, CLTyped, HashAddr, }; const CONTRACT_WASM_MAX_DISPLAY_LEN: usize = 16; @@ -26,6 +27,7 @@ const WASM_STRING_PREFIX: &str = "contract-wasm-"; pub struct TryFromSliceForContractHashError(()); #[derive(Debug)] +#[non_exhaustive] pub enum FromStrError { InvalidPrefix, Hex(base16::DecodeError), @@ -82,7 +84,8 @@ impl Display for FromStrError { /// A newtype wrapping a `HashAddr` which is the raw bytes of /// the ContractWasmHash -#[derive(DataSize, Default, PartialOrd, Ord, PartialEq, Eq, Hash, Clone, Copy)] +#[derive(PartialOrd, Ord, PartialEq, Eq, Hash, Clone, Copy)] +#[cfg_attr(feature = "datasize", derive(DataSize))] pub struct ContractWasmHash(HashAddr); impl ContractWasmHash { @@ -102,7 +105,7 @@ impl ContractWasmHash { } /// Formats the `ContractWasmHash` for users getting and putting. 
- pub fn to_formatted_string(&self) -> String { + pub fn to_formatted_string(self) -> String { format!("{}{}", WASM_STRING_PREFIX, base16::encode_lower(&self.0),) } @@ -112,11 +115,17 @@ impl ContractWasmHash { let remainder = input .strip_prefix(WASM_STRING_PREFIX) .ok_or(FromStrError::InvalidPrefix)?; - let bytes = HashAddr::try_from(base16::decode(remainder)?.as_ref())?; + let bytes = HashAddr::try_from(checksummed_hex::decode(remainder)?.as_ref())?; Ok(ContractWasmHash(bytes)) } } +impl Default for ContractWasmHash { + fn default() -> Self { + ContractWasmHash::new([0; 32]) + } +} + impl Display for ContractWasmHash { fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { write!(f, "{}", base16::encode_lower(&self.0)) @@ -128,6 +137,7 @@ impl Debug for ContractWasmHash { write!(f, "ContractWasmHash({})", base16::encode_lower(&self.0)) } } + impl CLTyped for ContractWasmHash { fn cl_type() -> CLType { CLType::ByteArray(KEY_HASH_LENGTH as u32) @@ -144,6 +154,12 @@ impl ToBytes for ContractWasmHash { fn serialized_length(&self) -> usize { self.0.serialized_length() } + + #[inline(always)] + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + self.0.write_bytes(writer)?; + Ok(()) + } } impl FromBytes for ContractWasmHash { @@ -207,7 +223,7 @@ impl TryFrom<&Vec> for ContractWasmHash { } } -#[cfg(feature = "std")] +#[cfg(feature = "json-schema")] impl JsonSchema for ContractWasmHash { fn schema_name() -> String { String::from("ContractWasmHash") @@ -223,11 +239,26 @@ impl JsonSchema for ContractWasmHash { } /// A container for contract's WASM bytes. -#[derive(PartialEq, Eq, Clone, Serialize)] +#[derive(PartialEq, Eq, Clone, Serialize, Deserialize)] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[cfg_attr(feature = "datasize", derive(DataSize))] pub struct ContractWasm { bytes: Bytes, } +impl ContractWasm { + /// Creates a new `ContractWasm`. 
+ pub fn new(bytes: Vec) -> Self { + Self { + bytes: bytes.into(), + } + } + + pub fn take_bytes(self) -> Vec { + self.bytes.into() + } +} + impl Debug for ContractWasm { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { if self.bytes.len() > CONTRACT_WASM_MAX_DISPLAY_LEN { @@ -242,25 +273,6 @@ impl Debug for ContractWasm { } } -impl ContractWasm { - /// Creates new WASM object from bytes. - pub fn new(bytes: Vec) -> Self { - ContractWasm { - bytes: bytes.into(), - } - } - - /// Consumes instance of [`ContractWasm`] and returns its bytes. - pub fn take_bytes(self) -> Vec { - self.bytes.into() - } - - /// Returns a slice of contained WASM bytes. - pub fn bytes(&self) -> &[u8] { - self.bytes.as_ref() - } -} - impl ToBytes for ContractWasm { fn to_bytes(&self) -> Result, Error> { self.bytes.to_bytes() @@ -269,6 +281,11 @@ impl ToBytes for ContractWasm { fn serialized_length(&self) -> usize { self.bytes.serialized_length() } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + self.bytes.write_bytes(writer)?; + Ok(()) + } } impl FromBytes for ContractWasm { @@ -278,6 +295,12 @@ impl FromBytes for ContractWasm { } } +impl From for ByteCode { + fn from(value: ContractWasm) -> Self { + ByteCode::new(ByteCodeKind::V1CasperWasm, value.take_bytes()) + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/types/src/contracts.rs b/types/src/contracts.rs index dc1d492d1d..5eee24582c 100644 --- a/types/src/contracts.rs +++ b/types/src/contracts.rs @@ -2,6 +2,8 @@ // TODO - remove once schemars stops causing warning. 
#![allow(clippy::field_reassign_with_default)] +mod named_keys; + use alloc::{ collections::{BTreeMap, BTreeSet}, format, @@ -10,63 +12,129 @@ use alloc::{ }; use core::{ array::TryFromSliceError, - convert::TryFrom, + convert::{TryFrom, TryInto}, fmt::{self, Debug, Display, Formatter}, }; +use serde_bytes::ByteBuf; +#[cfg(feature = "datasize")] use datasize::DataSize; -#[cfg(feature = "std")] +#[cfg(feature = "json-schema")] use schemars::{gen::SchemaGenerator, schema::Schema, JsonSchema}; -use serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer}; +use serde::{ + de::{self, Error as SerdeError}, + ser, Deserialize, Deserializer, Serialize, Serializer, +}; + +pub use self::named_keys::NamedKeys; use crate::{ account, - account::TryFromSliceForAccountHashError, + addressable_entity::TryFromSliceForAccountHashError, bytesrepr::{self, FromBytes, ToBytes, U32_SERIALIZED_LENGTH}, + checksummed_hex, contract_wasm::ContractWasmHash, - uref, - uref::URef, - CLType, CLTyped, HashAddr, Key, ProtocolVersion, KEY_HASH_LENGTH, + package::PackageStatus, + serde_helpers::contract_package::HumanReadableContractPackage, + uref::{self, URef}, + AddressableEntityHash, CLType, CLTyped, EntityAddr, EntityEntryPoint, EntityVersionKey, + EntryPointAccess, EntryPointPayment, EntryPointType, EntryPoints as EntityEntryPoints, Group, + Groups, HashAddr, Key, Package, PackageHash, Parameter, Parameters, ProtocolVersion, + KEY_HASH_LENGTH, }; -/// Maximum number of distinct user groups. -pub const MAX_GROUPS: u8 = 10; -/// Maximum number of URefs which can be assigned across all user groups. -pub const MAX_TOTAL_UREFS: usize = 100; - const CONTRACT_STRING_PREFIX: &str = "contract-"; -const PACKAGE_STRING_PREFIX: &str = "contract-package-wasm"; +const CONTRACT_PACKAGE_STRING_PREFIX: &str = "contract-package-"; +// We need to support the legacy prefix of "contract-package-wasm". 
+const CONTRACT_PACKAGE_STRING_LEGACY_EXTRA_PREFIX: &str = "wasm"; /// Set of errors which may happen when working with contract headers. -#[derive(Debug, PartialEq)] +#[derive(Debug, PartialEq, Eq)] #[repr(u8)] +#[non_exhaustive] pub enum Error { /// Attempt to override an existing or previously existing version with a /// new header (this is not allowed to ensure immutability of a given /// version). + /// ``` + /// # use casper_types::contracts::Error; + /// assert_eq!(1, Error::PreviouslyUsedVersion as u8); + /// ``` PreviouslyUsedVersion = 1, /// Attempted to disable a contract that does not exist. + /// ``` + /// # use casper_types::contracts::Error; + /// assert_eq!(2, Error::ContractNotFound as u8); + /// ``` ContractNotFound = 2, /// Attempted to create a user group which already exists (use the update /// function to change an existing user group). + /// ``` + /// # use casper_types::contracts::Error; + /// assert_eq!(3, Error::GroupAlreadyExists as u8); + /// ``` GroupAlreadyExists = 3, /// Attempted to add a new user group which exceeds the allowed maximum /// number of groups. + /// ``` + /// # use casper_types::contracts::Error; + /// assert_eq!(4, Error::MaxGroupsExceeded as u8); + /// ``` MaxGroupsExceeded = 4, /// Attempted to add a new URef to a group, which resulted in the total /// number of URefs across all user groups to exceed the allowed maximum. + /// ``` + /// # use casper_types::contracts::Error; + /// assert_eq!(5, Error::MaxTotalURefsExceeded as u8); + /// ``` MaxTotalURefsExceeded = 5, /// Attempted to remove a URef from a group, which does not exist in the /// group. + /// ``` + /// # use casper_types::contracts::Error; + /// assert_eq!(6, Error::GroupDoesNotExist as u8); + /// ``` GroupDoesNotExist = 6, /// Attempted to remove unknown URef from the group. 
+ /// ``` + /// # use casper_types::contracts::Error; + /// assert_eq!(7, Error::UnableToRemoveURef as u8); + /// ``` UnableToRemoveURef = 7, /// Group is use by at least one active contract. + /// ``` + /// # use casper_types::contracts::Error; + /// assert_eq!(8, Error::GroupInUse as u8); + /// ``` GroupInUse = 8, /// URef already exists in given group. + /// ``` + /// # use casper_types::contracts::Error; + /// assert_eq!(9, Error::URefAlreadyExists as u8); + /// ``` URefAlreadyExists = 9, } +impl TryFrom for Error { + type Error = (); + + fn try_from(value: u8) -> Result { + let error = match value { + v if v == Self::PreviouslyUsedVersion as u8 => Self::PreviouslyUsedVersion, + v if v == Self::ContractNotFound as u8 => Self::ContractNotFound, + v if v == Self::GroupAlreadyExists as u8 => Self::GroupAlreadyExists, + v if v == Self::MaxGroupsExceeded as u8 => Self::MaxGroupsExceeded, + v if v == Self::MaxTotalURefsExceeded as u8 => Self::MaxTotalURefsExceeded, + v if v == Self::GroupDoesNotExist as u8 => Self::GroupDoesNotExist, + v if v == Self::UnableToRemoveURef as u8 => Self::UnableToRemoveURef, + v if v == Self::GroupInUse as u8 => Self::GroupInUse, + v if v == Self::URefAlreadyExists as u8 => Self::URefAlreadyExists, + _ => return Err(()), + }; + Ok(error) + } +} + /// Associated error type of `TryFrom<&[u8]>` for `ContractHash`. #[derive(Debug)] pub struct TryFromSliceForContractHashError(()); @@ -79,6 +147,7 @@ impl Display for TryFromSliceForContractHashError { /// An error from parsing a formatted contract string #[derive(Debug)] +#[non_exhaustive] pub enum FromStrError { /// Invalid formatted string prefix. InvalidPrefix, @@ -139,46 +208,6 @@ impl Display for FromStrError { } } -/// A (labelled) "user group". Each method of a versioned contract may be -/// assoicated with one or more user groups which are allowed to call it. 
-#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)] -#[cfg_attr(feature = "std", derive(JsonSchema))] -pub struct Group(String); - -impl Group { - /// Basic constructor - pub fn new>(s: T) -> Self { - Group(s.into()) - } - - /// Retrieves underlying name. - pub fn value(&self) -> &str { - &self.0 - } -} - -impl From for String { - fn from(group: Group) -> Self { - group.0 - } -} - -impl ToBytes for Group { - fn to_bytes(&self) -> Result, bytesrepr::Error> { - self.0.to_bytes() - } - - fn serialized_length(&self) -> usize { - self.0.serialized_length() - } -} - -impl FromBytes for Group { - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { - String::from_bytes(bytes).map(|(label, bytes)| (Group(label), bytes)) - } -} - /// Automatically incremented value for a contract version within a major `ProtocolVersion`. pub type ContractVersion = u32; @@ -189,7 +218,9 @@ pub const CONTRACT_INITIAL_VERSION: ContractVersion = 1; pub type ProtocolVersionMajor = u32; /// Major element of `ProtocolVersion` combined with `ContractVersion`. -#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Serialize)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize, Hash)] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[cfg_attr(feature = "datasize", derive(DataSize))] pub struct ContractVersionKey(ProtocolVersionMajor, ContractVersion); impl ContractVersionKey { @@ -233,6 +264,12 @@ impl ToBytes for ContractVersionKey { fn serialized_length(&self) -> usize { CONTRACT_VERSION_KEY_SERIALIZED_LENGTH } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.0.write_bytes(writer)?; + self.1.write_bytes(writer)?; + Ok(()) + } } impl FromBytes for ContractVersionKey { @@ -256,12 +293,9 @@ pub type ContractVersions = BTreeMap; /// contract versions to be executed. pub type DisabledVersions = BTreeSet; -/// Collection of named groups. 
-pub type Groups = BTreeMap>; - -/// A newtype wrapping a `HashAddr` which is the raw bytes of -/// the ContractHash -#[derive(DataSize, Default, PartialOrd, Ord, PartialEq, Eq, Hash, Clone, Copy)] +/// A newtype wrapping a `HashAddr` which references a [`Contract`] in the global state. +#[derive(Default, PartialOrd, Ord, PartialEq, Eq, Hash, Clone, Copy)] +#[cfg_attr(feature = "datasize", derive(DataSize))] pub struct ContractHash(HashAddr); impl ContractHash { @@ -281,7 +315,7 @@ impl ContractHash { } /// Formats the `ContractHash` for users getting and putting. - pub fn to_formatted_string(&self) -> String { + pub fn to_formatted_string(self) -> String { format!( "{}{}", CONTRACT_STRING_PREFIX, @@ -295,7 +329,7 @@ impl ContractHash { let remainder = input .strip_prefix(CONTRACT_STRING_PREFIX) .ok_or(FromStrError::InvalidPrefix)?; - let bytes = HashAddr::try_from(base16::decode(remainder)?.as_ref())?; + let bytes = HashAddr::try_from(checksummed_hex::decode(remainder)?.as_ref())?; Ok(ContractHash(bytes)) } } @@ -318,6 +352,12 @@ impl CLTyped for ContractHash { } } +impl From for ContractHash { + fn from(entity_hash: AddressableEntityHash) -> Self { + ContractHash::new(entity_hash.value()) + } +} + impl ToBytes for ContractHash { #[inline(always)] fn to_bytes(&self) -> Result, bytesrepr::Error> { @@ -328,6 +368,12 @@ impl ToBytes for ContractHash { fn serialized_length(&self) -> usize { self.0.serialized_length() } + + #[inline(always)] + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + writer.extend_from_slice(&self.0); + Ok(()) + } } impl FromBytes for ContractHash { @@ -391,7 +437,7 @@ impl TryFrom<&Vec> for ContractHash { } } -#[cfg(feature = "std")] +#[cfg(feature = "json-schema")] impl JsonSchema for ContractHash { fn schema_name() -> String { String::from("ContractHash") @@ -405,9 +451,9 @@ impl JsonSchema for ContractHash { } } -/// A newtype wrapping a `HashAddr` which is the raw bytes of -/// the ContractPackageHash 
-#[derive(DataSize, Default, PartialOrd, Ord, PartialEq, Eq, Hash, Clone, Copy)] +/// A newtype wrapping a `HashAddr` which references a [`ContractPackage`] in the global state. +#[derive(Default, PartialOrd, Ord, PartialEq, Eq, Hash, Clone, Copy)] +#[cfg_attr(feature = "datasize", derive(DataSize))] pub struct ContractPackageHash(HashAddr); impl ContractPackageHash { @@ -427,21 +473,36 @@ impl ContractPackageHash { } /// Formats the `ContractPackageHash` for users getting and putting. - pub fn to_formatted_string(&self) -> String { - format!("{}{}", PACKAGE_STRING_PREFIX, base16::encode_lower(&self.0),) + pub fn to_formatted_string(self) -> String { + format!( + "{}{}", + CONTRACT_PACKAGE_STRING_PREFIX, + base16::encode_lower(&self.0), + ) } /// Parses a string formatted as per `Self::to_formatted_string()` into a /// `ContractPackageHash`. pub fn from_formatted_str(input: &str) -> Result { let remainder = input - .strip_prefix(PACKAGE_STRING_PREFIX) + .strip_prefix(CONTRACT_PACKAGE_STRING_PREFIX) .ok_or(FromStrError::InvalidPrefix)?; - let bytes = HashAddr::try_from(base16::decode(remainder)?.as_ref())?; + + let hex_addr = remainder + .strip_prefix(CONTRACT_PACKAGE_STRING_LEGACY_EXTRA_PREFIX) + .unwrap_or(remainder); + + let bytes = HashAddr::try_from(checksummed_hex::decode(hex_addr)?.as_ref())?; Ok(ContractPackageHash(bytes)) } } +impl From for ContractPackageHash { + fn from(value: PackageHash) -> Self { + ContractPackageHash::new(value.value()) + } +} + impl Display for ContractPackageHash { fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { write!(f, "{}", base16::encode_lower(&self.0)) @@ -470,6 +531,12 @@ impl ToBytes for ContractPackageHash { fn serialized_length(&self) -> usize { self.0.serialized_length() } + + #[inline(always)] + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + writer.extend_from_slice(&self.0); + Ok(()) + } } impl FromBytes for ContractPackageHash { @@ -533,7 +600,7 @@ impl TryFrom<&Vec> for 
ContractPackageHash { } } -#[cfg(feature = "std")] +#[cfg(feature = "json-schema")] impl JsonSchema for ContractPackageHash { fn schema_name() -> String { String::from("ContractPackageHash") @@ -549,7 +616,9 @@ impl JsonSchema for ContractPackageHash { } /// A enum to determine the lock status of the contract package. -#[derive(Debug, Clone, PartialEq, Eq, Serialize)] +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] pub enum ContractPackageStatus { /// The package is locked and cannot be versioned. Locked, @@ -590,6 +659,14 @@ impl ToBytes for ContractPackageStatus { ContractPackageStatus::Locked => true.serialized_length(), } } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + ContractPackageStatus::Locked => writer.push(u8::from(true)), + ContractPackageStatus::Unlocked => writer.push(u8::from(false)), + } + Ok(()) + } } impl FromBytes for ContractPackageStatus { @@ -601,23 +678,37 @@ impl FromBytes for ContractPackageStatus { } /// Contract definition, metadata, and security container. -#[derive(Debug, Clone, PartialEq, Eq, Default, Serialize)] +#[derive(Debug, Clone, PartialEq, Eq, Default)] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[cfg_attr(feature = "datasize", derive(DataSize))] pub struct ContractPackage { /// Key used to add or disable versions access_key: URef, /// All versions (enabled & disabled) + #[cfg_attr( + feature = "json-schema", + schemars( + with = "Vec" + ) + )] versions: ContractVersions, /// Disabled versions disabled_versions: DisabledVersions, /// Mapping maintaining the set of URefs associated with each "user /// group". This can be used to control access to methods in a particular /// version of the contract. A method is callable by any context which - /// "knows" any of the URefs assoicated with the mthod's user group. 
+ /// "knows" any of the URefs associated with the method's user group. groups: Groups, /// A flag that determines whether a contract is locked lock_status: ContractPackageStatus, } +impl CLTyped for ContractPackage { + fn cl_type() -> CLType { + CLType::Any + } +} + impl ContractPackage { /// Create new `ContractPackage` (with no versions) from given access key. pub fn new( @@ -641,58 +732,58 @@ impl ContractPackage { self.access_key } - /// Get the mutable group definitions for this contract. - pub fn groups_mut(&mut self) -> &mut Groups { - &mut self.groups - } - /// Get the group definitions for this contract. pub fn groups(&self) -> &Groups { &self.groups } - /// Adds new group to this contract. - pub fn add_group(&mut self, group: Group, urefs: BTreeSet) { - let v = self.groups.entry(group).or_insert_with(Default::default); - v.extend(urefs) + /// Returns reference to all of this contract's versions. + pub fn versions(&self) -> &ContractVersions { + &self.versions } - /// Lookup the contract hash for a given contract version (if present) - pub fn lookup_contract_hash( - &self, - contract_version_key: ContractVersionKey, - ) -> Option<&ContractHash> { - if !self.is_version_enabled(contract_version_key) { - return None; - } - self.versions.get(&contract_version_key) + /// Returns mutable reference to all of this contract's versions (enabled and disabled). + pub fn versions_mut(&mut self) -> &mut ContractVersions { + &mut self.versions } - /// Checks if the given contract version exists and is available for use. - pub fn is_version_enabled(&self, contract_version_key: ContractVersionKey) -> bool { - !self.disabled_versions.contains(&contract_version_key) - && self.versions.contains_key(&contract_version_key) + /// Consumes the object and returns all of this contract's versions (enabled and disabled). + pub fn take_versions(self) -> ContractVersions { + self.versions } - /// Insert a new contract version; the next sequential version number will be issued. 
- pub fn insert_contract_version( - &mut self, - protocol_version_major: ProtocolVersionMajor, - contract_hash: ContractHash, - ) -> ContractVersionKey { - let contract_version = self.next_contract_version_for(protocol_version_major); - let key = ContractVersionKey::new(protocol_version_major, contract_version); - self.versions.insert(key, contract_hash); - key + /// Consumes the object and returns all the groups of the contract package. + pub fn take_groups(self) -> Groups { + self.groups + } + + /// Returns all of this contract's disabled versions. + pub fn disabled_versions(&self) -> &DisabledVersions { + &self.disabled_versions + } + + /// Returns mut reference to all of this contract's disabled versions. + pub fn disabled_versions_mut(&mut self) -> &mut DisabledVersions { + &mut self.disabled_versions + } + + /// Returns lock_status of the contract package. + pub fn lock_status(&self) -> ContractPackageStatus { + self.lock_status.clone() + } + + pub fn is_locked(&self) -> bool { + match self.lock_status { + ContractPackageStatus::Locked => true, + ContractPackageStatus::Unlocked => false, + } } /// Disable the contract version corresponding to the given hash (if it exists). pub fn disable_contract_version(&mut self, contract_hash: ContractHash) -> Result<(), Error> { let contract_version_key = self - .versions - .iter() - .filter_map(|(k, v)| if *v == contract_hash { Some(*k) } else { None }) - .next() + .find_contract_version_key_by_hash(&contract_hash) + .copied() .ok_or(Error::ContractNotFound)?; if !self.disabled_versions.contains(&contract_version_key) { @@ -702,49 +793,32 @@ impl ContractPackage { Ok(()) } - /// Returns reference to all of this contract's versions. - pub fn versions(&self) -> &ContractVersions { - &self.versions - } + /// Enable the contract version corresponding to the given hash (if it exists). 
+ pub fn enable_contract_version(&mut self, contract_hash: ContractHash) -> Result<(), Error> { + let contract_version_key = self + .find_contract_version_key_by_hash(&contract_hash) + .copied() + .ok_or(Error::ContractNotFound)?; - /// Returns all of this contract's enabled contract versions. - pub fn enabled_versions(&self) -> ContractVersions { - let mut ret = ContractVersions::new(); - for version in &self.versions { - if !self.is_version_enabled(*version.0) { - continue; - } - ret.insert(*version.0, *version.1); - } - ret - } + self.disabled_versions.remove(&contract_version_key); - /// Returns mutable reference to all of this contract's versions (enabled and disabled). - pub fn versions_mut(&mut self) -> &mut ContractVersions { - &mut self.versions + Ok(()) } - /// Consumes the object and returns all of this contract's versions (enabled and disabled). - pub fn take_versions(self) -> ContractVersions { + fn find_contract_version_key_by_hash( + &self, + contract_hash: &ContractHash, + ) -> Option<&ContractVersionKey> { self.versions + .iter() + .filter_map(|(k, v)| if v == contract_hash { Some(k) } else { None }) + .next() } - /// Returns all of this contract's disabled versions. - pub fn disabled_versions(&self) -> &DisabledVersions { - &self.disabled_versions - } - - /// Returns mut reference to all of this contract's disabled versions. - pub fn disabled_versions_mut(&mut self) -> &mut DisabledVersions { - &mut self.disabled_versions - } - - /// Removes a group from this contract (if it exists). + /// Removes a group from this entity (if it exists). 
pub fn remove_group(&mut self, group: &Group) -> bool { - self.groups.remove(group).is_some() + self.groups.0.remove(group).is_some() } - - /// Gets the next available contract version for the given protocol version fn next_contract_version_for(&self, protocol_version: ProtocolVersionMajor) -> ContractVersion { let current_version = self .versions @@ -762,46 +836,84 @@ impl ContractPackage { current_version + 1 } + /// Returns `true` if the given contract version exists and is enabled. + pub fn is_version_enabled(&self, contract_version_key: ContractVersionKey) -> bool { + !self.disabled_versions.contains(&contract_version_key) + && self.versions.contains_key(&contract_version_key) + } + + /// Returns all of this contract's enabled contract versions. + pub fn enabled_versions(&self) -> ContractVersions { + let mut ret = ContractVersions::new(); + for version in &self.versions { + if !self.is_version_enabled(*version.0) { + continue; + } + ret.insert(*version.0, *version.1); + } + ret + } + /// Return the contract version key for the newest enabled contract version. pub fn current_contract_version(&self) -> Option { - match self.enabled_versions().keys().next_back() { - Some(contract_version_key) => Some(*contract_version_key), - None => None, - } + self.enabled_versions().keys().next_back().copied() } /// Return the contract hash for the newest enabled contract version. pub fn current_contract_hash(&self) -> Option { - match self.enabled_versions().values().next_back() { - Some(contract_hash) => Some(*contract_hash), - None => None, - } + self.enabled_versions().values().next_back().copied() } - /// Return the lock status of the contract package. 
- pub fn is_locked(&self) -> bool { - match self.lock_status { - ContractPackageStatus::Unlocked => false, - ContractPackageStatus::Locked => true, + pub fn insert_contract_version( + &mut self, + protocol_version_major: ProtocolVersionMajor, + contract_hash: ContractHash, + ) -> ContractVersionKey { + let contract_version = self.next_contract_version_for(protocol_version_major); + let key = ContractVersionKey::new(protocol_version_major, contract_version); + self.versions.insert(key, contract_hash); + key + } + + pub fn groups_mut(&mut self) -> &mut Groups { + &mut self.groups + } +} + +impl Serialize for ContractPackage { + fn serialize(&self, serializer: S) -> Result { + if serializer.is_human_readable() { + HumanReadableContractPackage::from(self).serialize(serializer) + } else { + let bytes = self + .to_bytes() + .map_err(|error| ser::Error::custom(format!("{:?}", error)))?; + ByteBuf::from(bytes).serialize(serializer) } } +} - /// Return the package status itself - pub fn get_lock_status(&self) -> ContractPackageStatus { - self.lock_status.clone() +impl<'de> Deserialize<'de> for ContractPackage { + fn deserialize>(deserializer: D) -> Result { + if deserializer.is_human_readable() { + let json_helper = HumanReadableContractPackage::deserialize(deserializer)?; + json_helper.try_into().map_err(de::Error::custom) + } else { + let bytes = ByteBuf::deserialize(deserializer)?.into_vec(); + bytesrepr::deserialize::(bytes) + .map_err(|error| de::Error::custom(format!("{:?}", error))) + } } } impl ToBytes for ContractPackage { fn to_bytes(&self) -> Result, bytesrepr::Error> { let mut result = bytesrepr::allocate_buffer(self)?; - - result.append(&mut self.access_key.to_bytes()?); - result.append(&mut self.versions.to_bytes()?); - result.append(&mut self.disabled_versions.to_bytes()?); - result.append(&mut self.groups.to_bytes()?); - result.append(&mut self.lock_status.to_bytes()?); - + self.access_key().write_bytes(&mut result)?; + self.versions().write_bytes(&mut 
result)?; + self.disabled_versions().write_bytes(&mut result)?; + self.groups().write_bytes(&mut result)?; + self.lock_status.write_bytes(&mut result)?; Ok(result) } @@ -812,6 +924,15 @@ impl ToBytes for ContractPackage { + self.groups.serialized_length() + self.lock_status.serialized_length() } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.access_key().write_bytes(writer)?; + self.versions().write_bytes(writer)?; + self.disabled_versions().write_bytes(writer)?; + self.groups().write_bytes(writer)?; + self.lock_status.write_bytes(writer)?; + Ok(()) + } } impl FromBytes for ContractPackage { @@ -833,47 +954,272 @@ impl FromBytes for ContractPackage { } } -/// Type alias for a container used inside [`EntryPoints`]. -pub type EntryPointsMap = BTreeMap; +impl From for Package { + fn from(value: ContractPackage) -> Self { + let versions: BTreeMap = value + .versions + .into_iter() + .map(|(version, contract_hash)| { + let entity_version = EntityVersionKey::new( + version.protocol_version_major(), + version.contract_version(), + ); + let entity_hash = EntityAddr::SmartContract(contract_hash.value()); + (entity_version, entity_hash) + }) + .collect(); -/// Collection of named entry points -#[derive(Debug, Clone, PartialEq, Eq, Serialize)] -pub struct EntryPoints(EntryPointsMap); + let disabled_versions = value + .disabled_versions + .into_iter() + .map(|contract_versions| { + EntityVersionKey::new( + contract_versions.protocol_version_major(), + contract_versions.contract_version(), + ) + }) + .collect(); -impl Default for EntryPoints { - fn default() -> Self { - let mut entry_points = EntryPoints::new(); - let entry_point = EntryPoint::default(); - entry_points.add_entry_point(entry_point); - entry_points - } -} + let lock_status = if value.lock_status == ContractPackageStatus::Locked { + PackageStatus::Locked + } else { + PackageStatus::Unlocked + }; -impl ToBytes for EntryPoints { - fn to_bytes(&self) -> Result, bytesrepr::Error> 
{ - self.0.to_bytes() - } - fn serialized_length(&self) -> usize { - self.0.serialized_length() + Package::new( + versions.into(), + disabled_versions, + value.groups, + lock_status, + ) } } -impl FromBytes for EntryPoints { - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { - let (entry_points_map, rem) = EntryPointsMap::from_bytes(bytes)?; - Ok((EntryPoints(entry_points_map), rem)) - } +/// Type signature of a method. Order of arguments matter since can be +/// referenced by index as well as name. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub struct EntryPoint { + name: String, + args: Parameters, + ret: CLType, + access: EntryPointAccess, + entry_point_type: EntryPointType, } -impl EntryPoints { - /// Creates empty instance of [`EntryPoints`]. - pub fn new() -> EntryPoints { - EntryPoints(EntryPointsMap::new()) +impl From for (String, Parameters, CLType, EntryPointAccess, EntryPointType) { + fn from(entry_point: EntryPoint) -> Self { + ( + entry_point.name, + entry_point.args, + entry_point.ret, + entry_point.access, + entry_point.entry_point_type, + ) } +} - /// Adds new [`EntryPoint`]. - pub fn add_entry_point(&mut self, entry_point: EntryPoint) { - self.0.insert(entry_point.name().to_string(), entry_point); +impl EntryPoint { + /// `EntryPoint` constructor. + pub fn new>( + name: T, + args: Parameters, + ret: CLType, + access: EntryPointAccess, + entry_point_type: EntryPointType, + ) -> Self { + EntryPoint { + name: name.into(), + args, + ret, + access, + entry_point_type, + } + } + + /// Create a default [`EntryPoint`] with specified name. + pub fn default_with_name>(name: T) -> Self { + EntryPoint { + name: name.into(), + ..Default::default() + } + } + + /// Get name. + pub fn name(&self) -> &str { + &self.name + } + + /// Get access enum. 
+ pub fn access(&self) -> &EntryPointAccess { + &self.access + } + + /// Get the arguments for this method. + pub fn args(&self) -> &[Parameter] { + self.args.as_slice() + } + + /// Get the return type. + pub fn ret(&self) -> &CLType { + &self.ret + } + + /// Obtains entry point + pub fn entry_point_type(&self) -> EntryPointType { + self.entry_point_type + } +} + +impl Default for EntryPoint { + /// constructor for a public session `EntryPoint` that takes no args and returns `Unit` + fn default() -> Self { + EntryPoint { + name: DEFAULT_ENTRY_POINT_NAME.to_string(), + args: Vec::new(), + ret: CLType::Unit, + access: EntryPointAccess::Public, + entry_point_type: EntryPointType::Caller, + } + } +} + +impl ToBytes for EntryPoint { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.name.serialized_length() + + self.args.serialized_length() + + self.ret.serialized_length() + + self.access.serialized_length() + + self.entry_point_type.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.name.write_bytes(writer)?; + self.args.write_bytes(writer)?; + self.ret.append_bytes(writer)?; + self.access.write_bytes(writer)?; + self.entry_point_type.write_bytes(writer)?; + Ok(()) + } +} + +impl FromBytes for EntryPoint { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (name, bytes) = String::from_bytes(bytes)?; + let (args, bytes) = Vec::::from_bytes(bytes)?; + let (ret, bytes) = CLType::from_bytes(bytes)?; + let (access, bytes) = EntryPointAccess::from_bytes(bytes)?; + let (entry_point_type, bytes) = EntryPointType::from_bytes(bytes)?; + + Ok(( + EntryPoint { + name, + args, + ret, + access, + entry_point_type, + }, + bytes, + )) + } +} + +/// Collection of named entry points. 
+#[derive(Clone, PartialEq, Eq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[serde(transparent, deny_unknown_fields)] +pub struct EntryPoints(BTreeMap); + +impl From for EntryPoints { + fn from(value: EntityEntryPoints) -> Self { + let mut ret = EntryPoints::new(); + for entity_entry_point in value.take_entry_points() { + let entry_point = EntryPoint::new( + entity_entry_point.name(), + Parameters::from(entity_entry_point.args()), + entity_entry_point.ret().clone(), + entity_entry_point.access().clone(), + entity_entry_point.entry_point_type(), + ); + ret.add_entry_point(entry_point); + } + ret + } +} + +impl ToBytes for EntryPoints { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.0.to_bytes() + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.0.write_bytes(writer) + } +} + +impl FromBytes for EntryPoints { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (entry_points_map, remainder) = BTreeMap::::from_bytes(bytes)?; + Ok((EntryPoints(entry_points_map), remainder)) + } +} + +impl Default for EntryPoints { + fn default() -> Self { + let mut entry_points = EntryPoints::new(); + let entry_point = EntryPoint::default(); + entry_points.add_entry_point(entry_point); + entry_points + } +} + +impl From for EntityEntryPoint { + fn from(value: EntryPoint) -> Self { + EntityEntryPoint::from(&value) + } +} + +impl From<&EntryPoint> for EntityEntryPoint { + fn from(value: &EntryPoint) -> Self { + EntityEntryPoint::new( + value.name.clone(), + value.args.clone(), + value.ret.clone(), + value.access.clone(), + value.entry_point_type, + EntryPointPayment::Caller, + ) + } +} + +impl EntryPoints { + /// Constructs a new, empty `EntryPoints`. 
+ pub const fn new() -> EntryPoints { + EntryPoints(BTreeMap::::new()) + } + + /// Constructs a new `EntryPoints` with a single entry for the default `EntryPoint`. + pub fn new_with_default_entry_point() -> Self { + let mut entry_points = EntryPoints::new(); + let entry_point = EntryPoint::default(); + entry_points.add_entry_point(entry_point); + entry_points + } + + /// Adds new [`EntryPoint`]. + pub fn add_entry_point(&mut self, entry_point: EntryPoint) -> Option { + self.0.insert(entry_point.name().to_string(), entry_point) } /// Checks if given [`EntryPoint`] exists. @@ -893,7 +1239,24 @@ impl EntryPoints { /// Takes all entry points. pub fn take_entry_points(self) -> Vec { - self.0.into_iter().map(|(_name, value)| value).collect() + self.0.into_values().collect() + } + + /// Returns the length of the entry points + pub fn len(&self) -> usize { + self.0.len() + } + + /// Checks if the `EntryPoints` is empty. + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } + + /// Checks if any of the entry points are of the type Session. + pub fn contains_stored_session(&self) -> bool { + self.0 + .values() + .any(|entry_point| entry_point.entry_point_type == EntryPointType::Caller) } } @@ -907,39 +1270,29 @@ impl From> for EntryPoints { } } -/// Collection of named keys -pub type NamedKeys = BTreeMap; +impl From for EntityEntryPoints { + fn from(value: EntryPoints) -> Self { + let mut entry_points = EntityEntryPoints::new(); + for contract_entry_point in value.take_entry_points() { + entry_points.add_entry_point(EntityEntryPoint::from(contract_entry_point)); + } + entry_points + } +} /// Methods and type signatures supported by a contract. 
-#[derive(Debug, Clone, PartialEq, Eq, Serialize)] +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] pub struct Contract { contract_package_hash: ContractPackageHash, contract_wasm_hash: ContractWasmHash, named_keys: NamedKeys, + #[cfg_attr(feature = "json-schema", schemars(with = "Vec"))] entry_points: EntryPoints, protocol_version: ProtocolVersion, } -impl From - for ( - ContractPackageHash, - ContractWasmHash, - NamedKeys, - EntryPoints, - ProtocolVersion, - ) -{ - fn from(contract: Contract) -> Self { - ( - contract.contract_package_hash, - contract.contract_wasm_hash, - contract.named_keys, - contract.entry_points, - contract.protocol_version, - ) - } -} - impl Contract { /// `Contract` constructor. pub fn new( @@ -1009,7 +1362,7 @@ impl Contract { } /// Appends `keys` to `named_keys` - pub fn named_keys_append(&mut self, keys: &mut NamedKeys) { + pub fn named_keys_append(&mut self, keys: NamedKeys) { self.named_keys.append(keys); } @@ -1023,7 +1376,7 @@ impl Contract { self.protocol_version = protocol_version; } - /// Determines if `Contract` is compatibile with a given `ProtocolVersion`. + /// Determines if `Contract` is compatible with a given `ProtocolVersion`. 
pub fn is_compatible_protocol_version(&self, protocol_version: ProtocolVersion) -> bool { self.protocol_version.value().major == protocol_version.value().major } @@ -1032,11 +1385,11 @@ impl Contract { impl ToBytes for Contract { fn to_bytes(&self) -> Result, bytesrepr::Error> { let mut result = bytesrepr::allocate_buffer(self)?; - result.append(&mut self.contract_package_hash.to_bytes()?); - result.append(&mut self.contract_wasm_hash.to_bytes()?); - result.append(&mut self.named_keys.to_bytes()?); - result.append(&mut self.entry_points.to_bytes()?); - result.append(&mut self.protocol_version.to_bytes()?); + self.contract_package_hash().write_bytes(&mut result)?; + self.contract_wasm_hash().write_bytes(&mut result)?; + self.named_keys().write_bytes(&mut result)?; + self.entry_points().write_bytes(&mut result)?; + self.protocol_version().write_bytes(&mut result)?; Ok(result) } @@ -1047,6 +1400,15 @@ impl ToBytes for Contract { + ToBytes::serialized_length(&self.protocol_version) + ToBytes::serialized_length(&self.named_keys) } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.contract_package_hash().write_bytes(writer)?; + self.contract_wasm_hash().write_bytes(writer)?; + self.named_keys().write_bytes(writer)?; + self.entry_points().write_bytes(writer)?; + self.protocol_version().write_bytes(writer)?; + Ok(()) + } } impl FromBytes for Contract { @@ -1081,302 +1443,24 @@ impl Default for Contract { } } -/// Context of method execution -#[repr(u8)] -#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)] -#[cfg_attr(feature = "std", derive(JsonSchema))] -pub enum EntryPointType { - /// Runs as session code - Session = 0, - /// Runs within contract's context - Contract = 1, -} - -impl ToBytes for EntryPointType { - fn to_bytes(&self) -> Result, bytesrepr::Error> { - (*self as u8).to_bytes() - } - - fn serialized_length(&self) -> usize { - 1 - } -} - -impl FromBytes for EntryPointType { - fn from_bytes(bytes: &[u8]) -> 
Result<(Self, &[u8]), bytesrepr::Error> { - let (value, bytes) = u8::from_bytes(bytes)?; - match value { - 0 => Ok((EntryPointType::Session, bytes)), - 1 => Ok((EntryPointType::Contract, bytes)), - _ => Err(bytesrepr::Error::Formatting), - } - } -} - /// Default name for an entry point pub const DEFAULT_ENTRY_POINT_NAME: &str = "call"; /// Default name for an installer entry point pub const ENTRY_POINT_NAME_INSTALL: &str = "install"; -/// Default name for an upgrader entry point +/// Default name for an upgrade entry point pub const UPGRADE_ENTRY_POINT_NAME: &str = "upgrade"; -/// Collection of entry point parameters. -pub type Parameters = Vec; - -/// Type signature of a method. Order of arguments matter since can be -/// referenced by index as well as name. -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -#[cfg_attr(feature = "std", derive(JsonSchema))] -pub struct EntryPoint { - name: String, - args: Parameters, - ret: CLType, - access: EntryPointAccess, - entry_point_type: EntryPointType, -} - -impl From for (String, Parameters, CLType, EntryPointAccess, EntryPointType) { - fn from(entry_point: EntryPoint) -> Self { - ( - entry_point.name, - entry_point.args, - entry_point.ret, - entry_point.access, - entry_point.entry_point_type, - ) - } -} - -impl EntryPoint { - /// `EntryPoint` constructor. - pub fn new>( - name: T, - args: Parameters, - ret: CLType, - access: EntryPointAccess, - entry_point_type: EntryPointType, - ) -> Self { - EntryPoint { - name: name.into(), - args, - ret, - access, - entry_point_type, - } - } - - /// Create a default [`EntryPoint`] with specified name. - pub fn default_with_name>(name: T) -> Self { - EntryPoint { - name: name.into(), - ..Default::default() - } - } - - /// Get name. - pub fn name(&self) -> &str { - &self.name - } - - /// Get access enum. - pub fn access(&self) -> &EntryPointAccess { - &self.access - } - - /// Get the arguments for this method. 
- pub fn args(&self) -> &[Parameter] { - self.args.as_slice() - } - - /// Get the return type. - pub fn ret(&self) -> &CLType { - &self.ret - } - - /// Obtains entry point - pub fn entry_point_type(&self) -> EntryPointType { - self.entry_point_type - } -} - -impl Default for EntryPoint { - /// constructor for a public session `EntryPoint` that takes no args and returns `Unit` - fn default() -> Self { - EntryPoint { - name: DEFAULT_ENTRY_POINT_NAME.to_string(), - args: Vec::new(), - ret: CLType::Unit, - access: EntryPointAccess::Public, - entry_point_type: EntryPointType::Session, - } - } -} - -impl ToBytes for EntryPoint { - fn to_bytes(&self) -> Result, bytesrepr::Error> { - let mut result = bytesrepr::allocate_buffer(self)?; - result.append(&mut self.name.to_bytes()?); - result.append(&mut self.args.to_bytes()?); - self.ret.append_bytes(&mut result)?; - result.append(&mut self.access.to_bytes()?); - result.append(&mut self.entry_point_type.to_bytes()?); - - Ok(result) - } - - fn serialized_length(&self) -> usize { - self.name.serialized_length() - + self.args.serialized_length() - + self.ret.serialized_length() - + self.access.serialized_length() - + self.entry_point_type.serialized_length() - } -} - -impl FromBytes for EntryPoint { - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { - let (name, bytes) = String::from_bytes(bytes)?; - let (args, bytes) = Vec::::from_bytes(bytes)?; - let (ret, bytes) = CLType::from_bytes(bytes)?; - let (access, bytes) = EntryPointAccess::from_bytes(bytes)?; - let (entry_point_type, bytes) = EntryPointType::from_bytes(bytes)?; - - Ok(( - EntryPoint { - name, - args, - ret, - access, - entry_point_type, - }, - bytes, - )) - } -} - -/// Enum describing the possible access control options for a contract entry -/// point (method). 
-#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -#[cfg_attr(feature = "std", derive(JsonSchema))] -pub enum EntryPointAccess { - /// Anyone can call this method (no access controls). - Public, - /// Only users from the listed groups may call this method. Note: if the - /// list is empty then this method is not callable from outside the - /// contract. - Groups(Vec), -} - -const ENTRYPOINTACCESS_PUBLIC_TAG: u8 = 1; -const ENTRYPOINTACCESS_GROUPS_TAG: u8 = 2; - -impl EntryPointAccess { - /// Constructor for access granted to only listed groups. - pub fn groups(labels: &[&str]) -> Self { - let list: Vec = labels.iter().map(|s| Group(String::from(*s))).collect(); - EntryPointAccess::Groups(list) - } -} - -impl ToBytes for EntryPointAccess { - fn to_bytes(&self) -> Result, bytesrepr::Error> { - let mut result = bytesrepr::allocate_buffer(self)?; - - match self { - EntryPointAccess::Public => { - result.push(ENTRYPOINTACCESS_PUBLIC_TAG); - } - EntryPointAccess::Groups(groups) => { - result.push(ENTRYPOINTACCESS_GROUPS_TAG); - result.append(&mut groups.to_bytes()?); - } - } - Ok(result) - } - - fn serialized_length(&self) -> usize { - match self { - EntryPointAccess::Public => 1, - EntryPointAccess::Groups(groups) => 1 + groups.serialized_length(), - } - } -} - -impl FromBytes for EntryPointAccess { - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { - let (tag, bytes) = u8::from_bytes(bytes)?; - - match tag { - ENTRYPOINTACCESS_PUBLIC_TAG => Ok((EntryPointAccess::Public, bytes)), - ENTRYPOINTACCESS_GROUPS_TAG => { - let (groups, bytes) = Vec::::from_bytes(bytes)?; - let result = EntryPointAccess::Groups(groups); - Ok((result, bytes)) - } - _ => Err(bytesrepr::Error::Formatting), - } - } -} - -/// Parameter to a method -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -#[cfg_attr(feature = "std", derive(JsonSchema))] -pub struct Parameter { - name: String, - cl_type: CLType, -} - -impl Parameter { - /// `Parameter` 
constructor. - pub fn new>(name: T, cl_type: CLType) -> Self { - Parameter { - name: name.into(), - cl_type, - } - } - - /// Get the type of this argument. - pub fn cl_type(&self) -> &CLType { - &self.cl_type - } -} - -impl From for (String, CLType) { - fn from(parameter: Parameter) -> Self { - (parameter.name, parameter.cl_type) - } -} - -impl ToBytes for Parameter { - fn to_bytes(&self) -> Result, bytesrepr::Error> { - let mut result = ToBytes::to_bytes(&self.name)?; - self.cl_type.append_bytes(&mut result)?; - - Ok(result) - } - - fn serialized_length(&self) -> usize { - ToBytes::serialized_length(&self.name) + self.cl_type.serialized_length() - } -} - -impl FromBytes for Parameter { - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { - let (name, bytes) = String::from_bytes(bytes)?; - let (cl_type, bytes) = CLType::from_bytes(bytes)?; - - Ok((Parameter { name, cl_type }, bytes)) - } -} - #[cfg(test)] mod tests { use super::*; - use crate::{AccessRights, URef}; + use crate::{AccessRights, EntryPointAccess, EntryPointType, Group, Parameter, URef}; use alloc::borrow::ToOwned; + const CONTRACT_HASH_V1: ContractHash = ContractHash::new([42; 32]); + const CONTRACT_HASH_V2: ContractHash = ContractHash::new([84; 32]); + fn make_contract_package() -> ContractPackage { let mut contract_package = ContractPackage::new( URef::new([0; 32], AccessRights::NONE), @@ -1411,7 +1495,7 @@ mod tests { vec![], CLType::U32, EntryPointAccess::groups(&["Group 2"]), - EntryPointType::Session, + EntryPointType::Caller, ); ret.insert(entrypoint.name().to_owned(), entrypoint); let entrypoint = EntryPoint::new( @@ -1419,46 +1503,25 @@ mod tests { vec![Parameter::new("Foo", CLType::U32)], CLType::U32, EntryPointAccess::groups(&["Group 1"]), - EntryPointType::Session, + EntryPointType::Caller, ); ret.insert(entrypoint.name().to_owned(), entrypoint); ret }; let _contract_package_hash = [41; 32]; - let contract_hash = [42; 32]; let _contract_wasm_hash = [43; 32]; let 
_named_keys = NamedKeys::new(); let protocol_version = ProtocolVersion::V1_0_0; - contract_package - .insert_contract_version(protocol_version.value().major, contract_hash.into()); - - contract_package - } - - #[test] - fn next_contract_version() { - let major = 1; - let mut contract_package = ContractPackage::new( - URef::new([0; 32], AccessRights::NONE), - ContractVersions::default(), - DisabledVersions::default(), - Groups::default(), - ContractPackageStatus::default(), - ); - assert_eq!(contract_package.next_contract_version_for(major), 1); + let v1 = contract_package + .insert_contract_version(protocol_version.value().major, CONTRACT_HASH_V1); + let v2 = contract_package + .insert_contract_version(protocol_version.value().major, CONTRACT_HASH_V2); - let next_version = contract_package.insert_contract_version(major, [123; 32].into()); - assert_eq!(next_version, ContractVersionKey::new(major, 1)); - assert_eq!(contract_package.next_contract_version_for(major), 2); - let next_version_2 = contract_package.insert_contract_version(major, [124; 32].into()); - assert_eq!(next_version_2, ContractVersionKey::new(major, 2)); + assert!(v2 > v1); - let major = 2; - assert_eq!(contract_package.next_contract_version_for(major), 1); - let next_version_3 = contract_package.insert_contract_version(major, [42; 32].into()); - assert_eq!(next_version_3, ContractVersionKey::new(major, 1)); + contract_package } #[test] @@ -1471,50 +1534,6 @@ mod tests { assert_eq!(rem.len(), 0); } - #[test] - fn should_remove_group() { - let mut contract_package = make_contract_package(); - - assert!(!contract_package.remove_group(&Group::new("Non-existent group"))); - assert!(contract_package.remove_group(&Group::new("Group 1"))); - assert!(!contract_package.remove_group(&Group::new("Group 1"))); // Group no longer exists - } - - #[test] - fn should_disable_contract_version() { - const CONTRACT_HASH: ContractHash = ContractHash::new([123; 32]); - let mut contract_package = make_contract_package(); 
- - assert_eq!( - contract_package.disable_contract_version(CONTRACT_HASH), - Err(Error::ContractNotFound), - "should return contract not found error" - ); - - let next_version = contract_package.insert_contract_version(1, CONTRACT_HASH); - assert!( - contract_package.is_version_enabled(next_version), - "version should exist and be enabled" - ); - - assert_eq!( - contract_package.disable_contract_version(CONTRACT_HASH), - Ok(()), - "should be able to disable version" - ); - - assert_eq!( - contract_package.lookup_contract_hash(next_version), - None, - "should not return disabled contract version" - ); - - assert!( - !contract_package.is_version_enabled(next_version), - "version should not be enabled" - ); - } - #[test] fn contract_hash_from_slice() { let bytes: Vec = (0..32).collect(); @@ -1556,26 +1575,79 @@ mod tests { #[test] fn contract_package_hash_from_str() { - let contract_hash = ContractPackageHash([3; 32]); - let encoded = contract_hash.to_formatted_string(); + let contract_package_hash = ContractPackageHash([3; 32]); + let encoded = contract_package_hash.to_formatted_string(); let decoded = ContractPackageHash::from_formatted_str(&encoded).unwrap(); - assert_eq!(contract_hash, decoded); + assert_eq!(contract_package_hash, decoded); let invalid_prefix = - "contractpackage-0000000000000000000000000000000000000000000000000000000000000000"; - assert!(ContractPackageHash::from_formatted_str(invalid_prefix).is_err()); + "contract-package0000000000000000000000000000000000000000000000000000000000000000"; + assert!(matches!( + ContractPackageHash::from_formatted_str(invalid_prefix).unwrap_err(), + FromStrError::InvalidPrefix + )); let short_addr = "contract-package-00000000000000000000000000000000000000000000000000000000000000"; - assert!(ContractPackageHash::from_formatted_str(short_addr).is_err()); + assert!(matches!( + ContractPackageHash::from_formatted_str(short_addr).unwrap_err(), + FromStrError::Hash(_) + )); let long_addr = 
"contract-package-000000000000000000000000000000000000000000000000000000000000000000"; - assert!(ContractPackageHash::from_formatted_str(long_addr).is_err()); + assert!(matches!( + ContractPackageHash::from_formatted_str(long_addr).unwrap_err(), + FromStrError::Hash(_) + )); let invalid_hex = "contract-package-000000000000000000000000000000000000000000000000000000000000000g"; - assert!(ContractPackageHash::from_formatted_str(invalid_hex).is_err()); + assert!(matches!( + ContractPackageHash::from_formatted_str(invalid_hex).unwrap_err(), + FromStrError::Hex(_) + )); + } + + #[test] + fn contract_package_hash_from_legacy_str() { + let contract_package_hash = ContractPackageHash([3; 32]); + let hex_addr = contract_package_hash.to_string(); + let legacy_encoded = format!("contract-package-wasm{}", hex_addr); + let decoded_from_legacy = ContractPackageHash::from_formatted_str(&legacy_encoded) + .expect("should accept legacy prefixed string"); + assert_eq!( + contract_package_hash, decoded_from_legacy, + "decoded_from_legacy should equal decoded" + ); + + let invalid_prefix = + "contract-packagewasm0000000000000000000000000000000000000000000000000000000000000000"; + assert!(matches!( + ContractPackageHash::from_formatted_str(invalid_prefix).unwrap_err(), + FromStrError::InvalidPrefix + )); + + let short_addr = + "contract-package-wasm00000000000000000000000000000000000000000000000000000000000000"; + assert!(matches!( + ContractPackageHash::from_formatted_str(short_addr).unwrap_err(), + FromStrError::Hash(_) + )); + + let long_addr = + "contract-package-wasm000000000000000000000000000000000000000000000000000000000000000000"; + assert!(matches!( + ContractPackageHash::from_formatted_str(long_addr).unwrap_err(), + FromStrError::Hash(_) + )); + + let invalid_hex = + "contract-package-wasm000000000000000000000000000000000000000000000000000000000000000g"; + assert!(matches!( + ContractPackageHash::from_formatted_str(invalid_hex).unwrap_err(), + FromStrError::Hex(_) + )); } 
#[test] @@ -1609,4 +1681,76 @@ mod tests { let decoded = serde_json::from_str(&json_string).unwrap(); assert_eq!(contract_hash, decoded) } + + #[test] + fn package_hash_from_legacy_str() { + let package_hash = ContractPackageHash([3; 32]); + let hex_addr = package_hash.to_string(); + let legacy_encoded = format!("contract-package-wasm{}", hex_addr); + let decoded_from_legacy = ContractPackageHash::from_formatted_str(&legacy_encoded) + .expect("should accept legacy prefixed string"); + assert_eq!( + package_hash, decoded_from_legacy, + "decoded_from_legacy should equal decoded" + ); + + let invalid_prefix = + "contract-packagewasm0000000000000000000000000000000000000000000000000000000000000000"; + assert!(matches!( + ContractPackageHash::from_formatted_str(invalid_prefix).unwrap_err(), + FromStrError::InvalidPrefix + )); + + let short_addr = + "contract-package-wasm00000000000000000000000000000000000000000000000000000000000000"; + assert!(matches!( + ContractPackageHash::from_formatted_str(short_addr).unwrap_err(), + FromStrError::Hash(_) + )); + + let long_addr = + "contract-package-wasm000000000000000000000000000000000000000000000000000000000000000000"; + assert!(matches!( + ContractPackageHash::from_formatted_str(long_addr).unwrap_err(), + FromStrError::Hash(_) + )); + + let invalid_hex = + "contract-package-wasm000000000000000000000000000000000000000000000000000000000000000g"; + assert!(matches!( + ContractPackageHash::from_formatted_str(invalid_hex).unwrap_err(), + FromStrError::Hex(_) + )); + } +} + +#[cfg(test)] +mod prop_tests { + use proptest::prelude::*; + + use crate::{bytesrepr, contracts::ContractPackage, gens}; + + proptest! { + #![proptest_config(ProptestConfig { + cases: 1024, + .. 
ProptestConfig::default() + })] + + #[test] + fn test_value_contract(contract in gens::contract_arb()) { + bytesrepr::test_serialization_roundtrip(&contract); + } + + #[test] + fn test_value_contract_package(contract_pkg in gens::contract_package_arb()) { + bytesrepr::test_serialization_roundtrip(&contract_pkg); + } + + #[test] + fn test_json_contract_package(v in gens::contract_package_arb()) { + let json_str = serde_json::to_string(&v).unwrap(); + let deserialized = serde_json::from_str::(&json_str).unwrap(); + assert_eq!(v, deserialized); + } + } } diff --git a/types/src/contracts/named_keys.rs b/types/src/contracts/named_keys.rs new file mode 100644 index 0000000000..8574fa83c8 --- /dev/null +++ b/types/src/contracts/named_keys.rs @@ -0,0 +1,181 @@ +use alloc::{collections::BTreeMap, string::String, vec::Vec}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; +use serde_map_to_array::{BTreeMapToArray, KeyValueLabels}; + +#[cfg(feature = "json-schema")] +use crate::execution::execution_result_v1::NamedKey; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + CLType, CLTyped, Key, +}; + +/// A collection of named keys. +#[derive(Clone, Eq, PartialEq, Default, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +#[rustfmt::skip] +pub struct NamedKeys( + #[serde(with = "BTreeMapToArray::")] + #[cfg_attr(feature = "json-schema", schemars(with = "Vec"))] + BTreeMap, +); + +impl NamedKeys { + /// Constructs a new, empty `NamedKeys`. + pub const fn new() -> Self { + NamedKeys(BTreeMap::new()) + } + + /// Consumes `self`, returning the wrapped map. + pub fn into_inner(self) -> BTreeMap { + self.0 + } + + /// Inserts a named key. + /// + /// If the map did not have this name present, `None` is returned. 
If the map did have this + /// name present, the `Key` is updated, and the old `Key` is returned. + pub fn insert(&mut self, name: String, key: Key) -> Option { + self.0.insert(name, key) + } + + /// Moves all elements from `other` into `self`. + pub fn append(&mut self, mut other: Self) { + self.0.append(&mut other.0) + } + + /// Removes a named `Key`, returning the `Key` if it existed in the collection. + pub fn remove(&mut self, name: &str) -> Option { + self.0.remove(name) + } + + /// Returns a reference to the `Key` under the given `name` if any. + pub fn get(&self, name: &str) -> Option<&Key> { + self.0.get(name) + } + + /// Returns `true` if the named `Key` exists in the collection. + pub fn contains(&self, name: &str) -> bool { + self.0.contains_key(name) + } + + /// Returns an iterator over the names. + pub fn names(&self) -> impl Iterator { + self.0.keys() + } + + /// Returns an iterator over the `Key`s (i.e. the map's values). + pub fn keys(&self) -> impl Iterator { + self.0.values() + } + + /// Returns a mutable iterator over the `Key`s (i.e. the map's values). + pub fn keys_mut(&mut self) -> impl Iterator { + self.0.values_mut() + } + + /// Returns an iterator over the name-key pairs. + pub fn iter(&self) -> impl Iterator { + self.0.iter() + } + + /// Returns the number of named `Key`s. + pub fn len(&self) -> usize { + self.0.len() + } + + /// Returns `true` if there are no named `Key`s. 
+ pub fn is_empty(&self) -> bool { + self.0.is_empty() + } +} + +impl From> for NamedKeys { + fn from(value: BTreeMap) -> Self { + NamedKeys(value) + } +} + +impl ToBytes for NamedKeys { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.0.write_bytes(writer) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } +} + +impl FromBytes for NamedKeys { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (named_keys, remainder) = BTreeMap::::from_bytes(bytes)?; + Ok((NamedKeys(named_keys), remainder)) + } +} + +impl CLTyped for NamedKeys { + fn cl_type() -> CLType { + BTreeMap::::cl_type() + } +} + +struct Labels; + +impl KeyValueLabels for Labels { + const KEY: &'static str = "name"; + const VALUE: &'static str = "key"; +} + +#[cfg(test)] +mod tests { + use rand::Rng; + + use super::*; + use crate::testing::TestRng; + + /// `NamedKeys` was previously (pre node v2.0.0) just an alias for `BTreeMap`. + /// Check if we serialize as the old form, that can deserialize to the new. 
+ #[test] + fn should_be_backwards_compatible() { + let rng = &mut TestRng::new(); + let mut named_keys = NamedKeys::new(); + assert!(named_keys.insert("a".to_string(), rng.gen()).is_none()); + assert!(named_keys.insert("bb".to_string(), rng.gen()).is_none()); + assert!(named_keys.insert("ccc".to_string(), rng.gen()).is_none()); + + let serialized_old = bincode::serialize(&named_keys.0).unwrap(); + let parsed_new = bincode::deserialize(&serialized_old).unwrap(); + assert_eq!(named_keys, parsed_new); + + let serialized_old = bytesrepr::serialize(&named_keys.0).unwrap(); + let parsed_new = bytesrepr::deserialize(serialized_old).unwrap(); + assert_eq!(named_keys, parsed_new); + } + + #[test] + fn should_match_field_names() { + // this test was written to ensure that the schema generated by schemars matches the serde + // encoding, both are configured using attributes and they can get out of sync + let mut named_keys = NamedKeys::new(); + named_keys.insert("key".to_string(), Key::Hash([0u8; 32])); + assert_eq!( + serde_json::to_value(&named_keys).expect("should serialize"), + serde_json::json!([{ + Labels::KEY: "key", + Labels::VALUE: "hash-0000000000000000000000000000000000000000000000000000000000000000" + }]) + ); + } +} diff --git a/types/src/crypto.rs b/types/src/crypto.rs index 62b0ae55e0..1e64e1ca98 100644 --- a/types/src/crypto.rs +++ b/types/src/crypto.rs @@ -3,10 +3,55 @@ mod asymmetric_key; mod error; -#[cfg(any(feature = "gens", test))] +use blake2::{ + digest::{Update, VariableOutput}, + VarBlake2b, +}; + +use num::FromPrimitive; +use num_derive::FromPrimitive; + +pub use crate::key::BLAKE2B_DIGEST_LENGTH; +#[cfg(any(feature = "std", test))] +pub use asymmetric_key::generate_ed25519_keypair; +#[cfg(any(feature = "testing", feature = "gens", test))] pub use asymmetric_key::gens; pub use asymmetric_key::{ - AsymmetricType, PublicKey, SecretKey, Signature, ED25519_TAG, SECP256K1_TAG, SYSTEM_ACCOUNT, - SYSTEM_TAG, + recover_secp256k1, sign, verify, 
AsymmetricType, PublicKey, SecretKey, Signature, ED25519_TAG, + SECP256K1_TAG, SYSTEM_ACCOUNT, SYSTEM_TAG, }; pub use error::Error; +#[cfg(any(feature = "std", feature = "testing", test))] +pub use error::ErrorExt; + +pub(crate) fn blake2b>(data: T) -> [u8; BLAKE2B_DIGEST_LENGTH] { + let mut result = [0; BLAKE2B_DIGEST_LENGTH]; + // NOTE: Assumed safe as `BLAKE2B_DIGEST_LENGTH` is a valid value for a hasher + let mut hasher = VarBlake2b::new(BLAKE2B_DIGEST_LENGTH).expect("should create hasher"); + + hasher.update(data); + hasher.finalize_variable(|slice| { + result.copy_from_slice(slice); + }); + result +} + +/// A type of hashing algorithm. +#[repr(u8)] +#[derive(Debug, Copy, Clone, PartialEq, Eq, FromPrimitive)] +pub enum HashAlgorithm { + /// Blake2b + Blake2b = 0, + /// Blake3 + Blake3 = 1, + /// Sha256, + Sha256 = 2, +} + +impl TryFrom for HashAlgorithm { + type Error = (); + + fn try_from(value: u8) -> Result { + FromPrimitive::from_u8(value).ok_or(()) + } +} diff --git a/types/src/crypto/asymmetric_key.rs b/types/src/crypto/asymmetric_key.rs index d0430de383..3a60de816b 100644 --- a/types/src/crypto/asymmetric_key.rs +++ b/types/src/crypto/asymmetric_key.rs @@ -13,34 +13,56 @@ use core::{ iter, marker::Copy, }; +#[cfg(any(feature = "testing", test))] +use rand::distributions::{Distribution, Standard}; +#[cfg(any(feature = "std-fs-io", test))] +use std::path::Path; +#[cfg(feature = "datasize")] use datasize::DataSize; +#[cfg(any(feature = "std", test))] +use derp::{Der, Tag}; use ed25519_dalek::{ - ed25519::signature::Signature as _Signature, PUBLIC_KEY_LENGTH as ED25519_PUBLIC_KEY_LENGTH, + Signature as Ed25519Signature, SigningKey as Ed25519SecretKey, + VerifyingKey as Ed25519PublicKey, PUBLIC_KEY_LENGTH as ED25519_PUBLIC_KEY_LENGTH, SECRET_KEY_LENGTH as ED25519_SECRET_KEY_LENGTH, SIGNATURE_LENGTH as ED25519_SIGNATURE_LENGTH, }; use hex_fmt::HexFmt; -use k256::{ - ecdsa::{ - Signature as Secp256k1Signature, SigningKey as Secp256k1SecretKey, - VerifyingKey 
as Secp256k1PublicKey, - }, - elliptic_curve::zeroize::Zeroize, - SecretBytes as Secp256k1SecretBytes, +use k256::ecdsa::{ + signature::{Signer, Verifier}, + RecoveryId, Signature as Secp256k1Signature, SigningKey as Secp256k1SecretKey, VerifyingKey, + VerifyingKey as Secp256k1PublicKey, }; -#[cfg(feature = "std")] +#[cfg(feature = "json-schema")] +use once_cell::sync::Lazy; +#[cfg(any(feature = "std", test))] +use pem::Pem; +#[cfg(any(feature = "testing", test))] +use rand::{Rng, RngCore}; +#[cfg(feature = "json-schema")] use schemars::{gen::SchemaGenerator, schema::Schema, JsonSchema}; use serde::{Deserialize, Deserializer, Serialize, Serializer}; - +#[cfg(feature = "json-schema")] +use serde_json::json; +#[cfg(any(feature = "std", test))] +use untrusted::Input; + +#[cfg(any(feature = "std", feature = "testing", test))] +use crate::crypto::ErrorExt; +#[cfg(any(feature = "std-fs-io", test))] +use crate::file_utils::{read_file, write_file, write_private_file}; +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; use crate::{ account::AccountHash, bytesrepr, bytesrepr::{FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + checksummed_hex, crypto::Error, CLType, CLTyped, Tagged, }; -#[cfg(any(feature = "gens", test))] +#[cfg(any(feature = "testing", test))] pub mod gens; #[cfg(test)] mod tests; @@ -66,43 +88,82 @@ const SECP256K1_SIGNATURE_LENGTH: usize = 64; /// Public key for system account. 
pub const SYSTEM_ACCOUNT: PublicKey = PublicKey::System; +// See https://www.secg.org/sec1-v2.pdf#subsection.C.4 +#[cfg(any(feature = "std", test))] +const EC_PUBLIC_KEY_OBJECT_IDENTIFIER: [u8; 7] = [42, 134, 72, 206, 61, 2, 1]; + +// See https://tools.ietf.org/html/rfc8410#section-10.3 +#[cfg(any(feature = "std", test))] +const ED25519_OBJECT_IDENTIFIER: [u8; 3] = [43, 101, 112]; +#[cfg(any(feature = "std", test))] +const ED25519_PEM_SECRET_KEY_TAG: &str = "PRIVATE KEY"; +#[cfg(any(feature = "std", test))] +const ED25519_PEM_PUBLIC_KEY_TAG: &str = "PUBLIC KEY"; + +// Ref? +#[cfg(any(feature = "std", test))] +const SECP256K1_OBJECT_IDENTIFIER: [u8; 5] = [43, 129, 4, 0, 10]; +#[cfg(any(feature = "std", test))] +const SECP256K1_PEM_SECRET_KEY_TAG: &str = "EC PRIVATE KEY"; +#[cfg(any(feature = "std", test))] +const SECP256K1_PEM_PUBLIC_KEY_TAG: &str = "PUBLIC KEY"; + +#[cfg(feature = "json-schema")] +static ED25519_SECRET_KEY: Lazy = Lazy::new(|| { + let bytes = [15u8; SecretKey::ED25519_LENGTH]; + SecretKey::ed25519_from_bytes(bytes).unwrap() +}); + +#[cfg(feature = "json-schema")] +static ED25519_PUBLIC_KEY: Lazy = Lazy::new(|| { + let bytes = [15u8; SecretKey::ED25519_LENGTH]; + let secret_key = SecretKey::ed25519_from_bytes(bytes).unwrap(); + PublicKey::from(&secret_key) +}); + /// Operations on asymmetric cryptographic type. pub trait AsymmetricType<'a> where Self: 'a + Sized + Tagged, - &'a Self: Into>, + Vec: From<&'a Self>, { /// Converts `self` to hex, where the first byte represents the algorithm tag. fn to_hex(&'a self) -> String { let bytes = iter::once(self.tag()) - .chain(self.into()) + .chain(Vec::::from(self)) .collect::>(); - hex::encode(bytes) + base16::encode_lower(&bytes) } /// Tries to decode `Self` from its hex-representation. The hex format should be as produced /// by `AsymmetricType::to_hex()`. 
fn from_hex>(input: A) -> Result { if input.as_ref().len() < 2 { - return Err(Error::AsymmetricKey("too short".to_string())); + return Err(Error::AsymmetricKey( + "failed to decode from hex: too short".to_string(), + )); } - let (tag_bytes, key_bytes) = input.as_ref().split_at(2); - let mut tag = [0u8; 1]; - hex::decode_to_slice(tag_bytes, tag.as_mut())?; + let (tag_hex, key_hex) = input.as_ref().split_at(2); + + let tag = checksummed_hex::decode(tag_hex)?; + let key_bytes = checksummed_hex::decode(key_hex)?; match tag[0] { - ED25519_TAG => { - let bytes = hex::decode(key_bytes)?; - Self::ed25519_from_bytes(&bytes) - } - SECP256K1_TAG => { - let bytes = hex::decode(key_bytes)?; - Self::secp256k1_from_bytes(&bytes) + SYSTEM_TAG => { + if key_bytes.is_empty() { + Ok(Self::system()) + } else { + Err(Error::AsymmetricKey( + "failed to decode from hex: invalid system variant".to_string(), + )) + } } + ED25519_TAG => Self::ed25519_from_bytes(&key_bytes), + SECP256K1_TAG => Self::secp256k1_from_bytes(&key_bytes), _ => Err(Error::AsymmetricKey(format!( - "invalid tag. Expected {} or {}, got {}", - ED25519_TAG, SECP256K1_TAG, tag[0] + "failed to decode from hex: invalid tag. Expected {}, {} or {}, got {}", + SYSTEM_TAG, ED25519_TAG, SECP256K1_TAG, tag[0] ))), } } @@ -118,15 +179,17 @@ where } /// A secret or private asymmetric key. -#[derive(DataSize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[non_exhaustive] pub enum SecretKey { /// System secret key. System, /// Ed25519 secret key. - #[data_size(skip)] // Manually verified to have no data on the heap. - Ed25519(ed25519_dalek::SecretKey), + #[cfg_attr(feature = "datasize", data_size(skip))] + // Manually verified to have no data on the heap. + Ed25519(Ed25519SecretKey), /// secp256k1 secret key. - #[data_size(skip)] + #[cfg_attr(feature = "datasize", data_size(skip))] Secp256k1(Secp256k1SecretKey), } @@ -147,43 +210,247 @@ impl SecretKey { /// Constructs a new ed25519 variant from a byte slice. 
pub fn ed25519_from_bytes>(bytes: T) -> Result { - Ok(SecretKey::Ed25519(ed25519_dalek::SecretKey::from_bytes( + Ok(SecretKey::Ed25519(Ed25519SecretKey::try_from( bytes.as_ref(), )?)) } /// Constructs a new secp256k1 variant from a byte slice. pub fn secp256k1_from_bytes>(bytes: T) -> Result { - Ok(SecretKey::Secp256k1(Secp256k1SecretKey::from_bytes( + Ok(SecretKey::Secp256k1(Secp256k1SecretKey::from_slice( bytes.as_ref(), )?)) } - fn variant_name(&self) -> &str { - match self { - SecretKey::System => SYSTEM, - SecretKey::Ed25519(_) => ED25519, - SecretKey::Secp256k1(_) => SECP256K1, - } + /// Generates a new ed25519 variant using the system's secure random number generator. + #[cfg(any(feature = "std", feature = "testing", test))] + pub fn generate_ed25519() -> Result { + let mut bytes = [0u8; Self::ED25519_LENGTH]; + getrandom::getrandom(&mut bytes[..])?; + SecretKey::ed25519_from_bytes(bytes).map_err(Into::into) + } + + /// Generates a new secp256k1 variant using the system's secure random number generator. + #[cfg(any(feature = "std", feature = "testing", test))] + pub fn generate_secp256k1() -> Result { + let mut bytes = [0u8; Self::SECP256K1_LENGTH]; + getrandom::getrandom(&mut bytes[..])?; + SecretKey::secp256k1_from_bytes(bytes).map_err(Into::into) } -} -impl Clone for SecretKey { - fn clone(&self) -> Self { + /// Attempts to write the key bytes to the configured file path. + #[cfg(any(feature = "std-fs-io", test))] + pub fn to_file>(&self, file: P) -> Result<(), ErrorExt> { + write_private_file(file, self.to_pem()?).map_err(ErrorExt::SecretKeySave) + } + + /// Attempts to read the key bytes from configured file path. + #[cfg(any(feature = "std-fs-io", test))] + pub fn from_file>(file: P) -> Result { + let data = read_file(file).map_err(ErrorExt::SecretKeyLoad)?; + Self::from_pem(data) + } + + /// DER encodes a key. 
+ #[cfg(any(feature = "std", test))] + pub fn to_der(&self) -> Result, ErrorExt> { match self { - SecretKey::System => SecretKey::System, + SecretKey::System => Err(Error::System(String::from("to_der")).into()), SecretKey::Ed25519(secret_key) => { - Self::ed25519_from_bytes(*secret_key.as_bytes()).unwrap() + // See https://tools.ietf.org/html/rfc8410#section-10.3 + let mut key_bytes = vec![]; + let mut der = Der::new(&mut key_bytes); + der.octet_string(&secret_key.to_bytes())?; + + let mut encoded = vec![]; + der = Der::new(&mut encoded); + der.sequence(|der| { + der.integer(&[0])?; + der.sequence(|der| der.oid(&ED25519_OBJECT_IDENTIFIER))?; + der.octet_string(&key_bytes) + })?; + Ok(encoded) } SecretKey::Secp256k1(secret_key) => { - // Use `Secp256k1SecretBytes` to ensure the raw bytes are zeroized after use. - let mut secret_bytes = Secp256k1SecretBytes::from(secret_key.to_bytes()); - let secret_key = Self::secp256k1_from_bytes(secret_bytes.as_ref()).unwrap(); - secret_bytes.zeroize(); - secret_key + // See https://www.secg.org/sec1-v2.pdf#subsection.C.4 + let mut oid_bytes = vec![]; + let mut der = Der::new(&mut oid_bytes); + der.oid(&SECP256K1_OBJECT_IDENTIFIER)?; + + let mut encoded = vec![]; + der = Der::new(&mut encoded); + der.sequence(|der| { + der.integer(&[1])?; + der.octet_string(secret_key.to_bytes().as_slice())?; + der.element(Tag::ContextSpecificConstructed0, &oid_bytes) + })?; + Ok(encoded) } } } + + /// Decodes a key from a DER-encoded slice. + #[cfg(any(feature = "std", test))] + pub fn from_der>(input: T) -> Result { + let input = Input::from(input.as_ref()); + + let (key_type_tag, raw_bytes) = input.read_all(derp::Error::Read, |input| { + derp::nested(input, Tag::Sequence, |input| { + // Safe to ignore the first value which should be an integer. 
+ let version_slice = + derp::expect_tag_and_get_value(input, Tag::Integer)?.as_slice_less_safe(); + if version_slice.len() != 1 { + return Err(derp::Error::NonZeroUnusedBits); + } + let version = version_slice[0]; + + // Read the next value. + let (tag, value) = derp::read_tag_and_get_value(input)?; + if tag == Tag::Sequence as u8 { + // Expecting an Ed25519 key. + if version != 0 { + return Err(derp::Error::WrongValue); + } + + // The sequence should have one element: an object identifier defining Ed25519. + let object_identifier = value.read_all(derp::Error::Read, |input| { + derp::expect_tag_and_get_value(input, Tag::Oid) + })?; + if object_identifier.as_slice_less_safe() != ED25519_OBJECT_IDENTIFIER { + return Err(derp::Error::WrongValue); + } + + // The third and final value should be the raw bytes of the secret key as an + // octet string in an octet string. + let raw_bytes = derp::nested(input, Tag::OctetString, |input| { + derp::expect_tag_and_get_value(input, Tag::OctetString) + })? + .as_slice_less_safe(); + + return Ok((ED25519_TAG, raw_bytes)); + } else if tag == Tag::OctetString as u8 { + // Expecting a secp256k1 key. + if version != 1 { + return Err(derp::Error::WrongValue); + } + + // The octet string is the secret key. + let raw_bytes = value.as_slice_less_safe(); + + // The object identifier is next. + let parameter0 = + derp::expect_tag_and_get_value(input, Tag::ContextSpecificConstructed0)?; + let object_identifier = parameter0.read_all(derp::Error::Read, |input| { + derp::expect_tag_and_get_value(input, Tag::Oid) + })?; + if object_identifier.as_slice_less_safe() != SECP256K1_OBJECT_IDENTIFIER { + return Err(derp::Error::WrongValue); + } + + // There might be an optional public key as the final value, but we're not + // interested in parsing that. Read it to ensure `input.read_all` doesn't fail + // with unused bytes error. 
+ let _ = derp::read_tag_and_get_value(input); + + return Ok((SECP256K1_TAG, raw_bytes)); + } + + Err(derp::Error::WrongValue) + }) + })?; + + match key_type_tag { + SYSTEM_TAG => Err(Error::AsymmetricKey("cannot construct variant".to_string()).into()), + ED25519_TAG => SecretKey::ed25519_from_bytes(raw_bytes).map_err(Into::into), + SECP256K1_TAG => SecretKey::secp256k1_from_bytes(raw_bytes).map_err(Into::into), + _ => Err(Error::AsymmetricKey("unknown type tag".to_string()).into()), + } + } + + /// PEM encodes a key. + #[cfg(any(feature = "std", test))] + pub fn to_pem(&self) -> Result { + let tag = match self { + SecretKey::System => return Err(Error::System(String::from("to_pem")).into()), + SecretKey::Ed25519(_) => ED25519_PEM_SECRET_KEY_TAG.to_string(), + SecretKey::Secp256k1(_) => SECP256K1_PEM_SECRET_KEY_TAG.to_string(), + }; + let contents = self.to_der()?; + let pem = Pem { tag, contents }; + Ok(pem::encode(&pem)) + } + + /// Decodes a key from a PEM-encoded slice. + #[cfg(any(feature = "std", test))] + pub fn from_pem>(input: T) -> Result { + let pem = pem::parse(input)?; + + let secret_key = Self::from_der(&pem.contents)?; + + let bad_tag = |expected_tag: &str| { + ErrorExt::FromPem(format!( + "invalid tag: expected {}, got {}", + expected_tag, pem.tag + )) + }; + + match secret_key { + SecretKey::System => return Err(Error::System(String::from("from_pem")).into()), + SecretKey::Ed25519(_) => { + if pem.tag != ED25519_PEM_SECRET_KEY_TAG { + return Err(bad_tag(ED25519_PEM_SECRET_KEY_TAG)); + } + } + SecretKey::Secp256k1(_) => { + if pem.tag != SECP256K1_PEM_SECRET_KEY_TAG { + return Err(bad_tag(SECP256K1_PEM_SECRET_KEY_TAG)); + } + } + } + + Ok(secret_key) + } + + /// Returns a random `SecretKey`. + #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + if rng.gen() { + Self::random_ed25519(rng) + } else { + Self::random_secp256k1(rng) + } + } + + /// Returns a random Ed25519 variant of `SecretKey`. 
+ #[cfg(any(feature = "testing", test))] + pub fn random_ed25519(rng: &mut TestRng) -> Self { + let mut bytes = [0u8; Self::ED25519_LENGTH]; + rng.fill_bytes(&mut bytes[..]); + SecretKey::ed25519_from_bytes(bytes).unwrap() + } + + /// Returns a random secp256k1 variant of `SecretKey`. + #[cfg(any(feature = "testing", test))] + pub fn random_secp256k1(rng: &mut TestRng) -> Self { + let mut bytes = [0u8; Self::SECP256K1_LENGTH]; + rng.fill_bytes(&mut bytes[..]); + SecretKey::secp256k1_from_bytes(bytes).unwrap() + } + + // This method is not intended to be used by third party crates. + #[doc(hidden)] + #[cfg(feature = "json-schema")] + pub fn example() -> &'static Self { + &ED25519_SECRET_KEY + } + + fn variant_name(&self) -> &str { + match self { + SecretKey::System => SYSTEM, + SecretKey::Ed25519(_) => ED25519, + SecretKey::Secp256k1(_) => SECP256K1, + } + } } impl Debug for SecretKey { @@ -209,15 +476,17 @@ impl Tagged for SecretKey { } /// A public asymmetric key. -#[derive(DataSize, Eq, PartialEq)] +#[derive(Clone, Eq, PartialEq)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[non_exhaustive] pub enum PublicKey { /// System public key. System, /// Ed25519 public key. - #[data_size(skip)] // Manually verified to have no data on the heap. - Ed25519(ed25519_dalek::PublicKey), + #[cfg_attr(feature = "datasize", data_size(skip))] + Ed25519(Ed25519PublicKey), /// secp256k1 public key. - #[data_size(skip)] + #[cfg_attr(feature = "datasize", data_size(skip))] Secp256k1(Secp256k1PublicKey), } @@ -236,6 +505,168 @@ impl PublicKey { AccountHash::from(self) } + /// Hexadecimal representation of the key. + pub fn to_hex_string(&self) -> String { + self.to_hex() + } + + /// Returns `true` if this public key is of the `System` variant. + pub fn is_system(&self) -> bool { + matches!(self, PublicKey::System) + } + + /// Attempts to write the key bytes to the configured file path. 
+ #[cfg(any(feature = "std-fs-io", test))] + pub fn to_file>(&self, file: P) -> Result<(), ErrorExt> { + write_file(file, self.to_pem()?).map_err(ErrorExt::PublicKeySave) + } + + /// Attempts to read the key bytes from configured file path. + #[cfg(any(feature = "std-fs-io", test))] + pub fn from_file>(file: P) -> Result { + let data = read_file(file).map_err(ErrorExt::PublicKeyLoad)?; + Self::from_pem(data) + } + + /// DER encodes a key. + #[cfg(any(feature = "std", test))] + pub fn to_der(&self) -> Result, ErrorExt> { + match self { + PublicKey::System => Err(Error::System(String::from("to_der")).into()), + PublicKey::Ed25519(public_key) => { + // See https://tools.ietf.org/html/rfc8410#section-10.1 + let mut encoded = vec![]; + let mut der = Der::new(&mut encoded); + der.sequence(|der| { + der.sequence(|der| der.oid(&ED25519_OBJECT_IDENTIFIER))?; + der.bit_string(0, public_key.as_ref()) + })?; + Ok(encoded) + } + PublicKey::Secp256k1(public_key) => { + // See https://www.secg.org/sec1-v2.pdf#subsection.C.3 + let mut encoded = vec![]; + let mut der = Der::new(&mut encoded); + der.sequence(|der| { + der.sequence(|der| { + der.oid(&EC_PUBLIC_KEY_OBJECT_IDENTIFIER)?; + der.oid(&SECP256K1_OBJECT_IDENTIFIER) + })?; + der.bit_string(0, public_key.to_encoded_point(true).as_ref()) + })?; + Ok(encoded) + } + } + } + + /// Decodes a key from a DER-encoded slice. + #[cfg(any(feature = "std", test))] + pub fn from_der>(input: T) -> Result { + let input = Input::from(input.as_ref()); + + let mut key_type_tag = ED25519_TAG; + let raw_bytes = input.read_all(derp::Error::Read, |input| { + derp::nested(input, Tag::Sequence, |input| { + derp::nested(input, Tag::Sequence, |input| { + // Read the first value. 
+ let object_identifier = + derp::expect_tag_and_get_value(input, Tag::Oid)?.as_slice_less_safe(); + if object_identifier == ED25519_OBJECT_IDENTIFIER { + key_type_tag = ED25519_TAG; + Ok(()) + } else if object_identifier == EC_PUBLIC_KEY_OBJECT_IDENTIFIER { + // Assert the next object identifier is the secp256k1 ID. + let next_object_identifier = + derp::expect_tag_and_get_value(input, Tag::Oid)?.as_slice_less_safe(); + if next_object_identifier != SECP256K1_OBJECT_IDENTIFIER { + return Err(derp::Error::WrongValue); + } + + key_type_tag = SECP256K1_TAG; + Ok(()) + } else { + Err(derp::Error::WrongValue) + } + })?; + Ok(derp::bit_string_with_no_unused_bits(input)?.as_slice_less_safe()) + }) + })?; + + match key_type_tag { + ED25519_TAG => PublicKey::ed25519_from_bytes(raw_bytes).map_err(Into::into), + SECP256K1_TAG => PublicKey::secp256k1_from_bytes(raw_bytes).map_err(Into::into), + _ => unreachable!(), + } + } + + /// PEM encodes a key. + #[cfg(any(feature = "std", test))] + pub fn to_pem(&self) -> Result { + let tag = match self { + PublicKey::System => return Err(Error::System(String::from("to_pem")).into()), + PublicKey::Ed25519(_) => ED25519_PEM_PUBLIC_KEY_TAG.to_string(), + PublicKey::Secp256k1(_) => SECP256K1_PEM_PUBLIC_KEY_TAG.to_string(), + }; + let contents = self.to_der()?; + let pem = Pem { tag, contents }; + Ok(pem::encode(&pem)) + } + + /// Decodes a key from a PEM-encoded slice. 
+ #[cfg(any(feature = "std", test))] + pub fn from_pem>(input: T) -> Result { + let pem = pem::parse(input)?; + let public_key = Self::from_der(&pem.contents)?; + let bad_tag = |expected_tag: &str| { + ErrorExt::FromPem(format!( + "invalid tag: expected {}, got {}", + expected_tag, pem.tag + )) + }; + match public_key { + PublicKey::System => return Err(Error::System(String::from("from_pem")).into()), + PublicKey::Ed25519(_) => { + if pem.tag != ED25519_PEM_PUBLIC_KEY_TAG { + return Err(bad_tag(ED25519_PEM_PUBLIC_KEY_TAG)); + } + } + PublicKey::Secp256k1(_) => { + if pem.tag != SECP256K1_PEM_PUBLIC_KEY_TAG { + return Err(bad_tag(SECP256K1_PEM_PUBLIC_KEY_TAG)); + } + } + } + Ok(public_key) + } + + /// Returns a random `PublicKey`. + #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + let secret_key = SecretKey::random(rng); + PublicKey::from(&secret_key) + } + + /// Returns a random Ed25519 variant of `PublicKey`. + #[cfg(any(feature = "testing", test))] + pub fn random_ed25519(rng: &mut TestRng) -> Self { + let secret_key = SecretKey::random_ed25519(rng); + PublicKey::from(&secret_key) + } + + /// Returns a random secp256k1 variant of `PublicKey`. + #[cfg(any(feature = "testing", test))] + pub fn random_secp256k1(rng: &mut TestRng) -> Self { + let secret_key = SecretKey::random_secp256k1(rng); + PublicKey::from(&secret_key) + } + + // This method is not intended to be used by third party crates. 
+ #[doc(hidden)] + #[cfg(feature = "json-schema")] + pub fn example() -> &'static Self { + &ED25519_PUBLIC_KEY + } + fn variant_name(&self) -> &str { match self { PublicKey::System => SYSTEM, @@ -251,15 +682,16 @@ impl AsymmetricType<'_> for PublicKey { } fn ed25519_from_bytes>(bytes: T) -> Result { - Ok(PublicKey::Ed25519(ed25519_dalek::PublicKey::from_bytes( + Ok(PublicKey::Ed25519(Ed25519PublicKey::try_from( bytes.as_ref(), )?)) } fn secp256k1_from_bytes>(bytes: T) -> Result { - Ok(PublicKey::Secp256k1(Secp256k1PublicKey::from_sec1_bytes( - bytes.as_ref(), - )?)) + Ok(PublicKey::Secp256k1( + Secp256k1PublicKey::from_sec1_bytes(bytes.as_ref()) + .map_err(|_| Error::SignatureError)?, + )) } } @@ -273,18 +705,56 @@ impl From<&SecretKey> for PublicKey { } } -impl From for PublicKey { - fn from(secret_key: SecretKey) -> PublicKey { +#[cfg(any(feature = "testing", test))] +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> PublicKey { + let secret_key = if rng.gen() { + SecretKey::generate_ed25519().unwrap() + } else { + SecretKey::generate_secp256k1().unwrap() + }; PublicKey::from(&secret_key) } } +#[cfg(any(feature = "testing", test))] +impl PartialEq for SecretKey { + fn eq(&self, other: &Self) -> bool { + match (self, other) { + (Self::System, Self::System) => true, + (Self::Ed25519(k1), Self::Ed25519(k2)) => k1.to_bytes() == k2.to_bytes(), + (Self::Secp256k1(k1), Self::Secp256k1(k2)) => k1.to_bytes() == k2.to_bytes(), + _ => false, + } + } +} +#[cfg(any(feature = "testing", test))] +impl Eq for SecretKey {} + +#[cfg(any(feature = "testing", test))] +impl Ord for SecretKey { + fn cmp(&self, other: &Self) -> Ordering { + match (self, other) { + (Self::System, Self::System) => Ordering::Equal, + (Self::Ed25519(k1), Self::Ed25519(k2)) => k1.to_bytes().cmp(&k2.to_bytes()), + (Self::Secp256k1(k1), Self::Secp256k1(k2)) => k1.to_bytes().cmp(&k2.to_bytes()), + (k1, k2) => k1.variant_name().cmp(k2.variant_name()), + } + } +} +#[cfg(any(feature = "testing", 
test))] +impl PartialOrd for SecretKey { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + impl From<&PublicKey> for Vec { fn from(public_key: &PublicKey) -> Self { match public_key { PublicKey::System => Vec::new(), PublicKey::Ed25519(key) => key.to_bytes().into(), - PublicKey::Secp256k1(key) => key.to_bytes().into(), + PublicKey::Secp256k1(key) => key.to_encoded_point(true).as_ref().into(), } } } @@ -301,7 +771,7 @@ impl Debug for PublicKey { formatter, "PublicKey::{}({})", self.variant_name(), - HexFmt(Into::>::into(self)) + base16::encode_lower(&Into::>::into(self)) ) } } @@ -337,7 +807,7 @@ impl Ord for PublicKey { // This implementation of `Hash` agrees with the derived `PartialEq`. It's required since // `ed25519_dalek::PublicKey` doesn't implement `Hash`. -#[allow(clippy::derive_hash_xor_eq)] +#[allow(clippy::derived_hash_with_manual_eq)] impl Hash for PublicKey { fn hash(&self, state: &mut H) { self.tag().hash(state); @@ -355,37 +825,10 @@ impl Tagged for PublicKey { } } -impl Clone for PublicKey { - fn clone(&self) -> Self { - match self { - PublicKey::System => PublicKey::System, - PublicKey::Ed25519(public_key) => PublicKey::Ed25519(*public_key), - PublicKey::Secp256k1(public_key) => { - let raw_bytes: [u8; SECP256K1_COMPRESSED_PUBLIC_KEY_LENGTH] = public_key.to_bytes(); - Self::secp256k1_from_bytes(raw_bytes).unwrap() - } - } - } -} - impl ToBytes for PublicKey { fn to_bytes(&self) -> Result, bytesrepr::Error> { let mut buffer = bytesrepr::allocate_buffer(self)?; - match self { - PublicKey::System => { - buffer.insert(0, SYSTEM_TAG); - } - PublicKey::Ed25519(public_key) => { - buffer.insert(0, ED25519_TAG); - let ed25519_bytes = public_key.as_bytes(); - buffer.extend_from_slice(ed25519_bytes); - } - PublicKey::Secp256k1(public_key) => { - buffer.insert(0, SECP256K1_TAG); - let secp256k1_bytes = public_key.to_bytes(); - buffer.extend_from_slice(&secp256k1_bytes); - } - } + self.write_bytes(&mut buffer)?; Ok(buffer) } @@ 
-397,6 +840,21 @@ impl ToBytes for PublicKey { PublicKey::Secp256k1(_) => Self::SECP256K1_LENGTH, } } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + PublicKey::System => writer.push(SYSTEM_TAG), + PublicKey::Ed25519(public_key) => { + writer.push(ED25519_TAG); + writer.extend_from_slice(public_key.as_bytes()); + } + PublicKey::Secp256k1(public_key) => { + writer.push(SECP256K1_TAG); + writer.extend_from_slice(public_key.to_encoded_point(true).as_ref()); + } + } + Ok(()) + } } impl FromBytes for PublicKey { @@ -435,7 +893,7 @@ impl<'de> Deserialize<'de> for PublicKey { } } -#[cfg(feature = "std")] +#[cfg(feature = "json-schema")] impl JsonSchema for PublicKey { fn schema_name() -> String { String::from("PublicKey") @@ -447,6 +905,27 @@ impl JsonSchema for PublicKey { schema_object.metadata().description = Some( "Hex-encoded cryptographic public key, including the algorithm tag prefix.".to_string(), ); + schema_object.metadata().examples = vec![ + json!({ + "name": "SystemPublicKey", + "description": "A pseudo public key, used for example when the system proposes an \ + immediate switch block after a network upgrade rather than a specific validator. \ + Its hex-encoded value is always '00', as is the corresponding pseudo signature's", + "value": "00" + }), + json!({ + "name": "Ed25519PublicKey", + "description": "An Ed25519 public key. Its hex-encoded value begins '01' and is \ + followed by 64 characters", + "value": "018a88e3dd7409f195fd52db2d3cba5d72ca6709bf1d94121bf3748801b40f6f5c" + }), + json!({ + "name": "Secp256k1PublicKey", + "description": "A secp256k1 public key. Its hex-encoded value begins '02' and is \ + followed by 66 characters", + "value": "0203408e9526316fd1f8def480dd45b2cc72ffd732771c9ceb5d92ffa4051e6ee084" + }), + ]; schema_object.into() } } @@ -458,15 +937,17 @@ impl CLTyped for PublicKey { } /// A signature of given data. 
-#[derive(Clone, Copy, DataSize)] +#[derive(Clone, Copy)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[non_exhaustive] pub enum Signature { /// System signature. Cannot be verified. System, /// Ed25519 signature. - #[data_size(skip)] - Ed25519(ed25519_dalek::Signature), + #[cfg_attr(feature = "datasize", data_size(skip))] + Ed25519(Ed25519Signature), /// Secp256k1 signature. - #[data_size(skip)] + #[cfg_attr(feature = "datasize", data_size(skip))] Secp256k1(Secp256k1Signature), } @@ -482,13 +963,7 @@ impl Signature { /// Constructs a new Ed25519 variant from a byte array. pub fn ed25519(bytes: [u8; Self::ED25519_LENGTH]) -> Result { - let signature = ed25519_dalek::Signature::from_bytes(&bytes).map_err(|_| { - Error::AsymmetricKey(format!( - "failed to construct Ed25519 signature from {:?}", - &bytes[..] - )) - })?; - + let signature = Ed25519Signature::from_bytes(&bytes); Ok(Signature::Ed25519(signature)) } @@ -511,6 +986,11 @@ impl Signature { Signature::Secp256k1(_) => SECP256K1, } } + + /// Hexadecimal representation of the signature. 
+ pub fn to_hex_string(&self) -> String { + self.to_hex() + } } impl AsymmetricType<'_> for Signature { @@ -519,7 +999,7 @@ impl AsymmetricType<'_> for Signature { } fn ed25519_from_bytes>(bytes: T) -> Result { - let signature = ed25519_dalek::Signature::from_bytes(bytes.as_ref()).map_err(|_| { + let signature = Ed25519Signature::try_from(bytes.as_ref()).map_err(|_| { Error::AsymmetricKey(format!( "failed to construct Ed25519 signature from {:?}", bytes.as_ref() @@ -529,7 +1009,7 @@ impl AsymmetricType<'_> for Signature { } fn secp256k1_from_bytes>(bytes: T) -> Result { - let signature = k256::ecdsa::Signature::try_from(bytes.as_ref()).map_err(|_| { + let signature = Secp256k1Signature::try_from(bytes.as_ref()).map_err(|_| { Error::AsymmetricKey(format!( "failed to construct secp256k1 signature from {:?}", bytes.as_ref() @@ -545,7 +1025,7 @@ impl Debug for Signature { formatter, "Signature::{}({})", self.variant_name(), - HexFmt(Into::>::into(*self)) + base16::encode_lower(&Into::>::into(*self)) ) } } @@ -607,21 +1087,7 @@ impl Tagged for Signature { impl ToBytes for Signature { fn to_bytes(&self) -> Result, bytesrepr::Error> { let mut buffer = bytesrepr::allocate_buffer(self)?; - match self { - Signature::System => { - buffer.insert(0, SYSTEM_TAG); - } - Signature::Ed25519(signature) => { - buffer.insert(0, ED25519_TAG); - let ed5519_bytes = signature.to_bytes(); - buffer.extend(&ed5519_bytes); - } - Signature::Secp256k1(signature) => { - buffer.insert(0, SECP256K1_TAG); - let secp256k1_bytes = signature.as_ref(); - buffer.extend_from_slice(secp256k1_bytes); - } - } + self.write_bytes(&mut buffer)?; Ok(buffer) } @@ -633,6 +1099,23 @@ impl ToBytes for Signature { Signature::Secp256k1(_) => Self::SECP256K1_LENGTH, } } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + Signature::System => { + writer.push(SYSTEM_TAG); + } + Signature::Ed25519(signature) => { + writer.push(ED25519_TAG); + writer.extend(signature.to_bytes()); 
+ } + Signature::Secp256k1(signature) => { + writer.push(SECP256K1_TAG); + writer.extend_from_slice(&signature.to_bytes()); + } + } + Ok(()) + } } impl FromBytes for Signature { @@ -676,7 +1159,7 @@ impl From<&Signature> for Vec { match signature { Signature::System => Vec::new(), Signature::Ed25519(signature) => signature.to_bytes().into(), - Signature::Secp256k1(signature) => signature.as_ref().into(), + Signature::Secp256k1(signature) => (*signature.to_bytes()).into(), } } } @@ -687,7 +1170,7 @@ impl From for Vec { } } -#[cfg(feature = "std")] +#[cfg(feature = "json-schema")] impl JsonSchema for Signature { fn schema_name() -> String { String::from("Signature") @@ -703,6 +1186,91 @@ impl JsonSchema for Signature { } } +/// Signs the given message using the given key pair. +pub fn sign>( + message: T, + secret_key: &SecretKey, + public_key: &PublicKey, +) -> Signature { + match (secret_key, public_key) { + (SecretKey::System, PublicKey::System) => { + panic!("cannot create signature with system keys",) + } + (SecretKey::Ed25519(secret_key), PublicKey::Ed25519(_public_key)) => { + let signature = secret_key.sign(message.as_ref()); + Signature::Ed25519(signature) + } + (SecretKey::Secp256k1(secret_key), PublicKey::Secp256k1(_public_key)) => { + let signer = secret_key; + let signature: Secp256k1Signature = signer + .try_sign(message.as_ref()) + .expect("should create signature"); + Signature::Secp256k1(signature) + } + _ => panic!("secret and public key types must match"), + } +} + +/// Attempts to recover a Secp256k1 [`PublicKey`] from a message and a signature over it. 
+pub fn recover_secp256k1>( + message: T, + signature: &Signature, + recovery_id: u8, +) -> Result { + let Signature::Secp256k1(signature) = signature else { + return Err(Error::AsymmetricKey(String::from( + "public keys can only be recovered from Secp256k1 signatures", + ))); + }; + + let Ok(key) = VerifyingKey::recover_from_msg( + message.as_ref(), + signature, + RecoveryId::try_from(recovery_id)?, + ) else { + return Err(Error::AsymmetricKey(String::from("Key extraction failed"))); + }; + + Ok(PublicKey::Secp256k1(key)) +} + +/// Verifies the signature of the given message against the given public key. +pub fn verify>( + message: T, + signature: &Signature, + public_key: &PublicKey, +) -> Result<(), Error> { + match (signature, public_key) { + (Signature::System, _) => Err(Error::AsymmetricKey(String::from( + "signatures based on the system key cannot be verified", + ))), + (Signature::Ed25519(signature), PublicKey::Ed25519(public_key)) => public_key + .verify_strict(message.as_ref(), signature) + .map_err(|_| Error::AsymmetricKey(String::from("failed to verify Ed25519 signature"))), + (Signature::Secp256k1(signature), PublicKey::Secp256k1(public_key)) => { + let verifier: &Secp256k1PublicKey = public_key; + verifier + .verify(message.as_ref(), signature) + .map_err(|error| { + Error::AsymmetricKey(format!("failed to verify secp256k1 signature: {}", error)) + }) + } + _ => Err(Error::AsymmetricKey(format!( + "type mismatch between {} and {}", + signature, public_key + ))), + } +} + +/// Generates an Ed25519 keypair using the operating system's cryptographically secure random number +/// generator. 
+#[cfg(any(feature = "std", test))] +pub fn generate_ed25519_keypair() -> (SecretKey, PublicKey) { + let secret_key = SecretKey::generate_ed25519().unwrap(); + let public_key = PublicKey::from(&secret_key); + (secret_key, public_key) +} + mod detail { use alloc::{string::String, vec::Vec}; @@ -716,7 +1284,7 @@ mod detail { /// /// The wrapped contents are the result of calling `t_as_ref()` on the type. #[derive(Serialize, Deserialize)] - pub enum AsymmetricTypeAsBytes { + pub(super) enum AsymmetricTypeAsBytes { System, Ed25519(Vec), Secp256k1(Vec), @@ -742,7 +1310,7 @@ mod detail { } } - pub fn serialize<'a, T, S>(value: &'a T, serializer: S) -> Result + pub(super) fn serialize<'a, T, S>(value: &'a T, serializer: S) -> Result where T: AsymmetricType<'a>, Vec: From<&'a T>, @@ -756,7 +1324,7 @@ mod detail { AsymmetricTypeAsBytes::from(value).serialize(serializer) } - pub fn deserialize<'a, 'de, T, D>(deserializer: D) -> Result + pub(super) fn deserialize<'a, 'de, T, D>(deserializer: D) -> Result where T: AsymmetricType<'a>, Vec: From<&'a T>, diff --git a/types/src/crypto/asymmetric_key/gens.rs b/types/src/crypto/asymmetric_key/gens.rs index 2e4d3a6296..38ffd4ec92 100644 --- a/types/src/crypto/asymmetric_key/gens.rs +++ b/types/src/crypto/asymmetric_key/gens.rs @@ -10,22 +10,49 @@ use proptest::{ use crate::{crypto::SecretKey, PublicKey}; -/// Creates an arbitrary [`SecretKey`] -pub fn secret_key_arb() -> impl Strategy { +/// Creates an arbitrary [`PublicKey`] +pub fn public_key_arb() -> impl Strategy { prop_oneof![ - Just(SecretKey::System), + Just(PublicKey::System), collection::vec(::arbitrary(), SecretKey::ED25519_LENGTH).prop_map(|bytes| { let byte_array: [u8; SecretKey::ED25519_LENGTH] = bytes.try_into().unwrap(); - SecretKey::ed25519_from_bytes(byte_array).unwrap() + let secret_key = SecretKey::ed25519_from_bytes(byte_array).unwrap(); + PublicKey::from(&secret_key) }), collection::vec(::arbitrary(), SecretKey::SECP256K1_LENGTH).prop_map(|bytes| { let 
bytes_array: [u8; SecretKey::SECP256K1_LENGTH] = bytes.try_into().unwrap(); - SecretKey::secp256k1_from_bytes(bytes_array).unwrap() + let secret_key = SecretKey::secp256k1_from_bytes(bytes_array).unwrap(); + PublicKey::from(&secret_key) }) ] } -/// Creates an arbitrary [`PublicKey`] -pub fn public_key_arb() -> impl Strategy { - secret_key_arb().prop_map(Into::into) +/// Returns a strategy for creating random [`PublicKey`] instances but NOT system variant. +pub fn public_key_arb_no_system() -> impl Strategy { + prop_oneof![ + collection::vec(::arbitrary(), SecretKey::ED25519_LENGTH).prop_map(|bytes| { + let byte_array: [u8; SecretKey::ED25519_LENGTH] = bytes.try_into().unwrap(); + let secret_key = SecretKey::ed25519_from_bytes(byte_array).unwrap(); + PublicKey::from(&secret_key) + }), + collection::vec(::arbitrary(), SecretKey::SECP256K1_LENGTH).prop_map(|bytes| { + let bytes_array: [u8; SecretKey::SECP256K1_LENGTH] = bytes.try_into().unwrap(); + let secret_key = SecretKey::secp256k1_from_bytes(bytes_array).unwrap(); + PublicKey::from(&secret_key) + }) + ] +} + +/// Returns a strategy for creating random [`SecretKey`] instances but NOT system variant. 
+pub fn secret_key_arb_no_system() -> impl Strategy { + prop_oneof![ + collection::vec(::arbitrary(), SecretKey::ED25519_LENGTH).prop_map(|bytes| { + let byte_array: [u8; SecretKey::ED25519_LENGTH] = bytes.try_into().unwrap(); + SecretKey::ed25519_from_bytes(byte_array).unwrap() + }), + collection::vec(::arbitrary(), SecretKey::SECP256K1_LENGTH).prop_map(|bytes| { + let bytes_array: [u8; SecretKey::SECP256K1_LENGTH] = bytes.try_into().unwrap(); + SecretKey::secp256k1_from_bytes(bytes_array).unwrap() + }) + ] } diff --git a/types/src/crypto/asymmetric_key/tests.rs b/types/src/crypto/asymmetric_key/tests.rs index 5ef927d917..79eea43496 100644 --- a/types/src/crypto/asymmetric_key/tests.rs +++ b/types/src/crypto/asymmetric_key/tests.rs @@ -1,10 +1,26 @@ -use crate::{crypto::SecretKey, AsymmetricType, PublicKey}; +use std::{ + cmp::Ordering, + collections::hash_map::DefaultHasher, + hash::{Hash, Hasher}, + iter, +}; + +use rand::RngCore; + +use k256::elliptic_curve::sec1::ToEncodedPoint; +use openssl::pkey::{PKey, Private, Public}; + +use super::*; +use crate::{ + bytesrepr, checksummed_hex, crypto::SecretKey, testing::TestRng, AsymmetricType, PublicKey, + Tagged, +}; #[test] fn can_construct_ed25519_keypair_from_zeroes() { let bytes = [0; SecretKey::ED25519_LENGTH]; let secret_key = SecretKey::ed25519_from_bytes(bytes).unwrap(); - let _public_key: PublicKey = secret_key.into(); + let _public_key: PublicKey = (&secret_key).into(); } #[test] @@ -12,30 +28,838 @@ fn can_construct_ed25519_keypair_from_zeroes() { fn cannot_construct_secp256k1_keypair_from_zeroes() { let bytes = [0; SecretKey::SECP256K1_LENGTH]; let secret_key = SecretKey::secp256k1_from_bytes(bytes).unwrap(); - let _public_key: PublicKey = secret_key.into(); + let _public_key: PublicKey = (&secret_key).into(); } #[test] fn can_construct_ed25519_keypair_from_ones() { let bytes = [1; SecretKey::ED25519_LENGTH]; let secret_key = SecretKey::ed25519_from_bytes(bytes).unwrap(); - let _public_key: PublicKey = 
secret_key.into(); + let _public_key: PublicKey = (&secret_key).into(); } #[test] fn can_construct_secp256k1_keypair_from_ones() { let bytes = [1; SecretKey::SECP256K1_LENGTH]; let secret_key = SecretKey::secp256k1_from_bytes(bytes).unwrap(); - let _public_key: PublicKey = secret_key.into(); + let _public_key: PublicKey = (&secret_key).into(); +} + +type OpenSSLSecretKey = PKey; +type OpenSSLPublicKey = PKey; + +// `SecretKey` does not implement `PartialEq`, so just compare derived `PublicKey`s. +fn assert_secret_keys_equal(lhs: &SecretKey, rhs: &SecretKey) { + assert_eq!(PublicKey::from(lhs), PublicKey::from(rhs)); +} + +fn secret_key_der_roundtrip(secret_key: SecretKey) { + let der_encoded = secret_key.to_der().unwrap(); + let decoded = SecretKey::from_der(&der_encoded).unwrap(); + assert_secret_keys_equal(&secret_key, &decoded); + assert_eq!(secret_key.tag(), decoded.tag()); + + // Ensure malformed encoded version fails to decode. + SecretKey::from_der(&der_encoded[1..]).unwrap_err(); +} + +fn secret_key_pem_roundtrip(secret_key: SecretKey) { + let pem_encoded = secret_key.to_pem().unwrap(); + let decoded = SecretKey::from_pem(pem_encoded.as_bytes()).unwrap(); + assert_secret_keys_equal(&secret_key, &decoded); + assert_eq!(secret_key.tag(), decoded.tag()); + + // Check PEM-encoded can be decoded by openssl. + let _ = OpenSSLSecretKey::private_key_from_pem(pem_encoded.as_bytes()).unwrap(); + + // Ensure malformed encoded version fails to decode. 
+ SecretKey::from_pem(&pem_encoded[1..]).unwrap_err(); +} + +fn known_secret_key_to_pem(expected_key: &SecretKey, known_key_pem: &str, expected_tag: u8) { + let decoded = SecretKey::from_pem(known_key_pem.as_bytes()).unwrap(); + assert_secret_keys_equal(expected_key, &decoded); + assert_eq!(expected_tag, decoded.tag()); +} + +#[cfg(any(feature = "std-fs-io", test))] +fn secret_key_file_roundtrip(secret_key: SecretKey) { + let tempdir = tempfile::tempdir().unwrap(); + let path = tempdir.path().join("test_secret_key.pem"); + + secret_key.to_file(&path).unwrap(); + let decoded = SecretKey::from_file(&path).unwrap(); + assert_secret_keys_equal(&secret_key, &decoded); + assert_eq!(secret_key.tag(), decoded.tag()); +} + +fn public_key_serialization_roundtrip(public_key: PublicKey) { + // Try to/from bincode. + let serialized = bincode::serialize(&public_key).unwrap(); + let deserialized = bincode::deserialize(&serialized).unwrap(); + assert_eq!(public_key, deserialized); + assert_eq!(public_key.tag(), deserialized.tag()); + + // Try to/from JSON. + let serialized = serde_json::to_vec_pretty(&public_key).unwrap(); + let deserialized = serde_json::from_slice(&serialized).unwrap(); + assert_eq!(public_key, deserialized); + assert_eq!(public_key.tag(), deserialized.tag()); + + // Using bytesrepr. + bytesrepr::test_serialization_roundtrip(&public_key); +} + +fn public_key_der_roundtrip(public_key: PublicKey) { + let der_encoded = public_key.to_der().unwrap(); + let decoded = PublicKey::from_der(&der_encoded).unwrap(); + assert_eq!(public_key, decoded); + + // Check DER-encoded can be decoded by openssl. + let _ = OpenSSLPublicKey::public_key_from_der(&der_encoded).unwrap(); + + // Ensure malformed encoded version fails to decode. 
+ PublicKey::from_der(&der_encoded[1..]).unwrap_err(); +} + +fn public_key_pem_roundtrip(public_key: PublicKey) { + let pem_encoded = public_key.to_pem().unwrap(); + let decoded = PublicKey::from_pem(pem_encoded.as_bytes()).unwrap(); + assert_eq!(public_key, decoded); + assert_eq!(public_key.tag(), decoded.tag()); + + // Check PEM-encoded can be decoded by openssl. + let _ = OpenSSLPublicKey::public_key_from_pem(pem_encoded.as_bytes()).unwrap(); + + // Ensure malformed encoded version fails to decode. + PublicKey::from_pem(&pem_encoded[1..]).unwrap_err(); +} + +fn known_public_key_to_pem(known_key_hex: &str, known_key_pem: &str) { + let key_bytes = checksummed_hex::decode(known_key_hex).unwrap(); + let decoded = PublicKey::from_pem(known_key_pem.as_bytes()).unwrap(); + assert_eq!(key_bytes, Into::>::into(decoded)); +} + +#[cfg(any(feature = "std-fs-io", test))] +fn public_key_file_roundtrip(public_key: PublicKey) { + let tempdir = tempfile::tempdir().unwrap(); + let path = tempdir.path().join("test_public_key.pem"); + + public_key.to_file(&path).unwrap(); + let decoded = PublicKey::from_file(&path).unwrap(); + assert_eq!(public_key, decoded); +} + +fn public_key_hex_roundtrip(public_key: PublicKey) { + let hex_encoded = public_key.to_hex(); + let decoded = PublicKey::from_hex(&hex_encoded).unwrap(); + assert_eq!(public_key, decoded); + assert_eq!(public_key.tag(), decoded.tag()); + + // Ensure malformed encoded version fails to decode. + PublicKey::from_hex(&hex_encoded[..1]).unwrap_err(); + PublicKey::from_hex(&hex_encoded[1..]).unwrap_err(); +} + +fn signature_serialization_roundtrip(signature: Signature) { + // Try to/from bincode. + let serialized = bincode::serialize(&signature).unwrap(); + let deserialized: Signature = bincode::deserialize(&serialized).unwrap(); + assert_eq!(signature, deserialized); + assert_eq!(signature.tag(), deserialized.tag()); + + // Try to/from JSON. 
+ let serialized = serde_json::to_vec_pretty(&signature).unwrap(); + let deserialized = serde_json::from_slice(&serialized).unwrap(); + assert_eq!(signature, deserialized); + assert_eq!(signature.tag(), deserialized.tag()); + + // Try to/from using bytesrepr. + let serialized = bytesrepr::serialize(signature).unwrap(); + let deserialized = bytesrepr::deserialize(serialized).unwrap(); + assert_eq!(signature, deserialized); + assert_eq!(signature.tag(), deserialized.tag()) +} + +fn signature_hex_roundtrip(signature: Signature) { + let hex_encoded = signature.to_hex(); + let decoded = Signature::from_hex(hex_encoded.as_bytes()).unwrap(); + assert_eq!(signature, decoded); + assert_eq!(signature.tag(), decoded.tag()); + + // Ensure malformed encoded version fails to decode. + Signature::from_hex(&hex_encoded[..1]).unwrap_err(); + Signature::from_hex(&hex_encoded[1..]).unwrap_err(); +} + +fn hash(data: &T) -> u64 { + let mut hasher = DefaultHasher::new(); + data.hash(&mut hasher); + hasher.finish() +} + +fn check_ord_and_hash(low: T, high: T) { + let low_copy = low.clone(); + + assert_eq!(hash(&low), hash(&low_copy)); + assert_ne!(hash(&low), hash(&high)); + + assert_eq!(Ordering::Less, low.cmp(&high)); + assert_eq!(Some(Ordering::Less), low.partial_cmp(&high)); + + assert_eq!(Ordering::Greater, high.cmp(&low)); + assert_eq!(Some(Ordering::Greater), high.partial_cmp(&low)); + + assert_eq!(Ordering::Equal, low.cmp(&low_copy)); + assert_eq!(Some(Ordering::Equal), low.partial_cmp(&low_copy)); +} + +mod system { + #[cfg(any(feature = "std-fs-io", test))] + use std::path::Path; + + use super::{sign, verify}; + use crate::crypto::{AsymmetricType, PublicKey, SecretKey, Signature}; + + #[test] + fn secret_key_to_der_should_error() { + assert!(SecretKey::system().to_der().is_err()); + } + + #[test] + fn secret_key_to_pem_should_error() { + assert!(SecretKey::system().to_pem().is_err()); + } + + #[cfg(any(feature = "std-fs-io", test))] + #[test] + fn 
secret_key_to_file_should_error() { + assert!(SecretKey::system().to_file(Path::new("/dev/null")).is_err()); + } + + #[test] + fn public_key_serialization_roundtrip() { + super::public_key_serialization_roundtrip(PublicKey::system()); + } + + #[test] + fn public_key_to_der_should_error() { + assert!(PublicKey::system().to_der().is_err()); + } + + #[test] + fn public_key_to_pem_should_error() { + assert!(PublicKey::system().to_pem().is_err()); + } + + #[cfg(any(feature = "std-fs-io", test))] + #[test] + fn public_key_to_file_should_error() { + assert!(PublicKey::system().to_file(Path::new("/dev/null")).is_err()); + } + + #[test] + fn public_key_to_and_from_hex() { + super::public_key_hex_roundtrip(PublicKey::system()); + } + + #[test] + #[should_panic] + fn sign_should_panic() { + sign([], &SecretKey::system(), &PublicKey::system()); + } + + #[test] + fn signature_to_and_from_hex() { + super::signature_hex_roundtrip(Signature::system()); + } + + #[test] + fn public_key_to_account_hash() { + assert_ne!( + PublicKey::system().to_account_hash().as_ref(), + Into::>::into(PublicKey::system()) + ); + } + + #[test] + fn verify_should_error() { + assert!(verify([], &Signature::system(), &PublicKey::system()).is_err()); + } + + #[test] + fn bytesrepr_roundtrip_signature() { + crate::bytesrepr::test_serialization_roundtrip(&Signature::system()); + } +} + +mod ed25519 { + use rand::Rng; + + use super::*; + use crate::ED25519_TAG; + + const SECRET_KEY_LENGTH: usize = SecretKey::ED25519_LENGTH; + const PUBLIC_KEY_LENGTH: usize = PublicKey::ED25519_LENGTH; + const SIGNATURE_LENGTH: usize = Signature::ED25519_LENGTH; + + #[test] + fn secret_key_from_bytes() { + // Secret key should be `SecretKey::ED25519_LENGTH` bytes. + let bytes = [0; SECRET_KEY_LENGTH + 1]; + assert!(SecretKey::ed25519_from_bytes(&bytes[..]).is_err()); + assert!(SecretKey::ed25519_from_bytes(&bytes[2..]).is_err()); + + // Check the same bytes but of the right length succeeds. 
+ assert!(SecretKey::ed25519_from_bytes(&bytes[1..]).is_ok()); + } + + #[test] + fn secret_key_to_and_from_der() { + let mut rng = TestRng::new(); + let secret_key = SecretKey::random_ed25519(&mut rng); + let der_encoded = secret_key.to_der().unwrap(); + secret_key_der_roundtrip(secret_key); + + // Check DER-encoded can be decoded by openssl. + let _ = OpenSSLSecretKey::private_key_from_der(&der_encoded).unwrap(); + } + + #[test] + fn secret_key_to_and_from_pem() { + let mut rng = TestRng::new(); + let secret_key = SecretKey::random_ed25519(&mut rng); + secret_key_pem_roundtrip(secret_key); + } + + #[test] + fn known_secret_key_to_pem() { + // Example values taken from https://tools.ietf.org/html/rfc8410#section-10.3 + const KNOWN_KEY_PEM: &str = r#"-----BEGIN PRIVATE KEY----- +MC4CAQAwBQYDK2VwBCIEINTuctv5E1hK1bbY8fdp+K06/nwoy/HU++CXqI9EdVhC +-----END PRIVATE KEY-----"#; + let key_bytes = + base16::decode("d4ee72dbf913584ad5b6d8f1f769f8ad3afe7c28cbf1d4fbe097a88f44755842") + .unwrap(); + let expected_key = SecretKey::ed25519_from_bytes(key_bytes).unwrap(); + super::known_secret_key_to_pem(&expected_key, KNOWN_KEY_PEM, ED25519_TAG); + } + + #[cfg(any(feature = "std-fs-io", test))] + #[test] + fn secret_key_to_and_from_file() { + let mut rng = TestRng::new(); + let secret_key = SecretKey::random_ed25519(&mut rng); + secret_key_file_roundtrip(secret_key); + } + + #[test] + fn public_key_serialization_roundtrip() { + let mut rng = TestRng::new(); + let public_key = PublicKey::random_ed25519(&mut rng); + super::public_key_serialization_roundtrip(public_key); + } + + #[test] + fn public_key_from_bytes() { + // Public key should be `PublicKey::ED25519_LENGTH` bytes. Create vec with an extra + // byte. 
+ let mut rng = TestRng::new(); + let public_key = PublicKey::random_ed25519(&mut rng); + let bytes: Vec = iter::once(rng.gen()) + .chain(Into::>::into(public_key)) + .collect::>(); + + assert!(PublicKey::ed25519_from_bytes(&bytes[..]).is_err()); + assert!(PublicKey::ed25519_from_bytes(&bytes[2..]).is_err()); + + // Check the same bytes but of the right length succeeds. + assert!(PublicKey::ed25519_from_bytes(&bytes[1..]).is_ok()); + } + + #[test] + fn public_key_to_and_from_der() { + let mut rng = TestRng::new(); + let public_key = PublicKey::random_ed25519(&mut rng); + public_key_der_roundtrip(public_key); + } + + #[test] + fn public_key_to_and_from_pem() { + let mut rng = TestRng::new(); + let public_key = PublicKey::random_ed25519(&mut rng); + public_key_pem_roundtrip(public_key); + } + + #[test] + fn known_public_key_to_pem() { + // Example values taken from https://tools.ietf.org/html/rfc8410#section-10.1 + const KNOWN_KEY_HEX: &str = + "19bf44096984cdfe8541bac167dc3b96c85086aa30b6b6cb0c5c38ad703166e1"; + const KNOWN_KEY_PEM: &str = r#"-----BEGIN PUBLIC KEY----- +MCowBQYDK2VwAyEAGb9ECWmEzf6FQbrBZ9w7lshQhqowtrbLDFw4rXAxZuE= +-----END PUBLIC KEY-----"#; + super::known_public_key_to_pem(KNOWN_KEY_HEX, KNOWN_KEY_PEM); + } + + #[cfg(any(feature = "std-fs-io", test))] + #[test] + fn public_key_to_and_from_file() { + let mut rng = TestRng::new(); + let public_key = PublicKey::random_ed25519(&mut rng); + public_key_file_roundtrip(public_key); + } + + #[test] + fn public_key_to_and_from_hex() { + let mut rng = TestRng::new(); + let public_key = PublicKey::random_ed25519(&mut rng); + public_key_hex_roundtrip(public_key); + } + + #[test] + fn signature_serialization_roundtrip() { + let mut rng = TestRng::new(); + let secret_key = SecretKey::random_ed25519(&mut rng); + let public_key = PublicKey::from(&secret_key); + let data = b"data"; + let signature = sign(data, &secret_key, &public_key); + super::signature_serialization_roundtrip(signature); + } + + #[test] + fn 
signature_from_bytes() { + // Signature should be `Signature::ED25519_LENGTH` bytes. + let bytes = [2; SIGNATURE_LENGTH + 1]; + assert!(Signature::ed25519_from_bytes(&bytes[..]).is_err()); + assert!(Signature::ed25519_from_bytes(&bytes[2..]).is_err()); + + // Check the same bytes but of the right length succeeds. + assert!(Signature::ed25519_from_bytes(&bytes[1..]).is_ok()); + } + + #[test] + fn signature_key_to_and_from_hex() { + let mut rng = TestRng::new(); + let secret_key = SecretKey::random_ed25519(&mut rng); + let public_key = PublicKey::from(&secret_key); + let data = b"data"; + let signature = sign(data, &secret_key, &public_key); + signature_hex_roundtrip(signature); + } + + #[test] + fn public_key_traits() { + let public_key_low = PublicKey::ed25519_from_bytes([1; PUBLIC_KEY_LENGTH]).unwrap(); + let public_key_high = PublicKey::ed25519_from_bytes([3; PUBLIC_KEY_LENGTH]).unwrap(); + check_ord_and_hash(public_key_low, public_key_high) + } + + #[test] + fn public_key_to_account_hash() { + let public_key_high = PublicKey::ed25519_from_bytes([255; PUBLIC_KEY_LENGTH]).unwrap(); + assert_ne!( + public_key_high.to_account_hash().as_ref(), + Into::>::into(public_key_high) + ); + } + + #[test] + fn signature_traits() { + let signature_low = Signature::ed25519([1; SIGNATURE_LENGTH]).unwrap(); + let signature_high = Signature::ed25519([3; SIGNATURE_LENGTH]).unwrap(); + check_ord_and_hash(signature_low, signature_high) + } + + #[test] + fn sign_and_verify() { + let mut rng = TestRng::new(); + let secret_key = SecretKey::random_ed25519(&mut rng); + + let public_key = PublicKey::from(&secret_key); + let other_public_key = PublicKey::random_ed25519(&mut rng); + let wrong_type_public_key = PublicKey::random_secp256k1(&mut rng); + + let message = b"message"; + let signature = sign(message, &secret_key, &public_key); + + assert!(verify(message, &signature, &public_key).is_ok()); + assert!(verify(message, &signature, &other_public_key).is_err()); + assert!(verify(message, 
&signature, &wrong_type_public_key).is_err()); + assert!(verify(&message[1..], &signature, &public_key).is_err()); + } + + #[test] + fn bytesrepr_roundtrip_signature() { + let mut rng = TestRng::new(); + let ed25519_secret_key = SecretKey::random_ed25519(&mut rng); + let public_key = PublicKey::from(&ed25519_secret_key); + let data = b"data"; + let signature = sign(data, &ed25519_secret_key, &public_key); + bytesrepr::test_serialization_roundtrip(&signature); + } + + #[test] + fn validate_known_signature() { + // In the event that this test fails, we need to consider pinning the version of the + // `ed25519-dalek` crate to maintain backwards compatibility with existing data on the + // Casper network. + + // Values taken from: + // https://github.com/dalek-cryptography/ed25519-dalek/blob/925eb9ea56192053c9eb93b9d30d1b9419eee128/TESTVECTORS#L62 + let secret_key_hex = "bf5ba5d6a49dd5ef7b4d5d7d3e4ecc505c01f6ccee4c54b5ef7b40af6a454140"; + let public_key_hex = "1be034f813017b900d8990af45fad5b5214b573bd303ef7a75ef4b8c5c5b9842"; + let message_hex = + "16152c2e037b1c0d3219ced8e0674aee6b57834b55106c5344625322da638ecea2fc9a424a05ee9512\ + d48fcf75dd8bd4691b3c10c28ec98ee1afa5b863d1c36795ed18105db3a9aabd9d2b4c1747adbaf1a56\ + ffcc0c533c1c0faef331cdb79d961fa39f880a1b8b1164741822efb15a7259a465bef212855751fab66\ + a897bfa211abe0ea2f2e1cd8a11d80e142cde1263eec267a3138ae1fcf4099db0ab53d64f336f4bcd7a\ + 363f6db112c0a2453051a0006f813aaf4ae948a2090619374fa58052409c28ef76225687df3cb2d1b0b\ + fb43b09f47f1232f790e6d8dea759e57942099f4c4bd3390f28afc2098244961465c643fc8b29766af2\ + bcbc5440b86e83608cfc937be98bb4827fd5e6b689adc2e26513db531076a6564396255a09975b7034d\ + ac06461b255642e3a7ed75fa9fc265011f5f6250382a84ac268d63ba64"; + let signature_hex = + "279cace6fdaf3945e3837df474b28646143747632bede93e7a66f5ca291d2c24978512ca0cb8827c8c\ + 322685bd605503a5ec94dbae61bbdcae1e49650602bc07"; + + let secret_key_bytes = base16::decode(secret_key_hex).unwrap(); + let public_key_bytes = 
base16::decode(public_key_hex).unwrap(); + let message_bytes = base16::decode(message_hex).unwrap(); + let signature_bytes = base16::decode(signature_hex).unwrap(); + + let secret_key = SecretKey::ed25519_from_bytes(secret_key_bytes).unwrap(); + let public_key = PublicKey::ed25519_from_bytes(public_key_bytes).unwrap(); + assert_eq!(public_key, PublicKey::from(&secret_key)); + + let signature = Signature::ed25519_from_bytes(signature_bytes).unwrap(); + assert_eq!(sign(&message_bytes, &secret_key, &public_key), signature); + assert!(verify(&message_bytes, &signature, &public_key).is_ok()); + } +} + +mod secp256k1 { + use rand::Rng; + + use super::*; + use crate::SECP256K1_TAG; + + const SECRET_KEY_LENGTH: usize = SecretKey::SECP256K1_LENGTH; + const SIGNATURE_LENGTH: usize = Signature::SECP256K1_LENGTH; + + #[test] + fn secret_key_from_bytes() { + // Secret key should be `SecretKey::SECP256K1_LENGTH` bytes. + // The k256 library will ensure that a byte stream of a length not equal to + // `SECP256K1_LENGTH` will fail due to an assertion internal to the library. + // We can check that invalid byte streams e.g [0;32] does not generate a valid key. + let bytes = [0; SECRET_KEY_LENGTH]; + assert!(SecretKey::secp256k1_from_bytes(&bytes[..]).is_err()); + + // Check that a valid byte stream produces a valid key + let bytes = [1; SECRET_KEY_LENGTH]; + assert!(SecretKey::secp256k1_from_bytes(&bytes[..]).is_ok()); + } + + #[test] + fn secret_key_to_and_from_der() { + let mut rng = TestRng::new(); + let secret_key = SecretKey::random_secp256k1(&mut rng); + secret_key_der_roundtrip(secret_key); + } + + #[test] + fn secret_key_to_and_from_pem() { + let mut rng = TestRng::new(); + let secret_key = SecretKey::random_secp256k1(&mut rng); + secret_key_pem_roundtrip(secret_key); + } + + #[test] + fn known_secret_key_to_pem() { + // Example values taken from Python client. 
+ const KNOWN_KEY_PEM: &str = r#"-----BEGIN EC PRIVATE KEY----- +MHQCAQEEIL3fqaMKAfXSK1D2PnVVbZlZ7jTv133nukq4+95s6kmcoAcGBSuBBAAK +oUQDQgAEQI6VJjFv0fje9IDdRbLMcv/XMnccnOtdkv+kBR5u4ISEAkuc2TFWQHX0 +Yj9oTB9fx9+vvQdxJOhMtu46kGo0Uw== +-----END EC PRIVATE KEY-----"#; + let key_bytes = + base16::decode("bddfa9a30a01f5d22b50f63e75556d9959ee34efd77de7ba4ab8fbde6cea499c") + .unwrap(); + let expected_key = SecretKey::secp256k1_from_bytes(key_bytes).unwrap(); + super::known_secret_key_to_pem(&expected_key, KNOWN_KEY_PEM, SECP256K1_TAG); + } + + #[test] + #[cfg(any(feature = "std-fs-io", test))] + fn secret_key_to_and_from_file() { + let mut rng = TestRng::new(); + let secret_key = SecretKey::random_secp256k1(&mut rng); + secret_key_file_roundtrip(secret_key); + } + + #[test] + fn public_key_serialization_roundtrip() { + let mut rng = TestRng::new(); + let public_key = PublicKey::random_secp256k1(&mut rng); + super::public_key_serialization_roundtrip(public_key); + } + + #[test] + fn public_key_from_bytes() { + // Public key should be `PublicKey::SECP256K1_LENGTH` bytes. Create vec with an extra + // byte. + let mut rng = TestRng::new(); + let public_key = PublicKey::random_secp256k1(&mut rng); + let bytes: Vec = iter::once(rng.gen()) + .chain(Into::>::into(public_key)) + .collect::>(); + + assert!(PublicKey::secp256k1_from_bytes(&bytes[..]).is_err()); + assert!(PublicKey::secp256k1_from_bytes(&bytes[2..]).is_err()); + + // Check the same bytes but of the right length succeeds. 
+ assert!(PublicKey::secp256k1_from_bytes(&bytes[1..]).is_ok()); + } + + #[test] + fn public_key_to_and_from_der() { + let mut rng = TestRng::new(); + let public_key = PublicKey::random_secp256k1(&mut rng); + public_key_der_roundtrip(public_key); + } + + #[test] + fn public_key_to_and_from_pem() { + let mut rng = TestRng::new(); + let public_key = PublicKey::random_secp256k1(&mut rng); + public_key_pem_roundtrip(public_key); + } + + #[test] + fn known_public_key_to_pem() { + // Example values taken from Python client. + const KNOWN_KEY_HEX: &str = + "03408e9526316fd1f8def480dd45b2cc72ffd732771c9ceb5d92ffa4051e6ee084"; + const KNOWN_KEY_PEM: &str = r#"-----BEGIN PUBLIC KEY----- +MFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEQI6VJjFv0fje9IDdRbLMcv/XMnccnOtd +kv+kBR5u4ISEAkuc2TFWQHX0Yj9oTB9fx9+vvQdxJOhMtu46kGo0Uw== +-----END PUBLIC KEY-----"#; + super::known_public_key_to_pem(KNOWN_KEY_HEX, KNOWN_KEY_PEM); + } + + #[cfg(any(feature = "std-fs-io", test))] + #[test] + fn public_key_to_and_from_file() { + let mut rng = TestRng::new(); + let public_key = PublicKey::random_secp256k1(&mut rng); + public_key_file_roundtrip(public_key); + } + + #[test] + fn public_key_to_and_from_hex() { + let mut rng = TestRng::new(); + let public_key = PublicKey::random_secp256k1(&mut rng); + public_key_hex_roundtrip(public_key); + } + + #[test] + fn signature_serialization_roundtrip() { + let mut rng = TestRng::new(); + let secret_key = SecretKey::random_secp256k1(&mut rng); + let public_key = PublicKey::from(&secret_key); + let data = b"data"; + let signature = sign(data, &secret_key, &public_key); + super::signature_serialization_roundtrip(signature); + } + + #[test] + fn bytesrepr_roundtrip_signature() { + let mut rng = TestRng::new(); + let secret_key = SecretKey::random_secp256k1(&mut rng); + let public_key = PublicKey::from(&secret_key); + let data = b"data"; + let signature = sign(data, &secret_key, &public_key); + bytesrepr::test_serialization_roundtrip(&signature); + } + + #[test] + fn 
signature_from_bytes() { + // Signature should be `Signature::SECP256K1_LENGTH` bytes. + let bytes = [2; SIGNATURE_LENGTH + 1]; + assert!(Signature::secp256k1_from_bytes(&bytes[..]).is_err()); + assert!(Signature::secp256k1_from_bytes(&bytes[2..]).is_err()); + + // Check the same bytes but of the right length succeeds. + assert!(Signature::secp256k1_from_bytes(&bytes[1..]).is_ok()); + } + + #[test] + fn signature_key_to_and_from_hex() { + let mut rng = TestRng::new(); + let secret_key = SecretKey::random_secp256k1(&mut rng); + let public_key = PublicKey::from(&secret_key); + let data = b"data"; + let signature = sign(data, &secret_key, &public_key); + signature_hex_roundtrip(signature); + } + + #[test] + fn public_key_traits() { + let mut rng = TestRng::new(); + let public_key1 = PublicKey::random_secp256k1(&mut rng); + let public_key2 = PublicKey::random_secp256k1(&mut rng); + if Into::>::into(public_key1.clone()) < Into::>::into(public_key2.clone()) { + check_ord_and_hash(public_key1, public_key2) + } else { + check_ord_and_hash(public_key2, public_key1) + } + } + + #[test] + fn public_key_to_account_hash() { + let mut rng = TestRng::new(); + let public_key = PublicKey::random_secp256k1(&mut rng); + assert_ne!( + public_key.to_account_hash().as_ref(), + Into::>::into(public_key) + ); + } + + #[test] + fn signature_traits() { + let signature_low = Signature::secp256k1([1; SIGNATURE_LENGTH]).unwrap(); + let signature_high = Signature::secp256k1([3; SIGNATURE_LENGTH]).unwrap(); + check_ord_and_hash(signature_low, signature_high) + } + + #[test] + fn validate_known_signature() { + // In the event that this test fails, we need to consider pinning the version of the + // `k256` crate to maintain backwards compatibility with existing data on the Casper + // network. 
+ let secret_key_hex = "833fe62409237b9d62ec77587520911e9a759cec1d19755b7da901b96dca3d42"; + let public_key_hex = "028e24fd9654f12c793d3d376c15f7abe53e0fbd537884a3a98d10d2dc6d513b4e"; + let message_hex = "616263"; + let signature_hex = "8016162860f0795154643d15c5ab5bb840d8c695d6de027421755579ea7f2a4629b7e0c88fc3428669a6a89496f426181b73f10c6c8a05ac8f49d6cb5032eb89"; + + let secret_key_bytes = base16::decode(secret_key_hex).unwrap(); + let public_key_bytes = base16::decode(public_key_hex).unwrap(); + let message_bytes = base16::decode(message_hex).unwrap(); + let signature_bytes = base16::decode(signature_hex).unwrap(); + + let secret_key = SecretKey::secp256k1_from_bytes(secret_key_bytes).unwrap(); + let public_key = PublicKey::secp256k1_from_bytes(public_key_bytes).unwrap(); + assert_eq!(public_key, PublicKey::from(&secret_key)); + + let signature = Signature::secp256k1_from_bytes(signature_bytes).unwrap(); + assert_eq!(sign(&message_bytes, &secret_key, &public_key), signature); + assert!(verify(&message_bytes, &signature, &public_key).is_ok()); + } +} + +#[test] +fn public_key_traits() { + let system_key = PublicKey::system(); + let mut rng = TestRng::new(); + let ed25519_public_key = PublicKey::random_ed25519(&mut rng); + let secp256k1_public_key = PublicKey::random_secp256k1(&mut rng); + check_ord_and_hash(ed25519_public_key.clone(), secp256k1_public_key.clone()); + check_ord_and_hash(system_key.clone(), ed25519_public_key); + check_ord_and_hash(system_key, secp256k1_public_key); +} + +#[test] +fn signature_traits() { + let system_sig = Signature::system(); + let ed25519_sig = Signature::ed25519([3; Signature::ED25519_LENGTH]).unwrap(); + let secp256k1_sig = Signature::secp256k1([1; Signature::SECP256K1_LENGTH]).unwrap(); + check_ord_and_hash(ed25519_sig, secp256k1_sig); + check_ord_and_hash(system_sig, ed25519_sig); + check_ord_and_hash(system_sig, secp256k1_sig); } #[test] -fn can_construct_system_public_key() { - let public_key_bytes = [0; 
PublicKey::ED25519_LENGTH]; - let public_key = PublicKey::ed25519_from_bytes(public_key_bytes).unwrap(); +fn sign_and_verify() { + let mut rng = TestRng::new(); + let ed25519_secret_key = SecretKey::random_ed25519(&mut rng); + let secp256k1_secret_key = SecretKey::random_secp256k1(&mut rng); - let secret_key_bytes = [0; SecretKey::ED25519_LENGTH]; - let secret_key = SecretKey::ed25519_from_bytes(secret_key_bytes).unwrap(); + let ed25519_public_key = PublicKey::from(&ed25519_secret_key); + let secp256k1_public_key = PublicKey::from(&secp256k1_secret_key); - assert_ne!(public_key, secret_key.into()) + let other_ed25519_public_key = PublicKey::random_ed25519(&mut rng); + let other_secp256k1_public_key = PublicKey::random_secp256k1(&mut rng); + + let message = b"message"; + let ed25519_signature = sign(message, &ed25519_secret_key, &ed25519_public_key); + let secp256k1_signature = sign(message, &secp256k1_secret_key, &secp256k1_public_key); + + assert!(verify(message, &ed25519_signature, &ed25519_public_key).is_ok()); + assert!(verify(message, &secp256k1_signature, &secp256k1_public_key).is_ok()); + + assert!(verify(message, &ed25519_signature, &other_ed25519_public_key).is_err()); + assert!(verify(message, &secp256k1_signature, &other_secp256k1_public_key).is_err()); + + assert!(verify(message, &ed25519_signature, &secp256k1_public_key).is_err()); + assert!(verify(message, &secp256k1_signature, &ed25519_public_key).is_err()); + + assert!(verify(&message[1..], &ed25519_signature, &ed25519_public_key).is_err()); + assert!(verify(&message[1..], &secp256k1_signature, &secp256k1_public_key).is_err()); +} + +#[test] +fn should_construct_secp256k1_from_uncompressed_bytes() { + let mut rng = TestRng::new(); + + let mut secret_key_bytes = [0u8; SecretKey::SECP256K1_LENGTH]; + rng.fill_bytes(&mut secret_key_bytes[..]); + + // Construct a secp256k1 secret key and use that to construct a public key. 
+ let secp256k1_secret_key = k256::SecretKey::from_slice(&secret_key_bytes).unwrap(); + let secp256k1_public_key = secp256k1_secret_key.public_key(); + + // Construct a CL secret key and public key from that (which will be a compressed key). + let secret_key = SecretKey::secp256k1_from_bytes(secret_key_bytes).unwrap(); + let public_key = PublicKey::from(&secret_key); + assert_eq!( + Into::>::into(public_key.clone()).len(), + PublicKey::SECP256K1_LENGTH + ); + assert_ne!( + secp256k1_public_key + .to_encoded_point(false) + .as_bytes() + .len(), + PublicKey::SECP256K1_LENGTH + ); + + // Construct a CL public key from uncompressed public key bytes and ensure it's compressed. + let from_uncompressed_bytes = + PublicKey::secp256k1_from_bytes(secp256k1_public_key.to_encoded_point(false).as_bytes()) + .unwrap(); + assert_eq!(public_key, from_uncompressed_bytes); + + // Construct a CL public key from the uncompressed one's hex representation and ensure it's + // compressed. + let uncompressed_hex = { + let tag_bytes = vec![0x02u8]; + base16::encode_lower(&tag_bytes) + + &base16::encode_lower(&secp256k1_public_key.to_encoded_point(false).as_bytes()) + }; + + let from_uncompressed_hex = PublicKey::from_hex(uncompressed_hex).unwrap(); + assert_eq!(public_key, from_uncompressed_hex); +} + +#[test] +fn generate_ed25519_should_generate_an_ed25519_key() { + let secret_key = SecretKey::generate_ed25519().unwrap(); + assert!(matches!(secret_key, SecretKey::Ed25519(_))) +} + +#[test] +fn generate_secp256k1_should_generate_an_secp256k1_key() { + let secret_key = SecretKey::generate_secp256k1().unwrap(); + assert!(matches!(secret_key, SecretKey::Secp256k1(_))) } diff --git a/types/src/crypto/error.rs b/types/src/crypto/error.rs index 870d5bd2e7..03deba3f11 100644 --- a/types/src/crypto/error.rs +++ b/types/src/crypto/error.rs @@ -1,40 +1,164 @@ use alloc::string::String; -use core::fmt::{self, Debug, Display, Formatter}; +use core::fmt::{self, Display, Formatter}; +#[cfg(any(feature = 
"std", feature = "testing", test))] +use std::error::Error as StdError; -use base64::DecodeError; +#[cfg(feature = "datasize")] +use datasize::DataSize; use ed25519_dalek::ed25519::Error as SignatureError; -use hex::FromHexError; // Re-exported of signature::Error; used by both dalek and k256 libs +#[cfg(any(feature = "std", test))] +use pem::PemError; +use serde::Serialize; +#[cfg(any(feature = "std", feature = "testing", test))] +use thiserror::Error; + +#[cfg(any(feature = "std-fs-io", test))] +use crate::file_utils::{ReadFileError, WriteFileError}; /// Cryptographic errors. -#[derive(Debug)] +#[derive(Clone, Eq, PartialEq, Debug, Serialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[non_exhaustive] +//Default is needed only in testing to meet EnumIter needs +#[cfg_attr(any(feature = "testing", test), derive(Default))] pub enum Error { /// Error resulting from creating or using asymmetric key types. AsymmetricKey(String), /// Error resulting when decoding a type from a hex-encoded representation. - FromHex(FromHexError), + #[serde(with = "serde_helpers::Base16DecodeError")] + #[cfg_attr(feature = "datasize", data_size(skip))] + FromHex(base16::DecodeError), /// Error resulting when decoding a type from a base64 representation. - FromBase64(DecodeError), + #[serde(with = "serde_helpers::Base64DecodeError")] + #[cfg_attr(feature = "datasize", data_size(skip))] + FromBase64(base64::DecodeError), /// Signature error. - SignatureError(SignatureError), + #[cfg_attr(any(feature = "testing", test), default)] + SignatureError, + + /// Error trying to manipulate the system key. 
+ System(String), } impl Display for Error { fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { - Debug::fmt(self, formatter) + match self { + Error::AsymmetricKey(error_msg) => { + write!(formatter, "asymmetric key error: {}", error_msg) + } + Error::FromHex(error) => { + write!(formatter, "decoding from hex: {}", error) + } + Error::FromBase64(error) => { + write!(formatter, "decoding from base 64: {}", error) + } + Error::SignatureError => { + write!(formatter, "error in signature") + } + Error::System(error_msg) => { + write!(formatter, "invalid operation on system key: {}", error_msg) + } + } } } -impl From for Error { - fn from(error: FromHexError) -> Self { +impl From for Error { + fn from(error: base16::DecodeError) -> Self { Error::FromHex(error) } } impl From for Error { - fn from(error: SignatureError) -> Self { - Error::SignatureError(error) + fn from(_error: SignatureError) -> Self { + Error::SignatureError + } +} + +#[cfg(any(feature = "std", feature = "testing", test))] +impl StdError for Error { + fn source(&self) -> Option<&(dyn StdError + 'static)> { + match self { + Error::FromHex(error) => Some(error), + Error::FromBase64(error) => Some(error), + Error::AsymmetricKey(_) | Error::SignatureError | Error::System(_) => None, + } + } +} + +/// Cryptographic errors extended with some additional variants. +#[cfg(any(feature = "std", feature = "testing", test))] +#[derive(Debug, Error)] +#[non_exhaustive] +pub enum ErrorExt { + /// A basic crypto error. + #[error("crypto error: {0:?}")] + CryptoError(#[from] Error), + + /// Error trying to read a secret key. + #[cfg(any(feature = "std-fs-io", test))] + #[error("secret key load failed: {0}")] + SecretKeyLoad(ReadFileError), + + /// Error trying to read a public key. + #[cfg(any(feature = "std-fs-io", test))] + #[error("public key load failed: {0}")] + PublicKeyLoad(ReadFileError), + + /// Error trying to write a secret key. 
+ #[cfg(any(feature = "std-fs-io", test))] + #[error("secret key save failed: {0}")] + SecretKeySave(WriteFileError), + + /// Error trying to write a public key. + #[cfg(any(feature = "std-fs-io", test))] + #[error("public key save failed: {0}")] + PublicKeySave(WriteFileError), + + /// Pem format error. + #[error("pem error: {0}")] + FromPem(String), + + /// DER format error. + #[error("der error: {0}")] + FromDer(#[from] derp::Error), + + /// Error in getting random bytes from the system's preferred random number source. + #[error("failed to get random bytes: {0}")] + GetRandomBytes(#[from] getrandom::Error), +} + +#[cfg(any(feature = "std", test))] +impl From for ErrorExt { + fn from(error: PemError) -> Self { + ErrorExt::FromPem(error.to_string()) + } +} + +/// This module allows us to derive `Serialize` for the third party error types which don't +/// themselves derive it. +/// +/// See for more info. +#[allow(clippy::enum_variant_names)] +mod serde_helpers { + use serde::Serialize; + + #[derive(Serialize)] + #[serde(remote = "base16::DecodeError")] + pub(super) enum Base16DecodeError { + InvalidByte { index: usize, byte: u8 }, + InvalidLength { length: usize }, + } + + #[derive(Serialize)] + #[serde(remote = "base64::DecodeError")] + pub(super) enum Base64DecodeError { + #[allow(dead_code)] + InvalidByte(usize, u8), + InvalidLength, + #[allow(dead_code)] + InvalidLastSymbol(usize, u8), } } diff --git a/types/src/deploy_info.rs b/types/src/deploy_info.rs index c9eb25cc72..6ba9436580 100644 --- a/types/src/deploy_info.rs +++ b/types/src/deploy_info.rs @@ -1,26 +1,31 @@ -// TODO - remove once schemars stops causing warning. 
-#![allow(clippy::field_reassign_with_default)] - use alloc::vec::Vec; -#[cfg(feature = "std")] +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] use schemars::JsonSchema; use serde::{Deserialize, Serialize}; use crate::{ account::AccountHash, bytesrepr::{self, FromBytes, ToBytes}, - DeployHash, TransferAddr, URef, U512, + serde_helpers, DeployHash, TransferAddr, URef, U512, }; /// Information relating to the given Deploy. #[derive(Debug, Clone, Ord, PartialOrd, Eq, PartialEq, Serialize, Deserialize)] -#[cfg_attr(feature = "std", derive(JsonSchema))] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] #[serde(deny_unknown_fields)] pub struct DeployInfo { /// The relevant Deploy. + #[serde(with = "serde_helpers::deploy_hash_as_array")] + #[cfg_attr( + feature = "json-schema", + schemars(with = "DeployHash", description = "Hex-encoded Deploy hash.") + )] pub deploy_hash: DeployHash, - /// Transfers performed by the Deploy. + /// Version 1 transfers performed by the Deploy. pub transfers: Vec, /// Account identifier of the creator of the Deploy. 
pub from: AccountHash, @@ -73,11 +78,11 @@ impl FromBytes for DeployInfo { impl ToBytes for DeployInfo { fn to_bytes(&self) -> Result, bytesrepr::Error> { let mut result = bytesrepr::allocate_buffer(self)?; - result.append(&mut self.deploy_hash.to_bytes()?); - result.append(&mut self.transfers.to_bytes()?); - result.append(&mut self.from.to_bytes()?); - result.append(&mut self.source.to_bytes()?); - result.append(&mut self.gas.to_bytes()?); + self.deploy_hash.write_bytes(&mut result)?; + self.transfers.write_bytes(&mut result)?; + self.from.write_bytes(&mut result)?; + self.source.write_bytes(&mut result)?; + self.gas.write_bytes(&mut result)?; Ok(result) } @@ -88,47 +93,33 @@ impl ToBytes for DeployInfo { + self.source.serialized_length() + self.gas.serialized_length() } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.deploy_hash.write_bytes(writer)?; + self.transfers.write_bytes(writer)?; + self.from.write_bytes(writer)?; + self.source.write_bytes(writer)?; + self.gas.write_bytes(writer)?; + Ok(()) + } } -/// Generators for a [`Deploy`] -#[cfg(any(feature = "gens", test))] +/// Generators for a `DeployInfo` +#[cfg(any(feature = "testing", feature = "gens", test))] pub(crate) mod gens { - use alloc::vec::Vec; - - use proptest::{ - array, - collection::{self, SizeRange}, - prelude::{Arbitrary, Strategy}, - }; - use crate::{ - account::AccountHash, - gens::{u512_arb, uref_arb}, - DeployHash, DeployInfo, TransferAddr, + gens::{account_hash_arb, u512_arb, uref_arb}, + transaction::gens::deploy_hash_arb, + transfer::gens::transfer_v1_addr_arb, + DeployInfo, }; + use proptest::{collection, prelude::Strategy}; - pub fn deploy_hash_arb() -> impl Strategy { - array::uniform32(::arbitrary()).prop_map(DeployHash::new) - } - - pub fn transfer_addr_arb() -> impl Strategy { - array::uniform32(::arbitrary()).prop_map(TransferAddr::new) - } - - pub fn transfers_arb(size: impl Into) -> impl Strategy> { - collection::vec(transfer_addr_arb(), 
size) - } - - pub fn account_hash_arb() -> impl Strategy { - array::uniform32(::arbitrary()).prop_map(AccountHash::new) - } - - /// Creates an arbitrary [`Deploy`] pub fn deploy_info_arb() -> impl Strategy { let transfers_length_range = 0..5; ( deploy_hash_arb(), - transfers_arb(transfers_length_range), + collection::vec(transfer_v1_addr_arb(), transfers_length_range), account_hash_arb(), uref_arb(), u512_arb(), diff --git a/types/src/digest.rs b/types/src/digest.rs new file mode 100644 index 0000000000..bbf0002b2f --- /dev/null +++ b/types/src/digest.rs @@ -0,0 +1,730 @@ +//! Contains digest and merkle chunking used throughout the system. + +mod chunk_with_proof; +mod error; +mod indexed_merkle_proof; + +use alloc::{collections::BTreeMap, string::String, vec::Vec}; +use core::{ + array::TryFromSliceError, + convert::{TryFrom, TryInto}, + fmt::{self, Debug, Display, Formatter, LowerHex, UpperHex}, +}; + +use blake2::{ + digest::{Update, VariableOutput}, + VarBlake2b, +}; +#[cfg(feature = "datasize")] +use datasize::DataSize; +use hex_fmt::HexFmt; +use itertools::Itertools; +#[cfg(feature = "once_cell")] +use once_cell::sync::OnceCell; +#[cfg(any(feature = "testing", test))] +use rand::{ + distributions::{Distribution, Standard}, + Rng, +}; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer}; + +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + checksummed_hex, CLType, CLTyped, +}; +pub use chunk_with_proof::ChunkWithProof; +pub use error::{ + ChunkWithProofVerificationError, Error as DigestError, MerkleConstructionError, + MerkleVerificationError, +}; +pub use indexed_merkle_proof::IndexedMerkleProof; + +/// The output of the hash function. 
+#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Default)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr( + feature = "json-schema", + derive(JsonSchema), + schemars(description = "Hex-encoded hash digest.") +)] +pub struct Digest( + #[cfg_attr(feature = "json-schema", schemars(skip, with = "String"))] + pub(super) [u8; Digest::LENGTH], +); + +const CHUNK_DATA_ZEROED: &[u8] = &[0u8; ChunkWithProof::CHUNK_SIZE_BYTES]; + +impl Digest { + /// The number of bytes in a `Digest`. + pub const LENGTH: usize = 32; + + /// Sentinel hash to be used for hashing options in the case of `None`. + pub const SENTINEL_NONE: Digest = Digest([0u8; Digest::LENGTH]); + /// Sentinel hash to be used by `hash_slice_rfold`. Terminates the fold. + pub const SENTINEL_RFOLD: Digest = Digest([1u8; Digest::LENGTH]); + /// Sentinel hash to be used by `hash_merkle_tree` in the case of an empty list. + pub const SENTINEL_MERKLE_TREE: Digest = Digest([2u8; Digest::LENGTH]); + + /// Creates a 32-byte BLAKE2b hash digest from a given a piece of data. + #[inline(always)] + pub fn hash>(data: T) -> Digest { + Self::blake2b_hash(data) + } + + /// Creates a 32-byte BLAKE2b hash digest from a given a piece of data + pub(crate) fn blake2b_hash>(data: T) -> Digest { + let mut ret = [0u8; Digest::LENGTH]; + // NOTE: Safe to unwrap here because our digest length is constant and valid + let mut hasher = VarBlake2b::new(Digest::LENGTH).unwrap(); + hasher.update(data); + hasher.finalize_variable(|hash| ret.clone_from_slice(hash)); + Digest(ret) + } + + /// Hashes a pair of byte slices. + pub fn hash_pair, U: AsRef<[u8]>>(data1: T, data2: U) -> Digest { + let mut result = [0; Digest::LENGTH]; + let mut hasher = VarBlake2b::new(Digest::LENGTH).unwrap(); + hasher.update(data1); + hasher.update(data2); + hasher.finalize_variable(|slice| { + result.copy_from_slice(slice); + }); + Digest(result) + } + + /// Hashes a raw Merkle root and leaf count to firm the final Merkle hash. 
+ /// + /// To avoid pre-image attacks, the final hash that is based upon the number of leaves in the + /// Merkle tree and the root hash is prepended with a padding to ensure it is longer than the + /// actual chunk size. + /// + /// Without this feature, an attacker could construct an item that is only a few bytes long but + /// hashes to the same value as a much longer, chunked item by hashing `(len || root hash of + /// longer item's Merkle tree root)`. + /// + /// This function computes the correct final hash by ensuring the hasher used has been + /// initialized with padding before. + /// + /// With `once_cell` feature enabled (generally done by enabling `std` feature), for efficiency + /// reasons it uses a memoized hasher state computed on first run and cloned afterwards. + fn hash_merkle_root(leaf_count: u64, root: Digest) -> Digest { + #[cfg(feature = "once_cell")] + static PAIR_PREFIX_HASHER: OnceCell = OnceCell::new(); + + let mut result = [0; Digest::LENGTH]; + let get_hasher = || { + let mut hasher = VarBlake2b::new(Digest::LENGTH).unwrap(); + hasher.update(CHUNK_DATA_ZEROED); + hasher + }; + #[cfg(feature = "once_cell")] + let mut hasher = PAIR_PREFIX_HASHER.get_or_init(get_hasher).clone(); + #[cfg(not(feature = "once_cell"))] + let mut hasher = get_hasher(); + + hasher.update(leaf_count.to_le_bytes()); + hasher.update(root); + hasher.finalize_variable(|slice| { + result.copy_from_slice(slice); + }); + Digest(result) + } + + /// Returns the underlying BLAKE2b hash bytes + pub fn value(&self) -> [u8; Digest::LENGTH] { + self.0 + } + + /// Converts the underlying BLAKE2b hash digest array to a `Vec` + pub fn into_vec(self) -> Vec { + self.0.to_vec() + } + + /// Hashes an `impl IntoIterator` of [`Digest`]s into a single [`Digest`] by + /// constructing a [Merkle tree][1]. Reduces pairs of elements in the collection by repeatedly + /// calling [Digest::hash_pair]. + /// + /// The pattern of hashing is as follows. 
It is akin to [graph reduction][2]: + /// + /// ```text + /// 1 2 4 5 8 9 + /// │ │ │ │ │ │ + /// └─3 └─6 └─10 + /// │ │ │ + /// └───7 │ + /// │ │ + /// └───11 + /// ``` + /// + /// Finally hashes the number of elements with the resulting hash. In the example above the + /// final output would be `hash_pair(6_u64.to_le_bytes(), l)`. + /// + /// Returns [`Digest::SENTINEL_MERKLE_TREE`] when the input is empty. + /// + /// [1]: https://en.wikipedia.org/wiki/Merkle_tree + /// [2]: https://en.wikipedia.org/wiki/Graph_reduction + pub fn hash_merkle_tree(leaves: I) -> Digest + where + I: IntoIterator, + I::IntoIter: ExactSizeIterator, + { + let leaves = leaves.into_iter(); + let leaf_count = leaves.len() as u64; + + leaves.tree_fold1(Digest::hash_pair).map_or_else( + || Digest::SENTINEL_MERKLE_TREE, + |raw_root| Digest::hash_merkle_root(leaf_count, raw_root), + ) + } + + /// Hashes a `BTreeMap`. + pub fn hash_btree_map(btree_map: &BTreeMap) -> Result + where + K: ToBytes, + V: ToBytes, + { + let mut kv_hashes: Vec = Vec::with_capacity(btree_map.len()); + for (key, value) in btree_map.iter() { + kv_hashes.push(Digest::hash_pair( + Digest::hash(key.to_bytes()?), + Digest::hash(value.to_bytes()?), + )) + } + Ok(Self::hash_merkle_tree(kv_hashes)) + } + + /// Hashes a `&[Digest]` using a [right fold][1]. + /// + /// This pattern of hashing is as follows: + /// + /// ```text + /// hash_pair(a, &hash_pair(b, &hash_pair(c, &SENTINEL_RFOLD))) + /// ``` + /// + /// Unlike Merkle trees, this is suited to hashing heterogeneous lists we may wish to extend in + /// the future (ie, hashes of data structures that may undergo revision). + /// + /// Returns [`Digest::SENTINEL_RFOLD`] when given an empty slice as input. + /// + /// [1]: https://en.wikipedia.org/wiki/Fold_(higher-order_function)#Linear_folds + pub fn hash_slice_rfold(slice: &[Digest]) -> Digest { + Self::hash_slice_with_proof(slice, Self::SENTINEL_RFOLD) + } + + /// Hashes a `&[Digest]` using a [right fold][1]. 
Uses `proof` as a Merkle proof for the + /// missing tail of the slice. + /// + /// [1]: https://en.wikipedia.org/wiki/Fold_(higher-order_function)#Linear_folds + pub fn hash_slice_with_proof(slice: &[Digest], proof: Digest) -> Digest { + slice + .iter() + .rfold(proof, |prev, next| Digest::hash_pair(next, prev)) + } + + /// Returns a `Digest` parsed from a hex-encoded `Digest`. + pub fn from_hex>(hex_input: T) -> Result { + let bytes = checksummed_hex::decode(&hex_input).map_err(DigestError::Base16DecodeError)?; + let slice: [u8; Self::LENGTH] = bytes + .try_into() + .map_err(|_| DigestError::IncorrectDigestLength(hex_input.as_ref().len()))?; + Ok(Digest(slice)) + } + + /// Hash data into chunks if necessary. + pub fn hash_into_chunks_if_necessary(bytes: &[u8]) -> Digest { + if bytes.len() <= ChunkWithProof::CHUNK_SIZE_BYTES { + Digest::blake2b_hash(bytes) + } else { + Digest::hash_merkle_tree( + bytes + .chunks(ChunkWithProof::CHUNK_SIZE_BYTES) + .map(Digest::blake2b_hash), + ) + } + } + + /// Returns a new `Digest` directly initialized with the provided bytes; no hashing is done. + /// + /// This is equivalent to `Digest::from`, but is a const function. + pub const fn from_raw(raw_digest: [u8; Self::LENGTH]) -> Self { + Digest(raw_digest) + } + + /// Returns a random `Digest`. 
+ #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + Digest(rng.gen()) + } +} + +impl CLTyped for Digest { + fn cl_type() -> CLType { + CLType::ByteArray(Digest::LENGTH as u32) + } +} + +#[cfg(any(feature = "testing", test))] +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> Digest { + Digest(rng.gen()) + } +} + +impl LowerHex for Digest { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + let hex_string = base16::encode_lower(&self.value()); + if f.alternate() { + write!(f, "0x{}", hex_string) + } else { + write!(f, "{}", hex_string) + } + } +} + +impl UpperHex for Digest { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + let hex_string = base16::encode_upper(&self.value()); + if f.alternate() { + write!(f, "0x{}", hex_string) + } else { + write!(f, "{}", hex_string) + } + } +} + +impl Display for Digest { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + write!(f, "{:10}", HexFmt(&self.0)) + } +} + +impl Debug for Digest { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + write!(f, "{}", base16::encode_lower(&self.0)) + } +} + +impl From<[u8; Digest::LENGTH]> for Digest { + fn from(arr: [u8; Digest::LENGTH]) -> Self { + Digest(arr) + } +} + +impl TryFrom<&[u8]> for Digest { + type Error = TryFromSliceError; + + fn try_from(slice: &[u8]) -> Result { + <[u8; Digest::LENGTH]>::try_from(slice).map(Digest) + } +} + +impl AsRef<[u8]> for Digest { + fn as_ref(&self) -> &[u8] { + self.0.as_ref() + } +} + +impl From for [u8; Digest::LENGTH] { + fn from(hash: Digest) -> Self { + hash.0 + } +} + +impl ToBytes for Digest { + #[inline(always)] + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.0.to_bytes() + } + + #[inline(always)] + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } + + #[inline(always)] + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + writer.extend_from_slice(&self.0); + Ok(()) + } +} + +impl FromBytes for Digest { + 
#[inline(always)] + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + FromBytes::from_bytes(bytes).map(|(arr, rem)| (Digest(arr), rem)) + } +} + +impl Serialize for Digest { + fn serialize(&self, serializer: S) -> Result { + if serializer.is_human_readable() { + base16::encode_lower(&self.0).serialize(serializer) + } else { + // This is to keep backwards compatibility with how HexForm encodes + // byte arrays. HexForm treats this like a slice. + self.0[..].serialize(serializer) + } + } +} + +impl<'de> Deserialize<'de> for Digest { + fn deserialize>(deserializer: D) -> Result { + if deserializer.is_human_readable() { + let hex_string = String::deserialize(deserializer)?; + let bytes = + checksummed_hex::decode(hex_string.as_bytes()).map_err(SerdeError::custom)?; + let data = + <[u8; Digest::LENGTH]>::try_from(bytes.as_ref()).map_err(SerdeError::custom)?; + Ok(Digest::from(data)) + } else { + let data = >::deserialize(deserializer)?; + Digest::try_from(data.as_slice()).map_err(D::Error::custom) + } + } +} + +#[cfg(test)] +mod tests { + use std::{collections::BTreeMap, iter}; + + use proptest_attr_macro::proptest; + + use super::Digest; + + use crate::{ + bytesrepr::{self, ToBytes}, + ChunkWithProof, + }; + + #[proptest] + fn bytesrepr_roundtrip(hash: [u8; Digest::LENGTH]) { + let digest = Digest(hash); + bytesrepr::test_serialization_roundtrip(&digest); + } + + #[proptest] + fn serde_roundtrip(hash: [u8; Digest::LENGTH]) { + let preser_digest = Digest(hash); + let serialized = serde_json::to_string(&preser_digest).unwrap(); + let deser_digest: Digest = serde_json::from_str(&serialized).unwrap(); + assert_eq!(preser_digest, deser_digest); + } + + #[test] + fn serde_custom_serialization() { + let serialized = serde_json::to_string(&Digest::SENTINEL_RFOLD).unwrap(); + let expected = format!("\"{:?}\"", Digest::SENTINEL_RFOLD); + assert_eq!(expected, serialized); + } + + #[test] + fn hash_known() { + // Data of length less or equal to 
[ChunkWithProof::CHUNK_SIZE_BYTES] + // are hashed using Blake2B algorithm. + // Larger data are chunked and Merkle tree hash is calculated. + // + // Please note that [ChunkWithProof::CHUNK_SIZE_BYTES] is `test` configuration + // is smaller than in production, to allow testing with more chunks + // with still reasonable time and memory consumption. + // + // See: [Digest::hash] + let inputs_and_digests = [ + ( + "", + "0e5751c026e543b2e8ab2eb06099daa1d1e5df47778f7787faab45cdf12fe3a8", + ), + ( + "abc", + "bddd813c634239723171ef3fee98579b94964e3bb1cb3e427262c8c068d52319", + ), + ( + "0123456789", + "7b6cb8d374484e221785288b035dc53fc9ddf000607f473fc2a3258d89a70398", + ), + ( + "01234567890", + "3d199478c18b7fe3ca1f4f2a9b3e07f708ff66ed52eb345db258abe8a812ed5c", + ), + ( + "The quick brown fox jumps over the lazy dog", + "01718cec35cd3d796dd00020e0bfecb473ad23457d063b75eff29c0ffa2e58a9", + ), + ]; + for (known_input, expected_digest) in &inputs_and_digests { + let known_input: &[u8] = known_input.as_ref(); + assert_eq!(*expected_digest, format!("{:?}", Digest::hash(known_input))); + } + } + + #[test] + fn from_valid_hex_should_succeed() { + for char in "abcdefABCDEF0123456789".chars() { + let input: String = iter::repeat(char).take(64).collect(); + assert!(Digest::from_hex(input).is_ok()); + } + } + + #[test] + fn from_hex_invalid_length_should_fail() { + for len in &[2_usize, 62, 63, 65, 66] { + let input: String = "f".repeat(*len); + assert!(Digest::from_hex(input).is_err()); + } + } + + #[test] + fn from_hex_invalid_char_should_fail() { + for char in "g %-".chars() { + let input: String = iter::repeat('f').take(63).chain(iter::once(char)).collect(); + assert!(Digest::from_hex(input).is_err()); + } + } + + #[test] + fn should_display_digest_in_hex() { + let hash = Digest([0u8; 32]); + let hash_hex = format!("{:?}", hash); + assert_eq!( + hash_hex, + "0000000000000000000000000000000000000000000000000000000000000000" + ); + } + + #[test] + fn 
should_print_digest_lower_hex() { + let hash = Digest([10u8; 32]); + let hash_lower_hex = format!("{:x}", hash); + assert_eq!( + hash_lower_hex, + "0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a" + ) + } + + #[test] + fn should_print_digest_upper_hex() { + let hash = Digest([10u8; 32]); + let hash_upper_hex = format!("{:X}", hash); + assert_eq!( + hash_upper_hex, + "0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A" + ) + } + + #[test] + fn alternate_should_prepend_0x() { + let hash = Digest([0u8; 32]); + let hash_hex_alt = format!("{:#x}", hash); + assert_eq!( + hash_hex_alt, + "0x0000000000000000000000000000000000000000000000000000000000000000" + ) + } + + #[test] + fn test_hash_pair() { + let hash1 = Digest([1u8; 32]); + let hash2 = Digest([2u8; 32]); + + let hash = Digest::hash_pair(hash1, hash2); + let hash_lower_hex = format!("{:x}", hash); + + assert_eq!( + hash_lower_hex, + "30b600fb1f0cc0b3f0fc28cdcb7389405a6659be81c7d5c5905725aa3a5119ce" + ); + } + + #[test] + fn test_hash_rfold() { + let hashes = [ + Digest([1u8; 32]), + Digest([2u8; 32]), + Digest([3u8; 32]), + Digest([4u8; 32]), + Digest([5u8; 32]), + ]; + + let hash = Digest::hash_slice_rfold(&hashes[..]); + let hash_lower_hex = format!("{:x}", hash); + + assert_eq!( + hash_lower_hex, + "e137f4eb94d2387065454eecfe2cdb5584e3dbd5f1ca07fc511fffd13d234e8e" + ); + + let proof = Digest::hash_slice_rfold(&hashes[2..]); + let hash_proof = Digest::hash_slice_with_proof(&hashes[..2], proof); + + assert_eq!(hash, hash_proof); + } + + #[test] + fn test_hash_merkle_odd() { + let hashes = vec![ + Digest([1u8; 32]), + Digest([2u8; 32]), + Digest([3u8; 32]), + Digest([4u8; 32]), + Digest([5u8; 32]), + ]; + + let hash = Digest::hash_merkle_tree(hashes); + let hash_lower_hex = format!("{:x}", hash); + + assert_eq!( + hash_lower_hex, + "775cec8133b97b0e8d4e97659025d5bac4ed7c8927d1bd99cf62114df57f3e74" + ); + } + + #[test] + fn test_hash_merkle_even() { + let hashes = vec![ + Digest([1u8; 
32]), + Digest([2u8; 32]), + Digest([3u8; 32]), + Digest([4u8; 32]), + Digest([5u8; 32]), + Digest([6u8; 32]), + ]; + + let hash = Digest::hash_merkle_tree(hashes); + let hash_lower_hex = format!("{:x}", hash); + + assert_eq!( + hash_lower_hex, + "4bd50b08a8366b28c35bc831b95d147123bad01c29ffbf854b659c4b3ea4086c" + ); + } + + #[test] + fn test_hash_btreemap() { + let mut map = BTreeMap::new(); + let _ = map.insert(Digest([1u8; 32]), Digest([2u8; 32])); + let _ = map.insert(Digest([3u8; 32]), Digest([4u8; 32])); + let _ = map.insert(Digest([5u8; 32]), Digest([6u8; 32])); + let _ = map.insert(Digest([7u8; 32]), Digest([8u8; 32])); + let _ = map.insert(Digest([9u8; 32]), Digest([10u8; 32])); + + let hash = Digest::hash_btree_map(&map).unwrap(); + let hash_lower_hex = format!("{:x}", hash); + + assert_eq!( + hash_lower_hex, + "fd1214a627473ffc6d6cc97e7012e6344d74abbf987b48cde5d0642049a0db98" + ); + } + + #[test] + fn digest_deserialize_regression() { + let input = Digest([0; 32]); + let serialized = bincode::serialize(&input).expect("failed to serialize."); + + let expected = vec![ + 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + + assert_eq!(expected, serialized); + } + + #[test] + fn should_assert_simple_digest_serialization_format() { + let digest_bytes = [0; 32]; + + assert_eq!( + Digest(digest_bytes).to_bytes().unwrap(), + digest_bytes.to_vec() + ); + } + + #[test] + fn merkle_roots_are_preimage_resistent() { + // Input data is two chunks long. 
+ // + // The resulting tree will look like this: + // + // 1..0 a..j + // │ │ + // └─────── R + // + // The Merkle root is thus: R = h( h(1..0) || h(a..j) ) + // + // h(1..0) = 807f1ba73147c3a96c2d63b38dd5a5f514f66290a1436bb9821e9f2a72eff263 + // h(a..j) = 499e1cdb476523fedafc9d9db31125e2744f271578ea95b16ab4bd1905f05fea + // R=h(h(1..0)||h(a..j)) = 1319394a98d0cb194f960e3748baeb2045a9ec28aa51e0d42011be43f4a91f5f + // h(2u64le || R) = c31f0bb6ef569354d1a26c3a51f1ad4b6d87cef7f73a290ab6be8db6a9c7d4ee + // + // The final step is to hash h(2u64le || R), which is the length as little endian + // concatenated with the root. + + // Constants used here assume a chunk size of 10 bytes. + assert_eq!(ChunkWithProof::CHUNK_SIZE_BYTES, 10); + + let long_data = b"1234567890abcdefghij"; + assert_eq!(long_data.len(), ChunkWithProof::CHUNK_SIZE_BYTES * 2); + + // The `long_data_hash` is constructed manually here, as `Digest::hash` still had + // deactivated chunking code at the time this test was written. + let long_data_hash = Digest::hash_merkle_tree( + long_data + .as_ref() + .chunks(ChunkWithProof::CHUNK_SIZE_BYTES) + .map(Digest::blake2b_hash), + ); + + // The concatenation of `2u64` in little endian + the Merkle root hash `R`. Note that this + // is a valid hashable object on its own. + let maybe_colliding_short_data = [ + 2, 0, 0, 0, 0, 0, 0, 0, 19, 25, 57, 74, 152, 208, 203, 25, 79, 150, 14, 55, 72, 186, + 235, 32, 69, 169, 236, 40, 170, 81, 224, 212, 32, 17, 190, 67, 244, 169, 31, 95, + ]; + + // Use `blake2b_hash` to work around the issue of the chunk size being shorter than the + // digest length. + let short_data_hash = Digest::blake2b_hash(maybe_colliding_short_data); + + // Ensure there is no collision. You can verify this test is correct by temporarily changing + // the `Digest::hash_merkle_tree` function to use the unpadded `hash_pair` function, instead + // of `hash_merkle_root`. 
+ assert_ne!(long_data_hash, short_data_hash); + + // The expected input for the root hash is the colliding data, but prefixed with a full + // chunk of zeros. + let expected_final_hash_input = [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 19, 25, 57, 74, 152, 208, 203, + 25, 79, 150, 14, 55, 72, 186, 235, 32, 69, 169, 236, 40, 170, 81, 224, 212, 32, 17, + 190, 67, 244, 169, 31, 95, + ]; + assert_eq!( + Digest::blake2b_hash(expected_final_hash_input), + long_data_hash + ); + + // Another way to specify this sanity check is to say that the short and long data should + // hash differently. + // + // Note: This condition is true at the time of writing this test, where chunk hashing is + // disabled. It should still hold true once enabled. + assert_ne!( + Digest::hash(maybe_colliding_short_data), + Digest::hash(long_data) + ); + + // In a similar manner, the internal padded data should also not hash equal to either, as it + // should be hashed using the chunking function. + assert_ne!( + Digest::hash(maybe_colliding_short_data), + Digest::hash(expected_final_hash_input) + ); + assert_ne!( + Digest::hash(long_data), + Digest::hash(expected_final_hash_input) + ); + } +} diff --git a/types/src/digest/chunk_with_proof.rs b/types/src/digest/chunk_with_proof.rs new file mode 100644 index 0000000000..862b0ff637 --- /dev/null +++ b/types/src/digest/chunk_with_proof.rs @@ -0,0 +1,333 @@ +//! Chunks with Merkle proofs. + +use alloc::vec::Vec; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use super::{ChunkWithProofVerificationError, Digest, IndexedMerkleProof, MerkleConstructionError}; +use crate::bytesrepr::{self, Bytes, FromBytes, ToBytes}; + +/// Represents a chunk of data with attached proof. 
+#[derive(PartialEq, Eq, Debug, Clone, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub struct ChunkWithProof { + proof: IndexedMerkleProof, + chunk: Bytes, +} + +impl ToBytes for ChunkWithProof { + fn write_bytes(&self, buf: &mut Vec) -> Result<(), bytesrepr::Error> { + buf.append(&mut self.proof.to_bytes()?); + buf.append(&mut self.chunk.to_bytes()?); + + Ok(()) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut result = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut result)?; + Ok(result) + } + + fn serialized_length(&self) -> usize { + self.proof.serialized_length() + self.chunk.serialized_length() + } +} + +impl FromBytes for ChunkWithProof { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (proof, remainder) = FromBytes::from_bytes(bytes)?; + let (chunk, remainder) = FromBytes::from_bytes(remainder)?; + + Ok((ChunkWithProof { proof, chunk }, remainder)) + } +} + +impl ChunkWithProof { + #[cfg(test)] + /// 10 bytes for testing purposes. + pub const CHUNK_SIZE_BYTES: usize = 10; + + #[cfg(not(test))] + /// 8 MiB + pub const CHUNK_SIZE_BYTES: usize = 8 * 1024 * 1024; + + /// Constructs the [`ChunkWithProof`] that contains the chunk of data with the appropriate index + /// and the cryptographic proof. + /// + /// Empty data is always represented as single, empty chunk and not as zero chunks. 
+ pub fn new(data: &[u8], index: u64) -> Result { + Ok(if data.is_empty() { + ChunkWithProof { + proof: IndexedMerkleProof::new([Digest::blake2b_hash([])], index)?, + chunk: Bytes::new(), + } + } else { + ChunkWithProof { + proof: IndexedMerkleProof::new( + data.chunks(Self::CHUNK_SIZE_BYTES) + .map(Digest::blake2b_hash), + index, + )?, + chunk: Bytes::from( + data.chunks(Self::CHUNK_SIZE_BYTES) + .nth(index as usize) + .ok_or_else(|| MerkleConstructionError::IndexOutOfBounds { + count: data.chunks(Self::CHUNK_SIZE_BYTES).len() as u64, + index, + })?, + ), + } + }) + } + + /// Get a reference to the `ChunkWithProof`'s chunk. + pub fn chunk(&self) -> &[u8] { + self.chunk.as_slice() + } + + /// Convert a chunk with proof into the underlying chunk. + pub fn into_chunk(self) -> Bytes { + self.chunk + } + + /// Returns the `IndexedMerkleProof`. + pub fn proof(&self) -> &IndexedMerkleProof { + &self.proof + } + + /// Verify the integrity of this chunk with indexed Merkle proof. + pub fn verify(&self) -> Result<(), ChunkWithProofVerificationError> { + self.proof().verify()?; + let first_digest_in_indexed_merkle_proof = + self.proof().merkle_proof().first().ok_or_else(|| { + ChunkWithProofVerificationError::ChunkWithProofHasEmptyMerkleProof { + chunk_with_proof: self.clone(), + } + })?; + let hash_of_chunk = Digest::hash(self.chunk()); + if *first_digest_in_indexed_merkle_proof != hash_of_chunk { + return Err( + ChunkWithProofVerificationError::FirstDigestInMerkleProofDidNotMatchHashOfChunk { + first_digest_in_indexed_merkle_proof: *first_digest_in_indexed_merkle_proof, + hash_of_chunk, + }, + ); + } + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use proptest::{ + arbitrary::Arbitrary, + strategy::{BoxedStrategy, Strategy}, + }; + use proptest_attr_macro::proptest; + use rand::Rng; + + use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + ChunkWithProof, Digest, MerkleConstructionError, + }; + + fn prepare_bytes(length: usize) -> Vec { + let mut rng = rand::thread_rng(); 
+ + (0..length).map(|_| rng.gen()).collect() + } + + fn random_chunk_with_proof() -> ChunkWithProof { + let mut rng = rand::thread_rng(); + let data: Vec = prepare_bytes(rng.gen_range(1..1024)); + let index = rng.gen_range(0..data.chunks(ChunkWithProof::CHUNK_SIZE_BYTES).len() as u64); + + ChunkWithProof::new(&data, index).unwrap() + } + + impl ChunkWithProof { + fn replace_first_proof(self) -> Self { + let mut rng = rand::thread_rng(); + let ChunkWithProof { mut proof, chunk } = self; + + // Keep the same number of proofs, but replace the first one with some random hash + let mut merkle_proof: Vec<_> = proof.merkle_proof().to_vec(); + merkle_proof.pop(); + merkle_proof.insert(0, Digest::hash(rng.gen::().to_string())); + proof.inject_merkle_proof(merkle_proof); + + ChunkWithProof { proof, chunk } + } + } + + #[derive(Debug)] + pub struct TestDataSize(usize); + impl Arbitrary for TestDataSize { + type Parameters = (); + type Strategy = BoxedStrategy; + + fn arbitrary_with(_: Self::Parameters) -> Self::Strategy { + (0usize..32usize) + .prop_map(|chunk_count| { + TestDataSize(chunk_count * ChunkWithProof::CHUNK_SIZE_BYTES) + }) + .boxed() + } + } + + #[derive(Debug)] + pub struct TestDataSizeAtLeastTwoChunks(usize); + impl Arbitrary for TestDataSizeAtLeastTwoChunks { + type Parameters = (); + type Strategy = BoxedStrategy; + + fn arbitrary_with(_: Self::Parameters) -> Self::Strategy { + (2usize..32usize) + .prop_map(|chunk_count| { + TestDataSizeAtLeastTwoChunks(chunk_count * ChunkWithProof::CHUNK_SIZE_BYTES) + }) + .boxed() + } + } + + #[proptest] + fn generates_valid_proof(test_data: TestDataSize) { + for data in [prepare_bytes(test_data.0), vec![0u8; test_data.0]] { + let number_of_chunks: u64 = data + .chunks(ChunkWithProof::CHUNK_SIZE_BYTES) + .len() + .try_into() + .unwrap(); + + assert!((0..number_of_chunks) + .map(|chunk_index| { ChunkWithProof::new(data.as_slice(), chunk_index).unwrap() }) + .all(|chunk_with_proof| chunk_with_proof.verify().is_ok())); + } + } 
+ + #[proptest] + fn validate_chunks_against_hash_merkle_tree(test_data: TestDataSizeAtLeastTwoChunks) { + // This test requires at least two chunks + assert!(test_data.0 >= ChunkWithProof::CHUNK_SIZE_BYTES * 2); + + for data in [prepare_bytes(test_data.0), vec![0u8; test_data.0]] { + let expected_root = Digest::hash_merkle_tree( + data.chunks(ChunkWithProof::CHUNK_SIZE_BYTES) + .map(Digest::hash), + ); + + // Calculate proof with `ChunkWithProof` + let ChunkWithProof { + proof: proof_0, + chunk: _, + } = ChunkWithProof::new(data.as_slice(), 0).unwrap(); + let ChunkWithProof { + proof: proof_1, + chunk: _, + } = ChunkWithProof::new(data.as_slice(), 1).unwrap(); + + assert_eq!(proof_0.root_hash(), expected_root); + assert_eq!(proof_1.root_hash(), expected_root); + } + } + + #[proptest] + fn verifies_chunk_with_proofs(test_data: TestDataSize) { + for data in [prepare_bytes(test_data.0), vec![0u8; test_data.0]] { + let chunk_with_proof = ChunkWithProof::new(data.as_slice(), 0).unwrap(); + assert!(chunk_with_proof.verify().is_ok()); + + let chunk_with_incorrect_proof = chunk_with_proof.replace_first_proof(); + assert!(chunk_with_incorrect_proof.verify().is_err()); + } + } + + #[proptest] + fn serde_deserialization_of_malformed_chunk_should_work(test_data: TestDataSize) { + for data in [prepare_bytes(test_data.0), vec![0u8; test_data.0]] { + let chunk_with_proof = ChunkWithProof::new(data.as_slice(), 0).unwrap(); + + let json = serde_json::to_string(&chunk_with_proof).unwrap(); + assert_eq!( + chunk_with_proof, + serde_json::from_str::(&json) + .expect("should deserialize correctly") + ); + + let chunk_with_incorrect_proof = chunk_with_proof.replace_first_proof(); + let json = serde_json::to_string(&chunk_with_incorrect_proof).unwrap(); + serde_json::from_str::(&json).expect("should deserialize correctly"); + } + } + + #[proptest] + fn bytesrepr_deserialization_of_malformed_chunk_should_work(test_data: TestDataSize) { + for data in [prepare_bytes(test_data.0), vec![0u8; 
test_data.0]] { + let chunk_with_proof = ChunkWithProof::new(data.as_slice(), 0).unwrap(); + + let bytes = chunk_with_proof + .to_bytes() + .expect("should serialize correctly"); + + let (deserialized_chunk_with_proof, _) = + ChunkWithProof::from_bytes(&bytes).expect("should deserialize correctly"); + + assert_eq!(chunk_with_proof, deserialized_chunk_with_proof); + + let chunk_with_incorrect_proof = chunk_with_proof.replace_first_proof(); + let bytes = chunk_with_incorrect_proof + .to_bytes() + .expect("should serialize correctly"); + + ChunkWithProof::from_bytes(&bytes).expect("should deserialize correctly"); + } + } + + #[test] + fn returns_error_on_incorrect_index() { + // This test needs specific data sizes, hence it doesn't use the proptest + + let chunk_with_proof = ChunkWithProof::new(&[], 0).expect("should create with empty data"); + assert!(chunk_with_proof.verify().is_ok()); + + let chunk_with_proof = + ChunkWithProof::new(&[], 1).expect_err("should error with empty data and index > 0"); + if let MerkleConstructionError::IndexOutOfBounds { count, index } = chunk_with_proof { + assert_eq!(count, 1); + assert_eq!(index, 1); + } else { + panic!("expected MerkleConstructionError::IndexOutOfBounds"); + } + + let data_larger_than_single_chunk = vec![0u8; ChunkWithProof::CHUNK_SIZE_BYTES * 10]; + ChunkWithProof::new(data_larger_than_single_chunk.as_slice(), 9).unwrap(); + + let chunk_with_proof = + ChunkWithProof::new(data_larger_than_single_chunk.as_slice(), 10).unwrap_err(); + if let MerkleConstructionError::IndexOutOfBounds { count, index } = chunk_with_proof { + assert_eq!(count, 10); + assert_eq!(index, 10); + } else { + panic!("expected MerkleConstructionError::IndexOutOfBounds"); + } + } + + #[test] + fn bytesrepr_serialization() { + let chunk_with_proof = random_chunk_with_proof(); + bytesrepr::test_serialization_roundtrip(&chunk_with_proof); + } + + #[test] + fn chunk_with_empty_data_contains_a_single_proof() { + let chunk_with_proof = 
ChunkWithProof::new(&[], 0).unwrap(); + assert_eq!(chunk_with_proof.proof.merkle_proof().len(), 1) + } +} diff --git a/types/src/digest/error.rs b/types/src/digest/error.rs new file mode 100644 index 0000000000..539e72678f --- /dev/null +++ b/types/src/digest/error.rs @@ -0,0 +1,233 @@ +//! Errors in constructing and validating indexed Merkle proofs, chunks with indexed Merkle proofs. + +use alloc::string::String; +use core::fmt::{self, Display, Formatter}; +#[cfg(feature = "std")] +use std::error::Error as StdError; + +use super::{ChunkWithProof, Digest}; +use crate::bytesrepr; + +/// Possible hashing errors. +#[derive(Debug)] +#[non_exhaustive] +pub enum Error { + /// The digest length was an incorrect size. + IncorrectDigestLength(usize), + /// There was a decoding error. + Base16DecodeError(base16::DecodeError), +} + +impl Display for Error { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + Error::IncorrectDigestLength(length) => { + write!( + formatter, + "incorrect digest length {}, expected length {}.", + length, + Digest::LENGTH + ) + } + Error::Base16DecodeError(error) => { + write!(formatter, "base16 decode error: {}", error) + } + } + } +} + +#[cfg(feature = "std")] +impl StdError for Error { + fn source(&self) -> Option<&(dyn StdError + 'static)> { + match self { + Error::IncorrectDigestLength(_) => None, + Error::Base16DecodeError(error) => Some(error), + } + } +} + +/// Error validating a Merkle proof of a chunk. +#[derive(Debug, PartialEq, Eq)] +#[non_exhaustive] +pub enum MerkleVerificationError { + /// Index out of bounds. + IndexOutOfBounds { + /// Count. + count: u64, + /// Index. + index: u64, + }, + + /// Unexpected proof length. + UnexpectedProofLength { + /// Count. + count: u64, + /// Index. + index: u64, + /// Expected proof length. + expected_proof_length: u8, + /// Actual proof length. 
+ actual_proof_length: usize, + }, +} + +impl Display for MerkleVerificationError { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + MerkleVerificationError::IndexOutOfBounds { count, index } => { + write!( + formatter, + "index out of bounds - count: {}, index: {}", + count, index + ) + } + MerkleVerificationError::UnexpectedProofLength { + count, + index, + expected_proof_length, + actual_proof_length, + } => { + write!( + formatter, + "unexpected proof length - count: {}, index: {}, expected length: {}, actual \ + length: {}", + count, index, expected_proof_length, actual_proof_length + ) + } + } + } +} + +#[cfg(feature = "std")] +impl StdError for MerkleVerificationError {} + +/// Error validating a chunk with proof. +#[derive(Debug)] +#[non_exhaustive] +pub enum ChunkWithProofVerificationError { + /// Indexed Merkle proof verification error. + MerkleVerificationError(MerkleVerificationError), + + /// Empty Merkle proof for trie with chunk. + ChunkWithProofHasEmptyMerkleProof { + /// Chunk with empty Merkle proof. + chunk_with_proof: ChunkWithProof, + }, + /// Unexpected Merkle root hash. + UnexpectedRootHash, + /// Bytesrepr error. + Bytesrepr(bytesrepr::Error), + + /// First digest in indexed Merkle proof did not match hash of chunk. + FirstDigestInMerkleProofDidNotMatchHashOfChunk { + /// First digest in indexed Merkle proof. + first_digest_in_indexed_merkle_proof: Digest, + /// Hash of chunk. 
+ hash_of_chunk: Digest, + }, +} + +impl Display for ChunkWithProofVerificationError { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + ChunkWithProofVerificationError::MerkleVerificationError(error) => { + write!(formatter, "{}", error) + } + ChunkWithProofVerificationError::ChunkWithProofHasEmptyMerkleProof { + chunk_with_proof, + } => { + write!( + formatter, + "chunk with proof has empty merkle proof: {:?}", + chunk_with_proof + ) + } + ChunkWithProofVerificationError::UnexpectedRootHash => { + write!(formatter, "merkle proof has an unexpected root hash") + } + ChunkWithProofVerificationError::Bytesrepr(error) => { + write!( + formatter, + "bytesrepr error computing chunkable hash: {}", + error + ) + } + ChunkWithProofVerificationError::FirstDigestInMerkleProofDidNotMatchHashOfChunk { + first_digest_in_indexed_merkle_proof, + hash_of_chunk, + } => { + write!( + formatter, + "first digest in merkle proof did not match hash of chunk - first digest: \ + {:?}, hash of chunk: {:?}", + first_digest_in_indexed_merkle_proof, hash_of_chunk + ) + } + } + } +} + +impl From for ChunkWithProofVerificationError { + fn from(error: MerkleVerificationError) -> Self { + ChunkWithProofVerificationError::MerkleVerificationError(error) + } +} + +#[cfg(feature = "std")] +impl StdError for ChunkWithProofVerificationError { + fn source(&self) -> Option<&(dyn StdError + 'static)> { + match self { + ChunkWithProofVerificationError::MerkleVerificationError(error) => Some(error), + ChunkWithProofVerificationError::Bytesrepr(error) => Some(error), + ChunkWithProofVerificationError::ChunkWithProofHasEmptyMerkleProof { .. } + | ChunkWithProofVerificationError::UnexpectedRootHash + | ChunkWithProofVerificationError::FirstDigestInMerkleProofDidNotMatchHashOfChunk { + .. + } => None, + } + } +} + +/// Error during the construction of a Merkle proof. 
+#[derive(Debug, Eq, PartialEq, Clone)] +#[non_exhaustive] +pub enum MerkleConstructionError { + /// Chunk index was out of bounds. + IndexOutOfBounds { + /// Total chunks count. + count: u64, + /// Requested index. + index: u64, + }, + /// Too many Merkle tree leaves. + TooManyLeaves { + /// Total chunks count. + count: String, + }, +} + +impl Display for MerkleConstructionError { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + MerkleConstructionError::IndexOutOfBounds { count, index } => { + write!( + formatter, + "could not construct merkle proof - index out of bounds - count: {}, index: {}", + count, index + ) + } + MerkleConstructionError::TooManyLeaves { count } => { + write!( + formatter, + "could not construct merkle proof - too many leaves - count: {}, max: {} \ + (u64::MAX)", + count, + u64::MAX + ) + } + } + } +} + +#[cfg(feature = "std")] +impl StdError for MerkleConstructionError {} diff --git a/types/src/digest/indexed_merkle_proof.rs b/types/src/digest/indexed_merkle_proof.rs new file mode 100644 index 0000000000..7e8a7f7c55 --- /dev/null +++ b/types/src/digest/indexed_merkle_proof.rs @@ -0,0 +1,514 @@ +//! Constructing and validating indexed Merkle proofs. +use alloc::{string::ToString, vec::Vec}; +use core::convert::TryInto; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +use itertools::Itertools; +#[cfg(any(feature = "once_cell", test))] +use once_cell::sync::OnceCell; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use super::{Digest, MerkleConstructionError, MerkleVerificationError}; +use crate::bytesrepr::{self, FromBytes, ToBytes}; + +/// A Merkle proof of the given chunk. 
+#[derive(PartialEq, Eq, Debug, Clone, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub struct IndexedMerkleProof { + index: u64, + count: u64, + merkle_proof: Vec, + #[cfg_attr(any(feature = "once_cell", test), serde(skip))] + #[cfg_attr( + all(any(feature = "once_cell", test), feature = "datasize"), + data_size(skip) + )] + #[cfg(any(feature = "once_cell", test))] + root_hash: OnceCell, +} + +impl ToBytes for IndexedMerkleProof { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut result = bytesrepr::allocate_buffer(self)?; + result.append(&mut self.index.to_bytes()?); + result.append(&mut self.count.to_bytes()?); + result.append(&mut self.merkle_proof.to_bytes()?); + Ok(result) + } + + fn serialized_length(&self) -> usize { + self.index.serialized_length() + + self.count.serialized_length() + + self.merkle_proof.serialized_length() + } +} + +impl FromBytes for IndexedMerkleProof { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (index, remainder) = FromBytes::from_bytes(bytes)?; + let (count, remainder) = FromBytes::from_bytes(remainder)?; + let (merkle_proof, remainder) = FromBytes::from_bytes(remainder)?; + + Ok(( + IndexedMerkleProof { + index, + count, + merkle_proof, + #[cfg(any(feature = "once_cell", test))] + root_hash: OnceCell::new(), + }, + remainder, + )) + } +} + +impl IndexedMerkleProof { + /// Attempts to construct a new instance. 
+ pub fn new(leaves: I, index: u64) -> Result + where + I: IntoIterator, + I::IntoIter: ExactSizeIterator, + { + use HashOrProof::{Hash as H, Proof as P}; + + enum HashOrProof { + Hash(Digest), + Proof(Vec), + } + + let leaves = leaves.into_iter(); + let count: u64 = + leaves + .len() + .try_into() + .map_err(|_| MerkleConstructionError::TooManyLeaves { + count: leaves.len().to_string(), + })?; + + let maybe_proof = leaves + .enumerate() + .map(|(i, hash)| { + if i as u64 == index { + P(vec![hash]) + } else { + H(hash) + } + }) + .tree_fold1(|x, y| match (x, y) { + (H(hash_x), H(hash_y)) => H(Digest::hash_pair(hash_x, hash_y)), + (H(hash), P(mut proof)) | (P(mut proof), H(hash)) => { + proof.push(hash); + P(proof) + } + (P(_), P(_)) => unreachable!(), + }); + + match maybe_proof { + None | Some(H(_)) => Err(MerkleConstructionError::IndexOutOfBounds { count, index }), + Some(P(merkle_proof)) => Ok(IndexedMerkleProof { + index, + count, + merkle_proof, + #[cfg(any(feature = "once_cell", test))] + root_hash: OnceCell::new(), + }), + } + } + + /// Returns the index. + pub fn index(&self) -> u64 { + self.index + } + + /// Returns the total count of chunks. + pub fn count(&self) -> u64 { + self.count + } + + /// Returns the root hash of this proof (i.e. the index hashed with the Merkle root hash). + /// + /// Note that with the `once_cell` feature enabled (generally done by enabling the `std` + /// feature), the root hash is memoized, and hence calling this method is cheap after the first + /// call. Without `once_cell` enabled, every call to this method calculates the root hash. + pub fn root_hash(&self) -> Digest { + #[cfg(any(feature = "once_cell", test))] + return *self.root_hash.get_or_init(|| self.compute_root_hash()); + + #[cfg(not(any(feature = "once_cell", test)))] + self.compute_root_hash() + } + + /// Returns the full collection of hash digests of the proof. + pub fn merkle_proof(&self) -> &[Digest] { + &self.merkle_proof + } + + /// Attempts to verify self. 
+ pub fn verify(&self) -> Result<(), MerkleVerificationError> { + if self.index >= self.count { + return Err(MerkleVerificationError::IndexOutOfBounds { + count: self.count, + index: self.index, + }); + } + let expected_proof_length = self.compute_expected_proof_length(); + if self.merkle_proof.len() != expected_proof_length as usize { + return Err(MerkleVerificationError::UnexpectedProofLength { + count: self.count, + index: self.index, + expected_proof_length, + actual_proof_length: self.merkle_proof.len(), + }); + } + Ok(()) + } + + fn compute_root_hash(&self) -> Digest { + let IndexedMerkleProof { + count, + merkle_proof, + .. + } = self; + + let mut hashes = merkle_proof.iter(); + let raw_root = if let Some(leaf_hash) = hashes.next().cloned() { + // Compute whether to hash left or right for the elements of the Merkle proof. + // This gives a path to the value with the specified index. + // We represent this path as a sequence of 64 bits. 1 here means "hash right". + let mut path: u64 = 0; + let mut n = self.count; + let mut i = self.index; + while n > 1 { + path <<= 1; + let pivot = 1u64 << (63 - (n - 1).leading_zeros()); + if i < pivot { + n = pivot; + } else { + path |= 1; + n -= pivot; + i -= pivot; + } + } + + // Compute the raw Merkle root by hashing the proof from leaf hash up. + hashes.fold(leaf_hash, |acc, hash| { + let digest = if (path & 1) == 1 { + Digest::hash_pair(hash, acc) + } else { + Digest::hash_pair(acc, hash) + }; + path >>= 1; + digest + }) + } else { + Digest::SENTINEL_MERKLE_TREE + }; + + // The Merkle root is the hash of the count with the raw root. 
+ Digest::hash_merkle_root(*count, raw_root) + } + + // Proof lengths are never bigger than 65 is because we are using 64 bit counts + fn compute_expected_proof_length(&self) -> u8 { + if self.count == 0 { + return 0; + } + let mut l = 1; + let mut n = self.count; + let mut i = self.index; + while n > 1 { + let pivot = 1u64 << (63 - (n - 1).leading_zeros()); + if i < pivot { + n = pivot; + } else { + n -= pivot; + i -= pivot; + } + l += 1; + } + l + } + + #[cfg(test)] + pub fn inject_merkle_proof(&mut self, merkle_proof: Vec) { + self.merkle_proof = merkle_proof; + } +} + +#[cfg(test)] +mod tests { + use once_cell::sync::OnceCell; + use proptest::prelude::{prop_assert, prop_assert_eq}; + use proptest_attr_macro::proptest; + use rand::{distributions::Standard, Rng}; + + use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + Digest, IndexedMerkleProof, MerkleVerificationError, + }; + + fn random_indexed_merkle_proof() -> IndexedMerkleProof { + let mut rng = rand::thread_rng(); + let leaf_count: u64 = rng.gen_range(1..100); + let index = rng.gen_range(0..leaf_count); + let leaves: Vec = (0..leaf_count) + .map(|i| Digest::hash(i.to_le_bytes())) + .collect(); + IndexedMerkleProof::new(leaves.iter().cloned(), index) + .expect("should create indexed Merkle proof") + } + + #[test] + fn test_merkle_proofs() { + let mut rng = rand::thread_rng(); + for _ in 0..20 { + let leaf_count: u64 = rng.gen_range(1..100); + let index = rng.gen_range(0..leaf_count); + let leaves: Vec = (0..leaf_count) + .map(|i| Digest::hash(i.to_le_bytes())) + .collect(); + let root = Digest::hash_merkle_tree(leaves.clone()); + let indexed_merkle_proof = IndexedMerkleProof::new(leaves.clone(), index).unwrap(); + assert_eq!( + indexed_merkle_proof.compute_expected_proof_length(), + indexed_merkle_proof.merkle_proof().len() as u8 + ); + assert_eq!(indexed_merkle_proof.verify(), Ok(())); + assert_eq!(leaf_count, indexed_merkle_proof.count); + assert_eq!(leaves[index as usize], 
indexed_merkle_proof.merkle_proof[0]); + assert_eq!(root, indexed_merkle_proof.root_hash()); + } + } + + #[test] + fn out_of_bounds_index() { + let out_of_bounds_indexed_merkle_proof = IndexedMerkleProof { + index: 23, + count: 4, + merkle_proof: vec![Digest([0u8; 32]); 3], + root_hash: OnceCell::new(), + }; + assert_eq!( + out_of_bounds_indexed_merkle_proof.verify(), + Err(MerkleVerificationError::IndexOutOfBounds { + count: 4, + index: 23 + }) + ) + } + + #[test] + fn unexpected_proof_length() { + let out_of_bounds_indexed_merkle_proof = IndexedMerkleProof { + index: 1235, + count: 5647, + merkle_proof: vec![Digest([0u8; 32]); 13], + root_hash: OnceCell::new(), + }; + assert_eq!( + out_of_bounds_indexed_merkle_proof.verify(), + Err(MerkleVerificationError::UnexpectedProofLength { + count: 5647, + index: 1235, + expected_proof_length: 14, + actual_proof_length: 13 + }) + ) + } + + #[test] + fn empty_unexpected_proof_length() { + let out_of_bounds_indexed_merkle_proof = IndexedMerkleProof { + index: 0, + count: 0, + merkle_proof: vec![Digest([0u8; 32]); 3], + root_hash: OnceCell::new(), + }; + assert_eq!( + out_of_bounds_indexed_merkle_proof.verify(), + Err(MerkleVerificationError::IndexOutOfBounds { count: 0, index: 0 }) + ) + } + + #[test] + fn empty_out_of_bounds_index() { + let out_of_bounds_indexed_merkle_proof = IndexedMerkleProof { + index: 23, + count: 0, + merkle_proof: vec![], + root_hash: OnceCell::new(), + }; + assert_eq!( + out_of_bounds_indexed_merkle_proof.verify(), + Err(MerkleVerificationError::IndexOutOfBounds { + count: 0, + index: 23 + }) + ) + } + + #[test] + fn deep_proof_doesnt_kill_stack() { + const PROOF_LENGTH: usize = 63; + let indexed_merkle_proof = IndexedMerkleProof { + index: 42, + count: 1 << (PROOF_LENGTH - 1), + merkle_proof: vec![Digest([0u8; Digest::LENGTH]); PROOF_LENGTH], + root_hash: OnceCell::new(), + }; + let _hash = indexed_merkle_proof.root_hash(); + } + + #[test] + fn empty_proof() { + let empty_merkle_root = 
Digest::hash_merkle_tree(vec![]); + assert_eq!(empty_merkle_root, Digest::SENTINEL_MERKLE_TREE); + let indexed_merkle_proof = IndexedMerkleProof { + index: 0, + count: 0, + merkle_proof: vec![], + root_hash: OnceCell::new(), + }; + assert!(indexed_merkle_proof.verify().is_err()); + } + + #[proptest] + fn expected_proof_length_le_65(index: u64, count: u64) { + let indexed_merkle_proof = IndexedMerkleProof { + index, + count, + merkle_proof: vec![], + root_hash: OnceCell::new(), + }; + prop_assert!(indexed_merkle_proof.compute_expected_proof_length() <= 65); + } + + fn reference_root_from_proof(index: u64, count: u64, proof: &[Digest]) -> Digest { + fn compute_raw_root_from_proof(index: u64, leaf_count: u64, proof: &[Digest]) -> Digest { + if leaf_count == 0 { + return Digest::SENTINEL_MERKLE_TREE; + } + if leaf_count == 1 { + return proof[0]; + } + let half = 1u64 << (63 - (leaf_count - 1).leading_zeros()); + let last = proof.len() - 1; + if index < half { + let left = compute_raw_root_from_proof(index, half, &proof[..last]); + Digest::hash_pair(left, proof[last]) + } else { + let right = + compute_raw_root_from_proof(index - half, leaf_count - half, &proof[..last]); + Digest::hash_pair(proof[last], right) + } + } + + let raw_root = compute_raw_root_from_proof(index, count, proof); + Digest::hash_merkle_root(count, raw_root) + } + + /// Construct an `IndexedMerkleProof` with a proof of zero digests. 
+ fn test_indexed_merkle_proof(index: u64, count: u64) -> IndexedMerkleProof { + let mut indexed_merkle_proof = IndexedMerkleProof { + index, + count, + merkle_proof: vec![], + root_hash: OnceCell::new(), + }; + let expected_proof_length = indexed_merkle_proof.compute_expected_proof_length(); + indexed_merkle_proof.merkle_proof = rand::thread_rng() + .sample_iter(Standard) + .take(expected_proof_length as usize) + .collect(); + indexed_merkle_proof + } + + #[proptest] + fn root_from_proof_agrees_with_recursion(index: u64, count: u64) { + let indexed_merkle_proof = test_indexed_merkle_proof(index, count); + prop_assert_eq!( + indexed_merkle_proof.root_hash(), + reference_root_from_proof( + indexed_merkle_proof.index, + indexed_merkle_proof.count, + indexed_merkle_proof.merkle_proof(), + ), + "Result did not agree with reference implementation.", + ); + } + + #[test] + fn root_from_proof_agrees_with_recursion_2147483648_4294967297() { + let indexed_merkle_proof = test_indexed_merkle_proof(2147483648, 4294967297); + assert_eq!( + indexed_merkle_proof.root_hash(), + reference_root_from_proof( + indexed_merkle_proof.index, + indexed_merkle_proof.count, + indexed_merkle_proof.merkle_proof(), + ), + "Result did not agree with reference implementation.", + ); + } + + #[test] + fn serde_deserialization_of_malformed_proof_should_work() { + let indexed_merkle_proof = test_indexed_merkle_proof(10, 10); + + let json = serde_json::to_string(&indexed_merkle_proof).unwrap(); + assert_eq!( + indexed_merkle_proof, + serde_json::from_str::(&json) + .expect("should deserialize correctly") + ); + + // Check that proof with index greater than count deserializes correctly + let mut indexed_merkle_proof = test_indexed_merkle_proof(10, 10); + indexed_merkle_proof.index += 1; + let json = serde_json::to_string(&indexed_merkle_proof).unwrap(); + serde_json::from_str::(&json).expect("should deserialize correctly"); + + // Check that proof with incorrect length deserializes correctly + let mut 
indexed_merkle_proof = test_indexed_merkle_proof(10, 10); + indexed_merkle_proof.merkle_proof.push(Digest::hash("XXX")); + let json = serde_json::to_string(&indexed_merkle_proof).unwrap(); + serde_json::from_str::(&json).expect("should deserialize correctly"); + } + + #[test] + fn bytesrepr_deserialization_of_malformed_proof_should_work() { + let indexed_merkle_proof = test_indexed_merkle_proof(10, 10); + + let bytes = indexed_merkle_proof + .to_bytes() + .expect("should serialize correctly"); + IndexedMerkleProof::from_bytes(&bytes).expect("should deserialize correctly"); + + // Check that proof with index greater than count deserializes correctly + let mut indexed_merkle_proof = test_indexed_merkle_proof(10, 10); + indexed_merkle_proof.index += 1; + let bytes = indexed_merkle_proof + .to_bytes() + .expect("should serialize correctly"); + IndexedMerkleProof::from_bytes(&bytes).expect("should deserialize correctly"); + + // Check that proof with incorrect length deserializes correctly + let mut indexed_merkle_proof = test_indexed_merkle_proof(10, 10); + indexed_merkle_proof.merkle_proof.push(Digest::hash("XXX")); + let bytes = indexed_merkle_proof + .to_bytes() + .expect("should serialize correctly"); + IndexedMerkleProof::from_bytes(&bytes).expect("should deserialize correctly"); + } + + #[test] + fn bytesrepr_serialization() { + let indexed_merkle_proof = random_indexed_merkle_proof(); + bytesrepr::test_serialization_roundtrip(&indexed_merkle_proof); + } +} diff --git a/types/src/display_iter.rs b/types/src/display_iter.rs new file mode 100644 index 0000000000..00b23e8488 --- /dev/null +++ b/types/src/display_iter.rs @@ -0,0 +1,40 @@ +use core::{ + cell::RefCell, + fmt::{self, Display, Formatter}, +}; + +/// A helper to allow `Display` printing the items of an iterator with a comma and space between +/// each. +#[derive(Debug)] +pub struct DisplayIter(RefCell>); + +impl DisplayIter { + /// Returns a new `DisplayIter`. 
+ pub fn new(item: T) -> Self { + DisplayIter(RefCell::new(Some(item))) + } +} + +impl Display for DisplayIter +where + I: IntoIterator, + T: Display, +{ + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + if let Some(src) = self.0.borrow_mut().take() { + let mut first = true; + for item in src.into_iter().take(f.width().unwrap_or(usize::MAX)) { + if first { + first = false; + write!(f, "{}", item)?; + } else { + write!(f, ", {}", item)?; + } + } + + Ok(()) + } else { + write!(f, "DisplayIter:GONE") + } + } +} diff --git a/types/src/era_id.rs b/types/src/era_id.rs index 78bff9755f..61c855fa8b 100644 --- a/types/src/era_id.rs +++ b/types/src/era_id.rs @@ -1,6 +1,3 @@ -// TODO - remove once schemars stops causing warning. -#![allow(clippy::field_reassign_with_default)] - use alloc::vec::Vec; use core::{ fmt::{self, Debug, Display, Formatter}, @@ -9,15 +6,16 @@ use core::{ str::FromStr, }; +#[cfg(feature = "datasize")] use datasize::DataSize; -use rand::{ - distributions::{Distribution, Standard}, - Rng, -}; -#[cfg(feature = "std")] +#[cfg(any(feature = "testing", test))] +use rand::Rng; +#[cfg(feature = "json-schema")] use schemars::JsonSchema; use serde::{Deserialize, Serialize}; +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; use crate::{ bytesrepr::{self, FromBytes, ToBytes}, CLType, CLTyped, @@ -25,32 +23,29 @@ use crate::{ /// Era ID newtype. #[derive( - DataSize, - Debug, - Default, - Clone, - Copy, - Hash, - PartialEq, - Eq, - PartialOrd, - Ord, - Serialize, - Deserialize, + Debug, Default, Clone, Copy, Hash, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize, )] -#[cfg_attr(feature = "std", derive(JsonSchema))] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[cfg_attr(feature = "testing", derive(proptest_derive::Arbitrary))] #[serde(deny_unknown_fields)] pub struct EraId(u64); impl EraId { /// Maximum possible value an [`EraId`] can hold. 
- pub const MAX: EraId = EraId(u64::max_value()); + pub const MAX: EraId = EraId(u64::MAX); /// Creates new [`EraId`] instance. pub const fn new(value: u64) -> EraId { EraId(value) } + /// Returns an iterator over era IDs of `num_eras` future eras starting from current. + pub fn iter(&self, num_eras: u64) -> impl Iterator { + let current_era_id = self.0; + (current_era_id..current_era_id + num_eras).map(EraId) + } + /// Returns an iterator over era IDs of `num_eras` future eras starting from current, plus the /// provided one. pub fn iter_inclusive(&self, num_eras: u64) -> impl Iterator { @@ -58,10 +53,41 @@ impl EraId { (current_era_id..=current_era_id + num_eras).map(EraId) } + /// Returns an iterator over a range of era IDs, starting from `start` and ending at `end`, + /// inclusive. + pub fn iter_range_inclusive( + start: EraId, + end: EraId, + ) -> impl DoubleEndedIterator { + (start.0..=end.0).map(EraId) + } + + /// Increments the era. + /// + /// For `u64::MAX`, this returns `u64::MAX` again: We want to make sure this doesn't panic, and + /// that era number will never be reached in practice. + pub fn increment(&mut self) { + self.0 = self.0.saturating_add(1); + } + /// Returns a successor to current era. - #[allow(clippy::integer_arithmetic)] // The caller must make sure this doesn't overflow. + /// + /// For `u64::MAX`, this returns `u64::MAX` again: We want to make sure this doesn't panic, and + /// that era number will never be reached in practice. + #[must_use] pub fn successor(self) -> EraId { - EraId::from(self.0 + 1) + EraId::from(self.0.saturating_add(1)) + } + + /// Returns the predecessor to current era, or `None` if genesis. 
+ #[must_use] + pub fn predecessor(self) -> Option { + self.0.checked_sub(1).map(EraId) + } + + /// Returns the current era plus `x`, or `None` if that would overflow + pub fn checked_add(&self, x: u64) -> Option { + self.0.checked_add(x).map(EraId) } /// Returns the current era minus `x`, or `None` if that would be less than `0`. @@ -70,16 +96,19 @@ impl EraId { } /// Returns the current era minus `x`, or `0` if that would be less than `0`. + #[must_use] pub fn saturating_sub(&self, x: u64) -> EraId { EraId::from(self.0.saturating_sub(x)) } /// Returns the current era plus `x`, or [`EraId::MAX`] if overflow would occur. - pub fn saturating_add(self, rhs: EraId) -> EraId { - EraId(self.0.saturating_add(rhs.0)) + #[must_use] + pub fn saturating_add(self, rhs: u64) -> EraId { + EraId(self.0.saturating_add(rhs)) } /// Returns the current era times `x`, or [`EraId::MAX`] if overflow would occur. + #[must_use] pub fn saturating_mul(&self, x: u64) -> EraId { EraId::from(self.0.saturating_mul(x)) } @@ -94,17 +123,18 @@ impl EraId { self.0.to_le_bytes() } - /// Creates new [`EraId`] instance from little endian bytes. - pub(crate) fn from_le_bytes(bytes: [u8; 8]) -> EraId { - EraId::from(u64::from_le_bytes(bytes)) - } - /// Returns a raw value held by this [`EraId`] instance. /// /// You should prefer [`From`] trait implementations over this method where possible. pub fn value(self) -> u64 { self.0 } + + /// Returns a random `EraId`. + #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + EraId(rng.gen_range(0..1_000_000)) + } } impl FromStr for EraId { @@ -118,7 +148,7 @@ impl FromStr for EraId { impl Add for EraId { type Output = EraId; - #[allow(clippy::integer_arithmetic)] // The caller must make sure this doesn't overflow. + #[allow(clippy::arithmetic_side_effects)] // The caller must make sure this doesn't overflow. 
fn add(self, x: u64) -> EraId { EraId::from(self.0 + x) } @@ -133,7 +163,7 @@ impl AddAssign for EraId { impl Sub for EraId { type Output = EraId; - #[allow(clippy::integer_arithmetic)] // The caller must make sure this doesn't overflow. + #[allow(clippy::arithmetic_side_effects)] // The caller must make sure this doesn't overflow. fn sub(self, x: u64) -> EraId { EraId::from(self.0 - x) } @@ -165,6 +195,12 @@ impl ToBytes for EraId { fn serialized_length(&self) -> usize { self.0.serialized_length() } + + #[inline(always)] + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.0.write_bytes(writer)?; + Ok(()) + } } impl FromBytes for EraId { @@ -181,12 +217,6 @@ impl CLTyped for EraId { } } -impl Distribution for Standard { - fn sample(&self, rng: &mut R) -> EraId { - EraId(rng.gen()) - } -} - #[cfg(test)] mod tests { use proptest::prelude::*; @@ -202,9 +232,9 @@ mod tests { let window: Vec = current_era.iter_inclusive(auction_delay).collect(); assert_eq!(window.len(), auction_delay as usize + 1); - assert_eq!(window.get(0), Some(¤t_era)); + assert_eq!(window.first(), Some(¤t_era)); assert_eq!( - window.iter().rev().next(), + window.iter().next_back(), Some(&(current_era + auction_delay)) ); } @@ -216,6 +246,14 @@ mod tests { assert!(!expected_initial_era_id.successor().is_genesis()) } + #[test] + fn should_increment_era_id() { + let mut era = EraId::from(0); + assert!(era.is_genesis()); + era.increment(); + assert_eq!(era.value(), 1, "should have incremented to 1"); + } + proptest! { #[test] fn bytesrepr_roundtrip(era_id in era_id_arb()) { diff --git a/types/src/execution.rs b/types/src/execution.rs new file mode 100644 index 0000000000..f1f190ad44 --- /dev/null +++ b/types/src/execution.rs @@ -0,0 +1,17 @@ +//! Types related to execution of deploys. 
+ +mod effects; +mod execution_result; +pub mod execution_result_v1; +mod execution_result_v2; +mod transform; +mod transform_error; +mod transform_kind; + +pub use effects::Effects; +pub use execution_result::ExecutionResult; +pub use execution_result_v1::ExecutionResultV1; +pub use execution_result_v2::ExecutionResultV2; +pub use transform::TransformV2; +pub use transform_error::TransformError; +pub use transform_kind::{TransformInstruction, TransformKindV2}; diff --git a/types/src/execution/effects.rs b/types/src/execution/effects.rs new file mode 100644 index 0000000000..e68d076a61 --- /dev/null +++ b/types/src/execution/effects.rs @@ -0,0 +1,105 @@ +use alloc::vec::Vec; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::Rng; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +#[cfg(any(feature = "testing", test))] +use super::TransformKindV2; +use super::TransformV2; +use crate::bytesrepr::{self, FromBytes, ToBytes}; + +/// A log of all transforms produced during execution. +#[derive(Debug, Clone, Eq, Default, PartialEq, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub struct Effects(Vec); + +impl Effects { + /// Constructs a new, empty `Effects`. + pub const fn new() -> Self { + Effects(vec![]) + } + + /// Returns a reference to the transforms. + pub fn transforms(&self) -> &[TransformV2] { + &self.0 + } + + /// Appends a transform. + pub fn push(&mut self, transform: TransformV2) { + self.0.push(transform) + } + + /// Moves all elements from `other` into `self`. + pub fn append(&mut self, mut other: Self) { + self.0.append(&mut other.0); + } + + /// Returns `true` if there are no transforms recorded. + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } + + /// Returns the number of transforms recorded. 
+ pub fn len(&self) -> usize { + self.0.len() + } + + /// Consumes `self`, returning the wrapped vec. + pub fn value(self) -> Vec { + self.0 + } + + /// Returns a random `Effects`. + #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut R) -> Self { + let mut effects = Effects::new(); + let transform_count = rng.gen_range(0..6); + for _ in 0..transform_count { + effects.push(TransformV2::new(rng.gen(), TransformKindV2::random(rng))); + } + effects + } +} + +impl ToBytes for Effects { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.0.write_bytes(writer) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } +} + +impl FromBytes for Effects { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (transforms, remainder) = Vec::::from_bytes(bytes)?; + Ok((Effects(transforms), remainder)) + } +} + +#[cfg(test)] +mod tests { + use crate::testing::TestRng; + + use super::*; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + let effects = Effects::random(rng); + bytesrepr::test_serialization_roundtrip(&effects); + } +} diff --git a/types/src/execution/execution_result.rs b/types/src/execution/execution_result.rs new file mode 100644 index 0000000000..04b9ab1273 --- /dev/null +++ b/types/src/execution/execution_result.rs @@ -0,0 +1,210 @@ +use alloc::{boxed::Box, string::String, vec::Vec}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::distributions::Distribution; +#[cfg(any(feature = "testing", test))] +use rand::Rng; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; +use tracing::error; + +use super::{ExecutionResultV1, ExecutionResultV2}; +#[cfg(any(feature = "testing", test))] +use 
crate::testing::TestRng; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + Transfer, U512, +}; + +const V1_TAG: u8 = 0; +const V2_TAG: u8 = 1; + +/// The versioned result of executing a single deploy. +#[derive(Clone, Eq, PartialEq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub enum ExecutionResult { + /// Version 1 of execution result type. + #[serde(rename = "Version1")] + V1(ExecutionResultV1), + /// Version 2 of execution result type. + #[serde(rename = "Version2")] + V2(Box), +} + +impl ExecutionResult { + /// Returns cost. + pub fn cost(&self) -> U512 { + match self { + ExecutionResult::V1(result) => result.cost(), + ExecutionResult::V2(result) => result.cost, + } + } + + /// Returns consumed amount. + pub fn consumed(&self) -> U512 { + match self { + ExecutionResult::V1(result) => result.cost(), + ExecutionResult::V2(result) => result.consumed.value(), + } + } + + /// Returns refund amount. + pub fn refund(&self) -> Option { + match self { + ExecutionResult::V1(_) => None, + ExecutionResult::V2(result) => Some(result.refund), + } + } + + /// Returns a random ExecutionResult. + #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + if rng.gen_bool(0.5) { + Self::V1(rand::distributions::Standard.sample(rng)) + } else { + Self::V2(Box::new(ExecutionResultV2::random(rng))) + } + } + + /// Returns the error message, if any. + pub fn error_message(&self) -> Option { + match self { + ExecutionResult::V1(v1) => match v1 { + ExecutionResultV1::Failure { error_message, .. } => Some(error_message.clone()), + ExecutionResultV1::Success { .. } => None, + }, + ExecutionResult::V2(v2) => v2.error_message.clone(), + } + } + + /// Returns transfers, if any. 
+ pub fn transfers(&self) -> Vec { + match self { + ExecutionResult::V1(_) => { + vec![] + } + ExecutionResult::V2(execution_result) => execution_result.transfers.clone(), + } + } +} + +impl From for ExecutionResult { + fn from(value: ExecutionResultV1) -> Self { + ExecutionResult::V1(value) + } +} + +impl From for ExecutionResult { + fn from(value: ExecutionResultV2) -> Self { + ExecutionResult::V2(Box::new(value)) + } +} + +impl ToBytes for ExecutionResult { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + + match self { + ExecutionResult::V1(result) => result.serialized_length(), + ExecutionResult::V2(result) => result.serialized_length(), + } + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + ExecutionResult::V1(result) => { + V1_TAG.write_bytes(writer)?; + result.write_bytes(writer) + } + ExecutionResult::V2(result) => { + V2_TAG.write_bytes(writer)?; + result.write_bytes(writer) + } + } + } +} + +impl FromBytes for ExecutionResult { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + if bytes.is_empty() { + error!("FromBytes for ExecutionResult: bytes length should not be 0"); + } + let (tag, remainder) = match u8::from_bytes(bytes) { + Ok((tag, rem)) => (tag, rem), + Err(err) => { + error!(%err, "FromBytes for ExecutionResult"); + return Err(err); + } + }; + match tag { + V1_TAG => { + let (result, remainder) = ExecutionResultV1::from_bytes(remainder)?; + Ok((ExecutionResult::V1(result), remainder)) + } + V2_TAG => { + let (result, remainder) = ExecutionResultV2::from_bytes(remainder)?; + Ok((ExecutionResult::V2(Box::new(result)), remainder)) + } + _ => { + error!(%tag, rem_len = remainder.len(), "FromBytes for ExecutionResult: unknown tag"); + Err(bytesrepr::Error::Formatting) + } + } + } +} + +#[cfg(test)] +mod 
tests { + use rand::Rng; + + use super::*; + use crate::testing::TestRng; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + let execution_result = ExecutionResult::V1(rng.gen()); + bytesrepr::test_serialization_roundtrip(&execution_result); + let execution_result = ExecutionResult::from(ExecutionResultV2::random(rng)); + bytesrepr::test_serialization_roundtrip(&execution_result); + } + + #[test] + fn bincode_roundtrip() { + let rng = &mut TestRng::new(); + let execution_result = ExecutionResult::V1(rng.gen()); + let serialized = bincode::serialize(&execution_result).unwrap(); + let deserialized = bincode::deserialize(&serialized).unwrap(); + assert_eq!(execution_result, deserialized); + + let execution_result = ExecutionResult::from(ExecutionResultV2::random(rng)); + let serialized = bincode::serialize(&execution_result).unwrap(); + let deserialized = bincode::deserialize(&serialized).unwrap(); + assert_eq!(execution_result, deserialized); + } + + #[test] + fn json_roundtrip() { + let rng = &mut TestRng::new(); + let execution_result = ExecutionResult::V1(rng.gen()); + let serialized = serde_json::to_string(&execution_result).unwrap(); + let deserialized = serde_json::from_str(&serialized).unwrap(); + assert_eq!(execution_result, deserialized); + + let execution_result = ExecutionResult::from(ExecutionResultV2::random(rng)); + let serialized = serde_json::to_string(&execution_result).unwrap(); + println!("{:#}", serialized); + let deserialized = serde_json::from_str(&serialized).unwrap(); + assert_eq!(execution_result, deserialized); + } +} diff --git a/types/src/execution/execution_result_v1.rs b/types/src/execution/execution_result_v1.rs new file mode 100644 index 0000000000..c7cd8cb62b --- /dev/null +++ b/types/src/execution/execution_result_v1.rs @@ -0,0 +1,808 @@ +//! Types for reporting results of execution pre `casper-node` v2.0.0. 
+ +use core::convert::TryFrom; + +use alloc::{boxed::Box, string::String, vec::Vec}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +use num::{FromPrimitive, ToPrimitive}; +use num_derive::{FromPrimitive, ToPrimitive}; +#[cfg(any(feature = "testing", test))] +use rand::{ + distributions::{Distribution, Standard}, + seq::SliceRandom, + Rng, +}; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use crate::{ + account::AccountHash, + bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + system::auction::{Bid, BidKind, EraInfo, UnbondingPurse, WithdrawPurse}, + CLValue, DeployInfo, Key, TransferAddr, TransferV1, U128, U256, U512, +}; + +#[derive(FromPrimitive, ToPrimitive, Debug)] +#[repr(u8)] +enum ExecutionResultTag { + Failure = 0, + Success = 1, +} + +impl TryFrom for ExecutionResultTag { + type Error = bytesrepr::Error; + + fn try_from(value: u8) -> Result { + FromPrimitive::from_u8(value).ok_or(bytesrepr::Error::Formatting) + } +} + +#[derive(FromPrimitive, ToPrimitive, Debug)] +#[repr(u8)] +enum OpTag { + Read = 0, + Write = 1, + Add = 2, + NoOp = 3, + Prune = 4, +} + +impl TryFrom for OpTag { + type Error = bytesrepr::Error; + + fn try_from(value: u8) -> Result { + FromPrimitive::from_u8(value).ok_or(bytesrepr::Error::Formatting) + } +} + +#[derive(FromPrimitive, ToPrimitive, Debug)] +#[repr(u8)] +enum TransformTag { + Identity = 0, + WriteCLValue = 1, + WriteAccount = 2, + WriteByteCode = 3, + WriteContract = 4, + WritePackage = 5, + WriteDeployInfo = 6, + WriteTransfer = 7, + WriteEraInfo = 8, + WriteBid = 9, + WriteWithdraw = 10, + AddInt32 = 11, + AddUInt64 = 12, + AddUInt128 = 13, + AddUInt256 = 14, + AddUInt512 = 15, + AddKeys = 16, + Failure = 17, + WriteUnbonding = 18, + WriteAddressableEntity = 19, + Prune = 20, + WriteBidKind = 21, +} + +impl TryFrom for TransformTag { + type Error = bytesrepr::Error; + + fn try_from(value: u8) -> Result { + 
FromPrimitive::from_u8(value).ok_or(bytesrepr::Error::Formatting) + } +} + +/// The result of executing a single deploy. +#[derive(Clone, Eq, PartialEq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub enum ExecutionResultV1 { + /// The result of a failed execution. + Failure { + /// The effect of executing the deploy. + effect: ExecutionEffect, + /// A record of version 1 Transfers performed while executing the deploy. + transfers: Vec, + /// The cost of executing the deploy. + cost: U512, + /// The error message associated with executing the deploy. + error_message: String, + }, + /// The result of a successful execution. + Success { + /// The effect of executing the deploy. + effect: ExecutionEffect, + /// A record of Transfers performed while executing the deploy. + transfers: Vec, + /// The cost of executing the deploy. + cost: U512, + }, +} + +impl ExecutionResultV1 { + /// Returns cost amount. + pub fn cost(&self) -> U512 { + match self { + ExecutionResultV1::Failure { cost, .. } | ExecutionResultV1::Success { cost, .. 
} => { + *cost + } + } + } +} + +#[cfg(any(feature = "testing", test))] +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> ExecutionResultV1 { + let op_count = rng.gen_range(0..6); + let mut operations = Vec::new(); + for _ in 0..op_count { + let op = [OpKind::Read, OpKind::Add, OpKind::NoOp, OpKind::Write] + .choose(rng) + .unwrap(); + operations.push(Operation { + key: rng.gen::().to_string(), + kind: *op, + }); + } + + let transform_count = rng.gen_range(0..6); + let mut transforms = Vec::new(); + for _ in 0..transform_count { + transforms.push(TransformV1 { + key: rng.gen::().to_string(), + transform: rng.gen(), + }); + } + + let execution_effect = ExecutionEffect { + operations, + transforms, + }; + + let transfer_count = rng.gen_range(0..6); + let mut transfers = Vec::new(); + for _ in 0..transfer_count { + transfers.push(TransferAddr::new(rng.gen())) + } + + if rng.gen() { + ExecutionResultV1::Failure { + effect: execution_effect, + transfers, + cost: rng.gen::().into(), + error_message: format!("Error message {}", rng.gen::()), + } + } else { + ExecutionResultV1::Success { + effect: execution_effect, + transfers, + cost: rng.gen::().into(), + } + } + } +} + +impl ToBytes for ExecutionResultV1 { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + ExecutionResultV1::Failure { + effect, + transfers, + cost, + error_message, + } => { + (ExecutionResultTag::Failure as u8).write_bytes(writer)?; + effect.write_bytes(writer)?; + transfers.write_bytes(writer)?; + cost.write_bytes(writer)?; + error_message.write_bytes(writer) + } + ExecutionResultV1::Success { + effect, + transfers, + cost, + } => { + (ExecutionResultTag::Success as u8).write_bytes(writer)?; + effect.write_bytes(writer)?; + transfers.write_bytes(writer)?; + cost.write_bytes(writer) + } + } + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + 
Ok(buffer) + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + + match self { + ExecutionResultV1::Failure { + effect, + transfers, + cost, + error_message, + } => { + effect.serialized_length() + + transfers.serialized_length() + + cost.serialized_length() + + error_message.serialized_length() + } + ExecutionResultV1::Success { + effect, + transfers, + cost, + } => { + effect.serialized_length() + + transfers.serialized_length() + + cost.serialized_length() + } + } + } +} + +impl FromBytes for ExecutionResultV1 { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + match TryFrom::try_from(tag)? { + ExecutionResultTag::Failure => { + let (effect, remainder) = ExecutionEffect::from_bytes(remainder)?; + let (transfers, remainder) = Vec::::from_bytes(remainder)?; + let (cost, remainder) = U512::from_bytes(remainder)?; + let (error_message, remainder) = String::from_bytes(remainder)?; + let execution_result = ExecutionResultV1::Failure { + effect, + transfers, + cost, + error_message, + }; + Ok((execution_result, remainder)) + } + ExecutionResultTag::Success => { + let (execution_effect, remainder) = ExecutionEffect::from_bytes(remainder)?; + let (transfers, remainder) = Vec::::from_bytes(remainder)?; + let (cost, remainder) = U512::from_bytes(remainder)?; + let execution_result = ExecutionResultV1::Success { + effect: execution_effect, + transfers, + cost, + }; + Ok((execution_result, remainder)) + } + } + } +} + +/// The sequence of execution transforms from a single deploy. +#[derive(Clone, Eq, PartialEq, Serialize, Deserialize, Default, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub struct ExecutionEffect { + /// The resulting operations. + pub operations: Vec, + /// The sequence of execution transforms. 
+ pub transforms: Vec, +} + +impl ToBytes for ExecutionEffect { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.operations.write_bytes(writer)?; + self.transforms.write_bytes(writer) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.operations.serialized_length() + self.transforms.serialized_length() + } +} + +impl FromBytes for ExecutionEffect { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (operations, remainder) = Vec::::from_bytes(bytes)?; + let (transforms, remainder) = Vec::::from_bytes(remainder)?; + let json_effects = ExecutionEffect { + operations, + transforms, + }; + Ok((json_effects, remainder)) + } +} + +/// An operation performed while executing a deploy. +#[derive(Clone, Eq, PartialEq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub struct Operation { + /// The formatted string of the `Key`. + pub key: String, + /// The type of operation. 
+ pub kind: OpKind, +} + +impl ToBytes for Operation { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.key.write_bytes(writer)?; + self.kind.write_bytes(writer) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.key.serialized_length() + self.kind.serialized_length() + } +} + +impl FromBytes for Operation { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (key, remainder) = String::from_bytes(bytes)?; + let (kind, remainder) = OpKind::from_bytes(remainder)?; + let operation = Operation { key, kind }; + Ok((operation, remainder)) + } +} + +/// The type of operation performed while executing a deploy. +#[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub enum OpKind { + /// A read operation. + Read, + /// A write operation. + Write, + /// An addition. + Add, + /// An operation which has no effect. + NoOp, + /// A prune operation. 
+ Prune, +} + +impl OpKind { + fn tag(&self) -> OpTag { + match self { + OpKind::Read => OpTag::Read, + OpKind::Write => OpTag::Write, + OpKind::Add => OpTag::Add, + OpKind::NoOp => OpTag::NoOp, + OpKind::Prune => OpTag::Prune, + } + } +} + +impl ToBytes for OpKind { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + let tag_byte = self.tag().to_u8().ok_or(bytesrepr::Error::Formatting)?; + tag_byte.write_bytes(writer) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + } +} + +impl FromBytes for OpKind { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + match TryFrom::try_from(tag)? { + OpTag::Read => Ok((OpKind::Read, remainder)), + OpTag::Write => Ok((OpKind::Write, remainder)), + OpTag::Add => Ok((OpKind::Add, remainder)), + OpTag::NoOp => Ok((OpKind::NoOp, remainder)), + OpTag::Prune => Ok((OpKind::Prune, remainder)), + } + } +} + +/// A transformation performed while executing a deploy. +#[derive(Clone, Eq, PartialEq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub struct TransformV1 { + /// The formatted string of the `Key`. + pub key: String, + /// The transformation. 
+ pub transform: TransformKindV1, +} + +impl ToBytes for TransformV1 { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.key.write_bytes(writer)?; + self.transform.write_bytes(writer) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.key.serialized_length() + self.transform.serialized_length() + } +} + +impl FromBytes for TransformV1 { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (key, remainder) = String::from_bytes(bytes)?; + let (transform, remainder) = TransformKindV1::from_bytes(remainder)?; + let transform_entry = TransformV1 { key, transform }; + Ok((transform_entry, remainder)) + } +} + +/// The actual transformation performed while executing a deploy. +#[derive(Clone, Eq, PartialEq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub enum TransformKindV1 { + /// A transform having no effect. + Identity, + /// Writes the given CLValue to global state. + WriteCLValue(CLValue), + /// Writes the given Account to global state. + WriteAccount(AccountHash), + /// Writes a smart contract as Wasm to global state. + WriteContractWasm, + /// Writes a smart contract to global state. + WriteContract, + /// Writes a smart contract package to global state. + WriteContractPackage, + /// Writes the given DeployInfo to global state. + WriteDeployInfo(DeployInfo), + /// Writes the given EraInfo to global state. + WriteEraInfo(EraInfo), + /// Writes the given version 1 Transfer to global state. + WriteTransfer(TransferV1), + /// Writes the given Bid to global state. + WriteBid(Box), + /// Writes the given Withdraw to global state. + WriteWithdraw(Vec), + /// Adds the given `i32`. + AddInt32(i32), + /// Adds the given `u64`. 
+ AddUInt64(u64), + /// Adds the given `U128`. + AddUInt128(U128), + /// Adds the given `U256`. + AddUInt256(U256), + /// Adds the given `U512`. + AddUInt512(U512), + /// Adds the given collection of named keys. + AddKeys(Vec), + /// A failed transformation, containing an error message. + Failure(String), + /// Writes the given Unbonding to global state. + WriteUnbonding(Vec), + /// Writes the addressable entity to global state. + WriteAddressableEntity, + /// Removes pathing to keyed value within global state. This is a form of soft delete; the + /// underlying value remains in global state and is reachable from older global state root + /// hashes where it was included in the hash up. + Prune(Key), + /// Writes the given BidKind to global state. + WriteBidKind(BidKind), +} + +impl ToBytes for TransformKindV1 { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + TransformKindV1::Identity => (TransformTag::Identity as u8).write_bytes(writer), + TransformKindV1::WriteCLValue(value) => { + (TransformTag::WriteCLValue as u8).write_bytes(writer)?; + value.write_bytes(writer) + } + TransformKindV1::WriteAccount(account_hash) => { + (TransformTag::WriteAccount as u8).write_bytes(writer)?; + account_hash.write_bytes(writer) + } + TransformKindV1::WriteContractWasm => { + (TransformTag::WriteByteCode as u8).write_bytes(writer) + } + TransformKindV1::WriteContract => { + (TransformTag::WriteContract as u8).write_bytes(writer) + } + TransformKindV1::WriteContractPackage => { + (TransformTag::WritePackage as u8).write_bytes(writer) + } + TransformKindV1::WriteDeployInfo(deploy_info) => { + (TransformTag::WriteDeployInfo as u8).write_bytes(writer)?; + deploy_info.write_bytes(writer) + } + TransformKindV1::WriteEraInfo(era_info) => { + (TransformTag::WriteEraInfo as u8).write_bytes(writer)?; + era_info.write_bytes(writer) + } + TransformKindV1::WriteTransfer(transfer) => { + (TransformTag::WriteTransfer as u8).write_bytes(writer)?; + 
transfer.write_bytes(writer) + } + TransformKindV1::WriteBid(bid) => { + (TransformTag::WriteBid as u8).write_bytes(writer)?; + bid.write_bytes(writer) + } + TransformKindV1::WriteWithdraw(unbonding_purses) => { + (TransformTag::WriteWithdraw as u8).write_bytes(writer)?; + unbonding_purses.write_bytes(writer) + } + TransformKindV1::AddInt32(value) => { + (TransformTag::AddInt32 as u8).write_bytes(writer)?; + value.write_bytes(writer) + } + TransformKindV1::AddUInt64(value) => { + (TransformTag::AddUInt64 as u8).write_bytes(writer)?; + value.write_bytes(writer) + } + TransformKindV1::AddUInt128(value) => { + (TransformTag::AddUInt128 as u8).write_bytes(writer)?; + value.write_bytes(writer) + } + TransformKindV1::AddUInt256(value) => { + (TransformTag::AddUInt256 as u8).write_bytes(writer)?; + value.write_bytes(writer) + } + TransformKindV1::AddUInt512(value) => { + (TransformTag::AddUInt512 as u8).write_bytes(writer)?; + value.write_bytes(writer) + } + TransformKindV1::AddKeys(value) => { + (TransformTag::AddKeys as u8).write_bytes(writer)?; + value.write_bytes(writer) + } + TransformKindV1::Failure(value) => { + (TransformTag::Failure as u8).write_bytes(writer)?; + value.write_bytes(writer) + } + TransformKindV1::WriteUnbonding(value) => { + (TransformTag::WriteUnbonding as u8).write_bytes(writer)?; + value.write_bytes(writer) + } + TransformKindV1::WriteAddressableEntity => { + (TransformTag::WriteAddressableEntity as u8).write_bytes(writer) + } + TransformKindV1::Prune(value) => { + (TransformTag::Prune as u8).write_bytes(writer)?; + value.write_bytes(writer) + } + TransformKindV1::WriteBidKind(value) => { + (TransformTag::WriteBidKind as u8).write_bytes(writer)?; + value.write_bytes(writer) + } + } + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + let body_len = match self { + TransformKindV1::Prune(key) => 
key.serialized_length(), + TransformKindV1::WriteCLValue(value) => value.serialized_length(), + TransformKindV1::WriteAccount(value) => value.serialized_length(), + TransformKindV1::WriteDeployInfo(value) => value.serialized_length(), + TransformKindV1::WriteEraInfo(value) => value.serialized_length(), + TransformKindV1::WriteTransfer(value) => value.serialized_length(), + TransformKindV1::AddInt32(value) => value.serialized_length(), + TransformKindV1::AddUInt64(value) => value.serialized_length(), + TransformKindV1::AddUInt128(value) => value.serialized_length(), + TransformKindV1::AddUInt256(value) => value.serialized_length(), + TransformKindV1::AddUInt512(value) => value.serialized_length(), + TransformKindV1::AddKeys(value) => value.serialized_length(), + TransformKindV1::Failure(value) => value.serialized_length(), + TransformKindV1::Identity + | TransformKindV1::WriteContractWasm + | TransformKindV1::WriteContract + | TransformKindV1::WriteContractPackage + | TransformKindV1::WriteAddressableEntity => 0, + TransformKindV1::WriteBid(value) => value.serialized_length(), + TransformKindV1::WriteBidKind(value) => value.serialized_length(), + TransformKindV1::WriteWithdraw(value) => value.serialized_length(), + TransformKindV1::WriteUnbonding(value) => value.serialized_length(), + }; + U8_SERIALIZED_LENGTH + body_len + } +} + +impl FromBytes for TransformKindV1 { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + match TryFrom::try_from(tag)? 
{ + TransformTag::Identity => Ok((TransformKindV1::Identity, remainder)), + TransformTag::WriteCLValue => { + let (cl_value, remainder) = CLValue::from_bytes(remainder)?; + Ok((TransformKindV1::WriteCLValue(cl_value), remainder)) + } + TransformTag::WriteAccount => { + let (account_hash, remainder) = AccountHash::from_bytes(remainder)?; + Ok((TransformKindV1::WriteAccount(account_hash), remainder)) + } + TransformTag::WriteByteCode => Ok((TransformKindV1::WriteContractWasm, remainder)), + TransformTag::WriteContract => Ok((TransformKindV1::WriteContract, remainder)), + TransformTag::WritePackage => Ok((TransformKindV1::WriteContractPackage, remainder)), + TransformTag::WriteDeployInfo => { + let (deploy_info, remainder) = DeployInfo::from_bytes(remainder)?; + Ok((TransformKindV1::WriteDeployInfo(deploy_info), remainder)) + } + TransformTag::WriteEraInfo => { + let (era_info, remainder) = EraInfo::from_bytes(remainder)?; + Ok((TransformKindV1::WriteEraInfo(era_info), remainder)) + } + TransformTag::WriteTransfer => { + let (transfer, remainder) = TransferV1::from_bytes(remainder)?; + Ok((TransformKindV1::WriteTransfer(transfer), remainder)) + } + TransformTag::AddInt32 => { + let (value_i32, remainder) = i32::from_bytes(remainder)?; + Ok((TransformKindV1::AddInt32(value_i32), remainder)) + } + TransformTag::AddUInt64 => { + let (value_u64, remainder) = u64::from_bytes(remainder)?; + Ok((TransformKindV1::AddUInt64(value_u64), remainder)) + } + TransformTag::AddUInt128 => { + let (value_u128, remainder) = U128::from_bytes(remainder)?; + Ok((TransformKindV1::AddUInt128(value_u128), remainder)) + } + TransformTag::AddUInt256 => { + let (value_u256, remainder) = U256::from_bytes(remainder)?; + Ok((TransformKindV1::AddUInt256(value_u256), remainder)) + } + TransformTag::AddUInt512 => { + let (value_u512, remainder) = U512::from_bytes(remainder)?; + Ok((TransformKindV1::AddUInt512(value_u512), remainder)) + } + TransformTag::AddKeys => { + let (value, remainder) = 
Vec::::from_bytes(remainder)?; + Ok((TransformKindV1::AddKeys(value), remainder)) + } + TransformTag::Failure => { + let (value, remainder) = String::from_bytes(remainder)?; + Ok((TransformKindV1::Failure(value), remainder)) + } + TransformTag::WriteBid => { + let (bid, remainder) = Bid::from_bytes(remainder)?; + Ok((TransformKindV1::WriteBid(Box::new(bid)), remainder)) + } + TransformTag::WriteWithdraw => { + let (withdraw_purses, remainder) = + as FromBytes>::from_bytes(remainder)?; + Ok((TransformKindV1::WriteWithdraw(withdraw_purses), remainder)) + } + TransformTag::WriteUnbonding => { + let (unbonding_purses, remainder) = + as FromBytes>::from_bytes(remainder)?; + Ok((TransformKindV1::WriteUnbonding(unbonding_purses), remainder)) + } + TransformTag::WriteAddressableEntity => { + Ok((TransformKindV1::WriteAddressableEntity, remainder)) + } + TransformTag::Prune => { + let (key, remainder) = Key::from_bytes(remainder)?; + Ok((TransformKindV1::Prune(key), remainder)) + } + TransformTag::WriteBidKind => { + let (value, remainder) = BidKind::from_bytes(remainder)?; + Ok((TransformKindV1::WriteBidKind(value), remainder)) + } + } + } +} + +#[cfg(any(feature = "testing", test))] +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> TransformKindV1 { + // TODO - cover all options + match rng.gen_range(0..13) { + 0 => TransformKindV1::Identity, + 1 => TransformKindV1::WriteCLValue(CLValue::from_t(true).unwrap()), + 2 => TransformKindV1::WriteAccount(AccountHash::new(rng.gen())), + 3 => TransformKindV1::WriteContractWasm, + 4 => TransformKindV1::WriteContract, + 5 => TransformKindV1::WriteContractPackage, + 6 => TransformKindV1::AddInt32(rng.gen()), + 7 => TransformKindV1::AddUInt64(rng.gen()), + 8 => TransformKindV1::AddUInt128(rng.gen::().into()), + 9 => TransformKindV1::AddUInt256(rng.gen::().into()), + 10 => TransformKindV1::AddUInt512(rng.gen::().into()), + 11 => { + let mut named_keys = Vec::new(); + for _ in 0..rng.gen_range(1..6) { + 
named_keys.push(NamedKey { + name: rng.gen::().to_string(), + key: rng.gen::().to_string(), + }); + } + TransformKindV1::AddKeys(named_keys) + } + 12 => TransformKindV1::Failure(rng.gen::().to_string()), + 13 => TransformKindV1::WriteAddressableEntity, + _ => unreachable!(), + } + } +} + +/// A key with a name. +#[derive(Clone, Eq, PartialEq, Serialize, Deserialize, Default, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub struct NamedKey { + /// The name of the entry. + pub name: String, + /// The value of the entry: a casper `Key` type. + #[cfg_attr(feature = "json-schema", schemars(with = "Key"))] + pub key: String, +} + +impl ToBytes for NamedKey { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.name.write_bytes(writer)?; + self.key.write_bytes(writer) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.name.serialized_length() + self.key.serialized_length() + } +} + +impl FromBytes for NamedKey { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (name, remainder) = String::from_bytes(bytes)?; + let (key, remainder) = String::from_bytes(remainder)?; + let named_key = NamedKey { name, key }; + Ok((named_key, remainder)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::testing::TestRng; + + #[test] + fn bytesrepr_test_transform() { + let mut rng = TestRng::new(); + let transform: TransformKindV1 = rng.gen(); + bytesrepr::test_serialization_roundtrip(&transform); + } + + #[test] + fn bytesrepr_test_execution_result() { + let mut rng = TestRng::new(); + let execution_result: ExecutionResultV1 = rng.gen(); + bytesrepr::test_serialization_roundtrip(&execution_result); + } +} diff --git 
a/types/src/execution/execution_result_v2.rs b/types/src/execution/execution_result_v2.rs new file mode 100644 index 0000000000..39d3bcd153 --- /dev/null +++ b/types/src/execution/execution_result_v2.rs @@ -0,0 +1,262 @@ +//! This file provides types to allow conversion from an EE `ExecutionResult` into a similar type +//! which can be serialized to a valid binary or JSON representation. +//! +//! It is stored as metadata related to a given transaction, and made available to clients via the +//! JSON-RPC API. + +#[cfg(any(feature = "testing", test))] +use alloc::format; +use alloc::{string::String, vec::Vec}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use once_cell::sync::Lazy; +#[cfg(any(feature = "testing", test))] +use rand::Rng; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use super::Effects; +#[cfg(feature = "json-schema")] +use super::{TransformKindV2, TransformV2}; +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +#[cfg(feature = "json-schema")] +use crate::Key; +use crate::{ + bytesrepr::{self, Error, FromBytes, ToBytes}, + Gas, InitiatorAddr, Transfer, U512, +}; + +#[cfg(feature = "json-schema")] +static EXECUTION_RESULT: Lazy = Lazy::new(|| { + let key1 = Key::from_formatted_str( + "account-hash-2c4a11c062a8a337bfc97e27fd66291caeb2c65865dcb5d3ef3759c4c97efecb", + ) + .unwrap(); + let key2 = Key::from_formatted_str( + "deploy-af684263911154d26fa05be9963171802801a0b6aff8f199b7391eacb8edc9e1", + ) + .unwrap(); + let mut effects = Effects::new(); + effects.push(TransformV2::new(key1, TransformKindV2::AddUInt64(8u64))); + effects.push(TransformV2::new(key2, TransformKindV2::Identity)); + + let transfers = vec![Transfer::example().clone()]; + + // NOTE: these are arbitrary values for schema and type demonstration, + // they are not properly derived actual values. 
Depending on current chainspec + // settings on a given chain, we may or may not be issuing a refund and if we are + // the percentage can vary. And the cost is affected by dynamic gas pricing + // for a given era, within an inclusive range defined in the chainspec. + // Thus, real values cannot be calculated in a vacuum. + const LIMIT: u64 = 123_456; + const CONSUMED: u64 = 100_000; + const COST: u64 = 246_912; + + const PRICE: u8 = 2; + + let refund = COST.saturating_sub(CONSUMED); + + ExecutionResultV2 { + initiator: InitiatorAddr::from(crate::PublicKey::example().clone()), + error_message: None, + current_price: PRICE, + limit: Gas::new(LIMIT), + consumed: Gas::new(CONSUMED), + cost: U512::from(COST), + refund: U512::from(refund), + size_estimate: Transfer::example().serialized_length() as u64, + transfers, + effects, + } +}); + +/// The result of executing a single transaction. +#[derive(Clone, Eq, PartialEq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub struct ExecutionResultV2 { + /// Who initiated this transaction. + pub initiator: InitiatorAddr, + /// If there is no error message, this execution was processed successfully. + /// If there is an error message, this execution failed to fully process for the stated reason. + pub error_message: Option, + /// The current gas price. I.e. how many motes are charged for each unit of computation. + pub current_price: u8, + /// The maximum allowed gas limit for this transaction + pub limit: Gas, + /// How much gas was consumed executing this transaction. + pub consumed: Gas, + /// How much was paid for this transaction. + pub cost: U512, + /// How much unconsumed gas was refunded (if any)? + pub refund: U512, + /// A record of transfers performed while executing this transaction. 
+ pub transfers: Vec, + /// The size estimate of the transaction + pub size_estimate: u64, + /// The effects of executing this transaction. + pub effects: Effects, +} + +impl ExecutionResultV2 { + /// The refunded amount, if any. + pub fn refund(&self) -> U512 { + self.refund + } + + // This method is not intended to be used by third party crates. + #[doc(hidden)] + #[cfg(feature = "json-schema")] + pub fn example() -> &'static Self { + &EXECUTION_RESULT + } + + /// Returns a random `ExecutionResultV2`. + #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + let effects = Effects::random(rng); + + let transfer_count = rng.gen_range(0..6); + let mut transfers = vec![]; + for _ in 0..transfer_count { + transfers.push(Transfer::random(rng)) + } + + let limit = Gas::new(rng.gen::()); + let gas_price = rng.gen_range(1..6); + // cost = the limit * the price + let cost = limit.value() * U512::from(gas_price); + let range = limit.value().as_u64(); + + // can range from 0 to limit + let consumed = limit + .checked_sub(Gas::new(rng.gen_range(0..=range))) + .expect("consumed"); + + // this assumes 100% refund ratio + let refund = cost.saturating_sub(consumed.value()); + + let size_estimate = rng.gen(); + + ExecutionResultV2 { + initiator: InitiatorAddr::random(rng), + effects, + transfers, + current_price: gas_price, + cost, + limit, + consumed, + refund, + size_estimate, + error_message: if rng.gen() { + Some(format!("Error message {}", rng.gen::())) + } else { + None + }, + } + } +} + +impl ToBytes for ExecutionResultV2 { + fn to_bytes(&self) -> Result, Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.initiator.serialized_length() + + self.error_message.serialized_length() + + self.limit.serialized_length() + + self.consumed.serialized_length() + + self.cost.serialized_length() + + self.transfers.serialized_length() + + 
self.size_estimate.serialized_length() + + self.effects.serialized_length() + + self.refund.serialized_length() + + self.current_price.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + self.initiator.write_bytes(writer)?; // initiator should logically be first + self.error_message.write_bytes(writer)?; + self.limit.write_bytes(writer)?; + self.consumed.write_bytes(writer)?; + self.cost.write_bytes(writer)?; + self.transfers.write_bytes(writer)?; + self.size_estimate.write_bytes(writer)?; + self.effects.write_bytes(writer)?; + self.refund.write_bytes(writer)?; + self.current_price.write_bytes(writer) + } +} + +impl FromBytes for ExecutionResultV2 { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (initiator, remainder) = InitiatorAddr::from_bytes(bytes)?; + let (error_message, remainder) = Option::::from_bytes(remainder)?; + let (limit, remainder) = Gas::from_bytes(remainder)?; + let (consumed, remainder) = Gas::from_bytes(remainder)?; + let (cost, remainder) = U512::from_bytes(remainder)?; + let (transfers, remainder) = Vec::::from_bytes(remainder)?; + let (size_estimate, remainder) = FromBytes::from_bytes(remainder)?; + let (effects, remainder) = Effects::from_bytes(remainder)?; + // refund && current_price were added after 2.0 was upgraded into on + // DevNet and IntegrationNet, thus the bytes repr must be appended and optional + let (refund, remainder) = match U512::from_bytes(remainder) { + Ok((ret, rem)) => (ret, rem), + Err(_) => { + let rem: &[u8] = &[]; + (U512::zero(), rem) + } + }; + let (current_price, remainder) = match u8::from_bytes(remainder) { + Ok((ret, rem)) => (ret, rem), + Err(_) => { + let ret = { + let div = cost.checked_div(limit.value()).unwrap_or_default(); + if div > U512::from(u8::MAX) { + u8::MAX + } else { + div.as_u32() as u8 + } + }; + + let rem: &[u8] = &[]; + (ret, rem) + } + }; + let execution_result = ExecutionResultV2 { + initiator, + error_message, + current_price, 
+ limit, + consumed, + cost, + refund, + transfers, + size_estimate, + effects, + }; + Ok((execution_result, remainder)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + for _ in 0..10 { + let execution_result = ExecutionResultV2::random(rng); + bytesrepr::test_serialization_roundtrip(&execution_result); + } + } +} diff --git a/types/src/execution/transform.rs b/types/src/execution/transform.rs new file mode 100644 index 0000000000..aab03c7a12 --- /dev/null +++ b/types/src/execution/transform.rs @@ -0,0 +1,92 @@ +use alloc::vec::Vec; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; +use tracing::error; + +use super::TransformKindV2; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + Key, +}; + +/// A transformation performed while executing a deploy. +#[derive(Clone, Eq, PartialEq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub struct TransformV2 { + key: Key, + kind: TransformKindV2, +} + +impl TransformV2 { + /// Constructs a new `Transform`. + pub fn new(key: Key, kind: TransformKindV2) -> Self { + TransformV2 { key, kind } + } + + /// Returns the key whose value was transformed. + pub fn key(&self) -> &Key { + &self.key + } + + /// Returns the transformation kind. + pub fn kind(&self) -> &TransformKindV2 { + &self.kind + } + + /// Consumes `self`, returning its constituent parts. 
+ pub fn destructure(self) -> (Key, TransformKindV2) { + (self.key, self.kind) + } +} + +impl ToBytes for TransformV2 { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.key.serialized_length() + self.kind.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.key.write_bytes(writer)?; + if let Err(err) = self.kind.write_bytes(writer) { + error!(%err, "ToBytes for TransformV2"); + Err(err) + } else { + Ok(()) + } + } +} + +impl FromBytes for TransformV2 { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (key, remainder) = match Key::from_bytes(bytes) { + Ok((k, rem)) => (k, rem), + Err(err) => { + error!(%err, "FromBytes for TransformV2: key"); + return Err(err); + } + }; + let (transform, remainder) = match TransformKindV2::from_bytes(remainder) { + Ok((tk, rem)) => (tk, rem), + Err(err) => { + error!(%err, "FromBytes for TransformV2: transform"); + return Err(err); + } + }; + let transform_entry = TransformV2 { + key, + kind: transform, + }; + Ok((transform_entry, remainder)) + } +} diff --git a/types/src/execution/transform_error.rs b/types/src/execution/transform_error.rs new file mode 100644 index 0000000000..7936b8fac5 --- /dev/null +++ b/types/src/execution/transform_error.rs @@ -0,0 +1,136 @@ +use alloc::vec::Vec; +use core::fmt::{self, Display, Formatter}; +#[cfg(feature = "std")] +use std::error::Error as StdError; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use crate::{ + bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + CLValueError, StoredValueTypeMismatch, +}; + +/// Error type for applying and combining transforms. 
+/// +/// A `TypeMismatch` occurs when a transform cannot be applied because the types are not compatible +/// (e.g. trying to add a number to a string). +#[derive(PartialEq, Eq, Clone, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[non_exhaustive] +pub enum TransformError { + /// Error while (de)serializing data. + Serialization(bytesrepr::Error), + /// Type mismatch error. + TypeMismatch(StoredValueTypeMismatch), + /// Type no longer supported. + Deprecated, +} + +impl Display for TransformError { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + TransformError::Serialization(error) => { + write!(formatter, "{}", error) + } + TransformError::TypeMismatch(error) => { + write!(formatter, "{}", error) + } + TransformError::Deprecated => { + write!(formatter, "type no longer supported") + } + } + } +} + +impl From for TransformError { + fn from(error: StoredValueTypeMismatch) -> Self { + TransformError::TypeMismatch(error) + } +} + +impl From for TransformError { + fn from(cl_value_error: CLValueError) -> TransformError { + match cl_value_error { + CLValueError::Serialization(error) => TransformError::Serialization(error), + CLValueError::Type(cl_type_mismatch) => { + let expected = format!("{:?}", cl_type_mismatch.expected); + let found = format!("{:?}", cl_type_mismatch.found); + let type_mismatch = StoredValueTypeMismatch::new(expected, found); + TransformError::TypeMismatch(type_mismatch) + } + } + } +} + +impl ToBytes for TransformError { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + TransformError::Serialization(error) => { + (TransformErrorTag::Serialization as u8).write_bytes(writer)?; + error.write_bytes(writer) + } + TransformError::TypeMismatch(error) => { + (TransformErrorTag::TypeMismatch as u8).write_bytes(writer)?; + error.write_bytes(writer) + } + TransformError::Deprecated => 
(TransformErrorTag::Deprecated as u8).write_bytes(writer), + } + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + + match self { + TransformError::Serialization(error) => error.serialized_length(), + TransformError::TypeMismatch(error) => error.serialized_length(), + TransformError::Deprecated => 0, + } + } +} + +impl FromBytes for TransformError { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + match tag { + tag if tag == TransformErrorTag::Serialization as u8 => { + let (error, remainder) = bytesrepr::Error::from_bytes(remainder)?; + Ok((TransformError::Serialization(error), remainder)) + } + tag if tag == TransformErrorTag::TypeMismatch as u8 => { + let (error, remainder) = StoredValueTypeMismatch::from_bytes(remainder)?; + Ok((TransformError::TypeMismatch(error), remainder)) + } + tag if tag == TransformErrorTag::Deprecated as u8 => { + Ok((TransformError::Deprecated, remainder)) + } + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +#[cfg(feature = "std")] +impl StdError for TransformError { + fn source(&self) -> Option<&(dyn StdError + 'static)> { + match self { + TransformError::Serialization(error) => Some(error), + TransformError::TypeMismatch(_) | TransformError::Deprecated => None, + } + } +} + +#[repr(u8)] +enum TransformErrorTag { + Serialization = 0, + TypeMismatch = 1, + Deprecated = 2, +} diff --git a/types/src/execution/transform_kind.rs b/types/src/execution/transform_kind.rs new file mode 100644 index 0000000000..461739d4c7 --- /dev/null +++ b/types/src/execution/transform_kind.rs @@ -0,0 +1,888 @@ +use alloc::{string::ToString, vec::Vec}; +use core::{any, convert::TryFrom}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +use num::traits::{AsPrimitive, WrappingAdd}; 
+#[cfg(any(feature = "testing", test))] +use rand::Rng; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; +use tracing::error; + +use super::TransformError; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + contracts::NamedKeys, + CLType, CLTyped, CLValue, Key, StoredValue, StoredValueTypeMismatch, U128, U256, U512, +}; + +/// Taxonomy of Transform. +#[derive(PartialEq, Eq, Debug, Clone)] +#[allow(clippy::large_enum_variant)] +pub enum TransformInstruction { + /// Store a StoredValue. + Store(StoredValue), + /// Prune a StoredValue by Key. + Prune(Key), +} + +impl TransformInstruction { + /// Store instruction. + pub fn store(stored_value: StoredValue) -> Self { + Self::Store(stored_value) + } + + /// Prune instruction. + pub fn prune(key: Key) -> Self { + Self::Prune(key) + } +} + +impl From for TransformInstruction { + fn from(value: StoredValue) -> Self { + TransformInstruction::Store(value) + } +} + +/// Representation of a single transformation occurring during execution. +/// +/// Note that all arithmetic variants of `TransformKindV2` are commutative which means that a given +/// collection of them can be executed in any order to produce the same end result. +#[derive(Clone, Eq, PartialEq, Serialize, Deserialize, Debug, Default)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +#[allow(clippy::large_enum_variant)] +pub enum TransformKindV2 { + /// An identity transformation that does not modify a value in the global state. + /// + /// Created as a result of reading from the global state. + #[default] + Identity, + /// Writes a new value in the global state. + Write(StoredValue), + /// A wrapping addition of an `i32` to an existing numeric value (not necessarily an `i32`) in + /// the global state. 
+ AddInt32(i32), + /// A wrapping addition of a `u64` to an existing numeric value (not necessarily an `u64`) in + /// the global state. + AddUInt64(u64), + /// A wrapping addition of a `U128` to an existing numeric value (not necessarily an `U128`) in + /// the global state. + AddUInt128(U128), + /// A wrapping addition of a `U256` to an existing numeric value (not necessarily an `U256`) in + /// the global state. + AddUInt256(U256), + /// A wrapping addition of a `U512` to an existing numeric value (not necessarily an `U512`) in + /// the global state. + AddUInt512(U512), + /// Adds new named keys to an existing entry in the global state. + /// + /// This transform assumes that the existing stored value is either an Account or a Contract. + AddKeys(NamedKeys), + /// Removes the pathing to the global state entry of the specified key. The pruned element + /// remains reachable from previously generated global state root hashes, but will not be + /// included in the next generated global state root hash and subsequent state accumulated + /// from it. + Prune(Key), + /// Represents the case where applying a transform would cause an error. + Failure(TransformError), +} + +impl TransformKindV2 { + /// Applies the transformation on a specified stored value instance. + /// + /// This method produces a new `StoredValue` instance based on the `TransformKind` variant. 
+ pub fn apply(self, stored_value: StoredValue) -> Result { + fn store(sv: StoredValue) -> TransformInstruction { + TransformInstruction::Store(sv) + } + match self { + TransformKindV2::Identity => Ok(store(stored_value)), + TransformKindV2::Write(new_value) => Ok(store(new_value)), + TransformKindV2::Prune(key) => Ok(TransformInstruction::prune(key)), + TransformKindV2::AddInt32(to_add) => wrapping_addition(stored_value, to_add), + TransformKindV2::AddUInt64(to_add) => wrapping_addition(stored_value, to_add), + TransformKindV2::AddUInt128(to_add) => wrapping_addition(stored_value, to_add), + TransformKindV2::AddUInt256(to_add) => wrapping_addition(stored_value, to_add), + TransformKindV2::AddUInt512(to_add) => wrapping_addition(stored_value, to_add), + TransformKindV2::AddKeys(keys) => match stored_value { + StoredValue::Contract(mut contract) => { + contract.named_keys_append(keys); + Ok(store(StoredValue::Contract(contract))) + } + StoredValue::Account(mut account) => { + account.named_keys_append(keys); + Ok(store(StoredValue::Account(account))) + } + StoredValue::AddressableEntity(_) => Err(TransformError::Deprecated), + StoredValue::CLValue(cl_value) => { + let expected = "Contract or Account".to_string(); + let found = format!("{:?}", cl_value.cl_type()); + Err(StoredValueTypeMismatch::new(expected, found).into()) + } + StoredValue::SmartContract(_) => { + let expected = "Contract or Account".to_string(); + let found = "ContractPackage".to_string(); + Err(StoredValueTypeMismatch::new(expected, found).into()) + } + StoredValue::ByteCode(_) => { + let expected = "Contract or Account".to_string(); + let found = "ByteCode".to_string(); + Err(StoredValueTypeMismatch::new(expected, found).into()) + } + StoredValue::Transfer(_) => { + let expected = "Contract or Account".to_string(); + let found = "Transfer".to_string(); + Err(StoredValueTypeMismatch::new(expected, found).into()) + } + StoredValue::DeployInfo(_) => { + let expected = "Contract or 
Account".to_string(); + let found = "DeployInfo".to_string(); + Err(StoredValueTypeMismatch::new(expected, found).into()) + } + StoredValue::EraInfo(_) => { + let expected = "Contract or Account".to_string(); + let found = "EraInfo".to_string(); + Err(StoredValueTypeMismatch::new(expected, found).into()) + } + StoredValue::Bid(_) => { + let expected = "Contract or Account".to_string(); + let found = "Bid".to_string(); + Err(StoredValueTypeMismatch::new(expected, found).into()) + } + StoredValue::BidKind(_) => { + let expected = "Contract or Account".to_string(); + let found = "BidKind".to_string(); + Err(StoredValueTypeMismatch::new(expected, found).into()) + } + StoredValue::Withdraw(_) => { + let expected = "Contract or Account".to_string(); + let found = "Withdraw".to_string(); + Err(StoredValueTypeMismatch::new(expected, found).into()) + } + StoredValue::Unbonding(_) => { + let expected = "Contract or Account".to_string(); + let found = "Unbonding".to_string(); + Err(StoredValueTypeMismatch::new(expected, found).into()) + } + StoredValue::ContractWasm(_) => { + let expected = "Contract or Account".to_string(); + let found = "ContractWasm".to_string(); + Err(StoredValueTypeMismatch::new(expected, found).into()) + } + StoredValue::ContractPackage(_) => { + let expected = "Contract or Account".to_string(); + let found = "ContractPackage".to_string(); + Err(StoredValueTypeMismatch::new(expected, found).into()) + } + StoredValue::NamedKey(_) => { + let expected = "Contract or Account".to_string(); + let found = "NamedKeyValue".to_string(); + Err(StoredValueTypeMismatch::new(expected, found).into()) + } + StoredValue::MessageTopic(_) => { + let expected = "Contract or Account".to_string(); + let found = "MessageTopic".to_string(); + Err(StoredValueTypeMismatch::new(expected, found).into()) + } + StoredValue::Message(_) => { + let expected = "Contract or Account".to_string(); + let found = "Message".to_string(); + Err(StoredValueTypeMismatch::new(expected, 
found).into()) + } + StoredValue::RawBytes(_) => { + let expected = "Contract or Account".to_string(); + let found = "RawBytes".to_string(); + Err(StoredValueTypeMismatch::new(expected, found).into()) + } + StoredValue::Prepayment(_) => { + let expected = "Contract or Account".to_string(); + let found = "Prepayment".to_string(); + Err(StoredValueTypeMismatch::new(expected, found).into()) + } + StoredValue::EntryPoint(_) => { + let expected = "Contract or Account".to_string(); + let found = "EntryPoint".to_string(); + Err(StoredValueTypeMismatch::new(expected, found).into()) + } + }, + TransformKindV2::Failure(error) => Err(error), + } + } + + /// Returns a random `TransformKind`. + #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut R) -> Self { + match rng.gen_range(0..10) { + 0 => TransformKindV2::Identity, + 1 => TransformKindV2::Write(StoredValue::CLValue(CLValue::from_t(true).unwrap())), + 2 => TransformKindV2::AddInt32(rng.gen()), + 3 => TransformKindV2::AddUInt64(rng.gen()), + 4 => TransformKindV2::AddUInt128(rng.gen::().into()), + 5 => TransformKindV2::AddUInt256(rng.gen::().into()), + 6 => TransformKindV2::AddUInt512(rng.gen::().into()), + 7 => { + let mut named_keys = NamedKeys::new(); + for _ in 0..rng.gen_range(1..6) { + named_keys.insert(rng.gen::().to_string(), rng.gen()); + } + TransformKindV2::AddKeys(named_keys) + } + 8 => TransformKindV2::Failure(TransformError::Serialization( + bytesrepr::Error::EarlyEndOfStream, + )), + 9 => TransformKindV2::Prune(rng.gen::()), + _ => unreachable!(), + } + } +} + +impl ToBytes for TransformKindV2 { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + + match self { + TransformKindV2::Identity => 0, + TransformKindV2::Write(stored_value) => stored_value.serialized_length(), + TransformKindV2::AddInt32(value) => 
value.serialized_length(), + TransformKindV2::AddUInt64(value) => value.serialized_length(), + TransformKindV2::AddUInt128(value) => value.serialized_length(), + TransformKindV2::AddUInt256(value) => value.serialized_length(), + TransformKindV2::AddUInt512(value) => value.serialized_length(), + TransformKindV2::AddKeys(named_keys) => named_keys.serialized_length(), + TransformKindV2::Failure(error) => error.serialized_length(), + TransformKindV2::Prune(value) => value.serialized_length(), + } + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + TransformKindV2::Identity => (TransformTag::Identity as u8).write_bytes(writer), + TransformKindV2::Write(stored_value) => { + (TransformTag::Write as u8).write_bytes(writer)?; + stored_value.write_bytes(writer) + } + TransformKindV2::AddInt32(value) => { + (TransformTag::AddInt32 as u8).write_bytes(writer)?; + value.write_bytes(writer) + } + TransformKindV2::AddUInt64(value) => { + (TransformTag::AddUInt64 as u8).write_bytes(writer)?; + value.write_bytes(writer) + } + TransformKindV2::AddUInt128(value) => { + (TransformTag::AddUInt128 as u8).write_bytes(writer)?; + value.write_bytes(writer) + } + TransformKindV2::AddUInt256(value) => { + (TransformTag::AddUInt256 as u8).write_bytes(writer)?; + value.write_bytes(writer) + } + TransformKindV2::AddUInt512(value) => { + (TransformTag::AddUInt512 as u8).write_bytes(writer)?; + value.write_bytes(writer) + } + TransformKindV2::AddKeys(named_keys) => { + (TransformTag::AddKeys as u8).write_bytes(writer)?; + named_keys.write_bytes(writer) + } + TransformKindV2::Failure(error) => { + (TransformTag::Failure as u8).write_bytes(writer)?; + error.write_bytes(writer) + } + TransformKindV2::Prune(value) => { + (TransformTag::Prune as u8).write_bytes(writer)?; + value.write_bytes(writer) + } + } + } +} + +impl FromBytes for TransformKindV2 { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + if bytes.is_empty() { + 
error!("FromBytes for TransformKindV2: bytes length should not be 0"); + } + let (tag, remainder) = match u8::from_bytes(bytes) { + Ok((tag, rem)) => (tag, rem), + Err(err) => { + error!(%err, "FromBytes for TransformKindV2"); + return Err(err); + } + }; + match tag { + tag if tag == TransformTag::Identity as u8 => { + Ok((TransformKindV2::Identity, remainder)) + } + tag if tag == TransformTag::Write as u8 => { + let (stored_value, remainder) = StoredValue::from_bytes(remainder)?; + Ok((TransformKindV2::Write(stored_value), remainder)) + } + tag if tag == TransformTag::AddInt32 as u8 => { + let (value, remainder) = i32::from_bytes(remainder)?; + Ok((TransformKindV2::AddInt32(value), remainder)) + } + tag if tag == TransformTag::AddUInt64 as u8 => { + let (value, remainder) = u64::from_bytes(remainder)?; + Ok((TransformKindV2::AddUInt64(value), remainder)) + } + tag if tag == TransformTag::AddUInt128 as u8 => { + let (value, remainder) = U128::from_bytes(remainder)?; + Ok((TransformKindV2::AddUInt128(value), remainder)) + } + tag if tag == TransformTag::AddUInt256 as u8 => { + let (value, remainder) = U256::from_bytes(remainder)?; + Ok((TransformKindV2::AddUInt256(value), remainder)) + } + tag if tag == TransformTag::AddUInt512 as u8 => { + let (value, remainder) = U512::from_bytes(remainder)?; + Ok((TransformKindV2::AddUInt512(value), remainder)) + } + tag if tag == TransformTag::AddKeys as u8 => { + let (named_keys, remainder) = NamedKeys::from_bytes(remainder)?; + Ok((TransformKindV2::AddKeys(named_keys), remainder)) + } + tag if tag == TransformTag::Failure as u8 => { + let (error, remainder) = TransformError::from_bytes(remainder)?; + Ok((TransformKindV2::Failure(error), remainder)) + } + tag if tag == TransformTag::Prune as u8 => { + let (key, remainder) = Key::from_bytes(remainder)?; + Ok((TransformKindV2::Prune(key), remainder)) + } + _ => { + error!(%tag, rem_len = remainder.len(), "FromBytes for TransformKindV2: unknown tag"); + 
Err(bytesrepr::Error::Formatting) + } + } + } +} + +/// Attempts a wrapping addition of `to_add` to `stored_value`, assuming `stored_value` is +/// compatible with type `Y`. +fn wrapping_addition( + stored_value: StoredValue, + to_add: Y, +) -> Result +where + Y: AsPrimitive + + AsPrimitive + + AsPrimitive + + AsPrimitive + + AsPrimitive + + AsPrimitive + + AsPrimitive + + AsPrimitive, +{ + let cl_value = CLValue::try_from(stored_value)?; + + match cl_value.cl_type() { + CLType::I32 => do_wrapping_addition::(cl_value, to_add), + CLType::I64 => do_wrapping_addition::(cl_value, to_add), + CLType::U8 => do_wrapping_addition::(cl_value, to_add), + CLType::U32 => do_wrapping_addition::(cl_value, to_add), + CLType::U64 => do_wrapping_addition::(cl_value, to_add), + CLType::U128 => do_wrapping_addition::(cl_value, to_add), + CLType::U256 => do_wrapping_addition::(cl_value, to_add), + CLType::U512 => do_wrapping_addition::(cl_value, to_add), + other => { + let expected = format!("integral type compatible with {}", any::type_name::()); + let found = format!("{:?}", other); + Err(StoredValueTypeMismatch::new(expected, found).into()) + } + } +} + +/// Attempts a wrapping addition of `to_add` to the value represented by `cl_value`. 
+fn do_wrapping_addition( + cl_value: CLValue, + to_add: Y, +) -> Result +where + X: WrappingAdd + CLTyped + ToBytes + FromBytes + Copy + 'static, + Y: AsPrimitive, +{ + let x: X = cl_value.into_t()?; + let result = x.wrapping_add(&(to_add.as_())); + let stored_value = StoredValue::CLValue(CLValue::from_t(result)?); + Ok(TransformInstruction::store(stored_value)) +} + +#[derive(Debug)] +#[repr(u8)] +enum TransformTag { + Identity = 0, + Write = 1, + AddInt32 = 2, + AddUInt64 = 3, + AddUInt128 = 4, + AddUInt256 = 5, + AddUInt512 = 6, + AddKeys = 7, + Failure = 8, + Prune = 9, +} + +#[cfg(test)] +mod tests { + use std::{collections::BTreeMap, fmt}; + + use num::{Bounded, Num}; + + use crate::{ + byte_code::ByteCodeKind, bytesrepr::Bytes, testing::TestRng, AccessRights, ByteCode, Key, + URef, U128, U256, U512, + }; + + use super::*; + + const ZERO_ARRAY: [u8; 32] = [0; 32]; + const TEST_STR: &str = "a"; + const TEST_BOOL: bool = true; + + const ZERO_I32: i32 = 0; + const ONE_I32: i32 = 1; + const NEG_ONE_I32: i32 = -1; + const NEG_TWO_I32: i32 = -2; + const MIN_I32: i32 = i32::MIN; + const MAX_I32: i32 = i32::MAX; + + const ZERO_I64: i64 = 0; + const ONE_I64: i64 = 1; + const NEG_ONE_I64: i64 = -1; + const NEG_TWO_I64: i64 = -2; + const MIN_I64: i64 = i64::MIN; + const MAX_I64: i64 = i64::MAX; + + const ZERO_U8: u8 = 0; + const ONE_U8: u8 = 1; + const MAX_U8: u8 = u8::MAX; + + const ZERO_U32: u32 = 0; + const ONE_U32: u32 = 1; + const MAX_U32: u32 = u32::MAX; + + const ZERO_U64: u64 = 0; + const ONE_U64: u64 = 1; + const MAX_U64: u64 = u64::MAX; + + const ZERO_U128: U128 = U128([0; 2]); + const ONE_U128: U128 = U128([1, 0]); + const MAX_U128: U128 = U128([MAX_U64; 2]); + + const ZERO_U256: U256 = U256([0; 4]); + const ONE_U256: U256 = U256([1, 0, 0, 0]); + const MAX_U256: U256 = U256([MAX_U64; 4]); + + const ZERO_U512: U512 = U512([0; 8]); + const ONE_U512: U512 = U512([1, 0, 0, 0, 0, 0, 0, 0]); + const MAX_U512: U512 = U512([MAX_U64; 8]); + + impl From for 
TransformKindV2 { + fn from(x: U128) -> Self { + TransformKindV2::AddUInt128(x) + } + } + impl From for TransformKindV2 { + fn from(x: U256) -> Self { + TransformKindV2::AddUInt256(x) + } + } + impl From for TransformKindV2 { + fn from(x: U512) -> Self { + TransformKindV2::AddUInt512(x) + } + } + + #[test] + fn i32_overflow() { + let max = i32::MAX; + let min = i32::MIN; + + let max_value = StoredValue::CLValue(CLValue::from_t(max).unwrap()); + let min_value = StoredValue::CLValue(CLValue::from_t(min).unwrap()); + + let apply_overflow = TransformKindV2::AddInt32(1).apply(max_value.clone()); + let apply_underflow = TransformKindV2::AddInt32(-1).apply(min_value.clone()); + + assert_eq!( + apply_overflow.expect("Unexpected overflow"), + TransformInstruction::store(min_value) + ); + assert_eq!( + apply_underflow.expect("Unexpected underflow"), + TransformInstruction::store(max_value) + ); + } + + fn uint_overflow_test() + where + T: Num + Bounded + CLTyped + ToBytes + Into + Copy, + { + let max = T::max_value(); + let min = T::min_value(); + let one = T::one(); + let zero = T::zero(); + + let max_value = StoredValue::CLValue(CLValue::from_t(max).unwrap()); + let min_value = StoredValue::CLValue(CLValue::from_t(min).unwrap()); + let zero_value = StoredValue::CLValue(CLValue::from_t(zero).unwrap()); + + let one_transform: TransformKindV2 = one.into(); + + let apply_overflow = TransformKindV2::AddInt32(1).apply(max_value.clone()); + + let apply_overflow_uint = one_transform.apply(max_value.clone()); + let apply_underflow = TransformKindV2::AddInt32(-1).apply(min_value); + + assert_eq!(apply_overflow, Ok(zero_value.clone().into())); + assert_eq!(apply_overflow_uint, Ok(zero_value.into())); + assert_eq!(apply_underflow, Ok(max_value.into())); + } + + #[test] + fn u128_overflow() { + uint_overflow_test::(); + } + + #[test] + fn u256_overflow() { + uint_overflow_test::(); + } + + #[test] + fn u512_overflow() { + uint_overflow_test::(); + } + + #[test] + fn 
addition_between_mismatched_types_should_fail() { + fn assert_yields_type_mismatch_error(stored_value: StoredValue) { + match wrapping_addition(stored_value, ZERO_I32) { + Err(TransformError::TypeMismatch(_)) => (), + _ => panic!("wrapping addition should yield TypeMismatch error"), + }; + } + + let byte_code = StoredValue::ByteCode(ByteCode::new(ByteCodeKind::V1CasperWasm, vec![])); + assert_yields_type_mismatch_error(byte_code); + + let uref = URef::new(ZERO_ARRAY, AccessRights::READ); + + let cl_bool = + StoredValue::CLValue(CLValue::from_t(TEST_BOOL).expect("should create CLValue")); + assert_yields_type_mismatch_error(cl_bool); + + let cl_unit = StoredValue::CLValue(CLValue::from_t(()).expect("should create CLValue")); + assert_yields_type_mismatch_error(cl_unit); + + let cl_string = + StoredValue::CLValue(CLValue::from_t(TEST_STR).expect("should create CLValue")); + assert_yields_type_mismatch_error(cl_string); + + let cl_key = StoredValue::CLValue( + CLValue::from_t(Key::Hash(ZERO_ARRAY)).expect("should create CLValue"), + ); + assert_yields_type_mismatch_error(cl_key); + + let cl_uref = StoredValue::CLValue(CLValue::from_t(uref).expect("should create CLValue")); + assert_yields_type_mismatch_error(cl_uref); + + let cl_option = + StoredValue::CLValue(CLValue::from_t(Some(ZERO_U8)).expect("should create CLValue")); + assert_yields_type_mismatch_error(cl_option); + + let cl_list = StoredValue::CLValue( + CLValue::from_t(Bytes::from(vec![ZERO_U8])).expect("should create CLValue"), + ); + assert_yields_type_mismatch_error(cl_list); + + let cl_fixed_list = + StoredValue::CLValue(CLValue::from_t([ZERO_U8]).expect("should create CLValue")); + assert_yields_type_mismatch_error(cl_fixed_list); + + let cl_result: Result<(), u8> = Err(ZERO_U8); + let cl_result = + StoredValue::CLValue(CLValue::from_t(cl_result).expect("should create CLValue")); + assert_yields_type_mismatch_error(cl_result); + + let cl_map = StoredValue::CLValue( + 
CLValue::from_t(BTreeMap::::new()).expect("should create CLValue"), + ); + assert_yields_type_mismatch_error(cl_map); + + let cl_tuple1 = + StoredValue::CLValue(CLValue::from_t((ZERO_U8,)).expect("should create CLValue")); + assert_yields_type_mismatch_error(cl_tuple1); + + let cl_tuple2 = StoredValue::CLValue( + CLValue::from_t((ZERO_U8, ZERO_U8)).expect("should create CLValue"), + ); + assert_yields_type_mismatch_error(cl_tuple2); + + let cl_tuple3 = StoredValue::CLValue( + CLValue::from_t((ZERO_U8, ZERO_U8, ZERO_U8)).expect("should create CLValue"), + ); + assert_yields_type_mismatch_error(cl_tuple3); + } + + #[test] + #[allow(clippy::cognitive_complexity)] + fn wrapping_addition_should_succeed() { + fn add(current_value: X, to_add: Y) -> X + where + X: CLTyped + ToBytes + FromBytes + PartialEq + fmt::Debug, + Y: AsPrimitive + + AsPrimitive + + AsPrimitive + + AsPrimitive + + AsPrimitive + + AsPrimitive + + AsPrimitive + + AsPrimitive, + { + let current = StoredValue::CLValue( + CLValue::from_t(current_value).expect("should create CLValue"), + ); + if let TransformInstruction::Store(result) = + wrapping_addition(current, to_add).expect("wrapping addition should succeed") + { + CLValue::try_from(result) + .expect("should be CLValue") + .into_t() + .expect("should parse to X") + } else { + panic!("expected TransformInstruction::Store"); + } + } + + // Adding to i32 + assert_eq!(ONE_I32, add(ZERO_I32, ONE_I32)); + assert_eq!(MIN_I32, add(MAX_I32, ONE_I32)); + assert_eq!(NEG_TWO_I32, add(MAX_I32, MAX_I32)); + assert_eq!(ZERO_I32, add(ONE_I32, NEG_ONE_I32)); + assert_eq!(NEG_ONE_I32, add(ZERO_I32, NEG_ONE_I32)); + assert_eq!(MAX_I32, add(NEG_ONE_I32, MIN_I32)); + + assert_eq!(ONE_I32, add(ZERO_I32, ONE_U64)); + assert_eq!(MIN_I32, add(MAX_I32, ONE_U64)); + assert_eq!(NEG_TWO_I32, add(MAX_I32, MAX_I32 as u64)); + + assert_eq!(ONE_I32, add(ZERO_I32, ONE_U128)); + assert_eq!(MIN_I32, add(MAX_I32, ONE_U128)); + assert_eq!(NEG_TWO_I32, add(MAX_I32, U128::from(MAX_I32))); 
+ + assert_eq!(ONE_I32, add(ZERO_I32, ONE_U256)); + assert_eq!(MIN_I32, add(MAX_I32, ONE_U256)); + assert_eq!(NEG_TWO_I32, add(MAX_I32, U256::from(MAX_I32))); + + assert_eq!(ONE_I32, add(ZERO_I32, ONE_U512)); + assert_eq!(MIN_I32, add(MAX_I32, ONE_U512)); + assert_eq!(NEG_TWO_I32, add(MAX_I32, U512::from(MAX_I32))); + + // Adding to i64 + assert_eq!(ONE_I64, add(ZERO_I64, ONE_I32)); + assert_eq!(MIN_I64, add(MAX_I64, ONE_I32)); + assert_eq!(ZERO_I64, add(ONE_I64, NEG_ONE_I32)); + assert_eq!(NEG_ONE_I64, add(ZERO_I64, NEG_ONE_I32)); + assert_eq!(MAX_I64, add(MIN_I64, NEG_ONE_I32)); + + assert_eq!(ONE_I64, add(ZERO_I64, ONE_U64)); + assert_eq!(MIN_I64, add(MAX_I64, ONE_U64)); + assert_eq!(NEG_TWO_I64, add(MAX_I64, MAX_I64 as u64)); + + assert_eq!(ONE_I64, add(ZERO_I64, ONE_U128)); + assert_eq!(MIN_I64, add(MAX_I64, ONE_U128)); + assert_eq!(NEG_TWO_I64, add(MAX_I64, U128::from(MAX_I64))); + + assert_eq!(ONE_I64, add(ZERO_I64, ONE_U256)); + assert_eq!(MIN_I64, add(MAX_I64, ONE_U256)); + assert_eq!(NEG_TWO_I64, add(MAX_I64, U256::from(MAX_I64))); + + assert_eq!(ONE_I64, add(ZERO_I64, ONE_U512)); + assert_eq!(MIN_I64, add(MAX_I64, ONE_U512)); + assert_eq!(NEG_TWO_I64, add(MAX_I64, U512::from(MAX_I64))); + + // Adding to u8 + assert_eq!(ONE_U8, add(ZERO_U8, ONE_I32)); + assert_eq!(ZERO_U8, add(MAX_U8, ONE_I32)); + assert_eq!(MAX_U8, add(MAX_U8, 256_i32)); + assert_eq!(ZERO_U8, add(MAX_U8, 257_i32)); + assert_eq!(ZERO_U8, add(ONE_U8, NEG_ONE_I32)); + assert_eq!(MAX_U8, add(ZERO_U8, NEG_ONE_I32)); + assert_eq!(ZERO_U8, add(ZERO_U8, -256_i32)); + assert_eq!(MAX_U8, add(ZERO_U8, -257_i32)); + assert_eq!(MAX_U8, add(ZERO_U8, MAX_I32)); + assert_eq!(ZERO_U8, add(ZERO_U8, MIN_I32)); + + assert_eq!(ONE_U8, add(ZERO_U8, ONE_U64)); + assert_eq!(ZERO_U8, add(MAX_U8, ONE_U64)); + assert_eq!(ONE_U8, add(ZERO_U8, u64::from(MAX_U8) + 2)); + assert_eq!(MAX_U8, add(ZERO_U8, MAX_U64)); + + assert_eq!(ONE_U8, add(ZERO_U8, ONE_U128)); + assert_eq!(ZERO_U8, add(MAX_U8, ONE_U128)); + 
assert_eq!(ONE_U8, add(ZERO_U8, U128::from(MAX_U8) + 2)); + assert_eq!(MAX_U8, add(ZERO_U8, MAX_U128)); + + assert_eq!(ONE_U8, add(ZERO_U8, ONE_U256)); + assert_eq!(ZERO_U8, add(MAX_U8, ONE_U256)); + assert_eq!(ONE_U8, add(ZERO_U8, U256::from(MAX_U8) + 2)); + assert_eq!(MAX_U8, add(ZERO_U8, MAX_U256)); + + assert_eq!(ONE_U8, add(ZERO_U8, ONE_U512)); + assert_eq!(ZERO_U8, add(MAX_U8, ONE_U512)); + assert_eq!(ONE_U8, add(ZERO_U8, U512::from(MAX_U8) + 2)); + assert_eq!(MAX_U8, add(ZERO_U8, MAX_U512)); + + // Adding to u32 + assert_eq!(ONE_U32, add(ZERO_U32, ONE_I32)); + assert_eq!(ZERO_U32, add(MAX_U32, ONE_I32)); + assert_eq!(ZERO_U32, add(ONE_U32, NEG_ONE_I32)); + assert_eq!(MAX_U32, add(ZERO_U32, NEG_ONE_I32)); + assert_eq!(MAX_I32 as u32 + 1, add(ZERO_U32, MIN_I32)); + + assert_eq!(ONE_U32, add(ZERO_U32, ONE_U64)); + assert_eq!(ZERO_U32, add(MAX_U32, ONE_U64)); + assert_eq!(ONE_U32, add(ZERO_U32, u64::from(MAX_U32) + 2)); + assert_eq!(MAX_U32, add(ZERO_U32, MAX_U64)); + + assert_eq!(ONE_U32, add(ZERO_U32, ONE_U128)); + assert_eq!(ZERO_U32, add(MAX_U32, ONE_U128)); + assert_eq!(ONE_U32, add(ZERO_U32, U128::from(MAX_U32) + 2)); + assert_eq!(MAX_U32, add(ZERO_U32, MAX_U128)); + + assert_eq!(ONE_U32, add(ZERO_U32, ONE_U256)); + assert_eq!(ZERO_U32, add(MAX_U32, ONE_U256)); + assert_eq!(ONE_U32, add(ZERO_U32, U256::from(MAX_U32) + 2)); + assert_eq!(MAX_U32, add(ZERO_U32, MAX_U256)); + + assert_eq!(ONE_U32, add(ZERO_U32, ONE_U512)); + assert_eq!(ZERO_U32, add(MAX_U32, ONE_U512)); + assert_eq!(ONE_U32, add(ZERO_U32, U512::from(MAX_U32) + 2)); + assert_eq!(MAX_U32, add(ZERO_U32, MAX_U512)); + + // Adding to u64 + assert_eq!(ONE_U64, add(ZERO_U64, ONE_I32)); + assert_eq!(ZERO_U64, add(MAX_U64, ONE_I32)); + assert_eq!(ZERO_U64, add(ONE_U64, NEG_ONE_I32)); + assert_eq!(MAX_U64, add(ZERO_U64, NEG_ONE_I32)); + + assert_eq!(ONE_U64, add(ZERO_U64, ONE_U64)); + assert_eq!(ZERO_U64, add(MAX_U64, ONE_U64)); + assert_eq!(MAX_U64 - 1, add(MAX_U64, MAX_U64)); + + assert_eq!(ONE_U64, 
add(ZERO_U64, ONE_U128)); + assert_eq!(ZERO_U64, add(MAX_U64, ONE_U128)); + assert_eq!(ONE_U64, add(ZERO_U64, U128::from(MAX_U64) + 2)); + assert_eq!(MAX_U64, add(ZERO_U64, MAX_U128)); + + assert_eq!(ONE_U64, add(ZERO_U64, ONE_U256)); + assert_eq!(ZERO_U64, add(MAX_U64, ONE_U256)); + assert_eq!(ONE_U64, add(ZERO_U64, U256::from(MAX_U64) + 2)); + assert_eq!(MAX_U64, add(ZERO_U64, MAX_U256)); + + assert_eq!(ONE_U64, add(ZERO_U64, ONE_U512)); + assert_eq!(ZERO_U64, add(MAX_U64, ONE_U512)); + assert_eq!(ONE_U64, add(ZERO_U64, U512::from(MAX_U64) + 2)); + assert_eq!(MAX_U64, add(ZERO_U64, MAX_U512)); + + // Adding to U128 + assert_eq!(ONE_U128, add(ZERO_U128, ONE_I32)); + assert_eq!(ZERO_U128, add(MAX_U128, ONE_I32)); + assert_eq!(ZERO_U128, add(ONE_U128, NEG_ONE_I32)); + assert_eq!(MAX_U128, add(ZERO_U128, NEG_ONE_I32)); + + assert_eq!(ONE_U128, add(ZERO_U128, ONE_U64)); + assert_eq!(ZERO_U128, add(MAX_U128, ONE_U64)); + + assert_eq!(ONE_U128, add(ZERO_U128, ONE_U128)); + assert_eq!(ZERO_U128, add(MAX_U128, ONE_U128)); + assert_eq!(MAX_U128 - 1, add(MAX_U128, MAX_U128)); + + assert_eq!(ONE_U128, add(ZERO_U128, ONE_U256)); + assert_eq!(ZERO_U128, add(MAX_U128, ONE_U256)); + assert_eq!( + ONE_U128, + add( + ZERO_U128, + U256::from_dec_str(&MAX_U128.to_string()).unwrap() + 2, + ) + ); + assert_eq!(MAX_U128, add(ZERO_U128, MAX_U256)); + + assert_eq!(ONE_U128, add(ZERO_U128, ONE_U512)); + assert_eq!(ZERO_U128, add(MAX_U128, ONE_U512)); + assert_eq!( + ONE_U128, + add( + ZERO_U128, + U512::from_dec_str(&MAX_U128.to_string()).unwrap() + 2, + ) + ); + assert_eq!(MAX_U128, add(ZERO_U128, MAX_U512)); + + // Adding to U256 + assert_eq!(ONE_U256, add(ZERO_U256, ONE_I32)); + assert_eq!(ZERO_U256, add(MAX_U256, ONE_I32)); + assert_eq!(ZERO_U256, add(ONE_U256, NEG_ONE_I32)); + assert_eq!(MAX_U256, add(ZERO_U256, NEG_ONE_I32)); + + assert_eq!(ONE_U256, add(ZERO_U256, ONE_U64)); + assert_eq!(ZERO_U256, add(MAX_U256, ONE_U64)); + + assert_eq!(ONE_U256, add(ZERO_U256, ONE_U128)); + 
assert_eq!(ZERO_U256, add(MAX_U256, ONE_U128)); + + assert_eq!(ONE_U256, add(ZERO_U256, ONE_U256)); + assert_eq!(ZERO_U256, add(MAX_U256, ONE_U256)); + assert_eq!(MAX_U256 - 1, add(MAX_U256, MAX_U256)); + + assert_eq!(ONE_U256, add(ZERO_U256, ONE_U512)); + assert_eq!(ZERO_U256, add(MAX_U256, ONE_U512)); + assert_eq!( + ONE_U256, + add( + ZERO_U256, + U512::from_dec_str(&MAX_U256.to_string()).unwrap() + 2, + ) + ); + assert_eq!(MAX_U256, add(ZERO_U256, MAX_U512)); + + // Adding to U512 + assert_eq!(ONE_U512, add(ZERO_U512, ONE_I32)); + assert_eq!(ZERO_U512, add(MAX_U512, ONE_I32)); + assert_eq!(ZERO_U512, add(ONE_U512, NEG_ONE_I32)); + assert_eq!(MAX_U512, add(ZERO_U512, NEG_ONE_I32)); + + assert_eq!(ONE_U512, add(ZERO_U512, ONE_U64)); + assert_eq!(ZERO_U512, add(MAX_U512, ONE_U64)); + + assert_eq!(ONE_U512, add(ZERO_U512, ONE_U128)); + assert_eq!(ZERO_U512, add(MAX_U512, ONE_U128)); + + assert_eq!(ONE_U512, add(ZERO_U512, ONE_U256)); + assert_eq!(ZERO_U512, add(MAX_U512, ONE_U256)); + + assert_eq!(ONE_U512, add(ZERO_U512, ONE_U512)); + assert_eq!(ZERO_U512, add(MAX_U512, ONE_U512)); + assert_eq!(MAX_U512 - 1, add(MAX_U512, MAX_U512)); + } + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + for _ in 0..11 { + let execution_result = TransformKindV2::random(rng); + bytesrepr::test_serialization_roundtrip(&execution_result); + } + } +} diff --git a/types/src/execution_result.rs b/types/src/execution_result.rs deleted file mode 100644 index b78e8dafab..0000000000 --- a/types/src/execution_result.rs +++ /dev/null @@ -1,689 +0,0 @@ -//! This file provides types to allow conversion from an EE `ExecutionResult` into a similar type -//! which can be serialized to a valid binary or JSON representation. -//! -//! It is stored as metadata related to a given deploy, and made available to clients via the -//! JSON-RPC API. - -// TODO - remove once schemars stops causing warning. 
-#![allow(clippy::field_reassign_with_default)] - -use alloc::{ - boxed::Box, - format, - string::{String, ToString}, - vec, - vec::Vec, -}; - -#[cfg(feature = "std")] -use once_cell::sync::Lazy; -use rand::{ - distributions::{Distribution, Standard}, - seq::SliceRandom, - Rng, -}; -#[cfg(feature = "std")] -use schemars::JsonSchema; -use serde::{Deserialize, Serialize}; - -#[cfg(feature = "std")] -use crate::KEY_HASH_LENGTH; -use crate::{ - account::AccountHash, - bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, - system::auction::{Bid, EraInfo, SeigniorageRecipients, UnbondingPurse}, - CLValue, DeployInfo, NamedKey, Transfer, TransferAddr, U128, U256, U512, -}; - -/// Constants to track ExecutionResult serialization. -const EXECUTION_RESULT_FAILURE_TAG: u8 = 0; -const EXECUTION_RESULT_SUCCESS_TAG: u8 = 1; - -/// Constants to track operation serialization. -const OP_READ_TAG: u8 = 0; -const OP_WRITE_TAG: u8 = 1; -const OP_ADD_TAG: u8 = 2; -const OP_NOOP_TAG: u8 = 3; - -/// Constants to track Transform serialization. 
-const TRANSFORM_IDENTITY_TAG: u8 = 0; -const TRANSFORM_WRITE_CLVALUE_TAG: u8 = 1; -const TRANSFORM_WRITE_ACCOUNT_TAG: u8 = 2; -const TRANSFORM_WRITE_CONTRACT_WASM_TAG: u8 = 3; -const TRANSFORM_WRITE_CONTRACT_TAG: u8 = 4; -const TRANSFORM_WRITE_CONTRACT_PACKAGE_TAG: u8 = 5; -const TRANSFORM_WRITE_DEPLOY_INFO_TAG: u8 = 6; -const TRANSFORM_WRITE_TRANSFER_TAG: u8 = 7; -const TRANSFORM_WRITE_ERA_INFO_TAG: u8 = 8; -const TRANSFORM_WRITE_BID_TAG: u8 = 9; -const TRANSFORM_WRITE_WITHDRAW_TAG: u8 = 10; -const TRANSFORM_WRITE_ERA_VALIDATORS_TAG: u8 = 11; -const TRANSFORM_ADD_INT32_TAG: u8 = 12; -const TRANSFORM_ADD_UINT64_TAG: u8 = 13; -const TRANSFORM_ADD_UINT128_TAG: u8 = 14; -const TRANSFORM_ADD_UINT256_TAG: u8 = 15; -const TRANSFORM_ADD_UINT512_TAG: u8 = 16; -const TRANSFORM_ADD_KEYS_TAG: u8 = 17; -const TRANSFORM_FAILURE_TAG: u8 = 18; - -#[cfg(feature = "std")] -static EXECUTION_RESULT: Lazy = Lazy::new(|| { - let mut operations = Vec::new(); - operations.push(Operation { - key: "account-hash-2c4a11c062a8a337bfc97e27fd66291caeb2c65865dcb5d3ef3759c4c97efecb" - .to_string(), - kind: OpKind::Write, - }); - operations.push(Operation { - key: "deploy-af684263911154d26fa05be9963171802801a0b6aff8f199b7391eacb8edc9e1".to_string(), - kind: OpKind::Read, - }); - - let mut transforms = Vec::new(); - transforms.push(TransformEntry { - key: "uref-2c4a11c062a8a337bfc97e27fd66291caeb2c65865dcb5d3ef3759c4c97efecb-007" - .to_string(), - transform: Transform::AddUInt64(8u64), - }); - transforms.push(TransformEntry { - key: "deploy-af684263911154d26fa05be9963171802801a0b6aff8f199b7391eacb8edc9e1".to_string(), - transform: Transform::Identity, - }); - - let effect = ExecutionEffect { - operations, - transforms, - }; - - let transfers = vec![ - TransferAddr::new([89; KEY_HASH_LENGTH]), - TransferAddr::new([130; KEY_HASH_LENGTH]), - ]; - - ExecutionResult::Success { - effect, - transfers, - cost: U512::from(123_456), - } -}); - -/// The result of executing a single deploy. 
-#[derive(Clone, Eq, PartialEq, Serialize, Deserialize, Debug)] -#[cfg_attr(feature = "std", derive(JsonSchema))] -#[serde(deny_unknown_fields)] -pub enum ExecutionResult { - /// The result of a failed execution. - Failure { - /// The effect of executing the deploy. - effect: ExecutionEffect, - /// A record of Transfers performed while executing the deploy. - transfers: Vec, - /// The cost of executing the deploy. - cost: U512, - /// The error message associated with executing the deploy. - error_message: String, - }, - /// The result of a successful execution. - Success { - /// The effect of executing the deploy. - effect: ExecutionEffect, - /// A record of Transfers performed while executing the deploy. - transfers: Vec, - /// The cost of executing the deploy. - cost: U512, - }, -} - -impl ExecutionResult { - // This method is not intended to be used by third party crates. - #[doc(hidden)] - #[cfg(feature = "std")] - pub fn example() -> &'static Self { - &*EXECUTION_RESULT - } -} - -impl Distribution for Standard { - fn sample(&self, rng: &mut R) -> ExecutionResult { - let op_count = rng.gen_range(0..6); - let mut operations = Vec::new(); - for _ in 0..op_count { - let op = [OpKind::Read, OpKind::Add, OpKind::NoOp, OpKind::Write] - .choose(rng) - .unwrap(); - operations.push(Operation { - key: rng.gen::().to_string(), - kind: *op, - }); - } - - let transform_count = rng.gen_range(0..6); - let mut transforms = Vec::new(); - for _ in 0..transform_count { - transforms.push(TransformEntry { - key: rng.gen::().to_string(), - transform: rng.gen(), - }); - } - - let effect = ExecutionEffect { - operations, - transforms, - }; - - let transfer_count = rng.gen_range(0..6); - let mut transfers = vec![]; - for _ in 0..transfer_count { - transfers.push(TransferAddr::new(rng.gen())) - } - - if rng.gen() { - ExecutionResult::Failure { - effect, - transfers, - cost: rng.gen::().into(), - error_message: format!("Error message {}", rng.gen::()), - } - } else { - 
ExecutionResult::Success { - effect, - transfers, - cost: rng.gen::().into(), - } - } - } -} - -impl ToBytes for ExecutionResult { - fn to_bytes(&self) -> Result, bytesrepr::Error> { - let mut buffer = bytesrepr::allocate_buffer(self)?; - match self { - ExecutionResult::Failure { - effect, - transfers, - cost, - error_message, - } => { - buffer.push(EXECUTION_RESULT_FAILURE_TAG); - buffer.extend(effect.to_bytes()?); - buffer.extend(transfers.to_bytes()?); - buffer.extend(cost.to_bytes()?); - buffer.extend(error_message.to_bytes()?); - } - ExecutionResult::Success { - effect, - transfers, - cost, - } => { - buffer.push(EXECUTION_RESULT_SUCCESS_TAG); - buffer.extend(effect.to_bytes()?); - buffer.extend(transfers.to_bytes()?); - buffer.extend(cost.to_bytes()?); - } - } - Ok(buffer) - } - - fn serialized_length(&self) -> usize { - U8_SERIALIZED_LENGTH - + match self { - ExecutionResult::Failure { - effect, - transfers, - cost, - error_message, - } => { - effect.serialized_length() - + transfers.serialized_length() - + cost.serialized_length() - + error_message.serialized_length() - } - ExecutionResult::Success { - effect, - transfers, - cost, - } => { - effect.serialized_length() - + transfers.serialized_length() - + cost.serialized_length() - } - } - } -} - -impl FromBytes for ExecutionResult { - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { - let (tag, remainder) = u8::from_bytes(bytes)?; - match tag { - EXECUTION_RESULT_FAILURE_TAG => { - let (effect, remainder) = ExecutionEffect::from_bytes(remainder)?; - let (transfers, remainder) = Vec::::from_bytes(remainder)?; - let (cost, remainder) = U512::from_bytes(remainder)?; - let (error_message, remainder) = String::from_bytes(remainder)?; - let execution_result = ExecutionResult::Failure { - effect, - transfers, - cost, - error_message, - }; - Ok((execution_result, remainder)) - } - EXECUTION_RESULT_SUCCESS_TAG => { - let (effect, remainder) = ExecutionEffect::from_bytes(remainder)?; - let 
(transfers, remainder) = Vec::::from_bytes(remainder)?; - let (cost, remainder) = U512::from_bytes(remainder)?; - let execution_result = ExecutionResult::Success { - effect, - transfers, - cost, - }; - Ok((execution_result, remainder)) - } - _ => Err(bytesrepr::Error::Formatting), - } - } -} - -/// The effect of executing a single deploy. -#[derive(Clone, Eq, PartialEq, Serialize, Deserialize, Default, Debug)] -#[cfg_attr(feature = "std", derive(JsonSchema))] -#[serde(deny_unknown_fields)] -pub struct ExecutionEffect { - /// The resulting operations. - pub operations: Vec, - /// The resulting transformations. - pub transforms: Vec, -} - -impl ToBytes for ExecutionEffect { - fn to_bytes(&self) -> Result, bytesrepr::Error> { - let mut buffer = bytesrepr::allocate_buffer(self)?; - buffer.extend(self.operations.to_bytes()?); - buffer.extend(self.transforms.to_bytes()?); - Ok(buffer) - } - - fn serialized_length(&self) -> usize { - self.operations.serialized_length() + self.transforms.serialized_length() - } -} - -impl FromBytes for ExecutionEffect { - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { - let (operations, remainder) = Vec::::from_bytes(bytes)?; - let (transforms, remainder) = Vec::::from_bytes(remainder)?; - let execution_effect = ExecutionEffect { - operations, - transforms, - }; - Ok((execution_effect, remainder)) - } -} - -/// An operation performed while executing a deploy. -#[derive(Clone, Eq, PartialEq, Serialize, Deserialize, Debug)] -#[cfg_attr(feature = "std", derive(JsonSchema))] -#[serde(deny_unknown_fields)] -pub struct Operation { - /// The formatted string of the `Key`. - pub key: String, - /// The type of operation. 
- pub kind: OpKind, -} - -impl ToBytes for Operation { - fn to_bytes(&self) -> Result, bytesrepr::Error> { - let mut buffer = bytesrepr::allocate_buffer(self)?; - buffer.extend(self.key.to_bytes()?); - buffer.extend(self.kind.to_bytes()?); - Ok(buffer) - } - - fn serialized_length(&self) -> usize { - self.key.serialized_length() + self.kind.serialized_length() - } -} - -impl FromBytes for Operation { - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { - let (key, remainder) = String::from_bytes(bytes)?; - let (kind, remainder) = OpKind::from_bytes(remainder)?; - let operation = Operation { key, kind }; - Ok((operation, remainder)) - } -} - -/// The type of operation performed while executing a deploy. -#[derive(Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Debug)] -#[cfg_attr(feature = "std", derive(JsonSchema))] -#[serde(deny_unknown_fields)] -pub enum OpKind { - /// A read operation. - Read, - /// A write operation. - Write, - /// An addition. - Add, - /// An operation which has no effect. - NoOp, -} - -impl ToBytes for OpKind { - fn to_bytes(&self) -> Result, bytesrepr::Error> { - match self { - OpKind::Read => OP_READ_TAG.to_bytes(), - OpKind::Write => OP_WRITE_TAG.to_bytes(), - OpKind::Add => OP_ADD_TAG.to_bytes(), - OpKind::NoOp => OP_NOOP_TAG.to_bytes(), - } - } - - fn serialized_length(&self) -> usize { - U8_SERIALIZED_LENGTH - } -} - -impl FromBytes for OpKind { - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { - let (tag, remainder) = u8::from_bytes(bytes)?; - match tag { - OP_READ_TAG => Ok((OpKind::Read, remainder)), - OP_WRITE_TAG => Ok((OpKind::Write, remainder)), - OP_ADD_TAG => Ok((OpKind::Add, remainder)), - OP_NOOP_TAG => Ok((OpKind::NoOp, remainder)), - _ => Err(bytesrepr::Error::Formatting), - } - } -} - -/// A transformation performed while executing a deploy. 
-#[derive(Clone, Eq, PartialEq, Serialize, Deserialize, Debug)] -#[cfg_attr(feature = "std", derive(JsonSchema))] -#[serde(deny_unknown_fields)] -pub struct TransformEntry { - /// The formatted string of the `Key`. - pub key: String, - /// The transformation. - pub transform: Transform, -} - -impl ToBytes for TransformEntry { - fn to_bytes(&self) -> Result, bytesrepr::Error> { - let mut buffer = bytesrepr::allocate_buffer(self)?; - buffer.extend(self.key.to_bytes()?); - buffer.extend(self.transform.to_bytes()?); - Ok(buffer) - } - - fn serialized_length(&self) -> usize { - self.key.serialized_length() + self.transform.serialized_length() - } -} - -impl FromBytes for TransformEntry { - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { - let (key, remainder) = String::from_bytes(bytes)?; - let (transform, remainder) = Transform::from_bytes(remainder)?; - let transform_entry = TransformEntry { key, transform }; - Ok((transform_entry, remainder)) - } -} - -/// The actual transformation performed while executing a deploy. -#[derive(Clone, Eq, PartialEq, Serialize, Deserialize, Debug)] -#[cfg_attr(feature = "std", derive(JsonSchema))] -#[serde(deny_unknown_fields)] -pub enum Transform { - /// A transform having no effect. - Identity, - /// Writes the given CLValue to global state. - WriteCLValue(CLValue), - /// Writes the given Account to global state. - WriteAccount(AccountHash), - /// Writes a smart contract as Wasm to global state. - WriteContractWasm, - /// Writes a smart contract to global state. - WriteContract, - /// Writes a smart contract package to global state. - WriteContractPackage, - /// Writes the given DeployInfo to global state. - WriteDeployInfo(DeployInfo), - /// Writes the given EraInfo to global state. - WriteEraInfo(EraInfo), - /// Writes the given Transfer to global state. - WriteTransfer(Transfer), - /// Writes the given Bid to global state. - WriteBid(Box), - /// Writes the given Withdraw to global state. 
- WriteWithdraw(Vec), - /// Writes the given EraValidators to global state. - WriteEraValidators(SeigniorageRecipients), - /// Adds the given `i32`. - AddInt32(i32), - /// Adds the given `u64`. - AddUInt64(u64), - /// Adds the given `U128`. - AddUInt128(U128), - /// Adds the given `U256`. - AddUInt256(U256), - /// Adds the given `U512`. - AddUInt512(U512), - /// Adds the given collection of named keys. - AddKeys(Vec), - /// A failed transformation, containing an error message. - Failure(String), -} - -impl ToBytes for Transform { - fn to_bytes(&self) -> Result, bytesrepr::Error> { - let mut buffer = bytesrepr::allocate_buffer(self)?; - match self { - Transform::Identity => buffer.insert(0, TRANSFORM_IDENTITY_TAG), - Transform::WriteCLValue(value) => { - buffer.insert(0, TRANSFORM_WRITE_CLVALUE_TAG); - buffer.extend(value.to_bytes()?); - } - Transform::WriteAccount(account_hash) => { - buffer.insert(0, TRANSFORM_WRITE_ACCOUNT_TAG); - buffer.extend(account_hash.to_bytes()?); - } - Transform::WriteContractWasm => buffer.insert(0, TRANSFORM_WRITE_CONTRACT_WASM_TAG), - Transform::WriteContract => buffer.insert(0, TRANSFORM_WRITE_CONTRACT_TAG), - Transform::WriteContractPackage => { - buffer.insert(0, TRANSFORM_WRITE_CONTRACT_PACKAGE_TAG) - } - Transform::WriteDeployInfo(deploy_info) => { - buffer.insert(0, TRANSFORM_WRITE_DEPLOY_INFO_TAG); - buffer.extend(deploy_info.to_bytes()?); - } - Transform::WriteEraInfo(era_info) => { - buffer.insert(0, TRANSFORM_WRITE_ERA_INFO_TAG); - buffer.extend(era_info.to_bytes()?); - } - Transform::WriteTransfer(transfer) => { - buffer.insert(0, TRANSFORM_WRITE_TRANSFER_TAG); - buffer.extend(transfer.to_bytes()?); - } - Transform::WriteBid(bid) => { - buffer.insert(0, TRANSFORM_WRITE_BID_TAG); - buffer.extend(bid.to_bytes()?); - } - Transform::WriteWithdraw(unbonding_purses) => { - buffer.insert(0, TRANSFORM_WRITE_WITHDRAW_TAG); - buffer.extend(unbonding_purses.to_bytes()?); - } - Transform::WriteEraValidators(recipients) => { - 
buffer.insert(0, TRANSFORM_WRITE_ERA_VALIDATORS_TAG); - buffer.extend(recipients.to_bytes()?); - } - Transform::AddInt32(value) => { - buffer.insert(0, TRANSFORM_ADD_INT32_TAG); - buffer.extend(value.to_bytes()?); - } - Transform::AddUInt64(value) => { - buffer.insert(0, TRANSFORM_ADD_UINT64_TAG); - buffer.extend(value.to_bytes()?); - } - Transform::AddUInt128(value) => { - buffer.insert(0, TRANSFORM_ADD_UINT128_TAG); - buffer.extend(value.to_bytes()?); - } - Transform::AddUInt256(value) => { - buffer.insert(0, TRANSFORM_ADD_UINT256_TAG); - buffer.extend(value.to_bytes()?); - } - Transform::AddUInt512(value) => { - buffer.insert(0, TRANSFORM_ADD_UINT512_TAG); - buffer.extend(value.to_bytes()?); - } - Transform::AddKeys(value) => { - buffer.insert(0, TRANSFORM_ADD_KEYS_TAG); - buffer.extend(value.to_bytes()?); - } - Transform::Failure(value) => { - buffer.insert(0, TRANSFORM_FAILURE_TAG); - buffer.extend(value.to_bytes()?); - } - } - Ok(buffer) - } - - fn serialized_length(&self) -> usize { - match self { - Transform::WriteCLValue(value) => value.serialized_length() + U8_SERIALIZED_LENGTH, - Transform::WriteAccount(value) => value.serialized_length() + U8_SERIALIZED_LENGTH, - Transform::WriteDeployInfo(value) => value.serialized_length() + U8_SERIALIZED_LENGTH, - Transform::WriteEraInfo(value) => value.serialized_length() + U8_SERIALIZED_LENGTH, - Transform::WriteTransfer(value) => value.serialized_length() + U8_SERIALIZED_LENGTH, - Transform::AddInt32(value) => value.serialized_length() + U8_SERIALIZED_LENGTH, - Transform::AddUInt64(value) => value.serialized_length() + U8_SERIALIZED_LENGTH, - Transform::AddUInt128(value) => value.serialized_length() + U8_SERIALIZED_LENGTH, - Transform::AddUInt256(value) => value.serialized_length() + U8_SERIALIZED_LENGTH, - Transform::AddUInt512(value) => value.serialized_length() + U8_SERIALIZED_LENGTH, - Transform::AddKeys(value) => value.serialized_length() + U8_SERIALIZED_LENGTH, - Transform::Failure(value) => 
value.serialized_length() + U8_SERIALIZED_LENGTH, - _ => U8_SERIALIZED_LENGTH, - } - } -} - -impl FromBytes for Transform { - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { - let (tag, remainder) = u8::from_bytes(bytes)?; - match tag { - TRANSFORM_IDENTITY_TAG => Ok((Transform::Identity, remainder)), - TRANSFORM_WRITE_CLVALUE_TAG => { - let (cl_value, remainder) = CLValue::from_bytes(remainder)?; - Ok((Transform::WriteCLValue(cl_value), remainder)) - } - TRANSFORM_WRITE_ACCOUNT_TAG => { - let (account_hash, remainder) = AccountHash::from_bytes(remainder)?; - Ok((Transform::WriteAccount(account_hash), remainder)) - } - TRANSFORM_WRITE_CONTRACT_WASM_TAG => Ok((Transform::WriteContractWasm, remainder)), - TRANSFORM_WRITE_CONTRACT_TAG => Ok((Transform::WriteContract, remainder)), - TRANSFORM_WRITE_CONTRACT_PACKAGE_TAG => { - Ok((Transform::WriteContractPackage, remainder)) - } - TRANSFORM_WRITE_DEPLOY_INFO_TAG => { - let (deploy_info, remainder) = DeployInfo::from_bytes(remainder)?; - Ok((Transform::WriteDeployInfo(deploy_info), remainder)) - } - TRANSFORM_WRITE_ERA_INFO_TAG => { - let (era_info, remainder) = EraInfo::from_bytes(remainder)?; - Ok((Transform::WriteEraInfo(era_info), remainder)) - } - TRANSFORM_WRITE_TRANSFER_TAG => { - let (transfer, remainder) = Transfer::from_bytes(remainder)?; - Ok((Transform::WriteTransfer(transfer), remainder)) - } - TRANSFORM_ADD_INT32_TAG => { - let (value_i32, remainder) = i32::from_bytes(remainder)?; - Ok((Transform::AddInt32(value_i32), remainder)) - } - TRANSFORM_ADD_UINT64_TAG => { - let (value_u64, remainder) = u64::from_bytes(remainder)?; - Ok((Transform::AddUInt64(value_u64), remainder)) - } - TRANSFORM_ADD_UINT128_TAG => { - let (value_u128, remainder) = U128::from_bytes(remainder)?; - Ok((Transform::AddUInt128(value_u128), remainder)) - } - TRANSFORM_ADD_UINT256_TAG => { - let (value_u256, remainder) = U256::from_bytes(remainder)?; - Ok((Transform::AddUInt256(value_u256), remainder)) - } - 
TRANSFORM_ADD_UINT512_TAG => { - let (value_u512, remainder) = U512::from_bytes(remainder)?; - Ok((Transform::AddUInt512(value_u512), remainder)) - } - TRANSFORM_ADD_KEYS_TAG => { - let (value, remainder) = Vec::::from_bytes(remainder)?; - Ok((Transform::AddKeys(value), remainder)) - } - TRANSFORM_FAILURE_TAG => { - let (value, remainder) = String::from_bytes(remainder)?; - Ok((Transform::Failure(value), remainder)) - } - _ => Err(bytesrepr::Error::Formatting), - } - } -} - -impl Distribution for Standard { - fn sample(&self, rng: &mut R) -> Transform { - // TODO - include WriteDeployInfo and WriteTransfer as options - match rng.gen_range(0..13) { - 0 => Transform::Identity, - 1 => Transform::WriteCLValue(CLValue::from_t(true).unwrap()), - 2 => Transform::WriteAccount(AccountHash::new(rng.gen())), - 3 => Transform::WriteContractWasm, - 4 => Transform::WriteContract, - 5 => Transform::WriteContractPackage, - 6 => Transform::AddInt32(rng.gen()), - 7 => Transform::AddUInt64(rng.gen()), - 8 => Transform::AddUInt128(rng.gen::().into()), - 9 => Transform::AddUInt256(rng.gen::().into()), - 10 => Transform::AddUInt512(rng.gen::().into()), - 11 => { - let mut named_keys = Vec::new(); - for _ in 0..rng.gen_range(1..6) { - let _ = named_keys.push(NamedKey { - name: rng.gen::().to_string(), - key: rng.gen::().to_string(), - }); - } - Transform::AddKeys(named_keys) - } - 12 => Transform::Failure(rng.gen::().to_string()), - _ => unreachable!(), - } - } -} - -#[cfg(test)] -mod tests { - use rand::{rngs::SmallRng, Rng, SeedableRng}; - - use super::*; - - fn get_rng() -> SmallRng { - let mut seed = [0u8; 32]; - getrandom::getrandom(seed.as_mut()).unwrap(); - SmallRng::from_seed(seed) - } - - #[test] - fn bytesrepr_test_transform() { - let mut rng = get_rng(); - let transform: Transform = rng.gen(); - bytesrepr::test_serialization_roundtrip(&transform); - } - - #[test] - fn bytesrepr_test_execution_result() { - let mut rng = get_rng(); - let execution_result: ExecutionResult = 
rng.gen(); - bytesrepr::test_serialization_roundtrip(&execution_result); - } -} diff --git a/types/src/file_utils.rs b/types/src/file_utils.rs new file mode 100644 index 0000000000..2b220aaaa0 --- /dev/null +++ b/types/src/file_utils.rs @@ -0,0 +1,78 @@ +//! Utilities for handling reading from and writing to files. + +use std::{ + fs, + io::{self, Write}, + os::unix::fs::OpenOptionsExt, + path::{Path, PathBuf}, +}; + +use thiserror::Error; + +/// Error reading a file. +#[derive(Debug, Error)] +#[error("could not read '{0}': {error}", .path.display())] +pub struct ReadFileError { + /// Path that failed to be read. + path: PathBuf, + /// The underlying OS error. + #[source] + error: io::Error, +} + +/// Error writing a file +#[derive(Debug, Error)] +#[error("could not write to '{0}': {error}", .path.display())] +pub struct WriteFileError { + /// Path that failed to be written to. + path: PathBuf, + /// The underlying OS error. + #[source] + error: io::Error, +} + +/// Read complete at `path` into memory. +/// +/// Wraps `fs::read`, but preserves the filename for better error printing. +pub fn read_file>(filename: P) -> Result, ReadFileError> { + let path = filename.as_ref(); + fs::read(path).map_err(|error| ReadFileError { + path: path.to_owned(), + error, + }) +} + +/// Write data to `path`. +/// +/// Wraps `fs::write`, but preserves the filename for better error printing. +pub(crate) fn write_file, B: AsRef<[u8]>>( + filename: P, + data: B, +) -> Result<(), WriteFileError> { + let path = filename.as_ref(); + fs::write(path, data.as_ref()).map_err(|error| WriteFileError { + path: path.to_owned(), + error, + }) +} + +/// Writes data to `path`, ensuring only the owner can read or write it. +/// +/// Otherwise functions like [`write_file`]. 
+pub(crate) fn write_private_file, B: AsRef<[u8]>>( + filename: P, + data: B, +) -> Result<(), WriteFileError> { + let path = filename.as_ref(); + fs::OpenOptions::new() + .write(true) + .create(true) + .truncate(true) + .mode(0o600) + .open(path) + .and_then(|mut file| file.write_all(data.as_ref())) + .map_err(|error| WriteFileError { + path: path.to_owned(), + error, + }) +} diff --git a/types/src/gas.rs b/types/src/gas.rs new file mode 100644 index 0000000000..12ecb6a0fe --- /dev/null +++ b/types/src/gas.rs @@ -0,0 +1,253 @@ +//! The `gas` module is used for working with Gas including converting to and from Motes. + +use alloc::vec::Vec; +use core::fmt; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::Rng; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + Motes, U512, +}; + +/// The `Gas` struct represents a `U512` amount of gas. +#[derive( + Debug, Default, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize, +)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub struct Gas(U512); + +impl Gas { + /// The maximum value of `Gas`. + pub const MAX: Gas = Gas(U512::MAX); + + /// Constructs a new `Gas`. + pub fn new>(value: T) -> Self { + Gas(value.into()) + } + + /// Constructs a new `Gas` with value `0`. + pub const fn zero() -> Self { + Gas(U512::zero()) + } + + /// Returns the inner `U512` value. + pub fn value(&self) -> U512 { + self.0 + } + + /// Converts the given `motes` to `Gas` by dividing them by `conv_rate`. + /// + /// Returns `None` if `motes_per_unit_of_gas == 0`. 
+ pub fn from_motes(motes: Motes, motes_per_unit_of_gas: u8) -> Option { + motes + .value() + .checked_div(U512::from(motes_per_unit_of_gas)) + .map(Self::new) + } + + /// Converts the given `U512` to `Gas` by dividing it by `gas_price`. + /// + /// Returns `None` if `gas_price == 0`. + pub fn from_price(base_amount: U512, gas_price: u8) -> Option { + base_amount + .checked_div(U512::from(gas_price)) + .map(Self::new) + } + + /// Checked integer addition. Computes `self + rhs`, returning `None` if overflow occurred. + pub fn checked_add(&self, rhs: Self) -> Option { + self.0.checked_add(rhs.value()).map(Self::new) + } + + /// Saturating integer addition. Computes `self + rhs`, returning max if overflow occurred. + pub fn saturating_add(self, rhs: Self) -> Self { + Gas(self.0.saturating_add(rhs.value())) + } + + /// Saturating integer subtraction. Computes `self + rhs`, returning min if overflow occurred. + pub fn saturating_sub(self, rhs: Self) -> Self { + Gas(self.0.saturating_sub(rhs.value())) + } + + /// Checked integer subtraction. Computes `self - rhs`, returning `None` if overflow occurred. + pub fn checked_sub(&self, rhs: Self) -> Option { + self.0.checked_sub(rhs.value()).map(Self::new) + } + + /// Checked integer subtraction. Computes `self * rhs`, returning `None` if overflow occurred. + pub fn checked_mul(&self, rhs: Self) -> Option { + self.0.checked_mul(rhs.value()).map(Self::new) + } + + /// Checked integer division. Computes `self / rhs`, returning `None` if overflow occurred. + pub fn checked_div(&self, rhs: Self) -> Option { + self.0.checked_div(rhs.value()).map(Self::new) + } + + /// Returns a random `Gas`. 
+ #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + Self(rng.gen::().into()) + } +} + +impl ToBytes for Gas { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.0.to_bytes() + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.0.write_bytes(writer) + } +} + +impl FromBytes for Gas { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (value, remainder) = U512::from_bytes(bytes)?; + Ok((Gas(value), remainder)) + } +} + +impl fmt::Display for Gas { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{:?}", self.0) + } +} + +impl From for Gas { + fn from(gas: u32) -> Self { + let gas_u512: U512 = gas.into(); + Gas::new(gas_u512) + } +} + +impl From for Gas { + fn from(gas: u64) -> Self { + let gas_u512: U512 = gas.into(); + Gas::new(gas_u512) + } +} + +#[cfg(test)] +mod tests { + use crate::U512; + + use crate::{Gas, Motes}; + + #[test] + fn should_be_able_to_get_instance_of_gas() { + let initial_value = 1; + let gas = Gas::new(U512::from(initial_value)); + assert_eq!( + initial_value, + gas.value().as_u64(), + "should have equal value" + ) + } + + #[test] + fn should_be_able_to_compare_two_instances_of_gas() { + let left_gas = Gas::new(U512::from(1)); + let right_gas = Gas::new(U512::from(1)); + assert_eq!(left_gas, right_gas, "should be equal"); + let right_gas = Gas::new(U512::from(2)); + assert_ne!(left_gas, right_gas, "should not be equal") + } + + #[test] + fn should_be_able_to_add_two_instances_of_gas() { + let left_gas = Gas::new(U512::from(1)); + let right_gas = Gas::new(U512::from(1)); + let expected_gas = Gas::new(U512::from(2)); + assert_eq!( + left_gas.checked_add(right_gas), + Some(expected_gas), + "should be equal" + ) + } + + #[test] + fn should_be_able_to_subtract_two_instances_of_gas() { + let left_gas = Gas::new(U512::from(1)); + let right_gas = 
Gas::new(U512::from(1)); + let expected_gas = Gas::new(U512::from(0)); + assert_eq!( + left_gas.checked_sub(right_gas), + Some(expected_gas), + "should be equal" + ) + } + + #[test] + fn should_be_able_to_multiply_two_instances_of_gas() { + let left_gas = Gas::new(U512::from(100)); + let right_gas = Gas::new(U512::from(10)); + let expected_gas = Gas::new(U512::from(1000)); + assert_eq!( + left_gas.checked_mul(right_gas), + Some(expected_gas), + "should be equal" + ) + } + + #[test] + fn should_be_able_to_divide_two_instances_of_gas() { + let left_gas = Gas::new(U512::from(1000)); + let right_gas = Gas::new(U512::from(100)); + let expected_gas = Gas::new(U512::from(10)); + assert_eq!( + left_gas.checked_div(right_gas), + Some(expected_gas), + "should be equal" + ) + } + + #[test] + fn should_be_able_to_convert_from_mote() { + let mote = Motes::new(U512::from(100)); + let gas = Gas::from_motes(mote, 10).expect("should have gas"); + let expected_gas = Gas::new(U512::from(10)); + assert_eq!(gas, expected_gas, "should be equal") + } + + #[test] + fn should_be_able_to_default() { + let gas = Gas::default(); + let expected_gas = Gas::zero(); + assert_eq!(gas, expected_gas, "should be equal") + } + + #[test] + fn should_be_able_to_compare_relative_value() { + let left_gas = Gas::new(U512::from(100)); + let right_gas = Gas::new(U512::from(10)); + assert!(left_gas > right_gas, "should be gt"); + let right_gas = Gas::new(U512::from(100)); + assert!(left_gas >= right_gas, "should be gte"); + assert!(left_gas <= right_gas, "should be lte"); + let left_gas = Gas::new(U512::from(10)); + assert!(left_gas < right_gas, "should be lt"); + } + + #[test] + fn should_support_checked_div_from_motes() { + let motes = Motes::zero(); + let conv_rate = 0; + let maybe = Gas::from_motes(motes, conv_rate); + assert!(maybe.is_none(), "should be none due to divide by zero"); + } +} diff --git a/types/src/gens.rs b/types/src/gens.rs index 4475f606f8..163384ba9a 100644 --- a/types/src/gens.rs +++ 
b/types/src/gens.rs @@ -1,33 +1,73 @@ //! Contains functions for generating arbitrary values for use by //! [`Proptest`](https://crates.io/crates/proptest). #![allow(missing_docs)] - -use alloc::{boxed::Box, string::String, vec}; - -use proptest::{ - array, bits, - collection::{btree_map, btree_set, vec}, - option, - prelude::*, - result, +use alloc::{ + boxed::Box, + collections::{BTreeMap, BTreeSet}, + string::String, + vec, }; use crate::{ - account::{AccountHash, Weight}, + account::{ + self, action_thresholds::gens::account_action_thresholds_arb, + associated_keys::gens::account_associated_keys_arb, Account, AccountHash, + }, + addressable_entity::{ + action_thresholds::gens::action_thresholds_arb, associated_keys::gens::associated_keys_arb, + ContractRuntimeTag, MessageTopics, NamedKeyAddr, NamedKeyValue, Parameters, Weight, + }, + block::BlockGlobalAddr, + byte_code::ByteCodeKind, + bytesrepr::Bytes, + contract_messages::{MessageAddr, MessageChecksum, MessageTopicSummary, TopicNameHash}, contracts::{ - ContractPackageStatus, ContractVersions, DisabledVersions, Groups, NamedKeys, Parameters, + Contract, ContractHash, ContractPackage, ContractPackageStatus, ContractVersionKey, + ContractVersions, EntryPoint as ContractEntryPoint, EntryPoints as ContractEntryPoints, + NamedKeys, + }, + crypto::{ + self, + gens::{public_key_arb_no_system, secret_key_arb_no_system}, + }, + deploy_info::gens::deploy_info_arb, + global_state::{Pointer, TrieMerkleProof, TrieMerkleProofStep}, + package::{EntityVersionKey, EntityVersions, Groups, PackageStatus}, + system::{ + auction::{ + gens::era_info_arb, Bid, BidAddr, BidKind, DelegationRate, Delegator, DelegatorBid, + DelegatorKind, Reservation, UnbondingPurse, ValidatorBid, ValidatorCredit, + WithdrawPurse, DELEGATION_RATE_DENOMINATOR, + }, + mint::BalanceHoldAddr, + SystemEntityType, }, - transfer::TransferAddr, - AccessRights, CLType, CLValue, Contract, ContractHash, ContractPackage, ContractVersionKey, - ContractWasm, 
EntryPoint, EntryPointAccess, EntryPointType, EntryPoints, EraId, Group, Key, - NamedArg, Parameter, Phase, ProtocolVersion, SemVer, URef, U128, U256, U512, + transaction::{ + gens::deploy_hash_arb, FieldsContainer, InitiatorAddrAndSecretKey, TransactionArgs, + TransactionRuntimeParams, TransactionV1Payload, + }, + transfer::{ + gens::{transfer_v1_addr_arb, transfer_v1_arb}, + TransferAddr, + }, + AccessRights, AddressableEntity, AddressableEntityHash, BlockTime, ByteCode, ByteCodeAddr, + CLType, CLValue, Digest, EntityAddr, EntityEntryPoint, EntityKind, EntryPointAccess, + EntryPointAddr, EntryPointPayment, EntryPointType, EntryPoints, EraId, Group, InitiatorAddr, + Key, NamedArg, Package, Parameter, Phase, PricingMode, ProtocolVersion, PublicKey, RuntimeArgs, + SemVer, StoredValue, TimeDiff, Timestamp, Transaction, TransactionEntryPoint, + TransactionInvocationTarget, TransactionScheduling, TransactionTarget, TransactionV1, URef, + U128, U256, U512, +}; +use proptest::{ + array, bits, bool, + collection::{self, vec, SizeRange}, + option, + prelude::*, + result, }; - -use crate::deploy_info::gens::{deploy_hash_arb, transfer_addr_arb}; -pub use crate::{deploy_info::gens::deploy_info_arb, transfer::gens::transfer_arb}; pub fn u8_slice_32() -> impl Strategy { - vec(any::(), 32).prop_map(|b| { + collection::vec(any::(), 32).prop_map(|b| { let mut res = [0u8; 32]; res.clone_from_slice(b.as_slice()); res @@ -43,8 +83,8 @@ pub fn u2_slice_32() -> impl Strategy { }) } -pub fn named_keys_arb(depth: usize) -> impl Strategy { - btree_map("\\PC*", key_arb(), depth) +pub(crate) fn named_keys_arb(depth: usize) -> impl Strategy { + collection::btree_map("\\PC*", key_arb(), depth).prop_map(NamedKeys::from) } pub fn access_rights_arb() -> impl Strategy { @@ -77,18 +117,86 @@ pub fn era_id_arb() -> impl Strategy { any::().prop_map(EraId::from) } +pub fn named_key_addr_arb() -> impl Strategy { + (entity_addr_arb(), u8_slice_32()) + .prop_map(|(entity_addr, b)| 
NamedKeyAddr::new_named_key_entry(entity_addr, b)) +} + +pub fn message_addr_arb() -> impl Strategy { + prop_oneof![ + (entity_addr_arb(), u8_slice_32()).prop_map(|(hash_addr, topic_name_hash)| { + MessageAddr::new_topic_addr(hash_addr, TopicNameHash::new(topic_name_hash)) + }), + (entity_addr_arb(), u8_slice_32(), example_u32_arb()).prop_map( + |(hash_addr, topic_name_hash, index)| MessageAddr::new_message_addr( + hash_addr, + TopicNameHash::new(topic_name_hash), + index + ) + ), + ] +} + +pub fn entry_point_addr_arb() -> impl Strategy { + (entity_addr_arb(), any::()).prop_map(|(entity_addr, b)| { + EntryPointAddr::new_v1_entry_point_addr(entity_addr, &b).unwrap() + }) +} + +pub fn byte_code_addr_arb() -> impl Strategy { + prop_oneof![ + Just(ByteCodeAddr::Empty), + u8_slice_32().prop_map(ByteCodeAddr::V1CasperWasm), + u8_slice_32().prop_map(ByteCodeAddr::V2CasperWasm), + ] +} + pub fn key_arb() -> impl Strategy { prop_oneof![ account_hash_arb().prop_map(Key::Account), u8_slice_32().prop_map(Key::Hash), uref_arb().prop_map(Key::URef), - transfer_addr_arb().prop_map(Key::Transfer), + transfer_v1_addr_arb().prop_map(Key::Transfer), deploy_hash_arb().prop_map(Key::DeployInfo), era_id_arb().prop_map(Key::EraInfo), uref_arb().prop_map(|uref| Key::Balance(uref.addr())), - account_hash_arb().prop_map(Key::Bid), + bid_addr_validator_arb().prop_map(Key::BidAddr), + bid_addr_delegator_arb().prop_map(Key::BidAddr), account_hash_arb().prop_map(Key::Withdraw), - era_id_arb().prop_map(Key::EraValidators), + u8_slice_32().prop_map(Key::Dictionary), + balance_hold_addr_arb().prop_map(Key::BalanceHold), + Just(Key::EraSummary) + ] +} + +pub fn all_keys_arb() -> impl Strategy { + prop_oneof![ + account_hash_arb().prop_map(Key::Account), + u8_slice_32().prop_map(Key::Hash), + uref_arb().prop_map(Key::URef), + transfer_v1_addr_arb().prop_map(Key::Transfer), + deploy_hash_arb().prop_map(Key::DeployInfo), + era_id_arb().prop_map(Key::EraInfo), + uref_arb().prop_map(|uref| 
Key::Balance(uref.addr())), + account_hash_arb().prop_map(Key::Withdraw), + u8_slice_32().prop_map(Key::Dictionary), + balance_hold_addr_arb().prop_map(Key::BalanceHold), + Just(Key::EraSummary), + Just(Key::SystemEntityRegistry), + Just(Key::ChainspecRegistry), + Just(Key::ChecksumRegistry), + bid_addr_arb().prop_map(Key::BidAddr), + account_hash_arb().prop_map(Key::Bid), + account_hash_arb().prop_map(Key::Unbond), + u8_slice_32().prop_map(Key::SmartContract), + byte_code_addr_arb().prop_map(Key::ByteCode), + entity_addr_arb().prop_map(Key::AddressableEntity), + block_global_addr_arb().prop_map(Key::BlockGlobal), + message_addr_arb().prop_map(Key::Message), + named_key_addr_arb().prop_map(Key::NamedKey), + balance_hold_addr_arb().prop_map(Key::BalanceHold), + entry_point_addr_arb().prop_map(Key::EntryPoint), + entity_addr_arb().prop_map(Key::State), ] } @@ -98,6 +206,7 @@ pub fn colliding_key_arb() -> impl Strategy { u2_slice_32().prop_map(Key::Hash), u2_slice_32().prop_map(|bytes| Key::URef(URef::new(bytes, AccessRights::NONE))), u2_slice_32().prop_map(|bytes| Key::Transfer(TransferAddr::new(bytes))), + u2_slice_32().prop_map(Key::Dictionary), ] } @@ -105,10 +214,92 @@ pub fn account_hash_arb() -> impl Strategy { u8_slice_32().prop_map(AccountHash::new) } +pub fn entity_addr_arb() -> impl Strategy { + prop_oneof![ + u8_slice_32().prop_map(EntityAddr::System), + u8_slice_32().prop_map(EntityAddr::Account), + u8_slice_32().prop_map(EntityAddr::SmartContract), + ] +} + +pub fn topic_name_hash_arb() -> impl Strategy { + u8_slice_32().prop_map(TopicNameHash::new) +} + +pub fn bid_addr_validator_arb() -> impl Strategy { + u8_slice_32().prop_map(BidAddr::new_validator_addr) +} + +pub fn bid_addr_delegator_arb() -> impl Strategy { + let x = u8_slice_32(); + let y = u8_slice_32(); + (x, y).prop_map(BidAddr::new_delegator_account_addr) +} + +pub fn bid_legacy_arb() -> impl Strategy { + u8_slice_32().prop_map(BidAddr::legacy) +} + +pub fn bid_addr_delegated_arb() -> impl 
Strategy { + (public_key_arb_no_system(), delegator_kind_arb()).prop_map(|(validator, delegator_kind)| { + BidAddr::new_delegator_kind(&validator, &delegator_kind) + }) +} + +pub fn bid_addr_credit_arb() -> impl Strategy { + (public_key_arb_no_system(), era_id_arb()) + .prop_map(|(validator, era_id)| BidAddr::new_credit(&validator, era_id)) +} + +pub fn bid_addr_reservation_account_arb() -> impl Strategy { + (public_key_arb_no_system(), public_key_arb_no_system()) + .prop_map(|(validator, delegator)| BidAddr::new_reservation_account(&validator, &delegator)) +} + +pub fn bid_addr_reservation_purse_arb() -> impl Strategy { + (public_key_arb_no_system(), u8_slice_32()) + .prop_map(|(validator, uref)| BidAddr::new_reservation_purse(&validator, uref)) +} + +pub fn bid_addr_new_unbond_account_arb() -> impl Strategy { + (public_key_arb_no_system(), public_key_arb_no_system()) + .prop_map(|(validator, unbonder)| BidAddr::new_unbond_account(validator, unbonder)) +} + +pub fn bid_addr_arb() -> impl Strategy { + prop_oneof![ + bid_addr_validator_arb(), + bid_addr_delegator_arb(), + bid_legacy_arb(), + bid_addr_delegated_arb(), + bid_addr_credit_arb(), + bid_addr_reservation_account_arb(), + bid_addr_reservation_purse_arb(), + bid_addr_new_unbond_account_arb(), + ] +} + +pub fn balance_hold_addr_arb() -> impl Strategy { + let x = uref_arb().prop_map(|uref| uref.addr()); + let y = any::(); + (x, y).prop_map(|(x, y)| BalanceHoldAddr::new_gas(x, BlockTime::new(y))) +} + +pub fn block_global_addr_arb() -> impl Strategy { + prop_oneof![ + 0 => Just(BlockGlobalAddr::BlockTime), + 1 => Just(BlockGlobalAddr::MessageCount) + ] +} + pub fn weight_arb() -> impl Strategy { any::().prop_map(Weight::new) } +pub fn account_weight_arb() -> impl Strategy { + any::().prop_map(account::Weight::new) +} + pub fn sem_ver_arb() -> impl Strategy { (any::(), any::(), any::()) .prop_map(|(major, minor, patch)| SemVer::new(major, minor, patch)) @@ -119,15 +310,19 @@ pub fn protocol_version_arb() -> impl 
Strategy { } pub fn u128_arb() -> impl Strategy { - vec(any::(), 0..16).prop_map(|b| U128::from_little_endian(b.as_slice())) + collection::vec(any::(), 0..16).prop_map(|b| U128::from_little_endian(b.as_slice())) } pub fn u256_arb() -> impl Strategy { - vec(any::(), 0..32).prop_map(|b| U256::from_little_endian(b.as_slice())) + collection::vec(any::(), 0..32).prop_map(|b| U256::from_little_endian(b.as_slice())) } pub fn u512_arb() -> impl Strategy { - vec(any::(), 0..64).prop_map(|b| U512::from_little_endian(b.as_slice())) + prop_oneof![ + 1 => Just(U512::zero()), + 8 => collection::vec(any::(), 0..64).prop_map(|b| U512::from_little_endian(b.as_slice())), + 1 => Just(U512::MAX), + ] } pub fn cl_simple_type_arb() -> impl Strategy { @@ -234,12 +429,13 @@ pub fn cl_value_arb() -> impl Strategy { uref_arb().prop_map(|x| CLValue::from_t(x).expect("should create CLValue")), ".*".prop_map(|x: String| CLValue::from_t(x).expect("should create CLValue")), option::of(any::()).prop_map(|x| CLValue::from_t(x).expect("should create CLValue")), - vec(uref_arb(), 0..100).prop_map(|x| CLValue::from_t(x).expect("should create CLValue")), + collection::vec(uref_arb(), 0..100) + .prop_map(|x| CLValue::from_t(x).expect("should create CLValue")), result::maybe_err(key_arb(), ".*") .prop_map(|x| CLValue::from_t(x).expect("should create CLValue")), - btree_map(".*", u512_arb(), 0..100) + collection::btree_map(".*", u512_arb(), 0..100) .prop_map(|x| CLValue::from_t(x).expect("should create CLValue")), - (any::()).prop_map(|x| CLValue::from_t(x).expect("should create CLValue")), + any::().prop_map(|x| CLValue::from_t(x).expect("should create CLValue")), (any::(), any::()) .prop_map(|x| CLValue::from_t(x).expect("should create CLValue")), (any::(), any::(), any::()) @@ -264,14 +460,24 @@ pub fn group_arb() -> impl Strategy { pub fn entry_point_access_arb() -> impl Strategy { prop_oneof![ Just(EntryPointAccess::Public), - vec(group_arb(), 0..32).prop_map(EntryPointAccess::Groups), + 
collection::vec(group_arb(), 0..32).prop_map(EntryPointAccess::Groups), + Just(EntryPointAccess::Template), ] } pub fn entry_point_type_arb() -> impl Strategy { prop_oneof![ - Just(EntryPointType::Session), - Just(EntryPointType::Contract), + Just(EntryPointType::Caller), + Just(EntryPointType::Called), + Just(EntryPointType::Factory), + ] +} + +pub fn entry_point_payment_arb() -> impl Strategy { + prop_oneof![ + Just(EntryPointPayment::Caller), + Just(EntryPointPayment::DirectInvocationOnly), + Just(EntryPointPayment::SelfOnward), ] } @@ -280,10 +486,33 @@ pub fn parameter_arb() -> impl Strategy { } pub fn parameters_arb() -> impl Strategy { - vec(parameter_arb(), 0..10) + collection::vec(parameter_arb(), 0..10) +} + +pub fn entry_point_arb() -> impl Strategy { + ( + ".*", + parameters_arb(), + entry_point_type_arb(), + entry_point_access_arb(), + entry_point_payment_arb(), + cl_type_arb(), + ) + .prop_map( + |(name, parameters, entry_point_type, entry_point_access, entry_point_payment, ret)| { + EntityEntryPoint::new( + name, + parameters, + ret, + entry_point_access, + entry_point_type, + entry_point_payment, + ) + }, + ) } -pub fn entry_point_arb() -> impl Strategy { +pub fn contract_entry_point_arb() -> impl Strategy { ( ".*", parameters_arb(), @@ -293,19 +522,76 @@ pub fn entry_point_arb() -> impl Strategy { ) .prop_map( |(name, parameters, entry_point_type, entry_point_access, ret)| { - EntryPoint::new(name, parameters, ret, entry_point_access, entry_point_type) + ContractEntryPoint::new(name, parameters, ret, entry_point_access, entry_point_type) }, ) } pub fn entry_points_arb() -> impl Strategy { - vec(entry_point_arb(), 1..10).prop_map(EntryPoints::from) + collection::vec(entry_point_arb(), 1..10).prop_map(EntryPoints::from) +} + +pub fn contract_entry_points_arb() -> impl Strategy { + collection::vec(contract_entry_point_arb(), 1..10).prop_map(ContractEntryPoints::from) +} + +pub fn message_topics_arb() -> impl Strategy { + collection::vec(any::(), 
1..100).prop_map(|topic_names| { + MessageTopics::from( + topic_names + .into_iter() + .map(|name| { + let name_hash = crypto::blake2b(&name).into(); + (name, name_hash) + }) + .collect::>(), + ) + }) +} + +pub fn account_arb() -> impl Strategy { + ( + account_hash_arb(), + named_keys_arb(20), + uref_arb(), + account_associated_keys_arb(), + account_action_thresholds_arb(), + ) + .prop_map( + |(account_hash, named_keys, main_purse, associated_keys, action_thresholds)| { + Account::new( + account_hash, + named_keys, + main_purse, + associated_keys, + action_thresholds, + ) + }, + ) +} + +pub fn contract_package_arb() -> impl Strategy { + ( + uref_arb(), + contract_versions_arb(), + disabled_contract_versions_arb(), + groups_arb(), + ) + .prop_map(|(access_key, versions, disabled_versions, groups)| { + ContractPackage::new( + access_key, + versions, + disabled_versions, + groups, + ContractPackageStatus::default(), + ) + }) } pub fn contract_arb() -> impl Strategy { ( protocol_version_arb(), - entry_points_arb(), + contract_entry_points_arb(), u8_slice_32(), u8_slice_32(), named_keys_arb(20), @@ -329,8 +615,70 @@ pub fn contract_arb() -> impl Strategy { ) } -pub fn contract_wasm_arb() -> impl Strategy { - vec(any::(), 1..1000).prop_map(ContractWasm::new) +pub fn system_entity_type_arb() -> impl Strategy { + prop_oneof![ + Just(SystemEntityType::Mint), + Just(SystemEntityType::HandlePayment), + Just(SystemEntityType::StandardPayment), + Just(SystemEntityType::Auction), + ] +} + +pub fn contract_runtime_arb() -> impl Strategy { + prop_oneof![ + Just(ContractRuntimeTag::VmCasperV1), + Just(ContractRuntimeTag::VmCasperV2), + ] +} + +pub fn entity_kind_arb() -> impl Strategy { + prop_oneof![ + system_entity_type_arb().prop_map(EntityKind::System), + account_hash_arb().prop_map(EntityKind::Account), + contract_runtime_arb().prop_map(EntityKind::SmartContract), + ] +} + +pub fn addressable_entity_hash_arb() -> impl Strategy { + 
u8_slice_32().prop_map(AddressableEntityHash::new) +} + +pub fn addressable_entity_arb() -> impl Strategy { + ( + protocol_version_arb(), + u8_slice_32(), + u8_slice_32(), + uref_arb(), + associated_keys_arb(), + action_thresholds_arb(), + entity_kind_arb(), + ) + .prop_map( + |( + protocol_version, + contract_package_hash_arb, + contract_wasm_hash, + main_purse, + associated_keys, + action_thresholds, + entity_kind, + )| { + AddressableEntity::new( + contract_package_hash_arb.into(), + contract_wasm_hash.into(), + protocol_version, + main_purse, + associated_keys, + action_thresholds, + entity_kind, + ) + }, + ) +} + +pub fn byte_code_arb() -> impl Strategy { + collection::vec(any::(), 1..1000) + .prop_map(|byte_code| ByteCode::new(ByteCodeKind::V1CasperWasm, byte_code)) } pub fn contract_version_key_arb() -> impl Strategy { @@ -338,36 +686,727 @@ pub fn contract_version_key_arb() -> impl Strategy { .prop_map(|(major, contract_ver)| ContractVersionKey::new(major, contract_ver)) } +pub fn entity_version_key_arb() -> impl Strategy { + (1..32u32, 1..1000u32) + .prop_map(|(major, contract_ver)| EntityVersionKey::new(major, contract_ver)) +} + pub fn contract_versions_arb() -> impl Strategy { - btree_map( + collection::btree_map( contract_version_key_arb(), u8_slice_32().prop_map(ContractHash::new), 1..5, ) } -pub fn disabled_versions_arb() -> impl Strategy { - btree_set(contract_version_key_arb(), 0..5) +pub fn entity_versions_arb() -> impl Strategy { + collection::btree_map(entity_version_key_arb(), entity_addr_arb(), 1..5) + .prop_map(EntityVersions::from) +} + +pub fn disabled_versions_arb() -> impl Strategy> { + collection::btree_set(entity_version_key_arb(), 0..5) +} + +pub fn disabled_contract_versions_arb() -> impl Strategy> { + collection::btree_set(contract_version_key_arb(), 0..5) } pub fn groups_arb() -> impl Strategy { - btree_map(group_arb(), btree_set(uref_arb(), 1..10), 0..5) + collection::btree_map(group_arb(), collection::btree_set(uref_arb(), 1..10), 
0..5) + .prop_map(Groups::from) } -pub fn contract_package_arb() -> impl Strategy { - ( - uref_arb(), - contract_versions_arb(), - disabled_versions_arb(), - groups_arb(), - ) - .prop_map(|(access_key, versions, disabled_versions, groups)| { - ContractPackage::new( - access_key, +pub fn package_arb() -> impl Strategy { + (entity_versions_arb(), disabled_versions_arb(), groups_arb()).prop_map( + |(versions, disabled_versions, groups)| { + Package::new( versions, disabled_versions, groups, - ContractPackageStatus::default(), + PackageStatus::default(), ) + }, + ) +} + +pub(crate) fn delegator_arb() -> impl Strategy { + ( + public_key_arb_no_system(), + u512_arb(), + uref_arb(), + public_key_arb_no_system(), + ) + .prop_map( + |(delegator_pk, staked_amount, bonding_purse, validator_pk)| { + Delegator::unlocked(delegator_pk, staked_amount, bonding_purse, validator_pk) + }, + ) +} + +pub(crate) fn delegator_kind_arb() -> impl Strategy { + prop_oneof![ + public_key_arb_no_system().prop_map(DelegatorKind::PublicKey), + array::uniform32(bits::u8::ANY).prop_map(DelegatorKind::Purse) + ] +} + +pub(crate) fn delegator_bid_arb() -> impl Strategy { + ( + public_key_arb_no_system(), + u512_arb(), + uref_arb(), + public_key_arb_no_system(), + ) + .prop_map( + |(delegator_pk, staked_amount, bonding_purse, validator_pk)| { + DelegatorBid::unlocked( + delegator_pk.into(), + staked_amount, + bonding_purse, + validator_pk, + ) + }, + ) +} + +fn delegation_rate_arb() -> impl Strategy { + 0..=DELEGATION_RATE_DENOMINATOR // Maximum, allowed value for delegation rate. 
+} + +pub(crate) fn reservation_bid_arb() -> impl Strategy { + reservation_arb().prop_map(|reservation| BidKind::Reservation(Box::new(reservation))) +} + +pub(crate) fn reservation_arb() -> impl Strategy { + ( + public_key_arb_no_system(), + delegator_kind_arb(), + delegation_rate_arb(), + ) + .prop_map(|(validator_pk, delegator_kind, delegation_rate)| { + Reservation::new(validator_pk, delegator_kind, delegation_rate) + }) +} + +pub(crate) fn unified_bid_arb( + delegations_len: impl Into, +) -> impl Strategy { + ( + public_key_arb_no_system(), + uref_arb(), + u512_arb(), + delegation_rate_arb(), + bool::ANY, + collection::vec(delegator_arb(), delegations_len), + ) + .prop_map( + |( + validator_public_key, + bonding_purse, + staked_amount, + delegation_rate, + is_locked, + new_delegators, + )| { + let mut bid = if is_locked { + Bid::locked( + validator_public_key, + bonding_purse, + staked_amount, + delegation_rate, + 1u64, + ) + } else { + Bid::unlocked( + validator_public_key, + bonding_purse, + staked_amount, + delegation_rate, + ) + }; + let delegators = bid.delegators_mut(); + new_delegators.into_iter().for_each(|delegator| { + assert!(delegators + .insert(delegator.delegator_public_key().clone(), delegator) + .is_none()); + }); + BidKind::Unified(Box::new(bid)) + }, + ) +} + +pub(crate) fn delegator_bid_kind_arb() -> impl Strategy { + delegator_bid_arb().prop_map(|delegator| BidKind::Delegator(Box::new(delegator))) +} + +pub(crate) fn validator_bid_arb() -> impl Strategy { + ( + public_key_arb_no_system(), + uref_arb(), + u512_arb(), + delegation_rate_arb(), + bool::ANY, + ) + .prop_map( + |(validator_public_key, bonding_purse, staked_amount, delegation_rate, is_locked)| { + let validator_bid = if is_locked { + ValidatorBid::locked( + validator_public_key, + bonding_purse, + staked_amount, + delegation_rate, + 1u64, + 0, + u64::MAX, + 0, + ) + } else { + ValidatorBid::unlocked( + validator_public_key, + bonding_purse, + staked_amount, + delegation_rate, + 0, 
+ u64::MAX, + 0, + ) + }; + BidKind::Validator(Box::new(validator_bid)) + }, + ) +} + +pub(crate) fn credit_bid_arb() -> impl Strategy { + (public_key_arb_no_system(), era_id_arb(), u512_arb()).prop_map( + |(validator_public_key, era_id, amount)| { + BidKind::Credit(Box::new(ValidatorCredit::new( + validator_public_key, + era_id, + amount, + ))) + }, + ) +} + +fn withdraw_arb() -> impl Strategy { + ( + uref_arb(), + public_key_arb_no_system(), + public_key_arb_no_system(), + era_id_arb(), + u512_arb(), + ) + .prop_map(|(bonding_purse, validator_pk, unbonder_pk, era, amount)| { + WithdrawPurse::new(bonding_purse, validator_pk, unbonder_pk, era, amount) + }) +} + +fn withdraws_arb(size: impl Into) -> impl Strategy> { + collection::vec(withdraw_arb(), size) +} + +fn unbonding_arb() -> impl Strategy { + ( + uref_arb(), + public_key_arb_no_system(), + public_key_arb_no_system(), + era_id_arb(), + u512_arb(), + option::of(public_key_arb_no_system()), + ) + .prop_map( + |( + bonding_purse, + validator_public_key, + unbonder_public_key, + era, + amount, + new_validator, + )| { + UnbondingPurse::new( + bonding_purse, + validator_public_key, + unbonder_public_key, + era, + amount, + new_validator, + ) + }, + ) +} + +fn unbondings_arb(size: impl Into) -> impl Strategy> { + collection::vec(unbonding_arb(), size) +} + +fn message_topic_summary_arb() -> impl Strategy { + (any::(), any::(), "test").prop_map(|(message_count, blocktime, topic_name)| { + MessageTopicSummary { + message_count, + blocktime: BlockTime::new(blocktime), + topic_name, + } + }) +} + +fn message_summary_arb() -> impl Strategy { + u8_slice_32().prop_map(MessageChecksum) +} + +pub fn named_key_value_arb() -> impl Strategy { + (key_arb(), "test").prop_map(|(key, string)| { + let cl_key = CLValue::from_t(key).unwrap(); + let cl_string = CLValue::from_t(string).unwrap(); + NamedKeyValue::new(cl_key, cl_string) + }) +} + +pub fn stored_value_arb() -> impl Strategy { + prop_oneof![ + 
cl_value_arb().prop_map(StoredValue::CLValue), + account_arb().prop_map(StoredValue::Account), + byte_code_arb().prop_map(StoredValue::ByteCode), + contract_arb().prop_map(StoredValue::Contract), + contract_package_arb().prop_map(StoredValue::ContractPackage), + addressable_entity_arb().prop_map(StoredValue::AddressableEntity), + package_arb().prop_map(StoredValue::SmartContract), + transfer_v1_arb().prop_map(StoredValue::Transfer), + deploy_info_arb().prop_map(StoredValue::DeployInfo), + era_info_arb(1..10).prop_map(StoredValue::EraInfo), + unified_bid_arb(0..3).prop_map(StoredValue::BidKind), + validator_bid_arb().prop_map(StoredValue::BidKind), + delegator_bid_kind_arb().prop_map(StoredValue::BidKind), + reservation_bid_arb().prop_map(StoredValue::BidKind), + credit_bid_arb().prop_map(StoredValue::BidKind), + withdraws_arb(1..50).prop_map(StoredValue::Withdraw), + unbondings_arb(1..50).prop_map(StoredValue::Unbonding), + message_topic_summary_arb().prop_map(StoredValue::MessageTopic), + message_summary_arb().prop_map(StoredValue::Message), + named_key_value_arb().prop_map(StoredValue::NamedKey), + collection::vec(any::(), 0..1000).prop_map(StoredValue::RawBytes), + ] + .prop_map(|stored_value| + // The following match statement is here only to make sure + // we don't forget to update the generator when a new variant is added. 
+ match stored_value { + StoredValue::CLValue(_) => stored_value, + StoredValue::Account(_) => stored_value, + StoredValue::ContractWasm(_) => stored_value, + StoredValue::Contract(_) => stored_value, + StoredValue::ContractPackage(_) => stored_value, + StoredValue::Transfer(_) => stored_value, + StoredValue::DeployInfo(_) => stored_value, + StoredValue::EraInfo(_) => stored_value, + StoredValue::Bid(_) => stored_value, + StoredValue::Withdraw(_) => stored_value, + StoredValue::Unbonding(_) => stored_value, + StoredValue::AddressableEntity(_) => stored_value, + StoredValue::BidKind(_) => stored_value, + StoredValue::SmartContract(_) => stored_value, + StoredValue::ByteCode(_) => stored_value, + StoredValue::MessageTopic(_) => stored_value, + StoredValue::Message(_) => stored_value, + StoredValue::NamedKey(_) => stored_value, + StoredValue::Prepayment(_) => stored_value, + StoredValue::EntryPoint(_) => stored_value, + StoredValue::RawBytes(_) => stored_value, + }) +} + +pub fn blake2b_hash_arb() -> impl Strategy { + vec(any::(), 0..1000).prop_map(Digest::hash) +} + +pub fn trie_pointer_arb() -> impl Strategy { + prop_oneof![ + blake2b_hash_arb().prop_map(Pointer::LeafPointer), + blake2b_hash_arb().prop_map(Pointer::NodePointer) + ] +} + +pub fn trie_merkle_proof_step_arb() -> impl Strategy { + const POINTERS_SIZE: usize = 32; + const AFFIX_SIZE: usize = 6; + + prop_oneof![ + ( + ::arbitrary(), + vec((::arbitrary(), trie_pointer_arb()), POINTERS_SIZE) + ) + .prop_map(|(hole_index, indexed_pointers_with_hole)| { + TrieMerkleProofStep::Node { + hole_index, + indexed_pointers_with_hole, + } + }), + vec(::arbitrary(), AFFIX_SIZE).prop_map(|affix| { + TrieMerkleProofStep::Extension { + affix: affix.into(), + } }) + ] +} + +pub fn trie_merkle_proof_arb() -> impl Strategy> { + const STEPS_SIZE: usize = 6; + + ( + key_arb(), + stored_value_arb(), + vec(trie_merkle_proof_step_arb(), STEPS_SIZE), + ) + .prop_map(|(key, value, proof_steps)| TrieMerkleProof::new(key, value, 
proof_steps.into())) +} + +pub fn transaction_scheduling_arb() -> impl Strategy { + prop_oneof![Just(TransactionScheduling::Standard),] +} + +pub fn json_compliant_transaction_scheduling_arb() -> impl Strategy { + prop_oneof![Just(TransactionScheduling::Standard),] +} + +pub fn transaction_invocation_target_arb() -> impl Strategy { + prop_oneof![ + addressable_entity_hash_arb().prop_map(TransactionInvocationTarget::new_invocable_entity), + Just(TransactionInvocationTarget::new_invocable_entity_alias( + "abcd".to_string() + )), + Just(TransactionInvocationTarget::new_package_alias_with_major( + "abcd".to_string(), + None, + None + )), + Just(TransactionInvocationTarget::new_package_alias_with_major( + "abcd".to_string(), + Some(1), + None + )), + Just(TransactionInvocationTarget::new_package_alias_with_major( + "abcd".to_string(), + Some(1), + Some(1) + )), + Just(TransactionInvocationTarget::new_package_alias_with_major( + "abcd".to_string(), + None, + Some(1) + )), + u8_slice_32().prop_map(|addr| { + TransactionInvocationTarget::new_package_with_major(addr.into(), None, None) + }), + u8_slice_32().prop_map(|addr| { + TransactionInvocationTarget::new_package_with_major(addr.into(), Some(1), Some(2)) + }), + u8_slice_32().prop_map(|addr| { + TransactionInvocationTarget::new_package_with_major(addr.into(), None, Some(2)) + }), + u8_slice_32().prop_map(|addr| { + TransactionInvocationTarget::new_package_with_major(addr.into(), Some(1), None) + }) + ] +} + +pub fn stored_transaction_target() -> impl Strategy { + ( + transaction_invocation_target_arb(), + transaction_stored_runtime_params_arb(), + ) + .prop_map(|(id, runtime)| TransactionTarget::Stored { id, runtime }) +} + +fn transferred_value_arb() -> impl Strategy { + any::() +} + +fn seed_arb() -> impl Strategy> { + option::of(array::uniform32(any::())) +} + +pub fn session_transaction_target() -> impl Strategy { + ( + any::(), + Just(Bytes::from(vec![1; 10])), + transaction_session_runtime_params_arb(), + ) + 
.prop_map( + |(is_install_upgrade, module_bytes, runtime)| TransactionTarget::Session { + is_install_upgrade, + module_bytes, + runtime, + }, + ) +} + +pub(crate) fn transaction_stored_runtime_params_arb( +) -> impl Strategy { + prop_oneof![ + Just(TransactionRuntimeParams::VmCasperV1), + transferred_value_arb().prop_map(|transferred_value| { + TransactionRuntimeParams::VmCasperV2 { + transferred_value, + seed: None, + } + }), + ] +} + +pub(crate) fn transaction_session_runtime_params_arb( +) -> impl Strategy { + prop_oneof![ + Just(TransactionRuntimeParams::VmCasperV1), + (transferred_value_arb(), seed_arb()).prop_map(|(transferred_value, seed)| { + TransactionRuntimeParams::VmCasperV2 { + transferred_value, + seed, + } + }) + ] +} + +pub fn transaction_target_arb() -> impl Strategy { + prop_oneof![ + Just(TransactionTarget::Native), + ( + transaction_invocation_target_arb(), + transaction_stored_runtime_params_arb(), + ) + .prop_map(|(id, runtime)| TransactionTarget::Stored { id, runtime }), + ( + any::(), + Just(Bytes::from(vec![1; 10])), + transaction_session_runtime_params_arb(), + ) + .prop_map(|(is_install_upgrade, module_bytes, runtime)| { + TransactionTarget::Session { + is_install_upgrade, + module_bytes, + runtime, + } + }) + ] +} + +pub fn legal_target_entry_point_calls_arb( +) -> impl Strategy { + prop_oneof![ + native_entry_point_arb().prop_map(|s| (TransactionTarget::Native, s)), + stored_transaction_target() + .prop_map(|s| (s, TransactionEntryPoint::Custom("ABC".to_string()))), + session_transaction_target().prop_map(|s| (s, TransactionEntryPoint::Call)), + ] +} + +pub fn native_entry_point_arb() -> impl Strategy { + prop_oneof![ + Just(TransactionEntryPoint::Transfer), + Just(TransactionEntryPoint::AddBid), + Just(TransactionEntryPoint::WithdrawBid), + Just(TransactionEntryPoint::Delegate), + Just(TransactionEntryPoint::Undelegate), + Just(TransactionEntryPoint::Redelegate), + Just(TransactionEntryPoint::ActivateBid), + 
Just(TransactionEntryPoint::ChangeBidPublicKey), + Just(TransactionEntryPoint::AddReservations), + Just(TransactionEntryPoint::CancelReservations), + ] +} +pub fn transaction_entry_point_arb() -> impl Strategy { + prop_oneof![ + native_entry_point_arb(), + Just(TransactionEntryPoint::Call), + Just(TransactionEntryPoint::Custom("custom".to_string())), + ] +} + +pub fn runtime_args_arb() -> impl Strategy { + let mut runtime_args_1 = RuntimeArgs::new(); + let semi_random_string_pairs = [ + ("977837db-8dba-48c2-86f1-32f9740631db", "b7b3b3b3-8b3b-48c2-86f1-32f9740631db"), + ("5de3eecc-b9c8-477f-bebe-937c3a16df85", "2ffd7939-34e5-4660-af9f-772a83011ce0"), + ("036db036-8b7b-4009-a0d4-c9ce", "515f4fe6-06c8-45c5-8554-f07e727a842d036db036-8b7b-4009-a0d4-c9ce036db036-8b7b-4009-a0d4-c9ce"), + ]; + for (key, val_str) in semi_random_string_pairs.iter() { + let _ = runtime_args_1.insert(key.to_string(), Bytes::from(val_str.as_bytes())); + } + prop_oneof![Just(runtime_args_1)] +} + +fn transaction_args_bytes_arbitrary() -> impl Strategy { + prop::collection::vec(any::(), 0..100) + .prop_map(|bytes| TransactionArgs::Bytesrepr(bytes.into())) +} + +pub fn transaction_args_arb() -> impl Strategy { + prop_oneof![ + runtime_args_arb().prop_map(TransactionArgs::Named), + transaction_args_bytes_arbitrary() + ] +} + +pub fn fields_arb() -> impl Strategy> { + collection::btree_map( + any::(), + any::().prop_map(|s| Bytes::from(s.as_bytes())), + 3..30, + ) +} +pub fn v1_transaction_payload_arb() -> impl Strategy { + ( + any::(), + timestamp_arb(), + any::(), + pricing_mode_arb(), + initiator_addr_arb(), + fields_arb(), + ) + .prop_map( + |(chain_name, timestamp, ttl_millis, pricing_mode, initiator_addr, fields)| { + TransactionV1Payload::new( + chain_name, + timestamp, + TimeDiff::from_millis(ttl_millis), + pricing_mode, + initiator_addr, + fields, + ) + }, + ) +} + +pub fn fixed_pricing_mode_arb() -> impl Strategy { + (any::(), any::()).prop_map(|(gas_price_tolerance, 
additional_computation_factor)| { + PricingMode::Fixed { + gas_price_tolerance, + additional_computation_factor, + } + }) +} + +pub fn pricing_mode_arb() -> impl Strategy { + prop_oneof![ + (any::(), any::(), any::()).prop_map( + |(payment_amount, gas_price_tolerance, standard_payment)| { + PricingMode::PaymentLimited { + payment_amount, + gas_price_tolerance, + standard_payment, + } + } + ), + fixed_pricing_mode_arb(), + ] +} + +pub fn initiator_addr_arb() -> impl Strategy { + prop_oneof![ + public_key_arb_no_system().prop_map(InitiatorAddr::PublicKey), + u2_slice_32().prop_map(|hash| InitiatorAddr::AccountHash(AccountHash::new(hash))), + ] +} + +pub fn timestamp_arb() -> impl Strategy { + //The weird u64 value is the max milliseconds that are bofeore year 10000. 5 digit years are + // not rfc3339 compliant and will cause an error when trying to serialize to json. + prop_oneof![Just(0_u64), Just(1_u64), Just(253_402_300_799_999_u64)].prop_map(Timestamp::from) +} + +pub fn legal_v1_transaction_arb() -> impl Strategy { + ( + any::(), + timestamp_arb(), + any::(), + pricing_mode_arb(), + secret_key_arb_no_system(), + transaction_args_arb(), + json_compliant_transaction_scheduling_arb(), + legal_target_entry_point_calls_arb(), + ) + .prop_map( + |( + chain_name, + timestamp, + ttl, + pricing_mode, + secret_key, + args, + scheduling, + (target, entry_point), + )| { + let public_key = PublicKey::from(&secret_key); + let initiator_addr = InitiatorAddr::PublicKey(public_key); + let initiator_addr_with_secret = InitiatorAddrAndSecretKey::Both { + initiator_addr, + secret_key: &secret_key, + }; + let container = FieldsContainer::new(args, target, entry_point, scheduling); + TransactionV1::build( + chain_name, + timestamp, + TimeDiff::from_seconds(ttl), + pricing_mode, + container.to_map().unwrap(), + initiator_addr_with_secret, + ) + }, + ) +} +pub fn v1_transaction_arb() -> impl Strategy { + ( + any::(), + timestamp_arb(), + any::(), + pricing_mode_arb(), + 
secret_key_arb_no_system(), + runtime_args_arb(), + transaction_target_arb(), + transaction_entry_point_arb(), + transaction_scheduling_arb(), + ) + .prop_map( + |( + chain_name, + timestamp, + ttl, + pricing_mode, + secret_key, + args, + target, + entry_point, + scheduling, + )| { + let public_key = PublicKey::from(&secret_key); + let initiator_addr = InitiatorAddr::PublicKey(public_key); + let initiator_addr_with_secret = InitiatorAddrAndSecretKey::Both { + initiator_addr, + secret_key: &secret_key, + }; + let container = FieldsContainer::new( + TransactionArgs::Named(args), + target, + entry_point, + scheduling, + ); + TransactionV1::build( + chain_name, + timestamp, + TimeDiff::from_seconds(ttl), + pricing_mode, + container.to_map().unwrap(), + initiator_addr_with_secret, + ) + }, + ) +} + +pub fn transaction_arb() -> impl Strategy { + (v1_transaction_arb()).prop_map(Transaction::V1) +} + +pub fn legal_transaction_arb() -> impl Strategy { + (legal_v1_transaction_arb()).prop_map(Transaction::V1) +} +pub fn example_u32_arb() -> impl Strategy { + prop_oneof![Just(0), Just(1), Just(u32::MAX / 2), Just(u32::MAX)] } diff --git a/types/src/global_state.rs b/types/src/global_state.rs new file mode 100644 index 0000000000..7cee2a19c4 --- /dev/null +++ b/types/src/global_state.rs @@ -0,0 +1,6 @@ +//! Types for global state. 
+mod merkle_proof; +mod pointer; + +pub use merkle_proof::{TrieMerkleProof, TrieMerkleProofStep}; +pub use pointer::Pointer; diff --git a/types/src/global_state/merkle_proof.rs b/types/src/global_state/merkle_proof.rs new file mode 100644 index 0000000000..7f101e1253 --- /dev/null +++ b/types/src/global_state/merkle_proof.rs @@ -0,0 +1,231 @@ +use alloc::{collections::VecDeque, vec::Vec}; +use serde::{Deserialize, Serialize}; + +use crate::bytesrepr::{self, Bytes, FromBytes, ToBytes}; + +use super::pointer::Pointer; + +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; + +const TRIE_MERKLE_PROOF_STEP_NODE_ID: u8 = 0; +const TRIE_MERKLE_PROOF_STEP_EXTENSION_ID: u8 = 1; + +/// A component of a proof that an entry exists in the Merkle trie. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub enum TrieMerkleProofStep { + /// Corresponds to a trie node. + Node { + /// Hole index. + hole_index: u8, + /// Indexed pointers with hole. + indexed_pointers_with_hole: Vec<(u8, Pointer)>, + }, + /// Corresponds to a trie extension. + Extension { + /// Affix bytes. + affix: Bytes, + }, +} + +impl TrieMerkleProofStep { + /// Constructor for [`TrieMerkleProofStep::Node`] + pub fn node(hole_index: u8, indexed_pointers_with_hole: Vec<(u8, Pointer)>) -> Self { + Self::Node { + hole_index, + indexed_pointers_with_hole, + } + } + + /// Constructor for [`TrieMerkleProofStep::Extension`] + pub fn extension(affix: Vec) -> Self { + Self::Extension { + affix: affix.into(), + } + } + + /// Returns a random `TrieMerkleProofStep`. 
+ #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + use rand::Rng; + + match rng.gen_range(0..2) { + 0 => { + let hole_index = rng.gen(); + let indexed_pointers_with_hole = (0..rng.gen_range(0..10)) + .map(|_| (rng.gen(), Pointer::random(rng))) + .collect(); + Self::node(hole_index, indexed_pointers_with_hole) + } + 1 => { + let affix = (0..rng.gen_range(0..10)).map(|_| rng.gen()).collect(); + Self::extension(affix) + } + _ => unreachable!(), + } + } +} + +impl ToBytes for TrieMerkleProofStep { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut ret: Vec = bytesrepr::allocate_buffer(self)?; + match self { + TrieMerkleProofStep::Node { + hole_index, + indexed_pointers_with_hole, + } => { + ret.push(TRIE_MERKLE_PROOF_STEP_NODE_ID); + ret.push(*hole_index); + ret.append(&mut indexed_pointers_with_hole.to_bytes()?) + } + TrieMerkleProofStep::Extension { affix } => { + ret.push(TRIE_MERKLE_PROOF_STEP_EXTENSION_ID); + ret.append(&mut affix.to_bytes()?) 
+ } + }; + Ok(ret) + } + + fn serialized_length(&self) -> usize { + size_of::() + + match self { + TrieMerkleProofStep::Node { + hole_index, + indexed_pointers_with_hole, + } => { + (*hole_index).serialized_length() + + (*indexed_pointers_with_hole).serialized_length() + } + TrieMerkleProofStep::Extension { affix } => affix.serialized_length(), + } + } +} + +impl FromBytes for TrieMerkleProofStep { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, rem): (u8, &[u8]) = FromBytes::from_bytes(bytes)?; + match tag { + TRIE_MERKLE_PROOF_STEP_NODE_ID => { + let (hole_index, rem): (u8, &[u8]) = FromBytes::from_bytes(rem)?; + let (indexed_pointers_with_hole, rem): (Vec<(u8, Pointer)>, &[u8]) = + FromBytes::from_bytes(rem)?; + Ok(( + TrieMerkleProofStep::Node { + hole_index, + indexed_pointers_with_hole, + }, + rem, + )) + } + TRIE_MERKLE_PROOF_STEP_EXTENSION_ID => { + let (affix, rem): (_, &[u8]) = FromBytes::from_bytes(rem)?; + Ok((TrieMerkleProofStep::Extension { affix }, rem)) + } + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +/// A proof that a node with a specified `key` and `value` is present in the Merkle trie. +/// Given a state hash `x`, one can validate a proof `p` by checking `x == p.compute_state_hash()`. 
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct TrieMerkleProof { + key: K, + value: V, + proof_steps: VecDeque, +} + +impl TrieMerkleProof { + /// Constructor for [`TrieMerkleProof`] + pub fn new(key: K, value: V, proof_steps: VecDeque) -> Self { + TrieMerkleProof { + key, + value, + proof_steps, + } + } + + /// Getter for the key in [`TrieMerkleProof`] + pub fn key(&self) -> &K { + &self.key + } + + /// Getter for the value in [`TrieMerkleProof`] + pub fn value(&self) -> &V { + &self.value + } + + /// Getter for the proof steps in [`TrieMerkleProof`] + pub fn proof_steps(&self) -> &VecDeque { + &self.proof_steps + } + + /// Transforms a [`TrieMerkleProof`] into the value it contains + pub fn into_value(self) -> V { + self.value + } +} + +impl ToBytes for TrieMerkleProof +where + K: ToBytes, + V: ToBytes, +{ + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut ret: Vec = bytesrepr::allocate_buffer(self)?; + ret.append(&mut self.key.to_bytes()?); + ret.append(&mut self.value.to_bytes()?); + ret.append(&mut self.proof_steps.to_bytes()?); + Ok(ret) + } + + fn serialized_length(&self) -> usize { + self.key.serialized_length() + + self.value.serialized_length() + + self.proof_steps.serialized_length() + } +} + +impl FromBytes for TrieMerkleProof +where + K: FromBytes, + V: FromBytes, +{ + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (key, rem): (K, &[u8]) = FromBytes::from_bytes(bytes)?; + let (value, rem): (V, &[u8]) = FromBytes::from_bytes(rem)?; + let (proof_steps, rem): (VecDeque, &[u8]) = + FromBytes::from_bytes(rem)?; + Ok(( + TrieMerkleProof { + key, + value, + proof_steps, + }, + rem, + )) + } +} + +#[cfg(test)] +mod tests { + use proptest::prelude::*; + + use crate::{bytesrepr, gens}; + + proptest! 
{ + #[test] + fn trie_merkle_proof_step_serialization_is_correct( + step in gens::trie_merkle_proof_step_arb() + ) { + bytesrepr::test_serialization_roundtrip(&step) + } + + #[test] + fn trie_merkle_proof_serialization_is_correct( + proof in gens::trie_merkle_proof_arb() + ) { + bytesrepr::test_serialization_roundtrip(&proof) + } + } +} diff --git a/types/src/global_state/pointer.rs b/types/src/global_state/pointer.rs new file mode 100644 index 0000000000..33c0b3d7f1 --- /dev/null +++ b/types/src/global_state/pointer.rs @@ -0,0 +1,104 @@ +use core::fmt::Debug; + +use alloc::vec::Vec; +use serde::{Deserialize, Serialize}; + +use crate::{ + bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + Digest, +}; + +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; + +/// Represents a pointer to the next object in a Merkle Trie +#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub enum Pointer { + /// Leaf pointer. + LeafPointer(Digest), + /// Node pointer. + NodePointer(Digest), +} + +impl Pointer { + /// Borrows the inner hash from a `Pointer`. + pub fn hash(&self) -> &Digest { + match self { + Pointer::LeafPointer(hash) => hash, + Pointer::NodePointer(hash) => hash, + } + } + + /// Takes ownership of the hash, consuming this `Pointer`. + pub fn into_hash(self) -> Digest { + match self { + Pointer::LeafPointer(hash) => hash, + Pointer::NodePointer(hash) => hash, + } + } + + /// Creates a new owned `Pointer` with a new `Digest`. + pub fn update(&self, hash: Digest) -> Self { + match self { + Pointer::LeafPointer(_) => Pointer::LeafPointer(hash), + Pointer::NodePointer(_) => Pointer::NodePointer(hash), + } + } + + /// Returns the `tag` value for a variant of `Pointer`. + fn tag(&self) -> u8 { + match self { + Pointer::LeafPointer(_) => 0, + Pointer::NodePointer(_) => 1, + } + } + + /// Returns a random `Pointer`. 
+ #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + use rand::Rng; + + match rng.gen_range(0..2) { + 0 => Pointer::LeafPointer(Digest::random(rng)), + 1 => Pointer::NodePointer(Digest::random(rng)), + _ => unreachable!(), + } + } +} + +impl ToBytes for Pointer { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut ret = bytesrepr::unchecked_allocate_buffer(self); + self.write_bytes(&mut ret)?; + Ok(ret) + } + + #[inline(always)] + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + Digest::LENGTH + } + + #[inline] + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + writer.push(self.tag()); + writer.extend_from_slice(self.hash().as_ref()); + Ok(()) + } +} + +impl FromBytes for Pointer { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, rem) = u8::from_bytes(bytes)?; + match tag { + 0 => { + let (hash, rem) = Digest::from_bytes(rem)?; + Ok((Pointer::LeafPointer(hash), rem)) + } + 1 => { + let (hash, rem) = Digest::from_bytes(rem)?; + Ok((Pointer::NodePointer(hash), rem)) + } + _ => Err(bytesrepr::Error::Formatting), + } + } +} diff --git a/types/src/json_pretty_printer.rs b/types/src/json_pretty_printer.rs index 116dcfbc7e..3f59664f98 100644 --- a/types/src/json_pretty_printer.rs +++ b/types/src/json_pretty_printer.rs @@ -1,15 +1,38 @@ extern crate alloc; -use alloc::{format, string::String}; +use alloc::{format, string::String, vec::Vec}; use serde::Serialize; + use serde_json::{json, Value}; -const MAX_STRING_LEN: usize = 100; +const MAX_STRING_LEN: usize = 150; + +/// Represents the information about a substring found in a string. +#[derive(Debug)] +struct SubstringSpec { + /// Index of the first character. + start_index: usize, + /// Length of the substring. + length: usize, +} + +impl SubstringSpec { + /// Constructs a new StringSpec with the given start index and length. 
+ fn new(start_index: usize, length: usize) -> Self { + Self { + start_index, + length, + } + } +} -/// Serialize the given data structure as a pretty-printed String of JSON using -/// `serde_json::to_string_pretty()`, but after first reducing any string values over -/// `MAX_STRING_LEN` to display the field's number of chars instead of the actual value. +/// Serializes the given data structure as a pretty-printed `String` of JSON using +/// `serde_json::to_string_pretty()`, but after first reducing any large hex-string values. +/// +/// A large hex-string is one containing only hex characters and which is over `MAX_STRING_LEN`. +/// Such hex-strings will be replaced by an indication of the number of chars redacted, for example +/// `[130 hex chars]`. pub fn json_pretty_print(value: &T) -> serde_json::Result where T: ?Sized + Serialize, @@ -20,12 +43,53 @@ where serde_json::to_string_pretty(&json_value) } +/// Searches the given string for all occurrences of hex substrings +/// that are longer than the specified `max_len`. +fn find_hex_strings_longer_than(string: &str, max_len: usize) -> Vec { + let mut ranges_to_remove = Vec::new(); + let mut start_index = 0; + let mut contiguous_hex_count = 0; + + // Record all large hex-strings' start positions and lengths. + for (index, char) in string.char_indices() { + if char.is_ascii_hexdigit() { + if contiguous_hex_count == 0 { + // This is the start of a new hex-string. + start_index = index; + } + contiguous_hex_count += 1; + } else if contiguous_hex_count != 0 { + // This is the end of a hex-string: if it's too long, record it. + if contiguous_hex_count > max_len { + ranges_to_remove.push(SubstringSpec::new(start_index, contiguous_hex_count)); + } + contiguous_hex_count = 0; + } + } + // If the string contains a large hex-string at the end, record it now. 
+ if contiguous_hex_count > max_len { + ranges_to_remove.push(SubstringSpec::new(start_index, contiguous_hex_count)); + } + ranges_to_remove +} + fn shorten_string_field(value: &mut Value) { match value { Value::String(string) => { - if string.len() > MAX_STRING_LEN { - *string = format!("{} chars", string.len()); - } + // Iterate over the ranges to remove from last to first so each + // replacement start index remains valid. + find_hex_strings_longer_than(string, MAX_STRING_LEN) + .into_iter() + .rev() + .for_each( + |SubstringSpec { + start_index, + length, + }| { + let range = start_index..(start_index + length); + string.replace_range(range, &format!("[{} hex chars]", length)); + }, + ) } Value::Array(values) => { for value in values { @@ -43,38 +107,93 @@ fn shorten_string_field(value: &mut Value) { #[cfg(test)] mod tests { - use core::iter::{self, FromIterator}; - use super::*; + fn hex_string(length: usize) -> String { + "0123456789abcdef".chars().cycle().take(length).collect() + } + + impl PartialEq<(usize, usize)> for SubstringSpec { + fn eq(&self, other: &(usize, usize)) -> bool { + self.start_index == other.0 && self.length == other.1 + } + } + + #[test] + fn finds_hex_strings_longer_than() { + const TESTING_LEN: usize = 3; + + let input = "01234"; + let expected = vec![(0, 5)]; + let actual = find_hex_strings_longer_than(input, TESTING_LEN); + assert_eq!(actual, expected); + + let input = "01234-0123"; + let expected = vec![(0, 5), (6, 4)]; + let actual = find_hex_strings_longer_than(input, TESTING_LEN); + assert_eq!(actual, expected); + + let input = "012-34-0123"; + let expected = vec![(7, 4)]; + let actual = find_hex_strings_longer_than(input, TESTING_LEN); + assert_eq!(actual, expected); + + let input = "012-34-01-23"; + let expected: Vec<(usize, usize)> = vec![]; + let actual = find_hex_strings_longer_than(input, TESTING_LEN); + assert_eq!(actual, expected); + + let input = "0"; + let expected: Vec<(usize, usize)> = vec![]; + let actual = 
find_hex_strings_longer_than(input, TESTING_LEN); + assert_eq!(actual, expected); + + let input = ""; + let expected: Vec<(usize, usize)> = vec![]; + let actual = find_hex_strings_longer_than(input, TESTING_LEN); + assert_eq!(actual, expected); + } + + #[test] + fn respects_length() { + let input = "I like beef"; + let expected = vec![(7, 4)]; + let actual = find_hex_strings_longer_than(input, 3); + assert_eq!(actual, expected); + + let input = "I like beef"; + let expected: Vec<(usize, usize)> = vec![]; + let actual = find_hex_strings_longer_than(input, 1000); + assert_eq!(actual, expected); + } + #[test] fn should_shorten_long_strings() { - let mut long_strings = vec![]; + let max_unshortened_hex_string = hex_string(MAX_STRING_LEN); + let long_hex_string = hex_string(MAX_STRING_LEN + 1); + let long_non_hex_string: String = "g".repeat(MAX_STRING_LEN + 1); + let long_hex_substring = format!("a-{}-b", hex_string(MAX_STRING_LEN + 1)); + let multiple_long_hex_substrings = + format!("a: {0}, b: {0}, c: {0}", hex_string(MAX_STRING_LEN + 1)); + + let mut long_strings: Vec = vec![]; for i in 1..=5 { - long_strings.push(String::from_iter( - iter::repeat('a').take(MAX_STRING_LEN + i), - )); + long_strings.push("a".repeat(MAX_STRING_LEN + i)); } let value = json!({ "field_1": Option::::None, "field_2": true, "field_3": 123, - "field_4": long_strings[0], - "field_5": [ - "short string value", - long_strings[1] - ], + "field_4": max_unshortened_hex_string, + "field_5": ["short string value", long_hex_string], "field_6": { "f1": Option::::None, "f2": false, "f3": -123, - "f4": long_strings[2], - "f5": [ - "short string value", - long_strings[3] - ], + "f4": long_non_hex_string, + "f5": ["short string value", long_hex_substring], "f6": { - "final long string": long_strings[4] + "final long string": multiple_long_hex_substrings } } }); @@ -83,33 +202,37 @@ mod tests { "field_1": null, "field_2": true, "field_3": 123, - "field_4": "101 chars", + "field_4": 
"0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef012345", "field_5": [ "short string value", - "102 chars" + "[151 hex chars]" ], "field_6": { "f1": null, "f2": false, "f3": -123, - "f4": "103 chars", + "f4": "ggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggg", "f5": [ "short string value", - "104 chars" + "a-[151 hex chars]-b" ], "f6": { - "final long string": "105 chars" + "final long string": "a: [151 hex chars], b: [151 hex chars], c: [151 hex chars]" } } }"#; let output = json_pretty_print(&value).unwrap(); - assert_eq!(output, expected); + assert_eq!( + output, expected, + "Actual:\n{}\nExpected:\n{}\n", + output, expected + ); } #[test] fn should_not_modify_short_strings() { - let max_string = String::from_iter(iter::repeat('a').take(MAX_STRING_LEN)); + let max_string: String = "a".repeat(MAX_STRING_LEN); let value = json!({ "field_1": Option::::None, "field_2": true, @@ -136,6 +259,34 @@ mod tests { let expected = serde_json::to_string_pretty(&value).unwrap(); let output = json_pretty_print(&value).unwrap(); - assert_eq!(output, expected); + assert_eq!( + output, expected, + "Actual:\n{}\nExpected:\n{}\n", + output, expected + ); + } + + #[test] + /// Ref: https://github.com/casper-network/casper-node/issues/1456 + fn regression_1456() { + let long_string = r#"state query failed: ValueNotFound("Failed to find base key at path: Key::Account(72698d4dc715a28347b15920b09b4f0f1d633be5a33f4686d06992415b0825e2)")"#; + assert_eq!(long_string.len(), 148); + + let value = json!({ + "code": -32003, + "message": long_string, + }); + + let expected = r#"{ + "code": -32003, + "message": "state query failed: ValueNotFound(\"Failed to find base key at path: Key::Account(72698d4dc715a28347b15920b09b4f0f1d633be5a33f4686d06992415b0825e2)\")" +}"#; + + let output = 
json_pretty_print(&value).unwrap(); + assert_eq!( + output, expected, + "Actual:\n{}\nExpected:\n{}\n", + output, expected + ); } } diff --git a/types/src/key.rs b/types/src/key.rs index 347740f58b..90ee1f9a0f 100644 --- a/types/src/key.rs +++ b/types/src/key.rs @@ -1,37 +1,84 @@ -use alloc::{format, string::String, vec::Vec}; +//! Key types. + +use alloc::{ + format, + string::{String, ToString}, + vec::Vec, +}; + use core::{ - array::TryFromSliceError, convert::TryFrom, fmt::{self, Debug, Display, Formatter}, - num::ParseIntError, str::FromStr, }; +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; + +#[cfg(doc)] +use crate::CLValue; +use blake2::{ + digest::{Update, VariableOutput}, + VarBlake2b, +}; +#[cfg(feature = "datasize")] use datasize::DataSize; -use hex_fmt::HexFmt; +#[cfg(any(feature = "testing", test))] use rand::{ distributions::{Distribution, Standard}, Rng, }; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; use serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer}; +use tracing::{error, warn}; use crate::{ - account::{self, AccountHash, AccountHashBytes, TryFromSliceForAccountHashError}, - bytesrepr::{self, Error, FromBytes, ToBytes}, + account::{AccountHash, ACCOUNT_HASH_LENGTH}, + addressable_entity::{ + self, AddressableEntityHash, EntityAddr, EntityKindTag, EntryPointAddr, NamedKeyAddr, + }, + block::BlockGlobalAddr, + byte_code, + bytesrepr::{ + self, Error, FromBytes, ToBytes, U32_SERIALIZED_LENGTH, U64_SERIALIZED_LENGTH, + U8_SERIALIZED_LENGTH, + }, + checksummed_hex, + contract_messages::{self, MessageAddr, TopicNameHash, TOPIC_NAME_HASH_LENGTH}, contract_wasm::ContractWasmHash, contracts::{ContractHash, ContractPackageHash}, + package::PackageHash, + system::{ + auction::{BidAddr, BidAddrTag}, + mint::BalanceHoldAddr, + }, uref::{self, URef, URefAddr, UREF_SERIALIZED_LENGTH}, - DeployHash, EraId, Tagged, TransferAddr, DEPLOY_HASH_LENGTH, TRANSFER_ADDR_LENGTH, - UREF_ADDR_LENGTH, + 
ByteCodeAddr, DeployHash, Digest, EraId, Tagged, TransferAddr, TransferFromStrError, + TRANSFER_ADDR_LENGTH, UREF_ADDR_LENGTH, }; const HASH_PREFIX: &str = "hash-"; const DEPLOY_INFO_PREFIX: &str = "deploy-"; +const TRANSFER_PREFIX: &str = "transfer-"; const ERA_INFO_PREFIX: &str = "era-"; const BALANCE_PREFIX: &str = "balance-"; +const BALANCE_HOLD_PREFIX: &str = "balance-hold-"; const BID_PREFIX: &str = "bid-"; const WITHDRAW_PREFIX: &str = "withdraw-"; -const VALIDATORS_PREFIX: &str = "validator-era-"; +const DICTIONARY_PREFIX: &str = "dictionary-"; +const UNBOND_PREFIX: &str = "unbond-"; +const SYSTEM_ENTITY_REGISTRY_PREFIX: &str = "system-entity-registry-"; +const ERA_SUMMARY_PREFIX: &str = "era-summary-"; +const CHAINSPEC_REGISTRY_PREFIX: &str = "chainspec-registry-"; +const CHECKSUM_REGISTRY_PREFIX: &str = "checksum-registry-"; +const BID_ADDR_PREFIX: &str = "bid-addr-"; +const PACKAGE_PREFIX: &str = "package-"; +const BLOCK_GLOBAL_TIME_PREFIX: &str = "block-time-"; +const BLOCK_GLOBAL_MESSAGE_COUNT_PREFIX: &str = "block-message-count-"; +const BLOCK_GLOBAL_PROTOCOL_VERSION_PREFIX: &str = "block-protocol-version-"; +const BLOCK_GLOBAL_ADDRESSABLE_ENTITY_PREFIX: &str = "block-addressable-entity-"; +const STATE_PREFIX: &str = "state-"; /// The number of bytes in a Blake2b hash pub const BLAKE2B_DIGEST_LENGTH: usize = 32; @@ -40,30 +87,52 @@ pub const KEY_HASH_LENGTH: usize = 32; /// The number of bytes in a [`Key::Transfer`]. pub const KEY_TRANSFER_LENGTH: usize = TRANSFER_ADDR_LENGTH; /// The number of bytes in a [`Key::DeployInfo`]. -pub const KEY_DEPLOY_INFO_LENGTH: usize = DEPLOY_HASH_LENGTH; - +pub const KEY_DEPLOY_INFO_LENGTH: usize = DeployHash::LENGTH; +/// The number of bytes in a [`Key::Dictionary`]. +pub const KEY_DICTIONARY_LENGTH: usize = 32; +/// The maximum length for a `dictionary_item_key`. +pub const DICTIONARY_ITEM_KEY_MAX_LENGTH: usize = 128; +/// The maximum length for an `Addr`. 
+pub const ADDR_LENGTH: usize = 32; +const PADDING_BYTES: [u8; 32] = [0u8; 32]; +const BLOCK_GLOBAL_PADDING_BYTES: [u8; 31] = [0u8; 31]; const KEY_ID_SERIALIZED_LENGTH: usize = 1; // u8 used to determine the ID const KEY_HASH_SERIALIZED_LENGTH: usize = KEY_ID_SERIALIZED_LENGTH + KEY_HASH_LENGTH; const KEY_UREF_SERIALIZED_LENGTH: usize = KEY_ID_SERIALIZED_LENGTH + UREF_SERIALIZED_LENGTH; const KEY_TRANSFER_SERIALIZED_LENGTH: usize = KEY_ID_SERIALIZED_LENGTH + KEY_TRANSFER_LENGTH; const KEY_DEPLOY_INFO_SERIALIZED_LENGTH: usize = KEY_ID_SERIALIZED_LENGTH + KEY_DEPLOY_INFO_LENGTH; -const KEY_ERA_INFO_SERIALIZED_LENGTH: usize = - KEY_ID_SERIALIZED_LENGTH + PaddedEraId::SERIALIZED_LENGTH; +const KEY_ERA_INFO_SERIALIZED_LENGTH: usize = KEY_ID_SERIALIZED_LENGTH + U64_SERIALIZED_LENGTH; const KEY_BALANCE_SERIALIZED_LENGTH: usize = KEY_ID_SERIALIZED_LENGTH + UREF_ADDR_LENGTH; const KEY_BID_SERIALIZED_LENGTH: usize = KEY_ID_SERIALIZED_LENGTH + KEY_HASH_LENGTH; const KEY_WITHDRAW_SERIALIZED_LENGTH: usize = KEY_ID_SERIALIZED_LENGTH + KEY_HASH_LENGTH; -const KEY_ERA_VALIDATORS_SERIALIZED_LENGTH: usize = - KEY_ID_SERIALIZED_LENGTH + PaddedEraId::SERIALIZED_LENGTH; +const KEY_UNBOND_SERIALIZED_LENGTH: usize = KEY_ID_SERIALIZED_LENGTH + KEY_HASH_LENGTH; +const KEY_DICTIONARY_SERIALIZED_LENGTH: usize = KEY_ID_SERIALIZED_LENGTH + KEY_DICTIONARY_LENGTH; +const KEY_SYSTEM_ENTITY_REGISTRY_SERIALIZED_LENGTH: usize = + KEY_ID_SERIALIZED_LENGTH + PADDING_BYTES.len(); +const KEY_ERA_SUMMARY_SERIALIZED_LENGTH: usize = KEY_ID_SERIALIZED_LENGTH + PADDING_BYTES.len(); +const KEY_CHAINSPEC_REGISTRY_SERIALIZED_LENGTH: usize = + KEY_ID_SERIALIZED_LENGTH + PADDING_BYTES.len(); +const KEY_CHECKSUM_REGISTRY_SERIALIZED_LENGTH: usize = + KEY_ID_SERIALIZED_LENGTH + PADDING_BYTES.len(); +const KEY_PACKAGE_SERIALIZED_LENGTH: usize = KEY_ID_SERIALIZED_LENGTH + 32; +const KEY_MESSAGE_SERIALIZED_LENGTH: usize = KEY_ID_SERIALIZED_LENGTH + + U8_SERIALIZED_LENGTH + + KEY_HASH_LENGTH + + TOPIC_NAME_HASH_LENGTH + 
+ U8_SERIALIZED_LENGTH + + U32_SERIALIZED_LENGTH; + +const MAX_SERIALIZED_LENGTH: usize = KEY_MESSAGE_SERIALIZED_LENGTH; /// An alias for [`Key`]s hash variant. pub type HashAddr = [u8; KEY_HASH_LENGTH]; -impl From for Key { - fn from(addr: HashAddr) -> Self { - Key::Hash(addr) - } -} +/// An alias for [`Key`]s package variant. +pub type PackageAddr = [u8; ADDR_LENGTH]; + +/// An alias for [`Key`]s dictionary variant. +pub type DictionaryAddr = [u8; KEY_DICTIONARY_LENGTH]; #[allow(missing_docs)] #[derive(Debug, Copy, Clone, PartialOrd, Ord, PartialEq, Eq, Hash)] @@ -78,13 +147,148 @@ pub enum KeyTag { Balance = 6, Bid = 7, Withdraw = 8, - EraValidators = 9, + Dictionary = 9, + SystemEntityRegistry = 10, + EraSummary = 11, + Unbond = 12, + ChainspecRegistry = 13, + ChecksumRegistry = 14, + BidAddr = 15, + Package = 16, + AddressableEntity = 17, + ByteCode = 18, + Message = 19, + NamedKey = 20, + BlockGlobal = 21, + BalanceHold = 22, + EntryPoint = 23, + State = 24, +} + +impl KeyTag { + /// Returns a random `KeyTag`. 
+ #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + match rng.gen_range(0..=23) { + 0 => KeyTag::Account, + 1 => KeyTag::Hash, + 2 => KeyTag::URef, + 3 => KeyTag::Transfer, + 4 => KeyTag::DeployInfo, + 5 => KeyTag::EraInfo, + 6 => KeyTag::Balance, + 7 => KeyTag::Bid, + 8 => KeyTag::Withdraw, + 9 => KeyTag::Dictionary, + 10 => KeyTag::SystemEntityRegistry, + 11 => KeyTag::EraSummary, + 12 => KeyTag::Unbond, + 13 => KeyTag::ChainspecRegistry, + 14 => KeyTag::ChecksumRegistry, + 15 => KeyTag::BidAddr, + 16 => KeyTag::Package, + 17 => KeyTag::AddressableEntity, + 18 => KeyTag::ByteCode, + 19 => KeyTag::Message, + 20 => KeyTag::NamedKey, + 21 => KeyTag::BlockGlobal, + 22 => KeyTag::BalanceHold, + 23 => KeyTag::EntryPoint, + 24 => KeyTag::State, + _ => panic!(), + } + } +} + +impl Display for KeyTag { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self { + KeyTag::Account => write!(f, "Account"), + KeyTag::Hash => write!(f, "Hash"), + KeyTag::URef => write!(f, "URef"), + KeyTag::Transfer => write!(f, "Transfer"), + KeyTag::DeployInfo => write!(f, "DeployInfo"), + KeyTag::EraInfo => write!(f, "EraInfo"), + KeyTag::Balance => write!(f, "Balance"), + KeyTag::Bid => write!(f, "Bid"), + KeyTag::Withdraw => write!(f, "Withdraw"), + KeyTag::Dictionary => write!(f, "Dictionary"), + KeyTag::SystemEntityRegistry => write!(f, "SystemEntityRegistry"), + KeyTag::EraSummary => write!(f, "EraSummary"), + KeyTag::Unbond => write!(f, "Unbond"), + KeyTag::ChainspecRegistry => write!(f, "ChainspecRegistry"), + KeyTag::ChecksumRegistry => write!(f, "ChecksumRegistry"), + KeyTag::BidAddr => write!(f, "BidAddr"), + KeyTag::Package => write!(f, "Package"), + KeyTag::AddressableEntity => write!(f, "AddressableEntity"), + KeyTag::ByteCode => write!(f, "ByteCode"), + KeyTag::Message => write!(f, "Message"), + KeyTag::NamedKey => write!(f, "NamedKey"), + KeyTag::BlockGlobal => write!(f, "BlockGlobal"), + KeyTag::BalanceHold => write!(f, 
"BalanceHold"), + KeyTag::State => write!(f, "State"), + KeyTag::EntryPoint => write!(f, "EntryPoint"), + } + } } -/// The type under which data (e.g. [`CLValue`](crate::CLValue)s, smart contracts, user accounts) -/// are indexed on the network. +impl ToBytes for KeyTag { + fn to_bytes(&self) -> Result, Error> { + let mut result = bytesrepr::unchecked_allocate_buffer(self); + self.write_bytes(&mut result)?; + Ok(result) + } + + fn serialized_length(&self) -> usize { + KEY_ID_SERIALIZED_LENGTH + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + writer.push(*self as u8); + Ok(()) + } +} + +impl FromBytes for KeyTag { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (id, rem) = u8::from_bytes(bytes)?; + let tag = match id { + tag if tag == KeyTag::Account as u8 => KeyTag::Account, + tag if tag == KeyTag::Hash as u8 => KeyTag::Hash, + tag if tag == KeyTag::URef as u8 => KeyTag::URef, + tag if tag == KeyTag::Transfer as u8 => KeyTag::Transfer, + tag if tag == KeyTag::DeployInfo as u8 => KeyTag::DeployInfo, + tag if tag == KeyTag::EraInfo as u8 => KeyTag::EraInfo, + tag if tag == KeyTag::Balance as u8 => KeyTag::Balance, + tag if tag == KeyTag::Bid as u8 => KeyTag::Bid, + tag if tag == KeyTag::Withdraw as u8 => KeyTag::Withdraw, + tag if tag == KeyTag::Dictionary as u8 => KeyTag::Dictionary, + tag if tag == KeyTag::SystemEntityRegistry as u8 => KeyTag::SystemEntityRegistry, + tag if tag == KeyTag::EraSummary as u8 => KeyTag::EraSummary, + tag if tag == KeyTag::Unbond as u8 => KeyTag::Unbond, + tag if tag == KeyTag::ChainspecRegistry as u8 => KeyTag::ChainspecRegistry, + tag if tag == KeyTag::ChecksumRegistry as u8 => KeyTag::ChecksumRegistry, + tag if tag == KeyTag::BidAddr as u8 => KeyTag::BidAddr, + tag if tag == KeyTag::Package as u8 => KeyTag::Package, + tag if tag == KeyTag::AddressableEntity as u8 => KeyTag::AddressableEntity, + tag if tag == KeyTag::ByteCode as u8 => KeyTag::ByteCode, + tag if tag == KeyTag::Message as u8 
=> KeyTag::Message, + tag if tag == KeyTag::NamedKey as u8 => KeyTag::NamedKey, + tag if tag == KeyTag::BlockGlobal as u8 => KeyTag::BlockGlobal, + tag if tag == KeyTag::BalanceHold as u8 => KeyTag::BalanceHold, + tag if tag == KeyTag::EntryPoint as u8 => KeyTag::EntryPoint, + tag if tag == KeyTag::State as u8 => KeyTag::State, + _ => return Err(Error::Formatting), + }; + Ok((tag, rem)) + } +} + +/// The key under which data (e.g. [`CLValue`]s, smart contracts, user accounts) are stored in +/// global state. #[repr(C)] -#[derive(PartialEq, Eq, Clone, Copy, PartialOrd, Ord, Hash, DataSize)] +#[derive(PartialEq, Eq, Clone, Copy, PartialOrd, Ord, Hash)] +#[cfg_attr(feature = "datasize", derive(DataSize))] pub enum Key { /// A `Key` under which a user account is stored. Account(AccountHash), @@ -93,54 +297,132 @@ pub enum Key { Hash(HashAddr), /// A `Key` which is a [`URef`], under which most types of data can be stored. URef(URef), - /// A `Key` under which we store a transfer. + /// A `Key` under which a transfer is stored. Transfer(TransferAddr), - /// A `Key` under which we store a deploy info. + /// A `Key` under which a deploy info is stored. DeployInfo(DeployHash), - /// A `Key` under which we store an era info. + /// A `Key` under which an era info is stored. EraInfo(EraId), - /// A `Key` under which we store a purse balance. + /// A `Key` under which a purse balance is stored. Balance(URefAddr), - /// A `Key` under which we store bid information + /// A `Key` under which bid information is stored. Bid(AccountHash), - /// A `Key` under which we store unbond information. + /// A `Key` under which withdraw information is stored. Withdraw(AccountHash), - /// A `Key` under which we store validator information. 
- EraValidators(EraId), -} - -#[derive(Debug)] -pub enum FromStrError { - InvalidPrefix, - Hex(base16::DecodeError), - Account(TryFromSliceForAccountHashError), - Hash(TryFromSliceError), - AccountHash(account::FromStrError), - URef(uref::FromStrError), - EraId(ParseIntError), + /// A `Key` whose value is derived by hashing a [`URef`] address and arbitrary data, under + /// which a dictionary is stored. + Dictionary(DictionaryAddr), + /// A `Key` under which system entity hashes are stored. + SystemEntityRegistry, + /// A `Key` under which current era info is stored. + EraSummary, + /// A `Key` under which unbond information is stored. + Unbond(AccountHash), + /// A `Key` under which chainspec and other hashes are stored. + ChainspecRegistry, + /// A `Key` under which a registry of checksums is stored. + ChecksumRegistry, + /// A `Key` under which bid information is stored. + BidAddr(BidAddr), + /// A `Key` under which package information is stored. + SmartContract(PackageAddr), + /// A `Key` under which an addressable entity is stored. + AddressableEntity(EntityAddr), + /// A `Key` under which a byte code record is stored. + ByteCode(ByteCodeAddr), + /// A `Key` under which a message is stored. + Message(MessageAddr), + /// A `Key` under which a single named key entry is stored. + NamedKey(NamedKeyAddr), + /// A `Key` under which per-block details are stored to global state. + BlockGlobal(BlockGlobalAddr), + /// A `Key` under which a hold on a purse balance is stored. + BalanceHold(BalanceHoldAddr), + /// A `Key` under which a entrypoint record is written. + EntryPoint(EntryPointAddr), + /// A `Key` under which a contract's state lives. 
+ State(EntityAddr), } -impl From for FromStrError { - fn from(error: base16::DecodeError) -> Self { - FromStrError::Hex(error) +#[cfg(feature = "json-schema")] +impl JsonSchema for Key { + fn schema_name() -> String { + String::from("Key") } -} -impl From for FromStrError { - fn from(error: TryFromSliceForAccountHashError) -> Self { - FromStrError::Account(error) + fn json_schema(gen: &mut schemars::gen::SchemaGenerator) -> schemars::schema::Schema { + let schema = gen.subschema_for::(); + let mut schema_object = schema.into_object(); + schema_object.metadata().description = Some( + "The key as a formatted string, under which data (e.g. `CLValue`s, smart contracts, \ + user accounts) are stored in global state." + .to_string(), + ); + schema_object.into() } } -impl From for FromStrError { - fn from(error: TryFromSliceError) -> Self { - FromStrError::Hash(error) - } +/// Errors produced when converting a `String` into a `Key`. +#[derive(Debug)] +#[non_exhaustive] +pub enum FromStrError { + /// Account parse error. + Account(addressable_entity::FromStrError), + /// Hash parse error. + Hash(String), + /// URef parse error. + URef(uref::FromStrError), + /// Transfer parse error. + Transfer(TransferFromStrError), + /// DeployInfo parse error. + DeployInfo(String), + /// EraInfo parse error. + EraInfo(String), + /// Balance parse error. + Balance(String), + /// Bid parse error. + Bid(String), + /// Withdraw parse error. + Withdraw(String), + /// Dictionary parse error. + Dictionary(String), + /// System entity registry parse error. + SystemEntityRegistry(String), + /// Era summary parse error. + EraSummary(String), + /// Unbond parse error. + Unbond(String), + /// Chainspec registry error. + ChainspecRegistry(String), + /// Checksum registry error. + ChecksumRegistry(String), + /// Bid parse error. + BidAddr(String), + /// Package parse error. + Package(String), + /// Entity parse error. + AddressableEntity(String), + /// Byte code parse error. 
+ ByteCode(String), + /// Message parse error. + Message(contract_messages::FromStrError), + /// Named key parse error. + NamedKey(String), + /// BlockGlobal key parse error. + BlockGlobal(String), + /// Balance hold parse error. + BalanceHold(String), + /// Entry point parse error. + EntryPoint(String), + /// State key parse error. + State(String), + /// Unknown prefix. + UnknownPrefix, } -impl From for FromStrError { - fn from(error: account::FromStrError) -> Self { - FromStrError::AccountHash(error) +impl From for FromStrError { + fn from(error: addressable_entity::FromStrError) -> Self { + FromStrError::Account(error) } } @@ -150,24 +432,75 @@ impl From for FromStrError { } } -impl From for FromStrError { - fn from(error: ParseIntError) -> Self { - FromStrError::EraId(error) +impl From for FromStrError { + fn from(error: contract_messages::FromStrError) -> Self { + FromStrError::Message(error) } } impl Display for FromStrError { fn fmt(&self, f: &mut Formatter) -> fmt::Result { match self { - FromStrError::InvalidPrefix => write!(f, "invalid prefix"), - FromStrError::Hex(error) => write!(f, "decode from hex: {}", error), - FromStrError::Account(error) => write!(f, "account from string error: {:?}", error), - FromStrError::Hash(error) => write!(f, "hash from string error: {}", error), - FromStrError::AccountHash(error) => { - write!(f, "account hash from string error: {:?}", error) + FromStrError::Account(error) => write!(f, "account-key from string error: {}", error), + FromStrError::Hash(error) => write!(f, "hash-key from string error: {}", error), + FromStrError::URef(error) => write!(f, "uref-key from string error: {}", error), + FromStrError::Transfer(error) => { + write!(f, "legacy-transfer-key from string error: {}", error) + } + FromStrError::DeployInfo(error) => { + write!(f, "deploy-info-key from string error: {}", error) + } + FromStrError::EraInfo(error) => write!(f, "era-info-key from string error: {}", error), + FromStrError::Balance(error) => 
write!(f, "balance-key from string error: {}", error), + FromStrError::Bid(error) => write!(f, "bid-key from string error: {}", error), + FromStrError::Withdraw(error) => write!(f, "withdraw-key from string error: {}", error), + FromStrError::Dictionary(error) => { + write!(f, "dictionary-key from string error: {}", error) + } + FromStrError::SystemEntityRegistry(error) => { + write!( + f, + "system-contract-registry-key from string error: {}", + error + ) + } + FromStrError::EraSummary(error) => { + write!(f, "era-summary-key from string error: {}", error) + } + FromStrError::Unbond(error) => { + write!(f, "unbond-key from string error: {}", error) + } + FromStrError::ChainspecRegistry(error) => { + write!(f, "chainspec-registry-key from string error: {}", error) + } + FromStrError::ChecksumRegistry(error) => { + write!(f, "checksum-registry-key from string error: {}", error) + } + FromStrError::BidAddr(error) => write!(f, "bid-addr-key from string error: {}", error), + FromStrError::Package(error) => write!(f, "package-key from string error: {}", error), + FromStrError::AddressableEntity(error) => { + write!(f, "addressable-entity-key from string error: {}", error) + } + FromStrError::ByteCode(error) => { + write!(f, "byte-code-key from string error: {}", error) + } + FromStrError::Message(error) => { + write!(f, "message-key from string error: {}", error) } - FromStrError::URef(error) => write!(f, "uref from string error: {:?}", error), - FromStrError::EraId(error) => write!(f, "era id from string error: {}", error), + FromStrError::NamedKey(error) => { + write!(f, "named-key from string error: {}", error) + } + FromStrError::BlockGlobal(error) => { + write!(f, "block-message-count-key form string error: {}", error) + } + FromStrError::BalanceHold(error) => { + write!(f, "balance-hold from string error: {}", error) + } + FromStrError::EntryPoint(error) => { + write!(f, "entry-point from string error: {}", error) + } + FromStrError::UnknownPrefix => write!(f, 
"unknown prefix for key"), + FromStrError::State(error) => write!(f, "state-key from string error: {}", error), } } } @@ -186,18 +519,34 @@ impl Key { Key::Balance(_) => String::from("Key::Balance"), Key::Bid(_) => String::from("Key::Bid"), Key::Withdraw(_) => String::from("Key::Unbond"), - Key::EraValidators(_) => String::from("Key::EraValidators"), + Key::Dictionary(_) => String::from("Key::Dictionary"), + Key::SystemEntityRegistry => String::from("Key::SystemEntityRegistry"), + Key::EraSummary => String::from("Key::EraSummary"), + Key::Unbond(_) => String::from("Key::Unbond"), + Key::ChainspecRegistry => String::from("Key::ChainspecRegistry"), + Key::ChecksumRegistry => String::from("Key::ChecksumRegistry"), + Key::BidAddr(_) => String::from("Key::BidAddr"), + Key::SmartContract(_) => String::from("Key::SmartContract"), + Key::AddressableEntity(_) => String::from("Key::AddressableEntity"), + Key::ByteCode(_) => String::from("Key::ByteCode"), + Key::Message(_) => String::from("Key::Message"), + Key::NamedKey(_) => String::from("Key::NamedKey"), + Key::BlockGlobal(_) => String::from("Key::BlockGlobal"), + Key::BalanceHold(_) => String::from("Key::BalanceHold"), + Key::EntryPoint(_) => String::from("Key::EntryPoint"), + Key::State(_) => String::from("Key::State"), } } /// Returns the maximum size a [`Key`] can be serialized into. pub const fn max_serialized_length() -> usize { - KEY_UREF_SERIALIZED_LENGTH + MAX_SERIALIZED_LENGTH } /// If `self` is of type [`Key::URef`], returns `self` with the /// [`AccessRights`](crate::AccessRights) stripped from the wrapped [`URef`], otherwise /// returns `self` unmodified. + #[must_use] pub fn normalize(self) -> Key { match self { Key::URef(uref) => Key::URef(uref.remove_access_rights()), @@ -206,17 +555,23 @@ impl Key { } /// Returns a human-readable version of `self`, with the inner bytes encoded to Base16. 
- pub fn to_formatted_string(&self) -> String { + pub fn to_formatted_string(self) -> String { match self { Key::Account(account_hash) => account_hash.to_formatted_string(), - Key::Hash(addr) => format!("{}{}", HASH_PREFIX, base16::encode_lower(addr)), + Key::Hash(addr) => format!("{}{}", HASH_PREFIX, base16::encode_lower(&addr)), Key::URef(uref) => uref.to_formatted_string(), - Key::Transfer(transfer_addr) => transfer_addr.to_formatted_string(), - Key::DeployInfo(addr) => { + Key::Transfer(transfer_v1_addr) => { + format!( + "{}{}", + TRANSFER_PREFIX, + base16::encode_lower(&transfer_v1_addr.value()) + ) + } + Key::DeployInfo(deploy_hash) => { format!( "{}{}", DEPLOY_INFO_PREFIX, - base16::encode_lower(addr.as_bytes()) + base16::encode_lower(deploy_hash.as_ref()) ) } Key::EraInfo(era_id) => { @@ -231,47 +586,402 @@ impl Key { Key::Withdraw(account_hash) => { format!("{}{}", WITHDRAW_PREFIX, base16::encode_lower(&account_hash)) } - Key::EraValidators(era_id) => { - format!("{}{}", VALIDATORS_PREFIX, era_id.value()) + Key::Dictionary(dictionary_addr) => { + format!( + "{}{}", + DICTIONARY_PREFIX, + base16::encode_lower(&dictionary_addr) + ) + } + Key::SystemEntityRegistry => { + format!( + "{}{}", + SYSTEM_ENTITY_REGISTRY_PREFIX, + base16::encode_lower(&PADDING_BYTES) + ) + } + Key::EraSummary => { + format!( + "{}{}", + ERA_SUMMARY_PREFIX, + base16::encode_lower(&PADDING_BYTES) + ) + } + Key::Unbond(account_hash) => { + format!("{}{}", UNBOND_PREFIX, base16::encode_lower(&account_hash)) + } + Key::ChainspecRegistry => { + format!( + "{}{}", + CHAINSPEC_REGISTRY_PREFIX, + base16::encode_lower(&PADDING_BYTES) + ) + } + Key::ChecksumRegistry => { + format!( + "{}{}", + CHECKSUM_REGISTRY_PREFIX, + base16::encode_lower(&PADDING_BYTES) + ) + } + Key::BidAddr(bid_addr) => { + format!("{}{}", BID_ADDR_PREFIX, bid_addr) + } + Key::Message(message_addr) => message_addr.to_formatted_string(), + Key::SmartContract(package_addr) => { + format!("{}{}", PACKAGE_PREFIX, 
base16::encode_lower(&package_addr)) + } + Key::AddressableEntity(entity_addr) => { + format!("{}", entity_addr) + } + Key::ByteCode(byte_code_addr) => { + format!("{}", byte_code_addr) + } + Key::NamedKey(named_key) => { + format!("{}", named_key) + } + Key::BlockGlobal(addr) => { + let prefix = match addr { + BlockGlobalAddr::BlockTime => BLOCK_GLOBAL_TIME_PREFIX, + BlockGlobalAddr::MessageCount => BLOCK_GLOBAL_MESSAGE_COUNT_PREFIX, + BlockGlobalAddr::ProtocolVersion => BLOCK_GLOBAL_PROTOCOL_VERSION_PREFIX, + BlockGlobalAddr::AddressableEntity => BLOCK_GLOBAL_ADDRESSABLE_ENTITY_PREFIX, + }; + format!( + "{}{}", + prefix, + base16::encode_lower(&BLOCK_GLOBAL_PADDING_BYTES) + ) + } + Key::BalanceHold(balance_hold_addr) => { + let tail = BalanceHoldAddr::to_formatted_string(&balance_hold_addr); + format!("{}{}", BALANCE_HOLD_PREFIX, tail) + } + Key::State(entity_addr) => { + format!("{}{}", STATE_PREFIX, entity_addr) + } + Key::EntryPoint(entry_point_addr) => { + format!("{}", entry_point_addr) } } } /// Parses a string formatted as per `Self::to_formatted_string()` into a `Key`. 
pub fn from_formatted_str(input: &str) -> Result { - if let Ok(account_hash) = AccountHash::from_formatted_str(input) { - Ok(Key::Account(account_hash)) - } else if let Some(hex) = input.strip_prefix(HASH_PREFIX) { - Ok(Key::Hash(HashAddr::try_from( - base16::decode(hex)?.as_ref(), - )?)) - } else if let Some(hex) = input.strip_prefix(DEPLOY_INFO_PREFIX) { - Ok(Key::DeployInfo(DeployHash::new( - <[u8; DEPLOY_HASH_LENGTH]>::try_from(base16::decode(hex)?.as_ref())?, - ))) - } else if let Ok(transfer_addr) = TransferAddr::from_formatted_str(input) { - Ok(Key::Transfer(transfer_addr)) - } else if let Ok(uref) = URef::from_formatted_str(input) { - Ok(Key::URef(uref)) - } else if let Some(era_id_str) = input.strip_prefix(ERA_INFO_PREFIX) { - Ok(Key::EraInfo(EraId::from_str(era_id_str)?)) - } else if let Some(hex) = input.strip_prefix(BALANCE_PREFIX) { - Ok(Key::Balance(URefAddr::try_from( - base16::decode(hex)?.as_ref(), - )?)) - } else if let Some(hex) = input.strip_prefix(BID_PREFIX) { - Ok(Key::Bid(AccountHash::new(AccountHashBytes::try_from( - base16::decode(hex)?.as_ref(), - )?))) - } else if let Some(hex) = input.strip_prefix(WITHDRAW_PREFIX) { - Ok(Key::Withdraw(AccountHash::new(AccountHashBytes::try_from( - base16::decode(hex)?.as_ref(), - )?))) - } else if let Some(era_id_str) = input.strip_prefix(VALIDATORS_PREFIX) { - Ok(Key::EraValidators(EraId::from_str(era_id_str)?)) - } else { - Err(FromStrError::InvalidPrefix) + match AccountHash::from_formatted_str(input) { + Ok(account_hash) => return Ok(Key::Account(account_hash)), + Err(addressable_entity::FromStrError::InvalidPrefix) => {} + Err(error) => return Err(error.into()), + } + + if let Some(hex) = input.strip_prefix(HASH_PREFIX) { + let addr = checksummed_hex::decode(hex) + .map_err(|error| FromStrError::Hash(error.to_string()))?; + let hash_addr = HashAddr::try_from(addr.as_ref()) + .map_err(|error| FromStrError::Hash(error.to_string()))?; + return Ok(Key::Hash(hash_addr)); + } + + if let Some(hex) = 
input.strip_prefix(DEPLOY_INFO_PREFIX) { + let hash = checksummed_hex::decode(hex) + .map_err(|error| FromStrError::DeployInfo(error.to_string()))?; + let hash_array = <[u8; DeployHash::LENGTH]>::try_from(hash.as_ref()) + .map_err(|error| FromStrError::DeployInfo(error.to_string()))?; + return Ok(Key::DeployInfo(DeployHash::new(Digest::from(hash_array)))); + } + + if let Some(hex) = input.strip_prefix(TRANSFER_PREFIX) { + let addr = checksummed_hex::decode(hex) + .map_err(|error| FromStrError::Transfer(TransferFromStrError::from(error)))?; + let addr_array = <[u8; TRANSFER_ADDR_LENGTH]>::try_from(addr.as_ref()) + .map_err(|error| FromStrError::Transfer(TransferFromStrError::from(error)))?; + return Ok(Key::Transfer(TransferAddr::new(addr_array))); + } + + match URef::from_formatted_str(input) { + Ok(uref) => return Ok(Key::URef(uref)), + Err(uref::FromStrError::InvalidPrefix) => {} + Err(error) => return Err(error.into()), + } + + if let Some(era_summary_padding) = input.strip_prefix(ERA_SUMMARY_PREFIX) { + let padded_bytes = checksummed_hex::decode(era_summary_padding) + .map_err(|error| FromStrError::EraSummary(error.to_string()))?; + let _padding: [u8; 32] = TryFrom::try_from(padded_bytes.as_ref()).map_err(|_| { + FromStrError::EraSummary("Failed to deserialize era summary key".to_string()) + })?; + return Ok(Key::EraSummary); + } + + if let Some(era_id_str) = input.strip_prefix(ERA_INFO_PREFIX) { + let era_id = EraId::from_str(era_id_str) + .map_err(|error| FromStrError::EraInfo(error.to_string()))?; + return Ok(Key::EraInfo(era_id)); + } + + // note: BALANCE_HOLD must come before BALANCE due to overlapping head (balance-) + if let Some(hex) = input.strip_prefix(BALANCE_HOLD_PREFIX) { + let balance_hold_addr = BalanceHoldAddr::from_formatted_string(hex)?; + return Ok(Key::BalanceHold(balance_hold_addr)); + } + + if let Some(hex) = input.strip_prefix(BALANCE_PREFIX) { + let addr = checksummed_hex::decode(hex) + .map_err(|error| 
FromStrError::Balance(error.to_string()))?; + let uref_addr = URefAddr::try_from(addr.as_ref()) + .map_err(|error| FromStrError::Balance(error.to_string()))?; + return Ok(Key::Balance(uref_addr)); + } + + // note: BID_ADDR must come before BID as their heads overlap (bid- / bid-addr-) + if let Some(hex) = input.strip_prefix(BID_ADDR_PREFIX) { + let bytes = checksummed_hex::decode(hex) + .map_err(|error| FromStrError::BidAddr(error.to_string()))?; + if bytes.is_empty() { + return Err(FromStrError::BidAddr( + "bytes should not be 0 len".to_string(), + )); + } + let tag_bytes = <[u8; BidAddrTag::BID_ADDR_TAG_LENGTH]>::try_from(bytes[0..1].as_ref()) + .map_err(|err| FromStrError::BidAddr(err.to_string()))?; + let tag = BidAddrTag::try_from_u8(tag_bytes[0]) + .ok_or_else(|| FromStrError::BidAddr("failed to parse bid addr tag".to_string()))?; + let validator_bytes = <[u8; ACCOUNT_HASH_LENGTH]>::try_from( + bytes[1..BidAddr::VALIDATOR_BID_ADDR_LENGTH].as_ref(), + ) + .map_err(|err| FromStrError::BidAddr(err.to_string()))?; + + let bid_addr = match tag { + BidAddrTag::Unified => BidAddr::legacy(validator_bytes), + BidAddrTag::Validator => BidAddr::new_validator_addr(validator_bytes), + BidAddrTag::ValidatorRev => BidAddr::new_validator_rev_addr(validator_bytes), + BidAddrTag::DelegatedAccount => { + let delegator_bytes = <[u8; ACCOUNT_HASH_LENGTH]>::try_from( + bytes[BidAddr::VALIDATOR_BID_ADDR_LENGTH..].as_ref(), + ) + .map_err(|err| FromStrError::BidAddr(err.to_string()))?; + BidAddr::new_delegator_account_addr((validator_bytes, delegator_bytes)) + } + BidAddrTag::DelegatedPurse => { + let uref = <[u8; UREF_ADDR_LENGTH]>::try_from( + bytes[BidAddr::VALIDATOR_BID_ADDR_LENGTH..].as_ref(), + ) + .map_err(|err| FromStrError::BidAddr(err.to_string()))?; + BidAddr::DelegatedPurse { + validator: AccountHash::new(validator_bytes), + delegator: uref, + } + } + BidAddrTag::Credit => { + let era_id = bytesrepr::deserialize_from_slice( + &bytes[BidAddr::VALIDATOR_BID_ADDR_LENGTH..], 
+ ) + .map_err(|err| FromStrError::BidAddr(err.to_string()))?; + BidAddr::Credit { + validator: AccountHash::new(validator_bytes), + era_id, + } + } + BidAddrTag::ReservedDelegationAccount => { + let delegator_bytes = <[u8; ACCOUNT_HASH_LENGTH]>::try_from( + bytes[BidAddr::VALIDATOR_BID_ADDR_LENGTH..].as_ref(), + ) + .map_err(|err| FromStrError::BidAddr(err.to_string()))?; + BidAddr::new_reservation_account_addr((validator_bytes, delegator_bytes)) + } + BidAddrTag::ReservedDelegationPurse => { + let uref = <[u8; UREF_ADDR_LENGTH]>::try_from( + bytes[BidAddr::VALIDATOR_BID_ADDR_LENGTH..].as_ref(), + ) + .map_err(|err| FromStrError::BidAddr(err.to_string()))?; + BidAddr::ReservedDelegationPurse { + validator: AccountHash::new(validator_bytes), + delegator: uref, + } + } + BidAddrTag::UnbondAccount => { + let unbonder_bytes = <[u8; ACCOUNT_HASH_LENGTH]>::try_from( + bytes[BidAddr::VALIDATOR_BID_ADDR_LENGTH..].as_ref(), + ) + .map_err(|err| FromStrError::BidAddr(err.to_string()))?; + BidAddr::UnbondAccount { + validator: AccountHash::new(validator_bytes), + unbonder: AccountHash::new(unbonder_bytes), + } + } + BidAddrTag::UnbondPurse => { + let uref = <[u8; UREF_ADDR_LENGTH]>::try_from( + bytes[BidAddr::VALIDATOR_BID_ADDR_LENGTH..].as_ref(), + ) + .map_err(|err| FromStrError::BidAddr(err.to_string()))?; + BidAddr::UnbondPurse { + validator: AccountHash::new(validator_bytes), + unbonder: uref, + } + } + }; + return Ok(Key::BidAddr(bid_addr)); + } + + if let Some(hex) = input.strip_prefix(BID_PREFIX) { + let hash = checksummed_hex::decode(hex) + .map_err(|error| FromStrError::Bid(error.to_string()))?; + let account_hash = <[u8; ACCOUNT_HASH_LENGTH]>::try_from(hash.as_ref()) + .map_err(|error| FromStrError::Bid(error.to_string()))?; + return Ok(Key::Bid(AccountHash::new(account_hash))); + } + + if let Some(hex) = input.strip_prefix(WITHDRAW_PREFIX) { + let hash = checksummed_hex::decode(hex) + .map_err(|error| FromStrError::Withdraw(error.to_string()))?; + let 
account_hash = <[u8; ACCOUNT_HASH_LENGTH]>::try_from(hash.as_ref()) + .map_err(|error| FromStrError::Withdraw(error.to_string()))?; + return Ok(Key::Withdraw(AccountHash::new(account_hash))); + } + + if let Some(hex) = input.strip_prefix(UNBOND_PREFIX) { + let hash = checksummed_hex::decode(hex) + .map_err(|error| FromStrError::Unbond(error.to_string()))?; + let account_hash = <[u8; ACCOUNT_HASH_LENGTH]>::try_from(hash.as_ref()) + .map_err(|error| FromStrError::Unbond(error.to_string()))?; + return Ok(Key::Unbond(AccountHash::new(account_hash))); + } + + if let Some(dictionary_addr) = input.strip_prefix(DICTIONARY_PREFIX) { + let dictionary_addr_bytes = checksummed_hex::decode(dictionary_addr) + .map_err(|error| FromStrError::Dictionary(error.to_string()))?; + let addr = DictionaryAddr::try_from(dictionary_addr_bytes.as_ref()) + .map_err(|error| FromStrError::Dictionary(error.to_string()))?; + return Ok(Key::Dictionary(addr)); + } + + if let Some(registry_address) = input.strip_prefix(SYSTEM_ENTITY_REGISTRY_PREFIX) { + let padded_bytes = checksummed_hex::decode(registry_address) + .map_err(|error| FromStrError::SystemEntityRegistry(error.to_string()))?; + let _padding: [u8; 32] = TryFrom::try_from(padded_bytes.as_ref()).map_err(|_| { + FromStrError::SystemEntityRegistry( + "Failed to deserialize system registry key".to_string(), + ) + })?; + return Ok(Key::SystemEntityRegistry); + } + + if let Some(registry_address) = input.strip_prefix(CHAINSPEC_REGISTRY_PREFIX) { + let padded_bytes = checksummed_hex::decode(registry_address) + .map_err(|error| FromStrError::ChainspecRegistry(error.to_string()))?; + let _padding: [u8; 32] = TryFrom::try_from(padded_bytes.as_ref()).map_err(|_| { + FromStrError::ChainspecRegistry( + "Failed to deserialize chainspec registry key".to_string(), + ) + })?; + return Ok(Key::ChainspecRegistry); + } + + if let Some(registry_address) = input.strip_prefix(CHECKSUM_REGISTRY_PREFIX) { + let padded_bytes = 
checksummed_hex::decode(registry_address) + .map_err(|error| FromStrError::ChecksumRegistry(error.to_string()))?; + let _padding: [u8; 32] = TryFrom::try_from(padded_bytes.as_ref()).map_err(|_| { + FromStrError::ChecksumRegistry( + "Failed to deserialize checksum registry key".to_string(), + ) + })?; + return Ok(Key::ChecksumRegistry); + } + + if let Some(package_addr) = input.strip_prefix(PACKAGE_PREFIX) { + let package_addr_bytes = checksummed_hex::decode(package_addr) + .map_err(|error| FromStrError::Dictionary(error.to_string()))?; + let addr = PackageAddr::try_from(package_addr_bytes.as_ref()) + .map_err(|error| FromStrError::Package(error.to_string()))?; + return Ok(Key::SmartContract(addr)); + } + + match EntityAddr::from_formatted_str(input) { + Ok(entity_addr) => return Ok(Key::AddressableEntity(entity_addr)), + Err(addressable_entity::FromStrError::InvalidPrefix) => {} + Err(error) => { + return Err(FromStrError::AddressableEntity(error.to_string())); + } + } + + match ByteCodeAddr::from_formatted_string(input) { + Ok(byte_code_addr) => return Ok(Key::ByteCode(byte_code_addr)), + Err(byte_code::FromStrError::InvalidPrefix) => {} + Err(error) => return Err(FromStrError::ByteCode(error.to_string())), + } + + match MessageAddr::from_formatted_str(input) { + Ok(message_addr) => return Ok(Key::Message(message_addr)), + Err(contract_messages::FromStrError::InvalidPrefix) => {} + Err(error) => return Err(error.into()), + } + + match NamedKeyAddr::from_formatted_str(input) { + Ok(named_key) => return Ok(Key::NamedKey(named_key)), + Err(addressable_entity::FromStrError::InvalidPrefix) => {} + Err(error) => return Err(FromStrError::NamedKey(error.to_string())), } + + if let Some(block_time) = input.strip_prefix(BLOCK_GLOBAL_TIME_PREFIX) { + let padded_bytes = checksummed_hex::decode(block_time) + .map_err(|error| FromStrError::BlockGlobal(error.to_string()))?; + let _padding: [u8; 31] = TryFrom::try_from(padded_bytes.as_ref()).map_err(|_| { + 
FromStrError::BlockGlobal("Failed to deserialize global block time key".to_string()) + })?; + return Ok(BlockGlobalAddr::BlockTime.into()); + } + + if let Some(message_count) = input.strip_prefix(BLOCK_GLOBAL_MESSAGE_COUNT_PREFIX) { + let padded_bytes = checksummed_hex::decode(message_count) + .map_err(|error| FromStrError::BlockGlobal(error.to_string()))?; + let _padding: [u8; 31] = TryFrom::try_from(padded_bytes.as_ref()).map_err(|_| { + FromStrError::BlockGlobal( + "Failed to deserialize global block message count key".to_string(), + ) + })?; + return Ok(BlockGlobalAddr::MessageCount.into()); + } + + if let Some(protocol_version) = input.strip_prefix(BLOCK_GLOBAL_PROTOCOL_VERSION_PREFIX) { + let padded_bytes = checksummed_hex::decode(protocol_version) + .map_err(|error| FromStrError::BlockGlobal(error.to_string()))?; + let _padding: [u8; 31] = TryFrom::try_from(padded_bytes.as_ref()).map_err(|_| { + FromStrError::BlockGlobal( + "Failed to deserialize global block protocol version key".to_string(), + ) + })?; + return Ok(BlockGlobalAddr::ProtocolVersion.into()); + } + + if let Some(addressable_entity) = input.strip_prefix(BLOCK_GLOBAL_ADDRESSABLE_ENTITY_PREFIX) + { + let padded_bytes = checksummed_hex::decode(addressable_entity) + .map_err(|error| FromStrError::BlockGlobal(error.to_string()))?; + let _padding: [u8; 31] = TryFrom::try_from(padded_bytes.as_ref()).map_err(|_| { + FromStrError::BlockGlobal( + "Failed to deserialize global block addressable entity key".to_string(), + ) + })?; + return Ok(BlockGlobalAddr::AddressableEntity.into()); + } + + match EntryPointAddr::from_formatted_str(input) { + Ok(entry_point_addr) => return Ok(Key::EntryPoint(entry_point_addr)), + Err(addressable_entity::FromStrError::InvalidPrefix) => {} + Err(error) => return Err(FromStrError::EntryPoint(error.to_string())), + } + + if let Some(entity_addr_formatted) = input.strip_prefix(STATE_PREFIX) { + match EntityAddr::from_formatted_str(entity_addr_formatted) { + Ok(entity_addr) => 
return Ok(Key::State(entity_addr)), + Err(addressable_entity::FromStrError::InvalidPrefix) => {} + Err(error) => { + return Err(FromStrError::State(error.to_string())); + } + } + } + + Err(FromStrError::UnknownPrefix) } /// Returns the inner bytes of `self` if `self` is of type [`Key::Account`], otherwise returns @@ -285,13 +995,65 @@ impl Key { /// Returns the inner bytes of `self` if `self` is of type [`Key::Hash`], otherwise returns /// `None`. - pub fn into_hash(self) -> Option { + pub fn into_hash_addr(self) -> Option { + match self { + Key::Hash(hash) => Some(hash), + _ => None, + } + } + + /// Returns the inner bytes of `self` if `self` is of type [`Key::AddressableEntity`], otherwise + /// returns `None`. + pub fn into_entity_hash_addr(self) -> Option { match self { + Key::AddressableEntity(entity_addr) => Some(entity_addr.value()), + Key::Account(account_hash) => Some(account_hash.value()), Key::Hash(hash) => Some(hash), _ => None, } } + /// Returns the inner bytes of `self` if `self` is of type [`Key::SmartContract`], otherwise + /// returns `None`. + pub fn into_package_addr(self) -> Option { + match self { + Key::Hash(hash) => Some(hash), + Key::SmartContract(package_addr) => Some(package_addr), + _ => None, + } + } + + /// Returns [`AddressableEntityHash`] of `self` if `self` is of type [`Key::AddressableEntity`], + /// otherwise returns `None`. + pub fn into_entity_hash(self) -> Option { + let entity_addr = self.into_entity_hash_addr()?; + Some(AddressableEntityHash::new(entity_addr)) + } + + /// Returns [`PackageHash`] of `self` if `self` is of type [`Key::SmartContract`], otherwise + /// returns `None`. + pub fn into_package_hash(self) -> Option { + let package_addr = self.into_package_addr()?; + Some(PackageHash::new(package_addr)) + } + + /// Returns [`NamedKeyAddr`] of `self` if `self` is of type [`Key::NamedKey`], otherwise + /// returns `None`. 
+ pub fn into_named_key_addr(self) -> Option { + match self { + Key::NamedKey(addr) => Some(addr), + _ => None, + } + } + + /// Returns the inner [`URef`] if `self` is of type [`Key::URef`], otherwise returns `None`. + pub fn into_uref(self) -> Option { + match self { + Key::URef(uref) => Some(uref), + _ => None, + } + } + /// Returns a reference to the inner [`URef`] if `self` is of type [`Key::URef`], otherwise /// returns `None`. pub fn as_uref(&self) -> Option<&URef> { @@ -301,35 +1063,395 @@ impl Key { } } - /// Returns the inner [`URef`] if `self` is of type [`Key::URef`], otherwise returns `None`. - pub fn into_uref(self) -> Option { + /// Returns a reference to the inner [`URef`] if `self` is of type [`Key::URef`], otherwise + /// returns `None`. + pub fn as_uref_mut(&mut self) -> Option<&mut URef> { match self { Key::URef(uref) => Some(uref), _ => None, } } + /// Returns a reference to the inner `URefAddr` if `self` is of type [`Key::Balance`], + /// otherwise returns `None`. + pub fn as_balance(&self) -> Option<&URefAddr> { + if let Self::Balance(v) = self { + Some(v) + } else { + None + } + } + + /// Returns a reference to the inner `BalanceHoldAddr` if `self` is of type + /// [`Key::BalanceHold`], otherwise returns `None`. + pub fn as_balance_hold(&self) -> Option<&BalanceHoldAddr> { + if let Self::BalanceHold(addr) = self { + Some(addr) + } else { + None + } + } + + /// Returns a reference to the inner [`DictionaryAddr`] if `self` is of type + /// [`Key::Dictionary`], otherwise returns `None`. + pub fn as_dictionary(&self) -> Option<&DictionaryAddr> { + match self { + Key::Dictionary(v) => Some(v), + _ => None, + } + } + + /// Returns a reference to the inner `BidAddr` if `self` is of type [`Key::Bid`], + /// otherwise returns `None`. 
+ pub fn as_bid_addr(&self) -> Option<&BidAddr> { + if let Self::BidAddr(addr) = self { + Some(addr) + } else { + None + } + } + + /// Returns a reference to the inner `TopicNameHash` if `self` is of the type [`Key::Message`] + /// otherwise returns `None`. + pub fn as_message_topic_name_hash(&self) -> Option { + if let Self::Message(addr) = self { + Some(addr.topic_name_hash()) + } else { + None + } + } + /// Casts a [`Key::URef`] to a [`Key::Hash`] pub fn uref_to_hash(&self) -> Option { let uref = self.as_uref()?; let addr = uref.addr(); Some(Key::Hash(addr)) } + + /// Casts a [`Key::Withdraw`] to a [`Key::Unbond`] + pub fn withdraw_to_unbond(&self) -> Option { + if let Key::Withdraw(account_hash) = self { + return Some(Key::Unbond(*account_hash)); + } + None + } + + /// Creates a new [`Key::Dictionary`] variant based on a `seed_uref` and a `dictionary_item_key` + /// bytes. + pub fn dictionary(seed_uref: URef, dictionary_item_key: &[u8]) -> Key { + // NOTE: Expect below is safe because the length passed is supported. + let mut hasher = VarBlake2b::new(BLAKE2B_DIGEST_LENGTH).expect("should create hasher"); + hasher.update(seed_uref.addr().as_ref()); + hasher.update(dictionary_item_key); + // NOTE: Assumed safe as size of `HashAddr` equals to the output provided by hasher. + let mut addr = HashAddr::default(); + hasher.finalize_variable(|hash| addr.clone_from_slice(hash)); + Key::Dictionary(addr) + } + + /// Creates a new [`Key::AddressableEntity`] variant from a package kind and an entity + /// hash. 
+ pub fn addressable_entity_key( + entity_kind_tag: EntityKindTag, + entity_hash: AddressableEntityHash, + ) -> Self { + let entity_addr = match entity_kind_tag { + EntityKindTag::System => EntityAddr::new_system(entity_hash.value()), + EntityKindTag::Account => EntityAddr::new_account(entity_hash.value()), + EntityKindTag::SmartContract => EntityAddr::new_smart_contract(entity_hash.value()), + }; + + Key::AddressableEntity(entity_addr) + } + + /// Creates a new [`Key::AddressableEntity`] for a Smart contract. + pub fn contract_entity_key(entity_hash: AddressableEntityHash) -> Key { + Self::addressable_entity_key(EntityKindTag::SmartContract, entity_hash) + } + + /// Creates a new [`Key::ByteCode`] variant from a byte code kind and an byte code addr. + pub fn byte_code_key(byte_code_addr: ByteCodeAddr) -> Self { + Key::ByteCode(byte_code_addr) + } + + /// Creates a new [`Key::Message`] variant that identifies an indexed message based on an + /// `hash_addr`, `topic_name_hash` and message `index`. + pub fn message(entity_addr: EntityAddr, topic_name_hash: TopicNameHash, index: u32) -> Key { + Key::Message(MessageAddr::new_message_addr( + entity_addr, + topic_name_hash, + index, + )) + } + + /// Creates a new [`Key::Message`] variant that identifies a message topic based on an + /// `hash_addr` and a hash of the topic name. + pub fn message_topic(entity_addr: EntityAddr, topic_name_hash: TopicNameHash) -> Key { + Key::Message(MessageAddr::new_topic_addr(entity_addr, topic_name_hash)) + } + + /// Creates a new [`Key::EntryPoint`] variant from an entrypoint addr. + pub fn entry_point(entry_point_addr: EntryPointAddr) -> Self { + Key::EntryPoint(entry_point_addr) + } + + /// Returns true if the key is of type [`Key::Dictionary`]. + pub fn is_dictionary_key(&self) -> bool { + if let Key::Dictionary(_) = self { + return true; + } + false + } + + /// Returns true if the key is of type [`Key::Bid`]. 
+ pub fn is_balance_key(&self) -> bool { + if let Key::Balance(_) = self { + return true; + } + false + } + + /// Returns true if the key is of type [`Key::BidAddr`]. + pub fn is_bid_addr_key(&self) -> bool { + if let Key::BidAddr(_) = self { + return true; + } + false + } + + /// Returns true if the key is of type [`Key::NamedKey`]. + pub fn is_named_key(&self) -> bool { + if let Key::NamedKey(_) = self { + return true; + } + + false + } + + /// Returns if the inner address is for a system contract entity. + pub fn is_system_key(&self) -> bool { + if let Self::AddressableEntity(entity_addr) = self { + return match entity_addr.tag() { + EntityKindTag::System => true, + EntityKindTag::SmartContract | EntityKindTag::Account => false, + }; + } + false + } + + /// Return true if the inner Key is of the smart contract type. + pub fn is_smart_contract_key(&self) -> bool { + matches!( + self, + Self::AddressableEntity(EntityAddr::SmartContract(_)) | Self::Hash(_) + ) + } + + /// Returns true if the key is of type [`Key::NamedKey`] and its Entry variant. + pub fn is_named_key_entry(&self) -> bool { + matches!(self, Self::NamedKey(_)) + } + + /// Returns true if the key is of type [`Key::NamedKey`] and the variants have the + /// same [`EntityAddr`]. + pub fn is_entry_for_base(&self, entity_addr: &EntityAddr) -> bool { + if let Self::NamedKey(named_key_addr) = self { + named_key_addr.entity_addr() == *entity_addr + } else { + false + } + } + + /// Is the record under this key readable by the entity corresponding to the imputed address? 
+ pub fn is_readable(&self, entity_addr: &EntityAddr) -> bool { + if entity_addr.is_system() { + // the system can read everything + return true; + } + let ret = match self { + Key::BidAddr(_) => { + // all bids are public information + true + } + Key::URef(uref) => { + // uref's require explicit permissions + uref.is_readable() + } + Key::SystemEntityRegistry | Key::SmartContract(_) => { + // the system entities and all packages are public info + true + } + Key::Unbond(account_hash) => { + // and an account holder can read their own account record + entity_addr.tag() == EntityKindTag::Account + && entity_addr.value() == account_hash.value() + } + Key::NamedKey(named_key_addr) => { + // an entity can read its own named keys + &named_key_addr.entity_addr() == entity_addr + } + Key::ByteCode(_) + | Key::Account(_) + | Key::Hash(_) + | Key::AddressableEntity(_) + | Key::Balance(_) + | Key::BalanceHold(_) + | Key::Dictionary(_) + | Key::Message(_) + | Key::BlockGlobal(_) + | Key::EntryPoint(_) => true, + _ => false, + }; + if !ret { + let reading_entity_key = Key::AddressableEntity(*entity_addr); + warn!(?reading_entity_key, attempted_key=?self, "attempt to read without permission") + } + ret + } + + /// Is the record under this key addable by the entity corresponding to the imputed address? 
+ pub fn is_addable(&self, entity_addr: &EntityAddr) -> bool { + // unlike readable / writeable which are universally supported, + // only some data types support commutative add / extension + let ret = match self { + Key::URef(uref) => uref.is_addable(), + Key::AddressableEntity(addr_entity_addr) => { + // an entity can extend itself (only associated keys, currently) + entity_addr == addr_entity_addr + } + Key::NamedKey(named_key_addr) => { + // an entity can extend its own named keys + &named_key_addr.entity_addr() == entity_addr + } + _ => { + // other data types do not support commutative addition / extension + let adding_entity_key = Key::AddressableEntity(*entity_addr); + warn!(?adding_entity_key, attempted_key=?self, "attempt to add on an unsupported data type"); + return false; // we want the above more explicit warn message, not both messages. + } + }; + if !ret { + let adding_entity_key = Key::AddressableEntity(*entity_addr); + warn!(?adding_entity_key, attempted_key=?self, "attempt to add without permission"); + } + ret + } + + /// Is the record under this key writeable by the entity corresponding to the imputed address? + pub fn is_writeable(&self, entity_addr: &EntityAddr) -> bool { + if entity_addr.is_system() { + // the system can write everything + return true; + } + let ret = match self { + Key::URef(uref) => uref.is_writeable(), + Key::NamedKey(named_key_addr) => { + // an entity can write to its own named keys + &named_key_addr.entity_addr() == entity_addr + } + _ => { + // only the system can write other kinds of records + false + } + }; + if !ret { + let writing_entity_key = Key::AddressableEntity(*entity_addr); + warn!(?writing_entity_key, attempted_key=?self, "attempt to write without permission") + } + ret + } + + /// Returns an entity addr for a [`Key::AddressableEntity`]. 
+ pub fn into_entity_addr(self) -> Option { + match self { + Key::AddressableEntity(entity_addr) => Some(entity_addr), + _ => None, + } + } } impl Display for Key { fn fmt(&self, f: &mut Formatter) -> fmt::Result { match self { Key::Account(account_hash) => write!(f, "Key::Account({})", account_hash), - Key::Hash(addr) => write!(f, "Key::Hash({})", HexFmt(addr)), + Key::Hash(addr) => write!(f, "Key::Hash({})", base16::encode_lower(&addr)), Key::URef(uref) => write!(f, "Key::{}", uref), /* Display impl for URef will append */ - Key::Transfer(transfer_addr) => write!(f, "Key::Transfer({})", transfer_addr), - Key::DeployInfo(addr) => write!(f, "Key::DeployInfo({})", HexFmt(addr.as_bytes())), + Key::Transfer(transfer_v1_addr) => { + write!(f, "Key::Transfer({})", transfer_v1_addr) + } + Key::DeployInfo(addr) => write!( + f, + "Key::DeployInfo({})", + base16::encode_lower(addr.as_ref()) + ), Key::EraInfo(era_id) => write!(f, "Key::EraInfo({})", era_id), - Key::Balance(uref_addr) => write!(f, "Key::Balance({})", HexFmt(uref_addr)), + Key::Balance(uref_addr) => { + write!(f, "Key::Balance({})", base16::encode_lower(uref_addr)) + } Key::Bid(account_hash) => write!(f, "Key::Bid({})", account_hash), Key::Withdraw(account_hash) => write!(f, "Key::Withdraw({})", account_hash), - Key::EraValidators(era_id) => write!(f, "Key::EraValidators({})", era_id), + Key::Dictionary(addr) => { + write!(f, "Key::Dictionary({})", base16::encode_lower(addr)) + } + Key::SystemEntityRegistry => write!( + f, + "Key::SystemEntityRegistry({})", + base16::encode_lower(&PADDING_BYTES) + ), + Key::EraSummary => write!( + f, + "Key::EraSummary({})", + base16::encode_lower(&PADDING_BYTES), + ), + Key::Unbond(account_hash) => write!(f, "Key::Unbond({})", account_hash), + Key::ChainspecRegistry => write!( + f, + "Key::ChainspecRegistry({})", + base16::encode_lower(&PADDING_BYTES) + ), + Key::ChecksumRegistry => { + write!( + f, + "Key::ChecksumRegistry({})", + base16::encode_lower(&PADDING_BYTES) + ) + } 
+ Key::BidAddr(bid_addr) => write!(f, "Key::BidAddr({})", bid_addr), + Key::Message(message_addr) => { + write!(f, "Key::Message({})", message_addr) + } + Key::SmartContract(package_addr) => { + write!(f, "Key::Package({})", base16::encode_lower(package_addr)) + } + Key::AddressableEntity(entity_addr) => write!( + f, + "Key::AddressableEntity({}-{})", + entity_addr.tag(), + base16::encode_lower(&entity_addr.value()) + ), + Key::ByteCode(byte_code_addr) => { + write!(f, "Key::ByteCode({})", byte_code_addr) + } + Key::NamedKey(named_key_addr) => { + write!(f, "Key::NamedKey({})", named_key_addr) + } + Key::BlockGlobal(addr) => { + write!( + f, + "Key::BlockGlobal({}-{})", + addr, + base16::encode_lower(&BLOCK_GLOBAL_PADDING_BYTES) + ) + } + Key::BalanceHold(balance_hold_addr) => { + write!(f, "Key::BalanceHold({})", balance_hold_addr) + } + Key::EntryPoint(entry_point_addr) => { + write!(f, "Key::EntryPointAddr({})", entry_point_addr) + } + Key::State(entity_addr) => { + write!(f, "Key::State({})", entity_addr) + } } } } @@ -352,7 +1474,22 @@ impl Tagged for Key { Key::Balance(_) => KeyTag::Balance, Key::Bid(_) => KeyTag::Bid, Key::Withdraw(_) => KeyTag::Withdraw, - Key::EraValidators(_) => KeyTag::EraValidators, + Key::Dictionary(_) => KeyTag::Dictionary, + Key::SystemEntityRegistry => KeyTag::SystemEntityRegistry, + Key::EraSummary => KeyTag::EraSummary, + Key::Unbond(_) => KeyTag::Unbond, + Key::ChainspecRegistry => KeyTag::ChainspecRegistry, + Key::ChecksumRegistry => KeyTag::ChecksumRegistry, + Key::BidAddr(_) => KeyTag::BidAddr, + Key::SmartContract(_) => KeyTag::Package, + Key::AddressableEntity(..) => KeyTag::AddressableEntity, + Key::ByteCode(..) 
=> KeyTag::ByteCode, + Key::Message(_) => KeyTag::Message, + Key::NamedKey(_) => KeyTag::NamedKey, + Key::BlockGlobal(_) => KeyTag::BlockGlobal, + Key::BalanceHold(_) => KeyTag::BalanceHold, + Key::EntryPoint(_) => KeyTag::EntryPoint, + Key::State(_) => KeyTag::State, } } } @@ -376,103 +1513,52 @@ impl From for Key { } } -impl From for Key { - fn from(transfer_addr: TransferAddr) -> Key { - Key::Transfer(transfer_addr) - } -} - -impl From for Key { - fn from(contract_hash: ContractHash) -> Key { - Key::Hash(contract_hash.value()) +impl From for Key { + fn from(package_hash: PackageHash) -> Key { + Key::SmartContract(package_hash.value()) } } impl From for Key { - fn from(wasm_hash: ContractWasmHash) -> Key { + fn from(wasm_hash: ContractWasmHash) -> Self { Key::Hash(wasm_hash.value()) } } impl From for Key { - fn from(package_hash: ContractPackageHash) -> Key { - Key::Hash(package_hash.value()) + fn from(contract_package_hash: ContractPackageHash) -> Self { + Key::Hash(contract_package_hash.value()) } } -#[derive(Debug, PartialEq, Eq)] -struct PaddedEraId(EraId); - -impl PaddedEraId { - const SERIALIZED_LENGTH: usize = 32; - - const U64_LE_BYTES_LENGTH: usize = 8; - - const ZEROES_LENGTH: usize = Self::SERIALIZED_LENGTH - Self::U64_LE_BYTES_LENGTH; - - const fn into_inner(self) -> EraId { - self.0 +impl From for Key { + fn from(contract_hash: ContractHash) -> Self { + Key::Hash(contract_hash.value()) } } -impl ToBytes for PaddedEraId { - fn to_bytes(&self) -> Result, Error> { - let mut buff = Vec::new(); - buff.extend_from_slice(&[0u8; Self::ZEROES_LENGTH]); - buff.extend_from_slice(&self.0.to_le_bytes()); - Ok(buff) +impl From for Key { + fn from(entity_addr: EntityAddr) -> Self { + Key::AddressableEntity(entity_addr) } +} - fn serialized_length(&self) -> usize { - Self::SERIALIZED_LENGTH +impl From for Key { + fn from(value: NamedKeyAddr) -> Self { + Key::NamedKey(value) } } -impl FromBytes for PaddedEraId { - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), 
Error> { - let mut le_bytes = [0u8; Self::U64_LE_BYTES_LENGTH]; - let (bytes, remainder) = bytesrepr::safe_split_at(bytes, Self::SERIALIZED_LENGTH)?; - le_bytes.copy_from_slice(&bytes[Self::ZEROES_LENGTH..]); - Ok((PaddedEraId(EraId::from_le_bytes(le_bytes)), remainder)) +impl From for Key { + fn from(value: ByteCodeAddr) -> Self { + Key::ByteCode(value) } } impl ToBytes for Key { fn to_bytes(&self) -> Result, Error> { let mut result = bytesrepr::unchecked_allocate_buffer(self); - result.push(self.tag()); - match self { - Key::Account(account_hash) => { - result.append(&mut account_hash.to_bytes()?); - } - Key::Hash(hash) => { - result.append(&mut hash.to_bytes()?); - } - Key::URef(uref) => { - result.append(&mut uref.to_bytes()?); - } - Key::Transfer(addr) => { - result.append(&mut addr.to_bytes()?); - } - Key::DeployInfo(addr) => { - result.append(&mut addr.to_bytes()?); - } - Key::EraInfo(era_id) => { - result.append(&mut PaddedEraId(*era_id).to_bytes()?); - } - Key::Balance(uref_addr) => { - result.append(&mut uref_addr.to_bytes()?); - } - Key::Bid(account_hash) => { - result.append(&mut account_hash.to_bytes()?); - } - Key::Withdraw(account_hash) => { - result.append(&mut account_hash.to_bytes()?); - } - Key::EraValidators(era_id) => { - result.append(&mut PaddedEraId(*era_id).to_bytes()?); - } - } + self.write_bytes(&mut result)?; Ok(result) } @@ -489,73 +1575,256 @@ impl ToBytes for Key { Key::Balance(_) => KEY_BALANCE_SERIALIZED_LENGTH, Key::Bid(_) => KEY_BID_SERIALIZED_LENGTH, Key::Withdraw(_) => KEY_WITHDRAW_SERIALIZED_LENGTH, - Key::EraValidators(_) => KEY_ERA_VALIDATORS_SERIALIZED_LENGTH, + Key::Dictionary(_) => KEY_DICTIONARY_SERIALIZED_LENGTH, + Key::SystemEntityRegistry => KEY_SYSTEM_ENTITY_REGISTRY_SERIALIZED_LENGTH, + Key::EraSummary => KEY_ERA_SUMMARY_SERIALIZED_LENGTH, + Key::Unbond(_) => KEY_UNBOND_SERIALIZED_LENGTH, + Key::ChainspecRegistry => KEY_CHAINSPEC_REGISTRY_SERIALIZED_LENGTH, + Key::ChecksumRegistry => 
KEY_CHECKSUM_REGISTRY_SERIALIZED_LENGTH, + Key::BidAddr(bid_addr) => KEY_ID_SERIALIZED_LENGTH + bid_addr.serialized_length(), + Key::SmartContract(_) => KEY_PACKAGE_SERIALIZED_LENGTH, + Key::AddressableEntity(entity_addr) => { + KEY_ID_SERIALIZED_LENGTH + entity_addr.serialized_length() + } + Key::ByteCode(byte_code_addr) => { + KEY_ID_SERIALIZED_LENGTH + byte_code_addr.serialized_length() + } + Key::Message(message_addr) => { + KEY_ID_SERIALIZED_LENGTH + message_addr.serialized_length() + } + Key::NamedKey(named_key_addr) => { + KEY_ID_SERIALIZED_LENGTH + named_key_addr.serialized_length() + } + Key::BlockGlobal(addr) => { + KEY_ID_SERIALIZED_LENGTH + + addr.serialized_length() + + BLOCK_GLOBAL_PADDING_BYTES.len() + } + Key::BalanceHold(balance_hold_addr) => { + KEY_ID_SERIALIZED_LENGTH + balance_hold_addr.serialized_length() + } + Key::EntryPoint(entry_point_addr) => { + U8_SERIALIZED_LENGTH + entry_point_addr.serialized_length() + } + Key::State(entity_addr) => KEY_ID_SERIALIZED_LENGTH + entity_addr.serialized_length(), + } + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + writer.push(self.tag()); + match self { + Key::Account(account_hash) => account_hash.write_bytes(writer), + Key::Hash(hash) => hash.write_bytes(writer), + Key::URef(uref) => uref.write_bytes(writer), + Key::Transfer(addr) => addr.write_bytes(writer), + Key::DeployInfo(deploy_hash) => deploy_hash.write_bytes(writer), + Key::EraInfo(era_id) => era_id.write_bytes(writer), + Key::Balance(uref_addr) => uref_addr.write_bytes(writer), + Key::Bid(account_hash) => account_hash.write_bytes(writer), + Key::Withdraw(account_hash) => account_hash.write_bytes(writer), + Key::Dictionary(addr) => addr.write_bytes(writer), + Key::Unbond(account_hash) => account_hash.write_bytes(writer), + Key::SystemEntityRegistry + | Key::EraSummary + | Key::ChainspecRegistry + | Key::ChecksumRegistry => PADDING_BYTES.write_bytes(writer), + Key::BlockGlobal(addr) => { + addr.write_bytes(writer)?; + 
BLOCK_GLOBAL_PADDING_BYTES.write_bytes(writer) + } + Key::BidAddr(bid_addr) => bid_addr.write_bytes(writer), + Key::SmartContract(package_addr) => package_addr.write_bytes(writer), + Key::AddressableEntity(entity_addr) => entity_addr.write_bytes(writer), + Key::ByteCode(byte_code_addr) => byte_code_addr.write_bytes(writer), + Key::Message(message_addr) => message_addr.write_bytes(writer), + Key::NamedKey(named_key_addr) => named_key_addr.write_bytes(writer), + Key::BalanceHold(balance_hold_addr) => balance_hold_addr.write_bytes(writer), + Key::EntryPoint(entry_point_addr) => entry_point_addr.write_bytes(writer), + Key::State(entity_addr) => entity_addr.write_bytes(writer), } } } impl FromBytes for Key { fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { - let (tag, remainder) = u8::from_bytes(bytes)?; + if bytes.is_empty() { + error!("FromBytes for Key: bytes length should not be 0"); + } + let (tag, remainder) = match KeyTag::from_bytes(bytes) { + Ok((tag, rem)) => (tag, rem), + Err(err) => { + error!(%err, "FromBytes for Key"); + return Err(err); + } + }; match tag { - tag if tag == KeyTag::Account as u8 => { + KeyTag::Account => { let (account_hash, rem) = AccountHash::from_bytes(remainder)?; Ok((Key::Account(account_hash), rem)) } - tag if tag == KeyTag::Hash as u8 => { - let (hash, rem) = FromBytes::from_bytes(remainder)?; + KeyTag::Hash => { + let (hash, rem) = HashAddr::from_bytes(remainder)?; Ok((Key::Hash(hash), rem)) } - tag if tag == KeyTag::URef as u8 => { + KeyTag::URef => { let (uref, rem) = URef::from_bytes(remainder)?; Ok((Key::URef(uref), rem)) } - tag if tag == KeyTag::Transfer as u8 => { - let (transfer_addr, rem) = TransferAddr::from_bytes(remainder)?; - Ok((Key::Transfer(transfer_addr), rem)) + KeyTag::Transfer => { + let (transfer_v1_addr, rem) = TransferAddr::from_bytes(remainder)?; + Ok((Key::Transfer(transfer_v1_addr), rem)) } - tag if tag == KeyTag::DeployInfo as u8 => { - let (deploy_hash, rem) = 
FromBytes::from_bytes(remainder)?; + KeyTag::DeployInfo => { + let (deploy_hash, rem) = DeployHash::from_bytes(remainder)?; Ok((Key::DeployInfo(deploy_hash), rem)) } - tag if tag == KeyTag::EraInfo as u8 => { - let (era_id, rem) = PaddedEraId::from_bytes(remainder)?; - Ok((Key::EraInfo(era_id.into_inner()), rem)) + KeyTag::EraInfo => { + let (era_id, rem) = EraId::from_bytes(remainder)?; + Ok((Key::EraInfo(era_id), rem)) } - tag if tag == KeyTag::Balance as u8 => { + KeyTag::Balance => { let (uref_addr, rem) = URefAddr::from_bytes(remainder)?; Ok((Key::Balance(uref_addr), rem)) } - tag if tag == KeyTag::Bid as u8 => { + KeyTag::Bid => { let (account_hash, rem) = AccountHash::from_bytes(remainder)?; Ok((Key::Bid(account_hash), rem)) } - tag if tag == KeyTag::Withdraw as u8 => { + KeyTag::Withdraw => { let (account_hash, rem) = AccountHash::from_bytes(remainder)?; Ok((Key::Withdraw(account_hash), rem)) } - tag if tag == KeyTag::EraValidators as u8 => { - let (era_id, rem) = PaddedEraId::from_bytes(remainder)?; - Ok((Key::EraValidators(era_id.into_inner()), rem)) + KeyTag::Dictionary => { + let (addr, rem) = DictionaryAddr::from_bytes(remainder)?; + Ok((Key::Dictionary(addr), rem)) + } + KeyTag::SystemEntityRegistry => { + let (_, rem) = <[u8; 32]>::from_bytes(remainder)?; + Ok((Key::SystemEntityRegistry, rem)) + } + KeyTag::EraSummary => { + let (_, rem) = <[u8; 32]>::from_bytes(remainder)?; + Ok((Key::EraSummary, rem)) + } + KeyTag::Unbond => { + let (account_hash, rem) = AccountHash::from_bytes(remainder)?; + Ok((Key::Unbond(account_hash), rem)) + } + KeyTag::ChainspecRegistry => { + let (_, rem) = <[u8; 32]>::from_bytes(remainder)?; + Ok((Key::ChainspecRegistry, rem)) + } + KeyTag::ChecksumRegistry => { + let (_, rem) = <[u8; 32]>::from_bytes(remainder)?; + Ok((Key::ChecksumRegistry, rem)) + } + KeyTag::BidAddr => { + let (bid_addr, rem) = BidAddr::from_bytes(remainder)?; + Ok((Key::BidAddr(bid_addr), rem)) + } + KeyTag::Package => { + let (package_addr, rem) = 
PackageAddr::from_bytes(remainder)?; + Ok((Key::SmartContract(package_addr), rem)) + } + KeyTag::AddressableEntity => { + let (entity_addr, rem) = EntityAddr::from_bytes(remainder)?; + Ok((Key::AddressableEntity(entity_addr), rem)) + } + KeyTag::ByteCode => { + let (byte_code_addr, rem) = ByteCodeAddr::from_bytes(remainder)?; + Ok((Key::ByteCode(byte_code_addr), rem)) + } + KeyTag::Message => { + let (message_addr, rem) = MessageAddr::from_bytes(remainder)?; + Ok((Key::Message(message_addr), rem)) + } + KeyTag::NamedKey => { + let (named_key_addr, rem) = NamedKeyAddr::from_bytes(remainder)?; + Ok((Key::NamedKey(named_key_addr), rem)) + } + KeyTag::BlockGlobal => { + let (addr, rem) = BlockGlobalAddr::from_bytes(remainder)?; + let (_, rem) = <[u8; 31]>::from_bytes(rem)?; // strip padding + Ok((Key::BlockGlobal(addr), rem)) + } + KeyTag::BalanceHold => { + let (balance_hold_addr, rem) = BalanceHoldAddr::from_bytes(remainder)?; + Ok((Key::BalanceHold(balance_hold_addr), rem)) + } + KeyTag::EntryPoint => { + let (entry_point_addr, rem) = EntryPointAddr::from_bytes(remainder)?; + Ok((Key::EntryPoint(entry_point_addr), rem)) + } + KeyTag::State => { + let (entity_addr, rem) = EntityAddr::from_bytes(remainder)?; + Ok((Key::State(entity_addr), rem)) } - _ => Err(Error::Formatting), } } } +#[allow(dead_code)] +fn please_add_to_distribution_impl(key: Key) { + // If you've been forced to come here, you likely need to add your variant to the + // `Distribution` impl for `Key`. 
+ match key { + Key::Account(_) => unimplemented!(), + Key::Hash(_) => unimplemented!(), + Key::URef(_) => unimplemented!(), + Key::Transfer(_) => unimplemented!(), + Key::DeployInfo(_) => unimplemented!(), + Key::EraInfo(_) => unimplemented!(), + Key::Balance(_) => unimplemented!(), + Key::Bid(_) => unimplemented!(), + Key::Withdraw(_) => unimplemented!(), + Key::Dictionary(_) => unimplemented!(), + Key::SystemEntityRegistry => unimplemented!(), + Key::EraSummary => unimplemented!(), + Key::Unbond(_) => unimplemented!(), + Key::ChainspecRegistry => unimplemented!(), + Key::ChecksumRegistry => unimplemented!(), + Key::BidAddr(_) => unimplemented!(), + Key::SmartContract(_) => unimplemented!(), + Key::AddressableEntity(..) => unimplemented!(), + Key::ByteCode(..) => unimplemented!(), + Key::Message(_) => unimplemented!(), + Key::NamedKey(_) => unimplemented!(), + Key::BlockGlobal(_) => unimplemented!(), + Key::BalanceHold(_) => unimplemented!(), + Key::EntryPoint(_) => unimplemented!(), + Key::State(_) => unimplemented!(), + } +} + +#[cfg(any(feature = "testing", test))] impl Distribution for Standard { fn sample(&self, rng: &mut R) -> Key { - match rng.gen_range(0..8) { + match rng.gen_range(0..=24) { 0 => Key::Account(rng.gen()), 1 => Key::Hash(rng.gen()), 2 => Key::URef(rng.gen()), - 3 => Key::Transfer(rng.gen()), - 4 => Key::DeployInfo(rng.gen()), - 5 => Key::EraInfo(rng.gen()), + 3 => Key::Transfer(TransferAddr::new(rng.gen())), + 4 => Key::DeployInfo(DeployHash::from_raw(rng.gen())), + 5 => Key::EraInfo(EraId::new(rng.gen())), 6 => Key::Balance(rng.gen()), 7 => Key::Bid(rng.gen()), 8 => Key::Withdraw(rng.gen()), - 9 => Key::EraValidators(rng.gen()), + 9 => Key::Dictionary(rng.gen()), + 10 => Key::SystemEntityRegistry, + 11 => Key::EraSummary, + 12 => Key::Unbond(rng.gen()), + 13 => Key::ChainspecRegistry, + 14 => Key::ChecksumRegistry, + 15 => Key::BidAddr(rng.gen()), + 16 => Key::SmartContract(rng.gen()), + 17 => Key::AddressableEntity(rng.gen()), + 18 => 
Key::ByteCode(rng.gen()), + 19 => Key::Message(rng.gen()), + 20 => Key::NamedKey(NamedKeyAddr::new_named_key_entry(rng.gen(), rng.gen())), + 21 => Key::BlockGlobal(rng.gen()), + 22 => Key::BalanceHold(rng.gen()), + 23 => Key::EntryPoint(rng.gen()), + 24 => Key::State(rng.gen()), _ => unreachable!(), } } @@ -564,71 +1833,64 @@ impl Distribution for Standard { mod serde_helpers { use super::*; - #[derive(Serialize, Deserialize)] - pub(super) enum HumanReadable { - Account(String), - Hash(String), - URef(String), - Transfer(String), - DeployInfo(String), - EraInfo(String), - Balance(String), - Bid(String), - Withdraw(String), - EraValidators(String), - } - - impl From<&Key> for HumanReadable { - fn from(key: &Key) -> Self { - let formatted_string = key.to_formatted_string(); - match key { - Key::Account(_) => HumanReadable::Account(formatted_string), - Key::Hash(_) => HumanReadable::Hash(formatted_string), - Key::URef(_) => HumanReadable::URef(formatted_string), - Key::Transfer(_) => HumanReadable::Transfer(formatted_string), - Key::DeployInfo(_) => HumanReadable::DeployInfo(formatted_string), - Key::EraInfo(_) => HumanReadable::EraInfo(formatted_string), - Key::Balance(_) => HumanReadable::Balance(formatted_string), - Key::Bid(_) => HumanReadable::Bid(formatted_string), - Key::Withdraw(_) => HumanReadable::Withdraw(formatted_string), - Key::EraValidators(_) => HumanReadable::EraValidators(formatted_string), - } - } - } - - impl TryFrom for Key { - type Error = FromStrError; - - fn try_from(helper: HumanReadable) -> Result { - match helper { - HumanReadable::Account(formatted_string) - | HumanReadable::Hash(formatted_string) - | HumanReadable::URef(formatted_string) - | HumanReadable::Transfer(formatted_string) - | HumanReadable::DeployInfo(formatted_string) - | HumanReadable::EraInfo(formatted_string) - | HumanReadable::Balance(formatted_string) - | HumanReadable::Bid(formatted_string) - | HumanReadable::Withdraw(formatted_string) - | 
HumanReadable::EraValidators(formatted_string) => { - Key::from_formatted_str(&formatted_string) - } - } - } - } - #[derive(Serialize)] pub(super) enum BinarySerHelper<'a> { Account(&'a AccountHash), Hash(&'a HashAddr), URef(&'a URef), Transfer(&'a TransferAddr), + #[serde(with = "crate::serde_helpers::deploy_hash_as_array")] DeployInfo(&'a DeployHash), EraInfo(&'a EraId), Balance(&'a URefAddr), Bid(&'a AccountHash), Withdraw(&'a AccountHash), - EraValidators(&'a EraId), + Dictionary(&'a HashAddr), + SystemEntityRegistry, + EraSummary, + Unbond(&'a AccountHash), + ChainspecRegistry, + ChecksumRegistry, + BidAddr(&'a BidAddr), + Package(&'a PackageAddr), + AddressableEntity(&'a EntityAddr), + ByteCode(&'a ByteCodeAddr), + Message(&'a MessageAddr), + NamedKey(&'a NamedKeyAddr), + BlockGlobal(&'a BlockGlobalAddr), + BalanceHold(&'a BalanceHoldAddr), + EntryPoint(&'a EntryPointAddr), + State(&'a EntityAddr), + } + + #[derive(Deserialize)] + pub(super) enum BinaryDeserHelper { + Account(AccountHash), + Hash(HashAddr), + URef(URef), + Transfer(TransferAddr), + #[serde(with = "crate::serde_helpers::deploy_hash_as_array")] + DeployInfo(DeployHash), + EraInfo(EraId), + Balance(URefAddr), + Bid(AccountHash), + Withdraw(AccountHash), + Dictionary(DictionaryAddr), + SystemEntityRegistry, + EraSummary, + Unbond(AccountHash), + ChainspecRegistry, + ChecksumRegistry, + BidAddr(BidAddr), + Package(PackageAddr), + AddressableEntity(EntityAddr), + ByteCode(ByteCodeAddr), + Message(MessageAddr), + NamedKey(NamedKeyAddr), + BlockGlobal(BlockGlobalAddr), + BalanceHold(BalanceHoldAddr), + EntryPoint(EntryPointAddr), + State(EntityAddr), } impl<'a> From<&'a Key> for BinarySerHelper<'a> { @@ -637,44 +1899,70 @@ mod serde_helpers { Key::Account(account_hash) => BinarySerHelper::Account(account_hash), Key::Hash(hash_addr) => BinarySerHelper::Hash(hash_addr), Key::URef(uref) => BinarySerHelper::URef(uref), - Key::Transfer(transfer_addr) => BinarySerHelper::Transfer(transfer_addr), + 
Key::Transfer(transfer_v1_addr) => BinarySerHelper::Transfer(transfer_v1_addr), Key::DeployInfo(deploy_hash) => BinarySerHelper::DeployInfo(deploy_hash), Key::EraInfo(era_id) => BinarySerHelper::EraInfo(era_id), Key::Balance(uref_addr) => BinarySerHelper::Balance(uref_addr), Key::Bid(account_hash) => BinarySerHelper::Bid(account_hash), Key::Withdraw(account_hash) => BinarySerHelper::Withdraw(account_hash), - Key::EraValidators(era_id) => BinarySerHelper::EraValidators(era_id), + Key::Dictionary(addr) => BinarySerHelper::Dictionary(addr), + Key::SystemEntityRegistry => BinarySerHelper::SystemEntityRegistry, + Key::EraSummary => BinarySerHelper::EraSummary, + Key::Unbond(account_hash) => BinarySerHelper::Unbond(account_hash), + Key::ChainspecRegistry => BinarySerHelper::ChainspecRegistry, + Key::ChecksumRegistry => BinarySerHelper::ChecksumRegistry, + Key::BidAddr(bid_addr) => BinarySerHelper::BidAddr(bid_addr), + Key::Message(message_addr) => BinarySerHelper::Message(message_addr), + Key::SmartContract(package_addr) => BinarySerHelper::Package(package_addr), + Key::AddressableEntity(entity_addr) => { + BinarySerHelper::AddressableEntity(entity_addr) + } + Key::ByteCode(byte_code_addr) => BinarySerHelper::ByteCode(byte_code_addr), + Key::NamedKey(named_key_addr) => BinarySerHelper::NamedKey(named_key_addr), + Key::BlockGlobal(addr) => BinarySerHelper::BlockGlobal(addr), + Key::BalanceHold(balance_hold_addr) => { + BinarySerHelper::BalanceHold(balance_hold_addr) + } + Key::EntryPoint(entry_point_addr) => BinarySerHelper::EntryPoint(entry_point_addr), + Key::State(entity_addr) => BinarySerHelper::State(entity_addr), } } } - #[derive(Deserialize)] - pub(super) enum BinaryDeserHelper { - Account(AccountHash), - Hash(HashAddr), - URef(URef), - Transfer(TransferAddr), - DeployInfo(DeployHash), - EraInfo(EraId), - Balance(URefAddr), - Bid(AccountHash), - Withdraw(AccountHash), - EraValidators(EraId), - } - impl From for Key { fn from(helper: BinaryDeserHelper) -> Self { 
match helper { BinaryDeserHelper::Account(account_hash) => Key::Account(account_hash), BinaryDeserHelper::Hash(hash_addr) => Key::Hash(hash_addr), BinaryDeserHelper::URef(uref) => Key::URef(uref), - BinaryDeserHelper::Transfer(transfer_addr) => Key::Transfer(transfer_addr), + BinaryDeserHelper::Transfer(transfer_v1_addr) => Key::Transfer(transfer_v1_addr), BinaryDeserHelper::DeployInfo(deploy_hash) => Key::DeployInfo(deploy_hash), BinaryDeserHelper::EraInfo(era_id) => Key::EraInfo(era_id), BinaryDeserHelper::Balance(uref_addr) => Key::Balance(uref_addr), BinaryDeserHelper::Bid(account_hash) => Key::Bid(account_hash), BinaryDeserHelper::Withdraw(account_hash) => Key::Withdraw(account_hash), - BinaryDeserHelper::EraValidators(era_id) => Key::EraValidators(era_id), + BinaryDeserHelper::Dictionary(addr) => Key::Dictionary(addr), + BinaryDeserHelper::SystemEntityRegistry => Key::SystemEntityRegistry, + BinaryDeserHelper::EraSummary => Key::EraSummary, + BinaryDeserHelper::Unbond(account_hash) => Key::Unbond(account_hash), + BinaryDeserHelper::ChainspecRegistry => Key::ChainspecRegistry, + BinaryDeserHelper::ChecksumRegistry => Key::ChecksumRegistry, + BinaryDeserHelper::BidAddr(bid_addr) => Key::BidAddr(bid_addr), + BinaryDeserHelper::Message(message_addr) => Key::Message(message_addr), + BinaryDeserHelper::Package(package_addr) => Key::SmartContract(package_addr), + BinaryDeserHelper::AddressableEntity(entity_addr) => { + Key::AddressableEntity(entity_addr) + } + BinaryDeserHelper::ByteCode(byte_code_addr) => Key::ByteCode(byte_code_addr), + BinaryDeserHelper::NamedKey(named_key_addr) => Key::NamedKey(named_key_addr), + BinaryDeserHelper::BlockGlobal(addr) => Key::BlockGlobal(addr), + BinaryDeserHelper::BalanceHold(balance_hold_addr) => { + Key::BalanceHold(balance_hold_addr) + } + BinaryDeserHelper::EntryPoint(entry_point_addr) => { + Key::EntryPoint(entry_point_addr) + } + BinaryDeserHelper::State(entity_addr) => Key::State(entity_addr), } } } @@ -683,7 +1971,7 @@ 
mod serde_helpers { impl Serialize for Key { fn serialize(&self, serializer: S) -> Result { if serializer.is_human_readable() { - serde_helpers::HumanReadable::from(self).serialize(serializer) + self.to_formatted_string().serialize(serializer) } else { serde_helpers::BinarySerHelper::from(self).serialize(serializer) } @@ -693,8 +1981,8 @@ impl Serialize for Key { impl<'de> Deserialize<'de> for Key { fn deserialize>(deserializer: D) -> Result { if deserializer.is_human_readable() { - let human_readable = serde_helpers::HumanReadable::deserialize(deserializer)?; - Key::try_from(human_readable).map_err(SerdeError::custom) + let formatted_key = String::deserialize(deserializer)?; + Key::from_formatted_str(&formatted_key).map_err(SerdeError::custom) } else { let binary_helper = serde_helpers::BinaryDeserHelper::deserialize(deserializer)?; Ok(Key::from(binary_helper)) @@ -706,15 +1994,112 @@ impl<'de> Deserialize<'de> for Key { mod tests { use std::string::ToString; - use proptest::proptest; - use super::*; use crate::{ + account::ACCOUNT_HASH_FORMATTED_STRING_PREFIX, bytesrepr::{Error, FromBytes}, - gens::era_id_arb, - AccessRights, URef, + uref::UREF_FORMATTED_STRING_PREFIX, + AccessRights, BlockTime, URef, }; + const TRANSFER_ADDR_FORMATTED_STRING_PREFIX: &str = "transfer-"; + const ENTITY_PREFIX: &str = "entity-"; + const ACCOUNT_ENTITY_PREFIX: &str = "account-"; + + const BYTE_CODE_PREFIX: &str = "byte-code-"; + const EMPTY_PREFIX: &str = "empty-"; + + const ACCOUNT_KEY: Key = Key::Account(AccountHash::new([42; 32])); + const HASH_KEY: Key = Key::Hash([42; 32]); + const UREF_KEY: Key = Key::URef(URef::new([42; 32], AccessRights::READ)); + const TRANSFER_KEY: Key = Key::Transfer(TransferAddr::new([42; 32])); + const DEPLOY_INFO_KEY: Key = Key::DeployInfo(DeployHash::from_raw([42; 32])); + const ERA_INFO_KEY: Key = Key::EraInfo(EraId::new(42)); + const BALANCE_KEY: Key = Key::Balance([42; 32]); + const BID_KEY: Key = Key::Bid(AccountHash::new([42; 32])); + const 
UNIFIED_BID_KEY: Key = Key::BidAddr(BidAddr::legacy([42; 32])); + const VALIDATOR_BID_KEY: Key = Key::BidAddr(BidAddr::new_validator_addr([2; 32])); + const DELEGATOR_BID_KEY: Key = + Key::BidAddr(BidAddr::new_delegator_account_addr(([2; 32], [9; 32]))); + const WITHDRAW_KEY: Key = Key::Withdraw(AccountHash::new([42; 32])); + const DICTIONARY_KEY: Key = Key::Dictionary([42; 32]); + const SYSTEM_ENTITY_REGISTRY_KEY: Key = Key::SystemEntityRegistry; + const ERA_SUMMARY_KEY: Key = Key::EraSummary; + const UNBOND_KEY: Key = Key::Unbond(AccountHash::new([42; 32])); + const CHAINSPEC_REGISTRY_KEY: Key = Key::ChainspecRegistry; + const CHECKSUM_REGISTRY_KEY: Key = Key::ChecksumRegistry; + const PACKAGE_KEY: Key = Key::SmartContract([42; 32]); + const ADDRESSABLE_ENTITY_SYSTEM_KEY: Key = + Key::AddressableEntity(EntityAddr::new_system([42; 32])); + const ADDRESSABLE_ENTITY_ACCOUNT_KEY: Key = + Key::AddressableEntity(EntityAddr::new_account([42; 32])); + const ADDRESSABLE_ENTITY_SMART_CONTRACT_KEY: Key = + Key::AddressableEntity(EntityAddr::new_smart_contract([42; 32])); + const BYTE_CODE_EMPTY_KEY: Key = Key::ByteCode(ByteCodeAddr::Empty); + const BYTE_CODE_V1_WASM_KEY: Key = Key::ByteCode(ByteCodeAddr::V1CasperWasm([42; 32])); + const MESSAGE_TOPIC_KEY: Key = Key::Message(MessageAddr::new_topic_addr( + EntityAddr::SmartContract([42; 32]), + TopicNameHash::new([42; 32]), + )); + const MESSAGE_KEY: Key = Key::Message(MessageAddr::new_message_addr( + EntityAddr::SmartContract([42; 32]), + TopicNameHash::new([2; 32]), + 15, + )); + const NAMED_KEY: Key = Key::NamedKey(NamedKeyAddr::new_named_key_entry( + EntityAddr::new_smart_contract([42; 32]), + [43; 32], + )); + const BLOCK_TIME_KEY: Key = Key::BlockGlobal(BlockGlobalAddr::BlockTime); + const BLOCK_MESSAGE_COUNT_KEY: Key = Key::BlockGlobal(BlockGlobalAddr::MessageCount); + // const STATE_KEY: Key = Key::State(EntityAddr::new_contract_entity_addr([42; 32])); + const BALANCE_HOLD: Key = + 
Key::BalanceHold(BalanceHoldAddr::new_gas([42; 32], BlockTime::new(100))); + const STATE_KEY: Key = Key::State(EntityAddr::new_smart_contract([42; 32])); + const KEYS: &[Key] = &[ + ACCOUNT_KEY, + HASH_KEY, + UREF_KEY, + TRANSFER_KEY, + DEPLOY_INFO_KEY, + ERA_INFO_KEY, + BALANCE_KEY, + BID_KEY, + WITHDRAW_KEY, + DICTIONARY_KEY, + SYSTEM_ENTITY_REGISTRY_KEY, + ERA_SUMMARY_KEY, + UNBOND_KEY, + CHAINSPEC_REGISTRY_KEY, + CHECKSUM_REGISTRY_KEY, + UNIFIED_BID_KEY, + VALIDATOR_BID_KEY, + DELEGATOR_BID_KEY, + PACKAGE_KEY, + ADDRESSABLE_ENTITY_SYSTEM_KEY, + ADDRESSABLE_ENTITY_ACCOUNT_KEY, + ADDRESSABLE_ENTITY_SMART_CONTRACT_KEY, + BYTE_CODE_EMPTY_KEY, + BYTE_CODE_V1_WASM_KEY, + MESSAGE_TOPIC_KEY, + MESSAGE_KEY, + NAMED_KEY, + BLOCK_TIME_KEY, + BLOCK_MESSAGE_COUNT_KEY, + BALANCE_HOLD, + STATE_KEY, + ]; + const HEX_STRING: &str = "2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a"; + const TOPIC_NAME_HEX_STRING: &str = + "0202020202020202020202020202020202020202020202020202020202020202"; + const MESSAGE_INDEX_HEX_STRING: &str = "f"; + const UNIFIED_HEX_STRING: &str = + "002a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a"; + const VALIDATOR_HEX_STRING: &str = + "010202020202020202020202020202020202020202020202020202020202020202"; + const DELEGATOR_HEX_STRING: &str = + "0202020202020202020202020202020202020202020202020202020202020202020909090909090909090909090909090909090909090909090909090909090909"; + fn test_readable(right: AccessRights, is_true: bool) { assert_eq!(right.is_readable(), is_true) } @@ -762,39 +2147,149 @@ mod tests { #[test] fn should_display_key() { - let expected_hash = core::iter::repeat("0").take(64).collect::(); - let addr_array = [0u8; 32]; - let account_hash = AccountHash::new(addr_array); - let account_key = Key::Account(account_hash); assert_eq!( - format!("{}", account_key), - format!("Key::Account({})", expected_hash) + format!("{}", ACCOUNT_KEY), + format!("Key::Account({})", HEX_STRING) ); - let uref_key = 
Key::URef(URef::new(addr_array, AccessRights::READ)); assert_eq!( - format!("{}", uref_key), - format!("Key::URef({}, READ)", expected_hash) + format!("{}", HASH_KEY), + format!("Key::Hash({})", HEX_STRING) ); - let hash_key = Key::Hash(addr_array); assert_eq!( - format!("{}", hash_key), - format!("Key::Hash({})", expected_hash) + format!("{}", UREF_KEY), + format!("Key::URef({}, READ)", HEX_STRING) ); - let transfer_key = Key::Transfer(TransferAddr::new(addr_array)); assert_eq!( - format!("{}", transfer_key), - format!("Key::Transfer({})", expected_hash) + format!("{}", TRANSFER_KEY), + format!("Key::Transfer({})", HEX_STRING) ); - let deploy_info_key = Key::DeployInfo(DeployHash::new(addr_array)); assert_eq!( - format!("{}", deploy_info_key), - format!("Key::DeployInfo({})", expected_hash) + format!("{}", DEPLOY_INFO_KEY), + format!("Key::DeployInfo({})", HEX_STRING) ); - let era_info_key = Key::EraInfo(EraId::from(42)); assert_eq!( - format!("{}", era_info_key), + format!("{}", ERA_INFO_KEY), "Key::EraInfo(era 42)".to_string() ); + assert_eq!( + format!("{}", BALANCE_KEY), + format!("Key::Balance({})", HEX_STRING) + ); + assert_eq!(format!("{}", BID_KEY), format!("Key::Bid({})", HEX_STRING)); + assert_eq!( + format!("{}", UNIFIED_BID_KEY), + format!("Key::BidAddr({})", UNIFIED_HEX_STRING) + ); + assert_eq!( + format!("{}", VALIDATOR_BID_KEY), + format!("Key::BidAddr({})", VALIDATOR_HEX_STRING) + ); + assert_eq!( + format!("{}", DELEGATOR_BID_KEY), + format!("Key::BidAddr({})", DELEGATOR_HEX_STRING) + ); + assert_eq!( + format!("{}", WITHDRAW_KEY), + format!("Key::Withdraw({})", HEX_STRING) + ); + assert_eq!( + format!("{}", DICTIONARY_KEY), + format!("Key::Dictionary({})", HEX_STRING) + ); + assert_eq!( + format!("{}", SYSTEM_ENTITY_REGISTRY_KEY), + format!( + "Key::SystemEntityRegistry({})", + base16::encode_lower(&PADDING_BYTES) + ) + ); + assert_eq!( + format!("{}", ERA_SUMMARY_KEY), + format!("Key::EraSummary({})", base16::encode_lower(&PADDING_BYTES)) + ); 
+ assert_eq!( + format!("{}", UNBOND_KEY), + format!("Key::Unbond({})", HEX_STRING) + ); + assert_eq!( + format!("{}", CHAINSPEC_REGISTRY_KEY), + format!( + "Key::ChainspecRegistry({})", + base16::encode_lower(&PADDING_BYTES) + ) + ); + assert_eq!( + format!("{}", CHECKSUM_REGISTRY_KEY), + format!( + "Key::ChecksumRegistry({})", + base16::encode_lower(&PADDING_BYTES), + ) + ); + assert_eq!( + format!("{}", PACKAGE_KEY), + format!("Key::Package({})", HEX_STRING) + ); + assert_eq!( + format!("{}", ADDRESSABLE_ENTITY_SYSTEM_KEY), + format!("Key::AddressableEntity(system-{})", HEX_STRING) + ); + assert_eq!( + format!("{}", ADDRESSABLE_ENTITY_ACCOUNT_KEY), + format!("Key::AddressableEntity(account-{})", HEX_STRING) + ); + assert_eq!( + format!("{}", ADDRESSABLE_ENTITY_SMART_CONTRACT_KEY), + format!("Key::AddressableEntity(contract-{})", HEX_STRING) + ); + assert_eq!( + format!("{}", BYTE_CODE_EMPTY_KEY), + format!( + "Key::ByteCode(byte-code-empty-{})", + base16::encode_lower(&[0u8; 32]) + ) + ); + assert_eq!( + format!("{}", BYTE_CODE_V1_WASM_KEY), + format!("Key::ByteCode(byte-code-v1-wasm-{})", HEX_STRING) + ); + assert_eq!( + format!("{}", MESSAGE_TOPIC_KEY), + format!( + "Key::Message(entity-contract-{}-{})", + HEX_STRING, HEX_STRING + ) + ); + assert_eq!( + format!("{}", MESSAGE_KEY), + format!( + "Key::Message(entity-contract-{}-{}-{})", + HEX_STRING, TOPIC_NAME_HEX_STRING, MESSAGE_INDEX_HEX_STRING + ) + ); + + assert_eq!( + format!("{}", STATE_KEY), + format!( + "Key::State(entity-contract-{})", + base16::encode_lower(&[42; 32]) + ) + ); + assert_eq!( + format!("{}", BLOCK_TIME_KEY), + format!( + "Key::BlockGlobal({}-{})", + BlockGlobalAddr::BlockTime, + base16::encode_lower(&BLOCK_GLOBAL_PADDING_BYTES) + ) + ); + assert_eq!( + format!("{}", BLOCK_MESSAGE_COUNT_KEY), + format!( + "Key::BlockGlobal({}-{})", + BlockGlobalAddr::MessageCount, + base16::encode_lower(&BLOCK_GLOBAL_PADDING_BYTES) + ) + ); } #[test] @@ -802,10 +2297,29 @@ mod tests { // Prefix is 2^32-1 
= shouldn't allocate that much let bytes: Vec = vec![255, 255, 255, 255, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; let res: Result<(Vec, &[u8]), _> = FromBytes::from_bytes(&bytes); - #[cfg(target_os = "linux")] - assert_eq!(res.expect_err("should fail"), Error::OutOfMemory); - #[cfg(target_os = "macos")] - assert_eq!(res.expect_err("should fail"), Error::EarlyEndOfStream); + assert_eq!( + res.expect_err("should fail"), + Error::EarlyEndOfStream, + "length prefix says 2^32-1, but there's not enough data in the stream" + ); + + // Prefix is 2^32-2 = shouldn't allocate that much + let bytes: Vec = vec![255, 255, 255, 254, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; + let res: Result<(Vec, &[u8]), _> = FromBytes::from_bytes(&bytes); + assert_eq!( + res.expect_err("should fail"), + Error::EarlyEndOfStream, + "length prefix says 2^32-2, but there's not enough data in the stream" + ); + + // Valid prefix but not enough data in the stream + let bytes: Vec = vec![0, 0, 0, 254, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; + let res: Result<(Vec, &[u8]), _> = FromBytes::from_bytes(&bytes); + assert_eq!( + res.expect_err("should fail"), + Error::EarlyEndOfStream, + "length prefix says 254, but there's not enough data in the stream" + ); } #[test] @@ -814,7 +2328,7 @@ mod tests { let account_hash = AccountHash::new(account); let key1 = Key::Account(account_hash); assert_eq!(key1.into_account(), Some(account_hash)); - assert!(key1.into_hash().is_none()); + assert!(key1.into_entity_hash_addr().is_some()); assert!(key1.as_uref().is_none()); } @@ -823,7 +2337,25 @@ mod tests { let hash = [42; KEY_HASH_LENGTH]; let key1 = Key::Hash(hash); assert!(key1.into_account().is_none()); - assert_eq!(key1.into_hash(), Some(hash)); + assert_eq!(key1.into_hash_addr(), Some(hash)); + assert!(key1.as_uref().is_none()); + } + + #[test] + fn check_entity_key_getters() { + let hash = [42; KEY_HASH_LENGTH]; + let key1 = Key::contract_entity_key(AddressableEntityHash::new(hash)); + assert!(key1.into_account().is_none()); + 
assert_eq!(key1.into_entity_hash_addr(), Some(hash)); + assert!(key1.as_uref().is_none()); + } + + #[test] + fn check_package_key_getters() { + let hash = [42; KEY_HASH_LENGTH]; + let key1 = Key::SmartContract(hash); + assert!(key1.into_account().is_none()); + assert_eq!(key1.into_package_addr(), Some(hash)); assert!(key1.as_uref().is_none()); } @@ -832,172 +2364,439 @@ mod tests { let uref = URef::new([42; 32], AccessRights::READ_ADD_WRITE); let key1 = Key::URef(uref); assert!(key1.into_account().is_none()); - assert!(key1.into_hash().is_none()); + assert!(key1.into_entity_hash_addr().is_none()); assert_eq!(key1.as_uref(), Some(&uref)); } #[test] fn key_max_serialized_length() { - let key_account = Key::Account(AccountHash::new([42; BLAKE2B_DIGEST_LENGTH])); - assert!(key_account.serialized_length() <= Key::max_serialized_length()); - - let key_hash = Key::Hash([42; KEY_HASH_LENGTH]); - assert!(key_hash.serialized_length() <= Key::max_serialized_length()); - - let key_uref = Key::URef(URef::new([42; BLAKE2B_DIGEST_LENGTH], AccessRights::READ)); - assert!(key_uref.serialized_length() <= Key::max_serialized_length()); - - let key_transfer = Key::Transfer(TransferAddr::new([42; BLAKE2B_DIGEST_LENGTH])); - assert!(key_transfer.serialized_length() <= Key::max_serialized_length()); - - let key_deploy_info = Key::DeployInfo(DeployHash::new([42; BLAKE2B_DIGEST_LENGTH])); - assert!(key_deploy_info.serialized_length() <= Key::max_serialized_length()); + let mut got_max = false; + for key in KEYS { + let expected = Key::max_serialized_length(); + let actual = key.serialized_length(); + assert!( + actual <= expected, + "key too long {} expected {} actual {}", + key, + expected, + actual + ); + if actual == Key::max_serialized_length() { + got_max = true; + } + } + assert!( + got_max, + "None of the Key variants has a serialized_length equal to \ + Key::max_serialized_length(), so Key::max_serialized_length() should be reduced" + ); + } - let key_era_info = 
Key::EraInfo(EraId::from(42)); - assert!(key_era_info.serialized_length() <= Key::max_serialized_length()); + #[test] + fn should_parse_legacy_bid_key_from_string() { + let account_hash = AccountHash([1; 32]); + let legacy_bid_key = Key::Bid(account_hash); + let original_string = legacy_bid_key.to_formatted_string(); + + let parsed_bid_key = + Key::from_formatted_str(&original_string).expect("{string} (key = {key:?})"); + if let Key::Bid(parsed_account_hash) = parsed_bid_key { + assert_eq!(parsed_account_hash, account_hash,); + assert_eq!(legacy_bid_key, parsed_bid_key); + + let translated_string = parsed_bid_key.to_formatted_string(); + assert_eq!(original_string, translated_string); + } else { + panic!("should have account hash"); + } } - fn to_string_round_trip(key: Key) { - let string = key.to_formatted_string(); - let parsed_key = Key::from_formatted_str(&string).unwrap(); - assert_eq!(key, parsed_key); + #[test] + fn should_parse_legacy_unified_bid_key_from_string() { + let legacy_bid_addr = BidAddr::legacy([1; 32]); + let legacy_bid_key = Key::BidAddr(legacy_bid_addr); + assert_eq!(legacy_bid_addr.tag(), BidAddrTag::Unified,); + + let original_string = legacy_bid_key.to_formatted_string(); + let parsed_key = + Key::from_formatted_str(&original_string).expect("{string} (key = {key:?})"); + let parsed_bid_addr = parsed_key.as_bid_addr().expect("must have bid addr"); + assert!(parsed_key.is_bid_addr_key()); + assert_eq!(parsed_bid_addr.tag(), legacy_bid_addr.tag(),); + assert_eq!(*parsed_bid_addr, legacy_bid_addr); + + let translated_string = parsed_key.to_formatted_string(); + assert_eq!(original_string, translated_string); + assert_eq!(parsed_key.as_bid_addr(), legacy_bid_key.as_bid_addr(),); } #[test] - fn key_from_str() { - to_string_round_trip(Key::Account(AccountHash::new([42; BLAKE2B_DIGEST_LENGTH]))); - to_string_round_trip(Key::Hash([42; KEY_HASH_LENGTH])); - to_string_round_trip(Key::URef(URef::new( - [255; BLAKE2B_DIGEST_LENGTH], - 
AccessRights::READ, - ))); - to_string_round_trip(Key::Transfer(TransferAddr::new([42; KEY_HASH_LENGTH]))); - to_string_round_trip(Key::DeployInfo(DeployHash::new([42; KEY_HASH_LENGTH]))); - to_string_round_trip(Key::EraInfo(EraId::from(42))); + fn should_parse_validator_bid_key_from_string() { + let validator_bid_addr = BidAddr::new_validator_addr([1; 32]); + let validator_bid_key = Key::BidAddr(validator_bid_addr); + assert_eq!(validator_bid_addr.tag(), BidAddrTag::Validator,); + + let original_string = validator_bid_key.to_formatted_string(); + let parsed_key = + Key::from_formatted_str(&original_string).expect("{string} (key = {key:?})"); + let parsed_bid_addr = parsed_key.as_bid_addr().expect("must have bid addr"); + assert!(parsed_key.is_bid_addr_key()); + assert_eq!(parsed_bid_addr.tag(), validator_bid_addr.tag(),); + assert_eq!(*parsed_bid_addr, validator_bid_addr,); + + let translated_string = parsed_key.to_formatted_string(); + assert_eq!(original_string, translated_string); + assert_eq!(parsed_key.as_bid_addr(), validator_bid_key.as_bid_addr(),); + } - let invalid_prefix = "a-0000000000000000000000000000000000000000000000000000000000000000"; - assert!(Key::from_formatted_str(invalid_prefix).is_err()); + #[test] + fn should_parse_delegator_bid_key_from_string() { + let delegator_bid_addr = BidAddr::new_delegator_account_addr(([1; 32], [9; 32])); + let delegator_bid_key = Key::BidAddr(delegator_bid_addr); + assert_eq!(delegator_bid_addr.tag(), BidAddrTag::DelegatedAccount); + + let original_string = delegator_bid_key.to_formatted_string(); + + let parsed_key = + Key::from_formatted_str(&original_string).expect("{string} (key = {key:?})"); + let parsed_bid_addr = parsed_key.as_bid_addr().expect("must have bid addr"); + assert!(parsed_key.is_bid_addr_key()); + assert_eq!(parsed_bid_addr.tag(), delegator_bid_addr.tag(),); + assert_eq!(*parsed_bid_addr, delegator_bid_addr,); + + let translated_string = parsed_key.to_formatted_string(); + 
assert_eq!(original_string, translated_string); + assert_eq!(parsed_key.as_bid_addr(), delegator_bid_key.as_bid_addr(),); + } - let invalid_prefix = "hash0000000000000000000000000000000000000000000000000000000000000000"; - assert!(Key::from_formatted_str(invalid_prefix).is_err()); + #[test] + fn should_parse_credit_bid_key_from_string() { + let credit_bid_addr = BidAddr::Credit { + validator: AccountHash::new([1; 32]), + era_id: 1.into(), + }; + let delegator_bid_key = Key::BidAddr(credit_bid_addr); + assert_eq!(credit_bid_addr.tag(), BidAddrTag::Credit); - let short_addr = "00000000000000000000000000000000000000000000000000000000000000"; - assert!(Key::from_formatted_str(&format!("{}{}", HASH_PREFIX, short_addr)).is_err()); + let original_string = delegator_bid_key.to_formatted_string(); - let long_addr = "000000000000000000000000000000000000000000000000000000000000000000"; - assert!(Key::from_formatted_str(&format!("{}{}", HASH_PREFIX, long_addr)).is_err()); + let parsed_key = + Key::from_formatted_str(&original_string).expect("{string} (key = {key:?})"); + let parsed_bid_addr = parsed_key.as_bid_addr().expect("must have bid addr"); + assert!(parsed_key.is_bid_addr_key()); + assert_eq!(parsed_bid_addr.tag(), credit_bid_addr.tag(),); + assert_eq!(*parsed_bid_addr, credit_bid_addr,); - let invalid_hex = "000000000000000000000000000000000000000000000000000000000000000g"; - assert!(Key::from_formatted_str(&format!("{}{}", HASH_PREFIX, invalid_hex)).is_err()); + let translated_string = parsed_key.to_formatted_string(); + assert_eq!(original_string, translated_string); + assert_eq!(parsed_key.as_bid_addr(), delegator_bid_key.as_bid_addr()); } #[test] - fn key_to_json() { - let array = [42; BLAKE2B_DIGEST_LENGTH]; - let hex_bytes = "2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a"; + fn should_parse_key_from_str() { + for key in KEYS { + let string = key.to_formatted_string(); + let parsed_key = Key::from_formatted_str(&string).expect("{string} (key = 
{key:?})"); + assert_eq!(parsed_key, *key, "{string} (key = {key:?})"); + } + } - let key_account = Key::Account(AccountHash::new(array)); - assert_eq!( - serde_json::to_string(&key_account).unwrap(), - format!(r#"{{"Account":"account-hash-{}"}}"#, hex_bytes) + #[test] + fn should_fail_to_parse_key_from_str() { + assert!( + Key::from_formatted_str(ACCOUNT_HASH_FORMATTED_STRING_PREFIX) + .unwrap_err() + .to_string() + .starts_with("account-key from string error: ") ); - - let key_hash = Key::Hash(array); - assert_eq!( - serde_json::to_string(&key_hash).unwrap(), - format!(r#"{{"Hash":"hash-{}"}}"#, hex_bytes) + assert!(Key::from_formatted_str(HASH_PREFIX) + .unwrap_err() + .to_string() + .starts_with("hash-key from string error: ")); + assert!(Key::from_formatted_str(UREF_FORMATTED_STRING_PREFIX) + .unwrap_err() + .to_string() + .starts_with("uref-key from string error: ")); + assert!( + Key::from_formatted_str(TRANSFER_ADDR_FORMATTED_STRING_PREFIX) + .unwrap_err() + .to_string() + .starts_with("legacy-transfer-key from string error: ") ); - - let key_uref = Key::URef(URef::new(array, AccessRights::READ)); - assert_eq!( - serde_json::to_string(&key_uref).unwrap(), - format!(r#"{{"URef":"uref-{}-001"}}"#, hex_bytes) + assert!(Key::from_formatted_str(DEPLOY_INFO_PREFIX) + .unwrap_err() + .to_string() + .starts_with("deploy-info-key from string error: ")); + assert!(Key::from_formatted_str(ERA_INFO_PREFIX) + .unwrap_err() + .to_string() + .starts_with("era-info-key from string error: ")); + assert!(Key::from_formatted_str(BALANCE_PREFIX) + .unwrap_err() + .to_string() + .starts_with("balance-key from string error: ")); + assert!(Key::from_formatted_str(BID_PREFIX) + .unwrap_err() + .to_string() + .starts_with("bid-key from string error: ")); + assert!(Key::from_formatted_str(WITHDRAW_PREFIX) + .unwrap_err() + .to_string() + .starts_with("withdraw-key from string error: ")); + assert!(Key::from_formatted_str(DICTIONARY_PREFIX) + .unwrap_err() + .to_string() + 
.starts_with("dictionary-key from string error: ")); + assert!(Key::from_formatted_str(SYSTEM_ENTITY_REGISTRY_PREFIX) + .unwrap_err() + .to_string() + .starts_with("system-contract-registry-key from string error: ")); + assert!(Key::from_formatted_str(ERA_SUMMARY_PREFIX) + .unwrap_err() + .to_string() + .starts_with("era-summary-key from string error")); + assert!(Key::from_formatted_str(UNBOND_PREFIX) + .unwrap_err() + .to_string() + .starts_with("unbond-key from string error: ")); + assert!(Key::from_formatted_str(CHAINSPEC_REGISTRY_PREFIX) + .unwrap_err() + .to_string() + .starts_with("chainspec-registry-key from string error: ")); + assert!(Key::from_formatted_str(CHECKSUM_REGISTRY_PREFIX) + .unwrap_err() + .to_string() + .starts_with("checksum-registry-key from string error: ")); + let bid_addr_err = Key::from_formatted_str(BID_ADDR_PREFIX) + .unwrap_err() + .to_string(); + assert!( + bid_addr_err.starts_with("bid-addr-key from string error: "), + "{}", + bid_addr_err ); - - let key_transfer = Key::Transfer(TransferAddr::new(array)); + assert!(Key::from_formatted_str(PACKAGE_PREFIX) + .unwrap_err() + .to_string() + .starts_with("package-key from string error: ")); + + let error_string = + Key::from_formatted_str(&format!("{}{}", ENTITY_PREFIX, ACCOUNT_ENTITY_PREFIX)) + .unwrap_err() + .to_string(); + assert!(error_string.starts_with("addressable-entity-key from string error: ")); + assert!( + Key::from_formatted_str(&format!("{}{}", BYTE_CODE_PREFIX, EMPTY_PREFIX)) + .unwrap_err() + .to_string() + .starts_with("byte-code-key from string error: ") + ); + let invalid_prefix = "a-0000000000000000000000000000000000000000000000000000000000000000"; assert_eq!( - serde_json::to_string(&key_transfer).unwrap(), - format!(r#"{{"Transfer":"transfer-{}"}}"#, hex_bytes) + Key::from_formatted_str(invalid_prefix) + .unwrap_err() + .to_string(), + "unknown prefix for key" ); - let key_deploy_info = Key::DeployInfo(DeployHash::new(array)); + let missing_hyphen_prefix = + 
"hash0000000000000000000000000000000000000000000000000000000000000000"; assert_eq!( - serde_json::to_string(&key_deploy_info).unwrap(), - format!(r#"{{"DeployInfo":"deploy-{}"}}"#, hex_bytes) + Key::from_formatted_str(missing_hyphen_prefix) + .unwrap_err() + .to_string(), + "unknown prefix for key" ); - let key_era_info = Key::EraInfo(EraId::from(42)); + let no_prefix = "0000000000000000000000000000000000000000000000000000000000000000"; assert_eq!( - serde_json::to_string(&key_era_info).unwrap(), - r#"{"EraInfo":"era-42"}"#.to_string() + Key::from_formatted_str(no_prefix).unwrap_err().to_string(), + "unknown prefix for key" ); - let key_validators_info = Key::EraValidators(EraId::from(42)); - assert_eq!( - serde_json::to_string(&key_validators_info).unwrap(), - r#"{"EraValidators":"validator-era-42"}"#.to_string() + let balance_hold_err = Key::from_formatted_str(BALANCE_HOLD_PREFIX) + .unwrap_err() + .to_string(); + assert!( + balance_hold_err.starts_with("balance-hold from string error: "), + "{}", + bid_addr_err ); } + #[test] + fn key_to_json() { + for key in KEYS.iter() { + assert_eq!( + serde_json::to_string(key).unwrap(), + format!("\"{}\"", key.to_formatted_string()) + ); + } + } + #[test] fn serialization_roundtrip_bincode() { - let round_trip = |key: &Key| { + for key in KEYS { let encoded = bincode::serialize(key).unwrap(); let decoded = bincode::deserialize(&encoded).unwrap(); assert_eq!(key, &decoded); - }; + } + } - let array = [42; BLAKE2B_DIGEST_LENGTH]; + #[test] + fn key_tag_bytes_roundtrip() { + for key in KEYS { + let tag: KeyTag = key.tag(); + bytesrepr::test_serialization_roundtrip(&tag); + } + } - round_trip(&Key::Account(AccountHash::new(array))); - round_trip(&Key::Hash(array)); - round_trip(&Key::URef(URef::new(array, AccessRights::READ))); - round_trip(&Key::Transfer(TransferAddr::new(array))); - round_trip(&Key::DeployInfo(DeployHash::new(array))); - round_trip(&Key::EraInfo(EraId::from(42))); - round_trip(&Key::Balance(URef::new(array, 
AccessRights::READ).addr())); - round_trip(&Key::Bid(AccountHash::new(array))); - round_trip(&Key::Withdraw(AccountHash::new(array))); + #[test] + fn bytesrepr_serialization_roundtrip() { + bytesrepr::test_serialization_roundtrip(&ACCOUNT_KEY); + bytesrepr::test_serialization_roundtrip(&HASH_KEY); + bytesrepr::test_serialization_roundtrip(&UREF_KEY); + bytesrepr::test_serialization_roundtrip(&TRANSFER_KEY); + bytesrepr::test_serialization_roundtrip(&DEPLOY_INFO_KEY); + bytesrepr::test_serialization_roundtrip(&ERA_INFO_KEY); + bytesrepr::test_serialization_roundtrip(&BALANCE_KEY); + bytesrepr::test_serialization_roundtrip(&BID_KEY); + bytesrepr::test_serialization_roundtrip(&WITHDRAW_KEY); + bytesrepr::test_serialization_roundtrip(&DICTIONARY_KEY); + // bytesrepr::test_serialization_roundtrip(&SYSTEM_CONTRACT_REGISTRY_KEY); + bytesrepr::test_serialization_roundtrip(&ERA_SUMMARY_KEY); + bytesrepr::test_serialization_roundtrip(&UNBOND_KEY); + bytesrepr::test_serialization_roundtrip(&CHAINSPEC_REGISTRY_KEY); + bytesrepr::test_serialization_roundtrip(&CHECKSUM_REGISTRY_KEY); + // bytesrepr::test_serialization_roundtrip(&UNIFIED_BID_KEY); + bytesrepr::test_serialization_roundtrip(&VALIDATOR_BID_KEY); + bytesrepr::test_serialization_roundtrip(&DELEGATOR_BID_KEY); + bytesrepr::test_serialization_roundtrip(&PACKAGE_KEY); + bytesrepr::test_serialization_roundtrip(&ADDRESSABLE_ENTITY_SYSTEM_KEY); + bytesrepr::test_serialization_roundtrip(&ADDRESSABLE_ENTITY_ACCOUNT_KEY); + bytesrepr::test_serialization_roundtrip(&ADDRESSABLE_ENTITY_SMART_CONTRACT_KEY); + bytesrepr::test_serialization_roundtrip(&BYTE_CODE_EMPTY_KEY); + bytesrepr::test_serialization_roundtrip(&BYTE_CODE_V1_WASM_KEY); + bytesrepr::test_serialization_roundtrip(&MESSAGE_TOPIC_KEY); + bytesrepr::test_serialization_roundtrip(&MESSAGE_KEY); + bytesrepr::test_serialization_roundtrip(&NAMED_KEY); + bytesrepr::test_serialization_roundtrip(&STATE_KEY); } #[test] fn serialization_roundtrip_json() { - let round_trip = 
|key: &Key| { - let encoded = serde_json::to_string_pretty(key).unwrap(); - let decoded = serde_json::from_str(&encoded).unwrap(); - assert_eq!(key, &decoded); - }; - - let array = [42; BLAKE2B_DIGEST_LENGTH]; - - round_trip(&Key::Account(AccountHash::new(array))); - round_trip(&Key::Hash(array)); - round_trip(&Key::URef(URef::new(array, AccessRights::READ))); - round_trip(&Key::Transfer(TransferAddr::new(array))); - round_trip(&Key::DeployInfo(DeployHash::new(array))); - round_trip(&Key::EraInfo(EraId::from(42))); - round_trip(&Key::Balance(URef::new(array, AccessRights::READ).addr())); - round_trip(&Key::Withdraw(AccountHash::new(array))); - round_trip(&Key::EraValidators(EraId::from(42))); + for key in KEYS { + round_trip(key); + } let zeros = [0; BLAKE2B_DIGEST_LENGTH]; + let nines = [9; BLAKE2B_DIGEST_LENGTH]; round_trip(&Key::Account(AccountHash::new(zeros))); round_trip(&Key::Hash(zeros)); round_trip(&Key::URef(URef::new(zeros, AccessRights::READ))); round_trip(&Key::Transfer(TransferAddr::new(zeros))); - round_trip(&Key::DeployInfo(DeployHash::new(zeros))); + round_trip(&Key::DeployInfo(DeployHash::from_raw(zeros))); round_trip(&Key::EraInfo(EraId::from(0))); round_trip(&Key::Balance(URef::new(zeros, AccessRights::READ).addr())); round_trip(&Key::Bid(AccountHash::new(zeros))); + round_trip(&Key::BidAddr(BidAddr::legacy(zeros))); + round_trip(&Key::BidAddr(BidAddr::new_validator_addr(zeros))); + round_trip(&Key::BidAddr(BidAddr::new_delegator_account_addr(( + zeros, nines, + )))); round_trip(&Key::Withdraw(AccountHash::new(zeros))); - round_trip(&Key::EraValidators(EraId::from(0))); + round_trip(&Key::Dictionary(zeros)); + round_trip(&Key::Unbond(AccountHash::new(zeros))); + round_trip(&Key::SmartContract(zeros)); + round_trip(&Key::AddressableEntity(EntityAddr::new_system(zeros))); + round_trip(&Key::AddressableEntity(EntityAddr::new_account(zeros))); + round_trip(&Key::AddressableEntity(EntityAddr::new_smart_contract( + zeros, + ))); + 
round_trip(&Key::ByteCode(ByteCodeAddr::Empty)); + round_trip(&Key::ByteCode(ByteCodeAddr::V1CasperWasm(zeros))); + round_trip(&Key::Message(MessageAddr::new_topic_addr( + EntityAddr::new_smart_contract(zeros), + nines.into(), + ))); + round_trip(&Key::Message(MessageAddr::new_message_addr( + EntityAddr::new_smart_contract(zeros), + nines.into(), + 1, + ))); + round_trip(&Key::NamedKey(NamedKeyAddr::default())); + round_trip(&Key::BlockGlobal(BlockGlobalAddr::BlockTime)); + round_trip(&Key::BlockGlobal(BlockGlobalAddr::MessageCount)); + round_trip(&Key::BlockGlobal(BlockGlobalAddr::ProtocolVersion)); + round_trip(&Key::BlockGlobal(BlockGlobalAddr::AddressableEntity)); + round_trip(&Key::BalanceHold(BalanceHoldAddr::default())); + round_trip(&Key::State(EntityAddr::new_system(zeros))); + } + + #[test] + fn state_json_deserialization() { + let mut test_rng = TestRng::new(); + let state_key = Key::State(EntityAddr::new_account(test_rng.gen())); + round_trip(&state_key); + + let state_key = Key::State(EntityAddr::new_system(test_rng.gen())); + round_trip(&state_key); + + let state_key = Key::State(EntityAddr::new_smart_contract(test_rng.gen())); + round_trip(&state_key); } + #[test] + fn roundtrip() { + bytesrepr::test_serialization_roundtrip(&ACCOUNT_KEY); + bytesrepr::test_serialization_roundtrip(&HASH_KEY); + bytesrepr::test_serialization_roundtrip(&UREF_KEY); + bytesrepr::test_serialization_roundtrip(&TRANSFER_KEY); + bytesrepr::test_serialization_roundtrip(&DEPLOY_INFO_KEY); + bytesrepr::test_serialization_roundtrip(&ERA_INFO_KEY); + bytesrepr::test_serialization_roundtrip(&BALANCE_KEY); + bytesrepr::test_serialization_roundtrip(&BID_KEY); + bytesrepr::test_serialization_roundtrip(&WITHDRAW_KEY); + bytesrepr::test_serialization_roundtrip(&DICTIONARY_KEY); + bytesrepr::test_serialization_roundtrip(&SYSTEM_ENTITY_REGISTRY_KEY); + bytesrepr::test_serialization_roundtrip(&ERA_SUMMARY_KEY); + bytesrepr::test_serialization_roundtrip(&UNBOND_KEY); + 
bytesrepr::test_serialization_roundtrip(&CHAINSPEC_REGISTRY_KEY); + bytesrepr::test_serialization_roundtrip(&CHECKSUM_REGISTRY_KEY); + bytesrepr::test_serialization_roundtrip(&UNIFIED_BID_KEY); + bytesrepr::test_serialization_roundtrip(&VALIDATOR_BID_KEY); + bytesrepr::test_serialization_roundtrip(&DELEGATOR_BID_KEY); + bytesrepr::test_serialization_roundtrip(&PACKAGE_KEY); + bytesrepr::test_serialization_roundtrip(&ADDRESSABLE_ENTITY_SYSTEM_KEY); + bytesrepr::test_serialization_roundtrip(&ADDRESSABLE_ENTITY_ACCOUNT_KEY); + bytesrepr::test_serialization_roundtrip(&ADDRESSABLE_ENTITY_SMART_CONTRACT_KEY); + bytesrepr::test_serialization_roundtrip(&BYTE_CODE_EMPTY_KEY); + bytesrepr::test_serialization_roundtrip(&BYTE_CODE_V1_WASM_KEY); + bytesrepr::test_serialization_roundtrip(&MESSAGE_TOPIC_KEY); + bytesrepr::test_serialization_roundtrip(&MESSAGE_KEY); + bytesrepr::test_serialization_roundtrip(&NAMED_KEY); + } + + fn round_trip(key: &Key) { + let encoded = serde_json::to_value(key).unwrap(); + let decoded = serde_json::from_value(encoded.clone()) + .unwrap_or_else(|_| panic!("{} {}", key, encoded)); + assert_eq!(key, &decoded); + } +} + +#[cfg(test)] +mod proptest { + use crate::gens; + use proptest::prelude::*; + proptest! { #[test] - fn padded_era_id_serialization_roundtrip(era_id in era_id_arb()) { - bytesrepr::test_serialization_roundtrip(&PaddedEraId(era_id)) + fn test_json_roundtrip_for_bidaddr_key(key in gens::all_keys_arb()) { + let json_string = serde_json::to_string_pretty(&key).unwrap(); + let decoded = serde_json::from_str(&json_string).unwrap(); + assert_eq!(key, decoded); } } } diff --git a/types/src/lib.rs b/types/src/lib.rs index 64a847d970..1ab37948ac 100644 --- a/types/src/lib.rs +++ b/types/src/lib.rs @@ -1,88 +1,246 @@ //! Types used to allow creation of Wasm contracts and tests for use on the Casper Platform. -//! -//! # `no_std` -//! -//! By default, the library is `no_std`, however you can enable full `std` functionality by enabling -//! 
the crate's `std` feature. -#![cfg_attr(not(feature = "std"), no_std)] #![cfg_attr( - not(feature = "no-unstable-features"), - feature(min_specialization, try_reserve) + not(any( + feature = "json-schema", + feature = "datasize", + feature = "std", + feature = "testing", + test, + )), + no_std )] -#![doc(html_root_url = "https://docs.rs/casper-types/1.0.0")] +#![doc(html_root_url = "https://docs.rs/casper-types/6.0.1")] #![doc( - html_favicon_url = "https://raw.githubusercontent.com/CasperLabs/casper-node/master/images/CasperLabs_Logo_Favicon_RGB_50px.png", - html_logo_url = "https://raw.githubusercontent.com/CasperLabs/casper-node/master/images/CasperLabs_Logo_Symbol_RGB.png", - test(attr(forbid(warnings))) + html_favicon_url = "https://raw.githubusercontent.com/casper-network/casper-node/blob/dev/images/Casper_Logo_Favicon_48.png", + html_logo_url = "https://raw.githubusercontent.com/casper-network/casper-node/blob/dev/images/Casper_Logo_Favicon.png" )] -#![warn(missing_docs)] +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#[cfg_attr(not(test), macro_use)] extern crate alloc; -#[cfg(any(feature = "std", test))] -#[macro_use] -extern crate std; + +extern crate core; mod access_rights; pub mod account; +pub mod addressable_entity; pub mod api_error; +mod auction_state; +mod block; mod block_time; +mod byte_code; pub mod bytesrepr; +#[cfg(any(feature = "std", test))] +mod chainspec; +pub mod checksummed_hex; mod cl_type; mod cl_value; +pub mod contract_messages; mod contract_wasm; pub mod contracts; pub mod crypto; mod deploy_info; +mod digest; +mod display_iter; mod era_id; -mod execution_result; -#[cfg(any(feature = "gens", test))] +pub mod execution; +#[cfg(any(feature = "std-fs-io", test))] +pub mod file_utils; +mod gas; +#[cfg(any(feature = "testing", feature = "gens", test))] pub mod gens; +pub mod global_state; +#[cfg(feature = "json-schema")] mod json_pretty_printer; mod key; -mod named_key; +mod motes; +mod package; +mod peers_map; mod phase; mod 
protocol_version; -pub mod runtime_args; +pub mod runtime_footprint; mod semver; +pub(crate) mod serde_helpers; +mod stored_value; pub mod system; mod tagged; +#[cfg(any(feature = "testing", test))] +pub mod testing; +mod timestamp; +mod transaction; mod transfer; mod transfer_result; mod uint; mod uref; +mod validator_change; + +#[cfg(all(feature = "std", any(feature = "std-fs-io", test)))] +use libc::{c_long, sysconf, _SC_PAGESIZE}; +#[cfg(feature = "std")] +use once_cell::sync::Lazy; -pub use access_rights::{AccessRights, ACCESS_RIGHTS_SERIALIZED_LENGTH}; +pub use crate::uint::{UIntParseError, U128, U256, U512}; + +pub use access_rights::{ + AccessRights, ContextAccessRights, GrantedAccess, ACCESS_RIGHTS_SERIALIZED_LENGTH, +}; +pub use account::Account; +#[doc(inline)] +pub use addressable_entity::{ + AddressableEntity, AddressableEntityHash, ContractRuntimeTag, EntityAddr, EntityEntryPoint, + EntityKind, EntryPointAccess, EntryPointAddr, EntryPointPayment, EntryPointType, + EntryPointValue, EntryPoints, Parameter, Parameters, DEFAULT_ENTRY_POINT_NAME, +}; #[doc(inline)] pub use api_error::ApiError; -pub use block_time::{BlockTime, BLOCKTIME_SERIALIZED_LENGTH}; +#[allow(deprecated)] +pub use auction_state::{AuctionState, JsonEraValidators, JsonValidatorWeights}; +#[cfg(all(feature = "std", feature = "json-schema"))] +pub use block::JsonBlockWithSignatures; +pub use block::{ + AvailableBlockRange, Block, BlockBody, BlockBodyV1, BlockBodyV2, BlockGlobalAddr, + BlockGlobalAddrTag, BlockHash, BlockHashAndHeight, BlockHeader, BlockHeaderV1, BlockHeaderV2, + BlockHeaderWithSignatures, BlockHeaderWithSignaturesValidationError, BlockIdentifier, + BlockSignatures, BlockSignaturesMergeError, BlockSignaturesV1, BlockSignaturesV2, + BlockSyncStatus, BlockSynchronizerStatus, BlockV1, BlockV2, BlockValidationError, + BlockWithSignatures, ChainNameDigest, EraEnd, EraEndV1, EraEndV2, EraReport, FinalitySignature, + FinalitySignatureId, FinalitySignatureV1, FinalitySignatureV2, 
RewardedSignatures, Rewards, + SingleBlockRewardedSignatures, +}; +#[cfg(any(all(feature = "std", feature = "testing"), test))] +pub use block::{TestBlockBuilder, TestBlockV1Builder}; +pub use block_time::{BlockTime, HoldsEpoch, BLOCKTIME_SERIALIZED_LENGTH}; +pub use byte_code::{ByteCode, ByteCodeAddr, ByteCodeHash, ByteCodeKind}; pub use cl_type::{named_key_type, CLType, CLTyped}; -pub use cl_value::{CLTypeMismatch, CLValue, CLValueError}; -pub use contract_wasm::{ContractWasm, ContractWasmHash}; -pub use contracts::{ - Contract, ContractHash, ContractPackage, ContractPackageHash, ContractVersion, - ContractVersionKey, EntryPoint, EntryPointAccess, EntryPointType, EntryPoints, Group, - Parameter, +#[cfg(feature = "json-schema")] +pub use cl_value::cl_value_to_json; +pub use cl_value::{ + handle_stored_dictionary_value, CLTypeMismatch, CLValue, CLValueError, ChecksumRegistry, + DictionaryValue as CLValueDictionary, SystemHashRegistry, }; +pub use global_state::Pointer; + +#[cfg(any(feature = "std", test))] +pub use chainspec::{ + AccountConfig, AccountsConfig, ActivationPoint, AdministratorAccount, AuctionCosts, + BrTableCost, Chainspec, ChainspecRawBytes, ChainspecRegistry, ConsensusProtocolName, + ControlFlowCosts, CoreConfig, DelegatorConfig, DeployConfig, FeeHandling, GenesisAccount, + GenesisConfig, GenesisValidator, GlobalStateUpdate, GlobalStateUpdateConfig, + GlobalStateUpdateError, HandlePaymentCosts, HighwayConfig, HoldBalanceHandling, HostFunction, + HostFunctionCost, HostFunctionCostsV1, HostFunctionCostsV2, HostFunctionV2, + LegacyRequiredFinality, MessageLimits, MintCosts, NetworkConfig, NextUpgrade, OpcodeCosts, + PricingHandling, ProtocolConfig, ProtocolUpgradeConfig, RefundHandling, StandardPaymentCosts, + StorageCosts, SystemConfig, TransactionConfig, TransactionLaneDefinition, TransactionV1Config, + VacancyConfig, ValidatorConfig, WasmConfig, WasmV1Config, WasmV2Config, + DEFAULT_BASELINE_MOTES_AMOUNT, DEFAULT_GAS_HOLD_INTERVAL, 
DEFAULT_HOST_FUNCTION_NEW_DICTIONARY, + DEFAULT_MINIMUM_BID_AMOUNT, DEFAULT_REFUND_HANDLING, +}; +#[cfg(any(all(feature = "std", feature = "testing"), test))] +pub use chainspec::{ + DEFAULT_ADD_BID_COST, DEFAULT_ADD_COST, DEFAULT_BIT_COST, DEFAULT_CONST_COST, + DEFAULT_CONTROL_FLOW_BLOCK_OPCODE, DEFAULT_CONTROL_FLOW_BR_IF_OPCODE, + DEFAULT_CONTROL_FLOW_BR_OPCODE, DEFAULT_CONTROL_FLOW_BR_TABLE_MULTIPLIER, + DEFAULT_CONTROL_FLOW_BR_TABLE_OPCODE, DEFAULT_CONTROL_FLOW_CALL_INDIRECT_OPCODE, + DEFAULT_CONTROL_FLOW_CALL_OPCODE, DEFAULT_CONTROL_FLOW_DROP_OPCODE, + DEFAULT_CONTROL_FLOW_ELSE_OPCODE, DEFAULT_CONTROL_FLOW_END_OPCODE, + DEFAULT_CONTROL_FLOW_IF_OPCODE, DEFAULT_CONTROL_FLOW_LOOP_OPCODE, + DEFAULT_CONTROL_FLOW_RETURN_OPCODE, DEFAULT_CONTROL_FLOW_SELECT_OPCODE, + DEFAULT_CONVERSION_COST, DEFAULT_CURRENT_MEMORY_COST, DEFAULT_DELEGATE_COST, DEFAULT_DIV_COST, + DEFAULT_FEE_HANDLING, DEFAULT_GLOBAL_COST, DEFAULT_GROW_MEMORY_COST, + DEFAULT_INTEGER_COMPARISON_COST, DEFAULT_LARGE_TRANSACTION_GAS_LIMIT, DEFAULT_LOAD_COST, + DEFAULT_LOCAL_COST, DEFAULT_MAX_PAYMENT_MOTES, DEFAULT_MAX_STACK_HEIGHT, + DEFAULT_MIN_TRANSFER_MOTES, DEFAULT_MUL_COST, DEFAULT_NEW_DICTIONARY_COST, DEFAULT_NOP_COST, + DEFAULT_STORE_COST, DEFAULT_TRANSFER_COST, DEFAULT_UNREACHABLE_COST, DEFAULT_WASM_MAX_MEMORY, +}; +pub use contract_wasm::{ContractWasm, ContractWasmHash}; +#[doc(inline)] +pub use contracts::{Contract, NamedKeys}; pub use crypto::*; pub use deploy_info::DeployInfo; -pub use execution_result::{ - ExecutionEffect, ExecutionResult, OpKind, Operation, Transform, TransformEntry, +pub use digest::{ + ChunkWithProof, ChunkWithProofVerificationError, Digest, DigestError, IndexedMerkleProof, + MerkleConstructionError, MerkleVerificationError, }; +pub use display_iter::DisplayIter; +pub use era_id::EraId; +pub use gas::Gas; +#[cfg(feature = "json-schema")] pub use json_pretty_printer::json_pretty_print; #[doc(inline)] -pub use key::{HashAddr, Key, KeyTag, BLAKE2B_DIGEST_LENGTH, 
KEY_HASH_LENGTH}; -pub use named_key::NamedKey; +pub use key::{ + DictionaryAddr, FromStrError as KeyFromStrError, HashAddr, Key, KeyTag, PackageAddr, + BLAKE2B_DIGEST_LENGTH, DICTIONARY_ITEM_KEY_MAX_LENGTH, KEY_DICTIONARY_LENGTH, KEY_HASH_LENGTH, +}; +pub use motes::Motes; +#[doc(inline)] +pub use package::{ + EntityVersion, EntityVersionKey, EntityVersions, Group, Groups, Package, PackageHash, + PackageStatus, ENTITY_INITIAL_VERSION, +}; +pub use peers_map::{PeerEntry, Peers}; pub use phase::{Phase, PHASE_SERIALIZED_LENGTH}; pub use protocol_version::{ProtocolVersion, VersionCheckResult}; -pub use runtime_args::{NamedArg, RuntimeArgs}; +pub use runtime_footprint::RuntimeFootprint; pub use semver::{ParseSemVerError, SemVer, SEM_VER_SERIALIZED_LENGTH}; +pub use stored_value::{ + GlobalStateIdentifier, StoredValue, StoredValueTag, TypeMismatch as StoredValueTypeMismatch, +}; +pub use system::mint::METHOD_TRANSFER; pub use tagged::Tagged; -pub use transfer::{DeployHash, Transfer, TransferAddr, DEPLOY_HASH_LENGTH, TRANSFER_ADDR_LENGTH}; +#[cfg(any(feature = "std", test))] +pub use timestamp::serde_option_time_diff; +pub use timestamp::{TimeDiff, Timestamp}; +#[cfg(any(feature = "std", test))] +pub use transaction::{calculate_lane_id_for_deploy, calculate_transaction_lane, GasLimited}; +pub use transaction::{ + AddressableEntityIdentifier, Approval, ApprovalsHash, Deploy, DeployDecodeFromJsonError, + DeployError, DeployExcessiveSizeError, DeployHash, DeployHeader, DeployId, + ExecutableDeployItem, ExecutableDeployItemIdentifier, ExecutionInfo, InitiatorAddr, + InvalidDeploy, InvalidTransaction, InvalidTransactionV1, NamedArg, PackageIdentifier, + PricingMode, PricingModeError, RuntimeArgs, Transaction, TransactionArgs, + TransactionEntryPoint, TransactionHash, TransactionId, TransactionInvocationTarget, + TransactionRuntimeParams, TransactionScheduling, TransactionTarget, TransactionV1, + TransactionV1DecodeFromJsonError, TransactionV1Error, 
TransactionV1ExcessiveSizeError, + TransactionV1Hash, TransactionV1Payload, TransferTarget, +}; +pub use transfer::{ + Transfer, TransferAddr, TransferFromStrError, TransferV1, TransferV2, TRANSFER_ADDR_LENGTH, +}; pub use transfer_result::{TransferResult, TransferredTo}; -pub use uref::{FromStrError as URefFromStrError, URef, UREF_ADDR_LENGTH, UREF_SERIALIZED_LENGTH}; - -pub use crate::{ - era_id::EraId, - uint::{UIntParseError, U128, U256, U512}, +pub use uref::{ + FromStrError as URefFromStrError, URef, URefAddr, UREF_ADDR_LENGTH, UREF_SERIALIZED_LENGTH, }; +pub use validator_change::ValidatorChange; +/// The lane identifier for the native mint interaction. +pub const MINT_LANE_ID: u8 = 0; +/// The lane identifier for the native auction interaction. +pub const AUCTION_LANE_ID: u8 = 1; +/// The lane identifier for the install/upgrade auction interaction. +pub const INSTALL_UPGRADE_LANE_ID: u8 = 2; +/// The lane identifier for large wasms. +pub(crate) const LARGE_WASM_LANE_ID: u8 = 3; +/// The lane identifier for medium wasms. +pub(crate) const MEDIUM_WASM_LANE_ID: u8 = 4; +/// The lane identifier for small wasms. +pub(crate) const SMALL_WASM_LANE_ID: u8 = 5; + +/// OS page size. +#[cfg(feature = "std")] +pub static OS_PAGE_SIZE: Lazy = Lazy::new(|| { + /// Sensible default for many if not all systems. + const DEFAULT_PAGE_SIZE: usize = 4096; + + #[cfg(any(feature = "std-fs-io", test))] + // https://www.gnu.org/software/libc/manual/html_node/Sysconf.html + let value: c_long = unsafe { sysconf(_SC_PAGESIZE) }; + + #[cfg(not(any(feature = "std-fs-io", test)))] + let value = 0; + + if value <= 0 { + DEFAULT_PAGE_SIZE + } else { + value as usize + } +}); diff --git a/types/src/motes.rs b/types/src/motes.rs new file mode 100644 index 0000000000..7e03d9d7be --- /dev/null +++ b/types/src/motes.rs @@ -0,0 +1,217 @@ +//! The `motes` module is used for working with Motes. 
+ +use alloc::vec::Vec; +use core::fmt; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +use serde::{Deserialize, Serialize}; + +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + Gas, U512, +}; + +/// A struct representing a number of `Motes`. +#[derive(Debug, Default, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct Motes(U512); + +impl Motes { + /// The maximum value of `Motes`. + pub const MAX: Motes = Motes(U512::MAX); + + /// Constructs a new `Motes`. + pub fn new>(value: T) -> Self { + Motes(value.into()) + } + + /// Constructs a new `Motes` with value `0`. + pub const fn zero() -> Self { + Motes(U512::zero()) + } + + /// Checked integer addition. Computes `self + rhs`, returning `None` if overflow occurred. + pub fn checked_add(&self, rhs: Self) -> Option { + self.0.checked_add(rhs.value()).map(Self::new) + } + + /// Checked integer subtraction. Computes `self - rhs`, returning `None` if underflow occurred. + pub fn checked_sub(&self, rhs: Self) -> Option { + self.0.checked_sub(rhs.value()).map(Self::new) + } + + /// Checked integer multiplication. Computes `self * rhs`, returning `None` if overflow + /// occurred. + pub fn checked_mul(&self, rhs: Self) -> Option { + self.0.checked_mul(rhs.value()).map(Self::new) + } + + /// Checked integer division. Computes `self / rhs`, returning `None` if `rhs == 0`. + pub fn checked_div(&self, rhs: Self) -> Option { + self.0.checked_div(rhs.value()).map(Self::new) + } + + /// Returns the inner `U512` value. + pub fn value(&self) -> U512 { + self.0 + } + + /// Converts the given `gas` to `Motes` by multiplying them by `conv_rate`. + /// + /// Returns `None` if an arithmetic overflow occurred. + pub fn from_gas(gas: Gas, conv_rate: u8) -> Option { + gas.value() + .checked_mul(U512::from(conv_rate)) + .map(Self::new) + } + + /// Converts the given `amount` to `Motes` by multiplying them by `price`. 
+ /// + /// Returns `None` if an arithmetic overflow occurred. + pub fn from_price(amount: U512, price: u8) -> Option { + amount.checked_mul(U512::from(price)).map(Self::new) + } +} + +impl fmt::Display for Motes { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{:?}", self.0) + } +} + +impl ToBytes for Motes { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.0.to_bytes() + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } +} + +impl FromBytes for Motes { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (value, remainder) = FromBytes::from_bytes(bytes)?; + Ok((Motes(value), remainder)) + } +} + +#[cfg(test)] +mod tests { + use crate::U512; + + use crate::{Gas, Motes}; + + #[test] + fn should_be_able_to_get_instance_of_motes() { + let initial_value = 1; + let motes = Motes::new(initial_value); + assert_eq!( + initial_value, + motes.value().as_u64(), + "should have equal value" + ) + } + + #[test] + fn should_be_able_to_compare_two_instances_of_motes() { + let left_motes = Motes::new(1); + let right_motes = Motes::new(1); + assert_eq!(left_motes, right_motes, "should be equal"); + let right_motes = Motes::new(2); + assert_ne!(left_motes, right_motes, "should not be equal") + } + + #[test] + fn should_be_able_to_add_two_instances_of_motes() { + let left_motes = Motes::new(1); + let right_motes = Motes::new(1); + let expected_motes = Motes::new(2); + assert_eq!( + left_motes.checked_add(right_motes), + Some(expected_motes), + "should be equal" + ) + } + + #[test] + fn should_be_able_to_subtract_two_instances_of_motes() { + let left_motes = Motes::new(1); + let right_motes = Motes::new(1); + let expected_motes = Motes::new(0); + assert_eq!( + left_motes.checked_sub(right_motes), + Some(expected_motes), + "should be equal" + ) + } + + #[test] + fn should_be_able_to_multiply_two_instances_of_motes() { + let left_motes = Motes::new(100); + let right_motes = Motes::new(10); + let 
expected_motes = Motes::new(1000); + assert_eq!( + left_motes.checked_mul(right_motes), + Some(expected_motes), + "should be equal" + ) + } + + #[test] + fn should_be_able_to_divide_two_instances_of_motes() { + let left_motes = Motes::new(1000); + let right_motes = Motes::new(100); + let expected_motes = Motes::new(10); + assert_eq!( + left_motes.checked_div(right_motes), + Some(expected_motes), + "should be equal" + ) + } + + #[test] + fn should_be_able_to_convert_from_motes() { + let gas = Gas::new(100); + let motes = Motes::from_gas(gas, 10).expect("should have value"); + let expected_motes = Motes::new(1000); + assert_eq!(motes, expected_motes, "should be equal") + } + + #[test] + fn should_be_able_to_default() { + let motes = Motes::default(); + let expected_motes = Motes::new(0); + assert_eq!(motes, expected_motes, "should be equal") + } + + #[test] + fn should_be_able_to_compare_relative_value() { + let left_motes = Motes::new(100); + let right_motes = Motes::new(10); + assert!(left_motes > right_motes, "should be gt"); + let right_motes = Motes::new(100); + assert!(left_motes >= right_motes, "should be gte"); + assert!(left_motes <= right_motes, "should be lte"); + let left_motes = Motes::new(10); + assert!(left_motes < right_motes, "should be lt"); + } + + #[test] + fn should_default() { + let left_motes = Motes::new(0); + let right_motes = Motes::default(); + assert_eq!(left_motes, right_motes, "should be equal"); + let u512 = U512::zero(); + assert_eq!(left_motes.value(), u512, "should be equal"); + } + + #[test] + fn should_support_checked_mul_from_gas() { + let gas = Gas::new(U512::MAX); + let conv_rate = 10; + let maybe = Motes::from_gas(gas, conv_rate); + assert!(maybe.is_none(), "should be none due to overflow"); + } +} diff --git a/types/src/named_key.rs b/types/src/named_key.rs deleted file mode 100644 index 7a7d36d8a3..0000000000 --- a/types/src/named_key.rs +++ /dev/null @@ -1,43 +0,0 @@ -// TODO - remove once schemars stops causing warning. 
-#![allow(clippy::field_reassign_with_default)] - -use alloc::{string::String, vec::Vec}; - -#[cfg(feature = "std")] -use schemars::JsonSchema; -use serde::{Deserialize, Serialize}; - -use crate::bytesrepr::{self, FromBytes, ToBytes}; - -/// A named key. -#[derive(Clone, Eq, PartialEq, Serialize, Deserialize, Default, Debug)] -#[cfg_attr(feature = "std", derive(JsonSchema))] -#[serde(deny_unknown_fields)] -pub struct NamedKey { - /// The name of the entry. - pub name: String, - /// The value of the entry: a casper `Key` type. - pub key: String, -} - -impl ToBytes for NamedKey { - fn to_bytes(&self) -> Result, bytesrepr::Error> { - let mut buffer = bytesrepr::allocate_buffer(self)?; - buffer.extend(self.name.to_bytes()?); - buffer.extend(self.key.to_bytes()?); - Ok(buffer) - } - - fn serialized_length(&self) -> usize { - self.name.serialized_length() + self.key.serialized_length() - } -} - -impl FromBytes for NamedKey { - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { - let (name, remainder) = String::from_bytes(bytes)?; - let (key, remainder) = String::from_bytes(remainder)?; - let named_key = NamedKey { name, key }; - Ok((named_key, remainder)) - } -} diff --git a/types/src/package.rs b/types/src/package.rs new file mode 100644 index 0000000000..c6286eeff5 --- /dev/null +++ b/types/src/package.rs @@ -0,0 +1,1266 @@ +//! Module containing the Package and associated types for addressable entities. 
+ +use alloc::{ + collections::{BTreeMap, BTreeSet}, + format, + string::String, + vec::Vec, +}; +use core::{ + convert::TryFrom, + fmt::{self, Debug, Display, Formatter}, +}; + +#[cfg(any(feature = "testing", feature = "gens", test))] +use rand::{distributions::Standard, prelude::Distribution, Rng}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer}; +#[cfg(feature = "json-schema")] +use serde_map_to_array::KeyValueJsonSchema; +use serde_map_to_array::{BTreeMapToArray, KeyValueLabels}; + +use crate::{ + addressable_entity::{Error, FromStrError}, + bytesrepr::{self, FromBytes, ToBytes, U32_SERIALIZED_LENGTH}, + checksummed_hex, + crypto::{self, PublicKey}, + uref::URef, + CLType, CLTyped, EntityAddr, HashAddr, BLAKE2B_DIGEST_LENGTH, KEY_HASH_LENGTH, +}; + +const PACKAGE_STRING_PREFIX: &str = "package-"; + +/// Associated error type of `TryFrom<&[u8]>` for `ContractHash`. +#[derive(Debug)] +pub struct TryFromSliceForPackageHashError(()); + +impl Display for TryFromSliceForPackageHashError { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + write!(f, "failed to retrieve from slice") + } +} + +/// A (labelled) "user group". Each method of a versioned contract may be +/// associated with one or more user groups which are allowed to call it. +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub struct Group(String); + +impl Group { + /// Basic constructor + pub fn new>(s: T) -> Self { + Group(s.into()) + } + + /// Retrieves underlying name. 
+ pub fn value(&self) -> &str { + &self.0 + } +} + +impl From for String { + fn from(group: Group) -> Self { + group.0 + } +} + +impl ToBytes for Group { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.0.to_bytes() + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.value().write_bytes(writer)?; + Ok(()) + } +} + +impl FromBytes for Group { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + String::from_bytes(bytes).map(|(label, bytes)| (Group(label), bytes)) + } +} + +/// Automatically incremented value for a contract version within a major `ProtocolVersion`. +pub type EntityVersion = u32; + +/// Within each discrete major `ProtocolVersion`, entity version resets to this value. +pub const ENTITY_INITIAL_VERSION: EntityVersion = 1; + +/// Major element of `ProtocolVersion` a `EntityVersion` is compatible with. +pub type ProtocolVersionMajor = u32; + +/// Major element of `ProtocolVersion` combined with `EntityVersion`. +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize, Hash)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub struct EntityVersionKey { + /// Major element of `ProtocolVersion` a `ContractVersion` is compatible with. + protocol_version_major: ProtocolVersionMajor, + /// Automatically incremented value for a contract version within a major `ProtocolVersion`. + entity_version: EntityVersion, +} + +impl EntityVersionKey { + /// Returns a new instance of ContractVersionKey with provided values. + pub fn new( + protocol_version_major: ProtocolVersionMajor, + entity_version: EntityVersion, + ) -> Self { + Self { + protocol_version_major, + entity_version, + } + } + + /// Returns the major element of the protocol version this contract is compatible with. 
+ pub fn protocol_version_major(self) -> ProtocolVersionMajor { + self.protocol_version_major + } + + /// Returns the contract version within the protocol major version. + pub fn entity_version(self) -> EntityVersion { + self.entity_version + } +} + +impl From for (ProtocolVersionMajor, EntityVersion) { + fn from(entity_version_key: EntityVersionKey) -> Self { + ( + entity_version_key.protocol_version_major, + entity_version_key.entity_version, + ) + } +} + +impl ToBytes for EntityVersionKey { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + ENTITY_VERSION_KEY_SERIALIZED_LENGTH + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.protocol_version_major.write_bytes(writer)?; + self.entity_version.write_bytes(writer) + } +} + +impl FromBytes for EntityVersionKey { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (protocol_version_major, remainder) = ProtocolVersionMajor::from_bytes(bytes)?; + let (entity_version, remainder) = EntityVersion::from_bytes(remainder)?; + Ok(( + EntityVersionKey { + protocol_version_major, + entity_version, + }, + remainder, + )) + } +} + +impl Display for EntityVersionKey { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + write!(f, "{}.{}", self.protocol_version_major, self.entity_version) + } +} + +#[cfg(any(feature = "testing", feature = "gens", test))] +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> EntityVersionKey { + EntityVersionKey { + protocol_version_major: rng.gen(), + entity_version: rng.gen(), + } + } +} + +/// Serialized length of `EntityVersionKey`. +pub const ENTITY_VERSION_KEY_SERIALIZED_LENGTH: usize = + U32_SERIALIZED_LENGTH + U32_SERIALIZED_LENGTH; + +/// Collection of entity versions. 
+#[derive(Clone, PartialEq, Eq, Default, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(transparent, deny_unknown_fields)] +pub struct EntityVersions( + #[serde(with = "BTreeMapToArray::")] + BTreeMap, +); + +impl EntityVersions { + /// Constructs a new, empty `EntityVersions`. + pub const fn new() -> Self { + EntityVersions(BTreeMap::new()) + } + + /// Returns an iterator over the `AddressableEntityHash`s (i.e. the map's values). + pub fn contract_hashes(&self) -> impl Iterator { + self.0.values() + } + + /// Returns the `AddressableEntityHash` under the key + pub fn get(&self, key: &EntityVersionKey) -> Option<&EntityAddr> { + self.0.get(key) + } + + /// Retrieve the first entity version key if it exists + pub fn maybe_first(&mut self) -> Option<(EntityVersionKey, EntityAddr)> { + if let Some((entity_version_key, entity_hash)) = self.0.iter().next() { + Some((*entity_version_key, *entity_hash)) + } else { + None + } + } + + /// The number of versions present in the package. + pub fn version_count(&self) -> usize { + self.0.len() + } + + /// Returns the latest entity version key if it exists. + pub fn latest(&self) -> Option<&EntityAddr> { + let (_, value) = self.0.last_key_value()?; + Some(value) + } + + /// Returns an iterator over the `AddressableEntityHash`s (i.e. the map's values). 
+ pub fn iter_entries(&self) -> impl Iterator { + self.0.iter() + } +} + +impl ToBytes for EntityVersions { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.0.to_bytes() + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.0.write_bytes(writer) + } +} + +impl FromBytes for EntityVersions { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (versions, remainder) = BTreeMap::::from_bytes(bytes)?; + Ok((EntityVersions(versions), remainder)) + } +} + +impl From> for EntityVersions { + fn from(value: BTreeMap) -> Self { + EntityVersions(value) + } +} + +struct EntityVersionLabels; + +impl KeyValueLabels for EntityVersionLabels { + const KEY: &'static str = "entity_version_key"; + const VALUE: &'static str = "entity_addr"; +} + +#[cfg(feature = "json-schema")] +impl KeyValueJsonSchema for EntityVersionLabels { + const JSON_SCHEMA_KV_NAME: Option<&'static str> = Some("EntityVersionAndEntityAddr"); +} + +/// Collection of named groups. +#[derive(Clone, PartialEq, Eq, Default, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(transparent, deny_unknown_fields)] +pub struct Groups( + #[serde(with = "BTreeMapToArray::, GroupLabels>")] + pub(crate) BTreeMap>, +); + +impl Groups { + /// Constructs a new, empty `Groups`. + pub const fn new() -> Self { + Groups(BTreeMap::new()) + } + + /// Inserts a named group. + /// + /// If the map did not have this name present, `None` is returned. If the map did have this + /// name present, its collection of `URef`s is overwritten, and the collection is returned. + pub fn insert(&mut self, name: Group, urefs: BTreeSet) -> Option> { + self.0.insert(name, urefs) + } + + /// Returns `true` if the named group exists in the collection. 
+ pub fn contains(&self, name: &Group) -> bool { + self.0.contains_key(name) + } + + /// Returns a reference to the collection of `URef`s under the given `name` if any. + pub fn get(&self, name: &Group) -> Option<&BTreeSet> { + self.0.get(name) + } + + /// Returns a mutable reference to the collection of `URef`s under the given `name` if any. + pub fn get_mut(&mut self, name: &Group) -> Option<&mut BTreeSet> { + self.0.get_mut(name) + } + + /// Returns the number of named groups. + pub fn len(&self) -> usize { + self.0.len() + } + + /// Returns `true` if there are no named groups. + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } + + /// Returns an iterator over the `Key`s (i.e. the map's values). + pub fn keys(&self) -> impl Iterator> { + self.0.values() + } + + /// Returns the total number of `URef`s contained in all the groups. + pub fn total_urefs(&self) -> usize { + self.0.values().map(|urefs| urefs.len()).sum() + } +} + +impl ToBytes for Groups { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.0.to_bytes() + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.0.write_bytes(writer) + } +} + +impl FromBytes for Groups { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (groups, remainder) = BTreeMap::>::from_bytes(bytes)?; + Ok((Groups(groups), remainder)) + } +} + +struct GroupLabels; + +impl KeyValueLabels for GroupLabels { + const KEY: &'static str = "group_name"; + const VALUE: &'static str = "group_users"; +} + +#[cfg(feature = "json-schema")] +impl KeyValueJsonSchema for GroupLabels { + const JSON_SCHEMA_KV_NAME: Option<&'static str> = Some("NamedUserGroup"); +} + +#[cfg(any(feature = "testing", feature = "gens", test))] +impl From>> for Groups { + fn from(value: BTreeMap>) -> Self { + Groups(value) + } +} + +/// A newtype wrapping a `HashAddr` which references a [`Package`] in the global state. 
+#[derive(Default, PartialOrd, Ord, PartialEq, Eq, Hash, Clone, Copy)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr( + feature = "json-schema", + derive(JsonSchema), + schemars(description = "The hex-encoded address of the Package.") +)] +pub struct PackageHash( + #[cfg_attr(feature = "json-schema", schemars(skip, with = "String"))] HashAddr, +); + +impl PackageHash { + /// Constructs a new `PackageHash` from the raw bytes of the package hash. + pub const fn new(value: HashAddr) -> PackageHash { + PackageHash(value) + } + + /// Returns the raw bytes of the entity hash as an array. + pub fn value(&self) -> HashAddr { + self.0 + } + + /// Returns the raw bytes of the entity hash as a `slice`. + pub fn as_bytes(&self) -> &[u8] { + &self.0 + } + + /// Formats the `PackageHash` for users getting and putting. + pub fn to_formatted_string(self) -> String { + format!("{}{}", PACKAGE_STRING_PREFIX, base16::encode_lower(&self.0),) + } + + /// Parses a string formatted as per `Self::to_formatted_string()` into a + /// `PackageHash`. + pub fn from_formatted_str(input: &str) -> Result { + let hex_addr = input + .strip_prefix(PACKAGE_STRING_PREFIX) + .ok_or(FromStrError::InvalidPrefix)?; + + let bytes = HashAddr::try_from(checksummed_hex::decode(hex_addr)?.as_ref())?; + Ok(PackageHash(bytes)) + } + + /// Parses a `PublicKey` and outputs the corresponding account hash. + pub fn from_public_key( + public_key: &PublicKey, + blake2b_hash_fn: impl Fn(Vec) -> [u8; BLAKE2B_DIGEST_LENGTH], + ) -> Self { + const SYSTEM_LOWERCASE: &str = "system"; + const ED25519_LOWERCASE: &str = "ed25519"; + const SECP256K1_LOWERCASE: &str = "secp256k1"; + + let algorithm_name = match public_key { + PublicKey::System => SYSTEM_LOWERCASE, + PublicKey::Ed25519(_) => ED25519_LOWERCASE, + PublicKey::Secp256k1(_) => SECP256K1_LOWERCASE, + }; + let public_key_bytes: Vec = public_key.into(); + + // Prepare preimage based on the public key parameters. 
+ let preimage = { + let mut data = Vec::with_capacity(algorithm_name.len() + public_key_bytes.len() + 1); + data.extend(algorithm_name.as_bytes()); + data.push(0); + data.extend(public_key_bytes); + data + }; + // Hash the preimage data using blake2b256 and return it. + let digest = blake2b_hash_fn(preimage); + Self::new(digest) + } +} + +impl Display for PackageHash { + fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { + write!(f, "{}", base16::encode_lower(&self.0)) + } +} + +impl Debug for PackageHash { + fn fmt(&self, f: &mut Formatter) -> core::fmt::Result { + write!(f, "PackageHash({})", base16::encode_lower(&self.0)) + } +} + +impl CLTyped for PackageHash { + fn cl_type() -> CLType { + CLType::ByteArray(KEY_HASH_LENGTH as u32) + } +} + +impl ToBytes for PackageHash { + #[inline(always)] + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.0.to_bytes() + } + + #[inline(always)] + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } + + #[inline(always)] + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + writer.extend_from_slice(&self.0); + Ok(()) + } +} + +impl FromBytes for PackageHash { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (bytes, rem) = FromBytes::from_bytes(bytes)?; + Ok((PackageHash::new(bytes), rem)) + } +} + +impl From<[u8; 32]> for PackageHash { + fn from(bytes: [u8; 32]) -> Self { + PackageHash(bytes) + } +} + +impl Serialize for PackageHash { + fn serialize(&self, serializer: S) -> Result { + if serializer.is_human_readable() { + self.to_formatted_string().serialize(serializer) + } else { + self.0.serialize(serializer) + } + } +} + +impl<'de> Deserialize<'de> for PackageHash { + fn deserialize>(deserializer: D) -> Result { + if deserializer.is_human_readable() { + let formatted_string = String::deserialize(deserializer)?; + PackageHash::from_formatted_str(&formatted_string).map_err(SerdeError::custom) + } else { + let bytes = 
HashAddr::deserialize(deserializer)?; + Ok(PackageHash(bytes)) + } + } +} + +impl AsRef<[u8]> for PackageHash { + fn as_ref(&self) -> &[u8] { + self.0.as_ref() + } +} + +impl TryFrom<&[u8]> for PackageHash { + type Error = TryFromSliceForPackageHashError; + + fn try_from(bytes: &[u8]) -> Result { + HashAddr::try_from(bytes) + .map(PackageHash::new) + .map_err(|_| TryFromSliceForPackageHashError(())) + } +} + +impl TryFrom<&Vec> for PackageHash { + type Error = TryFromSliceForPackageHashError; + + fn try_from(bytes: &Vec) -> Result { + HashAddr::try_from(bytes as &[u8]) + .map(PackageHash::new) + .map_err(|_| TryFromSliceForPackageHashError(())) + } +} + +impl From<&PublicKey> for PackageHash { + fn from(public_key: &PublicKey) -> Self { + PackageHash::from_public_key(public_key, crypto::blake2b) + } +} + +/// A enum to determine the lock status of the package. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub enum PackageStatus { + /// The package is locked and cannot be versioned. + Locked, + /// The package is unlocked and can be versioned. 
+ Unlocked, +} + +impl PackageStatus { + /// Create a new status flag based on a boolean value + pub fn new(is_locked: bool) -> Self { + if is_locked { + PackageStatus::Locked + } else { + PackageStatus::Unlocked + } + } +} + +impl Default for PackageStatus { + fn default() -> Self { + Self::Unlocked + } +} + +impl ToBytes for PackageStatus { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut result = bytesrepr::allocate_buffer(self)?; + match self { + PackageStatus::Unlocked => result.append(&mut false.to_bytes()?), + PackageStatus::Locked => result.append(&mut true.to_bytes()?), + } + Ok(result) + } + + fn serialized_length(&self) -> usize { + match self { + PackageStatus::Unlocked => false.serialized_length(), + PackageStatus::Locked => true.serialized_length(), + } + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + PackageStatus::Locked => writer.push(u8::from(true)), + PackageStatus::Unlocked => writer.push(u8::from(false)), + } + Ok(()) + } +} + +impl FromBytes for PackageStatus { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (val, bytes) = bool::from_bytes(bytes)?; + let status = PackageStatus::new(val); + Ok((status, bytes)) + } +} + +/// Entity definition, metadata, and security container. +#[derive(Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub struct Package { + /// All versions (enabled & disabled). + versions: EntityVersions, + /// Collection of disabled entity versions. The runtime will not permit disabled entity + /// versions to be executed. + disabled_versions: BTreeSet, + /// Mapping maintaining the set of URefs associated with each "user group". This can be used to + /// control access to methods in a particular version of the entity. 
A method is callable by + /// any context which "knows" any of the URefs associated with the method's user group. + groups: Groups, + /// A flag that determines whether a entity is locked + lock_status: PackageStatus, +} + +impl CLTyped for Package { + fn cl_type() -> CLType { + CLType::Any + } +} + +impl Package { + /// Create new `Package` (with no versions) from given access key. + pub fn new( + versions: EntityVersions, + disabled_versions: BTreeSet, + groups: Groups, + lock_status: PackageStatus, + ) -> Self { + Package { + versions, + disabled_versions, + groups, + lock_status, + } + } + + /// Enable the entity version corresponding to the given hash (if it exists). + pub fn enable_version(&mut self, entity_addr: EntityAddr) -> Result<(), Error> { + let entity_version_key = self + .find_entity_version_key_by_hash(&entity_addr) + .copied() + .ok_or(Error::EntityNotFound)?; + + self.disabled_versions.remove(&entity_version_key); + + Ok(()) + } + + /// Get the mutable group definitions for this entity. + pub fn groups_mut(&mut self) -> &mut Groups { + &mut self.groups + } + + /// Get the group definitions for this entity. + pub fn groups(&self) -> &Groups { + &self.groups + } + + /// Adds new group to this entity. + pub fn add_group(&mut self, group: Group, urefs: BTreeSet) { + let v = self.groups.0.entry(group).or_default(); + v.extend(urefs) + } + + /// Lookup the entity hash for a given entity version (if present) + pub fn lookup_entity_hash(&self, entity_version_key: EntityVersionKey) -> Option<&EntityAddr> { + self.versions.0.get(&entity_version_key) + } + + /// Checks if the given entity version exists. + pub fn is_version_missing(&self, entity_version_key: EntityVersionKey) -> bool { + !self.versions.0.contains_key(&entity_version_key) + } + + /// Checks if the given entity version exists and is available for use. 
+ pub fn is_version_enabled(&self, entity_version_key: EntityVersionKey) -> bool { + !self.is_version_missing(entity_version_key) + && !self.disabled_versions.contains(&entity_version_key) + } + + /// Returns `true` if the given entity hash exists and is enabled. + pub fn is_entity_enabled(&self, entity_hash: &EntityAddr) -> bool { + match self.find_entity_version_key_by_hash(entity_hash) { + Some(version_key) => !self.disabled_versions.contains(version_key), + None => false, + } + } + + /// Insert a new entity version; the next sequential version number will be issued. + pub fn insert_entity_version( + &mut self, + protocol_version_major: ProtocolVersionMajor, + entity_hash: EntityAddr, + ) -> EntityVersionKey { + let contract_version = self.next_entity_version_for(protocol_version_major); + let key = EntityVersionKey::new(protocol_version_major, contract_version); + self.versions.0.insert(key, entity_hash); + key + } + + /// Disable the entity version corresponding to the given hash (if it exists). + pub fn disable_entity_version(&mut self, entity_hash: EntityAddr) -> Result<(), Error> { + let entity_version_key = self + .versions + .0 + .iter() + .filter_map(|(k, v)| if *v == entity_hash { Some(*k) } else { None }) + .next() + .ok_or(Error::EntityNotFound)?; + + if !self.disabled_versions.contains(&entity_version_key) { + self.disabled_versions.insert(entity_version_key); + } + + Ok(()) + } + + fn find_entity_version_key_by_hash( + &self, + entity_hash: &EntityAddr, + ) -> Option<&EntityVersionKey> { + self.versions + .0 + .iter() + .filter_map(|(k, v)| if v == entity_hash { Some(k) } else { None }) + .next() + } + + /// Returns reference to all of this entity's versions. + pub fn versions(&self) -> &EntityVersions { + &self.versions + } + + /// Returns all of this entity's enabled entity versions. 
+ pub fn enabled_versions(&self) -> EntityVersions { + let mut ret = EntityVersions::new(); + for version in &self.versions.0 { + if !self.is_version_enabled(*version.0) { + continue; + } + ret.0.insert(*version.0, *version.1); + } + ret + } + + /// Returns mutable reference to all of this entity's versions (enabled and disabled). + pub fn versions_mut(&mut self) -> &mut EntityVersions { + &mut self.versions + } + + /// Consumes the object and returns all of this entity's versions (enabled and disabled). + pub fn take_versions(self) -> EntityVersions { + self.versions + } + + /// Returns all of this entity's disabled versions. + pub fn disabled_versions(&self) -> &BTreeSet { + &self.disabled_versions + } + + /// Returns mut reference to all of this entity's disabled versions. + pub fn disabled_versions_mut(&mut self) -> &mut BTreeSet { + &mut self.disabled_versions + } + + /// Removes a group from this entity (if it exists). + pub fn remove_group(&mut self, group: &Group) -> bool { + self.groups.0.remove(group).is_some() + } + + /// Gets the next available entity version for the given protocol version + pub fn next_entity_version_for(&self, protocol_version: ProtocolVersionMajor) -> EntityVersion { + let current_version = self + .versions + .0 + .keys() + .rev() + .find_map(|&entity_version_key| { + if entity_version_key.protocol_version_major() == protocol_version { + Some(entity_version_key.entity_version()) + } else { + None + } + }) + .unwrap_or(0); + + current_version + 1 + } + + pub fn current_entity_version_for( + &self, + protocol_version: ProtocolVersionMajor, + ) -> EntityVersionKey { + let current_version = self + .enabled_versions() + .0 + .keys() + .rev() + .find_map(|&entity_version_key| { + if entity_version_key.protocol_version_major() == protocol_version { + Some(entity_version_key.entity_version()) + } else { + None + } + }) + .unwrap_or(0); + + EntityVersionKey::new(protocol_version, current_version) + } + + /// Return the entity version key for 
the newest enabled entity version. + pub fn current_entity_version(&self) -> Option { + self.enabled_versions().0.keys().next_back().copied() + } + + /// Return the entity hash for the newest enabled entity version. + pub fn current_entity_hash(&self) -> Option { + self.enabled_versions().0.values().next_back().copied() + } + + /// Return the lock status of the entity package. + pub fn is_locked(&self) -> bool { + if self.versions.0.is_empty() { + return false; + } + + match self.lock_status { + PackageStatus::Unlocked => false, + PackageStatus::Locked => true, + } + } + + /// Return the package status itself + pub fn get_lock_status(&self) -> PackageStatus { + self.lock_status.clone() + } +} + +impl ToBytes for Package { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.versions.serialized_length() + + self.disabled_versions.serialized_length() + + self.groups.serialized_length() + + self.lock_status.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.versions().write_bytes(writer)?; + self.disabled_versions().write_bytes(writer)?; + self.groups().write_bytes(writer)?; + self.lock_status.write_bytes(writer)?; + + Ok(()) + } +} + +impl FromBytes for Package { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (versions, bytes) = EntityVersions::from_bytes(bytes)?; + let (disabled_versions, bytes) = BTreeSet::::from_bytes(bytes)?; + let (groups, bytes) = Groups::from_bytes(bytes)?; + let (lock_status, bytes) = PackageStatus::from_bytes(bytes)?; + + let result = Package { + versions, + disabled_versions, + groups, + lock_status, + }; + + Ok((result, bytes)) + } +} + +#[cfg(test)] +mod tests { + use core::iter::FromIterator; + + use super::*; + use crate::{ + AccessRights, EntityEntryPoint, EntityVersionKey, EntryPointAccess, 
EntryPointPayment, + EntryPointType, Parameter, ProtocolVersion, URef, + }; + use alloc::borrow::ToOwned; + + const ENTITY_HASH_V1: EntityAddr = EntityAddr::new_smart_contract([42; 32]); + const ENTITY_HASH_V2: EntityAddr = EntityAddr::new_smart_contract([84; 32]); + + fn make_package_with_two_versions() -> Package { + let mut package = Package::new( + EntityVersions::default(), + BTreeSet::new(), + Groups::default(), + PackageStatus::default(), + ); + + // add groups + { + let group_urefs = { + let mut ret = BTreeSet::new(); + ret.insert(URef::new([1; 32], AccessRights::READ)); + ret + }; + + package + .groups_mut() + .insert(Group::new("Group 1"), group_urefs.clone()); + + package + .groups_mut() + .insert(Group::new("Group 2"), group_urefs); + } + + // add entry_points + let _entry_points = { + let mut ret = BTreeMap::new(); + let entrypoint = EntityEntryPoint::new( + "method0".to_string(), + vec![], + CLType::U32, + EntryPointAccess::groups(&["Group 2"]), + EntryPointType::Caller, + EntryPointPayment::Caller, + ); + ret.insert(entrypoint.name().to_owned(), entrypoint); + let entrypoint = EntityEntryPoint::new( + "method1".to_string(), + vec![Parameter::new("Foo", CLType::U32)], + CLType::U32, + EntryPointAccess::groups(&["Group 1"]), + EntryPointType::Caller, + EntryPointPayment::Caller, + ); + ret.insert(entrypoint.name().to_owned(), entrypoint); + ret + }; + + let protocol_version = ProtocolVersion::V1_0_0; + + let v1 = package.insert_entity_version(protocol_version.value().major, ENTITY_HASH_V1); + let v2 = package.insert_entity_version(protocol_version.value().major, ENTITY_HASH_V2); + assert!(v2 > v1); + + package + } + + #[test] + fn next_entity_version() { + let major = 1; + let mut package = Package::new( + EntityVersions::default(), + BTreeSet::default(), + Groups::default(), + PackageStatus::default(), + ); + assert_eq!(package.next_entity_version_for(major), 1); + + let next_version = + package.insert_entity_version(major, 
EntityAddr::SmartContract([123; 32])); + assert_eq!(next_version, EntityVersionKey::new(major, 1)); + assert_eq!(package.next_entity_version_for(major), 2); + let next_version_2 = + package.insert_entity_version(major, EntityAddr::SmartContract([124; 32])); + assert_eq!(next_version_2, EntityVersionKey::new(major, 2)); + + let major = 2; + assert_eq!(package.next_entity_version_for(major), 1); + let next_version_3 = + package.insert_entity_version(major, EntityAddr::SmartContract([42; 32])); + assert_eq!(next_version_3, EntityVersionKey::new(major, 1)); + } + + #[test] + fn roundtrip_serialization() { + let package = make_package_with_two_versions(); + let bytes = package.to_bytes().expect("should serialize"); + let (decoded_package, rem) = Package::from_bytes(&bytes).expect("should deserialize"); + assert_eq!(package, decoded_package); + assert_eq!(rem.len(), 0); + } + + #[test] + fn should_remove_group() { + let mut package = make_package_with_two_versions(); + + assert!(!package.remove_group(&Group::new("Non-existent group"))); + assert!(package.remove_group(&Group::new("Group 1"))); + assert!(!package.remove_group(&Group::new("Group 1"))); // Group no longer exists + } + + #[test] + fn should_disable_and_enable_entity_version() { + const ENTITY_HASH: EntityAddr = EntityAddr::new_smart_contract([123; 32]); + + let mut package = make_package_with_two_versions(); + + assert!( + !package.is_entity_enabled(&ENTITY_HASH), + "nonexisting entity should return false" + ); + + assert_eq!( + package.current_entity_version(), + Some(EntityVersionKey::new(1, 2)) + ); + assert_eq!(package.current_entity_hash(), Some(ENTITY_HASH_V2)); + + assert_eq!( + package.versions(), + &EntityVersions::from(BTreeMap::from_iter([ + (EntityVersionKey::new(1, 1), ENTITY_HASH_V1), + (EntityVersionKey::new(1, 2), ENTITY_HASH_V2) + ])), + ); + assert_eq!( + package.enabled_versions(), + EntityVersions::from(BTreeMap::from_iter([ + (EntityVersionKey::new(1, 1), ENTITY_HASH_V1), + 
(EntityVersionKey::new(1, 2), ENTITY_HASH_V2) + ])), + ); + + assert!(!package.is_entity_enabled(&ENTITY_HASH)); + + assert_eq!( + package.disable_entity_version(ENTITY_HASH), + Err(Error::EntityNotFound), + "should return entity not found error" + ); + + assert!( + !package.is_entity_enabled(&ENTITY_HASH), + "disabling missing entity shouldnt change outcome" + ); + + let next_version = package.insert_entity_version(1, ENTITY_HASH); + assert!( + package.is_version_enabled(next_version), + "version should exist and be enabled" + ); + assert!(package.is_entity_enabled(&ENTITY_HASH)); + + assert!( + package.is_entity_enabled(&ENTITY_HASH), + "entity should be enabled" + ); + + assert_eq!( + package.disable_entity_version(ENTITY_HASH), + Ok(()), + "should be able to disable version" + ); + assert!(!package.is_entity_enabled(&ENTITY_HASH)); + + assert!( + !package.is_entity_enabled(&ENTITY_HASH), + "entity should be disabled" + ); + // This was once true, but look up vs disable checking have been decoupled in 2.0 + // assert_eq!( + // package.lookup_entity_hash(next_version), + // None, + // "should not return disabled entity version" + // ); + assert!( + !package.is_version_enabled(next_version), + "version should not be enabled" + ); + + assert_eq!( + package.current_entity_version(), + Some(EntityVersionKey::new(1, 2)) + ); + assert_eq!(package.current_entity_hash(), Some(ENTITY_HASH_V2)); + assert_eq!( + package.versions(), + &EntityVersions::from(BTreeMap::from_iter([ + (EntityVersionKey::new(1, 1), ENTITY_HASH_V1), + (EntityVersionKey::new(1, 2), ENTITY_HASH_V2), + (next_version, ENTITY_HASH), + ])), + ); + assert_eq!( + package.enabled_versions(), + EntityVersions::from(BTreeMap::from_iter([ + (EntityVersionKey::new(1, 1), ENTITY_HASH_V1), + (EntityVersionKey::new(1, 2), ENTITY_HASH_V2), + ])), + ); + assert_eq!( + package.disabled_versions(), + &BTreeSet::from_iter([next_version]), + ); + + assert_eq!( + package.current_entity_version(), + 
Some(EntityVersionKey::new(1, 2)) + ); + assert_eq!(package.current_entity_hash(), Some(ENTITY_HASH_V2)); + + assert_eq!( + package.disable_entity_version(ENTITY_HASH_V2), + Ok(()), + "should be able to disable version 2" + ); + + assert_eq!( + package.enabled_versions(), + EntityVersions::from(BTreeMap::from_iter([( + EntityVersionKey::new(1, 1), + ENTITY_HASH_V1 + ),])), + ); + + assert_eq!( + package.current_entity_version(), + Some(EntityVersionKey::new(1, 1)) + ); + assert_eq!(package.current_entity_hash(), Some(ENTITY_HASH_V1)); + + assert_eq!( + package.disabled_versions(), + &BTreeSet::from_iter([next_version, EntityVersionKey::new(1, 2)]), + ); + + assert_eq!(package.enable_version(ENTITY_HASH_V2), Ok(()),); + + assert_eq!( + package.enabled_versions(), + EntityVersions::from(BTreeMap::from_iter([ + (EntityVersionKey::new(1, 1), ENTITY_HASH_V1), + (EntityVersionKey::new(1, 2), ENTITY_HASH_V2), + ])), + ); + + assert_eq!( + package.disabled_versions(), + &BTreeSet::from_iter([next_version]) + ); + + assert_eq!(package.current_entity_hash(), Some(ENTITY_HASH_V2)); + + assert_eq!(package.enable_version(ENTITY_HASH), Ok(()),); + + assert_eq!( + package.enable_version(ENTITY_HASH), + Ok(()), + "enabling a entity twice should be a noop" + ); + + assert_eq!( + package.enabled_versions(), + EntityVersions::from(BTreeMap::from_iter([ + (EntityVersionKey::new(1, 1), ENTITY_HASH_V1), + (EntityVersionKey::new(1, 2), ENTITY_HASH_V2), + (next_version, ENTITY_HASH), + ])), + ); + + assert_eq!(package.disabled_versions(), &BTreeSet::new(),); + + assert_eq!(package.current_entity_hash(), Some(ENTITY_HASH)); + } + + #[test] + fn should_not_allow_to_enable_non_existing_version() { + let mut package = make_package_with_two_versions(); + + assert_eq!( + package.enable_version(EntityAddr::SmartContract(HashAddr::default())), + Err(Error::EntityNotFound), + ); + } + + #[test] + fn package_hash_from_slice() { + let bytes: Vec = (0..32).collect(); + let package_hash = 
HashAddr::try_from(&bytes[..]).expect("should create package hash"); + let package_hash = PackageHash::new(package_hash); + assert_eq!(&bytes, &package_hash.as_bytes()); + } + + #[test] + fn package_hash_from_str() { + let package_hash = PackageHash::new([3; 32]); + let encoded = package_hash.to_formatted_string(); + let decoded = PackageHash::from_formatted_str(&encoded).unwrap(); + assert_eq!(package_hash, decoded); + + let invalid_prefix = + "package0000000000000000000000000000000000000000000000000000000000000000"; + assert!(matches!( + PackageHash::from_formatted_str(invalid_prefix).unwrap_err(), + FromStrError::InvalidPrefix + )); + + let short_addr = "package-00000000000000000000000000000000000000000000000000000000000000"; + assert!(matches!( + PackageHash::from_formatted_str(short_addr).unwrap_err(), + FromStrError::Hash(_) + )); + + let long_addr = + "package-000000000000000000000000000000000000000000000000000000000000000000"; + assert!(matches!( + PackageHash::from_formatted_str(long_addr).unwrap_err(), + FromStrError::Hash(_) + )); + + let invalid_hex = + "package-000000000000000000000000000000000000000000000000000000000000000g"; + assert!(matches!( + PackageHash::from_formatted_str(invalid_hex).unwrap_err(), + FromStrError::Hex(_) + )); + } +} + +#[cfg(test)] +mod prop_tests { + use proptest::prelude::*; + + use crate::{bytesrepr, gens}; + + proptest! 
{ + #[test] + fn test_value_contract_package(contract_pkg in gens::package_arb()) { + bytesrepr::test_serialization_roundtrip(&contract_pkg); + } + } +} diff --git a/types/src/peers_map.rs b/types/src/peers_map.rs new file mode 100644 index 0000000000..caaaf1c4c5 --- /dev/null +++ b/types/src/peers_map.rs @@ -0,0 +1,151 @@ +use alloc::collections::BTreeMap; + +use crate::bytesrepr::{self, FromBytes, ToBytes}; +use alloc::{ + string::{String, ToString}, + vec::Vec, +}; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +#[cfg(any(feature = "testing", test))] +use core::iter; +#[cfg(any(feature = "testing", test))] +use rand::Rng; + +/// Node peer entry. +#[derive(Clone, PartialEq, Eq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub struct PeerEntry { + /// Node id. + pub node_id: String, + /// Node address. 
+ pub address: String, +} + +impl PeerEntry { + #[cfg(any(feature = "testing", test))] + pub(crate) fn random(rng: &mut TestRng) -> Self { + Self { + node_id: rng.random_string(10..20), + address: rng.random_string(10..20), + } + } +} + +impl ToBytes for PeerEntry { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.node_id.write_bytes(writer)?; + self.address.write_bytes(writer) + } + + fn serialized_length(&self) -> usize { + self.node_id.serialized_length() + self.address.serialized_length() + } +} + +impl FromBytes for PeerEntry { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (node_id, remainder) = String::from_bytes(bytes)?; + let (address, remainder) = String::from_bytes(remainder)?; + Ok((PeerEntry { node_id, address }, remainder)) + } +} + +/// Map of peer IDs to network addresses. +#[derive(Clone, PartialEq, Eq, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub struct Peers(Vec); + +impl Peers { + /// Retrieve collection of `PeerEntry` records. + pub fn into_inner(self) -> Vec { + self.0 + } + + /// Random. 
+ #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + let count = rng.gen_range(0..10); + let peers = iter::repeat(()) + .map(|_| PeerEntry::random(rng)) + .take(count) + .collect(); + Self(peers) + } +} + +impl From> for Peers { + fn from(input: BTreeMap) -> Self { + let ret = input + .into_iter() + .map(|(node_id, address)| PeerEntry { + node_id: node_id.to_string(), + address, + }) + .collect(); + Peers(ret) + } +} + +impl ToBytes for Peers { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.0.write_bytes(writer) + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } +} + +impl FromBytes for Peers { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (inner, remainder) = Vec::::from_bytes(bytes)?; + Ok((Peers(inner), remainder)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::testing::TestRng; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + + let val = Peers::random(rng); + bytesrepr::test_serialization_roundtrip(&val); + } + + #[test] + fn bytesrepr_empty_roundtrip() { + let val = Peers(vec![]); + bytesrepr::test_serialization_roundtrip(&val); + } + + #[test] + fn bytesrepr_empty_vec_should_have_count_0() { + let val = Peers(vec![]); + let x = Peers::to_bytes(&val).expect("should have vec"); + let (count, _) = u32::from_bytes(&x).expect("should have count"); + assert!(count == 0, "count should be 0"); + } +} diff --git a/types/src/protocol_version.rs b/types/src/protocol_version.rs index ccfb4d9d9e..5d5cad14b3 100644 --- a/types/src/protocol_version.rs +++ b/types/src/protocol_version.rs @@ -1,9 +1,9 @@ use alloc::{format, string::String, vec::Vec}; use core::{convert::TryFrom, fmt, str::FromStr}; +#[cfg(feature = "datasize")] use 
datasize::DataSize; - -#[cfg(feature = "std")] +#[cfg(feature = "json-schema")] use schemars::JsonSchema; use serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer}; @@ -13,7 +13,8 @@ use crate::{ }; /// A newtype wrapping a [`SemVer`] which represents a Casper Platform protocol version. -#[derive(Copy, Clone, DataSize, Debug, Default, Hash, PartialEq, Eq, PartialOrd, Ord)] +#[derive(Copy, Clone, Debug, Default, Hash, PartialEq, Eq, PartialOrd, Ord)] +#[cfg_attr(feature = "datasize", derive(DataSize))] pub struct ProtocolVersion(SemVer); /// The result of [`ProtocolVersion::check_next_version`]. @@ -53,6 +54,13 @@ impl ProtocolVersion { patch: 0, }); + /// Version 2.0.0. + pub const V2_0_0: ProtocolVersion = ProtocolVersion(SemVer { + major: 2, + minor: 0, + patch: 0, + }); + /// Constructs a new `ProtocolVersion` from `version`. pub const fn new(version: SemVer) -> ProtocolVersion { ProtocolVersion(version) @@ -65,23 +73,24 @@ impl ProtocolVersion { } /// Returns the inner [`SemVer`]. - pub fn value(&self) -> SemVer { + pub const fn value(&self) -> SemVer { self.0 } + /// Returns the inner [`SemVer`] destructed into a tuple of (major, minor, patch). + pub const fn destructure(&self) -> (u32, u32, u32) { + (self.0.major, self.0.minor, self.0.patch) + } + /// Checks if next version can be followed. pub fn check_next_version(&self, next: &ProtocolVersion) -> VersionCheckResult { - if next.0.major < self.0.major || next.0.major > self.0.major + 1 { - // Protocol major versions should not go backwards and should increase monotonically by - // 1. + // Protocol major versions should increase monotonically by 1. + let major_bumped = self.0.major.saturating_add(1); + if next.0.major < self.0.major || next.0.major > major_bumped { return VersionCheckResult::Invalid; } - if next.0.major == self.0.major.saturating_add(1) { - // A major version increase resets both the minor and patch versions to ( 0.0 ). 
- if next.0.minor != 0 || next.0.patch != 0 { - return VersionCheckResult::Invalid; - } + if next.0.major == major_bumped { return VersionCheckResult::Valid { is_major_version: true, }; @@ -90,17 +99,12 @@ impl ProtocolVersion { // Covers the equal major versions debug_assert_eq!(next.0.major, self.0.major); - if next.0.minor < self.0.minor || next.0.minor > self.0.minor + 1 { - // Protocol minor versions should increase monotonically by 1 within the same major - // version and should not go backwards. + if next.0.minor < self.0.minor { + // Protocol minor versions within the same major version should not go backwards. return VersionCheckResult::Invalid; } - if next.0.minor == self.0.minor + 1 { - // A minor version increase resets the patch version to ( 0 ). - if next.0.patch != 0 { - return VersionCheckResult::Invalid; - } + if next.0.minor > self.0.minor { return VersionCheckResult::Valid { is_major_version: false, }; @@ -135,6 +139,13 @@ impl ToBytes for ProtocolVersion { fn serialized_length(&self) -> usize { self.value().serialized_length() } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + writer.extend(self.0.major.to_le_bytes()); + writer.extend(self.0.minor.to_le_bytes()); + writer.extend(self.0.patch.to_le_bytes()); + Ok(()) + } } impl FromBytes for ProtocolVersion { @@ -177,7 +188,7 @@ impl<'de> Deserialize<'de> for ProtocolVersion { } } -#[cfg(feature = "std")] +#[cfg(feature = "json-schema")] impl JsonSchema for ProtocolVersion { fn schema_name() -> String { String::from("ProtocolVersion") @@ -297,34 +308,53 @@ mod tests { } #[test] - fn should_check_if_minor_bump_resets_patch() { - // A minor version increase resets the patch version to ( 0 ). 
+ fn should_not_care_if_minor_bump_resets_patch() { let prev = ProtocolVersion::new(SemVer::new(1, 2, 0)); let next = ProtocolVersion::new(SemVer::new(1, 3, 1)); - // wrong - patch version should be reset for minor version increase - assert_eq!(prev.check_next_version(&next), VersionCheckResult::Invalid); + assert_eq!( + prev.check_next_version(&next), + VersionCheckResult::Valid { + is_major_version: false + } + ); let prev = ProtocolVersion::new(SemVer::new(1, 20, 42)); let next = ProtocolVersion::new(SemVer::new(1, 30, 43)); - assert_eq!(prev.check_next_version(&next), VersionCheckResult::Invalid); + assert_eq!( + prev.check_next_version(&next), + VersionCheckResult::Valid { + is_major_version: false + } + ); } #[test] - fn should_check_if_major_resets_minor_and_patch() { + fn should_not_care_if_major_bump_resets_minor_or_patch() { // A major version increase resets both the minor and patch versions to ( 0.0 ). let prev = ProtocolVersion::new(SemVer::new(1, 0, 0)); let next = ProtocolVersion::new(SemVer::new(2, 1, 0)); - assert_eq!(prev.check_next_version(&next), VersionCheckResult::Invalid); // wrong - major increase should reset minor + assert_eq!( + prev.check_next_version(&next), + VersionCheckResult::Valid { + is_major_version: true + } + ); let next = ProtocolVersion::new(SemVer::new(2, 0, 1)); - assert_eq!(prev.check_next_version(&next), VersionCheckResult::Invalid); // wrong - major increase should reset patch + assert_eq!( + prev.check_next_version(&next), + VersionCheckResult::Valid { + is_major_version: true + } + ); let next = ProtocolVersion::new(SemVer::new(2, 1, 1)); - assert_eq!(prev.check_next_version(&next), VersionCheckResult::Invalid); - // wrong - major - // increase - // should reset - // minor and patch + assert_eq!( + prev.check_next_version(&next), + VersionCheckResult::Valid { + is_major_version: true + } + ); } #[test] @@ -370,27 +400,70 @@ mod tests { } #[test] - fn should_not_skip_minor_version_within_major_version() { - // minor can 
be updated only by 1 + fn should_allow_skip_minor_version_within_major_version() { let prev = ProtocolVersion::new(SemVer::new(1, 1, 0)); let next = ProtocolVersion::new(SemVer::new(1, 3, 0)); - assert_eq!(prev.check_next_version(&next), VersionCheckResult::Invalid); + assert_eq!( + prev.check_next_version(&next), + VersionCheckResult::Valid { + is_major_version: false + } + ); let next = ProtocolVersion::new(SemVer::new(1, 7, 0)); - assert_eq!(prev.check_next_version(&next), VersionCheckResult::Invalid); + assert_eq!( + prev.check_next_version(&next), + VersionCheckResult::Valid { + is_major_version: false + } + ); + } + + #[test] + fn should_allow_skip_patch_version_within_minor_version() { + let prev = ProtocolVersion::new(SemVer::new(1, 1, 0)); + + let next = ProtocolVersion::new(SemVer::new(1, 1, 2)); + assert_eq!( + prev.check_next_version(&next), + VersionCheckResult::Valid { + is_major_version: false + } + ); } #[test] - fn should_reset_minor_and_patch_on_major_bump() { - // no upgrade - minor resets patch + fn should_allow_skipped_minor_and_patch_on_major_bump() { + // skip minor let prev = ProtocolVersion::new(SemVer::new(1, 0, 0)); - let next = ProtocolVersion::new(SemVer::new(2, 1, 1)); - assert_eq!(prev.check_next_version(&next), VersionCheckResult::Invalid); + let next = ProtocolVersion::new(SemVer::new(2, 1, 0)); + assert_eq!( + prev.check_next_version(&next), + VersionCheckResult::Valid { + is_major_version: true + } + ); - let prev = ProtocolVersion::new(SemVer::new(1, 1, 1)); - let next = ProtocolVersion::new(SemVer::new(2, 2, 3)); - assert_eq!(prev.check_next_version(&next), VersionCheckResult::Invalid); + // skip patch + let prev = ProtocolVersion::new(SemVer::new(1, 0, 0)); + let next = ProtocolVersion::new(SemVer::new(2, 0, 1)); + assert_eq!( + prev.check_next_version(&next), + VersionCheckResult::Valid { + is_major_version: true + } + ); + + // skip many minors and patches + let prev = ProtocolVersion::new(SemVer::new(1, 0, 0)); + let next = 
ProtocolVersion::new(SemVer::new(2, 3, 10)); + assert_eq!( + prev.check_next_version(&next), + VersionCheckResult::Valid { + is_major_version: true + } + ); } #[test] @@ -434,7 +507,7 @@ mod tests { ProtocolVersion::from_parts(1, 2, 0), ProtocolVersion::from_parts(1, 2, 3), ] { - assert_eq!(ver.check_next_version(&ver), VersionCheckResult::Invalid); + assert_eq!(ver.check_next_version(ver), VersionCheckResult::Invalid); } } diff --git a/types/src/runtime_args.rs b/types/src/runtime_args.rs deleted file mode 100644 index b2b94396df..0000000000 --- a/types/src/runtime_args.rs +++ /dev/null @@ -1,284 +0,0 @@ -//! Home of RuntimeArgs for calling contracts - -// TODO - remove once schemars stops causing warning. -#![allow(clippy::field_reassign_with_default)] - -use alloc::{collections::BTreeMap, string::String, vec::Vec}; - -use datasize::DataSize; -#[cfg(feature = "std")] -use schemars::JsonSchema; -use serde::{Deserialize, Serialize}; - -use crate::{ - bytesrepr::{self, Error, FromBytes, ToBytes}, - CLTyped, CLValue, CLValueError, -}; - -/// Named arguments to a contract -#[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Serialize, Deserialize, Debug, DataSize)] -#[cfg_attr(feature = "std", derive(JsonSchema))] -pub struct NamedArg(#[data_size(skip)] String, CLValue); - -impl NamedArg { - /// ctor - pub fn new(name: String, value: CLValue) -> Self { - NamedArg(name, value) - } - /// returns `name` - pub fn name(&self) -> &str { - &self.0 - } - /// returns `value` - pub fn cl_value(&self) -> &CLValue { - &self.1 - } -} - -impl From<(String, CLValue)> for NamedArg { - fn from((name, value): (String, CLValue)) -> NamedArg { - NamedArg(name, value) - } -} - -impl ToBytes for NamedArg { - fn to_bytes(&self) -> Result, Error> { - let mut result = bytesrepr::allocate_buffer(self)?; - result.append(&mut self.0.to_bytes()?); - result.append(&mut self.1.to_bytes()?); - Ok(result) - } - - fn serialized_length(&self) -> usize { - self.0.serialized_length() + 
self.1.serialized_length() - } -} - -impl FromBytes for NamedArg { - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { - let (name, remainder) = String::from_bytes(bytes)?; - let (cl_value, remainder) = CLValue::from_bytes(remainder)?; - Ok((NamedArg(name, cl_value), remainder)) - } -} - -/// Represents a collection of arguments passed to a smart contract. -#[derive( - PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Serialize, Deserialize, Debug, Default, DataSize, -)] -#[cfg_attr(feature = "std", derive(JsonSchema))] -pub struct RuntimeArgs(#[data_size(skip)] Vec); - -impl RuntimeArgs { - /// Create an empty [`RuntimeArgs`] instance. - pub fn new() -> RuntimeArgs { - RuntimeArgs::default() - } - - /// A wrapper that lets you easily and safely create runtime arguments. - /// - /// This method is useful when you have to construct a [`RuntimeArgs`] with multiple entries, - /// but error handling at given call site would require to have a match statement for each - /// [`RuntimeArgs::insert`] call. With this method you can use ? operator inside the closure and - /// then handle single result. When `try_block` will be stabilized this method could be - /// deprecated in favor of using those blocks. - pub fn try_new(func: F) -> Result - where - F: FnOnce(&mut RuntimeArgs) -> Result<(), CLValueError>, - { - let mut runtime_args = RuntimeArgs::new(); - func(&mut runtime_args)?; - Ok(runtime_args) - } - - /// Gets an argument by its name. - pub fn get(&self, name: &str) -> Option<&CLValue> { - self.0.iter().find_map(|NamedArg(named_name, named_value)| { - if named_name == name { - Some(named_value) - } else { - None - } - }) - } - - /// Get length of the collection. - pub fn len(&self) -> usize { - self.0.len() - } - - /// Check if collection of arguments is empty. - pub fn is_empty(&self) -> bool { - self.0.is_empty() - } - - /// Insert new named argument into the collection. 
- pub fn insert(&mut self, key: K, value: V) -> Result<(), CLValueError> - where - K: Into, - V: CLTyped + ToBytes, - { - let cl_value = CLValue::from_t(value)?; - self.0.push(NamedArg(key.into(), cl_value)); - Ok(()) - } - - /// Insert new named argument into the collection. - pub fn insert_cl_value(&mut self, key: K, cl_value: CLValue) - where - K: Into, - { - self.0.push(NamedArg(key.into(), cl_value)); - } - - /// Returns values held regardless of the variant. - pub fn to_values(&self) -> Vec<&CLValue> { - self.0.iter().map(|NamedArg(_name, value)| value).collect() - } -} - -impl From> for RuntimeArgs { - fn from(values: Vec) -> Self { - RuntimeArgs(values) - } -} - -impl From> for RuntimeArgs { - fn from(cl_values: BTreeMap) -> RuntimeArgs { - RuntimeArgs(cl_values.into_iter().map(NamedArg::from).collect()) - } -} - -impl Into> for RuntimeArgs { - fn into(self) -> BTreeMap { - let mut map = BTreeMap::new(); - for named in self.0 { - map.insert(named.0, named.1); - } - map - } -} - -impl ToBytes for RuntimeArgs { - fn to_bytes(&self) -> Result, Error> { - self.0.to_bytes() - } - - fn serialized_length(&self) -> usize { - self.0.serialized_length() - } -} - -impl FromBytes for RuntimeArgs { - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { - let (args, remainder) = Vec::::from_bytes(bytes)?; - Ok((RuntimeArgs(args), remainder)) - } -} - -/// Macro that makes it easier to construct named arguments. -/// -/// NOTE: This macro does not propagate possible errors that could occur while creating a -/// [`crate::CLValue`]. For such cases creating [`RuntimeArgs`] manually is recommended. -/// -/// # Example usage -/// ``` -/// use casper_types::{RuntimeArgs, runtime_args}; -/// let _named_args = runtime_args! { -/// "foo" => 42, -/// "bar" => "Hello, world!" -/// }; -/// ``` -#[macro_export] -macro_rules! 
runtime_args { - () => (RuntimeArgs::new()); - ( $($key:expr => $value:expr,)+ ) => (runtime_args!($($key => $value),+)); - ( $($key:expr => $value:expr),* ) => { - { - let mut named_args = RuntimeArgs::new(); - $( - named_args.insert($key, $value).unwrap(); - )* - named_args - } - }; -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_runtime_args() { - let arg1 = CLValue::from_t(1).unwrap(); - let arg2 = CLValue::from_t("Foo").unwrap(); - let arg3 = CLValue::from_t(Some(1)).unwrap(); - let args = { - let mut map = BTreeMap::new(); - map.insert("bar".into(), arg2.clone()); - map.insert("foo".into(), arg1.clone()); - map.insert("qwer".into(), arg3.clone()); - map - }; - let runtime_args = RuntimeArgs::from(args); - assert_eq!(runtime_args.get("qwer"), Some(&arg3)); - assert_eq!(runtime_args.get("foo"), Some(&arg1)); - assert_eq!(runtime_args.get("bar"), Some(&arg2)); - assert_eq!(runtime_args.get("aaa"), None); - - // Ensure macro works - - let runtime_args_2 = runtime_args! { - "bar" => "Foo", - "foo" => 1i32, - "qwer" => Some(1i32), - }; - assert_eq!(runtime_args, runtime_args_2); - } - - #[test] - fn empty_macro() { - assert_eq!(runtime_args! {}, RuntimeArgs::new()); - } - - #[test] - fn btreemap_compat() { - // This test assumes same serialization format as BTreeMap - let runtime_args_1 = runtime_args! { - "bar" => "Foo", - "foo" => 1i32, - "qwer" => Some(1i32), - }; - let tagless = runtime_args_1.to_bytes().unwrap().to_vec(); - - let mut runtime_args_2 = BTreeMap::new(); - runtime_args_2.insert(String::from("bar"), CLValue::from_t("Foo").unwrap()); - runtime_args_2.insert(String::from("foo"), CLValue::from_t(1i32).unwrap()); - runtime_args_2.insert(String::from("qwer"), CLValue::from_t(Some(1i32)).unwrap()); - - assert_eq!(tagless, runtime_args_2.to_bytes().unwrap()); - } - - #[test] - fn named_serialization_roundtrip() { - let args = runtime_args! 
{ - "foo" => 1i32, - }; - bytesrepr::test_serialization_roundtrip(&args); - } - - #[test] - fn should_create_args_with() { - let res = RuntimeArgs::try_new(|runtime_args| { - runtime_args.insert(String::from("foo"), 123)?; - runtime_args.insert(String::from("bar"), 456)?; - Ok(()) - }); - - let expected = runtime_args! { - "foo" => 123, - "bar" => 456, - }; - assert!(matches!(res, Ok(args) if expected == args)); - } -} diff --git a/types/src/runtime_footprint.rs b/types/src/runtime_footprint.rs new file mode 100644 index 0000000000..f2874f5ca6 --- /dev/null +++ b/types/src/runtime_footprint.rs @@ -0,0 +1,350 @@ +use crate::{ + account::AccountHash, + addressable_entity::{AssociatedKeys, ContractRuntimeTag, Weight}, + contracts::{ContractHash, NamedKeys}, + system::SystemEntityType, + Account, AddressableEntity, ContextAccessRights, Contract, EntityAddr, EntityKind, EntryPoints, + HashAddr, Key, ProtocolVersion, URef, +}; +use alloc::{ + collections::{BTreeMap, BTreeSet}, + string::String, +}; +use core::{fmt::Debug, iter}; +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +/// Runtime Address. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub enum RuntimeAddress { + /// Account address + Hash(HashAddr), + /// Runtime executable address. 
+ StoredContract { + /// The hash addr of the runtime entity + hash_addr: HashAddr, + /// The package hash + package_hash_addr: HashAddr, + /// The wasm hash + wasm_hash_addr: HashAddr, + /// protocol version + protocol_version: ProtocolVersion, + }, +} + +impl RuntimeAddress { + /// Returns a new hash + pub fn new_hash(hash_addr: HashAddr) -> Self { + Self::Hash(hash_addr) + } + + /// Returns new stored contract + pub fn new_stored_contract( + hash_addr: HashAddr, + package_hash_addr: HashAddr, + wasm_hash_addr: HashAddr, + protocol_version: ProtocolVersion, + ) -> Self { + Self::StoredContract { + hash_addr, + package_hash_addr, + wasm_hash_addr, + protocol_version, + } + } + + /// The hash addr for the runtime. + pub fn hash_addr(&self) -> HashAddr { + match self { + RuntimeAddress::Hash(hash_addr) => *hash_addr, + RuntimeAddress::StoredContract { hash_addr, .. } => *hash_addr, + } + } +} + +#[repr(u8)] +#[allow(clippy::enum_variant_names)] +pub(crate) enum Action { + KeyManagement = 0, + DeployManagement, + UpgradeManagement, +} + +#[derive(Debug, Clone, PartialEq, Eq)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub struct RuntimeFootprint { + named_keys: NamedKeys, + action_thresholds: BTreeMap, + associated_keys: AssociatedKeys, + entry_points: EntryPoints, + entity_kind: EntityKind, + + main_purse: Option, + runtime_address: RuntimeAddress, +} + +impl RuntimeFootprint { + pub fn new( + named_keys: NamedKeys, + action_thresholds: BTreeMap, + associated_keys: AssociatedKeys, + entry_points: EntryPoints, + entity_kind: EntityKind, + main_purse: Option, + runtime_address: RuntimeAddress, + ) -> Self { + Self { + named_keys, + action_thresholds, + associated_keys, + entry_points, + entity_kind, + main_purse, + runtime_address, + } + } + + pub fn new_account_footprint(account: Account) -> Self { + let named_keys = account.named_keys().clone(); + let action_thresholds = { + let mut ret = BTreeMap::new(); + ret.insert( + Action::KeyManagement as u8, + 
Weight::new(account.action_thresholds().key_management.value()), + ); + ret.insert( + Action::DeployManagement as u8, + Weight::new(account.action_thresholds().deployment.value()), + ); + ret + }; + let associated_keys = account.associated_keys().clone().into(); + let entry_points = EntryPoints::new(); + let entity_kind = EntityKind::Account(account.account_hash()); + let main_purse = Some(account.main_purse()); + let runtime_address = RuntimeAddress::new_hash(account.account_hash().value()); + + Self::new( + named_keys, + action_thresholds, + associated_keys, + entry_points, + entity_kind, + main_purse, + runtime_address, + ) + } + + pub fn new_contract_footprint( + contract_hash: ContractHash, + contract: Contract, + system_entity_type: Option, + ) -> Self { + let contract_package_hash = contract.contract_package_hash(); + let contract_wasm_hash = contract.contract_wasm_hash(); + let entry_points = contract.entry_points().clone().into(); + let protocol_version = contract.protocol_version(); + let named_keys = contract.take_named_keys(); + + let runtime_address = RuntimeAddress::new_stored_contract( + contract_hash.value(), + contract_package_hash.value(), + contract_wasm_hash.value(), + protocol_version, + ); + + let main_purse = None; + let action_thresholds = BTreeMap::new(); + let associated_keys = AssociatedKeys::empty_keys(); + + let entity_kind = match system_entity_type { + None => EntityKind::SmartContract(ContractRuntimeTag::VmCasperV1), + Some(kind) => EntityKind::System(kind), + }; + + Self::new( + named_keys, + action_thresholds, + associated_keys, + entry_points, + entity_kind, + main_purse, + runtime_address, + ) + } + + pub fn new_entity_footprint( + entity_addr: EntityAddr, + entity: AddressableEntity, + named_keys: NamedKeys, + entry_points: EntryPoints, + ) -> Self { + let runtime_address = RuntimeAddress::new_stored_contract( + entity_addr.value(), + entity.package_hash().value(), + entity.byte_code_hash().value(), + entity.protocol_version(), 
+ ); + let action_thresholds = { + let mut ret = BTreeMap::new(); + ret.insert( + Action::KeyManagement as u8, + entity.action_thresholds().key_management, + ); + ret.insert( + Action::DeployManagement as u8, + entity.action_thresholds().key_management, + ); + ret.insert( + Action::UpgradeManagement as u8, + entity.action_thresholds().upgrade_management, + ); + ret + }; + Self::new( + named_keys, + action_thresholds, + entity.associated_keys().clone(), + entry_points, + entity.entity_kind(), + Some(entity.main_purse()), + runtime_address, + ) + } + + pub fn package_hash(&self) -> Option { + match &self.runtime_address { + RuntimeAddress::Hash(_) => None, + RuntimeAddress::StoredContract { + package_hash_addr, .. + } => Some(*package_hash_addr), + } + } + + pub fn associated_keys(&self) -> &AssociatedKeys { + &self.associated_keys + } + + pub fn wasm_hash(&self) -> Option { + match &self.runtime_address { + RuntimeAddress::Hash(_) => None, + RuntimeAddress::StoredContract { wasm_hash_addr, .. } => Some(*wasm_hash_addr), + } + } + + pub fn hash_addr(&self) -> HashAddr { + match &self.runtime_address { + RuntimeAddress::Hash(hash_addr) => *hash_addr, + RuntimeAddress::StoredContract { hash_addr, .. } => *hash_addr, + } + } + + pub fn named_keys(&self) -> &NamedKeys { + &self.named_keys + } + + pub fn insert_into_named_keys(&mut self, name: String, key: Key) { + self.named_keys.insert(name, key); + } + + pub fn named_keys_mut(&mut self) -> &mut NamedKeys { + &mut self.named_keys + } + + pub fn take_named_keys(self) -> NamedKeys { + self.named_keys + } + + pub fn main_purse(&self) -> Option { + self.main_purse + } + + pub fn set_main_purse(&mut self, purse: URef) { + self.main_purse = Some(purse); + } + + pub fn entry_points(&self) -> &EntryPoints { + &self.entry_points + } + + pub fn entity_kind(&self) -> EntityKind { + self.entity_kind + } + + /// Checks whether all authorization keys are associated with this addressable entity. 
+ pub fn can_authorize(&self, authorization_keys: &BTreeSet) -> bool { + !authorization_keys.is_empty() + && authorization_keys + .iter() + .any(|e| self.associated_keys.contains_key(e)) + } + + /// Checks whether the sum of the weights of all authorization keys is + /// greater or equal to key management threshold. + pub fn can_manage_keys_with(&self, authorization_keys: &BTreeSet) -> bool { + let total_weight = self + .associated_keys + .calculate_keys_weight(authorization_keys); + + match self.action_thresholds.get(&(Action::KeyManagement as u8)) { + None => false, + Some(weight) => total_weight >= *weight, + } + } + + /// Checks whether the sum of the weights of all authorization keys is + /// greater or equal to deploy threshold. + pub fn can_deploy_with(&self, authorization_keys: &BTreeSet) -> bool { + let total_weight = self + .associated_keys + .calculate_keys_weight(authorization_keys); + + match self + .action_thresholds + .get(&(Action::DeployManagement as u8)) + { + None => false, + Some(weight) => total_weight >= *weight, + } + } + + pub fn can_upgrade_with(&self, authorization_keys: &BTreeSet) -> bool { + let total_weight = self + .associated_keys + .calculate_keys_weight(authorization_keys); + + match self + .action_thresholds + .get(&(Action::UpgradeManagement as u8)) + { + None => false, + Some(weight) => total_weight >= *weight, + } + } + + /// Extracts the access rights from the named keys of the addressable entity. 
+ pub fn extract_access_rights(&self, hash_addr: HashAddr) -> ContextAccessRights { + match self.main_purse { + Some(purse) => { + let urefs_iter = self + .named_keys + .keys() + .filter_map(|key| key.as_uref().copied()) + .chain(iter::once(purse)); + ContextAccessRights::new(hash_addr, urefs_iter) + } + None => { + let urefs_iter = self + .named_keys + .keys() + .filter_map(|key| key.as_uref().copied()); + ContextAccessRights::new(hash_addr, urefs_iter) + } + } + } +} diff --git a/types/src/semver.rs b/types/src/semver.rs index 22bbabfd98..5feafe53c6 100644 --- a/types/src/semver.rs +++ b/types/src/semver.rs @@ -1,14 +1,14 @@ use alloc::vec::Vec; -use core::{convert::TryFrom, fmt, num::ParseIntError}; +use core::{ + convert::TryFrom, + fmt::{self, Display, Formatter}, + num::ParseIntError, +}; +#[cfg(feature = "datasize")] use datasize::DataSize; use serde::{Deserialize, Serialize}; -#[cfg(not(feature = "std"))] -use displaydoc::Display; -#[cfg(feature = "std")] -use thiserror::Error; - use crate::bytesrepr::{self, Error, FromBytes, ToBytes, U32_SERIALIZED_LENGTH}; /// Length of SemVer when serialized @@ -16,19 +16,9 @@ pub const SEM_VER_SERIALIZED_LENGTH: usize = 3 * U32_SERIALIZED_LENGTH; /// A struct for semantic versioning. #[derive( - Copy, - Clone, - DataSize, - Debug, - Default, - Hash, - PartialEq, - Eq, - PartialOrd, - Ord, - Serialize, - Deserialize, + Copy, Clone, Debug, Default, Hash, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize, )] +#[cfg_attr(feature = "datasize", derive(DataSize))] pub struct SemVer { /// Major version. pub major: u32, @@ -79,25 +69,30 @@ impl FromBytes for SemVer { } } -impl fmt::Display for SemVer { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { +impl Display for SemVer { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { write!(f, "{}.{}.{}", self.major, self.minor, self.patch) } } /// Parsing error when creating a SemVer. 
#[derive(Debug, Clone, PartialEq, Eq)] -#[cfg_attr(feature = "std", derive(Error))] -#[cfg_attr(not(feature = "std"), derive(Display))] pub enum ParseSemVerError { - /// Invalid version format - #[cfg_attr(feature = "std", error("Invalid version format"))] + /// Invalid version format. InvalidVersionFormat, - /// {0} - #[cfg_attr(feature = "std", error("{}", _0))] + /// Error parsing an integer. ParseIntError(ParseIntError), } +impl Display for ParseSemVerError { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + ParseSemVerError::InvalidVersionFormat => formatter.write_str("invalid version format"), + ParseSemVerError::ParseIntError(error) => error.fmt(formatter), + } + } +} + impl From for ParseSemVerError { fn from(error: ParseIntError) -> ParseSemVerError { ParseSemVerError::ParseIntError(error) diff --git a/types/src/serde_helpers.rs b/types/src/serde_helpers.rs new file mode 100644 index 0000000000..e073a64825 --- /dev/null +++ b/types/src/serde_helpers.rs @@ -0,0 +1,278 @@ +use alloc::{string::String, vec::Vec}; +use core::convert::TryFrom; + +use serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer}; + +use crate::Digest; + +pub(crate) mod raw_32_byte_array { + use super::*; + + pub(crate) fn serialize( + array: &[u8; 32], + serializer: S, + ) -> Result { + if serializer.is_human_readable() { + base16::encode_lower(array).serialize(serializer) + } else { + array.serialize(serializer) + } + } + + pub(crate) fn deserialize<'de, D: Deserializer<'de>>( + deserializer: D, + ) -> Result<[u8; 32], D::Error> { + if deserializer.is_human_readable() { + let hex_string = String::deserialize(deserializer)?; + let bytes = base16::decode(hex_string.as_bytes()).map_err(SerdeError::custom)?; + <[u8; 32]>::try_from(bytes.as_ref()).map_err(SerdeError::custom) + } else { + <[u8; 32]>::deserialize(deserializer) + } + } +} + +pub(crate) mod contract_hash_as_digest { + use super::*; + use crate::contracts::ContractHash; + + 
pub(crate) fn serialize( + contract_hash: &ContractHash, + serializer: S, + ) -> Result { + Digest::from(contract_hash.value()).serialize(serializer) + } + + pub(crate) fn deserialize<'de, D: Deserializer<'de>>( + deserializer: D, + ) -> Result { + let digest = Digest::deserialize(deserializer)?; + Ok(ContractHash::new(digest.value())) + } +} + +pub(crate) mod contract_package_hash_as_digest { + use super::*; + use crate::contracts::ContractPackageHash; + + pub(crate) fn serialize( + contract_package_hash: &ContractPackageHash, + serializer: S, + ) -> Result { + Digest::from(contract_package_hash.value()).serialize(serializer) + } + + pub(crate) fn deserialize<'de, D: Deserializer<'de>>( + deserializer: D, + ) -> Result { + let digest = Digest::deserialize(deserializer)?; + Ok(ContractPackageHash::new(digest.value())) + } +} + +/// This module allows `DeployHash`es to be serialized and deserialized using the underlying +/// `[u8; 32]` rather than delegating to the wrapped `Digest`, which in turn delegates to a +/// `Vec` for legacy reasons. +/// +/// This is required as the `DeployHash` defined in `casper-types` up until v4.0.0 used the array +/// form, while the `DeployHash` defined in `casper-node` during this period delegated to `Digest`. +/// +/// We use this module in places where the old `casper_types::DeployHash` was held as a member of a +/// type which implements `Serialize` and/or `Deserialize`. 
+pub(crate) mod deploy_hash_as_array { + use super::*; + use crate::DeployHash; + + pub(crate) fn serialize( + deploy_hash: &DeployHash, + serializer: S, + ) -> Result { + if serializer.is_human_readable() { + base16::encode_lower(&deploy_hash.inner().value()).serialize(serializer) + } else { + deploy_hash.inner().value().serialize(serializer) + } + } + + pub(crate) fn deserialize<'de, D: Deserializer<'de>>( + deserializer: D, + ) -> Result { + let bytes = if deserializer.is_human_readable() { + let hex_string = String::deserialize(deserializer)?; + let vec_bytes = base16::decode(hex_string.as_bytes()).map_err(SerdeError::custom)?; + <[u8; DeployHash::LENGTH]>::try_from(vec_bytes.as_ref()).map_err(SerdeError::custom)? + } else { + <[u8; DeployHash::LENGTH]>::deserialize(deserializer)? + }; + Ok(DeployHash::new(Digest::from(bytes))) + } +} + +pub(crate) mod contract { + use super::*; + use crate::{ + contracts::{ContractPackageHash, EntryPoint, EntryPoints}, + Contract, ContractWasmHash, NamedKeys, ProtocolVersion, + }; + use core::fmt::Display; + #[cfg(feature = "json-schema")] + use schemars::JsonSchema; + use serde::{Deserialize, Serialize}; + + #[derive(Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize)] + #[cfg_attr(feature = "json-schema", derive(JsonSchema))] + #[cfg_attr( + feature = "json-schema", + schemars( + rename = "Contract", + description = "Methods and type signatures supported by a contract.", + ) + )] + pub(crate) struct HumanReadableContract { + contract_package_hash: ContractPackageHash, + contract_wasm_hash: ContractWasmHash, + named_keys: NamedKeys, + entry_points: Vec, + protocol_version: ProtocolVersion, + } + + impl From<&Contract> for HumanReadableContract { + fn from(value: &Contract) -> Self { + Self { + contract_package_hash: value.contract_package_hash(), + contract_wasm_hash: value.contract_wasm_hash(), + named_keys: value.named_keys().clone(), + protocol_version: value.protocol_version(), + entry_points: 
value.entry_points().clone().take_entry_points(), + } + } + } + + /// Parsing error when deserializing StoredValue. + #[derive(Debug, Clone)] + pub(crate) enum ContractDeserializationError { + /// Contract not deserializable. + NonUniqueEntryPointName, + } + + impl Display for ContractDeserializationError { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + match self { + ContractDeserializationError::NonUniqueEntryPointName => { + write!(f, "Non unique `entry_points.name`") + } + } + } + } + + impl TryFrom for Contract { + type Error = ContractDeserializationError; + fn try_from(value: HumanReadableContract) -> Result { + let HumanReadableContract { + contract_package_hash, + contract_wasm_hash, + named_keys, + entry_points, + protocol_version, + } = value; + let mut entry_points_map = EntryPoints::new(); + for entry_point in entry_points { + if entry_points_map.add_entry_point(entry_point).is_some() { + //There were duplicate entries in regards to 'name' + return Err(ContractDeserializationError::NonUniqueEntryPointName); + } + } + + Ok(Contract::new( + contract_package_hash, + contract_wasm_hash, + named_keys, + entry_points_map, + protocol_version, + )) + } + } +} + +pub(crate) mod contract_package { + use core::convert::TryFrom; + + use super::*; + #[cfg(feature = "json-schema")] + use schemars::JsonSchema; + use serde::{Deserialize, Serialize}; + + use crate::{ + contracts::{ + ContractHash, ContractPackage, ContractPackageStatus, ContractVersion, + ContractVersionKey, ContractVersions, DisabledVersions, ProtocolVersionMajor, + }, + Groups, URef, + }; + + #[derive(Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize)] + #[cfg_attr(feature = "json-schema", derive(JsonSchema))] + #[cfg_attr(feature = "json-schema", schemars(rename = "ContractVersion"))] + pub(crate) struct HumanReadableContractVersion { + protocol_version_major: ProtocolVersionMajor, + contract_version: ContractVersion, + contract_hash: ContractHash, + } + + /// 
Helper struct for deserializing/serializing `ContractPackage` from and to JSON. + #[derive(Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize)] + #[cfg_attr(feature = "json-schema", derive(JsonSchema))] + #[cfg_attr(feature = "json-schema", schemars(rename = "ContractPackage"))] + pub(crate) struct HumanReadableContractPackage { + access_key: URef, + versions: Vec, + disabled_versions: DisabledVersions, + groups: Groups, + lock_status: ContractPackageStatus, + } + + impl From<&ContractPackage> for HumanReadableContractPackage { + fn from(package: &ContractPackage) -> Self { + let mut versions = vec![]; + for (key, hash) in package.versions() { + versions.push(HumanReadableContractVersion { + protocol_version_major: key.protocol_version_major(), + contract_version: key.contract_version(), + contract_hash: *hash, + }); + } + HumanReadableContractPackage { + access_key: package.access_key(), + versions, + disabled_versions: package.disabled_versions().clone(), + groups: package.groups().clone(), + lock_status: package.lock_status(), + } + } + } + + impl TryFrom for ContractPackage { + type Error = String; + + fn try_from(value: HumanReadableContractPackage) -> Result { + let mut versions = ContractVersions::default(); + for version in value.versions.iter() { + let key = ContractVersionKey::new( + version.protocol_version_major, + version.contract_version, + ); + if versions.contains_key(&key) { + return Err(format!("duplicate contract version: {:?}", key)); + } + versions.insert(key, version.contract_hash); + } + Ok(ContractPackage::new( + value.access_key, + versions, + value.disabled_versions, + value.groups, + value.lock_status, + )) + } + } +} diff --git a/types/src/stored_value.rs b/types/src/stored_value.rs new file mode 100644 index 0000000000..00e307b0b2 --- /dev/null +++ b/types/src/stored_value.rs @@ -0,0 +1,1305 @@ +mod global_state_identifier; +mod type_mismatch; + +use alloc::{ + boxed::Box, + string::{String, ToString}, + vec::Vec, +}; +use 
core::{convert::TryFrom, fmt::Debug}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{de, ser, Deserialize, Deserializer, Serialize, Serializer}; +use serde_bytes::ByteBuf; + +use crate::{ + account::Account, + addressable_entity::NamedKeyValue, + bytesrepr::{self, Bytes, Error, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + contract_messages::{MessageChecksum, MessageTopicSummary}, + contract_wasm::ContractWasm, + contracts::{Contract, ContractPackage}, + package::Package, + system::{ + auction::{Bid, BidKind, EraInfo, Unbond, UnbondingPurse, WithdrawPurse}, + prepayment::PrepaymentKind, + }, + AddressableEntity, ByteCode, CLValue, DeployInfo, EntryPointValue, TransferV1, +}; +pub use global_state_identifier::GlobalStateIdentifier; +pub use type_mismatch::TypeMismatch; + +/// Tag used to discriminate between different variants of `StoredValue`. +#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)] +#[repr(u8)] +pub enum StoredValueTag { + /// A CLValue. + CLValue = 0, + /// An account. + Account = 1, + /// Contract wasm. + ContractWasm = 2, + /// A contract. + Contract = 3, + /// A contract package. + ContractPackage = 4, + /// A version 1 transfer. + Transfer = 5, + /// Info about a deploy. + DeployInfo = 6, + /// Info about an era. + EraInfo = 7, + /// A bid. + Bid = 8, + /// Withdraw information. + Withdraw = 9, + /// Unbonding information. + Unbonding = 10, + /// An `AddressableEntity`. + BidKind = 11, + /// A `Package`. + Package = 12, + /// A record of byte code. + AddressableEntity = 13, + /// A record of byte code. + ByteCode = 14, + /// A message topic. + MessageTopic = 15, + /// A message digest. + Message = 16, + /// A NamedKey record. + NamedKey = 17, + /// A prepayment record. + Prepayment = 18, + /// An entrypoint record. + EntryPoint = 19, + /// Raw bytes. + RawBytes = 20, +} + +/// A value stored in Global State. 
+#[allow(clippy::large_enum_variant)] +#[derive(Eq, PartialEq, Clone, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr( + feature = "json-schema", + derive(JsonSchema), + schemars(with = "serde_helpers::HumanReadableDeserHelper") +)] +pub enum StoredValue { + /// A CLValue. + CLValue(CLValue), + /// An account. + Account(Account), + /// Contract wasm. + ContractWasm(ContractWasm), + /// A contract. + Contract(Contract), + /// A contract package. + ContractPackage(ContractPackage), + /// A version 1 transfer. + Transfer(TransferV1), + /// Info about a deploy. + DeployInfo(DeployInfo), + /// Info about an era. + EraInfo(EraInfo), + /// Variant that stores [`Bid`]. + Bid(Box), + /// Variant that stores withdraw information. + Withdraw(Vec), + /// Unbonding information. + Unbonding(Vec), + /// An `AddressableEntity`. + AddressableEntity(AddressableEntity), + /// Variant that stores [`BidKind`]. + BidKind(BidKind), + /// A smart contract `Package`. + SmartContract(Package), + /// A record of byte code. + ByteCode(ByteCode), + /// Variant that stores a message topic. + MessageTopic(MessageTopicSummary), + /// Variant that stores a message digest. + Message(MessageChecksum), + /// A NamedKey record. + NamedKey(NamedKeyValue), + /// A prepayment record. + Prepayment(PrepaymentKind), + /// An entrypoint record. + EntryPoint(EntryPointValue), + /// Raw bytes. Similar to a [`crate::StoredValue::CLValue`] but does not incur overhead of a + /// [`crate::CLValue`] and [`crate::CLType`]. + RawBytes(#[cfg_attr(feature = "json-schema", schemars(with = "String"))] Vec), +} + +impl StoredValue { + /// Returns a reference to the wrapped `CLValue` if this is a `CLValue` variant. + pub fn as_cl_value(&self) -> Option<&CLValue> { + match self { + StoredValue::CLValue(cl_value) => Some(cl_value), + _ => None, + } + } + + /// Returns a reference to the wrapped `Account` if this is an `Account` variant. 
+ pub fn as_account(&self) -> Option<&Account> { + match self { + StoredValue::Account(account) => Some(account), + _ => None, + } + } + + /// Returns a reference to the wrapped `ByteCode` if this is a `ByteCode` variant. + pub fn as_byte_code(&self) -> Option<&ByteCode> { + match self { + StoredValue::ByteCode(byte_code) => Some(byte_code), + _ => None, + } + } + + pub fn as_contract_wasm(&self) -> Option<&ContractWasm> { + match self { + StoredValue::ContractWasm(contract_wasm) => Some(contract_wasm), + _ => None, + } + } + + /// Returns a reference to the wrapped `Contract` if this is a `Contract` variant. + pub fn as_contract(&self) -> Option<&Contract> { + match self { + StoredValue::Contract(contract) => Some(contract), + _ => None, + } + } + + /// Returns a reference to the wrapped `Package` if this is a `Package` variant. + pub fn as_package(&self) -> Option<&Package> { + match self { + StoredValue::SmartContract(package) => Some(package), + _ => None, + } + } + + /// Returns a reference to the wrapped `ContractPackage` if this is a `ContractPackage` variant. + pub fn as_contract_package(&self) -> Option<&ContractPackage> { + match self { + StoredValue::ContractPackage(package) => Some(package), + _ => None, + } + } + + /// Returns a reference to the wrapped `TransferV1` if this is a `Transfer` variant. + pub fn as_transfer(&self) -> Option<&TransferV1> { + match self { + StoredValue::Transfer(transfer_v1) => Some(transfer_v1), + _ => None, + } + } + + /// Returns a reference to the wrapped `DeployInfo` if this is a `DeployInfo` variant. + pub fn as_deploy_info(&self) -> Option<&DeployInfo> { + match self { + StoredValue::DeployInfo(deploy_info) => Some(deploy_info), + _ => None, + } + } + + /// Returns a reference to the wrapped `EraInfo` if this is an `EraInfo` variant. 
+ pub fn as_era_info(&self) -> Option<&EraInfo> { + match self { + StoredValue::EraInfo(era_info) => Some(era_info), + _ => None, + } + } + + /// Returns a reference to the wrapped `Bid` if this is a `Bid` variant. + pub fn as_bid(&self) -> Option<&Bid> { + match self { + StoredValue::Bid(bid) => Some(bid), + _ => None, + } + } + + /// Returns a reference to the wrapped list of `WithdrawPurse`s if this is a `Withdraw` variant. + pub fn as_withdraw(&self) -> Option<&Vec> { + match self { + StoredValue::Withdraw(withdraw_purses) => Some(withdraw_purses), + _ => None, + } + } + + /// Returns a reference to the wrapped list of `UnbondingPurse`s if this is an `Unbonding` + /// variant. + pub fn as_unbonding(&self) -> Option<&Vec> { + match self { + StoredValue::Unbonding(unbonding_purses) => Some(unbonding_purses), + _ => None, + } + } + + /// Returns a reference to the wrapped list of `UnbondingPurse`s if this is an `Unbonding` + /// variant. + pub fn as_unbond(&self) -> Option<&Unbond> { + match self { + StoredValue::BidKind(BidKind::Unbond(unbond)) => Some(unbond), + _ => None, + } + } + + /// Returns a reference to the wrapped `AddressableEntity` if this is an `AddressableEntity` + /// variant. + pub fn as_addressable_entity(&self) -> Option<&AddressableEntity> { + match self { + StoredValue::AddressableEntity(entity) => Some(entity), + _ => None, + } + } + + /// Returns a reference to the wrapped `MessageTopicSummary` if this is a `MessageTopic` + /// variant. + pub fn as_message_topic_summary(&self) -> Option<&MessageTopicSummary> { + match self { + StoredValue::MessageTopic(summary) => Some(summary), + _ => None, + } + } + + /// Returns a reference to the wrapped `MessageChecksum` if this is a `Message` + /// variant. + pub fn as_message_checksum(&self) -> Option<&MessageChecksum> { + match self { + StoredValue::Message(checksum) => Some(checksum), + _ => None, + } + } + + /// Returns a reference to the wrapped `BidKind` if this is a `BidKind` variant. 
+ pub fn as_bid_kind(&self) -> Option<&BidKind> { + match self { + StoredValue::BidKind(bid_kind) => Some(bid_kind), + _ => None, + } + } + + /// Returns raw bytes if this is a `RawBytes` variant. + pub fn as_raw_bytes(&self) -> Option<&[u8]> { + match self { + StoredValue::RawBytes(bytes) => Some(bytes), + _ => None, + } + } + + /// Returns a reference to the wrapped `EntryPointValue` if this is a `EntryPointValue` variant. + pub fn as_entry_point_value(&self) -> Option<&EntryPointValue> { + match self { + StoredValue::EntryPoint(entry_point) => Some(entry_point), + _ => None, + } + } + + /// Returns the `CLValue` if this is a `CLValue` variant. + pub fn into_cl_value(self) -> Option { + match self { + StoredValue::CLValue(cl_value) => Some(cl_value), + _ => None, + } + } + + /// Returns the `Account` if this is an `Account` variant. + pub fn into_account(self) -> Option { + match self { + StoredValue::Account(account) => Some(account), + _ => None, + } + } + + /// Returns the `ContractWasm` if this is a `ContractWasm` variant. + pub fn into_contract_wasm(self) -> Option { + match self { + StoredValue::ContractWasm(contract_wasm) => Some(contract_wasm), + _ => None, + } + } + + /// Returns the `Contract` if this is a `Contract` variant. + pub fn into_contract(self) -> Option { + match self { + StoredValue::Contract(contract) => Some(contract), + _ => None, + } + } + + /// Returns the `ContractPackage` if this is a `ContractPackage` variant. + pub fn into_contract_package(self) -> Option { + match self { + StoredValue::ContractPackage(contract_package) => Some(contract_package), + _ => None, + } + } + + /// Returns the `Package` if this is a `Package` variant. + pub fn into_package(self) -> Option { + match self { + StoredValue::SmartContract(package) => Some(package), + _ => None, + } + } + + /// Returns the `TransferV1` if this is a `Transfer` variant. 
+ pub fn into_legacy_transfer(self) -> Option { + match self { + StoredValue::Transfer(transfer_v1) => Some(transfer_v1), + _ => None, + } + } + + /// Returns the `DeployInfo` if this is a `DeployInfo` variant. + pub fn into_deploy_info(self) -> Option { + match self { + StoredValue::DeployInfo(deploy_info) => Some(deploy_info), + _ => None, + } + } + + /// Returns the `EraInfo` if this is an `EraInfo` variant. + pub fn into_era_info(self) -> Option { + match self { + StoredValue::EraInfo(era_info) => Some(era_info), + _ => None, + } + } + + /// Returns the `Bid` if this is a `Bid` variant. + pub fn into_bid(self) -> Option { + match self { + StoredValue::Bid(bid) => Some(*bid), + _ => None, + } + } + + /// Returns the list of `WithdrawPurse`s if this is a `Withdraw` variant. + pub fn into_withdraw(self) -> Option> { + match self { + StoredValue::Withdraw(withdraw_purses) => Some(withdraw_purses), + _ => None, + } + } + + /// Returns the list of `UnbondingPurse`s if this is an `Unbonding` variant. + pub fn into_unbonding(self) -> Option> { + match self { + StoredValue::Unbonding(unbonding_purses) => Some(unbonding_purses), + _ => None, + } + } + + /// Returns the `AddressableEntity` if this is an `AddressableEntity` variant. + pub fn into_addressable_entity(self) -> Option { + match self { + StoredValue::AddressableEntity(entity) => Some(entity), + _ => None, + } + } + + /// Returns the `BidKind` if this is a `BidKind` variant. + pub fn into_bid_kind(self) -> Option { + match self { + StoredValue::BidKind(bid_kind) => Some(bid_kind), + _ => None, + } + } + + /// Returns the `EntryPointValue` if this is a `EntryPointValue` variant. + pub fn into_entry_point_value(self) -> Option { + match self { + StoredValue::EntryPoint(value) => Some(value), + _ => None, + } + } + + /// Returns the type name of the [`StoredValue`] enum variant. 
+ /// + /// For [`CLValue`] variants it will return the name of the [`CLType`](crate::cl_type::CLType) + pub fn type_name(&self) -> String { + match self { + StoredValue::CLValue(cl_value) => format!("{:?}", cl_value.cl_type()), + StoredValue::Account(_) => "Account".to_string(), + StoredValue::ContractWasm(_) => "ContractWasm".to_string(), + StoredValue::Contract(_) => "Contract".to_string(), + StoredValue::ContractPackage(_) => "ContractPackage".to_string(), + StoredValue::Transfer(_) => "Transfer".to_string(), + StoredValue::DeployInfo(_) => "DeployInfo".to_string(), + StoredValue::EraInfo(_) => "EraInfo".to_string(), + StoredValue::Bid(_) => "Bid".to_string(), + StoredValue::Withdraw(_) => "Withdraw".to_string(), + StoredValue::Unbonding(_) => "Unbonding".to_string(), + StoredValue::AddressableEntity(_) => "AddressableEntity".to_string(), + StoredValue::BidKind(_) => "BidKind".to_string(), + StoredValue::ByteCode(_) => "ByteCode".to_string(), + StoredValue::SmartContract(_) => "SmartContract".to_string(), + StoredValue::MessageTopic(_) => "MessageTopic".to_string(), + StoredValue::Message(_) => "Message".to_string(), + StoredValue::NamedKey(_) => "NamedKey".to_string(), + StoredValue::Prepayment(_) => "Prepayment".to_string(), + StoredValue::EntryPoint(_) => "EntryPoint".to_string(), + StoredValue::RawBytes(_) => "RawBytes".to_string(), + } + } + + /// Returns the tag of the `StoredValue`. 
+ pub fn tag(&self) -> StoredValueTag { + match self { + StoredValue::CLValue(_) => StoredValueTag::CLValue, + StoredValue::Account(_) => StoredValueTag::Account, + StoredValue::ContractWasm(_) => StoredValueTag::ContractWasm, + StoredValue::ContractPackage(_) => StoredValueTag::ContractPackage, + StoredValue::Contract(_) => StoredValueTag::Contract, + StoredValue::Transfer(_) => StoredValueTag::Transfer, + StoredValue::DeployInfo(_) => StoredValueTag::DeployInfo, + StoredValue::EraInfo(_) => StoredValueTag::EraInfo, + StoredValue::Bid(_) => StoredValueTag::Bid, + StoredValue::Withdraw(_) => StoredValueTag::Withdraw, + StoredValue::Unbonding(_) => StoredValueTag::Unbonding, + StoredValue::AddressableEntity(_) => StoredValueTag::AddressableEntity, + StoredValue::BidKind(_) => StoredValueTag::BidKind, + StoredValue::SmartContract(_) => StoredValueTag::Package, + StoredValue::ByteCode(_) => StoredValueTag::ByteCode, + StoredValue::MessageTopic(_) => StoredValueTag::MessageTopic, + StoredValue::Message(_) => StoredValueTag::Message, + StoredValue::NamedKey(_) => StoredValueTag::NamedKey, + StoredValue::Prepayment(_) => StoredValueTag::Prepayment, + StoredValue::EntryPoint(_) => StoredValueTag::EntryPoint, + StoredValue::RawBytes(_) => StoredValueTag::RawBytes, + } + } + + /// Returns the serialized length of the `StoredValue`. + pub fn into_byte_code(self) -> Option { + match self { + StoredValue::ByteCode(byte_code) => Some(byte_code), + _ => None, + } + } + + /// Returns the serialized length of the `StoredValue`. 
+ pub fn into_named_key(self) -> Option { + match self { + StoredValue::NamedKey(named_key_value) => Some(named_key_value), + _ => None, + } + } +} + +impl From for StoredValue { + fn from(value: CLValue) -> StoredValue { + StoredValue::CLValue(value) + } +} + +impl From for StoredValue { + fn from(value: Account) -> StoredValue { + StoredValue::Account(value) + } +} + +impl From for StoredValue { + fn from(value: ContractWasm) -> Self { + StoredValue::ContractWasm(value) + } +} + +impl From for StoredValue { + fn from(value: ContractPackage) -> Self { + StoredValue::ContractPackage(value) + } +} + +impl From for StoredValue { + fn from(value: Contract) -> Self { + StoredValue::Contract(value) + } +} + +impl From for StoredValue { + fn from(value: AddressableEntity) -> StoredValue { + StoredValue::AddressableEntity(value) + } +} + +impl From for StoredValue { + fn from(value: Package) -> StoredValue { + StoredValue::SmartContract(value) + } +} + +impl From for StoredValue { + fn from(bid: Bid) -> StoredValue { + StoredValue::Bid(Box::new(bid)) + } +} + +impl From for StoredValue { + fn from(bid_kind: BidKind) -> StoredValue { + StoredValue::BidKind(bid_kind) + } +} + +impl From for StoredValue { + fn from(value: ByteCode) -> StoredValue { + StoredValue::ByteCode(value) + } +} + +impl From for StoredValue { + fn from(value: EntryPointValue) -> Self { + StoredValue::EntryPoint(value) + } +} + +impl TryFrom for CLValue { + type Error = TypeMismatch; + + fn try_from(stored_value: StoredValue) -> Result { + let type_name = stored_value.type_name(); + match stored_value { + StoredValue::CLValue(cl_value) => Ok(cl_value), + StoredValue::BidKind(bid_kind) => Ok(CLValue::from_t(bid_kind) + .map_err(|_| TypeMismatch::new("BidKind".to_string(), type_name))?), + StoredValue::ContractPackage(contract_package) => Ok(CLValue::from_t(contract_package) + .map_err(|_error| TypeMismatch::new("ContractPackage".to_string(), type_name))?), + _ => 
Err(TypeMismatch::new("StoredValue".to_string(), type_name)), + } + } +} + +impl TryFrom for Account { + type Error = TypeMismatch; + + fn try_from(stored_value: StoredValue) -> Result { + match stored_value { + StoredValue::Account(account) => Ok(account), + _ => Err(TypeMismatch::new( + "Account".to_string(), + stored_value.type_name(), + )), + } + } +} + +impl TryFrom for ContractWasm { + type Error = TypeMismatch; + + fn try_from(stored_value: StoredValue) -> Result { + match stored_value { + StoredValue::ContractWasm(contract_wasm) => Ok(contract_wasm), + _ => Err(TypeMismatch::new( + "ContractWasm".to_string(), + stored_value.type_name(), + )), + } + } +} + +impl TryFrom for ByteCode { + type Error = TypeMismatch; + + fn try_from(stored_value: StoredValue) -> Result { + match stored_value { + StoredValue::ByteCode(byte_code) => Ok(byte_code), + _ => Err(TypeMismatch::new( + "ByteCode".to_string(), + stored_value.type_name(), + )), + } + } +} + +impl TryFrom for ContractPackage { + type Error = TypeMismatch; + + fn try_from(value: StoredValue) -> Result { + match value { + StoredValue::ContractPackage(contract_package) => Ok(contract_package), + _ => Err(TypeMismatch::new( + "ContractPackage".to_string(), + value.type_name(), + )), + } + } +} + +impl TryFrom for Contract { + type Error = TypeMismatch; + + fn try_from(stored_value: StoredValue) -> Result { + match stored_value { + StoredValue::Contract(contract) => Ok(contract), + _ => Err(TypeMismatch::new( + "Contract".to_string(), + stored_value.type_name(), + )), + } + } +} + +impl TryFrom for Package { + type Error = TypeMismatch; + + fn try_from(stored_value: StoredValue) -> Result { + match stored_value { + StoredValue::SmartContract(contract_package) => Ok(contract_package), + StoredValue::ContractPackage(contract_package) => Ok(contract_package.into()), + _ => Err(TypeMismatch::new( + "ContractPackage".to_string(), + stored_value.type_name(), + )), + } + } +} + +impl TryFrom for AddressableEntity { + 
type Error = TypeMismatch; + + fn try_from(stored_value: StoredValue) -> Result { + match stored_value { + StoredValue::AddressableEntity(contract) => Ok(contract), + _ => Err(TypeMismatch::new( + "AddressableEntity".to_string(), + stored_value.type_name(), + )), + } + } +} + +impl TryFrom for TransferV1 { + type Error = TypeMismatch; + + fn try_from(value: StoredValue) -> Result { + match value { + StoredValue::Transfer(transfer_v1) => Ok(transfer_v1), + _ => Err(TypeMismatch::new("Transfer".to_string(), value.type_name())), + } + } +} + +impl TryFrom for DeployInfo { + type Error = TypeMismatch; + + fn try_from(value: StoredValue) -> Result { + match value { + StoredValue::DeployInfo(deploy_info) => Ok(deploy_info), + _ => Err(TypeMismatch::new( + "DeployInfo".to_string(), + value.type_name(), + )), + } + } +} + +impl TryFrom for EraInfo { + type Error = TypeMismatch; + + fn try_from(value: StoredValue) -> Result { + match value { + StoredValue::EraInfo(era_info) => Ok(era_info), + _ => Err(TypeMismatch::new("EraInfo".to_string(), value.type_name())), + } + } +} + +impl TryFrom for Bid { + type Error = TypeMismatch; + + fn try_from(value: StoredValue) -> Result { + match value { + StoredValue::Bid(bid) => Ok(*bid), + _ => Err(TypeMismatch::new("Bid".to_string(), value.type_name())), + } + } +} + +impl TryFrom for BidKind { + type Error = TypeMismatch; + + fn try_from(value: StoredValue) -> Result { + match value { + StoredValue::BidKind(bid_kind) => Ok(bid_kind), + _ => Err(TypeMismatch::new("BidKind".to_string(), value.type_name())), + } + } +} + +impl TryFrom for NamedKeyValue { + type Error = TypeMismatch; + + fn try_from(value: StoredValue) -> Result { + match value { + StoredValue::NamedKey(named_key_value) => Ok(named_key_value), + _ => Err(TypeMismatch::new( + "NamedKeyValue".to_string(), + value.type_name(), + )), + } + } +} + +impl ToBytes for StoredValue { + fn to_bytes(&self) -> Result, Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + 
self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + + match self { + StoredValue::CLValue(cl_value) => cl_value.serialized_length(), + StoredValue::Account(account) => account.serialized_length(), + StoredValue::ContractWasm(contract_wasm) => contract_wasm.serialized_length(), + StoredValue::Contract(contract_header) => contract_header.serialized_length(), + StoredValue::ContractPackage(contract_package) => { + contract_package.serialized_length() + } + StoredValue::Transfer(transfer_v1) => transfer_v1.serialized_length(), + StoredValue::DeployInfo(deploy_info) => deploy_info.serialized_length(), + StoredValue::EraInfo(era_info) => era_info.serialized_length(), + StoredValue::Bid(bid) => bid.serialized_length(), + StoredValue::Withdraw(withdraw_purses) => withdraw_purses.serialized_length(), + StoredValue::Unbonding(unbonding_purses) => unbonding_purses.serialized_length(), + StoredValue::AddressableEntity(entity) => entity.serialized_length(), + StoredValue::BidKind(bid_kind) => bid_kind.serialized_length(), + StoredValue::SmartContract(package) => package.serialized_length(), + StoredValue::ByteCode(byte_code) => byte_code.serialized_length(), + StoredValue::MessageTopic(message_topic_summary) => { + message_topic_summary.serialized_length() + } + StoredValue::Message(message_digest) => message_digest.serialized_length(), + StoredValue::NamedKey(named_key_value) => named_key_value.serialized_length(), + StoredValue::Prepayment(prepayment_kind) => prepayment_kind.serialized_length(), + StoredValue::EntryPoint(entry_point_value) => entry_point_value.serialized_length(), + StoredValue::RawBytes(bytes) => bytes.serialized_length(), + } + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + writer.push(self.tag() as u8); + match self { + StoredValue::CLValue(cl_value) => cl_value.write_bytes(writer), + StoredValue::Account(account) => account.write_bytes(writer), + 
StoredValue::ContractWasm(contract_wasm) => contract_wasm.write_bytes(writer), + StoredValue::Contract(contract_header) => contract_header.write_bytes(writer), + StoredValue::ContractPackage(contract_package) => contract_package.write_bytes(writer), + StoredValue::Transfer(transfer_v1) => transfer_v1.write_bytes(writer), + StoredValue::DeployInfo(deploy_info) => deploy_info.write_bytes(writer), + StoredValue::EraInfo(era_info) => era_info.write_bytes(writer), + StoredValue::Bid(bid) => bid.write_bytes(writer), + StoredValue::Withdraw(unbonding_purses) => unbonding_purses.write_bytes(writer), + StoredValue::Unbonding(unbonding_purses) => unbonding_purses.write_bytes(writer), + StoredValue::AddressableEntity(entity) => entity.write_bytes(writer), + StoredValue::BidKind(bid_kind) => bid_kind.write_bytes(writer), + StoredValue::SmartContract(package) => package.write_bytes(writer), + StoredValue::ByteCode(byte_code) => byte_code.write_bytes(writer), + StoredValue::MessageTopic(message_topic_summary) => { + message_topic_summary.write_bytes(writer) + } + StoredValue::Message(message_digest) => message_digest.write_bytes(writer), + StoredValue::NamedKey(named_key_value) => named_key_value.write_bytes(writer), + StoredValue::Prepayment(prepayment_kind) => prepayment_kind.write_bytes(writer), + StoredValue::EntryPoint(entry_point_value) => entry_point_value.write_bytes(writer), + StoredValue::RawBytes(bytes) => bytes.write_bytes(writer), + } + } +} + +impl FromBytes for StoredValue { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + match tag { + tag if tag == StoredValueTag::CLValue as u8 => CLValue::from_bytes(remainder) + .map(|(cl_value, remainder)| (StoredValue::CLValue(cl_value), remainder)), + tag if tag == StoredValueTag::Account as u8 => Account::from_bytes(remainder) + .map(|(account, remainder)| (StoredValue::Account(account), remainder)), + tag if tag == StoredValueTag::ContractWasm as 
u8 => ContractWasm::from_bytes(remainder) + .map(|(contract_wasm, remainder)| { + (StoredValue::ContractWasm(contract_wasm), remainder) + }), + tag if tag == StoredValueTag::ContractPackage as u8 => { + ContractPackage::from_bytes(remainder).map(|(contract_package, remainder)| { + (StoredValue::ContractPackage(contract_package), remainder) + }) + } + tag if tag == StoredValueTag::Contract as u8 => Contract::from_bytes(remainder) + .map(|(contract, remainder)| (StoredValue::Contract(contract), remainder)), + tag if tag == StoredValueTag::Transfer as u8 => TransferV1::from_bytes(remainder) + .map(|(transfer_v1, remainder)| (StoredValue::Transfer(transfer_v1), remainder)), + tag if tag == StoredValueTag::DeployInfo as u8 => DeployInfo::from_bytes(remainder) + .map(|(deploy_info, remainder)| (StoredValue::DeployInfo(deploy_info), remainder)), + tag if tag == StoredValueTag::EraInfo as u8 => EraInfo::from_bytes(remainder) + .map(|(deploy_info, remainder)| (StoredValue::EraInfo(deploy_info), remainder)), + tag if tag == StoredValueTag::Bid as u8 => Bid::from_bytes(remainder) + .map(|(bid, remainder)| (StoredValue::Bid(Box::new(bid)), remainder)), + tag if tag == StoredValueTag::BidKind as u8 => BidKind::from_bytes(remainder) + .map(|(bid_kind, remainder)| (StoredValue::BidKind(bid_kind), remainder)), + tag if tag == StoredValueTag::Withdraw as u8 => { + Vec::::from_bytes(remainder).map(|(withdraw_purses, remainder)| { + (StoredValue::Withdraw(withdraw_purses), remainder) + }) + } + tag if tag == StoredValueTag::Unbonding as u8 => { + Vec::::from_bytes(remainder).map(|(unbonding_purses, remainder)| { + (StoredValue::Unbonding(unbonding_purses), remainder) + }) + } + tag if tag == StoredValueTag::AddressableEntity as u8 => { + AddressableEntity::from_bytes(remainder) + .map(|(entity, remainder)| (StoredValue::AddressableEntity(entity), remainder)) + } + tag if tag == StoredValueTag::Package as u8 => Package::from_bytes(remainder) + .map(|(package, remainder)| 
(StoredValue::SmartContract(package), remainder)), + tag if tag == StoredValueTag::ByteCode as u8 => ByteCode::from_bytes(remainder) + .map(|(byte_code, remainder)| (StoredValue::ByteCode(byte_code), remainder)), + tag if tag == StoredValueTag::MessageTopic as u8 => { + MessageTopicSummary::from_bytes(remainder).map(|(message_summary, remainder)| { + (StoredValue::MessageTopic(message_summary), remainder) + }) + } + tag if tag == StoredValueTag::Message as u8 => MessageChecksum::from_bytes(remainder) + .map(|(checksum, remainder)| (StoredValue::Message(checksum), remainder)), + tag if tag == StoredValueTag::NamedKey as u8 => NamedKeyValue::from_bytes(remainder) + .map(|(named_key_value, remainder)| { + (StoredValue::NamedKey(named_key_value), remainder) + }), + tag if tag == StoredValueTag::EntryPoint as u8 => { + EntryPointValue::from_bytes(remainder).map(|(entry_point, remainder)| { + (StoredValue::EntryPoint(entry_point), remainder) + }) + } + tag if tag == StoredValueTag::RawBytes as u8 => { + let (bytes, remainder) = Bytes::from_bytes(remainder)?; + Ok((StoredValue::RawBytes(bytes.into()), remainder)) + } + _ => Err(Error::Formatting), + } + } +} + +pub mod serde_helpers { + use core::fmt::Display; + + use crate::serde_helpers::contract::HumanReadableContract; + + use super::*; + + #[derive(Serialize)] + #[cfg_attr( + feature = "json-schema", + derive(JsonSchema), + schemars( + rename = "StoredValue", + description = "A value stored in Global State." 
+ ) + )] + pub(crate) enum HumanReadableSerHelper<'a> { + CLValue(&'a CLValue), + Account(&'a Account), + ContractWasm(&'a ContractWasm), + Contract(HumanReadableContract), + ContractPackage(&'a ContractPackage), + Transfer(&'a TransferV1), + DeployInfo(&'a DeployInfo), + EraInfo(&'a EraInfo), + Bid(&'a Bid), + Withdraw(&'a Vec), + Unbonding(&'a Vec), + AddressableEntity(&'a AddressableEntity), + BidKind(&'a BidKind), + SmartContract(&'a Package), + ByteCode(&'a ByteCode), + MessageTopic(&'a MessageTopicSummary), + Message(&'a MessageChecksum), + NamedKey(&'a NamedKeyValue), + Prepayment(&'a PrepaymentKind), + EntryPoint(&'a EntryPointValue), + RawBytes(Bytes), + } + + /// A value stored in Global State. + #[derive(Deserialize)] + #[cfg_attr( + feature = "json-schema", + derive(JsonSchema), + schemars( + rename = "StoredValue", + description = "A value stored in Global State." + ) + )] + pub(crate) enum HumanReadableDeserHelper { + /// A CLValue. + CLValue(CLValue), + /// An account. + Account(Account), + /// Contract wasm. + ContractWasm(ContractWasm), + /// A contract. + Contract(HumanReadableContract), + /// A contract package. + ContractPackage(ContractPackage), + /// A version 1 transfer. + Transfer(TransferV1), + /// Info about a deploy. + DeployInfo(DeployInfo), + /// Info about an era. + EraInfo(EraInfo), + /// Variant that stores [`Bid`]. + Bid(Box), + /// Variant that stores withdraw information. + Withdraw(Vec), + /// Unbonding information. + Unbonding(Vec), + /// An `AddressableEntity`. + AddressableEntity(AddressableEntity), + /// Variant that stores [`BidKind`]. + BidKind(BidKind), + /// A smart contract `Package`. + SmartContract(Package), + /// A record of byte code. + ByteCode(ByteCode), + /// Variant that stores a message topic. + MessageTopic(MessageTopicSummary), + /// Variant that stores a message digest. + Message(MessageChecksum), + /// A NamedKey record. + NamedKey(NamedKeyValue), + /// A prepayment record. 
+ EntryPoint(EntryPointValue), + /// An entrypoint record. + Prepayment(PrepaymentKind), + /// Raw bytes. Similar to a [`crate::StoredValue::CLValue`] but does not incur overhead of + /// a [`crate::CLValue`] and [`crate::CLType`]. + RawBytes(Bytes), + } + + impl<'a> From<&'a StoredValue> for HumanReadableSerHelper<'a> { + fn from(stored_value: &'a StoredValue) -> Self { + match stored_value { + StoredValue::CLValue(payload) => HumanReadableSerHelper::CLValue(payload), + StoredValue::Account(payload) => HumanReadableSerHelper::Account(payload), + StoredValue::ContractWasm(payload) => HumanReadableSerHelper::ContractWasm(payload), + StoredValue::Contract(payload) => HumanReadableSerHelper::Contract(payload.into()), + StoredValue::ContractPackage(payload) => { + HumanReadableSerHelper::ContractPackage(payload) + } + StoredValue::Transfer(payload) => HumanReadableSerHelper::Transfer(payload), + StoredValue::DeployInfo(payload) => HumanReadableSerHelper::DeployInfo(payload), + StoredValue::EraInfo(payload) => HumanReadableSerHelper::EraInfo(payload), + StoredValue::Bid(payload) => HumanReadableSerHelper::Bid(payload), + StoredValue::Withdraw(payload) => HumanReadableSerHelper::Withdraw(payload), + StoredValue::Unbonding(payload) => HumanReadableSerHelper::Unbonding(payload), + StoredValue::AddressableEntity(payload) => { + HumanReadableSerHelper::AddressableEntity(payload) + } + StoredValue::BidKind(payload) => HumanReadableSerHelper::BidKind(payload), + StoredValue::SmartContract(payload) => { + HumanReadableSerHelper::SmartContract(payload) + } + StoredValue::ByteCode(payload) => HumanReadableSerHelper::ByteCode(payload), + StoredValue::MessageTopic(message_topic_summary) => { + HumanReadableSerHelper::MessageTopic(message_topic_summary) + } + StoredValue::Message(message_digest) => { + HumanReadableSerHelper::Message(message_digest) + } + StoredValue::NamedKey(payload) => HumanReadableSerHelper::NamedKey(payload), + StoredValue::Prepayment(payload) => 
HumanReadableSerHelper::Prepayment(payload), + StoredValue::EntryPoint(payload) => HumanReadableSerHelper::EntryPoint(payload), + StoredValue::RawBytes(bytes) => { + HumanReadableSerHelper::RawBytes(bytes.as_slice().into()) + } + } + } + } + + /// Parsing error when deserializing StoredValue. + #[derive(Debug, Clone)] + pub enum StoredValueDeserializationError { + /// Contract not deserializable. + CouldNotDeserializeContract(String), + } + + impl Display for StoredValueDeserializationError { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + match self { + StoredValueDeserializationError::CouldNotDeserializeContract(reason) => { + write!( + f, + "Could not deserialize StoredValue::Contract. Reason: {reason}" + ) + } + } + } + } + + impl TryFrom for StoredValue { + type Error = StoredValueDeserializationError; + fn try_from(helper: HumanReadableDeserHelper) -> Result { + Ok(match helper { + HumanReadableDeserHelper::CLValue(payload) => StoredValue::CLValue(payload), + HumanReadableDeserHelper::Account(payload) => StoredValue::Account(payload), + HumanReadableDeserHelper::ContractWasm(payload) => { + StoredValue::ContractWasm(payload) + } + HumanReadableDeserHelper::Contract(payload) => { + StoredValue::Contract(Contract::try_from(payload).map_err(|e| { + StoredValueDeserializationError::CouldNotDeserializeContract(e.to_string()) + })?) 
+ } + HumanReadableDeserHelper::ContractPackage(payload) => { + StoredValue::ContractPackage(payload) + } + HumanReadableDeserHelper::Transfer(payload) => StoredValue::Transfer(payload), + HumanReadableDeserHelper::DeployInfo(payload) => StoredValue::DeployInfo(payload), + HumanReadableDeserHelper::EraInfo(payload) => StoredValue::EraInfo(payload), + HumanReadableDeserHelper::Bid(bid) => StoredValue::Bid(bid), + HumanReadableDeserHelper::Withdraw(payload) => StoredValue::Withdraw(payload), + HumanReadableDeserHelper::Unbonding(payload) => StoredValue::Unbonding(payload), + HumanReadableDeserHelper::AddressableEntity(payload) => { + StoredValue::AddressableEntity(payload) + } + HumanReadableDeserHelper::BidKind(payload) => StoredValue::BidKind(payload), + HumanReadableDeserHelper::ByteCode(payload) => StoredValue::ByteCode(payload), + HumanReadableDeserHelper::SmartContract(payload) => { + StoredValue::SmartContract(payload) + } + HumanReadableDeserHelper::MessageTopic(message_topic_summary) => { + StoredValue::MessageTopic(message_topic_summary) + } + HumanReadableDeserHelper::Message(message_digest) => { + StoredValue::Message(message_digest) + } + HumanReadableDeserHelper::NamedKey(payload) => StoredValue::NamedKey(payload), + HumanReadableDeserHelper::EntryPoint(payload) => StoredValue::EntryPoint(payload), + HumanReadableDeserHelper::RawBytes(bytes) => StoredValue::RawBytes(bytes.into()), + HumanReadableDeserHelper::Prepayment(prepayment_kind) => { + StoredValue::Prepayment(prepayment_kind) + } + }) + } + } +} + +impl Serialize for StoredValue { + fn serialize(&self, serializer: S) -> Result { + if serializer.is_human_readable() { + serde_helpers::HumanReadableSerHelper::from(self).serialize(serializer) + } else { + let bytes = self + .to_bytes() + .map_err(|error| ser::Error::custom(format!("{:?}", error)))?; + ByteBuf::from(bytes).serialize(serializer) + } + } +} + +impl<'de> Deserialize<'de> for StoredValue { + fn deserialize>(deserializer: D) -> Result { + 
if deserializer.is_human_readable() { + let json_helper = serde_helpers::HumanReadableDeserHelper::deserialize(deserializer)?; + StoredValue::try_from(json_helper).map_err(de::Error::custom) + } else { + let bytes = ByteBuf::deserialize(deserializer)?.into_vec(); + bytesrepr::deserialize::(bytes) + .map_err(|error| de::Error::custom(format!("{:?}", error))) + } + } +} + +#[cfg(test)] +mod tests { + use crate::{bytesrepr, gens, StoredValue}; + use proptest::proptest; + use serde_json::Value; + + const STORED_VALUE_CONTRACT_RAW: &str = r#"{ + "Contract": { + "contract_package_hash": "contract-package-e26c7f95890f99b4d476609649939910e636e175c428add9b403ebe597673005", + "contract_wasm_hash": "contract-wasm-8447a228c6055df42fcedb18804786abcab0e7aed00e94ad0fc0a34cd09509fb", + "named_keys": [ + { + "name": "count_v2.0", + "key": "uref-53834a8313fa5eda357a75ef8eb017e1ed30bc64e6dbaa81a41abd0ffd761586-007" + } + ], + "entry_points": [ + { + "name": "counter_get", + "args": [], + "ret": "I32", + "access": "Public", + "entry_point_type": "Caller" + }, + { + "name": "counter_inc", + "args": [], + "ret": "Unit", + "access": "Public", + "entry_point_type": "Called" + }, + { + "name": "counter_zero", + "args": [], + "ret": "Unit", + "access": "Public", + "entry_point_type": "Factory" + } + + ], + "protocol_version": "2.0.0" + } +} + "#; + + const JSON_CONTRACT_NON_UNIQUE_ENTRYPOINT_NAMES_RAW: &str = r#"{ + "Contract": { + "contract_package_hash": "contract-package-e26c7f95890f99b4d476609649939910e636e175c428add9b403ebe597673005", + "contract_wasm_hash": "contract-wasm-8447a228c6055df42fcedb18804786abcab0e7aed00e94ad0fc0a34cd09509fb", + "named_keys": [ + { + "name": "count_v2.0", + "key": "uref-53834a8313fa5eda357a75ef8eb017e1ed30bc64e6dbaa81a41abd0ffd761586-007" + } + ], + "entry_points": [ + { + "name": "counter_get", + "args": [], + "ret": "I32", + "access": "Public", + "entry_point_type": "Caller" + }, + { + "name": "counter_get", + "args": [], + "ret": "Unit", + "access": 
"Public", + "entry_point_type": "Called" + }, + { + "name": "counter_inc", + "args": [], + "ret": "Unit", + "access": "Public", + "entry_point_type": "Factory" + } + ], + "protocol_version": "2.0.0" + } + } + "#; + + const STORED_VALUE_CONTRACT_PACKAGE_RAW: &str = r#" + { + "ContractPackage": { + "access_key": "uref-024d69e50a458f337817d3d11ba95bdbdd6258ba8f2dc980644c9efdbd64945d-007", + "versions": [ + { + "protocol_version_major": 1, + "contract_version": 1, + "contract_hash": "contract-1b301b49505ec5eaec1787686c54818bd60836b9301cce3f5c0237560e5a4bfd" + } + ], + "disabled_versions": [], + "groups": [], + "lock_status": "Unlocked" + } + }"#; + + const INCORRECT_STORED_VALUE_CONTRACT_PACKAGE_RAW: &str = r#" + { + "ContractPackage": { + "access_key": "uref-024d69e50a458f337817d3d11ba95bdbdd6258ba8f2dc980644c9efdbd64945d-007", + "versions": [ + { + "protocol_version_major": 1, + "contract_version": 1, + "contract_hash": "contract-1b301b49505ec5eaec1787686c54818bd60836b9301cce3f5c0237560e5a4bfd" + }, + { + "protocol_version_major": 1, + "contract_version": 1, + "contract_hash": "contract-1b301b49505ec5eaec1787686c54818bd60836b9301cce3f5c0237560e5a4bfe" + } + ], + "disabled_versions": [], + "groups": [], + "lock_status": "Unlocked" + } + } + "#; + + #[test] + fn cannot_deserialize_contract_with_non_unique_entry_point_names() { + let res = + serde_json::from_str::(JSON_CONTRACT_NON_UNIQUE_ENTRYPOINT_NAMES_RAW); + assert!(res.is_err()); + assert_eq!( + res.err().unwrap().to_string(), + "Could not deserialize StoredValue::Contract. 
Reason: Non unique `entry_points.name`" + ) + } + + #[test] + fn contract_stored_value_serializes_entry_points_to_flat_array() { + let value_from_raw_json = serde_json::from_str::(STORED_VALUE_CONTRACT_RAW).unwrap(); + let deserialized = serde_json::from_str::(STORED_VALUE_CONTRACT_RAW).unwrap(); + let roundtrip_value = serde_json::to_value(&deserialized).unwrap(); + assert_eq!(value_from_raw_json, roundtrip_value); + } + + #[test] + fn contract_package_stored_value_serializes_versions_to_flat_array() { + let value_from_raw_json = + serde_json::from_str::(STORED_VALUE_CONTRACT_PACKAGE_RAW).unwrap(); + let deserialized = + serde_json::from_str::(STORED_VALUE_CONTRACT_PACKAGE_RAW).unwrap(); + let roundtrip_value = serde_json::to_value(&deserialized).unwrap(); + assert_eq!(value_from_raw_json, roundtrip_value); + } + + #[test] + fn contract_package_stored_value_should_fail_on_duplicate_keys() { + let deserialization_res = + serde_json::from_str::(INCORRECT_STORED_VALUE_CONTRACT_PACKAGE_RAW); + assert!(deserialization_res.is_err()); + assert!(deserialization_res + .unwrap_err() + .to_string() + .contains("duplicate contract version: ContractVersionKey(1, 1)")); + } + + #[test] + fn json_serialization_of_raw_bytes() { + let stored_value = StoredValue::RawBytes(vec![1, 2, 3, 4]); + assert_eq!( + serde_json::to_string(&stored_value).unwrap(), + r#"{"RawBytes":"01020304"}"# + ); + } + + proptest! 
{ + + #[test] + fn json_serialization_roundtrip(v in gens::stored_value_arb()) { + let json_str = serde_json::to_string(&v).unwrap(); + let deserialized = serde_json::from_str::(&json_str).unwrap(); + assert_eq!(v, deserialized); + } + + #[test] + fn serialization_roundtrip(v in gens::stored_value_arb()) { + bytesrepr::test_serialization_roundtrip(&v); + } + } +} diff --git a/types/src/stored_value/global_state_identifier.rs b/types/src/stored_value/global_state_identifier.rs new file mode 100644 index 0000000000..a909f05f52 --- /dev/null +++ b/types/src/stored_value/global_state_identifier.rs @@ -0,0 +1,128 @@ +use alloc::vec::Vec; + +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + BlockHash, BlockIdentifier, Digest, +}; +#[cfg(any(feature = "testing", test))] +use rand::Rng; + +const BLOCK_HASH_TAG: u8 = 0; +const BLOCK_HEIGHT_TAG: u8 = 1; +const STATE_ROOT_HASH_TAG: u8 = 2; + +/// Identifier for possible ways to query Global State +#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq)] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub enum GlobalStateIdentifier { + /// Query using a block hash. + BlockHash(BlockHash), + /// Query using a block height. + BlockHeight(u64), + /// Query using the state root hash. + StateRootHash(Digest), +} + +impl GlobalStateIdentifier { + /// Random. 
+ #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + match rng.gen_range(0..3) { + 0 => Self::BlockHash(BlockHash::random(rng)), + 1 => Self::BlockHeight(rng.gen()), + 2 => Self::StateRootHash(Digest::random(rng)), + _ => panic!(), + } + } +} + +impl From for GlobalStateIdentifier { + fn from(block_identifier: BlockIdentifier) -> Self { + match block_identifier { + BlockIdentifier::Hash(block_hash) => GlobalStateIdentifier::BlockHash(block_hash), + BlockIdentifier::Height(block_height) => { + GlobalStateIdentifier::BlockHeight(block_height) + } + } + } +} + +impl FromBytes for GlobalStateIdentifier { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + match bytes.split_first() { + Some((&BLOCK_HASH_TAG, rem)) => { + let (block_hash, rem) = FromBytes::from_bytes(rem)?; + Ok((GlobalStateIdentifier::BlockHash(block_hash), rem)) + } + Some((&BLOCK_HEIGHT_TAG, rem)) => { + let (block_height, rem) = FromBytes::from_bytes(rem)?; + Ok((GlobalStateIdentifier::BlockHeight(block_height), rem)) + } + Some((&STATE_ROOT_HASH_TAG, rem)) => { + let (state_root_hash, rem) = FromBytes::from_bytes(rem)?; + Ok((GlobalStateIdentifier::StateRootHash(state_root_hash), rem)) + } + Some(_) | None => Err(bytesrepr::Error::Formatting), + } + } +} + +impl ToBytes for GlobalStateIdentifier { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + GlobalStateIdentifier::BlockHash(block_hash) => { + writer.push(BLOCK_HASH_TAG); + block_hash.write_bytes(writer)?; + } + GlobalStateIdentifier::BlockHeight(block_height) => { + writer.push(BLOCK_HEIGHT_TAG); + block_height.write_bytes(writer)?; + } + GlobalStateIdentifier::StateRootHash(state_root_hash) => { + writer.push(STATE_ROOT_HASH_TAG); + state_root_hash.write_bytes(writer)?; + } + } + Ok(()) 
+ } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + + match self { + GlobalStateIdentifier::BlockHash(block_hash) => block_hash.serialized_length(), + GlobalStateIdentifier::BlockHeight(block_height) => { + block_height.serialized_length() + } + GlobalStateIdentifier::StateRootHash(state_root_hash) => { + state_root_hash.serialized_length() + } + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::testing::TestRng; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + + let val = GlobalStateIdentifier::random(rng); + bytesrepr::test_serialization_roundtrip(&val); + } +} diff --git a/types/src/stored_value/type_mismatch.rs b/types/src/stored_value/type_mismatch.rs new file mode 100644 index 0000000000..d866f976db --- /dev/null +++ b/types/src/stored_value/type_mismatch.rs @@ -0,0 +1,68 @@ +use alloc::{string::String, vec::Vec}; +use core::fmt::{self, Display, Formatter}; +#[cfg(feature = "std")] +use std::error::Error as StdError; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use crate::bytesrepr::{self, FromBytes, ToBytes}; + +/// An error struct representing a type mismatch in [`StoredValue`](crate::StoredValue) operations. +#[derive(PartialEq, Eq, Debug, Clone, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub struct TypeMismatch { + /// The name of the expected type. + expected: String, + /// The actual type found. + found: String, +} + +impl TypeMismatch { + /// Creates a new `TypeMismatch`. + pub fn new(expected: String, found: String) -> TypeMismatch { + TypeMismatch { expected, found } + } +} + +impl Display for TypeMismatch { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + write!( + formatter, + "Type mismatch. 
Expected {} but found {}.", + self.expected, self.found + ) + } +} + +impl ToBytes for TypeMismatch { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.expected.write_bytes(writer)?; + self.found.write_bytes(writer) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.expected.serialized_length() + self.found.serialized_length() + } +} + +impl FromBytes for TypeMismatch { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (expected, remainder) = String::from_bytes(bytes)?; + let (found, remainder) = String::from_bytes(remainder)?; + Ok((TypeMismatch { expected, found }, remainder)) + } +} + +#[cfg(feature = "std")] +impl StdError for TypeMismatch {} diff --git a/types/src/system.rs b/types/src/system.rs new file mode 100644 index 0000000000..a6d2cdd551 --- /dev/null +++ b/types/src/system.rs @@ -0,0 +1,13 @@ +//! System modules, formerly known as "system contracts" +pub mod auction; +mod caller; +mod error; +pub mod handle_payment; +pub mod mint; +pub mod prepayment; +pub mod standard_payment; +mod system_contract_type; + +pub use caller::{CallStackElement, Caller, CallerInfo, CallerTag}; +pub use error::Error; +pub use system_contract_type::{SystemEntityType, AUCTION, HANDLE_PAYMENT, MINT, STANDARD_PAYMENT}; diff --git a/types/src/system/auction.rs b/types/src/system/auction.rs new file mode 100644 index 0000000000..0bb4c03c6d --- /dev/null +++ b/types/src/system/auction.rs @@ -0,0 +1,605 @@ +//! Contains implementation of the Auction contract functionality. 
+mod bid; +mod bid_addr; +mod bid_kind; +mod bridge; +mod constants; +mod delegator; +mod delegator_bid; +mod delegator_kind; +mod entry_points; +mod era_info; +mod error; +mod reservation; +mod seigniorage_recipient; +mod unbond; +mod unbonding_purse; +mod validator_bid; +mod validator_credit; +mod withdraw_purse; + +#[cfg(any(all(feature = "std", feature = "testing"), test))] +use alloc::collections::btree_map::Entry; +#[cfg(any(all(feature = "std", feature = "testing"), test))] +use itertools::Itertools; + +use alloc::{boxed::Box, collections::BTreeMap, vec::Vec}; + +pub use bid::{Bid, VESTING_SCHEDULE_LENGTH_MILLIS}; +pub use bid_addr::{BidAddr, BidAddrTag}; +pub use bid_kind::{BidKind, BidKindTag}; +pub use bridge::Bridge; +pub use constants::*; +pub use delegator::Delegator; +pub use delegator_bid::DelegatorBid; +pub use delegator_kind::DelegatorKind; +pub use entry_points::auction_entry_points; +pub use era_info::{EraInfo, SeigniorageAllocation}; +pub use error::Error; +pub use reservation::Reservation; +pub use seigniorage_recipient::{ + SeigniorageRecipient, SeigniorageRecipientV1, SeigniorageRecipientV2, +}; +pub use unbond::{Unbond, UnbondEra, UnbondKind}; +pub use unbonding_purse::UnbondingPurse; +pub use validator_bid::ValidatorBid; +pub use validator_credit::ValidatorCredit; +pub use withdraw_purse::WithdrawPurse; + +#[cfg(any(feature = "testing", test))] +pub(crate) mod gens { + pub use super::era_info::gens::*; +} + +use crate::{account::AccountHash, EraId, PublicKey, U512}; + +/// Representation of delegation rate of tokens. Range from 0..=100. +pub type DelegationRate = u8; + +/// Validators mapped to their bids. +pub type ValidatorBids = BTreeMap>; + +/// Delegator bids mapped to their validator. +pub type DelegatorBids = BTreeMap>>; + +/// Reservations mapped to their validator. +pub type Reservations = BTreeMap>>; + +/// Validators mapped to their credits by era. +pub type ValidatorCredits = BTreeMap>>; + +/// Weights of validators. 
"Weight" in this context means a sum of their stakes. +pub type ValidatorWeights = BTreeMap; + +#[derive(Debug)] +pub struct WeightsBreakout { + locked: ValidatorWeights, + unlocked_meets_min: ValidatorWeights, + unlocked_below_min: ValidatorWeights, +} + +impl WeightsBreakout { + pub fn new() -> Self { + WeightsBreakout { + locked: BTreeMap::default(), + unlocked_meets_min: BTreeMap::default(), + unlocked_below_min: BTreeMap::default(), + } + } + + pub fn register( + &mut self, + public_key: PublicKey, + weight: U512, + locked: bool, + meets_minimum: bool, + ) { + if locked { + self.locked.insert(public_key, weight); + } else if meets_minimum { + self.unlocked_meets_min.insert(public_key, weight); + } else { + self.unlocked_below_min.insert(public_key, weight); + } + } + + /// The count of locked weights. + pub fn locked_count(&self) -> usize { + self.locked.len() + } + + /// The count of unlocked weights with at least minimum bid amount. + pub fn unlocked_meets_min_count(&self) -> usize { + self.unlocked_meets_min.len() + } + + /// The count of unlocked weights that do not meet minimum bid amount. + pub fn unlocked_below_min_count(&self) -> usize { + self.unlocked_below_min.len() + } + + /// Takes all locked and remaining slots number of unlocked meets min. 
+ pub fn take(self, validator_slots: usize, threshold: usize) -> ValidatorWeights { + let locked_count = self.locked.len(); + if locked_count >= validator_slots { + // locked validators are taken even if exceeding validator_slots count + // they are literally locked in + return self.locked; + } + let remaining_auction_slots = validator_slots.saturating_sub(locked_count); + let mut unlocked_hi = self + .unlocked_meets_min + .iter() + .map(|(public_key, validator_bid)| (public_key.clone(), *validator_bid)) + .collect::>(); + // sort highest to lowest (rhs to lhs) + unlocked_hi.sort_by(|(_, lhs), (_, rhs)| rhs.cmp(lhs)); + let unlocked_hi_count = unlocked_hi.len(); + let combined_count = unlocked_hi_count.saturating_add(locked_count); + let unlocked_low_count = self.unlocked_below_min.len(); + if unlocked_low_count == 0 + || unlocked_hi_count >= remaining_auction_slots + || combined_count >= threshold + { + return self + .locked + .into_iter() + .chain(unlocked_hi.into_iter().take(remaining_auction_slots)) + .collect(); + } + + // we have fewer locked bids and bids >= min bid than the safety threshold, + // so we will attempt to backfill slots up to the safety threshold from otherwise + // valid bids that have less than the min bid + let backfill_count = threshold.saturating_sub(combined_count); + let mut unlocked_low = self + .unlocked_below_min + .iter() + .map(|(public_key, validator_bid)| (public_key.clone(), *validator_bid)) + .collect::>(); + // sort highest to lowest (rhs to lhs) + unlocked_low.sort_by(|(_, lhs), (_, rhs)| rhs.cmp(lhs)); + self.locked + .into_iter() + .chain(unlocked_hi.into_iter().take(remaining_auction_slots)) + .chain(unlocked_low.into_iter().take(backfill_count)) + .collect() + } +} + +impl Default for WeightsBreakout { + fn default() -> Self { + Self::new() + } +} + +/// List of era validators +pub type EraValidators = BTreeMap; + +/// Collection of seigniorage recipients. Legacy version. 
+pub type SeigniorageRecipientsV1 = BTreeMap; +/// Collection of seigniorage recipients. +pub type SeigniorageRecipientsV2 = BTreeMap; +/// Wrapper enum for all variants of `SeigniorageRecipients`. +#[allow(missing_docs)] +pub enum SeigniorageRecipients { + V1(SeigniorageRecipientsV1), + V2(SeigniorageRecipientsV2), +} + +/// Snapshot of `SeigniorageRecipients` for a given era. Legacy version. +pub type SeigniorageRecipientsSnapshotV1 = BTreeMap; +/// Snapshot of `SeigniorageRecipients` for a given era. +pub type SeigniorageRecipientsSnapshotV2 = BTreeMap; +/// Wrapper enum for all variants of `SeigniorageRecipientsSnapshot`. +#[derive(Debug)] +#[allow(missing_docs)] +pub enum SeigniorageRecipientsSnapshot { + V1(SeigniorageRecipientsSnapshotV1), + V2(SeigniorageRecipientsSnapshotV2), +} + +impl SeigniorageRecipientsSnapshot { + /// Returns rewards for given validator in a specified era + pub fn get_seignorage_recipient( + &self, + era_id: &EraId, + validator_public_key: &PublicKey, + ) -> Option { + match self { + Self::V1(snapshot) => snapshot.get(era_id).and_then(|era| { + era.get(validator_public_key) + .map(|recipient| SeigniorageRecipient::V1(recipient.clone())) + }), + Self::V2(snapshot) => snapshot.get(era_id).and_then(|era| { + era.get(validator_public_key) + .map(|recipient| SeigniorageRecipient::V2(recipient.clone())) + }), + } + } +} + +/// Validators and delegators mapped to their withdraw purses. +pub type WithdrawPurses = BTreeMap>; + +/// Aggregated representation of validator and associated delegator bids. +pub type Staking = BTreeMap)>; + +/// Utils for working with a vector of BidKind. +#[cfg(any(all(feature = "std", feature = "testing"), test))] +pub trait BidsExt { + /// Returns Bid matching public_key, if present. + fn unified_bid(&self, public_key: &PublicKey) -> Option; + + /// Returns ValidatorBid matching public_key, if present. 
+ fn validator_bid(&self, public_key: &PublicKey) -> Option; + + /// Returns a bridge record matching old and new public key, if present. + fn bridge( + &self, + public_key: &PublicKey, + new_public_key: &PublicKey, + era_id: &EraId, + ) -> Option; + + /// Returns ValidatorCredit matching public_key, if present. + fn credit(&self, public_key: &PublicKey) -> Option; + + /// Returns total validator stake, if present. + fn validator_total_stake(&self, public_key: &PublicKey) -> Option; + + /// Returns Delegator entries matching validator public key, if present. + fn delegators_by_validator_public_key( + &self, + public_key: &PublicKey, + ) -> Option>; + + /// Returns Delegator entry, if present. + fn delegator_by_kind( + &self, + validator_public_key: &PublicKey, + delegator_kind: &DelegatorKind, + ) -> Option; + + /// Returns Reservation entries matching validator public key, if present. + fn reservations_by_validator_public_key( + &self, + public_key: &PublicKey, + ) -> Option>; + + /// Returns Reservation entry, if present. + fn reservation_by_kind( + &self, + validator_public_key: &PublicKey, + delegator_kind: &DelegatorKind, + ) -> Option; + + /// Returns Unbond entry, if present. + fn unbond_by_kind( + &self, + validator_public_key: &PublicKey, + unbond_kind: &UnbondKind, + ) -> Option; + + /// Returns true if containing any elements matching the provided validator public key. + fn contains_validator_public_key(&self, public_key: &PublicKey) -> bool; + + /// Removes any items with a public key matching the provided validator public key. + fn remove_by_validator_public_key(&mut self, public_key: &PublicKey); + + /// Creates a map of Validator public keys to associated Delegators. + fn delegator_map(&self) -> BTreeMap>; + + /// Inserts if bid_kind does not exist, otherwise replaces. 
+ fn upsert(&mut self, bid_kind: BidKind); +} + +#[cfg(any(all(feature = "std", feature = "testing"), test))] +impl BidsExt for Vec { + fn unified_bid(&self, public_key: &PublicKey) -> Option { + if let BidKind::Unified(bid) = self + .iter() + .find(|x| x.is_validator() && &x.validator_public_key() == public_key)? + { + Some(*bid.clone()) + } else { + None + } + } + + fn validator_bid(&self, public_key: &PublicKey) -> Option { + if let BidKind::Validator(validator_bid) = self + .iter() + .find(|x| x.is_validator() && &x.validator_public_key() == public_key)? + { + Some(*validator_bid.clone()) + } else { + None + } + } + + fn bridge( + &self, + public_key: &PublicKey, + new_public_key: &PublicKey, + era_id: &EraId, + ) -> Option { + self.iter().find_map(|x| match x { + BidKind::Bridge(bridge) + if bridge.old_validator_public_key() == public_key + && bridge.new_validator_public_key() == new_public_key + && bridge.era_id() == era_id => + { + Some(*bridge.clone()) + } + _ => None, + }) + } + + fn credit(&self, public_key: &PublicKey) -> Option { + if let BidKind::Credit(credit) = self + .iter() + .find(|x| x.is_credit() && &x.validator_public_key() == public_key)? + { + Some(*credit.clone()) + } else { + None + } + } + + fn validator_total_stake(&self, public_key: &PublicKey) -> Option { + if let Some(validator_bid) = self.validator_bid(public_key) { + let delegator_stake = { + match self.delegators_by_validator_public_key(validator_bid.validator_public_key()) + { + None => U512::zero(), + Some(delegators) => delegators.iter().map(|x| x.staked_amount()).sum(), + } + }; + return Some(validator_bid.staked_amount() + delegator_stake); + } + + if let BidKind::Unified(bid) = self + .iter() + .find(|x| x.is_validator() && &x.validator_public_key() == public_key)? 
+ { + return Some(*bid.staked_amount()); + } + + None + } + + fn delegators_by_validator_public_key( + &self, + public_key: &PublicKey, + ) -> Option> { + let mut ret = vec![]; + for delegator in self + .iter() + .filter(|x| x.is_delegator() && &x.validator_public_key() == public_key) + { + if let BidKind::Delegator(delegator) = delegator { + ret.push(*delegator.clone()); + } + } + + if ret.is_empty() { + None + } else { + Some(ret) + } + } + + fn delegator_by_kind( + &self, + validator_public_key: &PublicKey, + delegator_kind: &DelegatorKind, + ) -> Option { + if let BidKind::Delegator(delegator) = self.iter().find(|x| { + x.is_delegator() + && &x.validator_public_key() == validator_public_key + && x.delegator_kind() == Some(delegator_kind.clone()) + })? { + Some(*delegator.clone()) + } else { + None + } + } + + fn reservations_by_validator_public_key( + &self, + validator_public_key: &PublicKey, + ) -> Option> { + let mut ret = vec![]; + for reservation in self + .iter() + .filter(|x| x.is_reservation() && &x.validator_public_key() == validator_public_key) + { + if let BidKind::Reservation(reservation) = reservation { + ret.push(*reservation.clone()); + } + } + + if ret.is_empty() { + None + } else { + Some(ret) + } + } + + fn reservation_by_kind( + &self, + validator_public_key: &PublicKey, + delegator_kind: &DelegatorKind, + ) -> Option { + if let BidKind::Reservation(reservation) = self.iter().find(|x| { + x.is_reservation() + && &x.validator_public_key() == validator_public_key + && x.delegator_kind() == Some(delegator_kind.clone()) + })? { + Some(*reservation.clone()) + } else { + None + } + } + + fn unbond_by_kind( + &self, + validator_public_key: &PublicKey, + unbond_kind: &UnbondKind, + ) -> Option { + if let BidKind::Unbond(unbond) = self.iter().find(|x| { + x.is_unbond() + && &x.validator_public_key() == validator_public_key + && x.unbond_kind() == Some(unbond_kind.clone()) + })? 
{ + Some(*unbond.clone()) + } else { + None + } + } + + fn contains_validator_public_key(&self, public_key: &PublicKey) -> bool { + self.iter().any(|x| &x.validator_public_key() == public_key) + } + + fn remove_by_validator_public_key(&mut self, public_key: &PublicKey) { + self.retain(|x| &x.validator_public_key() != public_key) + } + + fn delegator_map(&self) -> BTreeMap> { + let mut ret = BTreeMap::new(); + let validators = self + .iter() + .filter(|x| x.is_validator()) + .cloned() + .collect_vec(); + for bid_kind in validators { + ret.insert(bid_kind.validator_public_key().clone(), vec![]); + } + let delegators = self + .iter() + .filter(|x| x.is_delegator()) + .cloned() + .collect_vec(); + for bid_kind in delegators { + if let BidKind::Delegator(delegator) = bid_kind { + match ret.entry(delegator.validator_public_key().clone()) { + Entry::Vacant(ve) => { + ve.insert(vec![delegator.delegator_kind().clone()]); + } + Entry::Occupied(mut oe) => { + let delegators = oe.get_mut(); + delegators.push(delegator.delegator_kind().clone()) + } + } + } + } + let unified = self + .iter() + .filter(|x| x.is_unified()) + .cloned() + .collect_vec(); + for bid_kind in unified { + if let BidKind::Unified(unified) = bid_kind { + let delegators = unified + .delegators() + .iter() + .map(|(_, y)| DelegatorKind::PublicKey(y.delegator_public_key().clone())) + .collect(); + ret.insert(unified.validator_public_key().clone(), delegators); + } + } + ret + } + + fn upsert(&mut self, bid_kind: BidKind) { + let maybe_index = match bid_kind { + BidKind::Unified(_) | BidKind::Validator(_) => self + .iter() + .find_position(|x| { + x.validator_public_key() == bid_kind.validator_public_key() + && x.tag() == bid_kind.tag() + }) + .map(|(idx, _)| idx), + BidKind::Delegator(_) => self + .iter() + .find_position(|x| { + x.is_delegator() + && x.validator_public_key() == bid_kind.validator_public_key() + && x.delegator_kind() == bid_kind.delegator_kind() + }) + .map(|(idx, _)| idx), + 
BidKind::Bridge(_) => self + .iter() + .find_position(|x| { + x.is_bridge() + && x.validator_public_key() == bid_kind.validator_public_key() + && x.new_validator_public_key() == bid_kind.new_validator_public_key() + && x.era_id() == bid_kind.era_id() + }) + .map(|(idx, _)| idx), + BidKind::Credit(_) => self + .iter() + .find_position(|x| { + x.validator_public_key() == bid_kind.validator_public_key() + && x.tag() == bid_kind.tag() + && x.era_id() == bid_kind.era_id() + }) + .map(|(idx, _)| idx), + BidKind::Reservation(_) => self + .iter() + .find_position(|x| { + x.is_reservation() + && x.validator_public_key() == bid_kind.validator_public_key() + && x.delegator_kind() == bid_kind.delegator_kind() + }) + .map(|(idx, _)| idx), + BidKind::Unbond(_) => self + .iter() + .find_position(|x| { + x.is_unbond() + && x.validator_public_key() == bid_kind.validator_public_key() + && x.unbond_kind() == bid_kind.unbond_kind() + }) + .map(|(idx, _)| idx), + }; + + match maybe_index { + Some(index) => { + self.insert(index, bid_kind); + } + None => { + self.push(bid_kind); + } + } + } +} + +#[cfg(test)] +mod prop_test_delegator { + use proptest::prelude::*; + + use crate::{bytesrepr, gens}; + + proptest! { + #[test] + fn test_value_bid(bid in gens::delegator_arb()) { + bytesrepr::test_serialization_roundtrip(&bid); + } + } +} + +#[cfg(test)] +mod prop_test_reservation { + use proptest::prelude::*; + + use crate::{bytesrepr, gens}; + + proptest! 
{ + #[test] + fn test_value_bid(bid in gens::reservation_arb()) { + bytesrepr::test_serialization_roundtrip(&bid); + } + } +} diff --git a/types/src/system/auction/bid.rs b/types/src/system/auction/bid.rs new file mode 100644 index 0000000000..17c7fd5581 --- /dev/null +++ b/types/src/system/auction/bid.rs @@ -0,0 +1,639 @@ +mod vesting; + +use alloc::{collections::BTreeMap, vec::Vec}; +use core::fmt::{self, Display, Formatter}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; +#[cfg(feature = "json-schema")] +use serde_map_to_array::KeyValueJsonSchema; +use serde_map_to_array::{BTreeMapToArray, KeyValueLabels}; + +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + system::auction::{ + DelegationRate, Delegator, DelegatorBid, DelegatorKind, Error, ValidatorBid, + }, + CLType, CLTyped, PublicKey, URef, U512, +}; + +pub use vesting::{VestingSchedule, VESTING_SCHEDULE_LENGTH_MILLIS}; + +/// An entry in the validator map. +#[derive(Debug, PartialEq, Eq, Serialize, Deserialize, Clone)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub struct Bid { + /// Validator public key. + validator_public_key: PublicKey, + /// The purse that was used for bonding. + bonding_purse: URef, + /// The amount of tokens staked by a validator (not including delegators). + staked_amount: U512, + /// Delegation rate. + delegation_rate: DelegationRate, + /// Vesting schedule for a genesis validator. `None` if non-genesis validator. + vesting_schedule: Option, + /// This validator's delegators, indexed by their public keys. + #[serde(with = "BTreeMapToArray::")] + delegators: BTreeMap, + /// `true` if validator has been "evicted". 
+ inactive: bool, +} + +impl Bid { + #[allow(missing_docs)] + pub fn from_non_unified( + validator_bid: ValidatorBid, + delegators: BTreeMap, + ) -> Self { + let mut map = BTreeMap::new(); + for (kind, bid) in delegators { + if let DelegatorKind::PublicKey(pk) = kind { + let delegator = Delegator::unlocked( + pk.clone(), + bid.staked_amount(), + *bid.bonding_purse(), + bid.validator_public_key().clone(), + ); + map.insert(pk, delegator); + } + } + Self { + validator_public_key: validator_bid.validator_public_key().clone(), + bonding_purse: *validator_bid.bonding_purse(), + staked_amount: validator_bid.staked_amount(), + delegation_rate: *validator_bid.delegation_rate(), + vesting_schedule: validator_bid.vesting_schedule().cloned(), + delegators: map, + inactive: validator_bid.inactive(), + } + } + + /// Creates new instance of a bid with locked funds. + pub fn locked( + validator_public_key: PublicKey, + bonding_purse: URef, + staked_amount: U512, + delegation_rate: DelegationRate, + release_timestamp_millis: u64, + ) -> Self { + let vesting_schedule = Some(VestingSchedule::new(release_timestamp_millis)); + let delegators = BTreeMap::new(); + let inactive = false; + Self { + validator_public_key, + bonding_purse, + staked_amount, + delegation_rate, + vesting_schedule, + delegators, + inactive, + } + } + + /// Creates new instance of a bid with unlocked funds. + pub fn unlocked( + validator_public_key: PublicKey, + bonding_purse: URef, + staked_amount: U512, + delegation_rate: DelegationRate, + ) -> Self { + let vesting_schedule = None; + let delegators = BTreeMap::new(); + let inactive = false; + Self { + validator_public_key, + bonding_purse, + staked_amount, + delegation_rate, + vesting_schedule, + delegators, + inactive, + } + } + + /// Creates a new inactive instance of a bid with 0 staked amount. 
+ pub fn empty(validator_public_key: PublicKey, bonding_purse: URef) -> Self { + let vesting_schedule = None; + let delegators = BTreeMap::new(); + let inactive = true; + let staked_amount = 0.into(); + let delegation_rate = Default::default(); + Self { + validator_public_key, + bonding_purse, + staked_amount, + delegation_rate, + vesting_schedule, + delegators, + inactive, + } + } + + /// Gets the validator public key of the provided bid + pub fn validator_public_key(&self) -> &PublicKey { + &self.validator_public_key + } + + /// Gets the bonding purse of the provided bid + pub fn bonding_purse(&self) -> &URef { + &self.bonding_purse + } + + /// Checks if a bid is still locked under a vesting schedule. + /// + /// Returns true if a timestamp falls below the initial lockup period + 91 days release + /// schedule, otherwise false. + pub fn is_locked(&self, timestamp_millis: u64) -> bool { + self.is_locked_with_vesting_schedule(timestamp_millis, VESTING_SCHEDULE_LENGTH_MILLIS) + } + + /// Checks if a bid is still locked under a vesting schedule. + /// + /// Returns true if a timestamp falls below the initial lockup period + 91 days release + /// schedule, otherwise false. + pub fn is_locked_with_vesting_schedule( + &self, + timestamp_millis: u64, + vesting_schedule_period_millis: u64, + ) -> bool { + match &self.vesting_schedule { + Some(vesting_schedule) => { + vesting_schedule.is_vesting(timestamp_millis, vesting_schedule_period_millis) + } + None => false, + } + } + + /// Gets the staked amount of the provided bid + pub fn staked_amount(&self) -> &U512 { + &self.staked_amount + } + + /// Gets the staked amount of the provided bid + pub fn staked_amount_mut(&mut self) -> &mut U512 { + &mut self.staked_amount + } + + /// Gets the delegation rate of the provided bid + pub fn delegation_rate(&self) -> &DelegationRate { + &self.delegation_rate + } + + /// Returns a reference to the vesting schedule of the provided bid. `None` if a non-genesis + /// validator. 
+ pub fn vesting_schedule(&self) -> Option<&VestingSchedule> { + self.vesting_schedule.as_ref() + } + + /// Returns a mutable reference to the vesting schedule of the provided bid. `None` if a + /// non-genesis validator. + pub fn vesting_schedule_mut(&mut self) -> Option<&mut VestingSchedule> { + self.vesting_schedule.as_mut() + } + + /// Returns a reference to the delegators of the provided bid + pub fn delegators(&self) -> &BTreeMap { + &self.delegators + } + + /// Returns a mutable reference to the delegators of the provided bid + pub fn delegators_mut(&mut self) -> &mut BTreeMap { + &mut self.delegators + } + + /// Returns `true` if validator is inactive + pub fn inactive(&self) -> bool { + self.inactive + } + + /// Decreases the stake of the provided bid + pub fn decrease_stake( + &mut self, + amount: U512, + era_end_timestamp_millis: u64, + ) -> Result { + let updated_staked_amount = self + .staked_amount + .checked_sub(amount) + .ok_or(Error::UnbondTooLarge)?; + + let vesting_schedule = match self.vesting_schedule.as_ref() { + Some(vesting_schedule) => vesting_schedule, + None => { + self.staked_amount = updated_staked_amount; + return Ok(updated_staked_amount); + } + }; + + match vesting_schedule.locked_amount(era_end_timestamp_millis) { + Some(locked_amount) if updated_staked_amount < locked_amount => { + Err(Error::ValidatorFundsLocked) + } + None => { + // If `None`, then the locked amounts table has yet to be initialized (likely + // pre-90 day mark) + Err(Error::ValidatorFundsLocked) + } + Some(_) => { + self.staked_amount = updated_staked_amount; + Ok(updated_staked_amount) + } + } + } + + /// Increases the stake of the provided bid + pub fn increase_stake(&mut self, amount: U512) -> Result { + let updated_staked_amount = self + .staked_amount + .checked_add(amount) + .ok_or(Error::InvalidAmount)?; + + self.staked_amount = updated_staked_amount; + + Ok(updated_staked_amount) + } + + /// Updates the delegation rate of the provided bid + pub fn 
with_delegation_rate(&mut self, delegation_rate: DelegationRate) -> &mut Self { + self.delegation_rate = delegation_rate; + self + } + + /// Initializes the vesting schedule of provided bid if the provided timestamp is greater than + /// or equal to the bid's initial release timestamp and the bid is owned by a genesis + /// validator. This method initializes with default 14 week vesting schedule. + /// + /// Returns `true` if the provided bid's vesting schedule was initialized. + pub fn process(&mut self, timestamp_millis: u64) -> bool { + self.process_with_vesting_schedule(timestamp_millis, VESTING_SCHEDULE_LENGTH_MILLIS) + } + + /// Initializes the vesting schedule of provided bid if the provided timestamp is greater than + /// or equal to the bid's initial release timestamp and the bid is owned by a genesis + /// validator. + /// + /// Returns `true` if the provided bid's vesting schedule was initialized. + pub fn process_with_vesting_schedule( + &mut self, + timestamp_millis: u64, + vesting_schedule_period_millis: u64, + ) -> bool { + // Put timestamp-sensitive processing logic in here + let staked_amount = self.staked_amount; + let vesting_schedule = match self.vesting_schedule_mut() { + Some(vesting_schedule) => vesting_schedule, + None => return false, + }; + if timestamp_millis < vesting_schedule.initial_release_timestamp_millis() { + return false; + } + + let mut initialized = false; + + if vesting_schedule.initialize_with_schedule(staked_amount, vesting_schedule_period_millis) + { + initialized = true; + } + + for delegator in self.delegators_mut().values_mut() { + let staked_amount = delegator.staked_amount(); + if let Some(vesting_schedule) = delegator.vesting_schedule_mut() { + if timestamp_millis >= vesting_schedule.initial_release_timestamp_millis() + && vesting_schedule + .initialize_with_schedule(staked_amount, vesting_schedule_period_millis) + { + initialized = true; + } + } + } + + initialized + } + + /// Sets given bid's `inactive` field to 
`false` + pub fn activate(&mut self) -> bool { + self.inactive = false; + false + } + + /// Sets given bid's `inactive` field to `true` + pub fn deactivate(&mut self) -> bool { + self.inactive = true; + true + } + + /// Returns the total staked amount of validator + all delegators + pub fn total_staked_amount(&self) -> Result { + self.delegators + .iter() + .try_fold(U512::zero(), |a, (_, b)| a.checked_add(b.staked_amount())) + .and_then(|delegators_sum| delegators_sum.checked_add(*self.staked_amount())) + .ok_or(Error::InvalidAmount) + } +} + +impl CLTyped for Bid { + fn cl_type() -> CLType { + CLType::Any + } +} + +impl ToBytes for Bid { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.validator_public_key.serialized_length() + + self.bonding_purse.serialized_length() + + self.staked_amount.serialized_length() + + self.delegation_rate.serialized_length() + + self.vesting_schedule.serialized_length() + + self.delegators.serialized_length() + + self.inactive.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.validator_public_key.write_bytes(writer)?; + self.bonding_purse.write_bytes(writer)?; + self.staked_amount.write_bytes(writer)?; + self.delegation_rate.write_bytes(writer)?; + self.vesting_schedule.write_bytes(writer)?; + self.delegators.write_bytes(writer)?; + self.inactive.write_bytes(writer)?; + Ok(()) + } +} + +impl FromBytes for Bid { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (validator_public_key, bytes) = FromBytes::from_bytes(bytes)?; + let (bonding_purse, bytes) = FromBytes::from_bytes(bytes)?; + let (staked_amount, bytes) = FromBytes::from_bytes(bytes)?; + let (delegation_rate, bytes) = FromBytes::from_bytes(bytes)?; + let (vesting_schedule, bytes) = FromBytes::from_bytes(bytes)?; + let (delegators, 
bytes) = FromBytes::from_bytes(bytes)?; + let (inactive, bytes) = FromBytes::from_bytes(bytes)?; + Ok(( + Bid { + validator_public_key, + bonding_purse, + staked_amount, + delegation_rate, + vesting_schedule, + delegators, + inactive, + }, + bytes, + )) + } +} + +impl Display for Bid { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + write!( + formatter, + "bid {{ bonding purse {}, staked {}, delegation rate {}, delegators {{", + self.bonding_purse, self.staked_amount, self.delegation_rate + )?; + + let count = self.delegators.len(); + for (index, delegator) in self.delegators.values().enumerate() { + write!( + formatter, + "{}{}", + delegator, + if index + 1 == count { "" } else { ", " } + )?; + } + + write!( + formatter, + "}}, is {}inactive }}", + if self.inactive { "" } else { "not " } + ) + } +} + +struct DelegatorLabels; + +impl KeyValueLabels for DelegatorLabels { + const KEY: &'static str = "delegator_public_key"; + const VALUE: &'static str = "delegator"; +} + +#[cfg(feature = "json-schema")] +impl KeyValueJsonSchema for DelegatorLabels { + const JSON_SCHEMA_KV_NAME: Option<&'static str> = Some("PublicKeyAndDelegator"); + const JSON_SCHEMA_KV_DESCRIPTION: Option<&'static str> = + Some("A delegator associated with the given validator."); + const JSON_SCHEMA_KEY_DESCRIPTION: Option<&'static str> = + Some("The public key of the delegator."); + const JSON_SCHEMA_VALUE_DESCRIPTION: Option<&'static str> = Some("The delegator details."); +} + +#[cfg(test)] +mod tests { + use alloc::collections::BTreeMap; + + use crate::{ + bytesrepr, + system::auction::{bid::VestingSchedule, Bid, DelegationRate, Delegator}, + AccessRights, PublicKey, SecretKey, URef, U512, + }; + + const WEEK_MILLIS: u64 = 7 * 24 * 60 * 60 * 1000; + const TEST_VESTING_SCHEDULE_LENGTH_MILLIS: u64 = 7 * WEEK_MILLIS; + + #[test] + fn serialization_roundtrip() { + let founding_validator = Bid { + validator_public_key: PublicKey::from( + &SecretKey::ed25519_from_bytes([0u8; 
SecretKey::ED25519_LENGTH]).unwrap(), + ), + bonding_purse: URef::new([42; 32], AccessRights::READ_ADD_WRITE), + staked_amount: U512::one(), + delegation_rate: DelegationRate::MAX, + vesting_schedule: Some(VestingSchedule::default()), + delegators: BTreeMap::default(), + inactive: true, + }; + bytesrepr::test_serialization_roundtrip(&founding_validator); + } + + #[test] + fn should_immediately_initialize_unlock_amounts() { + const TIMESTAMP_MILLIS: u64 = 0; + + let validator_pk: PublicKey = (&SecretKey::ed25519_from_bytes([42; 32]).unwrap()).into(); + + let validator_release_timestamp = TIMESTAMP_MILLIS; + let vesting_schedule_period_millis = TIMESTAMP_MILLIS; + let validator_bonding_purse = URef::new([42; 32], AccessRights::ADD); + let validator_staked_amount = U512::from(1000); + let validator_delegation_rate = 0; + + let mut bid = Bid::locked( + validator_pk, + validator_bonding_purse, + validator_staked_amount, + validator_delegation_rate, + validator_release_timestamp, + ); + + assert!(bid.process_with_vesting_schedule( + validator_release_timestamp, + vesting_schedule_period_millis, + )); + assert!(!bid.is_locked_with_vesting_schedule( + validator_release_timestamp, + vesting_schedule_period_millis + )); + } + + #[test] + fn should_initialize_delegators_different_timestamps() { + const TIMESTAMP_MILLIS: u64 = WEEK_MILLIS; + + let validator_pk: PublicKey = (&SecretKey::ed25519_from_bytes([42; 32]).unwrap()).into(); + + let delegator_1_pk: PublicKey = (&SecretKey::ed25519_from_bytes([43; 32]).unwrap()).into(); + let delegator_2_pk: PublicKey = (&SecretKey::ed25519_from_bytes([44; 32]).unwrap()).into(); + + let validator_release_timestamp = TIMESTAMP_MILLIS; + let validator_bonding_purse = URef::new([42; 32], AccessRights::ADD); + let validator_staked_amount = U512::from(1000); + let validator_delegation_rate = 0; + + let delegator_1_release_timestamp = TIMESTAMP_MILLIS + 1; + let delegator_1_bonding_purse = URef::new([52; 32], AccessRights::ADD); + let 
delegator_1_staked_amount = U512::from(2000); + + let delegator_2_release_timestamp = TIMESTAMP_MILLIS + 2; + let delegator_2_bonding_purse = URef::new([62; 32], AccessRights::ADD); + let delegator_2_staked_amount = U512::from(3000); + + let delegator_1 = Delegator::locked( + delegator_1_pk.clone(), + delegator_1_staked_amount, + delegator_1_bonding_purse, + validator_pk.clone(), + delegator_1_release_timestamp, + ); + + let delegator_2 = Delegator::locked( + delegator_2_pk.clone(), + delegator_2_staked_amount, + delegator_2_bonding_purse, + validator_pk.clone(), + delegator_2_release_timestamp, + ); + + let mut bid = Bid::locked( + validator_pk, + validator_bonding_purse, + validator_staked_amount, + validator_delegation_rate, + validator_release_timestamp, + ); + + assert!(!bid.process_with_vesting_schedule( + validator_release_timestamp - 1, + TEST_VESTING_SCHEDULE_LENGTH_MILLIS + )); + + { + let delegators = bid.delegators_mut(); + + delegators.insert(delegator_1_pk.clone(), delegator_1); + delegators.insert(delegator_2_pk.clone(), delegator_2); + } + + assert!(bid.process_with_vesting_schedule( + delegator_1_release_timestamp, + TEST_VESTING_SCHEDULE_LENGTH_MILLIS + )); + + let delegator_1_updated_1 = bid + .delegators() + .get(&delegator_1_pk.clone()) + .cloned() + .unwrap(); + assert!(delegator_1_updated_1 + .vesting_schedule() + .unwrap() + .locked_amounts() + .is_some()); + + let delegator_2_updated_1 = bid + .delegators() + .get(&delegator_2_pk.clone()) + .cloned() + .unwrap(); + assert!(delegator_2_updated_1 + .vesting_schedule() + .unwrap() + .locked_amounts() + .is_none()); + + assert!(bid.process_with_vesting_schedule( + delegator_2_release_timestamp, + TEST_VESTING_SCHEDULE_LENGTH_MILLIS + )); + + let delegator_1_updated_2 = bid + .delegators() + .get(&delegator_1_pk.clone()) + .cloned() + .unwrap(); + assert!(delegator_1_updated_2 + .vesting_schedule() + .unwrap() + .locked_amounts() + .is_some()); + // Delegator 1 is already initialized and did not 
change after 2nd Bid::process + assert_eq!(delegator_1_updated_1, delegator_1_updated_2); + + let delegator_2_updated_2 = bid + .delegators() + .get(&delegator_2_pk.clone()) + .cloned() + .unwrap(); + assert!(delegator_2_updated_2 + .vesting_schedule() + .unwrap() + .locked_amounts() + .is_some()); + + // Delegator 2 is different compared to first Bid::process + assert_ne!(delegator_2_updated_1, delegator_2_updated_2); + + // Validator initialized, and all delegators initialized + assert!(!bid.process_with_vesting_schedule( + delegator_2_release_timestamp + 1, + TEST_VESTING_SCHEDULE_LENGTH_MILLIS + )); + } +} + +#[cfg(test)] +mod prop_tests { + use proptest::prelude::*; + + use crate::{bytesrepr, gens}; + + proptest! { + #[test] + fn test_unified_bid(bid in gens::unified_bid_arb(0..3)) { + bytesrepr::test_serialization_roundtrip(&bid); + } + } +} diff --git a/types/src/system/auction/bid/mod.rs b/types/src/system/auction/bid/mod.rs deleted file mode 100644 index d815d229e0..0000000000 --- a/types/src/system/auction/bid/mod.rs +++ /dev/null @@ -1,427 +0,0 @@ -// TODO - remove once schemars stops causing warning. -#![allow(clippy::field_reassign_with_default)] - -mod vesting; - -use alloc::{collections::BTreeMap, vec::Vec}; - -#[cfg(feature = "std")] -use schemars::JsonSchema; -use serde::{Deserialize, Serialize}; - -use crate::{ - bytesrepr::{self, FromBytes, ToBytes}, - system::auction::{DelegationRate, Delegator, Error}, - CLType, CLTyped, PublicKey, URef, U512, -}; - -pub use vesting::VestingSchedule; - -/// An entry in the validator map. -#[derive(PartialEq, Eq, Debug, Serialize, Deserialize, Clone)] -#[cfg_attr(feature = "std", derive(JsonSchema))] -#[serde(deny_unknown_fields)] -pub struct Bid { - /// Validator public key - validator_public_key: PublicKey, - /// The purse that was used for bonding. - bonding_purse: URef, - /// The amount of tokens staked by a validator (not including delegators). 
- staked_amount: U512, - /// Delegation rate - delegation_rate: DelegationRate, - /// Vesting schedule for a genesis validator. `None` if non-genesis validator. - vesting_schedule: Option, - /// This validator's delegators, indexed by their public keys - delegators: BTreeMap, - /// `true` if validator has been "evicted" - inactive: bool, -} - -impl Bid { - /// Creates new instance of a bid with locked funds. - pub fn locked( - validator_public_key: PublicKey, - bonding_purse: URef, - staked_amount: U512, - delegation_rate: DelegationRate, - release_timestamp_millis: u64, - ) -> Self { - let vesting_schedule = Some(VestingSchedule::new(release_timestamp_millis)); - let delegators = BTreeMap::new(); - let inactive = false; - Self { - validator_public_key, - bonding_purse, - staked_amount, - delegation_rate, - vesting_schedule, - delegators, - inactive, - } - } - - /// Creates new instance of a bid with unlocked funds. - pub fn unlocked( - validator_public_key: PublicKey, - bonding_purse: URef, - staked_amount: U512, - delegation_rate: DelegationRate, - ) -> Self { - let vesting_schedule = None; - let delegators = BTreeMap::new(); - let inactive = false; - Self { - validator_public_key, - bonding_purse, - staked_amount, - delegation_rate, - vesting_schedule, - delegators, - inactive, - } - } - - /// Gets the validator public key of the provided bid - pub fn validator_public_key(&self) -> &PublicKey { - &self.validator_public_key - } - - /// Gets the bonding purse of the provided bid - pub fn bonding_purse(&self) -> &URef { - &self.bonding_purse - } - - /// Gets the staked amount of the provided bid - pub fn staked_amount(&self) -> &U512 { - &self.staked_amount - } - - /// Gets the staked amount of the provided bid - pub fn staked_amount_mut(&mut self) -> &mut U512 { - &mut self.staked_amount - } - - /// Gets the delegation rate of the provided bid - pub fn delegation_rate(&self) -> &DelegationRate { - &self.delegation_rate - } - - /// Returns a reference to the 
vesting schedule of the provided bid. `None` if a non-genesis - /// validator. - pub fn vesting_schedule(&self) -> Option<&VestingSchedule> { - self.vesting_schedule.as_ref() - } - - /// Returns a mutable reference to the vesting schedule of the provided bid. `None` if a - /// non-genesis validator. - pub fn vesting_schedule_mut(&mut self) -> Option<&mut VestingSchedule> { - self.vesting_schedule.as_mut() - } - - /// Returns a reference to the delegators of the provided bid - pub fn delegators(&self) -> &BTreeMap { - &self.delegators - } - - /// Returns a mutable reference to the delegators of the provided bid - pub fn delegators_mut(&mut self) -> &mut BTreeMap { - &mut self.delegators - } - - /// Returns `true` if validator is inactive - pub fn inactive(&self) -> bool { - self.inactive - } - - /// Decreases the stake of the provided bid - pub fn decrease_stake( - &mut self, - amount: U512, - era_end_timestamp_millis: u64, - ) -> Result { - let updated_staked_amount = self - .staked_amount - .checked_sub(amount) - .ok_or(Error::UnbondTooLarge)?; - - let vesting_schedule = match self.vesting_schedule.as_ref() { - Some(vesting_sechdule) => vesting_sechdule, - None => { - self.staked_amount = updated_staked_amount; - return Ok(updated_staked_amount); - } - }; - - match vesting_schedule.locked_amount(era_end_timestamp_millis) { - Some(locked_amount) if updated_staked_amount < locked_amount => { - Err(Error::ValidatorFundsLocked) - } - None => { - // If `None`, then the locked amounts table has yet to be initialized (likely - // pre-90 day mark) - Err(Error::ValidatorFundsLocked) - } - Some(_) => { - self.staked_amount = updated_staked_amount; - Ok(updated_staked_amount) - } - } - } - - /// Increases the stake of the provided bid - pub fn increase_stake(&mut self, amount: U512) -> Result { - let updated_staked_amount = self - .staked_amount - .checked_add(amount) - .ok_or(Error::InvalidAmount)?; - - self.staked_amount = updated_staked_amount; - - 
Ok(updated_staked_amount) - } - - /// Updates the delegation rate of the provided bid - pub fn with_delegation_rate(&mut self, delegation_rate: DelegationRate) -> &mut Self { - self.delegation_rate = delegation_rate; - self - } - - /// Initializes the vesting schedule of provided bid if the provided timestamp is greater than - /// or equal to the bid's initial release timestamp and the bid is owned by a genesis - /// validator. - /// - /// Returns `true` if the provided bid's vesting schedule was initialized. - pub fn process(&mut self, timestamp_millis: u64) -> bool { - // Put timestamp-sensitive processing logic in here - let staked_amount = self.staked_amount; - let vesting_schedule = match self.vesting_schedule_mut() { - Some(vesting_schedule) => vesting_schedule, - None => return false, - }; - if timestamp_millis < vesting_schedule.initial_release_timestamp_millis() { - return false; - } - - let mut initialized = false; - - if vesting_schedule.initialize(staked_amount) { - initialized = true; - } - - for delegator in self.delegators_mut().values_mut() { - let staked_amount = *delegator.staked_amount(); - if let Some(vesting_schedule) = delegator.vesting_schedule_mut() { - if timestamp_millis >= vesting_schedule.initial_release_timestamp_millis() - && vesting_schedule.initialize(staked_amount) - { - initialized = true; - } - } - } - - initialized - } - - /// Sets given bid's `inactive` field to `false` - pub fn activate(&mut self) -> bool { - self.inactive = false; - false - } - - /// Sets given bid's `inactive` field to `true` - pub fn deactivate(&mut self) -> bool { - self.inactive = true; - true - } - - /// Returns the total staked amount of validator + all delegators - pub fn total_staked_amount(&self) -> Result { - self.delegators - .iter() - .fold(Some(U512::zero()), |maybe_a, (_, b)| { - maybe_a.and_then(|a| a.checked_add(*b.staked_amount())) - }) - .and_then(|delegators_sum| delegators_sum.checked_add(*self.staked_amount())) - 
.ok_or(Error::InvalidAmount) - } -} - -impl CLTyped for Bid { - fn cl_type() -> CLType { - CLType::Any - } -} - -impl ToBytes for Bid { - fn to_bytes(&self) -> Result, bytesrepr::Error> { - let mut result = bytesrepr::allocate_buffer(self)?; - result.extend(self.validator_public_key.to_bytes()?); - result.extend(self.bonding_purse.to_bytes()?); - result.extend(self.staked_amount.to_bytes()?); - result.extend(self.delegation_rate.to_bytes()?); - result.extend(self.vesting_schedule.to_bytes()?); - result.extend(self.delegators.to_bytes()?); - result.extend(self.inactive.to_bytes()?); - Ok(result) - } - - fn serialized_length(&self) -> usize { - self.validator_public_key.serialized_length() - + self.bonding_purse.serialized_length() - + self.staked_amount.serialized_length() - + self.delegation_rate.serialized_length() - + self.vesting_schedule.serialized_length() - + self.delegators.serialized_length() - + self.inactive.serialized_length() - } -} - -impl FromBytes for Bid { - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { - let (validator_public_key, bytes) = FromBytes::from_bytes(bytes)?; - let (bonding_purse, bytes) = FromBytes::from_bytes(bytes)?; - let (staked_amount, bytes) = FromBytes::from_bytes(bytes)?; - let (delegation_rate, bytes) = FromBytes::from_bytes(bytes)?; - let (vesting_schedule, bytes) = FromBytes::from_bytes(bytes)?; - let (delegators, bytes) = FromBytes::from_bytes(bytes)?; - let (inactive, bytes) = FromBytes::from_bytes(bytes)?; - Ok(( - Bid { - validator_public_key, - bonding_purse, - staked_amount, - delegation_rate, - vesting_schedule, - delegators, - inactive, - }, - bytes, - )) - } -} - -#[cfg(test)] -mod tests { - use alloc::collections::BTreeMap; - - use crate::{ - bytesrepr, - system::auction::{bid::VestingSchedule, Bid, DelegationRate, Delegator}, - AccessRights, PublicKey, SecretKey, URef, U512, - }; - - #[test] - fn serialization_roundtrip() { - let founding_validator = Bid { - validator_public_key: 
PublicKey::from( - SecretKey::ed25519_from_bytes([0u8; SecretKey::ED25519_LENGTH]).unwrap(), - ), - bonding_purse: URef::new([42; 32], AccessRights::READ_ADD_WRITE), - staked_amount: U512::one(), - delegation_rate: DelegationRate::max_value(), - vesting_schedule: Some(VestingSchedule::default()), - delegators: BTreeMap::default(), - inactive: true, - }; - bytesrepr::test_serialization_roundtrip(&founding_validator); - } - - #[test] - fn should_initialize_delegators_different_timestamps() { - const WEEK_MILLIS: u64 = 7 * 24 * 60 * 60 * 1000; - - const TIMESTAMP_MILLIS: u64 = WEEK_MILLIS as u64; - - let validator_pk: PublicKey = SecretKey::ed25519_from_bytes([42; 32]).unwrap().into(); - - let delegator_1_pk: PublicKey = SecretKey::ed25519_from_bytes([43; 32]).unwrap().into(); - let delegator_2_pk: PublicKey = SecretKey::ed25519_from_bytes([44; 32]).unwrap().into(); - - let validator_release_timestamp = TIMESTAMP_MILLIS; - let validator_bonding_purse = URef::new([42; 32], AccessRights::ADD); - let validator_staked_amount = U512::from(1000); - let validator_delegation_rate = 0; - - let delegator_1_release_timestamp = TIMESTAMP_MILLIS + 1; - let delegator_1_bonding_purse = URef::new([52; 32], AccessRights::ADD); - let delegator_1_staked_amount = U512::from(2000); - - let delegator_2_release_timestamp = TIMESTAMP_MILLIS + 2; - let delegator_2_bonding_purse = URef::new([62; 32], AccessRights::ADD); - let delegator_2_staked_amount = U512::from(3000); - - let delegator_1 = Delegator::locked( - delegator_1_pk.clone(), - delegator_1_staked_amount, - delegator_1_bonding_purse, - validator_pk.clone(), - delegator_1_release_timestamp, - ); - - let delegator_2 = Delegator::locked( - delegator_2_pk.clone(), - delegator_2_staked_amount, - delegator_2_bonding_purse, - validator_pk.clone(), - delegator_2_release_timestamp, - ); - - let mut bid = Bid::locked( - validator_pk, - validator_bonding_purse, - validator_staked_amount, - validator_delegation_rate, - 
validator_release_timestamp, - ); - - assert!(!bid.process(validator_release_timestamp - 1)); - - { - let delegators = bid.delegators_mut(); - - delegators.insert(delegator_1_pk.clone(), delegator_1); - delegators.insert(delegator_2_pk.clone(), delegator_2); - } - - assert!(bid.process(delegator_1_release_timestamp)); - - let delegator_1_updated_1 = bid.delegators().get(&delegator_1_pk).cloned().unwrap(); - assert!(delegator_1_updated_1 - .vesting_schedule() - .unwrap() - .locked_amounts() - .is_some()); - - let delegator_2_updated_1 = bid.delegators().get(&delegator_2_pk).cloned().unwrap(); - assert!(delegator_2_updated_1 - .vesting_schedule() - .unwrap() - .locked_amounts() - .is_none()); - - assert!(bid.process(delegator_2_release_timestamp)); - - let delegator_1_updated_2 = bid.delegators().get(&delegator_1_pk).cloned().unwrap(); - assert!(delegator_1_updated_2 - .vesting_schedule() - .unwrap() - .locked_amounts() - .is_some()); - // Delegator 1 is already initialized and did not change after 2nd Bid::process - assert_eq!(delegator_1_updated_1, delegator_1_updated_2); - - let delegator_2_updated_2 = bid.delegators().get(&delegator_2_pk).cloned().unwrap(); - assert!(delegator_2_updated_2 - .vesting_schedule() - .unwrap() - .locked_amounts() - .is_some()); - - // Delegator 2 is different compared to first Bid::process - assert_ne!(delegator_2_updated_1, delegator_2_updated_2); - - // Validator initialized, and all delegators initialized - assert!(!bid.process(delegator_2_release_timestamp + 1)); - } -} diff --git a/types/src/system/auction/bid/vesting.rs b/types/src/system/auction/bid/vesting.rs index 11c7bb6575..649a854c5d 100644 --- a/types/src/system/auction/bid/vesting.rs +++ b/types/src/system/auction/bid/vesting.rs @@ -1,10 +1,8 @@ -// TODO - remove once schemars stops causing warning. 
-#![allow(clippy::field_reassign_with_default)] - use alloc::vec::Vec; -use core::mem::MaybeUninit; -#[cfg(feature = "std")] +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] use schemars::JsonSchema; use serde::{Deserialize, Serialize}; @@ -13,20 +11,31 @@ use crate::{ U512, }; -const VESTING_SCHEDULE_LENGTH_DAYS: usize = 91; +const DAY_MILLIS: usize = 24 * 60 * 60 * 1000; const DAYS_IN_WEEK: usize = 7; +const WEEK_MILLIS: usize = DAYS_IN_WEEK * DAY_MILLIS; -const WEEK_MILLIS: usize = DAYS_IN_WEEK * 24 * 60 * 60 * 1000; - +/// Length of total vesting schedule in days. +const VESTING_SCHEDULE_LENGTH_DAYS: usize = 91; +/// Length of total vesting schedule expressed in days. +pub const VESTING_SCHEDULE_LENGTH_MILLIS: u64 = + VESTING_SCHEDULE_LENGTH_DAYS as u64 * DAY_MILLIS as u64; /// 91 days / 7 days in a week = 13 weeks -const LOCKED_AMOUNTS_LENGTH: usize = (VESTING_SCHEDULE_LENGTH_DAYS / DAYS_IN_WEEK) + 1; +const LOCKED_AMOUNTS_MAX_LENGTH: usize = (VESTING_SCHEDULE_LENGTH_DAYS / DAYS_IN_WEEK) + 1; -#[derive(Copy, Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize)] -#[cfg_attr(feature = "std", derive(JsonSchema))] +#[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] #[serde(deny_unknown_fields)] pub struct VestingSchedule { initial_release_timestamp_millis: u64, - locked_amounts: Option<[U512; LOCKED_AMOUNTS_LENGTH]>, + locked_amounts: Option<[U512; LOCKED_AMOUNTS_MAX_LENGTH]>, +} + +fn vesting_schedule_period_to_weeks(vesting_schedule_period_millis: u64) -> usize { + debug_assert_ne!(DAY_MILLIS, 0); + debug_assert_ne!(DAYS_IN_WEEK, 0); + vesting_schedule_period_millis as usize / DAY_MILLIS / DAYS_IN_WEEK } impl VestingSchedule { @@ -38,85 +47,146 @@ impl VestingSchedule { } } + /// Initializes vesting schedule with a configured amount of weekly releases. 
+ /// /// Returns `false` if already initialized. - pub fn initialize(&mut self, staked_amount: U512) -> bool { + /// + /// # Panics + /// + /// Panics if `vesting_schedule_period_millis` represents more than 13 weeks. + pub fn initialize_with_schedule( + &mut self, + staked_amount: U512, + vesting_schedule_period_millis: u64, + ) -> bool { if self.locked_amounts.is_some() { return false; } - let release_period: U512 = U512::from(LOCKED_AMOUNTS_LENGTH); + let locked_amounts_length = + vesting_schedule_period_to_weeks(vesting_schedule_period_millis); + + assert!( + locked_amounts_length < LOCKED_AMOUNTS_MAX_LENGTH, + "vesting schedule period must be less than {} weeks", + LOCKED_AMOUNTS_MAX_LENGTH, + ); + + if locked_amounts_length == 0 || vesting_schedule_period_millis == 0 { + // Zero weeks means instant unlock of staked amount. + self.locked_amounts = Some(Default::default()); + return true; + } + + let release_period: U512 = U512::from(locked_amounts_length + 1); let weekly_release = staked_amount / release_period; - let mut locked_amounts = [U512::zero(); LOCKED_AMOUNTS_LENGTH]; + let mut locked_amounts = [U512::zero(); LOCKED_AMOUNTS_MAX_LENGTH]; let mut remaining_locked = staked_amount; - // Ed and Henry prefer this idiom - #[allow(clippy::needless_range_loop)] - for i in 0..LOCKED_AMOUNTS_LENGTH - 1 { + for locked_amount in locked_amounts.iter_mut().take(locked_amounts_length) { remaining_locked -= weekly_release; - locked_amounts[i] = remaining_locked; + *locked_amount = remaining_locked; } - locked_amounts[LOCKED_AMOUNTS_LENGTH - 1] = U512::zero(); + + assert_eq!( + locked_amounts.get(locked_amounts_length), + Some(&U512::zero()), + "first element after the schedule should be zero" + ); self.locked_amounts = Some(locked_amounts); true } + /// Initializes weekly release for a fixed amount of 14 weeks period. + /// + /// Returns `false` if already initialized. 
+ pub fn initialize(&mut self, staked_amount: U512) -> bool { + self.initialize_with_schedule(staked_amount, VESTING_SCHEDULE_LENGTH_MILLIS) + } + pub fn initial_release_timestamp_millis(&self) -> u64 { self.initial_release_timestamp_millis } - pub fn locked_amounts(&self) -> Option<[U512; LOCKED_AMOUNTS_LENGTH]> { - self.locked_amounts + pub fn locked_amounts(&self) -> Option<&[U512]> { + let locked_amounts = self.locked_amounts.as_ref()?; + Some(locked_amounts.as_slice()) } pub fn locked_amount(&self, timestamp_millis: u64) -> Option { - self.locked_amounts.map(|locked_amounts| { - assert!(u64::MAX as usize <= usize::MAX); + let locked_amounts = self.locked_amounts()?; + + let index = { let index_timestamp = - (timestamp_millis - self.initial_release_timestamp_millis) as usize; - let index = index_timestamp / WEEK_MILLIS; - if index < LOCKED_AMOUNTS_LENGTH { - locked_amounts[index] - } else { - U512::zero() + timestamp_millis.checked_sub(self.initial_release_timestamp_millis)?; + (index_timestamp as usize).checked_div(WEEK_MILLIS)? + }; + + let locked_amount = locked_amounts.get(index).cloned().unwrap_or_default(); + + Some(locked_amount) + } + + /// Checks if this vesting schedule is still under the vesting + pub(crate) fn is_vesting( + &self, + timestamp_millis: u64, + vesting_schedule_period_millis: u64, + ) -> bool { + let vested_period = match self.locked_amounts() { + Some(locked_amounts) => { + let vesting_weeks = locked_amounts + .iter() + .position(|amount| amount.is_zero()) + .expect("vesting schedule should always have zero at the end"); // SAFETY: at least one zero is guaranteed by `initialize_with_schedule` method + + let vesting_weeks_millis = + (vesting_weeks as u64).saturating_mul(WEEK_MILLIS as u64); + + self.initial_release_timestamp_millis() + .saturating_add(vesting_weeks_millis) } - }) + None => { + // Uninitialized yet but we know this will be the configured period of time. 
+ self.initial_release_timestamp_millis() + .saturating_add(vesting_schedule_period_millis) + } + }; + timestamp_millis < vested_period } } -impl ToBytes for [U512; LOCKED_AMOUNTS_LENGTH] { +impl ToBytes for [U512; LOCKED_AMOUNTS_MAX_LENGTH] { fn to_bytes(&self) -> Result, Error> { let mut result = bytesrepr::allocate_buffer(self)?; - for item in self.iter() { - result.append(&mut item.to_bytes()?); - } + self.write_bytes(&mut result)?; Ok(result) } fn serialized_length(&self) -> usize { self.iter().map(ToBytes::serialized_length).sum::() } + + #[inline] + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + for amount in self { + amount.write_bytes(writer)?; + } + Ok(()) + } } -impl FromBytes for [U512; LOCKED_AMOUNTS_LENGTH] { +impl FromBytes for [U512; LOCKED_AMOUNTS_MAX_LENGTH] { fn from_bytes(mut bytes: &[u8]) -> Result<(Self, &[u8]), Error> { - let mut result: MaybeUninit<[U512; LOCKED_AMOUNTS_LENGTH]> = MaybeUninit::uninit(); - let result_ptr = result.as_mut_ptr() as *mut U512; - for i in 0..LOCKED_AMOUNTS_LENGTH { - let (t, remainder) = match FromBytes::from_bytes(bytes) { - Ok(success) => success, - Err(error) => { - for j in 0..i { - unsafe { result_ptr.add(j).drop_in_place() } - } - return Err(error); - } - }; - unsafe { result_ptr.add(i).write(t) }; - bytes = remainder; + let mut result = [U512::zero(); LOCKED_AMOUNTS_MAX_LENGTH]; + for value in &mut result { + let (amount, rem) = FromBytes::from_bytes(bytes)?; + *value = amount; + bytes = rem; } - Ok((unsafe { result.assume_init() }, bytes)) + Ok((result, bytes)) } } @@ -177,25 +247,59 @@ mod tests { bytesrepr, gens::u512_arb, system::auction::bid::{ - vesting::{gens::vesting_schedule_arb, LOCKED_AMOUNTS_LENGTH, WEEK_MILLIS}, + vesting::{gens::vesting_schedule_arb, vesting_schedule_period_to_weeks, WEEK_MILLIS}, VestingSchedule, }, U512, }; + use super::*; + /// Default lock-in period of 90 days - const DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS: u64 = 90 * 24 * 60 * 60 * 1000; + const 
DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS: u64 = 90 * DAY_MILLIS as u64; + const RELEASE_TIMESTAMP: u64 = DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS; + const STAKE: u64 = 140; + + const DEFAULT_VESTING_SCHEDULE_PERIOD_MILLIS: u64 = 91 * DAY_MILLIS as u64; + const LOCKED_AMOUNTS_LENGTH: usize = + (DEFAULT_VESTING_SCHEDULE_PERIOD_MILLIS as usize / WEEK_MILLIS) + 1; + + #[test] + #[should_panic = "vesting schedule period must be less than"] + fn test_vesting_schedule_exceeding_the_maximum_should_not_panic() { + let future_date = 98 * DAY_MILLIS as u64; + let mut vesting_schedule = VestingSchedule::new(RELEASE_TIMESTAMP); + vesting_schedule.initialize_with_schedule(U512::from(STAKE), future_date); + + assert_eq!(vesting_schedule.locked_amount(0), None); + assert_eq!(vesting_schedule.locked_amount(RELEASE_TIMESTAMP - 1), None); + } + + #[test] + fn test_locked_amount_check_should_not_panic() { + let mut vesting_schedule = VestingSchedule::new(RELEASE_TIMESTAMP); + vesting_schedule.initialize(U512::from(STAKE)); + + assert_eq!(vesting_schedule.locked_amount(0), None); + assert_eq!(vesting_schedule.locked_amount(RELEASE_TIMESTAMP - 1), None); + } + + #[test] + fn test_locked_with_zero_length_schedule_should_not_panic() { + let mut vesting_schedule = VestingSchedule::new(RELEASE_TIMESTAMP); + vesting_schedule.initialize_with_schedule(U512::from(STAKE), 0); + + assert_eq!(vesting_schedule.locked_amount(0), None); + assert_eq!(vesting_schedule.locked_amount(RELEASE_TIMESTAMP - 1), None); + } #[test] fn test_locked_amount() { - const STAKE: u64 = 140; - const RELEASE_TIMESTAMP: u64 = DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS; let mut vesting_schedule = VestingSchedule::new(RELEASE_TIMESTAMP); vesting_schedule.initialize(U512::from(STAKE)); - let mut timestamp; + let mut timestamp = RELEASE_TIMESTAMP; - timestamp = RELEASE_TIMESTAMP; assert_eq!( vesting_schedule.locked_amount(timestamp), Some(U512::from(130)) @@ -302,11 +406,21 @@ mod tests { vesting_schedule.locked_amount(timestamp), 
Some(U512::from(0)) ); + + timestamp = RELEASE_TIMESTAMP + (WEEK_MILLIS as u64 * 14) + 1; + assert_eq!( + vesting_schedule.locked_amount(timestamp), + Some(U512::from(0)) + ); } - fn vested_amounts_match_initial_stake(initial_stake: U512, release_timestamp: u64) -> bool { + fn vested_amounts_match_initial_stake( + initial_stake: U512, + release_timestamp: u64, + vesting_schedule_length: u64, + ) -> bool { let mut vesting_schedule = VestingSchedule::new(release_timestamp); - vesting_schedule.initialize(initial_stake); + vesting_schedule.initialize_with_schedule(initial_stake, vesting_schedule_length); let mut total_vested_amounts = U512::zero(); @@ -326,16 +440,74 @@ mod tests { let stake = U512::from(1000); assert!(vested_amounts_match_initial_stake( stake, - DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS, + DEFAULT_VESTING_SCHEDULE_PERIOD_MILLIS, )) } + #[test] + fn is_vesting_with_default_schedule() { + let initial_stake = U512::from(1000u64); + let release_timestamp = DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS; + let mut vesting_schedule = VestingSchedule::new(release_timestamp); + + let is_vesting_before: Vec = (0..LOCKED_AMOUNTS_LENGTH + 1) + .map(|i| { + vesting_schedule.is_vesting( + release_timestamp + (WEEK_MILLIS * i) as u64, + DEFAULT_VESTING_SCHEDULE_PERIOD_MILLIS, + ) + }) + .collect(); + + assert_eq!( + is_vesting_before, + vec![ + true, true, true, true, true, true, true, true, true, true, true, true, true, + false, // week after is always set to zero + false + ] + ); + vesting_schedule.initialize(initial_stake); + + let is_vesting_after: Vec = (0..LOCKED_AMOUNTS_LENGTH + 1) + .map(|i| { + vesting_schedule.is_vesting( + release_timestamp + (WEEK_MILLIS * i) as u64, + DEFAULT_VESTING_SCHEDULE_PERIOD_MILLIS, + ) + }) + .collect(); + + assert_eq!( + is_vesting_after, + vec![ + true, true, true, true, true, true, true, true, true, true, true, true, true, + false, // week after is always set to zero + false, + ] + ); + } + + #[test] + fn 
should_calculate_vesting_schedule_period_to_weeks() { + let thirteen_weeks_millis = 13 * 7 * DAY_MILLIS as u64; + assert_eq!(vesting_schedule_period_to_weeks(thirteen_weeks_millis), 13,); + + assert_eq!(vesting_schedule_period_to_weeks(0), 0); + assert_eq!( + vesting_schedule_period_to_weeks(u64::MAX), + 30_500_568_904usize + ); + } + proptest! { #[test] fn prop_total_vested_amounts_conserve_stake(stake in u512_arb()) { prop_assert!(vested_amounts_match_initial_stake( stake, - DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS + DEFAULT_LOCKED_FUNDS_PERIOD_MILLIS, + DEFAULT_VESTING_SCHEDULE_PERIOD_MILLIS, )) } diff --git a/types/src/system/auction/bid_addr.rs b/types/src/system/auction/bid_addr.rs new file mode 100644 index 0000000000..2bfe2a3ef6 --- /dev/null +++ b/types/src/system/auction/bid_addr.rs @@ -0,0 +1,810 @@ +use crate::{ + account::{AccountHash, ACCOUNT_HASH_LENGTH}, + bytesrepr, + bytesrepr::{FromBytes, ToBytes}, + system::auction::{error::Error, DelegatorKind}, + EraId, Key, KeyTag, PublicKey, URefAddr, +}; +use alloc::vec::Vec; +use core::fmt::{Debug, Display, Formatter}; +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::{ + distributions::{Distribution, Standard}, + Rng, +}; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +const UNIFIED_TAG: u8 = 0; +const VALIDATOR_TAG: u8 = 1; +const DELEGATED_ACCOUNT_TAG: u8 = 2; +const DELEGATED_PURSE_TAG: u8 = 3; +const CREDIT_TAG: u8 = 4; +const RESERVATION_ACCOUNT_TAG: u8 = 5; +const RESERVATION_PURSE_TAG: u8 = 6; +const UNBOND_ACCOUNT_TAG: u8 = 7; +const UNBOND_PURSE_TAG: u8 = 8; +const VALIDATOR_REV_PURSE_TAG: u8 = 9; + +/// Serialization tag for BidAddr variants. 
+#[derive( + Debug, Default, PartialOrd, Ord, PartialEq, Eq, Hash, Clone, Copy, Serialize, Deserialize, +)] +#[repr(u8)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub enum BidAddrTag { + /// BidAddr for legacy unified bid. + Unified = UNIFIED_TAG, + /// BidAddr for validator bid. + #[default] + Validator = VALIDATOR_TAG, + /// BidAddr for delegated account bid. + DelegatedAccount = DELEGATED_ACCOUNT_TAG, + /// BidAddr for delegated purse bid. + DelegatedPurse = DELEGATED_PURSE_TAG, + + /// BidAddr for auction credit. + Credit = CREDIT_TAG, + + /// BidAddr for reserved delegation account bid. + ReservedDelegationAccount = RESERVATION_ACCOUNT_TAG, + /// BidAddr for reserved delegation purse bid. + ReservedDelegationPurse = RESERVATION_PURSE_TAG, + /// BidAddr for unbonding accounts. + UnbondAccount = UNBOND_ACCOUNT_TAG, + /// BidAddr for unbonding purses. + UnbondPurse = UNBOND_PURSE_TAG, + /// BidAddr for reverse validator look up. + ValidatorRev = VALIDATOR_REV_PURSE_TAG, +} + +impl Display for BidAddrTag { + fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { + let tag = match self { + BidAddrTag::Unified => UNIFIED_TAG, + BidAddrTag::Validator => VALIDATOR_TAG, + BidAddrTag::DelegatedAccount => DELEGATED_ACCOUNT_TAG, + BidAddrTag::DelegatedPurse => DELEGATED_PURSE_TAG, + + BidAddrTag::Credit => CREDIT_TAG, + BidAddrTag::ReservedDelegationAccount => RESERVATION_ACCOUNT_TAG, + BidAddrTag::ReservedDelegationPurse => RESERVATION_PURSE_TAG, + BidAddrTag::UnbondAccount => UNBOND_ACCOUNT_TAG, + BidAddrTag::UnbondPurse => UNBOND_PURSE_TAG, + BidAddrTag::ValidatorRev => VALIDATOR_REV_PURSE_TAG, + }; + write!(f, "{}", base16::encode_lower(&[tag])) + } +} + +impl BidAddrTag { + /// The length in bytes of a [`BidAddrTag`]. + pub const BID_ADDR_TAG_LENGTH: usize = 1; + + /// Attempts to map `BidAddrTag` from a u8. 
+ pub fn try_from_u8(value: u8) -> Option { + // TryFrom requires std, so doing this instead. + if value == UNIFIED_TAG { + return Some(BidAddrTag::Unified); + } + if value == VALIDATOR_TAG { + return Some(BidAddrTag::Validator); + } + if value == DELEGATED_ACCOUNT_TAG { + return Some(BidAddrTag::DelegatedAccount); + } + if value == DELEGATED_PURSE_TAG { + return Some(BidAddrTag::DelegatedPurse); + } + + if value == CREDIT_TAG { + return Some(BidAddrTag::Credit); + } + if value == RESERVATION_ACCOUNT_TAG { + return Some(BidAddrTag::ReservedDelegationAccount); + } + if value == RESERVATION_PURSE_TAG { + return Some(BidAddrTag::ReservedDelegationPurse); + } + if value == UNBOND_ACCOUNT_TAG { + return Some(BidAddrTag::UnbondAccount); + } + if value == UNBOND_PURSE_TAG { + return Some(BidAddrTag::UnbondPurse); + } + if value == VALIDATOR_REV_PURSE_TAG { + return Some(BidAddrTag::ValidatorRev); + } + None + } +} + +/// Bid Address +#[derive(PartialOrd, Ord, PartialEq, Eq, Hash, Clone, Copy, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub enum BidAddr { + /// Unified BidAddr. + Unified(AccountHash), + /// Validator BidAddr. + Validator(AccountHash), + /// Delegated account BidAddr. + DelegatedAccount { + /// The validator addr. + validator: AccountHash, + /// The delegator addr. + delegator: AccountHash, + }, + /// Delegated purse BidAddr. + DelegatedPurse { + /// The validator addr. + validator: AccountHash, + /// The delegated purse addr. + delegator: URefAddr, + }, + /// Validator credit BidAddr. + Credit { + /// The validator addr. + validator: AccountHash, + /// The era id. + era_id: EraId, + }, + /// Reserved delegation account BidAddr + ReservedDelegationAccount { + /// The validator addr. + validator: AccountHash, + /// The delegator addr. + delegator: AccountHash, + }, + /// Reserved delegation purse BidAddr + ReservedDelegationPurse { + /// The validator addr. 
+ validator: AccountHash, + /// The delegated purse addr. + delegator: URefAddr, + }, + UnbondAccount { + /// The validator. + validator: AccountHash, + /// The unbonder. + unbonder: AccountHash, + }, + UnbondPurse { + /// The validator. + validator: AccountHash, + /// The unbonder. + unbonder: URefAddr, + }, + /// Validator BidAddr for reverse look up. + /// For instance, in the case of a changed public key. + ValidatorRev(AccountHash), +} + +impl BidAddr { + /// The length in bytes of a [`BidAddr`] for a validator bid. + pub const VALIDATOR_BID_ADDR_LENGTH: usize = + ACCOUNT_HASH_LENGTH + BidAddrTag::BID_ADDR_TAG_LENGTH; + + /// The length in bytes of a [`BidAddr`] for a delegator bid. + pub const DELEGATOR_BID_ADDR_LENGTH: usize = + (ACCOUNT_HASH_LENGTH * 2) + BidAddrTag::BID_ADDR_TAG_LENGTH; + + /// Constructs a new [`BidAddr`] instance from a validator's [`AccountHash`]. + pub const fn new_validator_addr(validator: [u8; ACCOUNT_HASH_LENGTH]) -> Self { + BidAddr::Validator(AccountHash::new(validator)) + } + + /// Constructs a new [`BidAddr`] instance from a validator's [`AccountHash`]. + pub const fn new_validator_rev_addr(validator: [u8; ACCOUNT_HASH_LENGTH]) -> Self { + BidAddr::ValidatorRev(AccountHash::new(validator)) + } + + /// Constructs a new [`BidAddr`] instance from a validator's [`PublicKey`]. + pub fn new_validator_addr_from_public_key(validator_public_key: PublicKey) -> Self { + BidAddr::Validator(validator_public_key.to_account_hash()) + } + + /// Constructs a new [`BidAddr`] instance from a validator's [`PublicKey`]. + pub fn new_validator_rev_addr_from_public_key(validator_public_key: PublicKey) -> Self { + BidAddr::ValidatorRev(validator_public_key.to_account_hash()) + } + + /// Constructs a new [`BidAddr::DelegatedAccount`] instance from the [`AccountHash`] pair of a + /// validator and a delegator. 
+ pub const fn new_delegator_account_addr( + pair: ([u8; ACCOUNT_HASH_LENGTH], [u8; ACCOUNT_HASH_LENGTH]), + ) -> Self { + BidAddr::DelegatedAccount { + validator: AccountHash::new(pair.0), + delegator: AccountHash::new(pair.1), + } + } + + /// Constructs a new [`BidAddr::ReservedDelegationAccount`] instance from the [`AccountHash`] + /// pair of a validator and a delegator. + pub const fn new_reservation_account_addr( + pair: ([u8; ACCOUNT_HASH_LENGTH], [u8; ACCOUNT_HASH_LENGTH]), + ) -> Self { + BidAddr::ReservedDelegationAccount { + validator: AccountHash::new(pair.0), + delegator: AccountHash::new(pair.1), + } + } + + #[allow(missing_docs)] + pub const fn legacy(validator: [u8; ACCOUNT_HASH_LENGTH]) -> Self { + BidAddr::Unified(AccountHash::new(validator)) + } + + /// Create a new instance of a [`BidAddr`]. + pub fn new_delegator_kind_relaxed( + validator: AccountHash, + delegator_kind: &DelegatorKind, + ) -> Self { + match delegator_kind { + DelegatorKind::PublicKey(pk) => BidAddr::DelegatedAccount { + validator, + delegator: pk.to_account_hash(), + }, + DelegatorKind::Purse(addr) => BidAddr::DelegatedPurse { + validator, + delegator: *addr, + }, + } + } + + /// Create a new instance of a [`BidAddr`]. + pub fn new_delegator_kind(validator: &PublicKey, delegator_kind: &DelegatorKind) -> Self { + Self::new_delegator_kind_relaxed(validator.to_account_hash(), delegator_kind) + } + + /// Create a new instance of a [`BidAddr`] for delegator unbonds. + pub fn new_delegator_unbond_relaxed( + validator: AccountHash, + delegator_kind: &DelegatorKind, + ) -> Self { + match &delegator_kind { + DelegatorKind::PublicKey(pk) => BidAddr::UnbondAccount { + validator, + unbonder: pk.to_account_hash(), + }, + DelegatorKind::Purse(addr) => BidAddr::UnbondPurse { + validator, + unbonder: *addr, + }, + } + } + + /// Create a new instance of a [`BidAddr`] for delegator unbonds. 
+ pub fn new_delegator_unbond(validator: &PublicKey, delegator_kind: &DelegatorKind) -> Self { + Self::new_delegator_unbond_relaxed(validator.to_account_hash(), delegator_kind) + } + + /// Create a new instance of a [`BidAddr`]. + pub fn new_from_public_keys( + validator: &PublicKey, + maybe_delegator: Option<&PublicKey>, + ) -> Self { + if let Some(delegator) = maybe_delegator { + BidAddr::DelegatedAccount { + validator: AccountHash::from(validator), + delegator: AccountHash::from(delegator), + } + } else { + BidAddr::Validator(AccountHash::from(validator)) + } + } + + /// Create a new instance of a [`BidAddr`]. + pub fn new_purse_delegation(validator: &PublicKey, delegator: URefAddr) -> Self { + BidAddr::DelegatedPurse { + validator: validator.to_account_hash(), + delegator, + } + } + + /// Create a new instance of a [`BidAddr`]. + pub fn new_credit(validator: &PublicKey, era_id: EraId) -> Self { + BidAddr::Credit { + validator: AccountHash::from(validator), + era_id, + } + } + + /// Create a new instance of a [`BidAddr`]. + pub fn new_reservation_account(validator: &PublicKey, delegator: &PublicKey) -> Self { + BidAddr::ReservedDelegationAccount { + validator: validator.into(), + delegator: delegator.into(), + } + } + + /// Create a new instance of a [`BidAddr`]. + pub fn new_reservation_purse(validator: &PublicKey, delegator: URefAddr) -> Self { + BidAddr::ReservedDelegationPurse { + validator: validator.to_account_hash(), + delegator, + } + } + + /// Create a new instance of a [`BidAddr`]. + pub fn new_unbond_account(validator: PublicKey, unbonder: PublicKey) -> Self { + BidAddr::UnbondAccount { + validator: validator.to_account_hash(), + unbonder: unbonder.to_account_hash(), + } + } + + /// Returns the common prefix of all delegated accounts to the cited validator. 
+ pub fn delegated_account_prefix(&self) -> Result, Error> { + let validator = self.validator_account_hash(); + let mut ret = Vec::with_capacity(validator.serialized_length() + 2); + ret.push(KeyTag::BidAddr as u8); + ret.push(BidAddrTag::DelegatedAccount as u8); + validator.write_bytes(&mut ret)?; + Ok(ret) + } + + /// Returns the common prefix of all delegated purses to the cited validator. + pub fn delegated_purse_prefix(&self) -> Result, Error> { + let validator = self.validator_account_hash(); + let mut ret = Vec::with_capacity(validator.serialized_length() + 2); + ret.push(KeyTag::BidAddr as u8); + ret.push(BidAddrTag::DelegatedPurse as u8); + validator.write_bytes(&mut ret)?; + Ok(ret) + } + + /// Returns the common prefix of all reservations for accounts to the cited validator. + pub fn reserved_account_prefix(&self) -> Result, Error> { + let validator = self.validator_account_hash(); + let mut ret = Vec::with_capacity(validator.serialized_length() + 2); + ret.push(KeyTag::BidAddr as u8); + ret.push(BidAddrTag::ReservedDelegationAccount as u8); + validator.write_bytes(&mut ret)?; + Ok(ret) + } + + /// Returns the common prefix of all reservations for purses to the cited validator. + pub fn reserved_purse_prefix(&self) -> Result, Error> { + let validator = self.validator_account_hash(); + let mut ret = Vec::with_capacity(validator.serialized_length() + 2); + ret.push(KeyTag::BidAddr as u8); + ret.push(BidAddrTag::ReservedDelegationPurse as u8); + validator.write_bytes(&mut ret)?; + Ok(ret) + } + + /// Validator account hash. + pub fn validator_account_hash(&self) -> AccountHash { + match self { + BidAddr::Unified(account_hash) + | BidAddr::Validator(account_hash) + | BidAddr::ValidatorRev(account_hash) => *account_hash, + BidAddr::DelegatedAccount { validator, .. } + | BidAddr::DelegatedPurse { validator, .. } + | BidAddr::Credit { validator, .. } + | BidAddr::ReservedDelegationAccount { validator, .. } + | BidAddr::ReservedDelegationPurse { validator, .. 
} + | BidAddr::UnbondAccount { validator, .. } + | BidAddr::UnbondPurse { validator, .. } => *validator, + } + } + + /// Delegator account hash or none. + pub fn maybe_delegator_account_hash(&self) -> Option { + match self { + BidAddr::Unified(_) + | BidAddr::Validator(_) + | BidAddr::ValidatorRev(_) + | BidAddr::Credit { .. } + | BidAddr::DelegatedPurse { .. } + | BidAddr::ReservedDelegationPurse { .. } + | BidAddr::UnbondPurse { .. } => None, + BidAddr::DelegatedAccount { delegator, .. } + | BidAddr::ReservedDelegationAccount { delegator, .. } => Some(*delegator), + BidAddr::UnbondAccount { unbonder, .. } => Some(*unbonder), + } + } + + /// Delegator purse addr or none. + pub fn maybe_delegator_purse(&self) -> Option { + match self { + BidAddr::Unified(_) + | BidAddr::Validator(_) + | BidAddr::ValidatorRev(_) + | BidAddr::Credit { .. } + | BidAddr::DelegatedAccount { .. } + | BidAddr::ReservedDelegationAccount { .. } + | BidAddr::UnbondAccount { .. } => None, + BidAddr::DelegatedPurse { delegator, .. } + | BidAddr::ReservedDelegationPurse { delegator, .. } => Some(*delegator), + BidAddr::UnbondPurse { unbonder, .. } => Some(*unbonder), + } + } + + /// Era id or none. + pub fn maybe_era_id(&self) -> Option { + match self { + BidAddr::Unified(_) + | BidAddr::Validator(_) + | BidAddr::ValidatorRev(_) + | BidAddr::DelegatedAccount { .. } + | BidAddr::DelegatedPurse { .. } + | BidAddr::ReservedDelegationAccount { .. } + | BidAddr::ReservedDelegationPurse { .. } + | BidAddr::UnbondPurse { .. } + | BidAddr::UnbondAccount { .. } => None, + BidAddr::Credit { era_id, .. } => Some(*era_id), + } + } + + /// If true, this instance is the key for a delegator bid record. + /// Else, it is the key for a validator bid record. + pub fn is_delegator_bid_addr(&self) -> bool { + match self { + BidAddr::Unified(_) + | BidAddr::Validator(_) + | BidAddr::ValidatorRev(_) + | BidAddr::Credit { .. } + | BidAddr::ReservedDelegationAccount { .. } + | BidAddr::ReservedDelegationPurse { .. 
} + | BidAddr::UnbondPurse { .. } + | BidAddr::UnbondAccount { .. } => false, + BidAddr::DelegatedAccount { .. } | BidAddr::DelegatedPurse { .. } => true, + } + } + + /// How long will be the serialized value for this instance. + pub fn serialized_length(&self) -> usize { + match self { + BidAddr::Unified(account_hash) + | BidAddr::Validator(account_hash) + | BidAddr::ValidatorRev(account_hash) => ToBytes::serialized_length(account_hash) + 1, + BidAddr::DelegatedAccount { + validator, + delegator, + } => ToBytes::serialized_length(validator) + ToBytes::serialized_length(delegator) + 1, + BidAddr::DelegatedPurse { + validator, + delegator, + } => ToBytes::serialized_length(validator) + ToBytes::serialized_length(delegator) + 1, + BidAddr::Credit { validator, era_id } => { + ToBytes::serialized_length(validator) + ToBytes::serialized_length(era_id) + 1 + } + BidAddr::ReservedDelegationAccount { + validator, + delegator, + } => ToBytes::serialized_length(validator) + ToBytes::serialized_length(delegator) + 1, + BidAddr::ReservedDelegationPurse { + validator, + delegator, + } => ToBytes::serialized_length(validator) + ToBytes::serialized_length(delegator) + 1, + BidAddr::UnbondAccount { + validator, + unbonder, + } => ToBytes::serialized_length(validator) + ToBytes::serialized_length(unbonder) + 1, + BidAddr::UnbondPurse { + validator, + unbonder, + } => ToBytes::serialized_length(validator) + ToBytes::serialized_length(unbonder) + 1, + } + } + + /// Returns the BiddAddrTag of this instance. + pub fn tag(&self) -> BidAddrTag { + match self { + BidAddr::Unified(_) => BidAddrTag::Unified, + BidAddr::Validator(_) => BidAddrTag::Validator, + BidAddr::ValidatorRev(_) => BidAddrTag::ValidatorRev, + BidAddr::DelegatedAccount { .. } => BidAddrTag::DelegatedAccount, + BidAddr::DelegatedPurse { .. } => BidAddrTag::DelegatedPurse, + + BidAddr::Credit { .. } => BidAddrTag::Credit, + BidAddr::ReservedDelegationAccount { .. 
} => BidAddrTag::ReservedDelegationAccount, + BidAddr::ReservedDelegationPurse { .. } => BidAddrTag::ReservedDelegationPurse, + BidAddr::UnbondAccount { .. } => BidAddrTag::UnbondAccount, + BidAddr::UnbondPurse { .. } => BidAddrTag::UnbondPurse, + } + } +} + +impl ToBytes for BidAddr { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + buffer.push(self.tag() as u8); + buffer.append(&mut self.validator_account_hash().to_bytes()?); + if let Some(delegator) = self.maybe_delegator_purse() { + buffer.append(&mut delegator.to_bytes()?); + } + if let Some(delegator) = self.maybe_delegator_account_hash() { + buffer.append(&mut delegator.to_bytes()?); + } + if let Some(era_id) = self.maybe_era_id() { + buffer.append(&mut era_id.to_bytes()?); + } + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.serialized_length() + } +} + +impl FromBytes for BidAddr { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder): (u8, &[u8]) = FromBytes::from_bytes(bytes)?; + match tag { + tag if tag == BidAddrTag::Unified as u8 => AccountHash::from_bytes(remainder) + .map(|(account_hash, remainder)| (BidAddr::Unified(account_hash), remainder)), + tag if tag == BidAddrTag::Validator as u8 => AccountHash::from_bytes(remainder) + .map(|(account_hash, remainder)| (BidAddr::Validator(account_hash), remainder)), + tag if tag == BidAddrTag::ValidatorRev as u8 => AccountHash::from_bytes(remainder) + .map(|(account_hash, remainder)| (BidAddr::ValidatorRev(account_hash), remainder)), + tag if tag == BidAddrTag::DelegatedAccount as u8 => { + let (validator, remainder) = AccountHash::from_bytes(remainder)?; + let (delegator, remainder) = AccountHash::from_bytes(remainder)?; + Ok(( + BidAddr::DelegatedAccount { + validator, + delegator, + }, + remainder, + )) + } + tag if tag == BidAddrTag::DelegatedPurse as u8 => { + let (validator, remainder) = AccountHash::from_bytes(remainder)?; + let 
(delegator, remainder) = URefAddr::from_bytes(remainder)?; + Ok(( + BidAddr::DelegatedPurse { + validator, + delegator, + }, + remainder, + )) + } + + tag if tag == BidAddrTag::Credit as u8 => { + let (validator, remainder) = AccountHash::from_bytes(remainder)?; + let (era_id, remainder) = EraId::from_bytes(remainder)?; + Ok((BidAddr::Credit { validator, era_id }, remainder)) + } + tag if tag == BidAddrTag::ReservedDelegationAccount as u8 => { + let (validator, remainder) = AccountHash::from_bytes(remainder)?; + let (delegator, remainder) = AccountHash::from_bytes(remainder)?; + Ok(( + BidAddr::ReservedDelegationAccount { + validator, + delegator, + }, + remainder, + )) + } + tag if tag == BidAddrTag::ReservedDelegationPurse as u8 => { + let (validator, remainder) = AccountHash::from_bytes(remainder)?; + let (delegator, remainder) = URefAddr::from_bytes(remainder)?; + Ok(( + BidAddr::ReservedDelegationPurse { + validator, + delegator, + }, + remainder, + )) + } + tag if tag == BidAddrTag::UnbondAccount as u8 => { + let (validator, remainder) = AccountHash::from_bytes(remainder)?; + let (unbonder, remainder) = AccountHash::from_bytes(remainder)?; + Ok(( + BidAddr::UnbondAccount { + validator, + unbonder, + }, + remainder, + )) + } + tag if tag == BidAddrTag::UnbondPurse as u8 => { + let (validator, remainder) = AccountHash::from_bytes(remainder)?; + let (unbonder, remainder) = URefAddr::from_bytes(remainder)?; + Ok(( + BidAddr::UnbondPurse { + validator, + unbonder, + }, + remainder, + )) + } + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +impl Default for BidAddr { + fn default() -> Self { + BidAddr::Validator(AccountHash::default()) + } +} + +impl From for Key { + fn from(bid_addr: BidAddr) -> Self { + Key::BidAddr(bid_addr) + } +} + +impl From for BidAddr { + fn from(account_hash: AccountHash) -> Self { + BidAddr::Validator(account_hash) + } +} + +impl From for BidAddr { + fn from(public_key: PublicKey) -> Self { + 
BidAddr::Validator(public_key.to_account_hash()) + } +} + +impl Display for BidAddr { + fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { + let tag = self.tag(); + match self { + BidAddr::Unified(account_hash) + | BidAddr::Validator(account_hash) + | BidAddr::ValidatorRev(account_hash) => { + write!(f, "{}{}", tag, account_hash) + } + BidAddr::DelegatedAccount { + validator, + delegator, + } => write!(f, "{}{}{}", tag, validator, delegator), + BidAddr::DelegatedPurse { + validator, + delegator, + } => write!( + f, + "{}{}{}", + tag, + validator, + base16::encode_lower(&delegator), + ), + + BidAddr::Credit { validator, era_id } => write!( + f, + "{}{}{}", + tag, + validator, + base16::encode_lower(&era_id.to_le_bytes()) + ), + BidAddr::ReservedDelegationAccount { + validator, + delegator, + } => write!(f, "{}{}{}", tag, validator, delegator), + BidAddr::ReservedDelegationPurse { + validator, + delegator, + } => write!( + f, + "{}{}{}", + tag, + validator, + base16::encode_lower(&delegator), + ), + BidAddr::UnbondAccount { + validator, + unbonder, + } => write!(f, "{}{}{}", tag, validator, unbonder,), + BidAddr::UnbondPurse { + validator, + unbonder, + } => write!(f, "{}{}{}", tag, validator, base16::encode_lower(&unbonder),), + } + } +} + +impl Debug for BidAddr { + fn fmt(&self, f: &mut Formatter) -> core::fmt::Result { + match self { + BidAddr::Unified(validator) => write!(f, "BidAddr::Unified({:?})", validator), + BidAddr::Validator(validator) => write!(f, "BidAddr::Validator({:?})", validator), + BidAddr::ValidatorRev(validator) => write!(f, "BidAddr::ValidatorRev({:?})", validator), + BidAddr::DelegatedAccount { + validator, + delegator, + } => { + write!( + f, + "BidAddr::DelegatedAccount({:?}{:?})", + validator, delegator + ) + } + BidAddr::DelegatedPurse { + validator, + delegator, + } => { + write!(f, "BidAddr::DelegatedPurse({:?}{:?})", validator, delegator) + } + BidAddr::Credit { validator, era_id } => { + write!(f, "BidAddr::Credit({:?}{:?})", 
validator, era_id) + } + BidAddr::ReservedDelegationAccount { + validator, + delegator, + } => { + write!( + f, + "BidAddr::ReservedDelegationAccount({:?}{:?})", + validator, delegator + ) + } + BidAddr::ReservedDelegationPurse { + validator, + delegator, + } => { + write!( + f, + "BidAddr::ReservedDelegationPurse({:?}{:?})", + validator, delegator + ) + } + BidAddr::UnbondAccount { + validator, + unbonder, + } => { + write!(f, "BidAddr::UnbondAccount({:?}{:?})", validator, unbonder) + } + BidAddr::UnbondPurse { + validator, + unbonder, + } => { + write!(f, "BidAddr::UnbondPurse({:?}{:?})", validator, unbonder) + } + } + } +} + +#[cfg(any(feature = "testing", test))] +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> BidAddr { + BidAddr::Validator(AccountHash::new(rng.gen())) + } +} + +#[cfg(test)] +mod tests { + use crate::{bytesrepr, system::auction::BidAddr, EraId, PublicKey, SecretKey}; + + #[test] + fn serialization_roundtrip() { + let bid_addr = BidAddr::legacy([1; 32]); + bytesrepr::test_serialization_roundtrip(&bid_addr); + let bid_addr = BidAddr::new_validator_addr([1; 32]); + bytesrepr::test_serialization_roundtrip(&bid_addr); + let bid_addr = BidAddr::new_delegator_account_addr(([1; 32], [2; 32])); + bytesrepr::test_serialization_roundtrip(&bid_addr); + let bid_addr = BidAddr::new_credit( + &PublicKey::from( + &SecretKey::ed25519_from_bytes([0u8; SecretKey::ED25519_LENGTH]).unwrap(), + ), + EraId::new(0), + ); + bytesrepr::test_serialization_roundtrip(&bid_addr); + let bid_addr = BidAddr::new_reservation_account_addr(([1; 32], [2; 32])); + bytesrepr::test_serialization_roundtrip(&bid_addr); + } +} + +#[cfg(test)] +mod proptest { + use proptest::prelude::*; + + use crate::{bytesrepr, gens}; + + proptest! 
{ + #[test] + fn test_value_bid_addr_validator(bid_addr in gens::bid_addr_arb()) { + bytesrepr::test_serialization_roundtrip(&bid_addr); + } + } +} diff --git a/types/src/system/auction/bid_kind.rs b/types/src/system/auction/bid_kind.rs new file mode 100644 index 0000000000..178675e040 --- /dev/null +++ b/types/src/system/auction/bid_kind.rs @@ -0,0 +1,534 @@ +use crate::{ + bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + system::auction::{ + bid::VestingSchedule, Bid, BidAddr, DelegatorBid, ValidatorBid, ValidatorCredit, + }, + CLType, CLTyped, EraId, PublicKey, URef, U512, +}; + +use crate::system::auction::{ + delegator_kind::DelegatorKind, + unbond::{Unbond, UnbondKind}, + Bridge, +}; +use alloc::{boxed::Box, vec::Vec}; +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use super::Reservation; + +/// BidKindTag variants. +#[allow(clippy::large_enum_variant)] +#[repr(u8)] +#[derive(Debug, PartialEq, Eq, Serialize, Deserialize, Clone)] +pub enum BidKindTag { + /// Unified bid. + Unified = 0, + /// Validator bid. + Validator = 1, + /// Delegator bid. + Delegator = 2, + /// Bridge record. + Bridge = 3, + /// Validator credit bid. + Credit = 4, + /// Reservation bid. + Reservation = 5, + /// Unbond. + Unbond = 6, +} + +/// Auction bid variants. +#[derive(Debug, PartialEq, Eq, Serialize, Deserialize, Clone)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub enum BidKind { + /// A unified record indexed on validator data, with an embedded collection of all delegator + /// bids assigned to that validator. The Unified variant is for legacy retrograde support, new + /// instances will not be created going forward. + Unified(Box), + /// A bid record containing only validator data. + Validator(Box), + /// A bid record containing only delegator data. 
+ Delegator(Box), + /// A bridge record pointing to a new `ValidatorBid` after the public key was changed. + Bridge(Box), + /// Credited amount. + Credit(Box), + /// Reservation + Reservation(Box), + /// Unbond + Unbond(Box), +} + +impl BidKind { + /// Returns validator public key. + pub fn validator_public_key(&self) -> PublicKey { + match self { + BidKind::Unified(bid) => bid.validator_public_key().clone(), + BidKind::Validator(validator_bid) => validator_bid.validator_public_key().clone(), + BidKind::Delegator(delegator_bid) => delegator_bid.validator_public_key().clone(), + BidKind::Bridge(bridge) => bridge.old_validator_public_key().clone(), + BidKind::Credit(validator_credit) => validator_credit.validator_public_key().clone(), + BidKind::Reservation(reservation) => reservation.validator_public_key().clone(), + BidKind::Unbond(unbond) => unbond.validator_public_key().clone(), + } + } + + /// Returns new validator public key if it was changed. + pub fn new_validator_public_key(&self) -> Option { + match self { + BidKind::Bridge(bridge) => Some(bridge.new_validator_public_key().clone()), + BidKind::Unified(_) + | BidKind::Validator(_) + | BidKind::Delegator(_) + | BidKind::Credit(_) + | BidKind::Reservation(_) + | BidKind::Unbond(_) => None, + } + } + + /// Returns BidAddr. 
+ pub fn bid_addr(&self) -> BidAddr { + match self { + BidKind::Unified(bid) => BidAddr::Unified(bid.validator_public_key().to_account_hash()), + BidKind::Validator(validator_bid) => { + BidAddr::Validator(validator_bid.validator_public_key().to_account_hash()) + } + BidKind::Delegator(delegator_bid) => { + let validator = delegator_bid.validator_public_key().to_account_hash(); + match delegator_bid.delegator_kind() { + DelegatorKind::PublicKey(pk) => BidAddr::DelegatedAccount { + validator, + delegator: pk.to_account_hash(), + }, + DelegatorKind::Purse(addr) => BidAddr::DelegatedPurse { + validator, + delegator: *addr, + }, + } + } + BidKind::Bridge(bridge) => { + BidAddr::Validator(bridge.old_validator_public_key().to_account_hash()) + } + BidKind::Credit(credit) => { + let validator = credit.validator_public_key().to_account_hash(); + let era_id = credit.era_id(); + BidAddr::Credit { validator, era_id } + } + BidKind::Reservation(reservation_bid) => { + let validator = reservation_bid.validator_public_key().to_account_hash(); + match reservation_bid.delegator_kind() { + DelegatorKind::PublicKey(pk) => BidAddr::ReservedDelegationAccount { + validator, + delegator: pk.to_account_hash(), + }, + DelegatorKind::Purse(addr) => BidAddr::ReservedDelegationPurse { + validator, + delegator: *addr, + }, + } + } + BidKind::Unbond(unbond) => { + let validator = unbond.validator_public_key().to_account_hash(); + let unbond_kind = unbond.unbond_kind(); + match unbond_kind { + UnbondKind::Validator(pk) | UnbondKind::DelegatedPublicKey(pk) => { + BidAddr::UnbondAccount { + validator, + unbonder: pk.to_account_hash(), + } + } + UnbondKind::DelegatedPurse(addr) => BidAddr::UnbondPurse { + validator, + unbonder: *addr, + }, + } + } + } + } + + /// Is this instance a unified bid? + pub fn is_unified(&self) -> bool { + matches!(self, BidKind::Unified(_)) + } + + /// Is this instance a validator bid? 
+ pub fn is_validator(&self) -> bool { + matches!(self, BidKind::Validator(_)) + } + + /// Is this instance a delegator bid? + pub fn is_delegator(&self) -> bool { + matches!(self, BidKind::Delegator(_)) + } + + /// Is this instance a bridge record? + pub fn is_bridge(&self) -> bool { + matches!(self, BidKind::Bridge(_)) + } + + /// Is this instance a validator credit? + pub fn is_credit(&self) -> bool { + matches!(self, BidKind::Credit(_)) + } + + /// Is this instance a reservation? + pub fn is_reservation(&self) -> bool { + matches!(self, BidKind::Reservation(_)) + } + + /// Is this instance a unbond record? + pub fn is_unbond(&self) -> bool { + matches!(self, BidKind::Unbond(_)) + } + + /// The staked amount. + pub fn staked_amount(&self) -> Option { + match self { + BidKind::Unified(bid) => Some(*bid.staked_amount()), + BidKind::Validator(validator_bid) => Some(validator_bid.staked_amount()), + BidKind::Delegator(delegator) => Some(delegator.staked_amount()), + BidKind::Credit(credit) => Some(credit.amount()), + BidKind::Bridge(_) | BidKind::Reservation(_) | BidKind::Unbond(_) => None, + } + } + + /// The bonding purse. + pub fn bonding_purse(&self) -> Option { + match self { + BidKind::Unified(bid) => Some(*bid.bonding_purse()), + BidKind::Validator(validator_bid) => Some(*validator_bid.bonding_purse()), + BidKind::Delegator(delegator) => Some(*delegator.bonding_purse()), + BidKind::Unbond(_) + | BidKind::Bridge(_) + | BidKind::Credit(_) + | BidKind::Reservation(_) => None, + } + } + + /// The delegator kind, if relevant. 
+ pub fn delegator_kind(&self) -> Option { + match self { + BidKind::Unified(_) + | BidKind::Validator(_) + | BidKind::Bridge(_) + | BidKind::Credit(_) => None, + BidKind::Unbond(unbond) => match unbond.unbond_kind() { + UnbondKind::Validator(_) => None, + UnbondKind::DelegatedPublicKey(pk) => Some(DelegatorKind::PublicKey(pk.clone())), + UnbondKind::DelegatedPurse(addr) => Some(DelegatorKind::Purse(*addr)), + }, + BidKind::Delegator(bid) => Some(bid.delegator_kind().clone()), + BidKind::Reservation(bid) => Some(bid.delegator_kind().clone()), + } + } + + /// The unbond kind, if relevant. + pub fn unbond_kind(&self) -> Option { + match self { + BidKind::Unified(_) + | BidKind::Validator(_) + | BidKind::Bridge(_) + | BidKind::Credit(_) + | BidKind::Delegator(_) + | BidKind::Reservation(_) => None, + BidKind::Unbond(unbond) => Some(unbond.unbond_kind().clone()), + } + } + + /// Is this bid inactive? + pub fn inactive(&self) -> bool { + match self { + BidKind::Unified(bid) => bid.inactive(), + BidKind::Validator(validator_bid) => validator_bid.inactive(), + BidKind::Delegator(delegator) => delegator.staked_amount().is_zero(), + BidKind::Credit(credit) => credit.amount().is_zero(), + BidKind::Bridge(_) | BidKind::Reservation(_) | BidKind::Unbond(_) => false, + } + } + + /// Checks if a bid is still locked under a vesting schedule. + /// + /// Returns true if a timestamp falls below the initial lockup period + 91 days release + /// schedule, otherwise false. + pub fn is_locked(&self, timestamp_millis: u64) -> bool { + match self { + BidKind::Unified(bid) => bid.is_locked(timestamp_millis), + BidKind::Validator(validator_bid) => validator_bid.is_locked(timestamp_millis), + BidKind::Delegator(delegator) => delegator.is_locked(timestamp_millis), + BidKind::Bridge(_) + | BidKind::Credit(_) + | BidKind::Reservation(_) + | BidKind::Unbond(_) => false, + } + } + + /// Checks if a bid is still locked under a vesting schedule. 
+ /// + /// Returns true if a timestamp falls below the initial lockup period + 91 days release + /// schedule, otherwise false. + pub fn is_locked_with_vesting_schedule( + &self, + timestamp_millis: u64, + vesting_schedule_period_millis: u64, + ) -> bool { + match self { + BidKind::Unified(bid) => bid + .is_locked_with_vesting_schedule(timestamp_millis, vesting_schedule_period_millis), + BidKind::Validator(validator_bid) => validator_bid + .is_locked_with_vesting_schedule(timestamp_millis, vesting_schedule_period_millis), + BidKind::Delegator(delegator) => delegator + .is_locked_with_vesting_schedule(timestamp_millis, vesting_schedule_period_millis), + BidKind::Bridge(_) + | BidKind::Credit(_) + | BidKind::Reservation(_) + | BidKind::Unbond(_) => false, + } + } + + /// Returns a reference to the vesting schedule of the provided bid. `None` if a non-genesis + /// validator. + pub fn vesting_schedule(&self) -> Option<&VestingSchedule> { + match self { + BidKind::Unified(bid) => bid.vesting_schedule(), + BidKind::Validator(validator_bid) => validator_bid.vesting_schedule(), + BidKind::Delegator(delegator) => delegator.vesting_schedule(), + BidKind::Bridge(_) + | BidKind::Credit(_) + | BidKind::Reservation(_) + | BidKind::Unbond(_) => None, + } + } + + /// BidKindTag. + pub fn tag(&self) -> BidKindTag { + match self { + BidKind::Unified(_) => BidKindTag::Unified, + BidKind::Validator(_) => BidKindTag::Validator, + BidKind::Delegator(_) => BidKindTag::Delegator, + BidKind::Bridge(_) => BidKindTag::Bridge, + BidKind::Credit(_) => BidKindTag::Credit, + BidKind::Reservation(_) => BidKindTag::Reservation, + BidKind::Unbond(_) => BidKindTag::Unbond, + } + } + + /// The `[EraId]` associated with this `[BidKind]`, if any. 
+ pub fn era_id(&self) -> Option { + match self { + BidKind::Bridge(bridge) => Some(*bridge.era_id()), + BidKind::Credit(credit) => Some(credit.era_id()), + BidKind::Unified(_) + | BidKind::Validator(_) + | BidKind::Delegator(_) + | BidKind::Reservation(_) + | BidKind::Unbond(_) => None, + } + } +} + +impl CLTyped for BidKind { + fn cl_type() -> CLType { + CLType::Any + } +} + +impl ToBytes for BidKind { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut result = bytesrepr::allocate_buffer(self)?; + let (tag, mut serialized_data) = match self { + BidKind::Unified(bid) => (BidKindTag::Unified, bid.to_bytes()?), + BidKind::Validator(validator_bid) => (BidKindTag::Validator, validator_bid.to_bytes()?), + BidKind::Delegator(delegator_bid) => (BidKindTag::Delegator, delegator_bid.to_bytes()?), + BidKind::Bridge(bridge) => (BidKindTag::Bridge, bridge.to_bytes()?), + BidKind::Credit(credit) => (BidKindTag::Credit, credit.to_bytes()?), + BidKind::Reservation(reservation) => (BidKindTag::Reservation, reservation.to_bytes()?), + BidKind::Unbond(unbond) => (BidKindTag::Unbond, unbond.to_bytes()?), + }; + result.push(tag as u8); + result.append(&mut serialized_data); + Ok(result) + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + + match self { + BidKind::Unified(bid) => bid.serialized_length(), + BidKind::Validator(validator_bid) => validator_bid.serialized_length(), + BidKind::Delegator(delegator_bid) => delegator_bid.serialized_length(), + BidKind::Bridge(bridge) => bridge.serialized_length(), + BidKind::Credit(credit) => credit.serialized_length(), + BidKind::Reservation(reservation) => reservation.serialized_length(), + BidKind::Unbond(unbond) => unbond.serialized_length(), + } + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + writer.push(self.tag() as u8); + match self { + BidKind::Unified(bid) => bid.write_bytes(writer)?, + BidKind::Validator(validator_bid) => validator_bid.write_bytes(writer)?, + 
BidKind::Delegator(delegator_bid) => delegator_bid.write_bytes(writer)?, + BidKind::Bridge(bridge) => bridge.write_bytes(writer)?, + BidKind::Credit(credit) => credit.write_bytes(writer)?, + BidKind::Reservation(reservation) => reservation.write_bytes(writer)?, + BidKind::Unbond(unbond) => unbond.write_bytes(writer)?, + }; + Ok(()) + } +} + +impl FromBytes for BidKind { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder): (u8, &[u8]) = FromBytes::from_bytes(bytes)?; + match tag { + tag if tag == BidKindTag::Unified as u8 => Bid::from_bytes(remainder) + .map(|(bid, remainder)| (BidKind::Unified(Box::new(bid)), remainder)), + tag if tag == BidKindTag::Validator as u8 => { + ValidatorBid::from_bytes(remainder).map(|(validator_bid, remainder)| { + (BidKind::Validator(Box::new(validator_bid)), remainder) + }) + } + tag if tag == BidKindTag::Delegator as u8 => { + DelegatorBid::from_bytes(remainder).map(|(delegator_bid, remainder)| { + (BidKind::Delegator(Box::new(delegator_bid)), remainder) + }) + } + tag if tag == BidKindTag::Bridge as u8 => Bridge::from_bytes(remainder) + .map(|(bridge, remainder)| (BidKind::Bridge(Box::new(bridge)), remainder)), + tag if tag == BidKindTag::Credit as u8 => ValidatorCredit::from_bytes(remainder) + .map(|(credit, remainder)| (BidKind::Credit(Box::new(credit)), remainder)), + tag if tag == BidKindTag::Reservation as u8 => { + Reservation::from_bytes(remainder).map(|(reservation, remainder)| { + (BidKind::Reservation(Box::new(reservation)), remainder) + }) + } + tag if tag == BidKindTag::Unbond as u8 => Unbond::from_bytes(remainder) + .map(|(unbond, remainder)| (BidKind::Unbond(Box::new(unbond)), remainder)), + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +#[cfg(test)] +mod tests { + use super::{BidKind, *}; + use crate::{ + bytesrepr, + system::auction::{delegator_kind::DelegatorKind, DelegationRate}, + AccessRights, SecretKey, + }; + + #[test] + fn serialization_roundtrip() { + let 
validator_public_key = PublicKey::from( + &SecretKey::ed25519_from_bytes([0u8; SecretKey::ED25519_LENGTH]).unwrap(), + ); + let bonding_purse = URef::new([42; 32], AccessRights::READ_ADD_WRITE); + let bid = Bid::unlocked( + validator_public_key.clone(), + bonding_purse, + U512::one(), + DelegationRate::MAX, + ); + let unified_bid = BidKind::Unified(Box::new(bid.clone())); + let validator_bid = ValidatorBid::from(bid.clone()); + + let delegator_public_key = PublicKey::from( + &SecretKey::ed25519_from_bytes([1u8; SecretKey::ED25519_LENGTH]).unwrap(), + ); + + let delegator_kind = DelegatorKind::PublicKey(delegator_public_key); + + let delegator = DelegatorBid::unlocked( + delegator_kind, + U512::one(), + bonding_purse, + validator_public_key.clone(), + ); + let delegator_bid = BidKind::Delegator(Box::new(delegator)); + + let credit = ValidatorCredit::new(validator_public_key, EraId::new(0), U512::one()); + let credit_bid = BidKind::Credit(Box::new(credit)); + + bytesrepr::test_serialization_roundtrip(&bid); + bytesrepr::test_serialization_roundtrip(&unified_bid); + bytesrepr::test_serialization_roundtrip(&validator_bid); + bytesrepr::test_serialization_roundtrip(&delegator_bid); + bytesrepr::test_serialization_roundtrip(&credit_bid); + } +} + +#[cfg(test)] +mod prop_test_bid_kind_unified { + use proptest::prelude::*; + + use crate::{bytesrepr, gens}; + + proptest! { + #[test] + fn test_value_bid_kind_unified(bid_kind in gens::unified_bid_arb(0..3)) { + bytesrepr::test_serialization_roundtrip(&bid_kind); + } + } +} + +#[cfg(test)] +mod prop_test_bid_kind_validator { + use proptest::prelude::*; + + use crate::{bytesrepr, gens}; + + proptest! { + #[test] + fn test_value_bid_kind_validator(bid_kind in gens::validator_bid_arb()) { + bytesrepr::test_serialization_roundtrip(&bid_kind); + } + } +} + +#[cfg(test)] +mod prop_test_bid_kind_delegator { + use proptest::prelude::*; + + use crate::{bytesrepr, gens}; + + proptest! 
{ + #[test] + fn test_value_bid_kind_delegator(bid_kind in gens::delegator_bid_kind_arb()) { + bytesrepr::test_serialization_roundtrip(&bid_kind); + } + } +} + +#[cfg(test)] +mod prop_test_bid_kind_validator_credit { + use proptest::prelude::*; + + use crate::{bytesrepr, gens}; + + proptest! { + #[test] + fn test_value_bid_kind_validator_credit(bid_kind in gens::credit_bid_arb()) { + bytesrepr::test_serialization_roundtrip(&bid_kind); + } + } +} + +#[cfg(test)] +mod prop_test_bid_kind_reservation { + use proptest::prelude::*; + + use crate::{bytesrepr, gens}; + + proptest! { + #[test] + fn test_value_bid_kind_reservation(bid_kind in gens::reservation_bid_arb()) { + bytesrepr::test_serialization_roundtrip(&bid_kind); + } + } +} diff --git a/types/src/system/auction/bridge.rs b/types/src/system/auction/bridge.rs new file mode 100644 index 0000000000..f8e7b07d78 --- /dev/null +++ b/types/src/system/auction/bridge.rs @@ -0,0 +1,98 @@ +use alloc::vec::Vec; + +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + CLType, CLTyped, EraId, PublicKey, +}; +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +/// A bridge record pointing to a new `ValidatorBid` after the public key was changed. +#[derive(Debug, PartialEq, Eq, Serialize, Deserialize, Clone)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub struct Bridge { + /// Previous validator public key associated with the bid. + old_validator_public_key: PublicKey, + /// New validator public key associated with the bid. + new_validator_public_key: PublicKey, + /// Era when bridge record was created. + era_id: EraId, +} + +impl Bridge { + /// Creates new instance of a bridge record. 
+ pub fn new( + old_validator_public_key: PublicKey, + new_validator_public_key: PublicKey, + era_id: EraId, + ) -> Self { + Self { + old_validator_public_key, + new_validator_public_key, + era_id, + } + } + + /// Gets the old validator public key + pub fn old_validator_public_key(&self) -> &PublicKey { + &self.old_validator_public_key + } + + /// Gets the new validator public key + pub fn new_validator_public_key(&self) -> &PublicKey { + &self.new_validator_public_key + } + + /// Gets the era when key change happened + pub fn era_id(&self) -> &EraId { + &self.era_id + } +} + +impl CLTyped for Bridge { + fn cl_type() -> CLType { + CLType::Any + } +} + +impl ToBytes for Bridge { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut result = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut result)?; + Ok(result) + } + + fn serialized_length(&self) -> usize { + self.old_validator_public_key.serialized_length() + + self.new_validator_public_key.serialized_length() + + self.era_id.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.old_validator_public_key.write_bytes(writer)?; + self.new_validator_public_key.write_bytes(writer)?; + self.era_id.write_bytes(writer)?; + Ok(()) + } +} + +impl FromBytes for Bridge { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (old_validator_public_key, bytes) = FromBytes::from_bytes(bytes)?; + let (new_validator_public_key, bytes) = FromBytes::from_bytes(bytes)?; + let (era_id, bytes) = FromBytes::from_bytes(bytes)?; + Ok(( + Bridge { + old_validator_public_key, + new_validator_public_key, + era_id, + }, + bytes, + )) + } +} diff --git a/types/src/system/auction/constants.rs b/types/src/system/auction/constants.rs index c423db4877..ee1cdbe3f1 100644 --- a/types/src/system/auction/constants.rs +++ b/types/src/system/auction/constants.rs @@ -1,4 +1,4 @@ -use crate::system::auction::EraId; +use crate::EraId; use 
super::DelegationRate; @@ -20,26 +20,30 @@ pub const BLOCK_REWARD: u64 = 1_000_000_000_000; pub const ARG_AMOUNT: &str = "amount"; /// Named constant for `delegation_rate`. pub const ARG_DELEGATION_RATE: &str = "delegation_rate"; -/// Named constant for `account_hash`. +/// Named constant for `public_key`. pub const ARG_PUBLIC_KEY: &str = "public_key"; +/// Named constant for `new_public_key`. +pub const ARG_NEW_PUBLIC_KEY: &str = "new_public_key"; /// Named constant for `validator`. pub const ARG_VALIDATOR: &str = "validator"; /// Named constant for `delegator`. pub const ARG_DELEGATOR: &str = "delegator"; +/// Named constant for `delegator_purse`. +pub const ARG_DELEGATOR_PURSE: &str = "delegator_purse"; +/// Named constant for `delegators`. +pub const ARG_DELEGATORS: &str = "delegators"; +/// Named constant for `reservations`. +pub const ARG_RESERVATIONS: &str = "reservations"; /// Named constant for `validator_purse`. pub const ARG_VALIDATOR_PURSE: &str = "validator_purse"; /// Named constant for `validator_keys`. pub const ARG_VALIDATOR_KEYS: &str = "validator_keys"; /// Named constant for `validator_public_keys`. pub const ARG_VALIDATOR_PUBLIC_KEYS: &str = "validator_public_keys"; +/// Named constant for `new_validator`. +pub const ARG_NEW_VALIDATOR: &str = "new_validator"; /// Named constant for `era_id`. pub const ARG_ERA_ID: &str = "era_id"; -/// Named constant for `reward_factors`. -pub const ARG_REWARD_FACTORS: &str = "reward_factors"; -/// Named constant for `validator_public_key`. -pub const ARG_VALIDATOR_PUBLIC_KEY: &str = "validator_public_key"; -/// Named constant for `delegator_public_key`. -pub const ARG_DELEGATOR_PUBLIC_KEY: &str = "delegator_public_key"; /// Named constant for `validator_slots` argument. 
pub const ARG_VALIDATOR_SLOTS: &str = VALIDATOR_SLOTS_KEY; /// Named constant for `mint_contract_package_hash` @@ -56,6 +60,16 @@ pub const ARG_UNBONDING_DELAY: &str = "unbonding_delay"; pub const ARG_ERA_END_TIMESTAMP_MILLIS: &str = "era_end_timestamp_millis"; /// Named constant for `evicted_validators`; pub const ARG_EVICTED_VALIDATORS: &str = "evicted_validators"; +/// Named constant for `rewards_map`; +pub const ARG_REWARDS_MAP: &str = "rewards_map"; +/// Named constant for `entry_point`; +pub const ARG_ENTRY_POINT: &str = "entry_point"; +/// Named constrant for `minimum_delegation_amount`. +pub const ARG_MINIMUM_DELEGATION_AMOUNT: &str = "minimum_delegation_amount"; +/// Named constrant for `maximum_delegation_amount`. +pub const ARG_MAXIMUM_DELEGATION_AMOUNT: &str = "maximum_delegation_amount"; +/// Named constant for `reserved_slots`. +pub const ARG_RESERVED_SLOTS: &str = "reserved_slots"; /// Named constant for method `get_era_validators`. pub const METHOD_GET_ERA_VALIDATORS: &str = "get_era_validators"; @@ -67,6 +81,8 @@ pub const METHOD_WITHDRAW_BID: &str = "withdraw_bid"; pub const METHOD_DELEGATE: &str = "delegate"; /// Named constant for method `undelegate`. pub const METHOD_UNDELEGATE: &str = "undelegate"; +/// Named constant for method `redelegate`. +pub const METHOD_REDELEGATE: &str = "redelegate"; /// Named constant for method `run_auction`. pub const METHOD_RUN_AUCTION: &str = "run_auction"; /// Named constant for method `slash`. @@ -77,11 +93,25 @@ pub const METHOD_DISTRIBUTE: &str = "distribute"; pub const METHOD_READ_ERA_ID: &str = "read_era_id"; /// Named constant for method `activate_bid`. pub const METHOD_ACTIVATE_BID: &str = "activate_bid"; +/// Named constant for method `change_bid_public_key`. +pub const METHOD_CHANGE_BID_PUBLIC_KEY: &str = " change_bid_public_key"; +/// Named constant for method `add_reservations`. +pub const METHOD_ADD_RESERVATIONS: &str = "add_reservations"; +/// Named constant for method `cancel_reservations`. 
+pub const METHOD_CANCEL_RESERVATIONS: &str = "cancel_reservations"; /// Storage for `EraId`. pub const ERA_ID_KEY: &str = "era_id"; /// Storage for era-end timestamp. pub const ERA_END_TIMESTAMP_MILLIS_KEY: &str = "era_end_timestamp_millis"; +/// Storage for `SeigniorageRecipientsSnapshot`. +pub const SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY: &str = "seigniorage_recipients_snapshot"; +/// Storage for a flag determining current version of `SeigniorageRecipientsSnapshot`. +pub const SEIGNIORAGE_RECIPIENTS_SNAPSHOT_VERSION_KEY: &str = + "seigniorage_recipients_snapshot_version"; +/// Default value for the current version of `SeigniorageRecipientsSnapshot`. +pub const DEFAULT_SEIGNIORAGE_RECIPIENTS_SNAPSHOT_VERSION: u8 = 2; + /// Total validator slots allowed. pub const VALIDATOR_SLOTS_KEY: &str = "validator_slots"; /// Amount of auction delay. diff --git a/types/src/system/auction/delegator.rs b/types/src/system/auction/delegator.rs index c71882b4ff..39652beebe 100644 --- a/types/src/system/auction/delegator.rs +++ b/types/src/system/auction/delegator.rs @@ -1,21 +1,22 @@ -// TODO - remove once schemars stops causing warning. 
-#![allow(clippy::field_reassign_with_default)] - use alloc::vec::Vec; +use core::fmt::{self, Display, Formatter}; -#[cfg(feature = "std")] +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] use schemars::JsonSchema; use serde::{Deserialize, Serialize}; use crate::{ bytesrepr::{self, FromBytes, ToBytes}, - system::auction::{bid::VestingSchedule, Error}, + system::auction::{bid::VestingSchedule, Error, VESTING_SCHEDULE_LENGTH_MILLIS}, CLType, CLTyped, PublicKey, URef, U512, }; /// Represents a party delegating their stake to a validator (or "delegatee") #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -#[cfg_attr(feature = "std", derive(JsonSchema))] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] #[serde(deny_unknown_fields)] pub struct Delegator { delegator_public_key: PublicKey, @@ -61,9 +62,39 @@ impl Delegator { } } + /// Returns public key of the delegator. + pub fn delegator_public_key(&self) -> &PublicKey { + &self.delegator_public_key + } + + /// Checks if a bid is still locked under a vesting schedule. + /// + /// Returns true if a timestamp falls below the initial lockup period + 91 days release + /// schedule, otherwise false. + pub fn is_locked(&self, timestamp_millis: u64) -> bool { + self.is_locked_with_vesting_schedule(timestamp_millis, VESTING_SCHEDULE_LENGTH_MILLIS) + } + + /// Checks if a bid is still locked under a vesting schedule. + /// + /// Returns true if a timestamp falls below the initial lockup period + 91 days release + /// schedule, otherwise false. 
+ pub fn is_locked_with_vesting_schedule( + &self, + timestamp_millis: u64, + vesting_schedule_period_millis: u64, + ) -> bool { + match &self.vesting_schedule { + Some(vesting_schedule) => { + vesting_schedule.is_vesting(timestamp_millis, vesting_schedule_period_millis) + } + None => false, + } + } + /// Returns the staked amount - pub fn staked_amount(&self) -> &U512 { - &self.staked_amount + pub fn staked_amount(&self) -> U512 { + self.staked_amount } /// Returns the mutable staked amount @@ -93,7 +124,7 @@ impl Delegator { .ok_or(Error::InvalidAmount)?; let vesting_schedule = match self.vesting_schedule.as_ref() { - Some(vesting_sechdule) => vesting_sechdule, + Some(vesting_schedule) => vesting_schedule, None => { self.staked_amount = updated_staked_amount; return Ok(updated_staked_amount); @@ -139,6 +170,29 @@ impl Delegator { pub fn vesting_schedule_mut(&mut self) -> Option<&mut VestingSchedule> { self.vesting_schedule.as_mut() } + + /// Creates a new inactive instance of a bid with 0 staked amount. 
+ pub fn empty( + validator_public_key: PublicKey, + delegator_public_key: PublicKey, + bonding_purse: URef, + ) -> Self { + let vesting_schedule = None; + let staked_amount = 0.into(); + Self { + validator_public_key, + delegator_public_key, + bonding_purse, + staked_amount, + vesting_schedule, + } + } + + /// Sets validator public key + pub fn with_validator_public_key(&mut self, validator_public_key: PublicKey) -> &mut Self { + self.validator_public_key = validator_public_key; + self + } } impl CLTyped for Delegator { @@ -165,6 +219,15 @@ impl ToBytes for Delegator { + self.validator_public_key.serialized_length() + self.vesting_schedule.serialized_length() } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.delegator_public_key.write_bytes(writer)?; + self.staked_amount.write_bytes(writer)?; + self.bonding_purse.write_bytes(writer)?; + self.validator_public_key.write_bytes(writer)?; + self.vesting_schedule.write_bytes(writer)?; + Ok(()) + } } impl FromBytes for Delegator { @@ -187,6 +250,19 @@ impl FromBytes for Delegator { } } +impl Display for Delegator { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + write!( + formatter, + "delegator {{ {} {} motes, bonding purse {}, validator {} }}", + self.delegator_public_key, + self.staked_amount, + self.bonding_purse, + self.validator_public_key + ) + } +} + #[cfg(test)] mod tests { use crate::{ @@ -197,14 +273,13 @@ mod tests { fn serialization_roundtrip() { let staked_amount = U512::one(); let bonding_purse = URef::new([42; 32], AccessRights::READ_ADD_WRITE); - let delegator_public_key: PublicKey = - SecretKey::ed25519_from_bytes([42; SecretKey::ED25519_LENGTH]) - .unwrap() - .into(); - let validator_public_key: PublicKey = - SecretKey::ed25519_from_bytes([43; SecretKey::ED25519_LENGTH]) - .unwrap() - .into(); + let delegator_public_key: PublicKey = PublicKey::from( + &SecretKey::ed25519_from_bytes([42; SecretKey::ED25519_LENGTH]).unwrap(), + ); + + let 
validator_public_key: PublicKey = PublicKey::from( + &SecretKey::ed25519_from_bytes([43; SecretKey::ED25519_LENGTH]).unwrap(), + ); let unlocked_delegator = Delegator::unlocked( delegator_public_key.clone(), staked_amount, @@ -224,3 +299,17 @@ mod tests { bytesrepr::test_serialization_roundtrip(&locked_delegator); } } + +#[cfg(test)] +mod prop_tests { + use proptest::prelude::*; + + use crate::{bytesrepr, gens}; + + proptest! { + #[test] + fn test_value_bid(bid in gens::delegator_arb()) { + bytesrepr::test_serialization_roundtrip(&bid); + } + } +} diff --git a/types/src/system/auction/delegator_bid.rs b/types/src/system/auction/delegator_bid.rs new file mode 100644 index 0000000000..4224f9b9c8 --- /dev/null +++ b/types/src/system/auction/delegator_bid.rs @@ -0,0 +1,356 @@ +// TODO - remove once schemars stops causing warning. +#![allow(clippy::field_reassign_with_default)] + +use alloc::vec::Vec; +use core::fmt::{self, Display, Formatter}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + system::auction::{ + bid::VestingSchedule, delegator_kind::DelegatorKind, BidAddr, Delegator, Error, UnbondKind, + VESTING_SCHEDULE_LENGTH_MILLIS, + }, + CLType, CLTyped, PublicKey, URef, U512, +}; + +/// Represents a party delegating their stake to a validator (or "delegatee") +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub struct DelegatorBid { + delegator_kind: DelegatorKind, + staked_amount: U512, + bonding_purse: URef, + validator_public_key: PublicKey, + vesting_schedule: Option, +} + +impl DelegatorBid { + /// Creates a new [`DelegatorBid`] + pub fn unlocked( + delegator_kind: DelegatorKind, + staked_amount: U512, + bonding_purse: URef, + 
validator_public_key: PublicKey, + ) -> Self { + let vesting_schedule = None; + DelegatorBid { + delegator_kind, + staked_amount, + bonding_purse, + validator_public_key, + vesting_schedule, + } + } + + /// Creates new instance of a [`DelegatorBid`] with locked funds. + pub fn locked( + delegator_kind: DelegatorKind, + staked_amount: U512, + bonding_purse: URef, + validator_public_key: PublicKey, + release_timestamp_millis: u64, + ) -> Self { + let vesting_schedule = Some(VestingSchedule::new(release_timestamp_millis)); + DelegatorBid { + delegator_kind, + staked_amount, + bonding_purse, + validator_public_key, + vesting_schedule, + } + } + + /// Returns the delegator kind. + pub fn delegator_kind(&self) -> &DelegatorKind { + &self.delegator_kind + } + + /// Checks if a bid is still locked under a vesting schedule. + /// + /// Returns true if a timestamp falls below the initial lockup period + 91 days release + /// schedule, otherwise false. + pub fn is_locked(&self, timestamp_millis: u64) -> bool { + self.is_locked_with_vesting_schedule(timestamp_millis, VESTING_SCHEDULE_LENGTH_MILLIS) + } + + /// Checks if a bid is still locked under a vesting schedule. + /// + /// Returns true if a timestamp falls below the initial lockup period + 91 days release + /// schedule, otherwise false. 
+ pub fn is_locked_with_vesting_schedule( + &self, + timestamp_millis: u64, + vesting_schedule_period_millis: u64, + ) -> bool { + match &self.vesting_schedule { + Some(vesting_schedule) => { + vesting_schedule.is_vesting(timestamp_millis, vesting_schedule_period_millis) + } + None => false, + } + } + + /// Returns the staked amount + pub fn staked_amount(&self) -> U512 { + self.staked_amount + } + + /// Returns the mutable staked amount + pub fn staked_amount_mut(&mut self) -> &mut U512 { + &mut self.staked_amount + } + + /// Returns the bonding purse + pub fn bonding_purse(&self) -> &URef { + &self.bonding_purse + } + + /// Returns delegatee + pub fn validator_public_key(&self) -> &PublicKey { + &self.validator_public_key + } + + /// Decreases the stake of the provided bid + pub fn decrease_stake( + &mut self, + amount: U512, + era_end_timestamp_millis: u64, + ) -> Result { + let updated_staked_amount = self + .staked_amount + .checked_sub(amount) + .ok_or(Error::InvalidAmount)?; + + let vesting_schedule = match self.vesting_schedule.as_ref() { + Some(vesting_schedule) => vesting_schedule, + None => { + self.staked_amount = updated_staked_amount; + return Ok(updated_staked_amount); + } + }; + + match vesting_schedule.locked_amount(era_end_timestamp_millis) { + Some(locked_amount) if updated_staked_amount < locked_amount => { + Err(Error::DelegatorFundsLocked) + } + None => { + // If `None`, then the locked amounts table has yet to be initialized (likely + // pre-90 day mark) + Err(Error::DelegatorFundsLocked) + } + Some(_) => { + self.staked_amount = updated_staked_amount; + Ok(updated_staked_amount) + } + } + } + + /// Increases the stake of the provided bid + pub fn increase_stake(&mut self, amount: U512) -> Result { + let updated_staked_amount = self + .staked_amount + .checked_add(amount) + .ok_or(Error::InvalidAmount)?; + + self.staked_amount = updated_staked_amount; + + Ok(updated_staked_amount) + } + + /// Returns a reference to the vesting schedule of the 
provided + /// delegator bid. `None` if a non-genesis validator. + pub fn vesting_schedule(&self) -> Option<&VestingSchedule> { + self.vesting_schedule.as_ref() + } + + /// Returns a mutable reference to the vesting schedule of the provided + /// delegator bid. `None` if a non-genesis validator. + pub fn vesting_schedule_mut(&mut self) -> Option<&mut VestingSchedule> { + self.vesting_schedule.as_mut() + } + + /// Creates a new inactive instance of a bid with 0 staked amount. + pub fn empty( + validator_public_key: PublicKey, + delegator_kind: DelegatorKind, + bonding_purse: URef, + ) -> Self { + let vesting_schedule = None; + let staked_amount = 0.into(); + Self { + validator_public_key, + delegator_kind, + bonding_purse, + staked_amount, + vesting_schedule, + } + } + + /// Sets validator public key + pub fn with_validator_public_key(&mut self, validator_public_key: PublicKey) -> &mut Self { + self.validator_public_key = validator_public_key; + self + } + + /// BidAddr for this instance. + pub fn bid_addr(&self) -> BidAddr { + match &self.delegator_kind { + DelegatorKind::PublicKey(pk) => BidAddr::DelegatedAccount { + validator: self.validator_public_key.clone().to_account_hash(), + delegator: pk.clone().to_account_hash(), + }, + DelegatorKind::Purse(addr) => BidAddr::DelegatedPurse { + validator: self.validator_public_key.clone().to_account_hash(), + delegator: *addr, + }, + } + } + + /// BidAddr for this instance. 
+ pub fn unbond_kind(&self) -> UnbondKind { + match &self.delegator_kind { + DelegatorKind::PublicKey(pk) => UnbondKind::DelegatedPublicKey(pk.clone()), + DelegatorKind::Purse(addr) => UnbondKind::DelegatedPurse(*addr), + } + } +} + +impl CLTyped for DelegatorBid { + fn cl_type() -> CLType { + CLType::Any + } +} + +impl ToBytes for DelegatorBid { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + buffer.extend(self.delegator_kind.to_bytes()?); + buffer.extend(self.staked_amount.to_bytes()?); + buffer.extend(self.bonding_purse.to_bytes()?); + buffer.extend(self.validator_public_key.to_bytes()?); + buffer.extend(self.vesting_schedule.to_bytes()?); + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.delegator_kind.serialized_length() + + self.staked_amount.serialized_length() + + self.bonding_purse.serialized_length() + + self.validator_public_key.serialized_length() + + self.vesting_schedule.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.delegator_kind.write_bytes(writer)?; + self.staked_amount.write_bytes(writer)?; + self.bonding_purse.write_bytes(writer)?; + self.validator_public_key.write_bytes(writer)?; + self.vesting_schedule.write_bytes(writer)?; + Ok(()) + } +} + +impl FromBytes for DelegatorBid { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (delegator_kind, bytes) = DelegatorKind::from_bytes(bytes)?; + let (staked_amount, bytes) = U512::from_bytes(bytes)?; + let (bonding_purse, bytes) = URef::from_bytes(bytes)?; + let (validator_public_key, bytes) = PublicKey::from_bytes(bytes)?; + let (vesting_schedule, bytes) = FromBytes::from_bytes(bytes)?; + Ok(( + DelegatorBid { + delegator_kind, + staked_amount, + bonding_purse, + validator_public_key, + vesting_schedule, + }, + bytes, + )) + } +} + +impl Display for DelegatorBid { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + write!( + 
formatter, + "delegator {{ {} {} motes, bonding purse {}, validator {} }}", + self.delegator_kind, self.staked_amount, self.bonding_purse, self.validator_public_key + ) + } +} + +impl From for DelegatorBid { + fn from(value: Delegator) -> Self { + DelegatorBid { + delegator_kind: value.delegator_public_key().clone().into(), + validator_public_key: value.validator_public_key().clone(), + staked_amount: value.staked_amount(), + bonding_purse: *value.bonding_purse(), + vesting_schedule: value.vesting_schedule().cloned(), + } + } +} + +#[cfg(test)] +mod tests { + use crate::{ + bytesrepr, + system::auction::{delegator_kind::DelegatorKind, DelegatorBid}, + AccessRights, PublicKey, SecretKey, URef, U512, + }; + + #[test] + fn serialization_roundtrip() { + let staked_amount = U512::one(); + let bonding_purse = URef::new([42; 32], AccessRights::READ_ADD_WRITE); + let delegator_public_key: PublicKey = PublicKey::from( + &SecretKey::ed25519_from_bytes([42; SecretKey::ED25519_LENGTH]).unwrap(), + ); + + let delegator_kind = DelegatorKind::PublicKey(delegator_public_key); + + let validator_public_key: PublicKey = PublicKey::from( + &SecretKey::ed25519_from_bytes([43; SecretKey::ED25519_LENGTH]).unwrap(), + ); + let unlocked_delegator = DelegatorBid::unlocked( + delegator_kind.clone(), + staked_amount, + bonding_purse, + validator_public_key.clone(), + ); + bytesrepr::test_serialization_roundtrip(&unlocked_delegator); + + let release_timestamp_millis = 42; + let locked_delegator = DelegatorBid::locked( + delegator_kind, + staked_amount, + bonding_purse, + validator_public_key, + release_timestamp_millis, + ); + bytesrepr::test_serialization_roundtrip(&locked_delegator); + } +} + +#[cfg(test)] +mod prop_tests { + use proptest::prelude::*; + + use crate::{bytesrepr, gens}; + + proptest! 
{ + #[test] + fn test_value_bid(bid in gens::delegator_bid_arb()) { + bytesrepr::test_serialization_roundtrip(&bid); + } + } +} diff --git a/types/src/system/auction/delegator_kind.rs b/types/src/system/auction/delegator_kind.rs new file mode 100644 index 0000000000..6227b9e76b --- /dev/null +++ b/types/src/system/auction/delegator_kind.rs @@ -0,0 +1,349 @@ +use crate::{ + bytesrepr, + bytesrepr::{FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + checksummed_hex, CLType, CLTyped, PublicKey, URef, URefAddr, +}; +use alloc::{string::String, vec::Vec}; +use core::{ + fmt, + fmt::{Display, Formatter}, +}; +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::{ + distributions::{Distribution, Standard}, + Rng, +}; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer}; +use serde_helpers::{HumanReadableDelegatorKind, NonHumanReadableDelegatorKind}; + +/// DelegatorKindTag variants. +#[repr(u8)] +#[derive(Debug, PartialEq, Eq, Serialize, Deserialize, Clone)] +pub enum DelegatorKindTag { + /// Public key. + PublicKey = 0, + /// Purse. + Purse = 1, +} + +/// Auction bid variants. +#[derive(Debug, PartialEq, Eq, Clone, PartialOrd, Ord)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +/// Kinds of delegation bids. +pub enum DelegatorKind { + /// Delegation from public key. + PublicKey(PublicKey), + /// Delegation from purse. + Purse(#[cfg_attr(feature = "json-schema", schemars(with = "String"))] URefAddr), +} + +impl DelegatorKind { + /// DelegatorKindTag. + pub fn tag(&self) -> DelegatorKindTag { + match self { + DelegatorKind::PublicKey(_) => DelegatorKindTag::PublicKey, + DelegatorKind::Purse(_) => DelegatorKindTag::Purse, + } + } + + /// Returns true if the kind is a purse. 
+ pub fn is_purse(&self) -> bool { + matches!(self, DelegatorKind::Purse(_)) + } +} + +impl ToBytes for DelegatorKind { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut result = bytesrepr::allocate_buffer(self)?; + let (tag, mut serialized_data) = match self { + DelegatorKind::PublicKey(public_key) => { + (DelegatorKindTag::PublicKey, public_key.to_bytes()?) + } + DelegatorKind::Purse(uref_addr) => (DelegatorKindTag::Purse, uref_addr.to_bytes()?), + }; + result.push(tag as u8); + result.append(&mut serialized_data); + Ok(result) + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + + match self { + DelegatorKind::PublicKey(pk) => pk.serialized_length(), + DelegatorKind::Purse(addr) => addr.serialized_length(), + } + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + writer.push(self.tag() as u8); + match self { + DelegatorKind::PublicKey(pk) => pk.write_bytes(writer)?, + DelegatorKind::Purse(addr) => addr.write_bytes(writer)?, + }; + Ok(()) + } +} + +impl FromBytes for DelegatorKind { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder): (u8, &[u8]) = FromBytes::from_bytes(bytes)?; + match tag { + tag if tag == DelegatorKindTag::PublicKey as u8 => PublicKey::from_bytes(remainder) + .map(|(pk, remainder)| (DelegatorKind::PublicKey(pk), remainder)), + tag if tag == DelegatorKindTag::Purse as u8 => URefAddr::from_bytes(remainder) + .map(|(addr, remainder)| (DelegatorKind::Purse(addr), remainder)), + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +impl Display for DelegatorKind { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + DelegatorKind::PublicKey(pk) => { + write!(formatter, "{}", pk) + } + DelegatorKind::Purse(addr) => { + write!(formatter, "{}", base16::encode_lower(addr)) + } + } + } +} + +impl From for DelegatorKind { + fn from(value: PublicKey) -> Self { + DelegatorKind::PublicKey(value) + } +} + +impl From<&PublicKey> 
for DelegatorKind { + fn from(value: &PublicKey) -> Self { + DelegatorKind::PublicKey(value.clone()) + } +} + +impl From for DelegatorKind { + fn from(value: URef) -> Self { + DelegatorKind::Purse(value.addr()) + } +} + +impl From for DelegatorKind { + fn from(value: URefAddr) -> Self { + DelegatorKind::Purse(value) + } +} + +impl CLTyped for DelegatorKind { + fn cl_type() -> CLType { + CLType::Any + } +} + +#[cfg(any(feature = "testing", test))] +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> DelegatorKind { + if rng.gen() { + DelegatorKind::PublicKey(rng.gen()) + } else { + DelegatorKind::Purse(rng.gen()) + } + } +} + +impl Serialize for DelegatorKind { + fn serialize(&self, serializer: S) -> Result { + if serializer.is_human_readable() { + HumanReadableDelegatorKind::from(self).serialize(serializer) + } else { + NonHumanReadableDelegatorKind::from(self).serialize(serializer) + } + } +} + +#[derive(Debug)] +enum DelegatorKindError { + DeserializationError(String), +} + +impl Display for DelegatorKindError { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + match self { + DelegatorKindError::DeserializationError(error) => { + write!(f, "Error when deserializing DelegatorKind: {}", error) + } + } + } +} + +impl TryFrom for DelegatorKind { + type Error = DelegatorKindError; + + fn try_from(value: HumanReadableDelegatorKind) -> Result { + match value { + HumanReadableDelegatorKind::PublicKey(public_key) => { + Ok(DelegatorKind::PublicKey(public_key)) + } + HumanReadableDelegatorKind::Purse(encoded) => { + let decoded = checksummed_hex::decode(encoded).map_err(|e| { + DelegatorKindError::DeserializationError(format!( + "Failed to decode encoded URefAddr: {}", + e + )) + })?; + let uref_addr = URefAddr::try_from(decoded.as_ref()).map_err(|e| { + DelegatorKindError::DeserializationError(format!( + "Failed to build uref address: {}", + e + )) + })?; + Ok(DelegatorKind::Purse(uref_addr)) + } + } + } +} + +impl From for DelegatorKind { + fn 
from(value: NonHumanReadableDelegatorKind) -> Self { + match value { + NonHumanReadableDelegatorKind::PublicKey(public_key) => { + DelegatorKind::PublicKey(public_key) + } + NonHumanReadableDelegatorKind::Purse(addr) => DelegatorKind::Purse(addr), + } + } +} + +impl<'de> Deserialize<'de> for DelegatorKind { + fn deserialize>(deserializer: D) -> Result { + if deserializer.is_human_readable() { + let human_readable = HumanReadableDelegatorKind::deserialize(deserializer)?; + DelegatorKind::try_from(human_readable) + .map_err(|error| SerdeError::custom(format!("{:?}", error))) + } else { + let non_human_readable = NonHumanReadableDelegatorKind::deserialize(deserializer)?; + Ok(DelegatorKind::from(non_human_readable)) + } + } +} + +mod serde_helpers { + use super::DelegatorKind; + use crate::{PublicKey, URefAddr}; + use alloc::string::String; + use serde::{Deserialize, Serialize}; + + #[derive(Serialize, Deserialize)] + pub(super) enum HumanReadableDelegatorKind { + PublicKey(PublicKey), + Purse(String), + } + + #[derive(Serialize, Deserialize)] + pub(super) enum NonHumanReadableDelegatorKind { + PublicKey(PublicKey), + Purse(URefAddr), + } + + impl From<&DelegatorKind> for HumanReadableDelegatorKind { + fn from(delegator_kind: &DelegatorKind) -> Self { + match delegator_kind { + DelegatorKind::PublicKey(public_key) => { + HumanReadableDelegatorKind::PublicKey(public_key.clone()) + } + DelegatorKind::Purse(uref_addr) => { + HumanReadableDelegatorKind::Purse(base16::encode_lower(uref_addr)) + } + } + } + } + + impl From<&DelegatorKind> for NonHumanReadableDelegatorKind { + fn from(delegator_kind: &DelegatorKind) -> Self { + match delegator_kind { + DelegatorKind::PublicKey(public_key) => { + NonHumanReadableDelegatorKind::PublicKey(public_key.clone()) + } + DelegatorKind::Purse(uref_addr) => NonHumanReadableDelegatorKind::Purse(*uref_addr), + } + } + } +} + +#[cfg(test)] +mod tests { + use rand::Rng; + + use crate::{ + bytesrepr, 
system::auction::delegator_kind::DelegatorKind, testing::TestRng, PublicKey, + SecretKey, + }; + + #[test] + fn purse_serialized_as_string() { + let delegator_kind_payload = DelegatorKind::Purse([1; 32]); + let serialized = serde_json::to_string(&delegator_kind_payload).unwrap(); + assert_eq!( + serialized, + "{\"Purse\":\"0101010101010101010101010101010101010101010101010101010101010101\"}" + ); + } + + #[test] + fn given_broken_address_purse_deserialziation_fails() { + let failing = + "{\"Purse\":\"Z101010101010101010101010101010101010101010101010101010101010101\"}"; + let ret = serde_json::from_str::(failing); + assert!(ret.is_err()); + let failing = "{\"Purse\":\"01010101010101010101010101010101010101010101010101010101\"}"; + let ret = serde_json::from_str::(failing); + assert!(ret.is_err()); + } + + #[test] + fn json_roundtrip() { + let rng = &mut TestRng::new(); + + let delegator_kind_payload = DelegatorKind::PublicKey(PublicKey::random(rng)); + let json_string = serde_json::to_string_pretty(&delegator_kind_payload).unwrap(); + let decoded: DelegatorKind = serde_json::from_str(&json_string).unwrap(); + assert_eq!(decoded, delegator_kind_payload); + + let delegator_kind_payload = DelegatorKind::Purse(rng.gen()); + let json_string = serde_json::to_string_pretty(&delegator_kind_payload).unwrap(); + let decoded: DelegatorKind = serde_json::from_str(&json_string).unwrap(); + assert_eq!(decoded, delegator_kind_payload); + } + + #[test] + fn serialization_roundtrip() { + let delegator_kind = DelegatorKind::PublicKey(PublicKey::from( + &SecretKey::ed25519_from_bytes([42; SecretKey::ED25519_LENGTH]).unwrap(), + )); + + bytesrepr::test_serialization_roundtrip(&delegator_kind); + + let delegator_kind = DelegatorKind::Purse([43; 32]); + + bytesrepr::test_serialization_roundtrip(&delegator_kind); + } +} + +#[cfg(test)] +mod prop_tests { + use proptest::prelude::*; + + use crate::{bytesrepr, gens}; + + proptest! 
{ + #[test] + fn test_value_bid(kind in gens::delegator_kind_arb()) { + bytesrepr::test_serialization_roundtrip(&kind); + } + } +} diff --git a/types/src/system/auction/detail.rs b/types/src/system/auction/detail.rs deleted file mode 100644 index bdd0af5c7e..0000000000 --- a/types/src/system/auction/detail.rs +++ /dev/null @@ -1,342 +0,0 @@ -use alloc::{collections::BTreeMap, vec::Vec}; -use core::convert::TryInto; - -use num_rational::Ratio; - -use crate::{ - account::AccountHash, - bytesrepr::{FromBytes, ToBytes}, - system::auction::{ - constants::*, Auction, Bids, EraId, Error, RuntimeProvider, SeigniorageAllocation, - SeigniorageRecipientsSnapshot, StorageProvider, UnbondingPurse, UnbondingPurses, - }, - CLTyped, Key, KeyTag, PublicKey, URef, U512, -}; - -fn read_from(provider: &mut P, name: &str) -> Result -where - P: StorageProvider + RuntimeProvider + ?Sized, - T: FromBytes + CLTyped, -{ - let key = provider.named_keys_get(name).ok_or(Error::MissingKey)?; - let uref = key.into_uref().ok_or(Error::InvalidKeyVariant)?; - let value: T = provider.read(uref)?.ok_or(Error::MissingValue)?; - Ok(value) -} - -fn write_to(provider: &mut P, name: &str, value: T) -> Result<(), Error> -where - P: StorageProvider + RuntimeProvider + ?Sized, - T: ToBytes + CLTyped, -{ - let key = provider.named_keys_get(name).ok_or(Error::MissingKey)?; - let uref = key.into_uref().ok_or(Error::InvalidKeyVariant)?; - provider.write(uref, value)?; - Ok(()) -} - -pub fn get_bids

(provider: &mut P) -> Result -where - P: StorageProvider + RuntimeProvider + ?Sized, -{ - let bids_keys = provider.get_keys(&KeyTag::Bid)?; - - let mut ret = BTreeMap::new(); - - for key in bids_keys { - let account_hash = match key { - Key::Bid(account_ash) => account_ash, - _ => return Err(Error::InvalidKeyVariant), - }; - let bid = match provider.read_bid(&account_hash)? { - Some(bid) => bid, - None => return Err(Error::ValidatorNotFound), - }; - ret.insert(bid.validator_public_key().clone(), bid); - } - - Ok(ret) -} - -pub fn set_bids

(provider: &mut P, validators: Bids) -> Result<(), Error> -where - P: StorageProvider + RuntimeProvider + ?Sized, -{ - for (_, bid) in validators.into_iter() { - let account_hash = AccountHash::from(bid.validator_public_key()); - provider.write_bid(account_hash, bid)?; - } - Ok(()) -} - -pub fn get_unbonding_purses

(provider: &mut P) -> Result -where - P: StorageProvider + RuntimeProvider + ?Sized, -{ - let withdraws_keys = provider.get_keys(&KeyTag::Withdraw)?; - - let mut ret = BTreeMap::new(); - - for key in withdraws_keys { - let account_hash = match key { - Key::Withdraw(account_ash) => account_ash, - _ => return Err(Error::InvalidKeyVariant), - }; - let unbonding_purses = provider.read_withdraw(&account_hash)?; - ret.insert(account_hash, unbonding_purses); - } - - Ok(ret) -} - -pub fn set_unbonding_purses

( - provider: &mut P, - unbonding_purses: UnbondingPurses, -) -> Result<(), Error> -where - P: StorageProvider + RuntimeProvider + ?Sized, -{ - for (account_hash, unbonding_purses) in unbonding_purses.into_iter() { - provider.write_withdraw(account_hash, unbonding_purses)?; - } - Ok(()) -} - -pub fn get_era_id

(provider: &mut P) -> Result -where - P: StorageProvider + RuntimeProvider + ?Sized, -{ - Ok(read_from(provider, ERA_ID_KEY)?) -} - -pub fn set_era_id

(provider: &mut P, era_id: EraId) -> Result<(), Error> -where - P: StorageProvider + RuntimeProvider + ?Sized, -{ - write_to(provider, ERA_ID_KEY, era_id) -} - -pub fn get_era_end_timestamp_millis

(provider: &mut P) -> Result -where - P: StorageProvider + RuntimeProvider + ?Sized, -{ - Ok(read_from(provider, ERA_END_TIMESTAMP_MILLIS_KEY)?) -} - -pub fn set_era_end_timestamp_millis

( - provider: &mut P, - era_end_timestamp_millis: u64, -) -> Result<(), Error> -where - P: StorageProvider + RuntimeProvider + ?Sized, -{ - write_to( - provider, - ERA_END_TIMESTAMP_MILLIS_KEY, - era_end_timestamp_millis, - ) -} - -pub fn get_seigniorage_recipients_snapshot

( - provider: &mut P, -) -> Result -where - P: StorageProvider + RuntimeProvider + ?Sized, -{ - let current_era_id = get_era_id(provider)?; - - let auction_delay = get_auction_delay(provider)?; - - let mut ret = BTreeMap::new(); - - for era_id in current_era_id.iter_inclusive(auction_delay) { - let recipient = match provider.read_era_validators(era_id)? { - Some(recipient) => recipient, - None => return Err(Error::ValidatorNotFound), - }; - ret.insert(era_id, recipient); - } - - Ok(ret) -} - -pub fn get_validator_slots

(provider: &mut P) -> Result -where - P: StorageProvider + RuntimeProvider + ?Sized, -{ - let validator_slots: u32 = read_from(provider, VALIDATOR_SLOTS_KEY)?; - let validator_slots = validator_slots - .try_into() - .map_err(|_| Error::InvalidValidatorSlotsValue)?; - Ok(validator_slots) -} - -pub fn get_auction_delay

(provider: &mut P) -> Result -where - P: StorageProvider + RuntimeProvider + ?Sized, -{ - let auction_delay: u64 = read_from(provider, AUCTION_DELAY_KEY)?; - Ok(auction_delay) -} - -fn get_unbonding_delay

(provider: &mut P) -> Result -where - P: StorageProvider + RuntimeProvider + ?Sized, -{ - read_from(provider, UNBONDING_DELAY_KEY) -} - -/// Iterates over unbonding entries and checks if a locked amount can be paid already if -/// a specific era is reached. -/// -/// This function can be called by the system only. -pub(crate) fn process_unbond_requests(provider: &mut P) -> Result<(), Error> { - if provider.get_caller() != PublicKey::System.to_account_hash() { - return Err(Error::InvalidCaller); - } - - // Update `unbonding_purses` data - let mut unbonding_purses: UnbondingPurses = get_unbonding_purses(provider)?; - - let current_era_id = provider.read_era_id()?; - - let unbonding_delay = get_unbonding_delay(provider)?; - - for unbonding_list in unbonding_purses.values_mut() { - let mut new_unbonding_list = Vec::new(); - for unbonding_purse in unbonding_list.iter() { - // Since `process_unbond_requests` is run before `run_auction`, we should check if - // current era id + unbonding delay is equal or greater than the `era_of_creation` that - // was calculated on `unbond` attempt. - if current_era_id >= unbonding_purse.era_of_creation() + unbonding_delay { - let account_hash = - AccountHash::from_public_key(unbonding_purse.unbonder_public_key(), |x| { - provider.blake2b(x) - }); - - // Move funds from bid purse to unbonding purse - provider - .transfer_purse_to_account( - *unbonding_purse.bonding_purse(), - account_hash, - *unbonding_purse.amount(), - ) - .map_err(|_| Error::TransferToUnbondingPurse)?; - } else { - new_unbonding_list.push(unbonding_purse.clone()); - } - } - *unbonding_list = new_unbonding_list; - } - - set_unbonding_purses(provider, unbonding_purses)?; - Ok(()) -} - -/// Creates a new purse in unbonding_purses given a validator's key, amount, and a destination -/// unbonding purse. Returns the amount of motes remaining in the validator's bid purse. 
-pub(crate) fn create_unbonding_purse( - provider: &mut P, - validator_public_key: PublicKey, - unbonder_public_key: PublicKey, - bonding_purse: URef, - amount: U512, -) -> Result<(), Error> { - if provider.get_balance(bonding_purse)?.unwrap_or_default() < amount { - return Err(Error::UnbondTooLarge); - } - - let validator_account_hash = AccountHash::from(&validator_public_key); - let mut unbonding_purses = provider.read_withdraw(&validator_account_hash)?; - let era_of_creation = provider.read_era_id()?; - let new_unbonding_purse = UnbondingPurse::new( - bonding_purse, - validator_public_key, - unbonder_public_key, - era_of_creation, - amount, - ); - unbonding_purses.push(new_unbonding_purse); - provider.write_withdraw(validator_account_hash, unbonding_purses)?; - - Ok(()) -} - -/// Reinvests delegator reward by increasing its stake. -pub fn reinvest_delegator_rewards

( - provider: &mut P, - seigniorage_allocations: &mut Vec, - validator_public_key: PublicKey, - rewards: impl Iterator)>, -) -> Result, Error> -where - P: StorageProvider, -{ - let mut delegator_payouts = Vec::new(); - - let validator_account_hash = AccountHash::from(&validator_public_key); - - let mut bid = match provider.read_bid(&validator_account_hash)? { - Some(bid) => bid, - None => return Err(Error::ValidatorNotFound), - }; - - let delegators = bid.delegators_mut(); - - for (delegator_key, delegator_reward) in rewards { - let delegator = match delegators.get_mut(&delegator_key) { - Some(delegator) => delegator, - None => continue, - }; - - let delegator_reward_trunc = delegator_reward.to_integer(); - - delegator.increase_stake(delegator_reward_trunc)?; - - delegator_payouts.push((delegator_reward_trunc, *delegator.bonding_purse())); - - let allocation = SeigniorageAllocation::delegator( - delegator_key, - validator_public_key.clone(), - delegator_reward_trunc, - ); - - seigniorage_allocations.push(allocation); - } - - provider.write_bid(validator_account_hash, bid)?; - - Ok(delegator_payouts) -} - -/// Reinvests validator reward by increasing its stake and returns its bonding purse. -pub fn reinvest_validator_reward

( - provider: &mut P, - seigniorage_allocations: &mut Vec, - validator_public_key: PublicKey, - amount: U512, -) -> Result -where - P: StorageProvider, -{ - let validator_account_hash = AccountHash::from(&validator_public_key); - - let mut bid = match provider.read_bid(&validator_account_hash)? { - Some(bid) => bid, - None => { - return Err(Error::ValidatorNotFound); - } - }; - - bid.increase_stake(amount)?; - - let allocation = SeigniorageAllocation::validator(validator_public_key, amount); - - seigniorage_allocations.push(allocation); - - let bonding_purse = *bid.bonding_purse(); - - provider.write_bid(validator_account_hash, bid)?; - - Ok(bonding_purse) -} diff --git a/types/src/system/auction/entry_points.rs b/types/src/system/auction/entry_points.rs new file mode 100644 index 0000000000..acd3279eb5 --- /dev/null +++ b/types/src/system/auction/entry_points.rs @@ -0,0 +1,202 @@ +use crate::{ + system::auction::{ + DelegationRate, ValidatorWeights, ARG_AMOUNT, ARG_DELEGATION_RATE, ARG_DELEGATOR, + ARG_DELEGATORS, ARG_ERA_END_TIMESTAMP_MILLIS, ARG_NEW_VALIDATOR, ARG_PUBLIC_KEY, + ARG_VALIDATOR, METHOD_ACTIVATE_BID, METHOD_ADD_BID, METHOD_DELEGATE, METHOD_DISTRIBUTE, + METHOD_GET_ERA_VALIDATORS, METHOD_READ_ERA_ID, METHOD_REDELEGATE, METHOD_RUN_AUCTION, + METHOD_SLASH, METHOD_UNDELEGATE, METHOD_WITHDRAW_BID, + }, + CLType, CLTyped, EntityEntryPoint, EntryPointAccess, EntryPointPayment, EntryPointType, + EntryPoints, Parameter, PublicKey, U512, +}; +use alloc::boxed::Box; + +use super::{ + DelegatorKind, Reservation, ARG_MAXIMUM_DELEGATION_AMOUNT, ARG_MINIMUM_DELEGATION_AMOUNT, + ARG_NEW_PUBLIC_KEY, ARG_RESERVATIONS, ARG_REWARDS_MAP, METHOD_ADD_RESERVATIONS, + METHOD_CANCEL_RESERVATIONS, METHOD_CHANGE_BID_PUBLIC_KEY, +}; + +/// Creates auction contract entry points. 
+pub fn auction_entry_points() -> EntryPoints { + let mut entry_points = EntryPoints::new(); + + let entry_point = EntityEntryPoint::new( + METHOD_GET_ERA_VALIDATORS, + vec![], + Option::::cl_type(), + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + entry_points.add_entry_point(entry_point); + + let entry_point = EntityEntryPoint::new( + METHOD_ADD_BID, + vec![ + Parameter::new(ARG_PUBLIC_KEY, PublicKey::cl_type()), + Parameter::new(ARG_DELEGATION_RATE, DelegationRate::cl_type()), + Parameter::new(ARG_AMOUNT, U512::cl_type()), + Parameter::new(ARG_MINIMUM_DELEGATION_AMOUNT, u64::cl_type()), + Parameter::new(ARG_MAXIMUM_DELEGATION_AMOUNT, u64::cl_type()), + ], + U512::cl_type(), + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + entry_points.add_entry_point(entry_point); + + let entry_point = EntityEntryPoint::new( + METHOD_WITHDRAW_BID, + vec![ + Parameter::new(ARG_PUBLIC_KEY, PublicKey::cl_type()), + Parameter::new(ARG_AMOUNT, U512::cl_type()), + ], + U512::cl_type(), + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + entry_points.add_entry_point(entry_point); + + let entry_point = EntityEntryPoint::new( + METHOD_DELEGATE, + vec![ + Parameter::new(ARG_DELEGATOR, PublicKey::cl_type()), + Parameter::new(ARG_VALIDATOR, PublicKey::cl_type()), + Parameter::new(ARG_AMOUNT, U512::cl_type()), + ], + U512::cl_type(), + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + entry_points.add_entry_point(entry_point); + + let entry_point = EntityEntryPoint::new( + METHOD_UNDELEGATE, + vec![ + Parameter::new(ARG_DELEGATOR, PublicKey::cl_type()), + Parameter::new(ARG_VALIDATOR, PublicKey::cl_type()), + Parameter::new(ARG_AMOUNT, U512::cl_type()), + ], + U512::cl_type(), + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + entry_points.add_entry_point(entry_point); + + let entry_point = 
EntityEntryPoint::new( + METHOD_REDELEGATE, + vec![ + Parameter::new(ARG_DELEGATOR, PublicKey::cl_type()), + Parameter::new(ARG_VALIDATOR, PublicKey::cl_type()), + Parameter::new(ARG_AMOUNT, U512::cl_type()), + Parameter::new(ARG_NEW_VALIDATOR, PublicKey::cl_type()), + ], + U512::cl_type(), + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + entry_points.add_entry_point(entry_point); + + let entry_point = EntityEntryPoint::new( + METHOD_RUN_AUCTION, + vec![Parameter::new(ARG_ERA_END_TIMESTAMP_MILLIS, u64::cl_type())], + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + entry_points.add_entry_point(entry_point); + + let entry_point = EntityEntryPoint::new( + METHOD_SLASH, + vec![], + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + entry_points.add_entry_point(entry_point); + + let entry_point = EntityEntryPoint::new( + METHOD_DISTRIBUTE, + vec![Parameter::new( + ARG_REWARDS_MAP, + CLType::map(CLType::PublicKey, CLType::U512), + )], + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + entry_points.add_entry_point(entry_point); + + let entry_point = EntityEntryPoint::new( + METHOD_READ_ERA_ID, + vec![], + CLType::U64, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + entry_points.add_entry_point(entry_point); + + let entry_point = EntityEntryPoint::new( + METHOD_ACTIVATE_BID, + vec![Parameter::new(ARG_VALIDATOR, CLType::PublicKey)], + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + entry_points.add_entry_point(entry_point); + + let entry_point = EntityEntryPoint::new( + METHOD_CHANGE_BID_PUBLIC_KEY, + vec![ + Parameter::new(ARG_PUBLIC_KEY, PublicKey::cl_type()), + Parameter::new(ARG_NEW_PUBLIC_KEY, PublicKey::cl_type()), + ], + CLType::Unit, + EntryPointAccess::Public, + 
EntryPointType::Called, + EntryPointPayment::Caller, + ); + entry_points.add_entry_point(entry_point); + + let entry_point = EntityEntryPoint::new( + METHOD_ADD_RESERVATIONS, + vec![Parameter::new( + ARG_RESERVATIONS, + CLType::List(Box::new(Reservation::cl_type())), + )], + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + entry_points.add_entry_point(entry_point); + + let entry_point = EntityEntryPoint::new( + METHOD_CANCEL_RESERVATIONS, + vec![ + Parameter::new(ARG_VALIDATOR, PublicKey::cl_type()), + Parameter::new( + ARG_DELEGATORS, + CLType::List(Box::new(DelegatorKind::cl_type())), + ), + ], + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + entry_points.add_entry_point(entry_point); + + entry_points +} diff --git a/types/src/system/auction/era_info.rs b/types/src/system/auction/era_info.rs index 410dd8d5b7..9405f97401 100644 --- a/types/src/system/auction/era_info.rs +++ b/types/src/system/auction/era_info.rs @@ -1,23 +1,25 @@ -// TODO - remove once schemars stops causing warning. 
-#![allow(clippy::field_reassign_with_default)] - use alloc::{boxed::Box, vec::Vec}; -#[cfg(feature = "std")] +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] use schemars::JsonSchema; use serde::{Deserialize, Serialize}; use crate::{ bytesrepr::{self, FromBytes, ToBytes}, + system::auction::DelegatorKind, CLType, CLTyped, PublicKey, U512, }; const SEIGNIORAGE_ALLOCATION_VALIDATOR_TAG: u8 = 0; const SEIGNIORAGE_ALLOCATION_DELEGATOR_TAG: u8 = 1; +const SEIGNIORAGE_ALLOCATION_DELEGATOR_KIND_TAG: u8 = 2; /// Information about a seigniorage allocation #[derive(Debug, Clone, Ord, PartialOrd, Eq, PartialEq, Serialize, Deserialize)] -#[cfg_attr(feature = "std", derive(JsonSchema))] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] #[serde(deny_unknown_fields)] pub enum SeigniorageAllocation { /// Info about a seigniorage allocation for a validator @@ -36,6 +38,15 @@ pub enum SeigniorageAllocation { /// Allocated amount amount: U512, }, + /// Info about a seigniorage allocation for a delegator + DelegatorKind { + /// Delegator kind + delegator_kind: DelegatorKind, + /// Validator's public key + validator_public_key: PublicKey, + /// Allocated amount + amount: U512, + }, } impl SeigniorageAllocation { @@ -60,11 +71,25 @@ impl SeigniorageAllocation { } } + /// Constructs a [`SeigniorageAllocation::DelegatorKind`] + pub const fn delegator_kind( + delegator_kind: DelegatorKind, + validator_public_key: PublicKey, + amount: U512, + ) -> Self { + SeigniorageAllocation::DelegatorKind { + delegator_kind, + validator_public_key, + amount, + } + } + /// Returns the amount for a given seigniorage allocation pub fn amount(&self) -> &U512 { match self { - SeigniorageAllocation::Validator { amount, .. } => amount, - SeigniorageAllocation::Delegator { amount, .. } => amount, + SeigniorageAllocation::Validator { amount, .. } + | SeigniorageAllocation::Delegator { amount, .. 
} + | SeigniorageAllocation::DelegatorKind { amount, .. } => amount, } } @@ -72,6 +97,9 @@ impl SeigniorageAllocation { match self { SeigniorageAllocation::Validator { .. } => SEIGNIORAGE_ALLOCATION_VALIDATOR_TAG, SeigniorageAllocation::Delegator { .. } => SEIGNIORAGE_ALLOCATION_DELEGATOR_TAG, + SeigniorageAllocation::DelegatorKind { .. } => { + SEIGNIORAGE_ALLOCATION_DELEGATOR_KIND_TAG + } } } } @@ -79,25 +107,7 @@ impl SeigniorageAllocation { impl ToBytes for SeigniorageAllocation { fn to_bytes(&self) -> Result, bytesrepr::Error> { let mut buffer = bytesrepr::allocate_buffer(self)?; - buffer.append(&mut self.tag().to_bytes()?); - match self { - SeigniorageAllocation::Validator { - validator_public_key, - amount, - } => { - buffer.append(&mut validator_public_key.to_bytes()?); - buffer.append(&mut amount.to_bytes()?); - } - SeigniorageAllocation::Delegator { - delegator_public_key, - validator_public_key, - amount, - } => { - buffer.append(&mut delegator_public_key.to_bytes()?); - buffer.append(&mut validator_public_key.to_bytes()?); - buffer.append(&mut amount.to_bytes()?); - } - } + self.write_bytes(&mut buffer)?; Ok(buffer) } @@ -117,8 +127,49 @@ impl ToBytes for SeigniorageAllocation { + validator_public_key.serialized_length() + amount.serialized_length() } + SeigniorageAllocation::DelegatorKind { + delegator_kind, + validator_public_key, + amount, + } => { + delegator_kind.serialized_length() + + validator_public_key.serialized_length() + + amount.serialized_length() + } } } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + writer.push(self.tag()); + match self { + SeigniorageAllocation::Validator { + validator_public_key, + amount, + } => { + validator_public_key.write_bytes(writer)?; + amount.write_bytes(writer)?; + } + SeigniorageAllocation::Delegator { + delegator_public_key, + validator_public_key, + amount, + } => { + delegator_public_key.write_bytes(writer)?; + validator_public_key.write_bytes(writer)?; + 
amount.write_bytes(writer)?; + } + SeigniorageAllocation::DelegatorKind { + delegator_kind, + validator_public_key, + amount, + } => { + delegator_kind.write_bytes(writer)?; + validator_public_key.write_bytes(writer)?; + amount.write_bytes(writer)?; + } + } + Ok(()) + } } impl FromBytes for SeigniorageAllocation { @@ -146,6 +197,19 @@ impl FromBytes for SeigniorageAllocation { rem, )) } + SEIGNIORAGE_ALLOCATION_DELEGATOR_KIND_TAG => { + let (delegator_kind, rem) = DelegatorKind::from_bytes(rem)?; + let (validator_public_key, rem) = PublicKey::from_bytes(rem)?; + let (amount, rem) = U512::from_bytes(rem)?; + Ok(( + SeigniorageAllocation::delegator_kind( + delegator_kind, + validator_public_key, + amount, + ), + rem, + )) + } _ => Err(bytesrepr::Error::Formatting), } } @@ -159,7 +223,8 @@ impl CLTyped for SeigniorageAllocation { /// Auction metadata. Intended to be recorded at each era. #[derive(Debug, Default, Clone, Ord, PartialOrd, Eq, PartialEq, Serialize, Deserialize)] -#[cfg_attr(feature = "std", derive(JsonSchema))] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] #[serde(deny_unknown_fields)] pub struct EraInfo { seigniorage_allocations: Vec, @@ -189,7 +254,7 @@ impl EraInfo { /// * If the match candidate is a validator allocation, the provided public key is matched /// against the validator public key. /// * If the match candidate is a delegator allocation, the provided public key is matched - /// against the delegator public key. + /// against the delegator public key if any. pub fn select(&self, public_key: PublicKey) -> impl Iterator { self.seigniorage_allocations .iter() @@ -202,18 +267,32 @@ impl EraInfo { delegator_public_key, .. } => public_key == *delegator_public_key, + SeigniorageAllocation::DelegatorKind { delegator_kind, .. 
} => { + if let DelegatorKind::PublicKey(delegator_public_key) = delegator_kind { + public_key == *delegator_public_key + } else { + false + } + } }) } } impl ToBytes for EraInfo { fn to_bytes(&self) -> Result, bytesrepr::Error> { - self.seigniorage_allocations.to_bytes() + let mut result = bytesrepr::allocate_buffer(self)?; + self.seigniorage_allocations().write_bytes(&mut result)?; + Ok(result) } fn serialized_length(&self) -> usize { self.seigniorage_allocations.serialized_length() } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.seigniorage_allocations().write_bytes(writer)?; + Ok(()) + } } impl FromBytes for EraInfo { @@ -235,7 +314,7 @@ impl CLTyped for EraInfo { } /// Generators for [`SeigniorageAllocation`] and [`EraInfo`] -#[cfg(any(feature = "gens", test))] +#[cfg(any(feature = "testing", feature = "gens", test))] pub mod gens { use proptest::{ collection::{self, SizeRange}, @@ -245,7 +324,7 @@ pub mod gens { use crate::{ crypto::gens::public_key_arb, - gens::u512_arb, + gens::{delegator_kind_arb, u512_arb}, system::auction::{EraInfo, SeigniorageAllocation}, }; @@ -256,14 +335,14 @@ pub mod gens { } fn seigniorage_allocation_delegator_arb() -> impl Strategy { - (public_key_arb(), public_key_arb(), u512_arb()).prop_map( - |(delegator_public_key, validator_public_key, amount)| { - SeigniorageAllocation::delegator(delegator_public_key, validator_public_key, amount) + (delegator_kind_arb(), public_key_arb(), u512_arb()).prop_map( + |(delegator_kind, validator_public_key, amount)| { + SeigniorageAllocation::delegator_kind(delegator_kind, validator_public_key, amount) }, ) } - /// Creates an arbitrary [`SeignorageAllocation`](crate::system::auction::SeigniorageAllocation) + /// Creates an arbitrary [`SeignorageAllocation`](SeigniorageAllocation) pub fn seigniorage_allocation_arb() -> impl Strategy { prop_oneof![ seigniorage_allocation_validator_arb(), diff --git a/types/src/system/auction/error.rs 
b/types/src/system/auction/error.rs index 19b22507a1..62cb075435 100644 --- a/types/src/system/auction/error.rs +++ b/types/src/system/auction/error.rs @@ -1,167 +1,490 @@ -//! Home of the Auction contract's [`Error`] type. +//! Home of the Auction contract's [`enum@Error`] type. use alloc::vec::Vec; use core::{ convert::{TryFrom, TryInto}, + fmt::{self, Display, Formatter}, result, }; -#[cfg(feature = "std")] -use thiserror::Error; - use crate::{ bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, CLType, CLTyped, }; /// Errors which can occur while executing the Auction contract. -#[derive(Debug, Copy, Clone, PartialEq, Eq)] -#[cfg_attr(feature = "std", derive(Error))] +#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)] +#[cfg_attr(test, derive(strum::EnumIter))] #[repr(u8)] +#[non_exhaustive] pub enum Error { /// Unable to find named key in the contract's named keys. - #[cfg_attr(feature = "std", error("Missing key"))] + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(0, Error::MissingKey as u8); + /// ``` MissingKey = 0, - /// Given named key contains invalid variant. - #[cfg_attr(feature = "std", error("Invalid key variant"))] + /// Invalid key variant. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(1, Error::InvalidKeyVariant as u8); + /// ``` InvalidKeyVariant = 1, /// Value under an uref does not exist. This means the installer contract didn't work properly. - #[cfg_attr(feature = "std", error("Missing value"))] + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(2, Error::MissingValue as u8); + /// ``` MissingValue = 2, /// ABI serialization issue while reading or writing. 
- #[cfg_attr(feature = "std", error("Serialization error"))] + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(3, Error::Serialization as u8); + /// ``` Serialization = 3, /// Triggered when contract was unable to transfer desired amount of tokens into a bid purse. - #[cfg_attr(feature = "std", error("Transfer to bid purse error"))] + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(4, Error::TransferToBidPurse as u8); + /// ``` TransferToBidPurse = 4, /// User passed invalid amount of tokens which might result in wrong values after calculation. - #[cfg_attr(feature = "std", error("Invalid amount"))] + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(5, Error::InvalidAmount as u8); + /// ``` InvalidAmount = 5, /// Unable to find a bid by account hash in `active_bids` map. - #[cfg_attr(feature = "std", error("Bid not found"))] + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(6, Error::BidNotFound as u8); + /// ``` BidNotFound = 6, - /// Validator's account hash was not found in the map. - #[cfg_attr(feature = "std", error("Validator not found"))] + /// Validator was not found in the map. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(7, Error::ValidatorNotFound as u8); + /// ``` ValidatorNotFound = 7, - /// Delegator's account hash was not found in the map. - #[cfg_attr(feature = "std", error("Delegator not found"))] + /// Delegator was not found in the map. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(8, Error::DelegatorNotFound as u8); + /// ``` DelegatorNotFound = 8, /// Storage problem. - #[cfg_attr(feature = "std", error("Storage error"))] + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(9, Error::Storage as u8); + /// ``` Storage = 9, /// Raised when system is unable to bond. 
- #[cfg_attr(feature = "std", error("Bonding error"))] + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(10, Error::Bonding as u8); + /// ``` Bonding = 10, /// Raised when system is unable to unbond. - #[cfg_attr(feature = "std", error("Unbonding error"))] + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(11, Error::Unbonding as u8); + /// ``` Unbonding = 11, /// Raised when Mint contract is unable to release founder stake. - #[cfg_attr(feature = "std", error("Unable to release founder stake"))] + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(12, Error::ReleaseFounderStake as u8); + /// ``` ReleaseFounderStake = 12, /// Raised when the system is unable to determine purse balance. - #[cfg_attr(feature = "std", error("Unable to get purse balance"))] + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(13, Error::GetBalance as u8); + /// ``` GetBalance = 13, /// Raised when an entry point is called from invalid account context. - #[cfg_attr(feature = "std", error("Invalid context"))] + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(14, Error::InvalidContext as u8); + /// ``` InvalidContext = 14, /// Raised whenever a validator's funds are still locked in but an attempt to withdraw was /// made. - #[cfg_attr(feature = "std", error("Validator's funds are locked"))] + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(15, Error::ValidatorFundsLocked as u8); + /// ``` ValidatorFundsLocked = 15, /// Raised when caller is not the system account. - #[cfg_attr(feature = "std", error("Function must be called by system account"))] + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(16, Error::InvalidCaller as u8); + /// ``` InvalidCaller = 16, - /// Raised when function is supplied a public key that does match the caller's. 
- #[cfg_attr( - feature = "std", - error("Supplied public key does not match caller's public key") - )] + /// Raised when function is supplied a public key that does not match the caller's or does not + /// have an associated account. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(17, Error::InvalidPublicKey as u8); + /// ``` InvalidPublicKey = 17, - /// Validator is not not bonded. - #[cfg_attr(feature = "std", error("Validator's bond not found"))] + /// Validator is not bonded. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(18, Error::BondNotFound as u8); + /// ``` BondNotFound = 18, /// Unable to create purse. - #[cfg_attr(feature = "std", error("Unable to create purse"))] + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(19, Error::CreatePurseFailed as u8); + /// ``` CreatePurseFailed = 19, /// Attempted to unbond an amount which was too large. - #[cfg_attr(feature = "std", error("Unbond is too large"))] + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(20, Error::UnbondTooLarge as u8); + /// ``` UnbondTooLarge = 20, /// Attempted to bond with a stake which was too small. - #[cfg_attr(feature = "std", error("Bond is too small"))] + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(21, Error::BondTooSmall as u8); + /// ``` BondTooSmall = 21, /// Raised when rewards are to be distributed to delegators, but the validator has no /// delegations. - #[cfg_attr(feature = "std", error("Validators has not received any delegations"))] + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(22, Error::MissingDelegations as u8); + /// ``` MissingDelegations = 22, /// The validators returned by the consensus component should match /// current era validators when distributing rewards.
- #[cfg_attr( - feature = "std", - error("Mismatched era validator sets to distribute rewards") - )] + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(23, Error::MismatchedEraValidators as u8); + /// ``` MismatchedEraValidators = 23, /// Failed to mint reward tokens. - #[cfg_attr(feature = "std", error("Failed to mint rewards"))] + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(24, Error::MintReward as u8); + /// ``` MintReward = 24, /// Invalid number of validator slots. - #[cfg_attr(feature = "std", error("Invalid number of validator slots"))] + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(25, Error::InvalidValidatorSlotsValue as u8); + /// ``` InvalidValidatorSlotsValue = 25, /// Failed to reduce total supply. - #[cfg_attr(feature = "std", error("Failed to reduce total supply"))] + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(26, Error::MintReduceTotalSupply as u8); + /// ``` MintReduceTotalSupply = 26, /// Triggered when contract was unable to transfer desired amount of tokens into a delegators /// purse. - #[cfg_attr(feature = "std", error("Transfer to delegators purse error"))] + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(27, Error::TransferToDelegatorPurse as u8); + /// ``` TransferToDelegatorPurse = 27, /// Triggered when contract was unable to perform a transfer to distribute validators reward. - #[cfg_attr(feature = "std", error("Reward transfer to validator error"))] + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(28, Error::ValidatorRewardTransfer as u8); + /// ``` ValidatorRewardTransfer = 28, /// Triggered when contract was unable to perform a transfer to distribute delegators rewards. 
- #[cfg_attr(feature = "std", error("Rewards transfer to delegator error"))] + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(29, Error::DelegatorRewardTransfer as u8); + /// ``` DelegatorRewardTransfer = 29, /// Failed to transfer desired amount while withdrawing delegators reward. - #[cfg_attr(feature = "std", error("Withdraw delegator reward error"))] + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(30, Error::WithdrawDelegatorReward as u8); + /// ``` WithdrawDelegatorReward = 30, /// Failed to transfer desired amount while withdrawing validators reward. - #[cfg_attr(feature = "std", error("Withdraw validator reward error"))] + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(31, Error::WithdrawValidatorReward as u8); + /// ``` WithdrawValidatorReward = 31, /// Failed to transfer desired amount into unbonding purse. - #[cfg_attr(feature = "std", error("Transfer to unbonding purse error"))] + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(32, Error::TransferToUnbondingPurse as u8); + /// ``` TransferToUnbondingPurse = 32, /// Failed to record era info. - #[cfg_attr(feature = "std", error("Record era info error"))] + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(33, Error::RecordEraInfo as u8); + /// ``` RecordEraInfo = 33, /// Failed to create a [`crate::CLValue`]. - #[cfg_attr(feature = "std", error("CLValue error"))] + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(34, Error::CLValue as u8); + /// ``` CLValue = 34, /// Missing seigniorage recipients for given era. - #[cfg_attr(feature = "std", error("Missing seigniorage recipients for given era"))] + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(35, Error::MissingSeigniorageRecipients as u8); + /// ``` MissingSeigniorageRecipients = 35, /// Failed to transfer funds. 
- #[cfg_attr(feature = "std", error("Transfer error"))] + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(36, Error::Transfer as u8); + /// ``` Transfer = 36, /// Delegation rate exceeds rate. - #[cfg_attr(feature = "std", error("Delegation rate too large"))] + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(37, Error::DelegationRateTooLarge as u8); + /// ``` DelegationRateTooLarge = 37, /// Raised whenever a delegator's funds are still locked in but an attempt to undelegate was /// made. - #[cfg_attr(feature = "std", error("Delegator's funds are locked"))] + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(38, Error::DelegatorFundsLocked as u8); + /// ``` DelegatorFundsLocked = 38, - - // NOTE: These variants below and related plumbing will be removed once support for WASM - // system contracts will be dropped. - #[doc(hidden)] - #[cfg_attr(feature = "std", error("GasLimit"))] - GasLimit, - - #[cfg(test)] - #[doc(hidden)] - #[cfg_attr(feature = "std", error("Sentinel error"))] - Sentinel, + /// An arithmetic overflow has occurred. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(39, Error::ArithmeticOverflow as u8); + /// ``` + ArithmeticOverflow = 39, + /// Execution exceeded the gas limit. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(40, Error::GasLimit as u8); + /// ``` + GasLimit = 40, + /// Too many frames on the runtime stack. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(41, Error::RuntimeStackOverflow as u8); + /// ``` + RuntimeStackOverflow = 41, + /// An error that is raised when there is an error in the mint contract that cannot + /// be mapped to a specific auction error. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(42, Error::MintError as u8); + /// ``` + MintError = 42, + /// The validator has exceeded the maximum amount of delegators allowed. 
+ /// NOTE: This variant is no longer in use. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(43, Error::ExceededDelegatorSizeLimit as u8); + /// ``` + ExceededDelegatorSizeLimit = 43, + /// The global delegator capacity for the auction has been reached. + /// NOTE: This variant is no longer in use. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(44, Error::GlobalDelegatorCapacityReached as u8); + /// ``` + GlobalDelegatorCapacityReached = 44, + /// The delegated amount is below the minimum allowed. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(45, Error::DelegationAmountTooSmall as u8); + /// ``` + DelegationAmountTooSmall = 45, + /// Runtime stack error. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(46, Error::RuntimeStack as u8); + /// ``` + RuntimeStack = 46, + /// An error that is raised on private chain only when a `disable_auction_bids` flag is set to + /// `true`. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(47, Error::AuctionBidsDisabled as u8); + /// ``` + AuctionBidsDisabled = 47, + /// Error getting accumulation purse. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(48, Error::GetAccumulationPurse as u8); + /// ``` + GetAccumulationPurse = 48, + /// Failed to transfer desired amount into administrator's account. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(49, Error::TransferToAdministrator as u8); + /// ``` + TransferToAdministrator = 49, + /// A forged reference was detected. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(50, Error::ForgedReference as u8); + /// ``` + ForgedReference = 50, + /// Unable to find purse.
+ /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(51, Error::MissingPurse as u8); + /// ``` + MissingPurse = 51, + /// Failed to transfer validator bid to new public key. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(52, Error::ValidatorBidExistsAlready as u8); + /// ``` + ValidatorBidExistsAlready = 52, + /// Failed to look up current validator bid + /// because its public key has been changed + /// and bridge record chain is too long to follow. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(53, Error::BridgeRecordChainTooLong as u8); + /// ``` + BridgeRecordChainTooLong = 53, + /// Unexpected bid variant. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(54, Error::UnexpectedBidVariant as u8); + /// ``` + UnexpectedBidVariant = 54, + /// The delegated amount is above the maximum allowed. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(55, Error::DelegationAmountTooLarge as u8); + /// ``` + DelegationAmountTooLarge = 55, + /// Reservation was not found in the map. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(56, Error::ReservationNotFound as u8); + /// ``` + ReservationNotFound = 56, + /// Validator exceeded allowed number of reserved delegator slots. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(57, Error::ExceededReservationSlotsLimit as u8); + /// ``` + ExceededReservationSlotsLimit = 57, + /// All reserved slots for validator are already occupied. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(58, Error::ExceededReservationsLimit as u8); + /// ``` + ExceededReservationsLimit = 58, + /// Reserved slots count is less than number of existing reservations.
+ /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(59, Error::ReservationSlotsCountTooSmall as u8); + /// ``` + ReservationSlotsCountTooSmall = 59, + /// Unexpected unbond variant. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(60, Error::UnexpectedUnbondVariant as u8); + /// ``` + UnexpectedUnbondVariant = 60, + /// Unexpected stored value variant. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(61, Error::UnexpectedStoredValueVariant as u8); + /// ``` + UnexpectedStoredValueVariant = 61, + /// Redelegation validator was not found in the map. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(62, Error::RedelegationValidatorNotFound as u8); + /// ``` + RedelegationValidatorNotFound = 62, + /// Certain operations are not permitted on bid records during vesting periods. + /// ``` + /// # use casper_types::system::auction::Error; + /// assert_eq!(63, Error::VestingLockout as u8); + /// ``` + VestingLockout = 63, } -/// Used for testing; this should be guaranteed to be the maximum valid value of [`Error`] enum. 
-#[cfg(test)] -const MAX_ERROR_VALUE: u8 = Error::Sentinel as u8; +impl Display for Error { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + Error::MissingKey => formatter.write_str("Missing key"), + Error::InvalidKeyVariant => formatter.write_str("Invalid key variant"), + Error::MissingValue => formatter.write_str("Missing value"), + Error::Serialization => formatter.write_str("Serialization error"), + Error::TransferToBidPurse => formatter.write_str("Transfer to bid purse error"), + Error::InvalidAmount => formatter.write_str("Invalid amount"), + Error::BidNotFound => formatter.write_str("Bid not found"), + Error::ValidatorNotFound => formatter.write_str("Validator not found"), + Error::DelegatorNotFound => formatter.write_str("Delegator not found"), + Error::Storage => formatter.write_str("Storage error"), + Error::Bonding => formatter.write_str("Bonding error"), + Error::Unbonding => formatter.write_str("Unbonding error"), + Error::ReleaseFounderStake => formatter.write_str("Unable to release founder stake"), + Error::GetBalance => formatter.write_str("Unable to get purse balance"), + Error::InvalidContext => formatter.write_str("Invalid context"), + Error::ValidatorFundsLocked => formatter.write_str("Validator's funds are locked"), + Error::InvalidCaller => formatter.write_str("Function must be called by system account"), + Error::InvalidPublicKey => formatter.write_str("Supplied public key does not match caller's public key or has no associated account"), + Error::BondNotFound => formatter.write_str("Validator's bond not found"), + Error::CreatePurseFailed => formatter.write_str("Unable to create purse"), + Error::UnbondTooLarge => formatter.write_str("Unbond is too large"), + Error::BondTooSmall => formatter.write_str("Bond is too small"), + Error::MissingDelegations => formatter.write_str("Validators has not received any delegations"), + Error::MismatchedEraValidators => formatter.write_str("Mismatched era validator sets to 
distribute rewards"), + Error::MintReward => formatter.write_str("Failed to mint rewards"), + Error::InvalidValidatorSlotsValue => formatter.write_str("Invalid number of validator slots"), + Error::MintReduceTotalSupply => formatter.write_str("Failed to reduce total supply"), + Error::TransferToDelegatorPurse => formatter.write_str("Transfer to delegators purse error"), + Error::ValidatorRewardTransfer => formatter.write_str("Reward transfer to validator error"), + Error::DelegatorRewardTransfer => formatter.write_str("Rewards transfer to delegator error"), + Error::WithdrawDelegatorReward => formatter.write_str("Withdraw delegator reward error"), + Error::WithdrawValidatorReward => formatter.write_str("Withdraw validator reward error"), + Error::TransferToUnbondingPurse => formatter.write_str("Transfer to unbonding purse error"), + Error::RecordEraInfo => formatter.write_str("Record era info error"), + Error::CLValue => formatter.write_str("CLValue error"), + Error::MissingSeigniorageRecipients => formatter.write_str("Missing seigniorage recipients for given era"), + Error::Transfer => formatter.write_str("Transfer error"), + Error::DelegationRateTooLarge => formatter.write_str("Delegation rate too large"), + Error::DelegatorFundsLocked => formatter.write_str("Delegator's funds are locked"), + Error::ArithmeticOverflow => formatter.write_str("Arithmetic overflow"), + Error::GasLimit => formatter.write_str("Execution exceeded the gas limit"), + Error::RuntimeStackOverflow => formatter.write_str("Runtime stack overflow"), + Error::MintError => formatter.write_str("An error in the mint contract execution"), + Error::ExceededDelegatorSizeLimit => formatter.write_str("The amount of delegators per validator has been exceeded"), + Error::GlobalDelegatorCapacityReached => formatter.write_str("The global delegator capacity has been reached"), + Error::DelegationAmountTooSmall => formatter.write_str("The delegated amount is below the minimum allowed"), + Error::RuntimeStack 
=> formatter.write_str("Runtime stack error"), + Error::AuctionBidsDisabled => formatter.write_str("Auction bids are disabled"), + Error::GetAccumulationPurse => formatter.write_str("Get accumulation purse error"), + Error::TransferToAdministrator => formatter.write_str("Transfer to administrator error"), + Error::ForgedReference => formatter.write_str("Forged reference"), + Error::MissingPurse => formatter.write_str("Missing purse"), + Error::ValidatorBidExistsAlready => formatter.write_str("Validator bid with given public key already exists"), + Error::BridgeRecordChainTooLong => formatter.write_str("Bridge record chain is too long to find current validator bid"), + Error::UnexpectedBidVariant => formatter.write_str("Unexpected bid variant"), + Error::DelegationAmountTooLarge => formatter.write_str("The delegated amount is above the maximum allowed"), + Error::ReservationNotFound => formatter.write_str("Reservation not found"), + Error::ExceededReservationSlotsLimit => formatter.write_str("Validator exceeded allowed number of reserved delegator slots"), + Error::ExceededReservationsLimit => formatter.write_str("All reserved slots for validator are already occupied"), + Error::ReservationSlotsCountTooSmall => formatter.write_str("Reserved slots count is less than number of existing reservations"), + Error::UnexpectedUnbondVariant => formatter.write_str("Unexpected unbond variant"), + Error::UnexpectedStoredValueVariant => formatter.write_str("Unexpected stored value variant"), + Error::RedelegationValidatorNotFound => formatter.write_str("Redelegation validator not found"), + Error::VestingLockout => formatter.write_str("Cannot perform attempted action during vesting periods"), + } + } +} impl CLTyped for Error { fn cl_type() -> CLType { @@ -171,6 +494,7 @@ impl CLTyped for Error { // This error type is not intended to be used by third party crates. 
#[doc(hidden)] +#[derive(Debug, PartialEq, Eq)] pub struct TryFromU8ForError(()); // This conversion is not intended to be used by third party crates. @@ -205,11 +529,10 @@ impl TryFrom for Error { d if d == Error::MissingDelegations as u8 => Ok(Error::MissingDelegations), d if d == Error::MismatchedEraValidators as u8 => Ok(Error::MismatchedEraValidators), d if d == Error::MintReward as u8 => Ok(Error::MintReward), - d if d == Error::MintReduceTotalSupply as u8 => Ok(Error::MintReduceTotalSupply), d if d == Error::InvalidValidatorSlotsValue as u8 => { Ok(Error::InvalidValidatorSlotsValue) } - d if d == Error::TransferToDelegatorPurse as u8 => Ok(Error::TransferToDelegatorPurse), + d if d == Error::MintReduceTotalSupply as u8 => Ok(Error::MintReduceTotalSupply), d if d == Error::TransferToDelegatorPurse as u8 => Ok(Error::TransferToDelegatorPurse), d if d == Error::ValidatorRewardTransfer as u8 => Ok(Error::ValidatorRewardTransfer), d if d == Error::DelegatorRewardTransfer as u8 => Ok(Error::DelegatorRewardTransfer), @@ -225,7 +548,47 @@ impl TryFrom for Error { d if d == Error::Transfer as u8 => Ok(Error::Transfer), d if d == Error::DelegationRateTooLarge as u8 => Ok(Error::DelegationRateTooLarge), d if d == Error::DelegatorFundsLocked as u8 => Ok(Error::DelegatorFundsLocked), + d if d == Error::ArithmeticOverflow as u8 => Ok(Error::ArithmeticOverflow), d if d == Error::GasLimit as u8 => Ok(Error::GasLimit), + d if d == Error::RuntimeStackOverflow as u8 => Ok(Error::RuntimeStackOverflow), + d if d == Error::MintError as u8 => Ok(Error::MintError), + d if d == Error::ExceededDelegatorSizeLimit as u8 => { + Ok(Error::ExceededDelegatorSizeLimit) + } + d if d == Error::GlobalDelegatorCapacityReached as u8 => { + Ok(Error::GlobalDelegatorCapacityReached) + } + d if d == Error::DelegationAmountTooSmall as u8 => Ok(Error::DelegationAmountTooSmall), + d if d == Error::RuntimeStack as u8 => Ok(Error::RuntimeStack), + d if d == Error::AuctionBidsDisabled as u8 => 
Ok(Error::AuctionBidsDisabled), + d if d == Error::GetAccumulationPurse as u8 => Ok(Error::GetAccumulationPurse), + d if d == Error::TransferToAdministrator as u8 => Ok(Error::TransferToAdministrator), + d if d == Error::ForgedReference as u8 => Ok(Error::ForgedReference), + d if d == Error::MissingPurse as u8 => Ok(Error::MissingPurse), + d if d == Error::ValidatorBidExistsAlready as u8 => { + Ok(Error::ValidatorBidExistsAlready) + } + d if d == Error::BridgeRecordChainTooLong as u8 => Ok(Error::BridgeRecordChainTooLong), + d if d == Error::UnexpectedBidVariant as u8 => Ok(Error::UnexpectedBidVariant), + d if d == Error::DelegationAmountTooLarge as u8 => Ok(Error::DelegationAmountTooLarge), + d if d == Error::ReservationNotFound as u8 => Ok(Error::ReservationNotFound), + d if d == Error::ExceededReservationSlotsLimit as u8 => { + Ok(Error::ExceededReservationSlotsLimit) + } + d if d == Error::ExceededReservationsLimit as u8 => { + Ok(Error::ExceededReservationsLimit) + } + d if d == Error::ReservationSlotsCountTooSmall as u8 => { + Ok(Error::ReservationSlotsCountTooSmall) + } + d if d == Error::UnexpectedUnbondVariant as u8 => Ok(Error::UnexpectedUnbondVariant), + d if d == Error::UnexpectedStoredValueVariant as u8 => { + Ok(Error::UnexpectedStoredValueVariant) + } + d if d == Error::RedelegationValidatorNotFound as u8 => { + Ok(Error::RedelegationValidatorNotFound) + } + d if d == Error::VestingLockout as u8 => Ok(Error::VestingLockout), _ => Err(TryFromU8ForError(())), } } @@ -248,7 +611,7 @@ impl FromBytes for Error { let error: Error = value .try_into() // In case an Error variant is unable to be determined it would return an - // Error::Formatting as if its unable to be correctly deserialized. + // Error::Formatting as if it's unable to be correctly deserialized. 
.map_err(|_| bytesrepr::Error::Formatting)?; Ok((error, rem)) } @@ -270,7 +633,7 @@ pub enum PurseLookupError { impl From for Error { fn from(error: PurseLookupError) -> Self { match error { - PurseLookupError::KeyNotFound => Error::MissingKey, + PurseLookupError::KeyNotFound => Error::MissingPurse, PurseLookupError::KeyUnexpectedType => Error::InvalidKeyVariant, } } @@ -278,24 +641,29 @@ impl From for Error { #[cfg(test)] mod tests { - use std::convert::TryFrom; + use strum::IntoEnumIterator; - use super::{Error, TryFromU8ForError, MAX_ERROR_VALUE}; + use super::Error; #[test] - fn error_round_trips() { - for i in 0..=u8::max_value() { - match Error::try_from(i) { - Ok(error) if i < MAX_ERROR_VALUE => assert_eq!(error as u8, i), - Ok(error) => panic!( - "value of variant {:?} ({}) exceeds MAX_ERROR_VALUE ({})", - error, i, MAX_ERROR_VALUE - ), - Err(TryFromU8ForError(())) if i >= MAX_ERROR_VALUE => (), - Err(TryFromU8ForError(())) => { - panic!("missing conversion from u8 to error value: {}", i) + fn error_forward_trips() { + for expected_error_variant in Error::iter() { + assert_eq!( + Error::try_from(expected_error_variant as u8), + Ok(expected_error_variant) + ) + } + } + + #[test] + fn error_backward_trips() { + for u8 in 0..=u8::MAX { + match Error::try_from(u8) { + Ok(error_variant) => { + assert_eq!(u8, error_variant as u8, "Error code mismatch") } - } + Err(_) => continue, + }; } } } diff --git a/types/src/system/auction/mod.rs b/types/src/system/auction/mod.rs deleted file mode 100644 index eb947f2eee..0000000000 --- a/types/src/system/auction/mod.rs +++ /dev/null @@ -1,572 +0,0 @@ -//! Contains implementation of a Auction contract functionality. 
-mod bid; -mod constants; -mod delegator; -mod detail; -mod era_info; -mod error; -mod providers; -mod seigniorage_recipient; -mod unbonding_purse; - -use alloc::{collections::BTreeMap, vec::Vec}; - -use num_rational::Ratio; - -use crate::{account::AccountHash, EraId, PublicKey, U512}; - -pub use bid::Bid; -pub use constants::*; -pub use delegator::Delegator; -pub use era_info::*; -pub use error::Error; -pub use providers::{ - AccountProvider, MintProvider, RuntimeProvider, StorageProvider, SystemProvider, -}; -pub use seigniorage_recipient::SeigniorageRecipient; -pub use unbonding_purse::UnbondingPurse; - -/// Representation of delegation rate of tokens. Range from 0..=100. -pub type DelegationRate = u8; - -/// Validators mapped to their bids. -pub type Bids = BTreeMap; - -/// Weights of validators. "Weight" in this context means a sum of their stakes. -pub type ValidatorWeights = BTreeMap; - -/// List of era validators -pub type EraValidators = BTreeMap; - -/// Collection of seigniorage recipients. -pub type SeigniorageRecipients = BTreeMap; - -/// Snapshot of `SeigniorageRecipients` for a given era. -pub type SeigniorageRecipientsSnapshot = BTreeMap; - -/// Validators and delegators mapped to their unbonding purses. -pub type UnbondingPurses = BTreeMap>; - -/// Bonding auction contract interface -pub trait Auction: - StorageProvider + SystemProvider + RuntimeProvider + MintProvider + AccountProvider + Sized -{ - /// Returns era_validators. - /// - /// Publicly accessible, but intended for periodic use by the Handle Payment contract to update - /// its own internal data structures recording current and past winners. 
- fn get_era_validators(&mut self) -> Result { - let snapshot = detail::get_seigniorage_recipients_snapshot(self)?; - let era_validators = snapshot - .into_iter() - .map(|(era_id, recipients)| { - let validator_weights = recipients - .into_iter() - .map(|(public_key, bid)| (public_key, bid.total_stake())) - .collect::(); - (era_id, validator_weights) - }) - .collect::>(); - Ok(era_validators) - } - - /// Returns validators in era_validators, mapped to their bids or founding stakes, delegation - /// rates and lists of delegators together with their delegated quantities from delegators. - /// This function is publicly accessible, but intended for system use by the Handle Payment - /// contract, because this data is necessary for distributing seigniorage. - fn read_seigniorage_recipients(&mut self) -> Result { - // `era_validators` are assumed to be computed already by calling "run_auction" entrypoint. - let era_index = detail::get_era_id(self)?; - match self.read_era_validators(era_index)? { - Some(seigniorage_recipients) => Ok(seigniorage_recipients), - None => Err(Error::MissingSeigniorageRecipients), - } - } - - /// For a non-founder validator, this adds, or modifies, an entry in the `bids` collection and - /// calls `bond` in the Mint contract to create (or top off) a bid purse. It also adjusts the - /// delegation rate. - fn add_bid( - &mut self, - public_key: PublicKey, - delegation_rate: DelegationRate, - amount: U512, - ) -> Result { - let account_hash = AccountHash::from_public_key(&public_key, |x| self.blake2b(x)); - if self.get_caller() != account_hash { - return Err(Error::InvalidPublicKey); - } - - if amount.is_zero() { - return Err(Error::BondTooSmall); - } - - if delegation_rate > DELEGATION_RATE_DENOMINATOR { - return Err(Error::DelegationRateTooLarge); - } - - let source = self.get_main_purse()?; - - let account_hash = AccountHash::from(&public_key); - - // Update bids or stakes - let updated_amount = match self.read_bid(&account_hash)? 
{ - Some(mut bid) => { - if bid.inactive() { - bid.activate(); - } - self.transfer_purse_to_purse(source, *bid.bonding_purse(), amount) - .map_err(|_| Error::TransferToBidPurse)?; - let updated_amount = bid - .with_delegation_rate(delegation_rate) - .increase_stake(amount)?; - self.write_bid(account_hash, bid)?; - updated_amount - } - None => { - let bonding_purse = self.create_purse()?; - self.transfer_purse_to_purse(source, bonding_purse, amount) - .map_err(|_| Error::TransferToBidPurse)?; - let bid = Bid::unlocked(public_key, bonding_purse, amount, delegation_rate); - self.write_bid(account_hash, bid)?; - amount - } - }; - - Ok(updated_amount) - } - - /// For a non-founder validator, implements essentially the same logic as add_bid, but reducing - /// the number of tokens and calling unbond in lieu of bond. - /// - /// For a founding validator, this function first checks whether they are released, and fails - /// if they are not. - /// - /// The function returns a the new amount of motes remaining in the bid. If the target bid - /// does not exist, the function call returns an error. - fn withdraw_bid(&mut self, public_key: PublicKey, amount: U512) -> Result { - let account_hash = AccountHash::from_public_key(&public_key, |x| self.blake2b(x)); - if self.get_caller() != account_hash { - return Err(Error::InvalidPublicKey); - } - - let mut bid = self - .read_bid(&account_hash)? - .ok_or(Error::ValidatorNotFound)?; - - let era_end_timestamp_millis = detail::get_era_end_timestamp_millis(self)?; - - // Fails if requested amount is greater than either the total stake or the amount of vested - // stake. 
- let updated_stake = bid.decrease_stake(amount, era_end_timestamp_millis)?; - - detail::create_unbonding_purse( - self, - public_key.clone(), - public_key.clone(), // validator is the unbonder - *bid.bonding_purse(), - amount, - )?; - - if updated_stake.is_zero() { - // Automatically unbond delegators - for (delegator_public_key, delegator) in bid.delegators() { - detail::create_unbonding_purse( - self, - public_key.clone(), - delegator_public_key.clone(), - *delegator.bonding_purse(), - *delegator.staked_amount(), - )?; - } - - *bid.delegators_mut() = BTreeMap::new(); - - bid.deactivate(); - } - - self.write_bid(account_hash, bid)?; - - Ok(updated_stake) - } - - /// Adds a new delegator to delegators, or tops off a current one. If the target validator is - /// not in founders, the function call returns an error and does nothing. - /// - /// The function calls bond in the Mint contract to transfer motes to the validator's purse and - /// returns a tuple of that purse and the amount of motes contained in it after the transfer. - fn delegate( - &mut self, - delegator_public_key: PublicKey, - validator_public_key: PublicKey, - amount: U512, - ) -> Result { - let account_hash = AccountHash::from_public_key(&delegator_public_key, |x| self.blake2b(x)); - if self.get_caller() != account_hash { - return Err(Error::InvalidPublicKey); - } - - if amount.is_zero() { - return Err(Error::BondTooSmall); - } - - let source = self.get_main_purse()?; - - let validator_account_hash = AccountHash::from(&validator_public_key); - - let mut bid = match self.read_bid(&validator_account_hash)? 
{ - Some(bid) => bid, - None => { - // Return early if target validator is not in `bids` - return Err(Error::ValidatorNotFound); - } - }; - - let delegators = bid.delegators_mut(); - - let new_delegation_amount = match delegators.get_mut(&delegator_public_key) { - Some(delegator) => { - self.transfer_purse_to_purse(source, *delegator.bonding_purse(), amount) - .map_err(|_| Error::TransferToDelegatorPurse)?; - delegator.increase_stake(amount)?; - *delegator.staked_amount() - } - None => { - let bonding_purse = self.create_purse()?; - self.transfer_purse_to_purse(source, bonding_purse, amount) - .map_err(|_| Error::TransferToDelegatorPurse)?; - let delegator = Delegator::unlocked( - delegator_public_key.clone(), - amount, - bonding_purse, - validator_public_key, - ); - delegators.insert(delegator_public_key.clone(), delegator); - amount - } - }; - - self.write_bid(validator_account_hash, bid)?; - - Ok(new_delegation_amount) - } - - /// Removes an amount of motes (or the entry altogether, if the remaining amount is 0) from - /// the entry in delegators and calls unbond in the Mint contract to create a new unbonding - /// purse. - /// - /// The arguments are the delegator’s key, the validator key and quantity of motes and - /// returns a tuple of the unbonding purse along with the remaining bid amount. - fn undelegate( - &mut self, - delegator_public_key: PublicKey, - validator_public_key: PublicKey, - amount: U512, - ) -> Result { - let account_hash = AccountHash::from_public_key(&delegator_public_key, |x| self.blake2b(x)); - if self.get_caller() != account_hash { - return Err(Error::InvalidPublicKey); - } - - let validator_account_hash = AccountHash::from(&validator_public_key); - let mut bid = match self.read_bid(&validator_account_hash)? 
{ - Some(bid) => bid, - None => return Err(Error::ValidatorNotFound), - }; - - let delegators = bid.delegators_mut(); - - let new_amount = match delegators.get_mut(&delegator_public_key) { - Some(delegator) => { - detail::create_unbonding_purse( - self, - validator_public_key, - delegator_public_key.clone(), - *delegator.bonding_purse(), - amount, - )?; - - let era_end_timestamp_millis = detail::get_era_end_timestamp_millis(self)?; - let updated_stake = delegator.decrease_stake(amount, era_end_timestamp_millis)?; - if updated_stake == U512::zero() { - delegators.remove(&delegator_public_key); - }; - updated_stake - } - None => return Err(Error::DelegatorNotFound), - }; - - self.write_bid(validator_account_hash, bid)?; - - Ok(new_amount) - } - - /// Slashes each validator. - /// - /// This can be only invoked through a system call. - fn slash(&mut self, validator_public_keys: Vec) -> Result<(), Error> { - if self.get_caller() != PublicKey::System.to_account_hash() { - return Err(Error::InvalidCaller); - } - - let mut burned_amount: U512 = U512::zero(); - - for validator_public_key in validator_public_keys { - // Burn stake, deactivate - let validator_account_hash = AccountHash::from(&validator_public_key); - if let Some(mut bid) = self.read_bid(&validator_account_hash)? { - burned_amount += *bid.staked_amount(); - *bid.staked_amount_mut() = U512::zero(); - bid.deactivate(); - // Reset delegator stakes when deactivating validator bid. 
- for delegator in bid.delegators_mut().values_mut() { - *delegator.staked_amount_mut() = U512::zero(); - } - self.write_bid(validator_account_hash, bid)?; - }; - - let validator_account_hash = AccountHash::from(&validator_public_key); - // Update unbonding entries for given validator - let unbonding_purses = self.read_withdraw(&validator_account_hash)?; - if !unbonding_purses.is_empty() { - burned_amount += unbonding_purses - .into_iter() - .map(|unbonding_purse| *unbonding_purse.amount()) - .sum(); - self.write_withdraw(validator_account_hash, Vec::new())?; - } - } - - self.reduce_total_supply(burned_amount)?; - - Ok(()) - } - - /// Takes active_bids and delegators to construct a list of validators' total bids (their own - /// added to their delegators') ordered by size from largest to smallest, then takes the top N - /// (number of auction slots) bidders and replaces era_validators with these. - /// - /// Accessed by: node - fn run_auction( - &mut self, - era_end_timestamp_millis: u64, - evicted_validators: Vec, - ) -> Result<(), Error> { - if self.get_caller() != PublicKey::System.to_account_hash() { - return Err(Error::InvalidCaller); - } - - let validator_slots = detail::get_validator_slots(self)?; - let auction_delay = detail::get_auction_delay(self)?; - let mut era_id = detail::get_era_id(self)?; - let mut bids = detail::get_bids(self)?; - - // Process unbond requests - detail::process_unbond_requests(self)?; - - // Process bids - let mut bids_modified = false; - for (validator_public_key, bid) in bids.iter_mut() { - if bid.process(era_end_timestamp_millis) { - bids_modified = true; - } - - if evicted_validators.contains(validator_public_key) { - bids_modified = bid.deactivate() - } - } - - // Compute next auction winners - let winners: ValidatorWeights = { - let founder_weights: ValidatorWeights = bids - .iter() - .filter(|(_public_key, bid)| bid.vesting_schedule().is_some() && !bid.inactive()) - .map(|(public_key, bid)| { - let total_staked_amount = 
bid.total_staked_amount()?; - Ok((public_key.clone(), total_staked_amount)) - }) - .collect::>()?; - - // We collect these into a vec for sorting - let mut non_founder_weights: Vec<(PublicKey, U512)> = bids - .iter() - .filter(|(_public_key, bid)| bid.vesting_schedule().is_none() && !bid.inactive()) - .map(|(public_key, bid)| { - let total_staked_amount = bid.total_staked_amount()?; - Ok((public_key.clone(), total_staked_amount)) - }) - .collect::, Error>>()?; - - non_founder_weights.sort_by(|(_, lhs), (_, rhs)| rhs.cmp(lhs)); - - let remaining_auction_slots = validator_slots.saturating_sub(founder_weights.len()); - - founder_weights - .into_iter() - .chain( - non_founder_weights - .into_iter() - .take(remaining_auction_slots), - ) - .collect() - }; - - // Increment era - era_id += 1; - - let delayed_era = era_id + auction_delay; - - // Update seigniorage recipients for current era - { - let mut recipients = SeigniorageRecipients::new(); - - for era_validator in winners.keys() { - let seigniorage_recipient = match bids.get(era_validator) { - Some(bid) => bid.into(), - None => return Err(Error::BidNotFound), - }; - recipients.insert(era_validator.clone(), seigniorage_recipient); - } - - self.write_era_validators(delayed_era, recipients)?; - } - - detail::set_era_id(self, era_id)?; - detail::set_era_end_timestamp_millis(self, era_end_timestamp_millis)?; - - if bids_modified { - detail::set_bids(self, bids)?; - } - - Ok(()) - } - - /// Mint and distribute seigniorage rewards to validators and their delegators, - /// according to `reward_factors` returned by the consensus component. 
- fn distribute(&mut self, reward_factors: BTreeMap) -> Result<(), Error> { - if self.get_caller() != PublicKey::System.to_account_hash() { - return Err(Error::InvalidCaller); - } - - let seigniorage_recipients = self.read_seigniorage_recipients()?; - let base_round_reward = self.read_base_round_reward()?; - let era_id = detail::get_era_id(self)?; - - if reward_factors.keys().ne(seigniorage_recipients.keys()) { - return Err(Error::MismatchedEraValidators); - } - - let mut era_info = EraInfo::new(); - let mut seigniorage_allocations = era_info.seigniorage_allocations_mut(); - - for (public_key, reward_factor) in reward_factors { - let recipient = seigniorage_recipients - .get(&public_key) - .ok_or(Error::ValidatorNotFound)?; - - let total_stake = recipient.total_stake(); - if total_stake.is_zero() { - // TODO: error? - continue; - } - - let total_reward: Ratio = { - let reward_rate = Ratio::new(U512::from(reward_factor), U512::from(BLOCK_REWARD)); - reward_rate * base_round_reward - }; - - let delegator_total_stake: U512 = recipient.delegator_total_stake(); - - let delegators_part: Ratio = { - let commission_rate = Ratio::new( - U512::from(*recipient.delegation_rate()), - U512::from(DELEGATION_RATE_DENOMINATOR), - ); - let reward_multiplier: Ratio = Ratio::new(delegator_total_stake, total_stake); - let delegator_reward: Ratio = total_reward * reward_multiplier; - let commission: Ratio = delegator_reward * commission_rate; - delegator_reward - commission - }; - - let delegator_rewards = - recipient - .delegator_stake() - .iter() - .map(|(delegator_key, delegator_stake)| { - let reward_multiplier = Ratio::new(*delegator_stake, delegator_total_stake); - let reward = delegators_part * reward_multiplier; - (delegator_key.clone(), reward) - }); - let delegator_payouts = detail::reinvest_delegator_rewards( - self, - &mut seigniorage_allocations, - public_key.clone(), - delegator_rewards, - )?; - let total_delegator_payout = delegator_payouts - .iter() - .map(|(amount, 
_bonding_purse)| *amount) - .sum(); - - let validators_part: Ratio = total_reward - Ratio::from(total_delegator_payout); - let validator_reward = validators_part.to_integer(); - let validator_bonding_purse = detail::reinvest_validator_reward( - self, - &mut seigniorage_allocations, - public_key.clone(), - validator_reward, - )?; - // TODO: add "mint into existing purse" facility - let tmp_validator_reward_purse = - self.mint(validator_reward).map_err(|_| Error::MintReward)?; - self.transfer_purse_to_purse( - tmp_validator_reward_purse, - validator_bonding_purse, - validator_reward, - ) - .map_err(|_| Error::ValidatorRewardTransfer)?; - - // TODO: add "mint into existing purse" facility - let tmp_delegator_reward_purse = self - .mint(total_delegator_payout) - .map_err(|_| Error::MintReward)?; - for (delegator_payout, bonding_purse) in delegator_payouts { - self.transfer_purse_to_purse( - tmp_delegator_reward_purse, - bonding_purse, - delegator_payout, - ) - .map_err(|_| Error::DelegatorRewardTransfer)?; - } - } - - self.record_era_info(era_id, era_info)?; - - Ok(()) - } - - /// Reads current era id. - fn read_era_id(&mut self) -> Result { - detail::get_era_id(self) - } - - /// Activates a given validator's bid. To be used when a validator has been marked as inactive - /// by consensus (aka "evicted"). - fn activate_bid(&mut self, validator_public_key: PublicKey) -> Result<(), Error> { - let account_hash = AccountHash::from_public_key(&validator_public_key, |x| self.blake2b(x)); - if self.get_caller() != account_hash { - return Err(Error::InvalidPublicKey); - } - - let mut bid = match self.read_bid(&account_hash)? 
{ - Some(bid) => bid, - None => return Err(Error::ValidatorNotFound), - }; - - bid.activate(); - - self.write_bid(account_hash, bid)?; - - Ok(()) - } -} diff --git a/types/src/system/auction/providers.rs b/types/src/system/auction/providers.rs deleted file mode 100644 index d68ddf188d..0000000000 --- a/types/src/system/auction/providers.rs +++ /dev/null @@ -1,120 +0,0 @@ -use alloc::{collections::BTreeSet, vec::Vec}; - -use crate::{ - account::AccountHash, - bytesrepr::{FromBytes, ToBytes}, - system::auction::{Bid, EraId, EraInfo, Error, SeigniorageRecipients, UnbondingPurse}, - CLTyped, Key, KeyTag, TransferredTo, URef, BLAKE2B_DIGEST_LENGTH, U512, -}; - -/// Provider of runtime host functionality. -pub trait RuntimeProvider { - /// This method should return the caller of the current context. - fn get_caller(&self) -> AccountHash; - - /// Gets named key under a `name`. - fn named_keys_get(&self, name: &str) -> Option; - - /// Gets keys in a given keyspace - fn get_keys(&mut self, key_tag: &KeyTag) -> Result, Error>; - - /// Returns a 32-byte BLAKE2b digest - fn blake2b>(&self, data: T) -> [u8; BLAKE2B_DIGEST_LENGTH]; -} - -/// Provides functionality of a contract storage. -pub trait StorageProvider { - /// Reads data from [`URef`]. - fn read(&mut self, uref: URef) -> Result, Error>; - - /// Writes data to [`URef]. 
- fn write(&mut self, uref: URef, value: T) -> Result<(), Error>; - - /// Reads [`Bid`] at account hash derived from given public key - fn read_bid(&mut self, account_hash: &AccountHash) -> Result, Error>; - - /// Writes given [`Bid`] at account hash derived from given public key - fn write_bid(&mut self, account_hash: AccountHash, bid: Bid) -> Result<(), Error>; - - /// Reads collection of [`UnbondingPurse`]s at account hash derived from given public key - fn read_withdraw(&mut self, account_hash: &AccountHash) -> Result, Error>; - - /// Writes given [`UnbondingPurse`]s at account hash derived from given public key - fn write_withdraw( - &mut self, - account_hash: AccountHash, - unbonding_purses: Vec, - ) -> Result<(), Error>; - - /// Reads collection of [`SeignorageRecipients`]s - fn read_era_validators( - &mut self, - era_id: EraId, - ) -> Result, Error>; - - /// Writes given EraValidators for a given EraId. - fn write_era_validators( - &mut self, - era_id: EraId, - recipients: SeigniorageRecipients, - ) -> Result<(), Error>; -} - -/// Provides functionality of a system module. -pub trait SystemProvider { - /// Creates new purse. - fn create_purse(&mut self) -> Result; - - /// Gets purse balance. - fn get_balance(&mut self, purse: URef) -> Result, Error>; - - /// Transfers specified `amount` of tokens from `source` purse into a `target` purse. - fn transfer_from_purse_to_purse( - &mut self, - source: URef, - target: URef, - amount: U512, - ) -> Result<(), Error>; - - /// Records era info at the given era id. - fn record_era_info(&mut self, era_id: EraId, era_info: EraInfo) -> Result<(), Error>; -} - -/// Provides an access to mint. -pub trait MintProvider { - /// Transfers `amount` from `source` purse to a `target` account. - fn transfer_purse_to_account( - &mut self, - source: URef, - target: AccountHash, - amount: U512, - ) -> Result; - - /// Transfers `amount` from `source` purse to a `target` purse. 
- fn transfer_purse_to_purse( - &mut self, - source: URef, - target: URef, - amount: U512, - ) -> Result<(), Error>; - - /// Checks balance of a `purse`. Returns `None` if given purse does not exist. - fn balance(&mut self, purse: URef) -> Result, Error>; - - /// Reads the base round reward. - fn read_base_round_reward(&mut self) -> Result; - - /// Mints new token with given `initial_balance` balance. Returns new purse on success, - /// otherwise an error. - fn mint(&mut self, amount: U512) -> Result; - - /// Reduce total supply by `amount`. Returns unit on success, otherwise - /// an error. - fn reduce_total_supply(&mut self, amount: U512) -> Result<(), Error>; -} - -/// Provider of an account related functionality. -pub trait AccountProvider { - /// Get currently executing account's purse. - fn get_main_purse(&self) -> Result; -} diff --git a/types/src/system/auction/reservation.rs b/types/src/system/auction/reservation.rs new file mode 100644 index 0000000000..01850aa139 --- /dev/null +++ b/types/src/system/auction/reservation.rs @@ -0,0 +1,163 @@ +use alloc::vec::Vec; +use core::fmt::{self, Display, Formatter}; +#[cfg(any(feature = "testing", test))] +use rand::{ + distributions::{Distribution, Standard}, + Rng, +}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + CLType, CLTyped, PublicKey, +}; + +use super::{DelegationRate, DelegatorKind}; + +/// Represents a validator reserving a slot for specific delegator +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub struct Reservation { + /// Delegator kind. + delegator_kind: DelegatorKind, + /// Validator public key. + validator_public_key: PublicKey, + /// Individual delegation rate. 
+ delegation_rate: DelegationRate, +} + +impl Reservation { + /// Creates a new [`Reservation`] + pub fn new( + validator_public_key: PublicKey, + delegator_kind: DelegatorKind, + delegation_rate: DelegationRate, + ) -> Self { + Self { + delegator_kind, + validator_public_key, + delegation_rate, + } + } + + /// Returns kind of delegator. + pub fn delegator_kind(&self) -> &DelegatorKind { + &self.delegator_kind + } + + /// Returns delegatee + pub fn validator_public_key(&self) -> &PublicKey { + &self.validator_public_key + } + + /// Gets the delegation rate of the provided bid + pub fn delegation_rate(&self) -> &DelegationRate { + &self.delegation_rate + } +} + +impl CLTyped for Reservation { + fn cl_type() -> CLType { + CLType::Any + } +} + +impl ToBytes for Reservation { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + buffer.extend(self.delegator_kind.to_bytes()?); + buffer.extend(self.validator_public_key.to_bytes()?); + buffer.extend(self.delegation_rate.to_bytes()?); + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.delegator_kind.serialized_length() + + self.validator_public_key.serialized_length() + + self.delegation_rate.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.delegator_kind.write_bytes(writer)?; + self.validator_public_key.write_bytes(writer)?; + self.delegation_rate.write_bytes(writer)?; + Ok(()) + } +} + +impl FromBytes for Reservation { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (delegator_kind, bytes) = DelegatorKind::from_bytes(bytes)?; + let (validator_public_key, bytes) = PublicKey::from_bytes(bytes)?; + let (delegation_rate, bytes) = FromBytes::from_bytes(bytes)?; + Ok(( + Self { + delegator_kind, + validator_public_key, + delegation_rate, + }, + bytes, + )) + } +} + +impl Display for Reservation { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + write!( + 
formatter, + "Reservation {{ delegator {}, validator {} }}", + self.delegator_kind, self.validator_public_key + ) + } +} + +#[cfg(any(feature = "testing", test))] +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> Reservation { + Reservation { + delegator_kind: rng.gen(), + validator_public_key: rng.gen(), + delegation_rate: rng.gen(), + } + } +} + +#[cfg(test)] +mod tests { + use crate::{bytesrepr, system::auction::Reservation, PublicKey, SecretKey}; + + #[test] + fn serialization_roundtrip() { + let delegator_kind = PublicKey::from( + &SecretKey::ed25519_from_bytes([42; SecretKey::ED25519_LENGTH]).unwrap(), + ) + .into(); + + let validator_public_key: PublicKey = PublicKey::from( + &SecretKey::ed25519_from_bytes([43; SecretKey::ED25519_LENGTH]).unwrap(), + ); + let entry = Reservation::new(validator_public_key, delegator_kind, 0); + bytesrepr::test_serialization_roundtrip(&entry); + } +} + +#[cfg(test)] +mod prop_tests { + use proptest::prelude::*; + + use crate::{bytesrepr, gens}; + + proptest! { + #[test] + fn test_value_bid(bid in gens::reservation_arb()) { + bytesrepr::test_serialization_roundtrip(&bid); + } + } +} diff --git a/types/src/system/auction/seigniorage_recipient.rs b/types/src/system/auction/seigniorage_recipient.rs index f5770167b0..7596e21918 100644 --- a/types/src/system/auction/seigniorage_recipient.rs +++ b/types/src/system/auction/seigniorage_recipient.rs @@ -1,22 +1,15 @@ -// TODO - remove once schemars stops causing warning. -#![allow(clippy::field_reassign_with_default)] - use alloc::{collections::BTreeMap, vec::Vec}; -#[cfg(feature = "std")] -use schemars::JsonSchema; -use serde::{Deserialize, Serialize}; - use crate::{ bytesrepr::{self, FromBytes, ToBytes}, - system::auction::{Bid, DelegationRate}, + system::auction::{Bid, DelegationRate, DelegatorKind}, CLType, CLTyped, PublicKey, U512, }; /// The seigniorage recipient details. 
-#[derive(Default, PartialEq, Clone, Debug, Eq, Serialize, Deserialize)] -#[cfg_attr(feature = "std", derive(JsonSchema))] -pub struct SeigniorageRecipient { +/// Legacy version required to deserialize old records. +#[derive(Default, PartialEq, Eq, Clone, Debug)] +pub struct SeigniorageRecipientV1 { /// Validator stake (not including delegators) stake: U512, /// Delegation rate of a seigniorage recipient. @@ -25,7 +18,20 @@ pub struct SeigniorageRecipient { delegator_stake: BTreeMap, } -impl SeigniorageRecipient { +impl SeigniorageRecipientV1 { + /// Creates a new SeigniorageRecipient + pub fn new( + stake: U512, + delegation_rate: DelegationRate, + delegator_stake: BTreeMap, + ) -> Self { + Self { + stake, + delegation_rate, + delegator_stake, + } + } + /// Returns stake of the provided recipient pub fn stake(&self) -> &U512 { &self.stake @@ -42,23 +48,27 @@ impl SeigniorageRecipient { } /// Calculates total stake, including delegators' total stake - pub fn total_stake(&self) -> U512 { - self.stake + self.delegator_total_stake() + pub fn total_stake(&self) -> Option { + self.delegator_total_stake()?.checked_add(self.stake) } - /// Caculates total stake for all delegators - pub fn delegator_total_stake(&self) -> U512 { - self.delegator_stake.values().cloned().sum() + /// Calculates total stake for all delegators + pub fn delegator_total_stake(&self) -> Option { + let mut total_stake: U512 = U512::zero(); + for stake in self.delegator_stake.values() { + total_stake = total_stake.checked_add(*stake)?; + } + Some(total_stake) } } -impl CLTyped for SeigniorageRecipient { +impl CLTyped for SeigniorageRecipientV1 { fn cl_type() -> CLType { CLType::Any } } -impl ToBytes for SeigniorageRecipient { +impl ToBytes for SeigniorageRecipientV1 { fn to_bytes(&self) -> Result, bytesrepr::Error> { let mut result = bytesrepr::allocate_buffer(self)?; result.extend(self.stake.to_bytes()?); @@ -74,13 +84,13 @@ impl ToBytes for SeigniorageRecipient { } } -impl FromBytes for 
SeigniorageRecipient { +impl FromBytes for SeigniorageRecipientV1 { fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { let (stake, bytes) = FromBytes::from_bytes(bytes)?; let (delegation_rate, bytes) = FromBytes::from_bytes(bytes)?; let (delegator_stake, bytes) = FromBytes::from_bytes(bytes)?; Ok(( - SeigniorageRecipient { + Self { stake, delegation_rate, delegator_stake, @@ -90,12 +100,14 @@ impl FromBytes for SeigniorageRecipient { } } -impl From<&Bid> for SeigniorageRecipient { +impl From<&Bid> for SeigniorageRecipientV1 { fn from(bid: &Bid) -> Self { let delegator_stake = bid .delegators() .iter() - .map(|(public_key, delegator)| (public_key.clone(), *delegator.staked_amount())) + .map(|(delegator_public_key, delegator)| { + (delegator_public_key.clone(), delegator.staked_amount()) + }) .collect(); Self { stake: *bid.staked_amount(), @@ -105,37 +117,327 @@ impl From<&Bid> for SeigniorageRecipient { } } +/// The seigniorage recipient details with delegation rates for reservations. +#[derive(Default, PartialEq, Eq, Clone, Debug)] +pub struct SeigniorageRecipientV2 { + /// Validator stake (not including delegators) + stake: U512, + /// Delegation rate of a seigniorage recipient. + delegation_rate: DelegationRate, + /// Delegators and their bids. 
+ delegator_stake: BTreeMap, + /// Delegation rates for reserved slots + reservation_delegation_rates: BTreeMap, +} + +impl SeigniorageRecipientV2 { + /// Creates a new SeigniorageRecipient + pub fn new( + stake: U512, + delegation_rate: DelegationRate, + delegator_stake: BTreeMap, + reservation_delegation_rates: BTreeMap, + ) -> Self { + Self { + stake, + delegation_rate, + delegator_stake, + reservation_delegation_rates, + } + } + + /// Returns stake of the provided recipient + pub fn stake(&self) -> &U512 { + &self.stake + } + + /// Returns delegation rate of the provided recipient + pub fn delegation_rate(&self) -> &DelegationRate { + &self.delegation_rate + } + + /// Returns delegators of the provided recipient and their stake + pub fn delegator_stake(&self) -> &BTreeMap { + &self.delegator_stake + } + + /// Calculates total stake, including delegators' total stake + pub fn total_stake(&self) -> Option { + self.delegator_total_stake()?.checked_add(self.stake) + } + + /// Calculates total stake for all delegators + pub fn delegator_total_stake(&self) -> Option { + let mut total_stake: U512 = U512::zero(); + for stake in self.delegator_stake.values() { + total_stake = total_stake.checked_add(*stake)?; + } + Some(total_stake) + } + + /// Returns delegation rates for reservations of the provided recipient + pub fn reservation_delegation_rates(&self) -> &BTreeMap { + &self.reservation_delegation_rates + } +} + +impl CLTyped for SeigniorageRecipientV2 { + fn cl_type() -> CLType { + CLType::Any + } +} + +impl ToBytes for SeigniorageRecipientV2 { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut result = bytesrepr::allocate_buffer(self)?; + result.extend(self.stake.to_bytes()?); + result.extend(self.delegation_rate.to_bytes()?); + result.extend(self.delegator_stake.to_bytes()?); + result.extend(self.reservation_delegation_rates.to_bytes()?); + Ok(result) + } + + fn serialized_length(&self) -> usize { + self.stake.serialized_length() + + 
self.delegation_rate.serialized_length() + + self.delegator_stake.serialized_length() + + self.reservation_delegation_rates.serialized_length() + } +} + +impl FromBytes for SeigniorageRecipientV2 { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (stake, bytes) = FromBytes::from_bytes(bytes)?; + let (delegation_rate, bytes) = FromBytes::from_bytes(bytes)?; + let (delegator_stake, bytes) = FromBytes::from_bytes(bytes)?; + let (reservation_delegation_rates, bytes) = FromBytes::from_bytes(bytes)?; + Ok(( + Self { + stake, + delegation_rate, + delegator_stake, + reservation_delegation_rates, + }, + bytes, + )) + } +} + +impl From<&Bid> for SeigniorageRecipientV2 { + fn from(bid: &Bid) -> Self { + let delegator_stake = bid + .delegators() + .iter() + .map(|(delegator_public_key, delegator)| { + ( + DelegatorKind::PublicKey(delegator_public_key.clone()), + delegator.staked_amount(), + ) + }) + .collect(); + Self { + stake: *bid.staked_amount(), + delegation_rate: *bid.delegation_rate(), + delegator_stake, + reservation_delegation_rates: BTreeMap::new(), + } + } +} + +impl From for SeigniorageRecipientV2 { + fn from(snapshot: SeigniorageRecipientV1) -> Self { + let mut delegator_stake = BTreeMap::new(); + for (kind, amount) in snapshot.delegator_stake { + delegator_stake.insert(DelegatorKind::PublicKey(kind), amount); + } + + Self { + stake: snapshot.stake, + delegation_rate: snapshot.delegation_rate, + delegator_stake, + reservation_delegation_rates: Default::default(), + } + } +} + +/// Wrapper enum for all variants of `SeigniorageRecipient`. 
+#[allow(missing_docs)] +pub enum SeigniorageRecipient { + V1(SeigniorageRecipientV1), + V2(SeigniorageRecipientV2), +} + +impl SeigniorageRecipient { + /// Returns stake of the provided recipient + pub fn stake(&self) -> &U512 { + match self { + Self::V1(recipient) => &recipient.stake, + Self::V2(recipient) => &recipient.stake, + } + } + + /// Returns delegation rate of the provided recipient + pub fn delegation_rate(&self) -> &DelegationRate { + match self { + Self::V1(recipient) => &recipient.delegation_rate, + Self::V2(recipient) => &recipient.delegation_rate, + } + } + + /// Returns delegators of the provided recipient and their stake + pub fn delegator_stake(&self) -> BTreeMap { + let recipient = match self { + Self::V1(recipient) => { + let ret: SeigniorageRecipientV2 = recipient.clone().into(); + ret + } + Self::V2(recipient) => recipient.clone(), + }; + recipient.delegator_stake + } + + /// Calculates total stake, including delegators' total stake + pub fn total_stake(&self) -> Option { + match self { + Self::V1(recipient) => recipient.total_stake(), + Self::V2(recipient) => recipient.total_stake(), + } + } + + /// Calculates total stake for all delegators + pub fn delegator_total_stake(&self) -> Option { + match self { + Self::V1(recipient) => recipient.delegator_total_stake(), + Self::V2(recipient) => recipient.delegator_total_stake(), + } + } + + /// Returns delegation rates for reservations of the provided recipient + pub fn reservation_delegation_rates(&self) -> Option<&BTreeMap> { + match self { + Self::V1(_recipient) => None, + Self::V2(recipient) => Some(&recipient.reservation_delegation_rates), + } + } +} + #[cfg(test)] mod tests { use alloc::collections::BTreeMap; use core::iter::FromIterator; + use super::SeigniorageRecipientV2; use crate::{ bytesrepr, - system::auction::{DelegationRate, SeigniorageRecipient}, - SecretKey, U512, + system::auction::{DelegationRate, DelegatorKind, SeigniorageRecipientV1}, + PublicKey, SecretKey, U512, }; #[test] 
fn serialization_roundtrip() { - let delegator_1_key = SecretKey::ed25519_from_bytes([42; SecretKey::ED25519_LENGTH]) - .unwrap() - .into(); - let delegator_2_key = SecretKey::ed25519_from_bytes([43; SecretKey::ED25519_LENGTH]) - .unwrap() - .into(); - let delegator_3_key = SecretKey::ed25519_from_bytes([44; SecretKey::ED25519_LENGTH]) - .unwrap() - .into(); - let seigniorage_recipient = SeigniorageRecipient { + let delegator_1_kind: DelegatorKind = PublicKey::from( + &SecretKey::ed25519_from_bytes([42; SecretKey::ED25519_LENGTH]).unwrap(), + ) + .into(); + let delegator_2_kind = PublicKey::from( + &SecretKey::ed25519_from_bytes([43; SecretKey::ED25519_LENGTH]).unwrap(), + ) + .into(); + let delegator_3_kind = PublicKey::from( + &SecretKey::ed25519_from_bytes([44; SecretKey::ED25519_LENGTH]).unwrap(), + ) + .into(); + let seigniorage_recipient = SeigniorageRecipientV2 { stake: U512::max_value(), - delegation_rate: DelegationRate::max_value(), + delegation_rate: DelegationRate::MAX, delegator_stake: BTreeMap::from_iter(vec![ - (delegator_1_key, U512::max_value()), - (delegator_2_key, U512::max_value()), - (delegator_3_key, U512::zero()), + (delegator_1_kind.clone(), U512::max_value()), + (delegator_2_kind, U512::max_value()), + (delegator_3_kind, U512::zero()), ]), + reservation_delegation_rates: BTreeMap::from_iter(vec![( + delegator_1_kind, + DelegationRate::MIN, + )]), }; bytesrepr::test_serialization_roundtrip(&seigniorage_recipient); } + + #[test] + fn serialization_roundtrip_legacy_version() { + let delegator_1_key = PublicKey::from( + &SecretKey::ed25519_from_bytes([42; SecretKey::ED25519_LENGTH]).unwrap(), + ); + let delegator_2_key = PublicKey::from( + &SecretKey::ed25519_from_bytes([43; SecretKey::ED25519_LENGTH]).unwrap(), + ); + let delegator_3_key = PublicKey::from( + &SecretKey::ed25519_from_bytes([44; SecretKey::ED25519_LENGTH]).unwrap(), + ); + let legacy_seigniorage_recipient = SeigniorageRecipientV1 { + stake: U512::max_value(), + delegation_rate: 
DelegationRate::MAX, + delegator_stake: BTreeMap::from_iter(vec![ + (delegator_1_key.clone(), U512::max_value()), + (delegator_2_key.clone(), U512::max_value()), + (delegator_3_key.clone(), U512::zero()), + ]), + }; + + bytesrepr::test_serialization_roundtrip(&legacy_seigniorage_recipient); + } + + #[test] + fn test_overflow_in_delegation_rate() { + let delegator_1_kind = DelegatorKind::PublicKey(PublicKey::from( + &SecretKey::ed25519_from_bytes([42; SecretKey::ED25519_LENGTH]).unwrap(), + )); + let delegator_2_kind = DelegatorKind::PublicKey(PublicKey::from( + &SecretKey::ed25519_from_bytes([43; SecretKey::ED25519_LENGTH]).unwrap(), + )); + let delegator_3_kind = DelegatorKind::PublicKey(PublicKey::from( + &SecretKey::ed25519_from_bytes([44; SecretKey::ED25519_LENGTH]).unwrap(), + )); + let seigniorage_recipient = SeigniorageRecipientV2 { + stake: U512::max_value(), + delegation_rate: DelegationRate::MAX, + delegator_stake: BTreeMap::from_iter(vec![ + (delegator_1_kind.clone(), U512::max_value()), + (delegator_2_kind, U512::max_value()), + (delegator_3_kind, U512::zero()), + ]), + reservation_delegation_rates: BTreeMap::from_iter(vec![( + delegator_1_kind, + DelegationRate::MIN, + )]), + }; + assert_eq!(seigniorage_recipient.total_stake(), None) + } + + #[test] + fn test_overflow_in_delegation_total_stake() { + let delegator_1_kind = PublicKey::from( + &SecretKey::ed25519_from_bytes([42; SecretKey::ED25519_LENGTH]).unwrap(), + ) + .into(); + let delegator_2_kind = PublicKey::from( + &SecretKey::ed25519_from_bytes([43; SecretKey::ED25519_LENGTH]).unwrap(), + ) + .into(); + let delegator_3_kind = PublicKey::from( + &SecretKey::ed25519_from_bytes([44; SecretKey::ED25519_LENGTH]).unwrap(), + ) + .into(); + let seigniorage_recipient = SeigniorageRecipientV2 { + stake: U512::max_value(), + delegation_rate: DelegationRate::MAX, + delegator_stake: BTreeMap::from_iter(vec![ + (delegator_1_kind, U512::MAX), + (delegator_2_kind, U512::MAX), + (delegator_3_kind, U512::MAX), + 
]), + reservation_delegation_rates: BTreeMap::new(), + }; + assert_eq!(seigniorage_recipient.delegator_total_stake(), None) + } } diff --git a/types/src/system/auction/unbond.rs b/types/src/system/auction/unbond.rs new file mode 100644 index 0000000000..f2341d7b08 --- /dev/null +++ b/types/src/system/auction/unbond.rs @@ -0,0 +1,734 @@ +use alloc::{string::String, vec::Vec}; +use core::fmt::{self, Display, Formatter}; +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; + +use super::{BidAddr, DelegatorKind, UnbondingPurse, WithdrawPurse}; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + checksummed_hex, CLType, CLTyped, EraId, PublicKey, URef, URefAddr, U512, +}; +use serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer}; +use serde_helpers::{HumanReadableUnbondKind, NonHumanReadableUnbondKind}; + +/// UnbondKindTag variants. +#[allow(clippy::large_enum_variant)] +#[repr(u8)] +#[derive(Debug, PartialEq, Eq, Serialize, Deserialize, Clone)] +pub enum UnbondKindTag { + /// Validator bid. + Validator = 0, + /// Validator bid. + DelegatedAccount = 1, + /// Delegator bid. + DelegatedPurse = 2, +} + +/// Unbond variants. +#[derive(Debug, PartialEq, Eq, Clone, Ord, PartialOrd)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub enum UnbondKind { + Validator(PublicKey), + DelegatedPublicKey(PublicKey), + DelegatedPurse(#[cfg_attr(feature = "json-schema", schemars(with = "String"))] URefAddr), +} + +impl UnbondKind { + /// Returns UnbondKindTag. + pub fn tag(&self) -> UnbondKindTag { + match self { + UnbondKind::Validator(_) => UnbondKindTag::Validator, + UnbondKind::DelegatedPublicKey(_) => UnbondKindTag::DelegatedAccount, + UnbondKind::DelegatedPurse(_) => UnbondKindTag::DelegatedPurse, + } + } + + /// Returns PublicKey, if any. 
+ pub fn maybe_public_key(&self) -> Option { + match self { + UnbondKind::Validator(pk) | UnbondKind::DelegatedPublicKey(pk) => Some(pk.clone()), + UnbondKind::DelegatedPurse(_) => None, + } + } + + /// Is this a validator unbond? + pub fn is_validator(&self) -> bool { + match self { + UnbondKind::Validator(_) => true, + UnbondKind::DelegatedPublicKey(_) | UnbondKind::DelegatedPurse(_) => false, + } + } + + /// Is this a delegator unbond? + pub fn is_delegator(&self) -> bool { + !self.is_validator() + } + + /// The correct bid addr for this instance. + pub fn bid_addr(&self, validator_public_key: &PublicKey) -> BidAddr { + match self { + UnbondKind::Validator(pk) => BidAddr::UnbondAccount { + validator: validator_public_key.to_account_hash(), + unbonder: pk.to_account_hash(), + }, + UnbondKind::DelegatedPublicKey(pk) => BidAddr::DelegatedAccount { + delegator: pk.to_account_hash(), + validator: validator_public_key.to_account_hash(), + }, + UnbondKind::DelegatedPurse(addr) => BidAddr::DelegatedPurse { + validator: validator_public_key.to_account_hash(), + delegator: *addr, + }, + } + } +} + +impl ToBytes for UnbondKind { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut result = bytesrepr::allocate_buffer(self)?; + let (tag, mut serialized_data) = match self { + UnbondKind::Validator(pk) => (UnbondKindTag::Validator, pk.to_bytes()?), + UnbondKind::DelegatedPublicKey(pk) => (UnbondKindTag::DelegatedAccount, pk.to_bytes()?), + UnbondKind::DelegatedPurse(addr) => (UnbondKindTag::DelegatedPurse, addr.to_bytes()?), + }; + result.push(tag as u8); + result.append(&mut serialized_data); + Ok(result) + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + + match self { + UnbondKind::Validator(pk) => pk.serialized_length(), + UnbondKind::DelegatedPublicKey(pk) => pk.serialized_length(), + UnbondKind::DelegatedPurse(addr) => addr.serialized_length(), + } + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + 
writer.push(self.tag() as u8); + match self { + UnbondKind::Validator(pk) => pk.write_bytes(writer)?, + UnbondKind::DelegatedPublicKey(pk) => pk.write_bytes(writer)?, + UnbondKind::DelegatedPurse(addr) => addr.write_bytes(writer)?, + }; + Ok(()) + } +} + +impl FromBytes for UnbondKind { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder): (u8, &[u8]) = FromBytes::from_bytes(bytes)?; + match tag { + tag if tag == UnbondKindTag::Validator as u8 => PublicKey::from_bytes(remainder) + .map(|(pk, remainder)| (UnbondKind::Validator(pk), remainder)), + tag if tag == UnbondKindTag::DelegatedAccount as u8 => PublicKey::from_bytes(remainder) + .map(|(pk, remainder)| (UnbondKind::DelegatedPublicKey(pk), remainder)), + tag if tag == UnbondKindTag::DelegatedPurse as u8 => URefAddr::from_bytes(remainder) + .map(|(delegator_bid, remainder)| { + (UnbondKind::DelegatedPurse(delegator_bid), remainder) + }), + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +impl From for UnbondKind { + fn from(value: DelegatorKind) -> Self { + match value { + DelegatorKind::PublicKey(pk) => UnbondKind::DelegatedPublicKey(pk), + DelegatorKind::Purse(addr) => UnbondKind::DelegatedPurse(addr), + } + } +} + +impl Serialize for UnbondKind { + fn serialize(&self, serializer: S) -> Result { + if serializer.is_human_readable() { + HumanReadableUnbondKind::from(self).serialize(serializer) + } else { + NonHumanReadableUnbondKind::from(self).serialize(serializer) + } + } +} + +#[derive(Debug)] +enum UnbondKindError { + DeserializationError(String), +} + +impl Display for UnbondKindError { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self { + UnbondKindError::DeserializationError(msg) => { + write!(f, "Error when deserializing UnbondKind: {}", msg) + } + } + } +} + +impl TryFrom for UnbondKind { + type Error = UnbondKindError; + + fn try_from(value: HumanReadableUnbondKind) -> Result { + match value { + 
HumanReadableUnbondKind::Validator(public_key) => Ok(UnbondKind::Validator(public_key)), + HumanReadableUnbondKind::DelegatedPublicKey(public_key) => { + Ok(UnbondKind::DelegatedPublicKey(public_key)) + } + HumanReadableUnbondKind::DelegatedPurse(encoded) => { + let decoded = checksummed_hex::decode(encoded).map_err(|e| { + UnbondKindError::DeserializationError(format!( + "Failed to decode encoded URefAddr: {}", + e + )) + })?; + let uref_addr = URefAddr::try_from(decoded.as_ref()).map_err(|e| { + UnbondKindError::DeserializationError(format!( + "Failed to build uref address: {}", + e + )) + })?; + Ok(UnbondKind::DelegatedPurse(uref_addr)) + } + } + } +} + +impl From for UnbondKind { + fn from(value: NonHumanReadableUnbondKind) -> Self { + match value { + NonHumanReadableUnbondKind::Validator(public_key) => UnbondKind::Validator(public_key), + NonHumanReadableUnbondKind::DelegatedPublicKey(public_key) => { + UnbondKind::DelegatedPublicKey(public_key) + } + NonHumanReadableUnbondKind::DelegatedPurse(uref_addr) => { + UnbondKind::DelegatedPurse(uref_addr) + } + } + } +} + +impl<'de> Deserialize<'de> for UnbondKind { + fn deserialize>(deserializer: D) -> Result { + if deserializer.is_human_readable() { + let human_readable = HumanReadableUnbondKind::deserialize(deserializer)?; + UnbondKind::try_from(human_readable) + .map_err(|error| SerdeError::custom(format!("{:?}", error))) + } else { + let non_human_readable = NonHumanReadableUnbondKind::deserialize(deserializer)?; + Ok(UnbondKind::from(non_human_readable)) + } + } +} + +#[derive(PartialEq, Eq, Debug, Serialize, Deserialize, Clone)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub struct Unbond { + /// Validators public key. + validator_public_key: PublicKey, + /// Unbond kind. + unbond_kind: UnbondKind, + /// Unbond amounts per era. 
+ eras: Vec, +} + +impl Unbond { + /// Creates [`Unbond`] instance for an unbonding request. + pub const fn new( + validator_public_key: PublicKey, + unbond_kind: UnbondKind, + eras: Vec, + ) -> Self { + Self { + validator_public_key, + unbond_kind, + eras, + } + } + + /// Creates [`Unbond`] instance for an unbonding request. + pub fn new_validator_unbond(validator_public_key: PublicKey, eras: Vec) -> Self { + Self { + validator_public_key: validator_public_key.clone(), + unbond_kind: UnbondKind::Validator(validator_public_key), + eras, + } + } + + /// Creates [`Unbond`] instance for an unbonding request. + pub const fn new_delegated_account_unbond( + validator_public_key: PublicKey, + delegator_public_key: PublicKey, + eras: Vec, + ) -> Self { + Self { + validator_public_key, + unbond_kind: UnbondKind::DelegatedPublicKey(delegator_public_key), + eras, + } + } + + /// Creates [`Unbond`] instance for an unbonding request. + pub const fn new_delegated_purse_unbond( + validator_public_key: PublicKey, + delegator_purse_addr: URefAddr, + eras: Vec, + ) -> Self { + Self { + validator_public_key, + unbond_kind: UnbondKind::DelegatedPurse(delegator_purse_addr), + eras, + } + } + + /// Checks if given request is made by a validator by checking if public key of unbonder is same + /// as a key owned by validator. + pub fn is_validator(&self) -> bool { + match self.unbond_kind.maybe_public_key() { + Some(pk) => pk == self.validator_public_key, + None => false, + } + } + + /// Returns public key of validator. + pub fn validator_public_key(&self) -> &PublicKey { + &self.validator_public_key + } + + /// Returns unbond kind. + pub fn unbond_kind(&self) -> &UnbondKind { + &self.unbond_kind + } + + /// Returns eras unbond items. + pub fn eras(&self) -> &Vec { + &self.eras + } + + /// Returns eras unbond items. + pub fn eras_mut(&mut self) -> &mut Vec { + &mut self.eras + } + + /// Takes eras unbond items. 
+ pub fn take_eras(mut self) -> Vec { + let eras = self.eras; + self.eras = vec![]; + eras + } + + /// Splits instance into eras that are not expired, and eras that are expired (if any). + pub fn expired(self, era_id: EraId, unbonding_delay: u64) -> (Unbond, Option>) { + let mut retained = vec![]; + let mut expired = vec![]; + for era in self.eras { + let threshold = era + .era_of_creation() + .value() + .saturating_add(unbonding_delay); + if era_id.value() >= threshold { + expired.push(era); + } else { + retained.push(era) + } + } + let ret = Unbond::new(self.validator_public_key, self.unbond_kind, retained); + if !expired.is_empty() { + (ret, Some(expired)) + } else { + (ret, None) + } + } + + /// Returns the unbond era with the highest era of creation. + pub fn target_unbond_era(&self) -> Option { + self.eras() + .iter() + .max_by(|x, y| x.era_of_creation().cmp(&y.era_of_creation())) + .cloned() + } + + /// Returns a mutable reference to the unbond era with the highest era of creation. 
+ pub fn target_unbond_era_mut(&mut self) -> Option<&mut UnbondEra> { + self.eras_mut() + .iter_mut() + .max_by(|x, y| x.era_of_creation().cmp(&y.era_of_creation())) + } +} + +impl ToBytes for Unbond { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut result = bytesrepr::allocate_buffer(self)?; + result.extend(&self.validator_public_key.to_bytes()?); + result.extend(&self.unbond_kind.to_bytes()?); + result.extend(&self.eras.to_bytes()?); + Ok(result) + } + fn serialized_length(&self) -> usize { + self.validator_public_key.serialized_length() + + self.unbond_kind.serialized_length() + + self.eras.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.validator_public_key.write_bytes(writer)?; + self.unbond_kind.write_bytes(writer)?; + self.eras.write_bytes(writer)?; + Ok(()) + } +} + +impl FromBytes for Unbond { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (validator_public_key, remainder) = FromBytes::from_bytes(bytes)?; + let (unbond_kind, remainder) = FromBytes::from_bytes(remainder)?; + let (eras, remainder) = FromBytes::from_bytes(remainder)?; + + Ok(( + Unbond { + validator_public_key, + unbond_kind, + eras, + }, + remainder, + )) + } +} + +impl CLTyped for Unbond { + fn cl_type() -> CLType { + CLType::Any + } +} + +impl Default for Unbond { + fn default() -> Self { + Self { + unbond_kind: UnbondKind::Validator(PublicKey::System), + validator_public_key: PublicKey::System, + eras: vec![], + } + } +} + +impl From for Unbond { + fn from(unbonding_purse: UnbondingPurse) -> Self { + let unbond_kind = + if unbonding_purse.validator_public_key() == unbonding_purse.unbonder_public_key() { + UnbondKind::Validator(unbonding_purse.validator_public_key().clone()) + } else { + UnbondKind::DelegatedPublicKey(unbonding_purse.unbonder_public_key().clone()) + }; + Unbond::new( + unbonding_purse.validator_public_key().clone(), + unbond_kind, + vec![UnbondEra::new( + 
*unbonding_purse.bonding_purse(), + unbonding_purse.era_of_creation(), + *unbonding_purse.amount(), + None, + )], + ) + } +} + +impl From for Unbond { + fn from(withdraw_purse: WithdrawPurse) -> Self { + let unbond_kind = + if withdraw_purse.validator_public_key == withdraw_purse.unbonder_public_key { + UnbondKind::Validator(withdraw_purse.validator_public_key.clone()) + } else { + UnbondKind::DelegatedPublicKey(withdraw_purse.unbonder_public_key.clone()) + }; + Unbond::new( + withdraw_purse.validator_public_key, + unbond_kind, + vec![UnbondEra::new( + withdraw_purse.bonding_purse, + withdraw_purse.era_of_creation, + withdraw_purse.amount, + None, + )], + ) + } +} + +/// Unbond amounts per era. +#[derive(PartialEq, Eq, Debug, Serialize, Deserialize, Clone, Default)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub struct UnbondEra { + /// Bonding Purse + bonding_purse: URef, + /// Era in which this unbonding request was created. + era_of_creation: EraId, + /// Unbonding Amount. + amount: U512, + /// The validator public key to re-delegate to. + new_validator: Option, +} + +impl UnbondEra { + /// Creates [`UnbondEra`] instance for an unbonding request. + pub const fn new( + bonding_purse: URef, + era_of_creation: EraId, + amount: U512, + new_validator: Option, + ) -> Self { + Self { + bonding_purse, + era_of_creation, + amount, + new_validator, + } + } + + /// Returns bonding purse used to make this unbonding request. + pub fn bonding_purse(&self) -> &URef { + &self.bonding_purse + } + + /// Returns era which was used to create this unbonding request. + pub fn era_of_creation(&self) -> EraId { + self.era_of_creation + } + + /// Returns unbonding amount. + pub fn amount(&self) -> &U512 { + &self.amount + } + + /// Returns the public key for the new validator. + pub fn new_validator(&self) -> &Option { + &self.new_validator + } + + /// Sets amount to provided value. 
+ pub fn with_amount(&mut self, amount: U512) { + self.amount = amount; + } +} + +impl ToBytes for UnbondEra { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut result = bytesrepr::allocate_buffer(self)?; + result.extend(&self.bonding_purse.to_bytes()?); + result.extend(&self.era_of_creation.to_bytes()?); + result.extend(&self.amount.to_bytes()?); + result.extend(&self.new_validator.to_bytes()?); + Ok(result) + } + fn serialized_length(&self) -> usize { + self.bonding_purse.serialized_length() + + self.era_of_creation.serialized_length() + + self.amount.serialized_length() + + self.new_validator.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.bonding_purse.write_bytes(writer)?; + self.era_of_creation.write_bytes(writer)?; + self.amount.write_bytes(writer)?; + self.new_validator.write_bytes(writer)?; + Ok(()) + } +} + +impl FromBytes for UnbondEra { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (bonding_purse, remainder) = FromBytes::from_bytes(bytes)?; + let (era_of_creation, remainder) = FromBytes::from_bytes(remainder)?; + let (amount, remainder) = FromBytes::from_bytes(remainder)?; + let (new_validator, remainder) = Option::::from_bytes(remainder)?; + + Ok(( + UnbondEra { + bonding_purse, + era_of_creation, + amount, + new_validator, + }, + remainder, + )) + } +} + +impl CLTyped for UnbondEra { + fn cl_type() -> CLType { + CLType::Any + } +} + +mod serde_helpers { + use super::UnbondKind; + use crate::{PublicKey, URefAddr}; + use alloc::string::String; + use serde::{Deserialize, Serialize}; + + #[derive(Serialize, Deserialize)] + pub(super) enum HumanReadableUnbondKind { + Validator(PublicKey), + DelegatedPublicKey(PublicKey), + DelegatedPurse(String), + } + + #[derive(Serialize, Deserialize)] + pub(super) enum NonHumanReadableUnbondKind { + Validator(PublicKey), + DelegatedPublicKey(PublicKey), + DelegatedPurse(URefAddr), + } + + impl From<&UnbondKind> 
for HumanReadableUnbondKind { + fn from(unbond_source: &UnbondKind) -> Self { + match unbond_source { + UnbondKind::Validator(public_key) => { + HumanReadableUnbondKind::Validator(public_key.clone()) + } + UnbondKind::DelegatedPublicKey(public_key) => { + HumanReadableUnbondKind::DelegatedPublicKey(public_key.clone()) + } + UnbondKind::DelegatedPurse(uref_addr) => { + HumanReadableUnbondKind::DelegatedPurse(base16::encode_lower(uref_addr)) + } + } + } + } + + impl From<&UnbondKind> for NonHumanReadableUnbondKind { + fn from(unbond_kind: &UnbondKind) -> Self { + match unbond_kind { + UnbondKind::Validator(public_key) => { + NonHumanReadableUnbondKind::Validator(public_key.clone()) + } + UnbondKind::DelegatedPublicKey(public_key) => { + NonHumanReadableUnbondKind::DelegatedPublicKey(public_key.clone()) + } + UnbondKind::DelegatedPurse(uref_addr) => { + NonHumanReadableUnbondKind::DelegatedPurse(*uref_addr) + } + } + } + } +} + +#[cfg(test)] +mod tests { + use rand::Rng; + + use crate::{ + bytesrepr, + system::auction::{ + unbond::{Unbond, UnbondKind}, + UnbondEra, + }, + testing::TestRng, + AccessRights, EraId, PublicKey, SecretKey, URef, U512, + }; + + const BONDING_PURSE: URef = URef::new([14; 32], AccessRights::READ_ADD_WRITE); + const ERA_OF_WITHDRAWAL: EraId = EraId::MAX; + + fn validator_public_key() -> PublicKey { + let secret_key = SecretKey::ed25519_from_bytes([42; SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) + } + + fn delegated_account_unbond_kind() -> UnbondKind { + let secret_key = SecretKey::ed25519_from_bytes([43; SecretKey::ED25519_LENGTH]).unwrap(); + UnbondKind::DelegatedPublicKey(PublicKey::from(&secret_key)) + } + + fn amount() -> U512 { + U512::max_value() - 1 + } + + #[test] + fn serialization_roundtrip_for_unbond() { + let unbond_era = UnbondEra { + bonding_purse: BONDING_PURSE, + era_of_creation: ERA_OF_WITHDRAWAL, + amount: amount(), + new_validator: None, + }; + + let unbond = Unbond { + validator_public_key: 
validator_public_key(), + unbond_kind: delegated_account_unbond_kind(), + eras: vec![unbond_era], + }; + + bytesrepr::test_serialization_roundtrip(&unbond); + } + + #[test] + fn should_be_validator_condition_for_unbond() { + let validator_pk = validator_public_key(); + let validator_unbond = Unbond::new( + validator_pk.clone(), + UnbondKind::Validator(validator_pk), + vec![], + ); + assert!(validator_unbond.is_validator()); + } + + #[test] + fn should_be_delegator_condition_for_unbond() { + let delegator_unbond = Unbond::new( + validator_public_key(), + delegated_account_unbond_kind(), + vec![], + ); + assert!(!delegator_unbond.is_validator()); + } + + #[test] + fn purse_serialized_as_string() { + let delegator_kind_payload = UnbondKind::DelegatedPurse([1; 32]); + let serialized = serde_json::to_string(&delegator_kind_payload).unwrap(); + assert_eq!( + serialized, + "{\"DelegatedPurse\":\"0101010101010101010101010101010101010101010101010101010101010101\"}" + ); + } + + #[test] + fn given_broken_address_purse_deserialziation_fails() { + let failing = + "{\"DelegatedPurse\":\"Z101010101010101010101010101010101010101010101010101010101010101\"}"; + let ret = serde_json::from_str::(failing); + assert!(ret.is_err()); + let failing = + "{\"DelegatedPurse\":\"01010101010101010101010101010101010101010101010101010101\"}"; + let ret = serde_json::from_str::(failing); + assert!(ret.is_err()); + } + + #[test] + fn json_roundtrip() { + let rng = &mut TestRng::new(); + + let entity = UnbondKind::Validator(PublicKey::random(rng)); + let json_string = serde_json::to_string_pretty(&entity).unwrap(); + let decoded: UnbondKind = serde_json::from_str(&json_string).unwrap(); + assert_eq!(decoded, entity); + + let entity = UnbondKind::DelegatedPublicKey(PublicKey::random(rng)); + let json_string = serde_json::to_string_pretty(&entity).unwrap(); + let decoded: UnbondKind = serde_json::from_str(&json_string).unwrap(); + assert_eq!(decoded, entity); + + let entity = 
UnbondKind::DelegatedPurse(rng.gen()); + let json_string = serde_json::to_string_pretty(&entity).unwrap(); + let decoded: UnbondKind = serde_json::from_str(&json_string).unwrap(); + assert_eq!(decoded, entity); + } +} diff --git a/types/src/system/auction/unbonding_purse.rs b/types/src/system/auction/unbonding_purse.rs index 9213197933..965376d211 100644 --- a/types/src/system/auction/unbonding_purse.rs +++ b/types/src/system/auction/unbonding_purse.rs @@ -1,9 +1,8 @@ -// TODO - remove once schemars stops causing warning. -#![allow(clippy::field_reassign_with_default)] - use alloc::vec::Vec; -#[cfg(feature = "std")] +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] use schemars::JsonSchema; use serde::{Deserialize, Serialize}; @@ -12,9 +11,12 @@ use crate::{ CLType, CLTyped, EraId, PublicKey, URef, U512, }; +use super::WithdrawPurse; + /// Unbonding purse. #[derive(PartialEq, Eq, Debug, Serialize, Deserialize, Clone)] -#[cfg_attr(feature = "std", derive(JsonSchema))] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] #[serde(deny_unknown_fields)] pub struct UnbondingPurse { /// Bonding Purse @@ -27,6 +29,8 @@ pub struct UnbondingPurse { era_of_creation: EraId, /// Unbonding Amount. amount: U512, + /// The validator public key to re-delegate to. + new_validator: Option, } impl UnbondingPurse { @@ -37,6 +41,7 @@ impl UnbondingPurse { unbonder_public_key: PublicKey, era_of_creation: EraId, amount: U512, + new_validator: Option, ) -> Self { Self { bonding_purse, @@ -44,6 +49,7 @@ impl UnbondingPurse { unbonder_public_key, era_of_creation, amount, + new_validator, } } @@ -65,9 +71,9 @@ impl UnbondingPurse { /// Returns public key of unbonder. 
/// - /// For withdrawal requests that originated from validator's public key through - /// [`crate::system::auction::Auction::withdraw_bid`] entrypoint this is equal to - /// [`UnbondingPurse::validator_public_key`] and [`UnbondingPurse::is_validator`] is `true`. + /// For withdrawal requests that originated from validator's public key through `withdraw_bid` + /// entrypoint this is equal to [`UnbondingPurse::validator_public_key`] and + /// [`UnbondingPurse::is_validator`] is `true`. pub fn unbonder_public_key(&self) -> &PublicKey { &self.unbonder_public_key } @@ -81,6 +87,16 @@ impl UnbondingPurse { pub fn amount(&self) -> &U512 { &self.amount } + + /// Returns the public key for the new validator. + pub fn new_validator(&self) -> &Option { + &self.new_validator + } + + /// Sets amount to provided value. + pub fn with_amount(&mut self, amount: U512) { + self.amount = amount; + } } impl ToBytes for UnbondingPurse { @@ -91,6 +107,7 @@ impl ToBytes for UnbondingPurse { result.extend(&self.unbonder_public_key.to_bytes()?); result.extend(&self.era_of_creation.to_bytes()?); result.extend(&self.amount.to_bytes()?); + result.extend(&self.new_validator.to_bytes()?); Ok(result) } fn serialized_length(&self) -> usize { @@ -99,16 +116,29 @@ impl ToBytes for UnbondingPurse { + self.unbonder_public_key.serialized_length() + self.era_of_creation.serialized_length() + self.amount.serialized_length() + + self.new_validator.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.bonding_purse.write_bytes(writer)?; + self.validator_public_key.write_bytes(writer)?; + self.unbonder_public_key.write_bytes(writer)?; + self.era_of_creation.write_bytes(writer)?; + self.amount.write_bytes(writer)?; + self.new_validator.write_bytes(writer)?; + Ok(()) } } impl FromBytes for UnbondingPurse { fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { - let (bonding_purse, bytes) = FromBytes::from_bytes(bytes)?; - let 
(validator_public_key, bytes) = FromBytes::from_bytes(bytes)?; - let (unbonder_public_key, bytes) = FromBytes::from_bytes(bytes)?; - let (era_of_creation, bytes) = FromBytes::from_bytes(bytes)?; - let (amount, bytes) = FromBytes::from_bytes(bytes)?; + let (bonding_purse, remainder) = FromBytes::from_bytes(bytes)?; + let (validator_public_key, remainder) = FromBytes::from_bytes(remainder)?; + let (unbonder_public_key, remainder) = FromBytes::from_bytes(remainder)?; + let (era_of_creation, remainder) = FromBytes::from_bytes(remainder)?; + let (amount, remainder) = FromBytes::from_bytes(remainder)?; + let (new_validator, remainder) = Option::::from_bytes(remainder)?; + Ok(( UnbondingPurse { bonding_purse, @@ -116,8 +146,9 @@ impl FromBytes for UnbondingPurse { unbonder_public_key, era_of_creation, amount, + new_validator, }, - bytes, + remainder, )) } } @@ -128,63 +159,79 @@ impl CLTyped for UnbondingPurse { } } +impl From for UnbondingPurse { + fn from(withdraw_purse: WithdrawPurse) -> Self { + UnbondingPurse::new( + withdraw_purse.bonding_purse, + withdraw_purse.validator_public_key, + withdraw_purse.unbonder_public_key, + withdraw_purse.era_of_creation, + withdraw_purse.amount, + None, + ) + } +} + #[cfg(test)] mod tests { - use once_cell::sync::Lazy; - use crate::{ - bytesrepr, - system::auction::{EraId, UnbondingPurse}, - AccessRights, PublicKey, SecretKey, URef, U512, + bytesrepr, system::auction::UnbondingPurse, AccessRights, EraId, PublicKey, SecretKey, + URef, U512, }; - const BONDING_PURSE: URef = URef::new([41; 32], AccessRights::READ_ADD_WRITE); + const BONDING_PURSE: URef = URef::new([14; 32], AccessRights::READ_ADD_WRITE); const ERA_OF_WITHDRAWAL: EraId = EraId::MAX; - static VALIDATOR_PUBLIC_KEY: Lazy = Lazy::new(|| { - SecretKey::ed25519_from_bytes([42; SecretKey::ED25519_LENGTH]) - .unwrap() - .into() - }); - static UNBONDER_PUBLIC_KEY: Lazy = Lazy::new(|| { - SecretKey::ed25519_from_bytes([43; SecretKey::ED25519_LENGTH]) - .unwrap() - .into() - }); - 
static AMOUNT: Lazy = Lazy::new(|| U512::max_value() - 1); + fn validator_public_key() -> PublicKey { + let secret_key = SecretKey::ed25519_from_bytes([42; SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) + } + + fn unbonder_public_key() -> PublicKey { + let secret_key = SecretKey::ed25519_from_bytes([43; SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) + } + + fn amount() -> U512 { + U512::max_value() - 1 + } #[test] - fn serialization_roundtrip() { + fn serialization_roundtrip_for_unbonding_purse() { let unbonding_purse = UnbondingPurse { bonding_purse: BONDING_PURSE, - validator_public_key: VALIDATOR_PUBLIC_KEY.clone(), - unbonder_public_key: UNBONDER_PUBLIC_KEY.clone(), + validator_public_key: validator_public_key(), + unbonder_public_key: unbonder_public_key(), era_of_creation: ERA_OF_WITHDRAWAL, - amount: *AMOUNT, + amount: amount(), + new_validator: None, }; bytesrepr::test_serialization_roundtrip(&unbonding_purse); } + #[test] - fn should_be_validator_condition() { + fn should_be_validator_condition_for_unbonding_purse() { let validator_unbonding_purse = UnbondingPurse::new( BONDING_PURSE, - VALIDATOR_PUBLIC_KEY.clone(), - VALIDATOR_PUBLIC_KEY.clone(), + validator_public_key(), + validator_public_key(), ERA_OF_WITHDRAWAL, - *AMOUNT, + amount(), + None, ); assert!(validator_unbonding_purse.is_validator()); } #[test] - fn should_be_delegator_condition() { + fn should_be_delegator_condition_for_unbonding_purse() { let delegator_unbonding_purse = UnbondingPurse::new( BONDING_PURSE, - VALIDATOR_PUBLIC_KEY.clone(), - UNBONDER_PUBLIC_KEY.clone(), + validator_public_key(), + unbonder_public_key(), ERA_OF_WITHDRAWAL, - *AMOUNT, + amount(), + None, ); assert!(!delegator_unbonding_purse.is_validator()); } diff --git a/types/src/system/auction/validator_bid.rs b/types/src/system/auction/validator_bid.rs new file mode 100644 index 0000000000..e557a5b925 --- /dev/null +++ b/types/src/system/auction/validator_bid.rs @@ -0,0 +1,481 
@@ +// TODO - remove once schemars stops causing warning. +#![allow(clippy::field_reassign_with_default)] + +use alloc::vec::Vec; + +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + system::auction::{ + bid::VestingSchedule, Bid, DelegationRate, Error, VESTING_SCHEDULE_LENGTH_MILLIS, + }, + CLType, CLTyped, PublicKey, URef, U512, +}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +/// An entry in the validator map. +#[derive(Debug, PartialEq, Eq, Serialize, Deserialize, Clone)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub struct ValidatorBid { + /// Validator public key + validator_public_key: PublicKey, + /// The purse that was used for bonding. + bonding_purse: URef, + /// The amount of tokens staked by a validator (not including delegators). + staked_amount: U512, + /// Delegation rate + delegation_rate: DelegationRate, + /// Vesting schedule for a genesis validator. `None` if non-genesis validator. + vesting_schedule: Option, + /// `true` if validator has been "evicted" + inactive: bool, + /// Minimum allowed delegation amount in motes + minimum_delegation_amount: u64, + /// Maximum allowed delegation amount in motes + maximum_delegation_amount: u64, + /// Slots reserved for specific delegators + reserved_slots: u32, +} + +impl ValidatorBid { + /// Sets the maximum and minimum delegation amounts for a validators bid. 
+ pub fn with_min_max_delegation_amount( + mut self, + maximum_delegation_amount: u64, + minimum_delegation_amount: u64, + ) -> Self { + self.maximum_delegation_amount = maximum_delegation_amount; + self.minimum_delegation_amount = minimum_delegation_amount; + self + } + + pub fn with_inactive(mut self, inactive: bool) -> Self { + self.inactive = inactive; + self + } +} + +impl ValidatorBid { + /// Creates new instance of a bid with locked funds. + #[allow(clippy::too_many_arguments)] + pub fn locked( + validator_public_key: PublicKey, + bonding_purse: URef, + staked_amount: U512, + delegation_rate: DelegationRate, + release_timestamp_millis: u64, + minimum_delegation_amount: u64, + maximum_delegation_amount: u64, + reserved_slots: u32, + ) -> Self { + let vesting_schedule = Some(VestingSchedule::new(release_timestamp_millis)); + let inactive = false; + Self { + validator_public_key, + bonding_purse, + staked_amount, + delegation_rate, + vesting_schedule, + inactive, + minimum_delegation_amount, + maximum_delegation_amount, + reserved_slots, + } + } + + /// Creates new instance of a bid with unlocked funds. + pub fn unlocked( + validator_public_key: PublicKey, + bonding_purse: URef, + staked_amount: U512, + delegation_rate: DelegationRate, + minimum_delegation_amount: u64, + maximum_delegation_amount: u64, + reserved_slots: u32, + ) -> Self { + let vesting_schedule = None; + let inactive = false; + Self { + validator_public_key, + bonding_purse, + staked_amount, + delegation_rate, + vesting_schedule, + inactive, + minimum_delegation_amount, + maximum_delegation_amount, + reserved_slots, + } + } + + /// Creates a new inactive instance of a bid with 0 staked amount. 
+ pub fn empty(validator_public_key: PublicKey, bonding_purse: URef) -> Self { + let vesting_schedule = None; + let inactive = true; + let staked_amount = U512::zero(); + let delegation_rate = Default::default(); + Self { + validator_public_key, + bonding_purse, + staked_amount, + delegation_rate, + vesting_schedule, + inactive, + minimum_delegation_amount: 0, + maximum_delegation_amount: u64::MAX, + reserved_slots: 0, + } + } + + /// Gets the validator public key of the provided bid + pub fn validator_public_key(&self) -> &PublicKey { + &self.validator_public_key + } + + /// Gets the bonding purse of the provided bid + pub fn bonding_purse(&self) -> &URef { + &self.bonding_purse + } + + /// Checks if a bid is still locked under a vesting schedule. + /// + /// Returns true if a timestamp falls below the initial lockup period + 91 days release + /// schedule, otherwise false. + pub fn is_locked(&self, timestamp_millis: u64) -> bool { + self.is_locked_with_vesting_schedule(timestamp_millis, VESTING_SCHEDULE_LENGTH_MILLIS) + } + + /// Checks if a bid is still locked under a vesting schedule. + /// + /// Returns true if a timestamp falls below the initial lockup period + 91 days release + /// schedule, otherwise false. + pub fn is_locked_with_vesting_schedule( + &self, + timestamp_millis: u64, + vesting_schedule_period_millis: u64, + ) -> bool { + match &self.vesting_schedule { + Some(vesting_schedule) => { + vesting_schedule.is_vesting(timestamp_millis, vesting_schedule_period_millis) + } + None => false, + } + } + + /// Gets the staked amount of the provided bid + pub fn staked_amount(&self) -> U512 { + self.staked_amount + } + + /// Gets the staked amount of the provided bid + pub fn staked_amount_mut(&mut self) -> &mut U512 { + &mut self.staked_amount + } + + /// Gets the delegation rate of the provided bid + pub fn delegation_rate(&self) -> &DelegationRate { + &self.delegation_rate + } + + /// Returns a reference to the vesting schedule of the provided bid. 
`None` if a non-genesis + /// validator. + pub fn vesting_schedule(&self) -> Option<&VestingSchedule> { + self.vesting_schedule.as_ref() + } + + /// Returns a mutable reference to the vesting schedule of the provided bid. `None` if a + /// non-genesis validator. + pub fn vesting_schedule_mut(&mut self) -> Option<&mut VestingSchedule> { + self.vesting_schedule.as_mut() + } + + /// Gets the reserved slots of the provided bid + pub fn reserved_slots(&self) -> u32 { + self.reserved_slots + } + + /// Returns `true` if validator is inactive + pub fn inactive(&self) -> bool { + self.inactive + } + + /// Decreases the stake of the provided bid + pub fn decrease_stake( + &mut self, + amount: U512, + era_end_timestamp_millis: u64, + ) -> Result { + let updated_staked_amount = self + .staked_amount + .checked_sub(amount) + .ok_or(Error::UnbondTooLarge)?; + + let vesting_schedule = match self.vesting_schedule.as_ref() { + Some(vesting_schedule) => vesting_schedule, + None => { + self.staked_amount = updated_staked_amount; + return Ok(updated_staked_amount); + } + }; + + match vesting_schedule.locked_amount(era_end_timestamp_millis) { + Some(locked_amount) if updated_staked_amount < locked_amount => { + Err(Error::ValidatorFundsLocked) + } + None => { + // If `None`, then the locked amounts table has yet to be initialized (likely + // pre-90 day mark) + Err(Error::ValidatorFundsLocked) + } + Some(_) => { + self.staked_amount = updated_staked_amount; + Ok(updated_staked_amount) + } + } + } + + /// Increases the stake of the provided bid + pub fn increase_stake(&mut self, amount: U512) -> Result { + let updated_staked_amount = self + .staked_amount + .checked_add(amount) + .ok_or(Error::InvalidAmount)?; + + self.staked_amount = updated_staked_amount; + + Ok(updated_staked_amount) + } + + /// Updates the delegation rate of the provided bid + pub fn with_delegation_rate(&mut self, delegation_rate: DelegationRate) -> &mut Self { + self.delegation_rate = delegation_rate; + self + } + 
+ /// Updates the reserved slots of the provided bid + pub fn with_reserved_slots(&mut self, reserved_slots: u32) -> &mut Self { + self.reserved_slots = reserved_slots; + self + } + + /// Sets given bid's `inactive` field to `false` + pub fn activate(&mut self) { + self.inactive = false; + } + + /// Sets given bid's `inactive` field to `true` + pub fn deactivate(&mut self) { + self.inactive = true; + } + + /// Sets validator public key + pub fn with_validator_public_key(&mut self, validator_public_key: PublicKey) -> &mut Self { + self.validator_public_key = validator_public_key; + self + } + + /// Returns minimum allowed delegation amount in motes. + pub fn minimum_delegation_amount(&self) -> u64 { + self.minimum_delegation_amount + } + + /// Returns maximum allowed delegation amount in motes. + pub fn maximum_delegation_amount(&self) -> u64 { + self.maximum_delegation_amount + } + + /// Sets minimum and maximum delegation amounts in motes. + pub fn set_delegation_amount_boundaries( + &mut self, + minimum_delegation_amount: u64, + maximum_delegation_amount: u64, + ) { + self.minimum_delegation_amount = minimum_delegation_amount; + self.maximum_delegation_amount = maximum_delegation_amount; + } +} + +impl CLTyped for ValidatorBid { + fn cl_type() -> CLType { + CLType::Any + } +} + +impl ToBytes for ValidatorBid { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut result = bytesrepr::allocate_buffer(self)?; + self.validator_public_key.write_bytes(&mut result)?; + self.bonding_purse.write_bytes(&mut result)?; + self.staked_amount.write_bytes(&mut result)?; + self.delegation_rate.write_bytes(&mut result)?; + self.vesting_schedule.write_bytes(&mut result)?; + self.inactive.write_bytes(&mut result)?; + self.minimum_delegation_amount.write_bytes(&mut result)?; + self.maximum_delegation_amount.write_bytes(&mut result)?; + self.reserved_slots.write_bytes(&mut result)?; + Ok(result) + } + + fn serialized_length(&self) -> usize { + 
self.validator_public_key.serialized_length() + + self.bonding_purse.serialized_length() + + self.staked_amount.serialized_length() + + self.delegation_rate.serialized_length() + + self.vesting_schedule.serialized_length() + + self.inactive.serialized_length() + + self.minimum_delegation_amount.serialized_length() + + self.maximum_delegation_amount.serialized_length() + + self.reserved_slots.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.validator_public_key.write_bytes(writer)?; + self.bonding_purse.write_bytes(writer)?; + self.staked_amount.write_bytes(writer)?; + self.delegation_rate.write_bytes(writer)?; + self.vesting_schedule.write_bytes(writer)?; + self.inactive.write_bytes(writer)?; + self.minimum_delegation_amount.write_bytes(writer)?; + self.maximum_delegation_amount.write_bytes(writer)?; + self.reserved_slots.write_bytes(writer)?; + Ok(()) + } +} + +impl FromBytes for ValidatorBid { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (validator_public_key, bytes) = FromBytes::from_bytes(bytes)?; + let (bonding_purse, bytes) = FromBytes::from_bytes(bytes)?; + let (staked_amount, bytes) = FromBytes::from_bytes(bytes)?; + let (delegation_rate, bytes) = FromBytes::from_bytes(bytes)?; + let (vesting_schedule, bytes) = FromBytes::from_bytes(bytes)?; + let (inactive, bytes) = FromBytes::from_bytes(bytes)?; + let (minimum_delegation_amount, bytes) = FromBytes::from_bytes(bytes)?; + let (maximum_delegation_amount, bytes) = FromBytes::from_bytes(bytes)?; + let (reserved_slots, bytes) = FromBytes::from_bytes(bytes)?; + Ok(( + ValidatorBid { + validator_public_key, + bonding_purse, + staked_amount, + delegation_rate, + vesting_schedule, + inactive, + minimum_delegation_amount, + maximum_delegation_amount, + reserved_slots, + }, + bytes, + )) + } +} + +impl From for ValidatorBid { + fn from(bid: Bid) -> Self { + ValidatorBid { + validator_public_key: 
bid.validator_public_key().clone(), + bonding_purse: *bid.bonding_purse(), + staked_amount: *bid.staked_amount(), + delegation_rate: *bid.delegation_rate(), + vesting_schedule: bid.vesting_schedule().cloned(), + inactive: bid.inactive(), + minimum_delegation_amount: 0, + maximum_delegation_amount: u64::MAX, + reserved_slots: 0, + } + } +} + +#[cfg(test)] +mod tests { + use crate::{ + bytesrepr, + system::auction::{bid::VestingSchedule, DelegationRate, ValidatorBid}, + AccessRights, PublicKey, SecretKey, URef, U512, + }; + + #[test] + fn serialization_roundtrip_active() { + let founding_validator = ValidatorBid { + validator_public_key: PublicKey::from( + &SecretKey::ed25519_from_bytes([0u8; SecretKey::ED25519_LENGTH]).unwrap(), + ), + bonding_purse: URef::new([42; 32], AccessRights::READ_ADD_WRITE), + staked_amount: U512::one(), + delegation_rate: DelegationRate::MAX, + vesting_schedule: Some(VestingSchedule::default()), + inactive: false, + minimum_delegation_amount: 0, + maximum_delegation_amount: u64::MAX, + reserved_slots: 0, + }; + bytesrepr::test_serialization_roundtrip(&founding_validator); + } + + #[test] + fn serialization_roundtrip_inactive() { + let founding_validator = ValidatorBid { + validator_public_key: PublicKey::from( + &SecretKey::ed25519_from_bytes([0u8; SecretKey::ED25519_LENGTH]).unwrap(), + ), + bonding_purse: URef::new([42; 32], AccessRights::READ_ADD_WRITE), + staked_amount: U512::one(), + delegation_rate: DelegationRate::MAX, + vesting_schedule: Some(VestingSchedule::default()), + inactive: true, + minimum_delegation_amount: 0, + maximum_delegation_amount: u64::MAX, + reserved_slots: 0, + }; + bytesrepr::test_serialization_roundtrip(&founding_validator); + } + + #[test] + fn should_immediately_initialize_unlock_amounts() { + const TIMESTAMP_MILLIS: u64 = 0; + + let validator_pk: PublicKey = (&SecretKey::ed25519_from_bytes([42; 32]).unwrap()).into(); + + let validator_release_timestamp = TIMESTAMP_MILLIS; + let 
vesting_schedule_period_millis = TIMESTAMP_MILLIS; + let validator_bonding_purse = URef::new([42; 32], AccessRights::ADD); + let validator_staked_amount = U512::from(1000); + let validator_delegation_rate = 0; + + let bid = ValidatorBid::locked( + validator_pk, + validator_bonding_purse, + validator_staked_amount, + validator_delegation_rate, + validator_release_timestamp, + 0, + u64::MAX, + 0, + ); + + assert!(!bid.is_locked_with_vesting_schedule( + validator_release_timestamp, + vesting_schedule_period_millis, + )); + } +} + +#[cfg(test)] +mod prop_tests { + use proptest::prelude::*; + + use crate::{bytesrepr, gens}; + + proptest! { + #[test] + fn test_value_bid(bid in gens::validator_bid_arb()) { + bytesrepr::test_serialization_roundtrip(&bid); + } + } +} diff --git a/types/src/system/auction/validator_credit.rs b/types/src/system/auction/validator_credit.rs new file mode 100644 index 0000000000..9791322487 --- /dev/null +++ b/types/src/system/auction/validator_credit.rs @@ -0,0 +1,143 @@ +use crate::{ + bytesrepr, + bytesrepr::{FromBytes, ToBytes}, + CLType, CLTyped, EraId, PublicKey, U512, +}; +use alloc::vec::Vec; +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +/// Validator credit record. +#[derive(Debug, PartialEq, Eq, Serialize, Deserialize, Clone)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub struct ValidatorCredit { + /// Validator public key + validator_public_key: PublicKey, + /// The era id the credit was created. + era_id: EraId, + /// The credit amount. + amount: U512, +} + +impl ValidatorCredit { + /// Returns a new instance of `[ValidatorCredit]`. 
+ pub fn new(validator_public_key: PublicKey, era_id: EraId, amount: U512) -> Self { + ValidatorCredit { + validator_public_key, + era_id, + amount, + } + } + + /// Gets the validator public key of this instance. + pub fn validator_public_key(&self) -> &PublicKey { + &self.validator_public_key + } + + /// Gets the era_id of this instance. + pub fn era_id(&self) -> EraId { + self.era_id + } + + /// Gets the era_id of this instance. + pub fn amount(&self) -> U512 { + self.amount + } + + /// Increase the credit amount. + pub fn increase(&mut self, additional_amount: U512) -> U512 { + self.amount.saturating_add(additional_amount); + self.amount + } + + /// Creates a new empty instance of a credit, with amount 0. + pub fn empty(validator_public_key: PublicKey, era_id: EraId) -> Self { + Self { + validator_public_key, + era_id, + amount: U512::zero(), + } + } +} + +impl CLTyped for ValidatorCredit { + fn cl_type() -> CLType { + CLType::Any + } +} + +impl ToBytes for ValidatorCredit { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.validator_public_key.serialized_length() + + self.era_id.serialized_length() + + self.amount.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.validator_public_key.write_bytes(writer)?; + self.era_id.write_bytes(writer)?; + self.amount.write_bytes(writer)?; + Ok(()) + } +} + +impl FromBytes for ValidatorCredit { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (validator_public_key, remainder) = FromBytes::from_bytes(bytes)?; + let (era_id, remainder) = FromBytes::from_bytes(remainder)?; + let (amount, remainder) = FromBytes::from_bytes(remainder)?; + Ok(( + ValidatorCredit { + validator_public_key, + era_id, + amount, + }, + remainder, + )) + } +} + +#[cfg(test)] +mod tests { + use crate::{ + 
bytesrepr, system::auction::validator_credit::ValidatorCredit, EraId, PublicKey, SecretKey, + U512, + }; + + #[test] + fn serialization_roundtrip() { + let credit = ValidatorCredit { + validator_public_key: PublicKey::from( + &SecretKey::ed25519_from_bytes([0u8; SecretKey::ED25519_LENGTH]).unwrap(), + ), + era_id: EraId::new(0), + amount: U512::one(), + }; + bytesrepr::test_serialization_roundtrip(&credit); + } +} + +#[cfg(test)] +mod prop_tests { + use proptest::prelude::*; + + use crate::{bytesrepr, gens}; + + proptest! { + #[test] + fn test_value_bid(bid in gens::credit_bid_arb()) { + bytesrepr::test_serialization_roundtrip(&bid); + } + } +} diff --git a/types/src/system/auction/withdraw_purse.rs b/types/src/system/auction/withdraw_purse.rs new file mode 100644 index 0000000000..9dc3806b0d --- /dev/null +++ b/types/src/system/auction/withdraw_purse.rs @@ -0,0 +1,192 @@ +use alloc::vec::Vec; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + CLType, CLTyped, EraId, PublicKey, URef, U512, +}; + +/// A withdraw purse, a legacy structure. +#[derive(PartialEq, Eq, Debug, Serialize, Deserialize, Clone)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub struct WithdrawPurse { + /// Bonding Purse + pub(crate) bonding_purse: URef, + /// Validators public key. + pub(crate) validator_public_key: PublicKey, + /// Unbonders public key. + pub(crate) unbonder_public_key: PublicKey, + /// Era in which this unbonding request was created. + pub(crate) era_of_creation: EraId, + /// Unbonding Amount. + pub(crate) amount: U512, +} + +impl WithdrawPurse { + /// Creates [`WithdrawPurse`] instance for an unbonding request. 
+ pub const fn new( + bonding_purse: URef, + validator_public_key: PublicKey, + unbonder_public_key: PublicKey, + era_of_creation: EraId, + amount: U512, + ) -> Self { + Self { + bonding_purse, + validator_public_key, + unbonder_public_key, + era_of_creation, + amount, + } + } + + /// Checks if given request is made by a validator by checking if public key of unbonder is same + /// as a key owned by validator. + pub fn is_validator(&self) -> bool { + self.validator_public_key == self.unbonder_public_key + } + + /// Returns bonding purse used to make this unbonding request. + pub fn bonding_purse(&self) -> &URef { + &self.bonding_purse + } + + /// Returns public key of validator. + pub fn validator_public_key(&self) -> &PublicKey { + &self.validator_public_key + } + + /// Returns public key of unbonder. + /// + /// For withdrawal requests that originated from validator's public key through `withdraw_bid` + /// entrypoint this is equal to [`WithdrawPurse::validator_public_key`] and + /// [`WithdrawPurse::is_validator`] is `true`. + pub fn unbonder_public_key(&self) -> &PublicKey { + &self.unbonder_public_key + } + + /// Returns era which was used to create this unbonding request. + pub fn era_of_creation(&self) -> EraId { + self.era_of_creation + } + + /// Returns unbonding amount. 
+ pub fn amount(&self) -> &U512 { + &self.amount + } +} + +impl ToBytes for WithdrawPurse { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut result = bytesrepr::allocate_buffer(self)?; + result.extend(&self.bonding_purse.to_bytes()?); + result.extend(&self.validator_public_key.to_bytes()?); + result.extend(&self.unbonder_public_key.to_bytes()?); + result.extend(&self.era_of_creation.to_bytes()?); + result.extend(&self.amount.to_bytes()?); + + Ok(result) + } + fn serialized_length(&self) -> usize { + self.bonding_purse.serialized_length() + + self.validator_public_key.serialized_length() + + self.unbonder_public_key.serialized_length() + + self.era_of_creation.serialized_length() + + self.amount.serialized_length() + } +} + +impl FromBytes for WithdrawPurse { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (bonding_purse, remainder) = FromBytes::from_bytes(bytes)?; + let (validator_public_key, remainder) = FromBytes::from_bytes(remainder)?; + let (unbonder_public_key, remainder) = FromBytes::from_bytes(remainder)?; + let (era_of_creation, remainder) = FromBytes::from_bytes(remainder)?; + let (amount, remainder) = FromBytes::from_bytes(remainder)?; + + Ok(( + WithdrawPurse { + bonding_purse, + validator_public_key, + unbonder_public_key, + era_of_creation, + amount, + }, + remainder, + )) + } +} + +impl CLTyped for WithdrawPurse { + fn cl_type() -> CLType { + CLType::Any + } +} + +#[cfg(test)] +mod tests { + use crate::{bytesrepr, AccessRights, EraId, PublicKey, SecretKey, URef, U512}; + + use super::WithdrawPurse; + + const BONDING_PURSE: URef = URef::new([41; 32], AccessRights::READ_ADD_WRITE); + const ERA_OF_WITHDRAWAL: EraId = EraId::MAX; + + fn validator_public_key() -> PublicKey { + let secret_key = SecretKey::ed25519_from_bytes([44; SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) + } + + fn unbonder_public_key() -> PublicKey { + let secret_key = SecretKey::ed25519_from_bytes([45; 
SecretKey::ED25519_LENGTH]).unwrap(); + PublicKey::from(&secret_key) + } + + fn amount() -> U512 { + U512::max_value() - 1 + } + + #[test] + fn serialization_roundtrip_for_withdraw_purse() { + let withdraw_purse = WithdrawPurse { + bonding_purse: BONDING_PURSE, + validator_public_key: validator_public_key(), + unbonder_public_key: unbonder_public_key(), + era_of_creation: ERA_OF_WITHDRAWAL, + amount: amount(), + }; + + bytesrepr::test_serialization_roundtrip(&withdraw_purse); + } + + #[test] + fn should_be_validator_condition_for_withdraw_purse() { + let validator_withdraw_purse = WithdrawPurse::new( + BONDING_PURSE, + validator_public_key(), + validator_public_key(), + ERA_OF_WITHDRAWAL, + amount(), + ); + assert!(validator_withdraw_purse.is_validator()); + } + + #[test] + fn should_be_delegator_condition_for_withdraw_purse() { + let delegator_withdraw_purse = WithdrawPurse::new( + BONDING_PURSE, + validator_public_key(), + unbonder_public_key(), + ERA_OF_WITHDRAWAL, + amount(), + ); + assert!(!delegator_withdraw_purse.is_validator()); + } +} diff --git a/types/src/system/caller.rs b/types/src/system/caller.rs new file mode 100644 index 0000000000..122dd07911 --- /dev/null +++ b/types/src/system/caller.rs @@ -0,0 +1,313 @@ +pub mod call_stack_elements; + +use alloc::{collections::BTreeMap, vec::Vec}; + +use num_derive::{FromPrimitive, ToPrimitive}; +use num_traits::FromPrimitive; + +use crate::{ + account::AccountHash, + bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + package::PackageHash, + CLType, CLTyped, CLValue, CLValueError, EntityAddr, HashAddr, +}; + +use crate::{ + bytesrepr::Error, + contracts::{ContractHash, ContractPackageHash}, +}; +pub use call_stack_elements::CallStackElement; + +/// Tag representing variants of CallerTag for purposes of serialization. +#[derive(FromPrimitive, ToPrimitive)] +#[repr(u8)] +pub enum CallerTag { + /// Initiator tag. + Initiator = 0, + /// Entity tag. + Entity, + /// Smart contract tag. 
+ SmartContract, +} + +const ACCOUNT: u8 = 0; +const PACKAGE: u8 = 1; +const CONTRACT_PACKAGE: u8 = 2; +const ENTITY: u8 = 3; +const CONTRACT: u8 = 4; + +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct CallerInfo { + kind: u8, + fields: BTreeMap, +} + +impl CLTyped for CallerInfo { + fn cl_type() -> CLType { + CLType::Any + } +} + +impl CallerInfo { + pub fn kind(&self) -> u8 { + self.kind + } + + pub fn get_field_by_index(&self, index: u8) -> Option<&CLValue> { + self.fields.get(&index) + } +} + +impl ToBytes for CallerInfo { + fn to_bytes(&self) -> Result, Error> { + let mut result = bytesrepr::allocate_buffer(self)?; + result.append(&mut self.kind.to_bytes()?); + result.append(&mut self.fields.to_bytes()?); + Ok(result) + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + self.fields.serialized_length() + } +} + +impl FromBytes for CallerInfo { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (kind, remainder) = u8::from_bytes(bytes)?; + let (fields, remainder) = BTreeMap::from_bytes(remainder)?; + Ok((CallerInfo { kind, fields }, remainder)) + } +} + +impl TryFrom for CallerInfo { + type Error = CLValueError; + + fn try_from(value: Caller) -> Result { + match value { + Caller::Initiator { account_hash } => { + let kind = ACCOUNT; + + let mut ret = BTreeMap::new(); + ret.insert(ACCOUNT, CLValue::from_t(Some(account_hash))?); + ret.insert(PACKAGE, CLValue::from_t(Option::::None)?); + ret.insert( + CONTRACT_PACKAGE, + CLValue::from_t(Option::::None)?, + ); + ret.insert(ENTITY, CLValue::from_t(Option::::None)?); + ret.insert(CONTRACT, CLValue::from_t(Option::::None)?); + Ok(CallerInfo { kind, fields: ret }) + } + Caller::Entity { + package_hash, + entity_addr, + } => { + let kind = ENTITY; + + let mut ret = BTreeMap::new(); + ret.insert(ACCOUNT, CLValue::from_t(Option::::None)?); + ret.insert(PACKAGE, CLValue::from_t(Some(package_hash))?); + ret.insert( + CONTRACT_PACKAGE, + CLValue::from_t(Option::::None)?, + ); + 
ret.insert(ENTITY, CLValue::from_t(Some(entity_addr))?); + ret.insert(CONTRACT, CLValue::from_t(Option::::None)?); + Ok(CallerInfo { kind, fields: ret }) + } + Caller::SmartContract { + contract_package_hash, + contract_hash, + } => { + let kind = CONTRACT; + + let mut ret = BTreeMap::new(); + ret.insert(ACCOUNT, CLValue::from_t(Option::::None)?); + ret.insert(PACKAGE, CLValue::from_t(Option::::None)?); + ret.insert( + CONTRACT_PACKAGE, + CLValue::from_t(Some(contract_package_hash))?, + ); + + ret.insert(ENTITY, CLValue::from_t(Option::::None)?); + ret.insert(CONTRACT, CLValue::from_t(Some(contract_hash))?); + Ok(CallerInfo { kind, fields: ret }) + } + } + } +} + +/// Identity of a calling entity. +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum Caller { + /// Initiator (calling account) + Initiator { + /// The account hash of the caller + account_hash: AccountHash, + }, + /// Entity (smart contract / system contract) + Entity { + /// The package hash + package_hash: PackageHash, + /// The entity addr. + entity_addr: EntityAddr, + }, + SmartContract { + /// The contract package hash. + contract_package_hash: ContractPackageHash, + /// The contract hash. + contract_hash: ContractHash, + }, +} + +impl Caller { + /// Creates a [`Caller::Initiator`]. This represents a call into session code, and + /// should only ever happen once in a call stack. + pub fn initiator(account_hash: AccountHash) -> Self { + Caller::Initiator { account_hash } + } + + /// Creates a [`'Caller::Entity`]. This represents a call into a contract with + /// `EntryPointType::Called`. + pub fn entity(package_hash: PackageHash, entity_addr: EntityAddr) -> Self { + Caller::Entity { + package_hash, + entity_addr, + } + } + + pub fn smart_contract( + contract_package_hash: ContractPackageHash, + contract_hash: ContractHash, + ) -> Self { + Caller::SmartContract { + contract_package_hash, + contract_hash, + } + } + + /// Gets the tag from self. 
+ pub fn tag(&self) -> CallerTag { + match self { + Caller::Initiator { .. } => CallerTag::Initiator, + Caller::Entity { .. } => CallerTag::Entity, + Caller::SmartContract { .. } => CallerTag::SmartContract, + } + } + + /// Gets the [`HashAddr`] for both stored session and stored contract variants. + pub fn contract_hash(&self) -> Option { + match self { + Caller::Initiator { .. } => None, + Caller::Entity { entity_addr, .. } => Some(entity_addr.value()), + Caller::SmartContract { contract_hash, .. } => Some(contract_hash.value()), + } + } +} + +impl ToBytes for Caller { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut result = bytesrepr::allocate_buffer(self)?; + result.push(self.tag() as u8); + match self { + Caller::Initiator { account_hash } => result.append(&mut account_hash.to_bytes()?), + + Caller::Entity { + package_hash, + entity_addr, + } => { + result.append(&mut package_hash.to_bytes()?); + result.append(&mut entity_addr.to_bytes()?); + } + Caller::SmartContract { + contract_package_hash, + contract_hash, + } => { + result.append(&mut contract_package_hash.to_bytes()?); + result.append(&mut contract_hash.to_bytes()?); + } + }; + Ok(result) + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + + match self { + Caller::Initiator { account_hash } => account_hash.serialized_length(), + Caller::Entity { + package_hash, + entity_addr, + } => package_hash.serialized_length() + entity_addr.serialized_length(), + Caller::SmartContract { + contract_package_hash, + contract_hash, + } => contract_package_hash.serialized_length() + contract_hash.serialized_length(), + } + } +} + +impl FromBytes for Caller { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder): (u8, &[u8]) = FromBytes::from_bytes(bytes)?; + let tag = CallerTag::from_u8(tag).ok_or(bytesrepr::Error::Formatting)?; + match tag { + CallerTag::Initiator => { + let (account_hash, remainder) = AccountHash::from_bytes(remainder)?; + 
Ok((Caller::Initiator { account_hash }, remainder)) + } + CallerTag::Entity => { + let (package_hash, remainder) = PackageHash::from_bytes(remainder)?; + let (entity_addr, remainder) = EntityAddr::from_bytes(remainder)?; + Ok(( + Caller::Entity { + package_hash, + entity_addr, + }, + remainder, + )) + } + CallerTag::SmartContract => { + let (contract_package_hash, remainder) = + ContractPackageHash::from_bytes(remainder)?; + let (contract_hash, remainder) = ContractHash::from_bytes(remainder)?; + Ok(( + Caller::SmartContract { + contract_package_hash, + contract_hash, + }, + remainder, + )) + } + } + } +} + +impl CLTyped for Caller { + fn cl_type() -> CLType { + CLType::Any + } +} + +impl From<&Caller> for CallStackElement { + fn from(caller: &Caller) -> Self { + match caller { + Caller::Initiator { account_hash } => CallStackElement::Session { + account_hash: *account_hash, + }, + Caller::Entity { + package_hash, + entity_addr: entity_hash, + } => CallStackElement::StoredContract { + contract_package_hash: ContractPackageHash::new(package_hash.value()), + contract_hash: ContractHash::new(entity_hash.value()), + }, + Caller::SmartContract { + contract_package_hash, + contract_hash, + } => CallStackElement::StoredContract { + contract_package_hash: *contract_package_hash, + contract_hash: *contract_hash, + }, + } + } +} diff --git a/types/src/system/caller/call_stack_elements.rs b/types/src/system/caller/call_stack_elements.rs new file mode 100644 index 0000000000..be9fc8cbe6 --- /dev/null +++ b/types/src/system/caller/call_stack_elements.rs @@ -0,0 +1,195 @@ +use alloc::vec::Vec; + +use num_derive::{FromPrimitive, ToPrimitive}; +use num_traits::FromPrimitive; + +use crate::{ + account::AccountHash, + bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + contracts::{ContractHash, ContractPackageHash}, + CLType, CLTyped, +}; + +/// Tag representing variants of CallStackElement for purposes of serialization. 
+#[derive(FromPrimitive, ToPrimitive)] +#[repr(u8)] +pub enum CallStackElementTag { + /// Session tag. + Session = 0, + /// StoredSession tag. + StoredSession, + /// StoredContract tag. + StoredContract, +} + +/// Represents the origin of a sub-call. +#[derive(Clone, Debug, PartialEq, Eq)] +pub enum CallStackElement { + /// Session + Session { + /// The account hash of the caller + account_hash: AccountHash, + }, + /// Effectively an EntryPointType::Session - stored access to a session. + StoredSession { + /// The account hash of the caller + account_hash: AccountHash, + /// The contract package hash + contract_package_hash: ContractPackageHash, + /// The contract hash + contract_hash: ContractHash, + }, + /// Contract + StoredContract { + /// The contract package hash + contract_package_hash: ContractPackageHash, + /// The contract hash + contract_hash: ContractHash, + }, +} + +impl CallStackElement { + /// Creates a [`CallStackElement::Session`]. This represents a call into session code, and + /// should only ever happen once in a call stack. + pub fn session(account_hash: AccountHash) -> Self { + CallStackElement::Session { account_hash } + } + + /// Creates a [`'CallStackElement::StoredContract`]. This represents a call into a contract with + /// `EntryPointType::Contract`. + pub fn stored_contract( + contract_package_hash: ContractPackageHash, + contract_hash: ContractHash, + ) -> Self { + CallStackElement::StoredContract { + contract_package_hash, + contract_hash, + } + } + + /// Creates a [`'CallStackElement::StoredSession`]. This represents a call into a contract with + /// `EntryPointType::Session`. + pub fn stored_session( + account_hash: AccountHash, + contract_package_hash: ContractPackageHash, + contract_hash: ContractHash, + ) -> Self { + CallStackElement::StoredSession { + account_hash, + contract_package_hash, + contract_hash, + } + } + + /// Gets the tag from self. 
+ pub fn tag(&self) -> CallStackElementTag { + match self { + CallStackElement::Session { .. } => CallStackElementTag::Session, + CallStackElement::StoredSession { .. } => CallStackElementTag::StoredSession, + CallStackElement::StoredContract { .. } => CallStackElementTag::StoredContract, + } + } + + /// Gets the [`ContractHash`] for both stored session and stored contract variants. + pub fn contract_hash(&self) -> Option<&ContractHash> { + match self { + CallStackElement::Session { .. } => None, + CallStackElement::StoredSession { contract_hash, .. } + | CallStackElement::StoredContract { contract_hash, .. } => Some(contract_hash), + } + } +} + +impl ToBytes for CallStackElement { + fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> { + let mut result = bytesrepr::allocate_buffer(self)?; + result.push(self.tag() as u8); + match self { + CallStackElement::Session { account_hash } => { + result.append(&mut account_hash.to_bytes()?) + } + CallStackElement::StoredSession { + account_hash, + contract_package_hash, + contract_hash, + } => { + result.append(&mut account_hash.to_bytes()?); + result.append(&mut contract_package_hash.to_bytes()?); + result.append(&mut contract_hash.to_bytes()?); + } + CallStackElement::StoredContract { + contract_package_hash, + contract_hash, + } => { + result.append(&mut contract_package_hash.to_bytes()?); + result.append(&mut contract_hash.to_bytes()?); + } + }; + Ok(result) + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + + match self { + CallStackElement::Session { account_hash } => account_hash.serialized_length(), + CallStackElement::StoredSession { + account_hash, + contract_package_hash, + contract_hash, + } => { + account_hash.serialized_length() + + contract_package_hash.serialized_length() + + contract_hash.serialized_length() + } + CallStackElement::StoredContract { + contract_package_hash, + contract_hash, + } => contract_package_hash.serialized_length() + contract_hash.serialized_length(), + } + } +} + 
+impl FromBytes for CallStackElement { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder): (u8, &[u8]) = FromBytes::from_bytes(bytes)?; + let tag = CallStackElementTag::from_u8(tag).ok_or(bytesrepr::Error::Formatting)?; + match tag { + CallStackElementTag::Session => { + let (account_hash, remainder) = AccountHash::from_bytes(remainder)?; + Ok((CallStackElement::Session { account_hash }, remainder)) + } + CallStackElementTag::StoredSession => { + let (account_hash, remainder) = AccountHash::from_bytes(remainder)?; + let (contract_package_hash, remainder) = + ContractPackageHash::from_bytes(remainder)?; + let (contract_hash, remainder) = ContractHash::from_bytes(remainder)?; + Ok(( + CallStackElement::StoredSession { + account_hash, + contract_package_hash, + contract_hash, + }, + remainder, + )) + } + CallStackElementTag::StoredContract => { + let (contract_package_hash, remainder) = + ContractPackageHash::from_bytes(remainder)?; + let (contract_hash, remainder) = ContractHash::from_bytes(remainder)?; + Ok(( + CallStackElement::StoredContract { + contract_package_hash, + contract_hash, + }, + remainder, + )) + } + } + } +} + +impl CLTyped for CallStackElement { + fn cl_type() -> CLType { + CLType::Any + } +} diff --git a/types/src/system/error.rs b/types/src/system/error.rs new file mode 100644 index 0000000000..c63e3f585e --- /dev/null +++ b/types/src/system/error.rs @@ -0,0 +1,43 @@ +use core::fmt::{self, Display, Formatter}; + +use crate::system::{auction, handle_payment, mint}; + +/// An aggregate enum error with variants for each system contract's error. +#[derive(Debug, Copy, Clone)] +#[non_exhaustive] +pub enum Error { + /// Contains a [`mint::Error`]. + Mint(mint::Error), + /// Contains a [`handle_payment::Error`]. + HandlePayment(handle_payment::Error), + /// Contains a [`auction::Error`]. 
+ Auction(auction::Error), +} + +impl From<mint::Error> for Error { + fn from(error: mint::Error) -> Error { + Error::Mint(error) + } +} + +impl From<handle_payment::Error> for Error { + fn from(error: handle_payment::Error) -> Error { + Error::HandlePayment(error) + } +} + +impl From<auction::Error> for Error { + fn from(error: auction::Error) -> Error { + Error::Auction(error) + } +} + +impl Display for Error { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + Error::Mint(error) => write!(formatter, "Mint error: {}", error), + Error::HandlePayment(error) => write!(formatter, "HandlePayment error: {}", error), + Error::Auction(error) => write!(formatter, "Auction error: {}", error), + } + } +} diff --git a/types/src/system/handle_payment.rs b/types/src/system/handle_payment.rs new file mode 100644 index 0000000000..1b12f3ecfb --- /dev/null +++ b/types/src/system/handle_payment.rs @@ -0,0 +1,8 @@ +//! Contains implementation of a Handle Payment contract functionality. +mod constants; +mod entry_points; +mod error; + +pub use constants::*; +pub use entry_points::handle_payment_entry_points; +pub use error::Error; diff --git a/types/src/system/handle_payment/constants.rs b/types/src/system/handle_payment/constants.rs index 775c16babd..24fd10bada 100644 --- a/types/src/system/handle_payment/constants.rs +++ b/types/src/system/handle_payment/constants.rs @@ -13,8 +13,6 @@ pub const METHOD_GET_PAYMENT_PURSE: &str = "get_payment_purse"; pub const METHOD_SET_REFUND_PURSE: &str = "set_refund_purse"; /// Named constant for method `get_refund_purse`. pub const METHOD_GET_REFUND_PURSE: &str = "get_refund_purse"; -/// Named constant for method `finalize_payment`. -pub const METHOD_FINALIZE_PAYMENT: &str = "finalize_payment"; /// Storage for handle payment contract hash. pub const CONTRACT_HASH_KEY: &str = "contract_hash"; @@ -28,3 +26,8 @@ pub const PAYMENT_PURSE_KEY: &str = "payment_purse"; /// The uref name where the Handle Payment will refund unused payment back to the user. 
The uref /// this name corresponds to is set by the user. pub const REFUND_PURSE_KEY: &str = "refund_purse"; +/// Storage for handle payment accumulation purse key. +/// +/// This purse is used when `fee_elimination` config is set to `Accumulate` which makes sense for +/// some private chains. +pub const ACCUMULATION_PURSE_KEY: &str = "accumulation_purse"; diff --git a/types/src/system/handle_payment/entry_points.rs b/types/src/system/handle_payment/entry_points.rs new file mode 100644 index 0000000000..6f101895dc --- /dev/null +++ b/types/src/system/handle_payment/entry_points.rs @@ -0,0 +1,46 @@ +use alloc::boxed::Box; + +use crate::{ + system::handle_payment::{ + ARG_PURSE, METHOD_GET_PAYMENT_PURSE, METHOD_GET_REFUND_PURSE, METHOD_SET_REFUND_PURSE, + }, + CLType, EntityEntryPoint, EntryPointAccess, EntryPointPayment, EntryPointType, EntryPoints, + Parameter, +}; + +/// Creates handle payment contract entry points. +pub fn handle_payment_entry_points() -> EntryPoints { + let mut entry_points = EntryPoints::new(); + + let get_payment_purse = EntityEntryPoint::new( + METHOD_GET_PAYMENT_PURSE, + vec![], + CLType::URef, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + entry_points.add_entry_point(get_payment_purse); + + let set_refund_purse = EntityEntryPoint::new( + METHOD_SET_REFUND_PURSE, + vec![Parameter::new(ARG_PURSE, CLType::URef)], + CLType::Unit, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + entry_points.add_entry_point(set_refund_purse); + + let get_refund_purse = EntityEntryPoint::new( + METHOD_GET_REFUND_PURSE, + vec![], + CLType::Option(Box::new(CLType::URef)), + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + entry_points.add_entry_point(get_refund_purse); + + entry_points +} diff --git a/types/src/system/handle_payment/error.rs b/types/src/system/handle_payment/error.rs index e09e39443a..4f1305b3e0 100644 --- 
a/types/src/system/handle_payment/error.rs +++ b/types/src/system/handle_payment/error.rs @@ -1,9 +1,9 @@ -//! Home of the Handle Payment contract's [`Error`] type. +//! Home of the Handle Payment contract's [`enum@Error`] type. use alloc::vec::Vec; -use core::result; - -#[cfg(feature = "std")] -use thiserror::Error; +use core::{ + convert::TryFrom, + fmt::{self, Display, Formatter}, +}; use crate::{ bytesrepr::{self, ToBytes, U8_SERIALIZED_LENGTH}, @@ -11,121 +11,424 @@ use crate::{ }; /// Errors which can occur while executing the Handle Payment contract. -// TODO: Split this up into user errors vs. system errors. #[derive(Debug, Copy, Clone, PartialEq, Eq)] -#[cfg_attr(feature = "std", derive(Error))] #[repr(u8)] +#[non_exhaustive] pub enum Error { // ===== User errors ===== /// The given validator is not bonded. - #[cfg_attr(feature = "std", error("Not bonded"))] + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(0, Error::NotBonded as u8); + /// ``` NotBonded = 0, /// There are too many bonding or unbonding attempts already enqueued to allow more. - #[cfg_attr(feature = "std", error("Too many events in queue"))] - TooManyEventsInQueue, + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(1, Error::TooManyEventsInQueue as u8); + /// ``` + TooManyEventsInQueue = 1, /// At least one validator must remain bonded. - #[cfg_attr(feature = "std", error("Cannot unbond last validator"))] - CannotUnbondLastValidator, + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(2, Error::CannotUnbondLastValidator as u8); + /// ``` + CannotUnbondLastValidator = 2, /// Failed to bond or unbond as this would have resulted in exceeding the maximum allowed /// difference between the largest and smallest stakes. 
- #[cfg_attr(feature = "std", error("Spread is too high"))] - SpreadTooHigh, + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(3, Error::SpreadTooHigh as u8); + /// ``` + SpreadTooHigh = 3, /// The given validator already has a bond or unbond attempt enqueued. - #[cfg_attr(feature = "std", error("Multiple requests"))] - MultipleRequests, + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(4, Error::MultipleRequests as u8); + /// ``` + MultipleRequests = 4, /// Attempted to bond with a stake which was too small. - #[cfg_attr(feature = "std", error("Bond is too small"))] - BondTooSmall, + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(5, Error::BondTooSmall as u8); + /// ``` + BondTooSmall = 5, /// Attempted to bond with a stake which was too large. - #[cfg_attr(feature = "std", error("Bond is too large"))] - BondTooLarge, + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(6, Error::BondTooLarge as u8); + /// ``` + BondTooLarge = 6, /// Attempted to unbond an amount which was too large. - #[cfg_attr(feature = "std", error("Unbond is too large"))] - UnbondTooLarge, + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(7, Error::UnbondTooLarge as u8); + /// ``` + UnbondTooLarge = 7, /// While bonding, the transfer from source purse to the Handle Payment internal purse failed. - #[cfg_attr(feature = "std", error("Bond transfer failed"))] - BondTransferFailed, + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(8, Error::BondTransferFailed as u8); + /// ``` + BondTransferFailed = 8, /// While unbonding, the transfer from the Handle Payment internal purse to the destination /// purse failed. 
- #[cfg_attr(feature = "std", error("Unbond transfer failed"))] - UnbondTransferFailed, + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(9, Error::UnbondTransferFailed as u8); + /// ``` + UnbondTransferFailed = 9, // ===== System errors ===== /// Internal error: a [`BlockTime`](crate::BlockTime) was unexpectedly out of sequence. - #[cfg_attr(feature = "std", error("Time went backwards"))] - TimeWentBackwards, + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(10, Error::TimeWentBackwards as u8); + /// ``` + TimeWentBackwards = 10, /// Internal error: stakes were unexpectedly empty. - #[cfg_attr(feature = "std", error("Stakes not found"))] - StakesNotFound, + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(11, Error::StakesNotFound as u8); + /// ``` + StakesNotFound = 11, /// Internal error: the Handle Payment contract's payment purse wasn't found. - #[cfg_attr(feature = "std", error("Payment purse not found"))] - PaymentPurseNotFound, + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(12, Error::PaymentPurseNotFound as u8); + /// ``` + PaymentPurseNotFound = 12, /// Internal error: the Handle Payment contract's payment purse key was the wrong type. - #[cfg_attr(feature = "std", error("Payment purse has unexpected type"))] - PaymentPurseKeyUnexpectedType, + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(13, Error::PaymentPurseKeyUnexpectedType as u8); + /// ``` + PaymentPurseKeyUnexpectedType = 13, /// Internal error: couldn't retrieve the balance for the Handle Payment contract's payment /// purse. 
- #[cfg_attr(feature = "std", error("Payment purse balance not found"))] - PaymentPurseBalanceNotFound, + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(14, Error::PaymentPurseBalanceNotFound as u8); + /// ``` + PaymentPurseBalanceNotFound = 14, /// Internal error: the Handle Payment contract's bonding purse wasn't found. - #[cfg_attr(feature = "std", error("Bonding purse not found"))] - BondingPurseNotFound, + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(15, Error::BondingPurseNotFound as u8); + /// ``` + BondingPurseNotFound = 15, /// Internal error: the Handle Payment contract's bonding purse key was the wrong type. - #[cfg_attr(feature = "std", error("Bonding purse key has unexpected type"))] - BondingPurseKeyUnexpectedType, + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(16, Error::BondingPurseKeyUnexpectedType as u8); + /// ``` + BondingPurseKeyUnexpectedType = 16, /// Internal error: the Handle Payment contract's refund purse key was the wrong type. - #[cfg_attr(feature = "std", error("Refund purse key has unexpected type"))] - RefundPurseKeyUnexpectedType, + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(17, Error::RefundPurseKeyUnexpectedType as u8); + /// ``` + RefundPurseKeyUnexpectedType = 17, /// Internal error: the Handle Payment contract's rewards purse wasn't found. - #[cfg_attr(feature = "std", error("Rewards purse not found"))] - RewardsPurseNotFound, + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(18, Error::RewardsPurseNotFound as u8); + /// ``` + RewardsPurseNotFound = 18, /// Internal error: the Handle Payment contract's rewards purse key was the wrong type. 
- #[cfg_attr(feature = "std", error("Rewards purse has unexpected type"))] - RewardsPurseKeyUnexpectedType, - // TODO: Put these in their own enum, and wrap them separately in `BondingError` and - // `UnbondingError`. + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(19, Error::RewardsPurseKeyUnexpectedType as u8); + /// ``` + RewardsPurseKeyUnexpectedType = 19, /// Internal error: failed to deserialize the stake's key. - #[cfg_attr(feature = "std", error("Failed to deserialize stake's key"))] - StakesKeyDeserializationFailed, + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(20, Error::StakesKeyDeserializationFailed as u8); + /// ``` + StakesKeyDeserializationFailed = 20, /// Internal error: failed to deserialize the stake's balance. - #[cfg_attr(feature = "std", error("Failed to deserialize stake's balance"))] - StakesDeserializationFailed, - /// The invoked Handle Payment function can only be called by system contracts, but was called - /// by a user contract. - #[cfg_attr(feature = "std", error("System function was called by user account"))] - SystemFunctionCalledByUserAccount, + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(21, Error::StakesDeserializationFailed as u8); + /// ``` + StakesDeserializationFailed = 21, + /// Raised when caller is not the system account. + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(22, Error::InvalidCaller as u8); + /// ``` + InvalidCaller = 22, /// Internal error: while finalizing payment, the amount spent exceeded the amount available. 
- #[cfg_attr(feature = "std", error("Insufficient payment for amount spent"))] - InsufficientPaymentForAmountSpent, + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(23, Error::InsufficientPaymentForAmountSpent as u8); + /// ``` + InsufficientPaymentForAmountSpent = 23, /// Internal error: while finalizing payment, failed to pay the validators (the transfer from /// the Handle Payment contract's payment purse to rewards purse failed). - #[cfg_attr(feature = "std", error("Transfer to rewards purse has failed"))] - FailedTransferToRewardsPurse, + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(24, Error::FailedTransferToRewardsPurse as u8); + /// ``` + FailedTransferToRewardsPurse = 24, /// Internal error: while finalizing payment, failed to refund the caller's purse (the transfer /// from the Handle Payment contract's payment purse to refund purse or account's main purse /// failed). - #[cfg_attr(feature = "std", error("Transfer to account's purse failed"))] - FailedTransferToAccountPurse, + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(25, Error::FailedTransferToAccountPurse as u8); + /// ``` + FailedTransferToAccountPurse = 25, /// Handle Payment contract's "set_refund_purse" method can only be called by the payment code /// of a deploy, but was called by the session code. - #[cfg_attr(feature = "std", error("Set refund purse was called outside payment"))] - SetRefundPurseCalledOutsidePayment, + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(26, Error::SetRefundPurseCalledOutsidePayment as u8); + /// ``` + SetRefundPurseCalledOutsidePayment = 26, /// Raised when the system is unable to determine purse balance. 
- #[cfg_attr(feature = "std", error("Unable to get purse balance"))] - GetBalance, + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(27, Error::GetBalance as u8); + /// ``` + GetBalance = 27, /// Raised when the system is unable to put named key. - #[cfg_attr(feature = "std", error("Unable to put named key"))] - PutKey, + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(28, Error::PutKey as u8); + /// ``` + PutKey = 28, /// Raised when the system is unable to remove given named key. - #[cfg_attr(feature = "std", error("Unable to remove named key"))] - RemoveKey, + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(29, Error::RemoveKey as u8); + /// ``` + RemoveKey = 29, /// Failed to transfer funds. - #[cfg_attr(feature = "std", error("Failed to transfer funds"))] - Transfer, + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(30, Error::Transfer as u8); + /// ``` + Transfer = 30, + /// An arithmetic overflow occurred + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(31, Error::ArithmeticOverflow as u8); + /// ``` + ArithmeticOverflow = 31, // NOTE: These variants below will be removed once support for WASM system contracts will be // dropped. #[doc(hidden)] - #[cfg_attr(feature = "std", error("GasLimit"))] - GasLimit, + GasLimit = 32, + /// Refund purse is a payment purse. + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(33, Error::RefundPurseIsPaymentPurse as u8); + /// ``` + RefundPurseIsPaymentPurse = 33, + /// Error raised while reducing total supply on the mint system contract. + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(34, Error::ReduceTotalSupply as u8); + /// ``` + ReduceTotalSupply = 34, + /// Error writing to a storage. 
+ /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(35, Error::Storage as u8); + /// ``` + Storage = 35, + /// Internal error: the Handle Payment contract's accumulation purse wasn't found. + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(36, Error::AccumulationPurseNotFound as u8); + /// ``` + AccumulationPurseNotFound = 36, + /// Internal error: the Handle Payment contract's accumulation purse key was the wrong type. + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(37, Error::AccumulationPurseKeyUnexpectedType as u8); + /// ``` + AccumulationPurseKeyUnexpectedType = 37, + /// Internal error: invalid fee and / or refund settings encountered during payment processing. + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(38, Error::IncompatiblePaymentSettings as u8); + /// ``` + IncompatiblePaymentSettings = 38, + /// Unexpected key variant. + /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(39, Error::UnexpectedKeyVariant as u8); + /// ``` + UnexpectedKeyVariant = 39, + /// Attempt to persist payment purse. 
+ /// ``` + /// # use casper_types::system::handle_payment::Error; + /// assert_eq!(40, Error::AttemptToPersistPaymentPurse as u8); + /// ``` + AttemptToPersistPaymentPurse = 40, +} + +impl Display for Error { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + Error::NotBonded => formatter.write_str("Not bonded"), + Error::TooManyEventsInQueue => formatter.write_str("Too many events in queue"), + Error::CannotUnbondLastValidator => formatter.write_str("Cannot unbond last validator"), + Error::SpreadTooHigh => formatter.write_str("Spread is too high"), + Error::MultipleRequests => formatter.write_str("Multiple requests"), + Error::BondTooSmall => formatter.write_str("Bond is too small"), + Error::BondTooLarge => formatter.write_str("Bond is too large"), + Error::UnbondTooLarge => formatter.write_str("Unbond is too large"), + Error::BondTransferFailed => formatter.write_str("Bond transfer failed"), + Error::UnbondTransferFailed => formatter.write_str("Unbond transfer failed"), + Error::TimeWentBackwards => formatter.write_str("Time went backwards"), + Error::StakesNotFound => formatter.write_str("Stakes not found"), + Error::PaymentPurseNotFound => formatter.write_str("Payment purse not found"), + Error::PaymentPurseKeyUnexpectedType => { + formatter.write_str("Payment purse has unexpected type") + } + Error::PaymentPurseBalanceNotFound => { + formatter.write_str("Payment purse balance not found") + } + Error::BondingPurseNotFound => formatter.write_str("Bonding purse not found"), + Error::BondingPurseKeyUnexpectedType => { + formatter.write_str("Bonding purse key has unexpected type") + } + Error::RefundPurseKeyUnexpectedType => { + formatter.write_str("Refund purse key has unexpected type") + } + Error::RewardsPurseNotFound => formatter.write_str("Rewards purse not found"), + Error::RewardsPurseKeyUnexpectedType => { + formatter.write_str("Rewards purse has unexpected type") + } + Error::StakesKeyDeserializationFailed => { + 
formatter.write_str("Failed to deserialize stake's key") + } + Error::StakesDeserializationFailed => { + formatter.write_str("Failed to deserialize stake's balance") + } + Error::InvalidCaller => { + formatter.write_str("System function was called by user account") + } + Error::InsufficientPaymentForAmountSpent => { + formatter.write_str("Insufficient payment for amount spent") + } + Error::FailedTransferToRewardsPurse => { + formatter.write_str("Transfer to rewards purse has failed") + } + Error::FailedTransferToAccountPurse => { + formatter.write_str("Transfer to account's purse failed") + } + Error::SetRefundPurseCalledOutsidePayment => { + formatter.write_str("Set refund purse was called outside payment") + } + Error::GetBalance => formatter.write_str("Unable to get purse balance"), + Error::PutKey => formatter.write_str("Unable to put named key"), + Error::RemoveKey => formatter.write_str("Unable to remove named key"), + Error::Transfer => formatter.write_str("Failed to transfer funds"), + Error::ArithmeticOverflow => formatter.write_str("Arithmetic overflow"), + Error::GasLimit => formatter.write_str("GasLimit"), + Error::RefundPurseIsPaymentPurse => { + formatter.write_str("Refund purse is a payment purse.") + } + Error::ReduceTotalSupply => formatter.write_str("Failed to reduce total supply."), + Error::Storage => formatter.write_str("Failed to write to storage."), + Error::AccumulationPurseNotFound => formatter.write_str("Accumulation purse not found"), + Error::AccumulationPurseKeyUnexpectedType => { + formatter.write_str("Accumulation purse has unexpected type") + } + Error::IncompatiblePaymentSettings => { + formatter.write_str("Incompatible payment settings") + } + Error::UnexpectedKeyVariant => formatter.write_str("Unexpected key variant"), + Error::AttemptToPersistPaymentPurse => { + formatter.write_str("Attempt to persist payment purse") + } + } + } +} + +impl TryFrom<u8> for Error { + type Error = (); + + fn try_from(value: u8) -> Result<Self, Self::Error> { + let error
= match value { + v if v == Error::NotBonded as u8 => Error::NotBonded, + v if v == Error::TooManyEventsInQueue as u8 => Error::TooManyEventsInQueue, + v if v == Error::CannotUnbondLastValidator as u8 => Error::CannotUnbondLastValidator, + v if v == Error::SpreadTooHigh as u8 => Error::SpreadTooHigh, + v if v == Error::MultipleRequests as u8 => Error::MultipleRequests, + v if v == Error::BondTooSmall as u8 => Error::BondTooSmall, + v if v == Error::BondTooLarge as u8 => Error::BondTooLarge, + v if v == Error::UnbondTooLarge as u8 => Error::UnbondTooLarge, + v if v == Error::BondTransferFailed as u8 => Error::BondTransferFailed, + v if v == Error::UnbondTransferFailed as u8 => Error::UnbondTransferFailed, + v if v == Error::TimeWentBackwards as u8 => Error::TimeWentBackwards, + v if v == Error::StakesNotFound as u8 => Error::StakesNotFound, + v if v == Error::PaymentPurseNotFound as u8 => Error::PaymentPurseNotFound, + v if v == Error::PaymentPurseKeyUnexpectedType as u8 => { + Error::PaymentPurseKeyUnexpectedType + } + v if v == Error::PaymentPurseBalanceNotFound as u8 => { + Error::PaymentPurseBalanceNotFound + } + v if v == Error::BondingPurseNotFound as u8 => Error::BondingPurseNotFound, + v if v == Error::BondingPurseKeyUnexpectedType as u8 => { + Error::BondingPurseKeyUnexpectedType + } + v if v == Error::RefundPurseKeyUnexpectedType as u8 => { + Error::RefundPurseKeyUnexpectedType + } + v if v == Error::RewardsPurseNotFound as u8 => Error::RewardsPurseNotFound, + v if v == Error::RewardsPurseKeyUnexpectedType as u8 => { + Error::RewardsPurseKeyUnexpectedType + } + v if v == Error::StakesKeyDeserializationFailed as u8 => { + Error::StakesKeyDeserializationFailed + } + v if v == Error::StakesDeserializationFailed as u8 => { + Error::StakesDeserializationFailed + } + v if v == Error::InvalidCaller as u8 => Error::InvalidCaller, + v if v == Error::InsufficientPaymentForAmountSpent as u8 => { + Error::InsufficientPaymentForAmountSpent + } + v if v == 
Error::FailedTransferToRewardsPurse as u8 => { + Error::FailedTransferToRewardsPurse + } + v if v == Error::FailedTransferToAccountPurse as u8 => { + Error::FailedTransferToAccountPurse + } + v if v == Error::SetRefundPurseCalledOutsidePayment as u8 => { + Error::SetRefundPurseCalledOutsidePayment + } + + v if v == Error::GetBalance as u8 => Error::GetBalance, + v if v == Error::PutKey as u8 => Error::PutKey, + v if v == Error::RemoveKey as u8 => Error::RemoveKey, + v if v == Error::Transfer as u8 => Error::Transfer, + v if v == Error::ArithmeticOverflow as u8 => Error::ArithmeticOverflow, + v if v == Error::GasLimit as u8 => Error::GasLimit, + v if v == Error::RefundPurseIsPaymentPurse as u8 => Error::RefundPurseIsPaymentPurse, + v if v == Error::ReduceTotalSupply as u8 => Error::ReduceTotalSupply, + v if v == Error::Storage as u8 => Error::Storage, + v if v == Error::AccumulationPurseNotFound as u8 => Error::AccumulationPurseNotFound, + v if v == Error::AccumulationPurseKeyUnexpectedType as u8 => { + Error::AccumulationPurseKeyUnexpectedType + } + v if v == Error::IncompatiblePaymentSettings as u8 => { + Error::IncompatiblePaymentSettings + } + v if v == Error::UnexpectedKeyVariant as u8 => Error::UnexpectedKeyVariant, + v if v == Error::AttemptToPersistPaymentPurse as u8 => { + Error::AttemptToPersistPaymentPurse + } + _ => return Err(()), + }; + Ok(error) + } } impl CLTyped for Error { @@ -135,7 +438,7 @@ impl CLTyped for Error { } impl ToBytes for Error { - fn to_bytes(&self) -> result::Result, bytesrepr::Error> { + fn to_bytes(&self) -> Result, bytesrepr::Error> { let value = *self as u8; value.to_bytes() } diff --git a/types/src/system/handle_payment/mint_provider.rs b/types/src/system/handle_payment/mint_provider.rs deleted file mode 100644 index ff2102525a..0000000000 --- a/types/src/system/handle_payment/mint_provider.rs +++ /dev/null @@ -1,23 +0,0 @@ -use crate::{account::AccountHash, system::handle_payment::Error, TransferredTo, URef, U512}; - -/// 
Provides an access to mint. -pub trait MintProvider { - /// Transfer `amount` from `source` purse to a `target` account. - fn transfer_purse_to_account( - &mut self, - source: URef, - target: AccountHash, - amount: U512, - ) -> Result; - - /// Transfer `amount` from `source` purse to a `target` purse. - fn transfer_purse_to_purse( - &mut self, - source: URef, - target: URef, - amount: U512, - ) -> Result<(), Error>; - - /// Checks balance of a `purse`. Returns `None` if given purse does not exist. - fn balance(&mut self, purse: URef) -> Result, Error>; -} diff --git a/types/src/system/handle_payment/mod.rs b/types/src/system/handle_payment/mod.rs deleted file mode 100644 index 0e4a0db0de..0000000000 --- a/types/src/system/handle_payment/mod.rs +++ /dev/null @@ -1,170 +0,0 @@ -//! Contains implementation of a Handle Payment contract functionality. -mod constants; -mod error; -mod mint_provider; -mod runtime_provider; - -use core::marker::Sized; - -use crate::{account::AccountHash, AccessRights, URef, U512}; - -pub use crate::system::handle_payment::{ - constants::*, error::Error, mint_provider::MintProvider, runtime_provider::RuntimeProvider, -}; - -// A simplified representation of a refund percentage which is currently hardcoded to 0%. -const REFUND_PERCENTAGE: U512 = U512::zero(); - -/// Handle payment functionality implementation. -pub trait HandlePayment: MintProvider + RuntimeProvider + Sized { - /// Get payment purse. - fn get_payment_purse(&self) -> Result { - let purse = internal::get_payment_purse(self)?; - // Limit the access rights so only balance query and deposit are allowed. - Ok(URef::new(purse.addr(), AccessRights::READ_ADD)) - } - - /// Set refund purse. - fn set_refund_purse(&mut self, purse: URef) -> Result<(), Error> { - internal::set_refund(self, purse) - } - - /// Get refund purse. 
- fn get_refund_purse(&self) -> Result, Error> { - // We purposely choose to remove the access rights so that we do not - // accidentally give rights for a purse to some contract that is not - // supposed to have it. - let maybe_purse = internal::get_refund_purse(self)?; - Ok(maybe_purse.map(|p| p.remove_access_rights())) - } - - /// Finalize payment with `amount_spent` and a given `account`. - fn finalize_payment( - &mut self, - amount_spent: U512, - account: AccountHash, - target: URef, - ) -> Result<(), Error> { - internal::finalize_payment(self, amount_spent, account, target) - } -} - -mod internal { - use crate::{ - account::AccountHash, - system::handle_payment::{Error, MintProvider, RuntimeProvider}, - Key, Phase, PublicKey, URef, U512, - }; - - use super::{PAYMENT_PURSE_KEY, REFUND_PERCENTAGE, REFUND_PURSE_KEY}; - - /// Returns the purse for accepting payment for transactions. - pub fn get_payment_purse(runtime_provider: &R) -> Result { - match runtime_provider.get_key(PAYMENT_PURSE_KEY) { - Some(Key::URef(uref)) => Ok(uref), - Some(_) => Err(Error::PaymentPurseKeyUnexpectedType), - None => Err(Error::PaymentPurseNotFound), - } - } - - /// Sets the purse where refunds (excess funds not spent to pay for computation) will be sent. - /// Note that if this function is never called, the default location is the main purse of the - /// deployer's account. - pub fn set_refund( - runtime_provider: &mut R, - purse: URef, - ) -> Result<(), Error> { - if let Phase::Payment = runtime_provider.get_phase() { - runtime_provider.put_key(REFUND_PURSE_KEY, Key::URef(purse))?; - return Ok(()); - } - Err(Error::SetRefundPurseCalledOutsidePayment) - } - - /// Returns the currently set refund purse. 
- pub fn get_refund_purse( - runtime_provider: &R, - ) -> Result, Error> { - match runtime_provider.get_key(REFUND_PURSE_KEY) { - Some(Key::URef(uref)) => Ok(Some(uref)), - Some(_) => Err(Error::RefundPurseKeyUnexpectedType), - None => Ok(None), - } - } - - /// Transfers funds from the payment purse to the validator rewards purse, as well as to the - /// refund purse, depending on how much was spent on the computation. This function maintains - /// the invariant that the balance of the payment purse is zero at the beginning and end of each - /// deploy and that the refund purse is unset at the beginning and end of each deploy. - pub fn finalize_payment( - provider: &mut P, - amount_spent: U512, - account: AccountHash, - target: URef, - ) -> Result<(), Error> { - let caller = provider.get_caller(); - if caller != PublicKey::System.to_account_hash() { - return Err(Error::SystemFunctionCalledByUserAccount); - } - - let payment_purse = get_payment_purse(provider)?; - let total = match provider.balance(payment_purse)? { - Some(balance) => balance, - None => return Err(Error::PaymentPurseBalanceNotFound), - }; - - if total < amount_spent { - return Err(Error::InsufficientPaymentForAmountSpent); - } - - // User's part - let refund_amount = (total - amount_spent) * REFUND_PERCENTAGE; - - // Validator reward - let validator_reward = total - refund_amount; - - // Makes sure both parts: for user, and for validator sums to the total amount in the - // payment's purse. 
- debug_assert_eq!(validator_reward + refund_amount, total); - - let refund_purse = get_refund_purse(provider)?; - provider.remove_key(REFUND_PURSE_KEY)?; //unset refund purse after reading it - - // pay target validator - provider - .transfer_purse_to_purse(payment_purse, target, validator_reward) - .map_err(|_| Error::FailedTransferToRewardsPurse)?; - - if refund_amount.is_zero() { - return Ok(()); - } - - // give refund - let refund_purse = match refund_purse { - Some(uref) => uref, - None => return refund_to_account::

(provider, payment_purse, account, refund_amount), - }; - - // in case of failure to transfer to refund purse we fall back on the account's main purse - if provider - .transfer_purse_to_purse(payment_purse, refund_purse, refund_amount) - .is_err() - { - return refund_to_account::

(provider, payment_purse, account, refund_amount); - } - - Ok(()) - } - - pub fn refund_to_account( - mint_provider: &mut M, - payment_purse: URef, - account: AccountHash, - amount: U512, - ) -> Result<(), Error> { - match mint_provider.transfer_purse_to_account(payment_purse, account, amount) { - Ok(_) => Ok(()), - Err(_) => Err(Error::FailedTransferToAccountPurse), - } - } -} diff --git a/types/src/system/handle_payment/runtime_provider.rs b/types/src/system/handle_payment/runtime_provider.rs deleted file mode 100644 index fda96de8cd..0000000000 --- a/types/src/system/handle_payment/runtime_provider.rs +++ /dev/null @@ -1,22 +0,0 @@ -use crate::{account::AccountHash, system::handle_payment::Error, BlockTime, Key, Phase}; - -/// Provider of runtime host functionality. -pub trait RuntimeProvider { - /// Get named key under a `name`. - fn get_key(&self, name: &str) -> Option; - - /// Put key under a `name`. - fn put_key(&mut self, name: &str, key: Key) -> Result<(), Error>; - - /// Remove a named key by `name`. - fn remove_key(&mut self, name: &str) -> Result<(), Error>; - - /// Get current execution phase. - fn get_phase(&self) -> Phase; - - /// Get current block time. - fn get_block_time(&self) -> BlockTime; - - /// Get caller. - fn get_caller(&self) -> AccountHash; -} diff --git a/types/src/system/mint.rs b/types/src/system/mint.rs new file mode 100644 index 0000000000..3b37be8957 --- /dev/null +++ b/types/src/system/mint.rs @@ -0,0 +1,10 @@ +//! Contains implementation of a Mint contract functionality. 
+mod balance_hold; +mod constants; +mod entry_points; +mod error; + +pub use balance_hold::{BalanceHoldAddr, BalanceHoldAddrTag}; +pub use constants::*; +pub use entry_points::mint_entry_points; +pub use error::Error; diff --git a/types/src/system/mint/balance_hold.rs b/types/src/system/mint/balance_hold.rs new file mode 100644 index 0000000000..fa4f4ecb67 --- /dev/null +++ b/types/src/system/mint/balance_hold.rs @@ -0,0 +1,435 @@ +use alloc::{ + format, + string::{String, ToString}, + vec::Vec, +}; + +use core::{ + convert::TryFrom, + fmt::{Debug, Display, Formatter}, +}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::distributions::{Distribution, Standard}; +#[cfg(any(feature = "testing", test))] +use rand::Rng; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + checksummed_hex, + key::FromStrError, + BlockTime, Key, Timestamp, URefAddr, BLOCKTIME_SERIALIZED_LENGTH, UREF_ADDR_LENGTH, +}; + +const GAS_TAG: u8 = 0; +const PROCESSING_TAG: u8 = 1; + +/// Serialization tag for BalanceHold variants. +#[derive( + Debug, Default, PartialOrd, Ord, PartialEq, Eq, Hash, Clone, Copy, Serialize, Deserialize, +)] +#[repr(u8)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub enum BalanceHoldAddrTag { + #[default] + /// Tag for gas variant. + Gas = GAS_TAG, + /// Tag for processing variant. + Processing = PROCESSING_TAG, +} + +impl BalanceHoldAddrTag { + /// The length in bytes of a [`BalanceHoldAddrTag`]. + pub const BALANCE_HOLD_ADDR_TAG_LENGTH: usize = 1; + + /// Attempts to map `BalanceHoldAddrTag` from a u8. + pub fn try_from_u8(value: u8) -> Option { + // TryFrom requires std, so doing this instead. 
+ if value == GAS_TAG { + return Some(BalanceHoldAddrTag::Gas); + } + if value == PROCESSING_TAG { + return Some(BalanceHoldAddrTag::Processing); + } + None + } +} + +impl Display for BalanceHoldAddrTag { + fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { + let tag = match self { + BalanceHoldAddrTag::Gas => GAS_TAG, + BalanceHoldAddrTag::Processing => PROCESSING_TAG, + }; + write!(f, "{}", base16::encode_lower(&[tag])) + } +} + +impl ToBytes for BalanceHoldAddrTag { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + Self::BALANCE_HOLD_ADDR_TAG_LENGTH + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + writer.push(*self as u8); + Ok(()) + } +} + +impl FromBytes for BalanceHoldAddrTag { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + if let Some((byte, rem)) = bytes.split_first() { + let tag = BalanceHoldAddrTag::try_from_u8(*byte).ok_or(bytesrepr::Error::Formatting)?; + Ok((tag, rem)) + } else { + Err(bytesrepr::Error::Formatting) + } + } +} + +/// Balance hold address. +#[derive(PartialOrd, Ord, PartialEq, Eq, Hash, Clone, Copy, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub enum BalanceHoldAddr { + /// Gas hold variant. + Gas { + /// The address of the purse this hold is on. + purse_addr: URefAddr, + /// The block time this hold was placed. + block_time: BlockTime, + }, + /// Processing variant + Processing { + /// The address of the purse this hold is on. + purse_addr: URefAddr, + /// The block time this hold was placed. + block_time: BlockTime, + }, +} + +impl BalanceHoldAddr { + /// The length in bytes of a [`BalanceHoldAddr`] for a gas hold address. 
+ pub const GAS_HOLD_ADDR_LENGTH: usize = UREF_ADDR_LENGTH + + BalanceHoldAddrTag::BALANCE_HOLD_ADDR_TAG_LENGTH + + BLOCKTIME_SERIALIZED_LENGTH; + + /// Creates a Gas variant instance of [`BalanceHoldAddr`]. + pub const fn new_gas(purse_addr: URefAddr, block_time: BlockTime) -> BalanceHoldAddr { + BalanceHoldAddr::Gas { + purse_addr, + block_time, + } + } + + /// Creates a Processing variant instance of [`BalanceHoldAddr`]. + pub const fn new_processing(purse_addr: URefAddr, block_time: BlockTime) -> BalanceHoldAddr { + BalanceHoldAddr::Processing { + purse_addr, + block_time, + } + } + + /// How long is be the serialized value for this instance. + pub fn serialized_length(&self) -> usize { + match self { + BalanceHoldAddr::Gas { + purse_addr, + block_time, + } => { + BalanceHoldAddrTag::BALANCE_HOLD_ADDR_TAG_LENGTH + + ToBytes::serialized_length(purse_addr) + + ToBytes::serialized_length(block_time) + } + BalanceHoldAddr::Processing { + purse_addr, + block_time, + } => { + BalanceHoldAddrTag::BALANCE_HOLD_ADDR_TAG_LENGTH + + ToBytes::serialized_length(purse_addr) + + ToBytes::serialized_length(block_time) + } + } + } + + /// Returns the tag of this instance. + pub fn tag(&self) -> BalanceHoldAddrTag { + match self { + BalanceHoldAddr::Gas { .. } => BalanceHoldAddrTag::Gas, + BalanceHoldAddr::Processing { .. } => BalanceHoldAddrTag::Processing, + } + } + + /// Returns the `[URefAddr]` for the purse associated with this hold. + pub fn purse_addr(&self) -> URefAddr { + match self { + BalanceHoldAddr::Gas { purse_addr, .. } => *purse_addr, + BalanceHoldAddr::Processing { purse_addr, .. } => *purse_addr, + } + } + + /// Returns the `[BlockTime]` when this hold was written. + pub fn block_time(&self) -> BlockTime { + match self { + BalanceHoldAddr::Gas { block_time, .. } => *block_time, + BalanceHoldAddr::Processing { block_time, .. } => *block_time, + } + } + + /// To formatted string. 
+ pub fn to_formatted_string(&self) -> String { + match self { + BalanceHoldAddr::Gas { + purse_addr, + block_time, + } => { + format!( + "{}{}{}", + // also, put the tag in readable form + base16::encode_lower(&GAS_TAG.to_le_bytes()), + base16::encode_lower(purse_addr), + base16::encode_lower(&block_time.value().to_le_bytes()) + ) + } + BalanceHoldAddr::Processing { + purse_addr, + block_time, + } => { + format!( + "{}{}{}", + // also, put the tag in readable form + base16::encode_lower(&PROCESSING_TAG.to_le_bytes()), + base16::encode_lower(purse_addr), + base16::encode_lower(&block_time.value().to_le_bytes()) + ) + } + } + } + + /// From formatted string. + pub fn from_formatted_string(hex: &str) -> Result { + let bytes = checksummed_hex::decode(hex) + .map_err(|error| FromStrError::BalanceHold(error.to_string()))?; + if bytes.is_empty() { + return Err(FromStrError::BalanceHold( + "bytes should not be 0 len".to_string(), + )); + } + let tag_bytes = <[u8; BalanceHoldAddrTag::BALANCE_HOLD_ADDR_TAG_LENGTH]>::try_from( + bytes[0..BalanceHoldAddrTag::BALANCE_HOLD_ADDR_TAG_LENGTH].as_ref(), + ) + .map_err(|err| FromStrError::BalanceHold(err.to_string()))?; + let tag = ::from_le_bytes(tag_bytes); + let tag = BalanceHoldAddrTag::try_from_u8(tag).ok_or_else(|| { + FromStrError::BalanceHold("failed to parse balance hold addr tag".to_string()) + })?; + + let uref_addr = URefAddr::try_from(bytes[1..=UREF_ADDR_LENGTH].as_ref()) + .map_err(|err| FromStrError::BalanceHold(err.to_string()))?; + + // if more tags are added, extend the below logic to handle every case. + // it is possible that it will turn out that all further tags include blocktime + // in which case it can be pulled up out of the tag guard condition. + // however, im erring on the side of future tolerance and guarding it for now. 
+ match tag { + BalanceHoldAddrTag::Gas => { + let block_time_bytes = + <[u8; BLOCKTIME_SERIALIZED_LENGTH]>::try_from(bytes[33..].as_ref()) + .map_err(|err| FromStrError::BalanceHold(err.to_string()))?; + + let block_time_millis = ::from_le_bytes(block_time_bytes); + let block_time = BlockTime::new(block_time_millis); + Ok(BalanceHoldAddr::new_gas(uref_addr, block_time)) + } + BalanceHoldAddrTag::Processing => { + let block_time_bytes = + <[u8; BLOCKTIME_SERIALIZED_LENGTH]>::try_from(bytes[33..].as_ref()) + .map_err(|err| FromStrError::BalanceHold(err.to_string()))?; + + let block_time_millis = ::from_le_bytes(block_time_bytes); + let block_time = BlockTime::new(block_time_millis); + Ok(BalanceHoldAddr::new_processing(uref_addr, block_time)) + } + } + } +} + +impl ToBytes for BalanceHoldAddr { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + buffer.push(self.tag() as u8); + match self { + BalanceHoldAddr::Gas { + purse_addr, + block_time, + } + | BalanceHoldAddr::Processing { + purse_addr, + block_time, + } => { + buffer.append(&mut purse_addr.to_bytes()?); + buffer.append(&mut block_time.to_bytes()?) 
+ } + } + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.serialized_length() + } +} + +impl FromBytes for BalanceHoldAddr { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder): (u8, &[u8]) = FromBytes::from_bytes(bytes)?; + match tag { + tag if tag == BalanceHoldAddrTag::Gas as u8 => { + let (purse_addr, rem) = URefAddr::from_bytes(remainder)?; + let (block_time, rem) = BlockTime::from_bytes(rem)?; + Ok(( + BalanceHoldAddr::Gas { + purse_addr, + block_time, + }, + rem, + )) + } + tag if tag == BalanceHoldAddrTag::Processing as u8 => { + let (purse_addr, rem) = URefAddr::from_bytes(remainder)?; + let (block_time, rem) = BlockTime::from_bytes(rem)?; + Ok(( + BalanceHoldAddr::Processing { + purse_addr, + block_time, + }, + rem, + )) + } + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +impl Default for BalanceHoldAddr { + fn default() -> Self { + BalanceHoldAddr::Gas { + purse_addr: URefAddr::default(), + block_time: BlockTime::default(), + } + } +} + +impl From for Key { + fn from(balance_hold_addr: BalanceHoldAddr) -> Self { + Key::BalanceHold(balance_hold_addr) + } +} + +#[cfg(any(feature = "std", test))] +impl TryFrom for BalanceHoldAddr { + type Error = (); + + fn try_from(value: Key) -> Result { + if let Key::BalanceHold(balance_hold_addr) = value { + Ok(balance_hold_addr) + } else { + Err(()) + } + } +} + +impl Display for BalanceHoldAddr { + fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { + let tag = self.tag(); + match self { + BalanceHoldAddr::Gas { + purse_addr, + block_time, + } + | BalanceHoldAddr::Processing { + purse_addr, + block_time, + } => { + write!( + f, + "{}-{}-{}", + tag, + base16::encode_lower(&purse_addr), + Timestamp::from(block_time.value()) + ) + } + } + } +} + +impl Debug for BalanceHoldAddr { + fn fmt(&self, f: &mut Formatter) -> core::fmt::Result { + match self { + BalanceHoldAddr::Gas { + purse_addr, + block_time, + } => write!( + f, + "BidAddr::Gas({}, 
{})", + base16::encode_lower(&purse_addr), + Timestamp::from(block_time.value()) + ), + BalanceHoldAddr::Processing { + purse_addr, + block_time, + } => write!( + f, + "BidAddr::Processing({}, {})", + base16::encode_lower(&purse_addr), + Timestamp::from(block_time.value()) + ), + } + } +} + +#[cfg(any(feature = "testing", test))] +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> BalanceHoldAddr { + BalanceHoldAddr::new_gas(rng.gen(), BlockTime::new(rng.gen())) + } +} + +#[cfg(test)] +mod tests { + use crate::{bytesrepr, system::mint::BalanceHoldAddr, BlockTime, Timestamp}; + + #[test] + fn serialization_roundtrip() { + let addr = BalanceHoldAddr::new_gas([1; 32], BlockTime::new(Timestamp::now().millis())); + bytesrepr::test_serialization_roundtrip(&addr); + let addr = + BalanceHoldAddr::new_processing([1; 32], BlockTime::new(Timestamp::now().millis())); + bytesrepr::test_serialization_roundtrip(&addr); + } +} + +#[cfg(test)] +mod prop_test_gas { + use proptest::prelude::*; + + use crate::{bytesrepr, gens}; + + proptest! { + #[test] + fn test_variant_gas(addr in gens::balance_hold_addr_arb()) { + bytesrepr::test_serialization_roundtrip(&addr); + } + } +} diff --git a/types/src/system/mint/constants.rs b/types/src/system/mint/constants.rs index 6888ea66cf..a44aa1fcbf 100644 --- a/types/src/system/mint/constants.rs +++ b/types/src/system/mint/constants.rs @@ -17,6 +17,8 @@ pub const ARG_ROUND_SEIGNIORAGE_RATE: &str = "round_seigniorage_rate"; pub const METHOD_MINT: &str = "mint"; /// Named constant for method `reduce_total_supply`. pub const METHOD_REDUCE_TOTAL_SUPPLY: &str = "reduce_total_supply"; +/// Named constant for method `burn`. +pub const METHOD_BURN: &str = "burn"; /// Named constant for (synthetic) method `create` pub const METHOD_CREATE: &str = "create"; /// Named constant for method `balance`. 
@@ -25,6 +27,8 @@ pub const METHOD_BALANCE: &str = "balance"; pub const METHOD_TRANSFER: &str = "transfer"; /// Named constant for method `read_base_round_reward`. pub const METHOD_READ_BASE_ROUND_REWARD: &str = "read_base_round_reward"; +/// Named constant for method `mint_into_existing_purse`. +pub const METHOD_MINT_INTO_EXISTING_PURSE: &str = "mint_into_existing_purse"; /// Storage for mint contract hash. pub const HASH_KEY: &str = "mint_hash"; @@ -36,3 +40,7 @@ pub const BASE_ROUND_REWARD_KEY: &str = "mint_base_round_reward"; pub const TOTAL_SUPPLY_KEY: &str = "total_supply"; /// Storage for mint round seigniorage rate. pub const ROUND_SEIGNIORAGE_RATE_KEY: &str = "round_seigniorage_rate"; +/// Storage for gas hold handling. +pub const MINT_GAS_HOLD_HANDLING_KEY: &str = "gas_hold_handling"; +/// Storage for gas hold interval. +pub const MINT_GAS_HOLD_INTERVAL_KEY: &str = "gas_hold_interval"; diff --git a/types/src/system/mint/entry_points.rs b/types/src/system/mint/entry_points.rs new file mode 100644 index 0000000000..dab8ccf627 --- /dev/null +++ b/types/src/system/mint/entry_points.rs @@ -0,0 +1,126 @@ +use alloc::boxed::Box; + +use crate::{ + addressable_entity::Parameters, + system::mint::{ + ARG_AMOUNT, ARG_ID, ARG_PURSE, ARG_SOURCE, ARG_TARGET, ARG_TO, METHOD_BALANCE, METHOD_BURN, + METHOD_CREATE, METHOD_MINT, METHOD_MINT_INTO_EXISTING_PURSE, METHOD_READ_BASE_ROUND_REWARD, + METHOD_REDUCE_TOTAL_SUPPLY, METHOD_TRANSFER, + }, + CLType, EntityEntryPoint, EntryPointAccess, EntryPointPayment, EntryPointType, EntryPoints, + Parameter, +}; + +/// Returns entry points for a mint system contract. 
+pub fn mint_entry_points() -> EntryPoints { + let mut entry_points = EntryPoints::new(); + + let entry_point = EntityEntryPoint::new( + METHOD_MINT, + vec![Parameter::new(ARG_AMOUNT, CLType::U512)], + CLType::Result { + ok: Box::new(CLType::URef), + err: Box::new(CLType::U8), + }, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + entry_points.add_entry_point(entry_point); + + let entry_point = EntityEntryPoint::new( + METHOD_REDUCE_TOTAL_SUPPLY, + vec![Parameter::new(ARG_AMOUNT, CLType::U512)], + CLType::Result { + ok: Box::new(CLType::Unit), + err: Box::new(CLType::U8), + }, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + entry_points.add_entry_point(entry_point); + + let entry_point = EntityEntryPoint::new( + METHOD_BURN, + vec![ + Parameter::new(ARG_PURSE, CLType::URef), + Parameter::new(ARG_AMOUNT, CLType::U512), + ], + CLType::Result { + ok: Box::new(CLType::Unit), + err: Box::new(CLType::U8), + }, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + entry_points.add_entry_point(entry_point); + + let entry_point = EntityEntryPoint::new( + METHOD_CREATE, + Parameters::new(), + CLType::URef, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + entry_points.add_entry_point(entry_point); + + let entry_point = EntityEntryPoint::new( + METHOD_BALANCE, + vec![Parameter::new(ARG_PURSE, CLType::URef)], + CLType::Option(Box::new(CLType::U512)), + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + entry_points.add_entry_point(entry_point); + + let entry_point = EntityEntryPoint::new( + METHOD_TRANSFER, + vec![ + Parameter::new(ARG_TO, CLType::Option(Box::new(CLType::ByteArray(32)))), + Parameter::new(ARG_SOURCE, CLType::URef), + Parameter::new(ARG_TARGET, CLType::URef), + Parameter::new(ARG_AMOUNT, CLType::U512), + Parameter::new(ARG_ID, CLType::Option(Box::new(CLType::U64))), + 
], + CLType::Result { + ok: Box::new(CLType::Unit), + err: Box::new(CLType::U8), + }, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + entry_points.add_entry_point(entry_point); + + let entry_point = EntityEntryPoint::new( + METHOD_READ_BASE_ROUND_REWARD, + Parameters::new(), + CLType::U512, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + entry_points.add_entry_point(entry_point); + + let entry_point = EntityEntryPoint::new( + METHOD_MINT_INTO_EXISTING_PURSE, + vec![ + Parameter::new(ARG_AMOUNT, CLType::U512), + Parameter::new(ARG_PURSE, CLType::URef), + ], + CLType::Result { + ok: Box::new(CLType::Unit), + err: Box::new(CLType::U8), + }, + EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + entry_points.add_entry_point(entry_point); + + entry_points +} diff --git a/types/src/system/mint/error.rs b/types/src/system/mint/error.rs index fd7bf04740..35280dff56 100644 --- a/types/src/system/mint/error.rs +++ b/types/src/system/mint/error.rs @@ -1,85 +1,177 @@ -//! Home of the Mint contract's [`Error`] type. +//! Home of the Mint contract's [`enum@Error`] type. -use alloc::{fmt, vec::Vec}; -use core::convert::{TryFrom, TryInto}; - -#[cfg(feature = "std")] -use thiserror::Error; +use alloc::vec::Vec; +use core::{ + convert::{TryFrom, TryInto}, + fmt::{self, Display, Formatter}, +}; use crate::{ bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, - AccessRights, CLType, CLTyped, + CLType, CLTyped, }; /// Errors which can occur while executing the Mint contract. #[derive(Debug, Copy, Clone, PartialEq, Eq)] -#[cfg_attr(feature = "std", derive(Error))] #[repr(u8)] +#[non_exhaustive] pub enum Error { /// Insufficient funds to complete the transfer. 
- #[cfg_attr(feature = "std", error("Insufficient funds"))] + /// ``` + /// # use casper_types::system::mint::Error; + /// assert_eq!(0, Error::InsufficientFunds as u8); + /// ``` InsufficientFunds = 0, /// Source purse not found. - #[cfg_attr(feature = "std", error("Source not found"))] + /// ``` + /// # use casper_types::system::mint::Error; + /// assert_eq!(1, Error::SourceNotFound as u8); + /// ``` SourceNotFound = 1, /// Destination purse not found. - #[cfg_attr(feature = "std", error("Destination not found"))] + /// ``` + /// # use casper_types::system::mint::Error; + /// assert_eq!(2, Error::DestNotFound as u8); + /// ``` DestNotFound = 2, - /// See [`PurseError::InvalidURef`]. - #[cfg_attr(feature = "std", error("Invalid URef"))] + /// The given [`URef`](crate::URef) does not reference the account holder's purse, or such a + /// `URef` does not have the required [`AccessRights`](crate::AccessRights). + /// ``` + /// # use casper_types::system::mint::Error; + /// assert_eq!(3, Error::InvalidURef as u8); + /// ``` InvalidURef = 3, - /// See [`PurseError::InvalidAccessRights`]. - #[cfg_attr(feature = "std", error("Invalid AccessRights"))] + /// The source purse is not writeable (see [`URef::is_writeable`](crate::URef::is_writeable)), + /// or the destination purse is not addable (see + /// [`URef::is_addable`](crate::URef::is_addable)). + /// ``` + /// # use casper_types::system::mint::Error; + /// assert_eq!(4, Error::InvalidAccessRights as u8); + /// ``` InvalidAccessRights = 4, /// Tried to create a new purse with a non-zero initial balance. - #[cfg_attr(feature = "std", error("Invalid non-empty purse creation"))] + /// ``` + /// # use casper_types::system::mint::Error; + /// assert_eq!(5, Error::InvalidNonEmptyPurseCreation as u8); + /// ``` InvalidNonEmptyPurseCreation = 5, /// Failed to read from local or global storage. 
- #[cfg_attr(feature = "std", error("Storage error"))] + /// ``` + /// # use casper_types::system::mint::Error; + /// assert_eq!(6, Error::Storage as u8); + /// ``` Storage = 6, /// Purse not found while trying to get balance. - #[cfg_attr(feature = "std", error("Purse not found"))] + /// ``` + /// # use casper_types::system::mint::Error; + /// assert_eq!(7, Error::PurseNotFound as u8); + /// ``` PurseNotFound = 7, /// Unable to obtain a key by its name. - #[cfg_attr(feature = "std", error("Missing key"))] + /// ``` + /// # use casper_types::system::mint::Error; + /// assert_eq!(8, Error::MissingKey as u8); + /// ``` MissingKey = 8, /// Total supply not found. - #[cfg_attr(feature = "std", error("Total supply not found"))] + /// ``` + /// # use casper_types::system::mint::Error; + /// assert_eq!(9, Error::TotalSupplyNotFound as u8); + /// ``` TotalSupplyNotFound = 9, /// Failed to record transfer. - #[cfg_attr(feature = "std", error("Failed to record transfer"))] + /// ``` + /// # use casper_types::system::mint::Error; + /// assert_eq!(10, Error::RecordTransferFailure as u8); + /// ``` RecordTransferFailure = 10, /// Invalid attempt to reduce total supply. - #[cfg_attr(feature = "std", error("Invalid attempt to reduce total supply"))] + /// ``` + /// # use casper_types::system::mint::Error; + /// assert_eq!(11, Error::InvalidTotalSupplyReductionAttempt as u8); + /// ``` InvalidTotalSupplyReductionAttempt = 11, /// Failed to create new uref. - #[cfg_attr(feature = "std", error("Failed to create new uref"))] + /// ``` + /// # use casper_types::system::mint::Error; + /// assert_eq!(12, Error::NewURef as u8); + /// ``` NewURef = 12, /// Failed to put key. - #[cfg_attr(feature = "std", error("Failed to put key"))] + /// ``` + /// # use casper_types::system::mint::Error; + /// assert_eq!(13, Error::PutKey as u8); + /// ``` PutKey = 13, - /// Failed to write local key. 
- #[cfg_attr(feature = "std", error("Failed to write local key"))] - WriteLocal = 14, + /// Failed to write to dictionary. + /// ``` + /// # use casper_types::system::mint::Error; + /// assert_eq!(14, Error::WriteDictionary as u8); + /// ``` + WriteDictionary = 14, /// Failed to create a [`crate::CLValue`]. - #[cfg_attr(feature = "std", error("Failed to create a CLValue"))] + /// ``` + /// # use casper_types::system::mint::Error; + /// assert_eq!(15, Error::CLValue as u8); + /// ``` CLValue = 15, /// Failed to serialize data. - #[cfg_attr(feature = "std", error("Failed to serialize data"))] + /// ``` + /// # use casper_types::system::mint::Error; + /// assert_eq!(16, Error::Serialize as u8); + /// ``` Serialize = 16, /// Source and target purse [`crate::URef`]s are equal. - #[cfg_attr(feature = "std", error("Invalid target purse"))] + /// ``` + /// # use casper_types::system::mint::Error; + /// assert_eq!(17, Error::EqualSourceAndTarget as u8); + /// ``` EqualSourceAndTarget = 17, + /// An arithmetic overflow has occurred. + /// ``` + /// # use casper_types::system::mint::Error; + /// assert_eq!(18, Error::ArithmeticOverflow as u8); + /// ``` + ArithmeticOverflow = 18, // NOTE: These variants below will be removed once support for WASM system contracts will be // dropped. #[doc(hidden)] - #[cfg_attr(feature = "std", error("GasLimit"))] - GasLimit = 18, + GasLimit = 19, + + /// Raised when an entry point is called from invalid account context. + InvalidContext = 20, + + /// Session code tried to transfer more CSPR than user approved. + /// ``` + /// # use casper_types::system::mint::Error; + /// assert_eq!(21, Error::UnapprovedSpendingAmount as u8); + UnapprovedSpendingAmount = 21, + + /// Failed to transfer tokens on a private chain. + /// ``` + /// # use casper_types::system::mint::Error; + /// assert_eq!(22, Error::DisabledUnrestrictedTransfers as u8); + DisabledUnrestrictedTransfers = 22, + + /// Attempt to access a record using forged permissions. 
+ /// ``` + /// # use casper_types::system::mint::Error; + /// assert_eq!(23, Error::ForgedReference as u8); + ForgedReference = 23, + /// Available balance can never be greater than total balance. + /// ``` + /// # use casper_types::system::mint::Error; + /// assert_eq!(24, Error::InconsistentBalances as u8); + InconsistentBalances = 24, + /// Unable to get the system registry. + /// ``` + /// # use casper_types::system::mint::Error; + /// assert_eq!(25, Error::UnableToGetSystemRegistry as u8); + UnableToGetSystemRegistry = 25, #[cfg(test)] #[doc(hidden)] - #[cfg_attr(feature = "std", error("Sentinel error"))] Sentinel, } @@ -87,20 +179,6 @@ pub enum Error { #[cfg(test)] const MAX_ERROR_VALUE: u8 = Error::Sentinel as u8; -impl From for Error { - fn from(purse_error: PurseError) -> Error { - match purse_error { - PurseError::InvalidURef => Error::InvalidURef, - PurseError::InvalidAccessRights(_) => { - // This one does not carry state from PurseError to the new Error enum. The reason - // is that Error is supposed to be simple in serialization and deserialization, so - // extra state is currently discarded. 
- Error::InvalidAccessRights - } - } - } -} - impl CLTyped for Error { fn cl_type() -> CLType { CLType::U8 @@ -136,11 +214,22 @@ impl TryFrom for Error { } d if d == Error::NewURef as u8 => Ok(Error::NewURef), d if d == Error::PutKey as u8 => Ok(Error::PutKey), - d if d == Error::WriteLocal as u8 => Ok(Error::WriteLocal), + d if d == Error::WriteDictionary as u8 => Ok(Error::WriteDictionary), d if d == Error::CLValue as u8 => Ok(Error::CLValue), d if d == Error::Serialize as u8 => Ok(Error::Serialize), d if d == Error::EqualSourceAndTarget as u8 => Ok(Error::EqualSourceAndTarget), + d if d == Error::ArithmeticOverflow as u8 => Ok(Error::ArithmeticOverflow), d if d == Error::GasLimit as u8 => Ok(Error::GasLimit), + d if d == Error::InvalidContext as u8 => Ok(Error::InvalidContext), + d if d == Error::UnapprovedSpendingAmount as u8 => Ok(Error::UnapprovedSpendingAmount), + d if d == Error::DisabledUnrestrictedTransfers as u8 => { + Ok(Error::DisabledUnrestrictedTransfers) + } + d if d == Error::ForgedReference as u8 => Ok(Error::ForgedReference), + d if d == Error::InconsistentBalances as u8 => Ok(Error::InconsistentBalances), + d if d == Error::UnableToGetSystemRegistry as u8 => { + Ok(Error::UnableToGetSystemRegistry) + } _ => Err(TryFromU8ForError(())), } } @@ -169,38 +258,58 @@ impl FromBytes for Error { } } -/// Errors relating to validity of source or destination purses. -#[derive(Debug, Copy, Clone)] -pub enum PurseError { - /// The given [`URef`](crate::URef) does not reference the account holder's purse, or such a - /// [`URef`](crate::URef) does not have the required [`AccessRights`]. - InvalidURef, - /// The source purse is not writeable (see [`URef::is_writeable`](crate::URef::is_writeable)), - /// or the destination purse is not addable (see - /// [`URef::is_addable`](crate::URef::is_addable)). 
- InvalidAccessRights(Option), -} - -impl fmt::Display for PurseError { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { +impl Display for Error { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { match self { - PurseError::InvalidURef => write!(f, "invalid uref"), - PurseError::InvalidAccessRights(maybe_access_rights) => { - write!(f, "invalid access rights: {:?}", maybe_access_rights) + Error::InsufficientFunds => formatter.write_str("Insufficient funds"), + Error::SourceNotFound => formatter.write_str("Source not found"), + Error::DestNotFound => formatter.write_str("Destination not found"), + Error::InvalidURef => formatter.write_str("Invalid URef"), + Error::InvalidAccessRights => formatter.write_str("Invalid AccessRights"), + Error::InvalidNonEmptyPurseCreation => { + formatter.write_str("Invalid non-empty purse creation") + } + Error::Storage => formatter.write_str("Storage error"), + Error::PurseNotFound => formatter.write_str("Purse not found"), + Error::MissingKey => formatter.write_str("Missing key"), + Error::TotalSupplyNotFound => formatter.write_str("Total supply not found"), + Error::RecordTransferFailure => formatter.write_str("Failed to record transfer"), + Error::InvalidTotalSupplyReductionAttempt => { + formatter.write_str("Invalid attempt to reduce total supply") + } + Error::NewURef => formatter.write_str("Failed to create new uref"), + Error::PutKey => formatter.write_str("Failed to put key"), + Error::WriteDictionary => formatter.write_str("Failed to write dictionary"), + Error::CLValue => formatter.write_str("Failed to create a CLValue"), + Error::Serialize => formatter.write_str("Failed to serialize data"), + Error::EqualSourceAndTarget => formatter.write_str("Invalid target purse"), + Error::ArithmeticOverflow => formatter.write_str("Arithmetic overflow has occurred"), + Error::GasLimit => formatter.write_str("GasLimit"), + Error::InvalidContext => formatter.write_str("Invalid context"), + 
Error::UnapprovedSpendingAmount => formatter.write_str("Unapproved spending amount"), + Error::DisabledUnrestrictedTransfers => { + formatter.write_str("Disabled unrestricted transfers") } + Error::ForgedReference => formatter.write_str("Forged reference"), + Error::InconsistentBalances => { + formatter.write_str("Available balance can never be greater than total balance") + } + Error::UnableToGetSystemRegistry => { + formatter.write_str("Unable to get the system registry") + } + #[cfg(test)] + Error::Sentinel => formatter.write_str("Sentinel error"), } } } #[cfg(test)] mod tests { - use std::convert::TryFrom; - use super::{Error, TryFromU8ForError, MAX_ERROR_VALUE}; #[test] fn error_round_trips() { - for i in 0..=u8::max_value() { + for i in 0..=u8::MAX { match Error::try_from(i) { Ok(error) if i < MAX_ERROR_VALUE => assert_eq!(error as u8, i), Ok(error) => panic!( diff --git a/types/src/system/mint/mod.rs b/types/src/system/mint/mod.rs deleted file mode 100644 index 5a51dd7319..0000000000 --- a/types/src/system/mint/mod.rs +++ /dev/null @@ -1,142 +0,0 @@ -//! Contains implementation of a Mint contract functionality. -mod constants; -mod error; -mod runtime_provider; -mod storage_provider; -mod system_provider; - -use num_rational::Ratio; - -use crate::{account::AccountHash, Key, PublicKey, URef, U512}; - -pub use crate::system::mint::{ - constants::*, error::Error, runtime_provider::RuntimeProvider, - storage_provider::StorageProvider, system_provider::SystemProvider, -}; - -/// Mint trait. -pub trait Mint: RuntimeProvider + StorageProvider + SystemProvider { - /// Mint new token with given `initial_balance` balance. Returns new purse on success, otherwise - /// an error. 
- fn mint(&mut self, initial_balance: U512) -> Result { - let caller = self.get_caller(); - let is_empty_purse = initial_balance.is_zero(); - if !is_empty_purse && caller != PublicKey::System.to_account_hash() { - return Err(Error::InvalidNonEmptyPurseCreation); - } - - let purse_uref: URef = self.new_uref(())?; - self.write_balance(purse_uref, initial_balance)?; - - if !is_empty_purse { - // get total supply uref if exists, otherwise create it. - let total_supply_uref = match self.get_key(TOTAL_SUPPLY_KEY) { - None => { - // create total_supply value and track in mint context - let uref: URef = self.new_uref(U512::zero())?; - self.put_key(TOTAL_SUPPLY_KEY, uref.into())?; - uref - } - Some(Key::URef(uref)) => uref, - Some(_) => return Err(Error::MissingKey), - }; - // increase total supply - self.add(total_supply_uref, initial_balance)?; - } - - Ok(purse_uref) - } - - /// Reduce total supply by `amount`. Returns unit on success, otherwise - /// an error. - fn reduce_total_supply(&mut self, amount: U512) -> Result<(), Error> { - // only system may reduce total supply - let caller = self.get_caller(); - if caller != PublicKey::System.to_account_hash() { - return Err(Error::InvalidTotalSupplyReductionAttempt); - } - - if amount.is_zero() { - return Ok(()); // no change to supply - } - - // get total supply or error - let total_supply_uref = match self.get_key(TOTAL_SUPPLY_KEY) { - Some(Key::URef(uref)) => uref, - Some(_) => return Err(Error::MissingKey), // TODO - None => return Err(Error::MissingKey), - }; - let total_supply: U512 = self - .read(total_supply_uref)? - .ok_or(Error::TotalSupplyNotFound)?; - - // decrease total supply - let reduced_total_supply = total_supply - amount; - - // update total supply - self.write(total_supply_uref, reduced_total_supply)?; - - Ok(()) - } - - /// Read balance of given `purse`. - fn balance(&mut self, purse: URef) -> Result, Error> { - match self.read_balance(purse)? 
{ - some @ Some(_) => Ok(some), - None => Err(Error::PurseNotFound), - } - } - - /// Transfers `amount` of tokens from `source` purse to a `target` purse. - fn transfer( - &mut self, - maybe_to: Option, - source: URef, - target: URef, - amount: U512, - id: Option, - ) -> Result<(), Error> { - if !source.is_writeable() || !target.is_addable() { - return Err(Error::InvalidAccessRights); - } - let source_balance: U512 = match self.read_balance(source)? { - Some(source_balance) => source_balance, - None => return Err(Error::SourceNotFound), - }; - if amount > source_balance { - return Err(Error::InsufficientFunds); - } - if self.read_balance(target)?.is_none() { - return Err(Error::DestNotFound); - } - self.write_balance(source, source_balance - amount)?; - self.add_balance(target, amount)?; - self.record_transfer(maybe_to, source, target, amount, id)?; - Ok(()) - } - - /// Retrieves the base round reward. - fn read_base_round_reward(&mut self) -> Result { - let total_supply_uref = match self.get_key(TOTAL_SUPPLY_KEY) { - Some(Key::URef(uref)) => uref, - Some(_) => return Err(Error::MissingKey), // TODO - None => return Err(Error::MissingKey), - }; - let total_supply: U512 = self - .read(total_supply_uref)? - .ok_or(Error::TotalSupplyNotFound)?; - - let round_seigniorage_rate_uref = match self.get_key(ROUND_SEIGNIORAGE_RATE_KEY) { - Some(Key::URef(uref)) => uref, - Some(_) => return Err(Error::MissingKey), // TODO - None => return Err(Error::MissingKey), - }; - let round_seigniorage_rate: Ratio = self - .read(round_seigniorage_rate_uref)? 
- .ok_or(Error::TotalSupplyNotFound)?; - - let ret = (round_seigniorage_rate * Ratio::from(total_supply)).to_integer(); - - Ok(ret) - } -} diff --git a/types/src/system/mint/runtime_provider.rs b/types/src/system/mint/runtime_provider.rs deleted file mode 100644 index b30fd29dec..0000000000 --- a/types/src/system/mint/runtime_provider.rs +++ /dev/null @@ -1,13 +0,0 @@ -use crate::{account::AccountHash, system::mint::Error, Key}; - -/// Provider of runtime host functionality. -pub trait RuntimeProvider { - /// This method should return the caller of the current context. - fn get_caller(&self) -> AccountHash; - - /// This method should handle storing given [`Key`] under `name`. - fn put_key(&mut self, name: &str, key: Key) -> Result<(), Error>; - - /// This method should handle obtaining a given named [`Key`] under a `name`. - fn get_key(&self, name: &str) -> Option; -} diff --git a/types/src/system/mint/storage_provider.rs b/types/src/system/mint/storage_provider.rs deleted file mode 100644 index 3a618124b8..0000000000 --- a/types/src/system/mint/storage_provider.rs +++ /dev/null @@ -1,29 +0,0 @@ -use crate::{ - bytesrepr::{FromBytes, ToBytes}, - system::mint::Error, - CLTyped, URef, U512, -}; - -/// Provides functionality of a contract storage. -pub trait StorageProvider { - /// Create new [`URef`]. - fn new_uref(&mut self, init: T) -> Result; - - /// Read data from [`URef`]. - fn read(&mut self, uref: URef) -> Result, Error>; - - /// Write data under a [`URef`]. - fn write(&mut self, uref: URef, value: T) -> Result<(), Error>; - - /// Add data to a [`URef`]. - fn add(&mut self, uref: URef, value: T) -> Result<(), Error>; - - /// Read balance. - fn read_balance(&mut self, uref: URef) -> Result, Error>; - - /// Write balance. - fn write_balance(&mut self, uref: URef, balance: U512) -> Result<(), Error>; - - /// Add amount to an existing balance. 
- fn add_balance(&mut self, uref: URef, value: U512) -> Result<(), Error>; -} diff --git a/types/src/system/mod.rs b/types/src/system/mod.rs deleted file mode 100644 index 70d6801824..0000000000 --- a/types/src/system/mod.rs +++ /dev/null @@ -1,366 +0,0 @@ -//! System modules, formerly known as "system contracts" -pub mod auction; -pub mod handle_payment; -pub mod mint; -pub mod standard_payment; - -pub use error::Error; -pub use system_contract_type::{ - SystemContractType, AUCTION, HANDLE_PAYMENT, MINT, STANDARD_PAYMENT, -}; - -mod error { - #[cfg(feature = "std")] - use thiserror::Error; - - use crate::system::{auction, handle_payment, mint}; - - /// An aggregate enum error with variants for each system contract's error. - #[derive(Debug, Copy, Clone)] - #[cfg_attr(feature = "std", derive(Error))] - pub enum Error { - /// Contains a [`mint::Error`]. - #[cfg_attr(feature = "std", error("Mint error: {}", _0))] - Mint(mint::Error), - /// Contains a [`handle_payment::Error`]. - #[cfg_attr(feature = "std", error("HandlePayment error: {}", _0))] - HandlePayment(handle_payment::Error), - /// Contains a [`auction::Error`]. - #[cfg_attr(feature = "std", error("Auction error: {}", _0))] - Auction(auction::Error), - } - - impl From for Error { - fn from(error: mint::Error) -> Error { - Error::Mint(error) - } - } - - impl From for Error { - fn from(error: handle_payment::Error) -> Error { - Error::HandlePayment(error) - } - } - - impl From for Error { - fn from(error: auction::Error) -> Error { - Error::Auction(error) - } - } -} - -mod system_contract_type { - //! Home of system contract type enum. - - use core::{ - convert::TryFrom, - fmt::{self, Display, Formatter}, - }; - - use crate::{ApiError, ContractHash, HashAddr, U256}; - - /// System contract types. - /// - /// Represents a specific system contract and allows easy conversion into a [`ContractHash`]. - #[derive(Debug, Copy, Clone, PartialEq)] - #[repr(u8)] - pub enum SystemContractType { - /// Mint contract. 
- Mint = 1, - /// Auction contract. - Auction = 2, - /// Handle Payment contract. - HandlePayment = 3, - /// Standard Payment contract. - StandardPayment = 4, - } - - /// Name of mint system contract - pub const MINT: &str = "mint"; - /// Name of auction system contract - pub const AUCTION: &str = "auction"; - /// Name of handle payment system contract - pub const HANDLE_PAYMENT: &str = "handle payment"; - /// Name of standard payment system contract - pub const STANDARD_PAYMENT: &str = "standard payment"; - - impl SystemContractType { - fn into_address(self) -> HashAddr { - let address_value = U256::from(self as u8); - let mut address: HashAddr = Default::default(); - address_value.to_big_endian(&mut address); - address - } - - /// Returns a fixed [`ContractHash`] value for given contract type. - pub fn into_contract_hash(self) -> ContractHash { - ContractHash::new(self.into_address()) - } - } - - impl From for u32 { - fn from(system_contract_type: SystemContractType) -> u32 { - system_contract_type as u32 - } - } - - // This conversion is not intended to be used by third party crates. 
- #[doc(hidden)] - impl TryFrom for SystemContractType { - type Error = ApiError; - - fn try_from(value: u32) -> Result { - match value { - x if x == SystemContractType::Mint as u32 => Ok(SystemContractType::Mint), - x if x == SystemContractType::Auction as u32 => Ok(SystemContractType::Auction), - x if x == SystemContractType::StandardPayment as u32 => { - Ok(SystemContractType::StandardPayment) - } - x if x == SystemContractType::HandlePayment as u32 => { - Ok(SystemContractType::HandlePayment) - } - _ => Err(ApiError::InvalidSystemContract), - } - } - } - - impl Display for SystemContractType { - fn fmt(&self, f: &mut Formatter) -> fmt::Result { - match *self { - SystemContractType::Mint => write!(f, "{}", MINT), - SystemContractType::HandlePayment => write!(f, "{}", HANDLE_PAYMENT), - SystemContractType::StandardPayment => write!(f, "{}", STANDARD_PAYMENT), - SystemContractType::Auction => write!(f, "{}", AUCTION), - } - } - } - - #[cfg(test)] - mod tests { - use std::string::ToString; - - use super::*; - - const MINT_CONTRACT_HASH: ContractHash = ContractHash::new([ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - SystemContractType::Mint as u8, - ]); - const HANDLE_PAYMENT_CONTRACT_HASH: ContractHash = ContractHash::new([ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - SystemContractType::HandlePayment as u8, - ]); - const STANDARD_PAYMENT_CONTRACT_HASH: ContractHash = ContractHash::new([ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - SystemContractType::StandardPayment as u8, - ]); - const AUCTION_CONTRACT_HASH: ContractHash = ContractHash::new([ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 
0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - SystemContractType::Auction as u8, - ]); - - #[test] - fn get_index_of_mint_contract() { - let index: u32 = SystemContractType::Mint.into(); - assert_eq!(index, SystemContractType::Mint as u32); - assert_eq!(SystemContractType::Mint.to_string(), MINT); - } - - #[test] - fn get_index_of_handle_payment_contract() { - let index: u32 = SystemContractType::HandlePayment.into(); - assert_eq!(index, SystemContractType::HandlePayment as u32); - assert_eq!( - SystemContractType::HandlePayment.to_string(), - HANDLE_PAYMENT - ); - } - - #[test] - fn get_index_of_standard_payment_contract() { - let index: u32 = SystemContractType::StandardPayment.into(); - assert_eq!(index, SystemContractType::StandardPayment as u32); - assert_eq!( - SystemContractType::StandardPayment.to_string(), - STANDARD_PAYMENT - ); - } - - #[test] - fn get_index_of_auction_contract() { - let index: u32 = SystemContractType::Auction.into(); - assert_eq!(index, SystemContractType::Auction as u32); - assert_eq!(SystemContractType::Auction.to_string(), AUCTION); - } - - #[test] - fn create_invalid_variant_from_int() { - assert!(SystemContractType::try_from(0).is_err()); - } - - #[test] - fn create_mint_variant_from_int() { - let mint = SystemContractType::try_from(1).ok().unwrap(); - assert_eq!(mint, SystemContractType::Mint); - } - - #[test] - fn create_auction_variant_from_int() { - let auction = SystemContractType::try_from(2).ok().unwrap(); - assert_eq!(auction, SystemContractType::Auction); - } - - #[test] - fn create_handle_payment_from_int() { - let handle_payment = SystemContractType::try_from(3).ok().unwrap(); - assert_eq!(handle_payment, SystemContractType::HandlePayment); - } - - #[test] - fn create_unknown_system_contract_variant() { - assert!(SystemContractType::try_from(6).is_err()); - assert!(SystemContractType::try_from(5).is_err()); - assert!(SystemContractType::try_from(10).is_err()); - 
assert!(SystemContractType::try_from(u32::max_value()).is_err()); - } - - #[test] - fn create_contract_hash_from() { - assert_eq!( - SystemContractType::Auction.into_contract_hash(), - AUCTION_CONTRACT_HASH - ); - assert_eq!( - SystemContractType::HandlePayment.into_contract_hash(), - HANDLE_PAYMENT_CONTRACT_HASH - ); - assert_eq!( - SystemContractType::Mint.into_contract_hash(), - MINT_CONTRACT_HASH - ); - assert_eq!( - SystemContractType::StandardPayment.into_contract_hash(), - STANDARD_PAYMENT_CONTRACT_HASH - ); - } - } -} diff --git a/types/src/system/prepayment.rs b/types/src/system/prepayment.rs new file mode 100644 index 0000000000..2ac4f1dbf2 --- /dev/null +++ b/types/src/system/prepayment.rs @@ -0,0 +1,4 @@ +//! Contains implementation of the gas prepayment system +mod prepayment_kind; + +pub use prepayment_kind::PrepaymentKind; diff --git a/types/src/system/prepayment/prepayment_kind.rs b/types/src/system/prepayment/prepayment_kind.rs new file mode 100644 index 0000000000..179e8c2e89 --- /dev/null +++ b/types/src/system/prepayment/prepayment_kind.rs @@ -0,0 +1,42 @@ +use crate::{ + bytesrepr, + bytesrepr::{Bytes, ToBytes, U8_SERIALIZED_LENGTH}, + Digest, +}; +use alloc::vec::Vec; +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +/// Container for bytes recording location, type and data for a gas pre payment +#[derive(Eq, PartialEq, Debug, Clone, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub struct PrepaymentKind { + receipt: Digest, + prepayment_kind: u8, + prepayment_data: Bytes, +} + +impl ToBytes for PrepaymentKind { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.receipt.serialized_length() + + 
U8_SERIALIZED_LENGTH + + self.prepayment_data.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.receipt.write_bytes(writer)?; + self.prepayment_kind.write_bytes(writer)?; + self.prepayment_data.write_bytes(writer)?; + Ok(()) + } +} diff --git a/types/src/system/standard_payment.rs b/types/src/system/standard_payment.rs new file mode 100644 index 0000000000..92c3fab30c --- /dev/null +++ b/types/src/system/standard_payment.rs @@ -0,0 +1,6 @@ +//! Contains implementation of a standard payment contract implementation. +mod constants; +mod entry_points; + +pub use constants::*; +pub use entry_points::standard_payment_entry_points; diff --git a/types/src/system/standard_payment/account_provider.rs b/types/src/system/standard_payment/account_provider.rs deleted file mode 100644 index edab1cc690..0000000000 --- a/types/src/system/standard_payment/account_provider.rs +++ /dev/null @@ -1,7 +0,0 @@ -use crate::{ApiError, URef}; - -/// Provider of an account related functionality. -pub trait AccountProvider { - /// Get currently executing account's purse. - fn get_main_purse(&self) -> Result; -} diff --git a/types/src/system/standard_payment/entry_points.rs b/types/src/system/standard_payment/entry_points.rs new file mode 100644 index 0000000000..a0ca001f09 --- /dev/null +++ b/types/src/system/standard_payment/entry_points.rs @@ -0,0 +1,27 @@ +use alloc::{boxed::Box, string::ToString}; + +use crate::{ + system::standard_payment::{ARG_AMOUNT, METHOD_PAY}, + CLType, EntityEntryPoint, EntryPointAccess, EntryPointPayment, EntryPointType, EntryPoints, + Parameter, +}; + +/// Creates standard payment contract entry points. 
+pub fn standard_payment_entry_points() -> EntryPoints { + let mut entry_points = EntryPoints::new(); + + let entry_point = EntityEntryPoint::new( + METHOD_PAY.to_string(), + vec![Parameter::new(ARG_AMOUNT, CLType::U512)], + CLType::Result { + ok: Box::new(CLType::Unit), + err: Box::new(CLType::U32), + }, + EntryPointAccess::Public, + EntryPointType::Caller, + EntryPointPayment::Caller, + ); + entry_points.add_entry_point(entry_point); + + entry_points +} diff --git a/types/src/system/standard_payment/handle_payment_provider.rs b/types/src/system/standard_payment/handle_payment_provider.rs deleted file mode 100644 index 62f4aad1dc..0000000000 --- a/types/src/system/standard_payment/handle_payment_provider.rs +++ /dev/null @@ -1,7 +0,0 @@ -use crate::{ApiError, URef}; - -/// Provider of handle payment functionality. -pub trait HandlePaymentProvider { - /// Get payment purse for given deploy. - fn get_payment_purse(&mut self) -> Result; -} diff --git a/types/src/system/standard_payment/mint_provider.rs b/types/src/system/standard_payment/mint_provider.rs deleted file mode 100644 index 92c1b4a785..0000000000 --- a/types/src/system/standard_payment/mint_provider.rs +++ /dev/null @@ -1,12 +0,0 @@ -use crate::{ApiError, URef, U512}; - -/// Provides an access to mint. -pub trait MintProvider { - /// Transfer `amount` of tokens from `source` purse to a `target` purse. - fn transfer_purse_to_purse( - &mut self, - source: URef, - target: URef, - amount: U512, - ) -> Result<(), ApiError>; -} diff --git a/types/src/system/standard_payment/mod.rs b/types/src/system/standard_payment/mod.rs deleted file mode 100644 index 620e4fdfb5..0000000000 --- a/types/src/system/standard_payment/mod.rs +++ /dev/null @@ -1,24 +0,0 @@ -//! Contains implementation of a standard payment contract implementation. 
-mod account_provider; -mod constants; -mod handle_payment_provider; -mod mint_provider; - -use core::marker::Sized; - -use crate::{ApiError, U512}; - -pub use crate::system::standard_payment::{ - account_provider::AccountProvider, constants::*, - handle_payment_provider::HandlePaymentProvider, mint_provider::MintProvider, -}; - -/// Implementation of a standard payment contract. -pub trait StandardPayment: AccountProvider + MintProvider + HandlePaymentProvider + Sized { - /// Pay `amount` to a payment purse. - fn pay(&mut self, amount: U512) -> Result<(), ApiError> { - let main_purse = self.get_main_purse()?; - let payment_purse = self.get_payment_purse()?; - self.transfer_purse_to_purse(main_purse, payment_purse, amount) - } -} diff --git a/types/src/system/system_contract_type.rs b/types/src/system/system_contract_type.rs new file mode 100644 index 0000000000..2bfe9198a1 --- /dev/null +++ b/types/src/system/system_contract_type.rs @@ -0,0 +1,249 @@ +//! Home of system contract type enum. + +use alloc::{ + string::{String, ToString}, + vec::Vec, +}; +use core::{ + convert::TryFrom, + fmt::{self, Display, Formatter}, +}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::{ + distributions::{Distribution, Standard}, + Rng, +}; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use crate::{ + bytesrepr::{self, Error, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + ApiError, EntryPoints, +}; + +const MINT_TAG: u8 = 0; +const HANDLE_PAYMENT_TAG: u8 = 1; +const STANDARD_PAYMENT_TAG: u8 = 2; +const AUCTION_TAG: u8 = 3; + +use super::{ + auction::auction_entry_points, handle_payment::handle_payment_entry_points, + mint::mint_entry_points, standard_payment::standard_payment_entry_points, +}; + +/// System contract types. +/// +/// Used by converting to a `u32` and passing as the `system_contract_index` argument of +/// `ext_ffi::casper_get_system_contract()`. 
+#[derive( + Debug, Clone, PartialEq, Eq, Default, PartialOrd, Ord, Hash, Serialize, Deserialize, Copy, +)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub enum SystemEntityType { + /// Mint contract. + #[default] + Mint, + /// Handle Payment contract. + HandlePayment, + /// Standard Payment contract. + StandardPayment, + /// Auction contract. + Auction, +} + +impl ToBytes for SystemEntityType { + fn to_bytes(&self) -> Result, Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + match self { + SystemEntityType::Mint => { + writer.push(MINT_TAG); + } + SystemEntityType::HandlePayment => { + writer.push(HANDLE_PAYMENT_TAG); + } + SystemEntityType::StandardPayment => { + writer.push(STANDARD_PAYMENT_TAG); + } + SystemEntityType::Auction => writer.push(AUCTION_TAG), + } + Ok(()) + } +} + +impl FromBytes for SystemEntityType { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + match tag { + MINT_TAG => Ok((SystemEntityType::Mint, remainder)), + HANDLE_PAYMENT_TAG => Ok((SystemEntityType::HandlePayment, remainder)), + STANDARD_PAYMENT_TAG => Ok((SystemEntityType::StandardPayment, remainder)), + AUCTION_TAG => Ok((SystemEntityType::Auction, remainder)), + _ => Err(Error::Formatting), + } + } +} + +#[cfg(any(feature = "testing", test))] +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> SystemEntityType { + match rng.gen_range(0..=3) { + 0 => SystemEntityType::Mint, + 1 => SystemEntityType::Auction, + 2 => SystemEntityType::StandardPayment, + 3 => SystemEntityType::HandlePayment, + _ => unreachable!(), + } + } +} + +/// Name of mint system contract +pub const MINT: &str = "mint"; +/// Name of handle payment system contract +pub 
const HANDLE_PAYMENT: &str = "handle payment"; +/// Name of standard payment system contract +pub const STANDARD_PAYMENT: &str = "standard payment"; +/// Name of auction system contract +pub const AUCTION: &str = "auction"; + +impl SystemEntityType { + /// Returns the name of the system contract. + pub fn entity_name(&self) -> String { + match self { + SystemEntityType::Mint => MINT.to_string(), + SystemEntityType::HandlePayment => HANDLE_PAYMENT.to_string(), + SystemEntityType::StandardPayment => STANDARD_PAYMENT.to_string(), + SystemEntityType::Auction => AUCTION.to_string(), + } + } + + /// Returns the entrypoint of the system contract. + pub fn entry_points(&self) -> EntryPoints { + match self { + SystemEntityType::Mint => mint_entry_points(), + SystemEntityType::HandlePayment => handle_payment_entry_points(), + SystemEntityType::StandardPayment => standard_payment_entry_points(), + SystemEntityType::Auction => auction_entry_points(), + } + } +} + +impl From for u32 { + fn from(system_contract_type: SystemEntityType) -> u32 { + match system_contract_type { + SystemEntityType::Mint => 0, + SystemEntityType::HandlePayment => 1, + SystemEntityType::StandardPayment => 2, + SystemEntityType::Auction => 3, + } + } +} + +// This conversion is not intended to be used by third party crates. 
+#[doc(hidden)] +impl TryFrom for SystemEntityType { + type Error = ApiError; + fn try_from(value: u32) -> Result { + match value { + 0 => Ok(SystemEntityType::Mint), + 1 => Ok(SystemEntityType::HandlePayment), + 2 => Ok(SystemEntityType::StandardPayment), + 3 => Ok(SystemEntityType::Auction), + _ => Err(ApiError::InvalidSystemContract), + } + } +} + +impl Display for SystemEntityType { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + match *self { + SystemEntityType::Mint => write!(f, "{}", MINT), + SystemEntityType::HandlePayment => write!(f, "{}", HANDLE_PAYMENT), + SystemEntityType::StandardPayment => write!(f, "{}", STANDARD_PAYMENT), + SystemEntityType::Auction => write!(f, "{}", AUCTION), + } + } +} + +#[cfg(test)] +mod tests { + use std::string::ToString; + + use super::*; + + #[test] + fn get_index_of_mint_contract() { + let index: u32 = SystemEntityType::Mint.into(); + assert_eq!(index, 0u32); + assert_eq!(SystemEntityType::Mint.to_string(), MINT); + } + + #[test] + fn get_index_of_handle_payment_contract() { + let index: u32 = SystemEntityType::HandlePayment.into(); + assert_eq!(index, 1u32); + assert_eq!(SystemEntityType::HandlePayment.to_string(), HANDLE_PAYMENT); + } + + #[test] + fn get_index_of_standard_payment_contract() { + let index: u32 = SystemEntityType::StandardPayment.into(); + assert_eq!(index, 2u32); + assert_eq!( + SystemEntityType::StandardPayment.to_string(), + STANDARD_PAYMENT + ); + } + + #[test] + fn get_index_of_auction_contract() { + let index: u32 = SystemEntityType::Auction.into(); + assert_eq!(index, 3u32); + assert_eq!(SystemEntityType::Auction.to_string(), AUCTION); + } + + #[test] + fn create_mint_variant_from_int() { + let mint = SystemEntityType::try_from(0).ok().unwrap(); + assert_eq!(mint, SystemEntityType::Mint); + } + + #[test] + fn create_handle_payment_variant_from_int() { + let handle_payment = SystemEntityType::try_from(1).ok().unwrap(); + assert_eq!(handle_payment, SystemEntityType::HandlePayment); + } + + 
#[test] + fn create_standard_payment_variant_from_int() { + let handle_payment = SystemEntityType::try_from(2).ok().unwrap(); + assert_eq!(handle_payment, SystemEntityType::StandardPayment); + } + + #[test] + fn create_auction_variant_from_int() { + let auction = SystemEntityType::try_from(3).ok().unwrap(); + assert_eq!(auction, SystemEntityType::Auction); + } + + #[test] + fn create_unknown_system_contract_variant() { + assert!(SystemEntityType::try_from(4).is_err()); + assert!(SystemEntityType::try_from(5).is_err()); + assert!(SystemEntityType::try_from(10).is_err()); + assert!(SystemEntityType::try_from(u32::MAX).is_err()); + } +} diff --git a/types/src/testing.rs b/types/src/testing.rs new file mode 100644 index 0000000000..a81cd8677f --- /dev/null +++ b/types/src/testing.rs @@ -0,0 +1,195 @@ +//! An RNG for testing purposes. +use std::{ + cell::RefCell, + cmp, env, + fmt::{self, Debug, Display, Formatter}, + iter, thread, +}; + +use rand::{ + self, + distributions::{uniform::SampleRange, Distribution, Standard}, + CryptoRng, Error, Rng, RngCore, SeedableRng, +}; +use rand_pcg::Pcg64Mcg; + +thread_local! { + static THIS_THREAD_HAS_RNG: RefCell = const { RefCell::new(false) }; +} + +const CL_TEST_SEED: &str = "CL_TEST_SEED"; + +type Seed = ::Seed; // [u8; 16] + +/// A fast, seedable pseudorandom number generator for use in tests which prints the seed if the +/// thread in which it is created panics. +/// +/// Only one `TestRng` is permitted per thread. +pub struct TestRng { + seed: Seed, + rng: Pcg64Mcg, +} + +impl TestRng { + /// Constructs a new `TestRng` using a seed generated from the env var `CL_TEST_SEED` if set or + /// from cryptographically secure random data if not. + /// + /// Note that `new()` or `default()` should only be called once per test. If a test needs to + /// spawn multiple threads each with their own `TestRng`, then use `new()` to create a single, + /// master `TestRng`, then use it to create a seed per child thread. 
The child `TestRng`s can + /// then be constructed in their own threads via `from_seed()`. + /// + /// # Panics + /// + /// Panics if a `TestRng` has already been created on this thread. + pub fn new() -> Self { + Self::set_flag_or_panic(); + + let mut seed = Seed::default(); + match env::var(CL_TEST_SEED) { + Ok(seed_as_hex) => { + base16::decode_slice(&seed_as_hex, &mut seed).unwrap_or_else(|error| { + THIS_THREAD_HAS_RNG.with(|flag| { + *flag.borrow_mut() = false; + }); + panic!("can't parse '{}' as a TestRng seed: {}", seed_as_hex, error) + }); + } + Err(_) => { + rand::thread_rng().fill(&mut seed); + } + }; + + let rng = Pcg64Mcg::from_seed(seed); + + TestRng { seed, rng } + } + + /// Constructs a new `TestRng` using `seed`. This should be used in cases where a test needs to + /// spawn multiple threads each with their own `TestRng`. A single, master `TestRng` should be + /// constructed before any child threads are spawned, and that one should be used to create + /// seeds for the child threads' `TestRng`s. + /// + /// # Panics + /// + /// Panics if a `TestRng` has already been created on this thread. + pub fn from_seed(seed: Seed) -> Self { + Self::set_flag_or_panic(); + let rng = Pcg64Mcg::from_seed(seed); + TestRng { seed, rng } + } + + /// Returns a random `String` of length within the range specified by `length_range`. + pub fn random_string>(&mut self, length_range: R) -> String { + let count = self.gen_range(length_range); + iter::repeat_with(|| self.gen::()) + .take(count) + .collect() + } + + /// Returns a random `Vec` of length within the range specified by `length_range`. 
+ pub fn random_vec, T>(&mut self, length_range: R) -> Vec + where + Standard: Distribution, + { + let count = self.gen_range(length_range); + iter::repeat_with(|| self.gen::()).take(count).collect() + } + + fn set_flag_or_panic() { + THIS_THREAD_HAS_RNG.with(|flag| { + if *flag.borrow() { + panic!("cannot create multiple TestRngs on the same thread"); + } + *flag.borrow_mut() = true; + }); + } + + /// Creates a child RNG. + /// + /// The resulting RNG is seeded from `self` deterministically. + pub fn create_child(&mut self) -> Self { + let seed = self.gen(); + let rng = Pcg64Mcg::from_seed(seed); + TestRng { seed, rng } + } +} + +impl Default for TestRng { + fn default() -> Self { + TestRng::new() + } +} + +impl Display for TestRng { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + write!( + formatter, + "TestRng seed: {}", + base16::encode_lower(&self.seed) + ) + } +} + +impl Debug for TestRng { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + Display::fmt(self, formatter) + } +} + +impl Drop for TestRng { + fn drop(&mut self) { + if thread::panicking() { + let line_1 = format!("Thread: {}", thread::current().name().unwrap_or("unnamed")); + let line_2 = "To reproduce failure, try running with env var:"; + let line_3 = format!("{}={}", CL_TEST_SEED, base16::encode_lower(&self.seed)); + let max_length = cmp::max(line_1.len(), line_2.len()); + let border = "=".repeat(max_length); + println!( + "\n{}\n{}\n{}\n{}\n{}\n", + border, line_1, line_2, line_3, border + ); + } + } +} + +impl SeedableRng for TestRng { + type Seed = ::Seed; + + fn from_seed(seed: Self::Seed) -> Self { + Self::from_seed(seed) + } +} + +impl RngCore for TestRng { + fn next_u32(&mut self) -> u32 { + self.rng.next_u32() + } + + fn next_u64(&mut self) -> u64 { + self.rng.next_u64() + } + + fn fill_bytes(&mut self, dest: &mut [u8]) { + self.rng.fill_bytes(dest) + } + + fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> { + self.rng.try_fill_bytes(dest) + } 
+} + +impl CryptoRng for TestRng {} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + #[should_panic(expected = "cannot create multiple TestRngs on the same thread")] + fn second_test_rng_in_thread_should_panic() { + let _test_rng1 = TestRng::new(); + let seed = [1; 16]; + let _test_rng2 = TestRng::from_seed(seed); + } +} diff --git a/types/src/timestamp.rs b/types/src/timestamp.rs new file mode 100644 index 0000000000..9210c38900 --- /dev/null +++ b/types/src/timestamp.rs @@ -0,0 +1,487 @@ +use alloc::vec::Vec; +use core::{ + fmt::{self, Display, Formatter}, + ops::{Add, AddAssign, Div, Mul, Rem, Shl, Shr, Sub, SubAssign}, + time::Duration, +}; +#[cfg(any(feature = "std", test))] +use std::{str::FromStr, time::SystemTime}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "std", test))] +use humantime::{DurationError, TimestampError}; +#[cfg(any(feature = "testing", test))] +use rand::Rng; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +#[cfg(any(feature = "std", test))] +use serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer}; + +use crate::bytesrepr::{self, FromBytes, ToBytes}; + +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; + +/// Example timestamp equal to 2020-11-17T00:39:24.072Z. +#[cfg(feature = "json-schema")] +const TIMESTAMP: Timestamp = Timestamp(1_605_573_564_072); + +/// A timestamp type, representing a concrete moment in time. 
+#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr( + feature = "json-schema", + derive(JsonSchema), + schemars(description = "Timestamp formatted as per RFC 3339") +)] +//Default is needed only in testing to meet EnumIter needs +#[cfg_attr(any(feature = "testing", test), derive(Default))] +pub struct Timestamp(#[cfg_attr(feature = "json-schema", schemars(with = "String"))] u64); + +impl Timestamp { + /// The maximum value a timestamp can have. + pub const MAX: Timestamp = Timestamp(u64::MAX); + + #[cfg(any(feature = "std", test))] + /// Returns the timestamp of the current moment. + pub fn now() -> Self { + let millis = SystemTime::UNIX_EPOCH.elapsed().unwrap().as_millis() as u64; + Timestamp(millis) + } + + #[cfg(any(feature = "std", test))] + /// Returns the time that has elapsed since this timestamp. + pub fn elapsed(&self) -> TimeDiff { + TimeDiff(Timestamp::now().0.saturating_sub(self.0)) + } + + /// Returns a zero timestamp. + pub fn zero() -> Self { + Timestamp(0) + } + + /// Returns the timestamp as the number of milliseconds since the Unix epoch + pub fn millis(&self) -> u64 { + self.0 + } + + /// Returns the difference between `self` and `other`, or `0` if `self` is earlier than `other`. + pub fn saturating_diff(self, other: Timestamp) -> TimeDiff { + TimeDiff(self.0.saturating_sub(other.0)) + } + + /// Returns the difference between `self` and `other`, or `0` if that would be before the epoch. + #[must_use] + pub fn saturating_sub(self, other: TimeDiff) -> Timestamp { + Timestamp(self.0.saturating_sub(other.0)) + } + + /// Returns the sum of `self` and `other`, or the maximum possible value if that would be + /// exceeded. + #[must_use] + pub fn saturating_add(self, other: TimeDiff) -> Timestamp { + Timestamp(self.0.saturating_add(other.0)) + } + + /// Returns the number of trailing zeros in the number of milliseconds since the epoch. 
+ pub fn trailing_zeros(&self) -> u8 { + self.0.trailing_zeros() as u8 + } + + // This method is not intended to be used by third party crates. + #[doc(hidden)] + #[cfg(feature = "json-schema")] + pub fn example() -> &'static Self { + &TIMESTAMP + } + + /// Returns a random `Timestamp`. + #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + Timestamp(1_596_763_000_000 + rng.gen_range(200_000..1_000_000)) + } + + /// Checked subtraction for timestamps + #[cfg(any(feature = "testing", test))] + pub fn checked_sub(self, other: TimeDiff) -> Option { + self.0.checked_sub(other.0).map(Timestamp) + } +} + +impl Display for Timestamp { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + #[cfg(any(feature = "std", test))] + return match SystemTime::UNIX_EPOCH.checked_add(Duration::from_millis(self.0)) { + Some(system_time) => write!(f, "{}", humantime::format_rfc3339_millis(system_time)) + .or_else(|e| write!(f, "Invalid timestamp: {}: {}", e, self.0)), + None => write!(f, "invalid Timestamp: {} ms after the Unix epoch", self.0), + }; + + #[cfg(not(any(feature = "std", test)))] + write!(f, "timestamp({}ms)", self.0) + } +} + +#[cfg(any(feature = "std", test))] +impl FromStr for Timestamp { + type Err = TimestampError; + + fn from_str(value: &str) -> Result { + let system_time = humantime::parse_rfc3339_weak(value)?; + let inner = system_time + .duration_since(SystemTime::UNIX_EPOCH) + .map_err(|_| TimestampError::OutOfRange)? 
+ .as_millis() as u64; + Ok(Timestamp(inner)) + } +} + +impl Add for Timestamp { + type Output = Timestamp; + + fn add(self, diff: TimeDiff) -> Timestamp { + Timestamp(self.0 + diff.0) + } +} + +impl AddAssign for Timestamp { + fn add_assign(&mut self, rhs: TimeDiff) { + self.0 += rhs.0; + } +} + +#[cfg(any(feature = "testing", test))] +impl Sub for Timestamp { + type Output = Timestamp; + + fn sub(self, diff: TimeDiff) -> Timestamp { + Timestamp(self.0 - diff.0) + } +} + +impl Rem for Timestamp { + type Output = TimeDiff; + + fn rem(self, diff: TimeDiff) -> TimeDiff { + TimeDiff(self.0 % diff.0) + } +} + +impl Shl for Timestamp +where + u64: Shl, +{ + type Output = Timestamp; + + fn shl(self, rhs: T) -> Timestamp { + Timestamp(self.0 << rhs) + } +} + +impl Shr for Timestamp +where + u64: Shr, +{ + type Output = Timestamp; + + fn shr(self, rhs: T) -> Timestamp { + Timestamp(self.0 >> rhs) + } +} + +#[cfg(any(feature = "std", test))] +impl Serialize for Timestamp { + fn serialize(&self, serializer: S) -> Result { + if serializer.is_human_readable() { + self.to_string().serialize(serializer) + } else { + self.0.serialize(serializer) + } + } +} + +#[cfg(any(feature = "std", test))] +impl<'de> Deserialize<'de> for Timestamp { + fn deserialize>(deserializer: D) -> Result { + if deserializer.is_human_readable() { + let value_as_string = String::deserialize(deserializer)?; + Timestamp::from_str(&value_as_string).map_err(SerdeError::custom) + } else { + let inner = u64::deserialize(deserializer)?; + Ok(Timestamp(inner)) + } + } +} + +impl ToBytes for Timestamp { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.0.to_bytes() + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } +} + +impl FromBytes for Timestamp { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + u64::from_bytes(bytes).map(|(inner, remainder)| (Timestamp(inner), remainder)) + } +} + +impl From for Timestamp { + fn from(milliseconds_since_epoch: 
u64) -> Timestamp { + Timestamp(milliseconds_since_epoch) + } +} + +/// A time difference between two timestamps. +#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr( + feature = "json-schema", + derive(JsonSchema), + schemars(description = "Human-readable duration.") +)] +pub struct TimeDiff(#[cfg_attr(feature = "json-schema", schemars(with = "String"))] u64); + +impl Display for TimeDiff { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + #[cfg(any(feature = "std", test))] + return write!(f, "{}", humantime::format_duration(Duration::from(*self))); + + #[cfg(not(any(feature = "std", test)))] + write!(f, "time diff({}ms)", self.0) + } +} + +#[cfg(any(feature = "std", test))] +impl FromStr for TimeDiff { + type Err = DurationError; + + fn from_str(value: &str) -> Result { + let inner = humantime::parse_duration(value)?.as_millis() as u64; + Ok(TimeDiff(inner)) + } +} + +impl TimeDiff { + /// Zero diff. + pub const ZERO: TimeDiff = TimeDiff(0); + + /// Returns the time difference as the number of milliseconds since the Unix epoch + pub const fn millis(&self) -> u64 { + self.0 + } + + /// Creates a new time difference from seconds. + pub const fn from_seconds(seconds: u32) -> Self { + TimeDiff(seconds as u64 * 1_000) + } + + /// Creates a new time difference from milliseconds. + pub const fn from_millis(millis: u64) -> Self { + TimeDiff(millis) + } + + /// Returns the sum, or `TimeDiff(u64::MAX)` if it would overflow. + #[must_use] + pub fn saturating_add(self, rhs: u64) -> Self { + TimeDiff(self.0.saturating_add(rhs)) + } + + /// Returns the product, or `TimeDiff(u64::MAX)` if it would overflow. + #[must_use] + pub fn saturating_mul(self, rhs: u64) -> Self { + TimeDiff(self.0.saturating_mul(rhs)) + } + + /// Returns the product, or `None` if it would overflow. 
+ #[must_use] + pub fn checked_mul(self, rhs: u64) -> Option { + Some(TimeDiff(self.0.checked_mul(rhs)?)) + } +} + +impl Add for TimeDiff { + type Output = TimeDiff; + + fn add(self, rhs: TimeDiff) -> TimeDiff { + TimeDiff(self.0 + rhs.0) + } +} + +impl AddAssign for TimeDiff { + fn add_assign(&mut self, rhs: TimeDiff) { + self.0 += rhs.0; + } +} + +impl Sub for TimeDiff { + type Output = TimeDiff; + + fn sub(self, rhs: TimeDiff) -> TimeDiff { + TimeDiff(self.0 - rhs.0) + } +} + +impl SubAssign for TimeDiff { + fn sub_assign(&mut self, rhs: TimeDiff) { + self.0 -= rhs.0; + } +} + +impl Mul for TimeDiff { + type Output = TimeDiff; + + fn mul(self, rhs: u64) -> TimeDiff { + TimeDiff(self.0 * rhs) + } +} + +impl Div for TimeDiff { + type Output = TimeDiff; + + fn div(self, rhs: u64) -> TimeDiff { + TimeDiff(self.0 / rhs) + } +} + +impl Div for TimeDiff { + type Output = u64; + + fn div(self, rhs: TimeDiff) -> u64 { + self.0 / rhs.0 + } +} + +impl From for Duration { + fn from(diff: TimeDiff) -> Duration { + Duration::from_millis(diff.0) + } +} + +#[cfg(any(feature = "std", test))] +impl Serialize for TimeDiff { + fn serialize(&self, serializer: S) -> Result { + if serializer.is_human_readable() { + self.to_string().serialize(serializer) + } else { + self.0.serialize(serializer) + } + } +} + +#[cfg(any(feature = "std", test))] +impl<'de> Deserialize<'de> for TimeDiff { + fn deserialize>(deserializer: D) -> Result { + if deserializer.is_human_readable() { + let value_as_string = String::deserialize(deserializer)?; + TimeDiff::from_str(&value_as_string).map_err(SerdeError::custom) + } else { + let inner = u64::deserialize(deserializer)?; + Ok(TimeDiff(inner)) + } + } +} + +impl ToBytes for TimeDiff { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.0.to_bytes() + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } +} + +impl FromBytes for TimeDiff { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + 
u64::from_bytes(bytes).map(|(inner, remainder)| (TimeDiff(inner), remainder)) + } +} + +impl From for TimeDiff { + fn from(duration: Duration) -> TimeDiff { + TimeDiff(duration.as_millis() as u64) + } +} + +/// A module for the `[serde(with = serde_option_time_diff)]` attribute, to serialize and +/// deserialize `Option` treating `None` as 0. +#[cfg(any(feature = "std", test))] +pub mod serde_option_time_diff { + use super::*; + + /// Serializes an `Option`, using `0` if the value is `None`. + pub fn serialize( + maybe_td: &Option, + serializer: S, + ) -> Result { + maybe_td + .unwrap_or_else(|| TimeDiff::from_millis(0)) + .serialize(serializer) + } + + /// Deserializes an `Option`, returning `None` if the value is `0`. + pub fn deserialize<'de, D: Deserializer<'de>>( + deserializer: D, + ) -> Result, D::Error> { + let td = TimeDiff::deserialize(deserializer)?; + if td.0 == 0 { + Ok(None) + } else { + Ok(Some(td)) + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn timestamp_serialization_roundtrip() { + let timestamp = Timestamp::now(); + + let timestamp_as_string = timestamp.to_string(); + assert_eq!( + timestamp, + Timestamp::from_str(×tamp_as_string).unwrap() + ); + + let serialized_json = serde_json::to_string(×tamp).unwrap(); + assert_eq!(timestamp, serde_json::from_str(&serialized_json).unwrap()); + + let serialized_bincode = bincode::serialize(×tamp).unwrap(); + assert_eq!( + timestamp, + bincode::deserialize(&serialized_bincode).unwrap() + ); + + bytesrepr::test_serialization_roundtrip(×tamp); + } + + #[test] + fn timediff_serialization_roundtrip() { + let mut rng = TestRng::new(); + let timediff = TimeDiff(rng.gen()); + + let timediff_as_string = timediff.to_string(); + assert_eq!(timediff, TimeDiff::from_str(&timediff_as_string).unwrap()); + + let serialized_json = serde_json::to_string(&timediff).unwrap(); + assert_eq!(timediff, serde_json::from_str(&serialized_json).unwrap()); + + let serialized_bincode = 
bincode::serialize(&timediff).unwrap(); + assert_eq!(timediff, bincode::deserialize(&serialized_bincode).unwrap()); + + bytesrepr::test_serialization_roundtrip(&timediff); + } + + #[test] + fn does_not_crash_for_big_timestamp_value() { + assert!(Timestamp::MAX.to_string().starts_with("Invalid timestamp:")); + } +} diff --git a/types/src/transaction.rs b/types/src/transaction.rs new file mode 100644 index 0000000000..4c9bbb838b --- /dev/null +++ b/types/src/transaction.rs @@ -0,0 +1,737 @@ +mod addressable_entity_identifier; +mod approval; +mod approvals_hash; +mod deploy; +mod error; +mod execution_info; +mod initiator_addr; +#[cfg(any(feature = "std", test, feature = "testing"))] +mod initiator_addr_and_secret_key; +mod package_identifier; +mod pricing_mode; +mod runtime_args; +mod serialization; +mod transaction_entry_point; +mod transaction_hash; +mod transaction_id; +mod transaction_invocation_target; +mod transaction_scheduling; +mod transaction_target; +mod transaction_v1; +mod transfer_target; + +#[cfg(feature = "json-schema")] +use crate::URef; +use alloc::{ + collections::BTreeSet, + string::{String, ToString}, + vec::Vec, +}; +use core::fmt::{self, Debug, Display, Formatter}; +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use once_cell::sync::Lazy; +#[cfg(any(all(feature = "std", feature = "testing"), test))] +use rand::Rng; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +#[cfg(any(feature = "std", test))] +use serde::{de, ser, Deserializer, Serializer}; +#[cfg(any(feature = "std", test))] +use serde::{Deserialize, Serialize}; +#[cfg(any(feature = "std", test))] +use serde_bytes::ByteBuf; +#[cfg(any(feature = "std", test))] +use std::hash::Hash; +#[cfg(any(feature = "std", test))] +use thiserror::Error; +use tracing::error; +#[cfg(any(feature = "std", test))] +pub use transaction_v1::calculate_transaction_lane; +#[cfg(any(feature = "std", test))] +use transaction_v1::TransactionV1Json; + 
+#[cfg(any(all(feature = "std", feature = "testing"), test))] +use crate::testing::TestRng; +use crate::{ + account::AccountHash, + bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + Digest, Phase, SecretKey, TimeDiff, Timestamp, +}; +#[cfg(any(feature = "std", test))] +use crate::{Chainspec, Gas, Motes, TransactionV1Config}; +pub use addressable_entity_identifier::AddressableEntityIdentifier; +pub use approval::Approval; +pub use approvals_hash::ApprovalsHash; +#[cfg(any(feature = "std", test))] +pub use deploy::calculate_lane_id_for_deploy; +pub use deploy::{ + Deploy, DeployDecodeFromJsonError, DeployError, DeployExcessiveSizeError, DeployHash, + DeployHeader, DeployId, ExecutableDeployItem, ExecutableDeployItemIdentifier, InvalidDeploy, +}; +pub use error::InvalidTransaction; +pub use execution_info::ExecutionInfo; +pub use initiator_addr::InitiatorAddr; +#[cfg(any(feature = "std", feature = "testing", test))] +pub(crate) use initiator_addr_and_secret_key::InitiatorAddrAndSecretKey; +pub use package_identifier::PackageIdentifier; +pub use pricing_mode::{PricingMode, PricingModeError}; +pub use runtime_args::{NamedArg, RuntimeArgs}; +pub use transaction_entry_point::TransactionEntryPoint; +pub use transaction_hash::TransactionHash; +pub use transaction_id::TransactionId; +pub use transaction_invocation_target::TransactionInvocationTarget; +pub use transaction_scheduling::TransactionScheduling; +pub use transaction_target::{TransactionRuntimeParams, TransactionTarget}; +#[cfg(feature = "json-schema")] +pub(crate) use transaction_v1::arg_handling; +#[cfg(any(feature = "std", feature = "testing", feature = "gens", test))] +pub(crate) use transaction_v1::fields_container::FieldsContainer; +pub use transaction_v1::{ + InvalidTransactionV1, TransactionArgs, TransactionV1, TransactionV1DecodeFromJsonError, + TransactionV1Error, TransactionV1ExcessiveSizeError, TransactionV1Hash, TransactionV1Payload, +}; +pub use transfer_target::TransferTarget; + +const 
DEPLOY_TAG: u8 = 0; +const V1_TAG: u8 = 1; + +#[cfg(feature = "json-schema")] +pub(super) static TRANSACTION: Lazy = Lazy::new(|| { + let secret_key = SecretKey::example(); + let source = URef::from_formatted_str( + "uref-0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a-007", + ) + .unwrap(); + let target = URef::from_formatted_str( + "uref-1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b-000", + ) + .unwrap(); + let id = Some(999); + let amount = 30_000_000_000_u64; + let args = arg_handling::new_transfer_args(amount, Some(source), target, id).unwrap(); + let container = FieldsContainer::new( + TransactionArgs::Named(args), + TransactionTarget::Native, + TransactionEntryPoint::Transfer, + TransactionScheduling::Standard, + ); + let pricing_mode = PricingMode::Fixed { + gas_price_tolerance: 5, + additional_computation_factor: 0, + }; + let initiator_addr_and_secret_key = InitiatorAddrAndSecretKey::SecretKey(secret_key); + let v1_txn = TransactionV1::build( + "casper-example".to_string(), + *Timestamp::example(), + TimeDiff::from_seconds(3_600), + pricing_mode, + container.to_map().unwrap(), + initiator_addr_and_secret_key, + ); + Transaction::V1(v1_txn) +}); + +/// A versioned wrapper for a transaction or deploy. +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub enum Transaction { + /// A deploy. + Deploy(Deploy), + /// A version 1 transaction. + #[cfg_attr( + feature = "json-schema", + serde(rename = "Version1"), + schemars(with = "TransactionV1Json") + )] + V1(TransactionV1), +} + +impl Transaction { + // Deploy variant ctor + pub fn from_deploy(deploy: Deploy) -> Self { + Transaction::Deploy(deploy) + } + + // V1 variant ctor + pub fn from_v1(v1: TransactionV1) -> Self { + Transaction::V1(v1) + } + + /// Returns the `TransactionHash` identifying this transaction. 
+ pub fn hash(&self) -> TransactionHash { + match self { + Transaction::Deploy(deploy) => TransactionHash::from(*deploy.hash()), + Transaction::V1(txn) => TransactionHash::from(*txn.hash()), + } + } + + /// Size estimate. + pub fn size_estimate(&self) -> usize { + match self { + Transaction::Deploy(deploy) => deploy.serialized_length(), + Transaction::V1(v1) => v1.serialized_length(), + } + } + + /// Timestamp. + pub fn timestamp(&self) -> Timestamp { + match self { + Transaction::Deploy(deploy) => deploy.header().timestamp(), + Transaction::V1(v1) => v1.payload().timestamp(), + } + } + + /// Time to live. + pub fn ttl(&self) -> TimeDiff { + match self { + Transaction::Deploy(deploy) => deploy.header().ttl(), + Transaction::V1(v1) => v1.payload().ttl(), + } + } + + /// Returns `Ok` if the given transaction is valid. Verification procedure is delegated to the + /// implementation of the particular variant of the transaction. + pub fn verify(&self) -> Result<(), InvalidTransaction> { + match self { + Transaction::Deploy(deploy) => deploy.is_valid().map_err(Into::into), + Transaction::V1(v1) => v1.verify().map_err(Into::into), + } + } + + /// Adds a signature of this transaction's hash to its approvals. + pub fn sign(&mut self, secret_key: &SecretKey) { + match self { + Transaction::Deploy(deploy) => deploy.sign(secret_key), + Transaction::V1(v1) => v1.sign(secret_key), + } + } + + /// Returns the `Approval`s for this transaction. + pub fn approvals(&self) -> BTreeSet { + match self { + Transaction::Deploy(deploy) => deploy.approvals().clone(), + Transaction::V1(v1) => v1.approvals().clone(), + } + } + + /// Returns the computed approvals hash identifying this transaction's approvals. 
+ pub fn compute_approvals_hash(&self) -> Result { + let approvals_hash = match self { + Transaction::Deploy(deploy) => deploy.compute_approvals_hash()?, + Transaction::V1(txn) => txn.compute_approvals_hash()?, + }; + Ok(approvals_hash) + } + + /// Returns the chain name for the transaction, whether it's a `Deploy` or `V1` transaction. + pub fn chain_name(&self) -> String { + match self { + Transaction::Deploy(txn) => txn.chain_name().to_string(), + Transaction::V1(txn) => txn.chain_name().to_string(), + } + } + + /// Checks if the transaction is a standard payment. + /// + /// For `Deploy` transactions, it checks if the session is a standard payment + /// in the payment phase. For `V1` transactions, it returns the value of + /// `standard_payment` if the pricing mode is `PaymentLimited`, otherwise it returns `true`. + pub fn is_standard_payment(&self) -> bool { + match self { + Transaction::Deploy(txn) => txn.session().is_standard_payment(Phase::Payment), + Transaction::V1(txn) => match txn.pricing_mode() { + PricingMode::PaymentLimited { + standard_payment, .. + } => *standard_payment, + _ => true, + }, + } + } + + /// Returns the computed `TransactionId` uniquely identifying this transaction and its + /// approvals. 
+ pub fn compute_id(&self) -> TransactionId { + match self { + Transaction::Deploy(deploy) => { + let deploy_hash = *deploy.hash(); + let approvals_hash = deploy.compute_approvals_hash().unwrap_or_else(|error| { + error!(%error, "failed to serialize deploy approvals"); + ApprovalsHash::from(Digest::default()) + }); + TransactionId::new(TransactionHash::Deploy(deploy_hash), approvals_hash) + } + Transaction::V1(txn) => { + let txn_hash = *txn.hash(); + let approvals_hash = txn.compute_approvals_hash().unwrap_or_else(|error| { + error!(%error, "failed to serialize transaction approvals"); + ApprovalsHash::from(Digest::default()) + }); + TransactionId::new(TransactionHash::V1(txn_hash), approvals_hash) + } + } + } + + /// Returns the address of the initiator of the transaction. + pub fn initiator_addr(&self) -> InitiatorAddr { + match self { + Transaction::Deploy(deploy) => InitiatorAddr::PublicKey(deploy.account().clone()), + Transaction::V1(txn) => txn.initiator_addr().clone(), + } + } + + /// Returns `true` if the transaction has expired. + pub fn expired(&self, current_instant: Timestamp) -> bool { + match self { + Transaction::Deploy(deploy) => deploy.expired(current_instant), + Transaction::V1(txn) => txn.expired(current_instant), + } + } + + /// Returns the timestamp of when the transaction expires, i.e. `self.timestamp + self.ttl`. + pub fn expires(&self) -> Timestamp { + match self { + Transaction::Deploy(deploy) => deploy.header().expires(), + Transaction::V1(txn) => txn.payload().expires(), + } + } + + /// Returns the set of account hashes corresponding to the public keys of the approvals. 
+ pub fn signers(&self) -> BTreeSet { + match self { + Transaction::Deploy(deploy) => deploy + .approvals() + .iter() + .map(|approval| approval.signer().to_account_hash()) + .collect(), + Transaction::V1(txn) => txn + .approvals() + .iter() + .map(|approval| approval.signer().to_account_hash()) + .collect(), + } + } + + // This method is not intended to be used by third party crates. + // + // It is required to allow finalized approvals to be injected after reading a `Deploy` from + // storage. + #[doc(hidden)] + pub fn with_approvals(self, approvals: BTreeSet) -> Self { + match self { + Transaction::Deploy(deploy) => Transaction::Deploy(deploy.with_approvals(approvals)), + Transaction::V1(transaction_v1) => { + Transaction::V1(transaction_v1.with_approvals(approvals)) + } + } + } + + /// Get [`TransactionV1`] + pub fn as_transaction_v1(&self) -> Option<&TransactionV1> { + match self { + Transaction::Deploy(_) => None, + Transaction::V1(v1) => Some(v1), + } + } + + /// Authorization keys. + pub fn authorization_keys(&self) -> BTreeSet { + match self { + Transaction::Deploy(deploy) => deploy + .approvals() + .iter() + .map(|approval| approval.signer().to_account_hash()) + .collect(), + Transaction::V1(transaction_v1) => transaction_v1 + .approvals() + .iter() + .map(|approval| approval.signer().to_account_hash()) + .collect(), + } + } + + /// Is the transaction the legacy deploy variant. + pub fn is_legacy_transaction(&self) -> bool { + match self { + Transaction::Deploy(_) => true, + Transaction::V1(_) => false, + } + } + + #[cfg(any(all(feature = "std", feature = "testing"), test))] + /// Calcualates the gas limit for the transaction. 
+ pub fn gas_limit(&self, chainspec: &Chainspec, lane_id: u8) -> Result { + match self { + Transaction::Deploy(deploy) => deploy + .gas_limit(chainspec) + .map_err(InvalidTransaction::from), + Transaction::V1(v1) => { + let pricing_mode = v1.pricing_mode(); + pricing_mode + .gas_limit(chainspec, lane_id) + .map_err(InvalidTransaction::from) + } + } + } + + #[cfg(any(all(feature = "std", feature = "testing"), test))] + /// Returns a gas cost based upon the gas_limit, the gas price, + /// and the chainspec settings. + pub fn gas_cost( + &self, + chainspec: &Chainspec, + lane_id: u8, + gas_price: u8, + ) -> Result { + match self { + Transaction::Deploy(deploy) => deploy + .gas_cost(chainspec, gas_price) + .map_err(InvalidTransaction::from), + Transaction::V1(v1) => { + let pricing_mode = v1.pricing_mode(); + pricing_mode + .gas_cost(chainspec, lane_id, gas_price) + .map_err(InvalidTransaction::from) + } + } + } + + // This method is not intended to be used by third party crates. + #[doc(hidden)] + #[cfg(feature = "json-schema")] + pub fn example() -> &'static Self { + &TRANSACTION + } + + /// Returns a random, valid but possibly expired transaction. + #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn random(rng: &mut TestRng) -> Self { + if rng.gen() { + Transaction::Deploy(Deploy::random_valid_native_transfer(rng)) + } else { + Transaction::V1(TransactionV1::random(rng)) + } + } +} + +#[cfg(any(feature = "std", test))] +impl Serialize for Transaction { + fn serialize(&self, serializer: S) -> Result { + if serializer.is_human_readable() { + TransactionJson::try_from(self.clone()) + .map_err(|error| ser::Error::custom(format!("{:?}", error)))? 
+ .serialize(serializer) + } else { + let bytes = self + .to_bytes() + .map_err(|error| ser::Error::custom(format!("{:?}", error)))?; + ByteBuf::from(bytes).serialize(serializer) + } + } +} + +#[cfg(any(feature = "std", test))] +impl<'de> Deserialize<'de> for Transaction { + fn deserialize>(deserializer: D) -> Result { + if deserializer.is_human_readable() { + let json_helper = TransactionJson::deserialize(deserializer)?; + Transaction::try_from(json_helper) + .map_err(|error| de::Error::custom(format!("{:?}", error))) + } else { + let bytes = ByteBuf::deserialize(deserializer)?.into_vec(); + bytesrepr::deserialize::(bytes) + .map_err(|error| de::Error::custom(format!("{:?}", error))) + } + } +} + +/// A util structure to json-serialize a transaction. +#[cfg(any(feature = "std", test))] +#[derive(Clone, Serialize, Deserialize, PartialEq, Eq)] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +enum TransactionJson { + /// A deploy. + Deploy(Deploy), + /// A version 1 transaction. 
+ #[serde(rename = "Version1")] + V1(TransactionV1Json), +} + +#[cfg(any(feature = "std", test))] +#[derive(Error, Debug)] +enum TransactionJsonError { + #[error("{0}")] + FailedToMap(String), +} + +#[cfg(any(feature = "std", test))] +impl TryFrom for Transaction { + type Error = TransactionJsonError; + fn try_from(transaction: TransactionJson) -> Result { + match transaction { + TransactionJson::Deploy(deploy) => Ok(Transaction::Deploy(deploy)), + TransactionJson::V1(v1) => { + TransactionV1::try_from(v1) + .map(Transaction::V1) + .map_err(|error| { + TransactionJsonError::FailedToMap(format!( + "Failed to map TransactionJson::V1 to Transaction::V1, err: {}", + error + )) + }) + } + } + } +} + +#[cfg(any(feature = "std", test))] +impl TryFrom for TransactionJson { + type Error = TransactionJsonError; + fn try_from(transaction: Transaction) -> Result { + match transaction { + Transaction::Deploy(deploy) => Ok(TransactionJson::Deploy(deploy)), + Transaction::V1(v1) => TransactionV1Json::try_from(v1) + .map(TransactionJson::V1) + .map_err(|error| { + TransactionJsonError::FailedToMap(format!( + "Failed to map Transaction::V1 to TransactionJson::V1, err: {}", + error + )) + }), + } + } +} +/// Calculates gas limit. +#[cfg(any(feature = "std", test))] +pub trait GasLimited { + /// The error type. + type Error; + + /// The minimum allowed gas price (aka the floor). + const GAS_PRICE_FLOOR: u8 = 1; + + /// Returns a gas cost based upon the gas_limit, the gas price, + /// and the chainspec settings. + fn gas_cost(&self, chainspec: &Chainspec, gas_price: u8) -> Result; + + /// Returns the gas / computation limit prior to execution. + fn gas_limit(&self, chainspec: &Chainspec) -> Result; + + /// Returns the gas price tolerance. 
+ fn gas_price_tolerance(&self) -> Result; +} + +impl From for Transaction { + fn from(deploy: Deploy) -> Self { + Self::Deploy(deploy) + } +} + +impl From for Transaction { + fn from(txn: TransactionV1) -> Self { + Self::V1(txn) + } +} + +impl ToBytes for Transaction { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + + match self { + Transaction::Deploy(deploy) => deploy.serialized_length(), + Transaction::V1(txn) => txn.serialized_length(), + } + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + Transaction::Deploy(deploy) => { + DEPLOY_TAG.write_bytes(writer)?; + deploy.write_bytes(writer) + } + Transaction::V1(txn) => { + V1_TAG.write_bytes(writer)?; + txn.write_bytes(writer) + } + } + } +} + +impl FromBytes for Transaction { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + match tag { + DEPLOY_TAG => { + let (deploy, remainder) = Deploy::from_bytes(remainder)?; + Ok((Transaction::Deploy(deploy), remainder)) + } + V1_TAG => { + let (txn, remainder) = TransactionV1::from_bytes(remainder)?; + Ok((Transaction::V1(txn), remainder)) + } + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +impl Display for Transaction { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + Transaction::Deploy(deploy) => Display::fmt(deploy, formatter), + Transaction::V1(txn) => Display::fmt(txn, formatter), + } + } +} + +#[cfg(any(feature = "std", test))] +pub(crate) enum GetLaneError { + NoLaneMatch, + PricingModeNotSupported, +} + +#[cfg(any(feature = "std", test))] +impl From for InvalidTransactionV1 { + fn from(value: GetLaneError) -> Self { + match value { + GetLaneError::NoLaneMatch => InvalidTransactionV1::NoLaneMatch, + 
GetLaneError::PricingModeNotSupported => InvalidTransactionV1::PricingModeNotSupported, + } + } +} + +#[cfg(any(feature = "std", test))] +impl From for InvalidDeploy { + fn from(value: GetLaneError) -> Self { + match value { + GetLaneError::NoLaneMatch => InvalidDeploy::NoLaneMatch, + GetLaneError::PricingModeNotSupported => InvalidDeploy::PricingModeNotSupported, + } + } +} + +#[cfg(any(feature = "std", test))] +pub(crate) fn get_lane_for_non_install_wasm( + config: &TransactionV1Config, + pricing_mode: &PricingMode, + transaction_size: u64, + runtime_args_size: u64, +) -> Result { + match pricing_mode { + PricingMode::PaymentLimited { payment_amount, .. } => config + .get_wasm_lane_id_by_payment_limited( + *payment_amount, + transaction_size, + runtime_args_size, + ) + .ok_or(GetLaneError::NoLaneMatch), + PricingMode::Fixed { + additional_computation_factor, + .. + } => config + .get_wasm_lane_id_by_size( + transaction_size, + *additional_computation_factor, + runtime_args_size, + ) + .ok_or(GetLaneError::NoLaneMatch), + PricingMode::Prepaid { .. } => Err(GetLaneError::PricingModeNotSupported), + } +} + +/// Proptest generators for [`Transaction`]. +#[cfg(any(feature = "testing", feature = "gens", test))] +pub mod gens { + use super::*; + use proptest::{ + array, + prelude::{Arbitrary, Strategy}, + }; + + /// Generates a random `DeployHash` for testing purposes. + /// + /// This function is used to generate random `DeployHash` values for testing purposes. + /// It produces a proptest `Strategy` that can be used to generate arbitrary `DeployHash` + /// values. 
+ pub fn deploy_hash_arb() -> impl Strategy { + array::uniform32(::arbitrary()).prop_map(DeployHash::from_raw) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::testing::TestRng; + + #[test] + fn json_roundtrip() { + let rng = &mut TestRng::new(); + + let transaction = Transaction::from(Deploy::random(rng)); + let json_string = serde_json::to_string_pretty(&transaction).unwrap(); + let decoded = serde_json::from_str(&json_string).unwrap(); + assert_eq!(transaction, decoded); + + let transaction = Transaction::from(TransactionV1::random(rng)); + let json_string = serde_json::to_string_pretty(&transaction).unwrap(); + let decoded = serde_json::from_str(&json_string).unwrap(); + assert_eq!(transaction, decoded); + } + + #[test] + fn bincode_roundtrip() { + let rng = &mut TestRng::new(); + + let transaction = Transaction::from(Deploy::random(rng)); + let serialized = bincode::serialize(&transaction).unwrap(); + let deserialized = bincode::deserialize(&serialized).unwrap(); + assert_eq!(transaction, deserialized); + + let transaction = Transaction::from(TransactionV1::random(rng)); + let serialized = bincode::serialize(&transaction).unwrap(); + let deserialized = bincode::deserialize(&serialized).unwrap(); + assert_eq!(transaction, deserialized); + } + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + + let transaction = Transaction::from(Deploy::random(rng)); + bytesrepr::test_serialization_roundtrip(&transaction); + + let transaction = Transaction::from(TransactionV1::random(rng)); + bytesrepr::test_serialization_roundtrip(&transaction); + } +} + +#[cfg(test)] +mod proptests { + use super::*; + use crate::{ + bytesrepr, + gens::{legal_transaction_arb, transaction_arb}, + }; + use proptest::prelude::*; + + proptest! 
{ + #[test] + fn bytesrepr_roundtrip(transaction in transaction_arb()) { + bytesrepr::test_serialization_roundtrip(&transaction); + } + + #[test] + fn json_roundtrip(transaction in legal_transaction_arb()) { + let json_string = serde_json::to_string_pretty(&transaction).unwrap(); + let decoded = serde_json::from_str::(&json_string).unwrap(); + assert_eq!(transaction, decoded); + } + } +} diff --git a/types/src/transaction/addressable_entity_identifier.rs b/types/src/transaction/addressable_entity_identifier.rs new file mode 100644 index 0000000000..9fee6fad72 --- /dev/null +++ b/types/src/transaction/addressable_entity_identifier.rs @@ -0,0 +1,141 @@ +use alloc::{string::String, vec::Vec}; +use core::fmt::{self, Debug, Display, Formatter}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::Rng; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +#[cfg(doc)] +use super::{ExecutableDeployItem, TransactionTarget}; +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + AddressableEntityHash, EntityAddr, +}; + +const HASH_TAG: u8 = 0; +const NAME_TAG: u8 = 1; +const ADDR_TAG: u8 = 2; + +/// Identifier for the contract object within a [`TransactionTarget::Stored`] or an +/// [`ExecutableDeployItem`]. +#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr( + feature = "json-schema", + derive(JsonSchema), + schemars( + description = "Identifier for the contract object within a `Stored` transaction target \ + or an `ExecutableDeployItem`." + ) +)] +#[serde(deny_unknown_fields)] +pub enum AddressableEntityIdentifier { + /// The hash identifying the addressable entity. + Hash(AddressableEntityHash), + /// The name identifying the addressable entity. 
+ Name(String), + /// The entity address + Addr(EntityAddr), +} + +impl AddressableEntityIdentifier { + /// Returns a random `AddressableEntityIdentifier`. + #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + if rng.gen() { + if rng.gen() { + AddressableEntityIdentifier::Hash(AddressableEntityHash::new(rng.gen())) + } else { + AddressableEntityIdentifier::Addr(EntityAddr::new_of_kind(rng.gen(), rng.gen())) + } + } else { + AddressableEntityIdentifier::Name(rng.random_string(1..21)) + } + } +} + +impl Display for AddressableEntityIdentifier { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + AddressableEntityIdentifier::Hash(hash) => write!(formatter, "entity-hash({})", hash), + AddressableEntityIdentifier::Name(name) => write!(formatter, "entity-name({})", name), + AddressableEntityIdentifier::Addr(entity_addr) => { + write!(formatter, "entity-addr({})", entity_addr) + } + } + } +} + +impl ToBytes for AddressableEntityIdentifier { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + AddressableEntityIdentifier::Hash(hash) => { + HASH_TAG.write_bytes(writer)?; + hash.write_bytes(writer) + } + AddressableEntityIdentifier::Name(name) => { + NAME_TAG.write_bytes(writer)?; + name.write_bytes(writer) + } + AddressableEntityIdentifier::Addr(entity_addr) => { + ADDR_TAG.write_bytes(writer)?; + entity_addr.write_bytes(writer) + } + } + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + + match self { + AddressableEntityIdentifier::Hash(hash) => hash.serialized_length(), + AddressableEntityIdentifier::Name(name) => name.serialized_length(), + AddressableEntityIdentifier::Addr(addr) => addr.serialized_length(), + } + } +} + +impl FromBytes for AddressableEntityIdentifier { + fn from_bytes(bytes: 
&[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + match tag { + HASH_TAG => { + let (hash, remainder) = AddressableEntityHash::from_bytes(remainder)?; + Ok((AddressableEntityIdentifier::Hash(hash), remainder)) + } + NAME_TAG => { + let (name, remainder) = String::from_bytes(remainder)?; + Ok((AddressableEntityIdentifier::Name(name), remainder)) + } + ADDR_TAG => { + let (addr, remainder) = EntityAddr::from_bytes(remainder)?; + Ok((AddressableEntityIdentifier::Addr(addr), remainder)) + } + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + for _ in 0..10 { + bytesrepr::test_serialization_roundtrip(&AddressableEntityIdentifier::random(rng)); + } + } +} diff --git a/types/src/transaction/approval.rs b/types/src/transaction/approval.rs new file mode 100644 index 0000000000..1d0ae68656 --- /dev/null +++ b/types/src/transaction/approval.rs @@ -0,0 +1,103 @@ +use alloc::vec::Vec; +use core::fmt::{self, Display, Formatter}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +#[cfg(any(all(feature = "std", feature = "testing"), test))] +use crate::testing::TestRng; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + crypto, PublicKey, SecretKey, Signature, +}; + +use super::TransactionHash; + +/// A struct containing a signature of a transaction hash and the public key of the signer. +#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub struct Approval { + signer: PublicKey, + signature: Signature, +} + +impl Approval { + /// Creates an approval by signing the given transaction hash using the given secret key. 
+ pub fn create(hash: &TransactionHash, secret_key: &SecretKey) -> Self { + let signer = PublicKey::from(secret_key); + let signature = crypto::sign(hash, secret_key, &signer); + Self { signer, signature } + } + + /// Returns a new approval. + pub fn new(signer: PublicKey, signature: Signature) -> Self { + Self { signer, signature } + } + + /// Returns the public key of the approval's signer. + pub fn signer(&self) -> &PublicKey { + &self.signer + } + + /// Returns the approval signature. + pub fn signature(&self) -> &Signature { + &self.signature + } + + /// Returns a random `Approval`. + #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn random(rng: &mut TestRng) -> Self { + let hash = TransactionHash::random(rng); + let secret_key = SecretKey::random(rng); + Approval::create(&hash, &secret_key) + } +} + +impl Display for Approval { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + write!(formatter, "approval({})", self.signer) + } +} + +impl ToBytes for Approval { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.signer.write_bytes(writer)?; + self.signature.write_bytes(writer) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.signer.serialized_length() + self.signature.serialized_length() + } +} + +impl FromBytes for Approval { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (signer, remainder) = PublicKey::from_bytes(bytes)?; + let (signature, remainder) = Signature::from_bytes(remainder)?; + let approval = Approval { signer, signature }; + Ok((approval, remainder)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + let approval = Approval::random(rng); + bytesrepr::test_serialization_roundtrip(&approval); + } +} diff --git 
a/types/src/transaction/approvals_hash.rs b/types/src/transaction/approvals_hash.rs new file mode 100644 index 0000000000..9bfa1248b6 --- /dev/null +++ b/types/src/transaction/approvals_hash.rs @@ -0,0 +1,111 @@ +use alloc::{collections::BTreeSet, vec::Vec}; +use core::fmt::{self, Display, Formatter}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::Rng; +use serde::{Deserialize, Serialize}; + +use super::Approval; +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + Digest, +}; + +/// The cryptographic hash of the bytesrepr-encoded set of approvals for a single [``]. +#[derive( + Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug, Default, +)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[serde(deny_unknown_fields)] +pub struct ApprovalsHash(pub Digest); + +impl ApprovalsHash { + /// The number of bytes in a `ApprovalsHash` digest. + pub const LENGTH: usize = Digest::LENGTH; + + /// Constructs a new `ApprovalsHash` by bytesrepr-encoding `approvals` and creating + /// a [`Digest`] of this. + pub fn compute(approvals: &BTreeSet) -> Result { + let digest = Digest::hash(approvals.to_bytes()?); + Ok(ApprovalsHash(digest)) + } + + /// Returns the wrapped inner digest. + pub fn inner(&self) -> &Digest { + &self.0 + } + + /// Returns a new `ApprovalsHash` directly initialized with the provided bytes; no + /// hashing is done. + #[cfg(any(feature = "testing", test))] + pub const fn from_raw(raw_digest: [u8; Self::LENGTH]) -> Self { + ApprovalsHash(Digest::from_raw(raw_digest)) + } + + /// Returns a random `ApprovalsHash`. 
+ #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + let hash = rng.gen::<[u8; Digest::LENGTH]>().into(); + ApprovalsHash(hash) + } +} + +impl From for Digest { + fn from(hash: ApprovalsHash) -> Self { + hash.0 + } +} + +impl From for ApprovalsHash { + fn from(digest: Digest) -> Self { + Self(digest) + } +} + +impl Display for ApprovalsHash { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + write!(formatter, "transaction-v1-approvals-hash({})", self.0,) + } +} + +impl AsRef<[u8]> for ApprovalsHash { + fn as_ref(&self) -> &[u8] { + self.0.as_ref() + } +} + +impl ToBytes for ApprovalsHash { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.0.write_bytes(writer) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.0.to_bytes() + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } +} + +impl FromBytes for ApprovalsHash { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + Digest::from_bytes(bytes).map(|(inner, remainder)| (ApprovalsHash(inner), remainder)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + let hash = ApprovalsHash::random(rng); + bytesrepr::test_serialization_roundtrip(&hash); + } +} diff --git a/types/src/transaction/deploy.rs b/types/src/transaction/deploy.rs new file mode 100644 index 0000000000..a9a7609e94 --- /dev/null +++ b/types/src/transaction/deploy.rs @@ -0,0 +1,2794 @@ +pub mod deploy_category; +mod deploy_hash; +mod deploy_header; +mod deploy_id; +mod error; +mod executable_deploy_item; + +use alloc::{collections::BTreeSet, vec::Vec}; +use core::{ + cmp, + fmt::{self, Debug, Display, Formatter}, + hash, +}; + +#[cfg(any(feature = "std", test))] +use std::convert::TryFrom; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "std", test))] +use itertools::Itertools; +#[cfg(feature = "json-schema")] 
+use once_cell::sync::Lazy; +#[cfg(any(feature = "once_cell", test))] +use once_cell::sync::OnceCell; +#[cfg(any(all(feature = "std", feature = "testing"), test))] +use rand::Rng; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +#[cfg(any(feature = "std", test))] +use serde::{Deserialize, Serialize}; +#[cfg(any(all(feature = "std", feature = "testing"), test))] +use tracing::{debug, warn}; + +#[cfg(any(feature = "std", test))] +use super::{get_lane_for_non_install_wasm, InitiatorAddr, InitiatorAddrAndSecretKey, PricingMode}; +#[cfg(any( + all(feature = "std", feature = "testing"), + feature = "json-schema", + test +))] +use crate::runtime_args; +#[cfg(any(all(feature = "std", feature = "testing"), test))] +use crate::{ + bytesrepr::Bytes, + system::auction::{ + ARG_AMOUNT as ARG_AUCTION_AMOUNT, ARG_DELEGATION_RATE, ARG_DELEGATOR, ARG_NEW_VALIDATOR, + ARG_PUBLIC_KEY as ARG_AUCTION_PUBLIC_KEY, ARG_VALIDATOR, METHOD_ADD_BID, METHOD_DELEGATE, + METHOD_REDELEGATE, METHOD_UNDELEGATE, METHOD_WITHDRAW_BID, + }, + testing::TestRng, + transaction::RuntimeArgs, + AddressableEntityHash, URef, DEFAULT_MAX_PAYMENT_MOTES, DEFAULT_MIN_TRANSFER_MOTES, +}; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + crypto, + transaction::{Approval, ApprovalsHash}, + Digest, DisplayIter, PublicKey, SecretKey, TimeDiff, Timestamp, +}; + +#[cfg(any(feature = "std", test))] +use crate::{chainspec::PricingHandling, Chainspec, Phase, TransactionV1Config, MINT_LANE_ID}; +#[cfg(any(feature = "std", test))] +use crate::{system::auction::ARG_AMOUNT, transaction::GasLimited, Gas, Motes, U512}; +pub use deploy_hash::DeployHash; +pub use deploy_header::DeployHeader; +pub use deploy_id::DeployId; +pub use error::{ + DecodeFromJsonError as DeployDecodeFromJsonError, Error as DeployError, + ExcessiveSizeError as DeployExcessiveSizeError, InvalidDeploy, +}; +pub use executable_deploy_item::{ExecutableDeployItem, ExecutableDeployItemIdentifier}; + +#[cfg(feature = "json-schema")] +static 
DEPLOY: Lazy = Lazy::new(|| { + let payment_args = runtime_args! { + "amount" => 1000 + }; + let payment = ExecutableDeployItem::StoredContractByName { + name: String::from("casper-example"), + entry_point: String::from("example-entry-point"), + args: payment_args, + }; + let session_args = runtime_args! { + "amount" => 1000 + }; + let session = ExecutableDeployItem::Transfer { args: session_args }; + let serialized_body = serialize_body(&payment, &session); + let body_hash = Digest::hash(serialized_body); + + let secret_key = SecretKey::example(); + let timestamp = *Timestamp::example(); + let header = DeployHeader::new( + PublicKey::from(secret_key), + timestamp, + TimeDiff::from_seconds(3_600), + 1, + body_hash, + vec![DeployHash::new(Digest::from([1u8; Digest::LENGTH]))], + String::from("casper-example"), + ); + let serialized_header = serialize_header(&header); + let hash = DeployHash::new(Digest::hash(serialized_header)); + + let mut approvals = BTreeSet::new(); + let approval = Approval::create(&hash.into(), secret_key); + approvals.insert(approval); + + Deploy { + hash, + header, + payment, + session, + approvals, + is_valid: OnceCell::new(), + } +}); + +/// A signed smart contract. +#[derive(Clone, Eq, Debug)] +#[cfg_attr( + any(feature = "std", test), + derive(Serialize, Deserialize), + serde(deny_unknown_fields) +)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr( + feature = "json-schema", + derive(JsonSchema), + schemars(description = "A signed smart contract.") +)] +pub struct Deploy { + hash: DeployHash, + header: DeployHeader, + payment: ExecutableDeployItem, + session: ExecutableDeployItem, + approvals: BTreeSet, + #[cfg_attr(any(all(feature = "std", feature = "once_cell"), test), serde(skip))] + #[cfg_attr( + all(any(feature = "once_cell", test), feature = "datasize"), + data_size(skip) + )] + #[cfg(any(feature = "once_cell", test))] + is_valid: OnceCell>, +} + +impl Deploy { + /// Constructs a new `Deploy`. 
+ pub fn new( + hash: DeployHash, + header: DeployHeader, + payment: ExecutableDeployItem, + session: ExecutableDeployItem, + ) -> Deploy { + Deploy { + hash, + header, + payment, + session, + approvals: BTreeSet::new(), + #[cfg(any(feature = "once_cell", test))] + is_valid: OnceCell::new(), + } + } + /// Constructs a new signed `Deploy`. + #[cfg(any(all(feature = "std", feature = "testing"), test))] + #[allow(clippy::too_many_arguments)] + pub fn new_signed( + timestamp: Timestamp, + ttl: TimeDiff, + gas_price: u64, + dependencies: Vec, + chain_name: String, + payment: ExecutableDeployItem, + session: ExecutableDeployItem, + secret_key: &SecretKey, + account: Option, + ) -> Deploy { + let account_and_secret_key = match account { + Some(account) => InitiatorAddrAndSecretKey::Both { + initiator_addr: InitiatorAddr::PublicKey(account), + secret_key, + }, + None => InitiatorAddrAndSecretKey::SecretKey(secret_key), + }; + + Deploy::build( + timestamp, + ttl, + gas_price, + dependencies, + chain_name, + payment, + session, + account_and_secret_key, + ) + } + + #[cfg(any(feature = "std", test))] + #[allow(clippy::too_many_arguments)] + fn build( + timestamp: Timestamp, + ttl: TimeDiff, + gas_price: u64, + dependencies: Vec, + chain_name: String, + payment: ExecutableDeployItem, + session: ExecutableDeployItem, + initiator_addr_and_secret_key: InitiatorAddrAndSecretKey, + ) -> Deploy { + let serialized_body = serialize_body(&payment, &session); + let body_hash = Digest::hash(serialized_body); + + let account = match initiator_addr_and_secret_key.initiator_addr() { + InitiatorAddr::PublicKey(public_key) => public_key, + InitiatorAddr::AccountHash(_) => unreachable!(), + }; + + let dependencies = dependencies.into_iter().unique().collect(); + let header = DeployHeader::new( + account, + timestamp, + ttl, + gas_price, + body_hash, + dependencies, + chain_name, + ); + let serialized_header = serialize_header(&header); + let hash = 
DeployHash::new(Digest::hash(serialized_header)); + + let mut deploy = Deploy { + hash, + header, + payment, + session, + approvals: BTreeSet::new(), + #[cfg(any(feature = "once_cell", test))] + is_valid: OnceCell::new(), + }; + + if let Some(secret_key) = initiator_addr_and_secret_key.secret_key() { + deploy.sign(secret_key); + } + deploy + } + + /// Returns the `DeployHash` identifying this `Deploy`. + pub fn hash(&self) -> &DeployHash { + &self.hash + } + + /// Returns the public key of the account providing the context in which to run the `Deploy`. + pub fn account(&self) -> &PublicKey { + self.header.account() + } + + /// Returns the creation timestamp of the `Deploy`. + pub fn timestamp(&self) -> Timestamp { + self.header.timestamp() + } + + /// Returns the duration after the creation timestamp for which the `Deploy` will stay valid. + /// + /// After this duration has ended, the `Deploy` will be considered expired. + pub fn ttl(&self) -> TimeDiff { + self.header.ttl() + } + + /// Returns `true` if the `Deploy` has expired. + pub fn expired(&self, current_instant: Timestamp) -> bool { + self.header.expired(current_instant) + } + + /// Returns the sender's gas price tolerance for block inclusion. + pub fn gas_price(&self) -> u64 { + self.header.gas_price() + } + + /// Returns the hash of the body (i.e. the Wasm code) of the `Deploy`. + pub fn body_hash(&self) -> &Digest { + self.header.body_hash() + } + + /// Returns the name of the chain the `Deploy` should be executed on. + pub fn chain_name(&self) -> &str { + self.header.chain_name() + } + + /// Returns a reference to the `DeployHeader` of this `Deploy`. + pub fn header(&self) -> &DeployHeader { + &self.header + } + + /// Consumes `self`, returning the `DeployHeader` of this `Deploy`. + pub fn take_header(self) -> DeployHeader { + self.header + } + + /// Returns the `ExecutableDeployItem` for payment code. 
+ pub fn payment(&self) -> &ExecutableDeployItem { + &self.payment + } + + /// Returns the `ExecutableDeployItem` for session code. + pub fn session(&self) -> &ExecutableDeployItem { + &self.session + } + + /// Returns the `Approval`s for this deploy. + pub fn approvals(&self) -> &BTreeSet { + &self.approvals + } + + /// Consumes `self`, returning a tuple of its constituent parts. + pub fn destructure( + self, + ) -> ( + DeployHash, + DeployHeader, + ExecutableDeployItem, + ExecutableDeployItem, + BTreeSet, + ) { + ( + self.hash, + self.header, + self.payment, + self.session, + self.approvals, + ) + } + + /// Adds a signature of this `Deploy`'s hash to its approvals. + pub fn sign(&mut self, secret_key: &SecretKey) { + let approval = Approval::create(&self.hash.into(), secret_key); + self.approvals.insert(approval); + } + + /// Returns the `ApprovalsHash` of this `Deploy`'s approvals. + pub fn compute_approvals_hash(&self) -> Result { + ApprovalsHash::compute(&self.approvals) + } + + /// Returns `true` if the serialized size of the deploy is not greater than + /// `max_transaction_size`. + #[cfg(any(feature = "std", test))] + pub fn is_valid_size(&self, max_transaction_size: u32) -> Result<(), DeployExcessiveSizeError> { + let deploy_size = self.serialized_length(); + if deploy_size > max_transaction_size as usize { + return Err(DeployExcessiveSizeError { + max_transaction_size, + actual_deploy_size: deploy_size, + }); + } + Ok(()) + } + + /// Returns `Ok` if and only if this `Deploy`'s body hashes to the value of `body_hash()`, and + /// if this `Deploy`'s header hashes to the value claimed as the deploy hash. 
+ pub fn has_valid_hash(&self) -> Result<(), InvalidDeploy> { + let serialized_body = serialize_body(&self.payment, &self.session); + let body_hash = Digest::hash(serialized_body); + if body_hash != *self.header.body_hash() { + #[cfg(any(all(feature = "std", feature = "testing"), test))] + warn!(?self, ?body_hash, "invalid deploy body hash"); + return Err(InvalidDeploy::InvalidBodyHash); + } + + let serialized_header = serialize_header(&self.header); + let hash = DeployHash::new(Digest::hash(serialized_header)); + if hash != self.hash { + #[cfg(any(all(feature = "std", feature = "testing"), test))] + warn!(?self, ?hash, "invalid deploy hash"); + return Err(InvalidDeploy::InvalidDeployHash); + } + Ok(()) + } + + /// Returns `Ok` if and only if: + /// * the deploy hash is correct (should be the hash of the header), and + /// * the body hash is correct (should be the hash of the body), and + /// * approvals are non empty, and + /// * all approvals are valid signatures of the deploy hash + pub fn is_valid(&self) -> Result<(), InvalidDeploy> { + #[cfg(any(feature = "once_cell", test))] + return self.is_valid.get_or_init(|| validate_deploy(self)).clone(); + + #[cfg(not(any(feature = "once_cell", test)))] + validate_deploy(self) + } + + /// Returns `true` if this deploy is a native transfer. + pub fn is_transfer(&self) -> bool { + self.session.is_transfer() + } + + /// Should this transaction start in the initiating accounts context? 
+ pub fn is_account_session(&self) -> bool { + // legacy deploys are always initiated by an account + true + } + + /// Returns `Ok` if and only if: + /// * the chain_name is correct, + /// * the configured parameters are complied with at the given timestamp + #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn is_config_compliant( + &self, + chainspec: &Chainspec, + timestamp_leeway: TimeDiff, + at: Timestamp, + ) -> Result<(), InvalidDeploy> { + let config = &chainspec.transaction_config; + + if !config.runtime_config.vm_casper_v1 { + // Not config compliant if V1 runtime is disabled. + return Err(InvalidDeploy::InvalidRuntime); + } + let pricing_handling = chainspec.core_config.pricing_handling; + let v1_config = &chainspec.transaction_config.transaction_v1_config; + let lane_id = calculate_lane_id_for_deploy(self, pricing_handling, v1_config)?; + let lane_definition = v1_config + .get_lane_by_id(lane_id) + .ok_or(InvalidDeploy::NoLaneMatch)?; + + self.is_valid_size(lane_definition.max_transaction_length as u32)?; + + let header = self.header(); + let chain_name = &chainspec.network_config.name; + + if header.chain_name() != chain_name { + debug!( + deploy_hash = %self.hash(), + deploy_header = %header, + chain_name = %header.chain_name(), + "invalid chain identifier" + ); + return Err(InvalidDeploy::InvalidChainName { + expected: chain_name.to_string(), + got: header.chain_name().to_string(), + }); + } + + let min_gas_price = chainspec.vacancy_config.min_gas_price; + let gas_price_tolerance = self.gas_price_tolerance()?; + if gas_price_tolerance < min_gas_price { + return Err(InvalidDeploy::GasPriceToleranceTooLow { + min_gas_price_tolerance: min_gas_price, + provided_gas_price_tolerance: gas_price_tolerance, + }); + } + + header.is_valid(config, timestamp_leeway, at, &self.hash)?; + + let max_associated_keys = chainspec.core_config.max_associated_keys; + if self.approvals.len() > max_associated_keys as usize { + debug!( + deploy_hash = 
%self.hash(), + number_of_associated_keys = %self.approvals.len(), + max_associated_keys = %max_associated_keys, + "number of associated keys exceeds the maximum limit" + ); + return Err(InvalidDeploy::ExcessiveApprovals { + got: self.approvals.len() as u32, + max_associated_keys, + }); + } + + let gas_limit = self.gas_limit(chainspec)?; + if gas_limit == Gas::zero() { + return Err(InvalidDeploy::InvalidPaymentAmount); + } + + let block_gas_limit = Gas::new(config.block_gas_limit); + if gas_limit > block_gas_limit { + debug!( + payment_amount = %gas_limit, + %block_gas_limit, + "transaction gas limit exceeds block gas limit" + ); + return Err(InvalidDeploy::ExceededBlockGasLimit { + block_gas_limit: config.block_gas_limit, + got: Box::new(gas_limit.value()), + }); + } + let lane_limit = lane_definition.max_transaction_gas_limit; + let lane_limit_as_gas = Gas::new(lane_limit); + if gas_limit > lane_limit_as_gas { + debug!( + calculated_lane = lane_definition.id, + payment_amount = %gas_limit, + %block_gas_limit, + "transaction gas limit exceeds lane limit" + ); + return Err(InvalidDeploy::ExceededLaneGasLimit { + lane_gas_limit: lane_limit, + got: Box::new(gas_limit.value()), + }); + } + + let payment_args_length = self.payment().args().serialized_length(); + if payment_args_length > config.deploy_config.payment_args_max_length as usize { + debug!( + payment_args_length, + payment_args_max_length = config.deploy_config.payment_args_max_length, + "payment args excessive" + ); + return Err(InvalidDeploy::ExcessivePaymentArgsLength { + max_length: config.deploy_config.payment_args_max_length as usize, + got: payment_args_length, + }); + } + + let session_args_length = self.session().args().serialized_length(); + if session_args_length > config.deploy_config.session_args_max_length as usize { + debug!( + session_args_length, + session_args_max_length = config.deploy_config.session_args_max_length, + "session args excessive" + ); + return 
Err(InvalidDeploy::ExcessiveSessionArgsLength { + max_length: config.deploy_config.session_args_max_length as usize, + got: session_args_length, + }); + } + + if self.session().is_transfer() { + let item = self.session().clone(); + let attempted = item + .args() + .get(ARG_AMOUNT) + .ok_or_else(|| { + debug!("missing transfer 'amount' runtime argument"); + InvalidDeploy::MissingTransferAmount + })? + .clone() + .into_t::() + .map_err(|_| { + debug!("failed to parse transfer 'amount' runtime argument as a U512"); + InvalidDeploy::FailedToParseTransferAmount + })?; + let minimum = U512::from(config.native_transfer_minimum_motes); + if attempted < minimum { + debug!( + minimum = %config.native_transfer_minimum_motes, + amount = %attempted, + "insufficient transfer amount" + ); + return Err(InvalidDeploy::InsufficientTransferAmount { + minimum: Box::new(minimum), + attempted: Box::new(attempted), + }); + } + } else { + let payment_args = self.payment().args(); + let payment_amount = payment_args + .get(ARG_AMOUNT) + .ok_or_else(|| { + debug!("missing transfer 'amount' runtime argument"); + InvalidDeploy::MissingTransferAmount + })? + .clone() + .into_t::() + .map_err(|_| { + debug!("failed to parse transfer 'amount' runtime argument as a U512"); + InvalidDeploy::FailedToParseTransferAmount + })?; + if payment_amount < U512::from(chainspec.core_config.baseline_motes_amount) { + return Err(InvalidDeploy::InvalidPaymentAmount); + } + } + + Ok(()) + } + + // This method is not intended to be used by third party crates. + // + // It is required to allow finalized approvals to be injected after reading a `Deploy` from + // storage. + #[doc(hidden)] + pub fn with_approvals(mut self, approvals: BTreeSet) -> Self { + self.approvals = approvals; + self + } + + // This method is not intended to be used by third party crates. + #[doc(hidden)] + #[cfg(feature = "json-schema")] + pub fn example() -> &'static Self { + &DEPLOY + } + + /// Returns a random `Deploy`. 
+ #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn random(rng: &mut TestRng) -> Self { + let timestamp = Timestamp::random(rng); + let ttl = TimeDiff::from_seconds(rng.gen_range(60..300)); + Deploy::random_with_timestamp_and_ttl(rng, timestamp, ttl) + } + + /// Returns a random `Deploy` but using the specified `timestamp` and `ttl`. + #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn random_with_timestamp_and_ttl( + rng: &mut TestRng, + timestamp: Timestamp, + ttl: TimeDiff, + ) -> Self { + let gas_price = rng.gen_range(1..100); + + let dependencies = vec![]; + let chain_name = String::from("casper-example"); + + // We need "amount" in order to be able to get correct info via `deploy_info()`. + let payment_args = runtime_args! { + "amount" => U512::from(DEFAULT_MAX_PAYMENT_MOTES), + }; + let payment = ExecutableDeployItem::StoredContractByName { + name: String::from("casper-example"), + entry_point: String::from("example-entry-point"), + args: payment_args, + }; + + let session = rng.gen(); + + let secret_key = SecretKey::random(rng); + + Deploy::new_signed( + timestamp, + ttl, + gas_price, + dependencies, + chain_name, + payment, + session, + &secret_key, + None, + ) + } + + /// Turns `self` into an invalid `Deploy` by clearing the `chain_name`, invalidating the deploy + /// hash. + #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn invalidate(&mut self) { + self.header.invalidate(); + } + + /// Returns a random `Deploy` for a native transfer. + #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn random_valid_native_transfer(rng: &mut TestRng) -> Self { + let timestamp = Timestamp::now(); + let ttl = TimeDiff::from_seconds(rng.gen_range(60..300)); + Self::random_valid_native_transfer_with_timestamp_and_ttl(rng, timestamp, ttl) + } + + /// Returns a random `Deploy` for a native transfer with timestamp and ttl. 
    #[cfg(any(all(feature = "std", feature = "testing"), test))]
    pub fn random_valid_native_transfer_with_timestamp_and_ttl(
        rng: &mut TestRng,
        timestamp: Timestamp,
        ttl: TimeDiff,
    ) -> Self {
        // Base deploy is only used as a source of header fields (gas price, deps, chain name).
        let deploy = Self::random_with_timestamp_and_ttl(rng, timestamp, ttl);
        let transfer_args = runtime_args! {
            "amount" => U512::from(DEFAULT_MIN_TRANSFER_MOTES),
            "source" => PublicKey::random(rng).to_account_hash(),
            "target" => PublicKey::random(rng).to_account_hash(),
        };
        let payment_amount = 10_000_000_000u64;
        let payment_args = runtime_args! {
            "amount" => U512::from(payment_amount),
        };
        let session = ExecutableDeployItem::Transfer {
            args: transfer_args,
        };
        // Empty module bytes with an "amount" arg denotes standard payment.
        let payment = ExecutableDeployItem::ModuleBytes {
            module_bytes: Bytes::new(),
            args: payment_args,
        };
        let secret_key = SecretKey::random(rng);
        Deploy::new_signed(
            timestamp,
            ttl,
            deploy.header.gas_price(),
            deploy.header.dependencies().clone(),
            deploy.header.chain_name().to_string(),
            payment,
            session,
            &secret_key,
            None,
        )
    }

    /// Returns a random `Deploy` for a native transfer with no dependencies.
    #[cfg(any(all(feature = "std", feature = "testing"), test))]
    pub fn random_valid_native_transfer_without_deps(rng: &mut TestRng) -> Self {
        let deploy = Self::random(rng);
        let transfer_args = runtime_args! {
            "amount" => U512::from(DEFAULT_MIN_TRANSFER_MOTES),
            "source" => PublicKey::random(rng).to_account_hash(),
            "target" => PublicKey::random(rng).to_account_hash(),
        };
        let payment_args = runtime_args! {
            "amount" => U512::from(10),
        };
        let session = ExecutableDeployItem::Transfer {
            args: transfer_args,
        };
        let payment = ExecutableDeployItem::ModuleBytes {
            module_bytes: Bytes::new(),
            args: payment_args,
        };
        let secret_key = SecretKey::random(rng);
        Deploy::new_signed(
            // Fresh timestamp; the base deploy's randomized timestamp is discarded.
            Timestamp::now(),
            deploy.header.ttl(),
            deploy.header.gas_price(),
            // No dependencies — the defining property of this constructor.
            vec![],
            deploy.header.chain_name().to_string(),
            payment,
            session,
            &secret_key,
            None,
        )
    }

    /// Returns a random invalid `Deploy` without a payment amount specified.
    #[cfg(any(all(feature = "std", feature = "testing"), test))]
    pub fn random_without_payment_amount(rng: &mut TestRng) -> Self {
        // Empty args: no "amount" key at all, so payment-amount validation must fail.
        let payment = ExecutableDeployItem::ModuleBytes {
            module_bytes: Bytes::new(),
            args: RuntimeArgs::default(),
        };
        Self::random_transfer_with_payment(rng, payment)
    }

    /// Returns a random invalid `Deploy` with an invalid value for the payment amount.
    #[cfg(any(all(feature = "std", feature = "testing"), test))]
    pub fn random_with_mangled_payment_amount(rng: &mut TestRng) -> Self {
        // "amount" is present but is a string, so parsing it as U512 must fail.
        let payment_args = runtime_args! {
            "amount" => "invalid-argument"
        };
        let payment = ExecutableDeployItem::ModuleBytes {
            module_bytes: Bytes::new(),
            args: payment_args,
        };
        Self::random_transfer_with_payment(rng, payment)
    }

    /// Returns a random invalid `Deploy` with insufficient payment amount.
    #[cfg(any(all(feature = "std", feature = "testing"), test))]
    pub fn random_with_payment_one(rng: &mut TestRng) -> Self {
        let timestamp = Timestamp::now();
        let ttl = TimeDiff::from_seconds(rng.gen_range(60..3600));
        // Payment of exactly one mote.
        let payment_args = runtime_args! {
            "amount" => U512::one()
        };
        let payment = ExecutableDeployItem::ModuleBytes {
            module_bytes: Bytes::new(),
            args: payment_args,
        };
        // Half-open range: gas price in 1..=3.
        let gas_price = rng.gen_range(1..4);

        let dependencies = vec![];
        let chain_name = String::from("casper-example");
        let session = rng.gen();

        let secret_key = SecretKey::random(rng);

        Deploy::new_signed(
            timestamp,
            ttl,
            gas_price,
            dependencies,
            chain_name,
            payment,
            session,
            &secret_key,
            None,
        )
    }

    /// Returns a random invalid `Deploy` with the given (insufficient) payment amount.
    #[cfg(any(all(feature = "std", feature = "testing"), test))]
    pub fn random_with_insufficient_payment_amount(
        rng: &mut TestRng,
        payment_amount: U512,
    ) -> Self {
        let payment_args = runtime_args! {
            "amount" => payment_amount
        };
        let payment = ExecutableDeployItem::ModuleBytes {
            module_bytes: Bytes::new(),
            args: payment_args,
        };
        Self::random_transfer_with_payment(rng, payment)
    }

    /// Returns a random invalid `Deploy` with an oversized payment amount.
    #[cfg(any(all(feature = "std", feature = "testing"), test))]
    pub fn random_with_oversized_payment_amount(rng: &mut TestRng) -> Self {
        let payment_args = runtime_args! {
            "amount" => U512::from(1_000_000_000_001u64)
        };
        let payment = ExecutableDeployItem::ModuleBytes {
            module_bytes: Bytes::new(),
            args: payment_args,
        };

        let session = ExecutableDeployItem::StoredContractByName {
            name: "Test".to_string(),
            entry_point: "call".to_string(),
            args: Default::default(),
        };

        // Header fields are borrowed from a valid transfer deploy; payment/session are replaced.
        let deploy = Self::random_valid_native_transfer(rng);
        let secret_key = SecretKey::random(rng);

        Deploy::new_signed(
            deploy.header.timestamp(),
            deploy.header.ttl(),
            deploy.header.gas_price(),
            deploy.header.dependencies().clone(),
            deploy.header.chain_name().to_string(),
            payment,
            session,
            &secret_key,
            None,
        )
    }

    /// Returns a random `Deploy` with custom payment specified as a stored contract by name.
+ #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn random_with_valid_custom_payment_contract_by_name(rng: &mut TestRng) -> Self { + let payment = ExecutableDeployItem::StoredContractByName { + name: "Test".to_string(), + entry_point: "call".to_string(), + args: Default::default(), + }; + Self::random_transfer_with_payment(rng, payment) + } + + /// Returns a random invalid `Deploy` with custom payment specified as a stored contract by + /// hash, but missing the runtime args. + #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn random_with_missing_payment_contract_by_hash(rng: &mut TestRng) -> Self { + let payment = ExecutableDeployItem::StoredContractByHash { + hash: [19; 32].into(), + entry_point: "call".to_string(), + args: Default::default(), + }; + Self::random_transfer_with_payment(rng, payment) + } + + /// Returns a random invalid `Deploy` with custom payment specified as a stored contract by + /// hash, but calling an invalid entry point. + #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn random_with_missing_entry_point_in_payment_contract(rng: &mut TestRng) -> Self { + let payment = ExecutableDeployItem::StoredContractByHash { + hash: [19; 32].into(), + entry_point: "non-existent-entry-point".to_string(), + args: Default::default(), + }; + Self::random_transfer_with_payment(rng, payment) + } + + /// Returns a random `Deploy` with custom payment specified as a stored versioned contract by + /// name. 
    // NOTE(review): the generic argument of `Option` below was lost in extraction; reconstructed
    // as `u32` from the `Some(6u32)` usage further down — confirm against the original source.
    #[cfg(any(all(feature = "std", feature = "testing"), test))]
    pub fn random_with_versioned_payment_package_by_name(
        version: Option<u32>,
        rng: &mut TestRng,
    ) -> Self {
        let payment = ExecutableDeployItem::StoredVersionedContractByName {
            name: "Test".to_string(),
            version,
            entry_point: "call".to_string(),
            args: Default::default(),
        };
        Self::random_transfer_with_payment(rng, payment)
    }

    /// Returns a random `Deploy` with custom payment specified as a stored versioned contract by
    /// name.
    #[cfg(any(all(feature = "std", feature = "testing"), test))]
    pub fn random_with_valid_custom_payment_package_by_name(rng: &mut TestRng) -> Self {
        // `None` selects the latest contract version.
        Self::random_with_versioned_payment_package_by_name(None, rng)
    }

    /// Returns a random invalid `Deploy` with custom payment specified as a stored versioned
    /// contract by hash, but missing the runtime args.
    #[cfg(any(all(feature = "std", feature = "testing"), test))]
    pub fn random_with_missing_payment_package_by_hash(rng: &mut TestRng) -> Self {
        Self::random_with_payment_package_version_by_hash(None, rng)
    }

    /// Returns a random invalid `Deploy` with custom payment specified as a stored versioned
    /// contract by hash, but calling an invalid entry point.
    #[cfg(any(all(feature = "std", feature = "testing"), test))]
    pub fn random_with_nonexistent_contract_version_in_payment_package(rng: &mut TestRng) -> Self {
        let payment = ExecutableDeployItem::StoredVersionedContractByHash {
            hash: [19; 32].into(),
            // Version 6 is assumed not to exist in the test fixture's package.
            version: Some(6u32),
            entry_point: "non-existent-entry-point".to_string(),
            args: Default::default(),
        };
        Self::random_transfer_with_payment(rng, payment)
    }

    /// Returns a random invalid `Deploy` with custom payment specified as a stored versioned
    /// contract by hash, but missing the runtime args.
+ #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn random_with_payment_package_version_by_hash( + version: Option, + rng: &mut TestRng, + ) -> Self { + let payment = ExecutableDeployItem::StoredVersionedContractByHash { + hash: Default::default(), + version, + entry_point: "call".to_string(), + args: Default::default(), + }; + Self::random_transfer_with_payment(rng, payment) + } + + /// Returns a random `Deploy` with custom session specified as a stored contract by name. + #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn random_with_valid_session_contract_by_name(rng: &mut TestRng) -> Self { + let session = ExecutableDeployItem::StoredContractByName { + name: "Test".to_string(), + entry_point: "call".to_string(), + args: Default::default(), + }; + Self::random_transfer_with_session(rng, session) + } + + /// Returns a random invalid `Deploy` with custom session specified as a stored contract by + /// hash, but missing the runtime args. + #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn random_with_missing_session_contract_by_hash(rng: &mut TestRng) -> Self { + let session = ExecutableDeployItem::StoredContractByHash { + hash: Default::default(), + entry_point: "call".to_string(), + args: Default::default(), + }; + Self::random_transfer_with_session(rng, session) + } + + /// Returns a random invalid `Deploy` with custom session specified as a stored contract by + /// hash, but calling an invalid entry point. + #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn random_with_missing_entry_point_in_session_contract(rng: &mut TestRng) -> Self { + let timestamp = Timestamp::now(); + let ttl = TimeDiff::from_seconds(rng.gen_range(60..3600)); + let session = ExecutableDeployItem::StoredContractByHash { + hash: [19; 32].into(), + entry_point: "non-existent-entry-point".to_string(), + args: Default::default(), + }; + + let payment_amount = 10_000_000_000u64; + let payment_args = runtime_args! 
{ + "amount" => U512::from(payment_amount) + }; + let payment = ExecutableDeployItem::ModuleBytes { + module_bytes: Bytes::new(), + args: payment_args, + }; + let gas_price = rng.gen_range(1..4); + + let dependencies = vec![]; + let chain_name = String::from("casper-example"); + + let secret_key = SecretKey::random(rng); + + Deploy::new_signed( + timestamp, + ttl, + gas_price, + dependencies, + chain_name, + payment, + session, + &secret_key, + None, + ) + } + + /// Returns a random `Deploy` with custom session specified as a stored versioned contract by + /// name. + #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn random_with_valid_session_package_by_name(rng: &mut TestRng) -> Self { + Self::random_with_versioned_session_package_by_name(None, rng) + } + + /// Returns a random `Deploy` with custom session specified as a stored versioned contract by + /// name. + #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn random_with_versioned_session_package_by_name( + version: Option, + rng: &mut TestRng, + ) -> Self { + let session = ExecutableDeployItem::StoredVersionedContractByName { + name: "Test".to_string(), + version, + entry_point: "call".to_string(), + args: Default::default(), + }; + Self::random_transfer_with_session(rng, session) + } + + /// Returns a random deploy with custom session specified as a stored versioned contract by + /// name. + #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn random_contract_by_name( + rng: &mut TestRng, + maybe_secret_key: Option, + maybe_contract_name: Option, + maybe_entry_point_name: Option, + maybe_timestamp: Option, + maybe_ttl: Option, + ) -> Self { + let payment_args = runtime_args! 
{ + "amount" => U512::from(10), + }; + let payment = ExecutableDeployItem::ModuleBytes { + module_bytes: Bytes::new(), + args: payment_args, + }; + let contract_name = maybe_contract_name.unwrap_or_else(|| "Test".to_string()); + let entry_point_name = maybe_entry_point_name.unwrap_or_else(|| "Test".to_string()); + let session = ExecutableDeployItem::StoredVersionedContractByName { + name: contract_name, + version: None, + entry_point: entry_point_name, + args: Default::default(), + }; + let secret_key = match maybe_secret_key { + None => SecretKey::random(rng), + Some(secret_key) => secret_key, + }; + let timestamp = maybe_timestamp.unwrap_or_else(Timestamp::now); + let ttl = match maybe_ttl { + None => TimeDiff::from_seconds(rng.gen_range(60..3600)), + Some(ttl) => ttl, + }; + Deploy::new_signed( + timestamp, + ttl, + 1, + vec![], + "test_chain".to_string(), + payment, + session, + &secret_key, + None, + ) + } + + /// Returns a random invalid `Deploy` with custom session specified as a stored versioned + /// contract by hash, but missing the runtime args. + #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn random_with_missing_session_package_by_hash(rng: &mut TestRng) -> Self { + Self::random_with_versioned_session_package_by_hash(None, rng) + } + + #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn random_with_versioned_session_package_by_hash( + version: Option, + rng: &mut TestRng, + ) -> Self { + let session = ExecutableDeployItem::StoredVersionedContractByHash { + hash: Default::default(), + version, + entry_point: "call".to_string(), + args: Default::default(), + }; + Self::random_transfer_with_session(rng, session) + } + + /// Returns a random invalid transfer `Deploy` with the "target" runtime arg missing. + #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn random_without_transfer_target(rng: &mut TestRng) -> Self { + let transfer_args = runtime_args! 
{ + "amount" => U512::from(DEFAULT_MIN_TRANSFER_MOTES), + "source" => PublicKey::random(rng).to_account_hash(), + }; + let session = ExecutableDeployItem::Transfer { + args: transfer_args, + }; + Self::random_transfer_with_session(rng, session) + } + + /// Returns a random invalid transfer `Deploy` with the "amount" runtime arg missing. + #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn random_without_transfer_amount(rng: &mut TestRng) -> Self { + let transfer_args = runtime_args! { + "source" => PublicKey::random(rng).to_account_hash(), + "target" => PublicKey::random(rng).to_account_hash(), + }; + let session = ExecutableDeployItem::Transfer { + args: transfer_args, + }; + Self::random_transfer_with_session(rng, session) + } + + /// Returns a random invalid transfer `Deploy` with an invalid "amount" runtime arg. + #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn random_with_mangled_transfer_amount(rng: &mut TestRng) -> Self { + let transfer_args = runtime_args! { + "amount" => "mangled-transfer-amount", + "source" => PublicKey::random(rng).to_account_hash(), + "target" => PublicKey::random(rng).to_account_hash(), + }; + let session = ExecutableDeployItem::Transfer { + args: transfer_args, + }; + Self::random_transfer_with_session(rng, session) + } + + /// Returns a random invalid `Deploy` with empty session bytes. + #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn random_with_empty_session_module_bytes(rng: &mut TestRng) -> Self { + let session = ExecutableDeployItem::ModuleBytes { + module_bytes: Bytes::new(), + args: Default::default(), + }; + let timestamp = Timestamp::now(); + let ttl = TimeDiff::from_seconds(rng.gen_range(60..3600)); + let amount = 10_000_000_000u64; + let payment_args = runtime_args! 
{ + "amount" => U512::from(amount) + }; + let payment = ExecutableDeployItem::ModuleBytes { + module_bytes: Bytes::new(), + args: payment_args, + }; + let gas_price = 1; + + let dependencies = vec![]; + let chain_name = String::from("casper-example"); + + let secret_key = SecretKey::random(rng); + + Deploy::new_signed( + timestamp, + ttl, + gas_price, + dependencies, + chain_name, + payment, + session, + &secret_key, + None, + ) + } + + /// Returns a random invalid `Deploy` with an expired TTL. + #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn random_expired_deploy(rng: &mut TestRng) -> Self { + let deploy = Self::random_valid_native_transfer(rng); + let secret_key = SecretKey::random(rng); + + Deploy::new_signed( + Timestamp::zero(), + TimeDiff::from_seconds(1u32), + deploy.header.gas_price(), + deploy.header.dependencies().clone(), + deploy.header.chain_name().to_string(), + deploy.payment, + deploy.session, + &secret_key, + None, + ) + } + + /// Returns a random `Deploy` with native transfer as payment code. + #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn random_with_native_transfer_in_payment_logic(rng: &mut TestRng) -> Self { + let transfer_args = runtime_args! 
{ + "amount" => U512::from(DEFAULT_MIN_TRANSFER_MOTES), + "source" => PublicKey::random(rng).to_account_hash(), + "target" => PublicKey::random(rng).to_account_hash(), + }; + let payment = ExecutableDeployItem::Transfer { + args: transfer_args, + }; + Self::random_transfer_with_payment(rng, payment) + } + + #[cfg(any(all(feature = "std", feature = "testing"), test))] + fn random_transfer_with_payment(rng: &mut TestRng, payment: ExecutableDeployItem) -> Self { + let deploy = Self::random_valid_native_transfer(rng); + let secret_key = SecretKey::random(rng); + + Deploy::new_signed( + deploy.header.timestamp(), + deploy.header.ttl(), + deploy.header.gas_price(), + deploy.header.dependencies().clone(), + deploy.header.chain_name().to_string(), + payment, + deploy.session, + &secret_key, + None, + ) + } + + #[cfg(any(all(feature = "std", feature = "testing"), test))] + fn random_transfer_with_session(rng: &mut TestRng, session: ExecutableDeployItem) -> Self { + let deploy = Self::random_valid_native_transfer(rng); + let secret_key = SecretKey::random(rng); + + Deploy::new_signed( + deploy.header.timestamp(), + deploy.header.ttl(), + deploy.header.gas_price(), + deploy.header.dependencies().clone(), + deploy.header.chain_name().to_string(), + deploy.payment, + session, + &secret_key, + None, + ) + } + + /// Returns a random valid `Deploy` with specified gas price. + #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn random_with_gas_price(rng: &mut TestRng, gas_price: u64) -> Self { + let deploy = Self::random(rng); + let secret_key = SecretKey::random(rng); + + Deploy::new_signed( + deploy.header.timestamp(), + deploy.header.ttl(), + gas_price, + deploy.header.dependencies().clone(), + deploy.header.chain_name().to_string(), + deploy.payment, + deploy.session, + &secret_key, + None, + ) + } + + /// Creates an add bid deploy, for testing. 
    #[cfg(any(all(feature = "std", feature = "testing"), test))]
    pub fn add_bid(
        chain_name: String,
        auction_contract_hash: AddressableEntityHash,
        public_key: PublicKey,
        bid_amount: U512,
        delegation_rate: u8,
        timestamp: Timestamp,
        ttl: TimeDiff,
    ) -> Self {
        // Standard payment with a hard-coded 100 billion motes.
        let payment = ExecutableDeployItem::ModuleBytes {
            module_bytes: Bytes::new(),
            args: runtime_args! { ARG_AMOUNT => U512::from(100_000_000_000u64) },
        };
        let args = runtime_args! {
            ARG_AUCTION_AMOUNT => bid_amount,
            ARG_AUCTION_PUBLIC_KEY => public_key.clone(),
            ARG_DELEGATION_RATE => delegation_rate,
        };
        let session = ExecutableDeployItem::StoredContractByHash {
            hash: auction_contract_hash.into(),
            entry_point: METHOD_ADD_BID.to_string(),
            args,
        };

        // NOTE(review): built via the `InitiatorAddr` variant (no secret key), so presumably the
        // returned deploy carries no approvals — confirm against `Deploy::build`.
        Deploy::build(
            timestamp,
            ttl,
            1,
            vec![],
            chain_name,
            payment,
            session,
            InitiatorAddrAndSecretKey::InitiatorAddr(InitiatorAddr::PublicKey(public_key)),
        )
    }

    /// Creates a withdraw bid deploy, for testing.
    #[cfg(any(all(feature = "std", feature = "testing"), test))]
    pub fn withdraw_bid(
        chain_name: String,
        auction_contract_hash: AddressableEntityHash,
        public_key: PublicKey,
        amount: U512,
        timestamp: Timestamp,
        ttl: TimeDiff,
    ) -> Self {
        // Standard payment of 3 billion motes (smaller than `add_bid`'s payment).
        let payment = ExecutableDeployItem::ModuleBytes {
            module_bytes: Bytes::new(),
            args: runtime_args! { ARG_AMOUNT => U512::from(3_000_000_000_u64) },
        };
        let args = runtime_args! {
            ARG_AUCTION_AMOUNT => amount,
            ARG_AUCTION_PUBLIC_KEY => public_key.clone(),
        };
        let session = ExecutableDeployItem::StoredContractByHash {
            hash: auction_contract_hash.into(),
            entry_point: METHOD_WITHDRAW_BID.to_string(),
            args,
        };

        Deploy::build(
            timestamp,
            ttl,
            1,
            vec![],
            chain_name,
            payment,
            session,
            InitiatorAddrAndSecretKey::InitiatorAddr(InitiatorAddr::PublicKey(public_key)),
        )
    }

    /// Creates a delegate deploy, for testing.
    #[cfg(any(all(feature = "std", feature = "testing"), test))]
    pub fn delegate(
        chain_name: String,
        auction_contract_hash: AddressableEntityHash,
        validator_public_key: PublicKey,
        delegator_public_key: PublicKey,
        amount: U512,
        timestamp: Timestamp,
        ttl: TimeDiff,
    ) -> Self {
        // Standard payment of 3 billion motes.
        let payment = ExecutableDeployItem::ModuleBytes {
            module_bytes: Bytes::new(),
            args: runtime_args! { ARG_AMOUNT => U512::from(3_000_000_000_u64) },
        };
        let args = runtime_args! {
            ARG_DELEGATOR => delegator_public_key.clone(),
            ARG_VALIDATOR => validator_public_key,
            ARG_AUCTION_AMOUNT => amount,
        };
        let session = ExecutableDeployItem::StoredContractByHash {
            hash: auction_contract_hash.into(),
            entry_point: METHOD_DELEGATE.to_string(),
            args,
        };

        // The delegator is the initiator of the deploy.
        Deploy::build(
            timestamp,
            ttl,
            1,
            vec![],
            chain_name,
            payment,
            session,
            InitiatorAddrAndSecretKey::InitiatorAddr(InitiatorAddr::PublicKey(
                delegator_public_key,
            )),
        )
    }

    /// Creates an undelegate deploy, for testing.
    #[cfg(any(all(feature = "std", feature = "testing"), test))]
    pub fn undelegate(
        chain_name: String,
        auction_contract_hash: AddressableEntityHash,
        validator_public_key: PublicKey,
        delegator_public_key: PublicKey,
        amount: U512,
        timestamp: Timestamp,
        ttl: TimeDiff,
    ) -> Self {
        let payment = ExecutableDeployItem::ModuleBytes {
            module_bytes: Bytes::new(),
            args: runtime_args! { ARG_AMOUNT => U512::from(3_000_000_000_u64) },
        };
        // Same argument set as `delegate`; only the entry point differs.
        let args = runtime_args! {
            ARG_DELEGATOR => delegator_public_key.clone(),
            ARG_VALIDATOR => validator_public_key,
            ARG_AUCTION_AMOUNT => amount,
        };
        let session = ExecutableDeployItem::StoredContractByHash {
            hash: auction_contract_hash.into(),
            entry_point: METHOD_UNDELEGATE.to_string(),
            args,
        };

        Deploy::build(
            timestamp,
            ttl,
            1,
            vec![],
            chain_name,
            payment,
            session,
            InitiatorAddrAndSecretKey::InitiatorAddr(InitiatorAddr::PublicKey(
                delegator_public_key,
            )),
        )
    }

    /// Creates a redelegate deploy, for testing.
    #[cfg(any(all(feature = "std", feature = "testing"), test))]
    #[allow(clippy::too_many_arguments)]
    pub fn redelegate(
        chain_name: String,
        auction_contract_hash: AddressableEntityHash,
        validator_public_key: PublicKey,
        delegator_public_key: PublicKey,
        redelegate_validator_public_key: PublicKey,
        amount: U512,
        timestamp: Timestamp,
        ttl: TimeDiff,
    ) -> Self {
        let payment = ExecutableDeployItem::ModuleBytes {
            module_bytes: Bytes::new(),
            args: runtime_args! { ARG_AMOUNT => U512::from(3_000_000_000_u64) },
        };
        let args = runtime_args! {
            ARG_DELEGATOR => delegator_public_key.clone(),
            ARG_VALIDATOR => validator_public_key,
            // The validator to move the stake to.
            ARG_NEW_VALIDATOR => redelegate_validator_public_key,
            ARG_AUCTION_AMOUNT => amount,
        };
        let session = ExecutableDeployItem::StoredContractByHash {
            hash: auction_contract_hash.into(),
            entry_point: METHOD_REDELEGATE.to_string(),
            args,
        };

        Deploy::build(
            timestamp,
            ttl,
            1,
            vec![],
            chain_name,
            payment,
            session,
            InitiatorAddrAndSecretKey::InitiatorAddr(InitiatorAddr::PublicKey(
                delegator_public_key,
            )),
        )
    }

    /// Creates a native transfer, for testing.
+ #[cfg(any(all(feature = "std", feature = "testing"), test))] + #[allow(clippy::too_many_arguments)] + pub fn native_transfer( + chain_name: String, + source_purse: Option, + sender_public_key: PublicKey, + receiver_public_key: PublicKey, + amount: Option, + timestamp: Timestamp, + ttl: TimeDiff, + gas_price: u64, + ) -> Self { + let amount = amount.unwrap_or_else(|| U512::from(DEFAULT_MIN_TRANSFER_MOTES)); + + let payment = ExecutableDeployItem::ModuleBytes { + module_bytes: Bytes::new(), + args: runtime_args! { ARG_AMOUNT => U512::from(3_000_000_000_u64) }, + }; + + let mut transfer_args = runtime_args! { + "amount" => amount, + "target" => receiver_public_key.to_account_hash(), + }; + + if let Some(source) = source_purse { + transfer_args + .insert("source", source) + .expect("should serialize source arg"); + } + + let session = ExecutableDeployItem::Transfer { + args: transfer_args, + }; + + Deploy::build( + timestamp, + ttl, + gas_price, + vec![], + chain_name, + payment, + session, + InitiatorAddrAndSecretKey::InitiatorAddr(InitiatorAddr::PublicKey(sender_public_key)), + ) + } +} + +#[cfg(any(feature = "std", test))] +impl GasLimited for Deploy { + type Error = InvalidDeploy; + + fn gas_cost(&self, chainspec: &Chainspec, gas_price: u8) -> Result { + let gas_limit = self.gas_limit(chainspec)?; + let motes = + Motes::from_gas(gas_limit, gas_price).ok_or(InvalidDeploy::UnableToCalculateGasCost)?; + Ok(motes) + } + + fn gas_limit(&self, chainspec: &Chainspec) -> Result { + let pricing_handling = chainspec.core_config.pricing_handling; + let costs = &chainspec.system_costs_config; + let gas_limit = match pricing_handling { + PricingHandling::PaymentLimited => { + // in the original implementation, for standard deploys the payment amount + // specified by the sender is the gas limit (up to the max block limit). 
+ if self.is_transfer() { + Gas::new(costs.mint_costs().transfer) + } else { + let value = self + .payment() + .args() + .get(ARG_AMOUNT) + .ok_or(InvalidDeploy::MissingPaymentAmount)?; + let payment_amount = value + .clone() + .into_t::() + .map_err(|_| InvalidDeploy::FailedToParsePaymentAmount)?; + Gas::new(payment_amount) + } + } + PricingHandling::Fixed => { + let v1_config = &chainspec.transaction_config.transaction_v1_config; + let lane_id = calculate_lane_id_for_deploy(self, pricing_handling, v1_config)?; + let lane_definition = v1_config + .get_lane_by_id(lane_id) + .ok_or(InvalidDeploy::NoLaneMatch)?; + let computation_limit = lane_definition.max_transaction_gas_limit; + Gas::new(computation_limit) + } // legacy deploys do not support prepaid + }; + Ok(gas_limit) + } + + fn gas_price_tolerance(&self) -> Result { + u8::try_from(self.gas_price()).map_err(|_| Self::Error::UnableToCalculateGasLimit) + } +} + +impl hash::Hash for Deploy { + fn hash(&self, state: &mut H) { + // Destructure to make sure we don't accidentally omit fields. + #[cfg(any(feature = "once_cell", test))] + let Deploy { + hash, + header, + payment, + session, + approvals, + is_valid: _, + } = self; + #[cfg(not(any(feature = "once_cell", test)))] + let Deploy { + hash, + header, + payment, + session, + approvals, + } = self; + hash.hash(state); + header.hash(state); + payment.hash(state); + session.hash(state); + approvals.hash(state); + } +} + +impl PartialEq for Deploy { + fn eq(&self, other: &Deploy) -> bool { + // Destructure to make sure we don't accidentally omit fields. 
+ #[cfg(any(feature = "once_cell", test))] + let Deploy { + hash, + header, + payment, + session, + approvals, + is_valid: _, + } = self; + #[cfg(not(any(feature = "once_cell", test)))] + let Deploy { + hash, + header, + payment, + session, + approvals, + } = self; + *hash == other.hash + && *header == other.header + && *payment == other.payment + && *session == other.session + && *approvals == other.approvals + } +} + +impl Ord for Deploy { + fn cmp(&self, other: &Deploy) -> cmp::Ordering { + // Destructure to make sure we don't accidentally omit fields. + #[cfg(any(feature = "once_cell", test))] + let Deploy { + hash, + header, + payment, + session, + approvals, + is_valid: _, + } = self; + #[cfg(not(any(feature = "once_cell", test)))] + let Deploy { + hash, + header, + payment, + session, + approvals, + } = self; + hash.cmp(&other.hash) + .then_with(|| header.cmp(&other.header)) + .then_with(|| payment.cmp(&other.payment)) + .then_with(|| session.cmp(&other.session)) + .then_with(|| approvals.cmp(&other.approvals)) + } +} + +impl PartialOrd for Deploy { + fn partial_cmp(&self, other: &Deploy) -> Option { + Some(self.cmp(other)) + } +} + +impl ToBytes for Deploy { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.header.serialized_length() + + self.hash.serialized_length() + + self.payment.serialized_length() + + self.session.serialized_length() + + self.approvals.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.header.write_bytes(writer)?; + self.hash.write_bytes(writer)?; + self.payment.write_bytes(writer)?; + self.session.write_bytes(writer)?; + self.approvals.write_bytes(writer) + } +} + +impl FromBytes for Deploy { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (header, remainder) = 
DeployHeader::from_bytes(bytes)?; + let (hash, remainder) = DeployHash::from_bytes(remainder)?; + let (payment, remainder) = ExecutableDeployItem::from_bytes(remainder)?; + let (session, remainder) = ExecutableDeployItem::from_bytes(remainder)?; + let (approvals, remainder) = BTreeSet::::from_bytes(remainder)?; + let maybe_valid_deploy = Deploy { + header, + hash, + payment, + session, + approvals, + #[cfg(any(feature = "once_cell", test))] + is_valid: OnceCell::new(), + }; + Ok((maybe_valid_deploy, remainder)) + } +} + +impl Display for Deploy { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + write!( + formatter, + "deploy[{}, {}, payment_code: {}, session_code: {}, approvals: {}]", + self.hash, + self.header, + self.payment, + self.session, + DisplayIter::new(self.approvals.iter()) + ) + } +} + +fn serialize_header(header: &DeployHeader) -> Vec { + header + .to_bytes() + .unwrap_or_else(|error| panic!("should serialize deploy header: {}", error)) +} + +fn serialize_body(payment: &ExecutableDeployItem, session: &ExecutableDeployItem) -> Vec { + let mut buffer = Vec::with_capacity(payment.serialized_length() + session.serialized_length()); + payment + .write_bytes(&mut buffer) + .unwrap_or_else(|error| panic!("should serialize payment code: {}", error)); + session + .write_bytes(&mut buffer) + .unwrap_or_else(|error| panic!("should serialize session code: {}", error)); + buffer +} + +/// Computationally expensive validity check for a given deploy instance, including asymmetric_key +/// signing verification. 
+fn validate_deploy(deploy: &Deploy) -> Result<(), InvalidDeploy> { + if deploy.approvals.is_empty() { + #[cfg(any(all(feature = "std", feature = "testing"), test))] + warn!(?deploy, "deploy has no approvals"); + return Err(InvalidDeploy::EmptyApprovals); + } + + deploy.has_valid_hash()?; + + for (index, approval) in deploy.approvals.iter().enumerate() { + if let Err(error) = crypto::verify(deploy.hash, approval.signature(), approval.signer()) { + #[cfg(any(all(feature = "std", feature = "testing"), test))] + warn!(?deploy, "failed to verify approval {}: {}", index, error); + return Err(InvalidDeploy::InvalidApproval { index, error }); + } + } + + Ok(()) +} + +#[cfg(any(feature = "std", test))] +/// Calculate lane id for deploy +pub fn calculate_lane_id_for_deploy( + deploy: &Deploy, + pricing_handling: PricingHandling, + config: &TransactionV1Config, +) -> Result { + if deploy.is_transfer() { + return Ok(MINT_LANE_ID); + } + let size_estimation = deploy.serialized_length() as u64; + let runtime_args_size = (deploy.payment().args().serialized_length() + + deploy.session().args().serialized_length()) as u64; + + let gas_price_tolerance = deploy.gas_price_tolerance()?; + let pricing_mode = match pricing_handling { + PricingHandling::PaymentLimited => { + let is_standard_payment = deploy.payment().is_standard_payment(Phase::Payment); + let value = deploy + .payment() + .args() + .get(ARG_AMOUNT) + .ok_or(InvalidDeploy::MissingPaymentAmount)?; + let payment_amount = value + .clone() + .into_t::() + .map_err(|_| InvalidDeploy::FailedToParsePaymentAmount)? 
+ .as_u64(); + PricingMode::PaymentLimited { + payment_amount, + gas_price_tolerance, + standard_payment: is_standard_payment, + } + } + PricingHandling::Fixed => PricingMode::Fixed { + gas_price_tolerance, + // additional_computation_factor is not representable for Deploys, we default to 0 + additional_computation_factor: 0, + }, + }; + + get_lane_for_non_install_wasm(config, &pricing_mode, size_estimation, runtime_args_size) + .map_err(Into::into) +} + +#[cfg(test)] +mod tests { + use std::{iter, time::Duration}; + + use super::*; + use crate::{CLValue, TransactionConfig}; + + #[test] + fn json_roundtrip() { + let mut rng = TestRng::new(); + let deploy = Deploy::random(&mut rng); + let json_string = serde_json::to_string_pretty(&deploy).unwrap(); + let decoded = serde_json::from_str(&json_string).unwrap(); + assert_eq!(deploy, decoded); + } + + #[test] + fn bincode_roundtrip() { + let mut rng = TestRng::new(); + let deploy = Deploy::random(&mut rng); + let serialized = bincode::serialize(&deploy).unwrap(); + let deserialized = bincode::deserialize(&serialized).unwrap(); + assert_eq!(deploy, deserialized); + } + + #[test] + fn bytesrepr_roundtrip() { + let mut rng = TestRng::new(); + let deploy = Deploy::random(&mut rng); + bytesrepr::test_serialization_roundtrip(deploy.header()); + bytesrepr::test_serialization_roundtrip(&deploy); + } + + fn create_deploy( + rng: &mut TestRng, + ttl: TimeDiff, + dependency_count: usize, + chain_name: &str, + gas_price: u64, + ) -> Deploy { + let secret_key = SecretKey::random(rng); + let dependencies = iter::repeat_with(|| DeployHash::random(rng)) + .take(dependency_count) + .collect(); + let transfer_args = { + let mut transfer_args = RuntimeArgs::new(); + let value = CLValue::from_t(U512::from(DEFAULT_MIN_TRANSFER_MOTES)) + .expect("should create CLValue"); + transfer_args.insert_cl_value("amount", value); + transfer_args + }; + Deploy::new_signed( + Timestamp::now(), + ttl, + gas_price, + dependencies, + 
chain_name.to_string(), + ExecutableDeployItem::ModuleBytes { + module_bytes: Bytes::new(), + args: RuntimeArgs::new(), + }, + ExecutableDeployItem::Transfer { + args: transfer_args, + }, + &secret_key, + None, + ) + } + + #[test] + fn is_valid() { + const GAS_PRICE_TOLERANCE: u8 = u8::MAX; + + let mut rng = TestRng::new(); + + let deploy = create_deploy( + &mut rng, + TransactionConfig::default().max_ttl, + 0, + "net-1", + GAS_PRICE_TOLERANCE as u64, + ); + assert_eq!( + deploy.is_valid.get(), + None, + "is valid should initially be None" + ); + deploy.is_valid().expect("should be valid"); + assert_eq!( + deploy.is_valid.get(), + Some(&Ok(())), + "is valid should be true" + ); + } + + fn check_is_not_valid(invalid_deploy: Deploy, expected_error: InvalidDeploy) { + assert!( + invalid_deploy.is_valid.get().is_none(), + "is valid should initially be None" + ); + let actual_error = invalid_deploy.is_valid().unwrap_err(); + + // Ignore the `error_msg` field of `InvalidApproval` when comparing to expected error, as + // this makes the test too fragile. Otherwise expect the actual error should exactly match + // the expected error. + match expected_error { + InvalidDeploy::InvalidApproval { + index: expected_index, + .. + } => match actual_error { + InvalidDeploy::InvalidApproval { + index: actual_index, + .. + } => { + assert_eq!(actual_index, expected_index); + } + _ => panic!("expected {}, got: {}", expected_error, actual_error), + }, + _ => { + assert_eq!(actual_error, expected_error,); + } + } + + // The actual error should have been lazily initialized correctly. 
+ assert_eq!( + invalid_deploy.is_valid.get(), + Some(&Err(actual_error)), + "is valid should now be Some" + ); + } + + #[test] + fn not_valid_due_to_invalid_body_hash() { + const GAS_PRICE_TOLERANCE: u8 = u8::MAX; + + let mut rng = TestRng::new(); + let mut deploy = create_deploy( + &mut rng, + TransactionConfig::default().max_ttl, + 0, + "net-1", + GAS_PRICE_TOLERANCE as u64, + ); + + deploy.session = ExecutableDeployItem::Transfer { + args: runtime_args! { + "amount" => 1 + }, + }; + check_is_not_valid(deploy, InvalidDeploy::InvalidBodyHash); + } + + #[test] + fn not_valid_due_to_invalid_deploy_hash() { + const GAS_PRICE_TOLERANCE: u8 = u8::MAX; + + let mut rng = TestRng::new(); + let mut deploy = create_deploy( + &mut rng, + TransactionConfig::default().max_ttl, + 0, + "net-1", + GAS_PRICE_TOLERANCE as u64, + ); + + // deploy.header.gas_price = 2; + deploy.invalidate(); + check_is_not_valid(deploy, InvalidDeploy::InvalidDeployHash); + } + + #[test] + fn not_valid_due_to_empty_approvals() { + const GAS_PRICE_TOLERANCE: u8 = u8::MAX; + + let mut rng = TestRng::new(); + let mut deploy = create_deploy( + &mut rng, + TransactionConfig::default().max_ttl, + 0, + "net-1", + GAS_PRICE_TOLERANCE as u64, + ); + deploy.approvals = BTreeSet::new(); + assert!(deploy.approvals.is_empty()); + check_is_not_valid(deploy, InvalidDeploy::EmptyApprovals) + } + + #[test] + fn not_valid_due_to_invalid_approval() { + const GAS_PRICE_TOLERANCE: u8 = u8::MAX; + + let mut rng = TestRng::new(); + let mut deploy = create_deploy( + &mut rng, + TransactionConfig::default().max_ttl, + 0, + "net-1", + GAS_PRICE_TOLERANCE as u64, + ); + + let deploy2 = Deploy::random(&mut rng); + + deploy.approvals.extend(deploy2.approvals.clone()); + // the expected index for the invalid approval will be the first index at which there is an + // approval coming from deploy2 + let expected_index = deploy + .approvals + .iter() + .enumerate() + .find(|(_, approval)| deploy2.approvals.contains(approval)) + 
.map(|(index, _)| index) + .unwrap(); + check_is_not_valid( + deploy, + InvalidDeploy::InvalidApproval { + index: expected_index, + error: crypto::Error::SignatureError, // This field is ignored in the check. + }, + ); + } + + #[test] + fn is_acceptable() { + const GAS_PRICE_TOLERANCE: u8 = u8::MAX; + + let mut rng = TestRng::new(); + let chain_name = "net-1".to_string(); + let mut chainspec = Chainspec::default(); + chainspec.with_chain_name(chain_name.to_string()); + let config = chainspec.transaction_config.clone(); + + let deploy = create_deploy( + &mut rng, + config.max_ttl, + 0, + &chain_name, + GAS_PRICE_TOLERANCE as u64, + ); + let current_timestamp = deploy.header().timestamp(); + deploy + .is_config_compliant(&chainspec, TimeDiff::default(), current_timestamp) + .expect("should be acceptable"); + } + + #[test] + fn not_acceptable_due_to_invalid_chain_name() { + const GAS_PRICE_TOLERANCE: u8 = u8::MAX; + + let mut rng = TestRng::new(); + let expected_chain_name = "net-1"; + let wrong_chain_name = "net-2".to_string(); + + let mut chainspec = Chainspec::default(); + chainspec.with_chain_name(expected_chain_name.to_string()); + let config = chainspec.transaction_config.clone(); + + let deploy = create_deploy( + &mut rng, + config.max_ttl, + 0, + &wrong_chain_name, + GAS_PRICE_TOLERANCE as u64, + ); + + let expected_error = InvalidDeploy::InvalidChainName { + expected: expected_chain_name.to_string(), + got: wrong_chain_name, + }; + + let current_timestamp = deploy.header().timestamp(); + assert_eq!( + deploy.is_config_compliant(&chainspec, TimeDiff::default(), current_timestamp), + Err(expected_error) + ); + assert!( + deploy.is_valid.get().is_none(), + "deploy should not have run expensive `is_valid` call" + ); + } + + #[test] + fn not_acceptable_due_to_excessive_dependencies() { + const GAS_PRICE_TOLERANCE: u8 = u8::MAX; + + let mut rng = TestRng::new(); + let chain_name = "net-1"; + + let mut chainspec = Chainspec::default(); + 
chainspec.with_chain_name(chain_name.to_string()); + let config = chainspec.transaction_config.clone(); + + let deploy = create_deploy( + &mut rng, + config.max_ttl, + 1, + chain_name, + GAS_PRICE_TOLERANCE as u64, + ); + + let expected_error = InvalidDeploy::DependenciesNoLongerSupported; + + let current_timestamp = deploy.header().timestamp(); + assert_eq!( + deploy.is_config_compliant(&chainspec, TimeDiff::default(), current_timestamp), + Err(expected_error) + ); + assert!( + deploy.is_valid.get().is_none(), + "deploy should not have run expensive `is_valid` call" + ); + } + + #[test] + fn not_acceptable_due_to_excessive_ttl() { + const GAS_PRICE_TOLERANCE: u8 = u8::MAX; + + let mut rng = TestRng::new(); + let chain_name = "net-1"; + + let mut chainspec = Chainspec::default(); + chainspec.with_chain_name(chain_name.to_string()); + let config = chainspec.transaction_config.clone(); + + let ttl = config.max_ttl + TimeDiff::from(Duration::from_secs(1)); + + let deploy = create_deploy(&mut rng, ttl, 0, chain_name, GAS_PRICE_TOLERANCE as u64); + + let expected_error = InvalidDeploy::ExcessiveTimeToLive { + max_ttl: config.max_ttl, + got: ttl, + }; + + let current_timestamp = deploy.header().timestamp(); + assert_eq!( + deploy.is_config_compliant(&chainspec, TimeDiff::default(), current_timestamp), + Err(expected_error) + ); + assert!( + deploy.is_valid.get().is_none(), + "deploy should not have run expensive `is_valid` call" + ); + } + + #[test] + fn not_acceptable_due_to_timestamp_in_future() { + const GAS_PRICE_TOLERANCE: u8 = u8::MAX; + + let mut rng = TestRng::new(); + let chain_name = "net-1"; + + let mut chainspec = Chainspec::default(); + chainspec.with_chain_name(chain_name.to_string()); + let config = chainspec.transaction_config.clone(); + let leeway = TimeDiff::from_seconds(2); + + let deploy = create_deploy( + &mut rng, + config.max_ttl, + 0, + chain_name, + GAS_PRICE_TOLERANCE as u64, + ); + let current_timestamp = deploy.header.timestamp() - leeway - 
TimeDiff::from_seconds(1); + + let expected_error = InvalidDeploy::TimestampInFuture { + validation_timestamp: current_timestamp, + timestamp_leeway: leeway, + got: deploy.header.timestamp(), + }; + + assert_eq!( + deploy.is_config_compliant(&chainspec, leeway, current_timestamp), + Err(expected_error) + ); + assert!( + deploy.is_valid.get().is_none(), + "deploy should not have run expensive `is_valid` call" + ); + } + + #[test] + fn acceptable_if_timestamp_slightly_in_future() { + const GAS_PRICE_TOLERANCE: u8 = u8::MAX; + + let mut rng = TestRng::new(); + let chain_name = "net-1"; + + let mut chainspec = Chainspec::default(); + chainspec.with_chain_name(chain_name.to_string()); + let config = chainspec.transaction_config.clone(); + let leeway = TimeDiff::from_seconds(2); + + let deploy = create_deploy( + &mut rng, + config.max_ttl, + 0, + chain_name, + GAS_PRICE_TOLERANCE as u64, + ); + let current_timestamp = deploy.header.timestamp() - (leeway / 2); + deploy + .is_config_compliant(&chainspec, leeway, current_timestamp) + .expect("should be acceptable"); + } + + #[test] + fn not_acceptable_due_to_missing_payment_amount() { + const GAS_PRICE_TOLERANCE: u8 = u8::MAX; + + let mut rng = TestRng::new(); + let chain_name = "net-1"; + + let mut chainspec = Chainspec::default(); + chainspec.with_chain_name(chain_name.to_string()); + chainspec.with_pricing_handling(PricingHandling::PaymentLimited); + let config = chainspec.transaction_config.clone(); + + let payment = ExecutableDeployItem::ModuleBytes { + module_bytes: Bytes::new(), + args: RuntimeArgs::default(), + }; + + // Create an empty session object that is not transfer to ensure + // that the payment amount is checked. 
+ let session = ExecutableDeployItem::StoredContractByName { + name: "".to_string(), + entry_point: "".to_string(), + args: Default::default(), + }; + + let mut deploy = create_deploy( + &mut rng, + config.max_ttl, + 0, + chain_name, + GAS_PRICE_TOLERANCE as u64, + ); + + deploy.payment = payment; + deploy.session = session; + + let current_timestamp = deploy.header().timestamp(); + assert_eq!( + deploy.is_config_compliant(&chainspec, TimeDiff::default(), current_timestamp), + Err(InvalidDeploy::MissingPaymentAmount) + ); + assert!( + deploy.is_valid.get().is_none(), + "deploy should not have run expensive `is_valid` call" + ); + } + + #[test] + fn not_acceptable_due_to_mangled_payment_amount() { + const GAS_PRICE_TOLERANCE: u8 = u8::MAX; + + let mut rng = TestRng::new(); + let chain_name = "net-1"; + + let mut chainspec = Chainspec::default(); + chainspec.with_chain_name(chain_name.to_string()); + chainspec.with_pricing_handling(PricingHandling::PaymentLimited); + let config = chainspec.transaction_config.clone(); + + let payment = ExecutableDeployItem::ModuleBytes { + module_bytes: Bytes::new(), + args: runtime_args! { + "amount" => "mangled-amount" + }, + }; + + // Create an empty session object that is not transfer to ensure + // that the payment amount is checked. 
+ let session = ExecutableDeployItem::StoredContractByName { + name: "".to_string(), + entry_point: "".to_string(), + args: Default::default(), + }; + + let mut deploy = create_deploy( + &mut rng, + config.max_ttl, + 0, + chain_name, + GAS_PRICE_TOLERANCE as u64, + ); + + deploy.payment = payment; + deploy.session = session; + + let current_timestamp = deploy.header().timestamp(); + assert_eq!( + deploy.is_config_compliant(&chainspec, TimeDiff::default(), current_timestamp), + Err(InvalidDeploy::FailedToParsePaymentAmount) + ); + assert!( + deploy.is_valid.get().is_none(), + "deploy should not have run expensive `is_valid` call" + ); + } + + #[test] + fn not_acceptable_if_doesnt_fit_in_any_lane() { + const GAS_PRICE_TOLERANCE: u8 = u8::MAX; + + let mut rng = TestRng::new(); + let chain_name = "net-1"; + + let mut chainspec = Chainspec::default(); + chainspec.with_chain_name(chain_name.to_string()); + chainspec.with_pricing_handling(PricingHandling::PaymentLimited); + let config = chainspec.transaction_config.clone(); + let max_lane = chainspec + .transaction_config + .transaction_v1_config + .get_max_wasm_lane_by_gas_limit() + .unwrap(); + let amount = U512::from(max_lane.max_transaction_gas_limit + 1); + + let payment = ExecutableDeployItem::ModuleBytes { + module_bytes: Bytes::new(), + args: runtime_args! { + "amount" => amount + }, + }; + + // Create an empty session object that is not transfer to ensure + // that the payment amount is checked. 
+ let session = ExecutableDeployItem::StoredContractByName { + name: "".to_string(), + entry_point: "".to_string(), + args: Default::default(), + }; + + let mut deploy = create_deploy( + &mut rng, + config.max_ttl, + 0, + chain_name, + GAS_PRICE_TOLERANCE as u64, + ); + + deploy.payment = payment; + deploy.session = session; + + let expected_error = InvalidDeploy::NoLaneMatch; + + let current_timestamp = deploy.header().timestamp(); + assert_eq!( + deploy.is_config_compliant(&chainspec, TimeDiff::default(), current_timestamp), + Err(expected_error) + ); + assert!( + deploy.is_valid.get().is_none(), + "deploy should not have run expensive `is_valid` call" + ); + } + + #[test] + fn not_acceptable_due_to_transaction_bigger_than_block_limit() { + //TODO we should consider validating on startup if the + // chainspec doesn't defined wasm lanes that are bigger than + // the block limit + const GAS_PRICE_TOLERANCE: u8 = u8::MAX; + + let mut rng = TestRng::new(); + let chain_name = "net-1"; + + let mut chainspec = Chainspec::default(); + chainspec.with_block_gas_limit(100); // The default wasm lane is much bigger than + chainspec.with_chain_name(chain_name.to_string()); + chainspec.with_pricing_handling(PricingHandling::PaymentLimited); + let config = chainspec.transaction_config.clone(); + let max_lane = chainspec + .transaction_config + .transaction_v1_config + .get_max_wasm_lane_by_gas_limit() + .unwrap(); + let amount = U512::from(max_lane.max_transaction_gas_limit); + + let payment = ExecutableDeployItem::ModuleBytes { + module_bytes: Bytes::new(), + args: runtime_args! { + "amount" => amount + }, + }; + + // Create an empty session object that is not transfer to ensure + // that the payment amount is checked. 
+ let session = ExecutableDeployItem::StoredContractByName { + name: "".to_string(), + entry_point: "".to_string(), + args: Default::default(), + }; + + let mut deploy = create_deploy( + &mut rng, + config.max_ttl, + 0, + chain_name, + GAS_PRICE_TOLERANCE as u64, + ); + + deploy.payment = payment; + deploy.session = session; + + let expected_error = InvalidDeploy::ExceededBlockGasLimit { + block_gas_limit: config.block_gas_limit, + got: Box::new(amount), + }; + + let current_timestamp = deploy.header().timestamp(); + assert_eq!( + deploy.is_config_compliant(&chainspec, TimeDiff::default(), current_timestamp), + Err(expected_error) + ); + assert!( + deploy.is_valid.get().is_none(), + "deploy should not have run expensive `is_valid` call" + ); + } + + #[test] + fn transfer_acceptable_regardless_of_excessive_payment_amount() { + const GAS_PRICE_TOLERANCE: u8 = u8::MAX; + + let mut rng = TestRng::new(); + let secret_key = SecretKey::random(&mut rng); + let chain_name = "net-1"; + + let mut chainspec = Chainspec::default(); + chainspec.with_chain_name(chain_name.to_string()); + let config = chainspec.transaction_config.clone(); + let amount = U512::from(config.block_gas_limit + 1); + + let payment = ExecutableDeployItem::ModuleBytes { + module_bytes: Bytes::new(), + args: runtime_args! 
{ + "amount" => amount + }, + }; + + let transfer_args = { + let mut transfer_args = RuntimeArgs::new(); + let value = CLValue::from_t(U512::from(DEFAULT_MIN_TRANSFER_MOTES)) + .expect("should create CLValue"); + transfer_args.insert_cl_value("amount", value); + transfer_args + }; + + let deploy = Deploy::new_signed( + Timestamp::now(), + config.max_ttl, + GAS_PRICE_TOLERANCE as u64, + vec![], + chain_name.to_string(), + payment, + ExecutableDeployItem::Transfer { + args: transfer_args, + }, + &secret_key, + None, + ); + + let current_timestamp = deploy.header().timestamp(); + assert_eq!( + Ok(()), + deploy.is_config_compliant(&chainspec, TimeDiff::default(), current_timestamp) + ) + } + + #[test] + fn not_acceptable_due_to_excessive_approvals() { + const GAS_PRICE_TOLERANCE: u8 = u8::MAX; + + let mut rng = TestRng::new(); + let chain_name = "net-1"; + + let mut chainspec = Chainspec::default(); + chainspec.with_chain_name(chain_name.to_string()); + let config = chainspec.transaction_config.clone(); + let deploy = create_deploy( + &mut rng, + config.max_ttl, + 0, + chain_name, + GAS_PRICE_TOLERANCE as u64, + ); + // This test is to ensure a given limit is being checked. + // Therefore, set the limit to one less than the approvals in the deploy. 
+ let max_associated_keys = (deploy.approvals.len() - 1) as u32; + chainspec.with_max_associated_keys(max_associated_keys); + let current_timestamp = deploy.header().timestamp(); + assert_eq!( + Err(InvalidDeploy::ExcessiveApprovals { + got: deploy.approvals.len() as u32, + max_associated_keys, + }), + deploy.is_config_compliant(&chainspec, TimeDiff::default(), current_timestamp) + ) + } + + #[test] + fn not_acceptable_due_to_missing_transfer_amount() { + const GAS_PRICE_TOLERANCE: u8 = u8::MAX; + + let mut rng = TestRng::new(); + let chain_name = "net-1"; + let mut chainspec = Chainspec::default(); + chainspec.with_chain_name(chain_name.to_string()); + + let config = chainspec.transaction_config.clone(); + let mut deploy = create_deploy( + &mut rng, + config.max_ttl, + 0, + chain_name, + GAS_PRICE_TOLERANCE as u64, + ); + + let transfer_args = RuntimeArgs::default(); + let session = ExecutableDeployItem::Transfer { + args: transfer_args, + }; + deploy.session = session; + + let current_timestamp = deploy.header().timestamp(); + assert_eq!( + Err(InvalidDeploy::MissingTransferAmount), + deploy.is_config_compliant(&chainspec, TimeDiff::default(), current_timestamp) + ) + } + + #[test] + fn not_acceptable_due_to_mangled_transfer_amount() { + const GAS_PRICE_TOLERANCE: u8 = u8::MAX; + + let mut rng = TestRng::new(); + let chain_name = "net-1"; + let mut chainspec = Chainspec::default(); + chainspec.with_chain_name(chain_name.to_string()); + + let config = chainspec.transaction_config.clone(); + let mut deploy = create_deploy( + &mut rng, + config.max_ttl, + 0, + chain_name, + GAS_PRICE_TOLERANCE as u64, + ); + + let transfer_args = runtime_args! 
{ + "amount" => "mangled-amount", + "source" => PublicKey::random(&mut rng).to_account_hash(), + "target" => PublicKey::random(&mut rng).to_account_hash(), + }; + let session = ExecutableDeployItem::Transfer { + args: transfer_args, + }; + deploy.session = session; + + let current_timestamp = deploy.header().timestamp(); + assert_eq!( + Err(InvalidDeploy::FailedToParseTransferAmount), + deploy.is_config_compliant(&chainspec, TimeDiff::default(), current_timestamp) + ) + } + + #[test] + fn not_acceptable_due_to_too_low_gas_price_tolerance() { + const GAS_PRICE_TOLERANCE: u8 = 0; + + let mut rng = TestRng::new(); + let chain_name = "net-1"; + let mut chainspec = Chainspec::default(); + chainspec.with_chain_name(chain_name.to_string()); + + let config = chainspec.transaction_config.clone(); + let deploy = create_deploy( + &mut rng, + config.max_ttl, + 0, + chain_name, + GAS_PRICE_TOLERANCE as u64, + ); + + let current_timestamp = deploy.header().timestamp(); + assert!(matches!( + deploy.is_config_compliant( + &chainspec, + TimeDiff::default(), + current_timestamp + ), + Err(InvalidDeploy::GasPriceToleranceTooLow { min_gas_price_tolerance, provided_gas_price_tolerance }) + if min_gas_price_tolerance == chainspec.vacancy_config.min_gas_price && provided_gas_price_tolerance == GAS_PRICE_TOLERANCE + )) + } + + #[test] + fn not_acceptable_due_to_insufficient_transfer_amount() { + const GAS_PRICE_TOLERANCE: u8 = u8::MAX; + + let mut rng = TestRng::new(); + let chain_name = "net-1"; + let mut chainspec = Chainspec::default(); + chainspec.with_chain_name(chain_name.to_string()); + + let config = chainspec.transaction_config.clone(); + let mut deploy = create_deploy( + &mut rng, + config.max_ttl, + 0, + chain_name, + GAS_PRICE_TOLERANCE as u64, + ); + + let amount = config.native_transfer_minimum_motes - 1; + let insufficient_amount = U512::from(amount); + + let transfer_args = runtime_args! 
{ + "amount" => insufficient_amount, + "source" => PublicKey::random(&mut rng).to_account_hash(), + "target" => PublicKey::random(&mut rng).to_account_hash(), + }; + let session = ExecutableDeployItem::Transfer { + args: transfer_args, + }; + deploy.session = session; + + let current_timestamp = deploy.header().timestamp(); + assert_eq!( + Err(InvalidDeploy::InsufficientTransferAmount { + minimum: Box::new(U512::from(config.native_transfer_minimum_motes)), + attempted: Box::new(insufficient_amount), + }), + deploy.is_config_compliant(&chainspec, TimeDiff::default(), current_timestamp,) + ) + } + + #[test] + fn should_use_payment_amount_for_payment_limited_payment() { + const GAS_PRICE_TOLERANCE: u8 = u8::MAX; + + let payment_amount = 500u64; + let mut rng = TestRng::new(); + let chain_name = "net-1"; + let mut chainspec = Chainspec::default(); + chainspec + .with_chain_name(chain_name.to_string()) + .with_pricing_handling(PricingHandling::PaymentLimited); + + let config = chainspec.transaction_config.clone(); + + let payment = ExecutableDeployItem::ModuleBytes { + module_bytes: Bytes::new(), + args: runtime_args! { + "amount" => U512::from(payment_amount) + }, + }; + + // Create an empty session object that is not transfer to ensure + // that the payment amount is checked. 
+ let session = ExecutableDeployItem::StoredContractByName { + name: "".to_string(), + entry_point: "".to_string(), + args: Default::default(), + }; + + let mut deploy = create_deploy( + &mut rng, + config.max_ttl, + 0, + chain_name, + GAS_PRICE_TOLERANCE as u64, + ); + deploy.payment = payment; + deploy.session = session; + + let mut gas_price = 1; + let cost = deploy + .gas_cost(&chainspec, gas_price) + .expect("should cost") + .value(); + assert_eq!( + cost, + U512::from(payment_amount), + "in payment limited pricing, the user selected amount should be the cost if gas price is 1" + ); + gas_price += 1; + let cost = deploy + .gas_cost(&chainspec, gas_price) + .expect("should cost") + .value(); + assert_eq!( + cost, + U512::from(payment_amount) * gas_price, + "in payment limited pricing, the cost should == user selected amount * gas_price" + ); + } + + #[test] + fn should_use_cost_table_for_fixed_payment() { + const GAS_PRICE_TOLERANCE: u8 = u8::MAX; + + let payment_amount = 500u64; + let mut rng = TestRng::new(); + let chain_name = "net-1"; + let mut chainspec = Chainspec::default(); + chainspec + .with_chain_name(chain_name.to_string()) + .with_pricing_handling(PricingHandling::PaymentLimited); + + let config = chainspec.transaction_config.clone(); + + let payment = ExecutableDeployItem::ModuleBytes { + module_bytes: Bytes::new(), + args: runtime_args! { + "amount" => U512::from(payment_amount) + }, + }; + + // Create an empty session object that is not transfer to ensure + // that the payment amount is checked. 
+ let session = ExecutableDeployItem::StoredContractByName { + name: "".to_string(), + entry_point: "".to_string(), + args: Default::default(), + }; + + let mut deploy = create_deploy( + &mut rng, + config.max_ttl, + 0, + chain_name, + GAS_PRICE_TOLERANCE as u64, + ); + deploy.payment = payment; + deploy.session = session; + + let mut gas_price = 1; + let limit = deploy.gas_limit(&chainspec).expect("should limit").value(); + let cost = deploy + .gas_cost(&chainspec, gas_price) + .expect("should cost") + .value(); + assert_eq!( + cost, limit, + "in fixed pricing, the cost & limit should == if gas price is 1" + ); + gas_price += 1; + let cost = deploy + .gas_cost(&chainspec, gas_price) + .expect("should cost") + .value(); + assert_eq!( + cost, + limit * gas_price, + "in fixed pricing, the cost should == limit * gas_price" + ); + } + + #[test] + fn should_use_lane_specific_size_constraints() { + let mut rng = TestRng::new(); + // Deploy is a transfer; should select MINT_LANE_ID + // and apply size limitations appropriate to that + const GAS_PRICE_TOLERANCE: u8 = u8::MAX; + let chain_name = "net-1"; + let mut chainspec = Chainspec::default(); + chainspec + .with_chain_name(chain_name.to_string()) + .with_pricing_handling(PricingHandling::PaymentLimited); + + let config = chainspec.transaction_config.clone(); + + let transfer_args = runtime_args! { + "amount" => U512::from(DEFAULT_MIN_TRANSFER_MOTES), + "source" => PublicKey::random(&mut rng).to_account_hash(), + "target" => PublicKey::random(&mut rng).to_account_hash(), + "some_other" => vec![1; 1_000_000], //pumping a big runtime arg to make sure that we don't fit in the mint lane + }; + let payment_amount = 10_000_000_000u64; + let payment_args = runtime_args! 
{ + "amount" => U512::from(payment_amount), + }; + let session = ExecutableDeployItem::Transfer { + args: transfer_args, + }; + let payment = ExecutableDeployItem::ModuleBytes { + module_bytes: Bytes::new(), + args: payment_args, + }; + + let mut deploy = create_deploy( + &mut rng, + config.max_ttl, + 0, + chain_name, + GAS_PRICE_TOLERANCE as u64, + ); + deploy.payment = payment; + deploy.session = session; + assert_eq!( + calculate_lane_id_for_deploy( + &deploy, + chainspec.core_config.pricing_handling, + &config.transaction_v1_config, + ), + Ok(MINT_LANE_ID) + ); + let current_timestamp = deploy.header().timestamp(); + let ret = deploy.is_config_compliant(&chainspec, TimeDiff::default(), current_timestamp); + assert!(ret.is_err()); + let err = ret.err().unwrap(); + assert!(matches!( + err, + InvalidDeploy::ExcessiveSize(DeployExcessiveSizeError { .. }) + )) + } +} diff --git a/types/src/transaction/deploy/deploy_category.rs b/types/src/transaction/deploy/deploy_category.rs new file mode 100644 index 0000000000..9071fc41bd --- /dev/null +++ b/types/src/transaction/deploy/deploy_category.rs @@ -0,0 +1,47 @@ +use core::fmt::{self, Formatter}; + +use crate::Deploy; +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +/// The category of a [`Transaction`]. +#[derive( + Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug, Default, +)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr( + feature = "json-schema", + derive(JsonSchema), + schemars(description = "Session kind of legacy Deploy.") +)] +#[serde(deny_unknown_fields)] +#[repr(u8)] +pub enum DeployCategory { + /// Standard transaction (the default). + #[default] + Standard = 0, + /// Native transfer interaction. 
+ Transfer = 1, +} + +impl fmt::Display for DeployCategory { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self { + DeployCategory::Standard => write!(f, "Standard"), + DeployCategory::Transfer => write!(f, "Transfer"), + } + } +} + +impl From for DeployCategory { + fn from(value: Deploy) -> Self { + if value.is_transfer() { + DeployCategory::Transfer + } else { + DeployCategory::Standard + } + } +} diff --git a/types/src/transaction/deploy/deploy_hash.rs b/types/src/transaction/deploy/deploy_hash.rs new file mode 100644 index 0000000000..11692945c3 --- /dev/null +++ b/types/src/transaction/deploy/deploy_hash.rs @@ -0,0 +1,121 @@ +use alloc::{string::String, vec::Vec}; +use core::fmt::{self, Display, Formatter}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::Rng; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +#[cfg(doc)] +use super::Deploy; +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + Digest, +}; + +/// The cryptographic hash of a [`Deploy`]. +#[derive( + Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug, Default, +)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr( + feature = "json-schema", + derive(JsonSchema), + schemars(description = "Hex-encoded deploy hash.") +)] +#[serde(deny_unknown_fields)] +pub struct DeployHash(Digest); + +impl DeployHash { + /// The number of bytes in a `DeployHash` digest. + pub const LENGTH: usize = Digest::LENGTH; + + /// Constructs a new `DeployHash`. + pub const fn new(hash: Digest) -> Self { + DeployHash(hash) + } + + /// Returns the wrapped inner digest. + pub fn inner(&self) -> &Digest { + &self.0 + } + + /// Hexadecimal representation of the hash. 
+ pub fn to_hex_string(&self) -> String { + base16::encode_lower(self.inner()) + } + + /// Returns a new `DeployHash` directly initialized with the provided bytes; no hashing is done. + #[cfg(any(feature = "testing", test))] + pub const fn from_raw(raw_digest: [u8; Self::LENGTH]) -> Self { + DeployHash(Digest::from_raw(raw_digest)) + } + + /// Returns a random `DeployHash`. + #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + let hash = rng.gen::<[u8; Digest::LENGTH]>().into(); + DeployHash(hash) + } +} + +impl From for DeployHash { + fn from(digest: Digest) -> Self { + DeployHash(digest) + } +} + +impl From for Digest { + fn from(deploy_hash: DeployHash) -> Self { + deploy_hash.0 + } +} + +impl Display for DeployHash { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + write!(formatter, "deploy-hash({})", self.0,) + } +} + +impl AsRef<[u8]> for DeployHash { + fn as_ref(&self) -> &[u8] { + self.0.as_ref() + } +} + +impl ToBytes for DeployHash { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.0.write_bytes(writer) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.0.to_bytes() + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } +} + +impl FromBytes for DeployHash { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + Digest::from_bytes(bytes).map(|(inner, remainder)| (DeployHash(inner), remainder)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + let hash = DeployHash::random(rng); + bytesrepr::test_serialization_roundtrip(&hash); + } +} diff --git a/types/src/transaction/deploy/deploy_header.rs b/types/src/transaction/deploy/deploy_header.rs new file mode 100644 index 0000000000..bff664b480 --- /dev/null +++ b/types/src/transaction/deploy/deploy_header.rs @@ -0,0 +1,244 @@ +use alloc::{string::String, vec::Vec}; +use core::fmt::{self, Display, 
Formatter}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +#[cfg(any(feature = "std", test))] +use serde::{Deserialize, Serialize}; +#[cfg(any(feature = "std", test))] +use tracing::debug; + +#[cfg(doc)] +use super::Deploy; +use super::DeployHash; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + Digest, DisplayIter, PublicKey, TimeDiff, Timestamp, +}; +#[cfg(any(feature = "std", test))] +use crate::{InvalidDeploy, TransactionConfig}; + +/// The header portion of a [`Deploy`]. +#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Debug)] +#[cfg_attr( + any(feature = "std", test), + derive(Serialize, Deserialize), + serde(deny_unknown_fields) +)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub struct DeployHeader { + account: PublicKey, + timestamp: Timestamp, + ttl: TimeDiff, + gas_price: u64, + body_hash: Digest, + dependencies: Vec, + chain_name: String, +} + +impl DeployHeader { + #[cfg(any(feature = "std", feature = "json-schema", test))] + pub fn new( + account: PublicKey, + timestamp: Timestamp, + ttl: TimeDiff, + gas_price: u64, + body_hash: Digest, + dependencies: Vec, + chain_name: String, + ) -> Self { + DeployHeader { + account, + timestamp, + ttl, + gas_price, + body_hash, + dependencies, + chain_name, + } + } + + /// Returns the public key of the account providing the context in which to run the `Deploy`. + pub fn account(&self) -> &PublicKey { + &self.account + } + + /// Returns the creation timestamp of the `Deploy`. + pub fn timestamp(&self) -> Timestamp { + self.timestamp + } + + /// Returns the duration after the creation timestamp for which the `Deploy` will stay valid. + /// + /// After this duration has ended, the `Deploy` will be considered expired. + pub fn ttl(&self) -> TimeDiff { + self.ttl + } + + /// Returns `true` if the `Deploy` has expired. 
+ pub fn expired(&self, current_instant: Timestamp) -> bool { + self.expires() < current_instant + } + + /// Returns the sender's gas price tolerance for block inclusion. + pub fn gas_price(&self) -> u64 { + // in the original implementation, we did not have dynamic gas pricing + // but the sender of the deploy could specify a higher gas price, + // and the payment amount would be multiplied by that number + // for settlement purposes. This did not increase their computation limit, + // only how much they were charged. The intent was, the total cost + // would be a consideration for block proposal but in the end we shipped + // with an egalitarian subjective fifo proposer. Thus, there was no + // functional reason / no benefit to a sender setting gas price to + // anything higher than 1. + // + // As of 2.0 we have dynamic gas prices, this vestigial field has been + // repurposed, interpreted to indicate a gas price tolerance. + // If this deploy is buffered and the current gas price is higher than this + // value, it will not be included in a proposed block. + // + // This allowing the sender to opt out of block inclusion if the gas price is + // higher than they want to pay for. + self.gas_price + } + + /// Returns the hash of the body (i.e. the Wasm code) of the `Deploy`. + pub fn body_hash(&self) -> &Digest { + &self.body_hash + } + + /// Returns the list of other `Deploy`s that have to be executed before this one. + pub fn dependencies(&self) -> &Vec { + &self.dependencies + } + + /// Returns the name of the chain the `Deploy` should be executed on. + pub fn chain_name(&self) -> &str { + &self.chain_name + } + + /// Returns `Ok` if and only if the dependencies count and TTL are within limits, and the + /// timestamp is not later than `at + timestamp_leeway`. Does NOT check for expiry. 
+ #[cfg(any(feature = "std", test))] + pub fn is_valid( + &self, + config: &TransactionConfig, + timestamp_leeway: TimeDiff, + at: Timestamp, + deploy_hash: &DeployHash, + ) -> Result<(), InvalidDeploy> { + // as of 2.0.0 deploy dependencies are not supported. + // a legacy deploy citing dependencies should be rejected + if !self.dependencies.is_empty() { + debug!( + %deploy_hash, + "deploy dependencies no longer supported" + ); + return Err(InvalidDeploy::DependenciesNoLongerSupported); + } + + if self.ttl() > config.max_ttl { + debug!( + %deploy_hash, + deploy_header = %self, + max_ttl = %config.max_ttl, + "deploy ttl excessive" + ); + return Err(InvalidDeploy::ExcessiveTimeToLive { + max_ttl: config.max_ttl, + got: self.ttl(), + }); + } + + if self.timestamp() > at + timestamp_leeway { + debug!(%deploy_hash, deploy_header = %self, %at, "deploy timestamp in the future"); + return Err(InvalidDeploy::TimestampInFuture { + validation_timestamp: at, + timestamp_leeway, + got: self.timestamp(), + }); + } + + Ok(()) + } + + /// Returns the timestamp of when the `Deploy` expires, i.e. `self.timestamp + self.ttl`. 
+ pub fn expires(&self) -> Timestamp { + self.timestamp.saturating_add(self.ttl) + } + + #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub(super) fn invalidate(&mut self) { + self.chain_name.clear(); + } +} + +impl ToBytes for DeployHeader { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.account.write_bytes(writer)?; + self.timestamp.write_bytes(writer)?; + self.ttl.write_bytes(writer)?; + self.gas_price.write_bytes(writer)?; + self.body_hash.write_bytes(writer)?; + self.dependencies.write_bytes(writer)?; + self.chain_name.write_bytes(writer) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.account.serialized_length() + + self.timestamp.serialized_length() + + self.ttl.serialized_length() + + self.gas_price.serialized_length() + + self.body_hash.serialized_length() + + self.dependencies.serialized_length() + + self.chain_name.serialized_length() + } +} + +impl FromBytes for DeployHeader { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (account, remainder) = PublicKey::from_bytes(bytes)?; + let (timestamp, remainder) = Timestamp::from_bytes(remainder)?; + let (ttl, remainder) = TimeDiff::from_bytes(remainder)?; + let (gas_price, remainder) = u64::from_bytes(remainder)?; + let (body_hash, remainder) = Digest::from_bytes(remainder)?; + let (dependencies, remainder) = Vec::::from_bytes(remainder)?; + let (chain_name, remainder) = String::from_bytes(remainder)?; + let deploy_header = DeployHeader { + account, + timestamp, + ttl, + gas_price, + body_hash, + dependencies, + chain_name, + }; + Ok((deploy_header, remainder)) + } +} + +impl Display for DeployHeader { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + write!( + formatter, + "deploy-header[account: {}, timestamp: {}, ttl: {}, gas_price: {}, body_hash: 
{}, \ + dependencies: [{}], chain_name: {}]", + self.account, + self.timestamp, + self.ttl, + self.gas_price, + self.body_hash, + DisplayIter::new(self.dependencies.iter()), + self.chain_name, + ) + } +} diff --git a/types/src/transaction/deploy/deploy_id.rs b/types/src/transaction/deploy/deploy_id.rs new file mode 100644 index 0000000000..9159f9d9cf --- /dev/null +++ b/types/src/transaction/deploy/deploy_id.rs @@ -0,0 +1,113 @@ +use alloc::vec::Vec; +use core::fmt::{self, Display, Formatter}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +use serde::{Deserialize, Serialize}; + +#[cfg(doc)] +use super::Deploy; +use super::DeployHash; +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + transaction::{ApprovalsHash, TransactionHash, TransactionId}, +}; + +/// The unique identifier of a [`Deploy`], comprising its [`DeployHash`] and +/// [`ApprovalsHash`]. +#[derive( + Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug, Default, +)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[serde(deny_unknown_fields)] +pub struct DeployId { + deploy_hash: DeployHash, + approvals_hash: ApprovalsHash, +} + +impl DeployId { + /// Returns a new `DeployId`. + pub fn new(deploy_hash: DeployHash, approvals_hash: ApprovalsHash) -> Self { + DeployId { + deploy_hash, + approvals_hash, + } + } + + /// Returns the deploy hash. + pub fn deploy_hash(&self) -> &DeployHash { + &self.deploy_hash + } + + /// Returns the approvals hash. + pub fn approvals_hash(&self) -> &ApprovalsHash { + &self.approvals_hash + } + + /// Consumes `self`, returning a tuple of the constituent parts. + pub fn destructure(self) -> (DeployHash, ApprovalsHash) { + (self.deploy_hash, self.approvals_hash) + } + + /// Returns a random `DeployId`. 
+ #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + DeployId::new(DeployHash::random(rng), ApprovalsHash::random(rng)) + } +} + +impl Display for DeployId { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + write!( + formatter, + "deploy-id({}, {})", + self.deploy_hash, self.approvals_hash + ) + } +} + +impl ToBytes for DeployId { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.deploy_hash.write_bytes(writer)?; + self.approvals_hash.write_bytes(writer) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.deploy_hash.serialized_length() + self.approvals_hash.serialized_length() + } +} + +impl FromBytes for DeployId { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (deploy_hash, remainder) = DeployHash::from_bytes(bytes)?; + let (approvals_hash, remainder) = ApprovalsHash::from_bytes(remainder)?; + let id = DeployId::new(deploy_hash, approvals_hash); + Ok((id, remainder)) + } +} + +impl From for TransactionId { + fn from(id: DeployId) -> Self { + TransactionId::new(TransactionHash::Deploy(id.deploy_hash), id.approvals_hash) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + let id = DeployId::random(rng); + bytesrepr::test_serialization_roundtrip(&id); + } +} diff --git a/types/src/transaction/deploy/error.rs b/types/src/transaction/deploy/error.rs new file mode 100644 index 0000000000..14ca089463 --- /dev/null +++ b/types/src/transaction/deploy/error.rs @@ -0,0 +1,471 @@ +use alloc::{boxed::Box, string::String}; +use core::{ + array::TryFromSliceError, + fmt::{self, Display, Formatter}, +}; +#[cfg(feature = "std")] +use std::error::Error as StdError; +#[cfg(any(feature = "testing", test))] +use strum::EnumIter; + 
+#[cfg(feature = "datasize")] +use datasize::DataSize; +use serde::Serialize; + +use crate::{crypto, TimeDiff, Timestamp, U512}; + +/// A representation of the way in which a deploy failed validation checks. +#[derive(Clone, Eq, PartialEq, Debug)] +#[cfg_attr(feature = "std", derive(Serialize))] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[non_exhaustive] +// This derive should not be removed due to a completeness +// test that we have in binary-port. It checks if all variants +// of this error have corresponding binary port error codes +#[cfg_attr(any(feature = "testing", test), derive(EnumIter))] +pub enum InvalidDeploy { + /// Invalid chain name. + InvalidChainName { + /// The expected chain name. + expected: String, + /// The received chain name. + got: String, + }, + + /// Deploy dependencies are no longer supported. + DependenciesNoLongerSupported, + + /// Deploy is too large. + ExcessiveSize(ExcessiveSizeError), + + /// Excessive time-to-live. + ExcessiveTimeToLive { + /// The time-to-live limit. + max_ttl: TimeDiff, + /// The received time-to-live. + got: TimeDiff, + }, + + /// Deploy's timestamp is in the future. + TimestampInFuture { + /// The node's timestamp when validating the deploy. + validation_timestamp: Timestamp, + /// Any configured leeway added to `validation_timestamp`. + timestamp_leeway: TimeDiff, + /// The deploy's timestamp. + got: Timestamp, + }, + + /// The provided body hash does not match the actual hash of the body. + InvalidBodyHash, + + /// The provided deploy hash does not match the actual hash of the deploy. + InvalidDeployHash, + + /// The deploy has no approvals. + EmptyApprovals, + + /// Invalid approval. + InvalidApproval { + /// The index of the approval at fault. + index: usize, + /// The approval verification error. + error: crypto::Error, + }, + + /// Excessive length of deploy's session args. + ExcessiveSessionArgsLength { + /// The byte size limit of session arguments. 
+ max_length: usize, + /// The received length of session arguments. + got: usize, + }, + + /// Excessive length of deploy's payment args. + ExcessivePaymentArgsLength { + /// The byte size limit of payment arguments. + max_length: usize, + /// The received length of payment arguments. + got: usize, + }, + + /// Missing payment "amount" runtime argument. + MissingPaymentAmount, + + /// Failed to parse payment "amount" runtime argument. + FailedToParsePaymentAmount, + + /// The payment amount associated with the deploy exceeds the block gas limit. + ExceededBlockGasLimit { + /// Configured block gas limit. + block_gas_limit: u64, + /// The payment amount received. + got: Box, + }, + + /// Missing payment "amount" runtime argument + MissingTransferAmount, + + /// Failed to parse transfer "amount" runtime argument. + FailedToParseTransferAmount, + + /// Insufficient transfer amount. + InsufficientTransferAmount { + /// The minimum transfer amount. + minimum: Box, + /// The attempted transfer amount. + attempted: Box, + }, + + /// The amount of approvals on the deploy exceeds the max_associated_keys limit. + ExcessiveApprovals { + /// Number of approvals on the deploy. + got: u32, + /// The chainspec limit for max_associated_keys. + max_associated_keys: u32, + }, + + /// Unable to calculate gas limit. + UnableToCalculateGasLimit, + + /// Unable to calculate gas cost. + UnableToCalculateGasCost, + + /// Gas limit is not supported in legacy deploys. + GasLimitNotSupported, + + /// Gas price tolerance too low. + GasPriceToleranceTooLow { + /// The minimum gas price tolerance. + min_gas_price_tolerance: u8, + /// The provided gas price tolerance. + provided_gas_price_tolerance: u8, + }, + + /// Invalid runtime. + InvalidRuntime, + + /// Could not match deploy with transaction lane + NoLaneMatch, + + /// The payment amount associated with the deploy exceeds the lane gas limit. + ExceededLaneGasLimit { + /// Configured lane gas limit. 
+ lane_gas_limit: u64, + /// The payment amount received. + got: Box, + }, + + /// Invalid payment amount. + InvalidPaymentAmount, + + /// Pricing mode not supported + PricingModeNotSupported, +} + +impl Display for InvalidDeploy { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + InvalidDeploy::InvalidChainName { expected, got } => { + write!( + formatter, + "invalid chain name: expected {}, got {}", + expected, got + ) + } + InvalidDeploy::DependenciesNoLongerSupported => { + write!(formatter, "dependencies no longer supported",) + } + InvalidDeploy::ExcessiveSize(error) => { + write!(formatter, "deploy size too large: {}", error) + } + InvalidDeploy::ExcessiveTimeToLive { max_ttl, got } => { + write!( + formatter, + "time-to-live of {} exceeds limit of {}", + got, max_ttl + ) + } + InvalidDeploy::TimestampInFuture { + validation_timestamp, + timestamp_leeway, + got, + } => { + write!( + formatter, + "timestamp of {} is later than node's timestamp of {} plus leeway of {}", + got, validation_timestamp, timestamp_leeway + ) + } + InvalidDeploy::InvalidBodyHash => { + write!( + formatter, + "the provided body hash does not match the actual hash of the body" + ) + } + InvalidDeploy::InvalidDeployHash => { + write!( + formatter, + "the provided hash does not match the actual hash of the deploy" + ) + } + InvalidDeploy::EmptyApprovals => { + write!(formatter, "the deploy has no approvals") + } + InvalidDeploy::InvalidApproval { index, error } => { + write!( + formatter, + "the approval at index {} is invalid: {}", + index, error + ) + } + InvalidDeploy::ExcessiveSessionArgsLength { max_length, got } => { + write!( + formatter, + "serialized session code runtime args of {} exceeds limit of {}", + got, max_length + ) + } + InvalidDeploy::ExcessivePaymentArgsLength { max_length, got } => { + write!( + formatter, + "serialized payment code runtime args of {} exceeds limit of {}", + got, max_length + ) + } + InvalidDeploy::MissingPaymentAmount => 
{ + write!(formatter, "missing payment 'amount' runtime argument") + } + InvalidDeploy::FailedToParsePaymentAmount => { + write!(formatter, "failed to parse payment 'amount' as U512") + } + InvalidDeploy::ExceededBlockGasLimit { + block_gas_limit, + got, + } => { + write!( + formatter, + "payment amount of {} exceeds the block gas limit of {}", + got, block_gas_limit + ) + } + InvalidDeploy::MissingTransferAmount => { + write!(formatter, "missing transfer 'amount' runtime argument") + } + InvalidDeploy::FailedToParseTransferAmount => { + write!(formatter, "failed to parse transfer 'amount' as U512") + } + InvalidDeploy::InsufficientTransferAmount { minimum, attempted } => { + write!( + formatter, + "insufficient transfer amount; minimum: {} attempted: {}", + minimum, attempted + ) + } + InvalidDeploy::ExcessiveApprovals { + got, + max_associated_keys, + } => { + write!( + formatter, + "number of approvals {} exceeds the maximum number of associated keys {}", + got, max_associated_keys + ) + } + InvalidDeploy::UnableToCalculateGasLimit => { + write!(formatter, "unable to calculate gas limit",) + } + InvalidDeploy::UnableToCalculateGasCost => { + write!(formatter, "unable to calculate gas cost",) + } + InvalidDeploy::GasLimitNotSupported => { + write!(formatter, "gas limit is not supported in legacy deploys",) + } + InvalidDeploy::GasPriceToleranceTooLow { min_gas_price_tolerance, provided_gas_price_tolerance } => write!( + formatter, + "received a deploy with gas price tolerance {} but this chain will only go as low as {}", + provided_gas_price_tolerance, min_gas_price_tolerance + ), + InvalidDeploy::InvalidRuntime => { + write!(formatter, "invalid runtime",) + } + InvalidDeploy::NoLaneMatch => write!(formatter, "chainspec didnt have any wasm lanes defined which is required for wasm based deploys",), + InvalidDeploy::ExceededLaneGasLimit { + lane_gas_limit: wasm_lane_gas_limit, + got, + } => { + write!( + formatter, + "payment amount of {} exceeds the largest wasm 
lane gas limit of {}", + got, wasm_lane_gas_limit + ) + } + InvalidDeploy::InvalidPaymentAmount => write!(formatter, "invalid payment amount",), + InvalidDeploy::PricingModeNotSupported => write!(formatter, "pricing mode not supported",), + } + } +} + +impl From for InvalidDeploy { + fn from(error: ExcessiveSizeError) -> Self { + InvalidDeploy::ExcessiveSize(error) + } +} + +#[cfg(feature = "std")] +impl StdError for InvalidDeploy { + fn source(&self) -> Option<&(dyn StdError + 'static)> { + match self { + InvalidDeploy::InvalidApproval { error, .. } => Some(error), + InvalidDeploy::InvalidChainName { .. } + | InvalidDeploy::DependenciesNoLongerSupported { .. } + | InvalidDeploy::ExcessiveSize(_) + | InvalidDeploy::ExcessiveTimeToLive { .. } + | InvalidDeploy::TimestampInFuture { .. } + | InvalidDeploy::InvalidBodyHash + | InvalidDeploy::InvalidDeployHash + | InvalidDeploy::EmptyApprovals + | InvalidDeploy::ExcessiveSessionArgsLength { .. } + | InvalidDeploy::ExcessivePaymentArgsLength { .. } + | InvalidDeploy::MissingPaymentAmount + | InvalidDeploy::FailedToParsePaymentAmount + | InvalidDeploy::ExceededBlockGasLimit { .. } + | InvalidDeploy::MissingTransferAmount + | InvalidDeploy::FailedToParseTransferAmount + | InvalidDeploy::InsufficientTransferAmount { .. } + | InvalidDeploy::ExcessiveApprovals { .. } + | InvalidDeploy::UnableToCalculateGasLimit + | InvalidDeploy::GasLimitNotSupported + | InvalidDeploy::UnableToCalculateGasCost + | InvalidDeploy::GasPriceToleranceTooLow { .. } + | InvalidDeploy::InvalidRuntime + | InvalidDeploy::NoLaneMatch + | InvalidDeploy::ExceededLaneGasLimit { .. } + | InvalidDeploy::InvalidPaymentAmount + | InvalidDeploy::PricingModeNotSupported => None, + } + } +} + +/// Error returned when a Deploy is too large. 
+#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Debug, Serialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +//Default is needed only in testing to meet EnumIter needs +#[cfg_attr(any(feature = "testing", test), derive(Default))] +pub struct ExcessiveSizeError { + /// The maximum permitted serialized deploy size, in bytes. + pub max_transaction_size: u32, + /// The serialized size of the deploy provided, in bytes. + pub actual_deploy_size: usize, +} + +impl Display for ExcessiveSizeError { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + write!( + formatter, + "deploy size of {} bytes exceeds limit of {}", + self.actual_deploy_size, self.max_transaction_size + ) + } +} + +#[cfg(feature = "std")] +impl StdError for ExcessiveSizeError {} +/// Errors other than validation failures relating to `Deploy`s. +#[derive(Debug)] +#[non_exhaustive] +pub enum Error { + /// Error while encoding to JSON. + EncodeToJson(serde_json::Error), + + /// Error while decoding from JSON. + DecodeFromJson(DecodeFromJsonError), + + /// Failed to get "amount" from `payment()`'s runtime args. 
+ InvalidPayment, +} + +impl From for Error { + fn from(error: serde_json::Error) -> Self { + Error::EncodeToJson(error) + } +} + +impl From for Error { + fn from(error: DecodeFromJsonError) -> Self { + Error::DecodeFromJson(error) + } +} + +impl Display for Error { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + Error::EncodeToJson(error) => { + write!(formatter, "encoding to json: {}", error) + } + Error::DecodeFromJson(error) => { + write!(formatter, "decoding from json: {}", error) + } + Error::InvalidPayment => { + write!(formatter, "invalid payment: missing 'amount' arg") + } + } + } +} + +#[cfg(feature = "std")] +impl StdError for Error { + fn source(&self) -> Option<&(dyn StdError + 'static)> { + match self { + Error::EncodeToJson(error) => Some(error), + Error::DecodeFromJson(error) => Some(error), + Error::InvalidPayment => None, + } + } +} + +/// Error while decoding a `Deploy` from JSON. +#[derive(Debug)] +#[non_exhaustive] +pub enum DecodeFromJsonError { + /// Failed to decode from base 16. + FromHex(base16::DecodeError), + + /// Failed to convert slice to array. 
+ TryFromSlice(TryFromSliceError), +} + +impl From for DecodeFromJsonError { + fn from(error: base16::DecodeError) -> Self { + DecodeFromJsonError::FromHex(error) + } +} + +impl From for DecodeFromJsonError { + fn from(error: TryFromSliceError) -> Self { + DecodeFromJsonError::TryFromSlice(error) + } +} + +impl Display for DecodeFromJsonError { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + DecodeFromJsonError::FromHex(error) => { + write!(formatter, "{}", error) + } + DecodeFromJsonError::TryFromSlice(error) => { + write!(formatter, "{}", error) + } + } + } +} + +#[cfg(feature = "std")] +impl StdError for DecodeFromJsonError { + fn source(&self) -> Option<&(dyn StdError + 'static)> { + match self { + DecodeFromJsonError::FromHex(error) => Some(error), + DecodeFromJsonError::TryFromSlice(error) => Some(error), + } + } +} diff --git a/types/src/transaction/deploy/executable_deploy_item.rs b/types/src/transaction/deploy/executable_deploy_item.rs new file mode 100644 index 0000000000..c684c35120 --- /dev/null +++ b/types/src/transaction/deploy/executable_deploy_item.rs @@ -0,0 +1,818 @@ +use alloc::{string::String, vec::Vec}; +use core::fmt::{self, Debug, Display, Formatter}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +use hex_fmt::HexFmt; +#[cfg(any(feature = "testing", test))] +use rand::{ + distributions::{Alphanumeric, Distribution, Standard}, + Rng, +}; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +#[cfg(doc)] +use super::Deploy; +use crate::{ + addressable_entity::DEFAULT_ENTRY_POINT_NAME, + bytesrepr::{self, Bytes, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + contracts::{ContractHash, ContractPackageHash, ContractVersion}, + package::PackageHash, + runtime_args, serde_helpers, + system::mint::ARG_AMOUNT, + transaction::{RuntimeArgs, TransferTarget}, + AddressableEntityHash, AddressableEntityIdentifier, Gas, Motes, PackageIdentifier, Phase, URef, + METHOD_TRANSFER, 
U512, +}; +#[cfg(any(feature = "testing", test))] +use crate::{testing::TestRng, CLValue}; + +const TAG_LENGTH: usize = U8_SERIALIZED_LENGTH; +const MODULE_BYTES_TAG: u8 = 0; +const STORED_CONTRACT_BY_HASH_TAG: u8 = 1; +const STORED_CONTRACT_BY_NAME_TAG: u8 = 2; +const STORED_VERSIONED_CONTRACT_BY_HASH_TAG: u8 = 3; +const STORED_VERSIONED_CONTRACT_BY_NAME_TAG: u8 = 4; +const TRANSFER_TAG: u8 = 5; +const TRANSFER_ARG_AMOUNT: &str = "amount"; +const TRANSFER_ARG_SOURCE: &str = "source"; +const TRANSFER_ARG_TARGET: &str = "target"; +const TRANSFER_ARG_ID: &str = "id"; + +/// Identifier for an [`ExecutableDeployItem`]. +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] +pub enum ExecutableDeployItemIdentifier { + /// The deploy item is of the type [`ExecutableDeployItem::ModuleBytes`] + Module, + /// The deploy item is a variation of a stored contract. + AddressableEntity(AddressableEntityIdentifier), + /// The deploy item is a variation of a stored contract package. + Package(PackageIdentifier), + /// The deploy item is a native transfer. + Transfer, +} + +/// The executable component of a [`Deploy`]. +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub enum ExecutableDeployItem { + /// Executable specified as raw bytes that represent Wasm code and an instance of + /// [`RuntimeArgs`]. + ModuleBytes { + /// Raw Wasm module bytes with 'call' exported as an entrypoint. + #[cfg_attr( + feature = "json-schema", + schemars(description = "Hex-encoded raw Wasm bytes.") + )] + module_bytes: Bytes, + /// Runtime arguments. + args: RuntimeArgs, + }, + /// Stored contract referenced by its [`AddressableEntityHash`], entry point and an instance of + /// [`RuntimeArgs`]. + StoredContractByHash { + /// Contract hash. 
+ #[serde(with = "serde_helpers::contract_hash_as_digest")] + #[cfg_attr( + feature = "json-schema", + schemars( + // this attribute is necessary due to a bug: https://github.com/GREsau/schemars/issues/89 + with = "ContractHash", + description = "Hex-encoded contract hash." + ) + )] + hash: ContractHash, + /// Name of an entry point. + entry_point: String, + /// Runtime arguments. + args: RuntimeArgs, + }, + /// Stored contract referenced by a named key existing in the signer's account context, entry + /// point and an instance of [`RuntimeArgs`]. + StoredContractByName { + /// Named key. + name: String, + /// Name of an entry point. + entry_point: String, + /// Runtime arguments. + args: RuntimeArgs, + }, + /// Stored versioned contract referenced by its [`PackageHash`], entry point and an + /// instance of [`RuntimeArgs`]. + StoredVersionedContractByHash { + /// Contract package hash + #[serde(with = "serde_helpers::contract_package_hash_as_digest")] + #[cfg_attr( + feature = "json-schema", + schemars( + // this attribute is necessary due to a bug: https://github.com/GREsau/schemars/issues/89 + with = "ContractPackageHash", + description = "Hex-encoded contract package hash." + ) + )] + hash: ContractPackageHash, + /// An optional version of the contract to call. It will default to the highest enabled + /// version if no value is specified. + version: Option, + /// Entry point name. + entry_point: String, + /// Runtime arguments. + args: RuntimeArgs, + }, + /// Stored versioned contract referenced by a named key existing in the signer's account + /// context, entry point and an instance of [`RuntimeArgs`]. + StoredVersionedContractByName { + /// Named key. + name: String, + /// An optional version of the contract to call. It will default to the highest enabled + /// version if no value is specified. + version: Option, + /// Entry point name. + entry_point: String, + /// Runtime arguments. 
+ args: RuntimeArgs, + }, + /// A native transfer which does not contain or reference a Wasm code. + Transfer { + /// Runtime arguments. + args: RuntimeArgs, + }, +} + +impl ExecutableDeployItem { + /// Returns a new `ExecutableDeployItem::ModuleBytes`. + pub fn new_module_bytes(module_bytes: Bytes, args: RuntimeArgs) -> Self { + ExecutableDeployItem::ModuleBytes { module_bytes, args } + } + + /// Returns a new `ExecutableDeployItem::ModuleBytes` suitable for use as standard payment code + /// of a `Deploy`. + pub fn new_standard_payment>(amount: A) -> Self { + ExecutableDeployItem::ModuleBytes { + module_bytes: Bytes::new(), + args: runtime_args! { + ARG_AMOUNT => amount.into(), + }, + } + } + + /// Returns a new `ExecutableDeployItem::StoredContractByHash`. + pub fn new_stored_contract_by_hash( + hash: ContractHash, + entry_point: String, + args: RuntimeArgs, + ) -> Self { + ExecutableDeployItem::StoredContractByHash { + hash, + entry_point, + args, + } + } + + /// Returns a new `ExecutableDeployItem::StoredContractByName`. + pub fn new_stored_contract_by_name( + name: String, + entry_point: String, + args: RuntimeArgs, + ) -> Self { + ExecutableDeployItem::StoredContractByName { + name, + entry_point, + args, + } + } + + /// Returns a new `ExecutableDeployItem::StoredVersionedContractByHash`. + pub fn new_stored_versioned_contract_by_hash( + hash: ContractPackageHash, + version: Option, + entry_point: String, + args: RuntimeArgs, + ) -> Self { + ExecutableDeployItem::StoredVersionedContractByHash { + hash, + version, + entry_point, + args, + } + } + + /// Returns a new `ExecutableDeployItem::StoredVersionedKeyContractByName`. + pub fn new_stored_versioned_contract_by_name( + name: String, + version: Option, + entry_point: String, + args: RuntimeArgs, + ) -> Self { + ExecutableDeployItem::StoredVersionedContractByName { + name, + version, + entry_point, + args, + } + } + + /// Returns a new `ExecutableDeployItem` suitable for use as session code for a transfer. 
+ /// + /// If `maybe_source` is None, the account's main purse is used as the source. + pub fn new_transfer, T: Into>( + amount: A, + maybe_source: Option, + target: T, + maybe_transfer_id: Option, + ) -> Self { + let mut args = RuntimeArgs::new(); + args.insert(TRANSFER_ARG_AMOUNT, amount.into()) + .expect("should serialize amount arg"); + + if let Some(source) = maybe_source { + args.insert(TRANSFER_ARG_SOURCE, source) + .expect("should serialize source arg"); + } + + match target.into() { + TransferTarget::PublicKey(public_key) => args + .insert(TRANSFER_ARG_TARGET, public_key) + .expect("should serialize public key target arg"), + TransferTarget::AccountHash(account_hash) => args + .insert(TRANSFER_ARG_TARGET, account_hash) + .expect("should serialize account hash target arg"), + TransferTarget::URef(uref) => args + .insert(TRANSFER_ARG_TARGET, uref) + .expect("should serialize uref target arg"), + } + + args.insert(TRANSFER_ARG_ID, maybe_transfer_id) + .expect("should serialize transfer id arg"); + + ExecutableDeployItem::Transfer { args } + } + + /// Returns the entry point name. + pub fn entry_point_name(&self) -> &str { + match self { + ExecutableDeployItem::ModuleBytes { .. } => DEFAULT_ENTRY_POINT_NAME, + ExecutableDeployItem::Transfer { .. } => METHOD_TRANSFER, + ExecutableDeployItem::StoredVersionedContractByName { entry_point, .. } + | ExecutableDeployItem::StoredVersionedContractByHash { entry_point, .. } + | ExecutableDeployItem::StoredContractByHash { entry_point, .. } + | ExecutableDeployItem::StoredContractByName { entry_point, .. } => entry_point, + } + } + + /// Returns the identifier of the `ExecutableDeployItem`. + pub fn identifier(&self) -> ExecutableDeployItemIdentifier { + match self { + ExecutableDeployItem::ModuleBytes { .. } => ExecutableDeployItemIdentifier::Module, + ExecutableDeployItem::StoredContractByHash { hash, .. 
} => { + ExecutableDeployItemIdentifier::AddressableEntity( + AddressableEntityIdentifier::Hash(AddressableEntityHash::new(hash.value())), + ) + } + ExecutableDeployItem::StoredContractByName { name, .. } => { + ExecutableDeployItemIdentifier::AddressableEntity( + AddressableEntityIdentifier::Name(name.clone()), + ) + } + ExecutableDeployItem::StoredVersionedContractByHash { hash, version, .. } => { + ExecutableDeployItemIdentifier::Package(PackageIdentifier::Hash { + package_hash: PackageHash::new(hash.value()), + version: *version, + }) + } + ExecutableDeployItem::StoredVersionedContractByName { name, version, .. } => { + ExecutableDeployItemIdentifier::Package(PackageIdentifier::Name { + name: name.clone(), + version: *version, + }) + } + ExecutableDeployItem::Transfer { .. } => ExecutableDeployItemIdentifier::Transfer, + } + } + + /// Returns the identifier of the contract in the deploy item, if present. + pub fn contract_identifier(&self) -> Option { + match self { + ExecutableDeployItem::ModuleBytes { .. } + | ExecutableDeployItem::StoredVersionedContractByHash { .. } + | ExecutableDeployItem::StoredVersionedContractByName { .. } + | ExecutableDeployItem::Transfer { .. } => None, + ExecutableDeployItem::StoredContractByHash { hash, .. } => Some( + AddressableEntityIdentifier::Hash(AddressableEntityHash::new(hash.value())), + ), + ExecutableDeployItem::StoredContractByName { name, .. } => { + Some(AddressableEntityIdentifier::Name(name.clone())) + } + } + } + + /// Returns the identifier of the contract package in the deploy item, if present. + pub fn contract_package_identifier(&self) -> Option { + match self { + ExecutableDeployItem::ModuleBytes { .. } + | ExecutableDeployItem::StoredContractByHash { .. } + | ExecutableDeployItem::StoredContractByName { .. } + | ExecutableDeployItem::Transfer { .. } => None, + + ExecutableDeployItem::StoredVersionedContractByHash { hash, version, .. 
} => { + Some(PackageIdentifier::HashWithMajorVersion { + package_hash: PackageHash::new(hash.value()), + version: *version, + protocol_version_major: None, + }) + } + ExecutableDeployItem::StoredVersionedContractByName { name, version, .. } => { + Some(PackageIdentifier::NameWithMajorVersion { + name: name.clone(), + version: *version, + protocol_version_major: None, + }) + } + } + } + + /// Returns the runtime arguments. + pub fn args(&self) -> &RuntimeArgs { + match self { + ExecutableDeployItem::ModuleBytes { args, .. } + | ExecutableDeployItem::StoredContractByHash { args, .. } + | ExecutableDeployItem::StoredContractByName { args, .. } + | ExecutableDeployItem::StoredVersionedContractByHash { args, .. } + | ExecutableDeployItem::StoredVersionedContractByName { args, .. } + | ExecutableDeployItem::Transfer { args } => args, + } + } + + /// Returns the payment amount from args (if any) as Gas. + pub fn payment_amount(&self, conv_rate: u8) -> Option { + let cl_value = self.args().get(ARG_AMOUNT)?; + let motes = cl_value.clone().into_t::().ok()?; + Gas::from_motes(Motes::new(motes), conv_rate) + } + + /// Returns `true` if this deploy item is a native transfer. + pub fn is_transfer(&self) -> bool { + matches!(self, ExecutableDeployItem::Transfer { .. }) + } + + /// Returns `true` if this deploy item is a standard payment. + pub fn is_standard_payment(&self, phase: Phase) -> bool { + if phase != Phase::Payment { + return false; + } + + if let ExecutableDeployItem::ModuleBytes { module_bytes, .. } = self { + return module_bytes.is_empty(); + } + + false + } + + /// Returns `true` if the deploy item is a contract identified by its name. + pub fn is_by_name(&self) -> bool { + matches!( + self, + ExecutableDeployItem::StoredVersionedContractByName { .. } + ) || matches!(self, ExecutableDeployItem::StoredContractByName { .. }) + } + + /// Returns the name of the contract or contract package, if the deploy item is identified by + /// name. 
+ pub fn by_name(&self) -> Option { + match self { + ExecutableDeployItem::StoredContractByName { name, .. } + | ExecutableDeployItem::StoredVersionedContractByName { name, .. } => { + Some(name.clone()) + } + ExecutableDeployItem::ModuleBytes { .. } + | ExecutableDeployItem::StoredContractByHash { .. } + | ExecutableDeployItem::StoredVersionedContractByHash { .. } + | ExecutableDeployItem::Transfer { .. } => None, + } + } + + /// Returns `true` if the deploy item is a stored contract. + pub fn is_stored_contract(&self) -> bool { + matches!(self, ExecutableDeployItem::StoredContractByHash { .. }) + || matches!(self, ExecutableDeployItem::StoredContractByName { .. }) + } + + /// Returns `true` if the deploy item is a stored contract package. + pub fn is_stored_contract_package(&self) -> bool { + matches!( + self, + ExecutableDeployItem::StoredVersionedContractByHash { .. } + ) || matches!( + self, + ExecutableDeployItem::StoredVersionedContractByName { .. } + ) + } + + /// Returns `true` if the deploy item is [`ModuleBytes`]. + /// + /// [`ModuleBytes`]: ExecutableDeployItem::ModuleBytes + pub fn is_module_bytes(&self) -> bool { + matches!(self, Self::ModuleBytes { .. }) + } + + /// Returns a random `ExecutableDeployItem`. 
+ #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + rng.gen() + } +} + +impl ToBytes for ExecutableDeployItem { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + ExecutableDeployItem::ModuleBytes { module_bytes, args } => { + writer.push(MODULE_BYTES_TAG); + module_bytes.write_bytes(writer)?; + args.write_bytes(writer) + } + ExecutableDeployItem::StoredContractByHash { + hash, + entry_point, + args, + } => { + writer.push(STORED_CONTRACT_BY_HASH_TAG); + hash.write_bytes(writer)?; + entry_point.write_bytes(writer)?; + args.write_bytes(writer) + } + ExecutableDeployItem::StoredContractByName { + name, + entry_point, + args, + } => { + writer.push(STORED_CONTRACT_BY_NAME_TAG); + name.write_bytes(writer)?; + entry_point.write_bytes(writer)?; + args.write_bytes(writer) + } + ExecutableDeployItem::StoredVersionedContractByHash { + hash, + version, + entry_point, + args, + } => { + writer.push(STORED_VERSIONED_CONTRACT_BY_HASH_TAG); + hash.write_bytes(writer)?; + version.write_bytes(writer)?; + entry_point.write_bytes(writer)?; + args.write_bytes(writer) + } + ExecutableDeployItem::StoredVersionedContractByName { + name, + version, + entry_point, + args, + } => { + writer.push(STORED_VERSIONED_CONTRACT_BY_NAME_TAG); + name.write_bytes(writer)?; + version.write_bytes(writer)?; + entry_point.write_bytes(writer)?; + args.write_bytes(writer) + } + ExecutableDeployItem::Transfer { args } => { + writer.push(TRANSFER_TAG); + args.write_bytes(writer) + } + } + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + TAG_LENGTH + + match self { + ExecutableDeployItem::ModuleBytes { module_bytes, args } => { + module_bytes.serialized_length() + args.serialized_length() + } + ExecutableDeployItem::StoredContractByHash { + hash, + entry_point, + args, + } => { 
+ hash.serialized_length() + + entry_point.serialized_length() + + args.serialized_length() + } + ExecutableDeployItem::StoredContractByName { + name, + entry_point, + args, + } => { + name.serialized_length() + + entry_point.serialized_length() + + args.serialized_length() + } + ExecutableDeployItem::StoredVersionedContractByHash { + hash, + version, + entry_point, + args, + } => { + hash.serialized_length() + + version.serialized_length() + + entry_point.serialized_length() + + args.serialized_length() + } + ExecutableDeployItem::StoredVersionedContractByName { + name, + version, + entry_point, + args, + } => { + name.serialized_length() + + version.serialized_length() + + entry_point.serialized_length() + + args.serialized_length() + } + ExecutableDeployItem::Transfer { args } => args.serialized_length(), + } + } +} + +impl FromBytes for ExecutableDeployItem { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + match tag { + MODULE_BYTES_TAG => { + let (module_bytes, remainder) = Bytes::from_bytes(remainder)?; + let (args, remainder) = RuntimeArgs::from_bytes(remainder)?; + Ok(( + ExecutableDeployItem::ModuleBytes { module_bytes, args }, + remainder, + )) + } + STORED_CONTRACT_BY_HASH_TAG => { + let (hash, remainder) = ContractHash::from_bytes(remainder)?; + let (entry_point, remainder) = String::from_bytes(remainder)?; + let (args, remainder) = RuntimeArgs::from_bytes(remainder)?; + Ok(( + ExecutableDeployItem::StoredContractByHash { + hash, + entry_point, + args, + }, + remainder, + )) + } + STORED_CONTRACT_BY_NAME_TAG => { + let (name, remainder) = String::from_bytes(remainder)?; + let (entry_point, remainder) = String::from_bytes(remainder)?; + let (args, remainder) = RuntimeArgs::from_bytes(remainder)?; + Ok(( + ExecutableDeployItem::StoredContractByName { + name, + entry_point, + args, + }, + remainder, + )) + } + STORED_VERSIONED_CONTRACT_BY_HASH_TAG => { + let (hash, remainder) = 
ContractPackageHash::from_bytes(remainder)?; + let (version, remainder) = Option::::from_bytes(remainder)?; + let (entry_point, remainder) = String::from_bytes(remainder)?; + let (args, remainder) = RuntimeArgs::from_bytes(remainder)?; + Ok(( + ExecutableDeployItem::StoredVersionedContractByHash { + hash, + version, + entry_point, + args, + }, + remainder, + )) + } + STORED_VERSIONED_CONTRACT_BY_NAME_TAG => { + let (name, remainder) = String::from_bytes(remainder)?; + let (version, remainder) = Option::::from_bytes(remainder)?; + let (entry_point, remainder) = String::from_bytes(remainder)?; + let (args, remainder) = RuntimeArgs::from_bytes(remainder)?; + Ok(( + ExecutableDeployItem::StoredVersionedContractByName { + name, + version, + entry_point, + args, + }, + remainder, + )) + } + TRANSFER_TAG => { + let (args, remainder) = RuntimeArgs::from_bytes(remainder)?; + Ok((ExecutableDeployItem::Transfer { args }, remainder)) + } + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +impl Display for ExecutableDeployItem { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self { + ExecutableDeployItem::ModuleBytes { module_bytes, .. } => { + write!(f, "module-bytes [{} bytes]", module_bytes.len()) + } + ExecutableDeployItem::StoredContractByHash { + hash, entry_point, .. + } => write!( + f, + "stored-contract-by-hash: {:10}, entry-point: {}", + HexFmt(hash), + entry_point, + ), + ExecutableDeployItem::StoredContractByName { + name, entry_point, .. + } => write!( + f, + "stored-contract-by-name: {}, entry-point: {}", + name, entry_point, + ), + ExecutableDeployItem::StoredVersionedContractByHash { + hash, + version: Some(ver), + entry_point, + .. + } => write!( + f, + "stored-versioned-contract-by-hash: {:10}, version: {}, entry-point: {}", + HexFmt(hash), + ver, + entry_point, + ), + ExecutableDeployItem::StoredVersionedContractByHash { + hash, entry_point, .. 
+ } => write!( + f, + "stored-versioned-contract-by-hash: {:10}, version: latest, entry-point: {}", + HexFmt(hash), + entry_point, + ), + ExecutableDeployItem::StoredVersionedContractByName { + name, + version: Some(ver), + entry_point, + .. + } => write!( + f, + "stored-versioned-contract: {}, version: {}, entry-point: {}", + name, ver, entry_point, + ), + ExecutableDeployItem::StoredVersionedContractByName { + name, entry_point, .. + } => write!( + f, + "stored-versioned-contract: {}, version: latest, entry-point: {}", + name, entry_point, + ), + ExecutableDeployItem::Transfer { .. } => write!(f, "transfer"), + } + } +} + +impl Debug for ExecutableDeployItem { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self { + ExecutableDeployItem::ModuleBytes { module_bytes, args } => f + .debug_struct("ModuleBytes") + .field("module_bytes", &format!("[{} bytes]", module_bytes.len())) + .field("args", args) + .finish(), + ExecutableDeployItem::StoredContractByHash { + hash, + entry_point, + args, + } => f + .debug_struct("StoredContractByHash") + .field("hash", &base16::encode_lower(hash)) + .field("entry_point", &entry_point) + .field("args", args) + .finish(), + ExecutableDeployItem::StoredContractByName { + name, + entry_point, + args, + } => f + .debug_struct("StoredContractByName") + .field("name", &name) + .field("entry_point", &entry_point) + .field("args", args) + .finish(), + ExecutableDeployItem::StoredVersionedContractByHash { + hash, + version, + entry_point, + args, + } => f + .debug_struct("StoredVersionedContractByHash") + .field("hash", &base16::encode_lower(hash)) + .field("version", version) + .field("entry_point", &entry_point) + .field("args", args) + .finish(), + ExecutableDeployItem::StoredVersionedContractByName { + name, + version, + entry_point, + args, + } => f + .debug_struct("StoredVersionedContractByName") + .field("name", &name) + .field("version", version) + .field("entry_point", &entry_point) + .field("args", args) + 
.finish(), + ExecutableDeployItem::Transfer { args } => { + f.debug_struct("Transfer").field("args", args).finish() + } + } + } +} + +#[cfg(any(feature = "testing", test))] +impl Distribution for Standard { + fn sample(&self, rng: &mut R) -> ExecutableDeployItem { + fn random_bytes(rng: &mut R) -> Vec { + let mut bytes = vec![0u8; rng.gen_range(0..100)]; + rng.fill_bytes(bytes.as_mut()); + bytes + } + + fn random_string(rng: &mut R) -> String { + rng.sample_iter(&Alphanumeric) + .take(20) + .map(char::from) + .collect() + } + + let mut args = RuntimeArgs::new(); + let _ = args.insert(random_string(rng), Bytes::from(random_bytes(rng))); + + match rng.gen_range(0..5) { + 0 => ExecutableDeployItem::ModuleBytes { + module_bytes: random_bytes(rng).into(), + args, + }, + 1 => ExecutableDeployItem::StoredContractByHash { + hash: ContractHash::new(rng.gen()), + entry_point: random_string(rng), + args, + }, + 2 => ExecutableDeployItem::StoredContractByName { + name: random_string(rng), + entry_point: random_string(rng), + args, + }, + 3 => ExecutableDeployItem::StoredVersionedContractByHash { + hash: ContractPackageHash::new(rng.gen()), + version: rng.gen(), + entry_point: random_string(rng), + args, + }, + 4 => ExecutableDeployItem::StoredVersionedContractByName { + name: random_string(rng), + version: rng.gen(), + entry_point: random_string(rng), + args, + }, + 5 => { + let amount = rng.gen_range(2_500_000_000_u64..1_000_000_000_000_000); + let mut transfer_args = RuntimeArgs::new(); + transfer_args.insert_cl_value( + ARG_AMOUNT, + CLValue::from_t(U512::from(amount)).expect("should get CLValue from U512"), + ); + ExecutableDeployItem::Transfer { + args: transfer_args, + } + } + _ => unreachable!(), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn serialization_roundtrip() { + let rng = &mut TestRng::new(); + for _ in 0..10 { + let executable_deploy_item = ExecutableDeployItem::random(rng); + 
bytesrepr::test_serialization_roundtrip(&executable_deploy_item); + } + } +} diff --git a/types/src/transaction/error.rs b/types/src/transaction/error.rs new file mode 100644 index 0000000000..6676a2e8ef --- /dev/null +++ b/types/src/transaction/error.rs @@ -0,0 +1,54 @@ +use crate::InvalidDeploy; +use core::fmt::{Display, Formatter}; +#[cfg(feature = "datasize")] +use datasize::DataSize; + +#[cfg(feature = "std")] +use serde::Serialize; +#[cfg(feature = "std")] +use std::error::Error as StdError; + +pub use crate::transaction::transaction_v1::InvalidTransactionV1; + +/// A representation of the way in which a transaction failed validation checks. +#[derive(Clone, Eq, PartialEq, Debug)] +#[cfg_attr(feature = "std", derive(Serialize))] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[non_exhaustive] +pub enum InvalidTransaction { + /// Deploys. + Deploy(InvalidDeploy), + /// V1 transactions. + V1(InvalidTransactionV1), +} + +impl From for InvalidTransaction { + fn from(value: InvalidDeploy) -> Self { + Self::Deploy(value) + } +} + +impl From for InvalidTransaction { + fn from(value: InvalidTransactionV1) -> Self { + Self::V1(value) + } +} + +#[cfg(feature = "std")] +impl StdError for InvalidTransaction { + fn source(&self) -> Option<&(dyn StdError + 'static)> { + match self { + InvalidTransaction::Deploy(deploy) => deploy.source(), + InvalidTransaction::V1(v1) => v1.source(), + } + } +} + +impl Display for InvalidTransaction { + fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { + match self { + InvalidTransaction::Deploy(inner) => Display::fmt(inner, f), + InvalidTransaction::V1(inner) => Display::fmt(inner, f), + } + } +} diff --git a/types/src/transaction/execution_info.rs b/types/src/transaction/execution_info.rs new file mode 100644 index 0000000000..26303f5c65 --- /dev/null +++ b/types/src/transaction/execution_info.rs @@ -0,0 +1,62 @@ +use alloc::vec::Vec; + +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, 
Serialize}; + +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + execution::ExecutionResult, + BlockHash, +}; + +/// The block hash and height in which a given deploy was executed, along with the execution result +/// if known. +#[derive(Clone, Serialize, Deserialize, Debug, PartialEq, Eq)] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub struct ExecutionInfo { + /// The hash of the block in which the deploy was executed. + pub block_hash: BlockHash, + /// The height of the block in which the deploy was executed. + pub block_height: u64, + /// The execution result if known. + pub execution_result: Option, +} + +impl FromBytes for ExecutionInfo { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (block_hash, bytes) = FromBytes::from_bytes(bytes)?; + let (block_height, bytes) = FromBytes::from_bytes(bytes)?; + let (execution_result, bytes) = FromBytes::from_bytes(bytes)?; + Ok(( + ExecutionInfo { + block_hash, + block_height, + execution_result, + }, + bytes, + )) + } +} + +impl ToBytes for ExecutionInfo { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut result = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut result)?; + Ok(result) + } + + fn write_bytes(&self, bytes: &mut Vec) -> Result<(), bytesrepr::Error> { + self.block_hash.write_bytes(bytes)?; + self.block_height.write_bytes(bytes)?; + self.execution_result.write_bytes(bytes)?; + Ok(()) + } + + fn serialized_length(&self) -> usize { + self.block_hash.serialized_length() + + self.block_height.serialized_length() + + self.execution_result.serialized_length() + } +} diff --git a/types/src/transaction/initiator_addr.rs b/types/src/transaction/initiator_addr.rs new file mode 100644 index 0000000000..b9e6c40046 --- /dev/null +++ b/types/src/transaction/initiator_addr.rs @@ -0,0 +1,197 @@ +use super::serialization::CalltableSerializationEnvelope; +#[cfg(any(feature = "testing", test))] +use 
crate::testing::TestRng; +use crate::{ + account::AccountHash, + bytesrepr::{ + Error::{self, Formatting}, + FromBytes, ToBytes, + }, + transaction::serialization::CalltableSerializationEnvelopeBuilder, + AsymmetricType, PublicKey, +}; +use alloc::vec::Vec; +use core::fmt::{self, Debug, Display, Formatter}; +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::Rng; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +const TAG_FIELD_INDEX: u16 = 0; + +const PUBLIC_KEY_VARIANT_TAG: u8 = 0; +const PUBLIC_KEY_FIELD_INDEX: u16 = 1; + +const ACCOUNT_HASH_VARIANT_TAG: u8 = 1; +const ACCOUNT_HASH_FIELD_INDEX: u16 = 1; + +/// The address of the initiator of a [`crate::Transaction`]. +#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr( + feature = "json-schema", + derive(JsonSchema), + schemars(description = "The address of the initiator of a TransactionV1.") +)] +#[serde(deny_unknown_fields)] +pub enum InitiatorAddr { + /// The public key of the initiator. + PublicKey(PublicKey), + /// The account hash derived from the public key of the initiator. + AccountHash(AccountHash), +} + +impl InitiatorAddr { + /// Gets the account hash. + pub fn account_hash(&self) -> AccountHash { + match self { + InitiatorAddr::PublicKey(public_key) => public_key.to_account_hash(), + InitiatorAddr::AccountHash(hash) => *hash, + } + } + + /// Returns a random `InitiatorAddr`. 
+ #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + match rng.gen_range(0..=1) { + 0 => InitiatorAddr::PublicKey(PublicKey::random(rng)), + 1 => InitiatorAddr::AccountHash(rng.gen()), + _ => unreachable!(), + } + } + + fn serialized_field_lengths(&self) -> Vec { + match self { + InitiatorAddr::PublicKey(pub_key) => { + vec![ + crate::bytesrepr::U8_SERIALIZED_LENGTH, + pub_key.serialized_length(), + ] + } + InitiatorAddr::AccountHash(hash) => { + vec![ + crate::bytesrepr::U8_SERIALIZED_LENGTH, + hash.serialized_length(), + ] + } + } + } +} + +impl ToBytes for InitiatorAddr { + fn to_bytes(&self) -> Result, Error> { + match self { + InitiatorAddr::PublicKey(pub_key) => { + CalltableSerializationEnvelopeBuilder::new(self.serialized_field_lengths())? + .add_field(TAG_FIELD_INDEX, &PUBLIC_KEY_VARIANT_TAG)? + .add_field(PUBLIC_KEY_FIELD_INDEX, &pub_key)? + .binary_payload_bytes() + } + InitiatorAddr::AccountHash(hash) => { + CalltableSerializationEnvelopeBuilder::new(self.serialized_field_lengths())? + .add_field(TAG_FIELD_INDEX, &ACCOUNT_HASH_VARIANT_TAG)? + .add_field(ACCOUNT_HASH_FIELD_INDEX, &hash)? 
+ .binary_payload_bytes() + } + } + } + fn serialized_length(&self) -> usize { + CalltableSerializationEnvelope::estimate_size(self.serialized_field_lengths()) + } +} + +impl FromBytes for InitiatorAddr { + fn from_bytes(bytes: &[u8]) -> Result<(InitiatorAddr, &[u8]), Error> { + let (binary_payload, remainder) = CalltableSerializationEnvelope::from_bytes(2, bytes)?; + let window = binary_payload.start_consuming()?.ok_or(Formatting)?; + window.verify_index(TAG_FIELD_INDEX)?; + let (tag, window) = window.deserialize_and_maybe_next::()?; + let to_ret = match tag { + PUBLIC_KEY_VARIANT_TAG => { + let window = window.ok_or(Formatting)?; + window.verify_index(PUBLIC_KEY_FIELD_INDEX)?; + let (pub_key, window) = window.deserialize_and_maybe_next::()?; + if window.is_some() { + return Err(Formatting); + } + Ok(InitiatorAddr::PublicKey(pub_key)) + } + ACCOUNT_HASH_VARIANT_TAG => { + let window = window.ok_or(Formatting)?; + window.verify_index(ACCOUNT_HASH_FIELD_INDEX)?; + let (hash, window) = window.deserialize_and_maybe_next::()?; + if window.is_some() { + return Err(Formatting); + } + Ok(InitiatorAddr::AccountHash(hash)) + } + _ => Err(Formatting), + }; + to_ret.map(|endpoint| (endpoint, remainder)) + } +} + +impl From for InitiatorAddr { + fn from(public_key: PublicKey) -> Self { + InitiatorAddr::PublicKey(public_key) + } +} + +impl From for InitiatorAddr { + fn from(account_hash: AccountHash) -> Self { + InitiatorAddr::AccountHash(account_hash) + } +} + +impl Display for InitiatorAddr { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + InitiatorAddr::PublicKey(public_key) => { + write!(formatter, "public key {}", public_key.to_hex()) + } + InitiatorAddr::AccountHash(account_hash) => { + write!(formatter, "account hash {}", account_hash) + } + } + } +} + +impl Debug for InitiatorAddr { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + InitiatorAddr::PublicKey(public_key) => formatter + .debug_tuple("PublicKey") + 
.field(public_key) + .finish(), + InitiatorAddr::AccountHash(account_hash) => formatter + .debug_tuple("AccountHash") + .field(account_hash) + .finish(), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{bytesrepr, gens::initiator_addr_arb}; + use proptest::prelude::*; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + for _ in 0..10 { + bytesrepr::test_serialization_roundtrip(&InitiatorAddr::random(rng)); + } + } + + proptest! { + #[test] + fn generative_bytesrepr_roundtrip(val in initiator_addr_arb()) { + bytesrepr::test_serialization_roundtrip(&val); + } + } +} diff --git a/types/src/transaction/initiator_addr_and_secret_key.rs b/types/src/transaction/initiator_addr_and_secret_key.rs new file mode 100644 index 0000000000..7acb7a679a --- /dev/null +++ b/types/src/transaction/initiator_addr_and_secret_key.rs @@ -0,0 +1,44 @@ +use crate::{InitiatorAddr, PublicKey, SecretKey}; + +/// Used when constructing a deploy or transaction. +#[derive(Debug)] +pub(crate) enum InitiatorAddrAndSecretKey<'a> { + /// Provides both the initiator address and the secret key (not necessarily for the same + /// initiator address) used to sign the deploy or transaction. + Both { + /// The initiator address of the account. + initiator_addr: InitiatorAddr, + /// The secret key used to sign the deploy or transaction. + secret_key: &'a SecretKey, + }, + /// The initiator address only (no secret key). The deploy or transaction will be created + /// unsigned. + #[allow(unused)] + InitiatorAddr(InitiatorAddr), + /// The initiator address will be derived from the provided secret key, and the deploy or + /// transaction will be signed by the same secret key. + #[allow(unused)] + SecretKey(&'a SecretKey), +} + +impl InitiatorAddrAndSecretKey<'_> { + /// The address of the initiator of a `TransactionV1`. + pub fn initiator_addr(&self) -> InitiatorAddr { + match self { + InitiatorAddrAndSecretKey::Both { initiator_addr, .. 
} + | InitiatorAddrAndSecretKey::InitiatorAddr(initiator_addr) => initiator_addr.clone(), + InitiatorAddrAndSecretKey::SecretKey(secret_key) => { + InitiatorAddr::PublicKey(PublicKey::from(*secret_key)) + } + } + } + + /// The secret key of the initiator of a `TransactionV1`. + pub fn secret_key(&self) -> Option<&SecretKey> { + match self { + InitiatorAddrAndSecretKey::Both { secret_key, .. } + | InitiatorAddrAndSecretKey::SecretKey(secret_key) => Some(secret_key), + InitiatorAddrAndSecretKey::InitiatorAddr(_) => None, + } + } +} diff --git a/types/src/transaction/package_identifier.rs b/types/src/transaction/package_identifier.rs new file mode 100644 index 0000000000..abef553447 --- /dev/null +++ b/types/src/transaction/package_identifier.rs @@ -0,0 +1,331 @@ +use alloc::{string::String, vec::Vec}; +use core::fmt::{self, Display, Formatter}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +use hex_fmt::HexFmt; +#[cfg(any(feature = "testing", test))] +use rand::Rng; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + contracts::ProtocolVersionMajor, + EntityVersion, PackageHash, +}; +#[cfg(doc)] +use crate::{ExecutableDeployItem, TransactionTarget}; + +const HASH_TAG: u8 = 0; +const NAME_TAG: u8 = 1; +const HASH_WITH_VERSION_TAG: u8 = 2; +const NAME_WITH_VERSION_TAG: u8 = 3; + +/// Identifier for the package object within a [`TransactionTarget::Stored`] or an +/// [`ExecutableDeployItem`]. +#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr( + feature = "json-schema", + derive(JsonSchema), + schemars( + description = "Identifier for the package object within a `Stored` transaction target or \ + an `ExecutableDeployItem`." 
+ ) +)] +pub enum PackageIdentifier { + /// The hash and optional version identifying the contract package. + Hash { + /// The hash of the contract package. + package_hash: PackageHash, + /// The version of the contract package. + /// + /// `None` implies latest version. + version: Option, + }, + /// The name and optional version identifying the contract package. + Name { + /// The name of the contract package. + name: String, + /// The version of the contract package. + /// + /// `None` implies latest version. + version: Option, + }, + /// The hash and optional version key identifying the contract package. + HashWithMajorVersion { + /// The hash of the contract package. + package_hash: PackageHash, + /// The major protocol version of the contract package. + /// + /// `None` implies latest major protocol version. + protocol_version_major: Option, + /// The version of the contract package. + /// + /// `None` implies latest version. + version: Option, + }, + /// The name and optional version key identifying the contract package. + NameWithMajorVersion { + /// The name of the contract package. + name: String, + /// The major protocol version of the contract package. + /// + /// `None` implies latest major protocol version. + protocol_version_major: Option, + /// The version of the contract package. + /// + /// `None` implies latest version. + version: Option, + }, +} + +impl PackageIdentifier { + /// Returns the optional version of the contract package. + /// + /// `None` implies latest version. + pub fn version(&self) -> Option { + match self { + PackageIdentifier::HashWithMajorVersion { version, .. } + | PackageIdentifier::NameWithMajorVersion { version, .. } + | PackageIdentifier::Hash { version, .. } + | PackageIdentifier::Name { version, .. } => *version, + } + } + + /// Returns the optional version key of the contract package. + /// + /// `None` implies latest version. 
+ pub fn protocol_version_major(&self) -> Option { + match self { + PackageIdentifier::HashWithMajorVersion { + protocol_version_major, + .. + } + | PackageIdentifier::NameWithMajorVersion { + protocol_version_major, + .. + } => *protocol_version_major, + PackageIdentifier::Hash { .. } | PackageIdentifier::Name { .. } => None, + } + } + + /// Returns a random `PackageIdentifier`. + #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + match rng.gen_range(0..4) { + 0 => PackageIdentifier::Hash { + package_hash: PackageHash::new(rng.gen()), + version: rng.gen(), + }, + 1 => PackageIdentifier::Name { + name: rng.random_string(1..21), + version: rng.gen(), + }, + 2 => PackageIdentifier::HashWithMajorVersion { + package_hash: PackageHash::new(rng.gen()), + protocol_version_major: rng.gen(), + version: rng.gen(), + }, + 3 => PackageIdentifier::NameWithMajorVersion { + name: rng.random_string(1..21), + protocol_version_major: rng.gen(), + version: rng.gen(), + }, + _ => unreachable!("Unexpected tag"), + } + } +} + +impl Display for PackageIdentifier { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + PackageIdentifier::Hash { + package_hash: contract_package_hash, + version: Some(ver), + } => write!( + formatter, + "package-id({}, version {})", + HexFmt(contract_package_hash), + ver + ), + PackageIdentifier::Hash { + package_hash: contract_package_hash, + .. + } => write!( + formatter, + "package-id({}, latest)", + HexFmt(contract_package_hash), + ), + PackageIdentifier::Name { + name, + version: Some(ver), + } => write!(formatter, "package-id({}, version {})", name, ver), + PackageIdentifier::Name { name, .. 
} => { + write!(formatter, "package-id({}, latest)", name) + } + PackageIdentifier::HashWithMajorVersion { + package_hash, + protocol_version_major, + version, + } => { + write!( + formatter, + "package-id-HashWithVersion({}, protocol_version_major: {:?}, version: {:?})", + HexFmt(package_hash), + protocol_version_major, + version, + ) + } + PackageIdentifier::NameWithMajorVersion { + name, + protocol_version_major, + version, + } => { + write!( + formatter, + "package-id-NameWithVersion({},protocol_version_major: {:?}, version: {:?})", + name, protocol_version_major, version, + ) + } + } + } +} + +impl ToBytes for PackageIdentifier { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + PackageIdentifier::Hash { + package_hash, + version, + } => { + HASH_TAG.write_bytes(writer)?; + package_hash.write_bytes(writer)?; + version.write_bytes(writer) + } + PackageIdentifier::Name { name, version } => { + NAME_TAG.write_bytes(writer)?; + name.write_bytes(writer)?; + version.write_bytes(writer) + } + PackageIdentifier::HashWithMajorVersion { + package_hash, + protocol_version_major, + version, + } => { + HASH_WITH_VERSION_TAG.write_bytes(writer)?; + package_hash.write_bytes(writer)?; + protocol_version_major.write_bytes(writer)?; + version.write_bytes(writer) + } + PackageIdentifier::NameWithMajorVersion { + name, + protocol_version_major, + version, + } => { + NAME_WITH_VERSION_TAG.write_bytes(writer)?; + name.write_bytes(writer)?; + protocol_version_major.write_bytes(writer)?; + version.write_bytes(writer) + } + } + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + + match self { + PackageIdentifier::Hash { + package_hash, + version, + } => package_hash.serialized_length() + version.serialized_length(), + PackageIdentifier::Name { name, version } => { + 
name.serialized_length() + version.serialized_length() + } + PackageIdentifier::HashWithMajorVersion { + package_hash, + protocol_version_major, + version, + } => { + package_hash.serialized_length() + + protocol_version_major.serialized_length() + + version.serialized_length() + } + PackageIdentifier::NameWithMajorVersion { + name, + protocol_version_major, + version, + } => { + name.serialized_length() + + protocol_version_major.serialized_length() + + version.serialized_length() + } + } + } +} + +impl FromBytes for PackageIdentifier { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + match tag { + HASH_TAG => { + let (package_hash, remainder) = PackageHash::from_bytes(remainder)?; + let (version, remainder) = Option::::from_bytes(remainder)?; + let id = PackageIdentifier::Hash { + package_hash, + version, + }; + Ok((id, remainder)) + } + NAME_TAG => { + let (name, remainder) = String::from_bytes(remainder)?; + let (version, remainder) = Option::::from_bytes(remainder)?; + let id = PackageIdentifier::Name { name, version }; + Ok((id, remainder)) + } + HASH_WITH_VERSION_TAG => { + let (package_hash, remainder) = PackageHash::from_bytes(remainder)?; + let (protocol_version_major, remainder) = Option::from_bytes(remainder)?; + let (version, remainder) = Option::from_bytes(remainder)?; + let id = PackageIdentifier::HashWithMajorVersion { + package_hash, + protocol_version_major, + version, + }; + Ok((id, remainder)) + } + NAME_WITH_VERSION_TAG => { + let (name, remainder) = String::from_bytes(remainder)?; + let (protocol_version_major, remainder) = Option::from_bytes(remainder)?; + let (version, remainder) = Option::from_bytes(remainder)?; + let id = PackageIdentifier::NameWithMajorVersion { + name, + protocol_version_major, + version, + }; + Ok((id, remainder)) + } + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn 
bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + bytesrepr::test_serialization_roundtrip(&PackageIdentifier::random(rng)); + } +} diff --git a/types/src/transaction/pricing_mode.rs b/types/src/transaction/pricing_mode.rs new file mode 100644 index 0000000000..55ebed14a1 --- /dev/null +++ b/types/src/transaction/pricing_mode.rs @@ -0,0 +1,430 @@ +use alloc::vec::Vec; +use core::fmt::{self, Display, Formatter}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::Rng; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +#[cfg(doc)] +use super::Transaction; +use super::{ + serialization::CalltableSerializationEnvelope, InvalidTransaction, InvalidTransactionV1, + TransactionEntryPoint, +}; +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +use crate::{ + bytesrepr::{ + Error::{self, Formatting}, + FromBytes, ToBytes, + }, + transaction::serialization::CalltableSerializationEnvelopeBuilder, + Digest, +}; +#[cfg(any(feature = "std", test))] +use crate::{Chainspec, Gas, Motes}; + +/// The pricing mode of a [`Transaction`]. +#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr( + feature = "json-schema", + derive(JsonSchema), + schemars(description = "Pricing mode of a Transaction.") +)] +#[serde(deny_unknown_fields)] +pub enum PricingMode { + /// The original payment model, where the creator of the transaction + /// specifies how much they will pay, at what gas price. + PaymentLimited { + /// User-specified payment amount. + payment_amount: u64, + /// User-specified gas_price tolerance (minimum 1). + /// This is interpreted to mean "do not include this transaction in a block + /// if the current gas price is greater than this number" + gas_price_tolerance: u8, + /// Standard payment. 
+ standard_payment: bool, + }, + /// The cost of the transaction is determined by the cost table, per the + /// transaction category. + Fixed { + /// User-specified additional computation factor (minimum 0). If "0" is provided, + /// no additional logic is applied to the computation limit. Each value above "0" + /// tells the node that it needs to treat the transaction as if it uses more gas + /// than it's serialized size indicates. Each "1" will increase the "wasm lane" + /// size bucket for this transaction by 1. So if the size of the transaction + /// indicates bucket "0" and "additional_computation_factor = 2", the transaction + /// will be treated as a "2". + additional_computation_factor: u8, + /// User-specified gas_price tolerance (minimum 1). + /// This is interpreted to mean "do not include this transaction in a block + /// if the current gas price is greater than this number" + gas_price_tolerance: u8, + }, + /// The payment for this transaction was previously paid, as proven by + /// the receipt hash (this is for future use, not currently implemented). + Prepaid { + /// Pre-paid receipt. + receipt: Digest, + }, +} + +impl PricingMode { + /// Returns a random `PricingMode. + #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + match rng.gen_range(0..=2) { + 0 => PricingMode::PaymentLimited { + payment_amount: rng.gen(), + gas_price_tolerance: 1, + standard_payment: true, + }, + 1 => PricingMode::Fixed { + gas_price_tolerance: rng.gen(), + additional_computation_factor: 1, + }, + 2 => PricingMode::Prepaid { receipt: rng.gen() }, + _ => unreachable!(), + } + } + + /// Returns standard payment flag, if it is a `PaymentLimited` variant. + pub fn is_standard_payment(&self) -> bool { + match self { + PricingMode::PaymentLimited { + standard_payment, .. + } => *standard_payment, + PricingMode::Fixed { .. } => true, + PricingMode::Prepaid { .. 
} => true, + } + } + + fn serialized_field_lengths(&self) -> Vec { + match self { + PricingMode::PaymentLimited { + payment_amount, + gas_price_tolerance, + standard_payment, + } => { + vec![ + crate::bytesrepr::U8_SERIALIZED_LENGTH, + payment_amount.serialized_length(), + gas_price_tolerance.serialized_length(), + standard_payment.serialized_length(), + ] + } + PricingMode::Fixed { + gas_price_tolerance, + additional_computation_factor, + } => { + vec![ + crate::bytesrepr::U8_SERIALIZED_LENGTH, + gas_price_tolerance.serialized_length(), + additional_computation_factor.serialized_length(), + ] + } + PricingMode::Prepaid { receipt } => { + vec![ + crate::bytesrepr::U8_SERIALIZED_LENGTH, + receipt.serialized_length(), + ] + } + } + } + + #[cfg(any(feature = "std", test))] + /// Returns the gas limit. + pub fn gas_limit(&self, chainspec: &Chainspec, lane_id: u8) -> Result { + let gas = match self { + PricingMode::PaymentLimited { payment_amount, .. } => Gas::new(*payment_amount), + PricingMode::Fixed { .. } => { + //The lane_id should already include additional_computation_factor in case of wasm + Gas::new(chainspec.get_max_gas_limit_by_category(lane_id)) + } + PricingMode::Prepaid { receipt } => { + return Err(PricingModeError::InvalidPricingMode { + price_mode: PricingMode::Prepaid { receipt: *receipt }, + }); + } + }; + Ok(gas) + } + + #[cfg(any(feature = "std", test))] + /// Returns gas cost. + pub fn gas_cost( + &self, + chainspec: &Chainspec, + lane_id: u8, + gas_price: u8, + ) -> Result { + let gas_limit = self.gas_limit(chainspec, lane_id)?; + let motes = match self { + PricingMode::PaymentLimited { payment_amount, .. } => { + Motes::from_gas(Gas::from(*payment_amount), gas_price) + .ok_or(PricingModeError::UnableToCalculateGasCost)? + } + PricingMode::Fixed { .. } => Motes::from_gas(gas_limit, gas_price) + .ok_or(PricingModeError::UnableToCalculateGasCost)?, + PricingMode::Prepaid { .. 
} => { + Motes::zero() // prepaid + } + }; + Ok(motes) + } + + /// Returns gas cost. + pub fn additional_computation_factor(&self) -> u8 { + match self { + PricingMode::PaymentLimited { .. } => 0, + PricingMode::Fixed { + additional_computation_factor, + .. + } => *additional_computation_factor, + PricingMode::Prepaid { .. } => 0, + } + } +} + +// This impl is provided due to a completeness test that we +// have in binary-port. It checks if all variants of this +// error have corresponding binary port error codes +#[cfg(any(feature = "testing", test))] +impl Default for PricingMode { + fn default() -> Self { + Self::PaymentLimited { + payment_amount: 1, + gas_price_tolerance: 1, + standard_payment: true, + } + } +} + +///Errors that can occur when calling PricingMode functions +#[derive(Debug)] +pub enum PricingModeError { + /// The entry point for this transaction target cannot be `call`. + EntryPointCannotBeCall, + /// The entry point for this transaction target cannot be `TransactionEntryPoint::Custom`. + EntryPointCannotBeCustom { + /// The invalid entry point. + entry_point: TransactionEntryPoint, + }, + /// Invalid combination of pricing handling and pricing mode. + InvalidPricingMode { + /// The pricing mode as specified by the transaction. + price_mode: PricingMode, + }, + /// Unable to calculate gas cost. + UnableToCalculateGasCost, + /// Unexpected entry point. 
+ UnexpectedEntryPoint { + entry_point: TransactionEntryPoint, + lane_id: u8, + }, +} + +impl From for InvalidTransaction { + fn from(err: PricingModeError) -> Self { + InvalidTransaction::V1(err.into()) + } +} + +impl From for InvalidTransactionV1 { + fn from(err: PricingModeError) -> Self { + match err { + PricingModeError::EntryPointCannotBeCall => { + InvalidTransactionV1::EntryPointCannotBeCall + } + PricingModeError::EntryPointCannotBeCustom { entry_point } => { + InvalidTransactionV1::EntryPointCannotBeCustom { entry_point } + } + PricingModeError::InvalidPricingMode { price_mode } => { + InvalidTransactionV1::InvalidPricingMode { price_mode } + } + PricingModeError::UnableToCalculateGasCost => { + InvalidTransactionV1::UnableToCalculateGasCost + } + PricingModeError::UnexpectedEntryPoint { + entry_point, + lane_id, + } => InvalidTransactionV1::UnexpectedEntryPoint { + entry_point, + lane_id, + }, + } + } +} +const TAG_FIELD_INDEX: u16 = 0; + +const PAYMENT_LIMITED_VARIANT_TAG: u8 = 0; +const PAYMENT_LIMITED_PAYMENT_AMOUNT_INDEX: u16 = 1; +const PAYMENT_LIMITED_GAS_PRICE_TOLERANCE_INDEX: u16 = 2; +const PAYMENT_LIMITED_STANDARD_PAYMENT_INDEX: u16 = 3; + +const FIXED_VARIANT_TAG: u8 = 1; +const FIXED_GAS_PRICE_TOLERANCE_INDEX: u16 = 1; +const FIXED_ADDITIONAL_COMPUTATION_FACTOR_INDEX: u16 = 2; + +const RESERVED_VARIANT_TAG: u8 = 2; +const RESERVED_RECEIPT_INDEX: u16 = 1; + +impl ToBytes for PricingMode { + fn to_bytes(&self) -> Result, Error> { + match self { + PricingMode::PaymentLimited { + payment_amount, + gas_price_tolerance, + standard_payment, + } => CalltableSerializationEnvelopeBuilder::new(self.serialized_field_lengths())? + .add_field(TAG_FIELD_INDEX, &PAYMENT_LIMITED_VARIANT_TAG)? + .add_field(PAYMENT_LIMITED_PAYMENT_AMOUNT_INDEX, &payment_amount)? + .add_field( + PAYMENT_LIMITED_GAS_PRICE_TOLERANCE_INDEX, + &gas_price_tolerance, + )? + .add_field(PAYMENT_LIMITED_STANDARD_PAYMENT_INDEX, &standard_payment)? 
+ .binary_payload_bytes(), + PricingMode::Fixed { + gas_price_tolerance, + additional_computation_factor, + } => CalltableSerializationEnvelopeBuilder::new(self.serialized_field_lengths())? + .add_field(TAG_FIELD_INDEX, &FIXED_VARIANT_TAG)? + .add_field(FIXED_GAS_PRICE_TOLERANCE_INDEX, &gas_price_tolerance)? + .add_field( + FIXED_ADDITIONAL_COMPUTATION_FACTOR_INDEX, + &additional_computation_factor, + )? + .binary_payload_bytes(), + PricingMode::Prepaid { receipt } => { + CalltableSerializationEnvelopeBuilder::new(self.serialized_field_lengths())? + .add_field(TAG_FIELD_INDEX, &RESERVED_VARIANT_TAG)? + .add_field(RESERVED_RECEIPT_INDEX, &receipt)? + .binary_payload_bytes() + } + } + } + fn serialized_length(&self) -> usize { + CalltableSerializationEnvelope::estimate_size(self.serialized_field_lengths()) + } +} + +impl FromBytes for PricingMode { + fn from_bytes(bytes: &[u8]) -> Result<(PricingMode, &[u8]), Error> { + let (binary_payload, remainder) = CalltableSerializationEnvelope::from_bytes(4, bytes)?; + let window = binary_payload.start_consuming()?.ok_or(Formatting)?; + window.verify_index(TAG_FIELD_INDEX)?; + let (tag, window) = window.deserialize_and_maybe_next::()?; + let to_ret = match tag { + PAYMENT_LIMITED_VARIANT_TAG => { + let window = window.ok_or(Formatting)?; + window.verify_index(PAYMENT_LIMITED_PAYMENT_AMOUNT_INDEX)?; + let (payment_amount, window) = window.deserialize_and_maybe_next::()?; + let window = window.ok_or(Formatting)?; + window.verify_index(PAYMENT_LIMITED_GAS_PRICE_TOLERANCE_INDEX)?; + let (gas_price_tolerance, window) = window.deserialize_and_maybe_next::()?; + let window = window.ok_or(Formatting)?; + window.verify_index(PAYMENT_LIMITED_STANDARD_PAYMENT_INDEX)?; + let (standard_payment, window) = window.deserialize_and_maybe_next::()?; + if window.is_some() { + return Err(Formatting); + } + Ok(PricingMode::PaymentLimited { + payment_amount, + gas_price_tolerance, + standard_payment, + }) + } + FIXED_VARIANT_TAG => { + let window = 
window.ok_or(Formatting)?; + window.verify_index(FIXED_GAS_PRICE_TOLERANCE_INDEX)?; + let (gas_price_tolerance, window) = window.deserialize_and_maybe_next::()?; + let window = window.ok_or(Formatting)?; + window.verify_index(FIXED_ADDITIONAL_COMPUTATION_FACTOR_INDEX)?; + let (additional_computation_factor, window) = + window.deserialize_and_maybe_next::()?; + if window.is_some() { + return Err(Formatting); + } + Ok(PricingMode::Fixed { + gas_price_tolerance, + additional_computation_factor, + }) + } + RESERVED_VARIANT_TAG => { + let window = window.ok_or(Formatting)?; + window.verify_index(RESERVED_RECEIPT_INDEX)?; + let (receipt, window) = window.deserialize_and_maybe_next::()?; + if window.is_some() { + return Err(Formatting); + } + Ok(PricingMode::Prepaid { receipt }) + } + _ => Err(Formatting), + }; + to_ret.map(|endpoint| (endpoint, remainder)) + } +} + +impl Display for PricingMode { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + PricingMode::PaymentLimited { + payment_amount, + gas_price_tolerance: gas_price, + standard_payment, + } => { + write!( + formatter, + "payment amount {}, gas price multiplier {} standard_payment {}", + payment_amount, gas_price, standard_payment + ) + } + PricingMode::Prepaid { receipt } => write!(formatter, "prepaid: {}", receipt), + PricingMode::Fixed { + gas_price_tolerance, + additional_computation_factor, + } => write!( + formatter, + "fixed pricing {} {}", + gas_price_tolerance, additional_computation_factor + ), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::bytesrepr; + + #[test] + fn test_to_bytes_and_from_bytes() { + bytesrepr::test_serialization_roundtrip(&PricingMode::PaymentLimited { + payment_amount: 100, + gas_price_tolerance: 1, + standard_payment: true, + }); + bytesrepr::test_serialization_roundtrip(&PricingMode::Fixed { + gas_price_tolerance: 2, + additional_computation_factor: 1, + }); + bytesrepr::test_serialization_roundtrip(&PricingMode::Prepaid { + 
receipt: Digest::hash(b"prepaid"), + }); + } + + use crate::gens::pricing_mode_arb; + use proptest::prelude::*; + proptest! { + #[test] + fn generative_bytesrepr_roundtrip(val in pricing_mode_arb()) { + bytesrepr::test_serialization_roundtrip(&val); + } + } +} diff --git a/types/src/transaction/runtime_args.rs b/types/src/transaction/runtime_args.rs new file mode 100644 index 0000000000..2a3ee2405f --- /dev/null +++ b/types/src/transaction/runtime_args.rs @@ -0,0 +1,388 @@ +//! Home of RuntimeArgs for calling contracts + +use alloc::{collections::BTreeMap, string::String, vec::Vec}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::{Rng, RngCore}; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +#[cfg(any(feature = "testing", test))] +use crate::{bytesrepr::Bytes, testing::TestRng}; +use crate::{ + bytesrepr::{self, Error, FromBytes, ToBytes}, + CLType, CLTyped, CLValue, CLValueError, U512, +}; +/// Named arguments to a contract. +#[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub struct NamedArg(String, CLValue); + +impl NamedArg { + /// Returns a new `NamedArg`. + pub fn new(name: String, value: CLValue) -> Self { + NamedArg(name, value) + } + + /// Returns the name of the named arg. + pub fn name(&self) -> &str { + &self.0 + } + + /// Returns the value of the named arg. + pub fn cl_value(&self) -> &CLValue { + &self.1 + } + + /// Returns a mutable reference to the value of the named arg. 
+ pub fn cl_value_mut(&mut self) -> &mut CLValue { + &mut self.1 + } +} + +impl From<(String, CLValue)> for NamedArg { + fn from((name, value): (String, CLValue)) -> NamedArg { + NamedArg(name, value) + } +} + +impl ToBytes for NamedArg { + fn to_bytes(&self) -> Result, Error> { + let mut result = bytesrepr::allocate_buffer(self)?; + result.append(&mut self.0.to_bytes()?); + result.append(&mut self.1.to_bytes()?); + Ok(result) + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + self.1.serialized_length() + } +} + +impl FromBytes for NamedArg { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (name, remainder) = String::from_bytes(bytes)?; + let (cl_value, remainder) = CLValue::from_bytes(remainder)?; + Ok((NamedArg(name, cl_value), remainder)) + } +} + +/// Represents a collection of arguments passed to a smart contract. +#[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Serialize, Deserialize, Debug, Default)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub struct RuntimeArgs(Vec); + +impl RuntimeArgs { + /// Create an empty [`RuntimeArgs`] instance. + pub fn new() -> RuntimeArgs { + RuntimeArgs::default() + } + + /// A wrapper that lets you easily and safely create runtime arguments. + /// + /// This method is useful when you have to construct a [`RuntimeArgs`] with multiple entries, + /// but error handling at given call site would require to have a match statement for each + /// [`RuntimeArgs::insert`] call. With this method you can use ? operator inside the closure and + /// then handle single result. When `try_block` will be stabilized this method could be + /// deprecated in favor of using those blocks. + pub fn try_new(func: F) -> Result + where + F: FnOnce(&mut RuntimeArgs) -> Result<(), CLValueError>, + { + let mut runtime_args = RuntimeArgs::new(); + func(&mut runtime_args)?; + Ok(runtime_args) + } + + /// Gets an argument by its name. 
+ pub fn get(&self, name: &str) -> Option<&CLValue> { + self.0.iter().find_map(|NamedArg(named_name, named_value)| { + if named_name == name { + Some(named_value) + } else { + None + } + }) + } + + /// Gets the length of the collection. + pub fn len(&self) -> usize { + self.0.len() + } + + /// Returns `true` if the collection of arguments is empty. + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } + + /// Inserts a new named argument into the collection. + pub fn insert(&mut self, key: K, value: V) -> Result<(), CLValueError> + where + K: Into, + V: CLTyped + ToBytes, + { + let cl_value = CLValue::from_t(value)?; + self.0.push(NamedArg(key.into(), cl_value)); + Ok(()) + } + + /// Inserts a new named argument into the collection. + pub fn insert_cl_value(&mut self, key: K, cl_value: CLValue) + where + K: Into, + { + self.0.push(NamedArg(key.into(), cl_value)); + } + + /// Returns all the values of the named args. + pub fn to_values(&self) -> Vec<&CLValue> { + self.0.iter().map(|NamedArg(_name, value)| value).collect() + } + + /// Returns an iterator of references over all arguments in insertion order. + pub fn named_args(&self) -> impl Iterator { + self.0.iter() + } + + /// Returns an iterator of mutable references over all arguments in insertion order. + pub fn named_args_mut(&mut self) -> impl Iterator { + self.0.iter_mut() + } + + /// Returns the numeric value of `name` arg from the runtime arguments or defaults to + /// 0 if that arg doesn't exist or is not an integer type. + /// + /// Supported [`CLType`]s for numeric conversions are U64, and U512. + /// + /// Returns an error if parsing the arg fails. 
+ pub fn try_get_number(&self, name: &str) -> Result { + let amount_arg = match self.get(name) { + None => return Ok(U512::zero()), + Some(arg) => arg, + }; + match amount_arg.cl_type() { + CLType::U512 => amount_arg.clone().into_t::(), + CLType::U64 => amount_arg.clone().into_t::().map(U512::from), + _ => Ok(U512::zero()), + } + } + + /// Returns a random `RuntimeArgs`. + #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + fn random_bytes(rng: &mut TestRng) -> Bytes { + let mut buffer = vec![0u8; rng.gen_range(0..100)]; + rng.fill_bytes(buffer.as_mut()); + Bytes::from(buffer) + } + + let count = rng.gen_range(0..6); + let mut args = RuntimeArgs::new(); + for _ in 0..count { + let key = rng.random_string(1..21); + let value = random_bytes(rng); + let _ = args.insert(key, value); + } + args + } +} + +impl From> for RuntimeArgs { + fn from(values: Vec) -> Self { + RuntimeArgs(values) + } +} + +impl From> for RuntimeArgs { + fn from(cl_values: BTreeMap) -> RuntimeArgs { + RuntimeArgs(cl_values.into_iter().map(NamedArg::from).collect()) + } +} + +impl From for BTreeMap { + fn from(args: RuntimeArgs) -> BTreeMap { + let mut map = BTreeMap::new(); + for named in args.0 { + map.insert(named.0, named.1); + } + map + } +} + +impl ToBytes for RuntimeArgs { + fn to_bytes(&self) -> Result, Error> { + self.0.to_bytes() + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } +} + +impl FromBytes for RuntimeArgs { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (args, remainder) = Vec::::from_bytes(bytes)?; + Ok((RuntimeArgs(args), remainder)) + } +} + +/// Macro that makes it easier to construct named arguments. +/// +/// NOTE: This macro does not propagate possible errors that could occur while creating a +/// [`CLValue`]. For such cases creating [`RuntimeArgs`] manually is recommended. +/// +/// # Example usage +/// ``` +/// use casper_types::runtime_args; +/// let _named_args = runtime_args! 
{ +/// "foo" => 42, +/// "bar" => "Hello, world!" +/// }; +/// ``` +#[macro_export] +macro_rules! runtime_args { + () => ($crate::RuntimeArgs::new()); + ( $($key:expr => $value:expr,)+ ) => (runtime_args!($($key => $value),+)); + ( $($key:expr => $value:expr),* ) => { + { + let mut named_args = $crate::RuntimeArgs::new(); + $( + named_args.insert($key, $value).unwrap(); + )* + named_args + } + }; +} + +#[cfg(test)] +mod tests { + use super::*; + + const ARG_AMOUNT: &str = "amount"; + + #[test] + fn test_runtime_args() { + let arg1 = CLValue::from_t(1).unwrap(); + let arg2 = CLValue::from_t("Foo").unwrap(); + let arg3 = CLValue::from_t(Some(1)).unwrap(); + let args = { + let mut map = BTreeMap::new(); + map.insert("bar".into(), arg2.clone()); + map.insert("foo".into(), arg1.clone()); + map.insert("qwer".into(), arg3.clone()); + map + }; + let runtime_args = RuntimeArgs::from(args); + assert_eq!(runtime_args.get("qwer"), Some(&arg3)); + assert_eq!(runtime_args.get("foo"), Some(&arg1)); + assert_eq!(runtime_args.get("bar"), Some(&arg2)); + assert_eq!(runtime_args.get("aaa"), None); + + // Ensure macro works + + let runtime_args_2 = runtime_args! { + "bar" => "Foo", + "foo" => 1i32, + "qwer" => Some(1i32), + }; + assert_eq!(runtime_args, runtime_args_2); + } + + #[test] + fn empty_macro() { + assert_eq!(runtime_args! {}, RuntimeArgs::new()); + } + + #[test] + fn btreemap_compat() { + // This test assumes same serialization format as BTreeMap + let runtime_args_1 = runtime_args! 
{ + "bar" => "Foo", + "foo" => 1i32, + "qwer" => Some(1i32), + }; + let tagless = runtime_args_1.to_bytes().unwrap().to_vec(); + + let mut runtime_args_2 = BTreeMap::new(); + runtime_args_2.insert(String::from("bar"), CLValue::from_t("Foo").unwrap()); + runtime_args_2.insert(String::from("foo"), CLValue::from_t(1i32).unwrap()); + runtime_args_2.insert(String::from("qwer"), CLValue::from_t(Some(1i32)).unwrap()); + + assert_eq!(tagless, runtime_args_2.to_bytes().unwrap()); + } + + #[test] + fn named_serialization_roundtrip() { + let args = runtime_args! { + "foo" => 1i32, + }; + bytesrepr::test_serialization_roundtrip(&args); + } + + #[test] + fn should_create_args_with() { + let res = RuntimeArgs::try_new(|runtime_args| { + runtime_args.insert(String::from("foo"), 123)?; + runtime_args.insert(String::from("bar"), 456)?; + Ok(()) + }); + + let expected = runtime_args! { + "foo" => 123, + "bar" => 456, + }; + assert!(matches!(res, Ok(args) if expected == args)); + } + + #[test] + fn try_get_number_should_work() { + let mut args = RuntimeArgs::new(); + args.insert(ARG_AMOUNT, 0u64).expect("is ok"); + assert_eq!(args.try_get_number(ARG_AMOUNT).unwrap(), U512::zero()); + + let mut args = RuntimeArgs::new(); + args.insert(ARG_AMOUNT, U512::zero()).expect("is ok"); + assert_eq!(args.try_get_number(ARG_AMOUNT).unwrap(), U512::zero()); + + let args = RuntimeArgs::new(); + assert_eq!(args.try_get_number(ARG_AMOUNT).unwrap(), U512::zero()); + + let hundred = 100u64; + + let mut args = RuntimeArgs::new(); + let input = U512::from(hundred); + args.insert(ARG_AMOUNT, input).expect("is ok"); + assert_eq!(args.try_get_number(ARG_AMOUNT).unwrap(), input); + + let mut args = RuntimeArgs::new(); + args.insert(ARG_AMOUNT, hundred).expect("is ok"); + assert_eq!( + args.try_get_number(ARG_AMOUNT).unwrap(), + U512::from(hundred) + ); + } + + #[test] + fn try_get_number_should_return_zero_for_non_numeric_type() { + let mut args = RuntimeArgs::new(); + args.insert(ARG_AMOUNT, 
"Non-numeric-string").unwrap(); + assert_eq!( + args.try_get_number(ARG_AMOUNT).expect("should get amount"), + U512::zero() + ); + } + + #[test] + fn try_get_number_should_return_zero_if_amount_is_missing() { + let args = RuntimeArgs::new(); + assert_eq!( + args.try_get_number(ARG_AMOUNT).expect("should get amount"), + U512::zero() + ); + } +} diff --git a/types/src/transaction/serialization/field.rs b/types/src/transaction/serialization/field.rs new file mode 100644 index 0000000000..7a590e804e --- /dev/null +++ b/types/src/transaction/serialization/field.rs @@ -0,0 +1,53 @@ +use crate::bytesrepr::{ + self, Error, FromBytes, ToBytes, U16_SERIALIZED_LENGTH, U32_SERIALIZED_LENGTH, +}; +use alloc::vec::Vec; + +#[derive(Eq, PartialEq, Debug)] +pub(crate) struct Field { + pub index: u16, + pub offset: u32, +} + +impl Field { + pub(crate) fn new(index: u16, offset: u32) -> Self { + Field { index, offset } + } +} + +impl ToBytes for Field { + fn to_bytes(&self) -> Result, Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), Error> { + self.index.write_bytes(writer)?; + self.offset.write_bytes(writer) + } + + fn serialized_length(&self) -> usize { + U16_SERIALIZED_LENGTH + U32_SERIALIZED_LENGTH + } +} + +impl FromBytes for Field { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (index, remainder) = u16::from_bytes(bytes)?; + let (offset, remainder) = u32::from_bytes(remainder)?; + Ok((Field::new(index, offset), remainder)) + } +} + +impl Field { + pub fn serialized_vec_size(number_of_fields: usize) -> usize { + let mut size = U32_SERIALIZED_LENGTH; // Overhead of the vec itself + size += number_of_fields * Field::serialized_length(); + size + } + + pub fn serialized_length() -> usize { + U16_SERIALIZED_LENGTH + U32_SERIALIZED_LENGTH + } +} diff --git a/types/src/transaction/serialization/mod.rs 
b/types/src/transaction/serialization/mod.rs new file mode 100644 index 0000000000..e410ac711b --- /dev/null +++ b/types/src/transaction/serialization/mod.rs @@ -0,0 +1,346 @@ +mod field; +use alloc::vec::Vec; +use field::Field; + +use crate::bytesrepr::{ + self, Bytes, Error, FromBytes, ToBytes, U32_SERIALIZED_LENGTH, U8_SERIALIZED_LENGTH, +}; + +pub struct CalltableSerializationEnvelopeBuilder { + fields: Vec, + expected_payload_sizes: Vec, + bytes: Vec, + current_field_index: usize, + current_offset: usize, +} + +impl CalltableSerializationEnvelopeBuilder { + pub fn new( + expected_payload_sizes: Vec, + ) -> Result { + let number_of_fields = expected_payload_sizes.len(); + let fields_size = Field::serialized_vec_size(number_of_fields); + let bytes_of_payload_size = expected_payload_sizes.iter().sum::(); + let payload_and_vec_overhead: usize = U32_SERIALIZED_LENGTH + bytes_of_payload_size; // u32 for the overhead of serializing a vec + let mut payload_buffer = + bytesrepr::allocate_buffer_for_size(fields_size + payload_and_vec_overhead)?; + payload_buffer.extend(vec![0; fields_size]); // Making room for the call table + payload_buffer.extend((bytes_of_payload_size as u32).to_bytes()?); // Writing down number of bytes that are in the payload + Ok(CalltableSerializationEnvelopeBuilder { + fields: Vec::with_capacity(number_of_fields), + expected_payload_sizes, + bytes: payload_buffer, + current_field_index: 0, + current_offset: 0, + }) + } + + pub fn add_field( + mut self, + field_index: u16, + value: &T, + ) -> Result { + let current_field_index = self.current_field_index; + if current_field_index >= self.expected_payload_sizes.len() { + //We wrote more fields than expected + return Err(Error::NotRepresentable); + } + let fields = &mut self.fields; + if current_field_index > 0 && fields[current_field_index - 1].index >= field_index { + //Need to make sure we write fields in ascending order of tab index + return Err(Error::NotRepresentable); + } + let size = 
self.expected_payload_sizes[current_field_index]; + let bytes_before = self.bytes.len(); + value.write_bytes(&mut self.bytes)?; + fields.push(Field::new(field_index, self.current_offset as u32)); + self.current_field_index += 1; + self.current_offset += size; + let bytes_after = self.bytes.len(); + let wrote_bytes = bytes_after - bytes_before; + if wrote_bytes == 0 { + //We don't allow writing empty fields + return Err(Error::NotRepresentable); + } + if wrote_bytes != size { + //The written field occupied different amount of bytes than declared + return Err(Error::NotRepresentable); + } + Ok(self) + } + + pub fn binary_payload_bytes(mut self) -> Result, Error> { + if self.current_field_index != self.expected_payload_sizes.len() { + //We didn't write all the fields we expected + return Err(Error::NotRepresentable); + } + let write_at_slice = &mut self.bytes[0..]; + let calltable_bytes = self.fields.to_bytes()?; + for (pos, byte) in calltable_bytes.into_iter().enumerate() { + write_at_slice[pos] = byte; + } + Ok(self.bytes) + } +} + +pub struct CalltableSerializationEnvelope { + fields: Vec, + bytes: Bytes, +} + +impl CalltableSerializationEnvelope { + pub fn estimate_size(field_sizes: Vec) -> usize { + let number_of_fields = field_sizes.len(); + let payload_in_bytes: usize = field_sizes.iter().sum(); + let mut size = U32_SERIALIZED_LENGTH + U32_SERIALIZED_LENGTH; // Overhead of the fields vec and bytes vec + size += number_of_fields * Field::serialized_length(); + size += payload_in_bytes * U8_SERIALIZED_LENGTH; + size + } + + pub fn start_consuming(&self) -> Result, Error> { + if self.fields.is_empty() { + return Ok(None); + } + let field = &self.fields[0]; + let expected_size = if self.fields.len() == 1 { + self.bytes.len() + } else { + self.fields[1].offset as usize + }; + Ok(Some(CalltableFieldsIterator { + index_in_fields_vec: 0, + expected_size, + field, + bytes: &self.bytes, + parent: self, + })) + } + + pub fn from_bytes( + max_expected_fields: u32, + 
input_bytes: &[u8], + ) -> Result<(CalltableSerializationEnvelope, &[u8]), Error> { + if input_bytes.len() < U32_SERIALIZED_LENGTH { + //The first "thing" in the bytes of the payload should be a `fields` vector. We want to + // check the number of entries in that vector to avoid field pumping. If the + // payload doesn't have u32 size of bytes in it, then it's malformed. + return Err(Error::Formatting); + } + let (number_of_fields, _) = u32::from_bytes(input_bytes)?; + if number_of_fields > max_expected_fields { + return Err(Error::Formatting); + } + let (fields, remainder) = Vec::::from_bytes(input_bytes)?; + let (bytes, remainder) = Bytes::from_bytes(remainder)?; + Ok((CalltableSerializationEnvelope { fields, bytes }, remainder)) + } +} + +pub struct CalltableFieldsIterator<'a> { + index_in_fields_vec: usize, + expected_size: usize, + field: &'a Field, + bytes: &'a [u8], + parent: &'a CalltableSerializationEnvelope, +} + +impl CalltableFieldsIterator<'_> { + pub fn verify_index(&self, expected_index: u16) -> Result<(), Error> { + let field = self.field; + if field.index != expected_index { + return Err(Error::Formatting); + } + Ok(()) + } + + pub fn deserialize_and_maybe_next( + &self, + ) -> Result<(T, Option), Error> { + let (t, maybe_window) = self.step()?; + Ok((t, maybe_window)) + } + + fn step(&self) -> Result<(T, Option), Error> { + let (t, remainder) = T::from_bytes(self.bytes)?; + let parent_fields = &self.parent.fields; + let parent_fields_len = parent_fields.len(); + let is_last_field = self.index_in_fields_vec == parent_fields_len - 1; + if remainder.len() + self.expected_size != self.bytes.len() { + //The field occupied different amount of bytes than expected + return Err(Error::Formatting); + } + if !is_last_field { + let next_field_index = self.index_in_fields_vec + 1; + let next_field = &parent_fields[next_field_index]; // We already checked that this index exists + let is_next_field_last = next_field_index == parent_fields_len - 1; + let 
expected_size = if is_next_field_last { + remainder.len() + } else { + (parent_fields[next_field_index + 1].offset + - parent_fields[next_field_index].offset) as usize + }; + let next_window = CalltableFieldsIterator { + index_in_fields_vec: next_field_index, + expected_size, + field: next_field, + bytes: remainder, + parent: self.parent, + }; + Ok((t, Some(next_window))) + } else { + if !remainder.is_empty() { + //The payload of BinaryPayload should contain only the serialized, there should be + // no trailing bytes after consuming all the fields. + return Err(Error::Formatting); + } + Ok((t, None)) + } + } +} + +#[cfg(test)] +mod tests { + use super::{CalltableSerializationEnvelope, CalltableSerializationEnvelopeBuilder, Field}; + use crate::bytesrepr::*; + + #[test] + fn binary_payload_should_serialize_fields() { + let binary_payload = CalltableSerializationEnvelopeBuilder::new(vec![ + U8_SERIALIZED_LENGTH, + U32_SERIALIZED_LENGTH, + U16_SERIALIZED_LENGTH, + ]) + .unwrap(); + let bytes = binary_payload + .add_field(0, &(254_u8)) + .unwrap() + .add_field(1, &(u32::MAX)) + .unwrap() + .add_field(2, &(555_u16)) + .unwrap() + .binary_payload_bytes() + .unwrap(); + let (payload, remainder) = CalltableSerializationEnvelope::from_bytes(3, &bytes).unwrap(); + assert!(remainder.is_empty()); + assert_eq!( + payload.fields, + vec![Field::new(0, 0), Field::new(1, 1), Field::new(2, 5)] + ); + let bytes = &payload.bytes; + let (first_value, remainder) = u8::from_bytes(bytes).unwrap(); + let (second_value, remainder) = u32::from_bytes(remainder).unwrap(); + let (third_value, remainder) = u16::from_bytes(remainder).unwrap(); + assert!(remainder.is_empty()); + assert_eq!(first_value, 254); + assert_eq!(second_value, u32::MAX); + assert_eq!(third_value, 555); + } + + #[test] + fn binary_payload_should_fail_to_deserialzie_if_more_then_expected_fields() { + let binary_payload = CalltableSerializationEnvelopeBuilder::new(vec![ + U8_SERIALIZED_LENGTH, + U32_SERIALIZED_LENGTH, + 
U16_SERIALIZED_LENGTH, + ]) + .unwrap(); + let bytes = binary_payload + .add_field(0, &(254_u8)) + .unwrap() + .add_field(1, &(u32::MAX)) + .unwrap() + .add_field(2, &(555_u16)) + .unwrap() + .binary_payload_bytes() + .unwrap(); + let res = CalltableSerializationEnvelope::from_bytes(2, &bytes); + assert!(res.is_err()) + } + + #[test] + fn binary_payload_should_fail_to_serialize_if_field_indexes_not_unique() { + let binary_payload = CalltableSerializationEnvelopeBuilder::new(vec![ + U8_SERIALIZED_LENGTH, + U32_SERIALIZED_LENGTH, + U16_SERIALIZED_LENGTH, + ]) + .unwrap(); + let res = binary_payload + .add_field(0, &(254_u8)) + .unwrap() + .add_field(0, &(u32::MAX)); + assert!(res.is_err()) + } + + #[test] + fn binary_payload_should_fail_to_serialize_if_field_indexes_not_sequential() { + let binary_payload = CalltableSerializationEnvelopeBuilder::new(vec![ + U8_SERIALIZED_LENGTH, + U32_SERIALIZED_LENGTH, + U16_SERIALIZED_LENGTH, + ]) + .unwrap(); + let res = binary_payload + .add_field(1, &(254_u8)) + .unwrap() + .add_field(0, &(u32::MAX)); + assert!(res.is_err()) + } + + #[test] + fn binary_payload_should_fail_to_serialize_if_proposed_fields_size_is_smaller_than_declaration() + { + let binary_payload = CalltableSerializationEnvelopeBuilder::new(vec![ + U8_SERIALIZED_LENGTH, + U32_SERIALIZED_LENGTH, + U16_SERIALIZED_LENGTH, + ]) + .unwrap(); + let res = binary_payload + .add_field(0, &(254_u8)) + .unwrap() + .add_field(1, &(u16::MAX)); + assert!(res.is_err()) + } + + #[test] + fn binary_payload_should_fail_to_serialize_if_proposed_fields_size_is_greater_than_declaration() + { + let binary_payload = CalltableSerializationEnvelopeBuilder::new(vec![ + U8_SERIALIZED_LENGTH, + U16_SERIALIZED_LENGTH, + ]) + .unwrap(); + let res = binary_payload + .add_field(0, &(254_u8)) + .unwrap() + .add_field(1, &(u32::MAX)); + assert!(res.is_err()) + } + + #[test] + fn binary_payload_should_fail_to_serialize_if_proposed_a_field_with_zero_bytes() { + struct FunkyType {} + impl ToBytes 
for FunkyType { + fn to_bytes(&self) -> Result, Error> { + Ok(Vec::new()) + } + + fn serialized_length(&self) -> usize { + 0 + } + } + let funky_instance = FunkyType {}; + let binary_payload = CalltableSerializationEnvelopeBuilder::new(vec![ + U8_SERIALIZED_LENGTH, + funky_instance.serialized_length(), + ]) + .unwrap(); + let res = binary_payload + .add_field(0, &(254_u8)) + .unwrap() + .add_field(1, &(funky_instance)); + assert!(res.is_err()) + } +} diff --git a/types/src/transaction/transaction_entry_point.rs b/types/src/transaction/transaction_entry_point.rs new file mode 100644 index 0000000000..741a0e37c1 --- /dev/null +++ b/types/src/transaction/transaction_entry_point.rs @@ -0,0 +1,535 @@ +use alloc::{string::String, vec::Vec}; +use core::fmt::{self, Display, Formatter}; + +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +use crate::{ + alloc::string::ToString, + bytesrepr::{ + Error::{self, Formatting}, + FromBytes, ToBytes, + }, + system::{auction, mint}, + transaction::serialization::{ + CalltableSerializationEnvelope, CalltableSerializationEnvelopeBuilder, + }, +}; +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::Rng; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +/// The entry point of a [`crate::Transaction`]. +#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr( + feature = "json-schema", + derive(JsonSchema), + schemars(description = "Entry point of a Transaction.") +)] +#[serde(deny_unknown_fields)] +//Default is needed only in testing to meet EnumIter needs +#[cfg_attr(any(feature = "testing", test), derive(Default))] +pub enum TransactionEntryPoint { + /// The default entry point name. + #[cfg_attr(any(feature = "testing", test), default)] + Call, + + /// A non-native, arbitrary entry point. 
+ Custom(String), + + /// The `transfer` native entry point, used to transfer `Motes` from a source purse to a target + /// purse. + /// + /// Requires the following runtime args: + /// * "source": `URef` + /// * "target": `URef` + /// * "amount": `U512` + /// + /// The following optional runtime args can also be provided: + /// * "to": `Option` + /// * "id": `Option` + #[cfg_attr( + feature = "json-schema", + schemars( + description = "The `transfer` native entry point, used to transfer `Motes` from a \ + source purse to a target purse." + ) + )] + Transfer, + + /// The `burn` native entry point, used to burn `Motes` from a source purse. + /// + /// Requires the following runtime args: + /// * "source": `URef` + /// * "amount": `U512` + #[cfg_attr( + feature = "json-schema", + schemars( + description = "The `burn` native entry point, used to burn `Motes` from a \ + source purse." + ) + )] + Burn, + + /// The `add_bid` native entry point, used to create or top off a bid purse. + /// + /// Requires the following runtime args: + /// * "public_key": `PublicKey` + /// * "delegation_rate": `u8` + /// * "amount": `U512` + /// * "minimum_delegation_amount": `Option` + /// * "maximum_delegation_amount": `Option` + /// * "reserved_slots": `Option` + #[cfg_attr( + feature = "json-schema", + schemars( + description = "The `add_bid` native entry point, used to create or top off a bid purse." + ) + )] + AddBid, + + /// The `withdraw_bid` native entry point, used to decrease a stake. + /// + /// Requires the following runtime args: + /// * "public_key": `PublicKey` + /// * "amount": `U512` + #[cfg_attr( + feature = "json-schema", + schemars(description = "The `withdraw_bid` native entry point, used to decrease a stake.") + )] + WithdrawBid, + + /// The `delegate` native entry point, used to add a new delegator or increase an existing + /// delegator's stake. 
+ /// + /// Requires the following runtime args: + /// * "delegator": `PublicKey` + /// * "validator": `PublicKey` + /// * "amount": `U512` + #[cfg_attr( + feature = "json-schema", + schemars( + description = "The `delegate` native entry point, used to add a new delegator or \ + increase an existing delegator's stake." + ) + )] + Delegate, + + /// The `undelegate` native entry point, used to reduce a delegator's stake or remove the + /// delegator if the remaining stake is 0. + /// + /// Requires the following runtime args: + /// * "delegator": `PublicKey` + /// * "validator": `PublicKey` + /// * "amount": `U512` + #[cfg_attr( + feature = "json-schema", + schemars( + description = "The `undelegate` native entry point, used to reduce a delegator's \ + stake or remove the delegator if the remaining stake is 0." + ) + )] + Undelegate, + + /// The `redelegate` native entry point, used to reduce a delegator's stake or remove the + /// delegator if the remaining stake is 0, and after the unbonding delay, automatically + /// delegate to a new validator. + /// + /// Requires the following runtime args: + /// * "delegator": `PublicKey` + /// * "validator": `PublicKey` + /// * "amount": `U512` + /// * "new_validator": `PublicKey` + #[cfg_attr( + feature = "json-schema", + schemars( + description = "The `redelegate` native entry point, used to reduce a delegator's stake \ + or remove the delegator if the remaining stake is 0, and after the unbonding delay, \ + automatically delegate to a new validator." + ) + )] + Redelegate, + + /// The `activate bid` native entry point, used to reactivate an inactive bid. + /// + /// Requires the following runtime args: + /// * "validator_public_key": `PublicKey` + #[cfg_attr( + feature = "json-schema", + schemars( + description = "The `activate_bid` native entry point, used to used to reactivate an \ + inactive bid." + ) + )] + ActivateBid, + + /// The `change_bid_public_key` native entry point, used to change a bid's public key. 
+ /// + /// Requires the following runtime args: + /// * "public_key": `PublicKey` + /// * "new_public_key": `PublicKey` + #[cfg_attr( + feature = "json-schema", + schemars( + description = "The `change_bid_public_key` native entry point, used to change a bid's public key." + ) + )] + ChangeBidPublicKey, + + /// The `add_reservations` native entry point, used to add delegators to validator's reserve + /// list. + /// + /// Requires the following runtime args: + /// * "reservations": `Vec` + #[cfg_attr( + feature = "json-schema", + schemars( + description = "The `add_reservations` native entry point, used to add delegator to \ + validator's reserve list" + ) + )] + AddReservations, + + /// The `cancel_reservations` native entry point, used to remove delegators from validator's + /// reserve list. + /// + /// Requires the following runtime args: + /// * "validator": `PublicKey` + /// * "delegators": `Vec` + #[cfg_attr( + feature = "json-schema", + schemars( + description = "The `cancel_reservations` native entry point, used to remove delegator \ + from validator's reserve list" + ) + )] + CancelReservations, +} + +impl TransactionEntryPoint { + /// Returns a random `TransactionEntryPoint`. 
+ #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + match rng.gen_range(0..13) { + 0 => TransactionEntryPoint::Custom(rng.random_string(1..21)), + 1 => TransactionEntryPoint::Transfer, + 2 => TransactionEntryPoint::AddBid, + 3 => TransactionEntryPoint::WithdrawBid, + 4 => TransactionEntryPoint::Delegate, + 5 => TransactionEntryPoint::Undelegate, + 6 => TransactionEntryPoint::Redelegate, + 7 => TransactionEntryPoint::ActivateBid, + 8 => TransactionEntryPoint::ChangeBidPublicKey, + 9 => TransactionEntryPoint::Call, + 10 => TransactionEntryPoint::AddReservations, + 11 => TransactionEntryPoint::CancelReservations, + 12 => TransactionEntryPoint::Burn, + _ => unreachable!(), + } + } + + fn serialized_field_lengths(&self) -> Vec { + match self { + TransactionEntryPoint::Custom(custom) => { + vec![ + crate::bytesrepr::U8_SERIALIZED_LENGTH, + custom.serialized_length(), + ] + } + TransactionEntryPoint::Call + | TransactionEntryPoint::Transfer + | TransactionEntryPoint::Burn + | TransactionEntryPoint::AddBid + | TransactionEntryPoint::WithdrawBid + | TransactionEntryPoint::Delegate + | TransactionEntryPoint::Undelegate + | TransactionEntryPoint::Redelegate + | TransactionEntryPoint::ActivateBid + | TransactionEntryPoint::ChangeBidPublicKey + | TransactionEntryPoint::AddReservations + | TransactionEntryPoint::CancelReservations => { + vec![crate::bytesrepr::U8_SERIALIZED_LENGTH] + } + } + } + + /// Returns custom entry point name if relevant. 
+ pub fn custom_entry_point(&self) -> Option { + if let TransactionEntryPoint::Custom(entry_point) = self { + Some(entry_point.clone()) + } else { + None + } + } +} + +const TAG_FIELD_INDEX: u16 = 0; + +const CALL_VARIANT_TAG: u8 = 0; + +const CUSTOM_VARIANT_TAG: u8 = 1; +const CUSTOM_CUSTOM_INDEX: u16 = 1; + +const TRANSFER_VARIANT_TAG: u8 = 2; +const ADD_BID_VARIANT_TAG: u8 = 3; +const WITHDRAW_BID_VARIANT_TAG: u8 = 4; +const DELEGATE_VARIANT_TAG: u8 = 5; +const UNDELEGATE_VARIANT_TAG: u8 = 6; +const REDELEGATE_VARIANT_TAG: u8 = 7; +const ACTIVATE_BID_VARIANT_TAG: u8 = 8; +const CHANGE_BID_PUBLIC_KEY_VARIANT_TAG: u8 = 9; +const ADD_RESERVATIONS_VARIANT_TAG: u8 = 10; +const CANCEL_RESERVATIONS_VARIANT_TAG: u8 = 11; +const BURN_VARIANT_TAG: u8 = 12; + +impl ToBytes for TransactionEntryPoint { + fn to_bytes(&self) -> Result, Error> { + match self { + TransactionEntryPoint::Call => { + CalltableSerializationEnvelopeBuilder::new(self.serialized_field_lengths())? + .add_field(TAG_FIELD_INDEX, &CALL_VARIANT_TAG)? + .binary_payload_bytes() + } + TransactionEntryPoint::Custom(custom) => { + CalltableSerializationEnvelopeBuilder::new(self.serialized_field_lengths())? + .add_field(TAG_FIELD_INDEX, &CUSTOM_VARIANT_TAG)? + .add_field(CUSTOM_CUSTOM_INDEX, &custom)? + .binary_payload_bytes() + } + TransactionEntryPoint::Transfer => { + CalltableSerializationEnvelopeBuilder::new(self.serialized_field_lengths())? + .add_field(TAG_FIELD_INDEX, &TRANSFER_VARIANT_TAG)? + .binary_payload_bytes() + } + TransactionEntryPoint::Burn => { + CalltableSerializationEnvelopeBuilder::new(self.serialized_field_lengths())? + .add_field(TAG_FIELD_INDEX, &BURN_VARIANT_TAG)? + .binary_payload_bytes() + } + TransactionEntryPoint::AddBid => { + CalltableSerializationEnvelopeBuilder::new(self.serialized_field_lengths())? + .add_field(TAG_FIELD_INDEX, &ADD_BID_VARIANT_TAG)? 
+ .binary_payload_bytes() + } + TransactionEntryPoint::WithdrawBid => { + CalltableSerializationEnvelopeBuilder::new(self.serialized_field_lengths())? + .add_field(TAG_FIELD_INDEX, &WITHDRAW_BID_VARIANT_TAG)? + .binary_payload_bytes() + } + TransactionEntryPoint::Delegate => { + CalltableSerializationEnvelopeBuilder::new(self.serialized_field_lengths())? + .add_field(TAG_FIELD_INDEX, &DELEGATE_VARIANT_TAG)? + .binary_payload_bytes() + } + TransactionEntryPoint::Undelegate => { + CalltableSerializationEnvelopeBuilder::new(self.serialized_field_lengths())? + .add_field(TAG_FIELD_INDEX, &UNDELEGATE_VARIANT_TAG)? + .binary_payload_bytes() + } + TransactionEntryPoint::Redelegate => { + CalltableSerializationEnvelopeBuilder::new(self.serialized_field_lengths())? + .add_field(TAG_FIELD_INDEX, &REDELEGATE_VARIANT_TAG)? + .binary_payload_bytes() + } + TransactionEntryPoint::ActivateBid => { + CalltableSerializationEnvelopeBuilder::new(self.serialized_field_lengths())? + .add_field(TAG_FIELD_INDEX, &ACTIVATE_BID_VARIANT_TAG)? + .binary_payload_bytes() + } + TransactionEntryPoint::ChangeBidPublicKey => { + CalltableSerializationEnvelopeBuilder::new(self.serialized_field_lengths())? + .add_field(TAG_FIELD_INDEX, &CHANGE_BID_PUBLIC_KEY_VARIANT_TAG)? + .binary_payload_bytes() + } + TransactionEntryPoint::AddReservations => { + CalltableSerializationEnvelopeBuilder::new(self.serialized_field_lengths())? + .add_field(TAG_FIELD_INDEX, &ADD_RESERVATIONS_VARIANT_TAG)? + .binary_payload_bytes() + } + TransactionEntryPoint::CancelReservations => { + CalltableSerializationEnvelopeBuilder::new(self.serialized_field_lengths())? + .add_field(TAG_FIELD_INDEX, &CANCEL_RESERVATIONS_VARIANT_TAG)? 
+ .binary_payload_bytes() + } + } + } + fn serialized_length(&self) -> usize { + CalltableSerializationEnvelope::estimate_size(self.serialized_field_lengths()) + } +} + +impl FromBytes for TransactionEntryPoint { + fn from_bytes(bytes: &[u8]) -> Result<(TransactionEntryPoint, &[u8]), Error> { + let (binary_payload, remainder) = CalltableSerializationEnvelope::from_bytes(2u32, bytes)?; + let window = binary_payload.start_consuming()?.ok_or(Formatting)?; + window.verify_index(TAG_FIELD_INDEX)?; + let (tag, window) = window.deserialize_and_maybe_next::()?; + let to_ret = match tag { + CALL_VARIANT_TAG => { + if window.is_some() { + return Err(Formatting); + } + Ok(TransactionEntryPoint::Call) + } + CUSTOM_VARIANT_TAG => { + let window = window.ok_or(Formatting)?; + window.verify_index(CUSTOM_CUSTOM_INDEX)?; + let (custom, window) = window.deserialize_and_maybe_next::()?; + if window.is_some() { + return Err(Formatting); + } + Ok(TransactionEntryPoint::Custom(custom)) + } + TRANSFER_VARIANT_TAG => { + if window.is_some() { + return Err(Formatting); + } + Ok(TransactionEntryPoint::Transfer) + } + BURN_VARIANT_TAG => { + if window.is_some() { + return Err(Formatting); + } + Ok(TransactionEntryPoint::Burn) + } + ADD_BID_VARIANT_TAG => { + if window.is_some() { + return Err(Formatting); + } + Ok(TransactionEntryPoint::AddBid) + } + WITHDRAW_BID_VARIANT_TAG => { + if window.is_some() { + return Err(Formatting); + } + Ok(TransactionEntryPoint::WithdrawBid) + } + DELEGATE_VARIANT_TAG => { + if window.is_some() { + return Err(Formatting); + } + Ok(TransactionEntryPoint::Delegate) + } + UNDELEGATE_VARIANT_TAG => { + if window.is_some() { + return Err(Formatting); + } + Ok(TransactionEntryPoint::Undelegate) + } + REDELEGATE_VARIANT_TAG => { + if window.is_some() { + return Err(Formatting); + } + Ok(TransactionEntryPoint::Redelegate) + } + ACTIVATE_BID_VARIANT_TAG => { + if window.is_some() { + return Err(Formatting); + } + Ok(TransactionEntryPoint::ActivateBid) + } + 
CHANGE_BID_PUBLIC_KEY_VARIANT_TAG => { + if window.is_some() { + return Err(Formatting); + } + Ok(TransactionEntryPoint::ChangeBidPublicKey) + } + ADD_RESERVATIONS_VARIANT_TAG => { + if window.is_some() { + return Err(Formatting); + } + Ok(TransactionEntryPoint::AddReservations) + } + CANCEL_RESERVATIONS_VARIANT_TAG => { + if window.is_some() { + return Err(Formatting); + } + Ok(TransactionEntryPoint::CancelReservations) + } + _ => Err(Formatting), + }; + to_ret.map(|endpoint| (endpoint, remainder)) + } +} + +impl Display for TransactionEntryPoint { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + TransactionEntryPoint::Call => write!(formatter, "call"), + TransactionEntryPoint::Custom(entry_point) => { + write!(formatter, "custom({entry_point})") + } + TransactionEntryPoint::Transfer => write!(formatter, "transfer"), + TransactionEntryPoint::Burn => write!(formatter, "burn"), + TransactionEntryPoint::AddBid => write!(formatter, "add_bid"), + TransactionEntryPoint::WithdrawBid => write!(formatter, "withdraw_bid"), + TransactionEntryPoint::Delegate => write!(formatter, "delegate"), + TransactionEntryPoint::Undelegate => write!(formatter, "undelegate"), + TransactionEntryPoint::Redelegate => write!(formatter, "redelegate"), + TransactionEntryPoint::ActivateBid => write!(formatter, "activate_bid"), + TransactionEntryPoint::ChangeBidPublicKey => write!(formatter, "change_bid_public_key"), + TransactionEntryPoint::AddReservations => write!(formatter, "add_reservations"), + TransactionEntryPoint::CancelReservations => write!(formatter, "cancel_reservations"), + } + } +} + +impl From<&str> for TransactionEntryPoint { + fn from(value: &str) -> Self { + if value.to_lowercase() == mint::METHOD_TRANSFER { + return TransactionEntryPoint::Transfer; + } + if value.to_lowercase() == mint::METHOD_BURN { + return TransactionEntryPoint::Burn; + } + if value.to_lowercase() == auction::METHOD_ACTIVATE_BID { + return TransactionEntryPoint::ActivateBid; + } + 
if value.to_lowercase() == auction::METHOD_ADD_BID { + return TransactionEntryPoint::AddBid; + } + if value.to_lowercase() == auction::METHOD_WITHDRAW_BID { + return TransactionEntryPoint::WithdrawBid; + } + if value.to_lowercase() == auction::METHOD_DELEGATE { + return TransactionEntryPoint::Delegate; + } + if value.to_lowercase() == auction::METHOD_UNDELEGATE { + return TransactionEntryPoint::Undelegate; + } + if value.to_lowercase() == auction::METHOD_REDELEGATE { + return TransactionEntryPoint::Redelegate; + } + if value.to_lowercase() == auction::METHOD_CHANGE_BID_PUBLIC_KEY { + return TransactionEntryPoint::ChangeBidPublicKey; + } + + TransactionEntryPoint::Custom(value.to_string()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{bytesrepr::test_serialization_roundtrip, gens::transaction_entry_point_arb}; + use proptest::prelude::*; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + for _ in 0..10 { + test_serialization_roundtrip(&TransactionEntryPoint::random(rng)); + } + } + + proptest! 
{ + #[test] + fn bytesrepr_roundtrip_from_arb(entry_point in transaction_entry_point_arb()) { + test_serialization_roundtrip(&entry_point); + } + } +} diff --git a/types/src/transaction/transaction_hash.rs b/types/src/transaction/transaction_hash.rs new file mode 100644 index 0000000000..948be894da --- /dev/null +++ b/types/src/transaction/transaction_hash.rs @@ -0,0 +1,178 @@ +use alloc::{string::String, vec::Vec}; +use core::fmt::{self, Display, Formatter}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::Rng; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use super::{DeployHash, TransactionV1Hash}; +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + Digest, +}; + +const DEPLOY_TAG: u8 = 0; +const V1_TAG: u8 = 1; +const TAG_LENGTH: u8 = 1; + +/// A versioned wrapper for a transaction hash or deploy hash. +#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub enum TransactionHash { + /// A deploy hash. + Deploy(DeployHash), + /// A version 1 transaction hash. + #[serde(rename = "Version1")] + V1(TransactionV1Hash), +} + +impl TransactionHash { + /// The number of bytes in a `DeployHash` digest. + pub const LENGTH: usize = TAG_LENGTH as usize + Digest::LENGTH; + /// Digest representation of hash. + pub fn digest(&self) -> Digest { + match self { + TransactionHash::Deploy(deploy_hash) => *deploy_hash.inner(), + TransactionHash::V1(transaction_hash) => *transaction_hash.inner(), + } + } + + /// Hexadecimal representation of the hash. + pub fn to_hex_string(&self) -> String { + base16::encode_lower(&self.digest()) + } + + /// Returns a random `TransactionHash`. 
+ #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + match rng.gen_range(0..2) { + 0 => TransactionHash::from(DeployHash::random(rng)), + 1 => TransactionHash::from(TransactionV1Hash::random(rng)), + _ => panic!(), + } + } + + /// Returns a new `TransactionHash` directly initialized with the provided bytes; no hashing + /// is done. + pub const fn from_raw(raw_digest: [u8; TransactionV1Hash::LENGTH]) -> Self { + TransactionHash::V1(TransactionV1Hash::from_raw(raw_digest)) + } +} + +impl From for TransactionHash { + fn from(hash: DeployHash) -> Self { + Self::Deploy(hash) + } +} + +impl From<&DeployHash> for TransactionHash { + fn from(hash: &DeployHash) -> Self { + Self::from(*hash) + } +} + +impl From for TransactionHash { + fn from(hash: TransactionV1Hash) -> Self { + Self::V1(hash) + } +} + +impl From<&TransactionV1Hash> for TransactionHash { + fn from(hash: &TransactionV1Hash) -> Self { + Self::from(*hash) + } +} + +impl Default for TransactionHash { + fn default() -> Self { + TransactionHash::V1(TransactionV1Hash::default()) + } +} + +impl Display for TransactionHash { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + TransactionHash::Deploy(hash) => Display::fmt(hash, formatter), + TransactionHash::V1(hash) => Display::fmt(hash, formatter), + } + } +} + +impl AsRef<[u8]> for TransactionHash { + fn as_ref(&self) -> &[u8] { + match self { + TransactionHash::Deploy(hash) => hash.as_ref(), + TransactionHash::V1(hash) => hash.as_ref(), + } + } +} + +impl ToBytes for TransactionHash { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + U8_SERIALIZED_LENGTH + + match self { + TransactionHash::Deploy(hash) => hash.serialized_length(), + TransactionHash::V1(hash) => hash.serialized_length(), + } + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), 
bytesrepr::Error> { + match self { + TransactionHash::Deploy(hash) => { + DEPLOY_TAG.write_bytes(writer)?; + hash.write_bytes(writer) + } + TransactionHash::V1(hash) => { + V1_TAG.write_bytes(writer)?; + hash.write_bytes(writer) + } + } + } +} + +impl FromBytes for TransactionHash { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + match tag { + DEPLOY_TAG => { + let (hash, remainder) = DeployHash::from_bytes(remainder)?; + Ok((TransactionHash::Deploy(hash), remainder)) + } + V1_TAG => { + let (hash, remainder) = TransactionV1Hash::from_bytes(remainder)?; + Ok((TransactionHash::V1(hash), remainder)) + } + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::testing::TestRng; + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + + let hash = TransactionHash::from(DeployHash::random(rng)); + bytesrepr::test_serialization_roundtrip(&hash); + + let hash = TransactionHash::from(TransactionV1Hash::random(rng)); + bytesrepr::test_serialization_roundtrip(&hash); + } +} diff --git a/types/src/transaction/transaction_id.rs b/types/src/transaction/transaction_id.rs new file mode 100644 index 0000000000..c4964511a4 --- /dev/null +++ b/types/src/transaction/transaction_id.rs @@ -0,0 +1,100 @@ +use alloc::vec::Vec; +use core::fmt::{self, Display, Formatter}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +use serde::{Deserialize, Serialize}; + +#[cfg(doc)] +use super::Transaction; +use super::{ApprovalsHash, TransactionHash}; +use crate::bytesrepr::{self, FromBytes, ToBytes}; +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; + +/// The unique identifier of a [`Transaction`], comprising its [`TransactionHash`] and +/// [`ApprovalsHash`]. 
+#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[serde(deny_unknown_fields)] +pub struct TransactionId { + /// The transaction hash. + transaction_hash: TransactionHash, + /// The approvals hash. + approvals_hash: ApprovalsHash, +} + +impl TransactionId { + /// Returns a new `TransactionId`. + pub fn new(transaction_hash: TransactionHash, approvals_hash: ApprovalsHash) -> Self { + TransactionId { + transaction_hash, + approvals_hash, + } + } + + /// Returns the transaction hash. + pub fn transaction_hash(&self) -> TransactionHash { + self.transaction_hash + } + + /// Returns the approvals hash. + pub fn approvals_hash(&self) -> ApprovalsHash { + self.approvals_hash + } + + /// Returns a random `TransactionId`. + #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + TransactionId::new(TransactionHash::random(rng), ApprovalsHash::random(rng)) + } +} + +impl Display for TransactionId { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + write!( + formatter, + "transaction-id({}, {})", + self.transaction_hash(), + self.approvals_hash() + ) + } +} + +impl ToBytes for TransactionId { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.transaction_hash.write_bytes(writer)?; + self.approvals_hash.write_bytes(writer) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.transaction_hash.serialized_length() + self.approvals_hash.serialized_length() + } +} + +impl FromBytes for TransactionId { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (transaction_hash, rem) = TransactionHash::from_bytes(bytes)?; + let (approvals_hash, rem) = ApprovalsHash::from_bytes(rem)?; + let transaction_id = 
TransactionId::new(transaction_hash, approvals_hash); + Ok((transaction_id, rem)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + let id = TransactionId::random(rng); + bytesrepr::test_serialization_roundtrip(&id); + } +} diff --git a/types/src/transaction/transaction_invocation_target.rs b/types/src/transaction/transaction_invocation_target.rs new file mode 100644 index 0000000000..1ba80ce131 --- /dev/null +++ b/types/src/transaction/transaction_invocation_target.rs @@ -0,0 +1,678 @@ +use alloc::{string::String, vec::Vec}; +use core::fmt::{self, Debug, Display, Formatter}; + +use super::{serialization::CalltableSerializationEnvelope, AddressableEntityIdentifier}; +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +use crate::{ + bytesrepr::{ + Error::{self, Formatting}, + FromBytes, ToBytes, + }, + contracts::ProtocolVersionMajor, + serde_helpers, + transaction::serialization::CalltableSerializationEnvelopeBuilder, + AddressableEntityHash, EntityVersion, HashAddr, PackageAddr, PackageHash, PackageIdentifier, +}; +#[cfg(feature = "datasize")] +use datasize::DataSize; +use hex_fmt::HexFmt; +#[cfg(any(feature = "testing", test))] +use rand::Rng; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +/// The identifier of a [`crate::TransactionTarget::Stored`]. +#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr( + feature = "json-schema", + derive(JsonSchema), + schemars(description = "Identifier of a `Stored` transaction target.") +)] +#[serde(deny_unknown_fields)] +pub enum TransactionInvocationTarget { + /// The address identifying the invocable entity. 
+ #[serde(with = "serde_helpers::raw_32_byte_array")] + #[cfg_attr( + feature = "json-schema", + schemars( + with = "String", + description = "Hex-encoded entity address identifying the invocable entity." + ) + )] + ByHash(HashAddr), /* currently needs to be of contract tag + * variant */ + /// The alias identifying the invocable entity. + ByName(String), + /// The address and optional version identifying the package. + ByPackageHash { + /// The package address. + #[serde(with = "serde_helpers::raw_32_byte_array")] + #[cfg_attr( + feature = "json-schema", + schemars(with = "String", description = "Hex-encoded address of the package.") + )] + addr: PackageAddr, + /// The package version. + /// + /// If `None`, the latest enabled version is implied. + version: Option, + /// The major protocol version of the contract package. + /// + /// `None` implies latest major protocol version. + #[serde(skip_serializing_if = "Option::is_none")] + protocol_version_major: Option, + }, + /// The alias and optional version identifying the package. + ByPackageName { + /// The package name. + name: String, + /// The package version. + /// + /// If `None`, the latest enabled version is implied. + version: Option, + /// The major protocol version of the contract package. + /// + /// `None` implies latest major protocol version. + #[serde(skip_serializing_if = "Option::is_none")] + protocol_version_major: Option, + }, +} + +impl TransactionInvocationTarget { + /// Returns a new `TransactionInvocationTarget::InvocableEntity`. + pub fn new_invocable_entity(hash: AddressableEntityHash) -> Self { + TransactionInvocationTarget::ByHash(hash.value()) + } + + /// Returns a new `TransactionInvocationTarget::InvocableEntityAlias`. + pub fn new_invocable_entity_alias(alias: String) -> Self { + TransactionInvocationTarget::ByName(alias) + } + + /// Returns a new `TransactionInvocationTarget::Package`. 
+ #[deprecated(since = "5.0.1", note = "please use `new_package_with_major` instead")] + pub fn new_package(hash: PackageHash, version: Option) -> Self { + TransactionInvocationTarget::ByPackageHash { + addr: hash.value(), + version, + protocol_version_major: None, + } + } + + /// Returns a new `TransactionInvocationTarget::Package`. + pub fn new_package_with_major( + hash: PackageHash, + version: Option, + protocol_version_major: Option, + ) -> Self { + TransactionInvocationTarget::ByPackageHash { + addr: hash.value(), + version, + protocol_version_major, + } + } + + /// Returns a new `TransactionInvocationTarget::PackageAlias`. + #[deprecated( + since = "5.0.1", + note = "please use `new_package_alias_with_major` instead" + )] + pub fn new_package_alias(alias: String, version: Option) -> Self { + TransactionInvocationTarget::ByPackageName { + name: alias, + version, + protocol_version_major: None, + } + } + + /// Returns a new `TransactionInvocationTarget::PackageAlias`. + pub fn new_package_alias_with_major( + alias: String, + version: Option, + protocol_version_major: Option, + ) -> Self { + TransactionInvocationTarget::ByPackageName { + name: alias, + version, + protocol_version_major, + } + } + + #[cfg(test)] + pub fn new_package_alias_with_major_and_entity( + hash: PackageHash, + version: Option, + protocol_version_major: Option, + ) -> Self { + TransactionInvocationTarget::ByPackageHash { + addr: hash.value(), + version, + protocol_version_major, + } + } + + /// Returns the contract `hash_addr`, if any. + pub fn contract_by_hash(&self) -> Option { + if let TransactionInvocationTarget::ByHash(hash_addr) = self { + Some(*hash_addr) + } else { + None + } + } + + /// Returns the identifier of the addressable entity, if present. 
+ pub fn addressable_entity_identifier(&self) -> Option { + match self { + TransactionInvocationTarget::ByHash(addr) => Some(AddressableEntityIdentifier::Hash( + AddressableEntityHash::new(*addr), + )), + TransactionInvocationTarget::ByName(alias) => { + Some(AddressableEntityIdentifier::Name(alias.clone())) + } + TransactionInvocationTarget::ByPackageHash { .. } + | TransactionInvocationTarget::ByPackageName { .. } => None, + } + } + + /// Returns the identifier of the contract package, if present. + pub fn package_identifier(&self) -> Option { + match self { + TransactionInvocationTarget::ByHash(_) | TransactionInvocationTarget::ByName(_) => None, + TransactionInvocationTarget::ByPackageHash { + addr, + version, + protocol_version_major, + } => Some(PackageIdentifier::HashWithMajorVersion { + package_hash: PackageHash::new(*addr), + version: *version, + protocol_version_major: *protocol_version_major, + }), + TransactionInvocationTarget::ByPackageName { + name: alias, + version, + protocol_version_major, + } => Some(PackageIdentifier::NameWithMajorVersion { + name: alias.clone(), + version: *version, + protocol_version_major: *protocol_version_major, + }), + } + } + + fn serialized_field_lengths(&self) -> Vec { + match self { + TransactionInvocationTarget::ByHash(hash) => { + vec![ + crate::bytesrepr::U8_SERIALIZED_LENGTH, + hash.serialized_length(), + ] + } + TransactionInvocationTarget::ByName(name) => { + vec![ + crate::bytesrepr::U8_SERIALIZED_LENGTH, + name.serialized_length(), + ] + } + TransactionInvocationTarget::ByPackageHash { + addr, + version, + protocol_version_major, + } => { + let mut field_sizes = vec![ + crate::bytesrepr::U8_SERIALIZED_LENGTH, + addr.serialized_length(), + version.serialized_length(), + ]; + if let Some(protocol_version_major) = protocol_version_major { + //When we serialize protocol_version_major we put the actual value, + // if we want to denote `None` we don't put an entry in the calltable. 
+ field_sizes.push(protocol_version_major.serialized_length()); + } + field_sizes + } + TransactionInvocationTarget::ByPackageName { + name, + version, + protocol_version_major, + } => { + let mut field_sizes = vec![ + crate::bytesrepr::U8_SERIALIZED_LENGTH, + name.serialized_length(), + version.serialized_length(), + ]; + if let Some(protocol_version_major) = protocol_version_major { + //When we serialize protocol_version_major we put the actual value, + // if we want to denote `None` we don't put an entry in the calltable. + field_sizes.push(protocol_version_major.serialized_length()); + } + field_sizes + } + } + } + + /// Returns a random `TransactionInvocationTarget`. + #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + match rng.gen_range(0..4) { + 0 => TransactionInvocationTarget::ByHash(rng.gen()), + 1 => TransactionInvocationTarget::ByName(rng.random_string(1..21)), + 2 => TransactionInvocationTarget::ByPackageHash { + addr: rng.gen(), + version: rng.gen(), + protocol_version_major: rng.gen(), + }, + 3 => TransactionInvocationTarget::ByPackageName { + name: rng.random_string(1..21), + version: rng.gen(), + protocol_version_major: rng.gen(), + }, + _ => unreachable!(), + } + } +} + +const TAG_FIELD_INDEX: u16 = 0; + +const BY_HASH_VARIANT: u8 = 0; +const BY_HASH_HASH_INDEX: u16 = 1; + +const BY_NAME_VARIANT: u8 = 1; +const BY_NAME_NAME_INDEX: u16 = 1; + +const BY_PACKAGE_HASH_VARIANT: u8 = 2; +const BY_PACKAGE_HASH_ADDR_INDEX: u16 = 1; +const BY_PACKAGE_HASH_VERSION_INDEX: u16 = 2; +const BY_PACKAGE_HASH_PROTOCOL_VERSION_MAJOR_INDEX: u16 = 3; + +const BY_PACKAGE_NAME_VARIANT: u8 = 3; +const BY_PACKAGE_NAME_NAME_INDEX: u16 = 1; +const BY_PACKAGE_NAME_VERSION_INDEX: u16 = 2; +const BY_PACKAGE_NAME_PROTOCOL_VERSION_MAJOR_INDEX: u16 = 3; + +impl ToBytes for TransactionInvocationTarget { + fn to_bytes(&self) -> Result, Error> { + match self { + TransactionInvocationTarget::ByHash(hash) => { + 
CalltableSerializationEnvelopeBuilder::new(self.serialized_field_lengths())? + .add_field(TAG_FIELD_INDEX, &BY_HASH_VARIANT)? + .add_field(BY_HASH_HASH_INDEX, &hash)? + .binary_payload_bytes() + } + TransactionInvocationTarget::ByName(name) => { + CalltableSerializationEnvelopeBuilder::new(self.serialized_field_lengths())? + .add_field(TAG_FIELD_INDEX, &BY_NAME_VARIANT)? + .add_field(BY_NAME_NAME_INDEX, &name)? + .binary_payload_bytes() + } + TransactionInvocationTarget::ByPackageHash { + addr, + version, + protocol_version_major, + } => { + let mut builder = + CalltableSerializationEnvelopeBuilder::new(self.serialized_field_lengths())? + .add_field(TAG_FIELD_INDEX, &BY_PACKAGE_HASH_VARIANT)? + .add_field(BY_PACKAGE_HASH_ADDR_INDEX, &addr)? + .add_field(BY_PACKAGE_HASH_VERSION_INDEX, &version)?; + if let Some(protocol_version_major) = protocol_version_major { + //We do this to support transactions that were created before the + // `protocol_version_major` fix. The pre-fix transactions + // will not have a BY_PACKAGE_HASH_PROTOCOL_VERSION_MAJOR_INDEX + // entry and we need to maintain ability to deserialize them. + builder = builder.add_field( + BY_PACKAGE_HASH_PROTOCOL_VERSION_MAJOR_INDEX, + protocol_version_major, + )?; + } + builder.binary_payload_bytes() + } + TransactionInvocationTarget::ByPackageName { + name, + version, + protocol_version_major, + } => { + let mut builder = + CalltableSerializationEnvelopeBuilder::new(self.serialized_field_lengths())? + .add_field(TAG_FIELD_INDEX, &BY_PACKAGE_NAME_VARIANT)? + .add_field(BY_PACKAGE_NAME_NAME_INDEX, &name)? + .add_field(BY_PACKAGE_NAME_VERSION_INDEX, &version)?; + if let Some(protocol_version_major) = protocol_version_major { + //We do this to support transactions that were created before the + // `protocol_version_major` fix. The pre-fix transactions + // will not have a BY_PACKAGE_HASH_PROTOCOL_VERSION_MAJOR_INDEX + // entry and we need to maintain ability to deserialize them. 
+ builder = builder.add_field( + BY_PACKAGE_NAME_PROTOCOL_VERSION_MAJOR_INDEX, + protocol_version_major, + )?; + } + builder.binary_payload_bytes() + } + } + } + fn serialized_length(&self) -> usize { + CalltableSerializationEnvelope::estimate_size(self.serialized_field_lengths()) + } +} + +impl FromBytes for TransactionInvocationTarget { + fn from_bytes(bytes: &[u8]) -> Result<(TransactionInvocationTarget, &[u8]), Error> { + let (binary_payload, remainder) = CalltableSerializationEnvelope::from_bytes(4, bytes)?; + let window = binary_payload.start_consuming()?.ok_or(Formatting)?; + window.verify_index(TAG_FIELD_INDEX)?; + let (tag, window) = window.deserialize_and_maybe_next::()?; + let to_ret = match tag { + BY_HASH_VARIANT => { + let window = window.ok_or(Formatting)?; + window.verify_index(BY_HASH_HASH_INDEX)?; + let (hash, window) = window.deserialize_and_maybe_next::()?; + if window.is_some() { + return Err(Formatting); + } + Ok(TransactionInvocationTarget::ByHash(hash)) + } + BY_NAME_VARIANT => { + let window = window.ok_or(Formatting)?; + window.verify_index(BY_NAME_NAME_INDEX)?; + let (name, window) = window.deserialize_and_maybe_next::()?; + if window.is_some() { + return Err(Formatting); + } + Ok(TransactionInvocationTarget::ByName(name)) + } + BY_PACKAGE_HASH_VARIANT => { + let window = window.ok_or(Formatting)?; + window.verify_index(BY_PACKAGE_HASH_ADDR_INDEX)?; + let (addr, window) = window.deserialize_and_maybe_next::()?; + let window = window.ok_or(Formatting)?; + window.verify_index(BY_PACKAGE_HASH_VERSION_INDEX)?; + let (version, window) = + window.deserialize_and_maybe_next::>()?; + let protocol_version_major = if let Some(window) = window { + window.verify_index(BY_PACKAGE_HASH_PROTOCOL_VERSION_MAJOR_INDEX)?; + let (protocol_version_major, window) = + window.deserialize_and_maybe_next::()?; + if window.is_some() { + return Err(Formatting); + } + Some(protocol_version_major) + } else { + if window.is_some() { + return Err(Formatting); + } + None 
+ }; + + Ok(TransactionInvocationTarget::ByPackageHash { + addr, + version, + protocol_version_major, + }) + } + BY_PACKAGE_NAME_VARIANT => { + let window = window.ok_or(Formatting)?; + window.verify_index(BY_PACKAGE_NAME_NAME_INDEX)?; + let (name, window) = window.deserialize_and_maybe_next::()?; + let window = window.ok_or(Formatting)?; + window.verify_index(BY_PACKAGE_NAME_VERSION_INDEX)?; + let (version, window) = + window.deserialize_and_maybe_next::>()?; + let protocol_version_major = if let Some(window) = window { + window.verify_index(BY_PACKAGE_NAME_PROTOCOL_VERSION_MAJOR_INDEX)?; + let (protocol_version_major, window) = + window.deserialize_and_maybe_next::()?; + if window.is_some() { + return Err(Formatting); + } + Some(protocol_version_major) + } else { + if window.is_some() { + return Err(Formatting); + } + None + }; + Ok(TransactionInvocationTarget::ByPackageName { + name, + version, + protocol_version_major, + }) + } + _ => Err(Formatting), + }; + to_ret.map(|endpoint| (endpoint, remainder)) + } +} + +impl Display for TransactionInvocationTarget { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + TransactionInvocationTarget::ByHash(addr) => { + write!(formatter, "invocable-entity({:10})", HexFmt(addr)) + } + TransactionInvocationTarget::ByName(alias) => { + write!(formatter, "invocable-entity({})", alias) + } + TransactionInvocationTarget::ByPackageHash { + addr, + version, + protocol_version_major, + } => { + write!( + formatter, + "package({:10}, version {:?}, protocol_version_major {:?})", + HexFmt(addr), + version, + protocol_version_major + ) + } + TransactionInvocationTarget::ByPackageName { + name: alias, + version, + protocol_version_major, + } => { + write!( + formatter, + "package({}, version {:?}, protocol_version_major {:?})", + alias, version, protocol_version_major + ) + } + } + } +} + +impl Debug for TransactionInvocationTarget { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + 
TransactionInvocationTarget::ByHash(addr) => formatter + .debug_tuple("InvocableEntity") + .field(&HexFmt(addr)) + .finish(), + TransactionInvocationTarget::ByName(alias) => formatter + .debug_tuple("InvocableEntityAlias") + .field(alias) + .finish(), + TransactionInvocationTarget::ByPackageHash { + addr, + version, + protocol_version_major, + } => formatter + .debug_struct("Package") + .field("addr", &HexFmt(addr)) + .field("version", version) + .field("protocol_version_major", protocol_version_major) + .finish(), + TransactionInvocationTarget::ByPackageName { + name: alias, + version, + protocol_version_major, + } => formatter + .debug_struct("PackageAlias") + .field("alias", alias) + .field("version", version) + .field("protocol_version_major", protocol_version_major) + .finish(), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{bytesrepr, gens::transaction_invocation_target_arb}; + use proptest::prelude::*; + + #[test] + fn json_should_not_produce_version_key_if_none() { + let alias = TransactionInvocationTarget::new_package_alias_with_major( + "abc".to_owned(), + Some(111), + None, + ); + assert!(!serde_json::to_string(&alias) + .unwrap() + .contains("\"protocol_version_major\"")); + + let alias = TransactionInvocationTarget::new_package_alias_with_major( + "abc".to_owned(), + Some(111), + Some(5), + ); + assert!(serde_json::to_string(&alias) + .unwrap() + .contains("\"protocol_version_major\":5")); + + let package = TransactionInvocationTarget::new_package_with_major( + PackageHash::from([1; 32]), + Some(222), + None, + ); + assert!(!serde_json::to_string(&package) + .unwrap() + .contains("\"protocol_version_major\"")); + + let package = TransactionInvocationTarget::new_package_with_major( + PackageHash::from([1; 32]), + Some(222), + Some(5), + ); + assert!(serde_json::to_string(&package) + .unwrap() + .contains("\"protocol_version_major\":5")); + } + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + for _ in 0..10 
{ + bytesrepr::test_serialization_roundtrip(&TransactionInvocationTarget::random(rng)); + } + } + + #[test] + fn by_package_hash_variant_without_version_key_should_serialize_exactly_as_before_the_version_key_change( + ) { + let addr = [1; 32]; + let version = Some(1200); + let field_sizes = vec![ + crate::bytesrepr::U8_SERIALIZED_LENGTH, + addr.serialized_length(), + version.serialized_length(), + ]; + let builder = CalltableSerializationEnvelopeBuilder::new(field_sizes) + .unwrap() + .add_field(TAG_FIELD_INDEX, &BY_PACKAGE_HASH_VARIANT) + .unwrap() + .add_field(BY_PACKAGE_HASH_ADDR_INDEX, &addr) + .unwrap() + .add_field(BY_PACKAGE_HASH_VERSION_INDEX, &version) + .unwrap(); + let bytes = builder.binary_payload_bytes().unwrap(); + let expected = TransactionInvocationTarget::ByPackageHash { + addr, + version, + protocol_version_major: None, + }; + let expected_bytes = expected.to_bytes().unwrap(); + assert_eq!(bytes, expected_bytes); //We want the "legacy" binary representation and current representation without + // protocol_version_major equal + + let (got, remainder) = TransactionInvocationTarget::from_bytes(&bytes).unwrap(); + assert_eq!(expected, got); + assert!(remainder.is_empty()); + } + + #[test] + fn by_package_name_variant_without_version_key_should_serialize_exactly_as_before_the_version_key_change( + ) { + let name = "some_name".to_string(); + let version = Some(1200); + let field_sizes = vec![ + crate::bytesrepr::U8_SERIALIZED_LENGTH, + name.serialized_length(), + version.serialized_length(), + ]; + let builder = CalltableSerializationEnvelopeBuilder::new(field_sizes) + .unwrap() + .add_field(TAG_FIELD_INDEX, &BY_PACKAGE_NAME_VARIANT) + .unwrap() + .add_field(BY_PACKAGE_NAME_NAME_INDEX, &name) + .unwrap() + .add_field(BY_PACKAGE_NAME_VERSION_INDEX, &version) + .unwrap(); + let bytes = builder.binary_payload_bytes().unwrap(); + let expected = TransactionInvocationTarget::ByPackageName { + name, + version, + protocol_version_major: None, + }; + let 
expected_bytes = expected.to_bytes().unwrap(); + assert_eq!(bytes, expected_bytes); //We want the "legacy" binary representation and current representation without + // protocol_version_major equal + + let (got, remainder) = TransactionInvocationTarget::from_bytes(&bytes).unwrap(); + assert_eq!(expected, got); + assert!(remainder.is_empty()); + } + + #[test] + fn by_package_hash_variant_should_deserialize_bytes_that_have_both_version_and_key() { + let target = TransactionInvocationTarget::ByPackageHash { + addr: [1; 32], + version: Some(11), + protocol_version_major: Some(2), + }; + let bytes = target.to_bytes().unwrap(); + let (number_of_fields, _) = u32::from_bytes(&bytes).unwrap(); + assert_eq!(number_of_fields, 4); //We want the enum tag, addr, version (even if it's None) and protocol_version_major to + // have been serialized + let (got, remainder) = TransactionInvocationTarget::from_bytes(&bytes).unwrap(); + assert_eq!(target, got); + assert!(remainder.is_empty()); + } + + #[test] + fn by_package_name_variant_should_deserialize_bytes_that_have_both_version_and_key() { + let target = TransactionInvocationTarget::ByPackageName { + name: "xyz".to_string(), + version: Some(11), + protocol_version_major: Some(3), + }; + let bytes = target.to_bytes().unwrap(); + let (number_of_fields, _) = u32::from_bytes(&bytes).unwrap(); + assert_eq!(number_of_fields, 4); //We want the enum tag, addr, version (even if it's None) and protocol_version_major to + // have been serialized + let (got, remainder) = TransactionInvocationTarget::from_bytes(&bytes).unwrap(); + assert_eq!(target, got); + assert!(remainder.is_empty()); + } + + proptest! 
{ + #[test] + fn generative_bytesrepr_roundtrip(val in transaction_invocation_target_arb()) { + bytesrepr::test_serialization_roundtrip(&val); + } + } +} diff --git a/types/src/transaction/transaction_scheduling.rs b/types/src/transaction/transaction_scheduling.rs new file mode 100644 index 0000000000..30eda2a488 --- /dev/null +++ b/types/src/transaction/transaction_scheduling.rs @@ -0,0 +1,125 @@ +use super::serialization::CalltableSerializationEnvelope; +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +use crate::{ + bytesrepr::{ + Error::{self, Formatting}, + FromBytes, ToBytes, + }, + transaction::serialization::CalltableSerializationEnvelopeBuilder, +}; +use alloc::vec::Vec; +use core::fmt::{self, Display, Formatter}; +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::Rng; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +#[cfg(any(feature = "std", test))] +use serde::{Deserialize, Serialize}; + +/// The scheduling mode of a [`crate::Transaction`]. +#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Debug)] +#[cfg_attr( + any(feature = "std", test), + derive(Serialize, Deserialize), + serde(deny_unknown_fields) +)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr( + feature = "json-schema", + derive(JsonSchema), + schemars(description = "Scheduling mode of a Transaction.") +)] +pub enum TransactionScheduling { + /// No special scheduling applied. + Standard, +} + +impl TransactionScheduling { + fn serialized_field_lengths(&self) -> Vec { + match self { + TransactionScheduling::Standard => { + vec![crate::bytesrepr::U8_SERIALIZED_LENGTH] + } + } + } + + /// Returns a random `TransactionScheduling`. 
+ #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + match rng.gen_range(0..1) { + 0 => TransactionScheduling::Standard, + _ => unreachable!(), + } + } +} + +const TAG_FIELD_INDEX: u16 = 0; + +const STANDARD_VARIANT: u8 = 0; + +impl ToBytes for TransactionScheduling { + fn to_bytes(&self) -> Result, Error> { + match self { + TransactionScheduling::Standard => { + CalltableSerializationEnvelopeBuilder::new(self.serialized_field_lengths())? + .add_field(TAG_FIELD_INDEX, &STANDARD_VARIANT)? + .binary_payload_bytes() + } + } + } + fn serialized_length(&self) -> usize { + CalltableSerializationEnvelope::estimate_size(self.serialized_field_lengths()) + } +} + +impl FromBytes for TransactionScheduling { + fn from_bytes(bytes: &[u8]) -> Result<(TransactionScheduling, &[u8]), Error> { + let (binary_payload, remainder) = CalltableSerializationEnvelope::from_bytes(2, bytes)?; + let window = binary_payload.start_consuming()?.ok_or(Formatting)?; + window.verify_index(0)?; + let (tag, window) = window.deserialize_and_maybe_next::()?; + let to_ret = match tag { + STANDARD_VARIANT => { + if window.is_some() { + return Err(Formatting); + } + Ok(TransactionScheduling::Standard) + } + _ => Err(Formatting), + }; + to_ret.map(|endpoint| (endpoint, remainder)) + } +} + +impl Display for TransactionScheduling { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + TransactionScheduling::Standard => write!(formatter, "schedule(standard)"), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{bytesrepr, gens::transaction_scheduling_arb}; + use proptest::prelude::*; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + for _ in 0..10 { + bytesrepr::test_serialization_roundtrip(&TransactionScheduling::random(rng)); + } + } + + proptest! 
{ + #[test] + fn generative_bytesrepr_roundtrip(val in transaction_scheduling_arb()) { + bytesrepr::test_serialization_roundtrip(&val); + } + } +} diff --git a/types/src/transaction/transaction_target.rs b/types/src/transaction/transaction_target.rs new file mode 100644 index 0000000000..ef22eb1dcb --- /dev/null +++ b/types/src/transaction/transaction_target.rs @@ -0,0 +1,449 @@ +use alloc::vec::Vec; +use core::fmt::{self, Debug, Display, Formatter}; + +use super::{serialization::CalltableSerializationEnvelope, TransactionInvocationTarget}; +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +use crate::{ + bytesrepr::{ + Bytes, + Error::{self, Formatting}, + FromBytes, ToBytes, + }, + transaction::serialization::CalltableSerializationEnvelopeBuilder, + ContractRuntimeTag, HashAddr, +}; +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::{Rng, RngCore}; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +const VM_CASPER_V1_TAG: u8 = 0; +const VM_CASPER_V2_TAG: u8 = 1; +const TRANSFERRED_VALUE_INDEX: u16 = 1; +const SEED_VALUE_INDEX: u16 = 2; + +#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr( + feature = "json-schema", + derive(JsonSchema), + schemars(description = "Session params of a TransactionTarget.") +)] +#[serde(deny_unknown_fields)] +pub enum TransactionRuntimeParams { + VmCasperV1, + VmCasperV2 { + /// The amount of motes to transfer before code is executed. + /// + /// This is for protection against phishing attack where a malicious session code drains + /// the balance of the caller account. The amount stated here is the maximum amount + /// that can be transferred from the caller account to the session account. + transferred_value: u64, + /// The seed for the session code that is used for an installer. 
+ seed: Option<[u8; 32]>, + }, +} + +impl TransactionRuntimeParams { + /// Returns the contract runtime tag. + pub fn contract_runtime_tag(&self) -> ContractRuntimeTag { + match self { + TransactionRuntimeParams::VmCasperV1 => ContractRuntimeTag::VmCasperV1, + TransactionRuntimeParams::VmCasperV2 { .. } => ContractRuntimeTag::VmCasperV2, + } + } + + pub fn seed(&self) -> Option<[u8; 32]> { + match self { + TransactionRuntimeParams::VmCasperV1 => None, + TransactionRuntimeParams::VmCasperV2 { seed, .. } => *seed, + } + } + + pub fn serialized_field_lengths(&self) -> Vec { + match self { + TransactionRuntimeParams::VmCasperV1 => vec![crate::bytesrepr::U8_SERIALIZED_LENGTH], + TransactionRuntimeParams::VmCasperV2 { + transferred_value, + seed, + } => { + vec![ + crate::bytesrepr::U8_SERIALIZED_LENGTH, + transferred_value.serialized_length(), + seed.serialized_length(), + ] + } + } + } +} + +impl ToBytes for TransactionRuntimeParams { + fn to_bytes(&self) -> Result, Error> { + match self { + TransactionRuntimeParams::VmCasperV1 => { + CalltableSerializationEnvelopeBuilder::new(self.serialized_field_lengths())? + .add_field(TAG_FIELD_INDEX, &VM_CASPER_V1_TAG)? + .binary_payload_bytes() + } + TransactionRuntimeParams::VmCasperV2 { + transferred_value, + seed, + } => CalltableSerializationEnvelopeBuilder::new(self.serialized_field_lengths())? + .add_field(TAG_FIELD_INDEX, &VM_CASPER_V2_TAG)? + .add_field(TRANSFERRED_VALUE_INDEX, transferred_value)? + .add_field(SEED_VALUE_INDEX, seed)? 
+ .binary_payload_bytes(), + } + } + + fn serialized_length(&self) -> usize { + match self { + TransactionRuntimeParams::VmCasperV1 => { + CalltableSerializationEnvelope::estimate_size(vec![ + crate::bytesrepr::U8_SERIALIZED_LENGTH, + ]) + } + TransactionRuntimeParams::VmCasperV2 { + transferred_value, + seed, + } => CalltableSerializationEnvelope::estimate_size(vec![ + crate::bytesrepr::U8_SERIALIZED_LENGTH, + transferred_value.serialized_length(), + seed.serialized_length(), + ]), + } + } +} + +impl FromBytes for TransactionRuntimeParams { + fn from_bytes(bytes: &[u8]) -> Result<(TransactionRuntimeParams, &[u8]), Error> { + let (binary_payload, remainder) = CalltableSerializationEnvelope::from_bytes(3, bytes)?; + let window = binary_payload.start_consuming()?.ok_or(Formatting)?; + window.verify_index(TAG_FIELD_INDEX)?; + let (tag, window) = window.deserialize_and_maybe_next::()?; + let to_ret = match tag { + VM_CASPER_V1_TAG => { + if window.is_some() { + return Err(Formatting); + } + Ok(TransactionRuntimeParams::VmCasperV1) + } + VM_CASPER_V2_TAG => { + let window = window.ok_or(Formatting)?; + window.verify_index(TRANSFERRED_VALUE_INDEX)?; + let (transferred_value, window) = window.deserialize_and_maybe_next::()?; + let window = window.ok_or(Formatting)?; + window.verify_index(SEED_VALUE_INDEX)?; + let (seed, window) = window.deserialize_and_maybe_next::>()?; + if window.is_some() { + return Err(Formatting); + } + Ok(TransactionRuntimeParams::VmCasperV2 { + transferred_value, + seed, + }) + } + _ => Err(Formatting), + }; + to_ret.map(|endpoint| (endpoint, remainder)) + } +} + +impl Display for TransactionRuntimeParams { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + TransactionRuntimeParams::VmCasperV1 => write!(formatter, "vm-casper-v1"), + TransactionRuntimeParams::VmCasperV2 { + transferred_value, + seed, + } => write!( + formatter, + "vm-casper-v2 {{ transferred_value: {}, seed: {:?} }}", + transferred_value, seed + ), + } + } 
+} + +/// The execution target of a [`crate::Transaction`]. +#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr( + feature = "json-schema", + derive(JsonSchema), + schemars(description = "Execution target of a Transaction.") +)] +#[serde(deny_unknown_fields)] +pub enum TransactionTarget { + /// The execution target is a native operation (e.g. a transfer). + Native, + /// The execution target is a stored entity or package. + Stored { + /// The identifier of the stored execution target. + id: TransactionInvocationTarget, + /// The execution runtime to use. + runtime: TransactionRuntimeParams, + }, + /// The execution target is the included module bytes, i.e. compiled Wasm. + Session { + /// Flag determining if the Wasm is an install/upgrade. + is_install_upgrade: bool, + /// The compiled Wasm. + module_bytes: Bytes, + /// The execution runtime to use. + runtime: TransactionRuntimeParams, + }, +} + +impl TransactionTarget { + /// Returns a new `TransactionTarget::Native`. + pub fn new_native() -> Self { + TransactionTarget::Native + } + + fn serialized_field_lengths(&self) -> Vec { + match self { + TransactionTarget::Native => { + vec![crate::bytesrepr::U8_SERIALIZED_LENGTH] + } + TransactionTarget::Stored { id, runtime } => { + vec![ + crate::bytesrepr::U8_SERIALIZED_LENGTH, + id.serialized_length(), + runtime.serialized_length(), + ] + } + TransactionTarget::Session { + is_install_upgrade, + module_bytes, + runtime, + } => { + vec![ + crate::bytesrepr::U8_SERIALIZED_LENGTH, + is_install_upgrade.serialized_length(), + runtime.serialized_length(), + module_bytes.serialized_length(), + ] + } + } + } + + /// Returns a `hash_addr` for a targeted contract, if known. 
+ pub fn contract_hash_addr(&self) -> Option { + if let Some(invocation_target) = self.invocation_target() { + invocation_target.contract_by_hash() + } else { + None + } + } + + /// Returns the invocation target, if any. + pub fn invocation_target(&self) -> Option { + match self { + TransactionTarget::Native | TransactionTarget::Session { .. } => None, + TransactionTarget::Stored { id, .. } => Some(id.clone()), + } + } + + /// Returns a random `TransactionTarget`. + #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + match rng.gen_range(0..3) { + 0 => TransactionTarget::Native, + 1 => TransactionTarget::Stored { + id: TransactionInvocationTarget::random(rng), + runtime: TransactionRuntimeParams::VmCasperV1, + }, + 2 => { + let mut buffer = vec![0u8; rng.gen_range(0..100)]; + rng.fill_bytes(buffer.as_mut()); + let is_install_upgrade = rng.gen(); + TransactionTarget::Session { + is_install_upgrade, + module_bytes: Bytes::from(buffer), + runtime: TransactionRuntimeParams::VmCasperV1, + } + } + _ => unreachable!(), + } + } + + /// Returns `true` if the transaction target is [`Session`]. + /// + /// [`Session`]: TransactionTarget::Session + #[must_use] + pub fn is_session(&self) -> bool { + matches!(self, Self::Session { .. }) + } +} + +const TAG_FIELD_INDEX: u16 = 0; + +const NATIVE_VARIANT: u8 = 0; + +const STORED_VARIANT: u8 = 1; +const STORED_ID_INDEX: u16 = 1; +const STORED_RUNTIME_INDEX: u16 = 2; + +const SESSION_VARIANT: u8 = 2; +const SESSION_IS_INSTALL_INDEX: u16 = 1; +const SESSION_RUNTIME_INDEX: u16 = 2; +const SESSION_MODULE_BYTES_INDEX: u16 = 3; + +impl ToBytes for TransactionTarget { + fn to_bytes(&self) -> Result, Error> { + match self { + TransactionTarget::Native => { + CalltableSerializationEnvelopeBuilder::new(self.serialized_field_lengths())? + .add_field(TAG_FIELD_INDEX, &NATIVE_VARIANT)? 
+ .binary_payload_bytes() + } + TransactionTarget::Stored { id, runtime } => { + CalltableSerializationEnvelopeBuilder::new(self.serialized_field_lengths())? + .add_field(TAG_FIELD_INDEX, &STORED_VARIANT)? + .add_field(STORED_ID_INDEX, &id)? + .add_field(STORED_RUNTIME_INDEX, &runtime)? + .binary_payload_bytes() + } + TransactionTarget::Session { + is_install_upgrade, + module_bytes, + runtime, + } => CalltableSerializationEnvelopeBuilder::new(self.serialized_field_lengths())? + .add_field(TAG_FIELD_INDEX, &SESSION_VARIANT)? + .add_field(SESSION_IS_INSTALL_INDEX, &is_install_upgrade)? + .add_field(SESSION_RUNTIME_INDEX, &runtime)? + .add_field(SESSION_MODULE_BYTES_INDEX, &module_bytes)? + .binary_payload_bytes(), + } + } + + fn serialized_length(&self) -> usize { + CalltableSerializationEnvelope::estimate_size(self.serialized_field_lengths()) + } +} + +impl FromBytes for TransactionTarget { + fn from_bytes(bytes: &[u8]) -> Result<(TransactionTarget, &[u8]), Error> { + let (binary_payload, remainder) = CalltableSerializationEnvelope::from_bytes(6, bytes)?; + let window = binary_payload.start_consuming()?.ok_or(Formatting)?; + window.verify_index(TAG_FIELD_INDEX)?; + let (tag, window) = window.deserialize_and_maybe_next::()?; + let to_ret = match tag { + NATIVE_VARIANT => { + if window.is_some() { + return Err(Formatting); + } + Ok(TransactionTarget::Native) + } + STORED_VARIANT => { + let window = window.ok_or(Formatting)?; + window.verify_index(STORED_ID_INDEX)?; + let (id, window) = + window.deserialize_and_maybe_next::()?; + let window = window.ok_or(Formatting)?; + window.verify_index(STORED_RUNTIME_INDEX)?; + let (runtime, window) = + window.deserialize_and_maybe_next::()?; + if window.is_some() { + return Err(Formatting); + } + Ok(TransactionTarget::Stored { id, runtime }) + } + SESSION_VARIANT => { + let window = window.ok_or(Formatting)?; + window.verify_index(SESSION_IS_INSTALL_INDEX)?; + let (is_install_upgrade, window) = 
window.deserialize_and_maybe_next::()?; + let window = window.ok_or(Formatting)?; + window.verify_index(SESSION_RUNTIME_INDEX)?; + let (runtime, window) = + window.deserialize_and_maybe_next::()?; + let window = window.ok_or(Formatting)?; + window.verify_index(SESSION_MODULE_BYTES_INDEX)?; + let (module_bytes, window) = window.deserialize_and_maybe_next::()?; + + if window.is_some() { + return Err(Formatting); + } + Ok(TransactionTarget::Session { + is_install_upgrade, + module_bytes, + runtime, + }) + } + _ => Err(Formatting), + }; + to_ret.map(|endpoint| (endpoint, remainder)) + } +} + +impl Display for TransactionTarget { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + TransactionTarget::Native => write!(formatter, "native"), + TransactionTarget::Stored { id, runtime } => { + write!(formatter, "stored({}, {})", id, runtime,) + } + TransactionTarget::Session { + is_install_upgrade, + module_bytes, + runtime, + } => write!( + formatter, + "session({} module bytes, runtime: {}, is_install_upgrade: {})", + module_bytes.len(), + runtime, + is_install_upgrade, + ), + } + } +} + +impl Debug for TransactionTarget { + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + match self { + TransactionTarget::Native => formatter.debug_struct("Native").finish(), + TransactionTarget::Stored { id, runtime } => formatter + .debug_struct("Stored") + .field("id", id) + .field("runtime", runtime) + .finish(), + TransactionTarget::Session { + is_install_upgrade, + module_bytes, + runtime, + } => { + struct BytesLen(usize); + impl Debug for BytesLen { + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + write!(formatter, "{} bytes", self.0) + } + } + + formatter + .debug_struct("Session") + .field("module_bytes", &BytesLen(module_bytes.len())) + .field("is_install_upgrade", is_install_upgrade) + .field("runtime", runtime) + .finish() + } + } + } +} + +#[cfg(test)] +mod tests { + use crate::{bytesrepr, gens::transaction_target_arb}; + 
use proptest::prelude::*; + + proptest! { + #[test] + fn generative_bytesrepr_roundtrip(val in transaction_target_arb()) { + bytesrepr::test_serialization_roundtrip(&val); + } + } +} diff --git a/types/src/transaction/transaction_v1.rs b/types/src/transaction/transaction_v1.rs new file mode 100644 index 0000000000..41d4b7ac30 --- /dev/null +++ b/types/src/transaction/transaction_v1.rs @@ -0,0 +1,754 @@ +#[cfg(any(feature = "testing", test, feature = "json-schema"))] +pub(crate) mod arg_handling; +mod errors_v1; +pub mod fields_container; +mod transaction_args; +mod transaction_v1_hash; +pub mod transaction_v1_payload; + +#[cfg(any(feature = "std", feature = "testing", test))] +use super::InitiatorAddrAndSecretKey; +use crate::{ + bytesrepr::{self, Error, FromBytes, ToBytes}, + crypto, +}; +#[cfg(any(all(feature = "std", feature = "testing"), test))] +use crate::{testing::TestRng, TransactionConfig, LARGE_WASM_LANE_ID}; +#[cfg(any(feature = "std", test))] +use crate::{ + TransactionEntryPoint, TransactionTarget, TransactionV1Config, AUCTION_LANE_ID, + INSTALL_UPGRADE_LANE_ID, MINT_LANE_ID, +}; +#[cfg(any(feature = "std", test, feature = "testing"))] +use alloc::collections::BTreeMap; +use alloc::{collections::BTreeSet, vec::Vec}; +#[cfg(feature = "datasize")] +use datasize::DataSize; +use errors_v1::FieldDeserializationError; +#[cfg(any(all(feature = "std", feature = "testing"), test))] +use fields_container::FieldsContainer; +#[cfg(any(all(feature = "std", feature = "testing"), test))] +use fields_container::{ENTRY_POINT_MAP_KEY, TARGET_MAP_KEY}; +#[cfg(any(feature = "once_cell", test))] +use once_cell::sync::OnceCell; +#[cfg(any(all(feature = "std", feature = "testing"), test))] +use rand::Rng; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +#[cfg(any(feature = "std", test))] +use serde::{Deserialize, Serialize}; +#[cfg(any(feature = "std", test))] +use thiserror::Error; +use tracing::{error, trace}; +pub use 
transaction_v1_payload::TransactionV1Payload; +#[cfg(any(feature = "std", test))] +use transaction_v1_payload::TransactionV1PayloadJson; + +use super::{ + serialization::{CalltableSerializationEnvelope, CalltableSerializationEnvelopeBuilder}, + Approval, ApprovalsHash, InitiatorAddr, PricingMode, +}; +#[cfg(any(feature = "std", feature = "testing", test))] +use crate::bytesrepr::Bytes; +use crate::{Digest, DisplayIter, SecretKey, TimeDiff, Timestamp}; + +pub use errors_v1::{ + DecodeFromJsonErrorV1 as TransactionV1DecodeFromJsonError, ErrorV1 as TransactionV1Error, + ExcessiveSizeErrorV1 as TransactionV1ExcessiveSizeError, + InvalidTransaction as InvalidTransactionV1, +}; +pub use transaction_args::TransactionArgs; +pub use transaction_v1_hash::TransactionV1Hash; + +use core::{ + cmp, + fmt::{self, Debug, Display, Formatter}, + hash, +}; + +const HASH_FIELD_INDEX: u16 = 0; +const PAYLOAD_FIELD_INDEX: u16 = 1; +const APPROVALS_FIELD_INDEX: u16 = 2; + +/// A unit of work sent by a client to the network, which when executed can cause global state to +/// be altered. 
+#[derive(Clone, Eq, Debug)] +#[cfg_attr(any(feature = "std", test), derive(Serialize, Deserialize))] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr( + feature = "json-schema", + derive(JsonSchema), + schemars(with = "TransactionV1Json") +)] +pub struct TransactionV1 { + hash: TransactionV1Hash, + payload: TransactionV1Payload, + approvals: BTreeSet, + #[cfg_attr(any(all(feature = "std", feature = "once_cell"), test), serde(skip))] + #[cfg_attr( + all(any(feature = "once_cell", test), feature = "datasize"), + data_size(skip) + )] + #[cfg(any(feature = "once_cell", test))] + is_verified: OnceCell>, +} + +#[cfg(any(feature = "std", test))] +impl TryFrom for TransactionV1 { + type Error = TransactionV1JsonError; + fn try_from(transaction_v1_json: TransactionV1Json) -> Result { + Ok(TransactionV1 { + hash: transaction_v1_json.hash, + payload: transaction_v1_json.payload.try_into().map_err(|error| { + TransactionV1JsonError::FailedToMap(format!( + "Failed to map TransactionJson::V1 to Transaction::V1, err: {}", + error + )) + })?, + approvals: transaction_v1_json.approvals, + #[cfg(any(feature = "once_cell", test))] + is_verified: OnceCell::new(), + }) + } +} + +/// A helper struct to represent the transaction as json. 
+#[cfg(any(feature = "std", test))] +#[derive(Clone, Serialize, Deserialize, PartialEq, Eq)] +#[cfg_attr( + feature = "json-schema", + derive(JsonSchema), + schemars( + description = "A unit of work sent by a client to the network, which when executed can \ + cause global state to be altered.", + rename = "TransactionV1", + ) +)] +pub(super) struct TransactionV1Json { + hash: TransactionV1Hash, + payload: TransactionV1PayloadJson, + approvals: BTreeSet, +} + +#[cfg(any(feature = "std", test))] +#[derive(Error, Debug)] +pub(super) enum TransactionV1JsonError { + #[error("{0}")] + FailedToMap(String), +} + +#[cfg(any(feature = "std", test))] +impl TryFrom for TransactionV1Json { + type Error = TransactionV1JsonError; + fn try_from(transaction: TransactionV1) -> Result { + Ok(TransactionV1Json { + hash: transaction.hash, + payload: transaction.payload.try_into().map_err(|error| { + TransactionV1JsonError::FailedToMap(format!( + "Failed to map Transaction::V1 to TransactionJson::V1, err: {}", + error + )) + })?, + approvals: transaction.approvals, + }) + } +} + +impl TransactionV1 { + /// ctor + pub fn new( + hash: TransactionV1Hash, + payload: TransactionV1Payload, + approvals: BTreeSet, + ) -> Self { + Self { + hash, + payload, + approvals, + #[cfg(any(feature = "once_cell", test))] + is_verified: OnceCell::new(), + } + } + + #[cfg(any(feature = "std", test, feature = "testing"))] + pub(crate) fn build( + chain_name: String, + timestamp: Timestamp, + ttl: TimeDiff, + pricing_mode: PricingMode, + fields: BTreeMap, + initiator_addr_and_secret_key: InitiatorAddrAndSecretKey, + ) -> TransactionV1 { + let initiator_addr = initiator_addr_and_secret_key.initiator_addr(); + let transaction_v1_payload = TransactionV1Payload::new( + chain_name, + timestamp, + ttl, + pricing_mode, + initiator_addr, + fields, + ); + let hash = Digest::hash( + transaction_v1_payload + .to_bytes() + .unwrap_or_else(|error| panic!("should serialize body: {}", error)), + ); + let mut transaction = + 
TransactionV1::new(hash.into(), transaction_v1_payload, BTreeSet::new()); + + if let Some(secret_key) = initiator_addr_and_secret_key.secret_key() { + transaction.sign(secret_key); + } + transaction + } + + /// Adds a signature of this transaction's hash to its approvals. + pub fn sign(&mut self, secret_key: &SecretKey) { + let approval = Approval::create(&self.hash.into(), secret_key); + self.approvals.insert(approval); + } + + /// Returns the `ApprovalsHash` of this transaction's approvals. + pub fn hash(&self) -> &TransactionV1Hash { + &self.hash + } + + /// Returns the internal payload of this transaction. + pub fn payload(&self) -> &TransactionV1Payload { + &self.payload + } + + /// Returns transactions approvals. + pub fn approvals(&self) -> &BTreeSet { + &self.approvals + } + + /// Returns the address of the initiator of the transaction. + pub fn initiator_addr(&self) -> &InitiatorAddr { + self.payload.initiator_addr() + } + + /// Returns the name of the chain the transaction should be executed on. + pub fn chain_name(&self) -> &str { + self.payload.chain_name() + } + + /// Returns the creation timestamp of the transaction. + pub fn timestamp(&self) -> Timestamp { + self.payload.timestamp() + } + + /// Returns the duration after the creation timestamp for which the transaction will stay valid. + /// + /// After this duration has ended, the transaction will be considered expired. + pub fn ttl(&self) -> TimeDiff { + self.payload.ttl() + } + + /// Returns `true` if the transaction has expired. + pub fn expired(&self, current_instant: Timestamp) -> bool { + self.payload.expired(current_instant) + } + + /// Returns the pricing mode for the transaction. + pub fn pricing_mode(&self) -> &PricingMode { + self.payload.pricing_mode() + } + + /// Returns the `ApprovalsHash` of this transaction's approvals. 
+ pub fn compute_approvals_hash(&self) -> Result { + ApprovalsHash::compute(&self.approvals) + } + + #[doc(hidden)] + pub fn with_approvals(mut self, approvals: BTreeSet) -> Self { + self.approvals = approvals; + self + } + + #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn apply_approvals(&mut self, approvals: Vec) { + self.approvals.extend(approvals); + } + + /// Returns the payment amount if the txn is using payment limited mode. + #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn payment_amount(&self) -> Option { + if let PricingMode::PaymentLimited { payment_amount, .. } = self.pricing_mode() { + Some(*payment_amount) + } else { + None + } + } + + /// Returns a random, valid but possibly expired transaction. + #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn random(rng: &mut TestRng) -> Self { + let secret_key = SecretKey::random(rng); + let ttl_millis = rng.gen_range(60_000..TransactionConfig::default().max_ttl.millis()); + let timestamp = Timestamp::random(rng); + let container = FieldsContainer::random(rng); + let initiator_addr_and_secret_key = InitiatorAddrAndSecretKey::SecretKey(&secret_key); + let pricing_mode = PricingMode::Fixed { + gas_price_tolerance: 5, + additional_computation_factor: 0, + }; + TransactionV1::build( + rng.random_string(5..10), + timestamp, + TimeDiff::from_millis(ttl_millis), + pricing_mode, + container.to_map().unwrap(), + initiator_addr_and_secret_key, + ) + } + + #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn random_with_lane_and_timestamp_and_ttl( + rng: &mut TestRng, + lane: u8, + maybe_timestamp: Option, + ttl: Option, + ) -> Self { + let secret_key = SecretKey::random(rng); + let timestamp = maybe_timestamp.unwrap_or_else(Timestamp::now); + let ttl_millis = ttl.map_or( + rng.gen_range(60_000..TransactionConfig::default().max_ttl.millis()), + |ttl| ttl.millis(), + ); + let container = FieldsContainer::random_of_lane(rng, lane); + let 
initiator_addr_and_secret_key = InitiatorAddrAndSecretKey::SecretKey(&secret_key); + let pricing_mode = PricingMode::Fixed { + gas_price_tolerance: 5, + additional_computation_factor: 0, + }; + TransactionV1::build( + rng.random_string(5..10), + timestamp, + TimeDiff::from_millis(ttl_millis), + pricing_mode, + container.to_map().unwrap(), + initiator_addr_and_secret_key, + ) + } + + #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn random_with_timestamp_and_ttl( + rng: &mut TestRng, + maybe_timestamp: Option, + ttl: Option, + ) -> Self { + Self::random_with_lane_and_timestamp_and_ttl( + rng, + INSTALL_UPGRADE_LANE_ID, + maybe_timestamp, + ttl, + ) + } + + /// Returns a random transaction with "transfer" category. + #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn random_transfer( + rng: &mut TestRng, + timestamp: Option, + ttl: Option, + ) -> Self { + TransactionV1::random_with_lane_and_timestamp_and_ttl(rng, MINT_LANE_ID, timestamp, ttl) + } + + /// Returns a random transaction with "standard" category. + #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn random_wasm( + rng: &mut TestRng, + timestamp: Option, + ttl: Option, + ) -> Self { + TransactionV1::random_with_lane_and_timestamp_and_ttl( + rng, + LARGE_WASM_LANE_ID, + timestamp, + ttl, + ) + } + + /// Returns a random transaction with "install/upgrade" category. + #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn random_auction( + rng: &mut TestRng, + timestamp: Option, + ttl: Option, + ) -> Self { + TransactionV1::random_with_lane_and_timestamp_and_ttl(rng, AUCTION_LANE_ID, timestamp, ttl) + } + + /// Returns a random transaction with "install/upgrade" category. 
+ #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn random_install_upgrade( + rng: &mut TestRng, + timestamp: Option, + ttl: Option, + ) -> Self { + TransactionV1::random_with_lane_and_timestamp_and_ttl( + rng, + INSTALL_UPGRADE_LANE_ID, + timestamp, + ttl, + ) + } + + /// Returns result of attempting to deserailize a field from the amorphic `fields` container. + pub fn deserialize_field( + &self, + index: u16, + ) -> Result { + self.payload.deserialize_field(index) + } + + /// Returns number of fields in the amorphic `fields` container. + pub fn number_of_fields(&self) -> usize { + self.payload.number_of_fields() + } + + /// Checks if the declared hash of the transaction matches calculated hash. + pub fn has_valid_hash(&self) -> Result<(), InvalidTransactionV1> { + let computed_hash = Digest::hash(self.payload.to_bytes().map_err(|error| { + error!( + ?error, + "Could not serialize transaction for purpose of calculating hash." + ); + InvalidTransactionV1::CouldNotSerializeTransaction + })?); + if TransactionV1Hash::new(computed_hash) != self.hash { + trace!(?self, ?computed_hash, "invalid transaction hash"); + return Err(InvalidTransactionV1::InvalidTransactionHash); + } + Ok(()) + } + + /// Returns `Ok` if and only if: + /// * the transaction hash is correct (see [`TransactionV1::has_valid_hash`] for details) + /// * approvals are non-empty, and + /// * all approvals are valid signatures of the signed hash + pub fn verify(&self) -> Result<(), InvalidTransactionV1> { + #[cfg(any(feature = "once_cell", test))] + return self.is_verified.get_or_init(|| self.do_verify()).clone(); + + #[cfg(not(any(feature = "once_cell", test)))] + self.do_verify() + } + + fn do_verify(&self) -> Result<(), InvalidTransactionV1> { + if self.approvals.is_empty() { + trace!(?self, "transaction has no approvals"); + return Err(InvalidTransactionV1::EmptyApprovals); + } + + self.has_valid_hash()?; + + for (index, approval) in self.approvals.iter().enumerate() { + if let 
Err(error) = crypto::verify(self.hash, approval.signature(), approval.signer()) { + trace!( + ?self, + "failed to verify transaction approval {}: {}", + index, + error + ); + return Err(InvalidTransactionV1::InvalidApproval { index, error }); + } + } + + Ok(()) + } + + /// Returns the hash of the transaction's payload. + pub fn payload_hash(&self) -> Result { + let bytes = self + .payload + .fields() + .to_bytes() + .map_err(|_| InvalidTransactionV1::CannotCalculateFieldsHash)?; + Ok(Digest::hash(bytes)) + } + + fn serialized_field_lengths(&self) -> Vec { + vec![ + self.hash.serialized_length(), + self.payload.serialized_length(), + self.approvals.serialized_length(), + ] + } + + /// Turns `self` into an invalid `TransactionV1` by clearing the `chain_name`, invalidating the + /// transaction hash + #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn invalidate(&mut self) { + self.payload.invalidate(); + } + + #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub(crate) fn get_transaction_target(&self) -> Result { + self.deserialize_field::(TARGET_MAP_KEY) + .map_err(|error| InvalidTransactionV1::CouldNotDeserializeField { error }) + } + + #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub(crate) fn get_transaction_entry_point( + &self, + ) -> Result { + self.deserialize_field::(ENTRY_POINT_MAP_KEY) + .map_err(|error| InvalidTransactionV1::CouldNotDeserializeField { error }) + } + + /// Returns the gas price tolerance for the given transaction. + pub fn gas_price_tolerance(&self) -> u8 { + match self.pricing_mode() { + PricingMode::PaymentLimited { + gas_price_tolerance, + .. + } => *gas_price_tolerance, + PricingMode::Fixed { + gas_price_tolerance, + .. + } => *gas_price_tolerance, + PricingMode::Prepaid { .. } => { + // TODO: Change this when reserve gets implemented. 
+ 0u8 + } + } + } +} + +impl ToBytes for TransactionV1 { + fn to_bytes(&self) -> Result, crate::bytesrepr::Error> { + let expected_payload_sizes = self.serialized_field_lengths(); + CalltableSerializationEnvelopeBuilder::new(expected_payload_sizes)? + .add_field(HASH_FIELD_INDEX, &self.hash)? + .add_field(PAYLOAD_FIELD_INDEX, &self.payload)? + .add_field(APPROVALS_FIELD_INDEX, &self.approvals)? + .binary_payload_bytes() + } + + fn serialized_length(&self) -> usize { + CalltableSerializationEnvelope::estimate_size(self.serialized_field_lengths()) + } +} + +impl FromBytes for TransactionV1 { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (binary_payload, remainder) = CalltableSerializationEnvelope::from_bytes(3, bytes)?; + let window = binary_payload.start_consuming()?.ok_or(Error::Formatting)?; + window.verify_index(HASH_FIELD_INDEX)?; + let (hash, window) = window.deserialize_and_maybe_next::()?; + let window = window.ok_or(Error::Formatting)?; + window.verify_index(PAYLOAD_FIELD_INDEX)?; + let (payload, window) = window.deserialize_and_maybe_next::()?; + let window = window.ok_or(Error::Formatting)?; + window.verify_index(APPROVALS_FIELD_INDEX)?; + let (approvals, window) = window.deserialize_and_maybe_next::>()?; + if window.is_some() { + return Err(Error::Formatting); + } + let from_bytes = TransactionV1 { + hash, + payload, + approvals, + #[cfg(any(feature = "once_cell", test))] + is_verified: OnceCell::new(), + }; + Ok((from_bytes, remainder)) + } +} + +impl Display for TransactionV1 { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + write!( + formatter, + "transaction-v1[{}, {}, approvals: {}]", + self.hash, + self.payload, + DisplayIter::new(self.approvals.iter()) + ) + } +} + +impl hash::Hash for TransactionV1 { + fn hash(&self, state: &mut H) { + // Destructure to make sure we don't accidentally omit fields. 
+ let TransactionV1 { + hash, + payload, + approvals, + #[cfg(any(feature = "once_cell", test))] + is_verified: _, + } = self; + hash.hash(state); + payload.hash(state); + approvals.hash(state); + } +} + +impl PartialEq for TransactionV1 { + fn eq(&self, other: &TransactionV1) -> bool { + // Destructure to make sure we don't accidentally omit fields. + let TransactionV1 { + hash, + payload, + approvals, + #[cfg(any(feature = "once_cell", test))] + is_verified: _, + } = self; + *hash == other.hash && *payload == other.payload && *approvals == other.approvals + } +} + +impl Ord for TransactionV1 { + fn cmp(&self, other: &TransactionV1) -> cmp::Ordering { + // Destructure to make sure we don't accidentally omit fields. + let TransactionV1 { + hash, + payload, + approvals, + #[cfg(any(feature = "once_cell", test))] + is_verified: _, + } = self; + hash.cmp(&other.hash) + .then_with(|| payload.cmp(&other.payload)) + .then_with(|| approvals.cmp(&other.approvals)) + } +} + +impl PartialOrd for TransactionV1 { + fn partial_cmp(&self, other: &TransactionV1) -> Option { + Some(self.cmp(other)) + } +} + +#[cfg(any(feature = "std", test))] +/// Calculates the laned based on properties of the transaction +pub fn calculate_transaction_lane( + entry_point: &TransactionEntryPoint, + target: &TransactionTarget, + pricing_mode: &PricingMode, + config: &TransactionV1Config, + size_estimation: u64, + runtime_args_size: u64, +) -> Result { + use crate::TransactionRuntimeParams; + + use super::get_lane_for_non_install_wasm; + + match target { + TransactionTarget::Native => match entry_point { + TransactionEntryPoint::Transfer | TransactionEntryPoint::Burn => Ok(MINT_LANE_ID), + TransactionEntryPoint::AddBid + | TransactionEntryPoint::WithdrawBid + | TransactionEntryPoint::Delegate + | TransactionEntryPoint::Undelegate + | TransactionEntryPoint::Redelegate + | TransactionEntryPoint::ActivateBid + | TransactionEntryPoint::ChangeBidPublicKey + | TransactionEntryPoint::AddReservations + | 
TransactionEntryPoint::CancelReservations => Ok(AUCTION_LANE_ID), + TransactionEntryPoint::Call => Err(InvalidTransactionV1::EntryPointCannotBeCall), + TransactionEntryPoint::Custom(_) => { + Err(InvalidTransactionV1::EntryPointCannotBeCustom { + entry_point: entry_point.clone(), + }) + } + }, + TransactionTarget::Stored { .. } => match entry_point { + TransactionEntryPoint::Custom(_) => get_lane_for_non_install_wasm( + config, + pricing_mode, + size_estimation, + runtime_args_size, + ) + .map_err(Into::into), + TransactionEntryPoint::Call + | TransactionEntryPoint::Transfer + | TransactionEntryPoint::Burn + | TransactionEntryPoint::AddBid + | TransactionEntryPoint::WithdrawBid + | TransactionEntryPoint::Delegate + | TransactionEntryPoint::Undelegate + | TransactionEntryPoint::Redelegate + | TransactionEntryPoint::ActivateBid + | TransactionEntryPoint::ChangeBidPublicKey + | TransactionEntryPoint::AddReservations + | TransactionEntryPoint::CancelReservations => { + Err(InvalidTransactionV1::EntryPointMustBeCustom { + entry_point: entry_point.clone(), + }) + } + }, + TransactionTarget::Session { + is_install_upgrade, + runtime: TransactionRuntimeParams::VmCasperV1, + .. 
+ } => match entry_point { + TransactionEntryPoint::Call => { + if *is_install_upgrade { + Ok(INSTALL_UPGRADE_LANE_ID) + } else { + get_lane_for_non_install_wasm( + config, + pricing_mode, + size_estimation, + runtime_args_size, + ) + .map_err(Into::into) + } + } + TransactionEntryPoint::Custom(_) + | TransactionEntryPoint::Transfer + | TransactionEntryPoint::Burn + | TransactionEntryPoint::AddBid + | TransactionEntryPoint::WithdrawBid + | TransactionEntryPoint::Delegate + | TransactionEntryPoint::Undelegate + | TransactionEntryPoint::Redelegate + | TransactionEntryPoint::ActivateBid + | TransactionEntryPoint::ChangeBidPublicKey + | TransactionEntryPoint::AddReservations + | TransactionEntryPoint::CancelReservations => { + Err(InvalidTransactionV1::EntryPointMustBeCall { + entry_point: entry_point.clone(), + }) + } + }, + TransactionTarget::Session { + is_install_upgrade, + runtime: TransactionRuntimeParams::VmCasperV2 { .. }, + .. + } => match entry_point { + TransactionEntryPoint::Call | TransactionEntryPoint::Custom(_) => { + if *is_install_upgrade { + Ok(INSTALL_UPGRADE_LANE_ID) + } else { + get_lane_for_non_install_wasm( + config, + pricing_mode, + size_estimation, + runtime_args_size, + ) + .map_err(Into::into) + } + } + TransactionEntryPoint::Transfer + | TransactionEntryPoint::Burn + | TransactionEntryPoint::AddBid + | TransactionEntryPoint::WithdrawBid + | TransactionEntryPoint::Delegate + | TransactionEntryPoint::Undelegate + | TransactionEntryPoint::Redelegate + | TransactionEntryPoint::ActivateBid + | TransactionEntryPoint::ChangeBidPublicKey + | TransactionEntryPoint::AddReservations + | TransactionEntryPoint::CancelReservations => { + Err(InvalidTransactionV1::EntryPointMustBeCall { + entry_point: entry_point.clone(), + }) + } + }, + } +} diff --git a/types/src/transaction/transaction_v1/arg_handling.rs b/types/src/transaction/transaction_v1/arg_handling.rs new file mode 100644 index 0000000000..b1c003ef90 --- /dev/null +++ 
b/types/src/transaction/transaction_v1/arg_handling.rs @@ -0,0 +1,184 @@ +use core::marker::PhantomData; + +use crate::TransferTarget; + +use crate::{bytesrepr::ToBytes, CLTyped, CLValueError, PublicKey, RuntimeArgs, URef, U512}; + +const TRANSFER_ARG_AMOUNT: RequiredArg = RequiredArg::new("amount"); + +const TRANSFER_ARG_SOURCE: OptionalArg = OptionalArg::new("source"); +const TRANSFER_ARG_TARGET: &str = "target"; +// "id" for legacy reasons, if the argument is passed it is [Option] +const TRANSFER_ARG_ID: OptionalArg> = OptionalArg::new("id"); + +const ADD_BID_ARG_PUBLIC_KEY: RequiredArg = RequiredArg::new("public_key"); +const ADD_BID_ARG_DELEGATION_RATE: RequiredArg = RequiredArg::new("delegation_rate"); +const ADD_BID_ARG_AMOUNT: RequiredArg = RequiredArg::new("amount"); + +const ADD_BID_ARG_MINIMUM_DELEGATION_AMOUNT: OptionalArg = + OptionalArg::new("minimum_delegation_amount"); + +const ADD_BID_ARG_MAXIMUM_DELEGATION_AMOUNT: OptionalArg = + OptionalArg::new("maximum_delegation_amount"); + +const ADD_BID_ARG_RESERVED_SLOTS: OptionalArg = OptionalArg::new("reserved_slots"); + +const WITHDRAW_BID_ARG_PUBLIC_KEY: RequiredArg = RequiredArg::new("public_key"); +const WITHDRAW_BID_ARG_AMOUNT: RequiredArg = RequiredArg::new("amount"); + +const DELEGATE_ARG_DELEGATOR: RequiredArg = RequiredArg::new("delegator"); +const DELEGATE_ARG_VALIDATOR: RequiredArg = RequiredArg::new("validator"); +const DELEGATE_ARG_AMOUNT: RequiredArg = RequiredArg::new("amount"); + +const UNDELEGATE_ARG_DELEGATOR: RequiredArg = RequiredArg::new("delegator"); +const UNDELEGATE_ARG_VALIDATOR: RequiredArg = RequiredArg::new("validator"); +const UNDELEGATE_ARG_AMOUNT: RequiredArg = RequiredArg::new("amount"); + +const REDELEGATE_ARG_DELEGATOR: RequiredArg = RequiredArg::new("delegator"); +const REDELEGATE_ARG_VALIDATOR: RequiredArg = RequiredArg::new("validator"); +const REDELEGATE_ARG_AMOUNT: RequiredArg = RequiredArg::new("amount"); +const REDELEGATE_ARG_NEW_VALIDATOR: RequiredArg = 
RequiredArg::new("new_validator"); + +struct RequiredArg { + name: &'static str, + _phantom: PhantomData, +} + +impl RequiredArg { + const fn new(name: &'static str) -> Self { + Self { + name, + _phantom: PhantomData, + } + } + + fn insert(&self, args: &mut RuntimeArgs, value: T) -> Result<(), CLValueError> + where + T: CLTyped + ToBytes, + { + args.insert(self.name, value) + } +} + +struct OptionalArg { + name: &'static str, + _phantom: PhantomData, +} + +impl OptionalArg { + const fn new(name: &'static str) -> Self { + Self { + name, + _phantom: PhantomData, + } + } + + fn insert(&self, args: &mut RuntimeArgs, value: T) -> Result<(), CLValueError> + where + T: CLTyped + ToBytes, + { + args.insert(self.name, value) + } +} + +/// Creates a `RuntimeArgs` suitable for use in a transfer transaction. +pub(crate) fn new_transfer_args, T: Into>( + amount: A, + maybe_source: Option, + target: T, + maybe_id: Option, +) -> Result { + let mut args = RuntimeArgs::new(); + if let Some(source) = maybe_source { + TRANSFER_ARG_SOURCE.insert(&mut args, source)?; + } + match target.into() { + TransferTarget::PublicKey(public_key) => args.insert(TRANSFER_ARG_TARGET, public_key)?, + TransferTarget::AccountHash(account_hash) => { + args.insert(TRANSFER_ARG_TARGET, account_hash)? + } + TransferTarget::URef(uref) => args.insert(TRANSFER_ARG_TARGET, uref)?, + } + TRANSFER_ARG_AMOUNT.insert(&mut args, amount.into())?; + if maybe_id.is_some() { + TRANSFER_ARG_ID.insert(&mut args, maybe_id)?; + } + Ok(args) +} + +/// Creates a `RuntimeArgs` suitable for use in an add_bid transaction. 
+pub(crate) fn new_add_bid_args>( + public_key: PublicKey, + delegation_rate: u8, + amount: A, + maybe_minimum_delegation_amount: Option, + maybe_maximum_delegation_amount: Option, + maybe_reserved_slots: Option, +) -> Result { + let mut args = RuntimeArgs::new(); + ADD_BID_ARG_PUBLIC_KEY.insert(&mut args, public_key)?; + ADD_BID_ARG_DELEGATION_RATE.insert(&mut args, delegation_rate)?; + ADD_BID_ARG_AMOUNT.insert(&mut args, amount.into())?; + if let Some(minimum_delegation_amount) = maybe_minimum_delegation_amount { + ADD_BID_ARG_MINIMUM_DELEGATION_AMOUNT.insert(&mut args, minimum_delegation_amount)?; + }; + if let Some(maximum_delegation_amount) = maybe_maximum_delegation_amount { + ADD_BID_ARG_MAXIMUM_DELEGATION_AMOUNT.insert(&mut args, maximum_delegation_amount)?; + }; + if let Some(reserved_slots) = maybe_reserved_slots { + ADD_BID_ARG_RESERVED_SLOTS.insert(&mut args, reserved_slots)?; + }; + Ok(args) +} + +/// Creates a `RuntimeArgs` suitable for use in a withdraw_bid transaction. +pub fn new_withdraw_bid_args>( + public_key: PublicKey, + amount: A, +) -> Result { + let mut args = RuntimeArgs::new(); + WITHDRAW_BID_ARG_PUBLIC_KEY.insert(&mut args, public_key)?; + WITHDRAW_BID_ARG_AMOUNT.insert(&mut args, amount.into())?; + Ok(args) +} + +/// Creates a `RuntimeArgs` suitable for use in a delegate transaction. +pub(crate) fn new_delegate_args>( + delegator: PublicKey, + validator: PublicKey, + amount: A, +) -> Result { + let mut args = RuntimeArgs::new(); + DELEGATE_ARG_DELEGATOR.insert(&mut args, delegator)?; + DELEGATE_ARG_VALIDATOR.insert(&mut args, validator)?; + DELEGATE_ARG_AMOUNT.insert(&mut args, amount.into())?; + Ok(args) +} + +/// Creates a `RuntimeArgs` suitable for use in an undelegate transaction. 
+pub(crate) fn new_undelegate_args>( + delegator: PublicKey, + validator: PublicKey, + amount: A, +) -> Result { + let mut args = RuntimeArgs::new(); + UNDELEGATE_ARG_DELEGATOR.insert(&mut args, delegator)?; + UNDELEGATE_ARG_VALIDATOR.insert(&mut args, validator)?; + UNDELEGATE_ARG_AMOUNT.insert(&mut args, amount.into())?; + Ok(args) +} + +/// Creates a `RuntimeArgs` suitable for use in a redelegate transaction. +pub(crate) fn new_redelegate_args>( + delegator: PublicKey, + validator: PublicKey, + amount: A, + new_validator: PublicKey, +) -> Result { + let mut args = RuntimeArgs::new(); + REDELEGATE_ARG_DELEGATOR.insert(&mut args, delegator)?; + REDELEGATE_ARG_VALIDATOR.insert(&mut args, validator)?; + REDELEGATE_ARG_AMOUNT.insert(&mut args, amount.into())?; + REDELEGATE_ARG_NEW_VALIDATOR.insert(&mut args, new_validator)?; + Ok(args) +} diff --git a/types/src/transaction/transaction_v1/errors_v1.rs b/types/src/transaction/transaction_v1/errors_v1.rs new file mode 100644 index 0000000000..814803c168 --- /dev/null +++ b/types/src/transaction/transaction_v1/errors_v1.rs @@ -0,0 +1,727 @@ +use alloc::{boxed::Box, string::String, vec::Vec}; +use core::{ + array::TryFromSliceError, + fmt::{self, Display, Formatter}, +}; +#[cfg(feature = "std")] +use std::error::Error as StdError; +#[cfg(any(feature = "testing", test))] +use strum::EnumIter; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +use serde::Serialize; + +#[cfg(doc)] +use super::TransactionV1; +use crate::{ + addressable_entity::ContractRuntimeTag, bytesrepr, crypto, CLType, DisplayIter, PricingMode, + TimeDiff, Timestamp, TransactionEntryPoint, TransactionInvocationTarget, U512, +}; + +#[derive(Clone, Eq, PartialEq, Debug)] +#[cfg_attr(feature = "std", derive(Serialize))] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub enum FieldDeserializationError { + IndexNotExists { index: u16 }, + FromBytesError { index: u16, error: bytesrepr::Error }, + LingeringBytesInField { index: u16 }, +} + +// This 
impl is provided due to a completeness test that we +// have in binary-port. It checks if all variants of this +// error have corresponding binary port error codes +#[cfg(any(feature = "testing", test))] +impl Default for FieldDeserializationError { + fn default() -> Self { + Self::IndexNotExists { index: 0 } + } +} + +/// Returned when a [`TransactionV1`] fails validation. +#[derive(Clone, Eq, PartialEq, Debug)] +#[cfg_attr(feature = "std", derive(Serialize))] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[non_exhaustive] +// This derive should not be removed due to a completeness +// test that we have in binary-port. It checks if all variants +// of this error have corresponding binary port error codes +#[cfg_attr(any(feature = "testing", test), derive(EnumIter))] +pub enum InvalidTransaction { + /// Invalid chain name. + InvalidChainName { + /// The expected chain name. + expected: String, + /// The transaction's chain name. + got: String, + }, + + /// Transaction is too large. + ExcessiveSize(ExcessiveSizeErrorV1), + + /// Excessive time-to-live. + ExcessiveTimeToLive { + /// The time-to-live limit. + max_ttl: TimeDiff, + /// The transaction's time-to-live. + got: TimeDiff, + }, + + /// Transaction's timestamp is in the future. + TimestampInFuture { + /// The node's timestamp when validating the transaction. + validation_timestamp: Timestamp, + /// Any configured leeway added to `validation_timestamp`. + timestamp_leeway: TimeDiff, + /// The transaction's timestamp. + got: Timestamp, + }, + + /// The provided body hash does not match the actual hash of the body. + InvalidBodyHash, + + /// The provided transaction hash does not match the actual hash of the transaction. + InvalidTransactionHash, + + /// The transaction has no approvals. + EmptyApprovals, + + /// Invalid approval. + InvalidApproval { + /// The index of the approval at fault. + index: usize, + /// The approval verification error. 
+ error: crypto::Error, + }, + + /// Excessive length of transaction's runtime args. + ExcessiveArgsLength { + /// The byte size limit of runtime arguments. + max_length: usize, + /// The length of the transaction's runtime arguments. + got: usize, + }, + + /// The amount of approvals on the transaction exceeds the configured limit. + ExcessiveApprovals { + /// The chainspec limit for max_associated_keys. + max_associated_keys: u32, + /// Number of approvals on the transaction. + got: u32, + }, + + /// The payment amount associated with the transaction exceeds the block gas limit. + ExceedsBlockGasLimit { + /// Configured block gas limit. + block_gas_limit: u64, + /// The transaction's calculated gas limit. + got: Box, + }, + + /// Missing a required runtime arg. + MissingArg { + /// The name of the missing arg. + arg_name: String, + }, + + /// Given runtime arg is not one of the expected types. + UnexpectedArgType { + /// The name of the invalid arg. + arg_name: String, + /// The choice of valid types for the given runtime arg. + expected: Vec, + /// The provided type of the given runtime arg. + got: String, + }, + + /// Failed to deserialize the given runtime arg. + InvalidArg { + /// The name of the invalid arg. + arg_name: String, + /// The deserialization error. + error: bytesrepr::Error, + }, + + /// Insufficient transfer amount. + InsufficientTransferAmount { + /// The minimum transfer amount. + minimum: u64, + /// The attempted transfer amount. + attempted: U512, + }, + + /// Insufficient burn amount. + InsufficientBurnAmount { + /// The minimum burn amount. + minimum: u64, + /// The attempted burn amount. + attempted: U512, + }, + + /// The entry point for this transaction target cannot be `call`. + EntryPointCannotBeCall, + /// The entry point for this transaction target cannot be `TransactionEntryPoint::Custom`. + EntryPointCannotBeCustom { + /// The invalid entry point. 
+ entry_point: TransactionEntryPoint, + }, + /// The entry point for this transaction target must be `TransactionEntryPoint::Custom`. + EntryPointMustBeCustom { + /// The invalid entry point. + entry_point: TransactionEntryPoint, + }, + /// The entry point for this transaction target must be `TransactionEntryPoint::Call`. + EntryPointMustBeCall { + /// The invalid entry point. + entry_point: TransactionEntryPoint, + }, + /// The transaction has empty module bytes. + EmptyModuleBytes, + /// Attempt to factor the amount over the gas_price failed. + GasPriceConversion { + /// The base amount. + amount: u64, + /// The attempted gas price. + gas_price: u8, + }, + /// Unable to calculate gas limit. + UnableToCalculateGasLimit, + /// Unable to calculate gas cost. + UnableToCalculateGasCost, + /// Invalid combination of pricing handling and pricing mode. + InvalidPricingMode { + /// The pricing mode as specified by the transaction. + price_mode: PricingMode, + }, + /// The transaction provided is not supported. + InvalidTransactionLane(u8), + /// Could not match v1 with transaction lane + NoLaneMatch, + /// Gas price tolerance too low. + GasPriceToleranceTooLow { + /// The minimum gas price tolerance. + min_gas_price_tolerance: u8, + /// The provided gas price tolerance. + provided_gas_price_tolerance: u8, + }, + /// Error when trying to deserialize one of the transactionV1 payload fields. + CouldNotDeserializeField { + /// Underlying reason why the deserialization failed + error: FieldDeserializationError, + }, + + /// Unable to calculate hash for payloads transaction. + CannotCalculateFieldsHash, + + /// The transactions field map had entries that were unexpected + UnexpectedTransactionFieldEntries, + /// The transaction requires named arguments. + ExpectedNamedArguments, + /// The transaction required bytes arguments. + ExpectedBytesArguments, + /// The transaction runtime is invalid. 
+ InvalidTransactionRuntime { + /// The expected runtime as specified by the chainspec. + expected: ContractRuntimeTag, + }, + /// The transaction is missing a seed field. + MissingSeed, + // Pricing mode not implemented yet + PricingModeNotSupported, + // Invalid payment amount. + InvalidPaymentAmount, + /// Unexpected entry point detected. + UnexpectedEntryPoint { + entry_point: TransactionEntryPoint, + lane_id: u8, + }, + /// Could not serialize transaction + CouldNotSerializeTransaction, + + /// Insufficient value for amount argument. + InsufficientAmount { + /// The attempted amount. + attempted: U512, + }, + + /// Invalid minimum delegation amount. + InvalidMinimumDelegationAmount { + /// The lowest allowed amount. + floor: u64, + /// The attempted amount. + attempted: u64, + }, + + /// Invalid maximum delegation amount. + InvalidMaximumDelegationAmount { + /// The highest allowed amount. + ceiling: u64, + /// The attempted amount. + attempted: u64, + }, + + /// Invalid reserved slots. + InvalidReservedSlots { + /// The highest allowed amount. + ceiling: u32, + /// The attempted amount. + attempted: u64, + }, + + /// Invalid delegation amount. + InvalidDelegationAmount { + /// The highest allowed amount. + ceiling: u64, + /// The attempted amount. + attempted: U512, + }, + /// The transaction invocation target is unsupported under V2 runtime. + /// + /// This error is returned when the transaction invocation target is not supported by the + /// current runtime version. 
+ UnsupportedInvocationTarget { + id: Option, + }, +} + +impl Display for InvalidTransaction { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + InvalidTransaction::InvalidChainName { expected, got } => { + write!( + formatter, + "invalid chain name: expected {expected}, got {got}" + ) + } + InvalidTransaction::ExcessiveSize(error) => { + write!(formatter, "transaction size too large: {error}") + } + InvalidTransaction::ExcessiveTimeToLive { max_ttl, got } => { + write!( + formatter, + "time-to-live of {got} exceeds limit of {max_ttl}" + ) + } + InvalidTransaction::TimestampInFuture { + validation_timestamp, + timestamp_leeway, + got, + } => { + write!( + formatter, + "timestamp of {got} is later than node's validation timestamp of \ + {validation_timestamp} plus leeway of {timestamp_leeway}" + ) + } + InvalidTransaction::InvalidBodyHash => { + write!( + formatter, + "the provided hash does not match the actual hash of the transaction body" + ) + } + InvalidTransaction::InvalidTransactionHash => { + write!( + formatter, + "the provided hash does not match the actual hash of the transaction" + ) + } + InvalidTransaction::EmptyApprovals => { + write!(formatter, "the transaction has no approvals") + } + InvalidTransaction::InvalidApproval { index, error } => { + write!( + formatter, + "the transaction approval at index {index} is invalid: {error}" + ) + } + InvalidTransaction::ExcessiveArgsLength { max_length, got } => { + write!( + formatter, + "serialized transaction runtime args of {got} bytes exceeds limit of \ + {max_length} bytes" + ) + } + InvalidTransaction::ExcessiveApprovals { + max_associated_keys, + got, + } => { + write!( + formatter, + "number of transaction approvals {got} exceeds the maximum number of \ + associated keys {max_associated_keys}", + ) + } + InvalidTransaction::ExceedsBlockGasLimit { + block_gas_limit, + got, + } => { + write!( + formatter, + "payment amount of {got} exceeds the block gas limit of 
{block_gas_limit}" + ) + } + InvalidTransaction::MissingArg { arg_name } => { + write!(formatter, "missing required runtime argument '{arg_name}'") + } + InvalidTransaction::UnexpectedArgType { + arg_name, + expected, + got, + } => { + write!( + formatter, + "expected type of '{arg_name}' runtime argument to be one of {}, but got {got}", + DisplayIter::new(expected) + ) + } + InvalidTransaction::InvalidArg { arg_name, error } => { + write!(formatter, "invalid runtime argument '{arg_name}': {error}") + } + InvalidTransaction::InsufficientTransferAmount { minimum, attempted } => { + write!( + formatter, + "insufficient transfer amount; minimum: {minimum} attempted: {attempted}" + ) + } + InvalidTransaction::EntryPointCannotBeCall => { + write!(formatter, "entry point cannot be call") + } + InvalidTransaction::EntryPointCannotBeCustom { entry_point } => { + write!(formatter, "entry point cannot be custom: {entry_point}") + } + InvalidTransaction::EntryPointMustBeCustom { entry_point } => { + write!(formatter, "entry point must be custom: {entry_point}") + } + InvalidTransaction::EmptyModuleBytes => { + write!(formatter, "the transaction has empty module bytes") + } + InvalidTransaction::GasPriceConversion { amount, gas_price } => { + write!( + formatter, + "failed to divide the amount {} by the gas price {}", + amount, gas_price + ) + } + InvalidTransaction::UnableToCalculateGasLimit => { + write!(formatter, "unable to calculate gas limit", ) + } + InvalidTransaction::UnableToCalculateGasCost => { + write!(formatter, "unable to calculate gas cost", ) + } + InvalidTransaction::InvalidPricingMode { price_mode } => { + write!( + formatter, + "received a transaction with an invalid mode {price_mode}" + ) + } + InvalidTransaction::InvalidTransactionLane(kind) => { + write!( + formatter, + "received a transaction with an invalid kind {kind}" + ) + } + InvalidTransaction::GasPriceToleranceTooLow { + min_gas_price_tolerance, + provided_gas_price_tolerance, + } => { + write!( 
+ formatter, + "received a transaction with gas price tolerance {} but this chain will only go as low as {}", + provided_gas_price_tolerance, min_gas_price_tolerance + ) + } + InvalidTransaction::CouldNotDeserializeField { error } => { + match error { + FieldDeserializationError::IndexNotExists { index } => write!( + formatter, + "tried to deserialize a field under index {} but it is not present in the payload", + index + ), + FieldDeserializationError::FromBytesError { index, error } => write!( + formatter, + "tried to deserialize a field under index {} but it failed with error: {}", + index, + error + ), + FieldDeserializationError::LingeringBytesInField { index } => write!( + formatter, + "tried to deserialize a field under index {} but after deserialization there were still bytes left", + index, + ), + } + } + InvalidTransaction::CannotCalculateFieldsHash => write!( + formatter, + "cannot calculate a hash digest for the transaction" + ), + InvalidTransaction::EntryPointMustBeCall { entry_point } => { + write!(formatter, "entry point must be call: {entry_point}") + } + InvalidTransaction::NoLaneMatch => write!(formatter, "Could not match any lane to the specified transaction"), + InvalidTransaction::UnexpectedTransactionFieldEntries => write!(formatter, "There were entries in the fields map of the payload that could not be matched"), + InvalidTransaction::ExpectedNamedArguments => { + write!(formatter, "transaction requires named arguments") + } + InvalidTransaction::ExpectedBytesArguments => { + write!(formatter, "transaction requires bytes arguments") + } + InvalidTransaction::InvalidTransactionRuntime { expected } => { + write!( + formatter, + "invalid transaction runtime: expected {expected}" + ) + } + InvalidTransaction::MissingSeed => { + write!(formatter, "missing seed for install or upgrade") + } + InvalidTransaction::PricingModeNotSupported => { + write!(formatter, "Pricing mode not supported") + } + InvalidTransaction::InvalidPaymentAmount => { + 
write!(formatter, "invalid payment amount") + } + InvalidTransaction::UnexpectedEntryPoint { + entry_point, lane_id + } => { + write!(formatter, "unexpected entry_point {} lane_id {}", entry_point, lane_id) + } + InvalidTransaction::InsufficientBurnAmount { minimum, attempted } => { + write!(formatter, "insufficient burn amount: {minimum} {attempted}") + } + InvalidTransaction::CouldNotSerializeTransaction => write!(formatter, "Could not serialize transaction."), + InvalidTransaction::InsufficientAmount { attempted } => { + write!( + formatter, + "the value provided for the argument ({attempted}) named amount is too low.", + ) + } + InvalidTransaction::InvalidMinimumDelegationAmount { floor, attempted } => { + write!( + formatter, + "the value provided for the minimum delegation amount ({attempted}) cannot be lower than {floor}.", + )} + InvalidTransaction::InvalidMaximumDelegationAmount { ceiling, attempted } => { + write!( + formatter, + "the value provided for the maximum delegation amount ({ceiling}) cannot be higher than {attempted}.", + )} + InvalidTransaction::InvalidReservedSlots { ceiling, attempted } => { + write!( + formatter, + "the value provided for reserved slots ({ceiling}) cannot be higher than {attempted}.", + )} + InvalidTransaction::InvalidDelegationAmount { ceiling, attempted } => { + write!( + formatter, + "the value provided for the delegation amount ({attempted}) cannot be higher than {ceiling}.", + )} + InvalidTransaction::UnsupportedInvocationTarget { id: Some(target) } => { + write!( + formatter, + "the transaction invocation target is unsupported under V2 runtime: {target:?}", + ) + } + InvalidTransaction::UnsupportedInvocationTarget { id :None} => { + write!( + formatter, + "the transaction invocation target is unsupported under V2 runtime", + ) + } + } + } +} + +impl From for InvalidTransaction { + fn from(error: ExcessiveSizeErrorV1) -> Self { + InvalidTransaction::ExcessiveSize(error) + } +} + +#[cfg(feature = "std")] +impl StdError 
for InvalidTransaction { + fn source(&self) -> Option<&(dyn StdError + 'static)> { + match self { + InvalidTransaction::InvalidApproval { error, .. } => Some(error), + InvalidTransaction::InvalidArg { error, .. } => Some(error), + InvalidTransaction::InvalidChainName { .. } + | InvalidTransaction::ExcessiveSize(_) + | InvalidTransaction::ExcessiveTimeToLive { .. } + | InvalidTransaction::TimestampInFuture { .. } + | InvalidTransaction::InvalidBodyHash + | InvalidTransaction::InvalidTransactionHash + | InvalidTransaction::EmptyApprovals + | InvalidTransaction::ExcessiveArgsLength { .. } + | InvalidTransaction::ExcessiveApprovals { .. } + | InvalidTransaction::ExceedsBlockGasLimit { .. } + | InvalidTransaction::MissingArg { .. } + | InvalidTransaction::UnexpectedArgType { .. } + | InvalidTransaction::InsufficientTransferAmount { .. } + | InvalidTransaction::EntryPointCannotBeCall + | InvalidTransaction::EntryPointCannotBeCustom { .. } + | InvalidTransaction::EntryPointMustBeCustom { .. } + | InvalidTransaction::EntryPointMustBeCall { .. } + | InvalidTransaction::EmptyModuleBytes + | InvalidTransaction::GasPriceConversion { .. } + | InvalidTransaction::UnableToCalculateGasLimit + | InvalidTransaction::UnableToCalculateGasCost + | InvalidTransaction::InvalidPricingMode { .. } + | InvalidTransaction::GasPriceToleranceTooLow { .. } + | InvalidTransaction::InvalidTransactionLane(_) + | InvalidTransaction::CannotCalculateFieldsHash + | InvalidTransaction::NoLaneMatch + | InvalidTransaction::UnexpectedTransactionFieldEntries => None, + InvalidTransaction::CouldNotDeserializeField { error } => match error { + FieldDeserializationError::IndexNotExists { .. } + | FieldDeserializationError::LingeringBytesInField { .. } => None, + FieldDeserializationError::FromBytesError { error, .. } => Some(error), + }, + InvalidTransaction::ExpectedNamedArguments + | InvalidTransaction::ExpectedBytesArguments + | InvalidTransaction::InvalidTransactionRuntime { .. 
} + | InvalidTransaction::MissingSeed + | InvalidTransaction::PricingModeNotSupported + | InvalidTransaction::InvalidPaymentAmount + | InvalidTransaction::InsufficientBurnAmount { .. } + | InvalidTransaction::UnexpectedEntryPoint { .. } + | InvalidTransaction::CouldNotSerializeTransaction + | InvalidTransaction::InsufficientAmount { .. } + | InvalidTransaction::InvalidMinimumDelegationAmount { .. } + | InvalidTransaction::InvalidMaximumDelegationAmount { .. } + | InvalidTransaction::InvalidReservedSlots { .. } + | InvalidTransaction::InvalidDelegationAmount { .. } + | InvalidTransaction::UnsupportedInvocationTarget { .. } => None, + } + } +} + +impl InvalidTransaction { + pub fn unexpected_arg_type(arg_name: String, expected: Vec, got: CLType) -> Self { + let expected = expected.iter().map(|el| format!("{}", el)).collect(); + InvalidTransaction::UnexpectedArgType { + arg_name, + expected, + got: format!("{}", got), + } + } +} +/// Error returned when a transaction is too large. +#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Debug, Serialize)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +//Default is needed only in testing to meet EnumIter needs +#[cfg_attr(any(feature = "testing", test), derive(Default))] +pub struct ExcessiveSizeErrorV1 { + /// The maximum permitted serialized transaction size, in bytes. + pub max_transaction_size: u32, + /// The serialized size of the transaction provided, in bytes. + pub actual_transaction_size: usize, +} + +impl Display for ExcessiveSizeErrorV1 { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + write!( + formatter, + "transaction size of {} bytes exceeds limit of {}", + self.actual_transaction_size, self.max_transaction_size + ) + } +} + +#[cfg(feature = "std")] +impl StdError for ExcessiveSizeErrorV1 {} + +/// Errors other than validation failures relating to Transactions. +#[derive(Debug)] +#[non_exhaustive] +pub enum ErrorV1 { + /// Error while encoding to JSON. 
+ EncodeToJson(serde_json::Error), + + /// Error while decoding from JSON. + DecodeFromJson(DecodeFromJsonErrorV1), + + /// Unable to calculate payment. + InvalidPayment, +} + +impl From for ErrorV1 { + fn from(error: serde_json::Error) -> Self { + ErrorV1::EncodeToJson(error) + } +} + +impl From for ErrorV1 { + fn from(error: DecodeFromJsonErrorV1) -> Self { + ErrorV1::DecodeFromJson(error) + } +} + +impl Display for ErrorV1 { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + ErrorV1::EncodeToJson(error) => { + write!(formatter, "encoding to json: {}", error) + } + ErrorV1::DecodeFromJson(error) => { + write!(formatter, "decoding from json: {}", error) + } + ErrorV1::InvalidPayment => write!(formatter, "invalid payment"), + } + } +} + +#[cfg(feature = "std")] +impl StdError for ErrorV1 { + fn source(&self) -> Option<&(dyn StdError + 'static)> { + match self { + ErrorV1::EncodeToJson(error) => Some(error), + ErrorV1::DecodeFromJson(error) => Some(error), + ErrorV1::InvalidPayment => None, + } + } +} + +/// Error while decoding a `TransactionV1` from JSON. +#[derive(Debug)] +#[non_exhaustive] +pub enum DecodeFromJsonErrorV1 { + /// Failed to decode from base 16. + FromHex(base16::DecodeError), + + /// Failed to convert slice to array. 
+ TryFromSlice(TryFromSliceError), +} + +impl From for DecodeFromJsonErrorV1 { + fn from(error: base16::DecodeError) -> Self { + DecodeFromJsonErrorV1::FromHex(error) + } +} + +impl From for DecodeFromJsonErrorV1 { + fn from(error: TryFromSliceError) -> Self { + DecodeFromJsonErrorV1::TryFromSlice(error) + } +} + +impl Display for DecodeFromJsonErrorV1 { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + DecodeFromJsonErrorV1::FromHex(error) => { + write!(formatter, "{}", error) + } + DecodeFromJsonErrorV1::TryFromSlice(error) => { + write!(formatter, "{}", error) + } + } + } +} + +#[cfg(feature = "std")] +impl StdError for DecodeFromJsonErrorV1 { + fn source(&self) -> Option<&(dyn StdError + 'static)> { + match self { + DecodeFromJsonErrorV1::FromHex(error) => Some(error), + DecodeFromJsonErrorV1::TryFromSlice(error) => Some(error), + } + } +} diff --git a/types/src/transaction/transaction_v1/fields_container.rs b/types/src/transaction/transaction_v1/fields_container.rs new file mode 100644 index 0000000000..985f31da8e --- /dev/null +++ b/types/src/transaction/transaction_v1/fields_container.rs @@ -0,0 +1,290 @@ +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +#[cfg(any(feature = "std", feature = "testing", test))] +use crate::{ + bytesrepr::{Bytes, ToBytes}, + transaction::transaction_v1::*, + TransactionEntryPoint, TransactionScheduling, TransactionTarget, +}; +#[cfg(any(feature = "testing", test))] +use crate::{ + PublicKey, RuntimeArgs, TransactionInvocationTarget, TransferTarget, AUCTION_LANE_ID, + INSTALL_UPGRADE_LANE_ID, MINT_LANE_ID, +}; +#[cfg(any(feature = "std", feature = "testing", test))] +use alloc::collections::BTreeMap; +#[cfg(any(feature = "testing", test))] +use rand::{Rng, RngCore}; + +#[cfg(any(feature = "std", feature = "testing", feature = "gens", test))] +pub(crate) const ARGS_MAP_KEY: u16 = 0; +#[cfg(any(feature = "std", feature = "testing", feature = "gens", test))] +pub(crate) const 
TARGET_MAP_KEY: u16 = 1; +#[cfg(any(feature = "std", feature = "testing", feature = "gens", test))] +pub(crate) const ENTRY_POINT_MAP_KEY: u16 = 2; +#[cfg(any(feature = "std", feature = "testing", feature = "gens", test))] +pub(crate) const SCHEDULING_MAP_KEY: u16 = 3; + +#[cfg(any(feature = "std", feature = "testing", feature = "gens", test))] +#[derive(Clone, Eq, PartialEq, Debug)] +pub(crate) enum FieldsContainerError { + CouldNotSerializeField { field_index: u16 }, +} + +#[cfg(any(feature = "std", feature = "testing", feature = "gens", test))] +pub(crate) struct FieldsContainer { + pub(super) args: TransactionArgs, + pub(super) target: TransactionTarget, + pub(super) entry_point: TransactionEntryPoint, + pub(super) scheduling: TransactionScheduling, +} + +#[cfg(any(feature = "std", feature = "testing", feature = "gens", test))] +impl FieldsContainer { + pub(crate) fn new( + args: TransactionArgs, + target: TransactionTarget, + entry_point: TransactionEntryPoint, + scheduling: TransactionScheduling, + ) -> Self { + FieldsContainer { + args, + target, + entry_point, + scheduling, + } + } + + pub(crate) fn to_map(&self) -> Result, FieldsContainerError> { + let mut map: BTreeMap = BTreeMap::new(); + map.insert( + ARGS_MAP_KEY, + self.args.to_bytes().map(Into::into).map_err(|_| { + FieldsContainerError::CouldNotSerializeField { + field_index: ARGS_MAP_KEY, + } + })?, + ); + map.insert( + TARGET_MAP_KEY, + self.target.to_bytes().map(Into::into).map_err(|_| { + FieldsContainerError::CouldNotSerializeField { + field_index: TARGET_MAP_KEY, + } + })?, + ); + map.insert( + ENTRY_POINT_MAP_KEY, + self.entry_point.to_bytes().map(Into::into).map_err(|_| { + FieldsContainerError::CouldNotSerializeField { + field_index: ENTRY_POINT_MAP_KEY, + } + })?, + ); + map.insert( + SCHEDULING_MAP_KEY, + self.scheduling.to_bytes().map(Into::into).map_err(|_| { + FieldsContainerError::CouldNotSerializeField { + field_index: SCHEDULING_MAP_KEY, + } + })?, + ); + Ok(map) + } + + /// Returns 
a random `FieldsContainer`. + #[cfg(any(feature = "testing", test))] + pub(crate) fn random(rng: &mut TestRng) -> Self { + match rng.gen_range(0..8) { + 0 => { + let amount = rng.gen_range(2_500_000_000..=u64::MAX); + let maybe_source = if rng.gen() { Some(rng.gen()) } else { None }; + let target = TransferTarget::random(rng); + let maybe_id = rng.gen::().then(|| rng.gen()); + let args = arg_handling::new_transfer_args(amount, maybe_source, target, maybe_id) + .unwrap(); + FieldsContainer::new( + TransactionArgs::Named(args), + TransactionTarget::Native, + TransactionEntryPoint::Transfer, + TransactionScheduling::random(rng), + ) + } + 1 => { + let public_key = PublicKey::random(rng); + let delegation_rate = rng.gen(); + let amount = rng.gen::(); + let minimum_delegation_amount = rng.gen::().then(|| rng.gen()); + let maximum_delegation_amount = + minimum_delegation_amount.map(|minimum_delegation_amount| { + minimum_delegation_amount + rng.gen::() as u64 + }); + let reserved_slots = rng.gen::().then(|| rng.gen::()); + let args = arg_handling::new_add_bid_args( + public_key, + delegation_rate, + amount, + minimum_delegation_amount, + maximum_delegation_amount, + reserved_slots, + ) + .unwrap(); + FieldsContainer::new( + TransactionArgs::Named(args), + TransactionTarget::Native, + TransactionEntryPoint::AddBid, + TransactionScheduling::random(rng), + ) + } + 2 => { + let public_key = PublicKey::random(rng); + let amount = rng.gen::(); + let args = arg_handling::new_withdraw_bid_args(public_key, amount).unwrap(); + FieldsContainer::new( + TransactionArgs::Named(args), + TransactionTarget::Native, + TransactionEntryPoint::WithdrawBid, + TransactionScheduling::random(rng), + ) + } + 3 => { + let delegator = PublicKey::random(rng); + let validator = PublicKey::random(rng); + let amount = rng.gen::(); + let args = arg_handling::new_delegate_args(delegator, validator, amount).unwrap(); + FieldsContainer::new( + TransactionArgs::Named(args), + TransactionTarget::Native, + 
TransactionEntryPoint::Delegate, + TransactionScheduling::random(rng), + ) + } + 4 => { + let delegator = PublicKey::random(rng); + let validator = PublicKey::random(rng); + let amount = rng.gen::(); + let args = arg_handling::new_undelegate_args(delegator, validator, amount).unwrap(); + FieldsContainer::new( + TransactionArgs::Named(args), + TransactionTarget::Native, + TransactionEntryPoint::Undelegate, + TransactionScheduling::random(rng), + ) + } + 5 => { + let delegator = PublicKey::random(rng); + let validator = PublicKey::random(rng); + let amount = rng.gen::(); + let new_validator = PublicKey::random(rng); + let args = + arg_handling::new_redelegate_args(delegator, validator, amount, new_validator) + .unwrap(); + FieldsContainer::new( + TransactionArgs::Named(args), + TransactionTarget::Native, + TransactionEntryPoint::Redelegate, + TransactionScheduling::random(rng), + ) + } + 6 => Self::random_standard(rng), + 7 => { + let mut buffer = vec![0u8; rng.gen_range(1..100)]; + rng.fill_bytes(buffer.as_mut()); + let is_install_upgrade = rng.gen(); + let target = TransactionTarget::Session { + is_install_upgrade, + module_bytes: Bytes::from(buffer), + runtime: crate::TransactionRuntimeParams::VmCasperV1, + }; + FieldsContainer::new( + TransactionArgs::Named(RuntimeArgs::random(rng)), + target, + TransactionEntryPoint::Call, + TransactionScheduling::random(rng), + ) + } + _ => unreachable!(), + } + } + + /// Returns a random `FieldsContainer`. 
+ #[cfg(any(feature = "testing", test))] + pub fn random_of_lane(rng: &mut TestRng, lane_id: u8) -> Self { + match lane_id { + MINT_LANE_ID => Self::random_transfer(rng), + AUCTION_LANE_ID => Self::random_staking(rng), + INSTALL_UPGRADE_LANE_ID => Self::random_install_upgrade(rng), + _ => Self::random_standard(rng), + } + } + + #[cfg(any(feature = "testing", test))] + fn random_transfer(rng: &mut TestRng) -> Self { + let amount = rng.gen_range(2_500_000_000..=u64::MAX); + let maybe_source = if rng.gen() { Some(rng.gen()) } else { None }; + let target = TransferTarget::random(rng); + let maybe_id = rng.gen::().then(|| rng.gen()); + let args = arg_handling::new_transfer_args(amount, maybe_source, target, maybe_id).unwrap(); + FieldsContainer::new( + TransactionArgs::Named(args), + TransactionTarget::Native, + TransactionEntryPoint::Transfer, + TransactionScheduling::random(rng), + ) + } + + #[cfg(any(feature = "testing", test))] + fn random_install_upgrade(rng: &mut TestRng) -> Self { + let target = TransactionTarget::Session { + module_bytes: Bytes::from(rng.random_vec(0..100)), + runtime: crate::TransactionRuntimeParams::VmCasperV1, + is_install_upgrade: true, + }; + FieldsContainer::new( + TransactionArgs::Named(RuntimeArgs::random(rng)), + target, + TransactionEntryPoint::Call, + TransactionScheduling::random(rng), + ) + } + + #[cfg(any(feature = "testing", test))] + fn random_staking(rng: &mut TestRng) -> Self { + let public_key = PublicKey::random(rng); + let delegation_rate = rng.gen(); + let amount = rng.gen::(); + let minimum_delegation_amount = rng.gen::().then(|| rng.gen()); + let maximum_delegation_amount = minimum_delegation_amount + .map(|minimum_delegation_amount| minimum_delegation_amount + rng.gen::() as u64); + let reserved_slots = rng.gen::().then(|| rng.gen::()); + let args = arg_handling::new_add_bid_args( + public_key, + delegation_rate, + amount, + minimum_delegation_amount, + maximum_delegation_amount, + reserved_slots, + ) + .unwrap(); + 
FieldsContainer::new( + TransactionArgs::Named(args), + TransactionTarget::Native, + TransactionEntryPoint::AddBid, + TransactionScheduling::random(rng), + ) + } + + #[cfg(any(feature = "testing", test))] + fn random_standard(rng: &mut TestRng) -> Self { + let target = TransactionTarget::Stored { + id: TransactionInvocationTarget::random(rng), + runtime: crate::transaction::transaction_target::TransactionRuntimeParams::VmCasperV1, + }; + FieldsContainer::new( + TransactionArgs::Named(RuntimeArgs::random(rng)), + target, + TransactionEntryPoint::Custom(rng.random_string(1..11)), + TransactionScheduling::random(rng), + ) + } +} diff --git a/types/src/transaction/transaction_v1/transaction_args.rs b/types/src/transaction/transaction_v1/transaction_args.rs new file mode 100644 index 0000000000..8c95568ee3 --- /dev/null +++ b/types/src/transaction/transaction_v1/transaction_args.rs @@ -0,0 +1,158 @@ +use crate::{ + bytesrepr::{self, Bytes, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + CLTyped, CLValueError, RuntimeArgs, +}; +use alloc::{string::String, vec::Vec}; +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +#[cfg(any(feature = "std", test))] +use serde::{Deserialize, Serialize}; + +/// The arguments of a transaction, which can be either a named set of runtime arguments or a +/// chunked bytes. +#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Debug)] +#[cfg_attr( + any(feature = "std", test), + derive(Serialize, Deserialize), + serde(deny_unknown_fields) +)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr( + feature = "json-schema", + derive(JsonSchema), + schemars(description = "Body of a `TransactionArgs`.") +)] +pub enum TransactionArgs { + /// Named runtime arguments. + Named(RuntimeArgs), + /// Bytesrepr bytes. + Bytesrepr(Bytes), +} + +impl TransactionArgs { + /// Returns `RuntimeArgs` if the transaction arguments are named. 
+ pub fn as_named(&self) -> Option<&RuntimeArgs> { + match self { + TransactionArgs::Named(args) => Some(args), + TransactionArgs::Bytesrepr(_) => None, + } + } + + /// Returns `RuntimeArgs` if the transaction arguments are mnamed. + pub fn into_named(self) -> Option { + match self { + TransactionArgs::Named(args) => Some(args), + TransactionArgs::Bytesrepr(_) => None, + } + } + + /// Returns `Bytes` if the transaction arguments are chunked. + pub fn into_bytesrepr(self) -> Option { + match self { + TransactionArgs::Named(_) => None, + TransactionArgs::Bytesrepr(bytes) => Some(bytes), + } + } + + /// Returns `Bytes` if the transaction arguments are bytes. + pub fn as_bytesrepr(&self) -> Option<&Bytes> { + match self { + TransactionArgs::Named(_) => None, + TransactionArgs::Bytesrepr(bytes) => Some(bytes), + } + } + + /// Inserts a key-value pair into the named runtime arguments. + pub fn insert(&mut self, key: K, value: V) -> Result<(), CLValueError> + where + K: Into, + V: CLTyped + ToBytes, + { + match self { + TransactionArgs::Named(args) => { + args.insert(key, value)?; + Ok(()) + } + TransactionArgs::Bytesrepr(_) => { + Err(CLValueError::Serialization(bytesrepr::Error::Formatting)) + } + } + } + + /// Returns `true` if the transaction args is [`Named`]. + /// + /// [`Named`]: TransactionArgs::Named + #[must_use] + pub fn is_named(&self) -> bool { + matches!(self, Self::Named(..)) + } + + /// Returns `true` if the transaction args is [`Bytesrepr`]. 
+ /// + /// [`Bytesrepr`]: TransactionArgs::Bytesrepr + #[must_use] + pub fn is_bytesrepr(&self) -> bool { + matches!(self, Self::Bytesrepr(..)) + } +} + +impl FromBytes for TransactionArgs { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + match tag { + 0 => { + let (args, remainder) = RuntimeArgs::from_bytes(remainder)?; + Ok((TransactionArgs::Named(args), remainder)) + } + 1 => { + let (bytes, remainder) = Bytes::from_bytes(remainder)?; + Ok((TransactionArgs::Bytesrepr(bytes), remainder)) + } + _ => Err(bytesrepr::Error::Formatting), + } + } +} + +impl ToBytes for TransactionArgs { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + match self { + TransactionArgs::Named(args) => args.serialized_length() + U8_SERIALIZED_LENGTH, + TransactionArgs::Bytesrepr(bytes) => bytes.serialized_length() + U8_SERIALIZED_LENGTH, + } + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + TransactionArgs::Named(args) => { + writer.push(0); + args.write_bytes(writer) + } + TransactionArgs::Bytesrepr(bytes) => { + writer.push(1); + bytes.write_bytes(writer) + } + } + } +} + +#[cfg(test)] +mod tests { + use proptest::prelude::*; + + use crate::{bytesrepr, gens::transaction_args_arb}; + + proptest! 
{ + #[test] + fn serialization_roundtrip(args in transaction_args_arb()) { + bytesrepr::test_serialization_roundtrip(&args); + } + } +} diff --git a/types/src/transaction/transaction_v1/transaction_v1_hash.rs b/types/src/transaction/transaction_v1/transaction_v1_hash.rs new file mode 100644 index 0000000000..86908478d1 --- /dev/null +++ b/types/src/transaction/transaction_v1/transaction_v1_hash.rs @@ -0,0 +1,116 @@ +use alloc::vec::Vec; +use core::fmt::{self, Display, Formatter}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::Rng; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +#[cfg(doc)] +use super::TransactionV1; +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + Digest, +}; + +/// The cryptographic hash of a [`TransactionV1`]. +#[derive( + Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Serialize, Deserialize, Debug, Default, +)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr( + feature = "json-schema", + derive(JsonSchema), + schemars(description = "Hex-encoded TransactionV1 hash.") +)] +#[serde(deny_unknown_fields)] +pub struct TransactionV1Hash(Digest); + +impl TransactionV1Hash { + /// The number of bytes in a `TransactionV1Hash` digest. + pub const LENGTH: usize = Digest::LENGTH; + + /// Constructs a new `TransactionV1Hash`. + pub const fn new(hash: Digest) -> Self { + TransactionV1Hash(hash) + } + + /// Returns the wrapped inner digest. + pub fn inner(&self) -> &Digest { + &self.0 + } + + /// Returns a new `TransactionV1Hash` directly initialized with the provided bytes; no hashing + /// is done. + pub const fn from_raw(raw_digest: [u8; Self::LENGTH]) -> Self { + TransactionV1Hash(Digest::from_raw(raw_digest)) + } + + /// Returns a random `TransactionV1Hash`. 
+ #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + let hash = rng.gen::<[u8; Digest::LENGTH]>().into(); + TransactionV1Hash(hash) + } +} + +impl From for TransactionV1Hash { + fn from(digest: Digest) -> Self { + TransactionV1Hash(digest) + } +} + +impl From for Digest { + fn from(transaction_hash: TransactionV1Hash) -> Self { + transaction_hash.0 + } +} + +impl Display for TransactionV1Hash { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + write!(formatter, "transaction-v1-hash({})", self.0) + } +} + +impl AsRef<[u8]> for TransactionV1Hash { + fn as_ref(&self) -> &[u8] { + self.0.as_ref() + } +} + +impl ToBytes for TransactionV1Hash { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.0.write_bytes(writer) + } + + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.0.to_bytes() + } + + fn serialized_length(&self) -> usize { + self.0.serialized_length() + } +} + +impl FromBytes for TransactionV1Hash { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + Digest::from_bytes(bytes).map(|(inner, remainder)| (TransactionV1Hash(inner), remainder)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + let hash = TransactionV1Hash::random(rng); + bytesrepr::test_serialization_roundtrip(&hash); + } +} diff --git a/types/src/transaction/transaction_v1/transaction_v1_payload.rs b/types/src/transaction/transaction_v1/transaction_v1_payload.rs new file mode 100644 index 0000000000..e1b421b4c4 --- /dev/null +++ b/types/src/transaction/transaction_v1/transaction_v1_payload.rs @@ -0,0 +1,651 @@ +use core::fmt::{self, Debug, Display, Formatter}; + +use super::{errors_v1::FieldDeserializationError, PricingMode}; +use crate::{ + bytesrepr::{ + Bytes, + Error::{self, Formatting}, + FromBytes, ToBytes, + }, + transaction::serialization::{ + CalltableSerializationEnvelope, 
CalltableSerializationEnvelopeBuilder, + }, + DisplayIter, InitiatorAddr, TimeDiff, Timestamp, +}; +#[cfg(any(feature = "std", test))] +use crate::{TransactionArgs, TransactionEntryPoint, TransactionScheduling, TransactionTarget}; +use alloc::{collections::BTreeMap, string::String, vec::Vec}; +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +#[cfg(any(feature = "std", test))] +use serde::{de::DeserializeOwned, Deserialize, Serialize}; +#[cfg(any(feature = "std", test))] +use serde_json::Value; +#[cfg(any(feature = "std", test))] +use thiserror::Error; + +const INITIATOR_ADDR_FIELD_INDEX: u16 = 0; +const TIMESTAMP_FIELD_INDEX: u16 = 1; +const TTL_FIELD_INDEX: u16 = 2; +const CHAIN_NAME_FIELD_INDEX: u16 = 3; +const PRICING_MODE_FIELD_INDEX: u16 = 4; +const FIELDS_FIELD_INDEX: u16 = 5; + +const ARGS_MAP_KEY: u16 = 0; +const TARGET_MAP_KEY: u16 = 1; +const ENTRY_POINT_MAP_KEY: u16 = 2; +const SCHEDULING_MAP_KEY: u16 = 3; +#[cfg(any(feature = "std", test))] +const ARGS_MAP_HUMAN_READABLE_KEY: &str = "args"; +#[cfg(any(feature = "std", test))] +const TARGET_MAP_HUMAN_READABLE_KEY: &str = "target"; +#[cfg(any(feature = "std", test))] +const ENTRY_POINT_MAP_HUMAN_READABLE_KEY: &str = "entry_point"; +#[cfg(any(feature = "std", test))] +const SCHEDULING_MAP_HUMAN_READABLE_KEY: &str = "scheduling"; + +const EXPECTED_FIELD_KEYS: [u16; 4] = [ + ARGS_MAP_KEY, + TARGET_MAP_KEY, + ENTRY_POINT_MAP_KEY, + SCHEDULING_MAP_KEY, +]; + +/// Structure aggregating internal data of V1 transaction. 
+#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr( + any(feature = "std", test), + derive(Serialize, Deserialize), + serde(deny_unknown_fields) +)] +#[cfg_attr( + feature = "json-schema", + derive(JsonSchema), + schemars(with = "TransactionV1PayloadJson") +)] +pub struct TransactionV1Payload { + initiator_addr: InitiatorAddr, + timestamp: Timestamp, + ttl: TimeDiff, + chain_name: String, + pricing_mode: PricingMode, + fields: BTreeMap, +} + +impl TransactionV1Payload { + // ctor + pub fn new( + chain_name: String, + timestamp: Timestamp, + ttl: TimeDiff, + pricing_mode: PricingMode, + initiator_addr: InitiatorAddr, + fields: BTreeMap, + ) -> TransactionV1Payload { + TransactionV1Payload { + chain_name, + timestamp, + ttl, + pricing_mode, + initiator_addr, + fields, + } + } + + fn serialized_field_lengths(&self) -> Vec { + vec![ + self.initiator_addr.serialized_length(), + self.timestamp.serialized_length(), + self.ttl.serialized_length(), + self.chain_name.serialized_length(), + self.pricing_mode.serialized_length(), + self.fields.serialized_length(), + ] + } + + /// Returns the chain name of the transaction. + pub fn chain_name(&self) -> &str { + &self.chain_name + } + + /// Returns the timestamp of the transaction. + pub fn timestamp(&self) -> Timestamp { + self.timestamp + } + + /// Returns the time-to-live of the transaction. + pub fn ttl(&self) -> TimeDiff { + self.ttl + } + + /// Returns the pricing mode of the transaction. + pub fn pricing_mode(&self) -> &PricingMode { + &self.pricing_mode + } + + /// Returns the initiator address of the transaction. + pub fn initiator_addr(&self) -> &InitiatorAddr { + &self.initiator_addr + } + + /// Returns the fields of the transaction. + pub fn fields(&self) -> &BTreeMap { + &self.fields + } + + /// Returns the timestamp of when the transaction expires, i.e. `self.timestamp + self.ttl`. 
+ pub fn expires(&self) -> Timestamp { + self.timestamp.saturating_add(self.ttl) + } + + /// Returns `true` if the transaction has expired. + pub fn expired(&self, current_instant: Timestamp) -> bool { + self.expires() < current_instant + } + + /// Fetches field from the amorphic `field` map and attempts to deserialize it into a type `T`. + /// The deserialization is done using the `FromBytes` trait. + pub fn deserialize_field( + &self, + index: u16, + ) -> Result { + let field = self + .fields + .get(&index) + .ok_or(FieldDeserializationError::IndexNotExists { index })?; + let (value, remainder) = T::from_bytes(field) + .map_err(|error| FieldDeserializationError::FromBytesError { index, error })?; + if !remainder.is_empty() { + return Err(FieldDeserializationError::LingeringBytesInField { index }); + } + Ok(value) + } + + /// Helper method to return size of `fields`. + pub fn number_of_fields(&self) -> usize { + self.fields.len() + } + + /// Makes transaction payload invalid. + #[cfg(any(all(feature = "std", feature = "testing"), test))] + pub fn invalidate(&mut self) { + self.chain_name.clear(); + } +} + +#[cfg(any(feature = "std", test))] +impl TryFrom for TransactionV1Payload { + type Error = TransactionV1PayloadJsonError; + fn try_from(transaction_v1_json: TransactionV1PayloadJson) -> Result { + Ok(TransactionV1Payload { + initiator_addr: transaction_v1_json.initiator_addr, + timestamp: transaction_v1_json.timestamp, + ttl: transaction_v1_json.ttl, + chain_name: transaction_v1_json.chain_name, + pricing_mode: transaction_v1_json.pricing_mode, + fields: from_human_readable_fields(&transaction_v1_json.fields)?, + }) + } +} + +#[cfg(any(feature = "std", test))] +#[derive(Clone, Serialize, Deserialize, PartialEq, Eq)] +#[serde(deny_unknown_fields)] +#[cfg_attr( + feature = "json-schema", + derive(JsonSchema), + schemars( + description = "Internal payload of the transaction. 
The actual data over which the signing is done.", + rename = "TransactionV1Payload", + ) +)] +pub(super) struct TransactionV1PayloadJson { + initiator_addr: InitiatorAddr, + timestamp: Timestamp, + ttl: TimeDiff, + chain_name: String, + pricing_mode: PricingMode, + fields: BTreeMap, +} + +#[cfg(any(feature = "std", test))] +#[derive(Error, Debug)] + +pub(super) enum TransactionV1PayloadJsonError { + #[error("{0}")] + FailedToMap(String), +} + +#[cfg(any(feature = "std", test))] +impl TryFrom for TransactionV1PayloadJson { + type Error = TransactionV1PayloadJsonError; + + fn try_from(value: TransactionV1Payload) -> Result { + Ok(TransactionV1PayloadJson { + initiator_addr: value.initiator_addr, + timestamp: value.timestamp, + ttl: value.ttl, + chain_name: value.chain_name, + pricing_mode: value.pricing_mode, + fields: to_human_readable_fields(&value.fields)?, + }) + } +} + +#[cfg(any(feature = "std", test))] +fn from_human_readable_fields( + fields: &BTreeMap, +) -> Result, TransactionV1PayloadJsonError> { + let number_of_expected_fields = EXPECTED_FIELD_KEYS.len(); + if fields.len() != number_of_expected_fields { + return Err(TransactionV1PayloadJsonError::FailedToMap(format!( + "Expected exactly {} fields", + number_of_expected_fields + ))); + } + let args_bytes = to_bytesrepr::(fields, ARGS_MAP_HUMAN_READABLE_KEY)?; + let target_bytes = to_bytesrepr::(fields, TARGET_MAP_HUMAN_READABLE_KEY)?; + let entry_point_bytes = + to_bytesrepr::(fields, ENTRY_POINT_MAP_HUMAN_READABLE_KEY)?; + let schedule_bytes = + to_bytesrepr::(fields, SCHEDULING_MAP_HUMAN_READABLE_KEY)?; + Ok(BTreeMap::from_iter(vec![ + (ARGS_MAP_KEY, args_bytes), + (TARGET_MAP_KEY, target_bytes), + (ENTRY_POINT_MAP_KEY, entry_point_bytes), + (SCHEDULING_MAP_KEY, schedule_bytes), + ])) +} + +#[cfg(any(feature = "std", test))] +fn to_human_readable_fields( + fields: &BTreeMap, +) -> Result, TransactionV1PayloadJsonError> { + let args_value = + extract_and_deserialize_field::(fields, ARGS_MAP_KEY, "args")?; 
+ let target_value = + extract_and_deserialize_field::(fields, TARGET_MAP_KEY, "target")?; + let entry_point_value = extract_and_deserialize_field::( + fields, + ENTRY_POINT_MAP_KEY, + "entry_point", + )?; + let scheduling_value = extract_and_deserialize_field::( + fields, + SCHEDULING_MAP_KEY, + "scheduling", + )?; + + Ok(BTreeMap::from_iter(vec![ + (ARGS_MAP_HUMAN_READABLE_KEY.to_string(), args_value), + (TARGET_MAP_HUMAN_READABLE_KEY.to_string(), target_value), + ( + ENTRY_POINT_MAP_HUMAN_READABLE_KEY.to_string(), + entry_point_value, + ), + ( + SCHEDULING_MAP_HUMAN_READABLE_KEY.to_string(), + scheduling_value, + ), + ])) +} + +#[cfg(any(feature = "std", test))] +fn to_bytesrepr( + fields: &BTreeMap, + field_name: &str, +) -> Result { + let value_json = fields + .get(field_name) + .ok_or(TransactionV1PayloadJsonError::FailedToMap(format!( + "Could not find {field_name} field" + )))?; + let deserialized = serde_json::from_value::(value_json.clone()) + .map_err(|e| TransactionV1PayloadJsonError::FailedToMap(format!("{:?}", e)))?; + deserialized + .to_bytes() + .map(|bytes| bytes.into()) + .map_err(|e| TransactionV1PayloadJsonError::FailedToMap(format!("{:?}", e))) +} + +#[cfg(any(feature = "std", test))] +fn extract_and_deserialize_field( + fields: &BTreeMap, + key: u16, + field_name: &str, +) -> Result { + let value_bytes = fields + .get(&key) + .ok_or(TransactionV1PayloadJsonError::FailedToMap(format!( + "Could not find {field_name} field" + )))?; + let (from_bytes, remainder) = T::from_bytes(value_bytes) + .map_err(|e| TransactionV1PayloadJsonError::FailedToMap(format!("{:?}", e)))?; + if !remainder.is_empty() { + return Err(TransactionV1PayloadJsonError::FailedToMap(format!( + "Unexpexcted bytes in {field_name} field" + ))); + } + let value = serde_json::to_value(from_bytes) + .map_err(|e| TransactionV1PayloadJsonError::FailedToMap(format!("{:?}", e)))?; + Ok(value) +} + +impl ToBytes for TransactionV1Payload { + fn to_bytes(&self) -> Result, 
crate::bytesrepr::Error> { + let expected_payload_sizes = self.serialized_field_lengths(); + CalltableSerializationEnvelopeBuilder::new(expected_payload_sizes)? + .add_field(INITIATOR_ADDR_FIELD_INDEX, &self.initiator_addr)? + .add_field(TIMESTAMP_FIELD_INDEX, &self.timestamp)? + .add_field(TTL_FIELD_INDEX, &self.ttl)? + .add_field(CHAIN_NAME_FIELD_INDEX, &self.chain_name)? + .add_field(PRICING_MODE_FIELD_INDEX, &self.pricing_mode)? + .add_field(FIELDS_FIELD_INDEX, &self.fields)? + .binary_payload_bytes() + } + + fn serialized_length(&self) -> usize { + CalltableSerializationEnvelope::estimate_size(self.serialized_field_lengths()) + } +} + +impl FromBytes for TransactionV1Payload { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), Error> { + let (binary_payload, remainder) = CalltableSerializationEnvelope::from_bytes(6, bytes)?; + let window = binary_payload.start_consuming()?.ok_or(Formatting)?; + + window.verify_index(INITIATOR_ADDR_FIELD_INDEX)?; + let (initiator_addr, window) = window.deserialize_and_maybe_next::()?; + let window = window.ok_or(Formatting)?; + window.verify_index(TIMESTAMP_FIELD_INDEX)?; + let (timestamp, window) = window.deserialize_and_maybe_next::()?; + let window = window.ok_or(Formatting)?; + window.verify_index(TTL_FIELD_INDEX)?; + let (ttl, window) = window.deserialize_and_maybe_next::()?; + let window = window.ok_or(Formatting)?; + window.verify_index(CHAIN_NAME_FIELD_INDEX)?; + let (chain_name, window) = window.deserialize_and_maybe_next::()?; + let window = window.ok_or(Formatting)?; + window.verify_index(PRICING_MODE_FIELD_INDEX)?; + let (pricing_mode, window) = window.deserialize_and_maybe_next::()?; + let window = window.ok_or(Formatting)?; + window.verify_index(FIELDS_FIELD_INDEX)?; + let (fields_as_vec, window) = window.deserialize_and_maybe_next::>()?; + let fields = build_map(fields_as_vec)?; + if window.is_some() { + return Err(Formatting); + } + if fields.len() != EXPECTED_FIELD_KEYS.len() + || EXPECTED_FIELD_KEYS + 
.iter() + .any(|expected_key| !fields.contains_key(expected_key)) + { + return Err(Formatting); + } + let from_bytes = TransactionV1Payload { + chain_name, + timestamp, + ttl, + pricing_mode, + initiator_addr, + fields, + }; + + Ok((from_bytes, remainder)) + } +} + +// We need to make sure that the bytes of the `fields` field are serialized in the correct order. +// A BTreeMap is serialized the same as Vec<(K, V)> and it actually, on deserialization, doesn't +// check if the keys are in ascending order. We need to make sure that the incoming transaction +// payload is serialized in a strict way, otherwise we would have trouble with verifying the +// signature(s). +fn build_map(fields_as_vec: Vec<(u16, Bytes)>) -> Result, Error> { + let mut ret = BTreeMap::new(); + let mut max_idx: i32 = -1; + for (key, value) in fields_as_vec { + let key_signed = key as i32; + if key_signed <= max_idx { + return Err(Formatting); + } + max_idx = key_signed; + ret.insert(key, value); + } + + Ok(ret) +} + +impl Display for TransactionV1Payload { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + write!( + formatter, + "transaction-v1-payload[{}, {}, {}, {}, {}, fields: {}]", + self.chain_name, + self.timestamp, + self.ttl, + self.pricing_mode, + self.initiator_addr, + DisplayIter::new(self.fields.keys()) + ) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{ + testing::TestRng, RuntimeArgs, TransactionEntryPoint, TransactionScheduling, + TransactionTarget, + }; + use std::collections::BTreeMap; + + #[test] + fn reserialize_should_work_with_ascending_ids() { + let input = vec![ + (0, Bytes::from(vec![1])), + (1, Bytes::from(vec![2])), + (4, Bytes::from(vec![3])), + ]; + let map = build_map(input).expect("Should not fail"); + assert_eq!( + map, + BTreeMap::from_iter(vec![ + (0, Bytes::from(vec![1])), + (1, Bytes::from(vec![2])), + (4, Bytes::from(vec![3])) + ]) + ); + } + + #[test] + fn reserialize_should_fail_when_ids_not_unique() { + let input = vec![ + (0, 
Bytes::from(vec![1])), + (0, Bytes::from(vec![2])), + (4, Bytes::from(vec![3])), + ]; + let map_ret = build_map(input); + assert!(map_ret.is_err()); + } + + #[test] + fn reserialize_should_fail_when_ids_not_ascending() { + let input = vec![ + (0, Bytes::from(vec![1])), + (2, Bytes::from(vec![2])), + (1, Bytes::from(vec![3])), + ]; + assert!(build_map(input).is_err()); + let input = vec![ + (0, Bytes::from(vec![1])), + (2, Bytes::from(vec![2])), + (0, Bytes::from(vec![3])), + ]; + assert!(build_map(input).is_err()); + let input = vec![ + (0, Bytes::from(vec![1])), + (1, Bytes::from(vec![2])), + (2, Bytes::from(vec![3])), + (3, Bytes::from(vec![4])), + (2, Bytes::from(vec![5])), + ]; + assert!(build_map(input).is_err()); + } + + #[test] + fn should_fail_if_deserialized_payload_has_too_many_fields() { + let rng = &mut TestRng::new(); + let ( + args, + target, + entry_point, + scheduling, + initiator_addr, + timestamp, + ttl, + chain_name, + pricing_mode, + ) = random_payload_data(rng); + let mut fields = BTreeMap::new(); + fields.insert(ARGS_MAP_KEY, args.to_bytes().unwrap().into()); + fields.insert(TARGET_MAP_KEY, target.to_bytes().unwrap().into()); + fields.insert(ENTRY_POINT_MAP_KEY, entry_point.to_bytes().unwrap().into()); + fields.insert(SCHEDULING_MAP_KEY, scheduling.to_bytes().unwrap().into()); + fields.insert(4, 111_u64.to_bytes().unwrap().into()); + + let bytes = TransactionV1Payload::new( + chain_name, + timestamp, + ttl, + pricing_mode, + initiator_addr, + fields, + ) + .to_bytes() + .unwrap(); + let result = TransactionV1Payload::from_bytes(&bytes); + assert!(result.is_err()); + } + + #[test] + fn should_fail_if_deserialized_payload_has_unrecognized_fields() { + let rng = &mut TestRng::new(); + let ( + args, + target, + entry_point, + scheduling, + initiator_addr, + timestamp, + ttl, + chain_name, + pricing_mode, + ) = random_payload_data(rng); + let mut fields = BTreeMap::new(); + fields.insert(ARGS_MAP_KEY, args.to_bytes().unwrap().into()); + 
fields.insert(TARGET_MAP_KEY, target.to_bytes().unwrap().into()); + fields.insert(100, entry_point.to_bytes().unwrap().into()); + fields.insert(SCHEDULING_MAP_KEY, scheduling.to_bytes().unwrap().into()); + + let bytes = TransactionV1Payload::new( + chain_name, + timestamp, + ttl, + pricing_mode, + initiator_addr, + fields, + ) + .to_bytes() + .unwrap(); + let result = TransactionV1Payload::from_bytes(&bytes); + assert!(result.is_err()); + } + + #[test] + fn should_fail_if_serialized_payoad_has_fields_out_of_order() { + let rng = &mut TestRng::new(); + let ( + args, + target, + entry_point, + scheduling, + initiator_addr, + timestamp, + ttl, + chain_name, + pricing_mode, + ) = random_payload_data(rng); + let fields: Vec<(u16, Bytes)> = vec![ + (SCHEDULING_MAP_KEY, scheduling.to_bytes().unwrap().into()), + (TARGET_MAP_KEY, target.to_bytes().unwrap().into()), + (ENTRY_POINT_MAP_KEY, entry_point.to_bytes().unwrap().into()), + (ARGS_MAP_KEY, args.to_bytes().unwrap().into()), + ]; + + let expected_payload_sizes = vec![ + initiator_addr.serialized_length(), + timestamp.serialized_length(), + ttl.serialized_length(), + chain_name.serialized_length(), + pricing_mode.serialized_length(), + fields.serialized_length(), + ]; + + let bytes = CalltableSerializationEnvelopeBuilder::new(expected_payload_sizes) + .unwrap() + .add_field(INITIATOR_ADDR_FIELD_INDEX, &initiator_addr) + .unwrap() + .add_field(TIMESTAMP_FIELD_INDEX, ×tamp) + .unwrap() + .add_field(TTL_FIELD_INDEX, &ttl) + .unwrap() + .add_field(CHAIN_NAME_FIELD_INDEX, &chain_name) + .unwrap() + .add_field(PRICING_MODE_FIELD_INDEX, &pricing_mode) + .unwrap() + .add_field(FIELDS_FIELD_INDEX, &fields) + .unwrap() + .binary_payload_bytes() + .unwrap(); + let payload_res = TransactionV1Payload::from_bytes(&bytes); + assert!(payload_res.is_err()); + } + + fn random_payload_data( + rng: &mut TestRng, + ) -> ( + RuntimeArgs, + TransactionTarget, + TransactionEntryPoint, + TransactionScheduling, + InitiatorAddr, + Timestamp, + 
TimeDiff, + String, + PricingMode, + ) { + let args = RuntimeArgs::random(rng); + let target = TransactionTarget::random(rng); + let entry_point = TransactionEntryPoint::random(rng); + let scheduling = TransactionScheduling::random(rng); + let initiator_addr = InitiatorAddr::random(rng); + let timestamp = Timestamp::now(); + let ttl = TimeDiff::from_millis(1000); + let chain_name = "chain-name".to_string(); + let pricing_mode = PricingMode::random(rng); + ( + args, + target, + entry_point, + scheduling, + initiator_addr, + timestamp, + ttl, + chain_name, + pricing_mode, + ) + } +} diff --git a/types/src/transaction/transfer_target.rs b/types/src/transaction/transfer_target.rs new file mode 100644 index 0000000000..e1500a5384 --- /dev/null +++ b/types/src/transaction/transfer_target.rs @@ -0,0 +1,48 @@ +#[cfg(any(feature = "testing", test))] +use rand::Rng; + +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +use crate::{account::AccountHash, PublicKey, URef}; + +/// The various types which can be used as the `target` runtime argument of a native transfer. +#[derive(Clone, Ord, PartialOrd, Eq, PartialEq, Debug)] +pub enum TransferTarget { + /// A public key. + PublicKey(PublicKey), + /// An account hash. + AccountHash(AccountHash), + /// A URef. + URef(URef), +} + +impl TransferTarget { + /// Returns a random `TransferTarget`. 
+ #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + match rng.gen_range(0..3) { + 0 => TransferTarget::PublicKey(PublicKey::random(rng)), + 1 => TransferTarget::AccountHash(rng.gen()), + 2 => TransferTarget::URef(rng.gen()), + _ => unreachable!(), + } + } +} + +impl From for TransferTarget { + fn from(public_key: PublicKey) -> Self { + Self::PublicKey(public_key) + } +} + +impl From for TransferTarget { + fn from(account_hash: AccountHash) -> Self { + Self::AccountHash(account_hash) + } +} + +impl From for TransferTarget { + fn from(uref: URef) -> Self { + Self::URef(uref) + } +} diff --git a/types/src/transfer.rs b/types/src/transfer.rs index c6935b79bd..4072819694 100644 --- a/types/src/transfer.rs +++ b/types/src/transfer.rs @@ -1,409 +1,201 @@ -// TODO - remove once schemars stops causing warning. -#![allow(clippy::field_reassign_with_default)] +mod error; +mod transfer_v1; +mod transfer_v2; -use alloc::{format, string::String, vec::Vec}; -use core::{ - array::TryFromSliceError, - convert::TryFrom, - fmt::{self, Debug, Display, Formatter}, -}; +use alloc::vec::Vec; +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +#[cfg(feature = "datasize")] use datasize::DataSize; -use rand::{ - distributions::{Distribution, Standard}, - Rng, -}; -#[cfg(feature = "std")] -use schemars::{gen::SchemaGenerator, schema::Schema, JsonSchema}; -use serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer}; - +#[cfg(feature = "json-schema")] +use once_cell::sync::Lazy; +#[cfg(any(feature = "testing", test))] +use rand::Rng; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +#[cfg(feature = "json-schema")] +use crate::{account::AccountHash, transaction::TransactionV1Hash, URef}; use crate::{ - account::AccountHash, - bytesrepr::{self, FromBytes, ToBytes}, - CLType, CLTyped, URef, U512, + bytesrepr::{self, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}, + U512, 
}; - -/// The length of a deploy hash. -pub const DEPLOY_HASH_LENGTH: usize = 32; -/// The length of a transfer address. -pub const TRANSFER_ADDR_LENGTH: usize = 32; -const TRANSFER_ADDR_FORMATTED_STRING_PREFIX: &str = "transfer-"; - -/// A newtype wrapping a [`[u8; DEPLOY_HASH_LENGTH]`] which is the raw bytes of the deploy hash. -#[derive(DataSize, Default, PartialOrd, Ord, PartialEq, Eq, Hash, Clone, Copy, Debug)] -pub struct DeployHash([u8; DEPLOY_HASH_LENGTH]); - -impl DeployHash { - /// Constructs a new `DeployHash` instance from the raw bytes of a deploy hash. - pub const fn new(value: [u8; DEPLOY_HASH_LENGTH]) -> DeployHash { - DeployHash(value) - } - - /// Returns the raw bytes of the deploy hash as an array. - pub fn value(&self) -> [u8; DEPLOY_HASH_LENGTH] { - self.0 - } - - /// Returns the raw bytes of the deploy hash as a `slice`. - pub fn as_bytes(&self) -> &[u8] { - &self.0 - } -} - -#[cfg(feature = "std")] -impl JsonSchema for DeployHash { - fn schema_name() -> String { - String::from("DeployHash") - } - - fn json_schema(gen: &mut SchemaGenerator) -> Schema { - let schema = gen.subschema_for::(); - let mut schema_object = schema.into_object(); - schema_object.metadata().description = Some("Hex-encoded deploy hash.".to_string()); - schema_object.into() - } +#[cfg(any(feature = "testing", feature = "json-schema", test))] +use crate::{transaction::TransactionHash, Gas, InitiatorAddr}; +pub use error::TransferFromStrError; +pub use transfer_v1::{TransferAddr, TransferV1, TRANSFER_ADDR_LENGTH}; +pub use transfer_v2::TransferV2; + +const V1_TAG: u8 = 0; +const V2_TAG: u8 = 1; + +#[cfg(feature = "json-schema")] +pub(super) static TRANSFER: Lazy = Lazy::new(|| { + let transaction_hash = TransactionHash::V1(TransactionV1Hash::from_raw([1; 32])); + let from = InitiatorAddr::AccountHash(AccountHash::new([2; 32])); + let to = Some(AccountHash::new([3; 32])); + let source = URef::from_formatted_str( + 
"uref-0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a-007", + ) + .unwrap(); + let target = URef::from_formatted_str( + "uref-1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b-000", + ) + .unwrap(); + let amount = U512::from(1_000_000_000_000_u64); + let gas = Gas::new(2_500_000_000_u64); + let id = Some(999); + Transfer::V2(TransferV2::new( + transaction_hash, + from, + to, + source, + target, + amount, + gas, + id, + )) +}); + +/// A versioned wrapper for a transfer. +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +pub enum Transfer { + /// A version 1 transfer. + #[serde(rename = "Version1")] + V1(TransferV1), + /// A version 2 transfer. + #[serde(rename = "Version2")] + V2(TransferV2), } -impl ToBytes for DeployHash { - fn to_bytes(&self) -> Result, bytesrepr::Error> { - self.0.to_bytes() - } - - fn serialized_length(&self) -> usize { - self.0.serialized_length() - } -} - -impl FromBytes for DeployHash { - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { - <[u8; DEPLOY_HASH_LENGTH]>::from_bytes(bytes) - .map(|(inner, remainder)| (DeployHash(inner), remainder)) - } -} - -impl Serialize for DeployHash { - fn serialize(&self, serializer: S) -> Result { - if serializer.is_human_readable() { - base16::encode_lower(&self.0).serialize(serializer) - } else { - self.0.serialize(serializer) +impl Transfer { + /// Transfer amount. 
+ pub fn amount(&self) -> U512 { + match self { + Transfer::V1(transfer_v1) => transfer_v1.amount, + Transfer::V2(transfer_v2) => transfer_v2.amount, } } -} -impl<'de> Deserialize<'de> for DeployHash { - fn deserialize>(deserializer: D) -> Result { - let bytes = if deserializer.is_human_readable() { - let hex_string = String::deserialize(deserializer)?; - let vec_bytes = base16::decode(hex_string.as_bytes()).map_err(SerdeError::custom)?; - <[u8; DEPLOY_HASH_LENGTH]>::try_from(vec_bytes.as_ref()).map_err(SerdeError::custom)? + // This method is not intended to be used by third party crates. + #[doc(hidden)] + #[cfg(feature = "json-schema")] + pub fn example() -> &'static Self { + &TRANSFER + } + + /// Returns a random `Transfer`. + #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + use crate::DeployHash; + + if rng.gen() { + Transfer::V1(TransferV1::new( + DeployHash::random(rng), + rng.gen(), + rng.gen(), + rng.gen(), + rng.gen(), + rng.gen(), + rng.gen(), + rng.gen(), + )) } else { - <[u8; DEPLOY_HASH_LENGTH]>::deserialize(deserializer)? 
- }; - Ok(DeployHash(bytes)) - } -} - -impl Distribution for Standard { - fn sample(&self, rng: &mut R) -> DeployHash { - DeployHash::new(rng.gen()) + Transfer::V2(TransferV2::new( + TransactionHash::random(rng), + InitiatorAddr::random(rng), + rng.gen(), + rng.gen(), + rng.gen(), + rng.gen(), + Gas::new(rng.gen::()), + rng.gen(), + )) + } } } -/// Represents a transfer from one purse to another -#[derive(Debug, Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Serialize, Deserialize, Default)] -#[cfg_attr(feature = "std", derive(JsonSchema))] -#[serde(deny_unknown_fields)] -pub struct Transfer { - /// Deploy that created the transfer - pub deploy_hash: DeployHash, - /// Account from which transfer was executed - pub from: AccountHash, - /// Account to which funds are transferred - pub to: Option, - /// Source purse - pub source: URef, - /// Target purse - pub target: URef, - /// Transfer amount - pub amount: U512, - /// Gas - pub gas: U512, - /// User-defined id - pub id: Option, -} - -impl Transfer { - /// Creates a [`Transfer`]. 
- #[allow(clippy::too_many_arguments)] - pub fn new( - deploy_hash: DeployHash, - from: AccountHash, - to: Option, - source: URef, - target: URef, - amount: U512, - gas: U512, - id: Option, - ) -> Self { - Transfer { - deploy_hash, - from, - to, - source, - target, - amount, - gas, - id, - } +impl From for Transfer { + fn from(v1_transfer: TransferV1) -> Self { + Transfer::V1(v1_transfer) } } -impl FromBytes for Transfer { - fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { - let (deploy_hash, rem) = FromBytes::from_bytes(bytes)?; - let (from, rem) = AccountHash::from_bytes(rem)?; - let (to, rem) = >::from_bytes(rem)?; - let (source, rem) = URef::from_bytes(rem)?; - let (target, rem) = URef::from_bytes(rem)?; - let (amount, rem) = U512::from_bytes(rem)?; - let (gas, rem) = U512::from_bytes(rem)?; - let (id, rem) = >::from_bytes(rem)?; - Ok(( - Transfer { - deploy_hash, - from, - to, - source, - target, - amount, - gas, - id, - }, - rem, - )) +impl From for Transfer { + fn from(v2_transfer: TransferV2) -> Self { + Transfer::V2(v2_transfer) } } impl ToBytes for Transfer { fn to_bytes(&self) -> Result, bytesrepr::Error> { - let mut result = bytesrepr::allocate_buffer(self)?; - result.append(&mut self.deploy_hash.to_bytes()?); - result.append(&mut self.from.to_bytes()?); - result.append(&mut self.to.to_bytes()?); - result.append(&mut self.source.to_bytes()?); - result.append(&mut self.target.to_bytes()?); - result.append(&mut self.amount.to_bytes()?); - result.append(&mut self.gas.to_bytes()?); - result.append(&mut self.id.to_bytes()?); - Ok(result) + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) } fn serialized_length(&self) -> usize { - self.deploy_hash.serialized_length() - + self.from.serialized_length() - + self.to.serialized_length() - + self.source.serialized_length() - + self.target.serialized_length() - + self.amount.serialized_length() - + self.gas.serialized_length() - + 
self.id.serialized_length() - } -} - -/// Error returned when decoding a `TransferAddr` from a formatted string. -#[derive(Debug)] -pub enum FromStrError { - /// The prefix is invalid. - InvalidPrefix, - /// The address is not valid hex. - Hex(base16::DecodeError), - /// The slice is the wrong length. - Length(TryFromSliceError), -} - -impl From for FromStrError { - fn from(error: base16::DecodeError) -> Self { - FromStrError::Hex(error) - } -} - -impl From for FromStrError { - fn from(error: TryFromSliceError) -> Self { - FromStrError::Length(error) + U8_SERIALIZED_LENGTH + + match self { + Transfer::V1(transfer) => transfer.serialized_length(), + Transfer::V2(transfer) => transfer.serialized_length(), + } } -} -impl Display for FromStrError { - fn fmt(&self, f: &mut Formatter) -> fmt::Result { + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { match self { - FromStrError::InvalidPrefix => write!(f, "prefix is not 'transfer-'"), - FromStrError::Hex(error) => { - write!(f, "failed to decode address portion from hex: {}", error) + Transfer::V1(transfer) => { + V1_TAG.write_bytes(writer)?; + transfer.write_bytes(writer) + } + Transfer::V2(transfer) => { + V2_TAG.write_bytes(writer)?; + transfer.write_bytes(writer) } - FromStrError::Length(error) => write!(f, "address portion is wrong length: {}", error), - } - } -} - -/// A newtype wrapping a [`[u8; TRANSFER_ADDR_LENGTH]`] which is the raw bytes of the transfer -/// address. -#[derive(DataSize, Default, PartialOrd, Ord, PartialEq, Eq, Hash, Clone, Copy)] -pub struct TransferAddr([u8; TRANSFER_ADDR_LENGTH]); - -impl TransferAddr { - /// Constructs a new `TransferAddr` instance from the raw bytes. - pub const fn new(value: [u8; TRANSFER_ADDR_LENGTH]) -> TransferAddr { - TransferAddr(value) - } - - /// Returns the raw bytes of the transfer address as an array. - pub fn value(&self) -> [u8; TRANSFER_ADDR_LENGTH] { - self.0 - } - - /// Returns the raw bytes of the transfer address as a `slice`. 
- pub fn as_bytes(&self) -> &[u8] { - &self.0 - } - - /// Formats the `TransferAddr` as a prefixed, hex-encoded string. - pub fn to_formatted_string(&self) -> String { - format!( - "{}{}", - TRANSFER_ADDR_FORMATTED_STRING_PREFIX, - base16::encode_lower(&self.0), - ) - } - - /// Parses a string formatted as per `Self::to_formatted_string()` into a `TransferAddr`. - pub fn from_formatted_str(input: &str) -> Result { - let remainder = input - .strip_prefix(TRANSFER_ADDR_FORMATTED_STRING_PREFIX) - .ok_or(FromStrError::InvalidPrefix)?; - let bytes = <[u8; TRANSFER_ADDR_LENGTH]>::try_from(base16::decode(remainder)?.as_ref())?; - Ok(TransferAddr(bytes)) - } -} - -#[cfg(feature = "std")] -impl JsonSchema for TransferAddr { - fn schema_name() -> String { - String::from("TransferAddr") - } - - fn json_schema(gen: &mut SchemaGenerator) -> Schema { - let schema = gen.subschema_for::(); - let mut schema_object = schema.into_object(); - schema_object.metadata().description = Some("Hex-encoded transfer address.".to_string()); - schema_object.into() - } -} - -impl Serialize for TransferAddr { - fn serialize(&self, serializer: S) -> Result { - if serializer.is_human_readable() { - self.to_formatted_string().serialize(serializer) - } else { - self.0.serialize(serializer) - } - } -} - -impl<'de> Deserialize<'de> for TransferAddr { - fn deserialize>(deserializer: D) -> Result { - if deserializer.is_human_readable() { - let formatted_string = String::deserialize(deserializer)?; - TransferAddr::from_formatted_str(&formatted_string).map_err(SerdeError::custom) - } else { - let bytes = <[u8; TRANSFER_ADDR_LENGTH]>::deserialize(deserializer)?; - Ok(TransferAddr(bytes)) } } } -// impl TryFrom<&[u8]> for AccountHash { -// type Error = TryFromSliceForAccountHashError; -// -// fn try_from(bytes: &[u8]) -> Result { -// [u8; TRANSFER_ADDR_LENGTH]::try_from(bytes) -// .map(AccountHash::new) -// .map_err(|_| TryFromSliceForAccountHashError(())) -// } -// } -// -// impl TryFrom<&alloc::vec::Vec> 
for AccountHash { -// type Error = TryFromSliceForAccountHashError; -// -// fn try_from(bytes: &Vec) -> Result { -// [u8; TRANSFER_ADDR_LENGTH]::try_from(bytes as &[u8]) -// .map(AccountHash::new) -// .map_err(|_| TryFromSliceForAccountHashError(())) -// } -// } - -impl Display for TransferAddr { - fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { - write!(f, "{}", base16::encode_lower(&self.0)) - } -} - -impl Debug for TransferAddr { - fn fmt(&self, f: &mut Formatter) -> core::fmt::Result { - write!(f, "TransferAddr({})", base16::encode_lower(&self.0)) - } -} - -impl CLTyped for TransferAddr { - fn cl_type() -> CLType { - CLType::ByteArray(TRANSFER_ADDR_LENGTH as u32) - } -} - -impl ToBytes for TransferAddr { - #[inline(always)] - fn to_bytes(&self) -> Result, bytesrepr::Error> { - self.0.to_bytes() - } - - #[inline(always)] - fn serialized_length(&self) -> usize { - self.0.serialized_length() - } -} - -impl FromBytes for TransferAddr { +impl FromBytes for Transfer { fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { - let (bytes, remainder) = FromBytes::from_bytes(bytes)?; - Ok((TransferAddr::new(bytes), remainder)) - } -} - -impl AsRef<[u8]> for TransferAddr { - fn as_ref(&self) -> &[u8] { - self.0.as_ref() - } -} - -impl Distribution for Standard { - fn sample(&self, rng: &mut R) -> TransferAddr { - TransferAddr::new(rng.gen()) + let (tag, remainder) = u8::from_bytes(bytes)?; + match tag { + V1_TAG => { + let (transfer, remainder) = TransferV1::from_bytes(remainder)?; + Ok((Transfer::V1(transfer), remainder)) + } + V2_TAG => { + let (transfer, remainder) = TransferV2::from_bytes(remainder)?; + Ok((Transfer::V2(transfer), remainder)) + } + _ => Err(bytesrepr::Error::Formatting), + } } } -/// Generators for [`Transfer`] -#[cfg(any(feature = "gens", test))] +/// Proptest generators for [`Transfer`]. 
+#[cfg(any(feature = "testing", feature = "gens", test))] pub mod gens { - use proptest::prelude::{prop::option, Arbitrary, Strategy}; + use proptest::{ + array, + prelude::{prop::option, Arbitrary, Strategy}, + }; + use super::*; use crate::{ - deploy_info::gens::{account_hash_arb, deploy_hash_arb}, - gens::{u512_arb, uref_arb}, - Transfer, + gens::{account_hash_arb, u512_arb, uref_arb}, + transaction::gens::deploy_hash_arb, }; - /// Creates an arbitrary [`Transfer`] - pub fn transfer_arb() -> impl Strategy { + pub fn transfer_v1_addr_arb() -> impl Strategy { + array::uniform32(::arbitrary()).prop_map(TransferAddr::new) + } + + pub fn transfer_v1_arb() -> impl Strategy { ( deploy_hash_arb(), account_hash_arb(), @@ -415,7 +207,7 @@ pub mod gens { option::of(::arbitrary()), ) .prop_map(|(deploy_hash, from, to, source, target, amount, gas, id)| { - Transfer { + TransferV1 { deploy_hash, from, to, @@ -431,59 +223,15 @@ pub mod gens { #[cfg(test)] mod tests { - use proptest::prelude::*; - use crate::bytesrepr; use super::*; - proptest! 
{ - #[test] - fn test_serialization_roundtrip(transfer in gens::transfer_arb()) { - bytesrepr::test_serialization_roundtrip(&transfer) - } - } - #[test] - fn transfer_addr_from_str() { - let transfer_address = TransferAddr([4; 32]); - let encoded = transfer_address.to_formatted_string(); - let decoded = TransferAddr::from_formatted_str(&encoded).unwrap(); - assert_eq!(transfer_address, decoded); - - let invalid_prefix = - "transfe-0000000000000000000000000000000000000000000000000000000000000000"; - assert!(TransferAddr::from_formatted_str(invalid_prefix).is_err()); - - let invalid_prefix = - "transfer0000000000000000000000000000000000000000000000000000000000000000"; - assert!(TransferAddr::from_formatted_str(invalid_prefix).is_err()); - - let short_addr = "transfer-00000000000000000000000000000000000000000000000000000000000000"; - assert!(TransferAddr::from_formatted_str(short_addr).is_err()); + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); - let long_addr = - "transfer-000000000000000000000000000000000000000000000000000000000000000000"; - assert!(TransferAddr::from_formatted_str(long_addr).is_err()); - - let invalid_hex = - "transfer-000000000000000000000000000000000000000000000000000000000000000g"; - assert!(TransferAddr::from_formatted_str(invalid_hex).is_err()); - } - - #[test] - fn transfer_addr_serde_roundtrip() { - let transfer_address = TransferAddr([255; 32]); - let serialized = bincode::serialize(&transfer_address).unwrap(); - let decoded = bincode::deserialize(&serialized).unwrap(); - assert_eq!(transfer_address, decoded); - } - - #[test] - fn transfer_addr_json_roundtrip() { - let transfer_address = TransferAddr([255; 32]); - let json_string = serde_json::to_string_pretty(&transfer_address).unwrap(); - let decoded = serde_json::from_str(&json_string).unwrap(); - assert_eq!(transfer_address, decoded); + let transfer = Transfer::random(rng); + bytesrepr::test_serialization_roundtrip(&transfer); } } diff --git a/types/src/transfer/error.rs 
b/types/src/transfer/error.rs new file mode 100644 index 0000000000..b97ec03d6d --- /dev/null +++ b/types/src/transfer/error.rs @@ -0,0 +1,63 @@ +use core::{ + array::TryFromSliceError, + fmt::{self, Debug, Display, Formatter}, +}; +#[cfg(feature = "std")] +use std::error::Error as StdError; + +/// Error returned when decoding a `TransferAddr` from a formatted string. +#[derive(Debug, Clone)] +#[non_exhaustive] +pub enum TransferFromStrError { + /// The prefix is invalid. + InvalidPrefix, + /// The address is not valid hex. + Hex(base16::DecodeError), + /// The slice is the wrong length. + Length(TryFromSliceError), +} + +impl From for TransferFromStrError { + fn from(error: base16::DecodeError) -> Self { + TransferFromStrError::Hex(error) + } +} + +impl From for TransferFromStrError { + fn from(error: TryFromSliceError) -> Self { + TransferFromStrError::Length(error) + } +} + +impl Display for TransferFromStrError { + fn fmt(&self, formatter: &mut Formatter) -> fmt::Result { + match self { + TransferFromStrError::InvalidPrefix => { + write!(formatter, "transfer addr prefix is invalid",) + } + TransferFromStrError::Hex(error) => { + write!( + formatter, + "failed to decode address portion of transfer addr from hex: {}", + error + ) + } + TransferFromStrError::Length(error) => write!( + formatter, + "address portion of transfer addr is wrong length: {}", + error + ), + } + } +} + +#[cfg(feature = "std")] +impl StdError for TransferFromStrError { + fn source(&self) -> Option<&(dyn StdError + 'static)> { + match self { + TransferFromStrError::InvalidPrefix => None, + TransferFromStrError::Hex(error) => Some(error), + TransferFromStrError::Length(error) => Some(error), + } + } +} diff --git a/types/src/transfer/transfer_v1.rs b/types/src/transfer/transfer_v1.rs new file mode 100644 index 0000000000..515b3c2db5 --- /dev/null +++ b/types/src/transfer/transfer_v1.rs @@ -0,0 +1,131 @@ +mod transfer_v1_addr; + +use alloc::vec::Vec; + +#[cfg(feature = "datasize")] +use 
datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use crate::{ + account::AccountHash, + bytesrepr::{self, FromBytes, ToBytes}, + serde_helpers, DeployHash, URef, U512, +}; +pub use transfer_v1_addr::{TransferAddr, TRANSFER_ADDR_LENGTH}; + +/// Represents a version 1 transfer from one purse to another. +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, Default, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub struct TransferV1 { + /// Deploy that created the transfer + #[serde(with = "serde_helpers::deploy_hash_as_array")] + #[cfg_attr( + feature = "json-schema", + schemars( + with = "DeployHash", + description = "Hex-encoded Deploy hash of Deploy that created the transfer." + ) + )] + pub deploy_hash: DeployHash, + /// Account from which transfer was executed + pub from: AccountHash, + /// Account to which funds are transferred + pub to: Option, + /// Source purse + pub source: URef, + /// Target purse + pub target: URef, + /// Transfer amount + pub amount: U512, + /// Gas + pub gas: U512, + /// User-defined id + pub id: Option, +} + +impl TransferV1 { + /// Creates a [`TransferV1`]. 
+ #[allow(clippy::too_many_arguments)] + pub fn new( + deploy_hash: DeployHash, + from: AccountHash, + to: Option, + source: URef, + target: URef, + amount: U512, + gas: U512, + id: Option, + ) -> Self { + TransferV1 { + deploy_hash, + from, + to, + source, + target, + amount, + gas, + id, + } + } +} + +impl ToBytes for TransferV1 { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn serialized_length(&self) -> usize { + self.deploy_hash.serialized_length() + + self.from.serialized_length() + + self.to.serialized_length() + + self.source.serialized_length() + + self.target.serialized_length() + + self.amount.serialized_length() + + self.gas.serialized_length() + + self.id.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.deploy_hash.write_bytes(writer)?; + self.from.write_bytes(writer)?; + self.to.write_bytes(writer)?; + self.source.write_bytes(writer)?; + self.target.write_bytes(writer)?; + self.amount.write_bytes(writer)?; + self.gas.write_bytes(writer)?; + self.id.write_bytes(writer)?; + Ok(()) + } +} + +impl FromBytes for TransferV1 { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (deploy_hash, rem) = FromBytes::from_bytes(bytes)?; + let (from, rem) = AccountHash::from_bytes(rem)?; + let (to, rem) = >::from_bytes(rem)?; + let (source, rem) = URef::from_bytes(rem)?; + let (target, rem) = URef::from_bytes(rem)?; + let (amount, rem) = U512::from_bytes(rem)?; + let (gas, rem) = U512::from_bytes(rem)?; + let (id, rem) = >::from_bytes(rem)?; + Ok(( + TransferV1 { + deploy_hash, + from, + to, + source, + target, + amount, + gas, + id, + }, + rem, + )) + } +} diff --git a/types/src/transfer/transfer_v1/transfer_v1_addr.rs b/types/src/transfer/transfer_v1/transfer_v1_addr.rs new file mode 100644 index 0000000000..42381ac05e --- /dev/null +++ 
b/types/src/transfer/transfer_v1/transfer_v1_addr.rs @@ -0,0 +1,219 @@ +use alloc::{format, string::String, vec::Vec}; +use core::{ + convert::TryFrom, + fmt::{self, Debug, Display, Formatter}, +}; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(any(feature = "testing", test))] +use rand::Rng; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer}; + +use super::super::TransferFromStrError; +pub(super) const TRANSFER_ADDR_FORMATTED_STRING_PREFIX: &str = "transfer-"; +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +use crate::{ + bytesrepr::{self, FromBytes, ToBytes}, + checksummed_hex, CLType, CLTyped, +}; + +/// The length of a version 1 transfer address. +pub const TRANSFER_ADDR_LENGTH: usize = 32; + +/// A newtype wrapping a [u8; [TRANSFER_ADDR_LENGTH]] which is the raw bytes of the +/// transfer address. +#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash, Default)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr( + feature = "json-schema", + derive(JsonSchema), + schemars(description = "Hex-encoded version 1 transfer address.") +)] +pub struct TransferAddr( + #[cfg_attr(feature = "json-schema", schemars(skip, with = "String"))] + [u8; TRANSFER_ADDR_LENGTH], +); + +impl TransferAddr { + /// Constructs a new `TransferV1Addr` instance from the raw bytes. + pub const fn new(value: [u8; TRANSFER_ADDR_LENGTH]) -> TransferAddr { + TransferAddr(value) + } + + /// Returns the raw bytes of the transfer address as an array. + pub fn value(&self) -> [u8; TRANSFER_ADDR_LENGTH] { + self.0 + } + + /// Returns the raw bytes of the transfer address as a `slice`. + pub fn as_bytes(&self) -> &[u8] { + &self.0 + } + + /// Formats the `TransferV1Addr` as a prefixed, hex-encoded string. 
+ pub fn to_formatted_string(self) -> String { + format!( + "{}{}", + TRANSFER_ADDR_FORMATTED_STRING_PREFIX, + base16::encode_lower(&self.0), + ) + } + + /// Parses a string formatted as per `Self::to_formatted_string()` into a `TransferV1Addr`. + pub fn from_formatted_str(input: &str) -> Result { + let remainder = input + .strip_prefix(TRANSFER_ADDR_FORMATTED_STRING_PREFIX) + .ok_or(TransferFromStrError::InvalidPrefix)?; + let bytes = + <[u8; TRANSFER_ADDR_LENGTH]>::try_from(checksummed_hex::decode(remainder)?.as_ref())?; + Ok(TransferAddr(bytes)) + } + + /// Returns a random `TransferV1Addr`. + #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + TransferAddr(rng.gen()) + } +} + +impl Serialize for TransferAddr { + fn serialize(&self, serializer: S) -> Result { + if serializer.is_human_readable() { + self.to_formatted_string().serialize(serializer) + } else { + self.0.serialize(serializer) + } + } +} + +impl<'de> Deserialize<'de> for TransferAddr { + fn deserialize>(deserializer: D) -> Result { + if deserializer.is_human_readable() { + let formatted_string = String::deserialize(deserializer)?; + TransferAddr::from_formatted_str(&formatted_string).map_err(SerdeError::custom) + } else { + let bytes = <[u8; TRANSFER_ADDR_LENGTH]>::deserialize(deserializer)?; + Ok(TransferAddr(bytes)) + } + } +} + +impl Display for TransferAddr { + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + write!(formatter, "{}", base16::encode_lower(&self.0)) + } +} + +impl Debug for TransferAddr { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + write!(f, "TransferV1Addr({})", base16::encode_lower(&self.0)) + } +} + +impl CLTyped for TransferAddr { + fn cl_type() -> CLType { + CLType::ByteArray(TRANSFER_ADDR_LENGTH as u32) + } +} + +impl ToBytes for TransferAddr { + #[inline(always)] + fn to_bytes(&self) -> Result, bytesrepr::Error> { + self.0.to_bytes() + } + + #[inline(always)] + fn serialized_length(&self) -> usize { + 
self.0.serialized_length() + } + + #[inline(always)] + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.0.write_bytes(writer) + } +} + +impl FromBytes for TransferAddr { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (bytes, remainder) = <[u8; TRANSFER_ADDR_LENGTH]>::from_bytes(bytes)?; + Ok((TransferAddr(bytes), remainder)) + } +} + +#[cfg(test)] +mod tests { + use crate::{bytesrepr, testing::TestRng}; + + use super::*; + + #[test] + fn transfer_addr_from_str() { + let transfer_address = TransferAddr([4; 32]); + let encoded = transfer_address.to_formatted_string(); + let decoded = TransferAddr::from_formatted_str(&encoded).unwrap(); + assert_eq!(transfer_address, decoded); + + let invalid_prefix = + "transferv-0000000000000000000000000000000000000000000000000000000000000000"; + assert!(matches!( + TransferAddr::from_formatted_str(invalid_prefix), + Err(TransferFromStrError::InvalidPrefix) + )); + + let invalid_prefix = + "transfer0000000000000000000000000000000000000000000000000000000000000000"; + assert!(matches!( + TransferAddr::from_formatted_str(invalid_prefix), + Err(TransferFromStrError::InvalidPrefix) + )); + + let short_addr = "transfer-00000000000000000000000000000000000000000000000000000000000000"; + assert!(matches!( + TransferAddr::from_formatted_str(short_addr), + Err(TransferFromStrError::Length(_)) + )); + + let long_addr = + "transfer-000000000000000000000000000000000000000000000000000000000000000000"; + assert!(matches!( + TransferAddr::from_formatted_str(long_addr), + Err(TransferFromStrError::Length(_)) + )); + + let invalid_hex = + "transfer-000000000000000000000000000000000000000000000000000000000000000g"; + assert!(matches!( + TransferAddr::from_formatted_str(invalid_hex), + Err(TransferFromStrError::Hex(_)) + )); + } + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + let transfer_address = TransferAddr::random(rng); + 
bytesrepr::test_serialization_roundtrip(&transfer_address) + } + + #[test] + fn bincode_roundtrip() { + let rng = &mut TestRng::new(); + let transfer_address = TransferAddr::random(rng); + let serialized = bincode::serialize(&transfer_address).unwrap(); + let decoded = bincode::deserialize(&serialized).unwrap(); + assert_eq!(transfer_address, decoded); + } + + #[test] + fn json_roundtrip() { + let rng = &mut TestRng::new(); + let transfer_address = TransferAddr::random(rng); + let json_string = serde_json::to_string_pretty(&transfer_address).unwrap(); + let decoded = serde_json::from_str(&json_string).unwrap(); + assert_eq!(transfer_address, decoded); + } +} diff --git a/types/src/transfer/transfer_v2.rs b/types/src/transfer/transfer_v2.rs new file mode 100644 index 0000000000..193242321d --- /dev/null +++ b/types/src/transfer/transfer_v2.rs @@ -0,0 +1,120 @@ +use alloc::vec::Vec; + +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +use crate::{ + account::AccountHash, + bytesrepr::{self, FromBytes, ToBytes}, + transaction::TransactionHash, + Gas, InitiatorAddr, URef, U512, +}; + +/// Represents a version 2 transfer from one purse to another. +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, Debug)] +#[cfg_attr(feature = "datasize", derive(DataSize))] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[serde(deny_unknown_fields)] +pub struct TransferV2 { + /// Transaction that created the transfer. + pub transaction_hash: TransactionHash, + /// Entity from which transfer was executed. + pub from: InitiatorAddr, + /// Account to which funds are transferred. + pub to: Option, + /// Source purse. + pub source: URef, + /// Target purse. + pub target: URef, + /// Transfer amount. + pub amount: U512, + /// Gas. + pub gas: Gas, + /// User-defined ID. + pub id: Option, +} + +impl TransferV2 { + /// Creates a [`TransferV2`]. 
+ #[allow(clippy::too_many_arguments)] + pub fn new( + transaction_hash: TransactionHash, + from: InitiatorAddr, + to: Option, + source: URef, + target: URef, + amount: U512, + gas: Gas, + id: Option, + ) -> Self { + TransferV2 { + transaction_hash, + from, + to, + source, + target, + amount, + gas, + id, + } + } +} + +impl ToBytes for TransferV2 { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buf = Vec::new(); + self.write_bytes(&mut buf)?; + Ok(buf) + } + + fn serialized_length(&self) -> usize { + self.transaction_hash.serialized_length() + + self.from.serialized_length() + + self.to.serialized_length() + + self.source.serialized_length() + + self.target.serialized_length() + + self.amount.serialized_length() + + self.gas.serialized_length() + + self.id.serialized_length() + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + self.transaction_hash.write_bytes(writer)?; + self.from.write_bytes(writer)?; + self.to.write_bytes(writer)?; + self.source.write_bytes(writer)?; + self.target.write_bytes(writer)?; + self.amount.write_bytes(writer)?; + self.gas.write_bytes(writer)?; + self.id.write_bytes(writer) + } +} + +impl FromBytes for TransferV2 { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (transaction_hash, remainder) = TransactionHash::from_bytes(bytes)?; + let (from, remainder) = InitiatorAddr::from_bytes(remainder)?; + let (to, remainder) = >::from_bytes(remainder)?; + let (source, remainder) = URef::from_bytes(remainder)?; + let (target, remainder) = URef::from_bytes(remainder)?; + let (amount, remainder) = U512::from_bytes(remainder)?; + let (gas, remainder) = Gas::from_bytes(remainder)?; + let (id, remainder) = >::from_bytes(remainder)?; + Ok(( + TransferV2 { + transaction_hash, + from, + to, + source, + target, + amount, + gas, + id, + }, + remainder, + )) + } +} diff --git a/types/src/uint.rs b/types/src/uint.rs index 53666f50bc..ca8d7ab280 100644 --- a/types/src/uint.rs +++ 
b/types/src/uint.rs @@ -10,7 +10,10 @@ use core::{ }; use num_integer::Integer; -use num_traits::{AsPrimitive, Bounded, Num, One, Unsigned, WrappingAdd, WrappingSub, Zero}; +use num_traits::{ + AsPrimitive, Bounded, CheckedAdd, CheckedMul, CheckedSub, Num, One, Unsigned, WrappingAdd, + WrappingSub, Zero, +}; use rand::{ distributions::{Distribution, Standard}, Rng, @@ -28,21 +31,24 @@ use crate::bytesrepr::{self, Error, FromBytes, ToBytes, U8_SERIALIZED_LENGTH}; clippy::manual_range_contains, clippy::range_plus_one, clippy::transmute_ptr_to_ptr, - clippy::clippy::reversed_empty_ranges + clippy::reversed_empty_ranges, + clippy::manual_div_ceil )] mod macro_code { + #[cfg(feature = "datasize")] + use datasize::DataSize; use uint::construct_uint; construct_uint! { - #[derive(datasize::DataSize)] + #[cfg_attr(feature = "datasize", derive(DataSize))] pub struct U512(8); } construct_uint! { - #[derive(datasize::DataSize)] + #[cfg_attr(feature = "datasize", derive(DataSize))] pub struct U256(4); } construct_uint! { - #[derive(datasize::DataSize)] + #[cfg_attr(feature = "datasize", derive(DataSize))] pub struct U128(2); } } @@ -51,6 +57,7 @@ pub use self::macro_code::{U128, U256, U512}; /// Error type for parsing [`U128`], [`U256`], [`U512`] from a string. #[derive(Debug)] +#[non_exhaustive] pub enum UIntParseError { /// Contains the parsing error from the `uint` crate, which only supports base-10 parsing. FromDecStr(uint::FromDecStrErr), @@ -62,6 +69,11 @@ pub enum UIntParseError { macro_rules! impl_traits_for_uint { ($type:ident, $total_bytes:expr, $test_mod:ident) => { + impl $type { + /// The smallest value that can be represented by this type. + pub const MIN: $type = $type([0; $total_bytes / 8]); + } + impl Serialize for $type { fn serialize(&self, serializer: S) -> Result { if serializer.is_human_readable() { @@ -149,8 +161,8 @@ macro_rules! 
impl_traits_for_uint { if deserializer.is_human_readable() { let decimal_string = String::deserialize(deserializer)?; - return Ok(Self::from_dec_str(&decimal_string) - .map_err(|error| de::Error::custom(format!("{:?}", error)))?); + return Self::from_dec_str(&decimal_string) + .map_err(|error| de::Error::custom(format!("{:?}", error))); } deserializer.deserialize_struct("bigint", FIELDS, BigNumVisitor) @@ -249,6 +261,24 @@ macro_rules! impl_traits_for_uint { } } + impl CheckedMul for $type { + fn checked_mul(&self, v: &$type) -> Option<$type> { + $type::checked_mul(*self, *v) + } + } + + impl CheckedSub for $type { + fn checked_sub(&self, v: &$type) -> Option<$type> { + $type::checked_sub(*self, *v) + } + } + + impl CheckedAdd for $type { + fn checked_add(&self, v: &$type) -> Option<$type> { + $type::checked_add(*self, *v) + } + } + impl Integer for $type { /// Unsigned integer division. Returns the same result as `div` (`/`). #[inline] @@ -425,7 +455,7 @@ macro_rules! impl_traits_for_uint { } } - #[cfg(feature = "std")] + #[cfg(feature = "json-schema")] impl schemars::JsonSchema for $type { fn schema_name() -> String { format!("U{}", $total_bytes * 8) @@ -662,7 +692,7 @@ mod tests { check_as_u256(U256::zero(), input); check_as_u512(U512::zero(), input); - input = i32::max_value() - 1; + input = i32::MAX - 1; check_as_i32(input, input); check_as_i64(i64::from(input), input); check_as_u8(input as u8, input); @@ -672,25 +702,16 @@ mod tests { check_as_u256(U256::from(input), input); check_as_u512(U512::from(input), input); - input = i32::min_value() + 1; + input = i32::MIN + 1; check_as_i32(input, input); check_as_i64(i64::from(input), input); check_as_u8(input as u8, input); check_as_u32(input as u32, input); check_as_u64(input as u64, input); - // i32::min_value() is -1 - i32::max_value() - check_as_u128( - U128::zero().wrapping_sub(&U128::from(i32::max_value())), - input, - ); - check_as_u256( - U256::zero().wrapping_sub(&U256::from(i32::max_value())), - input, - 
); - check_as_u512( - U512::zero().wrapping_sub(&U512::from(i32::max_value())), - input, - ); + // i32::MIN is -1 - i32::MAX + check_as_u128(U128::zero().wrapping_sub(&U128::from(i32::MAX)), input); + check_as_u256(U256::zero().wrapping_sub(&U256::from(i32::MAX)), input); + check_as_u512(U512::zero().wrapping_sub(&U512::from(i32::MAX)), input); } #[test] @@ -705,7 +726,7 @@ mod tests { check_as_u256(U256::zero(), input); check_as_u512(U512::zero(), input); - input = i64::max_value() - 1; + input = i64::MAX - 1; check_as_i32(input as i32, input); check_as_i64(input, input); check_as_u8(input as u8, input); @@ -715,25 +736,16 @@ mod tests { check_as_u256(U256::from(input), input); check_as_u512(U512::from(input), input); - input = i64::min_value() + 1; + input = i64::MIN + 1; check_as_i32(input as i32, input); check_as_i64(input, input); check_as_u8(input as u8, input); check_as_u32(input as u32, input); check_as_u64(input as u64, input); - // i64::min_value() is (-1 - i64::max_value()) - check_as_u128( - U128::zero().wrapping_sub(&U128::from(i64::max_value())), - input, - ); - check_as_u256( - U256::zero().wrapping_sub(&U256::from(i64::max_value())), - input, - ); - check_as_u512( - U512::zero().wrapping_sub(&U512::from(i64::max_value())), - input, - ); + // i64::MIN is (-1 - i64::MAX) + check_as_u128(U128::zero().wrapping_sub(&U128::from(i64::MAX)), input); + check_as_u256(U256::zero().wrapping_sub(&U256::from(i64::MAX)), input); + check_as_u512(U512::zero().wrapping_sub(&U512::from(i64::MAX)), input); } #[test] @@ -748,7 +760,7 @@ mod tests { check_as_u256(U256::zero(), input); check_as_u512(U512::zero(), input); - input = u8::max_value() - 1; + input = u8::MAX - 1; check_as_i32(i32::from(input), input); check_as_i64(i64::from(input), input); check_as_u8(input, input); @@ -771,7 +783,7 @@ mod tests { check_as_u256(U256::zero(), input); check_as_u512(U512::zero(), input); - input = u32::max_value() - 1; + input = u32::MAX - 1; check_as_i32(input as i32, input); 
check_as_i64(i64::from(input), input); check_as_u8(input as u8, input); @@ -794,7 +806,7 @@ mod tests { check_as_u256(U256::zero(), input); check_as_u512(U512::zero(), input); - input = u64::max_value() - 1; + input = u64::MAX - 1; check_as_i32(input as i32, input); check_as_i64(input as i64, input); check_as_u8(input as u8, input); @@ -833,7 +845,7 @@ mod tests { check_as_u256(U256::zero(), input); check_as_u512(U512::zero(), input); - input = U128::max_value() - 1; + input = U128::MAX - 1; let mut little_endian_bytes = [0_u8; 64]; input.to_little_endian(&mut little_endian_bytes[..16]); @@ -861,7 +873,7 @@ mod tests { check_as_u256(U256::zero(), input); check_as_u512(U512::zero(), input); - input = U256::max_value() - 1; + input = U256::MAX - 1; let mut little_endian_bytes = [0_u8; 64]; input.to_little_endian(&mut little_endian_bytes[..32]); @@ -889,7 +901,7 @@ mod tests { check_as_u256(U256::zero(), input); check_as_u512(U512::zero(), input); - input = U512::max_value() - 1; + input = U512::MAX - 1; let mut little_endian_bytes = [0_u8; 64]; input.to_little_endian(&mut little_endian_bytes); @@ -907,35 +919,35 @@ mod tests { #[test] fn wrapping_test_u512() { - let max = U512::max_value(); + let max = U512::MAX; let value = max.wrapping_add(&1.into()); assert_eq!(value, 0.into()); - let min = U512::min_value(); + let min = U512::MIN; let value = min.wrapping_sub(&1.into()); - assert_eq!(value, U512::max_value()); + assert_eq!(value, U512::MAX); } #[test] fn wrapping_test_u256() { - let max = U256::max_value(); + let max = U256::MAX; let value = max.wrapping_add(&1.into()); assert_eq!(value, 0.into()); - let min = U256::min_value(); + let min = U256::MIN; let value = min.wrapping_sub(&1.into()); - assert_eq!(value, U256::max_value()); + assert_eq!(value, U256::MAX); } #[test] fn wrapping_test_u128() { - let max = U128::max_value(); + let max = U128::MAX; let value = max.wrapping_add(&1.into()); assert_eq!(value, 0.into()); - let min = U128::min_value(); + let min = 
U128::MIN; let value = min.wrapping_sub(&1.into()); - assert_eq!(value, U128::max_value()); + assert_eq!(value, U128::MAX); } fn serde_roundtrip(value: T) { @@ -953,25 +965,36 @@ mod tests { #[test] fn serde_roundtrip_u512() { - serde_roundtrip(U512::min_value()); + serde_roundtrip(U512::MIN); serde_roundtrip(U512::from(1)); - serde_roundtrip(U512::from(u64::max_value())); - serde_roundtrip(U512::max_value()); + serde_roundtrip(U512::from(u64::MAX)); + serde_roundtrip(U512::MAX); } #[test] fn serde_roundtrip_u256() { - serde_roundtrip(U256::min_value()); + serde_roundtrip(U256::MIN); serde_roundtrip(U256::from(1)); - serde_roundtrip(U256::from(u64::max_value())); - serde_roundtrip(U256::max_value()); + serde_roundtrip(U256::from(u64::MAX)); + serde_roundtrip(U256::MAX); } #[test] fn serde_roundtrip_u128() { - serde_roundtrip(U128::min_value()); + serde_roundtrip(U128::MIN); serde_roundtrip(U128::from(1)); - serde_roundtrip(U128::from(u64::max_value())); - serde_roundtrip(U128::max_value()); + serde_roundtrip(U128::from(u64::MAX)); + serde_roundtrip(U128::MAX); + } + + #[test] + fn safe_conversion_from_u512_to_u64() { + let mut value = U512::from(u64::MAX); + assert_eq!(value.try_into(), Ok(u64::MAX)); + value += U512::one(); + assert!( + matches!(value.try_into(), Result::::Err(_)), + "integer overflow when casting to u64" + ); } } diff --git a/types/src/uref.rs b/types/src/uref.rs index aab6012107..8e2b039298 100644 --- a/types/src/uref.rs +++ b/types/src/uref.rs @@ -1,6 +1,3 @@ -// TODO - remove once schemars stops causing warning. 
-#![allow(clippy::field_reassign_with_default)] - use alloc::{format, string::String, vec::Vec}; use core::{ array::TryFromSliceError, @@ -9,20 +6,21 @@ use core::{ num::ParseIntError, }; +#[cfg(feature = "datasize")] use datasize::DataSize; -use hex_fmt::HexFmt; +#[cfg(any(feature = "testing", test))] use rand::{ distributions::{Distribution, Standard}, Rng, }; -#[cfg(feature = "std")] +#[cfg(feature = "json-schema")] use schemars::{gen::SchemaGenerator, schema::Schema, JsonSchema}; use serde::{de::Error as SerdeError, Deserialize, Deserializer, Serialize, Serializer}; use crate::{ bytesrepr, bytesrepr::{Error, FromBytes}, - AccessRights, ApiError, Key, ACCESS_RIGHTS_SERIALIZED_LENGTH, + checksummed_hex, AccessRights, ApiError, Key, ACCESS_RIGHTS_SERIALIZED_LENGTH, }; /// The number of bytes in a [`URef`] address. @@ -31,13 +29,14 @@ pub const UREF_ADDR_LENGTH: usize = 32; /// The number of bytes in a serialized [`URef`] where the [`AccessRights`] are not `None`. pub const UREF_SERIALIZED_LENGTH: usize = UREF_ADDR_LENGTH + ACCESS_RIGHTS_SERIALIZED_LENGTH; -const FORMATTED_STRING_PREFIX: &str = "uref-"; +pub(super) const UREF_FORMATTED_STRING_PREFIX: &str = "uref-"; /// The address of a `URef` (unforgeable reference) on the network. pub type URefAddr = [u8; UREF_ADDR_LENGTH]; /// Error while parsing a URef from a formatted string. #[derive(Debug)] +#[non_exhaustive] pub enum FromStrError { /// Prefix is not "uref-". InvalidPrefix, @@ -92,7 +91,8 @@ impl Display for FromStrError { /// the [`AccessRights`] of the reference. /// /// A `URef` can be used to index entities such as [`CLValue`](crate::CLValue)s, or smart contracts. 
-#[derive(Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord, Default, DataSize)] +#[derive(Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord, Default)] +#[cfg_attr(feature = "datasize", derive(DataSize))] pub struct URef(URefAddr, AccessRights); impl URef { @@ -112,32 +112,56 @@ impl URef { } /// Returns a new [`URef`] with the same address and updated access rights. + #[must_use] pub fn with_access_rights(self, access_rights: AccessRights) -> Self { URef(self.0, access_rights) } /// Removes the access rights from this [`URef`]. + #[must_use] pub fn remove_access_rights(self) -> Self { URef(self.0, AccessRights::NONE) } /// Returns `true` if the access rights are `Some` and /// [`is_readable`](AccessRights::is_readable) is `true` for them. + #[must_use] pub fn is_readable(self) -> bool { self.1.is_readable() } /// Returns a new [`URef`] with the same address and [`AccessRights::READ`] permission. + #[must_use] pub fn into_read(self) -> URef { URef(self.0, AccessRights::READ) } + /// Returns a new [`URef`] with the same address and [`AccessRights::WRITE`] permission. + #[must_use] + pub fn into_write(self) -> URef { + URef(self.0, AccessRights::WRITE) + } + + /// Returns a new [`URef`] with the same address and [`AccessRights::ADD`] permission. + #[must_use] + pub fn into_add(self) -> URef { + URef(self.0, AccessRights::ADD) + } + /// Returns a new [`URef`] with the same address and [`AccessRights::READ_ADD_WRITE`] /// permission. + #[must_use] pub fn into_read_add_write(self) -> URef { URef(self.0, AccessRights::READ_ADD_WRITE) } + /// Returns a new [`URef`] with the same address and [`AccessRights::READ_WRITE`] + /// permission. + #[must_use] + pub fn into_read_write(self) -> URef { + URef(self.0, AccessRights::READ_WRITE) + } + /// Returns `true` if the access rights are `Some` and /// [`is_writeable`](AccessRights::is_writeable) is `true` for them. 
pub fn is_writeable(self) -> bool { @@ -152,14 +176,14 @@ impl URef { /// Formats the address and access rights of the [`URef`] in a unique way that could be used as /// a name when storing the given `URef` in a global state. - pub fn to_formatted_string(&self) -> String { + pub fn to_formatted_string(self) -> String { // Extract bits as numerical value, with no flags marked as 0. let access_rights_bits = self.access_rights().bits(); // Access rights is represented as octal, which means that max value of u8 can // be represented as maximum of 3 octal digits. format!( "{}{}-{:03o}", - FORMATTED_STRING_PREFIX, + UREF_FORMATTED_STRING_PREFIX, base16::encode_lower(&self.addr()), access_rights_bits ) @@ -168,21 +192,26 @@ impl URef { /// Parses a string formatted as per `Self::to_formatted_string()` into a `URef`. pub fn from_formatted_str(input: &str) -> Result { let remainder = input - .strip_prefix(FORMATTED_STRING_PREFIX) + .strip_prefix(UREF_FORMATTED_STRING_PREFIX) .ok_or(FromStrError::InvalidPrefix)?; let parts = remainder.splitn(2, '-').collect::>(); if parts.len() != 2 { return Err(FromStrError::MissingSuffix); } - let addr = URefAddr::try_from(base16::decode(parts[0])?.as_ref())?; + let addr = URefAddr::try_from(checksummed_hex::decode(parts[0])?.as_ref())?; let access_rights_value = u8::from_str_radix(parts[1], 8)?; let access_rights = AccessRights::from_bits(access_rights_value) .ok_or(FromStrError::InvalidAccessRights)?; Ok(URef(addr, access_rights)) } + + /// Removes specific access rights from this URef if present. 
+ pub fn disable_access_rights(&mut self, access_rights: AccessRights) { + self.1.remove(access_rights); + } } -#[cfg(feature = "std")] +#[cfg(feature = "json-schema")] impl JsonSchema for URef { fn schema_name() -> String { String::from("URef") @@ -191,7 +220,7 @@ impl JsonSchema for URef { fn json_schema(gen: &mut SchemaGenerator) -> Schema { let schema = gen.subschema_for::(); let mut schema_object = schema.into_object(); - schema_object.metadata().description = Some("Hex-encoded, formatted URef.".to_string()); + schema_object.metadata().description = Some(String::from("Hex-encoded, formatted URef.")); schema_object.into() } } @@ -200,7 +229,12 @@ impl Display for URef { fn fmt(&self, f: &mut Formatter) -> fmt::Result { let addr = self.addr(); let access_rights = self.access_rights(); - write!(f, "URef({}, {})", HexFmt(&addr), access_rights) + write!( + f, + "URef({}, {})", + base16::encode_lower(&addr), + access_rights + ) } } @@ -221,6 +255,12 @@ impl bytesrepr::ToBytes for URef { fn serialized_length(&self) -> usize { UREF_SERIALIZED_LENGTH } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), self::Error> { + writer.extend_from_slice(&self.0); + self.1.write_bytes(writer)?; + Ok(()) + } } impl FromBytes for URef { @@ -265,6 +305,7 @@ impl TryFrom for URef { } } +#[cfg(any(feature = "testing", test))] impl Distribution for Standard { fn sample(&self, rng: &mut R) -> URef { URef::new(rng.gen(), rng.gen()) @@ -357,4 +398,29 @@ mod tests { let decoded = serde_json::from_str(&json_string).unwrap(); assert_eq!(uref, decoded); } + + #[test] + fn should_disable_access_rights() { + let mut uref = URef::new([255; 32], AccessRights::READ_ADD_WRITE); + assert!(uref.is_writeable()); + uref.disable_access_rights(AccessRights::WRITE); + assert_eq!(uref.access_rights(), AccessRights::READ_ADD); + + uref.disable_access_rights(AccessRights::WRITE); + assert!( + !uref.is_writeable(), + "Disabling access bit twice should be a noop" + ); + + 
assert_eq!(uref.access_rights(), AccessRights::READ_ADD); + + uref.disable_access_rights(AccessRights::READ_ADD); + assert_eq!(uref.access_rights(), AccessRights::NONE); + + uref.disable_access_rights(AccessRights::READ_ADD); + assert_eq!(uref.access_rights(), AccessRights::NONE); + + uref.disable_access_rights(AccessRights::NONE); + assert_eq!(uref.access_rights(), AccessRights::NONE); + } } diff --git a/types/src/validator_change.rs b/types/src/validator_change.rs new file mode 100644 index 0000000000..92b66f8d27 --- /dev/null +++ b/types/src/validator_change.rs @@ -0,0 +1,101 @@ +use crate::bytesrepr::{self, FromBytes, ToBytes}; +#[cfg(any(feature = "testing", test))] +use crate::testing::TestRng; +use alloc::vec::Vec; +#[cfg(feature = "datasize")] +use datasize::DataSize; +#[cfg(feature = "json-schema")] +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +/// A change to a validator's status between two eras. +#[derive(Serialize, Deserialize, Debug, Eq, PartialEq, Ord, PartialOrd)] +#[cfg_attr(feature = "json-schema", derive(JsonSchema))] +#[cfg_attr(feature = "datasize", derive(DataSize))] +pub enum ValidatorChange { + /// The validator got newly added to the validator set. + Added, + /// The validator was removed from the validator set. + Removed, + /// The validator was banned from this era. + Banned, + /// The validator was excluded from proposing new blocks in this era. + CannotPropose, + /// We saw the validator misbehave in this era. + SeenAsFaulty, +} + +impl ValidatorChange { + /// Returns a random `ValidatorChange`. 
+ #[cfg(any(feature = "testing", test))] + pub fn random(rng: &mut TestRng) -> Self { + use rand::Rng; + + match rng.gen_range(0..5) { + ADDED_TAG => ValidatorChange::Added, + REMOVED_TAG => ValidatorChange::Removed, + BANNED_TAG => ValidatorChange::Banned, + CANNOT_PROPOSE_TAG => ValidatorChange::CannotPropose, + SEEN_AS_FAULTY_TAG => ValidatorChange::SeenAsFaulty, + _ => unreachable!(), + } + } +} + +const ADDED_TAG: u8 = 0; +const REMOVED_TAG: u8 = 1; +const BANNED_TAG: u8 = 2; +const CANNOT_PROPOSE_TAG: u8 = 3; +const SEEN_AS_FAULTY_TAG: u8 = 4; + +impl ToBytes for ValidatorChange { + fn to_bytes(&self) -> Result, bytesrepr::Error> { + let mut buffer = bytesrepr::allocate_buffer(self)?; + self.write_bytes(&mut buffer)?; + Ok(buffer) + } + + fn write_bytes(&self, writer: &mut Vec) -> Result<(), bytesrepr::Error> { + match self { + ValidatorChange::Added => ADDED_TAG, + ValidatorChange::Removed => REMOVED_TAG, + ValidatorChange::Banned => BANNED_TAG, + ValidatorChange::CannotPropose => CANNOT_PROPOSE_TAG, + ValidatorChange::SeenAsFaulty => SEEN_AS_FAULTY_TAG, + } + .write_bytes(writer) + } + + fn serialized_length(&self) -> usize { + bytesrepr::U8_SERIALIZED_LENGTH + } +} + +impl FromBytes for ValidatorChange { + fn from_bytes(bytes: &[u8]) -> Result<(Self, &[u8]), bytesrepr::Error> { + let (tag, remainder) = u8::from_bytes(bytes)?; + let id = match tag { + ADDED_TAG => ValidatorChange::Added, + REMOVED_TAG => ValidatorChange::Removed, + BANNED_TAG => ValidatorChange::Banned, + CANNOT_PROPOSE_TAG => ValidatorChange::CannotPropose, + SEEN_AS_FAULTY_TAG => ValidatorChange::SeenAsFaulty, + _ => return Err(bytesrepr::Error::NotRepresentable), + }; + Ok((id, remainder)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::testing::TestRng; + + #[test] + fn bytesrepr_roundtrip() { + let rng = &mut TestRng::new(); + + let val = ValidatorChange::random(rng); + bytesrepr::test_serialization_roundtrip(&val); + } +} diff --git a/types/tests/version_numbers.rs 
b/types/tests/version_numbers.rs index 9f1d04aae3..5787cf5077 100644 --- a/types/tests/version_numbers.rs +++ b/types/tests/version_numbers.rs @@ -1,3 +1,4 @@ +#[cfg(feature = "version-sync")] #[test] fn test_html_root_url() { version_sync::assert_html_root_url_updated!("src/lib.rs"); diff --git a/update-rev.sh b/update-rev.sh deleted file mode 100755 index 422180d866..0000000000 --- a/update-rev.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash -set -ue - -DEST_FILES=("client/Cargo.toml") - -for f in "${DEST_FILES[@]}"; do - if [ -f $f ]; then - echo "[INFO] Going to update $f with revision set to $DRONE_BUILD_NUMBER" - sed -i s'/^revision =.*/revision = "'"${DRONE_BUILD_NUMBER}"'"/' $f - cat $f | grep -i revision - else - echo "[ERRPR] Unable to find: $f" - exit 1 - fi -done diff --git a/upload.sh b/upload.sh deleted file mode 100755 index 408aa1dbc7..0000000000 --- a/upload.sh +++ /dev/null @@ -1,206 +0,0 @@ -#!/bin/bash - -get_help() { - echo -e "Usage: $0 --repo-name --package-name [--package-version ]\n" - echo -e "Example: $0 --repo-name debian --package-name [casper-node|casper-client]\n" - echo "Note: If --package-version is not set DRONE_TAG will be used." 
-} - -parse_args() { - if [ $# -eq 0 ]; then - get_help - exit 1 - fi - optspec=":h-:" - while getopts "$optspec" optchar; do - case "${optchar}" in - -) - case "${OPTARG}" in - repo-name) - val="${!OPTIND}"; OPTIND=$(( $OPTIND + 1 )) - #echo "Parsing option: '--${OPTARG}', value: '${val}'" >&2 - BINTRAY_REPO_NAME=${val} - ;; - package-name) - val="${!OPTIND}"; OPTIND=$(( $OPTIND + 1 )) - #echo "Parsing option: '--${OPTARG}', value: '${val}'" >&2 - BINTRAY_PACKAGE_NAME=${val} - ;; - package-version) - val="${!OPTIND}"; OPTIND=$(( $OPTIND + 1 )) - #echo "Parsing option: '--${OPTARG}', value: '${val}'" >&2 - PACKAGE_VERSION=${val} - ;; - package-tag) - val="${!OPTIND}"; OPTIND=$(( $OPTIND + 1 )) - #echo "Parsing option: '--${OPTARG}', value: '${val}'" >&2 - PACKAGE_TAG=${val} - ;; - help) - get_help - exit 1 - ;; - *) - echo "${optspec:0:1} ${OPTARG}" - #if [ "$OPTERR" = 1 ] && [ "${optspec:0:1}" != ":" ]; then - if [ "$OPTERR" = 1 ]; then - echo -e "Unknown option --${OPTARG}\n" >&2 - get_help - exit 1 - fi - ;; - esac;; - h) - get_help - exit 1 - ;; - *) - if [ "$OPTERR" = 1 ] || [ "${optspec:0:1}" = ":" ]; then - echo "Non-option argument: '-${OPTARG}'" >&2 - exit 1 - fi - ;; - esac - done - # obligatory paramas goes here - if [ -z ${BINTRAY_REPO_NAME+x} ]; then - echo "[ERROR] Missing repository name." - get_help - exit 1 - fi - - if [ -z ${BINTRAY_PACKAGE_NAME+x} ]; then - echo "[ERROR] Missing package name." 
- get_help - exit 1 - fi - - # if not set take it from node/Cargo.toml - if [ -z ${PACKAGE_VERSION+x} ]; then - NODE_CONFIG_FILE="$RUN_DIR/node/Cargo.toml" - PACKAGE_VERSION="$(grep -oP "^version\s=\s\"\K(.*)\"" $NODE_CONFIG_FILE | sed -e s'/"//g')" - fi - - echo "[INFO] PACKAGE_VERSION set to $PACKAGE_VERSION" -} - -abspath() { - # generate absolute path from relative path - # $1 : relative filename - # return : absolute path - if [ -d "$1" ]; then - # dir - (cd "$1"; pwd) - elif [ -f "$1" ]; then - # file - if [[ $1 == */* ]]; then - echo "$(cd "${1%/*}"; pwd)/${1##*/}" - else - echo "$(pwd)/$1" - fi - fi -} - -export RUN_DIR=$(dirname $(abspath $0)) -parse_args "$@" - -export CREDENTIAL_FILE="$RUN_DIR/credentials.json" -export CREDENTIAL_FILE_TMP="$RUN_DIR/vault_output.json" -export API_URL="https://api.bintray.com" -export UPLOAD_DIR="$(pwd)/target/debian" -export BINTRAY_USER='casperlabs-service' -export BINTRAY_ORG_NAME='casperlabs' -export BINTRAY_REPO_URL="$BINTRAY_ORG_NAME/$BINTRAY_REPO_NAME/$BINTRAY_PACKAGE_NAME" -export CL_VAULT_URL="${CL_VAULT_HOST}/v1/sre/cicd/bintray" - -echo "Run dir set to: $RUN_DIR" -echo "Repo URL: $BINTRAY_REPO_URL" -echo "Package version set to: $PACKAGE_VERSION" - -# get bintray private key and passphrase -echo "-H \"X-Vault-Token: $CL_VAULT_TOKEN\"" > ~/.curlrc -curl -s -q -X GET $CL_VAULT_URL/credentials --output $CREDENTIAL_FILE_TMP -if [ ! -f $CREDENTIAL_FILE_TMP ]; then - echo "[ERROR] Unable to fetch credentails for bintray from vault: $CL_VAULT_URL" - exit 1 -else - echo "Found bintray credentials file - $CREDENTIAL_FILE_TMP" - # get just the body required by bintray, strip off vault payload - /bin/cat $CREDENTIAL_FILE_TMP | jq -r .data > $CREDENTIAL_FILE -fi - -# get bintray api key -curl -s -q -X GET $CL_VAULT_URL/bintray_api_key --output $CREDENTIAL_FILE_TMP -if [ ! 
-f $CREDENTIAL_FILE_TMP ]; then - echo "[ERROR] Unable to fetch api_key for bintray from vault: $CL_VAULT_URL" - exit 1 -else - echo "Found bintray credentials file - $CREDENTIAL_FILE_TMP" - # get just the body required by bintray, strip off vault payload - export BINTRAY_API_KEY=$(/bin/cat $CREDENTIAL_FILE_TMP | jq -r .data.bintray_api_key) -fi - -if [ "$PACKAGE_TAG" == "true" ]; then - REV="0" -else - REV=${DRONE_BUILD_NUMBER} -fi - -if [ -d "$UPLOAD_DIR" ]; then - DEB_FILE="${BINTRAY_PACKAGE_NAME}_${PACKAGE_VERSION}-${REV}_amd64.deb" - DEB_FILE_PATH="$UPLOAD_DIR/$DEB_FILE" -else - echo "[ERROR] Not such dir: $UPLOAD_DIR" - exit 1 -fi - -# allow overwrite version for test repo -if [ "$BINTRAY_REPO_NAME" == "casper-debian-tests" ]; then - echo "[INFO] Setting override=1 for the test repo: $BINTRAY_REPO_NAME" - export BINTRAY_UPLOAD_URL="$API_URL/content/$BINTRAY_REPO_URL/${PACKAGE_VERSION}/$DEB_FILE?override=1" -else - export BINTRAY_UPLOAD_URL="$API_URL/content/$BINTRAY_REPO_URL/${PACKAGE_VERSION}/$DEB_FILE" -fi - -echo "Uploading file ${DEB_FILE_PATH} to bintray:${PACKAGE_VERSION} ..." -if [ -f "$DEB_FILE_PATH" ]; then - curl -T $DEB_FILE_PATH -u$BINTRAY_USER:$BINTRAY_API_KEY $BINTRAY_UPLOAD_URL -else - echo "[ERROR] Unable to find $DEB_FILE_PATH in $(pwd)" - exit 1 -fi - -sleep 5 && echo -e "\nPublishing CL Packages on bintray..." -curl -s -X POST -u$BINTRAY_USER:$BINTRAY_API_KEY $API_URL/content/$BINTRAY_REPO_URL/${PACKAGE_VERSION}/publish - -sleep 5 && echo -e "\nGPG Signing CL Packages on bintray..." -curl -s -X POST -u$BINTRAY_USER:$BINTRAY_API_KEY -H "Content-Type: application/json" --data "@$CREDENTIAL_FILE" $API_URL/gpg/$BINTRAY_REPO_URL/versions/${PACKAGE_VERSION} - -sleep 5 && echo -e "\nPublishing GPG Signatures on bintray..." -curl -s -X POST -u$BINTRAY_USER:$BINTRAY_API_KEY $API_URL/content/$BINTRAY_REPO_URL/${PACKAGE_VERSION}/publish - -sleep 5 && echo -e "\nCalculating repo metadata on bintray..." 
-curl -s -X POST -u$BINTRAY_USER:$BINTRAY_API_KEY -H "Content-Type: application/json" --data "@$CREDENTIAL_FILE" $API_URL/calc_metadata/$BINTRAY_REPO_URL/versions/${PACKAGE_VERSION} - -echo -e "\n Fetch meta data for uploaded files" -TEMP_DEB_FILE=uploaded_contents_debian_${PACKAGE_VERSION}.json -curl -s -X GET -u$BINTRAY_USER:$BINTRAY_API_KEY -H "Content-Type: application/json" $API_URL/packages/$BINTRAY_REPO_URL/files?include_unpublished=1 > $TEMP_DEB_FILE - -# checking -DEB_FILE_NAME=$(cat $TEMP_DEB_FILE | jq -r 'nth(1; .[] | select (.version == "'${PACKAGE_VERSION}'") ) | .path' ) - -DEB_ASC_FILE_NAME=$(cat $TEMP_DEB_FILE | jq -r 'nth(0; .[] | select (.version == "'${PACKAGE_VERSION}'") ) | .path' ) - -if [[ "$DEB_FILE_NAME" =~ $BINTRAY_PACKAGE_NAME.*.deb$ ]]; then - echo "Found $DEB_FILE_NAME on bintray"; -else - echo "[ERRROR] Unable to find uploaded packages on bintray - missing $DEB_FILE_NAME" - exit 1 -fi - -if [[ "$DEB_ASC_FILE_NAME" =~ $BINTRAY_PACKAGE_NAME.*.deb.asc$ ]]; then - echo "Found $DEB_ASC_FILE_NAME on bintray"; -else - echo "[ERRROR] Unable to find uploaded packages on bintray - missing $DEB_ASC_FILE_NAME" - exit 1 -fi diff --git a/utils/casper-tool/casper-tool.py b/utils/casper-tool/casper-tool.py index 664cf1aa1a..78b52d7a80 100755 --- a/utils/casper-tool/casper-tool.py +++ b/utils/casper-tool/casper-tool.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 -from datetime import datetime, timedelta +from datetime import datetime, timedelta, timezone import os import subprocess @@ -226,7 +226,7 @@ def create_chainspec(template, network_name, genesis_in, contract_paths): chainspec = toml.load(open(template)) show_val("Chain name", network_name) - genesis_timestamp = (datetime.utcnow() + timedelta(seconds=genesis_in)).isoformat( + genesis_timestamp = (datetime.now(timezone.utc).replace(tzinfo=None) + timedelta(seconds=genesis_in)).isoformat( "T" ) + "Z" diff --git a/utils/dump-cpu-features.sh b/utils/dump-cpu-features.sh new file mode 100755 index 
0000000000..0ca25658fa --- /dev/null +++ b/utils/dump-cpu-features.sh @@ -0,0 +1,15 @@ +#!/bin/sh + +# Dumps a list of X86 extensions found to be in use by the given binary, in alphabetical order. + +set -e + +if [ $# -ne 1 ]; then + echo "usage: $(basename $0) binary" +fi; + +BINARY=$1 + +export PATH="$HOME/.cargo/bin:$PATH" + +elfx86exts $BINARY | grep -v 'CPU Generation' | cut -f1 -d ' ' | sort diff --git a/utils/global-state-update-gen/Cargo.toml b/utils/global-state-update-gen/Cargo.toml new file mode 100644 index 0000000000..ecb75740eb --- /dev/null +++ b/utils/global-state-update-gen/Cargo.toml @@ -0,0 +1,38 @@ +[package] +name = "global-state-update-gen" +version = "0.3.0" +authors = ["Bartłomiej Kamiński "] +edition = "2021" +license-file = "../../LICENSE" +description = "A tool used to make changes to casper-node's global state" +readme = "README.md" + +[dependencies] +itertools = "0.10.3" +base16 = "0.2.1" +base64 = "0.13" +casper-engine-test-support = { path = "../../execution_engine_testing/test_support" } +casper-execution-engine = { path = "../../execution_engine" } +casper-storage = { path = "../../storage" } +casper-types = { path = "../../types" } +clap = "2.33" +lmdb-rkv = "0.14" +rand = "0.8" +serde = "1" +toml = "0.5" + +[package.metadata.deb] +revision = "0" +depends = "$auto" +assets = [ + # binary + ["../../target/release/global-state-update-gen", "/usr/bin/", "755"], +] + +[package.metadata.deb.variants.bionic] +name = "global-state-update-gen" +revision = "0+bionic" + +[package.metadata.deb.variants.focal] +name = "global-state-update-gen" +revision = "0+focal" diff --git a/utils/global-state-update-gen/README.md b/utils/global-state-update-gen/README.md new file mode 100644 index 0000000000..668a6250f4 --- /dev/null +++ b/utils/global-state-update-gen/README.md @@ -0,0 +1,126 @@ +# global-state-update-gen + +If the network experiences a catastrophic failure, it might become impossible to make changes to the global state required for 
fixing the situation via normal channels (i.e. executing deploys on the network), and we might instead need to resort to social consensus outside the blockchain and applying the changes manually. This tool facilitates generating files specifying such changes, which can then be applied during an emergency upgrade. + +The tool consists of 1 main subcommand and 3 legacy subcommands: +- `generic` - a generic update based on a config file, +- `change-validators` (legacy) - updating the set of validators on the network, +- `balances` (legacy) - performing some transfers between accounts, +- `migrate-into-system-contract-registry` (legacy) - this was a single-use subcommand intended to introduce some changes to the system structures in the global state that couldn't be made otherwise. + +## A detailed description of the subcommands + +All subcommands share 3 parameters: + +- `-h`, `--help` - prints help information about the subcommand, +- `-d`, `--data-dir` - path to the data directory of a node, containing its storage and global state database, +- `-s`, `--state-hash` - the root hash of the global state to be used as the base for the update - usually the state root hash from the last block before the planned upgrade. + +### `generic` + +Usage: `global-state-update-gen generic -d DATA-DIRECTORY -s STATE-ROOT-HASH CONFIG-FILE` + +The config file should be a TOML file, which can contain the following values: + +```toml +# can be true or false, optional, false if not present; more detailed description below +# *must* be listed before all [[accounts]] and [[transfers]] entries +only_listed_validators = false + +# can be true or false, optional, false if not present; more detailed description below +# *must* be listed before all [[accounts]] and [[transfers]] entries +slash_instead_of_unbonding = false + +# multiple [[accounts]] definitions are possible +[[accounts]] +public_key = "..." # the public key of the account owner +balance = "..." 
# account balance, in motes (optional) + +# if the account is supposed to be validator, define the section below +[accounts.validator] +bonded_amount = "..." # the staked amount for this account, in motes +delegation_rate = ... # the delegation rate for this validator (optional) + +# define delegators as entries in accounts.validator.delegators +# multiple definitions per validator are possible +[[accounts.validator.delegators]] +public_key = "..." # the delegator's public key +delegated_amount = "..." # the amount delegated to the validator, in motes + +# multiple [[transfers]] definitions are possible +[[transfers]] +from = "account-hash-..." # the account hash to transfer funds from +to = "account-hash-..." # the account hash to transfer funds to +amount = "..." # the amount to be transferred, in motes +``` + +The `[[accounts]]` definitions control the balances and stakes of accounts on the network. It is possible to change the set of validators using these definitions, by changing the staked amounts. + +For every such definition, if the `balance` key is present, the balance of the account will be updated. The account will be created if it didn't exist previously. If the `balance` key is not present, the pre-existing balance (if any) will be left untouched. + +Updating the validator properties (stake, delegators) behaves differently based on the value of `only_listed_validators`. If it is false, the existing list of validators is treated as a base, and validator properties are modified based on the entries in the config. If the `validator` key is present, the stake and delegators are set to the configured values. If it is not present, the pre-existing properties are left untouched. + +If `only_listed_validators` is true, pre-existing validators are discarded, and only the accounts with non-zero stakes configured in the config file will be validators after the update. 
This option exists to match the behavior of the legacy `change-validators` subcommand and to cater to some use cases in testing. + +If `slash_instead_of_unbonding` is true, pre-existing validators which are being discarded and their delegators have their staked amounts slashed rather than unbonded. + +So, for example, if the network has 100 validators and we want to only change the stake of a single one: +- with `only_listed_validators` set to false, we need only a single `[[accounts]]` entry for the validator we want to change, +- with `only_listed_validators` set to true, we need 100 `[[accounts]]` entries, one per each account that is supposed to be a validator after the upgrade. + +On the other hand, replacing 100 validators with 5 different ones (a use case in testing setups) would require: +- just 5 entries for the new validators if `only_listed_validators` is true, +- 105 entries - 100 to remove the old validators, and 5 to add the new ones - if `only_listed_validators` is false. + +The `[[transfers]]` definitions simply transfer funds from one account to another. Every definition requires a source account, a target account and an amount to be defined. If the source account doesn't contain enough funds for the transfer, it won't be executed. If the target account doesn't exist, it will be created. + +**Note:** transfers are executed before the `[[accounts]]` definitions. This means that it is possible to overwrite the effects of a transfer if the source or target account is also specified among `[[accounts]]`. 
+ +After the transfers have been executed and account balances and stakes have been updated, the tool also updates the auction contract state in the form of the snapshot of validators sets for the next few eras, as well as bids and withdraws: +- bids of accounts with zero stake are set to empty, and bids of accounts with nonzero stake are created or updated to reflect the configured amounts, +- if `only_listed_validators` is true, any bid larger than the smallest stake among the new set of validators is reset to zero, +- the withdraws of validators that are being removed from the set are cancelled. + +The tool also takes care to update the total supply in the network to reflect the changes in balances resulting from the configured modifications to the state. + +### Legacy commands + +#### `change-validators` + +Usage: `global-state-update-gen change-validators -d DATA-DIRECTORY -s STATE-ROOT-HASH -v VALIDATOR-KEY,STAKE,BALANCE -v VALIDATOR-KEY,STAKE,BALANCE ...` + +Apart from the common `-d` and `-s` parameters, the subcommand has one additional parameter, `-v` or `--validator`. Multiple such parameters can be supplied. Also note that the third field, `BALANCE`, is optional (ie., the definition can be just `-v VALIDATOR-KEY,STAKE`). + +Every `-v` instance configures a single validator to be included in the set after the upgrade. A `-v KEY,STAKE,BALANCE` corresponds to an `[[accounts]]` entry in the config file: + +```toml +[[accounts]] +public_key = "KEY" +balance = "BALANCE" + +[accounts.validator] +bonded_amount = "STAKE" +``` + +The command as a whole works just like a config file with only `[[accounts]]` entries, `only_listed_validators` set to `true` and `slash_instead_of_unbonding` set to `false` or omitted. 
+ +#### `balances` + +Usage: `global-state-update-gen balances -d DATA_DIRECTORY -s STATE_ROOT_HASH -f FROM-ACCOUNT -t TO-ACCOUNT -a AMOUNT` + +This functions exactly like a config file with just a single transfer configured: + +```toml +[[transfers]] +from = "FROM-ACCOUNT" +to = "TO-ACCOUNT" +amount = "AMOUNT" +``` + +#### `migrate-into-system-contract-registry` + +Usage: `global-state-update-gen migrate-into-system-contract-registry -d DATA_DIRECTORY -s STATE_ROOT_HASH` + +This subcommand doesn't take any additional parameters. It adds a registry of system contracts to the global state, based either on the data contained within the state (if `-s` is present), or based on the protocol data in storage (if `-s` is not present). + +It has been used to add the registry to the global state during the upgrade to 1.4.0 and will most likely never be needed again. diff --git a/utils/global-state-update-gen/src/admins.rs b/utils/global-state-update-gen/src/admins.rs new file mode 100644 index 0000000000..6ac17bdd36 --- /dev/null +++ b/utils/global-state-update-gen/src/admins.rs @@ -0,0 +1,98 @@ +use casper_engine_test_support::LmdbWasmTestBuilder; +use casper_execution_engine::engine_state::engine_config::DEFAULT_PROTOCOL_VERSION; +use casper_types::{ + account::Account, bytesrepr::ToBytes, contracts::NamedKeys, system::mint, AccessRights, + AsymmetricType, CLTyped, CLValue, EntityAddr, Key, PublicKey, StoredValue, URef, U512, +}; +use clap::ArgMatches; +use rand::Rng; + +use crate::utils::{hash_from_str, print_entry}; + +const DEFAULT_MAIN_PURSE_ACCESS_RIGHTS: AccessRights = AccessRights::READ_ADD_WRITE; + +fn create_purse() -> URef { + URef::new(rand::thread_rng().gen(), DEFAULT_MAIN_PURSE_ACCESS_RIGHTS) +} + +fn make_stored_clvalue(value: T) -> StoredValue { + let cl = CLValue::from_t(value).unwrap(); + StoredValue::CLValue(cl) +} + +pub(crate) fn generate_admins(matches: &ArgMatches<'_>) { + let data_dir = matches.value_of("data_dir").unwrap_or("."); + let state_hash = 
matches.value_of("hash").unwrap(); + + // Open the global state that should be in the supplied directory. + let post_state_hash = hash_from_str(state_hash); + let test_builder = LmdbWasmTestBuilder::open_raw( + data_dir, + Default::default(), + DEFAULT_PROTOCOL_VERSION, + post_state_hash, + ); + + let admin_values = matches.values_of("admin").expect("at least one argument"); + let protocol_version = DEFAULT_PROTOCOL_VERSION; + let mut total_supply = test_builder.total_supply(protocol_version, Some(post_state_hash)); + let total_supply_before = total_supply; + + for value in admin_values { + let mut fields = value.split(',').peekable(); + let field1 = fields.next().unwrap(); + let field2 = fields.next().unwrap(); + if fields.peek().is_some() { + panic!("correct syntax for --admin parameter is [PUBLIC_KEY,BALANCE]") + } + let pub_key = PublicKey::from_hex(field1.as_bytes()).expect("valid public key"); + let balance = U512::from_dec_str(field2).expect("valid balance amount"); + + let main_purse = create_purse(); + + let purse_balance_key = Key::Balance(main_purse.addr()); + let purse_balance_value = make_stored_clvalue(balance); + print_entry(&purse_balance_key, &purse_balance_value); + + let purse_uref_key = Key::URef(main_purse); + let purse_uref_value = make_stored_clvalue(()); + print_entry(&purse_uref_key, &purse_uref_value); + + let account_key = Key::Account(pub_key.to_account_hash()); + let account_value = { + let account = { + let account_hash = pub_key.to_account_hash(); + let named_keys = NamedKeys::default(); + Account::create(account_hash, named_keys, main_purse) + }; + StoredValue::Account(account) + }; + print_entry(&account_key, &account_value); + + total_supply = total_supply.checked_add(balance).expect("no overflow"); + } + + if total_supply == total_supply_before { + // Don't update total supply if it did not change + return; + } + + println!( + "# total supply increases from {} to {}", + total_supply_before, total_supply + ); + + let 
total_supply_key = { + let mint_contract_hash = test_builder.get_mint_contract_hash(); + + let mint_named_keys = + test_builder.get_named_keys(EntityAddr::new_system(mint_contract_hash.value())); + + mint_named_keys + .get(mint::TOTAL_SUPPLY_KEY) + .cloned() + .expect("valid key in mint named keys") + }; + let total_supply_value = make_stored_clvalue(total_supply); + print_entry(&total_supply_key, &total_supply_value); +} diff --git a/utils/global-state-update-gen/src/balances.rs b/utils/global-state-update-gen/src/balances.rs new file mode 100644 index 0000000000..e9a7a5d4a4 --- /dev/null +++ b/utils/global-state-update-gen/src/balances.rs @@ -0,0 +1,44 @@ +use clap::ArgMatches; + +use casper_engine_test_support::LmdbWasmTestBuilder; +use casper_execution_engine::engine_state::engine_config::DEFAULT_PROTOCOL_VERSION; +use casper_types::{account::AccountHash, U512}; + +use crate::{ + generic::{ + config::{Config, Transfer}, + update_from_config, + }, + utils::{hash_from_str, protocol_version_from_matches}, +}; + +pub(crate) fn generate_balances_update(matches: &ArgMatches<'_>) { + let data_dir = matches.value_of("data_dir").unwrap_or("."); + let state_hash = hash_from_str(matches.value_of("hash").unwrap()); + + let from_account = AccountHash::from_formatted_str(matches.value_of("from").unwrap()).unwrap(); + let to_account = AccountHash::from_formatted_str(matches.value_of("to").unwrap()).unwrap(); + let amount = U512::from_str_radix(matches.value_of("amount").unwrap(), 10).unwrap(); + + let protocol_version = protocol_version_from_matches(matches); + + let config = Config { + accounts: vec![], + transfers: vec![Transfer { + from: from_account, + to: to_account, + amount, + }], + only_listed_validators: false, + slash_instead_of_unbonding: false, + protocol_version, + }; + + let builder = LmdbWasmTestBuilder::open_raw( + data_dir, + Default::default(), + DEFAULT_PROTOCOL_VERSION, + state_hash, + ); + update_from_config(builder, config); +} diff --git 
a/utils/global-state-update-gen/src/decode.rs b/utils/global-state-update-gen/src/decode.rs new file mode 100644 index 0000000000..49433465c7 --- /dev/null +++ b/utils/global-state-update-gen/src/decode.rs @@ -0,0 +1,51 @@ +use std::{collections::BTreeMap, fmt, fs::File, io::Read}; + +use clap::ArgMatches; + +use casper_types::{ + bytesrepr::FromBytes, system::auction::SeigniorageRecipientsSnapshotV2, CLType, + GlobalStateUpdate, GlobalStateUpdateConfig, Key, StoredValue, +}; + +struct Entries(BTreeMap); + +impl fmt::Debug for Entries { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let mut map = f.debug_map(); + for (k, v) in &self.0 { + let debug_v: Box = match v { + StoredValue::CLValue(clv) => match clv.cl_type() { + CLType::Map { key, value: _ } if **key == CLType::U64 => { + // this should be the seigniorage recipient snapshot + let snapshot: SeigniorageRecipientsSnapshotV2 = + clv.clone().into_t().unwrap(); + Box::new(snapshot) + } + _ => Box::new(clv), + }, + _ => Box::new(v), + }; + map.key(k).value(&debug_v); + } + map.finish() + } +} + +pub(crate) fn decode_file(matches: &ArgMatches<'_>) { + let file_name = matches.value_of("file").unwrap(); + let mut file = File::open(file_name).unwrap(); + + let mut contents = String::new(); + file.read_to_string(&mut contents).unwrap(); + + let config: GlobalStateUpdateConfig = toml::from_str(&contents).unwrap(); + let update_data: GlobalStateUpdate = config.try_into().unwrap(); + + println!("validators = {:#?}", &update_data.validators); + let entries: BTreeMap<_, _> = update_data + .entries + .iter() + .map(|(key, bytes)| (*key, StoredValue::from_bytes(bytes).unwrap().0)) + .collect(); + println!("entries = {:#?}", Entries(entries)); +} diff --git a/utils/global-state-update-gen/src/generic.rs b/utils/global-state-update-gen/src/generic.rs new file mode 100644 index 0000000000..b5cce825eb --- /dev/null +++ b/utils/global-state-update-gen/src/generic.rs @@ -0,0 +1,647 @@ +pub(crate) mod config; +mod 
state_reader; +mod state_tracker; +#[cfg(test)] +mod testing; +mod update; + +use std::{ + collections::{BTreeMap, BTreeSet}, + fs, +}; + +use clap::ArgMatches; +use itertools::Itertools; + +use casper_engine_test_support::LmdbWasmTestBuilder; +use casper_execution_engine::engine_state::engine_config::DEFAULT_PROTOCOL_VERSION; + +use casper_types::{ + system::auction::{ + Bid, BidKind, BidsExt, DelegatorBid, DelegatorKind, Reservation, SeigniorageRecipientV2, + SeigniorageRecipientsSnapshotV2, Unbond, ValidatorBid, ValidatorCredit, + }, + CLValue, EraId, PublicKey, StoredValue, U512, +}; + +use crate::utils::{hash_from_str, validators_diff, ValidatorInfo, ValidatorsDiff}; + +use self::{ + config::{AccountConfig, Config, Transfer}, + state_reader::StateReader, + state_tracker::StateTracker, + update::Update, +}; + +pub(crate) fn generate_generic_update(matches: &ArgMatches<'_>) { + let data_dir = matches.value_of("data_dir").unwrap_or("."); + let state_hash = hash_from_str(matches.value_of("hash").unwrap()); + let config_path = matches.value_of("config_file").unwrap(); + + let config_bytes = fs::read(config_path).expect("couldn't read the config file"); + let config: Config = toml::from_slice(&config_bytes).expect("couldn't parse the config file"); + + let builder = LmdbWasmTestBuilder::open_raw( + data_dir, + Default::default(), + DEFAULT_PROTOCOL_VERSION, + state_hash, + ); + + update_from_config(builder, config); +} + +fn get_update(reader: T, config: Config) -> Update { + let protocol_version = config.protocol_version; + let mut state_tracker = StateTracker::new(reader, protocol_version); + + process_transfers(&mut state_tracker, &config.transfers); + + update_account_balances(&mut state_tracker, &config.accounts); + + let validators = update_auction_state( + &mut state_tracker, + &config.accounts, + config.only_listed_validators, + config.slash_instead_of_unbonding, + ); + + let entries = state_tracker.get_entries(); + + Update::new(entries, validators) +} + 
+pub(crate) fn update_from_config(reader: T, config: Config) { + let update = get_update(reader, config); + update.print(); +} + +fn process_transfers(state: &mut StateTracker, transfers: &[Transfer]) { + for transfer in transfers { + state.execute_transfer(transfer); + } +} + +fn update_account_balances( + state: &mut StateTracker, + accounts: &[AccountConfig], +) { + for account in accounts { + if let Some(target_balance) = account.balance { + let account_hash = account.public_key.to_account_hash(); + if let Some(account) = state.get_account(&account_hash) { + state.set_purse_balance(account.main_purse(), target_balance); + } else { + state.create_addressable_entity_for_account(account_hash, target_balance); + } + } + } +} + +/// Returns the complete set of validators immediately after the upgrade, +/// if the validator set changed. +fn update_auction_state( + state: &mut StateTracker, + accounts: &[AccountConfig], + only_listed_validators: bool, + slash_instead_of_unbonding: bool, +) -> Option> { + // Read the old SeigniorageRecipientsSnapshot + let (snapshot_key, old_snapshot) = state.read_snapshot(); + + // Create a new snapshot based on the old one and the supplied validators. + let new_snapshot = if only_listed_validators { + gen_snapshot_only_listed( + *old_snapshot.keys().next().unwrap(), + old_snapshot.len() as u64, + accounts, + ) + } else { + gen_snapshot_from_old(old_snapshot.clone(), accounts) + }; + + if new_snapshot == old_snapshot { + return None; + } + + // Save the write to the snapshot key. 
+ state.write_entry( + snapshot_key, + StoredValue::from(CLValue::from_t(new_snapshot.clone()).unwrap()), + ); + + let validators_diff = validators_diff(&old_snapshot, &new_snapshot); + + let bids = state.get_bids(); + if slash_instead_of_unbonding { + // zero the unbonds for the removed validators independently of set_bid; set_bid will take + // care of zeroing the delegators if necessary + for bid_kind in bids { + if validators_diff + .removed + .contains(&bid_kind.validator_public_key()) + { + if let Some(bonding_purse) = bid_kind.bonding_purse() { + state.remove_withdraws_and_unbonds_with_bonding_purse(&bonding_purse); + } + } + } + } + + add_and_remove_bids( + state, + &validators_diff, + &new_snapshot, + only_listed_validators, + slash_instead_of_unbonding, + ); + + // We need to output the validators for the next era, which are contained in the first entry + // in the snapshot. + Some( + new_snapshot + .values() + .next() + .expect("snapshot should have at least one entry") + .iter() + .map(|(public_key, seigniorage_recipient)| ValidatorInfo { + public_key: public_key.clone(), + weight: seigniorage_recipient + .total_stake() + .expect("total validator stake too large"), + }) + .collect(), + ) +} + +/// Generates a new `SeigniorageRecipientsSnapshotV2` based on: +/// - The starting era ID (the era ID at which the snapshot should start). +/// - Count - the number of eras to be included in the snapshot. +/// - The list of configured accounts. 
+fn gen_snapshot_only_listed( + starting_era_id: EraId, + count: u64, + accounts: &[AccountConfig], +) -> SeigniorageRecipientsSnapshotV2 { + let mut new_snapshot = BTreeMap::new(); + let mut era_validators = BTreeMap::new(); + for account in accounts { + // don't add validators with zero stake to the snapshot + let validator_cfg = match &account.validator { + Some(validator) if validator.bonded_amount != U512::zero() => validator, + _ => continue, + }; + let seigniorage_recipient = SeigniorageRecipientV2::new( + validator_cfg.bonded_amount, + validator_cfg.delegation_rate.unwrap_or_default(), + validator_cfg.delegators_map().unwrap_or_default(), + validator_cfg.reservations_map().unwrap_or_default(), + ); + let _ = era_validators.insert(account.public_key.clone(), seigniorage_recipient); + } + for era_id in starting_era_id.iter(count) { + let _ = new_snapshot.insert(era_id, era_validators.clone()); + } + new_snapshot +} + +/// Generates a new `SeigniorageRecipientsSnapshotV2` by modifying the stakes listed in the old +/// snapshot according to the supplied list of configured accounts. +fn gen_snapshot_from_old( + mut snapshot: SeigniorageRecipientsSnapshotV2, + accounts: &[AccountConfig], +) -> SeigniorageRecipientsSnapshotV2 { + // Read the modifications to be applied to the validators set from the config. + let validators_map: BTreeMap<_, _> = accounts + .iter() + .filter_map(|acc| { + acc.validator + .as_ref() + .map(|validator| (acc.public_key.clone(), validator.clone())) + }) + .collect(); + + // We will be modifying the entries in the old snapshot passed in as `snapshot` according to + // the config. + for recipients in snapshot.values_mut() { + // We use `retain` to drop some entries and modify some of the ones that will be retained. + recipients.retain( + |public_key, recipient| match validators_map.get(public_key) { + // If the validator's stake is configured to be zero, we drop them from the + // snapshot. 
+                Some(validator) if validator.bonded_amount.is_zero() => false,
+                // Otherwise, we keep them, but modify the properties.
+                Some(validator) => {
+                    *recipient = SeigniorageRecipientV2::new(
+                        validator.bonded_amount,
+                        validator
+                            .delegation_rate
+                            // If the delegation rate wasn't specified in the config, keep the one
+                            // from the old snapshot.
+                            .unwrap_or(*recipient.delegation_rate()),
+                        validator
+                            .delegators_map()
+                            // If the delegators weren't specified in the config, keep the ones
+                            // from the old snapshot.
+                            .unwrap_or_else(|| recipient.delegator_stake().clone()),
+                        validator
+                            .reservations_map()
+                            // If the reservations weren't specified in the config, keep the ones
+                            // from the old snapshot.
+                            .unwrap_or_else(|| recipient.reservation_delegation_rates().clone()),
+                    );
+                    true
+                }
+                // Validators not present in the config will be kept unmodified.
+                None => true,
+            },
+        );
+
+        // Add the validators that weren't present in the old snapshot.
+        for (public_key, validator) in &validators_map {
+            if recipients.contains_key(public_key) {
+                continue;
+            }
+
+            if validator.bonded_amount != U512::zero() {
+                recipients.insert(
+                    public_key.clone(),
+                    SeigniorageRecipientV2::new(
+                        validator.bonded_amount,
+                        // Unspecified delegation rate will be treated as 0.
+                        validator.delegation_rate.unwrap_or_default(),
+                        // Unspecified delegators will be treated as an empty list.
+                        validator.delegators_map().unwrap_or_default(),
+                        // Unspecified reservation delegation rates will be treated as an empty
+                        // list.
+                        validator.reservations_map().unwrap_or_default(),
+                    ),
+                );
+            }
+        }
+    }
+
+    // Return the modified snapshot.
+ snapshot +} + +/// Generates a set of writes necessary to "fix" the bids, ie.: +/// - set the bids of the new validators to their desired stakes, +/// - remove the bids of the old validators that are no longer validators, +/// - if `only_listed_validators` is true, remove all the bids that are larger than the smallest bid +/// among the new validators (necessary, because such bidders would outbid the validators decided +/// by the social consensus). +pub fn add_and_remove_bids( + state: &mut StateTracker, + validators_diff: &ValidatorsDiff, + new_snapshot: &SeigniorageRecipientsSnapshotV2, + only_listed_validators: bool, + slash_instead_of_unbonding: bool, +) { + let to_unbid = if only_listed_validators { + let large_bids = find_large_bids(state, new_snapshot); + validators_diff + .removed + .union(&large_bids) + .cloned() + .collect() + } else { + validators_diff.removed.clone() + }; + + for (pub_key, seigniorage_recipient) in new_snapshot.values().next_back().unwrap() { + create_or_update_bid( + state, + pub_key, + seigniorage_recipient, + slash_instead_of_unbonding, + ); + } + + // Refresh the bids - we modified them above. 
+ let bids = state.get_bids(); + for public_key in to_unbid { + for bid_kind in bids + .iter() + .filter(|x| x.validator_public_key() == public_key) + { + let reset_bid = match bid_kind { + BidKind::Unified(bid) => BidKind::Unified(Box::new(Bid::empty( + public_key.clone(), + *bid.bonding_purse(), + ))), + BidKind::Validator(validator_bid) => { + let mut new_bid = + ValidatorBid::empty(public_key.clone(), *validator_bid.bonding_purse()); + new_bid.set_delegation_amount_boundaries( + validator_bid.minimum_delegation_amount(), + validator_bid.maximum_delegation_amount(), + ); + BidKind::Validator(Box::new(new_bid)) + } + BidKind::Delegator(delegator_bid) => { + BidKind::Delegator(Box::new(DelegatorBid::empty( + public_key.clone(), + delegator_bid.delegator_kind().clone(), + *delegator_bid.bonding_purse(), + ))) + } + // there should be no need to modify bridge records + // since they don't influence the bidding process + BidKind::Bridge(_) => continue, + BidKind::Credit(credit) => BidKind::Credit(Box::new(ValidatorCredit::empty( + public_key.clone(), + credit.era_id(), + ))), + BidKind::Reservation(reservation_bid) => { + BidKind::Reservation(Box::new(Reservation::new( + public_key.clone(), + reservation_bid.delegator_kind().clone(), + *reservation_bid.delegation_rate(), + ))) + } + BidKind::Unbond(unbond) => BidKind::Unbond(Box::new(Unbond::new( + unbond.validator_public_key().clone(), + unbond.unbond_kind().clone(), + unbond.eras().clone(), + ))), + }; + state.set_bid(reset_bid, slash_instead_of_unbonding); + } + } +} + +/// Returns the set of public keys that have bids larger than the smallest bid among the new +/// validators. 
+fn find_large_bids( + state: &mut StateTracker, + snapshot: &SeigniorageRecipientsSnapshotV2, +) -> BTreeSet { + let seigniorage_recipients = snapshot.values().next().unwrap(); + let min_bid = seigniorage_recipients + .values() + .map(|recipient| { + recipient + .total_stake() + .expect("should have valid total stake") + }) + .min() + .unwrap(); + let new_validators: BTreeSet<_> = seigniorage_recipients.keys().collect(); + + let mut ret = BTreeSet::new(); + + let validator_bids = state + .get_bids() + .iter() + .filter(|x| x.is_validator() || x.is_delegator()) + .cloned() + .collect_vec(); + + for bid_kind in validator_bids { + if let BidKind::Unified(bid) = bid_kind { + if bid.total_staked_amount().unwrap_or_default() > min_bid + && !new_validators.contains(bid.validator_public_key()) + { + ret.insert(bid.validator_public_key().clone()); + } + } else if let BidKind::Validator(validator_bid) = bid_kind { + if new_validators.contains(validator_bid.validator_public_key()) { + // The validator is still going to be a validator - we don't remove their bid. + continue; + } + if validator_bid.staked_amount() > min_bid { + ret.insert(validator_bid.validator_public_key().clone()); + continue; + } + let delegator_stake = state + .get_bids() + .iter() + .filter(|x| { + x.validator_public_key() == *validator_bid.validator_public_key() + && x.is_delegator() + }) + .map(|x| x.staked_amount().unwrap()) + .sum(); + + let total = validator_bid + .staked_amount() + .checked_add(delegator_stake) + .unwrap_or_default(); + if total > min_bid { + ret.insert(validator_bid.validator_public_key().clone()); + } + } + } + ret +} + +/// Updates the amount of an existing bid for the given public key, or creates a new one. 
+fn create_or_update_bid( + state: &mut StateTracker, + validator_public_key: &PublicKey, + updated_recipient: &SeigniorageRecipientV2, + slash_instead_of_unbonding: bool, +) { + let existing_bids = state.get_bids(); + + let maybe_existing_recipient = existing_bids + .iter() + .find(|x| { + (x.is_unified() || x.is_validator()) + && &x.validator_public_key() == validator_public_key + }) + .map(|existing_bid| { + let reservation_delegation_rates = + match existing_bids.reservations_by_validator_public_key(validator_public_key) { + None => BTreeMap::new(), + Some(reservations) => reservations + .iter() + .map(|reservation| { + ( + reservation.delegator_kind().clone(), + *reservation.delegation_rate(), + ) + }) + .collect(), + }; + + match existing_bid { + BidKind::Unified(bid) => { + let delegator_stake = bid + .delegators() + .iter() + .map(|(k, d)| (DelegatorKind::PublicKey(k.clone()), d.staked_amount())) + .collect(); + + ( + bid.bonding_purse(), + SeigniorageRecipientV2::new( + *bid.staked_amount(), + *bid.delegation_rate(), + delegator_stake, + reservation_delegation_rates, + ), + 0, + u64::MAX, + 0, + ) + } + BidKind::Validator(validator_bid) => { + let delegator_stake = match existing_bids + .delegators_by_validator_public_key(validator_public_key) + { + None => BTreeMap::new(), + Some(delegators) => delegators + .iter() + .map(|d| (d.delegator_kind().clone(), d.staked_amount())) + .collect(), + }; + + ( + validator_bid.bonding_purse(), + SeigniorageRecipientV2::new( + validator_bid.staked_amount(), + *validator_bid.delegation_rate(), + delegator_stake, + reservation_delegation_rates, + ), + validator_bid.minimum_delegation_amount(), + validator_bid.maximum_delegation_amount(), + validator_bid.reserved_slots(), + ) + } + _ => unreachable!(), + } + }); + + // existing bid + if let Some(( + bonding_purse, + existing_recipient, + min_delegation_amount, + max_delegation_amount, + reserved_slots, + )) = maybe_existing_recipient + { + if existing_recipient == 
*updated_recipient {
+            return; // noop
+        }
+
+        let delegators = existing_bids
+            .delegators_by_validator_public_key(validator_public_key)
+            .unwrap_or_default();
+
+        for delegator in delegators {
+            let delegator_bid = match updated_recipient
+                .delegator_stake()
+                .get(delegator.delegator_kind())
+            {
+                None => {
+                    // todo!() this is a remove; the global state update tool does not
+                    // yet support prune so in the meantime, setting the amount
+                    // to 0.
+                    DelegatorBid::empty(
+                        delegator.validator_public_key().clone(),
+                        delegator.delegator_kind().clone(),
+                        *delegator.bonding_purse(),
+                    )
+                }
+                Some(updated_delegator_stake) => DelegatorBid::unlocked(
+                    delegator.delegator_kind().clone(),
+                    *updated_delegator_stake,
+                    *delegator.bonding_purse(),
+                    validator_public_key.clone(),
+                ),
+            };
+            if delegator.staked_amount() == delegator_bid.staked_amount() {
+                continue; // effectively noop
+            }
+            state.set_bid(
+                BidKind::Delegator(Box::new(delegator_bid)),
+                slash_instead_of_unbonding,
+            );
+        }
+
+        for (delegator_pub_key, delegator_stake) in updated_recipient.delegator_stake() {
+            if existing_recipient
+                .delegator_stake()
+                .contains_key(delegator_pub_key)
+            {
+                // we handled this scenario above
+                continue;
+            }
+            // this is an entirely new delegator
+            let delegator_bonding_purse = state.create_purse(*delegator_stake);
+            let delegator_bid = DelegatorBid::unlocked(
+                delegator_pub_key.clone(),
+                *delegator_stake,
+                delegator_bonding_purse,
+                validator_public_key.clone(),
+            );
+
+            state.set_bid(
+                BidKind::Delegator(Box::new(delegator_bid)),
+                slash_instead_of_unbonding,
+            );
+        }
+
+        if *existing_recipient.stake() == *updated_recipient.stake() {
+            // if the delegators changed, do the above, but if the validator's
+            // personal stake is unchanged their bid doesn't need to be modified.
+ return; + } + + let updated_bid = ValidatorBid::unlocked( + validator_public_key.clone(), + *bonding_purse, + *updated_recipient.stake(), + *updated_recipient.delegation_rate(), + min_delegation_amount, + max_delegation_amount, + reserved_slots, + ); + + state.set_bid( + BidKind::Validator(Box::new(updated_bid)), + slash_instead_of_unbonding, + ); + return; + } + + // new bid + let stake = *updated_recipient.stake(); + if stake.is_zero() { + return; + } + + for (delegator_pub_key, delegator_stake) in updated_recipient.delegator_stake() { + let delegator_bonding_purse = state.create_purse(*delegator_stake); + let delegator_bid = DelegatorBid::unlocked( + delegator_pub_key.clone(), + *delegator_stake, + delegator_bonding_purse, + validator_public_key.clone(), + ); + + state.set_bid( + BidKind::Delegator(Box::new(delegator_bid)), + slash_instead_of_unbonding, + ); + } + + let bonding_purse = state.create_purse(stake); + let validator_bid = ValidatorBid::unlocked( + validator_public_key.clone(), + bonding_purse, + stake, + *updated_recipient.delegation_rate(), + 0, + u64::MAX, + 0, + ); + state.set_bid( + BidKind::Validator(Box::new(validator_bid)), + slash_instead_of_unbonding, + ); +} diff --git a/utils/global-state-update-gen/src/generic/config.rs b/utils/global-state-update-gen/src/generic/config.rs new file mode 100644 index 0000000000..eeb0854f16 --- /dev/null +++ b/utils/global-state-update-gen/src/generic/config.rs @@ -0,0 +1,87 @@ +use std::collections::BTreeMap; + +use serde::{Deserialize, Serialize}; + +use casper_types::{ + account::AccountHash, + system::auction::{DelegationRate, DelegatorKind}, + ProtocolVersion, PublicKey, U512, +}; + +#[derive(Debug, Clone, Serialize, Deserialize, Default)] +pub struct Config { + #[serde(default)] + pub transfers: Vec, + #[serde(default)] + pub accounts: Vec, + #[serde(default)] + pub only_listed_validators: bool, + #[serde(default)] + pub slash_instead_of_unbonding: bool, + #[serde(default)] + pub protocol_version: 
ProtocolVersion, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Transfer { + pub from: AccountHash, + pub to: AccountHash, + pub amount: U512, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct AccountConfig { + pub public_key: PublicKey, + pub balance: Option, + pub validator: Option, +} + +#[derive(Debug, Clone, Serialize, Deserialize, Default)] +pub struct ValidatorConfig { + pub bonded_amount: U512, + pub delegation_rate: Option, + pub delegators: Option>, + pub reservations: Option>, +} + +impl ValidatorConfig { + pub fn delegators_map(&self) -> Option> { + self.delegators.as_ref().map(|delegators| { + delegators + .iter() + .map(|delegator| { + ( + DelegatorKind::PublicKey(delegator.public_key.clone()), + delegator.delegated_amount, + ) + }) + .collect() + }) + } + + pub fn reservations_map(&self) -> Option> { + self.reservations.as_ref().map(|reservations| { + reservations + .iter() + .map(|reservation| { + ( + DelegatorKind::PublicKey(reservation.public_key.clone()), + reservation.delegation_rate, + ) + }) + .collect() + }) + } +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct DelegatorConfig { + pub public_key: PublicKey, + pub delegated_amount: U512, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ReservationConfig { + pub public_key: PublicKey, + pub delegation_rate: DelegationRate, +} diff --git a/utils/global-state-update-gen/src/generic/state_reader.rs b/utils/global-state-update-gen/src/generic/state_reader.rs new file mode 100644 index 0000000000..fccc5dca81 --- /dev/null +++ b/utils/global-state-update-gen/src/generic/state_reader.rs @@ -0,0 +1,145 @@ +use casper_engine_test_support::LmdbWasmTestBuilder; +use casper_types::{ + account::AccountHash, + contracts::ContractHash, + system::{ + auction::{ + BidKind, Unbond, UnbondKind, UnbondingPurse, WithdrawPurses, + SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY, + }, + mint::TOTAL_SUPPLY_KEY, + }, + AddressableEntity, Key, StoredValue, +}; +use 
std::collections::BTreeMap; + +pub trait StateReader { + fn query(&mut self, key: Key) -> Option; + + fn get_total_supply_key(&mut self) -> Key; + + fn get_seigniorage_recipients_key(&mut self) -> Key; + + fn get_account(&mut self, account_hash: AccountHash) -> Option; + + fn get_bids(&mut self) -> Vec; + + #[deprecated(note = "superseded by get_unbonding_purses")] + fn get_withdraws(&mut self) -> WithdrawPurses; + + #[deprecated(note = "superseded by get_unbonds")] + fn get_unbonding_purses(&mut self) -> BTreeMap>; + + fn get_unbonds(&mut self) -> BTreeMap>; +} + +impl StateReader for &mut T +where + T: StateReader, +{ + fn query(&mut self, key: Key) -> Option { + T::query(self, key) + } + + fn get_total_supply_key(&mut self) -> Key { + T::get_total_supply_key(self) + } + + fn get_seigniorage_recipients_key(&mut self) -> Key { + T::get_seigniorage_recipients_key(self) + } + + fn get_account(&mut self, account_hash: AccountHash) -> Option { + T::get_account(self, account_hash) + } + + fn get_bids(&mut self) -> Vec { + T::get_bids(self) + } + + #[allow(deprecated)] + fn get_withdraws(&mut self) -> WithdrawPurses { + T::get_withdraws(self) + } + + #[allow(deprecated)] + fn get_unbonding_purses(&mut self) -> BTreeMap> { + T::get_unbonding_purses(self) + } + + fn get_unbonds(&mut self) -> BTreeMap> { + T::get_unbonds(self) + } +} + +impl StateReader for LmdbWasmTestBuilder { + fn query(&mut self, key: Key) -> Option { + LmdbWasmTestBuilder::query(self, None, key, &[]).ok() + } + + fn get_total_supply_key(&mut self) -> Key { + // Find the hash of the mint contract. 
+ let mint_contract_hash = self.get_system_mint_hash(); + + if let Some(entity) = self.get_entity_with_named_keys_by_entity_hash(mint_contract_hash) { + entity + .named_keys() + .get(TOTAL_SUPPLY_KEY) + .copied() + .expect("total_supply should exist in mint named keys") + } else { + let mint_legacy_contract_hash: ContractHash = + ContractHash::new(mint_contract_hash.value()); + + self.get_contract(mint_legacy_contract_hash) + .expect("mint should exist") + .named_keys() + .get(TOTAL_SUPPLY_KEY) + .copied() + .expect("total_supply should exist in mint named keys") + } + } + + fn get_seigniorage_recipients_key(&mut self) -> Key { + // Find the hash of the auction contract. + let auction_contract_hash = self.get_system_auction_hash(); + + if let Some(entity) = self.get_entity_with_named_keys_by_entity_hash(auction_contract_hash) + { + entity + .named_keys() + .get(SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY) + .copied() + .expect("seigniorage_recipients_snapshot should exist in auction named keys") + } else { + let auction_legacy_contract_hash = ContractHash::new(auction_contract_hash.value()); + + self.get_contract(auction_legacy_contract_hash) + .expect("auction should exist") + .named_keys() + .get(SEIGNIORAGE_RECIPIENTS_SNAPSHOT_KEY) + .copied() + .expect("seigniorage_recipients_snapshot should exist in auction named keys") + } + } + + fn get_account(&mut self, account_hash: AccountHash) -> Option { + LmdbWasmTestBuilder::get_entity_by_account_hash(self, account_hash) + } + + fn get_bids(&mut self) -> Vec { + LmdbWasmTestBuilder::get_bids(self) + } + + fn get_withdraws(&mut self) -> WithdrawPurses { + LmdbWasmTestBuilder::get_withdraw_purses(self) + } + + fn get_unbonding_purses(&mut self) -> BTreeMap> { + LmdbWasmTestBuilder::get_unbonding_purses(self) + } + + fn get_unbonds(&mut self) -> BTreeMap> { + LmdbWasmTestBuilder::get_unbonds(self) + } +} diff --git a/utils/global-state-update-gen/src/generic/state_tracker.rs 
b/utils/global-state-update-gen/src/generic/state_tracker.rs new file mode 100644 index 0000000000..c96b826371 --- /dev/null +++ b/utils/global-state-update-gen/src/generic/state_tracker.rs @@ -0,0 +1,625 @@ +use std::{ + cmp::Ordering, + collections::{btree_map::Entry, BTreeMap, BTreeSet}, + convert::TryFrom, +}; + +use rand::Rng; + +use casper_types::{ + account::AccountHash, + addressable_entity::{ActionThresholds, AssociatedKeys, Weight}, + system::auction::{ + BidAddr, BidKind, BidsExt, DelegatorKind, SeigniorageRecipientsSnapshotV2, Unbond, + UnbondEra, UnbondKind, UnbondingPurse, WithdrawPurse, WithdrawPurses, + }, + AccessRights, AddressableEntity, AddressableEntityHash, ByteCodeHash, CLValue, EntityAddr, + EntityKind, EntityVersions, Groups, Key, Package, PackageHash, PackageStatus, ProtocolVersion, + PublicKey, StoredValue, URef, U512, +}; + +use super::{config::Transfer, state_reader::StateReader}; + +/// A struct tracking changes to be made to the global state. +pub struct StateTracker { + reader: T, + entries_to_write: BTreeMap, + total_supply: U512, + total_supply_key: Key, + accounts_cache: BTreeMap, + withdraws_cache: BTreeMap>, + unbonding_purses_cache: BTreeMap>, + unbonds_cache: BTreeMap>, + purses_cache: BTreeMap, + staking: Option>, + seigniorage_recipients: Option<(Key, SeigniorageRecipientsSnapshotV2)>, + protocol_version: ProtocolVersion, +} + +impl StateTracker { + /// Creates a new `StateTracker`. + pub fn new(mut reader: T, protocol_version: ProtocolVersion) -> Self { + // Read the URef under which total supply is stored. + let total_supply_key = reader.get_total_supply_key(); + + // Read the total supply. 
+ let total_supply_sv = reader.query(total_supply_key).expect("should query"); + let total_supply = total_supply_sv.into_cl_value().expect("should be cl value"); + + Self { + reader, + entries_to_write: Default::default(), + total_supply_key, + total_supply: total_supply.into_t().expect("should be U512"), + accounts_cache: BTreeMap::new(), + withdraws_cache: BTreeMap::new(), + unbonding_purses_cache: BTreeMap::new(), + unbonds_cache: BTreeMap::new(), + purses_cache: BTreeMap::new(), + staking: None, + seigniorage_recipients: None, + protocol_version, + } + } + + /// Returns all the entries to be written to the global state + pub fn get_entries(&self) -> BTreeMap { + self.entries_to_write.clone() + } + + /// Stores a write of an entry in the global state. + pub fn write_entry(&mut self, key: Key, value: StoredValue) { + let _ = self.entries_to_write.insert(key, value); + } + + pub fn write_bid(&mut self, bid_kind: BidKind) { + let bid_addr = bid_kind.bid_addr(); + + let _ = self + .entries_to_write + .insert(bid_addr.into(), bid_kind.into()); + } + + /// Increases the total supply of the tokens in the network. + pub fn increase_supply(&mut self, to_add: U512) { + self.total_supply += to_add; + self.write_entry( + self.total_supply_key, + StoredValue::CLValue(CLValue::from_t(self.total_supply).unwrap()), + ); + } + + /// Decreases the total supply of the tokens in the network. + pub fn decrease_supply(&mut self, to_sub: U512) { + self.total_supply -= to_sub; + self.write_entry( + self.total_supply_key, + StoredValue::CLValue(CLValue::from_t(self.total_supply).unwrap()), + ); + } + + /// Creates a new purse containing the given amount of motes and returns its URef. + pub fn create_purse(&mut self, amount: U512) -> URef { + let mut rng = rand::thread_rng(); + let new_purse = URef::new(rng.gen(), AccessRights::READ_ADD_WRITE); + + // Purse URef pointing to `()` so that the owner cannot modify the purse directly. 
+ self.write_entry(Key::URef(new_purse), StoredValue::CLValue(CLValue::unit())); + + self.set_purse_balance(new_purse, amount); + + new_purse + } + + /// Gets the balance of the purse, taking into account changes made during the update. + pub fn get_purse_balance(&mut self, purse: URef) -> U512 { + match self.purses_cache.get(&purse).cloned() { + Some(amount) => amount, + None => { + let base_key = Key::Balance(purse.addr()); + let amount = self + .reader + .query(base_key) + .map(|v| CLValue::try_from(v).expect("purse balance should be a CLValue")) + .map(|cl_value| cl_value.into_t().expect("purse balance should be a U512")) + .unwrap_or_else(U512::zero); + self.purses_cache.insert(purse, amount); + amount + } + } + } + + /// Sets the balance of the purse. + pub fn set_purse_balance(&mut self, purse: URef, balance: U512) { + let current_balance = self.get_purse_balance(purse); + + match balance.cmp(¤t_balance) { + Ordering::Greater => self.increase_supply(balance - current_balance), + Ordering::Less => self.decrease_supply(current_balance - balance), + Ordering::Equal => return, + } + + self.write_entry( + Key::Balance(purse.addr()), + StoredValue::CLValue(CLValue::from_t(balance).unwrap()), + ); + self.purses_cache.insert(purse, balance); + } + + /// Creates a new account for the given public key and seeds it with the given amount of + /// tokens. 
+ pub fn create_addressable_entity_for_account( + &mut self, + account_hash: AccountHash, + amount: U512, + ) -> AddressableEntity { + let main_purse = self.create_purse(amount); + + let mut rng = rand::thread_rng(); + + let entity_hash = AddressableEntityHash::new(account_hash.value()); + let package_hash = PackageHash::new(rng.gen()); + let contract_wasm_hash = ByteCodeHash::new([0u8; 32]); + + let associated_keys = AssociatedKeys::new(account_hash, Weight::new(1)); + + let addressable_entity = AddressableEntity::new( + package_hash, + contract_wasm_hash, + self.protocol_version, + main_purse, + associated_keys, + ActionThresholds::default(), + EntityKind::Account(account_hash), + ); + + let mut contract_package = Package::new( + EntityVersions::default(), + BTreeSet::default(), + Groups::default(), + PackageStatus::Locked, + ); + + contract_package.insert_entity_version( + self.protocol_version.value().major, + EntityAddr::Account(account_hash.value()), + ); + self.write_entry( + package_hash.into(), + StoredValue::SmartContract(contract_package.clone()), + ); + + let entity_key = addressable_entity.entity_key(entity_hash); + + self.write_entry( + entity_key, + StoredValue::AddressableEntity(addressable_entity.clone()), + ); + + let addressable_entity_by_account_hash = + { CLValue::from_t(entity_key).expect("must convert to cl_value") }; + + self.accounts_cache + .insert(account_hash, addressable_entity.clone()); + + self.write_entry( + Key::Account(account_hash), + StoredValue::CLValue(addressable_entity_by_account_hash), + ); + + addressable_entity + } + + /// Gets the account for the given public key. 
+ pub fn get_account(&mut self, account_hash: &AccountHash) -> Option { + match self.accounts_cache.entry(*account_hash) { + Entry::Vacant(vac) => self + .reader + .get_account(*account_hash) + .map(|account| vac.insert(account).clone()), + Entry::Occupied(occupied) => Some(occupied.into_mut().clone()), + } + } + + pub fn execute_transfer(&mut self, transfer: &Transfer) { + let from_account = if let Some(account) = self.get_account(&transfer.from) { + account + } else { + eprintln!("\"from\" account doesn't exist; transfer: {:?}", transfer); + return; + }; + + let to_account = if let Some(account) = self.get_account(&transfer.to) { + account + } else { + self.create_addressable_entity_for_account(transfer.to, U512::zero()) + }; + + let from_balance = self.get_purse_balance(from_account.main_purse()); + + if from_balance < transfer.amount { + eprintln!( + "\"from\" account balance insufficient; balance = {}, transfer = {:?}", + from_balance, transfer + ); + return; + } + + let to_balance = self.get_purse_balance(to_account.main_purse()); + + self.set_purse_balance(from_account.main_purse(), from_balance - transfer.amount); + self.set_purse_balance(to_account.main_purse(), to_balance + transfer.amount); + } + + /// Reads the `SeigniorageRecipientsSnapshot` stored in the global state. + pub fn read_snapshot(&mut self) -> (Key, SeigniorageRecipientsSnapshotV2) { + if let Some(key_and_snapshot) = &self.seigniorage_recipients { + return key_and_snapshot.clone(); + } + // Read the key under which the snapshot is stored. + let validators_key = self.reader.get_seigniorage_recipients_key(); + + // Decode the old snapshot. 
+ let stored_value = self.reader.query(validators_key).expect("should query"); + let cl_value = stored_value.into_cl_value().expect("should be cl value"); + let snapshot: SeigniorageRecipientsSnapshotV2 = cl_value.into_t().expect("should convert"); + self.seigniorage_recipients = Some((validators_key, snapshot.clone())); + (validators_key, snapshot) + } + + /// Reads the bids from the global state. + pub fn get_bids(&mut self) -> Vec { + if let Some(ref staking) = self.staking { + staking.clone() + } else { + let staking = self.reader.get_bids(); + self.staking = Some(staking.clone()); + staking + } + } + + fn existing_bid(&mut self, bid_kind: &BidKind, existing_bids: Vec) -> Option { + match bid_kind.clone() { + BidKind::Unified(bid) => existing_bids + .unified_bid(bid.validator_public_key()) + .map(|existing_bid| BidKind::Unified(Box::new(existing_bid))), + BidKind::Validator(validator_bid) => existing_bids + .validator_bid(validator_bid.validator_public_key()) + .map(|existing_validator| BidKind::Validator(Box::new(existing_validator))), + BidKind::Delegator(delegator_bid) => { + // this one is a little tricky due to legacy issues. 
+ match existing_bids.delegator_by_kind( + delegator_bid.validator_public_key(), + delegator_bid.delegator_kind(), + ) { + Some(existing_delegator) => { + Some(BidKind::Delegator(Box::new(existing_delegator))) + } + None => match existing_bids.unified_bid(delegator_bid.validator_public_key()) { + Some(existing_bid) => { + if let BidKind::Delegator(delegator_bid) = bid_kind { + for delegator in existing_bid.delegators().values() { + if let DelegatorKind::PublicKey(dpk) = + delegator_bid.delegator_kind() + { + if delegator.delegator_public_key() != dpk { + continue; + } + return Some(BidKind::Delegator(delegator_bid.clone())); + } + } + } + None + } + None => None, + }, + } + } + // dont modify bridge records + BidKind::Bridge(_) => None, + BidKind::Credit(credit) => existing_bids + .credit(credit.validator_public_key()) + .map(|existing_credit| BidKind::Credit(Box::new(existing_credit))), + BidKind::Reservation(reservation) => existing_bids + .reservation_by_kind( + reservation.validator_public_key(), + reservation.delegator_kind(), + ) + .map(|exisiting_reservation| BidKind::Reservation(Box::new(exisiting_reservation))), + BidKind::Unbond(unbond) => existing_bids + .unbond_by_kind(unbond.validator_public_key(), unbond.unbond_kind()) + .map(|existing_unbond| BidKind::Unbond(Box::new(existing_unbond))), + } + } + + /// Sets the bid for the given account. 
+ pub fn set_bid(&mut self, bid_kind: BidKind, slash_instead_of_unbonding: bool) { + // skip bridge records since they shouldn't need to be overwritten + if let BidKind::Bridge(_) = bid_kind { + return; + } + + let bids = self.get_bids(); + let maybe_existing_bid = self.existing_bid(&bid_kind, bids); + + // since we skip bridge records optional values should be present + let new_stake = bid_kind.staked_amount().expect("should have staked amount"); + let bonding_purse = bid_kind.bonding_purse().expect("should have bonding purse"); + + let previous_stake = match maybe_existing_bid { + None => U512::zero(), + Some(existing_bid) => { + let previously_bonded = + self.get_purse_balance(existing_bid.bonding_purse().unwrap()); + if existing_bid + .bonding_purse() + .expect("should have bonding purse") + != bonding_purse + { + println!("foo"); + self.set_purse_balance(existing_bid.bonding_purse().unwrap(), U512::zero()); + self.set_purse_balance(bonding_purse, previously_bonded); + // the old bonding purse gets zeroed - the unbonds will get invalid, anyway + self.remove_withdraws_and_unbonds_with_bonding_purse( + &existing_bid.bonding_purse().unwrap(), + ); + } + + previously_bonded + } + }; + + // we called `get_bids` above, so `staking` will be `Some` + self.staking.as_mut().unwrap().upsert(bid_kind.clone()); + + // Replace the bid (overwrite the previous bid, if any): + self.write_bid(bid_kind.clone()); + + // Remove all the relevant unbonds if we're slashing + if slash_instead_of_unbonding { + self.remove_withdraws_and_unbonds_with_bonding_purse(&bonding_purse); + } + + let unbond_kind = match bid_kind.delegator_kind() { + None => UnbondKind::Validator(bid_kind.validator_public_key()), + Some(kind) => match kind { + DelegatorKind::PublicKey(pk) => UnbondKind::DelegatedPublicKey(pk), + DelegatorKind::Purse(addr) => UnbondKind::DelegatedPurse(addr), + }, + }; + + // This will be zero if the unbonds got removed above. 
+ let already_unbonded = self.already_unbonding_amount(&bid_kind); + + // This is the amount that should be in the bonding purse. + let new_stake = new_stake + already_unbonded; + + if (slash_instead_of_unbonding && new_stake != previous_stake) || new_stake > previous_stake + { + self.set_purse_balance(bonding_purse, new_stake); + } else if new_stake < previous_stake { + let amount = previous_stake - new_stake; + self.create_unbond( + bonding_purse, + &bid_kind.validator_public_key(), + &unbond_kind, + amount, + ); + } + } + + #[allow(deprecated)] + fn get_withdraws(&mut self) -> WithdrawPurses { + let mut result = self.reader.get_withdraws(); + for (acc, purses) in &self.withdraws_cache { + result.insert(*acc, purses.clone()); + } + result + } + + #[allow(deprecated)] + fn get_unbonding_purses(&mut self) -> BTreeMap> { + let mut result = self.reader.get_unbonding_purses(); + for (acc, purses) in &self.unbonding_purses_cache { + result.insert(*acc, purses.clone()); + } + result + } + + fn get_unbonds(&mut self) -> BTreeMap> { + let mut result = self.reader.get_unbonds(); + for (kind, unbond) in &self.unbonds_cache { + match result.get_mut(kind) { + None => { + result.insert(kind.clone(), unbond.clone()); + } + Some(unbonds) => { + unbonds.append(&mut unbond.clone()); + } + } + } + result + } + + fn write_withdraws(&mut self, account_hash: AccountHash, withdraws: Vec) { + self.withdraws_cache.insert(account_hash, withdraws.clone()); + self.write_entry( + Key::Withdraw(account_hash), + StoredValue::Withdraw(withdraws), + ); + } + + fn write_unbonding_purses(&mut self, account_hash: AccountHash, unbonds: Vec) { + self.unbonding_purses_cache + .insert(account_hash, unbonds.clone()); + self.write_entry(Key::Unbond(account_hash), StoredValue::Unbonding(unbonds)); + } + + fn write_unbond(&mut self, unbond_kind: UnbondKind, unbond: Unbond) { + match self.unbonds_cache.get_mut(&unbond_kind) { + Some(unbonds) => unbonds.push(unbond.clone()), + None => { + let _ = self + 
.unbonds_cache + .insert(unbond_kind.clone(), vec![unbond.clone()]); + } + } + + let bid_addr = unbond_kind.bid_addr(unbond.validator_public_key()); + self.write_entry( + Key::BidAddr(bid_addr), + StoredValue::BidKind(BidKind::Unbond(Box::new(unbond))), + ); + } + + /// Returns the sum of already unbonding purses for the given validator account & unbonder. + fn already_unbonding_amount(&mut self, bid_kind: &BidKind) -> U512 { + let unbonds = self.get_unbonds(); + let validator_public_key = bid_kind.validator_public_key(); + if let Some(unbond) = unbonds.get(&UnbondKind::Validator(validator_public_key.clone())) { + return unbond + .iter() + .map(|unbond| { + if unbond.is_validator() { + if let Some(unbond_era) = unbond + .eras() + .iter() + .max_by(|x, y| x.era_of_creation().cmp(&y.era_of_creation())) + { + *unbond_era.amount() + } else { + U512::zero() + } + } else { + U512::zero() + } + }) + .sum(); + } + + if let BidKind::Unbond(unbond) = bid_kind { + match unbond.unbond_kind() { + UnbondKind::Validator(unbonder_public_key) + | UnbondKind::DelegatedPublicKey(unbonder_public_key) => { + let unbonding_purses = self.get_unbonding_purses(); + let account_hash = validator_public_key.to_account_hash(); + if let Some(purses) = unbonding_purses.get(&account_hash) { + if let Some(purse) = purses + .iter() + .find(|x| x.unbonder_public_key() == unbonder_public_key) + { + return *purse.amount(); + } + } + } + UnbondKind::DelegatedPurse(_) => { + // noop + } + } + } + + let withdrawals = self.get_withdraws(); + if let Some(withdraws) = withdrawals.get(&validator_public_key.to_account_hash()) { + if let Some(withdraw) = withdraws + .iter() + .find(|x| x.unbonder_public_key() == &validator_public_key) + { + return *withdraw.amount(); + } + } + + U512::zero() + } + + pub fn remove_withdraws_and_unbonds_with_bonding_purse(&mut self, affected_purse: &URef) { + let withdraws = self.get_withdraws(); + let unbonding_purses = self.get_unbonding_purses(); + let unbonds = 
self.get_unbonds(); + for (acc, mut purses) in withdraws { + let old_len = purses.len(); + purses.retain(|purse| purse.bonding_purse().addr() != affected_purse.addr()); + if purses.len() != old_len { + self.write_withdraws(acc, purses); + } + } + + for (acc, mut purses) in unbonding_purses { + let old_len = purses.len(); + purses.retain(|purse| purse.bonding_purse().addr() != affected_purse.addr()); + if purses.len() != old_len { + self.write_unbonding_purses(acc, purses); + } + } + + for (unbond_kind, mut unbonds) in unbonds { + for unbond in unbonds.iter_mut() { + let old_len = unbond.eras().len(); + unbond + .eras_mut() + .retain(|purse| purse.bonding_purse().addr() != affected_purse.addr()); + if unbond.eras().len() != old_len { + self.write_unbond(unbond_kind.clone(), unbond.clone()); + } + } + } + } + + pub fn create_unbond( + &mut self, + bonding_purse: URef, + validator_key: &PublicKey, + unbond_kind: &UnbondKind, + amount: U512, + ) { + let era_id = &self.read_snapshot().1.keys().next().copied().unwrap(); + let unbond_era = UnbondEra::new(bonding_purse, *era_id, amount, None); + let unbonds = match self.unbonds_cache.entry(unbond_kind.clone()) { + Entry::Occupied(ref entry) => entry.get().clone(), + Entry::Vacant(entry) => { + // Fill the cache with the information from the reader when the cache is empty: + let rec = match self.reader.get_unbonds().get(unbond_kind).cloned() { + Some(rec) => rec, + None => vec![Unbond::new( + validator_key.clone(), + unbond_kind.clone(), + vec![unbond_era.clone()], + )], + }; + + entry.insert(rec.clone()); + rec + } + }; + + if amount == U512::zero() { + return; + } + + for mut unbond in unbonds { + if !unbond.eras().contains(&unbond_era.clone()) { + unbond.eras_mut().push(unbond_era.clone()); + } + + let bid_addr = match unbond_kind { + UnbondKind::Validator(pk) | UnbondKind::DelegatedPublicKey(pk) => { + BidAddr::UnbondAccount { + validator: validator_key.to_account_hash(), + unbonder: pk.to_account_hash(), + } + } + 
UnbondKind::DelegatedPurse(addr) => BidAddr::UnbondPurse { + validator: validator_key.to_account_hash(), + unbonder: *addr, + }, + }; + + // This doesn't actually transfer or create any funds - the funds will be transferred + // from the bonding purse to the unbonder's main purse later by the auction + // contract. + self.write_entry( + Key::BidAddr(bid_addr), + StoredValue::BidKind(BidKind::Unbond(Box::new(unbond.clone()))), + ); + } + } +} diff --git a/utils/global-state-update-gen/src/generic/testing.rs b/utils/global-state-update-gen/src/generic/testing.rs new file mode 100644 index 0000000000..e4b51a4bdf --- /dev/null +++ b/utils/global-state-update-gen/src/generic/testing.rs @@ -0,0 +1,2580 @@ +use std::collections::BTreeMap; + +use itertools::Itertools; +use rand::Rng; + +use casper_types::{ + account::AccountHash, + addressable_entity::{ActionThresholds, AssociatedKeys, Weight}, + system::auction::{ + BidKind, BidsExt, DelegatorBid, DelegatorKind, SeigniorageRecipientV2, + SeigniorageRecipientsSnapshotV2, SeigniorageRecipientsV2, Unbond, UnbondEra, UnbondKind, + UnbondingPurse, ValidatorBid, WithdrawPurse, WithdrawPurses, + }, + testing::TestRng, + AccessRights, AddressableEntity, ByteCodeHash, CLValue, EntityKind, EraId, Key, PackageHash, + ProtocolVersion, PublicKey, StoredValue, URef, URefAddr, U512, +}; + +#[cfg(test)] +use crate::utils::ValidatorInfo; + +use super::{ + config::{AccountConfig, Config, DelegatorConfig, Transfer, ValidatorConfig}, + get_update, + state_reader::StateReader, +}; + +const TOTAL_SUPPLY_KEY: URef = URef::new([1; 32], AccessRights::READ_ADD_WRITE); +const SEIGNIORAGE_RECIPIENTS_KEY: URef = URef::new([2; 32], AccessRights::READ_ADD_WRITE); + +struct MockStateReader { + accounts: BTreeMap, + purses: BTreeMap, + total_supply: U512, + seigniorage_recipients: SeigniorageRecipientsSnapshotV2, + bids: Vec, + withdraws: WithdrawPurses, + unbonding_purses: BTreeMap>, + unbonds: BTreeMap>, + protocol_version: ProtocolVersion, + 
last_bonding_purse: Option, +} + +impl MockStateReader { + fn new() -> Self { + Self { + accounts: BTreeMap::new(), + purses: BTreeMap::new(), + total_supply: U512::zero(), + seigniorage_recipients: SeigniorageRecipientsSnapshotV2::new(), + bids: vec![], + withdraws: WithdrawPurses::new(), + unbonding_purses: BTreeMap::new(), + unbonds: BTreeMap::new(), + protocol_version: ProtocolVersion::V1_0_0, + last_bonding_purse: None, + } + } + + fn with_account( + mut self, + account_hash: AccountHash, + balance: U512, + rng: &mut R, + ) -> Self { + let main_purse = URef::new(rng.gen(), AccessRights::READ_ADD_WRITE); + let entity = AddressableEntity::new( + PackageHash::new(rng.gen()), + ByteCodeHash::new(rng.gen()), + self.protocol_version, + main_purse, + AssociatedKeys::new(account_hash, Weight::new(1)), + ActionThresholds::default(), + EntityKind::Account(account_hash), + ); + + self.purses.insert(main_purse.addr(), balance); + // If `insert` returns `Some()`, it means we used the same account hash twice, which is + // a programmer error and the function will panic. 
+ assert!(self.accounts.insert(account_hash, entity).is_none()); + self.total_supply += balance; + self + } + + fn with_validators( + mut self, + validators: Vec<(PublicKey, U512, ValidatorConfig)>, + rng: &mut R, + ) -> Self { + let mut recipients = SeigniorageRecipientsV2::new(); + for (public_key, balance, validator_cfg) in validators { + let stake = validator_cfg.bonded_amount; + let delegation_rate = validator_cfg.delegation_rate.unwrap_or_default(); + let delegators = validator_cfg.delegators_map().unwrap_or_default(); + let reservation_delegation_rates = validator_cfg.reservations_map().unwrap_or_default(); + // add an entry to the recipients snapshot + let recipient = SeigniorageRecipientV2::new( + stake, + delegation_rate, + delegators.clone(), + reservation_delegation_rates, + ); + recipients.insert(public_key.clone(), recipient); + + // create the account if it doesn't exist + let account_hash = public_key.to_account_hash(); + if !self.accounts.contains_key(&account_hash) { + self = self.with_account(account_hash, balance, rng); + } + + let bonding_purse = URef::new(rng.gen(), AccessRights::READ_ADD_WRITE); + self.last_bonding_purse = Some(bonding_purse); + self.purses.insert(bonding_purse.addr(), stake); + self.total_supply += stake; + + for delegator_kind in delegators.keys() { + match delegator_kind { + DelegatorKind::PublicKey(delegator_pub_key) => { + let account_hash = delegator_pub_key.to_account_hash(); + + if !self.accounts.contains_key(&account_hash) { + self = self.with_account(account_hash, U512::zero(), rng); + } + } + DelegatorKind::Purse(_) => { + continue; + } + } + } + + // create the bid + for (delegator_kind, delegator_stake) in &delegators { + let bonding_purse = URef::new(rng.gen(), AccessRights::READ_ADD_WRITE); + self.last_bonding_purse = Some(bonding_purse); + self.purses.insert(bonding_purse.addr(), *delegator_stake); + self.total_supply += *delegator_stake; + + let delegator = DelegatorBid::unlocked( + delegator_kind.clone(), + 
*delegator_stake, + bonding_purse, + public_key.clone(), + ); + + self.bids.push(BidKind::Delegator(Box::new(delegator))); + } + + let validator_bid = ValidatorBid::unlocked( + public_key.clone(), + bonding_purse, + stake, + delegation_rate, + 0, + u64::MAX, + 0, + ); + + self.bids.push(BidKind::Validator(Box::new(validator_bid))); + } + + for era_id in 0..5 { + self.seigniorage_recipients + .insert(era_id.into(), recipients.clone()); + } + + self + } + + /// Returns the bonding purse if the unbonder exists in `self.bids`. + fn unbonder_bonding_purse( + &self, + validator_public_key: &PublicKey, + unbond_kind: &UnbondKind, + ) -> Option { + let bid = self.bids.validator_bid(validator_public_key)?; + if unbond_kind.is_validator() { + return Some(*bid.bonding_purse()); + } + + match self.bids.iter().find(|x| { + &x.validator_public_key() == validator_public_key + && x.unbond_kind() == Some(unbond_kind.clone()) + }) { + Some(x) => x.bonding_purse(), + None => None, + } + } + + /// Returns the bonding purse if the unbonder exists in `self.bids`, or creates a new account + /// with a nominal stake with the given validator and returns the new unbonder's bonding purse. + fn create_or_get_unbonder_bonding_purse( + mut self, + validator_public_key: &PublicKey, + unbond_kind: &UnbondKind, + rng: &mut R, + ) -> Self { + if let Some(purse) = self.unbonder_bonding_purse(validator_public_key, unbond_kind) { + self.last_bonding_purse = Some(purse); + return self; + } + + let bonding_purse = URef::new(rng.gen(), AccessRights::READ_ADD_WRITE); + self.purses.insert(bonding_purse.addr(), U512::zero()); + // it is not clear to me why this method would increment stake here? -Ed + // let stake = U512::from(10); + // self.purses.insert(bonding_purse.addr(), stake); + // self.total_supply += stake; + self.last_bonding_purse = Some(bonding_purse); + self + } + + /// Creates a `WithdrawPurse` for 1 mote. 
If the validator or delegator don't exist in + /// `self.bids`, a random bonding purse is assigned. + fn with_withdraw( + mut self, + validator_public_key: PublicKey, + unbond_kind: UnbondKind, + era_of_creation: EraId, + amount: U512, + rng: &mut R, + ) -> Self { + self = self.create_or_get_unbonder_bonding_purse(&validator_public_key, &unbond_kind, rng); + let bonding_purse = self.last_bonding_purse.expect("should have bonding purse"); + + let unbonder_public_key = unbond_kind + .maybe_public_key() + .expect("withdraw purses is legacy tech"); + + let withdraw = WithdrawPurse::new( + bonding_purse, + validator_public_key, + unbonder_public_key, + era_of_creation, + amount, + ); + + let withdraws = self + .withdraws + .entry(withdraw.validator_public_key().to_account_hash()) + .or_default(); + withdraws.push(withdraw); + self + } + + /// Creates an `Unbond` for 1 mote. If the validator or delegator don't exist in + /// `self.bids`, a random bonding purse is assigned. + fn with_unbond( + mut self, + validator_public_key: PublicKey, + unbond_kind: UnbondKind, + amount: U512, + rng: &mut R, + ) -> Self { + self = self.create_or_get_unbonder_bonding_purse(&validator_public_key, &unbond_kind, rng); + let purse_uref = self.last_bonding_purse.expect("should have bonding purse"); + let unbond_era = UnbondEra::new(purse_uref, EraId::new(10), amount, None); + + match self.unbonds.get_mut(&unbond_kind) { + None => { + let unbond = + Unbond::new(validator_public_key, unbond_kind.clone(), vec![unbond_era]); + self.unbonds.insert(unbond_kind, vec![unbond]); + } + Some(existing_unbond) => { + for unbond in existing_unbond { + if !unbond.eras().contains(&unbond_era) { + unbond.eras_mut().push(unbond_era.clone()); + } + } + } + } + self + } + + fn total_supply(&self) -> U512 { + self.total_supply + } +} + +impl StateReader for MockStateReader { + fn query(&mut self, key: Key) -> Option { + match key { + Key::URef(uref) if uref == TOTAL_SUPPLY_KEY => Some(StoredValue::from( + 
CLValue::from_t(self.total_supply).expect("should convert to CLValue"), + )), + Key::URef(uref) if uref == SEIGNIORAGE_RECIPIENTS_KEY => Some(StoredValue::from( + CLValue::from_t(self.seigniorage_recipients.clone()) + .expect("should convert seigniorage recipients to CLValue"), + )), + Key::Account(acc_hash) => self + .accounts + .get(&acc_hash) + .map(|account| StoredValue::from(account.clone())), + Key::Balance(purse_addr) => self.purses.get(&purse_addr).map(|balance| { + StoredValue::from(CLValue::from_t(*balance).expect("should convert to CLValue")) + }), + key => unimplemented!( + "Querying a key of type {:?} is not handled", + key.type_string() + ), + } + } + + fn get_total_supply_key(&mut self) -> Key { + Key::URef(TOTAL_SUPPLY_KEY) + } + + fn get_seigniorage_recipients_key(&mut self) -> Key { + Key::URef(SEIGNIORAGE_RECIPIENTS_KEY) + } + + fn get_account(&mut self, account_hash: AccountHash) -> Option { + self.accounts.get(&account_hash).cloned() + } + + fn get_bids(&mut self) -> Vec { + self.bids.clone() + } + + fn get_withdraws(&mut self) -> WithdrawPurses { + self.withdraws.clone() + } + + fn get_unbonding_purses(&mut self) -> BTreeMap> { + self.unbonding_purses.clone() + } + + fn get_unbonds(&mut self) -> BTreeMap> { + self.unbonds.clone() + } +} + +impl ValidatorInfo { + pub fn new(public_key: &PublicKey, weight: U512) -> Self { + ValidatorInfo { + public_key: public_key.clone(), + weight, + } + } +} + +#[test] +fn should_transfer_funds() { + let mut rng = TestRng::new(); + + let public_key1 = PublicKey::random(&mut rng); + let public_key2 = PublicKey::random(&mut rng); + let account1 = public_key1.to_account_hash(); + let account2 = public_key2.to_account_hash(); + + let mut reader = MockStateReader::new().with_validators( + vec![ + ( + public_key1, + U512::from(1_000_000_000), + ValidatorConfig { + bonded_amount: U512::from(1), + ..Default::default() + }, + ), + ( + public_key2, + U512::zero(), + ValidatorConfig { + bonded_amount: U512::zero(), + 
..Default::default() + }, + ), + ], + &mut rng, + ); + + let config = Config { + transfers: vec![Transfer { + from: account1, + to: account2, + amount: U512::from(300_000_000), + }], + ..Default::default() + }; + + let update = get_update(&mut reader, config); + + // check that the update contains the correct list of validators + update.assert_validators_unchanged(); + + // should write decreased balance to the first purse + let account1 = reader.get_account(account1).expect("should have account"); + update.assert_written_balance(account1.main_purse(), 700_000_000); + + // should write increased balance to the second purse + let account2 = reader.get_account(account2).expect("should have account"); + update.assert_written_balance(account2.main_purse(), 300_000_000); + + // total supply is written on every purse balance change, so we'll have a write to this key + // even though the changes cancel each other out + update.assert_total_supply(&mut reader, 1_000_000_001); + + // 3 keys should be written: + // - balance of account 1 + // - balance of account 2 + // - total supply + assert_eq!(update.len(), 3); +} + +#[test] +fn should_create_account_when_transferring_funds() { + let mut rng = TestRng::new(); + + let public_key1 = PublicKey::random(&mut rng); + let public_key2 = PublicKey::random(&mut rng); + let account1 = public_key1.to_account_hash(); + let account2 = public_key2.to_account_hash(); + + let mut reader = MockStateReader::new().with_validators( + vec![( + public_key1, + U512::from(1_000_000_000), + ValidatorConfig { + bonded_amount: U512::from(1), + ..Default::default() + }, + )], + &mut rng, + ); + + let config = Config { + transfers: vec![Transfer { + from: account1, + to: account2, + amount: U512::from(300_000_000), + }], + ..Default::default() + }; + + let update = get_update(&mut reader, config); + + // check that the update contains the correct list of validators + update.assert_validators_unchanged(); + + let account1 = 
reader.get_account(account1).expect("should have account"); + // account2 shouldn't exist in the reader itself, only the update should be creating it + assert!(reader.get_account(account2).is_none()); + let account2 = update.get_written_addressable_entity(account2); + + // should write decreased balance to the first purse + update.assert_written_balance(account1.main_purse(), 700_000_000); + + // check that the main purse for the new account has been created with the correct amount + update.assert_written_balance(account2.main_purse(), 300_000_000); + update.assert_written_purse_is_unit(account2.main_purse()); + + // total supply is written on every purse balance change, so we'll have a write to this key + // even though the changes cancel each other out + update.assert_total_supply(&mut reader, 1_000_000_001); + + // 7 keys should be written: + // - balance of account 1 + // - account indirection for account 2 + // - the package for the addressable entity associated with account 2 + // - the addressable entity associated with account 2. 
+ // - main purse of account 2 + // - balance of account 2 + // - total supply + assert_eq!(update.len(), 7); +} + +fn validator_config( + public_key: &PublicKey, + balance: U512, + staked: U512, +) -> (PublicKey, U512, ValidatorConfig) { + ( + public_key.clone(), + balance, + ValidatorConfig { + bonded_amount: staked, + ..Default::default() + }, + ) +} + +#[test] +fn should_change_one_validator() { + let mut rng = TestRng::new(); + + let validator1 = PublicKey::random(&mut rng); + let validator1_staked = U512::from(1); + let validator2 = PublicKey::random(&mut rng); + let validator2_staked = U512::from(2); + let validator3 = PublicKey::random(&mut rng); + let validator3_staked = U512::from(3); + + let liquid = U512::from(5); + + let validators = vec![ + validator_config(&validator1, liquid, validator1_staked), + validator_config(&validator2, liquid, validator2_staked), + validator_config(&validator3, liquid, validator3_staked), + ]; + let mut reader = MockStateReader::new().with_validators(validators, &mut rng); + + let mut total_supply: U512 = + (liquid * 3) + validator1_staked + validator2_staked + validator3_staked; + + assert_eq!( + reader.total_supply(), + total_supply, + "initial total supply mismatch" + ); + + let validator3_new_balance = liquid.saturating_add(1.into()); + let validator3_new_staked = validator3_staked.saturating_add(1.into()); + total_supply = total_supply.saturating_add(2.into()); + + // we'll be increasing the stake and balance of validator 3 + let config = Config { + accounts: vec![AccountConfig { + public_key: validator3.clone(), + balance: Some(validator3_new_balance), + validator: Some(ValidatorConfig { + bonded_amount: validator3_new_staked, + delegation_rate: None, + delegators: None, + reservations: None, + }), + }], + ..Default::default() + }; + + let update = get_update(&mut reader, config); + + // check that the update contains the correct list of validators + update.assert_validators(&[ + ValidatorInfo::new(&validator1, 
validator1_staked), + ValidatorInfo::new(&validator2, validator2_staked), + ValidatorInfo::new(&validator3, validator3_new_staked), + ]); + + update.assert_seigniorage_recipients_written(&mut reader); + update.assert_total_supply(&mut reader, total_supply.as_u64()); + + let account3_hash = validator3.to_account_hash(); + let account3 = reader + .get_account(account3_hash) + .expect("should have account"); + update.assert_written_balance(account3.main_purse(), validator3_new_balance.as_u64()); + + let bids = reader.get_bids(); + + let old_bid3 = bids.validator_bid(&validator3).expect("should have bid"); + let bid_purse = *old_bid3.bonding_purse(); + update.assert_written_balance(bid_purse, validator3_new_staked.as_u64()); + + // check bid overwrite + let expected_bid = ValidatorBid::unlocked( + validator3, + bid_purse, + validator3_new_staked, + Default::default(), + 0, + u64::MAX, + 0, + ); + update.assert_written_bid(account3_hash, BidKind::Validator(Box::new(expected_bid))); + + // 5 keys should be written: + // - seigniorage recipients + // - total supply + // - balance of bid purse of validator 3 + // - balance of main purse of validator 3 + // - bid of validator 3 + assert_eq!(update.len(), 5); +} + +#[test] +fn should_change_only_stake_of_one_validator() { + let mut rng = TestRng::new(); + + let validator1 = PublicKey::random(&mut rng); + let validator2 = PublicKey::random(&mut rng); + let validator3 = PublicKey::random(&mut rng); + + let mut reader = MockStateReader::new().with_validators( + vec![ + ( + validator1.clone(), + U512::from(101), + ValidatorConfig { + bonded_amount: U512::from(101), + ..Default::default() + }, + ), + ( + validator2.clone(), + U512::from(102), + ValidatorConfig { + bonded_amount: U512::from(102), + ..Default::default() + }, + ), + ( + validator3.clone(), + U512::from(103), + ValidatorConfig { + bonded_amount: U512::from(103), + ..Default::default() + }, + ), + ], + &mut rng, + ); + + // we'll be updating only the stake of 
validator 3 + let config = Config { + accounts: vec![AccountConfig { + public_key: validator3.clone(), + balance: None, + validator: Some(ValidatorConfig { + bonded_amount: U512::from(104), + delegation_rate: None, + delegators: None, + reservations: None, + }), + }], + ..Default::default() + }; + + let update = get_update(&mut reader, config); + + // check that the update contains the correct list of validators + update.assert_validators(&[ + ValidatorInfo::new(&validator1, U512::from(101)), + ValidatorInfo::new(&validator2, U512::from(102)), + ValidatorInfo::new(&validator3, U512::from(104)), + ]); + + update.assert_seigniorage_recipients_written(&mut reader); + update.assert_total_supply(&mut reader, 613); + + // check purse writes + let account3_hash = validator3.to_account_hash(); + let old_bid3 = reader + .get_bids() + .validator_bid(&validator3) + .expect("should have bid"); + let bid_purse = *old_bid3.bonding_purse(); + + update.assert_written_balance(bid_purse, 104); + + // check bid overwrite + let expected_bid = ValidatorBid::unlocked( + validator3, + bid_purse, + U512::from(104), + Default::default(), + 0, + u64::MAX, + 0, + ); + update.assert_written_bid(account3_hash, BidKind::Validator(Box::new(expected_bid))); + + // 4 keys should be written: + // - seigniorage recipients + // - total supply + // - bid purse balance for validator 3 + // - bid for validator 3 + assert_eq!(update.len(), 4); +} + +#[test] +fn should_change_only_balance_of_one_validator() { + let mut rng = TestRng::new(); + + let validator1 = PublicKey::random(&mut rng); + let validator2 = PublicKey::random(&mut rng); + let validator3 = PublicKey::random(&mut rng); + + let mut reader = MockStateReader::new().with_validators( + vec![ + ( + validator1, + U512::from(101), + ValidatorConfig { + bonded_amount: U512::from(101), + ..Default::default() + }, + ), + ( + validator2, + U512::from(102), + ValidatorConfig { + bonded_amount: U512::from(102), + ..Default::default() + }, + ), + ( + 
validator3.clone(), + U512::from(103), + ValidatorConfig { + bonded_amount: U512::from(103), + ..Default::default() + }, + ), + ], + &mut rng, + ); + + // we'll be updating only the balance of validator 3 + let config = Config { + accounts: vec![AccountConfig { + public_key: validator3.clone(), + balance: Some(U512::from(100)), + validator: None, + }], + ..Default::default() + }; + + let update = get_update(&mut reader, config); + + // check that the update contains the correct list of validators + update.assert_validators_unchanged(); + + update.assert_total_supply(&mut reader, 609); + + // check purse writes + let account3_hash = validator3.to_account_hash(); + let account3 = reader + .get_account(account3_hash) + .expect("should have account"); + + update.assert_written_balance(account3.main_purse(), 100); + + // 2 keys should be written: + // - total supply + // - balance for main purse of validator 3 + assert_eq!(update.len(), 2); +} + +#[test] +fn should_replace_one_validator() { + let mut rng = TestRng::new(); + + let validator1 = PublicKey::random(&mut rng); + let validator2 = PublicKey::random(&mut rng); + + let mut reader = MockStateReader::new().with_validators( + vec![( + validator1.clone(), + U512::from(101), + ValidatorConfig { + bonded_amount: U512::from(101), + ..Default::default() + }, + )], + &mut rng, + ); + + // we'll be updating the validators set to only contain validator2 + let config = Config { + accounts: vec![AccountConfig { + public_key: validator2.clone(), + balance: Some(U512::from(102)), + validator: Some(ValidatorConfig { + bonded_amount: U512::from(102), + delegation_rate: None, + delegators: None, + reservations: None, + }), + }], + only_listed_validators: true, + slash_instead_of_unbonding: true, + ..Default::default() + }; + + let update = get_update(&mut reader, config); + + // check that the update contains the correct list of validators + update.assert_validators(&[ValidatorInfo::new(&validator2, U512::from(102))]); + + 
update.assert_seigniorage_recipients_written(&mut reader); + update.assert_total_supply(&mut reader, 305); + + // check purse write for validator1 + let old_bid1 = reader + .get_bids() + .validator_bid(&validator1) + .expect("should have bid"); + let bid_purse = *old_bid1.bonding_purse(); + + update.assert_written_balance(bid_purse, 0); + + // check bid overwrite + let account1_hash = validator1.to_account_hash(); + let mut expected_bid_1 = ValidatorBid::unlocked( + validator1, + bid_purse, + U512::zero(), + Default::default(), + 0, + u64::MAX, + 0, + ); + expected_bid_1.deactivate(); + update.assert_written_bid(account1_hash, BidKind::Validator(Box::new(expected_bid_1))); + + // check writes for validator2 + let account2_hash = validator2.to_account_hash(); + + // the new account should be created + let account2 = update.get_written_addressable_entity(account2_hash); + + // check that the main purse for the new account has been created with the correct amount + update.assert_written_purse_is_unit(account2.main_purse()); + update.assert_written_balance(account2.main_purse(), 102); + + let bid_write = update.get_written_bid(account2_hash); + assert_eq!(bid_write.validator_public_key(), validator2); + let total_stake = update + .get_total_stake(account2_hash) + .expect("should have total stake"); + assert_eq!(total_stake, U512::from(102)); + assert!(!bid_write.inactive()); + + // check that the bid purse for the new validator has been created with the correct amount + update.assert_written_purse_is_unit(bid_write.bonding_purse().unwrap()); + update.assert_written_balance(bid_write.bonding_purse().unwrap(), 102); + + // 12 keys should be written: + // - seigniorage recipients + // - total supply + // - bid for validator 1 + // - bonding purse balance for validator 1 + // - account indirection for validator 2 + // - the package for the addressable entity associated with validator 2 + // - the addressable entity associated with validator 2. 
+ // - main purse for account for validator 2 + // - main purse balance for account for validator 2 + // - bid for validator 2 + // - bonding purse for validator 2 + // - bonding purse balance for validator 2 + assert_eq!(update.len(), 12); +} + +#[test] +fn should_replace_one_validator_with_unbonding() { + let mut rng = TestRng::new(); + + let validator1 = PublicKey::random(&mut rng); + let validator2 = PublicKey::random(&mut rng); + + let mut reader = MockStateReader::new().with_validators( + vec![( + validator1.clone(), + U512::from(101), + ValidatorConfig { + bonded_amount: U512::from(101), + ..Default::default() + }, + )], + &mut rng, + ); + + // we'll be updating the validators set to only contain validator2 + let config = Config { + accounts: vec![AccountConfig { + public_key: validator2.clone(), + balance: Some(U512::from(102)), + validator: Some(ValidatorConfig { + bonded_amount: U512::from(102), + ..Default::default() + }), + }], + only_listed_validators: true, + slash_instead_of_unbonding: false, + ..Default::default() + }; + + let update = get_update(&mut reader, config); + + // check that the update contains the correct list of validators + update.assert_validators(&[ValidatorInfo::new(&validator2, U512::from(102))]); + + update.assert_seigniorage_recipients_written(&mut reader); + update.assert_total_supply(&mut reader, 406); + + // check purse write for validator1 + let old_bid1 = reader + .get_bids() + .validator_bid(&validator1) + .expect("should have bid"); + let bid_purse = *old_bid1.bonding_purse(); + + // bid purse balance should be unchanged + update.assert_key_absent(&Key::Balance(bid_purse.addr())); + + // should write an unbonding purse + update.assert_unbond_bid_kind( + bid_purse, + &validator1, + &UnbondKind::Validator(validator1.clone()), + 101, + ); + + // check bid overwrite + let account1_hash = validator1.to_account_hash(); + let mut expected_bid_1 = ValidatorBid::unlocked( + validator1, + bid_purse, + U512::zero(), + 
Default::default(), + 0, + u64::MAX, + 0, + ); + expected_bid_1.deactivate(); + update.assert_written_bid(account1_hash, BidKind::Validator(Box::new(expected_bid_1))); + + // check writes for validator2 + let account2_hash = validator2.to_account_hash(); + + // the new account should be created + let account2 = update.get_written_addressable_entity(account2_hash); + + // check that the main purse for the new account has been created with the correct amount + update.assert_written_purse_is_unit(account2.main_purse()); + update.assert_written_balance(account2.main_purse(), 102); + + let bid_write = update.get_written_bid(account2_hash); + assert_eq!(bid_write.validator_public_key(), validator2); + let total_stake = update + .get_total_stake(account2_hash) + .expect("should have total stake"); + assert_eq!(total_stake, U512::from(102)); + assert!(!bid_write.inactive()); + + // check that the bid purse for the new validator has been created with the correct amount + update.assert_written_purse_is_unit(bid_write.bonding_purse().unwrap()); + update.assert_written_balance(bid_write.bonding_purse().unwrap(), 102); + + // 12 keys should be written: + // - seigniorage recipients + // - total supply + // - bid for validator 1 + // - unbonding purse for validator 1 + // - account indirection for validator 2 + // - the package for the addressable entity associated with validator 2 + // - the addressable entity associated with validator 2. 
+ // - main purse for account for validator 2 + // - main purse balance for account for validator 2 + // - bid for validator 2 + // - bonding purse for validator 2 + // - bonding purse balance for validator 2 + assert_eq!(update.len(), 12); +} + +#[test] +fn should_add_one_validator() { + let mut rng = TestRng::new(); + + let mut validators = BTreeMap::new(); + for index in 1..4 { + let balance = index * 10; + validators.insert( + PublicKey::random(&mut rng), + (U512::from(balance), U512::from(index)), + ); + } + + let initial_validators = validators + .iter() + .map(|(k, (b, s))| { + ( + k.clone(), + *b, + ValidatorConfig { + bonded_amount: *s, + ..Default::default() + }, + ) + }) + .collect(); + + let initial_supply: u64 = validators.iter().map(|(_, (b, s))| (b + s).as_u64()).sum(); + + let mut reader = MockStateReader::new().with_validators(initial_validators, &mut rng); + + assert_eq!( + reader.total_supply().as_u64(), + initial_supply, + "initial supply should equal" + ); + + let validator4 = PublicKey::random(&mut rng); + let v4_balance = U512::from(40); + let v4_stake = U512::from(4); + validators.insert(validator4.clone(), (v4_balance, v4_stake)); + let config = Config { + accounts: vec![AccountConfig { + public_key: validator4.clone(), + balance: Some(v4_balance), + validator: Some(ValidatorConfig { + bonded_amount: v4_stake, + delegation_rate: None, + delegators: None, + reservations: None, + }), + }], + only_listed_validators: false, + ..Default::default() + }; + + let update = get_update(&mut reader, config); + let expected_supply: u64 = validators.iter().map(|(_, (b, s))| (b + s).as_u64()).sum(); + assert_eq!( + initial_supply + (v4_stake + v4_balance).as_u64(), + expected_supply, + "should match" + ); + + update.assert_total_supply(&mut reader, expected_supply); + + let expected_staking = validators + .iter() + .map(|(k, (_, s))| ValidatorInfo::new(k, *s)) + .collect_vec(); + + // check that the update contains the correct list of validators + 
update.assert_validators(&expected_staking); + update.assert_seigniorage_recipients_written(&mut reader); + + // check writes for validator4 + let account4_hash = validator4.to_account_hash(); + // the new account should be created + let account4 = update.get_written_addressable_entity(account4_hash); + let total_stake = update + .get_total_stake(account4_hash) + .expect("should have total stake"); + assert_eq!(total_stake, v4_stake); + // check that the main purse for the new account has been created with the correct amount + update.assert_written_purse_is_unit(account4.main_purse()); + update.assert_written_balance(account4.main_purse(), v4_balance.as_u64()); + // check that the bid purse for the new validator has been created with the correct amount + let bid4 = update.get_written_bid(account4_hash); + assert_eq!(bid4.validator_public_key(), validator4); + update.assert_written_balance(bid4.bonding_purse().unwrap(), v4_stake.as_u64()); + update.assert_written_purse_is_unit(bid4.bonding_purse().unwrap()); + + assert!(!bid4.inactive()); + + // 10 keys should be written: + // - seigniorage recipients snapshot + // - total supply + // - account indirection for validator 4 + // - package for the addressable entity associated with validator 4 + // - the addressable entity record associated with validator 4 + // - main purse for account for validator 4 + // - main purse balance for account for validator 4 + // - bid for validator 4 + // - bonding purse for validator 4 + // - bonding purse balance for validator 4 + assert_eq!(update.len(), 10); +} + +#[test] +fn should_add_one_validator_with_delegators() { + let mut rng = TestRng::new(); + + let validator1 = PublicKey::random(&mut rng); + let validator2 = PublicKey::random(&mut rng); + let delegator1 = PublicKey::random(&mut rng); + + let mut reader = MockStateReader::new().with_validators( + vec![( + validator1.clone(), + U512::from(101), + ValidatorConfig { + bonded_amount: U512::from(101), + ..Default::default() + },
+ )], + &mut rng, + ); + + // we'll be adding validator 2 + let config = Config { + accounts: vec![AccountConfig { + public_key: validator2.clone(), + balance: Some(U512::from(100)), + validator: Some(ValidatorConfig { + bonded_amount: U512::from(102), + delegation_rate: Some(5), + delegators: Some(vec![DelegatorConfig { + public_key: delegator1.clone(), + delegated_amount: U512::from(13), + }]), + reservations: None, + }), + }], + only_listed_validators: false, + ..Default::default() + }; + + let update = get_update(&mut reader, config); + + // check that the update contains the correct list of validators + update.assert_validators(&[ + ValidatorInfo::new(&validator1, U512::from(101)), + ValidatorInfo::new(&validator2, U512::from(115)), + ]); + + update.assert_seigniorage_recipients_written(&mut reader); + update.assert_total_supply(&mut reader, 417); + + // check writes for validator2 + let account2_hash = validator2.to_account_hash(); + + // the new account should be created + let account2 = update.get_written_addressable_entity(account2_hash); + + // check that the main purse for the new account has been created with the correct amount + update.assert_written_purse_is_unit(account2.main_purse()); + update.assert_written_balance(account2.main_purse(), 100); + + let bid2 = update.get_written_bid(account2_hash); + assert_eq!(bid2.validator_public_key(), validator2); + assert_eq!(bid2.staked_amount().unwrap(), U512::from(102)); + let total_staked = update + .get_total_stake(account2_hash) + .expect("should have total stake"); + assert_eq!(total_staked, U512::from(115)); + assert!(!bid2.inactive()); + + // check that the bid purse for the new validator has been created with the correct amount + update.assert_written_purse_is_unit(bid2.bonding_purse().unwrap()); + update.assert_written_balance(bid2.bonding_purse().unwrap(), 102); + + if let BidKind::Validator(validator_bid) = bid2 { + let bid_delegator_purse = *update + .delegator(&validator_bid, &delegator1.into()) 
+ .expect("should have delegator") + .bonding_purse(); + // check that the bid purse for the new delegator has been created with the correct amount + update.assert_written_purse_is_unit(bid_delegator_purse); + update.assert_written_balance(bid_delegator_purse, 13); + } + + // 13 keys should be written: + // - seigniorage recipients + // - total supply + // - account indirection for validator 2 + // - main purse for account for validator 2 + // - main purse balance for account for validator 2 + // - package for the addressable entity associated with validator 2 + // - the addressable entity record associated with validator 2 + // - bid for validator 2 + // - bonding purse for validator 2 + // - bonding purse balance for validator2 + // - bid for delegator + // - bonding purse for delegator + // - bonding purse balance for delegator + assert_eq!(update.len(), 13); +} + +#[test] +fn should_replace_a_delegator() { + let mut rng = TestRng::new(); + + let validator1 = PublicKey::random(&mut rng); + let v1_stake = 1; + let v1_balance = 100; + let v1_updated_balance = 100; + let v1_updated_stake = 4; + let delegator1 = PublicKey::random(&mut rng); + let d1_stake = 2; + let delegator2 = PublicKey::random(&mut rng); + let d2_stake = 3; + + let mut reader = MockStateReader::new().with_validators( + vec![( + validator1.clone(), + U512::from(v1_balance), + ValidatorConfig { + bonded_amount: U512::from(v1_stake), + delegation_rate: Some(5), + delegators: Some(vec![DelegatorConfig { + public_key: delegator1.clone(), + delegated_amount: U512::from(d1_stake), + }]), + reservations: None, + }, + )], + &mut rng, + ); + + // we'll be replacing the delegator + let config = Config { + accounts: vec![AccountConfig { + public_key: validator1.clone(), + balance: Some(U512::from(v1_updated_balance)), + validator: Some(ValidatorConfig { + bonded_amount: U512::from(v1_updated_stake), + delegation_rate: None, + delegators: Some(vec![DelegatorConfig { + public_key: delegator2.clone(), + 
delegated_amount: U512::from(d2_stake), + }]), + reservations: None, + }), + }], + only_listed_validators: false, + slash_instead_of_unbonding: true, + ..Default::default() + }; + + let update = get_update(&mut reader, config); + + // check that the update contains the correct list of validators + update.assert_validators(&[ValidatorInfo::new( + &validator1, + U512::from(v1_updated_stake + d2_stake), + )]); + + update.assert_seigniorage_recipients_written(&mut reader); + update.assert_total_supply( + &mut reader, + v1_updated_balance + v1_updated_stake + d2_stake, + ); + + let account1_hash = validator1.to_account_hash(); + + let bid1 = update.get_written_bid(account1_hash); + assert_eq!(bid1.validator_public_key(), validator1); + assert_eq!(bid1.staked_amount().unwrap(), U512::from(v1_updated_stake)); + let total_stake = update + .get_total_stake(account1_hash) + .expect("should have total stake"); + assert_eq!(total_stake, U512::from(v1_updated_stake + d2_stake)); + assert!(!bid1.inactive()); + + let initial_bids = reader.get_bids(); + + let validator_bid = initial_bids + .validator_bid(&validator1) + .expect("should have old bid"); + let delegator1_bid_purse = *initial_bids + .delegator_by_kind(&validator1, &DelegatorKind::PublicKey(delegator1.clone())) + .expect("should have old delegator") + .bonding_purse(); + + let delegator2_bid_purse = *update + .delegator(&validator_bid, &delegator2.into()) + .expect("should have new delegator") + .bonding_purse(); + + // check that the old delegator's bid purse got zeroed + update.assert_written_balance(delegator1_bid_purse, 0); + + // check that the bid purse for the new delegator has been created with the correct amount + update.assert_written_purse_is_unit(delegator2_bid_purse); + update.assert_written_balance(delegator2_bid_purse, d2_stake); + + // 9 keys should be written: + // - seigniorage recipients + // - total supply + // - main purse for validator 1 + // - 3 bids, 3 balances + assert_eq!(update.len(), 9); +} + 
+#[test] +fn should_replace_a_delegator_with_unbonding() { + let mut rng = TestRng::new(); + + let validator1 = PublicKey::random(&mut rng); + let (v1_stake, v1_balance) = (1, 100); + let (v1_updated_stake, v1_updated_balance) = (4, 200); + let delegator1 = PublicKey::random(&mut rng); + let d1_stake = 2; + let delegator2 = PublicKey::random(&mut rng); + let d2_stake = 3; + + let mut reader = MockStateReader::new().with_validators( + vec![( + validator1.clone(), + U512::from(v1_balance), + ValidatorConfig { + bonded_amount: U512::from(v1_stake), + delegation_rate: Some(5), + delegators: Some(vec![DelegatorConfig { + public_key: delegator1.clone(), + delegated_amount: U512::from(d1_stake), + }]), + reservations: None, + }, + )], + &mut rng, + ); + + // we'll be replacing the delegator + let config = Config { + accounts: vec![AccountConfig { + public_key: validator1.clone(), + balance: Some(U512::from(v1_updated_balance)), + validator: Some(ValidatorConfig { + bonded_amount: U512::from(v1_updated_stake), + delegation_rate: None, + delegators: Some(vec![DelegatorConfig { + public_key: delegator2.clone(), + delegated_amount: U512::from(d2_stake), + }]), + reservations: None, + }), + }], + only_listed_validators: false, + slash_instead_of_unbonding: false, + ..Default::default() + }; + + let update = get_update(&mut reader, config); + + // check that the update contains the correct list of validators + update.assert_validators(&[ValidatorInfo::new( + &validator1, + U512::from(v1_updated_stake + d2_stake), + )]); + + update.assert_seigniorage_recipients_written(&mut reader); + update.assert_total_supply( + &mut reader, + v1_updated_balance + v1_updated_stake + d1_stake + d2_stake, + ); + + let account1_hash = validator1.to_account_hash(); + + let bid1 = update.get_written_bid(account1_hash); + assert_eq!(bid1.validator_public_key(), validator1); + assert_eq!(bid1.staked_amount().unwrap(), U512::from(v1_updated_stake)); + let total_stake = update + 
.get_total_stake(account1_hash) + .expect("should have total stake"); + assert_eq!(total_stake, U512::from(v1_updated_stake + d2_stake)); + assert!(!bid1.inactive()); + + let initial_bids = reader.get_bids(); + + let validator_bid = initial_bids + .validator_bid(&validator1) + .expect("should have old bid"); + let delegator1_bid_purse = *initial_bids + .delegator_by_kind(&validator1, &DelegatorKind::PublicKey(delegator1.clone())) + .expect("should have old delegator") + .bonding_purse(); + + let delegator2_bid_purse = *update + .delegator(&validator_bid, &delegator2.into()) + .expect("should have new delegator") + .bonding_purse(); + + // check that the old delegator's bid purse hasn't been updated + update.assert_key_absent(&Key::Balance(delegator1_bid_purse.addr())); + + // check that the old delegator has been unbonded + update.assert_unbond_bid_kind( + delegator1_bid_purse, + &validator1, + &UnbondKind::DelegatedPublicKey(delegator1.clone()), + d1_stake, + ); + + // check that the bid purse for the new delegator has been created with the correct amount + update.assert_written_purse_is_unit(delegator2_bid_purse); + update.assert_written_balance(delegator2_bid_purse, d2_stake); + + // 10 keys should be written: + // - seigniorage recipients + // - total supply + // - main purse for validator 1 + // - 3 bids, 3 balances, 1 unbond + assert_eq!(update.len(), 10); +} + +#[test] +fn should_not_change_the_delegator() { + let mut rng = TestRng::new(); + + let validator1 = PublicKey::random(&mut rng); + let v1_balance = 100; + let v1_stake = 1; + let delegator1 = PublicKey::random(&mut rng); + let d1_stake = 2; + let v1_updated_stake = 3; + let v1_updated_balance = 200; + + let mut reader = MockStateReader::new().with_validators( + vec![( + validator1.clone(), + U512::from(v1_balance), + ValidatorConfig { + bonded_amount: U512::from(v1_stake), + delegation_rate: Some(5), + delegators: Some(vec![DelegatorConfig { + public_key: delegator1, + delegated_amount: 
U512::from(d1_stake), + }]), + reservations: None, + }, + )], + &mut rng, + ); + + // we'll be changing the validator's stake + let config = Config { + accounts: vec![AccountConfig { + public_key: validator1.clone(), + balance: Some(U512::from(v1_updated_balance)), + validator: Some(ValidatorConfig { + bonded_amount: U512::from(v1_updated_stake), + delegation_rate: None, + delegators: None, + reservations: None, + }), + }], + only_listed_validators: false, + ..Default::default() + }; + + let update = get_update(&mut reader, config); + + // check that the update contains the correct list of validators + update.assert_validators(&[ValidatorInfo::new( + &validator1, + U512::from(d1_stake + v1_updated_stake), + )]); + + update.assert_seigniorage_recipients_written(&mut reader); + update.assert_total_supply( + &mut reader, + v1_updated_balance + d1_stake + v1_updated_stake, + ); + + let account1_hash = validator1.to_account_hash(); + + let bid1 = update.get_written_bid(account1_hash); + assert_eq!(bid1.validator_public_key(), validator1); + assert_eq!(bid1.staked_amount().unwrap(), U512::from(v1_updated_stake)); + let total_stake = update + .get_total_stake(account1_hash) + .expect("should have total stake"); + assert_eq!(total_stake, U512::from(v1_updated_stake)); + assert!(!bid1.inactive()); + + // check that the validator's bid purse got updated + update.assert_written_balance(bid1.bonding_purse().unwrap(), v1_updated_stake); + + // 5 keys should be written: + // - seigniorage recipients + // - total supply + // - bid for validator 1 + // - bid for delegator 1 + // - bonding purse balance for validator 1 + assert_eq!(update.len(), 5); +} + +#[test] +fn should_remove_the_delegator() { + let mut rng = TestRng::new(); + + let validator1 = PublicKey::random(&mut rng); + let delegator1 = PublicKey::random(&mut rng); + + let v_balance = U512::from(10); + let v_stake = U512::from(1); + let d_stake = U512::from(2); + let initial_supply = v_balance + v_stake + d_stake; + + 
let mut reader = MockStateReader::new().with_validators( + vec![( + validator1.clone(), + v_balance, + ValidatorConfig { + bonded_amount: v_stake, + delegation_rate: Some(5), + delegators: Some(vec![DelegatorConfig { + public_key: delegator1.clone(), + delegated_amount: d_stake, + }]), + reservations: None, + }, + )], + &mut rng, + ); + + assert_eq!( + reader.total_supply(), + initial_supply, + "should match initial supply" + ); + + /* validator and delegator bids should be present */ + let original_bids = reader.get_bids(); + let validator_bid = original_bids + .validator_bid(&validator1) + .expect("should have old bid"); + let validator_initial_stake = reader + .purses + .get(&validator_bid.bonding_purse().addr()) + .expect("should have validator initial stake"); + assert_eq!( + *validator_initial_stake, v_stake, + "validator initial balance should match" + ); + let delegator_bid = original_bids + .delegator_by_kind(&validator1, &DelegatorKind::PublicKey(delegator1.clone())) + .expect("should have delegator"); + let delegator_initial_stake = reader + .purses + .get(&delegator_bid.bonding_purse().addr()) + .expect("should have delegator initial stake"); + assert_eq!( + *delegator_initial_stake, d_stake, + "delegator initial balance should match" + ); + + let v_updated_balance = U512::from(20); + let v_updated_stake = U512::from(2); + let updated_supply = v_updated_balance + v_updated_stake; + + /* make various changes to the bid, including removal of delegator */ + let config = Config { + accounts: vec![AccountConfig { + public_key: validator1.clone(), + balance: Some(v_updated_balance), + validator: Some(ValidatorConfig { + bonded_amount: v_updated_stake, + delegation_rate: None, + delegators: Some(vec![]), + reservations: None, + }), + }], + only_listed_validators: false, + slash_instead_of_unbonding: true, + ..Default::default() + }; + + let update = get_update(&mut reader, config); + + /* check high level details */ + let expected_validator_set = 
&[ValidatorInfo::new(&validator1, v_updated_stake)]; + update.assert_validators(expected_validator_set); + update.assert_seigniorage_recipients_written(&mut reader); + update.assert_total_supply(&mut reader, updated_supply.as_u64()); + + /* check validator bid details */ + let account1_hash = validator1.to_account_hash(); + let updated_validator_bid = update.get_written_bid(account1_hash); + update.assert_written_balance( + updated_validator_bid.bonding_purse().unwrap(), + v_updated_stake.as_u64(), + ); + assert_eq!(updated_validator_bid.validator_public_key(), validator1); + assert_eq!( + updated_validator_bid.staked_amount().unwrap(), + v_updated_stake + ); + let total_staked = update + .get_total_stake(account1_hash) + .expect("should have total stake"); + assert_eq!(total_staked, v_updated_stake); + assert!(!updated_validator_bid.inactive()); + // The delegator's bonding purse should be 0'd + update.assert_written_balance(*delegator_bid.bonding_purse(), 0); + + // 7 keys should be written: + // - seigniorage recipients + // - total supply + // - main purse for validator 1 + // - bonding purse balance for validator 1 + // - bonding purse balance for delegator 1 + // - unbonding for delegator 1 + // - bid for validator 1 + assert_eq!(update.len(), 7); +} + +#[test] +fn should_remove_the_delegator_with_unbonding() { + let mut rng = TestRng::new(); + + let validator1 = PublicKey::random(&mut rng); + let delegator1 = PublicKey::random(&mut rng); + + let mut reader = MockStateReader::new().with_validators( + vec![( + validator1.clone(), + U512::from(101), + ValidatorConfig { + bonded_amount: U512::from(101), + delegation_rate: Some(5), + delegators: Some(vec![DelegatorConfig { + public_key: delegator1.clone(), + delegated_amount: U512::from(13), + }]), + reservations: None, + }, + )], + &mut rng, + ); + + // we'll be removing the delegator + let config = Config { + accounts: vec![AccountConfig { + public_key: validator1.clone(), + balance: Some(U512::from(101)), + 
validator: Some(ValidatorConfig { + bonded_amount: U512::from(111), + delegation_rate: None, + delegators: Some(vec![]), + reservations: None, + }), + }], + only_listed_validators: false, + slash_instead_of_unbonding: false, + ..Default::default() + }; + + let update = get_update(&mut reader, config); + + // check that the update contains the correct list of validators + update.assert_validators(&[ValidatorInfo::new(&validator1, U512::from(111))]); + + update.assert_seigniorage_recipients_written(&mut reader); + update.assert_total_supply(&mut reader, 225); + + let account1_hash = validator1.to_account_hash(); + + let expected = U512::from(111); + let bid1 = update.get_written_bid(account1_hash); + assert_eq!(bid1.validator_public_key(), validator1); + assert_eq!(bid1.staked_amount().unwrap(), expected); + + let total_stake = update + .get_total_stake(account1_hash) + .expect("should have total stake"); + + assert_eq!(total_stake, expected); + assert!(!bid1.inactive()); + + // check that the validator's bid purse got updated + update.assert_written_balance(bid1.bonding_purse().unwrap(), 111); + + let old_bids1 = reader.get_bids(); + let _ = old_bids1 + .validator_bid(&validator1) + .expect("should have validator1"); + + let delegator1_bid = old_bids1 + .delegator_by_kind(&validator1, &DelegatorKind::PublicKey(delegator1.clone())) + .expect("should have delegator1"); + + let delegator1_bid_purse = *delegator1_bid.bonding_purse(); + + // check that the old delegator's bid purse hasn't been updated + update.assert_key_absent(&Key::Balance(delegator1_bid_purse.addr())); + + // check that the unbonding purse got created + update.assert_unbond_bid_kind( + delegator1_bid_purse, + &validator1, + &UnbondKind::DelegatedPublicKey(delegator1.clone()), + 13, + ); + + // 6 keys should be written: + // - seigniorage recipients + // - total supply + // - bid for validator 1 + // - bid for delegator 1 + // - bonding purse balance for validator 1 + // - unbonding purse for delegator 
+ assert_eq!(update.len(), 6); +} + +#[test] +fn should_slash_a_validator_and_delegator_with_enqueued_withdraws() { + let mut rng = TestRng::new(); + + let validator1 = PublicKey::random(&mut rng); + let validator2 = PublicKey::random(&mut rng); + let delegator1 = PublicKey::random(&mut rng); + let delegator2 = PublicKey::random(&mut rng); + let past_delegator1 = PublicKey::random(&mut rng); + let past_delegator2 = PublicKey::random(&mut rng); + + let amount = U512::one(); + let era_id = EraId::new(1); + + let validator1_config = ValidatorConfig { + bonded_amount: amount, + delegation_rate: Some(5), + delegators: Some(vec![DelegatorConfig { + public_key: delegator1.clone(), + delegated_amount: amount, + }]), + reservations: None, + }; + + let mut reader = MockStateReader::new() + .with_validators( + vec![ + (validator1.clone(), amount, validator1_config.clone()), + ( + validator2.clone(), + amount, + ValidatorConfig { + bonded_amount: amount, + delegation_rate: Some(5), + delegators: Some(vec![DelegatorConfig { + public_key: delegator2.clone(), + delegated_amount: amount, + }]), + reservations: None, + }, + ), + ], + &mut rng, + ) + .with_withdraw( + validator1.clone(), + UnbondKind::Validator(validator1.clone()), + era_id, + amount, + &mut rng, + ) + .with_withdraw( + validator1.clone(), + UnbondKind::DelegatedPublicKey(delegator1), + era_id, + amount, + &mut rng, + ) + .with_withdraw( + validator1.clone(), + UnbondKind::DelegatedPublicKey(past_delegator1), + era_id, + amount, + &mut rng, + ) + .with_withdraw( + validator2.clone(), + UnbondKind::Validator(validator2.clone()), + era_id, + amount, + &mut rng, + ) + .with_withdraw( + validator2.clone(), + UnbondKind::DelegatedPublicKey(delegator2.clone()), + era_id, + amount, + &mut rng, + ) + .with_withdraw( + validator2.clone(), + UnbondKind::DelegatedPublicKey(past_delegator2.clone()), + era_id, + amount, + &mut rng, + ); + + // we'll be removing validator2 + let config = Config { + accounts: vec![AccountConfig { 
+ public_key: validator1.clone(), + balance: Some(amount), + validator: Some(validator1_config), + }], + only_listed_validators: true, + slash_instead_of_unbonding: true, + ..Default::default() + }; + + let update = get_update(&mut reader, config); + + // check that the update contains the correct list of validators + update.assert_validators(&[ValidatorInfo::new(&validator1, amount * 2)]); + + update.assert_seigniorage_recipients_written(&mut reader); + + // check validator2 slashed + let old_bids2 = reader.get_bids(); + let old_bid2 = old_bids2 + .validator_bid(&validator2) + .expect("should have validator2"); + + update.assert_written_balance(*old_bid2.bonding_purse(), 0); + let delegator2_record = old_bids2 + .delegator_by_kind(&validator2, &DelegatorKind::PublicKey(delegator2.clone())) + .expect("should have delegator 2"); + + // check delegator2 slashed + update.assert_written_balance(*delegator2_record.bonding_purse(), 0); + // check past_delegator2 untouched + let past_delegator2_bid_purse = reader + .withdraws + .get(&validator2.to_account_hash()) + .expect("should have withdraws for validator2") + .iter() + .find(|withdraw| withdraw.unbonder_public_key() == &past_delegator2) + .expect("should have withdraw purses") + .bonding_purse(); + update.assert_key_absent(&Key::Balance(past_delegator2_bid_purse.addr())); + + // check validator1 and its delegators not slashed + for withdraw in reader + .withdraws + .get(&validator1.to_account_hash()) + .expect("should have withdraws for validator2") + { + update.assert_key_absent(&Key::Balance(withdraw.bonding_purse().addr())); + } + + // check the withdraws under validator 2 still contain the past delegator's withdraw + update.assert_withdraw_purse(*past_delegator2_bid_purse, &validator2, &past_delegator2, 1); + + // check the withdraws under validator 1 are unchanged + update.assert_key_absent(&Key::Withdraw(validator1.to_account_hash())); + + // 8 keys should be written: + // - seigniorage recipients + // - total 
supply + // - 2 balances, 2 bids, 1 withdraw + assert_eq!(update.len(), 7); +} + +#[ignore] +#[test] +fn should_slash_a_validator_and_delegator_with_enqueued_unbonds() { + let mut rng = TestRng::new(); + + let (v1_balance, v2_balance) = (100u64, 200u64); + let (v1_stake, v2_stake, d1_stake, d2_stake) = (1u64, 2u64, 3u64, 4u64); + let (pd1_stake, pd2_stake) = (10u64, 11u64); + + let validator1 = PublicKey::random(&mut rng); + let validator2 = PublicKey::random(&mut rng); + let delegator1 = PublicKey::random(&mut rng); + let delegator2 = PublicKey::random(&mut rng); + + let past_delegator1 = PublicKey::random(&mut rng); + let past_delegator2 = PublicKey::random(&mut rng); + + let validator1_config = ValidatorConfig { + bonded_amount: U512::from(v1_stake), + delegation_rate: Some(5), + delegators: Some(vec![DelegatorConfig { + public_key: delegator1.clone(), + delegated_amount: U512::from(d1_stake), + }]), + reservations: None, + }; + + let validator_2_config = ValidatorConfig { + bonded_amount: U512::from(v2_stake), + delegation_rate: Some(5), + delegators: Some(vec![DelegatorConfig { + public_key: delegator2.clone(), + delegated_amount: U512::from(d2_stake), + }]), + reservations: None, + }; + + let mut reader = MockStateReader::new() + .with_validators( + vec![ + ( + validator1.clone(), + v1_balance.into(), + validator1_config.clone(), + ), + ( + validator2.clone(), + v2_balance.into(), + validator_2_config.clone(), + ), + ], + &mut rng, + ) + .with_unbond( + validator1.clone(), + UnbondKind::Validator(validator1.clone()), + v1_stake.into(), + &mut rng, + ) + .with_unbond( + validator1.clone(), + UnbondKind::DelegatedPublicKey(delegator1.clone()), + d1_stake.into(), + &mut rng, + ) + .with_unbond( + validator1.clone(), + UnbondKind::DelegatedPublicKey(past_delegator1.clone()), + pd1_stake.into(), + &mut rng, + ) + .with_unbond( + validator2.clone(), + UnbondKind::Validator(validator2.clone()), + v2_stake.into(), + &mut rng, + ) + .with_unbond( + validator2.clone(), 
+ UnbondKind::DelegatedPublicKey(delegator2.clone()), + d2_stake.into(), + &mut rng, + ) + .with_unbond( + validator2.clone(), + UnbondKind::DelegatedPublicKey(past_delegator2.clone()), + pd2_stake.into(), + &mut rng, + ); + + // we'll be removing validator2 + let config = Config { + accounts: vec![AccountConfig { + public_key: validator1.clone(), + balance: Some(v1_stake.into()), + validator: Some(validator1_config), + }], + only_listed_validators: true, + slash_instead_of_unbonding: true, + ..Default::default() + }; + + let update = get_update(&mut reader, config); + + // check that the update contains the correct list of validators + update.assert_validators(&[ValidatorInfo::new( + &validator1, + U512::from(v1_stake + d1_stake), + )]); + + update.assert_seigniorage_recipients_written(&mut reader); + + let old_bids = reader.get_bids(); + // check validator2 slashed + let old_bid2 = old_bids + .validator_bid(&validator2) + .expect("should have bid"); + update.assert_written_balance(*old_bid2.bonding_purse(), 0); + + let delegator = old_bids + .delegator_by_kind(&validator2, &DelegatorKind::PublicKey(delegator2.clone())) + .expect("should have delegator"); + + // check delegator2 slashed + update.assert_written_balance(*delegator.bonding_purse(), 0); + let unbond_kind = UnbondKind::DelegatedPublicKey(past_delegator2.clone()); + // check past_delegator2 untouched + let past_delegator2_bid_purse = reader + .unbonds + .get(&unbond_kind) + .expect("should have unbonds for validator2") + .first() + .expect("must have at least one entry") + .eras() + .first() + .expect("should have unbonding purses") + .bonding_purse(); + update.assert_key_absent(&Key::Balance(past_delegator2_bid_purse.addr())); + let unbond_kind = UnbondKind::Validator(validator1.clone()); + // check validator1 and its delegators not slashed + for unbond in reader + .unbonds + .get(&unbond_kind) + .expect("should have unbonds for validator2") + .first() + .expect("must have at least one entry") + 
.eras() + { + update.assert_key_absent(&Key::Balance(unbond.bonding_purse().addr())); + } + + // check the unbonds under validator 1 are unchanged + update.assert_key_absent(&Key::Unbond(validator1.to_account_hash())); + update.assert_key_absent(&Key::Unbond(delegator1.to_account_hash())); + update.assert_key_absent(&Key::Unbond(past_delegator1.to_account_hash())); + + // 8 keys should be written for validator1: + // - seigniorage recipients + // - total supply + // - 3 balances, 2 bids, + // - 1 unbonds + assert_eq!(update.len(), 7); +} + +#[test] +fn should_handle_unbonding_to_oneself_correctly() { + let rng = &mut TestRng::new(); + + let old_validator = PublicKey::random(rng); + let new_validator = PublicKey::random(rng); + + const OLD_BALANCE: u64 = 31; + const NEW_BALANCE: u64 = 73; + const OLD_STAKE: u64 = 97; + const NEW_STAKE: u64 = 103; + + let mut reader = MockStateReader::new() + .with_account(old_validator.to_account_hash(), OLD_BALANCE.into(), rng) + .with_validators( + vec![( + old_validator.clone(), + U512::from(OLD_BALANCE), + ValidatorConfig { + bonded_amount: U512::from(OLD_STAKE), + ..Default::default() + }, + )], + rng, + ) + // One token is being unbonded to the validator: + .with_unbond( + old_validator.clone(), + UnbondKind::Validator(old_validator.clone()), + OLD_STAKE.into(), + rng, + ); + + // We'll be updating the validators set to only contain new_validator: + let config = Config { + accounts: vec![AccountConfig { + public_key: new_validator.clone(), + balance: Some(U512::from(NEW_BALANCE)), + validator: Some(ValidatorConfig { + bonded_amount: U512::from(NEW_STAKE), + ..Default::default() + }), + }], + only_listed_validators: true, + slash_instead_of_unbonding: false, + ..Default::default() + }; + + let update = get_update(&mut reader, config); + + // Check that the update contains the correct list of validators + update.assert_validators(&[ValidatorInfo::new(&new_validator, U512::from(NEW_STAKE))]); + + 
update.assert_seigniorage_recipients_written(&mut reader); + update.assert_total_supply( + &mut reader, + OLD_BALANCE + OLD_STAKE + NEW_BALANCE + NEW_STAKE, + ); + + // Check purse write for validator1 + let bid_purse = *reader + .get_bids() + .validator_bid(&old_validator) + .expect("should have bid") + .bonding_purse(); + + // Bid purse balance should be unchanged + update.assert_key_absent(&Key::Balance(bid_purse.addr())); + + // Check bid overwrite + let account1_hash = old_validator.to_account_hash(); + let mut expected_bid_1 = ValidatorBid::unlocked( + old_validator, + bid_purse, + U512::zero(), + Default::default(), + 0, + u64::MAX, + 0, + ); + expected_bid_1.deactivate(); + update.assert_written_bid(account1_hash, BidKind::Validator(Box::new(expected_bid_1))); + + // Check writes for validator2 + let account2_hash = new_validator.to_account_hash(); + + // The new account should be created + let account2 = update.get_written_addressable_entity(account2_hash); + + // Check that the main purse for the new account has been created with the correct amount + update.assert_written_purse_is_unit(account2.main_purse()); + update.assert_written_balance(account2.main_purse(), NEW_BALANCE); + + let bid_write = update.get_written_bid(account2_hash); + assert_eq!(bid_write.validator_public_key(), new_validator); + let total = update + .get_total_stake(account2_hash) + .expect("should read total staked amount"); + assert_eq!(total, U512::from(NEW_STAKE)); + assert!(!bid_write.inactive()); + + // Check that the bid purse for the new validator has been created with the correct amount + update.assert_written_purse_is_unit(bid_write.bonding_purse().unwrap()); + update.assert_written_balance(bid_write.bonding_purse().unwrap(), NEW_STAKE); + + // 11 keys should be written: + // - seigniorage recipients + // - total supply + // - bid for old validator + // - account for new validator + // - main purse for account for new validator + // - main purse balance for account for new 
validator + // - addressable entity for new validator + // - package for the newly created addressable entity + // - bid for new validator + // - bonding purse for new validator + // - bonding purse balance for new validator + assert_eq!(update.len(), 11); +} + +#[test] +fn should_handle_unbonding_to_a_delegator_correctly() { + let rng = &mut TestRng::new(); + + let old_validator = PublicKey::random(rng); + let new_validator = PublicKey::random(rng); + let delegator = PublicKey::random(rng); + + const OLD_BALANCE: u64 = 100; + const NEW_BALANCE: u64 = 200; + const DELEGATOR_BALANCE: u64 = 50; + const OLD_STAKE: u64 = 1; + const DELEGATOR_STAKE: u64 = 2; + const NEW_STAKE: u64 = 3; + + let mut reader = MockStateReader::new() + .with_account(delegator.to_account_hash(), DELEGATOR_BALANCE.into(), rng) + .with_account(old_validator.to_account_hash(), OLD_BALANCE.into(), rng) + .with_validators( + vec![( + old_validator.clone(), + U512::from(OLD_BALANCE), + ValidatorConfig { + bonded_amount: U512::from(OLD_STAKE), + delegators: Some(vec![DelegatorConfig { + public_key: delegator.clone(), + delegated_amount: DELEGATOR_STAKE.into(), + }]), + ..Default::default() + }, + )], + rng, + ) + // One token is being unbonded to the validator: + .with_unbond( + old_validator.clone(), + UnbondKind::Validator(old_validator.clone()), + OLD_STAKE.into(), + rng, + ) + // One token is being unbonded to the delegator: + .with_unbond( + old_validator.clone(), + UnbondKind::DelegatedPublicKey(delegator.clone()), + OLD_STAKE.into(), + rng, + ); + + // We'll be updating the validators set to only contain new_validator: + let config = Config { + accounts: vec![AccountConfig { + public_key: new_validator.clone(), + balance: Some(U512::from(NEW_BALANCE)), + validator: Some(ValidatorConfig { + bonded_amount: U512::from(NEW_STAKE), + ..Default::default() + }), + }], + only_listed_validators: true, + slash_instead_of_unbonding: false, + ..Default::default() + }; + + let update = get_update(&mut 
reader, config); + + // Check that the update contains the correct list of validators + update.assert_validators(&[ValidatorInfo::new(&new_validator, U512::from(NEW_STAKE))]); + + update.assert_seigniorage_recipients_written(&mut reader); + update.assert_total_supply( + &mut reader, + OLD_BALANCE + OLD_STAKE + NEW_BALANCE + NEW_STAKE + DELEGATOR_BALANCE + DELEGATOR_STAKE, + ); + let unbond_kind = UnbondKind::Validator(old_validator.clone()); + let unbond = reader + .get_unbonds() + .get(&unbond_kind) + .cloned() + .expect("should have unbond purses"); + let validator_purse = unbond + .first() + .expect("must have unbond entry") + .eras() + .first() + .map(|purse| *purse.bonding_purse()) + .expect("A bonding purse for the validator"); + let unbond_kind = UnbondKind::DelegatedPublicKey(delegator.clone()); + let unbonds = reader + .get_unbonds() + .get(&unbond_kind) + .cloned() + .expect("should have unbond purses"); + let unbonding_purses = unbonds.first().expect("must have at least one entry"); + let _ = unbonding_purses + .eras() + .first() + .map(|purse| *purse.bonding_purse()) + .expect("A bonding purse for the delegator"); + + // Bid purse balance should be unchanged + update.assert_key_absent(&Key::Balance(validator_purse.addr())); + + // Check bid overwrite + let account1_hash = old_validator.to_account_hash(); + let mut expected_bid_1 = ValidatorBid::unlocked( + old_validator, + validator_purse, + U512::zero(), + Default::default(), + 0, + u64::MAX, + 0, + ); + expected_bid_1.deactivate(); + update.assert_written_bid(account1_hash, BidKind::Validator(Box::new(expected_bid_1))); + + // Check writes for validator2 + let account2_hash = new_validator.to_account_hash(); + + // The new account should be created + let account2 = update.get_written_addressable_entity(account2_hash); + + // Check that the main purse for the new account has been created with the correct amount + update.assert_written_purse_is_unit(account2.main_purse()); + 
update.assert_written_balance(account2.main_purse(), NEW_BALANCE); + + let bid_write = update.get_written_bid(account2_hash); + assert_eq!(bid_write.validator_public_key(), new_validator); + let total = update + .get_total_stake(account2_hash) + .expect("should read total staked amount"); + assert_eq!(total, U512::from(NEW_STAKE)); + assert!(!bid_write.inactive()); + + // Check that the bid purse for the new validator has been created with the correct amount + update.assert_written_purse_is_unit(bid_write.bonding_purse().unwrap()); + update.assert_written_balance(bid_write.bonding_purse().unwrap(), NEW_STAKE); + + // 13 keys should be written: + // - seigniorage recipients + // - total supply + // - bid for old validator + // - bid for delegator + // - unbonding purse for old validator + // - account for new validator + // - main purse for account for new validator + // - main purse balance for account for new validator + // - addressable entity for new validator + // - package for the newly created addressable entity + // - bid for new validator + // - bonding purse for new validator + // - bonding purse balance for new validator + assert_eq!(update.len(), 13); +} + +#[test] +fn should_handle_legacy_unbonding_to_oneself_correctly() { + let rng = &mut TestRng::new(); + + let old_validator = PublicKey::random(rng); + let new_validator = PublicKey::random(rng); + + const OLD_BALANCE: u64 = 100; + const NEW_BALANCE: u64 = 200; + const OLD_STAKE: u64 = 1; + const NEW_STAKE: u64 = 2; + + let mut reader = MockStateReader::new() + .with_account(old_validator.to_account_hash(), OLD_BALANCE.into(), rng) + .with_validators( + vec![( + old_validator.clone(), + U512::from(OLD_BALANCE), + ValidatorConfig { + bonded_amount: U512::from(OLD_STAKE), + ..Default::default() + }, + )], + rng, + ) + // Two tokens are being unbonded to the validator, one legacy, the other not: + .with_unbond( + old_validator.clone(), + UnbondKind::Validator(old_validator.clone()), + OLD_STAKE.into(), + 
rng, + ) + .with_withdraw( + old_validator.clone(), + UnbondKind::Validator(old_validator.clone()), + EraId::new(1), + OLD_STAKE.into(), + rng, + ); + + // We'll be updating the validators set to only contain new_validator: + let config = Config { + accounts: vec![AccountConfig { + public_key: new_validator.clone(), + balance: Some(U512::from(NEW_BALANCE)), + validator: Some(ValidatorConfig { + bonded_amount: U512::from(NEW_STAKE), + ..Default::default() + }), + }], + only_listed_validators: true, + slash_instead_of_unbonding: false, + ..Default::default() + }; + + let update = get_update(&mut reader, config); + + // Check that the update contains the correct list of validators + update.assert_validators(&[ValidatorInfo::new(&new_validator, U512::from(NEW_STAKE))]); + + update.assert_seigniorage_recipients_written(&mut reader); + update.assert_total_supply( + &mut reader, + OLD_BALANCE + OLD_STAKE + NEW_BALANCE + NEW_STAKE, + ); + + // Check purse write for validator1 + let bid_purse = *reader + .get_bids() + .validator_bid(&old_validator) + .expect("should have bid") + .bonding_purse(); + + // Bid purse balance should be unchanged + update.assert_key_absent(&Key::Balance(bid_purse.addr())); + + // Check bid overwrite + let account1_hash = old_validator.to_account_hash(); + let mut expected_bid_1 = ValidatorBid::unlocked( + old_validator, + bid_purse, + U512::zero(), + Default::default(), + 0, + u64::MAX, + 0, + ); + expected_bid_1.deactivate(); + update.assert_written_bid(account1_hash, BidKind::Validator(Box::new(expected_bid_1))); + + // Check writes for validator2 + let account2_hash = new_validator.to_account_hash(); + + // The new account should be created + let account2 = update.get_written_addressable_entity(account2_hash); + + // Check that the main purse for the new account has been created with the correct amount + update.assert_written_purse_is_unit(account2.main_purse()); + update.assert_written_balance(account2.main_purse(), NEW_BALANCE); + + let 
bid_write = update.get_written_bid(account2_hash); + assert_eq!(bid_write.validator_public_key(), new_validator); + let total = update + .get_total_stake(account2_hash) + .expect("should read total staked amount"); + assert_eq!(total, U512::from(NEW_STAKE)); + assert!(!bid_write.inactive()); + + // Check that the bid purse for the new validator has been created with the correct amount + update.assert_written_purse_is_unit(bid_write.bonding_purse().unwrap()); + update.assert_written_balance(bid_write.bonding_purse().unwrap(), NEW_STAKE); + + // 11 keys should be written: + // - seigniorage recipients + // - total supply + // - bid for old validator + // - account for new validator + // - main purse for account for new validator + // - main purse balance for account for new validator + // - addressable entity for new validator + // - package for the newly created addressable entity + // - bid for new validator + // - bonding purse for new validator + // - bonding purse balance for new validator + assert_eq!(update.len(), 11); +} + +#[test] +fn should_handle_legacy_unbonding_to_a_delegator_correctly() { + let rng = &mut TestRng::new(); + + let v1_public_key = PublicKey::random(rng); + let v2_public_key = PublicKey::random(rng); + let d1_public_key = PublicKey::random(rng); + + const V1_INITIAL_BALANCE: u64 = 100; + const V1_INITIAL_STAKE: u64 = 1; + const V2_INITIAL_BALANCE: u64 = 200; + const V2_INITIAL_STAKE: u64 = 3; + + const D1_INITIAL_BALANCE: u64 = 20; + const D1_INITIAL_STAKE: u64 = 2; + + const WITHDRAW_ERA: EraId = EraId::new(0); + + let mut reader = MockStateReader::new() + .with_account( + d1_public_key.to_account_hash(), + D1_INITIAL_BALANCE.into(), + rng, + ) + .with_account( + v1_public_key.to_account_hash(), + V1_INITIAL_BALANCE.into(), + rng, + ) + .with_validators( + vec![( + v1_public_key.clone(), + U512::from(V1_INITIAL_BALANCE), + ValidatorConfig { + bonded_amount: U512::from(V1_INITIAL_STAKE), + delegators: Some(vec![DelegatorConfig { + 
public_key: d1_public_key.clone(), + delegated_amount: D1_INITIAL_STAKE.into(), + }]), + ..Default::default() + }, + )], + rng, + ) + .with_withdraw( + v1_public_key.clone(), + UnbondKind::Validator(v1_public_key.clone()), + WITHDRAW_ERA, + U512::from(V1_INITIAL_STAKE), + rng, + ) + // Two tokens are being unbonded to the validator, one legacy, the other not: + .with_unbond( + v1_public_key.clone(), + UnbondKind::Validator(v1_public_key.clone()), + U512::from(V1_INITIAL_STAKE), + rng, + ) + // Two tokens are being unbonded to the delegator, one legacy, the other not: + .with_withdraw( + v1_public_key.clone(), + UnbondKind::DelegatedPublicKey(d1_public_key.clone()), + WITHDRAW_ERA, + U512::from(D1_INITIAL_STAKE), + rng, + ) + .with_unbond( + v1_public_key.clone(), + UnbondKind::DelegatedPublicKey(d1_public_key), + U512::from(D1_INITIAL_STAKE), + rng, + ); + + assert_eq!( + reader.total_supply.as_u64(), + V1_INITIAL_BALANCE + V1_INITIAL_STAKE + D1_INITIAL_BALANCE + D1_INITIAL_STAKE, + "should equal" + ); + + // We'll be updating the validators set to only contain new_validator: + let config = Config { + accounts: vec![AccountConfig { + public_key: v2_public_key.clone(), + balance: Some(U512::from(V2_INITIAL_BALANCE)), + validator: Some(ValidatorConfig { + bonded_amount: U512::from(V2_INITIAL_STAKE), + ..Default::default() + }), + }], + only_listed_validators: true, + slash_instead_of_unbonding: false, + ..Default::default() + }; + + let update = get_update(&mut reader, config); + + update.assert_total_supply( + &mut reader, + V1_INITIAL_BALANCE + + V1_INITIAL_STAKE + + D1_INITIAL_BALANCE + + D1_INITIAL_STAKE + + V2_INITIAL_BALANCE + + V2_INITIAL_STAKE, + ); + + // Check that the update contains the correct list of validators + update.assert_validators(&[ValidatorInfo::new( + &v2_public_key, + U512::from(V2_INITIAL_STAKE), + )]); + + let unbond_kind = UnbondKind::Validator(v1_public_key.clone()); + let unbonds = reader + .get_unbonds() + .get(&unbond_kind) + .cloned() 
+ .expect("should have unbond purses"); + let unbonding_purses = unbonds.first().expect("must have at least one entry"); + let validator_purse = unbonding_purses + .eras() + .first() + .map(|purse| *purse.bonding_purse()) + .expect("A bonding purse for the validator"); + + // Bid purse balance should be unchanged + update.assert_key_absent(&Key::Balance(validator_purse.addr())); + + // Check bid overwrite + let account1_hash = v1_public_key.to_account_hash(); + let mut expected_bid_1 = ValidatorBid::unlocked( + v1_public_key, + validator_purse, + U512::zero(), + Default::default(), + 0, + u64::MAX, + 0, + ); + expected_bid_1.deactivate(); + update.assert_written_bid(account1_hash, BidKind::Validator(Box::new(expected_bid_1))); + + // Check writes for validator2 + let account2_hash = v2_public_key.to_account_hash(); + + // The new account should be created + let account2 = update.get_written_addressable_entity(account2_hash); + + // Check that the main purse for the new account has been created with the correct amount + update.assert_written_purse_is_unit(account2.main_purse()); + update.assert_written_balance(account2.main_purse(), V2_INITIAL_BALANCE); + + let bid_write = update.get_written_bid(account2_hash); + assert_eq!(bid_write.validator_public_key(), v2_public_key); + let total = update + .get_total_stake(account2_hash) + .expect("should read total staked amount"); + assert_eq!(total, U512::from(V2_INITIAL_STAKE)); + assert!(!bid_write.inactive()); + + // Check that the bid purse for the new validator has been created with the correct amount + update.assert_written_purse_is_unit(bid_write.bonding_purse().unwrap()); + update.assert_written_balance(bid_write.bonding_purse().unwrap(), V2_INITIAL_STAKE); + + // 13 keys should be written: + // - seigniorage recipients + // - total supply + // - bid for old validator + // - unbonding for old validator + // - unbond for delegator + // - account for new validator + // - main purse for account for new validator + // - 
main purse balance for account for new validator + // - addressable entity for new validator + // - package for the newly created addressable entity + // - bid for new validator + // - bonding purse for new validator + // - bonding purse balance for new validator + assert_eq!(update.len(), 13); +} diff --git a/utils/global-state-update-gen/src/generic/update.rs b/utils/global-state-update-gen/src/generic/update.rs new file mode 100644 index 0000000000..336e0663b7 --- /dev/null +++ b/utils/global-state-update-gen/src/generic/update.rs @@ -0,0 +1,340 @@ +use std::collections::BTreeMap; +#[cfg(test)] +use std::collections::HashSet; + +#[cfg(test)] +use casper_types::{account::AccountHash, AddressableEntity, CLValue, PublicKey, URef, U512}; +use casper_types::{Key, StoredValue}; + +#[cfg(test)] +use casper_types::system::auction::{ + BidAddr, BidKind, DelegatorBid, DelegatorKind, UnbondKind, ValidatorBid, +}; + +#[cfg(test)] +use super::state_reader::StateReader; + +use crate::utils::{print_entry, print_validators, ValidatorInfo}; + +#[derive(Debug)] +pub(crate) struct Update { + entries: BTreeMap, + // Holds the complete set of validators, only if the validator set changed + validators: Option>, +} + +impl Update { + pub(crate) fn new( + entries: BTreeMap, + validators: Option>, + ) -> Self { + Self { + entries, + validators, + } + } + + pub(crate) fn print(&self) { + if let Some(validators) = &self.validators { + print_validators(validators); + } + for (key, value) in &self.entries { + print_entry(key, value); + } + } +} + +#[cfg(test)] +impl Update { + pub(crate) fn len(&self) -> usize { + self.entries.len() + } + + pub(crate) fn get_written_addressable_entity( + &self, + account_hash: AccountHash, + ) -> AddressableEntity { + let entity_key = self + .entries + .get(&Key::Account(account_hash)) + .expect("must have Key for account hash") + .as_cl_value() + .expect("must have underlying cl value") + .to_owned() + .into_t::() + .expect("must convert to key"); + + 
self.entries + .get(&entity_key) + .expect("must have addressable entity") + .as_addressable_entity() + .expect("should be addressable entity") + .clone() + } + + pub(crate) fn get_written_bid(&self, account: AccountHash) -> BidKind { + self.entries + .get(&Key::BidAddr(BidAddr::from(account))) + .expect("stored value should exist") + .as_bid_kind() + .expect("stored value should be BidKind") + .clone() + } + + #[track_caller] + pub(crate) fn get_total_stake(&self, account: AccountHash) -> Option { + let bid_addr = BidAddr::from(account); + + if let BidKind::Validator(validator_bid) = self + .entries + .get(&bid_addr.into()) + .expect("should create bid") + .as_bid_kind() + .expect("should be bid") + { + let delegator_stake: U512 = self + .delegators(validator_bid) + .iter() + .map(|x| x.staked_amount()) + .sum(); + + Some(validator_bid.staked_amount() + delegator_stake) + } else { + None + } + } + + #[track_caller] + pub(crate) fn delegators(&self, validator_bid: &ValidatorBid) -> Vec { + let mut ret = vec![]; + + for (_, v) in self.entries.clone() { + if let StoredValue::BidKind(BidKind::Delegator(delegator)) = v { + if delegator.validator_public_key() != validator_bid.validator_public_key() { + continue; + } + ret.push(*delegator); + } + } + + ret + } + + #[track_caller] + pub(crate) fn delegator( + &self, + validator_bid: &ValidatorBid, + delegator_kind: &DelegatorKind, + ) -> Option { + let delegators = self.delegators(validator_bid); + for delegator in delegators { + if delegator.delegator_kind() != delegator_kind { + continue; + } + return Some(delegator); + } + None + } + + #[track_caller] + pub(crate) fn assert_written_balance(&self, purse: URef, balance: u64) { + if let StoredValue::CLValue(cl_value) = self + .entries + .get(&Key::Balance(purse.addr())) + .expect("must have balance") + { + let actual = CLValue::to_t::(cl_value).expect("must get u512"); + assert_eq!(actual, U512::from(balance)) + }; + } + + #[track_caller] + pub(crate) fn 
assert_total_supply(&self, reader: &mut R, supply: u64) { + let total = self + .entries + .get(&reader.get_total_supply_key()) + .expect("should have total supply") + .as_cl_value() + .expect("total supply should be CLValue") + .clone() + .into_t::() + .expect("total supply should be a U512"); + let expected = U512::from(supply); + assert_eq!( + total, expected, + "total supply ({total}) not as expected ({expected})", + ); + } + + #[track_caller] + pub(crate) fn assert_written_purse_is_unit(&self, purse: URef) { + assert_eq!( + self.entries.get(&Key::URef(purse)), + Some(&StoredValue::from( + CLValue::from_t(()).expect("should convert unit to CLValue") + )) + ); + } + + #[track_caller] + pub(crate) fn assert_seigniorage_recipients_written(&self, reader: &mut R) { + assert!(self + .entries + .contains_key(&reader.get_seigniorage_recipients_key())); + } + + #[track_caller] + pub(crate) fn assert_written_bid(&self, account: AccountHash, bid: BidKind) { + assert_eq!( + self.entries.get(&Key::BidAddr(BidAddr::from(account))), + Some(&StoredValue::from(bid)) + ); + } + + #[track_caller] + pub(crate) fn assert_withdraw_purse( + &self, + bid_purse: URef, + validator_key: &PublicKey, + unbonder_key: &PublicKey, + amount: u64, + ) { + let account_hash = validator_key.to_account_hash(); + let withdraws = self + .entries + .get(&Key::Withdraw(account_hash)) + .expect("should have withdraws for the account") + .as_withdraw() + .expect("should be withdraw purses"); + assert!(withdraws.iter().any( + |withdraw_purse| withdraw_purse.bonding_purse() == &bid_purse + && withdraw_purse.validator_public_key() == validator_key + && withdraw_purse.unbonder_public_key() == unbonder_key + && withdraw_purse.amount() == &U512::from(amount) + )) + } + + #[track_caller] + #[allow(unused)] + pub(crate) fn assert_unbonding_purse( + &self, + bid_purse: URef, + validator_key: &PublicKey, + unbonder_key: &PublicKey, + amount: u64, + ) { + let account_hash = unbonder_key.to_account_hash(); + let 
unbonds = self + .entries + .get(&Key::Unbond(account_hash)) + .expect("should have unbonds for the account") + .as_unbonding() + .expect("should be unbonding purses"); + assert!(unbonds.iter().any( + |unbonding_purse| unbonding_purse.bonding_purse() == &bid_purse + && unbonding_purse.validator_public_key() == validator_key + && unbonding_purse.unbonder_public_key() == unbonder_key + && unbonding_purse.amount() == &U512::from(amount) + )) + } + + /// `expected`: (bid_purse, unbonder_key, amount) + #[track_caller] + #[allow(unused)] + pub(crate) fn assert_unbonding_purses<'a>( + &self, + account_hash: AccountHash, + expected: impl IntoIterator, + ) { + let mut expected: Vec<_> = expected + .into_iter() + .map(|(bid_purse, unbonder_key, amount)| { + (account_hash, bid_purse, unbonder_key, U512::from(amount)) + }) + .collect(); + let mut data: Vec<_> = self + .entries + .get(&Key::Unbond(account_hash)) + .expect("should have unbonds for the account") + .as_unbonding() + .expect("should be unbonding purses") + .iter() + .map(|unbonding_purse| { + ( + unbonding_purse.unbonder_public_key().to_account_hash(), + *unbonding_purse.bonding_purse(), + unbonding_purse.unbonder_public_key(), + *unbonding_purse.amount(), + ) + }) + .collect(); + + expected.sort(); + data.sort(); + + assert_eq!( + data, expected, + "\nThe data we got:\n{data:#?}\nExpected values:\n{expected:#?}" + ); + } + + #[track_caller] + pub(crate) fn assert_unbond_bid_kind( + &self, + bid_purse: URef, + validator_key: &PublicKey, + unbond_kind: &UnbondKind, + amount: u64, + ) { + println!( + "assert_unbond_bid_kind {:?} {:?}", + validator_key, + validator_key.to_account_hash() + ); + println!("assert_unbond_bid_kind {:?}", unbond_kind); + let bid_addr = match unbond_kind { + UnbondKind::Validator(pk) | UnbondKind::DelegatedPublicKey(pk) => { + BidAddr::UnbondAccount { + validator: validator_key.to_account_hash(), + unbonder: pk.to_account_hash(), + } + } + UnbondKind::DelegatedPurse(addr) => 
BidAddr::UnbondPurse { + validator: validator_key.to_account_hash(), + unbonder: *addr, + }, + }; + + println!("assert_unbond_bid_kind {:?}", Key::BidAddr(bid_addr)); + + let entries = &self.entries; + let unbonds = entries + .get(&Key::BidAddr(bid_addr)) + .expect("should have record") + .as_unbond() + .expect("should be unbond"); + + assert!(unbonds + .eras() + .iter() + .any(|unbond_era| unbond_era.bonding_purse() == &bid_purse + && unbond_era.amount() == &U512::from(amount))) + } + + #[track_caller] + pub(crate) fn assert_key_absent(&self, key: &Key) { + assert!(!self.entries.contains_key(key)) + } + + #[track_caller] + pub(crate) fn assert_validators(&self, validators: &[ValidatorInfo]) { + let self_set: HashSet<_> = self.validators.as_ref().unwrap().iter().collect(); + let other_set: HashSet<_> = validators.iter().collect(); + assert_eq!(self_set, other_set); + } + + #[track_caller] + pub(crate) fn assert_validators_unchanged(&self) { + assert!(self.validators.is_none()); + } +} diff --git a/utils/global-state-update-gen/src/main.rs b/utils/global-state-update-gen/src/main.rs new file mode 100644 index 0000000000..247860d25d --- /dev/null +++ b/utils/global-state-update-gen/src/main.rs @@ -0,0 +1,214 @@ +mod admins; +mod balances; +mod decode; +mod generic; +mod system_entity_registry; +mod utils; +mod validators; + +use admins::generate_admins; +use clap::{crate_version, App, Arg, SubCommand}; + +use crate::{ + balances::generate_balances_update, decode::decode_file, generic::generate_generic_update, + system_entity_registry::generate_system_entity_registry, + validators::generate_validators_update, +}; + +fn main() { + let matches = App::new("Global State Update Generator") + .version(crate_version!()) + .about("Generates a global state update file based on the supplied parameters") + .subcommand( + SubCommand::with_name("change-validators") + .about("Generates an update changing the validators set") + .arg( + Arg::with_name("data_dir") + .short("d") + 
.long("data-dir") + .value_name("PATH") + .help("Data storage directory containing the global state database file") + .takes_value(true) + .required(true), + ) + .arg( + Arg::with_name("hash") + .short("s") + .long("state-hash") + .value_name("HEX_STRING") + .help("The global state hash to be used as the base") + .takes_value(true) + .required(true), + ) + .arg( + Arg::with_name("validator") + .short("v") + .long("validator") + .value_name("KEY,STAKE[,BALANCE]") + .help("A validator config in the format 'public_key,stake[,balance]'") + .takes_value(true) + .required(true) + .multiple(true) + .number_of_values(1), + ), + ) + .subcommand( + SubCommand::with_name("balances") + .about("Generates an update changing account balances") + .arg( + Arg::with_name("data_dir") + .short("d") + .long("data-dir") + .value_name("PATH") + .help("Data storage directory containing the global state database file") + .takes_value(true) + .required(true), + ) + .arg( + Arg::with_name("hash") + .short("s") + .long("state-hash") + .value_name("HEX_STRING") + .help("The global state hash to be used as the base") + .takes_value(true) + .required(true), + ) + .arg( + Arg::with_name("from") + .short("f") + .long("from") + .value_name("ACCOUNT_HASH") + .help("Source account hash (with the account-hash- prefix)") + .takes_value(true) + .required(true), + ) + .arg( + Arg::with_name("to") + .short("t") + .long("to") + .value_name("ACCOUNT_HASH") + .help("Target account hash (with the account-hash- prefix)") + .takes_value(true) + .required(true), + ) + .arg( + Arg::with_name("amount") + .short("a") + .long("amount") + .value_name("MOTES") + .help("Amount to be transferred") + .takes_value(true) + .required(true), + ), + ) + .subcommand( + SubCommand::with_name("migrate-into-system-contract-registry") + .about("Generates an update creating the system entity registry") + .arg( + Arg::with_name("data_dir") + .short("d") + .long("data-dir") + .value_name("PATH") + .help("Data storage directory 
containing the global state database file") + .takes_value(true) + .required(true), + ) + .arg( + Arg::with_name("hash") + .short("s") + .long("state-hash") + .value_name("HEX_STRING") + .help("The global state hash to be used as the base") + .takes_value(true) + .required(false), + ), + ) + .subcommand( + SubCommand::with_name("generic") + .about("Generates a generic update based on a config file") + .arg( + Arg::with_name("data_dir") + .short("d") + .long("data-dir") + .value_name("PATH") + .help("Data storage directory containing the global state database file") + .takes_value(true) + .required(true), + ) + .arg( + Arg::with_name("hash") + .short("s") + .long("state-hash") + .value_name("HEX_STRING") + .help("The global state hash to be used as the base") + .takes_value(true) + .required(true), + ) + .arg( + Arg::with_name("config_file") + .value_name("FILE") + .index(1) + .required(true) + .help("The config file to be used for generating the update"), + ), + ) + .subcommand( + SubCommand::with_name("generate-admins") + .about("Generates entries to create new admin accounts on a private chain") + .arg( + Arg::with_name("data_dir") + .short("d") + .long("data-dir") + .value_name("PATH") + .help("Data storage directory containing the global state database file") + .takes_value(true) + .required(true), + ) + .arg( + Arg::with_name("hash") + .short("s") + .long("state-hash") + .value_name("HEX_STRING") + .help("The global state hash to be used as the base") + .takes_value(true) + .required(true), + ) + .arg( + Arg::with_name("admin") + .short("a") + .long("admin") + .value_name("PUBLIC_KEY,BALANCE") + .help("A new admin account") + .takes_value(true) + .required(true) + .multiple(true) + .number_of_values(1), + ), + ) + .subcommand( + SubCommand::with_name("decode") + .about("Decodes the global_state.toml file into a readable form") + .arg( + Arg::with_name("file") + .value_name("FILE") + .index(1) + .required(true) + .help("The file to be decoded"), + ), + ) + 
.get_matches(); + + match matches.subcommand() { + ("change-validators", Some(sub_matches)) => generate_validators_update(sub_matches), + ("balances", Some(sub_matches)) => generate_balances_update(sub_matches), + ("migrate-into-system-contract-registry", Some(sub_matches)) => { + generate_system_entity_registry(sub_matches) + } + ("generic", Some(sub_matches)) => generate_generic_update(sub_matches), + ("generate-admins", Some(sub_matches)) => generate_admins(sub_matches), + ("decode", Some(sub_matches)) => decode_file(sub_matches), + (subcommand, _) => { + println!("Unknown subcommand: \"{}\"", subcommand); + } + } +} diff --git a/utils/global-state-update-gen/src/system_entity_registry.rs b/utils/global-state-update-gen/src/system_entity_registry.rs new file mode 100644 index 0000000000..3e55ac63f8 --- /dev/null +++ b/utils/global-state-update-gen/src/system_entity_registry.rs @@ -0,0 +1,154 @@ +use std::path::Path; + +use clap::ArgMatches; +use lmdb::{self, Cursor, Environment, EnvironmentFlags, Transaction}; + +use casper_engine_test_support::LmdbWasmTestBuilder; +use casper_execution_engine::engine_state::engine_config::{ + DEFAULT_ENABLE_ENTITY, DEFAULT_PROTOCOL_VERSION, +}; +use casper_storage::{ + data_access_layer::{ + SystemEntityRegistryPayload, SystemEntityRegistryRequest, SystemEntityRegistrySelector, + }, + global_state::state::StateProvider, +}; +use casper_types::{ + bytesrepr::FromBytes, + system::{AUCTION, HANDLE_PAYMENT, MINT, STANDARD_PAYMENT}, + AddressableEntityHash, CLValue, Key, ProtocolVersion, StoredValue, SystemHashRegistry, + KEY_HASH_LENGTH, +}; + +use crate::utils::{hash_from_str, print_entry}; + +const DATABASE_NAME: &str = "PROTOCOL_DATA_STORE"; + +pub(crate) fn generate_system_entity_registry(matches: &ArgMatches<'_>) { + let data_dir = Path::new(matches.value_of("data_dir").unwrap_or(".")); + match matches.value_of("hash") { + None => generate_system_entity_registry_using_protocol_data(data_dir), + Some(hash) => 
generate_system_entity_registry_using_global_state(data_dir, hash), + } +} + +fn generate_system_entity_registry_using_protocol_data(data_dir: &Path) { + let database_path = data_dir.join("data.lmdb"); + + let env = Environment::new() + .set_flags(EnvironmentFlags::READ_ONLY | EnvironmentFlags::NO_SUB_DIR) + .set_max_dbs(2) + .open(&database_path) + .unwrap_or_else(|error| { + panic!( + "failed to initialize database environment at {}: {}", + database_path.display(), + error + ) + }); + + let protocol_data_db = env.open_db(Some(DATABASE_NAME)).unwrap_or_else(|error| { + panic!("failed to open database named {}: {}", DATABASE_NAME, error) + }); + + let ro_transaction = env + .begin_ro_txn() + .unwrap_or_else(|error| panic!("failed to initialize read-only transaction: {}", error)); + let mut cursor = ro_transaction + .open_ro_cursor(protocol_data_db) + .unwrap_or_else(|error| panic!("failed to open a read-only cursor: {}", error)); + + let serialized_protocol_data = match cursor.iter().next().map(Result::unwrap) { + Some((_key, value)) => value, + None => { + println!("No protocol data found"); + return; + } + }; + + // The last four 32-byte chunks of the serialized data are the contract hashes. 
+ let start_index = serialized_protocol_data + .len() + .saturating_sub(4 * KEY_HASH_LENGTH); + let remainder = &serialized_protocol_data[start_index..]; + let (mint_hash, remainder) = + AddressableEntityHash::from_bytes(remainder).unwrap_or_else(|error| { + panic!( + "failed to parse mint hash: {:?}\nraw_bytes: {:?}", + error, serialized_protocol_data + ) + }); + let (handle_payment_hash, remainder) = AddressableEntityHash::from_bytes(remainder) + .unwrap_or_else(|error| { + panic!( + "failed to parse handle_payment hash: {:?}\nraw_bytes: {:?}", + error, serialized_protocol_data + ) + }); + let (standard_payment_hash, remainder) = AddressableEntityHash::from_bytes(remainder) + .unwrap_or_else(|error| { + panic!( + "failed to parse standard_payment hash: {:?}\nraw_bytes: {:?}", + error, serialized_protocol_data + ) + }); + let (auction_hash, remainder) = + AddressableEntityHash::from_bytes(remainder).unwrap_or_else(|error| { + panic!( + "failed to parse auction hash: {:?}\nraw_bytes: {:?}", + error, serialized_protocol_data + ) + }); + assert!(remainder.is_empty()); + + let mut registry = SystemHashRegistry::new(); + registry.insert(MINT.to_string(), mint_hash.value()); + registry.insert(HANDLE_PAYMENT.to_string(), handle_payment_hash.value()); + registry.insert(STANDARD_PAYMENT.to_string(), standard_payment_hash.value()); + registry.insert(AUCTION.to_string(), auction_hash.value()); + + print_entry( + &Key::SystemEntityRegistry, + &StoredValue::from(CLValue::from_t(registry).unwrap()), + ); +} + +fn generate_system_entity_registry_using_global_state(data_dir: &Path, state_hash: &str) { + let builder = LmdbWasmTestBuilder::open_raw( + data_dir, + Default::default(), + DEFAULT_PROTOCOL_VERSION, + hash_from_str(state_hash), + ); + + let registry_req = SystemEntityRegistryRequest::new( + builder.get_post_state_hash(), + ProtocolVersion::V2_0_0, + SystemEntityRegistrySelector::All, + DEFAULT_ENABLE_ENTITY, + ); + + let registry = match builder + .data_access_layer() + 
.system_entity_registry(registry_req) + .as_registry_payload() + .expect("should have payload") + { + SystemEntityRegistryPayload::All(registry) => registry, + SystemEntityRegistryPayload::EntityKey(_) => { + panic!("expected registry"); + } + }; + + // make sure expected entries exist + let _ = *registry.get(MINT).expect("mint should exist"); + let _ = *registry.get(AUCTION).expect("auction should exist"); + let _ = *registry + .get(HANDLE_PAYMENT) + .expect("handle payment should exist"); + + print_entry( + &Key::SystemEntityRegistry, + &StoredValue::from(CLValue::from_t(registry).unwrap()), + ); +} diff --git a/utils/global-state-update-gen/src/utils.rs b/utils/global-state-update-gen/src/utils.rs new file mode 100644 index 0000000000..16ecc64506 --- /dev/null +++ b/utils/global-state-update-gen/src/utils.rs @@ -0,0 +1,89 @@ +use clap::ArgMatches; +use std::{ + collections::{BTreeMap, BTreeSet}, + convert::TryInto, +}; + +use casper_types::{ + bytesrepr::ToBytes, checksummed_hex, system::auction::SeigniorageRecipientsSnapshotV2, + AsymmetricType, Digest, Key, ProtocolVersion, PublicKey, StoredValue, U512, +}; + +/// Parses a Digest from a string. Panics if parsing fails. 
+pub fn hash_from_str(hex_str: &str) -> Digest { + (&checksummed_hex::decode(hex_str).unwrap()[..]) + .try_into() + .unwrap() +} + +pub fn num_from_str(str: Option<&str>) -> Option { + match str { + Some(val) => val.parse::().ok(), + None => None, + } +} + +pub fn protocol_version_from_matches(matches: &ArgMatches<'_>) -> ProtocolVersion { + let major = num_from_str(matches.value_of("major")).unwrap_or(2); + let minor = num_from_str(matches.value_of("minor")).unwrap_or(0); + let patch = num_from_str(matches.value_of("patch")).unwrap_or(0); + ProtocolVersion::from_parts(major, minor, patch) +} + +pub(crate) fn print_validators(validators: &[ValidatorInfo]) { + for validator in validators { + println!("[[validators]]"); + println!("public_key = \"{}\"", validator.public_key.to_hex()); + println!("weight = \"{}\"", validator.weight); + println!(); + } + println!(); +} + +/// Prints a global state update entry in a format ready for inclusion in a TOML file. +pub(crate) fn print_entry(key: &Key, value: &StoredValue) { + println!("[[entries]]"); + println!("key = \"{}\"", key.to_formatted_string()); + println!("value = \"{}\"", base64::encode(value.to_bytes().unwrap())); + println!(); +} + +#[derive(Debug, PartialEq, Eq, Hash)] +pub(crate) struct ValidatorInfo { + pub public_key: PublicKey, + pub weight: U512, +} + +#[derive(Debug, PartialEq, Eq, Hash)] +pub struct ValidatorsDiff { + pub added: BTreeSet, + pub removed: BTreeSet, +} + +/// Calculates the sets of added and removed validators between the two snapshots. 
+pub fn validators_diff( + old_snapshot: &SeigniorageRecipientsSnapshotV2, + new_snapshot: &SeigniorageRecipientsSnapshotV2, +) -> ValidatorsDiff { + let old_validators: BTreeSet<_> = old_snapshot + .values() + .flat_map(BTreeMap::keys) + .cloned() + .collect(); + let new_validators: BTreeSet<_> = new_snapshot + .values() + .flat_map(BTreeMap::keys) + .cloned() + .collect(); + + ValidatorsDiff { + added: new_validators + .difference(&old_validators) + .cloned() + .collect(), + removed: old_validators + .difference(&new_validators) + .cloned() + .collect(), + } +} diff --git a/utils/global-state-update-gen/src/validators.rs b/utils/global-state-update-gen/src/validators.rs new file mode 100644 index 0000000000..0d99b70120 --- /dev/null +++ b/utils/global-state-update-gen/src/validators.rs @@ -0,0 +1,71 @@ +use casper_engine_test_support::LmdbWasmTestBuilder; +use casper_execution_engine::engine_state::engine_config::DEFAULT_PROTOCOL_VERSION; +use casper_types::{AsymmetricType, PublicKey, U512}; +use clap::ArgMatches; + +use crate::{ + generic::{ + config::{AccountConfig, Config, ValidatorConfig}, + update_from_config, + }, + utils::{hash_from_str, protocol_version_from_matches}, +}; + +pub(crate) fn generate_validators_update(matches: &ArgMatches<'_>) { + let data_dir = matches.value_of("data_dir").unwrap_or("."); + let state_hash = hash_from_str(matches.value_of("hash").unwrap()); + let accounts = match matches.values_of("validator") { + None => vec![], + Some(values) => values + .map(|validator_def| { + let mut fields = validator_def.split(',').map(str::to_owned); + + let public_key_str = fields + .next() + .expect("validator config should contain a public key"); + let public_key = PublicKey::from_hex(public_key_str.as_bytes()) + .expect("validator config should have a valid public key"); + + let stake_str = fields + .next() + .expect("validator config should contain a stake"); + let stake = + U512::from_dec_str(&stake_str).expect("stake should be a valid decimal 
number"); + + let maybe_new_balance_str = fields.next(); + let maybe_new_balance = maybe_new_balance_str.as_ref().map(|balance_str| { + U512::from_dec_str(balance_str) + .expect("balance should be a valid decimal number") + }); + + AccountConfig { + public_key, + balance: maybe_new_balance, + validator: Some(ValidatorConfig { + bonded_amount: stake, + delegation_rate: None, + delegators: None, + reservations: None, + }), + } + }) + .collect(), + }; + let protocol_version = protocol_version_from_matches(matches); + + let config = Config { + accounts, + transfers: vec![], + only_listed_validators: true, + slash_instead_of_unbonding: false, + protocol_version, + }; + + let builder = LmdbWasmTestBuilder::open_raw( + data_dir, + Default::default(), + DEFAULT_PROTOCOL_VERSION, + state_hash, + ); + update_from_config(builder, config); +} diff --git a/utils/highway-rewards-analysis/Cargo.toml b/utils/highway-rewards-analysis/Cargo.toml new file mode 100644 index 0000000000..c634d913f8 --- /dev/null +++ b/utils/highway-rewards-analysis/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "highway-rewards-analysis" +version = "0.1.0" +edition = "2021" + +[dependencies] +bincode = "1" +clap = { version = "4", features = ["derive"] } +casper-node = { path = "../../node" } +casper-types = { path = "../../types" } +flate2 = "1" +serde = "1" diff --git a/utils/highway-rewards-analysis/README.md b/utils/highway-rewards-analysis/README.md new file mode 100644 index 0000000000..2f95e074df --- /dev/null +++ b/utils/highway-rewards-analysis/README.md @@ -0,0 +1,11 @@ +# Highway State Analyzer + +This tool analyzes a Highway protocol state dump and prints some information that may be helpful in explaining decreased reward amounts. + +Usage: `highway-rewards-analysis [-v] FILE` + +`FILE` should contain a Highway protocol state dump in the Bincode format, either plain or gzip-compressed. The `-v` flag causes the tool to print some additional information (see below). 
+ +The tool prints out 10 nodes that missed the most rounds in the era contained in the dump, as well as 10 nodes with the lowest average maximum level-1 summit quorum. The reward for a given block for a node is proportional to the maximum quorum of a level-1 summit containing that node in the round in which it was proposed - such quora for all the rounds in the era are what is averaged in the latter statistic. + +If the `-v` flag is set, in addition to printing the 10 nodes with the most rounds missed, the tool also prints which rounds were missed by those nodes. diff --git a/utils/highway-rewards-analysis/src/main.rs b/utils/highway-rewards-analysis/src/main.rs new file mode 100644 index 0000000000..541c422d2e --- /dev/null +++ b/utils/highway-rewards-analysis/src/main.rs @@ -0,0 +1,201 @@ +use std::{ + collections::{BTreeMap, HashSet}, + fs::File, + io::Read, +}; + +use clap::Parser; +use flate2::read::GzDecoder; +use serde::{Deserialize, Serialize}; + +use casper_node::consensus::{ + highway_core::{ + finality_detector::{ + assigned_weight_and_latest_unit, find_max_quora, round_participation, + RoundParticipation, + }, + State, + }, + protocols::common::validators, + utils::Validators, + ClContext, +}; +use casper_types::{EraId, PublicKey, Timestamp, U512}; + +#[derive(Parser, Debug)] +#[command(author, version, about, long_about = None)] +struct Args { + filename: String, + #[arg(short, long)] + verbose: bool, +} + +/// Debug dump of era used for serialization. +#[derive(Debug, Serialize, Deserialize)] +pub(crate) struct EraDump { + /// The era that is being dumped. + pub id: EraId, + + /// The scheduled starting time of this era. + pub start_time: Timestamp, + /// The height of this era's first block. + pub start_height: u64, + + // omitted: pending blocks + /// Validators that have been faulty in any of the recent BONDED_ERAS switch blocks. This + /// includes `new_faulty`. + pub faulty: HashSet, + /// Validators that are excluded from proposing new blocks. 
+ pub cannot_propose: HashSet, + /// Accusations collected in this era so far. + pub accusations: HashSet, + /// The validator weights. + pub validators: BTreeMap, + + /// The state of the highway instance associated with the era. + pub highway_state: State, +} + +fn main() { + let args = Args::parse(); + + let mut data = vec![]; + let mut file = File::open(&args.filename).unwrap(); + + if args.filename.ends_with(".gz") { + let mut gz = GzDecoder::new(file); + gz.read_to_end(&mut data).unwrap(); + } else { + file.read_to_end(&mut data).unwrap(); + } + + let dump: EraDump = bincode::deserialize(&data).unwrap(); + + let validators = + validators::(&dump.faulty, &dump.cannot_propose, dump.validators.clone()); + + print_faults(&validators, &dump.highway_state); + + print_skipped_rounds(&validators, &dump, args.verbose); + + print_lowest_quorum_participation(&validators, &dump); +} + +fn print_faults(validators: &Validators, state: &State) { + if state.faulty_validators().count() == 0 { + return; + } + + println!("Faults:"); + for vid in state.faulty_validators() { + let fault = state.maybe_fault(vid).unwrap(); + println!("{}: {:?}", validators.id(vid).unwrap(), fault); + } + println!(); +} + +const TOP_TO_PRINT: usize = 10; + +fn round_num(dump: &EraDump, round_id: Timestamp) -> u64 { + let min_round_length = dump.highway_state.params().min_round_length(); + (round_id.millis() - dump.start_time.millis()) / min_round_length.millis() +} + +fn print_skipped_rounds(validators: &Validators, dump: &EraDump, verbose: bool) { + let state = &dump.highway_state; + let highest_block = state.fork_choice(state.panorama()).unwrap(); + let all_blocks = std::iter::once(highest_block).chain(state.ancestor_hashes(highest_block)); + let mut skipped_rounds = vec![vec![]; validators.len()]; + + for block_hash in all_blocks { + let block_unit = state.unit(block_hash); + let round_id = block_unit.round_id(); + for i in 0..validators.len() as u32 { + let observation = 
state.panorama().get(i.into()).unwrap(); + let round_participation = round_participation(state, observation, round_id); + if matches!(round_participation, RoundParticipation::No) { + skipped_rounds[i as usize].push(round_id); + } + } + } + + for rounds in skipped_rounds.iter_mut() { + rounds.sort(); + } + + let mut num_skipped_rounds: Vec<_> = skipped_rounds + .iter() + .enumerate() + .map(|(i, skipped)| (i as u32, skipped.len())) + .collect(); + num_skipped_rounds.sort_by_key(|(_, len)| *len); + + println!("{} validators who skipped the most rounds:", TOP_TO_PRINT); + for (vid, len) in num_skipped_rounds.iter().rev().take(TOP_TO_PRINT) { + println!( + "{}: skipped {} rounds", + validators.id((*vid).into()).unwrap(), + len + ); + } + + if verbose { + println!(); + for (vid, _) in num_skipped_rounds.iter().rev().take(TOP_TO_PRINT) { + let skipped_rounds: Vec<_> = skipped_rounds[*vid as usize] + .iter() + .map(|rid| format!("{}", round_num(dump, *rid))) + .collect(); + println!( + "{} skipped rounds: {}", + validators.id((*vid).into()).unwrap(), + skipped_rounds.join(", ") + ); + } + } + + println!(); +} + +fn print_lowest_quorum_participation(validators: &Validators, dump: &EraDump) { + let state = &dump.highway_state; + let highest_block = state.fork_choice(state.panorama()).unwrap(); + let mut quora_sum = vec![0.0; validators.len()]; + let mut num_rounds = 0; + + let hb_unit = state.unit(highest_block); + for bhash in state.ancestor_hashes(highest_block) { + let proposal_unit = state.unit(bhash); + let r_id = proposal_unit.round_id(); + + let (assigned_weight, latest) = + assigned_weight_and_latest_unit(state, &hb_unit.panorama, r_id); + + let max_quora = find_max_quora(state, bhash, &latest); + let max_quora: Vec = max_quora + .into_iter() + .map(|quorum_w| quorum_w.0 as f32 / assigned_weight.0 as f32 * 100.0) + .collect(); + + for (q_sum, max_q) in quora_sum.iter_mut().zip(&max_quora) { + *q_sum += max_q; + } + num_rounds += 1; + } + + let mut quora_avg: Vec<_> 
= quora_sum + .into_iter() + .enumerate() + .map(|(vid, q_sum)| (vid as u32, q_sum / num_rounds as f32)) + .collect(); + quora_avg.sort_by(|(_, q_avg1), (_, q_avg2)| q_avg1.partial_cmp(q_avg2).unwrap()); + + println!("{} validators with lowest average max quora:", TOP_TO_PRINT); + for (vid, q_avg) in quora_avg.iter().take(TOP_TO_PRINT) { + println!( + "{}: average max quorum {:3.1}%", + validators.id((*vid).into()).unwrap(), + q_avg + ); + } +} diff --git a/utils/highway-state-grapher/Cargo.toml b/utils/highway-state-grapher/Cargo.toml new file mode 100644 index 0000000000..addc0f40f1 --- /dev/null +++ b/utils/highway-state-grapher/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "highway-state-grapher" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +bincode = "1" +clap = { version = "4", features = ["derive"] } +casper-node = { path = "../../node" } +casper-types = { path = "../../types" } +flate2 = "1" +freetype-sys = "0.13" +glium = "0.26" +glium_text_rusttype = "0.3" +libc = "0.2" +nalgebra = "0.32" +serde = "1" diff --git a/utils/highway-state-grapher/DejaVuSans.ttf b/utils/highway-state-grapher/DejaVuSans.ttf new file mode 100644 index 0000000000..e5f7eecce4 Binary files /dev/null and b/utils/highway-state-grapher/DejaVuSans.ttf differ diff --git a/utils/highway-state-grapher/src/main.rs b/utils/highway-state-grapher/src/main.rs new file mode 100644 index 0000000000..24056d5b8b --- /dev/null +++ b/utils/highway-state-grapher/src/main.rs @@ -0,0 +1,613 @@ +mod renderer; + +use std::{ + collections::{BTreeMap, BTreeSet, HashMap, HashSet, VecDeque}, + fmt::{self, Debug}, + fs::File, + io::Read, + ops::RangeBounds, +}; + +use casper_node::consensus::{ + highway_core::{ + finality_detector::{assigned_weight_and_latest_unit, find_max_quora}, + Panorama, State, + }, + utils::{ValidatorIndex, ValidatorMap}, + ClContext, +}; +use casper_types::{Digest, EraId, 
PublicKey, Timestamp, U512}; + +use clap::Parser; +use flate2::read::GzDecoder; +use glium::{ + glutin::{ + event::{ElementState, Event, MouseButton, MouseScrollDelta, VirtualKeyCode, WindowEvent}, + event_loop::{ControlFlow, EventLoop}, + window::WindowBuilder, + ContextBuilder, + }, + Display, +}; +use serde::{Deserialize, Serialize}; + +use crate::renderer::Renderer; + +#[derive(Parser, Debug)] +#[command(author, version, about, long_about = None)] +struct Args { + filename: String, +} + +/// Debug dump of era used for serialization. +#[derive(Debug, Serialize, Deserialize)] +pub(crate) struct EraDump { + /// The era that is being dumped. + pub id: EraId, + + /// The scheduled starting time of this era. + pub start_time: Timestamp, + /// The height of this era's first block. + pub start_height: u64, + + // omitted: pending blocks + /// Validators that have been faulty in any of the recent BONDED_ERAS switch blocks. This + /// includes `new_faulty`. + pub faulty: HashSet, + /// Validators that are excluded from proposing new blocks. + pub cannot_propose: HashSet, + /// Accusations collected in this era so far. + pub accusations: HashSet, + /// The validator weights. + pub validators: BTreeMap, + + /// The state of the highway instance associated with the era. + pub highway_state: State, +} + +/// Helper struct for sorting the units with regards to the implicit partial ordering in the DAG. +struct Units { + set: HashSet, + order: Vec, +} + +impl Units { + /// Collects all the unit hashes and orders them roughly from the newest to the oldest. 
+ fn do_collect_ancestor_units( + &mut self, + state: &State, + panorama: &Panorama, + ) { + let hashes_to_add: Vec<_> = panorama.iter_correct_hashes().collect(); + let mut hashes_to_proceed_with = vec![]; + for hash in hashes_to_add { + if self.set.insert(*hash) { + self.order.push(*hash); + hashes_to_proceed_with.push(*hash); + } + } + for hash in hashes_to_proceed_with { + let unit = state.unit(&hash); + self.do_collect_ancestor_units(state, &unit.panorama); + } + } + + /// Reorders the units in self.order so that every unit comes after all its dependencies. + fn reorder(&mut self, state: &State) { + let mut new_order_set = HashSet::new(); + let mut new_order = vec![]; + let mut queue: VecDeque<_> = std::mem::take(&mut self.order).into_iter().rev().collect(); + loop { + if queue.is_empty() { + break; + } + let unit = queue.pop_front().unwrap(); + if state + .unit(&unit) + .panorama + .iter_correct_hashes() + .all(|cited| new_order_set.contains(cited)) + { + new_order_set.insert(unit); + new_order.push(unit) + } else { + queue.push_back(unit); + } + } + self.order = new_order; + } + + /// Collects all the unit hashes and orders them so that every unit comes after all its + /// dependencies. + fn collect_ancestor_units(&mut self, state: &State) { + self.do_collect_ancestor_units(state, state.panorama()); + self.reorder(state); + } +} + +/// A more readable unit ID: the validator index together with the height in that validator's +/// swimlane +#[derive(Clone, Copy, PartialEq, Eq, Hash)] +pub struct UnitId(ValidatorIndex, usize); + +impl Debug for UnitId { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "V{}_{}", self.0 .0, self.1) + } +} + +/// A more readable block id. The first field is the block height, the second is the number of the +/// block among all the blocks at that height (if there are no orphan blocks, all the block IDs will +/// have 0s in the second field). 
+#[derive(Clone, Copy, PartialEq, Eq, Hash)] +pub struct BlockId(u64, u8); + +impl Debug for BlockId { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "B{}", self.0)?; + for _ in 0..self.1 { + write!(f, "'")?; + } + Ok(()) + } +} + +/// A helper struct for coloring units based on the validator's max quorum. +/// `max_rank` is the number of distinct values of max quorum. `rank` is the index relative to the +/// maximum value (ie. the largest max quorum has rank 0, the second largest has rank 1 etc.) +#[derive(Clone, Copy)] +pub struct Quorum { + pub rank: usize, + pub max_rank: usize, + pub weight_percent: f32, +} + +impl Debug for Quorum { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{:3.1}", self.weight_percent) + } +} + +/// A representation of the protocol state unit for the purpose of drawing it on the screen. +/// `graph_height` is the maximum of graph heights of the cited units, plus 1 - drawing based on +/// graph height guarantees that every unit will appear higher than all its dependencies. 
+#[derive(Clone)] +pub struct GraphUnit { + pub id: UnitId, + pub creator: ValidatorIndex, + pub vote: BlockId, + pub is_proposal: bool, + pub cited_units: Vec, + pub height: usize, + pub graph_height: usize, + pub timestamp: u64, + pub round_num: u64, + pub round_id: Timestamp, + pub round_exp: u8, + pub max_quorum: Option, +} + +impl Debug for GraphUnit { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("GraphUnit") + .field("id", &self.id) + .field("creator", &format!("V{}", self.creator.0)) + .field("height", &self.height) + .field("graph_height", &self.graph_height) + .field("vote", &self.vote) + .field("is_proposal", &self.is_proposal) + .field("timestamp", &self.timestamp) + .field("round_num", &self.round_num) + .field("round_id", &self.round_id) + .field("round_exp", &self.round_exp) + .field("max_quorum", &self.max_quorum) + .field("cited_units", &self.cited_units) + .finish() + } +} + +/// A struct helping in assigning readable IDs to blocks. +#[derive(Clone, Debug)] +struct BlockMapper { + hash_to_id: HashMap, + id_to_hash: HashMap, + last_id_by_height: HashMap, +} + +impl BlockMapper { + fn new() -> Self { + Self { + hash_to_id: HashMap::new(), + id_to_hash: HashMap::new(), + last_id_by_height: HashMap::new(), + } + } + + /// Inserts the new ID, updating the mappings as necessary. + fn insert(&mut self, hash: Digest, id: BlockId) { + self.hash_to_id.insert(hash, id); + self.id_to_hash.insert(id, hash); + let entry = self.last_id_by_height.entry(id.0).or_insert(id.1); + *entry = (*entry).max(id.1); + } + + /// Returns the block ID for the next block at the given height. + /// Usually, there will only be one block at a given height, but in some cases forks are + /// possible - in those cases, blocks at the same height will get sequential IDs. 
+ fn next_id_for_height(&self, height: u64) -> BlockId { + BlockId( + height, + self.last_id_by_height + .get(&height) + .map(|idx| *idx + 1) + .unwrap_or(0), + ) + } + + /// Gets the readable block ID corresponding to the given hash. + fn get(&self, hash: &Digest) -> Option { + self.hash_to_id.get(hash).copied() + } + + /// Gets the block hash corresponding to the given ID. + #[allow(unused)] // Will be useful if we add features related to the blocks + fn get_by_id(&self, id: &BlockId) -> Option { + self.id_to_hash.get(id).copied() + } +} + +/// All the data needed for drawing the unit DAG. +#[derive(Clone, Debug)] +pub struct Graph { + units: ValidatorMap>, + reverse_edges: HashMap>, + #[allow(unused)] // Will be useful if we add features related to the blocks + blocks: BlockMapper, + weight_percentages: ValidatorMap, +} + +impl Graph { + /// Creates a `Graph` based on the `state`. + fn new(state: &State, start_time: Timestamp) -> Self { + let mut units: BTreeMap> = state + .weights() + .iter() + .enumerate() + .map(|(idx, _)| (ValidatorIndex::from(idx as u32), vec![])) + .collect(); + let mut reverse_edges: HashMap> = HashMap::new(); + let mut unit_ids_by_hash: HashMap = HashMap::new(); + let mut blocks = BlockMapper::new(); + + let mut units_set = Units { + set: HashSet::new(), + order: vec![], + }; + + units_set.collect_ancestor_units(state); + + eprintln!("num units: {}", units_set.order.len()); + + let max_round_exp = (state.params().max_round_length().millis() + / state.params().min_round_length().millis()) + .trailing_zeros(); + let max_round_length = state.params().min_round_length().millis() << max_round_exp; + let rounded_era_start = + Timestamp::from((start_time.millis() / max_round_length) * max_round_length); + + let mut highest_block: Option<(u64, Digest)> = None; + + for unit_hash in &units_set.order { + let unit = state.unit(unit_hash); + let block = state.block(&unit.block); + if highest_block.is_none_or( |(height, _)| height < block.height) { + 
highest_block = Some((block.height, unit.block)); + } + let block_id = if let Some(b_id) = blocks.get(&unit.block) { + b_id + } else { + let b_id = blocks.next_id_for_height(block.height); + blocks.insert(unit.block, b_id); + b_id + }; + let is_proposal = unit + .panorama + .iter_correct_hashes() + .all(|hash| state.unit(hash).block != unit.block); + let cited_units: Vec = unit + .panorama + .iter_correct_hashes() + .map(|hash| *unit_ids_by_hash.get(hash).unwrap()) + .collect(); + let graph_height = cited_units + .iter() + .map(|unit_id| &units.get(&unit_id.0).unwrap()[unit_id.1]) + .map(|g_unit| g_unit.graph_height) + .max() + .map(|max_height| max_height + 1) + .unwrap_or(0); + let unit_id = UnitId(unit.creator, units.get(&unit.creator).unwrap().len()); + + for cited_unit_id in &cited_units { + reverse_edges + .entry(*cited_unit_id) + .or_default() + .push(unit_id); + } + + let time_since_era_start = unit.timestamp.saturating_diff(rounded_era_start).millis(); + let round_num = time_since_era_start / state.params().min_round_length().millis(); + + let graph_unit = GraphUnit { + id: unit_id, + creator: unit.creator, + vote: block_id, + is_proposal, + cited_units, + height: unit.seq_number as usize, + graph_height, + timestamp: time_since_era_start, + round_num, + round_id: unit.round_id(), + round_exp: (unit.round_len().millis() / state.params().min_round_length().millis()) + .trailing_zeros() as u8, + max_quorum: None, + }; + unit_ids_by_hash.insert(*unit_hash, unit_id); + units.get_mut(&unit.creator).unwrap().push(graph_unit); + } + + // fill in max quora + if let Some((_hb_height, hb_hash)) = highest_block { + let hb_unit = state.unit(&hb_hash); + for bhash in state.ancestor_hashes(&hb_hash) { + let proposal_unit = state.unit(bhash); + let r_id = proposal_unit.round_id(); + + let (assigned_weight, latest) = + assigned_weight_and_latest_unit(state, &hb_unit.panorama, r_id); + + let max_quora = find_max_quora(state, bhash, &latest); + // deduplicate and sort max 
quora + let max_quora_set: BTreeSet<_> = max_quora.iter().copied().collect(); + let max_quora_rank_map: BTreeMap<_, _> = max_quora_set + .into_iter() + .rev() + .enumerate() + .map(|(rank, quorum)| (quorum, rank)) + .collect(); + + for unit in latest.iter().flatten() { + let gunit_id = unit_ids_by_hash.get(*unit).unwrap(); + let gunit = &mut units.get_mut(&gunit_id.0).unwrap()[gunit_id.1]; + let quorum_w = max_quora[gunit.creator]; + let rank = max_quora_rank_map[&quorum_w]; + let weight_percent = quorum_w.0 as f32 / assigned_weight.0 as f32 * 100.0; + gunit.max_quorum = Some(Quorum { + rank, + max_rank: max_quora_rank_map.len(), + weight_percent, + }); + } + } + } + + let weight_percentages: ValidatorMap = state + .weights() + .iter() + .map(|weight| weight.0 as f32 / state.total_weight().0 as f32 * 100.0) + .collect(); + + Self { + units: units.into_values().collect(), + reverse_edges, + blocks, + weight_percentages, + } + } + + /// Returns the unit under the given `unit_id`. + pub fn get(&self, unit_id: &UnitId) -> Option<&GraphUnit> { + self.units + .get(unit_id.0) + .and_then(|swimlane| swimlane.get(unit_id.1)) + } + + /// Returns the validator weights. + pub fn validator_weights(&self) -> &ValidatorMap { + &self.weight_percentages + } + + /// Iterates over all the units created by validators within `range_vid` and with graph heights + /// within `range_graph_height`. 
+ pub fn iter_range( + &self, + range_vid: R1, + range_graph_height: R2, + ) -> impl Iterator + where + R1: RangeBounds + Clone, + R2: RangeBounds + Clone, + { + let range_vid_clone = range_vid.clone(); + self.units + .iter() + .enumerate() + .skip_while(move |(vid, _)| !range_vid.contains(vid)) + .take_while(move |(vid, _)| range_vid_clone.contains(vid)) + .flat_map(move |(_, swimlane)| { + let range_graph_height_clone1 = range_graph_height.clone(); + let range_graph_height_clone2 = range_graph_height.clone(); + swimlane + .iter() + .skip_while(move |unit| !range_graph_height_clone1.contains(&unit.graph_height)) + .take_while(move |unit| range_graph_height_clone2.contains(&unit.graph_height)) + }) + } +} + +fn main() { + let args = Args::parse(); + + let mut data = vec![]; + let mut file = File::open(&args.filename).unwrap(); + + if args.filename.ends_with(".gz") { + let mut gz = GzDecoder::new(file); + gz.read_to_end(&mut data).unwrap(); + } else { + file.read_to_end(&mut data).unwrap(); + } + + let dump: EraDump = bincode::deserialize(&data).unwrap(); + + eprintln!("{}", dump.id); + + let graph = Graph::new(&dump.highway_state, dump.start_time); + + for (index, (pub_key, _)) in dump.validators.iter().enumerate() { + eprintln!("{}: {}", index, pub_key); + } + + start_rendering(graph); +} + +/// Struct keeping the current state of some keys (the events only report the current state, so we +/// need to store the old state to know when it changes). +#[derive(Clone, Copy)] +struct KeyboardState { + /// State of the 'E' key. + e_state: bool, +} + +impl KeyboardState { + fn e_pressed(&mut self) -> bool { + let was_pressed = self.e_state; + self.e_state = true; + !was_pressed + } + + fn e_released(&mut self) { + self.e_state = false; + } +} + +/// Enum keeping the state of mouse input. +#[derive(Clone, Copy)] +enum MouseState { + /// Mouse is freely moving. + Free { position: (f64, f64) }, + /// The user is dragging something. 
+ Dragging { last_position: (f64, f64) }, +} + +impl MouseState { + /// Handles a mouse move event. + /// Returns `Some(delta_x, delta_y)` if dragging is in progress. + fn handle_move(&mut self, new_position: (f64, f64)) -> Option<(f32, f32)> { + match self { + Self::Free { position } => { + *position = new_position; + None + } + Self::Dragging { last_position } => { + let delta_x = (new_position.0 - last_position.0) as f32; + let delta_y = (new_position.1 - last_position.1) as f32; + *last_position = new_position; + Some((delta_x, delta_y)) + } + } + } + + /// Switches between `Free` and `Dragging` based on the button presses. + fn handle_button(&mut self, button_down: bool) { + match (*self, button_down) { + (Self::Free { position }, true) => { + *self = Self::Dragging { + last_position: position, + }; + } + (Self::Dragging { last_position }, false) => { + *self = Self::Free { + position: last_position, + }; + } + _ => (), + } + } + + /// Returns the current position of the cursor. + fn cursor(&self) -> (f32, f32) { + match self { + Self::Free { position } => (position.0 as f32, position.1 as f32), + Self::Dragging { last_position } => (last_position.0 as f32, last_position.1 as f32), + } + } +} + +/// The main loop of the program. +fn start_rendering(graph: Graph) { + let event_loop = EventLoop::new(); + + let wb = WindowBuilder::new() + .with_title("Consensus Graph Visualization") + .with_maximized(true) + .with_resizable(true); + let cb = ContextBuilder::new(); + let display = Display::new(wb, cb, &event_loop).unwrap(); + + let mut renderer = Renderer::new(&display); + let mut mouse_state = MouseState::Free { + position: (0.0, 0.0), + }; + let mut keyboard_state = KeyboardState { e_state: false }; + + event_loop.run(move |ev, _, control_flow| { + match ev { + Event::WindowEvent { event, .. } => match event { + WindowEvent::CloseRequested => { + *control_flow = ControlFlow::Exit; + return; + } + WindowEvent::MouseWheel { delta, .. 
} => match delta { + MouseScrollDelta::LineDelta(_, vertical) => { + renderer.mouse_scroll(vertical); + } + MouseScrollDelta::PixelDelta(pixels) => { + renderer.mouse_scroll(pixels.y as f32 / 30.0); + } + }, + WindowEvent::KeyboardInput { input, .. } => { + match (input.virtual_keycode, input.state) { + (Some(VirtualKeyCode::E), ElementState::Pressed) => { + if keyboard_state.e_pressed() { + renderer.toggle_edges(); + } + } + (Some(VirtualKeyCode::E), ElementState::Released) => { + keyboard_state.e_released(); + } + _ => (), + } + } + WindowEvent::MouseInput { state, button, .. } => { + if let (state, MouseButton::Left) = (state, button) { + mouse_state.handle_button(matches!(state, ElementState::Pressed)); + } + } + WindowEvent::CursorMoved { position, .. } => { + if let Some(delta) = mouse_state.handle_move((position.x, position.y)) { + renderer.pan(delta.0, delta.1); + } + } + _ => (), + }, + Event::MainEventsCleared => { + let (cursor_x, cursor_y) = mouse_state.cursor(); + renderer.draw(&display, &graph, cursor_x, cursor_y); + } + _ => (), + } + *control_flow = ControlFlow::Poll; + }); +} diff --git a/utils/highway-state-grapher/src/renderer.rs b/utils/highway-state-grapher/src/renderer.rs new file mode 100644 index 0000000000..efbc712e0d --- /dev/null +++ b/utils/highway-state-grapher/src/renderer.rs @@ -0,0 +1,450 @@ +mod matrix; + +use std::{collections::HashSet, f32::consts::PI}; + +use casper_node::consensus::utils::ValidatorMap; +use glium::{ + implement_vertex, index, uniform, Display, DrawParameters, Frame, Program, Surface, + VertexBuffer, +}; +use glium_text_rusttype::{self, FontTexture, TextDisplay, TextSystem}; +use nalgebra::Vector2; + +use crate::{renderer::matrix::Matrix, Graph, GraphUnit, UnitId}; + +const VERTEX_SHADER_SRC: &str = r#" + #version 140 + + in vec2 position; + + uniform mat4 matrix; + uniform vec3 color; + out vec3 in_color; + + void main() { + gl_Position = matrix * vec4(position, 0.0, 1.0); + in_color = color; + } +"#; + +const 
FRAGMENT_SHADER_SRC: &str = r#" + #version 140 + + in vec3 in_color; + out vec4 color; + + void main() { + color = vec4(in_color, 1.0); + } +"#; + +const FONT_FILE: &[u8] = include_bytes!("../DejaVuSans.ttf"); + +#[derive(Debug, Clone, Copy)] +struct Vertex { + position: [f32; 2], +} + +implement_vertex!(Vertex, position); + +/// Rendering-specific data. +pub struct Renderer { + /// The coordinates at the center of the screen. + center: Vector2, + /// The width of the window, in pixels. + window_width: f32, + /// The current width of the viewport. + width: f32, + /// The shading program. + program: Program, + /// Stuff for rendering text. + text_system: TextSystem, + font: FontTexture, + + /// Pre-generated vertices for a unit. + unit_vertex_buffer: VertexBuffer, + interior_indices: index::NoIndices, + frame_indices: index::IndexBuffer, + + /// `True` if we're drawing edges. + edges_enabled: bool, +} + +const UNIT_WIDTH: f32 = 0.5; +const UNIT_HEIGHT: f32 = 0.4; +const CORNER_RADIUS: f32 = 0.05; +const LINE_WIDTH: f32 = 0.015; + +impl Renderer { + pub fn new(display: &Display) -> Self { + let text_system = TextSystem::new(display); + let font = + FontTexture::new(display, FONT_FILE, 32, FontTexture::ascii_character_list()).unwrap(); + + let (unit_vertex_buffer, interior_indices, frame_indices) = + Self::unit_vertex_buffer(display); + + Renderer { + center: Vector2::new(3.5, 2.5), + window_width: 3000.0, // will get updated on first frame draw + width: 8.0, + program: Program::from_source(display, VERTEX_SHADER_SRC, FRAGMENT_SHADER_SRC, None) + .unwrap(), + text_system, + font, + + unit_vertex_buffer, + interior_indices, + frame_indices, + edges_enabled: true, + } + } + + /// Creates vertices for a rounded rectangle. 
+ fn unit_vertex_buffer( + display: &Display, + ) -> ( + VertexBuffer, + index::NoIndices, + index::IndexBuffer, + ) { + let mut shape = vec![]; + let n_vertices_corner = 8; + + let corner_radius = CORNER_RADIUS; + let width = UNIT_WIDTH; + let height = UNIT_HEIGHT; + + let corners = [ + ( + width / 2.0 - corner_radius, + height / 2.0 - corner_radius, + 0.0, + ), + ( + -width / 2.0 + corner_radius, + height / 2.0 - corner_radius, + PI * 0.5, + ), + ( + -width / 2.0 + corner_radius, + -height / 2.0 + corner_radius, + PI, + ), + ( + width / 2.0 - corner_radius, + -height / 2.0 + corner_radius, + PI * 1.5, + ), + ]; + + shape.push(Vertex { + position: [0.0, 0.0], + }); + for (x, y, phase) in corners { + for i in 0..n_vertices_corner { + let ang = 0.5 * PI * (i as f32) / n_vertices_corner as f32 + phase; + shape.push(Vertex { + position: [corner_radius * ang.cos() + x, corner_radius * ang.sin() + y], + }); + } + } + shape.push(shape[1]); + + ( + VertexBuffer::new(display, &shape).unwrap(), + index::NoIndices(index::PrimitiveType::TriangleFan), + index::IndexBuffer::new( + display, + index::PrimitiveType::LineLoop, + &(1..(shape.len() - 1) as u32).collect::>(), + ) + .unwrap(), + ) + } + + /// Draws the graph. 
+ pub fn draw(&mut self, display: &Display, graph: &Graph, cursor_x: f32, cursor_y: f32) { + let mut target = display.draw(); + + let (size_x, size_y) = target.get_dimensions(); + self.window_width = size_x as f32; + + let (cursor_x, cursor_y) = self.convert_cursor(cursor_x, cursor_y, size_x, size_y); + + let aspect = (size_y as f32) / (size_x as f32); + + let height = self.width * aspect; + + let max_graph_height = (self.center.y + height / 2.0 + 1.0) as usize; + let min_graph_height = (self.center.y - height / 2.0 - 1.0).max(0.0) as usize; + + let max_validator_index = (self.center.x + self.width / 2.0 + 1.0) as usize; + let min_validator_index = (self.center.x - self.width / 2.0 - 1.0).max(0.0) as usize; + + target.clear_color(0.0, 0.0, 0.2, 1.0); + + let matrix = Matrix::translation(-self.center.x, -self.center.y) + * Matrix::scale(2.0 / self.width, 2.0 / height); + + let mut edges_to_draw = HashSet::new(); + let mut highlighted_edges_to_draw = HashSet::new(); + + for unit in graph.iter_range( + min_validator_index..=max_validator_index, + min_graph_height..=max_graph_height, + ) { + let set_to_insert = if Self::unit_contains_cursor(unit, cursor_x, cursor_y) { + &mut highlighted_edges_to_draw + } else { + &mut edges_to_draw + }; + for cited_unit in &unit.cited_units { + set_to_insert.insert((unit.id, *cited_unit)); + } + for dependent_unit in graph.reverse_edges.get(&unit.id).into_iter().flatten() { + set_to_insert.insert((*dependent_unit, unit.id)); + } + } + + // draw edges first, so that the units are drawn over them + if self.edges_enabled { + self.draw_edges(display, &mut target, &matrix, graph, edges_to_draw, false); + } + self.draw_edges( + display, + &mut target, + &matrix, + graph, + highlighted_edges_to_draw, + true, + ); + + for unit in graph.iter_range( + min_validator_index..=max_validator_index, + min_graph_height..=max_graph_height, + ) { + self.draw_unit(&mut target, unit, graph.validator_weights(), &matrix); + } + + target.finish().unwrap(); + 
} + + /// Converts the cursor coordinates in pixels into the scene coordinates. + fn convert_cursor(&self, cursor_x: f32, cursor_y: f32, size_x: u32, size_y: u32) -> (f32, f32) { + let size_x = size_x as f32; + let size_y = size_y as f32; + let delta_x = (cursor_x / size_x - 0.5) * self.width; + let delta_y = (0.5 - cursor_y / size_y) * self.width * size_y / size_x; + (self.center.x + delta_x, self.center.y + delta_y) + } + + /// Checks whether the cursor hovers over a unit. + fn unit_contains_cursor(unit: &GraphUnit, cursor_x: f32, cursor_y: f32) -> bool { + let (unit_x, unit_y) = Self::unit_pos(unit); + (unit_x - cursor_x).abs() < UNIT_WIDTH / 2.0 + && (unit_y - cursor_y).abs() < UNIT_HEIGHT / 2.0 + } + + /// Draws a unit. + fn draw_unit( + &mut self, + target: &mut Frame, + unit: &GraphUnit, + weights: &ValidatorMap, + view: &Matrix, + ) { + let (x, y) = Self::unit_pos(unit); + + let matrix2 = Matrix::translation(x, y) * *view; + + let color = match (unit.is_proposal, unit.max_quorum.as_ref()) { + (false, Some(quorum)) => { + if quorum.max_rank <= 1 { + Self::quorum_color_spectrum(0.0) + } else { + let frac = quorum.rank as f32 / (quorum.max_rank - 1) as f32; + Self::quorum_color_spectrum(frac) + } + } + (true, _) => [0.0_f32, 0.5, 0.5], + _ => [0.0_f32, 0.0, 0.2], + }; + + let uniforms = uniform! { + matrix: matrix2.inner(), + color: color, + }; + + target + .draw( + &self.unit_vertex_buffer, + self.interior_indices, + &self.program, + &uniforms, + &Default::default(), + ) + .unwrap(); + + let uniforms = uniform! 
{ + matrix: matrix2.inner(), + color: [ 1.0_f32, 1.0, 0.0 ], + }; + + let draw_params = DrawParameters { + line_width: Some(LINE_WIDTH), + ..Default::default() + }; + + target + .draw( + &self.unit_vertex_buffer, + &self.frame_indices, + &self.program, + &uniforms, + &draw_params, + ) + .unwrap(); + + if self.width < 10.0 { + let text1 = format!("{:?}", unit.id); + let text2 = format!( + "Creator weight: {:3.1}%", + weights.get(unit.creator).unwrap() + ); + let text3 = format!("Vote: {:?}", unit.vote); + let text4 = format!("round_exp: {}", unit.round_exp); + let text5 = format!("round_id: {}", unit.round_id); + let text6 = format!("timestamp: {} (round {})", unit.timestamp, unit.round_num); + let text7 = if let Some(quorum) = unit.max_quorum.as_ref() { + format!("max quorum: {:3.1}%", quorum.weight_percent) + } else { + "".to_string() + }; + self.draw_text(target, -0.4, 0.7, &text1, 1.3, &matrix2); + self.draw_text(target, -0.8, 0.46, &text2, 0.8, &matrix2); + self.draw_text(target, -0.8, 0.22, &text3, 0.8, &matrix2); + self.draw_text(target, -0.8, -0.02, &text4, 0.8, &matrix2); + self.draw_text(target, -0.8, -0.26, &text5, 0.8, &matrix2); + self.draw_text(target, -0.8, -0.5, &text6, 0.8, &matrix2); + self.draw_text(target, -0.8, -0.74, &text7, 0.8, &matrix2); + } else { + let text = format!("{:?}", unit.id); + self.draw_text(target, -0.4, -0.15, &text, 3.0, &matrix2); + } + } + + /// Renders a string. + fn draw_text( + &self, + target: &mut Frame, + x: f32, + y: f32, + text: &str, + scale: f32, + matrix: &Matrix, + ) { + let basic_scale = UNIT_HEIGHT / 12.0; + let scale = basic_scale * scale; + let matrix = Matrix::scale(scale, scale) + * Matrix::translation(x * UNIT_WIDTH / 2.0, y * UNIT_HEIGHT / 2.0) + * *matrix; + let text = TextDisplay::new(&self.text_system, &self.font, text); + + glium_text_rusttype::draw( + &text, + &self.text_system, + target, + matrix.inner(), + (1.0, 1.0, 1.0, 1.0), + ) + .unwrap(); + } + + /// Draws the edges between units. 
+ fn draw_edges( + &mut self, + display: &Display, + target: &mut Frame, + view: &Matrix, + graph: &Graph, + edges: HashSet<(UnitId, UnitId)>, + highlight: bool, + ) { + let mut vertices = vec![]; + + for (unit1, unit2) in edges { + let pos1 = Self::unit_pos(graph.get(&unit1).unwrap()); + let pos2 = Self::unit_pos(graph.get(&unit2).unwrap()); + + vertices.push(Vertex { + position: [pos1.0, pos1.1], + }); + vertices.push(Vertex { + position: [pos2.0, pos2.1], + }); + } + + let vertex_buffer = VertexBuffer::new(display, &vertices).unwrap(); + let indices = index::NoIndices(index::PrimitiveType::LinesList); + + let color = if highlight { + [1.0_f32, 1.0, 1.0] + } else { + [1.0_f32, 1.0, 0.0] + }; + + let uniforms = uniform! { + matrix: view.inner(), + color: color, + }; + + let draw_parameters = DrawParameters { + line_width: Some(if highlight { + LINE_WIDTH * 2.0 + } else { + LINE_WIDTH + }), + ..Default::default() + }; + + target + .draw( + &vertex_buffer, + indices, + &self.program, + &uniforms, + &draw_parameters, + ) + .unwrap(); + } + + /// Returns the position of the units in scene coordinates. + fn unit_pos(unit: &GraphUnit) -> (f32, f32) { + let x = unit.creator.0 as f32; + let y = unit.graph_height as f32; + (x, y) + } + + /// Handles a mouse scroll event (zooms in or out). + pub fn mouse_scroll(&mut self, lines: f32) { + self.width *= 2.0_f32.powf(lines / 3.0); + } + + /// Handles a dragging event (pans the screen). + pub fn pan(&mut self, delta_x: f32, delta_y: f32) { + let scale = self.width / self.window_width; + self.center += Vector2::new(-delta_x * scale, delta_y * scale); + } + + pub fn toggle_edges(&mut self) { + self.edges_enabled = !self.edges_enabled; + } + + /// Returns a color for the max quorum based on its rank. 
+    fn quorum_color_spectrum(frac: f32) -> [f32; 3] {
+        // Maps `frac` in [0.0, 1.0] to an RGB fill colour: frac == 0.0 yields
+        // green ([0.0, 0.5, 0.0]) and frac == 1.0 yields red ([0.5, 0.0, 0.0]);
+        // blue is always zero and both channels are halved, keeping the fill
+        // dimmer than the bright-yellow unit frame.
+        // NOTE(review): the red channel jumps from just under 0.25 to 0.5 at
+        // frac == 0.5, so the spectrum is discontinuous at the midpoint —
+        // confirm this is intentional.
+        let r = if frac < 0.5 { frac } else { 1.0 };
+        let g = if frac < 0.5 { 1.0 } else { 1.0 - frac };
+        [r * 0.5, g * 0.5, 0.0]
+    }
+}
diff --git a/utils/highway-state-grapher/src/renderer/matrix.rs b/utils/highway-state-grapher/src/renderer/matrix.rs
new file mode 100644
index 0000000000..ed2575869e
--- /dev/null
+++ b/utils/highway-state-grapher/src/renderer/matrix.rs
@@ -0,0 +1,84 @@
+use std::ops;
+
+#[derive(Clone, Copy)]
+pub struct Matrix {
+    coords: [[f32; 4]; 4],
+}
+
+impl Matrix {
+    pub fn identity() -> Matrix {
+        Matrix {
+            coords: [
+                [1.0, 0.0, 0.0, 0.0],
+                [0.0, 1.0, 0.0, 0.0],
+                [0.0, 0.0, 1.0, 0.0],
+                [0.0, 0.0, 0.0, 1.0],
+            ],
+        }
+    }
+
+    pub fn inner(self) -> [[f32; 4]; 4] {
+        self.coords
+    }
+
+    pub fn translation(x: f32, y: f32) -> Matrix {
+        let mut result = Matrix::identity();
+        result.coords[3][0] = x;
+        result.coords[3][1] = y;
+        result
+    }
+
+    pub fn scale(x: f32, y: f32) -> Matrix {
+        Matrix {
+            coords: [
+                [x, 0.0, 0.0, 0.0],
+                [0.0, y, 0.0, 0.0],
+                [0.0, 0.0, 1.0, 0.0],
+                [0.0, 0.0, 0.0, 1.0],
+            ],
+        }
+    }
+}
+
+impl ops::Add for Matrix {
+    type Output = Matrix;
+
+    fn add(mut self, other: Matrix) -> Matrix {
+        for i in 0..4 {
+            for j in 0..4 {
+                self.coords[i][j] += other.coords[i][j];
+            }
+        }
+        self
+    }
+}
+
+impl ops::Sub for Matrix {
+    type Output = Matrix;
+
+    fn sub(mut self, other: Matrix) -> Matrix {
+        for i in 0..4 {
+            for j in 0..4 {
+                self.coords[i][j] -= other.coords[i][j];
+            }
+        }
+        self
+    }
+}
+
+impl ops::Mul for Matrix {
+    type Output = Matrix;
+
+    #[allow(clippy::needless_range_loop)]
+    fn mul(self, other: Matrix) -> Matrix {
+        let mut new_coords = [[0.0; 4]; 4];
+        for i in 0..4 {
+            for j in 0..4 {
+                for k in 0..4 {
+                    new_coords[i][j] += self.coords[i][k] * other.coords[k][j];
+                }
+            }
+        }
+        Matrix { coords: new_coords }
+    }
+}
diff --git a/utils/nctl-metrics/.gitignore b/utils/nctl-metrics/.gitignore
deleted file mode 100644
index b4ccb91789..0000000000
---
a/utils/nctl-metrics/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -prometheus.yml -supervisord.sock -supervisord.log -memory-stats-collector.log -prometheus.log diff --git a/utils/nctl-metrics/README.md b/utils/nctl-metrics/README.md deleted file mode 100644 index b9cca3356c..0000000000 --- a/utils/nctl-metrics/README.md +++ /dev/null @@ -1,50 +0,0 @@ -# Metrics for nctl - -A small setup that runs enough containers to get metrics working when using nctl. - -## How to run - -1. Ensure nctl has generated assets (`nctl-assets-setup`). -2. Run `supervisord -c utils/nctl-metrics/supervisord.conf`. -3. Navigate to and watch metrics. - -## Architecture - -The directory contains - -* a python script that will scrape memory metrics from the OS and make them available via HTTP for prometheus, -* a generator for a prometheus configuration file based on current nctl assets (only `net-1` supported), and -* a supervisord configuration to run the generator and prometheus conveniently. - -## Metrics offered - -In addition to the usual node metrics, the following metrics are available: - -* `os_mem_rss_bytes` -* `os_mem_vms_bytes` -* `os_mem_shared_bytes` -* `os_mem_text_bytes` -* `os_mem_lib_bytes` -* `os_mem_data_bytes` -* `os_mem_dirty_bytes` - -Each has a `node` label indicating which node's memory usage is shown. - -## Common Issues - -* Why am I not getting any memory metrics? - -Check the logs `memory-stats-collector.log`. If there are messages stating `AF_UNIX path too long`, the root path of your `casper-node/utils/nctl/assets/...`, which contains the `supervisord.sock` directory is too long. - -* Why is podman complaining about `policy.json`? 
- -You can either read up on how `policy.json` works or throw caution to the wind (like Docker) and place the following into `~/.config/containers/policy.json`: - -```json -{ - "default": [ { "type": "insecureAcceptAnything" } ], - "transports": { - "docker-daemon": { "": [{"type":"insecureAcceptAnything"}] } - } -} -``` diff --git a/utils/nctl-metrics/gen_prometheus_config.py b/utils/nctl-metrics/gen_prometheus_config.py deleted file mode 100755 index 16cc031e92..0000000000 --- a/utils/nctl-metrics/gen_prometheus_config.py +++ /dev/null @@ -1,42 +0,0 @@ -#!/usr/bin/env python - -import os -import sys - -import toml - -net_name = "net-1" -nodes_dir = os.path.join( - os.path.dirname(__file__), "..", "nctl", "assets", net_name, "nodes" -) - -# We start with the `mem_export` service. -addrs = ["127.0.0.1:8000"] - -for node_dir in os.listdir(nodes_dir): - node_path = os.path.join(nodes_dir, node_dir) - if not os.path.isdir(node_path) or not node_dir.startswith("node-"): - continue - - cfg_path = os.path.join(node_path, "config", "1_0_0", "config.toml") - try: - config = toml.load(open(cfg_path)) - addr = config["rest_server"]["address"].replace("0.0.0.0", "127.0.0.1") - addrs.append(addr) - except Exception as e: - sys.stderr.write("error loading {}\n".format(cfg_path)) - continue - - -# Slightly dirty, we're not dealing with an extra dependency to generate YAML just yet and just -# abuse that pythons list display rendering is valid yaml. -cfg = """scrape_configs: - - job_name: nctl_scrape - scrape_interval: 5s - static_configs: - - targets: {} -""".format( - addrs -) - -print(cfg) diff --git a/utils/nctl-metrics/mem_export.py b/utils/nctl-metrics/mem_export.py deleted file mode 100755 index 6cb125810d..0000000000 --- a/utils/nctl-metrics/mem_export.py +++ /dev/null @@ -1,104 +0,0 @@ -#!/usr/bin/env python - -#: A small script that makes memory usage of nctl nodes available to prometheus. 
- -# Requirements: `prometheus_client`, `psutil` - -from http.client import HTTPConnection -import os -import socket -import sys -import time -from xmlrpc import client - -from prometheus_client import start_http_server, Summary, Gauge -import psutil - - -class UnixStreamHTTPConnection(HTTPConnection): - def connect(self): - self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) - self.sock.connect(self.host) - - -class UnixStreamTransport(client.Transport, object): - def __init__(self, socket_path): - self.socket_path = socket_path - super(UnixStreamTransport, self).__init__() - - def make_connection(self, host): - return UnixStreamHTTPConnection(self.socket_path) - - -def main(): - net_name = "net-1" - - # Workaround letting us use symlink paths to shorten socket names. Otherwise symlinks will be - # resolved by Python's cwd functions (which call libc internally) to resolve `.`. - cwd = os.popen("pwd -L").read().strip() - - sock_addr = os.path.abspath( - os.path.join( - cwd, - "..", - "nctl", - "assets", - net_name, - "daemon", - "socket", - "supervisord.sock", - ) - ) - delay = 1 - - gauges = { - "rss": Gauge("os_mem_rss_bytes", "Resident Set Size", ["node"]), - "vms": Gauge("os_mem_vms_bytes", "Virtual Memory Size", ["node"]), - "shared": Gauge("os_mem_shared_bytes", "Shared memory size", ["node"]), - "text": Gauge("os_mem_text_bytes", "Text memory size", ["node"]), - "lib": Gauge("os_mem_lib_bytes", "Lib memory size", ["node"]), - "data": Gauge("os_mem_data_bytes", "Data memory size", ["node"]), - "dirty": Gauge("os_mem_dirty_bytes", "Dirty memory size", ["node"]), - } - - start_http_server(8000) - - while True: - print("Retrieving data for from {}".format(sock_addr)) - - try: - proxy = client.ServerProxy( - "http://localhost", transport=UnixStreamTransport(sock_addr) - ) - - all_proc_info = proxy.supervisor.getAllProcessInfo() - - for info in all_proc_info: - name = info["name"] - - # Only interested in casper nodes. 
- if not name.startswith("casper-net"): - continue - - # PID 0 means the process is not running. - pid = info["pid"] - if pid == 0: - continue - - try: - proc = psutil.Process(info["pid"]) - mem_info = proc.memory_info() - print("{}: {}".format(name, mem_info)) - - for key in gauges.keys(): - gauges[key].labels(node=name).set(getattr(mem_info, key)) - except Exception as e: - print("failed to get process info for {}: {}".format(name, e)) - except Exception as e: - print("failed: {}".format(e)) - - time.sleep(delay) - - -if __name__ == "__main__": - main() diff --git a/utils/nctl-metrics/prometheus.sh b/utils/nctl-metrics/prometheus.sh deleted file mode 100755 index 5196ac7cd7..0000000000 --- a/utils/nctl-metrics/prometheus.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/sh - -#: Run a prometheus instance that collects metrics from a local nctl network. - -cd $(dirname $0) - -PROMETHEUS_TAG=prom/prometheus - -echo "Genarating config." -./gen_prometheus_config.py > prometheus.yml - -echo "Starting prometheus." 
-exec docker run \ - -i \ - --rm \ - --net=host \ - -p 9090:9090 \ - -v $(pwd)/prometheus.yml:/etc/prometheus/prometheus.yml \ - ${PROMETHEUS_TAG} diff --git a/utils/nctl-metrics/supervisord.conf b/utils/nctl-metrics/supervisord.conf deleted file mode 100644 index 606c2973c8..0000000000 --- a/utils/nctl-metrics/supervisord.conf +++ /dev/null @@ -1,24 +0,0 @@ -[supervisord] -nodaemon = True - -[supervisorctl] -serverurl = unix://%(here)s/supervisord.sock - -[rpcinterface:supervisor] -supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface - -[unix_http_server] -file=%(here)s/supervisord.sock - -[program:memory-stats-collector] -directory=%(here)s -command=%(here)s/mem_export.py -stdout_logfile=%(here)s/memory-stats-collector.log -redirect_stderr=True -environment=PYTHONUNBUFFERED=1 - -[program:prometheus-container] -directory=%(here)s -command=%(here)s/prometheus.sh -stdout_logfile=%(here)s/prometheus.log -redirect_stderr=True diff --git a/utils/nctl/README.md b/utils/nctl/README.md deleted file mode 100644 index 9e166ce30c..0000000000 --- a/utils/nctl/README.md +++ /dev/null @@ -1,46 +0,0 @@ -nctl -=============== - -CLI application to setup & control multiple local Casper networks. - -What is nctl ? --------------------------------------- - -nctl stands for **n**[etwork|ode] **c**on**t**ro**l**. Its goal is to simplify localised control of a test Casper network. - -Why nctl ? --------------------------------------- - -Many developers & community users will wish to spin up relatively small test networks in a localised setting. Adopting a standardised approach is thus helpful. - -Who uses nctl ? --------------------------------------- - -CSPR network community. This encompasses developers, validators, evaluators ... etc. 
- - -Requirements --------------------------------------- - - python3 + pip - - make - - cargo - - bash - - jq (bash utility library for parsing json) - - -Plus the requirements to build [casper-node](https://github.com/CasperLabs/casper-node#pre-requisites-for-building) - -Setup --------------------------------------- - -See [here](docs/setup.md) for setup details. - -Commands --------------------------------------- - -See [here](docs/commands.md) for command details. - -Usage --------------------------------------- - -See [here](docs/usage.md) for usage details. diff --git a/utils/nctl/activate b/utils/nctl/activate deleted file mode 100644 index d910ca883c..0000000000 --- a/utils/nctl/activate +++ /dev/null @@ -1,135 +0,0 @@ -#!/usr/bin/env bash - -# ############################################################### -# VARS -# ############################################################### - -# Set here. -export NCTL="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" - -# Set casper-node root. -NCTL_CASPER_HOME="$( cd "$( dirname "${NCTL[0]}" )" && pwd )" -export NCTL_CASPER_HOME="$( cd "$( dirname "${NCTL_CASPER_HOME[0]}" )" && pwd )" - -# Set casper-node-launcher root. -NCTL_CASPER_NODE_LAUNCHER_ROOT=${NCTL_CASPER_NODE_LAUNCHER_ROOT:-$( cd "$( dirname "${NCTL_CASPER_HOME[0]}" )" && pwd )} -export NCTL_CASPER_NODE_LAUNCHER_HOME="$NCTL_CASPER_NODE_LAUNCHER_ROOT"/casper-node-launcher - -# ############################################################### -# UTILS -# ############################################################### - -source "$NCTL"/sh/utils/main.sh - -# ############################################################### -# ALIASES -# ############################################################### - -# Assets. -alias nctl-assets-dump='source $NCTL/sh/assets/dump.sh' -alias nctl-assets-ls='source $NCTL/sh/assets/list.sh' -alias nctl-assets-setup='source $NCTL/sh/assets/setup.sh' -alias nctl-assets-teardown='source $NCTL/sh/assets/teardown.sh' - -# Binaries. 
-alias nctl-compile='source $NCTL/sh/assets/compile.sh' -alias nctl-compile-client='source $NCTL/sh/assets/compile_client.sh' -alias nctl-compile-node='source $NCTL/sh/assets/compile_node.sh' -alias nctl-compile-node-launcher='source $NCTL/sh/assets/compile_node_launcher.sh' - -# Node control. -alias nctl-clean='source $NCTL/sh/node/clean.sh' -alias nctl-clean-logs='source $NCTL/sh/node/clean_logs.sh' -alias nctl-interactive='source $NCTL/sh/node/interactive.sh' -alias nctl-join='source $NCTL/sh/node/join.sh' -alias nctl-leave='source $NCTL/sh/node/leave.sh' -alias nctl-ports='lsof -i tcp | grep casper-no | grep LISTEN | sort' -alias nctl-processes='ps -aux | grep "$NCTL" | less' -alias nctl-restart='source $NCTL/sh/node/restart.sh' -alias nctl-rotate='source $NCTL/sh/misc/rotate_nodeset.sh' -alias nctl-start='source $NCTL/sh/node/start.sh' -alias nctl-start-after-n-blocks='source $NCTL/sh/node/start_after_n_blocks.sh' -alias nctl-start-after-n-eras='source $NCTL/sh/node/start_after_n_eras.sh' -alias nctl-status='source $NCTL/sh/node/status.sh' -alias nctl-stop='source $NCTL/sh/node/stop.sh' -alias nctl-upgrade-protocol='source $NCTL/sh/node/upgrade.sh' - -# Blocking commands. -alias nctl-await-n-blocks='source $NCTL/sh/misc/await_n_blocks.sh' -alias nctl-await-n-eras='source $NCTL/sh/misc/await_n_eras.sh' -alias nctl-await-until-block-n='source $NCTL/sh/misc/await_until_block_n.sh' -alias nctl-await-until-era-n='source $NCTL/sh/misc/await_until_era_n.sh' - -# Views #1: chain. 
-alias nctl-view-chain-account='source $NCTL/sh/views/view_chain_account.sh' -alias nctl-view-chain-auction-info='source $NCTL/sh/views/view_chain_auction_info.sh' -alias nctl-view-chain-balance='source $NCTL/sh/views/view_chain_balance.sh' -alias nctl-view-chain-balances='source $NCTL/sh/views/view_chain_balances.sh' -alias nctl-view-chain-block='source $NCTL/sh/views/view_chain_block.sh' -alias nctl-view-chain-block-transfers='source $NCTL/sh/views/view_chain_block_transfers.sh' -alias nctl-view-chain-deploy='source $NCTL/sh/views/view_chain_deploy.sh' -alias nctl-view-chain-era='source $NCTL/sh/views/view_chain_era.sh' -alias nctl-view-chain-era-info='source $NCTL/sh/views/view_chain_era_info.sh' -alias nctl-view-chain-height='source $NCTL/sh/views/view_chain_height.sh' -alias nctl-view-chain-state-root-hash='source $NCTL/sh/views/view_chain_state_root_hash.sh' -alias nctl-view-chain-lfb='source $NCTL/sh/views/view_chain_lfb.sh' -alias nctl-view-chain-spec='source $NCTL/sh/views/view_chain_spec.sh' -alias nctl-view-chain-spec-accounts='source $NCTL/sh/views/view_chain_spec_accounts.sh' - -# Views #2: node. 
-alias nctl-view-node-config='source $NCTL/sh/views/view_node_config.sh' -alias nctl-view-node-error-log='source $NCTL/sh/views/view_node_log_stderr.sh' -alias nctl-view-node-log='source $NCTL/sh/views/view_node_log_stdout.sh' -alias nctl-view-node-peers='source $NCTL/sh/views/view_node_peers.sh' -alias nctl-view-node-peer-count='source $NCTL/sh/views/view_node_peer_count.sh' -alias nctl-view-node-ports='source $NCTL/sh/views/view_node_ports.sh' -alias nctl-view-node-rpc-endpoint='source $NCTL/sh/views/view_node_rpc_endpoint.sh' -alias nctl-view-node-rpc-schema='source $NCTL/sh/views/view_node_rpc_schema.sh' -alias nctl-view-node-status='source $NCTL/sh/views/view_node_status.sh' -alias nctl-view-node-storage='source $NCTL/sh/views/view_node_storage.sh' -alias nctl-view-node-storage-consensus='source $NCTL/sh/views/view_node_storage_consensus.sh' - -# Views #3: node metrics. -alias nctl-view-node-metrics='source $NCTL/sh/views/view_node_metrics.sh' -alias nctl-view-node-pending-deploy-count='source $NCTL/sh/views/view_node_metrics.sh metric=pending_deploy' -alias nctl-view-node-finalised-block-count='source $NCTL/sh/views/view_node_metrics.sh metric=amount_of_blocks' -alias nctl-view-node-finalisation-time='source $NCTL/sh/views/view_node_metrics.sh metric=finalization_time' - -# Views #4: faucet. -alias nctl-view-faucet-account='source $NCTL/sh/views/view_faucet_account.sh' - -# Views #5: user. -alias nctl-view-user-account='source $NCTL/sh/views/view_user_account.sh' - -# Views #6: validator. -alias nctl-view-validator-account='source $NCTL/sh/views/view_validator_account.sh' - -# Contracts #1: Transfers. 
-alias nctl-transfer='source $NCTL/sh/contracts-transfers/do_dispatch_native.sh' -alias nctl-transfer-native='source $NCTL/sh/contracts-transfers/do_dispatch_native.sh' -alias nctl-transfer-native-batch-dispatch='source $NCTL/sh/contracts-transfers/do_dispatch_native_batch.sh' -alias nctl-transfer-native-batch-prepare='source $NCTL/sh/contracts-transfers/do_prepare_native_batch.sh' -alias nctl-transfer-wasm='source $NCTL/sh/contracts-transfers/do_dispatch_wasm.sh' -alias nctl-transfer-wasm-batch-dispatch='source $NCTL/sh/contracts-transfers/do_dispatch_wasm_batch.sh' -alias nctl-transfer-wasm-batch-prepare='source $NCTL/sh/contracts-transfers/do_prepare_wasm_batch.sh' - -# Contracts #2: Auction. -alias nctl-auction-activate='source $NCTL/sh/contracts-auction/do_bid_activate.sh' -alias nctl-auction-bid='source $NCTL/sh/contracts-auction/do_bid.sh' -alias nctl-auction-withdraw='source $NCTL/sh/contracts-auction/do_bid_withdraw.sh' -alias nctl-auction-delegate='source $NCTL/sh/contracts-auction/do_delegate.sh' -alias nctl-auction-undelegate='source $NCTL/sh/contracts-auction/do_delegate_withdraw.sh' - -# Contracts #3: ERC-20. -alias nctl-erc20-approve='source $NCTL/sh/contracts-erc20/do_approve.sh' -alias nctl-erc20-install='source $NCTL/sh/contracts-erc20/do_install.sh' -alias nctl-erc20-fund-users='source $NCTL/sh/contracts-erc20/do_fund_users.sh' -alias nctl-erc20-transfer='source $NCTL/sh/contracts-erc20/do_transfer.sh' -alias nctl-erc20-view-allowances='source $NCTL/sh/contracts-erc20/view_allowances.sh' -alias nctl-erc20-view-details='source $NCTL/sh/contracts-erc20/view_details.sh' -alias nctl-erc20-view-balances='source $NCTL/sh/contracts-erc20/view_balances.sh' - -# Contracts #4: KV storage. 
-alias nctl-kv-storage-get-key='source $NCTL/sh/contracts-kv/get_key.sh' -alias nctl-kv-storage-install='source $NCTL/sh/contracts-kv/do_install.sh' -alias nctl-kv-storage-set-key='source $NCTL/sh/contracts-kv/set_key.sh' diff --git a/utils/nctl/docs/commands-ctl.md b/utils/nctl/docs/commands-ctl.md deleted file mode 100644 index 7b93432548..0000000000 --- a/utils/nctl/docs/commands-ctl.md +++ /dev/null @@ -1,141 +0,0 @@ -# NCTL Control Commands - -## Overview - -The aim of NCTL is to enable a user to spin up a test network within 15-20 seconds. Once a network is up & running the user can control each node as well adding new nodes to the network. Hereby are listed the set of NCTL commands to control a test network. - -### nctl-clean node={X:-all} - -Stops node X (if running) & deletes node logs & storage. - -``` -nctl-clean - -nctl-clean node=all (same as above) - -nctl-clean node=3 -``` - -### nctl-clean-logs node={X:-all} - -Deletes node logs. - -``` -nctl-clean-logs - -nctl-clean-logs node=all (same as above) - -nctl-clean-logs node=3 -``` - -### nctl-interactive node={X:-1} loglevel={Y:-($RUST_LOG | debug)} - -Starts (in interactive mode) node X with logging level set to Y. - -``` -nctl-interactive - -nctl-interactive node=1 (same as above) - -nctl-interactive node=3 -``` - -### nctl-join node={X:-6} amount={Y:-1000000} rate={Z:-125} - -Attempts to join node X to test network by submitting an auction bid of Y with a delegation rate of Z. This command will await 3 eras after the auction bid has been submitted before starting node with a trusted hash. - -``` -nctl-join - -nctl-join node=6 amount=1000000 rate=125 (same as above) - -nctl-join node=8 amount=500000 rate=250 -``` - -### nctl-leave node={X:-6} amount={Y:-1000000} - -Attempts to detach node X from test network by submitting an auction withdrawal Y. This command awaits 1 era before stopping the node. 
- -``` -nctl-leave - -nctl-leave node=6 amount=1000000 (same as above) - -nctl-leave node=8 amount=500000 -``` - -### nctl-restart node={X:-all} clean={Y:-true} - -Restarts node Y, if Y=all then all nodes are restarted. Node storage state and logs are cleared by default. - -``` -nctl-restart - -nctl-restart node=all clean=true (same as above) - -nctl-restart node=3 clean=false -``` - -### nctl-rotate - -Attempts to rotate a network's validator set - used to verify joining/leaving scenarios. - -``` -nctl-rotate -``` - -### nctl-start node={X:-all} hash=Y loglevel={Z:-($RUST_LOG | debug)} - -Starts node X, if X=all then all nodes are started. If trusted hash is specified then it is injected into node's config prior to spin-up. - -``` -nctl-start - -nctl-start node=all loglevel=debug (same as above) - -nctl-start node=3 hash= loglevel=info -``` - -### nctl-start-after-n-blocks node={W:-all} offset={X:-1} hash=Y loglevel={Z:-($RUST_LOG | debug)} - -Starts node W after chain has advanced by X blocks. If W=all then all nodes are started. If hash is specified then it is injected into node's config prior to spin-up. - -``` -nctl-start-after-n-blocks - -nctl-start-after-n-blocks node=all (same as above) - -nctl-start-after-n-blocks node=6 offset=4 hash= -``` - -### nctl-start-after-n-eras node={W:-all} offset={X:-1} hash=Y loglevel={Z:-($RUST_LOG | debug)} - -Starts node W after chain has advanced by X eras. If W=all then all nodes are started. If hash is specified then it is injected into node's config prior to spin-up. - -``` -nctl-start-after-n-eras - -nctl-start-after-n-eras node=all offset=1 (same as above) - -nctl-start-after-n-eras node=8 offset=4 hash= -``` - -### nctl-status - -Displays process status of all nodes. - -``` -nctl-status -``` - -### nctl-stop node={Y:-all} - -Stops node Y, if Y=all then all nodes are stopped. 
- -``` -nctl-stop - -nctl-stop node=all (same as above) - -nctl-stop node=3 -``` diff --git a/utils/nctl/docs/commands-deploy-auction.md b/utils/nctl/docs/commands-deploy-auction.md deleted file mode 100644 index 7e8dab1ac4..0000000000 --- a/utils/nctl/docs/commands-deploy-auction.md +++ /dev/null @@ -1,49 +0,0 @@ -# NCTL Deploy Commands - Proof Of Stake Auction - -### nctl-auction-bid node={X:-6} amount={Y:-1000000} rate={Z:-125} - -Dispatches on behalf of validator X, a Proof-Of-Stake auction bid **submission** deploy for amount Y (motes) with a delegation rate of Z. Displays relevant deploy hash for subsequent querying. - -``` -nctl-auction-bid - -nctl-auction-bid node=6 amount=6000000000000000 rate=125 (same as above) - -nctl-auction-bid node=7 amount=7000000000000000 rate=250 -``` - -### nctl-auction-withdraw node={X:-6} amount={Y:-1000000} - -Dispatches on behalf of validator X, a Proof-Of-Stake auction bid **withdrawal** deploy for amount Y (motes). Displays relevant deploy hash for subsequent querying. - -``` -nctl-auction-withdraw - -nctl-auction-withdraw node=6 amount=6000000000000000 (same as above) - -nctl-auction-withdraw node=7 amount=7000000000000000 -``` - -### nctl-auction-delegate amount={X:-1000000} delegator={Y:-1} validator={Z:-1} - -Dispatches on behalf of user Y, a Proof-Of-Stake **delegate** bid for amount X (motes) nominating validator Y. Displays relevant deploy hash for subsequent querying. - -``` -nctl-auction-delegate - -nctl-auction-delegate amount=1000000 delegator=1 validator=1 (same as above) - -nctl-auction-delegate amount=2000000 delegator=3 validator=4 -``` - -### nctl-auction-undelegate amount={X:-1000000} delegator={Y:-1} validator={Z:-1} - -Dispatches on behalf of user Y, a Proof-Of-Stake **undelegate** bid for amount X (motes) un-nominating validator Y. Displays relevant deploy hash for subsequent querying. 
- -``` -nctl-auction-undelegate - -nctl-auction-undelegate amount=1000000 delegator=1 validator=1 (same as above) - -nctl-auction-undelegate amount=2000000 delegator=3 validator=4 -``` diff --git a/utils/nctl/docs/commands-deploy-erc20.md b/utils/nctl/docs/commands-deploy-erc20.md deleted file mode 100644 index 444f6deeea..0000000000 --- a/utils/nctl/docs/commands-deploy-erc20.md +++ /dev/null @@ -1,75 +0,0 @@ -# NCTL Deploy Commands - ERC-20 - -### nctl-erc20-approve owner={X:-1} spender={Y:-1} amount={Z:-1000000000} - -Allows a user (the spender) to withdraw upto Z tokens from the account of a token holder (the owner). - -``` -nctl-erc20-approve - -nctl-erc20-approve owner=1 spender=2 amount=1000000000 (same as above) - -nctl-erc20-approve owner=2 spender=4 amount=4400000000 (same as above) -``` - -### nctl-erc20-fund-users amount={X:-1000000000} - -Transfers from the installed ERC-20 smart contract X tokens to each of the test user accounts. - -``` -nctl-erc20-fund-users - -nctl-erc20-fund-users amount=1000000000 (same as above) - -nctl-erc20-fund-users amount=4440000000 -``` - -### nctl-erc20-install name={X:-Acme Token} symbol={Y:-Acme} supply={Z:-1e33} - -Installs the ERC0-20 smart contract under the network's faucet account. - -NOTE : The compiled wasm file must have been previously moved to the test network's bin folder (e.g. `casper-node/utils/nctl/assets/net-1/bin`). For further information refer to the network's smart contract SDK. - -``` -nctl-erc20-install - -nctl-erc20-install name="Acme Token" symbol="ACME" supply=1000000000000000000000000000000000 (same as above) - -nctl-erc20-install name="Casper Labs" symbol="CSPR" supply=1000000000000000000000000000 -``` - -### nctl-erc20-transfer from={X:-1} to={Y:-1} amount={Z:-1000000000} - -Transfers Z tokens from user X to user Y. 
- -``` -nctl-erc20-transfer-from - -nctl-erc20-transfer-from from=1 to=2 amount=1000000000 (same as above) - -nctl-erc20-transfer-from from=2 to=5 amount=4440000000 -``` - -### nctl-erc20-view-balances net={X:-1} node={Y:-1} - -Renders ERC-20 token balances of the network's faucet and user accounts. - -``` -nctl-erc20-view-balances - -nctl-erc20-view-balances net=1 node=1 (same as above) - -nctl-erc20-view-balances net=2 node=4 -``` - -### nctl-erc20-view-details net={X:-1} node={Y:-1} - -Renders information related to the installed smart contract, e.g. contract hash. - -``` -nctl-erc20-view-details - -nctl-erc20-view-details net=1 node=1 (same as above) - -nctl-erc20-view-details net=2 node=4 -``` diff --git a/utils/nctl/docs/commands-deploy-transfers.md b/utils/nctl/docs/commands-deploy-transfers.md deleted file mode 100644 index 32488197e6..0000000000 --- a/utils/nctl/docs/commands-deploy-transfers.md +++ /dev/null @@ -1,75 +0,0 @@ -# NCTL Deploy Commands - Simple Transfers - -### nctl-transfer-native amount={A:-1000000000} transfers={T:-100} interval={I:-0.01} user={U:-1} node={N:-random} - -Dispatches T native transfers from network faucet to user U. If node=all then each transfer is dispatched to a node chosen JIT. If node=random then all transfers are dispatched to node chosen up-front. - -``` -nctl-transfer-native - -nctl-transfer-native net=1 node=1 payment=1000000000 gas=10 transfers=100 interval=0.01 user=1 (same as above) - -nctl-transfer-native transfers=10000 interval=0.001 -``` - -Note: has a synonym: `nctl-transfer` - -### nctl-transfer-native-batch-dispatch net={X:-1} node={Y:-1} interval={Z:-0.01} node={N:-random} - -Dispatches to node Y in network X previously prepared native transfers at an interval of Z seconds. 
- -``` -nctl-transfer-native-batch-dispatch - -nctl-transfer-native-batch-dispatch net=1 node=1 interval=0.01 (same as above) - -nctl-transfer-native-batch-dispatch net=1 node=3 interval=0.001 -``` - -### nctl-transfer-native-batch-prepare amount={A:-1000000000} count={C:-10} size={S:-10} - -Writes to file system C batches of signed native transfers, with S deploys per user per batch. - -``` -nctl-transfer-native-batch-prepare - -nctl-transfer-native-batch-prepare amount=1000000000 count=10 size=10 net=1 node=1 payment=1000000000 gas=10 (same as above) - -nctl-transfer-native-batch-prepare transfers=10000 interval=0.001 -``` - -### nctl-transfer-wasm amount={A:-1000000000} transfers={T:-100} interval={I:-0.01} user={U:-1} node={N:-random} - -Dispatches to node Y in network X, T wasm based transfers from network faucet to user U. If node=all then transfers are dispatched to nodes in a round-robin fashion. - -``` -nctl-transfer-wasm - -nctl-transfer-wasm net=1 node=1 payment=1000000000 gas=10 transfers=100 interval=0.01 user=1 (same as above) - -nctl-transfer-wasm transfers=10000 interval=0.001 -``` - -### nctl-transfer-wasm-batch-dispatch net={X:-1} node={Y:-1} interval={Z:-0.01} node={N:-random} - -Dispatches to node Y in network X previously prepared wasm transfers at an interval of Z seconds. - -``` -nctl-transfer-wasm-batch-dispatch - -nctl-transfer-wasm-batch-dispatch net=1 node=1 interval=0.01 (same as above) - -nctl-transfer-wasm-batch-dispatch net=1 node=3 interval=0.001 -``` - -### nctl-transfer-wasm-batch-prepare amount={A:-1000000000} count={C:-10} size={S:-10} - -Writes to file system C batches of signed wasm transfers, with S deploys per user per batch. 
- -``` -nctl-transfer-wasm-batch-prepare - -nctl-transfer-wasm-batch-prepare amount=1000000000 count=10 size=10 net=1 node=1 payment=1000000000 gas=10 (same as above) - -nctl-transfer-wasm-batch-prepare transfers=10000 interval=0.001 -``` diff --git a/utils/nctl/docs/commands-setup.md b/utils/nctl/docs/commands-setup.md deleted file mode 100644 index 1f82c548c0..0000000000 --- a/utils/nctl/docs/commands-setup.md +++ /dev/null @@ -1,67 +0,0 @@ -# NCTL Setup Commands - -## Overview - -The aim of NCTL is to enable a user to spin up a test network within 15-20 seconds. Once a network is up & running the user should be able to control each of the node's within the network as well as add new nodes to the network. Hereby are listed the set of NCTL commands to setup assets (binaries, config files, directories ... etc ) associated with a test network. - -## Compiling network binaries - -The NCTL library can be used to compile the node's binary set, i.e. node, client & smart contract binaries. Note that NCTL library does not immediately copy compiled binary sets into a test directory, that is done whilst setting up test assets (see `nctl-assets-setup` below). - -### nctl-compile - -Compiles casper node, node launcher, client + client contracts using `make` + `cargo`. - -### nctl-compile-node - -Compiles casper node using `make` + `cargo`. - -### nctl-compile-node-launcher - -Compiles casper node launcher using `cargo`. - -### nctl-compile-client - -Compiles casper client + client contracts using `make` + `cargo`. - -## Managing network assets - -### nctl-assets-ls - -List previously created network assets. - -### nctl-assets-setup net={V:-1} nodes={W:-5} delay={X:-30} accounts_path={Y:-""} chainspec_path={Z:-"casper-node/resources/local/chainspec.toml.in"} - -Sets up assets required to run a local network - this includes binaries, chainspec, config, faucet, keys ... etc. 
NCTL creates assets for 2 nodesets: genesis & non-genesis - this permits testing nodeset rotation scenarios (see `nctl-rotate`). Thus if nodes=5, then assets for 10 nodes are generated in total. - -If `accounts_path` points to a valid custom accounts.toml template file, then the template is copied, & parsed. The parsing process injects faucet, validator and user public keys into the copied template file. An example custom accounts.toml can be inspected [here](example-custom-accounts.toml). - -If `chainspec_path` points to a valid custom chainspec.toml, then the template is copied across to the test network asset set. - -``` -nctl-assets-setup - -nctl-assets-setup net=1 nodes=5 deelay=30 (same as above) - -nctl-assets-setup net=2 nodes=10 delay=60 -``` - -### nctl-assets-teardown net={X:-1} - -Stops network & destroys all related assets. - -``` -nctl-assets-teardown - -nctl-assets-teardown net=1 (same as above) - -nctl-assets-teardown net=2 -``` - -### nctl-assets-dump - -Dumps transient network assets such as logs + configuration. - -``` -nctl-assets-dump -``` diff --git a/utils/nctl/docs/commands-view-accounts.md b/utils/nctl/docs/commands-view-accounts.md deleted file mode 100644 index ca63288234..0000000000 --- a/utils/nctl/docs/commands-view-accounts.md +++ /dev/null @@ -1,43 +0,0 @@ -# NCTL Viewing Account Information - -## Overview - -As part of the process of setting up a network, NCTL creates various on-chain accounts. Such accounts relate either to node operators (i.e. validators), to a network level faucet, or to tests user accounts. Each account is initialised with a valid key pair and an on-chain CSPR balance. NCTL greatly simplifies account management in a test setting. - -## Viewing faucet information - -### nctl-view-faucet-account - -Displays faucet account information. - -``` -nctl-view-faucet-account -``` - -## Viewing user information - -### nctl-view-user-account user={X:-1} - -Displays on-chain user account information. 
- -``` -nctl-view-user-account - -nctl-view-user-account user=1 (same as above) - -nctl-view-user-account user=3 -``` - -## Viewing validator information - -### nctl-view-validator-account node={X:-1} - -Displays on-chain validator account information. - -``` -nctl-view-validator-account - -nctl-view-validator-account node=1 (same as above) - -nctl-view-validator-account node=4 -``` diff --git a/utils/nctl/docs/commands-view-chain.md b/utils/nctl/docs/commands-view-chain.md deleted file mode 100644 index 6fab468401..0000000000 --- a/utils/nctl/docs/commands-view-chain.md +++ /dev/null @@ -1,131 +0,0 @@ -# NCTL Viewing Chain Information - -### nctl-view-chain-account account-key=X root-hash={Y:-LATEST} - -Displays result of performing a state query by account key X at state root hash Y. If state root hash is undefined then it defaults to latest. - -``` -nctl-view-chain-account account-key=AN_ACCOUNT_KEY - -nctl-view-chain-account account-key=AN_ACCOUNT_KEY root-hash=A_STATE_ROOT_HASH -``` - -### nctl-view-chain-auction-info - -Displays Proof of Stake auction contract information. - -``` -nctl-view-chain-auction-info -``` - -### nctl-view-chain-balance purse-uref=X root-hash={Y:-LATEST} - -Displays balance of an account purse with uref X at state root hash Y. If state root hash is undefined then it defaults to latest. - -``` -nctl-view-chain-balance purse-uref=A_PURSE_UREF - -nctl-view-chain-balance purse-uref=A_PURSE_UREF root-hash=A_STATE_ROOT_HASH -``` - -### nctl-view-chain-balances - -Displays balances of faucet, validator and user accounts (main purses). - -``` -nctl-view-chain-balances -``` - -### nctl-view-chain-block block={X:-LATEST} - -Displays details of block X. If block is undefined then it defaults to latest. - -``` -nctl-view-chain-block - -nctl-view-chain-block block=A_BLOCK_HASH -``` - -### nctl-view-chain-block-transfers block={X:-LATEST} - -Displays details of transfers within block X. If block is undefined then it defaults to latest. 
- -``` -nctl-view-chain-block-transfers - -nctl-view-chain-block-transfers block=A_BLOCK_HASH -``` - -### nctl-view-chain-deploy deploy={X:-$DEPLOY_HASH} - -Displays details of deploy X. The hash of deploy X may be assigned as output of a previous command. - -``` -nctl-view-chain-deploy - -nctl-view-chain-deploy deploy=A_DEPLOY_HASH -``` - -### nctl-view-chain-era node={X:-all} - -Displays a chain's era at node X. - -``` -nctl-view-chain-era - -nctl-view-chain-era node=all (same as above) - -nctl-view-chain-era node=3 -``` - -### nctl-view-chain-era-info node={X:-1} - -Displays switch block era information at node X. - -``` -nctl-view-chain-era-info - -nctl-view-chain-era-info node=1 (same as above) - -nctl-view-chain-era-info node=3 -``` - -### nctl-view-chain-height node={X:-all} - -Displays a chain's block height at node X. - -``` -nctl-view-chain-height - -nctl-view-chain-height node=all (same as above) - -nctl-view-chain-height node=3 -``` - -### nctl-view-chain-spec - -Displays a chain's chainspec toml file. - -``` -nctl-view-chain-spec -``` - -### nctl-view-chain-spec-accounts - -Displays a chain's accounts.toml file. - -``` -nctl-view-chain-spec-accounts -``` - -### nctl-view-chain-state-root-hash node={X:-all} - -Displays a chain's state root hash. - -``` -nctl-view-chain-state-root-hash - -nctl-view-chain-state-root-hash node=all (same as above) - -nctl-view-chain-state-root-hash node=3 -``` diff --git a/utils/nctl/docs/commands-view-node.md b/utils/nctl/docs/commands-view-node.md deleted file mode 100644 index ade986b58e..0000000000 --- a/utils/nctl/docs/commands-view-node.md +++ /dev/null @@ -1,161 +0,0 @@ -# NCTL Viewing Node Information - -### nctl-view-node-config node={X:-1} - -Displays configuraiton file node X. - -``` -nctl-view-node-config - -nctl-view-node-config node=1 (same as above) - -nctl-view-node-config node=3 -``` - -### nctl-view-node-error-log node={X:-1} - -Displays error log of node X. 
- -``` -nctl-view-node-error-log - -nctl-view-node-error-log node=1 (same as above) - -nctl-view-node-error-log node=3 -``` - -### nctl-view-node-finalisation-time node={X:-all} - -Renders time to finalisation of blocks at node X to stdout. - -``` -nctl-view-node-metric-finalisation-time - -nctl-view-node-metric-finalisation-time node=all (same as above) -``` - -### nctl-view-node-finalised-block-count node={X:-all} - -Renders count of finalised blocks at node X to stdout. - -``` -nctl-view-node-metric-finalised-block-count - -nctl-view-node-metric-finalised-block-count node=all (same as above) -``` - -### nctl-view-node-log node={X:-1} - -Displays stdout log of node X. - -``` -nctl-view-node-log - -nctl-view-node-log node=1 (same as above) - -nctl-view-node-log node=3 -``` - -### nctl-view-node-metrics node={X:-all} metric={Y:-all} - -Renders metrics of node X to stdout. Assign the metric parameter to filter accordingly. - -``` -nctl-view-node-metrics - -nctl-view-node-metrics node=all metric=all (same as above) - -nctl-view-node-metrics node=all metric=scheduler_queue_regular_count - -nctl-view-node-metrics node=2 metric=runner_events -``` - -### nctl-view-node-peers node={X:-all} - -Renders peers of node X to stdout. - -``` -nctl-view-node-peers - -nctl-view-node-peers node=all (same as above) - -nctl-view-node-peers node=3 -``` - -### nctl-view-node-pending-deploy-count node={X:-all} - -Renders count of pending deploys at node X to stdout. - -``` -nctl-view-node-metric-pending-deploy - -nctl-view-node-metric-pending-deploy node=all (same as above) -``` - -### nctl-view-node-ports node={X:-all} - -Renders ports of node X to stdout. - -``` -nctl-view-node-ports - -nctl-view-node-ports node=all (same as above) - -nctl-view-node-ports node=3 -``` - -### nctl-view-node-rpc-endpoint endpoint={X:-all} - -Renders information related to RPC schema endpoint X to stdout, if X=all then a list of all endpoints is displayed. 
- -``` -nctl-view-node-rpc-endpoint - -nctl-view-node-rpc-endpoint endpoint=all (same as above) - -nctl-view-node-rpc-endpoint endpoint=state_get_item -``` - -### nctl-view-node-rpc-schema - -Renders RPC schema exposed to stdout. - -``` -nctl-view-node-rpc-schema -``` - -### nctl-view-node-status node={X:-all} - -Renders status of node X to stdout. - -``` -nctl-view-node-status - -nctl-view-node-status node=all (same as above) - -nctl-view-node-status node=3 -``` - -### nctl-view-node-storage node={X:-all} - -Renders storage stats of node X to stdout. - -``` -nctl-view-node-storage - -nctl-view-node-storage node=all (same as above) - -nctl-view-node-storage node=3 -``` - -### nctl-view-node-storage-consensus node={X:-all} - -Renders consensus storage stats of node X to stdout. - -``` -nctl-view-node-storage-consensus - -nctl-view-node-storage-consensus node=all (same as above) - -nctl-view-node-storage-consensus node=3 -``` diff --git a/utils/nctl/docs/commands.md b/utils/nctl/docs/commands.md deleted file mode 100644 index ce91b51ae6..0000000000 --- a/utils/nctl/docs/commands.md +++ /dev/null @@ -1,37 +0,0 @@ -# NCTL Commands - -## Overview - -Upon successful setup, NCTL commands are made available via aliases for execution from within a terminal session. All such commands are prefixed by `nctl-` and allow you to perform various tasks: - -## Node Control Commands - -- For setting up a network see [here](commands-setup.md). - -- For controlling a network see [here](commands-ctl.md). - -## View Commands - -- For viewing account information see [here](commands-view-accounts.md). - -- For viewing chain information see [here](commands-view-chain.md). - -- For viewing node information see [here](commands-view-node.md). - -## Deploy Dispatch Commands - -- For dispatching simple transfer deploys see [here](commands-deploy-transfers.md). - -- For dispatching auction deploys see [here](commands-deploy-auction.md). 
- -- For dispatching ERC-20 deploys see [here](commands-deploy-erc20.md). - -## Notes - -- NOTE 1: all ordinal identifiers are 1 based. - -- NOTE 2: all command parameters have default values to simplify the general case of testing a single local network. - -- NOTE 3: when executing either the `nctl-interactive` or `nctl-start` commands, the node logging level output can be assigned by passing in the `loglevel` parameter. If you do not pass in this variable then NCTL defaults either to the current value of RUST_LOG or `debug`. - -- NOTE 4: many command will accept a `node` parameter to determine to which node a query or deploy will be dispatched. If node=random then a dispatch node is determined JIT. If node=0 then a single node is chosen for dispatch. diff --git a/utils/nctl/docs/example-custom-accounts.toml b/utils/nctl/docs/example-custom-accounts.toml deleted file mode 100644 index 4c78889100..0000000000 --- a/utils/nctl/docs/example-custom-accounts.toml +++ /dev/null @@ -1,134 +0,0 @@ -# FAUCET. -[[accounts]] -public_key = "PBK_FAUCET" -balance = "1000000000000000000000000000000000" - -# VALIDATOR 1. -[[accounts]] -public_key = "PBK_V1" -balance = "1000000000000000000000000000000000" - -[accounts.validator] -bonded_amount = "1000000000000001" -delegation_rate = 1 - -# VALIDATOR 2. -[[accounts]] -public_key = "PBK_V2" -balance = "1000000000000000000000000000000000" - -[accounts.validator] -bonded_amount = "1000000000000002" -delegation_rate = 2 - -# VALIDATOR 3. -[[accounts]] -public_key = "PBK_V3" -balance = "1000000000000000000000000000000000" - -[accounts.validator] -bonded_amount = "1000000000000003" -delegation_rate = 3 - -# VALIDATOR 4. -[[accounts]] -public_key = "PBK_V4" -balance = "1000000000000000000000000000000000" - -[accounts.validator] -bonded_amount = "1000000000000004" -delegation_rate = 4 - -# VALIDATOR 5. 
-[[accounts]] -public_key = "PBK_V5" -balance = "1000000000000000000000000000000000" - -[accounts.validator] -bonded_amount = "1000000000000005" -delegation_rate = 5 - -# VALIDATOR 6. -[[accounts]] -public_key = "PBK_V6" -balance = "1000000000000000000000000000000000" - -# VALIDATOR 7. -[[accounts]] -public_key = "PBK_V7" -balance = "1000000000000000000000000000000000" - -# VALIDATOR 8. -[[accounts]] -public_key = "PBK_V8" -balance = "1000000000000000000000000000000000" - -# VALIDATOR 9. -[[accounts]] -public_key = "PBK_V9" -balance = "1000000000000000000000000000000000" - -# VALIDATOR 10. -[[accounts]] -public_key = "PBK_V10" -balance = "1000000000000000000000000000000000" - -# USER 1. -[[delegators]] -validator_public_key = "PBK_V1" -delegator_public_key = "PBK_U1" -balance = "1000000000000000000000000000000000" -delegated_amount = "1000000000000001" - -# USER 2. -[[delegators]] -validator_public_key = "PBK_V2" -delegator_public_key = "PBK_U2" -balance = "1000000000000000000000000000000000" -delegated_amount = "1000000000000002" - -# USER 3. -[[delegators]] -validator_public_key = "PBK_V3" -delegator_public_key = "PBK_U3" -balance = "1000000000000000000000000000000000" -delegated_amount = "1000000000000003" - -# USER 4. -[[delegators]] -validator_public_key = "PBK_V4" -delegator_public_key = "PBK_U4" -balance = "1000000000000000000000000000000000" -delegated_amount = "1000000000000004" - -# USER 5. -[[delegators]] -validator_public_key = "PBK_V5" -delegator_public_key = "PBK_U5" -balance = "1000000000000000000000000000000000" -delegated_amount = "1000000000000005" - -# USER 6. -[[accounts]] -public_key = "PBK_U6" -balance = "1000000000000000000000000000000000" - -# USER 7. -[[accounts]] -public_key = "PBK_U7" -balance = "1000000000000000000000000000000000" - -# USER 8. -[[accounts]] -public_key = "PBK_U8" -balance = "1000000000000000000000000000000000" - -# USER 9. -[[accounts]] -public_key = "PBK_U9" -balance = "1000000000000000000000000000000000" - -# USER 10. 
-[[accounts]] -public_key = "PBK_U10" -balance = "1000000000000000000000000000000000" diff --git a/utils/nctl/docs/setup.md b/utils/nctl/docs/setup.md deleted file mode 100644 index 40d966ee1b..0000000000 --- a/utils/nctl/docs/setup.md +++ /dev/null @@ -1,45 +0,0 @@ -# NCTL setup - -### Step 0 - pre-requisites. - -0. bash shell. -1. python3 + pip3. -2. The casper-node software (https://github.com/CasperLabs/casper-node) cloned into YOUR_WORKING_DIRECTORY. -3. The casper-node-launcher software (https://github.com/CasperLabs/casper-node-launcher) cloned into YOUR_WORKING_DIRECTORY. - -### Step 1 - install pre-requisites. - -``` -# Supervisor - cross-platform process manager. -python3 -m pip install supervisor - -# toml - Config file parser. -python3 -m pip install toml - -# Rust toolchain and smart contracts - required by casper-node software. -cd YOUR_WORKING_DIRECTORY/casper-node -make setup-rs -``` - -### Step 2 - extend bashrc file to make NCTL commands available from terminal session. - -``` -cd YOUR_WORKING_DIRECTORY/casper-node - -cat >> $HOME/.bashrc <<- EOM - -# ---------------------------------------------------------------------- -# CASPER - NCTL -# ---------------------------------------------------------------------- - -# Activate NCTL shell. -. $(pwd)/utils/nctl/activate - -EOM -``` - -### Step 3 - refresh bash session. - -``` -. $HOME/.bashrc -``` diff --git a/utils/nctl/docs/usage.md b/utils/nctl/docs/usage.md deleted file mode 100644 index f9020753b4..0000000000 --- a/utils/nctl/docs/usage.md +++ /dev/null @@ -1,119 +0,0 @@ -# NCTL Usage - -Once activated, NCTL commands can be used to setup & control nodes within a local test network. Most NCTL users will be testing a single local network, however developers wishing to test multiple networks in parallel may do so. This usage guide focusses upon the former use case, i.e. testing a single network, and thus all NCTL commands described below are executed with their default values. 
Please refer [here](commands.md) for full details of supported NCTL commands. - -## Step 0: Compile network binaries. - -Prior to testing a network ensure that the binary sets are available: - -``` -nctl-compile -``` - -This runs `make setup-rs`, and compiles both `casper-node` and `casper-client` in release mode. - -## Step 1: Create network assets. - -- Once network binary compilation is complete we need to setup test network assets. The following command instantiates the full set of assets required to run a 5 node local network with 5 users. It also creates the assets for a further 5 nodes in order to test join/leave scenarios. The assets are copied to `$NCTL/assets/net-1`, where $NCTL is the nctl home directory. - -``` -nctl-assets-setup -``` - -- Examining the contents of `$NCTL/assets/net-1` you will observe the following (self-explanatory) sub-folders: - -``` -/bin -/chainspec -/daemon -/faucet -/nodes -/users -``` - -- Examining the contents of `$NCTL/assets/net-1/nodes/node-1`, i.e. node 1, you will observe the following (self-explanatory) sub-folders: - -``` -/config -/keys -/logs -/storage -``` - -- Examining the contents of `$NCTL/assets/net-1/users/user-1`, i.e. user 1, you will find both cryptographic keys & account public key (hex) files. - -- Once assets have been created you are advised to review contents of toml files, i.e. `/chainspec/chainspec.toml` & the various `/nodes/node-X/config/node-config.toml` files. - -- If you wish to test modifications to a network's chainspec, you can: - -``` -vi $NCTL/assets/net-1/chainspec/chainspec.toml -``` - -- If you wish to test modifications to a node's config, e.g. node 3, you can: - -``` -vi $NCTL/assets/net-1/nodes/node-3/config/node-config.toml -``` - -## Step 2: Start a node in interactive mode. - -- Starting a node interactively is useful to verify that the network assets have been correctly established and that the network is ready for testing. 
- -``` -nctl-interactive -``` - -## Step 3: Start a network in daemon mode. - -- To start with all or a single node in daemonised mode (this is the preferred modus operandi): - -``` -# Start all nodes in daemon mode. -nctl-start - -# Start node 1 in daemon mode. -nctl-start node=1 -``` - -- To view process status of all daemonised nodes: - -``` -nctl-status -``` - -- To stop either a single or all daemonised nodes: - -``` -# Stop all nodes. -nctl-stop - -# Stop node 1. -nctl-stop node=1 -``` - -## Step 4: Dump logs & other files. - -Upon observation of a network behavioural anomaly you can dump relevant assets such as logs & configuration as follows: - -``` -# Writes dumped files -> $NCTL/dumps/net-1 -nctl-assets-dump -``` - -## Step 5: Viewing information. - -You can view chain, faucet, node & user information using the set of `nctl-view-*` commands. See [here](commands.md) for further information. - -## Step 6: End testing session. - -To teardown a network once a testing session is complete: - -``` -# Delete previously created assets and stops all running nodes. -nctl-assets-teardown -``` - -## Summary - -Using NCTL one can spin up either a single or multiple test networks. Each network is isolated in terms of its assets - this includes port numbers. The NCTL commands parameter defaults are set for the general use case of testing a single local 5 node network. You are encouraged to integrate NCTL into your daily workflow so as to standardise the manner in which the network is tested in a localised setting. diff --git a/utils/nctl/sh/assets/compile.sh b/utils/nctl/sh/assets/compile.sh deleted file mode 100644 index faa8dcbf00..0000000000 --- a/utils/nctl/sh/assets/compile.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/env bash -# -####################################### -# Compiles software. -# Globals: -# NCTL - path to nctl home directory. 
-######################################## - -source "$NCTL"/sh/assets/compile_node.sh -source "$NCTL"/sh/assets/compile_node_launcher.sh -source "$NCTL"/sh/assets/compile_client.sh diff --git a/utils/nctl/sh/assets/compile_client.sh b/utils/nctl/sh/assets/compile_client.sh deleted file mode 100644 index d1989c7639..0000000000 --- a/utils/nctl/sh/assets/compile_client.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/usr/bin/env bash -# -####################################### -# Compiles client software. -# Globals: -# NCTL - path to nctl home directory. -# NCTL_CASPER_HOME - path to casper node repo. -# NCTL_COMPILE_TARGET - flag indicating whether software compilation target is release | debug. -######################################## - -# Import utils. -source "$NCTL"/sh/utils/main.sh - -pushd "$NCTL_CASPER_HOME" || exit - -# Build client utility. -if [ "$NCTL_COMPILE_TARGET" = "debug" ]; then - cargo build --package casper-client -else - cargo build --release --package casper-client -fi - -# Build client side contracts. -make build-contract-rs/activate-bid -make build-contract-rs/add-bid -make build-contract-rs/delegate -make build-contract-rs/transfer-to-account-u512 -make build-contract-rs/transfer-to-account-u512-stored -make build-contract-rs/undelegate -make build-contract-rs/withdraw-bid - -popd || exit diff --git a/utils/nctl/sh/assets/compile_node.sh b/utils/nctl/sh/assets/compile_node.sh deleted file mode 100644 index 96524cfa1c..0000000000 --- a/utils/nctl/sh/assets/compile_node.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/usr/bin/env bash -# -####################################### -# Compiles node software. -# Globals: -# NCTL - path to nctl home directory. -# NCTL_CASPER_HOME - path to casper node repo. -# NCTL_COMPILE_TARGET - flag indicating whether software compilation target is release | debug. 
-######################################## - -source "$NCTL"/sh/utils/main.sh - -pushd "$NCTL_CASPER_HOME" || exit - -if [ "$NCTL_COMPILE_TARGET" = "debug" ]; then - cargo build --package casper-node -else - cargo build --release --package casper-node -fi - -popd || exit diff --git a/utils/nctl/sh/assets/compile_node_launcher.sh b/utils/nctl/sh/assets/compile_node_launcher.sh deleted file mode 100644 index 05bbd3bd15..0000000000 --- a/utils/nctl/sh/assets/compile_node_launcher.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/env bash -# -####################################### -# Compiles node launcher software. -# Globals: -# NCTL - path to nctl home directory. -# NCTL_CASPER_NODE_LAUNCHER_HOME - path to casper node launcher repo. -# NCTL_COMPILE_TARGET - flag indicating whether software compilation target is release | debug. -######################################## - -# Import utils. -source "$NCTL"/sh/utils/main.sh - -pushd "$NCTL_CASPER_NODE_LAUNCHER_HOME" || exit - -if [ "$NCTL_COMPILE_TARGET" = "debug" ]; then - cargo build -else - cargo build --release -fi - -popd || exit diff --git a/utils/nctl/sh/assets/dump.sh b/utils/nctl/sh/assets/dump.sh deleted file mode 100644 index 4e200999cd..0000000000 --- a/utils/nctl/sh/assets/dump.sh +++ /dev/null @@ -1,56 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/utils/main.sh - -log "transient asset dump ... starts" - -# Set paths. -PATH_TO_NET=$(get_path_to_net) -PATH_TO_DUMP=$(get_path_to_net_dump) - -# Set dump directory. -if [ -d "$PATH_TO_DUMP" ]; then - rm -rf "$PATH_TO_DUMP" -fi -mkdir -p "$PATH_TO_DUMP" - -# Dump chainspec. -cp "$PATH_TO_NET"/chainspec/accounts.toml "$PATH_TO_DUMP"/accounts.toml -cp "$PATH_TO_NET"/chainspec/chainspec.toml "$PATH_TO_DUMP" - -# Dump daemon. -if [ "$NCTL_DAEMON_TYPE" = "supervisord" ]; then - cp "$PATH_TO_NET"/daemon/config/supervisord.conf "$PATH_TO_DUMP"/daemon.conf - cp "$PATH_TO_NET"/daemon/logs/supervisord.log "$PATH_TO_DUMP"/daemon.log -fi - -# Dump faucet. 
-cp "$PATH_TO_NET"/faucet/public_key_hex "$PATH_TO_DUMP"/faucet-public_key_hex -cp "$PATH_TO_NET"/faucet/public_key.pem "$PATH_TO_DUMP"/faucet-public_key.pem -cp "$PATH_TO_NET"/faucet/secret_key.pem "$PATH_TO_DUMP"/faucet-secret_key.pem - -# Dump nodes. -for NODE_ID in $(seq 1 "$(get_count_of_genesis_nodes)") -do - PATH_TO_NODE_KEYS=$(get_path_to_node_keys "$NODE_ID") - PATH_TO_NODE_LOGS=$(get_path_to_node_logs "$NODE_ID") - PATH_TO_NODE_CFG=$(get_path_to_node_config "$NODE_ID") - - cp "$PATH_TO_NODE_CFG"/1_0_0/config.toml "$PATH_TO_DUMP"/node-"$NODE_ID"-config.toml - cp "$PATH_TO_NODE_KEYS"/public_key_hex "$PATH_TO_DUMP"/node-"$NODE_ID"-public_key_hex - cp "$PATH_TO_NODE_KEYS"/public_key.pem "$PATH_TO_DUMP"/node-"$NODE_ID"-public_key.pem - cp "$PATH_TO_NODE_KEYS"/secret_key.pem "$PATH_TO_DUMP"/node-"$NODE_ID"-secret_key.pem - cp "$PATH_TO_NODE_LOGS"/stderr.log "$PATH_TO_DUMP"/node-"$NODE_ID"-stderr.log - cp "$PATH_TO_NODE_LOGS"/stdout.log "$PATH_TO_DUMP"/node-"$NODE_ID"-stdout.log -done - -# Dump users. -for USER_ID in $(seq 1 "$(get_count_of_users)") -do - PATH_TO_USER=$(get_path_to_user "$USER_ID") - cp "$PATH_TO_USER"/public_key_hex "$PATH_TO_DUMP"/user-"$USER_ID"-public_key_hex - cp "$PATH_TO_USER"/public_key.pem "$PATH_TO_DUMP"/user-"$USER_ID"-public_key.pem - cp "$PATH_TO_USER"/secret_key.pem "$PATH_TO_DUMP"/user-"$USER_ID"-secret_key.pem -done - -log "transient asset dump ... complete" diff --git a/utils/nctl/sh/assets/list.sh b/utils/nctl/sh/assets/list.sh deleted file mode 100644 index 6d62f72d08..0000000000 --- a/utils/nctl/sh/assets/list.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/usr/bin/env bash - -if [ -d "$NCTL"/assets ]; then - ls "$NCTL"/assets -fi diff --git a/utils/nctl/sh/assets/setup.sh b/utils/nctl/sh/assets/setup.sh deleted file mode 100644 index ae0828c827..0000000000 --- a/utils/nctl/sh/assets/setup.sh +++ /dev/null @@ -1,429 +0,0 @@ -#!/usr/bin/env bash -# -# Sets assets required to run an N node network. 
-# Arguments: -# Network ordinal identifier (default=1). -# Count of nodes to setup (default=5). -# Delay in seconds to apply to genesis timestamp (default=30). -# Path to custom chain spec template file. - -####################################### -# Imports -####################################### - -source "$NCTL"/sh/utils/main.sh - -####################################### -# Sets network accounts.toml. -####################################### -function _set_accounts() -{ - log "... setting accounts.toml" - - local PATH_TO_NET - local PATH_TO_ACCOUNTS - local IDX - - # Set accounts.toml. - PATH_TO_NET=$(get_path_to_net) - PATH_TO_ACCOUNTS="$PATH_TO_NET"/chainspec/accounts.toml - touch "$PATH_TO_ACCOUNTS" - - # Set faucet account entry. - cat >> "$PATH_TO_ACCOUNTS" <<- EOM -# FAUCET. -[[accounts]] -public_key = "$(cat "$PATH_TO_NET/faucet/public_key_hex")" -balance = "$NCTL_INITIAL_BALANCE_FAUCET" -EOM - - # Set validator account entries. - for IDX in $(seq 1 "$(get_count_of_nodes)") - do - cat >> "$PATH_TO_ACCOUNTS" <<- EOM - -# VALIDATOR $IDX. -[[accounts]] -public_key = "$(cat "$PATH_TO_NET/nodes/node-$IDX/keys/public_key_hex")" -balance = "$NCTL_INITIAL_BALANCE_VALIDATOR" -EOM - if [ "$IDX" -le "$(get_count_of_genesis_nodes)" ]; then - cat >> "$PATH_TO_ACCOUNTS" <<- EOM - -[accounts.validator] -bonded_amount = "$(_get_node_pos_stake_weight "$IDX")" -delegation_rate = $IDX -EOM - fi - done - - # Set user account entries. - for IDX in $(seq 1 "$(get_count_of_users)") - do - if [ "$IDX" -le "$(get_count_of_genesis_nodes)" ]; then - cat >> "$PATH_TO_ACCOUNTS" <<- EOM - -# USER $IDX. -[[delegators]] -validator_public_key = "$(cat "$PATH_TO_NET/nodes/node-$IDX/keys/public_key_hex")" -delegator_public_key = "$(cat "$PATH_TO_NET/users/user-$IDX/public_key_hex")" -balance = "$NCTL_INITIAL_BALANCE_USER" -delegated_amount = "$((NCTL_INITIAL_DELEGATION_AMOUNT + IDX))" -EOM - else - cat >> "$PATH_TO_ACCOUNTS" <<- EOM - -# USER $IDX. 
-[[accounts]] -public_key = "$(cat "$PATH_TO_NET/users/user-$IDX/public_key_hex")" -balance = "$NCTL_INITIAL_BALANCE_USER" -EOM - fi - done -} - -####################################### -# Sets network accounts.toml from an existing template. -####################################### -function _set_accounts_from_template() -{ - log "... setting accounts.toml (from template)" - - local ACCOUNT_KEY - local PATH_TO_ACCOUNTS - local PATH_TO_TEMPLATE=${1} - local PBK_KEY - local IDX - - # Copy across template. - PATH_TO_ACCOUNTS="$(get_path_to_net)"/chainspec/accounts.toml - cp "$PATH_TO_TEMPLATE" "$PATH_TO_ACCOUNTS" - - # Set faucet. - PBK_KEY="PBK_FAUCET" - ACCOUNT_KEY="$(get_account_key "$NCTL_ACCOUNT_TYPE_FAUCET")" - sed -i "s/""$PBK_KEY""/""$ACCOUNT_KEY""/" "$PATH_TO_ACCOUNTS" - - # Set validators. - for IDX in $(seq "$(get_count_of_nodes)" -1 1 ) - do - PBK_KEY=PBK_V"$IDX" - ACCOUNT_KEY="$(get_account_key "$NCTL_ACCOUNT_TYPE_NODE" "$IDX")" - sed -i "s/""$PBK_KEY""/""$ACCOUNT_KEY""/" "$PATH_TO_ACCOUNTS" - done - - # Set users. - for IDX in $(seq "$(get_count_of_users)" -1 1) - do - PBK_KEY=PBK_U"$IDX" - ACCOUNT_KEY="$(get_account_key "$NCTL_ACCOUNT_TYPE_USER" "$IDX")" - sed -i "s/""$PBK_KEY""/""$ACCOUNT_KEY""/" "$PATH_TO_ACCOUNTS" - done -} - -####################################### -# Sets network binaries. -####################################### -function _set_binaries() -{ - log "... setting binaries" - - local PATH_TO_NET - local PATH_TO_NODE_BIN - local PATH_TO_NODE_BIN_SEMVAR - local PATH_TO_CONTRACT - local CONTRACT - local IDX - - PATH_TO_NET="$(get_path_to_net)" - - # Set node binaries. 
- for IDX in $(seq 1 "$(get_count_of_nodes)") - do - PATH_TO_NODE_BIN=$(get_path_to_node_bin "$IDX") - PATH_TO_NODE_BIN_SEMVAR="$PATH_TO_NODE_BIN"/1_0_0 - - if [ "$NCTL_COMPILE_TARGET" = "debug" ]; then - cp "$NCTL_CASPER_NODE_LAUNCHER_HOME/target/debug/casper-node-launcher" "$PATH_TO_NODE_BIN" - cp "$NCTL_CASPER_HOME"/target/debug/casper-node "$PATH_TO_NODE_BIN_SEMVAR" - else - cp "$NCTL_CASPER_NODE_LAUNCHER_HOME/target/release/casper-node-launcher" "$PATH_TO_NODE_BIN" - cp "$NCTL_CASPER_HOME"/target/release/casper-node "$PATH_TO_NODE_BIN_SEMVAR" - fi - done - - # Set client binary. - if [ "$NCTL_COMPILE_TARGET" = "debug" ]; then - cp "$NCTL_CASPER_HOME"/target/debug/casper-client "$PATH_TO_NET"/bin - else - cp "$NCTL_CASPER_HOME"/target/release/casper-client "$PATH_TO_NET"/bin - fi - - # Set client contracts. - for CONTRACT in "${NCTL_CONTRACTS_CLIENT_AUCTION[@]}" - do - PATH_TO_CONTRACT="$NCTL_CASPER_HOME"/target/wasm32-unknown-unknown/release/"$CONTRACT" - if [ -f "$PATH_TO_CONTRACT" ]; then - cp "$PATH_TO_CONTRACT" "$PATH_TO_NET"/bin/auction - fi - done - for CONTRACT in "${NCTL_CONTRACTS_CLIENT_TRANSFERS[@]}" - do - PATH_TO_CONTRACT="$NCTL_CASPER_HOME"/target/wasm32-unknown-unknown/release/"$CONTRACT" - cp "$PATH_TO_CONTRACT" "$PATH_TO_NET"/bin/transfers - done -} - -####################################### -# Sets network chainspec. -# Arguments: -# Delay in seconds to apply to genesis timestamp. -# Path to chainspec template file. -####################################### -function _set_chainspec() -{ - log "... setting chainspec.toml" - - local GENESIS_DELAY=${1} - local PATH_TO_CHAINSPEC_TEMPLATE=${2} - local PATH_TO_CHAINSPEC_FILE - local PATH_TO_NET - local SCRIPT - - # Set file. - PATH_TO_NET=$(get_path_to_net) - PATH_TO_CHAINSPEC_FILE=$PATH_TO_NET/chainspec/chainspec.toml - cp "$PATH_TO_CHAINSPEC_TEMPLATE" "$PATH_TO_CHAINSPEC_FILE" - - # Write contents. 
- local SCRIPT=( - "import toml;" - "cfg=toml.load('$PATH_TO_CHAINSPEC_FILE');" - "cfg['protocol']['activation_point']='$(get_genesis_timestamp "$GENESIS_DELAY")';" - "cfg['network']['name']='$(get_chain_name)';" - "cfg['core']['validator_slots']=$(($(get_count_of_nodes) * 2));" - "toml.dump(cfg, open('$PATH_TO_CHAINSPEC_FILE', 'w'));" - ) - python3 -c "${SCRIPT[*]}" -} - -####################################### -# Sets network daemon configuration. -# Globals: -# NCTL - path to nctl home directory. -# NCTL_DAEMON_TYPE - type of daemon service manager. -####################################### -function _set_daemon() -{ - log "... setting daemon config" - - if [ "$NCTL_DAEMON_TYPE" = "supervisord" ]; then - source "$NCTL"/sh/assets/setup_supervisord.sh - fi -} - -####################################### -# Sets network directories. -# Arguments: -# Count of nodes to setup (default=5). -# Count of users to setup (default=5). -####################################### -function _set_directories() -{ - log "... 
setting directories" - - local COUNT_NODES=${1} - local COUNT_USERS=${2} - local PATH_TO_NET - local PATH_TO_NODE - local IDX - - PATH_TO_NET="$(get_path_to_net)" - - mkdir "$PATH_TO_NET"/bin - mkdir "$PATH_TO_NET"/bin/auction - mkdir "$PATH_TO_NET"/bin/eco - mkdir "$PATH_TO_NET"/bin/transfers - mkdir "$PATH_TO_NET"/chainspec - mkdir "$PATH_TO_NET"/daemon - mkdir "$PATH_TO_NET"/daemon/config - mkdir "$PATH_TO_NET"/daemon/logs - mkdir "$PATH_TO_NET"/daemon/socket - mkdir "$PATH_TO_NET"/faucet - mkdir "$PATH_TO_NET"/nodes - mkdir "$PATH_TO_NET"/users - - for IDX in $(seq 1 "$COUNT_NODES") - do - PATH_TO_NODE="$PATH_TO_NET"/nodes/node-"$IDX" - mkdir "$PATH_TO_NODE" - mkdir "$PATH_TO_NODE"/bin - mkdir "$PATH_TO_NODE"/bin/1_0_0 - mkdir "$PATH_TO_NODE"/config - mkdir "$PATH_TO_NODE"/config/1_0_0 - mkdir "$PATH_TO_NODE"/keys - mkdir "$PATH_TO_NODE"/logs - mkdir "$PATH_TO_NODE"/storage - mkdir "$PATH_TO_NODE"/storage-consensus - done - - for IDX in $(seq 1 "$COUNT_USERS") - do - mkdir "$PATH_TO_NET"/users/user-"$IDX" - done -} - -####################################### -# Sets network keys. -####################################### -function _set_keys() -{ - log "... setting cryptographic keys" - - "$(get_path_to_client)" keygen -f "$(get_path_to_net)"/faucet > /dev/null 2>&1 - for IDX in $(seq 1 "$(get_count_of_nodes)") - do - "$(get_path_to_client)" keygen -f "$(get_path_to_net)"/nodes/node-"$IDX"/keys > /dev/null 2>&1 - done - for IDX in $(seq 1 "$(get_count_of_users)") - do - "$(get_path_to_client)" keygen -f "$(get_path_to_net)"/users/user-"$IDX" > /dev/null 2>&1 - done -} - -####################################### -# Sets network nodes. -####################################### -function _set_nodes() -{ - log "... 
setting node config" - - local IDX - local PATH_TO_FILE - local PATH_TO_NODE - - for IDX in $(seq 1 "$(get_count_of_nodes)") - do - PATH_TO_CFG=$(get_path_to_node "$IDX")/config/1_0_0 - PATH_TO_FILE="$PATH_TO_CFG"/config.toml - - cp "$NCTL_CASPER_HOME"/resources/local/config.toml "$PATH_TO_CFG" - cp "$(get_path_to_net)"/chainspec/* "$PATH_TO_CFG" - - local SCRIPT=( - "import toml;" - "cfg=toml.load('$PATH_TO_FILE');" - "cfg['consensus']['secret_key_path']='../../keys/secret_key.pem';" - "cfg['consensus']['highway']['unit_hashes_folder']='../../storage-consensus';" - "cfg['logging']['format']='$NCTL_NODE_LOG_FORMAT';" - "cfg['network']['bind_address']='$(get_network_bind_address "$IDX")';" - "cfg['network']['known_addresses']=[$(get_network_known_addresses "$IDX")];" - "cfg['storage']['path']='../../storage';" - "cfg['rest_server']['address']='0.0.0.0:$(get_node_port_rest "$IDX")';" - "cfg['rpc_server']['address']='0.0.0.0:$(get_node_port_rpc "$IDX")';" - "cfg['event_stream_server']['address']='0.0.0.0:$(get_node_port_sse "$IDX")';" - "toml.dump(cfg, open('$PATH_TO_FILE', 'w'));" - ) - python3 -c "${SCRIPT[*]}" - done -} - -####################################### -# Gets a node's default POS weight. -# Arguments: -# Node ordinal identifier. -####################################### -function _get_node_pos_stake_weight() -{ - local NODE_ID=${1} - local POS_WEIGHT - - if [ "$NODE_ID" -le "$(get_count_of_genesis_nodes)" ]; then - POS_WEIGHT=$(get_node_staking_weight "$NODE_ID") - else - POS_WEIGHT="0" - fi - if [ "x$POS_WEIGHT" = 'x' ]; then - POS_WEIGHT="0" - fi - - echo $POS_WEIGHT -} - -####################################### -# Main -# Globals: -# NET_ID - ordinal identifier of network being setup. -# Arguments: -# Count of nodes to setup. -# Delay in seconds to apply to genesis timestamp. -# Path to template chainspec. -# Path to template accounts.toml. 
-####################################### -function _main() -{ - local COUNT_NODES=$((${1} * 2)) - local GENESIS_DELAY=${2} - local CHAINSPEC_PATH=${3} - local ACCOUNTS_PATH=${4} - local COUNT_USERS="$COUNT_NODES" - local PATH_TO_NET - - # Tear down previous. - PATH_TO_NET=$(get_path_to_net) - if [ -d "$PATH_TO_NET" ]; then - source "$NCTL"/sh/assets/teardown.sh net="$NET_ID" - fi - mkdir -p "$PATH_TO_NET" - - # Setup new. - log "asset setup begins ... please wait" - _set_directories "$COUNT_NODES" "$COUNT_USERS" - _set_binaries - _set_keys - _set_daemon - _set_chainspec "$GENESIS_DELAY" "$CHAINSPEC_PATH" - if [ "$ACCOUNTS_PATH" = "" ]; then - _set_accounts - else - _set_accounts_from_template "$ACCOUNTS_PATH" - fi - _set_nodes - log "asset setup complete" -} - -# ---------------------------------------------------------------- -# ENTRY POINT -# ---------------------------------------------------------------- - -unset ACCOUNTS_PATH -unset GENESIS_DELAY_SECONDS -unset NET_ID -unset NODE_COUNT -unset CHAINSPEC_PATH - -for ARGUMENT in "$@" -do - KEY=$(echo "$ARGUMENT" | cut -f1 -d=) - VALUE=$(echo "$ARGUMENT" | cut -f2 -d=) - case "$KEY" in - delay) GENESIS_DELAY_SECONDS=${VALUE} ;; - net) NET_ID=${VALUE} ;; - nodes) NODE_COUNT=${VALUE} ;; - chainspec_path) CHAINSPEC_PATH=${VALUE} ;; - accounts_path) ACCOUNTS_PATH=${VALUE} ;; - *) - esac -done - -export NET_ID=${NET_ID:-1} -GENESIS_DELAY_SECONDS=${GENESIS_DELAY_SECONDS:-30} -NODE_COUNT=${NODE_COUNT:-5} -CHAINSPEC_PATH=${CHAINSPEC_PATH:-"${NCTL_CASPER_HOME}/resources/local/chainspec.toml.in"} -ACCOUNTS_PATH=${ACCOUNTS_PATH:-""} - -if [ 3 -gt "$NODE_COUNT" ]; then - log_error "Invalid input: |nodes| MUST BE >= 3" -else - _main "$NODE_COUNT" "$GENESIS_DELAY_SECONDS" "$CHAINSPEC_PATH" "$ACCOUNTS_PATH" -fi diff --git a/utils/nctl/sh/assets/setup_supervisord.sh b/utils/nctl/sh/assets/setup_supervisord.sh deleted file mode 100644 index eadbd75724..0000000000 --- a/utils/nctl/sh/assets/setup_supervisord.sh +++ /dev/null @@ 
-1,80 +0,0 @@ -#!/usr/bin/env bash -# -####################################### -# Sets artefacts pertaining to network daemon. -# Globals: -# NCTL_PROCESS_GROUP_1 - process 1 group identifier. -# NCTL_PROCESS_GROUP_2 - process 2 group identifier. -# NCTL_PROCESS_GROUP_3 - process 3 group identifier. -####################################### - -PATH_TO_NET=$(get_path_to_net) -PATH_SUPERVISOR_CONFIG=$(get_path_net_supervisord_cfg) -touch "$PATH_SUPERVISOR_CONFIG" - -# ------------------------------------------------------------------------ -# Set supervisord.conf header. -# ------------------------------------------------------------------------ -cat >> "$PATH_SUPERVISOR_CONFIG" <<- EOM -[unix_http_server] -file=$PATH_TO_NET/daemon/socket/supervisord.sock ; - -[supervisord] -logfile=$PATH_TO_NET/daemon/logs/supervisord.log ; -logfile_maxbytes=200MB ; -logfile_backups=10 ; -loglevel=info ; -pidfile=$PATH_TO_NET/daemon/socket/supervisord.pid ; - -[rpcinterface:supervisor] -supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface - -[supervisorctl] -serverurl=unix:///$PATH_TO_NET/daemon/socket/supervisord.sock ; -EOM - -# ------------------------------------------------------------------------ -# Set supervisord.conf app sections. 
-# ------------------------------------------------------------------------ -for NODE_ID in $(seq 1 "$(get_count_of_nodes)") -do - PATH_NODE_BIN=$(get_path_to_node_bin "$NODE_ID") - PATH_NODE_CONFIG=$(get_path_to_node_config "$NODE_ID") - PATH_NODE_LOGS=$(get_path_to_node_logs "$NODE_ID") - - cat >> "$PATH_SUPERVISOR_CONFIG" <<- EOM - -[program:casper-net-$NET_ID-node-$NODE_ID] -autostart=false -autorestart=false -command=$PATH_NODE_BIN/casper-node-launcher -environment=CASPER_BIN_DIR="$PATH_NODE_BIN",CASPER_CONFIG_DIR="$PATH_NODE_CONFIG" -numprocs=1 -numprocs_start=0 -startsecs=0 -stopwaitsecs=0 -stopasgroup=true -stderr_logfile=$PATH_NODE_LOGS/stderr.log ; -stderr_logfile_backups=5 ; -stderr_logfile_maxbytes=500MB ; -stdout_logfile=$PATH_NODE_LOGS/stdout.log ; -stdout_logfile_backups=5 ; -stdout_logfile_maxbytes=500MB ; -EOM -done - -# ------------------------------------------------------------------------ -# Set supervisord.conf group sections. -# ------------------------------------------------------------------------ -cat >> "$PATH_SUPERVISOR_CONFIG" <<- EOM - -[group:$NCTL_PROCESS_GROUP_1] -programs=$(get_process_group_members "$NCTL_PROCESS_GROUP_1") - -[group:$NCTL_PROCESS_GROUP_2] -programs=$(get_process_group_members "$NCTL_PROCESS_GROUP_2") - -[group:$NCTL_PROCESS_GROUP_3] -programs=$(get_process_group_members "$NCTL_PROCESS_GROUP_3") - -EOM diff --git a/utils/nctl/sh/assets/teardown.sh b/utils/nctl/sh/assets/teardown.sh deleted file mode 100644 index 6e20755298..0000000000 --- a/utils/nctl/sh/assets/teardown.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/utils/main.sh - -####################################### -# Destructure input args. 
-####################################### - -for ARGUMENT in "$@" -do - KEY=$(echo "$ARGUMENT" | cut -f1 -d=) - VALUE=$(echo "$ARGUMENT" | cut -f2 -d=) - case "$KEY" in - net) NET_ID=${VALUE} ;; - *) - esac -done - -export NET_ID=${NET_ID:-1} - -####################################### -# Main -####################################### - -log "asset tear-down begins ... please wait" - -source "$NCTL"/sh/node/stop.sh node=all -if [ -d "$(get_path_to_net)" ]; then - log "... deleting files" - rm -rf "$(get_path_to_net)" - rm -rf "$NCTL"/dumps -fi -sleep 2.0 - -log "asset tear-down complete" diff --git a/utils/nctl/sh/assets/upgrade.sh b/utils/nctl/sh/assets/upgrade.sh deleted file mode 100644 index 652d280b22..0000000000 --- a/utils/nctl/sh/assets/upgrade.sh +++ /dev/null @@ -1,61 +0,0 @@ -#!/usr/bin/env bash - -####################################### -# Imports -####################################### - -source "$NCTL"/sh/utils/main.sh - -####################################### -# Upgrades node in the network -# Arguments: -# Protocol version -# Era at which new version should be upgraded -# ID of the node to upgrade -####################################### -function _upgrade_node() { - local PROTOCOL_VERSION=${1} - local ACTIVATE_ERA=${2} - local NODE_ID=${3} - - local PATH_TO_NET - local PATH_TO_NODE - - PATH_TO_NET=$(get_path_to_net) - - # Set chainspec file. - PATH_TO_CHAINSPEC_FILE="$PATH_TO_NET"/chainspec/chainspec.toml - mkdir -p "$PATH_TO_NET"/chainspec/"$PROTOCOL_VERSION" - PATH_TO_UPGRADED_CHAINSPEC_FILE="$PATH_TO_NET"/chainspec/"$PROTOCOL_VERSION"/chainspec.toml - cp "$PATH_TO_CHAINSPEC_FILE" "$PATH_TO_UPGRADED_CHAINSPEC_FILE" - - # Write chainspec contents. 
- local SCRIPT=( - "import toml;" - "cfg=toml.load('$PATH_TO_CHAINSPEC_FILE');" - "cfg['protocol']['version']='$PROTOCOL_VERSION'.replace('_', '.');" - "cfg['protocol']['activation_point']=$ACTIVATE_ERA;" - "toml.dump(cfg, open('$PATH_TO_UPGRADED_CHAINSPEC_FILE', 'w'));" - ) - python3 -c "${SCRIPT[*]}" - - # Copy casper-node binary. - PATH_TO_NODE=$(get_path_to_node "$NODE_ID") - mkdir -p "$PATH_TO_NODE"/bin/"$PROTOCOL_VERSION" - - if [ "$NCTL_COMPILE_TARGET" = "debug" ]; then - cp "$NCTL_CASPER_HOME"/target/debug/casper-node "$PATH_TO_NODE"/bin/"$PROTOCOL_VERSION"/ - else - cp "$NCTL_CASPER_HOME"/target/release/casper-node "$PATH_TO_NODE"/bin/"$PROTOCOL_VERSION"/ - fi - - # Copy chainspec. - mkdir -p "$PATH_TO_NODE"/config/"$PROTOCOL_VERSION"/ - cp "$PATH_TO_UPGRADED_CHAINSPEC_FILE" "$PATH_TO_NODE"/config/"$PROTOCOL_VERSION"/ - - # Copy config file. - cp "$PATH_TO_NODE"/config/1_0_0/config.toml "$PATH_TO_NODE"/config/"$PROTOCOL_VERSION"/ - - # Clean up. - rm "$PATH_TO_UPGRADED_CHAINSPEC_FILE" -} diff --git a/utils/nctl/sh/contracts-auction/do_bid.sh b/utils/nctl/sh/contracts-auction/do_bid.sh deleted file mode 100755 index 1544f0b2b6..0000000000 --- a/utils/nctl/sh/contracts-auction/do_bid.sh +++ /dev/null @@ -1,94 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/utils/main.sh - -####################################### -# Submits an auction bid. -# Arguments: -# Bidder ordinal identifier. -# Bid amount. -# Delegation rate. -# Flag indicating whether to emit log messages. 
-####################################### -function main() -{ - local BIDDER_ID=${1} - local BID_AMOUNT=${2} - local BID_DELEGATION_RATE=${3} - local QUIET=${4:-"FALSE"} - - local CHAIN_NAME - local GAS_PRICE - local GAS_PAYMENT - local NODE_ADDRESS - local PATH_TO_CLIENT - local PATH_TO_CONTRACT - local BIDDER_ACCOUNT_KEY - local BIDDER_SECRET_KEY - - CHAIN_NAME=$(get_chain_name) - GAS_PRICE=${GAS_PRICE:-$NCTL_DEFAULT_GAS_PRICE} - GAS_PAYMENT=${GAS_PAYMENT:-$NCTL_DEFAULT_GAS_PAYMENT} - NODE_ADDRESS=$(get_node_address_rpc) - PATH_TO_CLIENT=$(get_path_to_client) - PATH_TO_CONTRACT=$(get_path_to_contract "auction/add_bid.wasm") - - BIDDER_ACCOUNT_KEY=$(get_account_key "$NCTL_ACCOUNT_TYPE_NODE" "$BIDDER_ID") - BIDDER_SECRET_KEY=$(get_path_to_secret_key "$NCTL_ACCOUNT_TYPE_NODE" "$BIDDER_ID") - - if [ "$QUIET" != "TRUE" ]; then - log "dispatching deploy -> add_bid.wasm" - log "... chain = $CHAIN_NAME" - log "... dispatch node = $NODE_ADDRESS" - log "... contract = $PATH_TO_CONTRACT" - log "... bidder id = $BIDDER_ID" - log "... bidder secret key = $BIDDER_SECRET_KEY" - log "... bid amount = $BID_AMOUNT" - log "... bid delegation rate = $BID_DELEGATION_RATE" - fi - - DEPLOY_HASH=$( - $PATH_TO_CLIENT put-deploy \ - --chain-name "$CHAIN_NAME" \ - --gas-price "$GAS_PRICE" \ - --node-address "$NODE_ADDRESS" \ - --payment-amount "$GAS_PAYMENT" \ - --ttl "1day" \ - --secret-key "$BIDDER_SECRET_KEY" \ - --session-arg "$(get_cl_arg_account_key 'public_key' "$BIDDER_ACCOUNT_KEY")" \ - --session-arg "$(get_cl_arg_u512 'amount' "$BID_AMOUNT")" \ - --session-arg "$(get_cl_arg_u8 'delegation_rate' "$BID_DELEGATION_RATE")" \ - --session-path "$PATH_TO_CONTRACT" \ - | jq '.result.deploy_hash' \ - | sed -e 's/^"//' -e 's/"$//' - ) - - if [ "$QUIET" != "TRUE" ]; then - log "deploy dispatched:" - log "... 
deploy hash = $DEPLOY_HASH" - fi -} - -# ---------------------------------------------------------------- -# ENTRY POINT -# ---------------------------------------------------------------- - -unset AMOUNT -unset NODE_ID -unset DELEGATION_RATE - -for ARGUMENT in "$@" -do - KEY=$(echo "$ARGUMENT" | cut -f1 -d=) - VALUE=$(echo "$ARGUMENT" | cut -f2 -d=) - case "$KEY" in - amount) AMOUNT=${VALUE} ;; - node) NODE_ID=${VALUE} ;; - rate) DELEGATION_RATE=${VALUE} ;; - *) - esac -done - -main "${NODE_ID:-6}" \ - "${AMOUNT:-$(get_node_staking_weight "${NODE_ID:-6}")}" \ - "${DELEGATION_RATE:-6}" diff --git a/utils/nctl/sh/contracts-auction/do_bid_activate.sh b/utils/nctl/sh/contracts-auction/do_bid_activate.sh deleted file mode 100644 index 591ad69980..0000000000 --- a/utils/nctl/sh/contracts-auction/do_bid_activate.sh +++ /dev/null @@ -1,82 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/utils/main.sh - -####################################### -# Submits an auction bid activation - required when ejected from validator set due to liveness fault. -# Arguments: -# Node ordinal identifier. -# Flag indicating whether to emit log messages. 
-####################################### -function main() -{ - local VALIDATOR_ID=${1} - local QUIET=${2:-"FALSE"} - - local CHAIN_NAME - local GAS_PRICE - local GAS_PAYMENT - local NODE_ADDRESS - local PATH_TO_CLIENT - local PATH_TO_CONTRACT - local VALIDATOR_ACCOUNT_KEY - local VALIDATOR_SECRET_KEY - - CHAIN_NAME=$(get_chain_name) - GAS_PRICE=${GAS_PRICE:-$NCTL_DEFAULT_GAS_PRICE} - GAS_PAYMENT=${GAS_PAYMENT:-$NCTL_DEFAULT_GAS_PAYMENT} - NODE_ADDRESS=$(get_node_address_rpc) - PATH_TO_CLIENT=$(get_path_to_client) - PATH_TO_CONTRACT=$(get_path_to_contract "auction/activate_bid.wasm") - - VALIDATOR_ACCOUNT_KEY=$(get_account_key "$NCTL_ACCOUNT_TYPE_NODE" "$VALIDATOR_ID") - VALIDATOR_SECRET_KEY=$(get_path_to_secret_key "$NCTL_ACCOUNT_TYPE_NODE" "$VALIDATOR_ID") - - if [ "$QUIET" != "TRUE" ]; then - log "dispatching deploy -> activate_bid.wasm" - log "... chain = $CHAIN_NAME" - log "... dispatch node = $NODE_ADDRESS" - log "... contract = $PATH_TO_CONTRACT" - log "... validator id = $VALIDATOR_ID" - log "... validator account key = $VALIDATOR_ACCOUNT_KEY" - log "... validator secret key = $VALIDATOR_SECRET_KEY" - fi - - DEPLOY_HASH=$( - $PATH_TO_CLIENT put-deploy \ - --chain-name "$CHAIN_NAME" \ - --gas-price "$GAS_PRICE" \ - --node-address "$NODE_ADDRESS" \ - --payment-amount "$GAS_PAYMENT" \ - --ttl "1day" \ - --secret-key "$VALIDATOR_SECRET_KEY" \ - --session-arg "$(get_cl_arg_account_key 'validator_public_key' "$VALIDATOR_ACCOUNT_KEY")" \ - --session-path "$PATH_TO_CONTRACT" \ - | jq '.result.deploy_hash' \ - | sed -e 's/^"//' -e 's/"$//' - ) - - if [ "$QUIET" != "TRUE" ]; then - log "deploy dispatched:" - log "... 
deploy hash = $DEPLOY_HASH" - fi -} - -# ---------------------------------------------------------------- -# ENTRY POINT -# ---------------------------------------------------------------- - -unset NODE_ID - -for ARGUMENT in "$@" -do - KEY=$(echo "$ARGUMENT" | cut -f1 -d=) - VALUE=$(echo "$ARGUMENT" | cut -f2 -d=) - case "$KEY" in - node) NODE_ID=${VALUE} ;; - validator) NODE_ID=${VALUE} ;; - *) - esac -done - -main "${NODE_ID:-1}" diff --git a/utils/nctl/sh/contracts-auction/do_bid_withdraw.sh b/utils/nctl/sh/contracts-auction/do_bid_withdraw.sh deleted file mode 100644 index f19bf80c09..0000000000 --- a/utils/nctl/sh/contracts-auction/do_bid_withdraw.sh +++ /dev/null @@ -1,91 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/utils/main.sh - -####################################### -# Submits an auction withdrawal. -# Arguments: -# Validator ordinal identifier. -# Withdrawal amount. -# Flag indicating whether to emit log messages. -####################################### -function main() -{ - local BIDDER_ID=${1} - local AMOUNT=${2} - local QUIET=${3:-"FALSE"} - local CHAIN_NAME - local GAS_PRICE - local GAS_PAYMENT - local NODE_ADDRESS - local PATH_TO_CLIENT - local PATH_TO_CONTRACT - local BIDDER_SECRET_KEY - local BIDDER_ACCOUNT_KEY - local BIDDER_MAIN_PURSE_UREF - - CHAIN_NAME=$(get_chain_name) - GAS_PRICE=${GAS_PRICE:-$NCTL_DEFAULT_GAS_PRICE} - GAS_PAYMENT=${GAS_PAYMENT:-$NCTL_DEFAULT_GAS_PAYMENT} - NODE_ADDRESS=$(get_node_address_rpc) - PATH_TO_CLIENT=$(get_path_to_client) - PATH_TO_CONTRACT=$(get_path_to_contract "auction/withdraw_bid.wasm") - - BIDDER_SECRET_KEY=$(get_path_to_secret_key "$NCTL_ACCOUNT_TYPE_NODE" "$BIDDER_ID") - BIDDER_ACCOUNT_KEY=$(get_account_key "$NCTL_ACCOUNT_TYPE_NODE" "$BIDDER_ID") - BIDDER_MAIN_PURSE_UREF=$(get_main_purse_uref "$BIDDER_ACCOUNT_KEY") - - if [ "$QUIET" != "TRUE" ]; then - log "dispatching deploy -> withdraw_bid.wasm" - log "... chain = $CHAIN_NAME" - log "... dispatch node = $NODE_ADDRESS" - log "... 
contract = $PATH_TO_CONTRACT" - log "... bidder id = $BIDDER_ID" - log "... bidder account key = $BIDDER_ACCOUNT_KEY" - log "... bidder secret key = $BIDDER_SECRET_KEY" - log "... bidder main purse uref = $BIDDER_MAIN_PURSE_UREF" - log "... withdrawal amount = $AMOUNT" - fi - - DEPLOY_HASH=$( - $PATH_TO_CLIENT put-deploy \ - --chain-name "$CHAIN_NAME" \ - --gas-price "$GAS_PRICE" \ - --node-address "$NODE_ADDRESS" \ - --payment-amount "$GAS_PAYMENT" \ - --ttl "1day" \ - --secret-key "$BIDDER_SECRET_KEY" \ - --session-arg "$(get_cl_arg_account_key 'public_key' "$BIDDER_ACCOUNT_KEY")" \ - --session-arg "$(get_cl_arg_u512 'amount' "$AMOUNT")" \ - --session-arg "$(get_cl_arg_opt_uref 'unbond_purse' "$BIDDER_MAIN_PURSE_UREF")" \ - --session-path "$PATH_TO_CONTRACT" \ - | jq '.result.deploy_hash' \ - | sed -e 's/^"//' -e 's/"$//' - ) - - if [ "$QUIET" != "TRUE" ]; then - log "deploy dispatched:" - log "... deploy hash = $DEPLOY_HASH" - fi -} - -# ---------------------------------------------------------------- -# ENTRY POINT -# ---------------------------------------------------------------- - -unset AMOUNT -unset NODE_ID - -for ARGUMENT in "$@" -do - KEY=$(echo "$ARGUMENT" | cut -f1 -d=) - VALUE=$(echo "$ARGUMENT" | cut -f2 -d=) - case "$KEY" in - amount) AMOUNT=${VALUE} ;; - node) NODE_ID=${VALUE} ;; - *) - esac -done - -main "${NODE_ID:-1}" \ - "${AMOUNT:-$(get_node_staking_weight "${NODE_ID:-1}")}" diff --git a/utils/nctl/sh/contracts-auction/do_delegate.sh b/utils/nctl/sh/contracts-auction/do_delegate.sh deleted file mode 100644 index 1cdcfd4e1d..0000000000 --- a/utils/nctl/sh/contracts-auction/do_delegate.sh +++ /dev/null @@ -1,93 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/utils/main.sh - -####################################### -# Submits an auction delegate. -# Arguments: -# Network ordinal identifier. -# Node ordinal identifier. -# Delegator ordinal identifier. -# Validator ordinal identifier. -# Amount to delegate. -# Gas price. -# Gas payment. 
-####################################### -function main() -{ - local AMOUNT=${1} - local DELEGATOR_ID=${2} - local VALIDATOR_ID=${3} - local CHAIN_NAME - local GAS_PRICE - local GAS_PAYMENT - local NODE_ADDRESS - local PATH_TO_CLIENT - local PATH_TO_CONTRACT - local DELEGATOR_ACCOUNT_KEY - local DELEGATOR_SECRET_KEY - local VALIDATOR_ACCOUNT_KEY - - CHAIN_NAME=$(get_chain_name) - GAS_PRICE=${GAS_PRICE:-$NCTL_DEFAULT_GAS_PRICE} - GAS_PAYMENT=${GAS_PAYMENT:-$NCTL_DEFAULT_GAS_PAYMENT} - NODE_ADDRESS=$(get_node_address_rpc) - PATH_TO_CLIENT=$(get_path_to_client) - PATH_TO_CONTRACT=$(get_path_to_contract "auction/delegate.wasm") - - DELEGATOR_ACCOUNT_KEY=$(get_account_key "$NCTL_ACCOUNT_TYPE_USER" "$DELEGATOR_ID") - DELEGATOR_SECRET_KEY=$(get_path_to_secret_key "$NCTL_ACCOUNT_TYPE_USER" "$DELEGATOR_ID") - VALIDATOR_ACCOUNT_KEY=$(get_account_key "$NCTL_ACCOUNT_TYPE_NODE" "$VALIDATOR_ID") - - log "dispatching deploy -> delegate.wasm" - log "... chain = $CHAIN_NAME" - log "... dispatch node = $NODE_ADDRESS" - log "... contract = $PATH_TO_CONTRACT" - log "... delegator id = $DELEGATOR_ID" - log "... delegator account key = $DELEGATOR_ACCOUNT_KEY" - log "... delegator secret key = $DELEGATOR_SECRET_KEY" - log "... amount = $AMOUNT" - - DEPLOY_HASH=$( - $PATH_TO_CLIENT put-deploy \ - --chain-name "$CHAIN_NAME" \ - --gas-price "$GAS_PRICE" \ - --node-address "$NODE_ADDRESS" \ - --payment-amount "$GAS_PAYMENT" \ - --ttl "1day" \ - --secret-key "$DELEGATOR_SECRET_KEY" \ - --session-arg "$(get_cl_arg_u512 'amount' "$AMOUNT")" \ - --session-arg "$(get_cl_arg_account_key 'delegator' "$DELEGATOR_ACCOUNT_KEY")" \ - --session-arg "$(get_cl_arg_account_key 'validator' "$VALIDATOR_ACCOUNT_KEY")" \ - --session-path "$PATH_TO_CONTRACT" \ - | jq '.result.deploy_hash' \ - | sed -e 's/^"//' -e 's/"$//' - ) - - log "deploy dispatched:" - log "... 
deploy hash = $DEPLOY_HASH" -} - -# ---------------------------------------------------------------- -# ENTRY POINT -# ---------------------------------------------------------------- - -unset AMOUNT -unset DELEGATOR_ID -unset VALIDATOR_ID - -for ARGUMENT in "$@" -do - KEY=$(echo "$ARGUMENT" | cut -f1 -d=) - VALUE=$(echo "$ARGUMENT" | cut -f2 -d=) - case "$KEY" in - amount) AMOUNT=${VALUE} ;; - delegator) DELEGATOR_ID=${VALUE} ;; - validator) VALIDATOR_ID=${VALUE} ;; - *) - esac -done - -main "${AMOUNT:-$NCTL_DEFAULT_AUCTION_DELEGATE_AMOUNT}" \ - "${DELEGATOR_ID:-1}" \ - "${VALIDATOR_ID:-1}" diff --git a/utils/nctl/sh/contracts-auction/do_delegate_withdraw.sh b/utils/nctl/sh/contracts-auction/do_delegate_withdraw.sh deleted file mode 100644 index 74770b37c1..0000000000 --- a/utils/nctl/sh/contracts-auction/do_delegate_withdraw.sh +++ /dev/null @@ -1,94 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/utils/main.sh - -####################################### -# Submits an auction delegate withdrawal. -# Arguments: -# Amount to delegate. -# Delegator ordinal identifier. -# Validator ordinal identifier. 
-####################################### -function main() -{ - local AMOUNT=${1} - local DELEGATOR_ID=${2} - local VALIDATOR_ID=${3} - local CHAIN_NAME - local GAS_PRICE - local GAS_PAYMENT - local NODE_ADDRESS - local PATH_TO_CLIENT - local PATH_TO_CONTRACT - local DELEGATOR_ACCOUNT_KEY - local DELEGATOR_SECRET_KEY - local DELEGATOR_MAIN_PURSE_UREF - local VALIDATOR_ACCOUNT_KEY - - CHAIN_NAME=$(get_chain_name) - GAS_PRICE=${GAS_PRICE:-$NCTL_DEFAULT_GAS_PRICE} - GAS_PAYMENT=${GAS_PAYMENT:-$NCTL_DEFAULT_GAS_PAYMENT} - NODE_ADDRESS=$(get_node_address_rpc) - PATH_TO_CLIENT=$(get_path_to_client) - PATH_TO_CONTRACT=$(get_path_to_contract "auction/undelegate.wasm") - - DELEGATOR_ACCOUNT_KEY=$(get_account_key "$NCTL_ACCOUNT_TYPE_USER" "$DELEGATOR_ID") - DELEGATOR_SECRET_KEY=$(get_path_to_secret_key "$NCTL_ACCOUNT_TYPE_USER" "$DELEGATOR_ID") - DELEGATOR_MAIN_PURSE_UREF=$(get_main_purse_uref "$DELEGATOR_ACCOUNT_KEY") - VALIDATOR_ACCOUNT_KEY=$(get_account_key "$NCTL_ACCOUNT_TYPE_NODE" "$VALIDATOR_ID") - - log "dispatching deploy -> undelegate.wasm" - log "... chain = $CHAIN_NAME" - log "... dispatch node = $NODE_ADDRESS" - log "... contract = $PATH_TO_CONTRACT" - log "... delegator id = $DELEGATOR_ID" - log "... delegator account key = $DELEGATOR_ACCOUNT_KEY" - log "... delegator secret key = $DELEGATOR_SECRET_KEY" - log "... delegator main purse uref = $DELEGATOR_MAIN_PURSE_UREF" - log "... 
amount = $AMOUNT" - - DEPLOY_HASH=$( - $PATH_TO_CLIENT put-deploy \ - --chain-name "$CHAIN_NAME" \ - --gas-price "$GAS_PRICE" \ - --node-address "$NODE_ADDRESS" \ - --payment-amount "$GAS_PAYMENT" \ - --ttl "1day" \ - --secret-key "$DELEGATOR_SECRET_KEY" \ - --session-arg "$(get_cl_arg_u512 'amount' "$AMOUNT")" \ - --session-arg "$(get_cl_arg_account_key 'delegator' "$DELEGATOR_ACCOUNT_KEY")" \ - --session-arg "$(get_cl_arg_account_key 'validator' "$VALIDATOR_ACCOUNT_KEY")" \ - --session-arg "$(get_cl_arg_opt_uref 'unbond_purse' "$DELEGATOR_MAIN_PURSE_UREF")" \ - --session-path "$PATH_TO_CONTRACT" \ - | jq '.result.deploy_hash' \ - | sed -e 's/^"//' -e 's/"$//' - ) - - log "deploy dispatched:" - log "... deploy hash = $DEPLOY_HASH" -} - -# ---------------------------------------------------------------- -# ENTRY POINT -# ---------------------------------------------------------------- - -unset AMOUNT -unset DELEGATOR_ID -unset VALIDATOR_ID - -for ARGUMENT in "$@" -do - KEY=$(echo "$ARGUMENT" | cut -f1 -d=) - VALUE=$(echo "$ARGUMENT" | cut -f2 -d=) - case "$KEY" in - amount) AMOUNT=${VALUE} ;; - delegator) DELEGATOR_ID=${VALUE} ;; - validator) VALIDATOR_ID=${VALUE} ;; - *) - esac -done - -main \ - "${AMOUNT:-$NCTL_DEFAULT_AUCTION_DELEGATE_AMOUNT}" \ - "${DELEGATOR_ID:-1}" \ - "${VALIDATOR_ID:-1}" diff --git a/utils/nctl/sh/contracts-erc20/do_approve.sh b/utils/nctl/sh/contracts-erc20/do_approve.sh deleted file mode 100644 index 7be63b6c61..0000000000 --- a/utils/nctl/sh/contracts-erc20/do_approve.sh +++ /dev/null @@ -1,84 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/utils/main.sh -source "$NCTL"/sh/contracts-erc20/utils.sh - -####################################### -# Approves ERC-20 token transfers from a test user account. -# Arguments: -# Amount of ERC-20 token to approve. 
-####################################### -function main() -{ - local AMOUNT=${1} - local CHAIN_NAME - local GAS_PRICE - local GAS_PAYMENT - local NODE_ADDRESS - local PATH_TO_CLIENT - local CONTRACT_OWNER_SECRET_KEY - local USER_ACCOUNT_KEY - local USER_ACCOUNT_HASH - - # Set standard deploy parameters. - CHAIN_NAME=$(get_chain_name) - GAS_PRICE=${GAS_PRICE:-$NCTL_DEFAULT_GAS_PRICE} - GAS_PAYMENT=${GAS_PAYMENT:-$NCTL_DEFAULT_GAS_PAYMENT} - NODE_ADDRESS=$(get_node_address_rpc) - PATH_TO_CLIENT=$(get_path_to_client) - - # Set contract owner account key - i.e. faucet account. - CONTRACT_OWNER_ACCOUNT_KEY=$(get_account_key "$NCTL_ACCOUNT_TYPE_FAUCET") - - # Set contract owner secret key. - CONTRACT_OWNER_SECRET_KEY=$(get_path_to_secret_key "$NCTL_ACCOUNT_TYPE_FAUCET") - - # Set contract hash (hits node api). - CONTRACT_HASH=$(get_erc20_contract_hash "$CONTRACT_OWNER_ACCOUNT_KEY") - - # Enumerate set of users. - for USER_ID in $(seq 1 "$(get_count_of_users)") - do - # Set user account key. - USER_ACCOUNT_KEY=$(get_account_key "$NCTL_ACCOUNT_TYPE_USER" "$USER_ID") - - # Set user account hash. - USER_ACCOUNT_HASH=$(get_account_hash "$USER_ACCOUNT_KEY") - - # Dispatch deploy (hits node api). 
- DEPLOY_HASH=$( - $PATH_TO_CLIENT put-deploy \ - --chain-name "$CHAIN_NAME" \ - --gas-price "$GAS_PRICE" \ - --node-address "$NODE_ADDRESS" \ - --payment-amount "$GAS_PAYMENT" \ - --secret-key "$CONTRACT_OWNER_SECRET_KEY" \ - --ttl "1day" \ - --session-hash "$CONTRACT_HASH" \ - --session-entry-point "approve" \ - --session-arg "$(get_cl_arg_account_hash 'spender' "$USER_ACCOUNT_HASH")" \ - --session-arg "$(get_cl_arg_u256 'amount' "$AMOUNT")" \ - | jq '.result.deploy_hash' \ - | sed -e 's/^"//' -e 's/"$//' - ) - log "approving user $USER_ID deploy hash = $DEPLOY_HASH" - done -} - -# ---------------------------------------------------------------- -# ENTRY POINT -# ---------------------------------------------------------------- - -unset AMOUNT - -for ARGUMENT in "$@" -do - KEY=$(echo "$ARGUMENT" | cut -f1 -d=) - VALUE=$(echo "$ARGUMENT" | cut -f2 -d=) - case "$KEY" in - amount) AMOUNT=${VALUE} ;; - *) - esac -done - -main "${AMOUNT:-2000000000}" diff --git a/utils/nctl/sh/contracts-erc20/do_fund_users.sh b/utils/nctl/sh/contracts-erc20/do_fund_users.sh deleted file mode 100644 index 132553827b..0000000000 --- a/utils/nctl/sh/contracts-erc20/do_fund_users.sh +++ /dev/null @@ -1,84 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/utils/main.sh -source "$NCTL"/sh/contracts-erc20/utils.sh - -####################################### -# Transfers ERC-20 tokens from contract owner to test user accounts. -# Arguments: -# Amount of ERC-20 token to transfer. -####################################### -function main() -{ - local AMOUNT=${1} - local CHAIN_NAME - local GAS_PRICE - local GAS_PAYMENT - local NODE_ADDRESS - local PATH_TO_CLIENT - local CONTRACT_OWNER_SECRET_KEY - local USER_ACCOUNT_KEY - local USER_ACCOUNT_HASH - - # Set standard deploy parameters. 
- CHAIN_NAME=$(get_chain_name) - GAS_PRICE=${GAS_PRICE:-$NCTL_DEFAULT_GAS_PRICE} - GAS_PAYMENT=${GAS_PAYMENT:-$NCTL_DEFAULT_GAS_PAYMENT} - NODE_ADDRESS=$(get_node_address_rpc) - PATH_TO_CLIENT=$(get_path_to_client) - - # Set contract owner account key - i.e. faucet account. - CONTRACT_OWNER_ACCOUNT_KEY=$(get_account_key "$NCTL_ACCOUNT_TYPE_FAUCET") - - # Set contract owner secret key. - CONTRACT_OWNER_SECRET_KEY=$(get_path_to_secret_key "$NCTL_ACCOUNT_TYPE_FAUCET") - - # Set contract hash (hits node api). - CONTRACT_HASH=$(get_erc20_contract_hash "$CONTRACT_OWNER_ACCOUNT_KEY") - - # Enumerate set of users. - for USER_ID in $(seq 1 "$(get_count_of_users)") - do - # Set user account key. - USER_ACCOUNT_KEY=$(get_account_key "$NCTL_ACCOUNT_TYPE_USER" "$USER_ID") - - # Set user account hash. - USER_ACCOUNT_HASH=$(get_account_hash "$USER_ACCOUNT_KEY") - - # Dispatch deploy (hits node api). - DEPLOY_HASH=$( - $PATH_TO_CLIENT put-deploy \ - --chain-name "$CHAIN_NAME" \ - --gas-price "$GAS_PRICE" \ - --node-address "$NODE_ADDRESS" \ - --payment-amount "$GAS_PAYMENT" \ - --secret-key "$CONTRACT_OWNER_SECRET_KEY" \ - --ttl "1day" \ - --session-hash "$CONTRACT_HASH" \ - --session-entry-point "transfer" \ - --session-arg "$(get_cl_arg_account_hash 'recipient' "$USER_ACCOUNT_HASH")" \ - --session-arg "$(get_cl_arg_u256 'amount' "$AMOUNT")" \ - | jq '.result.deploy_hash' \ - | sed -e 's/^"//' -e 's/"$//' - ) - log "funding user $USER_ID deploy hash = $DEPLOY_HASH" - done -} - -# ---------------------------------------------------------------- -# ENTRY POINT -# ---------------------------------------------------------------- - -unset AMOUNT - -for ARGUMENT in "$@" -do - KEY=$(echo "$ARGUMENT" | cut -f1 -d=) - VALUE=$(echo "$ARGUMENT" | cut -f2 -d=) - case "$KEY" in - amount) AMOUNT=${VALUE} ;; - *) - esac -done - -main "${AMOUNT:-2000000000}" diff --git a/utils/nctl/sh/contracts-erc20/do_install.sh b/utils/nctl/sh/contracts-erc20/do_install.sh deleted file mode 100644 index 
8aaa2b5bd6..0000000000 --- a/utils/nctl/sh/contracts-erc20/do_install.sh +++ /dev/null @@ -1,97 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/utils/main.sh -source "$NCTL"/sh/contracts-erc20/utils.sh - -####################################### -# Installs ERC-20 token contract under network faucet account. -# Arguments: -# Name of ERC-20 token being created. -# Symbol associated with ERC-20 token. -# Total supply of ERC-20 token. -####################################### -function main() -{ - local TOKEN_NAME=${1} - local TOKEN_SYMBOL=${2} - local TOKEN_SUPPLY=${3} - local CHAIN_NAME - local GAS_PRICE - local GAS_PAYMENT - local NODE_ADDRESS - local PATH_TO_CLIENT - local PATH_TO_CONTRACT - local CONTRACT_OWNER_SECRET_KEY - - # Set standard deploy parameters. - CHAIN_NAME=$(get_chain_name) - GAS_PRICE=${GAS_PRICE:-$NCTL_DEFAULT_GAS_PRICE} - GAS_PAYMENT=10000000000000 - NODE_ADDRESS=$(get_node_address_rpc) - PATH_TO_CLIENT=$(get_path_to_client) - - # Set contract path. - PATH_TO_CONTRACT=$(get_path_to_contract "eco/erc20.wasm") - if [ ! -f "$PATH_TO_CONTRACT" ]; then - echo "ERROR: The erc20.wasm binary file cannot be found. Please compile it and move it to the following directory: $(get_path_to_net)" - return - fi - - # Set contract owner secret key. - CONTRACT_OWNER_SECRET_KEY=$(get_path_to_secret_key "$NCTL_ACCOUNT_TYPE_FAUCET") - - log "installing contract -> ERC-20" - log "... chain = $CHAIN_NAME" - log "... dispatch node = $NODE_ADDRESS" - log "... gas payment = $GAS_PAYMENT" - log "... gas price = $GAS_PRICE" - log "contract constructor args:" - log "... token name = $TOKEN_NAME" - log "... token symbol = $TOKEN_SYMBOL" - log "... token supply = $TOKEN_SUPPLY" - log "contract installation details:" - log "... path = $PATH_TO_CONTRACT" - - # Dispatch deploy (hits node api). 
- DEPLOY_HASH=$( - $PATH_TO_CLIENT put-deploy \ - --chain-name "$CHAIN_NAME" \ - --gas-price "$GAS_PRICE" \ - --node-address "$NODE_ADDRESS" \ - --payment-amount "$GAS_PAYMENT" \ - --ttl "1day" \ - --secret-key "$CONTRACT_OWNER_SECRET_KEY" \ - --session-path "$PATH_TO_CONTRACT" \ - --session-arg "$(get_cl_arg_string 'tokenName' "$TOKEN_NAME")" \ - --session-arg "$(get_cl_arg_string 'tokenSymbol' "$TOKEN_SYMBOL")" \ - --session-arg "$(get_cl_arg_u256 'tokenTotalSupply' "$TOKEN_SUPPLY")" \ - | jq '.result.deploy_hash' \ - | sed -e 's/^"//' -e 's/"$//' - ) - - log "... deploy hash = $DEPLOY_HASH" -} - -# ---------------------------------------------------------------- -# ENTRY POINT -# ---------------------------------------------------------------- - -unset TOKEN_NAME -unset TOKEN_SUPPLY -unset TOKEN_SYMBOL - -for ARGUMENT in "$@" -do - KEY=$(echo "$ARGUMENT" | cut -f1 -d=) - VALUE=$(echo "$ARGUMENT" | cut -f2 -d=) - case "$KEY" in - name) TOKEN_NAME=${VALUE} ;; - supply) TOKEN_SUPPLY=${VALUE} ;; - symbol) TOKEN_SYMBOL=${VALUE} ;; - *) - esac -done - -main "${TOKEN_NAME:-"Acme Token"}" \ - "${TOKEN_SYMBOL:-"ACME"}" \ - "${TOKEN_SUPPLY:-1000000000000000000000000000000000}" diff --git a/utils/nctl/sh/contracts-erc20/do_transfer.sh b/utils/nctl/sh/contracts-erc20/do_transfer.sh deleted file mode 100644 index 2113414b95..0000000000 --- a/utils/nctl/sh/contracts-erc20/do_transfer.sh +++ /dev/null @@ -1,96 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/utils/main.sh -source "$NCTL"/sh/contracts-erc20/utils.sh - -####################################### -# Transfers ERC-20 between test user accounts. -# Arguments: -# Amount of ERC-20 token to approve. 
-####################################### -function main() -{ - local AMOUNT=${1} - local CHAIN_NAME - local DEPLOY_HASH - local GAS_PRICE - local GAS_PAYMENT - local NODE_ADDRESS - local PATH_TO_CLIENT - local USER_1_ACCOUNT_HASH - local USER_1_ACCOUNT_KEY - local USER_1_ID - local USER_2_ACCOUNT_HASH - local USER_2_ACCOUNT_KEY - local USER_2_ID - - # Set standard deploy parameters. - CHAIN_NAME=$(get_chain_name) - GAS_PRICE=${GAS_PRICE:-$NCTL_DEFAULT_GAS_PRICE} - GAS_PAYMENT=${GAS_PAYMENT:-$NCTL_DEFAULT_GAS_PAYMENT} - NODE_ADDRESS=$(get_node_address_rpc) - PATH_TO_CLIENT=$(get_path_to_client) - - # Set contract owner account key - i.e. faucet account. - CONTRACT_OWNER_ACCOUNT_KEY=$(get_account_key "$NCTL_ACCOUNT_TYPE_FAUCET") - - # Set contract owner secret key. - CONTRACT_OWNER_SECRET_KEY=$(get_path_to_secret_key "$NCTL_ACCOUNT_TYPE_FAUCET") - - # Set contract hash (hits node api). - CONTRACT_HASH=$(get_erc20_contract_hash "$CONTRACT_OWNER_ACCOUNT_KEY") - - # Enumerate set of users. - for USER_1_ID in $(seq 1 "$(get_count_of_users)") - do - # Set user 2 id. - if [ "$USER_1_ID" -lt "$(get_count_of_users)" ]; then - USER_2_ID=$((USER_1_ID + 1)) - else - USER_2_ID=1 - fi - - # Set user account info. - USER_1_ACCOUNT_KEY=$(get_account_key "$NCTL_ACCOUNT_TYPE_USER" "$USER_1_ID") - USER_2_ACCOUNT_KEY=$(get_account_key "$NCTL_ACCOUNT_TYPE_USER" "$USER_2_ID") - USER_1_ACCOUNT_HASH=$(get_account_hash "$USER_1_ACCOUNT_KEY") - USER_2_ACCOUNT_HASH=$(get_account_hash "$USER_2_ACCOUNT_KEY") - - # Dispatch deploy (hits node api). 
- DEPLOY_HASH=$( - $PATH_TO_CLIENT put-deploy \ - --chain-name "$CHAIN_NAME" \ - --gas-price "$GAS_PRICE" \ - --node-address "$NODE_ADDRESS" \ - --payment-amount "$GAS_PAYMENT" \ - --secret-key "$CONTRACT_OWNER_SECRET_KEY" \ - --ttl "1day" \ - --session-hash "$CONTRACT_HASH" \ - --session-entry-point "transferFrom" \ - --session-arg "$(get_cl_arg_account_hash 'owner' "$USER_1_ACCOUNT_HASH")" \ - --session-arg "$(get_cl_arg_account_hash 'recipient' "$USER_2_ACCOUNT_HASH")" \ - --session-arg "$(get_cl_arg_u256 'amount' "$AMOUNT")" \ - | jq '.result.deploy_hash' \ - | sed -e 's/^"//' -e 's/"$//' - ) - log "token transfer from user $USER_1_ID -> $USER_2_ID deploy hash = $DEPLOY_HASH" - done -} - -# ---------------------------------------------------------------- -# ENTRY POINT -# ---------------------------------------------------------------- - -unset AMOUNT - -for ARGUMENT in "$@" -do - KEY=$(echo "$ARGUMENT" | cut -f1 -d=) - VALUE=$(echo "$ARGUMENT" | cut -f2 -d=) - case "$KEY" in - amount) AMOUNT=${VALUE} ;; - *) - esac -done - -main "${AMOUNT:-1000000000}" diff --git a/utils/nctl/sh/contracts-erc20/recipe.md b/utils/nctl/sh/contracts-erc20/recipe.md deleted file mode 100644 index f030fac660..0000000000 --- a/utils/nctl/sh/contracts-erc20/recipe.md +++ /dev/null @@ -1,25 +0,0 @@ -# Spin up net. -nctl-assets-setup -nctl-start - -# Await for the faucet to be funded. -sleep 30.0 -nctl-view-faucet-accounts - -# Fund user accounts. -nctl-do-fund-users -sleep 10.0 - -# Install ERC20 contract under the faucet account. -nctl-erc20-install name=Broadleaf symbol=BLF supply=1000000000000000000000000000000000 -sleep 5.0 - -# View contract details. -nctl-erc20-view-details - -# Fund user accounts with ERC-20 tokens. -nctl-erc20-fund-users -sleep 5.0 - -# View ERC-20 token user balances. 
-nctl-erc20-view-balances diff --git a/utils/nctl/sh/contracts-erc20/utils.sh b/utils/nctl/sh/contracts-erc20/utils.sh deleted file mode 100644 index 62be2dc352..0000000000 --- a/utils/nctl/sh/contracts-erc20/utils.sh +++ /dev/null @@ -1,38 +0,0 @@ -#!/usr/bin/env bash - -####################################### -# ERC-20: get on-chain contract hash. -# Arguments: -# Contract owner account key. -####################################### -function get_erc20_contract_hash () -{ - local ACCOUNT_KEY=${1} - - $(get_path_to_client) query-state \ - --node-address "$(get_node_address_rpc)" \ - --state-root-hash "$(get_state_root_hash)" \ - --key "$ACCOUNT_KEY" \ - | jq '.result.stored_value.Account.named_keys[] | select(.name == "ERC20") | .key' \ - | sed -e 's/^"//' -e 's/"$//' -} - -####################################### -# ERC-20: get on-chain contract key value. -# Arguments: -# Contract owner account key. -# State query path. -####################################### -function get_erc20_contract_key_value () -{ - local QUERY_KEY=${1} - local QUERY_PATH=${2} - - $(get_path_to_client) query-state \ - --node-address "$(get_node_address_rpc)" \ - --state-root-hash "$(get_state_root_hash)" \ - --key "$QUERY_KEY" \ - --query-path "$QUERY_PATH" \ - | jq '.result.stored_value.CLValue.parsed' \ - | sed -e 's/^"//' -e 's/"$//' -} diff --git a/utils/nctl/sh/contracts-erc20/view_allowances.sh b/utils/nctl/sh/contracts-erc20/view_allowances.sh deleted file mode 100644 index 02e72e2d71..0000000000 --- a/utils/nctl/sh/contracts-erc20/view_allowances.sh +++ /dev/null @@ -1,59 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/utils/main.sh -source "$NCTL"/sh/contracts-erc20/utils.sh - -####################################### -# Renders ERC-20 token contract balances. 
-####################################### -function main() -{ - local ALLOWANCE_KEY - local CONTRACT_OWNER_ACCOUNT_KEY - local CONTRACT_OWNER_ACCOUNT_HASH - local CONTRACT_HASH - local TOKEN_SYMBOL - local USER_ID - local USER_ACCOUNT_KEY - local USER_ACCOUNT_HASH - - # Set contract owner account key - i.e. faucet account. - CONTRACT_OWNER_ACCOUNT_KEY=$(get_account_key "$NCTL_ACCOUNT_TYPE_FAUCET") - - # Set contract owner account hash. - CONTRACT_OWNER_ACCOUNT_HASH=$(get_account_hash "$CONTRACT_OWNER_ACCOUNT_KEY") - - # Set contract hash (hits node api). - CONTRACT_HASH=$(get_erc20_contract_hash "$CONTRACT_OWNER_ACCOUNT_KEY") - - # Set token symbol (hits node api). - TOKEN_SYMBOL=$(get_erc20_contract_key_value "$CONTRACT_HASH" "_symbol") - - log "ERC-20 $TOKEN_SYMBOL contract:" - log "... contract hash = $CONTRACT_HASH" - log "... account allowances:" - - # Render user account approvals. - for USER_ID in $(seq 1 "$(get_count_of_users)") - do - # Set user account key. - USER_ACCOUNT_KEY=$(get_account_key "$NCTL_ACCOUNT_TYPE_USER" "$USER_ID") - - # Set user account hash. - USER_ACCOUNT_HASH=$(get_account_hash "$USER_ACCOUNT_KEY") - - # Set faucet <-> user allowance state query key. - ALLOWANCE_KEY="_allowances_"$CONTRACT_OWNER_ACCOUNT_HASH$USER_ACCOUNT_HASH - - # Set faucet <-> user allowance (hits node api). - ALLOWANCE=$(get_erc20_contract_key_value "$CONTRACT_HASH" "$ALLOWANCE_KEY") - - log "... ... 
user $USER_ID = $ALLOWANCE" - done -} - -# ---------------------------------------------------------------- -# ENTRY POINT -# ---------------------------------------------------------------- - -main diff --git a/utils/nctl/sh/contracts-erc20/view_balances.sh b/utils/nctl/sh/contracts-erc20/view_balances.sh deleted file mode 100644 index a2d5a3b5d7..0000000000 --- a/utils/nctl/sh/contracts-erc20/view_balances.sh +++ /dev/null @@ -1,69 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/utils/main.sh -source "$NCTL"/sh/contracts-erc20/utils.sh - -####################################### -# Renders ERC-20 token contract balances. -####################################### -function main() -{ - local CONTRACT_OWNER_ACCOUNT_KEY - local CONTRACT_OWNER_ACCOUNT_HASH - local CONTRACT_OWNER_ACCOUNT_BALANCE_KEY - local CONTRACT_HASH - local CONTRACT_OWNER_ACCOUNT_BALANCE - local TOKEN_SYMBOL - local USER_ID - local USER_ACCOUNT_KEY - local USER_ACCOUNT_HASH - local USER_ACCOUNT_BALANCE_KEY - local USER_ACCOUNT_BALANCE - - # Set contract owner account key - i.e. faucet account. - CONTRACT_OWNER_ACCOUNT_KEY=$(get_account_key "$NCTL_ACCOUNT_TYPE_FAUCET") - - # Set contract owner account hash. - CONTRACT_OWNER_ACCOUNT_HASH=$(get_account_hash "$CONTRACT_OWNER_ACCOUNT_KEY") - - # Set contract owner ERC-20 balance key. - CONTRACT_OWNER_ACCOUNT_BALANCE_KEY="_balances_$CONTRACT_OWNER_ACCOUNT_HASH" - - # Set contract hash (hits node api). - CONTRACT_HASH=$(get_erc20_contract_hash "$CONTRACT_OWNER_ACCOUNT_KEY") - log "ERC-20 $TOKEN_SYMBOL contract:" - log "... contract hash = $CONTRACT_HASH" - - # Set contract owner account balance (hits node api). - CONTRACT_OWNER_ACCOUNT_BALANCE=$(get_erc20_contract_key_value "$CONTRACT_HASH" "$CONTRACT_OWNER_ACCOUNT_BALANCE_KEY") - - # Set token symbol (hits node api). - TOKEN_SYMBOL=$(get_erc20_contract_key_value "$CONTRACT_HASH" "_symbol") - - log "... account balances:" - log "... ... 
contract owner = $CONTRACT_OWNER_ACCOUNT_BALANCE" - - # Render user account balances. - for USER_ID in $(seq 1 "$(get_count_of_users)") - do - # Set user account key. - USER_ACCOUNT_KEY=$(get_account_key "$NCTL_ACCOUNT_TYPE_USER" "$USER_ID") - - # Set user account hash. - USER_ACCOUNT_HASH=$(get_account_hash "$USER_ACCOUNT_KEY") - - # Set user ERC-20 balance key. - USER_ACCOUNT_BALANCE_KEY="_balances_$USER_ACCOUNT_HASH" - - # Set user account balance (hits node api). - USER_ACCOUNT_BALANCE=$(get_erc20_contract_key_value "$CONTRACT_HASH" "$USER_ACCOUNT_BALANCE_KEY") - - log "... ... user $USER_ID = $USER_ACCOUNT_BALANCE" - done -} - -# ---------------------------------------------------------------- -# ENTRY POINT -# ---------------------------------------------------------------- - -main diff --git a/utils/nctl/sh/contracts-erc20/view_details.sh b/utils/nctl/sh/contracts-erc20/view_details.sh deleted file mode 100644 index 52c8f68163..0000000000 --- a/utils/nctl/sh/contracts-erc20/view_details.sh +++ /dev/null @@ -1,50 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/utils/main.sh -source "$NCTL"/sh/contracts-erc20/utils.sh - -####################################### -# Renders ERC-20 token contract details. -####################################### -function main() -{ - local CONTRACT_OWNER_ACCOUNT_KEY - local CONTRACT_HASH - local TOKEN_NAME - local TOKEN_SYMBOL - local TOKEN_SUPPLY - local TOKEN_DECIMALS - - # Set contract owner account key - i.e. faucet account. - CONTRACT_OWNER_ACCOUNT_KEY=$(get_account_key "$NCTL_ACCOUNT_TYPE_FAUCET") - - # Set contract hash (hits node api). - CONTRACT_HASH=$(get_erc20_contract_hash "$CONTRACT_OWNER_ACCOUNT_KEY") - - # Set token name (hits node api). - TOKEN_NAME=$(get_erc20_contract_key_value "$CONTRACT_HASH" "_name") - - # Set token symbol (hits node api). - TOKEN_SYMBOL=$(get_erc20_contract_key_value "$CONTRACT_HASH" "_symbol") - - # Set token supply (hits node api). 
- TOKEN_SUPPLY=$(get_erc20_contract_key_value "$CONTRACT_HASH" "_totalSupply") - - # Set token decimals (hits node api). - TOKEN_DECIMALS=$(get_erc20_contract_key_value "$CONTRACT_HASH" "_decimals") - - log "Contract Details -> ERC-20" - log "... on-chain name = ERC20" - log "... on-chain hash = $CONTRACT_HASH" - log "... owner account = $CONTRACT_OWNER_ACCOUNT_KEY" - log "... token name = $TOKEN_NAME" - log "... token symbol = $TOKEN_SYMBOL" - log "... token supply = $TOKEN_SUPPLY" - log "... token decimals = $TOKEN_DECIMALS" -} - -# ---------------------------------------------------------------- -# ENTRY POINT -# ---------------------------------------------------------------- - -main diff --git a/utils/nctl/sh/contracts-kv/do_install.sh b/utils/nctl/sh/contracts-kv/do_install.sh deleted file mode 100644 index 1a4caceb10..0000000000 --- a/utils/nctl/sh/contracts-kv/do_install.sh +++ /dev/null @@ -1,67 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/utils/main.sh -source "$NCTL"/sh/contracts-kv/utils.sh - -####################################### -# KV-STORAGE: Installs contract under network faucet account. -####################################### -function main() -{ - local CHAIN_NAME - local GAS_PRICE - local GAS_PAYMENT - local NODE_ADDRESS - local PATH_TO_CLIENT - local PATH_TO_CONTRACT - local CONTRACT_OWNER_SECRET_KEY - - # Set standard deploy parameters. - CHAIN_NAME=$(get_chain_name) - GAS_PRICE=${GAS_PRICE:-$NCTL_DEFAULT_GAS_PRICE} - GAS_PAYMENT=10000000000000 - NODE_ADDRESS=$(get_node_address_rpc) - PATH_TO_CLIENT=$(get_path_to_client) - - # Set contract path. - PATH_TO_CONTRACT=$(get_path_to_contract "eco/kv-storage.wasm") - if [ ! -f "$PATH_TO_CONTRACT" ]; then - echo "ERROR: The kv-storage.wasm binary file cannot be found. Please compile it and move it to the following directory: $(get_path_to_net)" - return - fi - - # Set contract owner secret key. 
- CONTRACT_OWNER_SECRET_KEY=$(get_path_to_secret_key "$NCTL_ACCOUNT_TYPE_FAUCET") - - log "installing contract -> KV storage" - log "... chain = $CHAIN_NAME" - log "... dispatch node = $NODE_ADDRESS" - log "... gas payment = $GAS_PAYMENT" - log "... gas price = $GAS_PRICE" - log "contract constructor args:" - log "... N/A" - log "contract installation details:" - log "... path = $PATH_TO_CONTRACT" - - # Dispatch deploy (hits node api). - DEPLOY_HASH=$( - $PATH_TO_CLIENT put-deploy \ - --chain-name "$CHAIN_NAME" \ - --gas-price "$GAS_PRICE" \ - --node-address "$NODE_ADDRESS" \ - --payment-amount "$GAS_PAYMENT" \ - --ttl "1day" \ - --secret-key "$CONTRACT_OWNER_SECRET_KEY" \ - --session-path "$PATH_TO_CONTRACT" \ - | jq '.result.deploy_hash' \ - | sed -e 's/^"//' -e 's/"$//' - ) - - log "... deploy hash = $DEPLOY_HASH" -} - -# ---------------------------------------------------------------- -# ENTRY POINT -# ---------------------------------------------------------------- - -main diff --git a/utils/nctl/sh/contracts-kv/get_key.sh b/utils/nctl/sh/contracts-kv/get_key.sh deleted file mode 100644 index e78de655bb..0000000000 --- a/utils/nctl/sh/contracts-kv/get_key.sh +++ /dev/null @@ -1,53 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/utils/main.sh -source "$NCTL"/sh/contracts-kv/utils.sh - -####################################### -# KV-STORAGE: gets key stored within contract storage. -# Arguments: -# Name of key to be read. -####################################### -function main() -{ - local KEY_NAME=${1} - local CONTRACT_OWNER_ACCOUNT_KEY - local CONTRACT_HASH - - # Set contract owner account key - i.e. faucet account. - CONTRACT_OWNER_ACCOUNT_KEY=$(get_account_key "$NCTL_ACCOUNT_TYPE_FAUCET") - - # Set contract hash (hits node api). - CONTRACT_HASH=$(get_kv_contract_hash "$CONTRACT_OWNER_ACCOUNT_KEY") - - # Set stored key value (hits node api). 
- KEY_VALUE=$(get_kv_contract_key_value "$CONTRACT_HASH" "$KEY_NAME") - - log "Contract Details -> KV-STORAGE" - log "... contract name : kvstorage_contract" - log "... contract hash : $CONTRACT_HASH" - log "... contract owner : $CONTRACT_OWNER_ACCOUNT_KEY" - log "... key name : $KEY_NAME" - log "... key value type : $(jq -n --argjson data "$KEY_VALUE" '$data.cl_type')" - log "... key value raw : $(jq -n --argjson data "$KEY_VALUE" '$data.bytes')" - log "... key value parsed : $(jq -n --argjson data "$KEY_VALUE" '$data.parsed')" -} - -# ---------------------------------------------------------------- -# ENTRY POINT -# ---------------------------------------------------------------- - -unset KEY_NAME - -for ARGUMENT in "$@" -do - KEY=$(echo "$ARGUMENT" | cut -f1 -d=) - VALUE=$(echo "$ARGUMENT" | cut -f2 -d=) - case "$KEY" in - key) KEY_NAME=${VALUE} ;; - *) - esac -done - -main \ - "${KEY_NAME:-"main"}" diff --git a/utils/nctl/sh/contracts-kv/set_key.sh b/utils/nctl/sh/contracts-kv/set_key.sh deleted file mode 100644 index cb71b13eb1..0000000000 --- a/utils/nctl/sh/contracts-kv/set_key.sh +++ /dev/null @@ -1,126 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/utils/main.sh -source "$NCTL"/sh/contracts-kv/utils.sh - -####################################### -# KV-STORAGE: sets value of key stored under contract. -# Arguments: -# Name of key to be written. -# Type of key to be written. -# Value of key to be written. -####################################### -function main() -{ - local KEY_NAME=${1} - local KEY_TYPE=${2} - local KEY_VALUE=${3} - local CHAIN_NAME - local GAS_PRICE - local GAS_PAYMENT - local NODE_ADDRESS - local PATH_TO_CLIENT - local CONTRACT_HASH - local CONTRACT_OWNER_ACCOUNT_KEY - local CONTRACT_OWNER_SECRET_KEY - - # Set standard deploy parameters. 
- CHAIN_NAME=$(get_chain_name) - GAS_PRICE=${GAS_PRICE:-$NCTL_DEFAULT_GAS_PRICE} - GAS_PAYMENT=${GAS_PAYMENT:-$NCTL_DEFAULT_GAS_PAYMENT} - NODE_ADDRESS=$(get_node_address_rpc) - PATH_TO_CLIENT=$(get_path_to_client) - - # Set contract owner account key - i.e. faucet account. - CONTRACT_OWNER_ACCOUNT_KEY=$(get_account_key "$NCTL_ACCOUNT_TYPE_FAUCET") - - # Set contract owner secret key. - CONTRACT_OWNER_SECRET_KEY=$(get_path_to_secret_key "$NCTL_ACCOUNT_TYPE_FAUCET") - - # Set contract hash (hits node api). - CONTRACT_HASH=$(get_kv_contract_hash "$CONTRACT_OWNER_ACCOUNT_KEY") - - # Dispatch deploy (hits node api). - DEPLOY_HASH=$( - $PATH_TO_CLIENT put-deploy \ - --chain-name "$CHAIN_NAME" \ - --gas-price "$GAS_PRICE" \ - --node-address "$NODE_ADDRESS" \ - --payment-amount "$GAS_PAYMENT" \ - --secret-key "$CONTRACT_OWNER_SECRET_KEY" \ - --ttl "1day" \ - --session-hash "$CONTRACT_HASH" \ - --session-entry-point "$(get_contract_entry_point "$KEY_TYPE")" \ - --session-arg "$(get_cl_arg_string 'name' "$KEY_NAME")" \ - --session-arg "$(get_key_value_session_arg "$KEY_TYPE" "$KEY_VALUE")" \ - | jq '.result.deploy_hash' \ - | sed -e 's/^"//' -e 's/"$//' - ) - log "set key deploy hash = $DEPLOY_HASH" -} - -####################################### -# Returns contract entry point mapped from key type. -####################################### -function get_contract_entry_point () -{ - local KEY_TYPE=${1} - - if [ "$KEY_TYPE" == "string" ]; then - echo "store_string" - elif [ "$KEY_TYPE" == "u64" ]; then - echo "store_u64" - elif [ "$KEY_TYPE" == "u512" ]; then - echo "store_u512" - elif [ "$KEY_TYPE" == "account-hash" ]; then - echo "store_account_hash" - else - echo "store_string" - fi -} - -####################################### -# Returns contract key value session arg mapped from key type. 
-####################################### -function get_key_value_session_arg () -{ - local KEY_TYPE=${1} - local KEY_VALUE=${2} - - if [ "$KEY_TYPE" == "string" ]; then - get_cl_arg_string 'value' "$KEY_VALUE" - elif [ "$KEY_TYPE" == "u64" ]; then - get_cl_arg_u64 'value' "$KEY_VALUE" - elif [ "$KEY_TYPE" == "u512" ]; then - get_cl_arg_u512 'value' "$KEY_VALUE" - elif [ "$KEY_TYPE" == "account-hash" ]; then - get_cl_arg_account_hash 'value' "$KEY_VALUE" - else - get_cl_arg_string 'value' "$KEY_VALUE" - fi -} - -# ---------------------------------------------------------------- -# ENTRY POINT -# ---------------------------------------------------------------- - -unset KEY_NAME -unset KEY_TYPE -unset KEY_VALUE - -for ARGUMENT in "$@" -do - KEY=$(echo "$ARGUMENT" | cut -f1 -d=) - VALUE=$(echo "$ARGUMENT" | cut -f2 -d=) - case "$KEY" in - key) KEY_NAME=${VALUE} ;; - type) KEY_TYPE=${VALUE} ;; - value) KEY_VALUE=${VALUE} ;; - *) - esac -done - -main \ - "${KEY_NAME:-"main"}" \ - "${KEY_TYPE:-"string"}" \ - "${KEY_VALUE:-"hello dolly"}" diff --git a/utils/nctl/sh/contracts-kv/utils.sh b/utils/nctl/sh/contracts-kv/utils.sh deleted file mode 100644 index f68bd37f75..0000000000 --- a/utils/nctl/sh/contracts-kv/utils.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/usr/bin/env bash - -####################################### -# KV-STORAGE: get on-chain contract hash. -# Arguments: -# Contract owner account key. -####################################### -function get_kv_contract_hash () -{ - local ACCOUNT_KEY=${1} - - $(get_path_to_client) query-state \ - --node-address "$(get_node_address_rpc)" \ - --state-root-hash "$(get_state_root_hash)" \ - --key "$ACCOUNT_KEY" \ - | jq '.result.stored_value.Account.named_keys[] | select(.name == "kvstorage_contract") | .key' \ - | sed -e 's/^"//' -e 's/"$//' -} - -####################################### -# KV-STORAGE: get on-chain contract key value. -# Arguments: -# Contract owner account key. -# State query path. 
-####################################### -function get_kv_contract_key_value () -{ - local QUERY_KEY=${1} - local QUERY_PATH=${2} - - $(get_path_to_client) query-state \ - --node-address "$(get_node_address_rpc)" \ - --state-root-hash "$(get_state_root_hash)" \ - --key "$QUERY_KEY" \ - --query-path "$QUERY_PATH" \ - | jq '.result.stored_value.CLValue'; -} diff --git a/utils/nctl/sh/contracts-transfers/do_dispatch_native.sh b/utils/nctl/sh/contracts-transfers/do_dispatch_native.sh deleted file mode 100644 index 8aaf33d69a..0000000000 --- a/utils/nctl/sh/contracts-transfers/do_dispatch_native.sh +++ /dev/null @@ -1,110 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/utils/main.sh - -####################################### -# Dispatches native transfers to a test net. -# Arguments: -# Transfer amount. -# User ordinal identifier. -# Count of transfers to be dispatched. -# Transfer dispatch interval. -# Node ordinal identifier. -####################################### -function main() -{ - local AMOUNT=${1} - local USER_ID=${2} - local TRANSFERS=${3} - local INTERVAL=${4} - local NODE_ID=${5} - local CHAIN_NAME - local GAS_PRICE - local GAS_PAYMENT - local NODE_ADDRESS - local PATH_TO_CLIENT - local CP1_SECRET_KEY - local CP1_ACCOUNT_KEY - local CP2_ACCOUNT_KEY - local DISPATCHED - local DISPATCH_NODE_ADDRESS - - CHAIN_NAME=$(get_chain_name) - GAS_PRICE=${GAS_PRICE:-$NCTL_DEFAULT_GAS_PRICE} - GAS_PAYMENT=${GAS_PAYMENT:-$NCTL_DEFAULT_GAS_PAYMENT} - PATH_TO_CLIENT=$(get_path_to_client) - - if [ "$NODE_ID" == "random" ]; then - unset NODE_ADDRESS - elif [ "$NODE_ID" -eq 0 ]; then - NODE_ADDRESS=$(get_node_address_rpc) - else - NODE_ADDRESS=$(get_node_address_rpc "$NODE_ID") - fi - - CP1_SECRET_KEY=$(get_path_to_secret_key "$NCTL_ACCOUNT_TYPE_FAUCET") - CP1_ACCOUNT_KEY=$(get_account_key "$NCTL_ACCOUNT_TYPE_FAUCET") - CP2_ACCOUNT_KEY=$(get_account_key "$NCTL_ACCOUNT_TYPE_USER" "$USER_ID") - - log "dispatching $TRANSFERS native transfers" - log "... 
chain=$CHAIN_NAME" - log "... transfer amount=$AMOUNT" - log "... transfer interval=$INTERVAL (s)" - log "... counter-party 1 public key=$CP1_ACCOUNT_KEY" - log "... counter-party 2 public key=$CP2_ACCOUNT_KEY" - log "... dispatched deploys:" - - DISPATCHED=0 - while [ $DISPATCHED -lt "$TRANSFERS" ]; - do - DISPATCH_NODE_ADDRESS=${NODE_ADDRESS:-$(get_node_address_rpc)} - DEPLOY_HASH=$( - $PATH_TO_CLIENT transfer \ - --chain-name "$CHAIN_NAME" \ - --gas-price "$GAS_PRICE" \ - --node-address "$DISPATCH_NODE_ADDRESS" \ - --payment-amount "$GAS_PAYMENT" \ - --ttl "1day" \ - --secret-key "$CP1_SECRET_KEY" \ - --amount "$AMOUNT" \ - --target-account "$CP2_ACCOUNT_KEY" \ - | jq '.result.deploy_hash' \ - | sed -e 's/^"//' -e 's/"$//' - ) - DISPATCHED=$((DISPATCHED + 1)) - log "... #$DISPATCHED :: $DISPATCH_NODE_ADDRESS :: $DEPLOY_HASH" - sleep "$INTERVAL" - done - - log "dispatched $TRANSFERS native transfers" -} - -# ---------------------------------------------------------------- -# ENTRY POINT -# ---------------------------------------------------------------- - -unset AMOUNT -unset INTERVAL -unset NODE_ID -unset TRANSFERS -unset USER_ID - -for ARGUMENT in "$@" -do - KEY=$(echo "$ARGUMENT" | cut -f1 -d=) - VALUE=$(echo "$ARGUMENT" | cut -f2 -d=) - case "$KEY" in - amount) AMOUNT=${VALUE} ;; - interval) INTERVAL=${VALUE} ;; - node) NODE_ID=${VALUE} ;; - transfers) TRANSFERS=${VALUE} ;; - user) USER_ID=${VALUE} ;; - *) - esac -done - -main "${AMOUNT:-$NCTL_DEFAULT_TRANSFER_AMOUNT}" \ - "${USER_ID:-1}" \ - "${TRANSFERS:-100}" \ - "${INTERVAL:-0.01}" \ - "${NODE_ID:-"random"}" diff --git a/utils/nctl/sh/contracts-transfers/do_dispatch_native_batch.sh b/utils/nctl/sh/contracts-transfers/do_dispatch_native_batch.sh deleted file mode 100644 index 0ead52f08b..0000000000 --- a/utils/nctl/sh/contracts-transfers/do_dispatch_native_batch.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/utils/main.sh -source "$NCTL"/sh/contracts-transfers/utils.sh - -# 
---------------------------------------------------------------- -# ENTRY POINT -# ---------------------------------------------------------------- - -unset BATCH_ID -unset INTERVAL -unset NODE_ID - -for ARGUMENT in "$@" -do - KEY=$(echo "$ARGUMENT" | cut -f1 -d=) - VALUE=$(echo "$ARGUMENT" | cut -f2 -d=) - case "$KEY" in - batch) BATCH_ID=${VALUE} ;; - interval) INTERVAL=${VALUE} ;; - node) NODE_ID=${VALUE} ;; - *) - esac -done - -do_dispatch_batch "${BATCH_ID:-1}" \ - "transfer-native" \ - "${INTERVAL:-0.01}" \ - "${NODE_ID:-"random"}" diff --git a/utils/nctl/sh/contracts-transfers/do_dispatch_wasm.sh b/utils/nctl/sh/contracts-transfers/do_dispatch_wasm.sh deleted file mode 100644 index 94734256d0..0000000000 --- a/utils/nctl/sh/contracts-transfers/do_dispatch_wasm.sh +++ /dev/null @@ -1,115 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/utils/main.sh - -####################################### -# Dispatches wasm transfers to a test net. -# Arguments: -# Transfer amount. -# User ordinal identifier. -# Count of transfers to be dispatched. -# Transfer dispatch interval. -# Node ordinal identifier. 
-####################################### -function main() -{ - local AMOUNT=${1} - local USER_ID=${2} - local TRANSFERS=${3} - local INTERVAL=${4} - local NODE_ID=${5} - local CHAIN_NAME - local GAS_PRICE - local GAS_PAYMENT - local NODE_ADDRESS - local PATH_TO_CLIENT - local CP1_SECRET_KEY - local CP1_ACCOUNT_KEY - local CP2_ACCOUNT_KEY - local DISPATCHED - local DISPATCH_NODE_ADDRESS - - CHAIN_NAME=$(get_chain_name) - GAS_PRICE=${GAS_PRICE:-$NCTL_DEFAULT_GAS_PRICE} - GAS_PAYMENT=${GAS_PAYMENT:-$NCTL_DEFAULT_GAS_PAYMENT} - PATH_TO_CLIENT=$(get_path_to_client) - PATH_TO_CONTRACT=$(get_path_to_contract "transfers/transfer_to_account_u512.wasm") - - CP1_SECRET_KEY=$(get_path_to_secret_key "$NCTL_ACCOUNT_TYPE_FAUCET") - CP1_ACCOUNT_KEY=$(get_account_key "$NCTL_ACCOUNT_TYPE_FAUCET") - CP2_ACCOUNT_KEY=$(get_account_key "$NCTL_ACCOUNT_TYPE_USER" "$USER_ID") - CP2_ACCOUNT_HASH=$(get_account_hash "$CP2_ACCOUNT_KEY") - - if [ "$NODE_ID" == "random" ]; then - unset NODE_ADDRESS - elif [ "$NODE_ID" -eq 0 ]; then - NODE_ADDRESS=$(get_node_address_rpc) - else - NODE_ADDRESS=$(get_node_address_rpc "$NODE_ID") - fi - - log "dispatching $TRANSFERS wasm transfers" - log "... chain=$CHAIN_NAME" - log "... transfer amount=$AMOUNT" - log "... transfer contract=$PATH_TO_CONTRACT" - log "... transfer interval=$INTERVAL (s)" - log "... counter-party 1 public key=$CP1_ACCOUNT_KEY" - log "... counter-party 2 public key=$CP2_ACCOUNT_KEY" - log "... counter-party 2 account hash=$CP2_ACCOUNT_HASH" - log "... 
dispatched deploys:" - - DISPATCHED=0 - while [ $DISPATCHED -lt "$TRANSFERS" ]; - do - DISPATCH_NODE_ADDRESS=${NODE_ADDRESS:-$(get_node_address_rpc)} - DEPLOY_HASH=$( - $PATH_TO_CLIENT put-deploy \ - --chain-name "$CHAIN_NAME" \ - --gas-price "$GAS_PRICE" \ - --node-address "$DISPATCH_NODE_ADDRESS" \ - --payment-amount "$GAS_PAYMENT" \ - --ttl "1day" \ - --secret-key "$CP1_SECRET_KEY" \ - --session-arg "$(get_cl_arg_u512 'amount' "$AMOUNT")" \ - --session-arg "$(get_cl_arg_account_hash 'target' "$CP2_ACCOUNT_HASH")" \ - --session-path "$PATH_TO_CONTRACT" \ - | jq '.result.deploy_hash' \ - | sed -e 's/^"//' -e 's/"$//' - ) - DISPATCHED=$((DISPATCHED + 1)) - log "... #$DISPATCHED :: $DISPATCH_NODE_ADDRESS :: $DEPLOY_HASH" - sleep "$INTERVAL" - done - - log "dispatched $TRANSFERS wasm transfers" -} - -# ---------------------------------------------------------------- -# ENTRY POINT -# ---------------------------------------------------------------- - -unset AMOUNT -unset INTERVAL -unset NODE_ID -unset TRANSFERS -unset USER_ID - -for ARGUMENT in "$@" -do - KEY=$(echo "$ARGUMENT" | cut -f1 -d=) - VALUE=$(echo "$ARGUMENT" | cut -f2 -d=) - case "$KEY" in - amount) AMOUNT=${VALUE} ;; - interval) INTERVAL=${VALUE} ;; - node) NODE_ID=${VALUE} ;; - transfers) TRANSFERS=${VALUE} ;; - user) USER_ID=${VALUE} ;; - *) - esac -done - -main "${AMOUNT:-$NCTL_DEFAULT_TRANSFER_AMOUNT}" \ - "${USER_ID:-1}" \ - "${TRANSFERS:-100}" \ - "${INTERVAL:-0.01}" \ - "${NODE_ID:-"random"}" diff --git a/utils/nctl/sh/contracts-transfers/do_dispatch_wasm_batch.sh b/utils/nctl/sh/contracts-transfers/do_dispatch_wasm_batch.sh deleted file mode 100644 index 7a2b82088b..0000000000 --- a/utils/nctl/sh/contracts-transfers/do_dispatch_wasm_batch.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/utils/main.sh -source "$NCTL"/sh/contracts-transfers/utils.sh - -# ---------------------------------------------------------------- -# ENTRY POINT -# 
---------------------------------------------------------------- - -unset BATCH_ID -unset INTERVAL -unset NODE_ID - -for ARGUMENT in "$@" -do - KEY=$(echo "$ARGUMENT" | cut -f1 -d=) - VALUE=$(echo "$ARGUMENT" | cut -f2 -d=) - case "$KEY" in - batch) BATCH_ID=${VALUE} ;; - interval) INTERVAL=${VALUE} ;; - node) NODE_ID=${VALUE} ;; - *) - esac -done - -do_dispatch_batch "${BATCH_ID:-1}" \ - "transfer-wasm" \ - "${INTERVAL:-0.01}" \ - "${NODE_ID:-"random"}" diff --git a/utils/nctl/sh/contracts-transfers/do_prepare_native_batch.sh b/utils/nctl/sh/contracts-transfers/do_prepare_native_batch.sh deleted file mode 100644 index ecad44396a..0000000000 --- a/utils/nctl/sh/contracts-transfers/do_prepare_native_batch.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/utils/main.sh - -####################################### -# Prepares native transfers for dispatch to a test net. -# Arguments: -# Network ordinal identifier. -# Node ordinal identifier. -# Transfer dispatch interval. 
-####################################### -function main() -{ - echo "prepare native not implemented - awaiting core dev feedback" -} - -# ---------------------------------------------------------------- -# ENTRY POINT -# ---------------------------------------------------------------- - -unset AMOUNT -unset BATCH_COUNT -unset BATCH_SIZE - -for ARGUMENT in "$@" -do - KEY=$(echo "$ARGUMENT" | cut -f1 -d=) - VALUE=$(echo "$ARGUMENT" | cut -f2 -d=) - case "$KEY" in - amount) AMOUNT=${VALUE} ;; - count) BATCH_COUNT=${VALUE} ;; - size) BATCH_SIZE=${VALUE} ;; - *) - esac -done - -main "${AMOUNT:-$NCTL_DEFAULT_TRANSFER_AMOUNT}" \ - "${BATCH_COUNT:-5}" \ - "${BATCH_SIZE:-200}" diff --git a/utils/nctl/sh/contracts-transfers/do_prepare_wasm_batch.sh b/utils/nctl/sh/contracts-transfers/do_prepare_wasm_batch.sh deleted file mode 100644 index 754ad16c93..0000000000 --- a/utils/nctl/sh/contracts-transfers/do_prepare_wasm_batch.sh +++ /dev/null @@ -1,117 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/utils/main.sh - -####################################### -# Prepares wasm transfers for dispatch to a test net. -# Arguments: -# Transfer amount. -# Count of transfer batches to be dispatched. -# Size of transfer batches to be dispatched. 
-####################################### -function main() -{ - local AMOUNT=${1} - local BATCH_COUNT=${2} - local BATCH_SIZE=${3} - local CHAIN_NAME - local GAS_PRICE - local GAS_PAYMENT - local PATH_TO_CLIENT - local CP1_SECRET_KEY - local CP2_ACCOUNT_KEY - local CP2_ACCOUNT_HASH - local PATH_TO_OUTPUT - local PATH_TO_OUTPUT_UNSIGNED - local PATH_TO_OUTPUT_SIGNED - - CHAIN_NAME=$(get_chain_name) - GAS_PRICE=${GAS_PRICE:-$NCTL_DEFAULT_GAS_PRICE} - GAS_PAYMENT=${GAS_PAYMENT:-$NCTL_DEFAULT_GAS_PAYMENT} - PATH_TO_CLIENT=$(get_path_to_client) - PATH_TO_CONTRACT=$(get_path_to_contract "transfers/transfer_to_account_u512.wasm") - PATH_TO_NET=$(get_path_to_net) - - CP1_SECRET_KEY=$(get_path_to_secret_key "$NCTL_ACCOUNT_TYPE_FAUCET") - - if [ -d "$PATH_TO_NET"/deploys/transfer-wasm ]; then - rm -rf "$PATH_TO_NET"/deploys/transfer-wasm - fi - - # Enumerate set of users. - for USER_ID in $(seq 1 "$(get_count_of_users)") - do - CP2_ACCOUNT_KEY=$(get_account_key "$NCTL_ACCOUNT_TYPE_USER" "$USER_ID") - CP2_ACCOUNT_HASH=$(get_account_hash "$CP2_ACCOUNT_KEY") - - # Enumerate set of batches. - for BATCH_ID in $(seq 1 "$BATCH_COUNT") - do - # Set path to output. - PATH_TO_OUTPUT="$PATH_TO_NET"/deploys/transfer-wasm/batch-"$BATCH_ID"/user-"$USER_ID" - mkdir -p "$PATH_TO_OUTPUT" - - # Enumerate set of transfer to prepare. - for TRANSFER_ID in $(seq 1 "$BATCH_SIZE") - do - # Set unsigned deploy. - PATH_TO_OUTPUT_UNSIGNED="$PATH_TO_OUTPUT"/transfer-$TRANSFER_ID-unsigned.json - $PATH_TO_CLIENT make-deploy \ - --output "$PATH_TO_OUTPUT_UNSIGNED" \ - --chain-name "$CHAIN_NAME" \ - --gas-price "$GAS_PRICE" \ - --payment-amount "$GAS_PAYMENT" \ - --ttl "1day" \ - --secret-key "$CP1_SECRET_KEY" \ - --session-arg "$(get_cl_arg_u512 'amount' "$AMOUNT")" \ - --session-arg "$(get_cl_arg_account_hash 'target' "$CP2_ACCOUNT_HASH")" \ - --session-path "$PATH_TO_CONTRACT" > \ - /dev/null 2>&1 - - # Set signed deploy. 
- PATH_TO_OUTPUT_SIGNED="$PATH_TO_OUTPUT/transfer-$TRANSFER_ID.json" - $PATH_TO_CLIENT sign-deploy \ - --secret-key "$CP1_SECRET_KEY" \ - --input "$PATH_TO_OUTPUT_UNSIGNED" \ - --output "$PATH_TO_OUTPUT_SIGNED" \ - > /dev/null 2>&1 - - # Tidy up. - rm "$PATH_TO_OUTPUT_UNSIGNED" - done - done - done -} - -# ---------------------------------------------------------------- -# ENTRY POINT -# ---------------------------------------------------------------- - -unset AMOUNT -unset BATCH_COUNT -unset BATCH_SIZE - -for ARGUMENT in "$@" -do - KEY=$(echo "$ARGUMENT" | cut -f1 -d=) - VALUE=$(echo "$ARGUMENT" | cut -f2 -d=) - case "$KEY" in - amount) AMOUNT=${VALUE} ;; - count) BATCH_COUNT=${VALUE} ;; - size) BATCH_SIZE=${VALUE} ;; - *) - esac -done - -AMOUNT=${AMOUNT:-$NCTL_DEFAULT_TRANSFER_AMOUNT} -BATCH_COUNT=${BATCH_COUNT:-5} -BATCH_SIZE=${BATCH_SIZE:-200} - -log "Preparing batch of wasm transfers:" -log "... # of batches: $BATCH_COUNT" -log "... batch size: $BATCH_SIZE" -log "... amount per transfer: $AMOUNT" - -main "$AMOUNT" \ - "$BATCH_COUNT" \ - "$BATCH_SIZE" diff --git a/utils/nctl/sh/contracts-transfers/utils.sh b/utils/nctl/sh/contracts-transfers/utils.sh deleted file mode 100644 index f77b0f6a9e..0000000000 --- a/utils/nctl/sh/contracts-transfers/utils.sh +++ /dev/null @@ -1,65 +0,0 @@ -#!/usr/bin/env bash - -####################################### -# Dispatches previously prepared transfers to a test net. -# Arguments: -# Batch ordinal identifier. -# Batch type. -# Transfer dispatch interval. -# Node ordinal identifier. -####################################### -function do_dispatch_batch() -{ - local BATCH_ID=${1} - local BATCH_TYPE=${2} - local INTERVAL=${3} - local NODE_ID=${4} - - local DEPLOY_ID - local DISPATCH_NODE_ADDRESS - local NODE_ADDRESS - local PATH_TO_BATCH - local PATH_TO_CLIENT - local PATH_TO_DEPLOY - - - # Set node address. 
- if [ "$NODE_ID" == "random" ]; then - unset NODE_ADDRESS - elif [ "$NODE_ID" -eq 0 ]; then - NODE_ADDRESS=$(get_node_address_rpc) - else - NODE_ADDRESS=$(get_node_address_rpc "$NODE_ID") - fi - - # Dispatch deploy batch. - PATH_TO_BATCH=$(get_path_to_net)/deploys/$BATCH_TYPE/batch-$BATCH_ID - if [ ! -d "$PATH_TO_BATCH" ]; then - log "ERROR: no batch exists on file system - have you prepared it ?" - else - DEPLOY_ID=0 - PATH_TO_CLIENT=$(get_path_to_client) - for USER_ID in $(seq 1 "$(get_count_of_users)") - do - for TRANSFER_ID in $(seq 1 100000) - do - PATH_TO_DEPLOY=$PATH_TO_BATCH/user-$USER_ID/transfer-$TRANSFER_ID.json - if [ ! -f "$PATH_TO_DEPLOY" ]; then - break - else - DEPLOY_ID=$((DEPLOY_ID + 1)) - DISPATCH_NODE_ADDRESS=${NODE_ADDRESS:-$(get_node_address_rpc)} - DEPLOY_HASH=$( - $PATH_TO_CLIENT send-deploy \ - --node-address "$DISPATCH_NODE_ADDRESS" \ - --input "$PATH_TO_DEPLOY" \ - | jq '.result.deploy_hash' \ - | sed -e 's/^"//' -e 's/"$//' - ) - log "deploy #$DEPLOY_ID :: batch #$BATCH_ID :: user #$USER_ID :: $DEPLOY_HASH :: $DISPATCH_NODE_ADDRESS" - sleep "$INTERVAL" - fi - done - done - fi -} diff --git a/utils/nctl/sh/misc/await_n_blocks.sh b/utils/nctl/sh/misc/await_n_blocks.sh deleted file mode 100644 index 76420af52a..0000000000 --- a/utils/nctl/sh/misc/await_n_blocks.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env bash - -unset OFFSET - -for ARGUMENT in "$@" -do - KEY=$(echo "$ARGUMENT" | cut -f1 -d=) - VALUE=$(echo "$ARGUMENT" | cut -f2 -d=) - case "$KEY" in - offset) OFFSET=${VALUE} ;; - *) - esac -done - -OFFSET=${OFFSET:-1} - -# ---------------------------------------------------------------- -# MAIN -# ---------------------------------------------------------------- - -source "$NCTL"/sh/utils/main.sh - -await_n_blocks "$OFFSET" true diff --git a/utils/nctl/sh/misc/await_n_eras.sh b/utils/nctl/sh/misc/await_n_eras.sh deleted file mode 100644 index 1a5c23887d..0000000000 --- a/utils/nctl/sh/misc/await_n_eras.sh +++ /dev/null @@ -1,23 +0,0 
@@ -#!/usr/bin/env bash - -unset OFFSET - -for ARGUMENT in "$@" -do - KEY=$(echo "$ARGUMENT" | cut -f1 -d=) - VALUE=$(echo "$ARGUMENT" | cut -f2 -d=) - case "$KEY" in - offset) OFFSET=${VALUE} ;; - *) - esac -done - -OFFSET=${OFFSET:-1} - -# ---------------------------------------------------------------- -# MAIN -# ---------------------------------------------------------------- - -source "$NCTL"/sh/utils/main.sh - -await_n_eras "$OFFSET" true diff --git a/utils/nctl/sh/misc/await_until_block_n.sh b/utils/nctl/sh/misc/await_until_block_n.sh deleted file mode 100644 index 60cd979735..0000000000 --- a/utils/nctl/sh/misc/await_until_block_n.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/usr/bin/env bash - -unset FUTURE_HEIGHT - -for ARGUMENT in "$@" -do - KEY=$(echo "$ARGUMENT" | cut -f1 -d=) - VALUE=$(echo "$ARGUMENT" | cut -f2 -d=) - case "$KEY" in - height) FUTURE_HEIGHT=${VALUE} ;; - *) - esac -done - -FUTURE_HEIGHT=${FUTURE_HEIGHT:-1} - -# ---------------------------------------------------------------- -# MAIN -# ---------------------------------------------------------------- - -source "$NCTL"/sh/utils/main.sh - -while [ "$(get_chain_height)" -lt "$FUTURE_HEIGHT" ]; -do - sleep 1.0 -done diff --git a/utils/nctl/sh/misc/await_until_era_n.sh b/utils/nctl/sh/misc/await_until_era_n.sh deleted file mode 100644 index 411a222843..0000000000 --- a/utils/nctl/sh/misc/await_until_era_n.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/usr/bin/env bash - -unset FUTURE_ERA_ID - -for ARGUMENT in "$@" -do - KEY=$(echo "$ARGUMENT" | cut -f1 -d=) - VALUE=$(echo "$ARGUMENT" | cut -f2 -d=) - case "$KEY" in - era) FUTURE_ERA_ID=${VALUE} ;; - *) - esac -done - -FUTURE_ERA_ID=${FUTURE_ERA_ID:-1} - -# ---------------------------------------------------------------- -# MAIN -# ---------------------------------------------------------------- - -source "$NCTL"/sh/utils/main.sh - -while [ "$(get_chain_era)" -lt "$FUTURE_ERA_ID" ]; -do - sleep 5.0 -done diff --git a/utils/nctl/sh/misc/rotate_nodeset.sh 
b/utils/nctl/sh/misc/rotate_nodeset.sh deleted file mode 100644 index a60c7a2a4d..0000000000 --- a/utils/nctl/sh/misc/rotate_nodeset.sh +++ /dev/null @@ -1,143 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/utils/main.sh -source "$NCTL"/sh/views/utils.sh - -# ---------------------------------------------------------------- -# MAIN -# ---------------------------------------------------------------- - -function main() -{ - log "------------------------------------------------------------" - log "Network nodeset rotation begins" - log "------------------------------------------------------------" - - do_display_root_state_hashes - - do_await_genesis_era_to_complete - - do_submit_auction_bids - do_start_newly_bonded_nodes - - do_submit_auction_withdrawals - do_await_unbonding_eras_to_complete - - do_stop_genesis_nodes - - do_display_root_state_hashes - - log "------------------------------------------------------------" - log "Network nodeset rotation complete" - log "------------------------------------------------------------" -} - -function log_step() -{ - local COMMENT=${1} - log "------------------------------------------------------------" - log "STEP $STEP: $COMMENT" - STEP=$((STEP + 1)) -} - -function do_await_genesis_era_to_complete() -{ - log_step "awaiting genesis era to complete" - while [ "$(get_chain_era)" -lt 1 ]; - do - sleep 1.0 - done -} - -function do_await_pre_bonding_eras_to_complete() -{ - log_step "awaiting 4 eras" - await_n_eras 4 true -} - -function do_await_unbonding_eras_to_complete() -{ - log_step "awaiting 15 eras prior to bringing down genesis nodes" - await_n_eras 15 -} - -function do_display_root_state_hashes() -{ - log_step "state root hash at nodes:" - for NODE_ID in $(seq 1 "$(get_count_of_nodes)") - do - render_chain_state_root_hash "$NODE_ID" - done -} - -function do_start_newly_bonded_nodes() -{ - log_step "starting non-genesis nodes:" - for NODE_ID in $(seq $(($(get_count_of_genesis_nodes) + 1)) "$(get_count_of_nodes)") - do - 
source "$NCTL"/sh/node/start.sh node="$NODE_ID" - log "node-$NODE_ID started" - done - - log_step "awaiting 10 seconds for non-genesis nodes to spin-up & join network" - sleep 10.0 -} - -function do_stop_genesis_nodes() -{ - log_step "stopping genesis nodes" - for NODE_ID in $(seq 1 "$(get_count_of_genesis_nodes)") - do - source "$NCTL"/sh/node/stop.sh node="$NODE_ID" - sleep 1.0 - log "node-$NODE_ID stopped" - done -} - -function do_submit_auction_bids() -{ - log_step "submitting POS auction bids:" - for NODE_ID in $(seq $(($(get_count_of_genesis_nodes) + 1)) "$(get_count_of_nodes)") - do - log "----- ----- ----- ----- ----- -----" - BID_AMOUNT=$(get_node_staking_weight "$NODE_ID") - BID_DELEGATION_RATE=2 - - source "$NCTL"/sh/contracts-auction/do_bid.sh \ - node="$NODE_ID" \ - amount="$BID_AMOUNT" \ - rate="$BID_DELEGATION_RATE" \ - quiet="TRUE" - - log "node-$NODE_ID auction bid submitted -> $BID_AMOUNT CSPR" - done - - log_step "awaiting 10 seconds for auction bid deploys to finalise" - sleep 10.0 -} - -function do_submit_auction_withdrawals() -{ - local WITHDRAWAL_AMOUNT - - log_step "submitting auction withdrawals:" - for NODE_ID in $(seq 1 "$(get_count_of_genesis_nodes)") - do - WITHDRAWAL_AMOUNT=$(get_node_staking_weight "$NODE_ID") - - source "$NCTL"/sh/contracts-auction/do_bid_withdraw.sh \ - node="$NODE_ID" \ - amount="$WITHDRAWAL_AMOUNT" \ - quiet="TRUE" - - log "node-$NODE_ID auction bid withdrawn -> $WITHDRAWAL_AMOUNT CSPR" - done -} - -# ---------------------------------------------------------------- -# ENTRY POINT -# ---------------------------------------------------------------- - -STEP=0 - -main diff --git a/utils/nctl/sh/node/clean.sh b/utils/nctl/sh/node/clean.sh deleted file mode 100644 index bed471195a..0000000000 --- a/utils/nctl/sh/node/clean.sh +++ /dev/null @@ -1,67 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/utils/main.sh - - -####################################### -# Clean resources of specified nodeset. 
-# Arguments: -# Node ordinal identifier. -####################################### -function main() -{ - local NODE_ID=${1} - - if [ "$NODE_ID" = "all" ]; then - for NODE_ID in $(seq 1 "$(get_count_of_nodes)") - do - echo "------------------------------------------------------------------------------------------------------------------------------------" - do_clean "$NODE_ID" - done - echo "------------------------------------------------------------------------------------------------------------------------------------" - else - do_clean "$NODE_ID" - fi -} - -####################################### -# Clean resources of specified node(s). -# Arguments: -# Node ordinal identifier. -####################################### -function do_clean() -{ - local NODE_ID=${1} - - # Stop node. - if [ "$(get_node_is_up "$NODE_ID")" = true ]; then - source "$NCTL"/sh/node/stop.sh node="$NODE_ID" - fi - - # Remove logs. - rm "$(get_path_to_node_logs "$NODE_ID")"/*.log > /dev/null 2>&1 - - # Remove state. 
- rm "$(get_path_to_node_storage "$NODE_ID")"/*.lmdb* > /dev/null 2>&1 - rm "$(get_path_to_node_storage "$NODE_ID")"-consensus/*.dat > /dev/null 2>&1 - - log "cleaned node-$NODE_ID" -} - -# ---------------------------------------------------------------- -# ENTRY POINT -# ---------------------------------------------------------------- - -unset NODE_ID - -for ARGUMENT in "$@" -do - KEY=$(echo "$ARGUMENT" | cut -f1 -d=) - VALUE=$(echo "$ARGUMENT" | cut -f2 -d=) - case "$KEY" in - node) NODE_ID=${VALUE} ;; - *) - esac -done - -main "${NODE_ID:-"all"}" diff --git a/utils/nctl/sh/node/clean_logs.sh b/utils/nctl/sh/node/clean_logs.sh deleted file mode 100644 index df626876a7..0000000000 --- a/utils/nctl/sh/node/clean_logs.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/utils/main.sh - -unset NODE_ID - -for ARGUMENT in "$@" -do - KEY=$(echo "$ARGUMENT" | cut -f1 -d=) - VALUE=$(echo "$ARGUMENT" | cut -f2 -d=) - case "$KEY" in - node) NODE_ID=${VALUE} ;; - *) - esac -done - -NODE_ID=${NODE_ID:-"all"} - -# ---------------------------------------------------------------- -# MAIN -# ---------------------------------------------------------------- - -if [ "$NODE_ID" = "all" ]; then - rm "$(get_path_to_net)"/nodes/node-*/logs/*.log > /dev/null 2>&1 -else - rm "$(get_path_to_node_logs "$NODE_ID")"/*.log > /dev/null 2>&1 -fi diff --git a/utils/nctl/sh/node/interactive.sh b/utils/nctl/sh/node/interactive.sh deleted file mode 100644 index f6d7c15405..0000000000 --- a/utils/nctl/sh/node/interactive.sh +++ /dev/null @@ -1,53 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/utils/main.sh - -####################################### -# Spins up a node in interactive mode. -# Arguments: -# Node ordinal identifier. -# Node software logging level. 
-####################################### -function main() -{ - local NODE_ID=${1} - local LOG_LEVEL=${2} - local PATH_NODE_BIN - local PATH_NODE_CONFIG - local CASPER_BIN_DIR - local CASPER_CONFIG_DIR - - PATH_NODE_BIN=$(get_path_to_node_bin "$NODE_ID") - PATH_NODE_CONFIG=$(get_path_to_node_config "$NODE_ID") - - # Export so that launcher picks them up. - export RUST_LOG=$LOG_LEVEL - export CASPER_BIN_DIR=$PATH_NODE_BIN - export CASPER_CONFIG_DIR=$PATH_NODE_CONFIG - - "$PATH_NODE_BIN"/casper-node-launcher -} - -# ---------------------------------------------------------------- -# ENTRY POINT -# ---------------------------------------------------------------- - -unset LOG_LEVEL -unset NODE_ID - -for ARGUMENT in "$@" -do - KEY=$(echo "$ARGUMENT" | cut -f1 -d=) - VALUE=$(echo "$ARGUMENT" | cut -f2 -d=) - case "$KEY" in - loglevel) LOG_LEVEL=${VALUE} ;; - node) NODE_ID=${VALUE} ;; - *) - esac -done - -LOG_LEVEL=${LOG_LEVEL:-$RUST_LOG} -LOG_LEVEL=${LOG_LEVEL:-debug} -NODE_ID=${NODE_ID:-1} - -main "$NODE_ID" "$LOG_LEVEL" diff --git a/utils/nctl/sh/node/join.sh b/utils/nctl/sh/node/join.sh deleted file mode 100644 index 644f783360..0000000000 --- a/utils/nctl/sh/node/join.sh +++ /dev/null @@ -1,58 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/utils/main.sh -source "$NCTL"/sh/node/svc_"$NCTL_DAEMON_TYPE".sh - -unset NODE_ID -unset BID_AMOUNT -unset BID_DELEGATION_RATE - -for ARGUMENT in "$@" -do - KEY=$(echo "$ARGUMENT" | cut -f1 -d=) - VALUE=$(echo "$ARGUMENT" | cut -f2 -d=) - case "$KEY" in - amount) BID_AMOUNT=${VALUE} ;; - node) NODE_ID=${VALUE} ;; - rate) BID_DELEGATION_RATE=${VALUE} ;; - *) - esac -done - -NODE_ID=${NODE_ID:-6} -BID_AMOUNT=${BID_AMOUNT:-$(get_node_staking_weight "$NODE_ID")} -BID_DELEGATION_RATE=${BID_DELEGATION_RATE:-2} - -# ---------------------------------------------------------------- -# MAIN -# ---------------------------------------------------------------- - -# Await genesis era to complete. 
-if [ "$(get_chain_era)" -eq 0 ]; then - log "awaiting genesis era to complete" - while [ "$(get_chain_era)" -lt 1 ]; - do - sleep 1.0 - done -fi - -# Submit auction bid. -log "dispatching auction bid deploy" -source "$NCTL"/sh/contracts-auction/do_bid.sh \ - node="$NODE_ID" \ - amount="$BID_AMOUNT" \ - rate="$BID_DELEGATION_RATE" - -# Await until time to start node. -log "awaiting 3 eras + 1 block" -await_n_eras 3 true -await_n_blocks 1 false - -# Start/Restart node (with trusted hash). -if [ "$(get_node_is_up "$NODE_ID")" = true ]; then - log "restarting node :: node-$NODE_ID" - do_node_restart "$NODE_ID" "$(get_chain_latest_block_hash)" -else - log "starting node :: node-$NODE_ID" - do_node_start "$NODE_ID" "$(get_chain_latest_block_hash)" -fi diff --git a/utils/nctl/sh/node/leave.sh b/utils/nctl/sh/node/leave.sh deleted file mode 100644 index f8fc2644bf..0000000000 --- a/utils/nctl/sh/node/leave.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/utils/main.sh -source "$NCTL"/sh/node/svc_"$NCTL_DAEMON_TYPE".sh - -unset NODE_ID -unset WITHDRAWAL_AMOUNT - -for ARGUMENT in "$@" -do - KEY=$(echo "$ARGUMENT" | cut -f1 -d=) - VALUE=$(echo "$ARGUMENT" | cut -f2 -d=) - case "$KEY" in - amount) WITHDRAWAL_AMOUNT=${VALUE} ;; - node) NODE_ID=${VALUE} ;; - *) - esac -done - -NODE_ID=${NODE_ID:-1} -WITHDRAWAL_AMOUNT=${WITHDRAWAL_AMOUNT:-$(get_node_staking_weight "$NODE_ID")} - -# ---------------------------------------------------------------- -# MAIN -# ---------------------------------------------------------------- - -# Submit auction withdrawal. -log "dispatching auction withdrawal deploy" -source "$NCTL"/sh/contracts-auction/do_bid_withdraw.sh \ - node="$NODE_ID" \ - amount="$WITHDRAWAL_AMOUNT" - -# If node is up then await & stop. -if [ "$(get_node_is_up "$NODE_ID")" = true ]; then - log "awaiting 4 eras" - await_n_eras 4 true - log "node-$NODE_ID: stopping node ... 
" - do_node_stop "$NODE_ID" -fi diff --git a/utils/nctl/sh/node/restart.sh b/utils/nctl/sh/node/restart.sh deleted file mode 100644 index 915e5d103d..0000000000 --- a/utils/nctl/sh/node/restart.sh +++ /dev/null @@ -1,41 +0,0 @@ -#!/usr/bin/env bash - -unset CLEAN -unset NODE_ID -unset TRUSTED_HASH - -for ARGUMENT in "$@" -do - KEY=$(echo "$ARGUMENT" | cut -f1 -d=) - VALUE=$(echo "$ARGUMENT" | cut -f2 -d=) - case "$KEY" in - hash) TRUSTED_HASH=${VALUE} ;; - clean) CLEAN=${VALUE} ;; - node) NODE_ID=${VALUE} ;; - *) - esac -done - -CLEAN=${CLEAN:-true} -NODE_ID=${NODE_ID:-"all"} - -# ---------------------------------------------------------------- -# MAIN -# ---------------------------------------------------------------- - -# Stop. -if [ "$(get_node_is_up "$NODE_ID")" = true ]; then - source "$NCTL"/sh/node/stop.sh node="$NODE_ID" -fi - -# Clean. -if [ "$CLEAN" = true ]; then - log "node-$NODE_ID: clearing logs" - rm "$(get_path_to_node_logs "$NODE_ID")"/*.log > /dev/null 2>&1 - log "node-$NODE_ID: clearing storage" - rm "$(get_path_to_node_storage "$NODE_ID")"/*.lmdb* > /dev/null 2>&1 - rm "$(get_path_to_node_storage "$NODE_ID")"-consensus/*.dat > /dev/null 2>&1 -fi - -# Start. 
-source "$NCTL"/sh/node/start.sh node="$NODE_ID" hash="$TRUSTED_HASH" diff --git a/utils/nctl/sh/node/start.sh b/utils/nctl/sh/node/start.sh deleted file mode 100644 index abdad1322d..0000000000 --- a/utils/nctl/sh/node/start.sh +++ /dev/null @@ -1,43 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/utils/main.sh -source "$NCTL"/sh/node/svc_"$NCTL_DAEMON_TYPE".sh - -unset LOG_LEVEL -unset NODE_ID -unset TRUSTED_HASH - -for ARGUMENT in "$@" -do - KEY=$(echo "$ARGUMENT" | cut -f1 -d=) - VALUE=$(echo "$ARGUMENT" | cut -f2 -d=) - case "$KEY" in - hash) TRUSTED_HASH=${VALUE} ;; - loglevel) LOG_LEVEL=${VALUE} ;; - node) NODE_ID=${VALUE} ;; - *) - esac -done - -LOG_LEVEL=${LOG_LEVEL:-$RUST_LOG} -LOG_LEVEL=${LOG_LEVEL:-debug} -export RUST_LOG=$LOG_LEVEL -NODE_ID=${NODE_ID:-"all"} - -# ---------------------------------------------------------------- -# MAIN -# ---------------------------------------------------------------- - -# Start node(s). -if [ "$NODE_ID" == "all" ]; then - log "starting node(s) begins ... please wait" - do_node_start_all "$TRUSTED_HASH" - log "starting node(s) complete" -else - log "node-$NODE_ID: starting ..." - do_node_start "$NODE_ID" "$TRUSTED_HASH" -fi - -# Display status. 
-sleep 1.0 -source "$NCTL"/sh/node/status.sh node="$NODE_ID" diff --git a/utils/nctl/sh/node/start_after_n_blocks.sh b/utils/nctl/sh/node/start_after_n_blocks.sh deleted file mode 100644 index d829977c58..0000000000 --- a/utils/nctl/sh/node/start_after_n_blocks.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/utils/main.sh - -unset OFFSET -unset LOG_LEVEL -unset NODE_ID -unset TRUSTED_HASH - -for ARGUMENT in "$@" -do - KEY=$(echo "$ARGUMENT" | cut -f1 -d=) - VALUE=$(echo "$ARGUMENT" | cut -f2 -d=) - case "$KEY" in - hash) TRUSTED_HASH=${VALUE} ;; - loglevel) LOG_LEVEL=${VALUE} ;; - node) NODE_ID=${VALUE} ;; - offset) OFFSET=${VALUE} ;; - *) - esac -done - -OFFSET=${OFFSET:-1} -NODE_ID=${NODE_ID:-6} - -# ---------------------------------------------------------------- -# MAIN -# ---------------------------------------------------------------- - -# Await until N blocks have been added to linear block chain. -await_n_blocks "$OFFSET" true - -# Start node. -source "$NCTL"/sh/node/start.sh \ - hash="$TRUSTED_HASH" \ - node="$NODE_ID" \ - loglevel="$LOG_LEVEL" diff --git a/utils/nctl/sh/node/start_after_n_eras.sh b/utils/nctl/sh/node/start_after_n_eras.sh deleted file mode 100644 index 47b1ea3556..0000000000 --- a/utils/nctl/sh/node/start_after_n_eras.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/utils/main.sh - -unset OFFSET -unset LOG_LEVEL -unset NODE_ID -unset TRUSTED_HASH - -for ARGUMENT in "$@" -do - KEY=$(echo "$ARGUMENT" | cut -f1 -d=) - VALUE=$(echo "$ARGUMENT" | cut -f2 -d=) - case "$KEY" in - hash) TRUSTED_HASH=${VALUE} ;; - loglevel) LOG_LEVEL=${VALUE} ;; - node) NODE_ID=${VALUE} ;; - offset) OFFSET=${VALUE} ;; - *) - esac -done - -OFFSET=${OFFSET:-1} -NODE_ID=${NODE_ID:-6} - -# ---------------------------------------------------------------- -# MAIN -# ---------------------------------------------------------------- - -# Await until N era have passed. -await_n_eras "$OFFSET" true - -# Start node. 
-source "$NCTL"/sh/node/start.sh \ - hash="$TRUSTED_HASH" \ - node="$NODE_ID" \ - loglevel="$LOG_LEVEL" diff --git a/utils/nctl/sh/node/status.sh b/utils/nctl/sh/node/status.sh deleted file mode 100644 index 45558a7f59..0000000000 --- a/utils/nctl/sh/node/status.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/utils/main.sh -source "$NCTL"/sh/node/svc_"$NCTL_DAEMON_TYPE".sh - -unset NODE_ID - -for ARGUMENT in "$@" -do - KEY=$(echo "$ARGUMENT" | cut -f1 -d=) - VALUE=$(echo "$ARGUMENT" | cut -f2 -d=) - case "$KEY" in - node) NODE_ID=${VALUE} ;; - *) - esac -done - -NODE_ID=${NODE_ID:-"all"} - -# ---------------------------------------------------------------- -# MAIN -# ---------------------------------------------------------------- - -if [ "$NODE_ID" == "all" ]; then - do_node_status_all -else - do_node_status "$NODE_ID" -fi diff --git a/utils/nctl/sh/node/stop.sh b/utils/nctl/sh/node/stop.sh deleted file mode 100644 index 4b933846b2..0000000000 --- a/utils/nctl/sh/node/stop.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/utils/main.sh -source "$NCTL"/sh/node/svc_"$NCTL_DAEMON_TYPE".sh - -# ---------------------------------------------------------------- -# ARGS -# ---------------------------------------------------------------- - -unset NODE_ID - -for ARGUMENT in "$@" -do - KEY=$(echo "$ARGUMENT" | cut -f1 -d=) - VALUE=$(echo "$ARGUMENT" | cut -f2 -d=) - case "$KEY" in - node) NODE_ID=${VALUE} ;; - *) - esac -done - -NODE_ID=${NODE_ID:-"all"} - -# ---------------------------------------------------------------- -# MAIN -# ---------------------------------------------------------------- - -if [ "$NODE_ID" == "all" ]; then - do_node_stop_all - do_node_status_all -else - log "node-$NODE_ID: stopping node ... 
" - do_node_stop "$NODE_ID" - sleep 1.0 - source "$NCTL"/sh/node/status.sh node="$NODE_ID" -fi diff --git a/utils/nctl/sh/node/svc_supervisord.sh b/utils/nctl/sh/node/svc_supervisord.sh deleted file mode 100644 index df54dc6c25..0000000000 --- a/utils/nctl/sh/node/svc_supervisord.sh +++ /dev/null @@ -1,165 +0,0 @@ -#!/usr/bin/env bash - -####################################### -# Spins up a node using supervisord. -# Arguments: -# Node ordinal identifier. -# A trusted hash to streamline joining. -####################################### -function do_node_start() -{ - local NODE_ID=${1} - local TRUSTED_HASH=${2} - - local PATH_TO_NODE_CONFIG - local PROCESS_NAME - - if [ ! -e "$(get_path_net_supervisord_sock)" ]; then - _do_supervisord_start - fi - - if [ -n "$TRUSTED_HASH" ]; then - PATH_TO_NODE_CONFIG=$(get_path_to_net)/nodes/node-"$NODE_ID"/config/1_0_0/config.toml - _update_node_config_on_start "$PATH_TO_NODE_CONFIG" "$TRUSTED_HASH" - fi - - PROCESS_NAME=$(get_process_name_of_node_in_group "$NODE_ID") - supervisorctl -c "$(get_path_net_supervisord_cfg)" start "$PROCESS_NAME" > /dev/null 2>&1 -} - -####################################### -# Spins up all nodes using supervisord. -# Arguments: -# Network ordinal identifier. -# Count of nodes within network. -# Count of bootstraps within network. -####################################### -function do_node_start_all() -{ - if [ ! -e "$(get_path_net_supervisord_sock)" ]; then - _do_supervisord_start - fi - - log "... starting nodes: genesis bootstraps" - supervisorctl -c "$(get_path_net_supervisord_cfg)" start "$NCTL_PROCESS_GROUP_1":* > /dev/null 2>&1 - sleep 1.0 - - log "... starting nodes: genesis non-bootstraps" - supervisorctl -c "$(get_path_net_supervisord_cfg)" start "$NCTL_PROCESS_GROUP_2":* > /dev/null 2>&1 - sleep 1.0 -} - -####################################### -# Renders to stdout status of a node running under supervisord. -# Arguments: -# Node ordinal identifier. 
-####################################### -function do_node_status() -{ - local NODE_ID=${1} - local NODE_PROCESS_NAME - - if [ -e "$(get_path_net_supervisord_sock)" ]; then - NODE_PROCESS_NAME=$(get_process_name_of_node_in_group "$NODE_ID") - # True is necessary due to supervisorctl exiting 3 - supervisorctl -c "$(get_path_net_supervisord_cfg)" status "$NODE_PROCESS_NAME" || true - fi -} - -####################################### -# Renders to stdout status of all nodes running under supervisord. -# Arguments: -# Network ordinal identifier. -####################################### -function do_node_status_all() -{ - if [ -e "$(get_path_net_supervisord_sock)" ]; then - # True is necessary due to supervisorctl exiting 3 - supervisorctl -c "$(get_path_net_supervisord_cfg)" status all || true - fi -} - -####################################### -# Stops a node running via supervisord. -# Arguments: -# Network ordinal identifier. -# Node ordinal identifier. -####################################### -function do_node_stop() -{ - local NODE_ID=${1} - local NODE_PROCESS_NAME - - if [ -e "$(get_path_net_supervisord_sock)" ]; then - NODE_PROCESS_NAME=$(get_process_name_of_node_in_group "$NODE_ID") - supervisorctl -c "$(get_path_net_supervisord_cfg)" stop "$NODE_PROCESS_NAME" > /dev/null 2>&1 - fi -} - -####################################### -# Stops all nodes running via supervisord. -####################################### -function do_node_stop_all() -{ - if [ -e "$(get_path_net_supervisord_sock)" ]; then - log "... stopping supervisord" - supervisorctl -c "$(get_path_net_supervisord_cfg)" shutdown > /dev/null 2>&1 - sleep 2.0 - fi -} - -####################################### -# Starts supervisord (if necessary). -# Arguments: -# Network ordinal identifier. -####################################### -function _do_supervisord_start() -{ - if [ ! -e "$(get_path_net_supervisord_sock)" ]; then - log "... 
starting supervisord" - supervisord -c "$(get_path_net_supervisord_cfg)" - sleep 2.0 - fi -} - -####################################### -# Sets entry in node's config file. -# Arguments: -# Node ordinal identifier. -# A trused block hash from which to build chain state. -####################################### -function _get_node_pid() -{ - local NODE_ID=${1} - local NODE_PROCESS_NAME - - if [ -e "$(get_path_net_supervisord_sock)" ]; then - NODE_PROCESS_NAME=$(get_process_name_of_node_in_group "$NODE_ID") - echo $(supervisorctl -c "$(get_path_net_supervisord_cfg)" pid "$NODE_PROCESS_NAME") - else - echo "0" - fi -} - -####################################### -# Sets entry in node's config file. -# Arguments: -# Node ordinal identifier. -# A trused block hash from which to build chain state. -####################################### -function _update_node_config_on_start() -{ - local FILEPATH=${1} - local TRUSTED_HASH=${2} - local SCRIPT - - log "trusted hash = $TRUSTED_HASH" - - SCRIPT=( - "import toml;" - "cfg=toml.load('$FILEPATH');" - "cfg['node']['trusted_hash']='$TRUSTED_HASH';" - "toml.dump(cfg, open('$FILEPATH', 'w'));" - ) - python3 -c "${SCRIPT[*]}" -} diff --git a/utils/nctl/sh/node/upgrade.sh b/utils/nctl/sh/node/upgrade.sh deleted file mode 100644 index edd4ce438c..0000000000 --- a/utils/nctl/sh/node/upgrade.sh +++ /dev/null @@ -1,51 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/utils/main.sh -source "$NCTL"/sh/node/svc_"$NCTL_DAEMON_TYPE".sh -source "$NCTL"/sh/assets/upgrade.sh - -unset LOG_LEVEL -unset NODE_ID -unset PROTOCOL_VERSION -unset ACTIVATE_ERA - -for ARGUMENT in "$@"; do - KEY=$(echo "$ARGUMENT" | cut -f1 -d=) - VALUE=$(echo "$ARGUMENT" | cut -f2 -d=) - case "$KEY" in - version) PROTOCOL_VERSION=${VALUE} ;; - era) ACTIVATE_ERA=${VALUE} ;; - loglevel) LOG_LEVEL=${VALUE} ;; - *) echo "Unknown argument '${KEY}'. Use 'version', 'era' or 'loglevel'." 
&& exit 1 ;; - esac -done - -LOG_LEVEL=${LOG_LEVEL:-$RUST_LOG} -LOG_LEVEL=${LOG_LEVEL:-debug} -export RUST_LOG=$LOG_LEVEL - -function do_upgrade() { - local PROTOCOL_VERSION=${1} - local ACTIVATE_ERA=${2} - local NODE_COUNT=${3:-5} - - for NODE_ID in $(seq 1 $((NODE_COUNT * 2))); do - _upgrade_node "$PROTOCOL_VERSION" "$ACTIVATE_ERA" "$NODE_ID" - done -} - -####################################### -# Upgrades all nodes in the network to a specified protocol version. -# Does not modify the chainspec file in any way that has an influence on the network, -# except for setting required entries for the upgrade to take place. -####################################### - -# -# ---------------------------------------------------------------- -# MAIN -# ---------------------------------------------------------------- - -# Upgrade node(s). -log "upgrade node(s) begins ... please wait" -do_upgrade "$PROTOCOL_VERSION" "$ACTIVATE_ERA" -log "upgrade node(s) complete" diff --git a/utils/nctl/sh/scenarios/accounts_toml/bond_its.accounts.toml b/utils/nctl/sh/scenarios/accounts_toml/bond_its.accounts.toml deleted file mode 100644 index 02a84d8fe9..0000000000 --- a/utils/nctl/sh/scenarios/accounts_toml/bond_its.accounts.toml +++ /dev/null @@ -1,134 +0,0 @@ -# FAUCET. -[[accounts]] -public_key = "PBK_FAUCET" -balance = "1000000000000000000000000000000000" - -# VALIDATOR 1. -[[accounts]] -public_key = "PBK_V1" -balance = "1000000000000000000000000000000000" - -[accounts.validator] -bonded_amount = "1" -delegation_rate = 1 - -# VALIDATOR 2. -[[accounts]] -public_key = "PBK_V2" -balance = "1000000000000000000000000000000000" - -[accounts.validator] -bonded_amount = "1" -delegation_rate = 2 - -# VALIDATOR 3. -[[accounts]] -public_key = "PBK_V3" -balance = "1000000000000000000000000000000000" - -[accounts.validator] -bonded_amount = "1" -delegation_rate = 3 - -# VALIDATOR 4. 
-[[accounts]] -public_key = "PBK_V4" -balance = "1000000000000000000000000000000000" - -[accounts.validator] -bonded_amount = "1" -delegation_rate = 4 - -# VALIDATOR 5. -[[accounts]] -public_key = "PBK_V5" -balance = "1000000000000000000000000000000000" - -[accounts.validator] -bonded_amount = "1" -delegation_rate = 5 - -# VALIDATOR 6. -[[accounts]] -public_key = "PBK_V6" -balance = "1000000000000000000000000000000000" - -# VALIDATOR 7. -[[accounts]] -public_key = "PBK_V7" -balance = "1000000000000000000000000000000000" - -# VALIDATOR 8. -[[accounts]] -public_key = "PBK_V8" -balance = "1000000000000000000000000000000000" - -# VALIDATOR 9. -[[accounts]] -public_key = "PBK_V9" -balance = "1000000000000000000000000000000000" - -# VALIDATOR 10. -[[accounts]] -public_key = "PBK_V10" -balance = "1000000000000000000000000000000000" - -# USER 1. -[[delegators]] -validator_public_key = "PBK_V1" -delegator_public_key = "PBK_U1" -balance = "1000000000000000000000000000000000" -delegated_amount = "1" - -# USER 2. -[[delegators]] -validator_public_key = "PBK_V2" -delegator_public_key = "PBK_U2" -balance = "1000000000000000000000000000000000" -delegated_amount = "1" - -# USER 3. -[[delegators]] -validator_public_key = "PBK_V3" -delegator_public_key = "PBK_U3" -balance = "1000000000000000000000000000000000" -delegated_amount = "1" - -# USER 4. -[[delegators]] -validator_public_key = "PBK_V4" -delegator_public_key = "PBK_U4" -balance = "1000000000000000000000000000000000" -delegated_amount = "1" - -# USER 5. -[[delegators]] -validator_public_key = "PBK_V5" -delegator_public_key = "PBK_U5" -balance = "1000000000000000000000000000000000" -delegated_amount = "1" - -# USER 6. -[[accounts]] -public_key = "PBK_U6" -balance = "1000000000000000000000000000000000" - -# USER 7. -[[accounts]] -public_key = "PBK_U7" -balance = "1000000000000000000000000000000000" - -# USER 8. -[[accounts]] -public_key = "PBK_U8" -balance = "1000000000000000000000000000000000" - -# USER 9. 
-[[accounts]] -public_key = "PBK_U9" -balance = "1000000000000000000000000000000000" - -# USER 10. -[[accounts]] -public_key = "PBK_U10" -balance = "1000000000000000000000000000000000" diff --git a/utils/nctl/sh/scenarios/accounts_toml/itst14.accounts.toml b/utils/nctl/sh/scenarios/accounts_toml/itst14.accounts.toml deleted file mode 100644 index de49ed1a7f..0000000000 --- a/utils/nctl/sh/scenarios/accounts_toml/itst14.accounts.toml +++ /dev/null @@ -1,134 +0,0 @@ -# FAUCET. -[[accounts]] -public_key = "PBK_FAUCET" -balance = "1000000000000000000000000000000000" - -# VALIDATOR 1. -[[accounts]] -public_key = "PBK_V1" -balance = "1000000000000000000000000000000000" - -[accounts.validator] -bonded_amount = "1" -delegation_rate = 1 - -# VALIDATOR 2. -[[accounts]] -public_key = "PBK_V2" -balance = "1000000000000000000000000000000000" - -[accounts.validator] -bonded_amount = "1" -delegation_rate = 2 - -# VALIDATOR 3. -[[accounts]] -public_key = "PBK_V3" -balance = "1000000000000000000000000000000000" - -[accounts.validator] -bonded_amount = "1" -delegation_rate = 3 - -# VALIDATOR 4. -[[accounts]] -public_key = "PBK_V4" -balance = "1000000000000000000000000000000000" - -[accounts.validator] -bonded_amount = "1" -delegation_rate = 4 - -# VALIDATOR 5. -[[accounts]] -public_key = "PBK_V5" -balance = "1000000000000000000000000000000000" - -[accounts.validator] -bonded_amount = "1000" -delegation_rate = 5 - -# VALIDATOR 6. -[[accounts]] -public_key = "PBK_V6" -balance = "1000000000000000000000000000000000" - -# VALIDATOR 7. -[[accounts]] -public_key = "PBK_V7" -balance = "1000000000000000000000000000000000" - -# VALIDATOR 8. -[[accounts]] -public_key = "PBK_V8" -balance = "1000000000000000000000000000000000" - -# VALIDATOR 9. -[[accounts]] -public_key = "PBK_V9" -balance = "1000000000000000000000000000000000" - -# VALIDATOR 10. -[[accounts]] -public_key = "PBK_V10" -balance = "1000000000000000000000000000000000" - -# USER 1. 
-[[delegators]] -validator_public_key = "PBK_V1" -delegator_public_key = "PBK_U1" -balance = "1000000000000000000000000000000000" -delegated_amount = "1" - -# USER 2. -[[delegators]] -validator_public_key = "PBK_V2" -delegator_public_key = "PBK_U2" -balance = "1000000000000000000000000000000000" -delegated_amount = "1" - -# USER 3. -[[delegators]] -validator_public_key = "PBK_V3" -delegator_public_key = "PBK_U3" -balance = "1000000000000000000000000000000000" -delegated_amount = "1" - -# USER 4. -[[delegators]] -validator_public_key = "PBK_V4" -delegator_public_key = "PBK_U4" -balance = "1000000000000000000000000000000000" -delegated_amount = "1" - -# USER 5. -[[delegators]] -validator_public_key = "PBK_V5" -delegator_public_key = "PBK_U5" -balance = "1000000000000000000000000000000000" -delegated_amount = "1" - -# USER 6. -[[accounts]] -public_key = "PBK_U6" -balance = "1000000000000000000000000000000000" - -# USER 7. -[[accounts]] -public_key = "PBK_U7" -balance = "1000000000000000000000000000000000" - -# USER 8. -[[accounts]] -public_key = "PBK_U8" -balance = "1000000000000000000000000000000000" - -# USER 9. -[[accounts]] -public_key = "PBK_U9" -balance = "1000000000000000000000000000000000" - -# USER 10. -[[accounts]] -public_key = "PBK_U10" -balance = "1000000000000000000000000000000000" diff --git a/utils/nctl/sh/scenarios/bond_its.sh b/utils/nctl/sh/scenarios/bond_its.sh deleted file mode 100644 index 7ac2e40110..0000000000 --- a/utils/nctl/sh/scenarios/bond_its.sh +++ /dev/null @@ -1,93 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/utils/main.sh -source "$NCTL"/sh/views/utils.sh -source "$NCTL"/sh/node/svc_"$NCTL_DAEMON_TYPE".sh -source "$NCTL"/sh/scenarios/common/itst.sh - -# Exit if any of the commands fail. -set -e - -function main() { - log "------------------------------------------------------------" - log "Starting Scenario: Bonding test" - log "------------------------------------------------------------" - - # 0. 
Wait for network to start up - do_await_genesis_era_to_complete - # 1. Allow the chain to progress - do_await_era_change 1 - # 2. Verify all nodes are in sync - check_network_sync - # 3. Submit bid for node 6 - do_submit_auction_bids "6" - do_read_lfb_hash "5" - do_start_node "6" "$LFB_HASH" - # 4. wait auction_delay + 1 - do_await_era_change "4" - # 5. Assert that the validator is bonded in. - assert_new_bonded_validator "6" - log "The new node has bonded in." - # 6. Assert that the new bonded validator is producing blocks. - assert_node_proposed "6" "180" - - - log "------------------------------------------------------------" - log "Scenario bonding complete" - log "------------------------------------------------------------" - -} - -function do_submit_auction_bids() -{ - local NODE_ID=${1} - log_step "submitting POS auction bids:" - log "----- ----- ----- ----- ----- -----" - BID_AMOUNT="1000000000000000000000000000000" - BID_DELEGATION_RATE=6 - - source "$NCTL"/sh/contracts-auction/do_bid.sh \ - node="$NODE_ID" \ - amount="$BID_AMOUNT" \ - rate="$BID_DELEGATION_RATE" \ - quiet="TRUE" - - log "node-$NODE_ID auction bid submitted -> $BID_AMOUNT CSPR" - - log_step "awaiting 10 seconds for auction bid deploys to finalise" - sleep 10.0 -} - - -function assert_new_bonded_validator() { - local NODE_ID=${1} - local HEX=$(get_node_public_key_hex "$NODE_ID") - if ! $(nctl-view-chain-auction-info | grep -q "$HEX"); then - echo "Could not find key in bids" - exit 1 - fi -} - -function assert_node_proposed() { - local NODE_ID=${1} - local NODE_PATH=$(get_path_to_node "$NODE_ID") - local PUBLIC_KEY_HEX=$(get_node_public_key_hex "$NODE_ID") - local TIMEOUT=${2:-300} - log_step "Waiting for a node-$NODE_ID to produce a block..." 
- local OUTPUT=$(timeout "$TIMEOUT" tail -n 1 -f "$NODE_PATH/logs/stdout.log" | grep -o -m 1 "proposer: PublicKey::Ed25519($PUBLIC_KEY_HEX)") - if ( echo "$OUTPUT" | grep -q "proposer: PublicKey::Ed25519($PUBLIC_KEY_HEX)" ); then - log "Node-$NODE_ID created a block!" - log "$OUTPUT" - else - log "ERROR: Node-$NODE_ID didn't create a block within timeout=$TIMEOUT" - exit 1 - fi -} - -# ---------------------------------------------------------------- -# ENTRY POINT -# ---------------------------------------------------------------- - -STEP=0 - -main diff --git a/utils/nctl/sh/scenarios/chainspecs/bond_its.chainspec.toml.in b/utils/nctl/sh/scenarios/chainspecs/bond_its.chainspec.toml.in deleted file mode 100644 index d0d4c1bd68..0000000000 --- a/utils/nctl/sh/scenarios/chainspecs/bond_its.chainspec.toml.in +++ /dev/null @@ -1,212 +0,0 @@ -[protocol] -# Protocol version. -version = '1.0.0' -# Whether we need to clear latest blocks back to the switch block just before the activation point or not. -hard_reset = false -# This protocol version becomes active at this point. -# -# If it is a timestamp string, it represents the timestamp for the genesis block. This is the beginning of era 0. By -# this time, a sufficient majority (> 50% + F/2 — see finality_threshold_fraction below) of validator nodes must be up -# and running to start the blockchain. This timestamp is also used in seeding the pseudo-random number generator used -# in contract-runtime for computing genesis post-state hash. -# -# If it is an integer, it represents an era ID, meaning the protocol version becomes active at the start of this era. -activation_point = '${TIMESTAMP}' - -[network] -# Human readable name for convenience; the genesis_hash is the true identifier. The name influences the genesis hash by -# contributing to the seeding of the pseudo-random number generator used in contract-runtime for computing genesis -# post-state hash. 
-name = 'casper-example' -# The maximum size of an acceptable networking message in bytes. Any message larger than this will -# be rejected at the networking level. -maximum_net_message_size = 23_068_672 - -[core] -# Era duration. -era_duration = '41seconds' -# Minimum number of blocks per era. An era will take longer than `era_duration` if that is necessary to reach the -# minimum height. -minimum_era_height = 10 -# Number of slots available in validator auction. -validator_slots = 5 -# Number of eras before an auction actually defines the set of validators. If you bond with a sufficient bid in era N, -# you will be a validator in era N + auction_delay + 1. -auction_delay = 3 -# The period after genesis during which a genesis validator's bid is locked. -locked_funds_period = '90days' -# Default number of eras that need to pass to be able to withdraw unbonded funds. -unbonding_delay = 14 -# Round seigniorage rate represented as a fraction of the total supply. -# -# Annual issuance: 2% -# Minimum round exponent: 12 -# Ticks per year: 31536000000 -# -# (1+0.02)^((2^12)/31536000000)-1 is expressed as a fractional number below. -round_seigniorage_rate = [15_959, 6_204_824_582_392] - -[highway] -# A number between 0 and 1 representing the fault tolerance threshold as a fraction, used by the internal finalizer. -# It is the fraction of validators that would need to equivocate to make two honest nodes see two conflicting blocks as -# finalized: A higher value F makes it safer to rely on finalized blocks. It also makes it more difficult to finalize -# blocks, however, and requires strictly more than (F + 1)/2 validators to be working correctly. -finality_threshold_fraction = [1, 3] -# Integer between 0 and 255. The power of two that is the number of milliseconds in the minimum round length, and -# therefore the minimum delay between a block and its child. E.g. 14 means 2^14 milliseconds, i.e. about 16 seconds. -minimum_round_exponent = 12 -# Integer between 0 and 255. 
Must be greater than `minimum_round_exponent`. The power of two that is the number of -# milliseconds in the maximum round length, and therefore the maximum delay between a block and its child. E.g. 19 -# means 2^19 milliseconds, i.e. about 8.7 minutes. -maximum_round_exponent = 14 -# The factor by which rewards for a round are multiplied if the greatest summit has ≤50% quorum, i.e. no finality. -# Expressed as a fraction (1/5 by default). -reduced_reward_multiplier = [1, 5] - -[deploys] -# The maximum number of Motes allowed to be spent during payment. 0 means unlimited. -max_payment_cost = '0' -# The duration after the deploy timestamp that it can be included in a block. -max_ttl = '1day' -# The maximum number of other deploys a deploy can depend on (require to have been executed before it can execute). -max_dependencies = 10 -# Maximum block size in bytes including deploys contained by the block. 0 means unlimited. -max_block_size = 10_485_760 -# The maximum number of non-transfer deploys permitted in a single block. -block_max_deploy_count = 100 -# The maximum number of wasm-less transfer deploys permitted in a single block. -block_max_transfer_count = 1 -# The upper limit of total gas of all deploys in a block. -block_gas_limit = 10_000_000_000_000 -# The limit of length of serialized payment code arguments. -payment_args_max_length = 1024 -# The limit of length of serialized session code arguments. -session_args_max_length = 1024 -# The minimum amount in motes for a valid native transfer. -native_transfer_minimum_motes = 2_500_000_000 - -[wasm] -# Amount of free memory (in 64kB pages) each contract can use for stack. -max_memory = 64 -# Max stack height (native WebAssembly stack limiter). -max_stack_height = 65_536 - -[wasm.storage_costs] -# Gas charged per byte stored in the global state. -gas_per_byte = 630_000 - -[wasm.opcode_costs] -# Bit operations multiplier. -bit = 300 -# Arithmetic add operations multiplier. -add = 210 -# Mul operations multiplier. 
-mul = 240 -# Div operations multiplier. -div = 320 -# Memory load operation multiplier. -load = 2_500 -# Memory store operation multiplier. -store = 4_700 -# Const store operation multiplier. -const = 110 -# Local operations multiplier. -local = 390 -# Global operations multiplier. -global = 390 -# Control flow operations multiplier. -control_flow = 440 -# Integer operations multiplier. -integer_comparison = 250 -# Conversion operations multiplier. -conversion = 420 -# Unreachable operation multiplier. -unreachable = 270 -# Nop operation multiplier. -nop = 200 -# Get current memory operation multiplier. -current_memory = 290 -# Grow memory cost, per page (64kb). -grow_memory = 240_000 -# Regular opcode cost. -regular = 210 - -# Host function declarations are located in smart_contracts/contract/src/ext_ffi.rs -[wasm.host_function_costs] -add = { cost = 5_800, arguments = [0, 0, 0, 0] } -add_associated_key = { cost = 9_000, arguments = [0, 0, 0] } -add_contract_version = { cost = 200, arguments = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] } -blake2b = { cost = 200, arguments = [0, 0, 0, 0] } -call_contract = { cost = 4_500, arguments = [0, 0, 0, 0, 0, 420, 0] } -call_versioned_contract = { cost = 200, arguments = [0, 0, 0, 0, 0, 0, 0, 0, 0] } -create_contract_package_at_hash = { cost = 200, arguments = [0, 0] } -create_contract_user_group = { cost = 200, arguments = [0, 0, 0, 0, 0, 0, 0, 0] } -create_purse = { cost = 170_000, arguments = [0, 0] } -disable_contract_version = { cost = 200, arguments = [0, 0, 0, 0] } -get_balance = { cost = 3_800, arguments = [0, 0, 0] } -get_blocktime = { cost = 330, arguments = [0] } -get_caller = { cost = 380, arguments = [0] } -get_key = { cost = 2_000, arguments = [0, 440, 0, 0, 0] } -get_main_purse = { cost = 1_300, arguments = [0] } -get_named_arg = { cost = 200, arguments = [0, 0, 0, 0] } -get_named_arg_size = { cost = 200, arguments = [0, 0, 0] } -get_phase = { cost = 710, arguments = [0] } -get_system_contract = { cost = 1_100, arguments 
= [0, 0, 0] } -has_key = { cost = 1_500, arguments = [0, 840] } -is_valid_uref = { cost = 760, arguments = [0, 0] } -load_named_keys = { cost = 42_000, arguments = [0, 0] } -new_uref = { cost = 17_000, arguments = [0, 0, 590] } -print = { cost = 20_000, arguments = [0, 4_600] } -provision_contract_user_group_uref = { cost = 200, arguments = [0, 0, 0, 0, 0] } -put_key = { cost = 38_000, arguments = [0, 1_100, 0, 0] } -read_host_buffer = { cost = 3_500, arguments = [0, 310, 0] } -read_value = { cost = 6_000, arguments = [0, 0, 0] } -read_value_local = { cost = 5_500, arguments = [0, 590, 0] } -remove_associated_key = { cost = 4_200, arguments = [0, 0] } -remove_contract_user_group = { cost = 200, arguments = [0, 0, 0, 0] } -remove_contract_user_group_urefs = { cost = 200, arguments = [0, 0, 0, 0, 0, 0] } -remove_key = { cost = 61_000, arguments = [0, 3_200] } -ret = { cost = 23_000, arguments = [0, 420_000] } -revert = { cost = 500, arguments = [0] } -set_action_threshold = { cost = 74_000, arguments = [0, 0] } -transfer_from_purse_to_account = { cost = 160_000, arguments = [0, 0, 0, 0, 0, 0, 0, 0, 0] } -transfer_from_purse_to_purse = { cost = 82_000, arguments = [0, 0, 0, 0, 0, 0, 0, 0] } -transfer_to_account = { cost = 24_000, arguments = [0, 0, 0, 0, 0, 0, 0] } -update_associated_key = { cost = 4_200, arguments = [0, 0, 0] } -write = { cost = 14_000, arguments = [0, 0, 0, 980] } -write_local = { cost = 9_500, arguments = [0, 1_800, 0, 520] } - -[system_costs] -wasmless_transfer_cost = 10_000 - -[system_costs.auction_costs] -get_era_validators = 10_000 -read_seigniorage_recipients = 10_000 -add_bid = 10_000 -withdraw_bid = 10_000 -delegate = 10_000 -undelegate = 10_000 -run_auction = 10_000 -slash = 10_000 -distribute = 10_000 -withdraw_delegator_reward = 10_000 -withdraw_validator_reward = 10_000 -read_era_id = 10_000 -activate_bid = 10_000 - -[system_costs.mint_costs] -mint = 10_000 -reduce_total_supply = 10_000 -create = 10_000 -balance = 10_000 -transfer = 
10_000 -read_base_round_reward = 10_000 - -[system_costs.handle_payment_costs] -get_payment_purse = 10_000 -set_refund_purse = 10_000 -get_refund_purse = 10_000 -finalize_payment = 10_000 - -[system_costs.standard_payment_costs] -pay = 10_000 diff --git a/utils/nctl/sh/scenarios/chainspecs/itst13.chainspec.toml.in b/utils/nctl/sh/scenarios/chainspecs/itst13.chainspec.toml.in deleted file mode 100644 index 795c3a80d0..0000000000 --- a/utils/nctl/sh/scenarios/chainspecs/itst13.chainspec.toml.in +++ /dev/null @@ -1,212 +0,0 @@ -[protocol] -# Protocol version. -version = '1.0.0' -# Whether we need to clear latest blocks back to the switch block just before the activation point or not. -hard_reset = false -# This protocol version becomes active at this point. -# -# If it is a timestamp string, it represents the timestamp for the genesis block. This is the beginning of era 0. By -# this time, a sufficient majority (> 50% + F/2 — see finality_threshold_fraction below) of validator nodes must be up -# and running to start the blockchain. This timestamp is also used in seeding the pseudo-random number generator used -# in contract-runtime for computing genesis post-state hash. -# -# If it is an integer, it represents an era ID, meaning the protocol version becomes active at the start of this era. -activation_point = '${TIMESTAMP}' - -[network] -# Human readable name for convenience; the genesis_hash is the true identifier. The name influences the genesis hash by -# contributing to the seeding of the pseudo-random number generator used in contract-runtime for computing genesis -# post-state hash. -name = 'casper-example' -# The maximum size of an acceptable networking message in bytes. Any message larger than this will -# be rejected at the networking level. -maximum_net_message_size = 23_068_672 - -[core] -# Era duration. -era_duration = '41seconds' -# Minimum number of blocks per era. 
An era will take longer than `era_duration` if that is necessary to reach the -# minimum height. -minimum_era_height = 10 -# Number of slots available in validator auction. -validator_slots = 5 -# Number of eras before an auction actually defines the set of validators. If you bond with a sufficient bid in era N, -# you will be a validator in era N + auction_delay + 1. -auction_delay = 3 -# The period after genesis during which a genesis validator's bid is locked. -locked_funds_period = '90days' -# Default number of eras that need to pass to be able to withdraw unbonded funds. -unbonding_delay = 14 -# Round seigniorage rate represented as a fraction of the total supply. -# -# Annual issuance: 2% -# Minimum round exponent: 12 -# Ticks per year: 31536000000 -# -# (1+0.02)^((2^12)/31536000000)-1 is expressed as a fractional number below. -round_seigniorage_rate = [15_959, 6_204_824_582_392] - -[highway] -# A number between 0 and 1 representing the fault tolerance threshold as a fraction, used by the internal finalizer. -# It is the fraction of validators that would need to equivocate to make two honest nodes see two conflicting blocks as -# finalized: A higher value F makes it safer to rely on finalized blocks. It also makes it more difficult to finalize -# blocks, however, and requires strictly more than (F + 1)/2 validators to be working correctly. -finality_threshold_fraction = [1, 3] -# Integer between 0 and 255. The power of two that is the number of milliseconds in the minimum round length, and -# therefore the minimum delay between a block and its child. E.g. 14 means 2^14 milliseconds, i.e. about 16 seconds. -minimum_round_exponent = 12 -# Integer between 0 and 255. Must be greater than `minimum_round_exponent`. The power of two that is the number of -# milliseconds in the maximum round length, and therefore the maximum delay between a block and its child. E.g. 19 -# means 2^19 milliseconds, i.e. about 8.7 minutes. 
-maximum_round_exponent = 14 -# The factor by which rewards for a round are multiplied if the greatest summit has ≤50% quorum, i.e. no finality. -# Expressed as a fraction (1/5 by default). -reduced_reward_multiplier = [1, 5] - -[deploys] -# The maximum number of Motes allowed to be spent during payment. 0 means unlimited. -max_payment_cost = '0' -# The duration after the deploy timestamp that it can be included in a block. -max_ttl = '1day' -# The maximum number of other deploys a deploy can depend on (require to have been executed before it can execute). -max_dependencies = 10 -# Maximum block size in bytes including deploys contained by the block. 0 means unlimited. -max_block_size = 10_485_760 -# The maximum number of non-transfer deploys permitted in a single block. -block_max_deploy_count = 100 -# The maximum number of wasm-less transfer deploys permitted in a single block. -block_max_transfer_count = 1000 -# The upper limit of total gas of all deploys in a block. -block_gas_limit = 10_000_000_000_000 -# The limit of length of serialized payment code arguments. -payment_args_max_length = 1024 -# The limit of length of serialized session code arguments. -session_args_max_length = 1024 -# The minimum amount in motes for a valid native transfer. -native_transfer_minimum_motes = 2_500_000_000 - -[wasm] -# Amount of free memory (in 64kB pages) each contract can use for stack. -max_memory = 64 -# Max stack height (native WebAssembly stack limiter). -max_stack_height = 65_536 - -[wasm.storage_costs] -# Gas charged per byte stored in the global state. -gas_per_byte = 630_000 - -[wasm.opcode_costs] -# Bit operations multiplier. -bit = 300 -# Arithmetic add operations multiplier. -add = 210 -# Mul operations multiplier. -mul = 240 -# Div operations multiplier. -div = 320 -# Memory load operation multiplier. -load = 2_500 -# Memory store operation multiplier. -store = 4_700 -# Const store operation multiplier. -const = 110 -# Local operations multiplier. 
-local = 390 -# Global operations multiplier. -global = 390 -# Control flow operations multiplier. -control_flow = 440 -# Integer operations multiplier. -integer_comparison = 250 -# Conversion operations multiplier. -conversion = 420 -# Unreachable operation multiplier. -unreachable = 270 -# Nop operation multiplier. -nop = 200 -# Get current memory operation multiplier. -current_memory = 290 -# Grow memory cost, per page (64kb). -grow_memory = 240_000 -# Regular opcode cost. -regular = 210 - -# Host function declarations are located in smart_contracts/contract/src/ext_ffi.rs -[wasm.host_function_costs] -add = { cost = 5_800, arguments = [0, 0, 0, 0] } -add_associated_key = { cost = 9_000, arguments = [0, 0, 0] } -add_contract_version = { cost = 200, arguments = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] } -blake2b = { cost = 200, arguments = [0, 0, 0, 0] } -call_contract = { cost = 4_500, arguments = [0, 0, 0, 0, 0, 420, 0] } -call_versioned_contract = { cost = 200, arguments = [0, 0, 0, 0, 0, 0, 0, 0, 0] } -create_contract_package_at_hash = { cost = 200, arguments = [0, 0] } -create_contract_user_group = { cost = 200, arguments = [0, 0, 0, 0, 0, 0, 0, 0] } -create_purse = { cost = 170_000, arguments = [0, 0] } -disable_contract_version = { cost = 200, arguments = [0, 0, 0, 0] } -get_balance = { cost = 3_800, arguments = [0, 0, 0] } -get_blocktime = { cost = 330, arguments = [0] } -get_caller = { cost = 380, arguments = [0] } -get_key = { cost = 2_000, arguments = [0, 440, 0, 0, 0] } -get_main_purse = { cost = 1_300, arguments = [0] } -get_named_arg = { cost = 200, arguments = [0, 0, 0, 0] } -get_named_arg_size = { cost = 200, arguments = [0, 0, 0] } -get_phase = { cost = 710, arguments = [0] } -get_system_contract = { cost = 1_100, arguments = [0, 0, 0] } -has_key = { cost = 1_500, arguments = [0, 840] } -is_valid_uref = { cost = 760, arguments = [0, 0] } -load_named_keys = { cost = 42_000, arguments = [0, 0] } -new_uref = { cost = 17_000, arguments = [0, 0, 590] } -print = 
{ cost = 20_000, arguments = [0, 4_600] } -provision_contract_user_group_uref = { cost = 200, arguments = [0, 0, 0, 0, 0] } -put_key = { cost = 38_000, arguments = [0, 1_100, 0, 0] } -read_host_buffer = { cost = 3_500, arguments = [0, 310, 0] } -read_value = { cost = 6_000, arguments = [0, 0, 0] } -read_value_local = { cost = 5_500, arguments = [0, 590, 0] } -remove_associated_key = { cost = 4_200, arguments = [0, 0] } -remove_contract_user_group = { cost = 200, arguments = [0, 0, 0, 0] } -remove_contract_user_group_urefs = { cost = 200, arguments = [0, 0, 0, 0, 0, 0] } -remove_key = { cost = 61_000, arguments = [0, 3_200] } -ret = { cost = 23_000, arguments = [0, 420_000] } -revert = { cost = 500, arguments = [0] } -set_action_threshold = { cost = 74_000, arguments = [0, 0] } -transfer_from_purse_to_account = { cost = 160_000, arguments = [0, 0, 0, 0, 0, 0, 0, 0, 0] } -transfer_from_purse_to_purse = { cost = 82_000, arguments = [0, 0, 0, 0, 0, 0, 0, 0] } -transfer_to_account = { cost = 24_000, arguments = [0, 0, 0, 0, 0, 0, 0] } -update_associated_key = { cost = 4_200, arguments = [0, 0, 0] } -write = { cost = 14_000, arguments = [0, 0, 0, 980] } -write_local = { cost = 9_500, arguments = [0, 1_800, 0, 520] } - -[system_costs] -wasmless_transfer_cost = 10_000 - -[system_costs.auction_costs] -get_era_validators = 10_000 -read_seigniorage_recipients = 10_000 -add_bid = 10_000 -withdraw_bid = 10_000 -delegate = 10_000 -undelegate = 10_000 -run_auction = 10_000 -slash = 10_000 -distribute = 10_000 -withdraw_delegator_reward = 10_000 -withdraw_validator_reward = 10_000 -read_era_id = 10_000 -activate_bid = 10_000 - -[system_costs.mint_costs] -mint = 10_000 -reduce_total_supply = 10_000 -create = 10_000 -balance = 10_000 -transfer = 10_000 -read_base_round_reward = 10_000 - -[system_costs.handle_payment_costs] -get_payment_purse = 10_000 -set_refund_purse = 10_000 -get_refund_purse = 10_000 -finalize_payment = 10_000 - -[system_costs.standard_payment_costs] -pay = 
10_000 diff --git a/utils/nctl/sh/scenarios/chainspecs/itst14.chainspec.toml.in b/utils/nctl/sh/scenarios/chainspecs/itst14.chainspec.toml.in deleted file mode 100644 index f0662c18dd..0000000000 --- a/utils/nctl/sh/scenarios/chainspecs/itst14.chainspec.toml.in +++ /dev/null @@ -1,212 +0,0 @@ -[protocol] -# Protocol version. -version = '1.0.0' -# Whether we need to clear latest blocks back to the switch block just before the activation point or not. -hard_reset = false -# This protocol version becomes active at this point. -# -# If it is a timestamp string, it represents the timestamp for the genesis block. This is the beginning of era 0. By -# this time, a sufficient majority (> 50% + F/2 — see finality_threshold_fraction below) of validator nodes must be up -# and running to start the blockchain. This timestamp is also used in seeding the pseudo-random number generator used -# in contract-runtime for computing genesis post-state hash. -# -# If it is an integer, it represents an era ID, meaning the protocol version becomes active at the start of this era. -activation_point = '${TIMESTAMP}' - -[network] -# Human readable name for convenience; the genesis_hash is the true identifier. The name influences the genesis hash by -# contributing to the seeding of the pseudo-random number generator used in contract-runtime for computing genesis -# post-state hash. -name = 'casper-example' -# The maximum size of an acceptable networking message in bytes. Any message larger than this will -# be rejected at the networking level. -maximum_net_message_size = 23_068_672 - -[core] -# Era duration. -era_duration = '180s' -# Minimum number of blocks per era. An era will take longer than `era_duration` if that is necessary to reach the -# minimum height. -minimum_era_height = 10 -# Number of slots available in validator auction. -validator_slots = 5 -# Number of eras before an auction actually defines the set of validators. 
If you bond with a sufficient bid in era N, -# you will be a validator in era N + auction_delay + 1. -auction_delay = 3 -# The period after genesis during which a genesis validator's bid is locked. -locked_funds_period = '90days' -# Default number of eras that need to pass to be able to withdraw unbonded funds. -unbonding_delay = 14 -# Round seigniorage rate represented as a fraction of the total supply. -# -# Annual issuance: 2% -# Minimum round exponent: 12 -# Ticks per year: 31536000000 -# -# (1+0.02)^((2^12)/31536000000)-1 is expressed as a fractional number below. -round_seigniorage_rate = [15_959, 6_204_824_582_392] - -[highway] -# A number between 0 and 1 representing the fault tolerance threshold as a fraction, used by the internal finalizer. -# It is the fraction of validators that would need to equivocate to make two honest nodes see two conflicting blocks as -# finalized: A higher value F makes it safer to rely on finalized blocks. It also makes it more difficult to finalize -# blocks, however, and requires strictly more than (F + 1)/2 validators to be working correctly. -finality_threshold_fraction = [1, 3] -# Integer between 0 and 255. The power of two that is the number of milliseconds in the minimum round length, and -# therefore the minimum delay between a block and its child. E.g. 14 means 2^14 milliseconds, i.e. about 16 seconds. -minimum_round_exponent = 12 -# Integer between 0 and 255. Must be greater than `minimum_round_exponent`. The power of two that is the number of -# milliseconds in the maximum round length, and therefore the maximum delay between a block and its child. E.g. 19 -# means 2^19 milliseconds, i.e. about 8.7 minutes. -maximum_round_exponent = 14 -# The factor by which rewards for a round are multiplied if the greatest summit has ≤50% quorum, i.e. no finality. -# Expressed as a fraction (1/5 by default). -reduced_reward_multiplier = [1, 5] - -[deploys] -# The maximum number of Motes allowed to be spent during payment. 
0 means unlimited. -max_payment_cost = '0' -# The duration after the deploy timestamp that it can be included in a block. -max_ttl = '1day' -# The maximum number of other deploys a deploy can depend on (require to have been executed before it can execute). -max_dependencies = 10 -# Maximum block size in bytes including deploys contained by the block. 0 means unlimited. -max_block_size = 10_485_760 -# The maximum number of non-transfer deploys permitted in a single block. -block_max_deploy_count = 100 -# The maximum number of wasm-less transfer deploys permitted in a single block. -block_max_transfer_count = 1000 -# The upper limit of total gas of all deploys in a block. -block_gas_limit = 10_000_000_000_000 -# The limit of length of serialized payment code arguments. -payment_args_max_length = 1024 -# The limit of length of serialized session code arguments. -session_args_max_length = 1024 -# The minimum amount in motes for a valid native transfer. -native_transfer_minimum_motes = 2_500_000_000 - -[wasm] -# Amount of free memory (in 64kB pages) each contract can use for stack. -max_memory = 64 -# Max stack height (native WebAssembly stack limiter). -max_stack_height = 65_536 - -[wasm.storage_costs] -# Gas charged per byte stored in the global state. -gas_per_byte = 630_000 - -[wasm.opcode_costs] -# Bit operations multiplier. -bit = 300 -# Arithmetic add operations multiplier. -add = 210 -# Mul operations multiplier. -mul = 240 -# Div operations multiplier. -div = 320 -# Memory load operation multiplier. -load = 2_500 -# Memory store operation multiplier. -store = 4_700 -# Const store operation multiplier. -const = 110 -# Local operations multiplier. -local = 390 -# Global operations multiplier. -global = 390 -# Control flow operations multiplier. -control_flow = 440 -# Integer operations multiplier. -integer_comparison = 250 -# Conversion operations multiplier. -conversion = 420 -# Unreachable operation multiplier. -unreachable = 270 -# Nop operation multiplier. 
-nop = 200 -# Get current memory operation multiplier. -current_memory = 290 -# Grow memory cost, per page (64kb). -grow_memory = 240_000 -# Regular opcode cost. -regular = 210 - -# Host function declarations are located in smart_contracts/contract/src/ext_ffi.rs -[wasm.host_function_costs] -add = { cost = 5_800, arguments = [0, 0, 0, 0] } -add_associated_key = { cost = 9_000, arguments = [0, 0, 0] } -add_contract_version = { cost = 200, arguments = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] } -blake2b = { cost = 200, arguments = [0, 0, 0, 0] } -call_contract = { cost = 4_500, arguments = [0, 0, 0, 0, 0, 420, 0] } -call_versioned_contract = { cost = 200, arguments = [0, 0, 0, 0, 0, 0, 0, 0, 0] } -create_contract_package_at_hash = { cost = 200, arguments = [0, 0] } -create_contract_user_group = { cost = 200, arguments = [0, 0, 0, 0, 0, 0, 0, 0] } -create_purse = { cost = 170_000, arguments = [0, 0] } -disable_contract_version = { cost = 200, arguments = [0, 0, 0, 0] } -get_balance = { cost = 3_800, arguments = [0, 0, 0] } -get_blocktime = { cost = 330, arguments = [0] } -get_caller = { cost = 380, arguments = [0] } -get_key = { cost = 2_000, arguments = [0, 440, 0, 0, 0] } -get_main_purse = { cost = 1_300, arguments = [0] } -get_named_arg = { cost = 200, arguments = [0, 0, 0, 0] } -get_named_arg_size = { cost = 200, arguments = [0, 0, 0] } -get_phase = { cost = 710, arguments = [0] } -get_system_contract = { cost = 1_100, arguments = [0, 0, 0] } -has_key = { cost = 1_500, arguments = [0, 840] } -is_valid_uref = { cost = 760, arguments = [0, 0] } -load_named_keys = { cost = 42_000, arguments = [0, 0] } -new_uref = { cost = 17_000, arguments = [0, 0, 590] } -print = { cost = 20_000, arguments = [0, 4_600] } -provision_contract_user_group_uref = { cost = 200, arguments = [0, 0, 0, 0, 0] } -put_key = { cost = 38_000, arguments = [0, 1_100, 0, 0] } -read_host_buffer = { cost = 3_500, arguments = [0, 310, 0] } -read_value = { cost = 6_000, arguments = [0, 0, 0] } -read_value_local = 
{ cost = 5_500, arguments = [0, 590, 0] } -remove_associated_key = { cost = 4_200, arguments = [0, 0] } -remove_contract_user_group = { cost = 200, arguments = [0, 0, 0, 0] } -remove_contract_user_group_urefs = { cost = 200, arguments = [0, 0, 0, 0, 0, 0] } -remove_key = { cost = 61_000, arguments = [0, 3_200] } -ret = { cost = 23_000, arguments = [0, 420_000] } -revert = { cost = 500, arguments = [0] } -set_action_threshold = { cost = 74_000, arguments = [0, 0] } -transfer_from_purse_to_account = { cost = 160_000, arguments = [0, 0, 0, 0, 0, 0, 0, 0, 0] } -transfer_from_purse_to_purse = { cost = 82_000, arguments = [0, 0, 0, 0, 0, 0, 0, 0] } -transfer_to_account = { cost = 24_000, arguments = [0, 0, 0, 0, 0, 0, 0] } -update_associated_key = { cost = 4_200, arguments = [0, 0, 0] } -write = { cost = 14_000, arguments = [0, 0, 0, 980] } -write_local = { cost = 9_500, arguments = [0, 1_800, 0, 520] } - -[system_costs] -wasmless_transfer_cost = 10_000 - -[system_costs.auction_costs] -get_era_validators = 10_000 -read_seigniorage_recipients = 10_000 -add_bid = 10_000 -withdraw_bid = 10_000 -delegate = 10_000 -undelegate = 10_000 -run_auction = 10_000 -slash = 10_000 -distribute = 10_000 -withdraw_delegator_reward = 10_000 -withdraw_validator_reward = 10_000 -read_era_id = 10_000 -activate_bid = 10_000 - -[system_costs.mint_costs] -mint = 10_000 -reduce_total_supply = 10_000 -create = 10_000 -balance = 10_000 -transfer = 10_000 -read_base_round_reward = 10_000 - -[system_costs.handle_payment_costs] -get_payment_purse = 10_000 -set_refund_purse = 10_000 -get_refund_purse = 10_000 -finalize_payment = 10_000 - -[system_costs.standard_payment_costs] -pay = 10_000 diff --git a/utils/nctl/sh/scenarios/common/itst.sh b/utils/nctl/sh/scenarios/common/itst.sh deleted file mode 100644 index d83f4010a9..0000000000 --- a/utils/nctl/sh/scenarios/common/itst.sh +++ /dev/null @@ -1,173 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/utils/main.sh -source "$NCTL"/sh/views/utils.sh 
-source "$NCTL"/sh/node/svc_"$NCTL_DAEMON_TYPE".sh - -function log_step() { - local COMMENT=${1} - log "------------------------------------------------------------" - log "STEP $STEP: $COMMENT" - STEP=$((STEP + 1)) -} - -function do_await_genesis_era_to_complete() { - log_step "awaiting genesis era to complete" - while [ "$(get_chain_era)" != "1" ]; do - sleep 1.0 - done -} - -function do_read_lfb_hash() { - local NODE_ID=${1} - LFB_HASH=$(render_last_finalized_block_hash "$NODE_ID" | cut -f2 -d= | cut -f2 -d ' ') - echo "$LFB_HASH" -} - -function do_start_node() { - local NODE_ID=${1} - local LFB_HASH=${2} - log_step "starting node-$NODE_ID. Syncing from hash=${LFB_HASH}" - do_node_start "$NODE_ID" "$LFB_HASH" - sleep 1 - if [ "$(do_node_status ${NODE_ID} | awk '{ print $2 }')" != "RUNNING" ]; then - log "ERROR: node-${NODE_ID} is not running" - exit 1 - else - log "node-${NODE_ID} is running" - fi -} - -function do_stop_node() { - local NODE_ID=${1} - log_step "stopping node-$NODE_ID." - do_node_stop "$NODE_ID" - sleep 1 - if [ "$(do_node_status ${NODE_ID} | awk '{ print $2 }')" = "RUNNING" ]; then - log "ERROR: node-${NODE_ID} is still running" - exit 1 - else - log "node-${NODE_ID} was shutdown in era: $(check_current_era)" - fi -} - -function check_network_sync() { - local WAIT_TIME_SEC=0 - log_step "check all node's LFBs are in sync" - while [ "$WAIT_TIME_SEC" != "$SYNC_TIMEOUT_SEC" ]; do - if [ "$(do_read_lfb_hash '5')" = "$(do_read_lfb_hash '1')" ] && \ - [ "$(do_read_lfb_hash '4')" = "$(do_read_lfb_hash '1')" ] && \ - [ "$(do_read_lfb_hash '3')" = "$(do_read_lfb_hash '1')" ] && \ - [ "$(do_read_lfb_hash '2')" = "$(do_read_lfb_hash '1')" ]; then - log "all nodes in sync, proceeding..." 
- break - fi - WAIT_TIME_SEC=$((WAIT_TIME_SEC + 1)) - if [ "$WAIT_TIME_SEC" = "$SYNC_TIMEOUT_SEC" ]; then - log "ERROR: Failed to confirm network sync" - exit 1 - fi - sleep 1 - done -} - -function do_await_era_change() { - # allow chain height to grow - local ERA_COUNT=${1:-"1"} - log_step "awaiting $ERA_COUNT eras…" - await_n_eras "$ERA_COUNT" -} - -function check_current_era { - local ERA="null" - while true; do - ERA=$(get_chain_era $(get_node_for_dispatch) | awk '{print $NF}') - if [ "$ERA" = "null" ] || [ "$ERA" = "N/A" ]; then - sleep 1 - else - break - fi - done - echo "$ERA" -} - -function check_faulty() { - local NODE_ID=${1} - local NODE_PATH=$(get_path_to_node $NODE_ID) - grep -q 'this validator is faulty' "$NODE_PATH/logs/stdout.log" - return $? -} - -# Captures the public key in hex of a node minus the '01' prefix. -# This is because the logs don't include the '01' prefix when -# searching in them for the public key in hex. See itst14 for -# use case. -function get_node_public_key_hex() { - local NODE_ID=${1} - local NODE_PATH=$(get_path_to_node $NODE_ID) - local PUBLIC_KEY_HEX=$(cat "$NODE_PATH"/keys/public_key_hex) - echo "${PUBLIC_KEY_HEX:2}" -} - -# Same as get_node_public_key_hex but includes the '01' prefix. -# When parsing the state with jq, the '01' prefix is included. -# See itst13 for use case. -function get_node_public_key_hex_extended() { - local NODE_ID=${1} - local NODE_PATH=$(get_path_to_node $NODE_ID) - local PUBLIC_KEY_HEX=$(cat "$NODE_PATH"/keys/public_key_hex) - echo "$PUBLIC_KEY_HEX" -} - -function do_await_n_blocks() { - local BLOCK_COUNT=${1:-1} - log_step "Waiting $BLOCK_COUNT blocks..." 
- nctl-await-n-blocks offset="$BLOCK_COUNT" -} - -function get_switch_block_equivocators() { - local NODE_ID=${1} - # Number of blocks to walkback before erroring out - local WALKBACK=${2} - local BLOCK_HASH=${3} - local JSON_OUT - local PARENT - local BLOCK_HEADER - - if [ -z "$BLOCK_HASH" ]; then - JSON_OUT=$($(get_path_to_client) get-block --node-address $(get_node_address_rpc "$NODE_ID")) - else - JSON_OUT=$($(get_path_to_client) get-block --node-address $(get_node_address_rpc "$NODE_ID") -b "$BLOCK_HASH") - fi - - if [ "$WALKBACK" -gt 0 ]; then - BLOCK_HEADER=$(echo "$JSON_OUT" | jq '.result.block.header') - if [ "$(echo "$BLOCK_HEADER" | jq '.era_end')" = "null" ]; then - PARENT=$(echo "$BLOCK_HEADER" | jq -r '.parent_hash') - WALKBACK=$((WALKBACK - 1)) - log "$WALKBACK: Walking back to block: $PARENT" - get_switch_block_equivocators "$NODE_ID" "$WALKBACK" "$PARENT" - else - log "equivocators: $(echo "$BLOCK_HEADER" | jq '.era_end.era_report.equivocators')" - fi - else - log "Error: Switch block not found within walkback!" - exit 1 - fi -} - -function get_running_node_count { - local RUNNING_COUNT=$(nctl-status | grep 'RUNNING' | wc -l) - echo "$RUNNING_COUNT" -} - -# Check that a certain node has produced blocks. -function assert_node_proposed() { - local NODE_ID=${1} - local NODE_PATH=$(get_path_to_node $NODE_ID) - local PUBLIC_KEY_HEX=$(get_node_public_key_hex $NODE_ID) - log_step "Waiting for node-$NODE_ID to produce a block..." - local OUTPUT=$(tail -f "$NODE_PATH/logs/stdout.log" | grep -m 1 "proposer: PublicKey::Ed25519($PUBLIC_KEY_HEX)") - log "node-$NODE_ID created a block!" - log "$OUTPUT" -} diff --git a/utils/nctl/sh/scenarios/itst01.sh b/utils/nctl/sh/scenarios/itst01.sh deleted file mode 100755 index a2665b20d7..0000000000 --- a/utils/nctl/sh/scenarios/itst01.sh +++ /dev/null @@ -1,74 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/scenarios/common/itst.sh - -# Exit if any of the commands fail. 
-set -e - -####################################### -# Runs an integration tests that tries to simulate -# stopping and restarting a random node. -# -# Arguments: -# `timeout=XXX` timeout (in seconds) when syncing. -####################################### -function main() { - log "------------------------------------------------------------" - log "Starting Scenario: itst01" - log "------------------------------------------------------------" - - log "Node to be stopped: $NODE_ID" - - # 0. Wait for network start up - do_await_genesis_era_to_complete - # 1. Allow chain to progress - do_await_era_change - # 2. Stop random node - do_stop_node "$NODE_ID" - # 3. Allow chain to progress - do_await_era_change - # 4. Get another random running node to compare - do_get_another_node - do_read_lfb_hash "$COMPARE_NODE_ID" - # 5. Restart node from LFB - do_start_node "$NODE_ID" "$LFB_HASH" - # 6-8. Check sync of restarted node, - # wait 1 era, and then check they are still in sync. - # This way we can verify that the node is up-to-date with the protocol state - # after transitioning to an active validator. 
- check_network_sync - do_await_era_change - check_network_sync - - log "------------------------------------------------------------" - log "Scenario itst01 complete" - log "------------------------------------------------------------" -} - -function do_get_another_node() { - COMPARE_NODE_ID=$(get_node_for_dispatch) - log_step "comparison node: $COMPARE_NODE_ID" -} - -# ---------------------------------------------------------------- -# ENTRY POINT -# ---------------------------------------------------------------- - -unset SYNC_TIMEOUT_SEC -unset LFB_HASH -unset NODE_ID -STEP=0 - -for ARGUMENT in "$@"; do - KEY=$(echo "$ARGUMENT" | cut -f1 -d=) - VALUE=$(echo "$ARGUMENT" | cut -f2 -d=) - case "$KEY" in - timeout) SYNC_TIMEOUT_SEC=${VALUE} ;; - *) ;; - esac -done - -NODE_ID=$(get_node_for_dispatch) -SYNC_TIMEOUT_SEC=${SYNC_TIMEOUT_SEC:-"300"} - -main "$NODE_ID" diff --git a/utils/nctl/sh/scenarios/itst02.sh b/utils/nctl/sh/scenarios/itst02.sh deleted file mode 100755 index af2739e7d6..0000000000 --- a/utils/nctl/sh/scenarios/itst02.sh +++ /dev/null @@ -1,114 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/scenarios/common/itst.sh - -# Exit if any of the commands fail. -set -e - -####################################### -# Runs an integration tests that tries to simulate -# stalling consensus by stopping enough nodes. It -# then restarts the nodes and checks for the chain -# to progress. -# -# Arguments: -# `timeout=XXX` timeout (in seconds) when syncing. -####################################### -function main() { - log "------------------------------------------------------------" - log "Starting Scenario: itst02" - log "------------------------------------------------------------" - - # 0. Wait for network start up - do_await_genesis_era_to_complete - # 1. Allow chain to progress - do_await_era_change - # 2. Verify all nodes are in sync - check_network_sync - # 3-5. Stop three nodes - do_stop_node "5" - do_stop_node "4" - do_stop_node "3" - # 6. 
Ensure chain stalled - assert_chain_stalled "60" - # 7-9. Restart three nodes - do_start_node "5" "$STALLED_LFB" - do_start_node "4" "$STALLED_LFB" - do_start_node "3" "$STALLED_LFB" - # 10. Verify all nodes are in sync - check_network_sync - # 11. Ensure era proceeds after restart - do_await_era_change "2" - # 12. Verify all nodes are in sync - check_network_sync - # 13-15. Compare stalled lfb hash to current - assert_chain_progressed "5" "$STALLED_LFB" - assert_chain_progressed "4" "$STALLED_LFB" - assert_chain_progressed "3" "$STALLED_LFB" - - log "------------------------------------------------------------" - log "Scenario itst02 complete" - log "------------------------------------------------------------" -} - -function assert_chain_progressed() { - # Function accepts two hashes as arguments and checks to - # see if they match. - log_step "node-${1}: checking chain progressed" - local LFB1=$(do_read_lfb_hash ${1}) - local LFB2=${2} - - if [ "$LFB1" = "$LFB2" ]; then - log "error: $LFB1 = $LFB2, chain didn't progress." - exit 1 - fi -} - -function assert_chain_stalled() { - # Fucntion checks that the two remaining node's LFB checked - # n-seconds apart doesnt progress - log_step "ensuring chain stalled" - local SLEEP_TIME=${1} - # Sleep 5 seconds to allow for final message propagation. - sleep 5 - local LFB_1_PRE=$(do_read_lfb_hash 1) - local LFB_2_PRE=$(do_read_lfb_hash 2) - log "Sleeping ${SLEEP_TIME}s..." - sleep $SLEEP_TIME - local LFB_1_POST=$(do_read_lfb_hash 1) - local LFB_2_POST=$(do_read_lfb_hash 2) - - if [ "$LFB_1_PRE" != "$LFB_1_POST" ] && [ "$LFB_2_PRE" != "$LFB_2_POST" ]; then - log "Error: Chain progressed." 
- exit 1 - fi - - if [ "$LFB_1_POST" != "$LFB_2_POST" ]; then - log "Error: LFB mismatch on nodes" - exit 1 - else - STALLED_LFB=$LFB_1_POST - fi -} - -# ---------------------------------------------------------------- -# ENTRY POINT -# ---------------------------------------------------------------- - -unset SYNC_TIMEOUT_SEC -unset LFB_HASH -unset STALLED_LFB -STEP=0 - -for ARGUMENT in "$@"; do - KEY=$(echo "$ARGUMENT" | cut -f1 -d=) - VALUE=$(echo "$ARGUMENT" | cut -f2 -d=) - case "$KEY" in - timeout) SYNC_TIMEOUT_SEC=${VALUE} ;; - *) ;; - esac -done - -SYNC_TIMEOUT_SEC=${SYNC_TIMEOUT_SEC:-"300"} - -main "$NODE_ID" diff --git a/utils/nctl/sh/scenarios/itst11.sh b/utils/nctl/sh/scenarios/itst11.sh deleted file mode 100755 index 53e9a38745..0000000000 --- a/utils/nctl/sh/scenarios/itst11.sh +++ /dev/null @@ -1,100 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/scenarios/common/itst.sh - -# Exit if any of the commands fail. -set -e - -####################################### -# Runs an integration test that tries to simulate -# a single doppelganger situation. -# -# Arguments: -# `timeout=XXX` timeout (in seconds) when syncing. -####################################### -function main() { - log "------------------------------------------------------------" - log "Starting Scenario: itst11" - log "------------------------------------------------------------" - - # 0. Wait for network start up - do_await_genesis_era_to_complete - # 1. Allow chain to progress - do_await_era_change - # 2. Verify all nodes are in sync - check_network_sync - # 3. Create the doppelganger - create_doppelganger '5' '6' - # 4. Get LFB Hash - do_read_lfb_hash '5' - # 5. Start doppelganger - do_start_node "6" "$LFB_HASH" - # 6. 
Look for one of the two nodes to report as faulty - assert_equivication "5" "6" - log "------------------------------------------------------------" - log "Scenario itst11 complete" - log "------------------------------------------------------------" -} - -function create_doppelganger() { - local NODE_ID=${1} - local DOPPEL_ID=${2} - log_step "Copying keys from $NODE_ID into $DOPPEL_ID" - cp -r "$(get_path_to_node $NODE_ID)/keys" "$(get_path_to_node $DOPPEL_ID)/" -} - -function check_doppel() { - local NODE_ID=${1} - local NODE_PATH=$(get_path_to_node $NODE_ID) - grep -q 'received vertex from a doppelganger' "$NODE_PATH/logs/stdout.log" - return $? -} - -function assert_equivication() { - local NODE_ID=${1} - local DOPPEL_ID=${2} - log_step "Checking for a faulty node..." - while [ "$WAIT_TIME_SEC" != "$SYNC_TIMEOUT_SEC" ]; do - if ( check_faulty "$NODE_ID" ); then - log "validator node-$NODE_ID found as faulty! [expected]" - break - elif ( check_faulty "$DOPPEL_ID" ); then - log "doppelganger node-$DOPPEL_ID found as faulty! [expected]" - break - elif ( check_doppel "$NODE_ID" ); then - log "node-$NODE_ID received vertex from a doppelganger! [expected]" - break - elif ( check_doppel "$DOPPEL_ID" ); then - log "node-$DOPPEL_ID received vertex from a doppelganger! 
[expected]" - break - fi - - if [ "$WAIT_TIME_SEC" = "$SYNC_TIMEOUT_SEC" ]; then - log "ERROR: Failed to confirm a faulty validator" - exit 1 - fi - WAIT_TIME_SEC=$((WAIT_TIME_SEC + 1)) - sleep 1 - done -} - -# ---------------------------------------------------------------- -# ENTRY POINT -# ---------------------------------------------------------------- - -unset SYNC_TIMEOUT_SEC -unset LFB_HASH -STEP=0 - -for ARGUMENT in "$@"; do - KEY=$(echo "$ARGUMENT" | cut -f1 -d=) - VALUE=$(echo "$ARGUMENT" | cut -f2 -d=) - case "$KEY" in - timeout) SYNC_TIMEOUT_SEC=${VALUE} ;; - *) ;; - esac -done - -SYNC_TIMEOUT_SEC=${SYNC_TIMEOUT_SEC:-"300"} - -main diff --git a/utils/nctl/sh/scenarios/itst13.sh b/utils/nctl/sh/scenarios/itst13.sh deleted file mode 100755 index 2087740fba..0000000000 --- a/utils/nctl/sh/scenarios/itst13.sh +++ /dev/null @@ -1,104 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/scenarios/common/itst.sh - -# Exit if any of the commands fail. -set -e - -####################################### -# Runs an integration test that tries to simulate -# and verify validator ejection. -# -# Arguments: -# `timeout=XXX` timeout (in seconds) when syncing. -####################################### -function main() { - log "------------------------------------------------------------" - log "Starting Scenario: itst13" - log "------------------------------------------------------------" - - # 0. Wait for network start up - do_await_genesis_era_to_complete - # 1. Allow chain to progress - do_await_era_change - # 2. Verify all nodes are in sync - check_network_sync - # 3. Stop the node - do_stop_node '5' - # 4. wait auction_delay + 1 - do_await_era_change '4' - # 5. 
Validate eviction occured - assert_eviction '5' - log "------------------------------------------------------------" - log "Scenario itst13 complete" - log "------------------------------------------------------------" -} - -# Checks that a validator gets marked as inactive -function check_inactive() { - local NODE_ID=${1} - local HEX=$(get_node_public_key_hex_extended "$NODE_ID") - # In order to pass bash variables into jq you must specify a jq arg. - # Below the jq arg 'node_hex' is set to $HEX. The query looks at the - # state of the auction and checks to see if a validator gets marked - # as inactive. The validator is found via his public key $HEX (node_hex). - # We return the exit code of the grep to check success. - nctl-view-chain-auction-info | jq --arg node_hex "$HEX" '.auction_state.bids[] | select(.public_key == $node_hex).bid.inactive' | grep -q 'true' - return $? -} - -# Checks that the current era + 1 contains a nodes -# public key hex -function is_trusted_validator() { - local NODE_ID=${1} - local HEX=$(get_node_public_key_hex_extended "$NODE_ID") - local ERA=$(check_current_era) - # Plus 1 to avoid query issue if era switches mid run - local ERA_PLUS_1=$(expr $ERA + 1) - # note: tonumber is a must here to prevent jq from being too smart. - # The jq arg 'era' is set to $ERA_PLUS_1. The query looks to find that - # the validator is removed from era_validators list. We grep for - # the public_key_hex to see if the validator is still listed and return - # the exit code to check success. - nctl-view-chain-auction-info | jq --arg era "$ERA_PLUS_1" '.auction_state.era_validators[] | select(.era_id == ($era | tonumber))' | grep -q "$HEX" - return $? -} - -function assert_eviction() { - local NODE_ID=${1} - log_step "Checking for evicted node-$NODE_ID..." - while [ "$WAIT_TIME_SEC" != "$SYNC_TIMEOUT_SEC" ]; do - if ( ! is_trusted_validator "$NODE_ID" ) && ( check_inactive "$NODE_ID" ); then - log "validator node-$NODE_ID was ejected! 
[expected]" - break - fi - - WAIT_TIME_SEC=$((WAIT_TIME_SEC + 1)) - - if [ "$WAIT_TIME_SEC" = "$SYNC_TIMEOUT_SEC" ]; then - log "ERROR: Time out. Failed to confirm a faulty validator in $SYNC_TIMEOUT_SEC seconds." - exit 1 - fi - sleep 1 - done -} - -# ---------------------------------------------------------------- -# ENTRY POINT -# ---------------------------------------------------------------- - -unset SYNC_TIMEOUT_SEC -STEP=0 - -for ARGUMENT in "$@"; do - KEY=$(echo "$ARGUMENT" | cut -f1 -d=) - VALUE=$(echo "$ARGUMENT" | cut -f2 -d=) - case "$KEY" in - timeout) SYNC_TIMEOUT_SEC=${VALUE} ;; - *) ;; - esac -done - -SYNC_TIMEOUT_SEC=${SYNC_TIMEOUT_SEC:-"300"} - -main diff --git a/utils/nctl/sh/scenarios/itst14.sh b/utils/nctl/sh/scenarios/itst14.sh deleted file mode 100755 index 346c604e17..0000000000 --- a/utils/nctl/sh/scenarios/itst14.sh +++ /dev/null @@ -1,121 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/scenarios/common/itst.sh - -# Exit if any of the commands fail. -set -e - -####################################### -# Runs an integration tests that tries to simulate -# if a validator node can restart within single era -# and not equivocate. -# Arguments: -# `timeout=XXX` timeout (in seconds) when syncing. -####################################### -function main() { - log "------------------------------------------------------------" - log "Starting Scenario: itst14" - log "------------------------------------------------------------" - - # 0. Verify network is creating blocks - do_await_n_blocks "5" - # 1. Verify network is in sync - check_network_sync - # 2a. Get era - STOPPED_ERA=$(check_current_era) - # 2b. Stop node - do_stop_node "5" - # 3. Let the node go down - log_step "Sleeping for 10s before bringing node back online..." - sleep 10 - # 4. Restart Node - do_read_lfb_hash 1 - do_start_node "5" "$LFB_HASH" - # 5. Verify all nodes are in sync - check_network_sync - # 6. Verify network is creating blocks post-restart - do_await_n_blocks "5" - # 7. 
Verify all nodes are in sync - check_network_sync - # 8. Verify node proposed a block - assert_node_proposed '5' '180' - # 9. Verify we are in the same era - assert_same_era "$STOPPED_ERA" - # 10. Wait an era - do_await_era_change - # 11. Verify all nodes are in sync - check_network_sync - # 12. Check for equivication - assert_no_equivocation "5" "1" "100" - - log "------------------------------------------------------------" - log "Scenario itst14 complete" - log "------------------------------------------------------------" -} - -function assert_same_era() { - local ERA=${1} - log_step "Checking if within same era..." - if [ "$ERA" == "$(check_current_era)" ]; then - log "Still within the era. Continuing..." - else - log "Error: Era progressed! Exiting..." - exit 1 - fi -} - -function assert_node_proposed() { - local NODE_ID=${1} - local NODE_PATH=$(get_path_to_node "$NODE_ID") - local PUBLIC_KEY_HEX=$(get_node_public_key_hex "$NODE_ID") - local TIMEOUT=${2:-300} - log_step "Waiting for a node-$NODE_ID to produce a block..." - local OUTPUT=$(timeout "$TIMEOUT" tail -n 1 -f "$NODE_PATH/logs/stdout.log" | grep -o -m 1 "proposer: PublicKey::Ed25519($PUBLIC_KEY_HEX)") - if ( echo "$OUTPUT" | grep -q "proposer: PublicKey::Ed25519($PUBLIC_KEY_HEX)" ); then - log "Node-$NODE_ID created a block!" - log "$OUTPUT" - else - log "ERROR: Node-$NODE_ID didn't create a block within timeout=$TIMEOUT" - exit 1 - fi -} - -function assert_no_equivocation() { - local NODE_ID=${1} - local QUERY_NODE_ID=${2} - local WALKBACK=${3} - local EQUIVOCATORS - # "casper-client list-rpc" shows this including '01' prefix. Using extended version. - local PUBLIC_KEY_HEX=$(get_node_public_key_hex_extended "$NODE_ID") - log_step "Checking to see if node-$NODE_ID:$PUBLIC_KEY_HEX equivocated..." - EQUIVOCATORS=$(get_switch_block_equivocators "$QUERY_NODE_ID" "$WALKBACK") - log "$EQUIVOCATORS" - if ( ! echo "$EQUIVOCATORS" | grep -q "$PUBLIC_KEY_HEX" ); then - log "Node-$NODE_ID didn't equivocate! 
yay!" - else - log "ERROR: Node-$NODE_ID equivocated!" - exit 1 - fi -} - -# ---------------------------------------------------------------- -# ENTRY POINT -# ---------------------------------------------------------------- - -unset SYNC_TIMEOUT_SEC -unset LFB_HASH -unset PUBLIC_KEY_HEX -STEP=0 - -for ARGUMENT in "$@"; do - KEY=$(echo "$ARGUMENT" | cut -f1 -d=) - VALUE=$(echo "$ARGUMENT" | cut -f2 -d=) - case "$KEY" in - timeout) SYNC_TIMEOUT_SEC=${VALUE} ;; - *) ;; - esac -done - -SYNC_TIMEOUT_SEC=${SYNC_TIMEOUT_SEC:-"300"} - -main "$NODE_ID" diff --git a/utils/nctl/sh/scenarios/sync_test.sh b/utils/nctl/sh/scenarios/sync_test.sh deleted file mode 100755 index 38562d92f7..0000000000 --- a/utils/nctl/sh/scenarios/sync_test.sh +++ /dev/null @@ -1,191 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/utils/main.sh -source "$NCTL"/sh/views/utils.sh -source "$NCTL"/sh/node/svc_"$NCTL_DAEMON_TYPE".sh - -# Exit if any of the commands fail. -set -e - -####################################### -# Runs an integration tests that tries to sync a new node -# to an already running network. -# -# Arguments: -# `node=XXX` ID of a new node. -# `timeout=XXX` timeout (in seconds) when syncing. -####################################### -function main() { - log "------------------------------------------------------------" - log "Syncing node begins" - log "------------------------------------------------------------" - - do_await_genesis_era_to_complete - - # 1. Send batch of Wasm deploys - do_send_wasm_deploys - # 2. Send batch of native transfers - do_send_transfers - # 3. Wait until they're all included in the chain. - do_await_deploy_inclusion - # 4. Take a note of the last finalized block hash - do_read_lfb_hash - # 5. Send batch of Wasm deploys - do_send_wasm_deploys - # 6. Send batch of native transfers - do_send_transfers - # 7. Wait until they're all included in the chain. - # 8. Start the node in sync mode using hash from 4) - do_start_new_node "$NEW_NODE_ID" - # 9. 
Wait until it's synchronized. - # 10. Verify that its last finalized block matches other nodes'. - # nctl-view-chain-root-hash - do_await_full_synchronization "$NEW_NODE_ID" - - log "------------------------------------------------------------" - log "Syncing node complete" - log "------------------------------------------------------------" -} - -function log_step() { - local COMMENT=${1} - log "------------------------------------------------------------" - log "STEP $STEP: $COMMENT" - STEP=$((STEP + 1)) -} - -function do_await_genesis_era_to_complete() { - log_step "awaiting genesis era to complete" - while [ "$(get_chain_era)" != "1" ]; do - sleep 1.0 - done -} - -function do_send_wasm_deploys() { - # NOTE: Maybe make these arguments to the test? - local BATCH_COUNT=1 - local BATCH_SIZE=1 - local TRANSFER_AMOUNT=2500000000 - log_step "sending Wasm deploys" - # prepare wasm batch - prepare_wasm_batch "$TRANSFER_AMOUNT" "$BATCH_COUNT" "$BATCH_SIZE" - # dispatch wasm batches - for BATCH_ID in $(seq 1 $BATCH_COUNT); do - dispatch_wasm_batch "$BATCH_ID" - done -} - -function do_send_transfers() { - log_step "sending native transfers" - # NOTE: Maybe make these arguments to the test? - local AMOUNT=2500000000 - local TRANSFERS_COUNT=5 - local NODE_ID="random" - - # Enumerate set of users. - for USER_ID in $(seq 1 "$(get_count_of_users)"); do - dispatch_native "$AMOUNT" "$USER_ID" "$TRANSFERS_COUNT" "$NODE_ID" - done -} - -function do_await_deploy_inclusion() { - # Should be enough to await for one era. - log_step "awaiting one era…" - await_n_eras 1 -} - -function do_read_lfb_hash() { - local NODE_ID=${1} - LFB_HASH=$(render_last_finalized_block_hash "$NODE_ID" | cut -f2 -d= | cut -f2 -d ' ') - echo "$LFB_HASH" -} - -function do_start_new_node() { - local NODE_ID=${1} - log_step "starting new node-$NODE_ID. Syncing from hash=${LFB_HASH}" - export RUST_LOG="info,casper_node::components::linear_chain_sync=trace" - # TODO: Do not hardcode. 
- do_node_start "$NODE_ID" "$LFB_HASH" -} - -function do_await_full_synchronization() { - local NODE_ID=${1} - local WAIT_TIME_SEC=0 - log_step "awaiting full synchronization of the new node=${NODE_ID}…" - while [ "$(do_read_lfb_hash "$NODE_ID")" != "$(do_read_lfb_hash 1)" ]; do - if [ "$WAIT_TIME_SEC" = "$SYNC_TIMEOUT_SEC" ]; then - log "ERROR: Failed to synchronize in ${SYNC_TIMEOUT_SEC} seconds" - exit 1 - fi - WAIT_TIME_SEC=$((WAIT_TIME_SEC + 1)) - sleep 1.0 - done - # Wait one more era and then test LFB again. - # This way we can verify that the node is up-to-date with the protocol state - # after transitioning to an active validator. - await_n_eras 1 - while [ "$(do_read_lfb_hash "$NODE_ID")" != "$(do_read_lfb_hash 1)" ]; do - if [ "$WAIT_TIME_SEC" = "$SYNC_TIMEOUT_SEC" ]; then - log "ERROR: Failed to keep up with the protocol state" - exit 1 - fi - WAIT_TIME_SEC=$((WAIT_TIME_SEC + 1)) - sleep 1.0 - done -} - -function dispatch_native() { - local AMOUNT=${1} - local USER_ID=${2} - local TRANSFERS=${3} - local NODE_ID=${4} - - source "$NCTL"/sh/contracts-transfers/do_dispatch_native.sh amount="$AMOUNT" \ - user="$USER_ID" \ - transfers="$TRANSFERS" \ - node="$NODE_ID" -} - -function dispatch_wasm_batch() { - local BATCH_ID=${1:-1} - local INTERVAL=${2:-0.01} - local NODE_ID=${3:-"random"} - - source "$NCTL"/sh/contracts-transfers/do_dispatch_wasm_batch.sh batch="$BATCH_ID" \ - interval="$INTERVAL" \ - node="$NODE_ID" -} - -function prepare_wasm_batch() { - local AMOUNT=${1} - local BATCH_COUNT=${2} - local BATCH_SIZE=${3} - - source "$NCTL"/sh/contracts-transfers/do_prepare_wasm_batch.sh amount="$AMOUNT" \ - count="$BATCH_COUNT" \ - size="$BATCH_SIZE" -} - -# ---------------------------------------------------------------- -# ENTRY POINT -# ---------------------------------------------------------------- - -unset NEW_NODE_ID -unset SYNC_TIMEOUT_SEC -unset LFB_HASH -STEP=0 - -for ARGUMENT in "$@"; do - KEY=$(echo "$ARGUMENT" | cut -f1 -d=) - VALUE=$(echo 
"$ARGUMENT" | cut -f2 -d=) - case "$KEY" in - node) NEW_NODE_ID=${VALUE} ;; - timeout) SYNC_TIMEOUT_SEC=${VALUE} ;; - *) ;; - esac -done - -NEW_NODE_ID=${NEW_NODE_ID:-"6"} -SYNC_TIMEOUT_SEC=${SYNC_TIMEOUT_SEC:-"300"} - -main "$NEW_NODE_ID" diff --git a/utils/nctl/sh/scenarios/sync_upgrade_test.sh b/utils/nctl/sh/scenarios/sync_upgrade_test.sh deleted file mode 100755 index 7871078292..0000000000 --- a/utils/nctl/sh/scenarios/sync_upgrade_test.sh +++ /dev/null @@ -1,247 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/utils/main.sh -source "$NCTL"/sh/views/utils.sh -source "$NCTL"/sh/assets/upgrade.sh -source "$NCTL"/sh/node/svc_"$NCTL_DAEMON_TYPE".sh -source "$NCTL"/sh/scenarios/common/itst.sh - -# Exit if any of the commands fail. -set -e - -####################################### -# Runs an integration tests that tries to sync a new node -# with an upgraded network. -# -# Arguments: -# `node=XXX` ID of a new node. Default=6 -# `timeout=XXX` timeout (in seconds) when syncing. Default=300 seconds. -# `version=X_Y_Z` new protocol version to upgrade to. Default=2_0_0. -# `era=X` at which the upgrade should take place. Default=3. -####################################### -function main() { - log "------------------------------------------------------------" - log "Syncing node begins" - log "------------------------------------------------------------" - - do_await_genesis_era_to_complete - - # 1. Send batch of Wasm deploys - do_send_wasm_deploys - # 2. Send batch of native transfers - do_send_transfers - # 3. Wait until they're all included in the chain. - do_await_deploy_inclusion - # 4. Upgrade the network - do_upgrade_network - # 5. Wait for the network to upgrade. - do_await_network_upgrade - assert_network_upgrade "$PROTOCOL_VERSION" - # 6. Take a note of the last finalized block hash - do_read_lfb_hash - # 7. Send batch of Wasm deploys - do_send_wasm_deploys - # 8. Send batch of native transfers - do_send_transfers - # 9. 
Wait until they're all included in the chain. - do_await_deploy_inclusion - # 10. Start the node in sync mode using hash from 4) - do_start_new_node "$NEW_NODE_ID" - # 11. Wait until it's synchronized - # and verify that its last finalized block matches other nodes'. - do_await_full_synchronization "$NEW_NODE_ID" - - log "------------------------------------------------------------" - log "Syncing node complete" - log "------------------------------------------------------------" -} - -function log_step() { - local COMMENT=${1} - log "------------------------------------------------------------" - log "STEP $STEP: $COMMENT" - log "------------------------------------------------------------" - STEP=$((STEP + 1)) -} - -function do_await_genesis_era_to_complete() { - log_step "awaiting genesis era to complete" - while [ "$(get_chain_era)" != "1" ]; do - sleep 1.0 - done -} - -function do_upgrade_network() { - log_step "scheduling the network upgrade to version ${PROTOCOL_VERSION} at era ${ACTIVATE_ERA}" - for NODE_ID in $(seq 1 "$(get_count_of_nodes)"); do - _upgrade_node "$PROTOCOL_VERSION" "$ACTIVATE_ERA" "$NODE_ID" - done -} - -function do_await_network_upgrade() { - log_step "wait for the network to upgrade" - local WAIT_TIME_SEC=0 - local WAIT_UNTIL=$((ACTIVATE_ERA + 1)) - while [ "$(get_chain_era)" != "$WAIT_UNTIL" ]; do - if [ "$WAIT_TIME_SEC" = "$SYNC_TIMEOUT_SEC" ]; then - log "ERROR: Failed to upgrade the network in ${SYNC_TIMEOUT_SEC} seconds" - exit 1 - fi - WAIT_TIME_SEC=$((WAIT_TIME_SEC + 1)) - sleep 1.0 - done -} - -function assert_network_upgrade() { - local STATUS - local COUNT - local RUNNING_COUNT - local PROTO=${1} - local CONVERTED - log_step "checking that entire network upgraded to $PROTO" - CONVERTED=$(echo $PROTO | sed 's/_/./g') - STATUS=$(nctl-view-node-status) - COUNT=$(grep 'api_version' <<< $STATUS[*] | grep -o "$CONVERTED" | wc -l) - RUNNING_COUNT=$(get_running_node_count) - - if [ ! 
"$COUNT" = "$RUNNING_COUNT" ]; then - log "ERROR: Upgrade failed, $COUNT out of $RUNNING_COUNT upgraded successfully." - exit 1 - fi - log "$COUNT out of $RUNNING_COUNT upgraded successfully!" -} - -function do_send_wasm_deploys() { - # NOTE: Maybe make these arguments to the test? - local BATCH_COUNT=1 - local BATCH_SIZE=1 - local TRANSFER_AMOUNT=10000 - log_step "sending Wasm deploys" - # prepare wasm batch - prepare_wasm_batch "$TRANSFER_AMOUNT" "$BATCH_COUNT" "$BATCH_SIZE" - # dispatch wasm batches - for BATCH_ID in $(seq 1 $BATCH_COUNT); do - dispatch_wasm_batch "$BATCH_ID" - done -} - -function do_send_transfers() { - log_step "sending native transfers" - # NOTE: Maybe make these arguments to the test? - local AMOUNT=2500000000 - local TRANSFERS_COUNT=5 - local NODE_ID="random" - - # Enumerate set of users. - for USER_ID in $(seq 1 "$(get_count_of_users)"); do - dispatch_native "$AMOUNT" "$USER_ID" "$TRANSFERS_COUNT" "$NODE_ID" - done -} - -function do_await_deploy_inclusion() { - # Should be enough to await for one era. - log_step "awaiting one era…" - await_n_eras 1 -} - -function do_read_lfb_hash() { - local NODE_ID=${1} - LFB_HASH=$(render_last_finalized_block_hash "$NODE_ID" | cut -f2 -d= | cut -f2 -d ' ') - echo "$LFB_HASH" -} - -function do_start_new_node() { - local NODE_ID=${1} - log_step "starting new node-$NODE_ID. Syncing from hash=${LFB_HASH}" - export RUST_LOG="info,casper_node::components::linear_chain_sync=trace" - # TODO: Do not hardcode. 
- do_node_start "$NODE_ID" "$LFB_HASH" -} - -function do_await_full_synchronization() { - local NODE_ID=${1} - local WAIT_TIME_SEC=0 - log_step "awaiting full synchronization of the new node=${NODE_ID}…" - while [ "$(do_read_lfb_hash "$NODE_ID")" != "$(do_read_lfb_hash 1)" ]; do - if [ "$WAIT_TIME_SEC" = "$SYNC_TIMEOUT_SEC" ]; then - log "ERROR: Failed to synchronize in ${SYNC_TIMEOUT_SEC} seconds" - exit 1 - fi - WAIT_TIME_SEC=$((WAIT_TIME_SEC + 1)) - sleep 1.0 - done - # Wait one more era and then test LFB again. - # This way we can verify that the node is up-to-date with the protocol state - # after transitioning to an active validator. - await_n_eras 1 - while [ "$(do_read_lfb_hash "$NODE_ID")" != "$(do_read_lfb_hash 1)" ]; do - if [ "$WAIT_TIME_SEC" = "$SYNC_TIMEOUT_SEC" ]; then - log "ERROR: Failed to keep up with the protocol state" - exit 1 - fi - WAIT_TIME_SEC=$((WAIT_TIME_SEC + 1)) - sleep 1.0 - done -} - -function dispatch_native() { - local AMOUNT=${1} - local USER_ID=${2} - local TRANSFERS=${3} - local NODE_ID=${4} - - source "$NCTL"/sh/contracts-transfers/do_dispatch_native.sh amount="$AMOUNT" \ - user="$USER_ID" \ - transfers="$TRANSFERS" \ - node="$NODE_ID" -} - -function dispatch_wasm_batch() { - local BATCH_ID=${1:-1} - local INTERVAL=${2:-0.01} - local NODE_ID=${3:-"random"} - - source "$NCTL"/sh/contracts-transfers/do_dispatch_wasm_batch.sh batch="$BATCH_ID" \ - interval="$INTERVAL" \ - node="$NODE_ID" -} - -function prepare_wasm_batch() { - local AMOUNT=${1} - local BATCH_COUNT=${2} - local BATCH_SIZE=${3} - - source "$NCTL"/sh/contracts-transfers/do_prepare_wasm_batch.sh amount="$AMOUNT" \ - count="$BATCH_COUNT" \ - size="$BATCH_SIZE" -} - -# ---------------------------------------------------------------- -# ENTRY POINT -# ---------------------------------------------------------------- - -unset NEW_NODE_ID -unset SYNC_TIMEOUT_SEC -unset LFB_HASH -unset ACTIVATE_ERA -unset PROTOCOL_VERSION -STEP=0 - -for ARGUMENT in "$@"; do - KEY=$(echo 
"$ARGUMENT" | cut -f1 -d=) - VALUE=$(echo "$ARGUMENT" | cut -f2 -d=) - case "$KEY" in - node) NEW_NODE_ID=${VALUE} ;; - timeout) SYNC_TIMEOUT_SEC=${VALUE} ;; - era) ACTIVATE_ERA=${VALUE} ;; - version) PROTOCOL_VERSION=${VALUE} ;; - *) ;; - esac -done - -NEW_NODE_ID=${NEW_NODE_ID:-"6"} -SYNC_TIMEOUT_SEC=${SYNC_TIMEOUT_SEC:-"300"} -ACTIVATE_ERA=${ACTIVATE_ERA:-"3"} -PROTOCOL_VERSION=${PROTOCOL_VERSION:-"2_0_0"} - -main diff --git a/utils/nctl/sh/utils/accounts.sh b/utils/nctl/sh/utils/accounts.sh deleted file mode 100644 index e8ed114307..0000000000 --- a/utils/nctl/sh/utils/accounts.sh +++ /dev/null @@ -1,113 +0,0 @@ -#!/usr/bin/env bash - -####################################### -# Returns an on-chain account balance. -# Arguments: -# Data to be hashed. -####################################### -function get_account_balance() -{ - local PURSE_UREF=${1} - local STATE_ROOT_HASH=${2:-$(get_state_root_hash)} - local ACCOUNT_BALANCE - local NODE_ADDRESS - - NODE_ADDRESS=$(get_node_address_rpc) - ACCOUNT_BALANCE=$( - $(get_path_to_client) get-balance \ - --node-address "$NODE_ADDRESS" \ - --state-root-hash "$STATE_ROOT_HASH" \ - --purse-uref "$PURSE_UREF" \ - | jq '.result.balance_value' \ - | sed -e 's/^"//' -e 's/"$//' - ) - - echo "$ACCOUNT_BALANCE" -} - -####################################### -# Returns an on-chain account hash. -# Arguments: -# Data to be hashed. -####################################### -function get_account_hash() -{ - local ACCOUNT_KEY=${1} - local ACCOUNT_PBK=${ACCOUNT_KEY:2} - - local SCRIPT=( - "import hashlib;" - "as_bytes=bytes('ed25519', 'utf-8') + bytearray(1) + bytes.fromhex('$ACCOUNT_PBK');" - "h=hashlib.blake2b(digest_size=32);" - "h.update(as_bytes);" - "print(h.digest().hex());" - ) - - python3 -c "${SCRIPT[*]}" -} - -####################################### -# Returns an account key. -# Globals: -# NCTL_ACCOUNT_TYPE_FAUCET - faucet account type. -# NCTL_ACCOUNT_TYPE_NODE - node account type. 
-# NCTL_ACCOUNT_TYPE_USER - user account type. -# Arguments: -# Account type (node | user | faucet). -# Account ordinal identifier (optional). -####################################### -function get_account_key() -{ - local ACCOUNT_TYPE=${1} - local ACCOUNT_IDX=${2} - - if [ "$ACCOUNT_TYPE" = "$NCTL_ACCOUNT_TYPE_FAUCET" ]; then - cat "$(get_path_to_faucet)"/public_key_hex - elif [ "$ACCOUNT_TYPE" = "$NCTL_ACCOUNT_TYPE_NODE" ]; then - cat "$(get_path_to_node "$ACCOUNT_IDX")"/keys/public_key_hex - elif [ "$ACCOUNT_TYPE" = "$NCTL_ACCOUNT_TYPE_USER" ]; then - cat "$(get_path_to_user "$ACCOUNT_IDX")"/public_key_hex - fi -} - -####################################### -# Returns an account prefix used when logging. -# Globals: -# NCTL_ACCOUNT_TYPE_FAUCET - faucet account type. -# Arguments: -# Account type. -# Account index (optional). -####################################### -function get_account_prefix() -{ - local ACCOUNT_TYPE=${1} - local ACCOUNT_IDX=${2:-} - local NET_ID=${NET_ID:-1} - - local PREFIX="net-$NET_ID.$ACCOUNT_TYPE" - if [ "$ACCOUNT_TYPE" != "$NCTL_ACCOUNT_TYPE_FAUCET" ]; then - PREFIX=$PREFIX"-"$ACCOUNT_IDX - fi - - echo "$PREFIX" -} - -####################################### -# Returns a main purse uref. -# Globals: -# NCTL - path to nctl home directory. -# Arguments: -# Account key. -# State root hash. -####################################### -function get_main_purse_uref() -{ - local ACCOUNT_KEY=${1} - local STATE_ROOT_HASH=${2:-$(get_state_root_hash)} - - source "$NCTL"/sh/views/view_chain_account.sh \ - account-key="$ACCOUNT_KEY" \ - root-hash="$STATE_ROOT_HASH" \ - | jq '.stored_value.Account.main_purse' \ - | sed -e 's/^"//' -e 's/"$//' -} diff --git a/utils/nctl/sh/utils/blocking.sh b/utils/nctl/sh/utils/blocking.sh deleted file mode 100644 index 4badc040e7..0000000000 --- a/utils/nctl/sh/utils/blocking.sh +++ /dev/null @@ -1,98 +0,0 @@ -#!/usr/bin/env bash - -####################################### -# Awaits for the chain to proceed N eras. 
-# Arguments: -# Network ordinal identifier. -# Node ordinal identifier. -# Future era offset to apply. -####################################### -function await_n_eras() -{ - local OFFSET=${1} - local EMIT_LOG=${2:-false} - local CURRENT - local FUTURE - - CURRENT=$(get_chain_era) - FUTURE=$((CURRENT + OFFSET)) - - while [ "$CURRENT" -lt "$FUTURE" ]; - do - if [ "$EMIT_LOG" = true ]; then - log "current era = $CURRENT :: future era = $FUTURE ... sleeping 20 seconds" - fi - sleep 20.0 - CURRENT=$(get_chain_era) - done - - if [ "$EMIT_LOG" = true ]; then - log "current era = $CURRENT" - fi -} - -####################################### -# Awaits for the chain to proceed N blocks. -# Arguments: -# Network ordinal identifier. -# Node ordinal identifier. -# Future block height offset to apply. -####################################### -function await_n_blocks() -{ - local OFFSET=${1} - local EMIT_LOG=${2:-false} - - local CURRENT - local FUTURE - - CURRENT=$(get_chain_height) - FUTURE=$((CURRENT + OFFSET)) - - while [ "$CURRENT" -lt "$FUTURE" ]; - do - if [ "$EMIT_LOG" = true ]; then - log "current block height = $CURRENT :: future height = $FUTURE ... sleeping 2 seconds" - fi - sleep 2.0 - CURRENT=$(get_chain_height) - done - - if [ "$EMIT_LOG" = true ]; then - log "current block height = $CURRENT" - fi -} - -####################################### -# Awaits for the chain to proceed N eras. -# Arguments: -# Network ordinal identifier. -# Node ordinal identifier. -# Future era offset to apply. -####################################### -function await_until_era_n() -{ - local ERA=${1} - - while [ "$ERA" -lt "$(get_chain_era)" ]; - do - sleep 10.0 - done -} - -####################################### -# Awaits for the chain to proceed N blocks. -# Arguments: -# Network ordinal identifier. -# Node ordinal identifier. -# Future block offset to apply. 
-####################################### -function await_until_block_n() -{ - local HEIGHT=${1} - - while [ "$HEIGHT" -lt "$(get_chain_height)" ]; - do - sleep 10.0 - done -} diff --git a/utils/nctl/sh/utils/cl_args.sh b/utils/nctl/sh/utils/cl_args.sh deleted file mode 100644 index 1d49613403..0000000000 --- a/utils/nctl/sh/utils/cl_args.sh +++ /dev/null @@ -1,173 +0,0 @@ -#!/usr/bin/env bash - -####################################### -# Returns a formatted session argument. -# Arguments: -# Argument name. -# Argument value. -# CL type suffix to apply to argument type. -# CL type prefix to apply to argument value. -####################################### -function get_cl_arg() -{ - local ARG_NAME=${1} - local ARG_VALUE=${2} - local CL_TYPE_SUFFIX=${3} - local CL_VALUE_PREFIX=${4:-""} - - echo "$ARG_NAME:$CL_TYPE_SUFFIX='$CL_VALUE_PREFIX$ARG_VALUE'" -} - -####################################### -# Returns a formatted session argument (cl type=account hash). -# Arguments: -# Argument name. -# Argument value. -####################################### -function get_cl_arg_account_hash() -{ - local ARG_NAME=${1} - local ARG_VALUE=${2} - - get_cl_arg "$ARG_NAME" "$ARG_VALUE" "account_hash" "account-hash-" -} - -####################################### -# Returns a formatted session argument (cl type=account key). -# Arguments: -# Argument name. -# Argument value. -####################################### -function get_cl_arg_account_key() -{ - local ARG_NAME=${1} - local ARG_VALUE=${2} - - get_cl_arg "$ARG_NAME" "$ARG_VALUE" "public_key" -} - -####################################### -# Returns a formatted session argument (cl type=optional uref). -# Arguments: -# Argument name. -# Argument value. -####################################### -function get_cl_arg_opt_uref() -{ - local ARG_NAME=${1} - local ARG_VALUE=${2} - - get_cl_arg "$ARG_NAME" "$ARG_VALUE" "opt_uref" -} - -####################################### -# Returns a formatted session argument (cl type=string). 
-# Arguments: -# Argument name. -# Argument value. -####################################### -function get_cl_arg_string() -{ - local ARG_NAME=${1} - local ARG_VALUE=${2} - - get_cl_arg "$ARG_NAME" "$ARG_VALUE" "string" -} - -####################################### -# Returns a formatted session argument (cl type=u8). -# Arguments: -# Argument name. -# Argument value. -####################################### -function get_cl_arg_u8() -{ - local ARG_NAME=${1} - local ARG_VALUE=${2} - - get_cl_arg "$ARG_NAME" "$ARG_VALUE" "U8" -} - -####################################### -# Returns a formatted session argument (cl type=u16). -# Arguments: -# Argument name. -# Argument value. -####################################### -function get_cl_arg_u16() -{ - local ARG_NAME=${1} - local ARG_VALUE=${2} - - get_cl_arg "$ARG_NAME" "$ARG_VALUE" "U16" -} - -####################################### -# Returns a formatted session argument (cl type=u32). -# Arguments: -# Argument name. -# Argument value. -####################################### -function get_cl_arg_u32() -{ - local ARG_NAME=${1} - local ARG_VALUE=${2} - - get_cl_arg "$ARG_NAME" "$ARG_VALUE" "U32" -} - -####################################### -# Returns a formatted session argument (cl type=u64). -# Arguments: -# Argument name. -# Argument value. -####################################### -function get_cl_arg_u64() -{ - local ARG_NAME=${1} - local ARG_VALUE=${2} - - get_cl_arg "$ARG_NAME" "$ARG_VALUE" "U64" -} - -####################################### -# Returns a formatted session argument (cl type=u128). -# Arguments: -# Argument name. -# Argument value. -####################################### -function get_cl_arg_u128() -{ - local ARG_NAME=${1} - local ARG_VALUE=${2} - - get_cl_arg "$ARG_NAME" "$ARG_VALUE" "U128" -} - -####################################### -# Returns a formatted session argument (cl type=u256). -# Arguments: -# Argument name. -# Argument value. 
-####################################### -function get_cl_arg_u256() -{ - local ARG_NAME=${1} - local ARG_VALUE=${2} - - get_cl_arg "$ARG_NAME" "$ARG_VALUE" "U256" -} - -####################################### -# Returns a formatted session argument (cl type=u512). -# Arguments: -# Argument name. -# Argument value. -####################################### -function get_cl_arg_u512() -{ - local ARG_NAME=${1} - local ARG_VALUE=${2} - - get_cl_arg "$ARG_NAME" "$ARG_VALUE" "U512" -} diff --git a/utils/nctl/sh/utils/constants.sh b/utils/nctl/sh/utils/constants.sh deleted file mode 100644 index 1b59f3b5d6..0000000000 --- a/utils/nctl/sh/utils/constants.sh +++ /dev/null @@ -1,73 +0,0 @@ -#!/usr/bin/env bash - -# A type of actor representing a participating node. -export NCTL_ACCOUNT_TYPE_FAUCET="faucet" - -# A type of actor representing a participating node. -export NCTL_ACCOUNT_TYPE_NODE="node" - -# A type of actor representing a user. -export NCTL_ACCOUNT_TYPE_USER="user" - -# Base RPC server port number. -export NCTL_BASE_PORT_RPC=40000 - -# Base JSON server port number. -export NCTL_BASE_PORT_REST=50000 - -# Base event server port number. -export NCTL_BASE_PORT_SSE=60000 - -# Base network server port number. -export NCTL_BASE_PORT_NETWORK=34452 - -# Set of client side auction contracts. -export NCTL_CONTRACTS_CLIENT_AUCTION=( - "activate_bid.wasm" - "add_bid.wasm" - "delegate.wasm" - "undelegate.wasm" - "withdraw_bid.wasm" -) - -# Set of client side transfer contracts. -export NCTL_CONTRACTS_CLIENT_TRANSFERS=( - "transfer_to_account_u512.wasm" - "transfer_to_account_u512_stored.wasm" -) - -# Default amount used when delegating. -export NCTL_DEFAULT_AUCTION_DELEGATE_AMOUNT=1000000000 # (1e9) - -# Default motes to pay for consumed gas. -export NCTL_DEFAULT_GAS_PAYMENT=10000000000 # (1e10) - -# Default gas price multiplier. -export NCTL_DEFAULT_GAS_PRICE=10 - -# Default amount used when making transfers. 
-export NCTL_DEFAULT_TRANSFER_AMOUNT=2500000000 # (1e9) - -# Intitial balance of faucet account. -export NCTL_INITIAL_BALANCE_FAUCET=1000000000000000000000000000000000 # (1e33) - -# Intitial balance of user account. -export NCTL_INITIAL_BALANCE_USER=1000000000000000000000000000000000 # (1e33) - -# Intitial balance of validator account. -export NCTL_INITIAL_BALANCE_VALIDATOR=1000000000000000000000000000000000 # (1e33) - -# Intitial delegation amount of a user account. -export NCTL_INITIAL_DELEGATION_AMOUNT=1000000000000000 # (1e15) - -# Base weight applied to a validator at genesis. -export NCTL_VALIDATOR_BASE_WEIGHT=1000000000000000 # (1e15) - -# Name of process group: boostrap validators. -export NCTL_PROCESS_GROUP_1=validators-1 - -# Name of process group: genesis validators. -export NCTL_PROCESS_GROUP_2=validators-2 - -# Name of process group: non-genesis validators. -export NCTL_PROCESS_GROUP_3=validators-3 diff --git a/utils/nctl/sh/utils/defaults.sh b/utils/nctl/sh/utils/defaults.sh deleted file mode 100644 index 4909f4d5e4..0000000000 --- a/utils/nctl/sh/utils/defaults.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/usr/bin/env bash - -# Set default type of daemon to run. -export NCTL_DAEMON_TYPE=${NCTL_DAEMON_TYPE:-supervisord} - -# Set default compilation target. -export NCTL_COMPILE_TARGET=${NCTL_COMPILE_TARGET:-release} - -# Set default logging output format. -export NCTL_NODE_LOG_FORMAT=${NCTL_NODE_LOG_FORMAT:-json} diff --git a/utils/nctl/sh/utils/infra.sh b/utils/nctl/sh/utils/infra.sh deleted file mode 100644 index 2d6d5b2cab..0000000000 --- a/utils/nctl/sh/utils/infra.sh +++ /dev/null @@ -1,324 +0,0 @@ -#!/usr/bin/env bash - -####################################### -# Returns a bootstrap known address - i.e. those of bootstrap nodes. -# Globals: -# NCTL_BASE_PORT_NETWORK - base network port number. -# Arguments: -# Node ordinal identifier. 
-####################################### -function get_bootstrap_known_address() -{ - local NODE_ID=${1} - local NET_ID=${NET_ID:-1} - local NODE_PORT=$((NCTL_BASE_PORT_NETWORK + (NET_ID * 100) + NODE_ID)) - - echo "'127.0.0.1:$NODE_PORT'" -} - -####################################### -# Returns count of a network's bootstrap nodes. -####################################### -function get_count_of_bootstrap_nodes() -{ - # Hard-coded. - echo 3 -} - -####################################### -# Returns count of a network's bootstrap nodes. -####################################### -function get_count_of_genesis_nodes() -{ - echo $(($(get_count_of_nodes) / 2)) -} - -####################################### -# Returns count of all network nodes. -####################################### -function get_count_of_nodes() -{ - find "$(get_path_to_net)"/nodes/* -maxdepth 0 -type d | wc -l -} - -####################################### -# Returns count of test users. -####################################### -function get_count_of_users() -{ - find "$(get_path_to_net)"/users/* -maxdepth 0 -type d | wc -l -} - -####################################### -# Returns network bind address. -# Arguments: -# Node ordinal identifier. -####################################### -function get_network_bind_address() -{ - local NODE_ID=${1} - - echo "0.0.0.0:$(get_node_port "$NCTL_BASE_PORT_NETWORK" "$NODE_ID")" -} - -####################################### -# Returns network known addresses. -####################################### -function get_network_known_addresses() -{ - local NODE_ID=${1} - local RESULT - - # If a bootstrap node then return set of bootstraps. - RESULT=$(get_bootstrap_known_address 1) - if [ "$NODE_ID" -lt "$(get_count_of_bootstrap_nodes)" ]; then - for IDX in $(seq 2 "$(get_count_of_bootstrap_nodes)") - do - RESULT=$RESULT","$(get_bootstrap_known_address "$IDX") - done - # If a non-bootstrap node then return full set of nodes. 
- # Note: could be modified to return full set of spinning nodes. - else - for IDX in $(seq 2 "$NODE_ID") - do - RESULT=$RESULT","$(get_bootstrap_known_address "$IDX") - done - fi - - echo "$RESULT" -} - -####################################### -# Returns node event address. -# Arguments: -# Node ordinal identifier. -####################################### -function get_node_address_event() -{ - local NODE_ID=${1} - - echo "http://localhost:$(get_node_port "$NCTL_BASE_PORT_SSE" "$NODE_ID")" -} - -####################################### -# Returns node JSON address. -# Arguments: -# Node ordinal identifier. -####################################### -function get_node_address_rest() -{ - local NODE_ID=${1} - - echo "http://localhost:$(get_node_port "$NCTL_BASE_PORT_REST" "$NODE_ID")" -} - -####################################### -# Returns node RPC address. -# Arguments: -# Node ordinal identifier. -####################################### -function get_node_address_rpc() -{ - local NODE_ID=${1} - - echo "http://localhost:$(get_node_port "$NCTL_BASE_PORT_RPC" "$NODE_ID")" -} - -####################################### -# Returns node RPC address, intended for use with cURL. -# Arguments: -# Node ordinal identifier. -####################################### -function get_node_address_rpc_for_curl() -{ - local NODE_ID=${1} - - echo "$(get_node_address_rpc "$NODE_ID")/rpc" -} - -####################################### -# Returns ordinal identifier of a random validator node able to be used for deploy dispatch. -# Arguments: -# Network ordinal identifier. -####################################### -function get_node_for_dispatch() -{ - for NODE_ID in $(seq 1 "$(get_count_of_nodes)" | shuf) - do - if [ "$(get_node_is_up "$NODE_ID")" = true ]; then - echo "$NODE_ID" - break - fi - done -} - -####################################### -# Returns flag indicating whether a node is currently up. -# Arguments: -# Node ordinal identifier. 
-####################################### -function get_node_is_up() -{ - local NODE_ID=${1} - local NODE_PORT - - NODE_PORT=$(get_node_port_rpc "$NODE_ID") - - if grep -q "$NODE_PORT (LISTEN)" <<< "$(lsof -i -P -n)"; then - echo true - else - echo false - fi -} - -####################################### -# Calculate port for a given base port, network id, and node id. -# Arguments: -# Base starting port. -# Node ordinal identifier. -####################################### -function get_node_port() -{ - local BASE_PORT=${1} - local NODE_ID=${2:-$(get_node_for_dispatch)} - local NET_ID=${NET_ID:-1} - - # TODO: Need to handle case of more than 99 nodes. - echo $((BASE_PORT + (NET_ID * 100) + NODE_ID)) -} - -####################################### -# Calculates REST port. -# Arguments: -# Node ordinal identifier. -####################################### -function get_node_port_rest() -{ - local NODE_ID=${1} - - get_node_port "$NCTL_BASE_PORT_REST" "$NODE_ID" -} - -####################################### -# Calculates RPC port. -# Arguments: -# Node ordinal identifier. -####################################### -function get_node_port_rpc() -{ - local NODE_ID=${1} - - get_node_port "$NCTL_BASE_PORT_RPC" "$NODE_ID" -} - -####################################### -# Calculates SSE port. -# Arguments: -# Node ordinal identifier. -####################################### -function get_node_port_sse() -{ - local NODE_ID=${1} - - get_node_port "$NCTL_BASE_PORT_SSE" "$NODE_ID" -} - -####################################### -# Calculates a node's default staking weight. -# Arguments: -# Node ordinal identifier. -####################################### -function get_node_staking_weight() -{ - local NODE_ID=${1} - - echo $((NCTL_VALIDATOR_BASE_WEIGHT + NODE_ID)) -} - -####################################### -# Returns set of nodes within a process group. -# Arguments: -# Process group identifier. 
-####################################### -function get_process_group_members() -{ - local PROCESS_GROUP=${1} - local SEQ_END - local SEQ_START - - # Set range. - if [ "$PROCESS_GROUP" == "$NCTL_PROCESS_GROUP_1" ]; then - SEQ_START=1 - SEQ_END=$(get_count_of_bootstrap_nodes) - - elif [ "$PROCESS_GROUP" == "$NCTL_PROCESS_GROUP_2" ]; then - SEQ_START=$(($(get_count_of_bootstrap_nodes) + 1)) - SEQ_END=$(get_count_of_genesis_nodes) - - elif [ "$PROCESS_GROUP" == "$NCTL_PROCESS_GROUP_3" ]; then - SEQ_START=$(($(get_count_of_genesis_nodes) + 1)) - SEQ_END=$(get_count_of_nodes) - fi - - # Set members of process group. - local RESULT="" - for NODE_ID in $(seq "$SEQ_START" "$SEQ_END") - do - if [ "$NODE_ID" -gt "$SEQ_START" ]; then - RESULT=$RESULT", " - fi - RESULT=$RESULT$(get_process_name_of_node "$NODE_ID") - done - - echo "$RESULT" -} - -####################################### -# Returns name of a daemonized node process within a group. -# Arguments: -# Network ordinal identifier. -# Node ordinal identifier. -####################################### -function get_process_name_of_node() -{ - local NODE_ID=${1} - local NET_ID=${NET_ID:-1} - - echo "casper-net-$NET_ID-node-$NODE_ID" -} - -####################################### -# Returns name of a daemonized node process within a group. -# Arguments: -# Node ordinal identifier. -####################################### -function get_process_name_of_node_in_group() -{ - local NODE_ID=${1} - local NODE_PROCESS_NAME - local PROCESS_GROUP_NAME - - NODE_PROCESS_NAME=$(get_process_name_of_node "$NODE_ID") - PROCESS_GROUP_NAME=$(get_process_name_of_node_group "$NODE_ID") - - echo "$PROCESS_GROUP_NAME:$NODE_PROCESS_NAME" -} - -####################################### -# Returns name of a daemonized node process group. -# Arguments: -# Network ordinal identifier. -# Node ordinal identifier. 
-####################################### -function get_process_name_of_node_group() -{ - local NODE_ID=${1} - - if [ "$NODE_ID" -le "$(get_count_of_bootstrap_nodes)" ]; then - echo "$NCTL_PROCESS_GROUP_1" - elif [ "$NODE_ID" -le "$(get_count_of_genesis_nodes)" ]; then - echo "$NCTL_PROCESS_GROUP_2" - else - echo "$NCTL_PROCESS_GROUP_3" - fi -} diff --git a/utils/nctl/sh/utils/main.sh b/utils/nctl/sh/utils/main.sh deleted file mode 100644 index 65609fd1fb..0000000000 --- a/utils/nctl/sh/utils/main.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/usr/bin/env bash - -# Pick defaults up first as other utils may depend on them. -source "$NCTL"/sh/utils/defaults.sh - -source "$NCTL"/sh/utils/accounts.sh -source "$NCTL"/sh/utils/blocking.sh -source "$NCTL"/sh/utils/constants.sh -source "$NCTL"/sh/utils/cl_args.sh -source "$NCTL"/sh/utils/infra.sh -source "$NCTL"/sh/utils/os.sh -source "$NCTL"/sh/utils/paths.sh -source "$NCTL"/sh/utils/queries.sh diff --git a/utils/nctl/sh/utils/os.sh b/utils/nctl/sh/utils/os.sh deleted file mode 100644 index d450d582c9..0000000000 --- a/utils/nctl/sh/utils/os.sh +++ /dev/null @@ -1,91 +0,0 @@ -#!/usr/bin/env bash - -# OS types. -declare _OS_LINUX="linux" -declare _OS_LINUX_REDHAT="$_OS_LINUX-redhat" -declare _OS_LINUX_SUSE="$_OS_LINUX-suse" -declare _OS_LINUX_ARCH="$_OS_LINUX-arch" -declare _OS_LINUX_DEBIAN="$_OS_LINUX-debian" -declare _OS_MACOSX="macosx" -declare _OS_UNKNOWN="unknown" - -####################################### -# Returns OS type. -# Globals: -# OSTYPE: type of OS being run. 
-####################################### -function get_os() -{ - if [[ "$OSTYPE" == "linux-gnu" ]]; then - if [ -f /etc/redhat-release ]; then - echo $_OS_LINUX_REDHAT - elif [ -f /etc/SuSE-release ]; then - echo $_OS_LINUX_SUSE - elif [ -f /etc/arch-release ]; then - echo $_OS_LINUX_ARCH - elif [ -f /etc/debian_version ]; then - echo $_OS_LINUX_DEBIAN - fi - elif [[ "$OSTYPE" == "darwin"* ]]; then - echo $_OS_MACOSX - else - echo $_OS_UNKNOWN - fi -} - -####################################### -# Wraps standard echo by adding application prefix. -####################################### -function log () -{ - local MSG=${1} - local NOW - - NOW=$(date +%Y-%m-%dT%H:%M:%S.%6N) - - echo -e "$NOW [INFO] [$$] NCTL :: $MSG" -} - -####################################### -# Wraps standard echo by adding application error prefix. -####################################### -function log_error () -{ - local MSG=${1} - local NOW - - NOW=$(date +%Y-%m-%dT%H:%M:%S.%6N) - - echo -e "$NOW [ERROR] [$$] NCTL :: $MSG" -} - -####################################### -# Wraps pushd command to suppress stdout. -####################################### -function pushd () -{ - command pushd "$@" > /dev/null -} - -####################################### -# Wraps popd command to suppress stdout. -####################################### -function popd () -{ - command popd "$@" > /dev/null -} - -####################################### -# Forces a directory delete / recreate. -# Arguments: -# Directory to be reset / recreated. -####################################### -function resetd () -{ - local DPATH=${1} - - if [ -d "$DPATH" ]; then - rm -rf "$DPATH" - fi - mkdir -p "$DPATH" -} diff --git a/utils/nctl/sh/utils/paths.sh b/utils/nctl/sh/utils/paths.sh deleted file mode 100644 index d9a89979f9..0000000000 --- a/utils/nctl/sh/utils/paths.sh +++ /dev/null @@ -1,203 +0,0 @@ -#!/usr/bin/env bash - -####################################### -# Returns path to a binary file. 
-# Arguments: -# Binary file name. -####################################### -function get_path_to_binary() -{ - local FILENAME=${1} - - echo "$(get_path_to_net)"/bin/"$FILENAME" -} - -####################################### -# Returns path to client binary. -####################################### -function get_path_to_client() -{ - get_path_to_binary "casper-client" -} - -####################################### -# Returns path to a smart contract. -# Globals: -# NCTL - path to nctl home directory. -# Arguments: -# Contract wasm file name. -####################################### -function get_path_to_contract() -{ - local FILENAME=${1} - - get_path_to_binary "$FILENAME" -} - -####################################### -# Returns path to a network faucet. -####################################### -function get_path_to_faucet() -{ - echo "$(get_path_to_net)"/faucet -} - -####################################### -# Returns path to a network's assets. -# Globals: -# NCTL - path to nctl home directory. -####################################### -function get_path_to_net() -{ - local NET_ID=${NET_ID:-1} - - echo "$NCTL"/assets/net-"$NET_ID" -} - -####################################### -# Returns path to a network's binary folder. -####################################### -function get_path_to_net_bin() -{ - echo "$(get_path_to_net)"/bin -} - -####################################### -# Returns path to a network's dump folder. -# Globals: -# NCTL - path to nctl home directory. -# Arguments: -# Network ordinal identifier. -####################################### -function get_path_to_net_dump() -{ - local NET_ID=${NET_ID:-1} - - echo "$NCTL"/dumps/net-"$NET_ID" -} - -####################################### -# Returns path to a network's supervisord config file. 
-####################################### -function get_path_net_supervisord_cfg() -{ - echo "$(get_path_to_net)"/daemon/config/supervisord.conf -} - -####################################### -# Returns path to a network's supervisord socket file. -####################################### -function get_path_net_supervisord_sock() -{ - echo "$(get_path_to_net)"/daemon/socket/supervisord.sock -} - -####################################### -# Returns path to a node's assets. -# Arguments: -# Node ordinal identifier. -####################################### -function get_path_to_node() -{ - local NODE_ID=${1} - - echo "$(get_path_to_net)"/nodes/node-"$NODE_ID" -} - -####################################### -# Returns path to a node's binary folder. -# Arguments: -# Node ordinal identifier. -####################################### -function get_path_to_node_bin() -{ - echo "$(get_path_to_node "$1")"/bin -} - -####################################### -# Returns path to a node's config file. -# Arguments: -# Node ordinal identifier. -####################################### -function get_path_to_node_config() -{ - echo "$(get_path_to_node "$1")"/config -} - -####################################### -# Returns path to a node's keys directory. -# Arguments: -# Node ordinal identifier. -####################################### -function get_path_to_node_keys() -{ - echo "$(get_path_to_node "$1")"/keys -} - -####################################### -# Returns path to a node's logs directory. -# Arguments: -# Node ordinal identifier. -####################################### -function get_path_to_node_logs() -{ - echo "$(get_path_to_node "$1")"/logs -} - -####################################### -# Returns path to a node's storage directory. -# Arguments: -# Node ordinal identifier. -####################################### -function get_path_to_node_storage() -{ - echo "$(get_path_to_node "$1")"/storage -} - -####################################### -# Returns path to a node's secret key. 
-# Arguments: -# Node ordinal identifier. -####################################### -function get_path_to_node_secret_key() -{ - local NODE_ID=${1} - - get_path_to_secret_key "$NCTL_ACCOUNT_TYPE_NODE" "$NODE_ID" -} - -####################################### -# Returns path to a secret key. -# Globals: -# NCTL_ACCOUNT_TYPE_FAUCET - faucet account type. -# NCTL_ACCOUNT_TYPE_NODE - node account type. -# NCTL_ACCOUNT_TYPE_USER - user account type. -# Arguments: -# Account type (node | user | faucet). -# Account ordinal identifier (optional). -####################################### -function get_path_to_secret_key() -{ - local ACCOUNT_TYPE=${1} - local ACCOUNT_IDX=${2} - - if [ "$ACCOUNT_TYPE" = "$NCTL_ACCOUNT_TYPE_FAUCET" ]; then - echo "$(get_path_to_faucet)"/secret_key.pem - elif [ "$ACCOUNT_TYPE" = "$NCTL_ACCOUNT_TYPE_NODE" ]; then - echo "$(get_path_to_node "$ACCOUNT_IDX")"/keys/secret_key.pem - elif [ "$ACCOUNT_TYPE" = "$NCTL_ACCOUNT_TYPE_USER" ]; then - echo "$(get_path_to_user "$ACCOUNT_IDX")"/secret_key.pem - fi -} - -####################################### -# Returns path to a user's assets. -# Arguments: -# User ordinal identifier. -####################################### -function get_path_to_user() -{ - local USER_ID=${1} - - echo "$(get_path_to_net)"/users/user-"$USER_ID" -} diff --git a/utils/nctl/sh/utils/queries.sh b/utils/nctl/sh/utils/queries.sh deleted file mode 100644 index 6d65575bc5..0000000000 --- a/utils/nctl/sh/utils/queries.sh +++ /dev/null @@ -1,113 +0,0 @@ -#!/usr/bin/env bash - -####################################### -# Returns a chain era. -# Arguments: -# Network ordinal identifier. -# Node ordinal identifier. 
-####################################### -function get_chain_era() -{ - local NODE_ID=${1:-$(get_node_for_dispatch)} - - if [ "$(get_node_is_up "$NODE_ID")" = true ]; then - $(get_path_to_client) get-block \ - --node-address "$(get_node_address_rpc "$NODE_ID")" \ - --block-identifier "" \ - | jq '.result.block.header.era_id' - else - echo "N/A" - fi -} - -####################################### -# Returns a chain height. -# Arguments: -# Network ordinal identifier. -# Node ordinal identifier. -####################################### -function get_chain_height() -{ - local NODE_ID=${1:-$(get_node_for_dispatch)} - - if [ "$(get_node_is_up "$NODE_ID")" = true ]; then - $(get_path_to_client) get-block \ - --node-address "$(get_node_address_rpc "$NODE_ID")" \ - --block-identifier "" \ - | jq '.result.block.header.height' - else - echo "N/A" - fi -} - -####################################### -# Returns a chain name. -# Arguments: -# Network ordinal identifier. -####################################### -function get_chain_name() -{ - local NET_ID=${NET_ID:-1} - - echo casper-net-"$NET_ID" -} - -####################################### -# Returns hash of latest block finalized at a node. -####################################### -function get_chain_latest_block_hash() -{ - local NODE_ID=${1:-$(get_node_for_dispatch)} - - $(get_path_to_client) get-block \ - --node-address "$(get_node_address_rpc "$NODE_ID")" \ - | jq '.result.block.hash' \ - | sed -e 's/^"//' -e 's/"$//' -} - -####################################### -# Returns latest block finalized at a node. -####################################### -function get_chain_latest_block() -{ - $(get_path_to_client) get-block \ - --node-address "$(get_node_address_rpc)" \ - | jq '.result.block' \ - | sed -e 's/^"//' -e 's/"$//' -} - -####################################### -# Returns a timestamp for use in chainspec.toml. -# Arguments: -# Delay in seconds to apply to genesis timestamp. 
-####################################### -function get_genesis_timestamp() -{ - local DELAY=${1} - local SCRIPT=( - "from datetime import datetime, timedelta;" - "print((datetime.utcnow() + timedelta(seconds=$DELAY)).isoformat('T') + 'Z');" - ) - - python3 -c "${SCRIPT[*]}" -} - -####################################### -# Returns a state root hash. -# Globals: -# NCTL - path to nctl home directory. -# Arguments: -# Node ordinal identifier. -# Block identifier. -####################################### -function get_state_root_hash() -{ - local NODE_ID=${1} - local BLOCK_HASH=${2} - - $(get_path_to_client) get-state-root-hash \ - --node-address "$(get_node_address_rpc "$NODE_ID")" \ - --block-identifier "${BLOCK_HASH:-""}" \ - | jq '.result.state_root_hash' \ - | sed -e 's/^"//' -e 's/"$//' -} diff --git a/utils/nctl/sh/views/utils.sh b/utils/nctl/sh/views/utils.sh deleted file mode 100644 index 811ed077ae..0000000000 --- a/utils/nctl/sh/views/utils.sh +++ /dev/null @@ -1,186 +0,0 @@ -#!/usr/bin/env bash - -####################################### -# Renders an account. -# Globals: -# NCTL - path to nctl home directory. -# Arguments: -# Account type (node | user | faucet). -# Account ordinal identifier (optional). -####################################### -function render_account() -{ - local ACCOUNT_TYPE=${1} - local ACCOUNT_IDX=${2} - local ACCOUNT_KEY - local STATE_ROOT_HASH - - ACCOUNT_KEY=$(get_account_key "$ACCOUNT_TYPE" "$ACCOUNT_IDX") - STATE_ROOT_HASH=$(get_state_root_hash) - - source "$NCTL"/sh/views/view_chain_account.sh \ - root-hash="$STATE_ROOT_HASH" \ - account-key="$ACCOUNT_KEY" -} - -####################################### -# Renders an account balance. -# Globals: -# NCTL - path to nctl home directory. -# Arguments: -# Account type (node | user | faucet). -# Account ordinal identifier (optional). 
-####################################### -function render_account_balance() -{ - local ACCOUNT_TYPE=${1} - local ACCOUNT_IDX=${2} - local ACCOUNT_KEY - local ACCOUNT_PREFIX - local STATE_ROOT_HASH - local PURSE_UREF - - ACCOUNT_KEY=$(get_account_key "$ACCOUNT_TYPE" "$ACCOUNT_IDX") - ACCOUNT_PREFIX=$(get_account_prefix "$ACCOUNT_TYPE" "$ACCOUNT_IDX") - STATE_ROOT_HASH=$(get_state_root_hash) - PURSE_UREF=$(get_main_purse_uref "$ACCOUNT_KEY" "$STATE_ROOT_HASH") - - source "$NCTL"/sh/views/view_chain_balance.sh \ - root-hash="$STATE_ROOT_HASH" \ - purse-uref="$PURSE_UREF" \ - prefix="$ACCOUNT_PREFIX" -} - -####################################### -# Renders an account hash. -# Arguments: -# Account type (node | user | faucet). -# Account ordinal identifier (optional). -####################################### -function render_account_hash() -{ - local ACCOUNT_TYPE=${1} - local ACCOUNT_IDX=${2} - local ACCOUNT_KEY - local ACCOUNT_HASH - local ACCOUNT_PREFIX - - ACCOUNT_KEY=$(get_account_key "$ACCOUNT_TYPE" "$ACCOUNT_IDX") - ACCOUNT_HASH=$(get_account_hash "$ACCOUNT_KEY") - ACCOUNT_PREFIX=$(get_account_prefix "$ACCOUNT_TYPE" "$ACCOUNT_IDX") - - log "$ACCOUNT_PREFIX.account-hash = $ACCOUNT_HASH" -} - -####################################### -# Renders an account key. -# Globals: -# NCTL_ACCOUNT_TYPE_FAUCET - faucet account type. -# NCTL_ACCOUNT_TYPE_NODE - node account type. -# NCTL_ACCOUNT_TYPE_USER - user account type. -# Arguments: -# Account type (node | user | faucet). -# Account ordinal identifier (optional). -####################################### -function render_account_key() -{ - local ACCOUNT_TYPE=${1} - local ACCOUNT_IDX=${2} - local ACCOUNT_KEY - local ACCOUNT_PREFIX - - ACCOUNT_KEY=$(get_account_key "$ACCOUNT_TYPE" "$ACCOUNT_IDX") - ACCOUNT_PREFIX=$(get_account_prefix "$ACCOUNT_TYPE" "$ACCOUNT_IDX") - - log "$ACCOUNT_PREFIX.account-key = $ACCOUNT_KEY" -} - -####################################### -# Renders an account's main purse uref. 
-# Globals: -# NCTL_ACCOUNT_TYPE_FAUCET - faucet account type. -# NCTL_ACCOUNT_TYPE_NODE - node account type. -# NCTL_ACCOUNT_TYPE_USER - user account type. -# Arguments: -# Account type (node | user | faucet). -# Account ordinal identifier (optional). -# State root hash (optional). -####################################### -function render_account_main_purse_uref() -{ - local ACCOUNT_TYPE=${1} - local ACCOUNT_IDX=${2} - local STATE_ROOT_HASH=${3:-$(get_state_root_hash)} - local ACCOUNT_KEY - local ACCOUNT_PREFIX - local PURSE_UREF - - ACCOUNT_KEY=$(get_account_key "$ACCOUNT_TYPE" "$ACCOUNT_IDX") - ACCOUNT_PREFIX=$(get_account_prefix "$ACCOUNT_TYPE" "$ACCOUNT_IDX") - PURSE_UREF=$(get_main_purse_uref "$ACCOUNT_KEY" "$STATE_ROOT_HASH") - - log "$ACCOUNT_PREFIX.main-purse-uref = $PURSE_UREF" -} - -####################################### -# Renders an account secret key path. -# Arguments: -# Account type (node | user | faucet). -# Account ordinal identifier (optional). -####################################### -function render_account_secret_key() -{ - local ACCOUNT_TYPE=${1} - local ACCOUNT_IDX=${2} - local ACCOUNT_PREFIX - local PATH_TO_KEY - - ACCOUNT_PREFIX=$(get_account_prefix "$ACCOUNT_TYPE" "$ACCOUNT_IDX") - PATH_TO_KEY=$(get_path_to_secret_key "$ACCOUNT_TYPE" "$ACCOUNT_IDX") - - log "$ACCOUNT_PREFIX.secret-key-path = $PATH_TO_KEY" -} - -####################################### -# Renders a state root hash at a certain node. -# Globals: -# NCTL - path to nctl home directory. -# Arguments: -# Node ordinal identifier. -# Hash of block at which to return associated state root hash. 
-####################################### -function render_chain_state_root_hash() -{ - local NODE_ID=${1} - local BLOCK_HASH=${2} - local NODE_IS_UP - local STATE_ROOT_HASH - - NODE_IS_UP=$(get_node_is_up "$NODE_ID") - if [ "$NODE_IS_UP" = true ]; then - STATE_ROOT_HASH=$(get_state_root_hash "$NODE_ID" "$BLOCK_HASH") - fi - - log "state root hash @ node-$NODE_ID = ${STATE_ROOT_HASH:-'N/A'}" -} - -####################################### -# Renders a last finalized block hash at a certain node. -# Globals: -# NCTL - path to nctl home directory. -# Arguments: -# Node ordinal identifier. -####################################### -function render_last_finalized_block_hash() -{ - local NODE_ID=${1} - local NODE_IS_UP - local LFB_HASH - - NODE_IS_UP=$(get_node_is_up "$NODE_ID") - if [ "$NODE_IS_UP" = true ]; then - LFB_HASH=$(get_chain_latest_block_hash "$NODE_ID") - fi - - log "last finalized block hash @ node-$NODE_ID = ${LFB_HASH:-'N/A'}" -} diff --git a/utils/nctl/sh/views/view_chain_account.sh b/utils/nctl/sh/views/view_chain_account.sh deleted file mode 100644 index df38cded04..0000000000 --- a/utils/nctl/sh/views/view_chain_account.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/usr/bin/env bash - -unset ACCOUNT_KEY -unset STATE_ROOT_HASH - -for ARGUMENT in "$@" -do - KEY=$(echo "$ARGUMENT" | cut -f1 -d=) - VALUE=$(echo "$ARGUMENT" | cut -f2 -d=) - case "$KEY" in - account-key) ACCOUNT_KEY=${VALUE} ;; - root-hash) STATE_ROOT_HASH=${VALUE} ;; - *) - esac -done - -# ---------------------------------------------------------------- -# MAIN -# ---------------------------------------------------------------- - -source "$NCTL"/sh/utils/main.sh - -NODE_ADDRESS=$(get_node_address_rpc) -STATE_ROOT_HASH=${STATE_ROOT_HASH:-$(get_state_root_hash)} - -$(get_path_to_client) query-state \ - --node-address "$NODE_ADDRESS" \ - --state-root-hash "$STATE_ROOT_HASH" \ - --key "$ACCOUNT_KEY" \ - | jq '.result' diff --git a/utils/nctl/sh/views/view_chain_auction_info.sh 
b/utils/nctl/sh/views/view_chain_auction_info.sh deleted file mode 100644 index fdb63fa665..0000000000 --- a/utils/nctl/sh/views/view_chain_auction_info.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/utils/main.sh - -####################################### -# Renders on-chain auction information. -####################################### -function main() -{ - $(get_path_to_client) get-auction-info \ - --node-address "$(get_node_address_rpc)" \ - | jq '.result' -} - -# ---------------------------------------------------------------- -# ENTRY POINT -# ---------------------------------------------------------------- - -main diff --git a/utils/nctl/sh/views/view_chain_balance.sh b/utils/nctl/sh/views/view_chain_balance.sh deleted file mode 100644 index f251897711..0000000000 --- a/utils/nctl/sh/views/view_chain_balance.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/usr/bin/env bash - -unset PURSE_UREF -unset STATE_ROOT_HASH -unset PREFIX - -for ARGUMENT in "$@" -do - KEY=$(echo "$ARGUMENT" | cut -f1 -d=) - VALUE=$(echo "$ARGUMENT" | cut -f2 -d=) - case "$KEY" in - purse-uref) PURSE_UREF=${VALUE} ;; - root-hash) STATE_ROOT_HASH=${VALUE} ;; - prefix) PREFIX=${VALUE} ;; - *) - esac -done - -PREFIX=${PREFIX:-"account"} - -# ---------------------------------------------------------------- -# MAIN -# ---------------------------------------------------------------- - -source "$NCTL"/sh/utils/main.sh - -NODE_ADDRESS=$(get_node_address_rpc) -STATE_ROOT_HASH=${STATE_ROOT_HASH:-$(get_state_root_hash)} - -ACCOUNT_BALANCE=$( - $(get_path_to_client) get-balance \ - --node-address "$NODE_ADDRESS" \ - --state-root-hash "$STATE_ROOT_HASH" \ - --purse-uref "$PURSE_UREF" \ - | jq '.result.balance_value' \ - | sed -e 's/^"//' -e 's/"$//' - ) - -log "$PREFIX balance = $ACCOUNT_BALANCE" diff --git a/utils/nctl/sh/views/view_chain_balances.sh b/utils/nctl/sh/views/view_chain_balances.sh deleted file mode 100644 index 19e5895afa..0000000000 --- 
a/utils/nctl/sh/views/view_chain_balances.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/utils/main.sh -source "$NCTL"/sh/views/utils.sh - -####################################### -# Renders on-chain auction information. -####################################### -function main() -{ - log "---- faucet balance ----" - render_account_balance "$NCTL_ACCOUNT_TYPE_FAUCET" - - log "---- validator balances ----" - for NODE_ID in $(seq 1 "$(get_count_of_nodes)") - do - render_account_balance "$NCTL_ACCOUNT_TYPE_NODE" "$NODE_ID" - done - - log "---- user balances ----" - for USER_ID in $(seq 1 "$(get_count_of_users)") - do - render_account_balance "$NCTL_ACCOUNT_TYPE_USER" "$USER_ID" - done -} - -# ---------------------------------------------------------------- -# ENTRY POINT -# ---------------------------------------------------------------- - -main diff --git a/utils/nctl/sh/views/view_chain_block.sh b/utils/nctl/sh/views/view_chain_block.sh deleted file mode 100644 index 883046ab34..0000000000 --- a/utils/nctl/sh/views/view_chain_block.sh +++ /dev/null @@ -1,42 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/utils/main.sh - -####################################### -# Renders on-chain block information. -# Arguments: -# Block hash. 
-####################################### -function main() -{ - local BLOCK_HASH=${1} - - if [ "$BLOCK_HASH" ]; then - $(get_path_to_client) get-block \ - --node-address "$(get_node_address_rpc)" \ - --block-identifier "$BLOCK_HASH" \ - | jq '.result.block' - else - $(get_path_to_client) get-block \ - --node-address "$(get_node_address_rpc)" \ - | jq '.result.block' - fi -} - -# ---------------------------------------------------------------- -# ENTRY POINT -# ---------------------------------------------------------------- - -unset BLOCK_HASH - -for ARGUMENT in "$@" -do - KEY=$(echo "$ARGUMENT" | cut -f1 -d=) - VALUE=$(echo "$ARGUMENT" | cut -f2 -d=) - case "$KEY" in - block) BLOCK_HASH=${VALUE} ;; - *) - esac -done - -main "$BLOCK_HASH" diff --git a/utils/nctl/sh/views/view_chain_block_transfers.sh b/utils/nctl/sh/views/view_chain_block_transfers.sh deleted file mode 100644 index 2cf8add437..0000000000 --- a/utils/nctl/sh/views/view_chain_block_transfers.sh +++ /dev/null @@ -1,42 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/utils/main.sh - -####################################### -# Renders on-chain block transfer information. -# Arguments: -# Block hash. 
-####################################### -function main() -{ - local BLOCK_HASH=${1} - - if [ "$BLOCK_HASH" ]; then - $(get_path_to_client) get-block \ - --node-address "$(get_node_address_rpc)" \ - --block-identifier "$BLOCK_HASH" \ - | jq '.result.block' - else - $(get_path_to_client) get-block \ - --node-address "$(get_node_address_rpc)" \ - | jq '.result.block' - fi -} - -# ---------------------------------------------------------------- -# ENTRY POINT -# ---------------------------------------------------------------- - -unset BLOCK_HASH - -for ARGUMENT in "$@" -do - KEY=$(echo "$ARGUMENT" | cut -f1 -d=) - VALUE=$(echo "$ARGUMENT" | cut -f2 -d=) - case "$KEY" in - block) BLOCK_HASH=${VALUE} ;; - *) - esac -done - -main "${BLOCK_HASH:-""}" diff --git a/utils/nctl/sh/views/view_chain_deploy.sh b/utils/nctl/sh/views/view_chain_deploy.sh deleted file mode 100644 index aeb45c2c33..0000000000 --- a/utils/nctl/sh/views/view_chain_deploy.sh +++ /dev/null @@ -1,34 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/utils/main.sh - -####################################### -# Renders on-chain deploy information. -# Arguments: -# Deploy hash. 
-####################################### -function main() -{ - local DEPLOY_HASH=${1} - - $(get_path_to_client) get-deploy \ - --node-address "$(get_node_address_rpc)" \ - "$DEPLOY_HASH" \ - | jq '.result' -} - -# ---------------------------------------------------------------- -# ENTRY POINT -# ---------------------------------------------------------------- - -for ARGUMENT in "$@" -do - KEY=$(echo "$ARGUMENT" | cut -f1 -d=) - VALUE=$(echo "$ARGUMENT" | cut -f2 -d=) - case "$KEY" in - deploy) DEPLOY_HASH=${VALUE} ;; - *) - esac -done - -main "$DEPLOY_HASH" diff --git a/utils/nctl/sh/views/view_chain_era.sh b/utils/nctl/sh/views/view_chain_era.sh deleted file mode 100644 index 1f94949be4..0000000000 --- a/utils/nctl/sh/views/view_chain_era.sh +++ /dev/null @@ -1,40 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/utils/main.sh - -####################################### -# Renders chain era at specified node(s). -# Arguments: -# Node ordinal identifier. -####################################### -function main() -{ - local NODE_ID=${1} - - if [ "$NODE_ID" = "all" ]; then - for NODE_ID in $(seq 1 "$(get_count_of_nodes)") - do - log "chain era @ node-$NODE_ID = $(get_chain_era "$NODE_ID")" - done - else - log "chain era @ node-$NODE_ID = $(get_chain_era "$NODE_ID")" - fi -} - -# ---------------------------------------------------------------- -# ENTRY POINT -# ---------------------------------------------------------------- - -unset NODE_ID - -for ARGUMENT in "$@" -do - KEY=$(echo "$ARGUMENT" | cut -f1 -d=) - VALUE=$(echo "$ARGUMENT" | cut -f2 -d=) - case "$KEY" in - node) NODE_ID=${VALUE} ;; - *) - esac -done - -main "${NODE_ID:-"all"}" diff --git a/utils/nctl/sh/views/view_chain_era_info.sh b/utils/nctl/sh/views/view_chain_era_info.sh deleted file mode 100644 index 252c511715..0000000000 --- a/utils/nctl/sh/views/view_chain_era_info.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/utils/main.sh - -####################################### -# 
Renders on-chain era information. -# Arguments: -# Node ordinal identifier (optional). -####################################### -function main() -{ - local NODE_ID=${1} - - $(get_path_to_client) get-era-info-by-switch-block \ - --node-address "$(get_node_address_rpc "$NODE_ID")" \ - --block-identifier "" \ - | jq '.result' -} - -# ---------------------------------------------------------------- -# ENTRY POINT -# ---------------------------------------------------------------- - -unset NODE_ID - -for ARGUMENT in "$@" -do - KEY=$(echo "$ARGUMENT" | cut -f1 -d=) - VALUE=$(echo "$ARGUMENT" | cut -f2 -d=) - case "$KEY" in - node) NODE_ID=${VALUE} ;; - *) - esac -done - -main "$NODE_ID" diff --git a/utils/nctl/sh/views/view_chain_height.sh b/utils/nctl/sh/views/view_chain_height.sh deleted file mode 100644 index 05b86e704f..0000000000 --- a/utils/nctl/sh/views/view_chain_height.sh +++ /dev/null @@ -1,40 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/utils/main.sh - -####################################### -# Renders chain height at specified node(s). -# Arguments: -# Node ordinal identifier. 
-####################################### -function main() -{ - local NODE_ID=${1} - - if [ "$NODE_ID" = "all" ]; then - for NODE_ID in $(seq 1 "$(get_count_of_nodes)") - do - log "chain height @ node-$NODE_ID = $(get_chain_height "$NODE_ID")" - done - else - log "chain height @ node-$NODE_ID = $(get_chain_height "$NODE_ID")" - fi -} - -# ---------------------------------------------------------------- -# ENTRY POINT -# ---------------------------------------------------------------- - -unset NODE_ID - -for ARGUMENT in "$@" -do - KEY=$(echo "$ARGUMENT" | cut -f1 -d=) - VALUE=$(echo "$ARGUMENT" | cut -f2 -d=) - case "$KEY" in - node) NODE_ID=${VALUE} ;; - *) - esac -done - -main "${NODE_ID:-"all"}" diff --git a/utils/nctl/sh/views/view_chain_lfb.sh b/utils/nctl/sh/views/view_chain_lfb.sh deleted file mode 100644 index e2fc482146..0000000000 --- a/utils/nctl/sh/views/view_chain_lfb.sh +++ /dev/null @@ -1,41 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/utils/main.sh -source "$NCTL"/sh/views/utils.sh - -####################################### -# Renders last finalized block at specified node(s). -# Arguments: -# Node ordinal identifier. 
-####################################### -function main() -{ - local NODE_ID=${1} - - if [ "$NODE_ID" = "all" ]; then - for NODE_ID in $(seq 1 "$(get_count_of_nodes)") - do - render_last_finalized_block_hash "$NODE_ID" - done - else - render_last_finalized_block_hash "$NODE_ID" - fi -} - -# ---------------------------------------------------------------- -# ENTRY POINT -# ---------------------------------------------------------------- - -unset NODE_ID - -for ARGUMENT in "$@" -do - KEY=$(echo "$ARGUMENT" | cut -f1 -d=) - VALUE=$(echo "$ARGUMENT" | cut -f2 -d=) - case "$KEY" in - node) NODE_ID=${VALUE} ;; - *) - esac -done - -main "${NODE_ID:-"all"}" diff --git a/utils/nctl/sh/views/view_chain_spec.sh b/utils/nctl/sh/views/view_chain_spec.sh deleted file mode 100644 index 5f09c87f3b..0000000000 --- a/utils/nctl/sh/views/view_chain_spec.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/utils/main.sh - -less "$(get_path_to_net)"/chainspec/chainspec.toml diff --git a/utils/nctl/sh/views/view_chain_spec_accounts.sh b/utils/nctl/sh/views/view_chain_spec_accounts.sh deleted file mode 100644 index dcc76c5a0f..0000000000 --- a/utils/nctl/sh/views/view_chain_spec_accounts.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/utils/main.sh - -less "$(get_path_to_net)"/chainspec/accounts.toml diff --git a/utils/nctl/sh/views/view_chain_state_root_hash.sh b/utils/nctl/sh/views/view_chain_state_root_hash.sh deleted file mode 100644 index 024003346d..0000000000 --- a/utils/nctl/sh/views/view_chain_state_root_hash.sh +++ /dev/null @@ -1,45 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/utils/main.sh -source "$NCTL"/sh/views/utils.sh - -####################################### -# Renders chain state root hash at specified node(s). -# Arguments: -# Node ordinal identifier. -# Block hash. 
-####################################### -function main() -{ - local NODE_ID=${1} - local BLOCK_HASH=${2} - - if [ "$NODE_ID" = "all" ]; then - for NODE_ID in $(seq 1 "$(get_count_of_nodes)") - do - render_chain_state_root_hash "$NODE_ID" "$BLOCK_HASH" - done - else - render_chain_state_root_hash "$NODE_ID" "$BLOCK_HASH" - fi -} - -# ---------------------------------------------------------------- -# ENTRY POINT -# ---------------------------------------------------------------- - -unset BLOCK_HASH -unset NODE_ID - -for ARGUMENT in "$@" -do - KEY=$(echo "$ARGUMENT" | cut -f1 -d=) - VALUE=$(echo "$ARGUMENT" | cut -f2 -d=) - case "$KEY" in - block) BLOCK_HASH=${VALUE} ;; - node) NODE_ID=${VALUE} ;; - *) - esac -done - -main "${NODE_ID:-"all"}" "${BLOCK_HASH:-""}" diff --git a/utils/nctl/sh/views/view_faucet_account.sh b/utils/nctl/sh/views/view_faucet_account.sh deleted file mode 100644 index 2d3065a6eb..0000000000 --- a/utils/nctl/sh/views/view_faucet_account.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/utils/main.sh -source "$NCTL"/sh/views/utils.sh - -ACCOUNT_KEY=$(get_account_key "$NCTL_ACCOUNT_TYPE_FAUCET") -ACCOUNT_HASH=$(get_account_hash "$ACCOUNT_KEY") -PATH_TO_ACCOUNT_SKEY=$(get_path_to_secret_key "$NCTL_ACCOUNT_TYPE_FAUCET") -STATE_ROOT_HASH=$(get_state_root_hash) -PURSE_UREF=$(get_main_purse_uref "$ACCOUNT_KEY" "$STATE_ROOT_HASH") -ACCOUNT_BALANCE=$(get_account_balance "$PURSE_UREF" "$STATE_ROOT_HASH") - -log "faucet a/c secret key : $PATH_TO_ACCOUNT_SKEY" -log "faucet a/c key : $ACCOUNT_KEY" -log "faucet a/c hash : $ACCOUNT_HASH" -log "faucet a/c purse : $PURSE_UREF" -log "faucet a/c purse balance : $ACCOUNT_BALANCE" -log "faucet on-chain account : see below" -render_account "$NCTL_ACCOUNT_TYPE_FAUCET" diff --git a/utils/nctl/sh/views/view_node_config.sh b/utils/nctl/sh/views/view_node_config.sh deleted file mode 100644 index 8b701c29ef..0000000000 --- a/utils/nctl/sh/views/view_node_config.sh +++ /dev/null @@ -1,17 +0,0 @@ 
-#!/usr/bin/env bash - -source "$NCTL"/sh/utils/main.sh - -unset NODE_ID - -for ARGUMENT in "$@" -do - KEY=$(echo "$ARGUMENT" | cut -f1 -d=) - VALUE=$(echo "$ARGUMENT" | cut -f2 -d=) - case "$KEY" in - node) NODE_ID=${VALUE} ;; - *) - esac -done - -less "$(get_path_to_node "${NODE_ID:-1}")"/config/1_0_0/config.toml diff --git a/utils/nctl/sh/views/view_node_log_stderr.sh b/utils/nctl/sh/views/view_node_log_stderr.sh deleted file mode 100644 index b4515d7cbd..0000000000 --- a/utils/nctl/sh/views/view_node_log_stderr.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/utils/main.sh - -unset NODE_ID - -for ARGUMENT in "$@" -do - KEY=$(echo "$ARGUMENT" | cut -f1 -d=) - VALUE=$(echo "$ARGUMENT" | cut -f2 -d=) - case "$KEY" in - node) NODE_ID=${VALUE} ;; - *) - esac -done - -less "$(get_path_to_node "${NODE_ID:-1}")"/logs/stderr.log diff --git a/utils/nctl/sh/views/view_node_log_stdout.sh b/utils/nctl/sh/views/view_node_log_stdout.sh deleted file mode 100644 index b970d0914e..0000000000 --- a/utils/nctl/sh/views/view_node_log_stdout.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/utils/main.sh - -unset NODE_ID - -for ARGUMENT in "$@" -do - KEY=$(echo "$ARGUMENT" | cut -f1 -d=) - VALUE=$(echo "$ARGUMENT" | cut -f2 -d=) - case "$KEY" in - node) NODE_ID=${VALUE} ;; - *) - esac -done - -less "$(get_path_to_node "${NODE_ID:-1}")"/logs/stdout.log diff --git a/utils/nctl/sh/views/view_node_metrics.sh b/utils/nctl/sh/views/view_node_metrics.sh deleted file mode 100644 index bffd4e9b0a..0000000000 --- a/utils/nctl/sh/views/view_node_metrics.sh +++ /dev/null @@ -1,71 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/utils/main.sh - -####################################### -# Renders chain height at specified node(s). -# Arguments: -# Node ordinal identifier. -# Metric identifier. 
-####################################### -function main() -{ - local NODE_ID=${1} - local METRIC=${2} - local NODE_ID - - if [ "$NODE_ID" = "all" ]; then - for NODE_ID in $(seq 1 "$(get_count_of_nodes)") - do - if [ "$(get_node_is_up "$NODE_ID")" = true ]; then - do_render "$NODE_ID" "$METRIC" - fi - done - else - if [ "$(get_node_is_up "$NODE_ID")" = true ]; then - do_render "$NODE_ID" "$METRIC" - fi - fi -} - -####################################### -# Displays to stdout current node metrics. -# Arguments: -# Network ordinal identifier. -# Node ordinal identifier. -# Metric name. -####################################### -function do_render() -{ - local NODE_ID=${1} - local METRICS=${2} - local ENDPOINT - - ENDPOINT="$(get_node_address_rest "$NODE_ID")"/metrics - - if [ "$METRICS" = "all" ]; then - curl -s --location --request GET "$ENDPOINT" - else - echo "node #$NODE_ID :: $(curl -s --location --request GET "$ENDPOINT" | grep "$METRICS" | tail -n 1)" - fi -} - -# ---------------------------------------------------------------- -# ENTRY POINT -# ---------------------------------------------------------------- - -unset NODE_ID -unset METRIC - -for ARGUMENT in "$@" -do - KEY=$(echo "$ARGUMENT" | cut -f1 -d=) - VALUE=$(echo "$ARGUMENT" | cut -f2 -d=) - case "$KEY" in - metric) METRIC=${VALUE} ;; - node) NODE_ID=${VALUE} ;; - *) - esac -done - -main "${NODE_ID:-"all"}" "${METRIC:-"all"}" diff --git a/utils/nctl/sh/views/view_node_peer_count.sh b/utils/nctl/sh/views/view_node_peer_count.sh deleted file mode 100644 index 3d9d6ac911..0000000000 --- a/utils/nctl/sh/views/view_node_peer_count.sh +++ /dev/null @@ -1,69 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/utils/main.sh - -####################################### -# Renders peer set at specified node(s). -# Arguments: -# Node ordinal identifier. 
-####################################### -function main() -{ - local NODE_ID=${1} - - if [ "$NODE_ID" = "all" ]; then - for NODE_ID in $(seq 1 "$(get_count_of_nodes)") - do - do_render "$NODE_ID" - done - else - do_render "$NODE_ID" - fi -} - -####################################### -# Displays to stdout count of current node peers. -# Arguments: -# Node ordinal identifier. -####################################### -function do_render() -{ - local NODE_ID=${1} - local NODE_ADDRESS_CURL - local NODE_PEER_COUNT - - NODE_ADDRESS_CURL=$(get_node_address_rpc_for_curl "$NODE_ID") - NODE_PEER_COUNT=$( - curl -s --header 'Content-Type: application/json' \ - --request POST "$NODE_ADDRESS_CURL" \ - --data-raw '{ - "id": 1, - "jsonrpc": "2.0", - "method": "info_get_peers" - }' | jq '.result.peers | length' - ) - - if [ -z "$NODE_PEER_COUNT" ]; then - log "node #$NODE_ID :: peers: N/A" - else - log "node #$NODE_ID :: peers: $NODE_PEER_COUNT" - fi -} - -# ---------------------------------------------------------------- -# ENTRY POINT -# ---------------------------------------------------------------- - -unset NODE_ID - -for ARGUMENT in "$@" -do - KEY=$(echo "$ARGUMENT" | cut -f1 -d=) - VALUE=$(echo "$ARGUMENT" | cut -f2 -d=) - case "$KEY" in - node) NODE_ID=${VALUE} ;; - *) - esac -done - -main "${NODE_ID:-"all"}" diff --git a/utils/nctl/sh/views/view_node_peers.sh b/utils/nctl/sh/views/view_node_peers.sh deleted file mode 100644 index d537c90238..0000000000 --- a/utils/nctl/sh/views/view_node_peers.sh +++ /dev/null @@ -1,74 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/utils/main.sh - -####################################### -# Renders peer set at specified node(s). -# Arguments: -# Node ordinal identifier. 
-####################################### -function main() -{ - local NODE_ID=${1} - - if [ "$NODE_ID" = "all" ]; then - for NODE_ID in $(seq 1 "$(get_count_of_nodes)") - do - if [ "$(get_node_is_up "$NODE_ID")" = true ]; then - echo "------------------------------------------------------------------------------------------------------------------------------------" - do_render "$NODE_ID" - fi - done - echo "------------------------------------------------------------------------------------------------------------------------------------" - else - do_render "$NODE_ID" - fi -} - -####################################### -# Displays to stdout current node peers. -# Arguments: -# Node ordinal identifier. -####################################### -function do_render() -{ - local NODE_ID=${1} - local NODE_ADDRESS_CURL - local NODE_API_RESPONSE - - NODE_ADDRESS_CURL=$(get_node_address_rpc_for_curl "$NODE_ID") - NODE_API_RESPONSE=$( - curl -s --header 'Content-Type: application/json' \ - --request POST "$NODE_ADDRESS_CURL" \ - --data-raw '{ - "id": 1, - "jsonrpc": "2.0", - "method": "info_get_peers" - }' | jq '.result.peers' - ) - - if [ -z "$NODE_API_RESPONSE" ]; then - log "node #$NODE_ID :: peers: N/A" - else - log "node #$NODE_ID :: peers:" - echo "$NODE_API_RESPONSE" | jq - fi -} - -# ---------------------------------------------------------------- -# ENTRY POINT -# ---------------------------------------------------------------- - -unset NODE_ID - -for ARGUMENT in "$@" -do - KEY=$(echo "$ARGUMENT" | cut -f1 -d=) - VALUE=$(echo "$ARGUMENT" | cut -f2 -d=) - case "$KEY" in - node) NODE_ID=${VALUE} ;; - *) - esac -done - -main "${NODE_ID:-"all"}" diff --git a/utils/nctl/sh/views/view_node_ports.sh b/utils/nctl/sh/views/view_node_ports.sh deleted file mode 100644 index 45e748a889..0000000000 --- a/utils/nctl/sh/views/view_node_ports.sh +++ /dev/null @@ -1,65 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/utils/main.sh - -####################################### -# 
Renders ports at specified node(s). -# Arguments: -# Node ordinal identifier. -####################################### -function main() -{ - local NODE_ID=${1} - - if [ "$NODE_ID" = "all" ]; then - for NODE_ID in $(seq 1 "$(get_count_of_nodes)") - do - echo "------------------------------------------------------------------------------------------------------------------------------------" - do_render "$NODE_ID" - done - echo "------------------------------------------------------------------------------------------------------------------------------------" - else - do_render "$NODE_ID" - fi -} - -####################################### -# Displays to stdout current node ports. -# Globals: -# NCTL_BASE_PORT_NETWORK - base port type. -# Arguments: -# Node ordinal identifier. -####################################### -function do_render() -{ - local NODE_ID=${1} - local PORT_VNET - local PORT_REST - local PORT_RPC - local PORT_SSE - - PORT_VNET=$(get_node_port "$NCTL_BASE_PORT_NETWORK" "$NODE_ID") - PORT_REST=$(get_node_port_rest "$NODE_ID") - PORT_RPC=$(get_node_port_rpc "$NODE_ID") - PORT_SSE=$(get_node_port_sse "$NODE_ID") - - log "node-$NODE_ID :: VNET @ $PORT_VNET :: RPC @ $PORT_RPC :: REST @ $PORT_REST :: SSE @ $PORT_SSE" -} - -# ---------------------------------------------------------------- -# ENTRY POINT -# ---------------------------------------------------------------- - -unset NODE_ID - -for ARGUMENT in "$@" -do - KEY=$(echo "$ARGUMENT" | cut -f1 -d=) - VALUE=$(echo "$ARGUMENT" | cut -f2 -d=) - case "$KEY" in - node) NODE_ID=${VALUE} ;; - *) - esac -done - -main "${NODE_ID:-"all"}" diff --git a/utils/nctl/sh/views/view_node_rpc_endpoint.sh b/utils/nctl/sh/views/view_node_rpc_endpoint.sh deleted file mode 100644 index 54644b3728..0000000000 --- a/utils/nctl/sh/views/view_node_rpc_endpoint.sh +++ /dev/null @@ -1,48 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/utils/main.sh - -####################################### -# Displays to stdout RPC schema. 
-####################################### -function main() -{ - local ENDPOINT=${1} - - if [ "$ENDPOINT" = "all" ]; then - curl -s --header 'Content-Type: application/json' \ - --request POST "$(get_node_address_rpc_for_curl)" \ - --data-raw '{ - "id": 1, - "jsonrpc": "2.0", - "method": "rpc.discover" - }' | jq '.result.schema.methods[].name' - else - curl -s --header 'Content-Type: application/json' \ - --request POST "$(get_node_address_rpc_for_curl)" \ - --data-raw '{ - "id": 1, - "jsonrpc": "2.0", - "method": "rpc.discover" - }' | jq '.result.schema.methods[] | select(.name == "'"$ENDPOINT"'")' - - fi -} - -# ---------------------------------------------------------------- -# ENTRY POINT -# ---------------------------------------------------------------- - -unset ENDPOINT - -for ARGUMENT in "$@" -do - KEY=$(echo "$ARGUMENT" | cut -f1 -d=) - VALUE=$(echo "$ARGUMENT" | cut -f2 -d=) - case "$KEY" in - endpoint) ENDPOINT=${VALUE} ;; - *) - esac -done - -main "${ENDPOINT:-"all"}" diff --git a/utils/nctl/sh/views/view_node_rpc_schema.sh b/utils/nctl/sh/views/view_node_rpc_schema.sh deleted file mode 100644 index 41f2b8aa3a..0000000000 --- a/utils/nctl/sh/views/view_node_rpc_schema.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/utils/main.sh - -####################################### -# Displays to stdout RPC schema. 
-####################################### -function main() -{ - curl -s --header 'Content-Type: application/json' \ - --request POST "$(get_node_address_rpc_for_curl)" \ - --data-raw '{ - "id": 1, - "jsonrpc": "2.0", - "method": "rpc.discover" - }' | jq '.result.schema' -} - -# ---------------------------------------------------------------- -# ENTRY POINT -# ---------------------------------------------------------------- - -main diff --git a/utils/nctl/sh/views/view_node_status.sh b/utils/nctl/sh/views/view_node_status.sh deleted file mode 100644 index 5c461475ad..0000000000 --- a/utils/nctl/sh/views/view_node_status.sh +++ /dev/null @@ -1,77 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/utils/main.sh - -####################################### -# Renders status at specified node(s). -# Arguments: -# Node ordinal identifier. -####################################### -function main() -{ - local NODE_ID=${1} - - if [ "$NODE_ID" = "all" ]; then - for NODE_ID in $(seq 1 "$(get_count_of_nodes)") - do - if [ "$(get_node_is_up "$NODE_ID")" = true ]; then - echo "------------------------------------------------------------------------------------------------------------------------------------" - do_render "$NODE_ID" - fi - done - echo "------------------------------------------------------------------------------------------------------------------------------------" - else - if [ "$(get_node_is_up "$NODE_ID")" = true ]; then - do_render "$NODE_ID" - fi - fi -} - -####################################### -# Displays to stdout current node status. -# Arguments: -# Network ordinal identifier. -# Node ordinal identifier. 
-####################################### -function do_render() -{ - local NODE_ID=${1} - local NODE_ADDRESS_CURL - local NODE_API_RESPONSE - - NODE_ADDRESS_CURL=$(get_node_address_rpc_for_curl "$NODE_ID") - NODE_API_RESPONSE=$( - curl -s --header 'Content-Type: application/json' \ - --request POST "$NODE_ADDRESS_CURL" \ - --data-raw '{ - "id": 1, - "jsonrpc": "2.0", - "method": "info_get_status" - }' | jq '.result' - ) - - if [ -z "$NODE_API_RESPONSE" ]; then - log "node #$NODE_ID :: status: N/A" - else - log "node #$NODE_ID :: status:" - echo "$NODE_API_RESPONSE" | jq '.' - fi -} - -# ---------------------------------------------------------------- -# ENTRY POINT -# ---------------------------------------------------------------- - -unset NODE_ID - -for ARGUMENT in "$@" -do - KEY=$(echo "$ARGUMENT" | cut -f1 -d=) - VALUE=$(echo "$ARGUMENT" | cut -f2 -d=) - case "$KEY" in - node) NODE_ID=${VALUE} ;; - *) - esac -done - -main "${NODE_ID:-"all"}" diff --git a/utils/nctl/sh/views/view_node_storage.sh b/utils/nctl/sh/views/view_node_storage.sh deleted file mode 100644 index c338dabdca..0000000000 --- a/utils/nctl/sh/views/view_node_storage.sh +++ /dev/null @@ -1,68 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/utils/main.sh - -####################################### -# Renders status at specified node(s). -# Arguments: -# Node ordinal identifier. -####################################### -function main() -{ - local NODE_ID=${1} - - if [ "$NODE_ID" = "all" ]; then - for NODE_ID in $(seq 1 "$(get_count_of_nodes)") - do - echo "------------------------------------------------------------------------------------------------------------------------------------" - do_render "$NODE_ID" - done - echo "------------------------------------------------------------------------------------------------------------------------------------" - else - do_render "$NODE_ID" - fi -} - -####################################### -# Displays to stdout current node storage stats. 
-# Globals: -# _OS_LINUX - linux OS literal. -# _OS_MACOSX - Mac OS literal. -# Arguments: -# Node ordinal identifier. -####################################### -function do_render() -{ - local NODE_ID=${1} - local OS_TYPE - local PATH_TO_NODE_STORAGE - - OS_TYPE="$(get_os)" - PATH_TO_NODE_STORAGE="$(get_path_to_node "$NODE_ID")/storage" - - log "node #$NODE_ID :: storage @ $PATH_TO_NODE_STORAGE" - - if [[ $OS_TYPE == "$_OS_LINUX*" ]]; then - ll "$PATH_TO_NODE_STORAGE" - elif [[ $OS_TYPE == "$_OS_MACOSX" ]]; then - ls -lG "$PATH_TO_NODE_STORAGE" - fi -} - -# ---------------------------------------------------------------- -# ENTRY POINT -# ---------------------------------------------------------------- - -unset NODE_ID - -for ARGUMENT in "$@" -do - KEY=$(echo "$ARGUMENT" | cut -f1 -d=) - VALUE=$(echo "$ARGUMENT" | cut -f2 -d=) - case "$KEY" in - node) NODE_ID=${VALUE} ;; - *) - esac -done - -main "${NODE_ID:-"all"}" diff --git a/utils/nctl/sh/views/view_node_storage_consensus.sh b/utils/nctl/sh/views/view_node_storage_consensus.sh deleted file mode 100644 index 24137c85ba..0000000000 --- a/utils/nctl/sh/views/view_node_storage_consensus.sh +++ /dev/null @@ -1,68 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/utils/main.sh - -####################################### -# Renders status at specified node(s). -# Arguments: -# Node ordinal identifier. -####################################### -function main() -{ - local NODE_ID=${1} - - if [ "$NODE_ID" = "all" ]; then - for NODE_ID in $(seq 1 "$(get_count_of_nodes)") - do - echo "------------------------------------------------------------------------------------------------------------------------------------" - do_render "$NODE_ID" - done - echo "------------------------------------------------------------------------------------------------------------------------------------" - else - do_render "$NODE_ID" - fi -} - -####################################### -# Displays to stdout current node storage stats. 
-# Globals: -# _OS_LINUX - linux OS literal. -# _OS_MACOSX - Mac OS literal. -# Arguments: -# Node ordinal identifier. -####################################### -function do_render() -{ - local NODE_ID=${1} - local OS_TYPE - local PATH_TO_NODE_STORAGE - - OS_TYPE="$(get_os)" - PATH_TO_NODE_STORAGE="$(get_path_to_node "$NODE_ID")/storage-consensus" - - log "node #$NODE_ID :: consensus storage @ $PATH_TO_NODE_STORAGE" - - if [[ $OS_TYPE == "$_OS_LINUX*" ]]; then - ll "$PATH_TO_NODE_STORAGE" - elif [[ $OS_TYPE == "$_OS_MACOSX" ]]; then - ls -lG "$PATH_TO_NODE_STORAGE" - fi -} - -# ---------------------------------------------------------------- -# ENTRY POINT -# ---------------------------------------------------------------- - -unset NODE_ID - -for ARGUMENT in "$@" -do - KEY=$(echo "$ARGUMENT" | cut -f1 -d=) - VALUE=$(echo "$ARGUMENT" | cut -f2 -d=) - case "$KEY" in - node) NODE_ID=${VALUE} ;; - *) - esac -done - -main "${NODE_ID:-"all"}" diff --git a/utils/nctl/sh/views/view_user_account.sh b/utils/nctl/sh/views/view_user_account.sh deleted file mode 100644 index bd6c406d2a..0000000000 --- a/utils/nctl/sh/views/view_user_account.sh +++ /dev/null @@ -1,69 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/utils/main.sh -source "$NCTL"/sh/views/utils.sh - -####################################### -# Renders user on-chain account details. -# Arguments: -# Node ordinal identifier. -####################################### -function main() -{ - local USER_ID=${1} - - if [ "$USER_ID" = "all" ]; then - for USER_ID in $(seq 1 "$(get_count_of_users)") - do - echo "------------------------------------------------------------------------------------------------------------------------------------" - render "$USER_ID" - done - else - render "$USER_ID" - fi -} - -####################################### -# Validator account details renderer. -# Globals: -# NCTL_ACCOUNT_TYPE_USER - node account type literal. -# Arguments: -# Node ordinal identifier. 
-####################################### -function render() -{ - local USER_ID=${1} - - PATH_TO_ACCOUNT_SKEY=$(get_path_to_secret_key "$NCTL_ACCOUNT_TYPE_USER" "$USER_ID") - ACCOUNT_KEY=$(get_account_key "$NCTL_ACCOUNT_TYPE_USER" "$USER_ID") - ACCOUNT_HASH=$(get_account_hash "$ACCOUNT_KEY") - STATE_ROOT_HASH=$(get_state_root_hash) - PURSE_UREF=$(get_main_purse_uref "$ACCOUNT_KEY" "$STATE_ROOT_HASH") - ACCOUNT_BALANCE=$(get_account_balance "$PURSE_UREF" "$STATE_ROOT_HASH") - - log "user #$USER_ID a/c secret key : $PATH_TO_ACCOUNT_SKEY" - log "user #$USER_ID a/c key : $ACCOUNT_KEY" - log "user #$USER_ID a/c hash : $ACCOUNT_HASH" - log "user #$USER_ID a/c purse : $PURSE_UREF" - log "user #$USER_ID a/c purse balance : $ACCOUNT_BALANCE" - log "user #$USER_ID on-chain account : see below" - render_account "$NCTL_ACCOUNT_TYPE_USER" "$USER_ID" -} - -# ---------------------------------------------------------------- -# ENTRY POINT -# ---------------------------------------------------------------- - -unset USER_ID - -for ARGUMENT in "$@" -do - KEY=$(echo "$ARGUMENT" | cut -f1 -d=) - VALUE=$(echo "$ARGUMENT" | cut -f2 -d=) - case "$KEY" in - user) USER_ID=${VALUE} ;; - *) - esac -done - -main "${USER_ID:-"all"}" diff --git a/utils/nctl/sh/views/view_validator_account.sh b/utils/nctl/sh/views/view_validator_account.sh deleted file mode 100644 index dd6158a6c8..0000000000 --- a/utils/nctl/sh/views/view_validator_account.sh +++ /dev/null @@ -1,70 +0,0 @@ -#!/usr/bin/env bash - -source "$NCTL"/sh/utils/main.sh -source "$NCTL"/sh/views/utils.sh - -####################################### -# Renders node on-chain account details. -# Arguments: -# Node ordinal identifier. 
-####################################### -function main() -{ - local NODE_ID=${1} - - if [ "$NODE_ID" = "all" ]; then - for NODE_ID in $(seq 1 "$(get_count_of_nodes)") - do - echo "------------------------------------------------------------------------------------------------------------------------------------" - render "$NODE_ID" - done - else - render "$NODE_ID" - fi -} - -####################################### -# Validator account details renderer. -# Globals: -# NCTL_ACCOUNT_TYPE_NODE - node account type literal. -# Arguments: -# Node ordinal identifier. -####################################### -function render() -{ - local NODE_ID=${1} - - PATH_TO_ACCOUNT_SKEY=$(get_path_to_secret_key "$NCTL_ACCOUNT_TYPE_NODE" "$NODE_ID") - ACCOUNT_KEY=$(get_account_key "$NCTL_ACCOUNT_TYPE_NODE" "$NODE_ID") - ACCOUNT_HASH=$(get_account_hash "$ACCOUNT_KEY") - STATE_ROOT_HASH=$(get_state_root_hash) - PURSE_UREF=$(get_main_purse_uref "$ACCOUNT_KEY" "$STATE_ROOT_HASH") - ACCOUNT_BALANCE=$(get_account_balance "$PURSE_UREF" "$STATE_ROOT_HASH") - - log "validator #$NODE_ID a/c secret key : $PATH_TO_ACCOUNT_SKEY" - log "validator #$NODE_ID a/c key : $ACCOUNT_KEY" - log "validator #$NODE_ID a/c hash : $ACCOUNT_HASH" - log "validator #$NODE_ID a/c purse : $PURSE_UREF" - log "validator #$NODE_ID a/c purse balance : $ACCOUNT_BALANCE" - log "validator #$NODE_ID on-chain account : see below" - render_account "$NCTL_ACCOUNT_TYPE_NODE" "$NODE_ID" -} - -# ---------------------------------------------------------------- -# ENTRY POINT -# ---------------------------------------------------------------- - -unset NODE_ID - -for ARGUMENT in "$@" -do - KEY=$(echo "$ARGUMENT" | cut -f1 -d=) - VALUE=$(echo "$ARGUMENT" | cut -f2 -d=) - case "$KEY" in - node) NODE_ID=${VALUE} ;; - validator) NODE_ID=${VALUE} ;; - *) - esac -done - -main "${NODE_ID:-"all"}" diff --git a/utils/validation/Cargo.toml b/utils/validation/Cargo.toml new file mode 100644 index 0000000000..9e148318e7 --- /dev/null +++ 
b/utils/validation/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "casper-validation" +version = "0.1.0" +authors = ["Michał Papierski "] +edition = "2021" + +[dependencies] +anyhow = "1" +base16 = "0.2.1" +casper-types = { path = "../../types", features = ["testing", "std", "json-schema"] } +clap = { version = "3.0.0-rc.0", features = ["derive"] } +derive_more = "0.99.13" +hex = { version = "0.4.2", features = ["serde"] } +serde = "1" +serde_json = "1" +thiserror = "1.0.18" + +[[test]] +name = "validation_test" +harness = false + +[[bin]] +name = "casper-validation" +test = false +doctest = false + +[lib] +test = false +doctest = false diff --git a/utils/validation/README.md b/utils/validation/README.md new file mode 100644 index 0000000000..72c8aea45e --- /dev/null +++ b/utils/validation/README.md @@ -0,0 +1,28 @@ +validation +=============== + +Automation tool to validate the code based on fixtures. + +What is casper-validation? +-------------------------------------- + +This tool validates the code by loading fixtures JSON that contains an input, and the expected output by applying an operation. + +Usage +-------------------------------------- + +To generate new fixtures with a generator run: + +``` +cargo run -p casper-validation -- generate --output utils/validation/tests/fixtures +``` + +**Important note** + +Do not use this with day to day development - for example to fix an error in serialization code by replacing the fixture with possibly invalid code. 
+
+To validate the implementation using all the fixtures:
+
+```
+cargo test -p casper-validation
+```
diff --git a/utils/validation/src/abi.rs b/utils/validation/src/abi.rs
new file mode 100644
index 0000000000..70d4770b07
--- /dev/null
+++ b/utils/validation/src/abi.rs
@@ -0,0 +1,164 @@
+use std::collections::BTreeMap;
+
+use serde::{Deserialize, Serialize};
+
+use casper_types::{
+    bytesrepr::{self, ToBytes},
+    CLValue, Key, StoredValue, U512,
+};
+
+use crate::test_case::{Error, TestCase};
+
+/// Representation of supported input value.
+#[derive(Serialize, Deserialize, Debug, From)]
+#[serde(tag = "type", content = "value")]
+pub enum Input {
+    U8(u8),
+    U16(u16),
+    U32(u32),
+    U64(u64),
+    String(String),
+    Bool(bool),
+    U512(U512),
+    CLValue(CLValue),
+    Key(Key),
+    StoredValue(StoredValue),
+}
+
+impl ToBytes for Input {
+    fn to_bytes(&self) -> Result<Vec<u8>, bytesrepr::Error> {
+        match self {
+            Input::U8(value) => value.to_bytes(),
+            Input::U16(value) => value.to_bytes(),
+            Input::U32(value) => value.to_bytes(),
+            Input::U64(value) => value.to_bytes(),
+            Input::String(value) => value.to_bytes(),
+            Input::Bool(value) => value.to_bytes(),
+            Input::U512(value) => value.to_bytes(),
+            Input::CLValue(value) => value.to_bytes(),
+            Input::Key(value) => value.to_bytes(),
+            Input::StoredValue(value) => value.to_bytes(),
+        }
+    }
+
+    fn serialized_length(&self) -> usize {
+        match self {
+            Input::U8(value) => value.serialized_length(),
+            Input::U16(value) => value.serialized_length(),
+            Input::U32(value) => value.serialized_length(),
+            Input::U64(value) => value.serialized_length(),
+            Input::String(value) => value.serialized_length(),
+            Input::Bool(value) => value.serialized_length(),
+            Input::U512(value) => value.serialized_length(),
+            Input::CLValue(value) => value.serialized_length(),
+            Input::Key(value) => value.serialized_length(),
+            Input::StoredValue(value) => value.serialized_length(),
+        }
+    }
+}
+
+/// A test case defines a list of inputs and an output.
+#[derive(Serialize, Deserialize, Debug)]
+pub struct ABITestCase {
+    input: Vec<serde_json::Value>,
+    output: String,
+}
+
+impl ABITestCase {
+    pub fn from_inputs(inputs: Vec<Input>) -> Result<Self, Error> {
+        // This is manually going through each input passed as we can't use `ToBytes for Vec<T>` as
+        // the `output` would be a serialized collection.
+        let mut truth = Vec::new();
+        for input in &inputs {
+            // Input::to_bytes uses static dispatch to call into each raw value impl.
+            let mut generated_truth = input.to_bytes()?;
+            truth.append(&mut generated_truth);
+        }
+
+        let input_values = inputs
+            .into_iter()
+            .map(serde_json::to_value)
+            .collect::<Result<Vec<_>, _>>()?;
+
+        Ok(ABITestCase {
+            input: input_values,
+            output: hex::encode(truth),
+        })
+    }
+
+    pub fn input(&self) -> Result<Vec<Input>, Error> {
+        let mut res = Vec::new();
+        for input_value in &self.input {
+            let input: Input = serde_json::from_value(input_value.clone())?;
+            res.push(input);
+        }
+        Ok(res)
+    }
+
+    pub fn output(&self) -> Result<Vec<u8>, Error> {
+        let output = hex::decode(&self.output)?;
+        Ok(output)
+    }
+
+    pub fn to_bytes(&self) -> Result<Vec<u8>, Error> {
+        let mut res = Vec::with_capacity(self.serialized_length()?);
+
+        for input in self.input()? {
+            res.append(&mut input.to_bytes()?);
+        }
+
+        Ok(res)
+    }
+
+    pub fn serialized_length(&self) -> Result<usize, Error> {
+        Ok(self.input()?.iter().map(ToBytes::serialized_length).sum())
+    }
+}
+
+impl TestCase for ABITestCase {
+    /// Compares input to output.
+    ///
+    /// This gets executed for each test case.
+    fn run_test(&self) -> Result<(), Error> {
+        let serialized_length = self.serialized_length()?;
+        let serialized_data = self.to_bytes()?;
+
+        let output = self.output()?;
+
+        // Serialized data should match the output
+        if serialized_data != output {
+            return Err(Error::DataMismatch {
+                actual: serialized_data,
+                expected: output.to_vec(),
+            });
+        }
+
+        // Output from serialized_length should match the output data length
+        if serialized_length != output.len() {
+            return Err(Error::LengthMismatch {
+                expected: serialized_length,
+                actual: output.len(),
+            });
+        }
+
+        Ok(())
+    }
+}
+
+/// A fixture consists of multiple test cases.
+#[derive(Serialize, Deserialize, Debug, From)]
+pub struct ABIFixture(BTreeMap<String, ABITestCase>);
+
+impl ABIFixture {
+    pub fn len(&self) -> usize {
+        self.0.len()
+    }
+
+    pub fn is_empty(&self) -> bool {
+        self.0.is_empty()
+    }
+
+    pub fn into_inner(self) -> BTreeMap<String, ABITestCase> {
+        self.0
+    }
+}
diff --git a/utils/validation/src/error.rs b/utils/validation/src/error.rs
new file mode 100644
index 0000000000..396c218490
--- /dev/null
+++ b/utils/validation/src/error.rs
@@ -0,0 +1,31 @@
+use std::{io, path::PathBuf};
+
+use thiserror::Error;
+
+use casper_types::bytesrepr;
+
+use crate::test_case;
+
+#[derive(Error, Debug)]
+pub enum Error {
+    #[error(transparent)]
+    Io(#[from] io::Error),
+    #[error(transparent)]
+    Deserialize(#[from] serde_json::Error),
+    #[error("missing file stem in: {0}")]
+    NoStem(PathBuf),
+    #[error("unsupported file format at {0}")]
+    UnsupportedFormat(PathBuf),
+    #[error("file {0} lacks extension")]
+    NoExtension(PathBuf),
+    #[error("{0}")]
+    Bytesrepr(bytesrepr::Error),
+    #[error(transparent)]
+    TestCase(#[from] test_case::Error),
+}
+
+impl From<bytesrepr::Error> for Error {
+    fn from(error: bytesrepr::Error) -> Self {
+        Error::Bytesrepr(error)
+    }
+}
diff --git a/utils/validation/src/generators.rs b/utils/validation/src/generators.rs
new file mode 100644
index 0000000000..0c09bfb9ae
--- /dev/null
+++ b/utils/validation/src/generators.rs
@@ -0,0 +1,495 @@
+use std::{ + collections::{BTreeMap, BTreeSet}, + iter::FromIterator, +}; + +use casper_types::{ + account::{ + Account, AccountHash, ActionThresholds as AccountActionThresholds, + AssociatedKeys as AccountAssociatedKeys, Weight as AccountWeight, + }, + addressable_entity::{ActionThresholds, AddressableEntity, AssociatedKeys, EntityKind}, + contracts::NamedKeys, + system::{ + auction::{ + Bid, BidAddr, BidKind, Delegator, DelegatorBid, DelegatorKind, EraInfo, + SeigniorageAllocation, UnbondingPurse, ValidatorBid, WithdrawPurse, + }, + mint::BalanceHoldAddr, + }, + AccessRights, BlockTime, ByteCode, ByteCodeHash, ByteCodeKind, CLType, CLTyped, CLValue, + ContractRuntimeTag, DeployHash, DeployInfo, EntityAddr, EntityEntryPoint, EntityVersionKey, + EntityVersions, EntryPointAccess, EntryPointPayment, EntryPointType, EntryPointValue, EraId, + Group, Groups, Key, Package, PackageHash, PackageStatus, Parameter, ProtocolVersion, PublicKey, + SecretKey, StoredValue, TransferAddr, TransferV1, URef, U512, +}; +use casper_validation::{ + abi::{ABIFixture, ABITestCase}, + error::Error, + Fixture, TestFixtures, +}; + +const DO_NOTHING_BYTES: &[u8] = b"\x00asm\x01\x00\x00\x00\x01\x04\x01`\x00\x00\x03\x02\x01\x00\x05\x03\x01\x00\x01\x07\x08\x01\x04call\x00\x00\n\x04\x01\x02\x00\x0b"; + +pub fn make_abi_test_fixtures() -> Result { + let basic_fixture = { + let mut basic = BTreeMap::new(); + basic.insert( + "SerializeU8".to_string(), + ABITestCase::from_inputs(vec![254u8.into()])?, + ); + basic.insert( + "SerializeU16".to_string(), + ABITestCase::from_inputs(vec![62356u16.into()])?, + ); + basic.insert( + "SerializeU32".to_string(), + ABITestCase::from_inputs(vec![3490072870u32.into()])?, + ); + basic.insert( + "SerializeU64".to_string(), + ABITestCase::from_inputs(vec![10829133186225377555u64.into()])?, + ); + basic.insert( + "SerializeEmptyString".to_string(), + ABITestCase::from_inputs(vec![String::new().into()])?, + ); + basic.insert( + "SerializeString".to_string(), + 
ABITestCase::from_inputs(vec!["Hello, world!".to_string().into()])?, + ); + basic.insert( + "SerializeBool".to_string(), + ABITestCase::from_inputs(vec![true.into(), false.into()])?, + ); + Fixture::ABI { + name: "basic".to_string(), + fixture: ABIFixture::from(basic), + } + }; + + let legacy_transfer = TransferV1::new( + DeployHash::from_raw([44; 32]), + AccountHash::new([100; 32]), + Some(AccountHash::new([101; 32])), + URef::new([10; 32], AccessRights::WRITE), + URef::new([11; 32], AccessRights::WRITE), + U512::from(15_000_000_000u64), + U512::from(2_500_000_000u64), + Some(1), + ); + let deploy_info = DeployInfo::new( + DeployHash::from_raw([55; 32]), + &[TransferAddr::new([1; 32]), TransferAddr::new([2; 32])], + AccountHash::new([100; 32]), + URef::new([10; 32], AccessRights::READ_ADD_WRITE), + U512::from(2_500_000_000u64), + ); + + let validator_secret_key = + SecretKey::ed25519_from_bytes([42; 32]).expect("should create secret key"); + let delegator_secret_key = + SecretKey::secp256k1_from_bytes([43; 32]).expect("should create secret key"); + + let era_info = { + let mut era_info = EraInfo::new(); + + era_info + .seigniorage_allocations_mut() + .push(SeigniorageAllocation::Validator { + validator_public_key: PublicKey::from(&validator_secret_key), + amount: U512::from(1_000_000_000), + }); + + era_info + .seigniorage_allocations_mut() + .push(SeigniorageAllocation::Delegator { + validator_public_key: PublicKey::from(&validator_secret_key), + delegator_public_key: PublicKey::from(&delegator_secret_key), + amount: U512::from(1_000_000_000), + }); + era_info + .seigniorage_allocations_mut() + .push(SeigniorageAllocation::DelegatorKind { + validator_public_key: PublicKey::from(&validator_secret_key), + delegator_kind: PublicKey::from(&delegator_secret_key).into(), + amount: U512::from(1_000_000_000), + }); + era_info + }; + + let validator_public_key = PublicKey::from(&validator_secret_key); + let validator_bid_key = + 
Key::BidAddr(BidAddr::new_from_public_keys(&validator_public_key, None)); + let validator_bid = ValidatorBid::locked( + validator_public_key.clone(), + URef::new([10; 32], AccessRights::READ_ADD_WRITE), + U512::from(50_000_000_000u64), + 100, + u64::MAX, + 0, + u64::MAX, + 0, + ); + let validator_bid_kind = BidKind::Validator(Box::new(validator_bid)); + let delegator_public_key = PublicKey::from(&delegator_secret_key); + let delegator_bid_key = Key::BidAddr(BidAddr::new_from_public_keys( + &validator_public_key, + Some(&delegator_public_key.clone()), + )); + let delegator = Delegator::locked( + delegator_public_key.clone(), + U512::from(1_000_000_000u64), + URef::new([11; 32], AccessRights::READ_ADD_WRITE), + validator_public_key.clone(), + u64::MAX, + ); + + let delegator_bid_kind = BidKind::Delegator(Box::new(DelegatorBid::locked( + DelegatorKind::PublicKey(delegator_public_key.clone()), + U512::from(1_000_000_000u64), + URef::new([11; 32], AccessRights::READ_ADD_WRITE), + validator_public_key.clone(), + u64::MAX, + ))); + + let _delegator_bid = DelegatorBid::locked( + delegator_public_key.clone().into(), + U512::from(1_000_000_000u64), + URef::new([11; 32], AccessRights::READ_ADD_WRITE), + validator_public_key.clone(), + u64::MAX, + ); + + let unified_bid_key = Key::BidAddr(BidAddr::legacy( + validator_public_key.to_account_hash().value(), + )); + let unified_bid = { + let mut unified_bid = Bid::locked( + validator_public_key.clone(), + URef::new([10; 32], AccessRights::READ_ADD_WRITE), + U512::from(50_000_000_000u64), + 100, + u64::MAX, + ); + unified_bid + .delegators_mut() + .insert(delegator.delegator_public_key().clone(), delegator.clone()); + unified_bid + }; + let unified_bid_kind = BidKind::Unified(Box::new(unified_bid)); + + let original_bid_key = Key::Bid(validator_public_key.to_account_hash()); + let original_bid = { + let mut bid = Bid::locked( + validator_public_key, + URef::new([10; 32], AccessRights::READ_ADD_WRITE), + 
U512::from(50_000_000_000u64), + 100, + u64::MAX, + ); + bid.delegators_mut() + .insert(delegator.delegator_public_key().clone(), delegator); + bid + }; + + let withdraw_purse_1 = WithdrawPurse::new( + URef::new([10; 32], AccessRights::READ), + PublicKey::from(&validator_secret_key), + PublicKey::from(&validator_secret_key), + EraId::new(41), + U512::from(60_000_000_000u64), + ); + let withdraw_purse_2 = WithdrawPurse::new( + URef::new([11; 32], AccessRights::READ), + PublicKey::from(&validator_secret_key), + PublicKey::from(&delegator_secret_key), + EraId::new(42), + U512::from(50_000_000_000u64), + ); + let unbonding_purse_1 = UnbondingPurse::new( + URef::new([10; 32], AccessRights::READ), + PublicKey::from(&validator_secret_key), + PublicKey::from(&validator_secret_key), + EraId::new(41), + U512::from(60_000_000_000u64), + None, + ); + let unbonding_purse_2 = UnbondingPurse::new( + URef::new([11; 32], AccessRights::READ), + PublicKey::from(&validator_secret_key), + PublicKey::from(&delegator_secret_key), + EraId::new(42), + U512::from(50_000_000_000u64), + None, + ); + + let keys_fixture = { + const ACCOUNT_KEY: Key = Key::Account(AccountHash::new([42; 32])); + const HASH_KEY: Key = Key::Hash([42; 32]); + const UREF_KEY: Key = Key::URef(URef::new([42; 32], AccessRights::READ)); + const TRANSFER_KEY: Key = Key::Transfer(TransferAddr::new([42; 32])); + const DEPLOY_INFO_KEY: Key = Key::DeployInfo(DeployHash::from_raw([42; 32])); + const ERA_INFO_KEY: Key = Key::EraInfo(EraId::new(42)); + const BALANCE_KEY: Key = Key::Balance([42; 32]); + const BALANCE_HOLD_KEY: Key = Key::BalanceHold(BalanceHoldAddr::Gas { + purse_addr: [42; 32], + block_time: BlockTime::new(0), + }); + const WITHDRAW_KEY: Key = Key::Withdraw(AccountHash::new([42; 32])); + const DICTIONARY_KEY: Key = Key::Dictionary([42; 32]); + const SYSTEM_ENTITY_REGISTRY_KEY: Key = Key::SystemEntityRegistry; + const ERA_SUMMARY_KEY: Key = Key::EraSummary; + const UNBOND_KEY: Key = 
Key::Unbond(AccountHash::new([42; 32])); + const CHAINSPEC_REGISTRY_KEY: Key = Key::ChainspecRegistry; + const CHECKSUM_REGISTRY_KEY: Key = Key::ChecksumRegistry; + + let mut keys = BTreeMap::new(); + keys.insert( + "Account".to_string(), + ABITestCase::from_inputs(vec![ACCOUNT_KEY.into()])?, + ); + keys.insert( + "Hash".to_string(), + ABITestCase::from_inputs(vec![HASH_KEY.into()])?, + ); + keys.insert( + "URef".to_string(), + ABITestCase::from_inputs(vec![UREF_KEY.into()])?, + ); + keys.insert( + "Transfer".to_string(), + ABITestCase::from_inputs(vec![TRANSFER_KEY.into()])?, + ); + keys.insert( + "DeployInfo".to_string(), + ABITestCase::from_inputs(vec![DEPLOY_INFO_KEY.into()])?, + ); + keys.insert( + "EraInfo".to_string(), + ABITestCase::from_inputs(vec![ERA_INFO_KEY.into()])?, + ); + keys.insert( + "Balance".to_string(), + ABITestCase::from_inputs(vec![BALANCE_KEY.into()])?, + ); + keys.insert( + "BalanceHold".to_string(), + ABITestCase::from_inputs(vec![BALANCE_HOLD_KEY.into()])?, + ); + keys.insert( + "WriteBid".to_string(), + ABITestCase::from_inputs(vec![original_bid_key.into()])?, + ); + keys.insert( + "WriteUnifiedBid".to_string(), + ABITestCase::from_inputs(vec![unified_bid_key.into()])?, + ); + keys.insert( + "WriteValidatorBid".to_string(), + ABITestCase::from_inputs(vec![validator_bid_key.into()])?, + ); + keys.insert( + "WriteDelegatorBid".to_string(), + ABITestCase::from_inputs(vec![delegator_bid_key.into()])?, + ); + + keys.insert( + "Withdraw".to_string(), + ABITestCase::from_inputs(vec![WITHDRAW_KEY.into()])?, + ); + keys.insert( + "Dictionary".to_string(), + ABITestCase::from_inputs(vec![DICTIONARY_KEY.into()])?, + ); + keys.insert( + "SystemEntityRegistry".to_string(), + ABITestCase::from_inputs(vec![SYSTEM_ENTITY_REGISTRY_KEY.into()])?, + ); + keys.insert( + "EraSummary".to_string(), + ABITestCase::from_inputs(vec![ERA_SUMMARY_KEY.into()])?, + ); + keys.insert( + "Unbond".to_string(), + ABITestCase::from_inputs(vec![UNBOND_KEY.into()])?, + ); 
+ keys.insert( + "ChainspecRegistry".to_string(), + ABITestCase::from_inputs(vec![CHAINSPEC_REGISTRY_KEY.into()])?, + ); + keys.insert( + "ChecksumRegistry".to_string(), + ABITestCase::from_inputs(vec![CHECKSUM_REGISTRY_KEY.into()])?, + ); + Fixture::ABI { + name: "key".to_string(), + fixture: ABIFixture::from(keys), + } + }; + + let stored_value_fixture = { + let mut stored_value = BTreeMap::new(); + + let cl_value = CLValue::from_t("Hello, world!").expect("should create cl value"); + + stored_value.insert( + "CLValue".to_string(), + ABITestCase::from_inputs(vec![StoredValue::CLValue(cl_value).into()])?, + ); + + let account_secret_key = + SecretKey::ed25519_from_bytes([42; 32]).expect("should create secret key"); + let account_public_key = PublicKey::from(&account_secret_key); + let account_hash = account_public_key.to_account_hash(); + + let account_named_keys = { + let mut named_keys = NamedKeys::new(); + named_keys.insert("hash".to_string(), Key::Hash([42; 32])); + named_keys.insert( + "uref".to_string(), + Key::URef(URef::new([16; 32], AccessRights::READ_ADD_WRITE)), + ); + named_keys + }; + + let associated_keys = AccountAssociatedKeys::new(account_hash, AccountWeight::new(1)); + + let account = Account::new( + account_hash, + account_named_keys, + URef::new([17; 32], AccessRights::WRITE), + associated_keys, + AccountActionThresholds::new(AccountWeight::new(1), AccountWeight::new(1)).unwrap(), + ); + + stored_value.insert( + "Account".to_string(), + ABITestCase::from_inputs(vec![StoredValue::Account(account).into()])?, + ); + + let byte_code = ByteCode::new(ByteCodeKind::V1CasperWasm, DO_NOTHING_BYTES.to_vec()); + + stored_value.insert( + "ByteCode".to_string(), + ABITestCase::from_inputs(vec![StoredValue::ByteCode(byte_code).into()])?, + ); + + let public_contract_entry_point = EntityEntryPoint::new( + "public_entry_point_func", + vec![ + Parameter::new("param1", U512::cl_type()), + Parameter::new("param2", String::cl_type()), + ], + CLType::Unit, + 
EntryPointAccess::Public, + EntryPointType::Called, + EntryPointPayment::Caller, + ); + + stored_value.insert( + "EntryPoint".to_string(), + ABITestCase::from_inputs(vec![StoredValue::EntryPoint(EntryPointValue::V1CasperVm( + public_contract_entry_point, + )) + .into()])?, + ); + + let entity = AddressableEntity::new( + PackageHash::new([100; 32]), + ByteCodeHash::new([101; 32]), + ProtocolVersion::V1_0_0, + URef::default(), + AssociatedKeys::default(), + ActionThresholds::default(), + EntityKind::SmartContract(ContractRuntimeTag::VmCasperV1), + ); + stored_value.insert( + "AddressableEntity".to_string(), + ABITestCase::from_inputs(vec![StoredValue::AddressableEntity(entity).into()])?, + ); + + let mut active_versions = BTreeMap::new(); + let v1_hash = EntityAddr::SmartContract([99; 32]); + let v2_hash = EntityAddr::SmartContract([100; 32]); + active_versions.insert(EntityVersionKey::new(1, 2), v1_hash); + let v1 = EntityVersionKey::new(1, 1); + active_versions.insert(v1, v2_hash); + let active_versions = EntityVersions::from(active_versions); + + let mut disabled_versions = BTreeSet::new(); + disabled_versions.insert(v1); + + let mut groups = Groups::new(); + groups.insert(Group::new("Empty"), BTreeSet::new()); + groups.insert( + Group::new("Single"), + BTreeSet::from_iter(vec![URef::new([55; 32], AccessRights::READ)]), + ); + + let package = Package::new( + active_versions, + disabled_versions, + groups, + PackageStatus::Locked, + ); + + stored_value.insert( + "SmartContract".to_string(), + ABITestCase::from_inputs(vec![StoredValue::SmartContract(package).into()])?, + ); + + stored_value.insert( + "Transfer".to_string(), + ABITestCase::from_inputs(vec![StoredValue::Transfer(legacy_transfer).into()])?, + ); + stored_value.insert( + "DeployInfo".to_string(), + ABITestCase::from_inputs(vec![StoredValue::DeployInfo(deploy_info).into()])?, + ); + stored_value.insert( + "EraInfo".to_string(), + ABITestCase::from_inputs(vec![StoredValue::EraInfo(era_info).into()])?, + 
); + + stored_value.insert( + "Bid".to_string(), + ABITestCase::from_inputs(vec![StoredValue::Bid(Box::new(original_bid)).into()])?, + ); + stored_value.insert( + "UnifiedBid".to_string(), + ABITestCase::from_inputs(vec![StoredValue::BidKind(unified_bid_kind).into()])?, + ); + stored_value.insert( + "ValidatorBid".to_string(), + ABITestCase::from_inputs(vec![StoredValue::BidKind(validator_bid_kind).into()])?, + ); + stored_value.insert( + "DelegatorBid".to_string(), + ABITestCase::from_inputs(vec![StoredValue::BidKind(delegator_bid_kind).into()])?, + ); + stored_value.insert( + "Withdraw".to_string(), + ABITestCase::from_inputs(vec![StoredValue::Withdraw(vec![ + withdraw_purse_1, + withdraw_purse_2, + ]) + .into()])?, + ); + stored_value.insert( + "Unbonding".to_string(), + ABITestCase::from_inputs(vec![StoredValue::Unbonding(vec![ + unbonding_purse_1, + unbonding_purse_2, + ]) + .into()])?, + ); + + Fixture::ABI { + name: "stored_value".to_string(), + fixture: ABIFixture::from(stored_value), + } + }; + + Ok(vec![basic_fixture, stored_value_fixture, keys_fixture]) +} diff --git a/utils/validation/src/lib.rs b/utils/validation/src/lib.rs new file mode 100644 index 0000000000..5770a8db5c --- /dev/null +++ b/utils/validation/src/lib.rs @@ -0,0 +1,98 @@ +//! This crate contains types that contain the logic necessary to validate Casper implementation +//! correctness using external test fixtures. +//! +//! Casper test fixtures can contain multiple directories at the root level, which corresponds to a +//! test category. For example structure of files found inside `ABI` can differ from files in other +//! directories. +//! +//! Currently supported test fixtures: +//! +//! 
* [ABI](abi) + +#[macro_use] +extern crate derive_more; + +pub mod abi; +pub mod error; +pub mod test_case; +pub mod utils; + +use std::{ + ffi::OsStr, + fs::{self, File}, + io::BufReader, + path::{Path, PathBuf}, +}; + +use serde::de::DeserializeOwned; + +use abi::ABIFixture; +use error::Error; + +pub const ABI_TEST_FIXTURES: &str = "ABI"; +const JSON_FILE_EXT: &str = "json"; + +#[derive(Debug)] +pub enum Fixture { + /// ABI fixture. + ABI { + /// Name of the test fixture (taken from a file name). + name: String, + /// ABI fixture itself. + fixture: ABIFixture, + }, +} + +/// Loads a generic test fixture from a file with a reader based on a file extension. +/// +/// Currently only JSON files are supported. +pub fn load_fixture(path: PathBuf) -> Result { + let file = File::open(&path)?; + let buffered_reader = BufReader::new(file); + + let fixture = match path.extension().and_then(OsStr::to_str) { + Some(extension) if extension.to_ascii_lowercase() == JSON_FILE_EXT => { + serde_json::from_reader(buffered_reader)? + } + Some(_) => return Err(Error::UnsupportedFormat(path)), + None => return Err(Error::NoExtension(path)), + }; + Ok(fixture) +} + +/// A series of fixtures. One element represents a single structured file. +pub type TestFixtures = Vec; + +/// Loads fixtures from a directory. +pub fn load_fixtures(path: &Path) -> Result { + let mut test_fixtures = TestFixtures::new(); + + for entry in fs::read_dir(path)? { + let entry = entry?; + + if !entry.metadata()?.is_dir() { + continue; + } + + let dir_entries = match entry.path().file_name() { + Some(file_name) if file_name == ABI_TEST_FIXTURES => { + utils::recursive_read_dir(&entry.path())? 
+ } + None | Some(_) => continue, + }; + + for dir_entry in dir_entries { + let dir_entry_path = dir_entry.path(); + let fixture = load_fixture(dir_entry_path.clone())?; + let filename = dir_entry_path + .file_stem() + .and_then(OsStr::to_str) + .ok_or_else(|| Error::NoStem(dir_entry_path.clone()))?; + test_fixtures.push(Fixture::ABI { + name: filename.to_string(), + fixture, + }); + } + } + Ok(test_fixtures) +} diff --git a/utils/validation/src/main.rs b/utils/validation/src/main.rs new file mode 100644 index 0000000000..f2abdb7f65 --- /dev/null +++ b/utils/validation/src/main.rs @@ -0,0 +1,64 @@ +mod generators; + +use std::{fs::File, io::BufWriter, path::PathBuf}; + +use anyhow::Context; +use clap::Parser; + +use casper_validation::{Fixture, ABI_TEST_FIXTURES}; + +#[derive(Parser)] +#[clap(version = "1.0")] +struct Opts { + #[clap(subcommand)] + subcmd: SubCommand, +} + +#[derive(Parser)] +enum SubCommand { + Generate(Generate), +} + +/// Generates example test fixtures from the code. +/// +/// Do not use with day to day development - for example to fix an error in serialization code by +/// replacing the fixture with possibly invalid code. +#[derive(Parser)] +struct Generate { + /// Path to fixtures directory. 
+ #[clap(short, long, parse(from_os_str))] + output: PathBuf, +} + +impl Generate { + fn run(self) -> anyhow::Result<()> { + let fixtures = generators::make_abi_test_fixtures()?; + + for Fixture::ABI { + name: file_name, + fixture, + } in fixtures + { + let output_path = { + let mut output_path = self.output.clone(); + output_path.push(ABI_TEST_FIXTURES); + output_path.push(file_name + ".json"); + output_path + }; + + let file = File::create(&output_path) + .context(format!("Unable to create output file {:?}", output_path))?; + let buffered_writer = BufWriter::new(file); + serde_json::to_writer_pretty(buffered_writer, &fixture)?; + } + + Ok(()) + } +} + +fn main() -> anyhow::Result<()> { + let opts: Opts = Opts::parse(); + match opts.subcmd { + SubCommand::Generate(generate) => generate.run(), + } +} diff --git a/utils/validation/src/test_case.rs b/utils/validation/src/test_case.rs new file mode 100644 index 0000000000..db53f3f964 --- /dev/null +++ b/utils/validation/src/test_case.rs @@ -0,0 +1,29 @@ +use casper_types::bytesrepr; +use hex::FromHexError; +use thiserror::Error; + +#[derive(Debug, Error)] +pub enum Error { + #[error("{0}")] + Bytesrepr(bytesrepr::Error), + #[error("data mismatch expected {} != actual {}", base16::encode_lower(&.expected), base16::encode_lower(&.actual))] + DataMismatch { expected: Vec, actual: Vec }, + #[error("length mismatch expected {expected} != actual {actual}")] + LengthMismatch { expected: usize, actual: usize }, + #[error("expected JSON string in output field")] + WrongOutputType, + #[error("not a valid hex string")] + Hex(#[from] FromHexError), + #[error(transparent)] + Json(#[from] serde_json::Error), +} + +impl From for Error { + fn from(error: bytesrepr::Error) -> Self { + Error::Bytesrepr(error) + } +} + +pub trait TestCase { + fn run_test(&self) -> Result<(), Error>; +} diff --git a/utils/validation/src/utils.rs b/utils/validation/src/utils.rs new file mode 100644 index 0000000000..51884bd358 --- /dev/null +++ 
b/utils/validation/src/utils.rs
@@ -0,0 +1,22 @@
+use std::{
+    fs::{self, DirEntry},
+    io,
+    path::Path,
+};
+
+/// Like [`fs::read_dir`] but recursive.
+pub fn recursive_read_dir(dir: &Path) -> io::Result<Vec<DirEntry>> {
+    let mut result = Vec::new();
+
+    for entry in fs::read_dir(dir)? {
+        let entry = entry?;
+        let path = entry.path();
+        if path.is_dir() {
+            result.append(&mut recursive_read_dir(&path)?);
+        } else {
+            result.push(entry);
+        }
+    }
+
+    Ok(result)
+}
diff --git a/utils/validation/tests/fixtures/ABI/basic.json b/utils/validation/tests/fixtures/ABI/basic.json
new file mode 100644
index 0000000000..53eb27a103
--- /dev/null
+++ b/utils/validation/tests/fixtures/ABI/basic.json
@@ -0,0 +1,69 @@
+{
+  "SerializeBool": {
+    "input": [
+      {
+        "type": "Bool",
+        "value": true
+      },
+      {
+        "type": "Bool",
+        "value": false
+      }
+    ],
+    "output": "0100"
+  },
+  "SerializeEmptyString": {
+    "input": [
+      {
+        "type": "String",
+        "value": ""
+      }
+    ],
+    "output": "00000000"
+  },
+  "SerializeString": {
+    "input": [
+      {
+        "type": "String",
+        "value": "Hello, world!"
+ } + ], + "output": "0d00000048656c6c6f2c20776f726c6421" + }, + "SerializeU16": { + "input": [ + { + "type": "U16", + "value": 62356 + } + ], + "output": "94f3" + }, + "SerializeU32": { + "input": [ + { + "type": "U32", + "value": 3490072870 + } + ], + "output": "264906d0" + }, + "SerializeU64": { + "input": [ + { + "type": "U64", + "value": 10829133186225377555 + } + ], + "output": "13915bf641cf4896" + }, + "SerializeU8": { + "input": [ + { + "type": "U8", + "value": 254 + } + ], + "output": "fe" + } +} \ No newline at end of file diff --git a/utils/validation/tests/fixtures/ABI/bignum.json b/utils/validation/tests/fixtures/ABI/bignum.json new file mode 100644 index 0000000000..25939e5213 --- /dev/null +++ b/utils/validation/tests/fixtures/ABI/bignum.json @@ -0,0 +1,47 @@ +{ + "U512_zero": { + "input": [ + [ + "U512", + "0" + ] + ], + "output": "00" + }, + "U512_one": { + "input": [ + [ + "U512", + "1" + ] + ], + "output": "0101" + }, + "U512_u32_max_value": { + "input": [ + [ + "U512", + "4294967294" + ] + ], + "output": "04feffffff" + }, + "U512_u64_max_value_plus_one": { + "input": [ + [ + "U512", + "18446744073709551616" + ] + ], + "output": "09000000000000000001" + }, + "U512_max": { + "input": [ + [ + "U512", + "13407807929942597099574024998205846127479365820592393377723561443721764030073546976801874298166903427690031858186486050853753882811946569946433649006084095" + ] + ], + "output": "40ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + } +} \ No newline at end of file diff --git a/utils/validation/tests/fixtures/ABI/clvalue.json b/utils/validation/tests/fixtures/ABI/clvalue.json new file mode 100644 index 0000000000..5d81315216 --- /dev/null +++ b/utils/validation/tests/fixtures/ABI/clvalue.json @@ -0,0 +1,42 @@ +{ + "String": { + "input": [ + [ + "CLValue", + { + "cl_type": "String", + "bytes": "0d00000048656c6c6f2c20776f726c6421", + "parsed": "Hello, world!" 
+ } + ] + ], + "output": "110000000d00000048656c6c6f2c20776f726c64210a" + }, + "Map": { + "input": [ + [ + "CLValue", + { + "cl_type": { + "Map": { + "key": "String", + "value": "U64" + } + }, + "bytes": "020000000300000061626301000000000000000300000078797a0200000000000000", + "parsed": [ + { + "key": "abc", + "value": 1 + }, + { + "key": "xyz", + "value": 2 + } + ] + } + ] + ], + "output": "22000000020000000300000061626301000000000000000300000078797a0200000000000000110a05" + } +} \ No newline at end of file diff --git a/utils/validation/tests/fixtures/ABI/collections.json b/utils/validation/tests/fixtures/ABI/collections.json new file mode 100644 index 0000000000..1a864164d4 --- /dev/null +++ b/utils/validation/tests/fixtures/ABI/collections.json @@ -0,0 +1,23 @@ +{ + "VectorOfStrings": { + "input": [ + [ + "U32", + 3 + ], + [ + "String", + "Hello" + ], + [ + "String", + "world" + ], + [ + "String", + "!" + ] + ], + "output": "030000000500000048656c6c6f05000000776f726c640100000021" + } +} \ No newline at end of file diff --git a/utils/validation/tests/fixtures/ABI/key.json b/utils/validation/tests/fixtures/ABI/key.json new file mode 100644 index 0000000000..4eb050c240 --- /dev/null +++ b/utils/validation/tests/fixtures/ABI/key.json @@ -0,0 +1,173 @@ +{ + "Account": { + "input": [ + { + "type": "Key", + "value": "account-hash-2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a" + } + ], + "output": "002a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a" + }, + "Balance": { + "input": [ + { + "type": "Key", + "value": "balance-2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a" + } + ], + "output": "062a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a" + }, + "BalanceHold": { + "input": [ + { + "type": "Key", + "value": "balance-hold-002a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a0000000000000000" + } + ], + "output": 
"16002a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a0000000000000000" + }, + "ChainspecRegistry": { + "input": [ + { + "type": "Key", + "value": "chainspec-registry-0000000000000000000000000000000000000000000000000000000000000000" + } + ], + "output": "0d0000000000000000000000000000000000000000000000000000000000000000" + }, + "ChecksumRegistry": { + "input": [ + { + "type": "Key", + "value": "checksum-registry-0000000000000000000000000000000000000000000000000000000000000000" + } + ], + "output": "0e0000000000000000000000000000000000000000000000000000000000000000" + }, + "DeployInfo": { + "input": [ + { + "type": "Key", + "value": "deploy-2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a" + } + ], + "output": "042a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a" + }, + "Dictionary": { + "input": [ + { + "type": "Key", + "value": "dictionary-2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a" + } + ], + "output": "092a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a" + }, + "EraInfo": { + "input": [ + { + "type": "Key", + "value": "era-42" + } + ], + "output": "052a00000000000000" + }, + "EraSummary": { + "input": [ + { + "type": "Key", + "value": "era-summary-0000000000000000000000000000000000000000000000000000000000000000" + } + ], + "output": "0b0000000000000000000000000000000000000000000000000000000000000000" + }, + "Hash": { + "input": [ + { + "type": "Key", + "value": "hash-2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a" + } + ], + "output": "012a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a" + }, + "SystemEntityRegistry": { + "input": [ + { + "type": "Key", + "value": "system-entity-registry-0000000000000000000000000000000000000000000000000000000000000000" + } + ], + "output": "0a0000000000000000000000000000000000000000000000000000000000000000" + }, + "Transfer": { + "input": [ + { + "type": "Key", + "value": 
"transfer-2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a" + } + ], + "output": "032a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a" + }, + "URef": { + "input": [ + { + "type": "Key", + "value": "uref-2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a-001" + } + ], + "output": "022a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a01" + }, + "Unbond": { + "input": [ + { + "type": "Key", + "value": "unbond-2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a" + } + ], + "output": "0c2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a" + }, + "Withdraw": { + "input": [ + { + "type": "Key", + "value": "withdraw-2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a" + } + ], + "output": "082a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a" + }, + "WriteBid": { + "input": [ + { + "type": "Key", + "value": "bid-306633f962155a7d46658adb36143f28668f530454fe788c927cecf62e5964a1" + } + ], + "output": "07306633f962155a7d46658adb36143f28668f530454fe788c927cecf62e5964a1" + }, + "WriteDelegatorBid": { + "input": [ + { + "type": "Key", + "value": "bid-addr-02306633f962155a7d46658adb36143f28668f530454fe788c927cecf62e5964a1b27960a454670985e9072683f779602413d6ebf738ba0dc4200534c57de17e12" + } + ], + "output": "0f02306633f962155a7d46658adb36143f28668f530454fe788c927cecf62e5964a1b27960a454670985e9072683f779602413d6ebf738ba0dc4200534c57de17e12" + }, + "WriteUnifiedBid": { + "input": [ + { + "type": "Key", + "value": "bid-addr-00306633f962155a7d46658adb36143f28668f530454fe788c927cecf62e5964a1" + } + ], + "output": "0f00306633f962155a7d46658adb36143f28668f530454fe788c927cecf62e5964a1" + }, + "WriteValidatorBid": { + "input": [ + { + "type": "Key", + "value": "bid-addr-01306633f962155a7d46658adb36143f28668f530454fe788c927cecf62e5964a1" + } + ], + "output": "0f01306633f962155a7d46658adb36143f28668f530454fe788c927cecf62e5964a1" + } +} \ No newline at end of file diff --git 
a/utils/validation/tests/fixtures/ABI/stored_value.json b/utils/validation/tests/fixtures/ABI/stored_value.json new file mode 100644 index 0000000000..17524b6875 --- /dev/null +++ b/utils/validation/tests/fixtures/ABI/stored_value.json @@ -0,0 +1,420 @@ +{ + "Account": { + "input": [ + { + "type": "StoredValue", + "value": { + "Account": { + "account_hash": "account-hash-306633f962155a7d46658adb36143f28668f530454fe788c927cecf62e5964a1", + "named_keys": [ + { + "name": "hash", + "key": "hash-2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a" + }, + { + "name": "uref", + "key": "uref-1010101010101010101010101010101010101010101010101010101010101010-007" + } + ], + "main_purse": "uref-1111111111111111111111111111111111111111111111111111111111111111-002", + "associated_keys": [ + { + "account_hash": "account-hash-306633f962155a7d46658adb36143f28668f530454fe788c927cecf62e5964a1", + "weight": 1 + } + ], + "action_thresholds": { + "deployment": 1, + "key_management": 1 + } + } + } + } + ], + "output": "01306633f962155a7d46658adb36143f28668f530454fe788c927cecf62e5964a1020000000400000068617368012a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a04000000757265660210101010101010101010101010101010101010101010101010101010101010100711111111111111111111111111111111111111111111111111111111111111110201000000306633f962155a7d46658adb36143f28668f530454fe788c927cecf62e5964a1010101" + }, + "AddressableEntity": { + "input": [ + { + "type": "StoredValue", + "value": { + "AddressableEntity": { + "protocol_version": "1.0.0", + "entity_kind": { + "SmartContract": "VmCasperV1" + }, + "package_hash": "package-6464646464646464646464646464646464646464646464646464646464646464", + "byte_code_hash": "byte-code-6565656565656565656565656565656565656565656565656565656565656565", + "main_purse": "uref-0000000000000000000000000000000000000000000000000000000000000000-000", + "associated_keys": [], + "action_thresholds": { + "deployment": 1, + "upgrade_management": 1, + 
"key_management": 1 + } + } + } + } + ], + "output": "0d64646464646464646464646464646464646464646464646464646464646464646565656565656565656565656565656565656565656565656565656565656565010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000101010200" + }, + "Bid": { + "input": [ + { + "type": "StoredValue", + "value": { + "Bid": { + "validator_public_key": "01197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d61", + "bonding_purse": "uref-0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a-007", + "staked_amount": "50000000000", + "delegation_rate": 100, + "vesting_schedule": { + "initial_release_timestamp_millis": 18446744073709551615, + "locked_amounts": null + }, + "delegators": [ + { + "delegator_public_key": "0202bb58b5feca505c74edc000d8282fc556e51a1024fc8e7d7e56c6f887c5c8d5f2", + "delegator": { + "delegator_public_key": "0202bb58b5feca505c74edc000d8282fc556e51a1024fc8e7d7e56c6f887c5c8d5f2", + "staked_amount": "1000000000", + "bonding_purse": "uref-0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b-007", + "validator_public_key": "01197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d61", + "vesting_schedule": { + "initial_release_timestamp_millis": 18446744073709551615, + "locked_amounts": null + } + } + } + ], + "inactive": false + } + } + } + ], + "output": "0801197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d610a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a070500743ba40b6401ffffffffffffffff00010000000202bb58b5feca505c74edc000d8282fc556e51a1024fc8e7d7e56c6f887c5c8d5f20202bb58b5feca505c74edc000d8282fc556e51a1024fc8e7d7e56c6f887c5c8d5f20400ca9a3b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0701197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d6101ffffffffffffffff0000" + }, + "ByteCode": { + "input": [ + { + "type": "StoredValue", + "value": { + "ByteCode": { + "kind": "V1CasperWasm", + "bytes": 
"0061736d010000000104016000000302010005030100010708010463616c6c00000a040102000b" + } + } + } + ], + "output": "0e01270000000061736d010000000104016000000302010005030100010708010463616c6c00000a040102000b" + }, + "CLValue": { + "input": [ + { + "type": "StoredValue", + "value": { + "CLValue": { + "cl_type": "String", + "bytes": "0d00000048656c6c6f2c20776f726c6421", + "parsed": "Hello, world!" + } + } + } + ], + "output": "00110000000d00000048656c6c6f2c20776f726c64210a" + }, + "DelegatorBid": { + "input": [ + { + "type": "StoredValue", + "value": { + "BidKind": { + "Delegator": { + "delegator_kind": { + "PublicKey": "0202bb58b5feca505c74edc000d8282fc556e51a1024fc8e7d7e56c6f887c5c8d5f2" + }, + "staked_amount": "1000000000", + "bonding_purse": "uref-0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b-007", + "validator_public_key": "01197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d61", + "vesting_schedule": { + "initial_release_timestamp_millis": 18446744073709551615, + "locked_amounts": null + } + } + } + } + } + ], + "output": "0b02000202bb58b5feca505c74edc000d8282fc556e51a1024fc8e7d7e56c6f887c5c8d5f20400ca9a3b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0701197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d6101ffffffffffffffff00" + }, + "DeployInfo": { + "input": [ + { + "type": "StoredValue", + "value": { + "DeployInfo": { + "deploy_hash": "3737373737373737373737373737373737373737373737373737373737373737", + "transfers": [ + "transfer-0101010101010101010101010101010101010101010101010101010101010101", + "transfer-0202020202020202020202020202020202020202020202020202020202020202" + ], + "from": "account-hash-6464646464646464646464646464646464646464646464646464646464646464", + "source": "uref-0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a-007", + "gas": "2500000000" + } + } + } + ], + "output": 
"063737373737373737373737373737373737373737373737373737373737373737020000000101010101010101010101010101010101010101010101010101010101010101020202020202020202020202020202020202020202020202020202020202020264646464646464646464646464646464646464646464646464646464646464640a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a070400f90295" + }, + "EntryPoint": { + "input": [ + { + "type": "StoredValue", + "value": { + "EntryPoint": { + "V1CasperVm": { + "name": "public_entry_point_func", + "args": [ + { + "name": "param1", + "cl_type": "U512" + }, + { + "name": "param2", + "cl_type": "String" + } + ], + "ret": "Unit", + "access": "Public", + "entry_point_type": "Called", + "entry_point_payment": "Caller" + } + } + } + } + ], + "output": "1300170000007075626c69635f656e7472795f706f696e745f66756e630200000006000000706172616d310806000000706172616d320a09010100" + }, + "EraInfo": { + "input": [ + { + "type": "StoredValue", + "value": { + "EraInfo": { + "seigniorage_allocations": [ + { + "Validator": { + "validator_public_key": "01197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d61", + "amount": "1000000000" + } + }, + { + "Delegator": { + "delegator_public_key": "0202bb58b5feca505c74edc000d8282fc556e51a1024fc8e7d7e56c6f887c5c8d5f2", + "validator_public_key": "01197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d61", + "amount": "1000000000" + } + }, + { + "DelegatorKind": { + "delegator_kind": { + "PublicKey": "0202bb58b5feca505c74edc000d8282fc556e51a1024fc8e7d7e56c6f887c5c8d5f2" + }, + "validator_public_key": "01197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d61", + "amount": "1000000000" + } + } + ] + } + } + } + ], + "output": 
"07030000000001197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d610400ca9a3b010202bb58b5feca505c74edc000d8282fc556e51a1024fc8e7d7e56c6f887c5c8d5f201197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d610400ca9a3b02000202bb58b5feca505c74edc000d8282fc556e51a1024fc8e7d7e56c6f887c5c8d5f201197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d610400ca9a3b" + }, + "SmartContract": { + "input": [ + { + "type": "StoredValue", + "value": { + "SmartContract": { + "versions": [ + { + "entity_version_key": { + "protocol_version_major": 1, + "entity_version": 1 + }, + "entity_addr": "entity-contract-6464646464646464646464646464646464646464646464646464646464646464" + }, + { + "entity_version_key": { + "protocol_version_major": 1, + "entity_version": 2 + }, + "entity_addr": "entity-contract-6363636363636363636363636363636363636363636363636363636363636363" + } + ], + "disabled_versions": [ + { + "protocol_version_major": 1, + "entity_version": 1 + } + ], + "groups": [ + { + "group_name": "Empty", + "group_users": [] + }, + { + "group_name": "Single", + "group_users": [ + "uref-3737373737373737373737373737373737373737373737373737373737373737-001" + ] + } + ], + "lock_status": "Locked" + } + } + } + ], + "output": "0c02000000010000000100000002646464646464646464646464646464646464646464646464646464646464646401000000020000000263636363636363636363636363636363636363636363636363636363636363630100000001000000010000000200000005000000456d707479000000000600000053696e676c650100000037373737373737373737373737373737373737373737373737373737373737370101" + }, + "Transfer": { + "input": [ + { + "type": "StoredValue", + "value": { + "Transfer": { + "deploy_hash": "2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c", + "from": "account-hash-6464646464646464646464646464646464646464646464646464646464646464", + "to": "account-hash-6565656565656565656565656565656565656565656565656565656565656565", + "source": 
"uref-0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a-002", + "target": "uref-0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b-002", + "amount": "15000000000", + "gas": "2500000000", + "id": 1 + } + } + } + ], + "output": "052c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c64646464646464646464646464646464646464646464646464646464646464640165656565656565656565656565656565656565656565656565656565656565650a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a020b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b020500d6117e030400f90295010100000000000000" + }, + "Unbonding": { + "input": [ + { + "type": "StoredValue", + "value": { + "Unbonding": [ + { + "bonding_purse": "uref-0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a-001", + "validator_public_key": "01197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d61", + "unbonder_public_key": "01197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d61", + "era_of_creation": 41, + "amount": "60000000000", + "new_validator": null + }, + { + "bonding_purse": "uref-0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b-001", + "validator_public_key": "01197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d61", + "unbonder_public_key": "0202bb58b5feca505c74edc000d8282fc556e51a1024fc8e7d7e56c6f887c5c8d5f2", + "era_of_creation": 42, + "amount": "50000000000", + "new_validator": null + } + ] + } + } + ], + "output": "0a020000000a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0101197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d6101197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d61290000000000000005005847f80d000b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0101197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d610202bb58b5feca505c74edc000d8282fc556e51a1024fc8e7d7e56c6f887c5c8d5f22a000000000000000500743ba40b00" + }, + "UnifiedBid": { + "input": 
[ + { + "type": "StoredValue", + "value": { + "BidKind": { + "Unified": { + "validator_public_key": "01197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d61", + "bonding_purse": "uref-0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a-007", + "staked_amount": "50000000000", + "delegation_rate": 100, + "vesting_schedule": { + "initial_release_timestamp_millis": 18446744073709551615, + "locked_amounts": null + }, + "delegators": [ + { + "delegator_public_key": "0202bb58b5feca505c74edc000d8282fc556e51a1024fc8e7d7e56c6f887c5c8d5f2", + "delegator": { + "delegator_public_key": "0202bb58b5feca505c74edc000d8282fc556e51a1024fc8e7d7e56c6f887c5c8d5f2", + "staked_amount": "1000000000", + "bonding_purse": "uref-0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b-007", + "validator_public_key": "01197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d61", + "vesting_schedule": { + "initial_release_timestamp_millis": 18446744073709551615, + "locked_amounts": null + } + } + } + ], + "inactive": false + } + } + } + } + ], + "output": "0b0001197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d610a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a070500743ba40b6401ffffffffffffffff00010000000202bb58b5feca505c74edc000d8282fc556e51a1024fc8e7d7e56c6f887c5c8d5f20202bb58b5feca505c74edc000d8282fc556e51a1024fc8e7d7e56c6f887c5c8d5f20400ca9a3b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0701197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d6101ffffffffffffffff0000" + }, + "ValidatorBid": { + "input": [ + { + "type": "StoredValue", + "value": { + "BidKind": { + "Validator": { + "validator_public_key": "01197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d61", + "bonding_purse": "uref-0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a-007", + "staked_amount": "50000000000", + "delegation_rate": 100, + "vesting_schedule": { + "initial_release_timestamp_millis": 
18446744073709551615, + "locked_amounts": null + }, + "inactive": false, + "minimum_delegation_amount": 0, + "maximum_delegation_amount": 18446744073709551615, + "reserved_slots": 0 + } + } + } + } + ], + "output": "0b0101197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d610a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a070500743ba40b6401ffffffffffffffff00000000000000000000ffffffffffffffff00000000" + }, + "Withdraw": { + "input": [ + { + "type": "StoredValue", + "value": { + "Withdraw": [ + { + "bonding_purse": "uref-0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a-001", + "validator_public_key": "01197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d61", + "unbonder_public_key": "01197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d61", + "era_of_creation": 41, + "amount": "60000000000" + }, + { + "bonding_purse": "uref-0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b-001", + "validator_public_key": "01197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d61", + "unbonder_public_key": "0202bb58b5feca505c74edc000d8282fc556e51a1024fc8e7d7e56c6f887c5c8d5f2", + "era_of_creation": 42, + "amount": "50000000000" + } + ] + } + } + ], + "output": "09020000000a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0101197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d6101197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d61290000000000000005005847f80d0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0101197f6b23e16c8532c6abc838facd5ea789be0c76b2920334039bfa8b3d368d610202bb58b5feca505c74edc000d8282fc556e51a1024fc8e7d7e56c6f887c5c8d5f22a000000000000000500743ba40b" + } +} \ No newline at end of file diff --git a/utils/validation/tests/validation_test.rs b/utils/validation/tests/validation_test.rs new file mode 100644 index 0000000000..3cb8504461 --- /dev/null +++ b/utils/validation/tests/validation_test.rs @@ -0,0 +1,82 @@ +use std::{ + env, + 
path::{Path, PathBuf}, +}; + +use anyhow::bail; + +use casper_validation::{abi::ABIFixture, error::Error, test_case::TestCase, Fixture}; + +type TestPair = (String, Box); + +fn get_fixtures_path() -> PathBuf { + let mut path = Path::new(env!("CARGO_MANIFEST_DIR")).to_path_buf(); + path.push("tests"); + path.push("fixtures"); + path +} + +fn prog() -> Option { + let first_arg = env::args().next()?; + let path = Path::new(&first_arg); + let filename = path.file_name()?.to_str()?; + let prog_name = match filename.split('-').next() { + Some(name) => name, + None => filename, + }; + Some(prog_name.to_string()) +} + +fn make_abi_tests(test_name: &str, test_fixture: ABIFixture) -> Vec { + let prog_name = prog().expect("should get exe"); + + let mut tests = Vec::with_capacity(test_fixture.len()); + + for (test_case, data) in test_fixture.into_inner() { + // validation_test::fixture_file_name::test_case + let desc = format!("{}::{}::{}", prog_name, test_name, test_case); + + tests.push((desc, Box::new(data) as Box)); + } + + tests +} + +fn make_test_cases() -> Result, Error> { + let fixtures = get_fixtures_path(); + let test_fixtures = casper_validation::load_fixtures(&fixtures)?; + + let mut tests = Vec::new(); + + for test_fixture in test_fixtures { + match test_fixture { + Fixture::ABI { + name, + fixture: abi_test_case, + } => tests.append(&mut make_abi_tests(&name, abi_test_case)), + } + } + + Ok(tests) +} + +fn main() -> anyhow::Result<()> { + let mut failed_tests = Vec::new(); + + for (name, test_case) in make_test_cases()? { + print!("{}... 
", name); + match test_case.run_test() { + Ok(()) => println!("OK"), + Err(error) => { + println!("ERROR: {}", error); + failed_tests.push(name); + } + } + } + + if !failed_tests.is_empty() { + bail!("List of failed tests: {:?}", failed_tests); + } + + Ok(()) +} diff --git a/vm2-build-contracts.sh b/vm2-build-contracts.sh new file mode 100755 index 0000000000..a060c47830 --- /dev/null +++ b/vm2-build-contracts.sh @@ -0,0 +1,40 @@ +#!/bin/bash +set -e + +VM2_BINS=( + "vm2-harness" + "vm2-cep18-caller" +) + +VM2_LIBS=( + "vm2-trait" + "vm2-cep18" + "vm2-flipper" + "vm2-upgradable" + "vm2-upgradable-v2" + "vm2-legacy-counter-proxy" + "vm2-host" +) + + +for contract in "${VM2_LIBS[@]}" +do + pushd smart_contracts/contracts/vm2/$contract/ + pwd + cargo build --target wasm32-unknown-unknown -p $contract --release + popd +done + +for contract in "${VM2_BINS[@]}" +do + pushd smart_contracts/contracts/vm2/$contract/ + pwd + cargo build --target wasm32-unknown-unknown -p $contract --release + popd +done + +echo "Stripping linked wasm" +for wasm in executor/wasm/*.wasm; do + echo "Stripping $wasm" + wasm-strip $wasm +done diff --git a/vm2_cargo_casper/Cargo.toml b/vm2_cargo_casper/Cargo.toml new file mode 100644 index 0000000000..19d5059a98 --- /dev/null +++ b/vm2_cargo_casper/Cargo.toml @@ -0,0 +1,21 @@ +[package] +name = "vm2-cargo-casper" +version = "0.1.0" +edition = "2021" + +[dependencies] +casper-contract-sdk-sys = { path = "../smart_contracts/sdk_sys" } +casper-contract-sdk = { path = "../smart_contracts/sdk", features = ["__abi_generator"] } +clap = { version = "4.4.11", features = ["derive"] } +clap-cargo = { version = "0.14.0", features = ["cargo_metadata"] } +libloading = "0.8.6" +include_dir = "0.7.4" +anyhow = "1.0.86" +serde = { version = "1.0", features = ["derive"] } +serde_json = { version = "1.0.140" } +cargo_metadata = "0.19.2" +wabt = "0.10.0" +once_cell = "1.21.3" +crossterm = "0.29.0" +thiserror = "2.0.12" +atty = "0.2.14" diff --git 
a/vm2_cargo_casper/build.rs b/vm2_cargo_casper/build.rs new file mode 100644 index 0000000000..e1447f553c --- /dev/null +++ b/vm2_cargo_casper/build.rs @@ -0,0 +1,12 @@ +use std::env; + +fn main() { + match env::var("TARGET") { + Ok(target) => { + println!("cargo:rustc-env=TARGET={}", target); + } + Err(_) => { + println!("cargo:warning=Failed to obtain target triple"); + } + } +} diff --git a/vm2_cargo_casper/project_template/Cargo.toml b/vm2_cargo_casper/project_template/Cargo.toml new file mode 100644 index 0000000000..7de8efd575 --- /dev/null +++ b/vm2_cargo_casper/project_template/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "project-template" +version = "0.1.0" +edition = "2021" + +[lib] +crate-type = ["cdylib", "rlib"] + +[dependencies] +casper-contract-macros = "0.1.0" +casper-contract-sdk = "0.1.0" \ No newline at end of file diff --git a/vm2_cargo_casper/project_template/src/lib.rs b/vm2_cargo_casper/project_template/src/lib.rs new file mode 100644 index 0000000000..4eeae37dce --- /dev/null +++ b/vm2_cargo_casper/project_template/src/lib.rs @@ -0,0 +1,51 @@ +#![cfg_attr(target_arch = "wasm32", no_main)] +#![cfg_attr(target_arch = "wasm32", no_std)] + +use casper_contract_sdk::prelude::*; + +#[casper(contract_state)] +pub struct Contract { + counter: u64, +} + +impl Default for Contract { + fn default() -> Self { + panic!("Unable to instantiate contract without a constructor!"); + } +} + +#[casper] +impl Contract { + #[casper(constructor)] + pub fn new() -> Self { + Self { counter: 0 } + } + + #[casper(constructor)] + pub fn default() -> Self { + Self::new() + } + + pub fn increase(&mut self) { + self.counter += 1; + } + + pub fn get(&self) -> u64 { + self.counter + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_counter() { + let mut counter = Contract::new(); + assert_eq!(counter.get(), 0); + counter.increase(); + assert_eq!(counter.get(), 1); + counter.increase(); + assert_eq!(counter.get(), 2); + } +} diff --git 
a/vm2_cargo_casper/src/cli.rs b/vm2_cargo_casper/src/cli.rs new file mode 100644 index 0000000000..9a1da09c57 --- /dev/null +++ b/vm2_cargo_casper/src/cli.rs @@ -0,0 +1,72 @@ +use std::{ + io, + path::{Path, PathBuf}, +}; + +use clap::Subcommand; +use include_dir::{Dir, DirEntry}; + +pub mod build; +pub mod build_schema; +pub mod new; + +/// Writes the binary-embedded directory into a filesystem directory. +/// Returns the path to the extracted dir. +pub(crate) fn extract_embedded_dir(target: &Path, dir: &Dir) -> io::Result { + // Ensure the target directory exists. + std::fs::create_dir_all(target)?; + + // Iterate over each entry in the directory. + for entry in dir.entries() { + match entry { + DirEntry::File(file) => { + let file_path = target.join(file.path()); + if let Some(parent) = file_path.parent() { + std::fs::create_dir_all(parent)?; + } + std::fs::write(file_path, file.contents())?; + } + DirEntry::Dir(sub_dir) => { + extract_embedded_dir(target, sub_dir)?; + } + } + } + + Ok(target.into()) +} + +#[derive(Debug, Subcommand)] +pub(crate) enum Command { + /// Build the JSON schema of the contract. + BuildSchema { + /// Where should the build artifacts be saved? + #[arg(short, long)] + output: Option, + /// The cargo workspace + #[command(flatten)] + workspace: clap_cargo::Workspace, + }, + /// Build the contract with its JSON schema embedded. + Build { + /// Where should the build artifacts be saved? + #[arg(short, long)] + output: Option, + /// Should the schema be embedded and exposed in the contract? (Default: true) + #[arg(short, long)] + embed_schema: Option, + /// The cargo workspace + #[command(flatten)] + workspace: clap_cargo::Workspace, + }, + /// Creates a new VM2 smart contract project from a template. 
+ New { + /// Name of the project to create + name: String, + }, +} + +#[derive(Debug, clap::Parser)] +pub(crate) struct Cli { + #[command(subcommand)] + pub command: Command, +} diff --git a/vm2_cargo_casper/src/cli/build.rs b/vm2_cargo_casper/src/cli/build.rs new file mode 100644 index 0000000000..bca029a1d4 --- /dev/null +++ b/vm2_cargo_casper/src/cli/build.rs @@ -0,0 +1,96 @@ +use std::{io::Cursor, path::PathBuf, process::Command}; + +use anyhow::Context; + +use crate::compilation::CompileJob; + +/// The `build` subcommand flow. +pub fn build_impl( + package_name: Option<&str>, + output_dir: Option, + embed_schema: bool, +) -> Result<(), anyhow::Error> { + // Build the contract package targetting wasm32-unknown-unknown without + // extra feature flags - this is the production contract wasm file. + // + // Optionally (but by default) create an entrypoint in the wasm that will have + // embedded schema JSON file for discoverability (aka internal schema). + let production_wasm_path = if embed_schema { + // Build the schema first + let mut buffer = Cursor::new(Vec::new()); + super::build_schema::build_schema_impl(package_name, &mut buffer) + .context("Failed to build contract schema")?; + + let contract_schema = + String::from_utf8(buffer.into_inner()).context("Failed to read contract schema")?; + + // Build the contract with above schema injected + eprintln!("🔨 Step 2: Building contract with schema injected..."); + let production_wasm_path = CompileJob::new( + package_name, + None, + vec![("__CARGO_CASPER_INJECT_SCHEMA_MARKER", &contract_schema)], + ) + .dispatch( + "wasm32-unknown-unknown", + ["casper-contract-sdk/__embed_schema"], + ) + .context("Failed to compile user wasm")? 
+ .get_artifact_by_extension("wasm") + .context("Build artifacts for contract wasm didn't include a wasm file")?; + + // Write the schema next to the wasm + let schema_file_path = production_wasm_path.with_extension("json"); + + std::fs::create_dir_all(schema_file_path.parent().unwrap()) + .context("Failed creating directory for wasm schema")?; + + std::fs::write(&schema_file_path, contract_schema) + .context("Failed writing contract schema")?; + + production_wasm_path + } else { + // Compile and move to specified output directory + eprintln!("🔨 Step 2: Building contract..."); + CompileJob::new(package_name, None, vec![]) + .dispatch("wasm32-unknown-unknown", Option::::None) + .context("Failed to compile user wasm")? + .get_artifact_by_extension("wasm") + .context("Failed extracting build artifacts to directory")? + }; + + // Run wasm optimizations passes that will shrink the size of the wasm. + eprintln!("🔨 Step 3: Applying optimizations..."); + Command::new("wasm-strip") + .args([&production_wasm_path]) + .spawn() + .context("Failed to execute wasm-strip command. Is wabt installed?")?; + + // Move to output_dir if specified + let mut out_wasm_path = production_wasm_path.clone(); + let mut out_schema_path = None; + + if let Some(output_dir) = output_dir { + out_wasm_path = output_dir + .join(out_wasm_path.file_stem().unwrap()) + .with_extension("wasm"); + std::fs::rename(&production_wasm_path, &out_wasm_path) + .context("Couldn't write to the specified output directory.")?; + } + + if embed_schema { + out_schema_path = Some(out_wasm_path.with_extension("json")); + let production_schema_path = production_wasm_path.with_extension("json"); + std::fs::rename(&production_schema_path, out_schema_path.as_ref().unwrap()) + .context("Couldn't write to the specified output directory.")?; + } + + // Report paths + eprintln!("✅ Completed. 
Build artifacts:"); + eprintln!("{:?}", out_wasm_path.canonicalize()?); + if let Some(schema_path) = out_schema_path { + eprintln!("{:?}", schema_path.canonicalize()?); + } + + Ok(()) +} diff --git a/vm2_cargo_casper/src/cli/build_schema.rs b/vm2_cargo_casper/src/cli/build_schema.rs new file mode 100644 index 0000000000..40d84a48c4 --- /dev/null +++ b/vm2_cargo_casper/src/cli/build_schema.rs @@ -0,0 +1,87 @@ +mod artifact; + +use std::{env::consts::DLL_EXTENSION, ffi::OsStr, io::Write, path::PathBuf}; + +use anyhow::Context; +use artifact::Artifact; +use cargo_metadata::MetadataCommand; + +use crate::compilation::CompileJob; + +/// The `build-schema` subcommand flow. The schema is written to the specified +/// [`Write`] implementer. +pub fn build_schema_impl( + package_name: Option<&str>, + output_writer: &mut W, +) -> Result<(), anyhow::Error> { + // Compile contract package to a native library with extra code that will + // produce ABI information including entrypoints, types, etc. + eprintln!("🔨 Step 1: Building contract schema..."); + + let rustflags = { + let current = std::env::var("RUSTFLAGS").unwrap_or_default(); + format!("-C link-dead-code {current}") + }; + + let compilation = CompileJob::new(package_name, None, vec![("RUSTFLAGS", &rustflags)]); + + // Get all of the direct user contract dependencies. + // + // This is a naive approach -- if a dep is feature gated, it won't be resolved correctly. + // In practice, we only care about casper-contract-sdk and casper-macros being used, and there + // is little to no reason to feature gate them. So this approach should be good enough. 
+ let dependencies: Vec = { + let metadata = MetadataCommand::new().exec()?; + + // Find the root package (the one whose manifest path matches our Cargo.toml) + let package = match package_name { + Some(package_name) => metadata + .packages + .iter() + .find(|p| p.name == package_name) + .context("Root package not found in metadata")?, + None => { + let manifest_path_target = PathBuf::from("./Cargo.toml").canonicalize()?; + metadata + .packages + .iter() + .find(|p| p.manifest_path.canonicalize().unwrap() == manifest_path_target) + .context("Root package not found in metadata")? + } + }; + + // Extract the direct dependency names from the package. + package + .dependencies + .iter() + .map(|dep| dep.name.clone()) + .collect() + }; + + // Determine extra features based on the dependencies detected + let mut features = Vec::new(); + + if dependencies.contains(&"casper-contract-sdk".into()) { + features.push("casper-contract-sdk/__abi_generator".to_owned()); + } + + if dependencies.contains(&"casper-macros".into()) { + features.push("casper-macros/__abi_generator".to_owned()); + } + + let build_result = compilation + .dispatch(env!("TARGET"), &features) + .context("ABI-rich wasm compilation failure")?; + + // Extract ABI information from the built contract + let artifact_path = build_result + .artifacts() + .iter() + .find(|x| x.extension() == Some(OsStr::new(DLL_EXTENSION))) + .context("Failed loading the built contract")?; + + let artifact = Artifact::from_path(artifact_path).context("Load library")?; + let collected = artifact.collect_schema().context("Collect schema")?; + serde_json::to_writer(output_writer, &collected).context("Serialize collected schema")?; + Ok(()) +} diff --git a/vm2_cargo_casper/src/cli/build_schema/artifact.rs b/vm2_cargo_casper/src/cli/build_schema/artifact.rs new file mode 100644 index 0000000000..f7563cc84d --- /dev/null +++ b/vm2_cargo_casper/src/cli/build_schema/artifact.rs @@ -0,0 +1,40 @@ +use std::{mem::MaybeUninit, path::Path}; + 
+use libloading::{Library, Symbol}; + +const COLLECT_SCHEMA_FUNC: &str = "__cargo_casper_collect_schema"; + +type CollectSchema = unsafe extern "C" fn(size_ptr: *mut u64) -> *mut u8; + +pub(crate) struct Artifact { + library: Library, +} + +impl Artifact { + pub(crate) fn from_path>( + artifact_path: P, + ) -> Result { + let library = unsafe { libloading::Library::new(artifact_path.as_ref()) }?; + + Ok(Self { library }) + } + + /// Collects schema from the built artifact. + /// + /// This returns a [`serde_json::Value`] to skip validation of a `Schema` object structure which + /// (in theory) can differ. + pub(crate) fn collect_schema(&self) -> serde_json::Result { + let collect_schema: Symbol = + unsafe { self.library.get(COLLECT_SCHEMA_FUNC.as_bytes()).unwrap() }; + + let json_bytes = { + let mut value = MaybeUninit::uninit(); + let leaked_json_bytes = unsafe { collect_schema(value.as_mut_ptr()) }; + let size = unsafe { value.assume_init() }; + let length: usize = size.try_into().unwrap(); + unsafe { Vec::from_raw_parts(leaked_json_bytes, length, length) } + }; + + serde_json::from_slice(&json_bytes) + } +} diff --git a/vm2_cargo_casper/src/cli/new.rs b/vm2_cargo_casper/src/cli/new.rs new file mode 100644 index 0000000000..cfe7a67259 --- /dev/null +++ b/vm2_cargo_casper/src/cli/new.rs @@ -0,0 +1,31 @@ +use std::path::PathBuf; + +use anyhow::Context; + +use include_dir::{include_dir, Dir}; + +static TEMPLATE_DIR: Dir = include_dir!("$CARGO_MANIFEST_DIR/project_template"); +const TEMPLATE_NAME_MARKER: &str = "project_template"; + +/// The `new` subcommand flow. 
+pub fn new_impl(name: &str) -> Result<(), anyhow::Error> {
+    // Normalize the project name: lowercase, whitespace runs become single dashes.
+    let name = name
+        .trim() // NOTE(review): redundant — split_whitespace already ignores edge whitespace
+        .to_lowercase()
+        .split_whitespace()
+        .collect::<Vec<_>>()
+        .join("-");
+
+    let template_dir = super::extract_embedded_dir(&PathBuf::from(&name), &TEMPLATE_DIR)
+        .context("Failed extracting template directory")?;
+
+    let toml_path = template_dir.join("Cargo.toml");
+
+    // Substitute the placeholder package name with the user-supplied one.
+    let toml_content = std::fs::read_to_string(&toml_path)
+        .context("Failed reading template Cargo.toml file")?
+        .replace(TEMPLATE_NAME_MARKER, &name);
+
+    std::fs::write(toml_path, toml_content).context("Failed updating template Cargo.toml file")?;
+
+    Ok(())
+}
diff --git a/vm2_cargo_casper/src/compilation.rs b/vm2_cargo_casper/src/compilation.rs
new file mode 100644
index 0000000000..a1739b5488
--- /dev/null
+++ b/vm2_cargo_casper/src/compilation.rs
@@ -0,0 +1,159 @@
+use std::{
+    ffi::OsStr,
+    path::PathBuf,
+    process::{Command, Stdio},
+};
+
+use anyhow::{anyhow, Result};
+
+use crate::utils::command_runner::{self, DEFAULT_MAX_LINES};
+
+/// Represents a job to compile a Cargo project.
+pub(crate) struct CompileJob<'a> {
+    package_name: Option<&'a str>,
+    features: Vec<String>,
+    env_vars: Vec<(&'a str, &'a str)>,
+    in_dir: Option<PathBuf>,
+}
+
+impl<'a> CompileJob<'a> {
+    /// Creates a new compile job with the given manifest path, optional features,
+    /// and environmental variables.
+    pub fn new(
+        package_name: Option<&'a str>,
+        features: Option<Vec<String>>,
+        env_vars: Vec<(&'a str, &'a str)>,
+    ) -> Self {
+        Self {
+            package_name,
+            features: features.unwrap_or_default(),
+            env_vars,
+            in_dir: None,
+        }
+    }
+
+    /// Dispatches the compilation job. This builds the Cargo project into a temporary target
+    pub fn dispatch<T, I, S>(&self, target: T, extra_features: I) -> Result<CompilationResults>
+    where
+        T: Into<String>,
+        I: IntoIterator<Item = S>,
+        S: Into<String>,
+    {
+        let target: String = target.into();
+
+        // Merge the configured features with any extra features
+        let mut features = self.features.clone();
+        features.extend(extra_features.into_iter().map(Into::into));
+        let features_str = features.join(",");
+
+        let mut build_args = vec!["build"];
+
+        if let Some(package_name) = self.package_name {
+            build_args.push("-p");
+            build_args.push(package_name);
+        }
+
+        build_args.extend_from_slice(&[
+            "--target",
+            target.as_str(),
+            "--features",
+            &features_str,
+            "--lib",
+            "--release",
+            "--color=always",
+            "--message-format=json-diagnostic-rendered-ansi",
+        ]);
+
+        // Run the cargo build command and capture the output
+        let mut command = Command::new("cargo");
+        command.args(&build_args);
+        command.stdout(Stdio::piped());
+        command.stderr(Stdio::piped());
+        for (key, value) in &self.env_vars {
+            command.env(key, value);
+        }
+
+        if let Some(in_directory) = &self.in_dir {
+            command.current_dir(in_directory);
+        }
+
+        // Run the process and capture the output from both stdout and stderr.
+        let handle = command_runner::run_process(&mut command)?;
+
+        let mut log_trail = command_runner::LogTrailBuilder::new()
+            .max_lines(DEFAULT_MAX_LINES)
+            .interactive(command_runner::Interactive::Auto)
+            .build();
+        let mut artifacts = Vec::new();
+        for line in &handle.receiver {
+            match line {
+                command_runner::Line::Stdout(line) => {
+                    match serde_json::from_str::<cargo_metadata::Message>(&line)
+                        .expect("Parse") // NOTE(review): panics on any non-JSON stdout line — TODO confirm cargo never emits one here
+                    {
+                        cargo_metadata::Message::CompilerArtifact(artifact) => {
+                            // Keep only final artifacts; files under `deps/` are intermediates.
+                            for artifact in &artifact.filenames {
+                                let path = PathBuf::from(artifact);
+                                if path
+                                    .parent()
+                                    .and_then(|p| p.file_name())
+                                    .and_then(OsStr::to_str)
+                                    != Some("deps")
+                                {
+                                    artifacts.push(path);
+                                }
+                            }
+                        }
+                        cargo_metadata::Message::CompilerMessage(compiler_message) => {
+                            log_trail.push_line(compiler_message.to_string())?;
+                        }
+                        cargo_metadata::Message::BuildScriptExecuted(_build_script) => {}
+                        cargo_metadata::Message::BuildFinished(_build_finished) => {}
+                        cargo_metadata::Message::TextLine(text) => log_trail.push_line(text)?,
+                        _ => {} // `Message` is #[non_exhaustive]; ignore unknown variants instead of panicking
+                    }
+                }
+                command_runner::Line::Stderr(line) => {
+                    log_trail.push_line(line)?;
+                }
+            }
+        }
+
+        match handle.wait() {
+            Ok(()) => {
+                // Process completed successfully.
+            }
+            Err(command_runner::Outcome::Io(error)) => {
+                return Err(anyhow!("Cargo build failed with I/O error: {error}"));
+            }
+            Err(command_runner::Outcome::ErrorCode(code)) => {
+                return Err(anyhow!("Cargo build failed with error code: {code}"));
+            }
+            Err(command_runner::Outcome::Signal(signal)) => {
+                return Err(anyhow!("Cargo build was terminated by signal: {signal}"));
+            }
+        }
+
+        Ok(CompilationResults { artifacts })
+    }
+}
+
+/// Results of a compilation job.
+pub(crate) struct CompilationResults {
+    artifacts: Vec<PathBuf>,
+}
+
+impl CompilationResults {
+    /// Returns a slice of paths to the build artifacts.
+    pub fn artifacts(&self) -> &[PathBuf] {
+        &self.artifacts
+    }
+
+    /// Returns the first artifact whose file extension equals `extension`, if any.
+    pub fn get_artifact_by_extension(&self, extension: &str) -> Option<PathBuf> {
+        self.artifacts()
+            .iter()
+            .find(|x| x.extension().and_then(|y| y.to_str()) == Some(extension))
+            .map(|x| x.into())
+    }
+}
diff --git a/vm2_cargo_casper/src/main.rs b/vm2_cargo_casper/src/main.rs
new file mode 100644
index 0000000000..cd740b8ef2
--- /dev/null
+++ b/vm2_cargo_casper/src/main.rs
@@ -0,0 +1,39 @@
+use std::{fs::File, io::Write};
+
+use clap::Parser;
+use cli::{Cli, Command};
+
+pub(crate) mod cli;
+pub(crate) mod compilation;
+pub mod utils;
+
+fn main() -> anyhow::Result<()> {
+    let cli = Cli::parse();
+    match cli.command {
+        Command::BuildSchema { output, workspace } => {
+            // If user specified an output path, write there.
+            // Otherwise print to standard output.
+            let mut schema_writer: Box<dyn Write> = match output {
+                Some(path) => Box::new(File::create(path)?),
+                None => Box::new(std::io::stdout()),
+            };
+
+            // Select the package to build
+            let package_name = workspace.package.first().map(|x| x.as_str());
+
+            cli::build_schema::build_schema_impl(package_name, &mut schema_writer)?
+        }
+        Command::Build {
+            output,
+            embed_schema,
+            workspace,
+        } => {
+            // Select the package to build
+            let package_name = workspace.package.first().map(|x| x.as_str());
+
+            cli::build::build_impl(package_name, output, embed_schema.unwrap_or(true))?
+        }
+        Command::New { name } => cli::new::new_impl(&name)?,
+    }
+    Ok(())
+}
diff --git a/vm2_cargo_casper/src/utils.rs b/vm2_cargo_casper/src/utils.rs
new file mode 100644
index 0000000000..99eaeb9197
--- /dev/null
+++ b/vm2_cargo_casper/src/utils.rs
@@ -0,0 +1 @@
+pub mod command_runner;
diff --git a/vm2_cargo_casper/src/utils/command_runner.rs b/vm2_cargo_casper/src/utils/command_runner.rs
new file mode 100644
index 0000000000..def31057f8
--- /dev/null
+++ b/vm2_cargo_casper/src/utils/command_runner.rs
@@ -0,0 +1,314 @@
+use std::{
+    collections::VecDeque,
+    fmt::{Display, Formatter},
+    io::{self, BufRead, BufReader, Write},
+    os::unix::process::ExitStatusExt,
+    process::{Command, Stdio},
+    sync::mpsc,
+    thread,
+};
+
+use atty::Stream;
+use crossterm::{cursor, style, terminal, QueueableCommand};
+use thiserror::Error;
+
+/// Failure modes of a supervised subprocess.
+#[derive(Debug, Error)]
+pub enum Outcome {
+    #[error("Input/Output error: {0}")]
+    Io(#[from] io::Error),
+    #[error("Subprocess exited with error code: {0}")]
+    ErrorCode(i32),
+    #[error("Subprocess terminated by signal: {0}")]
+    Signal(i32),
+}
+
+/// One captured output line, tagged with the stream it came from.
+#[derive(Debug)]
+pub enum Line {
+    Stdout(String),
+    Stderr(String),
+}
+
+impl Display for Line {
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        match self {
+            Line::Stdout(text) => write!(f, "{}", text),
+            Line::Stderr(text) => write!(f, "{}", text),
+        }
+    }
+}
+
+/// Maximum number of lines to keep in the rolling log.
+pub const DEFAULT_MAX_LINES: usize = 10;
+
+/// Handle over a spawned child process plus the reader threads feeding `receiver`.
+#[derive(Debug)]
+pub struct ProcessHandle {
+    pub receiver: mpsc::Receiver<Line>,
+    pub stdout_thread_handle: thread::JoinHandle<()>,
+    pub stderr_thread_handle: thread::JoinHandle<()>,
+    pub child: std::process::Child,
+}
+
+impl ProcessHandle {
+    /// Joins both reader threads, then reaps the child and maps its exit status to an `Outcome`.
+    pub fn wait(mut self) -> Result<(), Outcome> {
+        // Ensure the reader threads have completed.
+        self.stdout_thread_handle
+            .join()
+            .expect("Stdout thread panicked");
+        self.stderr_thread_handle
+            .join()
+            .expect("Stderr thread panicked");
+
+        // Wait for the subprocess to finish.
+        let exit_status = self.child.wait().expect("Failed to wait on child process");
+
+        match exit_status.code() {
+            Some(code) => {
+                if code == 0 {
+                    // Subprocess completed successfully.
+                    Ok(())
+                } else {
+                    // Subprocess exited with error code.
+                    Err(Outcome::ErrorCode(code))
+                }
+            }
+            None => {
+                // Subprocess terminated by signal.
+                if let Some(signal) = exit_status.signal() {
+                    // Subprocess terminated by signal
+                    Err(Outcome::Signal(signal))
+                } else {
+                    unreachable!("Unexpected exit status: {:?}", exit_status);
+                }
+            }
+        }
+    }
+}
+
+/// Runs a subprocess and captures its output.
+///
+/// Returns a `ProcessHandle` that can be used to read the output and wait for the process to
+/// finish.
+///
+/// Lines captured are available in a `receiver` attribute and can be piped to a `LogTrail`
+/// instance.
+pub fn run_process(command: &mut Command) -> io::Result<ProcessHandle> {
+    // Spawn the subprocess with stdout and stderr piped.
+    let mut child = command
+        .stdout(Stdio::piped())
+        .stderr(Stdio::piped())
+        .spawn()?;
+
+    // Take the stdout and stderr handles.
+    let stdout_pipe = child.stdout.take().expect("Failed to capture stdout");
+    let stderr_pipe = child.stderr.take().expect("Failed to capture stderr");
+
+    // Create a channel to receive lines from both stdout and stderr.
+    let (tx, rx) = mpsc::channel();
+
+    // Spawn a thread to read stdout.
+    let stdout_thread = thread::spawn({
+        let tx = tx.clone();
+
+        move || {
+            let reader = BufReader::new(stdout_pipe);
+            for line in reader.lines() {
+                if let Ok(line_text) = line {
+                    // If send fails, the main thread is likely gone.
+                    if tx.send(Line::Stdout(line_text)).is_err() {
+                        break;
+                    }
+                } else {
+                    break;
+                }
+            }
+        }
+    });
+
+    // Spawn a second thread to read stderr.
+    let stderr_thread = thread::spawn({
+        let tx_err = tx.clone();
+        move || {
+            let reader = BufReader::new(stderr_pipe);
+            for line in reader.lines() {
+                if let Ok(line_text) = line {
+                    if tx_err.send(Line::Stderr(line_text)).is_err() {
+                        break;
+                    }
+                } else {
+                    break;
+                }
+            }
+        }
+    });
+
+    // Drop the extra sender so that the channel closes when both threads finish.
+    drop(tx);
+
+    Ok(ProcessHandle {
+        receiver: rx,
+        stdout_thread_handle: stdout_thread,
+        stderr_thread_handle: stderr_thread,
+        child,
+    })
+}
+
+/// Enum representing the interactive mode for the log trail.
+pub enum Interactive {
+    /// Program will figure out whether logs can be printed interactively.
+    Auto,
+    /// Interactive mode is enabled.
+    Yes,
+    /// Interactive mode is disabled.
+    No,
+}
+
+impl Interactive {
+    /// Check if the interactive mode is enabled.
+    pub fn is_enabled(&self) -> bool {
+        match self {
+            Interactive::Auto => atty::is(Stream::Stdout),
+            Interactive::Yes => true,
+            Interactive::No => false,
+        }
+    }
+}
+
+/// A stateful log trail that maintains a rolling window of log lines.
+pub struct LogTrail {
+    max_lines: usize,
+    interactive: Interactive,
+    current_lines: VecDeque<String>,
+    printed_lines: usize,
+    stdout: std::io::Stdout,
+}
+
+impl LogTrail {
+    /// Create a new LogTrail.
+    ///
+    /// * `max_lines` specifies how many lines to keep in the rolling window.
+    /// * `interactive` selects the dynamic updating behavior (e.g. when
+    ///   running in a terminal).
+    pub fn new(max_lines: usize, interactive: Interactive) -> Self {
+        Self {
+            max_lines,
+            interactive,
+            current_lines: VecDeque::with_capacity(max_lines),
+            printed_lines: 0,
+            stdout: io::stdout(),
+        }
+    }
+
+    /// Push a new line into the log trail.
+    ///
+    /// This method tracks the line numbering and either updates the dynamic window (if interactive)
+    /// or prints the new line immediately.
+    pub fn push_line<S: Into<String>>(&mut self, line: S) -> io::Result<()> {
+        let line_text = line.into();
+        if self.interactive.is_enabled() {
+            // Maintain a rolling window of at most max_lines.
+            if self.current_lines.len() == self.max_lines {
+                self.current_lines.pop_front();
+            }
+            self.current_lines.push_back(line_text);
+            // Move the cursor back up over the previously printed window so it can be redrawn.
+            if self.printed_lines > 0 {
+                self.stdout
+                    .queue(cursor::MoveUp(self.printed_lines as u16))?; // NOTE(review): truncating cast — bounded by max_lines in practice, confirm callers keep it < u16::MAX
+            }
+            // Clear everything from the current cursor position downward.
+            self.stdout
+                .queue(terminal::Clear(terminal::ClearType::FromCursorDown))?;
+
+            // Reprint the rolling buffer, one line per entry.
+            for text in self.current_lines.iter() {
+                self.stdout.queue(style::Print(text))?;
+                self.stdout.queue(style::Print("\n"))?;
+            }
+            self.printed_lines = self.current_lines.len();
+        } else {
+            // In non-interactive mode simply print the line.
+            self.stdout.queue(style::Print(line_text))?;
+            self.stdout.queue(style::Print("\n"))?;
+        }
+        self.stdout.flush()?;
+        Ok(())
+    }
+}
+
+/// Builder for creating a `LogTrail` instance.
+#[derive(Default)]
+pub struct LogTrailBuilder {
+    max_lines: Option<usize>,
+    interactive: Option<Interactive>,
+}
+
+impl LogTrailBuilder {
+    /// Creates a new builder with default values.
+    pub fn new() -> Self {
+        Self::default()
+    }
+
+    /// Sets the maximum number of lines for the rolling log.
+    pub fn max_lines(mut self, max_lines: usize) -> Self {
+        self.max_lines = Some(max_lines);
+        self
+    }
+
+    /// Sets whether the log trail should be interactive.
+    pub fn interactive(mut self, interactive: Interactive) -> Self {
+        self.interactive = Some(interactive);
+        self
+    }
+
+    /// Builds the `LogTrail` instance.
+    pub fn build(self) -> LogTrail {
+        let max_lines = self.max_lines.expect("Max lines must be set");
+        let interactive = self.interactive.expect("Interactive mode must be set");
+        LogTrail::new(max_lines, interactive)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_run_process() {
+        // This test will run the `echo` command, which should always succeed.
+        let result = run_process(Command::new("echo").args(["Hello, world!"]))
+            .expect("Failed to run process");
+        assert!(result.wait().is_ok());
+    }
+
+    #[test]
+    fn test_run_interactive_process() {
+        // This test will run the `echo` command, which should always succeed.
+        let result = run_process(Command::new("echo").args(["Hello, world!"]))
+            .expect("Failed to run process");
+        assert!(result.wait().is_ok());
+    }
+
+    #[test]
+    fn test_run_process_failure() {
+        // This test will run a non-existent command, which should fail.
+        let result = run_process(&mut Command::new("non_existent_command"))
+            .expect_err("Failed to run process");
+        assert_eq!(result.kind(), io::ErrorKind::NotFound);
+    }
+
+    #[test]
+    fn test_run_process_with_env() {
+        // This test will run the `env` command to print environment variables.
+        let handle = run_process(Command::new("env").envs([("TEST_VAR", "test_value")]))
+            .expect("Failed to run process");
+
+        let captured_lines: Vec<String> = handle
+            .receiver
+            .into_iter()
+            .map(|line| line.to_string())
+            .collect();
+        let output = captured_lines.join("\n");
+        assert!(output.contains("TEST_VAR=test_value"));
+    }
+}
diff --git a/vm2_cargo_casper/test.py b/vm2_cargo_casper/test.py
new file mode 100755
index 0000000000..640f1236a7
--- /dev/null
+++ b/vm2_cargo_casper/test.py
@@ -0,0 +1,18 @@
+#!/usr/bin/env python
+import itertools
+import time
+import sys
+import os
+
+for a in itertools.count():
+    if a % 2 == 0:
+        print(f'line {a}')
+    else:
+        print(f'error line {a}', file=sys.stderr)
+    time.sleep(0.05)
+    if a == 44:
+        os.kill(os.getpid(), 9)
+    if a > 100:
+        break
+
+print('Goodbye')